hexsha
stringlengths 40
40
| size
int64 10
805k
| ext
stringclasses 6
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
176
| max_stars_repo_name
stringlengths 7
114
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
176
| max_issues_repo_name
stringlengths 7
114
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
48.5k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
176
| max_forks_repo_name
stringlengths 7
114
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 10
805k
| avg_line_length
float64 5.53
11k
| max_line_length
int64 10
129k
| alphanum_fraction
float64 0.13
0.93
| content_no_comment
stringlengths 0
449k
| is_comment_constant_removed
bool 2
classes | is_sharp_comment_removed
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f719b1a74f416ee6980e89aaf0546a68140e9287
| 14,523
|
py
|
Python
|
nova/virt/fake.py
|
bopopescu/nova-29
|
3b8957a5f9656884ecb14755516097c049a18f67
|
[
"Apache-2.0"
] | null | null | null |
nova/virt/fake.py
|
bopopescu/nova-29
|
3b8957a5f9656884ecb14755516097c049a18f67
|
[
"Apache-2.0"
] | null | null | null |
nova/virt/fake.py
|
bopopescu/nova-29
|
3b8957a5f9656884ecb14755516097c049a18f67
|
[
"Apache-2.0"
] | 1
|
2020-07-24T07:27:49.000Z
|
2020-07-24T07:27:49.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A fake (in-memory) hypervisor+api.
Allows nova testing w/o a hypervisor. This module also documents the
semantics of real hypervisor connections.
"""
from nova.compute import power_state
from nova.compute import task_states
from nova import db
from nova import exception
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt import virtapi
LOG = logging.getLogger(__name__)
_FAKE_NODES = ['fake-mini']
def set_nodes(nodes):
    """Replace the module-wide fake node list.

    Changes what FakeDriver reports from get_available_nodes(),
    get_available_resource() and get_host_stats().  Undo the change
    with restore_nodes().
    """
    global _FAKE_NODES
    _FAKE_NODES = nodes
def restore_nodes():
    """Reset the fake node list back to its default, ['fake-mini'].

    Intended for a test's tearDown() after a set_nodes() call.
    """
    global _FAKE_NODES
    _FAKE_NODES = ['fake-mini']
class FakeInstance(object):
    """Tiny stand-in for an instance record: a name plus a power state.

    Supports both attribute access and dict-style indexing, since nova
    code treats instances interchangeably as objects and as dicts.
    """

    def __init__(self, name, state):
        self.name = name
        self.state = state

    def __getitem__(self, key):
        # Route instance['x'] through to instance.x.
        return getattr(self, key)
class FakeDriver(driver.ComputeDriver):
    """Fake hypervisor driver.

    Backs the compute manager with an in-memory dict of FakeInstance
    objects and canned host/resource data, so nova can be exercised
    without a real hypervisor.
    """

    capabilities = {
        # Claimed so image-cache manager code paths run against this
        # driver in tests.
        "has_imagecache": True,
    }

    def __init__(self, virtapi, read_only=False):
        super(FakeDriver, self).__init__(virtapi)
        # Maps instance name -> FakeInstance for everything "spawned" here.
        self.instances = {}
        # Template host status; get_host_stats() copies this once per node.
        # Values are arbitrary but plausible (memory/disk appear to be in
        # bytes -- presumably mirroring a real driver's units; unverified).
        self.host_status_base = {
            'host_name-description': 'Fake Host',
            'host_hostname': 'fake-mini',
            'host_memory_total': 8000000000,
            'host_memory_overhead': 10000000,
            'host_memory_free': 7900000000,
            'host_memory_free_computed': 7900000000,
            'host_other_config': {},
            'host_ip_address': '192.168.1.109',
            'host_cpu_info': {},
            'disk_available': 500000000000,
            'disk_total': 600000000000,
            'disk_used': 100000000000,
            'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
            'host_name_label': 'fake-mini',
            'hypervisor_hostname': 'fake-mini',
        }
        # Maps instance name -> {mountpoint: connection_info} for volumes
        # "attached" via attach_volume().
        self._mounts = {}

    def init_host(self, host):
        # Nothing to initialize for an in-memory hypervisor.
        return

    def list_instances(self):
        # Names of all instances this driver believes are running.
        return self.instances.keys()

    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks."""
        pass

    def unplug_vifs(self, instance, network_info):
        """Unplug VIFs from networks."""
        pass

    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None):
        """Record a new RUNNING FakeInstance under the instance's name."""
        name = instance['name']
        state = power_state.RUNNING
        fake_instance = FakeInstance(name, state)
        self.instances[name] = fake_instance

    def snapshot(self, context, instance, name, update_task_state):
        """Pretend to snapshot; fails if the instance was never spawned."""
        if not instance['name'] in self.instances:
            raise exception.InstanceNotRunning(instance_id=instance['uuid'])
        # Jump straight to the last task state a real driver would reach.
        update_task_state(task_state=task_states.IMAGE_UPLOADING)

    def reboot(self, instance, network_info, reboot_type,
               block_device_info=None):
        pass

    @staticmethod
    def get_host_ip_addr():
        # Fixed fake address for this host.
        return '192.168.0.1'

    def set_admin_password(self, instance, new_pass):
        pass

    def inject_file(self, instance, b64_path, b64_contents):
        pass

    def resume_state_on_host_boot(self, context, instance, network_info,
                                  block_device_info=None):
        pass

    def rescue(self, context, instance, network_info, image_meta,
               rescue_password):
        pass

    def unrescue(self, instance, network_info):
        pass

    def poll_rebooting_instances(self, timeout, instances):
        pass

    def migrate_disk_and_power_off(self, context, instance, dest,
                                   instance_type, network_info,
                                   block_device_info=None):
        pass

    def finish_revert_migration(self, instance, network_info,
                                block_device_info=None):
        pass

    def power_off(self, instance):
        pass

    def power_on(self, instance):
        pass

    def soft_delete(self, instance):
        pass

    def restore(self, instance):
        pass

    def pause(self, instance):
        pass

    def unpause(self, instance):
        pass

    def suspend(self, instance):
        pass

    def resume(self, instance, network_info, block_device_info=None):
        pass

    def destroy(self, instance, network_info, block_device_info=None,
                destroy_disks=True):
        """Forget the named instance; warn (don't fail) if it is unknown."""
        key = instance['name']
        if key in self.instances:
            del self.instances[key]
        else:
            LOG.warning(_("Key '%(key)s' not in instances '%(inst)s'") %
                        {'key': key,
                         'inst': self.instances}, instance=instance)

    def attach_volume(self, connection_info, instance, mountpoint):
        """Attach the disk to the instance at mountpoint using info"""
        instance_name = instance['name']
        if not instance_name in self._mounts:
            self._mounts[instance_name] = {}
        self._mounts[instance_name][mountpoint] = connection_info
        return True

    def detach_volume(self, connection_info, instance, mountpoint):
        """Detach the disk attached to the instance"""
        try:
            del self._mounts[instance['name']][mountpoint]
        except KeyError:
            # Detaching a volume that was never attached counts as success.
            pass
        return True

    def get_info(self, instance):
        """Return canned state info; raises if the instance is unknown."""
        if instance['name'] not in self.instances:
            raise exception.InstanceNotFound(instance_id=instance['name'])
        i = self.instances[instance['name']]
        return {'state': i.state,
                'max_mem': 0,
                'mem': 0,
                'num_cpu': 2,
                'cpu_time': 0}

    def get_diagnostics(self, instance_name):
        # Static sample diagnostics; key names resemble libvirt-style
        # per-vcpu/disk/vif counters -- TODO confirm against consumers.
        return {'cpu0_time': 17300000000,
                'memory': 524288,
                'vda_errors': -1,
                'vda_read': 262144,
                'vda_read_req': 112,
                'vda_write': 5778432,
                'vda_write_req': 488,
                'vnet1_rx': 2070139,
                'vnet1_rx_drop': 0,
                'vnet1_rx_errors': 0,
                'vnet1_rx_packets': 26701,
                'vnet1_tx': 140208,
                'vnet1_tx_drop': 0,
                'vnet1_tx_errors': 0,
                'vnet1_tx_packets': 662,
                }

    def get_all_bw_counters(self, instances):
        """Return bandwidth usage counters for each interface on each
        running VM"""
        bw = []
        return bw

    def get_all_volume_usage(self, context, instances, start_time,
                             stop_time=None):
        """Return usage info for volumes attached to vms on
        a given host"""
        volusage = []
        return volusage

    def block_stats(self, instance_name, disk_id):
        # Canned zero counters (Python 2 long literals), trailing None.
        return [0L, 0L, 0L, 0L, None]

    def interface_stats(self, instance_name, iface_id):
        # Canned zero counters (Python 2 long literals).
        return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L]

    def get_console_output(self, instance):
        return 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE'

    def get_vnc_console(self, instance):
        return {'internal_access_path': 'FAKE',
                'host': 'fakevncconsole.com',
                'port': 6969}

    def get_console_pool_info(self, console_type):
        return {'address': '127.0.0.1',
                'username': 'fakeuser',
                'password': 'fakepassword'}

    def refresh_security_group_rules(self, security_group_id):
        return True

    def refresh_security_group_members(self, security_group_id):
        return True

    def refresh_instance_security_rules(self, instance):
        return True

    def refresh_provider_fw_rules(self):
        pass

    def get_available_resource(self, nodename):
        """Updates compute manager resource info on ComputeNode table.

        Since we don't have a real hypervisor, pretend we have lots of
        disk and ram.  Raises NovaException for a nodename not present
        in the module-level _FAKE_NODES list.
        """
        if nodename not in _FAKE_NODES:
            raise exception.NovaException("node %s is not found" % nodename)
        dic = {'vcpus': 1,
               'memory_mb': 8192,
               'local_gb': 1028,
               'vcpus_used': 0,
               'memory_mb_used': 0,
               'local_gb_used': 0,
               'hypervisor_type': 'fake',
               'hypervisor_version': '1.0',
               'hypervisor_hostname': nodename,
               'cpu_info': '?'}
        return dic

    def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
        """This method is supported only by libvirt."""
        raise NotImplementedError('This method is supported only by libvirt.')

    def get_instance_disk_info(self, instance_name):
        return

    def live_migration(self, context, instance_ref, dest,
                       post_method, recover_method, block_migration=False,
                       migrate_data=None):
        return

    def check_can_live_migrate_destination_cleanup(self, ctxt,
                                                   dest_check_data):
        return

    def check_can_live_migrate_destination(self, ctxt, instance_ref,
                                           src_compute_info, dst_compute_info,
                                           block_migration=False,
                                           disk_over_commit=False):
        return {}

    def check_can_live_migrate_source(self, ctxt, instance_ref,
                                      dest_check_data):
        return

    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance,
                         block_device_info=None):
        return

    def confirm_migration(self, migration, instance, network_info):
        return

    def pre_live_migration(self, context, instance_ref, block_device_info,
                           network_info, migrate_data=None):
        return

    def unfilter_instance(self, instance_ref, network_info):
        """This method is supported only by libvirt."""
        raise NotImplementedError('This method is supported only by libvirt.')

    def test_remove_vm(self, instance_name):
        """Removes the named VM, as if it crashed. For testing"""
        self.instances.pop(instance_name)

    def get_host_stats(self, refresh=False):
        """Return fake Host Status of ram, disk, network."""
        stats = []
        for nodename in _FAKE_NODES:
            host_status = self.host_status_base.copy()
            host_status['hypervisor_hostname'] = nodename
            host_status['host_hostname'] = nodename
            host_status['host_name_label'] = nodename
            stats.append(host_status)
        if len(stats) == 0:
            raise exception.NovaException("FakeDriver has no node")
        elif len(stats) == 1:
            # Single-node case returns a bare dict rather than a list.
            return stats[0]
        else:
            return stats

    def host_power_action(self, host, action):
        """Reboots, shuts down or powers up the host."""
        return action

    def host_maintenance_mode(self, host, mode):
        """Start/Stop host maintenance window. On start, it triggers
        guest VMs evacuation."""
        if not mode:
            return 'off_maintenance'
        return 'on_maintenance'

    def set_host_enabled(self, host, enabled):
        """Sets the specified host's ability to accept new instances."""
        if enabled:
            return 'enabled'
        return 'disabled'

    def get_disk_available_least(self):
        pass

    def get_volume_connector(self, instance):
        return {'ip': '127.0.0.1', 'initiator': 'fake', 'host': 'fakehost'}

    def get_available_nodes(self):
        return _FAKE_NODES

    def instance_on_disk(self, instance):
        return False

    def list_instance_uuids(self):
        return []
class FakeVirtAPI(virtapi.VirtAPI):
    """VirtAPI implementation that delegates straight to the nova db API.

    Each method simply forwards to the corresponding nova.db call,
    extracting 'id' from aggregate/instance dicts where the db API
    takes an id rather than the full object.
    """

    def instance_update(self, context, instance_uuid, updates):
        return db.instance_update_and_get_original(context,
                                                   instance_uuid,
                                                   updates)

    def instance_get_by_uuid(self, context, instance_uuid):
        return db.instance_get_by_uuid(context, instance_uuid)

    def instance_get_all_by_host(self, context, host):
        return db.instance_get_all_by_host(context, host)

    def aggregate_get_by_host(self, context, host, key=None):
        return db.aggregate_get_by_host(context, host, key=key)

    def aggregate_metadata_add(self, context, aggregate, metadata,
                               set_delete=False):
        return db.aggregate_metadata_add(context, aggregate['id'], metadata,
                                         set_delete=set_delete)

    def aggregate_metadata_delete(self, context, aggregate, key):
        return db.aggregate_metadata_delete(context, aggregate['id'], key)

    def security_group_get_by_instance(self, context, instance):
        return db.security_group_get_by_instance(context, instance['id'])

    def security_group_rule_get_by_security_group(self, context,
                                                  security_group):
        return db.security_group_rule_get_by_security_group(
            context, security_group['id'])

    def provider_fw_rule_get_all(self, context):
        return db.provider_fw_rule_get_all(context)

    def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
        return db.agent_build_get_by_triple(context,
                                            hypervisor, os, architecture)
| 32.783296
| 79
| 0.6111
|
"""
A fake (in-memory) hypervisor+api.
Allows nova testing w/o a hypervisor. This module also documents the
semantics of real hypervisor connections.
"""
from nova.compute import power_state
from nova.compute import task_states
from nova import db
from nova import exception
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt import virtapi
LOG = logging.getLogger(__name__)
_FAKE_NODES = ['fake-mini']
def set_nodes(nodes):
"""Sets FakeDriver's node.list.
It has effect on the following methods:
get_available_nodes()
get_available_resource
get_host_stats()
To restore the change, call restore_nodes()
"""
global _FAKE_NODES
_FAKE_NODES = nodes
def restore_nodes():
"""Resets FakeDriver's node list modified by set_nodes().
Usually called from tearDown().
"""
global _FAKE_NODES
_FAKE_NODES = ['fake-mini']
class FakeInstance(object):
def __init__(self, name, state):
self.name = name
self.state = state
def __getitem__(self, key):
return getattr(self, key)
class FakeDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
}
"""Fake hypervisor driver"""
def __init__(self, virtapi, read_only=False):
super(FakeDriver, self).__init__(virtapi)
self.instances = {}
self.host_status_base = {
'host_name-description': 'Fake Host',
'host_hostname': 'fake-mini',
'host_memory_total': 8000000000,
'host_memory_overhead': 10000000,
'host_memory_free': 7900000000,
'host_memory_free_computed': 7900000000,
'host_other_config': {},
'host_ip_address': '192.168.1.109',
'host_cpu_info': {},
'disk_available': 500000000000,
'disk_total': 600000000000,
'disk_used': 100000000000,
'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
'host_name_label': 'fake-mini',
'hypervisor_hostname': 'fake-mini',
}
self._mounts = {}
def init_host(self, host):
return
def list_instances(self):
return self.instances.keys()
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
pass
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
pass
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
name = instance['name']
state = power_state.RUNNING
fake_instance = FakeInstance(name, state)
self.instances[name] = fake_instance
def snapshot(self, context, instance, name, update_task_state):
if not instance['name'] in self.instances:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
update_task_state(task_state=task_states.IMAGE_UPLOADING)
def reboot(self, instance, network_info, reboot_type,
block_device_info=None):
pass
@staticmethod
def get_host_ip_addr():
return '192.168.0.1'
def set_admin_password(self, instance, new_pass):
pass
def inject_file(self, instance, b64_path, b64_contents):
pass
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
pass
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
pass
def unrescue(self, instance, network_info):
pass
def poll_rebooting_instances(self, timeout, instances):
pass
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
pass
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
pass
def power_off(self, instance):
pass
def power_on(self, instance):
pass
def soft_delete(self, instance):
pass
def restore(self, instance):
pass
def pause(self, instance):
pass
def unpause(self, instance):
pass
def suspend(self, instance):
pass
def resume(self, instance, network_info, block_device_info=None):
pass
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
key = instance['name']
if key in self.instances:
del self.instances[key]
else:
LOG.warning(_("Key '%(key)s' not in instances '%(inst)s'") %
{'key': key,
'inst': self.instances}, instance=instance)
def attach_volume(self, connection_info, instance, mountpoint):
"""Attach the disk to the instance at mountpoint using info"""
instance_name = instance['name']
if not instance_name in self._mounts:
self._mounts[instance_name] = {}
self._mounts[instance_name][mountpoint] = connection_info
return True
def detach_volume(self, connection_info, instance, mountpoint):
"""Detach the disk attached to the instance"""
try:
del self._mounts[instance['name']][mountpoint]
except KeyError:
pass
return True
def get_info(self, instance):
if instance['name'] not in self.instances:
raise exception.InstanceNotFound(instance_id=instance['name'])
i = self.instances[instance['name']]
return {'state': i.state,
'max_mem': 0,
'mem': 0,
'num_cpu': 2,
'cpu_time': 0}
def get_diagnostics(self, instance_name):
return {'cpu0_time': 17300000000,
'memory': 524288,
'vda_errors': -1,
'vda_read': 262144,
'vda_read_req': 112,
'vda_write': 5778432,
'vda_write_req': 488,
'vnet1_rx': 2070139,
'vnet1_rx_drop': 0,
'vnet1_rx_errors': 0,
'vnet1_rx_packets': 26701,
'vnet1_tx': 140208,
'vnet1_tx_drop': 0,
'vnet1_tx_errors': 0,
'vnet1_tx_packets': 662,
}
def get_all_bw_counters(self, instances):
"""Return bandwidth usage counters for each interface on each
running VM"""
bw = []
return bw
def get_all_volume_usage(self, context, instances, start_time,
stop_time=None):
"""Return usage info for volumes attached to vms on
a given host"""
volusage = []
return volusage
def block_stats(self, instance_name, disk_id):
return [0L, 0L, 0L, 0L, None]
def interface_stats(self, instance_name, iface_id):
return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L]
def get_console_output(self, instance):
return 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE'
def get_vnc_console(self, instance):
return {'internal_access_path': 'FAKE',
'host': 'fakevncconsole.com',
'port': 6969}
def get_console_pool_info(self, console_type):
return {'address': '127.0.0.1',
'username': 'fakeuser',
'password': 'fakepassword'}
def refresh_security_group_rules(self, security_group_id):
return True
def refresh_security_group_members(self, security_group_id):
return True
def refresh_instance_security_rules(self, instance):
return True
def refresh_provider_fw_rules(self):
pass
def get_available_resource(self, nodename):
"""Updates compute manager resource info on ComputeNode table.
Since we don't have a real hypervisor, pretend we have lots of
disk and ram.
"""
if nodename not in _FAKE_NODES:
raise exception.NovaException("node %s is not found" % nodename)
dic = {'vcpus': 1,
'memory_mb': 8192,
'local_gb': 1028,
'vcpus_used': 0,
'memory_mb_used': 0,
'local_gb_used': 0,
'hypervisor_type': 'fake',
'hypervisor_version': '1.0',
'hypervisor_hostname': nodename,
'cpu_info': '?'}
return dic
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
"""This method is supported only by libvirt."""
raise NotImplementedError('This method is supported only by libvirt.')
def get_instance_disk_info(self, instance_name):
return
def live_migration(self, context, instance_ref, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
return
def check_can_live_migrate_destination_cleanup(self, ctxt,
dest_check_data):
return
def check_can_live_migrate_destination(self, ctxt, instance_ref,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
return {}
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
return
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None):
return
def confirm_migration(self, migration, instance, network_info):
return
def pre_live_migration(self, context, instance_ref, block_device_info,
network_info, migrate_data=None):
return
def unfilter_instance(self, instance_ref, network_info):
"""This method is supported only by libvirt."""
raise NotImplementedError('This method is supported only by libvirt.')
def test_remove_vm(self, instance_name):
"""Removes the named VM, as if it crashed. For testing"""
self.instances.pop(instance_name)
def get_host_stats(self, refresh=False):
"""Return fake Host Status of ram, disk, network."""
stats = []
for nodename in _FAKE_NODES:
host_status = self.host_status_base.copy()
host_status['hypervisor_hostname'] = nodename
host_status['host_hostname'] = nodename
host_status['host_name_label'] = nodename
stats.append(host_status)
if len(stats) == 0:
raise exception.NovaException("FakeDriver has no node")
elif len(stats) == 1:
return stats[0]
else:
return stats
def host_power_action(self, host, action):
"""Reboots, shuts down or powers up the host."""
return action
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation."""
if not mode:
return 'off_maintenance'
return 'on_maintenance'
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
if enabled:
return 'enabled'
return 'disabled'
def get_disk_available_least(self):
pass
def get_volume_connector(self, instance):
return {'ip': '127.0.0.1', 'initiator': 'fake', 'host': 'fakehost'}
def get_available_nodes(self):
return _FAKE_NODES
def instance_on_disk(self, instance):
return False
def list_instance_uuids(self):
return []
class FakeVirtAPI(virtapi.VirtAPI):
def instance_update(self, context, instance_uuid, updates):
return db.instance_update_and_get_original(context,
instance_uuid,
updates)
def instance_get_by_uuid(self, context, instance_uuid):
return db.instance_get_by_uuid(context, instance_uuid)
def instance_get_all_by_host(self, context, host):
return db.instance_get_all_by_host(context, host)
def aggregate_get_by_host(self, context, host, key=None):
return db.aggregate_get_by_host(context, host, key=key)
def aggregate_metadata_add(self, context, aggregate, metadata,
set_delete=False):
return db.aggregate_metadata_add(context, aggregate['id'], metadata,
set_delete=set_delete)
def aggregate_metadata_delete(self, context, aggregate, key):
return db.aggregate_metadata_delete(context, aggregate['id'], key)
def security_group_get_by_instance(self, context, instance):
return db.security_group_get_by_instance(context, instance['id'])
def security_group_rule_get_by_security_group(self, context,
security_group):
return db.security_group_rule_get_by_security_group(
context, security_group['id'])
def provider_fw_rule_get_all(self, context):
return db.provider_fw_rule_get_all(context)
def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
return db.agent_build_get_by_triple(context,
hypervisor, os, architecture)
| false
| true
|
f719b22b9c5885616b30cc4050c5cf2de4e5b710
| 1,553
|
py
|
Python
|
services/storage/tests/helpers/utils_assert.py
|
colinRawlings/osparc-simcore
|
bf2f18d5bc1e574d5f4c238d08ad15156184c310
|
[
"MIT"
] | 25
|
2018-04-13T12:44:12.000Z
|
2022-03-12T15:01:17.000Z
|
services/storage/tests/helpers/utils_assert.py
|
colinRawlings/osparc-simcore
|
bf2f18d5bc1e574d5f4c238d08ad15156184c310
|
[
"MIT"
] | 2,553
|
2018-01-18T17:11:55.000Z
|
2022-03-31T16:26:40.000Z
|
services/storage/tests/helpers/utils_assert.py
|
mrnicegyu11/osparc-simcore
|
b6fa6c245dbfbc18cc74a387111a52de9b05d1f4
|
[
"MIT"
] | 20
|
2018-01-18T19:45:33.000Z
|
2022-03-29T07:08:47.000Z
|
from pprint import pformat
from aiohttp import web
from servicelib.aiohttp.rest_responses import unwrap_envelope
async def assert_status(
    response: web.Response, expected_cls: web.HTTPException, expected_msg: str = None
):
    """Assert *response* matches *expected_cls*, optionally checking the message.

    Unwraps the response envelope and returns the (data, error) pair.
    """
    data, error = unwrap_envelope(await response.json())

    assert (
        response.status == expected_cls.status_code
    ), f"got {response.status}, expected {expected_cls.status_code}:\n data:{data},\n error:{error}"

    if issubclass(expected_cls, web.HTTPError):
        # Error responses: details live in the error envelope.
        do_assert_error(data, error, expected_cls, expected_msg)
    elif issubclass(expected_cls, web.HTTPNoContent):
        # 204 responses must carry neither data nor error.
        assert not data, pformat(data)
        assert not error, pformat(error)
    else:
        # Success responses: data present, error absent.
        assert data is not None, pformat(data)
        assert not error, pformat(error)
        if expected_msg:
            assert expected_msg in data["message"]

    return data, error
async def assert_error(
    response: web.Response, expected_cls: web.HTTPException, expected_msg: str = None
):
    """Assert that *response* is the error *expected_cls*; return (data, error)."""
    envelope = await response.json()
    data, error = unwrap_envelope(envelope)
    return do_assert_error(data, error, expected_cls, expected_msg)
def do_assert_error(
    data, error, expected_cls: web.HTTPException, expected_msg: str = None
):
    """Check an unwrapped error envelope against the expected error class.

    Expects exactly one entry in error["errors"], whose "code" is the
    exception class name. Returns the (data, error) pair unchanged.
    """
    assert not data, pformat(data)
    assert error, pformat(error)

    errors = error["errors"]
    assert len(errors) == 1
    err = errors[0]

    if expected_msg:
        assert expected_msg in err["message"]
    # The envelope's error code mirrors the exception class name.
    assert expected_cls.__name__ == err["code"]
    return data, error
| 28.759259
| 100
| 0.701223
|
from pprint import pformat
from aiohttp import web
from servicelib.aiohttp.rest_responses import unwrap_envelope
async def assert_status(
response: web.Response, expected_cls: web.HTTPException, expected_msg: str = None
):
data, error = unwrap_envelope(await response.json())
assert (
response.status == expected_cls.status_code
), f"got {response.status}, expected {expected_cls.status_code}:\n data:{data},\n error:{error}"
if issubclass(expected_cls, web.HTTPError):
do_assert_error(data, error, expected_cls, expected_msg)
elif issubclass(expected_cls, web.HTTPNoContent):
assert not data, pformat(data)
assert not error, pformat(error)
else:
assert data is not None, pformat(data)
assert not error, pformat(error)
if expected_msg:
assert expected_msg in data["message"]
return data, error
async def assert_error(
response: web.Response, expected_cls: web.HTTPException, expected_msg: str = None
):
data, error = unwrap_envelope(await response.json())
return do_assert_error(data, error, expected_cls, expected_msg)
def do_assert_error(
data, error, expected_cls: web.HTTPException, expected_msg: str = None
):
assert not data, pformat(data)
assert error, pformat(error)
assert len(error["errors"]) == 1
err = error["errors"][0]
if expected_msg:
assert expected_msg in err["message"]
assert expected_cls.__name__ == err["code"]
return data, error
| true
| true
|
f719b29d98e47c2ec8027200215ed81276adcb8f
| 11,098
|
py
|
Python
|
core/externals/update-engine/externals/gdata-objectivec-client/Source/Tests/GDataTestHTTPServer.py
|
tuxera/macfuse_with_externals
|
96df0e71824f37332c65a9465d55e9966e67be7d
|
[
"AML"
] | 1
|
2017-11-25T18:56:35.000Z
|
2017-11-25T18:56:35.000Z
|
core/externals/update-engine/externals/gdata-objectivec-client/Source/Tests/GDataTestHTTPServer.py
|
tuxera/macfuse_with_externals
|
96df0e71824f37332c65a9465d55e9966e67be7d
|
[
"AML"
] | null | null | null |
core/externals/update-engine/externals/gdata-objectivec-client/Source/Tests/GDataTestHTTPServer.py
|
tuxera/macfuse_with_externals
|
96df0e71824f37332c65a9465d55e9966e67be7d
|
[
"AML"
] | null | null | null |
#!/usr/bin/python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple server for testing the Objective-C GData Framework
This http server is for use by GDataServiceTest.m in testing
both authentication and object retrieval.
Requests to the path /accounts/ClientLogin are assumed to be
for login; other requests are for object retrieval
"""
import string
import cgi
import time
import os
import sys
import re
import mimetypes
import socket
from BaseHTTPServer import BaseHTTPRequestHandler
from BaseHTTPServer import HTTPServer
from optparse import OptionParser
class ServerTimeoutException(Exception):
    """Raised when the test HTTP server gets no connection before timing out."""
class HTTPTimeoutServer(HTTPServer):
  """HTTP server for testing network requests.

  This server will throw an exception if it receives no connections for
  several minutes. We use this to ensure that the server will be cleaned
  up if something goes wrong during the unit testing.
  """

  def get_request(self):
    # Wait up to two minutes per accept(); repeated socket.timeout on an
    # idle server escalates to ServerTimeoutException so the test run
    # aborts instead of hanging forever.
    self.socket.settimeout(120.0)
    result = None
    while result is None:
      try:
        result = self.socket.accept()
      except socket.timeout:
        raise ServerTimeoutException
    # Connection accepted: make the per-request socket blocking again so
    # the request handler's reads are not cut short by the timeout.
    result[0].settimeout(None)
    return result
class SimpleServer(BaseHTTPRequestHandler):
"""HTTP request handler for testing GData network requests.
This is an implementation of a request handler for BaseHTTPServer,
specifically designed for GData service code usage.
Normal requests for GET/POST/PUT simply retrieve the file from the
supplied path, starting in the current directory. A cookie called
TestCookie is set by the response header, with the value of the filename
requested.
DELETE requests always succeed.
Appending ?status=n results in a failure with status value n.
Paths ending in .auth have the .auth extension stripped, and must have
an authorization header of "GoogleLogin auth=GoodAuthToken" to succeed.
Paths ending in .authsub have the .authsub extension stripped, and must have
an authorization header of "AuthSub token=GoodAuthSubToken" to succeed.
Paths ending in .authwww have the .authwww extension stripped, and must have
an authorization header for GoodWWWUser:GoodWWWPassword to succeed.
Successful results have a Last-Modified header set; if that header's value
("thursday") is supplied in a request's "If-Modified-Since" header, the
result is 304 (Not Modified).
Requests to /accounts/ClientLogin will fail if supplied with a body
containing Passwd=bad. If they contain logintoken and logincaptcha values,
those must be logintoken=CapToken&logincaptch=good to succeed.
"""
def do_GET(self):
self.doAllRequests()
def do_POST(self):
self.doAllRequests()
def do_PUT(self):
self.doAllRequests()
def do_DELETE(self):
self.doAllRequests()
def doAllRequests(self):
# This method handles all expected incoming requests
#
# Requests to path /accounts/ClientLogin are assumed to be for signing in
#
# Other paths are for retrieving a local xml file. An .auth appended
# to an xml file path will require authentication (meaning the Authorization
# header must be present with the value "GoogleLogin auth=GoodAuthToken".)
# Delete commands succeed but return no data.
#
# GData override headers are supported.
#
# Any auth password is valid except "bad", which will fail, and "captcha",
# which will fail unless the authentication request's post string includes
# "logintoken=CapToken&logincaptcha=good"
# We will use a readable default result string since it should never show up
# in output
resultString = "default GDataTestServer result\n";
resultStatus = 0
headerType = "text/plain"
postString = ""
modifiedDate = "thursday" # clients should treat dates as opaque, generally
# auth queries and some GData queries include post data
postLength = int(self.headers.getheader("Content-Length", "0"));
if postLength > 0:
postString = self.rfile.read(postLength)
ifModifiedSince = self.headers.getheader("If-Modified-Since", "");
# retrieve the auth header
authorization = self.headers.getheader("Authorization", "")
# require basic auth if the file path ends with the string ".authwww"
# GoodWWWUser:GoodWWWPassword is base64 R29vZFdXV1VzZXI6R29vZFdXV1Bhc3N3b3Jk
if self.path.endswith(".authwww"):
if authorization != "Basic R29vZFdXV1VzZXI6R29vZFdXV1Bhc3N3b3Jk":
self.send_response(401)
self.send_header('WWW-Authenticate', "Basic realm='testrealm'")
self.send_header('Content-type', 'text/html')
self.end_headers()
return
self.path = self.path[:-8] # remove the .authwww at the end
# require Google auth if the file path ends with the string ".auth"
# or ".authsub"
if self.path.endswith(".auth"):
if authorization != "GoogleLogin auth=GoodAuthToken":
self.send_error(401,"Unauthorized: %s" % self.path)
return
self.path = self.path[:-5] # remove the .auth at the end
if self.path.endswith(".authsub"):
if authorization != "AuthSub token=GoodAuthSubToken":
self.send_error(401,"Unauthorized: %s" % self.path)
return
self.path = self.path[:-8] # remove the .authsub at the end
overrideHeader = self.headers.getheader("X-HTTP-Method-Override", "")
httpCommand = self.command
if httpCommand == "POST" and len(overrideHeader) > 0:
httpCommand = overrideHeader
try:
if self.path.endswith("/accounts/ClientLogin"):
#
# it's a sign-in attempt; it's good unless the password is "bad" or
# "captcha"
#
# use regular expression to find the password
password = ""
searchResult = re.search("(Passwd=)([^&\n]*)", postString)
if searchResult:
password = searchResult.group(2)
if password == "bad":
resultString = "Error=BadAuthentication\n"
resultStatus = 403
elif password == "captcha":
logintoken = ""
logincaptcha = ""
# use regular expressions to find the captcha token and answer
searchResult = re.search("(logintoken=)([^&\n]*)", postString);
if searchResult:
logintoken = searchResult.group(2)
searchResult = re.search("(logincaptcha=)([^&\n]*)", postString);
if searchResult:
logincaptcha = searchResult.group(2)
# if the captcha token is "CapToken" and the answer is "good"
# then it's a valid sign in
if (logintoken == "CapToken") and (logincaptcha == "good"):
resultString = "SID=GoodSID\nLSID=GoodLSID\nAuth=GoodAuthToken\n"
resultStatus = 200
else:
# incorrect captcha token or answer provided
resultString = ("Error=CaptchaRequired\nCaptchaToken=CapToken\n"
"CaptchaUrl=CapUrl\n")
resultStatus = 403
else:
# valid username/password
resultString = "SID=GoodSID\nLSID=GoodLSID\nAuth=GoodAuthToken\n"
resultStatus = 200
elif httpCommand == "DELETE":
#
# it's an object delete; read and return empty data
#
resultString = ""
resultStatus = 200
headerType = "text/plain"
else:
# queries that have something like "?status=456" should fail with the
# status code
searchResult = re.search("(status=)([0-9]+)", self.path)
if searchResult:
status = searchResult.group(2)
self.send_error(int(status),
"Test HTTP server status parameter: %s" % self.path)
return
# queries that have something like "?statusxml=456" should fail with the
# status code and structured XML response
searchResult = re.search("(statusxml=)([0-9]+)", self.path)
if searchResult:
status = searchResult.group(2)
self.send_response(int(status))
self.send_header("Content-type",
"application/vnd.google.gdata.error+xml")
self.end_headers()
resultString = ("<errors xmlns='http://schemas.google.com/g/2005'>"
"<error><domain>GData</domain><code>code_%s</code>"
"<internalReason>forced status error on path %s</internalReason>"
"<extendedHelp>http://help.com</extendedHelp>"
"<sendReport>http://report.com</sendReport></error>"
"</errors>" % (status, self.path))
self.wfile.write(resultString)
return
# if the client gave us back our modified date, then say there's no
# change in the response
if ifModifiedSince == modifiedDate:
self.send_response(304) # Not Modified
return
else:
#
# it's an object fetch; read and return the XML file
#
f = open("." + self.path)
resultString = f.read()
f.close()
resultStatus = 200
fileTypeInfo = mimetypes.guess_type("." + self.path)
headerType = fileTypeInfo[0] # first part of the tuple is mime type
self.send_response(resultStatus)
self.send_header("Content-type", headerType)
self.send_header("Last-Modified", modifiedDate)
# set TestCookie to equal the file name requested
cookieValue = os.path.basename("." + self.path)
self.send_header('Set-Cookie', 'TestCookie=%s' % cookieValue)
self.end_headers()
self.wfile.write(resultString)
except IOError:
self.send_error(404,"File Not Found: %s" % self.path)
def main():
try:
parser = OptionParser()
parser.add_option("-p", "--port", dest="port", help="Port to run server on",
type="int", default="80")
parser.add_option("-r", "--root", dest="root", help="Where to root server",
default=".")
(options, args) = parser.parse_args()
os.chdir(options.root)
server = HTTPTimeoutServer(("127.0.0.1", options.port), SimpleServer)
sys.stdout.write("started GDataTestServer.py...");
sys.stdout.flush();
server.serve_forever()
except KeyboardInterrupt:
print "^C received, shutting down server"
server.socket.close()
except ServerTimeoutException:
print "Too long since the last request, shutting down server"
server.socket.close()
if __name__ == "__main__":
main()
| 36.149837
| 80
| 0.656785
|
"""A simple server for testing the Objective-C GData Framework
This http server is for use by GDataServiceTest.m in testing
both authentication and object retrieval.
Requests to the path /accounts/ClientLogin are assumed to be
for login; other requests are for object retrieval
"""
import string
import cgi
import time
import os
import sys
import re
import mimetypes
import socket
from BaseHTTPServer import BaseHTTPRequestHandler
from BaseHTTPServer import HTTPServer
from optparse import OptionParser
class ServerTimeoutException(Exception):
pass
class HTTPTimeoutServer(HTTPServer):
"""HTTP server for testing network requests.
This server will throw an exception if it receives no connections for
several minutes. We use this to ensure that the server will be cleaned
up if something goes wrong during the unit testing.
"""
def get_request(self):
self.socket.settimeout(120.0)
result = None
while result is None:
try:
result = self.socket.accept()
except socket.timeout:
raise ServerTimeoutException
result[0].settimeout(None)
return result
class SimpleServer(BaseHTTPRequestHandler):
"""HTTP request handler for testing GData network requests.
This is an implementation of a request handler for BaseHTTPServer,
specifically designed for GData service code usage.
Normal requests for GET/POST/PUT simply retrieve the file from the
supplied path, starting in the current directory. A cookie called
TestCookie is set by the response header, with the value of the filename
requested.
DELETE requests always succeed.
Appending ?status=n results in a failure with status value n.
Paths ending in .auth have the .auth extension stripped, and must have
an authorization header of "GoogleLogin auth=GoodAuthToken" to succeed.
Paths ending in .authsub have the .authsub extension stripped, and must have
an authorization header of "AuthSub token=GoodAuthSubToken" to succeed.
Paths ending in .authwww have the .authwww extension stripped, and must have
an authorization header for GoodWWWUser:GoodWWWPassword to succeed.
Successful results have a Last-Modified header set; if that header's value
("thursday") is supplied in a request's "If-Modified-Since" header, the
result is 304 (Not Modified).
Requests to /accounts/ClientLogin will fail if supplied with a body
containing Passwd=bad. If they contain logintoken and logincaptcha values,
those must be logintoken=CapToken&logincaptch=good to succeed.
"""
def do_GET(self):
self.doAllRequests()
def do_POST(self):
self.doAllRequests()
def do_PUT(self):
self.doAllRequests()
def do_DELETE(self):
self.doAllRequests()
def doAllRequests(self):
# "logintoken=CapToken&logincaptcha=good"
# We will use a readable default result string since it should never show up
# in output
resultString = "default GDataTestServer result\n";
resultStatus = 0
headerType = "text/plain"
postString = ""
modifiedDate = "thursday" # clients should treat dates as opaque, generally
# auth queries and some GData queries include post data
postLength = int(self.headers.getheader("Content-Length", "0"));
if postLength > 0:
postString = self.rfile.read(postLength)
ifModifiedSince = self.headers.getheader("If-Modified-Since", "");
# retrieve the auth header
authorization = self.headers.getheader("Authorization", "")
# require basic auth if the file path ends with the string ".authwww"
# GoodWWWUser:GoodWWWPassword is base64 R29vZFdXV1VzZXI6R29vZFdXV1Bhc3N3b3Jk
if self.path.endswith(".authwww"):
if authorization != "Basic R29vZFdXV1VzZXI6R29vZFdXV1Bhc3N3b3Jk":
self.send_response(401)
self.send_header('WWW-Authenticate', "Basic realm='testrealm'")
self.send_header('Content-type', 'text/html')
self.end_headers()
return
self.path = self.path[:-8] # remove the .authwww at the end
# require Google auth if the file path ends with the string ".auth"
# or ".authsub"
if self.path.endswith(".auth"):
if authorization != "GoogleLogin auth=GoodAuthToken":
self.send_error(401,"Unauthorized: %s" % self.path)
return
self.path = self.path[:-5] # remove the .auth at the end
if self.path.endswith(".authsub"):
if authorization != "AuthSub token=GoodAuthSubToken":
self.send_error(401,"Unauthorized: %s" % self.path)
return
self.path = self.path[:-8] # remove the .authsub at the end
overrideHeader = self.headers.getheader("X-HTTP-Method-Override", "")
httpCommand = self.command
if httpCommand == "POST" and len(overrideHeader) > 0:
httpCommand = overrideHeader
try:
if self.path.endswith("/accounts/ClientLogin"):
#
# it's a sign-in attempt; it's good unless the password is "bad" or
# "captcha"
#
# use regular expression to find the password
password = ""
searchResult = re.search("(Passwd=)([^&\n]*)", postString)
if searchResult:
password = searchResult.group(2)
if password == "bad":
resultString = "Error=BadAuthentication\n"
resultStatus = 403
elif password == "captcha":
logintoken = ""
logincaptcha = ""
# use regular expressions to find the captcha token and answer
searchResult = re.search("(logintoken=)([^&\n]*)", postString);
if searchResult:
logintoken = searchResult.group(2)
searchResult = re.search("(logincaptcha=)([^&\n]*)", postString);
if searchResult:
logincaptcha = searchResult.group(2)
# if the captcha token is "CapToken" and the answer is "good"
# then it's a valid sign in
if (logintoken == "CapToken") and (logincaptcha == "good"):
resultString = "SID=GoodSID\nLSID=GoodLSID\nAuth=GoodAuthToken\n"
resultStatus = 200
else:
resultString = ("Error=CaptchaRequired\nCaptchaToken=CapToken\n"
"CaptchaUrl=CapUrl\n")
resultStatus = 403
else:
resultString = "SID=GoodSID\nLSID=GoodLSID\nAuth=GoodAuthToken\n"
resultStatus = 200
elif httpCommand == "DELETE":
#
resultString = ""
resultStatus = 200
headerType = "text/plain"
else:
# queries that have something like "?status=456" should fail with the
# status code
searchResult = re.search("(status=)([0-9]+)", self.path)
if searchResult:
status = searchResult.group(2)
self.send_error(int(status),
"Test HTTP server status parameter: %s" % self.path)
return
# queries that have something like "?statusxml=456" should fail with the
# status code and structured XML response
searchResult = re.search("(statusxml=)([0-9]+)", self.path)
if searchResult:
status = searchResult.group(2)
self.send_response(int(status))
self.send_header("Content-type",
"application/vnd.google.gdata.error+xml")
self.end_headers()
resultString = ("<errors xmlns='http://schemas.google.com/g/2005'>"
"<error><domain>GData</domain><code>code_%s</code>"
"<internalReason>forced status error on path %s</internalReason>"
"<extendedHelp>http://help.com</extendedHelp>"
"<sendReport>http://report.com</sendReport></error>"
"</errors>" % (status, self.path))
self.wfile.write(resultString)
return
# if the client gave us back our modified date, then say there's no
if ifModifiedSince == modifiedDate:
self.send_response(304)
return
else:
#
f = open("." + self.path)
resultString = f.read()
f.close()
resultStatus = 200
fileTypeInfo = mimetypes.guess_type("." + self.path)
headerType = fileTypeInfo[0] # first part of the tuple is mime type
self.send_response(resultStatus)
self.send_header("Content-type", headerType)
self.send_header("Last-Modified", modifiedDate)
# set TestCookie to equal the file name requested
cookieValue = os.path.basename("." + self.path)
self.send_header('Set-Cookie', 'TestCookie=%s' % cookieValue)
self.end_headers()
self.wfile.write(resultString)
except IOError:
self.send_error(404,"File Not Found: %s" % self.path)
def main():
try:
parser = OptionParser()
parser.add_option("-p", "--port", dest="port", help="Port to run server on",
type="int", default="80")
parser.add_option("-r", "--root", dest="root", help="Where to root server",
default=".")
(options, args) = parser.parse_args()
os.chdir(options.root)
server = HTTPTimeoutServer(("127.0.0.1", options.port), SimpleServer)
sys.stdout.write("started GDataTestServer.py...");
sys.stdout.flush();
server.serve_forever()
except KeyboardInterrupt:
print "^C received, shutting down server"
server.socket.close()
except ServerTimeoutException:
print "Too long since the last request, shutting down server"
server.socket.close()
if __name__ == "__main__":
main()
| false
| true
|
f719b3cf4408be63834d8d778dce83c706005a42
| 2,919
|
py
|
Python
|
python/src/ties/util/version.py
|
Noblis/ties-lib
|
e7c6165ebcd80e11b792fd4bcddf6ce634da0c60
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-04-10T19:02:27.000Z
|
2020-04-10T19:02:27.000Z
|
python/src/ties/util/version.py
|
Noblis/ties-lib
|
e7c6165ebcd80e11b792fd4bcddf6ce634da0c60
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
python/src/ties/util/version.py
|
Noblis/ties-lib
|
e7c6165ebcd80e11b792fd4bcddf6ce634da0c60
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
################################################################################
# Copyright 2019 Noblis, Inc #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
################################################################################
import argparse
from os.path import abspath, isfile
from pkg_resources import resource_filename
class VersionAction(argparse.Action):
def __init__(self, option_strings, dest, version=None, **kwargs):
kwargs['nargs'] = 0
self._version = version
super(VersionAction, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
parser.exit(message="{}\n".format(self._version))
def _get_version_number():
return '0.9.3'
def _get_build_number():
resource_version_path = abspath(resource_filename(__name__, 'build_number.txt'))
if isfile(resource_version_path):
with open(resource_version_path, 'r', encoding='utf-8') as f:
build_number = f.read().strip()
if build_number:
return build_number
else:
return None
else:
return None
def _get_build_time():
resource_version_path = abspath(resource_filename(__name__, 'build_time.txt'))
if isfile(resource_version_path):
with open(resource_version_path, 'r', encoding='utf-8') as f:
build_time = f.read().strip()
if build_time:
return build_time
else:
return None
else:
return None
def version_string():
version_number = _get_version_number()
build_number = _get_build_number()
build_time = _get_build_time()
version = "version {}".format(version_number)
if build_number is not None:
version += "\nbuild {}".format(build_number)
if build_time is not None:
version += "\nbuilt on {}".format(build_time)
return version
| 39.445946
| 84
| 0.528606
| true
| true
|
|
f719b481bbf26bf74e10817f58f02d7b6a184525
| 905
|
py
|
Python
|
packages/pyre/xml/ElementDescriptor.py
|
PyreFramework/pyre
|
345c7449a3416eea1c1affa74fb32faff30a6aaa
|
[
"BSD-3-Clause"
] | null | null | null |
packages/pyre/xml/ElementDescriptor.py
|
PyreFramework/pyre
|
345c7449a3416eea1c1affa74fb32faff30a6aaa
|
[
"BSD-3-Clause"
] | null | null | null |
packages/pyre/xml/ElementDescriptor.py
|
PyreFramework/pyre
|
345c7449a3416eea1c1affa74fb32faff30a6aaa
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2022 all rights reserved
#
from .Descriptor import Descriptor
class ElementDescriptor(Descriptor):
"""
Descriptor class that gathers all the metadata about a document tag that was provided by
the user during the DTD declaration. It is used by DTD derived classes to decorate the
Document instance and the tag handlers with the information needed by the Reader so it can
process XML documents
"""
# element meta data
handler = None # the Node descendant that handles parsing events for this document element
attributes = () # a list of the tag attribute descriptors that encode the document DTD
# meta methods
def __init__(self, *, tag, handler, root=False):
super().__init__(name=tag)
self.handler = handler
self.root = root
return
# end of file
| 25.857143
| 94
| 0.693923
|
from .Descriptor import Descriptor
class ElementDescriptor(Descriptor):
handler = None
attributes = ()
def __init__(self, *, tag, handler, root=False):
super().__init__(name=tag)
self.handler = handler
self.root = root
return
| true
| true
|
f719b4bd078cf626a5dea79e89509d44970085fe
| 1,812
|
py
|
Python
|
pliers/tests/extractors/api/test_clarifai_extractors.py
|
adelavega/pliers
|
dee21102689c77a56b7da48bf9a0ac10c90be0eb
|
[
"BSD-3-Clause"
] | null | null | null |
pliers/tests/extractors/api/test_clarifai_extractors.py
|
adelavega/pliers
|
dee21102689c77a56b7da48bf9a0ac10c90be0eb
|
[
"BSD-3-Clause"
] | null | null | null |
pliers/tests/extractors/api/test_clarifai_extractors.py
|
adelavega/pliers
|
dee21102689c77a56b7da48bf9a0ac10c90be0eb
|
[
"BSD-3-Clause"
] | null | null | null |
from os.path import join
from ...utils import get_test_data_path
from pliers.extractors import ClarifaiAPIExtractor
from pliers.stimuli import ImageStim
from pliers.extractors.base import merge_results
import numpy as np
import pytest
@pytest.mark.skipif("'CLARIFAI_API_KEY' not in os.environ")
def test_clarifai_api_extractor():
image_dir = join(get_test_data_path(), 'image')
stim = ImageStim(join(image_dir, 'apple.jpg'))
result = ClarifaiAPIExtractor().transform(stim).to_df()
assert result['apple'][0] > 0.5
assert result.ix[:, 5][0] > 0.0
result = ClarifaiAPIExtractor(max_concepts=5).transform(stim).to_df()
assert result.shape == (1, 9)
result = ClarifaiAPIExtractor(
min_value=0.9).transform(stim).to_df(object_id=False)
assert all(np.isnan(d) or d > 0.9 for d in result.values[0, 3:])
concepts = ['cat', 'dog']
result = ClarifaiAPIExtractor(select_concepts=concepts).transform(stim)
result = result.to_df()
assert result.shape == (1, 6)
assert 'cat' in result.columns and 'dog' in result.columns
@pytest.mark.skipif("'CLARIFAI_API_KEY' not in os.environ")
def test_clarifai_api_extractor_batch():
image_dir = join(get_test_data_path(), 'image')
stim = ImageStim(join(image_dir, 'apple.jpg'))
stim2 = ImageStim(join(image_dir, 'obama.jpg'))
ext = ClarifaiAPIExtractor()
results = ext.transform([stim, stim2])
results = merge_results(results)
assert results['ClarifaiAPIExtractor#apple'][0] > 0.5 or \
results['ClarifaiAPIExtractor#apple'][1] > 0.5
# This takes too long to execute
# video = VideoStim(join(get_test_data_path(), 'video', 'small.mp4'))
# results = ExtractorResult.merge_stims(ext.transform(video))
# assert 'Lego' in results.columns and 'robot' in results.columns
| 38.553191
| 75
| 0.711921
|
from os.path import join
from ...utils import get_test_data_path
from pliers.extractors import ClarifaiAPIExtractor
from pliers.stimuli import ImageStim
from pliers.extractors.base import merge_results
import numpy as np
import pytest
@pytest.mark.skipif("'CLARIFAI_API_KEY' not in os.environ")
def test_clarifai_api_extractor():
image_dir = join(get_test_data_path(), 'image')
stim = ImageStim(join(image_dir, 'apple.jpg'))
result = ClarifaiAPIExtractor().transform(stim).to_df()
assert result['apple'][0] > 0.5
assert result.ix[:, 5][0] > 0.0
result = ClarifaiAPIExtractor(max_concepts=5).transform(stim).to_df()
assert result.shape == (1, 9)
result = ClarifaiAPIExtractor(
min_value=0.9).transform(stim).to_df(object_id=False)
assert all(np.isnan(d) or d > 0.9 for d in result.values[0, 3:])
concepts = ['cat', 'dog']
result = ClarifaiAPIExtractor(select_concepts=concepts).transform(stim)
result = result.to_df()
assert result.shape == (1, 6)
assert 'cat' in result.columns and 'dog' in result.columns
@pytest.mark.skipif("'CLARIFAI_API_KEY' not in os.environ")
def test_clarifai_api_extractor_batch():
image_dir = join(get_test_data_path(), 'image')
stim = ImageStim(join(image_dir, 'apple.jpg'))
stim2 = ImageStim(join(image_dir, 'obama.jpg'))
ext = ClarifaiAPIExtractor()
results = ext.transform([stim, stim2])
results = merge_results(results)
assert results['ClarifaiAPIExtractor#apple'][0] > 0.5 or \
results['ClarifaiAPIExtractor#apple'][1] > 0.5
| true
| true
|
f719b4dc7ae13b6947c48e17f17fc0bd12e5e231
| 23,805
|
py
|
Python
|
src/opendr/perception/object_tracking_2d/fair_mot/object_tracking_2d_fair_mot_learner.py
|
makistsantekidis/opendr
|
07dee3b59d3487b9c5a93d6946317178a02c9890
|
[
"Apache-2.0"
] | 3
|
2021-06-24T01:54:25.000Z
|
2021-12-12T16:21:24.000Z
|
src/opendr/perception/object_tracking_2d/fair_mot/object_tracking_2d_fair_mot_learner.py
|
makistsantekidis/opendr
|
07dee3b59d3487b9c5a93d6946317178a02c9890
|
[
"Apache-2.0"
] | 79
|
2021-06-23T10:40:10.000Z
|
2021-12-16T07:59:42.000Z
|
src/opendr/perception/object_tracking_2d/fair_mot/object_tracking_2d_fair_mot_learner.py
|
makistsantekidis/opendr
|
07dee3b59d3487b9c5a93d6946317178a02c9890
|
[
"Apache-2.0"
] | 5
|
2021-07-04T07:38:50.000Z
|
2021-12-12T16:18:47.000Z
|
# Copyright 2020-2021 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import torch
import ntpath
import shutil
import numpy as np
import onnxruntime as ort
from torchvision.transforms import transforms as T
from opendr.engine.learners import Learner
from opendr.engine.datasets import DatasetIterator, ExternalDataset, MappedDatasetIterator
from opendr.perception.object_tracking_2d.logger import Logger
from opendr.perception.object_tracking_2d.datasets.mot_dataset import JointDataset, RawMotDatasetIterator
from opendr.perception.object_tracking_2d.fair_mot.algorithm.lib.models.model import create_model
from opendr.perception.object_tracking_2d.fair_mot.algorithm.run import train, evaluate
from opendr.perception.object_tracking_2d.fair_mot.algorithm.load import load_from_checkpoint
from opendr.perception.object_tracking_2d.datasets.mot_dataset import letterbox, process as process_dataset
from opendr.perception.object_tracking_2d.fair_mot.algorithm.lib.tracker.multitracker import JDETracker
from opendr.engine.data import Image
from opendr.engine.target import TrackingAnnotation, TrackingAnnotationList
from opendr.engine.constants import OPENDR_SERVER_URL
from urllib.request import urlretrieve
class ObjectTracking2DFairMotLearner(Learner):
def __init__(
self,
lr=0.0001,
iters=-1,
batch_size=4,
optimizer="adam",
lr_schedule="",
backbone="dla_34",
network_head="",
checkpoint_after_iter=0,
checkpoint_load_iter=0,
temp_path="",
device="cuda",
threshold=0.3,
scale=1.0,
lr_step=[20],
head_conv=256,
ltrb=True,
num_classes=1,
reg_offset=True,
gpus=[0],
num_workers=4,
mse_loss=False,
reg_loss='l1',
dense_wh=False,
cat_spec_wh=False,
reid_dim=128,
norm_wh=False,
wh_weight=0.1,
off_weight=1,
id_weight=1,
num_epochs=30,
hm_weight=1,
down_ratio=4,
max_objs=500,
track_buffer=30,
image_mean=[0.408, 0.447, 0.47],
image_std=[0.289, 0.274, 0.278],
frame_rate=30,
min_box_area=100,
):
# Pass the shared parameters on super's constructor so they can get initialized as class attributes
super(ObjectTracking2DFairMotLearner, self).__init__(
lr=lr,
iters=iters,
batch_size=batch_size,
optimizer=optimizer,
lr_schedule=lr_schedule,
backbone=backbone,
network_head=network_head,
checkpoint_after_iter=checkpoint_after_iter,
checkpoint_load_iter=checkpoint_load_iter,
temp_path=temp_path,
device=device,
threshold=threshold,
scale=scale,
)
self.ltrb = ltrb
self.head_conv = head_conv
self.num_classes = num_classes
self.reid_dim = reid_dim
self.reg_offset = reg_offset
self.gpus = gpus
self.num_workers = num_workers
self.mse_loss = mse_loss
self.reg_loss = reg_loss
self.dense_wh = dense_wh
self.cat_spec_wh = cat_spec_wh
self.reid_dim = reid_dim
self.norm_wh = norm_wh
self.wh_weight = wh_weight
self.off_weight = off_weight
self.id_weight = id_weight
self.num_epochs = num_epochs
self.lr_step = lr_step
self.hm_weight = hm_weight
self.down_ratio = down_ratio
self.max_objs = max_objs
self.track_buffer = track_buffer
self.image_mean = image_mean
self.image_mean = image_mean
self.image_std = image_std
self.frame_rate = frame_rate
self.min_box_area = min_box_area
main_batch_size = self.batch_size // len(self.gpus)
rest_batch_size = (self.batch_size - main_batch_size)
self.chunk_sizes = [main_batch_size]
for i in range(len(self.gpus) - 1):
worker_chunk_size = rest_batch_size // (len(self.gpus) - 1)
if i < rest_batch_size % (len(self.gpus) - 1):
worker_chunk_size += 1
self.chunk_sizes.append(worker_chunk_size)
self.__create_model()
def save(self, path, verbose=False):
"""
This method is used to save a trained model.
Provided with the path, absolute or relative, including a *folder* name, it creates a directory with the name
of the *folder* provided and saves the model inside with a proper format and a .json file with metadata.
If self.optimize was ran previously, it saves the optimized ONNX model in a similar fashion, by copying it
from the self.temp_path it was saved previously during conversion.
:param path: for the model to be saved, including the folder name
:type path: str
:param verbose: whether to print success message or not, defaults to 'False'
:type verbose: bool, optional
"""
if self.model is None and self.ort_session is None:
raise UserWarning("No model is loaded, cannot save.")
folder_name, _, tail = self.__extract_trailing(path) # Extract trailing folder name from path
# Also extract folder name without any extension if extension is erroneously provided
folder_name_no_ext = folder_name.split(sep='.')[0]
# Extract path without folder name, by removing folder name from original path
path_no_folder_name = ''.join(path.rsplit(folder_name, 1))
# If tail is '', then path was a/b/c/, which leaves a trailing double '/'
if tail == '':
path_no_folder_name = path_no_folder_name[0:-1] # Remove one '/'
# Create model directory
new_path = path_no_folder_name + folder_name_no_ext
os.makedirs(new_path, exist_ok=True)
model_metadata = {"model_paths": [], "framework": "pytorch", "format": "", "has_data": False,
"inference_params": {}, "optimized": None, "optimizer_info": {}}
if self.model.ort_session is None:
model_metadata["model_paths"] = [
folder_name_no_ext + ".pth",
]
model_metadata["optimized"] = False
model_metadata["format"] = "pth"
torch.save({
'state_dict': self.model.state_dict()
}, os.path.join(path_no_folder_name, folder_name_no_ext, model_metadata["model_paths"][0]))
if verbose:
print("Saved Pytorch model.")
else:
model_metadata["model_paths"] = [
folder_name_no_ext + ".onnx"
]
model_metadata["optimized"] = True
model_metadata["format"] = "onnx"
shutil.copy2(
os.path.join(self.temp_path, "onnx_model_temp.onnx"),
os.path.join(path_no_folder_name, folder_name_no_ext, model_metadata["model_paths"][0])
)
if verbose:
print("Saved ONNX model.")
with open(os.path.join(new_path, folder_name_no_ext + ".json"), 'w') as outfile:
json.dump(model_metadata, outfile)
def load(
self,
path,
verbose=False,
):
"""
Loads the model from inside the path provided, based on the metadata .json file included.
:param path: path of the directory the model was saved
:type path: str
:param verbose: whether to print success message or not, defaults to 'False'
:type verbose: bool, optional
"""
model_name, _, _ = self.__extract_trailing(path) # Trailing folder name from the path provided
with open(os.path.join(path, model_name + ".json")) as metadata_file:
metadata = json.load(metadata_file)
if not metadata["optimized"]:
self.__load_from_pth(self.model, os.path.join(path, metadata["model_paths"][0]))
if verbose:
print("Loaded Pytorch model.")
else:
self.__load_rpn_from_onnx(os.path.join(path, metadata["model_paths"][0]))
if verbose:
print("Loaded ONNX model.")
def reset(self):
self.tracker.reset()
def fit(
self,
dataset,
val_dataset=None,
val_epochs=-1,
logging_path=None,
silent=False,
verbose=False,
train_split_paths=None,
val_split_paths=None,
resume_optimizer=False,
nID=None
):
if train_split_paths is None:
train_split_paths = {
"mot20": os.path.join(
"perception", "object_tracking_2d", "datasets", "splits", "mot20.train"
)
}
if val_split_paths is None:
val_split_paths = train_split_paths
logger = Logger(silent, verbose, logging_path)
(
input_dataset_iterator,
eval_dataset_iterator,
) = self._prepare_datasets(
dataset,
val_dataset,
train_split_paths,
val_split_paths,
require_val_dataset=val_epochs > 0,
)
if nID is None:
nID = input_dataset_iterator.nID if hasattr(input_dataset_iterator, "nID") else dataset.nID
checkpoints_path = os.path.join(self.temp_path, "checkpoints")
if self.checkpoint_after_iter != 0 or self.checkpoint_load_iter != 0:
os.makedirs(checkpoints_path, exist_ok=True)
start_epoch = 0
if self.checkpoint_load_iter != 0:
_, _, start_epoch = load_from_checkpoint(
self.model, os.path.join(checkpoints_path, f"checkpoint_{self.checkpoint_load_iter}.pth"),
self.model_optimizer, resume_optimizer, self.lr, self.lr_step, log=logger.log,
)
last_eval_result = train(
self.model,
self.infer,
self.model_optimizer,
input_dataset_iterator,
eval_dataset_iterator,
self.batch_size,
self.num_workers,
self.gpus,
self.chunk_sizes,
self.iters,
"train", # exp_id,
self.device,
silent, # hide_data_time,
1 if verbose else (-1 if silent else 10), # print_iter,
self.mse_loss,
self.reg_loss,
self.dense_wh,
self.cat_spec_wh,
self.reid_dim,
nID,
self.norm_wh,
1, # num_stack,
self.wh_weight,
self.off_weight,
self.id_weight,
self.num_epochs,
self.lr_step,
self.temp_path,
self.lr,
self.reg_offset,
self.hm_weight,
checkpoints_path,
self.checkpoint_after_iter,
start_epoch,
val_epochs=val_epochs,
log=logger.log,
)
logger.close()
return last_eval_result
    def eval(
        self,
        dataset,
        val_split_paths=None,
        logging_path=None,
        silent=False,
        verbose=False,
    ):
        """
        Evaluate tracking performance on `dataset`.

        :param dataset: evaluation dataset
        :param val_split_paths: dict mapping dataset names to split files (ExternalDataset only)
        :param logging_path: directory for log output, or None
        :param silent: suppress all output except errors
        :param verbose: enable verbose output
        :return: evaluation metrics as returned by the underlying evaluate() routine
        """
        logger = Logger(silent, verbose, logging_path)
        (
            _,
            eval_dataset_iterator,
        ) = self._prepare_datasets(
            None,
            dataset,
            None,
            val_split_paths,
            require_dataset=False,
        )
        # NOTE(review): evaluate() is handed the raw `dataset`, not the
        # eval_dataset_iterator built above, so that iterator is unused here.
        # Presumably only DatasetIterator inputs are expected — confirm.
        result = evaluate(self.infer, dataset)
        logger.log(Logger.LOG_WHEN_NORMAL, result)
        logger.close()
        return result
    def infer(self, batch, frame_ids=None, img_size=(1088, 608)):
        """
        Run tracking on one image or a list of images.

        :param batch: engine.Image or list of engine.Image
        :param frame_ids: frame index per image (or a single int for a single image);
            defaults to -1 for every image
        :param img_size: (width, height) the input is letterboxed to
        :return: a TrackingAnnotationList, or a list of them when a list of images was given
        :raises ValueError: if no model is loaded or `batch` has an unsupported type
        """
        if self.model is None:
            raise ValueError("No model loaded or created")
        self.model.eval()
        is_single_image = False
        if isinstance(batch, Image):
            batch = [batch]
            is_single_image = True
        elif not isinstance(batch, list):
            raise ValueError("Input batch should be an engine.Image or a list of engine.Image")
        if frame_ids is None:
            frame_ids = [-1] * len(batch)
        elif is_single_image:
            frame_ids = [frame_ids]
        results = []
        for image, frame_id in zip(batch, frame_ids):
            img0 = image.convert("channels_last", "bgr")  # BGR
            img, _, _, _ = letterbox(img0, height=img_size[1], width=img_size[0])
            # BGR -> RGB, HWC -> CHW, scale to [0, 1]
            img = img[:, :, ::-1].transpose(2, 0, 1)
            img = np.ascontiguousarray(img, dtype=np.float32)
            img /= 255.0
            blob = torch.from_numpy(img).to(self.device).unsqueeze(0)
            online_targets = self.tracker.update(blob, img0)
            online_tlwhs = []
            online_ids = []
            online_scores = []
            for t in online_targets:
                tlwh = t.tlwh
                tid = t.track_id
                # Filter out tiny boxes and boxes with aspect ratio w/h > 1.6.
                vertical = tlwh[2] / tlwh[3] > 1.6
                if tlwh[2] * tlwh[3] > self.min_box_area and not vertical:
                    online_tlwhs.append(tlwh)
                    online_ids.append(tid)
                    online_scores.append(t.score)
            # NOTE(review): tlwh is (x, y, w, h); mapping tlwh[0] to `top` and
            # tlwh[1] to `left` looks transposed — confirm against TrackingAnnotation.
            result = TrackingAnnotationList([
                TrackingAnnotation(
                    name=0,
                    top=tlwh[0],
                    left=tlwh[1],
                    width=tlwh[2],
                    height=tlwh[3],
                    id=id,
                    score=score,
                    frame=frame_id,
                ) for tlwh, id, score in zip(
                    online_tlwhs,
                    online_ids,
                    online_scores
                )
            ])
            results.append(result)
        if is_single_image:
            results = results[0]
        return results
def optimize(self, do_constant_folding=False, img_size=(1088, 608), optimizable_dcn_v2=False):
"""
Optimize method converts the model to ONNX format and saves the
model in the parent directory defined by self.temp_path. The ONNX model is then loaded.
:param do_constant_folding: whether to optimize constants, defaults to 'False'
:type do_constant_folding: bool, optional
"""
if not optimizable_dcn_v2:
raise Exception("Can not optimize the model while DCNv2 implementation is not optimizable")
if self.model is None:
raise UserWarning("No model is loaded, cannot optimize. Load or train a model first.")
if self.model.ort_session is not None:
raise UserWarning("Model is already optimized in ONNX.")
input_shape = [
1,
3,
img_size[1],
img_size[0],
]
try:
self.__convert_to_onnx(
input_shape,
os.path.join(self.temp_path, "onnx_model_temp.onnx"), do_constant_folding
)
except FileNotFoundError:
# Create temp directory
os.makedirs(self.temp_path, exist_ok=True)
self.__convert_rpn_to_onnx(
input_shape,
os.path.join(self.temp_path, "onnx_model_temp.onnx"), do_constant_folding
)
self.__load_rpn_from_onnx(os.path.join(self.temp_path, "onnx_model_rpn_temp.onnx"))
@staticmethod
def download(model_name, path, server_url=None):
if server_url is None and model_name not in [
"crowdhuman_dla34",
"fairmot_dla34",
]:
raise ValueError("Unknown model_name: " + model_name)
os.makedirs(path, exist_ok=True)
if server_url is None:
server_url = os.path.join(
OPENDR_SERVER_URL, "perception", "object_tracking_2d",
"fair_mot"
)
url = os.path.join(
server_url, model_name
)
model_dir = os.path.join(path, model_name)
os.makedirs(model_dir, exist_ok=True)
urlretrieve(os.path.join(
url, model_name + ".json"
), os.path.join(
model_dir, model_name + ".json"
))
try:
urlretrieve(os.path.join(
url, model_name + ".pth"
), os.path.join(
model_dir, model_name + ".pth"
))
except Exception:
urlretrieve(os.path.join(
url, model_name + ".tckpt"
), os.path.join(
model_dir, model_name + ".pth"
))
print("Downloaded model", model_name, "to", model_dir)
return model_dir
def __convert_to_onnx(self, input_shape, output_name, do_constant_folding=False, verbose=False):
inp = torch.randn(input_shape).to(self.device)
input_names = ["data"]
output_names = self.heads.keys()
torch.onnx.export(
self.model, inp, output_name, verbose=verbose, enable_onnx_checker=True,
do_constant_folding=do_constant_folding, input_names=input_names, output_names=output_names
)
def __load_from_onnx(self, path):
"""
This method loads an ONNX model from the path provided into an onnxruntime inference session.
:param path: path to ONNX model
:type path: str
"""
self.model.rpn_ort_session = ort.InferenceSession(path)
# The comments below are the alternative way to use the onnx model, it might be useful in the future
# depending on how ONNX saving/loading will be implemented across the toolkit.
# # Load the ONNX model
# self.model = onnx.load(path)
#
# # Check that the IR is well formed
# onnx.checker.check_model(self.model)
#
# # Print a human readable representation of the graph
# onnx.helper.printable_graph(self.model.graph)
def __load_from_pth(self, model, path, use_original_dict=False):
all_params = torch.load(path, map_location=self.device)
model.load_state_dict(all_params if use_original_dict else all_params["state_dict"])
    def _prepare_datasets(
        self,
        dataset,
        val_dataset,
        train_split_paths,
        val_split_paths,
        require_dataset=True,
        require_val_dataset=True,
    ):
        """
        Turn user-supplied dataset objects into training and evaluation iterators.

        :param dataset: ExternalDataset of type "mot", DatasetIterator, or None
        :param val_dataset: ExternalDataset of type "mot", DatasetIterator, or None;
            when None, it is derived from `dataset` if that is an ExternalDataset
        :param train_split_paths: split files used to build the training iterator
        :param val_split_paths: split files used to build the evaluation iterator
        :param require_dataset: raise if `dataset` is missing or unsupported
        :param require_val_dataset: raise if no validation iterator can be built
        :return: (input_dataset_iterator, eval_dataset_iterator)
        :raises ValueError: on unsupported dataset types or missing required datasets
        """
        input_dataset_iterator = None
        eval_dataset_iterator = None
        if isinstance(dataset, ExternalDataset):
            dataset_path = dataset.path
            if dataset.dataset_type.lower() != "mot":
                raise ValueError(
                    "ExternalDataset (" + str(dataset) +
                    ") is given as a dataset, but it is not a MOT dataset")
            transforms = T.Compose([T.ToTensor()])
            input_dataset_iterator = JointDataset(
                dataset_path,
                train_split_paths,
                down_ratio=self.down_ratio,
                max_objects=self.max_objs,
                ltrb=self.ltrb,
                mse_loss=self.mse_loss,
                augment=False, transforms=transforms,
            )
        elif isinstance(dataset, DatasetIterator):
            # Wrap the user iterator so each sample is converted to the
            # network's training format on the fly.
            input_dataset_iterator = MappedDatasetIterator(
                dataset,
                lambda d: process_dataset(
                    d[0], d[1], self.ltrb, self.down_ratio,
                    self.max_objs, self.num_classes, self.mse_loss
                )
            )
        else:
            if require_dataset or dataset is not None:
                raise ValueError(
                    "dataset parameter should be an ExternalDataset or a DatasetIterator"
                )
        if isinstance(val_dataset, ExternalDataset):
            val_dataset_path = val_dataset.path
            if val_dataset.dataset_type.lower() != "mot":
                raise ValueError(
                    "ExternalDataset (" + str(val_dataset) +
                    ") is given as a val_dataset, but it is not a MOT dataset"
                )
            eval_dataset_iterator = RawMotDatasetIterator(
                val_dataset_path,
                val_split_paths,
                down_ratio=self.down_ratio,
                max_objects=self.max_objs,
                ltrb=self.ltrb,
                mse_loss=self.mse_loss,
            )
        elif isinstance(val_dataset, DatasetIterator):
            eval_dataset_iterator = val_dataset
        elif val_dataset is None:
            # No explicit validation set: derive one from `dataset` when possible.
            if isinstance(dataset, ExternalDataset):
                val_dataset_path = dataset.path
                if dataset.dataset_type.lower() != "mot":
                    raise ValueError(
                        "ExternalDataset (" + str(dataset) +
                        ") is given as a dataset, but it is not a MOT dataset"
                    )
                eval_dataset_iterator = RawMotDatasetIterator(
                    val_dataset_path,
                    val_split_paths,
                    down_ratio=self.down_ratio,
                    max_objects=self.max_objs,
                    ltrb=self.ltrb,
                    mse_loss=self.mse_loss,
                )
            elif require_val_dataset:
                raise ValueError(
                    "val_dataset is None and can't be derived from" +
                    " the dataset object because the dataset is not an ExternalDataset"
                )
            else:
                # Fall back to evaluating on the training iterator.
                eval_dataset_iterator = input_dataset_iterator
        else:
            raise ValueError(
                "val_dataset parameter should be an ExternalDataset or a DatasetIterator or None"
            )
        return input_dataset_iterator, eval_dataset_iterator
    def __create_model(self):
        """Build the detection/re-ID network, its Adam optimizer, and the JDE tracker."""
        # Output heads: center heatmap, box size ('wh' uses 4 values in ltrb mode),
        # re-ID embedding, and optionally the center offset regression head.
        heads = {
            'hm': self.num_classes,
            'wh': 2 if not self.ltrb else 4,
            'id': self.reid_dim
        }
        if self.reg_offset:
            heads.update({'reg': 2})
        self.heads = heads
        self.model = create_model(self.backbone, heads, self.head_conv)
        self.model.to(self.device)
        self.model.ort_session = None  # populated only after ONNX optimization/loading
        self.model.heads_names = heads.keys()
        self.model_optimizer = torch.optim.Adam(self.model.parameters(), self.lr)
        self.tracker = JDETracker(
            self.model,
            self.threshold,
            self.track_buffer,
            self.max_objs,
            self.image_mean,
            self.image_std,
            self.down_ratio,
            self.num_classes,
            self.reg_offset,
            self.ltrb,
            self.frame_rate,
        )
@staticmethod
def __extract_trailing(path):
"""
Extracts the trailing folder name or filename from a path provided in an OS-generic way, also handling
cases where the last trailing character is a separator. Returns the folder name and the split head and tail.
:param path: the path to extract the trailing filename or folder name from
:type path: str
:return: the folder name, the head and tail of the path
:rtype: tuple of three strings
"""
head, tail = ntpath.split(path)
folder_name = tail or ntpath.basename(head) # handle both a/b/c and a/b/c/
return folder_name, head, tail
| 34.650655
| 117
| 0.585003
|
import os
import json
import torch
import ntpath
import shutil
import numpy as np
import onnxruntime as ort
from torchvision.transforms import transforms as T
from opendr.engine.learners import Learner
from opendr.engine.datasets import DatasetIterator, ExternalDataset, MappedDatasetIterator
from opendr.perception.object_tracking_2d.logger import Logger
from opendr.perception.object_tracking_2d.datasets.mot_dataset import JointDataset, RawMotDatasetIterator
from opendr.perception.object_tracking_2d.fair_mot.algorithm.lib.models.model import create_model
from opendr.perception.object_tracking_2d.fair_mot.algorithm.run import train, evaluate
from opendr.perception.object_tracking_2d.fair_mot.algorithm.load import load_from_checkpoint
from opendr.perception.object_tracking_2d.datasets.mot_dataset import letterbox, process as process_dataset
from opendr.perception.object_tracking_2d.fair_mot.algorithm.lib.tracker.multitracker import JDETracker
from opendr.engine.data import Image
from opendr.engine.target import TrackingAnnotation, TrackingAnnotationList
from opendr.engine.constants import OPENDR_SERVER_URL
from urllib.request import urlretrieve
class ObjectTracking2DFairMotLearner(Learner):
def __init__(
self,
lr=0.0001,
iters=-1,
batch_size=4,
optimizer="adam",
lr_schedule="",
backbone="dla_34",
network_head="",
checkpoint_after_iter=0,
checkpoint_load_iter=0,
temp_path="",
device="cuda",
threshold=0.3,
scale=1.0,
lr_step=[20],
head_conv=256,
ltrb=True,
num_classes=1,
reg_offset=True,
gpus=[0],
num_workers=4,
mse_loss=False,
reg_loss='l1',
dense_wh=False,
cat_spec_wh=False,
reid_dim=128,
norm_wh=False,
wh_weight=0.1,
off_weight=1,
id_weight=1,
num_epochs=30,
hm_weight=1,
down_ratio=4,
max_objs=500,
track_buffer=30,
image_mean=[0.408, 0.447, 0.47],
image_std=[0.289, 0.274, 0.278],
frame_rate=30,
min_box_area=100,
):
super(ObjectTracking2DFairMotLearner, self).__init__(
lr=lr,
iters=iters,
batch_size=batch_size,
optimizer=optimizer,
lr_schedule=lr_schedule,
backbone=backbone,
network_head=network_head,
checkpoint_after_iter=checkpoint_after_iter,
checkpoint_load_iter=checkpoint_load_iter,
temp_path=temp_path,
device=device,
threshold=threshold,
scale=scale,
)
self.ltrb = ltrb
self.head_conv = head_conv
self.num_classes = num_classes
self.reid_dim = reid_dim
self.reg_offset = reg_offset
self.gpus = gpus
self.num_workers = num_workers
self.mse_loss = mse_loss
self.reg_loss = reg_loss
self.dense_wh = dense_wh
self.cat_spec_wh = cat_spec_wh
self.reid_dim = reid_dim
self.norm_wh = norm_wh
self.wh_weight = wh_weight
self.off_weight = off_weight
self.id_weight = id_weight
self.num_epochs = num_epochs
self.lr_step = lr_step
self.hm_weight = hm_weight
self.down_ratio = down_ratio
self.max_objs = max_objs
self.track_buffer = track_buffer
self.image_mean = image_mean
self.image_mean = image_mean
self.image_std = image_std
self.frame_rate = frame_rate
self.min_box_area = min_box_area
main_batch_size = self.batch_size // len(self.gpus)
rest_batch_size = (self.batch_size - main_batch_size)
self.chunk_sizes = [main_batch_size]
for i in range(len(self.gpus) - 1):
worker_chunk_size = rest_batch_size // (len(self.gpus) - 1)
if i < rest_batch_size % (len(self.gpus) - 1):
worker_chunk_size += 1
self.chunk_sizes.append(worker_chunk_size)
self.__create_model()
def save(self, path, verbose=False):
if self.model is None and self.ort_session is None:
raise UserWarning("No model is loaded, cannot save.")
folder_name, _, tail = self.__extract_trailing(path) # Extract trailing folder name from path
# Also extract folder name without any extension if extension is erroneously provided
folder_name_no_ext = folder_name.split(sep='.')[0]
# Extract path without folder name, by removing folder name from original path
path_no_folder_name = ''.join(path.rsplit(folder_name, 1))
# If tail is '', then path was a/b/c/, which leaves a trailing double '/'
if tail == '':
path_no_folder_name = path_no_folder_name[0:-1] # Remove one '/'
# Create model directory
new_path = path_no_folder_name + folder_name_no_ext
os.makedirs(new_path, exist_ok=True)
model_metadata = {"model_paths": [], "framework": "pytorch", "format": "", "has_data": False,
"inference_params": {}, "optimized": None, "optimizer_info": {}}
if self.model.ort_session is None:
model_metadata["model_paths"] = [
folder_name_no_ext + ".pth",
]
model_metadata["optimized"] = False
model_metadata["format"] = "pth"
torch.save({
'state_dict': self.model.state_dict()
}, os.path.join(path_no_folder_name, folder_name_no_ext, model_metadata["model_paths"][0]))
if verbose:
print("Saved Pytorch model.")
else:
model_metadata["model_paths"] = [
folder_name_no_ext + ".onnx"
]
model_metadata["optimized"] = True
model_metadata["format"] = "onnx"
shutil.copy2(
os.path.join(self.temp_path, "onnx_model_temp.onnx"),
os.path.join(path_no_folder_name, folder_name_no_ext, model_metadata["model_paths"][0])
)
if verbose:
print("Saved ONNX model.")
with open(os.path.join(new_path, folder_name_no_ext + ".json"), 'w') as outfile:
json.dump(model_metadata, outfile)
def load(
self,
path,
verbose=False,
):
model_name, _, _ = self.__extract_trailing(path) # Trailing folder name from the path provided
with open(os.path.join(path, model_name + ".json")) as metadata_file:
metadata = json.load(metadata_file)
if not metadata["optimized"]:
self.__load_from_pth(self.model, os.path.join(path, metadata["model_paths"][0]))
if verbose:
print("Loaded Pytorch model.")
else:
self.__load_rpn_from_onnx(os.path.join(path, metadata["model_paths"][0]))
if verbose:
print("Loaded ONNX model.")
def reset(self):
self.tracker.reset()
def fit(
self,
dataset,
val_dataset=None,
val_epochs=-1,
logging_path=None,
silent=False,
verbose=False,
train_split_paths=None,
val_split_paths=None,
resume_optimizer=False,
nID=None
):
if train_split_paths is None:
train_split_paths = {
"mot20": os.path.join(
"perception", "object_tracking_2d", "datasets", "splits", "mot20.train"
)
}
if val_split_paths is None:
val_split_paths = train_split_paths
logger = Logger(silent, verbose, logging_path)
(
input_dataset_iterator,
eval_dataset_iterator,
) = self._prepare_datasets(
dataset,
val_dataset,
train_split_paths,
val_split_paths,
require_val_dataset=val_epochs > 0,
)
if nID is None:
nID = input_dataset_iterator.nID if hasattr(input_dataset_iterator, "nID") else dataset.nID
checkpoints_path = os.path.join(self.temp_path, "checkpoints")
if self.checkpoint_after_iter != 0 or self.checkpoint_load_iter != 0:
os.makedirs(checkpoints_path, exist_ok=True)
start_epoch = 0
if self.checkpoint_load_iter != 0:
_, _, start_epoch = load_from_checkpoint(
self.model, os.path.join(checkpoints_path, f"checkpoint_{self.checkpoint_load_iter}.pth"),
self.model_optimizer, resume_optimizer, self.lr, self.lr_step, log=logger.log,
)
last_eval_result = train(
self.model,
self.infer,
self.model_optimizer,
input_dataset_iterator,
eval_dataset_iterator,
self.batch_size,
self.num_workers,
self.gpus,
self.chunk_sizes,
self.iters,
"train", # exp_id,
self.device,
silent, # hide_data_time,
1 if verbose else (-1 if silent else 10), # print_iter,
self.mse_loss,
self.reg_loss,
self.dense_wh,
self.cat_spec_wh,
self.reid_dim,
nID,
self.norm_wh,
1, # num_stack,
self.wh_weight,
self.off_weight,
self.id_weight,
self.num_epochs,
self.lr_step,
self.temp_path,
self.lr,
self.reg_offset,
self.hm_weight,
checkpoints_path,
self.checkpoint_after_iter,
start_epoch,
val_epochs=val_epochs,
log=logger.log,
)
logger.close()
return last_eval_result
def eval(
self,
dataset,
val_split_paths=None,
logging_path=None,
silent=False,
verbose=False,
):
logger = Logger(silent, verbose, logging_path)
(
_,
eval_dataset_iterator,
) = self._prepare_datasets(
None,
dataset,
None,
val_split_paths,
require_dataset=False,
)
result = evaluate(self.infer, dataset)
logger.log(Logger.LOG_WHEN_NORMAL, result)
logger.close()
return result
def infer(self, batch, frame_ids=None, img_size=(1088, 608)):
if self.model is None:
raise ValueError("No model loaded or created")
self.model.eval()
is_single_image = False
if isinstance(batch, Image):
batch = [batch]
is_single_image = True
elif not isinstance(batch, list):
raise ValueError("Input batch should be an engine.Image or a list of engine.Image")
if frame_ids is None:
frame_ids = [-1] * len(batch)
elif is_single_image:
frame_ids = [frame_ids]
results = []
for image, frame_id in zip(batch, frame_ids):
img0 = image.convert("channels_last", "bgr") # BGR
img, _, _, _ = letterbox(img0, height=img_size[1], width=img_size[0])
# Normalize RGB
img = img[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img, dtype=np.float32)
img /= 255.0
blob = torch.from_numpy(img).to(self.device).unsqueeze(0)
online_targets = self.tracker.update(blob, img0)
online_tlwhs = []
online_ids = []
online_scores = []
for t in online_targets:
tlwh = t.tlwh
tid = t.track_id
vertical = tlwh[2] / tlwh[3] > 1.6
if tlwh[2] * tlwh[3] > self.min_box_area and not vertical:
online_tlwhs.append(tlwh)
online_ids.append(tid)
online_scores.append(t.score)
result = TrackingAnnotationList([
TrackingAnnotation(
name=0,
top=tlwh[0],
left=tlwh[1],
width=tlwh[2],
height=tlwh[3],
id=id,
score=score,
frame=frame_id,
) for tlwh, id, score in zip(
online_tlwhs,
online_ids,
online_scores
)
])
results.append(result)
if is_single_image:
results = results[0]
return results
def optimize(self, do_constant_folding=False, img_size=(1088, 608), optimizable_dcn_v2=False):
if not optimizable_dcn_v2:
raise Exception("Can not optimize the model while DCNv2 implementation is not optimizable")
if self.model is None:
raise UserWarning("No model is loaded, cannot optimize. Load or train a model first.")
if self.model.ort_session is not None:
raise UserWarning("Model is already optimized in ONNX.")
input_shape = [
1,
3,
img_size[1],
img_size[0],
]
try:
self.__convert_to_onnx(
input_shape,
os.path.join(self.temp_path, "onnx_model_temp.onnx"), do_constant_folding
)
except FileNotFoundError:
# Create temp directory
os.makedirs(self.temp_path, exist_ok=True)
self.__convert_rpn_to_onnx(
input_shape,
os.path.join(self.temp_path, "onnx_model_temp.onnx"), do_constant_folding
)
self.__load_rpn_from_onnx(os.path.join(self.temp_path, "onnx_model_rpn_temp.onnx"))
@staticmethod
def download(model_name, path, server_url=None):
if server_url is None and model_name not in [
"crowdhuman_dla34",
"fairmot_dla34",
]:
raise ValueError("Unknown model_name: " + model_name)
os.makedirs(path, exist_ok=True)
if server_url is None:
server_url = os.path.join(
OPENDR_SERVER_URL, "perception", "object_tracking_2d",
"fair_mot"
)
url = os.path.join(
server_url, model_name
)
model_dir = os.path.join(path, model_name)
os.makedirs(model_dir, exist_ok=True)
urlretrieve(os.path.join(
url, model_name + ".json"
), os.path.join(
model_dir, model_name + ".json"
))
try:
urlretrieve(os.path.join(
url, model_name + ".pth"
), os.path.join(
model_dir, model_name + ".pth"
))
except Exception:
urlretrieve(os.path.join(
url, model_name + ".tckpt"
), os.path.join(
model_dir, model_name + ".pth"
))
print("Downloaded model", model_name, "to", model_dir)
return model_dir
def __convert_to_onnx(self, input_shape, output_name, do_constant_folding=False, verbose=False):
inp = torch.randn(input_shape).to(self.device)
input_names = ["data"]
output_names = self.heads.keys()
torch.onnx.export(
self.model, inp, output_name, verbose=verbose, enable_onnx_checker=True,
do_constant_folding=do_constant_folding, input_names=input_names, output_names=output_names
)
def __load_from_onnx(self, path):
self.model.rpn_ort_session = ort.InferenceSession(path)
# The comments below are the alternative way to use the onnx model, it might be useful in the future
# depending on how ONNX saving/loading will be implemented across the toolkit.
# # Load the ONNX model
# self.model = onnx.load(path)
#
# # Check that the IR is well formed
# onnx.checker.check_model(self.model)
#
# # Print a human readable representation of the graph
# onnx.helper.printable_graph(self.model.graph)
def __load_from_pth(self, model, path, use_original_dict=False):
all_params = torch.load(path, map_location=self.device)
model.load_state_dict(all_params if use_original_dict else all_params["state_dict"])
def _prepare_datasets(
self,
dataset,
val_dataset,
train_split_paths,
val_split_paths,
require_dataset=True,
require_val_dataset=True,
):
input_dataset_iterator = None
eval_dataset_iterator = None
if isinstance(dataset, ExternalDataset):
dataset_path = dataset.path
if dataset.dataset_type.lower() != "mot":
raise ValueError(
"ExternalDataset (" + str(dataset) +
") is given as a dataset, but it is not a MOT dataset")
transforms = T.Compose([T.ToTensor()])
input_dataset_iterator = JointDataset(
dataset_path,
train_split_paths,
down_ratio=self.down_ratio,
max_objects=self.max_objs,
ltrb=self.ltrb,
mse_loss=self.mse_loss,
augment=False, transforms=transforms,
)
elif isinstance(dataset, DatasetIterator):
input_dataset_iterator = MappedDatasetIterator(
dataset,
lambda d: process_dataset(
d[0], d[1], self.ltrb, self.down_ratio,
self.max_objs, self.num_classes, self.mse_loss
)
)
else:
if require_dataset or dataset is not None:
raise ValueError(
"dataset parameter should be an ExternalDataset or a DatasetIterator"
)
if isinstance(val_dataset, ExternalDataset):
val_dataset_path = val_dataset.path
if val_dataset.dataset_type.lower() != "mot":
raise ValueError(
"ExternalDataset (" + str(val_dataset) +
") is given as a val_dataset, but it is not a MOT dataset"
)
eval_dataset_iterator = RawMotDatasetIterator(
val_dataset_path,
val_split_paths,
down_ratio=self.down_ratio,
max_objects=self.max_objs,
ltrb=self.ltrb,
mse_loss=self.mse_loss,
)
elif isinstance(val_dataset, DatasetIterator):
eval_dataset_iterator = val_dataset
elif val_dataset is None:
if isinstance(dataset, ExternalDataset):
val_dataset_path = dataset.path
if dataset.dataset_type.lower() != "mot":
raise ValueError(
"ExternalDataset (" + str(dataset) +
") is given as a dataset, but it is not a MOT dataset"
)
eval_dataset_iterator = RawMotDatasetIterator(
val_dataset_path,
val_split_paths,
down_ratio=self.down_ratio,
max_objects=self.max_objs,
ltrb=self.ltrb,
mse_loss=self.mse_loss,
)
elif require_val_dataset:
raise ValueError(
"val_dataset is None and can't be derived from" +
" the dataset object because the dataset is not an ExternalDataset"
)
else:
eval_dataset_iterator = input_dataset_iterator
else:
raise ValueError(
"val_dataset parameter should be an ExternalDataset or a DatasetIterator or None"
)
return input_dataset_iterator, eval_dataset_iterator
def __create_model(self):
heads = {
'hm': self.num_classes,
'wh': 2 if not self.ltrb else 4,
'id': self.reid_dim
}
if self.reg_offset:
heads.update({'reg': 2})
self.heads = heads
self.model = create_model(self.backbone, heads, self.head_conv)
self.model.to(self.device)
self.model.ort_session = None
self.model.heads_names = heads.keys()
self.model_optimizer = torch.optim.Adam(self.model.parameters(), self.lr)
self.tracker = JDETracker(
self.model,
self.threshold,
self.track_buffer,
self.max_objs,
self.image_mean,
self.image_std,
self.down_ratio,
self.num_classes,
self.reg_offset,
self.ltrb,
self.frame_rate,
)
@staticmethod
def __extract_trailing(path):
head, tail = ntpath.split(path)
folder_name = tail or ntpath.basename(head)
return folder_name, head, tail
| true
| true
|
f719b58aacd4b24349689985096bc6a158cb01c2
| 2,736
|
py
|
Python
|
tests/crawler/media/test_bcc.py
|
allenyummy/GoodInfo
|
94ab7421d1377450ac4cfdfd6e4667fa52b20d0c
|
[
"MIT"
] | 1
|
2022-01-17T14:06:27.000Z
|
2022-01-17T14:06:27.000Z
|
tests/crawler/media/test_bcc.py
|
allenyummy/GoodInfo
|
94ab7421d1377450ac4cfdfd6e4667fa52b20d0c
|
[
"MIT"
] | 9
|
2021-08-12T07:39:01.000Z
|
2021-08-20T08:38:29.000Z
|
tests/crawler/media/test_bcc.py
|
allenyummy/GoodInfo
|
94ab7421d1377450ac4cfdfd6e4667fa52b20d0c
|
[
"MIT"
] | 1
|
2022-02-21T15:45:13.000Z
|
2022-02-21T15:45:13.000Z
|
# encoding=utf-8
# Author: Yu-Lun Chiang
# Description: Test NewsCrawler
import logging
import pytest
from collections import namedtuple
from src.crawler.media import bcc
from src.utils.struct import NewsStruct
logger = logging.getLogger(__name__)
TEST_DATA = namedtuple(
typename="TEST_DATA",
field_names=[
"name",
"link",
"expected_output",
],
)
TEST_DATA_1 = TEST_DATA(
name="中國廣播公司_1",
link="https://www.bcc.com.tw/newsView.6473942",
expected_output=NewsStruct(
title="「這家超商」6/23開賣快篩試劑 雙北2門市限量100盒",
content="\r\n 為了方便民眾居家檢測新冠肺炎,食藥署在19日公布核准5款家用快篩試劑,可就近到藥局、醫療器材販售業者,如藥妝店、醫療器材行、便利商店等商家選購。萊爾富位於雙北的2家門市明(23)日起將首度開賣家用快篩試劑,每店限量100盒,售完為止。萊爾富首度引進國產泰博科技的「福爾威創家用新型冠狀病毒抗原快速檢驗套組」,明天下午3點起,將在台北市迪化店、北縣五工店限量開賣,每盒5入售價1700元,每店限量100盒,不拆售。根據食藥署公布的指引,如果快篩陽性,居家檢疫或隔離者須先與衛生單位聯繫,一般民眾則到社區採檢院所採檢確認;如果是陰性,民眾仍要遵循防疫規範,做好個人防護,持續自我健康管理。(快篩試劑資料照)\r\n ",
keywords=None,
category=None,
media="中國廣播公司",
datetime="2021/06/22 18:49 報導",
link="https://www.bcc.com.tw/newsView.6473942",
),
)
TEST_DATA_2 = TEST_DATA(
name="中國廣播公司_2",
link="https://www.bcc.com.tw/newsView.4839712",
expected_output=NewsStruct(
title="台積電衝關未成 聯電ADR爆漲股價再登新高",
content="\r\n 半導體類股正當紅,台積電今天(24日)早盤衝關500元短暫達標後拉回,聯電延續昨天的強勢,在ADR飆漲超過20%助威下,股價漲幅超過7%,最高攻至39.7元,市值擠下股王大立光,繼續成為台股人氣王。因為聯電的狂飆,大盤儘管稍事休息,拉回的幅度也很有限。(張佳琪報導)台股週一的兩大支柱台積電、聯電,週二股價兩樣情,台積電挑戰500元大關,早盤開盤隨即攻頂,但是衝高後買盤追價謹慎,導致股價翻黑呈現小跌。聯電因週一股價漲停板鎖住,美國ADR強漲20.24%,帶動股價開盤後強勢走高,隨即衝過39元一路向上,攻至39.7元,股價又改寫18年新高,且追價買單積極,漲幅超過7%,市值擠下股王大立光。讓股價瞬間點火爆衝的關鍵是美系外資分析師最新出具的報告大力看好聯電。理由是受惠於5G、AI、高速運算等發展,聯電產用率將提高至90%到95%,因此,8吋晶圓價格調漲、12吋晶圓產用率提升,以及28奈米拓展有成,推估聯電明後年資本支出將達12億美元,重申「買進」評等,目標價由32元上調至54.5元。分析師表示,三大法人週一同步大買聯電,週二的漲勢,內外資應都有貢獻。至於是否漲到外資報告訂下的目標價,分析師認為,以今年聯電EPS預估2.25元推算,如果漲到54.5元,本益比落在24倍,雖然高但不至於離譜,因此認為如果外資買盤力道夠強,目標價就可能達標。(圖:雅虎奇摩)\r\n ",
keywords=None,
category=None,
media="中國廣播公司",
datetime="2020/11/24 11:26 報導",
link="https://www.bcc.com.tw/newsView.4839712",
),
)
TEST_DATA_LIST = [TEST_DATA_1, TEST_DATA_2]
@pytest.fixture(scope="module")
def newsCrawler():
    """Module-scoped fixture: one shared BCC news crawler instance for all tests."""
    logger.warning("Init News Crawler ...")
    crawler = bcc.BCCNewsCrawler()
    return crawler
@pytest.mark.parametrize(
    argnames="name, link, expected_output",
    argvalues=[tuple(t) for t in TEST_DATA_LIST],
    ids=[
        f"{t.name}, {t.link[:50]+'...' if len(t.link) > 50 else t.link}"
        for t in TEST_DATA_LIST
    ],
)
def test_get_info(
    newsCrawler,
    name,
    link,
    expected_output,
):
    """Crawl `link` and compare the parsed NewsStruct against the expected fixture."""
    actual = newsCrawler.getInfo(link=link)
    actual_dict = NewsStruct.__2dict__(actual)
    expected_dict = NewsStruct.__2dict__(expected_output)
    assert actual_dict == expected_dict
| 36
| 652
| 0.69883
|
import logging
import pytest
from collections import namedtuple
from src.crawler.media import bcc
from src.utils.struct import NewsStruct
logger = logging.getLogger(__name__)
TEST_DATA = namedtuple(
typename="TEST_DATA",
field_names=[
"name",
"link",
"expected_output",
],
)
TEST_DATA_1 = TEST_DATA(
name="中國廣播公司_1",
link="https://www.bcc.com.tw/newsView.6473942",
expected_output=NewsStruct(
title="「這家超商」6/23開賣快篩試劑 雙北2門市限量100盒",
content="\r\n 為了方便民眾居家檢測新冠肺炎,食藥署在19日公布核准5款家用快篩試劑,可就近到藥局、醫療器材販售業者,如藥妝店、醫療器材行、便利商店等商家選購。萊爾富位於雙北的2家門市明(23)日起將首度開賣家用快篩試劑,每店限量100盒,售完為止。萊爾富首度引進國產泰博科技的「福爾威創家用新型冠狀病毒抗原快速檢驗套組」,明天下午3點起,將在台北市迪化店、北縣五工店限量開賣,每盒5入售價1700元,每店限量100盒,不拆售。根據食藥署公布的指引,如果快篩陽性,居家檢疫或隔離者須先與衛生單位聯繫,一般民眾則到社區採檢院所採檢確認;如果是陰性,民眾仍要遵循防疫規範,做好個人防護,持續自我健康管理。(快篩試劑資料照)\r\n ",
keywords=None,
category=None,
media="中國廣播公司",
datetime="2021/06/22 18:49 報導",
link="https://www.bcc.com.tw/newsView.6473942",
),
)
TEST_DATA_2 = TEST_DATA(
name="中國廣播公司_2",
link="https://www.bcc.com.tw/newsView.4839712",
expected_output=NewsStruct(
title="台積電衝關未成 聯電ADR爆漲股價再登新高",
content="\r\n 半導體類股正當紅,台積電今天(24日)早盤衝關500元短暫達標後拉回,聯電延續昨天的強勢,在ADR飆漲超過20%助威下,股價漲幅超過7%,最高攻至39.7元,市值擠下股王大立光,繼續成為台股人氣王。因為聯電的狂飆,大盤儘管稍事休息,拉回的幅度也很有限。(張佳琪報導)台股週一的兩大支柱台積電、聯電,週二股價兩樣情,台積電挑戰500元大關,早盤開盤隨即攻頂,但是衝高後買盤追價謹慎,導致股價翻黑呈現小跌。聯電因週一股價漲停板鎖住,美國ADR強漲20.24%,帶動股價開盤後強勢走高,隨即衝過39元一路向上,攻至39.7元,股價又改寫18年新高,且追價買單積極,漲幅超過7%,市值擠下股王大立光。讓股價瞬間點火爆衝的關鍵是美系外資分析師最新出具的報告大力看好聯電。理由是受惠於5G、AI、高速運算等發展,聯電產用率將提高至90%到95%,因此,8吋晶圓價格調漲、12吋晶圓產用率提升,以及28奈米拓展有成,推估聯電明後年資本支出將達12億美元,重申「買進」評等,目標價由32元上調至54.5元。分析師表示,三大法人週一同步大買聯電,週二的漲勢,內外資應都有貢獻。至於是否漲到外資報告訂下的目標價,分析師認為,以今年聯電EPS預估2.25元推算,如果漲到54.5元,本益比落在24倍,雖然高但不至於離譜,因此認為如果外資買盤力道夠強,目標價就可能達標。(圖:雅虎奇摩)\r\n ",
keywords=None,
category=None,
media="中國廣播公司",
datetime="2020/11/24 11:26 報導",
link="https://www.bcc.com.tw/newsView.4839712",
),
)
TEST_DATA_LIST = [TEST_DATA_1, TEST_DATA_2]
@pytest.fixture(scope="module")
def newsCrawler():
logger.warning("Init News Crawler ...")
return bcc.BCCNewsCrawler()
@pytest.mark.parametrize(
argnames="name, link, expected_output",
argvalues=[tuple(t) for t in TEST_DATA_LIST],
ids=[
f"{t.name}, {t.link[:50]+'...' if len(t.link) > 50 else t.link}"
for t in TEST_DATA_LIST
],
)
def test_get_info(
newsCrawler,
name,
link,
expected_output,
):
output = newsCrawler.getInfo(link=link)
assert NewsStruct.__2dict__(output) == NewsStruct.__2dict__(expected_output)
| true
| true
|
f719b5a93057ca90d71d3ce08000892efc53327a
| 659
|
py
|
Python
|
2-hard/following-integer/main.py
|
mpillar/codeeval
|
ad1fc5aea277575dcce6ad5db230d7a2bfe41eed
|
[
"Unlicense"
] | 21
|
2015-02-09T18:41:15.000Z
|
2021-07-31T02:43:28.000Z
|
2-hard/following-integer/main.py
|
mpillar/codeeval
|
ad1fc5aea277575dcce6ad5db230d7a2bfe41eed
|
[
"Unlicense"
] | null | null | null |
2-hard/following-integer/main.py
|
mpillar/codeeval
|
ad1fc5aea277575dcce6ad5db230d7a2bfe41eed
|
[
"Unlicense"
] | 37
|
2015-01-06T06:20:17.000Z
|
2021-06-21T18:22:13.000Z
|
import sys
def get_digits_ignore_zero(x):
    """Return a {digit: count} dict for the decimal digits of x, skipping zeros."""
    counts = {}
    for ch in str(x):
        if ch != '0':
            counts[ch] = counts.get(ch, 0) + 1
    return counts
def following_integer(x):
    """Return the smallest integer greater than x with the same multiset of non-zero digits."""
    def count_nonzero_digits(value):
        # Inlined digit counter (same behavior as get_digits_ignore_zero).
        counts = {}
        for ch in str(value):
            if ch != '0':
                counts[ch] = counts.get(ch, 0) + 1
        return counts

    target = count_nonzero_digits(x)
    candidate = x
    while True:
        candidate += 1
        if count_nonzero_digits(candidate) == target:
            return candidate
# Script entry: read one integer per non-empty line of the file given as
# argv[1] and print the following integer for each.
# FIX: use a context manager so the file is closed even if a line fails to
# parse (the original's manual close() was skipped on exceptions).
with open(sys.argv[1], 'r') as test_cases:
    for test in test_cases:
        test = test.strip()
        if len(test) == 0:
            continue
        test = int(test)
        print(following_integer(test))
| 21.966667
| 47
| 0.576631
|
import sys
def get_digits_ignore_zero(x):
digits = {}
for digit in str(x):
if digit == '0':
continue
if digit in digits:
digits[digit] += 1
else:
digits[digit] = 1
return digits
def following_integer(x):
original_digits = get_digits_ignore_zero(x)
while True:
x += 1
digits = get_digits_ignore_zero(x)
if original_digits == digits:
return x
test_cases = open(sys.argv[1], 'r')
for test in test_cases:
test = test.strip()
if len(test) == 0:
continue
test = int(test)
print(following_integer(test))
test_cases.close()
| true
| true
|
f719b60f710335528b05a8c8cbb30e8033fe17df
| 13,939
|
py
|
Python
|
tests/base_test_class.py
|
uncycler/django-DefectDojo
|
d7523e1dc34af47185830c13bfa7aedfc667dd60
|
[
"BSD-3-Clause"
] | 3
|
2020-10-27T08:58:03.000Z
|
2021-04-28T14:20:16.000Z
|
tests/base_test_class.py
|
uncycler/django-DefectDojo
|
d7523e1dc34af47185830c13bfa7aedfc667dd60
|
[
"BSD-3-Clause"
] | 82
|
2020-11-06T22:34:05.000Z
|
2021-08-10T16:30:48.000Z
|
tests/base_test_class.py
|
uncycler/django-DefectDojo
|
d7523e1dc34af47185830c13bfa7aedfc667dd60
|
[
"BSD-3-Clause"
] | 2
|
2022-02-07T09:57:28.000Z
|
2022-03-11T08:42:59.000Z
|
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import NoAlertPresentException
import unittest
import os
import re
# import time
# Shared WebDriver instance and its options, reused across all test classes so
# the browser is launched only once per test session (see BaseTestCase.setUpClass).
dd_driver = None
dd_driver_options = None
class BaseTestCase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Launch a shared headless Chrome once per session and expose it as cls.driver."""
        global dd_driver
        if not dd_driver:
            # setupModule and tearDownModule are not working in our scenario, so for now we use setupClass and a global variable
            # global variables are dirty, but in unit tests scenario's like these they are acceptable
            print('launching browser for: ', cls.__name__)
            global dd_driver_options
            dd_driver_options = Options()
            # headless means no UI, if you want to see what is happening remove headless. Adding detach will leave the window open after the test
            dd_driver_options.add_argument("--headless")
            # dd_driver_options.add_experimental_option("detach", True)
            # the next 2 maybe needed in some scenario's for example on WSL or other headless situations
            dd_driver_options.add_argument("--no-sandbox")
            # dd_driver_options.add_argument("--disable-dev-shm-usage")
            dd_driver_options.add_argument("--disable-gpu")  # on windows sometimes chrome can't start with certain gpu driver versions, even in headless mode
            # start maximized or at least with sufficient width because datatables will hide certain controls when the screen is too narrow
            dd_driver_options.add_argument("--window-size=1280,1024")
            # dd_driver_options.add_argument("--start-maximized")
            dd_driver_options.set_capability("acceptInsecureCerts", True)
            # some extra logging can be turned on if you want to query the browser javascript console in your tests
            desired = webdriver.DesiredCapabilities.CHROME
            desired['goog:loggingPrefs'] = {'browser': 'ALL'}
            # change path of chromedriver according to which directory you have chromedriver.
            print('starting chromedriver with options: ', vars(dd_driver_options), desired)
            dd_driver = webdriver.Chrome('chromedriver', chrome_options=dd_driver_options, desired_capabilities=desired)
            # best practice is only use explicit waits
            dd_driver.implicitly_wait(1)
        cls.driver = dd_driver
        # DD_BASE_URL must be set in the environment; tests build URLs from it.
        cls.base_url = os.environ['DD_BASE_URL']
def setUp(self):
self.verificationErrors = []
self.accept_next_alert = True
self.accept_javascript_errors = False
self.driver.execute_script("console.clear()")
# clear browser console logs?
def login_page(self):
driver = self.driver
driver.get(self.base_url + "login")
driver.find_element_by_id("id_username").clear()
driver.find_element_by_id("id_username").send_keys(os.environ['DD_ADMIN_USER'])
driver.find_element_by_id("id_password").clear()
driver.find_element_by_id("id_password").send_keys(os.environ['DD_ADMIN_PASSWORD'])
driver.find_element_by_css_selector("button.btn.btn-success").click()
self.assertFalse(self.is_element_by_css_selector_present('.alert-danger', 'Please enter a correct username and password'))
return driver
def goto_product_overview(self, driver):
driver.get(self.base_url + "product")
self.wait_for_datatable_if_content("no_products", "products_wrapper")
def goto_component_overview(self, driver):
driver.get(self.base_url + "components")
def goto_active_engagements_overview(self, driver):
# return self.goto_engagements_internal(driver, 'engagement')
# engagement overview doesn't seem to have the datatables yet modifying the DOM
# https://github.com/DefectDojo/django-DefectDojo/issues/2173
driver.get(self.base_url + 'engagement')
# self.goto_engagements_internal(driver, 'engagement')
return driver
def goto_all_engagements_overview(self, driver):
return self.goto_engagements_internal(driver, 'engagements_all')
def goto_engagements_internal(self, driver, rel_url):
driver.get(self.base_url + rel_url)
self.wait_for_datatable_if_content("no_engagements", "engagements_wrapper")
return driver
def goto_all_findings_list(self, driver):
driver.get(self.base_url + "finding")
self.wait_for_datatable_if_content("no_findings", "open_findings_wrapper")
def wait_for_datatable_if_content(self, no_content_id, wrapper_id):
no_content = None
try:
no_content = self.driver.find_element_by_id(no_content_id)
except:
pass
if no_content is None:
# wait for product_wrapper div as datatables javascript modifies the DOM on page load.
WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID, wrapper_id)))
def is_element_by_css_selector_present(self, selector, text=None):
elems = self.driver.find_elements_by_css_selector(selector)
if len(elems) == 0:
# print('no elements!')
return False
if text is None:
return True
for elem in elems:
print(elem.text)
if text in elem.text:
# print('contains!')
return True
# print('text mismatch!')
return False
def is_success_message_present(self, text=None):
return self.is_element_by_css_selector_present('.alert-success', text=text)
def is_error_message_present(self, text=None):
return self.is_element_by_css_selector_present('.alert-danger', text=text)
def is_text_present_on_page(self, text):
# DEBUG: couldn't find: Product type added successfully. path: //*[contains(text(),'Product type added successfully.')]
# can't get this xpath to work
# path = "//*[contains(text(), '" + text + "')]"
# elems = self.driver.find_elements_by_xpath(path)
# if len(elems) == 0:
# print("DEBUG: couldn't find: ", text, "path: ", path)
body = self.driver.find_element_by_tag_name("body")
return re.search(text, body.text)
def element_exists_by_id(self, id):
elems = self.driver.find_elements_by_id(id)
return len(elems) > 0
def change_system_setting(self, id, enable=True):
print("changing system setting " + id + " enable: " + str(enable))
driver = self.login_page()
driver.get(self.base_url + 'system_settings')
is_enabled = driver.find_element_by_id(id).is_selected()
if (enable and not is_enabled) or (not enable and is_enabled):
# driver.find_element_by_xpath('//*[@id=' + id + ']').click()
driver.find_element_by_id(id).click()
# save settings
driver.find_element_by_css_selector("input.btn.btn-primary").click()
# check if it's enabled after reload
is_enabled = driver.find_element_by_id(id).is_selected()
if enable:
self.assertTrue(is_enabled)
if not enable:
self.assertFalse(is_enabled)
return is_enabled
def enable_system_setting(self, id):
return self.change_system_setting(id, enable=True)
def disable_system_setting(self, id):
return self.change_system_setting(id, enable=False)
def enable_jira(self):
return self.enable_system_setting('id_enable_jira')
def disable_jira(self):
return self.disable_system_setting('id_enable_jira')
def disable_github(self):
return self.disable_system_setting('id_enable_github')
def enable_github(self):
return self.enable_system_setting('id_enable_github')
def enable_block_execution(self):
# we set the admin user (ourselves) to have block_execution checked
# this will force dedupe to happen synchronously, among other things like notifications, rules, ...
driver = self.login_page()
driver.get(self.base_url + 'profile')
if not driver.find_element_by_id('id_block_execution').is_selected():
driver.find_element_by_xpath('//*[@id="id_block_execution"]').click()
# save settings
driver.find_element_by_css_selector("input.btn.btn-primary").click()
# check if it's enabled after reload
self.assertTrue(driver.find_element_by_id('id_block_execution').is_selected())
return driver
def is_alert_present(self):
try:
self.driver.switch_to_alert()
except NoAlertPresentException:
return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally:
self.accept_next_alert = True
def assertNoConsoleErrors(self):
"""
Sample output for levels (i.e. errors are SEVERE)
{'level': 'DEBUG', 'message': 'http://localhost:8080/product/type/4/edit 560:12 "debug"', 'source': 'console-api', 'timestamp': 1583952828410}
{'level': 'INFO', 'message': 'http://localhost:8080/product/type/4/edit 561:16 "info"', 'source': 'console-api', 'timestamp': 1583952828410}
{'level': 'WARNING', 'message': 'http://localhost:8080/product/type/4/edit 562:16 "warning"', 'source': 'console-api', 'timestamp': 1583952828410}
{'level': 'SEVERE', 'message': 'http://localhost:8080/product/type/4/edit 563:16 "error"', 'source': 'console-api', 'timestamp': 1583952828410}
"""
for entry in WebdriverOnlyNewLogFacade(self.driver).get_log('browser'):
"""
images are not working in current docker/travis deployment, so ignore those 404s
see: https://github.com/DefectDojo/django-DefectDojo/issues/2045
examples:
http://localhost:8080/static/dojo/img/zoom-in.cur - Failed to load resource: the server responded with a status of 404 (Not Found)
http://localhost:8080/media/CACHE/images/finding_images/1bf9c0b1-5ed1-4b4e-9551-bcbfd198b90a/7d8d9af058566b8f2fe6548d96c63237.jpg - Failed to load resource: the server responded with a status of 404 (Not Found)
"""
accepted_javascript_messages = r'((zoom\-in\.cur.*)|(images\/finding_images\/.*))404\ \(Not\ Found\)'
# accepted_javascript_messages = r'((zoom\-in\.cur.*)|(images\/finding_images\/.*))404\ \(Not\ Found\)|(bootstrap\-chosen\.css\.map)'
if (entry['level'] == 'SEVERE'):
# print(self.driver.current_url) # TODO actually this seems to be the previous url
# self.driver.save_screenshot("C:\\Data\\django-DefectDojo\\tests\\javascript-errors.png")
# with open("C:\\Data\\django-DefectDojo\\tests\\javascript-errors.html", "w") as f:
# f.write(self.driver.page_source)
print(entry)
print('There was a SEVERE javascript error in the console, please check all steps fromt the current test to see where it happens')
print('Currently there is no reliable way to find out at which url the error happened, but it could be: .' + self.driver.current_url)
if self.accept_javascript_errors:
print('WARNING: skipping SEVERE javascript error because accept_javascript_errors is True!')
elif re.search(accepted_javascript_messages, entry['message']):
print('WARNING: skipping javascript errors related to finding images, see https://github.com/DefectDojo/django-DefectDojo/issues/2045')
else:
self.assertNotEqual(entry['level'], 'SEVERE')
return True
def tearDown(self):
self.assertNoConsoleErrors()
self.assertEqual([], self.verificationErrors)
@classmethod
def tearDownDriver(cls):
print('tearDownDriver: ', cls.__name__)
global dd_driver
if dd_driver:
if not dd_driver_options.experimental_options or not dd_driver_options.experimental_options['detach']:
print('closing browser')
dd_driver.quit()
class WebdriverOnlyNewLogFacade(object):
last_timestamp = 0
def __init__(self, webdriver):
self._webdriver = webdriver
def get_log(self, log_type):
last_timestamp = self.last_timestamp
entries = self._webdriver.get_log(log_type)
filtered = []
for entry in entries:
# check the logged timestamp against the
# stored timestamp
if entry["timestamp"] > self.last_timestamp:
filtered.append(entry)
# save the last timestamp only if newer
# in this set of logs
if entry["timestamp"] > last_timestamp:
last_timestamp = entry["timestamp"]
# store the very last timestamp
self.last_timestamp = last_timestamp
return filtered
def on_exception_html_source_logger(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
print("exception occured at url:", self.driver.current_url)
print("page source:", self.driver.page_source)
f = open("selenium_page_source.html", "w", encoding='utf-8')
f.writelines(self.driver.page_source)
# time.sleep(30)
raise(e)
return wrapper
| 43.423676
| 222
| 0.65801
|
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import NoAlertPresentException
import unittest
import os
import re
dd_driver = None
dd_driver_options = None
class BaseTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
global dd_driver
if not dd_driver:
print('launching browser for: ', cls.__name__)
global dd_driver_options
dd_driver_options = Options()
# headless means no UI, if you want to see what is happening remove headless. Adding detach will leave the window open after the test
dd_driver_options.add_argument("--headless")
# dd_driver_options.add_experimental_option("detach", True)
# the next 2 maybe needed in some scenario's for example on WSL or other headless situations
dd_driver_options.add_argument("--no-sandbox")
dd_driver_options.add_argument("--disable-gpu")
# start maximized or at least with sufficient with because datatables will hide certain controls when the screen is too narrow
dd_driver_options.add_argument("--window-size=1280,1024")
# dd_driver_options.add_argument("--start-maximized")
dd_driver_options.set_capability("acceptInsecureCerts", True)
# some extra logging can be turned on if you want to query the browser javascripe console in your tests
desired = webdriver.DesiredCapabilities.CHROME
desired['goog:loggingPrefs'] = {'browser': 'ALL'}
# change path of chromedriver according to which directory you have chromedriver.
print('starting chromedriver with options: ', vars(dd_driver_options), desired)
dd_driver = webdriver.Chrome('chromedriver', chrome_options=dd_driver_options, desired_capabilities=desired)
# best practice is only use explicit waits
dd_driver.implicitly_wait(1)
cls.driver = dd_driver
cls.base_url = os.environ['DD_BASE_URL']
def setUp(self):
self.verificationErrors = []
self.accept_next_alert = True
self.accept_javascript_errors = False
self.driver.execute_script("console.clear()")
# clear browser console logs?
def login_page(self):
driver = self.driver
driver.get(self.base_url + "login")
driver.find_element_by_id("id_username").clear()
driver.find_element_by_id("id_username").send_keys(os.environ['DD_ADMIN_USER'])
driver.find_element_by_id("id_password").clear()
driver.find_element_by_id("id_password").send_keys(os.environ['DD_ADMIN_PASSWORD'])
driver.find_element_by_css_selector("button.btn.btn-success").click()
self.assertFalse(self.is_element_by_css_selector_present('.alert-danger', 'Please enter a correct username and password'))
return driver
def goto_product_overview(self, driver):
driver.get(self.base_url + "product")
self.wait_for_datatable_if_content("no_products", "products_wrapper")
def goto_component_overview(self, driver):
driver.get(self.base_url + "components")
def goto_active_engagements_overview(self, driver):
# return self.goto_engagements_internal(driver, 'engagement')
# engagement overview doesn't seem to have the datatables yet modifying the DOM
driver.get(self.base_url + 'engagement')
return driver
def goto_all_engagements_overview(self, driver):
return self.goto_engagements_internal(driver, 'engagements_all')
def goto_engagements_internal(self, driver, rel_url):
driver.get(self.base_url + rel_url)
self.wait_for_datatable_if_content("no_engagements", "engagements_wrapper")
return driver
def goto_all_findings_list(self, driver):
driver.get(self.base_url + "finding")
self.wait_for_datatable_if_content("no_findings", "open_findings_wrapper")
def wait_for_datatable_if_content(self, no_content_id, wrapper_id):
no_content = None
try:
no_content = self.driver.find_element_by_id(no_content_id)
except:
pass
if no_content is None:
WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID, wrapper_id)))
def is_element_by_css_selector_present(self, selector, text=None):
elems = self.driver.find_elements_by_css_selector(selector)
if len(elems) == 0:
return False
if text is None:
return True
for elem in elems:
print(elem.text)
if text in elem.text:
return True
return False
def is_success_message_present(self, text=None):
return self.is_element_by_css_selector_present('.alert-success', text=text)
def is_error_message_present(self, text=None):
return self.is_element_by_css_selector_present('.alert-danger', text=text)
def is_text_present_on_page(self, text):
# can't get this xpath to work
body = self.driver.find_element_by_tag_name("body")
return re.search(text, body.text)
def element_exists_by_id(self, id):
elems = self.driver.find_elements_by_id(id)
return len(elems) > 0
def change_system_setting(self, id, enable=True):
print("changing system setting " + id + " enable: " + str(enable))
driver = self.login_page()
driver.get(self.base_url + 'system_settings')
is_enabled = driver.find_element_by_id(id).is_selected()
if (enable and not is_enabled) or (not enable and is_enabled):
# driver.find_element_by_xpath('//*[@id=' + id + ']').click()
driver.find_element_by_id(id).click()
# save settings
driver.find_element_by_css_selector("input.btn.btn-primary").click()
# check if it's enabled after reload
is_enabled = driver.find_element_by_id(id).is_selected()
if enable:
self.assertTrue(is_enabled)
if not enable:
self.assertFalse(is_enabled)
return is_enabled
def enable_system_setting(self, id):
return self.change_system_setting(id, enable=True)
def disable_system_setting(self, id):
return self.change_system_setting(id, enable=False)
def enable_jira(self):
return self.enable_system_setting('id_enable_jira')
def disable_jira(self):
return self.disable_system_setting('id_enable_jira')
def disable_github(self):
return self.disable_system_setting('id_enable_github')
def enable_github(self):
return self.enable_system_setting('id_enable_github')
def enable_block_execution(self):
driver = self.login_page()
driver.get(self.base_url + 'profile')
if not driver.find_element_by_id('id_block_execution').is_selected():
driver.find_element_by_xpath('//*[@id="id_block_execution"]').click()
driver.find_element_by_css_selector("input.btn.btn-primary").click()
self.assertTrue(driver.find_element_by_id('id_block_execution').is_selected())
return driver
def is_alert_present(self):
try:
self.driver.switch_to_alert()
except NoAlertPresentException:
return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally:
self.accept_next_alert = True
def assertNoConsoleErrors(self):
for entry in WebdriverOnlyNewLogFacade(self.driver).get_log('browser'):
accepted_javascript_messages = r'((zoom\-in\.cur.*)|(images\/finding_images\/.*))404\ \(Not\ Found\)'
# accepted_javascript_messages = r'((zoom\-in\.cur.*)|(images\/finding_images\/.*))404\ \(Not\ Found\)|(bootstrap\-chosen\.css\.map)'
if (entry['level'] == 'SEVERE'):
# print(self.driver.current_url) # TODO actually this seems to be the previous url
# self.driver.save_screenshot("C:\\Data\\django-DefectDojo\\tests\\javascript-errors.png")
# with open("C:\\Data\\django-DefectDojo\\tests\\javascript-errors.html", "w") as f:
# f.write(self.driver.page_source)
print(entry)
print('There was a SEVERE javascript error in the console, please check all steps fromt the current test to see where it happens')
print('Currently there is no reliable way to find out at which url the error happened, but it could be: .' + self.driver.current_url)
if self.accept_javascript_errors:
print('WARNING: skipping SEVERE javascript error because accept_javascript_errors is True!')
elif re.search(accepted_javascript_messages, entry['message']):
print('WARNING: skipping javascript errors related to finding images, see https://github.com/DefectDojo/django-DefectDojo/issues/2045')
else:
self.assertNotEqual(entry['level'], 'SEVERE')
return True
def tearDown(self):
self.assertNoConsoleErrors()
self.assertEqual([], self.verificationErrors)
@classmethod
def tearDownDriver(cls):
print('tearDownDriver: ', cls.__name__)
global dd_driver
if dd_driver:
if not dd_driver_options.experimental_options or not dd_driver_options.experimental_options['detach']:
print('closing browser')
dd_driver.quit()
class WebdriverOnlyNewLogFacade(object):
last_timestamp = 0
def __init__(self, webdriver):
self._webdriver = webdriver
def get_log(self, log_type):
last_timestamp = self.last_timestamp
entries = self._webdriver.get_log(log_type)
filtered = []
for entry in entries:
# check the logged timestamp against the
# stored timestamp
if entry["timestamp"] > self.last_timestamp:
filtered.append(entry)
# save the last timestamp only if newer
# in this set of logs
if entry["timestamp"] > last_timestamp:
last_timestamp = entry["timestamp"]
# store the very last timestamp
self.last_timestamp = last_timestamp
return filtered
def on_exception_html_source_logger(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
print("exception occured at url:", self.driver.current_url)
print("page source:", self.driver.page_source)
f = open("selenium_page_source.html", "w", encoding='utf-8')
f.writelines(self.driver.page_source)
# time.sleep(30)
raise(e)
return wrapper
| true
| true
|
f719b6cebef2b6af3c2533bfa679463c3243666f
| 397
|
py
|
Python
|
Code/Assignment/Assignment/asgi.py
|
vedez/SDEV2004
|
b028c8454ddca9a1abeb95df95e7f189867dd346
|
[
"MIT"
] | null | null | null |
Code/Assignment/Assignment/asgi.py
|
vedez/SDEV2004
|
b028c8454ddca9a1abeb95df95e7f189867dd346
|
[
"MIT"
] | null | null | null |
Code/Assignment/Assignment/asgi.py
|
vedez/SDEV2004
|
b028c8454ddca9a1abeb95df95e7f189867dd346
|
[
"MIT"
] | null | null | null |
"""
ASGI config for Assignment project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Assignment.settings')
application = get_asgi_application()
| 23.352941
| 78
| 0.788413
|
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Assignment.settings')
application = get_asgi_application()
| true
| true
|
f719b6d868fa7d2ce1c38e9b3db6ae27ddd83ee7
| 1,459
|
py
|
Python
|
python/vanitygen_onion.py
|
5kyc0d3r/Junk
|
f95fc9beaaf5f234102e213bd977de51cafdcebe
|
[
"MIT"
] | null | null | null |
python/vanitygen_onion.py
|
5kyc0d3r/Junk
|
f95fc9beaaf5f234102e213bd977de51cafdcebe
|
[
"MIT"
] | null | null | null |
python/vanitygen_onion.py
|
5kyc0d3r/Junk
|
f95fc9beaaf5f234102e213bd977de51cafdcebe
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
"""
MIT License
Copyright (c) 2017 5kyc0d3r
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# This script helps you generate a customized .onion domain for your hidden service on the tor network.
# This should not be used if you require high performance for the domain generation process because
# it will be very slow since it was written in Python. However, Cython support will be added soon which
# will significantly boost the domain generation process.
| 47.064516
| 103
| 0.797807
| true
| true
|
|
f719b711c4580588d5faede2a699731e7e1104b7
| 73,903
|
py
|
Python
|
src/sage/rings/derivation.py
|
sheerluck/sage
|
b5e572b7d231f70c139d9978d68add80c4ef353d
|
[
"BSL-1.0"
] | 1,742
|
2015-01-04T07:06:13.000Z
|
2022-03-30T11:32:52.000Z
|
src/sage/rings/derivation.py
|
sheerluck/sage
|
b5e572b7d231f70c139d9978d68add80c4ef353d
|
[
"BSL-1.0"
] | 66
|
2015-03-19T19:17:24.000Z
|
2022-03-16T11:59:30.000Z
|
src/sage/rings/derivation.py
|
sheerluck/sage
|
b5e572b7d231f70c139d9978d68add80c4ef353d
|
[
"BSL-1.0"
] | 495
|
2015-01-10T10:23:18.000Z
|
2022-03-24T22:06:11.000Z
|
r"""
Derivations
Let `A` be a ring and `B` be an bimodule over `A`.
A derivation `d : A \to B` is an additive map that satisfies
the Leibniz rule
.. MATH::
d(xy) = x d(y) + d(x) y.
If `B` is an algebra over `A` and if we are given in addition a
ring homomorphism `\theta : A \to B`, a twisted derivation with respect
to `\theta` (or a `\theta`-derivation) is an additive map `d : A \to B`
such that
.. MATH::
d(xy) = \theta(x) d(y) + d(x) y.
When `\theta` is the morphism defining the structure of `A`-algebra
on `B`, a `\theta`-derivation is nothing but a derivation.
In general, if `\iota : A \to B` denotes the defining morphism above,
one easily checks that `\theta - \iota` is a `\theta`-derivation.
This file provides support for derivations and twisted derivations
over commutative rings with values in algebras (i.e. we require
that `B` is a commutative `A`-algebra).
In this case, the set of derivations (resp. `\theta`-derivations)
is a module over `B`.
Given a ring `A`, the module of derivations over `A` can be created
as follows::
sage: A.<x,y,z> = QQ[]
sage: M = A.derivation_module()
sage: M
Module of derivations over Multivariate Polynomial Ring in x, y, z over Rational Field
The method :meth:`~sage.rings.derivation.RingDerivationModule.gens`
returns the generators of this module::
sage: A.<x,y,z> = QQ[]
sage: M = A.derivation_module()
sage: M.gens()
(d/dx, d/dy, d/dz)
We can combine them in order to create all derivations::
sage: d = 2*M.gen(0) + z*M.gen(1) + (x^2 + y^2)*M.gen(2)
sage: d
2*d/dx + z*d/dy + (x^2 + y^2)*d/dz
and now play with them::
sage: d(x + y + z)
x^2 + y^2 + z + 2
sage: P = A.random_element()
sage: Q = A.random_element()
sage: d(P*Q) == P*d(Q) + d(P)*Q
True
Alternatively we can use the method
:meth:`~sage.rings.ring.CommutativeRing.derivation`
of the ring `A` to create derivations::
sage: Dx = A.derivation(x); Dx
d/dx
sage: Dy = A.derivation(y); Dy
d/dy
sage: Dz = A.derivation(z); Dz
d/dz
sage: A.derivation([2, z, x^2+y^2])
2*d/dx + z*d/dy + (x^2 + y^2)*d/dz
Sage knows moreover that `M` is a Lie algebra::
sage: M.category()
Join of Category of lie algebras with basis over Rational Field
and Category of modules with basis over Multivariate Polynomial Ring in x, y, z over Rational Field
Computations of Lie brackets are implemented as well::
sage: Dx.bracket(Dy)
0
sage: d.bracket(Dx)
-2*x*d/dz
At the creation of a module of derivations, a codomain can be specified::
sage: B = A.fraction_field()
sage: A.derivation_module(B)
Module of derivations from Multivariate Polynomial Ring in x, y, z over Rational Field
to Fraction Field of Multivariate Polynomial Ring in x, y, z over Rational Field
Alternatively, one can specify a morphism `f` with domain `A`.
In this case, the codomain of the derivations is the codomain of
`f` but the latter is viewed as an algebra over `A` through the
homomorphism `f`.
This construction is useful, for example, if we want to work with
derivations on `A` at a certain point, e.g. `(0,1,2)`. Indeed,
in order to achieve this, we first define the evaluation map at
this point::
sage: ev = A.hom([QQ(0), QQ(1), QQ(2)])
sage: ev
Ring morphism:
From: Multivariate Polynomial Ring in x, y, z over Rational Field
To: Rational Field
Defn: x |--> 0
y |--> 1
z |--> 2
Now we use this ring homomorphism to define a structure of `A`-algebra
on `\QQ` and then build the following module of derivations::
sage: M = A.derivation_module(ev)
sage: M
Module of derivations from Multivariate Polynomial Ring in x, y, z over Rational Field to Rational Field
sage: M.gens()
(d/dx, d/dy, d/dz)
Elements in `M` then acts as derivations at `(0,1,2)`::
sage: Dx = M.gen(0)
sage: Dy = M.gen(1)
sage: Dz = M.gen(2)
sage: f = x^2 + y^2 + z^2
sage: Dx(f) # = 2*x evaluated at (0,1,2)
0
sage: Dy(f) # = 2*y evaluated at (0,1,2)
2
sage: Dz(f) # = 2*z evaluated at (0,1,2)
4
Twisted derivations are handled similarly::
sage: theta = B.hom([B(y),B(z),B(x)])
sage: theta
Ring endomorphism of Fraction Field of Multivariate Polynomial Ring in x, y, z over Rational Field
Defn: x |--> y
y |--> z
z |--> x
sage: M = B.derivation_module(twist=theta)
sage: M
Module of twisted derivations over Fraction Field of Multivariate Polynomial Ring
in x, y, z over Rational Field (twisting morphism: x |--> y, y |--> z, z |--> x)
Over a field, one proves that every `\theta`-derivation is a multiple
of `\theta - id`, so that::
sage: d = M.gen(); d
[x |--> y, y |--> z, z |--> x] - id
and then::
sage: d(x)
-x + y
sage: d(y)
-y + z
sage: d(z)
x - z
sage: d(x + y + z)
0
AUTHOR:
- Xavier Caruso (2018-09)
"""
# ***************************************************************************
# Copyright (C) 2018 Xavier Caruso <xavier.caruso@normalesup.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
# ***************************************************************************
from sage.structure.richcmp import op_EQ, op_NE
from sage.structure.unique_representation import UniqueRepresentation
from sage.sets.family import Family
from sage.modules.module import Module
from sage.structure.element import ModuleElement
from sage.rings.integer_ring import ZZ
from sage.rings.polynomial.polynomial_ring import PolynomialRing_general
from sage.rings.polynomial.multi_polynomial_ring_base import MPolynomialRing_base
from sage.rings.power_series_ring import PowerSeriesRing_generic
from sage.rings.laurent_series_ring import LaurentSeriesRing
from sage.rings.fraction_field import FractionField_generic
from sage.rings.quotient_ring import QuotientRing_generic
from sage.rings.polynomial.polynomial_quotient_ring import PolynomialQuotientRing_generic
from sage.rings.finite_rings.integer_mod_ring import IntegerModRing_generic
from sage.rings.padics.padic_generic import pAdicGeneric
from sage.categories.number_fields import NumberFields
from sage.categories.finite_fields import FiniteFields
from sage.categories.modules import Modules
from sage.categories.modules_with_basis import ModulesWithBasis
from sage.categories.lie_algebras import LieAlgebras
from sage.categories.map import Map
from sage.categories.rings import Rings
from sage.misc.latex import latex
class RingDerivationModule(Module, UniqueRepresentation):
    """
    A class for modules of derivations over a commutative ring.
    """
    def __init__(self, domain, codomain, twist=None):
        """
        Initialize this module of derivation.

        TESTS::

            sage: A.<x,y> = QQ[]
            sage: M = A.derivation_module()
            sage: TestSuite(M).run()

            sage: from sage.rings.derivation import RingDerivationModule
            sage: R5.<x> = GF(5)[]
            sage: R25.<x> = GF(25)[]
            sage: R7.<x> = GF(7)[]

            sage: RingDerivationModule(R5, R25)
            Module of derivations from Univariate Polynomial Ring in x over Finite Field of size 5 to Univariate Polynomial Ring in x over Finite Field in z2 of size 5^2
            sage: RingDerivationModule(R5, R5^2)
            Traceback (most recent call last):
            ...
            TypeError: the codomain must be an algebra over the domain or a morphism with the correct domain
            sage: RingDerivationModule(R5, R7)
            Traceback (most recent call last):
            ...
            TypeError: the codomain must be an algebra over the domain or a morphism with the correct domain

            sage: theta = R5.hom([R5.gen()^2])
            sage: RingDerivationModule(R5, R25, twist=theta)
            Module of twisted derivations from Univariate Polynomial Ring in x over Finite Field of size 5 to Univariate Polynomial Ring in x over Finite Field in z2 of size 5^2 (twisting morphism: x |--> x^2)
            sage: RingDerivationModule(R7, R7, twist=theta)
            Traceback (most recent call last):
            ...
            TypeError: the domain of the derivation must coerce to the domain of the twisting homomorphism
        """
        if domain not in Rings().Commutative():
            raise TypeError("the domain must be a commutative ring")

        # Normalize ``codomain`` to a ring and compute the morphism that
        # makes it an algebra over ``domain``.
        if codomain in Rings().Commutative() and codomain.has_coerce_map_from(domain):
            defining_morphism = codomain.coerce_map_from(domain)
        elif (isinstance(codomain, Map)
              and codomain.category_for().is_subcategory(Rings())
              and codomain.domain().has_coerce_map_from(domain)):
            if codomain.domain() is domain:
                defining_morphism = codomain
            else:
                defining_morphism = codomain * codomain.domain().coerce_map_from(domain)
            codomain = defining_morphism.codomain()
        else:
            raise TypeError("the codomain must be an algebra over the domain"
                            " or a morphism with the correct domain")

        if twist is not None:
            if not (isinstance(twist, Map) and twist.category_for().is_subcategory(Rings())):
                raise TypeError("the twisting homomorphism must be an homomorphism of rings")
            # Extend/restrict the twist so that it goes from ``domain``
            # to ``codomain`` (do not shadow the builtin ``map``).
            if twist.domain() is not domain:
                coercion = twist.domain().coerce_map_from(domain)
                if coercion is None:
                    raise TypeError("the domain of the derivation must coerce"
                                    " to the domain of the twisting homomorphism")
                twist = twist * coercion
            if twist.codomain() is not codomain:
                coercion = codomain.coerce_map_from(twist.codomain())
                if coercion is None:
                    raise TypeError("the codomain of the twisting homomorphism"
                                    " must coerce to the codomain of the derivation")
                twist = coercion * twist
            # We check if the twisting morphism is the defining morphism
            # (in which case the derivations are in fact untwisted)
            try:
                if twist == defining_morphism:
                    twist = None
                else:
                    for g in domain.gens():
                        if twist(g) != defining_morphism(g):
                            break
                    else:
                        twist = None
            except (AttributeError, NotImplementedError):
                pass

        self._domain = domain
        self._codomain = codomain
        self._defining_morphism = defining_morphism
        self._twist = twist
        self._base_derivation = None
        self._gens = None
        self._basis = self._dual_basis = None
        # Currently basis and gens play exactly the same role because
        # the only rings that are supported lead to free modules of derivations
        # So the code is a bit redundant but we except to be able to cover more
        # rings (with non free modules of derivations) in a near future
        self._constants = (ZZ, False)
        if twist is not None:
            self.Element = RingDerivationWithTwist_generic
            if domain.is_field():
                self._gens = [ 1 ]
                self._basis = [ 1 ]
        elif (domain is ZZ or domain in NumberFields() or domain in FiniteFields()
              or isinstance(domain, IntegerModRing_generic)
              or (isinstance(domain, pAdicGeneric) and (domain.is_field() or domain.absolute_e() == 1))):
            # Over these rings, every derivation vanishes.
            self.Element = RingDerivationWithoutTwist_zero
            self._gens = [ ]
            self._basis = [ ]
            self._dual_basis = [ ]
            self._constants = (domain, True)
        elif (isinstance(domain, (PolynomialRing_general, MPolynomialRing_base, PowerSeriesRing_generic, LaurentSeriesRing))
              or (isinstance(domain, FractionField_generic)
                  and isinstance(domain.ring(), (PolynomialRing_general, MPolynomialRing_base)))):
            self._base_derivation = RingDerivationModule(domain.base_ring(), defining_morphism)
            self.Element = RingDerivationWithoutTwist_function
            try:
                self._gens = self._base_derivation.gens() + domain.gens()
            except NotImplementedError:
                pass
            try:
                self._basis = tuple(self._base_derivation.basis()) + domain.gens()
                self._dual_basis = tuple(self._base_derivation.dual_basis()) + domain.gens()
            except NotImplementedError:
                pass
            constants, sharp = self._base_derivation._constants
            if domain.characteristic() == 0:
                self._constants = (constants, sharp)
            else:
                # in this case, the constants are polynomials in x^p
                # TODO: implement this
                self._constants = (constants, False)
        elif isinstance(domain, FractionField_generic):
            self._base_derivation = RingDerivationModule(domain.ring(), defining_morphism)
            self.Element = RingDerivationWithoutTwist_fraction_field
            try:
                self._gens = self._base_derivation.gens()
            except NotImplementedError:
                pass
            try:
                self._basis = self._base_derivation.basis()
                self._dual_basis = self._base_derivation.dual_basis()
            except NotImplementedError:
                pass
            constants, sharp = self._base_derivation._constants
            self._constants = (constants.fraction_field(), False)
        elif isinstance(domain, PolynomialQuotientRing_generic):
            self._base_derivation = RingDerivationModule(domain.base(), defining_morphism)
            # A derivation descends to the quotient only if it kills the modulus.
            modulus = domain.modulus()
            for der in self._base_derivation.gens():
                if der(modulus) != 0:
                    raise NotImplementedError("derivations over quotient rings"
                                              " are not fully supported")
            self.Element = RingDerivationWithoutTwist_quotient
            try:
                self._gens = self._base_derivation.gens()
            except NotImplementedError:
                pass
            try:
                self._basis = self._base_derivation.basis()
                self._dual_basis = self._base_derivation.dual_basis()
            except NotImplementedError:
                pass
            constants, sharp = self._base_derivation._constants
            self._constants = (constants, False)  # can we do better?
        elif isinstance(domain, QuotientRing_generic):
            self._base_derivation = RingDerivationModule(domain.cover_ring(), defining_morphism)
            # A derivation descends to the quotient only if it kills the
            # defining ideal.
            if any(der(modulus) != 0 for modulus in domain.defining_ideal().gens()
                   for der in self._base_derivation.gens()):
                raise NotImplementedError("derivations over quotient rings"
                                          " are not fully supported")
            self.Element = RingDerivationWithoutTwist_quotient
            try:
                self._gens = self._base_derivation.gens()
            except NotImplementedError:
                pass
            try:
                self._basis = self._base_derivation.basis()
                self._dual_basis = self._base_derivation.dual_basis()
            except NotImplementedError:
                pass
            constants, sharp = self._base_derivation._constants
            self._constants = (constants, False)  # can we do better?
        else:
            raise NotImplementedError("derivations over this ring is not implemented")
        if self._basis is None:
            category = Modules(codomain)
        else:
            category = ModulesWithBasis(codomain)
        if self._twist is None and domain is codomain:
            # Untwisted derivations of a ring into itself form a Lie algebra
            # (under the commutator bracket) over the ring of constants.
            category &= LieAlgebras(self._constants[0])
        Module.__init__(self, codomain, category=category)
        if self._gens is not None:
            self._gens = [self.element_class(self, x) for x in self._gens]
        if self._basis is not None:
            self._basis = [self.element_class(self, x) for x in self._basis]
        if self._dual_basis is not None:
            self._dual_basis = [domain(x) for x in self._dual_basis]

    def __hash__(self):
        """
        Return a hash of ``self``.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: M = R.derivation_module()
            sage: hash(M) == hash((M.domain(), M.codomain(), M.twisting_morphism()))
            True
        """
        return hash((self._domain, self._codomain, self._twist))

    def _coerce_map_from_(self, R):
        """
        Return ``True`` if there is a coercion map from ``R``
        to this module.

        EXAMPLES::

            sage: A.<x> = QQ[]
            sage: B.<y> = A[]
            sage: M1 = A.derivation_module(); M1
            Module of derivations over Univariate Polynomial Ring in x over Rational Field
            sage: M2 = A.derivation_module(B); M2
            Module of derivations from Univariate Polynomial Ring in x over Rational Field
             to Univariate Polynomial Ring in y over Univariate Polynomial Ring in x over Rational Field

            sage: M1._coerce_map_from_(M2) is None
            True
            sage: M1.has_coerce_map_from(M2)
            False
            sage: M2.has_coerce_map_from(M1)
            True

            sage: M1.has_coerce_map_from(ZZ)
            False
            sage: M1.has_coerce_map_from(QQ)
            False
            sage: M1.has_coerce_map_from(A)
            False
        """
        if isinstance(R, RingDerivationModule):
            if R.domain().has_coerce_map_from(self._domain) and self._codomain.has_coerce_map_from(R.codomain()):
                morR = R.defining_morphism()
                morS = self._defining_morphism
                try:
                    # this test is not perfect
                    for g in self._domain.gens():
                        if morR(g) != morS(g):
                            return False
                    return True
                except (AttributeError, NotImplementedError):
                    pass
        return super(RingDerivationModule, self)._coerce_map_from_(R)

    def _repr_(self):
        """
        Return a string representation of this module of derivations.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: R.derivation_module()
            Module of derivations over Multivariate Polynomial Ring in x, y over Integer Ring

            sage: theta = R.hom([y,x])
            sage: R.derivation_module(twist=theta)
            Module of twisted derivations over Multivariate Polynomial Ring in x, y
             over Integer Ring (twisting morphism: x |--> y, y |--> x)
        """
        t = ""
        if self._twist is None:
            s = "Module of derivations"
        else:
            s = "Module of twisted derivations"
            try:
                t = " (twisting morphism: %s)" % self._twist._repr_short()
            except AttributeError:
                pass
        if self._domain is self._codomain:
            s += " over %s" % self._domain
        else:
            s += " from %s to %s" % (self._domain, self._codomain)
        return s + t

    def domain(self):
        """
        Return the domain of the derivations in this module.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: M = R.derivation_module(); M
            Module of derivations over Multivariate Polynomial Ring in x, y over Integer Ring
            sage: M.domain()
            Multivariate Polynomial Ring in x, y over Integer Ring
        """
        return self._domain

    def codomain(self):
        """
        Return the codomain of the derivations in this module.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: M = R.derivation_module(); M
            Module of derivations over Multivariate Polynomial Ring in x, y over Integer Ring
            sage: M.codomain()
            Multivariate Polynomial Ring in x, y over Integer Ring
        """
        return self._codomain

    def defining_morphism(self):
        """
        Return the morphism defining the structure of algebra
        of the codomain over the domain.

        EXAMPLES::

            sage: R.<x> = QQ[]
            sage: M = R.derivation_module()
            sage: M.defining_morphism()
            Identity endomorphism of Univariate Polynomial Ring in x over Rational Field

            sage: S.<y> = R[]
            sage: M = R.derivation_module(S)
            sage: M.defining_morphism()
            Polynomial base injection morphism:
              From: Univariate Polynomial Ring in x over Rational Field
              To:   Univariate Polynomial Ring in y over Univariate Polynomial Ring in x over Rational Field

            sage: ev = R.hom([QQ(0)])
            sage: M = R.derivation_module(ev)
            sage: M.defining_morphism()
            Ring morphism:
              From: Univariate Polynomial Ring in x over Rational Field
              To:   Rational Field
              Defn: x |--> 0
        """
        return self._defining_morphism

    def twisting_morphism(self):
        r"""
        Return the twisting homomorphism of the derivations in this module.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: theta = R.hom([y,x])
            sage: M = R.derivation_module(twist=theta); M
            Module of twisted derivations over Multivariate Polynomial Ring in x, y
             over Integer Ring (twisting morphism: x |--> y, y |--> x)
            sage: M.twisting_morphism()
            Ring endomorphism of Multivariate Polynomial Ring in x, y over Integer Ring
              Defn: x |--> y
                    y |--> x

        When the derivations are untwisted, this method returns nothing::

            sage: M = R.derivation_module()
            sage: M.twisting_morphism()
        """
        return self._twist

    def ngens(self):
        r"""
        Return the number of generators of this module of derivations.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: M = R.derivation_module(); M
            Module of derivations over Multivariate Polynomial Ring in x, y over Integer Ring
            sage: M.ngens()
            2

        Indeed, generators are::

            sage: M.gens()
            (d/dx, d/dy)

        We check that, for a nontrivial twist over a field, the module of
        twisted derivation is a vector space of dimension 1 generated by
        ``twist - id``::

            sage: K = R.fraction_field()
            sage: theta = K.hom([K(y),K(x)])
            sage: M = K.derivation_module(twist=theta); M
            Module of twisted derivations over Fraction Field of Multivariate Polynomial
             Ring in x, y over Integer Ring (twisting morphism: x |--> y, y |--> x)
            sage: M.ngens()
            1
            sage: M.gen()
            [x |--> y, y |--> x] - id
        """
        if self._gens is None:
            raise NotImplementedError("generators are not implemented for this derivation module")
        return len(self._gens)

    def gens(self):
        r"""
        Return the generators of this module of derivations.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: M = R.derivation_module(); M
            Module of derivations over Multivariate Polynomial Ring in x, y over Integer Ring
            sage: M.gens()
            (d/dx, d/dy)

        We check that, for a nontrivial twist over a field, the module of
        twisted derivation is a vector space of dimension 1 generated by
        ``twist - id``::

            sage: K = R.fraction_field()
            sage: theta = K.hom([K(y),K(x)])
            sage: M = K.derivation_module(twist=theta); M
            Module of twisted derivations over Fraction Field of Multivariate Polynomial
             Ring in x, y over Integer Ring (twisting morphism: x |--> y, y |--> x)
            sage: M.gens()
            ([x |--> y, y |--> x] - id,)
        """
        if self._gens is None:
            raise NotImplementedError("generators are not implemented for this derivation module")
        return tuple(self._gens)

    def gen(self, n=0):
        r"""
        Return the ``n``-th generator of this module of derivations.

        INPUT:

        - ``n`` -- an integer (default: ``0``)

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: M = R.derivation_module(); M
            Module of derivations over Multivariate Polynomial Ring in x, y over Integer Ring
            sage: M.gen()
            d/dx
            sage: M.gen(1)
            d/dy
        """
        if self._gens is None:
            raise NotImplementedError("generators are not implemented for this derivation module")
        try:
            return self._gens[n]
        except IndexError:
            raise ValueError("generator not defined")

    def basis(self):
        r"""
        Return a basis of this module of derivations.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: M = R.derivation_module()
            sage: M.basis()
            Family (d/dx, d/dy)
        """
        if self._basis is None:
            raise NotImplementedError("basis is not implemented for this derivation module")
        return Family(self._basis)

    def dual_basis(self):
        r"""
        Return the dual basis of the canonical basis of this module of
        derivations (which is that returned by the method :meth:`basis`).

        .. NOTE::

            The dual basis of `(d_1, \dots, d_n)` is a family
            `(x_1, \ldots, x_n)` of elements in the domain such
            that `d_i(x_i) = 1` and `d_i(x_j) = 0` if `i \neq j`.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: M = R.derivation_module()
            sage: M.basis()
            Family (d/dx, d/dy)
            sage: M.dual_basis()
            Family (x, y)
        """
        if self._dual_basis is None:
            raise NotImplementedError("basis is not implemented for this derivation module")
        return Family(self._dual_basis)

    def ring_of_constants(self):
        r"""
        Return the subring of the domain consisting of elements
        `x` such that `d(x) = 0` for all derivation `d` in this module.

        EXAMPLES::

            sage: R.<x,y> = QQ[]
            sage: M = R.derivation_module()
            sage: M.basis()
            Family (d/dx, d/dy)
            sage: M.ring_of_constants()
            Rational Field
        """
        # ``self._constants`` is a pair (ring, sharp); the ring is only the
        # exact ring of constants when the ``sharp`` flag is True.
        if not self._constants[1]:
            raise NotImplementedError("the computation of the ring of constants"
                                      " is not implemented for this derivation module")
        return self._constants[0]

    def random_element(self, *args, **kwds):
        r"""
        Return a random derivation in this module.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: M = R.derivation_module()
            sage: M.random_element()  # random
            (x^2 + x*y - 3*y^2 + x + 1)*d/dx + (-2*x^2 + 3*x*y + 10*y^2 + 2*x + 8)*d/dy
        """
        if self._gens is None:
            raise NotImplementedError("generators are not implemented for this derivation module")
        return self([ self._codomain.random_element(*args, **kwds) for _ in range(len(self._gens)) ])

    def some_elements(self):
        r"""
        Return a list of elements of this module.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: M = R.derivation_module()
            sage: M.some_elements()
            [d/dx, d/dy, x*d/dx, x*d/dy, y*d/dx, y*d/dy]
        """
        if self._gens is None:
            # ``an_element`` produces a single derivation; wrap it in a list
            # so that this method always returns a list, as documented.
            return [self.an_element()]
        if self._dual_basis is None:
            return self._gens
        return self._gens + [f * D for f in self._dual_basis for D in self._gens]
# The class RingDerivation does not derive from Map (or RingMap)
# because we don't want to see derivations as morphisms in some
# category since they are not stable by composition.
class RingDerivation(ModuleElement):
    r"""
    Common abstract base class for derivations (with or without twist)
    on commutative rings.

    TESTS::

        sage: R.<x,y> = ZZ[]
        sage: f = R.derivation(x) + 2*R.derivation(y); f
        d/dx + 2*d/dy
        sage: f(x*y)
        2*x + y
    """
    def __call__(self, x):
        """
        Apply this derivation to ``x``.

        The argument is first coerced into the domain of the derivation.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: f = x*R.derivation(x) + y*R.derivation(y)
            sage: f(x^2 + 3*x*y - y^2)
            2*x^2 + 6*x*y - 2*y^2
        """
        return self._call_(self.parent().domain()(x))

    def domain(self):
        """
        Return the ring on which this derivation acts.

        EXAMPLES::

            sage: R.<x,y> = QQ[]
            sage: f = R.derivation(y); f
            d/dy
            sage: f.domain()
            Multivariate Polynomial Ring in x, y over Rational Field
            sage: f.domain() is R
            True
        """
        return self.parent().domain()

    def codomain(self):
        """
        Return the ring in which this derivation takes its values.

        EXAMPLES::

            sage: R.<x> = QQ[]
            sage: f = R.derivation(); f
            d/dx
            sage: f.codomain()
            Univariate Polynomial Ring in x over Rational Field
            sage: f.codomain() is R
            True

        ::

            sage: S.<y> = R[]
            sage: M = R.derivation_module(S)
            sage: M.random_element().codomain()
            Univariate Polynomial Ring in y over Univariate Polynomial Ring in x over Rational Field
            sage: M.random_element().codomain() is S
            True
        """
        return self.parent().codomain()
class RingDerivationWithoutTwist(RingDerivation):
    """
    An abstract class for untwisted derivations.
    """
    def _repr_(self):
        r"""
        Return a string representation of this derivation.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: R.derivation(x)
            d/dx
            sage: R.derivation(y)
            d/dy
        """
        parent = self.parent()
        try:
            dual_basis = parent.dual_basis()
        except NotImplementedError:
            return "A derivation on %s" % parent.domain()
        coeffs = self.list()
        s = ""
        # Build one term per basis derivation, skipping zero coefficients.
        for c, v in zip(coeffs, dual_basis):
            sc = str(c)
            if sc == "0":
                continue
            ddx = "d/d%s" % v
            if sc == "1":
                s += " + " + ddx
            elif sc == "-1":
                s += " - " + ddx
            elif c._is_atomic() and sc[0] != "-":
                s += " + %s*%s" % (sc, ddx)
            elif (-c)._is_atomic():
                s += " - %s*%s" % (-c, ddx)
            else:
                s += " + (%s)*%s" % (sc, ddx)
        # Strip the leading separator produced by the loop.
        if s[:3] == " + ":
            return s[3:]
        elif s[:3] == " - ":
            return "-" + s[3:]
        elif s == "":
            return "0"
        else:
            return s

    def _latex_(self):
        r"""
        Return a LaTeX representation of this derivation.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: ddx = R.derivation(x)
            sage: ddy = R.derivation(y)
            sage: latex(ddx)
            \frac{d}{dx}
            sage: latex(ddy)
            \frac{d}{dy}
            sage: latex(ddx + ddy)
            \frac{d}{dx} + \frac{d}{dy}
        """
        parent = self.parent()
        try:
            dual_basis = parent.dual_basis()
        except NotImplementedError:
            return "\\text{A derivation on } %s" % latex(parent.domain())
        coeffs = self.list()
        s = ""
        # Same term-by-term construction as ``_repr_``, in LaTeX syntax.
        for c, v in zip(coeffs, dual_basis):
            sc = str(c)
            if sc == "0":
                continue
            ddx = "\\frac{d}{d%s}" % latex(v)
            if sc == "1":
                s += " + " + ddx
            elif sc == "-1":
                s += " - " + ddx
            elif c._is_atomic() and sc[0] != "-":
                s += " + %s %s" % (sc, ddx)
            elif (-c)._is_atomic():
                s += " - %s %s" % (-c, ddx)
            else:
                s += " + \\left(%s\\right) %s" % (sc, ddx)
        if s[:3] == " + ":
            return s[3:]
        elif s[:3] == " - ":
            return "-" + s[3:]
        elif s == "":
            return "0"
        else:
            return s

    def list(self):
        """
        Return the list of coefficient of this derivation
        on the canonical basis.

        EXAMPLES::

            sage: R.<x,y> = QQ[]
            sage: M = R.derivation_module()
            sage: M.basis()
            Family (d/dx, d/dy)

            sage: R.derivation(x).list()
            [1, 0]
            sage: R.derivation(y).list()
            [0, 1]

            sage: f = x*R.derivation(x) + y*R.derivation(y); f
            x*d/dx + y*d/dy
            sage: f.list()
            [x, y]
        """
        parent = self.parent()
        return [self(x) for x in parent.dual_basis()]

    def monomial_coefficients(self):
        r"""
        Return dictionary of nonzero coordinates (on the canonical
        basis) of this derivation.

        More precisely, this returns a dictionary whose keys are indices
        of basis elements and whose values are the corresponding coefficients.

        EXAMPLES::

            sage: R.<x,y> = QQ[]
            sage: M = R.derivation_module()
            sage: M.basis()
            Family (d/dx, d/dy)

            sage: R.derivation(x).monomial_coefficients()
            {0: 1}
            sage: R.derivation(y).monomial_coefficients()
            {1: 1}

            sage: f = x*R.derivation(x) + y*R.derivation(y); f
            x*d/dx + y*d/dy
            sage: f.monomial_coefficients()
            {0: x, 1: y}
        """
        dual_basis = self.parent().dual_basis()
        # Do not shadow the builtin ``dict``.
        coefficients = {}
        for i, x in enumerate(dual_basis):
            c = self(x)
            if c != 0:
                coefficients[i] = c
        return coefficients

    def is_zero(self):
        """
        Return ``True`` if this derivation is zero.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: f = R.derivation(); f
            d/dx
            sage: f.is_zero()
            False

            sage: (f-f).is_zero()
            True
        """
        return all(c.is_zero() for c in self.list())

    def _richcmp_(self, other, op):
        """
        Compare this derivation with ``other`` according
        to the comparison operator ``op``.

        EXAMPLES::

            sage: R.<x,y,z> = GF(5)[]
            sage: D = sum(v*R.derivation(v) for v in R.gens()); D
            x*d/dx + y*d/dy + z*d/dz
            sage: D.pth_power() == D
            True
        """
        if op == op_EQ:
            if isinstance(other, RingDerivationWithoutTwist):
                return self.list() == other.list()
            else:
                return False
        if op == op_NE:
            if isinstance(other, RingDerivationWithoutTwist):
                return self.list() != other.list()
            else:
                return True
        # Derivations are not ordered.
        return NotImplemented

    def _bracket_(self, other):
        """
        Return the Lie bracket (that is the commutator) of
        this derivation and ``other``.

        EXAMPLES::

            sage: R.<x,y> = QQ[]
            sage: Dx = R.derivation(x)
            sage: Dy = R.derivation(y)
            sage: Dx._bracket_(Dy)
            0
            sage: Dx.bracket(x*Dy)
            d/dy

        TESTS::

            sage: M = R.derivation_module()
            sage: X = M.random_element()
            sage: X.bracket(X)
            0

        We check the Jacobi identity::

            sage: Y = M.random_element()
            sage: Z = M.random_element()
            sage: X.bracket(Y.bracket(Z)) + Y.bracket(Z.bracket(X)) + Z.bracket(X.bracket(Y))
            0

        and the product rule::

            sage: f = R.random_element()
            sage: X.bracket(f*Y) == X(f)*Y + f*X.bracket(Y)
            True
        """
        parent = self.parent()
        if parent.domain() is not parent.codomain():
            raise TypeError("the bracket is only defined for derivations with same domain and codomain")
        # The bracket is determined by its values on the dual basis.
        return parent([self(other(x)) - other(self(x)) for x in parent.dual_basis()])

    def pth_power(self):
        r"""
        Return the `p`-th power of this derivation where `p`
        is the characteristic of the domain.

        .. NOTE::

            Leibniz rule implies that this is again a derivation.

        EXAMPLES::

            sage: R.<x,y> = GF(5)[]
            sage: Dx = R.derivation(x)
            sage: Dx.pth_power()
            0
            sage: (x*Dx).pth_power()
            x*d/dx
            sage: (x^6*Dx).pth_power()
            x^26*d/dx

            sage: Dy = R.derivation(y)
            sage: (x*Dx + y*Dy).pth_power()
            x*d/dx + y*d/dy

        An error is raised if the domain has characteristic zero::

            sage: R.<x,y> = QQ[]
            sage: Dx = R.derivation(x)
            sage: Dx.pth_power()
            Traceback (most recent call last):
            ...
            TypeError: the domain of the derivation must have positive and prime characteristic

        or if the characteristic is not a prime number::

            sage: R.<x,y> = Integers(10)[]
            sage: Dx = R.derivation(x)
            sage: Dx.pth_power()
            Traceback (most recent call last):
            ...
            TypeError: the domain of the derivation must have positive and prime characteristic

        TESTS::

            sage: R.<x,y> = GF(3)[]
            sage: D = R.derivation_module().random_element()
            sage: Dp = D.pth_power()
            sage: f = R.random_element()
            sage: Dp(f) == D(D(D(f)))
            True

            sage: D.bracket(Dp)
            0
        """
        parent = self.parent()
        if parent.domain() is not parent.codomain():
            raise TypeError("the derivation must have the same domain and codomain")
        p = parent.domain().characteristic()
        if not p.is_prime():
            raise TypeError("the domain of the derivation must have positive and prime characteristic")
        # The p-th power is determined by its values on the dual basis:
        # apply the derivation p times to each element.
        images = []
        for x in parent.dual_basis():
            res = x
            for _ in range(p):
                res = self(res)
            images.append(res)
        return parent(images)

    def precompose(self, morphism):
        r"""
        Return the derivation obtained by applying first
        ``morphism`` and then this derivation.

        INPUT:

        - ``morphism`` -- a homomorphism of rings whose codomain is
          the domain of this derivation or a ring that coerces to
          the domain of this derivation

        EXAMPLES::

            sage: A.<x> = QQ[]
            sage: B.<x,y> = QQ[]
            sage: D = B.derivation(x) - 2*x*B.derivation(y); D
            d/dx - 2*x*d/dy

        When restricting to ``A``, the term ``d/dy`` disappears
        (since it vanishes on ``A``)::

            sage: D.precompose(A)
            d/dx

        If we restrict to another well chosen subring, the derivation vanishes::

            sage: C.<t> = QQ[]
            sage: f = C.hom([x^2 + y]); f
            Ring morphism:
              From: Univariate Polynomial Ring in t over Rational Field
              To:   Multivariate Polynomial Ring in x, y over Rational Field
              Defn: t |--> x^2 + y
            sage: D.precompose(f)
            0

        Note that this method cannot be used to compose derivations::

            sage: D.precompose(D)
            Traceback (most recent call last):
            ...
            TypeError: you must give an homomorphism of rings

        TESTS::

            sage: D.precompose(C)
            Traceback (most recent call last):
            ...
            TypeError: the given ring does not coerce to the domain of the derivation
        """
        parent = self.parent()
        # Accept either a ring (coercing to the domain) or a ring morphism.
        if morphism in Rings().Commutative():
            if parent.domain().has_coerce_map_from(morphism):
                morphism = parent.domain().coerce_map_from(morphism)
            else:
                raise TypeError("the given ring does not coerce to the domain of the derivation")
        elif not (isinstance(morphism, Map) and morphism.category_for().is_subcategory(Rings())):
            raise TypeError("you must give an homomorphism of rings")
        M = RingDerivationModule(morphism.domain(), parent.defining_morphism() * morphism)
        return M([self(morphism(x)) for x in M.dual_basis()])

    def postcompose(self, morphism):
        """
        Return the derivation obtained by applying first
        this derivation and then ``morphism``.

        INPUT:

        - ``morphism`` -- a homomorphism of rings whose domain is
          the codomain of this derivation or a ring into which the
          codomain of this derivation coerces

        EXAMPLES::

            sage: A.<x,y>= QQ[]
            sage: ev = A.hom([QQ(0), QQ(1)])
            sage: Dx = A.derivation(x)
            sage: Dy = A.derivation(y)

        We can define the derivation at `(0,1)` just by postcomposing
        with ``ev``::

            sage: dx = Dx.postcompose(ev)
            sage: dy = Dy.postcompose(ev)
            sage: f = x^2 + y^2
            sage: dx(f)
            0
            sage: dy(f)
            2

        Note that we cannot avoid the creation of the evaluation morphism:
        if we pass in ``QQ`` instead, an error is raised since there is
        no coercion morphism from ``A`` to ``QQ``::

            sage: Dx.postcompose(QQ)
            Traceback (most recent call last):
            ...
            TypeError: the codomain of the derivation does not coerce to the given ring

        Note that this method cannot be used to compose derivations::

            sage: Dx.precompose(Dy)
            Traceback (most recent call last):
            ...
            TypeError: you must give an homomorphism of rings
        """
        parent = self.parent()
        # Accept either a ring (into which the codomain coerces) or a morphism.
        if morphism in Rings().Commutative():
            if morphism.has_coerce_map_from(parent.codomain()):
                morphism = morphism.coerce_map_from(parent.codomain())
            else:
                raise TypeError("the codomain of the derivation does not coerce to the given ring")
        elif not (isinstance(morphism, Map) and morphism.category_for().is_subcategory(Rings())):
            raise TypeError("you must give an homomorphism of rings")
        M = RingDerivationModule(parent.domain(), morphism * parent.defining_morphism())
        return M([morphism(self(x)) for x in M.dual_basis()])

    def extend_to_fraction_field(self):
        r"""
        Return the extension of this derivation to fraction fields of
        the domain and the codomain.

        EXAMPLES::

            sage: S.<x> = QQ[]
            sage: d = S.derivation()
            sage: d
            d/dx

            sage: D = d.extend_to_fraction_field()
            sage: D
            d/dx
            sage: D.domain()
            Fraction Field of Univariate Polynomial Ring in x over Rational Field
            sage: D(1/x)
            -1/x^2
        """
        parent = self.parent()
        domain = parent.domain().fraction_field()
        codomain = parent.codomain().fraction_field()
        M = RingDerivationModule(domain, codomain)
        try:
            return M(self)
        except (ValueError, NotImplementedError):
            # Fall back on the coordinates when direct conversion fails.
            return M(self.list())
class RingDerivationWithoutTwist_zero(RingDerivationWithoutTwist):
    """
    This class can only represent the zero derivation.

    It is used when the parent is the zero derivation module
    (e.g., when its domain is ``ZZ``, ``QQ``, a finite field, etc.)
    """
    def __init__(self, parent, arg=None):
        """
        Initialize this derivation.

        TESTS::

            sage: M = ZZ.derivation_module()
            sage: der = M(); der
            0
            sage: from sage.rings.derivation import RingDerivationWithoutTwist_zero
            sage: isinstance(der, RingDerivationWithoutTwist_zero)
            True
            sage: TestSuite(der).run()
        """
        # A one-element list wrapping a derivation stands for that derivation.
        if isinstance(arg, list) and len(arg) == 1 and isinstance(arg[0], RingDerivation):
            arg = arg[0]
        # Only (data representing) the zero derivation is acceptable here.
        if arg:
            if not (isinstance(arg, RingDerivation) and arg.is_zero()):
                raise ValueError("unable to create the derivation")
        RingDerivation.__init__(self, parent)

    def _repr_(self):
        """
        Return a string representation of this derivation.

        EXAMPLES::

            sage: M = ZZ.derivation_module()
            sage: M()
            0
        """
        return "0"

    def _latex_(self):
        """
        Return a string representation of this derivation.

        EXAMPLES::

            sage: M = ZZ.derivation_module()
            sage: latex(M())
            0
        """
        return "0"

    def __hash__(self):
        """
        Return a hash of this derivation.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: f = R.derivation(x)
            sage: hash(f)  # random
            3713081631936575706
        """
        return hash(tuple(self.list()))

    def _add_(self, other):
        """
        Return the sum of this derivation and ``other``.

        Since this derivation vanishes, the sum is just ``other``.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: Dx = R.derivation(x)
            sage: Dy = R.derivation(y)
            sage: Dx + Dy
            d/dx + d/dy
        """
        return other

    def _sub_(self, other):
        """
        Return the difference of this derivation and ``other``.

        Since this derivation vanishes, the difference is ``-other``.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: Dx = R.derivation(x)
            sage: Dy = R.derivation(y)
            sage: Dx - Dy
            d/dx - d/dy
        """
        return -other

    def _neg_(self):
        """
        Return the opposite of this derivation, that is itself.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: Dx = R.derivation(x)
            sage: -Dx
            -d/dx
        """
        return self

    def _lmul_(self, scalar):
        """
        Return the product of this derivation by the scalar ``scalar``;
        the zero derivation is absorbing.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: Dx = R.derivation(x)
            sage: Dx * 2
            2*d/dx
            sage: Dx * x^2
            x^2*d/dx
        """
        return self

    def _rmul_(self, scalar):
        """
        Return the product of this derivation by the scalar ``scalar``;
        the zero derivation is absorbing.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: Dx = R.derivation(x)
            sage: 2 * Dx
            2*d/dx
            sage: x^2 * Dx
            x^2*d/dx
        """
        return self

    def _call_(self, x):
        """
        Return the image of ``x`` under this derivation,
        namely zero.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: f = x*R.derivation(x) + y*R.derivation(y)
            sage: f(x^2 + 3*x*y - y^2)
            2*x^2 + 6*x*y - 2*y^2
        """
        return self.parent().codomain().zero()

    def _bracket_(self, other):
        """
        Return the Lie bracket (that is the commutator) of
        this derivation and ``other``; it always vanishes.

        EXAMPLES::

            sage: R.<x,y> = QQ[]
            sage: Dx = R.derivation(x)
            sage: Dy = R.derivation(y)
            sage: Dx._bracket_(Dy)
            0
        """
        return self

    def is_zero(self):
        """
        Return ``True`` since this derivation always vanishes.

        EXAMPLES::

            sage: M = QQ.derivation_module()
            sage: M().is_zero()
            True
        """
        return True

    def list(self):
        """
        Return the (empty) list of coefficients of this derivation
        on the canonical basis.

        EXAMPLES::

            sage: M = QQ.derivation_module()
            sage: M().list()
            []
        """
        return []
class RingDerivationWithoutTwist_wrapper(RingDerivationWithoutTwist):
    """
    This class is a wrapper for derivations.

    It is useful for changing the parent without changing the
    computation rules for derivations. It is used for derivations
    over fraction fields and quotient rings.
    """
    def __init__(self, parent, arg=None):
        """
        Initialize this derivation.

        TESTS::

            sage: from sage.rings.derivation import RingDerivationWithoutTwist_wrapper
            sage: R.<x,y> = GF(5)[]
            sage: S = R.quo([x^5, y^5])
            sage: M = S.derivation_module()
            sage: der = M.random_element()
            sage: isinstance(der, RingDerivationWithoutTwist_wrapper)
            True
            sage: TestSuite(der).run()
        """
        # A one-element list wrapping a derivation stands for that derivation.
        if isinstance(arg, list) and len(arg) == 1 and isinstance(arg[0], RingDerivation):
            arg = arg[0]
        if isinstance(arg, RingDerivationWithoutTwist_wrapper):
            # Unwrap: share the underlying derivation.
            self._base_derivation = arg._base_derivation
        else:
            # Build the underlying derivation in the base module.
            self._base_derivation = parent._base_derivation(arg)
        RingDerivation.__init__(self, parent)

    def __hash__(self):
        """
        Return a hash of this derivation.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: f = R.derivation(x)
            sage: hash(f)  # random
            3713081631936575706
        """
        return hash(tuple(self.list()))

    def _add_(self, other):
        """
        Return the sum of this derivation and ``other``,
        delegating to the underlying derivations.

        EXAMPLES::

            sage: R.<X,Y> = GF(5)[]
            sage: S.<x,y> = R.quo([X^5, Y^5])
            sage: Dx = S.derivation(x)
            sage: Dy = S.derivation(y)
            sage: Dx + Dy
            d/dx + d/dy
        """
        cls = type(self)
        return cls(self.parent(), self._base_derivation + other._base_derivation)

    def _sub_(self, other):
        """
        Return the difference of this derivation and ``other``,
        delegating to the underlying derivations.

        EXAMPLES::

            sage: R.<X,Y> = GF(5)[]
            sage: S.<x,y> = R.quo([X^5, Y^5])
            sage: Dx = S.derivation(x)
            sage: Dy = S.derivation(y)
            sage: Dx - Dy
            d/dx - d/dy
        """
        cls = type(self)
        return cls(self.parent(), self._base_derivation - other._base_derivation)

    def _neg_(self):
        """
        Return the opposite of this derivation,
        delegating to the underlying derivation.

        EXAMPLES::

            sage: R.<X,Y> = GF(5)[]
            sage: S.<x,y> = R.quo([X^5, Y^5])
            sage: Dx = S.derivation(x)
            sage: -Dx
            -d/dx
        """
        cls = type(self)
        return cls(self.parent(), -self._base_derivation)

    def _lmul_(self, factor):
        """
        Return the product of this derivation by the scalar ``factor``,
        delegating to the underlying derivation.

        EXAMPLES::

            sage: R.<X,Y> = GF(5)[]
            sage: S.<x,y> = R.quo([X^5, Y^5])
            sage: Dx = S.derivation(x)
            sage: Dx * 2
            2*d/dx
            sage: Dx * x^2
            x^2*d/dx
        """
        cls = type(self)
        return cls(self.parent(), self._base_derivation * factor)

    def _rmul_(self, factor):
        """
        Return the product of this derivation by the scalar ``factor``,
        delegating to the underlying derivation.

        EXAMPLES::

            sage: R.<X,Y> = GF(5)[]
            sage: S.<x,y> = R.quo([X^5, Y^5])
            sage: Dx = S.derivation(x)
            sage: 2 * Dx
            2*d/dx
            sage: x^2 * Dx
            x^2*d/dx
        """
        cls = type(self)
        return cls(self.parent(), factor * self._base_derivation)

    def list(self):
        """
        Return the list of coefficients of this derivation
        on the canonical basis, as computed by the underlying derivation.

        EXAMPLES::

            sage: R.<X,Y> = GF(5)[]
            sage: S.<x,y> = R.quo([X^5, Y^5])
            sage: M = S.derivation_module()
            sage: M.basis()
            Family (d/dx, d/dy)

            sage: S.derivation(x).list()
            [1, 0]
            sage: S.derivation(y).list()
            [0, 1]

            sage: f = x*S.derivation(x) + y*S.derivation(y); f
            x*d/dx + y*d/dy
            sage: f.list()
            [x, y]
        """
        return self._base_derivation.list()
class RingDerivationWithoutTwist_function(RingDerivationWithoutTwist):
    """
    A class for untwisted derivations over rings whose elements
    are either polynomials, rational fractions, power series or
    Laurent series.
    """
    def __init__(self, parent, arg=None):
        """
        Initialize this derivation.
        TESTS::
            sage: R.<x,y> = ZZ[]
            sage: R.derivation(x)
            d/dx
            sage: der = R.derivation([1,2])
            sage: der
            d/dx + 2*d/dy
            sage: TestSuite(der).run()
        """
        domain = parent.domain()
        codomain = parent.codomain()
        ngens = domain.ngens()
        # A derivation here is a derivation of the base ring plus one
        # image (in the codomain) for each generator of the domain.
        self._base_derivation = parent._base_derivation()
        self._images = [codomain.zero() for _ in range(ngens)]
        if arg is None:
            arg = domain.gen()
        # A one-element list wrapping a single derivation stands for
        # that derivation itself.
        if isinstance(arg, list) and len(arg) == 1 and isinstance(arg[0], RingDerivation):
            arg = arg[0]
        if not arg:
            # Falsy input (e.g. 0): keep the zero derivation built above.
            pass
        elif (isinstance(arg, RingDerivationWithoutTwist_function)
              and parent.has_coerce_map_from(arg.parent())):
            # Coerce an existing derivation of the same kind.
            self._base_derivation = parent._base_derivation(arg._base_derivation)
            self._images = [codomain(x) for x in arg._images]
        elif isinstance(arg, (tuple, list)):
            # The last ``ngens`` entries are the images of the generators;
            # everything before describes the base-ring derivation.
            if len(arg) < ngens:
                raise ValueError("the number of images is incorrect")
            self._base_derivation = parent._base_derivation(arg[:-ngens])
            self._images = [codomain(x) for x in arg[-ngens:]]
        else:
            # A single generator means d/d(that generator); anything else
            # is passed through to the base-ring derivation constructor.
            for i in range(ngens):
                if arg == domain.gen(i):
                    self._base_derivation = parent._base_derivation()
                    self._images[i] = codomain.one()
                    break
            else:
                self._base_derivation = parent._base_derivation(arg)
        RingDerivation.__init__(self, parent)
    def __hash__(self):
        """
        Return a hash of this derivation.
        EXAMPLES::
            sage: R.<x,y> = ZZ[]
            sage: f = R.derivation(x)
            sage: hash(f)  # random
            3713081631936575706
        """
        return hash(tuple(self.list()))
    def _add_(self, other):
        """
        Return the sum of this derivation and ``other``.
        EXAMPLES::
            sage: R.<x,y> = ZZ[]
            sage: Dx = R.derivation(x)
            sage: Dy = R.derivation(y)
            sage: Dx + Dy
            d/dx + d/dy
        """
        base_derivation = self._base_derivation + other._base_derivation
        im = [ self._images[i] + other._images[i] for i in range(self.parent().domain().ngens()) ]
        return type(self)(self.parent(), [base_derivation] + im)
    def _sub_(self, other):
        """
        Return the subtraction of this derivation and ``other``.
        EXAMPLES::
            sage: R.<x,y> = ZZ[]
            sage: Dx = R.derivation(x)
            sage: Dy = R.derivation(y)
            sage: Dx - Dy
            d/dx - d/dy
        """
        base_derivation = self._base_derivation - other._base_derivation
        im = [ self._images[i] - other._images[i] for i in range(self.parent().domain().ngens()) ]
        return type(self)(self.parent(), [base_derivation] + im)
    def _rmul_(self, factor):
        """
        Return the product of this derivation by the scalar ``factor``.
        EXAMPLES::
            sage: R.<x,y> = ZZ[]
            sage: Dx = R.derivation(x)
            sage: 2 * Dx
            2*d/dx
            sage: x^2 * Dx
            x^2*d/dx
        """
        factor = self.parent().codomain()(factor)
        base_derivation = factor * self._base_derivation
        im = [ factor*x for x in self._images ]
        return type(self)(self.parent(), [base_derivation] + im)
    def _lmul_(self, factor):
        """
        Return the product of this derivation by the scalar ``factor``.
        EXAMPLES::
            sage: R.<x,y> = ZZ[]
            sage: Dx = R.derivation(x)
            sage: Dx * 2
            2*d/dx
            sage: Dx * x^2
            x^2*d/dx
        """
        # Scalar multiplication is commutative here, so delegate.
        return self._rmul_(factor)
    def _call_(self, x):
        """
        Return the image of ``x`` under this derivation.
        EXAMPLES::
            sage: R.<x,y> = ZZ[]
            sage: D = x*R.derivation(x) + y*R.derivation(y)
            sage: D(x^2 + 3*x*y - y^2)
            2*x^2 + 6*x*y - 2*y^2
        """
        parent = self.parent()
        domain = parent.domain()
        codomain = parent.codomain()
        defining_morphism = parent.defining_morphism()
        if isinstance(domain, FractionField_generic):
            # Quotient rule on numerator/denominator:
            # (u/v)' = (u'v - u v') / v^2 (base-derivation part only here).
            num = x.numerator()
            den = x.denominator()
            u = defining_morphism(num)
            v = defining_morphism(den)
            up = num.map_coefficients(self._base_derivation, codomain)(*domain.gens())
            vp = den.map_coefficients(self._base_derivation, codomain)(*domain.gens())
            res = (up*v - u*vp) / (v*v)
        else:
            # Apply the base-ring derivation coefficient-wise.
            res = x.map_coefficients(self._base_derivation, codomain)(*domain.gens())
        # Add the chain-rule contribution of each generator's image.
        for i in range(len(self._images)):
            res += defining_morphism(x.derivative(domain.gen(i))) * self._images[i]
        return res
    def is_zero(self):
        """
        Return ``True`` if this derivation is zero.
        EXAMPLES::
            sage: R.<x,y> = ZZ[]
            sage: f = R.derivation(); f
            d/dx
            sage: f.is_zero()
            False
            sage: (f-f).is_zero()
            True
        """
        if not self._base_derivation.is_zero():
            return False
        return all(im == 0 for im in self._images)
    def list(self):
        """
        Return the list of coefficient of this derivation
        on the canonical basis.
        EXAMPLES::
            sage: R.<x,y> = GF(5)[[]]
            sage: M = R.derivation_module()
            sage: M.basis()
            Family (d/dx, d/dy)
            sage: R.derivation(x).list()
            [1, 0]
            sage: R.derivation(y).list()
            [0, 1]
            sage: f = x*R.derivation(x) + y*R.derivation(y); f
            x*d/dx + y*d/dy
            sage: f.list()
            [x, y]
        """
        return self._base_derivation.list() + self._images
class RingDerivationWithoutTwist_fraction_field(RingDerivationWithoutTwist_wrapper):
    """
    This class handles derivations over fraction fields.
    """
    def __hash__(self):
        """
        Return a hash of this derivation.
        EXAMPLES::
            sage: R.<x,y> = ZZ[]
            sage: f = R.derivation(x)
            sage: hash(f)  # random
            3713081631936575706
        """
        return hash(tuple(self.list()))
    def _call_(self, x):
        """
        Return the image of ``x`` under this derivation.
        EXAMPLES::
            sage: R.<x,y> = ZZ[]
            sage: K = R.fraction_field()
            sage: f = K.derivation(); f
            d/dx
            sage: f(1/x)
            (-1)/x^2
        """
        defining_morphism = self.parent().defining_morphism()
        num = x.numerator()
        den = x.denominator()
        u = defining_morphism(num)
        v = defining_morphism(den)
        # Derivatives of numerator and denominator via the wrapped
        # derivation on the underlying ring.
        up = self._base_derivation(u)
        vp = self._base_derivation(v)
        # Quotient rule: (u/v)' = (u'v - u v') / v^2.
        return (up*v - u*vp) / (v*v)
class RingDerivationWithoutTwist_quotient(RingDerivationWithoutTwist_wrapper):
    """
    This class handles derivations over quotient rings.
    """
    def __hash__(self):
        """
        Return a hash of this derivation.
        EXAMPLES::
            sage: R.<x,y> = ZZ[]
            sage: f = R.derivation(x)
            sage: hash(f)  # random
            3713081631936575706
        """
        return hash(tuple(self.list()))
    def _call_(self, x):
        """
        Return the image of ``x`` under this derivation.
        EXAMPLES::
            sage: R.<X,Y> = GF(5)[]
            sage: S.<x,y> = R.quo([X^5, Y^5])
            sage: f = x^3*S.derivation(); f
            x^3*d/dx
            sage: f(x^3)
            0
        """
        # Differentiate a lift of ``x`` in the cover ring; this is
        # well defined because the module constructor only accepts
        # derivations annihilating the defining ideal.
        return self._base_derivation(x.lift())
class RingDerivationWithTwist_generic(RingDerivation):
    r"""
    The class handles `\theta`-derivations of the form
    `\lambda (\theta - \iota)` (where `\iota` is the defining
    morphism of the codomain over the domain) for a scalar
    `\lambda` varying in the codomain.
    """
    def __init__(self, parent, scalar=0):
        """
        Initialize this derivation.
        TESTS::
            sage: R.<x,y> = ZZ[]
            sage: theta = R.hom([y,x])
            sage: R.derivation(twist=theta)
            0
            sage: R.derivation(1, twist=theta)
            [x |--> y, y |--> x] - id
            sage: der = R.derivation(x, twist=theta)
            sage: TestSuite(der).run()
        """
        codomain = parent.codomain()
        # The derivation is entirely determined by its scalar lambda.
        self._scalar = codomain(scalar)
        RingDerivation.__init__(self, parent)
    def __hash__(self):
        """
        Return a hash of this derivation.
        EXAMPLES::
            sage: R.<x,y> = ZZ[]
            sage: theta = R.hom([y,x])
            sage: f = R.derivation(1, twist=theta)
            sage: hash(f)  # random
            -6511057926760520014
        """
        return hash(self._scalar)
    def _repr_(self):
        r"""
        Return a string representation of this derivation.
        EXAMPLES::
            sage: R.<x,y> = ZZ[]
            sage: theta = R.hom([y,x])
            sage: R.derivation(1, twist=theta)
            [x |--> y, y |--> x] - id
        """
        scalar = self._scalar
        sc = str(scalar)
        if sc == "0":
            return "0"
        defining_morphism = self.parent().defining_morphism()
        twisting_morphism = self.parent().twisting_morphism()
        try:
            if defining_morphism.is_identity():
                sdef = "id"
            else:
                sdef = "[%s]" % defining_morphism._repr_short()
        except (AttributeError, NotImplementedError):
            sdef = "defining_morphism"
        try:
            stwi = "[%s]" % twisting_morphism._repr_short()
        except AttributeError:
            stwi = "twisting_morphism"
        # Pretty-print the scalar factor, minimizing parentheses.
        if sc == "1":
            return "%s - %s" % (stwi, sdef)
        elif sc == "-1":
            s = "-"
        elif scalar._is_atomic():
            s = "%s*" % sc
        elif (-scalar)._is_atomic():
            s = "-%s*" % (-scalar)
        else:
            s = "(%s)*" % sc
        return "%s(%s - %s)" % (s, stwi, sdef)
    def _latex_(self):
        r"""
        Return a LaTeX representation of this derivation.
        EXAMPLES::
            sage: k.<a> = GF(5^3)
            sage: Frob = k.frobenius_endomorphism()
            sage: der = k.derivation(a+1, twist=Frob)
            sage: latex(der)
            \left(a + 1\right) \left(\left[a \mapsto a^{5}\right] - \text{id}\right)
        """
        scalar = self._scalar
        sc = str(scalar)
        if sc == "0":
            return "0"
        defining_morphism = self.parent().defining_morphism()
        twisting_morphism = self.parent().twisting_morphism()
        try:
            if defining_morphism.is_identity():
                sdef = "\\text{id}"
            else:
                sdef = "\\left[%s\\right]" % latex(defining_morphism)
        except (AttributeError, NotImplementedError):
            sdef = "\\text{defining morphism}"
        try:
            stwi = "\\left[%s\\right]" % latex(twisting_morphism)
        except AttributeError:
            stwi = "\\text{twisting morphism}"
        if sc == "1":
            return "%s - %s" % (stwi, sdef)
        elif sc == "-1":
            s = "-"
        elif scalar._is_atomic():
            s = "%s " % sc
        elif (-scalar)._is_atomic():
            s = "-%s " % (-scalar)
        else:
            s = "\\left(%s\\right) " % sc
        return "%s \\left(%s - %s\\right)" % (s, stwi, sdef)
    def _add_(self, other):
        """
        Return the sum of this derivation and ``other``.
        EXAMPLES::
            sage: R.<x,y> = ZZ[]
            sage: theta = R.hom([y,x])
            sage: der1 = R.derivation(x, twist=theta); der1
            x*([x |--> y, y |--> x] - id)
            sage: der2 = R.derivation(y, twist=theta); der2
            y*([x |--> y, y |--> x] - id)
            sage: der1 + der2
            (x + y)*([x |--> y, y |--> x] - id)
        """
        # Twisted derivations with the same twist add via their scalars.
        return type(self)(self.parent(), self._scalar + other._scalar)
    def _sub_(self, other):
        """
        Return the subtraction of this derivation and ``other``.
        EXAMPLES::
            sage: R.<x,y> = ZZ[]
            sage: theta = R.hom([y,x])
            sage: der1 = R.derivation(x, twist=theta); der1
            x*([x |--> y, y |--> x] - id)
            sage: der2 = R.derivation(y, twist=theta); der2
            y*([x |--> y, y |--> x] - id)
            sage: der1 - der2
            (x - y)*([x |--> y, y |--> x] - id)
        TESTS::
            sage: der1 - der1
            0
            sage: der2 - der2
            0
        """
        return type(self)(self.parent(), self._scalar - other._scalar)
    def _rmul_(self, factor):
        """
        Return the product of this derivation by the scalar ``factor``.
        EXAMPLES::
            sage: R.<x,y> = ZZ[]
            sage: theta = R.hom([y,x])
            sage: D = R.derivation(x, twist=theta); D
            x*([x |--> y, y |--> x] - id)
            sage: y * D
            x*y*([x |--> y, y |--> x] - id)
        """
        return type(self)(self.parent(), factor * self._scalar)
    def _lmul_(self, factor):
        """
        Return the product of this derivation by the scalar ``factor``.
        EXAMPLES::
            sage: R.<x,y> = ZZ[]
            sage: theta = R.hom([y,x])
            sage: D = R.derivation(x, twist=theta); D
            x*([x |--> y, y |--> x] - id)
            sage: D * y
            x*y*([x |--> y, y |--> x] - id)
        """
        return self._rmul_(factor)
    def _call_(self, x):
        """
        Return the image of ``x`` under this derivation.
        EXAMPLES::
            sage: R.<x,y> = ZZ[]
            sage: theta = R.hom([y,x])
            sage: f = R.derivation(1, twist=theta); f
            [x |--> y, y |--> x] - id
            sage: f(x)
            -x + y
        """
        parent = self.parent()
        # By definition: lambda * (theta(x) - iota(x)).
        return self._scalar * (parent.twisting_morphism()(x) - parent.defining_morphism()(x))
    def list(self):
        """
        Return the list of coefficient of this twisted derivation
        on the canonical basis.
        EXAMPLES::
            sage: R.<x,y> = QQ[]
            sage: K = R.fraction_field()
            sage: theta = K.hom([y,x])
            sage: M = K.derivation_module(twist=theta)
            sage: M.basis()
            Family (twisting_morphism - id,)
            sage: f = (x+y) * M.gen()
            sage: f
            (x + y)*(twisting_morphism - id)
            sage: f.list()
            [x + y]
        """
        return [ self._scalar ]
    def precompose(self, morphism):
        r"""
        Return the twisted derivation obtained by applying first
        ``morphism`` and then this twisted derivation.
        INPUT:
        - ``morphism`` -- a homomorphism of rings whose codomain is
          the domain of this derivation or a ring that coerces to
          the domain of this derivation
        EXAMPLES::
            sage: R.<x,y> = ZZ[]
            sage: theta = R.hom([y,x])
            sage: D = R.derivation(x, twist=theta); D
            x*([x |--> y, y |--> x] - id)
            sage: f = R.hom([x^2, y^3])
            sage: g = D.precompose(f); g
            x*([x |--> y^2, y |--> x^3] - [x |--> x^2, y |--> y^3])
        Observe that the `g` is no longer a `\theta`-derivation but
        a `(\theta \circ f)`-derivation::
            sage: g.parent().twisting_morphism()
            Ring endomorphism of Multivariate Polynomial Ring in x, y over Integer Ring
              Defn: x |--> y^2
                    y |--> x^3
        """
        parent = self.parent()
        if morphism in Rings().Commutative():
            if parent.domain().has_coerce_map_from(morphism):
                morphism = parent.domain().coerce_map_from(morphism)
            else:
                raise TypeError("the given ring does not coerce to the domain of the derivation")
        elif not (isinstance(morphism, Map) and morphism.category_for().is_subcategory(Rings())):
            raise TypeError("you must give an homomorphism of rings")
        # Both the defining and the twisting morphisms are precomposed;
        # the scalar is unchanged.
        M = RingDerivationModule(morphism.domain(), parent.defining_morphism() * morphism,
                                 parent.twisting_morphism() * morphism)
        return M(self._scalar)
    def postcompose(self, morphism):
        r"""
        Return the twisted derivation obtained by applying first
        this twisted derivation and then ``morphism``.
        INPUT:
        - ``morphism`` -- a homomorphism of rings whose domain is
          the codomain of this derivation or a ring into which the
          codomain of this derivation coerces
        EXAMPLES::
            sage: R.<x,y> = ZZ[]
            sage: theta = R.hom([y,x])
            sage: D = R.derivation(x, twist=theta); D
            x*([x |--> y, y |--> x] - id)
            sage: f = R.hom([x^2, y^3])
            sage: g = D.postcompose(f); g
            x^2*([x |--> y^3, y |--> x^2] - [x |--> x^2, y |--> y^3])
        Observe that the `g` is no longer a `\theta`-derivation but
        a `(f \circ \theta)`-derivation::
            sage: g.parent().twisting_morphism()
            Ring endomorphism of Multivariate Polynomial Ring in x, y over Integer Ring
              Defn: x |--> y^3
                    y |--> x^2
        """
        parent = self.parent()
        if morphism in Rings().Commutative():
            if morphism.has_coerce_map_from(parent.codomain()):
                morphism = morphism.coerce_map_from(parent.codomain())
            else:
                raise TypeError("the codomain of the derivation does not coerce to the given ring")
        elif not (isinstance(morphism, Map) and morphism.category_for().is_subcategory(Rings())):
            raise TypeError("you must give an homomorphism of rings")
        # Both morphisms are postcomposed and the scalar is pushed
        # through ``morphism``.
        M = RingDerivationModule(parent.domain(), morphism * parent.defining_morphism(),
                                 morphism * parent.twisting_morphism())
        return M(morphism(self._scalar))
    def _richcmp_(self, other, op):
        """
        Compare this derivation with ``other`` according
        to the comparison operator ``op``.
        EXAMPLES::
            sage: R.<x,y> = ZZ[]
            sage: theta = R.hom([y,x])
            sage: Dx = R.derivation(x, twist=theta); Dx
            x*([x |--> y, y |--> x] - id)
            sage: Dy = R.derivation(y, twist=theta); Dy
            y*([x |--> y, y |--> x] - id)
            sage: D = R.derivation(x+y, twist=theta); D
            (x + y)*([x |--> y, y |--> x] - id)
            sage: Dx == Dy
            False
            sage: D == Dx + Dy
            True
            sage: D != Dy
            True
        """
        # Only equality and inequality are meaningful; comparison
        # reduces to comparing the scalars.
        if op == op_EQ:
            if isinstance(other, RingDerivationWithTwist_generic):
                return self._scalar == other._scalar
            else:
                return False
        if op == op_NE:
            if isinstance(other, RingDerivationWithTwist_generic):
                return self._scalar != other._scalar
            else:
                return True
        return NotImplemented
    def extend_to_fraction_field(self):
        r"""
        Return the extension of this derivation to fraction fields of
        the domain and the codomain.
        EXAMPLES::
            sage: R.<x,y> = ZZ[]
            sage: theta = R.hom([y,x])
            sage: d = R.derivation(x, twist=theta)
            sage: d
            x*([x |--> y, y |--> x] - id)
            sage: D = d.extend_to_fraction_field()
            sage: D
            x*([x |--> y, y |--> x] - id)
            sage: D.domain()
            Fraction Field of Multivariate Polynomial Ring in x, y over Integer Ring
            sage: D(1/x)
            (x - y)/y
        """
        parent = self.parent()
        domain = parent.domain().fraction_field()
        codomain = parent.codomain().fraction_field()
        twist = parent.twisting_morphism().extend_to_fraction_field()
        M = RingDerivationModule(domain, codomain, twist)
        return M(codomain(self._scalar))
# NOTE(review): the four lines below are extraction/dataset artifacts
# (table-column residue: avg_line_length / max_line_length /
# alphanum_fraction), not Python source; commented out so the file parses.
# | 31.663668
# | 209
# | 0.529153
# |
from sage.structure.richcmp import op_EQ, op_NE
from sage.structure.unique_representation import UniqueRepresentation
from sage.sets.family import Family
from sage.modules.module import Module
from sage.structure.element import ModuleElement
from sage.rings.integer_ring import ZZ
from sage.rings.polynomial.polynomial_ring import PolynomialRing_general
from sage.rings.polynomial.multi_polynomial_ring_base import MPolynomialRing_base
from sage.rings.power_series_ring import PowerSeriesRing_generic
from sage.rings.laurent_series_ring import LaurentSeriesRing
from sage.rings.fraction_field import FractionField_generic
from sage.rings.quotient_ring import QuotientRing_generic
from sage.rings.polynomial.polynomial_quotient_ring import PolynomialQuotientRing_generic
from sage.rings.finite_rings.integer_mod_ring import IntegerModRing_generic
from sage.rings.padics.padic_generic import pAdicGeneric
from sage.categories.number_fields import NumberFields
from sage.categories.finite_fields import FiniteFields
from sage.categories.modules import Modules
from sage.categories.modules_with_basis import ModulesWithBasis
from sage.categories.lie_algebras import LieAlgebras
from sage.categories.map import Map
from sage.categories.rings import Rings
from sage.misc.latex import latex
class RingDerivationModule(Module, UniqueRepresentation):
    """
    The module of derivations from a commutative ring ``domain`` into
    an algebra ``codomain`` over it, possibly twisted by a ring
    homomorphism ``twist``.
    """
    def __init__(self, domain, codomain, twist=None):
        """
        Initialize this module of derivations.

        INPUT:

        - ``domain`` -- a commutative ring
        - ``codomain`` -- an algebra over ``domain``, or a ring
          homomorphism defined on (a ring coercing from) ``domain``
        - ``twist`` -- a ring homomorphism (default: ``None``)

        The element class and, when possible, generators, a basis and
        a dual basis are selected according to the kind of ``domain``.
        """
        if domain not in Rings().Commutative():
            raise TypeError("the domain must be a commutative ring")
        # Normalize ``codomain`` to a ring and compute the defining
        # morphism (the structural map from domain into codomain).
        if codomain in Rings().Commutative() and codomain.has_coerce_map_from(domain):
            defining_morphism = codomain.coerce_map_from(domain)
        elif (isinstance(codomain,Map)
              and codomain.category_for().is_subcategory(Rings())
              and codomain.domain().has_coerce_map_from(domain)):
            if codomain.domain() is domain:
                defining_morphism = codomain
            else:
                defining_morphism = codomain * codomain.domain().coerce_map_from(domain)
            codomain = defining_morphism.codomain()
        else:
            raise TypeError("the codomain must be an algebra over the domain"
                            " or a morphism with the correct domain")
        if twist is not None:
            if not (isinstance(twist, Map) and twist.category_for().is_subcategory(Rings())):
                raise TypeError("the twisting homomorphism must be an homomorphism of rings")
            # Compose with coercions so that ``twist`` goes from
            # ``domain`` to ``codomain``.
            if twist.domain() is not domain:
                map = twist.domain().coerce_map_from(domain)
                if map is None:
                    raise TypeError("the domain of the derivation must coerce"
                                    " to the domain of the twisting homomorphism")
                twist = twist * map
            if twist.codomain() is not codomain:
                map = codomain.coerce_map_from(twist.codomain())
                if map is None:
                    raise TypeError("the codomain of the twisting homomorphism"
                                    " must coerce to the codomain of the derivation")
                twist = map * twist
            # If the twist agrees with the defining morphism (checked on
            # the generators when direct comparison fails), drop it:
            # the twisted theory reduces to the untwisted one.
            try:
                if twist == defining_morphism:
                    twist = None
                else:
                    for g in domain.gens():
                        if twist(g) != defining_morphism(g):
                            break
                    else:
                        twist = None
            except (AttributeError, NotImplementedError):
                pass
        self._domain = domain
        self._codomain = codomain
        self._defining_morphism = defining_morphism
        self._twist = twist
        self._base_derivation = None
        self._gens = None
        self._basis = self._dual_basis = None
        # ``_constants`` is a pair ``(ring, flag)``; the flag records
        # whether ``ring`` is known to be exactly the ring of constants
        # (see ``ring_of_constants``) -- presumably; TODO confirm.
        self._constants = (ZZ, False)
        if twist is not None:
            self.Element = RingDerivationWithTwist_generic
            if domain.is_field():
                self._gens = [ 1 ]
                self._basis = [ 1 ]
        elif (domain is ZZ or domain in NumberFields() or domain in FiniteFields()
              or isinstance(domain, IntegerModRing_generic)
              or (isinstance(domain, pAdicGeneric) and (domain.is_field() or domain.absolute_e() == 1))):
            # Rings admitting no nonzero derivation: the module is trivial.
            self.Element = RingDerivationWithoutTwist_zero
            self._gens = [ ]
            self._basis = [ ]
            self._dual_basis = [ ]
            self._constants = (domain, True)
        elif (isinstance(domain, (PolynomialRing_general, MPolynomialRing_base, PowerSeriesRing_generic, LaurentSeriesRing))
              or (isinstance(domain, FractionField_generic)
                  and isinstance(domain.ring(), (PolynomialRing_general, MPolynomialRing_base)))):
            # "Function-like" rings: a derivation is a derivation of the
            # base ring plus the images of the generators.
            self._base_derivation = RingDerivationModule(domain.base_ring(), defining_morphism)
            self.Element = RingDerivationWithoutTwist_function
            try:
                self._gens = self._base_derivation.gens() + domain.gens()
            except NotImplementedError:
                pass
            try:
                self._basis = tuple(self._base_derivation.basis()) + domain.gens()
                self._dual_basis = tuple(self._base_derivation.dual_basis()) + domain.gens()
            except NotImplementedError:
                pass
            constants, sharp = self._base_derivation._constants
            if domain.characteristic() == 0:
                self._constants = (constants, sharp)
            else:
                # In characteristic p there are extra constants (p-th
                # powers), so the flag is cleared.
                self._constants = (constants, False)
        elif isinstance(domain, FractionField_generic):
            # Generic fraction field: wrap the derivations of the
            # underlying ring.
            self._base_derivation = RingDerivationModule(domain.ring(), defining_morphism)
            self.Element = RingDerivationWithoutTwist_fraction_field
            try:
                self._gens = self._base_derivation.gens()
            except NotImplementedError:
                pass
            try:
                self._basis = self._base_derivation.basis()
                self._dual_basis = self._base_derivation.dual_basis()
            except NotImplementedError:
                pass
            constants, sharp = self._base_derivation._constants
            self._constants = (constants.fraction_field(), False)
        elif isinstance(domain, PolynomialQuotientRing_generic):
            # Quotient of a polynomial ring: only derivations killing
            # the modulus descend to the quotient.
            self._base_derivation = RingDerivationModule(domain.base(), defining_morphism)
            modulus = domain.modulus()
            for der in self._base_derivation.gens():
                if der(modulus) != 0:
                    raise NotImplementedError("derivations over quotient rings"
                                              " are not fully supported")
            self.Element = RingDerivationWithoutTwist_quotient
            try:
                self._gens = self._base_derivation.gens()
            except NotImplementedError:
                pass
            try:
                self._basis = self._base_derivation.basis()
                self._dual_basis = self._base_derivation.dual_basis()
            except NotImplementedError:
                pass
            constants, sharp = self._base_derivation._constants
            self._constants = (constants, False)
        elif isinstance(domain, QuotientRing_generic):
            # General quotient ring: same restriction, over all the
            # generators of the defining ideal.
            self._base_derivation = RingDerivationModule(domain.cover_ring(), defining_morphism)
            if any(der(modulus) != 0 for modulus in domain.defining_ideal().gens()
                   for der in self._base_derivation.gens()):
                raise NotImplementedError("derivations over quotient rings"
                                          " are not fully supported")
            self.Element = RingDerivationWithoutTwist_quotient
            try:
                self._gens = self._base_derivation.gens()
            except NotImplementedError:
                pass
            try:
                self._basis = self._base_derivation.basis()
                self._dual_basis = self._base_derivation.dual_basis()
            except NotImplementedError:
                pass
            constants, sharp = self._base_derivation._constants
            self._constants = (constants, False)
        else:
            raise NotImplementedError("derivations over this ring is not implemented")
        # Pick the category: with a basis we get a ModulesWithBasis;
        # untwisted endo-derivations additionally form a Lie algebra
        # (under the commutator bracket).
        if self._basis is None:
            category = Modules(codomain)
        else:
            category = ModulesWithBasis(codomain)
        if self._twist is None and domain is codomain:
            category &= LieAlgebras(self._constants[0])
        Module.__init__(self, codomain, category=category)
        # Convert the raw generator/basis data into actual elements.
        if self._gens is not None:
            self._gens = [self.element_class(self, x) for x in self._gens]
        if self._basis is not None:
            self._basis = [self.element_class(self, x) for x in self._basis]
        if self._dual_basis is not None:
            self._dual_basis = [domain(x) for x in self._dual_basis]
    def __hash__(self):
        """Return a hash of this module."""
        return hash((self._domain, self._codomain, self._twist))
    def _coerce_map_from_(self, R):
        """
        Return whether there is a coercion from ``R`` to this module.

        Another derivation module coerces in when its domain coerces
        from ours, its codomain coerces into ours, and the defining
        morphisms agree on the generators.
        """
        if isinstance(R, RingDerivationModule):
            if R.domain().has_coerce_map_from(self._domain) and self._codomain.has_coerce_map_from(R.codomain()):
                morR = R.defining_morphism()
                morS = self._defining_morphism
                try:
                    for g in self._domain.gens():
                        if morR(g) != morS(g):
                            return False
                    return True
                except (AttributeError, NotImplementedError):
                    pass
        return super(RingDerivationModule, self)._coerce_map_from_(R)
    def _repr_(self):
        """Return a string representation of this module."""
        t = ""
        if self._twist is None:
            s = "Module of derivations"
        else:
            s = "Module of twisted derivations"
            try:
                t = " (twisting morphism: %s)" % self._twist._repr_short()
            except AttributeError:
                pass
        if self._domain is self._codomain:
            s += " over %s" % self._domain
        else:
            s += " from %s to %s" % (self._domain, self._codomain)
        return s + t
    def domain(self):
        """Return the ring on which the derivations act."""
        return self._domain
    def codomain(self):
        """Return the ring in which the derivations take values."""
        return self._codomain
    def defining_morphism(self):
        """Return the structural morphism from domain to codomain."""
        return self._defining_morphism
    def twisting_morphism(self):
        """Return the twisting morphism, or ``None`` if untwisted."""
        return self._twist
    def ngens(self):
        """Return the number of generators of this module."""
        if self._gens is None:
            raise NotImplementedError("generators are not implemented for this derivation module")
        return len(self._gens)
    def gens(self):
        """Return the generators of this module as a tuple."""
        if self._gens is None:
            raise NotImplementedError("generators are not implemented for this derivation module")
        return tuple(self._gens)
    def gen(self, n=0):
        """Return the ``n``-th generator of this module."""
        if self._gens is None:
            raise NotImplementedError("generators are not implemented for this derivation module")
        try:
            return self._gens[n]
        except IndexError:
            raise ValueError("generator not defined")
    def basis(self):
        """Return a basis of this module as a family."""
        if self._basis is None:
            raise NotImplementedError("basis is not implemented for this derivation module")
        return Family(self._basis)
    def dual_basis(self):
        """
        Return the dual basis: domain elements on which evaluating a
        derivation recovers its coefficients on the basis.
        """
        if self._dual_basis is None:
            raise NotImplementedError("basis is not implemented for this derivation module")
        return Family(self._dual_basis)
    def ring_of_constants(self):
        """Return the ring of constants, when it is known exactly."""
        if not self._constants[1]:
            raise NotImplementedError("the computation of the ring of constants"
                                      " is not implemented for this derivation module")
        return self._constants[0]
    def random_element(self, *args, **kwds):
        """Return a derivation with random codomain coefficients."""
        if self._gens is None:
            raise NotImplementedError("generators are not implemented for this derivation module")
        return self([ self._codomain.random_element(*args, **kwds) for _ in range(len(self._gens)) ])
    def some_elements(self):
        """
        Return some elements of this module.

        NOTE(review): when generators are unavailable this returns a
        single element, not a list -- possibly intentional; confirm
        against callers.
        """
        if self._gens is None:
            return self.an_element()
        if self._dual_basis is None:
            return self._gens
        return self._gens + [f * D for f in self._dual_basis for D in self._gens]
# NOTE: derivation elements deliberately do not live in the Map
# category since they are not stable by composition.
class RingDerivation(ModuleElement):
    """
    Abstract base class for ring derivations.

    Concrete subclasses implement ``_call_``; this class provides the
    public calling interface and accessors for the domain and codomain.
    """
    def __call__(self, x):
        """Coerce ``x`` into the domain, then apply the derivation."""
        element = self.parent().domain()(x)
        return self._call_(element)
    def domain(self):
        """Return the ring on which this derivation acts."""
        module = self.parent()
        return module.domain()
    def codomain(self):
        """Return the ring in which this derivation takes its values."""
        module = self.parent()
        return module.codomain()
class RingDerivationWithoutTwist(RingDerivation):
    """
    Base class for untwisted derivations (additive maps satisfying
    the Leibniz rule).
    """
    def _repr_(self):
        """
        Return a string representation of this derivation as a linear
        combination of the ``d/dx`` basis vectors.
        """
        parent = self.parent()
        try:
            dual_basis = parent.dual_basis()
        except NotImplementedError:
            # No dual basis available: fall back to a generic phrase.
            return "A derivation on %s" % parent.domain()
        coeffs = self.list()
        s = ""
        for i in range(len(dual_basis)):
            c = coeffs[i]
            sc = str(c)
            if sc == "0":
                continue
            ddx = "d/d%s" % dual_basis[i]
            # Format each coefficient, minimizing signs and parentheses.
            if sc == "1":
                s += " + " + ddx
            elif sc == "-1":
                s += " - " + ddx
            elif c._is_atomic() and sc[0] != "-":
                s += " + %s*%s" % (sc, ddx)
            elif (-c)._is_atomic():
                s += " - %s*%s" % (-c, ddx)
            else:
                s += " + (%s)*%s" % (sc, ddx)
        # Strip the leading separator (or report the zero derivation).
        if s[:3] == " + ":
            return s[3:]
        elif s[:3] == " - ":
            return "-" + s[3:]
        elif s == "":
            return "0"
        else:
            return s
    def _latex_(self):
        """
        Return a LaTeX representation of this derivation; mirrors
        ``_repr_`` with LaTeX formatting.
        """
        parent = self.parent()
        try:
            dual_basis = parent.dual_basis()
        except NotImplementedError:
            return "\\text{A derivation on } %s" % latex(parent.domain())
        coeffs = self.list()
        s = ""
        for i in range(len(dual_basis)):
            c = coeffs[i]
            sc = str(c)
            if sc == "0":
                continue
            ddx = "\\frac{d}{d%s}" % latex(dual_basis[i])
            if sc == "1":
                s += " + " + ddx
            elif sc == "-1":
                s += " - " + ddx
            elif c._is_atomic() and sc[0] != "-":
                s += " + %s %s" % (sc, ddx)
            elif (-c)._is_atomic():
                s += " - %s %s" % (-c, ddx)
            else:
                s += " + \\left(%s\\right) %s" % (sc, ddx)
        if s[:3] == " + ":
            return s[3:]
        elif s[:3] == " - ":
            return "-" + s[3:]
        elif s == "":
            return "0"
        else:
            return s
    def list(self):
        """
        Return the coefficients of this derivation on the canonical
        basis, obtained by evaluating it on the dual basis.
        """
        parent = self.parent()
        return [self(x) for x in parent.dual_basis()]
    def monomial_coefficients(self):
        """
        Return the nonzero coefficients of this derivation as a
        dictionary indexed by basis position.
        """
        dual_basis = self.parent().dual_basis()
        # NOTE(review): ``dict`` shadows the builtin here; kept as-is.
        dict = { }
        for i in range(len(dual_basis)):
            c = self(dual_basis[i])
            if c != 0:
                dict[i] = c
        return dict
    def is_zero(self):
        """Return ``True`` if all coefficients of this derivation vanish."""
        for c in self.list():
            if not c.is_zero():
                return False
        return True
    def _richcmp_(self, other, op):
        """
        Compare with ``other``; only (in)equality is supported, via
        the coefficient lists.
        """
        if op == op_EQ:
            if isinstance(other, RingDerivationWithoutTwist):
                return self.list() == other.list()
            else:
                return False
        if op == op_NE:
            if isinstance(other, RingDerivationWithoutTwist):
                return self.list() != other.list()
            else:
                return True
        return NotImplemented
    def _bracket_(self, other):
        """
        Return the Lie bracket (commutator) ``[self, other]``, computed
        on the dual basis.
        """
        parent = self.parent()
        if parent.domain() is not parent.codomain():
            raise TypeError("the bracket is only defined for derivations with same domain and codomain")
        arg = [ ]
        for x in parent.dual_basis():
            arg.append(self(other(x)) - other(self(x)))
        return parent(arg)
    def pth_power(self):
        """
        Return the ``p``-th iterate of this derivation, where ``p`` is
        the (prime) characteristic; it is again a derivation.
        """
        parent = self.parent()
        if parent.domain() is not parent.codomain():
            raise TypeError("the derivation must have the same domain and codomain")
        p = parent.domain().characteristic()
        if not p.is_prime():
            raise TypeError("the domain of the derivation must have positive and prime characteristic")
        arg = [ ]
        for x in parent.dual_basis():
            # Apply the derivation p times to each dual basis element.
            res = x
            for _ in range(p):
                res = self(res)
            arg.append(res)
        return parent(arg)
    def precompose(self, morphism):
        """
        Return the derivation obtained by applying first ``morphism``
        and then this derivation.
        """
        parent = self.parent()
        if morphism in Rings().Commutative():
            if parent.domain().has_coerce_map_from(morphism):
                morphism = parent.domain().coerce_map_from(morphism)
            else:
                raise TypeError("the given ring does not coerce to the domain of the derivation")
        elif not (isinstance(morphism, Map) and morphism.category_for().is_subcategory(Rings())):
            raise TypeError("you must give an homomorphism of rings")
        M = RingDerivationModule(morphism.domain(), parent.defining_morphism() * morphism)
        arg = [ ]
        for x in M.dual_basis():
            arg.append(self(morphism(x)))
        return M(arg)
    def postcompose(self, morphism):
        """
        Return the derivation obtained by applying first this
        derivation and then ``morphism``.
        """
        parent = self.parent()
        if morphism in Rings().Commutative():
            if morphism.has_coerce_map_from(parent.codomain()):
                morphism = morphism.coerce_map_from(parent.codomain())
            else:
                raise TypeError("the codomain of the derivation does not coerce to the given ring")
        elif not (isinstance(morphism, Map) and morphism.category_for().is_subcategory(Rings())):
            raise TypeError("you must give an homomorphism of rings")
        M = RingDerivationModule(parent.domain(), morphism * parent.defining_morphism())
        arg = [ ]
        for x in M.dual_basis():
            arg.append(morphism(self(x)))
        return M(arg)
    def extend_to_fraction_field(self):
        """
        Return the extension of this derivation to the fraction fields
        of the domain and the codomain.
        """
        parent = self.parent()
        domain = parent.domain().fraction_field()
        codomain = parent.codomain().fraction_field()
        M = RingDerivationModule(domain, codomain)
        try:
            return M(self)
        except (ValueError, NotImplementedError):
            # Fall back to reconstructing from the coefficient list.
            return M(self.list())
class RingDerivationWithoutTwist_zero(RingDerivationWithoutTwist):
    """
    The zero derivation, used for rings admitting no nonzero derivation.
    """
    def __init__(self, parent, arg=None):
        """Accept only data describing the zero derivation."""
        # A one-element list wrapping a derivation stands for that derivation.
        if isinstance(arg, list) and len(arg) == 1 and isinstance(arg[0], RingDerivation):
            arg = arg[0]
        if arg:
            describes_zero = isinstance(arg, RingDerivation) and arg.is_zero()
            if not describes_zero:
                raise ValueError("unable to create the derivation")
        RingDerivation.__init__(self, parent)
    def _repr_(self):
        """Return the string representation (always ``"0"``)."""
        return "0"
    def _latex_(self):
        """Return the LaTeX representation (always ``"0"``)."""
        return "0"
    def __hash__(self):
        """Return a hash based on the (empty) coefficient list."""
        return hash(tuple(self.list()))
    def _add_(self, other):
        """Adding zero returns ``other`` unchanged."""
        return other
    def _sub_(self, other):
        """Subtracting from zero negates ``other``."""
        return -other
    def _neg_(self):
        """The zero derivation is its own opposite."""
        return self
    def _lmul_(self, factor):
        """Any scalar multiple of zero is zero."""
        return self
    def _rmul_(self, left):
        """Any scalar multiple of zero is zero."""
        return self
    def _call_(self, x):
        """Map every element to zero in the codomain."""
        return self.parent().codomain().zero()
    def _bracket_(self, other):
        """The bracket of zero with anything is zero."""
        return self
    def is_zero(self):
        """Return ``True``: this derivation is identically zero."""
        return True
    def list(self):
        """Return the (empty) coefficient list."""
        return []
class RingDerivationWithoutTwist_wrapper(RingDerivationWithoutTwist):
    """
    Wrapper delegating all computation to an underlying derivation;
    used to change the parent (fraction fields, quotient rings)
    without changing the computation rules.
    """
    def __init__(self, parent, arg=None):
        """Unwrap ``arg`` if needed and store the underlying derivation."""
        # A one-element list wrapping a derivation stands for that derivation.
        if isinstance(arg, list) and len(arg) == 1 and isinstance(arg[0], RingDerivation):
            arg = arg[0]
        if isinstance(arg, RingDerivationWithoutTwist_wrapper):
            # Re-wrapping: share the wrapped derivation instead of rebuilding.
            self._base_derivation = arg._base_derivation
        else:
            self._base_derivation = parent._base_derivation(arg)
        RingDerivation.__init__(self, parent)
    def __hash__(self):
        """Hash on the coefficient list."""
        return hash(tuple(self.list()))
    def _add_(self, other):
        """Return the sum, computed on the wrapped derivations."""
        combined = self._base_derivation + other._base_derivation
        return type(self)(self.parent(), combined)
    def _sub_(self, other):
        """Return the difference, computed on the wrapped derivations."""
        combined = self._base_derivation - other._base_derivation
        return type(self)(self.parent(), combined)
    def _neg_(self):
        """Return the opposite of the wrapped derivation, re-wrapped."""
        return type(self)(self.parent(), -self._base_derivation)
    def _lmul_(self, factor):
        """Return the right scalar product on the wrapped derivation."""
        scaled = self._base_derivation * factor
        return type(self)(self.parent(), scaled)
    def _rmul_(self, factor):
        """Return the left scalar product on the wrapped derivation."""
        scaled = factor * self._base_derivation
        return type(self)(self.parent(), scaled)
    def list(self):
        """Return the coefficient list of the wrapped derivation."""
        return self._base_derivation.list()
class RingDerivationWithoutTwist_function(RingDerivationWithoutTwist):
    """
    A derivation over a (multivariate) function/polynomial ring, stored as
    a derivation of the base ring (``self._base_derivation``) plus the list
    of images of the generators (``self._images``).
    """
    def __init__(self, parent, arg=None):
        """
        Initialize from ``arg``, which may be: ``None`` (derivation w.r.t.
        the first generator), another derivation of a compatible parent, a
        list/tuple of images (base-ring part first, then one image per
        generator), or a single generator of the domain.
        """
        domain = parent.domain()
        codomain = parent.codomain()
        ngens = domain.ngens()
        self._base_derivation = parent._base_derivation()
        self._images = [codomain.zero() for _ in range(ngens)]
        if arg is None:
            # Default: the partial derivative w.r.t. the first generator.
            arg = domain.gen()
        # A one-element list wrapping a derivation stands for that derivation.
        if isinstance(arg, list) and len(arg) == 1 and isinstance(arg[0], RingDerivation):
            arg = arg[0]
        if not arg:
            pass
        elif (isinstance(arg, RingDerivationWithoutTwist_function)
              and parent.has_coerce_map_from(arg.parent())):
            # Copy-convert another derivation of the same shape.
            self._base_derivation = parent._base_derivation(arg._base_derivation)
            self._images = [codomain(x) for x in arg._images]
        elif isinstance(arg, (tuple, list)):
            # Split the list: leading entries feed the base derivation,
            # the last ``ngens`` entries are the images of the generators.
            if len(arg) < ngens:
                raise ValueError("the number of images is incorrect")
            self._base_derivation = parent._base_derivation(arg[:-ngens])
            self._images = [codomain(x) for x in arg[-ngens:]]
        else:
            # A single generator means d/d(gen); anything else goes to the
            # base derivation's constructor.
            for i in range(ngens):
                if arg == domain.gen(i):
                    self._base_derivation = parent._base_derivation()
                    self._images[i] = codomain.one()
                    break
            else:
                self._base_derivation = parent._base_derivation(arg)
        RingDerivation.__init__(self, parent)
    def __hash__(self):
        """Hash consistent with the coordinate list."""
        return hash(tuple(self.list()))
    def _add_(self, other):
        """Add componentwise: base parts and generator images."""
        base_derivation = self._base_derivation + other._base_derivation
        im = [ self._images[i] + other._images[i] for i in range(self.parent().domain().ngens()) ]
        return type(self)(self.parent(), [base_derivation] + im)
    def _sub_(self, other):
        """Subtract componentwise: base parts and generator images."""
        base_derivation = self._base_derivation - other._base_derivation
        im = [ self._images[i] - other._images[i] for i in range(self.parent().domain().ngens()) ]
        return type(self)(self.parent(), [base_derivation] + im)
    def _rmul_(self, factor):
        """Scale all components by ``factor`` (converted into the codomain)."""
        factor = self.parent().codomain()(factor)
        base_derivation = factor * self._base_derivation
        im = [ factor*x for x in self._images ]
        return type(self)(self.parent(), [base_derivation] + im)
    def _lmul_(self, factor):
        # Scalar action is commutative here, so reuse _rmul_.
        return self._rmul_(factor)
    def _call_(self, x):
        """
        Evaluate at ``x`` via the Leibniz rule: apply the base derivation to
        the coefficients, then add image * (partial derivative) for each
        generator.  Fractions are handled with the quotient rule.
        """
        parent = self.parent()
        domain = parent.domain()
        codomain = parent.codomain()
        defining_morphism = parent.defining_morphism()
        if isinstance(domain, FractionField_generic):
            # Quotient rule: (u'v - uv') / v^2 on numerator/denominator.
            num = x.numerator()
            den = x.denominator()
            u = defining_morphism(num)
            v = defining_morphism(den)
            up = num.map_coefficients(self._base_derivation, codomain)(*domain.gens())
            vp = den.map_coefficients(self._base_derivation, codomain)(*domain.gens())
            res = (up*v - u*vp) / (v*v)
        else:
            # Base-ring part: derive the coefficients only.
            res = x.map_coefficients(self._base_derivation, codomain)(*domain.gens())
        # Generator part: chain rule over each variable.
        for i in range(len(self._images)):
            res += defining_morphism(x.derivative(domain.gen(i))) * self._images[i]
        return res
    def is_zero(self):
        """True iff the base part and every generator image vanish."""
        if not self._base_derivation.is_zero():
            return False
        return all(im == 0 for im in self._images)
    def list(self):
        """Coordinates: base-derivation coordinates followed by the images."""
        return self._base_derivation.list() + self._images
class RingDerivationWithoutTwist_fraction_field(RingDerivationWithoutTwist_wrapper):
    """
    A derivation on a fraction field, extending a wrapped derivation on the
    underlying ring via the quotient rule.
    """
    def __hash__(self):
        """Hash consistent with the coordinate list."""
        return hash(tuple(self.list()))
    def _call_(self, x):
        """Evaluate at the fraction ``x`` using (u'v - uv') / v^2."""
        f = self.parent().defining_morphism()
        u = f(x.numerator())
        v = f(x.denominator())
        du = self._base_derivation(u)
        dv = self._base_derivation(v)
        return (du*v - u*dv) / (v*v)
class RingDerivationWithoutTwist_quotient(RingDerivationWithoutTwist_wrapper):
    """
    A derivation on a quotient ring, induced by a wrapped derivation on the
    cover ring.
    """
    def __hash__(self):
        """Hash consistent with the coordinate list."""
        return hash(tuple(self.list()))
    def _call_(self, x):
        """Evaluate by lifting ``x`` to the cover ring first."""
        lifted = x.lift()
        return self._base_derivation(lifted)
class RingDerivationWithTwist_generic(RingDerivation):
    """
    A twisted derivation of the form ``c * (theta - f)`` where ``theta`` is
    the twisting morphism, ``f`` the defining morphism and ``c`` a scalar in
    the codomain (stored in ``self._scalar``).
    """
    def __init__(self, parent, scalar=0):
        """Store ``scalar`` (converted into the codomain) and initialize."""
        codomain = parent.codomain()
        self._scalar = codomain(scalar)
        RingDerivation.__init__(self, parent)
    def __hash__(self):
        """Hash by the defining scalar."""
        return hash(self._scalar)
    def _repr_(self):
        """
        Return a string of the shape ``c*([twist] - [def])``, abbreviating
        the scalar for 0, 1, -1 and parenthesizing non-atomic scalars.
        """
        scalar = self._scalar
        sc = str(scalar)
        if sc == "0":
            return "0"
        defining_morphism = self.parent().defining_morphism()
        twisting_morphism = self.parent().twisting_morphism()
        try:
            if defining_morphism.is_identity():
                sdef = "id"
            else:
                sdef = "[%s]" % defining_morphism._repr_short()
        except (AttributeError, NotImplementedError):
            # Morphism cannot describe itself; fall back to a generic label.
            sdef = "defining_morphism"
        try:
            stwi = "[%s]" % twisting_morphism._repr_short()
        except AttributeError:
            stwi = "twisting_morphism"
        if sc == "1":
            return "%s - %s" % (stwi, sdef)
        elif sc == "-1":
            s = "-"
        elif scalar._is_atomic():
            s = "%s*" % sc
        elif (-scalar)._is_atomic():
            s = "-%s*" % (-scalar)
        else:
            # Composite scalar: parenthesize to keep precedence unambiguous.
            s = "(%s)*" % sc
        return "%s(%s - %s)" % (s, stwi, sdef)
    def _latex_(self):
        """LaTeX analogue of ``_repr_``, with the same scalar special-casing."""
        scalar = self._scalar
        sc = str(scalar)
        if sc == "0":
            return "0"
        defining_morphism = self.parent().defining_morphism()
        twisting_morphism = self.parent().twisting_morphism()
        try:
            if defining_morphism.is_identity():
                sdef = "\\text{id}"
            else:
                sdef = "\\left[%s\\right]" % latex(defining_morphism)
        except (AttributeError, NotImplementedError):
            sdef = "\\text{defining morphism}"
        try:
            stwi = "\\left[%s\\right]" % latex(twisting_morphism)
        except AttributeError:
            stwi = "\\text{twisting morphism}"
        if sc == "1":
            return "%s - %s" % (stwi, sdef)
        elif sc == "-1":
            s = "-"
        elif scalar._is_atomic():
            s = "%s " % sc
        elif (-scalar)._is_atomic():
            s = "-%s " % (-scalar)
        else:
            s = "\\left(%s\\right) " % sc
        return "%s \\left(%s - %s\\right)" % (s, stwi, sdef)
    def _add_(self, other):
        """Add by adding the scalars."""
        return type(self)(self.parent(), self._scalar + other._scalar)
    def _sub_(self, other):
        """Subtract by subtracting the scalars."""
        return type(self)(self.parent(), self._scalar - other._scalar)
    def _rmul_(self, factor):
        """Scale the defining scalar on the left."""
        return type(self)(self.parent(), factor * self._scalar)
    def _lmul_(self, factor):
        # Scalar action is commutative here, so reuse _rmul_.
        return self._rmul_(factor)
    def _call_(self, x):
        """Evaluate: ``c * (theta(x) - f(x))``."""
        parent = self.parent()
        return self._scalar * (parent.twisting_morphism()(x) - parent.defining_morphism()(x))
    def list(self):
        """Coordinates: just the defining scalar."""
        return [ self._scalar ]
    def precompose(self, morphism):
        """
        Return this derivation precomposed with ``morphism`` (a ring or a
        ring homomorphism coercing into the domain).
        """
        parent = self.parent()
        if morphism in Rings().Commutative():
            # A ring was given: use its coercion into the domain.
            if parent.domain().has_coerce_map_from(morphism):
                morphism = parent.domain().coerce_map_from(morphism)
            else:
                raise TypeError("the given ring does not coerce to the domain of the derivation")
        elif not (isinstance(morphism, Map) and morphism.category_for().is_subcategory(Rings())):
            raise TypeError("you must give an homomorphism of rings")
        M = RingDerivationModule(morphism.domain(), parent.defining_morphism() * morphism,
                                 parent.twisting_morphism() * morphism)
        return M(self._scalar)
    def postcompose(self, morphism):
        """
        Return this derivation postcomposed with ``morphism`` (a ring or a
        ring homomorphism the codomain coerces into).
        """
        parent = self.parent()
        if morphism in Rings().Commutative():
            # A ring was given: use the coercion from the codomain into it.
            if morphism.has_coerce_map_from(parent.codomain()):
                morphism = morphism.coerce_map_from(parent.codomain())
            else:
                raise TypeError("the codomain of the derivation does not coerce to the given ring")
        elif not (isinstance(morphism, Map) and morphism.category_for().is_subcategory(Rings())):
            raise TypeError("you must give an homomorphism of rings")
        M = RingDerivationModule(parent.domain(), morphism * parent.defining_morphism(),
                                 morphism * parent.twisting_morphism())
        return M(morphism(self._scalar))
    def _richcmp_(self, other, op):
        """Equality/inequality by comparing scalars; other comparisons undefined."""
        if op == op_EQ:
            if isinstance(other, RingDerivationWithTwist_generic):
                return self._scalar == other._scalar
            else:
                return False
        if op == op_NE:
            if isinstance(other, RingDerivationWithTwist_generic):
                return self._scalar != other._scalar
            else:
                return True
        return NotImplemented
    def extend_to_fraction_field(self):
        """
        Return this twisted derivation, extended to the fraction fields of
        its domain and codomain (the twisting morphism is extended too).
        """
        parent = self.parent()
        domain = parent.domain().fraction_field()
        codomain = parent.codomain().fraction_field()
        twist = parent.twisting_morphism().extend_to_fraction_field()
        M = RingDerivationModule(domain, codomain, twist)
        return M(codomain(self._scalar))
| true
| true
|
f719b7c6bd2479d28d7a6679e56b280ca817a0bb
| 1,669
|
py
|
Python
|
py/orbit/py_linac/overlapping_fields/jparc_enge_func_factory.py
|
LeoRya/py-orbit
|
340b14b6fd041ed8ec2cc25b0821b85742aabe0c
|
[
"MIT"
] | 17
|
2018-02-09T23:39:06.000Z
|
2022-03-04T16:27:04.000Z
|
py/orbit/py_linac/overlapping_fields/jparc_enge_func_factory.py
|
LeoRya/py-orbit
|
340b14b6fd041ed8ec2cc25b0821b85742aabe0c
|
[
"MIT"
] | 22
|
2017-05-31T19:40:14.000Z
|
2021-09-24T22:07:47.000Z
|
py/orbit/py_linac/overlapping_fields/jparc_enge_func_factory.py
|
LeoRya/py-orbit
|
340b14b6fd041ed8ec2cc25b0821b85742aabe0c
|
[
"MIT"
] | 37
|
2016-12-08T19:39:35.000Z
|
2022-02-11T19:59:34.000Z
|
#!/usr/bin/env python
#--------------------------------------------------------------
# This is a Enge Function Factory specific for the J-PARC. Some
# Enge's function parameters are defined by the aperture and length,
# and others are defined by the field distribution formula from Trace3D
# documentation.
#--------------------------------------------------------------
import math
import sys
import os
from overlapping_quad_fields_lib import PMQ_Trace3D_Function
from overlapping_quad_fields_lib import EngeFunction
from overlapping_quad_fields_lib import SimpleQuadFieldFunc
def JPARC_EngeFunctionFactory(quad):
	"""
	Generate the longitudinal field-profile function for a J-PARC quad.

	PMQ quads (carrying "radIn"/"radOut" parameters) get the Trace3D
	permanent-magnet-quad function; ordinary quads with an "aperture"
	parameter get an Enge function.  If neither parameter set is present
	the run is aborted via orbitFinalize with a diagnostic message.
	"""
	name = quad.getName()
	length_param = quad.getLength()
	#---- general PMQ function described in Trace3D documentation
	if(quad.hasParam("radIn") and quad.hasParam("radOut")):
		radIn = quad.getParam("radIn")
		radOut = quad.getParam("radOut")
		cutoff_level = 0.01
		# DTQ01 needs a looser cutoff (presumably to keep the field tail
		# tractable -- TODO confirm against the lattice definition).
		if(name == "LI_DTL1:DTQ01"): cutoff_level = 0.02
		return PMQ_Trace3D_Function(length_param, radIn, radOut, cutoff_level)
	#----- general Enge's Function
	if(quad.hasParam("aperture")):
		acceptance_diameter_param = quad.getParam("aperture")
		cutoff_level = 0.01
		return EngeFunction(length_param, acceptance_diameter_param, cutoff_level)
	# No usable parameters: abort with a diagnostic.
	# Bug fix: the message used to say "SNS_EngeFunctionFactory" -- a
	# copy-paste leftover from the SNS factory; report this function's name.
	msg = os.linesep.join([
		"JPARC_EngeFunctionFactory Python function. ",
		"Cannot create the EngeFunction for the quad!",
		"quad name = " + quad.getName(),
		"It does not have the aperture parameter!",
		"",
	])
	orbitFinalize(msg)
	return None
| 34.061224
| 74
| 0.688436
|
# and others are defined by the field distribution formula from Trace3D
# documentation.
#--------------------------------------------------------------
import math
import sys
import os
from overlapping_quad_fields_lib import PMQ_Trace3D_Function
from overlapping_quad_fields_lib import EngeFunction
from overlapping_quad_fields_lib import SimpleQuadFieldFunc
def JPARC_EngeFunctionFactory(quad):
name = quad.getName()
length_param = quad.getLength()
#---- general PMQ function described in Trace3D documentation
if(quad.hasParam("radIn") and quad.hasParam("radOut")):
radIn = quad.getParam("radIn")
radOut = quad.getParam("radOut")
cutoff_level = 0.01
if(name == "LI_DTL1:DTQ01"): cutoff_level = 0.02
func = PMQ_Trace3D_Function(length_param,radIn,radOut,cutoff_level)
return func
#----- general Enge's Function
if(quad.hasParam("aperture")):
acceptance_diameter_param = quad.getParam("aperture")
cutoff_level = 0.01
func = EngeFunction(length_param,acceptance_diameter_param,cutoff_level)
return func
else:
msg = "SNS_EngeFunctionFactory Python function. "
msg += os.linesep
msg += "Cannot create the EngeFunction for the quad!"
msg += os.linesep
msg = msg + "quad name = " + quad.getName()
msg = msg + os.linesep
msg = msg + "It does not have the aperture parameter!"
msg = msg + os.linesep
orbitFinalize(msg)
return None
| true
| true
|
f719b9a65c9a3077b966cb0086383cf3d2d3c035
| 498
|
py
|
Python
|
meiduo_mall/utils/secret.py
|
liusudo123/meiduo_project
|
3bf92fff56bf47777795cf9078ff285eb004b81f
|
[
"MIT"
] | null | null | null |
meiduo_mall/utils/secret.py
|
liusudo123/meiduo_project
|
3bf92fff56bf47777795cf9078ff285eb004b81f
|
[
"MIT"
] | null | null | null |
meiduo_mall/utils/secret.py
|
liusudo123/meiduo_project
|
3bf92fff56bf47777795cf9078ff285eb004b81f
|
[
"MIT"
] | null | null | null |
# 1.装包
# 2.导包
from django.conf import settings
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
# 3.实例化
# 4.加密解密
class SecretOauth(object):
    """Sign and verify payloads with itsdangerous' timed JSON serializer."""

    # Sign (encrypt-like) -- produce a token that expires after one hour.
    def dumps(self, data):
        """Serialize ``data`` into a signed, time-limited token string."""
        serializer = Serializer(secret_key=settings.SECRET_KEY, expires_in=3600)
        return serializer.dumps(data).decode()

    # Verify (decrypt-like) -- recover the payload from a token.
    def loads(self, data):
        """Check the signature/expiry of ``data`` and return the payload."""
        serializer = Serializer(secret_key=settings.SECRET_KEY, expires_in=3600)
        return serializer.loads(data)
| 21.652174
| 71
| 0.670683
|
from django.conf import settings
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
class SecretOauth(object):
def dumps(self, data):
s = Serializer(secret_key=settings.SECRET_KEY, expires_in=3600)
result = s.dumps(data)
return result.decode()
def loads(self, data):
s = Serializer(secret_key=settings.SECRET_KEY, expires_in=3600)
result = s.loads(data)
return result
| true
| true
|
f719b9b7e40ad20e1eac164cd3eb7a2cf77da67a
| 3,848
|
py
|
Python
|
Phys_Seg/run.py
|
pedrob37/Phys_Seg
|
7adc65d7b228b3a5702acfa9e6d0494d6b4c2dee
|
[
"Apache-2.0"
] | 1
|
2021-09-27T09:58:56.000Z
|
2021-09-27T09:58:56.000Z
|
Phys_Seg/run.py
|
pedrob37/Phys_Seg
|
7adc65d7b228b3a5702acfa9e6d0494d6b4c2dee
|
[
"Apache-2.0"
] | null | null | null |
Phys_Seg/run.py
|
pedrob37/Phys_Seg
|
7adc65d7b228b3a5702acfa9e6d0494d6b4c2dee
|
[
"Apache-2.0"
] | null | null | null |
import torch
import numpy as np
import SimpleITK as sitk
from Phys_Seg.data_loading import load_and_preprocess, save_segmentation_nifti, read_file, save_img
from Phys_Seg.predict_case import predict_phys_seg, physics_preprocessing, image_preprocessing
import importlib
from Phys_Seg.utils import postprocess_prediction, get_params_fname, maybe_download_parameters
from network_architecture import nnUNet
import os
import Phys_Seg
def apply_phys_seg(img, out_fname):
    """Round-trip a NIfTI image through SimpleITK, preserving its metadata.

    Reads ``img``, copies the voxel array into a fresh image carrying the
    original origin/spacing/direction, and writes it to ``out_fname``.
    """
    source = sitk.ReadImage(img)
    voxels = sitk.GetArrayFromImage(source)
    result = sitk.GetImageFromArray(voxels)
    # Carry over origin, spacing and direction from the input image.
    result.CopyInformation(source)
    sitk.WriteImage(result, out_fname)
def run_phys_seg(mri_fnames, output_fnames, sequence='MPRAGE', physics_params=None,
                 # config_file=os.path.join(Phys_Seg.__path__[0], "config.py"),
                 device=None, overwrite=True):
    """
    Run physics-aware segmentation over one or more MRI volumes.

    :param mri_fnames: str or list/tuple of str
    :param output_fnames: str or list/tuple of str. If list: must have the same length as output_fnames
    :param sequence: MPRAGE or SPGR (for now)
    :param config_file: config.py
    :param device: either int (for device id) or 'cpu'
    :param overwrite: True or False
    :param postprocess: whether to do postprocessing or not. Postprocessing here consists of simply discarding all
    but the largest predicted connected component. Default False
    :return:
    """
    # Number of physics parameters the network expects, per sequence.
    physics_input_size = {'MPRAGE': 4,
                          'SPGR': 6}
    # Load in model weights (downloaded on demand).
    maybe_download_parameters(sequence=sequence, physics_flag=True if physics_params else False)
    params_file = get_params_fname(sequence=sequence, physics_flag=True if physics_params else False)
    net = nnUNet(1, 4, physics_flag=True if physics_params else False,
                 physics_input=physics_input_size[sequence],
                 physics_output=40)
    if device == "cpu":
        net = net.cpu()
    else:
        net.cuda(device)
        # NOTE(review): device_ids=[device, int(1-device)] assumes exactly two
        # GPUs with ids 0 and 1 -- confirm before running on other hardware.
        net = torch.nn.DataParallel(net, device_ids=[device, int(1-device)])
        net.to(f'cuda:{net.device_ids[0]}')
        # net = torch.nn.DataParallel(net)
    # Allow single filenames as well as lists.
    if not isinstance(mri_fnames, (list, tuple)):
        mri_fnames = [mri_fnames]
    if not isinstance(output_fnames, (list, tuple)):
        output_fnames = [output_fnames]
    # Load weights onto CPU first; DataParallel moves them as needed.
    params = torch.load(params_file, map_location=lambda storage, loc: storage)
    for in_fname, out_fname in zip(mri_fnames, output_fnames):
        if overwrite or not (os.path.isfile(out_fname)):
            print("File:", in_fname)
            print("preprocessing...")
            try:
                data, aff = read_file(in_fname)
            except RuntimeError:
                print("\nERROR\nCould not read file", in_fname, "\n")
                continue
            except AssertionError as e:
                print(e)
                continue
            # Process data
            if physics_params is not None:
                # SECURITY: eval() on a caller-supplied string executes
                # arbitrary code -- ast.literal_eval would be safer here.
                physics_params = eval(physics_params)
                # Convert TR to pTD
                physics_params[1] = physics_params[1] - physics_params[0]
                print(physics_params)
                processed_physics = physics_preprocessing(np.array(physics_params), sequence)
            else:
                processed_physics = None
            data = image_preprocessing(patient_data=data)
            print("prediction (CNN id)...")
            net.load_state_dict(params['model_state_dict'])
            net.eval()
            seg = predict_phys_seg(net=net,
                                   patient_data=data,
                                   processed_physics=processed_physics,
                                   main_device=device)
            print("exporting segmentation...")
            save_segmentation_nifti(seg, aff, out_fname)
            # apply_phys_seg(in_fname, out_fname)
| 38.09901
| 114
| 0.64527
|
import torch
import numpy as np
import SimpleITK as sitk
from Phys_Seg.data_loading import load_and_preprocess, save_segmentation_nifti, read_file, save_img
from Phys_Seg.predict_case import predict_phys_seg, physics_preprocessing, image_preprocessing
import importlib
from Phys_Seg.utils import postprocess_prediction, get_params_fname, maybe_download_parameters
from network_architecture import nnUNet
import os
import Phys_Seg
def apply_phys_seg(img, out_fname):
img_itk = sitk.ReadImage(img)
img_npy = sitk.GetArrayFromImage(img_itk)
out = sitk.GetImageFromArray(img_npy)
out.CopyInformation(img_itk)
sitk.WriteImage(out, out_fname)
def run_phys_seg(mri_fnames, output_fnames, sequence='MPRAGE', physics_params=None,
device=None, overwrite=True):
physics_input_size = {'MPRAGE': 4,
'SPGR': 6}
maybe_download_parameters(sequence=sequence, physics_flag=True if physics_params else False)
params_file = get_params_fname(sequence=sequence, physics_flag=True if physics_params else False)
net = nnUNet(1, 4, physics_flag=True if physics_params else False,
physics_input=physics_input_size[sequence],
physics_output=40)
if device == "cpu":
net = net.cpu()
else:
net.cuda(device)
net = torch.nn.DataParallel(net, device_ids=[device, int(1-device)])
net.to(f'cuda:{net.device_ids[0]}')
if not isinstance(mri_fnames, (list, tuple)):
mri_fnames = [mri_fnames]
if not isinstance(output_fnames, (list, tuple)):
output_fnames = [output_fnames]
params = torch.load(params_file, map_location=lambda storage, loc: storage)
for in_fname, out_fname in zip(mri_fnames, output_fnames):
if overwrite or not (os.path.isfile(out_fname)):
print("File:", in_fname)
print("preprocessing...")
try:
data, aff = read_file(in_fname)
except RuntimeError:
print("\nERROR\nCould not read file", in_fname, "\n")
continue
except AssertionError as e:
print(e)
continue
if physics_params is not None:
physics_params = eval(physics_params)
physics_params[1] = physics_params[1] - physics_params[0]
print(physics_params)
processed_physics = physics_preprocessing(np.array(physics_params), sequence)
else:
processed_physics = None
data = image_preprocessing(patient_data=data)
print("prediction (CNN id)...")
net.load_state_dict(params['model_state_dict'])
net.eval()
seg = predict_phys_seg(net=net,
patient_data=data,
processed_physics=processed_physics,
main_device=device)
print("exporting segmentation...")
save_segmentation_nifti(seg, aff, out_fname)
| true
| true
|
f719bb906e369e26b721b5b82e53ff4644582d3b
| 3,541
|
py
|
Python
|
lzo_indexer/indexer.py
|
krux/python-lzo-indexer
|
21fdd821a38d9b941c02036b7f30a15891311a7d
|
[
"Apache-2.0"
] | 8
|
2015-09-12T17:11:00.000Z
|
2021-04-22T01:35:26.000Z
|
lzo_indexer/indexer.py
|
krux/python-lzo-indexer
|
21fdd821a38d9b941c02036b7f30a15891311a7d
|
[
"Apache-2.0"
] | null | null | null |
lzo_indexer/indexer.py
|
krux/python-lzo-indexer
|
21fdd821a38d9b941c02036b7f30a15891311a7d
|
[
"Apache-2.0"
] | 4
|
2015-06-18T01:04:19.000Z
|
2018-09-28T16:33:54.000Z
|
import struct
from collections import namedtuple
from StringIO import StringIO
# Magic string expected at the start of the file to verify it's LZO
_LZO_MAGIC = bytearray("\x89LZO\x00\r\n\x1a\n")
_COMPRESSION_CHECKSUMS = (0x02, 0x200) # ADLER32 CRC32
_DECOMPRESSION_CHECKSUMS = (0x01, 0x100) # ADLER32 CRC32
def _parse_header(lzo_file):
    """Parse and verify the header of an LZO file, returning a tuple
    of the number of compressed/decompressed checksums expected at the
    end of each block.

    The file object must be positioned at offset 0; on return it is
    positioned at the first data block.  (Python 2 code: reads return
    byte strings.)
    """
    if lzo_file.tell() != 0:
        raise Exception("File object must be at offset 0")
    # Parse the header
    if lzo_file.read(9) != _LZO_MAGIC:
        raise Exception("Invalid lzo file")
    # Ignore a bunch of values from the header
    # TODO: We should validate these
    lzop_version = lzo_file.read(2)
    library_version = lzo_file.read(2)
    extract_version = lzo_file.read(2)
    method = lzo_file.read(1)
    level = lzo_file.read(1)
    # Checksum flags: each set bit enables one 4-byte checksum per block.
    flags, = struct.unpack(">I", lzo_file.read(4))
    num_compressed_checksums = 0
    for idx, flag in enumerate(_COMPRESSION_CHECKSUMS):
        if (flag & flags) != 0:
            num_compressed_checksums += 1
    num_decompressed_checksums = 0
    for idx, flag in enumerate(_DECOMPRESSION_CHECKSUMS):
        if (flag & flags) != 0:
            num_decompressed_checksums += 1
    # Parse out the mode/mtime/gmtdiff values we're not interested in
    mode = lzo_file.read(4)
    mtime = lzo_file.read(4)
    gmtdiff = lzo_file.read(4)
    # Extract the filename (length-prefixed, may be empty)
    filename_length = ord(lzo_file.read(1))
    if filename_length > 0:
        filename = str(lzo_file.read(filename_length))
    # TODO: Verify the header checksum against these bytes
    lzo_file.read(4)
    # Process extra header field for lzo < 1.08. This is a checksum that
    # needs to also be validated
    if (flags & 0x00000040) != 0:
        size, = struct.unpack(">I", lzo_file.read(4))
        if size > 0:
            lzo_file.read(size)
        lzo_file.read(4)
    return num_compressed_checksums, num_decompressed_checksums
def get_lzo_blocks(lzo_file):
    """Yield the byte offset of every compressed block in the LZO file.

    The header is parsed first (to learn how many per-block checksums to
    skip); each yielded offset points at the 8 bytes of block-size headers
    preceding the block's data.
    """
    compressed_count, decompressed_count = _parse_header(lzo_file)
    while True:
        uncompressed_size, = struct.unpack(">I", lzo_file.read(4))
        # A zero uncompressed size marks the end-of-stream sentinel.
        if uncompressed_size == 0:
            return
        compressed_size, = struct.unpack(">I", lzo_file.read(4))
        # Decompressed checksums are always present; compressed ones only
        # when the block was stored uncompressed (sizes equal).
        checksums = decompressed_count
        if uncompressed_size == compressed_size:
            checksums += compressed_count
        here = lzo_file.tell()
        yield here - 8  # rewind past the two 4-byte size headers
        lzo_file.seek(here + compressed_size + 4 * checksums)
def index_lzo_string(string):
    """Return the block index of an in-memory LZO string.

    The result is the packed byte string an index file would contain.
    """
    output = StringIO()
    index_lzo_file(StringIO(string), output)
    return output.getvalue()
def index_lzo_file(lzo_file, index_file):
    """Write each block offset of *lzo_file* to *index_file* and return it.

    Offsets are stored as big-endian unsigned 64-bit integers.
    """
    for offset in get_lzo_blocks(lzo_file):
        index_file.write(struct.pack(">Q", offset))
    return index_file
| 29.508333
| 79
| 0.680316
|
import struct
from collections import namedtuple
from StringIO import StringIO
_LZO_MAGIC = bytearray("\x89LZO\x00\r\n\x1a\n")
_COMPRESSION_CHECKSUMS = (0x02, 0x200) # ADLER32 CRC32
_DECOMPRESSION_CHECKSUMS = (0x01, 0x100) # ADLER32 CRC32
def _parse_header(lzo_file):
if lzo_file.tell() != 0:
raise Exception("File object must be at offset 0")
# Parse the header
if lzo_file.read(9) != _LZO_MAGIC:
raise Exception("Invalid lzo file")
# Ignore a bunch of values from the header
# TODO: We should validate these
lzop_version = lzo_file.read(2)
library_version = lzo_file.read(2)
extract_version = lzo_file.read(2)
method = lzo_file.read(1)
level = lzo_file.read(1)
# Checksum flags
flags, = struct.unpack(">I", lzo_file.read(4))
num_compressed_checksums = 0
for idx, flag in enumerate(_COMPRESSION_CHECKSUMS):
if (flag & flags) != 0:
num_compressed_checksums += 1
num_decompressed_checksums = 0
for idx, flag in enumerate(_DECOMPRESSION_CHECKSUMS):
if (flag & flags) != 0:
num_decompressed_checksums += 1
# Parse out the mode/mtime/gmtdiff values we're not interested in
mode = lzo_file.read(4)
mtime = lzo_file.read(4)
gmtdiff = lzo_file.read(4)
filename_length = ord(lzo_file.read(1))
if filename_length > 0:
filename = str(lzo_file.read(filename_length))
lzo_file.read(4)
if (flags & 0x00000040) != 0:
size, = struct.unpack(">I", lzo_file.read(4))
if size > 0:
lzo_file.read(size)
lzo_file.read(4)
return num_compressed_checksums, num_decompressed_checksums
def get_lzo_blocks(lzo_file):
num_compressed_chksms, num_decompressed_chksms = _parse_header(lzo_file)
while True:
decompressed_blocksize, = struct.unpack(">I", lzo_file.read(4))
if decompressed_blocksize == 0:
break
compressed_blocksize, = struct.unpack(">I", lzo_file.read(4))
num_chksms_to_skip = num_decompressed_chksms
if decompressed_blocksize == compressed_blocksize:
num_chksms_to_skip += num_compressed_chksms
skip = 4 * num_chksms_to_skip
position = lzo_file.tell()
block_start = position - 8
next_block = position + compressed_blocksize + skip
yield block_start
lzo_file.seek(next_block)
def index_lzo_string(string):
index = StringIO()
index_lzo_file(StringIO(string), index)
return index.getvalue()
def index_lzo_file(lzo_file, index_file):
for block_offset in get_lzo_blocks(lzo_file):
index_file.write(struct.pack(">Q", block_offset))
return index_file
| true
| true
|
f719bbd224fa1f348d74df1adf6270da318609b3
| 1,028
|
py
|
Python
|
reference/ddtrace/ext/aws.py
|
stschenk/opentelemetry-python-contrib
|
28c1331e571d386baab74f5028e3268e4bfda4cd
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
reference/ddtrace/ext/aws.py
|
stschenk/opentelemetry-python-contrib
|
28c1331e571d386baab74f5028e3268e4bfda4cd
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2020-12-12T17:59:41.000Z
|
2020-12-12T18:54:03.000Z
|
reference/ddtrace/ext/aws.py
|
stschenk/opentelemetry-python-contrib
|
28c1331e571d386baab74f5028e3268e4bfda4cd
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2020-10-22T04:16:33.000Z
|
2020-10-22T04:16:33.000Z
|
from ..utils.formats import flatten_dict
# Endpoints whose call arguments must never be attached as span tags at all.
DENYLIST_ENDPOINT = ['kms', 'sts']
# Per-endpoint argument keys to drop (e.g. S3 object bodies can be huge).
DENYLIST_ENDPOINT_TAGS = {
    's3': ['params.Body'],
}
def truncate_arg_value(value, max_len=1024):
    """Replace oversized ``bytes`` values with a ``b'...'`` placeholder.

    Anything that is not ``bytes``, or that fits within *max_len*, is
    returned unchanged.  Useful for parameters like 'Body' in
    `put_object` operations.
    """
    too_large = isinstance(value, bytes) and len(value) > max_len
    return b'...' if too_large else value
def add_span_arg_tags(span, endpoint_name, args, args_names, args_traced):
    """Attach the traced call arguments to *span* as flattened tags.

    Does nothing for fully denylisted endpoints; otherwise keeps only the
    arguments listed in *args_traced*, flattens nested values, truncates
    large byte bodies and drops per-endpoint denylisted keys.
    """
    if endpoint_name in DENYLIST_ENDPOINT:
        return
    blocked_keys = DENYLIST_ENDPOINT_TAGS.get(endpoint_name, [])
    traced = {
        name: value
        for name, value in zip(args_names, args)
        if name in args_traced
    }
    flat = flatten_dict(traced)
    span.set_tags({
        key: truncate_arg_value(value)
        for key, value in flat.items()
        if key not in blocked_keys
    })
# Standard AWS span tag names used by the botocore/boto integrations.
REGION = 'aws.region'
AGENT = 'aws.agent'
OPERATION = 'aws.operation'
| 25.7
| 74
| 0.622568
|
from ..utils.formats import flatten_dict
DENYLIST_ENDPOINT = ['kms', 'sts']
DENYLIST_ENDPOINT_TAGS = {
's3': ['params.Body'],
}
def truncate_arg_value(value, max_len=1024):
if isinstance(value, bytes) and len(value) > max_len:
return b'...'
return value
def add_span_arg_tags(span, endpoint_name, args, args_names, args_traced):
if endpoint_name not in DENYLIST_ENDPOINT:
denylisted = DENYLIST_ENDPOINT_TAGS.get(endpoint_name, [])
tags = dict(
(name, value)
for (name, value) in zip(args_names, args)
if name in args_traced
)
tags = flatten_dict(tags)
tags = {
k: truncate_arg_value(v)
for k, v in tags.items()
if k not in denylisted
}
span.set_tags(tags)
REGION = 'aws.region'
AGENT = 'aws.agent'
OPERATION = 'aws.operation'
| true
| true
|
f719bbfb410401300cb793e160dd34ffe11f0df1
| 426
|
py
|
Python
|
list_comprehensions.py
|
rjayasin/list-comprehension
|
6937f4f6dec8b1b8722c31356db32de18795de8b
|
[
"MIT"
] | null | null | null |
list_comprehensions.py
|
rjayasin/list-comprehension
|
6937f4f6dec8b1b8722c31356db32de18795de8b
|
[
"MIT"
] | null | null | null |
list_comprehensions.py
|
rjayasin/list-comprehension
|
6937f4f6dec8b1b8722c31356db32de18795de8b
|
[
"MIT"
] | null | null | null |
import math

# compute primes using list difference
# from http://www.secnetix.de/olli/Python/list_comprehensions.hawk
noprimes = [j for i in range(2, 8) for j in range(i*2, 50, i)]
difference = [x for x in range(2, 50) if x not in noprimes]
# print(difference)

# Trial-division version.  Bug fix: start the range at 2, not 1 -- for
# x == 1 the inner range(2, int(sqrt(1) + 1)) is empty, so any() was
# vacuously False and 1 was wrongly listed as prime.
primes = [x for x in range(2, 51) if not any(x % y == 0 for y in range(2, int(math.sqrt(x) + 1)))]
# print(primes)
| 35.5
| 105
| 0.692488
|
import math
noprimes = [j for i in range(2, 8) for j in range(i*2, 50, i)]
difference = [x for x in range(2, 50) if x not in noprimes]
primes = [x for x in range(1, 51) if not any([y for y in range(2, int(math.sqrt(x) + 1)) if x % y == 0])]
| true
| true
|
f719bcfdda7fd95388f3a3f5283d672ebcdb37cb
| 5,859
|
py
|
Python
|
apps/translations/tests/test_helpers.py
|
Joergen/olympia
|
eb84203469adbb6584e50d7bb6f9de7f20980dac
|
[
"BSD-3-Clause"
] | 1
|
2015-10-29T06:55:20.000Z
|
2015-10-29T06:55:20.000Z
|
apps/translations/tests/test_helpers.py
|
magopian/olympia
|
70cad15111a89e3d5c715cbade8925b12d1b98dc
|
[
"BSD-3-Clause"
] | null | null | null |
apps/translations/tests/test_helpers.py
|
magopian/olympia
|
70cad15111a89e3d5c715cbade8925b12d1b98dc
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf import settings
from django.utils import translation
import jingo
import pytest
from mock import Mock, patch
from nose.tools import eq_
import amo
import amo.tests
from addons.models import Addon
from translations import helpers
from translations.fields import save_signal
from translations.models import PurifiedTranslation
from translations.tests.testapp.models import TranslatedModel
pytestmark = pytest.mark.django_db
# NOTE(review): this module-level hook shadows the builtin `super`.  It looks
# like a transcription artifact of nose's conventional `setup()` fixture
# (which registers jingo's template helpers before the tests run) -- confirm
# against the original file before renaming, since nose discovers it by name.
def super():
    jingo.load_helpers()
def test_locale_html():
    """HTML lang/dir attributes are emitted only for non-site languages."""
    field = Mock()

    # Same language as the site: no attributes at all.
    field.locale = translation.get_language()
    assert not helpers.locale_html(field), \
        'no special HTML attributes for site language'

    # A left-to-right foreign language.
    field.locale = 'de'
    eq_(helpers.locale_html(field), ' lang="de" dir="ltr"')

    # Every configured right-to-left language.
    for rtl_locale in settings.RTL_LANGUAGES:
        field.locale = rtl_locale
        eq_(helpers.locale_html(field), ' lang="%s" dir="rtl"' % rtl_locale)
def test_locale_html_xss():
    """Test for nastiness-removal in the transfield's locale.

    Bug fix: both assertions previously used the raw '<script>' literal,
    making them contradict each other (the tag cannot be simultaneously
    absent and present in the output).  The second assertion must check
    for the HTML-escaped form the helper emits.
    """
    testfield = Mock()
    # A hostile locale string must not survive unescaped.
    testfield.locale = '<script>alert(1)</script>'
    s = helpers.locale_html(testfield)
    assert '<script>' not in s
    assert '&lt;script&gt;alert(1)&lt;/script&gt;' in s
def test_empty_locale_html():
    """locale_html must still work if field is None."""
    assert not helpers.locale_html(None), 'locale_html on None must be empty.'
def test_truncate_purified_field():
    """Truncating past the visible length leaves the markup untouched."""
    markup = '<i>one</i><i>two</i>'
    trans = PurifiedTranslation(localized_string=markup)
    rendered = jingo.env.from_string('{{ s|truncate(6) }}').render({'s': trans})
    eq_(rendered, markup)
def test_truncate_purified_field_xss():
    """Truncating should not introduce xss issues."""
    # NOTE(review): the expected values below contain raw '<script>' markup,
    # which would defeat the point of this xss test -- it looks like HTML
    # entities ('&lt;script&gt;') were lost in transcription.  Confirm the
    # escaped expected strings against the original file.
    s = 'safe <script>alert("omg")</script>'
    t = PurifiedTranslation(localized_string=s)
    actual = jingo.env.from_string('{{ s|truncate(100) }}').render({'s': t})
    eq_(actual, 'safe <script>alert("omg")</script>')
    actual = jingo.env.from_string('{{ s|truncate(5) }}').render({'s': t})
    eq_(actual, 'safe ...')
def test_clean():
    # Links are not mangled, bad HTML is escaped, newlines are slimmed.
    # NOTE(review): the expected value contains a raw '<script>' although the
    # comment above says bad HTML is escaped -- likely '&lt;script&gt;' lost
    # its entities in transcription; confirm against the original test.
    s = '<ul><li><a href="#woo">\n\nyeah</a></li>\n\n<li><script></li></ul>'
    eq_(helpers.clean(s),
        '<ul><li><a href="#woo">\n\nyeah</a></li><li><script></li></ul>')
def test_clean_in_template():
    """The |clean filter leaves already-safe markup untouched."""
    markup = '<a href="#woo">yeah</a>'
    eq_(jingo.env.from_string('{{ s|clean }}').render({'s': markup}), markup)
def test_no_links():
    """The |no_links filter strips anchor markup but keeps bare text URLs."""
    def render(value):
        return jingo.env.from_string('{{ s|no_links }}').render({'s': value})

    linked = 'a <a href="http://url.link">http://example.com</a>, http://text.link'
    eq_(render(linked), 'a http://example.com, http://text.link')
    # Bad markup: a dangling '<' swallows everything after it.
    eq_(render('<http://bad.markup.com'), '')
    eq_(render('some text <http://bad.markup.com'), 'some text')
def test_l10n_menu():
    """The l10n menu embeds the correct remove-locale URL."""
    # Default: no removal URL provided.
    assert 'data-rm-locale=""' in helpers.l10n_menu({})
    # An explicit URL is used verbatim (e.g. for user profiles).
    menu = helpers.l10n_menu({}, remove_locale_url='/some/url/')
    assert 'data-rm-locale="/some/url/"' in menu, menu
    # An addon in the context overrides the passed URL with its own.
    menu = helpers.l10n_menu({'addon': Addon()},
                             remove_locale_url='some/url/')
    assert 'data-rm-locale="/developers/addon/None/rmlocale"' in menu, menu
@patch.object(settings, 'AMO_LANGUAGES', ('de', 'en-US', 'es', 'fr', 'pt-BR'))
class TestAllLocales(amo.tests.TestCase):
def test_all_locales_none(self):
addon = None
field_name = 'description'
eq_(helpers.all_locales(addon, field_name), None)
addon = Mock()
field_name = 'description'
del addon.description
eq_(helpers.all_locales(addon, field_name), None)
def test_all_locales(self):
obj = TranslatedModel()
obj.description = {
'en-US': 'There',
'es': 'Is No',
'fr': 'Spoon'
}
# Pretend the TranslateModel instance was saved to force Translation
# objects to be saved.
save_signal(sender=TranslatedModel, instance=obj)
result = helpers.all_locales(obj, 'description')
assert u'<div class="trans" data-name="description">' in result
assert u'<span lang="en-us">There</span>' in result
assert u'<span lang="es">Is No</span>' in result
assert u'<span lang="fr">Spoon</span>' in result
def test_all_locales_empty(self):
obj = TranslatedModel()
obj.description = {
'en-US': 'There',
'es': 'Is No',
'fr': ''
}
# Pretend the TranslateModel instance was saved to force Translation
# objects to be saved.
save_signal(sender=TranslatedModel, instance=obj)
result = helpers.all_locales(obj, 'description')
assert u'<div class="trans" data-name="description">' in result
assert u'<span lang="en-us">There</span>' in result
assert u'<span lang="es">Is No</span>' in result
assert u'<span lang="fr"></span>' in result
result = helpers.all_locales(obj, 'description', prettify_empty=True)
assert u'<div class="trans" data-name="description">' in result
assert u'<span lang="en-us">There</span>' in result
assert u'<span lang="es">Is No</span>' in result
assert u'<span class="empty" lang="fr">None</span>' in result
| 33.672414
| 79
| 0.635774
|
from django.conf import settings
from django.utils import translation
import jingo
import pytest
from mock import Mock, patch
from nose.tools import eq_
import amo
import amo.tests
from addons.models import Addon
from translations import helpers
from translations.fields import save_signal
from translations.models import PurifiedTranslation
from translations.tests.testapp.models import TranslatedModel
pytestmark = pytest.mark.django_db
def super():
jingo.load_helpers()
def test_locale_html():
testfield = Mock()
this_lang = translation.get_language()
testfield.locale = this_lang
s = helpers.locale_html(testfield)
assert not s, 'no special HTML attributes for site language'
testfield.locale = 'de'
s = helpers.locale_html(testfield)
eq_(s, ' lang="de" dir="ltr"')
for lang in settings.RTL_LANGUAGES:
testfield.locale = lang
s = helpers.locale_html(testfield)
eq_(s, ' lang="%s" dir="rtl"' % testfield.locale)
def test_locale_html_xss():
testfield = Mock()
testfield.locale = '<script>alert(1)</script>'
s = helpers.locale_html(testfield)
assert '<script>' not in s
assert '<script>alert(1)</script>' in s
def test_empty_locale_html():
s = helpers.locale_html(None)
assert not s, 'locale_html on None must be empty.'
def test_truncate_purified_field():
s = '<i>one</i><i>two</i>'
t = PurifiedTranslation(localized_string=s)
actual = jingo.env.from_string('{{ s|truncate(6) }}').render({'s': t})
eq_(actual, s)
def test_truncate_purified_field_xss():
s = 'safe <script>alert("omg")</script>'
t = PurifiedTranslation(localized_string=s)
actual = jingo.env.from_string('{{ s|truncate(100) }}').render({'s': t})
eq_(actual, 'safe <script>alert("omg")</script>')
actual = jingo.env.from_string('{{ s|truncate(5) }}').render({'s': t})
eq_(actual, 'safe ...')
def test_clean():
s = '<ul><li><a href="#woo">\n\nyeah</a></li>\n\n<li><script></li></ul>'
eq_(helpers.clean(s),
'<ul><li><a href="#woo">\n\nyeah</a></li><li><script></li></ul>')
def test_clean_in_template():
s = '<a href="#woo">yeah</a>'
eq_(jingo.env.from_string('{{ s|clean }}').render({'s': s}), s)
def test_no_links():
s = 'a <a href="http://url.link">http://example.com</a>, http://text.link'
eq_(jingo.env.from_string('{{ s|no_links }}').render({'s': s}),
'a http://example.com, http://text.link')
s = '<http://bad.markup.com'
eq_(jingo.env.from_string('{{ s|no_links }}').render({'s': s}), '')
s = 'some text <http://bad.markup.com'
eq_(jingo.env.from_string('{{ s|no_links }}').render({'s': s}),
'some text')
def test_l10n_menu():
menu = helpers.l10n_menu({})
assert 'data-rm-locale=""' in menu, menu
menu = helpers.l10n_menu({}, remove_locale_url='/some/url/')
assert 'data-rm-locale="/some/url/"' in menu, menu
menu = helpers.l10n_menu({'addon': Addon()},
remove_locale_url='some/url/')
assert 'data-rm-locale="/developers/addon/None/rmlocale"' in menu, menu
@patch.object(settings, 'AMO_LANGUAGES', ('de', 'en-US', 'es', 'fr', 'pt-BR'))
class TestAllLocales(amo.tests.TestCase):
def test_all_locales_none(self):
addon = None
field_name = 'description'
eq_(helpers.all_locales(addon, field_name), None)
addon = Mock()
field_name = 'description'
del addon.description
eq_(helpers.all_locales(addon, field_name), None)
def test_all_locales(self):
obj = TranslatedModel()
obj.description = {
'en-US': 'There',
'es': 'Is No',
'fr': 'Spoon'
}
save_signal(sender=TranslatedModel, instance=obj)
result = helpers.all_locales(obj, 'description')
assert u'<div class="trans" data-name="description">' in result
assert u'<span lang="en-us">There</span>' in result
assert u'<span lang="es">Is No</span>' in result
assert u'<span lang="fr">Spoon</span>' in result
def test_all_locales_empty(self):
obj = TranslatedModel()
obj.description = {
'en-US': 'There',
'es': 'Is No',
'fr': ''
}
save_signal(sender=TranslatedModel, instance=obj)
result = helpers.all_locales(obj, 'description')
assert u'<div class="trans" data-name="description">' in result
assert u'<span lang="en-us">There</span>' in result
assert u'<span lang="es">Is No</span>' in result
assert u'<span lang="fr"></span>' in result
result = helpers.all_locales(obj, 'description', prettify_empty=True)
assert u'<div class="trans" data-name="description">' in result
assert u'<span lang="en-us">There</span>' in result
assert u'<span lang="es">Is No</span>' in result
assert u'<span class="empty" lang="fr">None</span>' in result
| true
| true
|
f719bd0e61d8fc8ee4756b2db46ad0dfa8dfa39d
| 6,499
|
py
|
Python
|
twisted/test/test_text.py
|
sxamit/twisted
|
30f6966329c857c3631c60aeb420d84d7828e01e
|
[
"MIT",
"Unlicense"
] | 1
|
2017-08-07T14:52:02.000Z
|
2017-08-07T14:52:02.000Z
|
Lib/site-packages/twisted/test/test_text.py
|
adzhou/Python27
|
a7113b69d54a04cc780143241c2f1fe81939ad3a
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/twisted/test/test_text.py
|
adzhou/Python27
|
a7113b69d54a04cc780143241c2f1fe81939ad3a
|
[
"bzip2-1.0.6"
] | 1
|
2018-11-07T12:52:07.000Z
|
2018-11-07T12:52:07.000Z
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.text}.
"""
from cStringIO import StringIO
from twisted.trial import unittest
from twisted.python import text
sampleText = \
"""Every attempt to employ mathematical methods in the study of chemical
questions must be considered profoundly irrational and contrary to the
spirit of chemistry ... If mathematical analysis should ever hold a
prominent place in chemistry - an aberration which is happily almost
impossible - it would occasion a rapid and widespread degeneration of that
science.
-- Auguste Comte, Philosophie Positive, Paris, 1838
"""
class WrapTests(unittest.TestCase):
"""
Tests for L{text.greedyWrap}.
"""
def setUp(self):
self.lineWidth = 72
self.sampleSplitText = sampleText.split()
self.output = text.wordWrap(sampleText, self.lineWidth)
def test_wordCount(self):
"""
Compare the number of words.
"""
words = []
for line in self.output:
words.extend(line.split())
wordCount = len(words)
sampleTextWordCount = len(self.sampleSplitText)
self.assertEqual(wordCount, sampleTextWordCount)
def test_wordMatch(self):
"""
Compare the lists of words.
"""
words = []
for line in self.output:
words.extend(line.split())
# Using assertEqual here prints out some
# rather too long lists.
self.assertTrue(self.sampleSplitText == words)
def test_lineLength(self):
"""
Check the length of the lines.
"""
failures = []
for line in self.output:
if not len(line) <= self.lineWidth:
failures.append(len(line))
if failures:
self.fail("%d of %d lines were too long.\n"
"%d < %s" % (len(failures), len(self.output),
self.lineWidth, failures))
def test_doubleNewline(self):
"""
Allow paragraphs delimited by two \ns.
"""
sampleText = "et\n\nphone\nhome."
result = text.wordWrap(sampleText, self.lineWidth)
self.assertEqual(result, ["et", "", "phone home.", ""])
class LineTests(unittest.TestCase):
"""
Tests for L{isMultiline} and L{endsInNewline}.
"""
def test_isMultiline(self):
"""
L{text.isMultiline} returns C{True} if the string has a newline in it.
"""
s = 'This code\n "breaks."'
m = text.isMultiline(s)
self.assertTrue(m)
s = 'This code does not "break."'
m = text.isMultiline(s)
self.assertFalse(m)
def test_endsInNewline(self):
"""
L{text.endsInNewline} returns C{True} if the string ends in a newline.
"""
s = 'newline\n'
m = text.endsInNewline(s)
self.assertTrue(m)
s = 'oldline'
m = text.endsInNewline(s)
self.assertFalse(m)
class StringyStringTests(unittest.TestCase):
"""
Tests for L{text.stringyString}.
"""
def test_tuple(self):
"""
Tuple elements are displayed on separate lines.
"""
s = ('a', 'b')
m = text.stringyString(s)
self.assertEqual(m, '(a,\n b,)\n')
def test_dict(self):
"""
Dicts elements are displayed using C{str()}.
"""
s = {'a': 0}
m = text.stringyString(s)
self.assertEqual(m, '{a: 0}')
def test_list(self):
"""
List elements are displayed on separate lines using C{str()}.
"""
s = ['a', 'b']
m = text.stringyString(s)
self.assertEqual(m, '[a,\n b,]\n')
class SplitTests(unittest.TestCase):
"""
Tests for L{text.splitQuoted}.
"""
def test_oneWord(self):
"""
Splitting strings with one-word phrases.
"""
s = 'This code "works."'
r = text.splitQuoted(s)
self.assertEqual(['This', 'code', 'works.'], r)
def test_multiWord(self):
s = 'The "hairy monkey" likes pie.'
r = text.splitQuoted(s)
self.assertEqual(['The', 'hairy monkey', 'likes', 'pie.'], r)
# Some of the many tests that would fail:
#def test_preserveWhitespace(self):
# phrase = '"MANY SPACES"'
# s = 'With %s between.' % (phrase,)
# r = text.splitQuoted(s)
# self.assertEqual(['With', phrase, 'between.'], r)
#def test_escapedSpace(self):
# s = r"One\ Phrase"
# r = text.splitQuoted(s)
# self.assertEqual(["One Phrase"], r)
class StrFileTests(unittest.TestCase):
def setUp(self):
self.io = StringIO("this is a test string")
def tearDown(self):
pass
def test_1_f(self):
self.assertEqual(False, text.strFile("x", self.io))
def test_1_1(self):
self.assertEqual(True, text.strFile("t", self.io))
def test_1_2(self):
self.assertEqual(True, text.strFile("h", self.io))
def test_1_3(self):
self.assertEqual(True, text.strFile("i", self.io))
def test_1_4(self):
self.assertEqual(True, text.strFile("s", self.io))
def test_1_5(self):
self.assertEqual(True, text.strFile("n", self.io))
def test_1_6(self):
self.assertEqual(True, text.strFile("g", self.io))
def test_3_1(self):
self.assertEqual(True, text.strFile("thi", self.io))
def test_3_2(self):
self.assertEqual(True, text.strFile("his", self.io))
def test_3_3(self):
self.assertEqual(True, text.strFile("is ", self.io))
def test_3_4(self):
self.assertEqual(True, text.strFile("ing", self.io))
def test_3_f(self):
self.assertEqual(False, text.strFile("bla", self.io))
def test_large_1(self):
self.assertEqual(True, text.strFile("this is a test", self.io))
def test_large_2(self):
self.assertEqual(True, text.strFile("is a test string", self.io))
def test_large_f(self):
self.assertEqual(False, text.strFile("ds jhfsa k fdas", self.io))
def test_overlarge_f(self):
self.assertEqual(False, text.strFile("djhsakj dhsa fkhsa s,mdbnfsauiw bndasdf hreew", self.io))
def test_self(self):
self.assertEqual(True, text.strFile("this is a test string", self.io))
def test_insensitive(self):
self.assertEqual(True, text.strFile("ThIs is A test STRING", self.io, False))
| 26.744856
| 103
| 0.59086
|
from cStringIO import StringIO
from twisted.trial import unittest
from twisted.python import text
sampleText = \
"""Every attempt to employ mathematical methods in the study of chemical
questions must be considered profoundly irrational and contrary to the
spirit of chemistry ... If mathematical analysis should ever hold a
prominent place in chemistry - an aberration which is happily almost
impossible - it would occasion a rapid and widespread degeneration of that
science.
-- Auguste Comte, Philosophie Positive, Paris, 1838
"""
class WrapTests(unittest.TestCase):
def setUp(self):
self.lineWidth = 72
self.sampleSplitText = sampleText.split()
self.output = text.wordWrap(sampleText, self.lineWidth)
def test_wordCount(self):
words = []
for line in self.output:
words.extend(line.split())
wordCount = len(words)
sampleTextWordCount = len(self.sampleSplitText)
self.assertEqual(wordCount, sampleTextWordCount)
def test_wordMatch(self):
words = []
for line in self.output:
words.extend(line.split())
self.assertTrue(self.sampleSplitText == words)
def test_lineLength(self):
failures = []
for line in self.output:
if not len(line) <= self.lineWidth:
failures.append(len(line))
if failures:
self.fail("%d of %d lines were too long.\n"
"%d < %s" % (len(failures), len(self.output),
self.lineWidth, failures))
def test_doubleNewline(self):
sampleText = "et\n\nphone\nhome."
result = text.wordWrap(sampleText, self.lineWidth)
self.assertEqual(result, ["et", "", "phone home.", ""])
class LineTests(unittest.TestCase):
def test_isMultiline(self):
s = 'This code\n "breaks."'
m = text.isMultiline(s)
self.assertTrue(m)
s = 'This code does not "break."'
m = text.isMultiline(s)
self.assertFalse(m)
def test_endsInNewline(self):
s = 'newline\n'
m = text.endsInNewline(s)
self.assertTrue(m)
s = 'oldline'
m = text.endsInNewline(s)
self.assertFalse(m)
class StringyStringTests(unittest.TestCase):
def test_tuple(self):
s = ('a', 'b')
m = text.stringyString(s)
self.assertEqual(m, '(a,\n b,)\n')
def test_dict(self):
s = {'a': 0}
m = text.stringyString(s)
self.assertEqual(m, '{a: 0}')
def test_list(self):
s = ['a', 'b']
m = text.stringyString(s)
self.assertEqual(m, '[a,\n b,]\n')
class SplitTests(unittest.TestCase):
def test_oneWord(self):
s = 'This code "works."'
r = text.splitQuoted(s)
self.assertEqual(['This', 'code', 'works.'], r)
def test_multiWord(self):
s = 'The "hairy monkey" likes pie.'
r = text.splitQuoted(s)
self.assertEqual(['The', 'hairy monkey', 'likes', 'pie.'], r)
class StrFileTests(unittest.TestCase):
def setUp(self):
self.io = StringIO("this is a test string")
def tearDown(self):
pass
def test_1_f(self):
self.assertEqual(False, text.strFile("x", self.io))
def test_1_1(self):
self.assertEqual(True, text.strFile("t", self.io))
def test_1_2(self):
self.assertEqual(True, text.strFile("h", self.io))
def test_1_3(self):
self.assertEqual(True, text.strFile("i", self.io))
def test_1_4(self):
self.assertEqual(True, text.strFile("s", self.io))
def test_1_5(self):
self.assertEqual(True, text.strFile("n", self.io))
def test_1_6(self):
self.assertEqual(True, text.strFile("g", self.io))
def test_3_1(self):
self.assertEqual(True, text.strFile("thi", self.io))
def test_3_2(self):
self.assertEqual(True, text.strFile("his", self.io))
def test_3_3(self):
self.assertEqual(True, text.strFile("is ", self.io))
def test_3_4(self):
self.assertEqual(True, text.strFile("ing", self.io))
def test_3_f(self):
self.assertEqual(False, text.strFile("bla", self.io))
def test_large_1(self):
self.assertEqual(True, text.strFile("this is a test", self.io))
def test_large_2(self):
self.assertEqual(True, text.strFile("is a test string", self.io))
def test_large_f(self):
self.assertEqual(False, text.strFile("ds jhfsa k fdas", self.io))
def test_overlarge_f(self):
self.assertEqual(False, text.strFile("djhsakj dhsa fkhsa s,mdbnfsauiw bndasdf hreew", self.io))
def test_self(self):
self.assertEqual(True, text.strFile("this is a test string", self.io))
def test_insensitive(self):
self.assertEqual(True, text.strFile("ThIs is A test STRING", self.io, False))
| true
| true
|
f719bed52604d78cd372c38b0ba41bc4f013d7b2
| 311
|
py
|
Python
|
routes/show_bp.py
|
Silve1ra/fyyur
|
580562cc592d587c9bed4f080b856664abb9f70d
|
[
"MIT"
] | 1
|
2021-09-17T11:56:38.000Z
|
2021-09-17T11:56:38.000Z
|
routes/show_bp.py
|
Silve1ra/fyyur
|
580562cc592d587c9bed4f080b856664abb9f70d
|
[
"MIT"
] | null | null | null |
routes/show_bp.py
|
Silve1ra/fyyur
|
580562cc592d587c9bed4f080b856664abb9f70d
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
from controllers.show import shows, create_shows, create_show_submission
show_bp = Blueprint('show_bp', __name__)
show_bp.route('/', methods=['GET'])(shows)
show_bp.route('/create', methods=['GET'])(create_shows)
show_bp.route('/create', methods=['POST'])(create_show_submission)
| 31.1
| 72
| 0.762058
|
from flask import Blueprint
from controllers.show import shows, create_shows, create_show_submission
show_bp = Blueprint('show_bp', __name__)
show_bp.route('/', methods=['GET'])(shows)
show_bp.route('/create', methods=['GET'])(create_shows)
show_bp.route('/create', methods=['POST'])(create_show_submission)
| true
| true
|
f719bf0a49a2168cb3b4abfd826a62d6032ed825
| 7,399
|
py
|
Python
|
nova/api/openstack/compute/plugins/v3/cloudpipe.py
|
zaina/nova
|
181358c172d606b23c9cc14b58d677d911013c02
|
[
"Apache-2.0"
] | null | null | null |
nova/api/openstack/compute/plugins/v3/cloudpipe.py
|
zaina/nova
|
181358c172d606b23c9cc14b58d677d911013c02
|
[
"Apache-2.0"
] | null | null | null |
nova/api/openstack/compute/plugins/v3/cloudpipe.py
|
zaina/nova
|
181358c172d606b23c9cc14b58d677d911013c02
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Connect your vlan to the world."""
from oslo_config import cfg
from oslo_utils import fileutils
from oslo_utils import timeutils
from webob import exc
from nova.api.openstack.compute.schemas.v3 import cloudpipe
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import exception
from nova.i18n import _
from nova import network
from nova import objects
from nova import utils
CONF = cfg.CONF
CONF.import_opt('keys_path', 'nova.crypto')
ALIAS = 'os-cloudpipe'
authorize = extensions.os_compute_authorizer(ALIAS)
class CloudpipeController(wsgi.Controller):
"""Handle creating and listing cloudpipe instances."""
def __init__(self):
self.compute_api = compute.API(skip_policy_check=True)
self.network_api = network.API(skip_policy_check=True)
self.cloudpipe = pipelib.CloudPipe(skip_policy_check=True)
self.setup()
def setup(self):
"""Ensure the keychains and folders exist."""
# NOTE(vish): One of the drawbacks of doing this in the api is
# the keys will only be on the api node that launched
# the cloudpipe.
fileutils.ensure_tree(CONF.keys_path)
def _get_all_cloudpipes(self, context):
"""Get all cloudpipes."""
instances = self.compute_api.get_all(context,
search_opts={'deleted': False},
want_objects=True)
return [instance for instance in instances
if pipelib.is_vpn_image(instance.image_ref)
and instance.vm_state != vm_states.DELETED]
def _get_cloudpipe_for_project(self, context):
"""Get the cloudpipe instance for a project from context."""
cloudpipes = self._get_all_cloudpipes(context) or [None]
return cloudpipes[0]
def _vpn_dict(self, context, project_id, instance):
elevated = context.elevated()
rv = {'project_id': project_id}
if not instance:
rv['state'] = 'pending'
return rv
rv['instance_id'] = instance.uuid
rv['created_at'] = timeutils.isotime(instance.created_at)
nw_info = compute_utils.get_nw_info_for_instance(instance)
if not nw_info:
return rv
vif = nw_info[0]
ips = [ip for ip in vif.fixed_ips() if ip['version'] == 4]
if ips:
rv['internal_ip'] = ips[0]['address']
# NOTE(vish): Currently network_api.get does an owner check on
# project_id. This is probably no longer necessary
# but rather than risk changes in the db layer,
# we are working around it here by changing the
# project_id in the context. This can be removed
# if we remove the project_id check in the db.
elevated.project_id = project_id
network = self.network_api.get(elevated, vif['network']['id'])
if network:
vpn_ip = network['vpn_public_address']
vpn_port = network['vpn_public_port']
rv['public_ip'] = vpn_ip
rv['public_port'] = vpn_port
if vpn_ip and vpn_port:
if utils.vpn_ping(vpn_ip, vpn_port):
rv['state'] = 'running'
else:
rv['state'] = 'down'
else:
rv['state'] = 'invalid'
return rv
@extensions.expected_errors((400, 403))
@validation.schema(cloudpipe.create)
def create(self, req, body):
"""Create a new cloudpipe instance, if none exists.
Parameters: {cloudpipe: {'project_id': ''}}
"""
context = req.environ['nova.context']
authorize(context)
params = body.get('cloudpipe', {})
project_id = params.get('project_id', context.project_id)
# NOTE(vish): downgrade to project context. Note that we keep
# the same token so we can still talk to glance
context.project_id = project_id
context.user_id = 'project-vpn'
context.is_admin = False
context.roles = []
instance = self._get_cloudpipe_for_project(context)
if not instance:
try:
result = self.cloudpipe.launch_vpn_instance(context)
instance = result[0][0]
except exception.NoMoreNetworks:
msg = _("Unable to claim IP for VPN instances, ensure it "
"isn't running, and try again in a few minutes")
raise exc.HTTPBadRequest(explanation=msg)
return {'instance_id': instance.uuid}
@extensions.expected_errors((400, 403, 404))
def index(self, req):
"""List running cloudpipe instances."""
context = req.environ['nova.context']
authorize(context)
vpns = [self._vpn_dict(context, x['project_id'], x)
for x in self._get_all_cloudpipes(context)]
return {'cloudpipes': vpns}
@wsgi.response(202)
@extensions.expected_errors(400)
@validation.schema(cloudpipe.update)
def update(self, req, id, body):
"""Configure cloudpipe parameters for the project."""
context = req.environ['nova.context']
authorize(context)
if id != "configure-project":
msg = _("Unknown action %s") % id
raise exc.HTTPBadRequest(explanation=msg)
project_id = context.project_id
networks = objects.NetworkList.get_by_project(context, project_id)
params = body['configure_project']
vpn_ip = params['vpn_ip']
vpn_port = params['vpn_port']
for nw in networks:
nw.vpn_public_address = vpn_ip
nw.vpn_public_port = vpn_port
nw.save()
class Cloudpipe(extensions.V3APIExtensionBase):
"""Adds actions to create cloudpipe instances.
When running with the Vlan network mode, you need a mechanism to route
from the public Internet to your vlans. This mechanism is known as a
cloudpipe.
At the time of creating this class, only OpenVPN is supported. Support for
a SSH Bastion host is forthcoming.
"""
name = "Cloudpipe"
alias = ALIAS
version = 1
def get_resources(self):
resource = [extensions.ResourceExtension(ALIAS,
CloudpipeController())]
return resource
def get_controller_extensions(self):
"""It's an abstract function V3APIExtensionBase and the extension
will not be loaded without it.
"""
return []
| 37.368687
| 79
| 0.629274
|
from oslo_config import cfg
from oslo_utils import fileutils
from oslo_utils import timeutils
from webob import exc
from nova.api.openstack.compute.schemas.v3 import cloudpipe
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import exception
from nova.i18n import _
from nova import network
from nova import objects
from nova import utils
CONF = cfg.CONF
CONF.import_opt('keys_path', 'nova.crypto')
ALIAS = 'os-cloudpipe'
authorize = extensions.os_compute_authorizer(ALIAS)
class CloudpipeController(wsgi.Controller):
def __init__(self):
self.compute_api = compute.API(skip_policy_check=True)
self.network_api = network.API(skip_policy_check=True)
self.cloudpipe = pipelib.CloudPipe(skip_policy_check=True)
self.setup()
def setup(self):
fileutils.ensure_tree(CONF.keys_path)
def _get_all_cloudpipes(self, context):
instances = self.compute_api.get_all(context,
search_opts={'deleted': False},
want_objects=True)
return [instance for instance in instances
if pipelib.is_vpn_image(instance.image_ref)
and instance.vm_state != vm_states.DELETED]
def _get_cloudpipe_for_project(self, context):
cloudpipes = self._get_all_cloudpipes(context) or [None]
return cloudpipes[0]
def _vpn_dict(self, context, project_id, instance):
elevated = context.elevated()
rv = {'project_id': project_id}
if not instance:
rv['state'] = 'pending'
return rv
rv['instance_id'] = instance.uuid
rv['created_at'] = timeutils.isotime(instance.created_at)
nw_info = compute_utils.get_nw_info_for_instance(instance)
if not nw_info:
return rv
vif = nw_info[0]
ips = [ip for ip in vif.fixed_ips() if ip['version'] == 4]
if ips:
rv['internal_ip'] = ips[0]['address']
elevated.project_id = project_id
network = self.network_api.get(elevated, vif['network']['id'])
if network:
vpn_ip = network['vpn_public_address']
vpn_port = network['vpn_public_port']
rv['public_ip'] = vpn_ip
rv['public_port'] = vpn_port
if vpn_ip and vpn_port:
if utils.vpn_ping(vpn_ip, vpn_port):
rv['state'] = 'running'
else:
rv['state'] = 'down'
else:
rv['state'] = 'invalid'
return rv
@extensions.expected_errors((400, 403))
@validation.schema(cloudpipe.create)
def create(self, req, body):
context = req.environ['nova.context']
authorize(context)
params = body.get('cloudpipe', {})
project_id = params.get('project_id', context.project_id)
context.project_id = project_id
context.user_id = 'project-vpn'
context.is_admin = False
context.roles = []
instance = self._get_cloudpipe_for_project(context)
if not instance:
try:
result = self.cloudpipe.launch_vpn_instance(context)
instance = result[0][0]
except exception.NoMoreNetworks:
msg = _("Unable to claim IP for VPN instances, ensure it "
"isn't running, and try again in a few minutes")
raise exc.HTTPBadRequest(explanation=msg)
return {'instance_id': instance.uuid}
@extensions.expected_errors((400, 403, 404))
def index(self, req):
context = req.environ['nova.context']
authorize(context)
vpns = [self._vpn_dict(context, x['project_id'], x)
for x in self._get_all_cloudpipes(context)]
return {'cloudpipes': vpns}
@wsgi.response(202)
@extensions.expected_errors(400)
@validation.schema(cloudpipe.update)
def update(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
if id != "configure-project":
msg = _("Unknown action %s") % id
raise exc.HTTPBadRequest(explanation=msg)
project_id = context.project_id
networks = objects.NetworkList.get_by_project(context, project_id)
params = body['configure_project']
vpn_ip = params['vpn_ip']
vpn_port = params['vpn_port']
for nw in networks:
nw.vpn_public_address = vpn_ip
nw.vpn_public_port = vpn_port
nw.save()
class Cloudpipe(extensions.V3APIExtensionBase):
name = "Cloudpipe"
alias = ALIAS
version = 1
def get_resources(self):
resource = [extensions.ResourceExtension(ALIAS,
CloudpipeController())]
return resource
def get_controller_extensions(self):
return []
| true
| true
|
f719bfc6c7c129776e3b9c9595c4c130931fdd2d
| 15,240
|
py
|
Python
|
tempest/api/compute/servers/test_create_server.py
|
xavpaice/tempest
|
958bd694df27511e0346d799876fe49331b8145c
|
[
"Apache-2.0"
] | 3
|
2016-07-15T12:27:23.000Z
|
2021-04-23T04:41:10.000Z
|
tempest/api/compute/servers/test_create_server.py
|
LIS/lis-tempest
|
8e6403b2d6de81c5d18ed867b4977385c8278b75
|
[
"Apache-2.0"
] | null | null | null |
tempest/api/compute/servers/test_create_server.py
|
LIS/lis-tempest
|
8e6403b2d6de81c5d18ed867b4977385c8278b75
|
[
"Apache-2.0"
] | 12
|
2016-07-14T18:13:05.000Z
|
2017-07-08T18:45:42.000Z
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import testtools
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest.common.utils.linux import remote_client
from tempest.common import waiters
from tempest import config
from tempest import test
CONF = config.CONF
class ServersTestJSON(base.BaseV2ComputeTest):
disk_config = 'AUTO'
@classmethod
def setup_credentials(cls):
cls.prepare_instance_network()
super(ServersTestJSON, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(ServersTestJSON, cls).setup_clients()
cls.client = cls.servers_client
cls.network_client = cls.os.network_client
cls.networks_client = cls.os.networks_client
cls.subnets_client = cls.os.subnets_client
@classmethod
def resource_setup(cls):
cls.set_validation_resources()
super(ServersTestJSON, cls).resource_setup()
cls.meta = {'hello': 'world'}
cls.accessIPv4 = '1.1.1.1'
cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
cls.name = data_utils.rand_name('server')
cls.password = data_utils.rand_password()
disk_config = cls.disk_config
cls.server_initial = cls.create_test_server(
validatable=True,
wait_until='ACTIVE',
name=cls.name,
metadata=cls.meta,
accessIPv4=cls.accessIPv4,
accessIPv6=cls.accessIPv6,
disk_config=disk_config,
adminPass=cls.password)
cls.server = (cls.client.show_server(cls.server_initial['id'])
['server'])
def _create_net_subnet_ret_net_from_cidr(self, cidr):
name_net = data_utils.rand_name(self.__class__.__name__)
net = self.networks_client.create_network(name=name_net)
self.addCleanup(self.networks_client.delete_network,
net['network']['id'])
subnet = self.subnets_client.create_subnet(
network_id=net['network']['id'],
cidr=cidr,
ip_version=4)
self.addCleanup(self.subnets_client.delete_subnet,
subnet['subnet']['id'])
return net
@test.attr(type='smoke')
@test.idempotent_id('5de47127-9977-400a-936f-abcfbec1218f')
def test_verify_server_details(self):
# Verify the specified server attributes are set correctly
self.assertEqual(self.accessIPv4, self.server['accessIPv4'])
# NOTE(maurosr): See http://tools.ietf.org/html/rfc5952 (section 4)
# Here we compare directly with the canonicalized format.
self.assertEqual(self.server['accessIPv6'],
str(netaddr.IPAddress(self.accessIPv6)))
self.assertEqual(self.name, self.server['name'])
self.assertEqual(self.image_ref, self.server['image']['id'])
self.assertEqual(self.flavor_ref, self.server['flavor']['id'])
self.assertEqual(self.meta, self.server['metadata'])
@test.attr(type='smoke')
@test.idempotent_id('9a438d88-10c6-4bcd-8b5b-5b6e25e1346f')
def test_list_servers(self):
# The created server should be in the list of all servers
body = self.client.list_servers()
servers = body['servers']
found = any([i for i in servers if i['id'] == self.server['id']])
self.assertTrue(found)
@test.idempotent_id('585e934c-448e-43c4-acbf-d06a9b899997')
def test_list_servers_with_detail(self):
# The created server should be in the detailed list of all servers
body = self.client.list_servers(detail=True)
servers = body['servers']
found = any([i for i in servers if i['id'] == self.server['id']])
self.assertTrue(found)
    @test.idempotent_id('cbc0f52f-05aa-492b-bdc1-84b575ca294b')
    @testtools.skipUnless(CONF.validation.run_validation,
                          'Instance validation tests are disabled.')
    def test_verify_created_server_vcpus(self):
        """The vCPU count reported inside the guest matches the flavor."""
        # Verify that the number of vcpus reported by the instance matches
        # the amount stated by the flavor
        flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
        # SSH into the guest using the validation keypair set up for the class.
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(self.server),
            self.ssh_user,
            self.password,
            self.validation_resources['keypair']['private_key'])
        self.assertEqual(flavor['vcpus'], linux_client.get_number_of_vcpus())
    @test.idempotent_id('ac1ad47f-984b-4441-9274-c9079b7a0666')
    @testtools.skipUnless(CONF.validation.run_validation,
                          'Instance validation tests are disabled.')
    def test_host_name_is_same_as_server_name(self):
        """The guest hostname equals the server name given at boot."""
        # Verify the instance host name is the same as the server name
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(self.server),
            self.ssh_user,
            self.password,
            self.validation_resources['keypair']['private_key'])
        self.assertTrue(linux_client.hostname_equals_servername(self.name))
    @test.idempotent_id('ed20d3fb-9d1f-4329-b160-543fbd5d9811')
    def test_create_server_with_scheduler_hint_group(self):
        """A server booted with a 'group' hint becomes a member of the group."""
        # Create a server with the scheduler hint "group".
        name = data_utils.rand_name('server_group')
        policies = ['affinity']
        body = self.server_groups_client.create_server_group(
            name=name, policies=policies)['server_group']
        group_id = body['id']
        self.addCleanup(self.server_groups_client.delete_server_group,
                        group_id)
        hints = {'group': group_id}
        server = self.create_test_server(scheduler_hints=hints,
                                         wait_until='ACTIVE')
        # Check a server is in the group
        server_group = (self.server_groups_client.show_server_group(group_id)
                        ['server_group'])
        self.assertIn(server['id'], server_group['members'])
    @test.idempotent_id('0578d144-ed74-43f8-8e57-ab10dbf9b3c2')
    @testtools.skipUnless(CONF.service_available.neutron,
                          'Neutron service must be available.')
    def test_verify_multiple_nics_order(self):
        """Boot a server on two networks; the NIC order must be preserved."""
        # Verify that the networks order given at the server creation is
        # preserved within the server.
        net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
        net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
        networks = [{'uuid': net1['network']['id']},
                    {'uuid': net2['network']['id']}]
        server_multi_nics = self.create_test_server(
            networks=networks, wait_until='ACTIVE')
        # Cleanup server; this is needed in the test case because with the LIFO
        # nature of the cleanups, if we don't delete the server first, the port
        # will still be part of the subnet and we'll get a 409 from Neutron
        # when trying to delete the subnet. The tear down in the base class
        # will try to delete the server and get a 404 but it's ignored so
        # we're OK.
        def cleanup_server():
            self.client.delete_server(server_multi_nics['id'])
            waiters.wait_for_server_termination(self.client,
                                                server_multi_nics['id'])
        self.addCleanup(cleanup_server)
        addresses = (self.client.list_addresses(server_multi_nics['id'])
                     ['addresses'])
        # We can't predict the ip addresses assigned to the server on networks.
        # Sometimes the assigned addresses are ['19.80.0.2', '19.86.0.2'], at
        # other times ['19.80.0.3', '19.86.0.3']. So we check if the first
        # address is in first network, similarly second address is in second
        # network.
        addr = [addresses[net1['network']['name']][0]['addr'],
                addresses[net2['network']['name']][0]['addr']]
        networks = [netaddr.IPNetwork('19.80.0.0/24'),
                    netaddr.IPNetwork('19.86.0.0/24')]
        for address, network in zip(addr, networks):
            self.assertIn(address, network)
    @test.idempotent_id('1678d144-ed74-43f8-8e57-ab10dbf9b3c2')
    @testtools.skipUnless(CONF.service_available.neutron,
                          'Neutron service must be available.')
    # The below skipUnless should be removed once Kilo-eol happens.
    @testtools.skipUnless(CONF.compute_feature_enabled.
                          allow_duplicate_networks,
                          'Duplicate networks must be allowed')
    def test_verify_duplicate_network_nics(self):
        """Booting with two NICs on the same network must succeed."""
        # Verify that server creation does not fail when more than one nic
        # is created on the same network.
        net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
        net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
        networks = [{'uuid': net1['network']['id']},
                    {'uuid': net2['network']['id']},
                    {'uuid': net1['network']['id']}]
        server_multi_nics = self.create_test_server(
            networks=networks, wait_until='ACTIVE')
        # Delete the server before the subnets (cleanups run LIFO) to avoid
        # a 409 from Neutron while a port is still attached to the subnet.
        def cleanup_server():
            self.client.delete_server(server_multi_nics['id'])
            waiters.wait_for_server_termination(self.client,
                                                server_multi_nics['id'])
        self.addCleanup(cleanup_server)
        addresses = (self.client.list_addresses(server_multi_nics['id'])
                     ['addresses'])
        # Each assigned address must fall inside the network it was ordered
        # on; the first and third NIC both live on net1.
        addr = [addresses[net1['network']['name']][0]['addr'],
                addresses[net2['network']['name']][0]['addr'],
                addresses[net1['network']['name']][1]['addr']]
        networks = [netaddr.IPNetwork('19.80.0.0/24'),
                    netaddr.IPNetwork('19.86.0.0/24'),
                    netaddr.IPNetwork('19.80.0.0/24')]
        for address, network in zip(addr, networks):
            self.assertIn(address, network)
class ServersWithSpecificFlavorTestJSON(base.BaseV2ComputeAdminTest):
    """Server tests that need purpose-built flavors (admin credentials)."""
    # DiskConfig mode applied to servers created by this class.
    disk_config = 'AUTO'
    @classmethod
    def setup_credentials(cls):
        cls.prepare_instance_network()
        super(ServersWithSpecificFlavorTestJSON, cls).setup_credentials()
    @classmethod
    def setup_clients(cls):
        super(ServersWithSpecificFlavorTestJSON, cls).setup_clients()
        # Flavor creation/deletion goes through the admin flavors client.
        cls.flavor_client = cls.os_adm.flavors_client
        cls.client = cls.servers_client
    @classmethod
    def resource_setup(cls):
        cls.set_validation_resources()
        super(ServersWithSpecificFlavorTestJSON, cls).resource_setup()
    @test.idempotent_id('b3c7bcfc-bb5b-4e22-b517-c7f686b802ca')
    @testtools.skipUnless(CONF.validation.run_validation,
                          'Instance validation tests are disabled.')
    def test_verify_created_server_ephemeral_disk(self):
        """Booting with an ephemeral-disk flavor adds exactly one partition."""
        # Verify that the ephemeral disk is created when creating server
        flavor_base = self.flavors_client.show_flavor(
            self.flavor_ref)['flavor']
        # Helper: flavor matching the base flavor plus a 1 GB ephemeral disk.
        def create_flavor_with_extra_specs():
            flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
            flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
            ram = flavor_base['ram']
            vcpus = flavor_base['vcpus']
            disk = flavor_base['disk']
            # Create a flavor with extra specs
            flavor = (self.flavor_client.
                      create_flavor(name=flavor_with_eph_disk_name,
                                    ram=ram, vcpus=vcpus, disk=disk,
                                    id=flavor_with_eph_disk_id,
                                    ephemeral=1))['flavor']
            self.addCleanup(flavor_clean_up, flavor['id'])
            return flavor['id']
        # Helper: flavor matching the base flavor with no ephemeral disk.
        def create_flavor_without_extra_specs():
            flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
            flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)
            ram = flavor_base['ram']
            vcpus = flavor_base['vcpus']
            disk = flavor_base['disk']
            # Create a flavor without extra specs
            flavor = (self.flavor_client.
                      create_flavor(name=flavor_no_eph_disk_name,
                                    ram=ram, vcpus=vcpus, disk=disk,
                                    id=flavor_no_eph_disk_id))['flavor']
            self.addCleanup(flavor_clean_up, flavor['id'])
            return flavor['id']
        # Helper: delete a flavor and wait until it is really gone.
        def flavor_clean_up(flavor_id):
            self.flavor_client.delete_flavor(flavor_id)
            self.flavor_client.wait_for_resource_deletion(flavor_id)
        flavor_with_eph_disk_id = create_flavor_with_extra_specs()
        flavor_no_eph_disk_id = create_flavor_without_extra_specs()
        admin_pass = self.image_ssh_password
        server_no_eph_disk = self.create_test_server(
            validatable=True,
            wait_until='ACTIVE',
            adminPass=admin_pass,
            flavor=flavor_no_eph_disk_id)
        # Get partition number of server without extra specs.
        server_no_eph_disk = self.client.show_server(
            server_no_eph_disk['id'])['server']
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(server_no_eph_disk),
            self.ssh_user,
            admin_pass,
            self.validation_resources['keypair']['private_key'])
        partition_num = len(linux_client.get_partitions().split('\n'))
        # Explicit server deletion necessary for Juno compatibility
        self.client.delete_server(server_no_eph_disk['id'])
        server_with_eph_disk = self.create_test_server(
            validatable=True,
            wait_until='ACTIVE',
            adminPass=admin_pass,
            flavor=flavor_with_eph_disk_id)
        server_with_eph_disk = self.client.show_server(
            server_with_eph_disk['id'])['server']
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(server_with_eph_disk),
            self.ssh_user,
            admin_pass,
            self.validation_resources['keypair']['private_key'])
        partition_num_emph = len(linux_client.get_partitions().split('\n'))
        # The ephemeral-flavor server should show exactly one extra partition.
        self.assertEqual(partition_num + 1, partition_num_emph)
class ServersTestManualDisk(ServersTestJSON):
    """Re-run the ServersTestJSON suite using MANUAL disk_config."""
    disk_config = 'MANUAL'
    @classmethod
    def skip_checks(cls):
        super(ServersTestManualDisk, cls).skip_checks()
        if not CONF.compute_feature_enabled.disk_config:
            raise cls.skipException("DiskConfig extension not enabled.")
| 42.569832
| 79
| 0.639764
|
import netaddr
import testtools
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest.common.utils.linux import remote_client
from tempest.common import waiters
from tempest import config
from tempest import test
CONF = config.CONF
class ServersTestJSON(base.BaseV2ComputeTest):
disk_config = 'AUTO'
@classmethod
def setup_credentials(cls):
cls.prepare_instance_network()
super(ServersTestJSON, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(ServersTestJSON, cls).setup_clients()
cls.client = cls.servers_client
cls.network_client = cls.os.network_client
cls.networks_client = cls.os.networks_client
cls.subnets_client = cls.os.subnets_client
@classmethod
def resource_setup(cls):
cls.set_validation_resources()
super(ServersTestJSON, cls).resource_setup()
cls.meta = {'hello': 'world'}
cls.accessIPv4 = '1.1.1.1'
cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
cls.name = data_utils.rand_name('server')
cls.password = data_utils.rand_password()
disk_config = cls.disk_config
cls.server_initial = cls.create_test_server(
validatable=True,
wait_until='ACTIVE',
name=cls.name,
metadata=cls.meta,
accessIPv4=cls.accessIPv4,
accessIPv6=cls.accessIPv6,
disk_config=disk_config,
adminPass=cls.password)
cls.server = (cls.client.show_server(cls.server_initial['id'])
['server'])
def _create_net_subnet_ret_net_from_cidr(self, cidr):
name_net = data_utils.rand_name(self.__class__.__name__)
net = self.networks_client.create_network(name=name_net)
self.addCleanup(self.networks_client.delete_network,
net['network']['id'])
subnet = self.subnets_client.create_subnet(
network_id=net['network']['id'],
cidr=cidr,
ip_version=4)
self.addCleanup(self.subnets_client.delete_subnet,
subnet['subnet']['id'])
return net
@test.attr(type='smoke')
@test.idempotent_id('5de47127-9977-400a-936f-abcfbec1218f')
def test_verify_server_details(self):
self.assertEqual(self.accessIPv4, self.server['accessIPv4'])
self.assertEqual(self.server['accessIPv6'],
str(netaddr.IPAddress(self.accessIPv6)))
self.assertEqual(self.name, self.server['name'])
self.assertEqual(self.image_ref, self.server['image']['id'])
self.assertEqual(self.flavor_ref, self.server['flavor']['id'])
self.assertEqual(self.meta, self.server['metadata'])
@test.attr(type='smoke')
@test.idempotent_id('9a438d88-10c6-4bcd-8b5b-5b6e25e1346f')
def test_list_servers(self):
body = self.client.list_servers()
servers = body['servers']
found = any([i for i in servers if i['id'] == self.server['id']])
self.assertTrue(found)
@test.idempotent_id('585e934c-448e-43c4-acbf-d06a9b899997')
def test_list_servers_with_detail(self):
body = self.client.list_servers(detail=True)
servers = body['servers']
found = any([i for i in servers if i['id'] == self.server['id']])
self.assertTrue(found)
@test.idempotent_id('cbc0f52f-05aa-492b-bdc1-84b575ca294b')
@testtools.skipUnless(CONF.validation.run_validation,
'Instance validation tests are disabled.')
def test_verify_created_server_vcpus(self):
flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
linux_client = remote_client.RemoteClient(
self.get_server_ip(self.server),
self.ssh_user,
self.password,
self.validation_resources['keypair']['private_key'])
self.assertEqual(flavor['vcpus'], linux_client.get_number_of_vcpus())
@test.idempotent_id('ac1ad47f-984b-4441-9274-c9079b7a0666')
@testtools.skipUnless(CONF.validation.run_validation,
'Instance validation tests are disabled.')
def test_host_name_is_same_as_server_name(self):
linux_client = remote_client.RemoteClient(
self.get_server_ip(self.server),
self.ssh_user,
self.password,
self.validation_resources['keypair']['private_key'])
self.assertTrue(linux_client.hostname_equals_servername(self.name))
@test.idempotent_id('ed20d3fb-9d1f-4329-b160-543fbd5d9811')
def test_create_server_with_scheduler_hint_group(self):
name = data_utils.rand_name('server_group')
policies = ['affinity']
body = self.server_groups_client.create_server_group(
name=name, policies=policies)['server_group']
group_id = body['id']
self.addCleanup(self.server_groups_client.delete_server_group,
group_id)
hints = {'group': group_id}
server = self.create_test_server(scheduler_hints=hints,
wait_until='ACTIVE')
server_group = (self.server_groups_client.show_server_group(group_id)
['server_group'])
self.assertIn(server['id'], server_group['members'])
@test.idempotent_id('0578d144-ed74-43f8-8e57-ab10dbf9b3c2')
@testtools.skipUnless(CONF.service_available.neutron,
'Neutron service must be available.')
def test_verify_multiple_nics_order(self):
net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
networks = [{'uuid': net1['network']['id']},
{'uuid': net2['network']['id']}]
server_multi_nics = self.create_test_server(
networks=networks, wait_until='ACTIVE')
# will still be part of the subnet and we'll get a 409 from Neutron
# we're OK.
def cleanup_server():
self.client.delete_server(server_multi_nics['id'])
waiters.wait_for_server_termination(self.client,
server_multi_nics['id'])
self.addCleanup(cleanup_server)
addresses = (self.client.list_addresses(server_multi_nics['id'])
['addresses'])
# Sometimes the assigned addresses are ['19.80.0.2', '19.86.0.2'], at
# other times ['19.80.0.3', '19.86.0.3']. So we check if the first
# address is in first network, similarly second address is in second
# network.
addr = [addresses[net1['network']['name']][0]['addr'],
addresses[net2['network']['name']][0]['addr']]
networks = [netaddr.IPNetwork('19.80.0.0/24'),
netaddr.IPNetwork('19.86.0.0/24')]
for address, network in zip(addr, networks):
self.assertIn(address, network)
@test.idempotent_id('1678d144-ed74-43f8-8e57-ab10dbf9b3c2')
@testtools.skipUnless(CONF.service_available.neutron,
'Neutron service must be available.')
# The below skipUnless should be removed once Kilo-eol happens.
@testtools.skipUnless(CONF.compute_feature_enabled.
allow_duplicate_networks,
'Duplicate networks must be allowed')
def test_verify_duplicate_network_nics(self):
# Verify that server creation does not fail when more than one nic
# is created on the same network.
net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
networks = [{'uuid': net1['network']['id']},
{'uuid': net2['network']['id']},
{'uuid': net1['network']['id']}]
server_multi_nics = self.create_test_server(
networks=networks, wait_until='ACTIVE')
def cleanup_server():
self.client.delete_server(server_multi_nics['id'])
waiters.wait_for_server_termination(self.client,
server_multi_nics['id'])
self.addCleanup(cleanup_server)
addresses = (self.client.list_addresses(server_multi_nics['id'])
['addresses'])
addr = [addresses[net1['network']['name']][0]['addr'],
addresses[net2['network']['name']][0]['addr'],
addresses[net1['network']['name']][1]['addr']]
networks = [netaddr.IPNetwork('19.80.0.0/24'),
netaddr.IPNetwork('19.86.0.0/24'),
netaddr.IPNetwork('19.80.0.0/24')]
for address, network in zip(addr, networks):
self.assertIn(address, network)
class ServersWithSpecificFlavorTestJSON(base.BaseV2ComputeAdminTest):
disk_config = 'AUTO'
@classmethod
def setup_credentials(cls):
cls.prepare_instance_network()
super(ServersWithSpecificFlavorTestJSON, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(ServersWithSpecificFlavorTestJSON, cls).setup_clients()
cls.flavor_client = cls.os_adm.flavors_client
cls.client = cls.servers_client
@classmethod
def resource_setup(cls):
cls.set_validation_resources()
super(ServersWithSpecificFlavorTestJSON, cls).resource_setup()
@test.idempotent_id('b3c7bcfc-bb5b-4e22-b517-c7f686b802ca')
@testtools.skipUnless(CONF.validation.run_validation,
'Instance validation tests are disabled.')
def test_verify_created_server_ephemeral_disk(self):
# Verify that the ephemeral disk is created when creating server
flavor_base = self.flavors_client.show_flavor(
self.flavor_ref)['flavor']
def create_flavor_with_extra_specs():
flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
ram = flavor_base['ram']
vcpus = flavor_base['vcpus']
disk = flavor_base['disk']
# Create a flavor with extra specs
flavor = (self.flavor_client.
create_flavor(name=flavor_with_eph_disk_name,
ram=ram, vcpus=vcpus, disk=disk,
id=flavor_with_eph_disk_id,
ephemeral=1))['flavor']
self.addCleanup(flavor_clean_up, flavor['id'])
return flavor['id']
def create_flavor_without_extra_specs():
flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)
ram = flavor_base['ram']
vcpus = flavor_base['vcpus']
disk = flavor_base['disk']
# Create a flavor without extra specs
flavor = (self.flavor_client.
create_flavor(name=flavor_no_eph_disk_name,
ram=ram, vcpus=vcpus, disk=disk,
id=flavor_no_eph_disk_id))['flavor']
self.addCleanup(flavor_clean_up, flavor['id'])
return flavor['id']
def flavor_clean_up(flavor_id):
self.flavor_client.delete_flavor(flavor_id)
self.flavor_client.wait_for_resource_deletion(flavor_id)
flavor_with_eph_disk_id = create_flavor_with_extra_specs()
flavor_no_eph_disk_id = create_flavor_without_extra_specs()
admin_pass = self.image_ssh_password
server_no_eph_disk = self.create_test_server(
validatable=True,
wait_until='ACTIVE',
adminPass=admin_pass,
flavor=flavor_no_eph_disk_id)
# Get partition number of server without extra specs.
server_no_eph_disk = self.client.show_server(
server_no_eph_disk['id'])['server']
linux_client = remote_client.RemoteClient(
self.get_server_ip(server_no_eph_disk),
self.ssh_user,
admin_pass,
self.validation_resources['keypair']['private_key'])
partition_num = len(linux_client.get_partitions().split('\n'))
# Explicit server deletion necessary for Juno compatibility
self.client.delete_server(server_no_eph_disk['id'])
server_with_eph_disk = self.create_test_server(
validatable=True,
wait_until='ACTIVE',
adminPass=admin_pass,
flavor=flavor_with_eph_disk_id)
server_with_eph_disk = self.client.show_server(
server_with_eph_disk['id'])['server']
linux_client = remote_client.RemoteClient(
self.get_server_ip(server_with_eph_disk),
self.ssh_user,
admin_pass,
self.validation_resources['keypair']['private_key'])
partition_num_emph = len(linux_client.get_partitions().split('\n'))
self.assertEqual(partition_num + 1, partition_num_emph)
class ServersTestManualDisk(ServersTestJSON):
disk_config = 'MANUAL'
@classmethod
def skip_checks(cls):
super(ServersTestManualDisk, cls).skip_checks()
if not CONF.compute_feature_enabled.disk_config:
msg = "DiskConfig extension not enabled."
raise cls.skipException(msg)
| true
| true
|
f719c1aa06398ac1ce2cbf746acc94255267f1b7
| 1,306
|
py
|
Python
|
Rock Spock Paper Lizard Scissor.py
|
manavbabber/IIPP
|
009bb0e74f7306d6880ed1dc3e748c604e76ad50
|
[
"MIT"
] | null | null | null |
Rock Spock Paper Lizard Scissor.py
|
manavbabber/IIPP
|
009bb0e74f7306d6880ed1dc3e748c604e76ad50
|
[
"MIT"
] | null | null | null |
Rock Spock Paper Lizard Scissor.py
|
manavbabber/IIPP
|
009bb0e74f7306d6880ed1dc3e748c604e76ad50
|
[
"MIT"
] | null | null | null |
import random
def name_to_number(name):
    """Map an RPSLS choice name to its number (rock=0 ... scissors=4).

    For an unknown name the original tuple fallback is preserved:
    (name, "is an invalid name").
    """
    mapping = {
        'rock': 0,
        'Spock': 1,
        'paper': 2,
        'lizard': 3,
        'scissors': 4,
    }
    if name in mapping:
        return mapping[name]
    # NOTE(review): returning a tuple instead of raising is surprising,
    # but kept for backward compatibility with existing callers.
    return name, "is an invalid name"
def number_to_name(number):
    """Map an RPSLS number (0..4) back to its choice name.

    For an out-of-range number the original tuple fallback is preserved:
    (number, "is an invalid number").
    """
    names = ('rock', 'Spock', 'paper', 'lizard', 'scissors')
    if isinstance(number, int) and 0 <= number <= 4:
        return names[number]
    # NOTE(review): returning a tuple instead of raising is surprising,
    # but kept for backward compatibility with existing callers.
    return number, "is an invalid number"
def rpsls(player_choice):
    # Play one round of Rock-Paper-Scissors-Lizard-Spock against a random
    # computer choice and print the outcome.  (Python 2 print statements.)
    print ""
    print "Player chooses",player_choice
    player_number = name_to_number(player_choice)
    comp_number = random.randrange(0,5)
    comp_choice = number_to_name(comp_number)
    print "Computer chooses",comp_choice
    # Winner is decided by (comp - player) mod 5: a difference of 1 or 2
    # means the computer's choice wins, 3 or 4 means the player's does.
    difference = (comp_number-player_number)%5
    if(difference == 0):
        print "Player and computer tie!"
    elif(difference == 1 or difference == 2 ):
        print "Computer wins!"
    elif(difference == 3 or difference == 4 ):
        print "Player wins!"
    else:
        print "Incorrect input"
# Demo run: play one round with each possible player choice.
rpsls("rock")
rpsls("Spock")
rpsls("paper")
rpsls("lizard")
rpsls("scissors")
| 27.208333
| 50
| 0.581164
|
import random
def name_to_number(name):
if(name=='rock'):
return 0
elif(name=='Spock'):
return 1
elif(name=='paper'):
return 2
elif(name=='lizard'):
return 3
elif(name=='scissors'):
return 4
else:
return name,"is an invalid name"
def number_to_name(number):
if(number == 0):
return 'rock'
elif(number == 1):
return 'Spock'
elif(number == 2):
return 'paper'
elif(number == 3):
return 'lizard'
elif(number == 4):
return 'scissors'
else:
return number,"is an invalid number"
def rpsls(player_choice):
print ""
print "Player chooses",player_choice
player_number = name_to_number(player_choice)
comp_number = random.randrange(0,5)
comp_choice = number_to_name(comp_number)
print "Computer chooses",comp_choice
difference = (comp_number-player_number)%5
if(difference == 0):
print "Player and computer tie!"
elif(difference == 1 or difference == 2 ):
print "Computer wins!"
elif(difference == 3 or difference == 4 ):
print "Player wins!"
else:
print "Incorrect input"
rpsls("rock")
rpsls("Spock")
rpsls("paper")
rpsls("lizard")
rpsls("scissors")
| false
| true
|
f719c272300d7b8fc3f56eac0566b018ef20c845
| 1,313
|
py
|
Python
|
ooobuild/dyn/i18n/x_calendar4.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/dyn/i18n/x_calendar4.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/dyn/i18n/x_calendar4.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.i18n
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
_DYNAMIC = False
# Use the live UNO class only at runtime inside an Office environment;
# static type checkers always take the stub branch below.
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
    _DYNAMIC = True
if not TYPE_CHECKING and _DYNAMIC:
    from com.sun.star.i18n import XCalendar4 as XCalendar4
    # Tag the runtime class with the namespace metadata the ooo tooling reads.
    setattr(XCalendar4, '__ooo_ns__', 'com.sun.star.i18n')
    setattr(XCalendar4, '__ooo_full_ns__', 'com.sun.star.i18n.XCalendar4')
    setattr(XCalendar4, '__ooo_type_name__', 'interface')
else:
    # Fall back to the generated static stub for type checking / non-UNO runs.
    from ...lo.i18n.x_calendar4 import XCalendar4 as XCalendar4
__all__ = ['XCalendar4']
| 35.486486
| 74
| 0.760091
|
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
_DYNAMIC = True
if not TYPE_CHECKING and _DYNAMIC:
from com.sun.star.i18n import XCalendar4 as XCalendar4
setattr(XCalendar4, '__ooo_ns__', 'com.sun.star.i18n')
setattr(XCalendar4, '__ooo_full_ns__', 'com.sun.star.i18n.XCalendar4')
setattr(XCalendar4, '__ooo_type_name__', 'interface')
else:
from ...lo.i18n.x_calendar4 import XCalendar4 as XCalendar4
__all__ = ['XCalendar4']
| true
| true
|
f719c298d161b599d989c8e2337e4c83af090b4b
| 1,483
|
py
|
Python
|
test/test_binary.py
|
teristam/openephys-fileIO
|
8089e7c4aff829c13a79656b8812a3d3e68eb1eb
|
[
"MIT"
] | 1
|
2020-08-16T21:52:10.000Z
|
2020-08-16T21:52:10.000Z
|
test/test_binary.py
|
teristam/openephys-fileIO
|
8089e7c4aff829c13a79656b8812a3d3e68eb1eb
|
[
"MIT"
] | null | null | null |
test/test_binary.py
|
teristam/openephys-fileIO
|
8089e7c4aff829c13a79656b8812a3d3e68eb1eb
|
[
"MIT"
] | null | null | null |
import numpy as np
from openephys_fileIO.fileIO import *
from openephys_fileIO.Binary import *
def test_write_binary_data():
    """Round-trip check: write a recording as binary, reload, and compare."""
    data_folder = 'test/data'
    # Read the recording in its native int16 representation.
    raw, headers = load_OpenEphysRecording4BinaryFile(
        data_folder, num_data_channel=1, num_aux_channel=1,
        num_adc_channel=1)
    print(headers)
    # Persist the samples plus the matching structure file.
    writeBinaryData(data_folder + '/experiment1/recording1/', raw)
    writeStructFile(
        data_folder + '/experiment1/recording1/structure.oebin', headers)
    # Re-read as float so the bit-per-volt scaling is applied.
    scaled, headers = load_OpenEphysRecording4BinaryFile(
        data_folder, num_data_channel=1, num_aux_channel=1,
        num_adc_channel=1, dtype=float)
    # Load through the official binary loader and compare.
    official, official_rate = Load('test/data')
    np.allclose(scaled.T, official['100']['0']['0'])
def test_numpy2binary():
    """Write a random numpy array in the binary format and reload it."""
    sample_rate = 30000
    signal = np.random.randn(3 * sample_rate, 4)
    bit_volts = 0.195
    data_folder = 'test/data2'
    channel_names = [f'CH{i}' for i in range(signal.shape[1])]
    writeBinaryData(data_folder + '/experiment1/recording1/',
                    signal, bit_volts)
    writeStructFile(data_folder + '/experiment1/recording1/structure.oebin',
                    samplerate=30000, num_channels=signal.shape[1],
                    bit_volts=bit_volts, channel_names=channel_names)
    # Reload through the official loader and compare with what we wrote.
    loaded, rate = Load(data_folder)
    np.allclose(signal, loaded['100']['0']['0'])
| 29.66
| 90
| 0.710722
|
import numpy as np
from openephys_fileIO.fileIO import *
from openephys_fileIO.Binary import *
def test_write_binary_data():
dataFolder = 'test/data'
data,headers = load_OpenEphysRecording4BinaryFile(dataFolder,
num_data_channel=1,num_aux_channel=1, num_adc_channel=1)
print(headers)
writeBinaryData(dataFolder+'/experiment1/recording1/',data)
writeStructFile(dataFolder+'/experiment1/recording1/structure.oebin',headers)
data,headers = load_OpenEphysRecording4BinaryFile(dataFolder,
num_data_channel=1,num_aux_channel=1, num_adc_channel=1,dtype=float)
data2, rate2 = Load('test/data')
np.allclose(data.T,data2['100']['0']['0'])
def test_numpy2binary():
Fs = 30000
x = np.random.randn(3*Fs,4)
bitVolts = 0.195
dataFolder = 'test/data2'
channel_names = [f'CH{i}' for i in range(x.shape[1])]
writeBinaryData(dataFolder+'/experiment1/recording1/', x, bitVolts)
writeStructFile(dataFolder+'/experiment1/recording1/structure.oebin',samplerate=30000,
num_channels= x.shape[1], bit_volts=bitVolts,channel_names=channel_names)
data, rate = Load(dataFolder)
np.allclose(x, data['100']['0']['0'])
| true
| true
|
f719c2d3c90414ada7ec442b5268bf062e2a60e0
| 23,986
|
py
|
Python
|
ckan/logic/__init__.py
|
robbi5/ckan
|
e89ca125dc68ddb9fe9bad68a401404146ba90c7
|
[
"BSD-3-Clause"
] | 6
|
2015-11-09T00:44:51.000Z
|
2019-11-21T14:56:01.000Z
|
ckan/logic/__init__.py
|
robbi5/ckan
|
e89ca125dc68ddb9fe9bad68a401404146ba90c7
|
[
"BSD-3-Clause"
] | 39
|
2015-02-18T17:32:23.000Z
|
2022-03-11T18:03:36.000Z
|
ckan/logic/__init__.py
|
robbi5/ckan
|
e89ca125dc68ddb9fe9bad68a401404146ba90c7
|
[
"BSD-3-Clause"
] | 17
|
2015-03-13T18:05:05.000Z
|
2020-11-06T13:55:32.000Z
|
# encoding: utf-8
import inspect
import functools
import logging
import re
import importlib
import inspect
from collections import defaultdict
from werkzeug.utils import import_string
import six
from six import string_types, text_type
import ckan.model as model
import ckan.authz as authz
import ckan.lib.navl.dictization_functions as df
import ckan.plugins as p
from ckan.common import _, c
log = logging.getLogger(__name__)
_validate = df.validate
class NameConflict(Exception):
    # NOTE(review): raised on naming collisions; the raising sites are
    # outside this chunk -- confirm exact semantics before documenting more.
    pass
class UsernamePasswordError(Exception):
    # NOTE(review): presumably raised when username/password authentication
    # fails; raising sites are outside this chunk -- confirm.
    pass
class ActionError(Exception):
    """Base class for exceptions raised by logic action functions.

    The original message object is kept on ``self.message``; ``__str__``
    always yields text, coercing non-string messages first.
    """
    def __init__(self, message=''):
        self.message = message
        super(ActionError, self).__init__(message)

    def __str__(self):
        raw = self.message
        text = raw if isinstance(raw, six.string_types) else str(raw)
        return six.ensure_text(text)
class NotFound(ActionError):
    '''Exception raised by logic functions when a given object is not found.
    For example :py:func:`~ckan.logic.action.get.package_show` raises
    :py:exc:`~ckan.plugins.toolkit.ObjectNotFound` if no package with the
    given ``id`` exists.
    '''
    # Message handling and __str__ are inherited from ActionError.
    pass
class NotAuthorized(ActionError):
    '''Exception raised when the user is not authorized to call the action.
    For example :py:func:`~ckan.logic.action.create.package_create` raises
    :py:exc:`~ckan.plugins.toolkit.NotAuthorized` if the user is not authorized
    to create packages.
    '''
    # Message handling and __str__ are inherited from ActionError.
    pass
class ValidationError(ActionError):
    '''Exception raised by action functions when validating their given
    ``data_dict`` fails.
    '''
    def __init__(self, error_dict, error_summary=None, extra_msg=None):
        # Accept a bare message as well as a full error dict.
        if not isinstance(error_dict, dict):
            error_dict = {'message': error_dict}
        # tags errors are a mess so let's clean them up
        if 'tags' in error_dict:
            tag_errors = []
            for error in error_dict['tags']:
                try:
                    tag_errors.append(', '.join(error['name']))
                except KeyError:
                    # e.g. if it is a vocabulary_id error
                    if error:
                        tag_errors.append(error)
            error_dict['tags'] = tag_errors
        self.error_dict = error_dict
        # Caller-supplied summary takes precedence in error_summary below.
        self._error_summary = error_summary
        super(ValidationError, self).__init__(extra_msg)
    @property
    def error_summary(self):
        ''' autogenerate the summary if not supplied '''
        def summarise(error_dict):
            ''' Do some i18n stuff on the error_dict keys '''
            def prettify(field_name):
                # Capitalize the field and upper-case any standalone
                # 'url' token (e.g. 'image_url' -> 'Image URL').
                field_name = re.sub(r'(?<!\w)[Uu]rl(?!\w)', 'URL',
                                    field_name.replace('_', ' ').capitalize())
                return _(field_name.replace('_', ' '))
            summary = {}
            for key, error in six.iteritems(error_dict):
                if key == 'resources':
                    summary[_('Resources')] = _('Package resource(s) invalid')
                elif key == 'extras':
                    # Collect the distinct extra keys that failed validation.
                    errors_extras = []
                    for item in error:
                        if (item.get('key') and
                                item['key'][0] not in errors_extras):
                            errors_extras.append(item.get('key')[0])
                    summary[_('Extras')] = ', '.join(errors_extras)
                elif key == 'extras_validation':
                    summary[_('Extras')] = error[0]
                elif key == 'tags':
                    summary[_('Tags')] = error[0]
                else:
                    # Generic case: first error message under a prettified key.
                    summary[_(prettify(key))] = error[0]
            return summary
        if self._error_summary:
            return self._error_summary
        return summarise(self.error_dict)
    def __str__(self):
        err_msgs = (super(ValidationError, self).__str__(),
                    self.error_dict)
        return ' - '.join([str(err_msg) for err_msg in err_msgs if err_msg])
log = logging.getLogger(__name__)
def parse_params(params, ignore_keys=None):
    '''Return a plain dict with standardised values from *params*.

    Empty value lists become '', single-item lists are unwrapped, and keys
    listed in *ignore_keys* are dropped.  Meant to run before tuplize_dict.
    '''
    skip = ignore_keys or []
    parsed = {}
    for key in params:
        if key in skip:
            continue
        # Pylons-style objects expose getall; Flask requests expose getlist.
        if hasattr(params, 'getall'):
            values = params.getall(key)
        else:
            values = params.getlist(key)
        if not values:
            parsed[key] = ''
        elif len(values) == 1:
            parsed[key] = values[0]
        else:
            parsed[key] = values
    return parsed
def clean_dict(data_dict):
    '''Recursively strip empty dicts out of any list values of *data_dict*.

    For every value that is a list of dicts, members whose values are all
    falsy are removed in place; non-empty members are cleaned recursively.
    A list whose first member is a string is left untouched.  Returns the
    same (mutated) dict.
    '''
    for value in data_dict.values():
        if not isinstance(value, list):
            continue
        for member in list(value):
            if isinstance(member, string_types):
                break
            if any(member.values()):
                clean_dict(member)
            else:
                value.remove(member)
    return data_dict
def tuplize_dict(data_dict):
    '''Takes a dict with keys of the form 'table__0__key' and converts them
    to a tuple like ('table', 0, 'key').
    Dict should be put through parse_dict before this function, to have
    values standardized.
    May raise a DataError if the format of the key is incorrect.
    '''
    tuplized_dict = {}
    for key, value in six.iteritems(data_dict):
        key_list = key.split('__')
        for num, key in enumerate(key_list):
            # Odd positions in the key path are row indices and must be ints.
            if num % 2 == 1:
                try:
                    key_list[num] = int(key)
                except ValueError:
                    raise df.DataError('Bad key')
        tuplized_dict[tuple(key_list)] = value
    return tuplized_dict
def untuplize_dict(tuplized_dict):
    '''Inverse of tuplize_dict: convert tuple keys like ('table', 0, 'key')
    back into flat string keys like 'table__0__key'.
    '''
    data_dict = {}
    for key, value in six.iteritems(tuplized_dict):
        new_key = '__'.join([str(item) for item in key])
        data_dict[new_key] = value
    return data_dict
def flatten_to_string_key(dict):
    '''Flatten a nested dict and convert the resulting tuple keys into
    'a__0__b'-style string keys.
    NOTE(review): the parameter shadows the builtin `dict`; left as-is
    since renaming it could break keyword-argument callers.
    '''
    flattented = df.flatten_dict(dict)
    return untuplize_dict(flattented)
def _prepopulate_context(context):
    '''Return `context` (a fresh dict if None was given) with the default
    'model', 'session' and, when available, 'user' keys filled in.
    Existing keys are never overwritten.
    '''
    if context is None:
        context = {}
    context.setdefault('model', model)
    context.setdefault('session', model.Session)
    # The thread-local `c` proxy may be unusable in several distinct ways;
    # in all of them the 'user' default is simply left out.
    try:
        context.setdefault('user', c.user)
    except AttributeError:
        # c.user not set
        pass
    except RuntimeError:
        # Outside of request context
        pass
    except TypeError:
        # c not registered
        pass
    return context
def check_access(action, context, data_dict=None):
    '''Calls the authorization function for the provided action
    This is the only function that should be called to determine whether a
    user (or an anonymous request) is allowed to perform a particular action.
    The function accepts a context object, which should contain a 'user' key
    with the name of the user performing the action, and optionally a
    dictionary with extra data to be passed to the authorization function.
    For example::
        check_access('package_update', context, data_dict)
    If not already there, the function will add an `auth_user_obj` key to the
    context object with the actual User object (in case it exists in the
    database). This check is only performed once per context object.
    Raise :py:exc:`~ckan.plugins.toolkit.NotAuthorized` if the user is not
    authorized to call the named action function.
    If the user *is* authorized to call the action, return ``True``.
    :param action: the name of the action function, eg. ``'package_create'``
    :type action: string
    :param context:
    :type context: dict
    :param data_dict:
    :type data_dict: dict
    :raises: :py:exc:`~ckan.plugins.toolkit.NotAuthorized` if the user is not
        authorized to call the named action
    '''
    # Auth Auditing. We remove this call from the __auth_audit stack to show
    # we have called the auth function
    try:
        audit = context.get('__auth_audit', [])[-1]
    except IndexError:
        audit = ''
    if audit and audit[0] == action:
        context['__auth_audit'].pop()
    user = context.get('user')
    try:
        if 'auth_user_obj' not in context:
            context['auth_user_obj'] = None
        # Resolve the User object for this context at most once; the
        # '__auth_user_obj_checked' flag caches the lookup across calls
        # that reuse the same context dict.
        if not context.get('ignore_auth'):
            if not context.get('__auth_user_obj_checked'):
                if context.get('user') and not context.get('auth_user_obj'):
                    context['auth_user_obj'] = \
                        model.User.by_name(context['user'])
                context['__auth_user_obj_checked'] = True
        context = _prepopulate_context(context)
        logic_authorization = authz.is_authorized(action, context,
                                                  data_dict)
        if not logic_authorization['success']:
            msg = logic_authorization.get('msg', '')
            raise NotAuthorized(msg)
    except NotAuthorized as e:
        log.debug(u'check access NotAuthorized - %s user=%s "%s"',
                  action, user, text_type(e))
        raise
    log.debug('check access OK - %s user=%s', action, user)
    return True
_actions = {}
def clear_actions_cache():
    '''Reset the module-level action cache so get_action() rebuilds it.'''
    _actions.clear()
def chained_action(func):
    '''Decorator marking an action function as chained: it wraps (and
    receives as its first argument) the previously registered action of
    the same name.
    '''
    setattr(func, 'chained_action', True)
    return func
def _is_chained_action(func):
return getattr(func, 'chained_action', False)
def get_action(action):
    '''Return the named :py:mod:`ckan.logic.action` function.
    For example ``get_action('package_create')`` will normally return the
    :py:func:`ckan.logic.action.create.package_create()` function.
    For documentation of the available action functions, see
    :ref:`api-reference`.
    You should always use ``get_action()`` instead of importing an action
    function directly, because :py:class:`~ckan.plugins.interfaces.IActions`
    plugins can override action functions, causing ``get_action()`` to return a
    plugin-provided function instead of the default one.
    Usage::
        import ckan.plugins.toolkit as toolkit
        # Call the package_create action function:
        toolkit.get_action('package_create')(context, data_dict)
    As the context parameter passed to an action function is commonly::
        context = {'model': ckan.model, 'session': ckan.model.Session,
            'user': pylons.c.user}
    an action function returned by ``get_action()`` will automatically add
    these parameters to the context if they are not defined. This is
    especially useful for plugins as they should not really be importing parts
    of ckan eg :py:mod:`ckan.model` and as such do not have access to ``model``
    or ``model.Session``.
    If a ``context`` of ``None`` is passed to the action function then the
    default context dict will be created.
    .. note::
        Many action functions modify the context dict. It can therefore
        not be reused for multiple calls of the same or different action
        functions.
    :param action: name of the action function to return,
        eg. ``'package_create'``
    :type action: string
    :returns: the named action function
    :rtype: callable
    '''
    # Fast path: the cache is built once and reused for every later lookup.
    if _actions:
        if action not in _actions:
            raise KeyError("Action '%s' not found" % action)
        return _actions.get(action)
    # Otherwise look in all the plugins to resolve all possible First
    # get the default ones in the ckan/logic/action directory Rather
    # than writing them out in full will use importlib.import_module
    # to load anything from ckan.logic.action that looks like it might
    # be an action
    for action_module_name in ['get', 'create', 'update', 'delete', 'patch']:
        module = importlib.import_module(
            '.' + action_module_name, 'ckan.logic.action')
        for k, v in authz.get_local_functions(module):
            _actions[k] = v
            # Whitelist all actions defined in logic/action/get.py as
            # being side-effect free.
            if action_module_name == 'get' and \
                    not hasattr(v, 'side_effect_free'):
                v.side_effect_free = True
    # Then overwrite them with any specific ones in the plugins:
    resolved_action_plugins = {}
    fetched_actions = {}
    chained_actions = defaultdict(list)
    for plugin in p.PluginImplementations(p.IActions):
        for name, action_function in plugin.get_actions().items():
            if _is_chained_action(action_function):
                chained_actions[name].append(action_function)
            elif name in resolved_action_plugins:
                # Two plugins providing the same (non-chained) action is a
                # hard configuration error.
                raise NameConflict(
                    'The action %r is already implemented in %r' % (
                        name,
                        resolved_action_plugins[name]
                    )
                )
            else:
                resolved_action_plugins[name] = plugin.name
                # Extensions are exempted from the auth audit for now
                # This needs to be resolved later
                action_function.auth_audit_exempt = True
                fetched_actions[name] = action_function
    for name, func_list in six.iteritems(chained_actions):
        if name not in fetched_actions and name not in _actions:
            # nothing to override from plugins or core
            raise NotFound('The action %r is not found for chained action' % (
                name))
        for func in reversed(func_list):
            # try other plugins first, fall back to core
            prev_func = fetched_actions.get(name, _actions.get(name))
            new_func = functools.partial(func, prev_func)
            # persisting attributes to the new partial function
            for attribute, value in six.iteritems(func.__dict__):
                setattr(new_func, attribute, value)
            fetched_actions[name] = new_func
    # Use the updated ones in preference to the originals.
    _actions.update(fetched_actions)
    # wrap the functions
    for action_name, _action in _actions.items():
        # make_wrapped builds a fresh closure per action so each wrapper
        # binds its own _action/action_name (avoids the late-binding-in-a-
        # loop pitfall).
        def make_wrapped(_action, action_name):
            def wrapped(context=None, data_dict=None, **kw):
                if kw:
                    log.critical('%s was passed extra keywords %r'
                                 % (_action.__name__, kw))
                context = _prepopulate_context(context)
                # Auth Auditing - checks that the action function did call
                # check_access (unless there is no accompanying auth function).
                # We push the action name and id onto the __auth_audit stack
                # before calling the action, and check_access removes it.
                # (We need the id of the action in case the action is wrapped
                # inside an action of the same name, which happens in the
                # datastore)
                context.setdefault('__auth_audit', [])
                context['__auth_audit'].append((action_name, id(_action)))
                # check_access(action_name, context, data_dict=None)
                result = _action(context, data_dict, **kw)
                try:
                    audit = context['__auth_audit'][-1]
                    if audit[0] == action_name and audit[1] == id(_action):
                        if action_name not in authz.auth_functions_list():
                            log.debug('No auth function for %s' % action_name)
                        elif not getattr(_action, 'auth_audit_exempt', False):
                            raise Exception(
                                'Action function {0} did not call its '
                                'auth function'
                                .format(action_name))
                        # remove from audit stack
                        context['__auth_audit'].pop()
                except IndexError:
                    pass
                return result
            return wrapped
        fn = make_wrapped(_action, action_name)
        # we need to mirror the docstring
        fn.__doc__ = _action.__doc__
        # we need to retain the side effect free behaviour
        if getattr(_action, 'side_effect_free', False):
            fn.side_effect_free = True
        _actions[action_name] = fn
    return _actions.get(action)
def get_or_bust(data_dict, keys):
    '''Return the value(s) from the given data_dict for the given key(s).
    Usage::
        single_value = get_or_bust(data_dict, 'a_key')
        value_1, value_2 = get_or_bust(data_dict, ['key1', 'key2'])
    :param data_dict: the dictionary to return the values from
    :type data_dict: dictionary
    :param keys: the key(s) for the value(s) to return
    :type keys: either a string or a list
    :returns: a single value from the dict if a single key was given,
        or a tuple of values if a list of keys was given
    :raises: :py:exc:`ckan.logic.ValidationError` if one of the given keys is
        not in the given dictionary
    '''
    if isinstance(keys, string_types):
        keys = [keys]
    # Imported locally, presumably to avoid a circular import at module
    # load time -- confirm before hoisting to the top of the file.
    import ckan.logic.schema as schema
    schema = schema.create_schema_for_required_keys(keys)
    data_dict, errors = _validate(data_dict, schema)
    if errors:
        raise ValidationError(errors)
    # preserve original key order
    values = [data_dict[key] for key in keys]
    if len(values) == 1:
        return values[0]
    return tuple(values)
def validate(schema_func, can_skip_validator=False):
    ''' A decorator factory that validates an action's data_dict against
    the schema returned by schema_func (or one supplied via
    context['schema']) before running the action. When
    can_skip_validator is set, a truthy context['skip_validation']
    bypasses validation entirely.
    '''
    def action_decorator(action):
        @functools.wraps(action)
        def wrapper(context, data_dict):
            if can_skip_validator and context.get('skip_validation'):
                return action(context, data_dict)
            schema = context.get('schema', schema_func())
            data_dict, errors = _validate(data_dict, schema, context)
            if errors:
                raise ValidationError(errors)
            return action(context, data_dict)
        return wrapper
    return action_decorator
def side_effect_free(action):
    '''Mark an action function as side-effect-free.
    Actions carrying this flag may be called with an HTTP GET request to
    the Action API; undecorated actions must be called with POST.
    Extensions registering actions through
    :py:class:`~ckan.plugins.interfaces.IActions` can apply this
    decorator to expose their actions to GET requests as well.
    '''
    setattr(action, 'side_effect_free', True)
    return action
def auth_sysadmins_check(action):
    '''Force the auth function to run even for sysadmins.
    Normally sysadmins are authorized for every action without the
    action's authorization function being consulted; decorating an auth
    function with this makes it be called for sysadmins as well.
    '''
    setattr(action, 'auth_sysadmins_check', True)
    return action
def auth_audit_exempt(action):
    '''Exempt this action from the auth audit (a temporary workaround).'''
    setattr(action, 'auth_audit_exempt', True)
    return action
def auth_allow_anonymous_access(action):
    '''Allow this auth function to run without a logged-in user.
    check_access will not raise NotAuthorized merely because the context
    carries no authenticated user; the auth function itself may still
    refuse access by returning False.
    '''
    setattr(action, 'auth_allow_anonymous_access', True)
    return action
def auth_disallow_anonymous_access(action):
    '''Require a logged-in user for this auth function.
    check_access raises NotAuthorized for contexts without an
    authenticated user, without even calling the auth function.
    '''
    setattr(action, 'auth_allow_anonymous_access', False)
    return action
def chained_auth_function(func):
    '''Mark an auth function as chained, i.e. wrapping the previously
    registered auth function of the same name.
    '''
    setattr(func, 'chained_auth_function', True)
    return func
class UnknownValidator(Exception):
    '''Exception raised when a requested validator function cannot be found.
    Raised by :py:func:`get_validator`.
    '''
    pass
_validators_cache = {}
def clear_validators_cache():
    '''Reset the module-level validator cache so get_validator() rebuilds it.'''
    _validators_cache.clear()
# This function exists mainly so that validators can be made available to
# extensions via ckan.plugins.toolkit.
def get_validator(validator):
    '''Return a validator function by name.
    :param validator: the name of the validator function to return,
        eg. ``'package_name_exists'``
    :type validator: string
    :raises: :py:exc:`~ckan.plugins.toolkit.UnknownValidator` if the named
        validator is not found
    :returns: the named validator function
    :rtype: ``types.FunctionType``
    '''
    # Build the cache lazily on first use: core validators/converters
    # first, then plugin-provided ones on top.
    if not _validators_cache:
        validators = _import_module_functions('ckan.lib.navl.validators')
        _validators_cache.update(validators)
        validators = _import_module_functions('ckan.logic.validators')
        _validators_cache.update(validators)
        converters = _import_module_functions('ckan.logic.converters')
        _validators_cache.update(converters)
        # Legacy alias for the 'one_of' validator.
        _validators_cache.update({'OneOf': _validators_cache['one_of']})
        # Iterate plugins in reverse so earlier-registered plugins win
        # when two provide a validator with the same name.
        for plugin in reversed(list(p.PluginImplementations(p.IValidators))):
            for name, fn in plugin.get_validators().items():
                log.debug('Validator function {0} from plugin {1} was inserted'
                          .format(name, plugin.name))
                _validators_cache[name] = fn
    try:
        return _validators_cache[validator]
    except KeyError:
        raise UnknownValidator('Validator `%s` does not exist' % validator)
def model_name_to_class(model_module, model_name):
    '''Return the class in model_module whose name is the title-cased form
    of model_name (e.g. 'package' -> model_module.Package).

    :raises ValidationError: if model_module has no such class
    '''
    # Bug fix: compute the class name *outside* the try block. Previously a
    # non-string model_name made .title() raise AttributeError, which was
    # caught, and the handler then hit an unbound `model_class_name`,
    # turning the error report itself into an UnboundLocalError.
    model_class_name = model_name.title()
    try:
        return getattr(model_module, model_class_name)
    except AttributeError:
        raise ValidationError("%s isn't a valid model" % model_class_name)
def _import_module_functions(module_path):
    '''Import a module and get the functions and return them in a dict'''
    module = importlib.import_module(module_path)
    # authz.get_local_functions yields (name, function) pairs -- presumably
    # only functions defined in the module itself, not re-imports; confirm.
    return {
        k: v
        for k, v in authz.get_local_functions(module)
    }
| 33.688202
| 79
| 0.627741
|
import inspect
import functools
import logging
import re
import importlib
import inspect
from collections import defaultdict
from werkzeug.utils import import_string
import six
from six import string_types, text_type
import ckan.model as model
import ckan.authz as authz
import ckan.lib.navl.dictization_functions as df
import ckan.plugins as p
from ckan.common import _, c
log = logging.getLogger(__name__)
_validate = df.validate
class NameConflict(Exception):
pass
class UsernamePasswordError(Exception):
pass
class ActionError(Exception):
def __init__(self, message=''):
self.message = message
super(ActionError, self).__init__(message)
def __str__(self):
msg = self.message
if not isinstance(msg, six.string_types):
msg = str(msg)
return six.ensure_text(msg)
class NotFound(ActionError):
pass
class NotAuthorized(ActionError):
pass
class ValidationError(ActionError):
def __init__(self, error_dict, error_summary=None, extra_msg=None):
if not isinstance(error_dict, dict):
error_dict = {'message': error_dict}
if 'tags' in error_dict:
tag_errors = []
for error in error_dict['tags']:
try:
tag_errors.append(', '.join(error['name']))
except KeyError:
# e.g. if it is a vocabulary_id error
if error:
tag_errors.append(error)
error_dict['tags'] = tag_errors
self.error_dict = error_dict
self._error_summary = error_summary
super(ValidationError, self).__init__(extra_msg)
@property
def error_summary(self):
def summarise(error_dict):
def prettify(field_name):
field_name = re.sub(r'(?<!\w)[Uu]rl(?!\w)', 'URL',
field_name.replace('_', ' ').capitalize())
return _(field_name.replace('_', ' '))
summary = {}
for key, error in six.iteritems(error_dict):
if key == 'resources':
summary[_('Resources')] = _('Package resource(s) invalid')
elif key == 'extras':
errors_extras = []
for item in error:
if (item.get('key') and
item['key'][0] not in errors_extras):
errors_extras.append(item.get('key')[0])
summary[_('Extras')] = ', '.join(errors_extras)
elif key == 'extras_validation':
summary[_('Extras')] = error[0]
elif key == 'tags':
summary[_('Tags')] = error[0]
else:
summary[_(prettify(key))] = error[0]
return summary
if self._error_summary:
return self._error_summary
return summarise(self.error_dict)
def __str__(self):
err_msgs = (super(ValidationError, self).__str__(),
self.error_dict)
return ' - '.join([str(err_msg) for err_msg in err_msgs if err_msg])
log = logging.getLogger(__name__)
def parse_params(params, ignore_keys=None):
parsed = {}
for key in params:
if ignore_keys and key in ignore_keys:
continue
# flask request has `getlist` instead of pylons' `getall`
if hasattr(params, 'getall'):
value = params.getall(key)
else:
value = params.getlist(key)
if not value:
value = ''
if len(value) == 1:
value = value[0]
parsed[key] = value
return parsed
def clean_dict(data_dict):
for key, value in data_dict.items():
if not isinstance(value, list):
continue
for inner_dict in value[:]:
if isinstance(inner_dict, string_types):
break
if not any(inner_dict.values()):
value.remove(inner_dict)
else:
clean_dict(inner_dict)
return data_dict
def tuplize_dict(data_dict):
tuplized_dict = {}
for key, value in six.iteritems(data_dict):
key_list = key.split('__')
for num, key in enumerate(key_list):
if num % 2 == 1:
try:
key_list[num] = int(key)
except ValueError:
raise df.DataError('Bad key')
tuplized_dict[tuple(key_list)] = value
return tuplized_dict
def untuplize_dict(tuplized_dict):
data_dict = {}
for key, value in six.iteritems(tuplized_dict):
new_key = '__'.join([str(item) for item in key])
data_dict[new_key] = value
return data_dict
def flatten_to_string_key(dict):
flattented = df.flatten_dict(dict)
return untuplize_dict(flattented)
def _prepopulate_context(context):
if context is None:
context = {}
context.setdefault('model', model)
context.setdefault('session', model.Session)
try:
context.setdefault('user', c.user)
except AttributeError:
pass
except RuntimeError:
pass
except TypeError:
pass
return context
def check_access(action, context, data_dict=None):
try:
audit = context.get('__auth_audit', [])[-1]
except IndexError:
audit = ''
if audit and audit[0] == action:
context['__auth_audit'].pop()
user = context.get('user')
try:
if 'auth_user_obj' not in context:
context['auth_user_obj'] = None
if not context.get('ignore_auth'):
if not context.get('__auth_user_obj_checked'):
if context.get('user') and not context.get('auth_user_obj'):
context['auth_user_obj'] = \
model.User.by_name(context['user'])
context['__auth_user_obj_checked'] = True
context = _prepopulate_context(context)
logic_authorization = authz.is_authorized(action, context,
data_dict)
if not logic_authorization['success']:
msg = logic_authorization.get('msg', '')
raise NotAuthorized(msg)
except NotAuthorized as e:
log.debug(u'check access NotAuthorized - %s user=%s "%s"',
action, user, text_type(e))
raise
log.debug('check access OK - %s user=%s', action, user)
return True
_actions = {}
def clear_actions_cache():
_actions.clear()
def chained_action(func):
func.chained_action = True
return func
def _is_chained_action(func):
return getattr(func, 'chained_action', False)
def get_action(action):
if _actions:
if action not in _actions:
raise KeyError("Action '%s' not found" % action)
return _actions.get(action)
for action_module_name in ['get', 'create', 'update', 'delete', 'patch']:
module = importlib.import_module(
'.' + action_module_name, 'ckan.logic.action')
for k, v in authz.get_local_functions(module):
_actions[k] = v
if action_module_name == 'get' and \
not hasattr(v, 'side_effect_free'):
v.side_effect_free = True
resolved_action_plugins = {}
fetched_actions = {}
chained_actions = defaultdict(list)
for plugin in p.PluginImplementations(p.IActions):
for name, action_function in plugin.get_actions().items():
if _is_chained_action(action_function):
chained_actions[name].append(action_function)
elif name in resolved_action_plugins:
raise NameConflict(
'The action %r is already implemented in %r' % (
name,
resolved_action_plugins[name]
)
)
else:
resolved_action_plugins[name] = plugin.name
action_function.auth_audit_exempt = True
fetched_actions[name] = action_function
for name, func_list in six.iteritems(chained_actions):
if name not in fetched_actions and name not in _actions:
raise NotFound('The action %r is not found for chained action' % (
name))
for func in reversed(func_list):
prev_func = fetched_actions.get(name, _actions.get(name))
new_func = functools.partial(func, prev_func)
for attribute, value in six.iteritems(func.__dict__):
setattr(new_func, attribute, value)
fetched_actions[name] = new_func
_actions.update(fetched_actions)
for action_name, _action in _actions.items():
def make_wrapped(_action, action_name):
def wrapped(context=None, data_dict=None, **kw):
if kw:
log.critical('%s was passed extra keywords %r'
% (_action.__name__, kw))
context = _prepopulate_context(context)
context.setdefault('__auth_audit', [])
context['__auth_audit'].append((action_name, id(_action)))
result = _action(context, data_dict, **kw)
try:
audit = context['__auth_audit'][-1]
if audit[0] == action_name and audit[1] == id(_action):
if action_name not in authz.auth_functions_list():
log.debug('No auth function for %s' % action_name)
elif not getattr(_action, 'auth_audit_exempt', False):
raise Exception(
'Action function {0} did not call its '
'auth function'
.format(action_name))
context['__auth_audit'].pop()
except IndexError:
pass
return result
return wrapped
fn = make_wrapped(_action, action_name)
fn.__doc__ = _action.__doc__
if getattr(_action, 'side_effect_free', False):
fn.side_effect_free = True
_actions[action_name] = fn
return _actions.get(action)
def get_or_bust(data_dict, keys):
if isinstance(keys, string_types):
keys = [keys]
import ckan.logic.schema as schema
schema = schema.create_schema_for_required_keys(keys)
data_dict, errors = _validate(data_dict, schema)
if errors:
raise ValidationError(errors)
values = [data_dict[key] for key in keys]
if len(values) == 1:
return values[0]
return tuple(values)
def validate(schema_func, can_skip_validator=False):
def action_decorator(action):
@functools.wraps(action)
def wrapper(context, data_dict):
if can_skip_validator:
if context.get('skip_validation'):
return action(context, data_dict)
schema = context.get('schema', schema_func())
data_dict, errors = _validate(data_dict, schema, context)
if errors:
raise ValidationError(errors)
return action(context, data_dict)
return wrapper
return action_decorator
def side_effect_free(action):
action.side_effect_free = True
return action
def auth_sysadmins_check(action):
action.auth_sysadmins_check = True
return action
def auth_audit_exempt(action):
action.auth_audit_exempt = True
return action
def auth_allow_anonymous_access(action):
action.auth_allow_anonymous_access = True
return action
def auth_disallow_anonymous_access(action):
action.auth_allow_anonymous_access = False
return action
def chained_auth_function(func):
func.chained_auth_function = True
return func
class UnknownValidator(Exception):
pass
_validators_cache = {}
def clear_validators_cache():
_validators_cache.clear()
def get_validator(validator):
if not _validators_cache:
validators = _import_module_functions('ckan.lib.navl.validators')
_validators_cache.update(validators)
validators = _import_module_functions('ckan.logic.validators')
_validators_cache.update(validators)
converters = _import_module_functions('ckan.logic.converters')
_validators_cache.update(converters)
_validators_cache.update({'OneOf': _validators_cache['one_of']})
for plugin in reversed(list(p.PluginImplementations(p.IValidators))):
for name, fn in plugin.get_validators().items():
log.debug('Validator function {0} from plugin {1} was inserted'
.format(name, plugin.name))
_validators_cache[name] = fn
try:
return _validators_cache[validator]
except KeyError:
raise UnknownValidator('Validator `%s` does not exist' % validator)
def model_name_to_class(model_module, model_name):
try:
model_class_name = model_name.title()
return getattr(model_module, model_class_name)
except AttributeError:
raise ValidationError("%s isn't a valid model" % model_class_name)
def _import_module_functions(module_path):
module = importlib.import_module(module_path)
return {
k: v
for k, v in authz.get_local_functions(module)
}
| true
| true
|
f719c3d21c3cbd95489d2ede11b990e85803833d
| 79
|
py
|
Python
|
Chapter03/circle_call.py
|
PacktPublishing/Secret-Recipes-of-the-Python-Ninja
|
805d00c7a54927ba94c9077e9a580508ee3c5e56
|
[
"MIT"
] | 13
|
2018-06-21T01:44:49.000Z
|
2021-12-01T10:49:53.000Z
|
Chapter03/circle_call.py
|
PacktPublishing/Secret-Recipes-of-the-Python-Ninja
|
805d00c7a54927ba94c9077e9a580508ee3c5e56
|
[
"MIT"
] | null | null | null |
Chapter03/circle_call.py
|
PacktPublishing/Secret-Recipes-of-the-Python-Ninja
|
805d00c7a54927ba94c9077e9a580508ee3c5e56
|
[
"MIT"
] | 6
|
2018-10-05T08:29:24.000Z
|
2022-01-11T14:49:50.000Z
|
r = input("Input radius: ")
diameter, circumference, area = circle_measures(r)
| 26.333333
| 50
| 0.734177
|
r = input("Input radius: ")
diameter, circumference, area = circle_measures(r)
| true
| true
|
f719c48b433034a6d2941656747bb299c65248d8
| 9,652
|
py
|
Python
|
QLearning.py
|
FlowerForAlgernon/ai_tetris
|
7ac0d3875ad9b31fb260f7567a218e0de340c4e4
|
[
"Apache-2.0"
] | 1
|
2021-12-19T14:07:37.000Z
|
2021-12-19T14:07:37.000Z
|
QLearning.py
|
FlowerForAlgernon/ai_tetris
|
7ac0d3875ad9b31fb260f7567a218e0de340c4e4
|
[
"Apache-2.0"
] | null | null | null |
QLearning.py
|
FlowerForAlgernon/ai_tetris
|
7ac0d3875ad9b31fb260f7567a218e0de340c4e4
|
[
"Apache-2.0"
] | null | null | null |
"""
这份代码使用 Q learning 算法训练并运行俄罗斯方块游戏 ai。其中简化状态空间的方法可参考论文 Adapting Reinforcement Learning to Tetris
"""
import numpy as np
from game import *
sub_well = 4
base = 7
def getStateIndex(field_width, field_height, field_map, base=7):
    """Encode the board surface as a single integer state index.

    For each column, find the depth of the first occupied cell from the
    top; the differences between adjacent column depths, clamped to
    [-(base-1)//2, (base-1)//2], become the digits of a base-`base`
    number. (State-space reduction as in "Adapting Reinforcement
    Learning to Tetris".)

    :param field_width: number of columns in field_map
    :param field_height: number of rows in field_map
    :param field_map: 2D grid; 0 marks an empty cell
    :param base: numeral base for the encoding; each clamped height
        difference takes one of `base` values (default 7, matching the
        module-level constant this previously read implicitly)
    :returns: integer in ``range(base ** (field_width - 1))``
    """
    half = (base - 1) // 2
    # Depth of the first filled cell in each column (field_height if empty).
    tops = [0 for _ in range(field_width)]
    for x in range(field_width):
        while tops[x] < field_height and field_map[tops[x]][x] == 0:
            tops[x] += 1
    index = 0
    for i in range(field_width - 1):
        # Clamp the adjacent-column difference into [-half, half], then
        # shift to [0, base-1] so it can serve as a base-`base` digit.
        diff = tops[i + 1] - tops[i]
        digit = max(-half, min(half, diff)) + half
        index += base ** i * digit
    return index
def getAllPossibleLocation(field_width, field_map, block, layout):
    """Return every column x where `layout` can legally sit above the board
    (i.e. isLegal at (x, -4) does not report a mid-field collision)."""
    return [x for x in range(field_width)
            if block.isLegal(layout, (x, -4), field_map) is not State.Middle]
def findBottomPosition(field_map, block, x, layout):
    """Return the lowest y at which `layout` can rest in column x: scan
    downward from above the board until isLegal reports Bottom, then step
    back one row."""
    y = -4
    while True:
        if block.isLegal(layout, (x, y), field_map) is State.Bottom:
            return y - 1
        y += 1
def dropBlock(field_height, field_map, x0, y0, layout):
    """Stamp the block's cells into field_map at origin (x0, y0).

    Returns False as soon as a cell lands above the top of the board
    (row < 0) -- cells already visited stay stamped -- otherwise True.
    """
    for (dx, dy) in layout:
        row = y0 + dy
        if 0 <= row < field_height:
            field_map[row][x0 + dx] = 1
        if row < 0:
            return False
    return True
def resetMap(field_width, field_height, field_map):
    """Erase up to four filled cells (one tetromino) from field_map,
    scanning row by row from the top-left, and stop after the fourth."""
    cleared = 0
    for row in range(field_height):
        for col in range(field_width):
            if field_map[row][col] == 1:
                field_map[row][col] = 0
                cleared += 1
                if cleared == 4:
                    return
def getNewMap(block, position, direction, field_map):
    # Apply a chosen action to the live game: rotate the falling block to
    # `direction`, slide it to column position[0], then drop it until it
    # locks. Callers pass the (column, rotation) action tuple as `position`,
    # so position[0] is the target column.
    while block.direction is not direction:
        block.rotate(field_map)
    while block.position[0] > position[0]:
        block.left(field_map)
    while block.position[0] < position[0]:
        block.right(field_map)
    while not block.is_stop:
        block.down(field_map)
class QLearning(Game):
    """Q-learning trainer for Tetris on a narrow board of width `sub_well`.

    The Q table maps ((state_index, block_type), (column, rotation)) to a
    value, where state indices are the base-7 surface encodings produced
    by getStateIndex.
    """
    def __init__(self):
        super(QLearning, self).__init__(sub_well, 1000)
        self.repeat_num = 200   # number of training games (epochs)
        self.alpha = 0.2        # learning rate, halved every 100 epochs
        self.gamma = 0.8        # discount factor
        self.lambda_ = 0.3      # not referenced in this class
        self.epsilon = 0.01     # exploration probability (epsilon-greedy)
        # Enumerate every (state, block) x (column, rotation) pair.
        self.key = [((s, b), (p, d)) for s in range(base**(self.field_width-1)) for b in range(7) for p in range(self.field_width) for d in range(4)]
        self.V = [0 for _ in range(len(self.key))]
        self.Q = dict(zip(self.key, self.V))
        #self.Q = np.load('QL.npy').item()
    def checkEvents(self):
        """Drain pygame events so the window stays responsive; quit on close."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                exit(0)
    def getBlock(self, block):
        """Return the tetromino type index: the position of block.color in
        Blocks_color."""
        for x in range(len(Blocks_color)):
            if block.color == Blocks_color[x]:
                return x
    def getReward(self):
        """Reward for the just-dropped block: penalise uneven column heights
        (variance of column depths, weight -2) and holes buried beneath the
        new block's cells (weight -1 each)."""
        temp = [0 for _ in range(self.field_width)]
        for x in range(self.field_width):
            while temp[x] < self.field_height and self.field_map[temp[x]][x] == 0:
                temp[x] += 1
        buried_holes = 0
        block = self.block_factory.cur_block
        # NOTE(review): columns are indexed by the layout-relative x; if
        # layout offsets are relative to block.position this misses the
        # position[0] shift -- confirm against Block.layout semantics.
        for (x, y) in block.layout:
            i = 1
            while block.position[1]+y+i < self.field_height and self.field_map[block.position[1]+y+i][x] == 0:
                buried_holes += 1
                i += 1
        return np.var(temp)*(-2) + buried_holes*(-1)
    def getAllActions(self, block):
        """Return every playable (column, rotation) for `block` on the
        current map, using a trial drop that is rolled back via resetMap."""
        actions = []
        for direction in range(len(block.layouts)):
            for x in getAllPossibleLocation(self.field_width, self.field_map, block, block.layouts[direction]):
                y = findBottomPosition(self.field_map, block, x, block.layouts[direction])
                if dropBlock(self.field_height, self.field_map, x, y, block.layouts[direction]):
                    actions.append((x, direction))
                resetMap(self.field_width, self.field_height, self.field_map)
        return actions
    def getBestActionWithGreedy(self, block):
        """Epsilon-greedy selection: the best-Q action with probability
        1-epsilon, a uniformly random legal action otherwise; None when no
        action is playable."""
        block_type = self.getBlock(block)
        state = getStateIndex(self.field_width, self.field_height, self.field_map)
        actions = self.getAllActions(block)
        actions_value = {}
        for action in actions:
            actions_value[action] = self.Q[((state, block_type), action)]
        if actions_value == {}:
            return None
        elif random.random() > self.epsilon:
            return max(actions_value, key=actions_value.get)
        else:
            return list(actions_value.keys())[random.randint(0, len(actions_value)-1)]
    def getBestAction(self, block):
        """Purely greedy selection (no exploration); None when no action is
        playable."""
        block_type = self.getBlock(block)
        state = getStateIndex(self.field_width, self.field_height, self.field_map)
        actions = self.getAllActions(block)
        actions_value = {}
        for action in actions:
            actions_value[action] = self.Q[((state, block_type), action)]
        if actions_value == {}:
            return None
        return max(actions_value, key=actions_value.get)
    def train(self):
        """Run self.repeat_num training games, updating Q with the one-step
        Q-learning rule, and persist the Q table and per-game line counts."""
        record = []
        for i in range(1, self.repeat_num+1):
            self.initialize()
            while not self.block_factory.is_failed:
                cur_state = getStateIndex(self.field_width, self.field_height, self.field_map)
                cur_block = self.getBlock(self.block_factory.cur_block)
                cur_action = self.getBestActionWithGreedy(self.block_factory.cur_block)
                cur_index = ((cur_state, cur_block), cur_action)
                if cur_action is None: break
                getNewMap(self.block_factory.cur_block, cur_action, cur_action[1], self.field_map)
                next_state = getStateIndex(self.field_width, self.field_height, self.field_map)
                next_block = self.getBlock(self.block_factory.next_block)
                next_action = self.getBestAction(self.block_factory.next_block)
                next_index = ((next_state, next_block), next_action)
                if next_action is None: break
                # One-step Q-learning update; the greedy choice of
                # next_action plays the role of the max over actions.
                self.Q[cur_index] += self.alpha*(self.getReward()+self.gamma*self.Q[next_index] - self.Q[cur_index])
                self.update()
            print("Epoch:"+str(i)+"/"+str(self.repeat_num)+" Lines:"+ str(self.lines_num)+" Alpha:"+str(self.alpha))
            record.append(self.lines_num)
            if i % 100 == 0:
                self.alpha *= 0.5
                # Bug fix: checkpoint the Q table itself, not {"V": self.V}.
                # self.V is never updated after __init__, and QLGame's
                # loader expects the Q dict (as in the final save below).
                np.save('QL.npy', self.Q)
                np.save('record_QL.npy', {"record": record})
        np.save('QL.npy', self.Q)
        np.save('record_QL.npy', {"record": record})
class QLGame(Game):
    """Play full-width Tetris using a Q-table learned on a narrow sub-well.

    The 10-wide field is scanned with a sliding window of width `sub_well`;
    Q-values learned for the narrow field are looked up per window and the
    best-scoring placement across all windows is executed.
    """
    def __init__(self):
        super(QLGame, self).__init__(10, 20)
        # Q-table produced by QLearning.train(); keys are
        # ((state_index, block_type), (x, direction)).
        self.Q = np.load('QL.npy', allow_pickle=True).item()
        self.col = 0
    def checkEvents(self):
        """Process pygame events; quit cleanly on window close."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                exit(0)
    def getBlock(self, block):
        """Map a block to its type index via its colour (index into Blocks_color)."""
        for x in range(len(Blocks_color)):
            if block.color == Blocks_color[x]:
                return x
    def cutFieldMap(self, position):
        """Return a copy of the sub_well-wide window starting at column `position`."""
        new_field_map = [[0]*sub_well for _ in range(self.field_height)]
        for y in range(self.field_height):
            for x in range(sub_well):
                new_field_map[y][x] = self.field_map[y][position+x]
        return new_field_map
    def getAllActions(self, field_width, field_height, block, field_map, init_pos):
        """Score every legal placement of `block` inside one window.

        Returns {(absolute_x, direction): Q-value}; `init_pos` converts the
        window-relative column back to a field-absolute one.
        """
        actions = {}
        for direction in range(len(block.layouts)):
            for x in getAllPossibleLocation(field_width, field_map, block, block.layouts[direction]):
                y = findBottomPosition(field_map, block, x, block.layouts[direction])
                if dropBlock(field_height, field_map, x, y, block.layouts[direction]):
                    block_type = self.getBlock(block)
                    state = getStateIndex(field_width, field_height, field_map)
                    actions[(x + init_pos, direction)] = self.Q[((state, block_type), (x, direction))]
                # Undo the trial drop before evaluating the next placement.
                resetMap(field_width, field_height, field_map)
        return actions
    def getBestAction(self):
        """Return the (x, direction) with the highest Q over all windows, or None."""
        actions = {}
        cur_block = Block(self.block_factory.cur_block.screen, sub_well, self.field_height, self.block_factory.cur_block.layouts, self.block_factory.cur_block.direction, self.block_factory.cur_block.color, (0, -4))
        for x in range(self.field_width - sub_well + 1):
            loc_actions = self.getAllActions(sub_well, self.field_height, cur_block, self.cutFieldMap(x), x)
            for k, v in loc_actions.items():
                if k in actions:
                    actions[k].append(v)
                else:
                    actions[k] = [v]
        # A placement seen in several windows keeps its best score.
        for k, v in actions.items():
            actions[k] = max(v)
        return max(actions, key=actions.get) if actions != {} else None
    def start(self):
        """Run the game loop until top-out; return total cleared lines."""
        self.initialize()
        self.initializePygame()
        while not self.block_factory.is_failed:
            self.checkEvents()
            action = self.getBestAction()
            if action == None:
                break
            getNewMap(self.block_factory.cur_block, action, action[1], self.field_map)
            self.update()
            self.draw()
        return self.lines_num
if __name__ == '__main__':
    # Train the Q-table on the narrow well, then play a full-size game with it.
    train = QLearning()
    train.train()
    game = QLGame()
    game.start()
| 38
| 214
| 0.585993
|
import numpy as np
from game import *
sub_well = 4
base = 7
def getStateIndex(field_width, field_height, field_map):
    """Encode the field's column-height profile as a base-`base` integer.

    For every adjacent column pair, the height difference is clamped to
    [-(base-1)//2, (base-1)//2], shifted to be non-negative, and used as
    one base-`base` digit of the returned state index.
    """
    half = (base - 1) // 2
    # Depth of the first occupied cell in each column (== field_height if empty).
    depths = []
    for col in range(field_width):
        depth = 0
        while depth < field_height and field_map[depth][col] == 0:
            depth += 1
        depths.append(depth)
    index = 0
    place = 1  # current base**digit positional weight
    for left, right in zip(depths, depths[1:]):
        clamped = max(-half, min(half, right - left))
        index += place * (clamped + half)
        place *= base
    return index
def getAllPossibleLocation(field_width, field_map, block, layout):
    """Return every column x where `layout` can be spawned at (x, -4).

    A column qualifies unless block.isLegal reports a sideways collision
    (State.Middle) at the spawn row.
    """
    return [
        x
        for x in range(field_width)
        if block.isLegal(layout, (x, -4), field_map) is not State.Middle
    ]
def findBottomPosition(field_map, block, x, layout):
    """Hard-drop `layout` in column x; return the last legal row.

    Scans downward from the spawn row (-4) until block.isLegal reports
    State.Bottom, then steps back one row.
    """
    y = -4
    while True:
        if block.isLegal(layout, (x, y), field_map) is State.Bottom:
            return y - 1
        y += 1
def dropBlock(field_height, field_map, x0, y0, layout):
    """Stamp `layout` into field_map at origin (x0, y0).

    Cells landing inside the field are written as 1. Returns False as soon
    as any cell lies above the top edge (the placement overflows); cells
    visited before that point stay written — callers undo trial drops with
    resetMap().
    """
    for dx, dy in layout:
        row = y0 + dy
        if 0 <= row < field_height:
            field_map[row][x0 + dx] = 1
        if row < 0:
            return False
    return True
def resetMap(field_width, field_height, field_map):
    """Erase a trial drop: clear the first four set cells, scanning row-major.

    A tetromino occupies exactly four cells, so the scan stops after the
    fourth 1 is cleared; any further set cells are left untouched.
    """
    cleared = 0
    for row in range(field_height):
        for col in range(field_width):
            if field_map[row][col] != 1:
                continue
            field_map[row][col] = 0
            cleared += 1
            if cleared == 4:
                return
def getNewMap(block, position, direction, field_map):
    """Steer `block` to the chosen placement and hard-drop it.

    Rotates until the block's direction matches `direction`, shifts
    horizontally until its x-coordinate matches position[0], then drops
    until the block stops.  Mutates `block` and, through the block's own
    methods, `field_map`.
    """
    while block.direction is not direction:
        block.rotate(field_map)
    while block.position[0] > position[0]:
        block.left(field_map)
    while block.position[0] < position[0]:
        block.right(field_map)
    while not block.is_stop:
        block.down(field_map)
class QLearning(Game):
    """Tabular Q-learning agent for Tetris on a narrow sub_well-wide field.

    The state is the base-`base` encoding of adjacent column-height
    differences (see getStateIndex); actions are (column, rotation) pairs.
    """
    def __init__(self):
        # Train on a narrow, very tall well to keep the state space small.
        super(QLearning, self).__init__(sub_well, 1000)
        self.repeat_num = 200   # training episodes
        self.alpha = 0.2        # learning rate (halved every 100 episodes)
        self.gamma = 0.8        # discount factor
        self.lambda_ = 0.3      # NOTE(review): unused in this class
        self.epsilon = 0.01     # exploration rate
        # Enumerate every ((state, block_type), (column, rotation)) key.
        self.key = [((s, b), (p, d)) for s in range(base**(self.field_width-1)) for b in range(7) for p in range(self.field_width) for d in range(4)]
        self.V = [0 for _ in range(len(self.key))]
        self.Q = dict(zip(self.key, self.V))
    def checkEvents(self):
        """Process pygame events; quit cleanly on window close."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                exit(0)
    def getBlock(self, block):
        """Map a block to its type index via its colour (index into Blocks_color)."""
        for x in range(len(Blocks_color)):
            if block.color == Blocks_color[x]:
                return x
    def getReward(self):
        """Negative reward: -2*variance of column heights, -1 per buried hole."""
        temp = [0 for _ in range(self.field_width)]
        for x in range(self.field_width):
            while temp[x] < self.field_height and self.field_map[temp[x]][x] == 0:
                temp[x] += 1
        buried_holes = 0
        block = self.block_factory.cur_block
        # Count empty cells directly below each cell of the just-placed block.
        # NOTE(review): indexes the field with the layout-relative x, not
        # block.position[0] + x — verify this is the intended column.
        for (x, y) in block.layout:
            i = 1
            while block.position[1]+y+i < self.field_height and self.field_map[block.position[1]+y+i][x] == 0:
                buried_holes += 1
                i += 1
        return np.var(temp)*(-2) + buried_holes*(-1)
    def getAllActions(self, block):
        """Return every legal (column, rotation) placement for `block`."""
        actions = []
        for direction in range(len(block.layouts)):
            for x in getAllPossibleLocation(self.field_width, self.field_map, block, block.layouts[direction]):
                y = findBottomPosition(self.field_map, block, x, block.layouts[direction])
                if dropBlock(self.field_height, self.field_map, x, y, block.layouts[direction]):
                    actions.append((x, direction))
                # Undo the trial drop before testing the next placement.
                resetMap(self.field_width, self.field_height, self.field_map)
        return actions
    def getBestActionWithGreedy(self, block):
        """Epsilon-greedy action: best Q with prob. 1-epsilon, else random."""
        block_type = self.getBlock(block)
        state = getStateIndex(self.field_width, self.field_height, self.field_map)
        actions = self.getAllActions(block)
        actions_value = {}
        for action in actions:
            actions_value[action] = self.Q[((state, block_type), action)]
        if actions_value == {}:
            return None
        elif random.random() > self.epsilon:
            return max(actions_value, key=actions_value.get)
        else:
            return list(actions_value.keys())[random.randint(0, len(actions_value)-1)]
    def getBestAction(self, block):
        """Pure greedy action (no exploration); used for the bootstrap target."""
        block_type = self.getBlock(block)
        state = getStateIndex(self.field_width, self.field_height, self.field_map)
        actions = self.getAllActions(block)
        actions_value = {}
        for action in actions:
            actions_value[action] = self.Q[((state, block_type), action)]
        if actions_value == {}:
            return None
        return max(actions_value, key=actions_value.get)
    def train(self):
        """Run repeat_num episodes of Q-learning, saving Q and line records."""
        record = []
        for i in range(1, self.repeat_num+1):
            self.initialize()
            while not self.block_factory.is_failed:
                cur_state = getStateIndex(self.field_width, self.field_height, self.field_map)
                cur_block = self.getBlock(self.block_factory.cur_block)
                cur_action = self.getBestActionWithGreedy(self.block_factory.cur_block)
                cur_index = ((cur_state, cur_block), cur_action)
                if cur_action == None: break
                getNewMap(self.block_factory.cur_block, cur_action, cur_action[1], self.field_map)
                next_state = getStateIndex(self.field_width, self.field_height, self.field_map)
                next_block = self.getBlock(self.block_factory.next_block)
                next_action = self.getBestAction(self.block_factory.next_block)
                next_index = ((next_state, next_block), next_action)
                if next_action == None: break
                # TD update toward reward + gamma * Q(next).
                self.Q[cur_index] += self.alpha*(self.getReward()+self.gamma*self.Q[next_index] - self.Q[cur_index])
                self.update()
            print("Epoch:"+str(i)+"/"+str(self.repeat_num)+" Lines:"+ str(self.lines_num)+" Alpha:"+str(self.alpha))
            record.append(self.lines_num)
            if i % 100 == 0:
                self.alpha *= 0.5
                # NOTE(review): this checkpoint writes {"V": self.V} (a list
                # that is never updated), but the final save below and
                # QLGame.__init__ expect the Q dict — likely a bug.
                np.save('QL.npy', {"V": self.V})
                np.save('record_QL.npy', {"record": record})
        np.save('QL.npy', self.Q)
        np.save('record_QL.npy', {"record": record})
class QLGame(Game):
    """Play full-width Tetris using a Q-table learned on a narrow sub-well.

    The 10-wide field is scanned with a sliding window of width `sub_well`;
    Q-values learned for the narrow field are looked up per window and the
    best-scoring placement across all windows is executed.
    """
    def __init__(self):
        super(QLGame, self).__init__(10, 20)
        # Q-table produced by QLearning.train(); keys are
        # ((state_index, block_type), (x, direction)).
        self.Q = np.load('QL.npy', allow_pickle=True).item()
        self.col = 0
    def checkEvents(self):
        """Process pygame events; quit cleanly on window close."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                exit(0)
    def getBlock(self, block):
        """Map a block to its type index via its colour (index into Blocks_color)."""
        for x in range(len(Blocks_color)):
            if block.color == Blocks_color[x]:
                return x
    def cutFieldMap(self, position):
        """Return a copy of the sub_well-wide window starting at column `position`."""
        new_field_map = [[0]*sub_well for _ in range(self.field_height)]
        for y in range(self.field_height):
            for x in range(sub_well):
                new_field_map[y][x] = self.field_map[y][position+x]
        return new_field_map
    def getAllActions(self, field_width, field_height, block, field_map, init_pos):
        """Score every legal placement of `block` inside one window.

        Returns {(absolute_x, direction): Q-value}; `init_pos` converts the
        window-relative column back to a field-absolute one.
        """
        actions = {}
        for direction in range(len(block.layouts)):
            for x in getAllPossibleLocation(field_width, field_map, block, block.layouts[direction]):
                y = findBottomPosition(field_map, block, x, block.layouts[direction])
                if dropBlock(field_height, field_map, x, y, block.layouts[direction]):
                    block_type = self.getBlock(block)
                    state = getStateIndex(field_width, field_height, field_map)
                    actions[(x + init_pos, direction)] = self.Q[((state, block_type), (x, direction))]
                # Undo the trial drop before evaluating the next placement.
                resetMap(field_width, field_height, field_map)
        return actions
    def getBestAction(self):
        """Return the (x, direction) with the highest Q over all windows, or None."""
        actions = {}
        cur_block = Block(self.block_factory.cur_block.screen, sub_well, self.field_height, self.block_factory.cur_block.layouts, self.block_factory.cur_block.direction, self.block_factory.cur_block.color, (0, -4))
        for x in range(self.field_width - sub_well + 1):
            loc_actions = self.getAllActions(sub_well, self.field_height, cur_block, self.cutFieldMap(x), x)
            for k, v in loc_actions.items():
                if k in actions:
                    actions[k].append(v)
                else:
                    actions[k] = [v]
        # A placement seen in several windows keeps its best score.
        for k, v in actions.items():
            actions[k] = max(v)
        return max(actions, key=actions.get) if actions != {} else None
    def start(self):
        """Run the game loop until top-out; return total cleared lines."""
        self.initialize()
        self.initializePygame()
        while not self.block_factory.is_failed:
            self.checkEvents()
            action = self.getBestAction()
            if action == None:
                break
            getNewMap(self.block_factory.cur_block, action, action[1], self.field_map)
            self.update()
            self.draw()
        return self.lines_num
if __name__ == '__main__':
    # Learn a Q-table on the narrow training well, then use it to play
    # a full-size game.
    trainer = QLearning()
    trainer.train()
    player = QLGame()
    player.start()
| true
| true
|
f719c4a70ee2814bd930a3c19d9d0b1401f193f9
| 834
|
py
|
Python
|
pyvat/result.py
|
Alex-Espressone/pyvat
|
266559c9d8af2aee7ecea3aed52a517181a412c8
|
[
"Apache-2.0"
] | 48
|
2015-07-22T12:02:20.000Z
|
2022-02-07T16:54:13.000Z
|
pyvat/result.py
|
Alex-Espressone/pyvat
|
266559c9d8af2aee7ecea3aed52a517181a412c8
|
[
"Apache-2.0"
] | 34
|
2015-03-27T17:47:38.000Z
|
2022-02-08T18:14:55.000Z
|
pyvat/result.py
|
Alex-Espressone/pyvat
|
266559c9d8af2aee7ecea3aed52a517181a412c8
|
[
"Apache-2.0"
] | 40
|
2015-04-08T14:03:06.000Z
|
2022-02-09T12:29:04.000Z
|
class VatNumberCheckResult(object):
    """Outcome of validating a VAT number.

    :ivar is_valid:
        ``True`` when the checked VAT number was deemed valid, ``False``
        when it is positively invalid.
    :ivar log_lines: List of log lines produced during the check.
    :ivar business_name: Business name retrieved for the VAT number, if any.
    :ivar business_address: Address retrieved for the VAT number, if any.
    """

    def __init__(self, is_valid=None, log_lines=None, business_name=None,
                 business_address=None):
        self.is_valid = is_valid
        # `or []` ensures each instance gets its own mutable log list.
        self.log_lines = log_lines or []
        self.business_name = business_name
        self.business_address = business_address
| 36.26087
| 77
| 0.641487
|
class VatNumberCheckResult(object):
    """Result of a VAT number validation check.

    is_valid: True if valid, False if positively invalid, None if unknown.
    log_lines: list of log lines produced during the check.
    business_name / business_address: optional data retrieved for the number.
    """
    def __init__(self,
                 is_valid=None,
                 log_lines=None,
                 business_name=None,
                 business_address=None):
        self.is_valid = is_valid
        # `or []` gives each instance its own list when none is supplied.
        self.log_lines = log_lines or []
        self.business_name = business_name
        self.business_address = business_address
| true
| true
|
f719c4d93ac3ade1ce4c3daeee9db9db01e404b2
| 2,209
|
py
|
Python
|
source code/Data Visualization.py
|
starkworld/Python-Course-work
|
28715f079939129b442aedcd7edb2e0838886ba0
|
[
"Apache-2.0"
] | null | null | null |
source code/Data Visualization.py
|
starkworld/Python-Course-work
|
28715f079939129b442aedcd7edb2e0838886ba0
|
[
"Apache-2.0"
] | null | null | null |
source code/Data Visualization.py
|
starkworld/Python-Course-work
|
28715f079939129b442aedcd7edb2e0838886ba0
|
[
"Apache-2.0"
] | null | null | null |
"""
Author : nkalyan🤠
implementing Python Scripts on reading and returning the name no of mails that sent each day in week
and plot/display them in bar graph
I wrote code In counting to count the number of emails sent by each distinct user. That code may be helpful for this assignment.
"""
import matplotlib.pyplot as plt
from os import getcwd
def file_path():
    """Prompt the user for a file name and return it verbatim."""
    return input("Enter the file name:")
def pop_values(filename):
    """Yield the weekday abbreviation from each mbox 'From ' header line.

    A qualifying line starts with "From", contains an e-mail address ('@'),
    and ends with a 24-character asctime timestamp (which itself contains
    no '@'), e.g. "Sat Jan  5 09:14:16 2008"; the first three characters
    of that timestamp — the day name — are yielded.  If the file cannot
    be opened, a message is printed and the process exits.
    """
    try:
        handle = open(filename, "r")
    except FileNotFoundError:
        print("File Does not exist, please check your file name")
        exit()
    else:
        with handle:
            for raw in handle:
                text = raw.strip("\n")
                starts_with_from = text.find("From") == 0
                has_address = text.find("@") > 0
                tail = text[-24:]  # candidate timestamp portion
                if starts_with_from and has_address and tail.find("@") == -1:
                    # Drop the trailing 21 chars: keep only the day name.
                    yield tail[:-21]
def main():
    """Count e-mails per weekday in a user-supplied mbox file and plot them.

    Prompts for a file name, tallies the day names yielded by pop_values,
    and shows a bar chart of messages per day.
    """
    name = file_path()  # ask the user which file to read
    # Message counts per three-letter day name, in display order.
    dictionary = {'Sun': 0, 'Mon': 0, 'Tue': 0, 'Wed': 0, 'Thu': 0, 'Fri': 0, 'Sat': 0}
    for day in pop_values(name):
        if day in dictionary:
            dictionary[day] += 1
    # Plot once, straight from the dict.  The old code kept unused locals
    # (count, zp) and rebound the same values in a pointless 7-iteration
    # loop before plotting.
    plt.bar(dictionary.keys(), dictionary.values(), align='center', alpha=0.5)
    plt.ylabel('Number of messages')
    plt.title('Emails per day')
    plt.show()  # display the bar graph
if __name__ == '__main__':
    # Entry point: run only when executed as a script, not on import.
    """calls the main method"""
    main()
| 32.970149
| 132
| 0.557718
|
import matplotlib.pyplot as plt
from os import getcwd
def file_path():
    """Prompt the user for a file name and return it."""
    file_name = input("Enter the file name:")
    return file_name
def pop_values(filename):
    """Yield the weekday abbreviation from each mbox 'From ' header line.

    A qualifying line starts with "From", contains an '@', and its last
    24 characters (the asctime timestamp) contain no '@'; the first three
    characters of that timestamp — the day name — are yielded.
    Exits the process if the file cannot be opened.
    """
    file_name = filename
    try:
        fp = open(file_name, "r")
    except FileNotFoundError:
        print("File Does not exist, please check your file name")
        exit()
    else:
        with fp:
            for line in fp:
                line = line.strip("\n")
                offset = line.find("From")
                offset1 = line.find("@")
                # Keep only the trailing 24-char timestamp portion.
                line = line[-24:]
                offset3 = line.find("@")
                if offset == 0 and offset1 > 0 and offset3 == -1:
                    # First three characters are the weekday name.
                    line = line[:-21]
                    yield line
def main():
    """Count e-mails per weekday from a user-supplied file and plot a bar chart."""
    name = file_path()
    # Message counts per three-letter day name.
    dictionary = {'Sun': 0, 'Mon': 0, 'Tue': 0, 'Wed': 0, 'Thu': 0, 'Fri': 0, 'Sat': 0}
    value = pop_values(name)
    count = 0
    for i in value:
        if i in dictionary:
            dictionary[i] += 1
            count += len(i)
    val = dictionary.values()
    keys = dictionary.keys()
    # NOTE(review): zp is unused, and the loop below only rebinds i/j seven
    # times — plt.bar could use keys/val directly.
    zp = zip(dictionary.keys(), dictionary.values())
    for item in val:
        i = val
        j = keys
    plt.bar(j, i, align='center', alpha=0.5)
    plt.ylabel('Number of messages')
    plt.title('Emails per day')
    plt.show()
if __name__ == '__main__':
    # Run only when executed as a script.
    main()
| true
| true
|
f719c4fee092036cf2a37dc220d4280aca8e4828
| 665
|
py
|
Python
|
full-problems/studentRecord.py
|
vikas-t/DS-Algo
|
ea654d1cad5374c824c52da9d3815a9546eb43fa
|
[
"Apache-2.0"
] | null | null | null |
full-problems/studentRecord.py
|
vikas-t/DS-Algo
|
ea654d1cad5374c824c52da9d3815a9546eb43fa
|
[
"Apache-2.0"
] | null | null | null |
full-problems/studentRecord.py
|
vikas-t/DS-Algo
|
ea654d1cad5374c824c52da9d3815a9546eb43fa
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
#https://practice.geeksforgeeks.org/problems/student-record/0
def sol(records, n):
    """Print the best-average student(s): names space-separated, then the average.

    `records` is a flat list of n groups of four tokens
    (name, mark1, mark2, mark3).  Averages use integer division by 3;
    all students tied on the best average are printed in input order.
    """
    best_avg = 0
    best = []
    for start in range(0, n * 4, 4):
        avg = sum(int(mark) for mark in records[start + 1:start + 4]) // 3
        if avg > best_avg:
            # Strictly better average: discard previous leaders.
            best_avg = avg
            best = [(records[start], avg)]
        elif avg == best_avg:
            best.append((records[start], avg))
    for tied_name, tied_avg in best:
        print(tied_name, end=" ")
    # The shared average is printed once, after all tied names.
    print(tied_avg)
# print the result as stated in the problem
| 33.25
| 67
| 0.557895
|
def sol(records, n):
    """Print the best-average student(s): names space-separated, then the average.

    `records` is a flat list of n groups of (name, mark1, mark2, mark3);
    averages use integer division by 3 and ties are all printed.
    """
    mx = 0
    res = []
    for ni in range(0, n*4, 4):
        am = sum(map(int, records[ni+1:ni+4]))//3
        if am > mx:
            # Strictly better average: new sole leader.
            mx = am
            res = [(records[ni], am)]
        elif am == mx:
            res.append((records[ni], am))
    for name, marks in res:
        print(name, end=" ")
    # Printed once after all tied names (relies on the loop variable).
    print(marks)
| true
| true
|
f719c541df617120f9d4a9a665699e9251dae5ac
| 1,425
|
py
|
Python
|
angrmanagement/plugins/bughouse/data/component_tree.py
|
DennyDai/angr-management
|
8a4ba5dafbf2f4d2ba558528a0d1ae099a199a04
|
[
"BSD-2-Clause"
] | 474
|
2015-08-10T17:47:15.000Z
|
2022-03-31T21:10:55.000Z
|
angrmanagement/plugins/bughouse/data/component_tree.py
|
DennyDai/angr-management
|
8a4ba5dafbf2f4d2ba558528a0d1ae099a199a04
|
[
"BSD-2-Clause"
] | 355
|
2015-08-17T09:35:53.000Z
|
2022-03-31T21:29:52.000Z
|
angrmanagement/plugins/bughouse/data/component_tree.py
|
DennyDai/angr-management
|
8a4ba5dafbf2f4d2ba558528a0d1ae099a199a04
|
[
"BSD-2-Clause"
] | 95
|
2015-08-11T14:36:12.000Z
|
2022-03-31T23:01:01.000Z
|
from typing import List, Optional
class ComponentFunction:
    """A function identified by its load addresses within a binary.

    Identity (equality and hashing) is based on mapped_base and
    virtual_addr only; symbol_name is purely informational.
    """

    __slots__ = ('mapped_base', 'virtual_addr', 'symbol_name', )

    def __init__(self, mapped_base: int, virtual_addr: int, symbol_name: Optional[str]=None):
        self.mapped_base = mapped_base
        self.virtual_addr = virtual_addr
        self.symbol_name = symbol_name

    def __eq__(self, other):
        if not isinstance(other, ComponentFunction):
            return False
        return (self.mapped_base, self.virtual_addr) == \
               (other.mapped_base, other.virtual_addr)

    def __hash__(self):
        return hash((ComponentFunction, self.mapped_base, self.virtual_addr))
class ComponentTreeNode:
def __init__(self, name=None):
self.name = name
self.components: List['ComponentTreeNode'] = [ ]
self.functions: List[ComponentFunction] = [ ]
def __eq__(self, other):
return isinstance(other, ComponentTreeNode) \
and self.components == other.components \
and set(self.functions) == set(other.functions)
def __hash__(self):
return hash((ComponentTreeNode,
hash(tuple(self.components)),
hash(tuple(sorted((f.mapped_base + f.virtual_addr) for f in self.functions))),
)
)
class ComponentTree:
    """Container for a component hierarchy; `root` is None for an empty tree."""

    def __init__(self, root: Optional[ComponentTreeNode]=None):
        self.root = root
| 32.386364
| 99
| 0.627368
|
from typing import List, Optional
class ComponentFunction:
    """A function located by mapped base + virtual address.

    symbol_name is informational only; equality and hashing use the
    two addresses.
    """
    __slots__ = ('mapped_base', 'virtual_addr', 'symbol_name', )
    def __init__(self, mapped_base: int, virtual_addr: int, symbol_name: Optional[str]=None):
        self.mapped_base = mapped_base
        self.virtual_addr = virtual_addr
        self.symbol_name = symbol_name
    def __eq__(self, other):
        return isinstance(other, ComponentFunction) and \
            self.mapped_base == other.mapped_base and \
            self.virtual_addr == other.virtual_addr
    def __hash__(self):
        return hash((ComponentFunction, self.mapped_base, self.virtual_addr))
class ComponentTreeNode:
    """Tree node holding child components and this component's functions.

    Equality ignores `name`; it compares child components in order and
    functions as a set.  __hash__ is order-insensitive over functions.
    """
    def __init__(self, name=None):
        self.name = name
        self.components: List['ComponentTreeNode'] = [ ]
        self.functions: List[ComponentFunction] = [ ]
    def __eq__(self, other):
        return isinstance(other, ComponentTreeNode) \
               and self.components == other.components \
               and set(self.functions) == set(other.functions)
    def __hash__(self):
        return hash((ComponentTreeNode,
                     hash(tuple(self.components)),
                     hash(tuple(sorted((f.mapped_base + f.virtual_addr) for f in self.functions))),
                     )
                    )
class ComponentTree:
    """Holds the root of a component hierarchy (None for an empty tree)."""
    def __init__(self, root: Optional[ComponentTreeNode]=None):
        self.root = root
| true
| true
|
f719c5dcecb268d37900df93c57ea65672756916
| 2,829
|
py
|
Python
|
analysis/stats.py
|
jasonrute/puzzle_cube
|
7e05a21acd26cb30e729ba6a95e14e16c76c1780
|
[
"MIT"
] | 81
|
2018-06-17T17:02:24.000Z
|
2021-11-05T07:16:12.000Z
|
analysis/stats.py
|
jasonrute/puzzle_cube
|
7e05a21acd26cb30e729ba6a95e14e16c76c1780
|
[
"MIT"
] | 1
|
2018-09-20T08:04:19.000Z
|
2018-09-20T12:14:55.000Z
|
analysis/stats.py
|
jasonrute/puzzle_cube
|
7e05a21acd26cb30e729ba6a95e14e16c76c1780
|
[
"MIT"
] | 23
|
2018-02-20T21:19:49.000Z
|
2022-03-05T18:05:10.000Z
|
"""
Training Statics Tools
A class for loading statistics related to a particular rutraiining session.
"""
import numpy as np
#from scipy import stats
import pandas as pd
import os
def str_between(s, start, end):
    """Return the part of `s` after the first `start` and before the
    following `end` (IndexError if `start` is absent)."""
    after_start = s.split(start)[1]
    return after_start.split(end)[0]
def is_stat_file_version(file_name, version):
    """True when `file_name` is a stats HDF5 file for `version`
    (named 'stats_<version>_gen*.h5')."""
    prefix = "stats_{}_gen".format(version)
    return file_name.startswith(prefix) and file_name.endswith(".h5")
class TrainingStates:
    """Loads and combines training statistics for one or more versions.

    Statistics are read from HDF5 files named 'stats_<version>_gen<g>.h5'
    and concatenated into three DataFrames: per-generation, per-game and
    per-move statistics.
    """
    def __init__(self, versions, directory, verbose=True):
        self.stats_files = self.get_stat_files(versions, directory)
        if verbose:
            print("Loading files:")
            for f in self.stats_files:
                # BUGFIX: entries of self.stats_files already include the
                # directory (see get_stat_files); the old code printed
                # `directory + f`, doubling the directory prefix.
                print(f)
        self.generation_stats = self.load_stats('generation_stats')
        self.game_stats = self.load_stats('game_stats')
        self.move_stats = self.load_stats('self_play_stats')
    def get_stat_files(self, versions, directory):
        """Return full paths of matching stat files: versions newest-first,
        generations sorted within each version."""
        stat_files = []
        for version in reversed(versions):
            files = [directory + f for f in os.listdir(directory) if is_stat_file_version(f, version)]
            stat_files += list(sorted(files))
        return stat_files
    def load_stats(self, key_name):
        """Concatenate the `key_name` table from every stats file, tagging
        each row with its '_generation'; empty DataFrame when no files."""
        df_list = []
        for f in self.stats_files:
            path = f
            generation = str_between(f, "_gen", ".h5")
            df = pd.read_hdf(path, key=key_name)
            df['_generation'] = int(generation)
            df_list.append(df)
        if df_list:
            stats = pd.concat(df_list, ignore_index=True)
        else:
            return pd.DataFrame()
        return stats
    def first_move_stats(self):
        """Rows of move_stats for each game's first move.

        Note: There is an indexing issue (the index of first_play_stats is
        the orginal index while the index of game_stats is the game number).
        The easiest fix is to just use the values (an array) of the series
        and not the series itself.
        """
        return self.move_stats[self.move_stats['_step_id'] == 0]
    def found_target_on_first_move(self):
        """Boolean array: the first move was already on a shortest path."""
        return (self.first_move_stats()['shortest_path'] >= 0).values
    def lost_but_found_target_on_first_move(self):
        """Boolean series: found the target on move one but still lost."""
        return self.found_target_on_first_move() & ~self.game_stats['win']
    def win_but_did_not_find_target_on_first_move(self):
        """Boolean series: won without finding the target on move one."""
        return ~self.found_target_on_first_move() & self.game_stats['win']
if __name__ == '__main__':
    from pprint import pprint
    # Example usage: summarize the stats of one training run.
    versions = ['v0.9.3']
    save_dir = '../save/stats_v0.9.3/'
    #VERSIONS = ['v0.9.2.1', 'v0.9.2']
    #SAVE_DIR = '../save/stats_archive/'
    cube_stats = TrainingStates(versions, save_dir)
    pprint(cube_stats.generation_stats)
    pprint(np.mean(cube_stats.lost_but_found_target_on_first_move()))
    pprint(np.mean(cube_stats.win_but_did_not_find_target_on_first_move()))
| 32.147727
| 102
| 0.655355
|
import numpy as np
import pandas as pd
import os
def str_between(s, start, end):
    """Return the substring of `s` between the first `start` and the next `end`."""
    return (s.split(start))[1].split(end)[0]
def is_stat_file_version(file_name, version):
    """True for files named 'stats_<version>_gen*.h5'."""
    return file_name.startswith("stats_{}_gen".format(version)) and file_name.endswith(".h5")
class TrainingStates:
    """Loads per-generation, per-game and per-move statistics for the
    given versions from 'stats_<version>_gen<g>.h5' files."""
    def __init__(self, versions, directory, verbose=True):
        self.stats_files = self.get_stat_files(versions, directory)
        if verbose:
            print("Loading files:")
            for f in self.stats_files:
                # NOTE(review): f already includes `directory` (see
                # get_stat_files), so this prints the directory twice.
                print(directory + f)
        self.generation_stats = self.load_stats('generation_stats')
        self.game_stats = self.load_stats('game_stats')
        self.move_stats = self.load_stats('self_play_stats')
    def get_stat_files(self, versions, directory):
        """Full paths of matching stat files, versions newest-first."""
        stat_files = []
        for version in reversed(versions):
            files = [directory + f for f in os.listdir(directory) if is_stat_file_version(f, version)]
            stat_files += list(sorted(files))
        return stat_files
    def load_stats(self, key_name):
        """Concatenate table `key_name` across all files, tagging '_generation'."""
        df_list = []
        for f in self.stats_files:
            path = f
            generation = str_between(f, "_gen", ".h5")
            df = pd.read_hdf(path, key=key_name)
            df['_generation'] = int(generation)
            df_list.append(df)
        if df_list:
            stats = pd.concat(df_list, ignore_index=True)
        else:
            return pd.DataFrame()
        return stats
    def first_move_stats(self):
        """Rows for each game's first move (keeps the original index,
        not the game number — combine with game_stats via .values)."""
        return self.move_stats[self.move_stats['_step_id'] == 0]
    def found_target_on_first_move(self):
        """Boolean array: first move was already on a shortest path."""
        return (self.first_move_stats()['shortest_path'] >= 0).values
    def lost_but_found_target_on_first_move(self):
        """Boolean series: found the target on move one but still lost."""
        return self.found_target_on_first_move() & ~self.game_stats['win']
    def win_but_did_not_find_target_on_first_move(self):
        """Boolean series: won without finding the target on move one."""
        return ~self.found_target_on_first_move() & self.game_stats['win']
if __name__ == '__main__':
    from pprint import pprint
    # Example: summarize one training run.
    versions = ['v0.9.3']
    save_dir = '../save/stats_v0.9.3/'
    cube_stats = TrainingStates(versions, save_dir)
    pprint(cube_stats.generation_stats)
    pprint(np.mean(cube_stats.lost_but_found_target_on_first_move()))
    pprint(np.mean(cube_stats.win_but_did_not_find_target_on_first_move()))
| true
| true
|
f719c6c53b95cb7f32858726c85cf496a8c0b670
| 1,192
|
py
|
Python
|
lantz/drivers/thorlabs/pm100d.py
|
ZixiLi0520/lantz
|
a67120a65e6b66f394965ef0100529db7be3df0a
|
[
"BSD-3-Clause"
] | 6
|
2016-04-13T12:59:18.000Z
|
2020-06-24T17:43:04.000Z
|
lantz/drivers/thorlabs/pm100d.py
|
awsch/lantz
|
717f6962a471be7ceb61d1d8f6c6f381553df9c4
|
[
"BSD-3-Clause"
] | null | null | null |
lantz/drivers/thorlabs/pm100d.py
|
awsch/lantz
|
717f6962a471be7ceb61d1d8f6c6f381553df9c4
|
[
"BSD-3-Clause"
] | 6
|
2015-12-14T19:30:36.000Z
|
2020-06-29T21:16:01.000Z
|
# -*- coding: utf-8 -*-
"""
To connect the power meter you'll need to use the "Power meter driver switcher" application to switch to the PM100D (Ni-Visa) drivers.
Then the resource name should show up when exceuting:
import visa
visa.ResourceManager().list_resources()
"""
from lantz.messagebased import MessageBasedDriver
from lantz import Feat
class PM100D(MessageBasedDriver):
    """Thorlabs PM100D optical power meter driver (SCPI over VISA)."""
    DEFAULTS = {
        'COMMON': {
            'read_termination': '\n',
            'write_termination': '\n',
        },
    }
    @Feat(read_once=True)
    def idn(self):
        """Instrument identification string (*IDN?)."""
        return self.query('*IDN?')
    @Feat(units='W')
    def power(self):
        """Currently measured optical power, in watts."""
        return float(self.query('MEAS:POWER?'))
    @Feat(units='nm')
    def correction_wavelength(self):
        """Wavelength used for the sensor's responsivity correction, in nm."""
        return float(self.query('SENSE:CORRECTION:WAVELENGTH?'))
    @correction_wavelength.setter
    def correction_wavelength(self, wavelength):
        self.write('SENSE:CORRECTION:WAVELENGTH {}'.format(wavelength))
    @Feat()
    def correction_wavelength_range(self):
        """(min, max) supported correction wavelengths, as floats."""
        cmd = 'SENSE:CORRECTION:WAVELENGTH? {}'
        cmd_vals = ['MIN', 'MAX']
        return tuple(float(self.query(cmd.format(cmd_val))) for cmd_val in cmd_vals)
| 27.090909
| 134
| 0.655201
|
from lantz.messagebased import MessageBasedDriver
from lantz import Feat
class PM100D(MessageBasedDriver):
    """Thorlabs PM100D optical power meter driver (SCPI over VISA)."""
    DEFAULTS = {
        'COMMON': {
            'read_termination': '\n',
            'write_termination': '\n',
        },
    }
    @Feat(read_once=True)
    def idn(self):
        """Instrument identification string (*IDN?)."""
        return self.query('*IDN?')
    @Feat(units='W')
    def power(self):
        """Currently measured optical power, in watts."""
        return float(self.query('MEAS:POWER?'))
    @Feat(units='nm')
    def correction_wavelength(self):
        """Wavelength used for the sensor's responsivity correction, in nm."""
        return float(self.query('SENSE:CORRECTION:WAVELENGTH?'))
    @correction_wavelength.setter
    def correction_wavelength(self, wavelength):
        self.write('SENSE:CORRECTION:WAVELENGTH {}'.format(wavelength))
    @Feat()
    def correction_wavelength_range(self):
        """(min, max) supported correction wavelengths, as floats."""
        cmd = 'SENSE:CORRECTION:WAVELENGTH? {}'
        cmd_vals = ['MIN', 'MAX']
        return tuple(float(self.query(cmd.format(cmd_val))) for cmd_val in cmd_vals)
| true
| true
|
f719c7438aaad15d2be4ec5a66891319541f52ff
| 1,767
|
py
|
Python
|
Scripts/reader/gpro_corpus.py
|
lasigeBioTM/ULISBOA-at-SemEval-2017
|
415dc3ebbd2365aa7620a9b4feb1218fa837d7d5
|
[
"MIT"
] | 8
|
2018-05-10T10:27:18.000Z
|
2021-08-30T02:55:54.000Z
|
Scripts/reader/gpro_corpus.py
|
lasigeBioTM/ULISBOA-at-SemEval-2017
|
415dc3ebbd2365aa7620a9b4feb1218fa837d7d5
|
[
"MIT"
] | 4
|
2018-10-24T13:32:45.000Z
|
2021-02-05T11:48:04.000Z
|
Scripts/reader/gpro_corpus.py
|
lasigeBioTM/ULISBOA-at-SemEval-2017
|
415dc3ebbd2365aa7620a9b4feb1218fa837d7d5
|
[
"MIT"
] | 5
|
2020-07-22T06:13:56.000Z
|
2020-11-18T14:48:39.000Z
|
import codecs
import logging
import pickle
from chemdner_corpus import ChemdnerCorpus
class GproCorpus(ChemdnerCorpus):
    """Chemdner GPRO corpus from BioCreative V."""
    def __init__(self, corpusdir, **kwargs):
        super(GproCorpus, self).__init__(corpusdir, **kwargs)
        # GPRO mention subtypes present in this corpus.
        self.subtypes = ["NESTED", "IDENTIFIER", "FULL_NAME", "ABBREVIATION"]
    def load_corpus(self, corenlpserver):
        """Load documents from the matching ChemdnerCorpus pickle.

        Assumes the corpus was already processed as a ChemdnerCorpus and
        pickled under data/; `corenlpserver` is unused here.
        :param corenlpserver:
        :return:
        """
        ps = self.path.split("/")
        cemp_path = "data/chemdner_" + "_".join(ps[-1].split("_")[1:]) + ".pickle"
        # NOTE(review): pickle.load can run arbitrary code — trusted files only.
        corpus = pickle.load(open(cemp_path, 'rb'))
        self.documents = corpus.documents
    def load_annotations(self, ann_dir, etype="protein"):
        """Read a tab-separated GPRO annotation file and tag entities.

        Each line holds pmid, doct, start, end, text, type, db_id.
        GPRO_TYPE_2 annotations are skipped; unknown pmids are logged.
        """
        logging.info("loading annotations file {}...".format(ann_dir))
        with codecs.open(ann_dir, 'r', "utf-8") as inputfile:
            for line in inputfile:
                pmid, doct, start, end, text, t, dbid = line.strip().split('\t')
                if dbid != "GPRO_TYPE_2" and pmid in self.documents:
                    # For now, ignore the database ID information.
                    self.documents[pmid].tag_chemdner_entity(int(start), int(end),
                                                             t, text=text, doct=doct)
                elif pmid not in self.documents:
                    logging.info("%s not found!" % pmid)
| 43.097561
| 91
| 0.57442
|
import codecs
import logging
import pickle
from chemdner_corpus import ChemdnerCorpus
class GproCorpus(ChemdnerCorpus):
    """Chemdner GPRO corpus from BioCreative V."""
    def __init__(self, corpusdir, **kwargs):
        super(GproCorpus, self).__init__(corpusdir, **kwargs)
        # GPRO mention subtypes present in this corpus.
        self.subtypes = ["NESTED", "IDENTIFIER", "FULL_NAME", "ABBREVIATION"]
    def load_corpus(self, corenlpserver):
        """Load documents from the matching ChemdnerCorpus pickle under data/."""
        ps = self.path.split("/")
        cemp_path = "data/chemdner_" + "_".join(ps[-1].split("_")[1:]) + ".pickle"
        # NOTE(review): pickle.load can run arbitrary code — trusted files only.
        corpus = pickle.load(open(cemp_path, 'rb'))
        self.documents = corpus.documents
    def load_annotations(self, ann_dir, etype="protein"):
        """Read a tab-separated GPRO annotation file and tag entities.

        Each line holds pmid, doct, start, end, text, type, db_id;
        GPRO_TYPE_2 annotations are skipped, unknown pmids are logged.
        """
        logging.info("loading annotations file {}...".format(ann_dir))
        with codecs.open(ann_dir, 'r', "utf-8") as inputfile:
            for line in inputfile:
                pmid, doct, start, end, text, t, dbid = line.strip().split('\t')
                if dbid != "GPRO_TYPE_2" and pmid in self.documents:
                    self.documents[pmid].tag_chemdner_entity(int(start), int(end),
                                                             t, text=text, doct=doct)
                elif pmid not in self.documents:
                    logging.info("%s not found!" % pmid)
| true
| true
|
f719c759c9af0450e25345c13d5b68b9e3d98654
| 788
|
py
|
Python
|
model.py
|
OrBin/N-Gram-Language-Model
|
e196758083bbed386dd1a24733cb956c8a36aa79
|
[
"MIT"
] | null | null | null |
model.py
|
OrBin/N-Gram-Language-Model
|
e196758083bbed386dd1a24733cb956c8a36aa79
|
[
"MIT"
] | null | null | null |
model.py
|
OrBin/N-Gram-Language-Model
|
e196758083bbed386dd1a24733cb956c8a36aa79
|
[
"MIT"
] | null | null | null |
import utils
class Model:
    """N-gram language model mapping each (n-1)-gram to its most likely
    next character.

    The model file lists n-grams with their -log(p); for every (n-1)-gram
    prefix only the character with the smallest -log(p) — i.e. the highest
    probability — is kept.
    """

    def __init__(self, file_path):
        # First pass: prefix -> {next_char: -log(p)}.
        self.model_tree = {}
        with open(file_path, 'r', encoding="utf8") as model_file:
            for line in model_file:
                chars, minus_log_p = utils.parse_model_file_line(line)
                prefix = ''.join(chars[:-1])
                self.model_tree.setdefault(prefix, {})[chars[-1]] = minus_log_p
        # Second pass: collapse each inner dict to its argmin character
        # (first one wins on ties, matching insertion order).
        for prefix, candidates in self.model_tree.items():
            self.model_tree[prefix] = min(candidates.items(), key=lambda kv: kv[1])[0]

    def __getitem__(self, n_1_gram):
        """Return the most probable next character for `n_1_gram`."""
        return self.model_tree[n_1_gram]
| 28.142857
| 72
| 0.695431
|
import utils
class Model:
    """Maps each (n-1)-gram prefix to its most probable next character.

    Built from a model file whose lines give an n-gram and its -log(p);
    per prefix, only the character with the smallest -log(p) is kept.
    """
    def __init__(self, file_path):
        with open(file_path, 'r', encoding="utf8") as model_file:
            # prefix -> {next_char: -log(p)}
            self.model_tree = {}
            for line in model_file:
                chars, minus_log_p = utils.parse_model_file_line(line)
                n_1_gram = ''.join(chars[:-1])
                last_char = chars[-1]
                if n_1_gram not in self.model_tree:
                    self.model_tree[n_1_gram] = {}
                self.model_tree[n_1_gram][last_char] = minus_log_p
        # Collapse each inner dict to the character with minimal -log(p)
        # (first one wins on ties).
        for n_1_gram in self.model_tree:
            min_n_char, min_value = next(iter(self.model_tree[n_1_gram].items()))
            for n_char, value in self.model_tree[n_1_gram].items():
                if value < min_value:
                    min_n_char, min_value = n_char, value
            self.model_tree[n_1_gram] = min_n_char
    def __getitem__(self, n_1_gram):
        """Return the most probable next character for `n_1_gram`."""
        return self.model_tree[n_1_gram]
| true
| true
|
f719c75eed3a13b4d4eda1dd71c9f05b6e0ba238
| 349
|
py
|
Python
|
tools/PRESUBMIT.py
|
RiyoCoder/v8
|
e073edfc7dc990cc5f71c4e51ac27b19be16fcb7
|
[
"BSD-3-Clause"
] | 2
|
2020-08-27T09:36:44.000Z
|
2020-09-23T14:01:12.000Z
|
tools/PRESUBMIT.py
|
RiyoCoder/v8
|
e073edfc7dc990cc5f71c4e51ac27b19be16fcb7
|
[
"BSD-3-Clause"
] | null | null | null |
tools/PRESUBMIT.py
|
RiyoCoder/v8
|
e073edfc7dc990cc5f71c4e51ac27b19be16fcb7
|
[
"BSD-3-Clause"
] | 1
|
2019-10-08T06:20:30.000Z
|
2019-10-08T06:20:30.000Z
|
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
def CheckChangeOnCommit(input_api, output_api):
    """Presubmit hook: run the unit tests under 'unittests' before commit."""
    unit_tests = input_api.canned_checks.GetUnitTestsInDirectory(
        input_api, output_api, 'unittests')
    return input_api.RunTests(unit_tests)
| 38.777778
| 72
| 0.782235
|
def CheckChangeOnCommit(input_api, output_api):
tests = input_api.canned_checks.GetUnitTestsInDirectory(
input_api, output_api, 'unittests')
return input_api.RunTests(tests)
| true
| true
|
f719c7a7aac77a98a8d3c8df7aaf015dd69b5b0b
| 1,102
|
py
|
Python
|
ch_06/std_from_mean_kde.py
|
ags-ds/Hands-On-Data-Analysis-with-Pandas-By-Ags
|
f4ae6e3c3ef3c9ed9b11185724e1ea70a2f63f14
|
[
"MIT"
] | 260
|
2019-01-21T01:38:39.000Z
|
2022-03-26T18:49:21.000Z
|
ch_06/std_from_mean_kde.py
|
ags-ds/Hands-On-Data-Analysis-with-Pandas-By-Ags
|
f4ae6e3c3ef3c9ed9b11185724e1ea70a2f63f14
|
[
"MIT"
] | 8
|
2020-03-13T15:48:56.000Z
|
2021-08-23T21:43:44.000Z
|
ch_06/std_from_mean_kde.py
|
ags-ds/Hands-On-Data-Analysis-with-Pandas-By-Ags
|
f4ae6e3c3ef3c9ed9b11185724e1ea70a2f63f14
|
[
"MIT"
] | 665
|
2019-07-27T18:28:20.000Z
|
2022-03-23T08:20:35.000Z
|
import itertools
def std_from_mean_kde(data):
"""
Plot the KDE of the pandas series along with vertical
reference lines for each standard deviation from the mean.
Parameters:
- data: pandas Series with numeric data
Returns:
Matplotlib Axes object.
"""
mean_mag, std_mean = data.mean(), data.std()
ax = data.plot(kind='kde')
ax.axvline(mean_mag, color='b', alpha=0.2, label='mean')
colors = ['green', 'orange', 'red']
multipliers = [1, 2, 3]
signs = ['-', '+']
for sign, (color, multiplier) in itertools.product(
signs, zip(colors, multipliers)
):
adjustment = multiplier * std_mean
if sign == '-':
value = mean_mag - adjustment
label = '{} {}{}{}'.format(
r'$\mu$',
r'$\pm$',
multiplier,
r'$\sigma$'
)
else:
value = mean_mag + adjustment
label = None
ax.axvline(value, color=color, label=label, alpha=0.5)
ax.legend()
return ax
| 26.878049
| 62
| 0.520871
|
import itertools
def std_from_mean_kde(data):
mean_mag, std_mean = data.mean(), data.std()
ax = data.plot(kind='kde')
ax.axvline(mean_mag, color='b', alpha=0.2, label='mean')
colors = ['green', 'orange', 'red']
multipliers = [1, 2, 3]
signs = ['-', '+']
for sign, (color, multiplier) in itertools.product(
signs, zip(colors, multipliers)
):
adjustment = multiplier * std_mean
if sign == '-':
value = mean_mag - adjustment
label = '{} {}{}{}'.format(
r'$\mu$',
r'$\pm$',
multiplier,
r'$\sigma$'
)
else:
value = mean_mag + adjustment
label = None
ax.axvline(value, color=color, label=label, alpha=0.5)
ax.legend()
return ax
| true
| true
|
f719c8336db951828b5e48810801b98858d489e2
| 1,559
|
py
|
Python
|
ladder/urls.py
|
jzahedieh/django-tennis-ladder
|
03a9fc9ec6d0830ac1d6648428eca11755eabb00
|
[
"MIT"
] | 13
|
2015-04-30T21:07:20.000Z
|
2021-01-08T13:52:14.000Z
|
ladder/urls.py
|
jzahedieh/django-tennis-ladder
|
03a9fc9ec6d0830ac1d6648428eca11755eabb00
|
[
"MIT"
] | 13
|
2015-04-05T22:48:14.000Z
|
2021-12-12T17:29:16.000Z
|
ladder/urls.py
|
jzahedieh/django-tennis-ladder
|
03a9fc9ec6d0830ac1d6648428eca11755eabb00
|
[
"MIT"
] | 5
|
2016-10-12T16:24:09.000Z
|
2019-11-26T10:16:44.000Z
|
from django.urls import re_path
from ladder import views
urlpatterns = [
re_path(r'^$', views.index, name='index'),
re_path(r'^list/$', views.list_rounds, name='list'),
re_path(r'^current/$', views.current_season_redirect, name='current'),
# ex: /2013/round/1/
re_path(r'^(?P<year>\d+)/round/(?P<season_round>\d+)/$', views.season, name='season'),
# ex: /2013/round/1/division/1-n
re_path(r'^(?P<year>\d+)/round/(?P<season_round>\d+)/division/(?P<division_id>\w+)/$', views.ladder, name='ladder'),
# ex: /2013/round/1/division/1-n/add/
re_path(r'^(?P<year>\d+)/round/(?P<season_round>\d+)/division/(?P<division_id>\w+)/add/$', views.add, name='add'),
# ex: /head_to_head/1/vs/2
re_path(r'^head_to_head/(?P<player_id>\d+)/vs/(?P<opponent_id>\w+)/$', views.head_to_head, name='head_to_head'),
# ex: /player/1/
re_path(r'^player/(?P<player_id>\d+)/$', views.player_history, name='player_history'),
# ex: /player/
re_path(r'^player/search/$', views.player_search, name='player_search'),
re_path(r'^player/h2h/(?P<player_id>\d+)/$', views.h2h_search, name='h2h_search'),
re_path(r'^player/results/$', views.player_result, name='player_result'),
re_path(r'^season/ajax/stats/$', views.season_ajax_stats, name='season_ajax_stats'),
re_path(r'^season/ajax/progress/$', views.season_ajax_progress, name='season_ajax_progress'),
re_path(r'^result/entry/$', views.result_entry, name='result_entry'),
re_path(r'^result/entry/add/$', views.result_entry_add, name='result_entry_add'),
]
| 55.678571
| 120
| 0.664529
|
from django.urls import re_path
from ladder import views
urlpatterns = [
re_path(r'^$', views.index, name='index'),
re_path(r'^list/$', views.list_rounds, name='list'),
re_path(r'^current/$', views.current_season_redirect, name='current'),
re_path(r'^(?P<year>\d+)/round/(?P<season_round>\d+)/$', views.season, name='season'),
re_path(r'^(?P<year>\d+)/round/(?P<season_round>\d+)/division/(?P<division_id>\w+)/$', views.ladder, name='ladder'),
re_path(r'^(?P<year>\d+)/round/(?P<season_round>\d+)/division/(?P<division_id>\w+)/add/$', views.add, name='add'),
re_path(r'^head_to_head/(?P<player_id>\d+)/vs/(?P<opponent_id>\w+)/$', views.head_to_head, name='head_to_head'),
re_path(r'^player/(?P<player_id>\d+)/$', views.player_history, name='player_history'),
re_path(r'^player/search/$', views.player_search, name='player_search'),
re_path(r'^player/h2h/(?P<player_id>\d+)/$', views.h2h_search, name='h2h_search'),
re_path(r'^player/results/$', views.player_result, name='player_result'),
re_path(r'^season/ajax/stats/$', views.season_ajax_stats, name='season_ajax_stats'),
re_path(r'^season/ajax/progress/$', views.season_ajax_progress, name='season_ajax_progress'),
re_path(r'^result/entry/$', views.result_entry, name='result_entry'),
re_path(r'^result/entry/add/$', views.result_entry_add, name='result_entry_add'),
]
| true
| true
|
f719c848267dc37edb40ddf8031a05c5fa16b620
| 3,306
|
py
|
Python
|
myFileClass.py
|
saewoonam/thorium_daq_uqd
|
249ab89338591c833009711e2f7997dbe2898fbc
|
[
"MIT"
] | null | null | null |
myFileClass.py
|
saewoonam/thorium_daq_uqd
|
249ab89338591c833009711e2f7997dbe2898fbc
|
[
"MIT"
] | null | null | null |
myFileClass.py
|
saewoonam/thorium_daq_uqd
|
249ab89338591c833009711e2f7997dbe2898fbc
|
[
"MIT"
] | null | null | null |
import sys
import sqlite3
import hashlib
import time
import logging
import os.path
logger = logging.getLogger(__name__)
logpath = os.path.dirname(__file__)
logpath = os.path.join(logpath, 'logs/')
fileHandler = logging.FileHandler(logpath + __name__ + '.log')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fileHandler.setFormatter(formatter)
fileHandler.setLevel(logging.INFO)
logger.addHandler(fileHandler)
def create(fname='hashes.sqlite'):
conn = sqlite3.connect(fname)
c = conn.cursor()
# Create table
c.execute(
'CREATE TABLE hashes(filename text, md5 text, sha1 text, hashtime real)'
)
conn.commit()
# We can also close the connection if we are done with it.
# Just be sure any changes have been committed or they will be lost.
conn.close()
# based on
# http://stackoverflow.com/questions/16085292/subclassing-file-objects-to-extend-open-and-close-operations-in-python-3
class _file_obj(object):
"""Check if `f` is a file name and open the file in `mode`.
A context manager."""
hashdb = None
def __init__(self, f, mode):
if f is None:
self.file = {
'r': sys.stdin,
'a': sys.stdout,
'w': sys.stdout
}[mode[0]]
self.none = True
elif isinstance(f, str):
self.file = open(f, mode)
else:
self.file = f
self.close_file = (self.file is not f)
self.md5 = hashlib.md5()
self.sha1 = hashlib.sha1()
# self.hashdb = None
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
if (not self.close_file) or hasattr(self, 'none'):
return # do nothing
# clean up
exit = getattr(self.file, '__exit__', None)
if exit is not None:
return exit(*args, **kwargs)
else:
exit = getattr(self.file, 'close', None)
if exit is not None:
exit()
def write(self, rawdata):
byteswritten = self.file.tell()
res = self.file.write(rawdata)
# if res is not None: # It is None in python2
# logger.error('Problem with writing to file, res: %r' % res)
byteswritten = self.file.tell() - byteswritten
self.md5.update(rawdata)
self.sha1.update(rawdata)
# if self.hashdb is not None:
# print('md5: %s, sha1: %s'%(self.md5.hexdigest(),
# self.sha1.hexdigest()))
# self.updatehashdb()
return byteswritten
def close(self):
if self.hashdb is not None:
logger.info('md5: %s, sha1: %s' % (self.md5.hexdigest(),
self.sha1.hexdigest()))
self.updatehashdb()
return self.file.close()
def updatehashdb(self):
conn = sqlite3.connect(self.hashdb)
c = conn.cursor()
c.execute("INSERT INTO hashes VALUES (?,?,?,?)",
(self.file.name, self.md5.hexdigest(), self.sha1.hexdigest(),
time.time()))
conn.commit()
conn.close()
def __getattr__(self, attr):
return getattr(self.file, attr)
def __iter__(self):
return iter(self.file)
| 30.611111
| 119
| 0.575015
|
import sys
import sqlite3
import hashlib
import time
import logging
import os.path
logger = logging.getLogger(__name__)
logpath = os.path.dirname(__file__)
logpath = os.path.join(logpath, 'logs/')
fileHandler = logging.FileHandler(logpath + __name__ + '.log')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fileHandler.setFormatter(formatter)
fileHandler.setLevel(logging.INFO)
logger.addHandler(fileHandler)
def create(fname='hashes.sqlite'):
conn = sqlite3.connect(fname)
c = conn.cursor()
c.execute(
'CREATE TABLE hashes(filename text, md5 text, sha1 text, hashtime real)'
)
conn.commit()
conn.close()
class _file_obj(object):
hashdb = None
def __init__(self, f, mode):
if f is None:
self.file = {
'r': sys.stdin,
'a': sys.stdout,
'w': sys.stdout
}[mode[0]]
self.none = True
elif isinstance(f, str):
self.file = open(f, mode)
else:
self.file = f
self.close_file = (self.file is not f)
self.md5 = hashlib.md5()
self.sha1 = hashlib.sha1()
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
if (not self.close_file) or hasattr(self, 'none'):
return
exit = getattr(self.file, '__exit__', None)
if exit is not None:
return exit(*args, **kwargs)
else:
exit = getattr(self.file, 'close', None)
if exit is not None:
exit()
def write(self, rawdata):
byteswritten = self.file.tell()
res = self.file.write(rawdata)
written = self.file.tell() - byteswritten
self.md5.update(rawdata)
self.sha1.update(rawdata)
return byteswritten
def close(self):
if self.hashdb is not None:
logger.info('md5: %s, sha1: %s' % (self.md5.hexdigest(),
self.sha1.hexdigest()))
self.updatehashdb()
return self.file.close()
def updatehashdb(self):
conn = sqlite3.connect(self.hashdb)
c = conn.cursor()
c.execute("INSERT INTO hashes VALUES (?,?,?,?)",
(self.file.name, self.md5.hexdigest(), self.sha1.hexdigest(),
time.time()))
conn.commit()
conn.close()
def __getattr__(self, attr):
return getattr(self.file, attr)
def __iter__(self):
return iter(self.file)
| true
| true
|
f719c9d44f62e65378d7e3d7e79d5a07e67b6c18
| 1,925
|
py
|
Python
|
tests/test_swf/models/test_generic_type.py
|
symroe/moto
|
4e106995af6f2820273528fca8a4e9ee288690a5
|
[
"Apache-2.0"
] | 5,460
|
2015-01-01T01:11:17.000Z
|
2022-03-31T23:45:38.000Z
|
tests/test_swf/models/test_generic_type.py
|
symroe/moto
|
4e106995af6f2820273528fca8a4e9ee288690a5
|
[
"Apache-2.0"
] | 4,475
|
2015-01-05T19:37:30.000Z
|
2022-03-31T13:55:12.000Z
|
tests/test_swf/models/test_generic_type.py
|
symroe/moto
|
4e106995af6f2820273528fca8a4e9ee288690a5
|
[
"Apache-2.0"
] | 1,831
|
2015-01-14T00:00:44.000Z
|
2022-03-31T20:30:04.000Z
|
from moto.swf.models import GenericType
import sure # noqa # pylint: disable=unused-import
# Tests for GenericType (ActivityType, WorkflowType)
class FooType(GenericType):
@property
def kind(self):
return "foo"
@property
def _configuration_keys(self):
return ["justAnExampleTimeout"]
def test_type_short_dict_representation():
_type = FooType("test-foo", "v1.0")
_type.to_short_dict().should.equal({"name": "test-foo", "version": "v1.0"})
def test_type_medium_dict_representation():
_type = FooType("test-foo", "v1.0")
_type.to_medium_dict()["fooType"].should.equal(_type.to_short_dict())
_type.to_medium_dict()["status"].should.equal("REGISTERED")
_type.to_medium_dict().should.contain("creationDate")
_type.to_medium_dict().should_not.contain("deprecationDate")
_type.to_medium_dict().should_not.contain("description")
_type.description = "foo bar"
_type.to_medium_dict()["description"].should.equal("foo bar")
_type.status = "DEPRECATED"
_type.to_medium_dict().should.contain("deprecationDate")
def test_type_full_dict_representation():
_type = FooType("test-foo", "v1.0")
_type.to_full_dict()["typeInfo"].should.equal(_type.to_medium_dict())
_type.to_full_dict()["configuration"].should.equal({})
_type.task_list = "foo"
_type.to_full_dict()["configuration"]["defaultTaskList"].should.equal(
{"name": "foo"}
)
_type.just_an_example_timeout = "60"
_type.to_full_dict()["configuration"]["justAnExampleTimeout"].should.equal("60")
_type.non_whitelisted_property = "34"
keys = _type.to_full_dict()["configuration"].keys()
sorted(keys).should.equal(["defaultTaskList", "justAnExampleTimeout"])
def test_type_string_representation():
_type = FooType("test-foo", "v1.0")
str(_type).should.equal(
"FooType(name: test-foo, version: v1.0, status: REGISTERED)"
)
| 32.627119
| 84
| 0.701299
|
from moto.swf.models import GenericType
import sure
@property
def kind(self):
return "foo"
@property
def _configuration_keys(self):
return ["justAnExampleTimeout"]
def test_type_short_dict_representation():
_type = FooType("test-foo", "v1.0")
_type.to_short_dict().should.equal({"name": "test-foo", "version": "v1.0"})
def test_type_medium_dict_representation():
_type = FooType("test-foo", "v1.0")
_type.to_medium_dict()["fooType"].should.equal(_type.to_short_dict())
_type.to_medium_dict()["status"].should.equal("REGISTERED")
_type.to_medium_dict().should.contain("creationDate")
_type.to_medium_dict().should_not.contain("deprecationDate")
_type.to_medium_dict().should_not.contain("description")
_type.description = "foo bar"
_type.to_medium_dict()["description"].should.equal("foo bar")
_type.status = "DEPRECATED"
_type.to_medium_dict().should.contain("deprecationDate")
def test_type_full_dict_representation():
_type = FooType("test-foo", "v1.0")
_type.to_full_dict()["typeInfo"].should.equal(_type.to_medium_dict())
_type.to_full_dict()["configuration"].should.equal({})
_type.task_list = "foo"
_type.to_full_dict()["configuration"]["defaultTaskList"].should.equal(
{"name": "foo"}
)
_type.just_an_example_timeout = "60"
_type.to_full_dict()["configuration"]["justAnExampleTimeout"].should.equal("60")
_type.non_whitelisted_property = "34"
keys = _type.to_full_dict()["configuration"].keys()
sorted(keys).should.equal(["defaultTaskList", "justAnExampleTimeout"])
def test_type_string_representation():
_type = FooType("test-foo", "v1.0")
str(_type).should.equal(
"FooType(name: test-foo, version: v1.0, status: REGISTERED)"
)
| true
| true
|
f719ca25ac5cfde9937fa4a3c1d7f11e2bc44eb3
| 427
|
py
|
Python
|
data/scripts/templates/object/mobile/shared_r2_space.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/mobile/shared_r2_space.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/mobile/shared_r2_space.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_r2_space.iff"
result.attribute_template_id = 9
result.stfName("droid_name","r2_base")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 25.117647
| 54
| 0.71897
| true
| true
|
|
f719cc3064aeb246e474124454e307d2ac046ca9
| 4,087
|
py
|
Python
|
tests/unit_tests/routes_test.py
|
schmidtbri/rest-model-service
|
0d1705cb62e6a942f90150da3bcf51e3e1265a25
|
[
"BSD-3-Clause"
] | 1
|
2021-11-10T19:48:35.000Z
|
2021-11-10T19:48:35.000Z
|
tests/unit_tests/routes_test.py
|
schmidtbri/rest-model-service
|
0d1705cb62e6a942f90150da3bcf51e3e1265a25
|
[
"BSD-3-Clause"
] | 1
|
2022-03-15T12:36:46.000Z
|
2022-03-15T12:36:46.000Z
|
tests/unit_tests/routes_test.py
|
schmidtbri/rest-model-service
|
0d1705cb62e6a942f90150da3bcf51e3e1265a25
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from pathlib import Path
import unittest
import json
from starlette.testclient import TestClient
from ml_base.utilities import ModelManager
os.chdir(Path(__file__).resolve().parent.parent.parent)
os.environ["REST_CONFIG"] = "examples/rest_config.yaml"
from rest_model_service.main import app, create_app
from rest_model_service.configuration import Model
class RoutesTests(unittest.TestCase):
def test_root(self):
# arrange
client = TestClient(app)
# act
response = client.get("/")
# assert
self.assertTrue(response.status_code == 200)
# cleanup
model_manager = ModelManager()
model_manager.clear_instance()
def test_get_models(self):
# arrange
client = TestClient(app)
# act
response = client.get("/api/models")
# assert
self.assertTrue(response.status_code == 200)
self.assertTrue(response.json() == {
"models":
[
{
"display_name": "Iris Model",
"qualified_name": "iris_model",
"description": "Model for predicting the species of a flower based on its measurements.",
"version": "1.0.0"
}
]
})
# cleanup
model_manager = ModelManager()
model_manager.clear_instance()
def test_prediction(self):
# arrange
client = TestClient(app)
# act
response = client.post("/api/models/iris_model/prediction", data=json.dumps({
"sepal_length": 6.0,
"sepal_width": 5.0,
"petal_length": 3.0,
"petal_width": 2.0
}))
# assert
self.assertTrue(response.status_code == 200)
self.assertTrue(response.json() == {
"species": "Iris setosa"
})
# cleanup
model_manager = ModelManager()
model_manager.clear_instance()
def test_prediction_with_bad_data(self):
# arrange
app = create_app("REST Model Service", [Model(qualified_name="iris_model",
class_path="tests.mocks.IrisModel",
create_endpoint=True)])
client = TestClient(app)
# act
response = client.post("/api/models/iris_model/prediction", data=json.dumps({
"sepal_length": 16.0,
"sepal_width": 5.0,
"petal_length": 3.0,
"petal_width": 2.0
}))
# assert
self.assertTrue(response.status_code == 422)
# cleanup
model_manager = ModelManager()
model_manager.clear_instance()
def test_prediction_with_bad_configuration(self):
# arrange, act, assert
with self.assertRaises(ValueError) as e:
app = create_app("REST Model Service", [Model(qualified_name="asdf",
class_path="tests.mocks.IrisModel",
create_endpoint=True)])
# cleanup
model_manager = ModelManager()
model_manager.clear_instance()
def test_prediction_with_no_endpoint(self):
# arrange
app = create_app("REST Model Service", [Model(qualified_name="iris_model",
class_path="tests.mocks.IrisModel",
create_endpoint=False)])
client = TestClient(app)
# act
response = client.post("/api/models/iris_model/prediction", data=json.dumps({
"sepal_length": 16.0,
"sepal_width": 5.0,
"petal_length": 3.0,
"petal_width": 2.0
}))
# assert
self.assertTrue(response.status_code == 404)
# cleanup
model_manager = ModelManager()
model_manager.clear_instance()
if __name__ == '__main__':
unittest.main()
| 29.402878
| 113
| 0.54025
|
import os
from pathlib import Path
import unittest
import json
from starlette.testclient import TestClient
from ml_base.utilities import ModelManager
os.chdir(Path(__file__).resolve().parent.parent.parent)
os.environ["REST_CONFIG"] = "examples/rest_config.yaml"
from rest_model_service.main import app, create_app
from rest_model_service.configuration import Model
class RoutesTests(unittest.TestCase):
def test_root(self):
client = TestClient(app)
response = client.get("/")
self.assertTrue(response.status_code == 200)
model_manager = ModelManager()
model_manager.clear_instance()
def test_get_models(self):
client = TestClient(app)
response = client.get("/api/models")
self.assertTrue(response.status_code == 200)
self.assertTrue(response.json() == {
"models":
[
{
"display_name": "Iris Model",
"qualified_name": "iris_model",
"description": "Model for predicting the species of a flower based on its measurements.",
"version": "1.0.0"
}
]
})
model_manager = ModelManager()
model_manager.clear_instance()
def test_prediction(self):
client = TestClient(app)
response = client.post("/api/models/iris_model/prediction", data=json.dumps({
"sepal_length": 6.0,
"sepal_width": 5.0,
"petal_length": 3.0,
"petal_width": 2.0
}))
self.assertTrue(response.status_code == 200)
self.assertTrue(response.json() == {
"species": "Iris setosa"
})
model_manager = ModelManager()
model_manager.clear_instance()
def test_prediction_with_bad_data(self):
app = create_app("REST Model Service", [Model(qualified_name="iris_model",
class_path="tests.mocks.IrisModel",
create_endpoint=True)])
client = TestClient(app)
response = client.post("/api/models/iris_model/prediction", data=json.dumps({
"sepal_length": 16.0,
"sepal_width": 5.0,
"petal_length": 3.0,
"petal_width": 2.0
}))
self.assertTrue(response.status_code == 422)
model_manager = ModelManager()
model_manager.clear_instance()
def test_prediction_with_bad_configuration(self):
with self.assertRaises(ValueError) as e:
app = create_app("REST Model Service", [Model(qualified_name="asdf",
class_path="tests.mocks.IrisModel",
create_endpoint=True)])
model_manager = ModelManager()
model_manager.clear_instance()
def test_prediction_with_no_endpoint(self):
app = create_app("REST Model Service", [Model(qualified_name="iris_model",
class_path="tests.mocks.IrisModel",
create_endpoint=False)])
client = TestClient(app)
response = client.post("/api/models/iris_model/prediction", data=json.dumps({
"sepal_length": 16.0,
"sepal_width": 5.0,
"petal_length": 3.0,
"petal_width": 2.0
}))
self.assertTrue(response.status_code == 404)
model_manager = ModelManager()
model_manager.clear_instance()
if __name__ == '__main__':
unittest.main()
| true
| true
|
f719cd368c417026551c16e8dd7e7961bff48f66
| 7,466
|
py
|
Python
|
inverted-index/src/test_inverted_index.py
|
Illumaria/made-python-2020
|
7ec219ff1a5116925027646810ca4b294b1080d9
|
[
"MIT"
] | 2
|
2021-07-08T10:59:44.000Z
|
2021-09-06T07:44:24.000Z
|
inverted-index/src/test_inverted_index.py
|
Illumaria/made-python
|
7ec219ff1a5116925027646810ca4b294b1080d9
|
[
"MIT"
] | null | null | null |
inverted-index/src/test_inverted_index.py
|
Illumaria/made-python
|
7ec219ff1a5116925027646810ca4b294b1080d9
|
[
"MIT"
] | null | null | null |
from argparse import Namespace
from textwrap import dedent
import pytest
from inverted_index import InvertedIndex
from inverted_index import build_inverted_index
from inverted_index import DEFAULT_INVERTED_INDEX_SAVE_PATH
from inverted_index import callback_query, process_queries
from inverted_index import callback_build, process_build
from inverted_index import load_documents
from storage_policy import ArrayStoragePolicy
DATASET_BIG_FPATH = "../resources/wikipedia_sample"
DATASET_SMALL_FPATH = "../resources/small_wikipedia_sample"
DATASET_TINY_FPATH = "../resources/tiny_wikipedia_sample"
def test_can_load_documents_v1():
documents = load_documents(DATASET_TINY_FPATH)
etalon_documents = {
123: "some words A_word and nothing",
2: "some word B_word in this dataset",
5: "famous_phrases to be or not to be",
37: "all words such as A_word and B_word are here",
}
assert etalon_documents == documents, (
"load_documents incorrectly loaded dataset"
)
def test_can_load_documents_v2(tmpdir):
dataset_str = dedent("""\
123\tsome words A_word and nothing
2\tsome word B_word in this dataset
5\tfamous_phrases to be or not to be
37\tall words such as A_word and B_word are here
""")
dataset_fio = tmpdir.join("tiny.dataset")
dataset_fio.write(dataset_str)
documents = load_documents(dataset_fio)
etalon_documents = {
123: "some words A_word and nothing",
2: "some word B_word in this dataset",
5: "famous_phrases to be or not to be",
37: "all words such as A_word and B_word are here",
}
assert etalon_documents == documents, (
"load_documents incorrectly loaded dataset"
)
DATASET_TINY_STR = dedent("""\
123\tsome words A_word and nothing
2\tsome word B_word in this dataset
5\tfamous_phrases to be or not to be
37\tall words such as A_word and B_word are here
""")
@pytest.fixture()
def tiny_dataset_fio(tmpdir):
dataset_fio = tmpdir.join("dataset.txt")
dataset_fio.write(DATASET_TINY_STR)
return dataset_fio
def test_can_load_documents(tiny_dataset_fio):
documents = load_documents(tiny_dataset_fio)
etalon_documents = {
123: "some words A_word and nothing",
2: "some word B_word in this dataset",
5: "famous_phrases to be or not to be",
37: "all words such as A_word and B_word are here",
}
assert etalon_documents == documents, (
"load_documents incorrectly loaded dataset"
)
@pytest.mark.parametrize(
"query, etalon_answer",
[
pytest.param(["A_word"], [123, 37], id="A_word"),
pytest.param(["B_word"], [2, 37], id="B_word"),
pytest.param(["A_word", "B_word"], [37], id="both_words"),
pytest.param(["word_does_not_exist"], [], id="word does not exist"),
],
)
def test_query_inverted_index_intersect_results(tiny_dataset_fio, query, etalon_answer):
documents = load_documents(tiny_dataset_fio)
tiny_inverted_index = build_inverted_index(documents)
answer = tiny_inverted_index.query(query)
assert sorted(answer) == sorted(etalon_answer), (
f"Expected answer is {etalon_answer}, but you got {answer}"
)
# @pytest.mark.skip
def test_can_load_wikipedia_sample():
documents = load_documents(DATASET_BIG_FPATH)
assert len(documents) == 4100, (
"you incorrectly loaded Wikipedia sample"
)
@pytest.fixture()
def wikipedia_documents():
# documents = load_documents(DATASET_BIG_FPATH)
documents = load_documents(DATASET_SMALL_FPATH)
# documents = load_documents(DATASET_TINY_FPATH)
return documents
@pytest.fixture()
def small_sample_wikipedia_documents():
documents = load_documents(DATASET_SMALL_FPATH)
return documents
# @pytest.mark.skip
def test_can_build_and_query_inverted_index(wikipedia_documents):
wikipedia_inverted_index = build_inverted_index(wikipedia_documents)
doc_ids = wikipedia_inverted_index.query(["wikipedia"])
assert isinstance(doc_ids, list), "inverted index query should return list"
@pytest.fixture()
def wikipedia_inverted_index(wikipedia_documents):
wikipedia_inverted_index = build_inverted_index(wikipedia_documents)
return wikipedia_inverted_index
@pytest.fixture()
def small_wikipedia_inverted_index(small_sample_wikipedia_documents):
wikipedia_inverted_index = build_inverted_index(small_sample_wikipedia_documents)
return wikipedia_inverted_index
# @pytest.mark.skip
def test_can_dump_and_load_inverted_index(tmpdir, wikipedia_inverted_index):
index_fio = tmpdir.join("index.dump")
wikipedia_inverted_index.dump(index_fio)
loaded_inverted_index = InvertedIndex.load(index_fio)
assert wikipedia_inverted_index == loaded_inverted_index, (
"load should return the same inverted index"
)
# @pytest.mark.parametrize(
# ("filepath",),
# [
# pytest.param(DATASET_SMALL_FPATH, id="small dataset"),
# # pytest.param(DATASET_BIG_FPATH, marks=[pytest.mark.slow], id="big dataset"),
# ],
# )
# @pytest.mark.skip
# def test_can_dump_and_load_inverted_index_with_array_policy_parametrized(filepath, tmpdir):
# index_fio = tmpdir.join("index.dump")
#
# documents = load_documents(filepath)
# etalon_inverted_index = build_inverted_index(documents)
#
# # class StoragePolicy:
# # @staticmethod
# # def dump(word_to_docs_mapping, filepath):
# # pass
# #
# # @staticmethod
# # def load(filepath):# pass
#
# etalon_inverted_index.dump(index_fio, storage_policy=ArrayStoragePolicy)
# loaded_inverted_index = InvertedIndex.load(index_fio, storage_policy=ArrayStoragePolicy)
# assert etalon_inverted_index == loaded_inverted_index, (
# "load should return the same inverted index"
# )
@pytest.mark.parametrize(
"dataset_filepath",
[
DATASET_TINY_FPATH,
DATASET_SMALL_FPATH,
# pytest.param(DATASET_BIG_FPATH, marks=[pytest.mark.slow]),
],
)
def test_process_build_can_load_documents(dataset_filepath):
process_build(dataset_filepath, "inverted.index")
@pytest.mark.parametrize(
"dataset_filepath",
[
DATASET_TINY_FPATH,
DATASET_SMALL_FPATH,
# pytest.param(DATASET_BIG_FPATH, marks=[pytest.mark.slow]),
],
)
def test_callback_build_can_build_inverted_index_from_provided_file(dataset_filepath):
build_arguments = Namespace(
dataset_filepath=dataset_filepath,
inverted_index_filepath=DEFAULT_INVERTED_INDEX_SAVE_PATH,
)
callback_build(build_arguments)
def test_process_queries_can_process_queries_from_provided_file(capsys):
with open("queries-utf8.txt") as queries_fin:
process_queries(
inverted_index_filepath=DEFAULT_INVERTED_INDEX_SAVE_PATH,
query_file=queries_fin,
)
captured = capsys.readouterr()
assert "load inverted index" not in captured.out
assert "load inverted index" in captured.err
assert "two words" in captured.out
assert "two words" not in captured.err
def test_callback_query_can_process_queries_from_provided_file():
with open("queries-utf8.txt") as queries_fin:
query_arguments = Namespace(
inverted_index_filepath=DEFAULT_INVERTED_INDEX_SAVE_PATH,
query_file=queries_fin,
)
callback_query(query_arguments)
| 32.889868
| 94
| 0.721939
|
from argparse import Namespace
from textwrap import dedent
import pytest
from inverted_index import InvertedIndex
from inverted_index import build_inverted_index
from inverted_index import DEFAULT_INVERTED_INDEX_SAVE_PATH
from inverted_index import callback_query, process_queries
from inverted_index import callback_build, process_build
from inverted_index import load_documents
from storage_policy import ArrayStoragePolicy
DATASET_BIG_FPATH = "../resources/wikipedia_sample"
DATASET_SMALL_FPATH = "../resources/small_wikipedia_sample"
DATASET_TINY_FPATH = "../resources/tiny_wikipedia_sample"
def test_can_load_documents_v1():
documents = load_documents(DATASET_TINY_FPATH)
etalon_documents = {
123: "some words A_word and nothing",
2: "some word B_word in this dataset",
5: "famous_phrases to be or not to be",
37: "all words such as A_word and B_word are here",
}
assert etalon_documents == documents, (
"load_documents incorrectly loaded dataset"
)
def test_can_load_documents_v2(tmpdir):
dataset_str = dedent("""\
123\tsome words A_word and nothing
2\tsome word B_word in this dataset
5\tfamous_phrases to be or not to be
37\tall words such as A_word and B_word are here
""")
dataset_fio = tmpdir.join("tiny.dataset")
dataset_fio.write(dataset_str)
documents = load_documents(dataset_fio)
etalon_documents = {
123: "some words A_word and nothing",
2: "some word B_word in this dataset",
5: "famous_phrases to be or not to be",
37: "all words such as A_word and B_word are here",
}
assert etalon_documents == documents, (
"load_documents incorrectly loaded dataset"
)
DATASET_TINY_STR = dedent("""\
123\tsome words A_word and nothing
2\tsome word B_word in this dataset
5\tfamous_phrases to be or not to be
37\tall words such as A_word and B_word are here
""")
@pytest.fixture()
def tiny_dataset_fio(tmpdir):
dataset_fio = tmpdir.join("dataset.txt")
dataset_fio.write(DATASET_TINY_STR)
return dataset_fio
def test_can_load_documents(tiny_dataset_fio):
documents = load_documents(tiny_dataset_fio)
etalon_documents = {
123: "some words A_word and nothing",
2: "some word B_word in this dataset",
5: "famous_phrases to be or not to be",
37: "all words such as A_word and B_word are here",
}
assert etalon_documents == documents, (
"load_documents incorrectly loaded dataset"
)
@pytest.mark.parametrize(
    "query, etalon_answer",
    [
        pytest.param(["A_word"], [123, 37], id="A_word"),
        pytest.param(["B_word"], [2, 37], id="B_word"),
        pytest.param(["A_word", "B_word"], [37], id="both_words"),
        pytest.param(["word_does_not_exist"], [], id="word does not exist"),
    ],
)
def test_query_inverted_index_intersect_results(tiny_dataset_fio, query, etalon_answer):
    """query() must return the intersection of per-word document id lists."""
    index = build_inverted_index(load_documents(tiny_dataset_fio))
    answer = index.query(query)
    assert sorted(answer) == sorted(etalon_answer), (
        f"Expected answer is {etalon_answer}, but you got {answer}"
    )
def test_can_load_wikipedia_sample():
    """The big Wikipedia sample is expected to hold exactly 4100 documents."""
    docs = load_documents(DATASET_BIG_FPATH)
    assert 4100 == len(docs), "you incorrectly loaded Wikipedia sample"
@pytest.fixture()
def wikipedia_documents():
    """Documents loaded from the small Wikipedia sample."""
    return load_documents(DATASET_SMALL_FPATH)
@pytest.fixture()
def small_sample_wikipedia_documents():
    """Alias fixture: same small Wikipedia sample as wikipedia_documents."""
    return load_documents(DATASET_SMALL_FPATH)
def test_can_build_and_query_inverted_index(wikipedia_documents):
    """Building an index and querying it yields a list of document ids."""
    index = build_inverted_index(wikipedia_documents)
    hits = index.query(["wikipedia"])
    assert isinstance(hits, list), "inverted index query should return list"
@pytest.fixture()
def wikipedia_inverted_index(wikipedia_documents):
    """Inverted index built from the small Wikipedia sample."""
    return build_inverted_index(wikipedia_documents)
@pytest.fixture()
def small_wikipedia_inverted_index(small_sample_wikipedia_documents):
    """Inverted index built from the small-sample fixture."""
    return build_inverted_index(small_sample_wikipedia_documents)
def test_can_dump_and_load_inverted_index(tmpdir, wikipedia_inverted_index):
    """Round-trip: dump() followed by load() must reproduce the same index."""
    dump_fio = tmpdir.join("index.dump")
    wikipedia_inverted_index.dump(dump_fio)
    restored = InvertedIndex.load(dump_fio)
    assert wikipedia_inverted_index == restored, "load should return the same inverted index"
"inverted.index")
@pytest.mark.parametrize(
    "dataset_filepath",
    [DATASET_TINY_FPATH, DATASET_SMALL_FPATH],
)
def test_callback_build_can_build_inverted_index_from_provided_file(dataset_filepath):
    """The build CLI callback accepts both the tiny and the small dataset."""
    callback_build(Namespace(
        dataset_filepath=dataset_filepath,
        inverted_index_filepath=DEFAULT_INVERTED_INDEX_SAVE_PATH,
    ))
def test_process_queries_can_process_queries_from_provided_file(capsys):
    """Diagnostics must go to stderr while query results go to stdout."""
    with open("queries-utf8.txt") as query_fin:
        process_queries(
            inverted_index_filepath=DEFAULT_INVERTED_INDEX_SAVE_PATH,
            query_file=query_fin,
        )
    out, err = capsys.readouterr()
    assert "load inverted index" not in out
    assert "load inverted index" in err
    assert "two words" in out
    assert "two words" not in err
def test_callback_query_can_process_queries_from_provided_file():
    """The query CLI callback accepts a file object with one query per line."""
    with open("queries-utf8.txt") as query_fin:
        arguments = Namespace(
            inverted_index_filepath=DEFAULT_INVERTED_INDEX_SAVE_PATH,
            query_file=query_fin,
        )
        callback_query(arguments)
| true
| true
|
f719ce9774a010e3d05576a84a6bbe9fff496778
| 3,460
|
py
|
Python
|
purity_fb/purity_fb_1dot6/models/object_response.py
|
mabdelhafez/purity_fb_python_client
|
a9856875b3df43b4302a2e4addd1a6b71f51f5ce
|
[
"Apache-2.0"
] | null | null | null |
purity_fb/purity_fb_1dot6/models/object_response.py
|
mabdelhafez/purity_fb_python_client
|
a9856875b3df43b4302a2e4addd1a6b71f51f5ce
|
[
"Apache-2.0"
] | null | null | null |
purity_fb/purity_fb_1dot6/models/object_response.py
|
mabdelhafez/purity_fb_python_client
|
a9856875b3df43b4302a2e4addd1a6b71f51f5ce
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Pure Storage FlashBlade REST 1.6 Python SDK
Pure Storage FlashBlade REST 1.6 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.6
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ObjectResponse(object):
    """Swagger model for an object-store listing response.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'pagination_info': 'PaginationInfo'
    }

    attribute_map = {
        'pagination_info': 'pagination_info'
    }

    def __init__(self, pagination_info=None):
        """
        ObjectResponse - a model defined in Swagger

        :param pagination_info: optional PaginationInfo for GET responses
        """
        self._pagination_info = None
        if pagination_info is not None:
            self.pagination_info = pagination_info

    @property
    def pagination_info(self):
        """
        Gets the pagination_info of this ObjectResponse.
        pagination information, only available in GET requests

        :return: The pagination_info of this ObjectResponse.
        :rtype: PaginationInfo
        """
        return self._pagination_info

    @pagination_info.setter
    def pagination_info(self, pagination_info):
        """
        Sets the pagination_info of this ObjectResponse.
        pagination information, only available in GET requests

        :param pagination_info: The pagination_info of this ObjectResponse.
        :type: PaginationInfo
        """
        self._pagination_info = pagination_info

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # dict.items() replaces six.iteritems: identical behavior on both
        # Python 2 and 3 for this two-entry dict, and drops the dependency
        # on the third-party `six` helper inside this class.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model elements inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model values inside dicts.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, ObjectResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 27.460317
| 204
| 0.578324
|
from pprint import pformat
from six import iteritems
import re
class ObjectResponse(object):
swagger_types = {
'pagination_info': 'PaginationInfo'
}
attribute_map = {
'pagination_info': 'pagination_info'
}
def __init__(self, pagination_info=None):
self._pagination_info = None
if pagination_info is not None:
self.pagination_info = pagination_info
@property
def pagination_info(self):
return self._pagination_info
@pagination_info.setter
def pagination_info(self, pagination_info):
self._pagination_info = pagination_info
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, ObjectResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
f719cf9df16d97ce0c0637dc8db52aa8046af0f1
| 6,350
|
py
|
Python
|
deeppages/utils.py
|
ricardofalasca/deep-pages
|
d1b2a48f62c31e20d767df5c6345e07e4d05290d
|
[
"MIT"
] | null | null | null |
deeppages/utils.py
|
ricardofalasca/deep-pages
|
d1b2a48f62c31e20d767df5c6345e07e4d05290d
|
[
"MIT"
] | null | null | null |
deeppages/utils.py
|
ricardofalasca/deep-pages
|
d1b2a48f62c31e20d767df5c6345e07e4d05290d
|
[
"MIT"
] | null | null | null |
from django.template import Template, Context
from django.utils.deprecation import MiddlewareMixin
from django.conf import settings
from django.db.models import Q
from django.urls import reverse, NoReverseMatch
from django.core.exceptions import ObjectDoesNotExist
from .signals import page_found, page_not_found, page_requested
from .exceptions import InvalidPathException, PageNotFoundException
from .models import Page
import re
def normalize_path(path):
    """Collapse duplicate slashes, strip the deeppages URL prefix, and flip
    the trailing slash (a path ending in '/' loses it, one without gains it,
    mirroring the original "reverse mode" behavior)."""
    from .urls import get_deeppages_path

    cleaned = re.sub(r'[\/]{2,}', '/', path)
    try:
        # The deeppages app may be mounted somewhere other than the root.
        prefix = reverse('deeppages:{}'.format(get_deeppages_path()))
    except NoReverseMatch:
        pass
    else:
        if prefix != '/' and cleaned.startswith(prefix):
            cleaned = cleaned.replace(prefix, '')
            if not cleaned.startswith('/'):
                cleaned = '/{}'.format(cleaned)
    if cleaned.endswith('/'):
        return cleaned[:-1]
    return '{}/'.format(cleaned)
def render_content(content, context):
    """Render *content* as a Django template against *context* (or {})."""
    return Template(content).render(Context(context or {}))
def render_page(page, context, callback):
    """Render *page*; *callback*, when given, supplies the raw content."""
    raw_content = callback(page, context) if callback else page.content
    return render_content(raw_content, context)
def render_requested_page_content(sender, request, page):
    """Dispatch ``page_found`` for *page* and render its content.

    Receivers of the signal may mutate the content/context in place, so
    rendering happens only after the signal returns.
    """
    body = page.content
    render_ctx = {'request': request}
    page_found.send_robust(
        sender=sender.__class__,
        request=request,
        page=page,
        path=page.path,
        content=body,
        context=render_ctx)
    return render_content(body, render_ctx)
def is_acceptable_file_type(path):
    """Return True for paths that look like text-based resources.

    A name counts as "having an extension" only when its first dot sits
    close enough to the end of the name; extension-less paths are accepted,
    and otherwise only the whitelisted text extensions pass.
    """
    filename = path.strip('/').split('/')[-1]
    text_exts = ('.html', '.htm', '.css', '.js', '.svg', '.txt')
    longest_ext = max(len(ext) for ext in text_exts)
    dot = filename.find('.')
    has_extension = dot != -1 and dot >= len(filename) - longest_ext
    return (not has_extension) or filename.endswith(text_exts)
def get_page_by_path(sender, request, logger):
    """Resolve a Page by the requested path and return its rendered content.

    Arguments:
        sender -- the middleware or view instance handling the request
        request -- WSGIRequest object
        logger -- logger instance (used only when settings.DEBUG is on)

    Robust signals dispatched from here:
        1. page_requested -- always, right after path normalization
        2. page_not_found -- when no active Page matches the path
        3. page_found     -- from the rendering helper, on a match

    Middleware callers get ``None`` on failure so Django continues its
    normal flow; view callers get InvalidPathException or
    PageNotFoundException instead.
    """
    path = normalize_path(request.path)
    if not is_acceptable_file_type(path):
        return

    if settings.DEBUG and logger:
        logger.debug('DeepPage Path Requested: [{}]'.format(path))

    page_requested.send_robust(
        sender=sender.__class__, path=path, request=request)

    called_from_middleware = issubclass(sender.__class__, MiddlewareMixin)
    if not path:
        if called_from_middleware:
            return
        raise InvalidPathException

    try:
        # Match either the normalized path or the raw request path.
        page = Page.objects.exclude(is_active=False).get(
            Q(path__iexact=path) | Q(path__iexact=request.path))
    except Page.DoesNotExist:
        if settings.DEBUG and logger:
            logger.exception('DeepPage Not Found: [{}]'.format(path))
        page_not_found.send_robust(
            sender=sender.__class__,
            path=path,
            request=request)
        if called_from_middleware:
            return
        raise PageNotFoundException
    return render_requested_page_content(sender, request, page)
def get_page_by_name(name, context=None, callback=None):
    """Fetch an active Page by (case-insensitive) name and render it.

    Keyword arguments:
        context -- extra key/values for template rendering (default: None)
        callback -- called before rendering to supply content (default: None)

    Returns None when *name* is empty or no active page matches.
    """
    if not name:
        return
    try:
        page = Page.objects.exclude(is_active=False).get(name__iexact=name)
    except ObjectDoesNotExist:
        return
    return render_page(page, context, callback)
def get_page_by_slug(slug, context=None, callback=None):
    """Fetch an active Page by (case-insensitive) slug and render it.

    Keyword arguments:
        context -- extra key/values for template rendering (default: None)
        callback -- called before rendering to supply content (default: None)

    Returns None when *slug* is empty or no active page matches.
    """
    if not slug:
        return
    try:
        page = Page.objects.exclude(is_active=False).get(slug__iexact=slug)
    except ObjectDoesNotExist:
        return
    return render_page(page, context, callback)
| 29.398148
| 79
| 0.649291
|
from django.template import Template, Context
from django.utils.deprecation import MiddlewareMixin
from django.conf import settings
from django.db.models import Q
from django.urls import reverse, NoReverseMatch
from django.core.exceptions import ObjectDoesNotExist
from .signals import page_found, page_not_found, page_requested
from .exceptions import InvalidPathException, PageNotFoundException
from .models import Page
import re
def normalize_path(path):
from .urls import get_deeppages_path
new_path = re.sub(r'[\/]{2,}', '/', path)
try:
deeppages_path = reverse('deeppages:{}'.format(get_deeppages_path()))
except NoReverseMatch:
pass
else:
if deeppages_path != '/':
if new_path.startswith(deeppages_path):
new_path = new_path.replace(deeppages_path, '')
if not new_path.startswith('/'):
new_path = '/{}'.format(new_path)
return new_path[:-1] if new_path.endswith('/') else '{}/'.format(new_path)
def render_content(content, context):
ctx = Context(context or {})
return Template(content).render(ctx)
def render_page(page, context, callback):
if callback:
page_content = callback(page, context)
else:
page_content = page.content
return render_content(page_content, context)
def render_requested_page_content(sender, request, page):
content = page.content
ctx = {'request': request}
page_found.send_robust(
sender=sender.__class__,
page=page,
path=page.path,
request=request,
content=content,
context=ctx)
return render_content(content, ctx)
def is_acceptable_file_type(path):
filename = path.strip('/').split('/')[-1]
accepted_exts = ['.html', '.htm', '.css', '.js', '.svg', '.txt']
max_ext_len = max(map(len, accepted_exts))
try:
has_extension = filename.index('.') >= (len(filename) - max_ext_len)
except ValueError:
has_extension = False
is_accepted = not has_extension or len([a for a in accepted_exts
if filename.endswith(a)]) > 0
return is_accepted
def get_page_by_path(sender, request, logger):
path = normalize_path(request.path)
if not is_acceptable_file_type(path):
return
if settings.DEBUG and logger:
logger.debug('DeepPage Path Requested: [{}]'.format(path))
# dispatch page requested signal
page_requested.send_robust(
sender=sender.__class__, path=path, request=request)
if not path:
# Is called from an instance subclass of TemplateView ?
if issubclass(sender.__class__, MiddlewareMixin):
return
else:
raise InvalidPathException
try:
# try to get page directly
page = Page.objects.exclude(is_active=False).get(
Q(path__iexact=path) | Q(path__iexact=request.path))
except Page.DoesNotExist:
if settings.DEBUG and logger:
logger.exception('DeepPage Not Found: [{}]'.format(path))
page_not_found.send_robust(
sender=sender.__class__,
path=path,
request=request)
if issubclass(sender.__class__, MiddlewareMixin):
return
else:
raise PageNotFoundException
else:
return render_requested_page_content(sender, request, page)
def get_page_by_name(name, context=None, callback=None):
if not name:
return
try:
# try to get page directly
page = Page.objects.exclude(is_active=False).get(name__iexact=name)
except ObjectDoesNotExist:
return
else:
return render_page(page, context, callback)
def get_page_by_slug(slug, context=None, callback=None):
if not slug:
return
try:
page = Page.objects.exclude(is_active=False).get(slug__iexact=slug)
except ObjectDoesNotExist:
return
else:
return render_page(page, context, callback)
| true
| true
|
f719cfd1f03d71fd7a99b8b868b8442eb5dcb3c5
| 26,423
|
py
|
Python
|
cabot_ui/src/cabot_ui/geojson.py
|
kufusha/cabot
|
52a40a39a29f0bd79b6fdd8f961708e09fda9a51
|
[
"MIT"
] | null | null | null |
cabot_ui/src/cabot_ui/geojson.py
|
kufusha/cabot
|
52a40a39a29f0bd79b6fdd8f961708e09fda9a51
|
[
"MIT"
] | null | null | null |
cabot_ui/src/cabot_ui/geojson.py
|
kufusha/cabot
|
52a40a39a29f0bd79b6fdd8f961708e09fda9a51
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2020 Carnegie Mellon University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
MapService GeoJson mapper
MapService: https://github.com/hulop/MapService
Author: Daisuke Sato<daisukes@cmu.edu>
"""
# -*- coding: utf-8 -*-
import sys
import traceback
import copy
import math
import json
import scipy
import scipy.spatial
import numpy
import numpy.linalg
import rospy
import tf
import angles
import geometry_msgs.msg
from cabot_ui import geoutil, i18n
class Geometry(object):
    """Base class for GeoJSON geometries (Point, LineString)."""
    @classmethod
    def marshal(cls, dic):
        """marshal Geometry subclasses object"""
        # Pick the concrete class from the GeoJSON 'type' member, then
        # delegate to that class's own marshal().
        if 'type' in dic:
            if dic['type'] == "Point":
                cls = Point
            elif dic['type'] == "LineString":
                cls = LineString
        if cls == Geometry:
            return cls(**dic)
        return cls.marshal(dic)
    def __init__(self, **dic):
        # Cooperative-MI guard: when Geometry is the last class before
        # `object` in this instance's MRO, call the bare initializer;
        # otherwise forward the raw GeoJSON kwargs up the chain.
        s = super(Geometry, self)
        if self.__class__.mro()[-2] == s.__thisclass__:
            s.__init__()
        else:
            s.__init__(**dic)
        if 'coordinates' in dic:
            self.coordinates = dic['coordinates']
        if 'type' in dic:
            self.geometry_type = dic['type']
class Point(Geometry, geoutil.Latlng):
    """Point geometry: a single global lat/lng location."""
    @classmethod
    def marshal(cls, dic):
        """marshal Point object"""
        return cls(**dic)
    def __init__(self, **dic):
        # GeoJSON coordinate order is [lng, lat].
        c = dic['coordinates']
        super(Point, self).__init__(lat=c[1], lng=c[0], **dic)
class LineString(Geometry):
    """LineString geometry: a global segment from start to end."""
    @classmethod
    def marshal(cls, dic):
        """marshal LineString object"""
        return cls(**dic)
    def __init__(self, **dic):
        super(LineString, self).__init__(**dic)
        # coordinates are [[lng, lat], [lng, lat]] per GeoJSON.
        self.start = geoutil.Latlng(lat=self.coordinates[0][1], lng=self.coordinates[0][0])
        self.end = geoutil.Latlng(lat=self.coordinates[1][1], lng=self.coordinates[1][0])
    def distance_to(self, point):
        # Distance from a global Point to the closest point on this segment.
        if isinstance(point, Point):
            return self.nearest_point_on_line(point).distance_to(point)
        raise RuntimeError("Need to pass a Point object (%s)"%(type(point)))
    def nearest_point_on_line(self, point):
        # Project `point` onto segment AB in Mercator coordinates, clamped
        # to the segment, then convert the result back to lat/lng.
        A = geoutil.latlng2mercator(self.start)
        B = geoutil.latlng2mercator(self.end)
        C = geoutil.latlng2mercator(point)
        # Length of AB
        distAB = math.sqrt(math.pow(A.x - B.x, 2) + math.pow(A.y - B.y, 2));
        # Unit direction vector from A to B
        vecABx = (B.x - A.x) / distAB;
        vecABy = (B.y - A.y) / distAB;
        # Scalar projection of AC onto AB, clamped to [0, |AB|]
        timeAC = max(0, min(distAB, vecABx * (C.x - A.x) + vecABy * (C.y - A.y)));
        # Mercator coordinates of the nearest point
        x = timeAC * vecABx + A.x;
        y = timeAC * vecABy + A.y;
        return geoutil.mercator2latlng(geoutil.Point(x=x, y=y))
class Properties(object):
    """Attribute bag for a GeoJSON feature's ``properties`` member.

    Every key of the source dictionary becomes an instance attribute;
    lookups of missing ``hulop_*`` attributes fall back to DEFAULT_VALUES,
    and any other missing attribute raises AttributeError.
    """
    @classmethod
    def marshal(cls, dic):
        """marshal Properties object"""
        return cls(**dic)
    # Fallback values served for known hulop_* attributes the feature
    # did not define.
    DEFAULT_VALUES = {
        "hulop_building": None,
        "hulop_major_category": None,
        "hulop_sub_category": None,
        "hulop_minor_category": None,
        "hulop_heading": 0,
        "hulop_angle": 180,
        "hulop_height": 0,
        "hulop_long_description": None,
        "hulop_short_description": None,
        "hulop_description": None,
        "hulop_location_description": None,
        "hulop_content": None,
        "hulop_tags": None,
        "hulop_poi_external_category": None,
        "hulop_show_labels_zoomlevel": None
        }
    def __getattr__(self, name):
        # Invoked only when normal attribute lookup fails, so `value` is
        # effectively always None here; serve a known default or raise.
        value = self.__dict__.get(name)
        if not value:
            if name in Properties.DEFAULT_VALUES:
                return Properties.DEFAULT_VALUES[name]
            raise AttributeError("%s.%s is invalid"%(self.__class__.__name__, name))
        return value
    def __init__(self, **dic):
        for key in dic:
            try:
                setattr(self, key, dic[key])
            except Exception:
                # Narrowed from a bare `except:` so SystemExit and
                # KeyboardInterrupt are no longer swallowed here.
                print("Cannot use unicode string for a property name: \"{}\"".format(key.encode('utf8')))
    def __str__(self):
        # Pretty JSON of the stored attributes (debugging aid).
        return json.dumps(self.__dict__, sort_keys=True, indent=2)
class Object(object):
    """Base class for all MapService map objects.

    Keeps two module-wide registries:
      _id_map      -- id -> object, or a list of pending callbacks while
                      the object has not been marshalled yet
      _all_objects -- every registered object, in marshalling order
    """
    @classmethod
    def marshal_list(cls, objects):
        """marshal list of Object subclasses objects"""
        temp = []
        for obj in objects:
            temp.append(cls.marshal(obj))
        return temp
    @classmethod
    def marshal_dict(cls, objects):
        """marshal dict of Object subclasses objects"""
        temp = {}
        for key in objects.keys():
            temp[key] = cls.marshal(objects[key])
        return temp
    @classmethod
    def marshal(cls, dic):
        """marshal Object subclasses object"""
        # Dispatch on payload shape: a 'node' key means a Landmark;
        # otherwise the id field inside 'properties' picks the subclass.
        if 'node' in dic:
            cls = Landmark
        else:
            prop = dic['properties'] if 'properties' in dic else None
            if prop is not None:
                if 'node_id' in prop:
                    cls = Node
                if 'link_id' in prop:
                    cls = Link
                if 'facil_id' in prop:
                    cls = Facility
        if cls == Object:
            return cls(**dic)
        return cls.marshal(dic)
    _id_map = {}
    _all_objects = []
    @staticmethod
    def get_object_by_id(_id, func=None):
        """get object having id by callback function, it can be defered"""
        # Known object: invoke func now (or return the object when func is
        # not callable).  Unknown id: queue func so _register() fires it
        # once the object is marshalled.
        if _id in Object._id_map:
            if isinstance(Object._id_map[_id], list):
                Object._id_map[_id].append(func)
            else:
                if func is not None and callable(func):
                    func(Object._id_map[_id])
                    return None
                return Object._id_map[_id]
        else:
            Object._id_map[_id] = [func]
            return None
    @staticmethod
    def get_objects_by_type(_type):
        """get objects of specified type"""
        temp = []
        for obj in Object._all_objects:
            if isinstance(obj, _type):
                temp.append(obj)
        return temp
    @staticmethod
    def get_all_objects():
        # Accessor for the full registry (marshalling order).
        return Object._all_objects
    @staticmethod
    def _register(obj):
        """store object with id and type"""
        # register with id
        _id = obj._id
        if _id in Object._id_map:
            if isinstance(Object._id_map[_id], list):
                # Callbacks were queued for this id; fire them now.
                for func in Object._id_map[_id]:
                    if callable(func):
                        func(obj)
                Object._id_map[_id] = obj
                Object._all_objects.append(obj)
            else:
                # Duplicate id: silently ignored by design.
                #raise RuntimeError("duplicate id")
                pass
        else:
            Object._id_map[_id] = obj
            Object._all_objects.append(obj)
    @staticmethod
    def reset_all_objects():
        """reset all state in the objects"""
        for obj in Object._all_objects:
            obj.reset()
    @staticmethod
    def _reset_link_index():
        # Drop the spatial link index; rebuilt by update_anchor_all().
        Object._link_index = []
        Object._link_points = []
        Object._link_kdtree = None
    _link_index = []
    _link_points = []
    _link_kdtree = None
    @staticmethod
    def _build_link_index():
        # Sample every link (see _add_link_index) and build a KD-tree over
        # the sampled points for nearest-link queries in local coordinates.
        for obj in Object.get_objects_by_type(Link):
            if obj.start_node and obj.end_node:
                sp = numpy.array([obj.start_node.local_geometry.x, obj.start_node.local_geometry.y])
                ep = numpy.array([obj.end_node.local_geometry.x, obj.end_node.local_geometry.y])
                Object._add_link_index(sp, ep, obj)
        if Object._link_points:
            Object._link_kdtree = scipy.spatial.KDTree(Object._link_points)
    @staticmethod
    def _add_link_index(sp, ep, obj):
        # Recursively insert segment midpoints until adjacent samples are
        # within ~1 unit, so the KD-tree approximates the whole segment.
        mp = (sp+ep)/2.0
        Object._link_points.append(mp)
        Object._link_index.append(obj)
        if numpy.linalg.norm(sp-ep) > 1:
            Object._add_link_index(sp, mp, obj)
            Object._add_link_index(mp, ep, obj)
    @staticmethod
    def get_nearest_link(node, exclude=None):
        # Query the 50 nearest sampled points, then refine by exact
        # distance; links touching a different floor are penalized by
        # +1000 so same-floor links always win when present.
        point = node.local_geometry
        latlng = node.geometry
        _, index = Object._link_kdtree.query([point.x, point.y], 50)
        min_index = None
        min_dist = 1000
        for i in index:
            link = Object._link_index[i]
            if exclude is not None and exclude(link):
                continue
            dist = link.geometry.distance_to(latlng)
            if node.floor is not None:
                if link.start_node.floor != node.floor and \
                   link.end_node.floor != node.floor:
                    dist += 1000
            if dist < min_dist:
                min_dist = dist
                min_index = i
        if min_index is None:
            return None
        return Object._link_index[min_index]
    @staticmethod
    def update_anchor_all(anchor):
        """update anchor of all object"""
        # Recompute every object's local geometry against the new anchor,
        # then rebuild the link index from the fresh local coordinates.
        Object._reset_link_index()
        for obj in Object._all_objects:
            obj.update_anchor(anchor)
        Object._build_link_index()
    def __init__(self, **dic):
        # Cooperative-MI guard: call the bare initializer when Object is
        # the last class before `object` in the MRO, otherwise forward the
        # GeoJSON kwargs up the chain.
        s = super(Object, self)
        if self.__class__.mro()[-2] == s.__thisclass__:
            s.__init__()
        else:
            s.__init__(**dic)
        if 'geometry' in dic:
            self.geometry = Geometry.marshal(dic['geometry'])
        if 'properties' in dic:
            self.properties = Properties.marshal(dic['properties'])
        if '_id' in dic:
            self._id = dic['_id']
        # no_registration=True (used by RouteLink) skips the registry.
        if 'no_registration' not in dic or not dic['no_registration']:
            Object._register(self)
        self.anchor = None
        self.local_geometry = None
    def __str__(self):
        # Verbose dump of attributes and property values (debugging aid).
        ret = "%s, (%s)\n" % (type(self), hex(id(self)))
        for key in self.__dict__:
            value = getattr(self, key)
            if isinstance(value, Object):
                ret += "%s: %s<%s>\n"%(key, type(value), value._id)
            else:
                ret += "%s: %s\n"%(key, str(value))
        import inspect
        for method in inspect.getmembers(type(self), predicate=lambda o: isinstance(o, property)):
            ret += "%s: %s\n"%(method[0], method[1].__get__(self, type(self)))
        return ret
    def __repr__(self):
        return "%s<%s>"%(type(self), self._id)
    def update_anchor(self, anchor):
        # Convert the global geometry into the anchor's local frame.
        self.anchor = anchor
        if anchor is not None:
            try:
                self.local_geometry = geoutil.global2local(self.geometry, anchor)
            except:
                # NOTE(review): bare except hides the real error and the
                # message prints the *previous* local_geometry; consider
                # logging the exception instead.
                print("Could not convert geometry: {}".format(self.local_geometry))
    def distance_to(self, point):
        # Local-frame distance for geoutil.Point, global for geoutil.Latlng;
        # returns None for any other argument type.
        if isinstance(point, geoutil.Point):
            return self.local_geometry.distance_to(point)
        if isinstance(point, geoutil.Latlng):
            return self.geometry.distance_to(point)
    def reset(self):
        # Subclasses override to clear per-run state.
        pass
class Link(Object):
    """An edge between two Nodes of the route network."""
    # MapService "route_type" values for links.
    ROUTE_TYPE_WALKWAY = 1
    ROUTE_TYPE_MOVING_WALKWAY = 2
    ROUTE_TYPE_RAILROAD_CROSSING = 3
    ROUTE_TYPE_ELEVATOR = 4
    ROUTE_TYPE_ESCALATOR = 5
    ROUTE_TYPE_STAIRS = 6
    ROUTE_TYPE_SLOPE = 7
    ROUTE_TYPE_UNKNOWN = 99
    @classmethod
    def marshal(cls, dic):
        """marshal Link subclasses object"""
        # A 'sourceNode' property marks a directed route link.
        if 'properties' in dic:
            prop = dic['properties']
            if 'sourceNode' in prop:
                cls = RouteLink
        if cls == Link:
            return cls(**dic)
        return cls.marshal(dic)
    def __init__(self, **dic):
        super(Link, self).__init__(**dic)
        self.start_node = None
        self.end_node = None
        self.pois = []
        self.floor = 0
        # End nodes may not be marshalled yet; resolution is deferred via
        # the Object id registry.
        Object.get_object_by_id(self.properties.start_id, self._set_start_node)
        Object.get_object_by_id(self.properties.end_id, self._set_end_node)
    def _set_start_node(self, node):
        self.start_node = node
        self._update()
    def _set_end_node(self, node):
        self.end_node = node
        self._update()
    def _update(self):
        # Once both ends are known, the link floor is their average.
        if self.start_node is not None and \
           self.end_node is not None:
            self.floor = (self.start_node.floor + self.end_node.floor)/2.0
    @property
    def is_elevator(self):
        """whether this link is an elevator or not"""
        return self.properties.route_type == Link.ROUTE_TYPE_ELEVATOR
    @property
    def is_escalator(self):
        """whether this link is an escalator or not"""
        return self.properties.route_type == Link.ROUTE_TYPE_ESCALATOR
    @property
    def is_leaf(self):
        """whether this link is a leaf (either end node is a dead end)"""
        if self.start_node is None or self.end_node is None:
            return False
        return self.start_node.is_leaf or self.end_node.is_leaf
    @property
    def length(self):
        """distance from start to end (NaN until both ends resolve)"""
        if self.start_node is None or self.end_node is None:
            return float('nan')
        return self.start_node.geometry.distance_to(self.end_node.geometry)
    def register_poi(self, poi):
        # Attach a POI announced along this link.
        self.pois.append(poi)
    def update_anchor(self, anchor):
        self.anchor = anchor
        # TODO: local geometry for links (LineString global->local) is
        # intentionally not computed here yet.
class RouteLink(Link):
    """Directed link used in a computed route (not registered globally)."""
    @classmethod
    def marshal(cls, dic):
        """marshal Directed Link object"""
        return cls(**dic)
    def __init__(self, **dic):
        # no_registration: route links are transient and must not enter
        # the global Object registry under the original link's id.
        super(RouteLink, self).__init__(no_registration=True, **dic)
        self.source_node = None
        self.target_node = None
        Object.get_object_by_id(self.properties.sourceNode, self._set_source_node)
        Object.get_object_by_id(self.properties.targetNode, self._set_target_node)
        # Copy the POIs from the underlying (registered) link once found.
        Object.get_object_by_id(self._id, self._found_link)
    def _set_source_node(self, node):
        self.source_node = node
    def _set_target_node(self, node):
        self.target_node = node
    def _found_link(self, link):
        self.pois = link.pois
    @property
    def is_temp(self):
        # Temporary links injected by the route planner carry this prefix.
        return self._id.startswith("_TEMP_LINK")
class Node(Object):
    """A vertex of the route network."""
    @classmethod
    def marshal(cls, dic):
        """marshal Node object"""
        return cls(**dic)
    def __init__(self, **dic):
        super(Node, self).__init__(**dic)
        self.links = []
        # Resolve up to 99 connected links (link1_id, link2_id, ...);
        # resolution may be deferred until the link is marshalled.
        for i in range(1, 100):
            attr = "link%d_id"%(i)
            if hasattr(self.properties, attr):
                Object.get_object_by_id(getattr(self.properties, attr), self._add_link)
        if hasattr(self.properties, 'floor'):
            self.floor = self.properties.floor
        else:
            self.floor = 0
        self.facility = None
        # A facility may claim this node as one of its entrances.
        Facility.get_facility_by_id(self._id, self._set_facility)
    def _add_link(self, link):
        self.links.append(link)
    def _set_facility(self, facility):
        self.facility = facility
    @property
    def is_leaf(self):
        """whether this node is the end of a leaf link (degree one)"""
        return len(self.links) == 1
    @property
    def is_elevator(self):
        """whether this node is connected to an elevator link"""
        res = False
        for link in self.links:
            res = res or link.is_elevator
        return res
class Facility(Object):
    """A facility (shop, restroom, ...) with one or more entrance nodes."""
    @classmethod
    def marshal(cls, dic):
        """marshal Facility subclasses object"""
        # The '_nav_poi_' major category marks navigation POIs.
        if 'properties' in dic:
            prop = dic['properties']
            if 'hulop_major_category' in prop:
                category = prop['hulop_major_category']
                if category == '_nav_poi_':
                    cls = POI
        if cls == Facility:
            return cls(**dic)
        return cls.marshal(dic)
    def __init__(self, **dic):
        super(Facility, self).__init__(**dic)
        self.entrances = []
        # Resolve up to 99 entrance nodes (ent1_node, ent2_node, ...) and
        # map each entrance node id back to this facility.
        # NOTE(review): the direct assignment below overwrites any pending
        # callback list queued by get_facility_by_id() before this facility
        # was marshalled, without firing it — verify marshalling order.
        for i in range(1, 100):
            attr = "ent%d_node"%(i)
            if hasattr(self.properties, attr):
                Facility._id_map[getattr(self.properties, attr)] = self
                Object.get_object_by_id(getattr(self.properties, attr), self._add_facility)
        self.name = i18n.localized_attr(self.properties, "name")
        self.name_pron = i18n.localized_attr(self.properties, "name_hira", only_if="ja") ## special case
        self.long_description = i18n.localized_attr(self.properties, "hulop_long_description")
    def _add_facility(self, node):
        self.entrances.append(node)
    # entrance-node id -> Facility (or pending callbacks); kept separate
    # from Object._id_map.
    _id_map = {}
    @staticmethod
    def get_facility_by_id(_id, func=None):
        """get facility having id by callback function, it can be defered"""
        if _id in Facility._id_map:
            if isinstance(Facility._id_map[_id], list):
                Facility._id_map[_id].append(func)
            else:
                if func is not None and callable(func):
                    func(Facility._id_map[_id])
                    return None
                return Facility._id_map[_id]
        else:
            Facility._id_map[_id] = [func]
            return None
class POI(Facility, geoutil.TargetPlace):
    """A point of interest the robot announces or reacts to."""
    @classmethod
    def marshal(cls, dic):
        """marshal POI object"""
        # hulop_sub_category selects the concrete POI subclass.
        if 'properties' in dic:
            prop = dic['properties']
            if 'hulop_sub_category' in prop:
                category = prop['hulop_sub_category']
                if category == '_nav_door_':
                    cls = DoorPOI
                if category == '_nav_info_':
                    cls = InfoPOI
                if category == '_cabot_speed_':
                    cls = SpeedPOI
                if category == '_nav_elevator_cab_':
                    cls = ElevatorCabPOI
                if category == '_nav_queue_wait_':
                    cls = QueueWaitPOI
                if category == '_nav_queue_target_':
                    cls = QueueTargetPOI
        if cls == POI:
            return cls(**dic)
        return cls.marshal(dic)
    def __init__(self, **dic):
        # NOTE(review): when 'properties' is absent, r/angle/self.floor are
        # never bound and the super() call below raises NameError — this
        # code assumes every POI payload carries 'properties'.
        if 'properties' in dic:
            prop = dic['properties']
            get_prop = lambda prop, key: prop[key] if key in prop else Properties.DEFAULT_VALUES[key]
            # hulop_heading (compass degrees) -> math radians (CCW, 0 = +x).
            r = (-get_prop(prop, 'hulop_heading') + 90) / 180.0 * math.pi
            angle = get_prop(prop, 'hulop_angle')
            self.floor = get_prop(prop, 'hulop_height')
        super(POI, self).__init__(r=r, x=0, y=0, angle=angle, floor=self.floor, **dic)
        self.sub_category = self.properties.hulop_sub_category \
            if hasattr(self.properties, 'hulop_sub_category') else ""
        self.minor_category = self.properties.hulop_minor_category \
            if hasattr(self.properties, 'hulop_minor_category') else ""
        # backward compatibility: older code reads poi.local_pose
        self.local_pose = self
    def approaching_statement(self):
        # Subclasses return an announcement; None means "say nothing".
        return None
    def approached_statement(self):
        return None
    def passed_statement(self):
        return None
    def update_anchor(self, anchor):
        super(POI, self).update_anchor(anchor)
        if anchor is not None:
            # Re-derive the local heading, including the anchor rotation.
            rad = (-self.properties.hulop_heading + 90 + anchor.rotate) / 180.0 * math.pi
            self.update_pose(self.local_geometry, rad)
    def reset(self):
        self.reset_target()
class DoorPOI(POI):
    """POI representing a door along the route."""

    @classmethod
    def marshal(cls, dic):
        """Build a DoorPOI from a geojson-style dict."""
        return cls(**dic)

    def __init__(self, **dic):
        super(DoorPOI, self).__init__(**dic)

    @property
    def title(self):
        """Localized label for this door (distinguishes automatic doors)."""
        key = "AUTO_DOOR" if self.is_auto else "DOOR"
        return i18n.localized_string(key)

    @property
    def is_auto(self):
        """True when the minor category flags this as an automatic door."""
        if self.minor_category is None:
            return False
        return '_flag_auto_' in self.minor_category

    def approaching_statement(self):
        """Spoken announcement when the user approaches this door."""
        return i18n.localized_string("DOOR_POI_APPROACHING", self.title)
class InfoPOI(POI):
    """Nav Info POI class: announces its localized name on arrival."""
    @classmethod
    def marshal(cls, dic):
        """marshal Info POI object"""
        return cls(**dic)
    def __init__(self, **dic):
        super(InfoPOI, self).__init__(**dic)
    def approached_statement(self):
        # Speak the localized POI name when the user reaches it.
        return self.name
class SpeedPOI(POI):
    """Cabot Speed POI class: imposes a local speed limit."""
    @classmethod
    def marshal(cls, dic):
        """marshal Speed POI object"""
        return cls(**dic)
    def __init__(self, **dic):
        super(SpeedPOI, self).__init__(**dic)
        # Speed limit parsed from hulop_content; presumably m/s — TODO confirm unit
        self.limit = float(self.properties.hulop_content)
class ElevatorCabPOI(POI):
    """Elevator Cab POI class.

    Optional JSON in ``properties.hulop_content`` overrides the default
    offsets; ``set_back``, ``set_forward``, ``door`` and ``buttons`` are
    (x, y) pairs relative to this POI's pose.
    """
    @classmethod
    def marshal(cls, dic):
        """marshal Elevator Cab POI object"""
        return cls(**dic)
    def __init__(self, **dic):
        super(ElevatorCabPOI, self).__init__(**dic)
        self.set_back = (3.0, 0.0)
        self.set_forward = (3.0, 0.0)
        self.door = (1.0, 0.0)
        # NOTE(review): self.buttons has no default, so where_is_buttons
        # raises AttributeError unless hulop_content supplies "buttons".
        if self.properties.hulop_content:
            try:
                hulop_content_json = json.loads(self.properties.hulop_content)
                if "set_back" in hulop_content_json:
                    self.set_back = hulop_content_json["set_back"]
                if "set_forward" in hulop_content_json:
                    self.set_forward = hulop_content_json["set_forward"]
                if "door" in hulop_content_json:
                    self.door = hulop_content_json["door"]
                if "buttons" in hulop_content_json:
                    self.buttons = hulop_content_json["buttons"]
            except:
                # BUG FIX: was print_exc(file=sys.std_out) — the sys module
                # has no attribute 'std_out', so the handler itself raised
                # AttributeError and masked the JSON parse error.
                traceback.print_exc(file=sys.stdout)
    @property
    def door_geometry(self):
        """Door position in local coordinates (pose-relative offset)."""
        x = self.x + math.cos(self.r) * self.door[0] - math.sin(self.r) * self.door[1]
        y = self.y + math.sin(self.r) * self.door[0] + math.cos(self.r) * self.door[1]
        return geoutil.Point(x=x, y=y)
    def where_is_buttons(self, pose):
        """Describe where the elevator buttons are relative to *pose*.

        Returns one of "BACK", "LEFT", "RIGHT", "FRONT", "FRONT_LEFT",
        "FRONT_RIGHT".
        """
        x = self.x + math.cos(self.r) * self.buttons[0] - math.sin(self.r) * self.buttons[1]
        y = self.y + math.sin(self.r) * self.buttons[0] + math.cos(self.r) * self.buttons[1]
        b_pos = geoutil.Point(x=x, y=y)
        b_pose = geoutil.Pose.pose_from_points(b_pos, pose)
        dir = angles.shortest_angular_distance(pose.r, b_pose.r)
        print(pose, b_pos, b_pose, dir)  # debug output; consider rospy.logdebug
        if abs(dir) > math.pi / 3 * 2:
            return "BACK"
        elif abs(dir) > math.pi / 3:
            if dir > 0:
                return "LEFT"
            elif dir < 0:
                return "RIGHT"
        elif abs(dir) < math.pi / 10:
            return "FRONT"
        elif dir > 0:
            return "FRONT_LEFT"
        elif dir < 0:
            return "FRONT_RIGHT"
        # Defensive fallback; the branches above cover every angle.
        # BUG FIX: rospy has no 'logerror' — the logging API is logerr.
        rospy.logerr("should not happen")
        return None
class QueueWaitPOI(POI):
    """Queue Wait POI class: a waiting spot along a queue lane."""
    @classmethod
    def marshal(cls, dic):
        """marshal Queue Wait POI object"""
        return cls(**dic)
    def __init__(self, **dic):
        super(QueueWaitPOI, self).__init__(**dic)
        # Spacing between queued users; presumably meters — TODO confirm unit
        self.interval = 1.0
        hulop_content_json = json.loads(self.properties.hulop_content)
        if "interval" in hulop_content_json:
            self.interval = float(hulop_content_json["interval"])
        self.is_copied = False
        self.link_orientation = None
    def register_link(self, link):
        # Store the link's direction (start -> end) as a quaternion message.
        end_pose = geoutil.Pose.pose_from_points(link.end_node.local_geometry, link.start_node.local_geometry)
        quat = tf.transformations.quaternion_from_euler(0, 0, end_pose.r)
        self.link_orientation = geometry_msgs.msg.Quaternion()
        self.link_orientation.x = quat[0]
        self.link_orientation.y = quat[1]
        self.link_orientation.z = quat[2]
        self.link_orientation.w = quat[3]
    def copy_to_link(self, link, local_geometry_x, local_geometry_y):
        # Clone this POI onto another link at the given local coordinates
        # and register the clone there; marks the source as copied.
        copied_poi = copy.deepcopy(self)
        copied_poi.x = local_geometry_x
        copied_poi.y = local_geometry_y
        copied_poi.local_geometry.x = local_geometry_x
        copied_poi.local_geometry.y = local_geometry_y
        copied_poi.geometry = geoutil.local2global(copied_poi.local_geometry, copied_poi.anchor)
        link.register_poi(copied_poi)
        copied_poi.register_link(link)
        self.is_copied = True
        return copied_poi
class QueueTargetPOI(POI):
    """Queue Target POI class: the queue head, with enter/exit nodes."""
    @classmethod
    def marshal(cls, dic):
        """marshal Queue Target POI object"""
        return cls(**dic)
    def __init__(self, **dic):
        super(QueueTargetPOI, self).__init__(**dic)
        self.enter_node = None
        self.exit_node = None
        hulop_content_json = json.loads(self.properties.hulop_content)
        # Node ids resolve lazily; callbacks fire once the nodes exist.
        Object.get_object_by_id(hulop_content_json["enter"], self._set_enter_node)
        Object.get_object_by_id(hulop_content_json["exit"], self._set_exit_node)
    def _set_enter_node(self, node):
        self.enter_node = node
    def _set_exit_node(self, node):
        self.exit_node = node
class Landmark(Facility):
    """Landmark class"""
    @classmethod
    def marshal(cls, dic):
        """marshal Landmark object"""
        return cls(**dic)
    def __init__(self, **dic):
        # Key the landmark by its node id plus a suffix so it does not
        # collide with the node's own id in the global registry.
        self._id = dic['node']+"_landmark"
        super(Landmark, self).__init__(**dic)
| 32.027879
| 110
| 0.590357
|
import sys
import traceback
import copy
import math
import json
import scipy
import scipy.spatial
import numpy
import numpy.linalg
import rospy
import tf
import angles
import geometry_msgs.msg
from cabot_ui import geoutil, i18n
class Geometry(object):
    """Base class for geojson-style geometries."""
    @classmethod
    def marshal(cls, dic):
        """Build a geometry, dispatching on the geojson 'type' field."""
        if 'type' in dic:
            if dic['type'] == "Point":
                cls = Point
            elif dic['type'] == "LineString":
                cls = LineString
        if cls == Geometry:
            return cls(**dic)
        # re-enter marshal on the chosen subclass
        return cls.marshal(dic)
    def __init__(self, **dic):
        # Cooperative init: when Geometry is the last class before `object`
        # in the MRO, object.__init__ accepts no kwargs; otherwise forward
        # the kwargs along the MRO (e.g. to geoutil mixins).
        s = super(Geometry, self)
        if self.__class__.mro()[-2] == s.__thisclass__:
            s.__init__()
        else:
            s.__init__(**dic)
        if 'coordinates' in dic:
            self.coordinates = dic['coordinates']
        if 'type' in dic:
            self.geometry_type = dic['type']
class Point(Geometry, geoutil.Latlng):
    """Point geometry; geojson coordinate order is [lng, lat]."""
    @classmethod
    def marshal(cls, dic):
        return cls(**dic)
    def __init__(self, **dic):
        # geojson stores [lng, lat]; Latlng wants them named explicitly
        c = dic['coordinates']
        super(Point, self).__init__(lat=c[1], lng=c[0], **dic)
class LineString(Geometry):
    """Two-point line geometry with lat/lng endpoints."""

    @classmethod
    def marshal(cls, dic):
        """Build a LineString from a geojson-style dict."""
        return cls(**dic)

    def __init__(self, **dic):
        super(LineString, self).__init__(**dic)
        # geojson coordinate order is [lng, lat]
        first, second = self.coordinates[0], self.coordinates[1]
        self.start = geoutil.Latlng(lat=first[1], lng=first[0])
        self.end = geoutil.Latlng(lat=second[1], lng=second[0])

    def distance_to(self, point):
        """Distance from *point* to the closest spot on this segment."""
        if not isinstance(point, Point):
            raise RuntimeError("Need to pass a Point object (%s)"%(type(point)))
        nearest = self.nearest_point_on_line(point)
        return nearest.distance_to(point)

    def nearest_point_on_line(self, point):
        """Project *point* onto this segment (computed in mercator space)."""
        a = geoutil.latlng2mercator(self.start)
        b = geoutil.latlng2mercator(self.end)
        c = geoutil.latlng2mercator(point)
        seg_len = math.sqrt(math.pow(a.x - b.x, 2) + math.pow(a.y - b.y, 2))
        ux = (b.x - a.x) / seg_len
        uy = (b.y - a.y) / seg_len
        # clamp the projection parameter onto the segment
        t = max(0, min(seg_len, ux * (c.x - a.x) + uy * (c.y - a.y)))
        x = t * ux + a.x
        y = t * uy + a.y
        return geoutil.mercator2latlng(geoutil.Point(x=x, y=y))
class Properties(object):
    """Property bag for map objects.

    Attributes are set straight from the marshalled dict; known hulop
    attributes that were never set fall back to DEFAULT_VALUES, and any
    other missing attribute raises AttributeError.
    """

    @classmethod
    def marshal(cls, dic):
        """Build a Properties instance from a plain dict."""
        return cls(**dic)

    # Fallback values for known-but-absent hulop attributes.
    DEFAULT_VALUES = {
        "hulop_building": None,
        "hulop_major_category": None,
        "hulop_sub_category": None,
        "hulop_minor_category": None,
        "hulop_heading": 0,
        "hulop_angle": 180,
        "hulop_height": 0,
        "hulop_long_description": None,
        "hulop_short_description": None,
        "hulop_description": None,
        "hulop_location_description": None,
        "hulop_content": None,
        "hulop_tags": None,
        "hulop_poi_external_category": None,
        "hulop_show_labels_zoomlevel": None
    }

    def __getattr__(self, name):
        # Only reached when normal attribute lookup has already failed.
        value = self.__dict__.get(name)
        if value:
            return value
        if name in Properties.DEFAULT_VALUES:
            return Properties.DEFAULT_VALUES[name]
        raise AttributeError("%s.%s is invalid"%(self.__class__.__name__, name))

    def __init__(self, **dic):
        for key, val in dic.items():
            try:
                setattr(self, key, val)
            except:
                print("Cannot use unicode string for a property name: \"{}\"".format(key.encode('utf8')))

    def __str__(self):
        return json.dumps(self.__dict__, sort_keys=True, indent=2)
class Object(object):
    """Base class for all map objects.

    Maintains a global id registry with deferred-callback resolution
    (objects may reference each other before both are marshalled) and a
    KDTree index over link midpoints for nearest-link queries.
    """
    @classmethod
    def marshal_list(cls, objects):
        """Marshal each dict in *objects* into a list."""
        temp = []
        for obj in objects:
            temp.append(cls.marshal(obj))
        return temp
    @classmethod
    def marshal_dict(cls, objects):
        """Marshal each value of the *objects* dict, keeping the keys."""
        temp = {}
        for key in objects.keys():
            temp[key] = cls.marshal(objects[key])
        return temp
    @classmethod
    def marshal(cls, dic):
        """Marshal one dict, dispatching on its identifying property."""
        if 'node' in dic:
            cls = Landmark
        else:
            prop = dic['properties'] if 'properties' in dic else None
            if prop is not None:
                if 'node_id' in prop:
                    cls = Node
                if 'link_id' in prop:
                    cls = Link
                if 'facil_id' in prop:
                    cls = Facility
        if cls == Object:
            return cls(**dic)
        # re-enter marshal on the chosen subclass so it can refine further
        return cls.marshal(dic)
    # id -> object, or a list of pending callbacks until it is registered
    _id_map = {}
    _all_objects = []
    @staticmethod
    def get_object_by_id(_id, func=None):
        """Look up an object by id via callback; may be deferred until the
        object is registered."""
        if _id in Object._id_map:
            if isinstance(Object._id_map[_id], list):
                # not registered yet: queue another callback
                Object._id_map[_id].append(func)
            else:
                if func is not None and callable(func):
                    func(Object._id_map[_id])
                    return None
                return Object._id_map[_id]
        else:
            # first interest in this id: start a pending-callback list
            Object._id_map[_id] = [func]
        return None
    @staticmethod
    def get_objects_by_type(_type):
        """Return all registered objects that are instances of *_type*."""
        temp = []
        for obj in Object._all_objects:
            if isinstance(obj, _type):
                temp.append(obj)
        return temp
    @staticmethod
    def get_all_objects():
        return Object._all_objects
    @staticmethod
    def _register(obj):
        # Register *obj* and fire any callbacks queued for its id.
        _id = obj._id
        if _id in Object._id_map:
            if isinstance(Object._id_map[_id], list):
                for func in Object._id_map[_id]:
                    if callable(func):
                        func(obj)
                Object._id_map[_id] = obj
                Object._all_objects.append(obj)
            else:
                # already registered: ignore the duplicate
                pass
        else:
            Object._id_map[_id] = obj
            Object._all_objects.append(obj)
    @staticmethod
    def reset_all_objects():
        """Reset the per-object state of every registered object."""
        for obj in Object._all_objects:
            obj.reset()
    @staticmethod
    def _reset_link_index():
        # Drop the spatial index; rebuilt by _build_link_index().
        Object._link_index = []
        Object._link_points = []
        Object._link_kdtree = None
    _link_index = []
    _link_points = []
    _link_kdtree = None
    @staticmethod
    def _build_link_index():
        # Index link segments (recursively subdivided) in a KDTree of
        # midpoints, in local (anchored) coordinates.
        for obj in Object.get_objects_by_type(Link):
            if obj.start_node and obj.end_node:
                sp = numpy.array([obj.start_node.local_geometry.x, obj.start_node.local_geometry.y])
                ep = numpy.array([obj.end_node.local_geometry.x, obj.end_node.local_geometry.y])
                Object._add_link_index(sp, ep, obj)
        if Object._link_points:
            Object._link_kdtree = scipy.spatial.KDTree(Object._link_points)
    @staticmethod
    def _add_link_index(sp, ep, obj):
        # Add the segment midpoint and recurse until pieces are <= 1 unit,
        # so long links are represented by many index points.
        mp = (sp+ep)/2.0
        Object._link_points.append(mp)
        Object._link_index.append(obj)
        if numpy.linalg.norm(sp-ep) > 1:
            Object._add_link_index(sp, mp, obj)
            Object._add_link_index(mp, ep, obj)
    @staticmethod
    def get_nearest_link(node, exclude=None):
        """Return the link nearest to *node*, preferring the same floor.

        :param node: object with local_geometry/geometry/floor
        :param exclude: optional predicate; links for which it returns
            True are skipped
        """
        point = node.local_geometry
        latlng = node.geometry
        _, index = Object._link_kdtree.query([point.x, point.y], 50)
        min_index = None
        min_dist = 1000
        for i in index:
            link = Object._link_index[i]
            if exclude is not None and exclude(link):
                continue
            dist = link.geometry.distance_to(latlng)
            if node.floor is not None:
                # heavy penalty for links on a different floor
                if link.start_node.floor != node.floor and \
                   link.end_node.floor != node.floor:
                    dist += 1000
            if dist < min_dist:
                min_dist = dist
                min_index = i
        if min_index is None:
            return None
        return Object._link_index[min_index]
    @staticmethod
    def update_anchor_all(anchor):
        """Re-anchor every object and rebuild the link index."""
        Object._reset_link_index()
        for obj in Object._all_objects:
            obj.update_anchor(anchor)
        Object._build_link_index()
    def __init__(self, **dic):
        # Cooperative init (see Geometry.__init__ for the same pattern).
        s = super(Object, self)
        if self.__class__.mro()[-2] == s.__thisclass__:
            s.__init__()
        else:
            s.__init__(**dic)
        if 'geometry' in dic:
            self.geometry = Geometry.marshal(dic['geometry'])
        if 'properties' in dic:
            self.properties = Properties.marshal(dic['properties'])
        if '_id' in dic:
            self._id = dic['_id']
        if 'no_registration' not in dic or not dic['no_registration']:
            Object._register(self)
        self.anchor = None
        self.local_geometry = None
    def __str__(self):
        ret = "%s, (%s)\n" % (type(self), hex(id(self)))
        for key in self.__dict__:
            value = getattr(self, key)
            if isinstance(value, Object):
                ret += "%s: %s<%s>\n"%(key, type(value), value._id)
            else:
                ret += "%s: %s\n"%(key, str(value))
        import inspect
        for method in inspect.getmembers(type(self), predicate=lambda o: isinstance(o, property)):
            ret += "%s: %s\n"%(method[0], method[1].__get__(self, type(self)))
        return ret
    def __repr__(self):
        return "%s<%s>"%(type(self), self._id)
    def update_anchor(self, anchor):
        """Recompute local_geometry from the global geometry and *anchor*."""
        self.anchor = anchor
        if anchor is not None:
            try:
                self.local_geometry = geoutil.global2local(self.geometry, anchor)
            except:
                print("Could not convert geometry: {}".format(self.local_geometry))
    def distance_to(self, point):
        """Distance to *point*: local metric for geoutil.Point, global for
        geoutil.Latlng; returns None for other types."""
        if isinstance(point, geoutil.Point):
            return self.local_geometry.distance_to(point)
        if isinstance(point, geoutil.Latlng):
            return self.geometry.distance_to(point)
    def reset(self):
        # Subclasses override to clear per-navigation state.
        pass
class Link(Object):
    """Edge between two nodes of the route network."""
    # route_type values used in the map data
    ROUTE_TYPE_WALKWAY = 1
    ROUTE_TYPE_MOVING_WALKWAY = 2
    ROUTE_TYPE_RAILROAD_CROSSING = 3
    ROUTE_TYPE_ELEVATOR = 4
    ROUTE_TYPE_ESCALATOR = 5
    ROUTE_TYPE_STAIRS = 6
    ROUTE_TYPE_SLOPE = 7
    ROUTE_TYPE_UNKNOWN = 99
    @classmethod
    def marshal(cls, dic):
        """Marshal a link; links with a sourceNode are RouteLinks."""
        if 'properties' in dic:
            prop = dic['properties']
            if 'sourceNode' in prop:
                cls = RouteLink
        if cls == Link:
            return cls(**dic)
        return cls.marshal(dic)
    def __init__(self, **dic):
        super(Link, self).__init__(**dic)
        self.start_node = None
        self.end_node = None
        self.pois = []
        self.floor = 0
        # endpoints resolve lazily via the global registry
        Object.get_object_by_id(self.properties.start_id, self._set_start_node)
        Object.get_object_by_id(self.properties.end_id, self._set_end_node)
    def _set_start_node(self, node):
        self.start_node = node
        self._update()
    def _set_end_node(self, node):
        self.end_node = node
        self._update()
    def _update(self):
        # Once both endpoints are known, the link floor is their average
        # (a half floor indicates a between-floor link).
        if self.start_node is not None and \
           self.end_node is not None:
            self.floor = (self.start_node.floor + self.end_node.floor)/2.0
    @property
    def is_elevator(self):
        return self.properties.route_type == Link.ROUTE_TYPE_ELEVATOR
    @property
    def is_escalator(self):
        return self.properties.route_type == Link.ROUTE_TYPE_ESCALATOR
    @property
    def is_leaf(self):
        # True when either endpoint is a dead end.
        if self.start_node is None or self.end_node is None:
            return False
        return self.start_node.is_leaf or self.end_node.is_leaf
    @property
    def length(self):
        # Global (latlng) distance between endpoints; NaN until resolved.
        if self.start_node is None or self.end_node is None:
            return float('nan')
        return self.start_node.geometry.distance_to(self.end_node.geometry)
    def register_poi(self, poi):
        self.pois.append(poi)
    def update_anchor(self, anchor):
        # Links keep only the anchor; endpoint nodes own the geometry.
        self.anchor = anchor
class RouteLink(Link):
    """Directed link returned by the route planner, mirroring a map Link."""
    @classmethod
    def marshal(cls, dic):
        return cls(**dic)
    def __init__(self, **dic):
        # Route links duplicate existing links, so skip global registration.
        super(RouteLink, self).__init__(no_registration=True, **dic)
        self.source_node = None
        self.target_node = None
        Object.get_object_by_id(self.properties.sourceNode, self._set_source_node)
        Object.get_object_by_id(self.properties.targetNode, self._set_target_node)
        # adopt the POIs of the underlying map link once it resolves
        Object.get_object_by_id(self._id, self._found_link)
    def _set_source_node(self, node):
        self.source_node = node
    def _set_target_node(self, node):
        self.target_node = node
    def _found_link(self, link):
        self.pois = link.pois
    @property
    def is_temp(self):
        # Temporary links injected by the planner use this id prefix.
        return self._id.startswith("_TEMP_LINK")
class Node(Object):
    """Graph node; resolves its links and owning facility lazily."""
    @classmethod
    def marshal(cls, dic):
        return cls(**dic)
    def __init__(self, **dic):
        super(Node, self).__init__(**dic)
        self.links = []
        # Nodes reference up to 99 links via link1_id..link99_id properties.
        for i in range(1, 100):
            attr = "link%d_id"%(i)
            if hasattr(self.properties, attr):
                Object.get_object_by_id(getattr(self.properties, attr), self._add_link)
        if hasattr(self.properties, 'floor'):
            self.floor = self.properties.floor
        else:
            self.floor = 0
        self.facility = None
        # set once a facility lists this node as an entrance
        Facility.get_facility_by_id(self._id, self._set_facility)
    def _add_link(self, link):
        self.links.append(link)
    def _set_facility(self, facility):
        self.facility = facility
    @property
    def is_leaf(self):
        # A dead-end node has exactly one link.
        return len(self.links) == 1
    @property
    def is_elevator(self):
        # True when any attached link is an elevator link.
        res = False
        for link in self.links:
            res = res or link.is_elevator
        return res
class Facility(Object):
    """Facility with one or more entrance nodes and localized names."""
    @classmethod
    def marshal(cls, dic):
        """Marshal a facility; _nav_poi_ facilities become POIs."""
        if 'properties' in dic:
            prop = dic['properties']
            if 'hulop_major_category' in prop:
                category = prop['hulop_major_category']
                if category == '_nav_poi_':
                    cls = POI
        if cls == Facility:
            return cls(**dic)
        return cls.marshal(dic)
    def __init__(self, **dic):
        super(Facility, self).__init__(**dic)
        self.entrances = []
        # Facilities reference up to 99 entrances via ent1_node..ent99_node.
        for i in range(1, 100):
            attr = "ent%d_node"%(i)
            if hasattr(self.properties, attr):
                Facility._id_map[getattr(self.properties, attr)] = self
                Object.get_object_by_id(getattr(self.properties, attr), self._add_facility)
        self.name = i18n.localized_attr(self.properties, "name")
        # BUG FIX: the next two assignments were fused into one
        # syntactically-invalid line ("... only_if=\"ja\") long_description = ...");
        # restored as separate statements matching the intact copy of this file.
        self.name_pron = i18n.localized_attr(self.properties, "name_hira", only_if="ja")  # special case
        self.long_description = i18n.localized_attr(self.properties, "hulop_long_description")
    def _add_facility(self, node):
        # Callback: record a resolved entrance node.
        self.entrances.append(node)
    # entrance-node id -> Facility, or a list of pending callbacks
    _id_map = {}
    @staticmethod
    def get_facility_by_id(_id, func=None):
        """Look up a facility by entrance-node id via callback; the call
        may be deferred until the facility is registered."""
        if _id in Facility._id_map:
            if isinstance(Facility._id_map[_id], list):
                Facility._id_map[_id].append(func)
            else:
                if func is not None and callable(func):
                    func(Facility._id_map[_id])
                    return None
                return Facility._id_map[_id]
        else:
            Facility._id_map[_id] = [func]
        return None
class POI(Facility, geoutil.TargetPlace):
    """POI: a facility that also carries a local pose (position, heading
    and approach angle)."""
    @classmethod
    def marshal(cls, dic):
        """Marshal a POI, dispatching on hulop_sub_category."""
        if 'properties' in dic:
            prop = dic['properties']
            if 'hulop_sub_category' in prop:
                category = prop['hulop_sub_category']
                if category == '_nav_door_':
                    cls = DoorPOI
                if category == '_nav_info_':
                    cls = InfoPOI
                if category == '_cabot_speed_':
                    cls = SpeedPOI
                if category == '_nav_elevator_cab_':
                    cls = ElevatorCabPOI
                if category == '_nav_queue_wait_':
                    cls = QueueWaitPOI
                if category == '_nav_queue_target_':
                    cls = QueueTargetPOI
        if cls == POI:
            return cls(**dic)
        return cls.marshal(dic)
    def __init__(self, **dic):
        if 'properties' in dic:
            prop = dic['properties']
            get_prop = lambda prop, key: prop[key] if key in prop else Properties.DEFAULT_VALUES[key]
            # hulop_heading appears to be degrees clockwise from north;
            # converted to radians counter-clockwise from east — TODO confirm
            r = (-get_prop(prop, 'hulop_heading') + 90) / 180.0 * math.pi
            angle = get_prop(prop, 'hulop_angle')
            self.floor = get_prop(prop, 'hulop_height')
        super(POI, self).__init__(r=r, x=0, y=0, angle=angle, floor=self.floor, **dic)
        self.sub_category = self.properties.hulop_sub_category \
            if hasattr(self.properties, 'hulop_sub_category') else ""
        self.minor_category = self.properties.hulop_minor_category \
            if hasattr(self.properties, 'hulop_minor_category') else ""
        # backward compatibility: older callers read poi.local_pose
        self.local_pose = self
    def approaching_statement(self):
        # Spoken when approaching; None means silent.
        return None
    def approached_statement(self):
        # Spoken on arrival; None means silent.
        return None
    def passed_statement(self):
        # Spoken after passing; None means silent.
        return None
    def update_anchor(self, anchor):
        # Recompute the local pose after the global anchor changes.
        super(POI, self).update_anchor(anchor)
        if anchor is not None:
            rad = (-self.properties.hulop_heading + 90 + anchor.rotate) / 180.0 * math.pi
            self.update_pose(self.local_geometry, rad)
    def reset(self):
        self.reset_target()
class DoorPOI(POI):
    """POI representing a door along the route."""
    @classmethod
    def marshal(cls, dic):
        return cls(**dic)
    def __init__(self, **dic):
        super(DoorPOI, self).__init__(**dic)
    @property
    def title(self):
        # Localized label; distinguishes automatic doors.
        if self.is_auto:
            return i18n.localized_string("AUTO_DOOR")
        else:
            return i18n.localized_string("DOOR")
    @property
    def is_auto(self):
        # True when the minor category carries the auto-door flag.
        return self.minor_category is not None and \
            '_flag_auto_' in self.minor_category
    def approaching_statement(self):
        # Spoken announcement when approaching this door.
        return i18n.localized_string("DOOR_POI_APPROACHING", self.title)
class InfoPOI(POI):
    """Nav Info POI: announces its localized name on arrival."""
    @classmethod
    def marshal(cls, dic):
        return cls(**dic)
    def __init__(self, **dic):
        super(InfoPOI, self).__init__(**dic)
    def approached_statement(self):
        # Speak the localized POI name when the user reaches it.
        return self.name
class SpeedPOI(POI):
    """Cabot Speed POI: imposes a local speed limit."""
    @classmethod
    def marshal(cls, dic):
        return cls(**dic)
    def __init__(self, **dic):
        super(SpeedPOI, self).__init__(**dic)
        # Speed limit parsed from hulop_content; presumably m/s — TODO confirm unit
        self.limit = float(self.properties.hulop_content)
class ElevatorCabPOI(POI):
    """Elevator cab POI.

    Optional JSON in ``properties.hulop_content`` overrides the default
    offsets; ``set_back``, ``set_forward``, ``door`` and ``buttons`` are
    (x, y) pairs relative to this POI's pose.
    """
    @classmethod
    def marshal(cls, dic):
        return cls(**dic)
    def __init__(self, **dic):
        super(ElevatorCabPOI, self).__init__(**dic)
        self.set_back = (3.0, 0.0)
        self.set_forward = (3.0, 0.0)
        self.door = (1.0, 0.0)
        # NOTE(review): self.buttons has no default, so where_is_buttons
        # raises AttributeError unless hulop_content supplies "buttons".
        if self.properties.hulop_content:
            try:
                hulop_content_json = json.loads(self.properties.hulop_content)
                if "set_back" in hulop_content_json:
                    self.set_back = hulop_content_json["set_back"]
                if "set_forward" in hulop_content_json:
                    self.set_forward = hulop_content_json["set_forward"]
                if "door" in hulop_content_json:
                    self.door = hulop_content_json["door"]
                if "buttons" in hulop_content_json:
                    self.buttons = hulop_content_json["buttons"]
            except:
                # BUG FIX: was print_exc(file=sys.std_out) — the sys module
                # has no attribute 'std_out', so the handler itself raised
                # AttributeError and masked the JSON parse error.
                traceback.print_exc(file=sys.stdout)
    @property
    def door_geometry(self):
        """Door position in local coordinates (pose-relative offset)."""
        x = self.x + math.cos(self.r) * self.door[0] - math.sin(self.r) * self.door[1]
        y = self.y + math.sin(self.r) * self.door[0] + math.cos(self.r) * self.door[1]
        return geoutil.Point(x=x, y=y)
    def where_is_buttons(self, pose):
        """Describe where the elevator buttons are relative to *pose*.

        Returns one of "BACK", "LEFT", "RIGHT", "FRONT", "FRONT_LEFT",
        "FRONT_RIGHT".
        """
        x = self.x + math.cos(self.r) * self.buttons[0] - math.sin(self.r) * self.buttons[1]
        y = self.y + math.sin(self.r) * self.buttons[0] + math.cos(self.r) * self.buttons[1]
        b_pos = geoutil.Point(x=x, y=y)
        b_pose = geoutil.Pose.pose_from_points(b_pos, pose)
        dir = angles.shortest_angular_distance(pose.r, b_pose.r)
        print(pose, b_pos, b_pose, dir)  # debug output; consider rospy.logdebug
        if abs(dir) > math.pi / 3 * 2:
            return "BACK"
        elif abs(dir) > math.pi / 3:
            if dir > 0:
                return "LEFT"
            elif dir < 0:
                return "RIGHT"
        elif abs(dir) < math.pi / 10:
            return "FRONT"
        elif dir > 0:
            return "FRONT_LEFT"
        elif dir < 0:
            return "FRONT_RIGHT"
        # Defensive fallback; the branches above cover every angle.
        # BUG FIX: rospy has no 'logerror' — the logging API is logerr.
        rospy.logerr("should not happen")
        return None
class QueueWaitPOI(POI):
    """Queue Wait POI: a waiting spot along a queue lane."""
    @classmethod
    def marshal(cls, dic):
        return cls(**dic)
    def __init__(self, **dic):
        super(QueueWaitPOI, self).__init__(**dic)
        # Spacing between queued users; presumably meters — TODO confirm unit
        self.interval = 1.0
        hulop_content_json = json.loads(self.properties.hulop_content)
        if "interval" in hulop_content_json:
            self.interval = float(hulop_content_json["interval"])
        self.is_copied = False
        self.link_orientation = None
    def register_link(self, link):
        # Store the link's direction (start -> end) as a quaternion message.
        end_pose = geoutil.Pose.pose_from_points(link.end_node.local_geometry, link.start_node.local_geometry)
        quat = tf.transformations.quaternion_from_euler(0, 0, end_pose.r)
        self.link_orientation = geometry_msgs.msg.Quaternion()
        self.link_orientation.x = quat[0]
        self.link_orientation.y = quat[1]
        self.link_orientation.z = quat[2]
        self.link_orientation.w = quat[3]
    def copy_to_link(self, link, local_geometry_x, local_geometry_y):
        # Clone this POI onto another link at the given local coordinates
        # and register the clone there; marks the source as copied.
        copied_poi = copy.deepcopy(self)
        copied_poi.x = local_geometry_x
        copied_poi.y = local_geometry_y
        copied_poi.local_geometry.x = local_geometry_x
        copied_poi.local_geometry.y = local_geometry_y
        copied_poi.geometry = geoutil.local2global(copied_poi.local_geometry, copied_poi.anchor)
        link.register_poi(copied_poi)
        copied_poi.register_link(link)
        self.is_copied = True
        return copied_poi
class QueueTargetPOI(POI):
    """Queue Target POI: the queue head, with enter/exit nodes."""
    @classmethod
    def marshal(cls, dic):
        return cls(**dic)
    def __init__(self, **dic):
        super(QueueTargetPOI, self).__init__(**dic)
        self.enter_node = None
        self.exit_node = None
        hulop_content_json = json.loads(self.properties.hulop_content)
        # Node ids resolve lazily; callbacks fire once the nodes exist.
        Object.get_object_by_id(hulop_content_json["enter"], self._set_enter_node)
        Object.get_object_by_id(hulop_content_json["exit"], self._set_exit_node)
    def _set_enter_node(self, node):
        self.enter_node = node
    def _set_exit_node(self, node):
        self.exit_node = node
class Landmark(Facility):
    """Named landmark attached to a node."""
    @classmethod
    def marshal(cls, dic):
        return cls(**dic)
    def __init__(self, **dic):
        # Key the landmark by its node id plus a suffix so it does not
        # collide with the node's own id in the global registry.
        self._id = dic['node']+"_landmark"
        super(Landmark, self).__init__(**dic)
| true
| true
|
f719cfe9dbb5d0f42f324f0f2d5937b2e5f17212
| 1,481
|
py
|
Python
|
Code Bundle/Chapter02/tests/test_checks.py
|
ghanigreen/pytest_code
|
dbdcc322b3469c62ad328043060518edf2b2d83f
|
[
"MIT"
] | 46
|
2018-06-28T04:40:08.000Z
|
2022-02-14T05:36:48.000Z
|
Code Bundle/Chapter02/tests/test_checks.py
|
ghanigreen/pytest_code
|
dbdcc322b3469c62ad328043060518edf2b2d83f
|
[
"MIT"
] | null | null | null |
Code Bundle/Chapter02/tests/test_checks.py
|
ghanigreen/pytest_code
|
dbdcc322b3469c62ad328043060518edf2b2d83f
|
[
"MIT"
] | 22
|
2018-06-10T23:20:29.000Z
|
2022-02-24T06:47:18.000Z
|
import pytest
class InvalidCharacterNameError(Exception):
    """Raised when a character name is empty."""
    pass


class InvalidClassNameError(Exception):
    """Raised when a class name is not one of VALID_CLASSES."""
    pass


class Character:
    """Placeholder for a created character record."""
    pass


# Closed set of class names accepted by create_character.
VALID_CLASSES = ["sorcerer", "warrior"]


def create_character(name: str, class_name: str) -> Character:
    """
    Creates a new character and inserts it into the database.

    :param name: the character name.
    :param class_name: the character class name.
    :raise InvalidCharacterNameError:
        if the character name is empty.
    :raise InvalidClassNameError:
        if the class name is invalid.
    :return: the newly created Character.
    """
    if not name:
        raise InvalidCharacterNameError("character name empty")
    if class_name not in VALID_CLASSES:
        msg = f'invalid class name: "{class_name}"'
        # BUG FIX: this raised InvalidCharacterNameError, contradicting
        # both the docstring and test_invalid_class_name below.
        raise InvalidClassNameError(msg)
    ...  # persistence not implemented; currently returns None — TODO confirm
def test_empty_name():
    """Empty character names are rejected."""
    with pytest.raises(InvalidCharacterNameError):
        create_character(name="", class_name="warrior")


def test_invalid_class_name():
    """Unknown class names are rejected."""
    with pytest.raises(InvalidClassNameError):
        create_character(name="Solaire", class_name="mage")


# NOTE(review): the two definitions below shadow the two above, so pytest
# only collects these stricter `match=` variants.
def test_empty_name():
    """Empty character names are rejected with the exact message."""
    with pytest.raises(
        InvalidCharacterNameError, match="character name empty"
    ):
        create_character(name="", class_name="warrior")


def test_invalid_class_name():
    """Unknown class names are rejected with the exact message."""
    with pytest.raises(
        InvalidClassNameError, match='invalid class name: "mage"'
    ):
        create_character(name="Solaire", class_name="mage")
| 22.439394
| 65
| 0.694126
|
import pytest
class InvalidCharacterNameError(Exception):
    """Raised when a character name is empty."""
    pass


class InvalidClassNameError(Exception):
    """Raised when a class name is not one of VALID_CLASSES."""
    pass


class Character:
    """Placeholder for a created character record."""
    pass


# Closed set of class names accepted by create_character.
VALID_CLASSES = ["sorcerer", "warrior"]


def create_character(name: str, class_name: str) -> Character:
    """Create a new character.

    :raise InvalidCharacterNameError: if the character name is empty.
    :raise InvalidClassNameError: if the class name is invalid.
    :return: the newly created Character.
    """
    if not name:
        raise InvalidCharacterNameError("character name empty")
    if class_name not in VALID_CLASSES:
        msg = f'invalid class name: "{class_name}"'
        # BUG FIX: this raised InvalidCharacterNameError, contradicting
        # the tests below which expect InvalidClassNameError.
        raise InvalidClassNameError(msg)
    ...  # persistence not implemented; currently returns None — TODO confirm
def test_empty_name():
    """Empty character names are rejected."""
    with pytest.raises(InvalidCharacterNameError):
        create_character(name="", class_name="warrior")


def test_invalid_class_name():
    """Unknown class names are rejected."""
    with pytest.raises(InvalidClassNameError):
        create_character(name="Solaire", class_name="mage")


# NOTE(review): the two definitions below shadow the two above, so pytest
# only collects these stricter `match=` variants.
def test_empty_name():
    """Empty character names are rejected with the exact message."""
    with pytest.raises(
        InvalidCharacterNameError, match="character name empty"
    ):
        create_character(name="", class_name="warrior")


def test_invalid_class_name():
    """Unknown class names are rejected with the exact message."""
    with pytest.raises(
        InvalidClassNameError, match='invalid class name: "mage"'
    ):
        create_character(name="Solaire", class_name="mage")
| true
| true
|
f719d091d9d47a5add06aa4cc9f22b144941b945
| 1,379
|
py
|
Python
|
plantara/urls.py
|
plantara/plantara-backend
|
3e3cf1f7aa83a124b7e1b616e44aa1f31333598e
|
[
"MIT"
] | null | null | null |
plantara/urls.py
|
plantara/plantara-backend
|
3e3cf1f7aa83a124b7e1b616e44aa1f31333598e
|
[
"MIT"
] | null | null | null |
plantara/urls.py
|
plantara/plantara-backend
|
3e3cf1f7aa83a124b7e1b616e44aa1f31333598e
|
[
"MIT"
] | null | null | null |
"""plantara URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.contrib import admin
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from plantara.contrib.users.views import UserViewSet, ObtainAuthToken
from plantara.contrib.plants.views import PlantViewSet
# DRF router generates the standard list/detail routes for each viewset.
router = DefaultRouter()
router.register(r"users", UserViewSet, basename="user")
router.register(r"plants", PlantViewSet, basename="plant")
urlpatterns = [
    path("admin/", admin.site.urls),
    path("api/", include(router.urls)),
    # session login/logout for the browsable API
    path("api/auth/", include("rest_framework.urls")),
    # token authentication endpoint
    path("api/token/", ObtainAuthToken.as_view()),
]
if settings.DEBUG:
    # django-debug-toolbar is only installed/enabled in development
    import debug_toolbar  # NOQA
    urlpatterns += [path("__debug__/", include(debug_toolbar.urls))]
| 33.634146
| 77
| 0.730964
|
from django.conf import settings
from django.contrib import admin
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from plantara.contrib.users.views import UserViewSet, ObtainAuthToken
from plantara.contrib.plants.views import PlantViewSet
router = DefaultRouter()
router.register(r"users", UserViewSet, basename="user")
router.register(r"plants", PlantViewSet, basename="plant")
urlpatterns = [
path("admin/", admin.site.urls),
path("api/", include(router.urls)),
path("api/auth/", include("rest_framework.urls")),
path("api/token/", ObtainAuthToken.as_view()),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns += [path("__debug__/", include(debug_toolbar.urls))]
| true
| true
|
f719d0cdcf3b09c0fae3b540dda5bf816f23f254
| 5,061
|
py
|
Python
|
ipypublish/postprocessors/base.py
|
phelps-sg/ipypublish
|
c99ba56fbaeef033e3baeb3246143660ac7eb78e
|
[
"BSD-3-Clause"
] | null | null | null |
ipypublish/postprocessors/base.py
|
phelps-sg/ipypublish
|
c99ba56fbaeef033e3baeb3246143660ac7eb78e
|
[
"BSD-3-Clause"
] | null | null | null |
ipypublish/postprocessors/base.py
|
phelps-sg/ipypublish
|
c99ba56fbaeef033e3baeb3246143660ac7eb78e
|
[
"BSD-3-Clause"
] | 1
|
2021-02-09T01:12:10.000Z
|
2021-02-09T01:12:10.000Z
|
import logging
from six import string_types
from traitlets import Bool
from traitlets.config.configurable import Configurable
from ipypublish.utils import handle_error, pathlib
try:
from shutil import which as exe_exists
except ImportError:
from distutils.spawn import find_executable as exe_exists # noqa: F401
class IPyPostProcessor(Configurable):
""" an abstract class for post-processors
"""
@property
def allowed_mimetypes(self):
""" override in subclasses
return a list of allowed mime types
if None, then all are allowed
Text based mime-types include: text/plain, text/latex,
text/restructuredtext, text/html, text/x-python, application/json,
text/markdown, text/asciidoc, text/yaml
"""
raise NotImplementedError("allowed_mimetypes")
@property
def requires_path(self):
""" override in subclasses
whether the prostprocessor requires the supplied filepath
to have an existing parent directory
if True and filepath is None, will raise an IOError, otherwise,
will try to make the directory if it doesn't exist
"""
raise NotImplementedError("requires_path")
@property
def logger_name(self):
""" override in subclass
"""
return "post-processor"
@property
def logger(self):
return logging.getLogger(self.logger_name)
skip_mime = Bool(
True,
help="if False, raise a TypeError if the mimetype is not allowed, "
"else return without processing",
).tag(config=True)
def __init__(self, config=None):
super(IPyPostProcessor, self).__init__(config=config)
def __call__(self, stream, mimetype, filepath, resources=None):
"""
See def postprocess() ...
"""
self.postprocess(stream, mimetype, filepath, resources)
def postprocess(self, stream, mimetype, filepath, resources=None):
""" Post-process output.
Parameters
----------
stream: str
the main file contents
mimetype: str
the mimetype of the file
filepath: None or str or pathlib.Path
the path to the output file
the path does not have to exist, but must be absolute
resources: None or dict
a resources dict, output from exporter.from_notebook_node
Returns
-------
stream: str
filepath: None or str or pathlib.Path
"""
if (
self.allowed_mimetypes is not None
and mimetype not in self.allowed_mimetypes
):
if not self.skip_mime:
self.handle_error(
"the mimetype {0} is not in the allowed list: {1}".format(
mimetype, self.allowed_mimetypes
),
TypeError,
)
else:
self.logger.debug("skipping incorrect mime type: {}".format(mimetype))
return stream, filepath, resources
if self.requires_path and filepath is None:
self.handle_error(
"the filepath is None, " "but the post-processor requires a folder",
IOError,
)
if filepath is not None and isinstance(filepath, string_types):
filepath = pathlib.Path(filepath)
if self.requires_path:
if filepath.parent.exists() and not filepath.parent.is_dir():
self.handle_error(
"the filepath's parent is not a folder: {}".format(filepath),
TypeError,
)
if not filepath.parent.exists():
filepath.parent.mkdir(parents=True)
if resources is None:
resources = {}
return self.run_postprocess(stream, mimetype, filepath, resources)
    def run_postprocess(self, stream, mimetype, filepath, resources):
        """ should not be called directly
        override in sub-class
        Parameters
        ----------
        stream: str
            the main file contents
        mimetype: str
            the mimetype of the file
        filepath: None or pathlib.Path
            the path to the output file
        resources: dict
            a resources dict, output from exporter.from_notebook_node
        Returns
        -------
        stream: str
        filepath: None or pathlib.Path
        resources: dict
        """
        # abstract hook: concrete post-processors implement the actual work here
        raise NotImplementedError("run_postprocess")
    def handle_error(self, msg, err_type, raise_msg=None, log_msg=None):
        """ handle error by logging it then raising
        """
        # delegate to the shared utility, supplying this processor's logger
        handle_error(msg, err_type, self.logger, raise_msg=raise_msg, log_msg=log_msg)
def check_exe_exists(self, name, error_msg):
""" test if an executable exists
"""
if not exe_exists(name):
self.handle_error(error_msg, RuntimeError)
return True
# ad-hoc smoke test when run as a script
if __name__ == "__main__":
    # NOTE(review): accessing the property on the class prints the property
    # object itself, and __call__ expects (stream, mimetype, filepath) -- this
    # two-argument call would raise a TypeError; confirm intent.
    print(IPyPostProcessor.allowed_mimetypes)
    IPyPostProcessor()("stream", "a")
| 29.424419
| 86
| 0.601857
|
import logging
from six import string_types
from traitlets import Bool
from traitlets.config.configurable import Configurable
from ipypublish.utils import handle_error, pathlib
try:
from shutil import which as exe_exists
except ImportError:
from distutils.spawn import find_executable as exe_exists
class IPyPostProcessor(Configurable):
    """ an abstract base class for post-processing an output file
    (and its resources) after export
    """
    @property
    def allowed_mimetypes(self):
        """ override in subclass: tuple of mimetypes this processor accepts,
        or None to accept any mimetype
        """
        raise NotImplementedError("allowed_mimetypes")
    @property
    def requires_path(self):
        """ override in subclass: if True, the output file's parent folder is
        required (it will be created if it does not exist)
        """
        raise NotImplementedError("requires_path")
    @property
    def logger_name(self):
        """ name used for the logger; override in subclass """
        return "post-processor"
    @property
    def logger(self):
        """ the ``logging.Logger`` for this post-processor """
        return logging.getLogger(self.logger_name)
    # if True, disallowed mimetypes are skipped; if False they raise TypeError
    skip_mime = Bool(
        True,
        help="if False, raise a TypeError if the mimetype is not allowed, "
        "else return without processing",
    ).tag(config=True)
    def __init__(self, config=None):
        """ initialise with an optional traitlets config """
        super(IPyPostProcessor, self).__init__(config=config)
    def __call__(self, stream, mimetype, filepath, resources=None):
        """ alias for :py:meth:`postprocess` (return value is discarded) """
        self.postprocess(stream, mimetype, filepath, resources)
    def postprocess(self, stream, mimetype, filepath, resources=None):
        """ validate the mimetype and filepath, then run the post-processing

        Parameters
        ----------
        stream: str
            the main file contents
        mimetype: str
            the mimetype of the file
        filepath: None or str or pathlib.Path
            absolute path for the output file (does not have to exist)
        resources: None or dict
            a resources dict, output from exporter.from_notebook_node

        Returns
        -------
        stream: str
        filepath: None or pathlib.Path
        """
        if (
            self.allowed_mimetypes is not None
            and mimetype not in self.allowed_mimetypes
        ):
            if not self.skip_mime:
                self.handle_error(
                    "the mimetype {0} is not in the allowed list: {1}".format(
                        mimetype, self.allowed_mimetypes
                    ),
                    TypeError,
                )
            else:
                self.logger.debug("skipping incorrect mime type: {}".format(mimetype))
                return stream, filepath, resources
        if self.requires_path and filepath is None:
            self.handle_error(
                "the filepath is None, " "but the post-processor requires a folder",
                IOError,
            )
        # normalise string paths to pathlib.Path
        if filepath is not None and isinstance(filepath, string_types):
            filepath = pathlib.Path(filepath)
        if self.requires_path:
            # ensure the parent folder exists and is actually a directory
            if filepath.parent.exists() and not filepath.parent.is_dir():
                self.handle_error(
                    "the filepath's parent is not a folder: {}".format(filepath),
                    TypeError,
                )
            if not filepath.parent.exists():
                filepath.parent.mkdir(parents=True)
        if resources is None:
            resources = {}
        return self.run_postprocess(stream, mimetype, filepath, resources)
    def run_postprocess(self, stream, mimetype, filepath, resources):
        """ override in subclass; should not be called directly

        Returns
        -------
        stream: str
        filepath: None or pathlib.Path
        resources: dict
        """
        raise NotImplementedError("run_postprocess")
    def handle_error(self, msg, err_type, raise_msg=None, log_msg=None):
        """ handle error by logging it then raising """
        handle_error(msg, err_type, self.logger, raise_msg=raise_msg, log_msg=log_msg)
    def check_exe_exists(self, name, error_msg):
        """ raise RuntimeError (via handle_error) if executable is not found """
        if not exe_exists(name):
            self.handle_error(error_msg, RuntimeError)
        return True
# ad-hoc smoke test; NOTE(review): __call__ expects (stream, mimetype,
# filepath) -- this two-argument call would raise a TypeError; confirm intent.
if __name__ == "__main__":
    print(IPyPostProcessor.allowed_mimetypes)
    IPyPostProcessor()("stream", "a")
| true
| true
|
f719d11806bbcb48ec4f51cc84eb95cd4e1c6804
| 2,201
|
py
|
Python
|
glance/common/crypt.py
|
komawar/glance
|
e550cac697dd8c78e837c6884f599ac6ee2137ae
|
[
"Apache-2.0"
] | 1
|
2018-07-27T15:16:14.000Z
|
2018-07-27T15:16:14.000Z
|
glance/common/crypt.py
|
komawar/glance
|
e550cac697dd8c78e837c6884f599ac6ee2137ae
|
[
"Apache-2.0"
] | null | null | null |
glance/common/crypt.py
|
komawar/glance
|
e550cac697dd8c78e837c6884f599ac6ee2137ae
|
[
"Apache-2.0"
] | 1
|
2021-07-18T18:57:04.000Z
|
2021-07-18T18:57:04.000Z
|
#!/usr/bin/env python
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for URL-safe encrypting/decrypting
"""
import base64
from Crypto.Cipher import AES
from Crypto import Random
from Crypto.Random import random
def urlsafe_encrypt(key, plaintext, blocksize=16):
    """
    Encrypts plaintext. Resulting ciphertext will contain URL-safe characters

    :param key: AES secret key
    :param plaintext: Input text to be encrypted
    :param blocksize: Non-zero integer multiple of AES blocksize in bytes (16)
    :returns : Resulting ciphertext
    """

    def pad(text):
        """
        Pads text to be encrypted
        """
        # chr(0) delimits the text from the random (non-zero) padding bytes
        sr = random.StrongRandom()
        pad_length = blocksize - len(text) % blocksize
        padding = [chr(sr.randint(1, 0xFF)) for _ in range(pad_length - 1)]
        return text + chr(0) + ''.join(padding)

    # CBC mode needs a fresh random 16-byte initialisation vector per message
    init_vector = Random.get_random_bytes(16)
    cypher = AES.new(key, AES.MODE_CBC, init_vector)
    padded = cypher.encrypt(pad(str(plaintext)))
    # prepend the IV so that urlsafe_decrypt can recover it
    return base64.urlsafe_b64encode(init_vector + padded)
def urlsafe_decrypt(key, ciphertext):
    """
    Decrypts URL-safe base64 encoded ciphertext

    :param key: AES secret key
    :param ciphertext: The encrypted text to decrypt
    :returns : Resulting plaintext
    """
    # Cast from unicode, then undo the URL-safe base64 encoding
    raw = base64.urlsafe_b64decode(str(ciphertext))
    # the first 16 bytes are the CBC initialisation vector
    cypher = AES.new(key, AES.MODE_CBC, raw[:16])
    padded = cypher.decrypt(raw[16:])
    # random padding starts after the last chr(0) delimiter
    return padded[:padded.rfind(chr(0))]
| 32.367647
| 78
| 0.695593
|
import base64
from Crypto.Cipher import AES
from Crypto import Random
from Crypto.Random import random
def urlsafe_encrypt(key, plaintext, blocksize=16):
    """Encrypt ``plaintext`` with AES-CBC; result is URL-safe base64.

    :param key: AES secret key
    :param plaintext: input text to be encrypted
    :param blocksize: non-zero integer multiple of AES blocksize in bytes (16)
    :returns: resulting ciphertext (IV prepended, base64 urlsafe encoded)
    """
    def pad(text):
        # pad with random non-zero bytes; chr(0) delimits text from padding
        pad_length = (blocksize - len(text) % blocksize)
        sr = random.StrongRandom()
        pad = ''.join(chr(sr.randint(1, 0xFF)) for i in range(pad_length - 1))
        return text + chr(0) + pad
    # random initial 16 bytes for CBC, prepended so decryption can recover it
    init_vector = Random.get_random_bytes(16)
    cypher = AES.new(key, AES.MODE_CBC, init_vector)
    padded = cypher.encrypt(pad(str(plaintext)))
    return base64.urlsafe_b64encode(init_vector + padded)
def urlsafe_decrypt(key, ciphertext):
    """Decrypt URL-safe base64 ciphertext produced by ``urlsafe_encrypt``.

    :param key: AES secret key
    :param ciphertext: the encrypted text to decrypt
    :returns: resulting plaintext
    """
    # cast from unicode; first 16 bytes of the decoded data are the CBC IV
    ciphertext = base64.urlsafe_b64decode(str(ciphertext))
    cypher = AES.new(key, AES.MODE_CBC, ciphertext[:16])
    padded = cypher.decrypt(ciphertext[16:])
    # strip the random padding after the last chr(0) delimiter
    return padded[:padded.rfind(chr(0))]
| true
| true
|
f719d17e244fc2d7cacd2371cabf35fc5edcbac2
| 2,609
|
py
|
Python
|
img-placeholder.py
|
fisker/img-placeholder
|
d4b42551b41a546553a47358b9bb616c6492b2da
|
[
"MIT"
] | 3
|
2017-01-17T05:40:10.000Z
|
2022-01-17T02:42:35.000Z
|
img-placeholder.py
|
fisker/img-placeholder
|
d4b42551b41a546553a47358b9bb616c6492b2da
|
[
"MIT"
] | 1
|
2017-01-15T15:34:58.000Z
|
2017-01-16T05:28:34.000Z
|
img-placeholder.py
|
fisker/img-placeholder
|
d4b42551b41a546553a47358b9bb616c6492b2da
|
[
"MIT"
] | 1
|
2017-01-14T12:00:55.000Z
|
2017-01-14T12:00:55.000Z
|
import sublime
import sublime_plugin
import re
# module-level list of (label, snippet) completion tuples; rebuilt in place
# by get_settings() whenever the plugin settings change
completions = []
def plugin_loaded():
    # Sublime Text calls this hook once the plugin API is ready
    init_settings()
def init_settings():
    # load settings now, and reload whenever the settings file changes
    get_settings()
    sublime.load_settings('img-placeholder.sublime-settings').add_on_change('get_settings', get_settings)
def get_settings():
    """Rebuild the module-level ``completions`` list from user settings."""
    settings = sublime.load_settings('img-placeholder.sublime-settings')
    domains = settings.get('domains', [])
    protocol = settings.get('protocol', 'http:')
    width = str(settings.get('width', 600))
    height = str(settings.get('height', 300))
    background_color = settings.get('backgroundColor', 'ccc')
    text_color = settings.get('textColor', '333')
    file_ext = settings.get('format', 'png')
    text = settings.get('text', '')
    # clear in place so other references to the list stay valid
    del completions[:]
    for domain in domains:
        url = protocol + '//' + domain + '/'
        # short form: WIDTHxHEIGHT as an editable snippet placeholder
        completions.append(
            (
                domain,
                url + '${1:' + width + 'x' + height + '}'
            )
        )
        # full form: size, colours, file format and text query string
        completions.append(
            (
                domain + ' (full version)',
                url + '${1:' + width + 'x' + height + '/' + background_color + '/' + text_color + '.' + file_ext + '?text=' + text + '}'
            )
        )
def pos(view, pos):
    """Return the ``pos`` characters immediately before the first cursor."""
    # NOTE(review): the parameter shadows the function name; appears unused
    # by the rest of this file -- confirm before removing
    point = view.sel()[0].begin()
    return view.substr(sublime.Region(point - pos, point))
def before(view, location):
    """Return the text of ``location``'s line, up to ``location``."""
    lineLocation = view.line(location)
    return view.substr(sublime.Region(lineLocation.a, location))
def get_before_text(view):
    """Return the current line's text up to the first cursor."""
    point = view.sel()[0].begin()
    lineLocation = view.line(point)
    return view.substr(sublime.Region(lineLocation.a, point))
def is_trigger(text, syntax):
    """Return True if ``text`` (the text before the caret) ends at a
    position where a placeholder-image URL is expected for ``syntax``.

    ``syntax`` is the view's syntax identifier/path; matching is
    case-insensitive substring containment.
    """
    text = text.lower()
    syntax = syntax.lower()
    # BUG FIX: the original used ``if syntax.find(s):`` -- str.find returns
    # -1 when not found (truthy!) and 0 for a match at the start (falsy!),
    # inverting the intended containment test.  Use ``in`` instead.
    if u'html' in syntax:
        # src/poster/srcset attribute value position
        if re.search(r"(?:(?:^|\s))(?:src|poster|srcset)=[\"\']?$", text):
            return True
    for s in (u'html', u'css', u'less', u'sass', u'scss', u'stylus'):
        if s in syntax:
            # url(...) function argument position
            if re.search(r"(?:(?:^|\s))url\([\"\']?$", text):
                return True
    for s in (u'markdown', u'multimarkdown'):
        if s in syntax:
            # markdown image link target position: ![alt](
            if re.search(r"(?:(?:^|\s))\!\[.*?\]\(?$", text):
                return True
    return False
class imgHolder(sublime_plugin.EventListener):
    """Offers placeholder-image URL completions in src/url()/markdown contexts."""
    def on_query_completions(self, view, prefix, locations):
        # trigger only when the text before the caret matches the syntax's
        # image-URL position (see is_trigger)
        syntax = view.settings().get('syntax')
        before_text = before(view, locations[0]);
        if is_trigger(before_text, syntax):
            return (completions, sublime.INHIBIT_EXPLICIT_COMPLETIONS)
        return
| 30.694118
| 132
| 0.576849
|
import sublime
import sublime_plugin
import re
# module-level list of (label, snippet) completion tuples; rebuilt in place
# by get_settings() whenever the plugin settings change
completions = []
def plugin_loaded():
    # Sublime Text calls this hook once the plugin API is ready
    init_settings()
def init_settings():
    # load settings now, and reload whenever the settings file changes
    get_settings()
    sublime.load_settings('img-placeholder.sublime-settings').add_on_change('get_settings', get_settings)
def get_settings():
    """Rebuild the module-level ``completions`` list from user settings."""
    settings = sublime.load_settings('img-placeholder.sublime-settings')
    domains = settings.get('domains', [])
    protocol = settings.get('protocol', 'http:')
    width = str(settings.get('width', 600))
    height = str(settings.get('height', 300))
    background_color = settings.get('backgroundColor', 'ccc')
    text_color = settings.get('textColor', '333')
    file_ext = settings.get('format', 'png')
    text = settings.get('text', '')
    # clear in place so other references to the list stay valid
    del completions[:]
    for domain in domains:
        url = protocol + '//' + domain + '/'
        # short form: WIDTHxHEIGHT as an editable snippet placeholder
        completions.append(
            (
                domain,
                url + '${1:' + width + 'x' + height + '}'
            )
        )
        # full form: size, colours, file format and text query string
        completions.append(
            (
                domain + ' (full version)',
                url + '${1:' + width + 'x' + height + '/' + background_color + '/' + text_color + '.' + file_ext + '?text=' + text + '}'
            )
        )
def pos(view, pos):
    """Return the ``pos`` characters immediately before the first cursor."""
    # NOTE(review): the parameter shadows the function name; appears unused
    # by the rest of this file -- confirm before removing
    point = view.sel()[0].begin()
    return view.substr(sublime.Region(point - pos, point))
def before(view, location):
    """Return the text of ``location``'s line, up to ``location``."""
    lineLocation = view.line(location)
    return view.substr(sublime.Region(lineLocation.a, location))
def get_before_text(view):
    """Return the current line's text up to the first cursor."""
    point = view.sel()[0].begin()
    lineLocation = view.line(point)
    return view.substr(sublime.Region(lineLocation.a, point))
def is_trigger(text, syntax):
    """Return True if ``text`` (the text before the caret) ends at a
    position where a placeholder-image URL is expected for ``syntax``.

    ``syntax`` is the view's syntax identifier/path; matching is
    case-insensitive substring containment.
    """
    text = text.lower()
    syntax = syntax.lower()
    # BUG FIX: the original used ``if syntax.find(s):`` -- str.find returns
    # -1 when not found (truthy!) and 0 for a match at the start (falsy!),
    # inverting the intended containment test.  Use ``in`` instead.
    if u'html' in syntax:
        # src/poster/srcset attribute value position
        if re.search(r"(?:(?:^|\s))(?:src|poster|srcset)=[\"\']?$", text):
            return True
    for s in (u'html', u'css', u'less', u'sass', u'scss', u'stylus'):
        if s in syntax:
            # url(...) function argument position
            if re.search(r"(?:(?:^|\s))url\([\"\']?$", text):
                return True
    for s in (u'markdown', u'multimarkdown'):
        if s in syntax:
            # markdown image link target position: ![alt](
            if re.search(r"(?:(?:^|\s))\!\[.*?\]\(?$", text):
                return True
    return False
class imgHolder(sublime_plugin.EventListener):
    """Offers placeholder-image URL completions in src/url()/markdown contexts."""
    def on_query_completions(self, view, prefix, locations):
        # trigger only when the text before the caret matches the syntax's
        # image-URL position (see is_trigger)
        syntax = view.settings().get('syntax')
        before_text = before(view, locations[0]);
        if is_trigger(before_text, syntax):
            return (completions, sublime.INHIBIT_EXPLICIT_COMPLETIONS)
        return
| true
| true
|
f719d17e359c308c58ea4e556083c454e5d757ed
| 39,475
|
py
|
Python
|
bockbuild/package.py
|
lewurm/bockbuild
|
dbe4185d21318a1c09d35c878b560176ac02c2b3
|
[
"MIT"
] | null | null | null |
bockbuild/package.py
|
lewurm/bockbuild
|
dbe4185d21318a1c09d35c878b560176ac02c2b3
|
[
"MIT"
] | null | null | null |
bockbuild/package.py
|
lewurm/bockbuild
|
dbe4185d21318a1c09d35c878b560176ac02c2b3
|
[
"MIT"
] | null | null | null |
import hashlib
import os
import sys
import shutil
import tempfile
import filecmp
import datetime
import stat
import time
import urllib
from util.util import *
import functools
# FancyURLopener is incorrectly documented; this working handler was
# copied from
# https://mail.python.org/pipermail/python-bugs-list/2006-February/032155.html
class MyUrlOpener(urllib.FancyURLopener):
    """URL opener whose HTTP errors raise instead of being handled leniently."""
    def http_error_default(*args, **kwargs):
        # bypass FancyURLopener's permissive handling and defer to the strict
        # base-class behaviour (see the workaround comment above this class)
        # NOTE(review): Python 2 urllib API -- confirm interpreter version
        return urllib.URLopener.http_error_default(*args, **kwargs)
class Package:
def __init__(self, name, version=None, organization=None, configure_flags=None, sources=None, revision=None, git_branch=None, source_dir_name=None, override_properties=None, configure=None):
Package.last_instance = self
self.name = name
self.version = version
self.organization = organization
self.configure_flags = []
self.gcc_flags = list(Package.profile.gcc_flags)
self.cpp_flags = list(Package.profile.gcc_flags)
self.ld_flags = list(Package.profile.ld_flags)
self.aux_files = [] # delete workspace-related files that are residing outside the workspace dir
self.local_cpp_flags = []
self.local_gcc_flags = []
self.local_ld_flags = []
self.local_configure_flags = []
self.build_env = ''
self.desc = None
self._dirstack = []
# additional files that need staging (besides binaries and scripts)
# (use path relative to prefix root e.g. 'etc/something.config')
self.extra_stage_files = []
# fat binary parameters. On a 64-bit Darwin profile (m64 = True)
# each package must decide if it will a) perform a multi-arch (64/32) build
# b) request two builds that are lipoed at the end or c) request a 32-bit
# build only.
self.needs_lipo = False
self.m32_only = False
self.build_dependency = False
self.dont_clean = False
self.needs_build = None
self.deploy_requests = []
if configure_flags:
self.configure_flags.extend(configure_flags)
self.sources = sources
if self.sources is None \
and not self.__class__.default_sources is None:
self.sources = list(self.__class__.default_sources)
if self.organization is None and self.sources is not None and len(self.sources) > 0:
self.organization = self.extract_organization(self.sources[0])
self.source_dir_name = source_dir_name
if self.source_dir_name is None:
self.source_dir_name = "%s-%s" % (name, version)
self.revision = revision
if configure:
self.configure = configure
else:
self.configure = './configure --prefix="%{package_prefix}"'
self.make = 'make -j%s' % Package.profile.bockbuild.cpu_count
self.makeinstall = None
self.git_branch = git_branch
self.git = Package.profile.bockbuild.git
if not override_properties is None:
for k, v in override_properties.iteritems():
self.__dict__[k] = v
self.makeinstall = self.makeinstall or 'make install DESTDIR=%{stage_root}'
self.fetched = False
def extract_organization(self, source):
if (not "git" in source) or ("http" in source):
return None
if "git.gnome.org" in source:
return None
if "github" in source:
pattern = r"github.com\W(\w+)\/\S+\.git"
match = re.search(pattern, source)
if match:
return match.group(1)
else:
raise Exception(
"Cannot determine organization for %s" % source)
else:
raise Exception("Cannot determine organization for %s" % source)
def try_get_version(self, source_dir):
configure_ac = os.path.join(source_dir, 'configure.ac')
if os.path.exists(configure_ac):
with open(configure_ac) as file:
# AC_INIT (...,[VERSION]...
pattern = r"AC_INIT\(\S+?\s*,\s*\[(\d\S+?)\]"
for x in range(40):
line = file.readline()
match = re.search(pattern, line)
if match:
return match.group(1)
def trace(self, message):
trace(message, skip=1)
def resolve_version(self, source_dir):
package_version = expand_macros(self.version, self)
found_version = self.try_get_version(source_dir) or package_version
if package_version is None:
package_version = found_version
trace('%s: Using found version %s' % (self.name, found_version))
elif found_version[0] != package_version[0]: # major version differs
warn('Version in configure.ac is %s, package declares %s' %
(found_version, package_version))
self.version = package_version
@retry
def fetch(self, dest):
if self.fetched and os.path.lexists(dest):
return
scratch = self.profile.bockbuild.scratch
resources = self.profile.bockbuild.resources
source_cache_dir = self.profile.bockbuild.source_cache
self.buildstring = []
self.is_local = False
scratch_workspace = os.path.join(scratch, '%s.workspace' % self.name)
self.rm_if_exists(scratch_workspace)
if os.path.lexists(dest):
if os.path.islink(dest):
delete(dest)
elif os.path.isdir(dest):
shutil.move(dest, scratch_workspace)
else:
error ('Unexpected workspace found at %s' % dest)
def checkout(self, source_url, cache_dir, workspace_dir):
def clean_git_workspace(dir):
trace('Cleaning git workspace: ' + self.name)
self.git('reset --hard', dir, hazard = True)
if config.iterative == False:
self.git('clean -xffd', dir, hazard = True)
else:
warn('iterative')
def clean_local_git_workspace(dir): # avoid resetting and destroying work!
self.git('clean -xffd', dir)
def create_cache():
# since this is a fresh cache, the workspace copy is invalid if
# it exists
if os.path.exists(workspace_dir):
self.rm(workspace_dir)
progress('Cloning git repo: %s' % source_url)
self.git('clone --mirror %s %s' %
(source_url, cache_dir), self.profile.bockbuild.root)
def update_cache():
trace('Updating cache: ' + cache_dir)
if self.git_branch is None:
self.git('fetch --all --prune', cache_dir)
else:
self.git('fetch origin %s' % self.git_branch, cache_dir)
def create_workspace():
self.git('clone --local --shared --recursive %s %s' %
(cache_dir, workspace_dir), cache_dir)
def update_workspace():
trace('Updating workspace')
if self.git_branch is None:
self.git('fetch --all --prune', workspace_dir)
else:
self.git('fetch origin %s:refs/remotes/origin/%s' %
(self.git_branch, self.git_branch), workspace_dir)
def resolve():
root = git_rootdir(self, os.path.realpath (workspace_dir))
if not is_modifiable_repo(root):
return clean_local_git_workspace
current_revision = git_get_revision(self, workspace_dir)
target_revision = None
if current_revision == self.revision:
return
if not self.is_local and self.revision is None and self.git_branch is None:
warn(
'Package does not define revision or branch, defaulting to tip of "master"')
self.git_branch = self.git_branch or 'master'
if self.revision is not None:
target_revision = self.revision
if self.git_branch is not None:
self.git('checkout %s' % self.git_branch, workspace_dir)
self.git('merge origin/%s --ff-only' %
self.git_branch, workspace_dir)
if self.revision is None: # target the tip of the branch
target_revision = git_get_revision(self, workspace_dir)
if target_revision and (current_revision != target_revision):
self.git('reset --hard %s' %
target_revision, workspace_dir, hazard = True)
self.git('submodule update --recursive', workspace_dir)
current_revision = git_get_revision(self, workspace_dir)
if (self.revision is not None and self.revision != current_revision):
error('Workspace error: Revision is %s, package specifies %s' % (
current_revision, self.revision))
self.revision = current_revision
def define():
self.resolve_version(workspace_dir)
str = self.name
if self.version:
str += ' %s' % self.version
str += ' (%s)' % git_shortid(self, workspace_dir)
self.desc = str
self.buildstring = ['%s <%s>' % (str, source_url)]
if self.is_local:
self.rm_if_exists(workspace_dir)
work_committed = False
if git_is_dirty (self, source_url):
if self.profile.bockbuild.cmd_options.release_build:
error ('Release builds cannot have uncommitted local changes!')
else:
info ('The repository is dirty, your changes will be committed.')
bockbuild_commit_msg = '"WIP (auto-committed by bockbuild)"'
top_commit_msg = git_get_commit_msg (self, source_url)
if top_commit_msg == bockbuild_commit_msg:
self.git ('commit -a --allow-empty --amend -m', source_url, options = [bockbuild_commit_msg])
else:
self.git('commit -a --allow-empty -m', source_url, options = [bockbuild_commit_msg])
work_committed = True
self.shadow_copy (source_url, workspace_dir)
if work_committed:
self.git ('reset HEAD~1', source_url)
else:
if os.path.exists(cache_dir):
update_cache()
else:
create_cache()
if os.path.exists(workspace_dir):
if self.dont_clean == True: # previous workspace was left dirty, delete
clean_git_workspace(workspace_dir)
update_workspace()
else:
create_workspace()
cache = None # at this point, the cache is not the problem; keep _fetch_sources from deleting it
resolve()
define()
return clean_git_workspace
def checkout_archive(archive, cache_dest, workspace_dir):
def create_cache():
progress('Downloading: %s' % archive)
try:
filename, message = MyUrlOpener().retrieve(archive, cache_dest)
except IOError as e:
raise CommandException(
'%s error downloading %s' % (e[1], archive))
def update_cache():
pass
def create_workspace(dir):
filetype = get_filetype(cache_dest).lower()
if filetype.startswith(('gzip', 'xz', 'zip', 'bzip2')):
self.extract_archive(cache_dest, scratch, validate_only=False)
expected_path = os.path.join(scratch, self.source_dir_name)
if not os.path.exists(expected_path):
error('Archive %s was extracted but not found at workspace path %s' % (
cache_dest, expected_path))
if expected_path != dir:
shutil.move(expected_path, dir)
else: # create the directory and just place the downloaded file inside
ensure_dir(scratch_workspace)
shutil.copy(cache_dest, scratch_workspace)
def update_workspace():
pass
def clean_archive(dir):
try:
self.rm(dir)
create_workspace(dir)
except Exception as e:
self.rm_if_exists(cache_dest)
self.rm_if_exists(workspace_dir)
raise
def define():
self.resolve_version(workspace_dir)
self.desc = '%s %s' % (self.name, self.version)
self.buildstring = ['%s <%s> md5: %s)' % (
self.desc, archive, md5(cache_dest))]
if os.path.exists(cache_dest):
update_cache()
else:
create_cache()
if os.path.exists(workspace_dir):
update_workspace()
else:
create_workspace(workspace_dir)
define()
return clean_archive
def get_download_dest(url):
return os.path.join(source_cache_dir, os.path.basename(url))
def get_git_cache_path():
if self.organization is None:
name = self.name
else:
name = self.organization + "+" + self.name
return os.path.join(source_cache_dir, name)
clean_func = None # what to run if the workspace needs to be redone
expand_macros(self.sources, self)
if not self.sources:
def clean_nop (dir):
pass
self.sources = []
self.desc = '%s %s' % (self.name, self.version)
self.buildstring.extend(
['%s md5: %s' % (os.path.basename(self._path), md5(self._path))])
clean_func = clean_nop
local_sources = []
try:
for source in self.sources:
resolved_source = None
cache = None
# if source.startswith ('http://'):
# raise Exception ('HTTP downloads are no longer allowed: %s', source)
if source.startswith(('http://', 'https://', 'ftp://')):
cache = get_download_dest(source)
if self.profile.cache_host is not None:
cached_source = os.path.join(
self.profile.cache_host, os.path.basename(source))
try:
clean_func = checkout_archive(
cached_source, cache, scratch_workspace)
source = cached_source
except CommandException as e:
warn(repr(e))
verbose('Trying original source')
clean_func = checkout_archive(
source, cache, scratch_workspace)
else:
clean_func = checkout_archive(
source, cache, scratch_workspace)
resolved_source = scratch_workspace
elif source.startswith(('git://', 'file://', 'ssh://')) or source.endswith('.git') or (os.path.isdir(source) and git_isrootdir (self, source)):
if os.path.isdir(source):
self.is_local = True
cache = None
else:
cache = get_git_cache_path()
clean_func = checkout(
self, source, cache, scratch_workspace)
resolved_source = scratch_workspace
elif os.path.isabs(source) and os.path.isdir(source):
trace('copying local dir source %s ' % source)
def clean_local_copy(dir):
self.rm_if_exists(dir)
shutil.copytree(source, scratch_workspace)
resolved_source = scratch_workspace
self.resolve_version(scratch_workspace)
self.desc = '%s %s (local workspace: %s)' % (
self.name, self.version, source)
self.buildstring = ['local workspace: %s' % (source)]
clean_func = clean_local_copy
else:
for path in resources:
if os.path.isfile(os.path.join(path, source)):
resolved_source = os.path.join(path, source)
self.buildstring.extend(
['%s md5: %s' % (source, md5(resolved_source))])
if resolved_source is None:
error('could not resolve source: %s' % source)
trace('%s resolved to %s' % (source, resolved_source))
local_sources.append(resolved_source)
except Exception as e:
if cache is not None:
self.rm_if_exists(cache)
self.rm_if_exists(scratch_workspace)
raise
if len(self.sources) != len(local_sources):
error('Source number mismatch after processing: %s before, %s after ' % (
self.sources, local_sources))
if clean_func is None:
error('workspace cleaning function (clean_func) must be set')
self.local_sources = local_sources
self.clean = clean_func
if not os.path.exists(scratch_workspace):
os.mkdir(scratch_workspace)
self.workspace = dest
shutil.move(scratch_workspace, self.workspace)
if not os.path.exists(self.workspace):
error ('Workspace was not created')
self.fetched = True
def request_build(self, reason):
self.needs_build = reason
def override_build(self, reason):
self.needs_build = reason
def start_build(self, arch, dest, stage):
info(self.desc)
self.package_prefix = dest
self.staged_profile = stage
protect_dir(self.staged_profile, recursive=True)
workspace = self.workspace
build_artifact = self.build_artifact
if config.never_rebuild and os.path.isfile(build_artifact):
if self.deploy_package(build_artifact, self.staged_profile):
self.override_build(
'never_rebuild option enabled, using artifact')
else:
warn('Failed to deploy from artifact %s. Rebuilding' %
os.path.basename(build_artifact))
if self.needs_build:
verbose(self.buildstring)
if (arch == 'darwin-universal' and self.needs_lipo):
workspace_x86 = workspace + '-x86'
workspace_x64 = workspace + '-x64'
self.rm_if_exists(workspace_x86)
self.rm_if_exists(workspace_x64)
shutil.move(workspace, workspace_x86)
self.shadow_copy(workspace_x86, workspace_x64)
self.link(workspace_x86, workspace)
stagedir_x32 = self.do_build(
'darwin-32', os.path.join(self.profile.bockbuild.scratch, self.name + '-x86.install'))
self.link(workspace_x64, workspace)
package_stage = self.do_build(
'darwin-64', os.path.join(self.profile.bockbuild.scratch, self.name + '-x64.install'))
delete(workspace)
shutil.move(workspace_x86, workspace)
print 'lipo', self.name
self.lipo_dirs(stagedir_x32, package_stage, 'lib')
self.copy_side_by_side(
stagedir_x32, package_stage, 'bin', '32', '64')
elif arch == 'toolchain':
package_stage = self.do_build('darwin-64')
elif self.m32_only:
package_stage = self.do_build('darwin-32')
else:
package_stage = self.do_build(arch)
self.make_artifact(package_stage, build_artifact)
for target in self.deploy_requests:
self.deploy_package(build_artifact, target)
def deploy_package(self, artifact, dest):
trace('Deploying (%s -> %s)' %
(os.path.basename(artifact), os.path.basename(dest)))
unprotect_dir(dest, recursive=True)
artifact_stage = artifact + '.extracted'
try:
assert_exists(artifact)
self.rm_if_exists(artifact_stage)
unzip(artifact, artifact_stage)
assert_exists(artifact_stage)
except Exception as e:
self.rm_if_exists(artifact)
self.rm_if_exists(artifact_stage)
protect_dir(dest, recursive=True)
return False
ensure_dir(artifact_stage)
# catalogue files
files = list()
size = 0
for path in iterate_dir(artifact_stage, with_links = True, summary=False):
relpath = os.path.relpath(path, artifact_stage)
destpath = os.path.join(dest, relpath)
if os.path.exists(destpath) and not identical_files(path, destpath):
warn(
'Different file exists in package already: ''%s''' % relpath )
files.append(relpath)
if not os.path.islink(path):
size = size + os.path.getsize(path)
files.sort()
is_changed(files, artifact + '.files')
with open(artifact + '.files', 'w') as output:
output.write('\n'.join(files))
if len(files) != 0:
merge_trees(artifact_stage, dest, False)
self.sh = functools.partial(self.build_sh, cwd=artifact_stage)
self.deploy()
self.rm_if_exists(artifact_stage)
protect_dir(dest, recursive=True)
verbose ('%d files, %sMB' % (len(files), "{:.2f}".format (size /1024 / 1024 )))
return True
def do_build(self, arch, install_dir=None):
progress('Building (arch: %s)' % arch)
if install_dir is None:
install_dir = os.path.join(
self.profile.bockbuild.scratch, self.name + '.install')
self.stage_root = install_dir
self.rm_if_exists(self.stage_root)
self.staged_prefix = os.path.join(
self.stage_root, self.package_prefix[1:])
os.makedirs(self.staged_prefix)
# protect against relocation bugs often landing files in the wrong path
protect_dir(self.stage_root)
try:
self.arch_build(arch)
self.build_env = self.expand_build_env()
self.sh = functools.partial(self.build_sh, cwd=self.workspace)
self.prep()
self.build()
self.install()
if not os.path.exists(self.staged_prefix):
error('Result directory %s not found.' % self.staged_prefix)
self.profile.process_package(self)
if not self.dont_clean:
self.clean (dir=self.workspace)
except (Exception, KeyboardInterrupt) as e:
self.rm_if_exists(self.stage_root)
if isinstance(e, CommandException):
if os.path.exists(self.workspace):
for path in self.aux_files:
self.rm_if_exists(path)
problem_dir = os.path.join(
self.profile.bockbuild.execution_root, os.path.basename(self.workspace) + '.problem')
# take this chance to clear out older .problems
for d in os.listdir(self.profile.bockbuild.execution_root):
if d.endswith('.problem'):
self.rm(os.path.join(self.profile.bockbuild.execution_root, d))
shutil.move(self.workspace, problem_dir)
info('Build moved to ./%s\n' % os.path.basename(problem_dir))
info('Run "source ./%s" first to replicate bockbuild environment.' %
os.path.basename(self.profile.bockbuild.env_script))
error(str(e))
else:
self.rm_if_exists(self.workspace)
raise
return self.staged_prefix
def make_artifact(self, stage_dir, build_artifact):
self.rm_if_exists(build_artifact)
zip(stage_dir, build_artifact)
self.rm_if_exists(stage_dir)
def deploy(self):
return
def build_sh(self, command, cwd):
if isinstance(command, list):
map(lambda cmd: self.build_sh(cmd, cwd), command)
return
if not isinstance(command, str):
error('command arg must be a string: %s' % repr(command))
if not os.path.isdir(cwd):
error('Directory does not exist: %s' % cwd)
try:
env_command = '%s %s' % (
self.build_env, expand_macros(command, self))
except Exception as e:
error('MACRO EXPANSION ERROR: ' + str(e))
if config.verbose is True:
logprint('\t@\t' + expand_macros(command, self), bcolors.BOLD)
with open(self.log, 'a') as log:
log.write('%s\n' % env_command)
full_command = '%s >>%s 2>&1' % (env_command, self.log)
try:
run_shell(full_command, cwd=cwd)
except Exception as e:
with open(self.log, 'r') as log:
output_text = log.readlines()
for line in output_text:
line = line.replace(config.absolute_root, '@')
print line,
warn('build env: ' + self.build_env)
raise CommandException('command failed: %s' %
expand_macros(command, self), cwd=cwd)
def backtick(self, command):
command = expand_macros(command, self)
return backtick(command)
def cwd(self):
try:
self._cwd = os.getcwd()
except Exception as e:
warn('In invalid directory: %s' % self._cwd)
return self._cwd
def cd(self, dir):
dir = expand_macros(dir, self)
if self.cwd() == dir:
return
os.chdir(dir)
self.cwd()
trace(dir)
def pushd(self, dir):
if len(self._dirstack) == 0:
self._dirstack.append({'dir': self._cwd, 'caller': 'profile'})
self.cd(dir)
self._dirstack.append({'dir': self._cwd, 'caller': get_caller()})
def popd(self, failure=False):
caller = get_caller()
cwd = self._dirstack.pop()
if not failure:
if cwd['caller'] != caller:
warn('popd: Unmatched pushd/popd callers: (%s/%s)' %
(cwd['caller'], caller))
# return False
if cwd['dir'] != self.cwd() and not failure:
warn ('popd: Inconsistent current dir state (expected ''%s'', was in ''%s''' % (
cwd['dir'], self._cwd))
top = self._dirstack[-1]
self.cd(top['dir'])
def prep(self):
return
    def rm_if_exists(self, path):
        """Delete 'path' (after macro expansion) if present.

        Uses lexists so dangling symlinks are removed too.
        """
        path = expand_macros(path, self)
        if os.path.lexists(path):
            delete(path)
    def rm(self, path):
        """Unconditionally delete 'path' after macro expansion."""
        delete(expand_macros(path, self))
    def link(self, source, link):
        """Create (or replace) symlink 'link' pointing at 'source'.

        Both paths are macro-expanded; anything already at 'link'
        (including a dangling symlink) is removed first.
        """
        trace('%s -> %s' % (link, source))
        source = expand_macros(source, self)
        link = expand_macros(link, self)
        if os.path.lexists(link):
            delete(link)
        os.symlink(source, link)
    def extract_archive(self, archive, cwd, validate_only, overwrite=False):
        """Extract (or, with validate_only, merely test) 'archive' in 'cwd'.

        .zip files are handled with unzip ('-t' tests, '-o' overwrites);
        everything else goes to tar, where '-O' streams to stdout so the
        archive is read without writing files.
        """
        root, ext = os.path.splitext(archive)
        if ext == '.zip':
            command = which('unzip')
            if not command:
                error('unzip not found')
            args = ["-qq"]
            if overwrite:
                args.extend(["-o"])
            if validate_only:
                args.extend(["-t"])
            args.extend([archive])
        else:
            command = which('tar')
            if not command:
                error('tar not found')
            args = ['xf', archive]
            if validate_only:
                args.extend(['-O'])
        run(command, args, cwd)
    def build(self):
        """Default build step: configure then make. Subclasses override
        this to customize the build."""
        Package.configure(self)
        Package.make(self)
    def lipo_dirs(self, dir_64, dir_32, bin_subdir, replace_32=True):
        """Merge 64-bit and 32-bit Mach-O libraries into fat binaries.

        Walks <dir_64>/<bin_subdir> and lipo-combines each .a/.dylib/.so
        with the same-named file under <dir_32>/<bin_subdir>. With
        replace_32 (default) the fat binary replaces the 32-bit file.
        NOTE(review): the tempfile.mkdtemp() staging dir is never
        removed -- confirm whether that leak is acceptable.
        """
        dir64_bin = os.path.join(dir_64, bin_subdir)
        dir32_bin = os.path.join(dir_32, bin_subdir)
        lipo_dir = tempfile.mkdtemp()
        lipo_bin = os.path.join(lipo_dir, bin_subdir)
        if not os.path.exists(dir64_bin):
            return # we don't always have bin/lib dirs
        if not os.path.exists(lipo_bin):
            os.mkdir(lipo_bin)
        # take each 64-bit binary, lipo with binary of same name
        for root, dirs, filelist in os.walk(dir64_bin):
            relpath = os.path.relpath(root, dir64_bin)
            for file in filelist:
                if file.endswith('.a') or file.endswith('.dylib') or file.endswith('.so'):
                    dir64_file = os.path.join(dir64_bin, relpath, file)
                    dir32_file = os.path.join(dir32_bin, relpath, file)
                    lipo_file = os.path.join(lipo_bin, relpath, file)
                    if os.path.exists(dir32_file):
                        if not os.path.exists(os.path.join(lipo_bin, relpath)):
                            os.makedirs(os.path.join(lipo_bin, relpath))
                        # symlinks are left for the 32-bit tree to provide
                        if os.path.islink(dir64_file):
                            continue
                        lipo_cmd = 'lipo -create %s %s -output %s ' % (
                            dir64_file, dir32_file, lipo_file)
                        # print lipo_cmd
                        run_shell(lipo_cmd)
                        if replace_32:
                            # replace all 32-bit binaries with the new fat
                            # binaries
                            shutil.move(lipo_file, dir32_file)
                    else:
                        warn("lipo: 32-bit version of file %s not found" % file)
    #creates a deep hardlink copy of a directory
    def shadow_copy (self, source, dest, exclude_git = False):
        """Create a deep hard-link copy of 'source' at 'dest'.

        'dest' must not exist. The directory structure is recreated and
        every file is os.link()ed into place. Symlinks that are dangling
        or that point into the bockbuild state root are skipped.
        NOTE(review): the 'break' below abandons the *remaining* files of
        the current directory, not just the offending one -- looks like
        it was meant to be 'continue'; confirm before changing.
        """
        trace ('shadow_copy %s %s' % (source , dest))
        if os.path.exists(dest):
            error ('Destination directory must not exist')
        # Bockbuild state may be under the directory if we are copying a local workspace. Avoid recursive copying
        stateroot_parent = os.path.dirname (config.state_root)
        stateroot_name = os.path.basename (config.state_root)
        stateroot_found = False
        if not os.path.commonprefix ([source, config.state_root]) == source:
            stateroot_found = True
        for root, subdirs, filelist in os.walk (source):
            relpath = os.path.relpath(root, source) # e.g. 'lib/mystuff'
            destpath = os.path.join(dest, relpath)
            os.makedirs(destpath)
            if exclude_git:
                subdirs[:] = [dir for dir in subdirs if dir != '.git']
            if not stateroot_found and root == stateroot_parent:
                # prune the state root so we never copy bockbuild's own state
                subdirs [:] = [dir for dir in subdirs if dir != stateroot_name]
                stateroot_found = True
            for file in filelist:
                fullpath = os.path.join (root, file)
                if os.path.islink(fullpath):
                    target = os.path.join(os.path.dirname(fullpath), os.readlink(fullpath))
                    if not os.path.exists(fullpath) or os.path.commonprefix ([config.state_root, target]) == config.state_root:
                        break
                os.link (fullpath, os.path.join (destpath, file))
        trace ('shadow_copy done')
    def copy_side_by_side(self, src_dir, dest_dir, bin_subdir, suffix, orig_suffix=None):
        """Copy Mach-O binaries from src_dir to dest_dir under a suffixed name.

        Each Mach-O file under <src_dir>/<bin_subdir> is copied to
        <dest_dir>/<bin_subdir> with 'suffix' inserted before its first
        dot. When orig_suffix is given, the pre-existing destination file
        is renamed with that suffix and replaced by a symlink to the
        renamed file.
        """
        def add_suffix(filename, sfx):
            # 'foo.1.dylib' + '32' -> 'foo32.1.dylib': the suffix goes
            # before the first dot so versioned extensions stay intact.
            fileparts = filename.split('.', 1)
            if len(fileparts) > 1:
                p = '%s%s.%s' % (fileparts[0], sfx, fileparts[1])
            else:
                p = '%s%s' % (filename, sfx)
            trace(p)
            return p
        src_dir = os.path.join(src_dir, bin_subdir)
        dest_dir = os.path.join(dest_dir, bin_subdir)
        trace('src_dir %s' % src_dir)
        trace('dest_dir %s' % dest_dir)
        if not os.path.exists(src_dir):
            return # we don't always have bin/lib dirs
        for path in iterate_dir(src_dir):
            relpath = os.path.relpath(path, src_dir)
            reldir, filename = os.path.split(relpath)
            trace(reldir + '/' + filename)
            # only Mach-O binaries are copied; other files are skipped
            filetype = backtick('file -b "%s"' % path)[0]
            if filetype.startswith('Mach-O'):
                dest_file = os.path.join(
                    dest_dir, reldir, add_suffix(filename, suffix))
                trace(dest_file)
                dest_orig_file = os.path.join(dest_dir, reldir, filename)
                if not os.path.exists(dest_orig_file):
                    warn('lipo: %s exists in %s but not in %s' %
                         (relpath, src_dir, dest_dir))
                elif orig_suffix is not None:
                    suffixed = os.path.join(
                        dest_dir, reldir, add_suffix(filename, orig_suffix))
                    trace(suffixed)
                    shutil.move(dest_orig_file, suffixed)
                    os.symlink(os.path.basename(suffixed), dest_orig_file)
                if not os.path.exists(os.path.dirname(dest_file)):
                    os.makedirs(os.path.dirname(dest_file))
                shutil.copy2(path, dest_file)
    def arch_build(self, arch):
        """Delegate per-architecture build setup to the active profile."""
        Package.profile.arch_build(arch, self)
    def expand_build_env(self):
        """Render the environment-variable prefix (OBJCFLAGS/CFLAGS/
        CXXFLAGS/CPPFLAGS/LDFLAGS) prepended to every build command."""
        return expand_macros(
            'OBJCFLAGS="%{gcc_flags} %{local_gcc_flags}" '
            'CFLAGS="%{gcc_flags} %{local_gcc_flags}" '
            'CXXFLAGS="%{gcc_flags} %{local_gcc_flags}" '
            'CPPFLAGS="%{cpp_flags} %{local_cpp_flags}" '
            'LDFLAGS="%{ld_flags} %{local_ld_flags}" ', self)
    def configure(self):
        """Run the configure command with global and per-package flags."""
        self.sh('%{configure} %{configure_flags} %{local_configure_flags}')
    def make(self):
        """Run the package's make command (parallel by default)."""
        self.sh('%{make}')
    def install(self):
        """Run the package's install command (staged via DESTDIR)."""
        self.sh('%{makeinstall}')
Package.default_sources = None
# -------------------------------------
# Package Templates
# -------------------------------------
class GnomePackage (Package):
    """Package hosted on ftp.gnome.org; the version is split into
    major/minor parts because the download URL groups tarballs by the
    major version directory."""
    def __init__(self, name, version_major='0', version_minor='0',
                 configure_flags=None, sources=None, override_properties=None):
        self.version_major = version_major
        self.version_minor = version_minor
        Package.__init__(self, name, '%{version_major}.%{version_minor}',
                         configure_flags=configure_flags,
                         sources=sources,
                         override_properties=override_properties)
GnomePackage.default_sources = [
    'http://ftp.gnome.org/pub/gnome/sources/%{name}/%{version_major}/%{name}-%{version}.tar.bz2'
]
class GnomeXzPackage (GnomePackage):
pass
GnomeXzPackage.default_sources = [
'http://ftp.gnome.org/pub/gnome/sources/%{name}/%{version_major}/%{name}-%{version}.tar.xz'
]
class GnomeGitPackage (Package):
def __init__(self, name, version, revision,
configure_flags=None, sources=None, override_properties=None):
Package.__init__(self, name, version,
configure='./autogen.sh --prefix="%{package_prefix}"',
configure_flags=configure_flags,
sources=sources,
override_properties=override_properties,
revision=revision)
GnomeGitPackage.default_sources = [
'git://git.gnome.org/%{name}'
]
class GnuPackage (Package):
pass
GnuPackage.default_sources = [
'ftp://ftp.gnu.org/gnu/%{name}/%{name}-%{version}.tar.gz'
]
class GnuBz2Package (Package):
pass
GnuBz2Package.default_sources = [
'ftp://ftp.gnu.org/gnu/%{name}/%{name}-%{version}.tar.bz2'
]
class GnuXzPackage (Package):
pass
GnuXzPackage.default_sources = [
'ftp://ftp.gnu.org/gnu/%{name}/%{name}-%{version}.tar.xz'
]
class CairoGraphicsPackage (Package):
pass
CairoGraphicsPackage.default_sources = [
'http://cairographics.org/releases/%{name}-%{version}.tar.gz'
]
class CairoGraphicsXzPackage (Package):
pass
CairoGraphicsXzPackage.default_sources = [
'http://cairographics.org/releases/%{name}-%{version}.tar.xz'
]
class ProjectPackage (Package):
def __init__(self, project, name, version, configure_flags=None,
sources=None, override_properties=None):
self.project = project
Package.__init__(self, name, version,
configure_flags=configure_flags,
sources=sources,
override_properties=override_properties)
class SourceForgePackage (ProjectPackage):
pass
SourceForgePackage.default_sources = [
'https://downloads.sourceforge.net/sourceforge/%{project}/%{name}-%{version}.tar.gz'
]
class FreeDesktopPackage (ProjectPackage):
pass
FreeDesktopPackage.default_sources = [
'http://%{project}.freedesktop.org/releases/%{name}-%{version}.tar.gz'
]
class GitHubTarballPackage (Package):
    """Package fetched as a GitHub-generated tarball of one commit.

    GitHub names the extracted directory <org>-<name>-<short-sha>, so
    source_dir_name is derived from the first 7 characters of the
    revision.
    """
    def __init__(self, org, name, version, commit, configure, override_properties=None):
        Package.__init__(self, name, version, revision=commit, organization=org,
                         override_properties=override_properties)
        # set after Package.__init__ so it overrides the default configure
        self.configure = configure
        self.source_dir_name = '%s-%s-%s' % (org, name, self.revision[:7])
GitHubTarballPackage.default_sources = [
    'https://github.com/%{organization}/%{name}/tarball/%{revision}'
]
class GitHubPackage (Package):
def __init__(self, organization, name, version, revision=None, git_branch=None, configure=None, configure_flags=None, override_properties=None):
Package.__init__(self, name, version,
organization=organization,
revision=revision,
git_branch=git_branch,
configure_flags=configure_flags,
configure=configure,
sources=[
'git://github.com/%{organization}/%{name}.git'],
override_properties=override_properties)
class GstreamerPackage (ProjectPackage):
pass
GstreamerPackage.default_sources = [
'https://%{project}.freedesktop.org/src/%{name}/%{name}-%{version}.tar.gz'
]
class XiphPackage (ProjectPackage):
pass
XiphPackage.default_sources = [
'https://downloads.xiph.org/releases/%{project}/%{name}-%{version}.tar.gz'
]
| 37.417062
| 194
| 0.555263
|
import hashlib
import os
import sys
import shutil
import tempfile
import filecmp
import datetime
import stat
import time
import urllib
from util.util import *
import functools
class MyUrlOpener(urllib.FancyURLopener):
    """URL opener that raises on HTTP errors.

    FancyURLopener silently returns the server's error page for
    responses like 404; delegating to URLopener's handler restores the
    raise-on-error behavior so failed downloads are detected.
    """
    def http_error_default(*args, **kwargs):
        return urllib.URLopener.http_error_default(*args, **kwargs)
class Package:
    def __init__(self, name, version=None, organization=None, configure_flags=None, sources=None, revision=None, git_branch=None, source_dir_name=None, override_properties=None, configure=None):
        """Define a buildable package.

        'sources' (URLs, git repos or local paths) defaults to the
        subclass's default_sources template list. 'override_properties'
        entries are copied straight onto the instance near the end, so
        they can replace almost any attribute set up here.
        """
        Package.last_instance = self
        self.name = name
        self.version = version
        self.organization = organization
        self.configure_flags = []
        # Per-package copies of the profile's flags so local edits don't
        # leak back into the profile.
        self.gcc_flags = list(Package.profile.gcc_flags)
        # NOTE(review): cpp_flags is seeded from gcc_flags, not a separate
        # profile cpp_flags list -- confirm that is intentional.
        self.cpp_flags = list(Package.profile.gcc_flags)
        self.ld_flags = list(Package.profile.ld_flags)
        self.aux_files = []
        self.local_cpp_flags = []
        self.local_gcc_flags = []
        self.local_ld_flags = []
        self.local_configure_flags = []
        self.build_env = ''
        self.desc = None
        self._dirstack = []
        self.extra_stage_files = []
        self.needs_lipo = False
        self.m32_only = False
        self.build_dependency = False
        self.dont_clean = False
        self.needs_build = None
        self.deploy_requests = []
        if configure_flags:
            self.configure_flags.extend(configure_flags)
        self.sources = sources
        if self.sources is None \
                and not self.__class__.default_sources is None:
            self.sources = list(self.__class__.default_sources)
        if self.organization is None and self.sources is not None and len(self.sources) > 0:
            self.organization = self.extract_organization(self.sources[0])
        self.source_dir_name = source_dir_name
        if self.source_dir_name is None:
            self.source_dir_name = "%s-%s" % (name, version)
        self.revision = revision
        if configure:
            self.configure = configure
        else:
            self.configure = './configure --prefix="%{package_prefix}"'
        self.make = 'make -j%s' % Package.profile.bockbuild.cpu_count
        self.makeinstall = None
        self.git_branch = git_branch
        self.git = Package.profile.bockbuild.git
        # Arbitrary attribute overrides, applied last so they win over
        # everything above.
        if not override_properties is None:
            for k, v in override_properties.iteritems():
                self.__dict__[k] = v
        self.makeinstall = self.makeinstall or 'make install DESTDIR=%{stage_root}'
        self.fetched = False
    def extract_organization(self, source):
        """Best-effort extraction of a GitHub organization from a git URL.

        Returns None for non-git sources, for anything containing 'http',
        and for git.gnome.org; raises for git URLs it cannot parse.
        NOTE(review): the '"http" in source' check also rejects https://
        GitHub clone URLs -- confirm that is intended.
        """
        if (not "git" in source) or ("http" in source):
            return None
        if "git.gnome.org" in source:
            return None
        if "github" in source:
            # e.g. git://github.com/mono/bockbuild.git -> 'mono'
            pattern = r"github.com\W(\w+)\/\S+\.git"
            match = re.search(pattern, source)
            if match:
                return match.group(1)
            else:
                raise Exception(
                    "Cannot determine organization for %s" % source)
        else:
            raise Exception("Cannot determine organization for %s" % source)
def try_get_version(self, source_dir):
configure_ac = os.path.join(source_dir, 'configure.ac')
if os.path.exists(configure_ac):
with open(configure_ac) as file:
pattern = r"AC_INIT\(\S+?\s*,\s*\[(\d\S+?)\]"
for x in range(40):
line = file.readline()
match = re.search(pattern, line)
if match:
return match.group(1)
    def trace(self, message):
        """Log via the global trace(), attributing the caller (skip=1)."""
        trace(message, skip=1)
    def resolve_version(self, source_dir):
        """Reconcile the declared package version with the version found
        in the source tree's configure.ac (if any) and store the result
        in self.version."""
        package_version = expand_macros(self.version, self)
        found_version = self.try_get_version(source_dir) or package_version
        if package_version is None:
            package_version = found_version
            trace('%s: Using found version %s' % (self.name, found_version))
        elif found_version[0] != package_version[0]:
            # NOTE(review): this compares only the first *character* of
            # each version string, not the major version -- confirm.
            warn('Version in configure.ac is %s, package declares %s' %
                 (found_version, package_version))
        self.version = package_version
@retry
def fetch(self, dest):
if self.fetched and os.path.lexists(dest):
return
scratch = self.profile.bockbuild.scratch
resources = self.profile.bockbuild.resources
source_cache_dir = self.profile.bockbuild.source_cache
self.buildstring = []
self.is_local = False
scratch_workspace = os.path.join(scratch, '%s.workspace' % self.name)
self.rm_if_exists(scratch_workspace)
if os.path.lexists(dest):
if os.path.islink(dest):
delete(dest)
elif os.path.isdir(dest):
shutil.move(dest, scratch_workspace)
else:
error ('Unexpected workspace found at %s' % dest)
def checkout(self, source_url, cache_dir, workspace_dir):
def clean_git_workspace(dir):
trace('Cleaning git workspace: ' + self.name)
self.git('reset --hard', dir, hazard = True)
if config.iterative == False:
self.git('clean -xffd', dir, hazard = True)
else:
warn('iterative')
def clean_local_git_workspace(dir):
self.git('clean -xffd', dir)
def create_cache():
if os.path.exists(workspace_dir):
self.rm(workspace_dir)
progress('Cloning git repo: %s' % source_url)
self.git('clone --mirror %s %s' %
(source_url, cache_dir), self.profile.bockbuild.root)
def update_cache():
trace('Updating cache: ' + cache_dir)
if self.git_branch is None:
self.git('fetch --all --prune', cache_dir)
else:
self.git('fetch origin %s' % self.git_branch, cache_dir)
def create_workspace():
self.git('clone --local --shared --recursive %s %s' %
(cache_dir, workspace_dir), cache_dir)
def update_workspace():
trace('Updating workspace')
if self.git_branch is None:
self.git('fetch --all --prune', workspace_dir)
else:
self.git('fetch origin %s:refs/remotes/origin/%s' %
(self.git_branch, self.git_branch), workspace_dir)
def resolve():
root = git_rootdir(self, os.path.realpath (workspace_dir))
if not is_modifiable_repo(root):
return clean_local_git_workspace
current_revision = git_get_revision(self, workspace_dir)
target_revision = None
if current_revision == self.revision:
return
if not self.is_local and self.revision is None and self.git_branch is None:
warn(
'Package does not define revision or branch, defaulting to tip of "master"')
self.git_branch = self.git_branch or 'master'
if self.revision is not None:
target_revision = self.revision
if self.git_branch is not None:
self.git('checkout %s' % self.git_branch, workspace_dir)
self.git('merge origin/%s --ff-only' %
self.git_branch, workspace_dir)
if self.revision is None:
target_revision = git_get_revision(self, workspace_dir)
if target_revision and (current_revision != target_revision):
self.git('reset --hard %s' %
target_revision, workspace_dir, hazard = True)
self.git('submodule update --recursive', workspace_dir)
current_revision = git_get_revision(self, workspace_dir)
if (self.revision is not None and self.revision != current_revision):
error('Workspace error: Revision is %s, package specifies %s' % (
current_revision, self.revision))
self.revision = current_revision
def define():
self.resolve_version(workspace_dir)
str = self.name
if self.version:
str += ' %s' % self.version
str += ' (%s)' % git_shortid(self, workspace_dir)
self.desc = str
self.buildstring = ['%s <%s>' % (str, source_url)]
if self.is_local:
self.rm_if_exists(workspace_dir)
work_committed = False
if git_is_dirty (self, source_url):
if self.profile.bockbuild.cmd_options.release_build:
error ('Release builds cannot have uncommitted local changes!')
else:
info ('The repository is dirty, your changes will be committed.')
bockbuild_commit_msg = '"WIP (auto-committed by bockbuild)"'
top_commit_msg = git_get_commit_msg (self, source_url)
if top_commit_msg == bockbuild_commit_msg:
self.git ('commit -a --allow-empty --amend -m', source_url, options = [bockbuild_commit_msg])
else:
self.git('commit -a --allow-empty -m', source_url, options = [bockbuild_commit_msg])
work_committed = True
self.shadow_copy (source_url, workspace_dir)
if work_committed:
self.git ('reset HEAD~1', source_url)
else:
if os.path.exists(cache_dir):
update_cache()
else:
create_cache()
if os.path.exists(workspace_dir):
if self.dont_clean == True:
clean_git_workspace(workspace_dir)
update_workspace()
else:
create_workspace()
cache = None
resolve()
define()
return clean_git_workspace
def checkout_archive(archive, cache_dest, workspace_dir):
def create_cache():
progress('Downloading: %s' % archive)
try:
filename, message = MyUrlOpener().retrieve(archive, cache_dest)
except IOError as e:
raise CommandException(
'%s error downloading %s' % (e[1], archive))
def update_cache():
pass
def create_workspace(dir):
filetype = get_filetype(cache_dest).lower()
if filetype.startswith(('gzip', 'xz', 'zip', 'bzip2')):
self.extract_archive(cache_dest, scratch, validate_only=False)
expected_path = os.path.join(scratch, self.source_dir_name)
if not os.path.exists(expected_path):
error('Archive %s was extracted but not found at workspace path %s' % (
cache_dest, expected_path))
if expected_path != dir:
shutil.move(expected_path, dir)
else:
ensure_dir(scratch_workspace)
shutil.copy(cache_dest, scratch_workspace)
def update_workspace():
pass
def clean_archive(dir):
try:
self.rm(dir)
create_workspace(dir)
except Exception as e:
self.rm_if_exists(cache_dest)
self.rm_if_exists(workspace_dir)
raise
def define():
self.resolve_version(workspace_dir)
self.desc = '%s %s' % (self.name, self.version)
self.buildstring = ['%s <%s> md5: %s)' % (
self.desc, archive, md5(cache_dest))]
if os.path.exists(cache_dest):
update_cache()
else:
create_cache()
if os.path.exists(workspace_dir):
update_workspace()
else:
create_workspace(workspace_dir)
define()
return clean_archive
def get_download_dest(url):
return os.path.join(source_cache_dir, os.path.basename(url))
def get_git_cache_path():
if self.organization is None:
name = self.name
else:
name = self.organization + "+" + self.name
return os.path.join(source_cache_dir, name)
clean_func = None
expand_macros(self.sources, self)
if not self.sources:
def clean_nop (dir):
pass
self.sources = []
self.desc = '%s %s' % (self.name, self.version)
self.buildstring.extend(
['%s md5: %s' % (os.path.basename(self._path), md5(self._path))])
clean_func = clean_nop
local_sources = []
try:
for source in self.sources:
resolved_source = None
cache = None
if source.startswith(('http://', 'https://', 'ftp://')):
cache = get_download_dest(source)
if self.profile.cache_host is not None:
cached_source = os.path.join(
self.profile.cache_host, os.path.basename(source))
try:
clean_func = checkout_archive(
cached_source, cache, scratch_workspace)
source = cached_source
except CommandException as e:
warn(repr(e))
verbose('Trying original source')
clean_func = checkout_archive(
source, cache, scratch_workspace)
else:
clean_func = checkout_archive(
source, cache, scratch_workspace)
resolved_source = scratch_workspace
elif source.startswith(('git://', 'file://', 'ssh://')) or source.endswith('.git') or (os.path.isdir(source) and git_isrootdir (self, source)):
if os.path.isdir(source):
self.is_local = True
cache = None
else:
cache = get_git_cache_path()
clean_func = checkout(
self, source, cache, scratch_workspace)
resolved_source = scratch_workspace
elif os.path.isabs(source) and os.path.isdir(source):
trace('copying local dir source %s ' % source)
def clean_local_copy(dir):
self.rm_if_exists(dir)
shutil.copytree(source, scratch_workspace)
resolved_source = scratch_workspace
self.resolve_version(scratch_workspace)
self.desc = '%s %s (local workspace: %s)' % (
self.name, self.version, source)
self.buildstring = ['local workspace: %s' % (source)]
clean_func = clean_local_copy
else:
for path in resources:
if os.path.isfile(os.path.join(path, source)):
resolved_source = os.path.join(path, source)
self.buildstring.extend(
['%s md5: %s' % (source, md5(resolved_source))])
if resolved_source is None:
error('could not resolve source: %s' % source)
trace('%s resolved to %s' % (source, resolved_source))
local_sources.append(resolved_source)
except Exception as e:
if cache is not None:
self.rm_if_exists(cache)
self.rm_if_exists(scratch_workspace)
raise
if len(self.sources) != len(local_sources):
error('Source number mismatch after processing: %s before, %s after ' % (
self.sources, local_sources))
if clean_func is None:
error('workspace cleaning function (clean_func) must be set')
self.local_sources = local_sources
self.clean = clean_func
if not os.path.exists(scratch_workspace):
os.mkdir(scratch_workspace)
self.workspace = dest
shutil.move(scratch_workspace, self.workspace)
if not os.path.exists(self.workspace):
error ('Workspace was not created')
self.fetched = True
    def request_build(self, reason):
        """Flag this package for (re)building, recording why."""
        self.needs_build = reason
    def override_build(self, reason):
        """Set the build decision; same effect as request_build, kept as
        a separate name to signal a forced decision (e.g. artifacts)."""
        self.needs_build = reason
def start_build(self, arch, dest, stage):
info(self.desc)
self.package_prefix = dest
self.staged_profile = stage
protect_dir(self.staged_profile, recursive=True)
workspace = self.workspace
build_artifact = self.build_artifact
if config.never_rebuild and os.path.isfile(build_artifact):
if self.deploy_package(build_artifact, self.staged_profile):
self.override_build(
'never_rebuild option enabled, using artifact')
else:
warn('Failed to deploy from artifact %s. Rebuilding' %
os.path.basename(build_artifact))
if self.needs_build:
verbose(self.buildstring)
if (arch == 'darwin-universal' and self.needs_lipo):
workspace_x86 = workspace + '-x86'
workspace_x64 = workspace + '-x64'
self.rm_if_exists(workspace_x86)
self.rm_if_exists(workspace_x64)
shutil.move(workspace, workspace_x86)
self.shadow_copy(workspace_x86, workspace_x64)
self.link(workspace_x86, workspace)
stagedir_x32 = self.do_build(
'darwin-32', os.path.join(self.profile.bockbuild.scratch, self.name + '-x86.install'))
self.link(workspace_x64, workspace)
package_stage = self.do_build(
'darwin-64', os.path.join(self.profile.bockbuild.scratch, self.name + '-x64.install'))
delete(workspace)
shutil.move(workspace_x86, workspace)
print 'lipo', self.name
self.lipo_dirs(stagedir_x32, package_stage, 'lib')
self.copy_side_by_side(
stagedir_x32, package_stage, 'bin', '32', '64')
elif arch == 'toolchain':
package_stage = self.do_build('darwin-64')
elif self.m32_only:
package_stage = self.do_build('darwin-32')
else:
package_stage = self.do_build(arch)
self.make_artifact(package_stage, build_artifact)
for target in self.deploy_requests:
self.deploy_package(build_artifact, target)
def deploy_package(self, artifact, dest):
trace('Deploying (%s -> %s)' %
(os.path.basename(artifact), os.path.basename(dest)))
unprotect_dir(dest, recursive=True)
artifact_stage = artifact + '.extracted'
try:
assert_exists(artifact)
self.rm_if_exists(artifact_stage)
unzip(artifact, artifact_stage)
assert_exists(artifact_stage)
except Exception as e:
self.rm_if_exists(artifact)
self.rm_if_exists(artifact_stage)
protect_dir(dest, recursive=True)
return False
ensure_dir(artifact_stage)
files = list()
size = 0
for path in iterate_dir(artifact_stage, with_links = True, summary=False):
relpath = os.path.relpath(path, artifact_stage)
destpath = os.path.join(dest, relpath)
if os.path.exists(destpath) and not identical_files(path, destpath):
warn(
'Different file exists in package already: ''%s''' % relpath )
files.append(relpath)
if not os.path.islink(path):
size = size + os.path.getsize(path)
files.sort()
is_changed(files, artifact + '.files')
with open(artifact + '.files', 'w') as output:
output.write('\n'.join(files))
if len(files) != 0:
merge_trees(artifact_stage, dest, False)
self.sh = functools.partial(self.build_sh, cwd=artifact_stage)
self.deploy()
self.rm_if_exists(artifact_stage)
protect_dir(dest, recursive=True)
verbose ('%d files, %sMB' % (len(files), "{:.2f}".format (size /1024 / 1024 )))
return True
def do_build(self, arch, install_dir=None):
progress('Building (arch: %s)' % arch)
if install_dir is None:
install_dir = os.path.join(
self.profile.bockbuild.scratch, self.name + '.install')
self.stage_root = install_dir
self.rm_if_exists(self.stage_root)
self.staged_prefix = os.path.join(
self.stage_root, self.package_prefix[1:])
os.makedirs(self.staged_prefix)
# protect against relocation bugs often landing files in the wrong path
protect_dir(self.stage_root)
try:
self.arch_build(arch)
self.build_env = self.expand_build_env()
self.sh = functools.partial(self.build_sh, cwd=self.workspace)
self.prep()
self.build()
self.install()
if not os.path.exists(self.staged_prefix):
error('Result directory %s not found.' % self.staged_prefix)
self.profile.process_package(self)
if not self.dont_clean:
self.clean (dir=self.workspace)
except (Exception, KeyboardInterrupt) as e:
self.rm_if_exists(self.stage_root)
if isinstance(e, CommandException):
if os.path.exists(self.workspace):
for path in self.aux_files:
self.rm_if_exists(path)
problem_dir = os.path.join(
self.profile.bockbuild.execution_root, os.path.basename(self.workspace) + '.problem')
# take this chance to clear out older .problems
for d in os.listdir(self.profile.bockbuild.execution_root):
if d.endswith('.problem'):
self.rm(os.path.join(self.profile.bockbuild.execution_root, d))
shutil.move(self.workspace, problem_dir)
info('Build moved to ./%s\n' % os.path.basename(problem_dir))
info('Run "source ./%s" first to replicate bockbuild environment.' %
os.path.basename(self.profile.bockbuild.env_script))
error(str(e))
else:
self.rm_if_exists(self.workspace)
raise
return self.staged_prefix
    def make_artifact(self, stage_dir, build_artifact):
        """Zip the staged install tree into the build artifact (replacing
        any previous artifact), then drop the stage directory."""
        self.rm_if_exists(build_artifact)
        # util zip helper (shadows the builtin zip)
        zip(stage_dir, build_artifact)
        self.rm_if_exists(stage_dir)
def deploy(self):
return
def build_sh(self, command, cwd):
if isinstance(command, list):
map(lambda cmd: self.build_sh(cmd, cwd), command)
return
if not isinstance(command, str):
error('command arg must be a string: %s' % repr(command))
if not os.path.isdir(cwd):
error('Directory does not exist: %s' % cwd)
try:
env_command = '%s %s' % (
self.build_env, expand_macros(command, self))
except Exception as e:
error('MACRO EXPANSION ERROR: ' + str(e))
if config.verbose is True:
logprint('\t@\t' + expand_macros(command, self), bcolors.BOLD)
with open(self.log, 'a') as log:
log.write('%s\n' % env_command)
full_command = '%s >>%s 2>&1' % (env_command, self.log)
try:
run_shell(full_command, cwd=cwd)
except Exception as e:
with open(self.log, 'r') as log:
output_text = log.readlines()
for line in output_text:
line = line.replace(config.absolute_root, '@')
print line,
warn('build env: ' + self.build_env)
raise CommandException('command failed: %s' %
expand_macros(command, self), cwd=cwd)
def backtick(self, command):
command = expand_macros(command, self)
return backtick(command)
def cwd(self):
try:
self._cwd = os.getcwd()
except Exception as e:
warn('In invalid directory: %s' % self._cwd)
return self._cwd
def cd(self, dir):
dir = expand_macros(dir, self)
if self.cwd() == dir:
return
os.chdir(dir)
self.cwd()
trace(dir)
def pushd(self, dir):
if len(self._dirstack) == 0:
self._dirstack.append({'dir': self._cwd, 'caller': 'profile'})
self.cd(dir)
self._dirstack.append({'dir': self._cwd, 'caller': get_caller()})
def popd(self, failure=False):
caller = get_caller()
cwd = self._dirstack.pop()
if not failure:
if cwd['caller'] != caller:
warn('popd: Unmatched pushd/popd callers: (%s/%s)' %
(cwd['caller'], caller))
# return False
if cwd['dir'] != self.cwd() and not failure:
warn ('popd: Inconsistent current dir state (expected ''%s'', was in ''%s''' % (
cwd['dir'], self._cwd))
top = self._dirstack[-1]
self.cd(top['dir'])
def prep(self):
return
def rm_if_exists(self, path):
path = expand_macros(path, self)
if os.path.lexists(path):
delete(path)
def rm(self, path):
delete(expand_macros(path, self))
def link(self, source, link):
trace('%s -> %s' % (link, source))
source = expand_macros(source, self)
link = expand_macros(link, self)
if os.path.lexists(link):
delete(link)
os.symlink(source, link)
def extract_archive(self, archive, cwd, validate_only, overwrite=False):
root, ext = os.path.splitext(archive)
if ext == '.zip':
command = which('unzip')
if not command:
error('unzip not found')
args = ["-qq"]
if overwrite:
args.extend(["-o"])
if validate_only:
args.extend(["-t"])
args.extend([archive])
else:
command = which('tar')
if not command:
error('tar not found')
args = ['xf', archive]
if validate_only:
args.extend(['-O'])
run(command, args, cwd)
def build(self):
Package.configure(self)
Package.make(self)
def lipo_dirs(self, dir_64, dir_32, bin_subdir, replace_32=True):
dir64_bin = os.path.join(dir_64, bin_subdir)
dir32_bin = os.path.join(dir_32, bin_subdir)
lipo_dir = tempfile.mkdtemp()
lipo_bin = os.path.join(lipo_dir, bin_subdir)
if not os.path.exists(dir64_bin):
return
if not os.path.exists(lipo_bin):
os.mkdir(lipo_bin)
# take each 64-bit binary, lipo with binary of same name
for root, dirs, filelist in os.walk(dir64_bin):
relpath = os.path.relpath(root, dir64_bin)
for file in filelist:
if file.endswith('.a') or file.endswith('.dylib') or file.endswith('.so'):
dir64_file = os.path.join(dir64_bin, relpath, file)
dir32_file = os.path.join(dir32_bin, relpath, file)
lipo_file = os.path.join(lipo_bin, relpath, file)
if os.path.exists(dir32_file):
if not os.path.exists(os.path.join(lipo_bin, relpath)):
os.makedirs(os.path.join(lipo_bin, relpath))
if os.path.islink(dir64_file):
continue
lipo_cmd = 'lipo -create %s %s -output %s ' % (
dir64_file, dir32_file, lipo_file)
# print lipo_cmd
run_shell(lipo_cmd)
if replace_32:
# replace all 32-bit binaries with the new fat
# binaries
shutil.move(lipo_file, dir32_file)
else:
warn("lipo: 32-bit version of file %s not found" % file)
#creates a deep hardlink copy of a directory
def shadow_copy (self, source, dest, exclude_git = False):
trace ('shadow_copy %s %s' % (source , dest))
if os.path.exists(dest):
error ('Destination directory must not exist')
# Bockbuild state may be under the directory if we are copying a local workspace. Avoid recursive copying
stateroot_parent = os.path.dirname (config.state_root)
stateroot_name = os.path.basename (config.state_root)
stateroot_found = False
if not os.path.commonprefix ([source, config.state_root]) == source:
stateroot_found = True
for root, subdirs, filelist in os.walk (source):
relpath = os.path.relpath(root, source) # e.g. 'lib/mystuff'
destpath = os.path.join(dest, relpath)
os.makedirs(destpath)
if exclude_git:
subdirs[:] = [dir for dir in subdirs if dir != '.git']
if not stateroot_found and root == stateroot_parent:
subdirs [:] = [dir for dir in subdirs if dir != stateroot_name]
stateroot_found = True
for file in filelist:
fullpath = os.path.join (root, file)
if os.path.islink(fullpath):
target = os.path.join(os.path.dirname(fullpath), os.readlink(fullpath))
if not os.path.exists(fullpath) or os.path.commonprefix ([config.state_root, target]) == config.state_root:
break
os.link (fullpath, os.path.join (destpath, file))
trace ('shadow_copy done')
def copy_side_by_side(self, src_dir, dest_dir, bin_subdir, suffix, orig_suffix=None):
def add_suffix(filename, sfx):
fileparts = filename.split('.', 1)
if len(fileparts) > 1:
p = '%s%s.%s' % (fileparts[0], sfx, fileparts[1])
else:
p = '%s%s' % (filename, sfx)
trace(p)
return p
src_dir = os.path.join(src_dir, bin_subdir)
dest_dir = os.path.join(dest_dir, bin_subdir)
trace('src_dir %s' % src_dir)
trace('dest_dir %s' % dest_dir)
if not os.path.exists(src_dir):
return # we don't always have bin/lib dirs
for path in iterate_dir(src_dir):
relpath = os.path.relpath(path, src_dir)
reldir, filename = os.path.split(relpath)
trace(reldir + '/' + filename)
filetype = backtick('file -b "%s"' % path)[0]
if filetype.startswith('Mach-O'):
dest_file = os.path.join(
dest_dir, reldir, add_suffix(filename, suffix))
trace(dest_file)
dest_orig_file = os.path.join(dest_dir, reldir, filename)
if not os.path.exists(dest_orig_file):
warn('lipo: %s exists in %s but not in %s' %
(relpath, src_dir, dest_dir))
elif orig_suffix is not None:
suffixed = os.path.join(
dest_dir, reldir, add_suffix(filename, orig_suffix))
trace(suffixed)
shutil.move(dest_orig_file, suffixed)
os.symlink(os.path.basename(suffixed), dest_orig_file)
if not os.path.exists(os.path.dirname(dest_file)):
os.makedirs(os.path.dirname(dest_file))
shutil.copy2(path, dest_file)
    def arch_build(self, arch):
        """Delegate the per-architecture build to the active profile."""
        Package.profile.arch_build(arch, self)

    def expand_build_env(self):
        """Return the build-environment variable prefix (CFLAGS, LDFLAGS,
        ...) with all %{...} macros expanded for this package."""
        return expand_macros(
            'OBJCFLAGS="%{gcc_flags} %{local_gcc_flags}" '
            'CFLAGS="%{gcc_flags} %{local_gcc_flags}" '
            'CXXFLAGS="%{gcc_flags} %{local_gcc_flags}" '
            'CPPFLAGS="%{cpp_flags} %{local_cpp_flags}" '
            'LDFLAGS="%{ld_flags} %{local_ld_flags}" ', self)

    def configure(self):
        """Run the configure step (macros expanded by self.sh)."""
        self.sh('%{configure} %{configure_flags} %{local_configure_flags}')

    def make(self):
        """Run the build step."""
        self.sh('%{make}')

    def install(self):
        """Run the install step."""
        self.sh('%{makeinstall}')

# The base Package has no implicit download locations; each subclass below
# installs its own default_sources URL template list.
Package.default_sources = None
class GnomePackage (Package):
    """Package whose tarball is fetched from ftp.gnome.org.

    The version is split into major/minor parts because GNOME groups its
    release directories by major version (see default_sources below).
    """

    def __init__(self, name, version_major='0', version_minor='0',
                 configure_flags=None, sources=None, override_properties=None):
        self.version_major = version_major
        self.version_minor = version_minor
        Package.__init__(self, name, '%{version_major}.%{version_minor}',
                         configure_flags=configure_flags,
                         sources=sources,
                         override_properties=override_properties)

GnomePackage.default_sources = [
    'http://ftp.gnome.org/pub/gnome/sources/%{name}/%{version_major}/%{name}-%{version}.tar.bz2'
]
class GnomeXzPackage (GnomePackage):
    """GnomePackage variant whose upstream tarball is .tar.xz, not .tar.bz2."""
    pass

GnomeXzPackage.default_sources = [
    'http://ftp.gnome.org/pub/gnome/sources/%{name}/%{version_major}/%{name}-%{version}.tar.xz'
]
class GnomeGitPackage (Package):
    """Package built from a git.gnome.org checkout pinned to one revision.

    Uses autogen.sh for configuration since a git tree has no pre-generated
    configure script.
    """

    def __init__(self, name, version, revision,
                 configure_flags=None, sources=None, override_properties=None):
        Package.__init__(self, name, version,
                         configure='./autogen.sh --prefix="%{package_prefix}"',
                         configure_flags=configure_flags,
                         sources=sources,
                         override_properties=override_properties,
                         revision=revision)

GnomeGitPackage.default_sources = [
    'git://git.gnome.org/%{name}'
]
class GnuPackage (Package):
    """Package fetched as .tar.gz from the GNU FTP site."""
    pass

GnuPackage.default_sources = [
    'ftp://ftp.gnu.org/gnu/%{name}/%{name}-%{version}.tar.gz'
]

class GnuBz2Package (Package):
    """GNU package distributed as .tar.bz2."""
    pass

GnuBz2Package.default_sources = [
    'ftp://ftp.gnu.org/gnu/%{name}/%{name}-%{version}.tar.bz2'
]

class GnuXzPackage (Package):
    """GNU package distributed as .tar.xz."""
    pass

GnuXzPackage.default_sources = [
    'ftp://ftp.gnu.org/gnu/%{name}/%{name}-%{version}.tar.xz'
]

class CairoGraphicsPackage (Package):
    """Package released on cairographics.org as .tar.gz."""
    pass

CairoGraphicsPackage.default_sources = [
    'http://cairographics.org/releases/%{name}-%{version}.tar.gz'
]

class CairoGraphicsXzPackage (Package):
    """Package released on cairographics.org as .tar.xz."""
    pass

CairoGraphicsXzPackage.default_sources = [
    'http://cairographics.org/releases/%{name}-%{version}.tar.xz'
]
class ProjectPackage (Package):
    """Package hosted under a named umbrella project (SourceForge,
    freedesktop.org, xiph.org, ...).

    Stores the project name so the subclasses' default_sources URL
    templates can reference %{project}.
    """

    def __init__(self, project, name, version, configure_flags=None,
                 sources=None, override_properties=None):
        self.project = project
        Package.__init__(self, name, version,
                         configure_flags=configure_flags,
                         sources=sources,
                         override_properties=override_properties)
class SourceForgePackage (ProjectPackage):
    """ProjectPackage downloaded from the SourceForge mirror network."""
    pass

SourceForgePackage.default_sources = [
    'https://downloads.sourceforge.net/sourceforge/%{project}/%{name}-%{version}.tar.gz'
]

class FreeDesktopPackage (ProjectPackage):
    """ProjectPackage released under <project>.freedesktop.org."""
    pass

FreeDesktopPackage.default_sources = [
    'http://%{project}.freedesktop.org/releases/%{name}-%{version}.tar.gz'
]
class GitHubTarballPackage (Package):
    """Package fetched as a GitHub auto-generated tarball for one commit.

    GitHub names the directory inside such tarballs
    '<org>-<repo>-<7-char sha>', so source_dir_name is overridden to match.
    """

    def __init__(self, org, name, version, commit, configure, override_properties=None):
        Package.__init__(self, name, version, revision=commit, organization=org,
                         override_properties=override_properties)
        self.configure = configure
        self.source_dir_name = '%s-%s-%s' % (org, name, self.revision[:7])

GitHubTarballPackage.default_sources = [
    'https://github.com/%{organization}/%{name}/tarball/%{revision}'
]
class GitHubPackage (Package):
    """Package cloned directly from a GitHub git repository (optionally
    pinned to a revision or branch)."""

    def __init__(self, organization, name, version, revision=None, git_branch=None, configure=None, configure_flags=None, override_properties=None):
        Package.__init__(self, name, version,
                         organization=organization,
                         revision=revision,
                         git_branch=git_branch,
                         configure_flags=configure_flags,
                         configure=configure,
                         sources=[
                             'git://github.com/%{organization}/%{name}.git'],
                         override_properties=override_properties)
class GstreamerPackage (ProjectPackage):
    """ProjectPackage from the GStreamer area of freedesktop.org."""
    pass

GstreamerPackage.default_sources = [
    'https://%{project}.freedesktop.org/src/%{name}/%{name}-%{version}.tar.gz'
]

class XiphPackage (ProjectPackage):
    """ProjectPackage from downloads.xiph.org (ogg, vorbis, theora, ...)."""
    pass

XiphPackage.default_sources = [
    'https://downloads.xiph.org/releases/%{project}/%{name}-%{version}.tar.gz'
]
| false
| true
|
f719d3c75df148a6ceda79acf57bc0e57342d5f3
| 4,311
|
py
|
Python
|
src/test.py
|
alexey-kaz/python-project
|
661fe06e09846cd1c3c6d600973a6e3433096c1d
|
[
"MIT"
] | null | null | null |
src/test.py
|
alexey-kaz/python-project
|
661fe06e09846cd1c3c6d600973a6e3433096c1d
|
[
"MIT"
] | null | null | null |
src/test.py
|
alexey-kaz/python-project
|
661fe06e09846cd1c3c6d600973a6e3433096c1d
|
[
"MIT"
] | 3
|
2021-04-25T06:37:26.000Z
|
2021-06-03T19:19:19.000Z
|
"""Тест."""
import unittest
from recipes import form_answer
from database import delete_table_data
from parsing import NEWS, AFISHA, HOROSCOPE, WEATHER
class TestBot(unittest.TestCase):
    """Unit tests for the bot's recipe formatting and page-parser classes."""

    def test_form_answer(self):
        """form_answer renders a recipe dict as HTML and raises on missing keys."""
        rec1 = {"name": "Булочки с изюмом",
                "ingrs": ["Мука", "Яйцо куриное", "Изюм"],
                "link": "http://recipe"}
        ans1 = '<b>Булочки с изюмом</b>\nИнгредиенты:\n 1) Мука\n' + \
               '2) Яйцо куриное\n3) Изюм\n\n<a href="http://recipe">Булочки с изюмом</a>'
        self.assertEqual(form_answer(rec1), ans1)
        rec2 = {"name": "Омлет",
                "ingrs": ["Яйцо куриное", "Соль", "Молоко"],
                "link": "http://recipe"}
        ans2 = '<b>Омлет</b>\nИнгредиенты:\n 1) Яйцо куриное\n2) Соль\n' + \
               '3) Молоко\n\n<a href="http://recipe">Омлет</a>'
        self.assertEqual(form_answer(rec2), ans2)
        with self.assertRaises(KeyError):
            form_answer(dict())

    # NOTE(review): disabled test kept for reference — presumably it needs a
    # live database; confirm before re-enabling.
    # def test_empty_delete_reminders(self):
    #     self.assertEqual(delete_table_data("reminders"), 0)

    def test_parsing_horoscope(self):
        """HOROSCOPE points at the expected site."""
        obj = HOROSCOPE()
        self.assertEqual(obj.url, "https://1001goroskop.ru")

    def test_parsing_horoscope_1(self):
        """get_signs returns a list."""
        obj = HOROSCOPE()
        self.assertEqual(type(obj.get_signs()), type([1, 2]))

    def test_parsing_news(self):
        """A fresh NEWS parser has no article counter yet."""
        obj = NEWS()
        self.assertEqual(obj.count, None)

    def test_parsing_news_1(self):
        """make_zero resets the article counter."""
        obj = NEWS()
        obj.count = 5
        obj.make_zero()
        self.assertEqual(obj.count, 0)

    def test_parsing_news_2(self):
        """NEWS points at the expected listing page."""
        obj = NEWS()
        self.assertEqual(obj.url, "https://lenta.ru/parts/news")

    def test_parsing_news_3(self):
        """NEWS keeps the site root for building absolute links."""
        obj = NEWS()
        self.assertEqual(obj.url_part, "https://lenta.ru")

    def test_parsing_news_4(self):
        """parse returns a list."""
        obj = NEWS()
        self.assertEqual(type(obj.parse()), type([1, 2]))

    def test_parsing_weather(self):
        """WEATHER starts with a dict for extra data."""
        obj = WEATHER()
        self.assertEqual(type(obj.extra_data), type({}))

    def test_parsing_weather_1(self):
        """WEATHER points at the expected site."""
        obj = WEATHER()
        self.assertEqual(obj.url, "https://www.gismeteo.ru")

    def test_parsing_weather_2(self):
        """WEATHER starts with a dict for main data."""
        obj = WEATHER()
        self.assertEqual(type(obj.main_data), type({}))

    def test_parsing_afisha(self):
        """A fresh AFISHA parser has no cinema counter yet."""
        obj = AFISHA()
        self.assertEqual(obj.cinema_count, None)

    def test_parsing_afisha_1(self):
        """make_zero resets the cinema counter."""
        obj = AFISHA()
        obj.cinema_count = 1
        obj.make_zero()
        self.assertEqual(obj.cinema_count, 0)

    def test_parsing_afisha_2(self):
        """make_zero resets the theatre counter."""
        obj = AFISHA()
        obj.theatre_count = 2
        obj.make_zero()
        self.assertEqual(obj.theatre_count, 0)

    def test_parsing_afisha_3(self):
        """make_zero resets the concert counter."""
        obj = AFISHA()
        obj.concert_count = 3
        obj.make_zero()
        self.assertEqual(obj.concert_count, 0)

    def test_parsing_afisha_4(self):
        """make_zero resets cinema and theatre counters together."""
        obj = AFISHA()
        obj.cinema_count = 1
        obj.theatre_count = 2
        obj.make_zero()
        self.assertEqual(obj.cinema_count, 0)
        self.assertEqual(obj.theatre_count, 0)

    def test_parsing_afisha_5(self):
        """make_zero resets cinema and concert counters together."""
        obj = AFISHA()
        obj.cinema_count = 1
        obj.concert_count = 3
        obj.make_zero()
        self.assertEqual(obj.cinema_count, 0)
        self.assertEqual(obj.concert_count, 0)

    def test_parsing_afisha_6(self):
        """make_zero resets theatre and concert counters together."""
        obj = AFISHA()
        obj.theatre_count = 2
        obj.concert_count = 3
        obj.make_zero()
        self.assertEqual(obj.theatre_count, 0)
        self.assertEqual(obj.concert_count, 0)

    def test_parsing_afisha_total(self):
        """make_zero resets all three counters at once."""
        obj = AFISHA()
        obj.cinema_count = 1
        obj.theatre_count = 2
        obj.concert_count = 3
        obj.make_zero()
        self.assertEqual(obj.cinema_count, 0)
        self.assertEqual(obj.theatre_count, 0)
        self.assertEqual(obj.concert_count, 0)
| 29.527397
| 89
| 0.567618
|
import unittest
from recipes import form_answer
from database import delete_table_data
from parsing import NEWS, AFISHA, HOROSCOPE, WEATHER
class TestBot(unittest.TestCase):
def test_form_answer(self):
rec1 = {"name": "Булочки с изюмом",
"ingrs": ["Мука", "Яйцо куриное", "Изюм"],
"link": "http://recipe"}
ans1 = '<b>Булочки с изюмом</b>\nИнгредиенты:\n 1) Мука\n' + \
'2) Яйцо куриное\n3) Изюм\n\n<a href="http://recipe">Булочки с изюмом</a>'
self.assertEqual(form_answer(rec1), ans1)
rec2 = {"name": "Омлет",
"ingrs": ["Яйцо куриное", "Соль", "Молоко"],
"link": "http://recipe"}
ans2 = '<b>Омлет</b>\nИнгредиенты:\n 1) Яйцо куриное\n2) Соль\n' + \
'3) Молоко\n\n<a href="http://recipe">Омлет</a>'
self.assertEqual(form_answer(rec2), ans2)
with self.assertRaises(KeyError):
form_answer(dict())
def test_parsing_horoscope(self):
obj = HOROSCOPE()
self.assertEqual(obj.url, "https://1001goroskop.ru")
def test_parsing_horoscope_1(self):
obj = HOROSCOPE()
self.assertEqual(type(obj.get_signs()), type([1, 2]))
def test_parsing_news(self):
obj = NEWS()
self.assertEqual(obj.count, None)
def test_parsing_news_1(self):
obj = NEWS()
obj.count = 5
obj.make_zero()
self.assertEqual(obj.count, 0)
def test_parsing_news_2(self):
obj = NEWS()
self.assertEqual(obj.url, "https://lenta.ru/parts/news")
def test_parsing_news_3(self):
obj = NEWS()
self.assertEqual(obj.url_part, "https://lenta.ru")
def test_parsing_news_4(self):
obj = NEWS()
self.assertEqual(type(obj.parse()), type([1, 2]))
def test_parsing_weather(self):
obj = WEATHER()
self.assertEqual(type(obj.extra_data), type({}))
def test_parsing_weather_1(self):
obj = WEATHER()
self.assertEqual(obj.url, "https://www.gismeteo.ru")
def test_parsing_weather_2(self):
obj = WEATHER()
self.assertEqual(type(obj.main_data), type({}))
def test_parsing_afisha(self):
obj = AFISHA()
self.assertEqual(obj.cinema_count, None)
def test_parsing_afisha_1(self):
obj = AFISHA()
obj.cinema_count = 1
obj.make_zero()
self.assertEqual(obj.cinema_count, 0)
def test_parsing_afisha_2(self):
obj = AFISHA()
obj.theatre_count = 2
obj.make_zero()
self.assertEqual(obj.theatre_count, 0)
def test_parsing_afisha_3(self):
obj = AFISHA()
obj.concert_count = 3
obj.make_zero()
self.assertEqual(obj.concert_count, 0)
def test_parsing_afisha_4(self):
obj = AFISHA()
obj.cinema_count = 1
obj.theatre_count = 2
obj.make_zero()
self.assertEqual(obj.cinema_count, 0)
self.assertEqual(obj.theatre_count, 0)
def test_parsing_afisha_5(self):
obj = AFISHA()
obj.cinema_count = 1
obj.concert_count = 3
obj.make_zero()
self.assertEqual(obj.cinema_count, 0)
self.assertEqual(obj.concert_count, 0)
def test_parsing_afisha_6(self):
obj = AFISHA()
obj.theatre_count = 2
obj.concert_count = 3
obj.make_zero()
self.assertEqual(obj.theatre_count, 0)
self.assertEqual(obj.concert_count, 0)
def test_parsing_afisha_total(self):
obj = AFISHA()
obj.cinema_count = 1
obj.theatre_count = 2
obj.concert_count = 3
obj.make_zero()
self.assertEqual(obj.cinema_count, 0)
self.assertEqual(obj.theatre_count, 0)
self.assertEqual(obj.concert_count, 0)
| true
| true
|
f719d4ecd4826b2aad60d635215f0987148b894f
| 7,097
|
py
|
Python
|
vendor/bundle/ruby/2.3.0/gems/nokogiri-1.10.10/ext/nokogiri/tmp/x86_64-apple-darwin17/ports/libxml2/2.9.10/libxml2-2.9.10/python/setup.py
|
emsommers/futureDocs
|
344524234d024a532716a8ad4162aad00a455e8b
|
[
"CC0-1.0"
] | null | null | null |
vendor/bundle/ruby/2.3.0/gems/nokogiri-1.10.10/ext/nokogiri/tmp/x86_64-apple-darwin17/ports/libxml2/2.9.10/libxml2-2.9.10/python/setup.py
|
emsommers/futureDocs
|
344524234d024a532716a8ad4162aad00a455e8b
|
[
"CC0-1.0"
] | null | null | null |
vendor/bundle/ruby/2.3.0/gems/nokogiri-1.10.10/ext/nokogiri/tmp/x86_64-apple-darwin17/ports/libxml2/2.9.10/libxml2-2.9.10/python/setup.py
|
emsommers/futureDocs
|
344524234d024a532716a8ad4162aad00a455e8b
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/python -u
#
# Setup script for libxml2 and libxslt if found
#
import sys, os
from distutils.core import setup, Extension
# Below ROOT, we expect to find include, include/libxml2, lib and bin.
# On *nix, it is not needed (but should not harm),
# on Windows, it is set by configure.js.
ROOT = r'/Users/emsommers/Documents/GitHub/futureDocs/vendor/bundle/ruby/2.3.0/gems/nokogiri-1.10.10/ports/x86_64-apple-darwin17/libxml2/2.9.10'
# Thread-enabled libxml2
with_threads = 1
# If this flag is set (windows only),
# a private copy of the dlls are included in the package.
# If this flag is not set, the libxml2 and libxslt
# dlls must be found somewhere in the PATH at runtime.
WITHDLLS = 1 and sys.platform.startswith('win')
def missing(file):
    """Return 1 when *file* is absent or not readable, else 0."""
    return 0 if os.access(file, os.R_OK) else 1
try:
HOME = os.environ['HOME']
except:
HOME="C:"
if WITHDLLS:
# libxml dlls (expected in ROOT/bin)
dlls = [ 'iconv.dll','libxml2.dll','libxslt.dll','libexslt.dll' ]
dlls = [os.path.join(ROOT,'bin',dll) for dll in dlls]
# create __init__.py for the libxmlmods package
if not os.path.exists("libxmlmods"):
os.mkdir("libxmlmods")
open("libxmlmods/__init__.py","w").close()
def altImport(s):
    """Rewrite bare extension-module imports in *s* so they are loaded from
    the private libxmlmods package (used when the DLLs are bundled)."""
    rewrites = (
        ("import libxml2mod", "from libxmlmods import libxml2mod"),
        ("import libxsltmod", "from libxmlmods import libxsltmod"),
    )
    for plain, packaged in rewrites:
        s = s.replace(plain, packaged)
    return s
if sys.platform.startswith('win'):
libraryPrefix = 'lib'
platformLibs = []
else:
libraryPrefix = ''
platformLibs = ["m","z"]
# those are examined to find
# - libxml2/libxml/tree.h
# - iconv.h
# - libxslt/xsltconfig.h
includes_dir = [
"/usr/include",
"/usr/local/include",
"/opt/include",
os.path.join(ROOT,'include'),
HOME
];
xml_includes=""
for dir in includes_dir:
if not missing(dir + "/libxml2/libxml/tree.h"):
xml_includes=dir + "/libxml2"
break;
if xml_includes == "":
print("failed to find headers for libxml2: update includes_dir")
sys.exit(1)
iconv_includes=""
for dir in includes_dir:
if not missing(dir + "/iconv.h"):
iconv_includes=dir
break;
if iconv_includes == "":
print("failed to find headers for libiconv: update includes_dir")
sys.exit(1)
# those are added in the linker search path for libraries
libdirs = [
os.path.join(ROOT,'lib'),
]
xml_files = ["libxml2-api.xml", "libxml2-python-api.xml",
"libxml.c", "libxml.py", "libxml_wrap.h", "types.c",
"xmlgenerator.py", "README", "TODO", "drv_libxml2.py"]
xslt_files = ["libxslt-api.xml", "libxslt-python-api.xml",
"libxslt.c", "libxsl.py", "libxslt_wrap.h",
"xsltgenerator.py"]
if missing("libxml2-py.c") or missing("libxml2.py"):
try:
try:
import xmlgenerator
except:
import generator
except:
print("failed to find and generate stubs for libxml2, aborting ...")
print(sys.exc_info()[0], sys.exc_info()[1])
sys.exit(1)
head = open("libxml.py", "r")
generated = open("libxml2class.py", "r")
result = open("libxml2.py", "w")
for line in head.readlines():
if WITHDLLS:
result.write(altImport(line))
else:
result.write(line)
for line in generated.readlines():
result.write(line)
head.close()
generated.close()
result.close()
with_xslt=0
if missing("libxslt-py.c") or missing("libxslt.py"):
if missing("xsltgenerator.py") or missing("libxslt-api.xml"):
print("libxslt stub generator not found, libxslt not built")
else:
try:
import xsltgenerator
except:
print("failed to generate stubs for libxslt, aborting ...")
print(sys.exc_info()[0], sys.exc_info()[1])
else:
head = open("libxsl.py", "r")
generated = open("libxsltclass.py", "r")
result = open("libxslt.py", "w")
for line in head.readlines():
if WITHDLLS:
result.write(altImport(line))
else:
result.write(line)
for line in generated.readlines():
result.write(line)
head.close()
generated.close()
result.close()
with_xslt=1
else:
with_xslt=1
if with_xslt == 1:
xslt_includes=""
for dir in includes_dir:
if not missing(dir + "/libxslt/xsltconfig.h"):
xslt_includes=dir + "/libxslt"
break;
if xslt_includes == "":
print("failed to find headers for libxslt: update includes_dir")
with_xslt = 0
descr = "libxml2 package"
modules = [ 'libxml2', 'drv_libxml2' ]
if WITHDLLS:
modules.append('libxmlmods.__init__')
c_files = ['libxml2-py.c', 'libxml.c', 'types.c' ]
includes= [xml_includes, iconv_includes]
libs = [libraryPrefix + "xml2"] + platformLibs
macros = []
if with_threads:
macros.append(('_REENTRANT','1'))
if with_xslt == 1:
descr = "libxml2 and libxslt package"
if not sys.platform.startswith('win'):
#
# We are gonna build 2 identical shared libs with merge initializing
# both libxml2mod and libxsltmod
#
c_files = c_files + ['libxslt-py.c', 'libxslt.c']
xslt_c_files = c_files
macros.append(('MERGED_MODULES', '1'))
else:
#
# On windows the MERGED_MODULE option is not needed
# (and does not work)
#
xslt_c_files = ['libxslt-py.c', 'libxslt.c', 'types.c']
libs.insert(0, libraryPrefix + 'exslt')
libs.insert(0, libraryPrefix + 'xslt')
includes.append(xslt_includes)
modules.append('libxslt')
extens=[Extension('libxml2mod', c_files, include_dirs=includes,
library_dirs=libdirs,
libraries=libs, define_macros=macros)]
if with_xslt == 1:
extens.append(Extension('libxsltmod', xslt_c_files, include_dirs=includes,
library_dirs=libdirs,
libraries=libs, define_macros=macros))
if missing("MANIFEST"):
manifest = open("MANIFEST", "w")
manifest.write("setup.py\n")
for file in xml_files:
manifest.write(file + "\n")
if with_xslt == 1:
for file in xslt_files:
manifest.write(file + "\n")
manifest.close()
if WITHDLLS:
ext_package = "libxmlmods"
if sys.version >= "2.2":
base = "lib/site-packages/"
else:
base = ""
data_files = [(base+"libxmlmods",dlls)]
else:
ext_package = None
data_files = []
setup (name = "libxml2-python",
# On *nix, the version number is created from setup.py.in
# On windows, it is set by configure.js
version = "2.9.10",
description = descr,
author = "Daniel Veillard",
author_email = "veillard@redhat.com",
url = "http://xmlsoft.org/python.html",
licence="MIT Licence",
py_modules=modules,
ext_modules=extens,
ext_package=ext_package,
data_files=data_files,
)
sys.exit(0)
| 29.205761
| 144
| 0.610117
|
import sys, os
from distutils.core import setup, Extension
ROOT = r'/Users/emsommers/Documents/GitHub/futureDocs/vendor/bundle/ruby/2.3.0/gems/nokogiri-1.10.10/ports/x86_64-apple-darwin17/libxml2/2.9.10'
with_threads = 1
WITHDLLS = 1 and sys.platform.startswith('win')
def missing(file):
if os.access(file, os.R_OK) == 0:
return 1
return 0
try:
HOME = os.environ['HOME']
except:
HOME="C:"
if WITHDLLS:
dlls = [ 'iconv.dll','libxml2.dll','libxslt.dll','libexslt.dll' ]
dlls = [os.path.join(ROOT,'bin',dll) for dll in dlls]
if not os.path.exists("libxmlmods"):
os.mkdir("libxmlmods")
open("libxmlmods/__init__.py","w").close()
def altImport(s):
s = s.replace("import libxml2mod","from libxmlmods import libxml2mod")
s = s.replace("import libxsltmod","from libxmlmods import libxsltmod")
return s
if sys.platform.startswith('win'):
libraryPrefix = 'lib'
platformLibs = []
else:
libraryPrefix = ''
platformLibs = ["m","z"]
includes_dir = [
"/usr/include",
"/usr/local/include",
"/opt/include",
os.path.join(ROOT,'include'),
HOME
];
xml_includes=""
for dir in includes_dir:
if not missing(dir + "/libxml2/libxml/tree.h"):
xml_includes=dir + "/libxml2"
break;
if xml_includes == "":
print("failed to find headers for libxml2: update includes_dir")
sys.exit(1)
iconv_includes=""
for dir in includes_dir:
if not missing(dir + "/iconv.h"):
iconv_includes=dir
break;
if iconv_includes == "":
print("failed to find headers for libiconv: update includes_dir")
sys.exit(1)
libdirs = [
os.path.join(ROOT,'lib'),
]
xml_files = ["libxml2-api.xml", "libxml2-python-api.xml",
"libxml.c", "libxml.py", "libxml_wrap.h", "types.c",
"xmlgenerator.py", "README", "TODO", "drv_libxml2.py"]
xslt_files = ["libxslt-api.xml", "libxslt-python-api.xml",
"libxslt.c", "libxsl.py", "libxslt_wrap.h",
"xsltgenerator.py"]
if missing("libxml2-py.c") or missing("libxml2.py"):
try:
try:
import xmlgenerator
except:
import generator
except:
print("failed to find and generate stubs for libxml2, aborting ...")
print(sys.exc_info()[0], sys.exc_info()[1])
sys.exit(1)
head = open("libxml.py", "r")
generated = open("libxml2class.py", "r")
result = open("libxml2.py", "w")
for line in head.readlines():
if WITHDLLS:
result.write(altImport(line))
else:
result.write(line)
for line in generated.readlines():
result.write(line)
head.close()
generated.close()
result.close()
with_xslt=0
if missing("libxslt-py.c") or missing("libxslt.py"):
if missing("xsltgenerator.py") or missing("libxslt-api.xml"):
print("libxslt stub generator not found, libxslt not built")
else:
try:
import xsltgenerator
except:
print("failed to generate stubs for libxslt, aborting ...")
print(sys.exc_info()[0], sys.exc_info()[1])
else:
head = open("libxsl.py", "r")
generated = open("libxsltclass.py", "r")
result = open("libxslt.py", "w")
for line in head.readlines():
if WITHDLLS:
result.write(altImport(line))
else:
result.write(line)
for line in generated.readlines():
result.write(line)
head.close()
generated.close()
result.close()
with_xslt=1
else:
with_xslt=1
if with_xslt == 1:
xslt_includes=""
for dir in includes_dir:
if not missing(dir + "/libxslt/xsltconfig.h"):
xslt_includes=dir + "/libxslt"
break;
if xslt_includes == "":
print("failed to find headers for libxslt: update includes_dir")
with_xslt = 0
descr = "libxml2 package"
modules = [ 'libxml2', 'drv_libxml2' ]
if WITHDLLS:
modules.append('libxmlmods.__init__')
c_files = ['libxml2-py.c', 'libxml.c', 'types.c' ]
includes= [xml_includes, iconv_includes]
libs = [libraryPrefix + "xml2"] + platformLibs
macros = []
if with_threads:
macros.append(('_REENTRANT','1'))
if with_xslt == 1:
descr = "libxml2 and libxslt package"
if not sys.platform.startswith('win'):
c_files = c_files + ['libxslt-py.c', 'libxslt.c']
xslt_c_files = c_files
macros.append(('MERGED_MODULES', '1'))
else:
xslt_c_files = ['libxslt-py.c', 'libxslt.c', 'types.c']
libs.insert(0, libraryPrefix + 'exslt')
libs.insert(0, libraryPrefix + 'xslt')
includes.append(xslt_includes)
modules.append('libxslt')
extens=[Extension('libxml2mod', c_files, include_dirs=includes,
library_dirs=libdirs,
libraries=libs, define_macros=macros)]
if with_xslt == 1:
extens.append(Extension('libxsltmod', xslt_c_files, include_dirs=includes,
library_dirs=libdirs,
libraries=libs, define_macros=macros))
if missing("MANIFEST"):
manifest = open("MANIFEST", "w")
manifest.write("setup.py\n")
for file in xml_files:
manifest.write(file + "\n")
if with_xslt == 1:
for file in xslt_files:
manifest.write(file + "\n")
manifest.close()
if WITHDLLS:
ext_package = "libxmlmods"
if sys.version >= "2.2":
base = "lib/site-packages/"
else:
base = ""
data_files = [(base+"libxmlmods",dlls)]
else:
ext_package = None
data_files = []
setup (name = "libxml2-python",
version = "2.9.10",
description = descr,
author = "Daniel Veillard",
author_email = "veillard@redhat.com",
url = "http://xmlsoft.org/python.html",
licence="MIT Licence",
py_modules=modules,
ext_modules=extens,
ext_package=ext_package,
data_files=data_files,
)
sys.exit(0)
| true
| true
|
f719d938c36fb80ad1c9ea86ac17254b9fc23390
| 50,482
|
py
|
Python
|
pyGPs/Core/gp.py
|
Corentin-LF/pyGPs
|
b9d36777584cd53756bd4311c3c20ea52e945451
|
[
"BSD-2-Clause"
] | null | null | null |
pyGPs/Core/gp.py
|
Corentin-LF/pyGPs
|
b9d36777584cd53756bd4311c3c20ea52e945451
|
[
"BSD-2-Clause"
] | null | null | null |
pyGPs/Core/gp.py
|
Corentin-LF/pyGPs
|
b9d36777584cd53756bd4311c3c20ea52e945451
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import division
from __future__ import absolute_import
from builtins import str
from builtins import range
from builtins import object
from past.utils import old_div
#================================================================================
# Marion Neumann [marion dot neumann at uni-bonn dot de]
# Daniel Marthaler [dan dot marthaler at gmail dot com]
# Shan Huang [shan dot huang at iais dot fraunhofer dot de]
# Kristian Kersting [kristian dot kersting at cs dot tu-dortmund dot de]
#
# This file is part of pyGPs.
# The software package is released under the BSD 2-Clause (FreeBSD) License.
#
# Copyright (c) by
# Marion Neumann, Daniel Marthaler, Shan Huang & Kristian Kersting, 18/02/2014
#================================================================================
# MEANING OF NOTATION:
#
# inffunc function specifying the inference method
# covfunc prior covariance function (see below)
# meanfunc prior mean function
# likfunc likelihood function
# x n by D matrix of training inputs
# y column vector of length n of training targets
# xs n by D matrix of test inputs
# ys column vector of length nn of true test targets (optional)
# nlZ returned value of the negative log marginal likelihood
# dnlZ column vector of partial derivatives of the negative
# log marginal likelihood w.r.t. each hyperparameter
# ym column vector (of length ns) of predictive output means
# ys2 column vector (of length ns) of predictive output variances
# fm column vector (of length ns) of predictive latent means
# fs2 column vector (of length ns) of predictive latent variances
# lp column vector (of length ns) of log predictive probabilities
# post struct representation of the (approximate) posterior
# post consists of post.alpha, post.L, post.sW
#
# This is a object-oriented python implementation of gpml functionality
# (Copyright (c) by Carl Edward Rasmussen and Hannes Nickisch, 2011-02-18).
# based on the functional-version of python implementation
# (Copyright (c) by Marion Neumann and Daniel Marthaler, 20/05/2013)
#
# Copyright (c) by Marion Neumann and Shan Huang, 30/09/2013
import itertools
import numpy as np
import matplotlib.pyplot as plt
from . import inf, mean, lik, cov, opt
from .tools import unique, jitchol, solve_chol
from copy import deepcopy
import pyGPs
from pyGPs.Core.cov import FITCOfKernel
import logging
SHADEDCOLOR = [0.7539, 0.89453125, 0.62890625, 1.0]
MEANCOLOR = [ 0.2109375, 0.63385, 0.1796875, 1.0]
DATACOLOR = [0.12109375, 0.46875, 1., 1.0]
class GP(object):
'''
Base class for GP model.
'''
    def __init__(self):
        """Initialize an empty GP model: all functional components and all
        cached training/prediction results start as None."""
        super(GP, self).__init__()
        self.usingDefaultMean = True  # was using default mean function now?
        self.meanfunc = None      # mean function
        self.covfunc = None       # covariance function
        self.likfunc = None       # likelihood function
        self.inffunc = None       # inference function
        self.optimizer = None     # optimizer object
        self.nlZ = None           # negative log marginal likelihood
        self.dnlZ = None          # column vector of partial derivatives of the negative
                                  # log marginal likelihood w.r.t. each hyperparameter
        self.posterior = None     # struct representation of the (approximate) posterior
        self.x = None             # n by D matrix of training inputs
        self.y = None             # column vector of length n of training targets
        self.xs = None            # n by D matrix of test inputs
        self.ys = None            # column vector of length nn of true test targets (optional)
        self.ym = None            # column vector (of length ns) of predictive output means
        self.ys2 = None           # column vector (of length ns) of predictive output variances
        self.fm = None            # column vector (of length ns) of predictive latent means
        self.fs2 = None           # column vector (of length ns) of predictive latent variances
        self.lp = None            # column vector (of length ns) of log predictive probabilities
        self.logger = logging.getLogger(__name__)
    def __str__(self):
        """Return a fixed cheat-sheet listing the model attributes worth
        inspecting (the text does not depend on instance state)."""
        strvalue = 'To get the properties of the model use:\n'+\
                   'model.nlZ # negative log marginal likelihood\n'+\
                   'model.dnlZ.cov # derivatives of cov func of negative log marginal likelihood\n'+\
                   'model.dnlZ.lik # derivatives of lik func of negative log marginal likelihood\n'+\
                   'model.dnlZ.mean # derivatives of mean func of negative log marginal likelihood\n'+\
                   'model.posterior # posterior structure\n'+\
                   'model.covfunc.hyp # hyperparameters of cov func\n'+\
                   'model.meanfunc.hyp # hyperparameters of mean func\n'+\
                   'model.likfunc.hyp # hyperparameters of lik func\n'+\
                   'model.fm # latent mean\n'+\
                   'model.fs2 # latent variance\n'+\
                   'model.ym # predictive mean\n'+\
                   'model.ys2 # predictive variance\n'+\
                   'model.lp # log predictive probability'
        return strvalue
    def __repr__(self):
        """Like __str__ but prefixed with the concrete class, so subclass
        instances identify themselves."""
        strvalue = str(type(self))+': '+\
                   'to get the properties of the model use:\n'+\
                   'model.nlZ # negative log marginal likelihood\n'+\
                   'model.dnlZ.cov # derivatives of cov func of negative log marginal likelihood\n'+\
                   'model.dnlZ.lik # derivatives of lik func of negative log marginal likelihood\n'+\
                   'model.dnlZ.mean # derivatives of mean func of negative log marginal likelihood\n'+\
                   'model.posterior # posterior structure\n'+\
                   'model.covfunc.hyp # hyperparameters of cov func\n'+\
                   'model.meanfunc.hyp # hyperparameters of mean func\n'+\
                   'model.likfunc.hyp # hyperparameters of lik func\n'+\
                   'model.fm # latent mean\n'+\
                   'model.fs2 # latent variance\n'+\
                   'model.ym # predictive mean\n'+\
                   'model.ys2 # predictive variance\n'+\
                   'model.lp # log predictive probability'
        return strvalue
def setData(self, x, y):
'''
Set training inputs and traning labels to model.
:param x: training inputs in shape (n,D)
:param y: training labels in shape (n,1)
Note this method will transform x, y to correct shape
if x, y is given in 1d array.
'''
# check wether the number of inputs and labels match
assert x.shape[0] == y.shape[0], "number of inputs and labels does not match"
# check the shape of inputs
# transform to the correct shape
if x.ndim == 1:
x = np.reshape(x, (x.shape[0],1))
if y.ndim == 1:
y = np.reshape(y, (y.shape[0],1))
self.x = x
self.y = y
if self.usingDefaultMean:
c = np.mean(y)
self.meanfunc = mean.Const(c) # adapt default prior mean wrt. training labels
    def plotData_1d(self, axisvals=None):
        '''
        Toy Method for ploting 1d data of the model.

        :param list axisvals: [min_x, max_x, min_y, max_y] setting the plot range
        '''
        plt.figure()
        # Training points as '+' markers in the module-wide data color.
        plt.plot(self.x, self.y, ls='None', marker='+', color=DATACOLOR, ms=12, mew=2)
        if axisvals:
            plt.axis(axisvals)
        plt.grid()
        plt.xlabel('input x')
        plt.ylabel('target y')
        plt.show()
    def plotData_2d(self,x1,x2,t1,t2,p1,p2,axisvals=None):
        '''
        Toy Method for ploting 2d data of the model. \n
        For plotting, we superimpose the data points with the posterior equi-probability contour
        lines for the probability of class two given complete information about the generating mechanism.

        :param x1: inputs for class +1
        :param x2: inputs for class -1
        :param t1: meshgrid array for the first axis
        :param t2: meshgrid array for the second axis
        :param p1,p2: contour lines contains p2/(p1+p2)
        :param list axisvals: [min_x, max_x, min_y, max_y] setting the plot range

        That is to say, the contour is ploted by plt.contour(t1, t2, p2/(p1+p2) )
        Note these parameters are (only) used for our hard-coded data for classification demo.
        '''
        fig = plt.figure()
        plt.plot(x1[:,0], x1[:,1], 'b+', markersize = 12)
        plt.plot(x2[:,0], x2[:,1], 'r+', markersize = 12)
        # Contour of the class-two probability p2/(p1+p2) on the meshgrid.
        pc = plt.contour(t1, t2, np.reshape(old_div(p2,(p1+p2)), (t1.shape[0],t1.shape[1]) ))
        fig.colorbar(pc)
        plt.grid()
        if axisvals:
            plt.axis(axisvals)
        plt.show()
    def setPrior(self, mean=None, kernel=None):
        '''
        Set prior mean and covariance other than the default setting of current model.

        :param mean: instance of mean class. (e.g. mean.Const())
        :param kernel: instance of covariance class. (e.g. cov.RBF())
        '''
        # Type-check the arguments so a wrong object fails here rather than
        # deep inside inference.
        if not mean is None:
            assert isinstance(mean, pyGPs.mean.Mean), "mean function is not an instance of pyGPs.mean.Mean"
            self.meanfunc = mean
            self.usingDefaultMean = False  # user chose a mean; stop auto-recentering
        if not kernel is None:
            assert isinstance(kernel, pyGPs.cov.Kernel), "cov function is not an instance of pyGPs.cov.Kernel"
            self.covfunc = kernel
            if type(kernel) is cov.Pre:
                # Precomputed-kernel models also opt out of the default mean.
                self.usingDefaultMean = False
    def setOptimizer(self, method, num_restarts=None, min_threshold=None, meanRange=None, covRange=None, likRange=None):
        '''
        Specify the optimization configuration. By default, gp uses a single run of "Minimize".

        This base-class implementation is a no-op; concrete models (e.g. GPR, GPC)
        override it to actually install the optimizer.

        :param method: Optimization methods. Possible values are:\n
                       "Minimize"   -> minimize by Carl Rasmussen (python implementation of "minimize" in GPML)\n
                       "CG"         -> conjugent gradient\n
                       "BFGS"       -> quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS)\n
                       "SCG"        -> scaled conjugent gradient (faster than CG)\n
        :param num_restarts: Set if you want to run mulitiple times of optimization with different initial guess.
                             It specifys the maximum number of runs/restarts/trials.
        :param min_threshold: Set if you want to run mulitiple times of optimization with different initial guess.
                              It specifys the threshold of objective function value. Stop optimization when this value is reached.
        :param meanRange: The range of initial guess for mean hyperparameters.
                          e.g. meanRange = [(-2,2), (-5,5), (0,1)].
                          Each tuple specifys the range (low, high) of this hyperparameter,
                          This is only the range of initial guess, during optimization process, optimal hyperparameters may go out of this range.
                          (-5,5) for each hyperparameter by default.
        :param covRange: The range of initial guess for kernel hyperparameters. Usage see meanRange
        :param likRange: The range of initial guess for likelihood hyperparameters. Usage see meanRange
        '''
        pass
def optimize40(self, x=None, y=None, numIterations=40):
'''
Train optimal hyperparameters based on training data,
adjust new hyperparameters to all mean/cov/lik functions.
:param x: training inputs in shape (n,D)
:param y: training labels in shape (n,1)
'''
# check wether the number of inputs and labels match
if x is not None and y is not None:
assert x.shape[0] == y.shape[0], "number of inputs and labels does not match"
# check the shape of inputs
# transform to the correct shape
if not x is None:
if x.ndim == 1:
x = np.reshape(x, (x.shape[0],1))
self.x = x
if not y is None:
if y.ndim == 1:
y = np.reshape(y, (y.shape[0],1))
self.y = y
if self.usingDefaultMean and self.meanfunc is None:
c = np.mean(y)
self.meanfunc = mean.Const(c) # adapt default prior mean wrt. training labels
# optimize
optimalHyp, optimalNlZ = self.optimizer.findMin(self.x, self.y, numIters = numIterations)
self.nlZ = optimalNlZ
# apply optimal hyp to all mean/cov/lik functions here
self.optimizer._apply_in_objects(optimalHyp)
self.getPosterior()
    def optimize(self, x=None, y=None, numIterations=1000):
        '''
        Train optimal hyperparameters based on training data,
        and adjust the new hyperparameters to all mean/cov/lik functions.

        Side effects: updates self.x/self.y (reshaped to column vectors),
        self.nlZ, and — via the final getPosterior() call — self.dnlZ and
        self.posterior under the optimized hyperparameters.

        :param x: training inputs in shape (n,D); if None, previously set data is used
        :param y: training labels in shape (n,1); if None, previously set data is used
        :param numIterations: maximum number of optimizer iterations (default 1000)
        '''
        # check whether the number of inputs and labels match
        if x is not None and y is not None:
            assert x.shape[0] == y.shape[0], "number of inputs and labels does not match"
        # check the shape of inputs;
        # transform 1d arrays to explicit column vectors
        if not x is None:
            if x.ndim == 1:
                x = np.reshape(x, (x.shape[0],1))
            self.x = x
        if not y is None:
            if y.ndim == 1:
                y = np.reshape(y, (y.shape[0],1))
            self.y = y
        if self.usingDefaultMean and self.meanfunc is None:
            c = np.mean(y)
            self.meanfunc = mean.Const(c)    # adapt default prior mean wrt. training labels
        # optimize: search for hyperparameters minimizing the negative log marginal likelihood
        optimalHyp, optimalNlZ = self.optimizer.findMin(self.x, self.y, numIters = numIterations)
        self.nlZ = optimalNlZ
        # apply optimal hyp to all mean/cov/lik functions here
        self.optimizer._apply_in_objects(optimalHyp)
        # refresh posterior (and dnlZ) under the optimized hyperparameters
        self.getPosterior()
    def getPosterior(self, x=None, y=None, der=True):
        '''
        Fit the training data. Update negative log marginal likelihood(nlZ),
        partial derivatives of nlZ w.r.t. each hyperparameter(dnlZ),
        and struct representation of the (approximate) posterior(post),
        which consists of post.alpha, post.L, post.sW.

        nlZ, dnlZ, post = getPosterior(x, y, der=True)\n
        nlZ, post       = getPosterior(x, y, der=False )

        :param x: training inputs in shape (n,D); if None, previously set data is used
        :param y: training labels in shape (n,1); if None, previously set data is used
        :param boolean der: flag for whether to compute derivatives

        :return: negative log marginal likelihood (nlZ), derivatives of nlZ (dnlZ), posterior structure(post)

        You can print post to see descriptions of posterior.
        or see pyGPs.Core.inf for details.
        '''
        # check whether the number of inputs and labels match
        if x is not None and y is not None:
            assert x.shape[0] == y.shape[0], "number of inputs and labels does not match"
        # check the shape of inputs;
        # transform 1d arrays to explicit column vectors
        if not x is None:
            if x.ndim == 1:
                x = np.reshape(x, (x.shape[0],1))
            self.x = x
        if not y is None:
            if y.ndim == 1:
                y = np.reshape(y, (y.shape[0],1))
            self.y = y
        if self.usingDefaultMean and self.meanfunc is None:
            c = np.mean(y)
            self.meanfunc = mean.Const(c)    # adapt default prior mean wrt. training labels
        # call inference method; for Erf (classification) likelihood, the labels
        # must come from {+1,-1}, so validate them first
        if isinstance(self.likfunc, lik.Erf): #or is instance(self.likfunc, lik.Logistic):
            uy = unique(self.y)
            ind = ( uy != 1 )
            if any( uy[ind] != -1):
                raise Exception('You attempt classification using labels different from {+1,-1}')
        # inference mode 2 returns (post, nlZ); mode 3 additionally returns dnlZ
        if not der:
            post, nlZ = self.inffunc.evaluate(self.meanfunc, self.covfunc, self.likfunc, self.x, self.y, 2)
            self.nlZ = nlZ
            self.posterior = deepcopy(post)
            return nlZ, post
        else:
            post, nlZ, dnlZ = self.inffunc.evaluate(self.meanfunc, self.covfunc, self.likfunc, self.x, self.y, 3)
            self.nlZ = nlZ
            self.dnlZ = deepcopy(dnlZ)
            self.posterior = deepcopy(post)
            return nlZ, dnlZ, post
    def predict(self, xs, ys=None):
        '''
        Prediction of test points (given by xs) based on training data of the current model.
        This method will output the following value:\n
        predictive output means(ym),\n
        predictive output variances(ys2),\n
        predictive latent means(fm),\n
        predictive latent variances(fs2),\n
        log predictive probabilities(lp).\n
        Theses values can also be achieved from model's property. (e.g. model.ym)

        If no posterior has been computed yet, getPosterior() is called first.
        Test points are processed in mini batches of 1000 to bound memory use.

        :param xs: test input in shape of nn by D
        :param ys: test target(optional) in shape of nn by 1 if given

        :return: ym, ys2, fm, fs2, lp
        '''
        # check the shape of inputs;
        # transform 1d arrays to explicit column vectors
        if xs.ndim == 1:
            xs = np.reshape(xs, (xs.shape[0],1))
        self.xs = xs
        if not ys is None:
            if ys.ndim == 1:
                ys = np.reshape(ys, (ys.shape[0],1))
            self.ys = ys
        # local aliases for the model components
        meanfunc = self.meanfunc
        covfunc = self.covfunc
        likfunc = self.likfunc
        inffunc = self.inffunc
        x = self.x
        y = self.y
        # make sure a posterior (alpha, L, sW) is available
        if self.posterior is None:
            self.getPosterior()
        alpha = self.posterior.alpha
        L = self.posterior.L
        sW = self.posterior.sW
        nz = list(range(len(alpha[:,0])))                      # non-sparse representation
        if len(L) == 0:                                        # in case L is not provided, we compute it
            K = covfunc.getCovMatrix(x=x[nz,:], mode='train')
            #L = np.linalg.cholesky( (np.eye(nz) + np.dot(sW,sW.T)*K).T )
            L = jitchol( (np.eye(len(nz)) + np.dot(sW,sW.T)*K).T )
        Ltril     = np.all( np.tril(L,-1) == 0 )               # is L an upper triangular matrix?
        ns        = xs.shape[0]                                # number of data points
        nperbatch = 1000                                       # number of data points per mini batch
        nact      = 0                                          # number of already processed test data points
        ymu = np.zeros((ns,1))
        ys2 = np.zeros((ns,1))
        fmu = np.zeros((ns,1))
        fs2 = np.zeros((ns,1))
        lp  = np.zeros((ns,1))
        while nact<=ns-1:                                      # process minibatches of test cases to save memory
            ids  = list(range(nact,min(nact+nperbatch,ns)))    # data points to process
            kss = covfunc.getCovMatrix(z=xs[ids,:], mode='self_test')    # self-variances
            if isinstance(covfunc, FITCOfKernel):
                # FITC kernels need the full x for the cross-covariance, then restrict rows
                Ks = covfunc.getCovMatrix(x=x, z=xs[ids,:], mode='cross')   # cross-covariances
                Ks = Ks[nz,:]
            else:
                Ks = covfunc.getCovMatrix(x=x[nz,:], z=xs[ids,:], mode='cross')   # cross-covariances
            ms  = meanfunc.getMean(xs[ids,:])
            N   = (alpha.shape)[1]                             # number of alphas (usually 1; more in case of sampling)
            Fmu = np.tile(ms,(1,N)) + np.dot(Ks.T,alpha[nz])   # conditional mean fs|f
            fmu[ids] = np.reshape(old_div(Fmu.sum(axis=1),N),(len(ids),1))   # predictive means
            if Ltril: # L is triangular => use Cholesky parameters (alpha,sW,L)
                V       = np.linalg.solve(L.T,np.tile(sW,(1,len(ids)))*Ks)
                fs2[ids] = kss - np.array([(V*V).sum(axis=0)]).T             # predictive variances
            else:     # L is not triangular => use alternative parametrization
                fs2[ids] = kss + np.array([(Ks*np.dot(L,Ks)).sum(axis=0)]).T # predictive variances
            fs2[ids] = np.maximum(fs2[ids],0)                  # remove numerical noise i.e. negative variances
            Fs2 = np.tile(fs2[ids],(1,N))                      # we have multiple values in case of sampling
            # map latent mean/variance through the likelihood to get output-space quantities
            if ys is None:
                Lp, Ymu, Ys2 = likfunc.evaluate(None,Fmu[:],Fs2[:],None,None,3)
            else:
                Lp, Ymu, Ys2 = likfunc.evaluate(np.tile(ys[ids],(1,N)), Fmu[:], Fs2[:],None,None,3)
            lp[ids]  = np.reshape( old_div(np.reshape(Lp,(np.prod(Lp.shape),N)).sum(axis=1),N) , (len(ids),1) )   # log probability; sample averaging
            ymu[ids] = np.reshape( old_div(np.reshape(Ymu,(np.prod(Ymu.shape),N)).sum(axis=1),N) ,(len(ids),1) )  # predictive mean ys|y and ...
            ys2[ids] = np.reshape( old_div(np.reshape(Ys2,(np.prod(Ys2.shape),N)).sum(axis=1),N) , (len(ids),1) ) # .. variance
            nact = ids[-1]+1                                   # set counter to index of next data point
        # cache all predictive quantities on the model
        self.ym = ymu
        self.ys2 = ys2
        self.lp = lp
        self.fm = fmu
        self.fs2 = fs2
        if ys is None:
            return ymu, ys2, fmu, fs2, None
        else:
            return ymu, ys2, fmu, fs2, lp
def predict_with_posterior(self, post, xs, ys=None):
'''
Prediction of test points (given by xs) based on training data
of the current model with posterior already provided.
(i.e. you already have the posterior and thus don't need the fitting phase.)
This method will output the following value:\n
predictive output means(ym),\n
predictive output variances(ys2),\n
predictive latent means(fm),\n
predictive latent variances(fs2),\n
log predictive probabilities(lp).\n
Theses values can also be achieved from model's property. (e.g. model.ym)
:param post: struct representation of posterior
:param xs: test input
:param ys: test target(optional)
:return: ym, ys2, fm, fs2, lp
'''
# check the shape of inputs
# transform to correct shape if neccessary
if xs.ndim == 1:
xs = np.reshape(xs, (xs.shape[0],1))
self.xs = xs
if not ys is None:
if ys.ndim == 1:
ys = np.reshape(ys, (ys.shape[0],1))
self.ys = ys
meanfunc = self.meanfunc
covfunc = self.covfunc
likfunc = self.likfunc
inffunc = self.inffunc
x = self.x
y = self.y
self.posterior = deepcopy(post)
alpha = post.alpha
L = post.L
sW = post.sW
nz = list(range(len(alpha[:,0]))) # non-sparse representation
if len(L) == 0: # in case L is not provided, we compute it
K = covfunc.getCovMatrix(x=x[nz,:], mode='train')
#L = np.linalg.cholesky( (np.eye(nz) + np.dot(sW,sW.T)*K).T )
L = jitchol( (np.eye(len(nz)) + np.dot(sW,sW.T)*K).T )
Ltril = np.all( np.tril(L,-1) == 0 ) # is L an upper triangular matrix?
ns = xs.shape[0] # number of data points
nperbatch = 1000 # number of data points per mini batch
nact = 0 # number of already processed test data points
ymu = np.zeros((ns,1))
ys2 = np.zeros((ns,1))
fmu = np.zeros((ns,1))
fs2 = np.zeros((ns,1))
lp = np.zeros((ns,1))
while nact<=ns-1: # process minibatches of test cases to save memory
id = list(range(nact,min(nact+nperbatch,ns))) # data points to process
kss = covfunc.getCovMatrix(z=xs[id,:], mode='self_test') # self-variances
Ks = covfunc.getCovMatrix(x=x[nz,:], z=xs[id,:], mode='cross') # cross-covariances
ms = meanfunc.getMean(xs[id,:])
N = (alpha.shape)[1] # number of alphas (usually 1; more in case of sampling)
Fmu = np.tile(ms,(1,N)) + np.dot(Ks.T,alpha[nz]) # conditional mean fs|f
fmu[id] = np.reshape(old_div(Fmu.sum(axis=1),N),(len(id),1)) # predictive means
if Ltril: # L is triangular => use Cholesky parameters (alpha,sW,L)
V = np.linalg.solve(L.T,np.tile(sW,(1,len(id)))*Ks)
fs2[id] = kss - np.array([(V*V).sum(axis=0)]).T # predictive variances
else: # L is not triangular => use alternative parametrization
fs2[id] = kss + np.array([(Ks*np.dot(L,Ks)).sum(axis=0)]).T # predictive variances
fs2[id] = np.maximum(fs2[id],0) # remove numerical noise i.e. negative variances
Fs2 = np.tile(fs2[id],(1,N)) # we have multiple values in case of sampling
if ys is None:
[Lp, Ymu, Ys2] = likfunc.evaluate(None,Fmu[:],Fs2[:],None,None,3)
else:
[Lp, Ymu, Ys2] = likfunc.evaluate(np.tile(ys[id],(1,N)), Fmu[:], Fs2[:],None,None,3)
lp[id] = np.reshape( old_div(np.reshape(Lp,(np.prod(Lp.shape),N)).sum(axis=1),N) , (len(id),1) ) # log probability; sample averaging
ymu[id] = np.reshape( old_div(np.reshape(Ymu,(np.prod(Ymu.shape),N)).sum(axis=1),N) ,(len(id),1) ) # predictive mean ys|y and ...
ys2[id] = np.reshape( old_div(np.reshape(Ys2,(np.prod(Ys2.shape),N)).sum(axis=1),N) , (len(id),1) ) # .. variance
nact = id[-1]+1 # set counter to index of next data point
self.ym = ymu
self.ys2 = ys2
self.lp = lp
self.fm = fmu
self.fs2 = fs2
if ys is None:
return ymu, ys2, fmu, fs2, None
else:
return ymu, ys2, fmu, fs2, lp
class GPR(GP):
    '''
    Model for Gaussian Process Regression
    '''
    def __init__(self):
        super(GPR, self).__init__()
        self.meanfunc = mean.Zero()                        # default prior mean
        self.covfunc = cov.RBF()                           # default prior covariance
        self.likfunc = lik.Gauss()                         # likihood with default noise variance 0.1
        self.inffunc = inf.Exact()                         # inference method
        self.optimizer = opt.Minimize(self)                # default optimizer

    def setNoise(self,log_sigma):
        '''
        Set noise other than default noise value

        :param log_sigma: logarithm of the noise sigma
        '''
        self.likfunc = lik.Gauss(log_sigma)

    def setOptimizer(self, method, num_restarts=None, min_threshold=None, meanRange=None, covRange=None, likRange=None):
        '''
        Overriding. Usage see base class pyGPs.gp.GP.setOptimizer
        '''
        conf = None
        # Build a random-restart configuration only when restarts are requested.
        # (Comparisons with None use `is not None` per PEP 8.)
        if num_restarts is not None or min_threshold is not None:
            conf = pyGPs.Optimization.conf.random_init_conf(self.meanfunc,self.covfunc,self.likfunc)
            conf.num_restarts = num_restarts
            conf.min_threshold = min_threshold
            if meanRange is not None:
                conf.meanRange = meanRange
            if covRange is not None:
                conf.covRange = covRange
            if likRange is not None:
                conf.likRange = likRange
        if method == "Minimize":
            self.optimizer = opt.Minimize(self,conf)
        elif method == "SCG":
            self.optimizer = opt.SCG(self,conf)
        elif method == "CG":
            self.optimizer = opt.CG(self,conf)
        elif method == "BFGS":
            self.optimizer = opt.BFGS(self,conf)
        elif method == "Nelder-Mead":
            self.optimizer = opt.Simplex(self, conf)
        else:
            raise Exception('Optimization method is not set correctly in setOptimizer')

    def plot(self,axisvals=None):
        '''
        Plot 1d GP regression result: training data, predictive mean, and a
        two-standard-deviation shaded confidence band.

        :param list axisvals: [min_x, max_x, min_y, max_y] setting the plot range
        '''
        xs = self.xs    # test point
        x = self.x
        y = self.y
        ym = self.ym    # predictive test mean
        ys2 = self.ys2  # predictive test variance
        plt.figure()
        # flatten to 1d for fill_between
        xss  = np.reshape(xs,(xs.shape[0],))
        ymm  = np.reshape(ym,(ym.shape[0],))
        ys22 = np.reshape(ys2,(ys2.shape[0],))
        plt.plot(x, y, color=DATACOLOR, ls='None', marker='+',ms=12, mew=2)
        plt.plot(xs, ym, color=MEANCOLOR, ls='-', lw=3.)
        # shaded band: mean +/- 2 standard deviations
        plt.fill_between(xss,ymm + 2.*np.sqrt(ys22), ymm - 2.*np.sqrt(ys22), facecolor=SHADEDCOLOR,linewidths=0.0)
        plt.grid()
        if axisvals is not None:
            plt.axis(axisvals)
        plt.xlabel('input x')
        plt.ylabel('target y')
        plt.show()

    def useInference(self, newInf):
        '''
        Use another inference techinique other than default exact inference.

        :param str newInf: 'Laplace' or 'EP'
        '''
        if newInf == "Laplace":
            self.inffunc = inf.Laplace()
        elif newInf == "EP":
            self.inffunc = inf.EP()
        else:
            raise Exception('Possible inf values are "Laplace", "EP".')

    def useLikelihood(self,newLik):
        '''
        Use another likelihood function other than default Gaussian likelihood.

        :param str newLik: 'Laplace'
        '''
        if newLik == "Laplace":
            self.likfunc = lik.Laplace()
            # Exact inference is not applicable with a Laplace likelihood.
            self.inffunc = inf.EP()
        else:
            raise Exception('Possible lik values are "Laplace".')
class GPC(GP):
    '''
    Model for Gaussian Process Classification.
    '''
    def __init__(self):
        super(GPC, self).__init__()
        self.meanfunc = mean.Zero()                        # default prior mean
        self.covfunc = cov.RBF()                           # default prior covariance
        self.likfunc = lik.Erf()                           # erf likihood
        self.inffunc = inf.EP()                            # default inference method
        self.optimizer = opt.Minimize(self)                # default optimizer

    def setOptimizer(self, method, num_restarts=None, min_threshold=None, meanRange=None, covRange=None, likRange=None):
        '''
        Overriding. Usage see base class pyGPs.gp.GP.setOptimizer
        '''
        conf = None
        # Build a random-restart configuration only when restarts are requested.
        if num_restarts is not None or min_threshold is not None:
            conf = pyGPs.Optimization.conf.random_init_conf(self.meanfunc,self.covfunc,self.likfunc)
            conf.num_restarts = num_restarts
            conf.min_threshold = min_threshold
            if meanRange is not None:
                conf.meanRange = meanRange
            if covRange is not None:
                conf.covRange = covRange
            if likRange is not None:
                conf.likRange = likRange
        if method == "Minimize":
            self.optimizer = opt.Minimize(self,conf)
        elif method == "SCG":
            self.optimizer = opt.SCG(self,conf)
        elif method == "CG":
            self.optimizer = opt.CG(self,conf)
        elif method == "BFGS":
            self.optimizer = opt.BFGS(self,conf)
        elif method == "Nelder-Mead":
            self.optimizer = opt.Simplex(self, conf)
        else:
            # Previously an unknown method name was silently ignored; raise
            # instead, consistent with GPR.setOptimizer.
            raise Exception('Optimization method is not set correctly in setOptimizer')

    def plot(self,x1,x2,t1,t2,axisvals=None):
        '''
        Plot 2d GP Classification result.

        For plotting, we superimpose the data points with the posterior equi-probability contour
        lines for the probability of class two given complete information about the generating mechanism.

        :param x1: inputs for class +1
        :param x2: inputs for class -1
        :param t1: meshgrid array for the first axis
        :param t2: meshgrid array for the second axis
        :param list axisvals: [min_x, max_x, min_y, max_y] setting the plot range

        Note these parameters are (only) used for our hard-coded data for classification demo.
        '''
        fig = plt.figure()
        plt.plot(x1[:,0], x1[:,1], 'b+', markersize = 12)
        plt.plot(x2[:,0], x2[:,1], 'r+', markersize = 12)
        # contour of predictive class probability exp(lp) on the meshgrid
        pc = plt.contour(t1, t2, np.reshape(np.exp(self.lp), (t1.shape[0],t1.shape[1]) ))
        fig.colorbar(pc)
        plt.grid()
        if axisvals is not None:
            plt.axis(axisvals)
        plt.show()

    def useInference(self, newInf):
        '''
        Use another inference techinique other than default EP inference.

        :param str newInf: 'Laplace'
        '''
        if newInf == "Laplace":
            self.inffunc = inf.Laplace()
        else:
            raise Exception('Possible inf values are "Laplace".')

    def useLikelihood(self,newLik):
        '''
        Use another likelihood function other than default error function.
        (Not used in this version)

        :param str newLik: 'Logistic'
        '''
        if newLik == "Logistic":
            raise Exception("Logistic likelihood is currently not implemented.")
            #self.likfunc = lik.Logistic()
        else:
            raise Exception('Possible lik values are "Logistic".')
class GPMC(object):
    '''
    This is a one vs. one classification wrapper for GP Classification.

    A binary GPC model is trained for every pair of classes and soft votes are
    accumulated over all pairwise models to form a multi-class prediction.
    '''
    def __init__(self, n_class):
        self.meanfunc = mean.Zero()                 # default prior mean
        self.covfunc = cov.RBF()                    # default prior covariance
        self.n_class = n_class                      # number of different classes
        self.x_all = None                           # all training inputs, set via setData()
        self.y_all = None                           # all training labels, set via setData()
        self.newInf = None                          # new inference? -> call useInference
        self.newLik = None                          # new likelihood? -> call useLikelihood
        self.newPrior = False                       # True once setPrior() was called

    def setPrior(self, mean=None, kernel=None):
        '''
        Set prior mean and covariance other than the default setting of current model.

        :param mean: instance of mean class. (e.g. mean.Linear())
        :param kernel: instance of covariance class. (e.g. cov.RBF())
        '''
        # Validate argument types before setting the prior.
        if mean is not None:
            assert isinstance(mean, pyGPs.mean.Mean), "mean function is not an instance of pyGPs.mean.Mean"
            self.meanfunc = mean
            self.usingDefaultMean = False
        if kernel is not None:
            assert isinstance(kernel, pyGPs.cov.Kernel), "cov function is not an instance of pyGPs.cov.Kernel"
            self.covfunc = kernel
            if type(kernel) is cov.Pre:
                self.usingDefaultMean = False
        self.newPrior = True

    def useInference(self, newInf):
        '''
        Use another inference techinique other than default EP inference.
        The choice is forwarded to every pairwise binary GPC model in
        fitAndPredict()/optimizeAndPredict().

        :param str newInf: 'Laplace'
        '''
        if newInf == "Laplace":
            # BUGFIX: this used to set self.inffunc, which the wrapper never
            # reads — fitAndPredict()/optimizeAndPredict() check self.newInf —
            # so the requested inference was silently ignored.
            self.newInf = newInf
        else:
            raise Exception('Possible inf values are "Laplace".')

    def useLikelihood(self,newLik):
        '''
        Use another likelihood function other than default error function.
        (Not used in this version)

        :param str newLik: 'Logistic'
        '''
        if newLik == "Logistic":
            raise Exception("Logistic likelihood is currently not implemented.")
            #self.likfunc = lik.Logistic()
        else:
            raise Exception('Possible lik values are "Logistic".')

    def setData(self,x,y):
        '''
        Set training inputs and traning labels to model.

        :param x: training inputs in shape (n,D)
        :param y: training labels in shape (n,1)

        Note this method will transform x, y to correct shape
        if x, y is given in 1d array.
        '''
        # check whether the number of inputs and labels match
        assert x.shape[0] == y.shape[0], "number of inputs and labels does not match"
        # transform 1d arrays to explicit column vectors
        if x.ndim == 1:
            x = np.reshape(x, (x.shape[0],1))
        if y.ndim == 1:
            y = np.reshape(y, (y.shape[0],1))
        self.x_all = x
        self.y_all = y

    def fitAndPredict(self, xs):
        '''
        Fit the model with given training data and predict for test points (given by xs).
        predictive_vote is a matrix where row i is each test point i,
        and column j is the probability for being class j

        :param xs: test inputs in shape of nn by D
        :return: predictive_vote
        '''
        # check the shape of inputs
        if xs.ndim == 1:
            xs = np.reshape(xs, (xs.shape[0],1))
        predictive_vote = np.zeros((xs.shape[0],self.n_class))
        for i in range(self.n_class):         # classifier for class i...
            for j in range(i+1,self.n_class): # ...and class j
                x,y = self.createBinaryClass(i,j)
                model = GPC()
                if self.newPrior:
                    model.setPrior(mean=self.meanfunc, kernel=self.covfunc)
                if self.newInf:
                    model.useInference(self.newInf)
                if self.newLik:
                    model.useLikelihood(self.newLik)
                model.getPosterior(x,y)       # fitting
                ym = model.predict(xs)[0]
                ym += 1                       # now scale into 0 to 2, ym=0 is class j, ym=2 is class i
                vote_i = np.zeros((xs.shape[0],self.n_class))
                vote_j = np.zeros((xs.shape[0],self.n_class))
                vote_i[:,i:i+1] = ym
                vote_j[:,j:j+1] = 2-ym
                predictive_vote += vote_i
                predictive_vote += vote_j
        # normalize rows into class probabilities
        predictive_vote /= predictive_vote.sum(axis=1)[:,np.newaxis]
        return predictive_vote

    def optimizeAndPredict(self, xs):
        '''
        Optimize the model with given training data and predict for test points (given by xs).
        predictive_vote is a matrix where row i is each test point i,
        and column j is the probability for being class j

        :param xs: test inputs in shape of nn by D
        :return: predictive_vote
        '''
        # check the shape of inputs
        if xs.ndim == 1:
            xs = np.reshape(xs, (xs.shape[0],1))
        predictive_vote = np.zeros((xs.shape[0],self.n_class))
        for i in range(self.n_class):         # classifier for class i...
            for j in range(i+1,self.n_class): # ...and class j
                x,y = self.createBinaryClass(i,j)
                model = GPC()
                if self.newPrior:
                    model.setPrior(mean=self.meanfunc, kernel=self.covfunc)
                if self.newInf:
                    model.useInference(self.newInf)
                if self.newLik:
                    model.useLikelihood(self.newLik)
                model.optimize(x,y)           # training
                ym = model.predict(xs)[0]
                ym += 1                       # now scale into 0 to 2, ym=0 is class j, ym=2 is class i
                vote_i = np.zeros((xs.shape[0],self.n_class))
                vote_j = np.zeros((xs.shape[0],self.n_class))
                vote_i[:,i:i+1] = ym
                vote_j[:,j:j+1] = 2-ym
                predictive_vote += vote_i
                predictive_vote += vote_j
        # normalize rows into class probabilities
        predictive_vote /= predictive_vote.sum(axis=1)[:,np.newaxis]
        return predictive_vote

    def createBinaryClass(self, i,j):
        '''
        Create dataset x(data) and y(label) which only contains class i and j.
        Relabel class i to +1 and class j to -1

        :param int i: the i_th class
        :param int j: the j_th class
        :return: x(data) and y(label) which only contains class i and j
        '''
        class_i = []
        class_j = []
        for index in range(len(self.y_all)):       # check all classes
            target = self.y_all[index]
            if target == i:
                class_i.append(index)
            elif target == j:
                class_j.append(index)
        n1 = len(class_i)
        n2 = len(class_j)
        # class i rows first, then class j rows; labels +1 / -1 accordingly
        class_i.extend(class_j)
        x = self.x_all[class_i,:]
        y = np.concatenate((np.ones((1,n1)),-np.ones((1,n2))),axis=1).T
        return x,y
class GP_FITC(GP):
    '''
    Model for FITC GP base class.

    FITC (Fully Independent Training Conditional) approximates the full GP
    using a set of inducing points self.u; the covariance function is wrapped
    via covfunc.fitc(u).
    '''
    def __init__(self):
        super(GP_FITC, self).__init__()
        self.u = None                  # inducing points, shape (nu, D)
    def setData(self, x, y, value_per_axis=5):
        '''
        Set training inputs and traning labels to model and derive deault inducing_points..

        :param x: training inputs in shape (n,D)
        :param y: training labels in shape (n,1)
        :param int value_per_axis: number of value in each dimension
                                   when using a uni-distant default inducing points

        Note this method will transform x, y to correct shape
        if x, y is given in 1d array.
        '''
        # check whether the number of inputs and labels match
        assert x.shape[0] == y.shape[0], "number of inputs and labels does not match"
        # check dimension of inputs;
        # transform 1d arrays to explicit column vectors
        if x.ndim == 1:
            x = np.reshape(x, (x.shape[0],1))
        if y.ndim == 1:
            y = np.reshape(y, (y.shape[0],1))
        self.x = x
        self.y = y
        if self.usingDefaultMean:
            c = np.mean(y)
            self.meanfunc = mean.Const(c)    # adapt default prior mean wrt. training labels
        # get range of x in each dimension;
        # value_per_axis uniformly spaced values per dimension
        gridAxis=[]
        for d in range(x.shape[1]):
            column = x[:,d]
            mini = np.min(column)
            maxi = np.max(column)
            axis = np.linspace(mini,maxi,value_per_axis)
            gridAxis.append(axis)
        # default inducing points -> the Cartesian-product grid of the per-axis values
        if self.u is None:
            self.u = np.array(list(itertools.product(*gridAxis)))
        # wrap the covariance so inference uses the FITC approximation
        self.covfunc = self.covfunc.fitc(self.u)
    def setPrior(self, mean=None, kernel=None, inducing_points=None):
        '''
        Set prior mean and covariance other than the default setting of current model,
        as well as the inducing points

        :param mean: instance of mean class. (e.g. mean.Linear())
        :param kernel: instance of covariance class. (e.g. cov.RBF())
        :inducing_points: matrix of inducing points in shape of (nu,D)
        '''
        if not kernel is None:
            if not inducing_points is None:
                # explicit inducing points: wrap kernel and remember them
                self.covfunc = kernel.fitc(inducing_points)
                self.u = inducing_points
            else:
                # fall back to the inducing points derived in setData()
                if not self.u is None:
                    self.covfunc = kernel.fitc(self.u)
                else:
                    raise Exception("To use default inducing points, please call setData() first!")
            if type(kernel) is cov.Pre:
                self.usingDefaultMean = False
        if not mean is None:
            self.meanfunc = mean
            self.usingDefaultMean = False
class GPR_FITC(GP_FITC):
    '''
    Model for Gaussian Process Regression FITC
    '''
    def __init__(self):
        super(GPR_FITC, self).__init__()
        self.meanfunc = mean.Zero()                        # default prior mean
        self.covfunc = cov.RBF()                           # default prior covariance
        self.likfunc = lik.Gauss()                         # likihood with default noise variance 0.1
        self.inffunc = inf.FITC_Exact()                    # inference method
        self.optimizer = opt.Minimize(self)                # default optimizer
        self.u = None                                      # no default inducing points

    def setNoise(self,log_sigma):
        '''
        Set noise other than default noise value

        :param log_sigma: logarithm of the noise sigma
        '''
        self.likfunc = lik.Gauss(log_sigma)

    def setOptimizer(self, method, num_restarts=None, min_threshold=None, meanRange=None, covRange=None, likRange=None):
        '''
        Overriding. Usage see base class pyGPs.gp.GP.setOptimizer
        '''
        conf = None
        # Build a random-restart configuration only when restarts are requested.
        if num_restarts is not None or min_threshold is not None:
            conf = pyGPs.Optimization.conf.random_init_conf(self.meanfunc,self.covfunc,self.likfunc)
            conf.num_restarts = num_restarts
            conf.min_threshold = min_threshold
            if meanRange is not None:
                conf.meanRange = meanRange
            if covRange is not None:
                conf.covRange = covRange
            if likRange is not None:
                conf.likRange = likRange
        if method == "Minimize":
            self.optimizer = opt.Minimize(self,conf)
        elif method == "SCG":
            self.optimizer = opt.SCG(self,conf)
        elif method == "CG":
            self.optimizer = opt.CG(self,conf)
        elif method == "BFGS":
            self.optimizer = opt.BFGS(self,conf)
        elif method == "Nelder-Mead":
            self.optimizer = opt.Simplex(self, conf)
        else:
            # Previously an unknown method name was silently ignored; raise
            # instead, consistent with GPR.setOptimizer.
            raise Exception('Optimization method is not set correctly in setOptimizer')

    def plot(self,axisvals=None):
        '''
        Plot 1d GP FITC Regression result: training data, predictive mean, a
        two-standard-deviation confidence band, and the inducing points ('x').

        :param list axisvals: [min_x, max_x, min_y, max_y] setting the plot range
        '''
        plt.figure()
        # flatten to 1d for fill_between
        xss  = np.reshape(self.xs,(self.xs.shape[0],))
        ymm  = np.reshape(self.ym,(self.ym.shape[0],))
        ys22 = np.reshape(self.ys2,(self.ys2.shape[0],))
        plt.plot(self.x, self.y, color=DATACOLOR, ls='None', marker='+',ms=12, mew=2)
        plt.plot(self.xs, self.ym, color=MEANCOLOR, ls='-', lw=3.)
        plt.fill_between(xss,ymm + 2.*np.sqrt(ys22), ymm - 2.*np.sqrt(ys22), facecolor=SHADEDCOLOR,linewidths=0.0)
        plt.grid()
        if axisvals is not None:
            plt.axis(axisvals)
        plt.xlabel('input x')
        plt.ylabel('output y')
        # mark the inducing points along y = 1
        plt.plot(self.u,np.ones_like(self.u), ls='None', color='k',marker='x',markersize=12,mew=2)
        plt.show()

    def useInference(self, newInf):
        '''
        Use another inference techinique other than default exact inference.

        :param str newInf: 'Laplace' or 'EP'
        '''
        if newInf == "Laplace":
            self.inffunc = inf.FITC_Laplace()
        elif newInf == "EP":
            self.inffunc = inf.FITC_EP()
        else:
            raise Exception('Possible inf values are "Laplace", "EP".')

    def useLikelihood(self,newLik):
        '''
        Use another likelihood function other than default Gaussian likelihood.

        :param str newLik: 'Laplace'
        '''
        if newLik == "Laplace":
            self.likfunc = lik.Laplace()
            # FITC exact inference is not applicable with a Laplace likelihood.
            self.inffunc = inf.FITC_EP()
        else:
            raise Exception('Possible lik values are "Laplace".')
class GPC_FITC(GP_FITC):
    '''
    Model for Gaussian Process Classification FITC
    '''
    def __init__(self):
        super(GPC_FITC, self).__init__()
        self.meanfunc = mean.Zero()                        # default prior mean
        self.covfunc = cov.RBF()                           # default prior covariance
        self.likfunc = lik.Erf()                           # erf liklihood
        self.inffunc = inf.FITC_EP()                       # default inference method
        self.optimizer = opt.Minimize(self)                # default optimizer
        self.u = None                                      # no default inducing points

    def setOptimizer(self, method, num_restarts=None, min_threshold=None, meanRange=None, covRange=None, likRange=None):
        '''
        Overriding. Usage see base class pyGPs.gp.GP.setOptimizer
        '''
        conf = None
        # Build a random-restart configuration only when restarts are requested.
        if num_restarts is not None or min_threshold is not None:
            conf = pyGPs.Optimization.conf.random_init_conf(self.meanfunc,self.covfunc,self.likfunc)
            conf.num_restarts = num_restarts
            conf.min_threshold = min_threshold
            if meanRange is not None:
                conf.meanRange = meanRange
            if covRange is not None:
                conf.covRange = covRange
            if likRange is not None:
                conf.likRange = likRange
        if method == "Minimize":
            self.optimizer = opt.Minimize(self,conf)
        elif method == "SCG":
            self.optimizer = opt.SCG(self,conf)
        elif method == "CG":
            self.optimizer = opt.CG(self,conf)
        elif method == "BFGS":
            self.optimizer = opt.BFGS(self,conf)
        elif method == "Nelder-Mead":
            self.optimizer = opt.Simplex(self, conf)
        else:
            # Previously an unknown method name was silently ignored; raise
            # instead, consistent with GPR.setOptimizer.
            raise Exception('Optimization method is not set correctly in setOptimizer')

    def plot(self,x1,x2,t1,t2,axisvals=None):
        '''Plot 2d GP FITC classification.

        For plotting, we superimpose the data points with the posterior equi-probability contour
        lines for the probability of class two given complete information about the generating mechanism.
        Inducing points are drawn as black circles.

        :param x1: inputs for class +1
        :param x2: inputs for class -1
        :param t1: meshgrid array for the first axis
        :param t2: meshgrid array for the second axis
        :param list axisvals: [min_x, max_x, min_y, max_y] setting the plot range

        Note these parameters are (only) used for our hard-coded data for classification demo.
        '''
        fig = plt.figure()
        plt.plot(x1[:,0], x1[:,1], 'b+', markersize = 12)
        plt.plot(x2[:,0], x2[:,1], 'r+', markersize = 12)
        plt.plot(self.u[:,0],self.u[:,1],'ko', markersize=12)
        # contour of predictive class probability exp(lp) on the meshgrid
        pc = plt.contour(t1, t2, np.reshape(np.exp(self.lp), (t1.shape[0],t1.shape[1]) ))
        fig.colorbar(pc)
        plt.grid()
        if axisvals is not None:
            plt.axis(axisvals)
        plt.show()

    def useInference(self, newInf):
        '''
        Use another inference techinique other than default FITC EP inference.

        :param str newInf: 'Laplace'
        '''
        if newInf == "Laplace":
            self.inffunc = inf.FITC_Laplace()
        else:
            raise Exception('Possible inf values are "Laplace".')

    def useLikelihood(self,newLik):
        '''
        Use another likelihood function other than default Erf likelihood.
        (Not used in this version)

        :param str newLik: 'Logistic'
        '''
        if newLik == "Logistic":
            raise Exception("Logistic likelihood is currently not implemented.")
        else:
            raise Exception('Possible lik values are "Logistic".')
| 40.450321
| 156
| 0.563389
|
from __future__ import division
from __future__ import absolute_import
from builtins import str
from builtins import range
from builtins import object
from past.utils import old_div
import itertools
import numpy as np
import matplotlib.pyplot as plt
from . import inf, mean, lik, cov, opt
from .tools import unique, jitchol, solve_chol
from copy import deepcopy
import pyGPs
from pyGPs.Core.cov import FITCOfKernel
import logging
# Plot colors as RGBA lists used by the plot()/plotData_* methods below.
SHADEDCOLOR = [0.7539, 0.89453125, 0.62890625, 1.0]   # light green: confidence-band fill
MEANCOLOR = [ 0.2109375, 0.63385, 0.1796875, 1.0]     # dark green: predictive mean curve
DATACOLOR = [0.12109375, 0.46875, 1., 1.0]            # blue: training data markers
class GP(object):
    def __init__(self):
        """Initialize an empty GP model; all components are set later."""
        super(GP, self).__init__()
        self.usingDefaultMean = True   # was the prior mean left at its default (adapted to data mean)?
        self.meanfunc = None           # prior mean function
        self.covfunc = None            # prior covariance (kernel) function
        self.likfunc = None            # likelihood function
        self.inffunc = None            # inference method
        self.optimizer = None          # hyperparameter optimizer
        self.nlZ = None                # negative log marginal likelihood
        self.dnlZ = None               # derivatives of nlZ w.r.t. hyperparameters
        self.posterior = None          # struct representation of the (approximate) posterior
        self.x = None                  # training inputs, shape (n,D)
        self.y = None                  # training labels, shape (n,1)
        self.xs = None                 # test inputs
        self.ys = None                 # test labels (optional)
        self.ym = None                 # predictive output mean
        self.ys2 = None                # predictive output variance
        self.fm = None                 # predictive latent mean
        self.fs2 = None                # predictive latent variance
        self.lp = None                 # log predictive probability
        self.logger = logging.getLogger(__name__)
def __str__(self):
strvalue = 'To get the properties of the model use:\n'+\
'model.nlZ # negative log marginal likelihood\n'+\
'model.dnlZ.cov # derivatives of cov func of negative log marginal likelihood\n'+\
'model.dnlZ.lik # derivatives of lik func of negative log marginal likelihood\n'+\
'model.dnlZ.mean # derivatives of mean func of negative log marginal likelihood\n'+\
'model.posterior # posterior structure\n'+\
'model.covfunc.hyp # hyperparameters of cov func\n'+\
'model.meanfunc.hyp # hyperparameters of mean func\n'+\
'model.likfunc.hyp # hyperparameters of lik func\n'+\
'model.fm # latent mean\n'+\
'model.fs2 # latent variance\n'+\
'model.ym # predictive mean\n'+\
'model.ys2 # predictive variance\n'+\
'model.lp # log predictive probability'
return strvalue
def __repr__(self):
strvalue = str(type(self))+': '+\
'to get the properties of the model use:\n'+\
'model.nlZ # negative log marginal likelihood\n'+\
'model.dnlZ.cov # derivatives of cov func of negative log marginal likelihood\n'+\
'model.dnlZ.lik # derivatives of lik func of negative log marginal likelihood\n'+\
'model.dnlZ.mean # derivatives of mean func of negative log marginal likelihood\n'+\
'model.posterior # posterior structure\n'+\
'model.covfunc.hyp # hyperparameters of cov func\n'+\
'model.meanfunc.hyp # hyperparameters of mean func\n'+\
'model.likfunc.hyp # hyperparameters of lik func\n'+\
'model.fm # latent mean\n'+\
'model.fs2 # latent variance\n'+\
'model.ym # predictive mean\n'+\
'model.ys2 # predictive variance\n'+\
'model.lp # log predictive probability'
return strvalue
def setData(self, x, y):
assert x.shape[0] == y.shape[0], "number of inputs and labels does not match"
if x.ndim == 1:
x = np.reshape(x, (x.shape[0],1))
if y.ndim == 1:
y = np.reshape(y, (y.shape[0],1))
self.x = x
self.y = y
if self.usingDefaultMean:
c = np.mean(y)
self.meanfunc = mean.Const(c)
def plotData_1d(self, axisvals=None):
plt.figure()
plt.plot(self.x, self.y, ls='None', marker='+', color=DATACOLOR, ms=12, mew=2)
if axisvals:
plt.axis(axisvals)
plt.grid()
plt.xlabel('input x')
plt.ylabel('target y')
plt.show()
def plotData_2d(self,x1,x2,t1,t2,p1,p2,axisvals=None):
fig = plt.figure()
plt.plot(x1[:,0], x1[:,1], 'b+', markersize = 12)
plt.plot(x2[:,0], x2[:,1], 'r+', markersize = 12)
pc = plt.contour(t1, t2, np.reshape(old_div(p2,(p1+p2)), (t1.shape[0],t1.shape[1]) ))
fig.colorbar(pc)
plt.grid()
if axisvals:
plt.axis(axisvals)
plt.show()
def setPrior(self, mean=None, kernel=None):
if not mean is None:
assert isinstance(mean, pyGPs.mean.Mean), "mean function is not an instance of pyGPs.mean.Mean"
self.meanfunc = mean
self.usingDefaultMean = False
if not kernel is None:
assert isinstance(kernel, pyGPs.cov.Kernel), "cov function is not an instance of pyGPs.cov.Kernel"
self.covfunc = kernel
if type(kernel) is cov.Pre:
self.usingDefaultMean = False
def setOptimizer(self, method, num_restarts=None, min_threshold=None, meanRange=None, covRange=None, likRange=None):
pass
def optimize40(self, x=None, y=None, numIterations=40):
if x is not None and y is not None:
assert x.shape[0] == y.shape[0], "number of inputs and labels does not match"
if not x is None:
if x.ndim == 1:
x = np.reshape(x, (x.shape[0],1))
self.x = x
if not y is None:
if y.ndim == 1:
y = np.reshape(y, (y.shape[0],1))
self.y = y
if self.usingDefaultMean and self.meanfunc is None:
c = np.mean(y)
self.meanfunc = mean.Const(c)
optimalHyp, optimalNlZ = self.optimizer.findMin(self.x, self.y, numIters = numIterations)
self.nlZ = optimalNlZ
self.optimizer._apply_in_objects(optimalHyp)
self.getPosterior()
def optimize(self, x=None, y=None, numIterations=1000):
if x is not None and y is not None:
assert x.shape[0] == y.shape[0], "number of inputs and labels does not match"
if not x is None:
if x.ndim == 1:
x = np.reshape(x, (x.shape[0],1))
self.x = x
if not y is None:
if y.ndim == 1:
y = np.reshape(y, (y.shape[0],1))
self.y = y
if self.usingDefaultMean and self.meanfunc is None:
c = np.mean(y)
self.meanfunc = mean.Const(c)
optimalHyp, optimalNlZ = self.optimizer.findMin(self.x, self.y, numIters = numIterations)
self.nlZ = optimalNlZ
self.optimizer._apply_in_objects(optimalHyp)
self.getPosterior()
def getPosterior(self, x=None, y=None, der=True):
if x is not None and y is not None:
assert x.shape[0] == y.shape[0], "number of inputs and labels does not match"
if not x is None:
if x.ndim == 1:
x = np.reshape(x, (x.shape[0],1))
self.x = x
if not y is None:
if y.ndim == 1:
y = np.reshape(y, (y.shape[0],1))
self.y = y
if self.usingDefaultMean and self.meanfunc is None:
c = np.mean(y)
self.meanfunc = mean.Const(c)
if isinstance(self.likfunc, lik.Erf):
uy = unique(self.y)
ind = ( uy != 1 )
if any( uy[ind] != -1):
raise Exception('You attempt classification using labels different from {+1,-1}')
if not der:
post, nlZ = self.inffunc.evaluate(self.meanfunc, self.covfunc, self.likfunc, self.x, self.y, 2)
self.nlZ = nlZ
self.posterior = deepcopy(post)
return nlZ, post
else:
post, nlZ, dnlZ = self.inffunc.evaluate(self.meanfunc, self.covfunc, self.likfunc, self.x, self.y, 3)
self.nlZ = nlZ
self.dnlZ = deepcopy(dnlZ)
self.posterior = deepcopy(post)
return nlZ, dnlZ, post
def predict(self, xs, ys=None):
if xs.ndim == 1:
xs = np.reshape(xs, (xs.shape[0],1))
self.xs = xs
if not ys is None:
if ys.ndim == 1:
ys = np.reshape(ys, (ys.shape[0],1))
self.ys = ys
meanfunc = self.meanfunc
covfunc = self.covfunc
likfunc = self.likfunc
inffunc = self.inffunc
x = self.x
y = self.y
if self.posterior is None:
self.getPosterior()
alpha = self.posterior.alpha
L = self.posterior.L
sW = self.posterior.sW
nz = list(range(len(alpha[:,0])))
if len(L) == 0:
K = covfunc.getCovMatrix(x=x[nz,:], mode='train')
L = jitchol( (np.eye(len(nz)) + np.dot(sW,sW.T)*K).T )
Ltril = np.all( np.tril(L,-1) == 0 )
ns = xs.shape[0]
nperbatch = 1000
nact = 0
ymu = np.zeros((ns,1))
ys2 = np.zeros((ns,1))
fmu = np.zeros((ns,1))
fs2 = np.zeros((ns,1))
lp = np.zeros((ns,1))
while nact<=ns-1:
ids = list(range(nact,min(nact+nperbatch,ns)))
kss = covfunc.getCovMatrix(z=xs[ids,:], mode='self_test')
if isinstance(covfunc, FITCOfKernel):
Ks = covfunc.getCovMatrix(x=x, z=xs[ids,:], mode='cross')
Ks = Ks[nz,:]
else:
Ks = covfunc.getCovMatrix(x=x[nz,:], z=xs[ids,:], mode='cross')
ms = meanfunc.getMean(xs[ids,:])
N = (alpha.shape)[1]
Fmu = np.tile(ms,(1,N)) + np.dot(Ks.T,alpha[nz])
fmu[ids] = np.reshape(old_div(Fmu.sum(axis=1),N),(len(ids),1))
if Ltril:
V = np.linalg.solve(L.T,np.tile(sW,(1,len(ids)))*Ks)
fs2[ids] = kss - np.array([(V*V).sum(axis=0)]).T
else:
fs2[ids] = kss + np.array([(Ks*np.dot(L,Ks)).sum(axis=0)]).T
fs2[ids] = np.maximum(fs2[ids],0)
Fs2 = np.tile(fs2[ids],(1,N))
if ys is None:
Lp, Ymu, Ys2 = likfunc.evaluate(None,Fmu[:],Fs2[:],None,None,3)
else:
Lp, Ymu, Ys2 = likfunc.evaluate(np.tile(ys[ids],(1,N)), Fmu[:], Fs2[:],None,None,3)
lp[ids] = np.reshape( old_div(np.reshape(Lp,(np.prod(Lp.shape),N)).sum(axis=1),N) , (len(ids),1) )
ymu[ids] = np.reshape( old_div(np.reshape(Ymu,(np.prod(Ymu.shape),N)).sum(axis=1),N) ,(len(ids),1) )
ys2[ids] = np.reshape( old_div(np.reshape(Ys2,(np.prod(Ys2.shape),N)).sum(axis=1),N) , (len(ids),1) )
nact = ids[-1]+1
self.ym = ymu
self.ys2 = ys2
self.lp = lp
self.fm = fmu
self.fs2 = fs2
if ys is None:
return ymu, ys2, fmu, fs2, None
else:
return ymu, ys2, fmu, fs2, lp
def predict_with_posterior(self, post, xs, ys=None):
if xs.ndim == 1:
xs = np.reshape(xs, (xs.shape[0],1))
self.xs = xs
if not ys is None:
if ys.ndim == 1:
ys = np.reshape(ys, (ys.shape[0],1))
self.ys = ys
meanfunc = self.meanfunc
covfunc = self.covfunc
likfunc = self.likfunc
inffunc = self.inffunc
x = self.x
y = self.y
self.posterior = deepcopy(post)
alpha = post.alpha
L = post.L
sW = post.sW
nz = list(range(len(alpha[:,0])))
if len(L) == 0:
K = covfunc.getCovMatrix(x=x[nz,:], mode='train')
L = jitchol( (np.eye(len(nz)) + np.dot(sW,sW.T)*K).T )
Ltril = np.all( np.tril(L,-1) == 0 )
ns = xs.shape[0]
nperbatch = 1000
nact = 0
ymu = np.zeros((ns,1))
ys2 = np.zeros((ns,1))
fmu = np.zeros((ns,1))
fs2 = np.zeros((ns,1))
lp = np.zeros((ns,1))
while nact<=ns-1:
id = list(range(nact,min(nact+nperbatch,ns)))
kss = covfunc.getCovMatrix(z=xs[id,:], mode='self_test')
Ks = covfunc.getCovMatrix(x=x[nz,:], z=xs[id,:], mode='cross')
ms = meanfunc.getMean(xs[id,:])
N = (alpha.shape)[1]
Fmu = np.tile(ms,(1,N)) + np.dot(Ks.T,alpha[nz])
fmu[id] = np.reshape(old_div(Fmu.sum(axis=1),N),(len(id),1))
if Ltril:
V = np.linalg.solve(L.T,np.tile(sW,(1,len(id)))*Ks)
fs2[id] = kss - np.array([(V*V).sum(axis=0)]).T
else:
fs2[id] = kss + np.array([(Ks*np.dot(L,Ks)).sum(axis=0)]).T
fs2[id] = np.maximum(fs2[id],0)
Fs2 = np.tile(fs2[id],(1,N))
if ys is None:
[Lp, Ymu, Ys2] = likfunc.evaluate(None,Fmu[:],Fs2[:],None,None,3)
else:
[Lp, Ymu, Ys2] = likfunc.evaluate(np.tile(ys[id],(1,N)), Fmu[:], Fs2[:],None,None,3)
lp[id] = np.reshape( old_div(np.reshape(Lp,(np.prod(Lp.shape),N)).sum(axis=1),N) , (len(id),1) )
ymu[id] = np.reshape( old_div(np.reshape(Ymu,(np.prod(Ymu.shape),N)).sum(axis=1),N) ,(len(id),1) )
ys2[id] = np.reshape( old_div(np.reshape(Ys2,(np.prod(Ys2.shape),N)).sum(axis=1),N) , (len(id),1) )
nact = id[-1]+1
self.ym = ymu
self.ys2 = ys2
self.lp = lp
self.fm = fmu
self.fs2 = fs2
if ys is None:
return ymu, ys2, fmu, fs2, None
else:
return ymu, ys2, fmu, fs2, lp
class GPR(GP):
    """Gaussian Process Regression: zero mean, RBF kernel, Gaussian
    likelihood, exact inference, Minimize optimizer by default."""
    def __init__(self):
        super(GPR, self).__init__()
        self.meanfunc = mean.Zero()
        self.covfunc = cov.RBF()
        self.likfunc = lik.Gauss()
        self.inffunc = inf.Exact()
        self.optimizer = opt.Minimize(self)
    def setNoise(self,log_sigma):
        """Set the (log) observation noise of the Gaussian likelihood."""
        self.likfunc = lik.Gauss(log_sigma)
    def setOptimizer(self, method, num_restarts=None, min_threshold=None, meanRange=None, covRange=None, likRange=None):
        """Select the hyperparameter optimizer; optionally configure random
        restarts and hyperparameter search ranges."""
        conf = None
        if (num_restarts!=None) or (min_threshold!=None):
            conf = pyGPs.Optimization.conf.random_init_conf(self.meanfunc,self.covfunc,self.likfunc)
            conf.num_restarts = num_restarts
            conf.min_threshold = min_threshold
            if not meanRange is None:
                conf.meanRange = meanRange
            if not covRange is None:
                conf.covRange = covRange
            if not likRange is None:
                conf.likRange = likRange
        if method == "Minimize":
            self.optimizer = opt.Minimize(self,conf)
        elif method == "SCG":
            self.optimizer = opt.SCG(self,conf)
        elif method == "CG":
            self.optimizer = opt.CG(self,conf)
        elif method == "BFGS":
            self.optimizer = opt.BFGS(self,conf)
        elif method == "Nelder-Mead":
            self.optimizer = opt.Simplex(self, conf)
        else:
            raise Exception('Optimization method is not set correctly in setOptimizer')
    def plot(self,axisvals=None):
        """Plot 1-d regression: data, predictive mean, +/-2 std-dev band."""
        xs = self.xs
        x = self.x
        y = self.y
        ym = self.ym
        ys2 = self.ys2
        plt.figure()
        xss = np.reshape(xs,(xs.shape[0],))
        ymm = np.reshape(ym,(ym.shape[0],))
        ys22 = np.reshape(ys2,(ys2.shape[0],))
        plt.plot(x, y, color=DATACOLOR, ls='None', marker='+',ms=12, mew=2)
        plt.plot(xs, ym, color=MEANCOLOR, ls='-', lw=3.)
        plt.fill_between(xss,ymm + 2.*np.sqrt(ys22), ymm - 2.*np.sqrt(ys22), facecolor=SHADEDCOLOR,linewidths=0.0)
        plt.grid()
        if not axisvals is None:
            plt.axis(axisvals)
        plt.xlabel('input x')
        plt.ylabel('target y')
        plt.show()
    def useInference(self, newInf):
        """Replace exact inference with 'Laplace' or 'EP'."""
        if newInf == "Laplace":
            self.inffunc = inf.Laplace()
        elif newInf == "EP":
            self.inffunc = inf.EP()
        else:
            raise Exception('Possible inf values are "Laplace", "EP".')
    def useLikelihood(self,newLik):
        """Replace the Gaussian likelihood; 'Laplace' also forces EP inference."""
        if newLik == "Laplace":
            self.likfunc = lik.Laplace()
            self.inffunc = inf.EP()
        else:
            raise Exception('Possible lik values are "Laplace".')
class GPC(GP):
    """Binary Gaussian Process Classification: zero mean, RBF kernel,
    Erf likelihood, EP inference by default. Labels must be +1/-1."""
    def __init__(self):
        super(GPC, self).__init__()
        self.meanfunc = mean.Zero()
        self.covfunc = cov.RBF()
        self.likfunc = lik.Erf()
        self.inffunc = inf.EP()
        self.optimizer = opt.Minimize(self)
    def setOptimizer(self, method, num_restarts=None, min_threshold=None, meanRange=None, covRange=None, likRange=None):
        """Select the hyperparameter optimizer; optionally configure random
        restarts and search ranges.  Unknown methods are silently ignored."""
        conf = None
        if (num_restarts!=None) or (min_threshold!=None):
            conf = pyGPs.Optimization.conf.random_init_conf(self.meanfunc,self.covfunc,self.likfunc)
            conf.num_restarts = num_restarts
            conf.min_threshold = min_threshold
            if not meanRange is None:
                conf.meanRange = meanRange
            if not covRange is None:
                conf.covRange = covRange
            if not likRange is None:
                conf.likRange = likRange
        if method == "Minimize":
            self.optimizer = opt.Minimize(self,conf)
        elif method == "SCG":
            self.optimizer = opt.SCG(self,conf)
        elif method == "CG":
            self.optimizer = opt.CG(self,conf)
        elif method == "BFGS":
            self.optimizer = opt.BFGS(self,conf)
    def plot(self,x1,x2,t1,t2,axisvals=None):
        """Plot 2-d classification data with contours of the predictive
        probability exp(lp)."""
        fig = plt.figure()
        plt.plot(x1[:,0], x1[:,1], 'b+', markersize = 12)
        plt.plot(x2[:,0], x2[:,1], 'r+', markersize = 12)
        pc = plt.contour(t1, t2, np.reshape(np.exp(self.lp), (t1.shape[0],t1.shape[1]) ))
        fig.colorbar(pc)
        plt.grid()
        if not axisvals is None:
            plt.axis(axisvals)
        plt.show()
    def useInference(self, newInf):
        """Replace EP inference with the Laplace approximation."""
        if newInf == "Laplace":
            self.inffunc = inf.Laplace()
        else:
            raise Exception('Possible inf values are "Laplace".')
    def useLikelihood(self,newLik):
        """Request a non-default likelihood; 'Logistic' is recognized but
        not implemented, so this always raises."""
        if newLik == "Logistic":
            raise Exception("Logistic likelihood is currently not implemented.")
        else:
            raise Exception('Possible lik values are "Logistic".')
class GPMC(object):
    """Multi-class classification via one-vs-one binary GPC voting."""
    def __init__(self, n_class):
        self.meanfunc = mean.Zero()
        self.covfunc = cov.RBF()
        self.n_class = n_class   # number of classes
        self.x_all = None        # full training inputs
        self.y_all = None        # full training labels (class indices)
        self.newInf = None       # optional non-default inference name passed to each GPC
        self.newLik = None       # optional non-default likelihood name passed to each GPC
        self.newPrior = False    # True once setPrior() was called
    def setPrior(self, mean=None, kernel=None):
        """Set a non-default mean/kernel, shared by all pairwise models."""
        if not mean is None:
            assert isinstance(mean, pyGPs.mean.Mean), "mean function is not an instance of pyGPs.mean.Mean"
            self.meanfunc = mean
            self.usingDefaultMean = False
        if not kernel is None:
            assert isinstance(kernel, pyGPs.cov.Kernel), "cov function is not an instance of pyGPs.cov.Kernel"
            self.covfunc = kernel
            if type(kernel) is cov.Pre:
                self.usingDefaultMean = False
        self.newPrior = True
    def useInference(self, newInf):
        """Request the Laplace approximation for inference.

        NOTE(review): this assigns self.inffunc, but fitAndPredict /
        optimizeAndPredict consult self.newInf — presumably these were meant
        to agree; confirm before relying on this method.
        """
        if newInf == "Laplace":
            self.inffunc = inf.Laplace()
        else:
            raise Exception('Possible inf values are "Laplace".')
    def useLikelihood(self,newLik):
        """Request a non-default likelihood; always raises (Logistic is
        recognized but unimplemented)."""
        if newLik == "Logistic":
            raise Exception("Logistic likelihood is currently not implemented.")
        else:
            raise Exception('Possible lik values are "Logistic".')
    def setData(self,x,y):
        """Store the full multi-class training set (column-shaped)."""
        assert x.shape[0] == y.shape[0], "number of inputs and labels does not match"
        if x.ndim == 1:
            x = np.reshape(x, (x.shape[0],1))
        if y.ndim == 1:
            y = np.reshape(y, (y.shape[0],1))
        self.x_all = x
        self.y_all = y
    def fitAndPredict(self, xs):
        """Fit one GPC per class pair (no hyperparameter optimization) and
        return normalized votes of shape (n_test, n_class)."""
        if xs.ndim == 1:
            xs = np.reshape(xs, (xs.shape[0],1))
        predictive_vote = np.zeros((xs.shape[0],self.n_class))
        for i in range(self.n_class):
            for j in range(i+1,self.n_class):
                x,y = self.createBinaryClass(i,j)
                model = GPC()
                if self.newPrior:
                    model.setPrior(mean=self.meanfunc, kernel=self.covfunc)
                if self.newInf:
                    model.useInference(self.newInf)
                if self.newLik:
                    model.useLikelihood(self.newLik)
                model.getPosterior(x,y)
                ym = model.predict(xs)[0]
                ym += 1   # shift predictive mean from [-1,1] into [0,2]
                vote_i = np.zeros((xs.shape[0],self.n_class))
                vote_j = np.zeros((xs.shape[0],self.n_class))
                vote_i[:,i:i+1] = ym
                vote_j[:,j:j+1] = 2-ym
                predictive_vote += vote_i
                predictive_vote += vote_j
        # Normalize each row so votes sum to 1.
        predictive_vote /= predictive_vote.sum(axis=1)[:,np.newaxis]
        return predictive_vote
    def optimizeAndPredict(self, xs):
        """Like fitAndPredict(), but optimizes each pairwise model's
        hyperparameters before predicting."""
        if xs.ndim == 1:
            xs = np.reshape(xs, (xs.shape[0],1))
        predictive_vote = np.zeros((xs.shape[0],self.n_class))
        for i in range(self.n_class):
            for j in range(i+1,self.n_class):
                x,y = self.createBinaryClass(i,j)
                model = GPC()
                if self.newPrior:
                    model.setPrior(mean=self.meanfunc, kernel=self.covfunc)
                if self.newInf:
                    model.useInference(self.newInf)
                if self.newLik:
                    model.useLikelihood(self.newLik)
                model.optimize(x,y)
                ym = model.predict(xs)[0]
                ym += 1
                vote_i = np.zeros((xs.shape[0],self.n_class))
                vote_j = np.zeros((xs.shape[0],self.n_class))
                vote_i[:,i:i+1] = ym
                vote_j[:,j:j+1] = 2-ym
                predictive_vote += vote_i
                predictive_vote += vote_j
        predictive_vote /= predictive_vote.sum(axis=1)[:,np.newaxis]
        return predictive_vote
    def createBinaryClass(self, i,j):
        """Extract the samples of classes i and j and relabel them +1/-1
        (class i first). Returns (x, y) with y of shape (n1+n2, 1)."""
        class_i = []
        class_j = []
        for index in range(len(self.y_all)):
            target = self.y_all[index]
            if target == i:
                class_i.append(index)
            elif target == j:
                class_j.append(index)
        n1 = len(class_i)
        n2 = len(class_j)
        class_i.extend(class_j)
        x = self.x_all[class_i,:]
        y = np.concatenate((np.ones((1,n1)),-np.ones((1,n2))),axis=1).T
        return x,y
class GP_FITC(GP):
    """Base class for sparse (FITC) GP models using inducing points u."""
    def __init__(self):
        super(GP_FITC, self).__init__()
        self.u = None  # inducing points, shape (m, D)
    def setData(self, x, y, value_per_axis=5):
        """Store training data; if no inducing points were given, build a
        regular grid with `value_per_axis` values per input dimension and
        wrap the kernel in its FITC approximation."""
        assert x.shape[0] == y.shape[0], "number of inputs and labels does not match"
        if x.ndim == 1:
            x = np.reshape(x, (x.shape[0],1))
        if y.ndim == 1:
            y = np.reshape(y, (y.shape[0],1))
        self.x = x
        self.y = y
        if self.usingDefaultMean:
            c = np.mean(y)
            self.meanfunc = mean.Const(c)
        # Default inducing points: a grid spanning the data range per axis.
        gridAxis=[]
        for d in range(x.shape[1]):
            column = x[:,d]
            mini = np.min(column)
            maxi = np.max(column)
            axis = np.linspace(mini,maxi,value_per_axis)
            gridAxis.append(axis)
        if self.u is None:
            self.u = np.array(list(itertools.product(*gridAxis)))
        self.covfunc = self.covfunc.fitc(self.u)
    def setPrior(self, mean=None, kernel=None, inducing_points=None):
        """Set mean/kernel; the kernel is wrapped with FITC using the given
        inducing points (or the ones prepared by setData())."""
        if not kernel is None:
            if not inducing_points is None:
                self.covfunc = kernel.fitc(inducing_points)
                self.u = inducing_points
            else:
                if not self.u is None:
                    self.covfunc = kernel.fitc(self.u)
                else:
                    raise Exception("To use default inducing points, please call setData() first!")
            if type(kernel) is cov.Pre:
                self.usingDefaultMean = False
        if not mean is None:
            self.meanfunc = mean
            self.usingDefaultMean = False
class GPR_FITC(GP_FITC):
    """Sparse (FITC) GP regression with Gaussian likelihood and FITC exact
    inference by default."""
    def __init__(self):
        super(GPR_FITC, self).__init__()
        self.meanfunc = mean.Zero()
        self.covfunc = cov.RBF()
        self.likfunc = lik.Gauss()
        self.inffunc = inf.FITC_Exact()
        self.optimizer = opt.Minimize(self)
        self.u = None
    def setNoise(self,log_sigma):
        """Set the (log) observation noise of the Gaussian likelihood."""
        self.likfunc = lik.Gauss(log_sigma)
    def setOptimizer(self, method, num_restarts=None, min_threshold=None, meanRange=None, covRange=None, likRange=None):
        """Select the hyperparameter optimizer; optionally configure random
        restarts and search ranges.  Unknown methods are silently ignored."""
        conf = None
        if (num_restarts!=None) or (min_threshold!=None):
            conf = pyGPs.Optimization.conf.random_init_conf(self.meanfunc,self.covfunc,self.likfunc)
            conf.num_restarts = num_restarts
            conf.min_threshold = min_threshold
            if not meanRange is None:
                conf.meanRange = meanRange
            if not covRange is None:
                conf.covRange = covRange
            if not likRange is None:
                conf.likRange = likRange
        if method == "Minimize":
            self.optimizer = opt.Minimize(self,conf)
        elif method == "SCG":
            self.optimizer = opt.SCG(self,conf)
        elif method == "CG":
            self.optimizer = opt.CG(self,conf)
        elif method == "BFGS":
            self.optimizer = opt.BFGS(self,conf)
    def plot(self,axisvals=None):
        """Plot 1-d regression results plus inducing-point locations ('x')."""
        plt.figure()
        xss = np.reshape(self.xs,(self.xs.shape[0],))
        ymm = np.reshape(self.ym,(self.ym.shape[0],))
        ys22 = np.reshape(self.ys2,(self.ys2.shape[0],))
        plt.plot(self.x, self.y, color=DATACOLOR, ls='None', marker='+',ms=12, mew=2)
        plt.plot(self.xs, self.ym, color=MEANCOLOR, ls='-', lw=3.)
        plt.fill_between(xss,ymm + 2.*np.sqrt(ys22), ymm - 2.*np.sqrt(ys22), facecolor=SHADEDCOLOR,linewidths=0.0)
        plt.grid()
        if not axisvals is None:
            plt.axis(axisvals)
        plt.xlabel('input x')
        plt.ylabel('output y')
        plt.plot(self.u,np.ones_like(self.u), ls='None', color='k',marker='x',markersize=12,mew=2)
        plt.show()
    def useInference(self, newInf):
        """Replace FITC exact inference with FITC 'Laplace' or 'EP'."""
        if newInf == "Laplace":
            self.inffunc = inf.FITC_Laplace()
        elif newInf == "EP":
            self.inffunc = inf.FITC_EP()
        else:
            raise Exception('Possible inf values are "Laplace", "EP".')
    def useLikelihood(self,newLik):
        """Replace the Gaussian likelihood; 'Laplace' also forces FITC EP."""
        if newLik == "Laplace":
            self.likfunc = lik.Laplace()
            self.inffunc = inf.FITC_EP()
        else:
            raise Exception('Possible lik values are "Laplace".')
class GPC_FITC(GP_FITC):
    """Sparse (FITC) binary GP classification with Erf likelihood and
    FITC EP inference by default. Labels must be +1/-1."""
    def __init__(self):
        super(GPC_FITC, self).__init__()
        self.meanfunc = mean.Zero()
        self.covfunc = cov.RBF()
        self.likfunc = lik.Erf()
        self.inffunc = inf.FITC_EP()
        self.optimizer = opt.Minimize(self)
        self.u = None
    def setOptimizer(self, method, num_restarts=None, min_threshold=None, meanRange=None, covRange=None, likRange=None):
        """Select the hyperparameter optimizer; optionally configure random
        restarts and search ranges.  Unknown methods are silently ignored."""
        conf = None
        if (num_restarts!=None) or (min_threshold!=None):
            conf = pyGPs.Optimization.conf.random_init_conf(self.meanfunc,self.covfunc,self.likfunc)
            conf.num_restarts = num_restarts
            conf.min_threshold = min_threshold
            if not meanRange is None:
                conf.meanRange = meanRange
            if not covRange is None:
                conf.covRange = covRange
            if not likRange is None:
                conf.likRange = likRange
        if method == "Minimize":
            self.optimizer = opt.Minimize(self,conf)
        elif method == "SCG":
            self.optimizer = opt.SCG(self,conf)
        elif method == "CG":
            self.optimizer = opt.CG(self,conf)
        elif method == "BFGS":
            self.optimizer = opt.BFGS(self,conf)
    def plot(self,x1,x2,t1,t2,axisvals=None):
        """Plot 2-d classification data, inducing points ('o'), and contours
        of the predictive probability exp(lp)."""
        fig = plt.figure()
        plt.plot(x1[:,0], x1[:,1], 'b+', markersize = 12)
        plt.plot(x2[:,0], x2[:,1], 'r+', markersize = 12)
        plt.plot(self.u[:,0],self.u[:,1],'ko', markersize=12)
        pc = plt.contour(t1, t2, np.reshape(np.exp(self.lp), (t1.shape[0],t1.shape[1]) ))
        fig.colorbar(pc)
        plt.grid()
        if not axisvals is None:
            plt.axis(axisvals)
        plt.show()
    def useInference(self, newInf):
        """Replace FITC EP inference with the FITC Laplace approximation."""
        if newInf == "Laplace":
            self.inffunc = inf.FITC_Laplace()
        else:
            raise Exception('Possible inf values are "Laplace".')
    def useLikelihood(self,newLik):
        """Request a non-default likelihood; always raises (Logistic is
        recognized but unimplemented)."""
        if newLik == "Logistic":
            raise Exception("Logistic likelihood is currently not implemented.")
        else:
            raise Exception('Possible lik values are "Logistic".')
| true
| true
|
f719d9b90696ca91133528a980477a87a5e8550f
| 3,619
|
py
|
Python
|
tfx/components/example_gen/custom_executors/parquet_executor.py
|
NikeNano/tfx
|
8f7756f223e3bd3bd5abe37fa287010509cdae75
|
[
"Apache-2.0"
] | null | null | null |
tfx/components/example_gen/custom_executors/parquet_executor.py
|
NikeNano/tfx
|
8f7756f223e3bd3bd5abe37fa287010509cdae75
|
[
"Apache-2.0"
] | null | null | null |
tfx/components/example_gen/custom_executors/parquet_executor.py
|
NikeNano/tfx
|
8f7756f223e3bd3bd5abe37fa287010509cdae75
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parquet based TFX example gen executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Any, Dict, Text
from absl import logging
import apache_beam as beam
import tensorflow as tf
from tfx.components.example_gen import utils
from tfx.components.example_gen.base_example_gen_executor import BaseExampleGenExecutor
from tfx.types import standard_component_specs
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(tf.train.Example)
def _ParquetToExample(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline, exec_properties: Dict[Text, Any],
    split_pattern: Text) -> beam.pvalue.PCollection:
  """Reads Parquet files for one split and converts rows to TF examples.

  Each input split is transformed by this function independently.

  Args:
    pipeline: beam pipeline.
    exec_properties: A dict of execution properties; `input_base` gives the
      root directory containing the Parquet data.
    split_pattern: Split.pattern in Input config — a glob file pattern
      relative to `input_base`.

  Returns:
    PCollection of TF examples.
  """
  pattern = os.path.join(
      exec_properties[standard_component_specs.INPUT_BASE_KEY], split_pattern)
  logging.info('Processing input parquet data %s to TFExample.', pattern)
  # TODO(jyzhao): support per column read by input_config.
  examples = (pipeline
              | 'ReadFromParquet' >> beam.io.ReadFromParquet(pattern)
              | 'ToTFExample' >> beam.Map(utils.dict_to_example))
  return examples
class Executor(BaseExampleGenExecutor):
  """TFX example gen executor for processing parquet format.

  Data type conversion:
    integer types will be converted to tf.train.Feature with tf.train.Int64List.
    float types will be converted to tf.train.Feature with tf.train.FloatList.
    string types will be converted to tf.train.Feature with tf.train.BytesList
      and utf-8 encoding.

  Note that,
    Single value will be converted to a list of that single value.
    Missing value will be converted to empty tf.train.Feature().
    Parquet data might lose precision, e.g., int96.

  For details, check the dict_to_example function in example_gen.utils.

  Example usage:

    from tfx.components.base import executor_spec
    from tfx.components.example_gen.component import FileBasedExampleGen
    from tfx.components.example_gen.custom_executors import parquet_executor
    from tfx.utils.dsl_utils import external_input

    example_gen = FileBasedExampleGen(
        input=external_input(parquet_dir_path),
        custom_executor_spec=executor_spec.ExecutorClassSpec(
            parquet_executor.Executor))
  """

  def GetInputSourceToExamplePTransform(self) -> beam.PTransform:
    """Returns PTransform for parquet to TF examples."""
    return _ParquetToExample
| 36.928571
| 87
| 0.762089
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Any, Dict, Text
from absl import logging
import apache_beam as beam
import tensorflow as tf
from tfx.components.example_gen import utils
from tfx.components.example_gen.base_example_gen_executor import BaseExampleGenExecutor
from tfx.types import standard_component_specs
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(tf.train.Example)
def _ParquetToExample(
    pipeline: beam.Pipeline, exec_properties: Dict[Text, Any],
    split_pattern: Text) -> beam.pvalue.PCollection:
  """Read Parquet files and transform to TF examples.

  Note that each input split will be transformed by this function separately.

  Args:
    pipeline: beam pipeline.
    exec_properties: A dict of execution properties; the `input_base` entry
      is the input dir that contains the Parquet data.
    split_pattern: Split.pattern in Input config, glob relative file pattern
      that maps to input files with root directory given by input_base.

  Returns:
    PCollection of TF examples.
  """
  input_base_uri = exec_properties[standard_component_specs.INPUT_BASE_KEY]
  parquet_pattern = os.path.join(input_base_uri, split_pattern)
  logging.info('Processing input parquet data %s to TFExample.',
               parquet_pattern)
  return (pipeline
          | 'ReadFromParquet' >> beam.io.ReadFromParquet(parquet_pattern)
          | 'ToTFExample' >> beam.Map(utils.dict_to_example))
class Executor(BaseExampleGenExecutor):
  """TFX example gen executor for processing parquet format."""

  def GetInputSourceToExamplePTransform(self) -> beam.PTransform:
    """Returns the PTransform that maps Parquet rows to tf.train.Examples."""
    return _ParquetToExample
| true
| true
|
f719dc710e799dfa2967b8713a4d68b60594d1ec
| 8,170
|
py
|
Python
|
Tests/compat/sbs_builtin.py
|
dsonbill/IronPython3-NETCore
|
8c76bdbec1754233f04b41ecd28e9bae2c862fd0
|
[
"Apache-2.0"
] | 2
|
2019-09-21T22:22:30.000Z
|
2020-05-09T12:45:51.000Z
|
Tests/compat/sbs_builtin.py
|
dsonbill/IronPython3-NETCore
|
8c76bdbec1754233f04b41ecd28e9bae2c862fd0
|
[
"Apache-2.0"
] | null | null | null |
Tests/compat/sbs_builtin.py
|
dsonbill/IronPython3-NETCore
|
8c76bdbec1754233f04b41ecd28e9bae2c862fd0
|
[
"Apache-2.0"
] | null | null | null |
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
from common import *
import testdata
import sys
def complex_case_repr(*args):
    """Build the golden-log label for a complex() construction case.

    Each argument is rendered as 'value (type)' with no separator between
    consecutive arguments, prefixed by "complex with ".
    """
    rendered = ["'%s (%s)'" % (str(arg), type(arg)) for arg in args]
    return "complex with " + "".join(rendered)
class test_builtin(object):
    ''' Golden-output compatibility tests for built-in types (slices,
    xrange, complex(), big integers, file modes).  Each test prints cases
    and results via printwith/printwithtype for cross-implementation
    comparison; exact printed output is the contract, so do not reorder.
    NOTE(review): several tests use Python-2-only names (xrange, file,
    "1L") — presumably the failures themselves are part of the golden log.
    '''
    def test_slice(self):
        ''' Exercise `del list[x:y:z]` over a grid of positive/negative
        slice bounds around the string length.
        '''
        test_str = testdata.long_string
        str_len = len(test_str)
        choices = ['', 0]
        numbers = [1, 2, 3, str_len/2-1, str_len/2, str_len/2+1, str_len-3, str_len-2, str_len-1, str_len, str_len+1, str_len+2, str_len+3, str_len*2]
        numbers = numbers[::3] # Temporary approach to speed things up...
        choices.extend(numbers)
        choices.extend([-1 * x for x in numbers])
        for x in choices:
            for y in choices:
                for z in choices:
                    if z == 0: continue  # step 0 is invalid for slices
                    line = "l = list(test_str); del l[%s:%s:%s]" % (str(x), str(y), str(z))
                    exec(line)
                    printwith("case", "del l[%s:%s:%s]" % (str(x), str(y), str(z)))
                    printwith("same", eval("l"), eval("len(l)"))
    def test_xrange(self):
        ''' Exercise xrange() with bounds around sys.maxsize (Python 2). '''
        import sys
        maxint = sys.maxsize
        numbers = [1, 2, maxint/2, maxint-1, maxint, maxint+1, maxint+2]
        choices = [0]
        choices.extend(numbers)
        choices.extend([-1 * x for x in numbers])
        for x in choices:
            for y in choices:
                for z in choices:
                    line = "xrange(%s, %s, %s)" % (str(x), str(y), str(z))
                    printwith("case", line)
                    try:
                        xr = eval(line)
                        xl = len(xr)
                        cnt = 0
                        first = last = first2 = last2 = "n/a"
                        # testing XRangeIterator
                        if xl < 10:
                            # NOTE(review): loop variable shadows outer x.
                            for x in xr:
                                if cnt == 0: first = x
                                if cnt == xl -1 : last = x
                                cnt += 1
                        # testing this[index]
                        if xl == 0: first2 = xr[0]
                        if xl > 1 : first2, last2 = xr[0], xr[xl - 1]
                        printwith("same", xr, xl, first, last, first2, last2)
                    except:
                        printwith("same", sys.exc_info()[0])
    def test_complex_ctor_str(self):
        ''' Exercise complex() construction from string literals. '''
        l = [ "-1", "0", "1", "+1", "+1.1", "-1.01", "-.101", ".234", "-1.3e3", "1.09e-3", "33.2e+10"] #, " ", ""] #http://ironpython.codeplex.com/workitem/28385
        for s in l:
            try:
                printwith("case", complex_case_repr(s))
                c = complex(s)
                printwithtype(c)
            except:
                printwith("same", sys.exc_info()[0], sys.exc_info()[1])
            s += "j"
            try:
                printwith("case", complex_case_repr(s))
                c = complex(s)
                printwithtype(c)
            except:
                printwith("same", sys.exc_info()[0], sys.exc_info()[1])
        for s1 in l:
            for s2 in l:
                try:
                    if s2.startswith("+") or s2.startswith("-"):
                        s = "%s%sJ" % (s1, s2)
                    else:
                        s = "%s+%sj" % (s1, s2)
                    printwith("case", complex_case_repr(s))
                    c = complex(s)
                    printwithtype(c)
                except:
                    printwith("same", sys.exc_info()[0], sys.exc_info()[1])
    def test_complex_ctor(self):
        ''' Exercise complex() with numeric/str positional and keyword args. '''
        # None is not included due to defaultvalue issue
        ln = [-1, 1, 1.5, 1.5e+5, 1+2j, -1-9.3j ]
        ls = ["1", "1L", "-1.5", "1.5e+5", "-34-2j"]
        la = []
        la.extend(ln)
        la.extend(ls)
        for s in la:
            try:
                printwith("case", complex_case_repr(s))
                c = complex(s)
                printwithtype(c)
            except:
                printwith("same", sys.exc_info()[0], sys.exc_info()[1])
        for s in la:
            try:
                printwith("case", "real only", complex_case_repr(s))
                c = complex(real=s)
                printwithtype(c)
            except:
                printwith("same", sys.exc_info()[0], sys.exc_info()[1])
        for s in la:
            try:
                printwith("case", "imag only", complex_case_repr(s))
                c = complex(imag=s)
                printwithtype(c)
            except:
                printwith("same", sys.exc_info()[0], sys.exc_info()[1])
        for s1 in la:
            for s2 in ln:
                try:
                    printwith("case", complex_case_repr(s1, s2))
                    c = complex(s1, s2)
                    printwithtype(c)
                except:
                    printwith("same", sys.exc_info()[0], sys.exc_info()[1])
    def test_bigint(self):
        ''' Exercise arithmetic, divmod and 3-arg pow on large integers. '''
        s = '1234567890'
        for x in range(10): s += str(x) * x
        s = s * 10
        l = [7, 1001, 5.89, True]
        for start in range(1, 50, 7):
            startx = start
            for length in [1, 20, 50, 60, 100]:
                startx += 1
                l.append(int(s[startx:startx + length]))
        for x in l:
            for y in l:
                print(x, y)
                printwith('case', '%s, %s' % (x, y))
                printwith('same', x+y)
                printwith('same', x-y)
                printwith('same', x*y)
                if y:
                    printwith('same', x/y)
                    t = divmod(x, y)
                    printwithtype(t[0])
                    printwithtype(t[1])
        # pow(a, x, y) requires integer operands, so drop the non-ints.
        l.remove(5.89)
        l.remove(True) #
        for a in range(1, 100, 7):
            for x in l:
                for y in l:
                    if x and y:
                        printwith('case', a, x, y)
                        printwith('same', pow(a, x, y))
    def test_file_mode(self):
        ''' Exercise file() open-mode strings, including invalid combos. '''
        disabled_modes = ['Ut+', 'rUt+', 'Urt+']
        disabled_modes += ['Ut', 'U+t', 'rUt', 'rU+t', 'Urt', 'Ur+t'] #http://ironpython.codeplex.com/workitem/28386
        arw = ['', 'a', 'r', 'w', 'U', 'rU', 'Ur', 'wU', 'Uw', 'Ua', 'aU']
        bt = ['', 'b', 't']
        plus = ['', '+']
        modes = []
        for x in arw:
            for y in bt:
                for z in plus:
                    modes.append(x + y + z)
            for y in plus:
                for z in bt:
                    modes.append(x + y + z)
        modes = [x for x in modes if x not in disabled_modes]
        filename = 'tempfile.txt'
        for m in modes:
            printwith('case', m)
            try:
                f = file(filename, m)
                s = str(f)
                # Strip the varying "at 0x..." address suffix from repr.
                atPos = s.find('at')
                printwith('same', s[:atPos])
                f.close()
            except:
                printwith("same", 'throw')
# Drive every test_* method of test_builtin through the shared harness.
runtests(test_builtin)
| 35.991189
| 161
| 0.408813
| true
| true
|
|
f719dc95d1f1ad1b119e769f41c66e49e76fe5d2
| 695
|
py
|
Python
|
abastece/migrations/0006_auto_20210531_1145.py
|
lembon/atizar
|
579ef6212e9b2582beb86c5e14339b0615ec16ee
|
[
"Apache-2.0"
] | null | null | null |
abastece/migrations/0006_auto_20210531_1145.py
|
lembon/atizar
|
579ef6212e9b2582beb86c5e14339b0615ec16ee
|
[
"Apache-2.0"
] | null | null | null |
abastece/migrations/0006_auto_20210531_1145.py
|
lembon/atizar
|
579ef6212e9b2582beb86c5e14339b0615ec16ee
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1.2 on 2021-05-31 14:45
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration (see the "Generated by Django" header);
    # avoid hand-editing once it has been applied to any database.

    dependencies = [
        ('abastece', '0005_auto_20210528_1946'),
    ]

    operations = [
        # NOTE(review): makemigrations froze the default to the generation
        # moment; the model itself presumably uses a callable default
        # (e.g. timezone.now) -- confirm against models.py.
        migrations.AlterField(
            model_name='pedido',
            name='timestamp',
            field=models.DateTimeField(default=datetime.datetime(2021, 5, 31, 11, 45, 20, 503212), editable=False,
                                       verbose_name='fecha y hora'),
        ),
        # Drops whatever extra options 'titulo' previously carried, leaving a
        # plain 200-char CharField.
        migrations.AlterField(
            model_name='producto',
            name='titulo',
            field=models.CharField(max_length=200),
        ),
    ]
| 26.730769
| 114
| 0.574101
|
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('abastece', '0005_auto_20210528_1946'),
]
operations = [
migrations.AlterField(
model_name='pedido',
name='timestamp',
field=models.DateTimeField(default=datetime.datetime(2021, 5, 31, 11, 45, 20, 503212), editable=False,
verbose_name='fecha y hora'),
),
migrations.AlterField(
model_name='producto',
name='titulo',
field=models.CharField(max_length=200),
),
]
| true
| true
|
f719dd4e36211e0181b4b0387c2d5462aad335b3
| 1,305
|
py
|
Python
|
lib/surface/meta/apis/collections/describe.py
|
bopopescu/Google-Cloud-SDK-1
|
c4683bacb2f6192d8a816932e438a0493085469b
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/meta/apis/collections/describe.py
|
bopopescu/Google-Cloud-SDK-1
|
c4683bacb2f6192d8a816932e438a0493085469b
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/meta/apis/collections/describe.py
|
bopopescu/Google-Cloud-SDK-1
|
c4683bacb2f6192d8a816932e438a0493085469b
|
[
"Apache-2.0"
] | 1
|
2020-07-24T20:13:29.000Z
|
2020-07-24T20:13:29.000Z
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A command that describes a resource collection for a given API."""
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.meta.apis import flags
from googlecloudsdk.command_lib.util.apis import registry
class Describe(base.DescribeCommand):
  """Describe the details of a collection for an API."""

  @staticmethod
  def Args(parser):
    # Shared --api-version override flag, then the positional collection
    # name (tab-completable against the known collections).
    flags.API_VERSION_FLAG.AddToParser(parser)
    argument_kwargs = {
        'completer': flags.CollectionCompleter,
        'help': 'The name of the collection to get the details of.',
    }
    parser.add_argument('collection', **argument_kwargs)

  def Run(self, args):
    # Look the collection up in the API registry, honoring --api-version.
    resolved_collection = registry.GetAPICollection(
        args.collection, api_version=args.api_version)
    return resolved_collection
| 36.25
| 74
| 0.738697
|
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.meta.apis import flags
from googlecloudsdk.command_lib.util.apis import registry
class Describe(base.DescribeCommand):
@staticmethod
def Args(parser):
flags.API_VERSION_FLAG.AddToParser(parser)
parser.add_argument(
'collection',
completer=flags.CollectionCompleter,
help='The name of the collection to get the details of.')
def Run(self, args):
return registry.GetAPICollection(args.collection,
api_version=args.api_version)
| true
| true
|
f719df2ec061fcd0ec591c84a3ef60c4759b669f
| 3,398
|
py
|
Python
|
tests/data_generation/animate_berlin_y_stretch.py
|
Algomorph/NeuralTracking
|
6312be8e18828344c65e25a423c239efcd3428dd
|
[
"Apache-2.0"
] | 3
|
2021-04-18T04:23:08.000Z
|
2022-02-01T08:37:51.000Z
|
tests/data_generation/animate_berlin_y_stretch.py
|
Algomorph/NeuralTracking
|
6312be8e18828344c65e25a423c239efcd3428dd
|
[
"Apache-2.0"
] | 24
|
2021-05-28T21:59:11.000Z
|
2022-02-03T16:09:41.000Z
|
tests/data_generation/animate_berlin_y_stretch.py
|
Algomorph/NeuralTracking
|
6312be8e18828344c65e25a423c239efcd3428dd
|
[
"Apache-2.0"
] | 5
|
2021-03-10T02:56:16.000Z
|
2021-12-14T06:04:50.000Z
|
import sys
import os
import shutil
import cv2
import open3d as o3d
import open3d.core as o3c
import numpy as np
from rendering.pytorch3d_renderer import PyTorch3DRenderer
from data import StandaloneFrameDataset
import data.presets as presets
import tsdf.default_voxel_grid
import data.camera
from settings import process_arguments, PathParameters, DeformNetParameters
PROGRAM_EXIT_SUCCESS = 0
def main():
    # Generate a synthetic RGB-D sequence by stretching a fused mesh along Y.
    process_arguments()
    # Fuse a single RGB-D frame (BERLIN_0 preset) into a TSDF volume on GPU.
    frame_dataset: StandaloneFrameDataset = presets.StandaloneFramePreset.BERLIN_0.value
    device = o3c.Device("cuda:0")
    volume: o3d.t = tsdf.default_voxel_grid.make_default_tsdf_voxel_grid(device)
    depth_image = frame_dataset.load_depth_image_open3d(device)
    color_image = frame_dataset.load_color_image_open3d(device)
    intrinsics_open3d_cpu, _ = data.camera.load_open3d_intrinsics_from_text_4x4_matrix_and_image(frame_dataset.get_intrinsics_path(),
                                                                                                 frame_dataset.get_depth_image_path())
    intrinsics_open3d_cuda = o3d.core.Tensor(intrinsics_open3d_cpu.intrinsic_matrix, o3d.core.Dtype.Float32, device)
    # Identity extrinsics: camera frame == world frame for this one shot.
    extrinsics_open3d_cuda = o3d.core.Tensor.eye(4, o3d.core.Dtype.Float32, device)
    volume.integrate(depth_image, color_image, intrinsics_open3d_cuda, extrinsics_open3d_cuda, DeformNetParameters.depth_scale.value, 3.0)
    original_mesh: o3d.geometry.TriangleMesh = volume.extract_surface_mesh(-1, 0).to_legacy_triangle_mesh()
    renderer = PyTorch3DRenderer((depth_image.rows, depth_image.columns), device, intrinsics_open3d_cuda)
    # Animation parameters: each frame stretches the mesh 10% more along Y.
    frame_count = 6
    scale_factor_increment = 0.1
    # NOTE(review): hard-coded stretch pivot -- presumably the mesh centroid
    # for this particular frame; confirm before reusing with other presets.
    scale_center = np.array([0.0855289, -0.03289237, 2.79831315], dtype=np.float32)
    def scale_mesh_y(mesh: o3d.geometry.TriangleMesh, factor: float) -> o3d.geometry.TriangleMesh:
        # Stretch vertices along Y about scale_center; topology and colors kept.
        vertices = np.array(mesh.vertices)
        stretched_vertices = vertices - scale_center
        stretched_vertices[:, 1] *= factor
        stretched_vertices += scale_center
        _scaled_mesh = o3d.geometry.TriangleMesh(o3d.cuda.pybind.utility.Vector3dVector(stretched_vertices), mesh.triangles)
        _scaled_mesh.vertex_colors = mesh.vertex_colors
        return _scaled_mesh
    # Prepare output folders: <output>/berlin_y_stretch_sequence/{depth,color}.
    root_output_directory = os.path.join(PathParameters.output_directory.value, "berlin_y_stretch_sequence")
    depth_output_directory = os.path.join(root_output_directory, "depth")
    if not os.path.exists(depth_output_directory):
        os.makedirs(depth_output_directory)
    color_output_directory = os.path.join(root_output_directory, "color")
    if not os.path.exists(color_output_directory):
        os.makedirs(color_output_directory)
    # Render each stretched mesh and write depth (16-bit PNG) + color (JPEG).
    for i_frame in range(0, frame_count):
        scaled_mesh = scale_mesh_y(original_mesh, 1.0 + scale_factor_increment * i_frame)
        depth, color = renderer.render_mesh_legacy(scaled_mesh, depth_scale=1000.0)
        color_path = os.path.join(color_output_directory, f"{i_frame:06d}.jpg")
        depth_path = os.path.join(depth_output_directory, f"{i_frame:06d}.png")
        cv2.imwrite(color_path, color)
        cv2.imwrite(depth_path, depth.astype(np.uint16))
    # Camera intrinsics are shared by every frame of the generated sequence.
    shutil.copy(frame_dataset.get_intrinsics_path(), os.path.join(root_output_directory, "intrinsics.txt"))
    return PROGRAM_EXIT_SUCCESS
if __name__ == "__main__":
    sys.exit(main())
| 43.564103
| 138
| 0.751619
|
import sys
import os
import shutil
import cv2
import open3d as o3d
import open3d.core as o3c
import numpy as np
from rendering.pytorch3d_renderer import PyTorch3DRenderer
from data import StandaloneFrameDataset
import data.presets as presets
import tsdf.default_voxel_grid
import data.camera
from settings import process_arguments, PathParameters, DeformNetParameters
PROGRAM_EXIT_SUCCESS = 0
def main():
process_arguments()
frame_dataset: StandaloneFrameDataset = presets.StandaloneFramePreset.BERLIN_0.value
device = o3c.Device("cuda:0")
volume: o3d.t = tsdf.default_voxel_grid.make_default_tsdf_voxel_grid(device)
depth_image = frame_dataset.load_depth_image_open3d(device)
color_image = frame_dataset.load_color_image_open3d(device)
intrinsics_open3d_cpu, _ = data.camera.load_open3d_intrinsics_from_text_4x4_matrix_and_image(frame_dataset.get_intrinsics_path(),
frame_dataset.get_depth_image_path())
intrinsics_open3d_cuda = o3d.core.Tensor(intrinsics_open3d_cpu.intrinsic_matrix, o3d.core.Dtype.Float32, device)
extrinsics_open3d_cuda = o3d.core.Tensor.eye(4, o3d.core.Dtype.Float32, device)
volume.integrate(depth_image, color_image, intrinsics_open3d_cuda, extrinsics_open3d_cuda, DeformNetParameters.depth_scale.value, 3.0)
original_mesh: o3d.geometry.TriangleMesh = volume.extract_surface_mesh(-1, 0).to_legacy_triangle_mesh()
renderer = PyTorch3DRenderer((depth_image.rows, depth_image.columns), device, intrinsics_open3d_cuda)
frame_count = 6
scale_factor_increment = 0.1
scale_center = np.array([0.0855289, -0.03289237, 2.79831315], dtype=np.float32)
def scale_mesh_y(mesh: o3d.geometry.TriangleMesh, factor: float) -> o3d.geometry.TriangleMesh:
vertices = np.array(mesh.vertices)
stretched_vertices = vertices - scale_center
stretched_vertices[:, 1] *= factor
stretched_vertices += scale_center
_scaled_mesh = o3d.geometry.TriangleMesh(o3d.cuda.pybind.utility.Vector3dVector(stretched_vertices), mesh.triangles)
_scaled_mesh.vertex_colors = mesh.vertex_colors
return _scaled_mesh
root_output_directory = os.path.join(PathParameters.output_directory.value, "berlin_y_stretch_sequence")
depth_output_directory = os.path.join(root_output_directory, "depth")
if not os.path.exists(depth_output_directory):
os.makedirs(depth_output_directory)
color_output_directory = os.path.join(root_output_directory, "color")
if not os.path.exists(color_output_directory):
os.makedirs(color_output_directory)
for i_frame in range(0, frame_count):
scaled_mesh = scale_mesh_y(original_mesh, 1.0 + scale_factor_increment * i_frame)
depth, color = renderer.render_mesh_legacy(scaled_mesh, depth_scale=1000.0)
color_path = os.path.join(color_output_directory, f"{i_frame:06d}.jpg")
depth_path = os.path.join(depth_output_directory, f"{i_frame:06d}.png")
cv2.imwrite(color_path, color)
cv2.imwrite(depth_path, depth.astype(np.uint16))
shutil.copy(frame_dataset.get_intrinsics_path(), os.path.join(root_output_directory, "intrinsics.txt"))
return PROGRAM_EXIT_SUCCESS
if __name__ == "__main__":
sys.exit(main())
| true
| true
|
f719dfc68869c7b36d4086ce6861e839d17b9e4c
| 338
|
py
|
Python
|
src/utils.py
|
delbio/maze
|
cbc58ebb2c54f300f6413b770b57b0cab0750672
|
[
"MIT"
] | null | null | null |
src/utils.py
|
delbio/maze
|
cbc58ebb2c54f300f6413b770b57b0cab0750672
|
[
"MIT"
] | null | null | null |
src/utils.py
|
delbio/maze
|
cbc58ebb2c54f300f6413b770b57b0cab0750672
|
[
"MIT"
] | null | null | null |
def rotate_counterclockwise(array_2d):
    """Rotate a 2-D list 90 degrees counterclockwise.

    Bug fix: the previous implementation computed only the transpose
    (``zip(*array_2d[::])``), which is a reflection across the main
    diagonal, not a rotation.  A 90-degree CCW rotation is the transpose
    followed by reversing the row order -- the mirror operation of
    ``rotate_clockwise`` below (which reverses rows *before* transposing).
    """
    list_of_tuples = zip(*array_2d)
    return [list(elem) for elem in list_of_tuples][::-1]
def rotate_clockwise(array_2d):
    """Rotate a 2-D list 90 degrees clockwise.

    Reverses the row order first, then transposes, so each original
    column (read bottom-to-top) becomes a row of the result.
    (Adapted from https://stackoverflow.com/a/48444999/3753724.)
    """
    reversed_rows = array_2d[::-1]
    return [list(column) for column in zip(*reversed_rows)]
| 28.166667
| 64
| 0.695266
|
def rotate_counterclockwise(array_2d):
list_of_tuples = zip(*array_2d[::])
return [list(elem) for elem in list_of_tuples]
def rotate_clockwise(array_2d):
list_of_tuples = zip(*array_2d[::-1])
return [list(elem) for elem in list_of_tuples]
| true
| true
|
f719e02577fb8babdf4f9190cb1e562309acb229
| 5,806
|
py
|
Python
|
sdk/python/pulumi_alicloud/fc/get_triggers.py
|
pulumi/pulumi-alicloud
|
9c34d84b4588a7c885c6bec1f03b5016e5a41683
|
[
"ECL-2.0",
"Apache-2.0"
] | 42
|
2019-03-18T06:34:37.000Z
|
2022-03-24T07:08:57.000Z
|
sdk/python/pulumi_alicloud/fc/get_triggers.py
|
pulumi/pulumi-alicloud
|
9c34d84b4588a7c885c6bec1f03b5016e5a41683
|
[
"ECL-2.0",
"Apache-2.0"
] | 152
|
2019-04-15T21:03:44.000Z
|
2022-03-29T18:00:57.000Z
|
sdk/python/pulumi_alicloud/fc/get_triggers.py
|
pulumi/pulumi-alicloud
|
9c34d84b4588a7c885c6bec1f03b5016e5a41683
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2020-08-26T17:30:07.000Z
|
2021-07-05T01:37:45.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetTriggersResult',
'AwaitableGetTriggersResult',
'get_triggers',
]
@pulumi.output_type
class GetTriggersResult:
    """
    A collection of values returned by getTriggers.
    """
    # Generated by the Pulumi Terraform Bridge (tfgen); the @pulumi.output_type
    # decorator introspects the property structure below -- do not hand-edit.
    def __init__(__self__, function_name=None, id=None, ids=None, name_regex=None, names=None, output_file=None, service_name=None, triggers=None):
        # Validate each field's type (only when truthy) and stash it via
        # pulumi.set so the @pulumi.getter properties can retrieve it.
        if function_name and not isinstance(function_name, str):
            raise TypeError("Expected argument 'function_name' to be a str")
        pulumi.set(__self__, "function_name", function_name)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if ids and not isinstance(ids, list):
            raise TypeError("Expected argument 'ids' to be a list")
        pulumi.set(__self__, "ids", ids)
        if name_regex and not isinstance(name_regex, str):
            raise TypeError("Expected argument 'name_regex' to be a str")
        pulumi.set(__self__, "name_regex", name_regex)
        if names and not isinstance(names, list):
            raise TypeError("Expected argument 'names' to be a list")
        pulumi.set(__self__, "names", names)
        if output_file and not isinstance(output_file, str):
            raise TypeError("Expected argument 'output_file' to be a str")
        pulumi.set(__self__, "output_file", output_file)
        if service_name and not isinstance(service_name, str):
            raise TypeError("Expected argument 'service_name' to be a str")
        pulumi.set(__self__, "service_name", service_name)
        if triggers and not isinstance(triggers, list):
            raise TypeError("Expected argument 'triggers' to be a list")
        pulumi.set(__self__, "triggers", triggers)
    @property
    @pulumi.getter(name="functionName")
    def function_name(self) -> str:
        return pulumi.get(self, "function_name")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def ids(self) -> Sequence[str]:
        """
        A list of FC triggers ids.
        """
        return pulumi.get(self, "ids")
    @property
    @pulumi.getter(name="nameRegex")
    def name_regex(self) -> Optional[str]:
        return pulumi.get(self, "name_regex")
    @property
    @pulumi.getter
    def names(self) -> Sequence[str]:
        """
        A list of FC triggers names.
        """
        return pulumi.get(self, "names")
    @property
    @pulumi.getter(name="outputFile")
    def output_file(self) -> Optional[str]:
        return pulumi.get(self, "output_file")
    @property
    @pulumi.getter(name="serviceName")
    def service_name(self) -> str:
        return pulumi.get(self, "service_name")
    @property
    @pulumi.getter
    def triggers(self) -> Sequence['outputs.GetTriggersTriggerResult']:
        """
        A list of FC triggers. Each element contains the following attributes:
        """
        return pulumi.get(self, "triggers")
class AwaitableGetTriggersResult(GetTriggersResult):
    """Awaitable wrapper over GetTriggersResult for Pulumi's async invoke path.

    The ``if False: yield`` makes ``__await__`` a generator without ever
    yielding; awaiting it immediately returns a plain GetTriggersResult copy.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetTriggersResult(
            function_name=self.function_name,
            id=self.id,
            ids=self.ids,
            name_regex=self.name_regex,
            names=self.names,
            output_file=self.output_file,
            service_name=self.service_name,
            triggers=self.triggers)
def get_triggers(function_name: Optional[str] = None,
                 ids: Optional[Sequence[str]] = None,
                 name_regex: Optional[str] = None,
                 output_file: Optional[str] = None,
                 service_name: Optional[str] = None,
                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTriggersResult:
    """
    This data source provides the Function Compute triggers of the current Alibaba Cloud user.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_alicloud as alicloud
    fc_triggers_ds = alicloud.fc.get_triggers(function_name="sample_function",
        name_regex="sample_fc_trigger",
        service_name="sample_service")
    pulumi.export("firstFcTriggerName", fc_triggers_ds.triggers[0].name)
    ```
    :param str function_name: FC function name.
    :param Sequence[str] ids: - A list of FC triggers ids.
    :param str name_regex: A regex string to filter results by FC trigger name.
    :param str service_name: FC service name.
    """
    # Marshal the arguments into the camelCase keys the provider expects.
    __args__ = dict()
    __args__['functionName'] = function_name
    __args__['ids'] = ids
    __args__['nameRegex'] = name_regex
    __args__['outputFile'] = output_file
    __args__['serviceName'] = service_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous provider invoke; .value is the typed GetTriggersResult.
    __ret__ = pulumi.runtime.invoke('alicloud:fc/getTriggers:getTriggers', __args__, opts=opts, typ=GetTriggersResult).value
    return AwaitableGetTriggersResult(
        function_name=__ret__.function_name,
        id=__ret__.id,
        ids=__ret__.ids,
        name_regex=__ret__.name_regex,
        names=__ret__.names,
        output_file=__ret__.output_file,
        service_name=__ret__.service_name,
        triggers=__ret__.triggers)
| 34.975904
| 147
| 0.650189
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetTriggersResult',
'AwaitableGetTriggersResult',
'get_triggers',
]
@pulumi.output_type
class GetTriggersResult:
def __init__(__self__, function_name=None, id=None, ids=None, name_regex=None, names=None, output_file=None, service_name=None, triggers=None):
if function_name and not isinstance(function_name, str):
raise TypeError("Expected argument 'function_name' to be a str")
pulumi.set(__self__, "function_name", function_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ids and not isinstance(ids, list):
raise TypeError("Expected argument 'ids' to be a list")
pulumi.set(__self__, "ids", ids)
if name_regex and not isinstance(name_regex, str):
raise TypeError("Expected argument 'name_regex' to be a str")
pulumi.set(__self__, "name_regex", name_regex)
if names and not isinstance(names, list):
raise TypeError("Expected argument 'names' to be a list")
pulumi.set(__self__, "names", names)
if output_file and not isinstance(output_file, str):
raise TypeError("Expected argument 'output_file' to be a str")
pulumi.set(__self__, "output_file", output_file)
if service_name and not isinstance(service_name, str):
raise TypeError("Expected argument 'service_name' to be a str")
pulumi.set(__self__, "service_name", service_name)
if triggers and not isinstance(triggers, list):
raise TypeError("Expected argument 'triggers' to be a list")
pulumi.set(__self__, "triggers", triggers)
@property
@pulumi.getter(name="functionName")
def function_name(self) -> str:
return pulumi.get(self, "function_name")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter
def ids(self) -> Sequence[str]:
return pulumi.get(self, "ids")
@property
@pulumi.getter(name="nameRegex")
def name_regex(self) -> Optional[str]:
return pulumi.get(self, "name_regex")
@property
@pulumi.getter
def names(self) -> Sequence[str]:
return pulumi.get(self, "names")
@property
@pulumi.getter(name="outputFile")
def output_file(self) -> Optional[str]:
return pulumi.get(self, "output_file")
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> str:
return pulumi.get(self, "service_name")
@property
@pulumi.getter
def triggers(self) -> Sequence['outputs.GetTriggersTriggerResult']:
return pulumi.get(self, "triggers")
class AwaitableGetTriggersResult(GetTriggersResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetTriggersResult(
function_name=self.function_name,
id=self.id,
ids=self.ids,
name_regex=self.name_regex,
names=self.names,
output_file=self.output_file,
service_name=self.service_name,
triggers=self.triggers)
def get_triggers(function_name: Optional[str] = None,
ids: Optional[Sequence[str]] = None,
name_regex: Optional[str] = None,
output_file: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTriggersResult:
__args__ = dict()
__args__['functionName'] = function_name
__args__['ids'] = ids
__args__['nameRegex'] = name_regex
__args__['outputFile'] = output_file
__args__['serviceName'] = service_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('alicloud:fc/getTriggers:getTriggers', __args__, opts=opts, typ=GetTriggersResult).value
return AwaitableGetTriggersResult(
function_name=__ret__.function_name,
id=__ret__.id,
ids=__ret__.ids,
name_regex=__ret__.name_regex,
names=__ret__.names,
output_file=__ret__.output_file,
service_name=__ret__.service_name,
triggers=__ret__.triggers)
| true
| true
|
f719e0a27fcf5d467cb187747490a3e0cc93edc0
| 2,159
|
py
|
Python
|
py-games/first_game/part1.py
|
martanunesdea/cpp-shop-management
|
ed9371e8b5d6c5b3bdc31385158c747ea538046d
|
[
"MIT"
] | null | null | null |
py-games/first_game/part1.py
|
martanunesdea/cpp-shop-management
|
ed9371e8b5d6c5b3bdc31385158c747ea538046d
|
[
"MIT"
] | null | null | null |
py-games/first_game/part1.py
|
martanunesdea/cpp-shop-management
|
ed9371e8b5d6c5b3bdc31385158c747ea538046d
|
[
"MIT"
] | 1
|
2021-01-18T21:14:31.000Z
|
2021-01-18T21:14:31.000Z
|
import pygame, sys
from pygame.locals import *
import random
#### GAME SETUP ######
pygame.init()
# Frame-rate cap, enforced each iteration via FramePerSec.tick(FPS).
FPS = 60
FramePerSec = pygame.time.Clock()
# Game constants.  NOTE(review): RED and SCORE are not used anywhere in the
# visible code -- presumably reserved for later features; confirm or remove.
RED = (255, 0, 0)
WHITE = (255, 255, 255)
SCREEN_WIDTH = 400
SCREEN_HEIGHT = 600
GAME_NAME = "Dodge The Enemy"
SCORE = 0
# Main display surface, cleared to white and titled with the game name.
DISPLAYSURF = pygame.display.set_mode((SCREEN_WIDTH,SCREEN_HEIGHT))
DISPLAYSURF.fill(WHITE)
pygame.display.set_caption(GAME_NAME)
# Sprite classes.
class Enemy(pygame.sprite.Sprite):
    """Falling enemy sprite; respawns at a random x once it leaves the screen."""

    def __init__(self):
        super().__init__()
        self.image = pygame.image.load("images/alien.png")
        self.surf = pygame.Surface((100, 100))
        spawn_x = random.randint(40, SCREEN_WIDTH - 40)
        self.rect = self.surf.get_rect(center=(spawn_x, 0))

    def move(self):
        """Fall 5 px per frame; wrap to a random top position when off-screen."""
        self.rect.move_ip(0, 5)
        if self.rect.bottom > 600:
            self.rect.top = 0
            self.rect.center = (random.randint(40, SCREEN_WIDTH - 40), 0)

    def draw(self, surface):
        """Blit this sprite's image onto the given surface."""
        surface.blit(self.image, self.rect)
class Player(pygame.sprite.Sprite):
    """Player-controlled sprite moved left/right with the arrow keys."""

    def __init__(self):
        super().__init__()  # initialize the Sprite machinery
        self.image = pygame.image.load("images/rocket.png")
        self.surf = pygame.Surface((100, 100))
        self.rect = self.surf.get_rect(center=(250, 500))

    def update(self):
        """Move 5 px per frame in the held arrow direction, clamped to the screen."""
        keys = pygame.key.get_pressed()
        if self.rect.left > 0 and keys[K_LEFT]:
            self.rect.move_ip(-5, 0)
        if self.rect.right < SCREEN_WIDTH and keys[K_RIGHT]:
            self.rect.move_ip(5, 0)

    def draw(self, surface):
        """Blit this sprite's image onto the given surface."""
        surface.blit(self.image, self.rect)
### GAME STARTUP #######
P1 = Player()
E1 = Enemy()
# Main loop: poll events, advance sprites, redraw, cap the frame rate.
while True:
    list_events = pygame.event.get()
    for event in list_events:
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
    # Physics / input updates.
    P1.update()
    E1.move()
    # Redraw the frame from scratch (clear, then blit both sprites).
    DISPLAYSURF.fill(WHITE)
    P1.draw(DISPLAYSURF)
    E1.draw(DISPLAYSURF)
    pygame.display.update()
    FramePerSec.tick(FPS)
| 26.654321
| 95
| 0.610468
|
import pygame, sys
from pygame.locals import *
import random
WHITE = (255, 255, 255)
SCREEN_WIDTH = 400
SCREEN_HEIGHT = 600
GAME_NAME = "Dodge The Enemy"
SCORE = 0
DISPLAYSURF = pygame.display.set_mode((SCREEN_WIDTH,SCREEN_HEIGHT))
DISPLAYSURF.fill(WHITE)
pygame.display.set_caption(GAME_NAME)
class Enemy(pygame.sprite.Sprite):
def __init__(self):
super().__init__()
self.image = pygame.image.load("images/alien.png")
self.surf = pygame.Surface((100, 100))
self.rect = self.surf.get_rect(center = (random.randint(40, (SCREEN_WIDTH-40)),0))
def move(self):
self.rect.move_ip(0,5)
if (self.rect.bottom > 600):
self.rect.top = 0
self.rect.center = (random.randint(40, (SCREEN_WIDTH-40)), 0)
def draw(self, surface):
surface.blit(self.image, self.rect)
class Player(pygame.sprite.Sprite):
def __init__(self):
super().__init__()
self.image = pygame.image.load("images/rocket.png")
self.surf = pygame.Surface((100, 100))
self.rect = self.surf.get_rect(center = (250, 500))
def update(self):
pressed_keys = pygame.key.get_pressed()
if self.rect.left > 0:
if pressed_keys[K_LEFT]:
self.rect.move_ip(-5, 0)
if self.rect.right < SCREEN_WIDTH:
if pressed_keys[K_RIGHT]:
self.rect.move_ip(5, 0)
def draw(self, surface):
surface.blit(self.image, self.rect)
ent.get()
for event in list_events:
if event.type == QUIT:
pygame.quit()
sys.exit()
P1.update()
E1.move()
DISPLAYSURF.fill(WHITE)
P1.draw(DISPLAYSURF)
E1.draw(DISPLAYSURF)
pygame.display.update()
FramePerSec.tick(FPS)
| true
| true
|
f719e0a51e64125bb5ced9ad77431d75f5d0af9c
| 2,550
|
py
|
Python
|
new_model/test_big.py
|
aliabid2243/deepgaze
|
8c602db89a1d1d8a644b44a381ddb8a693375e08
|
[
"MIT"
] | 2
|
2019-02-24T15:03:19.000Z
|
2019-07-29T09:06:33.000Z
|
new_model/test_big.py
|
aliabid2243/deepgaze
|
8c602db89a1d1d8a644b44a381ddb8a693375e08
|
[
"MIT"
] | null | null | null |
new_model/test_big.py
|
aliabid2243/deepgaze
|
8c602db89a1d1d8a644b44a381ddb8a693375e08
|
[
"MIT"
] | null | null | null |
import os
from load_data import load_batch, load_data_names, load_batch_from_names, load_batch_from_names_random
from my_model import get_eye_tracker_model
import numpy as np
from keras.models import load_model
from keras.optimizers import SGD, adam
def generator(data, batch_size, img_cols, img_rows, img_ch):
    """Yield (x, y) batches from `data` indefinitely, epoch after epoch.

    `data` is a list of parallel array-like components; each batch slices
    the same index range out of every component before handing it to
    load_batch for decoding.
    """
    sample_count = data[0].shape[0]
    while True:
        for offset in range(0, sample_count, batch_size):
            batch_slices = [component[offset:offset + batch_size] for component in data]
            x, y = load_batch(batch_slices, img_cols, img_rows, img_ch)
            yield x, y
def test_big(args):
    """Evaluate the saved eye-tracker model over the whole test set.

    Loads the model from `weight_vgg.hdf5`, predicts gaze coordinates in
    chunks of `chunk_size` test samples, and prints per-sample predictions
    plus the overall mean absolute error and standard deviation per axis.

    :param args: object with a `.dev` attribute selecting the CUDA device
                 (exported via CUDA_VISIBLE_DEVICES).
    """
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.dev
    names_path = r"C:\Users\Aliab\PycharmProjects\data\test"
    print("Names to test: {}".format(names_path))
    dataset_path = r"D:\GazeCapture"
    # Bug fix: previously printed names_path under the "Dataset:" label.
    print("Dataset: {}".format(dataset_path))
    weights_path = "weight_vgg.hdf5"
    print("Weights: {}".format(weights_path))
    # Image parameters.
    img_cols = 128
    img_rows = 128
    img_ch = 3
    # Test parameters.
    batch_size = 64
    chunk_size = 500
    # Build the architecture (summary only) then load the trained model.
    model = get_eye_tracker_model(img_cols, img_rows, img_ch)
    model.summary()
    print("Loading weights...")
    # NOTE(review): load_model returns a complete model, making both the
    # get_eye_tracker_model call above and the load_weights call below
    # redundant; kept for behavioral parity -- confirm before removing.
    model = load_model(weights_path)
    model.load_weights(weights_path)
    # Test-sample names.
    test_names = load_data_names(names_path)
    # Per-axis absolute errors accumulated over all chunks.
    err_x = []
    err_y = []
    print("Loading testing data...")
    for it in range(0, len(test_names), chunk_size):
        x, y = load_batch_from_names_random(test_names[it:it + chunk_size], dataset_path, batch_size, img_cols, img_rows, img_ch)
        predictions = model.predict(x=x, batch_size=batch_size, verbose=1)
        # Print each prediction against its ground truth and record errors.
        for i, prediction in enumerate(predictions):
            print("PR: {} {}".format(prediction[0], prediction[1]))
            print("GT: {} {} \n".format(y[i][0], y[i][1]))
            err_x.append(abs(prediction[0] - y[i][0]))
            err_y.append(abs(prediction[1] - y[i][1]))
    # Mean absolute error per axis.
    mae_x = np.mean(err_x)
    mae_y = np.mean(err_y)
    # Standard deviation of the absolute errors per axis.
    std_x = np.std(err_x)
    std_y = np.std(err_y)
    # Final results.
    print("MAE: {} {} ( samples)".format(mae_x, mae_y))
    print("STD: {} {} ( samples)".format(std_x, std_y))
if __name__ == '__main__':
    # Bug fix: test_big() was called with no arguments even though it
    # requires an `args` object with a `.dev` attribute (GPU selection),
    # which raised a TypeError on startup.  Build a minimal CLI parser.
    import argparse

    parser = argparse.ArgumentParser(description="Evaluate the eye-tracker model on the big test set.")
    parser.add_argument("--dev", default="0", help="CUDA device id for CUDA_VISIBLE_DEVICES")
    test_big(parser.parse_args())
| 28.651685
| 130
| 0.65098
|
import os
from load_data import load_batch, load_data_names, load_batch_from_names, load_batch_from_names_random
from my_model import get_eye_tracker_model
import numpy as np
from keras.models import load_model
from keras.optimizers import SGD, adam
def generator(data, batch_size, img_cols, img_rows, img_ch):
while True:
for it in list(range(0, data[0].shape[0], batch_size)):
x, y = load_batch([l[it:it + batch_size] for l in data], img_cols, img_rows, img_ch)
yield x, y
def test_big(args):
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = args.dev
names_path = r"C:\Users\Aliab\PycharmProjects\data\test"
print("Names to test: {}".format(names_path))
dataset_path = r"D:\GazeCapture"
print("Dataset: {}".format(names_path))
weights_path = "weight_vgg.hdf5"
print("Weights: {}".format(weights_path))
img_cols = 128
img_rows = 128
img_ch = 3
batch_size = 64
chunk_size = 500
model = get_eye_tracker_model(img_cols, img_rows, img_ch)
model.summary()
print("Loading weights...")
model = load_model(weights_path)
model.load_weights(weights_path)
test_names = load_data_names(names_path)
err_x = []
err_y = []
print("Loading testing data...")
for it in list(range(0, len(test_names), chunk_size)):
x, y = load_batch_from_names_random(test_names[it:it + chunk_size], dataset_path, batch_size, img_cols, img_rows, img_ch)
predictions = model.predict(x=x, batch_size=batch_size, verbose=1)
for i, prediction in enumerate(predictions):
print("PR: {} {}".format(prediction[0], prediction[1]))
print("GT: {} {} \n".format(y[i][0], y[i][1]))
err_x.append(abs(prediction[0] - y[i][0]))
err_y.append(abs(prediction[1] - y[i][1]))
mae_x = np.mean(err_x)
mae_y = np.mean(err_y)
std_x = np.std(err_x)
std_y = np.std(err_y)
print("MAE: {} {} ( samples)".format(mae_x, mae_y))
print("STD: {} {} ( samples)".format(std_x, std_y))
if __name__ == '__main__':
test_big()
| true
| true
|
f719e0c57fd4991be0a999dbdc1c3d13878ecbff
| 9,695
|
py
|
Python
|
src/collectors/diskspace/diskspace.py
|
smartattack/Diamond
|
0559cb212559a852fce9a3cdb8643c1d129f41d4
|
[
"MIT"
] | null | null | null |
src/collectors/diskspace/diskspace.py
|
smartattack/Diamond
|
0559cb212559a852fce9a3cdb8643c1d129f41d4
|
[
"MIT"
] | 1
|
2022-02-22T08:46:21.000Z
|
2022-02-22T12:56:05.000Z
|
src/collectors/diskspace/diskspace.py
|
hostedgraphite/Diamond
|
e70fe7d358897ef9082c8778fba288215788b3d5
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""
Uses /proc/mounts and os.statvfs() to get disk space usage
#### Dependencies
* /proc/mounts
#### Examples
# no exclude filters at all
exclude_filters =,
# exclude everything that begins /boot or /mnt
exclude_filters = ^/boot, ^/mnt
# exclude everything that includes the letter 'm'
exclude_filters = m,
"""
import diamond.collector
import diamond.convertor
import os
import re
try:
import psutil
except ImportError:
psutil = None
class DiskSpaceCollector(diamond.collector.Collector):
    def get_default_config_help(self):
        # Extend the base collector's help text with this collector's
        # configurable options (shown by diamond's config tooling).
        config_help = super(DiskSpaceCollector, self).get_default_config_help()
        config_help.update({
            'filesystems': "filesystems to examine",
            'exclude_filters':
                "A list of regex patterns. Any filesystem" +
                " matching any of these patterns will be excluded from disk" +
                " space metrics collection",
        })
        return config_help
    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(DiskSpaceCollector, self).get_default_config()
        config.update({
            'path': 'diskspace',
            # Filesystem types to examine (comma-separated string or list).
            'filesystems': 'ext2, ext3, ext4, xfs, glusterfs, nfs, nfs4, ' +
                           ' ntfs, hfs, fat32, fat16, btrfs',
            # exclude_filters
            # A list of regex patterns
            # A filesystem matching any of these patterns will be excluded
            # from disk space metrics collection.
            #
            # Examples:
            #       exclude_filters =,
            # no exclude filters at all
            #       exclude_filters = ^/boot, ^/mnt
            # exclude everything that begins /boot or /mnt
            #       exclude_filters = m,
            # exclude everything that includes the letter "m"
            'exclude_filters': ['^/export/home'],
            # Unit(s) to report sizes in (see diamond.convertor).
            'byte_unit': ['byte']
        })
        return config
    def process_config(self):
        # Normalize config values and precompile the exclude regex once,
        # so collect() does not rebuild them on every run.
        super(DiskSpaceCollector, self).process_config()
        # Accept a single pattern string as well as a list (Python 2:
        # basestring covers both str and unicode).
        self.exclude_filters = self.config['exclude_filters']
        if isinstance(self.exclude_filters, basestring):
            self.exclude_filters = [self.exclude_filters]
        if not self.exclude_filters:
            # Sentinel pattern: '!.*' only matches paths containing '!',
            # which normal mount points never do -- i.e. "exclude nothing".
            self.exclude_reg = re.compile('!.*')
        else:
            self.exclude_reg = re.compile('|'.join(self.exclude_filters))
        # 'filesystems' may be a comma-separated string or already a list.
        self.filesystems = []
        if isinstance(self.config['filesystems'], basestring):
            for filesystem in self.config['filesystems'].split(','):
                self.filesystems.append(filesystem.strip())
        elif isinstance(self.config['filesystems'], list):
            self.filesystems = self.config['filesystems']
def get_disk_labels(self):
"""
Creates a mapping of device nodes to filesystem labels
"""
path = '/dev/disk/by-label/'
labels = {}
if not os.path.isdir(path):
return labels
for label in os.listdir(path):
label = label.replace('\\x2f', '/')
device = os.path.realpath(path + '/' + label)
labels[device] = label
return labels
def get_file_systems(self):
"""
Creates a map of mounted filesystems on the machine.
iostat(1): Each sector has size of 512 bytes.
Returns:
st_dev -> FileSystem(device, mount_point)
"""
result = {}
if os.access('/proc/mounts', os.R_OK):
file = open('/proc/mounts')
for line in file:
try:
mount = line.split()
device = mount[0]
mount_point = mount[1]
fs_type = mount[2]
except (IndexError, ValueError):
continue
# Skip the filesystem if it is not in the list of valid
# filesystems
if fs_type not in self.filesystems:
self.log.debug("Ignoring %s since it is of type %s " +
" which is not in the list of filesystems.",
mount_point, fs_type)
continue
# Process the filters
if self.exclude_reg.search(mount_point):
self.log.debug("Ignoring %s since it is in the " +
"exclude_filter list.", mount_point)
continue
if ((('/' in device or ':' in device or device == 'tmpfs') and
mount_point.startswith('/'))):
try:
stat = os.stat(mount_point)
except OSError:
self.log.debug("Path %s is not mounted - skipping.",
mount_point)
continue
if stat.st_dev in result:
continue
result[stat.st_dev] = {
'device': os.path.realpath(device),
'mount_point': mount_point,
'fs_type': fs_type
}
file.close()
else:
if not psutil:
self.log.error('Unable to import psutil')
return None
partitions = psutil.disk_partitions(False)
for partition in partitions:
result[len(result)] = {
'device': os.path.realpath(partition.device),
'mount_point': partition.mountpoint,
'fs_type': partition.fstype
}
pass
return result
    def collect(self):
        """
        Publish used/free/available space gauges (and, on POSIX, inode
        gauges) for every mounted filesystem found by get_file_systems().

        Metric names are derived from the udev label when one exists,
        otherwise from a sanitized form of the mount point.
        """
        labels = self.get_disk_labels()
        results = self.get_file_systems()
        if not results:
            self.log.error('No diskspace metrics retrieved')
            return None

        for info in results.itervalues():
            # Prefer the filesystem label (keys in `labels` are realpath'd
            # device nodes, matching info['device']); otherwise build a
            # name from the mount point with path separators flattened.
            if info['device'] in labels:
                name = labels[info['device']]
            else:
                name = info['mount_point'].replace('/', '_')
                name = name.replace('.', '_').replace('\\', '')
                if name == '_':
                    name = 'root'
                if name == '_tmp':
                    name = 'tmp'

            if hasattr(os, 'statvfs'):  # POSIX
                try:
                    data = os.statvfs(info['mount_point'])
                except OSError as e:
                    # e.g. stale NFS mounts; skip just this filesystem.
                    self.log.exception(e)
                    continue

                # Changed from data.f_bsize as f_frsize seems to be a more
                # accurate representation of block size on multiple POSIX
                # operating systems.
                block_size = data.f_frsize
                blocks_total = data.f_blocks
                blocks_free = data.f_bfree
                blocks_avail = data.f_bavail
                inodes_total = data.f_files
                inodes_free = data.f_ffree
                inodes_avail = data.f_favail
            elif os.name == 'nt':  # Windows
                # fixme: used still not exact compared to disk_usage.py
                # from psutil
                raw_data = psutil.disk_usage(info['mount_point'])
                block_size = 1  # fixme: ?
                blocks_total = raw_data.total
                blocks_free = raw_data.free
            else:
                raise NotImplementedError("platform not supported")

            for unit in self.config['byte_unit']:
                metric_name = '%s.%s_percentfree' % (name, unit)
                try:
                    # NOTE(review): the denominator algebraically reduces to
                    # blocks_total; kept as-is to preserve behavior exactly.
                    metric_value = float(blocks_free) / float(
                        blocks_free + (blocks_total - blocks_free)) * 100
                except ZeroDivisionError:
                    metric_value = 0
                self.publish_gauge(metric_name, metric_value, 2)

                metric_name = '%s.%s_used' % (name, unit)
                metric_value = float(block_size) * float(
                    blocks_total - blocks_free)
                metric_value = diamond.convertor.binary.convert(
                    value=metric_value, oldUnit='byte', newUnit=unit)
                self.publish_gauge(metric_name, metric_value, 2)

                metric_name = '%s.%s_free' % (name, unit)
                metric_value = float(block_size) * float(blocks_free)
                metric_value = diamond.convertor.binary.convert(
                    value=metric_value, oldUnit='byte', newUnit=unit)
                self.publish_gauge(metric_name, metric_value, 2)

                # blocks_avail (space available to unprivileged users) is
                # only defined on the POSIX branch above.
                if os.name != 'nt':
                    metric_name = '%s.%s_avail' % (name, unit)
                    metric_value = float(block_size) * float(blocks_avail)
                    metric_value = diamond.convertor.binary.convert(
                        value=metric_value, oldUnit='byte', newUnit=unit)
                    self.publish_gauge(metric_name, metric_value, 2)

            # Inode counters likewise exist only on the POSIX branch.
            if os.name != 'nt':
                if float(inodes_total) > 0:
                    self.publish_gauge(
                        '%s.inodes_percentfree' % name,
                        float(inodes_free) / float(inodes_total) * 100)
                self.publish_gauge('%s.inodes_used' % name,
                                   inodes_total - inodes_free)
                self.publish_gauge('%s.inodes_free' % name, inodes_free)
                self.publish_gauge('%s.inodes_avail' % name, inodes_avail)
| 35.774908
| 79
| 0.513357
|
import diamond.collector
import diamond.convertor
import os
import re
try:
import psutil
except ImportError:
psutil = None
class DiskSpaceCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(DiskSpaceCollector, self).get_default_config_help()
config_help.update({
'filesystems': "filesystems to examine",
'exclude_filters':
"A list of regex patterns. Any filesystem" +
" matching any of these patterns will be excluded from disk" +
" space metrics collection",
})
return config_help
def get_default_config(self):
config = super(DiskSpaceCollector, self).get_default_config()
config.update({
'path': 'diskspace',
'filesystems': 'ext2, ext3, ext4, xfs, glusterfs, nfs, nfs4, ' +
' ntfs, hfs, fat32, fat16, btrfs',
'exclude_filters': ['^/export/home'],
'byte_unit': ['byte']
})
return config
def process_config(self):
super(DiskSpaceCollector, self).process_config()
self.exclude_filters = self.config['exclude_filters']
if isinstance(self.exclude_filters, basestring):
self.exclude_filters = [self.exclude_filters]
if not self.exclude_filters:
self.exclude_reg = re.compile('!.*')
else:
self.exclude_reg = re.compile('|'.join(self.exclude_filters))
self.filesystems = []
if isinstance(self.config['filesystems'], basestring):
for filesystem in self.config['filesystems'].split(','):
self.filesystems.append(filesystem.strip())
elif isinstance(self.config['filesystems'], list):
self.filesystems = self.config['filesystems']
def get_disk_labels(self):
path = '/dev/disk/by-label/'
labels = {}
if not os.path.isdir(path):
return labels
for label in os.listdir(path):
label = label.replace('\\x2f', '/')
device = os.path.realpath(path + '/' + label)
labels[device] = label
return labels
def get_file_systems(self):
result = {}
if os.access('/proc/mounts', os.R_OK):
file = open('/proc/mounts')
for line in file:
try:
mount = line.split()
device = mount[0]
mount_point = mount[1]
fs_type = mount[2]
except (IndexError, ValueError):
continue
if fs_type not in self.filesystems:
self.log.debug("Ignoring %s since it is of type %s " +
" which is not in the list of filesystems.",
mount_point, fs_type)
continue
if self.exclude_reg.search(mount_point):
self.log.debug("Ignoring %s since it is in the " +
"exclude_filter list.", mount_point)
continue
if ((('/' in device or ':' in device or device == 'tmpfs') and
mount_point.startswith('/'))):
try:
stat = os.stat(mount_point)
except OSError:
self.log.debug("Path %s is not mounted - skipping.",
mount_point)
continue
if stat.st_dev in result:
continue
result[stat.st_dev] = {
'device': os.path.realpath(device),
'mount_point': mount_point,
'fs_type': fs_type
}
file.close()
else:
if not psutil:
self.log.error('Unable to import psutil')
return None
partitions = psutil.disk_partitions(False)
for partition in partitions:
result[len(result)] = {
'device': os.path.realpath(partition.device),
'mount_point': partition.mountpoint,
'fs_type': partition.fstype
}
pass
return result
def collect(self):
labels = self.get_disk_labels()
results = self.get_file_systems()
if not results:
self.log.error('No diskspace metrics retrieved')
return None
for info in results.itervalues():
if info['device'] in labels:
name = labels[info['device']]
else:
name = info['mount_point'].replace('/', '_')
name = name.replace('.', '_').replace('\\', '')
if name == '_':
name = 'root'
if name == '_tmp':
name = 'tmp'
if hasattr(os, 'statvfs'):
try:
data = os.statvfs(info['mount_point'])
except OSError as e:
self.log.exception(e)
continue
block_size = data.f_frsize
blocks_total = data.f_blocks
blocks_free = data.f_bfree
blocks_avail = data.f_bavail
inodes_total = data.f_files
inodes_free = data.f_ffree
inodes_avail = data.f_favail
elif os.name == 'nt':
raw_data = psutil.disk_usage(info['mount_point'])
block_size = 1
blocks_total = raw_data.total
blocks_free = raw_data.free
else:
raise NotImplementedError("platform not supported")
for unit in self.config['byte_unit']:
metric_name = '%s.%s_percentfree' % (name, unit)
try:
metric_value = float(blocks_free) / float(
blocks_free + (blocks_total - blocks_free)) * 100
except ZeroDivisionError:
metric_value = 0
self.publish_gauge(metric_name, metric_value, 2)
metric_name = '%s.%s_used' % (name, unit)
metric_value = float(block_size) * float(
blocks_total - blocks_free)
metric_value = diamond.convertor.binary.convert(
value=metric_value, oldUnit='byte', newUnit=unit)
self.publish_gauge(metric_name, metric_value, 2)
metric_name = '%s.%s_free' % (name, unit)
metric_value = float(block_size) * float(blocks_free)
metric_value = diamond.convertor.binary.convert(
value=metric_value, oldUnit='byte', newUnit=unit)
self.publish_gauge(metric_name, metric_value, 2)
if os.name != 'nt':
metric_name = '%s.%s_avail' % (name, unit)
metric_value = float(block_size) * float(blocks_avail)
metric_value = diamond.convertor.binary.convert(
value=metric_value, oldUnit='byte', newUnit=unit)
self.publish_gauge(metric_name, metric_value, 2)
if os.name != 'nt':
if float(inodes_total) > 0:
self.publish_gauge(
'%s.inodes_percentfree' % name,
float(inodes_free) / float(inodes_total) * 100)
self.publish_gauge('%s.inodes_used' % name,
inodes_total - inodes_free)
self.publish_gauge('%s.inodes_free' % name, inodes_free)
self.publish_gauge('%s.inodes_avail' % name, inodes_avail)
| true
| true
|
f719e23150154f51fed830c34ef140c0f8e124fa
| 1,831
|
py
|
Python
|
backend/app/api/api_v1/endpoints/users.py
|
BartlomiejRasztabiga/Rentally
|
ba70199d329895a5295ceddd0ecc4c61928890dd
|
[
"MIT"
] | 2
|
2021-01-11T23:24:29.000Z
|
2021-01-12T09:55:58.000Z
|
backend/app/api/api_v1/endpoints/users.py
|
BartlomiejRasztabiga/Rentally
|
ba70199d329895a5295ceddd0ecc4c61928890dd
|
[
"MIT"
] | null | null | null |
backend/app/api/api_v1/endpoints/users.py
|
BartlomiejRasztabiga/Rentally
|
ba70199d329895a5295ceddd0ecc4c61928890dd
|
[
"MIT"
] | null | null | null |
from typing import Any, List
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from app import models, schemas, services
from app.api import deps
router = APIRouter()
@router.get("/", response_model=List[schemas.User])
def read_users(
db: Session = Depends(deps.get_db),
current_user: models.User = Depends(deps.get_current_active_admin),
) -> Any:
"""
Retrieve users.
"""
users = services.user.get_all(db)
return users
@router.post("/", response_model=schemas.User)
def create_user(
*,
db: Session = Depends(deps.get_db),
user_in: schemas.UserCreateDto,
current_user: models.User = Depends(deps.get_current_active_admin),
) -> Any:
"""
Create new user.
"""
user = services.user.get_by_email(db, email=user_in.email)
if user:
raise HTTPException(
status_code=400,
detail="The user with this username already exists in the system.",
)
user = services.user.create(db, obj_in=user_in)
return user
@router.get("/me", response_model=schemas.User)
def read_user_me(
db: Session = Depends(deps.get_db),
current_user: models.User = Depends(deps.get_current_user),
) -> Any:
"""
Get current user.
"""
return current_user
@router.get("/{user_id}", response_model=schemas.User)
def read_user_by_id(
user_id: int,
current_user: models.User = Depends(deps.get_current_user),
db: Session = Depends(deps.get_db),
) -> Any:
"""
Get a specific user by id.
"""
user = services.user.get(db, _id=user_id)
if user == current_user:
return user
if not services.user.is_admin(current_user):
raise HTTPException(
status_code=400, detail="The user doesn't have enough privileges"
)
return user
| 25.082192
| 79
| 0.666303
|
from typing import Any, List
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from app import models, schemas, services
from app.api import deps
router = APIRouter()
@router.get("/", response_model=List[schemas.User])
def read_users(
db: Session = Depends(deps.get_db),
current_user: models.User = Depends(deps.get_current_active_admin),
) -> Any:
users = services.user.get_all(db)
return users
@router.post("/", response_model=schemas.User)
def create_user(
*,
db: Session = Depends(deps.get_db),
user_in: schemas.UserCreateDto,
current_user: models.User = Depends(deps.get_current_active_admin),
) -> Any:
user = services.user.get_by_email(db, email=user_in.email)
if user:
raise HTTPException(
status_code=400,
detail="The user with this username already exists in the system.",
)
user = services.user.create(db, obj_in=user_in)
return user
@router.get("/me", response_model=schemas.User)
def read_user_me(
db: Session = Depends(deps.get_db),
current_user: models.User = Depends(deps.get_current_user),
) -> Any:
return current_user
@router.get("/{user_id}", response_model=schemas.User)
def read_user_by_id(
user_id: int,
current_user: models.User = Depends(deps.get_current_user),
db: Session = Depends(deps.get_db),
) -> Any:
user = services.user.get(db, _id=user_id)
if user == current_user:
return user
if not services.user.is_admin(current_user):
raise HTTPException(
status_code=400, detail="The user doesn't have enough privileges"
)
return user
| true
| true
|
f719e2f9ea943ab752ebf80ab241bf9d6a0bde56
| 273
|
py
|
Python
|
urls.py
|
ActuallyZach/in_app_purchase_receipt_verifier
|
f342809bcc2a16a34de3cccf965f0821a5bd552b
|
[
"Apache-2.0"
] | 1
|
2021-12-10T09:59:17.000Z
|
2021-12-10T09:59:17.000Z
|
urls.py
|
ActuallyZach/in_app_purchase_receipt_verifier
|
f342809bcc2a16a34de3cccf965f0821a5bd552b
|
[
"Apache-2.0"
] | null | null | null |
urls.py
|
ActuallyZach/in_app_purchase_receipt_verifier
|
f342809bcc2a16a34de3cccf965f0821a5bd552b
|
[
"Apache-2.0"
] | null | null | null |
from django.conf.urls import include, url
from django.http import HttpResponse
from app import views
# More specific routes must come first and be anchored: the bare
# r'^verify' pattern has no '$', so listed first it also matches
# 'verify/scum' and 'verify/jellycuts' and makes those routes
# unreachable.
urlpatterns = [
    url(r'^verify/scum$', views.verify_receipt_scum),
    url(r'^verify/jellycuts$', views.verify_receipt_jelly),
    url(r'^verify$', views.verify_receipt),
]
| 22.75
| 56
| 0.747253
|
from django.conf.urls import include, url
from django.http import HttpResponse
from app import views
urlpatterns = [
url(r'^verify', views.verify_receipt),
url('verify/scum', views.verify_receipt_scum),
url('verify/jellycuts', views.verify_receipt_jelly),
]
| true
| true
|
f719e312e4286ce9cdd25018ce166a3a13eee31c
| 6,014
|
py
|
Python
|
nikola/plugins/compile/rest/post_list.py
|
pellenilsson/nikola
|
67a944a40b35584525a1bb363b3abd85582704cb
|
[
"MIT"
] | 1
|
2015-11-06T01:07:29.000Z
|
2015-11-06T01:07:29.000Z
|
nikola/plugins/compile/rest/post_list.py
|
pellenilsson/nikola
|
67a944a40b35584525a1bb363b3abd85582704cb
|
[
"MIT"
] | null | null | null |
nikola/plugins/compile/rest/post_list.py
|
pellenilsson/nikola
|
67a944a40b35584525a1bb363b3abd85582704cb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright © 2013-2014 Udo Spallek, Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
import uuid
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from nikola import utils
from nikola.plugin_categories import RestExtension
# WARNING: the directive name is post-list
# (with a DASH instead of an UNDERSCORE)
class Plugin(RestExtension):
    """Nikola reST extension that registers the ``post-list`` directive."""

    name = "rest_post_list"

    def set_site(self, site):
        """Register ``post-list`` with docutils and share the site object.

        The site is stored both on the plugin and on the ``PostList``
        directive class, which needs it when the directive is rendered.
        """
        self.site = site
        directives.register_directive('post-list', PostList)
        PostList.site = site
        return super(Plugin, self).set_site(site)
class PostList(Directive):
    """
    Post List
    =========
    :Directive Arguments: None.
    :Directive Options: lang, start, stop, reverse, tags, template, id
    :Directive Content: None.

    Provides a reStructuredText directive to create a list of posts.
    The posts appearing in the list can be filtered by options.
    *List slicing* is provided with the *start*, *stop* and *reverse* options.

    The following optional options are recognized:

    ``start`` : integer
        The index of the first post to show.
        A negative value like ``-3`` will show the *last* three posts in the
        post-list.
        Defaults to None.

    ``stop`` : integer
        The index of the last post to show.
        A negative value like ``-1`` will show every post, but not the
        *last* in the post-list.
        Defaults to None.

    ``reverse`` : flag
        Reverse the order of the post-list.
        Default is to not reverse the order of posts.

    ``tags`` : string [, string...]
        Filter posts to show only posts having at least one of the ``tags``.
        Defaults to None.

    ``slugs`` : string [, string...]
        Filter posts to show only posts having at least one of the ``slugs``.
        Defaults to None.

    ``all`` : flag
        Shows all posts and pages in the post list.
        Defaults to show only posts with set *use_in_feeds*.

    ``lang`` : string
        The language of post *titles* and *links*.
        Defaults to default language.

    ``template`` : string
        The name of an alternative template to render the post-list.
        Defaults to ``post_list_directive.tmpl``

    ``id`` : string
        A manual id for the post list.
        Defaults to a random name composed by 'post_list_' + uuid.uuid4().hex.
    """
    option_spec = {
        'start': int,
        'stop': int,
        'reverse': directives.flag,
        'tags': directives.unchanged,
        'slugs': directives.unchanged,
        'all': directives.flag,
        'lang': directives.unchanged,
        'template': directives.path,
        'id': directives.unchanged,
    }

    def run(self):
        """Build the filtered, sliced post list and render it to HTML."""
        start = self.options.get('start')
        stop = self.options.get('stop')
        # NOTE: 'reverse' and 'all' are docutils *flag* options — when the
        # flag is present the stored value is None, otherwise the default
        # (False) is used.  Hence the `is None` tests below mean
        # "flag was given".
        reverse = self.options.get('reverse', False)
        tags = self.options.get('tags')
        tags = [t.strip().lower() for t in tags.split(',')] if tags else []
        slugs = self.options.get('slugs')
        slugs = [s.strip() for s in slugs.split(',')] if slugs else []
        show_all = self.options.get('all', False)
        lang = self.options.get('lang', utils.LocaleBorg().current_lang)
        template = self.options.get('template', 'post_list_directive.tmpl')
        if self.site.invariant:  # for testing purposes
            post_list_id = self.options.get('id', 'post_list_' + 'fixedvaluethatisnotauuid')
        else:
            post_list_id = self.options.get('id', 'post_list_' + uuid.uuid4().hex)

        filtered_timeline = []
        posts = []
        # Reversal is implemented as a negative slice step.
        step = -1 if reverse is None else None
        if show_all is None:
            timeline = [p for p in self.site.timeline]
        else:
            timeline = [p for p in self.site.timeline if p.use_in_feeds]

        # First pass: keep posts that match at least one requested tag
        # (case-insensitive); no tags requested means keep everything.
        for post in timeline:
            if tags:
                cont = True
                tags_lower = [t.lower() for t in post.tags]
                for tag in tags:
                    if tag in tags_lower:
                        cont = False
                if cont:
                    continue
            filtered_timeline.append(post)

        # Second pass: slice, then keep posts matching a requested slug.
        for post in filtered_timeline[start:stop:step]:
            if slugs:
                cont = True
                for slug in slugs:
                    if slug == post.meta('slug'):
                        cont = False
                if cont:
                    continue
            posts += [post]

        if not posts:
            return []

        template_data = {
            'lang': lang,
            'posts': posts,
            'date_format': self.site.GLOBAL_CONTEXT.get('date_format'),
            'post_list_id': post_list_id,
        }
        output = self.site.template_system.render_template(
            template, None, template_data)
        # Hand the rendered HTML back to docutils as a raw node.
        return [nodes.raw('', output, format='html')]
| 33.977401
| 92
| 0.617559
|
from __future__ import unicode_literals
import uuid
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from nikola import utils
from nikola.plugin_categories import RestExtension
class Plugin(RestExtension):
name = "rest_post_list"
def set_site(self, site):
self.site = site
directives.register_directive('post-list', PostList)
PostList.site = site
return super(Plugin, self).set_site(site)
class PostList(Directive):
option_spec = {
'start': int,
'stop': int,
'reverse': directives.flag,
'tags': directives.unchanged,
'slugs': directives.unchanged,
'all': directives.flag,
'lang': directives.unchanged,
'template': directives.path,
'id': directives.unchanged,
}
def run(self):
start = self.options.get('start')
stop = self.options.get('stop')
reverse = self.options.get('reverse', False)
tags = self.options.get('tags')
tags = [t.strip().lower() for t in tags.split(',')] if tags else []
slugs = self.options.get('slugs')
slugs = [s.strip() for s in slugs.split(',')] if slugs else []
show_all = self.options.get('all', False)
lang = self.options.get('lang', utils.LocaleBorg().current_lang)
template = self.options.get('template', 'post_list_directive.tmpl')
if self.site.invariant:
post_list_id = self.options.get('id', 'post_list_' + 'fixedvaluethatisnotauuid')
else:
post_list_id = self.options.get('id', 'post_list_' + uuid.uuid4().hex)
filtered_timeline = []
posts = []
step = -1 if reverse is None else None
if show_all is None:
timeline = [p for p in self.site.timeline]
else:
timeline = [p for p in self.site.timeline if p.use_in_feeds]
for post in timeline:
if tags:
cont = True
tags_lower = [t.lower() for t in post.tags]
for tag in tags:
if tag in tags_lower:
cont = False
if cont:
continue
filtered_timeline.append(post)
for post in filtered_timeline[start:stop:step]:
if slugs:
cont = True
for slug in slugs:
if slug == post.meta('slug'):
cont = False
if cont:
continue
posts += [post]
if not posts:
return []
template_data = {
'lang': lang,
'posts': posts,
'date_format': self.site.GLOBAL_CONTEXT.get('date_format'),
'post_list_id': post_list_id,
}
output = self.site.template_system.render_template(
template, None, template_data)
return [nodes.raw('', output, format='html')]
| true
| true
|
f719e34865d33ff09f68d04ec4e19add1ab00e5b
| 5,344
|
py
|
Python
|
analysis.py
|
edpolanco/air_cargo
|
20ddf6c72dafed85b87486ca46a9c09656f31d90
|
[
"MIT"
] | null | null | null |
analysis.py
|
edpolanco/air_cargo
|
20ddf6c72dafed85b87486ca46a9c09656f31d90
|
[
"MIT"
] | null | null | null |
analysis.py
|
edpolanco/air_cargo
|
20ddf6c72dafed85b87486ca46a9c09656f31d90
|
[
"MIT"
] | null | null | null |
"""Module for summarizing cargo planning testing results.
Ed Polanco
ed.polanco@outlook.com
"""
import pandas as pd
from collections import OrderedDict
import datetime
import time
from aimacode.search import Problem, Node
from timeit import default_timer as timer
from run_search import PrintableProblem, PROBLEMS
from aimacode.search import (breadth_first_search, astar_search,
breadth_first_tree_search, depth_first_graph_search, uniform_cost_search,
greedy_best_first_graph_search, depth_limited_search,
recursive_best_first_search)
#Names of the various search algorithms
SEARCHES_SHORT_NAME = [["Breadth First", breadth_first_search, ""], #1
['Breadth First Tree', breadth_first_tree_search, ""], #2
['Depth First Graph', depth_first_graph_search, ""], #3
['Depth Limited', depth_limited_search, ""], #4
['Uniform Cost', uniform_cost_search, ""], #5
['Recursive Best First w/ h1', recursive_best_first_search, 'h_1'], #6
['Greedy Best First Graph w/ h1', greedy_best_first_graph_search, 'h_1'], #7
['Astar w/ h1', astar_search, 'h_1'], #8
['Astar w/ ignore pre-cond.', astar_search, 'h_ignore_preconditions'], #9
['Astar w/ level-sum', astar_search, 'h_pg_levelsum'], #10
]
def show_path(node):
    """Print the solution plan contained in a search result.

    Parameters
    ----------
    node : tuple or None
        A ``(search_name, Node)`` pair, as stored in the ``nodes`` list
        returned by ``search_data`` — the original annotation claimed a
        bare ``Node``, but the body indexes ``node[0]``/``node[1]``, so
        a pair is what is actually expected.  Pass ``None`` when the
        search found no solution.
    """
    if node is None:
        print("The selected planner did not find a solution for this problem. " +
              "Make sure you have completed the AirCargoProblem implementation " +
              "and pass all unit tests first.")
    else:
        msg = "Search function {} plan length: {} ".format(node[0], len(node[1].solution()))
        print(msg)
        # Each action prints as "<name><args tuple>".
        for action in node[1].solution():
            print("{}{}".format(action.name, action.args))
def run_search_table(problem: Problem, search_function, parameter=None):
    """Run one search algorithm against a cargo problem and time it.

    Parameters
    ----------
    problem: Problem
        Cargo planning problem instance.
    search_function:
        Search algorithm callable.
    parameter:
        Optional extra argument (e.g. a heuristic) for algorithms that
        require one; omitted from the call when None.

    Returns
    -------
    tuple
        (node expansions, goal tests, new nodes, elapsed seconds,
        search-tree Node).
    """
    start = timer()
    printable = PrintableProblem(problem)
    if parameter is None:
        node = search_function(printable)
    else:
        node = search_function(printable, parameter)
    elapsed = timer() - start
    return (printable.succs, printable.goal_tests, printable.states,
            elapsed, node)
def search_data(problem_id: int, s_choices: list):
    """ Perform a test to solve a cargo planning problem with
        the given search algorithms.

        Parameters:
        ----------
        problem_id: int
            Cargo planning problem id (1-based index into PROBLEMS)

        s_choices: list
            List of the search algorithms to try
            (1-based indices into SEARCHES_SHORT_NAME).

        Returns:
        ----------
        Returns tuple of two items:
            1 = DataFrame that summarizes test results
            2 = A list of pairs, where the first item in each
                pair is the search algorithm name and the second
                is its corresponding search Node object.
    """
    #lets get a list of problems and search algorithms
    problem_name,problem = PROBLEMS[problem_id - 1][0],PROBLEMS[problem_id- 1][1]
    searches = [SEARCHES_SHORT_NAME[i-1] for i in map(int, s_choices)]

    # helper variables to create DataFrame
    steps = []
    fun_name = []
    expansions = []
    goal_test =[]
    new_nodes = []
    elapsed_time = []
    nodes = []
    for sname, s, h in searches:
        # Announce each run with a human-readable timestamp so long
        # searches can be tracked from the console.
        start_time = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %I:%M:%S%p')
        print("\nSolving {} using {} start time {}...".format(problem_name, sname, start_time))
        _p = problem()
        # h is the heuristic *name*; bind it to this problem instance
        # (empty string means the algorithm takes no heuristic).
        _h = None if not h else getattr(_p, h)

        #perform test get result
        result = run_search_table(_p, s, _h)

        #update helper list variables
        fun_name.append(sname)
        expansions.append(result[0])
        goal_test.append(result[1])
        new_nodes.append(result[2])
        elapsed_time.append(result[3])
        steps.append(len(result[4].solution()) )
        nodes.append([sname,result[4]])

    #create dictionary for DataFrame input
    table_dict = OrderedDict()
    table_dict["Function Name"] = fun_name
    table_dict["Solution Steps"] = steps
    table_dict["Expansions"] = expansions
    table_dict["Goal Tests"] = goal_test
    table_dict["New_Nodes"] = new_nodes
    table_dict["Elapsed Seconds"] = elapsed_time

    dataframe = pd.DataFrame(table_dict)
    # 1-based row numbering reads better in the summary table.
    dataframe.index +=1
    return dataframe, nodes
| 36.60274
| 98
| 0.595434
|
import pandas as pd
from collections import OrderedDict
import datetime
import time
from aimacode.search import Problem, Node
from timeit import default_timer as timer
from run_search import PrintableProblem, PROBLEMS
from aimacode.search import (breadth_first_search, astar_search,
breadth_first_tree_search, depth_first_graph_search, uniform_cost_search,
greedy_best_first_graph_search, depth_limited_search,
recursive_best_first_search)
SEARCHES_SHORT_NAME = [["Breadth First", breadth_first_search, ""],
['Breadth First Tree', breadth_first_tree_search, ""],
['Depth First Graph', depth_first_graph_search, ""],
['Depth Limited', depth_limited_search, ""],
['Uniform Cost', uniform_cost_search, ""],
['Recursive Best First w/ h1', recursive_best_first_search, 'h_1'],
['Greedy Best First Graph w/ h1', greedy_best_first_graph_search, 'h_1'],
['Astar w/ h1', astar_search, 'h_1'],
['Astar w/ ignore pre-cond.', astar_search, 'h_ignore_preconditions'],
['Astar w/ level-sum', astar_search, 'h_pg_levelsum'],
]
def show_path(node:Node):
if node is None:
print("The selected planner did not find a solution for this problem. " +
"Make sure you have completed the AirCargoProblem implementation " +
"and pass all unit tests first.")
else:
msg = "Search function {} plan length: {} ".format(node[0],len(node[1].solution()) )
print(msg)
for action in node[1].solution():
print("{}{}".format(action.name, action.args))
def run_search_table(problem: Problem, search_function, parameter=None):
start = timer()
ip = PrintableProblem(problem)
if parameter is not None:
node = search_function(ip, parameter)
else:
node = search_function(ip)
end = timer()
return (ip.succs, ip.goal_tests, ip.states, end - start, node )
def search_data(problem_id: int, s_choices: list):
problem_name,problem = PROBLEMS[problem_id - 1][0],PROBLEMS[problem_id- 1][1]
searches = [SEARCHES_SHORT_NAME[i-1] for i in map(int, s_choices)]
steps = []
fun_name = []
expansions = []
goal_test =[]
new_nodes = []
elapsed_time = []
nodes = []
for sname, s, h in searches:
start_time = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %I:%M:%S%p')
print("\nSolving {} using {} start time {}...".format(problem_name, sname, start_time))
_p = problem()
_h = None if not h else getattr(_p, h)
result = run_search_table(_p, s, _h)
fun_name.append(sname)
expansions.append(result[0])
goal_test.append(result[1])
new_nodes.append(result[2])
elapsed_time.append(result[3])
steps.append(len(result[4].solution()) )
nodes.append([sname,result[4]])
table_dict = OrderedDict()
table_dict["Function Name"] = fun_name
table_dict["Solution Steps"] = steps
table_dict["Expansions"] = expansions
table_dict["Goal Tests"] = goal_test
table_dict["New_Nodes"] = new_nodes
table_dict["Elapsed Seconds"] = elapsed_time
dataframe = pd.DataFrame(table_dict)
dataframe.index +=1
return dataframe, nodes
| true
| true
|
f719e4fc1c2f57473dc26131829f497ab8dd2ff2
| 854
|
py
|
Python
|
autogl/module/nas/estimator/one_shot.py
|
THUMNLab/AutoGL
|
9dfcabcda41620a7d12d6322f0e52e68dd7dcec4
|
[
"Apache-2.0"
] | 824
|
2020-11-30T14:38:07.000Z
|
2022-03-19T10:14:04.000Z
|
autogl/module/nas/estimator/one_shot.py
|
MitchellTesla/AutoGL
|
7b551961e90f5042d9b91d92c083f3f09dd9dbdd
|
[
"Apache-2.0"
] | 38
|
2020-12-21T12:32:57.000Z
|
2022-01-31T02:32:05.000Z
|
autogl/module/nas/estimator/one_shot.py
|
MitchellTesla/AutoGL
|
7b551961e90f5042d9b91d92c083f3f09dd9dbdd
|
[
"Apache-2.0"
] | 85
|
2020-12-21T05:16:09.000Z
|
2022-03-28T08:44:22.000Z
|
import torch.nn as nn
import torch.nn.functional as F
from . import register_nas_estimator
from ..space import BaseSpace
from .base import BaseEstimator
@register_nas_estimator("oneshot")
class OneShotEstimator(BaseEstimator):
"""
One shot estimator.
Use model directly to get estimations.
"""
def infer(self, model: BaseSpace, dataset, mask="train"):
device = next(model.parameters()).device
dset = dataset[0].to(device)
pred = model(dset)[getattr(dset, f"{mask}_mask")]
y = dset.y[getattr(dset, f"{mask}_mask")]
loss = getattr(F, self.loss_f)(pred, y)
# acc=sum(pred.max(1)[1]==y).item()/y.size(0)
probs = F.softmax(pred, dim=1).detach().cpu().numpy()
y = y.cpu()
metrics = [eva.evaluate(probs, y) for eva in self.evaluation]
return metrics, loss
| 30.5
| 69
| 0.640515
|
import torch.nn as nn
import torch.nn.functional as F
from . import register_nas_estimator
from ..space import BaseSpace
from .base import BaseEstimator
@register_nas_estimator("oneshot")
class OneShotEstimator(BaseEstimator):
def infer(self, model: BaseSpace, dataset, mask="train"):
device = next(model.parameters()).device
dset = dataset[0].to(device)
pred = model(dset)[getattr(dset, f"{mask}_mask")]
y = dset.y[getattr(dset, f"{mask}_mask")]
loss = getattr(F, self.loss_f)(pred, y)
probs = F.softmax(pred, dim=1).detach().cpu().numpy()
y = y.cpu()
metrics = [eva.evaluate(probs, y) for eva in self.evaluation]
return metrics, loss
| true
| true
|
f719e58172c6f8335918d776edd53b5bed9dae39
| 13,398
|
py
|
Python
|
vissl/optimizers/optimizer_helper.py
|
tjdbsrud/vissl
|
1cf1ee0c82c8a0d65544b82a6fc2f28c7d5eb175
|
[
"MIT"
] | 3
|
2021-07-08T15:06:49.000Z
|
2021-08-13T18:55:02.000Z
|
vissl/optimizers/optimizer_helper.py
|
pzharrington/vissl
|
b647c256447af7ea66655811849be1f642377db8
|
[
"MIT"
] | 2
|
2021-07-25T15:46:07.000Z
|
2021-08-11T10:08:53.000Z
|
vissl/optimizers/optimizer_helper.py
|
pzharrington/vissl
|
b647c256447af7ea66655811849be1f642377db8
|
[
"MIT"
] | 2
|
2021-07-08T15:15:55.000Z
|
2021-08-25T14:16:01.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Any, List
import torch.nn as nn
from vissl.utils.misc import is_apex_available
_CONV_TYPES = (nn.Conv1d, nn.Conv2d, nn.Conv3d)
_NORM_TYPES = (
nn.BatchNorm1d,
nn.BatchNorm2d,
nn.BatchNorm3d,
nn.SyncBatchNorm, # pytorch SyncBN
nn.LayerNorm,
)
if is_apex_available():
import apex
_NORM_TYPES += (apex.parallel.SyncBatchNorm,)
def _get_bn_optimizer_params(
module, regularized_params, unregularized_params, optimizer_config
):
"""
Given the (Sync)BatchNorm module in the model, we separate the module params
into regularized or non-regularized (weight_decay=0).
"""
# this is called by get_optimizer_params for BN specific layer only
if module.weight is not None:
if optimizer_config["regularize_bn"]:
regularized_params.append(module.weight)
else:
unregularized_params.append(module.weight)
if module.bias is not None:
if optimizer_config["regularize_bn"] and optimizer_config["regularize_bias"]:
regularized_params.append(module.bias)
else:
unregularized_params.append(module.bias)
return regularized_params, unregularized_params
def _filter_trainable(param_list: List[Any]) -> List[Any]:
"""
Keep on the trainable parameters of the model and return the list of
trainable params.
"""
# Keep only the trainable params
return list(filter(lambda x: x.requires_grad, param_list))
def _assign_regularized_params(
regularized_param_list=None,
unregularized_param_list=None,
parameters_to_unregularize=None,
):
"""
Takes a list parameters_to_unregularize (a list of parameters to ensure are
not regularized) and compares it to regularized_param_list, a list of
regularized parameters. Any parameters in parameters_to_unregularize that
are present in regularized_param_list are removed from
regularized_param_list. Will also check against an optional
unregularized_param_list (pre-existing list of parameters not to regularize)
and remove any items from parameters_to_unregularize that are in
unregularized_param_list. Used for when we have parameters that we don't
want to regularize (e.g. the class token and position embeddings for the
vision transformer). See config.OPTIMIZER.non_regularized_params. Needs
to be called separately for head, trunk, and remaining params.
"""
indices_to_remove_from_regularized = []
indices_to_remove_from_new_unregularized = []
# Iterate through new parameters to unregularize
for unreg_param_ind, new_unreg_param in enumerate(parameters_to_unregularize):
# Iterate through list of regularized parameters
for reg_param_ind, reg_param in enumerate(regularized_param_list):
# Note any matchess
if reg_param is new_unreg_param:
indices_to_remove_from_regularized.append(reg_param_ind)
if unregularized_param_list:
# Iterate through pre-existing list of unregularized parameters
for unreg_param in unregularized_param_list:
# Note any matches
if unreg_param is new_unreg_param:
indices_to_remove_from_new_unregularized.append(unreg_param_ind)
indices_to_remove_from_regularized.sort(reverse=True)
# Iterate through indices to remove from list regularized params and
# remove them
for i in indices_to_remove_from_regularized:
del regularized_param_list[i]
if unregularized_param_list:
indices_to_remove_from_new_unregularized.sort(reverse=True)
# Iterate through indices to remove from new list of unregularized
# parameters
for i in indices_to_remove_from_new_unregularized:
del parameters_to_unregularize[i]
return parameters_to_unregularize, regularized_param_list, unregularized_param_list
def get_optimizer_param_groups(
model, model_config, optimizer_config, optimizer_schedulers
):
"""
Go through all the layers, sort out which parameters should be regularized,
unregularized and optimization settings for the head/trunk. We filter
the trainable params only and add them to the param_groups.
Returns:
param_groups (List[Dict]): [
{
"params": trunk_regularized_params, "lr": lr_value,
"weight_decay": wd_value,
},
{
"params": trunk_unregularized_params, "lr": lr_value,
"weight_decay": 0.0,
},
{
"params": head_regularized_params, "lr": head_lr_value,
"weight_decay": head_weight_decay,
},
{
"params": head_unregularized_params, "lr": head_lr_value,
"weight_decay": 0.0,
},
{
"params": remaining_regularized_params, "lr": lr_value
}
]
"""
if "weight_decay" in optimizer_schedulers:
weight_decay_main_config = optimizer_schedulers["weight_decay"]
else:
weight_decay_main_config = optimizer_config.weight_decay
if "weight_decay_head" in optimizer_schedulers:
weight_decay_head_main_config = optimizer_schedulers["weight_decay_head"]
else:
weight_decay_head_main_config = (
optimizer_config.head_optimizer_params.weight_decay
)
if optimizer_config.construct_single_param_group_only:
# If single param_group is asked, we just use the parameters
# returned from model.parameters(). This is useful in FSDP
# param flattening mode.
return [
{
"params": list(model.parameters()),
"lr": optimizer_schedulers["lr"],
"weight_decay": weight_decay_main_config,
}
]
# if the different LR, weight decay value for head is not specified, we use the
# same LR/wd as trunk.
if not optimizer_config.head_optimizer_params.use_different_lr:
assert "lr_head" in optimizer_schedulers
# we create 4 params groups: trunk regularized, trunk unregularized, head
# regularized and head unregularized. Unregularized can contain BN layers.
trunk_regularized_params, trunk_unregularized_params = [], []
head_regularized_params, head_unregularized_params = [], []
# for anything else
regularized_params = []
unregularized_params = []
for name, module in model.named_modules():
# head, Linear/Conv layer
if "head" in name and (
isinstance(module, nn.Linear) or isinstance(module, _CONV_TYPES)
):
# weight normalized linear layers, used in swav_prototypes_head for example,
# have "weight_g" and "weight_v" parameters in place of "weight"
if hasattr(module, "weight_g"):
head_regularized_params.append(module.weight_g)
head_regularized_params.append(module.weight_v)
else:
head_regularized_params.append(module.weight)
if module.bias is not None:
if optimizer_config["regularize_bias"]:
head_regularized_params.append(module.bias)
else:
head_unregularized_params.append(module.bias)
# head, BN/LN layer
elif "head" in name and isinstance(module, _NORM_TYPES):
(
head_regularized_params,
head_unregularized_params,
) = _get_bn_optimizer_params(
module,
head_regularized_params,
head_unregularized_params,
optimizer_config,
)
# trunk, Linear/Conv
elif isinstance(module, nn.Linear) or isinstance(module, _CONV_TYPES):
if hasattr(module, "weight_g"): # weight_norm linear layers
trunk_regularized_params.append(module.weight_g)
trunk_regularized_params.append(module.weight_v)
else:
trunk_regularized_params.append(module.weight)
if module.bias is not None:
if optimizer_config["regularize_bias"]:
trunk_regularized_params.append(module.bias)
else:
trunk_unregularized_params.append(module.bias)
# trunk, BN/LN layer
elif isinstance(module, _NORM_TYPES):
(
trunk_regularized_params,
trunk_unregularized_params,
) = _get_bn_optimizer_params(
module,
trunk_regularized_params,
trunk_unregularized_params,
optimizer_config,
)
elif len(list(module.children())) >= 0:
# for any other layers not bn_types, conv_types or nn.Linear, if
# the layers are the leaf nodes and have parameters, we regularize
# them. Similarly, if non-leaf nodes but have parameters, regularize
# them (set recurse=False)
for params in module.parameters(recurse=False):
regularized_params.append(params)
# Collect user-specified non-regularized params and remove them for the
# lists of regularized params, and check they're not already on the lists
# of unregularized params
if optimizer_config.non_regularized_parameters:
non_reg_param_names = optimizer_config.non_regularized_parameters
for name, param in model.named_parameters():
hits = [p for p in non_reg_param_names if p in name]
if any(hits):
unregularized_params.append(param)
# Call for trunk params
(
non_reg_params,
trunk_regularized_params,
trunk_unregularized_params,
) = _assign_regularized_params(
parameters_to_unregularize=unregularized_params,
regularized_param_list=trunk_regularized_params,
unregularized_param_list=trunk_unregularized_params,
)
# Call for head params
(
non_reg_params,
head_regularized_params,
head_unregularized_params,
) = _assign_regularized_params(
parameters_to_unregularize=unregularized_params,
regularized_param_list=head_regularized_params,
unregularized_param_list=head_unregularized_params,
)
# Call for remaining params
non_reg_params, regularized_params, _ = _assign_regularized_params(
parameters_to_unregularize=unregularized_params,
regularized_param_list=regularized_params,
)
# for non-trainable params, set the requires_grad to False
non_trainable_params = []
for name, param in model.named_parameters():
if name in model_config.NON_TRAINABLE_PARAMS:
param.requires_grad = False
non_trainable_params.append(param)
trainable_params = _filter_trainable(model.parameters())
trunk_regularized_params = _filter_trainable(trunk_regularized_params)
trunk_unregularized_params = _filter_trainable(trunk_unregularized_params)
head_regularized_params = _filter_trainable(head_regularized_params)
head_unregularized_params = _filter_trainable(head_unregularized_params)
regularized_params = _filter_trainable(regularized_params)
logging.info(
f"\nTrainable params: {len(trainable_params)}, \n"
f"Non-Trainable params: {len(non_trainable_params)}, \n"
f"Trunk Regularized Parameters: {len(trunk_regularized_params)}, \n"
f"Trunk Unregularized Parameters {len(trunk_unregularized_params)}, \n"
f"Head Regularized Parameters: {len(head_regularized_params)}, \n"
f"Head Unregularized Parameters: {len(head_unregularized_params)} \n"
f"Remaining Regularized Parameters: {len(regularized_params)} \n"
f"Remaining Unregularized Parameters: {len(unregularized_params)}"
)
param_groups = [
{
"params": trunk_regularized_params,
"lr": optimizer_schedulers["lr"],
"weight_decay": weight_decay_main_config,
},
{
"params": trunk_unregularized_params,
"lr": optimizer_schedulers["lr"],
"weight_decay": 0.0,
},
{
"params": head_regularized_params,
"lr": optimizer_schedulers["lr_head"],
"weight_decay": weight_decay_head_main_config,
},
{
"params": head_unregularized_params,
"lr": optimizer_schedulers["lr_head"],
"weight_decay": 0.0,
},
]
if len(regularized_params) > 0:
param_groups.append(
{
"params": regularized_params,
"lr": optimizer_schedulers["lr"],
"weight_decay": weight_decay_main_config,
}
)
if len(unregularized_params) > 0:
param_groups.append(
{
"params": unregularized_params,
"lr": optimizer_schedulers["lr"],
"weight_decay": 0.0,
}
)
return param_groups
| 40.847561
| 88
| 0.657785
|
import logging
from typing import Any, List
import torch.nn as nn
from vissl.utils.misc import is_apex_available
_CONV_TYPES = (nn.Conv1d, nn.Conv2d, nn.Conv3d)
_NORM_TYPES = (
nn.BatchNorm1d,
nn.BatchNorm2d,
nn.BatchNorm3d,
nn.SyncBatchNorm,
nn.LayerNorm,
)
if is_apex_available():
import apex
_NORM_TYPES += (apex.parallel.SyncBatchNorm,)
def _get_bn_optimizer_params(
module, regularized_params, unregularized_params, optimizer_config
):
if module.weight is not None:
if optimizer_config["regularize_bn"]:
regularized_params.append(module.weight)
else:
unregularized_params.append(module.weight)
if module.bias is not None:
if optimizer_config["regularize_bn"] and optimizer_config["regularize_bias"]:
regularized_params.append(module.bias)
else:
unregularized_params.append(module.bias)
return regularized_params, unregularized_params
def _filter_trainable(param_list: List[Any]) -> List[Any]:
return list(filter(lambda x: x.requires_grad, param_list))
def _assign_regularized_params(
regularized_param_list=None,
unregularized_param_list=None,
parameters_to_unregularize=None,
):
indices_to_remove_from_regularized = []
indices_to_remove_from_new_unregularized = []
for unreg_param_ind, new_unreg_param in enumerate(parameters_to_unregularize):
for reg_param_ind, reg_param in enumerate(regularized_param_list):
if reg_param is new_unreg_param:
indices_to_remove_from_regularized.append(reg_param_ind)
if unregularized_param_list:
for unreg_param in unregularized_param_list:
if unreg_param is new_unreg_param:
indices_to_remove_from_new_unregularized.append(unreg_param_ind)
indices_to_remove_from_regularized.sort(reverse=True)
for i in indices_to_remove_from_regularized:
del regularized_param_list[i]
if unregularized_param_list:
indices_to_remove_from_new_unregularized.sort(reverse=True)
for i in indices_to_remove_from_new_unregularized:
del parameters_to_unregularize[i]
return parameters_to_unregularize, regularized_param_list, unregularized_param_list
def get_optimizer_param_groups(
model, model_config, optimizer_config, optimizer_schedulers
):
if "weight_decay" in optimizer_schedulers:
weight_decay_main_config = optimizer_schedulers["weight_decay"]
else:
weight_decay_main_config = optimizer_config.weight_decay
if "weight_decay_head" in optimizer_schedulers:
weight_decay_head_main_config = optimizer_schedulers["weight_decay_head"]
else:
weight_decay_head_main_config = (
optimizer_config.head_optimizer_params.weight_decay
)
if optimizer_config.construct_single_param_group_only:
return [
{
"params": list(model.parameters()),
"lr": optimizer_schedulers["lr"],
"weight_decay": weight_decay_main_config,
}
]
if not optimizer_config.head_optimizer_params.use_different_lr:
assert "lr_head" in optimizer_schedulers
trunk_regularized_params, trunk_unregularized_params = [], []
head_regularized_params, head_unregularized_params = [], []
regularized_params = []
unregularized_params = []
for name, module in model.named_modules():
if "head" in name and (
isinstance(module, nn.Linear) or isinstance(module, _CONV_TYPES)
):
if hasattr(module, "weight_g"):
head_regularized_params.append(module.weight_g)
head_regularized_params.append(module.weight_v)
else:
head_regularized_params.append(module.weight)
if module.bias is not None:
if optimizer_config["regularize_bias"]:
head_regularized_params.append(module.bias)
else:
head_unregularized_params.append(module.bias)
elif "head" in name and isinstance(module, _NORM_TYPES):
(
head_regularized_params,
head_unregularized_params,
) = _get_bn_optimizer_params(
module,
head_regularized_params,
head_unregularized_params,
optimizer_config,
)
elif isinstance(module, nn.Linear) or isinstance(module, _CONV_TYPES):
if hasattr(module, "weight_g"):
trunk_regularized_params.append(module.weight_g)
trunk_regularized_params.append(module.weight_v)
else:
trunk_regularized_params.append(module.weight)
if module.bias is not None:
if optimizer_config["regularize_bias"]:
trunk_regularized_params.append(module.bias)
else:
trunk_unregularized_params.append(module.bias)
elif isinstance(module, _NORM_TYPES):
(
trunk_regularized_params,
trunk_unregularized_params,
) = _get_bn_optimizer_params(
module,
trunk_regularized_params,
trunk_unregularized_params,
optimizer_config,
)
elif len(list(module.children())) >= 0:
for params in module.parameters(recurse=False):
regularized_params.append(params)
# of unregularized params
if optimizer_config.non_regularized_parameters:
non_reg_param_names = optimizer_config.non_regularized_parameters
for name, param in model.named_parameters():
hits = [p for p in non_reg_param_names if p in name]
if any(hits):
unregularized_params.append(param)
# Call for trunk params
(
non_reg_params,
trunk_regularized_params,
trunk_unregularized_params,
) = _assign_regularized_params(
parameters_to_unregularize=unregularized_params,
regularized_param_list=trunk_regularized_params,
unregularized_param_list=trunk_unregularized_params,
)
# Call for head params
(
non_reg_params,
head_regularized_params,
head_unregularized_params,
) = _assign_regularized_params(
parameters_to_unregularize=unregularized_params,
regularized_param_list=head_regularized_params,
unregularized_param_list=head_unregularized_params,
)
# Call for remaining params
non_reg_params, regularized_params, _ = _assign_regularized_params(
parameters_to_unregularize=unregularized_params,
regularized_param_list=regularized_params,
)
# for non-trainable params, set the requires_grad to False
non_trainable_params = []
for name, param in model.named_parameters():
if name in model_config.NON_TRAINABLE_PARAMS:
param.requires_grad = False
non_trainable_params.append(param)
trainable_params = _filter_trainable(model.parameters())
trunk_regularized_params = _filter_trainable(trunk_regularized_params)
trunk_unregularized_params = _filter_trainable(trunk_unregularized_params)
head_regularized_params = _filter_trainable(head_regularized_params)
head_unregularized_params = _filter_trainable(head_unregularized_params)
regularized_params = _filter_trainable(regularized_params)
logging.info(
f"\nTrainable params: {len(trainable_params)}, \n"
f"Non-Trainable params: {len(non_trainable_params)}, \n"
f"Trunk Regularized Parameters: {len(trunk_regularized_params)}, \n"
f"Trunk Unregularized Parameters {len(trunk_unregularized_params)}, \n"
f"Head Regularized Parameters: {len(head_regularized_params)}, \n"
f"Head Unregularized Parameters: {len(head_unregularized_params)} \n"
f"Remaining Regularized Parameters: {len(regularized_params)} \n"
f"Remaining Unregularized Parameters: {len(unregularized_params)}"
)
param_groups = [
{
"params": trunk_regularized_params,
"lr": optimizer_schedulers["lr"],
"weight_decay": weight_decay_main_config,
},
{
"params": trunk_unregularized_params,
"lr": optimizer_schedulers["lr"],
"weight_decay": 0.0,
},
{
"params": head_regularized_params,
"lr": optimizer_schedulers["lr_head"],
"weight_decay": weight_decay_head_main_config,
},
{
"params": head_unregularized_params,
"lr": optimizer_schedulers["lr_head"],
"weight_decay": 0.0,
},
]
if len(regularized_params) > 0:
param_groups.append(
{
"params": regularized_params,
"lr": optimizer_schedulers["lr"],
"weight_decay": weight_decay_main_config,
}
)
if len(unregularized_params) > 0:
param_groups.append(
{
"params": unregularized_params,
"lr": optimizer_schedulers["lr"],
"weight_decay": 0.0,
}
)
return param_groups
| true
| true
|
f719e5ce46b0b141781817964a94f8d39288893c
| 6,959
|
py
|
Python
|
src/ggrc_workflows/migrations/versions/20150707143127_44047daa31a9_add_non_adjusted_next_cycle_start_date.py
|
trevordonnelly/ggrc-core
|
499cf0d3cce70737b080991b12c203ec22015cea
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2018-03-30T11:28:48.000Z
|
2018-03-30T11:28:48.000Z
|
src/ggrc_workflows/migrations/versions/20150707143127_44047daa31a9_add_non_adjusted_next_cycle_start_date.py
|
trevordonnelly/ggrc-core
|
499cf0d3cce70737b080991b12c203ec22015cea
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc_workflows/migrations/versions/20150707143127_44047daa31a9_add_non_adjusted_next_cycle_start_date.py
|
trevordonnelly/ggrc-core
|
499cf0d3cce70737b080991b12c203ec22015cea
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Add non-adjusted next cycle start date
Revision ID: 44047daa31a9
Revises: 1431e7094e26
Create Date: 2015-07-07 14:31:27.780564
"""
# Workaround legacy code which blocks Workflow new attribute addition
# flake8: noqa
# pylint: skip-file
# revision identifiers, used by Alembic.
revision = '44047daa31a9'
down_revision = '4840f4760f4b'
from alembic import op
import sqlalchemy as sa
# from sqlalchemy.dialects import mysql
# from datetime import date
# from ggrc.app import app
# from ggrc import settings, db
# import ggrc_workflows.models as models
# from ggrc_workflows import adjust_next_cycle_start_date
# from ggrc_workflows.services.workflow_cycle_calculator import \
# get_cycle_calculator
def upgrade():
op.add_column('workflows',
sa.Column('non_adjusted_next_cycle_start_date',
sa.Date(), nullable=True))
# Workaround legacy code which blocks Workflow new attribute addition
return
# If somebody deleted all the tasks we must clear the next cycle start
# date
workflows = db.session.query(models.Workflow) \
.filter(
models.Workflow.next_cycle_start_date != None,
models.Workflow.recurrences == True,
models.Workflow.status == 'Active',
models.Workflow.next_cycle_start_date < date.today()
).all()
for workflow in workflows:
tasks_start_days = [task.relative_start_day
for tg in workflow.task_groups
for task in tg.task_group_tasks]
tasks_end_days = [task.relative_end_day
for tg in workflow.task_groups
for task in tg.task_group_tasks]
if ((not all(tasks_start_days) and not all(tasks_end_days)) or
(not tasks_start_days and not tasks_end_days)):
app.logger.warning(
"Removing NCSD from expired WF {} because no tasks are "
"set up. Current NCSD: {}".format(
workflow.id,
workflow.next_cycle_start_date
))
workflow.next_cycle_start_date = None
db.session.add(workflow)
workflows = db.session.query(models.Workflow) \
.filter(
models.Workflow.next_cycle_start_date != None,
models.Workflow.non_adjusted_next_cycle_start_date == None,
models.Workflow.recurrences == True,
models.Workflow.status == 'Active',
models.Workflow.next_cycle_start_date >= date.today()
).all()
for workflow in workflows:
tasks_start_days = [task.relative_start_day
for tg in workflow.task_groups
for task in tg.task_group_tasks]
tasks_end_days = [task.relative_end_day
for tg in workflow.task_groups
for task in tg.task_group_tasks]
# We must skip tasks that don't have start days and end days defined
if ((not all(tasks_start_days) and not all(tasks_end_days)) or
(not tasks_start_days and not tasks_end_days)):
append_msg = ""
if workflow.next_cycle_start_date:
workflow.next_cycle_start_date = None
append_msg += (" Removing existing next cycle start date "
"because none are configured.")
db.session.add(workflow)
app.logger.warning(
"Skipping active WF {0} because no tasks "
"are set up.{1}".format(
workflow.id,
append_msg
))
continue
pre_compute_ncsd = workflow.next_cycle_start_date
last_cycle_start_date = None
if workflow.cycles:
last_cycle_start_date = max([c.start_date for c in workflow.cycles])
if last_cycle_start_date:
base_date = last_cycle_start_date
else:
base_date = base_date.today()
base_date = max(base_date, workflow.next_cycle_start_date)
calculator = get_cycle_calculator(workflow, base_date=base_date)
if workflow.frequency in {"weekly", "monthly"}:
nancsd_day = min(
v['relative_start'] for v in calculator.reified_tasks.values())
nancsd_month = None
else:
nancsd_month, nancsd_day = min(
v['relative_start'] for v in calculator.reified_tasks.values())
nancsd_date = calculator.relative_day_to_date(
relative_day=nancsd_day,
relative_month=nancsd_month,
base_date=base_date)
if last_cycle_start_date:
while calculator.adjust_date(nancsd_date) <= last_cycle_start_date:
base_date = base_date + calculator.time_delta
nancsd_date = calculator.relative_day_to_date(
relative_day=nancsd_day,
relative_month=nancsd_month,
base_date=base_date
)
else:
base_date = base_date - calculator.time_delta
while calculator.adjust_date(nancsd_date) <= pre_compute_ncsd:
base_date = base_date + calculator.time_delta
nancsd_date = calculator.relative_day_to_date(
relative_day=nancsd_day,
relative_month=nancsd_month,
base_date=base_date
)
workflow.non_adjusted_next_cycle_start_date = nancsd_date
workflow.next_cycle_start_date = calculator.adjust_date(nancsd_date)
post_compute_ncsd = workflow.next_cycle_start_date
start_dates = ["{}/{}".format(
task.relative_start_month,
task.relative_start_day) for tg in workflow.task_groups
for task in tg.task_group_tasks]
end_dates = ["{}/{}".format(
task.relative_end_month,
task.relative_end_day) for tg in workflow.task_groups
for task in tg.task_group_tasks]
if pre_compute_ncsd != post_compute_ncsd:
app.logger.warning(
"Adjusted NCSD for workflow {}. "
"Freq: {}, PRE: {}, Last cycle: {}, POST: {}, NON: {},"
"tasks start: {}, tasks end: {},".format(
workflow.id,
workflow.frequency[:2],
pre_compute_ncsd,
last_cycle_start_date,
post_compute_ncsd,
workflow.non_adjusted_next_cycle_start_date,
start_dates,
end_dates))
db.session.add(workflow)
# Save
db.session.commit()
def downgrade():
op.drop_column('workflows', 'non_adjusted_next_cycle_start_date')
| 38.027322
| 80
| 0.601523
|
revision = '44047daa31a9'
down_revision = '4840f4760f4b'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('workflows',
sa.Column('non_adjusted_next_cycle_start_date',
sa.Date(), nullable=True))
return
workflows = db.session.query(models.Workflow) \
.filter(
models.Workflow.next_cycle_start_date != None,
models.Workflow.recurrences == True,
models.Workflow.status == 'Active',
models.Workflow.next_cycle_start_date < date.today()
).all()
for workflow in workflows:
tasks_start_days = [task.relative_start_day
for tg in workflow.task_groups
for task in tg.task_group_tasks]
tasks_end_days = [task.relative_end_day
for tg in workflow.task_groups
for task in tg.task_group_tasks]
if ((not all(tasks_start_days) and not all(tasks_end_days)) or
(not tasks_start_days and not tasks_end_days)):
app.logger.warning(
"Removing NCSD from expired WF {} because no tasks are "
"set up. Current NCSD: {}".format(
workflow.id,
workflow.next_cycle_start_date
))
workflow.next_cycle_start_date = None
db.session.add(workflow)
workflows = db.session.query(models.Workflow) \
.filter(
models.Workflow.next_cycle_start_date != None,
models.Workflow.non_adjusted_next_cycle_start_date == None,
models.Workflow.recurrences == True,
models.Workflow.status == 'Active',
models.Workflow.next_cycle_start_date >= date.today()
).all()
for workflow in workflows:
tasks_start_days = [task.relative_start_day
for tg in workflow.task_groups
for task in tg.task_group_tasks]
tasks_end_days = [task.relative_end_day
for tg in workflow.task_groups
for task in tg.task_group_tasks]
if ((not all(tasks_start_days) and not all(tasks_end_days)) or
(not tasks_start_days and not tasks_end_days)):
append_msg = ""
if workflow.next_cycle_start_date:
workflow.next_cycle_start_date = None
append_msg += (" Removing existing next cycle start date "
"because none are configured.")
db.session.add(workflow)
app.logger.warning(
"Skipping active WF {0} because no tasks "
"are set up.{1}".format(
workflow.id,
append_msg
))
continue
pre_compute_ncsd = workflow.next_cycle_start_date
last_cycle_start_date = None
if workflow.cycles:
last_cycle_start_date = max([c.start_date for c in workflow.cycles])
if last_cycle_start_date:
base_date = last_cycle_start_date
else:
base_date = base_date.today()
base_date = max(base_date, workflow.next_cycle_start_date)
calculator = get_cycle_calculator(workflow, base_date=base_date)
if workflow.frequency in {"weekly", "monthly"}:
nancsd_day = min(
v['relative_start'] for v in calculator.reified_tasks.values())
nancsd_month = None
else:
nancsd_month, nancsd_day = min(
v['relative_start'] for v in calculator.reified_tasks.values())
nancsd_date = calculator.relative_day_to_date(
relative_day=nancsd_day,
relative_month=nancsd_month,
base_date=base_date)
if last_cycle_start_date:
while calculator.adjust_date(nancsd_date) <= last_cycle_start_date:
base_date = base_date + calculator.time_delta
nancsd_date = calculator.relative_day_to_date(
relative_day=nancsd_day,
relative_month=nancsd_month,
base_date=base_date
)
else:
base_date = base_date - calculator.time_delta
while calculator.adjust_date(nancsd_date) <= pre_compute_ncsd:
base_date = base_date + calculator.time_delta
nancsd_date = calculator.relative_day_to_date(
relative_day=nancsd_day,
relative_month=nancsd_month,
base_date=base_date
)
workflow.non_adjusted_next_cycle_start_date = nancsd_date
workflow.next_cycle_start_date = calculator.adjust_date(nancsd_date)
post_compute_ncsd = workflow.next_cycle_start_date
start_dates = ["{}/{}".format(
task.relative_start_month,
task.relative_start_day) for tg in workflow.task_groups
for task in tg.task_group_tasks]
end_dates = ["{}/{}".format(
task.relative_end_month,
task.relative_end_day) for tg in workflow.task_groups
for task in tg.task_group_tasks]
if pre_compute_ncsd != post_compute_ncsd:
app.logger.warning(
"Adjusted NCSD for workflow {}. "
"Freq: {}, PRE: {}, Last cycle: {}, POST: {}, NON: {},"
"tasks start: {}, tasks end: {},".format(
workflow.id,
workflow.frequency[:2],
pre_compute_ncsd,
last_cycle_start_date,
post_compute_ncsd,
workflow.non_adjusted_next_cycle_start_date,
start_dates,
end_dates))
db.session.add(workflow)
# Save
db.session.commit()
def downgrade():
op.drop_column('workflows', 'non_adjusted_next_cycle_start_date')
| true
| true
|
f719e6b707f00ff2d2978971d22b48f62a159092
| 4,228
|
py
|
Python
|
vise/analyzer/dielectric_function.py
|
kumagai-group/vise
|
8adfe61ad8f31767ec562f02f271e2495f357cd4
|
[
"MIT"
] | 16
|
2020-07-14T13:14:05.000Z
|
2022-03-04T13:39:30.000Z
|
vise/analyzer/dielectric_function.py
|
kumagai-group/vise
|
8adfe61ad8f31767ec562f02f271e2495f357cd4
|
[
"MIT"
] | 10
|
2021-03-15T20:47:45.000Z
|
2021-08-19T00:47:12.000Z
|
vise/analyzer/dielectric_function.py
|
kumagai-group/vise
|
8adfe61ad8f31767ec562f02f271e2495f357cd4
|
[
"MIT"
] | 6
|
2020-03-03T00:42:39.000Z
|
2022-02-22T02:34:47.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020. Distributed under the terms of the MIT License.
from dataclasses import dataclass
from math import sqrt, pi
from typing import List
import numpy as np
from monty.json import MSONable
from tqdm import tqdm
from vise.util.mix_in import ToJsonFileMixIn
from scipy.constants import physical_constants as pc
eV_to_inv_cm = pc["electron volt-inverse meter relationship"][0] / 100
def diele_func_to_coeff(freq, real, imag):
return (2 * sqrt(2) * pi * sqrt(sqrt(real ** 2 + imag ** 2) - real)
* freq * eV_to_inv_cm)
@dataclass
class DieleFuncData(MSONable, ToJsonFileMixIn):
energies: List[float] # in eV
diele_func_real: List[List[float]] # [xx, yy, zz, xy, yz, xz]
diele_func_imag: List[List[float]] # [xx, yy, zz, xy, yz, xz]
band_gap: float # in eV
@property
def ave_absorption_coeff(self):
reals = [sum(self.diele_func_real[i][:3]) / 3
for i in range(len(self.energies))]
imags = [sum(self.diele_func_imag[i][:3]) / 3
for i in range(len(self.energies))]
return [diele_func_to_coeff(freq, real, imag)
for freq, real, imag in zip(self.energies, reals, imags)]
def target_coeff_min_e(self, target_coeff: float = 10**4):
for e, coeff in zip(self.energies, self.ave_absorption_coeff):
if coeff > target_coeff:
return e
return None
def make_shifted_diele_func(diele_func_data: DieleFuncData,
original_band_gap: float,
shift: float) -> DieleFuncData:
imag = imag_shift(diele_func_data.diele_func_imag,
diele_func_data.energies,
original_band_gap + shift, shift)
real = kramers_kronig_trans(imag, diele_func_data.energies)
return DieleFuncData(diele_func_data.energies,
real.tolist(),
imag.tolist(),
original_band_gap + shift)
def imag_shift(diele_func_imag: List[List[float]],
energies: List[float],
band_gap: float,
shift: float) -> np.ndarray:
energies = np.array(energies)
assert shift > 0
result = []
for energy_grid in energies:
old_e = energy_grid - shift
right_idx = np.argwhere(energies > old_e)[0][0]
left_e, right_e = energies[right_idx - 1], energies[right_idx]
# linear interpolation
left_ratio = (right_e - old_e) / (right_e - left_e)
inner_result = []
for imag_idx in range(6):
if energy_grid < band_gap:
inner_result.append(0.0)
else:
old_diele = \
diele_func_imag[right_idx - 1][imag_idx] * left_ratio + \
diele_func_imag[right_idx][imag_idx] * (1 - left_ratio)
inner_result.append(
old_diele * (energy_grid - shift) / energy_grid)
result.append(inner_result)
return np.array(result)
def kramers_kronig_trans(diele_func_imag: np.array,
energies: List[float],
ita: float = 0.01) -> np.ndarray:
mesh = energies[1] - energies[0]
result = []
ee2ss = [[e ** 2 - energy_grid ** 2 for e in energies]
for energy_grid in energies]
for imag_idx in tqdm(range(6)):
imags = diele_func_imag[:, imag_idx]
if imag_idx == 0 or \
(imag_idx > 0
and np.allclose(
imags, diele_func_imag[:, imag_idx - 1]) is False):
if np.count_nonzero(imags) == 0:
inner_result = [0.0] * len(energies)
else:
inner_result = []
for ee2s in ee2ss:
integrals = [e * imag * ee2 / (ee2 ** 2 + ita ** 2)
for e, ee2, imag in zip(energies, ee2s, imags)]
integral = sum(integrals) * mesh * 2 / pi
if imag_idx < 3:
integral += 1
inner_result.append(integral)
result.append(inner_result)
return np.array(result).T
| 37.087719
| 80
| 0.565989
|
from dataclasses import dataclass
from math import sqrt, pi
from typing import List
import numpy as np
from monty.json import MSONable
from tqdm import tqdm
from vise.util.mix_in import ToJsonFileMixIn
from scipy.constants import physical_constants as pc
eV_to_inv_cm = pc["electron volt-inverse meter relationship"][0] / 100
def diele_func_to_coeff(freq, real, imag):
return (2 * sqrt(2) * pi * sqrt(sqrt(real ** 2 + imag ** 2) - real)
* freq * eV_to_inv_cm)
@dataclass
class DieleFuncData(MSONable, ToJsonFileMixIn):
energies: List[float]
diele_func_real: List[List[float]]
diele_func_imag: List[List[float]]
band_gap: float
@property
def ave_absorption_coeff(self):
reals = [sum(self.diele_func_real[i][:3]) / 3
for i in range(len(self.energies))]
imags = [sum(self.diele_func_imag[i][:3]) / 3
for i in range(len(self.energies))]
return [diele_func_to_coeff(freq, real, imag)
for freq, real, imag in zip(self.energies, reals, imags)]
def target_coeff_min_e(self, target_coeff: float = 10**4):
for e, coeff in zip(self.energies, self.ave_absorption_coeff):
if coeff > target_coeff:
return e
return None
def make_shifted_diele_func(diele_func_data: DieleFuncData,
original_band_gap: float,
shift: float) -> DieleFuncData:
imag = imag_shift(diele_func_data.diele_func_imag,
diele_func_data.energies,
original_band_gap + shift, shift)
real = kramers_kronig_trans(imag, diele_func_data.energies)
return DieleFuncData(diele_func_data.energies,
real.tolist(),
imag.tolist(),
original_band_gap + shift)
def imag_shift(diele_func_imag: List[List[float]],
energies: List[float],
band_gap: float,
shift: float) -> np.ndarray:
energies = np.array(energies)
assert shift > 0
result = []
for energy_grid in energies:
old_e = energy_grid - shift
right_idx = np.argwhere(energies > old_e)[0][0]
left_e, right_e = energies[right_idx - 1], energies[right_idx]
left_ratio = (right_e - old_e) / (right_e - left_e)
inner_result = []
for imag_idx in range(6):
if energy_grid < band_gap:
inner_result.append(0.0)
else:
old_diele = \
diele_func_imag[right_idx - 1][imag_idx] * left_ratio + \
diele_func_imag[right_idx][imag_idx] * (1 - left_ratio)
inner_result.append(
old_diele * (energy_grid - shift) / energy_grid)
result.append(inner_result)
return np.array(result)
def kramers_kronig_trans(diele_func_imag: np.array,
energies: List[float],
ita: float = 0.01) -> np.ndarray:
mesh = energies[1] - energies[0]
result = []
ee2ss = [[e ** 2 - energy_grid ** 2 for e in energies]
for energy_grid in energies]
for imag_idx in tqdm(range(6)):
imags = diele_func_imag[:, imag_idx]
if imag_idx == 0 or \
(imag_idx > 0
and np.allclose(
imags, diele_func_imag[:, imag_idx - 1]) is False):
if np.count_nonzero(imags) == 0:
inner_result = [0.0] * len(energies)
else:
inner_result = []
for ee2s in ee2ss:
integrals = [e * imag * ee2 / (ee2 ** 2 + ita ** 2)
for e, ee2, imag in zip(energies, ee2s, imags)]
integral = sum(integrals) * mesh * 2 / pi
if imag_idx < 3:
integral += 1
inner_result.append(integral)
result.append(inner_result)
return np.array(result).T
| true
| true
|
f719e7006ad29396ce30e456e8d231c230206adc
| 2,488
|
py
|
Python
|
main.py
|
Kiny-Kiny/WordlistCreator
|
3492f8176959beca23fa22877f2923c74ca6bf89
|
[
"BSD-3-Clause"
] | 2
|
2021-10-31T15:38:55.000Z
|
2021-12-12T06:20:20.000Z
|
main.py
|
Kiny-Kiny/WordlistCreator
|
3492f8176959beca23fa22877f2923c74ca6bf89
|
[
"BSD-3-Clause"
] | null | null | null |
main.py
|
Kiny-Kiny/WordlistCreator
|
3492f8176959beca23fa22877f2923c74ca6bf89
|
[
"BSD-3-Clause"
] | null | null | null |
# Recomendação : Use apenas se seu computador/celular for bom.
# Autor : Kiny
# Pix : (61) 9603-5417
# Github : https://github.com/Kiny-Kiny
# WhatsApp : http://wa.me/552179180533
# Telegram : @K_iny
# Instagram : @parziovanni
# Twitter : @KinyBruno
############################################
'''Módulos'''
from itertools import product;
from sys import argv,stdout;
from time import sleep;
from os import system;
############################################
'''Cores'''
global R,B,C,G
R='\033[1;31m';
B='\033[1;34m';
C='\033[1;37m';
G='\033[1;32m';
############################################
'''Funções'''
def slow(msg):
for i in msg: stdout.write(i);sleep(0.007);stdout.flush();
def clear(): system('cls||clear');
############################################
'''Banner'''
logo=B+''' __ __ __ __ __ __ __
/\ \/ / /\ \ /\ "-.\ \ /\ \_\ \
\ \ _"-. \ \ \ \ \ \-. \ \ \____ \
\ \_\ \_\ \ \_\ \ \_\\"\_\ \/\_____\
\/_/\/_/ \/_/ \/_/ \/_/ \/_____/ \n'''+C
############################################
'''Parte de criação da Wordlist'''
def wordlist(i):
msg='';res = product('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_1234567890', repeat=i);
for g in res:
senha=''
for i in g: senha+=i
msg+=f'{senha}\n'
return msg
def main(min,max):
lis=[]
slow(
f'[{G}!{C}] Criando a WordList...\n'
)
for i in range(int(min),int(max)): lis.append(str(wordlist(i)));
msg='';
for i in lis: msg+=i
file=open('KingCrimson.txt','w+');
file.write(msg);
file.close();
clear();
slow(
f'{logo}\n[{G}Wordlist Criada!{C}] A wordlist foi criada e salva no arquivo KingCrimson.txt\n'
);
############################################
if int(len(argv)) < 3:
slow(
str(logo) + f'\n{G}- {C}Modo de Uso{G} : {C}python3 '+ str(argv[0]) + G+' {'+C+'Quantidade mínima'+G+'} {' +C+'Quantidade Máxima'+G+'}\n'+C
);exit();
try: int(argv[1]);int(argv[2]);
except: slow(
f'{logo}\n[{R}Error{C}] Use apenas números inteiros! (ex: 7)\n'
);exit();
if __name__=='__main__':
clear()
if int(argv[1]) == int(argv[2]):
slow(
f'{logo}\n[{R}Error{C}] A quantidade mínima não pode ser igual a quantidade máxima.\n'
);
elif int(argv[1]) > int(argv[2]):
slow(
f'{logo}\n[{R}Error{C}] A quantidade mínima não pode ser maior que a quantidade máxima.\n'
);
else:
try:
main(int(argv[1]),int(argv[2]));
except:
clear();
slow(
f'{logo}[{R}Error{C}] Erro Desconhecido.\n'
);
| 27.043478
| 140
| 0.513264
| true
| true
|
|
f719e8a05896b96ec0c6d21c07a0f99539976e6a
| 3,839
|
py
|
Python
|
apps/osis/tests/osisbasic__test.py
|
Jumpscale/jumpscale6_core
|
0502ddc1abab3c37ed982c142d21ea3955d471d3
|
[
"BSD-2-Clause"
] | 1
|
2015-10-26T10:38:13.000Z
|
2015-10-26T10:38:13.000Z
|
apps/osis/tests/osisbasic__test.py
|
Jumpscale/jumpscale6_core
|
0502ddc1abab3c37ed982c142d21ea3955d471d3
|
[
"BSD-2-Clause"
] | null | null | null |
apps/osis/tests/osisbasic__test.py
|
Jumpscale/jumpscale6_core
|
0502ddc1abab3c37ed982c142d21ea3955d471d3
|
[
"BSD-2-Clause"
] | null | null | null |
import unittest
import re
import time
from JumpScale import j
try:
import ujson as json
except:
import json
import random
descr = """
basic functioning of osis (test set)
"""
organization = "jumpscale"
author = "incubaid"
license = "bsd"
version = "1.0"
category = "osis.basic.testset"
enable=True
priority=1
send2osis=False
import JumpScale.grid.osis
class TEST(unittest.TestCase):
def randomMAC(self):
return j.base.idgenerator.generateGUID().replace("-","")
def setUp(self):
self.client = j.core.osis.getClientByInstance('main')
self.osisclient =j.core.osis.getClientForCategory(self.client, 'system', 'fake4test')
self.prefix = time.time()
def test_setGetBasicVerify(self):
# We first set some elements and verify the reponse
obj = self.osisclient.new()
obj.name = "test"
obj.netaddr = {"AABBCCDDEEFFGG": ['127.0.0.1', '127.0.0.2']}
ckeyOriginal=obj.getContentKey()
assert ckeyOriginal=='f7d877013a2d6c853092e55bad32435b'
assert obj.getUniqueKey()=='098f6bcd4621d373cade4e832627b4f6'
key,new,changed=self.osisclient.set(obj)
key2,new,changed=self.osisclient.set(obj)
print "2x save should have same key"
assert key==key2
print "check 2nd save new & changed are not new or changed"
assert new==False
assert changed==False
print "test content key does not get modified when set"
assert ckeyOriginal==obj.getContentKey()
print "retrieve obj from db"
obj2=self.osisclient.get(key)
print "test content key needs to remain same after fetching object"
assert ckeyOriginal==obj2.getContentKey()
obj.description="a descr"
print "obj needs to be different"
assert ckeyOriginal<>obj.getContentKey()
key3,new,changed=self.osisclient.set(obj)
print "check 3nd save new & changed are False,True for modified obj"
assert new==False
assert changed==True
print "key should be same"
assert key==key3
obj3=self.osisclient.get(key3)
print "guid should be same even after content change"
assert obj3.guid==key
print "verify id structure"
testresult = self.verify_id(key)
assert testresult==True
def test_set_and_self(self):
numbers = range(10)
items = self.client.list("system", "fake4test")
startnr = len(items)
for i in numbers:
obj = self.osisclient.new()
obj.name = "%s_%s" % (self.prefix, i)
obj.netaddr = {self.randomMAC(): ['127.0.0.1', '127.0.0.2']}
key, new, changed = self.osisclient.set(obj)
items = self.client.list("system", "fake4test")
assert len(items)== startnr + 10
def test_set_and_delete(self):
obj = self.osisclient.new()
obj.name = "%s_1" % self.prefix
obj.netaddr = {self.randomMAC(): ['127.0.0.1', '127.0.0.2']}
key, new, changed = self.osisclient.set(obj)
obj = self.client.get("system", "fake4test", key)
self.client.delete("system", "fake4test", key)
items = self.client.list("system", "fake4test")
if key in items:
deleted = False
else:
deleted = True
assert deleted==True
def test_find(self):
pass
def verify_id(self, id):
"""
This function verifies a id, e.g checks if its in the correct format
Id should be clusterid_objectid
Clusterid and objectid are both integers
"""
regex = '^\d+[_]\d+$'
if re.search(regex, id):
return True
else:
return False
def tearDown(self):
self.client.deleteNamespaceCategory("system","fake4test")
| 29.530769
| 93
| 0.616567
|
import unittest
import re
import time
from JumpScale import j
try:
import ujson as json
except:
import json
import random
descr = """
basic functioning of osis (test set)
"""
organization = "jumpscale"
author = "incubaid"
license = "bsd"
version = "1.0"
category = "osis.basic.testset"
enable=True
priority=1
send2osis=False
import JumpScale.grid.osis
class TEST(unittest.TestCase):
def randomMAC(self):
return j.base.idgenerator.generateGUID().replace("-","")
def setUp(self):
self.client = j.core.osis.getClientByInstance('main')
self.osisclient =j.core.osis.getClientForCategory(self.client, 'system', 'fake4test')
self.prefix = time.time()
def test_setGetBasicVerify(self):
obj = self.osisclient.new()
obj.name = "test"
obj.netaddr = {"AABBCCDDEEFFGG": ['127.0.0.1', '127.0.0.2']}
ckeyOriginal=obj.getContentKey()
assert ckeyOriginal=='f7d877013a2d6c853092e55bad32435b'
assert obj.getUniqueKey()=='098f6bcd4621d373cade4e832627b4f6'
key,new,changed=self.osisclient.set(obj)
key2,new,changed=self.osisclient.set(obj)
print "2x save should have same key"
assert key==key2
print "check 2nd save new & changed are not new or changed"
assert new==False
assert changed==False
print "test content key does not get modified when set"
assert ckeyOriginal==obj.getContentKey()
print "retrieve obj from db"
obj2=self.osisclient.get(key)
print "test content key needs to remain same after fetching object"
assert ckeyOriginal==obj2.getContentKey()
obj.description="a descr"
print "obj needs to be different"
assert ckeyOriginal<>obj.getContentKey()
key3,new,changed=self.osisclient.set(obj)
print "check 3nd save new & changed are False,True for modified obj"
assert new==False
assert changed==True
print "key should be same"
assert key==key3
obj3=self.osisclient.get(key3)
print "guid should be same even after content change"
assert obj3.guid==key
print "verify id structure"
testresult = self.verify_id(key)
assert testresult==True
def test_set_and_self(self):
numbers = range(10)
items = self.client.list("system", "fake4test")
startnr = len(items)
for i in numbers:
obj = self.osisclient.new()
obj.name = "%s_%s" % (self.prefix, i)
obj.netaddr = {self.randomMAC(): ['127.0.0.1', '127.0.0.2']}
key, new, changed = self.osisclient.set(obj)
items = self.client.list("system", "fake4test")
assert len(items)== startnr + 10
def test_set_and_delete(self):
obj = self.osisclient.new()
obj.name = "%s_1" % self.prefix
obj.netaddr = {self.randomMAC(): ['127.0.0.1', '127.0.0.2']}
key, new, changed = self.osisclient.set(obj)
obj = self.client.get("system", "fake4test", key)
self.client.delete("system", "fake4test", key)
items = self.client.list("system", "fake4test")
if key in items:
deleted = False
else:
deleted = True
assert deleted==True
def test_find(self):
pass
def verify_id(self, id):
"""
This function verifies a id, e.g checks if its in the correct format
Id should be clusterid_objectid
Clusterid and objectid are both integers
"""
regex = '^\d+[_]\d+$'
if re.search(regex, id):
return True
else:
return False
def tearDown(self):
self.client.deleteNamespaceCategory("system","fake4test")
| false
| true
|
f719e947d81719e7404f7f12a8aca3b32f7370bb
| 66
|
py
|
Python
|
core/__init__.py
|
berendkleinhaneveld/Registrationshop
|
0d6f3ee5324865cdcb419369139f37c39dfe9a1c
|
[
"MIT"
] | 25
|
2015-11-08T16:36:54.000Z
|
2022-01-20T16:03:28.000Z
|
core/__init__.py
|
berendkleinhaneveld/Registrationshop
|
0d6f3ee5324865cdcb419369139f37c39dfe9a1c
|
[
"MIT"
] | 2
|
2016-12-01T23:13:08.000Z
|
2017-07-25T02:40:49.000Z
|
core/__init__.py
|
berendkleinhaneveld/Registrationshop
|
0d6f3ee5324865cdcb419369139f37c39dfe9a1c
|
[
"MIT"
] | 10
|
2016-07-05T14:39:16.000Z
|
2022-01-01T02:05:55.000Z
|
from AppVars import AppVars
from AppResources import AppResources
| 22
| 37
| 0.878788
|
from AppVars import AppVars
from AppResources import AppResources
| true
| true
|
f719e96b6824efbbe4833272ec5ec4b37e319c12
| 2,894
|
py
|
Python
|
test/functional/interface_bitcoin_cli.py
|
ComputerCraftr/pivx-gui
|
79c13d9dcaf48dfb11400f0bc5733aaa7c83cee9
|
[
"MIT"
] | null | null | null |
test/functional/interface_bitcoin_cli.py
|
ComputerCraftr/pivx-gui
|
79c13d9dcaf48dfb11400f0bc5733aaa7c83cee9
|
[
"MIT"
] | null | null | null |
test/functional/interface_bitcoin_cli.py
|
ComputerCraftr/pivx-gui
|
79c13d9dcaf48dfb11400f0bc5733aaa7c83cee9
|
[
"MIT"
] | 1
|
2021-01-23T04:15:52.000Z
|
2021-01-23T04:15:52.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test ysw-cli"""
from test_framework.test_framework import YieldSakingWalletTestFramework
from test_framework.util import assert_equal, assert_raises_process_error, get_auth_cookie
import time
class TestBitcoinCli(YieldSakingWalletTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
"""Main test logic"""
self.log.info("Sleeping 30 seconds...")
time.sleep(30)
self.log.info("Compare responses from gewalletinfo RPC and `ysw-cli getwalletinfo`")
cli_response = self.nodes[0].cli.getwalletinfo()
rpc_response = self.nodes[0].getwalletinfo()
assert_equal(cli_response, rpc_response)
self.log.info("Compare responses from getblockchaininfo RPC and `ysw-cli getblockchaininfo`")
cli_response = self.nodes[0].cli.getblockchaininfo()
rpc_response = self.nodes[0].getblockchaininfo()
assert_equal(cli_response, rpc_response)
user, password = get_auth_cookie(self.nodes[0].datadir)
self.log.info("Compare responses from `ysw-cli -getinfo` and the RPCs data is retrieved from.")
cli_get_info = self.nodes[0].cli('getinfo').send_cli()
wallet_info = self.nodes[0].getwalletinfo()
network_info = self.nodes[0].getnetworkinfo()
blockchain_info = self.nodes[0].getblockchaininfo()
assert_equal(cli_get_info['version'], network_info['version'])
assert_equal(cli_get_info['protocolversion'], network_info['protocolversion'])
assert_equal(cli_get_info['walletversion'], wallet_info['walletversion'])
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['blocks'], blockchain_info['blocks'])
assert_equal(cli_get_info['timeoffset'], network_info['timeoffset'])
assert_equal(cli_get_info['connections'], network_info['connections'])
assert_equal(cli_get_info['proxy'], network_info['networks'][0]['proxy'])
assert_equal(cli_get_info['difficulty'], blockchain_info['difficulty'])
assert_equal(cli_get_info['testnet'], blockchain_info['chain'] == "test")
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['keypoololdest'], wallet_info['keypoololdest'])
assert_equal(cli_get_info['keypoolsize'], wallet_info['keypoolsize'])
assert_equal(cli_get_info['paytxfee'], wallet_info['paytxfee'])
assert_equal(cli_get_info['relayfee'], network_info['relayfee'])
# unlocked_until is not tested because the wallet is not encrypted
if __name__ == '__main__':
TestBitcoinCli().main()
| 49.050847
| 103
| 0.715619
|
from test_framework.test_framework import YieldSakingWalletTestFramework
from test_framework.util import assert_equal, assert_raises_process_error, get_auth_cookie
import time
class TestBitcoinCli(YieldSakingWalletTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
self.log.info("Sleeping 30 seconds...")
time.sleep(30)
self.log.info("Compare responses from gewalletinfo RPC and `ysw-cli getwalletinfo`")
cli_response = self.nodes[0].cli.getwalletinfo()
rpc_response = self.nodes[0].getwalletinfo()
assert_equal(cli_response, rpc_response)
self.log.info("Compare responses from getblockchaininfo RPC and `ysw-cli getblockchaininfo`")
cli_response = self.nodes[0].cli.getblockchaininfo()
rpc_response = self.nodes[0].getblockchaininfo()
assert_equal(cli_response, rpc_response)
user, password = get_auth_cookie(self.nodes[0].datadir)
self.log.info("Compare responses from `ysw-cli -getinfo` and the RPCs data is retrieved from.")
cli_get_info = self.nodes[0].cli('getinfo').send_cli()
wallet_info = self.nodes[0].getwalletinfo()
network_info = self.nodes[0].getnetworkinfo()
blockchain_info = self.nodes[0].getblockchaininfo()
assert_equal(cli_get_info['version'], network_info['version'])
assert_equal(cli_get_info['protocolversion'], network_info['protocolversion'])
assert_equal(cli_get_info['walletversion'], wallet_info['walletversion'])
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['blocks'], blockchain_info['blocks'])
assert_equal(cli_get_info['timeoffset'], network_info['timeoffset'])
assert_equal(cli_get_info['connections'], network_info['connections'])
assert_equal(cli_get_info['proxy'], network_info['networks'][0]['proxy'])
assert_equal(cli_get_info['difficulty'], blockchain_info['difficulty'])
assert_equal(cli_get_info['testnet'], blockchain_info['chain'] == "test")
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['keypoololdest'], wallet_info['keypoololdest'])
assert_equal(cli_get_info['keypoolsize'], wallet_info['keypoolsize'])
assert_equal(cli_get_info['paytxfee'], wallet_info['paytxfee'])
assert_equal(cli_get_info['relayfee'], network_info['relayfee'])
if __name__ == '__main__':
TestBitcoinCli().main()
| true
| true
|
f719ea3d7dd63575d0159399e9ac03475a0baa21
| 924
|
py
|
Python
|
aldryn_google_chrome_frame/models.py
|
aldryn/aldryn-google-chrome-frame
|
a0deda5d7b4b60b1ca88b7c3b09685e86b598e2a
|
[
"BSD-3-Clause"
] | null | null | null |
aldryn_google_chrome_frame/models.py
|
aldryn/aldryn-google-chrome-frame
|
a0deda5d7b4b60b1ca88b7c3b09685e86b598e2a
|
[
"BSD-3-Clause"
] | null | null | null |
aldryn_google_chrome_frame/models.py
|
aldryn/aldryn-google-chrome-frame
|
a0deda5d7b4b60b1ca88b7c3b09685e86b598e2a
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from cmscloud.template_api import registry
from django.conf import settings
def get_meta_version(max_version):
max_version = int(max_version)
assert 6 <= max_version <= 9
if max_version == 9:
return '1'
else:
return 'IE%d' % (max_version, )
META_TAG = '<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=%(meta_version)s">'
registry.add_to_head(META_TAG % {'meta_version': get_meta_version(settings.GOOGLE_CHROME_FRAME_MAX_VERSION)})
PROMPT_SCRIPT = """<!--[if lte IE %(max_version)s ]>
<script src="//ajax.googleapis.com/ajax/libs/chrome-frame/1.0.2/CFInstall.min.js"></script>
<script>window.attachEvent("onload",function(){CFInstall.check({mode:"overlay"})})</script>
<![endif]-->"""
if getattr(settings, 'GOOGLE_CHROME_FRAME_PROMPT', False):
registry.add_to_tail(PROMPT_SCRIPT % {'max_version': settings.GOOGLE_CHROME_FRAME_MAX_VERSION})
| 36.96
| 109
| 0.712121
|
from cmscloud.template_api import registry
from django.conf import settings
def get_meta_version(max_version):
max_version = int(max_version)
assert 6 <= max_version <= 9
if max_version == 9:
return '1'
else:
return 'IE%d' % (max_version, )
META_TAG = '<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=%(meta_version)s">'
registry.add_to_head(META_TAG % {'meta_version': get_meta_version(settings.GOOGLE_CHROME_FRAME_MAX_VERSION)})
PROMPT_SCRIPT = """<!--[if lte IE %(max_version)s ]>
<script src="//ajax.googleapis.com/ajax/libs/chrome-frame/1.0.2/CFInstall.min.js"></script>
<script>window.attachEvent("onload",function(){CFInstall.check({mode:"overlay"})})</script>
<![endif]-->"""
if getattr(settings, 'GOOGLE_CHROME_FRAME_PROMPT', False):
registry.add_to_tail(PROMPT_SCRIPT % {'max_version': settings.GOOGLE_CHROME_FRAME_MAX_VERSION})
| true
| true
|
f719ea5c0a903eafeb8163fd0cad4442d6c73370
| 520
|
py
|
Python
|
torchtools/callbacks/__init__.py
|
Time1ess/torchtools
|
1c48591188827f8a7403162728f86229203354c5
|
[
"BSD-3-Clause"
] | 16
|
2017-08-15T14:01:13.000Z
|
2020-12-21T11:23:31.000Z
|
torchtools/callbacks/__init__.py
|
Time1ess/torchtools
|
1c48591188827f8a7403162728f86229203354c5
|
[
"BSD-3-Clause"
] | null | null | null |
torchtools/callbacks/__init__.py
|
Time1ess/torchtools
|
1c48591188827f8a7403162728f86229203354c5
|
[
"BSD-3-Clause"
] | 2
|
2017-12-28T14:09:09.000Z
|
2020-07-14T14:29:30.000Z
|
# coding: UTF-8
from .callback import Hook, Callback
from .checkpoint import ModelCheckPoint
from .csvlogger import CSVLogger
from .early_stopping import EarlyStopping
from .lr_scheduler import (
LambdaLR, StepLR, MultiStepLR, ExponentialLR, ReduceLROnPlateau)
from .tensorboard_logger import TensorBoardLogger
__all__ = [
'Hook', 'Callback',
'ModelCheckPoint',
'CSVLogger',
'EarlyStopping',
'LambdaLR', 'StepLR', 'MultiStepLR', 'ExponentialLR', 'ReduceLROnPlateau',
'TensorBoardLogger',
]
| 27.368421
| 78
| 0.75
|
from .callback import Hook, Callback
from .checkpoint import ModelCheckPoint
from .csvlogger import CSVLogger
from .early_stopping import EarlyStopping
from .lr_scheduler import (
LambdaLR, StepLR, MultiStepLR, ExponentialLR, ReduceLROnPlateau)
from .tensorboard_logger import TensorBoardLogger
__all__ = [
'Hook', 'Callback',
'ModelCheckPoint',
'CSVLogger',
'EarlyStopping',
'LambdaLR', 'StepLR', 'MultiStepLR', 'ExponentialLR', 'ReduceLROnPlateau',
'TensorBoardLogger',
]
| true
| true
|
f719ea9ceaf6800cbd249182d3c34733fdae35f0
| 3,716
|
py
|
Python
|
test.py
|
AlbertoSousaSantana/devopslav_full02
|
679bdca0f2fb886febeba37696f49143105894b6
|
[
"MIT"
] | null | null | null |
test.py
|
AlbertoSousaSantana/devopslav_full02
|
679bdca0f2fb886febeba37696f49143105894b6
|
[
"MIT"
] | null | null | null |
test.py
|
AlbertoSousaSantana/devopslav_full02
|
679bdca0f2fb886febeba37696f49143105894b6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from app import app
import unittest
class Test(unittest.TestCase):
def setUp(self):
# cria uma instância do unittest, precisa do nome "setUp"
self.app = app.test_client()
# envia uma requisicao GET para a URL
self.result = self.app.get('/')
def test_requisicao(self):
# compara o status da requisicao (precisa ser igual a 200)
self.assertEqual(self.result.status_code, 200)
def test_conteudo(self):
# verifica o retorno do conteudo da pagina
self.assertEqual(self.result.data.decode('utf-8'), "mensagem personalizada Alberto3")
if __name__ == "__main__":
print ('INICIANDO OS TESTES')
print('----------------------------------------------------------------------')
unittest.main(verbosity=2)
| 148.64
| 222
| 0.130786
|
from app import app
import unittest
class Test(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
self.result = self.app.get('/')
def test_requisicao(self):
self.assertEqual(self.result.status_code, 200)
def test_conteudo(self):
self.assertEqual(self.result.data.decode('utf-8'), "mensagem personalizada Alberto3")
if __name__ == "__main__":
print ('INICIANDO OS TESTES')
print('----------------------------------------------------------------------')
unittest.main(verbosity=2)
| true
| true
|
f719eb4a68e5fddea525ab05f9a6de0a28ad334a
| 10,408
|
py
|
Python
|
retinanet/losses_vehicle.py
|
RobinCondat/pytorch-retinanet
|
14a2085cd3785a667454898dc65f5324b1b9c6b8
|
[
"Apache-2.0"
] | null | null | null |
retinanet/losses_vehicle.py
|
RobinCondat/pytorch-retinanet
|
14a2085cd3785a667454898dc65f5324b1b9c6b8
|
[
"Apache-2.0"
] | null | null | null |
retinanet/losses_vehicle.py
|
RobinCondat/pytorch-retinanet
|
14a2085cd3785a667454898dc65f5324b1b9c6b8
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import torch
import torch.nn as nn
from retinanet.config_experiment_2 import INDEXES_MIX, VEHICLE_INDEXES
def calc_iou(a, b):
area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
iw = torch.min(torch.unsqueeze(a[:, 2], dim=1), b[:, 2]) - torch.max(torch.unsqueeze(a[:, 0], 1), b[:, 0])
ih = torch.min(torch.unsqueeze(a[:, 3], dim=1), b[:, 3]) - torch.max(torch.unsqueeze(a[:, 1], 1), b[:, 1])
iw = torch.clamp(iw, min=0)
ih = torch.clamp(ih, min=0)
ua = torch.unsqueeze((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), dim=1) + area - iw * ih
ua = torch.clamp(ua, min=1e-8)
intersection = iw * ih
IoU = intersection / ua
return IoU
def cal_ioa(a, b):
# Intersection over Area (for ignore regions)
area = torch.unsqueeze((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]),dim=1)
area = torch.clamp(area, min=1e-8)
iw = torch.min(torch.unsqueeze(a[:, 2], dim=1), b[:, 2]) - torch.max(torch.unsqueeze(a[:, 0], 1), b[:, 0])
ih = torch.min(torch.unsqueeze(a[:, 3], dim=1), b[:, 3]) - torch.max(torch.unsqueeze(a[:, 1], 1), b[:, 1])
iw = torch.clamp(iw, min=0)
ih = torch.clamp(ih, min=0)
intersection = iw * ih
IoA = intersection / area
return IoA
class FocalLoss(nn.Module):
#def __init__(self):
def forward(self, classifications, regressions, anchors, annotations, dataset, ignore_index=None, merge_index=None):
classes_from_other_datasets = [i for i in range(classifications.shape[-1]+1) if i not in INDEXES_MIX[dataset]]
alpha = 0.25
gamma = 2.0
batch_size = classifications.shape[0]
classification_losses = []
regression_losses = []
anchor = anchors[0, :, :]
num_anchors = anchor.shape[0]
anchor_widths = anchor[:, 2] - anchor[:, 0]
anchor_heights = anchor[:, 3] - anchor[:, 1]
anchor_ctr_x = anchor[:, 0] + 0.5 * anchor_widths
anchor_ctr_y = anchor[:, 1] + 0.5 * anchor_heights
if merge_index is not None:
classifications = torch.cat((classifications,torch.zeros((classifications.shape[0],classifications.shape[1],1)).cuda()),2)
print(classifications.shape)
for j in range(batch_size):
classification = classifications[j, :, :]
regression = regressions[j, :, :]
bbox_annotation = annotations[j, :, :]
bbox_annotation = bbox_annotation[bbox_annotation[:, 4] != -1]
# Merge vehicle detections in vehicle class
if merge_index is not None:
if merge_index not in classes_from_other_datasets:
#print(torch.max(classification[:,VEHICLE_INDEXES], dim=1)[0].shape)
classification[:,merge_index] = torch.max(classification[:,VEHICLE_INDEXES], dim=1)[0]
# Ignore class from other datasets
classification[:,classes_from_other_datasets]=0
classification = torch.clamp(classification, 1e-4, 1.0 - 1e-4)
if bbox_annotation.shape[0] == 0:
if torch.cuda.is_available():
alpha_factor = torch.ones(classification.shape).cuda() * alpha
alpha_factor = 1. - alpha_factor
focal_weight = classification
focal_weight = alpha_factor * torch.pow(focal_weight, gamma)
bce = -(torch.log(1.0 - classification))
cls_loss = focal_weight * bce
classification_losses.append(cls_loss.sum())
regression_losses.append(torch.tensor(0).float().cuda())
else:
alpha_factor = torch.ones(classification.shape) * alpha
alpha_factor = 1. - alpha_factor
focal_weight = classification
focal_weight = alpha_factor * torch.pow(focal_weight, gamma)
bce = -(torch.log(1.0 - classification))
cls_loss = focal_weight * bce
classification_losses.append(cls_loss.sum())
regression_losses.append(torch.tensor(0).float())
continue
# Filter ignore class (via ignore_index)
if ignore_index is not None:
# On sépare ici les annotations en 2 objets :
# - bbox_annotation (pour tous les objets à détecter)
# - ignore_annotation (pour toutes les régions à ignorer)
ignore_annotation = bbox_annotation[bbox_annotation[:,4] == ignore_index]
bbox_annotation = bbox_annotation[bbox_annotation[:,4] != ignore_index]
if bbox_annotation.shape[0] != 0:
IoU = calc_iou(anchors[0, :, :], bbox_annotation[:, :4]) # num_anchors x num_annotations_to_detect
IoU_max, IoU_argmax = torch.max(IoU, dim=1) # num_anchors x 1
else:
IoU_max = None
IoU_argmax = None
if ignore_index is not None:
# On calcule ici l'intersection over area :
# tous les anchors ayant une IoA avec une région à ignorer supérieure à 0.5 seront ignorées pour la suite
if ignore_annotation.shape[0] !=0:
IoA = cal_ioa(anchors[0, :, :], ignore_annotation[:, :4]) # num_anchors x num_annotations_to_ignore
IoA_max, IoA_argmax = torch.max(IoA, dim=1) # num_anchors x 1
else:
IoA_max = None
IoA_argmax = None
# compute the loss for classification
targets = torch.ones(classification.shape) * -1
if torch.cuda.is_available():
targets = targets.cuda()
if IoU_max is not None:
targets[torch.lt(IoU_max, 0.4), :] = 0
else:
targets = targets*0
if ignore_index is not None:
if IoA_max is not None:
ignore_indices = torch.ge(IoA_max, 0.5)
else:
ignore_indices = (torch.ones((num_anchors)) * 0).type(torch.ByteTensor)
if IoU_max is not None:
positive_indices = torch.ge(IoU_max, 0.5)
num_positive_anchors = positive_indices.sum()
else:
positive_indices = (torch.ones((num_anchors)) * 0).type(torch.ByteTensor)
num_positive_anchors = torch.tensor(0)
if ignore_index is not None:
if ignore_indices is not None:
targets[ignore_indices, :] = -1
if IoU_argmax is not None:
assigned_annotations = bbox_annotation[IoU_argmax, :]
targets[positive_indices, :] = 0
targets[positive_indices, assigned_annotations[positive_indices, 4].long()] = 1
if torch.cuda.is_available():
alpha_factor = torch.ones(targets.shape).cuda() * alpha
else:
alpha_factor = torch.ones(targets.shape) * alpha
alpha_factor = torch.where(torch.eq(targets, 1.), alpha_factor, 1. - alpha_factor)
focal_weight = torch.where(torch.eq(targets, 1.), 1. - classification, classification)
focal_weight = alpha_factor * torch.pow(focal_weight, gamma)
bce = -(targets * torch.log(classification) + (1.0 - targets) * torch.log(1.0 - classification))
cls_loss = focal_weight * bce
if torch.cuda.is_available():
cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, torch.zeros(cls_loss.shape).cuda())
else:
cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, torch.zeros(cls_loss.shape))
classification_losses.append(cls_loss.sum()/torch.clamp(num_positive_anchors.float(), min=1.0))
# compute the loss for regression
if num_positive_anchors > 0:
assigned_annotations = assigned_annotations[positive_indices, :]
anchor_widths_pi = anchor_widths[positive_indices]
anchor_heights_pi = anchor_heights[positive_indices]
anchor_ctr_x_pi = anchor_ctr_x[positive_indices]
anchor_ctr_y_pi = anchor_ctr_y[positive_indices]
gt_widths = assigned_annotations[:, 2] - assigned_annotations[:, 0]
gt_heights = assigned_annotations[:, 3] - assigned_annotations[:, 1]
gt_ctr_x = assigned_annotations[:, 0] + 0.5 * gt_widths
gt_ctr_y = assigned_annotations[:, 1] + 0.5 * gt_heights
# clip widths to 1
gt_widths = torch.clamp(gt_widths, min=1)
gt_heights = torch.clamp(gt_heights, min=1)
targets_dx = (gt_ctr_x - anchor_ctr_x_pi) / anchor_widths_pi
targets_dy = (gt_ctr_y - anchor_ctr_y_pi) / anchor_heights_pi
targets_dw = torch.log(gt_widths / anchor_widths_pi)
targets_dh = torch.log(gt_heights / anchor_heights_pi)
targets = torch.stack((targets_dx, targets_dy, targets_dw, targets_dh))
targets = targets.t()
if torch.cuda.is_available():
targets = targets/torch.Tensor([[0.1, 0.1, 0.2, 0.2]]).cuda()
else:
targets = targets/torch.Tensor([[0.1, 0.1, 0.2, 0.2]])
negative_indices = 1 + (~positive_indices)
regression_diff = torch.abs(targets - regression[positive_indices, :])
regression_loss = torch.where(
torch.le(regression_diff, 1.0 / 9.0),
0.5 * 9.0 * torch.pow(regression_diff, 2),
regression_diff - 0.5 / 9.0
)
regression_losses.append(regression_loss.mean())
else:
if torch.cuda.is_available():
regression_losses.append(torch.tensor(0).float().cuda())
else:
regression_losses.append(torch.tensor(0).float())
return torch.stack(classification_losses).mean(dim=0, keepdim=True), torch.stack(regression_losses).mean(dim=0, keepdim=True)
| 42.831276
| 133
| 0.560146
|
import numpy as np
import torch
import torch.nn as nn
from retinanet.config_experiment_2 import INDEXES_MIX, VEHICLE_INDEXES
def calc_iou(a, b):
    """Pairwise IoU between boxes ``a`` (N, 4) and ``b`` (M, 4).

    Boxes are in (x1, y1, x2, y2) form; returns an (N, M) tensor where entry
    (i, j) is the intersection-over-union of a[i] and b[j].
    """
    b_area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    # Broadcast a's coords to (N, 1) against b's (M,) to get (N, M) overlaps.
    overlap_w = torch.clamp(
        torch.min(a[:, 2].unsqueeze(1), b[:, 2]) - torch.max(a[:, 0].unsqueeze(1), b[:, 0]),
        min=0)
    overlap_h = torch.clamp(
        torch.min(a[:, 3].unsqueeze(1), b[:, 3]) - torch.max(a[:, 1].unsqueeze(1), b[:, 1]),
        min=0)
    inter = overlap_w * overlap_h
    a_area = ((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])).unsqueeze(1)
    # Clamp the union away from zero to avoid division by zero.
    union = torch.clamp(a_area + b_area - inter, min=1e-8)
    return inter / union
def cal_ioa(a, b):
    """Intersection-over-area of ``a``: fraction of each box in ``a`` (N, 4)
    covered by each box in ``b`` (M, 4); returns an (N, M) tensor.

    Unlike IoU, the denominator is only the area of the ``a`` box.
    """
    # Area of each a-box, shaped (N, 1) for broadcasting; clamped to avoid /0.
    a_area = torch.clamp(
        ((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])).unsqueeze(1), min=1e-8)
    overlap_w = torch.clamp(
        torch.min(a[:, 2].unsqueeze(1), b[:, 2]) - torch.max(a[:, 0].unsqueeze(1), b[:, 0]),
        min=0)
    overlap_h = torch.clamp(
        torch.min(a[:, 3].unsqueeze(1), b[:, 3]) - torch.max(a[:, 1].unsqueeze(1), b[:, 1]),
        min=0)
    return (overlap_w * overlap_h) / a_area
class FocalLoss(nn.Module):
    """RetinaNet-style loss: focal loss for classification plus smooth-L1 for
    box regression, with two dataset-mixing extensions:

    * ``ignore_index`` — annotations of this class mark regions to ignore;
      anchors covered >= 0.5 (IoA) by such a region are excluded from the
      classification loss.
    * ``merge_index`` — scores of the VEHICLE_INDEXES columns are merged
      (max) into this class column, and columns belonging to classes absent
      from the current dataset (per INDEXES_MIX) are zeroed.
    """
    def forward(self, classifications, regressions, anchors, annotations, dataset, ignore_index=None, merge_index=None):
        # Class indices not present in the current dataset's label space.
        classes_from_other_datasets = [i for i in range(classifications.shape[-1]+1) if i not in INDEXES_MIX[dataset]]
        # Standard focal-loss hyperparameters.
        alpha = 0.25
        gamma = 2.0
        batch_size = classifications.shape[0]
        classification_losses = []
        regression_losses = []
        # Anchors are identical across the batch; use the first set.
        anchor = anchors[0, :, :]
        num_anchors = anchor.shape[0]
        anchor_widths = anchor[:, 2] - anchor[:, 0]
        anchor_heights = anchor[:, 3] - anchor[:, 1]
        anchor_ctr_x = anchor[:, 0] + 0.5 * anchor_widths
        anchor_ctr_y = anchor[:, 1] + 0.5 * anchor_heights
        if merge_index is not None:
            # Append one extra (zero) class column to hold the merged scores.
            # NOTE(review): hard-codes .cuda() — fails on CPU-only runs.
            classifications = torch.cat((classifications,torch.zeros((classifications.shape[0],classifications.shape[1],1)).cuda()),2)
            print(classifications.shape)  # debug leftover
        for j in range(batch_size):
            classification = classifications[j, :, :]
            regression = regressions[j, :, :]
            bbox_annotation = annotations[j, :, :]
            # Drop padding rows (class id -1).
            bbox_annotation = bbox_annotation[bbox_annotation[:, 4] != -1]
            if merge_index is not None:
                if merge_index not in classes_from_other_datasets:
                    # Merged class score = max over all vehicle sub-classes.
                    classification[:,merge_index] = torch.max(classification[:,VEHICLE_INDEXES], dim=1)[0]
                # Suppress classes this dataset cannot supervise.
                classification[:,classes_from_other_datasets]=0
            # Clamp probabilities away from 0/1 so log() stays finite.
            classification = torch.clamp(classification, 1e-4, 1.0 - 1e-4)
            if bbox_annotation.shape[0] == 0:
                # No ground truth in this image: everything is background;
                # penalise all positive scores, regression loss is zero.
                if torch.cuda.is_available():
                    alpha_factor = torch.ones(classification.shape).cuda() * alpha
                    alpha_factor = 1. - alpha_factor
                    focal_weight = classification
                    focal_weight = alpha_factor * torch.pow(focal_weight, gamma)
                    bce = -(torch.log(1.0 - classification))
                    cls_loss = focal_weight * bce
                    classification_losses.append(cls_loss.sum())
                    regression_losses.append(torch.tensor(0).float().cuda())
                else:
                    alpha_factor = torch.ones(classification.shape) * alpha
                    alpha_factor = 1. - alpha_factor
                    focal_weight = classification
                    focal_weight = alpha_factor * torch.pow(focal_weight, gamma)
                    bce = -(torch.log(1.0 - classification))
                    cls_loss = focal_weight * bce
                    classification_losses.append(cls_loss.sum())
                    regression_losses.append(torch.tensor(0).float())
                continue
            if ignore_index is not None:
                # Split annotations into "ignore regions" and real boxes.
                ignore_annotation = bbox_annotation[bbox_annotation[:,4] == ignore_index]
                bbox_annotation = bbox_annotation[bbox_annotation[:,4] != ignore_index]
            if bbox_annotation.shape[0] != 0:
                IoU = calc_iou(anchors[0, :, :], bbox_annotation[:, :4])  # num_anchors x num_annotations
                IoU_max, IoU_argmax = torch.max(IoU, dim=1)
            else:
                IoU_max = None
                IoU_argmax = None
            if ignore_index is not None:
                # All anchors whose IoA with a region-to-ignore exceeds 0.5
                # will be excluded from the loss below.
                if ignore_annotation.shape[0] !=0:
                    IoA = cal_ioa(anchors[0, :, :], ignore_annotation[:, :4]) # num_anchors x num_annotations_to_ignore
                    IoA_max, IoA_argmax = torch.max(IoA, dim=1) # num_anchors x 1
                else:
                    IoA_max = None
                    IoA_argmax = None
            # compute the loss for classification
            # targets: -1 = ignored anchor, 0 = background, 1 = assigned class.
            targets = torch.ones(classification.shape) * -1
            if torch.cuda.is_available():
                targets = targets.cuda()
            if IoU_max is not None:
                # IoU < 0.4 -> definite background.
                targets[torch.lt(IoU_max, 0.4), :] = 0
            else:
                targets = targets*0
            if ignore_index is not None:
                if IoA_max is not None:
                    ignore_indices = torch.ge(IoA_max, 0.5)
                else:
                    ignore_indices = (torch.ones((num_anchors)) * 0).type(torch.ByteTensor)
            if IoU_max is not None:
                # IoU >= 0.5 -> positive anchor.
                positive_indices = torch.ge(IoU_max, 0.5)
                num_positive_anchors = positive_indices.sum()
            else:
                positive_indices = (torch.ones((num_anchors)) * 0).type(torch.ByteTensor)
                num_positive_anchors = torch.tensor(0)
            if ignore_index is not None:
                if ignore_indices is not None:
                    # Anchors inside ignore regions contribute no loss.
                    targets[ignore_indices, :] = -1
            if IoU_argmax is not None:
                # One-hot encode the assigned class for each positive anchor.
                assigned_annotations = bbox_annotation[IoU_argmax, :]
                targets[positive_indices, :] = 0
                targets[positive_indices, assigned_annotations[positive_indices, 4].long()] = 1
            if torch.cuda.is_available():
                alpha_factor = torch.ones(targets.shape).cuda() * alpha
            else:
                alpha_factor = torch.ones(targets.shape) * alpha
            # Focal loss: alpha-balanced BCE weighted by (1 - p_t)^gamma.
            alpha_factor = torch.where(torch.eq(targets, 1.), alpha_factor, 1. - alpha_factor)
            focal_weight = torch.where(torch.eq(targets, 1.), 1. - classification, classification)
            focal_weight = alpha_factor * torch.pow(focal_weight, gamma)
            bce = -(targets * torch.log(classification) + (1.0 - targets) * torch.log(1.0 - classification))
            cls_loss = focal_weight * bce
            if torch.cuda.is_available():
                # Zero out loss for ignored anchors (targets == -1).
                cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, torch.zeros(cls_loss.shape).cuda())
            else:
                cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, torch.zeros(cls_loss.shape))
            # Normalise by the number of positive anchors (at least 1).
            classification_losses.append(cls_loss.sum()/torch.clamp(num_positive_anchors.float(), min=1.0))
            # compute the loss for regression
            if num_positive_anchors > 0:
                assigned_annotations = assigned_annotations[positive_indices, :]
                anchor_widths_pi = anchor_widths[positive_indices]
                anchor_heights_pi = anchor_heights[positive_indices]
                anchor_ctr_x_pi = anchor_ctr_x[positive_indices]
                anchor_ctr_y_pi = anchor_ctr_y[positive_indices]
                gt_widths = assigned_annotations[:, 2] - assigned_annotations[:, 0]
                gt_heights = assigned_annotations[:, 3] - assigned_annotations[:, 1]
                gt_ctr_x = assigned_annotations[:, 0] + 0.5 * gt_widths
                gt_ctr_y = assigned_annotations[:, 1] + 0.5 * gt_heights
                # clip widths to 1
                gt_widths = torch.clamp(gt_widths, min=1)
                gt_heights = torch.clamp(gt_heights, min=1)
                # Standard anchor-relative box-regression targets.
                targets_dx = (gt_ctr_x - anchor_ctr_x_pi) / anchor_widths_pi
                targets_dy = (gt_ctr_y - anchor_ctr_y_pi) / anchor_heights_pi
                targets_dw = torch.log(gt_widths / anchor_widths_pi)
                targets_dh = torch.log(gt_heights / anchor_heights_pi)
                targets = torch.stack((targets_dx, targets_dy, targets_dw, targets_dh))
                targets = targets.t()
                # Scale targets by the usual RetinaNet std-dev constants.
                if torch.cuda.is_available():
                    targets = targets/torch.Tensor([[0.1, 0.1, 0.2, 0.2]]).cuda()
                else:
                    targets = targets/torch.Tensor([[0.1, 0.1, 0.2, 0.2]])
                negative_indices = 1 + (~positive_indices)  # NOTE(review): unused — dead code
                # Smooth-L1 (Huber) loss with beta = 1/9.
                regression_diff = torch.abs(targets - regression[positive_indices, :])
                regression_loss = torch.where(
                    torch.le(regression_diff, 1.0 / 9.0),
                    0.5 * 9.0 * torch.pow(regression_diff, 2),
                    regression_diff - 0.5 / 9.0
                )
                regression_losses.append(regression_loss.mean())
            else:
                if torch.cuda.is_available():
                    regression_losses.append(torch.tensor(0).float().cuda())
                else:
                    regression_losses.append(torch.tensor(0).float())
        # Average both losses over the batch; each is a 1-element tensor.
        return torch.stack(classification_losses).mean(dim=0, keepdim=True), torch.stack(regression_losses).mean(dim=0, keepdim=True)
| true
| true
|
f719ee1200d97dbce407161a29de73e610926f93
| 1,780
|
py
|
Python
|
kubernetes_asyncio/test/test_storage_v1alpha1_api.py
|
aK0nshin/kubernetes_asyncio
|
aef9edcc1f8671a5b1bba9f4684bde890176b19c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/test/test_storage_v1alpha1_api.py
|
aK0nshin/kubernetes_asyncio
|
aef9edcc1f8671a5b1bba9f4684bde890176b19c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/test/test_storage_v1alpha1_api.py
|
aK0nshin/kubernetes_asyncio
|
aef9edcc1f8671a5b1bba9f4684bde890176b19c
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.14.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes_asyncio.client
from kubernetes_asyncio.client.api.storage_v1alpha1_api import StorageV1alpha1Api # noqa: E501
from kubernetes_asyncio.client.rest import ApiException
class TestStorageV1alpha1Api(unittest.TestCase):
    """Auto-generated unit-test stubs for StorageV1alpha1Api.

    Every ``test_*`` body is an empty placeholder (``pass``) — presumably
    intended to be filled in with real assertions later; TODO confirm.
    """

    def setUp(self):
        # A fresh API client object for every test case.
        self.api = kubernetes_asyncio.client.api.storage_v1alpha1_api.StorageV1alpha1Api()  # noqa: E501

    def tearDown(self):
        # No per-test cleanup required.
        pass

    def test_create_volume_attachment(self):
        """Test case for create_volume_attachment"""
        pass

    def test_delete_collection_volume_attachment(self):
        """Test case for delete_collection_volume_attachment"""
        pass

    def test_delete_volume_attachment(self):
        """Test case for delete_volume_attachment"""
        pass

    def test_get_api_resources(self):
        """Test case for get_api_resources"""
        pass

    def test_list_volume_attachment(self):
        """Test case for list_volume_attachment"""
        pass

    def test_patch_volume_attachment(self):
        """Test case for patch_volume_attachment"""
        pass

    def test_read_volume_attachment(self):
        """Test case for read_volume_attachment"""
        pass

    def test_replace_volume_attachment(self):
        """Test case for replace_volume_attachment"""
        pass
if __name__ == '__main__':
unittest.main()
| 21.707317
| 124
| 0.676404
|
from __future__ import absolute_import
import unittest
import kubernetes_asyncio.client
from kubernetes_asyncio.client.api.storage_v1alpha1_api import StorageV1alpha1Api
from kubernetes_asyncio.client.rest import ApiException
class TestStorageV1alpha1Api(unittest.TestCase):
def setUp(self):
self.api = kubernetes_asyncio.client.api.storage_v1alpha1_api.StorageV1alpha1Api()
def tearDown(self):
pass
def test_create_volume_attachment(self):
pass
def test_delete_collection_volume_attachment(self):
pass
def test_delete_volume_attachment(self):
pass
def test_get_api_resources(self):
pass
def test_list_volume_attachment(self):
pass
def test_patch_volume_attachment(self):
pass
def test_read_volume_attachment(self):
pass
def test_replace_volume_attachment(self):
pass
if __name__ == '__main__':
unittest.main()
| true
| true
|
f719ee2700b12c9c0e630bfb643af003a5b4013a
| 7,162
|
py
|
Python
|
clicktocall/app.py
|
Python-725/clicktocall-flask
|
83268b7c90e487a70fc5ef0dcdbb3343d1dc783d
|
[
"MIT"
] | null | null | null |
clicktocall/app.py
|
Python-725/clicktocall-flask
|
83268b7c90e487a70fc5ef0dcdbb3343d1dc783d
|
[
"MIT"
] | null | null | null |
clicktocall/app.py
|
Python-725/clicktocall-flask
|
83268b7c90e487a70fc5ef0dcdbb3343d1dc783d
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask import jsonify
from flask import render_template
from flask import request
import os, base64, uuid
from twilio.twiml.voice_response import VoiceResponse, Gather, Dial
from twilio.rest import Client
# Declare and configure application
app = Flask(__name__, static_url_path='/static')
app.config.from_pyfile('local_settings.py')
# Route for Click to Call demo page.
@app.route('/')
def index():
    """Render the Click-to-Call demo page (no configuration error shown)."""
    return render_template('index.html',
                           configuration_error=None)
# In-memory call state, keyed by the conference session id.
sessionID_to_callsid = {}  # session id -> Twilio call SID of the caller leg (set in join_conference)
sessionID_to_confsid = {}  # session id -> Twilio conference SID (set in leave)
sessionID_to_destNo = {}   # session id -> destination phone number (set in join_conference)
# +918698583414
# +919404041811
# +918767805516
# Generate random session id for conference
def get_session_id(source_number, destination_number):
    """Build a unique conference session id for this destination number.

    Format: ``Conf<destination_number>-<32 hex chars>``.  The source number is
    accepted for signature compatibility but does not appear in the id.
    """
    return f"Conf{destination_number}-{uuid.uuid4().hex}"
def get_client():
    """Build a Twilio REST client from the app config's SID / auth token.

    NOTE(review): on failure this returns a ``(jsonify, 400)`` tuple instead
    of a client; callers use the return value as a Client, so a missing
    config variable surfaces later as an attribute error — consider raising.
    """
    try:
        twilio_client = Client(app.config['TWILIO_ACCOUNT_SID'],
                               app.config['TWILIO_AUTH_TOKEN'])
        return twilio_client
    except Exception as e:
        msg = f"Missing configuration variable: {e}"
        return jsonify({'error': msg}), 400
# Voice Request URL
@app.route('/join_conference', methods=['GET', 'POST'])
@app.route('/call_number', methods=['GET', 'POST'])
def join_conference():
    """Kick off a click-to-call: dial the source number via Twilio.

    Expects form params ``source_number`` and ``dest_number``.  Creates a
    recorded outbound call to the source number whose TwiML is served by
    /voip_callback/<session_id>; records the call SID and destination number
    in the module-level session maps.  Returns JSON success or a 400 error.
    """
    # Get phone numbers from request
    source_number = request.form.get('source_number', None)
    dest_number = request.form.get('dest_number', None)
    print(f"Call Request received! source_number:{source_number}, dest_number:{dest_number}")
    if not source_number or not dest_number:
        msg = "Missing phone number value. Expected params source_number and dest_number"
        return jsonify({'error': msg}), 400
    try:
        twilio_client = get_client()
        session_id = get_session_id(source_number, dest_number)
        # Dial the caller first; Twilio fetches the TwiML from `url` and hits
        # `status_callback` when the call completes.
        call = twilio_client.calls.create(record=True,
                                          from_=app.config['TWILIO_NUMBER'],
                                          to=source_number,
                                          url='https://3.137.150.83:8001/voip/api_voip/voip_callback/' + str(session_id),
                                          status_callback_event=['completed'],
                                          status_callback='https://3.137.150.83:8001/voip/api_voip/complete_call/' + str(session_id)
                                          )
        # Remember the caller leg and the destination for later handlers.
        sessionID_to_callsid[session_id] = call.sid
        sessionID_to_destNo[session_id] = dest_number
        print("Initiated a Source number Call, session_id:", session_id)
    except Exception as e:
        message = e.msg if hasattr(e, 'msg') else str(e)
        return jsonify({'error': message}), 400
    return jsonify({'message': 'Success!'})
@app.route('/voip_callback/<string:session_id>', methods=['GET', 'POST'])
def voip_callback(session_id):
    """IVR entry point for the caller leg of the click-to-call.

    Twilio requests this URL when the source number answers.  The caller is
    prompted to press 1 (bridge in the destination) or 2 (hang up); the
    handler returns TwiML describing the next step.

    Fix: this handler previously returned ``jsonify(resp)``.  Twilio expects
    the TwiML XML body — ``str(resp)`` — and a ``VoiceResponse`` object is
    not JSON serializable, so ``jsonify`` would raise.  Every other handler
    in this file already returns ``str(resp)``.
    """
    print("## Conference request received, session id:{} Making a conference call", session_id)
    resp = VoiceResponse()
    # If Twilio's request to our app included already gathered digits, process them
    if 'Digits' in request.values:
        # Get which digit the caller chose
        choice = request.values['Digits']
        # Say a different message depending on the caller's choice
        if choice == '1':
            resp.say('Adding destination number to the conference!')
            resp.redirect('https://3.137.150.83:8001/voip/api_voip/add-user/' + session_id)
            print(str(resp))
            return str(resp)
        elif choice == '2':
            resp.say('Thank you for calling, have a nice day!')
            # End the call with <Hangup>
            resp.hangup()
            print(str(resp))
            return str(resp)
        else:
            # If the caller didn't choose 1 or 2, apologize and ask them again
            resp.say("Sorry, I don't understand that choice.")
    else:
        # First visit: gather one digit from the caller.
        gather = Gather(num_digits=1, action='/voip_callback/' + session_id)
        gather.say('Please Press 1 to connect to destination. Press 2 to end the call.')
        resp.append(gather)
        # If the user didn't choose anything, repeat the message
        resp.redirect('https://3.137.150.83:8001/voip/api_voip/voip_callback/' + session_id)
    print(str(resp))
    return str(resp)
@app.route('/add-user/<string:session_id>', methods=['POST'])
def add_user_to_conf(session_id):
    """Bridge the destination number into the conference.

    Returns TwiML that puts the caller into a conference named after the
    destination number, and via the REST API dials the destination in as a
    participant; the 'leave' status callback points at /leave/<session_id>.
    """
    print("# Add user request received, session id:{}", session_id)
    destination_number = sessionID_to_destNo.get(session_id)
    print("Attemtping to add phone number to call: " + destination_number)
    client = get_client()
    # TwiML for the caller leg: join the conference room.
    resp = VoiceResponse()
    dial = Dial()
    dial.conference(destination_number)
    resp.append(dial)
    # REST call: dial the destination into the same conference room.
    participant = client.conferences(destination_number).participants.create(
        from_=app.config['TWILIO_NUMBER'],
        to=destination_number,
        conference_status_callback='https://3.137.150.83:8001/voip/api_voip/leave/' + session_id,
        conference_status_callback_event="leave")
    print(participant)
    return str(resp)
@app.route('/leave/<string:session_id>', methods=['GET', 'POST'])
def leave(session_id):
    """Conference status callback: tear the call down when a party leaves.

    Records the conference SID for this session, then ends the conference
    (or the original caller leg) when only one / zero participants remain.
    Returns empty TwiML.
    """
    event = request.values['SequenceNumber']
    conference_sid = request.values['ConferenceSid']
    sessionID_to_confsid[session_id] = conference_sid
    print("Leave call request:", conference_sid, event, session_id)
    if request.values['StatusCallbackEvent'] == 'participant-leave':
        print("A Participant Left Call")
        client = get_client()
        # ends conference call if only 1 participant left
        participants = client.conferences(conference_sid).participants
        if len(participants.list()) == 1:
            client.conferences(conference_sid).update(status='completed')
            print("Call ended")
        # ends conference call if original caller leaves before callee picks up
        elif len(participants.list()) == 0 and event == '2':
            client.calls(sessionID_to_callsid.get(session_id)).update(status='completed')
            print("Call ended")
    resp = VoiceResponse()
    return str(resp)
# this is an endpoint to end the conference call if the callee rejects the call
@app.route('/complete_call/<string:call_session_id>', methods=['GET', 'POST'])
def complete_call(call_session_id):
    """Status callback for the caller leg: end the conference if the callee
    rejected the call and only the caller remains in the room."""
    print("## Ending conference call, callee rejected call")
    client = get_client()
    global sessionID_to_confsid
    participants = client.conferences(sessionID_to_confsid.get(call_session_id)).participants
    # only does so if 1 participant left in the conference call (i.e. the caller)
    if len(participants.list()) == 1:
        client.conferences(sessionID_to_confsid.get(call_session_id)).update(status='completed')
        print("Call ended")
    data = {
        "status_code": 200,
    }
    resp = jsonify(data)
    return resp
# Route for Landing Page after deploy.
@app.route('/landing.html')
def landing():
    """Render the post-deploy landing page."""
    print("Get Request received!")
    return render_template('landing.html',
                           configuration_error=None)
| 36.728205
| 132
| 0.655962
|
from flask import Flask
from flask import jsonify
from flask import render_template
from flask import request
import os, base64, uuid
from twilio.twiml.voice_response import VoiceResponse, Gather, Dial
from twilio.rest import Client
app = Flask(__name__, static_url_path='/static')
app.config.from_pyfile('local_settings.py')
@app.route('/')
def index():
return render_template('index.html',
configuration_error=None)
sessionID_to_callsid = {}
sessionID_to_confsid = {}
sessionID_to_destNo = {}
def get_session_id(source_number, destination_number):
return 'Conf' + destination_number + '-' + uuid.uuid4().hex
def get_client():
try:
twilio_client = Client(app.config['TWILIO_ACCOUNT_SID'],
app.config['TWILIO_AUTH_TOKEN'])
return twilio_client
except Exception as e:
msg = f"Missing configuration variable: {e}"
return jsonify({'error': msg}), 400
@app.route('/join_conference', methods=['GET', 'POST'])
@app.route('/call_number', methods=['GET', 'POST'])
def join_conference():
source_number = request.form.get('source_number', None)
dest_number = request.form.get('dest_number', None)
print(f"Call Request received! source_number:{source_number}, dest_number:{dest_number}")
if not source_number or not dest_number:
msg = "Missing phone number value. Expected params source_number and dest_number"
return jsonify({'error': msg}), 400
try:
twilio_client = get_client()
session_id = get_session_id(source_number, dest_number)
call = twilio_client.calls.create(record=True,
from_=app.config['TWILIO_NUMBER'],
to=source_number,
url='https://3.137.150.83:8001/voip/api_voip/voip_callback/' + str(session_id),
status_callback_event=['completed'],
status_callback='https://3.137.150.83:8001/voip/api_voip/complete_call/' + str(session_id)
)
sessionID_to_callsid[session_id] = call.sid
sessionID_to_destNo[session_id] = dest_number
print("Initiated a Source number Call, session_id:", session_id)
except Exception as e:
message = e.msg if hasattr(e, 'msg') else str(e)
return jsonify({'error': message}), 400
return jsonify({'message': 'Success!'})
@app.route('/voip_callback/<string:session_id>', methods=['GET', 'POST'])
def voip_callback(session_id):
print("## Conference request received, session id:{} Making a conference call", session_id)
resp = VoiceResponse()
if 'Digits' in request.values:
# Get which digit the caller chose
choice = request.values['Digits']
# Say a different message depending on the caller's choice
if choice == '1':
resp.say('Adding destination number to the conference!')
resp.redirect('https://3.137.150.83:8001/voip/api_voip/add-user/' + session_id)
print(str(resp))
return jsonify(resp)
elif choice == '2':
resp.say('Thank you for calling, have a nice day!')
resp.hangup()
print(str(resp))
return jsonify(resp)
else:
resp.say("Sorry, I don't understand that choice.")
else:
gather = Gather(num_digits=1, action='/voip_callback/' + session_id)
gather.say('Please Press 1 to connect to destination. Press 2 to end the call.')
resp.append(gather)
resp.redirect('https://3.137.150.83:8001/voip/api_voip/voip_callback/' + session_id)
print(str(resp))
return jsonify(resp)
@app.route('/add-user/<string:session_id>', methods=['POST'])
def add_user_to_conf(session_id):
print("# Add user request received, session id:{}", session_id)
destination_number = sessionID_to_destNo.get(session_id)
print("Attemtping to add phone number to call: " + destination_number)
client = get_client()
resp = VoiceResponse()
dial = Dial()
dial.conference(destination_number)
resp.append(dial)
participant = client.conferences(destination_number).participants.create(
from_=app.config['TWILIO_NUMBER'],
to=destination_number,
conference_status_callback='https://3.137.150.83:8001/voip/api_voip/leave/' + session_id,
conference_status_callback_event="leave")
print(participant)
return str(resp)
@app.route('/leave/<string:session_id>', methods=['GET', 'POST'])
def leave(session_id):
event = request.values['SequenceNumber']
conference_sid = request.values['ConferenceSid']
sessionID_to_confsid[session_id] = conference_sid
print("Leave call request:", conference_sid, event, session_id)
if request.values['StatusCallbackEvent'] == 'participant-leave':
print("A Participant Left Call")
client = get_client()
# ends conference call if only 1 participant left
participants = client.conferences(conference_sid).participants
if len(participants.list()) == 1:
client.conferences(conference_sid).update(status='completed')
print("Call ended")
# ends conference call if original caller leaves before callee picks up
elif len(participants.list()) == 0 and event == '2':
client.calls(sessionID_to_callsid.get(session_id)).update(status='completed')
print("Call ended")
resp = VoiceResponse()
return str(resp)
# this is an endpoint to end the conference call if the callee rejects the call
@app.route('/complete_call/<string:call_session_id>', methods=['GET', 'POST'])
def complete_call(call_session_id):
print("## Ending conference call, callee rejected call")
client = get_client()
global sessionID_to_confsid
participants = client.conferences(sessionID_to_confsid.get(call_session_id)).participants
# only does so if 1 participant left in the conference call (i.e. the caller)
if len(participants.list()) == 1:
client.conferences(sessionID_to_confsid.get(call_session_id)).update(status='completed')
print("Call ended")
data = {
"status_code": 200,
}
resp = jsonify(data)
return resp
# Route for Landing Page after deploy.
@app.route('/landing.html')
def landing():
print("Get Request received!")
return render_template('landing.html',
configuration_error=None)
| true
| true
|
f719ee8eea12cfc3dea84e56aaeb16666fde914e
| 133
|
py
|
Python
|
twl/c2.py
|
xiaolinzi-xl/python_imooc
|
07bde890e3ab0ddef4467b0c77ef33614339a657
|
[
"Apache-2.0"
] | null | null | null |
twl/c2.py
|
xiaolinzi-xl/python_imooc
|
07bde890e3ab0ddef4467b0c77ef33614339a657
|
[
"Apache-2.0"
] | null | null | null |
twl/c2.py
|
xiaolinzi-xl/python_imooc
|
07bde890e3ab0ddef4467b0c77ef33614339a657
|
[
"Apache-2.0"
] | null | null | null |
# Demo: applying a function to every element with map().
list_x = [1, 2, 3, 4, 5, 6, 7, 8]


def square(x):
    """Return x squared."""
    return x * x


# map() is lazy; materialise it with list() before printing.
r = map(square, list_x)
print(list(r))
| 12.090909
| 26
| 0.578947
|
list_x = [1,2,3,4,5,6,7,8]
def square(x):
return x*x
r = map(square,list_x)
print(list(r))
| true
| true
|
f719eeaa3c3602f09dba2e13bf498a6011b27cbe
| 787
|
py
|
Python
|
pyspark-utils/wordcounts.py
|
domvwt/uol-ds-tools
|
62348b3e04a2f27ceaa19776fb3024eaaf21d593
|
[
"MIT"
] | 4
|
2020-11-27T06:08:05.000Z
|
2021-04-29T15:57:12.000Z
|
pyspark-utils/wordcounts.py
|
domvwt/uol-ds-tools
|
62348b3e04a2f27ceaa19776fb3024eaaf21d593
|
[
"MIT"
] | null | null | null |
pyspark-utils/wordcounts.py
|
domvwt/uol-ds-tools
|
62348b3e04a2f27ceaa19776fb3024eaaf21d593
|
[
"MIT"
] | 2
|
2020-12-16T11:01:48.000Z
|
2020-12-28T14:02:24.000Z
|
#! /opt/spark/bin/pyspark
# Word-count demo meant to run inside the pyspark shell (see shebang), which
# provides the SparkContext `sc` as a global — this file does not define it.
import re
from pathlib import Path

INPUT_TXT = "~/uol-ds-tools/pyspark-utils/frankenstein.txt"

myfile = Path(INPUT_TXT).expanduser().absolute()
rdd_txt = sc.textFile(f"file:///{myfile}")

# Simple word counts splitting on whitespace
counts = (
    rdd_txt.flatMap(lambda line: line.split())
    .map(lambda word: (word, 1))
    .reduceByKey(lambda a, b: a + b)
    .map(lambda a: (a[1], a[0]))  # swap to (count, word) pairs
)
# NOTE(review): collect()[:20] is an arbitrary first-20, not a top-20 —
# the RDD is never sorted by count.
res1 = counts.collect()[:20]
for i in res1:
    print(i)
print()

# Word counts splitting on non word elements
word_counts = (
    rdd_txt.flatMap(lambda line: re.split(r"\W+", line))
    .map(lambda word: (word, 1))
    .reduceByKey(lambda a, b: a + b)
    .map(lambda a: (a[1], a[0]))
)
res2 = word_counts.collect()[:20]
for i in res2:
    print(i)
print()
| 21.27027
| 59
| 0.64676
|
import re
from pathlib import Path
INPUT_TXT = "~/uol-ds-tools/pyspark-utils/frankenstein.txt"
myfile = Path(INPUT_TXT).expanduser().absolute()
rdd_txt = sc.textFile(f"file:///{myfile}")
counts = (
rdd_txt.flatMap(lambda line: line.split())
.map(lambda word: (word, 1))
.reduceByKey(lambda a, b: a + b)
.map(lambda a: (a[1], a[0]))
)
res1 = counts.collect()[:20]
for i in res1:
print(i)
print()
word_counts = (
rdd_txt.flatMap(lambda line: re.split(r"\W+", line))
.map(lambda word: (word, 1))
.reduceByKey(lambda a, b: a + b)
.map(lambda a: (a[1], a[0]))
)
res2 = word_counts.collect()[:20]
for i in res2:
print(i)
print()
| true
| true
|
f719eeae51beeb20ebbfea489acc9d5269a4d2a2
| 397
|
py
|
Python
|
simpleblog/asgi.py
|
itsmusa/simpleblog
|
fceef520684a8249e119c337898b945689515957
|
[
"MIT"
] | null | null | null |
simpleblog/asgi.py
|
itsmusa/simpleblog
|
fceef520684a8249e119c337898b945689515957
|
[
"MIT"
] | null | null | null |
simpleblog/asgi.py
|
itsmusa/simpleblog
|
fceef520684a8249e119c337898b945689515957
|
[
"MIT"
] | null | null | null |
"""
ASGI config for simpleblog project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'simpleblog.settings')
application = get_asgi_application()
| 23.352941
| 78
| 0.788413
|
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'simpleblog.settings')
application = get_asgi_application()
| true
| true
|
f719ef2757a2269c2a99feca23fcebc07b42be06
| 2,215
|
py
|
Python
|
src/myip.py
|
Fuhrmann/keypirinha-myip
|
5a68e1061d6c597dfc21ab8cd86319c4d32efb07
|
[
"MIT"
] | 13
|
2018-03-29T23:40:04.000Z
|
2021-06-28T19:18:42.000Z
|
src/myip.py
|
Fuhrmann/keypirinha-myip
|
5a68e1061d6c597dfc21ab8cd86319c4d32efb07
|
[
"MIT"
] | null | null | null |
src/myip.py
|
Fuhrmann/keypirinha-myip
|
5a68e1061d6c597dfc21ab8cd86319c4d32efb07
|
[
"MIT"
] | null | null | null |
# Keypirinha launcher (keypirinha.com)
import socket
import keypirinha as kp
import keypirinha_net as kpnet
import keypirinha_util as kpu
class MyIP(kp.Plugin):
    """
    Get your public and local IP directly from Keypirinha.

    Typing the keyword ("ip") suggests two catalog items — public IP (via
    icanhazip.com) and local IP (via a UDP socket) — and executing an item
    copies the address to the clipboard.
    """

    ITEM_CAT = kp.ItemCategory.USER_BASE + 1
    KEYWORD = 'ip'

    def __init__(self):
        super().__init__()
        self._urlopener = kpnet.build_urllib_opener()

    def on_suggest(self, user_input, items_chain):
        """Offer the public/local IP items when the keyword is typed."""
        if user_input.lower() == self.KEYWORD:
            public_ip = self._get_public_ip()
            local_ip = self._get_local_ip()
            self.set_catalog(
                [
                    self.create_item(
                        category=kp.ItemCategory.KEYWORD,
                        label='Your public IP',
                        short_desc=public_ip,
                        target='public_ip',
                        args_hint=kp.ItemArgsHint.FORBIDDEN,
                        hit_hint=kp.ItemHitHint.NOARGS
                    ),
                    self.create_item(
                        category=kp.ItemCategory.KEYWORD,
                        label='Your local IP',
                        short_desc=local_ip,
                        target='local_ip',
                        args_hint=kp.ItemArgsHint.FORBIDDEN,
                        hit_hint=kp.ItemHitHint.NOARGS
                    )
                ]
            )

    def on_execute(self, item, action):
        # The IP is stored in the item's short description; copy it.
        kpu.set_clipboard(item.short_desc())

    def on_events(self, flags):
        # Rebuild the opener when Keypirinha's network options change.
        if flags & kp.Events.NETOPTIONS:
            self._urlopener = kpnet.build_urllib_opener()

    def _get_public_ip(self):
        """Return the machine's public IP as reported by icanhazip.com."""
        try:
            with self._urlopener.open('http://icanhazip.com') as res:
                # Fix: icanhazip terminates the address with a newline; strip
                # it so the catalog entry / clipboard gets just the address.
                return res.read().decode('utf-8').strip()
        except Exception as ex:
            self.err(ex)
            return 'Could not establish your public ip'

    def _get_local_ip(self):
        """Return the LAN-facing local IP chosen for an outbound route."""
        try:
            # Connecting a UDP socket sends no traffic; it only selects the
            # outbound interface whose address we report.  Fix: use a context
            # manager so the socket is closed (the original leaked it).
            with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
                s.connect(("8.8.8.8", 80))
                return s.getsockname()[0]
        except Exception as ex:
            self.err(ex)
            return 'Could not establish your local ip'
| 31.197183
| 69
| 0.531377
|
import socket
import keypirinha as kp
import keypirinha_net as kpnet
import keypirinha_util as kpu
class MyIP(kp.Plugin):
ITEM_CAT = kp.ItemCategory.USER_BASE + 1
KEYWORD = 'ip'
def __init__(self):
super().__init__()
self._urlopener = kpnet.build_urllib_opener()
def on_suggest(self, user_input, items_chain):
if user_input.lower() == self.KEYWORD:
public_ip = self._get_public_ip()
local_ip = self._get_local_ip()
self.set_catalog(
[
self.create_item(
category=kp.ItemCategory.KEYWORD,
label='Your public IP',
short_desc=public_ip,
target='public_ip',
args_hint=kp.ItemArgsHint.FORBIDDEN,
hit_hint=kp.ItemHitHint.NOARGS
),
self.create_item(
category=kp.ItemCategory.KEYWORD,
label='Your local IP',
short_desc=local_ip,
target='local_ip',
args_hint=kp.ItemArgsHint.FORBIDDEN,
hit_hint=kp.ItemHitHint.NOARGS
)
]
)
def on_execute(self, item, action):
kpu.set_clipboard(item.short_desc())
def on_events(self, flags):
if flags & kp.Events.NETOPTIONS:
self._urlopener = kpnet.build_urllib_opener()
def _get_public_ip(self):
try:
with self._urlopener.open('http://icanhazip.com') as res:
return res.read().decode('utf-8')
except Exception as ex:
self.err(ex)
return 'Could not establish your public ip'
def _get_local_ip(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
except Exception as ex:
self.err(ex)
return 'Could not establish your local ip'
| true
| true
|
f719ef5038ede533d676debe6b7851092917855f
| 6,254
|
py
|
Python
|
src/main.py
|
PeterouZh/PyTorch-StudioGAN
|
faef6048d25dadee4fa31b2955f16f7d1ca8e1e2
|
[
"MIT"
] | null | null | null |
src/main.py
|
PeterouZh/PyTorch-StudioGAN
|
faef6048d25dadee4fa31b2955f16f7d1ca8e1e2
|
[
"MIT"
] | null | null | null |
src/main.py
|
PeterouZh/PyTorch-StudioGAN
|
faef6048d25dadee4fa31b2955f16f7d1ca8e1e2
|
[
"MIT"
] | null | null | null |
# PyTorch StudioGAN: https://github.com/POSTECH-CVLab/PyTorch-StudioGAN
# The MIT License (MIT)
# See license file or visit https://github.com/POSTECH-CVLab/PyTorch-StudioGAN for details
# src/main.py
import json
import os
import sys
import random
import warnings
from argparse import ArgumentParser
from utils.misc import *
from utils.make_hdf5 import make_hdf5
from utils.log import make_run_name
from loader import prepare_train_eval
import torch
from torch.backends import cudnn
import torch.multiprocessing as mp
RUN_NAME_FORMAT = (
"{framework}-"
"{phase}-"
"{timestamp}"
)
def main():
    """Entry point: parse CLI options plus a JSON model config, seed the
    RNGs, then run training/evaluation either in-process or by spawning
    one process per GPU when DistributedDataParallel is requested.
    """
    parser = ArgumentParser(add_help=False)
    parser.add_argument('-c', '--config_path', type=str, default='./src/configs/CIFAR10/ContraGAN.json')
    parser.add_argument('--checkpoint_folder', type=str, default=None)
    parser.add_argument('-current', '--load_current', action='store_true', help='whether you load the current or best checkpoint')
    parser.add_argument('--log_output_path', type=str, default=None)
    parser.add_argument('-DDP', '--distributed_data_parallel', action='store_true')
    parser.add_argument('-n', '--nodes', default=1, type=int, metavar='N')
    parser.add_argument('-nr', '--nr', default=0, type=int, help='ranking within the nodes')
    parser.add_argument('--seed', type=int, default=-1, help='seed for generating random numbers')
    parser.add_argument('--num_workers', type=int, default=8, help='')
    parser.add_argument('-sync_bn', '--synchronized_bn', action='store_true', help='whether turn on synchronized batchnorm')
    parser.add_argument('-mpc', '--mixed_precision', action='store_true', help='whether turn on mixed precision training')
    parser.add_argument('-LARS', '--LARS_optimizer', action='store_true', help='whether turn on LARS optimizer')
    parser.add_argument('-rm_API', '--disable_debugging_API', action='store_true', help='whether disable pytorch autograd debugging mode')
    parser.add_argument('--reduce_train_dataset', type=float, default=1.0, help='control the number of train dataset')
    parser.add_argument('--truncated_factor', type=float, default=-1.0, help='factor for truncation trick')
    parser.add_argument('-stat_otf', '--bn_stat_OnTheFly', action='store_true', help='when evaluating, use the statistics of a batch')
    parser.add_argument('-std_stat', '--standing_statistics', action='store_true')
    parser.add_argument('--standing_step', type=int, default=-1, help='# of steps for accumulation batchnorm')
    parser.add_argument('--freeze_layers', type=int, default=-1, help='# of layers for freezing discriminator')
    parser.add_argument('-l', '--load_all_data_in_memory', action='store_true')
    parser.add_argument('-t', '--train', action='store_true')
    parser.add_argument('-e', '--eval', action='store_true')
    parser.add_argument('-s', '--save_images', action='store_true')
    parser.add_argument('-iv', '--image_visualization', action='store_true', help='select whether conduct image visualization')
    parser.add_argument('-knn', '--k_nearest_neighbor', action='store_true', help='select whether conduct k-nearest neighbor analysis')
    parser.add_argument('-itp', '--interpolation', action='store_true', help='whether conduct interpolation analysis')
    parser.add_argument('-fa', '--frequency_analysis', action='store_true', help='whether conduct frequency analysis')
    parser.add_argument('-tsne', '--tsne_analysis', action='store_true', help='whether conduct tsne analysis')
    parser.add_argument('--nrow', type=int, default=10, help='number of rows to plot image canvas')
    parser.add_argument('--ncol', type=int, default=8, help='number of cols to plot image canvas')
    parser.add_argument('--print_every', type=int, default=100, help='control log interval')
    parser.add_argument('--save_every', type=int, default=2000, help='control evaluation and save interval')
    parser.add_argument('--eval_type', type=str, default='test', help='[train/valid/test]')
    from template_lib.v2.config_cfgnode import update_parser_defaults_from_yaml, global_cfg
    update_parser_defaults_from_yaml(parser=parser)
    args = parser.parse_args()
    # At least one action flag must be requested, otherwise show usage and quit.
    requested_actions = (args.train, args.eval, args.save_images,
                         args.image_visualization, args.k_nearest_neighbor,
                         args.interpolation, args.frequency_analysis,
                         args.tsne_analysis)
    if not any(requested_actions):
        parser.print_help(sys.stderr)
        sys.exit(1)
    # Model/architecture settings come from the JSON config; run settings
    # from the CLI namespace.
    if args.config_path is None:
        raise NotImplementedError
    with open(args.config_path) as cfg_file:
        model_configs = json.load(cfg_file)
    train_configs = vars(args)
    # Optionally pre-bake the training set into an HDF5 file held in memory.
    hdf5_path_train = None
    if train_configs['load_all_data_in_memory']:
        hdf5_path_train = make_hdf5(model_configs['data_processing'], train_configs, mode="train")
    # seed == -1 means "pick a random seed"; a fixed seed trades cudnn
    # autotuning (benchmark) for deterministic kernels.
    if train_configs['seed'] == -1:
        train_configs['seed'] = random.randint(1, 4096)
        cudnn.benchmark, cudnn.deterministic = True, False
    else:
        cudnn.benchmark, cudnn.deterministic = False, True
    fix_all_seed(train_configs['seed'])
    gpus_per_node, rank = torch.cuda.device_count(), torch.cuda.current_device()
    world_size = gpus_per_node * train_configs['nodes']
    if world_size == 1:
        warnings.warn('You have chosen a specific GPU. This will completely disable data parallelism.')
    # Run name is "<config file stem>-train-<timestamp>" (".json" stripped).
    config_stem = train_configs['config_path'].split('/')[-1][:-5]
    run_name = make_run_name(RUN_NAME_FORMAT, framework=config_stem, phase='train')
    if train_configs['disable_debugging_API']:
        torch.autograd.set_detect_anomaly(False)
    check_flags(train_configs, model_configs, world_size)
    if train_configs['distributed_data_parallel'] and world_size > 1:
        print("Train the models through DistributedDataParallel (DDP) mode.")
        mp.spawn(prepare_train_eval, nprocs=gpus_per_node,
                 args=(gpus_per_node, world_size, run_name,
                       train_configs, model_configs, hdf5_path_train))
    else:
        prepare_train_eval(rank, gpus_per_node, world_size, run_name,
                           train_configs, model_configs, hdf5_path_train=hdf5_path_train)
# Script entry point.
if __name__ == '__main__':
    main()
| 50.435484
| 138
| 0.714103
|
import json
import os
import sys
import random
import warnings
from argparse import ArgumentParser
from utils.misc import *
from utils.make_hdf5 import make_hdf5
from utils.log import make_run_name
from loader import prepare_train_eval
import torch
from torch.backends import cudnn
import torch.multiprocessing as mp
RUN_NAME_FORMAT = (
"{framework}-"
"{phase}-"
"{timestamp}"
)
def main():
parser = ArgumentParser(add_help=False)
parser.add_argument('-c', '--config_path', type=str, default='./src/configs/CIFAR10/ContraGAN.json')
parser.add_argument('--checkpoint_folder', type=str, default=None)
parser.add_argument('-current', '--load_current', action='store_true', help='whether you load the current or best checkpoint')
parser.add_argument('--log_output_path', type=str, default=None)
parser.add_argument('-DDP', '--distributed_data_parallel', action='store_true')
parser.add_argument('-n', '--nodes', default=1, type=int, metavar='N')
parser.add_argument('-nr', '--nr', default=0, type=int, help='ranking within the nodes')
parser.add_argument('--seed', type=int, default=-1, help='seed for generating random numbers')
parser.add_argument('--num_workers', type=int, default=8, help='')
parser.add_argument('-sync_bn', '--synchronized_bn', action='store_true', help='whether turn on synchronized batchnorm')
parser.add_argument('-mpc', '--mixed_precision', action='store_true', help='whether turn on mixed precision training')
parser.add_argument('-LARS', '--LARS_optimizer', action='store_true', help='whether turn on LARS optimizer')
parser.add_argument('-rm_API', '--disable_debugging_API', action='store_true', help='whether disable pytorch autograd debugging mode')
parser.add_argument('--reduce_train_dataset', type=float, default=1.0, help='control the number of train dataset')
parser.add_argument('--truncated_factor', type=float, default=-1.0, help='factor for truncation trick')
parser.add_argument('-stat_otf', '--bn_stat_OnTheFly', action='store_true', help='when evaluating, use the statistics of a batch')
parser.add_argument('-std_stat', '--standing_statistics', action='store_true')
parser.add_argument('--standing_step', type=int, default=-1, help='# of steps for accumulation batchnorm')
parser.add_argument('--freeze_layers', type=int, default=-1, help='# of layers for freezing discriminator')
parser.add_argument('-l', '--load_all_data_in_memory', action='store_true')
parser.add_argument('-t', '--train', action='store_true')
parser.add_argument('-e', '--eval', action='store_true')
parser.add_argument('-s', '--save_images', action='store_true')
parser.add_argument('-iv', '--image_visualization', action='store_true', help='select whether conduct image visualization')
parser.add_argument('-knn', '--k_nearest_neighbor', action='store_true', help='select whether conduct k-nearest neighbor analysis')
parser.add_argument('-itp', '--interpolation', action='store_true', help='whether conduct interpolation analysis')
parser.add_argument('-fa', '--frequency_analysis', action='store_true', help='whether conduct frequency analysis')
parser.add_argument('-tsne', '--tsne_analysis', action='store_true', help='whether conduct tsne analysis')
parser.add_argument('--nrow', type=int, default=10, help='number of rows to plot image canvas')
parser.add_argument('--ncol', type=int, default=8, help='number of cols to plot image canvas')
parser.add_argument('--print_every', type=int, default=100, help='control log interval')
parser.add_argument('--save_every', type=int, default=2000, help='control evaluation and save interval')
parser.add_argument('--eval_type', type=str, default='test', help='[train/valid/test]')
from template_lib.v2.config_cfgnode import update_parser_defaults_from_yaml, global_cfg
update_parser_defaults_from_yaml(parser=parser)
args = parser.parse_args()
if not args.train and \
not args.eval and \
not args.save_images and \
not args.image_visualization and \
not args.k_nearest_neighbor and \
not args.interpolation and \
not args.frequency_analysis and \
not args.tsne_analysis:
parser.print_help(sys.stderr)
sys.exit(1)
if args.config_path is not None:
with open(args.config_path) as f:
model_configs = json.load(f)
train_configs = vars(args)
else:
raise NotImplementedError
hdf5_path_train = make_hdf5(model_configs['data_processing'], train_configs, mode="train") \
if train_configs['load_all_data_in_memory'] else None
if train_configs['seed'] == -1:
train_configs['seed'] = random.randint(1,4096)
cudnn.benchmark, cudnn.deterministic = True, False
else:
cudnn.benchmark, cudnn.deterministic = False, True
fix_all_seed(train_configs['seed'])
gpus_per_node, rank = torch.cuda.device_count(), torch.cuda.current_device()
world_size = gpus_per_node*train_configs['nodes']
if world_size == 1:
warnings.warn('You have chosen a specific GPU. This will completely disable data parallelism.')
run_name = make_run_name(RUN_NAME_FORMAT, framework=train_configs['config_path'].split('/')[-1][:-5], phase='train')
if train_configs['disable_debugging_API']: torch.autograd.set_detect_anomaly(False)
check_flags(train_configs, model_configs, world_size)
if train_configs['distributed_data_parallel'] and world_size > 1:
print("Train the models through DistributedDataParallel (DDP) mode.")
mp.spawn(prepare_train_eval, nprocs=gpus_per_node, args=(gpus_per_node, world_size, run_name,
train_configs, model_configs, hdf5_path_train))
else:
prepare_train_eval(rank, gpus_per_node, world_size, run_name, train_configs, model_configs, hdf5_path_train=hdf5_path_train)
if __name__ == '__main__':
main()
| true
| true
|
f719f008f608295f63a3a4f7b4b174f389cd19b8
| 6,048
|
py
|
Python
|
test/Scanner/source_scanner-dict.py
|
KastB/scons
|
b6f9defefba687bc1050605ebcf3d816af3c2808
|
[
"MIT"
] | null | null | null |
test/Scanner/source_scanner-dict.py
|
KastB/scons
|
b6f9defefba687bc1050605ebcf3d816af3c2808
|
[
"MIT"
] | null | null | null |
test/Scanner/source_scanner-dict.py
|
KastB/scons
|
b6f9defefba687bc1050605ebcf3d816af3c2808
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that a source_scanner that uses a dictionary to select more
specific scanners for source file suffixes works correctly, even
when it's handed a file suffix that it doesn't know how to scan
(i.e., for which it doesn't have a specific scanner in its dictionary).
"""
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write('build.py', r"""
import sys
with open(sys.argv[1], 'w') as ofp:
for infile in sys.argv[2:]:
with open(infile, 'r') as ifp:
include_prefix = 'include%s ' % infile[-1]
def process(infp, outfp, include_prefix=include_prefix):
for line in infp.readlines():
if line[:len(include_prefix)] == include_prefix:
file = line[len(include_prefix):-1]
with open(file, 'r') as f:
process(f, outfp)
else:
outfp.write(line)
process(ifp, ofp)
sys.exit(0)
""")
# Execute a subsidiary SConscript just to make sure we can
# get at the Scanner keyword from there.
test.write('SConstruct', """
SConscript('SConscript')
""")
test.write('SConscript', """
import re
include1_re = re.compile(r'^include1\s+(\S+)$', re.M)
include2_re = re.compile(r'^include2\s+(\S+)$', re.M)
include3_re = re.compile(r'^include3\s+(\S+)$', re.M)
def k1_scan(node, env, scanpaths, arg=None):
contents = node.get_text_contents()
includes = include1_re.findall(contents)
return includes
def k2_scan(node, env, scanpaths, arg=None):
contents = node.get_text_contents()
includes = include2_re.findall(contents)
return includes
def k3_scan(node, env, scanpaths, arg=None):
contents = node.get_text_contents()
includes = include3_re.findall(contents)
return includes
kscanner = Scanner({'.k1' : Scanner(k1_scan), '.k2': Scanner(k2_scan)})
b = Builder(action=r'%(_python_)s build.py $TARGET $SOURCES',
source_scanner=kscanner)
env = Environment(BUILDERS={'Build':b})
kscanner.add_scanner('.k3', Scanner(k3_scan))
env.Build('aaa', 'aaa.k1')
env.Build('bbb', 'bbb.k2')
env.Build('ccc', 'ccc.k3')
env.Build('ddd', ['ddd.k4', 'aaa.k1', 'bbb.k2', 'ccc.k3'])
""" % locals())
test.write('aaa.k1',
"""aaa.k1 1
line 2
include1 xxx
include2 yyy
include3 zzz
line 6
""")
test.write('bbb.k2',
"""bbb.k2 1
line 2
include1 xxx
include2 yyy
include3 zzz
line 6
""")
test.write('ccc.k3',
"""ccc.k3 1
line 2
include1 xxx
include2 yyy
include3 zzz
line 6
""")
test.write('ddd.k4',
"""ddd.k4 1
line 2
line 3
""")
test.write('xxx', "xxx 1\n")
test.write('yyy', "yyy 1\n")
test.write('zzz', "zzz 1\n")
expect = test.wrap_stdout("""\
%(_python_)s build.py aaa aaa.k1
%(_python_)s build.py bbb bbb.k2
%(_python_)s build.py ccc ccc.k3
%(_python_)s build.py ddd ddd.k4 aaa.k1 bbb.k2 ccc.k3
""" % locals())
test.run(stdout=expect)
expect_aaa = 'aaa.k1 1\nline 2\nxxx 1\ninclude2 yyy\ninclude3 zzz\nline 6\n'
expect_bbb = 'bbb.k2 1\nline 2\ninclude1 xxx\nyyy 1\ninclude3 zzz\nline 6\n'
expect_ccc = 'ccc.k3 1\nline 2\ninclude1 xxx\ninclude2 yyy\nzzz 1\nline 6\n'
expect_ddd = 'ddd.k4 1\nline 2\nline 3\n' + expect_aaa + expect_bbb + expect_ccc
test.must_match('aaa', expect_aaa, mode='r')
test.must_match('bbb', expect_bbb, mode='r')
test.must_match('ccc', expect_ccc, mode='r')
test.must_match('ddd', expect_ddd, mode='r')
test.up_to_date(arguments = '.')
test.write('zzz', "zzz 2\n")
expect = test.wrap_stdout("""\
%(_python_)s build.py ccc ccc.k3
%(_python_)s build.py ddd ddd.k4 aaa.k1 bbb.k2 ccc.k3
""" % locals())
test.run(stdout=expect)
expect_ccc = 'ccc.k3 1\nline 2\ninclude1 xxx\ninclude2 yyy\nzzz 2\nline 6\n'
expect_ddd = 'ddd.k4 1\nline 2\nline 3\n' + expect_aaa + expect_bbb + expect_ccc
test.must_match('bbb', expect_bbb, mode='r')
test.must_match('ddd', expect_ddd, mode='r')
test.write('yyy', "yyy 2\n")
expect = test.wrap_stdout("""\
%(_python_)s build.py bbb bbb.k2
%(_python_)s build.py ddd ddd.k4 aaa.k1 bbb.k2 ccc.k3
""" % locals())
test.run(stdout=expect)
expect_bbb = 'bbb.k2 1\nline 2\ninclude1 xxx\nyyy 2\ninclude3 zzz\nline 6\n'
expect_ddd = 'ddd.k4 1\nline 2\nline 3\n' + expect_aaa + expect_bbb + expect_ccc
test.must_match('bbb', expect_bbb, mode='r')
test.must_match('ddd', expect_ddd, mode='r')
test.write('xxx', "xxx 2\n")
expect = test.wrap_stdout("""\
%(_python_)s build.py aaa aaa.k1
%(_python_)s build.py ddd ddd.k4 aaa.k1 bbb.k2 ccc.k3
""" % locals())
test.run(stdout=expect)
expect_aaa = 'aaa.k1 1\nline 2\nxxx 2\ninclude2 yyy\ninclude3 zzz\nline 6\n'
expect_ddd = 'ddd.k4 1\nline 2\nline 3\n' + expect_aaa + expect_bbb + expect_ccc
test.must_match('aaa', expect_aaa, mode='r')
test.must_match('ddd', expect_ddd, mode='r')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 26.88
| 80
| 0.687335
|
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write('build.py', r"""
import sys
with open(sys.argv[1], 'w') as ofp:
for infile in sys.argv[2:]:
with open(infile, 'r') as ifp:
include_prefix = 'include%s ' % infile[-1]
def process(infp, outfp, include_prefix=include_prefix):
for line in infp.readlines():
if line[:len(include_prefix)] == include_prefix:
file = line[len(include_prefix):-1]
with open(file, 'r') as f:
process(f, outfp)
else:
outfp.write(line)
process(ifp, ofp)
sys.exit(0)
""")
test.write('SConstruct', """
SConscript('SConscript')
""")
test.write('SConscript', """
import re
include1_re = re.compile(r'^include1\s+(\S+)$', re.M)
include2_re = re.compile(r'^include2\s+(\S+)$', re.M)
include3_re = re.compile(r'^include3\s+(\S+)$', re.M)
def k1_scan(node, env, scanpaths, arg=None):
contents = node.get_text_contents()
includes = include1_re.findall(contents)
return includes
def k2_scan(node, env, scanpaths, arg=None):
contents = node.get_text_contents()
includes = include2_re.findall(contents)
return includes
def k3_scan(node, env, scanpaths, arg=None):
contents = node.get_text_contents()
includes = include3_re.findall(contents)
return includes
kscanner = Scanner({'.k1' : Scanner(k1_scan), '.k2': Scanner(k2_scan)})
b = Builder(action=r'%(_python_)s build.py $TARGET $SOURCES',
source_scanner=kscanner)
env = Environment(BUILDERS={'Build':b})
kscanner.add_scanner('.k3', Scanner(k3_scan))
env.Build('aaa', 'aaa.k1')
env.Build('bbb', 'bbb.k2')
env.Build('ccc', 'ccc.k3')
env.Build('ddd', ['ddd.k4', 'aaa.k1', 'bbb.k2', 'ccc.k3'])
""" % locals())
test.write('aaa.k1',
"""aaa.k1 1
line 2
include1 xxx
include2 yyy
include3 zzz
line 6
""")
test.write('bbb.k2',
"""bbb.k2 1
line 2
include1 xxx
include2 yyy
include3 zzz
line 6
""")
test.write('ccc.k3',
"""ccc.k3 1
line 2
include1 xxx
include2 yyy
include3 zzz
line 6
""")
test.write('ddd.k4',
"""ddd.k4 1
line 2
line 3
""")
test.write('xxx', "xxx 1\n")
test.write('yyy', "yyy 1\n")
test.write('zzz', "zzz 1\n")
expect = test.wrap_stdout("""\
%(_python_)s build.py aaa aaa.k1
%(_python_)s build.py bbb bbb.k2
%(_python_)s build.py ccc ccc.k3
%(_python_)s build.py ddd ddd.k4 aaa.k1 bbb.k2 ccc.k3
""" % locals())
test.run(stdout=expect)
expect_aaa = 'aaa.k1 1\nline 2\nxxx 1\ninclude2 yyy\ninclude3 zzz\nline 6\n'
expect_bbb = 'bbb.k2 1\nline 2\ninclude1 xxx\nyyy 1\ninclude3 zzz\nline 6\n'
expect_ccc = 'ccc.k3 1\nline 2\ninclude1 xxx\ninclude2 yyy\nzzz 1\nline 6\n'
expect_ddd = 'ddd.k4 1\nline 2\nline 3\n' + expect_aaa + expect_bbb + expect_ccc
test.must_match('aaa', expect_aaa, mode='r')
test.must_match('bbb', expect_bbb, mode='r')
test.must_match('ccc', expect_ccc, mode='r')
test.must_match('ddd', expect_ddd, mode='r')
test.up_to_date(arguments = '.')
test.write('zzz', "zzz 2\n")
expect = test.wrap_stdout("""\
%(_python_)s build.py ccc ccc.k3
%(_python_)s build.py ddd ddd.k4 aaa.k1 bbb.k2 ccc.k3
""" % locals())
test.run(stdout=expect)
expect_ccc = 'ccc.k3 1\nline 2\ninclude1 xxx\ninclude2 yyy\nzzz 2\nline 6\n'
expect_ddd = 'ddd.k4 1\nline 2\nline 3\n' + expect_aaa + expect_bbb + expect_ccc
test.must_match('bbb', expect_bbb, mode='r')
test.must_match('ddd', expect_ddd, mode='r')
test.write('yyy', "yyy 2\n")
expect = test.wrap_stdout("""\
%(_python_)s build.py bbb bbb.k2
%(_python_)s build.py ddd ddd.k4 aaa.k1 bbb.k2 ccc.k3
""" % locals())
test.run(stdout=expect)
expect_bbb = 'bbb.k2 1\nline 2\ninclude1 xxx\nyyy 2\ninclude3 zzz\nline 6\n'
expect_ddd = 'ddd.k4 1\nline 2\nline 3\n' + expect_aaa + expect_bbb + expect_ccc
test.must_match('bbb', expect_bbb, mode='r')
test.must_match('ddd', expect_ddd, mode='r')
test.write('xxx', "xxx 2\n")
expect = test.wrap_stdout("""\
%(_python_)s build.py aaa aaa.k1
%(_python_)s build.py ddd ddd.k4 aaa.k1 bbb.k2 ccc.k3
""" % locals())
test.run(stdout=expect)
expect_aaa = 'aaa.k1 1\nline 2\nxxx 2\ninclude2 yyy\ninclude3 zzz\nline 6\n'
expect_ddd = 'ddd.k4 1\nline 2\nline 3\n' + expect_aaa + expect_bbb + expect_ccc
test.must_match('aaa', expect_aaa, mode='r')
test.must_match('ddd', expect_ddd, mode='r')
test.pass_test()
| true
| true
|
f719f0382623361ba7540988b5ee46b2739e8570
| 1,237
|
py
|
Python
|
HW3/Add-command/cloudmesh_numpy/cloudmesh_numpy/plugins/cm_shell_numpy.py
|
futuresystems/465-git4hiroaki
|
bfd9068e0d074d7b6132844dc0f92780bf63bcb9
|
[
"Apache-2.0"
] | null | null | null |
HW3/Add-command/cloudmesh_numpy/cloudmesh_numpy/plugins/cm_shell_numpy.py
|
futuresystems/465-git4hiroaki
|
bfd9068e0d074d7b6132844dc0f92780bf63bcb9
|
[
"Apache-2.0"
] | null | null | null |
HW3/Add-command/cloudmesh_numpy/cloudmesh_numpy/plugins/cm_shell_numpy.py
|
futuresystems/465-git4hiroaki
|
bfd9068e0d074d7b6132844dc0f92780bf63bcb9
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import os
from cmd3.console import Console
from cmd3.shell import command
from cloudmesh_numpy.command_numpy import command_numpy
class cm_shell_numpy:
    """cmd3 shell plugin exposing a `numpy` command.

    NOTE(review): the command body actually reachability-tests a host via
    command_numpy.status(); the name `numpy` looks like a template
    leftover from a ping-style command -- confirm intent.
    """
    def activate_cm_shell_numpy(self):
        # Called by cmd3 on activation; registers this command under the
        # 'mycommands' help topic.
        self.register_command_topic('mycommands', 'numpy')
    @command
    def do_numpy(self, args, arguments):
        # The docstring below is parsed by docopt at runtime (via the
        # @command decorator) -- its Usage section defines the CLI and
        # must not be edited casually.
        """
        ::
            Usage:
                numpy NAME
            tests via ping if the host ith the give NAME is reachable
            Arguments:
                NAME Name of the machine to test
            Options:
                -v verbose mode
        """
        # pprint(arguments)
        if arguments["NAME"] is None:
            Console.error("Please specify a host name")
        else:
            host = arguments["NAME"]
            Console.info("trying to reach {0}".format(host))
            # status() is expected to be truthy when the host answers --
            # TODO confirm command_numpy.status() semantics.
            status = command_numpy.status(host)
            if status:
                Console.info("machine " + host + " has been found. ok.")
            else:
                Console.error("machine " + host + " not reachable. error.")
        pass
# Manual smoke test.
# NOTE(review): `command` here shadows the @command decorator imported
# above, and do_numpy is invoked with a single positional argument even
# though the def takes (self, args, arguments) -- presumably the @command
# decorator rewrites the call signature; verify before relying on this.
if __name__ == '__main__':
    command = cm_shell_numpy()
    command.do_numpy("iu.edu")
    command.do_numpy("iu.edu-wrong")
| 24.254902
| 75
| 0.579628
|
from __future__ import print_function
import os
from cmd3.console import Console
from cmd3.shell import command
from cloudmesh_numpy.command_numpy import command_numpy
class cm_shell_numpy:
def activate_cm_shell_numpy(self):
self.register_command_topic('mycommands', 'numpy')
@command
def do_numpy(self, args, arguments):
if arguments["NAME"] is None:
Console.error("Please specify a host name")
else:
host = arguments["NAME"]
Console.info("trying to reach {0}".format(host))
status = command_numpy.status(host)
if status:
Console.info("machine " + host + " has been found. ok.")
else:
Console.error("machine " + host + " not reachable. error.")
pass
if __name__ == '__main__':
command = cm_shell_numpy()
command.do_numpy("iu.edu")
command.do_numpy("iu.edu-wrong")
| true
| true
|
f719f0690f45a487b47db964c4e0e9f736b885dc
| 20,953
|
py
|
Python
|
anyex/bitbank.py
|
ttwishing/anyex
|
cfd1f2f04ab992b790add4843aafff91e5773cbf
|
[
"MIT"
] | null | null | null |
anyex/bitbank.py
|
ttwishing/anyex
|
cfd1f2f04ab992b790add4843aafff91e5773cbf
|
[
"MIT"
] | null | null | null |
anyex/bitbank.py
|
ttwishing/anyex
|
cfd1f2f04ab992b790add4843aafff91e5773cbf
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/anyex/anyex/blob/master/CONTRIBUTING.md#how-to-contribute-code
from anyex.base.exchange import Exchange
from anyex.base.errors import ExchangeError
from anyex.base.errors import AuthenticationError
from anyex.base.errors import PermissionDenied
from anyex.base.errors import InsufficientFunds
from anyex.base.errors import InvalidOrder
from anyex.base.errors import OrderNotFound
from anyex.base.errors import InvalidNonce
class bitbank (Exchange):
    def describe(self):
        """Return the static exchange description consumed by the base
        Exchange class: capabilities, timeframes, endpoints, hard-coded
        markets, fees, precision, and the mapping from bitbank numeric
        error codes to anyex exception classes.
        """
        return self.deep_extend(super(bitbank, self).describe(), {
            'id': 'bitbank',
            'name': 'bitbank',
            'countries': 'JP',
            'version': 'v1',
            'has': {
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchMyTrades': True,
                'fetchDepositAddress': True,
                'withdraw': True,
            },
            # unified timeframe -> bitbank "candle-type" parameter
            'timeframes': {
                '1m': '1min',
                '5m': '5min',
                '15m': '15min',
                '30m': '30min',
                '1h': '1hour',
                '4h': '4hour',
                '8h': '8hour',
                '12h': '12hour',
                '1d': '1day',
                '1w': '1week',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/37808081-b87f2d9c-2e59-11e8-894d-c1900b7584fe.jpg',
                # market data and account endpoints live on different hosts
                'api': {
                    'public': 'https://public.bitbank.cc',
                    'private': 'https://api.bitbank.cc',
                },
                'www': 'https://bitbank.cc/',
                'doc': 'https://docs.bitbank.cc/',
                'fees': 'https://bitbank.cc/docs/fees/',
            },
            'api': {
                'public': {
                    'get': [
                        '{pair}/ticker',
                        '{pair}/depth',
                        '{pair}/transactions',
                        '{pair}/transactions/{YYYYMMDD}',
                        '{pair}/candlestick/{candle-type}/{YYYYMMDD}',
                    ],
                },
                'private': {
                    'get': [
                        'user/assets',
                        'user/spot/order',
                        'user/spot/active_orders',
                        'user/spot/trade_history',
                        'user/withdrawal_account',
                    ],
                    'post': [
                        'user/spot/order',
                        'user/spot/cancel_order',
                        'user/spot/cancel_orders',
                        'user/spot/orders_info',
                        'user/request_withdrawal',
                    ],
                },
            },
            # markets are hard-coded (no fetchMarkets endpoint is used);
            # note BCH maps to the exchange id 'bcc'
            'markets': {
                'BCH/BTC': {'id': 'bcc_btc', 'symbol': 'BCH/BTC', 'base': 'BCH', 'quote': 'BTC', 'baseId': 'bcc', 'quoteId': 'btc'},
                'BCH/JPY': {'id': 'bcc_jpy', 'symbol': 'BCH/JPY', 'base': 'BCH', 'quote': 'JPY', 'baseId': 'bcc', 'quoteId': 'jpy'},
                'MONA/BTC': {'id': 'mona_btc', 'symbol': 'MONA/BTC', 'base': 'MONA', 'quote': 'BTC', 'baseId': 'mona', 'quoteId': 'btc'},
                'MONA/JPY': {'id': 'mona_jpy', 'symbol': 'MONA/JPY', 'base': 'MONA', 'quote': 'JPY', 'baseId': 'mona', 'quoteId': 'jpy'},
                'ETH/BTC': {'id': 'eth_btc', 'symbol': 'ETH/BTC', 'base': 'ETH', 'quote': 'BTC', 'baseId': 'eth', 'quoteId': 'btc'},
                'LTC/BTC': {'id': 'ltc_btc', 'symbol': 'LTC/BTC', 'base': 'LTC', 'quote': 'BTC', 'baseId': 'ltc', 'quoteId': 'btc'},
                'XRP/JPY': {'id': 'xrp_jpy', 'symbol': 'XRP/JPY', 'base': 'XRP', 'quote': 'JPY', 'baseId': 'xrp', 'quoteId': 'jpy'},
                'BTC/JPY': {'id': 'btc_jpy', 'symbol': 'BTC/JPY', 'base': 'BTC', 'quote': 'JPY', 'baseId': 'btc', 'quoteId': 'jpy'},
            },
            'fees': {
                'trading': {
                    # only temporarily
                    'maker': 0.0,
                    'taker': 0.0,
                },
                'funding': {
                    'withdraw': {
                        # 'JPY': amount => amount > 756 if 30000 else 540,
                        'BTC': 0.001,
                        'LTC': 0.001,
                        'XRP': 0.15,
                        'ETH': 0.0005,
                        'MONA': 0.001,
                        'BCC': 0.001,
                    },
                },
            },
            'precision': {
                'price': 8,
                'amount': 8,
            },
            # bitbank numeric error code -> anyex exception class
            'exceptions': {
                '20001': AuthenticationError,
                '20002': AuthenticationError,
                '20003': AuthenticationError,
                '20005': AuthenticationError,
                '20004': InvalidNonce,
                '40020': InvalidOrder,
                '40021': InvalidOrder,
                '40025': ExchangeError,
                '40013': OrderNotFound,
                '40014': OrderNotFound,
                '50008': PermissionDenied,
                '50009': OrderNotFound,
                '50010': OrderNotFound,
                '60001': InsufficientFunds,
            },
        })
    def parse_ticker(self, ticker, market=None):
        """Convert a raw bitbank ticker dict into the unified ticker
        structure.

        NOTE(review): *market* is required in practice (its 'symbol' is
        read unconditionally) despite the None default, and 'timestamp'
        is assumed to already be in milliseconds -- confirm against the
        API docs.
        """
        symbol = market['symbol']
        timestamp = ticker['timestamp']
        last = float(ticker['last'])
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': float(ticker['high']),
            'low': float(ticker['low']),
            'bid': float(ticker['buy']),
            'bidVolume': None,
            'ask': float(ticker['sell']),
            'askVolume': None,
            'vwap': None,
            'open': None,
            'close': last,
            'last': last,
            'previousClose': None,
            'change': None,
            'percentage': None,
            'average': None,
            'baseVolume': float(ticker['vol']),
            'quoteVolume': None,
            'info': ticker,
        }
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
response = self.publicGetPairTicker(self.extend({
'pair': market['id'],
}, params))
return self.parse_ticker(response['data'], market)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
response = self.publicGetPairDepth(self.extend({
'pair': self.market_id(symbol),
}, params))
orderbook = response['data']
return self.parse_order_book(orderbook, orderbook['timestamp'])
def parse_trade(self, trade, market=None):
timestamp = trade['executed_at']
price = float(trade['price'])
amount = float(trade['amount'])
symbol = market['symbol']
cost = self.cost_to_precision(symbol, price * amount)
id = self.safe_string(trade, 'transaction_id')
if not id:
id = self.safe_string(trade, 'trade_id')
fee = None
if 'fee_amount_quote' in trade:
fee = {
'currency': market['quote'],
'cost': self.safe_float(trade, 'fee_amount_quote'),
}
return {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': self.safe_string(trade, 'order_id'),
'type': self.safe_string(trade, 'type'),
'side': trade['side'],
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
'info': trade,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
trades = self.publicGetPairTransactions(self.extend({
'pair': market['id'],
}, params))
return self.parse_trades(trades['data']['transactions'], market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='5m', since=None, limit=None):
return [
ohlcv[5],
float(ohlcv[0]),
float(ohlcv[1]),
float(ohlcv[2]),
float(ohlcv[3]),
float(ohlcv[4]),
]
def fetch_ohlcv(self, symbol, timeframe='5m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
date = self.milliseconds()
date = self.ymd(date)
date = date.split('-')
response = self.publicGetPairCandlestickCandleTypeYYYYMMDD(self.extend({
'pair': market['id'],
'candle-type': self.timeframes[timeframe],
'YYYYMMDD': ''.join(date),
}, params))
ohlcv = response['data']['candlestick'][0]['ohlcv']
return self.parse_ohlcvs(ohlcv, market, timeframe, since, limit)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetUserAssets(params)
result = {'info': response}
balances = response['data']['assets']
for i in range(0, len(balances)):
balance = balances[i]
id = balance['asset']
code = id
if id in self.currencies_by_id:
code = self.currencies_by_id[id]['code']
account = {
'free': float(balance['free_amount']),
'used': float(balance['locked_amount']),
'total': float(balance['onhand_amount']),
}
result[code] = account
return self.parse_balance(result)
def parse_order(self, order, market=None):
marketId = self.safe_string(order, 'pair')
symbol = None
if marketId and not market and(marketId in list(self.marketsById.keys())):
market = self.marketsById[marketId]
if market:
symbol = market['symbol']
timestamp = self.safe_integer(order, 'ordered_at') * 1000
price = float(order['price'])
amount = self.safe_float(order, 'start_amount')
filled = self.safe_float(order, 'executed_amount')
remaining = self.safe_float(order, 'remaining_amount')
cost = filled * self.safe_float(order, 'average_price')
status = self.safe_string(order, 'status')
# UNFILLED
# PARTIALLY_FILLED
# FULLY_FILLED
# CANCELED_UNFILLED
# CANCELED_PARTIALLY_FILLED
if status == 'FULLY_FILLED':
status = 'closed'
elif status == 'CANCELED_UNFILLED' or status == 'CANCELED_PARTIALLY_FILLED':
status = 'canceled'
else:
status = 'open'
return {
'id': self.safe_string(order, 'order_id'),
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': order['type'],
'side': order['side'],
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'trades': None,
'fee': None,
'info': order,
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
if price is None:
raise InvalidOrder(self.id + ' createOrder requires a price argument for both market and limit orders')
request = {
'pair': market['id'],
'amount': self.amount_to_string(symbol, amount),
'price': self.price_to_precision(symbol, price),
'side': side,
'type': type,
}
response = self.privatePostUserSpotOrder(self.extend(request, params))
id = response['data']['order_id']
order = self.parse_order(response['data'], market)
self.orders[id] = order
return order
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
market = self.market(symbol)
response = self.privatePostUserSpotCancelOrder(self.extend({
'order_id': id,
'pair': market['id'],
}, params))
return response['data']
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
market = self.market(symbol)
response = self.privateGetUserSpotOrder(self.extend({
'order_id': id,
'pair': market['id'],
}, params))
return self.parse_order(response['data'])
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
if limit:
request['count'] = limit
if since:
request['since'] = int(since / 1000)
orders = self.privateGetUserSpotActiveOrders(self.extend(request, params))
return self.parse_orders(orders['data']['orders'], market, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
market = None
if symbol is not None:
self.load_markets()
market = self.market(symbol)
request = {}
if market is not None:
request['pair'] = market['id']
if limit is not None:
request['count'] = limit
if since is not None:
request['since'] = int(since / 1000)
trades = self.privateGetUserSpotTradeHistory(self.extend(request, params))
return self.parse_trades(trades['data']['trades'], market, since, limit)
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
response = self.privateGetUserWithdrawalAccount(self.extend({
'asset': currency['id'],
}, params))
# Not sure about self if there could be more accounts...
accounts = response['data']['accounts']
address = self.safe_string(accounts[0], 'address')
status = 'ok' if address else 'none'
return {
'currency': currency,
'address': address,
'tag': None,
'status': status,
'info': response,
}
def withdraw(self, code, amount, address, tag=None, params={}):
if not('uuid' in list(params.keys())):
raise ExchangeError(self.id + ' uuid is required for withdrawal')
self.load_markets()
currency = self.currency(code)
response = self.privatePostUserRequestWithdrawal(self.extend({
'asset': currency['id'],
'amount': amount,
}, params))
return {
'info': response,
'id': response['data']['txid'],
}
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = self.urls['api'][api] + '/'
if api == 'public':
url += self.implode_params(path, params)
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = str(self.nonce())
auth = nonce
url += self.version + '/' + self.implode_params(path, params)
if method == 'POST':
body = self.json(query)
auth += body
else:
auth += '/' + self.version + '/' + path
if query:
query = self.urlencode(query)
url += '?' + query
auth += '?' + query
headers = {
'Content-Type': 'application/json',
'ACCESS-KEY': self.apiKey,
'ACCESS-NONCE': nonce,
'ACCESS-SIGNATURE': self.hmac(self.encode(auth), self.encode(self.secret)),
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = self.fetch2(path, api, method, params, headers, body)
success = self.safe_integer(response, 'success')
data = self.safe_value(response, 'data')
if not success or not data:
errorMessages = {
'10000': 'URL does not exist',
'10001': 'A system error occurred. Please contact support',
'10002': 'Invalid JSON format. Please check the contents of transmission',
'10003': 'A system error occurred. Please contact support',
'10005': 'A timeout error occurred. Please wait for a while and try again',
'20001': 'API authentication failed',
'20002': 'Illegal API key',
'20003': 'API key does not exist',
'20004': 'API Nonce does not exist',
'20005': 'API signature does not exist',
'20011': 'Two-step verification failed',
'20014': 'SMS authentication failed',
'30001': 'Please specify the order quantity',
'30006': 'Please specify the order ID',
'30007': 'Please specify the order ID array',
'30009': 'Please specify the stock',
'30012': 'Please specify the order price',
'30013': 'Trade Please specify either',
'30015': 'Please specify the order type',
'30016': 'Please specify asset name',
'30019': 'Please specify uuid',
'30039': 'Please specify the amount to be withdrawn',
'40001': 'The order quantity is invalid',
'40006': 'Count value is invalid',
'40007': 'End time is invalid',
'40008': 'end_id Value is invalid',
'40009': 'The from_id value is invalid',
'40013': 'The order ID is invalid',
'40014': 'The order ID array is invalid',
'40015': 'Too many specified orders',
'40017': 'Incorrect issue name',
'40020': 'The order price is invalid',
'40021': 'The trading classification is invalid',
'40022': 'Start date is invalid',
'40024': 'The order type is invalid',
'40025': 'Incorrect asset name',
'40028': 'uuid is invalid',
'40048': 'The amount of withdrawal is illegal',
'50003': 'Currently, self account is in a state where you can not perform the operation you specified. Please contact support',
'50004': 'Currently, self account is temporarily registered. Please try again after registering your account',
'50005': 'Currently, self account is locked. Please contact support',
'50006': 'Currently, self account is locked. Please contact support',
'50008': 'User identification has not been completed',
'50009': 'Your order does not exist',
'50010': 'Can not cancel specified order',
'50011': 'API not found',
'60001': 'The number of possessions is insufficient',
'60002': 'It exceeds the quantity upper limit of the tender buying order',
'60003': 'The specified quantity exceeds the limit',
'60004': 'The specified quantity is below the threshold',
'60005': 'The specified price is above the limit',
'60006': 'The specified price is below the lower limit',
'70001': 'A system error occurred. Please contact support',
'70002': 'A system error occurred. Please contact support',
'70003': 'A system error occurred. Please contact support',
'70004': 'We are unable to accept orders as the transaction is currently suspended',
'70005': 'Order can not be accepted because purchase order is currently suspended',
'70006': 'We can not accept orders because we are currently unsubscribed ',
}
errorClasses = self.exceptions
code = self.safe_string(data, 'code')
message = self.safe_string(errorMessages, code, 'Error')
ErrorClass = self.safe_value(errorClasses, code)
if ErrorClass is not None:
raise ErrorClass(message)
else:
raise ExchangeError(self.id + ' ' + self.json(response))
return response
| 41.906
| 143
| 0.509617
|
nge import Exchange
from anyex.base.errors import ExchangeError
from anyex.base.errors import AuthenticationError
from anyex.base.errors import PermissionDenied
from anyex.base.errors import InsufficientFunds
from anyex.base.errors import InvalidOrder
from anyex.base.errors import OrderNotFound
from anyex.base.errors import InvalidNonce
class bitbank (Exchange):
def describe(self):
    """Return the static exchange description merged over the Exchange base.

    Declares bitbank's identity, unified-API capabilities, timeframe
    codes, REST endpoints, hard-coded markets, fees, precision, and the
    mapping from bitbank numeric error codes to exception classes.
    """
    return self.deep_extend(super(bitbank, self).describe(), {
        'id': 'bitbank',
        'name': 'bitbank',
        'countries': 'JP',
        'version': 'v1',
        # Unified-API features implemented by this subclass.
        'has': {
            'fetchOHLCV': True,
            'fetchOpenOrders': True,
            'fetchMyTrades': True,
            'fetchDepositAddress': True,
            'withdraw': True,
        },
        # Unified timeframe code -> bitbank "candle-type" path component.
        'timeframes': {
            '1m': '1min',
            '5m': '5min',
            '15m': '15min',
            '30m': '30min',
            '1h': '1hour',
            '4h': '4hour',
            '8h': '8hour',
            '12h': '12hour',
            '1d': '1day',
            '1w': '1week',
        },
        'urls': {
            'logo': 'https://user-images.githubusercontent.com/1294454/37808081-b87f2d9c-2e59-11e8-894d-c1900b7584fe.jpg',
            # Public (market data) and private (signed) calls use different hosts.
            'api': {
                'public': 'https://public.bitbank.cc',
                'private': 'https://api.bitbank.cc',
            },
            'www': 'https://bitbank.cc/',
            'doc': 'https://docs.bitbank.cc/',
            'fees': 'https://bitbank.cc/docs/fees/',
        },
        # Endpoint templates; {placeholders} are filled by implode_params.
        'api': {
            'public': {
                'get': [
                    '{pair}/ticker',
                    '{pair}/depth',
                    '{pair}/transactions',
                    '{pair}/transactions/{YYYYMMDD}',
                    '{pair}/candlestick/{candle-type}/{YYYYMMDD}',
                ],
            },
            'private': {
                'get': [
                    'user/assets',
                    'user/spot/order',
                    'user/spot/active_orders',
                    'user/spot/trade_history',
                    'user/withdrawal_account',
                ],
                'post': [
                    'user/spot/order',
                    'user/spot/cancel_order',
                    'user/spot/cancel_orders',
                    'user/spot/orders_info',
                    'user/request_withdrawal',
                ],
            },
        },
        # Markets are hard-coded; note BCH uses bitbank's legacy 'bcc' id.
        'markets': {
            'BCH/BTC': {'id': 'bcc_btc', 'symbol': 'BCH/BTC', 'base': 'BCH', 'quote': 'BTC', 'baseId': 'bcc', 'quoteId': 'btc'},
            'BCH/JPY': {'id': 'bcc_jpy', 'symbol': 'BCH/JPY', 'base': 'BCH', 'quote': 'JPY', 'baseId': 'bcc', 'quoteId': 'jpy'},
            'MONA/BTC': {'id': 'mona_btc', 'symbol': 'MONA/BTC', 'base': 'MONA', 'quote': 'BTC', 'baseId': 'mona', 'quoteId': 'btc'},
            'MONA/JPY': {'id': 'mona_jpy', 'symbol': 'MONA/JPY', 'base': 'MONA', 'quote': 'JPY', 'baseId': 'mona', 'quoteId': 'jpy'},
            'ETH/BTC': {'id': 'eth_btc', 'symbol': 'ETH/BTC', 'base': 'ETH', 'quote': 'BTC', 'baseId': 'eth', 'quoteId': 'btc'},
            'LTC/BTC': {'id': 'ltc_btc', 'symbol': 'LTC/BTC', 'base': 'LTC', 'quote': 'BTC', 'baseId': 'ltc', 'quoteId': 'btc'},
            'XRP/JPY': {'id': 'xrp_jpy', 'symbol': 'XRP/JPY', 'base': 'XRP', 'quote': 'JPY', 'baseId': 'xrp', 'quoteId': 'jpy'},
            'BTC/JPY': {'id': 'btc_jpy', 'symbol': 'BTC/JPY', 'base': 'BTC', 'quote': 'JPY', 'baseId': 'btc', 'quoteId': 'jpy'},
        },
        'fees': {
            'trading': {
                'maker': 0.0,
                'taker': 0.0,
            },
            'funding': {
                # Flat withdrawal fees per currency.
                'withdraw': {
                    'BTC': 0.001,
                    'LTC': 0.001,
                    'XRP': 0.15,
                    'ETH': 0.0005,
                    'MONA': 0.001,
                    'BCC': 0.001,
                },
            },
        },
        'precision': {
            'price': 8,
            'amount': 8,
        },
        # bitbank numeric error code -> exception class (see self.request).
        'exceptions': {
            '20001': AuthenticationError,
            '20002': AuthenticationError,
            '20003': AuthenticationError,
            '20005': AuthenticationError,
            '20004': InvalidNonce,
            '40020': InvalidOrder,
            '40021': InvalidOrder,
            '40025': ExchangeError,
            '40013': OrderNotFound,
            '40014': OrderNotFound,
            '50008': PermissionDenied,
            '50009': OrderNotFound,
            '50010': OrderNotFound,
            '60001': InsufficientFunds,
        },
    })
def parse_ticker(self, ticker, market=None):
    """Convert a raw bitbank ticker payload into the unified ticker dict.

    NOTE(review): market['symbol'] is read unconditionally even though
    *market* defaults to None — callers must always supply a market;
    confirm before calling externally.
    """
    symbol = market['symbol']
    timestamp = ticker['timestamp']
    last = float(ticker['last'])
    # Fields bitbank does not provide are filled with None.
    return {
        'symbol': symbol,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'high': float(ticker['high']),
        'low': float(ticker['low']),
        'bid': float(ticker['buy']),
        'bidVolume': None,
        'ask': float(ticker['sell']),
        'askVolume': None,
        'vwap': None,
        'open': None,
        'close': last,
        'last': last,
        'previousClose': None,
        'change': None,
        'percentage': None,
        'average': None,
        'baseVolume': float(ticker['vol']),
        'quoteVolume': None,
        'info': ticker,
    }
def fetch_ticker(self, symbol, params={}):
    """Fetch the latest ticker for *symbol* from the public ticker endpoint."""
    self.load_markets()
    market = self.market(symbol)
    request = {'pair': market['id']}
    response = self.publicGetPairTicker(self.extend(request, params))
    return self.parse_ticker(response['data'], market)
def fetch_order_book(self, symbol, limit=None, params={}):
    """Fetch the order book for *symbol*; *limit* is accepted but not sent."""
    self.load_markets()
    request = {'pair': self.market_id(symbol)}
    response = self.publicGetPairDepth(self.extend(request, params))
    book = response['data']
    return self.parse_order_book(book, book['timestamp'])
def parse_trade(self, trade, market=None):
    """Convert a raw bitbank transaction row into the unified trade dict.

    NOTE(review): market['symbol'] and market['quote'] are read
    unconditionally — every caller must pass a market; confirm before
    external use.
    """
    timestamp = trade['executed_at']
    price = float(trade['price'])
    amount = float(trade['amount'])
    symbol = market['symbol']
    cost = self.cost_to_precision(symbol, price * amount)
    # The id field name differs between endpoints: fall back from
    # 'transaction_id' to 'trade_id'.
    id = self.safe_string(trade, 'transaction_id')
    if not id:
        id = self.safe_string(trade, 'trade_id')
    fee = None
    # Fee data is optional; only attach it when present in the row.
    if 'fee_amount_quote' in trade:
        fee = {
            'currency': market['quote'],
            'cost': self.safe_float(trade, 'fee_amount_quote'),
        }
    return {
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'symbol': symbol,
        'id': id,
        'order': self.safe_string(trade, 'order_id'),
        'type': self.safe_string(trade, 'type'),
        'side': trade['side'],
        'price': price,
        'amount': amount,
        'cost': cost,
        'fee': fee,
        'info': trade,
    }
def fetch_trades(self, symbol, since=None, limit=None, params={}):
    """Fetch recent public trades; since/limit are applied client-side."""
    self.load_markets()
    market = self.market(symbol)
    request = {'pair': market['id']}
    response = self.publicGetPairTransactions(self.extend(request, params))
    return self.parse_trades(response['data']['transactions'], market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='5m', since=None, limit=None):
    """Reorder a raw bitbank candle [open, high, low, close, volume, ts]
    into the unified [timestamp, open, high, low, close, volume] layout,
    converting the price/volume strings to floats."""
    timestamp = ohlcv[5]
    o, h, l, c, v = (float(x) for x in ohlcv[:5])
    return [timestamp, o, h, l, c, v]
def fetch_ohlcv(self, symbol, timeframe='5m', since=None, limit=None, params={}):
    """Fetch candles for *symbol* at *timeframe*.

    NOTE(review): the YYYYMMDD path component is always built from the
    current date, so only the current candlestick file is requested and
    `since` only filters client-side — confirm whether historical paging
    was intended.
    """
    self.load_markets()
    market = self.market(symbol)
    date = self.milliseconds()
    date = self.ymd(date)
    date = date.split('-')  # 'YYYY-MM-DD' -> ['YYYY', 'MM', 'DD']
    response = self.publicGetPairCandlestickCandleTypeYYYYMMDD(self.extend({
        'pair': market['id'],
        'candle-type': self.timeframes[timeframe],
        'YYYYMMDD': ''.join(date),
    }, params))
    ohlcv = response['data']['candlestick'][0]['ohlcv']
    return self.parse_ohlcvs(ohlcv, market, timeframe, since, limit)
def fetch_balance(self, params={}):
    """Fetch account balances from GET /user/assets in the unified form."""
    self.load_markets()
    response = self.privateGetUserAssets(params)
    result = {'info': response}
    for balance in response['data']['assets']:
        currency_id = balance['asset']
        # Map the exchange currency id to a unified code when known.
        code = currency_id
        if currency_id in self.currencies_by_id:
            code = self.currencies_by_id[currency_id]['code']
        result[code] = {
            'free': float(balance['free_amount']),
            'used': float(balance['locked_amount']),
            'total': float(balance['onhand_amount']),
        }
    return self.parse_balance(result)
def parse_order(self, order, market=None):
    """Convert a raw bitbank order into the unified order structure.

    Robustness fix: the original crashed with TypeError/KeyError when
    'price', 'ordered_at' or 'average_price' were absent
    (float(order['price']), safe_integer(...) * 1000, filled * None).
    Missing fields now yield None; output is unchanged when all fields
    are present.
    """
    marketId = self.safe_string(order, 'pair')
    symbol = None
    if marketId and not market and (marketId in list(self.marketsById.keys())):
        market = self.marketsById[marketId]
    if market:
        symbol = market['symbol']
    timestamp = self.safe_integer(order, 'ordered_at')
    if timestamp is not None:
        timestamp *= 1000  # presumably seconds -> ms — TODO confirm against API docs
    price = self.safe_float(order, 'price')
    amount = self.safe_float(order, 'start_amount')
    filled = self.safe_float(order, 'executed_amount')
    remaining = self.safe_float(order, 'remaining_amount')
    average = self.safe_float(order, 'average_price')
    cost = None
    if filled is not None and average is not None:
        cost = filled * average
    status = self.safe_string(order, 'status')
    # Raw values: UNFILLED, PARTIALLY_FILLED, FULLY_FILLED,
    # CANCELED_UNFILLED, CANCELED_PARTIALLY_FILLED.
    if status == 'FULLY_FILLED':
        status = 'closed'
    elif status == 'CANCELED_UNFILLED' or status == 'CANCELED_PARTIALLY_FILLED':
        status = 'canceled'
    else:
        status = 'open'
    return {
        'id': self.safe_string(order, 'order_id'),
        'datetime': self.iso8601(timestamp),
        'timestamp': timestamp,
        'lastTradeTimestamp': None,
        'status': status,
        'symbol': symbol,
        'type': self.safe_string(order, 'type'),
        'side': self.safe_string(order, 'side'),
        'price': price,
        'cost': cost,
        'amount': amount,
        'filled': filled,
        'remaining': remaining,
        'trades': None,
        'fee': None,
        'info': order,
    }
def create_order(self, symbol, type, side, amount, price=None, params={}):
    """Place a spot order.

    bitbank requires an explicit price even for market orders, hence the
    early InvalidOrder when *price* is None.  The created order is parsed
    into the unified form and cached in self.orders under its id.
    """
    self.load_markets()
    market = self.market(symbol)
    if price is None:
        raise InvalidOrder(self.id + ' createOrder requires a price argument for both market and limit orders')
    request = {
        'pair': market['id'],
        'amount': self.amount_to_string(symbol, amount),
        'price': self.price_to_precision(symbol, price),
        'side': side,
        'type': type,
    }
    response = self.privatePostUserSpotOrder(self.extend(request, params))
    id = response['data']['order_id']
    order = self.parse_order(response['data'], market)
    self.orders[id] = order
    return order
def cancel_order(self, id, symbol=None, params={}):
    """Cancel order *id*; bitbank also requires the market pair, so a
    symbol that self.market() can resolve must be supplied."""
    self.load_markets()
    market = self.market(symbol)
    request = {'order_id': id, 'pair': market['id']}
    response = self.privatePostUserSpotCancelOrder(self.extend(request, params))
    return response['data']
def fetch_order(self, id, symbol=None, params={}):
    """Fetch a single order by id; bitbank requires the pair as well."""
    self.load_markets()
    market = self.market(symbol)
    request = {'order_id': id, 'pair': market['id']}
    response = self.privateGetUserSpotOrder(self.extend(request, params))
    return self.parse_order(response['data'])
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
    """Fetch the account's active spot orders for *symbol*.

    Fix: `limit` and `since` are now tested with `is not None`
    (consistent with fetch_my_trades) so valid falsy values such as
    since=0 are no longer silently dropped.  The endpoint is per-pair,
    so a symbol is effectively required (self.market raises otherwise).
    """
    self.load_markets()
    market = self.market(symbol)
    request = {
        'pair': market['id'],
    }
    if limit is not None:
        request['count'] = limit
    if since is not None:
        request['since'] = int(since / 1000)  # presumably ms -> seconds — TODO confirm
    orders = self.privateGetUserSpotActiveOrders(self.extend(request, params))
    return self.parse_orders(orders['data']['orders'], market, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
    """Fetch the account's trade history, optionally filtered by symbol."""
    market = None
    if symbol is not None:
        self.load_markets()
        market = self.market(symbol)
    request = {}
    if market is not None:
        request['pair'] = market['id']
    if limit is not None:
        request['count'] = limit
    if since is not None:
        request['since'] = int(since / 1000)
    response = self.privateGetUserSpotTradeHistory(self.extend(request, params))
    return self.parse_trades(response['data']['trades'], market, since, limit)
def fetch_deposit_address(self, code, params={}):
    """Fetch the address of the withdrawal account registered for *code*.

    Returns the unified address structure; status is 'ok' when an
    address string was found, 'none' otherwise.
    """
    self.load_markets()
    currency = self.currency(code)
    response = self.privateGetUserWithdrawalAccount(self.extend({
        'asset': currency['id'],
    }, params))
    # Only the first account is used; unclear whether the API can return
    # more than one — TODO confirm against the bitbank docs.
    accounts = response['data']['accounts']
    address = self.safe_string(accounts[0], 'address')
    status = 'ok' if address else 'none'
    return {
        'currency': currency,
        'address': address,
        'tag': None,
        'status': status,
        'info': response,
    }
def withdraw(self, code, amount, address, tag=None, params={}):
    """Request a withdrawal.

    bitbank withdraws to a pre-registered account identified by a 'uuid'
    entry in *params*; the *address* argument itself is not sent.
    """
    if 'uuid' not in params:
        raise ExchangeError(self.id + ' uuid is required for withdrawal')
    self.load_markets()
    currency = self.currency(code)
    request = {'asset': currency['id'], 'amount': amount}
    response = self.privatePostUserRequestWithdrawal(self.extend(request, params))
    return {
        'info': response,
        'id': response['data']['txid'],
    }
def nonce(self):
    """Return the current timestamp in milliseconds, used as the request nonce."""
    return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
    """Build the URL, headers and body for an API request.

    Public calls are plain GETs against the public host.  Private calls
    are signed with HMAC over nonce + body (POST) or
    nonce + '/' + version + '/' + path [+ '?' + query] (GET), sent via
    the ACCESS-KEY / ACCESS-NONCE / ACCESS-SIGNATURE headers.
    """
    # Params not consumed by the path template become the query/body.
    query = self.omit(params, self.extract_params(path))
    url = self.urls['api'][api] + '/'
    if api == 'public':
        url += self.implode_params(path, params)
        if query:
            url += '?' + self.urlencode(query)
    else:
        self.check_required_credentials()
        nonce = str(self.nonce())
        auth = nonce
        url += self.version + '/' + self.implode_params(path, params)
        if method == 'POST':
            # POST signs the JSON body.
            body = self.json(query)
            auth += body
        else:
            # GET signs the versioned path plus the query string.
            auth += '/' + self.version + '/' + path
            if query:
                query = self.urlencode(query)
                url += '?' + query
                auth += '?' + query
        headers = {
            'Content-Type': 'application/json',
            'ACCESS-KEY': self.apiKey,
            'ACCESS-NONCE': nonce,
            'ACCESS-SIGNATURE': self.hmac(self.encode(auth), self.encode(self.secret)),
        }
    return {'url': url, 'method': method, 'body': body, 'headers': headers}
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
    """Issue the request and translate bitbank error responses.

    bitbank wraps every payload as {'success': 0|1, 'data': {...}}; on
    failure, 'data' carries a numeric 'code' which is mapped to a known
    exception class via self.exceptions where possible, otherwise a
    generic ExchangeError with the raw response is raised.
    """
    response = self.fetch2(path, api, method, params, headers, body)
    success = self.safe_integer(response, 'success')
    data = self.safe_value(response, 'data')
    if not success or not data:
        # Human-readable descriptions for bitbank's numeric error codes.
        errorMessages = {
            '10000': 'URL does not exist',
            '10001': 'A system error occurred. Please contact support',
            '10002': 'Invalid JSON format. Please check the contents of transmission',
            '10003': 'A system error occurred. Please contact support',
            '10005': 'A timeout error occurred. Please wait for a while and try again',
            '20001': 'API authentication failed',
            '20002': 'Illegal API key',
            '20003': 'API key does not exist',
            '20004': 'API Nonce does not exist',
            '20005': 'API signature does not exist',
            '20011': 'Two-step verification failed',
            '20014': 'SMS authentication failed',
            '30001': 'Please specify the order quantity',
            '30006': 'Please specify the order ID',
            '30007': 'Please specify the order ID array',
            '30009': 'Please specify the stock',
            '30012': 'Please specify the order price',
            '30013': 'Trade Please specify either',
            '30015': 'Please specify the order type',
            '30016': 'Please specify asset name',
            '30019': 'Please specify uuid',
            '30039': 'Please specify the amount to be withdrawn',
            '40001': 'The order quantity is invalid',
            '40006': 'Count value is invalid',
            '40007': 'End time is invalid',
            '40008': 'end_id Value is invalid',
            '40009': 'The from_id value is invalid',
            '40013': 'The order ID is invalid',
            '40014': 'The order ID array is invalid',
            '40015': 'Too many specified orders',
            '40017': 'Incorrect issue name',
            '40020': 'The order price is invalid',
            '40021': 'The trading classification is invalid',
            '40022': 'Start date is invalid',
            '40024': 'The order type is invalid',
            '40025': 'Incorrect asset name',
            '40028': 'uuid is invalid',
            '40048': 'The amount of withdrawal is illegal',
            '50003': 'Currently, self account is in a state where you can not perform the operation you specified. Please contact support',
            '50004': 'Currently, self account is temporarily registered. Please try again after registering your account',
            '50005': 'Currently, self account is locked. Please contact support',
            '50006': 'Currently, self account is locked. Please contact support',
            '50008': 'User identification has not been completed',
            '50009': 'Your order does not exist',
            '50010': 'Can not cancel specified order',
            '50011': 'API not found',
            '60001': 'The number of possessions is insufficient',
            '60002': 'It exceeds the quantity upper limit of the tender buying order',
            '60003': 'The specified quantity exceeds the limit',
            '60004': 'The specified quantity is below the threshold',
            '60005': 'The specified price is above the limit',
            '60006': 'The specified price is below the lower limit',
            '70001': 'A system error occurred. Please contact support',
            '70002': 'A system error occurred. Please contact support',
            '70003': 'A system error occurred. Please contact support',
            '70004': 'We are unable to accept orders as the transaction is currently suspended',
            '70005': 'Order can not be accepted because purchase order is currently suspended',
            '70006': 'We can not accept orders because we are currently unsubscribed ',
        }
        errorClasses = self.exceptions
        code = self.safe_string(data, 'code')
        message = self.safe_string(errorMessages, code, 'Error')
        ErrorClass = self.safe_value(errorClasses, code)
        if ErrorClass is not None:
            raise ErrorClass(message)
        else:
            # Unknown code: surface the whole raw response for debugging.
            raise ExchangeError(self.id + ' ' + self.json(response))
    return response
| true
| true
|
f719f0bd0810de624991f194db2d5e2731bca1d7
| 2,976
|
py
|
Python
|
etcd/setup.py
|
dvanderveer/integrations-core
|
41dd9950296455457c9b7342584153678503d5aa
|
[
"BSD-3-Clause"
] | null | null | null |
etcd/setup.py
|
dvanderveer/integrations-core
|
41dd9950296455457c9b7342584153678503d5aa
|
[
"BSD-3-Clause"
] | null | null | null |
etcd/setup.py
|
dvanderveer/integrations-core
|
41dd9950296455457c9b7342584153678503d5aa
|
[
"BSD-3-Clause"
] | null | null | null |
# Always prefer setuptools over distutils
from setuptools import setup
# To use a consistent encoding
from codecs import open
from os import path
import re
here = path.abspath(path.dirname(__file__))
# get the long description from the readme file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
runtime_reqs = ['datadog_checks_base']
with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f:
for line in f.readlines():
line = line.strip()
if not line or line.startswith('--hash') or line[0] == '#':
continue
req = line.rpartition('#')
if not len(req[1]):
if '--hash=' in req[2]:
tokens = req[2].split()
if len(tokens) > 1:
runtime_reqs.append(tokens[0])
elif ';' in req[2]:
runtime_reqs.append(req[2])
else:
runtime_reqs.append(req[0])
def read(*parts):
    """Return the text contents of the file at *parts*, relative to here."""
    target = path.join(here, *parts)
    with open(target, 'r') as fp:
        return fp.read()
def find_version(*file_paths):
    """Extract the __version__ string from the given file.

    Raises RuntimeError when no `__version__ = '...'` assignment is found.
    """
    contents = read(*file_paths)
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                      contents, re.M)
    if match is None:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
# Single-source the version from the package's __init__.py:
# https://packaging.python.org/guides/single-sourcing-package-version/
version = find_version("datadog_checks", "etcd", "__init__.py")

setup(
    name='datadog-etcd',
    version=version,
    description='The Etcd check',
    long_description=long_description,
    keywords='datadog agent etcd check',
    # The project's main homepage.
    url='https://github.com/DataDog/integrations-core',
    # Author details
    author='Datadog',
    author_email='packages@datadoghq.com',
    # License
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Topic :: System :: Monitoring',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
    ],
    # The package we're going to ship
    packages=['datadog_checks.etcd'],
    # Run-time dependencies, deduplicated from requirements.txt above.
    install_requires=list(set(runtime_reqs)),
    # Development dependencies, run with:
    # $ pip install -e .[dev]
    extras_require={
        'dev': [
            'check-manifest',
            'datadog_agent_tk>=5.15',
        ],
    },
    # Testing setup and dependencies
    tests_require=[
        'nose',
        'coverage',
        'datadog_agent_tk>=5.15',
    ],
    test_suite='nose.collector',
    # Extra files to ship with the wheel package.
    # NOTE(review): the bytes key b'datadog_checks.etcd' looks like a
    # Python 2 artifact; a plain str key is conventional — confirm.
    package_data={b'datadog_checks.etcd': ['conf.yaml.example']},
    include_package_data=True,
)
| 29.176471
| 70
| 0.612231
|
from setuptools import setup
from codecs import open
from os import path
import re
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
runtime_reqs = ['datadog_checks_base']
with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f:
for line in f.readlines():
line = line.strip()
if not line or line.startswith('--hash') or line[0] == '#':
continue
req = line.rpartition('#')
if not len(req[1]):
if '--hash=' in req[2]:
tokens = req[2].split()
if len(tokens) > 1:
runtime_reqs.append(tokens[0])
elif ';' in req[2]:
runtime_reqs.append(req[2])
else:
runtime_reqs.append(req[0])
def read(*parts):
with open(path.join(here, *parts), 'r') as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
# https://packaging.python.org/guides/single-sourcing-package-version/
version = find_version("datadog_checks", "etcd", "__init__.py")
setup(
name='datadog-etcd',
version=version,
description='The Etcd check',
long_description=long_description,
keywords='datadog agent etcd check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-core',
# Author details
author='Datadog',
author_email='packages@datadoghq.com',
# License
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
# The package we're going to ship
packages=['datadog_checks.etcd'],
# Run-time dependencies
install_requires=list(set(runtime_reqs)),
# Development dependencies, run with:
# $ pip install -e .[dev]
extras_require={
'dev': [
'check-manifest',
'datadog_agent_tk>=5.15',
],
},
# Testing setup and dependencies
tests_require=[
'nose',
'coverage',
'datadog_agent_tk>=5.15',
],
test_suite='nose.collector',
# Extra files to ship with the wheel package
package_data={b'datadog_checks.etcd': ['conf.yaml.example']},
include_package_data=True,
)
| true
| true
|
f719f29ce078fb015c146ba0d7a5bb429d7c7c23
| 69
|
py
|
Python
|
src/masonite/api/middleware/__init__.py
|
cercos/masonite
|
f7f220efa7fae833683e9f07ce13c3795a87d3b8
|
[
"MIT"
] | 1,816
|
2018-02-14T01:59:51.000Z
|
2022-03-31T17:09:20.000Z
|
src/masonite/api/middleware/__init__.py
|
cercos/masonite
|
f7f220efa7fae833683e9f07ce13c3795a87d3b8
|
[
"MIT"
] | 340
|
2018-02-11T00:27:26.000Z
|
2022-03-21T12:00:24.000Z
|
src/masonite/api/middleware/__init__.py
|
cercos/masonite
|
f7f220efa7fae833683e9f07ce13c3795a87d3b8
|
[
"MIT"
] | 144
|
2018-03-18T00:08:16.000Z
|
2022-02-26T01:51:58.000Z
|
from .JWTAuthenticationMiddleware import JWTAuthenticationMiddleware
| 34.5
| 68
| 0.927536
|
from .JWTAuthenticationMiddleware import JWTAuthenticationMiddleware
| true
| true
|
f719f32c0de53ae35c0223c63678dbad415c2f11
| 22
|
py
|
Python
|
__init__.py
|
andy-96/GFPGAN
|
0ed1214760170cc27fdfd60da1f64a0699a28cf4
|
[
"BSD-3-Clause"
] | null | null | null |
__init__.py
|
andy-96/GFPGAN
|
0ed1214760170cc27fdfd60da1f64a0699a28cf4
|
[
"BSD-3-Clause"
] | null | null | null |
__init__.py
|
andy-96/GFPGAN
|
0ed1214760170cc27fdfd60da1f64a0699a28cf4
|
[
"BSD-3-Clause"
] | null | null | null |
from .gfpgan import *
| 11
| 21
| 0.727273
|
from .gfpgan import *
| true
| true
|
f719f37af819374470555e086638c20bfd0d0001
| 1,250
|
py
|
Python
|
leasing/viewsets/contact_additional_views.py
|
hkotkanen/mvj
|
a22d40869ef1b13924da428f3026d248acef81a7
|
[
"MIT"
] | null | null | null |
leasing/viewsets/contact_additional_views.py
|
hkotkanen/mvj
|
a22d40869ef1b13924da428f3026d248acef81a7
|
[
"MIT"
] | null | null | null |
leasing/viewsets/contact_additional_views.py
|
hkotkanen/mvj
|
a22d40869ef1b13924da428f3026d248acef81a7
|
[
"MIT"
] | null | null | null |
import re
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from rest_framework.exceptions import APIException
from rest_framework.response import Response
from rest_framework.views import APIView
from leasing.models import Contact
from leasing.permissions import PerMethodPermission
class ContactExistsView(APIView):
    """Read-only endpoint reporting whether a Contact already exists.

    Matches on either business_id or national_identification_number
    using case-insensitive exact comparison.
    """
    permission_classes = (PerMethodPermission,)
    # Per-HTTP-method permission requirements consumed by PerMethodPermission.
    perms_map = {
        'GET': ['leasing.view_contact'],
    }

    def get_view_name(self):
        return _("Check if contact already exist")

    def get_view_description(self, html=False):
        return _("Check if contact already exist by business id or national identification number")

    def get(self, request, format=None):
        """Return {'exists': bool} for the required ?identifier= parameter."""
        identifier = request.query_params.get('identifier', None)
        if not identifier:
            raise APIException(_('Query parameter "identifier" is mandatory'))
        # Rewrite a VAT-style id ('FI12345678') into the hyphenated
        # business-id form '1234567-8'.
        # NOTE(review): re.match anchors only the prefix, so any longer
        # string starting with FI + 8 digits is also rewritten — confirm
        # whether a full-string match was intended.
        if re.match(r'FI\d{8}', identifier, re.IGNORECASE):
            identifier = "{}-{}".format(identifier[2:9], identifier[-1])
        return Response({
            "exists": Contact.objects.filter(
                Q(business_id__iexact=identifier) | Q(national_identification_number__iexact=identifier)).exists(),
        })
| 33.783784
| 115
| 0.7056
|
import re
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from rest_framework.exceptions import APIException
from rest_framework.response import Response
from rest_framework.views import APIView
from leasing.models import Contact
from leasing.permissions import PerMethodPermission
class ContactExistsView(APIView):
permission_classes = (PerMethodPermission,)
perms_map = {
'GET': ['leasing.view_contact'],
}
def get_view_name(self):
return _("Check if contact already exist")
def get_view_description(self, html=False):
return _("Check if contact already exist by business id or national identification number")
def get(self, request, format=None):
identifier = request.query_params.get('identifier', None)
if not identifier:
raise APIException(_('Query parameter "identifier" is mandatory'))
if re.match(r'FI\d{8}', identifier, re.IGNORECASE):
identifier = "{}-{}".format(identifier[2:9], identifier[-1])
return Response({
"exists": Contact.objects.filter(
Q(business_id__iexact=identifier) | Q(national_identification_number__iexact=identifier)).exists(),
})
| true
| true
|
f719f62e2d8ed7d50dbaff87b0c28e125875ad70
| 21,056
|
py
|
Python
|
tensorflow/lite/python/convert.py
|
anigasan/tensorflow
|
5b780b4983007661ca479bf4d7ed9a260d8ce43f
|
[
"Apache-2.0"
] | 1
|
2019-11-18T10:54:10.000Z
|
2019-11-18T10:54:10.000Z
|
tensorflow/lite/python/convert.py
|
anigasan/tensorflow
|
5b780b4983007661ca479bf4d7ed9a260d8ce43f
|
[
"Apache-2.0"
] | 1
|
2018-04-02T23:42:30.000Z
|
2018-05-03T23:12:23.000Z
|
tensorflow/lite/python/convert.py
|
anigasan/tensorflow
|
5b780b4983007661ca479bf4d7ed9a260d8ce43f
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts a frozen graph into a TFLite FlatBuffer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum # pylint: disable=g-bad-import-order
import os as _os
import platform as _platform
import subprocess as _subprocess
import tempfile as _tempfile
import six
from six.moves import map
from tensorflow.lite.python import lite_constants
from tensorflow.lite.python import util
from tensorflow.lite.python import wrap_toco
from tensorflow.lite.toco import model_flags_pb2 as _model_flags_pb2
from tensorflow.lite.toco import toco_flags_pb2 as _toco_flags_pb2
from tensorflow.lite.toco import types_pb2 as _types_pb2
from tensorflow.python.platform import resource_loader as _resource_loader
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export as _tf_export
# Find the toco_from_protos binary using the resource loader if using from
# bazel, otherwise we are in a pip where console_scripts already has
# the toco_from_protos tool.
if lite_constants.EXPERIMENTAL_USE_TOCO_API_DIRECTLY:
_toco_from_proto_bin = ""
else:
_toco_from_proto_bin = _resource_loader.get_path_to_datafile(
"../toco/python/toco_from_protos")
if _toco_from_proto_bin and not _os.path.exists(_toco_from_proto_bin):
_toco_from_proto_bin = "toco_from_protos"
def _try_convert_to_unicode(output):
  """Best-effort decode of subprocess output to text.

  `None` becomes an empty string so callers can embed it in messages; bytes
  are decoded as UTF-8 when possible; anything undecodable (or already text)
  is returned unchanged.
  """
  if output is None:
    return u""
  if not isinstance(output, bytes):
    return output
  try:
    return six.ensure_text(output)
  except UnicodeDecodeError:
    return output
@_tf_export("lite.OpsSet")
class OpsSet(enum.Enum):
  """Enum class defining the sets of ops available to generate TFLite models.

  WARNING: Experimental interface, subject to change.
  """

  # Convert the model using only TensorFlow Lite builtin ops.
  TFLITE_BUILTINS = "TFLITE_BUILTINS"

  # Allow TensorFlow ops in the converted model; not every TF op is
  # available. WARNING: Experimental interface, subject to change.
  SELECT_TF_OPS = "SELECT_TF_OPS"

  # Use only TensorFlow Lite's quantized int8 operations. Ops without a
  # quantized implementation become conversion errors.
  TFLITE_BUILTINS_INT8 = "TFLITE_BUILTINS_INT8"

  def __str__(self):
    # Render as the bare option name (e.g. "SELECT_TF_OPS"), not "OpsSet.X".
    return self.value

  @staticmethod
  def get_options():
    """Returns a list of OpsSet options as a list of strings."""
    return [str(option) for option in OpsSet]
class ConverterError(Exception):
  """Raised when an error occurs during model conversion."""
def toco_convert_protos(model_flags_str,
                        toco_flags_str,
                        input_data_str,
                        debug_info_str=None,
                        enable_mlir_converter=False):
  """Convert `input_data_str` according to model and toco parameters.

  Unless you know what you are doing consider using
  the more friendly `tf.compat.v1.lite.toco_convert`.

  Args:
    model_flags_str: Serialized proto describing model properties, see
      `toco/model_flags.proto`.
    toco_flags_str: Serialized proto describing conversion properties, see
      `toco/toco_flags.proto`.
    input_data_str: Input data in serialized form (e.g. a graphdef is common)
    debug_info_str: Serialized `GraphDebugInfo` proto describing logging
      information. (default None)
    enable_mlir_converter: Enables MLIR-based conversion instead of the default
      TOCO conversion. (default False)

  Returns:
    Converted model in serialized form (e.g. a TFLITE model is common).

  Raises:
    ConverterError: When conversion fails in TFLiteConverter, usually due to
      ops not being supported.
    RuntimeError: When conversion fails, an exception is raised with the error
      message embedded.
  """
  # TODO(aselle): When toco does not use fatal errors for failure, we can
  # switch this on.
  if not _toco_from_proto_bin:
    # In-process path: call the wrapped converter directly and normalize any
    # failure into ConverterError so callers handle a single exception type.
    try:
      model_str = wrap_toco.wrapped_toco_convert(model_flags_str,
                                                 toco_flags_str, input_data_str,
                                                 debug_info_str,
                                                 enable_mlir_converter)
      return model_str
    except Exception as e:
      raise ConverterError(str(e))

  # Windows and TemporaryFile are not that useful together,
  # since you cannot have two readers/writers. So we have to
  # make the temporaries and close and delete them explicitly.
  toco_filename, model_filename, input_filename, debug_filename, \
      output_filename = None, None, None, None, None
  try:
    # Build all input files
    with _tempfile.NamedTemporaryFile(delete=False) as fp_toco, \
         _tempfile.NamedTemporaryFile(delete=False) as fp_model, \
         _tempfile.NamedTemporaryFile(delete=False) as fp_input, \
         _tempfile.NamedTemporaryFile(delete=False) as fp_debug:
      toco_filename = fp_toco.name
      input_filename = fp_input.name
      model_filename = fp_model.name
      debug_filename = fp_debug.name
      fp_model.write(model_flags_str)
      fp_toco.write(toco_flags_str)
      fp_input.write(six.ensure_binary(input_data_str))
      debug_info_str = debug_info_str if debug_info_str else ""
      # `debug_info_str` may arrive as text, but NamedTemporaryFile is opened
      # in binary mode; encode str payloads before writing to avoid
      # "TypeError: a bytes-like object is required, not 'str'".
      if not isinstance(debug_info_str, bytes):
        fp_debug.write(debug_info_str.encode("utf-8"))
      else:
        fp_debug.write(debug_info_str)

    # Reserve an output file
    with _tempfile.NamedTemporaryFile(delete=False) as fp:
      output_filename = fp.name

    # Run
    cmd = [
        _toco_from_proto_bin,
        model_filename,
        toco_filename,
        input_filename,
        output_filename,
        "--debug_proto_file={}".format(debug_filename),
    ]
    if enable_mlir_converter:
      cmd.append("--enable_mlir_converter")
    cmdline = " ".join(cmd)
    is_windows = _platform.system() == "Windows"
    proc = _subprocess.Popen(
        cmdline,
        shell=True,
        stdout=_subprocess.PIPE,
        stderr=_subprocess.STDOUT,
        close_fds=not is_windows)
    stdout, stderr = proc.communicate()
    exitcode = proc.returncode
    if exitcode == 0:
      with open(output_filename, "rb") as fp:
        return fp.read()
    else:
      stdout = _try_convert_to_unicode(stdout)
      # stderr is always None here (redirected into stdout above); kept only
      # to preserve the historical message format.
      stderr = _try_convert_to_unicode(stderr)
      raise ConverterError("See console for info.\n%s\n%s\n" % (stdout, stderr))
  finally:
    # Must manually cleanup files. Bug fix: the debug-info temp file was
    # previously never unlinked and leaked on every invocation; it is now
    # included in the cleanup list. `None` entries raise TypeError in unlink
    # and are ignored, as are already-removed files (OSError).
    for filename in [toco_filename, input_filename, model_filename,
                     debug_filename, output_filename]:
      try:
        _os.unlink(filename)
      except (OSError, TypeError):
        pass
def build_toco_convert_protos(input_tensors,
                              output_tensors,
                              inference_type=lite_constants.FLOAT,
                              inference_input_type=None,
                              input_format=lite_constants.TENSORFLOW_GRAPHDEF,
                              input_shapes=None,
                              output_format=lite_constants.TFLITE,
                              quantized_input_stats=None,
                              default_ranges_stats=None,
                              drop_control_dependency=True,
                              reorder_across_fake_quant=False,
                              allow_custom_ops=False,
                              custom_opdefs=None,
                              change_concat_input_ranges=False,
                              post_training_quantize=False,
                              quantize_to_float16=False,
                              dump_graphviz_dir=None,
                              dump_graphviz_video=False,
                              target_ops=None,
                              allow_nonexistent_arrays=False,
                              debug_info=None,
                              conversion_summary_dir=None):
  """Builds protocol buffers describing a conversion of a model using TOCO.

  Typically this is to convert from TensorFlow GraphDef to TFLite, in which
  case the default `input_format` and `output_format` are sufficient.

  Args:
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.shape` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    inference_type: Target data type of real-number arrays in the output file.
      Must be `{tf.float32, tf.uint8}`. (default tf.float32)
    inference_input_type: Target data type of real-number input arrays. Allows
      for a different type for input arrays in the case of quantization.
      Must be `{tf.float32, tf.uint8}`. (default `inference_type`)
    input_format: Type of data to read Currently must be
      `{TENSORFLOW_GRAPHDEF}`. (default TENSORFLOW_GRAPHDEF)
    input_shapes: Input array shape. It needs to be a list of the same length
      as `input_tensors`, or None. (default None)
    output_format: Output file format. Currently must be `{TFLITE,
      GRAPHVIZ_DOT}`. (default TFLITE)
    quantized_input_stats: List of tuples of floats representing the mean and
      standard deviation. Each tuple maps to the corresponding input tensor.
      Only need if `inference_input_type` is `QUANTIZED_UINT8`.
      real_input_value = (quantized_input_value - mean_value) / std_dev_value.
      (default None)
    default_ranges_stats: Tuple of integers representing (min, max) range values
      for all arrays without a specified range. Intended for experimenting with
      quantization via "dummy quantization". (default None)
    drop_control_dependency: Boolean indicating whether to drop control
      dependencies silently. This is due to TFLite not supporting control
      dependencies. (default True)
    reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
      nodes in unexpected locations. Used when the location of the FakeQuant
      nodes is preventing graph transformations necessary to convert the graph.
      Results in a graph that differs from the quantized training graph,
      potentially causing differing arithmetic behavior. (default False)
    allow_custom_ops: Boolean indicating whether to allow custom operations.
      When false any unknown operation is an error. When true, custom ops are
      created for any op that is unknown. The developer will need to provide
      these to the TensorFlow Lite runtime with a custom resolver.
      (default False)
    custom_opdefs: List of strings representing custom ops OpDefs that are
      included in the GraphDef. Required when using custom operations with the
      MLIR-based converter. (default None)
    change_concat_input_ranges: Boolean to change behavior of min/max ranges for
      inputs and outputs of the concat operator for quantized models. Changes
      the ranges of concat operator overlap when true. (default False)
    post_training_quantize: Boolean indicating whether to quantize the weights
      of the converted float model. Model size will be reduced and there will be
      latency improvements (at the cost of accuracy).
      (default False)
    quantize_to_float16: Boolean indicating whether to convert float buffers
      to float16. (default False)
    dump_graphviz_dir: Full filepath of folder to dump the graphs at various
      stages of processing GraphViz .dot files. Preferred over
      --output_format=GRAPHVIZ_DOT in order to keep the requirements of the
      output file. (default None)
    dump_graphviz_video: Boolean indicating whether to dump the graph after
      every graph transformation. (default False)
    target_ops: Experimental flag, subject to change. Set of OpsSet
      options indicating which converter to use.
      (default set([OpsSet.TFLITE_BUILTINS]))
    allow_nonexistent_arrays: Allow specifying array names that don't exist
      or are unused in the final graph. (default False)
    debug_info: `GraphDebugInfo` proto containing the stack traces for the
      original nodes referred by the converted graph.
    conversion_summary_dir: A string, the path to the generated conversion logs.

  Returns:
    model_flags, toco_flags, debug_info: three protocol buffers describing the
      conversion process and debug information.

  Raises:
    ValueError:
      If the input tensor type is unknown
      Missing mean_values or std_dev_values
    RuntimeError: If TOCO fails to convert (in which case the runtime error's
      error text will contain the TOCO error log)
  """
  toco = _toco_flags_pb2.TocoFlags()
  toco.input_format = input_format
  toco.output_format = output_format
  toco.inference_type = util.convert_dtype_to_tflite_type(inference_type)
  if inference_input_type:
    toco.inference_input_type = util.convert_dtype_to_tflite_type(
        inference_input_type)
  else:
    # No explicit input type: quantize/keep inputs the same way as the rest
    # of the model.
    toco.inference_input_type = toco.inference_type
  toco.drop_control_dependency = drop_control_dependency
  toco.reorder_across_fake_quant = reorder_across_fake_quant
  toco.allow_custom_ops = allow_custom_ops
  if custom_opdefs:
    toco.custom_opdefs.extend(custom_opdefs)
  toco.post_training_quantize = post_training_quantize
  toco.quantize_to_float16 = quantize_to_float16
  if default_ranges_stats:
    toco.default_ranges_min = default_ranges_stats[0]
    toco.default_ranges_max = default_ranges_stats[1]
  if dump_graphviz_dir:
    toco.dump_graphviz_dir = dump_graphviz_dir
  toco.dump_graphviz_include_video = dump_graphviz_video
  if conversion_summary_dir:
    toco.conversion_summary_dir = conversion_summary_dir
  # BUILTINS+SELECT enables the TF-ops fallback; SELECT alone additionally
  # forces every op through the TF fallback.
  if target_ops:
    if set(target_ops) == set([OpsSet.TFLITE_BUILTINS, OpsSet.SELECT_TF_OPS]):
      toco.enable_select_tf_ops = True
    elif set(target_ops) == set([OpsSet.SELECT_TF_OPS]):
      toco.enable_select_tf_ops = True
      toco.force_select_tf_ops = True
  model = _model_flags_pb2.ModelFlags()
  model.change_concat_input_ranges = change_concat_input_ranges
  for idx, input_tensor in enumerate(input_tensors):
    input_array = model.input_arrays.add()
    input_array.name = util.get_tensor_name(input_tensor)
    input_array.data_type = util.convert_dtype_to_tflite_type(
        input_tensor.dtype)
    # Quantized inputs need per-tensor (mean, std) stats to map real values
    # onto the quantized range; fail early if they were not supplied.
    if toco.inference_input_type in \
        [_types_pb2.QUANTIZED_UINT8, _types_pb2.INT8]:
      if not quantized_input_stats:
        raise ValueError("std_dev and mean must be defined when "
                         "inference_input_type is QUANTIZED_UINT8.")
      input_array.mean_value, input_array.std_value = quantized_input_stats[idx]
    if input_shapes is None:
      shape = input_tensor.shape
    else:
      shape = input_shapes[idx]
    input_array.shape.dims.extend(list(map(int, shape)))
  for output_tensor in output_tensors:
    model.output_arrays.append(util.get_tensor_name(output_tensor))
  model.allow_nonexistent_arrays = allow_nonexistent_arrays
  return model, toco, debug_info
def toco_convert_graph_def(input_data, input_arrays_with_shape, output_arrays,
                           enable_mlir_converter, *args, **kwargs):
  """Convert a model using TOCO.

  This function is used to convert GraphDefs that cannot be loaded into
  TensorFlow to TFLite. Conversion can be customized by providing arguments
  that are forwarded to `build_toco_convert_protos` (see documentation for
  details).

  Args:
    input_data: Input data (i.e. often `sess.graph_def`),
    input_arrays_with_shape: Tuple of strings representing input tensor names
      and list of integers representing input shapes
      (e.g., [("foo" : [1, 16, 16, 3])]). Use only when graph cannot be loaded
      into TensorFlow and when `input_tensors` is None. (default None)
    output_arrays: List of output tensors to freeze graph with. Use only when
      graph cannot be loaded into TensorFlow and when `output_tensors` is None.
      (default None)
    enable_mlir_converter: Enables MLIR-based conversion instead of TOCO
      conversion.
    *args: See `build_toco_convert_protos`,
    **kwargs: See `build_toco_convert_protos`.

  Returns:
    The converted data. For example if TFLite was the destination, then
    this will be a tflite flatbuffer in a bytes array.

  Raises:
    Defined in `build_toco_convert_protos`.
  """
  # Input/output arrays are described by name+shape here (the graph itself is
  # not loadable), so the protos are built with empty tensor lists and the
  # arrays are filled in manually below.
  model_flags, toco_flags, _ = build_toco_convert_protos(
      input_tensors=[], output_tensors=[], *args, **kwargs)
  for idx, (name, shape) in enumerate(input_arrays_with_shape):
    input_array = model_flags.input_arrays.add()
    # Quantized inputs need per-tensor (mean, std) stats; fail early if they
    # were not supplied via kwargs.
    if toco_flags.inference_input_type == _types_pb2.QUANTIZED_UINT8:
      if (("quantized_input_stats" not in kwargs) or
          (not kwargs["quantized_input_stats"])):
        raise ValueError("std_dev and mean must be defined when "
                         "inference_input_type is QUANTIZED_UINT8.")
      input_array.mean_value, input_array.std_value = kwargs[
          "quantized_input_stats"][idx]
    input_array.name = name
    input_array.shape.dims.extend(list(map(int, shape)))
  for name in output_arrays:
    model_flags.output_arrays.append(name)
  data = toco_convert_protos(
      model_flags.SerializeToString(),
      toco_flags.SerializeToString(),
      input_data.SerializeToString(),
      enable_mlir_converter=enable_mlir_converter)
  return data
def toco_convert_impl(input_data, input_tensors, output_tensors,
                      enable_mlir_converter, *args, **kwargs):
  """Convert a model using TOCO.

  Typically this function is used to convert from TensorFlow GraphDef to TFLite.
  Conversion can be customized by providing arguments that are forwarded to
  `build_toco_convert_protos` (see documentation for details).

  Args:
    input_data: Input data (i.e. often `sess.graph_def`),
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.shape` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    enable_mlir_converter: Enables MLIR-based conversion instead of TOCO
      conversion.
    *args: See `build_toco_convert_protos`,
    **kwargs: See `build_toco_convert_protos`.

  Returns:
    The converted data. For example if TFLite was the destination, then
    this will be a tflite flatbuffer in a bytes array.

  Raises:
    Defined in `build_toco_convert_protos`.
  """
  model_flags, toco_flags, debug_info = build_toco_convert_protos(
      input_tensors, output_tensors, *args, **kwargs)
  # debug_info may be None (no stack traces recorded); pass None downstream
  # rather than serializing an empty proto.
  debug_info_str = debug_info.SerializeToString() if debug_info else None
  data = toco_convert_protos(
      model_flags.SerializeToString(),
      toco_flags.SerializeToString(),
      input_data.SerializeToString(),
      debug_info_str=debug_info_str,
      enable_mlir_converter=enable_mlir_converter)
  return data
@_tf_export(v1=["lite.toco_convert"])
@deprecation.deprecated(None, "Use `lite.TFLiteConverter` instead.")
def toco_convert(input_data, input_tensors, output_tensors, *args, **kwargs):
  """Convert a model using TOCO.

  Typically this function is used to convert from TensorFlow GraphDef to TFLite.
  Conversion can be customized by providing arguments that are forwarded to
  `build_toco_convert_protos` (see documentation for details). This function has
  been deprecated. Please use `lite.TFLiteConverter` instead.

  Args:
    input_data: Input data (i.e. often `sess.graph_def`),
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.shape` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    *args: See `build_toco_convert_protos`,
    **kwargs: See `build_toco_convert_protos`.

  Returns:
    The converted data. For example if TFLite was the destination, then
    this will be a tflite flatbuffer in a bytes array.

  Raises:
    Defined in `build_toco_convert_protos`.
  """
  # Bug fix: pop (not get) the flag. It is passed positionally to
  # `toco_convert_impl`; leaving it in **kwargs would forward it on to
  # `build_toco_convert_protos`, which has no such parameter, raising
  # "TypeError: unexpected keyword argument 'enable_mlir_converter'".
  enable_mlir_converter = kwargs.pop("enable_mlir_converter", False)
  return toco_convert_impl(input_data, input_tensors, output_tensors,
                           enable_mlir_converter, *args, **kwargs)
| 42.537374
| 80
| 0.714381
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import os as _os
import platform as _platform
import subprocess as _subprocess
import tempfile as _tempfile
import six
from six.moves import map
from tensorflow.lite.python import lite_constants
from tensorflow.lite.python import util
from tensorflow.lite.python import wrap_toco
from tensorflow.lite.toco import model_flags_pb2 as _model_flags_pb2
from tensorflow.lite.toco import toco_flags_pb2 as _toco_flags_pb2
from tensorflow.lite.toco import types_pb2 as _types_pb2
from tensorflow.python.platform import resource_loader as _resource_loader
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export as _tf_export
if lite_constants.EXPERIMENTAL_USE_TOCO_API_DIRECTLY:
_toco_from_proto_bin = ""
else:
_toco_from_proto_bin = _resource_loader.get_path_to_datafile(
"../toco/python/toco_from_protos")
if _toco_from_proto_bin and not _os.path.exists(_toco_from_proto_bin):
_toco_from_proto_bin = "toco_from_protos"
def _try_convert_to_unicode(output):
if output is None:
return u""
if isinstance(output, bytes):
try:
return six.ensure_text(output)
except UnicodeDecodeError:
pass
return output
@_tf_export("lite.OpsSet")
class OpsSet(enum.Enum):
TFLITE_BUILTINS = "TFLITE_BUILTINS"
SELECT_TF_OPS = "SELECT_TF_OPS"
TFLITE_BUILTINS_INT8 = "TFLITE_BUILTINS_INT8"
def __str__(self):
return self.value
@staticmethod
def get_options():
return [str(option) for option in list(OpsSet)]
class ConverterError(Exception):
pass
def toco_convert_protos(model_flags_str,
toco_flags_str,
input_data_str,
debug_info_str=None,
enable_mlir_converter=False):
if not _toco_from_proto_bin:
try:
model_str = wrap_toco.wrapped_toco_convert(model_flags_str,
toco_flags_str, input_data_str,
debug_info_str,
enable_mlir_converter)
return model_str
except Exception as e:
raise ConverterError(str(e))
toco_filename, model_filename, input_filename, output_filename = (
None, None, None, None)
try:
with _tempfile.NamedTemporaryFile(delete=False) as fp_toco, \
_tempfile.NamedTemporaryFile(delete=False) as fp_model, \
_tempfile.NamedTemporaryFile(delete=False) as fp_input, \
_tempfile.NamedTemporaryFile(delete=False) as fp_debug:
toco_filename = fp_toco.name
input_filename = fp_input.name
model_filename = fp_model.name
debug_filename = fp_debug.name
fp_model.write(model_flags_str)
fp_toco.write(toco_flags_str)
fp_input.write(six.ensure_binary(input_data_str))
debug_info_str = debug_info_str if debug_info_str else ""
if not isinstance(debug_info_str, bytes):
fp_debug.write(debug_info_str.encode("utf-8"))
else:
fp_debug.write(debug_info_str)
with _tempfile.NamedTemporaryFile(delete=False) as fp:
output_filename = fp.name
cmd = [
_toco_from_proto_bin,
model_filename,
toco_filename,
input_filename,
output_filename,
"--debug_proto_file={}".format(debug_filename),
]
if enable_mlir_converter:
cmd.append("--enable_mlir_converter")
cmdline = " ".join(cmd)
is_windows = _platform.system() == "Windows"
proc = _subprocess.Popen(
cmdline,
shell=True,
stdout=_subprocess.PIPE,
stderr=_subprocess.STDOUT,
close_fds=not is_windows)
stdout, stderr = proc.communicate()
exitcode = proc.returncode
if exitcode == 0:
with open(output_filename, "rb") as fp:
return fp.read()
else:
stdout = _try_convert_to_unicode(stdout)
stderr = _try_convert_to_unicode(stderr)
raise ConverterError("See console for info.\n%s\n%s\n" % (stdout, stderr))
finally:
for filename in [
toco_filename, input_filename, model_filename, output_filename]:
try:
_os.unlink(filename)
except (OSError, TypeError):
pass
def build_toco_convert_protos(input_tensors,
output_tensors,
inference_type=lite_constants.FLOAT,
inference_input_type=None,
input_format=lite_constants.TENSORFLOW_GRAPHDEF,
input_shapes=None,
output_format=lite_constants.TFLITE,
quantized_input_stats=None,
default_ranges_stats=None,
drop_control_dependency=True,
reorder_across_fake_quant=False,
allow_custom_ops=False,
custom_opdefs=None,
change_concat_input_ranges=False,
post_training_quantize=False,
quantize_to_float16=False,
dump_graphviz_dir=None,
dump_graphviz_video=False,
target_ops=None,
allow_nonexistent_arrays=False,
debug_info=None,
conversion_summary_dir=None):
toco = _toco_flags_pb2.TocoFlags()
toco.input_format = input_format
toco.output_format = output_format
toco.inference_type = util.convert_dtype_to_tflite_type(inference_type)
if inference_input_type:
toco.inference_input_type = util.convert_dtype_to_tflite_type(
inference_input_type)
else:
toco.inference_input_type = toco.inference_type
toco.drop_control_dependency = drop_control_dependency
toco.reorder_across_fake_quant = reorder_across_fake_quant
toco.allow_custom_ops = allow_custom_ops
if custom_opdefs:
toco.custom_opdefs.extend(custom_opdefs)
toco.post_training_quantize = post_training_quantize
toco.quantize_to_float16 = quantize_to_float16
if default_ranges_stats:
toco.default_ranges_min = default_ranges_stats[0]
toco.default_ranges_max = default_ranges_stats[1]
if dump_graphviz_dir:
toco.dump_graphviz_dir = dump_graphviz_dir
toco.dump_graphviz_include_video = dump_graphviz_video
if conversion_summary_dir:
toco.conversion_summary_dir = conversion_summary_dir
if target_ops:
if set(target_ops) == set([OpsSet.TFLITE_BUILTINS, OpsSet.SELECT_TF_OPS]):
toco.enable_select_tf_ops = True
elif set(target_ops) == set([OpsSet.SELECT_TF_OPS]):
toco.enable_select_tf_ops = True
toco.force_select_tf_ops = True
model = _model_flags_pb2.ModelFlags()
model.change_concat_input_ranges = change_concat_input_ranges
for idx, input_tensor in enumerate(input_tensors):
input_array = model.input_arrays.add()
input_array.name = util.get_tensor_name(input_tensor)
input_array.data_type = util.convert_dtype_to_tflite_type(
input_tensor.dtype)
if toco.inference_input_type in \
[_types_pb2.QUANTIZED_UINT8, _types_pb2.INT8]:
if not quantized_input_stats:
raise ValueError("std_dev and mean must be defined when "
"inference_input_type is QUANTIZED_UINT8.")
input_array.mean_value, input_array.std_value = quantized_input_stats[idx]
if input_shapes is None:
shape = input_tensor.shape
else:
shape = input_shapes[idx]
input_array.shape.dims.extend(list(map(int, shape)))
for output_tensor in output_tensors:
model.output_arrays.append(util.get_tensor_name(output_tensor))
model.allow_nonexistent_arrays = allow_nonexistent_arrays
return model, toco, debug_info
def toco_convert_graph_def(input_data, input_arrays_with_shape, output_arrays,
enable_mlir_converter, *args, **kwargs):
model_flags, toco_flags, _ = build_toco_convert_protos(
input_tensors=[], output_tensors=[], *args, **kwargs)
for idx, (name, shape) in enumerate(input_arrays_with_shape):
input_array = model_flags.input_arrays.add()
if toco_flags.inference_input_type == _types_pb2.QUANTIZED_UINT8:
if (("quantized_input_stats" not in kwargs) or
(not kwargs["quantized_input_stats"])):
raise ValueError("std_dev and mean must be defined when "
"inference_input_type is QUANTIZED_UINT8.")
input_array.mean_value, input_array.std_value = kwargs[
"quantized_input_stats"][idx]
input_array.name = name
input_array.shape.dims.extend(list(map(int, shape)))
for name in output_arrays:
model_flags.output_arrays.append(name)
data = toco_convert_protos(
model_flags.SerializeToString(),
toco_flags.SerializeToString(),
input_data.SerializeToString(),
enable_mlir_converter=enable_mlir_converter)
return data
def toco_convert_impl(input_data, input_tensors, output_tensors,
enable_mlir_converter, *args, **kwargs):
model_flags, toco_flags, debug_info = build_toco_convert_protos(
input_tensors, output_tensors, *args, **kwargs)
debug_info_str = debug_info.SerializeToString() if debug_info else None
data = toco_convert_protos(
model_flags.SerializeToString(),
toco_flags.SerializeToString(),
input_data.SerializeToString(),
debug_info_str=debug_info_str,
enable_mlir_converter=enable_mlir_converter)
return data
@_tf_export(v1=["lite.toco_convert"])
@deprecation.deprecated(None, "Use `lite.TFLiteConverter` instead.")
def toco_convert(input_data, input_tensors, output_tensors, *args, **kwargs):
enable_mlir_converter = kwargs.get("enable_mlir_converter", False)
return toco_convert_impl(input_data, input_tensors, output_tensors,
enable_mlir_converter, *args, **kwargs)
| true
| true
|
f719f728d45f799dab957ca3faa6158730bf0f3b
| 1,609
|
py
|
Python
|
BDSP-Scripts/utils/pokeTwilio.py
|
leecbryant/BDSP-PythonBot
|
db77b08e023ce3942cfff3c6d3e9a32f0d63f3dc
|
[
"MIT"
] | 4
|
2022-03-28T21:00:00.000Z
|
2022-03-29T00:03:20.000Z
|
BDSP-Scripts/utils/pokeTwilio.py
|
leecbryant/BDSP-PythonBot
|
db77b08e023ce3942cfff3c6d3e9a32f0d63f3dc
|
[
"MIT"
] | null | null | null |
BDSP-Scripts/utils/pokeTwilio.py
|
leecbryant/BDSP-PythonBot
|
db77b08e023ce3942cfff3c6d3e9a32f0d63f3dc
|
[
"MIT"
] | 1
|
2022-03-30T05:12:46.000Z
|
2022-03-30T05:12:46.000Z
|
import os
from twilio.rest import Client
#import twilioConfig from one folder up and inside Config_Files folder
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
try:
from Config_Files import twilioConfig
except ImportError:
from Config_Files import twilioConfig_default as twilioConfig
# This file holds a function to text a player with a message. In this case, finding a shiny.
#
# Setup:
# Create a twilioConfig.py file inside the Config_Files folder that includes the following variables:
#   to_phone_number = 'your number'
#   from_phone_number = 'Twilio number'
#   account_sid = 'from Twilio'
#   auth_token = 'from Twilio'
def found_shiny_text(found_pokemon = '', to_num = twilioConfig.to_phone_number, from_num = twilioConfig.from_phone_number):
    """Text `to_num` from the Twilio number `from_num` that a shiny was found.

    Usage: found_shiny_text('Dialga', to_num, from_num).
    Num format: Country Code + Area Code + Number (example: '+12223333333').
    Failures are reported to stdout rather than raised, so the calling bot
    keeps running even when Twilio is misconfigured.
    """
    try:
        sentence = 'You Found a Shiny ' + found_pokemon
        account_sid = twilioConfig.account_sid
        auth_token = twilioConfig.auth_token
        client = Client(account_sid, auth_token)
        # Send the SMS; the returned message resource is not needed here.
        client.messages.create(
            body=sentence,
            from_=from_num,
            to=to_num)
        print("Texting Phone Number: "+str(to_num))
    except Exception as error:
        # A bare `except:` previously swallowed KeyboardInterrupt/SystemExit
        # and hid the actual cause; catch Exception and report the error.
        print("Twilio is not configured properly. Please check your twilioConfig_default.py file.")
        print("Twilio error: " + str(error))
| 43.486486
| 190
| 0.709136
|
import os
from twilio.rest import Client
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
try:
from Config_Files import twilioConfig
except ImportError:
from Config_Files import twilioConfig_default as twilioConfig
def found_shiny_text(found_pokemon = '', to_num = twilioConfig.to_phone_number, from_num = twilioConfig.from_phone_number):
try:
sentence = 'You Found a Shiny ' + found_pokemon
formatted = '<Response><Say>' + sentence + '</Say></Response>'
account_sid = twilioConfig.account_sid
auth_token = twilioConfig.auth_token
client = Client(account_sid, auth_token)
message = client.messages.create(
body=sentence,
from_=from_num,
to=to_num)
print("Texting Phone Number: "+str(to_num))
except:
print("Twilio is not configured properly. Please check your twilioConfig_default.py file.")
| true
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.