hexsha (string, len 40) | size (int64, 10 to 805k) | ext (string, 6 classes) | lang (string, 1 class) | max_stars_repo_path (string, len 4 to 176) | max_stars_repo_name (string, len 7 to 114) | max_stars_repo_head_hexsha (string, len 40) | max_stars_repo_licenses (list, len 1 to 10) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 4 to 176) | max_issues_repo_name (string, len 7 to 114) | max_issues_repo_head_hexsha (string, len 40) | max_issues_repo_licenses (list, len 1 to 10) | max_issues_count (int64, 1 to 48.5k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 4 to 176) | max_forks_repo_name (string, len 7 to 114) | max_forks_repo_head_hexsha (string, len 40) | max_forks_repo_licenses (list, len 1 to 10) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 10 to 805k) | avg_line_length (float64, 5.53 to 11k) | max_line_length (int64, 10 to 129k) | alphanum_fraction (float64, 0.13 to 0.93) | content_no_comment (string, len 0 to 449k) | is_comment_constant_removed (bool, 2 classes) | is_sharp_comment_removed (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f71a3d3679c710701747a7487f1d3ca7742c6324 | 1,437 | py | Python | destruction/render.py | tcdude/destruction | 44d24cee4f73e841e600a814e7b3c659a1a5c98c | ["MIT"] | null | null | null | destruction/render.py | tcdude/destruction | 44d24cee4f73e841e600a814e7b3c659a1a5c98c | ["MIT"] | null | null | null | destruction/render.py | tcdude/destruction | 44d24cee4f73e841e600a814e7b3c659a1a5c98c | ["MIT"] | null | null | null |
"""
MIT License
Copyright (c) 2019 tcdude
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sdl2.ext


class HWRenderer(sdl2.ext.TextureSpriteRenderSystem):
    """Basic SDL HW Renderer."""

    def __init__(self, window):
        super(HWRenderer, self).__init__(window)
        self.renderer = self.sdlrenderer

    def render(self, components, **kwargs):
        self._renderer.clear()
        super(HWRenderer, self).render(components, **kwargs)
| 37.815789 | 78 | 0.767571 |
import sdl2.ext


class HWRenderer(sdl2.ext.TextureSpriteRenderSystem):
    def __init__(self, window):
        super(HWRenderer, self).__init__(window)
        self.renderer = self.sdlrenderer

    def render(self, components, **kwargs):
        self._renderer.clear()
        super(HWRenderer, self).render(components, **kwargs)
| true | true |
f71a3d5dbbec7288cff475d0741e98fb99b63c84 | 1,001 | py | Python | integration-tests/environment.py | oazmon/sceptre-template-fetcher | ff40fea4dcdb7b785b90b70426758475a8d09634 | ["Apache-2.0"] | null | null | null | integration-tests/environment.py | oazmon/sceptre-template-fetcher | ff40fea4dcdb7b785b90b70426758475a8d09634 | ["Apache-2.0"] | null | null | null | integration-tests/environment.py | oazmon/sceptre-template-fetcher | ff40fea4dcdb7b785b90b70426758475a8d09634 | ["Apache-2.0"] | null | null | null |
import os
import uuid

import yaml

from sceptre_template_fetcher.cli import setup_logging


def before_all(context):
    if context.config.wip:
        setup_logging(True)
    context.uuid = uuid.uuid1().hex
    context.project_code = "sceptre-integration-tests-{0}".format(
        context.uuid
    )
    context.sceptre_dir = os.path.join(
        os.getcwd(), "integration-tests", "sceptre-project"
    )
    update_config(context)


def before_scenario(context, scenario):
    context.error = None
    context.response = None
    context.output = None


def update_config(context):
    config_path = os.path.join(
        context.sceptre_dir, "config", "config.yaml"
    )
    with open(config_path) as config_file:
        env_config = yaml.safe_load(config_file)
    env_config["project_code"] = context.project_code
    with open(config_path, 'w') as config_file:
        yaml.safe_dump(env_config, config_file, default_flow_style=False)


def after_all(context):
    update_config(context)
| 23.27907 | 73 | 0.7003 |
import os
import uuid

import yaml

from sceptre_template_fetcher.cli import setup_logging


def before_all(context):
    if context.config.wip:
        setup_logging(True)
    context.uuid = uuid.uuid1().hex
    context.project_code = "sceptre-integration-tests-{0}".format(
        context.uuid
    )
    context.sceptre_dir = os.path.join(
        os.getcwd(), "integration-tests", "sceptre-project"
    )
    update_config(context)


def before_scenario(context, scenario):
    context.error = None
    context.response = None
    context.output = None


def update_config(context):
    config_path = os.path.join(
        context.sceptre_dir, "config", "config.yaml"
    )
    with open(config_path) as config_file:
        env_config = yaml.safe_load(config_file)
    env_config["project_code"] = context.project_code
    with open(config_path, 'w') as config_file:
        yaml.safe_dump(env_config, config_file, default_flow_style=False)


def after_all(context):
    update_config(context)
| true | true |
f71a3d9d71d9308eee9e5caacf5594010124ebf4 | 17,425 | py | Python | openstack_dashboard/dashboards/project/networks/workflows.py | ameoba/horizon | ff9e367c98a8bb79f10914abffaaa04b0a461819 | ["Apache-2.0"] | null | null | null | openstack_dashboard/dashboards/project/networks/workflows.py | ameoba/horizon | ff9e367c98a8bb79f10914abffaaa04b0a461819 | ["Apache-2.0"] | null | null | null | openstack_dashboard/dashboards/project/networks/workflows.py | ameoba/horizon | ff9e367c98a8bb79f10914abffaaa04b0a461819 | ["Apache-2.0"] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
import netaddr
from django.conf import settings
from django.core.urlresolvers import reverse # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import fields
from horizon import workflows
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class CreateNetworkInfoAction(workflows.Action):
net_name = forms.CharField(max_length=255,
label=_("Network Name"),
required=False)
if api.neutron.is_port_profiles_supported():
net_profile_id = forms.ChoiceField(label=_("Network Profile"))
admin_state = forms.BooleanField(label=_("Admin State"),
initial=True, required=False)
if api.neutron.is_port_profiles_supported():
def __init__(self, request, *args, **kwargs):
super(CreateNetworkInfoAction, self).__init__(request,
*args, **kwargs)
self.fields['net_profile_id'].choices = (
self.get_network_profile_choices(request))
def get_network_profile_choices(self, request):
profile_choices = [('', _("Select a profile"))]
for profile in self._get_profiles(request, 'network'):
profile_choices.append((profile.id, profile.name))
return profile_choices
def _get_profiles(self, request, type_p):
try:
profiles = api.neutron.profile_list(request, type_p)
except Exception:
profiles = []
msg = _('Network Profiles could not be retrieved.')
exceptions.handle(request, msg)
return profiles
# TODO(absubram): Add ability to view network profile information
# in the network detail if a profile is used.
class Meta:
name = _("Network")
help_text = _("From here you can create a new network.\n"
"In addition a subnet associated with the network "
"can be created in the next panel.")
class CreateNetworkInfo(workflows.Step):
action_class = CreateNetworkInfoAction
if api.neutron.is_port_profiles_supported():
contributes = ("net_name", "admin_state", "net_profile_id")
else:
contributes = ("net_name", "admin_state")
class CreateSubnetInfoAction(workflows.Action):
_ccs_enable_ipv6 = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {}).get('enable_ipv6', False)
if _ccs_enable_ipv6:
ip_version_choices = [(4, 'IPv4'), (6, 'IPv6')]
ip_version_fields = fields.IPv4 | fields.IPv6
else:
ip_version_choices = [(4, 'IPv4')]
ip_version_fields = fields.IPv4
with_subnet = forms.BooleanField(label=_("Create Subnet"),
initial=True, required=False)
subnet_name = forms.CharField(max_length=255,
label=_("Subnet Name"),
required=False)
cidr = fields.IPField(label=_("Network Address"),
required=False,
initial="",
help_text=_("Network address in CIDR format "
"(e.g. 192.168.0.0/24)"),
version=ip_version_fields,
mask=True)
ip_version = forms.ChoiceField(choices=ip_version_choices,
label=_("IP Version"))
gateway_ip = fields.IPField(
label=_("Gateway IP"),
required=False,
initial="",
help_text=_("IP address of Gateway (e.g. 192.168.0.254) "
"The default value is the first IP of the "
"network address (e.g. 192.168.0.1 for "
"192.168.0.0/24). "
"If you use the default, leave blank. "
"If you want to use no gateway, "
"check 'Disable Gateway' below."),
version=ip_version_fields,
mask=False)
no_gateway = forms.BooleanField(label=_("Disable Gateway"),
initial=False, required=False)
class Meta:
name = _("Subnet")
help_text = _('You can create a subnet associated with the new '
'network, in which case "Network Address" must be '
'specified. If you wish to create a network WITHOUT a '
'subnet, uncheck the "Create Subnet" checkbox.')
def __init__(self, request, context, *args, **kwargs):
super(CreateSubnetInfoAction, self).__init__(request, context, *args,
**kwargs)
if not getattr(settings, 'OPENSTACK_NEUTRON_NETWORK',
{}).get('enable_ipv6', True):
self.fields['ip_version'].widget = forms.HiddenInput()
self.fields['ip_version'].initial = 4
def _check_subnet_data(self, cleaned_data, is_create=True):
cidr = cleaned_data.get('cidr')
ip_version = int(cleaned_data.get('ip_version'))
gateway_ip = cleaned_data.get('gateway_ip')
no_gateway = cleaned_data.get('no_gateway')
if not cidr:
msg = _('Specify "Network Address" or '
'clear "Create Subnet" checkbox.')
raise forms.ValidationError(msg)
if cidr:
subnet = netaddr.IPNetwork(cidr)
if subnet.version != ip_version:
msg = _('Network Address and IP version are inconsistent.')
raise forms.ValidationError(msg)
if (ip_version == 4 and subnet.prefixlen == 32) or \
(ip_version == 6 and subnet.prefixlen == 128):
msg = _("The subnet in the Network Address is too small (/%s)."
% subnet.prefixlen)
raise forms.ValidationError(msg)
if not no_gateway and gateway_ip:
if netaddr.IPAddress(gateway_ip).version is not ip_version:
msg = _('Gateway IP and IP version are inconsistent.')
raise forms.ValidationError(msg)
if not is_create and not no_gateway and not gateway_ip:
msg = _('Specify IP address of gateway or '
'check "Disable Gateway".')
raise forms.ValidationError(msg)
def clean(self):
cleaned_data = super(CreateSubnetInfoAction, self).clean()
with_subnet = cleaned_data.get('with_subnet')
if not with_subnet:
return cleaned_data
self._check_subnet_data(cleaned_data)
return cleaned_data
class CreateSubnetInfo(workflows.Step):
action_class = CreateSubnetInfoAction
contributes = ("with_subnet", "subnet_name", "cidr",
"ip_version", "gateway_ip", "no_gateway")
class CreateSubnetDetailAction(workflows.Action):
enable_dhcp = forms.BooleanField(label=_("Enable DHCP"),
initial=True, required=False)
allocation_pools = forms.CharField(
widget=forms.Textarea(),
label=_("Allocation Pools"),
help_text=_("IP address allocation pools. Each entry is "
"<start_ip_address>,<end_ip_address> "
"(e.g., 192.168.1.100,192.168.1.120) "
"and one entry per line."),
required=False)
dns_nameservers = forms.CharField(
widget=forms.widgets.Textarea(),
label=_("DNS Name Servers"),
help_text=_("IP address list of DNS name servers for this subnet. "
"One entry per line."),
required=False)
host_routes = forms.CharField(
widget=forms.widgets.Textarea(),
label=_("Host Routes"),
help_text=_("Additional routes announced to the hosts. "
"Each entry is <destination_cidr>,<nexthop> "
"(e.g., 192.168.200.0/24,10.56.1.254) "
"and one entry per line."),
required=False)
class Meta:
name = _("Subnet Detail")
help_text = _('You can specify additional attributes for the subnet.')
def _convert_ip_address(self, ip, field_name):
try:
return netaddr.IPAddress(ip)
except (netaddr.AddrFormatError, ValueError):
msg = _('%(field_name)s: Invalid IP address '
'(value=%(ip)s)' % dict(
field_name=field_name, ip=ip))
raise forms.ValidationError(msg)
def _convert_ip_network(self, network, field_name):
try:
return netaddr.IPNetwork(network)
except (netaddr.AddrFormatError, ValueError):
msg = _('%(field_name)s: Invalid IP address '
'(value=%(network)s)' % dict(
field_name=field_name, network=network))
raise forms.ValidationError(msg)
def _check_allocation_pools(self, allocation_pools):
for p in allocation_pools.split('\n'):
p = p.strip()
if not p:
continue
pool = p.split(',')
if len(pool) != 2:
msg = _('Start and end addresses must be specified '
'(value=%s)') % p
raise forms.ValidationError(msg)
start, end = [self._convert_ip_address(ip, "allocation_pools")
for ip in pool]
if start > end:
msg = _('Start address is larger than end address '
'(value=%s)') % p
raise forms.ValidationError(msg)
def _check_dns_nameservers(self, dns_nameservers):
for ns in dns_nameservers.split('\n'):
ns = ns.strip()
if not ns:
continue
self._convert_ip_address(ns, "dns_nameservers")
def _check_host_routes(self, host_routes):
for r in host_routes.split('\n'):
r = r.strip()
if not r:
continue
route = r.split(',')
if len(route) != 2:
msg = _('Host Routes format error: '
'Destination CIDR and nexthop must be specified '
'(value=%s)') % r
raise forms.ValidationError(msg)
self._convert_ip_network(route[0], "host_routes")
self._convert_ip_address(route[1], "host_routes")
def clean(self):
cleaned_data = super(CreateSubnetDetailAction, self).clean()
self._check_allocation_pools(cleaned_data.get('allocation_pools'))
self._check_host_routes(cleaned_data.get('host_routes'))
self._check_dns_nameservers(cleaned_data.get('dns_nameservers'))
return cleaned_data
class CreateSubnetDetail(workflows.Step):
action_class = CreateSubnetDetailAction
contributes = ("enable_dhcp", "allocation_pools",
"dns_nameservers", "host_routes")
class CreateNetwork(workflows.Workflow):
slug = "create_network"
name = _("Create Network")
finalize_button_name = _("Create")
success_message = _('Created network "%s".')
failure_message = _('Unable to create network "%s".')
default_steps = (CreateNetworkInfo,
CreateSubnetInfo,
CreateSubnetDetail)
def get_success_url(self):
return reverse("horizon:project:networks:index")
def get_failure_url(self):
return reverse("horizon:project:networks:index")
def format_status_message(self, message):
name = self.context.get('net_name') or self.context.get('net_id', '')
return message % name
def _create_network(self, request, data):
try:
params = {'name': data['net_name'],
'admin_state_up': data['admin_state']}
if api.neutron.is_port_profiles_supported():
params['net_profile_id'] = data['net_profile_id']
network = api.neutron.network_create(request, **params)
network.set_id_as_name_if_empty()
self.context['net_id'] = network.id
msg = _('Network "%s" was successfully created.') % network.name
LOG.debug(msg)
return network
except Exception as e:
msg = (_('Failed to create network "%(network)s": %(reason)s') %
{"network": data['net_name'], "reason": e})
LOG.info(msg)
redirect = self.get_failure_url()
exceptions.handle(request, msg, redirect=redirect)
return False
def _setup_subnet_parameters(self, params, data, is_create=True):
"""Setup subnet parameters
This methods setups subnet parameters which are available
in both create and update.
"""
is_update = not is_create
params['enable_dhcp'] = data['enable_dhcp']
if is_create and data['allocation_pools']:
pools = [dict(zip(['start', 'end'], pool.strip().split(',')))
for pool in data['allocation_pools'].split('\n')
if pool.strip()]
params['allocation_pools'] = pools
if data['host_routes'] or is_update:
routes = [dict(zip(['destination', 'nexthop'],
route.strip().split(',')))
for route in data['host_routes'].split('\n')
if route.strip()]
params['host_routes'] = routes
if data['dns_nameservers'] or is_update:
nameservers = [ns.strip()
for ns in data['dns_nameservers'].split('\n')
if ns.strip()]
params['dns_nameservers'] = nameservers
def _create_subnet(self, request, data, network=None, tenant_id=None,
no_redirect=False):
if network:
network_id = network.id
network_name = network.name
else:
network_id = self.context.get('network_id')
network_name = self.context.get('network_name')
try:
params = {'network_id': network_id,
'name': data['subnet_name'],
'cidr': data['cidr'],
'ip_version': int(data['ip_version'])}
if tenant_id:
params['tenant_id'] = tenant_id
if data['no_gateway']:
params['gateway_ip'] = None
elif data['gateway_ip']:
params['gateway_ip'] = data['gateway_ip']
self._setup_subnet_parameters(params, data)
subnet = api.neutron.subnet_create(request, **params)
self.context['subnet_id'] = subnet.id
msg = _('Subnet "%s" was successfully created.') % data['cidr']
LOG.debug(msg)
return subnet
except Exception as e:
msg = _('Failed to create subnet "%(sub)s" for network "%(net)s": '
' %(reason)s')
if no_redirect:
redirect = None
else:
redirect = self.get_failure_url()
exceptions.handle(request,
msg % {"sub": data['cidr'], "net": network_name,
"reason": e},
redirect=redirect)
return False
def _delete_network(self, request, network):
"""Delete the created network when subnet creation failed"""
try:
api.neutron.network_delete(request, network.id)
msg = _('Delete the created network "%s" '
'due to subnet creation failure.') % network.name
LOG.debug(msg)
redirect = self.get_failure_url()
messages.info(request, msg)
raise exceptions.Http302(redirect)
#return exceptions.RecoverableError
except Exception:
msg = _('Failed to delete network "%s"') % network.name
LOG.info(msg)
redirect = self.get_failure_url()
exceptions.handle(request, msg, redirect=redirect)
def handle(self, request, data):
network = self._create_network(request, data)
if not network:
return False
# If we do not need to create a subnet, return here.
if not data['with_subnet']:
return True
subnet = self._create_subnet(request, data, network, no_redirect=True)
if subnet:
return True
else:
self._delete_network(request, network)
return False
| 41.587112 | 99 | 0.570502 |
import logging
from django.conf import settings
import netaddr
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import fields
from horizon import workflows
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class CreateNetworkInfoAction(workflows.Action):
net_name = forms.CharField(max_length=255,
label=_("Network Name"),
required=False)
if api.neutron.is_port_profiles_supported():
net_profile_id = forms.ChoiceField(label=_("Network Profile"))
admin_state = forms.BooleanField(label=_("Admin State"),
initial=True, required=False)
if api.neutron.is_port_profiles_supported():
def __init__(self, request, *args, **kwargs):
super(CreateNetworkInfoAction, self).__init__(request,
*args, **kwargs)
self.fields['net_profile_id'].choices = (
self.get_network_profile_choices(request))
def get_network_profile_choices(self, request):
profile_choices = [('', _("Select a profile"))]
for profile in self._get_profiles(request, 'network'):
profile_choices.append((profile.id, profile.name))
return profile_choices
def _get_profiles(self, request, type_p):
try:
profiles = api.neutron.profile_list(request, type_p)
except Exception:
profiles = []
msg = _('Network Profiles could not be retrieved.')
exceptions.handle(request, msg)
return profiles
class Meta:
name = _("Network")
help_text = _("From here you can create a new network.\n"
"In addition a subnet associated with the network "
"can be created in the next panel.")
class CreateNetworkInfo(workflows.Step):
action_class = CreateNetworkInfoAction
if api.neutron.is_port_profiles_supported():
contributes = ("net_name", "admin_state", "net_profile_id")
else:
contributes = ("net_name", "admin_state")
class CreateSubnetInfoAction(workflows.Action):
_ccs_enable_ipv6 = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {}).get('enable_ipv6', False)
if _ccs_enable_ipv6:
ip_version_choices = [(4, 'IPv4'), (6, 'IPv6')]
ip_version_fields = fields.IPv4 | fields.IPv6
else:
ip_version_choices = [(4, 'IPv4')]
ip_version_fields = fields.IPv4
with_subnet = forms.BooleanField(label=_("Create Subnet"),
initial=True, required=False)
subnet_name = forms.CharField(max_length=255,
label=_("Subnet Name"),
required=False)
cidr = fields.IPField(label=_("Network Address"),
required=False,
initial="",
help_text=_("Network address in CIDR format "
"(e.g. 192.168.0.0/24)"),
version=ip_version_fields,
mask=True)
ip_version = forms.ChoiceField(choices=ip_version_choices,
label=_("IP Version"))
gateway_ip = fields.IPField(
label=_("Gateway IP"),
required=False,
initial="",
help_text=_("IP address of Gateway (e.g. 192.168.0.254) "
"The default value is the first IP of the "
"network address (e.g. 192.168.0.1 for "
"192.168.0.0/24). "
"If you use the default, leave blank. "
"If you want to use no gateway, "
"check 'Disable Gateway' below."),
version=ip_version_fields,
mask=False)
no_gateway = forms.BooleanField(label=_("Disable Gateway"),
initial=False, required=False)
class Meta:
name = _("Subnet")
help_text = _('You can create a subnet associated with the new '
'network, in which case "Network Address" must be '
'specified. If you wish to create a network WITHOUT a '
'subnet, uncheck the "Create Subnet" checkbox.')
def __init__(self, request, context, *args, **kwargs):
super(CreateSubnetInfoAction, self).__init__(request, context, *args,
**kwargs)
if not getattr(settings, 'OPENSTACK_NEUTRON_NETWORK',
{}).get('enable_ipv6', True):
self.fields['ip_version'].widget = forms.HiddenInput()
self.fields['ip_version'].initial = 4
def _check_subnet_data(self, cleaned_data, is_create=True):
cidr = cleaned_data.get('cidr')
ip_version = int(cleaned_data.get('ip_version'))
gateway_ip = cleaned_data.get('gateway_ip')
no_gateway = cleaned_data.get('no_gateway')
if not cidr:
msg = _('Specify "Network Address" or '
'clear "Create Subnet" checkbox.')
raise forms.ValidationError(msg)
if cidr:
subnet = netaddr.IPNetwork(cidr)
if subnet.version != ip_version:
msg = _('Network Address and IP version are inconsistent.')
raise forms.ValidationError(msg)
if (ip_version == 4 and subnet.prefixlen == 32) or \
(ip_version == 6 and subnet.prefixlen == 128):
msg = _("The subnet in the Network Address is too small (/%s)."
% subnet.prefixlen)
raise forms.ValidationError(msg)
if not no_gateway and gateway_ip:
if netaddr.IPAddress(gateway_ip).version is not ip_version:
msg = _('Gateway IP and IP version are inconsistent.')
raise forms.ValidationError(msg)
if not is_create and not no_gateway and not gateway_ip:
msg = _('Specify IP address of gateway or '
'check "Disable Gateway".')
raise forms.ValidationError(msg)
def clean(self):
cleaned_data = super(CreateSubnetInfoAction, self).clean()
with_subnet = cleaned_data.get('with_subnet')
if not with_subnet:
return cleaned_data
self._check_subnet_data(cleaned_data)
return cleaned_data
class CreateSubnetInfo(workflows.Step):
action_class = CreateSubnetInfoAction
contributes = ("with_subnet", "subnet_name", "cidr",
"ip_version", "gateway_ip", "no_gateway")
class CreateSubnetDetailAction(workflows.Action):
enable_dhcp = forms.BooleanField(label=_("Enable DHCP"),
initial=True, required=False)
allocation_pools = forms.CharField(
widget=forms.Textarea(),
label=_("Allocation Pools"),
help_text=_("IP address allocation pools. Each entry is "
"<start_ip_address>,<end_ip_address> "
"(e.g., 192.168.1.100,192.168.1.120) "
"and one entry per line."),
required=False)
dns_nameservers = forms.CharField(
widget=forms.widgets.Textarea(),
label=_("DNS Name Servers"),
help_text=_("IP address list of DNS name servers for this subnet. "
"One entry per line."),
required=False)
host_routes = forms.CharField(
widget=forms.widgets.Textarea(),
label=_("Host Routes"),
help_text=_("Additional routes announced to the hosts. "
"Each entry is <destination_cidr>,<nexthop> "
"(e.g., 192.168.200.0/24,10.56.1.254) "
"and one entry per line."),
required=False)
class Meta:
name = _("Subnet Detail")
help_text = _('You can specify additional attributes for the subnet.')
def _convert_ip_address(self, ip, field_name):
try:
return netaddr.IPAddress(ip)
except (netaddr.AddrFormatError, ValueError):
msg = _('%(field_name)s: Invalid IP address '
'(value=%(ip)s)' % dict(
field_name=field_name, ip=ip))
raise forms.ValidationError(msg)
def _convert_ip_network(self, network, field_name):
try:
return netaddr.IPNetwork(network)
except (netaddr.AddrFormatError, ValueError):
msg = _('%(field_name)s: Invalid IP address '
'(value=%(network)s)' % dict(
field_name=field_name, network=network))
raise forms.ValidationError(msg)
def _check_allocation_pools(self, allocation_pools):
for p in allocation_pools.split('\n'):
p = p.strip()
if not p:
continue
pool = p.split(',')
if len(pool) != 2:
msg = _('Start and end addresses must be specified '
'(value=%s)') % p
raise forms.ValidationError(msg)
start, end = [self._convert_ip_address(ip, "allocation_pools")
for ip in pool]
if start > end:
msg = _('Start address is larger than end address '
'(value=%s)') % p
raise forms.ValidationError(msg)
def _check_dns_nameservers(self, dns_nameservers):
for ns in dns_nameservers.split('\n'):
ns = ns.strip()
if not ns:
continue
self._convert_ip_address(ns, "dns_nameservers")
def _check_host_routes(self, host_routes):
for r in host_routes.split('\n'):
r = r.strip()
if not r:
continue
route = r.split(',')
if len(route) != 2:
msg = _('Host Routes format error: '
'Destination CIDR and nexthop must be specified '
'(value=%s)') % r
raise forms.ValidationError(msg)
self._convert_ip_network(route[0], "host_routes")
self._convert_ip_address(route[1], "host_routes")
def clean(self):
cleaned_data = super(CreateSubnetDetailAction, self).clean()
self._check_allocation_pools(cleaned_data.get('allocation_pools'))
self._check_host_routes(cleaned_data.get('host_routes'))
self._check_dns_nameservers(cleaned_data.get('dns_nameservers'))
return cleaned_data
class CreateSubnetDetail(workflows.Step):
action_class = CreateSubnetDetailAction
contributes = ("enable_dhcp", "allocation_pools",
"dns_nameservers", "host_routes")
class CreateNetwork(workflows.Workflow):
slug = "create_network"
name = _("Create Network")
finalize_button_name = _("Create")
success_message = _('Created network "%s".')
failure_message = _('Unable to create network "%s".')
default_steps = (CreateNetworkInfo,
CreateSubnetInfo,
CreateSubnetDetail)
def get_success_url(self):
return reverse("horizon:project:networks:index")
def get_failure_url(self):
return reverse("horizon:project:networks:index")
def format_status_message(self, message):
name = self.context.get('net_name') or self.context.get('net_id', '')
return message % name
def _create_network(self, request, data):
try:
params = {'name': data['net_name'],
'admin_state_up': data['admin_state']}
if api.neutron.is_port_profiles_supported():
params['net_profile_id'] = data['net_profile_id']
network = api.neutron.network_create(request, **params)
network.set_id_as_name_if_empty()
self.context['net_id'] = network.id
msg = _('Network "%s" was successfully created.') % network.name
LOG.debug(msg)
return network
except Exception as e:
msg = (_('Failed to create network "%(network)s": %(reason)s') %
{"network": data['net_name'], "reason": e})
LOG.info(msg)
redirect = self.get_failure_url()
exceptions.handle(request, msg, redirect=redirect)
return False
def _setup_subnet_parameters(self, params, data, is_create=True):
is_update = not is_create
params['enable_dhcp'] = data['enable_dhcp']
if is_create and data['allocation_pools']:
pools = [dict(zip(['start', 'end'], pool.strip().split(',')))
for pool in data['allocation_pools'].split('\n')
if pool.strip()]
params['allocation_pools'] = pools
if data['host_routes'] or is_update:
routes = [dict(zip(['destination', 'nexthop'],
route.strip().split(',')))
for route in data['host_routes'].split('\n')
if route.strip()]
params['host_routes'] = routes
if data['dns_nameservers'] or is_update:
nameservers = [ns.strip()
for ns in data['dns_nameservers'].split('\n')
if ns.strip()]
params['dns_nameservers'] = nameservers
def _create_subnet(self, request, data, network=None, tenant_id=None,
no_redirect=False):
if network:
network_id = network.id
network_name = network.name
else:
network_id = self.context.get('network_id')
network_name = self.context.get('network_name')
try:
params = {'network_id': network_id,
'name': data['subnet_name'],
'cidr': data['cidr'],
'ip_version': int(data['ip_version'])}
if tenant_id:
params['tenant_id'] = tenant_id
if data['no_gateway']:
params['gateway_ip'] = None
elif data['gateway_ip']:
params['gateway_ip'] = data['gateway_ip']
self._setup_subnet_parameters(params, data)
subnet = api.neutron.subnet_create(request, **params)
self.context['subnet_id'] = subnet.id
msg = _('Subnet "%s" was successfully created.') % data['cidr']
LOG.debug(msg)
return subnet
except Exception as e:
msg = _('Failed to create subnet "%(sub)s" for network "%(net)s": '
' %(reason)s')
if no_redirect:
redirect = None
else:
redirect = self.get_failure_url()
exceptions.handle(request,
msg % {"sub": data['cidr'], "net": network_name,
"reason": e},
redirect=redirect)
return False
def _delete_network(self, request, network):
try:
api.neutron.network_delete(request, network.id)
msg = _('Delete the created network "%s" '
'due to subnet creation failure.') % network.name
LOG.debug(msg)
redirect = self.get_failure_url()
messages.info(request, msg)
raise exceptions.Http302(redirect)
except Exception:
msg = _('Failed to delete network "%s"') % network.name
LOG.info(msg)
redirect = self.get_failure_url()
exceptions.handle(request, msg, redirect=redirect)
def handle(self, request, data):
network = self._create_network(request, data)
if not network:
return False
if not data['with_subnet']:
return True
subnet = self._create_subnet(request, data, network, no_redirect=True)
if subnet:
return True
else:
self._delete_network(request, network)
return False
| true | true |
f71a3eaeece4ab1511448b596d52d6ce7165fb16 | 34 | py | Python | 06_01_name_conflict.py | simonmonk/prog_pico_ed1 | 36e70f88ea7dc73e75399cd390d1cc2023843971 | ["MIT"] | 6 | 2021-05-08T13:19:33.000Z | 2022-03-20T08:29:44.000Z | 06_01_name_conflict.py | simonmonk/prog_pico_ed1 | 36e70f88ea7dc73e75399cd390d1cc2023843971 | ["MIT"] | 1 | 2021-03-05T20:27:15.000Z | 2021-11-17T09:07:43.000Z | 06_01_name_conflict.py | simonmonk/prog_pico_ed1 | 36e70f88ea7dc73e75399cd390d1cc2023843971 | ["MIT"] | 2 | 2021-07-02T15:19:37.000Z | 2021-10-06T00:53:25.000Z |
def print():
    pass


print()
| 8.5 | 12 | 0.5 |
def print():
    pass


print()
| true | true |
f71a3ebfc7a88a941fd26cb5f19083ae093e7d3f | 18,115 | py | Python | src/command_modules/azure-cli-resource/azure/cli/command_modules/resource/tests/test_locks.py | aag09/azurecli | 30c98a75c36c02a657f1753ff5c48502dc7f7933 | ["MIT"] | null | null | null | src/command_modules/azure-cli-resource/azure/cli/command_modules/resource/tests/test_locks.py | aag09/azurecli | 30c98a75c36c02a657f1753ff5c48502dc7f7933 | ["MIT"] | null | null | null | src/command_modules/azure-cli-resource/azure/cli/command_modules/resource/tests/test_locks.py | aag09/azurecli | 30c98a75c36c02a657f1753ff5c48502dc7f7933 | ["MIT"] | 1 | 2017-12-28T04:51:44.000Z | 2017-12-28T04:51:44.000Z |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from time import sleep
import unittest
from azure.cli.testsdk import ScenarioTest, JMESPathCheck, ResourceGroupPreparer, record_only
from azure.cli.command_modules.resource.custom import _parse_lock_id
class ResourceLockTests(ScenarioTest):
def test_list_locks(self):
# just make sure this doesn't throw
self.cmd('az lock list').get_output_in_json()
@record_only()
def test_subscription_locks(self):
for lock_type in ['ReadOnly', 'CanNotDelete']:
lock_name = self.create_random_name('cli-test-lock', 48)
lock = self.cmd('az lock create -n {} --lock-type {}'.format(lock_name, lock_type)).get_output_in_json()
lock_id = lock.get('id')
self._sleep_for_lock_operation()
locks_list = self.cmd('az lock list').get_output_in_json()
self.assertTrue(locks_list)
self.assertIn(lock_name, [l['name'] for l in locks_list])
lock = self.cmd('az lock show -n {}'.format(lock_name)).get_output_in_json()
lock_from_id = self.cmd('az lock show --ids {}'.format(lock_id)).get_output_in_json()
self.assertEqual(lock.get('name', None), lock_name)
self.assertEqual(lock_from_id.get('name', None), lock_name)
self.assertEqual(lock.get('level', None), lock_type)
notes = self.create_random_name('notes', 20)
new_lvl = 'ReadOnly' if lock_type == 'CanNotDelete' else 'CanNotDelete'
lock = self.cmd('az lock update -n {} --notes {} --lock-type {}'
.format(lock_name, notes, new_lvl)).get_output_in_json()
self.assertEqual(lock.get('notes', None), notes)
self.assertEqual(lock.get('level', None), new_lvl)
lock = self.cmd('az lock update --ids {} --lock-type {}'
.format(lock_id, lock_type)).get_output_in_json()
self.assertEqual(lock.get('level', None), lock_type)
self.cmd('az lock delete -n {}'.format(lock_name))
self._sleep_for_lock_operation()
@ResourceGroupPreparer(name_prefix='cli_test_readonly_resource_group_lock')
def test_readonly_resource_group_lock(self, resource_group):
self._lock_operation_with_resource_group('ReadOnly', resource_group)
@ResourceGroupPreparer(name_prefix='cli_test_cannotdelete_resource_group_lock')
def test_cannotdelete_resource_group_lock(self, resource_group):
self._lock_operation_with_resource_group('CanNotDelete', resource_group)
@ResourceGroupPreparer(name_prefix='cli_test_readonly_resource_lock')
def test_readonly_resource_lock(self, resource_group):
self._lock_operation_with_resource('ReadOnly', resource_group)
@ResourceGroupPreparer(name_prefix='cli_test_cannotdelete_resource_lock')
def test_cannotdelete_resource_lock(self, resource_group):
self._lock_operation_with_resource('CanNotDelete', resource_group)
def _lock_operation_with_resource_group(self, lock_type, resource_group):
lock_name = self.create_random_name('cli-test-lock', 48)
self.cmd('az lock create -n {} -g {} --lock-type {}'.format(lock_name, resource_group, lock_type))
self._sleep_for_lock_operation()
self.cmd('az lock show -g {} -n {}'.format(resource_group, lock_name)).assert_with_checks([
JMESPathCheck('name', lock_name),
JMESPathCheck('level', lock_type)])
locks_list = self.cmd("az lock list -g {} --query '[].name' -ojson".format(resource_group)).get_output_in_json()
self.assertTrue(locks_list)
self.assertIn(lock_name, locks_list)
notes = self.create_random_name('notes', 20)
new_lvl = 'ReadOnly' if lock_type == 'CanNotDelete' else 'CanNotDelete'
lock = self.cmd('az lock update -n {} -g {} --notes {} --lock-type {}'
.format(lock_name, resource_group, notes, new_lvl)).get_output_in_json()
self.assertEqual(lock.get('notes', None), notes)
self.assertEqual(lock.get('level', None), new_lvl)
self.cmd('az lock delete -g {} -n {}'.format(resource_group, lock_name))
self._sleep_for_lock_operation()
def _lock_operation_with_resource(self, lock_type, resource_group):
rsrc_name = self.create_random_name('cli.lock.rsrc', 30)
rsrc_type = 'Microsoft.Network/virtualNetworks'
lock_name = self.create_random_name('cli-test-lock', 74)
self.cmd('az network vnet create -n {} -g {}'.format(rsrc_name, resource_group))
self.cmd('az lock create -n {} -g {} --resource-type {} --resource-name {} --lock-type {}'
.format(lock_name, resource_group, rsrc_type, rsrc_name, lock_type))
self._sleep_for_lock_operation()
self.cmd('az lock show --name {} -g {} --resource-type {} --resource-name {}'
.format(lock_name, resource_group, rsrc_type, rsrc_name)).assert_with_checks([
JMESPathCheck('name', lock_name),
JMESPathCheck('level', lock_type)])
locks_list = self.cmd("az lock list --query '[].name' -ojson").get_output_in_json()
self.assertTrue(locks_list)
self.assertIn(lock_name, locks_list)
notes = self.create_random_name('notes', 20)
new_lvl = 'ReadOnly' if lock_type == 'CanNotDelete' else 'CanNotDelete'
lock = self.cmd('az lock update -n {} -g {} --resource-type {} --resource-name {} --notes {} --lock-type {}'
.format(lock_name, resource_group, rsrc_type, rsrc_name, notes, new_lvl)).get_output_in_json()
self.assertEqual(lock.get('notes', None), notes)
self.assertEqual(lock.get('level', None), new_lvl)
self.cmd('az lock delete --name {} -g {} --resource-name {} --resource-type {}'
.format(lock_name, resource_group, rsrc_name, rsrc_type))
self._sleep_for_lock_operation()
@ResourceGroupPreparer(name_prefix='cli_test_group_lock')
def test_group_lock_commands(self, resource_group):
lock_name = self.create_random_name('cli-test-lock', 48)
self.cmd('group lock create -n {} -g {} --lock-type CanNotDelete'.format(lock_name, resource_group))
self._sleep_for_lock_operation()
self.cmd('group lock show -g {} -n {}'.format(resource_group, lock_name)).assert_with_checks([
JMESPathCheck('name', lock_name),
JMESPathCheck('level', 'CanNotDelete')]).get_output_in_json()
locks_list = self.cmd("group lock list -g {} --query [].name -ojson"
.format(resource_group)).get_output_in_json()
self.assertTrue(locks_list)
self.assertIn(lock_name, locks_list)
notes = self.create_random_name('notes', 20)
lock = self.cmd('group lock update -n {} -g {} --notes {} --lock-type ReadOnly'
.format(lock_name, resource_group, notes)).get_output_in_json()
self.assertEqual(lock.get('notes', None), notes)
self.assertEqual(lock.get('level', None), 'ReadOnly')
self.cmd('group lock delete -g {} -n {}'.format(resource_group, lock_name))
self._sleep_for_lock_operation()
@ResourceGroupPreparer(name_prefix='cli_test_resource_lock')
def test_resource_lock_commands(self, resource_group):
rsrc_name = self.create_random_name('cli.lock.rsrc', 30)
rsrc_type = 'Microsoft.Network/virtualNetworks'
lock_name = self.create_random_name('cli-test-lock', 74)
lock_type = 'CanNotDelete'
self.cmd('network vnet create -n {} -g {}'.format(rsrc_name, resource_group))
self.cmd('resource lock create -n {} -g {} --resource-type {} --resource-name {} --lock-type {}'
.format(lock_name, resource_group, rsrc_type, rsrc_name, lock_type))
self._sleep_for_lock_operation()
self.cmd('resource lock show --name {} -g {} --resource-type {} --resource-name {}'
.format(lock_name, resource_group, rsrc_type, rsrc_name)).assert_with_checks([
JMESPathCheck('name', lock_name),
JMESPathCheck('level', lock_type)])
list_cmd = "resource lock list -g {} --resource-type {} --resource-name {} " \
"--query [].name -ojson".format(resource_group, rsrc_type, rsrc_name)
locks_list = self.cmd(list_cmd).get_output_in_json()
self.assertTrue(locks_list)
self.assertIn(lock_name, locks_list)
notes = self.create_random_name('notes', 20)
lock = self.cmd('resource lock update -n {} -g {} --resource-type {} --resource-name {} --notes {} '
'--lock-type ReadOnly'
.format(lock_name, resource_group, rsrc_type, rsrc_name, notes)).get_output_in_json()
self.assertEqual(lock.get('notes', None), notes)
self.assertEqual(lock.get('level', None), 'ReadOnly')
self.cmd('resource lock delete --name {} -g {} --resource-name {} --resource-type {}'
.format(lock_name, resource_group, rsrc_name, rsrc_type))
self._sleep_for_lock_operation()
@record_only()
def test_subscription_locks(self):
lock_name = self.create_random_name('cli-test-lock', 48)
lock = self.cmd('az account lock create -n {} --lock-type CanNotDelete'.format(lock_name)).get_output_in_json()
lock_id = lock.get('id')
locks_list = self.cmd('az account lock list --query [].name').get_output_in_json()
self.assertTrue(locks_list)
self.assertIn(lock_name, locks_list)
lock = self.cmd('az account lock show -n {}'.format(lock_name)).get_output_in_json()
lock_from_id = self.cmd('az account lock show --ids {}'.format(lock_id)).get_output_in_json()
self.assertEqual(lock.get('name', None), lock_name)
self.assertEqual(lock_from_id.get('name', None), lock_name)
self.assertEqual(lock.get('level', None), 'CanNotDelete')
notes = self.create_random_name('notes', 20)
lock = self.cmd('az account lock update -n {} --notes {} --lock-type {}'
.format(lock_name, notes, 'ReadOnly')).get_output_in_json()
self.assertEqual(lock.get('notes', None), notes)
self.assertEqual(lock.get('level', None), 'ReadOnly')
lock = self.cmd('az account lock update --ids {} --lock-type {}'
.format(lock_id, 'CanNotDelete')).get_output_in_json()
self.assertEqual(lock.get('level', None), 'CanNotDelete')
self.cmd('az account lock delete -n {}'.format(lock_name))
@ResourceGroupPreparer(name_prefix='cli_test_lock_commands_with_ids')
def test_lock_commands_with_ids(self, resource_group):
vnet_name = self.create_random_name('cli-lock-vnet', 30)
subnet_name = self.create_random_name('cli-lock-subnet', 30)
group_lock_name = self.create_random_name('cli-test-lock', 50)
vnet_lock_name = self.create_random_name('cli-test-lock', 50)
subnet_lock_name = self.create_random_name('cli-test-lock', 20)
vnet = self.cmd('az network vnet create -n {} -g {}'.format(vnet_name, resource_group)).get_output_in_json()
subnetaddress = vnet.get('newVNet').get('addressSpace').get('addressPrefixes')[0]
self.cmd('az network vnet subnet create -n {} --address-prefix {} --vnet-name {} -g {}'
.format(subnet_name, subnetaddress, vnet_name, resource_group))
locks = []
locks.append(self.cmd('az lock create -n {} -g {} --lock-type CanNotDelete'
.format(group_lock_name, resource_group)).get_output_in_json())
locks.append(self.cmd('az lock create -n {} -g {} --resource-type Microsoft.Network/virtualNetworks'
' --resource-name {} --lock-type CanNotDelete'
.format(vnet_lock_name, resource_group, vnet_name)).get_output_in_json())
locks.append(self.cmd('az lock create -n {} -g {} --resource-name {} --resource-type subnets '
'--namespace Microsoft.Network --parent virtualNetworks/{} --lock-type CanNotDelete'
.format(subnet_lock_name, resource_group, subnet_name, vnet_name)).get_output_in_json())
self._sleep_for_lock_operation()
space_delimited_ids = ' '.join([lock.get('id', None) for lock in locks])
my_locks = self.cmd('az lock show --ids {} --query [].name'.format(space_delimited_ids)).get_output_in_json()
self.assertTrue(len(my_locks) == 3)
for lock in my_locks:
self.assertIn(lock, [group_lock_name, vnet_lock_name, subnet_lock_name])
my_locks = self.cmd('az lock update --ids {} --notes somenotes --lock-type ReadOnly'
.format(space_delimited_ids)).get_output_in_json()
self.assertTrue(len(my_locks) == 3)
for lock in my_locks:
self.assertEqual(lock.get('notes', None), 'somenotes')
self.assertEqual(lock.get('level', None), 'ReadOnly')
self.cmd('az lock delete --ids {}'.format(space_delimited_ids))
self._sleep_for_lock_operation()
my_locks = self.cmd("az lock list -g {} -ojson".format(resource_group)).get_output_in_json()
self.assertFalse(my_locks)
def _sleep_for_lock_operation(self):
if self.is_live:
sleep(5)
class ParseIdTests(unittest.TestCase):
def test_parsing_lock_ids(self):
tests = [
{
'input': "/subscriptions/subId/providers/"
"Microsoft.Authorization/locks/sublock",
'expected': {
'resource_group': None,
'resource_provider_namespace': None,
'parent_resource_path': None,
'resource_type': None,
'resource_name': None,
'lock_name': 'sublock'
}
},
{
'input': "/subscriptions/subId/resourceGroups/examplegroup/providers/"
"Microsoft.Authorization/locks/grouplock",
'expected': {
'resource_group': 'examplegroup',
'resource_provider_namespace': None,
'parent_resource_path': None,
'resource_type': None,
'resource_name': None,
'lock_name': 'grouplock'
}
},
{
'input': "/subscriptions/subId/resourcegroups/mygroup/providers/"
"Microsoft.Network/virtualNetworks/myvnet/providers/"
"Microsoft.Authorization/locks/vnetlock",
'expected': {
'resource_group': 'mygroup',
'resource_provider_namespace': 'Microsoft.Network',
'parent_resource_path': None,
'resource_type': 'virtualNetworks',
'resource_name': 'myvnet',
'lock_name': 'vnetlock'
}
},
{
'input': "/subscriptions/subId/resourceGroups/mygroup/providers/"
"Microsoft.Network/virtualNetworks/myvnet/subnets/subnet/providers/"
"Microsoft.Authorization/locks/subnetlock",
'expected': {
'resource_group': 'mygroup',
'resource_provider_namespace': 'Microsoft.Network',
'parent_resource_path': 'virtualNetworks/myvnet',
'resource_type': 'subnets',
'resource_name': 'subnet',
'lock_name': 'subnetlock'
}
},
{
'input': "/subscriptions/subId/resourceGroups/mygroup/providers/"
"Microsoft.Provider1/resourceType1/name1/providers/"
"Microsoft.Provider2/resourceType2/name2/providers/"
"Microsoft.Authorization/locks/somelock",
'expected': {
'resource_group': 'mygroup',
'resource_provider_namespace': 'Microsoft.Provider1',
'parent_resource_path': 'resourceType1/name1/providers/Microsoft.Provider2',
'resource_type': 'resourceType2',
'resource_name': 'name2',
'lock_name': 'somelock'
}
}
]
for test in tests:
kwargs = _parse_lock_id(test['input'])
self.assertDictEqual(kwargs, test['expected'])
fail_tests = [
"/notsubscriptions/subId/providers/Microsoft.Authorization/locks/sublock",
"/subscriptions/subId/notResourceGroups/examplegroup/providers/Microsoft.Authorization/locks/grouplock",
"/subscriptions/subId/resourceGroups/examplegroup/providers/Microsoft.NotAuthorization/not_locks/grouplock",
"/subscriptions/subId/resourcegroups/mygroup/Microsoft.Network/virtualNetworks/myvnet/providers/"
"Microsoft.Authorization/locks/missingProvidersLock",
"/subscriptions/subId/resourcegroups/mygroup/providers/Microsoft.Network/myvnet/providers/"
"Microsoft.Authorization/locks/missingRsrcTypeLock",
"/subscriptions/subId/providers/Microsoft.Network/virtualNetworks/myvnet/subnets/subnet/providers/"
"Microsoft.Authorization/locks/missingRsrcGroupLock",
"not_a_id_at_all"
]
for test in fail_tests:
with self.assertRaises(AttributeError):
_parse_lock_id(test)
if __name__ == '__main__':
unittest.main()
| 51.463068 | 120 | 0.613525 |
from time import sleep
import unittest
from azure.cli.testsdk import ScenarioTest, JMESPathCheck, ResourceGroupPreparer, record_only
from azure.cli.command_modules.resource.custom import _parse_lock_id
class ResourceLockTests(ScenarioTest):
def test_list_locks(self):
self.cmd('az lock list').get_output_in_json()
@record_only()
def test_subscription_locks(self):
for lock_type in ['ReadOnly', 'CanNotDelete']:
lock_name = self.create_random_name('cli-test-lock', 48)
lock = self.cmd('az lock create -n {} --lock-type {}'.format(lock_name, lock_type)).get_output_in_json()
lock_id = lock.get('id')
self._sleep_for_lock_operation()
locks_list = self.cmd('az lock list').get_output_in_json()
self.assertTrue(locks_list)
self.assertIn(lock_name, [l['name'] for l in locks_list])
lock = self.cmd('az lock show -n {}'.format(lock_name)).get_output_in_json()
lock_from_id = self.cmd('az lock show --ids {}'.format(lock_id)).get_output_in_json()
self.assertEqual(lock.get('name', None), lock_name)
self.assertEqual(lock_from_id.get('name', None), lock_name)
self.assertEqual(lock.get('level', None), lock_type)
notes = self.create_random_name('notes', 20)
new_lvl = 'ReadOnly' if lock_type == 'CanNotDelete' else 'CanNotDelete'
lock = self.cmd('az lock update -n {} --notes {} --lock-type {}'
.format(lock_name, notes, new_lvl)).get_output_in_json()
self.assertEqual(lock.get('notes', None), notes)
self.assertEqual(lock.get('level', None), new_lvl)
lock = self.cmd('az lock update --ids {} --lock-type {}'
.format(lock_id, lock_type)).get_output_in_json()
self.assertEqual(lock.get('level', None), lock_type)
self.cmd('az lock delete -n {}'.format(lock_name))
self._sleep_for_lock_operation()
@ResourceGroupPreparer(name_prefix='cli_test_readonly_resource_group_lock')
def test_readonly_resource_group_lock(self, resource_group):
self._lock_operation_with_resource_group('ReadOnly', resource_group)
@ResourceGroupPreparer(name_prefix='cli_test_cannotdelete_resource_group_lock')
def test_cannotdelete_resource_group_lock(self, resource_group):
self._lock_operation_with_resource_group('CanNotDelete', resource_group)
@ResourceGroupPreparer(name_prefix='cli_test_readonly_resource_lock')
def test_readonly_resource_lock(self, resource_group):
self._lock_operation_with_resource('ReadOnly', resource_group)
@ResourceGroupPreparer(name_prefix='cli_test_cannotdelete_resource_lock')
def test_cannotdelete_resource_lock(self, resource_group):
self._lock_operation_with_resource('CanNotDelete', resource_group)
def _lock_operation_with_resource_group(self, lock_type, resource_group):
lock_name = self.create_random_name('cli-test-lock', 48)
self.cmd('az lock create -n {} -g {} --lock-type {}'.format(lock_name, resource_group, lock_type))
self._sleep_for_lock_operation()
self.cmd('az lock show -g {} -n {}'.format(resource_group, lock_name)).assert_with_checks([
JMESPathCheck('name', lock_name),
JMESPathCheck('level', lock_type)])
locks_list = self.cmd("az lock list -g {} --query '[].name' -ojson".format(resource_group)).get_output_in_json()
self.assertTrue(locks_list)
self.assertIn(lock_name, locks_list)
notes = self.create_random_name('notes', 20)
new_lvl = 'ReadOnly' if lock_type == 'CanNotDelete' else 'CanNotDelete'
lock = self.cmd('az lock update -n {} -g {} --notes {} --lock-type {}'
.format(lock_name, resource_group, notes, new_lvl)).get_output_in_json()
self.assertEqual(lock.get('notes', None), notes)
self.assertEqual(lock.get('level', None), new_lvl)
self.cmd('az lock delete -g {} -n {}'.format(resource_group, lock_name))
self._sleep_for_lock_operation()
def _lock_operation_with_resource(self, lock_type, resource_group):
rsrc_name = self.create_random_name('cli.lock.rsrc', 30)
rsrc_type = 'Microsoft.Network/virtualNetworks'
lock_name = self.create_random_name('cli-test-lock', 74)
self.cmd('az network vnet create -n {} -g {}'.format(rsrc_name, resource_group))
self.cmd('az lock create -n {} -g {} --resource-type {} --resource-name {} --lock-type {}'
.format(lock_name, resource_group, rsrc_type, rsrc_name, lock_type))
self._sleep_for_lock_operation()
self.cmd('az lock show --name {} -g {} --resource-type {} --resource-name {}'
.format(lock_name, resource_group, rsrc_type, rsrc_name)).assert_with_checks([
JMESPathCheck('name', lock_name),
JMESPathCheck('level', lock_type)])
locks_list = self.cmd("az lock list --query '[].name' -ojson").get_output_in_json()
self.assertTrue(locks_list)
self.assertIn(lock_name, locks_list)
notes = self.create_random_name('notes', 20)
new_lvl = 'ReadOnly' if lock_type == 'CanNotDelete' else 'CanNotDelete'
lock = self.cmd('az lock update -n {} -g {} --resource-type {} --resource-name {} --notes {} --lock-type {}'
.format(lock_name, resource_group, rsrc_type, rsrc_name, notes, new_lvl)).get_output_in_json()
self.assertEqual(lock.get('notes', None), notes)
self.assertEqual(lock.get('level', None), new_lvl)
self.cmd('az lock delete --name {} -g {} --resource-name {} --resource-type {}'
.format(lock_name, resource_group, rsrc_name, rsrc_type))
self._sleep_for_lock_operation()
@ResourceGroupPreparer(name_prefix='cli_test_group_lock')
def test_group_lock_commands(self, resource_group):
lock_name = self.create_random_name('cli-test-lock', 48)
self.cmd('group lock create -n {} -g {} --lock-type CanNotDelete'.format(lock_name, resource_group))
self._sleep_for_lock_operation()
self.cmd('group lock show -g {} -n {}'.format(resource_group, lock_name)).assert_with_checks([
JMESPathCheck('name', lock_name),
JMESPathCheck('level', 'CanNotDelete')]).get_output_in_json()
locks_list = self.cmd("group lock list -g {} --query [].name -ojson"
.format(resource_group)).get_output_in_json()
self.assertTrue(locks_list)
self.assertIn(lock_name, locks_list)
notes = self.create_random_name('notes', 20)
lock = self.cmd('group lock update -n {} -g {} --notes {} --lock-type ReadOnly'
.format(lock_name, resource_group, notes)).get_output_in_json()
self.assertEqual(lock.get('notes', None), notes)
self.assertEqual(lock.get('level', None), 'ReadOnly')
self.cmd('group lock delete -g {} -n {}'.format(resource_group, lock_name))
self._sleep_for_lock_operation()
@ResourceGroupPreparer(name_prefix='cli_test_resource_lock')
def test_resource_lock_commands(self, resource_group):
rsrc_name = self.create_random_name('cli.lock.rsrc', 30)
rsrc_type = 'Microsoft.Network/virtualNetworks'
lock_name = self.create_random_name('cli-test-lock', 74)
lock_type = 'CanNotDelete'
self.cmd('network vnet create -n {} -g {}'.format(rsrc_name, resource_group))
self.cmd('resource lock create -n {} -g {} --resource-type {} --resource-name {} --lock-type {}'
.format(lock_name, resource_group, rsrc_type, rsrc_name, lock_type))
self._sleep_for_lock_operation()
self.cmd('resource lock show --name {} -g {} --resource-type {} --resource-name {}'
.format(lock_name, resource_group, rsrc_type, rsrc_name)).assert_with_checks([
JMESPathCheck('name', lock_name),
JMESPathCheck('level', lock_type)])
list_cmd = "resource lock list -g {} --resource-type {} --resource-name {} " \
"--query [].name -ojson".format(resource_group, rsrc_type, rsrc_name)
locks_list = self.cmd(list_cmd).get_output_in_json()
self.assertTrue(locks_list)
self.assertIn(lock_name, locks_list)
notes = self.create_random_name('notes', 20)
lock = self.cmd('resource lock update -n {} -g {} --resource-type {} --resource-name {} --notes {} '
'--lock-type ReadOnly'
.format(lock_name, resource_group, rsrc_type, rsrc_name, notes)).get_output_in_json()
self.assertEqual(lock.get('notes', None), notes)
self.assertEqual(lock.get('level', None), 'ReadOnly')
self.cmd('resource lock delete --name {} -g {} --resource-name {} --resource-type {}'
.format(lock_name, resource_group, rsrc_name, rsrc_type))
self._sleep_for_lock_operation()
@record_only()
def test_subscription_locks(self):
lock_name = self.create_random_name('cli-test-lock', 48)
lock = self.cmd('az account lock create -n {} --lock-type CanNotDelete'.format(lock_name)).get_output_in_json()
lock_id = lock.get('id')
locks_list = self.cmd('az account lock list --query [].name').get_output_in_json()
self.assertTrue(locks_list)
self.assertIn(lock_name, locks_list)
lock = self.cmd('az account lock show -n {}'.format(lock_name)).get_output_in_json()
lock_from_id = self.cmd('az account lock show --ids {}'.format(lock_id)).get_output_in_json()
self.assertEqual(lock.get('name', None), lock_name)
self.assertEqual(lock_from_id.get('name', None), lock_name)
self.assertEqual(lock.get('level', None), 'CanNotDelete')
notes = self.create_random_name('notes', 20)
lock = self.cmd('az account lock update -n {} --notes {} --lock-type {}'
.format(lock_name, notes, 'ReadOnly')).get_output_in_json()
self.assertEqual(lock.get('notes', None), notes)
self.assertEqual(lock.get('level', None), 'ReadOnly')
lock = self.cmd('az account lock update --ids {} --lock-type {}'
.format(lock_id, 'CanNotDelete')).get_output_in_json()
self.assertEqual(lock.get('level', None), 'CanNotDelete')
self.cmd('az account lock delete -n {}'.format(lock_name))
@ResourceGroupPreparer(name_prefix='cli_test_lock_commands_with_ids')
def test_lock_commands_with_ids(self, resource_group):
vnet_name = self.create_random_name('cli-lock-vnet', 30)
subnet_name = self.create_random_name('cli-lock-subnet', 30)
group_lock_name = self.create_random_name('cli-test-lock', 50)
vnet_lock_name = self.create_random_name('cli-test-lock', 50)
subnet_lock_name = self.create_random_name('cli-test-lock', 20)
vnet = self.cmd('az network vnet create -n {} -g {}'.format(vnet_name, resource_group)).get_output_in_json()
subnetaddress = vnet.get('newVNet').get('addressSpace').get('addressPrefixes')[0]
self.cmd('az network vnet subnet create -n {} --address-prefix {} --vnet-name {} -g {}'
.format(subnet_name, subnetaddress, vnet_name, resource_group))
locks = []
locks.append(self.cmd('az lock create -n {} -g {} --lock-type CanNotDelete'
.format(group_lock_name, resource_group)).get_output_in_json())
locks.append(self.cmd('az lock create -n {} -g {} --resource-type Microsoft.Network/virtualNetworks'
' --resource-name {} --lock-type CanNotDelete'
.format(vnet_lock_name, resource_group, vnet_name)).get_output_in_json())
locks.append(self.cmd('az lock create -n {} -g {} --resource-name {} --resource-type subnets '
'--namespace Microsoft.Network --parent virtualNetworks/{} --lock-type CanNotDelete'
.format(subnet_lock_name, resource_group, subnet_name, vnet_name)).get_output_in_json())
self._sleep_for_lock_operation()
space_delimited_ids = ' '.join([lock.get('id', None) for lock in locks])
my_locks = self.cmd('az lock show --ids {} --query [].name'.format(space_delimited_ids)).get_output_in_json()
self.assertTrue(len(my_locks) == 3)
for lock in my_locks:
self.assertIn(lock, [group_lock_name, vnet_lock_name, subnet_lock_name])
my_locks = self.cmd('az lock update --ids {} --notes somenotes --lock-type ReadOnly'
.format(space_delimited_ids)).get_output_in_json()
self.assertTrue(len(my_locks) == 3)
for lock in my_locks:
self.assertEqual(lock.get('notes', None), 'somenotes')
self.assertEqual(lock.get('level', None), 'ReadOnly')
self.cmd('az lock delete --ids {}'.format(space_delimited_ids))
self._sleep_for_lock_operation()
my_locks = self.cmd("az lock list -g {} -ojson".format(resource_group)).get_output_in_json()
self.assertFalse(my_locks)
def _sleep_for_lock_operation(self):
if self.is_live:
sleep(5)
class ParseIdTests(unittest.TestCase):
def test_parsing_lock_ids(self):
tests = [
{
'input': "/subscriptions/subId/providers/"
"Microsoft.Authorization/locks/sublock",
'expected': {
'resource_group': None,
'resource_provider_namespace': None,
'parent_resource_path': None,
'resource_type': None,
'resource_name': None,
'lock_name': 'sublock'
}
},
{
'input': "/subscriptions/subId/resourceGroups/examplegroup/providers/"
"Microsoft.Authorization/locks/grouplock",
'expected': {
'resource_group': 'examplegroup',
'resource_provider_namespace': None,
'parent_resource_path': None,
'resource_type': None,
'resource_name': None,
'lock_name': 'grouplock'
}
},
{
'input': "/subscriptions/subId/resourcegroups/mygroup/providers/"
"Microsoft.Network/virtualNetworks/myvnet/providers/"
"Microsoft.Authorization/locks/vnetlock",
'expected': {
'resource_group': 'mygroup',
'resource_provider_namespace': 'Microsoft.Network',
'parent_resource_path': None,
'resource_type': 'virtualNetworks',
'resource_name': 'myvnet',
'lock_name': 'vnetlock'
}
},
{
'input': "/subscriptions/subId/resourceGroups/mygroup/providers/"
"Microsoft.Network/virtualNetworks/myvnet/subnets/subnet/providers/"
"Microsoft.Authorization/locks/subnetlock",
'expected': {
'resource_group': 'mygroup',
'resource_provider_namespace': 'Microsoft.Network',
'parent_resource_path': 'virtualNetworks/myvnet',
'resource_type': 'subnets',
'resource_name': 'subnet',
'lock_name': 'subnetlock'
}
},
{
'input': "/subscriptions/subId/resourceGroups/mygroup/providers/"
"Microsoft.Provider1/resourceType1/name1/providers/"
"Microsoft.Provider2/resourceType2/name2/providers/"
"Microsoft.Authorization/locks/somelock",
'expected': {
'resource_group': 'mygroup',
'resource_provider_namespace': 'Microsoft.Provider1',
'parent_resource_path': 'resourceType1/name1/providers/Microsoft.Provider2',
'resource_type': 'resourceType2',
'resource_name': 'name2',
'lock_name': 'somelock'
}
}
]
for test in tests:
kwargs = _parse_lock_id(test['input'])
self.assertDictEqual(kwargs, test['expected'])
fail_tests = [
"/notsubscriptions/subId/providers/Microsoft.Authorization/locks/sublock",
"/subscriptions/subId/notResourceGroups/examplegroup/providers/Microsoft.Authorization/locks/grouplock",
"/subscriptions/subId/resourceGroups/examplegroup/providers/Microsoft.NotAuthorization/not_locks/grouplock",
"/subscriptions/subId/resourcegroups/mygroup/Microsoft.Network/virtualNetworks/myvnet/providers/"
"Microsoft.Authorization/locks/missingProvidersLock",
"/subscriptions/subId/resourcegroups/mygroup/providers/Microsoft.Network/myvnet/providers/"
"Microsoft.Authorization/locks/missingRsrcTypeLock",
"/subscriptions/subId/providers/Microsoft.Network/virtualNetworks/myvnet/subnets/subnet/providers/"
"Microsoft.Authorization/locks/missingRsrcGroupLock",
"not_a_id_at_all"
]
for test in fail_tests:
with self.assertRaises(AttributeError):
_parse_lock_id(test)
if __name__ == '__main__':
unittest.main()
| true
| true
|
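A note on the ParseIdTests cases above: they pin down how a management lock id decomposes into resource group, provider namespace, parent resource path, resource type, resource name and lock name, and which malformed ids must raise AttributeError. The snippet below is a minimal sketch of a parser that satisfies those expectations; the nested regex is an illustrative reimplementation, not the CLI's actual _parse_lock_id, and the AttributeError on bad input comes from calling .groupdict() on a failed (None) match.

import re

_LOCK_ID_SKETCH = re.compile(
    r'^/subscriptions/[^/]+'
    r'(?:/resource[Gg]roups/(?P<resource_group>[^/]+)'
    r'(?:/providers/(?P<resource_provider_namespace>[^/]+)'
    r'(?:/(?P<parent_resource_path>.+))?'
    r'/(?P<resource_type>[^/]+)/(?P<resource_name>[^/]+))?)?'
    r'/providers/Microsoft\.Authorization/locks/(?P<lock_name>[^/]+)$')

def parse_lock_id_sketch(lock_id):
    # A non-matching id leaves match() returning None, so .groupdict() raises
    # AttributeError, which is exactly what the fail_tests above expect.
    return _LOCK_ID_SKETCH.match(lock_id).groupdict()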
f71a3fac624255159a6714f8e472afdd01de6526
| 1,342
|
py
|
Python
|
molsysmt/form/openmm_Topology/to_openmm_System.py
|
uibcdf/MolModSAKs
|
02263fb710693f0c41817f1a318459b35fd5462a
|
[
"MIT"
] | null | null | null |
molsysmt/form/openmm_Topology/to_openmm_System.py
|
uibcdf/MolModSAKs
|
02263fb710693f0c41817f1a318459b35fd5462a
|
[
"MIT"
] | null | null | null |
molsysmt/form/openmm_Topology/to_openmm_System.py
|
uibcdf/MolModSAKs
|
02263fb710693f0c41817f1a318459b35fd5462a
|
[
"MIT"
] | null | null | null |
from molsysmt._private.exceptions import *
from molsysmt._private.digestion import *
from .is_openmm_Topology import is_openmm_Topology
def to_openmm_System(item, atom_indices='all', forcefield=None, parameters=None, check=True):
if check:
try:
is_openmm_Topology(item)
except:
raise WrongFormError('openmm.Topology')
try:
atom_indices = digest_atom_indices(atom_indices)
except:
raise WrongAtomIndicesError()
try:
forcefield = digest_forcefield(forcefield)
except:
raise WrongForceFieldError()
#forcefield = molecular_mechanics.to_openmm_ForceField()
#system_parameters = molecular_mechanics.get_openmm_System_parameters()
#tmp_item = forcefield.createSystem(item, **parameters)
#if molecular_mechanics.use_dispersion_correction or molecular_mechanics.ewald_error_tolerance:
# forces = {ii.__class__.__name__ : ii for ii in tmp_item.getForces()}
#if molecular_mechanics.use_dispersion_correction:
# forces['NonbondedForce'].setUseDispersionCorrection(True)
#if molecular_mechanics.ewald_error_tolerance:
# forces['NonbondedForce'].setEwaldErrorTolerance(molecular_mechanics.ewald_error_tolerance)
#return tmp_item
raise NotImplementedMethodError
pass
| 34.410256
| 99
| 0.727273
|
from molsysmt._private.exceptions import *
from molsysmt._private.digestion import *
from .is_openmm_Topology import is_openmm_Topology
def to_openmm_System(item, atom_indices='all', forcefield=None, parameters=None, check=True):
if check:
try:
is_openmm_Topology(item)
except:
raise WrongFormError('openmm.Topology')
try:
atom_indices = digest_atom_indices(atom_indices)
except:
raise WrongAtomIndicesError()
try:
forcefield = digest_forcefield(forcefield)
except:
raise WrongForceFieldError()
raise NotImplementedMethodError
pass
| true
| true
|
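to_openmm_System above is still a stub (it raises NotImplementedMethodError), but its commented-out lines hint at the intended path: parameterize the Topology with a ForceField and tweak the NonbondedForce. The sketch below shows that path with the plain OpenMM API; the force-field XML files, cutoff and tolerance are illustrative placeholders rather than anything MolSysMT digests from its arguments, and PME assumes the Topology carries periodic box vectors.

from openmm import app, unit

def topology_to_system_sketch(topology):
    # Parameterize an openmm.app.Topology with a stock force field (placeholder choice).
    forcefield = app.ForceField('amber14-all.xml', 'amber14/tip3pfb.xml')
    system = forcefield.createSystem(topology,
                                     nonbondedMethod=app.PME,
                                     nonbondedCutoff=1.0 * unit.nanometer,
                                     constraints=app.HBonds)
    # Mirror the commented-out tweaks above: both settings live on the NonbondedForce.
    forces = {force.__class__.__name__: force for force in system.getForces()}
    forces['NonbondedForce'].setUseDispersionCorrection(True)
    forces['NonbondedForce'].setEwaldErrorTolerance(1.0e-4)
    return system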
f71a3fafd60fd3e85163023cfc3f27d9dfd7b309
| 1,273
|
py
|
Python
|
python/app.py
|
webbhm/GBE_T
|
77302ecc57c6997bd646a5a789ec5d55bdc1b8d8
|
[
"MIT"
] | null | null | null |
python/app.py
|
webbhm/GBE_T
|
77302ecc57c6997bd646a5a789ec5d55bdc1b8d8
|
[
"MIT"
] | null | null | null |
python/app.py
|
webbhm/GBE_T
|
77302ecc57c6997bd646a5a789ec5d55bdc1b8d8
|
[
"MIT"
] | 1
|
2021-07-30T15:54:29.000Z
|
2021-07-30T15:54:29.000Z
|
from flask import Flask, render_template, request
from datetime import datetime
from ChartHelper import ChartHelper
from werkzeug.middleware.proxy_fix import ProxyFix
app = Flask(__name__)
#
app.wsgi_app = ProxyFix(app.wsgi_app, x_for=1, x_host=1)
@app.route("/")
def index():
return render_template('index.html', title="Test GBE_T")
@app.route("/hello")
def hello():
return render_template('hello.html', title="Temperature Chart")
@app.route("/temp_chart")
def temp_chart():
ch = ChartHelper("Temperature")
arr = ch.get_array()
return render_template('temp_chart.html', title="Temperature Chart", data=arr)
@app.route("/humidity_chart")
def humidity_chart():
ch = ChartHelper("Humidity")
arr = ch.get_array()
return render_template('humidity_chart.html', title="Humidity Chart", data=arr)
@app.route("/pressure_chart")
def pressure_chart():
ch = ChartHelper("Pressure")
arr = ch.get_array()
return render_template('pressure_chart.html', title="Pressure Chart", data=arr)
@app.route("/co2_chart")
def co2_chart():
ch = ChartHelper("CO2")
arr = ch.get_array()
return render_template('co2_chart.html', title="CO2 Chart", data=arr)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000, debug=True)
| 27.673913
| 83
| 0.711705
|
from flask import Flask, render_template, request
from datetime import datetime
from ChartHelper import ChartHelper
from werkzeug.middleware.proxy_fix import ProxyFix
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app, x_for=1, x_host=1)
@app.route("/")
def index():
return render_template('index.html', title="Test GBE_T")
@app.route("/hello")
def hello():
return render_template('hello.html', title="Temperature Chart")
@app.route("/temp_chart")
def temp_chart():
ch = ChartHelper("Temperature")
arr = ch.get_array()
return render_template('temp_chart.html', title="Temperature Chart", data=arr)
@app.route("/humidity_chart")
def humidity_chart():
ch = ChartHelper("Humidity")
arr = ch.get_array()
return render_template('humidity_chart.html', title="Humidity Chart", data=arr)
@app.route("/pressure_chart")
def pressure_chart():
ch = ChartHelper("Pressure")
arr = ch.get_array()
return render_template('pressure_chart.html', title="Pressure Chart", data=arr)
@app.route("/co2_chart")
def co2_chart():
ch = ChartHelper("CO2")
arr = ch.get_array()
return render_template('co2_chart.html', title="CO2 Chart", data=arr)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000, debug=True)
| true
| true
|
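Each route in app.py above hands its template a data array built by ChartHelper.get_array(); the helper itself is not part of this listing. A purely hypothetical stand-in that would be compatible with those routes is sketched below; the database file, table and column names are invented for illustration and are not taken from the GBE_T repository.

import sqlite3

class ChartHelperSketch:
    # Hypothetical ChartHelper: returns [timestamp, value] pairs for one phenomenon.
    def __init__(self, phenomenon, db_path='observations.db'):
        self.phenomenon = phenomenon
        self.db_path = db_path

    def get_array(self):
        with sqlite3.connect(self.db_path) as conn:
            rows = conn.execute(
                "SELECT timestamp, value FROM observations "
                "WHERE name = ? ORDER BY timestamp", (self.phenomenon,))
            return [[ts, value] for ts, value in rows]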
f71a408dbfa7813e062114f0338906e60d2e2f3e
| 15,336
|
py
|
Python
|
viz/renderer.py
|
AK391/stylegan_xl
|
9854d3d0e96eccaad10cab22379c018e1e031cf0
|
[
"MIT"
] | 214
|
2022-02-02T02:24:57.000Z
|
2022-03-31T18:39:55.000Z
|
viz/renderer.py
|
AK391/stylegan_xl
|
9854d3d0e96eccaad10cab22379c018e1e031cf0
|
[
"MIT"
] | 8
|
2022-02-03T11:21:10.000Z
|
2022-03-31T23:26:24.000Z
|
viz/renderer.py
|
AK391/stylegan_xl
|
9854d3d0e96eccaad10cab22379c018e1e031cf0
|
[
"MIT"
] | 2
|
2022-03-08T08:05:55.000Z
|
2022-03-31T23:01:58.000Z
|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import sys
import copy
import traceback
import numpy as np
import torch
import torch.fft
import torch.nn
import matplotlib.cm
import dnnlib
from torch_utils.ops import upfirdn2d
import legacy # pylint: disable=import-error
#----------------------------------------------------------------------------
class CapturedException(Exception):
def __init__(self, msg=None):
if msg is None:
_type, value, _traceback = sys.exc_info()
assert value is not None
if isinstance(value, CapturedException):
msg = str(value)
else:
msg = traceback.format_exc()
assert isinstance(msg, str)
super().__init__(msg)
#----------------------------------------------------------------------------
class CaptureSuccess(Exception):
def __init__(self, out):
super().__init__()
self.out = out
#----------------------------------------------------------------------------
def _sinc(x):
y = (x * np.pi).abs()
z = torch.sin(y) / y.clamp(1e-30, float('inf'))
return torch.where(y < 1e-30, torch.ones_like(x), z)
def _lanczos_window(x, a):
x = x.abs() / a
return torch.where(x < 1, _sinc(x), torch.zeros_like(x))
#----------------------------------------------------------------------------
def _construct_affine_bandlimit_filter(mat, a=3, amax=16, aflt=64, up=4, cutoff_in=1, cutoff_out=1):
assert a <= amax < aflt
mat = torch.as_tensor(mat).to(torch.float32)
# Construct 2D filter taps in input & output coordinate spaces.
taps = ((torch.arange(aflt * up * 2 - 1, device=mat.device) + 1) / up - aflt).roll(1 - aflt * up)
yi, xi = torch.meshgrid(taps, taps)
xo, yo = (torch.stack([xi, yi], dim=2) @ mat[:2, :2].t()).unbind(2)
# Convolution of two oriented 2D sinc filters.
fi = _sinc(xi * cutoff_in) * _sinc(yi * cutoff_in)
fo = _sinc(xo * cutoff_out) * _sinc(yo * cutoff_out)
f = torch.fft.ifftn(torch.fft.fftn(fi) * torch.fft.fftn(fo)).real
# Convolution of two oriented 2D Lanczos windows.
wi = _lanczos_window(xi, a) * _lanczos_window(yi, a)
wo = _lanczos_window(xo, a) * _lanczos_window(yo, a)
w = torch.fft.ifftn(torch.fft.fftn(wi) * torch.fft.fftn(wo)).real
# Construct windowed FIR filter.
f = f * w
# Finalize.
c = (aflt - amax) * up
f = f.roll([aflt * up - 1] * 2, dims=[0,1])[c:-c, c:-c]
f = torch.nn.functional.pad(f, [0, 1, 0, 1]).reshape(amax * 2, up, amax * 2, up)
f = f / f.sum([0,2], keepdim=True) / (up ** 2)
f = f.reshape(amax * 2 * up, amax * 2 * up)[:-1, :-1]
return f
#----------------------------------------------------------------------------
def _apply_affine_transformation(x, mat, up=4, **filter_kwargs):
_N, _C, H, W = x.shape
mat = torch.as_tensor(mat).to(dtype=torch.float32, device=x.device)
# Construct filter.
f = _construct_affine_bandlimit_filter(mat, up=up, **filter_kwargs)
assert f.ndim == 2 and f.shape[0] == f.shape[1] and f.shape[0] % 2 == 1
p = f.shape[0] // 2
# Construct sampling grid.
theta = mat.inverse()
theta[:2, 2] *= 2
theta[0, 2] += 1 / up / W
theta[1, 2] += 1 / up / H
theta[0, :] *= W / (W + p / up * 2)
theta[1, :] *= H / (H + p / up * 2)
theta = theta[:2, :3].unsqueeze(0).repeat([x.shape[0], 1, 1])
g = torch.nn.functional.affine_grid(theta, x.shape, align_corners=False)
# Resample image.
y = upfirdn2d.upsample2d(x=x, f=f, up=up, padding=p)
z = torch.nn.functional.grid_sample(y, g, mode='bilinear', padding_mode='zeros', align_corners=False)
# Form mask.
m = torch.zeros_like(y)
c = p * 2 + 1
m[:, :, c:-c, c:-c] = 1
m = torch.nn.functional.grid_sample(m, g, mode='nearest', padding_mode='zeros', align_corners=False)
return z, m
#----------------------------------------------------------------------------
class Renderer:
def __init__(self):
self._device = torch.device('cuda')
self._pkl_data = dict() # {pkl: dict | CapturedException, ...}
self._networks = dict() # {cache_key: torch.nn.Module, ...}
self._pinned_bufs = dict() # {(shape, dtype): torch.Tensor, ...}
self._cmaps = dict() # {name: torch.Tensor, ...}
self._is_timing = False
self._start_event = torch.cuda.Event(enable_timing=True)
self._end_event = torch.cuda.Event(enable_timing=True)
self._net_layers = dict() # {cache_key: [dnnlib.EasyDict, ...], ...}
def render(self, **args):
self._is_timing = True
self._start_event.record(torch.cuda.current_stream(self._device))
res = dnnlib.EasyDict()
try:
self._render_impl(res, **args)
except:
res.error = CapturedException()
self._end_event.record(torch.cuda.current_stream(self._device))
if 'image' in res:
res.image = self.to_cpu(res.image).numpy()
if 'stats' in res:
res.stats = self.to_cpu(res.stats).numpy()
if 'error' in res:
res.error = str(res.error)
if self._is_timing:
self._end_event.synchronize()
res.render_time = self._start_event.elapsed_time(self._end_event) * 1e-3
self._is_timing = False
return res
def get_network(self, pkl, key, **tweak_kwargs):
data = self._pkl_data.get(pkl, None)
if data is None:
print(f'Loading "{pkl}"... ', end='', flush=True)
try:
with dnnlib.util.open_url(pkl, verbose=False) as f:
data = legacy.load_network_pkl(f)
print('Done.')
except:
data = CapturedException()
print('Failed!')
self._pkl_data[pkl] = data
self._ignore_timing()
if isinstance(data, CapturedException):
raise data
orig_net = data[key]
cache_key = (orig_net, self._device, tuple(sorted(tweak_kwargs.items())))
net = self._networks.get(cache_key, None)
if net is None:
try:
net = copy.deepcopy(orig_net)
net = self._tweak_network(net, **tweak_kwargs)
net.to(self._device)
except:
net = CapturedException()
self._networks[cache_key] = net
self._ignore_timing()
if isinstance(net, CapturedException):
raise net
return net
def _tweak_network(self, net):
# Print diagnostics.
#for name, value in misc.named_params_and_buffers(net):
# if name.endswith('.magnitude_ema'):
# value = value.rsqrt().numpy()
# print(f'{name:<50s}{np.min(value):<16g}{np.max(value):g}')
# if name.endswith('.weight') and value.ndim == 4:
# value = value.square().mean([1,2,3]).sqrt().numpy()
# print(f'{name:<50s}{np.min(value):<16g}{np.max(value):g}')
return net
def _get_pinned_buf(self, ref):
key = (tuple(ref.shape), ref.dtype)
buf = self._pinned_bufs.get(key, None)
if buf is None:
buf = torch.empty(ref.shape, dtype=ref.dtype).pin_memory()
self._pinned_bufs[key] = buf
return buf
def to_device(self, buf):
return self._get_pinned_buf(buf).copy_(buf).to(self._device)
def to_cpu(self, buf):
return self._get_pinned_buf(buf).copy_(buf).clone()
def _ignore_timing(self):
self._is_timing = False
def _apply_cmap(self, x, name='viridis'):
cmap = self._cmaps.get(name, None)
if cmap is None:
cmap = matplotlib.cm.get_cmap(name)
cmap = cmap(np.linspace(0, 1, num=1024), bytes=True)[:, :3]
cmap = self.to_device(torch.from_numpy(cmap))
self._cmaps[name] = cmap
hi = cmap.shape[0] - 1
x = (x * hi + 0.5).clamp(0, hi).to(torch.int64)
x = torch.nn.functional.embedding(x, cmap)
return x
def _render_impl(self, res,
pkl = None,
w0_seeds = [[0, 1]],
stylemix_idx = [],
stylemix_seed = 0,
trunc_psi = 1,
trunc_cutoff = 0,
random_seed = 0,
noise_mode = 'const',
force_fp32 = False,
layer_name = None,
sel_channels = 3,
base_channel = 0,
img_scale_db = 0,
img_normalize = False,
fft_show = False,
fft_all = True,
fft_range_db = 50,
fft_beta = 8,
input_transform = None,
untransform = False,
):
# Dig up network details.
G = self.get_network(pkl, 'G_ema')
res.img_resolution = G.img_resolution
res.num_ws = G.num_ws
res.has_noise = any('noise_const' in name for name, _buf in G.synthesis.named_buffers())
res.has_input_transform = (hasattr(G.synthesis, 'input') and hasattr(G.synthesis.input, 'transform'))
# Set input transform.
if res.has_input_transform:
m = np.eye(3)
try:
if input_transform is not None:
m = np.linalg.inv(np.asarray(input_transform))
except np.linalg.LinAlgError:
res.error = CapturedException()
G.synthesis.input.transform.copy_(torch.from_numpy(m))
# Generate random latents.
all_seeds = [seed for seed, _weight in w0_seeds] + [stylemix_seed]
all_seeds = list(set(all_seeds))
all_zs = np.zeros([len(all_seeds), G.z_dim], dtype=np.float32)
all_cs = np.zeros([len(all_seeds), G.c_dim], dtype=np.float32)
for idx, seed in enumerate(all_seeds):
rnd = np.random.RandomState(seed)
all_zs[idx] = rnd.randn(G.z_dim)
cls = rnd.randint(G.c_dim)
if G.c_dim > 0:
all_cs[idx, cls] = 1
# Run mapping network.
w_avg = G.mapping.w_avg[cls]
all_zs = self.to_device(torch.from_numpy(all_zs))
all_cs = self.to_device(torch.from_numpy(all_cs))
all_ws = G.mapping(z=all_zs, c=all_cs, truncation_psi=trunc_psi, truncation_cutoff=trunc_cutoff) - w_avg
all_ws = dict(zip(all_seeds, all_ws))
# Calculate final W.
w = torch.stack([all_ws[seed] * weight for seed, weight in w0_seeds]).sum(dim=0, keepdim=True)
stylemix_idx = [idx for idx in stylemix_idx if 0 <= idx < G.num_ws]
if len(stylemix_idx) > 0:
w[:, stylemix_idx] = all_ws[stylemix_seed][np.newaxis, stylemix_idx]
w += w_avg
# Run synthesis network.
synthesis_kwargs = dnnlib.EasyDict(noise_mode=noise_mode, force_fp32=force_fp32)
torch.manual_seed(random_seed)
out, layers = self.run_synthesis_net(G.synthesis, w, capture_layer=layer_name, **synthesis_kwargs)
# Update layer list.
cache_key = (G.synthesis, tuple(sorted(synthesis_kwargs.items())))
if cache_key not in self._net_layers:
if layer_name is not None:
torch.manual_seed(random_seed)
_out, layers = self.run_synthesis_net(G.synthesis, w, **synthesis_kwargs)
self._net_layers[cache_key] = layers
res.layers = self._net_layers[cache_key]
# Untransform.
if untransform and res.has_input_transform:
out, _mask = _apply_affine_transformation(out.to(torch.float32), G.synthesis.input.transform, amax=6) # Override amax to hit the fast path in upfirdn2d.
# Select channels and compute statistics.
out = out[0].to(torch.float32)
if sel_channels > out.shape[0]:
sel_channels = 1
base_channel = max(min(base_channel, out.shape[0] - sel_channels), 0)
sel = out[base_channel : base_channel + sel_channels]
res.stats = torch.stack([
out.mean(), sel.mean(),
out.std(), sel.std(),
out.norm(float('inf')), sel.norm(float('inf')),
])
# Scale and convert to uint8.
img = sel
if img_normalize:
img = img / img.norm(float('inf'), dim=[1,2], keepdim=True).clip(1e-8, 1e8)
img = img * (10 ** (img_scale_db / 20))
img = (img * 127.5 + 128).clamp(0, 255).to(torch.uint8).permute(1, 2, 0)
res.image = img
# FFT.
if fft_show:
sig = out if fft_all else sel
sig = sig.to(torch.float32)
sig = sig - sig.mean(dim=[1,2], keepdim=True)
sig = sig * torch.kaiser_window(sig.shape[1], periodic=False, beta=fft_beta, device=self._device)[None, :, None]
sig = sig * torch.kaiser_window(sig.shape[2], periodic=False, beta=fft_beta, device=self._device)[None, None, :]
fft = torch.fft.fftn(sig, dim=[1,2]).abs().square().sum(dim=0)
fft = fft.roll(shifts=[fft.shape[0] // 2, fft.shape[1] // 2], dims=[0,1])
fft = (fft / fft.mean()).log10() * 10 # dB
fft = self._apply_cmap((fft / fft_range_db + 1) / 2)
res.image = torch.cat([img.expand_as(fft), fft], dim=1)
@staticmethod
def run_synthesis_net(net, *args, capture_layer=None, **kwargs): # => out, layers
submodule_names = {mod: name for name, mod in net.named_modules()}
unique_names = set()
layers = []
def module_hook(module, _inputs, outputs):
outputs = list(outputs) if isinstance(outputs, (tuple, list)) else [outputs]
outputs = [out for out in outputs if isinstance(out, torch.Tensor) and out.ndim in [4, 5]]
for idx, out in enumerate(outputs):
if out.ndim == 5: # G-CNN => remove group dimension.
out = out.mean(2)
name = submodule_names[module]
if name == '':
name = 'output'
if len(outputs) > 1:
name += f':{idx}'
if name in unique_names:
suffix = 2
while f'{name}_{suffix}' in unique_names:
suffix += 1
name += f'_{suffix}'
unique_names.add(name)
shape = [int(x) for x in out.shape]
dtype = str(out.dtype).split('.')[-1]
layers.append(dnnlib.EasyDict(name=name, shape=shape, dtype=dtype))
if name == capture_layer:
raise CaptureSuccess(out)
hooks = [module.register_forward_hook(module_hook) for module in net.modules()]
try:
out = net(*args, **kwargs)
except CaptureSuccess as e:
out = e.out
for hook in hooks:
hook.remove()
return out, layers
#----------------------------------------------------------------------------
| 40.46438
| 164
| 0.555099
|
import sys
import copy
import traceback
import numpy as np
import torch
import torch.fft
import torch.nn
import matplotlib.cm
import dnnlib
from torch_utils.ops import upfirdn2d
import legacy
class CapturedException(Exception):
def __init__(self, msg=None):
if msg is None:
_type, value, _traceback = sys.exc_info()
assert value is not None
if isinstance(value, CapturedException):
msg = str(value)
else:
msg = traceback.format_exc()
assert isinstance(msg, str)
super().__init__(msg)
class CaptureSuccess(Exception):
def __init__(self, out):
super().__init__()
self.out = out
def _sinc(x):
y = (x * np.pi).abs()
z = torch.sin(y) / y.clamp(1e-30, float('inf'))
return torch.where(y < 1e-30, torch.ones_like(x), z)
def _lanczos_window(x, a):
x = x.abs() / a
return torch.where(x < 1, _sinc(x), torch.zeros_like(x))
def _construct_affine_bandlimit_filter(mat, a=3, amax=16, aflt=64, up=4, cutoff_in=1, cutoff_out=1):
assert a <= amax < aflt
mat = torch.as_tensor(mat).to(torch.float32)
taps = ((torch.arange(aflt * up * 2 - 1, device=mat.device) + 1) / up - aflt).roll(1 - aflt * up)
yi, xi = torch.meshgrid(taps, taps)
xo, yo = (torch.stack([xi, yi], dim=2) @ mat[:2, :2].t()).unbind(2)
fi = _sinc(xi * cutoff_in) * _sinc(yi * cutoff_in)
fo = _sinc(xo * cutoff_out) * _sinc(yo * cutoff_out)
f = torch.fft.ifftn(torch.fft.fftn(fi) * torch.fft.fftn(fo)).real
wi = _lanczos_window(xi, a) * _lanczos_window(yi, a)
wo = _lanczos_window(xo, a) * _lanczos_window(yo, a)
w = torch.fft.ifftn(torch.fft.fftn(wi) * torch.fft.fftn(wo)).real
f = f * w
c = (aflt - amax) * up
f = f.roll([aflt * up - 1] * 2, dims=[0,1])[c:-c, c:-c]
f = torch.nn.functional.pad(f, [0, 1, 0, 1]).reshape(amax * 2, up, amax * 2, up)
f = f / f.sum([0,2], keepdim=True) / (up ** 2)
f = f.reshape(amax * 2 * up, amax * 2 * up)[:-1, :-1]
return f
def _apply_affine_transformation(x, mat, up=4, **filter_kwargs):
_N, _C, H, W = x.shape
mat = torch.as_tensor(mat).to(dtype=torch.float32, device=x.device)
f = _construct_affine_bandlimit_filter(mat, up=up, **filter_kwargs)
assert f.ndim == 2 and f.shape[0] == f.shape[1] and f.shape[0] % 2 == 1
p = f.shape[0] // 2
theta = mat.inverse()
theta[:2, 2] *= 2
theta[0, 2] += 1 / up / W
theta[1, 2] += 1 / up / H
theta[0, :] *= W / (W + p / up * 2)
theta[1, :] *= H / (H + p / up * 2)
theta = theta[:2, :3].unsqueeze(0).repeat([x.shape[0], 1, 1])
g = torch.nn.functional.affine_grid(theta, x.shape, align_corners=False)
y = upfirdn2d.upsample2d(x=x, f=f, up=up, padding=p)
z = torch.nn.functional.grid_sample(y, g, mode='bilinear', padding_mode='zeros', align_corners=False)
m = torch.zeros_like(y)
c = p * 2 + 1
m[:, :, c:-c, c:-c] = 1
m = torch.nn.functional.grid_sample(m, g, mode='nearest', padding_mode='zeros', align_corners=False)
return z, m
class Renderer:
def __init__(self):
self._device = torch.device('cuda')
self._pkl_data = dict()
self._networks = dict()
self._pinned_bufs = dict()
self._cmaps = dict()
self._is_timing = False
self._start_event = torch.cuda.Event(enable_timing=True)
self._end_event = torch.cuda.Event(enable_timing=True)
self._net_layers = dict()
def render(self, **args):
self._is_timing = True
self._start_event.record(torch.cuda.current_stream(self._device))
res = dnnlib.EasyDict()
try:
self._render_impl(res, **args)
except:
res.error = CapturedException()
self._end_event.record(torch.cuda.current_stream(self._device))
if 'image' in res:
res.image = self.to_cpu(res.image).numpy()
if 'stats' in res:
res.stats = self.to_cpu(res.stats).numpy()
if 'error' in res:
res.error = str(res.error)
if self._is_timing:
self._end_event.synchronize()
res.render_time = self._start_event.elapsed_time(self._end_event) * 1e-3
self._is_timing = False
return res
def get_network(self, pkl, key, **tweak_kwargs):
data = self._pkl_data.get(pkl, None)
if data is None:
print(f'Loading "{pkl}"... ', end='', flush=True)
try:
with dnnlib.util.open_url(pkl, verbose=False) as f:
data = legacy.load_network_pkl(f)
print('Done.')
except:
data = CapturedException()
print('Failed!')
self._pkl_data[pkl] = data
self._ignore_timing()
if isinstance(data, CapturedException):
raise data
orig_net = data[key]
cache_key = (orig_net, self._device, tuple(sorted(tweak_kwargs.items())))
net = self._networks.get(cache_key, None)
if net is None:
try:
net = copy.deepcopy(orig_net)
net = self._tweak_network(net, **tweak_kwargs)
net.to(self._device)
except:
net = CapturedException()
self._networks[cache_key] = net
self._ignore_timing()
if isinstance(net, CapturedException):
raise net
return net
def _tweak_network(self, net):
return net
def _get_pinned_buf(self, ref):
key = (tuple(ref.shape), ref.dtype)
buf = self._pinned_bufs.get(key, None)
if buf is None:
buf = torch.empty(ref.shape, dtype=ref.dtype).pin_memory()
self._pinned_bufs[key] = buf
return buf
def to_device(self, buf):
return self._get_pinned_buf(buf).copy_(buf).to(self._device)
def to_cpu(self, buf):
return self._get_pinned_buf(buf).copy_(buf).clone()
def _ignore_timing(self):
self._is_timing = False
def _apply_cmap(self, x, name='viridis'):
cmap = self._cmaps.get(name, None)
if cmap is None:
cmap = matplotlib.cm.get_cmap(name)
cmap = cmap(np.linspace(0, 1, num=1024), bytes=True)[:, :3]
cmap = self.to_device(torch.from_numpy(cmap))
self._cmaps[name] = cmap
hi = cmap.shape[0] - 1
x = (x * hi + 0.5).clamp(0, hi).to(torch.int64)
x = torch.nn.functional.embedding(x, cmap)
return x
def _render_impl(self, res,
pkl = None,
w0_seeds = [[0, 1]],
stylemix_idx = [],
stylemix_seed = 0,
trunc_psi = 1,
trunc_cutoff = 0,
random_seed = 0,
noise_mode = 'const',
force_fp32 = False,
layer_name = None,
sel_channels = 3,
base_channel = 0,
img_scale_db = 0,
img_normalize = False,
fft_show = False,
fft_all = True,
fft_range_db = 50,
fft_beta = 8,
input_transform = None,
untransform = False,
):
G = self.get_network(pkl, 'G_ema')
res.img_resolution = G.img_resolution
res.num_ws = G.num_ws
res.has_noise = any('noise_const' in name for name, _buf in G.synthesis.named_buffers())
res.has_input_transform = (hasattr(G.synthesis, 'input') and hasattr(G.synthesis.input, 'transform'))
if res.has_input_transform:
m = np.eye(3)
try:
if input_transform is not None:
m = np.linalg.inv(np.asarray(input_transform))
except np.linalg.LinAlgError:
res.error = CapturedException()
G.synthesis.input.transform.copy_(torch.from_numpy(m))
all_seeds = [seed for seed, _weight in w0_seeds] + [stylemix_seed]
all_seeds = list(set(all_seeds))
all_zs = np.zeros([len(all_seeds), G.z_dim], dtype=np.float32)
all_cs = np.zeros([len(all_seeds), G.c_dim], dtype=np.float32)
for idx, seed in enumerate(all_seeds):
rnd = np.random.RandomState(seed)
all_zs[idx] = rnd.randn(G.z_dim)
cls = rnd.randint(G.c_dim)
if G.c_dim > 0:
all_cs[idx, cls] = 1
w_avg = G.mapping.w_avg[cls]
all_zs = self.to_device(torch.from_numpy(all_zs))
all_cs = self.to_device(torch.from_numpy(all_cs))
all_ws = G.mapping(z=all_zs, c=all_cs, truncation_psi=trunc_psi, truncation_cutoff=trunc_cutoff) - w_avg
all_ws = dict(zip(all_seeds, all_ws))
w = torch.stack([all_ws[seed] * weight for seed, weight in w0_seeds]).sum(dim=0, keepdim=True)
stylemix_idx = [idx for idx in stylemix_idx if 0 <= idx < G.num_ws]
if len(stylemix_idx) > 0:
w[:, stylemix_idx] = all_ws[stylemix_seed][np.newaxis, stylemix_idx]
w += w_avg
synthesis_kwargs = dnnlib.EasyDict(noise_mode=noise_mode, force_fp32=force_fp32)
torch.manual_seed(random_seed)
out, layers = self.run_synthesis_net(G.synthesis, w, capture_layer=layer_name, **synthesis_kwargs)
cache_key = (G.synthesis, tuple(sorted(synthesis_kwargs.items())))
if cache_key not in self._net_layers:
if layer_name is not None:
torch.manual_seed(random_seed)
_out, layers = self.run_synthesis_net(G.synthesis, w, **synthesis_kwargs)
self._net_layers[cache_key] = layers
res.layers = self._net_layers[cache_key]
if untransform and res.has_input_transform:
out, _mask = _apply_affine_transformation(out.to(torch.float32), G.synthesis.input.transform, amax=6)
out = out[0].to(torch.float32)
if sel_channels > out.shape[0]:
sel_channels = 1
base_channel = max(min(base_channel, out.shape[0] - sel_channels), 0)
sel = out[base_channel : base_channel + sel_channels]
res.stats = torch.stack([
out.mean(), sel.mean(),
out.std(), sel.std(),
out.norm(float('inf')), sel.norm(float('inf')),
])
img = sel
if img_normalize:
img = img / img.norm(float('inf'), dim=[1,2], keepdim=True).clip(1e-8, 1e8)
img = img * (10 ** (img_scale_db / 20))
img = (img * 127.5 + 128).clamp(0, 255).to(torch.uint8).permute(1, 2, 0)
res.image = img
if fft_show:
sig = out if fft_all else sel
sig = sig.to(torch.float32)
sig = sig - sig.mean(dim=[1,2], keepdim=True)
sig = sig * torch.kaiser_window(sig.shape[1], periodic=False, beta=fft_beta, device=self._device)[None, :, None]
sig = sig * torch.kaiser_window(sig.shape[2], periodic=False, beta=fft_beta, device=self._device)[None, None, :]
fft = torch.fft.fftn(sig, dim=[1,2]).abs().square().sum(dim=0)
fft = fft.roll(shifts=[fft.shape[0] // 2, fft.shape[1] // 2], dims=[0,1])
fft = (fft / fft.mean()).log10() * 10
fft = self._apply_cmap((fft / fft_range_db + 1) / 2)
res.image = torch.cat([img.expand_as(fft), fft], dim=1)
@staticmethod
def run_synthesis_net(net, *args, capture_layer=None, **kwargs):
submodule_names = {mod: name for name, mod in net.named_modules()}
unique_names = set()
layers = []
def module_hook(module, _inputs, outputs):
outputs = list(outputs) if isinstance(outputs, (tuple, list)) else [outputs]
outputs = [out for out in outputs if isinstance(out, torch.Tensor) and out.ndim in [4, 5]]
for idx, out in enumerate(outputs):
if out.ndim == 5:
out = out.mean(2)
name = submodule_names[module]
if name == '':
name = 'output'
if len(outputs) > 1:
name += f':{idx}'
if name in unique_names:
suffix = 2
while f'{name}_{suffix}' in unique_names:
suffix += 1
name += f'_{suffix}'
unique_names.add(name)
shape = [int(x) for x in out.shape]
dtype = str(out.dtype).split('.')[-1]
layers.append(dnnlib.EasyDict(name=name, shape=shape, dtype=dtype))
if name == capture_layer:
raise CaptureSuccess(out)
hooks = [module.register_forward_hook(module_hook) for module in net.modules()]
try:
out = net(*args, **kwargs)
except CaptureSuccess as e:
out = e.out
for hook in hooks:
hook.remove()
return out, layers
| true
| true
|
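run_synthesis_net in the renderer above registers a forward hook on every module, records the name, shape and dtype of each 4-D or 5-D output, and bails out early with CaptureSuccess once the requested layer is reached. The same hook pattern works on any torch.nn.Module; below is a minimal standalone sketch using a toy model rather than StyleGAN-XL's synthesis network.

import torch

def capture_activations(net, x):
    # Record every module's output tensor shape during a single forward pass.
    records, hooks = [], []
    names = {mod: name for name, mod in net.named_modules()}

    def hook(module, _inputs, output):
        if isinstance(output, torch.Tensor):
            records.append((names[module] or 'output', tuple(output.shape)))

    for module in net.modules():
        hooks.append(module.register_forward_hook(hook))
    try:
        net(x)
    finally:
        for h in hooks:
            h.remove()
    return records

# Toy usage: a two-layer model and a dummy batch.
toy = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU())
print(capture_activations(toy, torch.zeros(1, 3, 16, 16)))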
f71a41378868c94ddf49eb311aa584b642394977
| 1,307
|
py
|
Python
|
parse.py
|
itsmehemant123/twitter-hydration
|
543ef7019f3c34e281acc08ae45f24c0407939f6
|
[
"MIT"
] | 1
|
2018-05-05T04:40:01.000Z
|
2018-05-05T04:40:01.000Z
|
parse.py
|
itsmehemant123/twitter-hydration
|
543ef7019f3c34e281acc08ae45f24c0407939f6
|
[
"MIT"
] | null | null | null |
parse.py
|
itsmehemant123/twitter-hydration
|
543ef7019f3c34e281acc08ae45f24c0407939f6
|
[
"MIT"
] | null | null | null |
import os
import json
import time
import logging
from connectors.mongodb.mongohandle import MongoHandle
from twarc import Twarc
logging.basicConfig(level=logging.INFO)
with open('./config/config.json') as data_file:
config = json.load(data_file)
logging.info('Finished parsing config.')
handle = MongoHandle(config)
logging.info('Initialized the Mongo connection.')
t = Twarc(config['twitter']['consumer_key'], config['twitter']['consumer_secret'],
config['twitter']['access_token'], config['twitter']['access_token_secret'])
logging.info('Initialized Twitter connection.')
for source_file in os.listdir('./' + config['source_folder']):
logging.info('Preparing to hydrate: ' + source_file)
tweet_ids = open('./' + config['source_folder'] + '/' + source_file)
new_tweet_ids = []
logging.info('Parsing tweet ids.')
start = time.time()
for line in tweet_ids:
line = line.strip()
if (not handle.is_written(line)):
new_tweet_ids.append(line)
end = time.time()
logging.info('Finished looking for new tweets in %.2f seconds.' % (end - start))
handle.write(t.hydrate(new_tweet_ids), source_file)
tweet_ids.close()
logging.info('Finished hydrating: ' + source_file)
logging.info('Finished hydration task.')
handle.clean()
| 31.878049
| 86
| 0.701607
|
import os
import json
import time
import logging
from connectors.mongodb.mongohandle import MongoHandle
from twarc import Twarc
logging.basicConfig(level=logging.INFO)
with open('./config/config.json') as data_file:
config = json.load(data_file)
logging.info('Finished parsing config.')
handle = MongoHandle(config)
logging.info('Initialized the Mongo connection.')
t = Twarc(config['twitter']['consumer_key'], config['twitter']['consumer_secret'],
config['twitter']['access_token'], config['twitter']['access_token_secret'])
logging.info('Initialized Twitter connection.')
for source_file in os.listdir('./' + config['source_folder']):
logging.info('Preparing to hydrate: ' + source_file)
tweet_ids = open('./' + config['source_folder'] + '/' + source_file)
new_tweet_ids = []
logging.info('Parsing tweet ids.')
start = time.time()
for line in tweet_ids:
line = line.strip()
if (not handle.is_written(line)):
new_tweet_ids.append(line)
end = time.time()
logging.info('Finished looking for new tweets in %.2f seconds.' % (end - start))
handle.write(t.hydrate(new_tweet_ids), source_file)
tweet_ids.close()
logging.info('Finished hydrating: ' + source_file)
logging.info('Finished hydration task.')
handle.clean()
| true
| true
|
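parse.py above filters out tweet ids that MongoHandle has already stored and feeds the rest to Twarc.hydrate, which turns bare ids back into full tweet objects. A minimal sketch of that hydration step, writing JSON lines to disk instead of Mongo (the credential arguments and output path are placeholders):

import json
from twarc import Twarc

def hydrate_to_jsonl(tweet_ids, out_path, consumer_key, consumer_secret,
                     access_token, access_token_secret):
    # Twarc.hydrate accepts an iterable of ids and yields full tweet dicts,
    # batching the lookups against the Twitter API internally.
    t = Twarc(consumer_key, consumer_secret, access_token, access_token_secret)
    with open(out_path, 'w') as out:
        for tweet in t.hydrate(tweet_ids):
            out.write(json.dumps(tweet) + '\n')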
f71a414dc127cdf908b1db847cc87bf66e249e05
| 515
|
py
|
Python
|
nkrsiSystem/configDefault.py
|
Kanciarzek/NkrsiSystem
|
ee3d19b1419ee64ccef05051a3892663e7d71625
|
[
"MIT"
] | null | null | null |
nkrsiSystem/configDefault.py
|
Kanciarzek/NkrsiSystem
|
ee3d19b1419ee64ccef05051a3892663e7d71625
|
[
"MIT"
] | null | null | null |
nkrsiSystem/configDefault.py
|
Kanciarzek/NkrsiSystem
|
ee3d19b1419ee64ccef05051a3892663e7d71625
|
[
"MIT"
] | null | null | null |
import os
DEBUG_MODE = True
SECRET_KEY = 'secret'
# Database config
DB_USER = 'postgres'
DB_NAME = 'postgres'
DB_PASSWORD = ''
DB_HOST = os.environ.get('POSTGRES_HOST', 'localhost')
DB_PORT = os.environ.get('POSTGRES_PORT', 5432)
# Slack config
SLACK_TOKEN = 'token'
SLACK_API_INVITE_URL = 'https://slack.com/api/users.admin.invite'
# Email config
EMAIL_HOST = ''
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_PORT = 587
PROJECTOR_IP = ''
DOOR_ENDPOINT = ''
FACEBOOK_TOKEN = ''
GOOGLE_MAPS_API_KEY = ''
| 18.392857
| 65
| 0.728155
|
import os
DEBUG_MODE = True
SECRET_KEY = 'secret'
DB_USER = 'postgres'
DB_NAME = 'postgres'
DB_PASSWORD = ''
DB_HOST = os.environ.get('POSTGRES_HOST', 'localhost')
DB_PORT = os.environ.get('POSTGRES_PORT', 5432)
SLACK_TOKEN = 'token'
SLACK_API_INVITE_URL = 'https://slack.com/api/users.admin.invite'
EMAIL_HOST = ''
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_PORT = 587
PROJECTOR_IP = ''
DOOR_ENDPOINT = ''
FACEBOOK_TOKEN = ''
GOOGLE_MAPS_API_KEY = ''
| true
| true
|
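configDefault.py above keeps only safe, secret-free defaults. The usual way such a module is consumed (assumed here, not taken from the NkrsiSystem code) is to load it first and then let a local, untracked settings file override individual keys, since later loads win:

from flask import Flask

app = Flask(__name__)
# Load the shipped defaults, then optionally override them from a local file;
# the local filename is illustrative and silent=True tolerates its absence.
app.config.from_object('nkrsiSystem.configDefault')
app.config.from_pyfile('config_local.py', silent=True)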
f71a41f2f041e11ccff687d63b1853750bc8274a
| 1,270
|
py
|
Python
|
scripts/05_modules/snap/snap_enable_snapping_3d_point_r14.py
|
mgoldshteyn/cinema4d_py_sdk_extended
|
b6c67f1dbae182c09ccbcc1df51f0e7ea4816074
|
[
"Apache-2.0"
] | null | null | null |
scripts/05_modules/snap/snap_enable_snapping_3d_point_r14.py
|
mgoldshteyn/cinema4d_py_sdk_extended
|
b6c67f1dbae182c09ccbcc1df51f0e7ea4816074
|
[
"Apache-2.0"
] | null | null | null |
scripts/05_modules/snap/snap_enable_snapping_3d_point_r14.py
|
mgoldshteyn/cinema4d_py_sdk_extended
|
b6c67f1dbae182c09ccbcc1df51f0e7ea4816074
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright: MAXON Computer GmbH
Description:
    - Enables snapping if it is not already enabled.
    - Sets it to the 3D type and to Point mode.
Class/method highlighted:
- c4d.modules.snap
- c4d.modules.snap.IsSnapEnabled()
- c4d.modules.snap.GetSnapSettings()
- c4d.modules.snap.SetSnapSettings()
- c4d.modules.snap.EnableSnap()
Compatible:
- Win / Mac
- R14, R15, R16, R17, R18, R19, R20, R21, S22
"""
import c4d
def main():
# Checks snap state
res = c4d.modules.snap.IsSnapEnabled(doc)
if not res:
# Enables snap if not activated
c4d.modules.snap.EnableSnap(True, doc)
print("Snap Enabled:", c4d.modules.snap.IsSnapEnabled(doc))
# Retrieves the BaseContainer storing all the settings
settings = c4d.modules.snap.GetSnapSettings(doc)
    # Sets the snapping type to 3D snapping
settings[c4d.SNAP_SETTINGS_MODE] = c4d.SNAP_SETTINGS_MODE_3D
    # Pushes the modifications made to the in-memory BaseContainer back into the snap settings
c4d.modules.snap.SetSnapSettings(doc, settings)
# Enables point snap
c4d.modules.snap.EnableSnap(True, doc, c4d.SNAPMODE_POINT)
# Pushes an update event to Cinema 4D
c4d.EventAdd()
if __name__ == '__main__':
main()
| 26.458333
| 92
| 0.693701
|
import c4d
def main():
res = c4d.modules.snap.IsSnapEnabled(doc)
if not res:
c4d.modules.snap.EnableSnap(True, doc)
print("Snap Enabled:", c4d.modules.snap.IsSnapEnabled(doc))
settings = c4d.modules.snap.GetSnapSettings(doc)
settings[c4d.SNAP_SETTINGS_MODE] = c4d.SNAP_SETTINGS_MODE_3D
c4d.modules.snap.SetSnapSettings(doc, settings)
c4d.modules.snap.EnableSnap(True, doc, c4d.SNAPMODE_POINT)
c4d.EventAdd()
if __name__ == '__main__':
main()
| true
| true
|
f71a4257afb79b3e6037c8b3e3e9cc6b87d2a7dc
| 212
|
py
|
Python
|
analise/urls.py
|
IgorAlmeeida/coronaDataScience
|
f3b7fb4601870882483cc6ef913c6dcee83432da
|
[
"MIT"
] | null | null | null |
analise/urls.py
|
IgorAlmeeida/coronaDataScience
|
f3b7fb4601870882483cc6ef913c6dcee83432da
|
[
"MIT"
] | null | null | null |
analise/urls.py
|
IgorAlmeeida/coronaDataScience
|
f3b7fb4601870882483cc6ef913c6dcee83432da
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import path
from .views import home, infoDiaEstado
urlpatterns = [
path('', home),
path('info_dia_estado', infoDiaEstado, name="dataInfoDiaEstado"),
]
| 19.272727
| 69
| 0.735849
|
from django.contrib import admin
from django.urls import path
from .views import home, infoDiaEstado
urlpatterns = [
path('', home),
path('info_dia_estado', infoDiaEstado, name="dataInfoDiaEstado"),
]
| true
| true
|
f71a428d471b125b47b81715ffe4cf49f8639526
| 15,466
|
py
|
Python
|
package/tests/test_domain_services/test_vpc.py
|
DYeag/AWS-Shell
|
b5318e72373b1a948ac6aced1c0bb4566d5ae46f
|
[
"0BSD"
] | 3
|
2016-08-22T07:14:56.000Z
|
2018-03-16T07:31:44.000Z
|
package/tests/test_domain_services/test_vpc.py
|
DYeag/AWS-Shell
|
b5318e72373b1a948ac6aced1c0bb4566d5ae46f
|
[
"0BSD"
] | 470
|
2016-03-24T13:38:08.000Z
|
2022-02-05T01:14:05.000Z
|
package/tests/test_domain_services/test_vpc.py
|
DYeag/AWS-Shell
|
b5318e72373b1a948ac6aced1c0bb4566d5ae46f
|
[
"0BSD"
] | 9
|
2016-06-20T11:41:54.000Z
|
2020-11-21T00:42:45.000Z
|
from unittest import TestCase
from mock import Mock, call
from cloudshell.cp.aws.domain.services.ec2.vpc import VPCService
from cloudshell.cp.aws.domain.services.waiters.vpc_peering import VpcPeeringConnectionWaiter
class TestVPCService(TestCase):
def setUp(self):
self.tag_service = Mock()
self.tags = Mock()
self.tag_service.get_default_tags = Mock(return_value=self.tags)
self.subnet_service = Mock()
self.logger = Mock()
self.aws_ec2_datamodel = Mock()
self.ec2_client= Mock()
self.ec2_session = Mock()
self.vpc = Mock()
self.vpc_id = Mock()
self.ec2_session.create_vpc = Mock(return_value=self.vpc)
self.ec2_session.Vpc = Mock(return_value=self.vpc)
self.s3_session = Mock()
self.reservation = Mock()
self.cidr = Mock()
self.vpc_waiter = Mock()
self.vpc_peering_waiter = Mock()
self.instance_service = Mock()
self.sg_service = Mock()
self.route_table_service = Mock()
self.traffic_mirror_service = Mock()
self.vpc_service = VPCService(tag_service=self.tag_service,
subnet_service=self.subnet_service,
instance_service=self.instance_service,
vpc_waiter=self.vpc_waiter,
vpc_peering_waiter=self.vpc_peering_waiter,
sg_service=self.sg_service,
route_table_service=self.route_table_service,
traffic_mirror_service=self.traffic_mirror_service)
def test_get_all_internet_gateways(self):
internet_gate = Mock()
self.vpc.internet_gateways = Mock()
self.vpc.internet_gateways.all = Mock(return_value=[internet_gate])
res = self.vpc_service.get_all_internet_gateways(self.vpc)
self.assertEquals(res, [internet_gate])
def test_remove_all_internet_gateways(self):
internet_gate = Mock()
self.vpc.internet_gateways = Mock()
self.vpc.internet_gateways.all = Mock(return_value=[internet_gate])
self.vpc_service.remove_all_internet_gateways(self.vpc)
internet_gate.detach_from_vpc.assert_called_with(VpcId=self.vpc.id)
self.assertTrue(internet_gate.delete.called)
def test_create_and_attach_internet_gateway(self):
internet_gate = Mock()
internet_gate.id = 'super_id'
self.ec2_session.create_internet_gateway = Mock(return_value=internet_gate)
internet_gateway_id = self.vpc_service.create_and_attach_internet_gateway(self.ec2_session, self.vpc, self.reservation)
self.assertTrue(self.ec2_session.create_internet_gateway.called)
self.tag_service.get_default_tags.assert_called_once_with("IGW {0}".format(self.reservation.reservation_id),self.reservation)
self.tag_service.set_ec2_resource_tags.assert_called_once_with(resource=internet_gate, tags=self.tag_service.get_default_tags())
self.assertEqual(internet_gateway_id, internet_gate.id)
def test_create_vpc_for_reservation(self):
vpc = self.vpc_service.create_vpc_for_reservation(self.ec2_session, self.reservation, self.cidr)
vpc_name = self.vpc_service.VPC_RESERVATION.format(self.reservation.reservation_id)
self.vpc_waiter.wait.assert_called_once_with(vpc=vpc, state=self.vpc_waiter.AVAILABLE)
self.assertEqual(self.vpc, vpc)
self.ec2_session.create_vpc.assert_called_once_with(CidrBlock=self.cidr)
self.tag_service.get_default_tags.assert_called_once_with(vpc_name, self.reservation)
self.tag_service.set_ec2_resource_tags.assert_called_once_with(self.vpc, self.tags)
def test_find_vpc_for_reservation(self):
self.ec2_session.vpcs = Mock()
self.ec2_session.vpcs.filter = Mock(return_value=[self.vpc])
vpc = self.vpc_service.find_vpc_for_reservation(self.ec2_session, self.reservation)
self.assertEqual(vpc, self.vpc)
def test_find_vpc_for_reservation_no_vpc(self):
self.ec2_session.vpcs = Mock()
self.ec2_session.vpcs.filter = Mock(return_value=[])
vpc = self.vpc_service.find_vpc_for_reservation(self.ec2_session, self.reservation)
self.assertIsNone(vpc)
def test_find_vpc_for_reservation_too_many(self):
self.ec2_session.vpcs = Mock()
self.ec2_session.vpcs.filter = Mock(return_value=[1, 2])
self.assertRaises(ValueError, self.vpc_service.find_vpc_for_reservation, self.ec2_session, self.reservation)
def test_peer_vpc(self):
def change_to_active(vpc_peering_connection):
vpc_peering_connection.status['Code'] = VpcPeeringConnectionWaiter.ACTIVE
vpc1 = Mock()
vpc2 = Mock()
peered = Mock()
peered.status = {'Code': VpcPeeringConnectionWaiter.PENDING_ACCEPTANCE}
peered.accept = Mock(side_effect=change_to_active(peered))
self.ec2_session.create_vpc_peering_connection = Mock(return_value=peered)
reservation_model = Mock()
res = self.vpc_service.peer_vpcs(self.ec2_session, vpc1, vpc2, reservation_model,Mock())
self.ec2_session.create_vpc_peering_connection.assert_called_once_with(VpcId=vpc1, PeerVpcId=vpc2)
self.assertEqual(peered.status['Code'], VpcPeeringConnectionWaiter.ACTIVE)
self.assertEqual(res, peered.id)
def test_remove_all_peering(self):
peering = Mock()
peering.status = {'Code': 'ok'}
peering1 = Mock()
peering1.status = {'Code': 'failed'}
peering2 = Mock()
peering2.status = {'Code': 'aa'}
self.vpc.accepted_vpc_peering_connections = Mock()
self.vpc.accepted_vpc_peering_connections.all = Mock(return_value=[peering, peering1, peering2])
res = self.vpc_service.remove_all_peering(self.vpc)
self.assertIsNotNone(res)
self.assertTrue(peering.delete.called)
self.assertFalse(peering1.delete.called)
self.assertTrue(peering2.delete.called)
def test_remove_all_sgs(self):
sg = Mock()
self.vpc.security_groups = Mock()
self.vpc.security_groups.all = Mock(return_value=[sg])
res = self.vpc_service.remove_all_security_groups(self.vpc, self.reservation.reservation_id )
self.assertIsNotNone(res)
self.sg_service.delete_security_group.assert_called_once_with(sg)
    # Trying to delete the isolated security group while it is still referenced by another
    # group's rule fails with "resource sg-XXXXXX has a dependent object"; to avoid that, the
    # isolated group shall be deleted last (a small ordering sketch follows this file's listing).
def test_remove_all_sgs_isolated_group_removed_last(self):
sg = Mock()
sg.group_name = 'dummy'
isolated_sg = Mock()
isolated_sg.group_name = self.sg_service.sandbox_isolated_sg_name(self.reservation.reservation_id)
isolated_at_start_sgs = [isolated_sg, sg]
isolated_at_end_sgs_calls = [call(sg), call(isolated_sg)]
self.vpc.security_groups = Mock()
self.vpc.security_groups.all = Mock(return_value=isolated_at_start_sgs)
res = self.vpc_service.remove_all_security_groups(self.vpc, self.reservation.reservation_id )
self.assertIsNotNone(res)
self.sg_service.delete_security_group.assert_has_calls(isolated_at_end_sgs_calls, any_order=False)
def test_remove_subnets(self):
subnet = Mock()
self.vpc.subnets = Mock()
self.vpc.subnets.all = Mock(return_value=[subnet])
res = self.vpc_service.remove_all_subnets(self.vpc)
self.assertIsNotNone(res)
self.subnet_service.delete_subnet.assert_called_once_with(subnet)
def test_delete_all_instances(self):
instance = Mock()
self.vpc.instances = Mock()
self.vpc.instances.all = Mock(return_value=[instance])
res = self.vpc_service.delete_all_instances(self.vpc)
self.assertIsNotNone(res)
self.instance_service.terminate_instances.assert_called_once_with([instance])
def test_delete_vpc(self):
res = self.vpc_service.delete_vpc(self.vpc)
self.assertTrue(self.vpc.delete.called)
self.assertIsNotNone(res)
def test_get_or_create_subnet_for_vpc_1(self): # Scenario(1): Get
# Arrange
subnet = Mock()
self.subnet_service.get_first_or_none_subnet_from_vpc = Mock(return_value=subnet)
# Act
result = self.vpc_service.get_or_create_subnet_for_vpc(reservation=self.reservation,
cidr="1.2.3.4/24", alias="MySubnet",
vpc=self.vpc,
ec2_client=self.ec2_client,
aws_ec2_datamodel=self.aws_ec2_datamodel,
logger=self.logger)
# Assert
self.assertEqual(result, subnet)
def test_get_or_create_subnet_for_vpc_2(self): # Scenario(2): Create
# Arrange
subnet = Mock()
self.subnet_service.get_first_or_none_subnet_from_vpc = Mock(return_value=None)
self.reservation.reservation_id = "123"
self.vpc_service.get_or_pick_availability_zone = Mock(return_value="MyZone")
self.subnet_service.create_subnet_for_vpc = Mock(return_value=subnet)
# Act
result = self.vpc_service.get_or_create_subnet_for_vpc(reservation=self.reservation,
cidr="1.2.3.4/24", alias="MySubnet",
vpc=self.vpc,
ec2_client=self.ec2_client,
aws_ec2_datamodel=self.aws_ec2_datamodel,
logger=self.logger)
# Assert
self.assertEqual(result, subnet)
self.subnet_service.create_subnet_for_vpc.assert_called_once_with(
vpc=self.vpc,
cidr="1.2.3.4/24",
subnet_name="MySubnet Reservation: 123",
availability_zone="MyZone",
reservation=self.reservation)
def test_get_or_create_private_route_table_1(self): # Scenario(1): Get
# Arrange
table = Mock()
self.route_table_service.get_route_table = Mock(return_value=table)
# Act
result = self.vpc_service.get_or_create_private_route_table(ec2_session=self.ec2_session, reservation=self.reservation,
vpc_id=self.vpc_id)
# Assert
self.assertEqual(result, table)
def test_get_or_create_private_route_table_2(self): # Scenario(2): Create
# Arrange
table = Mock()
self.reservation.reservation_id = "123"
self.route_table_service.get_route_table = Mock(return_value=None)
self.route_table_service.create_route_table = Mock(return_value=table)
# Act
result = self.vpc_service.get_or_create_private_route_table(ec2_session=self.ec2_session,
reservation=self.reservation,
vpc_id=self.vpc_id)
# Assert
self.assertEqual(result, table)
self.route_table_service.create_route_table.assert_called_once_with(
self.ec2_session,
self.reservation,
self.vpc_id,
"Private RoutingTable Reservation: 123"
)
def test_get_or_throw_private_route_table(self):
# Arrange
self.route_table_service.get_route_table = Mock(return_value=None)
# Act
with self.assertRaises(Exception) as error:
self.vpc_service.get_or_throw_private_route_table(ec2_session=self.ec2_session, reservation=self.reservation,
vpc_id=self.vpc_id)
# Assert
self.assertEqual(error.exception.message, "Routing table for non-public subnet was not found")
def test_get_vpc_cidr(self):
# Arrange
self.vpc.cidr_block = "1.2.3.4/24"
# Act
result = self.vpc_service.get_vpc_cidr(ec2_session=self.ec2_session, vpc_id=self.vpc_id)
# Assert
self.assertEqual(result, "1.2.3.4/24")
def test_get_or_pick_availability_zone_1(self): #Scenario(1): from existing subnet
# Arrange
subnet = Mock()
subnet.availability_zone = "z"
self.subnet_service.get_first_or_none_subnet_from_vpc = Mock(return_value=subnet)
# Act
result = self.vpc_service.get_or_pick_availability_zone(ec2_client=self.ec2_client, vpc=self.vpc,
aws_ec2_datamodel=self.aws_ec2_datamodel)
# Assert
self.assertEqual(result, "z")
def test_get_or_pick_availability_zone_2(self): # Scenario(2): from available zones list
# Arrange
self.subnet_service.get_first_or_none_subnet_from_vpc = Mock(return_value=None)
self.ec2_client.describe_availability_zones = Mock(return_value={"AvailabilityZones":[{"ZoneName":"z"}]})
# Act
result = self.vpc_service.get_or_pick_availability_zone(ec2_client=self.ec2_client, vpc=self.vpc,
aws_ec2_datamodel=self.aws_ec2_datamodel)
# Assert
self.assertEqual(result, "z")
def test_get_or_pick_availability_zone_3(self): # Scenario(3): no available zone
# Arrange
self.subnet_service.get_first_or_none_subnet_from_vpc = Mock(return_value=None)
self.ec2_client.describe_availability_zones = Mock(return_value=None)
# Act
with self.assertRaises(Exception) as error:
self.vpc_service.get_or_pick_availability_zone(ec2_client=self.ec2_client, vpc=self.vpc,
aws_ec2_datamodel=self.aws_ec2_datamodel)
# Assert
self.assertEqual(error.exception.message, "No AvailabilityZone is available for this vpc")
def test_remove_custom_route_tables(self):
# Arrange
tables = [Mock(), Mock()]
self.vpc.id = "123"
self.route_table_service.get_custom_route_tables = Mock(return_value=tables)
# Act
result = self.vpc_service.remove_custom_route_tables(ec2_session=self.ec2_session, vpc=self.vpc)
# Assert
self.assertTrue(result)
self.route_table_service.delete_table.assert_any_call(tables[0])
self.route_table_service.delete_table.assert_any_call(tables[1])
def test_set_main_route_table_tags(self):
# Arrange
table = Mock()
tags = Mock()
self.reservation.reservation_id = "123"
self.tag_service.get_default_tags = Mock(return_value=tags)
# Act
self.vpc_service.set_main_route_table_tags(main_route_table=table, reservation=self.reservation)
# Assert
self.tag_service.get_default_tags.assert_called_once_with("Main RoutingTable Reservation: 123", self.reservation)
self.tag_service.set_ec2_resource_tags.assert_called_once_with(table, tags)
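The test_remove_all_sgs_isolated_group_removed_last case above encodes the ordering fix for the "dependent object" error. A minimal sketch of that ordering, independent of the cloudshell service classes (the delete callable is a placeholder):

def delete_security_groups_isolated_last(security_groups, isolated_sg_name, delete_sg):
    # Groups that merely reference the isolated group are deleted first; the
    # isolated (referenced) group goes last, so no dangling dependency remains.
    ordered = sorted(security_groups, key=lambda sg: sg.group_name == isolated_sg_name)
    for sg in ordered:
        delete_sg(sg)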
| 47.009119
| 136
| 0.652916
|
from unittest import TestCase
from mock import Mock, call
from cloudshell.cp.aws.domain.services.ec2.vpc import VPCService
from cloudshell.cp.aws.domain.services.waiters.vpc_peering import VpcPeeringConnectionWaiter
class TestVPCService(TestCase):
def setUp(self):
self.tag_service = Mock()
self.tags = Mock()
self.tag_service.get_default_tags = Mock(return_value=self.tags)
self.subnet_service = Mock()
self.logger = Mock()
self.aws_ec2_datamodel = Mock()
self.ec2_client= Mock()
self.ec2_session = Mock()
self.vpc = Mock()
self.vpc_id = Mock()
self.ec2_session.create_vpc = Mock(return_value=self.vpc)
self.ec2_session.Vpc = Mock(return_value=self.vpc)
self.s3_session = Mock()
self.reservation = Mock()
self.cidr = Mock()
self.vpc_waiter = Mock()
self.vpc_peering_waiter = Mock()
self.instance_service = Mock()
self.sg_service = Mock()
self.route_table_service = Mock()
self.traffic_mirror_service = Mock()
self.vpc_service = VPCService(tag_service=self.tag_service,
subnet_service=self.subnet_service,
instance_service=self.instance_service,
vpc_waiter=self.vpc_waiter,
vpc_peering_waiter=self.vpc_peering_waiter,
sg_service=self.sg_service,
route_table_service=self.route_table_service,
traffic_mirror_service=self.traffic_mirror_service)
def test_get_all_internet_gateways(self):
internet_gate = Mock()
self.vpc.internet_gateways = Mock()
self.vpc.internet_gateways.all = Mock(return_value=[internet_gate])
res = self.vpc_service.get_all_internet_gateways(self.vpc)
self.assertEquals(res, [internet_gate])
def test_remove_all_internet_gateways(self):
internet_gate = Mock()
self.vpc.internet_gateways = Mock()
self.vpc.internet_gateways.all = Mock(return_value=[internet_gate])
self.vpc_service.remove_all_internet_gateways(self.vpc)
internet_gate.detach_from_vpc.assert_called_with(VpcId=self.vpc.id)
self.assertTrue(internet_gate.delete.called)
def test_create_and_attach_internet_gateway(self):
internet_gate = Mock()
internet_gate.id = 'super_id'
self.ec2_session.create_internet_gateway = Mock(return_value=internet_gate)
internet_gateway_id = self.vpc_service.create_and_attach_internet_gateway(self.ec2_session, self.vpc, self.reservation)
self.assertTrue(self.ec2_session.create_internet_gateway.called)
self.tag_service.get_default_tags.assert_called_once_with("IGW {0}".format(self.reservation.reservation_id),self.reservation)
self.tag_service.set_ec2_resource_tags.assert_called_once_with(resource=internet_gate, tags=self.tag_service.get_default_tags())
self.assertEqual(internet_gateway_id, internet_gate.id)
def test_create_vpc_for_reservation(self):
vpc = self.vpc_service.create_vpc_for_reservation(self.ec2_session, self.reservation, self.cidr)
vpc_name = self.vpc_service.VPC_RESERVATION.format(self.reservation.reservation_id)
self.vpc_waiter.wait.assert_called_once_with(vpc=vpc, state=self.vpc_waiter.AVAILABLE)
self.assertEqual(self.vpc, vpc)
self.ec2_session.create_vpc.assert_called_once_with(CidrBlock=self.cidr)
self.tag_service.get_default_tags.assert_called_once_with(vpc_name, self.reservation)
self.tag_service.set_ec2_resource_tags.assert_called_once_with(self.vpc, self.tags)
def test_find_vpc_for_reservation(self):
self.ec2_session.vpcs = Mock()
self.ec2_session.vpcs.filter = Mock(return_value=[self.vpc])
vpc = self.vpc_service.find_vpc_for_reservation(self.ec2_session, self.reservation)
self.assertEqual(vpc, self.vpc)
def test_find_vpc_for_reservation_no_vpc(self):
self.ec2_session.vpcs = Mock()
self.ec2_session.vpcs.filter = Mock(return_value=[])
vpc = self.vpc_service.find_vpc_for_reservation(self.ec2_session, self.reservation)
self.assertIsNone(vpc)
def test_find_vpc_for_reservation_too_many(self):
self.ec2_session.vpcs = Mock()
self.ec2_session.vpcs.filter = Mock(return_value=[1, 2])
self.assertRaises(ValueError, self.vpc_service.find_vpc_for_reservation, self.ec2_session, self.reservation)
def test_peer_vpc(self):
def change_to_active(vpc_peering_connection):
vpc_peering_connection.status['Code'] = VpcPeeringConnectionWaiter.ACTIVE
vpc1 = Mock()
vpc2 = Mock()
peered = Mock()
peered.status = {'Code': VpcPeeringConnectionWaiter.PENDING_ACCEPTANCE}
peered.accept = Mock(side_effect=change_to_active(peered))
self.ec2_session.create_vpc_peering_connection = Mock(return_value=peered)
reservation_model = Mock()
res = self.vpc_service.peer_vpcs(self.ec2_session, vpc1, vpc2, reservation_model,Mock())
self.ec2_session.create_vpc_peering_connection.assert_called_once_with(VpcId=vpc1, PeerVpcId=vpc2)
self.assertEqual(peered.status['Code'], VpcPeeringConnectionWaiter.ACTIVE)
self.assertEqual(res, peered.id)
def test_remove_all_peering(self):
peering = Mock()
peering.status = {'Code': 'ok'}
peering1 = Mock()
peering1.status = {'Code': 'failed'}
peering2 = Mock()
peering2.status = {'Code': 'aa'}
self.vpc.accepted_vpc_peering_connections = Mock()
self.vpc.accepted_vpc_peering_connections.all = Mock(return_value=[peering, peering1, peering2])
res = self.vpc_service.remove_all_peering(self.vpc)
self.assertIsNotNone(res)
self.assertTrue(peering.delete.called)
self.assertFalse(peering1.delete.called)
self.assertTrue(peering2.delete.called)
def test_remove_all_sgs(self):
sg = Mock()
self.vpc.security_groups = Mock()
self.vpc.security_groups.all = Mock(return_value=[sg])
res = self.vpc_service.remove_all_security_groups(self.vpc, self.reservation.reservation_id )
self.assertIsNotNone(res)
self.sg_service.delete_security_group.assert_called_once_with(sg)
    # Otherwise we get "resource sg-XXXXXX has a dependent object" errors, so the isolated security group must be deleted last.
def test_remove_all_sgs_isolated_group_removed_last(self):
sg = Mock()
sg.group_name = 'dummy'
isolated_sg = Mock()
isolated_sg.group_name = self.sg_service.sandbox_isolated_sg_name(self.reservation.reservation_id)
isolated_at_start_sgs = [isolated_sg, sg]
isolated_at_end_sgs_calls = [call(sg), call(isolated_sg)]
self.vpc.security_groups = Mock()
self.vpc.security_groups.all = Mock(return_value=isolated_at_start_sgs)
res = self.vpc_service.remove_all_security_groups(self.vpc, self.reservation.reservation_id )
self.assertIsNotNone(res)
self.sg_service.delete_security_group.assert_has_calls(isolated_at_end_sgs_calls, any_order=False)
def test_remove_subnets(self):
subnet = Mock()
self.vpc.subnets = Mock()
self.vpc.subnets.all = Mock(return_value=[subnet])
res = self.vpc_service.remove_all_subnets(self.vpc)
self.assertIsNotNone(res)
self.subnet_service.delete_subnet.assert_called_once_with(subnet)
def test_delete_all_instances(self):
instance = Mock()
self.vpc.instances = Mock()
self.vpc.instances.all = Mock(return_value=[instance])
res = self.vpc_service.delete_all_instances(self.vpc)
self.assertIsNotNone(res)
self.instance_service.terminate_instances.assert_called_once_with([instance])
def test_delete_vpc(self):
res = self.vpc_service.delete_vpc(self.vpc)
self.assertTrue(self.vpc.delete.called)
self.assertIsNotNone(res)
def test_get_or_create_subnet_for_vpc_1(self): # Scenario(1): Get
# Arrange
subnet = Mock()
self.subnet_service.get_first_or_none_subnet_from_vpc = Mock(return_value=subnet)
# Act
result = self.vpc_service.get_or_create_subnet_for_vpc(reservation=self.reservation,
cidr="1.2.3.4/24", alias="MySubnet",
vpc=self.vpc,
ec2_client=self.ec2_client,
aws_ec2_datamodel=self.aws_ec2_datamodel,
logger=self.logger)
# Assert
self.assertEqual(result, subnet)
def test_get_or_create_subnet_for_vpc_2(self): # Scenario(2): Create
# Arrange
subnet = Mock()
self.subnet_service.get_first_or_none_subnet_from_vpc = Mock(return_value=None)
self.reservation.reservation_id = "123"
self.vpc_service.get_or_pick_availability_zone = Mock(return_value="MyZone")
self.subnet_service.create_subnet_for_vpc = Mock(return_value=subnet)
# Act
result = self.vpc_service.get_or_create_subnet_for_vpc(reservation=self.reservation,
cidr="1.2.3.4/24", alias="MySubnet",
vpc=self.vpc,
ec2_client=self.ec2_client,
aws_ec2_datamodel=self.aws_ec2_datamodel,
logger=self.logger)
# Assert
self.assertEqual(result, subnet)
self.subnet_service.create_subnet_for_vpc.assert_called_once_with(
vpc=self.vpc,
cidr="1.2.3.4/24",
subnet_name="MySubnet Reservation: 123",
availability_zone="MyZone",
reservation=self.reservation)
def test_get_or_create_private_route_table_1(self): # Scenario(1): Get
# Arrange
table = Mock()
self.route_table_service.get_route_table = Mock(return_value=table)
# Act
result = self.vpc_service.get_or_create_private_route_table(ec2_session=self.ec2_session, reservation=self.reservation,
vpc_id=self.vpc_id)
# Assert
self.assertEqual(result, table)
def test_get_or_create_private_route_table_2(self): # Scenario(2): Create
# Arrange
table = Mock()
self.reservation.reservation_id = "123"
self.route_table_service.get_route_table = Mock(return_value=None)
self.route_table_service.create_route_table = Mock(return_value=table)
# Act
result = self.vpc_service.get_or_create_private_route_table(ec2_session=self.ec2_session,
reservation=self.reservation,
vpc_id=self.vpc_id)
# Assert
self.assertEqual(result, table)
self.route_table_service.create_route_table.assert_called_once_with(
self.ec2_session,
self.reservation,
self.vpc_id,
"Private RoutingTable Reservation: 123"
)
def test_get_or_throw_private_route_table(self):
# Arrange
self.route_table_service.get_route_table = Mock(return_value=None)
# Act
with self.assertRaises(Exception) as error:
self.vpc_service.get_or_throw_private_route_table(ec2_session=self.ec2_session, reservation=self.reservation,
vpc_id=self.vpc_id)
# Assert
self.assertEqual(error.exception.message, "Routing table for non-public subnet was not found")
def test_get_vpc_cidr(self):
# Arrange
self.vpc.cidr_block = "1.2.3.4/24"
# Act
result = self.vpc_service.get_vpc_cidr(ec2_session=self.ec2_session, vpc_id=self.vpc_id)
# Assert
self.assertEqual(result, "1.2.3.4/24")
def test_get_or_pick_availability_zone_1(self): #Scenario(1): from existing subnet
# Arrange
subnet = Mock()
subnet.availability_zone = "z"
self.subnet_service.get_first_or_none_subnet_from_vpc = Mock(return_value=subnet)
# Act
result = self.vpc_service.get_or_pick_availability_zone(ec2_client=self.ec2_client, vpc=self.vpc,
aws_ec2_datamodel=self.aws_ec2_datamodel)
# Assert
self.assertEqual(result, "z")
def test_get_or_pick_availability_zone_2(self): # Scenario(2): from available zones list
# Arrange
self.subnet_service.get_first_or_none_subnet_from_vpc = Mock(return_value=None)
self.ec2_client.describe_availability_zones = Mock(return_value={"AvailabilityZones":[{"ZoneName":"z"}]})
# Act
result = self.vpc_service.get_or_pick_availability_zone(ec2_client=self.ec2_client, vpc=self.vpc,
aws_ec2_datamodel=self.aws_ec2_datamodel)
# Assert
self.assertEqual(result, "z")
def test_get_or_pick_availability_zone_3(self): # Scenario(3): no available zone
# Arrange
self.subnet_service.get_first_or_none_subnet_from_vpc = Mock(return_value=None)
self.ec2_client.describe_availability_zones = Mock(return_value=None)
# Act
with self.assertRaises(Exception) as error:
self.vpc_service.get_or_pick_availability_zone(ec2_client=self.ec2_client, vpc=self.vpc,
aws_ec2_datamodel=self.aws_ec2_datamodel)
# Assert
self.assertEqual(error.exception.message, "No AvailabilityZone is available for this vpc")
def test_remove_custom_route_tables(self):
# Arrange
tables = [Mock(), Mock()]
self.vpc.id = "123"
self.route_table_service.get_custom_route_tables = Mock(return_value=tables)
# Act
result = self.vpc_service.remove_custom_route_tables(ec2_session=self.ec2_session, vpc=self.vpc)
# Assert
self.assertTrue(result)
self.route_table_service.delete_table.assert_any_call(tables[0])
self.route_table_service.delete_table.assert_any_call(tables[1])
def test_set_main_route_table_tags(self):
# Arrange
table = Mock()
tags = Mock()
self.reservation.reservation_id = "123"
self.tag_service.get_default_tags = Mock(return_value=tags)
# Act
self.vpc_service.set_main_route_table_tags(main_route_table=table, reservation=self.reservation)
# Assert
self.tag_service.get_default_tags.assert_called_once_with("Main RoutingTable Reservation: 123", self.reservation)
self.tag_service.set_ec2_resource_tags.assert_called_once_with(table, tags)
| true
| true
|
f71a432e88d1054b78e97329d6efffbbe65f95b6
| 8,264
|
py
|
Python
|
wagtail/wagtailsnippets/views/snippets.py
|
markosamuli/wagtail
|
5158ee7aad594d3d9b8b7cd14c139094080466fb
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail/wagtailsnippets/views/snippets.py
|
markosamuli/wagtail
|
5158ee7aad594d3d9b8b7cd14c139094080466fb
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail/wagtailsnippets/views/snippets.py
|
markosamuli/wagtail
|
5158ee7aad594d3d9b8b7cd14c139094080466fb
|
[
"BSD-3-Clause"
] | null | null | null |
from django.apps import apps
from django.core.urlresolvers import reverse
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from wagtail.utils.pagination import paginate
from wagtail.wagtailadmin import messages
from wagtail.wagtailadmin.edit_handlers import (
ObjectList, extract_panel_definitions_from_model_class)
from wagtail.wagtailadmin.forms import SearchForm
from wagtail.wagtailadmin.utils import permission_denied
from wagtail.wagtailsearch.backends import get_search_backend
from wagtail.wagtailsearch.index import class_is_indexed
from wagtail.wagtailsnippets.models import get_snippet_models
from wagtail.wagtailsnippets.permissions import get_permission_name, user_can_edit_snippet_type
# == Helper functions ==
def get_snippet_model_from_url_params(app_name, model_name):
"""
Retrieve a model from an app_label / model_name combo.
Raise Http404 if the model is not a valid snippet type.
"""
try:
model = apps.get_model(app_name, model_name)
except LookupError:
raise Http404
if model not in get_snippet_models():
# don't allow people to hack the URL to edit content types that aren't registered as snippets
raise Http404
return model
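# Illustrative usage sketch ('demo' and 'advert' are hypothetical app/model names, not
# part of this module): the helper above maps URL parts to a snippet model class and
# raises Http404 for anything that is not registered as a snippet, e.g.
#   Advert = get_snippet_model_from_url_params('demo', 'advert')
#   advert = get_object_or_404(Advert, id=1)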
SNIPPET_EDIT_HANDLERS = {}
def get_snippet_edit_handler(model):
if model not in SNIPPET_EDIT_HANDLERS:
if hasattr(model, 'edit_handler'):
# use the edit handler specified on the page class
edit_handler = model.edit_handler
else:
panels = extract_panel_definitions_from_model_class(model)
edit_handler = ObjectList(panels)
SNIPPET_EDIT_HANDLERS[model] = edit_handler.bind_to_model(model)
return SNIPPET_EDIT_HANDLERS[model]
# == Views ==
def index(request):
snippet_model_opts = [
model._meta for model in get_snippet_models()
if user_can_edit_snippet_type(request.user, model)]
return render(request, 'wagtailsnippets/snippets/index.html', {
'snippet_model_opts': sorted(
snippet_model_opts, key=lambda x: x.verbose_name.lower())})
def list(request, app_label, model_name):
model = get_snippet_model_from_url_params(app_label, model_name)
permissions = [
get_permission_name(action, model)
for action in ['add', 'change', 'delete']
]
if not any([request.user.has_perm(perm) for perm in permissions]):
return permission_denied(request)
items = model.objects.all()
# Search
is_searchable = class_is_indexed(model)
is_searching = False
search_query = None
if is_searchable and 'q' in request.GET:
search_form = SearchForm(request.GET, placeholder=_("Search %(snippet_type_name)s") % {
'snippet_type_name': model._meta.verbose_name_plural
})
if search_form.is_valid():
search_query = search_form.cleaned_data['q']
search_backend = get_search_backend()
items = search_backend.search(search_query, items)
is_searching = True
else:
search_form = SearchForm(placeholder=_("Search %(snippet_type_name)s") % {
'snippet_type_name': model._meta.verbose_name_plural
})
paginator, paginated_items = paginate(request, items)
# Template
if request.is_ajax():
template = 'wagtailsnippets/snippets/results.html'
else:
template = 'wagtailsnippets/snippets/type_index.html'
return render(request, template, {
'model_opts': model._meta,
'items': paginated_items,
'can_add_snippet': request.user.has_perm(get_permission_name('add', model)),
'is_searchable': is_searchable,
'search_form': search_form,
'is_searching': is_searching,
'query_string': search_query,
})
def create(request, app_label, model_name):
model = get_snippet_model_from_url_params(app_label, model_name)
permission = get_permission_name('add', model)
if not request.user.has_perm(permission):
return permission_denied(request)
instance = model()
edit_handler_class = get_snippet_edit_handler(model)
form_class = edit_handler_class.get_form_class(model)
if request.POST:
form = form_class(request.POST, request.FILES, instance=instance)
if form.is_valid():
form.save()
messages.success(
request,
_("{snippet_type} '{instance}' created.").format(
snippet_type=capfirst(model._meta.verbose_name),
instance=instance
),
buttons=[
messages.button(reverse(
'wagtailsnippets:edit', args=(app_label, model_name, instance.id)
), _('Edit'))
]
)
return redirect('wagtailsnippets:list', app_label, model_name)
else:
messages.error(request, _("The snippet could not be created due to errors."))
edit_handler = edit_handler_class(instance=instance, form=form)
else:
form = form_class(instance=instance)
edit_handler = edit_handler_class(instance=instance, form=form)
return render(request, 'wagtailsnippets/snippets/create.html', {
'model_opts': model._meta,
'edit_handler': edit_handler,
})
def edit(request, app_label, model_name, id):
model = get_snippet_model_from_url_params(app_label, model_name)
permission = get_permission_name('change', model)
if not request.user.has_perm(permission):
return permission_denied(request)
instance = get_object_or_404(model, id=id)
edit_handler_class = get_snippet_edit_handler(model)
form_class = edit_handler_class.get_form_class(model)
if request.POST:
form = form_class(request.POST, request.FILES, instance=instance)
if form.is_valid():
form.save()
messages.success(
request,
_("{snippet_type} '{instance}' updated.").format(
snippet_type=capfirst(model._meta.verbose_name_plural),
instance=instance
),
buttons=[
messages.button(reverse(
'wagtailsnippets:edit', args=(app_label, model_name, instance.id)
), _('Edit'))
]
)
return redirect('wagtailsnippets:list', app_label, model_name)
else:
messages.error(request, _("The snippet could not be saved due to errors."))
edit_handler = edit_handler_class(instance=instance, form=form)
else:
form = form_class(instance=instance)
edit_handler = edit_handler_class(instance=instance, form=form)
return render(request, 'wagtailsnippets/snippets/edit.html', {
'model_opts': model._meta,
'instance': instance,
'edit_handler': edit_handler
})
def delete(request, app_label, model_name, id):
model = get_snippet_model_from_url_params(app_label, model_name)
permission = get_permission_name('delete', model)
if not request.user.has_perm(permission):
return permission_denied(request)
instance = get_object_or_404(model, id=id)
if request.POST:
instance.delete()
messages.success(
request,
_("{snippet_type} '{instance}' deleted.").format(
snippet_type=capfirst(model._meta.verbose_name_plural),
instance=instance
)
)
return redirect('wagtailsnippets:list', app_label, model_name)
return render(request, 'wagtailsnippets/snippets/confirm_delete.html', {
'model_opts': model._meta,
'instance': instance,
})
def usage(request, app_label, model_name, id):
model = get_snippet_model_from_url_params(app_label, model_name)
instance = get_object_or_404(model, id=id)
paginator, used_by = paginate(request, instance.get_usage())
return render(request, "wagtailsnippets/snippets/usage.html", {
'instance': instance,
'used_by': used_by
})
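# Illustrative sketch (Advert is a hypothetical snippet model): the same edit-handler
# machinery used by the create/edit views above can be exercised directly, e.g.
#   edit_handler_class = get_snippet_edit_handler(Advert)
#   form_class = edit_handler_class.get_form_class(Advert)
#   form = form_class(instance=Advert())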
| 34.290456
| 101
| 0.666142
|
from django.apps import apps
from django.core.urlresolvers import reverse
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from wagtail.utils.pagination import paginate
from wagtail.wagtailadmin import messages
from wagtail.wagtailadmin.edit_handlers import (
ObjectList, extract_panel_definitions_from_model_class)
from wagtail.wagtailadmin.forms import SearchForm
from wagtail.wagtailadmin.utils import permission_denied
from wagtail.wagtailsearch.backends import get_search_backend
from wagtail.wagtailsearch.index import class_is_indexed
from wagtail.wagtailsnippets.models import get_snippet_models
from wagtail.wagtailsnippets.permissions import get_permission_name, user_can_edit_snippet_type
def get_snippet_model_from_url_params(app_name, model_name):
try:
model = apps.get_model(app_name, model_name)
except LookupError:
raise Http404
if model not in get_snippet_models():
raise Http404
return model
SNIPPET_EDIT_HANDLERS = {}
def get_snippet_edit_handler(model):
if model not in SNIPPET_EDIT_HANDLERS:
if hasattr(model, 'edit_handler'):
edit_handler = model.edit_handler
else:
panels = extract_panel_definitions_from_model_class(model)
edit_handler = ObjectList(panels)
SNIPPET_EDIT_HANDLERS[model] = edit_handler.bind_to_model(model)
return SNIPPET_EDIT_HANDLERS[model]
def index(request):
snippet_model_opts = [
model._meta for model in get_snippet_models()
if user_can_edit_snippet_type(request.user, model)]
return render(request, 'wagtailsnippets/snippets/index.html', {
'snippet_model_opts': sorted(
snippet_model_opts, key=lambda x: x.verbose_name.lower())})
def list(request, app_label, model_name):
model = get_snippet_model_from_url_params(app_label, model_name)
permissions = [
get_permission_name(action, model)
for action in ['add', 'change', 'delete']
]
if not any([request.user.has_perm(perm) for perm in permissions]):
return permission_denied(request)
items = model.objects.all()
is_searchable = class_is_indexed(model)
is_searching = False
search_query = None
if is_searchable and 'q' in request.GET:
search_form = SearchForm(request.GET, placeholder=_("Search %(snippet_type_name)s") % {
'snippet_type_name': model._meta.verbose_name_plural
})
if search_form.is_valid():
search_query = search_form.cleaned_data['q']
search_backend = get_search_backend()
items = search_backend.search(search_query, items)
is_searching = True
else:
search_form = SearchForm(placeholder=_("Search %(snippet_type_name)s") % {
'snippet_type_name': model._meta.verbose_name_plural
})
paginator, paginated_items = paginate(request, items)
if request.is_ajax():
template = 'wagtailsnippets/snippets/results.html'
else:
template = 'wagtailsnippets/snippets/type_index.html'
return render(request, template, {
'model_opts': model._meta,
'items': paginated_items,
'can_add_snippet': request.user.has_perm(get_permission_name('add', model)),
'is_searchable': is_searchable,
'search_form': search_form,
'is_searching': is_searching,
'query_string': search_query,
})
def create(request, app_label, model_name):
model = get_snippet_model_from_url_params(app_label, model_name)
permission = get_permission_name('add', model)
if not request.user.has_perm(permission):
return permission_denied(request)
instance = model()
edit_handler_class = get_snippet_edit_handler(model)
form_class = edit_handler_class.get_form_class(model)
if request.POST:
form = form_class(request.POST, request.FILES, instance=instance)
if form.is_valid():
form.save()
messages.success(
request,
_("{snippet_type} '{instance}' created.").format(
snippet_type=capfirst(model._meta.verbose_name),
instance=instance
),
buttons=[
messages.button(reverse(
'wagtailsnippets:edit', args=(app_label, model_name, instance.id)
), _('Edit'))
]
)
return redirect('wagtailsnippets:list', app_label, model_name)
else:
messages.error(request, _("The snippet could not be created due to errors."))
edit_handler = edit_handler_class(instance=instance, form=form)
else:
form = form_class(instance=instance)
edit_handler = edit_handler_class(instance=instance, form=form)
return render(request, 'wagtailsnippets/snippets/create.html', {
'model_opts': model._meta,
'edit_handler': edit_handler,
})
def edit(request, app_label, model_name, id):
model = get_snippet_model_from_url_params(app_label, model_name)
permission = get_permission_name('change', model)
if not request.user.has_perm(permission):
return permission_denied(request)
instance = get_object_or_404(model, id=id)
edit_handler_class = get_snippet_edit_handler(model)
form_class = edit_handler_class.get_form_class(model)
if request.POST:
form = form_class(request.POST, request.FILES, instance=instance)
if form.is_valid():
form.save()
messages.success(
request,
_("{snippet_type} '{instance}' updated.").format(
snippet_type=capfirst(model._meta.verbose_name_plural),
instance=instance
),
buttons=[
messages.button(reverse(
'wagtailsnippets:edit', args=(app_label, model_name, instance.id)
), _('Edit'))
]
)
return redirect('wagtailsnippets:list', app_label, model_name)
else:
messages.error(request, _("The snippet could not be saved due to errors."))
edit_handler = edit_handler_class(instance=instance, form=form)
else:
form = form_class(instance=instance)
edit_handler = edit_handler_class(instance=instance, form=form)
return render(request, 'wagtailsnippets/snippets/edit.html', {
'model_opts': model._meta,
'instance': instance,
'edit_handler': edit_handler
})
def delete(request, app_label, model_name, id):
model = get_snippet_model_from_url_params(app_label, model_name)
permission = get_permission_name('delete', model)
if not request.user.has_perm(permission):
return permission_denied(request)
instance = get_object_or_404(model, id=id)
if request.POST:
instance.delete()
messages.success(
request,
_("{snippet_type} '{instance}' deleted.").format(
snippet_type=capfirst(model._meta.verbose_name_plural),
instance=instance
)
)
return redirect('wagtailsnippets:list', app_label, model_name)
return render(request, 'wagtailsnippets/snippets/confirm_delete.html', {
'model_opts': model._meta,
'instance': instance,
})
def usage(request, app_label, model_name, id):
model = get_snippet_model_from_url_params(app_label, model_name)
instance = get_object_or_404(model, id=id)
paginator, used_by = paginate(request, instance.get_usage())
return render(request, "wagtailsnippets/snippets/usage.html", {
'instance': instance,
'used_by': used_by
})
| true
| true
|
f71a43557442ce97907f082e75eb667688ce3597
| 664
|
py
|
Python
|
manage.py
|
bastoune57/gokiting_back_end
|
f3edcbeede292713349b28f2390b5d57e1420f8e
|
[
"MIT"
] | null | null | null |
manage.py
|
bastoune57/gokiting_back_end
|
f3edcbeede292713349b28f2390b5d57e1420f8e
|
[
"MIT"
] | null | null | null |
manage.py
|
bastoune57/gokiting_back_end
|
f3edcbeede292713349b28f2390b5d57e1420f8e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gokiting.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.869565
| 73
| 0.679217
|
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gokiting.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true
| true
|
f71a4417980584c697fd59995b017ae74c4d8707
| 210
|
py
|
Python
|
visualisation/core/__init__.py
|
dashings/CAMVIS
|
fb7e4e5d885ae227140f7ab40b5f47e730ec249b
|
[
"MIT"
] | 213
|
2018-12-20T12:09:07.000Z
|
2022-03-21T10:09:58.000Z
|
visualisation/core/__init__.py
|
dashings/CAMVIS
|
fb7e4e5d885ae227140f7ab40b5f47e730ec249b
|
[
"MIT"
] | 3
|
2020-07-16T05:11:25.000Z
|
2022-03-16T13:59:07.000Z
|
visualisation/core/__init__.py
|
dashings/CAMVIS
|
fb7e4e5d885ae227140f7ab40b5f47e730ec249b
|
[
"MIT"
] | 41
|
2019-03-06T12:01:24.000Z
|
2022-03-09T07:55:56.000Z
|
from .SaliencyMap import SaliencyMap
from .DeepDream import DeepDream
from .GradCam import GradCam
from .Weights import Weights
from .Base import Base
from .ClassActivationMapping import ClassActivationMapping
| 30
| 58
| 0.857143
|
from .SaliencyMap import SaliencyMap
from .DeepDream import DeepDream
from .GradCam import GradCam
from .Weights import Weights
from .Base import Base
from .ClassActivationMapping import ClassActivationMapping
| true
| true
|
f71a45fb15de192f5a1129710b39b955da52f151
| 13,147
|
py
|
Python
|
tests/query_test/test_observability.py
|
twmarshall/impala
|
bdd904922a220c37326928ac674779acaef5f6fa
|
[
"Apache-2.0"
] | null | null | null |
tests/query_test/test_observability.py
|
twmarshall/impala
|
bdd904922a220c37326928ac674779acaef5f6fa
|
[
"Apache-2.0"
] | null | null | null |
tests/query_test/test_observability.py
|
twmarshall/impala
|
bdd904922a220c37326928ac674779acaef5f6fa
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tests.common.impala_cluster import ImpalaCluster
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.skip import SkipIfS3, SkipIfADLS, SkipIfIsilon, SkipIfLocal
from tests.util.filesystem_utils import IS_EC
import logging
import pytest
import re
import time
class TestObservability(ImpalaTestSuite):
@classmethod
def get_workload(self):
return 'functional-query'
def test_merge_exchange_num_rows(self):
"""Regression test for IMPALA-1473 - checks that the exec summary for a merging
exchange with a limit reports the number of rows returned as equal to the limit,
and that the coordinator fragment portion of the runtime profile reports the number
of rows returned correctly."""
query = """select tinyint_col, count(*) from functional.alltypes
group by tinyint_col order by tinyint_col limit 5"""
result = self.execute_query(query)
assert result.exec_summary[0]['operator'] == '05:MERGING-EXCHANGE'
assert result.exec_summary[0]['num_rows'] == 5
assert result.exec_summary[0]['est_num_rows'] == 5
assert result.exec_summary[0]['peak_mem'] > 0
for line in result.runtime_profile.split('\n'):
# The first 'RowsProduced' we find is for the coordinator fragment.
if 'RowsProduced' in line:
assert '(5)' in line
break
def test_broadcast_num_rows(self):
"""Regression test for IMPALA-3002 - checks that the num_rows for a broadcast node
    in the exec summary is correctly set as the max over all instances, not the sum."""
query = """select distinct a.int_col, a.string_col from functional.alltypes a
inner join functional.alltypessmall b on (a.id = b.id)
where a.year = 2009 and b.month = 2"""
result = self.execute_query(query)
assert result.exec_summary[5]['operator'] == '04:EXCHANGE'
assert result.exec_summary[5]['num_rows'] == 25
assert result.exec_summary[5]['est_num_rows'] == 25
assert result.exec_summary[5]['peak_mem'] > 0
@SkipIfS3.hbase
@SkipIfLocal.hbase
@SkipIfIsilon.hbase
@SkipIfADLS.hbase
def test_scan_summary(self):
"""IMPALA-4499: Checks that the exec summary for scans show the table name."""
# HDFS table
query = "select count(*) from functional.alltypestiny"
result = self.execute_query(query)
scan_idx = len(result.exec_summary) - 1
assert result.exec_summary[scan_idx]['operator'] == '00:SCAN HDFS'
assert result.exec_summary[scan_idx]['detail'] == 'functional.alltypestiny'
# KUDU table
query = "select count(*) from functional_kudu.alltypestiny"
result = self.execute_query(query)
scan_idx = len(result.exec_summary) - 1
assert result.exec_summary[scan_idx]['operator'] == '00:SCAN KUDU'
assert result.exec_summary[scan_idx]['detail'] == 'functional_kudu.alltypestiny'
# HBASE table
query = "select count(*) from functional_hbase.alltypestiny"
result = self.execute_query(query)
scan_idx = len(result.exec_summary) - 1
assert result.exec_summary[scan_idx]['operator'] == '00:SCAN HBASE'
assert result.exec_summary[scan_idx]['detail'] == 'functional_hbase.alltypestiny'
def test_query_states(self):
"""Tests that the query profile shows expected query states."""
query = "select count(*) from functional.alltypes"
handle = self.execute_query_async(query,
{"debug_action": "CRS_BEFORE_ADMISSION:SLEEP@1000"})
# If ExecuteStatement() has completed and the query is paused in the admission control
# phase, then the query must be in COMPILED state.
profile = self.client.get_runtime_profile(handle)
assert "Query State: COMPILED" in profile
# After completion of the admission control phase, the query must have at least
# reached RUNNING state.
self.client.wait_for_admission_control(handle)
profile = self.client.get_runtime_profile(handle)
assert "Query State: RUNNING" in profile or \
"Query State: FINISHED" in profile, profile
results = self.client.fetch(query, handle)
profile = self.client.get_runtime_profile(handle)
# After fetching the results, the query must be in state FINISHED.
assert "Query State: FINISHED" in profile, profile
def test_query_options(self):
"""Test that the query profile shows expected non-default query options, both set
explicitly through client and those set by planner"""
# Set mem_limit and runtime_filter_wait_time_ms to non-default and default value.
query_opts = {'mem_limit': 8589934592, 'runtime_filter_wait_time_ms': 0}
profile = self.execute_query("select 1", query_opts).runtime_profile
assert "Query Options (set by configuration): MEM_LIMIT=8589934592" in profile,\
profile
# For this query, the planner sets NUM_NODES=1, NUM_SCANNER_THREADS=1,
# RUNTIME_FILTER_MODE=0 and MT_DOP=0
expected_str = ("Query Options (set by configuration and planner): "
"MEM_LIMIT=8589934592,NUM_NODES=1,NUM_SCANNER_THREADS=1,"
"RUNTIME_FILTER_MODE=0,MT_DOP=0{erasure_coding}\n")
expected_str = expected_str.format(
erasure_coding=",ALLOW_ERASURE_CODED_FILES=1" if IS_EC else "")
assert expected_str in profile
def test_exec_summary(self):
"""Test that the exec summary is populated correctly in every query state"""
query = "select count(*) from functional.alltypes"
handle = self.execute_query_async(query,
{"debug_action": "CRS_BEFORE_ADMISSION:SLEEP@1000"})
# If ExecuteStatement() has completed and the query is paused in the admission control
# phase, then the coordinator has not started yet and exec_summary should be empty.
exec_summary = self.client.get_exec_summary(handle)
assert exec_summary is not None and exec_summary.nodes is None
# After completion of the admission control phase, the coordinator would have started
# and we should get a populated exec_summary.
self.client.wait_for_admission_control(handle)
exec_summary = self.client.get_exec_summary(handle)
assert exec_summary is not None and exec_summary.nodes is not None
self.client.fetch(query, handle)
exec_summary = self.client.get_exec_summary(handle)
# After fetching the results and reaching finished state, we should still be able to
# fetch an exec_summary.
assert exec_summary is not None and exec_summary.nodes is not None
@SkipIfLocal.multiple_impalad
@pytest.mark.xfail(reason="IMPALA-6338")
def test_profile_fragment_instances(self):
"""IMPALA-6081: Test that the expected number of fragment instances and their exec
nodes appear in the runtime profile, even when fragments may be quickly cancelled when
all results are already returned."""
results = self.execute_query("""
with l as (select * from tpch.lineitem UNION ALL select * from tpch.lineitem)
select STRAIGHT_JOIN count(*) from (select * from tpch.lineitem a LIMIT 1) a
join (select * from l LIMIT 2000000) b on a.l_orderkey = -b.l_orderkey;""")
# There are 3 scan nodes and each appears in the profile 4 times (for 3 fragment
# instances + the averaged fragment).
assert results.runtime_profile.count("HDFS_SCAN_NODE") == 12
# There are 3 exchange nodes and each appears in the profile 2 times (for 1 fragment
# instance + the averaged fragment).
assert results.runtime_profile.count("EXCHANGE_NODE") == 6
# The following appear only in the root fragment which has 1 instance.
assert results.runtime_profile.count("HASH_JOIN_NODE") == 2
assert results.runtime_profile.count("AGGREGATION_NODE") == 2
assert results.runtime_profile.count("PLAN_ROOT_SINK") == 2
def test_query_profile_contains_query_events(self):
"""Test that the expected events show up in a query profile."""
event_regexes = [r'Query Timeline:',
r'Query submitted:',
r'Planning finished:',
r'Submit for admission:',
r'Completed admission:',
r'Ready to start on .* backends:',
r'All .* execution backends \(.* fragment instances\) started:',
r'Rows available:',
r'First row fetched:',
r'Last row fetched:',
r'Released admission control resources:']
query = "select * from functional.alltypes"
runtime_profile = self.execute_query(query).runtime_profile
self.__verify_profile_event_sequence(event_regexes, runtime_profile)
def test_query_profile_contains_instance_events(self):
"""Test that /query_profile_encoded contains an event timeline for fragment
instances, even when there are errors."""
event_regexes = [r'Fragment Instance Lifecycle Event Timeline',
r'Prepare Finished',
r'Open Finished',
r'First Batch Produced',
r'First Batch Sent',
r'ExecInternal Finished']
query = "select count(*) from functional.alltypes"
runtime_profile = self.execute_query(query).runtime_profile
self.__verify_profile_event_sequence(event_regexes, runtime_profile)
def __verify_profile_event_sequence(self, event_regexes, runtime_profile):
"""Check that 'event_regexes' appear in a consecutive series of lines in
'runtime_profile'"""
lines = runtime_profile.splitlines()
event_regex_index = 0
# Check that the strings appear in the above order with no gaps in the profile.
for line in runtime_profile.splitlines():
match = re.search(event_regexes[event_regex_index], line)
if match is not None:
event_regex_index += 1
if event_regex_index == len(event_regexes):
# Found all the lines - we're done.
return
else:
# Haven't found the first regex yet.
assert event_regex_index == 0, \
event_regexes[event_regex_index] + " not in " + line + "\n" + runtime_profile
assert event_regex_index == len(event_regexes), \
"Didn't find all events in profile: \n" + runtime_profile
class TestThriftProfile(ImpalaTestSuite):
@classmethod
def get_workload(self):
return 'functional-query'
# IMPALA-6399: Run this test serially to avoid a delay over the wait time in fetching
# the profile.
# This test needs to call self.client.close() to force computation of query end time,
# so it has to be in its own suite (IMPALA-6498).
@pytest.mark.execute_serially
def test_query_profile_thrift_timestamps(self):
"""Test that the query profile start and end time date-time strings have
nanosecond precision. Nanosecond precision is expected by management API clients
that consume Impala debug webpages."""
query = "select sleep(5)"
handle = self.client.execute_async(query)
query_id = handle.get_handle().id
results = self.client.fetch(query, handle)
self.client.close()
MAX_WAIT = 300
start = time.time()
end = start + MAX_WAIT
while time.time() <= end:
# Sleep before trying to fetch the profile. This helps to prevent a warning when the
# profile is not yet available immediately. It also makes it less likely to
# introduce an error below in future changes by forgetting to sleep.
time.sleep(1)
tree = self.impalad_test_service.get_thrift_profile(query_id)
if not tree:
continue
# tree.nodes[1] corresponds to ClientRequestState::summary_profile_
# See be/src/service/client-request-state.[h|cc].
start_time = tree.nodes[1].info_strings["Start Time"]
end_time = tree.nodes[1].info_strings["End Time"]
# Start and End Times are of the form "2017-12-07 22:26:52.167711000"
start_time_sub_sec_str = start_time.split('.')[-1]
end_time_sub_sec_str = end_time.split('.')[-1]
if len(end_time_sub_sec_str) == 0:
elapsed = time.time() - start
logging.info("end_time_sub_sec_str hasn't shown up yet, elapsed=%d", elapsed)
continue
assert len(end_time_sub_sec_str) == 9, end_time
assert len(start_time_sub_sec_str) == 9, start_time
return True
# If we're here, we didn't get the final thrift profile from the debug web page.
# This could happen due to heavy system load. The test is then inconclusive.
# Log a message and fail this run.
dbg_str = "Debug thrift profile for query {0} not available in {1} seconds".format(
query_id, MAX_WAIT)
assert False, dbg_str
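# Illustrative sketch (standalone of the test classes above; the timestamp is the example
# format quoted in the comments): the nanosecond-precision check boils down to counting
# the digits after the decimal point.
#   ts = "2017-12-07 22:26:52.167711000"
#   assert len(ts.split('.')[-1]) == 9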
| 47.634058
| 90
| 0.716818
|
from tests.common.impala_cluster import ImpalaCluster
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.skip import SkipIfS3, SkipIfADLS, SkipIfIsilon, SkipIfLocal
from tests.util.filesystem_utils import IS_EC
import logging
import pytest
import re
import time
class TestObservability(ImpalaTestSuite):
@classmethod
def get_workload(self):
return 'functional-query'
def test_merge_exchange_num_rows(self):
query = """select tinyint_col, count(*) from functional.alltypes
group by tinyint_col order by tinyint_col limit 5"""
result = self.execute_query(query)
assert result.exec_summary[0]['operator'] == '05:MERGING-EXCHANGE'
assert result.exec_summary[0]['num_rows'] == 5
assert result.exec_summary[0]['est_num_rows'] == 5
assert result.exec_summary[0]['peak_mem'] > 0
for line in result.runtime_profile.split('\n'):
if 'RowsProduced' in line:
assert '(5)' in line
break
def test_broadcast_num_rows(self):
query = """select distinct a.int_col, a.string_col from functional.alltypes a
inner join functional.alltypessmall b on (a.id = b.id)
where a.year = 2009 and b.month = 2"""
result = self.execute_query(query)
assert result.exec_summary[5]['operator'] == '04:EXCHANGE'
assert result.exec_summary[5]['num_rows'] == 25
assert result.exec_summary[5]['est_num_rows'] == 25
assert result.exec_summary[5]['peak_mem'] > 0
@SkipIfS3.hbase
@SkipIfLocal.hbase
@SkipIfIsilon.hbase
@SkipIfADLS.hbase
def test_scan_summary(self):
query = "select count(*) from functional.alltypestiny"
result = self.execute_query(query)
scan_idx = len(result.exec_summary) - 1
assert result.exec_summary[scan_idx]['operator'] == '00:SCAN HDFS'
assert result.exec_summary[scan_idx]['detail'] == 'functional.alltypestiny'
query = "select count(*) from functional_kudu.alltypestiny"
result = self.execute_query(query)
scan_idx = len(result.exec_summary) - 1
assert result.exec_summary[scan_idx]['operator'] == '00:SCAN KUDU'
assert result.exec_summary[scan_idx]['detail'] == 'functional_kudu.alltypestiny'
query = "select count(*) from functional_hbase.alltypestiny"
result = self.execute_query(query)
scan_idx = len(result.exec_summary) - 1
assert result.exec_summary[scan_idx]['operator'] == '00:SCAN HBASE'
assert result.exec_summary[scan_idx]['detail'] == 'functional_hbase.alltypestiny'
def test_query_states(self):
query = "select count(*) from functional.alltypes"
handle = self.execute_query_async(query,
{"debug_action": "CRS_BEFORE_ADMISSION:SLEEP@1000"})
profile = self.client.get_runtime_profile(handle)
assert "Query State: COMPILED" in profile
self.client.wait_for_admission_control(handle)
profile = self.client.get_runtime_profile(handle)
assert "Query State: RUNNING" in profile or \
"Query State: FINISHED" in profile, profile
results = self.client.fetch(query, handle)
profile = self.client.get_runtime_profile(handle)
assert "Query State: FINISHED" in profile, profile
def test_query_options(self):
query_opts = {'mem_limit': 8589934592, 'runtime_filter_wait_time_ms': 0}
profile = self.execute_query("select 1", query_opts).runtime_profile
assert "Query Options (set by configuration): MEM_LIMIT=8589934592" in profile,\
profile
expected_str = ("Query Options (set by configuration and planner): "
"MEM_LIMIT=8589934592,NUM_NODES=1,NUM_SCANNER_THREADS=1,"
"RUNTIME_FILTER_MODE=0,MT_DOP=0{erasure_coding}\n")
expected_str = expected_str.format(
erasure_coding=",ALLOW_ERASURE_CODED_FILES=1" if IS_EC else "")
assert expected_str in profile
def test_exec_summary(self):
query = "select count(*) from functional.alltypes"
handle = self.execute_query_async(query,
{"debug_action": "CRS_BEFORE_ADMISSION:SLEEP@1000"})
exec_summary = self.client.get_exec_summary(handle)
assert exec_summary is not None and exec_summary.nodes is None
self.client.wait_for_admission_control(handle)
exec_summary = self.client.get_exec_summary(handle)
assert exec_summary is not None and exec_summary.nodes is not None
self.client.fetch(query, handle)
exec_summary = self.client.get_exec_summary(handle)
assert exec_summary is not None and exec_summary.nodes is not None
@SkipIfLocal.multiple_impalad
@pytest.mark.xfail(reason="IMPALA-6338")
def test_profile_fragment_instances(self):
results = self.execute_query("""
with l as (select * from tpch.lineitem UNION ALL select * from tpch.lineitem)
select STRAIGHT_JOIN count(*) from (select * from tpch.lineitem a LIMIT 1) a
join (select * from l LIMIT 2000000) b on a.l_orderkey = -b.l_orderkey;""")
assert results.runtime_profile.count("HDFS_SCAN_NODE") == 12
assert results.runtime_profile.count("EXCHANGE_NODE") == 6
assert results.runtime_profile.count("HASH_JOIN_NODE") == 2
assert results.runtime_profile.count("AGGREGATION_NODE") == 2
assert results.runtime_profile.count("PLAN_ROOT_SINK") == 2
def test_query_profile_contains_query_events(self):
event_regexes = [r'Query Timeline:',
r'Query submitted:',
r'Planning finished:',
r'Submit for admission:',
r'Completed admission:',
r'Ready to start on .* backends:',
r'All .* execution backends \(.* fragment instances\) started:',
r'Rows available:',
r'First row fetched:',
r'Last row fetched:',
r'Released admission control resources:']
query = "select * from functional.alltypes"
runtime_profile = self.execute_query(query).runtime_profile
self.__verify_profile_event_sequence(event_regexes, runtime_profile)
def test_query_profile_contains_instance_events(self):
event_regexes = [r'Fragment Instance Lifecycle Event Timeline',
r'Prepare Finished',
r'Open Finished',
r'First Batch Produced',
r'First Batch Sent',
r'ExecInternal Finished']
query = "select count(*) from functional.alltypes"
runtime_profile = self.execute_query(query).runtime_profile
self.__verify_profile_event_sequence(event_regexes, runtime_profile)
def __verify_profile_event_sequence(self, event_regexes, runtime_profile):
lines = runtime_profile.splitlines()
event_regex_index = 0
for line in runtime_profile.splitlines():
match = re.search(event_regexes[event_regex_index], line)
if match is not None:
event_regex_index += 1
if event_regex_index == len(event_regexes):
return
else:
# Haven't found the first regex yet.
assert event_regex_index == 0, \
event_regexes[event_regex_index] + " not in " + line + "\n" + runtime_profile
assert event_regex_index == len(event_regexes), \
"Didn't find all events in profile: \n" + runtime_profile
class TestThriftProfile(ImpalaTestSuite):
@classmethod
def get_workload(self):
return 'functional-query'
# IMPALA-6399: Run this test serially to avoid a delay over the wait time in fetching
# the profile.
# This test needs to call self.client.close() to force computation of query end time,
# so it has to be in its own suite (IMPALA-6498).
@pytest.mark.execute_serially
def test_query_profile_thrift_timestamps(self):
query = "select sleep(5)"
handle = self.client.execute_async(query)
query_id = handle.get_handle().id
results = self.client.fetch(query, handle)
self.client.close()
MAX_WAIT = 300
start = time.time()
end = start + MAX_WAIT
while time.time() <= end:
# Sleep before trying to fetch the profile. This helps to prevent a warning when the
# profile is not yet available immediately. It also makes it less likely to
# introduce an error below in future changes by forgetting to sleep.
time.sleep(1)
tree = self.impalad_test_service.get_thrift_profile(query_id)
if not tree:
continue
# tree.nodes[1] corresponds to ClientRequestState::summary_profile_
# See be/src/service/client-request-state.[h|cc].
start_time = tree.nodes[1].info_strings["Start Time"]
end_time = tree.nodes[1].info_strings["End Time"]
# Start and End Times are of the form "2017-12-07 22:26:52.167711000"
start_time_sub_sec_str = start_time.split('.')[-1]
end_time_sub_sec_str = end_time.split('.')[-1]
if len(end_time_sub_sec_str) == 0:
elapsed = time.time() - start
logging.info("end_time_sub_sec_str hasn't shown up yet, elapsed=%d", elapsed)
continue
assert len(end_time_sub_sec_str) == 9, end_time
assert len(start_time_sub_sec_str) == 9, start_time
return True
dbg_str = "Debug thrift profile for query {0} not available in {1} seconds".format(
query_id, MAX_WAIT)
assert False, dbg_str
| true
| true
|
f71a466907a327211f69a6d078aeba3666c44465
| 3,067
|
py
|
Python
|
GPA-Spider/config.py
|
xsx-123/awesome-sdu-scripts
|
bc371fda9d4d2a616f82c9a44b7d1d6eddb2c6eb
|
[
"MIT"
] | 21
|
2021-06-01T09:54:20.000Z
|
2022-03-11T16:50:42.000Z
|
GPA-Spider/config.py
|
xsx-123/awesome-sdu-scripts
|
bc371fda9d4d2a616f82c9a44b7d1d6eddb2c6eb
|
[
"MIT"
] | 1
|
2019-08-16T05:30:19.000Z
|
2019-08-16T05:30:19.000Z
|
GPA-Spider/config.py
|
xsx-123/awesome-sdu-scripts
|
bc371fda9d4d2a616f82c9a44b7d1d6eddb2c6eb
|
[
"MIT"
] | 8
|
2021-07-21T03:11:40.000Z
|
2021-12-03T08:25:19.000Z
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
# Copyright 2018 ZhangT. All Rights Reserved.
# Author: ZhangT
# Author-Github: github.com/zhangt2333
# config.py 2018/2/10 21:49
# Shared constants and utility functions
HEADERS = {"Host": "bkjws.sdu.edu.cn",
"Connection": "keep-alive",
"Accept": "*/*",
"Origin": "http://bkjws.sdu.edu.cn",
"X-Requested-With": "XMLHttpRequest",
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Accept-Language": "zh-CN,zh;q=0.8"}
# POST payload used when requesting grades
aoData = 'aoData=%5B%7B%22name%22%3A%22sEcho%22%2C%22value%22%3A1%7D%2C%7B%22' \
'name%22%3A%22iColumns%22%2C%22value%22%3A8%7D%2C%7B%22name%22%3A%22' \
'sColumns%22%2C%22value%22%3A%22%22%7D%2C%7B%22name%22%3A%22iDisplay' \
'Start%22%2C%22value%22%3A0%7D%2C%7B%22name%22%3A%22iDisplayLength%2' \
'2%2C%22value%22%3A-1%7D%2C%7B%22name%22%3A%22mDataProp_0%22%2C%22va' \
'lue%22%3A%22function%22%7D%2C%7B%22name%22%3A%22mDataProp_1%22%2C%2' \
'2value%22%3A%22kch%22%7D%2C%7B%22name%22%3A%22mDataProp_2%22%2C%22v' \
'alue%22%3A%22kcm%22%7D%2C%7B%22name%22%3A%22mDataProp_3%22%2C%22val' \
'ue%22%3A%22kxh%22%7D%2C%7B%22name%22%3A%22mDataProp_4%22%2C%22value' \
'%22%3A%22xf%22%7D%2C%7B%22name%22%3A%22mDataProp_5%22%2C%22value%22' \
'%3A%22kssj%22%7D%2C%7B%22name%22%3A%22mDataProp_6%22%2C%22value%22%' \
'3A%22kscjView%22%7D%2C%7B%22name%22%3A%22mDataProp_7%22%2C%22value%' \
'22%3A%22kcsx%22%7D%2C%7B%22name%22%3A%22iSortingCols%22%2C%22value%' \
'22%3A0%7D%2C%7B%22name%22%3A%22bSortable_0%22%2C%22value%22%3Afalse' \
'%7D%2C%7B%22name%22%3A%22bSortable_1%22%2C%22value%22%3Afalse%7D%2C' \
'%7B%22name%22%3A%22bSortable_2%22%2C%22value%22%3Afalse%7D%2C%7B%22' \
'name%22%3A%22bSortable_3%22%2C%22value%22%3Afalse%7D%2C%7B%22name%2' \
'2%3A%22bSortable_4%22%2C%22value%22%3Afalse%7D%2C%7B%22name%22%3A%22' \
'bSortable_5%22%2C%22value%22%3Afalse%7D%2C%7B%22name%22%3A%22bSortab' \
'le_6%22%2C%22value%22%3Afalse%7D%2C%7B%22name%22%3A%22bSortable_7%22' \
'%2C%22value%22%3Afalse%7D%5D'
def strB2Q(ustring):
"""工具函数:全角转半角"""
rstring = ""
for uchar in ustring:
inside_code = ord(uchar)
        if inside_code == 32:  # the halfwidth space maps directly to the ideographic (fullwidth) space
inside_code = 12288
        elif (inside_code >= 33 and inside_code <= 126):  # other ASCII characters are shifted by a fixed offset to fullwidth
inside_code += 65248
rstring += chr(inside_code)
return rstring
def Align_CHstr(str, format_spec):
"""工具函数:处理一个中英文混杂str的填充对齐"""
format_spec = "{0:{1}" + format_spec + "}"
return format_spec.format(strB2Q(str), chr(12288))
def compare_xnxq(xnxq1, xnxq2):
"""返回 xnxq1 > xnxq2"""
tmp = xnxq1.split('-')
    xnxq1 = int(tmp[2]) + int(tmp[1])*10 + int(tmp[0])*10000  # compare numerically so the year dominates
tmp = xnxq2.split('-')
    xnxq2 = int(tmp[2]) + int(tmp[1])*10 + int(tmp[0])*10000
return xnxq1 > xnxq2
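# Illustrative usage sketch (the strings below are invented examples): Align_CHstr pads
# mixed Chinese/ASCII text with fullwidth spaces so printed columns stay aligned.
if __name__ == '__main__':
    print(Align_CHstr('高等数学', '<10') + '|')
    print(Align_CHstr('Calculus', '<10') + '|')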
| 45.776119
| 137
| 0.635474
|
HEADERS = {"Host": "bkjws.sdu.edu.cn",
"Connection": "keep-alive",
"Accept": "*/*",
"Origin": "http://bkjws.sdu.edu.cn",
"X-Requested-With": "XMLHttpRequest",
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Accept-Language": "zh-CN,zh;q=0.8"}
aoData = 'aoData=%5B%7B%22name%22%3A%22sEcho%22%2C%22value%22%3A1%7D%2C%7B%22' \
'name%22%3A%22iColumns%22%2C%22value%22%3A8%7D%2C%7B%22name%22%3A%22' \
'sColumns%22%2C%22value%22%3A%22%22%7D%2C%7B%22name%22%3A%22iDisplay' \
'Start%22%2C%22value%22%3A0%7D%2C%7B%22name%22%3A%22iDisplayLength%2' \
'2%2C%22value%22%3A-1%7D%2C%7B%22name%22%3A%22mDataProp_0%22%2C%22va' \
'lue%22%3A%22function%22%7D%2C%7B%22name%22%3A%22mDataProp_1%22%2C%2' \
'2value%22%3A%22kch%22%7D%2C%7B%22name%22%3A%22mDataProp_2%22%2C%22v' \
'alue%22%3A%22kcm%22%7D%2C%7B%22name%22%3A%22mDataProp_3%22%2C%22val' \
'ue%22%3A%22kxh%22%7D%2C%7B%22name%22%3A%22mDataProp_4%22%2C%22value' \
'%22%3A%22xf%22%7D%2C%7B%22name%22%3A%22mDataProp_5%22%2C%22value%22' \
'%3A%22kssj%22%7D%2C%7B%22name%22%3A%22mDataProp_6%22%2C%22value%22%' \
'3A%22kscjView%22%7D%2C%7B%22name%22%3A%22mDataProp_7%22%2C%22value%' \
'22%3A%22kcsx%22%7D%2C%7B%22name%22%3A%22iSortingCols%22%2C%22value%' \
'22%3A0%7D%2C%7B%22name%22%3A%22bSortable_0%22%2C%22value%22%3Afalse' \
'%7D%2C%7B%22name%22%3A%22bSortable_1%22%2C%22value%22%3Afalse%7D%2C' \
'%7B%22name%22%3A%22bSortable_2%22%2C%22value%22%3Afalse%7D%2C%7B%22' \
'name%22%3A%22bSortable_3%22%2C%22value%22%3Afalse%7D%2C%7B%22name%2' \
'2%3A%22bSortable_4%22%2C%22value%22%3Afalse%7D%2C%7B%22name%22%3A%22' \
'bSortable_5%22%2C%22value%22%3Afalse%7D%2C%7B%22name%22%3A%22bSortab' \
'le_6%22%2C%22value%22%3Afalse%7D%2C%7B%22name%22%3A%22bSortable_7%22' \
'%2C%22value%22%3Afalse%7D%5D'
def strB2Q(ustring):
rstring = ""
for uchar in ustring:
inside_code = ord(uchar)
if inside_code == 32:
inside_code = 12288
elif (inside_code >= 33 and inside_code <= 126):
inside_code += 65248
rstring += chr(inside_code)
return rstring
def Align_CHstr(str, format_spec):
format_spec = "{0:{1}" + format_spec + "}"
return format_spec.format(strB2Q(str), chr(12288))
def compare_xnxq(xnxq1, xnxq2):
tmp = xnxq1.split('-')
    xnxq1 = int(tmp[2]) + int(tmp[1])*10 + int(tmp[0])*10000
tmp = xnxq2.split('-')
    xnxq2 = int(tmp[2]) + int(tmp[1])*10 + int(tmp[0])*10000
return xnxq1 > xnxq2
| true
| true
|
f71a467939d4c660726511d6392456a49b013fa9
| 384
|
py
|
Python
|
sandbox/team_members/pudumula/ros/gazebo_ws_1/build/rrbot_description/catkin_generated/pkg.installspace.context.pc.py
|
Project-Heisenberg/quantum
|
f3ad8f4693007e45e80a88f928273adcfdc8529d
|
[
"Apache-2.0"
] | 1
|
2017-04-23T14:23:54.000Z
|
2017-04-23T14:23:54.000Z
|
sandbox/team_members/pudumula/ros/gazebo_ws_1/build/rrbot_description/catkin_generated/pkg.installspace.context.pc.py
|
Project-Heisenberg/quantum
|
f3ad8f4693007e45e80a88f928273adcfdc8529d
|
[
"Apache-2.0"
] | 13
|
2016-03-25T05:15:17.000Z
|
2018-05-30T15:53:12.000Z
|
sandbox/team_members/pudumula/ros/gazebo_ws_1/build/rrbot_description/catkin_generated/pkg.installspace.context.pc.py
|
Project-Heisenberg/quantum
|
f3ad8f4693007e45e80a88f928273adcfdc8529d
|
[
"Apache-2.0"
] | null | null | null |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rrbot_description"
PROJECT_SPACE_DIR = "/home/neo/ros/gazebo_ws_1/install"
PROJECT_VERSION = "0.0.0"
| 42.666667
| 68
| 0.710938
|
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rrbot_description"
PROJECT_SPACE_DIR = "/home/neo/ros/gazebo_ws_1/install"
PROJECT_VERSION = "0.0.0"
| true
| true
|
f71a46e6e4364c2e9a02fba2afe9a37df835f18f
| 2,165
|
py
|
Python
|
azure-mgmt-network/azure/mgmt/network/v2018_11_01/models/azure_firewall_network_rule_py3.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-mgmt-network/azure/mgmt/network/v2018_11_01/models/azure_firewall_network_rule_py3.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
azure-mgmt-network/azure/mgmt/network/v2018_11_01/models/azure_firewall_network_rule_py3.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AzureFirewallNetworkRule(Model):
"""Properties of the network rule.
:param name: Name of the network rule.
:type name: str
:param description: Description of the rule.
:type description: str
:param protocols: Array of AzureFirewallNetworkRuleProtocols.
:type protocols: list[str or
~azure.mgmt.network.v2018_11_01.models.AzureFirewallNetworkRuleProtocol]
:param source_addresses: List of source IP addresses for this rule.
:type source_addresses: list[str]
:param destination_addresses: List of destination IP addresses.
:type destination_addresses: list[str]
:param destination_ports: List of destination ports.
:type destination_ports: list[str]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'protocols': {'key': 'protocols', 'type': '[str]'},
'source_addresses': {'key': 'sourceAddresses', 'type': '[str]'},
'destination_addresses': {'key': 'destinationAddresses', 'type': '[str]'},
'destination_ports': {'key': 'destinationPorts', 'type': '[str]'},
}
def __init__(self, *, name: str=None, description: str=None, protocols=None, source_addresses=None, destination_addresses=None, destination_ports=None, **kwargs) -> None:
super(AzureFirewallNetworkRule, self).__init__(**kwargs)
self.name = name
self.description = description
self.protocols = protocols
self.source_addresses = source_addresses
self.destination_addresses = destination_addresses
self.destination_ports = destination_ports
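# Illustrative usage sketch (all values are invented, not part of the generated SDK
# file): declaring a rule that allows outbound DNS from a single subnet.
if __name__ == '__main__':
    example_rule = AzureFirewallNetworkRule(
        name='allow-dns',
        description='Allow outbound DNS lookups',
        protocols=['UDP'],
        source_addresses=['10.0.0.0/24'],
        destination_addresses=['168.63.129.16'],
        destination_ports=['53'],
    )
    print(example_rule.name, example_rule.destination_ports)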
| 43.3
| 174
| 0.647575
|
from msrest.serialization import Model
class AzureFirewallNetworkRule(Model):
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'protocols': {'key': 'protocols', 'type': '[str]'},
'source_addresses': {'key': 'sourceAddresses', 'type': '[str]'},
'destination_addresses': {'key': 'destinationAddresses', 'type': '[str]'},
'destination_ports': {'key': 'destinationPorts', 'type': '[str]'},
}
def __init__(self, *, name: str=None, description: str=None, protocols=None, source_addresses=None, destination_addresses=None, destination_ports=None, **kwargs) -> None:
super(AzureFirewallNetworkRule, self).__init__(**kwargs)
self.name = name
self.description = description
self.protocols = protocols
self.source_addresses = source_addresses
self.destination_addresses = destination_addresses
self.destination_ports = destination_ports
| true
| true
|
f71a47042dc21875d17453ebc714d5444f63f220
| 1,718
|
py
|
Python
|
docker/demultiplexing/demuxlet/generate_zarr.py
|
jggatter/cumulus
|
1dfd9dfce5a44ff867859db6f24a356f72c6ccdd
|
[
"BSD-3-Clause"
] | null | null | null |
docker/demultiplexing/demuxlet/generate_zarr.py
|
jggatter/cumulus
|
1dfd9dfce5a44ff867859db6f24a356f72c6ccdd
|
[
"BSD-3-Clause"
] | null | null | null |
docker/demultiplexing/demuxlet/generate_zarr.py
|
jggatter/cumulus
|
1dfd9dfce5a44ff867859db6f24a356f72c6ccdd
|
[
"BSD-3-Clause"
] | null | null | null |
import argparse
import pegasusio as pio
import pandas as pd
parser = argparse.ArgumentParser(description='Merge demuxlet result with gene-count matrix.')
parser.add_argument('demux_res', metavar = 'demux_result.best', help = 'Demuxlet demultiplexing results.')
parser.add_argument('raw_mat', metavar = 'raw_feature_bc_matrix.h5', help = 'Raw gene count matrix in 10x format.')
parser.add_argument('out_file', metavar = 'output_result.zarr', help = 'Output zarr file.')
args = parser.parse_args()
demux_type_dict = {'SNG': 'singlet', 'DBL': 'doublet', 'AMB': 'unknown'}
def write_output(assignment_file: str, input_mat_file: str, output_zarr_file: str) -> None:
df = pd.read_csv(assignment_file, sep = '\t', header = 0, index_col = 'BARCODE')
df.index = pd.Index([x[:-2] for x in df.index])
df['demux_type'] = df['DROPLET.TYPE'].apply(lambda s: demux_type_dict[s])
df['assignment'] = ''
df.loc[df['demux_type'] == 'singlet', 'assignment'] = df.loc[df['demux_type'] == 'singlet', 'SNG.BEST.GUESS']
df.loc[df['demux_type'] == 'doublet', 'assignment'] = df.loc[df['demux_type'] == 'doublet', 'DBL.BEST.GUESS'].apply(lambda s: ','.join(s.split(',')[:-1]))
data = pio.read_input(input_mat_file)
data.obs['demux_type'] = ''
data.obs['assignment'] = ''
idx = data.obs_names.isin(df.index)
barcodes = data.obs_names[idx]
df_valid = df.loc[barcodes, ['demux_type', 'assignment']]
data.obs.loc[idx, 'demux_type'] = df_valid['demux_type'].values
data.obs.loc[idx, 'assignment'] = df_valid['assignment'].values
pio.write_output(data, output_zarr_file, zarr_zipstore = True)
if __name__ == '__main__':
write_output(args.demux_res, args.raw_mat, args.out_file)
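# Hedged usage sketch (ours): the metavar names above double as example paths.
# From the shell:
#   python generate_zarr.py demux_result.best raw_feature_bc_matrix.h5 output_result.zarr
# Or, calling the function directly from Python:
#   write_output('demux_result.best', 'raw_feature_bc_matrix.h5', 'output_result.zarr')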
| 47.722222
| 158
| 0.689173
|
import argparse
import pegasusio as pio
import pandas as pd
parser = argparse.ArgumentParser(description='Merge demuxlet result with gene-count matrix.')
parser.add_argument('demux_res', metavar = 'demux_result.best', help = 'Demuxlet demultiplexing results.')
parser.add_argument('raw_mat', metavar = 'raw_feature_bc_matrix.h5', help = 'Raw gene count matrix in 10x format.')
parser.add_argument('out_file', metavar = 'output_result.zarr', help = 'Output zarr file.')
args = parser.parse_args()
demux_type_dict = {'SNG': 'singlet', 'DBL': 'doublet', 'AMB': 'unknown'}
def write_output(assignment_file: str, input_mat_file: str, output_zarr_file: str) -> None:
df = pd.read_csv(assignment_file, sep = '\t', header = 0, index_col = 'BARCODE')
df.index = pd.Index([x[:-2] for x in df.index])
df['demux_type'] = df['DROPLET.TYPE'].apply(lambda s: demux_type_dict[s])
df['assignment'] = ''
df.loc[df['demux_type'] == 'singlet', 'assignment'] = df.loc[df['demux_type'] == 'singlet', 'SNG.BEST.GUESS']
df.loc[df['demux_type'] == 'doublet', 'assignment'] = df.loc[df['demux_type'] == 'doublet', 'DBL.BEST.GUESS'].apply(lambda s: ','.join(s.split(',')[:-1]))
data = pio.read_input(input_mat_file)
data.obs['demux_type'] = ''
data.obs['assignment'] = ''
idx = data.obs_names.isin(df.index)
barcodes = data.obs_names[idx]
df_valid = df.loc[barcodes, ['demux_type', 'assignment']]
data.obs.loc[idx, 'demux_type'] = df_valid['demux_type'].values
data.obs.loc[idx, 'assignment'] = df_valid['assignment'].values
pio.write_output(data, output_zarr_file, zarr_zipstore = True)
if __name__ == '__main__':
write_output(args.demux_res, args.raw_mat, args.out_file)
| true
| true
|
f71a4726a2407751112c37ace25b054f8f423083
| 152
|
py
|
Python
|
tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_MovingAverage_BestCycle_AR.py
|
shaido987/pyaf
|
b9afd089557bed6b90b246d3712c481ae26a1957
|
[
"BSD-3-Clause"
] | 377
|
2016-10-13T20:52:44.000Z
|
2022-03-29T18:04:14.000Z
|
tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_MovingAverage_BestCycle_AR.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 160
|
2016-10-13T16:11:53.000Z
|
2022-03-28T04:21:34.000Z
|
tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_MovingAverage_BestCycle_AR.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 63
|
2017-03-09T14:51:18.000Z
|
2022-03-27T20:52:57.000Z
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['BoxCox'] , ['MovingAverage'] , ['BestCycle'] , ['AR'] );
| 38
| 79
| 0.743421
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['BoxCox'] , ['MovingAverage'] , ['BestCycle'] , ['AR'] );
| true
| true
|
f71a48ee730e59aec180da887d03b93c9e9a6c0f
| 40,848
|
py
|
Python
|
gym_miniworld/miniworld.py
|
HuangHaoyu1997/gym-miniworld
|
77dc24bf1b1ca8c2cfefadfe3e35a0deb2d08a1a
|
[
"Apache-2.0"
] | null | null | null |
gym_miniworld/miniworld.py
|
HuangHaoyu1997/gym-miniworld
|
77dc24bf1b1ca8c2cfefadfe3e35a0deb2d08a1a
|
[
"Apache-2.0"
] | null | null | null |
gym_miniworld/miniworld.py
|
HuangHaoyu1997/gym-miniworld
|
77dc24bf1b1ca8c2cfefadfe3e35a0deb2d08a1a
|
[
"Apache-2.0"
] | null | null | null |
import math
from enum import IntEnum
import numpy as np
import gym
from gym import spaces
from .random import *
from .opengl import *
from .objmesh import *
from .entity import *
from .math import *
from .params import *
# Default wall height for room
DEFAULT_WALL_HEIGHT=2.74
# Texture size/density in texels/meter
TEX_DENSITY = 512
def gen_texcs_wall(
tex,
min_x,
min_y,
width,
height
):
"""
Generate texture coordinates for a wall quad
"""
xc = (TEX_DENSITY / tex.width)
yc = (TEX_DENSITY / tex.height)
min_u = (min_x) * xc
max_u = (min_x + width) * xc
min_v = (min_y) * yc
max_v = (min_y + height) * yc
return np.array(
[
[min_u, min_v],
[min_u, max_v],
[max_u, max_v],
[max_u, min_v],
],
dtype=np.float32
)
def gen_texcs_floor(
tex,
poss
):
"""
Generate texture coordinates for the floor or ceiling
This is done by mapping x,z positions directly to texture
coordinates
"""
texc_mul = np.array(
[
TEX_DENSITY / tex.width,
TEX_DENSITY / tex.height
],
dtype=float
)
coords = np.stack([poss[:,0], poss[:,2]], axis=1) * texc_mul
return coords
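# A quick sanity-check sketch (ours, not in the original module): with a fake
# 512x512 texture, TEX_DENSITY / tex.width is 1, so a 2m-wide, wall-height quad
# maps to roughly 2 x 2.74 texture repeats.
def _example_texcs_wall():
    from collections import namedtuple
    FakeTex = namedtuple('FakeTex', ['width', 'height'])
    tex = FakeTex(width=512, height=512)
    return gen_texcs_wall(tex, 0, 0, 2, DEFAULT_WALL_HEIGHT)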
class Room:
"""
Represent an individual room and its contents
"""
def __init__(
self,
outline,
wall_height=DEFAULT_WALL_HEIGHT,
floor_tex='floor_tiles_bw',
wall_tex='concrete',
ceil_tex='concrete_tiles',
no_ceiling=False
):
        # The outline should have shape Nx2
assert len(outline.shape) == 2
assert outline.shape[1] == 2
assert outline.shape[0] >= 3
# Add a Y coordinate to the outline points
outline = np.insert(outline, 1, 0, axis=1)
# Number of outline vertices / walls
self.num_walls = outline.shape[0]
# List of 2D points forming the outline of the room
# Shape is Nx3
self.outline = outline
# Compute the min and max x, z extents
self.min_x = self.outline[:, 0].min()
self.max_x = self.outline[:, 0].max()
self.min_z = self.outline[:, 2].min()
self.max_z = self.outline[:, 2].max()
# Compute midpoint coordinates
self.mid_x = (self.max_x + self.min_x) / 2
self.mid_z = (self.max_z + self.min_z) / 2
# Compute approximate surface area
self.area = (self.max_x - self.min_x) * (self.max_z - self.min_z)
# Compute room edge directions and normals
        # Compute edge vectors (p1 - p0), where p1 is the next outline point;
        # for the last point, p1 wraps around to the first point
next_pts = np.concatenate([self.outline[1:], np.expand_dims(self.outline[0], axis=0)], axis=0)
self.edge_dirs = next_pts - self.outline
self.edge_dirs = (self.edge_dirs.T / np.linalg.norm(self.edge_dirs, axis=1)).T
self.edge_norms = -np.cross(self.edge_dirs, Y_VEC)
self.edge_norms = (self.edge_norms.T / np.linalg.norm(self.edge_norms, axis=1)).T
# Height of the room walls
self.wall_height = wall_height
# No ceiling flag
self.no_ceiling = no_ceiling
# Texture names
self.wall_tex_name = wall_tex
self.floor_tex_name = floor_tex
self.ceil_tex_name = ceil_tex
# Lists of portals, indexed by wall/edge index
self.portals = [[] for i in range(self.num_walls)]
# List of neighbor rooms
# Same length as list of portals
self.neighbors = []
def add_portal(
self,
edge,
start_pos=None,
end_pos=None,
min_x=None,
max_x=None,
min_z=None,
max_z=None,
min_y=0,
max_y=None
):
"""
Create a new portal/opening in a wall of this room
"""
if max_y == None:
max_y = self.wall_height
assert edge <= self.num_walls
assert max_y > min_y
# Get the edge points, compute the direction vector
e_p0 = self.outline[edge]
e_p1 = self.outline[(edge+1) % self.num_walls]
e_len = np.linalg.norm(e_p1 - e_p0)
e_dir = (e_p1 - e_p0) / e_len
x0, _, z0 = e_p0
x1, _, z1 = e_p1
dx, _, dz = e_dir
# If the portal extents are specified by x coordinates
if min_x != None:
assert min_z == None and max_z == None
assert start_pos == None and end_pos == None
assert x0 != x1
m0 = (min_x - x0) / dx
m1 = (max_x - x0) / dx
if m1 < m0:
m0, m1 = m1, m0
start_pos, end_pos = m0, m1
# If the portal extents are specified by z coordinates
elif min_z != None:
assert min_x == None and max_x == None
assert start_pos == None and end_pos == None
assert z0 != z1
m0 = (min_z - z0) / dz
m1 = (max_z - z0) / dz
if m1 < m0:
m0, m1 = m1, m0
start_pos, end_pos = m0, m1
else:
assert min_x == None and max_x == None
assert min_z == None and max_z == None
assert end_pos > start_pos
assert start_pos >= 0, "portal outside of wall extents"
assert end_pos <= e_len, "portal outside of wall extents"
self.portals[edge].append({
'start_pos': start_pos,
'end_pos': end_pos,
'min_y': min_y,
'max_y': max_y
})
# Sort the portals by start position
self.portals[edge].sort(key=lambda e: e['start_pos'])
return start_pos, end_pos
def point_inside(self, p):
"""
Test if a point is inside the room
"""
# Vector from edge start to test point
ap = p - self.outline
# Compute the dot products of normals to AP vectors
dotNAP = np.sum(self.edge_norms * ap, axis=1)
# The point is inside if all the dot products are greater than zero
return np.all(np.greater(dotNAP, 0))
def _gen_static_data(self, params, rng):
"""
Generate polygons and static data for this room
Needed for rendering and collision detection
Note: the wall polygons are quads, but the floor and
ceiling can be arbitrary n-gons
"""
# Load the textures and do texture randomization
self.wall_tex = Texture.get(self.wall_tex_name, rng)
self.floor_tex = Texture.get(self.floor_tex_name, rng)
self.ceil_tex = Texture.get(self.ceil_tex_name, rng)
# Generate the floor vertices
self.floor_verts = self.outline
self.floor_texcs = gen_texcs_floor(
self.floor_tex,
self.floor_verts
)
# Generate the ceiling vertices
# Flip the ceiling vertex order because of backface culling
self.ceil_verts = np.flip(self.outline, axis=0) + self.wall_height * Y_VEC
self.ceil_texcs = gen_texcs_floor(
self.ceil_tex,
self.ceil_verts
)
self.wall_verts = []
self.wall_norms = []
self.wall_texcs = []
self.wall_segs = []
def gen_seg_poly(
edge_p0,
side_vec,
seg_start,
seg_end,
min_y,
max_y
):
if seg_end == seg_start:
return
if min_y == max_y:
return
s_p0 = edge_p0 + seg_start * side_vec
s_p1 = edge_p0 + seg_end * side_vec
# If this polygon starts at ground level, add a collidable segment
if min_y == 0:
self.wall_segs.append(np.array([s_p1, s_p0]))
# Generate the vertices
# Vertices are listed in counter-clockwise order
self.wall_verts.append(s_p0 + min_y * Y_VEC)
self.wall_verts.append(s_p0 + max_y * Y_VEC)
self.wall_verts.append(s_p1 + max_y * Y_VEC)
self.wall_verts.append(s_p1 + min_y * Y_VEC)
# Compute the normal for the polygon
normal = np.cross(s_p1 - s_p0, Y_VEC)
normal = -normal / np.linalg.norm(normal)
for i in range(4):
self.wall_norms.append(normal)
# Generate the texture coordinates
texcs = gen_texcs_wall(
self.wall_tex,
seg_start,
min_y,
seg_end - seg_start,
max_y - min_y
)
self.wall_texcs.append(texcs)
# For each wall
for wall_idx in range(self.num_walls):
edge_p0 = self.outline[wall_idx, :]
edge_p1 = self.outline[(wall_idx+1) % self.num_walls, :]
wall_width = np.linalg.norm(edge_p1 - edge_p0)
side_vec = (edge_p1 - edge_p0) / wall_width
if len(self.portals[wall_idx]) > 0:
seg_end = self.portals[wall_idx][0]['start_pos']
else:
seg_end = wall_width
# Generate the first polygon (going up to the first portal)
gen_seg_poly(
edge_p0,
side_vec,
0,
seg_end,
0,
self.wall_height
)
# For each portal in this wall
for portal_idx, portal in enumerate(self.portals[wall_idx]):
portal = self.portals[wall_idx][portal_idx]
start_pos = portal['start_pos']
end_pos = portal['end_pos']
min_y = portal['min_y']
max_y = portal['max_y']
# Generate the bottom polygon
gen_seg_poly(
edge_p0,
side_vec,
start_pos,
end_pos,
0,
min_y
)
# Generate the top polygon
gen_seg_poly(
edge_p0,
side_vec,
start_pos,
end_pos,
max_y,
self.wall_height
)
if portal_idx < len(self.portals[wall_idx]) - 1:
next_portal = self.portals[wall_idx][portal_idx+1]
next_portal_start = next_portal['start_pos']
else:
next_portal_start = wall_width
# Generate the polygon going up to the next portal
gen_seg_poly(
edge_p0,
side_vec,
end_pos,
next_portal_start,
0,
self.wall_height
)
self.wall_verts = np.array(self.wall_verts)
self.wall_norms = np.array(self.wall_norms)
if len(self.wall_segs) > 0:
self.wall_segs = np.array(self.wall_segs)
else:
self.wall_segs = np.array([]).reshape(0, 2, 3)
if len(self.wall_texcs) > 0:
self.wall_texcs = np.concatenate(self.wall_texcs)
else:
self.wall_texcs = np.array([]).reshape(0, 2)
def _render(self):
"""
Render the static elements of the room
"""
glColor3f(1, 1, 1)
# Draw the floor
self.floor_tex.bind()
glBegin(GL_POLYGON)
glNormal3f(0, 1, 0)
for i in range(self.floor_verts.shape[0]):
glTexCoord2f(*self.floor_texcs[i, :])
glVertex3f(*self.floor_verts[i, :])
glEnd()
# Draw the ceiling
if not self.no_ceiling:
self.ceil_tex.bind()
glBegin(GL_POLYGON)
glNormal3f(0, -1, 0)
for i in range(self.ceil_verts.shape[0]):
glTexCoord2f(*self.ceil_texcs[i, :])
glVertex3f(*self.ceil_verts[i, :])
glEnd()
# Draw the walls
self.wall_tex.bind()
glBegin(GL_QUADS)
for i in range(self.wall_verts.shape[0]):
glNormal3f(*self.wall_norms[i, :])
glTexCoord2f(*self.wall_texcs[i, :])
glVertex3f(*self.wall_verts[i, :])
glEnd()
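# A small sketch (ours) of the point_inside test above: a 4x4 room whose outline
# is listed counter-clockwise (as in add_rect_room below) contains its center
# but not a point past the x=4 wall.
def _example_point_inside():
    outline = np.array([[4, 4], [4, 0], [0, 0], [0, 4]])
    room = Room(outline)
    assert room.point_inside(np.array([2.0, 0.0, 2.0]))
    assert not room.point_inside(np.array([5.0, 0.0, 2.0]))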
class MiniWorldEnv(gym.Env):
"""
Base class for MiniWorld environments. Implements the procedural
world generation and simulation logic.
"""
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : 30
}
# Enumeration of possible actions
class Actions(IntEnum):
# Turn left or right by a small amount
turn_left = 0
turn_right = 1
# Move forward or back by a small amount
move_forward = 2
move_back = 3
# Pick up or drop an object being carried
pickup = 4
drop = 5
# Toggle/activate an object
toggle = 6
# Done completing task
done = 7
def __init__(
self,
max_episode_steps=1500,
obs_width=80,
obs_height=60,
window_width=800,
window_height=600,
params=DEFAULT_PARAMS,
domain_rand=False
):
# Action enumeration for this environment
self.actions = MiniWorldEnv.Actions
# Actions are discrete integer values
self.action_space = spaces.Discrete(len(self.actions))
# Observations are RGB images with pixels in [0, 255]
self.observation_space = spaces.Box(
low=0,
high=255,
shape=(obs_height, obs_width, 3),
dtype=np.uint8
)
self.reward_range = (-math.inf, math.inf)
# Maximum number of steps per episode
self.max_episode_steps = max_episode_steps
# Simulation parameters, used for domain randomization
self.params = params
# Domain randomization enable/disable flag
self.domain_rand = domain_rand
# Window for displaying the environment to humans
self.window = None
# Invisible window to render into (shadow OpenGL context)
self.shadow_window = pyglet.window.Window(width=1, height=1, visible=False)
# Enable depth testing and backface culling
glEnable(GL_DEPTH_TEST)
glEnable(GL_CULL_FACE)
# Frame buffer used to render observations
self.obs_fb = FrameBuffer(obs_width, obs_height, 8)
# Frame buffer used for human visualization
self.vis_fb = FrameBuffer(window_width, window_height, 16)
# Compute the observation display size
self.obs_disp_width = 256
self.obs_disp_height = obs_height * (self.obs_disp_width / obs_width)
# For displaying text
self.text_label = pyglet.text.Label(
font_name="Arial",
font_size=14,
multiline=True,
width=400,
x = window_width + 5,
y = window_height - (self.obs_disp_height + 19)
)
# Initialize the state
self.seed()
self.reset()
def close(self):
pass
def seed(self, seed=None):
self.rand = RandGen(seed)
return [seed]
def reset(self):
"""
Reset the simulation at the start of a new episode
This also randomizes many environment parameters (domain randomization)
"""
# Step count since episode start
self.step_count = 0
# Create the agent
self.agent = Agent()
# List of entities contained
self.entities = []
# List of rooms in the world
self.rooms = []
# Wall segments for collision detection
# Shape is (N, 2, 3)
self.wall_segs = []
# Generate the world
self._gen_world()
# Check if domain randomization is enabled or not
rand = self.rand if self.domain_rand else None
# Randomize elements of the world (domain randomization)
self.params.sample_many(rand, self, [
'sky_color',
'light_pos',
'light_color',
'light_ambient'
])
# Get the max forward step distance
self.max_forward_step = self.params.get_max('forward_step')
# Randomize parameters of the entities
for ent in self.entities:
ent.randomize(self.params, rand)
# Compute the min and max x, z extents of the whole floorplan
self.min_x = min([r.min_x for r in self.rooms])
self.max_x = max([r.max_x for r in self.rooms])
self.min_z = min([r.min_z for r in self.rooms])
self.max_z = max([r.max_z for r in self.rooms])
# Generate static data
if len(self.wall_segs) == 0:
self._gen_static_data()
# Pre-compile static parts of the environment into a display list
self._render_static()
# Generate the first camera image
obs = self.render_obs()
# Return first observation
return obs
def _get_carry_pos(self, agent_pos, ent):
"""
Compute the position at which to place an object being carried
"""
dist = self.agent.radius + ent.radius + self.max_forward_step
pos = agent_pos + self.agent.dir_vec * 1.05 * dist
# Adjust the Y-position so the object is visible while being carried
y_pos = max(self.agent.cam_height - ent.height - 0.3, 0)
pos = pos + Y_VEC * y_pos
return pos
def move_agent(self, fwd_dist, fwd_drift):
"""
Move the agent forward
"""
next_pos = (
self.agent.pos +
self.agent.dir_vec * fwd_dist +
self.agent.right_vec * fwd_drift
)
if self.intersect(self.agent, next_pos, self.agent.radius):
return False
carrying = self.agent.carrying
if carrying:
next_carrying_pos = self._get_carry_pos(next_pos, carrying)
if self.intersect(carrying, next_carrying_pos, carrying.radius):
return False
carrying.pos = next_carrying_pos
self.agent.pos = next_pos
return True
def turn_agent(self, turn_angle):
"""
Turn the agent left or right
"""
turn_angle *= (math.pi / 180)
orig_dir = self.agent.dir
self.agent.dir += turn_angle
carrying = self.agent.carrying
if carrying:
pos = self._get_carry_pos(self.agent.pos, carrying)
if self.intersect(carrying, pos, carrying.radius):
self.agent.dir = orig_dir
return False
carrying.pos = pos
carrying.dir = self.agent.dir
return True
def step(self, action):
"""
Perform one action and update the simulation
"""
self.step_count += 1
rand = self.rand if self.domain_rand else None
fwd_step = self.params.sample(rand, 'forward_step')
fwd_drift = self.params.sample(rand, 'forward_drift')
turn_step = self.params.sample(rand, 'turn_step')
if action == self.actions.move_forward:
self.move_agent(fwd_step, fwd_drift)
elif action == self.actions.move_back:
self.move_agent(-fwd_step, fwd_drift)
elif action == self.actions.turn_left:
self.turn_agent(turn_step)
elif action == self.actions.turn_right:
self.turn_agent(-turn_step)
# Pick up an object
elif action == self.actions.pickup:
# Position at which we will test for an intersection
test_pos = self.agent.pos + self.agent.dir_vec * 1.5 * self.agent.radius
ent = self.intersect(self.agent, test_pos, 1.2 * self.agent.radius)
if not self.agent.carrying:
if isinstance(ent, Entity):
if not ent.is_static:
self.agent.carrying = ent
# Drop an object being carried
elif action == self.actions.drop:
if self.agent.carrying:
self.agent.carrying.pos[1] = 0
self.agent.carrying = None
# If we are carrying an object, update its position as we move
if self.agent.carrying:
ent_pos = self._get_carry_pos(self.agent.pos, self.agent.carrying)
self.agent.carrying.pos = ent_pos
self.agent.carrying.dir = self.agent.dir
# Generate the current camera image
obs = self.render_obs()
# If the maximum time step count is reached
if self.step_count >= self.max_episode_steps:
done = True
reward = 0
return obs, reward, done, {}
reward = 0
done = False
return obs, reward, done, {}
def add_rect_room(
self,
min_x,
max_x,
min_z,
max_z,
**kwargs
):
"""
Create a rectangular room
"""
# 2D outline coordinates of the room,
# listed in counter-clockwise order when viewed from the top
outline = np.array([
# East wall
[max_x, max_z],
# North wall
[max_x, min_z],
# West wall
[min_x, min_z],
# South wall
[min_x, max_z],
])
return self.add_room(outline=outline, **kwargs)
def add_room(self, **kwargs):
"""
Create a new room
"""
assert len(self.wall_segs) == 0, "cannot add rooms after static data is generated"
room = Room(**kwargs)
self.rooms.append(room)
return room
def connect_rooms(
self,
room_a,
room_b,
min_x=None,
max_x=None,
min_z=None,
max_z=None,
max_y=None
):
"""
Connect two rooms along facing edges
"""
def find_facing_edges():
for idx_a in range(room_a.num_walls):
norm_a = room_a.edge_norms[idx_a]
for idx_b in range(room_b.num_walls):
norm_b = room_b.edge_norms[idx_b]
# Reject edges that are not facing each other
if np.dot(norm_a, norm_b) > -0.9:
continue
dir = room_b.outline[idx_b] - room_a.outline[idx_a]
# Reject edges that are not touching
if np.dot(norm_a, dir) > 0.05:
continue
return idx_a, idx_b
return None, None
idx_a, idx_b = find_facing_edges()
assert idx_a != None, "matching edges not found in connect_rooms"
start_a, end_a = room_a.add_portal(
edge=idx_a,
min_x=min_x,
max_x=max_x,
min_z=min_z,
max_z=max_z,
max_y=max_y
)
start_b, end_b = room_b.add_portal(
edge=idx_b,
min_x=min_x,
max_x=max_x,
min_z=min_z,
max_z=max_z,
max_y=max_y
)
a = room_a.outline[idx_a] + room_a.edge_dirs[idx_a] * start_a
b = room_a.outline[idx_a] + room_a.edge_dirs[idx_a] * end_a
c = room_b.outline[idx_b] + room_b.edge_dirs[idx_b] * start_b
d = room_b.outline[idx_b] + room_b.edge_dirs[idx_b] * end_b
# If the portals are directly connected, stop
if np.linalg.norm(a - d) < 0.001:
return
len_a = np.linalg.norm(b - a)
len_b = np.linalg.norm(d - c)
# Room outline points must be specified in counter-clockwise order
outline = np.stack([c, b, a, d])
outline = np.stack([outline[:, 0], outline[:, 2]], axis=1)
max_y = max_y if max_y != None else room_a.wall_height
room = Room(
outline,
wall_height=max_y,
wall_tex=room_a.wall_tex_name,
floor_tex=room_a.floor_tex_name,
ceil_tex=room_a.ceil_tex_name,
no_ceiling=room_a.no_ceiling,
)
self.rooms.append(room)
room.add_portal(1, start_pos=0, end_pos=len_a)
room.add_portal(3, start_pos=0, end_pos=len_b)
def place_entity(
self,
ent,
room=None,
pos=None,
dir=None,
min_x=None,
max_x=None,
min_z=None,
max_z=None
):
"""
Place an entity/object in the world.
Find a position that doesn't intersect with any other object.
"""
assert len(self.rooms) > 0, "create rooms before calling place_entity"
assert ent.radius != None, "entity must have physical size defined"
# Generate collision detection data
if len(self.wall_segs) == 0:
self._gen_static_data()
        # If an exact position is specified
if pos is not None:
ent.dir = dir if dir != None else self.rand.float(-math.pi, math.pi)
ent.pos = pos
self.entities.append(ent)
return ent
# Keep retrying until we find a suitable position
while True:
# Pick a room, sample rooms proportionally to floor surface area
r = room if room else self.rand.choice(self.rooms, probs=self.room_probs)
# Choose a random point within the square bounding box of the room
lx = r.min_x if min_x == None else min_x
hx = r.max_x if max_x == None else max_x
lz = r.min_z if min_z == None else min_z
hz = r.max_z if max_z == None else max_z
pos = self.rand.float(
low =[lx + ent.radius, 0, lz + ent.radius],
high=[hx - ent.radius, 0, hz - ent.radius]
)
# Make sure the position is within the room's outline
if not r.point_inside(pos):
continue
# Make sure the position doesn't intersect with any walls
if self.intersect(ent, pos, ent.radius):
continue
# Pick a direction
d = dir if dir != None else self.rand.float(-math.pi, math.pi)
ent.pos = pos
ent.dir = d
break
self.entities.append(ent)
return ent
def place_agent(
self,
room=None,
dir=None,
min_x=None,
max_x=None,
min_z=None,
max_z=None
):
"""
Place the agent in the environment at a random position
and orientation
"""
return self.place_entity(
self.agent,
room=room,
dir=dir,
min_x=min_x,
max_x=max_x,
min_z=min_z,
max_z=max_z
)
def intersect(self, ent, pos, radius):
"""
Check if an entity intersects with the world
"""
# Ignore the Y position
px, _, pz = pos
pos = np.array([px, 0, pz])
# Check for intersection with walls
if intersect_circle_segs(pos, radius, self.wall_segs):
return True
# Check for entity intersection
for ent2 in self.entities:
# Entities can't intersect with themselves
if ent2 is ent:
continue
px, _, pz = ent2.pos
pos2 = np.array([px, 0, pz])
d = np.linalg.norm(pos2 - pos)
if d < radius + ent2.radius:
return ent2
return None
def near(self, ent0, ent1=None):
"""
Test if the two entities are near each other.
Used for "go to" or "put next" type tasks
"""
if ent1 == None:
ent1 = self.agent
dist = np.linalg.norm(ent0.pos - ent1.pos)
return dist < ent0.radius + ent1.radius + 1.1 * self.max_forward_step
def _load_tex(self, tex_name):
"""
Load a texture, with or without domain randomization
"""
rand = self.rand if self.params.sample(self.rand, 'tex_rand') else None
return Texture.get(tex_name, rand)
def _gen_static_data(self):
"""
Generate static data needed for rendering and collision detection
"""
# Generate the static data for each room
for room in self.rooms:
room._gen_static_data(
self.params,
self.rand if self.domain_rand else None
)
# Concatenate the wall segments
self.wall_segs = np.concatenate([r.wall_segs for r in self.rooms])
# Room selection probabilities
self.room_probs = np.array([r.area for r in self.rooms], dtype=float)
self.room_probs /= np.sum(self.room_probs)
def _gen_world(self):
"""
Generate the world. Derived classes must implement this method.
"""
raise NotImplementedError
def _reward(self):
"""
Default sparse reward computation
"""
return 1.0 - 0.2 * (self.step_count / self.max_episode_steps)
def _render_static(self):
"""
Render the static elements of the scene into a display list.
Called once at the beginning of each episode.
"""
# TODO: manage this automatically
# glIsList
glDeleteLists(1, 1);
glNewList(1, GL_COMPILE);
# Light position
glLightfv(GL_LIGHT0, GL_POSITION, (GLfloat*4)(*self.light_pos + [1]))
# Background/minimum light level
glLightfv(GL_LIGHT0, GL_AMBIENT, (GLfloat*4)(*self.light_ambient))
# Diffuse light color
glLightfv(GL_LIGHT0, GL_DIFFUSE, (GLfloat*4)(*self.light_color))
#glLightf(GL_LIGHT0, GL_SPOT_CUTOFF, 180)
#glLightf(GL_LIGHT0, GL_SPOT_EXPONENT, 0)
#glLightf(GL_LIGHT0, GL_CONSTANT_ATTENUATION, 0)
#glLightf(GL_LIGHT0, GL_LINEAR_ATTENUATION, 0)
#glLightf(GL_LIGHT0, GL_QUADRATIC_ATTENUATION, 0)
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glShadeModel(GL_SMOOTH)
glEnable(GL_COLOR_MATERIAL)
glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE)
# Render the rooms
glEnable(GL_TEXTURE_2D)
for room in self.rooms:
room._render()
# Render the static entities
for ent in self.entities:
if ent.is_static:
ent.render()
glEndList()
def _render_world(
self,
frame_buffer,
render_agent
):
"""
Render the world from a given camera position into a frame buffer,
and produce a numpy image array as output.
"""
# Call the display list for the static parts of the environment
glCallList(1)
# TODO: keep the non-static entities in a different list for efficiency?
# Render the non-static entities
for ent in self.entities:
if not ent.is_static and ent is not self.agent:
ent.render()
#ent.draw_bound()
if render_agent:
self.agent.render()
# Resolve the rendered image into a numpy array
img = frame_buffer.resolve()
return img
def render_top_view(self, frame_buffer=None):
"""
Render a top view of the whole map (from above)
"""
if frame_buffer == None:
frame_buffer = self.obs_fb
# Switch to the default OpenGL context
# This is necessary on Linux Nvidia drivers
self.shadow_window.switch_to()
# Bind the frame buffer before rendering into it
frame_buffer.bind()
# Clear the color and depth buffers
glClearColor(*self.sky_color, 1.0)
glClearDepth(1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# Scene extents to render
min_x = self.min_x - 1
max_x = self.max_x + 1
min_z = self.min_z - 1
max_z = self.max_z + 1
width = max_x - min_x
height = max_z - min_z
aspect = width / height
fb_aspect = frame_buffer.width / frame_buffer.height
# Adjust the aspect extents to match the frame buffer aspect
if aspect > fb_aspect:
# Want to add to denom, add to height
new_h = width / fb_aspect
h_diff = new_h - height
min_z -= h_diff / 2
max_z += h_diff / 2
elif aspect < fb_aspect:
# Want to add to num, add to width
new_w = height * fb_aspect
w_diff = new_w - width
min_x -= w_diff / 2
max_x += w_diff / 2
# Set the projection matrix
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(
min_x,
max_x,
-max_z,
-min_z,
-100, 100.0
)
# Setup the camera
# Y maps to +Z, Z maps to +Y
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
m = [
1, 0, 0, 0,
0, 0, 1, 0,
0, -1, 0, 0,
0, 0, 0, 1,
]
glLoadMatrixf((GLfloat * len(m))(*m))
return self._render_world(
frame_buffer,
render_agent=True
)
def render_obs(self, frame_buffer=None):
"""
Render an observation from the point of view of the agent
"""
if frame_buffer == None:
frame_buffer = self.obs_fb
# Switch to the default OpenGL context
# This is necessary on Linux Nvidia drivers
self.shadow_window.switch_to()
# Bind the frame buffer before rendering into it
frame_buffer.bind()
# Clear the color and depth buffers
glClearColor(*self.sky_color, 1.0)
glClearDepth(1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# Set the projection matrix
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(
self.agent.cam_fov_y,
frame_buffer.width / float(frame_buffer.height),
0.04,
100.0
)
# Setup the camera
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
gluLookAt(
# Eye position
*self.agent.cam_pos,
# Target
*(self.agent.cam_pos + self.agent.cam_dir),
# Up vector
0, 1.0, 0.0
)
return self._render_world(
frame_buffer,
render_agent=False
)
def render_depth(self, frame_buffer=None):
"""
Produce a depth map
Values are floating-point, map shape is (H,W,1)
Distances are in meters from the observer
"""
if frame_buffer == None:
frame_buffer = self.obs_fb
# Render the world
self.render_obs(frame_buffer)
return frame_buffer.get_depth_map(0.04, 100.0)
def get_visible_ents(self):
"""
Get a list of visible entities.
Uses OpenGL occlusion queries to approximate visibility.
:return: set of objects visible to the agent
"""
# Allocate the occlusion query ids
num_ents = len(self.entities)
query_ids = (GLuint * num_ents)()
glGenQueries(num_ents, query_ids)
# Switch to the default OpenGL context
# This is necessary on Linux Nvidia drivers
self.shadow_window.switch_to()
# Use the small observation frame buffer
frame_buffer = self.obs_fb
# Bind the frame buffer before rendering into it
frame_buffer.bind()
# Clear the color and depth buffers
glClearColor(*self.sky_color, 1.0)
glClearDepth(1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# Set the projection matrix
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(
self.agent.cam_fov_y,
frame_buffer.width / float(frame_buffer.height),
0.04,
100.0
)
        # Setup the camera
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
gluLookAt(
# Eye position
*self.agent.cam_pos,
# Target
*(self.agent.cam_pos + self.agent.cam_dir),
# Up vector
0, 1.0, 0.0
)
# Render the rooms, without texturing
glDisable(GL_TEXTURE_2D)
for room in self.rooms:
room._render()
# For each entity
for ent_idx, ent in enumerate(self.entities):
if ent is self.agent:
continue
glBeginQuery(GL_ANY_SAMPLES_PASSED, query_ids[ent_idx])
pos = ent.pos
#glColor3f(1, 0, 0)
drawBox(
x_min=pos[0] - 0.1,
x_max=pos[0] + 0.1,
y_min=pos[1],
y_max=pos[1] + 0.2,
z_min=pos[2] - 0.1,
z_max=pos[2] + 0.1
)
glEndQuery(GL_ANY_SAMPLES_PASSED)
vis_objs = set()
# Get query results
for ent_idx, ent in enumerate(self.entities):
if ent is self.agent:
continue
visible = (GLuint*1)(1)
glGetQueryObjectuiv(query_ids[ent_idx], GL_QUERY_RESULT, visible);
if visible[0] != 0:
vis_objs.add(ent)
# Free the occlusion query ids
glDeleteQueries(1, query_ids)
#img = frame_buffer.resolve()
#return img
return vis_objs
def render(self, mode='human', close=False, view='agent'):
"""
Render the environment for human viewing
"""
if close:
if self.window:
self.window.close()
return
# Render the human-view image
assert view in ['agent', 'top']
if view == 'agent':
img = self.render_obs(self.vis_fb)
else:
img = self.render_top_view(self.vis_fb)
img_width = img.shape[1]
img_height = img.shape[0]
if mode == 'rgb_array':
return img
# Render the agent's view
obs = self.render_obs()
obs_width = obs.shape[1]
obs_height = obs.shape[0]
window_width = img_width + self.obs_disp_width
window_height = img_height
if self.window is None:
config = pyglet.gl.Config(double_buffer=True)
self.window = pyglet.window.Window(
width=window_width,
height=window_height,
resizable=False,
config=config
)
self.window.clear()
self.window.switch_to()
# Bind the default frame buffer
glBindFramebuffer(GL_FRAMEBUFFER, 0);
# Clear the color and depth buffers
glClearColor(0, 0, 0, 1.0)
glClearDepth(1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        # Setup orthogonal projection
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glOrtho(0, window_width, 0, window_height, 0, 10)
# Draw the human render to the rendering window
img_flip = np.ascontiguousarray(np.flip(img, axis=0))
img_data = pyglet.image.ImageData(
img_width,
img_height,
'RGB',
img_flip.ctypes.data_as(POINTER(GLubyte)),
pitch=img_width * 3,
)
img_data.blit(
0,
0,
0,
width=img_width,
height=img_height
)
# Draw the observation
obs = np.ascontiguousarray(np.flip(obs, axis=0))
obs_data = pyglet.image.ImageData(
obs_width,
obs_height,
'RGB',
obs.ctypes.data_as(POINTER(GLubyte)),
pitch=obs_width * 3,
)
obs_data.blit(
img_width,
img_height - self.obs_disp_height,
0,
width=self.obs_disp_width,
height=self.obs_disp_height
)
# Draw the text label in the window
self.text_label.text = "pos: (%.2f, %.2f, %.2f)\nangle: %d\nsteps: %d" % (
*self.agent.pos,
int(self.agent.dir * 180 / math.pi) % 360,
self.step_count
)
self.text_label.draw()
# Force execution of queued commands
glFlush()
# If we are not running the Pyglet event loop,
# we have to manually flip the buffers and dispatch events
if mode == 'human':
self.window.flip()
self.window.dispatch_events()
return img
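# A minimal derived-environment sketch (ours, not part of gym-miniworld): one
# rectangular room, a red box to reach, and an agent placed at random. Box comes
# from .entity via the star imports above; the reward shaping is illustrative.
class _ExampleOneRoomEnv(MiniWorldEnv):
    def _gen_world(self):
        self.add_rect_room(min_x=0, max_x=6, min_z=0, max_z=6)
        self.box = self.place_entity(Box(color='red'))
        self.place_agent()
    def step(self, action):
        obs, reward, done, info = super().step(action)
        if self.near(self.box):
            reward += self._reward()
            done = True
        return obs, reward, done, info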
| 28.665263
| 102
| 0.548815
|
import math
from enum import IntEnum
import numpy as np
import gym
from gym import spaces
from .random import *
from .opengl import *
from .objmesh import *
from .entity import *
from .math import *
from .params import *
DEFAULT_WALL_HEIGHT=2.74
TEX_DENSITY = 512
def gen_texcs_wall(
tex,
min_x,
min_y,
width,
height
):
xc = (TEX_DENSITY / tex.width)
yc = (TEX_DENSITY / tex.height)
min_u = (min_x) * xc
max_u = (min_x + width) * xc
min_v = (min_y) * yc
max_v = (min_y + height) * yc
return np.array(
[
[min_u, min_v],
[min_u, max_v],
[max_u, max_v],
[max_u, min_v],
],
dtype=np.float32
)
def gen_texcs_floor(
tex,
poss
):
texc_mul = np.array(
[
TEX_DENSITY / tex.width,
TEX_DENSITY / tex.height
],
dtype=float
)
coords = np.stack([poss[:,0], poss[:,2]], axis=1) * texc_mul
return coords
class Room:
def __init__(
self,
outline,
wall_height=DEFAULT_WALL_HEIGHT,
floor_tex='floor_tiles_bw',
wall_tex='concrete',
ceil_tex='concrete_tiles',
no_ceiling=False
):
assert len(outline.shape) == 2
assert outline.shape[1] == 2
assert outline.shape[0] >= 3
outline = np.insert(outline, 1, 0, axis=1)
self.num_walls = outline.shape[0]
self.outline = outline
self.min_x = self.outline[:, 0].min()
self.max_x = self.outline[:, 0].max()
self.min_z = self.outline[:, 2].min()
self.max_z = self.outline[:, 2].max()
self.mid_x = (self.max_x + self.min_x) / 2
self.mid_z = (self.max_z + self.min_z) / 2
self.area = (self.max_x - self.min_x) * (self.max_z - self.min_z)
next_pts = np.concatenate([self.outline[1:], np.expand_dims(self.outline[0], axis=0)], axis=0)
self.edge_dirs = next_pts - self.outline
self.edge_dirs = (self.edge_dirs.T / np.linalg.norm(self.edge_dirs, axis=1)).T
self.edge_norms = -np.cross(self.edge_dirs, Y_VEC)
self.edge_norms = (self.edge_norms.T / np.linalg.norm(self.edge_norms, axis=1)).T
self.wall_height = wall_height
self.no_ceiling = no_ceiling
self.wall_tex_name = wall_tex
self.floor_tex_name = floor_tex
self.ceil_tex_name = ceil_tex
self.portals = [[] for i in range(self.num_walls)]
self.neighbors = []
def add_portal(
self,
edge,
start_pos=None,
end_pos=None,
min_x=None,
max_x=None,
min_z=None,
max_z=None,
min_y=0,
max_y=None
):
if max_y == None:
max_y = self.wall_height
assert edge <= self.num_walls
assert max_y > min_y
e_p0 = self.outline[edge]
e_p1 = self.outline[(edge+1) % self.num_walls]
e_len = np.linalg.norm(e_p1 - e_p0)
e_dir = (e_p1 - e_p0) / e_len
x0, _, z0 = e_p0
x1, _, z1 = e_p1
dx, _, dz = e_dir
if min_x != None:
assert min_z == None and max_z == None
assert start_pos == None and end_pos == None
assert x0 != x1
m0 = (min_x - x0) / dx
m1 = (max_x - x0) / dx
if m1 < m0:
m0, m1 = m1, m0
start_pos, end_pos = m0, m1
elif min_z != None:
assert min_x == None and max_x == None
assert start_pos == None and end_pos == None
assert z0 != z1
m0 = (min_z - z0) / dz
m1 = (max_z - z0) / dz
if m1 < m0:
m0, m1 = m1, m0
start_pos, end_pos = m0, m1
else:
assert min_x == None and max_x == None
assert min_z == None and max_z == None
assert end_pos > start_pos
assert start_pos >= 0, "portal outside of wall extents"
assert end_pos <= e_len, "portal outside of wall extents"
self.portals[edge].append({
'start_pos': start_pos,
'end_pos': end_pos,
'min_y': min_y,
'max_y': max_y
})
self.portals[edge].sort(key=lambda e: e['start_pos'])
return start_pos, end_pos
def point_inside(self, p):
ap = p - self.outline
dotNAP = np.sum(self.edge_norms * ap, axis=1)
return np.all(np.greater(dotNAP, 0))
def _gen_static_data(self, params, rng):
self.wall_tex = Texture.get(self.wall_tex_name, rng)
self.floor_tex = Texture.get(self.floor_tex_name, rng)
self.ceil_tex = Texture.get(self.ceil_tex_name, rng)
self.floor_verts = self.outline
self.floor_texcs = gen_texcs_floor(
self.floor_tex,
self.floor_verts
)
self.ceil_verts = np.flip(self.outline, axis=0) + self.wall_height * Y_VEC
self.ceil_texcs = gen_texcs_floor(
self.ceil_tex,
self.ceil_verts
)
self.wall_verts = []
self.wall_norms = []
self.wall_texcs = []
self.wall_segs = []
def gen_seg_poly(
edge_p0,
side_vec,
seg_start,
seg_end,
min_y,
max_y
):
if seg_end == seg_start:
return
if min_y == max_y:
return
s_p0 = edge_p0 + seg_start * side_vec
s_p1 = edge_p0 + seg_end * side_vec
if min_y == 0:
self.wall_segs.append(np.array([s_p1, s_p0]))
self.wall_verts.append(s_p0 + min_y * Y_VEC)
self.wall_verts.append(s_p0 + max_y * Y_VEC)
self.wall_verts.append(s_p1 + max_y * Y_VEC)
self.wall_verts.append(s_p1 + min_y * Y_VEC)
normal = np.cross(s_p1 - s_p0, Y_VEC)
normal = -normal / np.linalg.norm(normal)
for i in range(4):
self.wall_norms.append(normal)
texcs = gen_texcs_wall(
self.wall_tex,
seg_start,
min_y,
seg_end - seg_start,
max_y - min_y
)
self.wall_texcs.append(texcs)
for wall_idx in range(self.num_walls):
edge_p0 = self.outline[wall_idx, :]
edge_p1 = self.outline[(wall_idx+1) % self.num_walls, :]
wall_width = np.linalg.norm(edge_p1 - edge_p0)
side_vec = (edge_p1 - edge_p0) / wall_width
if len(self.portals[wall_idx]) > 0:
seg_end = self.portals[wall_idx][0]['start_pos']
else:
seg_end = wall_width
gen_seg_poly(
edge_p0,
side_vec,
0,
seg_end,
0,
self.wall_height
)
for portal_idx, portal in enumerate(self.portals[wall_idx]):
portal = self.portals[wall_idx][portal_idx]
start_pos = portal['start_pos']
end_pos = portal['end_pos']
min_y = portal['min_y']
max_y = portal['max_y']
gen_seg_poly(
edge_p0,
side_vec,
start_pos,
end_pos,
0,
min_y
)
gen_seg_poly(
edge_p0,
side_vec,
start_pos,
end_pos,
max_y,
self.wall_height
)
if portal_idx < len(self.portals[wall_idx]) - 1:
next_portal = self.portals[wall_idx][portal_idx+1]
next_portal_start = next_portal['start_pos']
else:
next_portal_start = wall_width
gen_seg_poly(
edge_p0,
side_vec,
end_pos,
next_portal_start,
0,
self.wall_height
)
self.wall_verts = np.array(self.wall_verts)
self.wall_norms = np.array(self.wall_norms)
if len(self.wall_segs) > 0:
self.wall_segs = np.array(self.wall_segs)
else:
self.wall_segs = np.array([]).reshape(0, 2, 3)
if len(self.wall_texcs) > 0:
self.wall_texcs = np.concatenate(self.wall_texcs)
else:
self.wall_texcs = np.array([]).reshape(0, 2)
def _render(self):
glColor3f(1, 1, 1)
self.floor_tex.bind()
glBegin(GL_POLYGON)
glNormal3f(0, 1, 0)
for i in range(self.floor_verts.shape[0]):
glTexCoord2f(*self.floor_texcs[i, :])
glVertex3f(*self.floor_verts[i, :])
glEnd()
if not self.no_ceiling:
self.ceil_tex.bind()
glBegin(GL_POLYGON)
glNormal3f(0, -1, 0)
for i in range(self.ceil_verts.shape[0]):
glTexCoord2f(*self.ceil_texcs[i, :])
glVertex3f(*self.ceil_verts[i, :])
glEnd()
self.wall_tex.bind()
glBegin(GL_QUADS)
for i in range(self.wall_verts.shape[0]):
glNormal3f(*self.wall_norms[i, :])
glTexCoord2f(*self.wall_texcs[i, :])
glVertex3f(*self.wall_verts[i, :])
glEnd()
class MiniWorldEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : 30
}
class Actions(IntEnum):
turn_left = 0
turn_right = 1
move_forward = 2
move_back = 3
pickup = 4
drop = 5
toggle = 6
done = 7
def __init__(
self,
max_episode_steps=1500,
obs_width=80,
obs_height=60,
window_width=800,
window_height=600,
params=DEFAULT_PARAMS,
domain_rand=False
):
self.actions = MiniWorldEnv.Actions
self.action_space = spaces.Discrete(len(self.actions))
self.observation_space = spaces.Box(
low=0,
high=255,
shape=(obs_height, obs_width, 3),
dtype=np.uint8
)
self.reward_range = (-math.inf, math.inf)
self.max_episode_steps = max_episode_steps
self.params = params
self.domain_rand = domain_rand
self.window = None
self.shadow_window = pyglet.window.Window(width=1, height=1, visible=False)
glEnable(GL_DEPTH_TEST)
glEnable(GL_CULL_FACE)
self.obs_fb = FrameBuffer(obs_width, obs_height, 8)
self.vis_fb = FrameBuffer(window_width, window_height, 16)
self.obs_disp_width = 256
self.obs_disp_height = obs_height * (self.obs_disp_width / obs_width)
self.text_label = pyglet.text.Label(
font_name="Arial",
font_size=14,
multiline=True,
width=400,
x = window_width + 5,
y = window_height - (self.obs_disp_height + 19)
)
self.seed()
self.reset()
def close(self):
pass
def seed(self, seed=None):
self.rand = RandGen(seed)
return [seed]
def reset(self):
self.step_count = 0
self.agent = Agent()
self.entities = []
self.rooms = []
self.wall_segs = []
self._gen_world()
rand = self.rand if self.domain_rand else None
self.params.sample_many(rand, self, [
'sky_color',
'light_pos',
'light_color',
'light_ambient'
])
self.max_forward_step = self.params.get_max('forward_step')
for ent in self.entities:
ent.randomize(self.params, rand)
self.min_x = min([r.min_x for r in self.rooms])
self.max_x = max([r.max_x for r in self.rooms])
self.min_z = min([r.min_z for r in self.rooms])
self.max_z = max([r.max_z for r in self.rooms])
if len(self.wall_segs) == 0:
self._gen_static_data()
self._render_static()
obs = self.render_obs()
return obs
def _get_carry_pos(self, agent_pos, ent):
dist = self.agent.radius + ent.radius + self.max_forward_step
pos = agent_pos + self.agent.dir_vec * 1.05 * dist
y_pos = max(self.agent.cam_height - ent.height - 0.3, 0)
pos = pos + Y_VEC * y_pos
return pos
def move_agent(self, fwd_dist, fwd_drift):
next_pos = (
self.agent.pos +
self.agent.dir_vec * fwd_dist +
self.agent.right_vec * fwd_drift
)
if self.intersect(self.agent, next_pos, self.agent.radius):
return False
carrying = self.agent.carrying
if carrying:
next_carrying_pos = self._get_carry_pos(next_pos, carrying)
if self.intersect(carrying, next_carrying_pos, carrying.radius):
return False
carrying.pos = next_carrying_pos
self.agent.pos = next_pos
return True
def turn_agent(self, turn_angle):
turn_angle *= (math.pi / 180)
orig_dir = self.agent.dir
self.agent.dir += turn_angle
carrying = self.agent.carrying
if carrying:
pos = self._get_carry_pos(self.agent.pos, carrying)
if self.intersect(carrying, pos, carrying.radius):
self.agent.dir = orig_dir
return False
carrying.pos = pos
carrying.dir = self.agent.dir
return True
def step(self, action):
self.step_count += 1
rand = self.rand if self.domain_rand else None
fwd_step = self.params.sample(rand, 'forward_step')
fwd_drift = self.params.sample(rand, 'forward_drift')
turn_step = self.params.sample(rand, 'turn_step')
if action == self.actions.move_forward:
self.move_agent(fwd_step, fwd_drift)
elif action == self.actions.move_back:
self.move_agent(-fwd_step, fwd_drift)
elif action == self.actions.turn_left:
self.turn_agent(turn_step)
elif action == self.actions.turn_right:
self.turn_agent(-turn_step)
elif action == self.actions.pickup:
test_pos = self.agent.pos + self.agent.dir_vec * 1.5 * self.agent.radius
ent = self.intersect(self.agent, test_pos, 1.2 * self.agent.radius)
if not self.agent.carrying:
if isinstance(ent, Entity):
if not ent.is_static:
self.agent.carrying = ent
elif action == self.actions.drop:
if self.agent.carrying:
self.agent.carrying.pos[1] = 0
self.agent.carrying = None
if self.agent.carrying:
ent_pos = self._get_carry_pos(self.agent.pos, self.agent.carrying)
self.agent.carrying.pos = ent_pos
self.agent.carrying.dir = self.agent.dir
obs = self.render_obs()
if self.step_count >= self.max_episode_steps:
done = True
reward = 0
return obs, reward, done, {}
reward = 0
done = False
return obs, reward, done, {}
def add_rect_room(
self,
min_x,
max_x,
min_z,
max_z,
**kwargs
):
outline = np.array([
[max_x, max_z],
[max_x, min_z],
[min_x, min_z],
[min_x, max_z],
])
return self.add_room(outline=outline, **kwargs)
def add_room(self, **kwargs):
assert len(self.wall_segs) == 0, "cannot add rooms after static data is generated"
room = Room(**kwargs)
self.rooms.append(room)
return room
def connect_rooms(
self,
room_a,
room_b,
min_x=None,
max_x=None,
min_z=None,
max_z=None,
max_y=None
):
def find_facing_edges():
for idx_a in range(room_a.num_walls):
norm_a = room_a.edge_norms[idx_a]
for idx_b in range(room_b.num_walls):
norm_b = room_b.edge_norms[idx_b]
if np.dot(norm_a, norm_b) > -0.9:
continue
dir = room_b.outline[idx_b] - room_a.outline[idx_a]
if np.dot(norm_a, dir) > 0.05:
continue
return idx_a, idx_b
return None, None
idx_a, idx_b = find_facing_edges()
assert idx_a != None, "matching edges not found in connect_rooms"
start_a, end_a = room_a.add_portal(
edge=idx_a,
min_x=min_x,
max_x=max_x,
min_z=min_z,
max_z=max_z,
max_y=max_y
)
start_b, end_b = room_b.add_portal(
edge=idx_b,
min_x=min_x,
max_x=max_x,
min_z=min_z,
max_z=max_z,
max_y=max_y
)
a = room_a.outline[idx_a] + room_a.edge_dirs[idx_a] * start_a
b = room_a.outline[idx_a] + room_a.edge_dirs[idx_a] * end_a
c = room_b.outline[idx_b] + room_b.edge_dirs[idx_b] * start_b
d = room_b.outline[idx_b] + room_b.edge_dirs[idx_b] * end_b
if np.linalg.norm(a - d) < 0.001:
return
len_a = np.linalg.norm(b - a)
len_b = np.linalg.norm(d - c)
outline = np.stack([c, b, a, d])
outline = np.stack([outline[:, 0], outline[:, 2]], axis=1)
max_y = max_y if max_y != None else room_a.wall_height
room = Room(
outline,
wall_height=max_y,
wall_tex=room_a.wall_tex_name,
floor_tex=room_a.floor_tex_name,
ceil_tex=room_a.ceil_tex_name,
no_ceiling=room_a.no_ceiling,
)
self.rooms.append(room)
room.add_portal(1, start_pos=0, end_pos=len_a)
room.add_portal(3, start_pos=0, end_pos=len_b)
def place_entity(
self,
ent,
room=None,
pos=None,
dir=None,
min_x=None,
max_x=None,
min_z=None,
max_z=None
):
assert len(self.rooms) > 0, "create rooms before calling place_entity"
assert ent.radius != None, "entity must have physical size defined"
if len(self.wall_segs) == 0:
self._gen_static_data()
if pos is not None:
ent.dir = dir if dir != None else self.rand.float(-math.pi, math.pi)
ent.pos = pos
self.entities.append(ent)
return ent
while True:
r = room if room else self.rand.choice(self.rooms, probs=self.room_probs)
lx = r.min_x if min_x == None else min_x
hx = r.max_x if max_x == None else max_x
lz = r.min_z if min_z == None else min_z
hz = r.max_z if max_z == None else max_z
pos = self.rand.float(
low =[lx + ent.radius, 0, lz + ent.radius],
high=[hx - ent.radius, 0, hz - ent.radius]
)
if not r.point_inside(pos):
continue
# Make sure the position doesn't intersect with any walls
if self.intersect(ent, pos, ent.radius):
continue
d = dir if dir != None else self.rand.float(-math.pi, math.pi)
ent.pos = pos
ent.dir = d
break
self.entities.append(ent)
return ent
def place_agent(
self,
room=None,
dir=None,
min_x=None,
max_x=None,
min_z=None,
max_z=None
):
return self.place_entity(
self.agent,
room=room,
dir=dir,
min_x=min_x,
max_x=max_x,
min_z=min_z,
max_z=max_z
)
def intersect(self, ent, pos, radius):
px, _, pz = pos
pos = np.array([px, 0, pz])
if intersect_circle_segs(pos, radius, self.wall_segs):
return True
for ent2 in self.entities:
if ent2 is ent:
continue
px, _, pz = ent2.pos
pos2 = np.array([px, 0, pz])
d = np.linalg.norm(pos2 - pos)
if d < radius + ent2.radius:
return ent2
return None
def near(self, ent0, ent1=None):
if ent1 == None:
ent1 = self.agent
dist = np.linalg.norm(ent0.pos - ent1.pos)
return dist < ent0.radius + ent1.radius + 1.1 * self.max_forward_step
def _load_tex(self, tex_name):
rand = self.rand if self.params.sample(self.rand, 'tex_rand') else None
return Texture.get(tex_name, rand)
def _gen_static_data(self):
# Generate the static data for each room
for room in self.rooms:
room._gen_static_data(
self.params,
self.rand if self.domain_rand else None
)
# Concatenate the wall segments
self.wall_segs = np.concatenate([r.wall_segs for r in self.rooms])
# Room selection probabilities
self.room_probs = np.array([r.area for r in self.rooms], dtype=float)
self.room_probs /= np.sum(self.room_probs)
def _gen_world(self):
raise NotImplementedError
def _reward(self):
return 1.0 - 0.2 * (self.step_count / self.max_episode_steps)
def _render_static(self):
# TODO: manage this automatically
# glIsList
glDeleteLists(1, 1);
glNewList(1, GL_COMPILE);
# Light position
glLightfv(GL_LIGHT0, GL_POSITION, (GLfloat*4)(*self.light_pos + [1]))
# Background/minimum light level
glLightfv(GL_LIGHT0, GL_AMBIENT, (GLfloat*4)(*self.light_ambient))
# Diffuse light color
glLightfv(GL_LIGHT0, GL_DIFFUSE, (GLfloat*4)(*self.light_color))
#glLightf(GL_LIGHT0, GL_SPOT_CUTOFF, 180)
#glLightf(GL_LIGHT0, GL_SPOT_EXPONENT, 0)
#glLightf(GL_LIGHT0, GL_CONSTANT_ATTENUATION, 0)
#glLightf(GL_LIGHT0, GL_LINEAR_ATTENUATION, 0)
#glLightf(GL_LIGHT0, GL_QUADRATIC_ATTENUATION, 0)
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glShadeModel(GL_SMOOTH)
glEnable(GL_COLOR_MATERIAL)
glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE)
# Render the rooms
glEnable(GL_TEXTURE_2D)
for room in self.rooms:
room._render()
# Render the static entities
for ent in self.entities:
if ent.is_static:
ent.render()
glEndList()
def _render_world(
self,
frame_buffer,
render_agent
):
# Call the display list for the static parts of the environment
glCallList(1)
# TODO: keep the non-static entities in a different list for efficiency?
# Render the non-static entities
for ent in self.entities:
if not ent.is_static and ent is not self.agent:
ent.render()
#ent.draw_bound()
if render_agent:
self.agent.render()
# Resolve the rendered image into a numpy array
img = frame_buffer.resolve()
return img
def render_top_view(self, frame_buffer=None):
if frame_buffer == None:
frame_buffer = self.obs_fb
# Switch to the default OpenGL context
# This is necessary on Linux Nvidia drivers
self.shadow_window.switch_to()
# Bind the frame buffer before rendering into it
frame_buffer.bind()
# Clear the color and depth buffers
glClearColor(*self.sky_color, 1.0)
glClearDepth(1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# Scene extents to render
min_x = self.min_x - 1
max_x = self.max_x + 1
min_z = self.min_z - 1
max_z = self.max_z + 1
width = max_x - min_x
height = max_z - min_z
aspect = width / height
fb_aspect = frame_buffer.width / frame_buffer.height
# Adjust the aspect extents to match the frame buffer aspect
if aspect > fb_aspect:
# Want to add to denom, add to height
new_h = width / fb_aspect
h_diff = new_h - height
min_z -= h_diff / 2
max_z += h_diff / 2
elif aspect < fb_aspect:
# Want to add to num, add to width
new_w = height * fb_aspect
w_diff = new_w - width
min_x -= w_diff / 2
max_x += w_diff / 2
# Set the projection matrix
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(
min_x,
max_x,
-max_z,
-min_z,
-100, 100.0
)
# Setup the camera
# Y maps to +Z, Z maps to +Y
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
m = [
1, 0, 0, 0,
0, 0, 1, 0,
0, -1, 0, 0,
0, 0, 0, 1,
]
glLoadMatrixf((GLfloat * len(m))(*m))
return self._render_world(
frame_buffer,
render_agent=True
)
def render_obs(self, frame_buffer=None):
if frame_buffer == None:
frame_buffer = self.obs_fb
# Switch to the default OpenGL context
# This is necessary on Linux Nvidia drivers
self.shadow_window.switch_to()
# Bind the frame buffer before rendering into it
frame_buffer.bind()
# Clear the color and depth buffers
glClearColor(*self.sky_color, 1.0)
glClearDepth(1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# Set the projection matrix
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(
self.agent.cam_fov_y,
frame_buffer.width / float(frame_buffer.height),
0.04,
100.0
)
# Setup the camera
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
gluLookAt(
# Eye position
*self.agent.cam_pos,
# Target
*(self.agent.cam_pos + self.agent.cam_dir),
# Up vector
0, 1.0, 0.0
)
return self._render_world(
frame_buffer,
render_agent=False
)
def render_depth(self, frame_buffer=None):
if frame_buffer == None:
frame_buffer = self.obs_fb
# Render the world
self.render_obs(frame_buffer)
return frame_buffer.get_depth_map(0.04, 100.0)
def get_visible_ents(self):
# Allocate the occlusion query ids
num_ents = len(self.entities)
query_ids = (GLuint * num_ents)()
glGenQueries(num_ents, query_ids)
# Switch to the default OpenGL context
# This is necessary on Linux Nvidia drivers
self.shadow_window.switch_to()
# Use the small observation frame buffer
frame_buffer = self.obs_fb
# Bind the frame buffer before rendering into it
frame_buffer.bind()
# Clear the color and depth buffers
glClearColor(*self.sky_color, 1.0)
glClearDepth(1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# Set the projection matrix
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(
self.agent.cam_fov_y,
frame_buffer.width / float(frame_buffer.height),
0.04,
100.0
)
        # Setup the camera
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
gluLookAt(
# Eye position
*self.agent.cam_pos,
# Target
*(self.agent.cam_pos + self.agent.cam_dir),
# Up vector
0, 1.0, 0.0
)
# Render the rooms, without texturing
glDisable(GL_TEXTURE_2D)
for room in self.rooms:
room._render()
# For each entity
for ent_idx, ent in enumerate(self.entities):
if ent is self.agent:
continue
glBeginQuery(GL_ANY_SAMPLES_PASSED, query_ids[ent_idx])
pos = ent.pos
#glColor3f(1, 0, 0)
drawBox(
x_min=pos[0] - 0.1,
x_max=pos[0] + 0.1,
y_min=pos[1],
y_max=pos[1] + 0.2,
z_min=pos[2] - 0.1,
z_max=pos[2] + 0.1
)
glEndQuery(GL_ANY_SAMPLES_PASSED)
vis_objs = set()
# Get query results
for ent_idx, ent in enumerate(self.entities):
if ent is self.agent:
continue
visible = (GLuint*1)(1)
glGetQueryObjectuiv(query_ids[ent_idx], GL_QUERY_RESULT, visible);
if visible[0] != 0:
vis_objs.add(ent)
# Free the occlusion query ids
glDeleteQueries(1, query_ids)
#img = frame_buffer.resolve()
#return img
return vis_objs
def render(self, mode='human', close=False, view='agent'):
if close:
if self.window:
self.window.close()
return
# Render the human-view image
assert view in ['agent', 'top']
if view == 'agent':
img = self.render_obs(self.vis_fb)
else:
img = self.render_top_view(self.vis_fb)
img_width = img.shape[1]
img_height = img.shape[0]
if mode == 'rgb_array':
return img
# Render the agent's view
obs = self.render_obs()
obs_width = obs.shape[1]
obs_height = obs.shape[0]
window_width = img_width + self.obs_disp_width
window_height = img_height
if self.window is None:
config = pyglet.gl.Config(double_buffer=True)
self.window = pyglet.window.Window(
width=window_width,
height=window_height,
resizable=False,
config=config
)
self.window.clear()
self.window.switch_to()
        glBindFramebuffer(GL_FRAMEBUFFER, 0)
glClearColor(0, 0, 0, 1.0)
glClearDepth(1.0)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glOrtho(0, window_width, 0, window_height, 0, 10)
img_flip = np.ascontiguousarray(np.flip(img, axis=0))
img_data = pyglet.image.ImageData(
img_width,
img_height,
'RGB',
img_flip.ctypes.data_as(POINTER(GLubyte)),
pitch=img_width * 3,
)
img_data.blit(
0,
0,
0,
width=img_width,
height=img_height
)
obs = np.ascontiguousarray(np.flip(obs, axis=0))
obs_data = pyglet.image.ImageData(
obs_width,
obs_height,
'RGB',
obs.ctypes.data_as(POINTER(GLubyte)),
pitch=obs_width * 3,
)
obs_data.blit(
img_width,
img_height - self.obs_disp_height,
0,
width=self.obs_disp_width,
height=self.obs_disp_height
)
self.text_label.text = "pos: (%.2f, %.2f, %.2f)\nangle: %d\nsteps: %d" % (
*self.agent.pos,
int(self.agent.dir * 180 / math.pi) % 360,
self.step_count
)
self.text_label.draw()
glFlush()
if mode == 'human':
self.window.flip()
self.window.dispatch_events()
return img
| true
| true
|
f71a4919671cb710595a953343f020b773680367
| 163
|
py
|
Python
|
polls/admin.py
|
egemen61/excell
|
654b51d7cb0cb3384b7a8b714a2e21b44fcb7afc
|
[
"BSD-3-Clause"
] | 253
|
2017-09-15T10:01:58.000Z
|
2022-03-27T00:19:49.000Z
|
polls/admin.py
|
egemen61/excell
|
654b51d7cb0cb3384b7a8b714a2e21b44fcb7afc
|
[
"BSD-3-Clause"
] | 35
|
2017-10-26T09:16:30.000Z
|
2022-01-20T19:57:19.000Z
|
polls/admin.py
|
egemen61/excell
|
654b51d7cb0cb3384b7a8b714a2e21b44fcb7afc
|
[
"BSD-3-Clause"
] | 64
|
2017-10-20T15:42:05.000Z
|
2022-02-10T02:25:22.000Z
|
from django.contrib import admin
from polls.models import Question, Choice
# Register your models here.
admin.site.register(Question)
admin.site.register(Choice)
| 23.285714
| 41
| 0.815951
|
from django.contrib import admin
from polls.models import Question, Choice
admin.site.register(Question)
admin.site.register(Choice)
| true
| true
|
f71a4b05579a18c573ff27b6ef2507849421cf07
| 43,124
|
py
|
Python
|
src/transformers/configuration_utils.py
|
arfon/transformers
|
bbd0901805292901e8df05bf7be87d2e43a7ae1b
|
[
"Apache-2.0"
] | 2
|
2021-12-25T10:04:17.000Z
|
2022-03-13T05:37:13.000Z
|
src/transformers/configuration_utils.py
|
arfon/transformers
|
bbd0901805292901e8df05bf7be87d2e43a7ae1b
|
[
"Apache-2.0"
] | 9
|
2021-06-08T22:35:33.000Z
|
2021-10-04T08:53:44.000Z
|
src/transformers/configuration_utils.py
|
arfon/transformers
|
bbd0901805292901e8df05bf7be87d2e43a7ae1b
|
[
"Apache-2.0"
] | 1
|
2020-06-26T08:13:16.000Z
|
2020-06-26T08:13:16.000Z
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Configuration base class and utilities."""
import copy
import json
import os
import warnings
from typing import Any, Dict, Tuple, Union
from . import __version__
from .file_utils import (
CONFIG_NAME,
PushToHubMixin,
cached_path,
copy_func,
hf_bucket_url,
is_offline_mode,
is_remote_url,
is_torch_available,
)
from .utils import logging
logger = logging.get_logger(__name__)
class PretrainedConfig(PushToHubMixin):
r"""
Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as
methods for loading/downloading/saving configurations.
Note:
A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to
initialize a model does **not** load the model weights. It only affects the model's configuration.
Class attributes (overridden by derived classes)
- **model_type** (:obj:`str`) -- An identifier for the model type, serialized into the JSON file, and used to
recreate the correct object in :class:`~transformers.AutoConfig`.
- **is_composition** (:obj:`bool`) -- Whether the config class is composed of multiple sub-configs. In this
case the config has to be initialized from two or more configs of type
:class:`~transformers.PretrainedConfig` like: :class:`~transformers.EncoderDecoderConfig` or
          :class:`~transformers.RagConfig`.
- **keys_to_ignore_at_inference** (:obj:`List[str]`) -- A list of keys to ignore by default when looking at
dictionary outputs of the model during inference.
- **attribute_map** (:obj:`Dict[str, str]`) -- A dict that maps model specific attribute names to the
standardized naming of attributes.
Common attributes (present in all subclasses)
- **vocab_size** (:obj:`int`) -- The number of tokens in the vocabulary, which is also the first dimension of
the embeddings matrix (this attribute may be missing for models that don't have a text modality like ViT).
- **hidden_size** (:obj:`int`) -- The hidden size of the model.
- **num_attention_heads** (:obj:`int`) -- The number of attention heads used in the multi-head attention layers
of the model.
- **num_hidden_layers** (:obj:`int`) -- The number of blocks in the model.
Args:
name_or_path (:obj:`str`, `optional`, defaults to :obj:`""`):
Store the string that was passed to :func:`~transformers.PreTrainedModel.from_pretrained` or
:func:`~transformers.TFPreTrainedModel.from_pretrained` as ``pretrained_model_name_or_path`` if the
configuration was created with such a method.
output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the model should return all hidden-states.
output_attentions (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the model should returns all attentions.
return_dict (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should return a :class:`~transformers.file_utils.ModelOutput` instead of a plain
tuple.
is_encoder_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether the model is used as an encoder/decoder or not.
is_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether the model is used as a decoder or not (if not, it is used as an encoder).
add_cross_attention (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether cross-attention layers should be added to the model. Note, this option is only relevant for models
            that can be used as decoder models within the :class:`~transformers.EncoderDecoderModel` class, which
consists of all models in ``AUTO_MODELS_FOR_CAUSAL_LM``.
tie_encoder_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether all encoder weights should be tied to their equivalent decoder weights. This requires the encoder
and decoder model to have the exact same parameter names.
prune_heads (:obj:`Dict[int, List[int]]`, `optional`, defaults to :obj:`{}`):
Pruned heads of the model. The keys are the selected layer indices and the associated values, the list of
heads to prune in said layer.
For instance ``{1: [0, 2], 2: [2, 3]}`` will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
chunk_size_feed_forward (:obj:`int`, `optional`, defaults to :obj:`0`):
The chunk size of all feed forward layers in the residual attention blocks. A chunk size of :obj:`0` means
that the feed forward layer is not chunked. A chunk size of n means that the feed forward layer processes
:obj:`n` < sequence_length embeddings at a time. For more information on feed forward chunking, see `How
does Feed Forward Chunking work? <../glossary.html#feed-forward-chunking>`__ .
Parameters for sequence generation
- **max_length** (:obj:`int`, `optional`, defaults to 20) -- Maximum length that will be used by default in the
:obj:`generate` method of the model.
        - **min_length** (:obj:`int`, `optional`, defaults to 0) -- Minimum length that will be used by default in the
:obj:`generate` method of the model.
- **do_sample** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Flag that will be used by default in the
          :obj:`generate` method of the model. Whether or not to use sampling; use greedy decoding otherwise.
- **early_stopping** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Flag that will be used by default
in the :obj:`generate` method of the model. Whether to stop the beam search when at least ``num_beams``
sentences are finished per batch or not.
- **num_beams** (:obj:`int`, `optional`, defaults to 1) -- Number of beams for beam search that will be used by
default in the :obj:`generate` method of the model. 1 means no beam search.
- **num_beam_groups** (:obj:`int`, `optional`, defaults to 1) -- Number of groups to divide :obj:`num_beams`
into in order to ensure diversity among different groups of beams that will be used by default in the
:obj:`generate` method of the model. 1 means no group beam search.
- **diversity_penalty** (:obj:`float`, `optional`, defaults to 0.0) -- Value to control diversity for group
beam search. that will be used by default in the :obj:`generate` method of the model. 0 means no diversity
penalty. The higher the penalty, the more diverse are the outputs.
        - **temperature** (:obj:`float`, `optional`, defaults to 1) -- The value used to modulate the next token
probabilities that will be used by default in the :obj:`generate` method of the model. Must be strictly
positive.
- **top_k** (:obj:`int`, `optional`, defaults to 50) -- Number of highest probability vocabulary tokens to keep
for top-k-filtering that will be used by default in the :obj:`generate` method of the model.
- **top_p** (:obj:`float`, `optional`, defaults to 1) -- Value that will be used by default in the
:obj:`generate` method of the model for ``top_p``. If set to float < 1, only the most probable tokens with
probabilities that add up to ``top_p`` or higher are kept for generation.
- **repetition_penalty** (:obj:`float`, `optional`, defaults to 1) -- Parameter for repetition penalty that
will be used by default in the :obj:`generate` method of the model. 1.0 means no penalty.
- **length_penalty** (:obj:`float`, `optional`, defaults to 1) -- Exponential penalty to the length that will
be used by default in the :obj:`generate` method of the model.
- **no_repeat_ngram_size** (:obj:`int`, `optional`, defaults to 0) -- Value that will be used by default in the
:obj:`generate` method of the model for ``no_repeat_ngram_size``. If set to int > 0, all ngrams of that size
can only occur once.
- **encoder_no_repeat_ngram_size** (:obj:`int`, `optional`, defaults to 0) -- Value that will be used by
default in the :obj:`generate` method of the model for ``encoder_no_repeat_ngram_size``. If set to int > 0,
all ngrams of that size that occur in the ``encoder_input_ids`` cannot occur in the ``decoder_input_ids``.
- **bad_words_ids** (:obj:`List[int]`, `optional`) -- List of token ids that are not allowed to be generated
that will be used by default in the :obj:`generate` method of the model. In order to get the tokens of the
words that should not appear in the generated text, use :obj:`tokenizer.encode(bad_word,
add_prefix_space=True)`.
- **num_return_sequences** (:obj:`int`, `optional`, defaults to 1) -- Number of independently computed returned
sequences for each element in the batch that will be used by default in the :obj:`generate` method of the
model.
- **output_scores** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether the model should return the
logits when used for generation
- **return_dict_in_generate** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether the model should
return a :class:`~transformers.file_utils.ModelOutput` instead of a :obj:`torch.LongTensor`
- **forced_bos_token_id** (:obj:`int`, `optional`) -- The id of the token to force as the first generated token
after the :obj:`decoder_start_token_id`. Useful for multilingual models like :doc:`mBART
<../model_doc/mbart>` where the first generated token needs to be the target language token.
- **forced_eos_token_id** (:obj:`int`, `optional`) -- The id of the token to force as the last generated token
when :obj:`max_length` is reached.
- **remove_invalid_values** (:obj:`bool`, `optional`) -- Whether to remove possible `nan` and `inf` outputs of
the model to prevent the generation method to crash. Note that using ``remove_invalid_values`` can slow down
generation.
Parameters for fine-tuning tasks
- **architectures** (:obj:`List[str]`, `optional`) -- Model architectures that can be used with the model
pretrained weights.
- **finetuning_task** (:obj:`str`, `optional`) -- Name of the task used to fine-tune the model. This can be
used when converting from an original (TensorFlow or PyTorch) checkpoint.
- **id2label** (:obj:`Dict[int, str]`, `optional`) -- A map from index (for instance prediction index, or
target index) to label.
- **label2id** (:obj:`Dict[str, int]`, `optional`) -- A map from label to index for the model.
- **num_labels** (:obj:`int`, `optional`) -- Number of labels to use in the last layer added to the model,
typically for a classification task.
- **task_specific_params** (:obj:`Dict[str, Any]`, `optional`) -- Additional keyword arguments to store for the
current task.
- **problem_type** (:obj:`str`, `optional`) -- Problem type for :obj:`XxxForSequenceClassification` models. Can
be one of (:obj:`"regression"`, :obj:`"single_label_classification"`, :obj:`"multi_label_classification"`).
Please note that this parameter is only available in the following models: `AlbertForSequenceClassification`,
`BertForSequenceClassification`, `BigBirdForSequenceClassification`, `ConvBertForSequenceClassification`,
`DistilBertForSequenceClassification`, `ElectraForSequenceClassification`, `FunnelForSequenceClassification`,
`LongformerForSequenceClassification`, `MobileBertForSequenceClassification`,
`ReformerForSequenceClassification`, `RobertaForSequenceClassification`,
`SqueezeBertForSequenceClassification`, `XLMForSequenceClassification` and `XLNetForSequenceClassification`.
Parameters linked to the tokenizer
- **tokenizer_class** (:obj:`str`, `optional`) -- The name of the associated tokenizer class to use (if none is
set, will use the tokenizer associated to the model by default).
- **prefix** (:obj:`str`, `optional`) -- A specific prompt that should be added at the beginning of each text
before calling the model.
        - **bos_token_id** (:obj:`int`, `optional`) -- The id of the `beginning-of-stream` token.
        - **pad_token_id** (:obj:`int`, `optional`) -- The id of the `padding` token.
        - **eos_token_id** (:obj:`int`, `optional`) -- The id of the `end-of-stream` token.
        - **decoder_start_token_id** (:obj:`int`, `optional`) -- If an encoder-decoder model starts decoding with a
          different token than `bos`, the id of that token.
        - **sep_token_id** (:obj:`int`, `optional`) -- The id of the `separation` token.
PyTorch specific parameters
- **torchscript** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether or not the model should be
used with Torchscript.
- **tie_word_embeddings** (:obj:`bool`, `optional`, defaults to :obj:`True`) -- Whether the model's input and
          output word embeddings should be tied. Note that this is only relevant if the model has an output word
embedding layer.
- **torch_dtype** (:obj:`str`, `optional`) -- The :obj:`dtype` of the weights. This attribute can be used to
initialize the model to a non-default ``dtype`` (which is normally ``float32``) and thus allow for optimal
storage allocation. For example, if the saved model is ``float16``, ideally we want to load it back using the
minimal amount of memory needed to load ``float16`` weights. Since the config object is stored in plain text,
this attribute contains just the floating type string without the ``torch.`` prefix. For example, for
``torch.float16`` ``torch_dtype`` is the ``"float16"`` string.
          This attribute is currently not used during model loading, but this may change in future versions. We can
          already start preparing for that by saving the dtype with ``save_pretrained``.
TensorFlow specific parameters
- **use_bfloat16** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether or not the model should use
BFloat16 scalars (only used by some TensorFlow models).
"""
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
def __setattr__(self, key, value):
if key in super().__getattribute__("attribute_map"):
key = super().__getattribute__("attribute_map")[key]
super().__setattr__(key, value)
def __getattribute__(self, key):
if key != "attribute_map" and key in super().__getattribute__("attribute_map"):
key = super().__getattribute__("attribute_map")[key]
return super().__getattribute__(key)
def __init__(self, **kwargs):
# Attributes with defaults
self.return_dict = kwargs.pop("return_dict", True)
self.output_hidden_states = kwargs.pop("output_hidden_states", False)
self.output_attentions = kwargs.pop("output_attentions", False)
self.torchscript = kwargs.pop("torchscript", False) # Only used by PyTorch models
self.torch_dtype = kwargs.pop("torch_dtype", None) # Only used by PyTorch models
self.use_bfloat16 = kwargs.pop("use_bfloat16", False)
self.pruned_heads = kwargs.pop("pruned_heads", {})
self.tie_word_embeddings = kwargs.pop(
"tie_word_embeddings", True
) # Whether input and output word embeddings should be tied for all MLM, LM and Seq2Seq models.
# Is decoder is used in encoder-decoder models to differentiate encoder from decoder
self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False)
self.is_decoder = kwargs.pop("is_decoder", False)
self.add_cross_attention = kwargs.pop("add_cross_attention", False)
self.tie_encoder_decoder = kwargs.pop("tie_encoder_decoder", False)
# Parameters for sequence generation
self.max_length = kwargs.pop("max_length", 20)
self.min_length = kwargs.pop("min_length", 0)
self.do_sample = kwargs.pop("do_sample", False)
self.early_stopping = kwargs.pop("early_stopping", False)
self.num_beams = kwargs.pop("num_beams", 1)
self.num_beam_groups = kwargs.pop("num_beam_groups", 1)
self.diversity_penalty = kwargs.pop("diversity_penalty", 0.0)
self.temperature = kwargs.pop("temperature", 1.0)
self.top_k = kwargs.pop("top_k", 50)
self.top_p = kwargs.pop("top_p", 1.0)
self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
self.length_penalty = kwargs.pop("length_penalty", 1.0)
self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
self.encoder_no_repeat_ngram_size = kwargs.pop("encoder_no_repeat_ngram_size", 0)
self.bad_words_ids = kwargs.pop("bad_words_ids", None)
self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
self.chunk_size_feed_forward = kwargs.pop("chunk_size_feed_forward", 0)
self.output_scores = kwargs.pop("output_scores", False)
self.return_dict_in_generate = kwargs.pop("return_dict_in_generate", False)
self.forced_bos_token_id = kwargs.pop("forced_bos_token_id", None)
self.forced_eos_token_id = kwargs.pop("forced_eos_token_id", None)
self.remove_invalid_values = kwargs.pop("remove_invalid_values", False)
# Fine-tuning task arguments
self.architectures = kwargs.pop("architectures", None)
self.finetuning_task = kwargs.pop("finetuning_task", None)
self.id2label = kwargs.pop("id2label", None)
self.label2id = kwargs.pop("label2id", None)
if self.id2label is not None:
kwargs.pop("num_labels", None)
self.id2label = dict((int(key), value) for key, value in self.id2label.items())
# Keys are always strings in JSON so convert ids to int here.
else:
self.num_labels = kwargs.pop("num_labels", 2)
if self.torch_dtype is not None and isinstance(self.torch_dtype, str):
# we will start using self.torch_dtype in v5, but to be consistent with
# from_pretrained's torch_dtype arg convert it to an actual torch.dtype object
if is_torch_available():
import torch
self.torch_dtype = getattr(torch, self.torch_dtype)
# Tokenizer arguments TODO: eventually tokenizer and models should share the same config
self.tokenizer_class = kwargs.pop("tokenizer_class", None)
self.prefix = kwargs.pop("prefix", None)
self.bos_token_id = kwargs.pop("bos_token_id", None)
self.pad_token_id = kwargs.pop("pad_token_id", None)
self.eos_token_id = kwargs.pop("eos_token_id", None)
self.sep_token_id = kwargs.pop("sep_token_id", None)
self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
# task specific arguments
self.task_specific_params = kwargs.pop("task_specific_params", None)
# regression / multi-label classification
self.problem_type = kwargs.pop("problem_type", None)
allowed_problem_types = ("regression", "single_label_classification", "multi_label_classification")
if self.problem_type is not None and self.problem_type not in allowed_problem_types:
raise ValueError(
f"The config parameter `problem_type` was not understood: received {self.problem_type}"
"but only 'regression', 'single_label_classification' and 'multi_label_classification' are valid."
)
# TPU arguments
if kwargs.pop("xla_device", None) is not None:
logger.warning(
"The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can "
"safely remove it from your `config.json` file."
)
# Name or path to the pretrained checkpoint
self._name_or_path = str(kwargs.pop("name_or_path", ""))
# Drop the transformers version info
self.transformers_version = kwargs.pop("transformers_version", None)
# Deal with gradient checkpointing
if kwargs.get("gradient_checkpointing", False):
warnings.warn(
"Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 "
"Transformers. Using `model.gradient_checkpointing_enable()` instead, or if you are using the "
"`Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`."
)
# Additional attributes without default values
for key, value in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
logger.error(f"Can't set {key} with value {value} for {self}")
raise err
@property
def name_or_path(self) -> str:
return self._name_or_path
@name_or_path.setter
def name_or_path(self, value):
self._name_or_path = str(value) # Make sure that name_or_path is a string (for JSON encoding)
@property
def use_return_dict(self) -> bool:
"""
:obj:`bool`: Whether or not return :class:`~transformers.file_utils.ModelOutput` instead of tuples.
"""
# If torchscript is set, force `return_dict=False` to avoid jit errors
return self.return_dict and not self.torchscript
@property
def num_labels(self) -> int:
"""
:obj:`int`: The number of labels for classification models.
"""
return len(self.id2label)
@num_labels.setter
def num_labels(self, num_labels: int):
if not hasattr(self, "id2label") or self.id2label is None or len(self.id2label) != num_labels:
self.id2label = {i: f"LABEL_{i}" for i in range(num_labels)}
self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))
def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
"""
Save a configuration object to the directory ``save_directory``, so that it can be re-loaded using the
:func:`~transformers.PretrainedConfig.from_pretrained` class method.
Args:
save_directory (:obj:`str` or :obj:`os.PathLike`):
Directory where the configuration JSON file will be saved (will be created if it does not exist).
push_to_hub (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to push your model to the Hugging Face model hub after saving it.
.. warning::
Using :obj:`push_to_hub=True` will synchronize the repository you are pushing to with
:obj:`save_directory`, which requires :obj:`save_directory` to be a local clone of the repo you are
pushing to if it's an existing folder. Pass along :obj:`temp_dir=True` to use a temporary directory
instead.
kwargs:
                Additional keyword arguments passed along to the
:meth:`~transformers.file_utils.PushToHubMixin.push_to_hub` method.
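        A minimal usage sketch (the directory path below is illustrative)::
            config = BertConfig.from_pretrained('bert-base-uncased')
            config.save_pretrained('./my_model_directory/')  # writes ./my_model_directory/config.json
            reloaded = BertConfig.from_pretrained('./my_model_directory/')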
"""
if os.path.isfile(save_directory):
raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
if push_to_hub:
commit_message = kwargs.pop("commit_message", None)
repo = self._create_or_get_repo(save_directory, **kwargs)
os.makedirs(save_directory, exist_ok=True)
# If we save using the predefined names, we can load using `from_pretrained`
output_config_file = os.path.join(save_directory, CONFIG_NAME)
self.to_json_file(output_config_file, use_diff=True)
logger.info(f"Configuration saved in {output_config_file}")
if push_to_hub:
url = self._push_to_hub(repo, commit_message=commit_message)
logger.info(f"Configuration pushed to the hub in this commit: {url}")
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
r"""
Instantiate a :class:`~transformers.PretrainedConfig` (or a derived class) from a pretrained model
configuration.
Args:
pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`):
This can be either:
- a string, the `model id` of a pretrained model configuration hosted inside a model repo on
huggingface.co. Valid model ids can be located at the root-level, like ``bert-base-uncased``, or
namespaced under a user or organization name, like ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing a configuration file saved using the
:func:`~transformers.PretrainedConfig.save_pretrained` method, e.g., ``./my_model_directory/``.
- a path or url to a saved configuration JSON `file`, e.g.,
``./my_model_directory/configuration.json``.
cache_dir (:obj:`str` or :obj:`os.PathLike`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force to (re-)download the configuration files and override the cached versions if
they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to delete incompletely received files. Attempts to resume the download if such a file
exists.
proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
use_auth_token (:obj:`str` or `bool`, `optional`):
The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`).
revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
return_unused_kwargs (:obj:`bool`, `optional`, defaults to :obj:`False`):
If :obj:`False`, then this function returns just the final configuration object.
                If :obj:`True`, then this function returns a :obj:`Tuple(config, unused_kwargs)` where `unused_kwargs`
is a dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e.,
the part of ``kwargs`` which has not been used to update ``config`` and is otherwise ignored.
kwargs (:obj:`Dict[str, Any]`, `optional`):
The values in kwargs of any keys which are configuration attributes will be used to override the loaded
values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
by the ``return_unused_kwargs`` keyword parameter.
.. note::
Passing :obj:`use_auth_token=True` is required when you want to use a private model.
Returns:
:class:`PretrainedConfig`: The configuration object instantiated from this pretrained model.
Examples::
            # We can't directly instantiate the base class `PretrainedConfig`, so the examples below use a
            # derived class: BertConfig
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from huggingface.co and cache.
config = BertConfig.from_pretrained('./test/saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')`
config = BertConfig.from_pretrained('./test/saved_model/my_configuration.json')
config = BertConfig.from_pretrained('bert-base-uncased', output_attentions=True, foo=False)
assert config.output_attentions == True
config, unused_kwargs = BertConfig.from_pretrained('bert-base-uncased', output_attentions=True,
foo=False, return_unused_kwargs=True)
assert config.output_attentions == True
assert unused_kwargs == {'foo': False}
"""
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
logger.warn(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
)
return cls.from_dict(config_dict, **kwargs)
@classmethod
def get_config_dict(
cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""
From a ``pretrained_model_name_or_path``, resolve to a dictionary of parameters, to be used for instantiating a
:class:`~transformers.PretrainedConfig` using ``from_dict``.
Parameters:
pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`):
The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
Returns:
:obj:`Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the configuration object.
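        A minimal sketch (the model id below is illustrative)::
            config_dict, unused_kwargs = BertConfig.get_config_dict('bert-base-uncased')
            config = BertConfig.from_dict(config_dict)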
"""
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
use_auth_token = kwargs.pop("use_auth_token", None)
local_files_only = kwargs.pop("local_files_only", False)
revision = kwargs.pop("revision", None)
from_pipeline = kwargs.pop("_from_pipeline", None)
from_auto_class = kwargs.pop("_from_auto", False)
user_agent = {"file_type": "config", "from_auto_class": from_auto_class}
if from_pipeline is not None:
user_agent["using_pipeline"] = from_pipeline
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
if os.path.isdir(pretrained_model_name_or_path):
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
config_file = pretrained_model_name_or_path
else:
config_file = hf_bucket_url(
pretrained_model_name_or_path, filename=CONFIG_NAME, revision=revision, mirror=None
)
try:
# Load from URL or cache if already cached
resolved_config_file = cached_path(
config_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
user_agent=user_agent,
)
# Load config dict
config_dict = cls._dict_from_json_file(resolved_config_file)
except EnvironmentError as err:
logger.error(err)
msg = (
f"Can't load config for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a {CONFIG_NAME} file\n\n"
)
if revision is not None:
msg += f"- or '{revision}' is a valid git identifier (branch name, a tag name, or a commit id) that exists for this model name as listed on its model page on 'https://huggingface.co/models'\n\n"
raise EnvironmentError(msg)
except (json.JSONDecodeError, UnicodeDecodeError):
msg = (
f"Couldn't reach server at '{config_file}' to download configuration file or "
"configuration file is not a valid JSON file. "
f"Please check network or file content here: {resolved_config_file}."
)
raise EnvironmentError(msg)
if resolved_config_file == config_file:
logger.info(f"loading configuration file {config_file}")
else:
logger.info(f"loading configuration file {config_file} from cache at {resolved_config_file}")
return config_dict, kwargs
@classmethod
def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "PretrainedConfig":
"""
Instantiates a :class:`~transformers.PretrainedConfig` from a Python dictionary of parameters.
Args:
config_dict (:obj:`Dict[str, Any]`):
Dictionary that will be used to instantiate the configuration object. Such a dictionary can be
retrieved from a pretrained checkpoint by leveraging the
:func:`~transformers.PretrainedConfig.get_config_dict` method.
kwargs (:obj:`Dict[str, Any]`):
Additional parameters from which to initialize the configuration object.
Returns:
:class:`PretrainedConfig`: The configuration object instantiated from those parameters.
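        A minimal sketch (the model id below is illustrative)::
            config_dict, _ = BertConfig.get_config_dict('bert-base-uncased')
            config = BertConfig.from_dict(config_dict, output_attentions=True)
            assert config.output_attentions == True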
"""
return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
config = cls(**config_dict)
if hasattr(config, "pruned_heads"):
config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items())
# Update config with kwargs if needed
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
if key != "torch_dtype":
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
logger.info(f"Model config {config}")
if return_unused_kwargs:
return config, kwargs
else:
return config
@classmethod
def from_json_file(cls, json_file: Union[str, os.PathLike]) -> "PretrainedConfig":
"""
Instantiates a :class:`~transformers.PretrainedConfig` from the path to a JSON file of parameters.
Args:
json_file (:obj:`str` or :obj:`os.PathLike`):
Path to the JSON file containing the parameters.
Returns:
:class:`PretrainedConfig`: The configuration object instantiated from that JSON file.
"""
config_dict = cls._dict_from_json_file(json_file)
return cls(**config_dict)
@classmethod
def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return json.loads(text)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return f"{self.__class__.__name__} {self.to_json_string()}"
def to_diff_dict(self) -> Dict[str, Any]:
"""
Removes all attributes from config which correspond to the default config attributes for better readability and
serializes to a Python dictionary.
Returns:
            :obj:`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
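        A minimal sketch (values are illustrative)::
            config = PretrainedConfig(num_beams=5)
            diff = config.to_diff_dict()
            assert diff["num_beams"] == 5  # non-default values are kept, defaults are dropped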
"""
config_dict = self.to_dict()
# get the default config dict
default_config_dict = PretrainedConfig().to_dict()
# get class specific config dict
class_config_dict = self.__class__().to_dict() if not self.is_composition else {}
serializable_config_dict = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if (
key not in default_config_dict
or key == "transformers_version"
or value != default_config_dict[key]
or (key in class_config_dict and value != class_config_dict[key])
):
serializable_config_dict[key] = value
self.dict_torch_dtype_to_str(serializable_config_dict)
return serializable_config_dict
def to_dict(self) -> Dict[str, Any]:
"""
Serializes this instance to a Python dictionary.
Returns:
:obj:`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
output = copy.deepcopy(self.__dict__)
if hasattr(self.__class__, "model_type"):
output["model_type"] = self.__class__.model_type
# Transformers version when serializing the model
output["transformers_version"] = __version__
self.dict_torch_dtype_to_str(output)
return output
def to_json_string(self, use_diff: bool = True) -> str:
"""
Serializes this instance to a JSON string.
Args:
use_diff (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to ``True``, only the difference between the config instance and the default
``PretrainedConfig()`` is serialized to JSON string.
Returns:
:obj:`str`: String containing all the attributes that make up this configuration instance in JSON format.
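        A minimal sketch::
            config = PretrainedConfig(num_beams=5)
            config.to_json_string()                 # only non-default attributes
            config.to_json_string(use_diff=False)   # the full configuration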
"""
if use_diff is True:
config_dict = self.to_diff_dict()
else:
config_dict = self.to_dict()
return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True):
"""
Save this instance to a JSON file.
Args:
json_file_path (:obj:`str` or :obj:`os.PathLike`):
Path to the JSON file in which this configuration instance's parameters will be saved.
use_diff (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to ``True``, only the difference between the config instance and the default
``PretrainedConfig()`` is serialized to JSON file.
"""
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string(use_diff=use_diff))
def update(self, config_dict: Dict[str, Any]):
"""
Updates attributes of this class with attributes from ``config_dict``.
Args:
config_dict (:obj:`Dict[str, Any]`): Dictionary of attributes that should be updated for this class.
"""
for key, value in config_dict.items():
setattr(self, key, value)
def update_from_string(self, update_str: str):
"""
Updates attributes of this class with attributes from ``update_str``.
The expected format is ints, floats and strings as is, and for booleans use ``true`` or ``false``. For example:
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
The keys to change have to already exist in the config object.
Args:
update_str (:obj:`str`): String with attributes that should be updated for this class.
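        A minimal sketch (assuming a GPT-2 style config, matching the example string above)::
            config = GPT2Config()
            config.update_from_string("n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index")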
"""
d = dict(x.split("=") for x in update_str.split(","))
for k, v in d.items():
if not hasattr(self, k):
raise ValueError(f"key {k} isn't in the original config dict")
old_v = getattr(self, k)
if isinstance(old_v, bool):
if v.lower() in ["true", "1", "y", "yes"]:
v = True
elif v.lower() in ["false", "0", "n", "no"]:
v = False
else:
raise ValueError(f"can't derive true or false from {v} (key {k})")
elif isinstance(old_v, int):
v = int(v)
elif isinstance(old_v, float):
v = float(v)
elif not isinstance(old_v, str):
raise ValueError(
f"You can only update int, float, bool or string values in the config, got {v} for key {k}"
)
setattr(self, k, v)
def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:
"""
Checks whether the passed dictionary has a `torch_dtype` key and if it's not None, converts torch.dtype to a
        string of just the type. For example, :obj:`torch.float32` gets converted into the `"float32"` string, which can
then be stored in the json format.
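        A minimal sketch (assuming torch is available and ``config`` is any config instance)::
            import torch
            d = {"torch_dtype": torch.float16}
            config.dict_torch_dtype_to_str(d)
            assert d["torch_dtype"] == "float16"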
"""
if d.get("torch_dtype", None) is not None and not isinstance(d["torch_dtype"], str):
d["torch_dtype"] = str(d["torch_dtype"]).split(".")[1]
PretrainedConfig.push_to_hub = copy_func(PretrainedConfig.push_to_hub)
PretrainedConfig.push_to_hub.__doc__ = PretrainedConfig.push_to_hub.__doc__.format(
object="config", object_class="AutoConfig", object_files="configuration file"
)
| 54.0401
| 210
| 0.649082
|
import copy
import json
import os
import warnings
from typing import Any, Dict, Tuple, Union
from . import __version__
from .file_utils import (
CONFIG_NAME,
PushToHubMixin,
cached_path,
copy_func,
hf_bucket_url,
is_offline_mode,
is_remote_url,
is_torch_available,
)
from .utils import logging
logger = logging.get_logger(__name__)
class PretrainedConfig(PushToHubMixin):
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
def __setattr__(self, key, value):
if key in super().__getattribute__("attribute_map"):
key = super().__getattribute__("attribute_map")[key]
super().__setattr__(key, value)
def __getattribute__(self, key):
if key != "attribute_map" and key in super().__getattribute__("attribute_map"):
key = super().__getattribute__("attribute_map")[key]
return super().__getattribute__(key)
def __init__(self, **kwargs):
self.return_dict = kwargs.pop("return_dict", True)
self.output_hidden_states = kwargs.pop("output_hidden_states", False)
self.output_attentions = kwargs.pop("output_attentions", False)
self.torchscript = kwargs.pop("torchscript", False)
self.torch_dtype = kwargs.pop("torch_dtype", None)
self.use_bfloat16 = kwargs.pop("use_bfloat16", False)
self.pruned_heads = kwargs.pop("pruned_heads", {})
self.tie_word_embeddings = kwargs.pop(
"tie_word_embeddings", True
)
self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False)
self.is_decoder = kwargs.pop("is_decoder", False)
self.add_cross_attention = kwargs.pop("add_cross_attention", False)
self.tie_encoder_decoder = kwargs.pop("tie_encoder_decoder", False)
self.max_length = kwargs.pop("max_length", 20)
self.min_length = kwargs.pop("min_length", 0)
self.do_sample = kwargs.pop("do_sample", False)
self.early_stopping = kwargs.pop("early_stopping", False)
self.num_beams = kwargs.pop("num_beams", 1)
self.num_beam_groups = kwargs.pop("num_beam_groups", 1)
self.diversity_penalty = kwargs.pop("diversity_penalty", 0.0)
self.temperature = kwargs.pop("temperature", 1.0)
self.top_k = kwargs.pop("top_k", 50)
self.top_p = kwargs.pop("top_p", 1.0)
self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
self.length_penalty = kwargs.pop("length_penalty", 1.0)
self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
self.encoder_no_repeat_ngram_size = kwargs.pop("encoder_no_repeat_ngram_size", 0)
self.bad_words_ids = kwargs.pop("bad_words_ids", None)
self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
self.chunk_size_feed_forward = kwargs.pop("chunk_size_feed_forward", 0)
self.output_scores = kwargs.pop("output_scores", False)
self.return_dict_in_generate = kwargs.pop("return_dict_in_generate", False)
self.forced_bos_token_id = kwargs.pop("forced_bos_token_id", None)
self.forced_eos_token_id = kwargs.pop("forced_eos_token_id", None)
self.remove_invalid_values = kwargs.pop("remove_invalid_values", False)
self.architectures = kwargs.pop("architectures", None)
self.finetuning_task = kwargs.pop("finetuning_task", None)
self.id2label = kwargs.pop("id2label", None)
self.label2id = kwargs.pop("label2id", None)
if self.id2label is not None:
kwargs.pop("num_labels", None)
self.id2label = dict((int(key), value) for key, value in self.id2label.items())
else:
self.num_labels = kwargs.pop("num_labels", 2)
if self.torch_dtype is not None and isinstance(self.torch_dtype, str):
if is_torch_available():
import torch
self.torch_dtype = getattr(torch, self.torch_dtype)
# Tokenizer arguments TODO: eventually tokenizer and models should share the same config
self.tokenizer_class = kwargs.pop("tokenizer_class", None)
self.prefix = kwargs.pop("prefix", None)
self.bos_token_id = kwargs.pop("bos_token_id", None)
self.pad_token_id = kwargs.pop("pad_token_id", None)
self.eos_token_id = kwargs.pop("eos_token_id", None)
self.sep_token_id = kwargs.pop("sep_token_id", None)
self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
# task specific arguments
self.task_specific_params = kwargs.pop("task_specific_params", None)
# regression / multi-label classification
self.problem_type = kwargs.pop("problem_type", None)
allowed_problem_types = ("regression", "single_label_classification", "multi_label_classification")
if self.problem_type is not None and self.problem_type not in allowed_problem_types:
raise ValueError(
f"The config parameter `problem_type` was not understood: received {self.problem_type}"
"but only 'regression', 'single_label_classification' and 'multi_label_classification' are valid."
)
# TPU arguments
if kwargs.pop("xla_device", None) is not None:
logger.warning(
"The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can "
"safely remove it from your `config.json` file."
)
# Name or path to the pretrained checkpoint
self._name_or_path = str(kwargs.pop("name_or_path", ""))
# Drop the transformers version info
self.transformers_version = kwargs.pop("transformers_version", None)
# Deal with gradient checkpointing
if kwargs.get("gradient_checkpointing", False):
warnings.warn(
"Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 "
"Transformers. Using `model.gradient_checkpointing_enable()` instead, or if you are using the "
"`Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`."
)
# Additional attributes without default values
for key, value in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
logger.error(f"Can't set {key} with value {value} for {self}")
raise err
@property
def name_or_path(self) -> str:
return self._name_or_path
@name_or_path.setter
def name_or_path(self, value):
self._name_or_path = str(value)
@property
def use_return_dict(self) -> bool:
return self.return_dict and not self.torchscript
@property
def num_labels(self) -> int:
return len(self.id2label)
@num_labels.setter
def num_labels(self, num_labels: int):
if not hasattr(self, "id2label") or self.id2label is None or len(self.id2label) != num_labels:
self.id2label = {i: f"LABEL_{i}" for i in range(num_labels)}
self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))
def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
if os.path.isfile(save_directory):
raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
if push_to_hub:
commit_message = kwargs.pop("commit_message", None)
repo = self._create_or_get_repo(save_directory, **kwargs)
os.makedirs(save_directory, exist_ok=True)
output_config_file = os.path.join(save_directory, CONFIG_NAME)
self.to_json_file(output_config_file, use_diff=True)
logger.info(f"Configuration saved in {output_config_file}")
if push_to_hub:
url = self._push_to_hub(repo, commit_message=commit_message)
logger.info(f"Configuration pushed to the hub in this commit: {url}")
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
logger.warn(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
)
return cls.from_dict(config_dict, **kwargs)
@classmethod
def get_config_dict(
cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
use_auth_token = kwargs.pop("use_auth_token", None)
local_files_only = kwargs.pop("local_files_only", False)
revision = kwargs.pop("revision", None)
from_pipeline = kwargs.pop("_from_pipeline", None)
from_auto_class = kwargs.pop("_from_auto", False)
user_agent = {"file_type": "config", "from_auto_class": from_auto_class}
if from_pipeline is not None:
user_agent["using_pipeline"] = from_pipeline
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
if os.path.isdir(pretrained_model_name_or_path):
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
config_file = pretrained_model_name_or_path
else:
config_file = hf_bucket_url(
pretrained_model_name_or_path, filename=CONFIG_NAME, revision=revision, mirror=None
)
try:
resolved_config_file = cached_path(
config_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
user_agent=user_agent,
)
config_dict = cls._dict_from_json_file(resolved_config_file)
except EnvironmentError as err:
logger.error(err)
msg = (
f"Can't load config for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a {CONFIG_NAME} file\n\n"
)
if revision is not None:
msg += f"- or '{revision}' is a valid git identifier (branch name, a tag name, or a commit id) that exists for this model name as listed on its model page on 'https://huggingface.co/models'\n\n"
raise EnvironmentError(msg)
except (json.JSONDecodeError, UnicodeDecodeError):
msg = (
f"Couldn't reach server at '{config_file}' to download configuration file or "
"configuration file is not a valid JSON file. "
f"Please check network or file content here: {resolved_config_file}."
)
raise EnvironmentError(msg)
if resolved_config_file == config_file:
logger.info(f"loading configuration file {config_file}")
else:
logger.info(f"loading configuration file {config_file} from cache at {resolved_config_file}")
return config_dict, kwargs
@classmethod
def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "PretrainedConfig":
return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
config = cls(**config_dict)
if hasattr(config, "pruned_heads"):
config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items())
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
if key != "torch_dtype":
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
logger.info(f"Model config {config}")
if return_unused_kwargs:
return config, kwargs
else:
return config
@classmethod
def from_json_file(cls, json_file: Union[str, os.PathLike]) -> "PretrainedConfig":
config_dict = cls._dict_from_json_file(json_file)
return cls(**config_dict)
@classmethod
def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return json.loads(text)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return f"{self.__class__.__name__} {self.to_json_string()}"
def to_diff_dict(self) -> Dict[str, Any]:
config_dict = self.to_dict()
default_config_dict = PretrainedConfig().to_dict()
class_config_dict = self.__class__().to_dict() if not self.is_composition else {}
serializable_config_dict = {}
for key, value in config_dict.items():
if (
key not in default_config_dict
or key == "transformers_version"
or value != default_config_dict[key]
or (key in class_config_dict and value != class_config_dict[key])
):
serializable_config_dict[key] = value
self.dict_torch_dtype_to_str(serializable_config_dict)
return serializable_config_dict
def to_dict(self) -> Dict[str, Any]:
output = copy.deepcopy(self.__dict__)
if hasattr(self.__class__, "model_type"):
output["model_type"] = self.__class__.model_type
output["transformers_version"] = __version__
self.dict_torch_dtype_to_str(output)
return output
def to_json_string(self, use_diff: bool = True) -> str:
if use_diff is True:
config_dict = self.to_diff_dict()
else:
config_dict = self.to_dict()
return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True):
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string(use_diff=use_diff))
def update(self, config_dict: Dict[str, Any]):
for key, value in config_dict.items():
setattr(self, key, value)
def update_from_string(self, update_str: str):
d = dict(x.split("=") for x in update_str.split(","))
for k, v in d.items():
if not hasattr(self, k):
raise ValueError(f"key {k} isn't in the original config dict")
old_v = getattr(self, k)
if isinstance(old_v, bool):
if v.lower() in ["true", "1", "y", "yes"]:
v = True
elif v.lower() in ["false", "0", "n", "no"]:
v = False
else:
raise ValueError(f"can't derive true or false from {v} (key {k})")
elif isinstance(old_v, int):
v = int(v)
elif isinstance(old_v, float):
v = float(v)
elif not isinstance(old_v, str):
raise ValueError(
f"You can only update int, float, bool or string values in the config, got {v} for key {k}"
)
setattr(self, k, v)
def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:
if d.get("torch_dtype", None) is not None and not isinstance(d["torch_dtype"], str):
d["torch_dtype"] = str(d["torch_dtype"]).split(".")[1]
PretrainedConfig.push_to_hub = copy_func(PretrainedConfig.push_to_hub)
PretrainedConfig.push_to_hub.__doc__ = PretrainedConfig.push_to_hub.__doc__.format(
object="config", object_class="AutoConfig", object_files="configuration file"
)
| true
| true
|
f71a4b33d3f34e4911b2fce2af6caffbdfbb62bf
| 1,326
|
py
|
Python
|
Assessments/count-contained-permutations.py
|
SaumyaRai2010/algoexpert-data-structures-algorithms
|
bcafd8d7798661bf86c2d6234221d764c68fc19f
|
[
"MIT"
] | 2
|
2021-08-17T14:13:01.000Z
|
2021-08-17T14:13:16.000Z
|
Assessments/count-contained-permutations.py
|
SaumyaRai2010/algoexpert-data-structures-algorithms
|
bcafd8d7798661bf86c2d6234221d764c68fc19f
|
[
"MIT"
] | null | null | null |
Assessments/count-contained-permutations.py
|
SaumyaRai2010/algoexpert-data-structures-algorithms
|
bcafd8d7798661bf86c2d6234221d764c68fc19f
|
[
"MIT"
] | null | null | null |
# COUNT CONTAINED PERMUTATIONS
# O(M * U + N) time and O(U) space, where M -> length of big string,
# U -> number of unique characters in small string, N -> length
# of small string.
# U is actually a constant since it can't be greater than 26, and
# M > N, so the N term is absorbed into M
# So, modified complexities:
# O(M) time and O(1) space, M -> length of big string
def countContainedPermutations(bigString, smallString):
# Write your code here.
smallCount, bigCount = {}, {}
for letter in smallString:
if letter not in smallCount:
smallCount[letter] = 0
smallCount[letter] += 1
bigSize, smallSize = len(bigString), len(smallString)
start, end, totalCount = 0, 0, 0
while end < bigSize:
letterToAdd = bigString[end]
if letterToAdd not in bigCount:
bigCount[letterToAdd] = 0
bigCount[letterToAdd] += 1
if end - start == smallSize:
letterToRemove = bigString[start]
if bigCount[letterToRemove] == 1:
del bigCount[letterToRemove]
else:
bigCount[letterToRemove] -= 1
start += 1
if matchCounts(bigCount, smallCount):
totalCount += 1
end += 1
return totalCount
def matchCounts(bigCount, smallCount):
for letter in smallCount:
if letter not in bigCount:
return False
if smallCount[letter] != bigCount[letter]:
return False
return True
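# A minimal usage sketch (the strings below are illustrative and not part of the original file):
if __name__ == "__main__":
    # Counts every window of len("abc") characters in the big string whose
    # character counts match {"a": 1, "b": 1, "c": 1}.
    print(countContainedPermutations("cbabcacabca", "abc"))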
| 26
| 68
| 0.684766
|
# M > N, so the N term is absorbed into M
# So, modified complexities:
# O(M) time and O(1) space, M -> length of big string
def countContainedPermutations(bigString, smallString):
# Write your code here.
smallCount, bigCount = {}, {}
for letter in smallString:
if letter not in smallCount:
smallCount[letter] = 0
smallCount[letter] += 1
bigSize, smallSize = len(bigString), len(smallString)
start, end, totalCount = 0, 0, 0
while end < bigSize:
letterToAdd = bigString[end]
if letterToAdd not in bigCount:
bigCount[letterToAdd] = 0
bigCount[letterToAdd] += 1
if end - start == smallSize:
letterToRemove = bigString[start]
if bigCount[letterToRemove] == 1:
del bigCount[letterToRemove]
else:
bigCount[letterToRemove] -= 1
start += 1
if matchCounts(bigCount, smallCount):
totalCount += 1
end += 1
return totalCount
def matchCounts(bigCount, smallCount):
for letter in smallCount:
if letter not in bigCount:
return False
if smallCount[letter] != bigCount[letter]:
return False
return True
| false
| true
|
f71a4c3038f108011a235c4b7bce53875e9cbabb
| 173
|
py
|
Python
|
sentence-embedding/python-lib/dku_language_model/__init__.py
|
RedaAffane/dataiku-contrib
|
d409ddc25d31570972a14abb19a84ac101afc6cc
|
[
"Apache-2.0"
] | 1
|
2020-10-11T14:53:53.000Z
|
2020-10-11T14:53:53.000Z
|
sentence-embedding/python-lib/dku_language_model/__init__.py
|
RedaAffane/dataiku-contrib
|
d409ddc25d31570972a14abb19a84ac101afc6cc
|
[
"Apache-2.0"
] | 10
|
2020-04-24T13:14:42.000Z
|
2022-02-10T01:07:28.000Z
|
python-lib/dku_language_model/__init__.py
|
dataiku/dss-plugin-nlp-embedding
|
7805151307210e2be15d844728be4ace2d381f13
|
[
"Apache-2.0"
] | null | null | null |
from dku_language_model.context_independent_language_model import FasttextModel, Word2vecModel, GloveModel
from dku_language_model.contextual_language_model import ElmoModel
| 86.5
| 106
| 0.924855
|
from dku_language_model.context_independent_language_model import FasttextModel, Word2vecModel, GloveModel
from dku_language_model.contextual_language_model import ElmoModel
| true
| true
|
f71a4cd9e12534305a660dab19c40de08f3f20a3
| 6,545
|
py
|
Python
|
loldib/getratings/models/NA/na_syndra/na_syndra_jng.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_syndra/na_syndra_jng.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_syndra/na_syndra_jng.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
from getratings.models.ratings import Ratings
class NA_Syndra_Jng_Aatrox(Ratings):
pass
class NA_Syndra_Jng_Ahri(Ratings):
pass
class NA_Syndra_Jng_Akali(Ratings):
pass
class NA_Syndra_Jng_Alistar(Ratings):
pass
class NA_Syndra_Jng_Amumu(Ratings):
pass
class NA_Syndra_Jng_Anivia(Ratings):
pass
class NA_Syndra_Jng_Annie(Ratings):
pass
class NA_Syndra_Jng_Ashe(Ratings):
pass
class NA_Syndra_Jng_AurelionSol(Ratings):
pass
class NA_Syndra_Jng_Azir(Ratings):
pass
class NA_Syndra_Jng_Bard(Ratings):
pass
class NA_Syndra_Jng_Blitzcrank(Ratings):
pass
class NA_Syndra_Jng_Brand(Ratings):
pass
class NA_Syndra_Jng_Braum(Ratings):
pass
class NA_Syndra_Jng_Caitlyn(Ratings):
pass
class NA_Syndra_Jng_Camille(Ratings):
pass
class NA_Syndra_Jng_Cassiopeia(Ratings):
pass
class NA_Syndra_Jng_Chogath(Ratings):
pass
class NA_Syndra_Jng_Corki(Ratings):
pass
class NA_Syndra_Jng_Darius(Ratings):
pass
class NA_Syndra_Jng_Diana(Ratings):
pass
class NA_Syndra_Jng_Draven(Ratings):
pass
class NA_Syndra_Jng_DrMundo(Ratings):
pass
class NA_Syndra_Jng_Ekko(Ratings):
pass
class NA_Syndra_Jng_Elise(Ratings):
pass
class NA_Syndra_Jng_Evelynn(Ratings):
pass
class NA_Syndra_Jng_Ezreal(Ratings):
pass
class NA_Syndra_Jng_Fiddlesticks(Ratings):
pass
class NA_Syndra_Jng_Fiora(Ratings):
pass
class NA_Syndra_Jng_Fizz(Ratings):
pass
class NA_Syndra_Jng_Galio(Ratings):
pass
class NA_Syndra_Jng_Gangplank(Ratings):
pass
class NA_Syndra_Jng_Garen(Ratings):
pass
class NA_Syndra_Jng_Gnar(Ratings):
pass
class NA_Syndra_Jng_Gragas(Ratings):
pass
class NA_Syndra_Jng_Graves(Ratings):
pass
class NA_Syndra_Jng_Hecarim(Ratings):
pass
class NA_Syndra_Jng_Heimerdinger(Ratings):
pass
class NA_Syndra_Jng_Illaoi(Ratings):
pass
class NA_Syndra_Jng_Irelia(Ratings):
pass
class NA_Syndra_Jng_Ivern(Ratings):
pass
class NA_Syndra_Jng_Janna(Ratings):
pass
class NA_Syndra_Jng_JarvanIV(Ratings):
pass
class NA_Syndra_Jng_Jax(Ratings):
pass
class NA_Syndra_Jng_Jayce(Ratings):
pass
class NA_Syndra_Jng_Jhin(Ratings):
pass
class NA_Syndra_Jng_Jinx(Ratings):
pass
class NA_Syndra_Jng_Kalista(Ratings):
pass
class NA_Syndra_Jng_Karma(Ratings):
pass
class NA_Syndra_Jng_Karthus(Ratings):
pass
class NA_Syndra_Jng_Kassadin(Ratings):
pass
class NA_Syndra_Jng_Katarina(Ratings):
pass
class NA_Syndra_Jng_Kayle(Ratings):
pass
class NA_Syndra_Jng_Kayn(Ratings):
pass
class NA_Syndra_Jng_Kennen(Ratings):
pass
class NA_Syndra_Jng_Khazix(Ratings):
pass
class NA_Syndra_Jng_Kindred(Ratings):
pass
class NA_Syndra_Jng_Kled(Ratings):
pass
class NA_Syndra_Jng_KogMaw(Ratings):
pass
class NA_Syndra_Jng_Leblanc(Ratings):
pass
class NA_Syndra_Jng_LeeSin(Ratings):
pass
class NA_Syndra_Jng_Leona(Ratings):
pass
class NA_Syndra_Jng_Lissandra(Ratings):
pass
class NA_Syndra_Jng_Lucian(Ratings):
pass
class NA_Syndra_Jng_Lulu(Ratings):
pass
class NA_Syndra_Jng_Lux(Ratings):
pass
class NA_Syndra_Jng_Malphite(Ratings):
pass
class NA_Syndra_Jng_Malzahar(Ratings):
pass
class NA_Syndra_Jng_Maokai(Ratings):
pass
class NA_Syndra_Jng_MasterYi(Ratings):
pass
class NA_Syndra_Jng_MissFortune(Ratings):
pass
class NA_Syndra_Jng_MonkeyKing(Ratings):
pass
class NA_Syndra_Jng_Mordekaiser(Ratings):
pass
class NA_Syndra_Jng_Morgana(Ratings):
pass
class NA_Syndra_Jng_Nami(Ratings):
pass
class NA_Syndra_Jng_Nasus(Ratings):
pass
class NA_Syndra_Jng_Nautilus(Ratings):
pass
class NA_Syndra_Jng_Nidalee(Ratings):
pass
class NA_Syndra_Jng_Nocturne(Ratings):
pass
class NA_Syndra_Jng_Nunu(Ratings):
pass
class NA_Syndra_Jng_Olaf(Ratings):
pass
class NA_Syndra_Jng_Orianna(Ratings):
pass
class NA_Syndra_Jng_Ornn(Ratings):
pass
class NA_Syndra_Jng_Pantheon(Ratings):
pass
class NA_Syndra_Jng_Poppy(Ratings):
pass
class NA_Syndra_Jng_Quinn(Ratings):
pass
class NA_Syndra_Jng_Rakan(Ratings):
pass
class NA_Syndra_Jng_Rammus(Ratings):
pass
class NA_Syndra_Jng_RekSai(Ratings):
pass
class NA_Syndra_Jng_Renekton(Ratings):
pass
class NA_Syndra_Jng_Rengar(Ratings):
pass
class NA_Syndra_Jng_Riven(Ratings):
pass
class NA_Syndra_Jng_Rumble(Ratings):
pass
class NA_Syndra_Jng_Ryze(Ratings):
pass
class NA_Syndra_Jng_Sejuani(Ratings):
pass
class NA_Syndra_Jng_Shaco(Ratings):
pass
class NA_Syndra_Jng_Shen(Ratings):
pass
class NA_Syndra_Jng_Shyvana(Ratings):
pass
class NA_Syndra_Jng_Singed(Ratings):
pass
class NA_Syndra_Jng_Sion(Ratings):
pass
class NA_Syndra_Jng_Sivir(Ratings):
pass
class NA_Syndra_Jng_Skarner(Ratings):
pass
class NA_Syndra_Jng_Sona(Ratings):
pass
class NA_Syndra_Jng_Soraka(Ratings):
pass
class NA_Syndra_Jng_Swain(Ratings):
pass
class NA_Syndra_Jng_Syndra(Ratings):
pass
class NA_Syndra_Jng_TahmKench(Ratings):
pass
class NA_Syndra_Jng_Taliyah(Ratings):
pass
class NA_Syndra_Jng_Talon(Ratings):
pass
class NA_Syndra_Jng_Taric(Ratings):
pass
class NA_Syndra_Jng_Teemo(Ratings):
pass
class NA_Syndra_Jng_Thresh(Ratings):
pass
class NA_Syndra_Jng_Tristana(Ratings):
pass
class NA_Syndra_Jng_Trundle(Ratings):
pass
class NA_Syndra_Jng_Tryndamere(Ratings):
pass
class NA_Syndra_Jng_TwistedFate(Ratings):
pass
class NA_Syndra_Jng_Twitch(Ratings):
pass
class NA_Syndra_Jng_Udyr(Ratings):
pass
class NA_Syndra_Jng_Urgot(Ratings):
pass
class NA_Syndra_Jng_Varus(Ratings):
pass
class NA_Syndra_Jng_Vayne(Ratings):
pass
class NA_Syndra_Jng_Veigar(Ratings):
pass
class NA_Syndra_Jng_Velkoz(Ratings):
pass
class NA_Syndra_Jng_Vi(Ratings):
pass
class NA_Syndra_Jng_Viktor(Ratings):
pass
class NA_Syndra_Jng_Vladimir(Ratings):
pass
class NA_Syndra_Jng_Volibear(Ratings):
pass
class NA_Syndra_Jng_Warwick(Ratings):
pass
class NA_Syndra_Jng_Xayah(Ratings):
pass
class NA_Syndra_Jng_Xerath(Ratings):
pass
class NA_Syndra_Jng_XinZhao(Ratings):
pass
class NA_Syndra_Jng_Yasuo(Ratings):
pass
class NA_Syndra_Jng_Yorick(Ratings):
pass
class NA_Syndra_Jng_Zac(Ratings):
pass
class NA_Syndra_Jng_Zed(Ratings):
pass
class NA_Syndra_Jng_Ziggs(Ratings):
pass
class NA_Syndra_Jng_Zilean(Ratings):
pass
class NA_Syndra_Jng_Zyra(Ratings):
pass
| 15.695444
| 46
| 0.766692
|
from getratings.models.ratings import Ratings
class NA_Syndra_Jng_Aatrox(Ratings):
pass
class NA_Syndra_Jng_Ahri(Ratings):
pass
class NA_Syndra_Jng_Akali(Ratings):
pass
class NA_Syndra_Jng_Alistar(Ratings):
pass
class NA_Syndra_Jng_Amumu(Ratings):
pass
class NA_Syndra_Jng_Anivia(Ratings):
pass
class NA_Syndra_Jng_Annie(Ratings):
pass
class NA_Syndra_Jng_Ashe(Ratings):
pass
class NA_Syndra_Jng_AurelionSol(Ratings):
pass
class NA_Syndra_Jng_Azir(Ratings):
pass
class NA_Syndra_Jng_Bard(Ratings):
pass
class NA_Syndra_Jng_Blitzcrank(Ratings):
pass
class NA_Syndra_Jng_Brand(Ratings):
pass
class NA_Syndra_Jng_Braum(Ratings):
pass
class NA_Syndra_Jng_Caitlyn(Ratings):
pass
class NA_Syndra_Jng_Camille(Ratings):
pass
class NA_Syndra_Jng_Cassiopeia(Ratings):
pass
class NA_Syndra_Jng_Chogath(Ratings):
pass
class NA_Syndra_Jng_Corki(Ratings):
pass
class NA_Syndra_Jng_Darius(Ratings):
pass
class NA_Syndra_Jng_Diana(Ratings):
pass
class NA_Syndra_Jng_Draven(Ratings):
pass
class NA_Syndra_Jng_DrMundo(Ratings):
pass
class NA_Syndra_Jng_Ekko(Ratings):
pass
class NA_Syndra_Jng_Elise(Ratings):
pass
class NA_Syndra_Jng_Evelynn(Ratings):
pass
class NA_Syndra_Jng_Ezreal(Ratings):
pass
class NA_Syndra_Jng_Fiddlesticks(Ratings):
pass
class NA_Syndra_Jng_Fiora(Ratings):
pass
class NA_Syndra_Jng_Fizz(Ratings):
pass
class NA_Syndra_Jng_Galio(Ratings):
pass
class NA_Syndra_Jng_Gangplank(Ratings):
pass
class NA_Syndra_Jng_Garen(Ratings):
pass
class NA_Syndra_Jng_Gnar(Ratings):
pass
class NA_Syndra_Jng_Gragas(Ratings):
pass
class NA_Syndra_Jng_Graves(Ratings):
pass
class NA_Syndra_Jng_Hecarim(Ratings):
pass
class NA_Syndra_Jng_Heimerdinger(Ratings):
pass
class NA_Syndra_Jng_Illaoi(Ratings):
pass
class NA_Syndra_Jng_Irelia(Ratings):
pass
class NA_Syndra_Jng_Ivern(Ratings):
pass
class NA_Syndra_Jng_Janna(Ratings):
pass
class NA_Syndra_Jng_JarvanIV(Ratings):
pass
class NA_Syndra_Jng_Jax(Ratings):
pass
class NA_Syndra_Jng_Jayce(Ratings):
pass
class NA_Syndra_Jng_Jhin(Ratings):
pass
class NA_Syndra_Jng_Jinx(Ratings):
pass
class NA_Syndra_Jng_Kalista(Ratings):
pass
class NA_Syndra_Jng_Karma(Ratings):
pass
class NA_Syndra_Jng_Karthus(Ratings):
pass
class NA_Syndra_Jng_Kassadin(Ratings):
pass
class NA_Syndra_Jng_Katarina(Ratings):
pass
class NA_Syndra_Jng_Kayle(Ratings):
pass
class NA_Syndra_Jng_Kayn(Ratings):
pass
class NA_Syndra_Jng_Kennen(Ratings):
pass
class NA_Syndra_Jng_Khazix(Ratings):
pass
class NA_Syndra_Jng_Kindred(Ratings):
pass
class NA_Syndra_Jng_Kled(Ratings):
pass
class NA_Syndra_Jng_KogMaw(Ratings):
pass
class NA_Syndra_Jng_Leblanc(Ratings):
pass
class NA_Syndra_Jng_LeeSin(Ratings):
pass
class NA_Syndra_Jng_Leona(Ratings):
pass
class NA_Syndra_Jng_Lissandra(Ratings):
pass
class NA_Syndra_Jng_Lucian(Ratings):
pass
class NA_Syndra_Jng_Lulu(Ratings):
pass
class NA_Syndra_Jng_Lux(Ratings):
pass
class NA_Syndra_Jng_Malphite(Ratings):
pass
class NA_Syndra_Jng_Malzahar(Ratings):
pass
class NA_Syndra_Jng_Maokai(Ratings):
pass
class NA_Syndra_Jng_MasterYi(Ratings):
pass
class NA_Syndra_Jng_MissFortune(Ratings):
pass
class NA_Syndra_Jng_MonkeyKing(Ratings):
pass
class NA_Syndra_Jng_Mordekaiser(Ratings):
pass
class NA_Syndra_Jng_Morgana(Ratings):
pass
class NA_Syndra_Jng_Nami(Ratings):
pass
class NA_Syndra_Jng_Nasus(Ratings):
pass
class NA_Syndra_Jng_Nautilus(Ratings):
pass
class NA_Syndra_Jng_Nidalee(Ratings):
pass
class NA_Syndra_Jng_Nocturne(Ratings):
pass
class NA_Syndra_Jng_Nunu(Ratings):
pass
class NA_Syndra_Jng_Olaf(Ratings):
pass
class NA_Syndra_Jng_Orianna(Ratings):
pass
class NA_Syndra_Jng_Ornn(Ratings):
pass
class NA_Syndra_Jng_Pantheon(Ratings):
pass
class NA_Syndra_Jng_Poppy(Ratings):
pass
class NA_Syndra_Jng_Quinn(Ratings):
pass
class NA_Syndra_Jng_Rakan(Ratings):
pass
class NA_Syndra_Jng_Rammus(Ratings):
pass
class NA_Syndra_Jng_RekSai(Ratings):
pass
class NA_Syndra_Jng_Renekton(Ratings):
pass
class NA_Syndra_Jng_Rengar(Ratings):
pass
class NA_Syndra_Jng_Riven(Ratings):
pass
class NA_Syndra_Jng_Rumble(Ratings):
pass
class NA_Syndra_Jng_Ryze(Ratings):
pass
class NA_Syndra_Jng_Sejuani(Ratings):
pass
class NA_Syndra_Jng_Shaco(Ratings):
pass
class NA_Syndra_Jng_Shen(Ratings):
pass
class NA_Syndra_Jng_Shyvana(Ratings):
pass
class NA_Syndra_Jng_Singed(Ratings):
pass
class NA_Syndra_Jng_Sion(Ratings):
pass
class NA_Syndra_Jng_Sivir(Ratings):
pass
class NA_Syndra_Jng_Skarner(Ratings):
pass
class NA_Syndra_Jng_Sona(Ratings):
pass
class NA_Syndra_Jng_Soraka(Ratings):
pass
class NA_Syndra_Jng_Swain(Ratings):
pass
class NA_Syndra_Jng_Syndra(Ratings):
pass
class NA_Syndra_Jng_TahmKench(Ratings):
pass
class NA_Syndra_Jng_Taliyah(Ratings):
pass
class NA_Syndra_Jng_Talon(Ratings):
pass
class NA_Syndra_Jng_Taric(Ratings):
pass
class NA_Syndra_Jng_Teemo(Ratings):
pass
class NA_Syndra_Jng_Thresh(Ratings):
pass
class NA_Syndra_Jng_Tristana(Ratings):
pass
class NA_Syndra_Jng_Trundle(Ratings):
pass
class NA_Syndra_Jng_Tryndamere(Ratings):
pass
class NA_Syndra_Jng_TwistedFate(Ratings):
pass
class NA_Syndra_Jng_Twitch(Ratings):
pass
class NA_Syndra_Jng_Udyr(Ratings):
pass
class NA_Syndra_Jng_Urgot(Ratings):
pass
class NA_Syndra_Jng_Varus(Ratings):
pass
class NA_Syndra_Jng_Vayne(Ratings):
pass
class NA_Syndra_Jng_Veigar(Ratings):
pass
class NA_Syndra_Jng_Velkoz(Ratings):
pass
class NA_Syndra_Jng_Vi(Ratings):
pass
class NA_Syndra_Jng_Viktor(Ratings):
pass
class NA_Syndra_Jng_Vladimir(Ratings):
pass
class NA_Syndra_Jng_Volibear(Ratings):
pass
class NA_Syndra_Jng_Warwick(Ratings):
pass
class NA_Syndra_Jng_Xayah(Ratings):
pass
class NA_Syndra_Jng_Xerath(Ratings):
pass
class NA_Syndra_Jng_XinZhao(Ratings):
pass
class NA_Syndra_Jng_Yasuo(Ratings):
pass
class NA_Syndra_Jng_Yorick(Ratings):
pass
class NA_Syndra_Jng_Zac(Ratings):
pass
class NA_Syndra_Jng_Zed(Ratings):
pass
class NA_Syndra_Jng_Ziggs(Ratings):
pass
class NA_Syndra_Jng_Zilean(Ratings):
pass
class NA_Syndra_Jng_Zyra(Ratings):
pass
| true
| true
|
f71a4d115d47444e362a89b60f0c30a6669b0ce0
| 606
|
py
|
Python
|
setup.py
|
donno2048/BS
|
ef6539a75770031da5838d1ecdeb83e49e63cf7e
|
[
"MIT"
] | null | null | null |
setup.py
|
donno2048/BS
|
ef6539a75770031da5838d1ecdeb83e49e63cf7e
|
[
"MIT"
] | null | null | null |
setup.py
|
donno2048/BS
|
ef6539a75770031da5838d1ecdeb83e49e63cf7e
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name='backboard',
version='1.0.3',
description='Background noises for your keyboard typing',
long_description=open('README.md').read(),
long_description_content_type="text/markdown",
url='https://github.com/donno2048/BS',
packages=find_packages(),
license='MIT',
author='Elisha Hollander',
classifiers=['Programming Language :: Python :: 3'],
install_requires=['pygame>=1.9.6','keyboard>=0.13.5','numpy>=1.20.3','scipy>=1.6.3'],
entry_points={ 'console_scripts': [ 'backboard=backboard.__main__:main' ] }
)
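For context, the entry_points declaration above means that, once the package is installed (for example with pip install . from a checkout), a backboard console command dispatches to backboard.__main__:main. A minimal sketch of the equivalent direct call, assuming the package is importable in the current environment:
from backboard.__main__ import main

main()  # the same function the 'backboard' console script invokes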
| 37.875
| 89
| 0.684818
|
from setuptools import setup, find_packages
setup(
name='backboard',
version='1.0.3',
description='Background noises for your keyboard typing',
long_description=open('README.md').read(),
long_description_content_type="text/markdown",
url='https://github.com/donno2048/BS',
packages=find_packages(),
license='MIT',
author='Elisha Hollander',
classifiers=['Programming Language :: Python :: 3'],
install_requires=['pygame>=1.9.6','keyboard>=0.13.5','numpy>=1.20.3','scipy>=1.6.3'],
entry_points={ 'console_scripts': [ 'backboard=backboard.__main__:main' ] }
)
| true
| true
|
f71a4d41c019c5c22fe4a775dccecbf2510b5ece
| 7,159
|
py
|
Python
|
flash/image/classification/integrations/baal/loop.py
|
twsl/lightning-flash
|
9927853ac23551b444dbe969e287879c69be4094
|
[
"Apache-2.0"
] | null | null | null |
flash/image/classification/integrations/baal/loop.py
|
twsl/lightning-flash
|
9927853ac23551b444dbe969e287879c69be4094
|
[
"Apache-2.0"
] | null | null | null |
flash/image/classification/integrations/baal/loop.py
|
twsl/lightning-flash
|
9927853ac23551b444dbe969e287879c69be4094
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from typing import Any, Dict, Optional
import torch
from pytorch_lightning.loops import Loop
from pytorch_lightning.loops.fit_loop import FitLoop
from pytorch_lightning.trainer.connectors.data_connector import _PatchDataLoader
from pytorch_lightning.trainer.progress import Progress
from pytorch_lightning.trainer.states import TrainerFn, TrainerStatus
import flash
from flash.core.data.utils import _STAGES_PREFIX
from flash.core.utilities.imports import requires
from flash.core.utilities.stages import RunningStage
from flash.image.classification.integrations.baal.data import ActiveLearningDataModule
from flash.image.classification.integrations.baal.dropout import InferenceMCDropoutTask
class ActiveLearningLoop(Loop):
@requires("baal")
def __init__(self, label_epoch_frequency: int, inference_iteration: int = 2, should_reset_weights: bool = True):
"""The `ActiveLearning Loop` describes the following training procedure. This loop is connected with the
`ActiveLearningTrainer`
Example::
while unlabelled data or budget critera not reached:
if labelled data
trainer.fit(model, labelled data)
if unlabelled data:
predictions = trainer.predict(model, unlabelled data)
uncertainties = heuristic(predictions)
request labellelisation for the sample with highest uncertainties under a given budget
Args:
label_epoch_frequency: Number of epoch to train on before requesting labellisation.
inference_iteration: Number of inference to perform to compute uncertainty.
"""
super().__init__()
self.label_epoch_frequency = label_epoch_frequency
self.inference_iteration = inference_iteration
self.should_reset_weights = should_reset_weights
self.fit_loop: Optional[FitLoop] = None
self.progress = Progress()
self._model_state_dict: Optional[Dict[str, torch.Tensor]] = None
self._lightning_module: Optional[flash.Task] = None
@property
def done(self) -> bool:
return self.progress.current.completed >= self.max_epochs
def connect(self, fit_loop: FitLoop):
self.fit_loop = fit_loop
self.max_epochs = self.fit_loop.max_epochs
self.fit_loop.max_epochs = self.label_epoch_frequency
def on_run_start(self, *args: Any, **kwargs: Any) -> None:
assert isinstance(self.trainer.datamodule, ActiveLearningDataModule)
self.trainer.predict_loop._return_predictions = True
self._lightning_module = self.trainer.lightning_module
self._model_state_dict = deepcopy(self._lightning_module.state_dict())
self.inference_model = InferenceMCDropoutTask(self._lightning_module, self.inference_iteration)
def reset(self) -> None:
pass
def on_advance_start(self, *args: Any, **kwargs: Any) -> None:
if self.trainer.datamodule.has_labelled_data:
self._reset_dataloader_for_stage(RunningStage.TRAINING)
self._reset_dataloader_for_stage(RunningStage.VALIDATING)
if self.trainer.datamodule.has_test:
self._reset_dataloader_for_stage(RunningStage.TESTING)
if self.trainer.datamodule.has_unlabelled_data:
self._reset_dataloader_for_stage(RunningStage.PREDICTING)
self.progress.increment_ready()
def advance(self, *args: Any, **kwargs: Any) -> None:
self.progress.increment_started()
if self.trainer.datamodule.has_labelled_data:
self.fit_loop.run()
if self.trainer.datamodule.has_test:
self._reset_testing()
metrics = self.trainer.test_loop.run()
if metrics:
self.trainer.logger.log_metrics(metrics[0], step=self.trainer.global_step)
if self.trainer.datamodule.has_unlabelled_data:
self._reset_predicting()
probabilities = self.trainer.predict_loop.run()
self.trainer.datamodule.label(probabilities=probabilities)
else:
raise StopIteration
self._reset_fitting()
self.progress.increment_processed()
def on_advance_end(self) -> None:
if self.trainer.datamodule.has_unlabelled_data and self.should_reset_weights:
# reload the weights to retrain from scratch with the new labelled data.
self._lightning_module.load_state_dict(self._model_state_dict)
self.progress.increment_completed()
return super().on_advance_end()
def on_run_end(self):
self._reset_fitting()
return super().on_run_end()
def on_save_checkpoint(self) -> Dict:
return {"datamodule_state_dict": self.trainer.datamodule.state_dict()}
def on_load_checkpoint(self, state_dict) -> None:
self.trainer.datamodule.load_state_dict(state_dict.pop("datamodule_state_dict"))
def __getattr__(self, key):
if key not in self.__dict__:
return getattr(self.fit_loop, key)
return self.__dict__[key]
def _reset_fitting(self):
self.trainer.state.fn = TrainerFn.FITTING
self.trainer.training = True
self.trainer.lightning_module.on_train_dataloader()
self.trainer.accelerator.connect(self._lightning_module)
self.fit_loop.epoch_progress = Progress()
def _reset_predicting(self):
self.trainer.state.fn = TrainerFn.PREDICTING
self.trainer.predicting = True
self.trainer.lightning_module.on_predict_dataloader()
self.trainer.accelerator.connect(self.inference_model)
def _reset_testing(self):
self.trainer.state.fn = TrainerFn.TESTING
self.trainer.state.status = TrainerStatus.RUNNING
self.trainer.testing = True
self.trainer.lightning_module.on_test_dataloader()
self.trainer.accelerator.connect(self._lightning_module)
def _reset_dataloader_for_stage(self, running_state: RunningStage):
dataloader_name = f"{_STAGES_PREFIX[running_state]}_dataloader"
# If the dataloader exists, we reset it.
dataloader = getattr(self.trainer.datamodule, dataloader_name, None)
if dataloader:
setattr(
self.trainer.lightning_module,
dataloader_name,
_PatchDataLoader(dataloader(), running_state),
)
setattr(self.trainer, dataloader_name, None)
getattr(self.trainer, f"reset_{dataloader_name}")(self.trainer.lightning_module)
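A minimal wiring sketch for the loop above, assuming the Lightning loop-customisation pattern of that release line (replacing trainer.fit_loop) and placeholder model/datamodule objects built elsewhere; this is an illustration of how connect() is meant to be used, not the exact ActiveLearningTrainer implementation:
import flash

# `model` and `active_datamodule` are hypothetical placeholders: a flash.Task and an
# ActiveLearningDataModule constructed elsewhere.
active_loop = ActiveLearningLoop(label_epoch_frequency=1, inference_iteration=2)
trainer = flash.Trainer(max_epochs=10)
active_loop.connect(trainer.fit_loop)  # keep a handle on the default FitLoop
trainer.fit_loop = active_loop         # the active-learning loop now drives fitting
# trainer.fit(model, datamodule=active_datamodule)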
| 42.613095
| 116
| 0.70778
|
from copy import deepcopy
from typing import Any, Dict, Optional
import torch
from pytorch_lightning.loops import Loop
from pytorch_lightning.loops.fit_loop import FitLoop
from pytorch_lightning.trainer.connectors.data_connector import _PatchDataLoader
from pytorch_lightning.trainer.progress import Progress
from pytorch_lightning.trainer.states import TrainerFn, TrainerStatus
import flash
from flash.core.data.utils import _STAGES_PREFIX
from flash.core.utilities.imports import requires
from flash.core.utilities.stages import RunningStage
from flash.image.classification.integrations.baal.data import ActiveLearningDataModule
from flash.image.classification.integrations.baal.dropout import InferenceMCDropoutTask
class ActiveLearningLoop(Loop):
@requires("baal")
def __init__(self, label_epoch_frequency: int, inference_iteration: int = 2, should_reset_weights: bool = True):
super().__init__()
self.label_epoch_frequency = label_epoch_frequency
self.inference_iteration = inference_iteration
self.should_reset_weights = should_reset_weights
self.fit_loop: Optional[FitLoop] = None
self.progress = Progress()
self._model_state_dict: Optional[Dict[str, torch.Tensor]] = None
self._lightning_module: Optional[flash.Task] = None
@property
def done(self) -> bool:
return self.progress.current.completed >= self.max_epochs
def connect(self, fit_loop: FitLoop):
self.fit_loop = fit_loop
self.max_epochs = self.fit_loop.max_epochs
self.fit_loop.max_epochs = self.label_epoch_frequency
def on_run_start(self, *args: Any, **kwargs: Any) -> None:
assert isinstance(self.trainer.datamodule, ActiveLearningDataModule)
self.trainer.predict_loop._return_predictions = True
self._lightning_module = self.trainer.lightning_module
self._model_state_dict = deepcopy(self._lightning_module.state_dict())
self.inference_model = InferenceMCDropoutTask(self._lightning_module, self.inference_iteration)
def reset(self) -> None:
pass
def on_advance_start(self, *args: Any, **kwargs: Any) -> None:
if self.trainer.datamodule.has_labelled_data:
self._reset_dataloader_for_stage(RunningStage.TRAINING)
self._reset_dataloader_for_stage(RunningStage.VALIDATING)
if self.trainer.datamodule.has_test:
self._reset_dataloader_for_stage(RunningStage.TESTING)
if self.trainer.datamodule.has_unlabelled_data:
self._reset_dataloader_for_stage(RunningStage.PREDICTING)
self.progress.increment_ready()
def advance(self, *args: Any, **kwargs: Any) -> None:
self.progress.increment_started()
if self.trainer.datamodule.has_labelled_data:
self.fit_loop.run()
if self.trainer.datamodule.has_test:
self._reset_testing()
metrics = self.trainer.test_loop.run()
if metrics:
self.trainer.logger.log_metrics(metrics[0], step=self.trainer.global_step)
if self.trainer.datamodule.has_unlabelled_data:
self._reset_predicting()
probabilities = self.trainer.predict_loop.run()
self.trainer.datamodule.label(probabilities=probabilities)
else:
raise StopIteration
self._reset_fitting()
self.progress.increment_processed()
def on_advance_end(self) -> None:
if self.trainer.datamodule.has_unlabelled_data and self.should_reset_weights:
self._lightning_module.load_state_dict(self._model_state_dict)
self.progress.increment_completed()
return super().on_advance_end()
def on_run_end(self):
self._reset_fitting()
return super().on_run_end()
def on_save_checkpoint(self) -> Dict:
return {"datamodule_state_dict": self.trainer.datamodule.state_dict()}
def on_load_checkpoint(self, state_dict) -> None:
self.trainer.datamodule.load_state_dict(state_dict.pop("datamodule_state_dict"))
def __getattr__(self, key):
if key not in self.__dict__:
return getattr(self.fit_loop, key)
return self.__dict__[key]
def _reset_fitting(self):
self.trainer.state.fn = TrainerFn.FITTING
self.trainer.training = True
self.trainer.lightning_module.on_train_dataloader()
self.trainer.accelerator.connect(self._lightning_module)
self.fit_loop.epoch_progress = Progress()
def _reset_predicting(self):
self.trainer.state.fn = TrainerFn.PREDICTING
self.trainer.predicting = True
self.trainer.lightning_module.on_predict_dataloader()
self.trainer.accelerator.connect(self.inference_model)
def _reset_testing(self):
self.trainer.state.fn = TrainerFn.TESTING
self.trainer.state.status = TrainerStatus.RUNNING
self.trainer.testing = True
self.trainer.lightning_module.on_test_dataloader()
self.trainer.accelerator.connect(self._lightning_module)
def _reset_dataloader_for_stage(self, running_state: RunningStage):
dataloader_name = f"{_STAGES_PREFIX[running_state]}_dataloader"
dataloader = getattr(self.trainer.datamodule, dataloader_name, None)
if dataloader:
setattr(
self.trainer.lightning_module,
dataloader_name,
_PatchDataLoader(dataloader(), running_state),
)
setattr(self.trainer, dataloader_name, None)
getattr(self.trainer, f"reset_{dataloader_name}")(self.trainer.lightning_module)
| true
| true
|
f71a4e72b400b79bef2912f5ab1fa11a4cf0e50a
| 15,580
|
py
|
Python
|
GUI_functions/build_asp.py
|
AntonAlbertovich/Eusocial-Cluster-Utility
|
fef4f583b6151bb40e54d6825d65d668581c2121
|
[
"MIT"
] | 2
|
2019-03-22T15:08:31.000Z
|
2019-03-23T20:10:40.000Z
|
GUI_functions/build_asp.py
|
AntonAlbertovich/Eusocial-Cluster-Utility
|
fef4f583b6151bb40e54d6825d65d668581c2121
|
[
"MIT"
] | 1
|
2019-03-23T20:08:12.000Z
|
2019-03-23T20:08:12.000Z
|
GUI_functions/build_asp.py
|
AntonAlbertovich/Eusocial-Cluster-Utility
|
fef4f583b6151bb40e54d6825d65d668581c2121
|
[
"MIT"
] | 1
|
2019-03-23T19:56:07.000Z
|
2019-03-23T19:56:07.000Z
|
import pickle
# This is the script which builds an ASP model intended to be solved with clingo.
# This program has been tested on Ubuntu 18.04 and CentOS 7.
# Using Clingo 5.3.0 installed via Conda
# Parsing the output of this program will require clyngor-with-clingo which may be installed via pip.
if __name__ == "__main__":
input_file= open("GUI_functions/Cluster_details.bin", "rb")
# Loads the data structure for the machines in the cluster.
all_macs= list(pickle.load(input_file))
input_file.close()
input_file= open("GUI_functions/Tasks_details.bin", "rb")
# Loads the data structure for the tasks of the cluster.
all_jobs= list(pickle.load(input_file))
input_file.close()
asp_file = open("GUI_functions/asp.lp", 'w')
# The program asp.lp is made.
asp_file.write("#include <incmode>. \n")
asp_file.write("#program base. \n")
asp_file.write("% A dynamically generated program.\n")
asp_file.write("% Made by build_asp.py using the data structures stored in Cluster_details.bin and Tasks_details.bin\n")
asp_file.write("% Define the fluents of the program. \n")
# this section writes a header to the asp.lp file.
asp_file.write("status(-done).\n")
asp_file.write("status(done).\n")
asp_file.write("location(home).\n")
asp_file.write("% location() describes the individual nodes/machines of a cluster. \n")
asp_file.write("% home is the ECU master directory on one machine in a given cluster. \n")
asp_file.write("% The machine which home is on is assumed to be directly connected to all other machines in the cluster. \n")
# Comment section detailing location and home.
for i in range(len(all_macs)):
mac = all_macs[i][0]
mac.replace(" ", "")
mac.lower()
asp_file.write("location("+mac+").\n")
for i in range(len(all_macs)):
# In the Cluster_details data structure there exist the details pertaining to which machines are networked to other machines.
# In this loop this data is used to build a model of the cluster's network in asp.lp.
mac = all_macs[i][0]
mac.replace(" ", "")
mac.lower()
asp_file.write("connection(home, "+mac+").\n")
# Here home is connected to all the machines in the cluster.
for j in range(len(all_macs[i][2])):
mac1 = all_macs[i][2][j]
mac1.replace(" ", "")
mac1.lower()
asp_file.write("connection("+mac+", "+mac1+").\n")
# Here the connection for each machine is modeled.
# At this time ECU does not assume two way edge connection.
# The graph representing the network of a cluster is thus a directed graph.
# This is a core feature of ECU.
asp_file.write("holds(F,0) :- init(F).\n")
asp_file.write("#program step(t).\n")
asp_file.write("{ move(X,Y,t) : task(X), location(Y)} :- holds(on(X,M),t-1), connection(M, Y).\n")
asp_file.write("0{ turn(X,Y,t)}1 :- status(Y), task(X), holds(on(X,Z),t), valid_on(X, Z).\n")
asp_file.write(":- move(X,Y,t), holds(on(X,Y1),t-1), Y == home.\n")
asp_file.write("% Programs can not be moved back into the home directory.\n")
asp_file.write(":- turn(X,Y,t), holds(at(X,done),t-1).\n")
asp_file.write("% Programs can not be executed if they are already complete.\n")
asp_file.write(":- turn(X,Y,t), holds(on(X,M),t), depends_on(X, X1), not holds(on(X1,M),t).\n")
# Comments detailing limits of move and turn.
asp_file.write("moved(X,t) :- move(X,Y,t).\n")
asp_file.write("% moved() indicated what task X was moved at turn t.\n")
# Comment detailing moved()
asp_file.write("turned(X,t) :- turn(X, Y, t).\n")
asp_file.write("% turn() indicated what task X was executed at what turn t.\n")
# Comment detailing turn()
asp_file.write("turned_at(X, M, t) :- turned(X, t), holds(on(X,M),t).\n")
asp_file.write("% turned_at() indicated what task X was executed at Machine M at what turn t.\n")
# Comment detailing turned_at()
asp_file.write("turned_with_2(M, X, X1, Z, t) :- turned(X,t), holds(on(X,M),t), thread_cost(X, C), turned(X1,t), holds(on(X1,M),t), thread_cost(X1, C1), X != X1, Z = C + C1.\n")
asp_file.write("turned_with_3(M, X, X1, X2, Z, t) :- turned(X,t), holds(on(X,M),t), thread_cost(X, C), turned_with_2(M, X1, X2, C1, t), X != X1, X != X2, Z = C + C1.\n")
asp_file.write("turned_with_4(M, X, X1, X2, X3, Z, t) :- turned(X,t), holds(on(X,M),t), thread_cost(X, C), turned_with_3(M, X1, X2, X3, C1, t), X != X1, X != X2, X != X3, Z = C + C1.\n")
asp_file.write(":- turned_with_2(M, X, X1, Z, t), machine_threads(M, C), Z > C.\n")
asp_file.write(":- turned_with_3(M, X, X1, X2, Z, t), machine_threads(M, C), Z > C.\n")
asp_file.write(":- turned_with_4(M, X, X1, X2, X3, Z, t), machine_threads(M, C), Z > C.\n")
asp_file.write(":- turned(X,t), holds(on(X,M),t), thread_cost(X, C), turned_with_4(M, X1, X2, X3, X4, C1, t), X != X1, X != X2, X != X3, X != X4.\n")
asp_file.write("% These rules allow for up to 4 task to be ran in parrallel on any one machine at a time, \n")
asp_file.write("% if and only if the sum of the thread cost of said tasks does not add up to a number greater than \n")
asp_file.write("% the core count of said machine. \n")
# Comment section detailing the rules which allow for parrallel taks execution on a machine while preventing an overloading of a the machine's multi-threading capabilities.
asp_file.write(":- turned_at(X, M, t), cuda_not_on(M), cuda_needed(X).\n")
asp_file.write(":- turned_at(X, M, t), spacy_not_on(M), spacy_needed(X).\n")
asp_file.write(":- turned_at(X, M, t), psutil_not_on(M), psutil_needed(X).\n")
asp_file.write(":- turned_at(X, M, t), clingo_not_on(M), clingo_needed(X).\n")
asp_file.write("% This section will prevent a program which requires a given toolkit from being scheduled to run on a machine\n")
asp_file.write("% which does not have said toolkit.\n")
asp_file.write(":- move(X, Z, Y1), turned(X, Y2), Y1 == Y2.\n")
asp_file.write(":- move(X, Z, Y1), move(X, Z, Y2), Y1 != Y2.\n")
asp_file.write(":- move(X, Z, T1), turned(X,T2), T1 > T2, nobody_depends_on(X).\n")
asp_file.write("% A program can not be moved and executed at the same time.\n")
# This section may not be needed as there is nothing wrong with creating duplicates of completed programs so long as they are needed.
#asp_file.write(":- move(X, Z1, Y), move(X, Z2, Y), Z1 != Z2.\n")
#asp_file.write("% A program can not be moved to two different locations at the same time.\n")
# By preventing a program from being moved to two different locations at the same time we prevent duplicates of programs from existing and proliferating throughout the system.
asp_file.write(":- turned(X1, T1), turned(X2, T2), depends_on(X2, X1), T1 >= T2, moved(X2,T).\n")
asp_file.write("% A program can executed before all of it's dependencies.\n")
asp_file.write("holds(on(X,Y),t) :- move(X,Y,t).\n")
asp_file.write("holds(on(X,Z),t) :- holds(on(X,Z),t-1), not moved(X,t).\n")
asp_file.write("holds(at(X,Y),t) :- turn(X,Y,t).\n")
asp_file.write("holds(at(X,Z),t) :- holds(at(X,Z),t-1), not turned(X,t).\n")
asp_file.write("valid_on(X, Y) :- thread_cost(X, Z1), machine_threads(Y, Z2), Z1 <= Z2.\n")
asp_file.write(":- os_needed(X, S), turned_at(X, M, t), not os_on(M, S), not -os_needed(X).\n")
asp_file.write(":- holds(on(X,M1),t), holds(on(X,M2),t), M1 != M2, holds(at(X,-done),t).\n")
asp_file.write("% A program can not be duplicated if it has not been executed.\n")
asp_file.write(":- holds(on(X,M1),t), holds(on(X,M2),t), M1 != M2, task(X1), task(X2), not depends_on(X1, X), not depends_on(X2, X), X1 != X2, turned_at(X1, M1, T1), turned_at(X2, M2, T2).\n")
asp_file.write("% A program can not be dupllicated if it is not the dependecy of at least two different later programs which are executed on atleast two diffent machines.\n")
# This prevents the over-duplication of dependencies.
# Given that sending programs is exspensive, limiting this process must be a priority.
asp_file.write("% An unfinished program can not be at to two different locations at the same time.\n")
asp_file.write("#program check(t).\n")
asp_file.write(":- query(t), goal(F), not holds(F,t).\n")
asp_file.write("#show move/3.\n")
asp_file.write("#show turned_at/3.\n")
asp_file.write("#program base.\n")
# Here all the tasks are added to the model
all_tasks= []
for i in range(len(all_jobs)):
job = all_jobs[i][0]
job = job.replace(" ", "")
job = job.replace(".", "_")
job = job.lower()
asp_file.write("task("+job+").\n")
all_tasks.append(job)
asp_file.write("os(ubuntu_DE).\n")
asp_file.write("os(centOS_7_DE).\n")
asp_file.write("os(centOS_7_NE).\n")
asp_file.write("os(debian).\n")
asp_file.write("os(red_hat).\n")
asp_file.write("os(no_os).\n")
# Here the needed toolkits for each task are added
for i in range(len(all_jobs)):
job = all_jobs[i][0]
job = job.replace(" ", "")
job = job.replace(".", "_")
job = job.lower()
for j in range(len(all_jobs[i][3])):
if all_jobs[i][3][j] == "CUDA":
asp_file.write("cuda_needed("+job+").\n")
elif all_jobs[i][3][j] == "psutil":
asp_file.write("psutil_needed("+job+").\n")
elif all_jobs[i][3][j] == "spaCy":
asp_file.write("spacy_needed("+job+").\n")
elif all_jobs[i][3][j] == "clingo":
asp_file.write("clingo_needed("+job+").\n")
# Here, if a toolkit is designated to be installed on a machine then this fact is added to the model.
for i in range(len(all_macs)):
mac = all_macs[i][0]
mac.replace(" ", "")
mac.lower()
for j in range(len(all_macs[i][3])):
if all_macs[i][3][j] == "CUDA":
asp_file.write("cuda_on("+mac+").\n")
elif all_macs[i][3][j] == "psutil":
asp_file.write("psutil_on("+mac+").\n")
elif all_macs[i][3][j] == "spaCy":
asp_file.write("spacy_on("+mac+").\n")
elif all_macs[i][3][j] == "clingo":
asp_file.write("clingo_on("+mac+").\n")
asp_file.write("% If a toolkit is not on on a machine then this rule is ture for that machine.\n")
asp_file.write("cuda_not_on(X) :- location(X), not cuda_on(X).\n")
asp_file.write("spacy_not_on(X) :- location(X), not spacy_on(X).\n")
asp_file.write("psutil_not_on(X) :- location(X), not psutil_on(X).\n")
asp_file.write("clingo_not_on(X) :- location(X), not clingo_on(X).\n")
asp_file.write("% If a task can only be executed on a specific OS then the rule os_needed() represents this in the model.\n")
for i in range(len(all_jobs)):
job = all_jobs[i][0]
job = job.replace(" ", "")
job = job.replace(".", "_")
job = job.lower()
if all_jobs[i][1][1] == "Ubuntu 18.04 [Desktop Edition]":
asp_file.write("os_needed("+job+", ubuntu_DE).\n")
elif all_jobs[i][1][1] == "CentOS 7 [Desktop Edition]":
asp_file.write("os_needed("+job+", centOS_7_DE).\n")
elif all_jobs[i][1][1] == "CentOS 7 [Node/server Edition]":
asp_file.write("os_needed("+job+", centOS_7_NE).\n")
elif all_jobs[i][1][1] == "Unlisted Debian based OS":
asp_file.write("os_needed("+job+", debian).\n")
elif all_jobs[i][1][1] == "Unlisted Red Hat based OS":
asp_file.write("os_needed("+job+", red_hat).\n")
elif all_jobs[i][1][1] == "N/A":
asp_file.write("-os_needed("+job+").\n")
asp_file.write("% Here the OS of each machine in the cluster is represented in the model.\n")
for i in range(len(all_macs)):
mac = all_macs[i][0]
mac.replace(" ", "")
mac.lower()
if all_macs[i][7] == "Ubuntu 18.04 [Desktop Edition]":
asp_file.write("os_on("+mac+", ubuntu_DE).\n")
elif all_macs[i][7] == "CentOS 7 [Desktop Edition]":
asp_file.write("os_on("+mac+", centOS_7_DE).\n")
elif all_macs[i][7] == "CentOS 7 [Node/server Edition]":
asp_file.write("os_on("+mac+", centOS_7_NE).\n")
elif all_macs[i][7] == "Unlisted Debian based OS":
asp_file.write("os_on("+mac+", debian).\n")
elif all_macs[i][7] == "Unlisted Red Hat based OS":
asp_file.write("os_on("+mac+").\n")
asp_file.write("% The thread_cost() rule represents how many threads a given task requires.\n")
# At this time, ECU assumes that the user knows how many threads each task needs.
for i in range(len(all_jobs)):
job = all_jobs[i][0]
job = job.replace(" ", "")
job = job.replace(".", "_")
job = job.lower()
thread = str(all_jobs[i][4])
asp_file.write("thread_cost("+job+", "+thread+").\n")
asp_file.write("% The depends_on(X1, X2) rule represents that X2 must be exectued and on the machine executing X1.\n")
# A program P1 may need to be executed at a different machine than another program P2, even if P2 depends on P1.
depended_on = []
for i in range(len(all_jobs)):
job0 = all_jobs[i][0]
job0 = job0.replace(" ", "")
job0 = job0.replace(".", "_")
job0 = job0.lower()
for j in range(len(all_jobs[i][2])):
job1 = all_jobs[i][2][j]
job1 = job1.replace(" ", "")
job1 = job1.replace(".", "_")
job1 = job1.lower()
depended_on.append(job1)
asp_file.write("depends_on("+job0+", "+job1+").\n")
for k in range(len(all_tasks)):
for l in range(len(depended_on)) :
if all_tasks[k] == depended_on[l]:
all_tasks[k] = False
break
for k in range(len(all_tasks)):
if all_tasks[k] != False:
asp_file.write("nobody_depends_on("+all_tasks[k]+").\n")
asp_file.write("% The machine_threads() rule represents how many cores on any given machine.\n")
# Though a task which has a higher multi-threading demand than the total cores on the machine which said task is being ran on may execute without issue, this is not always the case.
# ECU assumes that every task being executed in a cluster is an exspensive task requiring near full usage of the core on any given machine.
for i in range(len(all_macs)):
mac = all_macs[i][0]
mac.replace(" ", "")
mac.lower()
thread = str(all_macs[i][6])
asp_file.write("machine_threads("+mac+", "+thread+").\n")
asp_file.write("% Initialization of the statuses of all tasks.\n")
for i in range(len(all_jobs)):
job = all_jobs[i][0]
job = job.replace(" ", "")
job = job.replace(".", "_")
job = job.lower()
asp_file.write("init(on("+job+", home)).\n") # All tasks are started at home.
asp_file.write("init(at("+job+", -done)).\n")
asp_file.write("% Declartion of the goals of the system.\n")
for i in range(len(all_jobs)):
job = all_jobs[i][0]
job = job.replace(" ", "")
job = job.replace(".", "_")
job = job.lower()
asp_file.write("goal(at("+job+", done)).\n")
# Comments for all loops are written to asp.lp
asp_file.close()
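Once asp.lp has been written, a minimal sketch of handing it to the solver mentioned in the header comments; this assumes a clingo binary on the PATH, and clingo drives the incremental grounding itself because of the '#include <incmode>.' directive:
import subprocess

# Ask clingo for one answer set of the generated program and print the
# move/3 and turned_at/3 atoms exposed via the #show directives above.
result = subprocess.run(["clingo", "GUI_functions/asp.lp", "1"],
                        capture_output=True, text=True)
print(result.stdout)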
| 52.635135
| 196
| 0.60706
|
import pickle
if __name__ == "__main__":
input_file= open("GUI_functions/Cluster_details.bin", "rb")
all_macs= list(pickle.load(input_file))
input_file.close()
input_file= open("GUI_functions/Tasks_details.bin", "rb")
all_jobs= list(pickle.load(input_file))
input_file.close()
asp_file = open("GUI_functions/asp.lp", 'w')
asp_file.write("#include <incmode>. \n")
asp_file.write("#program base. \n")
asp_file.write("% A dynamically generated program.\n")
asp_file.write("% Made by build_asp.py using the data structures stored in Cluster_details.bin and Tasks_details.bin\n")
asp_file.write("% Define the fluents of the program. \n")
asp_file.write("status(-done).\n")
asp_file.write("status(done).\n")
asp_file.write("location(home).\n")
asp_file.write("% location() describes the individual nodes/machines of a cluster. \n")
asp_file.write("% home is the ECU master directory on one machine in a given cluster. \n")
asp_file.write("% The machine which home is on is assumed to be directly connected to all other machines in the cluster. \n")
for i in range(len(all_macs)):
mac = all_macs[i][0]
mac.replace(" ", "")
mac.lower()
asp_file.write("location("+mac+").\n")
for i in range(len(all_macs)):
mac = all_macs[i][0]
mac.replace(" ", "")
mac.lower()
asp_file.write("connection(home, "+mac+").\n")
# Here home is connected to all the machines in the cluster.
for j in range(len(all_macs[i][2])):
mac1 = all_macs[i][2][j]
mac1.replace(" ", "")
mac1.lower()
asp_file.write("connection("+mac+", "+mac1+").\n")
# Here the connection for each machine is modeled.
# At this time ECU does not assume two way edge connection.
# The graph representing the network of a cluster is thus a directed graph.
# This is a core feature of ECU.
asp_file.write("holds(F,0) :- init(F).\n")
asp_file.write("#program step(t).\n")
asp_file.write("{ move(X,Y,t) : task(X), location(Y)} :- holds(on(X,M),t-1), connection(M, Y).\n")
asp_file.write("0{ turn(X,Y,t)}1 :- status(Y), task(X), holds(on(X,Z),t), valid_on(X, Z).\n")
asp_file.write(":- move(X,Y,t), holds(on(X,Y1),t-1), Y == home.\n")
asp_file.write("% Programs can not be moved back into the home directory.\n")
asp_file.write(":- turn(X,Y,t), holds(at(X,done),t-1).\n")
asp_file.write("% Programs can not be executed if they are already complete.\n")
asp_file.write(":- turn(X,Y,t), holds(on(X,M),t), depends_on(X, X1), not holds(on(X1,M),t).\n")
# Comments detailing limits of move and turn.
asp_file.write("moved(X,t) :- move(X,Y,t).\n")
asp_file.write("% moved() indicated what task X was moved at turn t.\n")
# Comment detailing moved()
asp_file.write("turned(X,t) :- turn(X, Y, t).\n")
asp_file.write("% turn() indicated what task X was executed at what turn t.\n")
# Comment detailing turn()
asp_file.write("turned_at(X, M, t) :- turned(X, t), holds(on(X,M),t).\n")
asp_file.write("% turned_at() indicated what task X was executed at Machine M at what turn t.\n")
# Comment detailing turned_at()
asp_file.write("turned_with_2(M, X, X1, Z, t) :- turned(X,t), holds(on(X,M),t), thread_cost(X, C), turned(X1,t), holds(on(X1,M),t), thread_cost(X1, C1), X != X1, Z = C + C1.\n")
asp_file.write("turned_with_3(M, X, X1, X2, Z, t) :- turned(X,t), holds(on(X,M),t), thread_cost(X, C), turned_with_2(M, X1, X2, C1, t), X != X1, X != X2, Z = C + C1.\n")
asp_file.write("turned_with_4(M, X, X1, X2, X3, Z, t) :- turned(X,t), holds(on(X,M),t), thread_cost(X, C), turned_with_3(M, X1, X2, X3, C1, t), X != X1, X != X2, X != X3, Z = C + C1.\n")
asp_file.write(":- turned_with_2(M, X, X1, Z, t), machine_threads(M, C), Z > C.\n")
asp_file.write(":- turned_with_3(M, X, X1, X2, Z, t), machine_threads(M, C), Z > C.\n")
asp_file.write(":- turned_with_4(M, X, X1, X2, X3, Z, t), machine_threads(M, C), Z > C.\n")
asp_file.write(":- turned(X,t), holds(on(X,M),t), thread_cost(X, C), turned_with_4(M, X1, X2, X3, X4, C1, t), X != X1, X != X2, X != X3, X != X4.\n")
asp_file.write("% These rules allow for up to 4 task to be ran in parrallel on any one machine at a time, \n")
asp_file.write("% if and only if the sum of the thread cost of said tasks does not add up to a number greater than \n")
asp_file.write("% the core count of said machine. \n")
# Comment section detailing the rules which allow for parrallel taks execution on a machine while preventing an overloading of a the machine's multi-threading capabilities.
asp_file.write(":- turned_at(X, M, t), cuda_not_on(M), cuda_needed(X).\n")
asp_file.write(":- turned_at(X, M, t), spacy_not_on(M), spacy_needed(X).\n")
asp_file.write(":- turned_at(X, M, t), psutil_not_on(M), psutil_needed(X).\n")
asp_file.write(":- turned_at(X, M, t), clingo_not_on(M), clingo_needed(X).\n")
asp_file.write("% This section will prevent a program which requires a given toolkit from being scheduled to run on a machine\n")
asp_file.write("% which does not have said toolkit.\n")
asp_file.write(":- move(X, Z, Y1), turned(X, Y2), Y1 == Y2.\n")
asp_file.write(":- move(X, Z, Y1), move(X, Z, Y2), Y1 != Y2.\n")
asp_file.write(":- move(X, Z, T1), turned(X,T2), T1 > T2, nobody_depends_on(X).\n")
asp_file.write("% A program can not be moved and executed at the same time.\n")
asp_file.write(":- turned(X1, T1), turned(X2, T2), depends_on(X2, X1), T1 >= T2, moved(X2,T).\n")
asp_file.write("% A program can executed before all of it's dependencies.\n")
asp_file.write("holds(on(X,Y),t) :- move(X,Y,t).\n")
asp_file.write("holds(on(X,Z),t) :- holds(on(X,Z),t-1), not moved(X,t).\n")
asp_file.write("holds(at(X,Y),t) :- turn(X,Y,t).\n")
asp_file.write("holds(at(X,Z),t) :- holds(at(X,Z),t-1), not turned(X,t).\n")
asp_file.write("valid_on(X, Y) :- thread_cost(X, Z1), machine_threads(Y, Z2), Z1 <= Z2.\n")
asp_file.write(":- os_needed(X, S), turned_at(X, M, t), not os_on(M, S), not -os_needed(X).\n")
asp_file.write(":- holds(on(X,M1),t), holds(on(X,M2),t), M1 != M2, holds(at(X,-done),t).\n")
asp_file.write("% A program can not be duplicated if it has not been executed.\n")
asp_file.write(":- holds(on(X,M1),t), holds(on(X,M2),t), M1 != M2, task(X1), task(X2), not depends_on(X1, X), not depends_on(X2, X), X1 != X2, turned_at(X1, M1, T1), turned_at(X2, M2, T2).\n")
asp_file.write("% A program can not be dupllicated if it is not the dependecy of at least two different later programs which are executed on atleast two diffent machines.\n")
# This prevents the over-duplication of dependencies.
# Given that sending programs is exspensive, limiting this process must be a priority.
asp_file.write("% An unfinished program can not be at to two different locations at the same time.\n")
asp_file.write("#program check(t).\n")
asp_file.write(":- query(t), goal(F), not holds(F,t).\n")
asp_file.write("#show move/3.\n")
asp_file.write("#show turned_at/3.\n")
asp_file.write("#program base.\n")
# Here all the tasks are added to the model
all_tasks= []
for i in range(len(all_jobs)):
job = all_jobs[i][0]
job = job.replace(" ", "")
job = job.replace(".", "_")
job = job.lower()
asp_file.write("task("+job+").\n")
all_tasks.append(job)
asp_file.write("os(ubuntu_DE).\n")
asp_file.write("os(centOS_7_DE).\n")
asp_file.write("os(centOS_7_NE).\n")
asp_file.write("os(debian).\n")
asp_file.write("os(red_hat).\n")
asp_file.write("os(no_os).\n")
# Here the needed toolkits for each task are added
for i in range(len(all_jobs)):
job = all_jobs[i][0]
job = job.replace(" ", "")
job = job.replace(".", "_")
job = job.lower()
for j in range(len(all_jobs[i][3])):
if all_jobs[i][3][j] == "CUDA":
asp_file.write("cuda_needed("+job+").\n")
elif all_jobs[i][3][j] == "psutil":
asp_file.write("psutil_needed("+job+").\n")
elif all_jobs[i][3][j] == "spaCy":
asp_file.write("spacy_needed("+job+").\n")
elif all_jobs[i][3][j] == "clingo":
asp_file.write("clingo_needed("+job+").\n")
# Here, if a toolkit is designated to be installed on a machine then this fact is added to the model.
for i in range(len(all_macs)):
mac = all_macs[i][0]
mac.replace(" ", "")
mac.lower()
for j in range(len(all_macs[i][3])):
if all_macs[i][3][j] == "CUDA":
asp_file.write("cuda_on("+mac+").\n")
elif all_macs[i][3][j] == "psutil":
asp_file.write("psutil_on("+mac+").\n")
elif all_macs[i][3][j] == "spaCy":
asp_file.write("spacy_on("+mac+").\n")
elif all_macs[i][3][j] == "clingo":
asp_file.write("clingo_on("+mac+").\n")
asp_file.write("% If a toolkit is not on on a machine then this rule is ture for that machine.\n")
asp_file.write("cuda_not_on(X) :- location(X), not cuda_on(X).\n")
asp_file.write("spacy_not_on(X) :- location(X), not spacy_on(X).\n")
asp_file.write("psutil_not_on(X) :- location(X), not psutil_on(X).\n")
asp_file.write("clingo_not_on(X) :- location(X), not clingo_on(X).\n")
asp_file.write("% If a task can only be executed on a specific OS then the rule os_needed() represents this in the model.\n")
for i in range(len(all_jobs)):
job = all_jobs[i][0]
job = job.replace(" ", "")
job = job.replace(".", "_")
job = job.lower()
if all_jobs[i][1][1] == "Ubuntu 18.04 [Desktop Edition]":
asp_file.write("os_needed("+job+", ubuntu_DE).\n")
elif all_jobs[i][1][1] == "CentOS 7 [Desktop Edition]":
asp_file.write("os_needed("+job+", centOS_7_DE).\n")
elif all_jobs[i][1][1] == "CentOS 7 [Node/server Edition]":
asp_file.write("os_needed("+job+", centOS_7_NE).\n")
elif all_jobs[i][1][1] == "Unlisted Debian based OS":
asp_file.write("os_needed("+job+", debian).\n")
elif all_jobs[i][1][1] == "Unlisted Red Hat based OS":
asp_file.write("os_needed("+job+", red_hat).\n")
elif all_jobs[i][1][1] == "N/A":
asp_file.write("-os_needed("+job+").\n")
asp_file.write("% Here the OS of each machine in the cluster is represented in the model.\n")
for i in range(len(all_macs)):
mac = all_macs[i][0]
mac.replace(" ", "")
mac.lower()
if all_macs[i][7] == "Ubuntu 18.04 [Desktop Edition]":
asp_file.write("os_on("+mac+", ubuntu_DE).\n")
elif all_macs[i][7] == "CentOS 7 [Desktop Edition]":
asp_file.write("os_on("+mac+", centOS_7_DE).\n")
elif all_macs[i][7] == "CentOS 7 [Node/server Edition]":
asp_file.write("os_on("+mac+", centOS_7_NE).\n")
elif all_macs[i][7] == "Unlisted Debian based OS":
asp_file.write("os_on("+mac+", debian).\n")
elif all_macs[i][7] == "Unlisted Red Hat based OS":
asp_file.write("os_on("+mac+").\n")
asp_file.write("% The thread_cost() rule represents how many threads a given task requires.\n")
# At this time, ECU assumes that the user knows how many threads each task needs.
for i in range(len(all_jobs)):
job = all_jobs[i][0]
job = job.replace(" ", "")
job = job.replace(".", "_")
job = job.lower()
thread = str(all_jobs[i][4])
asp_file.write("thread_cost("+job+", "+thread+").\n")
asp_file.write("% The depends_on(X1, X2) rule represents that X2 must be exectued and on the machine executing X1.\n")
# A program P1 may need to be executed at a different machine than another program P2, even if P2 depends on P1.
depended_on = []
for i in range(len(all_jobs)):
job0 = all_jobs[i][0]
job0 = job0.replace(" ", "")
job0 = job0.replace(".", "_")
job0 = job0.lower()
for j in range(len(all_jobs[i][2])):
job1 = all_jobs[i][2][j]
job1 = job1.replace(" ", "")
job1 = job1.replace(".", "_")
job1 = job1.lower()
depended_on.append(job1)
asp_file.write("depends_on("+job0+", "+job1+").\n")
for k in range(len(all_tasks)):
for l in range(len(depended_on)) :
if all_tasks[k] == depended_on[l]:
all_tasks[k] = False
break
for k in range(len(all_tasks)):
if all_tasks[k] != False:
asp_file.write("nobody_depends_on("+all_tasks[k]+").\n")
asp_file.write("% The machine_threads() rule represents how many cores on any given machine.\n")
# Though a task which has a higher multi-threading demand than the total cores on the machine which said task is being ran on may execute without issue, this is not always the case.
# ECU assumes that every task being executed in a cluster is an exspensive task requiring near full usage of the core on any given machine.
for i in range(len(all_macs)):
mac = all_macs[i][0]
mac.replace(" ", "")
mac.lower()
thread = str(all_macs[i][6])
asp_file.write("machine_threads("+mac+", "+thread+").\n")
asp_file.write("% Initialization of the statuses of all tasks.\n")
for i in range(len(all_jobs)):
job = all_jobs[i][0]
job = job.replace(" ", "")
job = job.replace(".", "_")
job = job.lower()
asp_file.write("init(on("+job+", home)).\n") # All tasks are started at home.
asp_file.write("init(at("+job+", -done)).\n")
asp_file.write("% Declartion of the goals of the system.\n")
for i in range(len(all_jobs)):
job = all_jobs[i][0]
job = job.replace(" ", "")
job = job.replace(".", "_")
job = job.lower()
asp_file.write("goal(at("+job+", done)).\n")
# Comments for all loops are written to asp.lp
asp_file.close()
| true
| true
|
f71a4eddc4f441ac7f58d13143c891e1a2b0e540
| 5,556
|
py
|
Python
|
datasets/dataloader_infer.py
|
Nitin-Mane/dense-ulearn-vos
|
9e39d359a53a2343522ce5820fdf27223a4ffcb4
|
[
"Apache-2.0"
] | 157
|
2021-11-11T13:45:48.000Z
|
2022-03-14T03:06:09.000Z
|
datasets/dataloader_infer.py
|
Nitin-Mane/dense-ulearn-vos
|
9e39d359a53a2343522ce5820fdf27223a4ffcb4
|
[
"Apache-2.0"
] | 11
|
2021-11-20T11:53:47.000Z
|
2022-03-30T01:51:56.000Z
|
datasets/dataloader_infer.py
|
Nitin-Mane/dense-ulearn-vos
|
9e39d359a53a2343522ce5820fdf27223a4ffcb4
|
[
"Apache-2.0"
] | 16
|
2021-11-12T09:19:45.000Z
|
2022-03-16T10:32:39.000Z
|
"""
Copyright (c) 2021 TU Darmstadt
Author: Nikita Araslanov <nikita.araslanov@tu-darmstadt.de>
License: Apache License 2.0
"""
import os
import torch
from PIL import Image
import numpy as np
import torchvision.transforms as tf
from .dataloader_base import DLBase
class DataSeg(DLBase):
def __init__(self, cfg, split, ignore_labels=[], \
root=os.path.expanduser('./data'), renorm=False):
super(DataSeg, self).__init__()
self.cfg = cfg
self.root = root
self.split = split
self.ignore_labels = ignore_labels
self._init_palette(self.cfg.DATASET.NUM_CLASSES)
# train/val/test splits are pre-cut
split_fn = os.path.join(self.root, self.split + ".txt")
assert os.path.isfile(split_fn)
self.sequence_ids = []
self.sequence_names = []
def add_sequence(name):
vlen = len(self.images)
assert vlen >= cfg.DATASET.VIDEO_LEN, \
"Detected video shorter [{}] than training length [{}]".format(vlen, \
cfg.DATASET.VIDEO_LEN)
self.sequence_ids.append(vlen)
self.sequence_names.append(name)
return vlen
self.images = []
self.masks = []
self.flags = []
token = None
with open(split_fn, "r") as lines:
for line in lines:
_flag, _image, _mask = line.strip("\n").split(' ')
# save every frame
#_flag = 1
self.flags.append(int(_flag))
_image = os.path.join(cfg.DATASET.ROOT, _image.lstrip('/'))
assert os.path.isfile(_image), '%s not found' % _image
# each sequence may have a different length
# do some book-keeping e.g. to ensure we have
# sequences long enough for subsequent sampling
_token = _image.split("/")[-2] # parent directory
# sequence ID is in the filename
#_token = os.path.basename(_image).split("_")[0]
if token != _token:
if token is not None:
add_sequence(token)
token = _token
self.images.append(_image)
if _mask is None:
self.masks.append(None)
else:
_mask = os.path.join(cfg.DATASET.ROOT, _mask.lstrip('/'))
#assert os.path.isfile(_mask), '%s not found' % _mask
self.masks.append(_mask)
# update the last sequence
# returns the total amount of frames
add_sequence(token)
print("Loaded {} sequences".format(len(self.sequence_ids)))
# defining data augmentation:
print("Dataloader: {}".format(split), " #", len(self.images))
print("\t {}: no augmentation".format(split))
self.tf = tf.Compose([tf.ToTensor(), tf.Normalize(mean=self.MEAN, std=self.STD)])
self._num_samples = len(self.images)
def __len__(self):
return len(self.sequence_ids)
def _mask2tensor(self, mask, num_classes=6):
h,w = mask.shape
ones = torch.ones(1,h,w)
zeros = torch.zeros(num_classes,h,w)
max_idx = mask.max()
assert max_idx < num_classes, "{} >= {}".format(max_idx, num_classes)
return zeros.scatter(0, mask[None, ...], ones)
def denorm(self, image):
if image.dim() == 3:
assert image.dim() == 3, "Expected image [CxHxW]"
assert image.size(0) == 3, "Expected RGB image [3xHxW]"
for t, m, s in zip(image, self.MEAN, self.STD):
t.mul_(s).add_(m)
elif image.dim() == 4:
# batch mode
assert image.size(1) == 3, "Expected RGB image [3xHxW]"
for t, m, s in zip((0,1,2), self.MEAN, self.STD):
image[:, t, :, :].mul_(s).add_(m)
return image
def __getitem__(self, index):
seq_to = self.sequence_ids[index]
seq_from = 0 if index == 0 else self.sequence_ids[index - 1]
image0 = Image.open(self.images[seq_from])
w,h = image0.size
images, masks, fns, flags = [], [], [], []
tracks = torch.LongTensor(self.cfg.DATASET.NUM_CLASSES).fill_(-1)
masks = torch.LongTensor(self.cfg.DATASET.NUM_CLASSES, h, w).zero_()
known_ids = set()
for t in range(seq_from, seq_to):
t0 = t - seq_from
image = Image.open(self.images[t]).convert('RGB')
fns.append(os.path.basename(self.images[t].replace(".jpg", "")))
flags.append(self.flags[t])
if os.path.isfile(self.masks[t]):
mask = Image.open(self.masks[t])
mask = torch.from_numpy(np.array(mask, np.long, copy=False))
unique_ids = np.unique(mask)
for oid in unique_ids:
if not oid in known_ids:
tracks[oid] = t0
known_ids.add(oid)
masks[oid] = (mask == oid).long()
else:
mask = Image.new('L', image.size)
image = self.tf(image)
images.append(image)
images = torch.stack(images, 0)
seq_name = self.sequence_names[index]
flags = torch.LongTensor(flags)
return images, images, masks, tracks, len(known_ids), fns, flags, seq_name
| 33.071429
| 89
| 0.533837
|
import os
import torch
from PIL import Image
import numpy as np
import torchvision.transforms as tf
from .dataloader_base import DLBase
class DataSeg(DLBase):
def __init__(self, cfg, split, ignore_labels=[], \
root=os.path.expanduser('./data'), renorm=False):
super(DataSeg, self).__init__()
self.cfg = cfg
self.root = root
self.split = split
self.ignore_labels = ignore_labels
self._init_palette(self.cfg.DATASET.NUM_CLASSES)
split_fn = os.path.join(self.root, self.split + ".txt")
assert os.path.isfile(split_fn)
self.sequence_ids = []
self.sequence_names = []
def add_sequence(name):
vlen = len(self.images)
assert vlen >= cfg.DATASET.VIDEO_LEN, \
"Detected video shorter [{}] than training length [{}]".format(vlen, \
cfg.DATASET.VIDEO_LEN)
self.sequence_ids.append(vlen)
self.sequence_names.append(name)
return vlen
self.images = []
self.masks = []
self.flags = []
token = None
with open(split_fn, "r") as lines:
for line in lines:
_flag, _image, _mask = line.strip("\n").split(' ')
self.flags.append(int(_flag))
_image = os.path.join(cfg.DATASET.ROOT, _image.lstrip('/'))
assert os.path.isfile(_image), '%s not found' % _image
_token = _image.split("/")[-2]
if token != _token:
if not token is None:
add_sequence(token)
token = _token
self.images.append(_image)
if _mask is None:
self.masks.append(None)
else:
_mask = os.path.join(cfg.DATASET.ROOT, _mask.lstrip('/'))
self.masks.append(_mask)
add_sequence(token)
print("Loaded {} sequences".format(len(self.sequence_ids)))
print("Dataloader: {}".format(split), " #", len(self.images))
print("\t {}: no augmentation".format(split))
self.tf = tf.Compose([tf.ToTensor(), tf.Normalize(mean=self.MEAN, std=self.STD)])
self._num_samples = len(self.images)
def __len__(self):
return len(self.sequence_ids)
def _mask2tensor(self, mask, num_classes=6):
h,w = mask.shape
ones = torch.ones(1,h,w)
zeros = torch.zeros(num_classes,h,w)
max_idx = mask.max()
assert max_idx < num_classes, "{} >= {}".format(max_idx, num_classes)
return zeros.scatter(0, mask[None, ...], ones)
def denorm(self, image):
if image.dim() == 3:
assert image.dim() == 3, "Expected image [CxHxW]"
assert image.size(0) == 3, "Expected RGB image [3xHxW]"
for t, m, s in zip(image, self.MEAN, self.STD):
t.mul_(s).add_(m)
elif image.dim() == 4:
assert image.size(1) == 3, "Expected RGB image [3xHxW]"
for t, m, s in zip((0,1,2), self.MEAN, self.STD):
image[:, t, :, :].mul_(s).add_(m)
return image
def __getitem__(self, index):
seq_to = self.sequence_ids[index]
seq_from = 0 if index == 0 else self.sequence_ids[index - 1]
image0 = Image.open(self.images[seq_from])
w,h = image0.size
images, masks, fns, flags = [], [], [], []
tracks = torch.LongTensor(self.cfg.DATASET.NUM_CLASSES).fill_(-1)
masks = torch.LongTensor(self.cfg.DATASET.NUM_CLASSES, h, w).zero_()
known_ids = set()
for t in range(seq_from, seq_to):
t0 = t - seq_from
image = Image.open(self.images[t]).convert('RGB')
fns.append(os.path.basename(self.images[t].replace(".jpg", "")))
flags.append(self.flags[t])
if os.path.isfile(self.masks[t]):
mask = Image.open(self.masks[t])
mask = torch.from_numpy(np.array(mask, np.long, copy=False))
unique_ids = np.unique(mask)
for oid in unique_ids:
if not oid in known_ids:
tracks[oid] = t0
known_ids.add(oid)
masks[oid] = (mask == oid).long()
else:
mask = Image.new('L', image.size)
image = self.tf(image)
images.append(image)
images = torch.stack(images, 0)
seq_name = self.sequence_names[index]
flags = torch.LongTensor(flags)
return images, images, masks, tracks, len(known_ids), fns, flags, seq_name
| true
| true
|
f71a4f157fbcfd39a6a5a1e24d4913bdf4df7d2c
| 6,777
|
py
|
Python
|
etna/analysis/eda_utils.py
|
Carlosbogo/etna
|
b6210f0e79ee92aa9ae8ff4fcfb267be9fb7cc94
|
[
"Apache-2.0"
] | null | null | null |
etna/analysis/eda_utils.py
|
Carlosbogo/etna
|
b6210f0e79ee92aa9ae8ff4fcfb267be9fb7cc94
|
[
"Apache-2.0"
] | null | null | null |
etna/analysis/eda_utils.py
|
Carlosbogo/etna
|
b6210f0e79ee92aa9ae8ff4fcfb267be9fb7cc94
|
[
"Apache-2.0"
] | null | null | null |
import math
import warnings
from itertools import combinations
from typing import TYPE_CHECKING
from typing import Optional
from typing import Sequence
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import statsmodels.api as sm
from matplotlib.ticker import MaxNLocator
from statsmodels.graphics import utils
if TYPE_CHECKING:
from etna.datasets import TSDataset
plot_acf = sm.graphics.tsa.plot_acf
plot_pacf = sm.graphics.tsa.plot_pacf
def cross_corr_plot(ts: "TSDataset", n_segments: int = 10, maxlags: int = 21, segments: Optional[Sequence] = None):
"""
Cross-correlation plot between multiple timeseries.
Parameters
----------
ts:
TSDataset with timeseries data
n_segments:
number of random segments to plot
maxlags:
number of timeseries shifts for cross-correlation
segments:
segments to plot
"""
if not segments:
segments = list(ts.segments)
segments = np.random.choice(segments, size=min(len(segments), n_segments), replace=False)
segment_pairs = list(combinations(segments, r=2))
if len(segment_pairs) == 0:
raise ValueError("There are no pairs to plot! Try set n_segments > 1.")
columns_num = min(2, len(segment_pairs))
rows_num = math.ceil(len(segment_pairs) / columns_num)
fig, ax = plt.subplots(rows_num, columns_num, figsize=(20, 5 * rows_num), constrained_layout=True, squeeze=False)
ax = ax.ravel()
fig.suptitle("Cross-correlation", fontsize=16)
for i, (segment_1, segment_2) in enumerate(segment_pairs):
df_segment_1 = ts[:, segment_1, :][segment_1]
df_segment_2 = ts[:, segment_2, :][segment_2]
fig, axx = utils.create_mpl_ax(ax[i])
target_1 = df_segment_1.target
target_2 = df_segment_2.target
if target_1.dtype == int or target_2.dtype == int:
warnings.warn(
"At least one target column has integer dtype, "
"it is converted to float in order to calculate correlation."
)
target_1 = target_1.astype(float)
target_2 = target_2.astype(float)
lags, level, _, _ = axx.xcorr(x=target_1, y=target_2, maxlags=maxlags)
ax[i].plot(lags, level, "o", markersize=5)
ax[i].set_title(f"{segment_1} vs {segment_2}")
ax[i].xaxis.set_major_locator(MaxNLocator(integer=True))
plt.show()
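# Usage sketch (added for illustration; assumes an existing TSDataset instance ts):
#   cross_corr_plot(ts, n_segments=4, maxlags=14)
# draws pairwise cross-correlation subplots for 4 randomly chosen segments.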
def sample_acf_plot(ts: "TSDataset", n_segments: int = 10, lags: int = 21, segments: Sequence = None):
"""
Autocorrelation plot for multiple timeseries.
Parameters
----------
ts:
TSDataset with timeseries data
n_segments:
number of random segments to plot
lags:
number of timeseries shifts for cross-correlation
segments:
segments to plot
Notes
-----
https://en.wikipedia.org/wiki/Autocorrelation
"""
if not segments:
segments = sorted(ts.segments)
k = min(n_segments, len(segments))
columns_num = min(2, k)
rows_num = math.ceil(k / columns_num)
fig, ax = plt.subplots(rows_num, columns_num, figsize=(20, 5 * rows_num), constrained_layout=True, squeeze=False)
ax = ax.ravel()
fig.suptitle("Partial Autocorrelation", fontsize=16)
for i, name in enumerate(sorted(np.random.choice(segments, size=k, replace=False))):
df_slice = ts[:, name, :][name]
plot_acf(x=df_slice["target"].values, ax=ax[i], lags=lags)
ax[i].set_title(name)
plt.show()
def sample_pacf_plot(ts: "TSDataset", n_segments: int = 10, lags: int = 21, segments: Sequence = None):
"""
Partial autocorrelation plot for multiple timeseries.
Parameters
----------
ts:
TSDataset with timeseries data
n_segments:
number of random segments to plot
lags:
number of timeseries shifts for cross-correlation
segments:
segments to plot
Notes
-----
https://en.wikipedia.org/wiki/Partial_autocorrelation_function
"""
if not segments:
segments = sorted(ts.segments)
k = min(n_segments, len(segments))
columns_num = min(2, k)
rows_num = math.ceil(k / columns_num)
fig, ax = plt.subplots(rows_num, columns_num, figsize=(20, 5 * rows_num), constrained_layout=True, squeeze=False)
ax = ax.ravel()
fig.suptitle("Partial Autocorrelation", fontsize=16)
for i, name in enumerate(sorted(np.random.choice(segments, size=k, replace=False))):
df_slice = ts[:, name, :][name]
plot_pacf(x=df_slice["target"].values, ax=ax[i], lags=lags)
ax[i].set_title(name)
plt.show()
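# Usage sketch (added for illustration; ts is an assumed TSDataset instance):
#   sample_acf_plot(ts, n_segments=4, lags=30)
#   sample_pacf_plot(ts, n_segments=4, lags=30)
# each draws one subplot per sampled segment.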
def distribution_plot(
ts: "TSDataset",
n_segments: int = 10,
segments: Sequence = None,
shift: int = 30,
window: int = 30,
freq: str = "1M",
n_rows: int = 10,
):
"""Distribution of z-values grouped by segments and time frequency.
.. math::
mean_{i} = \\sum_{j=i-\\text{shift}}^{i-\\text{shift}+\\text{window}} \\frac{x_{j}}{\\text{window}}
Parameters
----------
ts:
dataset with timeseries data
n_segments:
number of random segments to plot
segments:
segments to plot
shift:
number of timeseries shifts for statistics calc
window:
number of points for statistics calc
freq:
group for z_{i}
n_rows:
maximum number of rows to plot
"""
df_pd = ts.to_pandas(flatten=True)
if not segments:
segments = df_pd.segment.unique()
segments = np.random.choice(segments, size=min(len(segments), n_segments), replace=False)
df_full = df_pd[df_pd.segment.isin(segments)]
df_full.loc[:, "mean"] = (
df_full.groupby("segment").target.shift(shift).transform(lambda s: s.rolling(window).mean())
)
df_full.loc[:, "std"] = df_full.groupby("segment").target.shift(shift).transform(lambda s: s.rolling(window).std())
df_full = df_full.dropna()
df_full.loc[:, "z"] = (df_full["target"] - df_full["mean"]) / df_full["std"]
grouped_data = df_full.groupby([df_full.timestamp.dt.to_period(freq)])
columns_num = min(2, len(grouped_data))
rows_num = min(n_rows, math.ceil(len(grouped_data) / columns_num))
groups = set(list(grouped_data.groups.keys())[-rows_num * columns_num :])
fig, ax = plt.subplots(rows_num, columns_num, figsize=(20, 7.5 * rows_num), constrained_layout=True, squeeze=False)
fig.suptitle(f"Z statistic shift: {shift} window: {window}", fontsize=16)
ax = ax.ravel()
i = 0
for period, df_slice in grouped_data:
if period not in groups:
continue
sns.boxplot(data=df_slice.sort_values(by="segment"), y="z", x="segment", ax=ax[i], fliersize=False)
ax[i].set_title(f"{period}")
i += 1
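# Usage sketch (added for illustration; ts is an assumed TSDataset instance):
#   distribution_plot(ts, n_segments=4, shift=30, window=30, freq="1M")
# draws per-period boxplots of the rolling z-statistic for each segment.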
| 34.576531
| 119
| 0.649255
|
import math
import warnings
from itertools import combinations
from typing import TYPE_CHECKING
from typing import Optional
from typing import Sequence
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import statsmodels.api as sm
from matplotlib.ticker import MaxNLocator
from statsmodels.graphics import utils
if TYPE_CHECKING:
from etna.datasets import TSDataset
plot_acf = sm.graphics.tsa.plot_acf
plot_pacf = sm.graphics.tsa.plot_pacf
def cross_corr_plot(ts: "TSDataset", n_segments: int = 10, maxlags: int = 21, segments: Optional[Sequence] = None):
if not segments:
segments = list(ts.segments)
segments = np.random.choice(segments, size=min(len(segments), n_segments), replace=False)
segment_pairs = list(combinations(segments, r=2))
if len(segment_pairs) == 0:
raise ValueError("There are no pairs to plot! Try set n_segments > 1.")
columns_num = min(2, len(segment_pairs))
rows_num = math.ceil(len(segment_pairs) / columns_num)
fig, ax = plt.subplots(rows_num, columns_num, figsize=(20, 5 * rows_num), constrained_layout=True, squeeze=False)
ax = ax.ravel()
fig.suptitle("Cross-correlation", fontsize=16)
for i, (segment_1, segment_2) in enumerate(segment_pairs):
df_segment_1 = ts[:, segment_1, :][segment_1]
df_segment_2 = ts[:, segment_2, :][segment_2]
fig, axx = utils.create_mpl_ax(ax[i])
target_1 = df_segment_1.target
target_2 = df_segment_2.target
if target_1.dtype == int or target_2.dtype == int:
warnings.warn(
"At least one target column has integer dtype, "
"it is converted to float in order to calculate correlation."
)
target_1 = target_1.astype(float)
target_2 = target_2.astype(float)
lags, level, _, _ = axx.xcorr(x=target_1, y=target_2, maxlags=maxlags)
ax[i].plot(lags, level, "o", markersize=5)
ax[i].set_title(f"{segment_1} vs {segment_2}")
ax[i].xaxis.set_major_locator(MaxNLocator(integer=True))
plt.show()
def sample_acf_plot(ts: "TSDataset", n_segments: int = 10, lags: int = 21, segments: Sequence = None):
if not segments:
segments = sorted(ts.segments)
k = min(n_segments, len(segments))
columns_num = min(2, k)
rows_num = math.ceil(k / columns_num)
fig, ax = plt.subplots(rows_num, columns_num, figsize=(20, 5 * rows_num), constrained_layout=True, squeeze=False)
ax = ax.ravel()
fig.suptitle("Partial Autocorrelation", fontsize=16)
for i, name in enumerate(sorted(np.random.choice(segments, size=k, replace=False))):
df_slice = ts[:, name, :][name]
plot_acf(x=df_slice["target"].values, ax=ax[i], lags=lags)
ax[i].set_title(name)
plt.show()
def sample_pacf_plot(ts: "TSDataset", n_segments: int = 10, lags: int = 21, segments: Sequence = None):
if not segments:
segments = sorted(ts.segments)
k = min(n_segments, len(segments))
columns_num = min(2, k)
rows_num = math.ceil(k / columns_num)
fig, ax = plt.subplots(rows_num, columns_num, figsize=(20, 5 * rows_num), constrained_layout=True, squeeze=False)
ax = ax.ravel()
fig.suptitle("Partial Autocorrelation", fontsize=16)
for i, name in enumerate(sorted(np.random.choice(segments, size=k, replace=False))):
df_slice = ts[:, name, :][name]
plot_pacf(x=df_slice["target"].values, ax=ax[i], lags=lags)
ax[i].set_title(name)
plt.show()
def distribution_plot(
ts: "TSDataset",
n_segments: int = 10,
segments: Sequence = None,
shift: int = 30,
window: int = 30,
freq: str = "1M",
n_rows: int = 10,
):
df_pd = ts.to_pandas(flatten=True)
if not segments:
segments = df_pd.segment.unique()
segments = np.random.choice(segments, size=min(len(segments), n_segments), replace=False)
df_full = df_pd[df_pd.segment.isin(segments)]
df_full.loc[:, "mean"] = (
df_full.groupby("segment").target.shift(shift).transform(lambda s: s.rolling(window).mean())
)
df_full.loc[:, "std"] = df_full.groupby("segment").target.shift(shift).transform(lambda s: s.rolling(window).std())
df_full = df_full.dropna()
df_full.loc[:, "z"] = (df_full["target"] - df_full["mean"]) / df_full["std"]
grouped_data = df_full.groupby([df_full.timestamp.dt.to_period(freq)])
columns_num = min(2, len(grouped_data))
rows_num = min(n_rows, math.ceil(len(grouped_data) / columns_num))
groups = set(list(grouped_data.groups.keys())[-rows_num * columns_num :])
fig, ax = plt.subplots(rows_num, columns_num, figsize=(20, 7.5 * rows_num), constrained_layout=True, squeeze=False)
fig.suptitle(f"Z statistic shift: {shift} window: {window}", fontsize=16)
ax = ax.ravel()
i = 0
for period, df_slice in grouped_data:
if period not in groups:
continue
sns.boxplot(data=df_slice.sort_values(by="segment"), y="z", x="segment", ax=ax[i], fliersize=False)
ax[i].set_title(f"{period}")
i += 1
| true
| true
|
f71a5114748409f8688b38305fe77035a3f0228a
| 2,251
|
py
|
Python
|
18_Working with Dates and Times in Python/03_Time Zones and Daylight Saving/05_What time did the bike leave.py
|
mohd-faizy/DataScience-With-Python
|
13ebb10cf9083343056d5b782957241de1d595f9
|
[
"MIT"
] | 5
|
2021-02-03T14:36:58.000Z
|
2022-01-01T10:29:26.000Z
|
18_Working with Dates and Times in Python/03_Time Zones and Daylight Saving/05_What time did the bike leave.py
|
mohd-faizy/DataScience-With-Python
|
13ebb10cf9083343056d5b782957241de1d595f9
|
[
"MIT"
] | null | null | null |
18_Working with Dates and Times in Python/03_Time Zones and Daylight Saving/05_What time did the bike leave.py
|
mohd-faizy/DataScience-With-Python
|
13ebb10cf9083343056d5b782957241de1d595f9
|
[
"MIT"
] | 3
|
2021-02-08T00:31:16.000Z
|
2022-03-17T13:52:32.000Z
|
'''
05 - What time did the bike leave? (Global edition)
When you need to move a datetime from one timezone into another, use
.astimezone() and tz. Often you will be moving things into UTC, but for
fun let's try moving things from 'America/New_York' into a few different
time zones.
------------------------------------------------------------------
Instructions:
- Set uk to be the timezone for the UK: 'Europe/London'.
- Change local to be in the uk timezone and assign it to notlocal.
------------------------------------------------------------------
'''
# Create the timezone object
uk = tz.gettz('Europe/London')
# Pull out the start of the first trip
local = onebike_datetimes[0]['start']
# What time was it in the UK?
notlocal = local.astimezone(uk)
# Print them out and see the difference
print(local.isoformat())
print(notlocal.isoformat())
'''
<script.py> output:
2017-10-01T15:23:25-04:00
2017-10-01T20:23:25+01:00
'''
'''
------------------------------------------------------------------
- Set ist to be the timezone for India: 'Asia/Kolkata'.
- Change local to be in the ist timezone and assign it to notlocal.
------------------------------------------------------------------
'''
# Create the timezone object
ist = tz.gettz('Asia/Kolkata')
# Pull out the start of the first trip
local = onebike_datetimes[0]['start']
# What time was it in India?
notlocal = local.astimezone(ist)
# Print them out and see the difference
print(local.isoformat())
print(notlocal.isoformat())
'''
<script.py> output:
2017-10-01T15:23:25-04:00
2017-10-02T00:53:25+05:30
'''
'''
------------------------------------------------------------------
- Set sm to be the timezone for Samoa: 'Pacific/Apia'.
- Change local to be in the sm timezone and assign it to notlocal.
------------------------------------------------------------------
'''
# Create the timezone object
sm = tz.gettz('Pacific/Apia')
# Pull out the start of the first trip
local = onebike_datetimes[0]['start']
# What time was it in Samoa?
notlocal = local.astimezone(sm)
# Print them out and see the difference
print(local.isoformat())
print(notlocal.isoformat())
'''
<script.py> output:
2017-10-01T15:23:25-04:00
2017-10-02T09:23:25+14:00
'''
| 27.120482
| 73
| 0.581519
|
uk = tz.gettz('Europe/London')
local = onebike_datetimes[0]['start']
notlocal = local.astimezone(uk)
print(local.isoformat())
print(notlocal.isoformat())
ist = tz.gettz('Asia/Kolkata')
local = onebike_datetimes[0]['start']
notlocal = local.astimezone(ist)
print(local.isoformat())
print(notlocal.isoformat())
sm = tz.gettz('Pacific/Apia')
local = onebike_datetimes[0]['start']
notlocal = local.astimezone(sm)
print(local.isoformat())
print(notlocal.isoformat())
| true
| true
|
f71a5174d2bf23ea7be1f3e9c5de988669aecc72
| 7,805
|
py
|
Python
|
src/tools/scripts/lofreq2_cluster.py
|
joshwkearney/lofreq
|
8966e95044875ec9068d2ea4d1cf72ed96d92781
|
[
"MIT"
] | 74
|
2015-01-02T19:18:01.000Z
|
2022-02-25T04:16:18.000Z
|
src/tools/scripts/lofreq2_cluster.py
|
joshwkearney/lofreq
|
8966e95044875ec9068d2ea4d1cf72ed96d92781
|
[
"MIT"
] | 125
|
2015-01-06T07:25:30.000Z
|
2022-03-15T12:56:23.000Z
|
src/tools/scripts/lofreq2_cluster.py
|
joshwkearney/lofreq
|
8966e95044875ec9068d2ea4d1cf72ed96d92781
|
[
"MIT"
] | 31
|
2015-01-14T00:41:14.000Z
|
2022-02-16T14:45:13.000Z
|
#!/usr/bin/env python
"""Cluster SNVs based on SNV freqs confidence interval
"""
__author__ = "Andreas Wilm, Niranjan Nagarajan"
__email__ = "wilma@gis.a-star.edu.sg"
__copyright__ = "2013,2014 Genome Institute of Singapore"
__license__ = "The MIT License"
# --- standard library imports
#
import sys
import logging
import os
import argparse
from math import sqrt
from collections import namedtuple
from itertools import groupby
#--- third-party imports
#
# /
#--- project specific imports
#
# James Casbon's pyvcf
import vcf
#global logger
# http://docs.python.org/library/logging.html
LOG = logging.getLogger("")
logging.basicConfig(level=logging.WARN,
format='%(levelname)s [%(asctime)s]: %(message)s')
CI = namedtuple('CI', ['min', 'max'])
# invocation of ipython on exceptions
#import sys, pdb
#from IPython.core import ultratb
#sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux', call_pdb=1)
def compute_ci(coverage, var_count):
"""Compute confidnce interval:
Agresti-Coull Interval at the 0.05 level
http://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval#Agresti-Coull_Interval
n~ = n + 4
p~ = 1/n~ * (X + 4/2)
ci: p~ +- 2*sqrt(1/n~ * p~ * (1-p~))
"""
n_t = float(coverage + 4)
p_t = (var_count + 2) / n_t
ci = 2 * sqrt(p_t * (1-p_t) / n_t)
min_ci = p_t - 3*ci
if min_ci < 0.0:
min_ci = 0.0
max_ci = p_t + 3*ci
return CI._make([min_ci, max_ci])
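# Illustrative example (added; numbers are hypothetical): compute_ci(coverage=100,
# var_count=5) uses n~ = 104 and p~ = 7/104 ~ 0.067, so the returned CI is centred
# near 0.067; the lower bound is clipped to 0.0 whenever the interval would go
# negative.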
def fasta_iter(fasta_name):
"""
Given a FASTA file, yield tuples of (header, sequence).
Brent Pedersen: https://www.biostars.org/p/710/
"""
fh = open(fasta_name)
# ditch the boolean (x[0]) and just keep the header or sequence since
# we know they alternate.
faiter = (x[1] for x in groupby(fh, lambda line: line[0] == ">"))
for header in faiter:
# drop the ">"
#header = header.next()[1:].strip()
header = header.next()[1:].strip().split(" ")[0]
# join all sequence lines to one.
seq = "".join(s.strip() for s in faiter.next())
yield header, seq
def cmdline_parser():
"""
Create an argparse.ArgumentParser instance
"""
# http://docs.python.org/dev/howto/argparse.html
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--verbose",
action="store_true",
dest="verbose",
help="be verbose")
parser.add_argument("--debug",
action="store_true",
dest="debug",
help="enable debugging")
parser.add_argument("-i", "--variants",
dest="var_file",
help="variant input file (vcf format)")
parser.add_argument("-r", "--ref",
dest="reffa",
help="Reference fasta file (for reconstructing cluster sequence)")
parser.add_argument("-o", "--out",
dest="cluster_file",
default="-",
help="Cluster output file (- for stdout = default)")
return parser
def vcf_var_to_str(v):
return "%s %d %s>%s %f" % (
v.CHROM, v.POS, v.REF, ','.join(["%s" % x for x in v.ALT]), v.INFO['AF'])
def main():
"""The main function
"""
parser = cmdline_parser()
args = parser.parse_args()
# FIXME catch unrecognized args (not just (len(args)
if args.verbose:
LOG.setLevel(logging.INFO)
if args.debug:
LOG.setLevel(logging.DEBUG)
for (in_file, descr) in [(args.var_file, "variant file")]:
if not in_file:
parser.error("%s input file argument missing." % descr)
sys.exit(1)
if not os.path.exists(in_file) and in_file != "-":
sys.stderr.write(
"file '%s' does not exist.\n" % in_file)
sys.exit(1)
for (out_file, descr) in [(args.cluster_file, "cluster output file")]:
if not out_file:
parser.error("%s output file argument missing." % descr)
sys.exit(1)
if os.path.exists(out_file) and out_file!="-":
sys.stderr.write(
"Cowardly refusing to overwrite existing"
" output file '%s'.\n" % out_file)
sys.exit(1)
if args.cluster_file == '-':
fh_out = sys.stdout
else:
fh_out = open(args.cluster_file, 'w')
if args.reffa:
refno = 0
for refname, refseq in fasta_iter(args.reffa):
if refno > 0:
sys.stderr.write("Only supporting one sequence\n")
sys.exit(1)
refno += 1
else:
refseq = ""
if args.var_file == '-':
vcf_fh = sys.stdin
else:
vcf_fh = vcf.VCFReader(filename=args.var_file)
var_list = [v for v in vcf_fh]
if any([not v.is_snp for v in var_list]):
sys.stderr.write("WARNING: Only supporting SNPs! Automatically removing others\n")
var_list = [v for v in var_list if v.is_snp]
LOG.info("Parsed %d SNPs from %s" % (len(var_list), args.var_file))
assert all([x.INFO.has_key('AF') and x.INFO.has_key('DP')
for x in var_list])
var_list = sorted(var_list, key=lambda x: x.INFO['AF'], reverse=True)
ci_list = [compute_ci(v.INFO['DP'], int(v.INFO['AF'] * v.INFO['DP']))
for v in var_list]
var_ci_list = list(zip(var_list, ci_list))
del var_list, ci_list# paranoia
if len(var_ci_list)==0:
fh_out.write("No variants <-> no clusters!\n")
if fh_out != sys.stdout:
fh_out.close()
sys.exit(0)
cluster = dict()
clu_no = 0
seed_var, seed_ci = var_ci_list[0]
#cluster[clu_no,'members'] = ["%s %f" % (seed.repr, seed.freq)]
cluster[clu_no,'members'] = [seed_var]
cluster[clu_no,'min'] = seed_ci.min
cluster[clu_no,'max'] = seed_ci.max
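# Added note: this is a greedy clustering pass. Variants were sorted by
# decreasing frequency above, so each variant is merged into the current
# cluster while its CI upper bound still overlaps the cluster's lower bound;
# otherwise it seeds a new cluster.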
for var, ci in var_ci_list[1:]:
LOG.debug("checking %s %f: max_ci %f vvar. clu_min %f" % (
var, var.INFO['AF'], ci.max, cluster[clu_no,'min']))
if ci.max > cluster[clu_no,'min']:
#cluster[clu_no,'members'].append("%s %f" % (var.repr, var.freq))
cluster[clu_no,'members'].append(var)
else:
clu_no += 1
seed = var
#cluster[clu_no,'members'] = ["%s %f" % (seed.repr, seed.freq)]
cluster[clu_no,'members'] = [seed]
cluster[clu_no,'min'] = ci.min
cluster[clu_no,'max'] = ci.max
for i in range(clu_no+1):
fh_out.write("# cluster %d (freq. range: %f - %f): %s\n" % (
i+1, cluster[i,'min'], cluster[i,'max'],
', '.join([vcf_var_to_str(x) for x in cluster[i,'members']])))
# write sequence as well if we have a reference
if refseq:
haplotype = refseq
for v in sorted(cluster[i,'members'], key = lambda v: v.POS):
# FIXME random order for multi-allelic positions
assert v.CHROM == refname
assert refseq[v.POS-1] == v.REF# use refseq to not break for multi-allelic positions
assert len(v.ALT)==1, ("Support for 1 base alt only")
alt = str(v.ALT[0])
idx = v.POS-1
haplotype = haplotype[:idx] + alt + haplotype[idx + 1:]
fh_out.write(">haplotype-cluster-{}\n{}\n".format(i+1, haplotype))
if fh_out != sys.stdout:
fh_out.close()
print("%d clusters found (written to %s)" % (clu_no+1, fh_out.name))
if __name__ == "__main__":
main()
LOG.info("Successful program exit")
| 30.251938
| 100
| 0.557207
|
__author__ = "Andreas Wilm, Niranjan Nagarajan"
__email__ = "wilma@gis.a-star.edu.sg"
__copyright__ = "2013,2014 Genome Institute of Singapore"
__license__ = "The MIT License"
import sys
import logging
import os
import argparse
from math import sqrt
from collections import namedtuple
from itertools import groupby
import vcf
#global logger
# http://docs.python.org/library/logging.html
LOG = logging.getLogger("")
logging.basicConfig(level=logging.WARN,
format='%(levelname)s [%(asctime)s]: %(message)s')
CI = namedtuple('CI', ['min', 'max'])
# invocation of ipython on exceptions
#import sys, pdb
#from IPython.core import ultratb
#sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux', call_pdb=1)
def compute_ci(coverage, var_count):
n_t = float(coverage + 4)
p_t = (var_count + 2) / n_t
ci = 2 * sqrt(p_t * (1-p_t) / n_t)
min_ci = p_t - 3*ci
if min_ci < 0.0:
min_ci = 0.0
max_ci = p_t + 3*ci
return CI._make([min_ci, max_ci])
def fasta_iter(fasta_name):
fh = open(fasta_name)
# ditch the boolean (x[0]) and just keep the header or sequence since
# we know they alternate.
faiter = (x[1] for x in groupby(fh, lambda line: line[0] == ">"))
for header in faiter:
# drop the ">"
#header = header.next()[1:].strip()
header = header.next()[1:].strip().split(" ")[0]
# join all sequence lines to one.
seq = "".join(s.strip() for s in faiter.next())
yield header, seq
def cmdline_parser():
# http://docs.python.org/dev/howto/argparse.html
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--verbose",
action="store_true",
dest="verbose",
help="be verbose")
parser.add_argument("--debug",
action="store_true",
dest="debug",
help="enable debugging")
parser.add_argument("-i", "--variants",
dest="var_file",
help="variant input file (vcf format)")
parser.add_argument("-r", "--ref",
dest="reffa",
help="Reference fasta file (for reconstructing cluster sequence)")
parser.add_argument("-o", "--out",
dest="cluster_file",
default="-",
help="Cluster output file (- for stdout = default)")
return parser
def vcf_var_to_str(v):
return "%s %d %s>%s %f" % (
v.CHROM, v.POS, v.REF, ','.join(["%s" % x for x in v.ALT]), v.INFO['AF'])
def main():
parser = cmdline_parser()
args = parser.parse_args()
# FIXME catch unrecognized args (not just (len(args)
if args.verbose:
LOG.setLevel(logging.INFO)
if args.debug:
LOG.setLevel(logging.DEBUG)
for (in_file, descr) in [(args.var_file, "variant file")]:
if not in_file:
parser.error("%s input file argument missing." % descr)
sys.exit(1)
if not os.path.exists(in_file) and in_file != "-":
sys.stderr.write(
"file '%s' does not exist.\n" % in_file)
sys.exit(1)
for (out_file, descr) in [(args.cluster_file, "cluster output file")]:
if not out_file:
parser.error("%s output file argument missing." % descr)
sys.exit(1)
if os.path.exists(out_file) and out_file!="-":
sys.stderr.write(
"Cowardly refusing to overwrite existing"
" output file '%s'.\n" % out_file)
sys.exit(1)
if args.cluster_file == '-':
fh_out = sys.stdout
else:
fh_out = open(args.cluster_file, 'w')
if args.reffa:
refno = 0
for refname, refseq in fasta_iter(args.reffa):
if refno > 0:
sys.stderr.write("Only supporting one sequence\n")
sys.exit(1)
refno += 1
else:
refseq = ""
if args.var_file == '-':
vcf_fh = sys.stdin
else:
vcf_fh = vcf.VCFReader(filename=args.var_file)
var_list = [v for v in vcf_fh]
if any([not v.is_snp for v in var_list]):
sys.stderr.write("WARNING: Only supporting SNPs! Automatically removing others\n")
var_list = [v for v in var_list if v.is_snp]
LOG.info("Parsed %d SNPs from %s" % (len(var_list), args.var_file))
assert all([x.INFO.has_key('AF') and x.INFO.has_key('DP')
for x in var_list])
var_list = sorted(var_list, key=lambda x: x.INFO['AF'], reverse=True)
ci_list = [compute_ci(v.INFO['DP'], int(v.INFO['AF'] * v.INFO['DP']))
for v in var_list]
var_ci_list = list(zip(var_list, ci_list))
del var_list, ci_list# paranoia
if len(var_ci_list)==0:
fh_out.write("No variants <-> no clusters!\n")
if fh_out != sys.stdout:
fh_out.close()
sys.exit(0)
cluster = dict()
clu_no = 0
seed_var, seed_ci = var_ci_list[0]
#cluster[clu_no,'members'] = ["%s %f" % (seed.repr, seed.freq)]
cluster[clu_no,'members'] = [seed_var]
cluster[clu_no,'min'] = seed_ci.min
cluster[clu_no,'max'] = seed_ci.max
for var, ci in var_ci_list[1:]:
LOG.debug("checking %s %f: max_ci %f vvar. clu_min %f" % (
var, var.INFO['AF'], ci.max, cluster[clu_no,'min']))
if ci.max > cluster[clu_no,'min']:
#cluster[clu_no,'members'].append("%s %f" % (var.repr, var.freq))
cluster[clu_no,'members'].append(var)
else:
clu_no += 1
seed = var
#cluster[clu_no,'members'] = ["%s %f" % (seed.repr, seed.freq)]
cluster[clu_no,'members'] = [seed]
cluster[clu_no,'min'] = ci.min
cluster[clu_no,'max'] = ci.max
for i in range(clu_no+1):
fh_out.write("# cluster %d (freq. range: %f - %f): %s\n" % (
i+1, cluster[i,'min'], cluster[i,'max'],
', '.join([vcf_var_to_str(x) for x in cluster[i,'members']])))
# write sequence as well if we have a reference
if refseq:
haplotype = refseq
for v in sorted(cluster[i,'members'], key = lambda v: v.POS):
# FIXME random order for multi-allelic positions
assert v.CHROM == refname
assert refseq[v.POS-1] == v.REF# use refseq to not break for multi-allelic positions
assert len(v.ALT)==1, ("Support for 1 base alt only")
alt = str(v.ALT[0])
idx = v.POS-1
haplotype = haplotype[:idx] + alt + haplotype[idx + 1:]
fh_out.write(">haplotype-cluster-{}\n{}\n".format(i+1, haplotype))
if fh_out != sys.stdout:
fh_out.close()
print("%d clusters found (written to %s)" % (clu_no+1, fh_out.name))
if __name__ == "__main__":
main()
LOG.info("Successful program exit")
| true
| true
|
f71a51a2c95b6595d277af331364047551e8377e
| 608
|
py
|
Python
|
problems/number-complement.py
|
sailikhithk/tech-interview-prep
|
e833764cf98915d56118bddfa0e01871c58de75e
|
[
"Apache-2.0"
] | null | null | null |
problems/number-complement.py
|
sailikhithk/tech-interview-prep
|
e833764cf98915d56118bddfa0e01871c58de75e
|
[
"Apache-2.0"
] | null | null | null |
problems/number-complement.py
|
sailikhithk/tech-interview-prep
|
e833764cf98915d56118bddfa0e01871c58de75e
|
[
"Apache-2.0"
] | null | null | null |
"""
First, we convert the num to its binary.
```
>>> bin(5)
>>> '0b101'
```
Second, we need to return the base-10 value of the binary's complement.
Complement is easy `'101' => '010'`.
Turn to base10:
```
'010' => 0*pow(2, 2) + 1*pow(2, 1) + 0*pow(2, 0)
'11011' => 1*pow(2, 4) + 1*pow(2, 3) + 0*pow(2, 2) + 1*pow(2, 1) + 1*pow(2, 0)
```
Basic bit manipulation.
<https://www.youtube.com/watch?v=NLKQEOgBAnw>
"""
class Solution(object):
def findComplement(self, num):
b = bin(num)[2:]
opt = 0
for i, c in enumerate(reversed(b)):
if c=='0': opt+=pow(2, i)
return opt
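# Illustrative usage (added; not part of the original solution):
#   Solution().findComplement(5) == 2      # 5 = 0b101 -> complement 0b010 = 2
# A common bit-manipulation alternative computes the same result as
#   ~num & ((1 << num.bit_length()) - 1)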
| 23.384615
| 78
| 0.555921
|
class Solution(object):
def findComplement(self, num):
b = bin(num)[2:]
opt = 0
for i, c in enumerate(reversed(b)):
if c=='0': opt+=pow(2, i)
return opt
| true
| true
|
f71a52383480c16caea8e9d42551045766340f5e
| 251
|
py
|
Python
|
jacoren/__version__.py
|
kuszaj/jacoren
|
42344982248ed688da8f3d9383ca4ae63f542cf3
|
[
"MIT"
] | 1
|
2018-02-27T08:54:40.000Z
|
2018-02-27T08:54:40.000Z
|
jacoren/__version__.py
|
kuszaj/jacoren
|
42344982248ed688da8f3d9383ca4ae63f542cf3
|
[
"MIT"
] | null | null | null |
jacoren/__version__.py
|
kuszaj/jacoren
|
42344982248ed688da8f3d9383ca4ae63f542cf3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Package info."""
__version__ = '0.1.0'
__title__ = 'jacoren'
__description__ = ''
__author__ = 'Piotr Kuszaj'
__author_email__ = 'peterkuszaj@gmail.com'
__license__ = 'MIT'
__all__ = ('platform', 'cpu', 'memory', 'disks')
| 20.916667
| 48
| 0.657371
|
__version__ = '0.1.0'
__title__ = 'jacoren'
__description__ = ''
__author__ = 'Piotr Kuszaj'
__author_email__ = 'peterkuszaj@gmail.com'
__license__ = 'MIT'
__all__ = ('platform', 'cpu', 'memory', 'disks')
| true
| true
|
f71a5241dff474c819eaebc8af456389f5a76087
| 4,386
|
py
|
Python
|
tests/unit/test_task.py
|
lekshmimallika-aot/business-schemas
|
d95b43f1d04e29fd9bab101789c277db54123d9b
|
[
"Apache-2.0"
] | 2
|
2020-02-05T21:36:27.000Z
|
2021-08-28T23:56:52.000Z
|
tests/unit/test_task.py
|
lekshmimallika-aot/business-schemas
|
d95b43f1d04e29fd9bab101789c277db54123d9b
|
[
"Apache-2.0"
] | 13
|
2020-03-25T17:28:11.000Z
|
2022-03-30T20:06:04.000Z
|
tests/unit/test_task.py
|
lekshmimallika-aot/business-schemas
|
d95b43f1d04e29fd9bab101789c277db54123d9b
|
[
"Apache-2.0"
] | 19
|
2020-01-31T23:11:47.000Z
|
2022-03-30T18:08:15.000Z
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Suite to ensure the legal task schema is valid.
This suite should have at least 1 test for filing and todo task items.
"""
from registry_schemas import validate
from registry_schemas.example_data import FILING_HEADER, UNMANAGED
def test_valid_task_todo():
"""Assert that the schema accepts a valid todo task."""
task = {
'task': {
'todo': {
'business': {
'cacheId': 1,
'foundingDate': '2007-04-08T00:00:00+00:00',
'identifier': 'CP0002098',
'lastLedgerTimestamp': '2019-04-15T20:05:49.068272+00:00',
'legalName': 'Legal Name - CP0002098'
},
'header': {
'name': 'annualReport',
'ARFilingYear': 2019,
'status': 'NEW'
}
}
},
'order': 2,
'enabled': False
}
is_valid, errors = validate(task, 'task')
# if errors:
# for err in errors:
# print(err.message)
print(errors)
assert is_valid
def test_valid_task_filing():
"""Assert that the schema accepts a valid filing task."""
import copy
filing = copy.deepcopy(FILING_HEADER)
filing['filing']['unmanaged'] = UNMANAGED
new_task = {
'task': {
'filing': copy.deepcopy(filing['filing'])
},
'order': 1,
'enabled': True
}
is_valid, errors = validate(new_task, 'task')
assert is_valid
def test_invalid_task_neither():
"""Assert that the schema rejects an invalid task."""
task = {
'task': {
'invalid': {
'foo': 'abc',
'bar': '123'
}
},
'order': 2,
'enabled': False
}
is_valid, errors = validate(task, 'task')
# if errors:
# for err in errors:
# print(err.message)
print(errors)
assert not is_valid
def test_invalid_task_missing_order():
"""Assert that the schema rejects a task missing the 'order' property."""
task = {
'task': {
'todo': {
'business': {
'cacheId': 1,
'foundingDate': '2007-04-08T00:00:00+00:00',
'identifier': 'CP0002098',
'lastLedgerTimestamp': '2019-04-15T20:05:49.068272+00:00',
'legalName': 'Legal Name - CP0002098'
},
'header': {
'name': 'annualReport',
'ARFilingYear': 2019,
'status': 'NEW'
}
}
},
'enabled': False
}
is_valid, errors = validate(task, 'task')
# if errors:
# for err in errors:
# print(err.message)
print(errors)
assert not is_valid
def test_invalid_task_missing_enabled():
"""Assert that the schema rejects a task missing the 'enabled' property."""
task = {
'task': {
'todo': {
'business': {
'cacheId': 1,
'foundingDate': '2007-04-08T00:00:00+00:00',
'identifier': 'CP0002098',
'lastLedgerTimestamp': '2019-04-15T20:05:49.068272+00:00',
'legalName': 'Legal Name - CP0002098'
},
'header': {
'name': 'annualReport',
'ARFilingYear': 2019,
'status': 'NEW'
}
}
},
'order': 2
}
is_valid, errors = validate(task, 'task')
# if errors:
# for err in errors:
# print(err.message)
print(errors)
assert not is_valid
| 27.4125
| 79
| 0.512312
|
from registry_schemas import validate
from registry_schemas.example_data import FILING_HEADER, UNMANAGED
def test_valid_task_todo():
task = {
'task': {
'todo': {
'business': {
'cacheId': 1,
'foundingDate': '2007-04-08T00:00:00+00:00',
'identifier': 'CP0002098',
'lastLedgerTimestamp': '2019-04-15T20:05:49.068272+00:00',
'legalName': 'Legal Name - CP0002098'
},
'header': {
'name': 'annualReport',
'ARFilingYear': 2019,
'status': 'NEW'
}
}
},
'order': 2,
'enabled': False
}
is_valid, errors = validate(task, 'task')
print(errors)
assert is_valid
def test_valid_task_filing():
import copy
filing = copy.deepcopy(FILING_HEADER)
filing['filing']['unmanaged'] = UNMANAGED
new_task = {
'task': {
'filing': copy.deepcopy(filing['filing'])
},
'order': 1,
'enabled': True
}
is_valid, errors = validate(new_task, 'task')
assert is_valid
def test_invalid_task_neither():
task = {
'task': {
'invalid': {
'foo': 'abc',
'bar': '123'
}
},
'order': 2,
'enabled': False
}
is_valid, errors = validate(task, 'task')
print(errors)
assert not is_valid
def test_invalid_task_missing_order():
task = {
'task': {
'todo': {
'business': {
'cacheId': 1,
'foundingDate': '2007-04-08T00:00:00+00:00',
'identifier': 'CP0002098',
'lastLedgerTimestamp': '2019-04-15T20:05:49.068272+00:00',
'legalName': 'Legal Name - CP0002098'
},
'header': {
'name': 'annualReport',
'ARFilingYear': 2019,
'status': 'NEW'
}
}
},
'enabled': False
}
is_valid, errors = validate(task, 'task')
print(errors)
assert not is_valid
def test_invalid_task_missing_enabled():
task = {
'task': {
'todo': {
'business': {
'cacheId': 1,
'foundingDate': '2007-04-08T00:00:00+00:00',
'identifier': 'CP0002098',
'lastLedgerTimestamp': '2019-04-15T20:05:49.068272+00:00',
'legalName': 'Legal Name - CP0002098'
},
'header': {
'name': 'annualReport',
'ARFilingYear': 2019,
'status': 'NEW'
}
}
},
'order': 2
}
is_valid, errors = validate(task, 'task')
print(errors)
assert not is_valid
| true
| true
|
f71a524f93d7cd5915ce95bc5b60b531dbf7e8cf
| 18,115
|
py
|
Python
|
scons-local/SCons/Tool/GettextCommon.py
|
bibleuspro/scons
|
625d446ae8996ff1b3d660c44e2827fc832cf12b
|
[
"MIT"
] | 1
|
2017-02-10T00:26:44.000Z
|
2017-02-10T00:26:44.000Z
|
scons-local/SCons/Tool/GettextCommon.py
|
bibleuspro/scons
|
625d446ae8996ff1b3d660c44e2827fc832cf12b
|
[
"MIT"
] | null | null | null |
scons-local/SCons/Tool/GettextCommon.py
|
bibleuspro/scons
|
625d446ae8996ff1b3d660c44e2827fc832cf12b
|
[
"MIT"
] | null | null | null |
"""SCons.Tool.GettextCommon module
Used by several tools of `gettext` toolset.
"""
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/GettextCommon.py 2014/07/05 09:42:21 garyo"
import SCons.Warnings
import re
#############################################################################
class XgettextToolWarning(SCons.Warnings.Warning): pass
class XgettextNotFound(XgettextToolWarning): pass
class MsginitToolWarning(SCons.Warnings.Warning): pass
class MsginitNotFound(MsginitToolWarning): pass
class MsgmergeToolWarning(SCons.Warnings.Warning): pass
class MsgmergeNotFound(MsgmergeToolWarning): pass
class MsgfmtToolWarning(SCons.Warnings.Warning): pass
class MsgfmtNotFound(MsgfmtToolWarning): pass
#############################################################################
SCons.Warnings.enableWarningClass(XgettextToolWarning)
SCons.Warnings.enableWarningClass(XgettextNotFound)
SCons.Warnings.enableWarningClass(MsginitToolWarning)
SCons.Warnings.enableWarningClass(MsginitNotFound)
SCons.Warnings.enableWarningClass(MsgmergeToolWarning)
SCons.Warnings.enableWarningClass(MsgmergeNotFound)
SCons.Warnings.enableWarningClass(MsgfmtToolWarning)
SCons.Warnings.enableWarningClass(MsgfmtNotFound)
#############################################################################
#############################################################################
class _POTargetFactory(object):
""" A factory of `PO` target files.
Factory defaults differ from those of `SCons.Node.FS.FS`. We set the `precious`
flag (required by the gettext builders and actions) and the `noclean` flag by
default for all produced nodes.
"""
def __init__( self, env, nodefault = True, alias = None, precious = True
, noclean = True ):
""" Object constructor.
**Arguments**
- *env* (`SCons.Environment.Environment`)
- *nodefault* (`boolean`) - if `True`, produced nodes will be ignored
from default target `'.'`
- *alias* (`string`) - if provided, produced nodes will be automatically
added to this alias, and alias will be set as `AlwaysBuild`
- *precious* (`boolean`) - if `True`, the produced nodes will be set as
`Precious`.
- *noclean* (`boolean`) - if `True`, the produced nodes will be excluded
from `Clean`.
"""
self.env = env
self.alias = alias
self.precious = precious
self.noclean = noclean
self.nodefault = nodefault
def _create_node(self, name, factory, directory = None, create = 1):
""" Create node, and set it up to factory settings. """
import SCons.Util
node = factory(name, directory, create)
node.set_noclean(self.noclean)
node.set_precious(self.precious)
if self.nodefault:
self.env.Ignore('.', node)
if self.alias:
self.env.AlwaysBuild(self.env.Alias(self.alias, node))
return node
def Entry(self, name, directory = None, create = 1):
""" Create `SCons.Node.FS.Entry` """
return self._create_node(name, self.env.fs.Entry, directory, create)
def File(self, name, directory = None, create = 1):
""" Create `SCons.Node.FS.File` """
return self._create_node(name, self.env.fs.File, directory, create)
#############################################################################
#############################################################################
_re_comment = re.compile(r'(#[^\n\r]+)$', re.M)
_re_lang = re.compile(r'([a-zA-Z0-9_]+)', re.M)
#############################################################################
def _read_linguas_from_files(env, linguas_files = None):
""" Parse `LINGUAS` file and return list of extracted languages """
import SCons.Util
import SCons.Environment
global _re_comment
global _re_lang
if not SCons.Util.is_List(linguas_files) \
and not SCons.Util.is_String(linguas_files) \
and not isinstance(linguas_files, SCons.Node.FS.Base) \
and linguas_files:
# If, linguas_files==True or such, then read 'LINGUAS' file.
linguas_files = [ 'LINGUAS' ]
if linguas_files is None:
return []
fnodes = env.arg2nodes(linguas_files)
linguas = []
for fnode in fnodes:
contents = _re_comment.sub("", fnode.get_text_contents())
ls = [ l for l in _re_lang.findall(contents) if l ]
linguas.extend(ls)
return linguas
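# Illustrative example (added; file content is hypothetical): for a LINGUAS file
# containing
#   # available translations
#   de pl
# _read_linguas_from_files(env, ['LINGUAS']) would return ['de', 'pl'] --
# comments are stripped by _re_comment and codes extracted by _re_lang.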
#############################################################################
#############################################################################
from SCons.Builder import BuilderBase
#############################################################################
class _POFileBuilder(BuilderBase):
""" `PO` file builder.
This is multi-target single-source builder. In typical situation the source
is single `POT` file, e.g. `messages.pot`, and there are multiple `PO`
targets to be updated from this `POT`. We must run
`SCons.Builder.BuilderBase._execute()` separatelly for each target to track
dependencies separatelly for each target file.
**NOTE**: if we call `SCons.Builder.BuilderBase._execute(.., target, ...)`
with target being the list of all targets, all targets would be rebuilt each time
one of the targets from this list is missing. This would happen, for example,
when a new language `ll` enters `LINGUAS_FILE` (at this moment there is no
`ll.po` file yet). To avoid this, we override
`SCons.Builder.BuilderBase._execute()` and call it separately for each
target. Here we also append to the target list the languages read from
`LINGUAS_FILE`.
"""
#
#* The argument for overriding _execute(): We must use environment with
# builder overrides applied (see BuilderBase.__init__()). Here it comes for
# free.
#* The argument against using 'emitter': The emitter is called too late
# by BuilderBase._execute(). If user calls, for example:
#
# env.POUpdate(LINGUAS_FILE = 'LINGUAS')
#
# the builder throws error, because it is called with target=None,
# source=None and is trying to "generate" sources or target list first.
# If user calls
#
# env.POUpdate(['foo', 'baz'], LINGUAS_FILE = 'LINGUAS')
#
# the env.BuilderWrapper() calls our builder with target=None,
# source=['foo', 'baz']. The BuilderBase._execute() then splits execution
# and executes iteratively (recursion) self._execute(None, source[i]).
# After that it calls the emitter (which is quite too late). The emitter is
# also called in each iteration, which makes things even worse.
def __init__(self, env, **kw):
if not 'suffix' in kw:
kw['suffix'] = '$POSUFFIX'
if not 'src_suffix' in kw:
kw['src_suffix'] = '$POTSUFFIX'
if not 'src_builder' in kw:
kw['src_builder'] = '_POTUpdateBuilder'
if not 'single_source' in kw:
kw['single_source'] = True
alias = None
if 'target_alias' in kw:
alias = kw['target_alias']
del kw['target_alias']
if not 'target_factory' in kw:
kw['target_factory'] = _POTargetFactory(env, alias=alias).File
BuilderBase.__init__(self, **kw)
def _execute(self, env, target, source, *args, **kw):
""" Execute builder's actions.
Here we append to `target` the languages read from `$LINGUAS_FILE` and
apply `SCons.Builder.BuilderBase._execute()` separately to each target.
The arguments and return value are the same as for
`SCons.Builder.BuilderBase._execute()`.
"""
import SCons.Util
import SCons.Node
linguas_files = None
if env.has_key('LINGUAS_FILE') and env['LINGUAS_FILE']:
linguas_files = env['LINGUAS_FILE']
# This prevents endless recursion loop (we'll be invoked once for
# each target appended here, we must not extend the list again).
env['LINGUAS_FILE'] = None
linguas = _read_linguas_from_files(env,linguas_files)
if SCons.Util.is_List(target):
target.extend(linguas)
elif target is not None:
target = [target] + linguas
else:
target = linguas
if not target:
# Let SCons.BuilderBase handle this pathological situation
return BuilderBase._execute( self, env, target, source, *args, **kw)
# The rest is ours
if not SCons.Util.is_List(target):
target = [ target ]
result = []
for tgt in target:
r = BuilderBase._execute( self, env, [tgt], source, *args, **kw)
result.extend(r)
if linguas_files is not None:
env['LINGUAS_FILE'] = linguas_files
return SCons.Node.NodeList(result)
#############################################################################
import SCons.Environment
#############################################################################
def _translate(env, target=None, source=SCons.Environment._null, *args, **kw):
""" Function for `Translate()` pseudo-builder """
if target is None: target = []
pot = env.POTUpdate(None, source, *args, **kw)
po = env.POUpdate(target, pot, *args, **kw)
return po
#############################################################################
#############################################################################
class RPaths(object):
""" Callable object, which returns pathnames relative to SCons current
working directory.
It seems like `SCons.Node.FS.Base.get_path()` returns absolute paths
for nodes that are outside of current working directory (`env.fs.getcwd()`).
Here, we often have `SConscript`, `POT` and `PO` files within `po/`
directory and source files (e.g. `*.c`) outside of it. When generating `POT`
template file, references to source files are written to `POT` template, so
a translator may later quickly jump to appropriate source file and line from
its `PO` editor (e.g. `poedit`). Relative paths in `PO` file are usually
interpreted by `PO` editor as paths relative to the place, where `PO` file
lives. The absolute paths would make resultant `POT` file nonportable, as
the references would be correct only on the machine, where `POT` file was
recently re-created. For such reason, we need a function, which always
returns relative paths. This is the purpose of `RPaths` callable object.
The `__call__` method returns paths relative to the current working directory, but
we assume that *xgettext(1)* is run from the directory where the target file is
going to be created.
Note that this may not work for files distributed over several hosts or
across different drives on Windows. We assume here that a single local
filesystem holds both source files and target `POT` templates.
Intended use of `RPaths` - in `xgettext.py`::
def generate(env):
from GettextCommon import RPaths
...
sources = '$( ${_concat( "", SOURCES, "", __env__, XgettextRPaths, TARGET, SOURCES)} $)'
env.Append(
...
XGETTEXTCOM = 'XGETTEXT ... ' + sources,
...
XgettextRPaths = RPaths(env)
)
"""
# NOTE: This callable object returns pathnames of dirs/files relative to
# current working directory. The pathname remains relative also for entries
# that are outside of the current working directory (note that
# SCons.Node.FS.File and siblings return an absolute path in such case). For
# simplicity we compute path relative to current working directory, this
# seems to be enough for our purposes (don't need TARGET variable and
# SCons.Defaults.Variable_Caller stuff).
def __init__(self, env):
""" Initialize `RPaths` callable object.
**Arguments**:
- *env* - a `SCons.Environment.Environment` object, defines *current
working dir*.
"""
self.env = env
# FIXME: I'm not sure, how it should be implemented (what the *args are in
# general, what is **kw).
def __call__(self, nodes, *args, **kw):
""" Return nodes' paths (strings) relative to current working directory.
**Arguments**:
- *nodes* ([`SCons.Node.FS.Base`]) - list of nodes.
- *args* - currently unused.
- *kw* - currently unused.
**Returns**:
- Tuple of strings, which represent paths relative to current working
directory (for given environment).
"""
# os.path.relpath is available only on python >= 2.6. We use our own
# implementation. It's taken from BareNecessities package:
# http://jimmyg.org/work/code/barenecessities/index.html
from posixpath import curdir
def relpath(path, start=curdir):
import posixpath
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = posixpath.abspath(start).split(posixpath.sep)
path_list = posixpath.abspath(path).split(posixpath.sep)
# Work out how much of the filepath is shared by start and path.
i = len(posixpath.commonprefix([start_list, path_list]))
rel_list = [posixpath.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return posixpath.curdir
return posixpath.join(*rel_list)
import os
import SCons.Node.FS
rpaths = ()
cwd = self.env.fs.getcwd().get_abspath()
for node in nodes:
rpath = None
if isinstance(node, SCons.Node.FS.Base):
rpath = relpath(node.get_abspath(), cwd)
# FIXME: Other types possible here?
if rpath is not None:
rpaths += (rpath,)
return rpaths
#############################################################################
#############################################################################
def _init_po_files(target, source, env):
""" Action function for `POInit` builder. """
nop = lambda target, source, env : 0
if env.has_key('POAUTOINIT'):
autoinit = env['POAUTOINIT']
else:
autoinit = False
# Well, if everything outside works well, this loop should do a single
# iteration. Otherwise we are rebuilding all the targets even if just
# one has changed (but is this our fault?).
for tgt in target:
if not tgt.exists():
if autoinit:
action = SCons.Action.Action('$MSGINITCOM', '$MSGINITCOMSTR')
else:
msg = 'File ' + repr(str(tgt)) + ' does not exist. ' \
+ 'If you are a translator, you can create it through: \n' \
+ '$MSGINITCOM'
action = SCons.Action.Action(nop, msg)
status = action([tgt], source, env)
if status: return status
return 0
#############################################################################
#############################################################################
def _detect_xgettext(env):
""" Detects *xgettext(1)* binary """
if env.has_key('XGETTEXT'):
return env['XGETTEXT']
xgettext = env.Detect('xgettext');
if xgettext:
return xgettext
raise SCons.Errors.StopError(XgettextNotFound,"Could not detect xgettext")
return None
#############################################################################
def _xgettext_exists(env):
return _detect_xgettext(env)
#############################################################################
#############################################################################
def _detect_msginit(env):
""" Detects *msginit(1)* program. """
if env.has_key('MSGINIT'):
return env['MSGINIT']
msginit = env.Detect('msginit');
if msginit:
return msginit
raise SCons.Errors.StopError(MsginitNotFound, "Could not detect msginit")
return None
#############################################################################
def _msginit_exists(env):
return _detect_msginit(env)
#############################################################################
#############################################################################
def _detect_msgmerge(env):
""" Detects *msgmerge(1)* program. """
if env.has_key('MSGMERGE'):
return env['MSGMERGE']
msgmerge = env.Detect('msgmerge');
if msgmerge:
return msgmerge
raise SCons.Errors.StopError(MsgmergeNotFound, "Could not detect msgmerge")
return None
#############################################################################
def _msgmerge_exists(env):
return _detect_msgmerge(env)
#############################################################################
#############################################################################
def _detect_msgfmt(env):
""" Detects *msgmfmt(1)* program. """
if env.has_key('MSGFMT'):
return env['MSGFMT']
msgfmt = env.Detect('msgfmt');
if msgfmt:
return msgfmt
raise SCons.Errors.StopError(MsgfmtNotFound, "Could not detect msgfmt")
return None
#############################################################################
def _msgfmt_exists(env):
return _detect_msgfmt(env)
#############################################################################
#############################################################################
def tool_list(platform, env):
""" List tools that shall be generated by top-level `gettext` tool """
return [ 'xgettext', 'msginit', 'msgmerge', 'msgfmt' ]
#############################################################################
| 42.030162
| 96
| 0.599558
|
__revision__ = "src/engine/SCons/Tool/GettextCommon.py 2014/07/05 09:42:21 garyo"
import SCons.Warnings
import re
| true
| true
|
f71a5321b655a69d95438bc4946e72b3c1c4abfa
| 5,314
|
py
|
Python
|
scilab2py/utils.py
|
blink1073/scilab2py
|
d487828a7087890ce1e035a7c09c4819ff8276c4
|
[
"MIT"
] | 8
|
2015-10-16T23:28:16.000Z
|
2020-06-19T18:49:18.000Z
|
scilab2py/utils.py
|
blink1073/scilab2py
|
d487828a7087890ce1e035a7c09c4819ff8276c4
|
[
"MIT"
] | 8
|
2015-06-25T20:57:56.000Z
|
2020-04-03T22:33:16.000Z
|
scilab2py/utils.py
|
blink1073/scilab2py
|
d487828a7087890ce1e035a7c09c4819ff8276c4
|
[
"MIT"
] | 6
|
2015-04-21T12:23:44.000Z
|
2021-10-01T00:08:47.000Z
|
"""
.. module:: utils
:synopsis: Miscellaneous helper constructs
.. moduleauthor:: Steven Silvester <steven.silvester@ieee.org>
"""
import os
import inspect
import dis
import tempfile
import sys
from .compat import PY2
def _remove_temp_files(dirname):
"""
Remove the created mat files in the user's temp folder
"""
import os
import glob
for fname in glob.glob(os.path.join(dirname, 'tmp*.mat')):
try:
os.remove(fname)
except OSError: # pragma: no cover
pass
def get_nout():
"""
Return the number of return values the caller is expecting.
Adapted from the ompc project.
Returns
=======
out : int
Number of arguments expected by caller.
"""
frame = inspect.currentframe()
# step into the function that called us
# nout is two frames back
frame = frame.f_back.f_back
bytecode = frame.f_code.co_code
if sys.version_info >= (3, 6):
instruction = bytecode[frame.f_lasti + 2]
else:
instruction = bytecode[frame.f_lasti + 3]
instruction = ord(instruction) if PY2 else instruction
if instruction == dis.opmap['UNPACK_SEQUENCE']:
if sys.version_info >= (3, 6):
howmany = bytecode[frame.f_lasti + 3]
else:
howmany = bytecode[frame.f_lasti + 4]
howmany = ord(howmany) if PY2 else howmany
return howmany
elif instruction in [dis.opmap['POP_TOP'], dis.opmap['PRINT_EXPR']]:
return 0
return 1
def create_file(temp_dir):
"""
Create a MAT file with a random name in the temp directory
Parameters
==========
temp_dir : str, optional
If specified, the file will be created in that directory,
otherwise a default directory is used.
Returns
=======
out : str
Random file name with the desired extension
"""
temp_file = tempfile.NamedTemporaryFile(suffix='.mat', delete=False,
dir=temp_dir)
temp_file.close()
return os.path.abspath(temp_file.name)
class Scilab2PyError(Exception):
""" Called when we can't open Scilab or Scilab throws an error
"""
pass
class Struct(dict):
"""
Scilab style struct, enhanced.
Supports dictionary and attribute style access. Can be pickled,
and supports code completion in a REPL.
Examples
========
>>> from pprint import pprint
>>> from scilab2py import Struct
>>> a = Struct()
>>> a.b = 'spam' # a["b"] == 'spam'
>>> a.c["d"] = 'eggs' # a.c.d == 'eggs'
>>> pprint(a)
{'b': 'spam', 'c': {'d': 'eggs'}}
"""
def __getattr__(self, attr):
"""Access the dictionary keys for unknown attributes."""
try:
return self[attr]
except KeyError:
msg = "'Struct' object has no attribute %s" % attr
raise AttributeError(msg)
def __getitem__(self, attr):
"""
Get a dict value; create a Struct if requesting a Struct member.
Do not create a key if the attribute starts with an underscore.
"""
if attr in self.keys() or attr.startswith('_'):
return dict.__getitem__(self, attr)
frame = inspect.currentframe()
# step into the function that called us
if frame.f_back.f_back and self._is_allowed(frame.f_back.f_back):
dict.__setitem__(self, attr, Struct())
elif self._is_allowed(frame.f_back):
dict.__setitem__(self, attr, Struct())
return dict.__getitem__(self, attr)
def _is_allowed(self, frame):
"""Check for allowed op code in the calling frame"""
allowed = [dis.opmap['STORE_ATTR'], dis.opmap['LOAD_CONST'],
dis.opmap.get('STOP_CODE', 0)]
bytecode = frame.f_code.co_code
instruction = bytecode[frame.f_lasti + 3]
instruction = ord(instruction) if PY2 else instruction
return instruction in allowed
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
@property
def __dict__(self):
"""Allow for code completion in a REPL"""
return self.copy()
def get_log(name=None):
"""Return a console logger.
Output may be sent to the logger using the `debug`, `info`, `warning`,
`error` and `critical` methods.
Parameters
----------
name : str
Name of the log.
References
----------
.. [1] Logging facility for Python,
http://docs.python.org/library/logging.html
"""
import logging
if name is None:
name = 'scilab2py'
else:
name = 'scilab2py.' + name
log = logging.getLogger(name)
log.setLevel(logging.WARN)
return log
def _setup_log():
"""Configure root logger.
"""
import logging
import sys
try:
handler = logging.StreamHandler(stream=sys.stdout)
except TypeError: # pragma: no cover
handler = logging.StreamHandler(strm=sys.stdout)
log = get_log()
log.addHandler(handler)
log.setLevel(logging.WARN)
log.propagate = False
_setup_log()
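get_nout() above works by peeking at the bytecode of the caller's caller: an UNPACK_SEQUENCE opcode right after the call means the result is being tuple-unpacked, while POP_TOP or PRINT_EXPR means it is discarded. A rough illustration of the intended behavior, assuming a wrapper one call level deep as in the real Scilab interface (solve() is a made-up stand-in, and the exact offsets depend on the CPython version, as the version check above suggests):

def solve():
    # Toy stand-in for a wrapped Scilab call: return as many values
    # as the calling statement appears to expect.
    nout = get_nout()                # inspects the frame that called solve()
    return tuple(range(nout)) if nout > 1 else 0

x = solve()       # plain assignment   -> nout is 1
a, b = solve()    # tuple unpacking    -> nout is 2
solve()           # result discarded   -> nout is 0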
| 26.974619
| 75
| 0.585058
|
import os
import inspect
import dis
import tempfile
import sys
from .compat import PY2
def _remove_temp_files(dirname):
import os
import glob
for fname in glob.glob(os.path.join(dirname, 'tmp*.mat')):
try:
os.remove(fname)
except OSError:
pass
def get_nout():
frame = inspect.currentframe()
frame = frame.f_back.f_back
bytecode = frame.f_code.co_code
if sys.version_info >= (3, 6):
instruction = bytecode[frame.f_lasti + 2]
else:
instruction = bytecode[frame.f_lasti + 3]
instruction = ord(instruction) if PY2 else instruction
if instruction == dis.opmap['UNPACK_SEQUENCE']:
if sys.version_info >= (3, 6):
howmany = bytecode[frame.f_lasti + 3]
else:
howmany = bytecode[frame.f_lasti + 4]
howmany = ord(howmany) if PY2 else howmany
return howmany
elif instruction in [dis.opmap['POP_TOP'], dis.opmap['PRINT_EXPR']]:
return 0
return 1
def create_file(temp_dir):
temp_file = tempfile.NamedTemporaryFile(suffix='.mat', delete=False,
dir=temp_dir)
temp_file.close()
return os.path.abspath(temp_file.name)
class Scilab2PyError(Exception):
pass
class Struct(dict):
def __getattr__(self, attr):
try:
return self[attr]
except KeyError:
msg = "'Struct' object has no attribute %s" % attr
raise AttributeError(msg)
def __getitem__(self, attr):
if attr in self.keys() or attr.startswith('_'):
return dict.__getitem__(self, attr)
frame = inspect.currentframe()
if frame.f_back.f_back and self._is_allowed(frame.f_back.f_back):
dict.__setitem__(self, attr, Struct())
elif self._is_allowed(frame.f_back):
dict.__setitem__(self, attr, Struct())
return dict.__getitem__(self, attr)
def _is_allowed(self, frame):
allowed = [dis.opmap['STORE_ATTR'], dis.opmap['LOAD_CONST'],
dis.opmap.get('STOP_CODE', 0)]
bytecode = frame.f_code.co_code
instruction = bytecode[frame.f_lasti + 3]
instruction = ord(instruction) if PY2 else instruction
return instruction in allowed
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
@property
def __dict__(self):
return self.copy()
def get_log(name=None):
import logging
if name is None:
name = 'scilab2py'
else:
name = 'scilab2py.' + name
log = logging.getLogger(name)
log.setLevel(logging.WARN)
return log
def _setup_log():
import logging
import sys
try:
handler = logging.StreamHandler(stream=sys.stdout)
except TypeError:
handler = logging.StreamHandler(strm=sys.stdout)
log = get_log()
log.addHandler(handler)
log.setLevel(logging.WARN)
log.propagate = False
_setup_log()
| true
| true
|
f71a539e1bc739d74244c33e61ec48175b1a0e68
| 182
|
py
|
Python
|
yatube/yatube/urls.py
|
Cooke64/hw02_community
|
10005d05e0142ec9e68b3578d239b6e3da66c0a3
|
[
"BSD-3-Clause"
] | null | null | null |
yatube/yatube/urls.py
|
Cooke64/hw02_community
|
10005d05e0142ec9e68b3578d239b6e3da66c0a3
|
[
"BSD-3-Clause"
] | null | null | null |
yatube/yatube/urls.py
|
Cooke64/hw02_community
|
10005d05e0142ec9e68b3578d239b6e3da66c0a3
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('', include('posts.urls', namespace='post')),
path('admin/', admin.site.urls),
]
| 22.75
| 54
| 0.686813
|
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('', include('posts.urls', namespace='post')),
path('admin/', admin.site.urls),
]
| true
| true
|
f71a53b58b0c817babbdccd697976cfe68604cef
| 182
|
py
|
Python
|
Chapter 4/4-5.py
|
lzhang1/BeginningPygame
|
c239925041a6fa361386f65316ef4bea12c3b482
|
[
"MIT"
] | 43
|
2015-09-20T02:05:48.000Z
|
2022-03-01T22:00:43.000Z
|
Chapter 4/4-5.py
|
lzhang1/BeginningPygame
|
c239925041a6fa361386f65316ef4bea12c3b482
|
[
"MIT"
] | null | null | null |
Chapter 4/4-5.py
|
lzhang1/BeginningPygame
|
c239925041a6fa361386f65316ef4bea12c3b482
|
[
"MIT"
] | 40
|
2015-05-19T06:51:13.000Z
|
2022-03-27T18:11:16.000Z
|
def lerp(value1, value2, factor):
return value1+(value2-value1)*factor
print(lerp(100, 200, 0.))
print(lerp(100, 200, 1.))
print(lerp(100, 200, .5))
print(lerp(100, 200, .25))
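Since the factor is the normalized position between the two endpoints, the prints above yield 100.0, 200.0, 150.0 and 125.0 (for example 100 + (200 - 100) * 0.25 = 125). The same checks written as assertions, purely for illustration:

assert lerp(100, 200, 0.25) == 125.0
assert lerp(0, 10, 0.5) == 5.0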
| 22.75
| 40
| 0.659341
|
def lerp(value1, value2, factor):
return value1+(value2-value1)*factor
print(lerp(100, 200, 0.))
print(lerp(100, 200, 1.))
print(lerp(100, 200, .5))
print(lerp(100, 200, .25))
| true
| true
|
f71a53e8b0bfef59cec65a1838904cf9ebf97f18
| 3,838
|
py
|
Python
|
paasta_tools/metrics/metrics_lib.py
|
xcorail/paasta
|
3f132c73b45fcf0afc31ddb889205ecd9394d4bb
|
[
"Apache-2.0"
] | null | null | null |
paasta_tools/metrics/metrics_lib.py
|
xcorail/paasta
|
3f132c73b45fcf0afc31ddb889205ecd9394d4bb
|
[
"Apache-2.0"
] | 4
|
2021-02-08T20:42:08.000Z
|
2021-06-02T00:51:04.000Z
|
paasta_tools/metrics/metrics_lib.py
|
eric-erki/An-open-distributed-platform-as-a-service
|
6769c5601685deb1017910ab8d09109e8e998892
|
[
"Apache-2.0"
] | null | null | null |
import logging
import time
from abc import ABC
from abc import abstractmethod
from typing import Any
from typing import Callable
from typing import Dict
from typing import Optional
from typing import Type
from typing import Union
from typing_extensions import Protocol
from paasta_tools.utils import load_system_paasta_config
log = logging.getLogger(__name__)
try:
import yelp_meteorite
except ImportError:
yelp_meteorite = None
_metrics_interfaces: Dict[str, Type['BaseMetrics']] = {}
class TimerProtocol(Protocol):
def start(self) -> None:
raise NotImplementedError()
def stop(self) -> None:
raise NotImplementedError()
class GaugeProtocol(Protocol):
def set(self, value: Union[int, float]) -> None:
raise NotImplementedError()
class CounterProtocol(Protocol):
def count(self) -> None:
raise NotImplementedError()
class BaseMetrics(ABC):
def __init__(self, base_name: str) -> None:
self.base_name = base_name
@abstractmethod
def create_timer(self, name: str, **kwargs: Any) -> TimerProtocol:
raise NotImplementedError()
@abstractmethod
def create_gauge(self, name: str, **kwargs: Any) -> GaugeProtocol:
raise NotImplementedError()
@abstractmethod
def create_counter(self, name: str, **kwargs: Any) -> CounterProtocol:
raise NotImplementedError()
def get_metrics_interface(base_name: str) -> BaseMetrics:
metrics_provider = load_system_paasta_config().get_metrics_provider()
return _metrics_interfaces[metrics_provider](base_name)
def register_metrics_interface(name: Optional[str]) -> Callable[[Type[BaseMetrics]], Type[BaseMetrics]]:
def outer(func: Type[BaseMetrics]) -> Type[BaseMetrics]:
_metrics_interfaces[name] = func
return func
return outer
@register_metrics_interface('meteorite')
class MeteoriteMetrics(BaseMetrics):
def __init__(self, base_name: str) -> None:
self.base_name = base_name
if yelp_meteorite is None:
raise ImportError("yelp_meteorite not imported, pleast try another metrics provider")
def create_timer(self, name: str, **kwargs: Any) -> TimerProtocol:
return yelp_meteorite.create_timer(self.base_name + '.' + name, kwargs)
def create_gauge(self, name: str, **kwargs: Any) -> GaugeProtocol:
return yelp_meteorite.create_gauge(self.base_name + '.' + name, kwargs)
def create_counter(self, name: str, **kwargs: Any) -> CounterProtocol:
return yelp_meteorite.create_counter(self.base_name + '.' + name, kwargs)
class Timer(TimerProtocol):
def __init__(self, name: str) -> None:
self.name = name
def start(self) -> None:
log.debug("timer {} start at {}".format(self.name, time.time()))
def stop(self) -> None:
log.debug("timer {} stop at {}".format(self.name, time.time()))
class Gauge(GaugeProtocol):
def __init__(self, name: str) -> None:
self.name = name
def set(self, value: Union[int, float]) -> None:
log.debug(f"gauge {self.name} set to {value}")
class Counter(GaugeProtocol):
def __init__(self, name: str) -> None:
self.name = name
self.counter = 0
def count(self) -> None:
self.counter += 1
log.debug(f"counter {self.name} incremented to {self.counter}")
@register_metrics_interface(None)
class NoMetrics(BaseMetrics):
def __init__(self, base_name: str) -> None:
self.base_name = base_name
def create_timer(self, name: str, **kwargs: Any) -> Timer:
return Timer(self.base_name + '.' + name)
def create_gauge(self, name: str, **kwargs: Any) -> Gauge:
return Gauge(self.base_name + '.' + name)
def create_counter(self, name: str, **kwargs: Any) -> Counter:
return Counter(self.base_name + '.' + name)
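register_metrics_interface acts as a small plugin registry: the decorator stores each BaseMetrics subclass under a provider name, and get_metrics_interface later looks the class up by the provider named in the system PaaSTA config. A minimal sketch of registering and exercising a custom backend directly (the 'printing' provider and PrintMetrics class are made up for illustration; in normal use the provider comes from the config via get_metrics_interface):

@register_metrics_interface('printing')
class PrintMetrics(NoMetrics):
    """Toy backend that just reuses the no-op Timer/Gauge/Counter classes."""

metrics = _metrics_interfaces['printing']('paasta.example')
timer = metrics.create_timer('deploy_duration')
timer.start()
timer.stop()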
| 29.075758
| 104
| 0.683689
|
import logging
import time
from abc import ABC
from abc import abstractmethod
from typing import Any
from typing import Callable
from typing import Dict
from typing import Optional
from typing import Type
from typing import Union
from typing_extensions import Protocol
from paasta_tools.utils import load_system_paasta_config
log = logging.getLogger(__name__)
try:
import yelp_meteorite
except ImportError:
yelp_meteorite = None
_metrics_interfaces: Dict[str, Type['BaseMetrics']] = {}
class TimerProtocol(Protocol):
def start(self) -> None:
raise NotImplementedError()
def stop(self) -> None:
raise NotImplementedError()
class GaugeProtocol(Protocol):
def set(self, value: Union[int, float]) -> None:
raise NotImplementedError()
class CounterProtocol(Protocol):
def count(self) -> None:
raise NotImplementedError()
class BaseMetrics(ABC):
def __init__(self, base_name: str) -> None:
self.base_name = base_name
@abstractmethod
def create_timer(self, name: str, **kwargs: Any) -> TimerProtocol:
raise NotImplementedError()
@abstractmethod
def create_gauge(self, name: str, **kwargs: Any) -> GaugeProtocol:
raise NotImplementedError()
@abstractmethod
def create_counter(self, name: str, **kwargs: Any) -> CounterProtocol:
raise NotImplementedError()
def get_metrics_interface(base_name: str) -> BaseMetrics:
metrics_provider = load_system_paasta_config().get_metrics_provider()
return _metrics_interfaces[metrics_provider](base_name)
def register_metrics_interface(name: Optional[str]) -> Callable[[Type[BaseMetrics]], Type[BaseMetrics]]:
def outer(func: Type[BaseMetrics]) -> Type[BaseMetrics]:
_metrics_interfaces[name] = func
return func
return outer
@register_metrics_interface('meteorite')
class MeteoriteMetrics(BaseMetrics):
def __init__(self, base_name: str) -> None:
self.base_name = base_name
if yelp_meteorite is None:
raise ImportError("yelp_meteorite not imported, pleast try another metrics provider")
def create_timer(self, name: str, **kwargs: Any) -> TimerProtocol:
return yelp_meteorite.create_timer(self.base_name + '.' + name, kwargs)
def create_gauge(self, name: str, **kwargs: Any) -> GaugeProtocol:
return yelp_meteorite.create_gauge(self.base_name + '.' + name, kwargs)
def create_counter(self, name: str, **kwargs: Any) -> CounterProtocol:
return yelp_meteorite.create_counter(self.base_name + '.' + name, kwargs)
class Timer(TimerProtocol):
def __init__(self, name: str) -> None:
self.name = name
def start(self) -> None:
log.debug("timer {} start at {}".format(self.name, time.time()))
def stop(self) -> None:
log.debug("timer {} stop at {}".format(self.name, time.time()))
class Gauge(GaugeProtocol):
def __init__(self, name: str) -> None:
self.name = name
def set(self, value: Union[int, float]) -> None:
log.debug(f"gauge {self.name} set to {value}")
class Counter(GaugeProtocol):
def __init__(self, name: str) -> None:
self.name = name
self.counter = 0
def count(self) -> None:
self.counter += 1
log.debug(f"counter {self.name} incremented to {self.counter}")
@register_metrics_interface(None)
class NoMetrics(BaseMetrics):
def __init__(self, base_name: str) -> None:
self.base_name = base_name
def create_timer(self, name: str, **kwargs: Any) -> Timer:
return Timer(self.base_name + '.' + name)
def create_gauge(self, name: str, **kwargs: Any) -> Gauge:
return Gauge(self.base_name + '.' + name)
def create_counter(self, name: str, **kwargs: Any) -> Counter:
return Counter(self.base_name + '.' + name)
| true
| true
|
f71a540bc5690d18d0e43343992b3cd169988b23
| 3,516
|
py
|
Python
|
DDQN.py
|
TimoleonLatinopoulos/MortalKombatOpenAI
|
59dc89d1f50dd74690859e5e1fa18701a5246382
|
[
"MIT"
] | 1
|
2020-08-12T08:16:06.000Z
|
2020-08-12T08:16:06.000Z
|
DDQN.py
|
TimoleonLatinopoulos/MortalKombatOpenAI
|
59dc89d1f50dd74690859e5e1fa18701a5246382
|
[
"MIT"
] | null | null | null |
DDQN.py
|
TimoleonLatinopoulos/MortalKombatOpenAI
|
59dc89d1f50dd74690859e5e1fa18701a5246382
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from keras.activations import relu
from keras.initializers import VarianceScaling
from keras.layers import Dense, Conv2D, Flatten
from keras.losses import logcosh
class DDQN:
""" Implements a Dueling Dual Deep Q-Network based on the frames of the Retro Environment """
def __init__(self, n_actions, frame_height=63, frame_width=113, stacked_frames=4, learning_rate=0.00001):
self.n_actions = n_actions
self.frame_height = frame_height
self.frame_width = frame_width
self.stacked_frames = stacked_frames
self.learning_rate = learning_rate
self.input = tf.placeholder(shape=[None, self.frame_height, self.frame_width, self.stacked_frames],
dtype=tf.float32)
self.input = self.input / 255
# Convolutional layers
self.conv1 = self.conv_layer(self.input, 32, [8, 8], 4, 'conv1')
self.conv2 = self.conv_layer(self.conv1, 64, [4, 4], 2, 'conv2')
self.conv3 = self.conv_layer(self.conv2, 64, [3, 3], 1, 'conv3')
self.flat = Flatten()(self.conv3)
self.dense1 = self.dense_layer(self.flat, 512, 'dense1', relu)
# Splitting into value and advantage streams
self.v_stream, self.a_stream = tf.split(self.dense1, 2, 1)
self.value = self.dense_layer(self.v_stream, 1, 'value')
self.advantage = self.dense_layer(self.a_stream, self.n_actions, 'advantage')
# Getting Q-values from value and advantage streams
self.q_values = self.value + tf.subtract(self.advantage, tf.reduce_mean(self.advantage, axis=1, keepdims=True))
self.prediction = tf.argmax(self.q_values, 1)
# targetQ according to Bellman equation
self.target_q = tf.placeholder(shape=[None], dtype=tf.float32)
self.action = tf.placeholder(shape=[None], dtype=tf.uint8)
self.action_one_hot = tf.one_hot(self.action, self.n_actions, dtype=tf.float32)
self.Q = tf.reduce_sum(tf.multiply(self.q_values, self.action_one_hot), axis=1)
# Parameter updates
self.error = logcosh(self.target_q, self.Q)
self.loss = tf.reduce_mean(self.error)
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
self.update = self.optimizer.minimize(self.loss)
@staticmethod
def conv_layer(_inputs, _filters, _kernel_size, _strides, _name):
return Conv2D(filters=_filters, kernel_size=_kernel_size, strides=_strides,
kernel_initializer=VarianceScaling(scale=2.0), padding="valid",
activation=relu, use_bias=False, name=_name)(_inputs)
@staticmethod
def dense_layer(_inputs, _units, _name, _activation=None):
return Dense(activation=_activation, units=_units,
kernel_initializer=VarianceScaling(scale=2.0), name=_name)(_inputs)
class TargetNetworkUpdater:
""" Updates the variables and the weights of the target network based on the main network """
def __init__(self, main_vars, target_vars):
self.main_vars = main_vars
self.target_vars = target_vars
def update_target_vars(self):
update_ops = []
for i, var in enumerate(self.main_vars):
copy_op = self.target_vars[i].assign(var.value())
update_ops.append(copy_op)
return update_ops
def update_networks(self, sess):
update_ops = self.update_target_vars()
for copy_op in update_ops:
sess.run(copy_op)
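DDQN and TargetNetworkUpdater are meant to be used together in TensorFlow 1.x graph mode: build a main and a target network, collect their trainable variables, and periodically copy the main weights into the target network. A rough sketch under those assumptions (n_actions and the half-split of tf.trainable_variables(), which relies on variable creation order, are illustrative choices, not taken from this file):

import tensorflow as tf

main_dqn = DDQN(n_actions=12)
target_dqn = DDQN(n_actions=12)

# Variables are returned in creation order: the main network's first,
# then the target network's.
trainables = tf.trainable_variables()
main_vars = trainables[:len(trainables) // 2]
target_vars = trainables[len(trainables) // 2:]
updater = TargetNetworkUpdater(main_vars, target_vars)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    updater.update_networks(sess)  # copy main weights into the target network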
| 43.95
| 119
| 0.674346
|
import tensorflow as tf
from keras.activations import relu
from keras.initializers import VarianceScaling
from keras.layers import Dense, Conv2D, Flatten
from keras.losses import logcosh
class DDQN:
def __init__(self, n_actions, frame_height=63, frame_width=113, stacked_frames=4, learning_rate=0.00001):
self.n_actions = n_actions
self.frame_height = frame_height
self.frame_width = frame_width
self.stacked_frames = stacked_frames
self.learning_rate = learning_rate
self.input = tf.placeholder(shape=[None, self.frame_height, self.frame_width, self.stacked_frames],
dtype=tf.float32)
self.input = self.input / 255
self.conv1 = self.conv_layer(self.input, 32, [8, 8], 4, 'conv1')
self.conv2 = self.conv_layer(self.conv1, 64, [4, 4], 2, 'conv2')
self.conv3 = self.conv_layer(self.conv2, 64, [3, 3], 1, 'conv3')
self.flat = Flatten()(self.conv3)
self.dense1 = self.dense_layer(self.flat, 512, 'dense1', relu)
self.v_stream, self.a_stream = tf.split(self.dense1, 2, 1)
self.value = self.dense_layer(self.v_stream, 1, 'value')
self.advantage = self.dense_layer(self.a_stream, self.n_actions, 'advantage')
self.q_values = self.value + tf.subtract(self.advantage, tf.reduce_mean(self.advantage, axis=1, keepdims=True))
self.prediction = tf.argmax(self.q_values, 1)
self.target_q = tf.placeholder(shape=[None], dtype=tf.float32)
self.action = tf.placeholder(shape=[None], dtype=tf.uint8)
self.action_one_hot = tf.one_hot(self.action, self.n_actions, dtype=tf.float32)
self.Q = tf.reduce_sum(tf.multiply(self.q_values, self.action_one_hot), axis=1)
self.error = logcosh(self.target_q, self.Q)
self.loss = tf.reduce_mean(self.error)
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
self.update = self.optimizer.minimize(self.loss)
@staticmethod
def conv_layer(_inputs, _filters, _kernel_size, _strides, _name):
return Conv2D(filters=_filters, kernel_size=_kernel_size, strides=_strides,
kernel_initializer=VarianceScaling(scale=2.0), padding="valid",
activation=relu, use_bias=False, name=_name)(_inputs)
@staticmethod
def dense_layer(_inputs, _units, _name, _activation=None):
return Dense(activation=_activation, units=_units,
kernel_initializer=VarianceScaling(scale=2.0), name=_name)(_inputs)
class TargetNetworkUpdater:
def __init__(self, main_vars, target_vars):
self.main_vars = main_vars
self.target_vars = target_vars
def update_target_vars(self):
update_ops = []
for i, var in enumerate(self.main_vars):
copy_op = self.target_vars[i].assign(var.value())
update_ops.append(copy_op)
return update_ops
def update_networks(self, sess):
update_ops = self.update_target_vars()
for copy_op in update_ops:
sess.run(copy_op)
| true
| true
|
f71a5820fe472212056e6d6abaa0d96203b1f555
| 939
|
py
|
Python
|
pglast/enums/pg_class.py
|
fentik/pglast
|
c4652b3a6098faf26fa8d3a8fd054f23acd72f9c
|
[
"PostgreSQL"
] | 1
|
2021-08-20T10:09:59.000Z
|
2021-08-20T10:09:59.000Z
|
pglast/enums/pg_class.py
|
fentik/pglast
|
c4652b3a6098faf26fa8d3a8fd054f23acd72f9c
|
[
"PostgreSQL"
] | null | null | null |
pglast/enums/pg_class.py
|
fentik/pglast
|
c4652b3a6098faf26fa8d3a8fd054f23acd72f9c
|
[
"PostgreSQL"
] | null | null | null |
# -*- coding: utf-8 -*-
# :Project: pglast -- DO NOT EDIT: automatically extracted from pg_class.h @ 13-2.0.6-0-ga248206
# :Author: Lele Gaifax <lele@metapensiero.it>
# :License: GNU General Public License version 3 or later
# :Copyright: © 2017-2021 Lele Gaifax
#
from enum import Enum, IntEnum, IntFlag, auto
try:
from enum import StrEnum
except ImportError:
# Python < 3.10
class StrEnum(str, Enum):
pass
# #define-ed constants
RELKIND_RELATION = 'r'
RELKIND_INDEX = 'i'
RELKIND_SEQUENCE = 'S'
RELKIND_TOASTVALUE = 't'
RELKIND_VIEW = 'v'
RELKIND_MATVIEW = 'm'
RELKIND_COMPOSITE_TYPE = 'c'
RELKIND_FOREIGN_TABLE = 'f'
RELKIND_PARTITIONED_TABLE = 'p'
RELKIND_PARTITIONED_INDEX = 'I'
RELPERSISTENCE_PERMANENT = 'p'
RELPERSISTENCE_UNLOGGED = 'u'
RELPERSISTENCE_TEMP = 't'
REPLICA_IDENTITY_DEFAULT = 'd'
REPLICA_IDENTITY_NOTHING = 'n'
REPLICA_IDENTITY_FULL = 'f'
REPLICA_IDENTITY_INDEX = 'i'
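These single-character codes mirror the relkind and relpersistence columns of PostgreSQL's pg_class catalog, so a typical use is simply mapping a parsed code back to something readable. A minimal, hypothetical lookup (the mapping dict below is illustrative, not part of pglast):

RELKIND_NAMES = {
    RELKIND_RELATION: 'table',
    RELKIND_INDEX: 'index',
    RELKIND_SEQUENCE: 'sequence',
    RELKIND_VIEW: 'view',
    RELKIND_MATVIEW: 'materialized view',
}

print(RELKIND_NAMES.get('v', 'unknown'))  # -> 'view'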
| 17.388889
| 98
| 0.713525
|
from enum import Enum, IntEnum, IntFlag, auto
try:
from enum import StrEnum
except ImportError:
class StrEnum(str, Enum):
pass
RELKIND_RELATION = 'r'
RELKIND_INDEX = 'i'
RELKIND_SEQUENCE = 'S'
RELKIND_TOASTVALUE = 't'
RELKIND_VIEW = 'v'
RELKIND_MATVIEW = 'm'
RELKIND_COMPOSITE_TYPE = 'c'
RELKIND_FOREIGN_TABLE = 'f'
RELKIND_PARTITIONED_TABLE = 'p'
RELKIND_PARTITIONED_INDEX = 'I'
RELPERSISTENCE_PERMANENT = 'p'
RELPERSISTENCE_UNLOGGED = 'u'
RELPERSISTENCE_TEMP = 't'
REPLICA_IDENTITY_DEFAULT = 'd'
REPLICA_IDENTITY_NOTHING = 'n'
REPLICA_IDENTITY_FULL = 'f'
REPLICA_IDENTITY_INDEX = 'i'
| true
| true
|
f71a5952c0b0537a3a97b410e481a15d260c9393
| 7,086
|
py
|
Python
|
d3rlpy/models/torch/encoders.py
|
meokz/d3rlpy
|
40504e2d8b424547558ab82786c523e8f4626a82
|
[
"MIT"
] | 2
|
2021-04-21T08:19:29.000Z
|
2021-05-17T09:08:06.000Z
|
d3rlpy/models/torch/encoders.py
|
meokz/d3rlpy
|
40504e2d8b424547558ab82786c523e8f4626a82
|
[
"MIT"
] | null | null | null |
d3rlpy/models/torch/encoders.py
|
meokz/d3rlpy
|
40504e2d8b424547558ab82786c523e8f4626a82
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
def _create_activation(activation_type):
if activation_type == 'relu':
return torch.relu
elif activation_type == 'swish':
return lambda x: x * torch.sigmoid(x)
raise ValueError('invalid activation_type.')
def create_encoder(observation_shape,
action_size=None,
use_batch_norm=False,
discrete_action=False,
activation_type='relu',
**kwargs):
activation = _create_activation(activation_type)
if len(observation_shape) == 3:
# pixel input
if action_size is not None:
return PixelEncoderWithAction(observation_shape,
action_size,
use_batch_norm=use_batch_norm,
discrete_action=discrete_action,
activation=activation,
**kwargs)
return PixelEncoder(observation_shape,
use_batch_norm=use_batch_norm,
activation=activation,
**kwargs)
elif len(observation_shape) == 1:
# vector input
if action_size is not None:
return VectorEncoderWithAction(observation_shape,
action_size,
use_batch_norm=use_batch_norm,
discrete_action=discrete_action,
activation=activation,
**kwargs)
return VectorEncoder(observation_shape,
use_batch_norm=use_batch_norm,
activation=activation,
**kwargs)
else:
raise ValueError('observation_shape must be 1d or 3d.')
class PixelEncoder(nn.Module):
def __init__(self,
observation_shape,
filters=None,
feature_size=None,
use_batch_norm=False,
activation=torch.relu):
super().__init__()
# default architecture is based on Nature DQN paper.
if filters is None:
filters = [(32, 8, 4), (64, 4, 2), (64, 3, 1)]
if feature_size is None:
feature_size = 512
self.observation_shape = observation_shape
self.use_batch_norm = use_batch_norm
self.activation = activation
self.feature_size = feature_size
# convolutional layers
in_channels = [observation_shape[0]] + [f[0] for f in filters[:-1]]
self.convs = nn.ModuleList()
self.conv_bns = nn.ModuleList()
for in_channel, f in zip(in_channels, filters):
out_channel, kernel_size, stride = f
conv = nn.Conv2d(in_channel,
out_channel,
kernel_size=kernel_size,
stride=stride)
self.convs.append(conv)
if use_batch_norm:
self.conv_bns.append(nn.BatchNorm2d(out_channel))
# last dense layer
self.fc = nn.Linear(self._get_linear_input_size(), feature_size)
if use_batch_norm:
self.fc_bn = nn.BatchNorm1d(feature_size)
def _get_linear_input_size(self):
x = torch.rand((1, ) + self.observation_shape)
with torch.no_grad():
return self._conv_encode(x).view(1, -1).shape[1]
def _conv_encode(self, x):
h = x
for i in range(len(self.convs)):
h = self.activation(self.convs[i](h))
if self.use_batch_norm:
h = self.conv_bns[i](h)
return h
def forward(self, x):
h = self._conv_encode(x)
h = self.activation(self.fc(h.view(h.shape[0], -1)))
if self.use_batch_norm:
h = self.fc_bn(h)
return h
class PixelEncoderWithAction(PixelEncoder):
def __init__(self,
observation_shape,
action_size,
filters=None,
feature_size=None,
use_batch_norm=False,
discrete_action=False,
activation=torch.relu):
self.action_size = action_size
self.discrete_action = discrete_action
super().__init__(observation_shape, filters, feature_size,
use_batch_norm, activation)
def _get_linear_input_size(self):
size = super()._get_linear_input_size()
return size + self.action_size
def forward(self, x, action):
h = self._conv_encode(x)
if self.discrete_action:
action = F.one_hot(action.view(-1).long(),
num_classes=self.action_size).float()
        # concatenate feature and action
h = torch.cat([h.view(h.shape[0], -1), action], dim=1)
h = self.activation(self.fc(h))
if self.use_batch_norm:
h = self.fc_bn(h)
return h
class VectorEncoder(nn.Module):
def __init__(self,
observation_shape,
hidden_units=None,
use_batch_norm=False,
activation=torch.relu):
super().__init__()
self.observation_shape = observation_shape
if hidden_units is None:
hidden_units = [256, 256]
self.use_batch_norm = use_batch_norm
self.feature_size = hidden_units[-1]
self.activation = activation
in_units = [observation_shape[0]] + hidden_units[:-1]
self.fcs = nn.ModuleList()
self.bns = nn.ModuleList()
for in_unit, out_unit in zip(in_units, hidden_units):
self.fcs.append(nn.Linear(in_unit, out_unit))
if use_batch_norm:
self.bns.append(nn.BatchNorm1d(out_unit))
def forward(self, x):
h = x
for i in range(len(self.fcs)):
h = self.activation(self.fcs[i](h))
if self.use_batch_norm:
h = self.bns[i](h)
return h
class VectorEncoderWithAction(VectorEncoder):
def __init__(self,
observation_shape,
action_size,
hidden_units=None,
use_batch_norm=False,
discrete_action=False,
activation=torch.relu):
self.action_size = action_size
self.discrete_action = discrete_action
concat_shape = (observation_shape[0] + action_size, )
super().__init__(concat_shape, hidden_units, use_batch_norm,
activation)
self.observation_shape = observation_shape
def forward(self, x, action):
if self.discrete_action:
action = F.one_hot(action.view(-1).long(),
num_classes=self.action_size).float()
x = torch.cat([x, action], dim=1)
return super().forward(x)
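create_encoder dispatches purely on the length of observation_shape: a 3-element shape is treated as stacked image frames (PixelEncoder), a 1-element shape as a flat feature vector (VectorEncoder), and passing action_size selects the *WithAction variants. A short sketch of calling the factory and pushing a dummy batch through it (the shapes and sizes are arbitrary assumptions):

import torch

# Pixel observations: 4 stacked 84x84 frames.
pixel_enc = create_encoder((4, 84, 84))
feats = pixel_enc(torch.rand(2, 4, 84, 84))        # -> shape (2, 512)

# Vector observations plus a continuous 6-dimensional action.
vec_enc = create_encoder((17,), action_size=6)
h = vec_enc(torch.rand(2, 17), torch.rand(2, 6))   # -> shape (2, 256)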
| 34.565854
| 75
| 0.540785
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def _create_activation(activation_type):
if activation_type == 'relu':
return torch.relu
elif activation_type == 'swish':
return lambda x: x * torch.sigmoid(x)
raise ValueError('invalid activation_type.')
def create_encoder(observation_shape,
action_size=None,
use_batch_norm=False,
discrete_action=False,
activation_type='relu',
**kwargs):
activation = _create_activation(activation_type)
if len(observation_shape) == 3:
if action_size is not None:
return PixelEncoderWithAction(observation_shape,
action_size,
use_batch_norm=use_batch_norm,
discrete_action=discrete_action,
activation=activation,
**kwargs)
return PixelEncoder(observation_shape,
use_batch_norm=use_batch_norm,
activation=activation,
**kwargs)
elif len(observation_shape) == 1:
if action_size is not None:
return VectorEncoderWithAction(observation_shape,
action_size,
use_batch_norm=use_batch_norm,
discrete_action=discrete_action,
activation=activation,
**kwargs)
return VectorEncoder(observation_shape,
use_batch_norm=use_batch_norm,
activation=activation,
**kwargs)
else:
raise ValueError('observation_shape must be 1d or 3d.')
class PixelEncoder(nn.Module):
def __init__(self,
observation_shape,
filters=None,
feature_size=None,
use_batch_norm=False,
activation=torch.relu):
super().__init__()
if filters is None:
filters = [(32, 8, 4), (64, 4, 2), (64, 3, 1)]
if feature_size is None:
feature_size = 512
self.observation_shape = observation_shape
self.use_batch_norm = use_batch_norm
self.activation = activation
self.feature_size = feature_size
in_channels = [observation_shape[0]] + [f[0] for f in filters[:-1]]
self.convs = nn.ModuleList()
self.conv_bns = nn.ModuleList()
for in_channel, f in zip(in_channels, filters):
out_channel, kernel_size, stride = f
conv = nn.Conv2d(in_channel,
out_channel,
kernel_size=kernel_size,
stride=stride)
self.convs.append(conv)
if use_batch_norm:
self.conv_bns.append(nn.BatchNorm2d(out_channel))
self.fc = nn.Linear(self._get_linear_input_size(), feature_size)
if use_batch_norm:
self.fc_bn = nn.BatchNorm1d(feature_size)
def _get_linear_input_size(self):
x = torch.rand((1, ) + self.observation_shape)
with torch.no_grad():
return self._conv_encode(x).view(1, -1).shape[1]
def _conv_encode(self, x):
h = x
for i in range(len(self.convs)):
h = self.activation(self.convs[i](h))
if self.use_batch_norm:
h = self.conv_bns[i](h)
return h
def forward(self, x):
h = self._conv_encode(x)
h = self.activation(self.fc(h.view(h.shape[0], -1)))
if self.use_batch_norm:
h = self.fc_bn(h)
return h
class PixelEncoderWithAction(PixelEncoder):
def __init__(self,
observation_shape,
action_size,
filters=None,
feature_size=None,
use_batch_norm=False,
discrete_action=False,
activation=torch.relu):
self.action_size = action_size
self.discrete_action = discrete_action
super().__init__(observation_shape, filters, feature_size,
use_batch_norm, activation)
def _get_linear_input_size(self):
size = super()._get_linear_input_size()
return size + self.action_size
def forward(self, x, action):
h = self._conv_encode(x)
if self.discrete_action:
action = F.one_hot(action.view(-1).long(),
num_classes=self.action_size).float()
h = torch.cat([h.view(h.shape[0], -1), action], dim=1)
h = self.activation(self.fc(h))
if self.use_batch_norm:
h = self.fc_bn(h)
return h
class VectorEncoder(nn.Module):
def __init__(self,
observation_shape,
hidden_units=None,
use_batch_norm=False,
activation=torch.relu):
super().__init__()
self.observation_shape = observation_shape
if hidden_units is None:
hidden_units = [256, 256]
self.use_batch_norm = use_batch_norm
self.feature_size = hidden_units[-1]
self.activation = activation
in_units = [observation_shape[0]] + hidden_units[:-1]
self.fcs = nn.ModuleList()
self.bns = nn.ModuleList()
for in_unit, out_unit in zip(in_units, hidden_units):
self.fcs.append(nn.Linear(in_unit, out_unit))
if use_batch_norm:
self.bns.append(nn.BatchNorm1d(out_unit))
def forward(self, x):
h = x
for i in range(len(self.fcs)):
h = self.activation(self.fcs[i](h))
if self.use_batch_norm:
h = self.bns[i](h)
return h
class VectorEncoderWithAction(VectorEncoder):
def __init__(self,
observation_shape,
action_size,
hidden_units=None,
use_batch_norm=False,
discrete_action=False,
activation=torch.relu):
self.action_size = action_size
self.discrete_action = discrete_action
concat_shape = (observation_shape[0] + action_size, )
super().__init__(concat_shape, hidden_units, use_batch_norm,
activation)
self.observation_shape = observation_shape
def forward(self, x, action):
if self.discrete_action:
action = F.one_hot(action.view(-1).long(),
num_classes=self.action_size).float()
x = torch.cat([x, action], dim=1)
return super().forward(x)
| true
| true
|
f71a5d40d4f7e3452efff0eee8f88d7ba699febc
| 1,600
|
py
|
Python
|
status.py
|
Drakulix/plugin.program.steam.streaming
|
ce2bc62e68dee7ebc249a075bd57e05586834702
|
[
"MIT"
] | 4
|
2016-06-19T18:23:09.000Z
|
2019-02-08T18:00:20.000Z
|
status.py
|
Drakulix/plugin.program.steam.streaming
|
ce2bc62e68dee7ebc249a075bd57e05586834702
|
[
"MIT"
] | null | null | null |
status.py
|
Drakulix/plugin.program.steam.streaming
|
ce2bc62e68dee7ebc249a075bd57e05586834702
|
[
"MIT"
] | null | null | null |
import xbmc
import xbmcgui
import sys
import urllib
import utils
from stream_api import app_state
class Updater(object):
installed = False
percent = 0
title1 = ""
def update(self, app):
if app["state"] == 0 or app["state"] == 2 or app["state"] == 258 or app["state"] == 1282 or app["state"] == 260 or app["state"] == 1048576 or app["state"] == 1286:
self.title1 = ""
if app["estimated_seconds_remaining"] != -1:
self.title1 = utils.translation(32021)+" "+str(int(app["estimated_seconds_remaining"] / 60 + 1))+" "+utils.translation(32022)
self.percent = int((float(app["bytes_downloaded"]) / float(app["bytes_to_download"])) * 100.0)
elif app["state"] == 4:
self.installed = True
else:
self.percent = 0
self.title1 = utils.translation(32023)+": "+utils.translation(app_state.state(app["state"]))
print "Unknown State: "+str(app["state"])
def status(service, params):
app_id = int(params.get('id'))
username = urllib.unquote_plus(params.get('username'))
hostname = urllib.unquote_plus(params.get('hostname'))
progress = xbmcgui.DialogProgress()
progress.create(utils.translation(32020), " ", " ", " ")
update = Updater()
#get app updates
while not progress.iscanceled():
xbmc.sleep(100)
update.update(service.update_app((hostname, username), app_id))
if update.installed:
break
else:
progress.update(update.percent, update.title1, "", "")
progress.close()
| 32.653061
| 171
| 0.605
|
import xbmc
import xbmcgui
import sys
import urllib
import utils
from stream_api import app_state
class Updater(object):
installed = False
percent = 0
title1 = ""
def update(self, app):
if app["state"] == 0 or app["state"] == 2 or app["state"] == 258 or app["state"] == 1282 or app["state"] == 260 or app["state"] == 1048576 or app["state"] == 1286:
self.title1 = ""
if app["estimated_seconds_remaining"] != -1:
self.title1 = utils.translation(32021)+" "+str(int(app["estimated_seconds_remaining"] / 60 + 1))+" "+utils.translation(32022)
self.percent = int((float(app["bytes_downloaded"]) / float(app["bytes_to_download"])) * 100.0)
elif app["state"] == 4:
self.installed = True
else:
self.percent = 0
self.title1 = utils.translation(32023)+": "+utils.translation(app_state.state(app["state"]))
print "Unknown State: "+str(app["state"])
def status(service, params):
app_id = int(params.get('id'))
username = urllib.unquote_plus(params.get('username'))
hostname = urllib.unquote_plus(params.get('hostname'))
progress = xbmcgui.DialogProgress()
progress.create(utils.translation(32020), " ", " ", " ")
update = Updater()
while not progress.iscanceled():
xbmc.sleep(100)
update.update(service.update_app((hostname, username), app_id))
if update.installed:
break
else:
progress.update(update.percent, update.title1, "", "")
progress.close()
| false
| true
|
f71a5d5dd300e03985a3ca77a605a2e70ab1f462
| 121,589
|
py
|
Python
|
tests/git_cl_test.py
|
2youyou2/depot_tools
|
8b94108e684872a89f7108f51ba74f01220d64fa
|
[
"BSD-3-Clause"
] | 7
|
2018-09-26T11:10:40.000Z
|
2020-12-19T13:32:12.000Z
|
tests/git_cl_test.py
|
2youyou2/depot_tools
|
8b94108e684872a89f7108f51ba74f01220d64fa
|
[
"BSD-3-Clause"
] | null | null | null |
tests/git_cl_test.py
|
2youyou2/depot_tools
|
8b94108e684872a89f7108f51ba74f01220d64fa
|
[
"BSD-3-Clause"
] | 4
|
2020-03-27T07:49:45.000Z
|
2020-11-17T02:46:42.000Z
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for git_cl.py."""
import contextlib
import datetime
import json
import logging
import os
import StringIO
import sys
import tempfile
import unittest
import urlparse
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from testing_support.auto_stub import TestCase
import metrics
# We have to disable monitoring before importing git_cl.
metrics.DISABLE_METRICS_COLLECTION = True
import gerrit_util
import git_cl
import git_common
import git_footers
import subprocess2
def callError(code=1, cmd='', cwd='', stdout='', stderr=''):
return subprocess2.CalledProcessError(code, cmd, cwd, stdout, stderr)
CERR1 = callError(1)
def MakeNamedTemporaryFileMock(expected_content):
class NamedTemporaryFileMock(object):
def __init__(self, *args, **kwargs):
self.name = '/tmp/named'
self.expected_content = expected_content
def __enter__(self):
return self
def __exit__(self, _type, _value, _tb):
pass
def write(self, content):
if self.expected_content:
assert content == self.expected_content
def close(self):
pass
return NamedTemporaryFileMock
class ChangelistMock(object):
# A class variable so we can access it when we don't have access to the
# instance that's being set.
desc = ""
def __init__(self, **kwargs):
pass
def GetIssue(self):
return 1
def GetDescription(self, force=False):
return ChangelistMock.desc
def UpdateDescription(self, desc, force=False):
ChangelistMock.desc = desc
class PresubmitMock(object):
def __init__(self, *args, **kwargs):
self.reviewers = []
self.more_cc = ['chromium-reviews+test-more-cc@chromium.org']
@staticmethod
def should_continue():
return True
class GitCheckoutMock(object):
def __init__(self, *args, **kwargs):
pass
@staticmethod
def reset():
GitCheckoutMock.conflict = False
def apply_patch(self, p):
if GitCheckoutMock.conflict:
raise Exception('failed')
class WatchlistsMock(object):
def __init__(self, _):
pass
@staticmethod
def GetWatchersForPaths(_):
return ['joe@example.com']
class CodereviewSettingsFileMock(object):
def __init__(self):
pass
# pylint: disable=no-self-use
def read(self):
return ("CODE_REVIEW_SERVER: gerrit.chromium.org\n" +
"GERRIT_HOST: True\n")
class AuthenticatorMock(object):
def __init__(self, *_args):
pass
def has_cached_credentials(self):
return True
def authorize(self, http):
return http
def CookiesAuthenticatorMockFactory(hosts_with_creds=None, same_auth=False):
"""Use to mock Gerrit/Git credentials from ~/.netrc or ~/.gitcookies.
Usage:
>>> self.mock(git_cl.gerrit_util, "CookiesAuthenticator",
CookiesAuthenticatorMockFactory({'host': ('user', _, 'pass')})
OR
>>> self.mock(git_cl.gerrit_util, "CookiesAuthenticator",
CookiesAuthenticatorMockFactory(
same_auth=('user', '', 'pass'))
"""
class CookiesAuthenticatorMock(git_cl.gerrit_util.CookiesAuthenticator):
def __init__(self): # pylint: disable=super-init-not-called
# Intentionally not calling super() because it reads actual cookie files.
pass
@classmethod
def get_gitcookies_path(cls):
return '~/.gitcookies'
@classmethod
def get_netrc_path(cls):
return '~/.netrc'
def _get_auth_for_host(self, host):
if same_auth:
return same_auth
return (hosts_with_creds or {}).get(host)
return CookiesAuthenticatorMock
class MockChangelistWithBranchAndIssue():
def __init__(self, branch, issue):
self.branch = branch
self.issue = issue
def GetBranch(self):
return self.branch
def GetIssue(self):
return self.issue
class SystemExitMock(Exception):
pass
class TestGitClBasic(unittest.TestCase):
def test_get_description(self):
cl = git_cl.Changelist(issue=1, codereview='gerrit',
codereview_host='host')
cl.description = 'x'
cl.has_description = True
cl._codereview_impl.FetchDescription = lambda *a, **kw: 'y'
self.assertEquals(cl.GetDescription(), 'x')
self.assertEquals(cl.GetDescription(force=True), 'y')
self.assertEquals(cl.GetDescription(), 'y')
def test_description_footers(self):
cl = git_cl.Changelist(issue=1, codereview='gerrit',
codereview_host='host')
cl.description = '\n'.join([
'This is some message',
'',
'It has some lines',
'and, also',
'',
'Some: Really',
'Awesome: Footers',
])
cl.has_description = True
cl._codereview_impl.UpdateDescriptionRemote = lambda *a, **kw: 'y'
msg, footers = cl.GetDescriptionFooters()
self.assertEquals(
msg, ['This is some message', '', 'It has some lines', 'and, also'])
self.assertEquals(footers, [('Some', 'Really'), ('Awesome', 'Footers')])
msg.append('wut')
footers.append(('gnarly-dude', 'beans'))
cl.UpdateDescriptionFooters(msg, footers)
self.assertEquals(cl.GetDescription().splitlines(), [
'This is some message',
'',
'It has some lines',
'and, also',
'wut'
'',
'Some: Really',
'Awesome: Footers',
'Gnarly-Dude: beans',
])
def test_get_bug_line_values(self):
f = lambda p, bugs: list(git_cl._get_bug_line_values(p, bugs))
self.assertEqual(f('', ''), [])
self.assertEqual(f('', '123,v8:456'), ['123', 'v8:456'])
self.assertEqual(f('v8', '456'), ['v8:456'])
self.assertEqual(f('v8', 'chromium:123,456'), ['v8:456', 'chromium:123'])
    # Not nice, but not worth caring about.
self.assertEqual(f('v8', 'chromium:123,456,v8:123'),
['v8:456', 'chromium:123', 'v8:123'])
def _test_git_number(self, parent_msg, dest_ref, child_msg,
parent_hash='parenthash'):
desc = git_cl.ChangeDescription(child_msg)
desc.update_with_git_number_footers(parent_hash, parent_msg, dest_ref)
return desc.description
def assertEqualByLine(self, actual, expected):
self.assertEqual(actual.splitlines(), expected.splitlines())
def test_git_number_bad_parent(self):
with self.assertRaises(ValueError):
self._test_git_number('Parent', 'refs/heads/master', 'Child')
def test_git_number_bad_parent_footer(self):
with self.assertRaises(AssertionError):
self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: wrong',
'refs/heads/master', 'Child')
def test_git_number_bad_lineage_ignored(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#1}\n'
'Cr-Branched-From: mustBeReal40CharHash-branch@{#pos}',
'refs/heads/master', 'Child')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#2}\n'
'Cr-Branched-From: mustBeReal40CharHash-branch@{#pos}')
def test_git_number_same_branch(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#12}',
dest_ref='refs/heads/master',
child_msg='Child')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#13}')
def test_git_number_same_branch_mixed_footers(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#12}',
dest_ref='refs/heads/master',
child_msg='Child\n'
'\n'
'Broken-by: design\n'
'BUG=123')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Broken-by: design\n'
'BUG=123\n'
'Cr-Commit-Position: refs/heads/master@{#13}')
def test_git_number_same_branch_with_originals(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#12}',
dest_ref='refs/heads/master',
child_msg='Child\n'
'\n'
'Some users are smart and insert their own footers\n'
'\n'
'Cr-Whatever: value\n'
'Cr-Commit-Position: refs/copy/paste@{#22}')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Some users are smart and insert their own footers\n'
'\n'
'Cr-Original-Whatever: value\n'
'Cr-Original-Commit-Position: refs/copy/paste@{#22}\n'
'Cr-Commit-Position: refs/heads/master@{#13}')
def test_git_number_new_branch(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#12}',
dest_ref='refs/heads/branch',
child_msg='Child')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Cr-Commit-Position: refs/heads/branch@{#1}\n'
'Cr-Branched-From: parenthash-refs/heads/master@{#12}')
def test_git_number_lineage(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/branch@{#1}\n'
'Cr-Branched-From: somehash-refs/heads/master@{#12}',
dest_ref='refs/heads/branch',
child_msg='Child')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Cr-Commit-Position: refs/heads/branch@{#2}\n'
'Cr-Branched-From: somehash-refs/heads/master@{#12}')
def test_git_number_moooooooore_lineage(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/branch@{#5}\n'
'Cr-Branched-From: somehash-refs/heads/master@{#12}',
dest_ref='refs/heads/mooore',
child_msg='Child')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Cr-Commit-Position: refs/heads/mooore@{#1}\n'
'Cr-Branched-From: parenthash-refs/heads/branch@{#5}\n'
'Cr-Branched-From: somehash-refs/heads/master@{#12}')
def test_git_number_ever_moooooooore_lineage(self):
self.maxDiff = 10000 # pylint: disable=attribute-defined-outside-init
actual = self._test_git_number(
'CQ commit on fresh new branch + numbering.\n'
'\n'
'NOTRY=True\n'
'NOPRESUBMIT=True\n'
'BUG=\n'
'\n'
'Review-Url: https://codereview.chromium.org/2577703003\n'
'Cr-Commit-Position: refs/heads/gnumb-test/br@{#1}\n'
'Cr-Branched-From: 0749ff9edc-refs/heads/gnumb-test/cq@{#4}\n'
'Cr-Branched-From: 5c49df2da6-refs/heads/master@{#41618}',
dest_ref='refs/heads/gnumb-test/cl',
child_msg='git cl on fresh new branch + numbering.\n'
'\n'
'Review-Url: https://codereview.chromium.org/2575043003 .\n')
self.assertEqualByLine(
actual,
'git cl on fresh new branch + numbering.\n'
'\n'
'Review-Url: https://codereview.chromium.org/2575043003 .\n'
'Cr-Commit-Position: refs/heads/gnumb-test/cl@{#1}\n'
'Cr-Branched-From: parenthash-refs/heads/gnumb-test/br@{#1}\n'
'Cr-Branched-From: 0749ff9edc-refs/heads/gnumb-test/cq@{#4}\n'
'Cr-Branched-From: 5c49df2da6-refs/heads/master@{#41618}')
def test_git_number_cherry_pick(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/branch@{#1}\n'
'Cr-Branched-From: somehash-refs/heads/master@{#12}',
dest_ref='refs/heads/branch',
child_msg='Child, which is cherry-pick from master\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#100}\n'
'(cherry picked from commit deadbeef12345678deadbeef12345678deadbeef)')
self.assertEqualByLine(
actual,
'Child, which is cherry-pick from master\n'
'\n'
'(cherry picked from commit deadbeef12345678deadbeef12345678deadbeef)\n'
'\n'
'Cr-Original-Commit-Position: refs/heads/master@{#100}\n'
'Cr-Commit-Position: refs/heads/branch@{#2}\n'
'Cr-Branched-From: somehash-refs/heads/master@{#12}')
def test_gerrit_mirror_hack(self):
cr = 'chromium-review.googlesource.com'
url0 = 'https://%s/a/changes/x?a=b' % cr
origMirrors = git_cl.gerrit_util._GERRIT_MIRROR_PREFIXES
try:
git_cl.gerrit_util._GERRIT_MIRROR_PREFIXES = ['us1', 'us2']
url1 = git_cl.gerrit_util._UseGerritMirror(url0, cr)
url2 = git_cl.gerrit_util._UseGerritMirror(url1, cr)
url3 = git_cl.gerrit_util._UseGerritMirror(url2, cr)
self.assertNotEqual(url1, url2)
self.assertEqual(sorted((url1, url2)), [
'https://us1-mirror-chromium-review.googlesource.com/a/changes/x?a=b',
'https://us2-mirror-chromium-review.googlesource.com/a/changes/x?a=b'])
self.assertEqual(url1, url3)
finally:
git_cl.gerrit_util._GERRIT_MIRROR_PREFIXES = origMirrors
def test_valid_accounts(self):
mock_per_account = {
'u1': None, # 404, doesn't exist.
'u2': {
'_account_id': 123124,
'avatars': [],
'email': 'u2@example.com',
'name': 'User Number 2',
'status': 'OOO',
},
'u3': git_cl.gerrit_util.GerritError(500, 'retries didn\'t help :('),
}
def GetAccountDetailsMock(_, account):
# Poor-man's mock library's side_effect.
v = mock_per_account.pop(account)
if isinstance(v, Exception):
raise v
return v
original = git_cl.gerrit_util.GetAccountDetails
try:
git_cl.gerrit_util.GetAccountDetails = GetAccountDetailsMock
actual = git_cl.gerrit_util.ValidAccounts(
'host', ['u1', 'u2', 'u3'], max_threads=1)
finally:
git_cl.gerrit_util.GetAccountDetails = original
self.assertEqual(actual, {
'u2': {
'_account_id': 123124,
'avatars': [],
'email': 'u2@example.com',
'name': 'User Number 2',
'status': 'OOO',
},
})
class TestParseIssueURL(unittest.TestCase):
def _validate(self, parsed, issue=None, patchset=None, hostname=None,
codereview=None, fail=False):
self.assertIsNotNone(parsed)
if fail:
self.assertFalse(parsed.valid)
return
self.assertTrue(parsed.valid)
self.assertEqual(parsed.issue, issue)
self.assertEqual(parsed.patchset, patchset)
self.assertEqual(parsed.hostname, hostname)
self.assertEqual(parsed.codereview, codereview)
def _run_and_validate(self, func, url, *args, **kwargs):
result = func(urlparse.urlparse(url))
if kwargs.pop('fail', False):
self.assertIsNone(result)
return None
self._validate(result, *args, fail=False, **kwargs)
def test_gerrit(self):
def test(url, issue=None, patchset=None, hostname=None, fail=None):
self._test_ParseIssueUrl(
git_cl._GerritChangelistImpl.ParseIssueURL,
url, issue, patchset, hostname, fail)
def test(url, *args, **kwargs):
self._run_and_validate(git_cl._GerritChangelistImpl.ParseIssueURL, url,
*args, codereview='gerrit', **kwargs)
test('http://chrome-review.source.com/c/123',
123, None, 'chrome-review.source.com')
test('https://chrome-review.source.com/c/123/',
123, None, 'chrome-review.source.com')
test('https://chrome-review.source.com/c/123/4',
123, 4, 'chrome-review.source.com')
test('https://chrome-review.source.com/#/c/123/4',
123, 4, 'chrome-review.source.com')
test('https://chrome-review.source.com/c/123/4',
123, 4, 'chrome-review.source.com')
test('https://chrome-review.source.com/123',
123, None, 'chrome-review.source.com')
test('https://chrome-review.source.com/123/4',
123, 4, 'chrome-review.source.com')
test('https://chrome-review.source.com/c/123/1/whatisthis', fail=True)
test('https://chrome-review.source.com/c/abc/', fail=True)
test('ssh://chrome-review.source.com/c/123/1/', fail=True)
def test_ParseIssueNumberArgument(self):
def test(arg, *args, **kwargs):
codereview_hint = kwargs.pop('hint', None)
self._validate(git_cl.ParseIssueNumberArgument(arg, codereview_hint),
*args, **kwargs)
test('123', 123)
test('', fail=True)
test('abc', fail=True)
test('123/1', fail=True)
test('123a', fail=True)
test('ssh://chrome-review.source.com/#/c/123/4/', fail=True)
# Looks like Rietveld and Gerrit, but we should select Gerrit now
# w/ or w/o hint.
test('https://codereview.source.com/123',
123, None, 'codereview.source.com', 'gerrit',
hint='gerrit')
test('https://codereview.source.com/123',
123, None, 'codereview.source.com', 'gerrit')
    # Gerrit.
test('https://chrome-review.source.com/c/123/4',
123, 4, 'chrome-review.source.com', 'gerrit')
test('https://chrome-review.source.com/bad/123/4', fail=True)
class GitCookiesCheckerTest(TestCase):
def setUp(self):
super(GitCookiesCheckerTest, self).setUp()
self.c = git_cl._GitCookiesChecker()
self.c._all_hosts = []
def mock_hosts_creds(self, subhost_identity_pairs):
def ensure_googlesource(h):
if not h.endswith(self.c._GOOGLESOURCE):
assert not h.endswith('.')
return h + '.' + self.c._GOOGLESOURCE
return h
self.c._all_hosts = [(ensure_googlesource(h), i, '.gitcookies')
for h, i in subhost_identity_pairs]
def test_identity_parsing(self):
self.assertEqual(self.c._parse_identity('ldap.google.com'),
('ldap', 'google.com'))
self.assertEqual(self.c._parse_identity('git-ldap.example.com'),
('ldap', 'example.com'))
    # Special case because we know there are no subdomains in chromium.org.
self.assertEqual(self.c._parse_identity('git-note.period.chromium.org'),
('note.period', 'chromium.org'))
# Pathological: ".period." can be either username OR domain, more likely
# domain.
self.assertEqual(self.c._parse_identity('git-note.period.example.com'),
('note', 'period.example.com'))
def test_analysis_nothing(self):
self.c._all_hosts = []
self.assertFalse(self.c.has_generic_host())
self.assertEqual(set(), self.c.get_conflicting_hosts())
self.assertEqual(set(), self.c.get_duplicated_hosts())
self.assertEqual(set(), self.c.get_partially_configured_hosts())
self.assertEqual(set(), self.c.get_hosts_with_wrong_identities())
def test_analysis(self):
self.mock_hosts_creds([
('.googlesource.com', 'git-example.chromium.org'),
('chromium', 'git-example.google.com'),
('chromium-review', 'git-example.google.com'),
('chrome-internal', 'git-example.chromium.org'),
('chrome-internal-review', 'git-example.chromium.org'),
('conflict', 'git-example.google.com'),
('conflict-review', 'git-example.chromium.org'),
('dup', 'git-example.google.com'),
('dup', 'git-example.google.com'),
('dup-review', 'git-example.google.com'),
('partial', 'git-example.google.com'),
('gpartial-review', 'git-example.google.com'),
])
self.assertTrue(self.c.has_generic_host())
self.assertEqual(set(['conflict.googlesource.com']),
self.c.get_conflicting_hosts())
self.assertEqual(set(['dup.googlesource.com']),
self.c.get_duplicated_hosts())
self.assertEqual(set(['partial.googlesource.com',
'gpartial-review.googlesource.com']),
self.c.get_partially_configured_hosts())
self.assertEqual(set(['chromium.googlesource.com',
'chrome-internal.googlesource.com']),
self.c.get_hosts_with_wrong_identities())
def test_report_no_problems(self):
self.test_analysis_nothing()
self.mock(sys, 'stdout', StringIO.StringIO())
self.assertFalse(self.c.find_and_report_problems())
self.assertEqual(sys.stdout.getvalue(), '')
def test_report(self):
self.test_analysis()
self.mock(sys, 'stdout', StringIO.StringIO())
self.mock(git_cl.gerrit_util.CookiesAuthenticator, 'get_gitcookies_path',
classmethod(lambda _: '~/.gitcookies'))
self.assertTrue(self.c.find_and_report_problems())
with open(os.path.join(os.path.dirname(__file__),
'git_cl_creds_check_report.txt')) as f:
expected = f.read()
def by_line(text):
return [l.rstrip() for l in text.rstrip().splitlines()]
self.maxDiff = 10000 # pylint: disable=attribute-defined-outside-init
self.assertEqual(by_line(sys.stdout.getvalue().strip()), by_line(expected))
class TestGitCl(TestCase):
def setUp(self):
super(TestGitCl, self).setUp()
self.calls = []
self._calls_done = []
self.mock(git_cl, 'time_time',
lambda: self._mocked_call('time.time'))
self.mock(git_cl.metrics.collector, 'add_repeated',
lambda *a: self._mocked_call('add_repeated', *a))
self.mock(subprocess2, 'call', self._mocked_call)
self.mock(subprocess2, 'check_call', self._mocked_call)
self.mock(subprocess2, 'check_output', self._mocked_call)
self.mock(subprocess2, 'communicate',
lambda *a, **kw: ([self._mocked_call(*a, **kw), ''], 0))
self.mock(git_cl.gclient_utils, 'CheckCallAndFilter', self._mocked_call)
self.mock(git_common, 'is_dirty_git_tree', lambda x: False)
self.mock(git_common, 'get_or_create_merge_base',
lambda *a: (
self._mocked_call(['get_or_create_merge_base']+list(a))))
self.mock(git_cl, 'BranchExists', lambda _: True)
self.mock(git_cl, 'FindCodereviewSettingsFile', lambda: '')
self.mock(git_cl, 'SaveDescriptionBackup', lambda _:
self._mocked_call('SaveDescriptionBackup'))
self.mock(git_cl, 'ask_for_data', lambda *a, **k: self._mocked_call(
*(['ask_for_data'] + list(a)), **k))
self.mock(git_cl, 'write_json', lambda path, contents:
self._mocked_call('write_json', path, contents))
self.mock(git_cl.presubmit_support, 'DoPresubmitChecks', PresubmitMock)
self.mock(git_cl.checkout, 'GitCheckout', GitCheckoutMock)
GitCheckoutMock.reset()
self.mock(git_cl.watchlists, 'Watchlists', WatchlistsMock)
self.mock(git_cl.auth, 'get_authenticator_for_host', AuthenticatorMock)
self.mock(git_cl.gerrit_util, 'GetChangeDetail',
lambda *args, **kwargs: self._mocked_call(
'GetChangeDetail', *args, **kwargs))
self.mock(git_cl.gerrit_util, 'GetChangeComments',
lambda *args, **kwargs: self._mocked_call(
'GetChangeComments', *args, **kwargs))
self.mock(git_cl.gerrit_util, 'GetChangeRobotComments',
lambda *args, **kwargs: self._mocked_call(
'GetChangeRobotComments', *args, **kwargs))
self.mock(git_cl.gerrit_util, 'AddReviewers',
lambda h, i, reviewers, ccs, notify: self._mocked_call(
'AddReviewers', h, i, reviewers, ccs, notify))
self.mock(git_cl.gerrit_util, 'SetReview',
lambda h, i, msg=None, labels=None, notify=None:
self._mocked_call('SetReview', h, i, msg, labels, notify))
self.mock(git_cl.gerrit_util.LuciContextAuthenticator, 'is_luci',
staticmethod(lambda: False))
self.mock(git_cl.gerrit_util.GceAuthenticator, 'is_gce',
classmethod(lambda _: False))
self.mock(git_cl.gerrit_util, 'ValidAccounts',
lambda host, accounts:
self._mocked_call('ValidAccounts', host, accounts))
self.mock(git_cl, 'DieWithError',
lambda msg, change=None: self._mocked_call(['DieWithError', msg]))
    # It's important to reset settings to avoid interference between tests.
git_cl.settings = None
def tearDown(self):
try:
self.assertEquals([], self.calls)
except AssertionError:
if not self.has_failed():
raise
# Sadly, has_failed() returns True if this OR any other tests before this
# one have failed.
git_cl.logging.error(
'!!!!!! IF YOU SEE THIS, READ BELOW, IT WILL SAVE YOUR TIME !!!!!\n'
'There are un-consumed self.calls after this test has finished.\n'
'If you don\'t know which test this is, run:\n'
' tests/git_cl_tests.py -v\n'
'If you are already running only this test, then **first** fix the '
'problem whose exception is emitted below by unittest runner.\n'
'Else, to be sure what\'s going on, run this test **alone** with \n'
' tests/git_cl_tests.py TestGitCl.<name>\n'
'and follow instructions above.\n' +
'=' * 80)
finally:
super(TestGitCl, self).tearDown()
def _mocked_call(self, *args, **_kwargs):
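    """Pops and checks the next (expected_args, result) pair from self.calls.

    Fails with surrounding-call context if the arguments do not match the
    expectation; raises result if it is an Exception, otherwise returns it.
    """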
self.assertTrue(
self.calls,
'@%d Expected: <Missing> Actual: %r' % (len(self._calls_done), args))
top = self.calls.pop(0)
expected_args, result = top
    # Also log the mismatch; otherwise it could get swallowed by a try/finally
    # and be hard to diagnose.
if expected_args != args:
N = 5
prior_calls = '\n '.join(
'@%d: %r' % (len(self._calls_done) - N + i, c[0])
for i, c in enumerate(self._calls_done[-N:]))
following_calls = '\n '.join(
'@%d: %r' % (len(self._calls_done) + i + 1, c[0])
for i, c in enumerate(self.calls[:N]))
extended_msg = (
'A few prior calls:\n %s\n\n'
'This (expected):\n @%d: %r\n'
'This (actual):\n @%d: %r\n\n'
'A few following expected calls:\n %s' %
(prior_calls, len(self._calls_done), expected_args,
len(self._calls_done), args, following_calls))
git_cl.logging.error(extended_msg)
self.fail('@%d\n'
' Expected: %r\n'
' Actual: %r' % (
len(self._calls_done), expected_args, args))
self._calls_done.append(top)
if isinstance(result, Exception):
raise result
return result
def test_ask_for_explicit_yes_true(self):
self.calls = [
(('ask_for_data', 'prompt [Yes/No]: '), 'blah'),
(('ask_for_data', 'Please, type yes or no: '), 'ye'),
]
self.assertTrue(git_cl.ask_for_explicit_yes('prompt'))
def test_LoadCodereviewSettingsFromFile_gerrit(self):
codereview_file = StringIO.StringIO('GERRIT_HOST: true')
self.calls = [
((['git', 'config', '--unset-all', 'rietveld.cc'],), CERR1),
((['git', 'config', '--unset-all', 'rietveld.tree-status-url'],), CERR1),
((['git', 'config', '--unset-all', 'rietveld.viewvc-url'],), CERR1),
((['git', 'config', '--unset-all', 'rietveld.bug-prefix'],), CERR1),
((['git', 'config', '--unset-all', 'rietveld.cpplint-regex'],), CERR1),
((['git', 'config', '--unset-all', 'rietveld.cpplint-ignore-regex'],),
CERR1),
((['git', 'config', '--unset-all', 'rietveld.run-post-upload-hook'],),
CERR1),
((['git', 'config', 'gerrit.host', 'true'],), ''),
]
self.assertIsNone(git_cl.LoadCodereviewSettingsFromFile(codereview_file))
@classmethod
def _is_gerrit_calls(cls, gerrit=False):
return [((['git', 'config', 'rietveld.autoupdate'],), ''),
((['git', 'config', 'gerrit.host'],), 'True' if gerrit else '')]
@classmethod
def _git_post_upload_calls(cls):
return [
((['git', 'rev-parse', 'HEAD'],), 'hash'),
((['git', 'symbolic-ref', 'HEAD'],), 'hash'),
((['git',
'config', 'branch.hash.last-upload-hash', 'hash'],), ''),
((['git', 'config', 'rietveld.run-post-upload-hook'],), ''),
]
@staticmethod
def _git_sanity_checks(diff_base, working_branch, get_remote_branch=True):
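    """Returns the git calls expected while sanity-checking against diff_base."""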
fake_ancestor = 'fake_ancestor'
fake_cl = 'fake_cl_for_patch'
return [
((['git',
'rev-parse', '--verify', diff_base],), fake_ancestor),
((['git',
'merge-base', fake_ancestor, 'HEAD'],), fake_ancestor),
((['git',
'rev-list', '^' + fake_ancestor, 'HEAD'],), fake_cl),
# Mock a config miss (error code 1)
((['git',
'config', 'gitcl.remotebranch'],), CERR1),
] + ([
# Call to GetRemoteBranch()
((['git',
'config', 'branch.%s.merge' % working_branch],),
'refs/heads/master'),
((['git',
'config', 'branch.%s.remote' % working_branch],), 'origin'),
] if get_remote_branch else []) + [
((['git', 'rev-list', '^' + fake_ancestor,
'refs/remotes/origin/master'],), ''),
]
@classmethod
def _gerrit_ensure_auth_calls(
cls, issue=None, skip_auth_check=False, short_hostname='chromium'):
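    """Returns the calls made while ensuring Gerrit authentication is set up."""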
cmd = ['git', 'config', '--bool', 'gerrit.skip-ensure-authenticated']
if skip_auth_check:
return [((cmd, ), 'true')]
calls = [((cmd, ), CERR1)]
if issue:
calls.extend([
((['git', 'config', 'branch.master.gerritserver'],), CERR1),
])
calls.extend([
((['git', 'config', 'branch.master.merge'],), 'refs/heads/master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://%s.googlesource.com/my/repo' % short_hostname),
])
return calls
@classmethod
def _gerrit_base_calls(cls, issue=None, fetched_description=None,
fetched_status=None, other_cl_owner=None,
custom_cl_base=None, short_hostname='chromium'):
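    """Returns the calls every Gerrit upload test performs before the push."""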
calls = cls._is_gerrit_calls(True)
calls += [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.gerritissue'],),
CERR1 if issue is None else str(issue)),
]
if custom_cl_base:
ancestor_revision = custom_cl_base
else:
      # Determine ancestor_revision to be the merge base.
ancestor_revision = 'fake_ancestor_sha'
calls += [
((['git', 'config', 'branch.master.merge'],), 'refs/heads/master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['get_or_create_merge_base', 'master',
'refs/remotes/origin/master'],), ancestor_revision),
]
    # Calls that ensure Gerrit authentication is configured.
calls += cls._gerrit_ensure_auth_calls(
issue=issue, short_hostname=short_hostname)
if issue:
calls += [
(('GetChangeDetail', '%s-review.googlesource.com' % short_hostname,
'my%2Frepo~123456',
['DETAILED_ACCOUNTS', 'CURRENT_REVISION', 'CURRENT_COMMIT', 'LABELS']
),
{
'owner': {'email': (other_cl_owner or 'owner@example.com')},
'change_id': '123456789',
'current_revision': 'sha1_of_current_revision',
'revisions': { 'sha1_of_current_revision': {
'commit': {'message': fetched_description},
}},
'status': fetched_status or 'NEW',
}),
]
if fetched_status == 'ABANDONED':
calls += [
(('DieWithError', 'Change https://%s-review.googlesource.com/'
'123456 has been abandoned, new uploads are not '
'allowed' % short_hostname), SystemExitMock()),
]
return calls
if other_cl_owner:
calls += [
(('ask_for_data', 'Press Enter to upload, or Ctrl+C to abort'), ''),
]
calls += cls._git_sanity_checks(ancestor_revision, 'master',
get_remote_branch=False)
calls += [
((['git', 'rev-parse', '--show-cdup'],), ''),
((['git', 'rev-parse', 'HEAD'],), '12345'),
((['git', '-c', 'core.quotePath=false', 'diff', '--name-status',
'--no-renames', '-r', ancestor_revision + '...', '.'],),
'M\t.gitignore\n'),
((['git', 'config', 'branch.master.gerritpatchset'],), CERR1),
]
if not issue:
calls += [
((['git', 'log', '--pretty=format:%s%n%n%b',
ancestor_revision + '...'],),
'foo'),
]
calls += [
((['git', 'config', 'user.email'],), 'me@example.com'),
((['git', 'diff', '--no-ext-diff', '--stat', '-l100000', '-C50'] +
([custom_cl_base] if custom_cl_base else
[ancestor_revision, 'HEAD']),),
'+dat'),
]
return calls
@classmethod
def _gerrit_upload_calls(cls, description, reviewers, squash,
squash_mode='default',
expected_upstream_ref='origin/refs/heads/master',
title=None, notify=False,
post_amend_description=None, issue=None, cc=None,
custom_cl_base=None, tbr=None,
short_hostname='chromium',
labels=None):
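    """Returns the calls for the upload itself.

    Covers squash handling, ref suffix construction, the git push, and the
    post-upload config bookkeeping.
    """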
if post_amend_description is None:
post_amend_description = description
cc = cc or []
# Determined in `_gerrit_base_calls`.
determined_ancestor_revision = custom_cl_base or 'fake_ancestor_sha'
calls = []
if squash_mode == 'default':
calls.extend([
((['git', 'config', '--bool', 'gerrit.override-squash-uploads'],), ''),
((['git', 'config', '--bool', 'gerrit.squash-uploads'],), ''),
])
elif squash_mode in ('override_squash', 'override_nosquash'):
calls.extend([
((['git', 'config', '--bool', 'gerrit.override-squash-uploads'],),
'true' if squash_mode == 'override_squash' else 'false'),
])
else:
assert squash_mode in ('squash', 'nosquash')
# If issue is given, then description is fetched from Gerrit instead.
if issue is None:
calls += [
((['git', 'log', '--pretty=format:%s\n\n%b',
((custom_cl_base + '..') if custom_cl_base else
'fake_ancestor_sha..HEAD')],),
description),
]
if squash:
title = 'Initial_upload'
else:
if not title:
calls += [
((['git', 'show', '-s', '--format=%s', 'HEAD'],), ''),
(('ask_for_data', 'Title for patchset []: '), 'User input'),
]
title = 'User_input'
if not git_footers.get_footer_change_id(description) and not squash:
calls += [
(('DownloadGerritHook', False), ''),
# Amending of commit message to get the Change-Id.
((['git', 'log', '--pretty=format:%s\n\n%b',
determined_ancestor_revision + '..HEAD'],),
description),
((['git', 'commit', '--amend', '-m', description],), ''),
((['git', 'log', '--pretty=format:%s\n\n%b',
determined_ancestor_revision + '..HEAD'],),
post_amend_description)
]
if squash:
if not issue:
# Prompting to edit description on first upload.
calls += [
((['git', 'config', 'core.editor'],), ''),
((['RunEditor'],), description),
]
ref_to_push = 'abcdef0123456789'
calls += [
((['git', 'config', 'branch.master.merge'],), 'refs/heads/master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
]
if custom_cl_base is None:
calls += [
((['get_or_create_merge_base', 'master',
'refs/remotes/origin/master'],),
'origin/master'),
]
parent = 'origin/master'
else:
calls += [
((['git', 'merge-base', '--is-ancestor', custom_cl_base,
'refs/remotes/origin/master'],),
           callError(1)),  # Means not ancestor.
(('ask_for_data',
'Do you take responsibility for cleaning up potential mess '
'resulting from proceeding with upload? Press Enter to upload, '
'or Ctrl+C to abort'), ''),
]
parent = custom_cl_base
calls += [
((['git', 'rev-parse', 'HEAD:'],), # `HEAD:` means HEAD's tree hash.
'0123456789abcdef'),
((['git', 'commit-tree', '0123456789abcdef', '-p', parent,
'-F', '/tmp/named'],),
ref_to_push),
]
else:
ref_to_push = 'HEAD'
calls += [
(('SaveDescriptionBackup',), None),
((['git', 'rev-list',
(custom_cl_base if custom_cl_base else expected_upstream_ref) + '..' +
ref_to_push],),
'1hashPerLine\n'),
]
metrics_arguments = []
if notify:
ref_suffix = '%ready,notify=ALL'
metrics_arguments += ['ready', 'notify=ALL']
else:
if not issue and squash:
ref_suffix = '%wip'
metrics_arguments.append('wip')
else:
ref_suffix = '%notify=NONE'
metrics_arguments.append('notify=NONE')
if title:
ref_suffix += ',m=' + title
metrics_arguments.append('m')
calls += [
((['git', 'config', 'rietveld.cc'],), ''),
]
if short_hostname == 'chromium':
      # All reviewers and ccs get into ref_suffix.
for r in sorted(reviewers):
ref_suffix += ',r=%s' % r
metrics_arguments.append('r')
for c in sorted(['chromium-reviews+test-more-cc@chromium.org',
'joe@example.com'] + cc):
ref_suffix += ',cc=%s' % c
metrics_arguments.append('cc')
reviewers, cc = [], []
else:
# TODO(crbug/877717): remove this case.
calls += [
(('ValidAccounts', '%s-review.googlesource.com' % short_hostname,
sorted(reviewers) + ['joe@example.com',
'chromium-reviews+test-more-cc@chromium.org'] + cc),
{
e: {'email': e}
for e in (reviewers + ['joe@example.com'] + cc)
})
]
for r in sorted(reviewers):
if r != 'bad-account-or-email':
ref_suffix += ',r=%s' % r
metrics_arguments.append('r')
reviewers.remove(r)
for c in sorted(['joe@example.com'] + cc):
ref_suffix += ',cc=%s' % c
metrics_arguments.append('cc')
if c in cc:
cc.remove(c)
for k, v in sorted((labels or {}).items()):
ref_suffix += ',l=%s+%d' % (k, v)
metrics_arguments.append('l=%s+%d' % (k, v))
if tbr:
calls += [
(('GetCodeReviewTbrScore',
'%s-review.googlesource.com' % short_hostname,
'my/repo'),
2,),
]
calls += [
(('time.time',), 1000,),
((['git', 'push',
'https://%s.googlesource.com/my/repo' % short_hostname,
ref_to_push + ':refs/for/refs/heads/master' + ref_suffix],),
(('remote:\n'
'remote: Processing changes: (\)\n'
'remote: Processing changes: (|)\n'
'remote: Processing changes: (/)\n'
'remote: Processing changes: (-)\n'
'remote: Processing changes: new: 1 (/)\n'
'remote: Processing changes: new: 1, done\n'
'remote:\n'
'remote: New Changes:\n'
'remote: https://%s-review.googlesource.com/#/c/my/repo/+/123456'
' XXX\n'
'remote:\n'
'To https://%s.googlesource.com/my/repo\n'
' * [new branch] hhhh -> refs/for/refs/heads/master\n'
) % (short_hostname, short_hostname)),),
(('time.time',), 2000,),
(('add_repeated',
'sub_commands',
{
'execution_time': 1000,
'command': 'git push',
'exit_code': 0,
'arguments': sorted(metrics_arguments),
}),
None,),
]
if squash:
calls += [
((['git', 'config', 'branch.master.gerritissue', '123456'],),
''),
((['git', 'config', 'branch.master.gerritserver',
'https://chromium-review.googlesource.com'],), ''),
((['git', 'config', 'branch.master.gerritsquashhash',
'abcdef0123456789'],), ''),
]
# TODO(crbug/877717): this should never be used.
if squash and short_hostname != 'chromium':
calls += [
(('AddReviewers',
'chromium-review.googlesource.com', 'my%2Frepo~123456',
sorted(reviewers),
cc + ['chromium-reviews+test-more-cc@chromium.org'],
notify),
''),
]
calls += cls._git_post_upload_calls()
return calls
def _run_gerrit_upload_test(
self,
upload_args,
description,
reviewers=None,
squash=True,
squash_mode=None,
expected_upstream_ref='origin/refs/heads/master',
title=None,
notify=False,
post_amend_description=None,
issue=None,
cc=None,
fetched_status=None,
other_cl_owner=None,
custom_cl_base=None,
tbr=None,
short_hostname='chromium',
labels=None):
"""Generic gerrit upload test framework."""
if squash_mode is None:
if '--no-squash' in upload_args:
squash_mode = 'nosquash'
elif '--squash' in upload_args:
squash_mode = 'squash'
else:
squash_mode = 'default'
reviewers = reviewers or []
cc = cc or []
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.mock(git_cl.gerrit_util, 'CookiesAuthenticator',
CookiesAuthenticatorMockFactory(
same_auth=('git-owner.example.com', '', 'pass')))
self.mock(git_cl._GerritChangelistImpl, '_GerritCommitMsgHookCheck',
lambda _, offer_removal: None)
self.mock(git_cl.gclient_utils, 'RunEditor',
lambda *_, **__: self._mocked_call(['RunEditor']))
self.mock(git_cl, 'DownloadGerritHook', lambda force: self._mocked_call(
'DownloadGerritHook', force))
self.calls = self._gerrit_base_calls(
issue=issue,
fetched_description=description,
fetched_status=fetched_status,
other_cl_owner=other_cl_owner,
custom_cl_base=custom_cl_base,
short_hostname=short_hostname)
if fetched_status != 'ABANDONED':
self.mock(tempfile, 'NamedTemporaryFile', MakeNamedTemporaryFileMock(
expected_content=description))
self.mock(os, 'remove', lambda _: True)
self.calls += self._gerrit_upload_calls(
description, reviewers, squash,
squash_mode=squash_mode,
expected_upstream_ref=expected_upstream_ref,
title=title, notify=notify,
post_amend_description=post_amend_description,
issue=issue, cc=cc,
custom_cl_base=custom_cl_base, tbr=tbr,
short_hostname=short_hostname,
labels=labels)
# Uncomment when debugging.
# print '\n'.join(map(lambda x: '%2i: %s' % x, enumerate(self.calls)))
git_cl.main(['upload'] + upload_args)
def test_gerrit_upload_without_change_id(self):
self._run_gerrit_upload_test(
['--no-squash'],
'desc\n\nBUG=\n',
[],
squash=False,
post_amend_description='desc\n\nBUG=\n\nChange-Id: Ixxx')
def test_gerrit_upload_without_change_id_override_nosquash(self):
self._run_gerrit_upload_test(
[],
'desc\n\nBUG=\n',
[],
squash=False,
squash_mode='override_nosquash',
post_amend_description='desc\n\nBUG=\n\nChange-Id: Ixxx')
def test_gerrit_no_reviewer(self):
self._run_gerrit_upload_test(
[],
'desc\n\nBUG=\n\nChange-Id: I123456789\n',
[],
squash=False,
squash_mode='override_nosquash')
def test_gerrit_no_reviewer_non_chromium_host(self):
# TODO(crbug/877717): remove this test case.
self._run_gerrit_upload_test(
[],
'desc\n\nBUG=\n\nChange-Id: I123456789\n',
[],
squash=False,
squash_mode='override_nosquash',
short_hostname='other')
def test_gerrit_patchset_title_special_chars(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self._run_gerrit_upload_test(
['-f', '-t', 'We\'ll escape ^_ ^ special chars...@{u}'],
'desc\n\nBUG=\n\nChange-Id: I123456789',
squash=False,
squash_mode='override_nosquash',
title='We%27ll_escape_%5E%5F_%5E_special_chars%2E%2E%2E%40%7Bu%7D')
def test_gerrit_reviewers_cmd_line(self):
self._run_gerrit_upload_test(
['-r', 'foo@example.com', '--send-mail'],
'desc\n\nBUG=\n\nChange-Id: I123456789',
['foo@example.com'],
squash=False,
squash_mode='override_nosquash',
notify=True)
def test_gerrit_reviewer_multiple(self):
self.mock(git_cl.gerrit_util, 'GetCodeReviewTbrScore',
lambda *a: self._mocked_call('GetCodeReviewTbrScore', *a))
self._run_gerrit_upload_test(
[],
'desc\nTBR=reviewer@example.com\nBUG=\nR=another@example.com\n'
'CC=more@example.com,people@example.com\n\n'
'Change-Id: 123456789',
['reviewer@example.com', 'another@example.com'],
expected_upstream_ref='origin/master',
cc=['more@example.com', 'people@example.com'],
tbr='reviewer@example.com',
labels={'Code-Review': 2})
def test_gerrit_upload_squash_first_is_default(self):
self._run_gerrit_upload_test(
[],
'desc\nBUG=\n\nChange-Id: 123456789',
[],
expected_upstream_ref='origin/master')
def test_gerrit_upload_squash_first(self):
self._run_gerrit_upload_test(
['--squash'],
'desc\nBUG=\n\nChange-Id: 123456789',
[],
squash=True,
expected_upstream_ref='origin/master')
def test_gerrit_upload_squash_first_with_labels(self):
self._run_gerrit_upload_test(
['--squash', '--cq-dry-run', '--enable-auto-submit'],
'desc\nBUG=\n\nChange-Id: 123456789',
[],
squash=True,
expected_upstream_ref='origin/master',
labels={'Commit-Queue': 1, 'Auto-Submit': 1})
def test_gerrit_upload_squash_first_against_rev(self):
custom_cl_base = 'custom_cl_base_rev_or_branch'
self._run_gerrit_upload_test(
['--squash', custom_cl_base],
'desc\nBUG=\n\nChange-Id: 123456789',
[],
squash=True,
expected_upstream_ref='origin/master',
custom_cl_base=custom_cl_base)
self.assertIn(
'If you proceed with upload, more than 1 CL may be created by Gerrit',
sys.stdout.getvalue())
def test_gerrit_upload_squash_reupload(self):
description = 'desc\nBUG=\n\nChange-Id: 123456789'
self._run_gerrit_upload_test(
['--squash'],
description,
[],
squash=True,
expected_upstream_ref='origin/master',
issue=123456)
def test_gerrit_upload_squash_reupload_to_abandoned(self):
self.mock(git_cl, 'DieWithError',
lambda msg, change=None: self._mocked_call('DieWithError', msg))
description = 'desc\nBUG=\n\nChange-Id: 123456789'
with self.assertRaises(SystemExitMock):
self._run_gerrit_upload_test(
['--squash'],
description,
[],
squash=True,
expected_upstream_ref='origin/master',
issue=123456,
fetched_status='ABANDONED')
def test_gerrit_upload_squash_reupload_to_not_owned(self):
self.mock(git_cl.gerrit_util, 'GetAccountDetails',
lambda *_, **__: {'email': 'yet-another@example.com'})
description = 'desc\nBUG=\n\nChange-Id: 123456789'
self._run_gerrit_upload_test(
['--squash'],
description,
[],
squash=True,
expected_upstream_ref='origin/master',
issue=123456,
other_cl_owner='other@example.com')
self.assertIn(
'WARNING: Change 123456 is owned by other@example.com, but you '
'authenticate to Gerrit as yet-another@example.com.\n'
'Uploading may fail due to lack of permissions',
git_cl.sys.stdout.getvalue())
def test_upload_branch_deps(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
def mock_run_git(*args, **_kwargs):
if args[0] == ['for-each-ref',
'--format=%(refname:short) %(upstream:short)',
'refs/heads']:
# Create a local branch dependency tree that looks like this:
# test1 -> test2 -> test3 -> test4 -> test5
# -> test3.1
        # test0 -> test6
branch_deps = [
'test2 test1', # test1 -> test2
'test3 test2', # test2 -> test3
'test3.1 test2', # test2 -> test3.1
'test4 test3', # test3 -> test4
'test5 test4', # test4 -> test5
'test6 test0', # test0 -> test6
'test7', # test7
]
return '\n'.join(branch_deps)
self.mock(git_cl, 'RunGit', mock_run_git)
class RecordCalls:
times_called = 0
record_calls = RecordCalls()
def mock_CMDupload(*args, **_kwargs):
record_calls.times_called += 1
return 0
self.mock(git_cl, 'CMDupload', mock_CMDupload)
self.calls = [
(('ask_for_data', 'This command will checkout all dependent branches '
'and run "git cl upload". Press Enter to continue, '
'or Ctrl+C to abort'), ''),
]
class MockChangelist():
def __init__(self):
pass
def GetBranch(self):
return 'test1'
def GetIssue(self):
return '123'
def GetPatchset(self):
return '1001'
def IsGerrit(self):
return False
ret = git_cl.upload_branch_deps(MockChangelist(), [])
# CMDupload should have been called 5 times because of 5 dependent branches.
self.assertEquals(5, record_calls.times_called)
self.assertEquals(0, ret)
def test_gerrit_change_id(self):
self.calls = [
((['git', 'write-tree'], ),
'hashtree'),
((['git', 'rev-parse', 'HEAD~0'], ),
'branch-parent'),
((['git', 'var', 'GIT_AUTHOR_IDENT'], ),
'A B <a@b.org> 1456848326 +0100'),
((['git', 'var', 'GIT_COMMITTER_IDENT'], ),
'C D <c@d.org> 1456858326 +0100'),
((['git', 'hash-object', '-t', 'commit', '--stdin'], ),
'hashchange'),
]
change_id = git_cl.GenerateGerritChangeId('line1\nline2\n')
self.assertEqual(change_id, 'Ihashchange')
  def test_description_append_footer(self):
for init_desc, footer_line, expected_desc in [
# Use unique desc first lines for easy test failure identification.
('foo', 'R=one', 'foo\n\nR=one'),
('foo\n\nR=one', 'BUG=', 'foo\n\nR=one\nBUG='),
('foo\n\nR=one', 'Change-Id: Ixx', 'foo\n\nR=one\n\nChange-Id: Ixx'),
('foo\n\nChange-Id: Ixx', 'R=one', 'foo\n\nR=one\n\nChange-Id: Ixx'),
('foo\n\nR=one\n\nChange-Id: Ixx', 'TBR=two',
'foo\n\nR=one\nTBR=two\n\nChange-Id: Ixx'),
('foo\n\nR=one\n\nChange-Id: Ixx', 'Foo-Bar: baz',
'foo\n\nR=one\n\nChange-Id: Ixx\nFoo-Bar: baz'),
('foo\n\nChange-Id: Ixx', 'Foo-Bak: baz',
'foo\n\nChange-Id: Ixx\nFoo-Bak: baz'),
('foo', 'Change-Id: Ixx', 'foo\n\nChange-Id: Ixx'),
]:
desc = git_cl.ChangeDescription(init_desc)
desc.append_footer(footer_line)
self.assertEqual(desc.description, expected_desc)
def test_update_reviewers(self):
data = [
('foo', [], [],
'foo'),
('foo\nR=xx', [], [],
'foo\nR=xx'),
('foo\nTBR=xx', [], [],
'foo\nTBR=xx'),
('foo', ['a@c'], [],
'foo\n\nR=a@c'),
('foo\nR=xx', ['a@c'], [],
'foo\n\nR=a@c, xx'),
('foo\nTBR=xx', ['a@c'], [],
'foo\n\nR=a@c\nTBR=xx'),
('foo\nTBR=xx\nR=yy', ['a@c'], [],
'foo\n\nR=a@c, yy\nTBR=xx'),
('foo\nBUG=', ['a@c'], [],
'foo\nBUG=\nR=a@c'),
('foo\nR=xx\nTBR=yy\nR=bar', ['a@c'], [],
'foo\n\nR=a@c, bar, xx\nTBR=yy'),
('foo', ['a@c', 'b@c'], [],
'foo\n\nR=a@c, b@c'),
('foo\nBar\n\nR=\nBUG=', ['c@c'], [],
'foo\nBar\n\nR=c@c\nBUG='),
('foo\nBar\n\nR=\nBUG=\nR=', ['c@c'], [],
'foo\nBar\n\nR=c@c\nBUG='),
# Same as the line before, but full of whitespaces.
(
'foo\nBar\n\n R = \n BUG = \n R = ', ['c@c'], [],
'foo\nBar\n\nR=c@c\n BUG =',
),
# Whitespaces aren't interpreted as new lines.
('foo BUG=allo R=joe ', ['c@c'], [],
'foo BUG=allo R=joe\n\nR=c@c'),
# Redundant TBRs get promoted to Rs
('foo\n\nR=a@c\nTBR=t@c', ['b@c', 'a@c'], ['a@c', 't@c'],
'foo\n\nR=a@c, b@c\nTBR=t@c'),
]
expected = [i[-1] for i in data]
actual = []
for orig, reviewers, tbrs, _expected in data:
obj = git_cl.ChangeDescription(orig)
obj.update_reviewers(reviewers, tbrs)
actual.append(obj.description)
self.assertEqual(expected, actual)
def test_get_hash_tags(self):
cases = [
('', []),
('a', []),
('[a]', ['a']),
('[aa]', ['aa']),
('[a ]', ['a']),
('[a- ]', ['a']),
('[a- b]', ['a-b']),
('[a--b]', ['a-b']),
('[a', []),
('[a]x', ['a']),
('[aa]x', ['aa']),
('[a b]', ['a-b']),
('[a b]', ['a-b']),
('[a__b]', ['a-b']),
('[a] x', ['a']),
('[a][b]', ['a', 'b']),
('[a] [b]', ['a', 'b']),
('[a][b]x', ['a', 'b']),
('[a][b] x', ['a', 'b']),
('[a]\n[b]', ['a']),
('[a\nb]', []),
('[a][', ['a']),
('Revert "[a] feature"', ['a']),
('Reland "[a] feature"', ['a']),
('Revert: [a] feature', ['a']),
('Reland: [a] feature', ['a']),
('Revert "Reland: [a] feature"', ['a']),
('Foo: feature', ['foo']),
('Foo Bar: feature', ['foo-bar']),
('Revert "Foo bar: feature"', ['foo-bar']),
('Reland "Foo bar: feature"', ['foo-bar']),
]
for desc, expected in cases:
change_desc = git_cl.ChangeDescription(desc)
actual = change_desc.get_hash_tags()
self.assertEqual(
actual,
expected,
'GetHashTags(%r) == %r, expected %r' % (desc, actual, expected))
self.assertEqual(None, git_cl.GetTargetRef('origin', None, 'master'))
self.assertEqual(None, git_cl.GetTargetRef(None,
'refs/remotes/origin/master',
'master'))
# Check default target refs for branches.
self.assertEqual('refs/heads/master',
git_cl.GetTargetRef('origin', 'refs/remotes/origin/master',
None))
self.assertEqual('refs/heads/master',
git_cl.GetTargetRef('origin', 'refs/remotes/origin/lkgr',
None))
self.assertEqual('refs/heads/master',
git_cl.GetTargetRef('origin', 'refs/remotes/origin/lkcr',
None))
self.assertEqual('refs/branch-heads/123',
git_cl.GetTargetRef('origin',
'refs/remotes/branch-heads/123',
None))
self.assertEqual('refs/diff/test',
git_cl.GetTargetRef('origin',
'refs/remotes/origin/refs/diff/test',
None))
self.assertEqual('refs/heads/chrome/m42',
git_cl.GetTargetRef('origin',
'refs/remotes/origin/chrome/m42',
None))
# Check target refs for user-specified target branch.
for branch in ('branch-heads/123', 'remotes/branch-heads/123',
'refs/remotes/branch-heads/123'):
self.assertEqual('refs/branch-heads/123',
git_cl.GetTargetRef('origin',
'refs/remotes/origin/master',
branch))
for branch in ('origin/master', 'remotes/origin/master',
'refs/remotes/origin/master'):
self.assertEqual('refs/heads/master',
git_cl.GetTargetRef('origin',
'refs/remotes/branch-heads/123',
branch))
for branch in ('master', 'heads/master', 'refs/heads/master'):
self.assertEqual('refs/heads/master',
git_cl.GetTargetRef('origin',
'refs/remotes/branch-heads/123',
branch))
def test_patch_when_dirty(self):
# Patch when local tree is dirty
self.mock(git_common, 'is_dirty_git_tree', lambda x: True)
self.assertNotEqual(git_cl.main(['patch', '123456']), 0)
@staticmethod
def _get_gerrit_codereview_server_calls(branch, value=None,
git_short_host='host',
detect_branch=True,
detect_server=True):
"""Returns calls executed by _GerritChangelistImpl.GetCodereviewServer.
    If value is given, branch.<BRANCH>.gerritserver is already set.
"""
calls = []
if detect_branch:
calls.append(((['git', 'symbolic-ref', 'HEAD'],), branch))
if detect_server:
calls.append(((['git', 'config', 'branch.' + branch + '.gerritserver'],),
CERR1 if value is None else value))
if value is None:
calls += [
((['git', 'config', 'branch.' + branch + '.merge'],),
'refs/heads' + branch),
((['git', 'config', 'branch.' + branch + '.remote'],),
'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://%s.googlesource.com/my/repo' % git_short_host),
]
return calls
def _patch_common(self, force_codereview=False,
new_branch=False, git_short_host='host',
detect_gerrit_server=False,
actual_codereview=None,
codereview_in_url=False):
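    """Sets up the calls shared by `git cl patch` tests.

    Ends with fetching the change detail from Gerrit when applicable.
    """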
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.mock(git_cl, 'IsGitVersionAtLeast', lambda *args: True)
if new_branch:
self.calls = [((['git', 'new-branch', 'master'],), ''),]
if codereview_in_url and actual_codereview == 'rietveld':
self.calls += [
((['git', 'rev-parse', '--show-cdup'],), ''),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
]
if not force_codereview and not codereview_in_url:
# These calls detect codereview to use.
self.calls += [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.gerritissue'],), CERR1),
]
if detect_gerrit_server:
self.calls += self._get_gerrit_codereview_server_calls(
'master', git_short_host=git_short_host,
detect_branch=not new_branch and force_codereview)
actual_codereview = 'gerrit'
if actual_codereview == 'gerrit':
self.calls += [
(('GetChangeDetail', git_short_host + '-review.googlesource.com',
'my%2Frepo~123456', ['ALL_REVISIONS', 'CURRENT_COMMIT']),
{
'current_revision': '7777777777',
'revisions': {
'1111111111': {
'_number': 1,
'fetch': {'http': {
'url': 'https://%s.googlesource.com/my/repo' % git_short_host,
'ref': 'refs/changes/56/123456/1',
}},
},
'7777777777': {
'_number': 7,
'fetch': {'http': {
'url': 'https://%s.googlesource.com/my/repo' % git_short_host,
'ref': 'refs/changes/56/123456/7',
}},
},
},
}),
]
def test_patch_gerrit_default(self):
self._patch_common(git_short_host='chromium', detect_gerrit_server=True)
self.calls += [
((['git', 'fetch', 'https://chromium.googlesource.com/my/repo',
'refs/changes/56/123456/7'],), ''),
((['git', 'cherry-pick', 'FETCH_HEAD'],), ''),
((['git', 'config', 'branch.master.gerritissue', '123456'],),
''),
((['git', 'config', 'branch.master.gerritserver',
'https://chromium-review.googlesource.com'],), ''),
((['git', 'config', 'branch.master.gerritpatchset', '7'],), ''),
((['git', 'rev-parse', 'FETCH_HEAD'],), 'deadbeef'),
((['git', 'config', 'branch.master.last-upload-hash', 'deadbeef'],), ''),
((['git', 'config', 'branch.master.gerritsquashhash', 'deadbeef'],), ''),
]
self.assertEqual(git_cl.main(['patch', '123456']), 0)
def test_patch_gerrit_new_branch(self):
self._patch_common(
git_short_host='chromium', detect_gerrit_server=True, new_branch=True)
self.calls += [
((['git', 'fetch', 'https://chromium.googlesource.com/my/repo',
'refs/changes/56/123456/7'],), ''),
((['git', 'cherry-pick', 'FETCH_HEAD'],), ''),
((['git', 'config', 'branch.master.gerritissue', '123456'],),
''),
((['git', 'config', 'branch.master.gerritserver',
'https://chromium-review.googlesource.com'],), ''),
((['git', 'config', 'branch.master.gerritpatchset', '7'],), ''),
((['git', 'rev-parse', 'FETCH_HEAD'],), 'deadbeef'),
((['git', 'config', 'branch.master.last-upload-hash', 'deadbeef'],), ''),
((['git', 'config', 'branch.master.gerritsquashhash', 'deadbeef'],), ''),
]
self.assertEqual(git_cl.main(['patch', '-b', 'master', '123456']), 0)
def test_patch_gerrit_force(self):
self._patch_common(
force_codereview=True, git_short_host='host', detect_gerrit_server=True)
self.calls += [
((['git', 'fetch', 'https://host.googlesource.com/my/repo',
'refs/changes/56/123456/7'],), ''),
((['git', 'reset', '--hard', 'FETCH_HEAD'],), ''),
((['git', 'config', 'branch.master.gerritissue', '123456'],),
''),
((['git', 'config', 'branch.master.gerritserver',
'https://host-review.googlesource.com'],), ''),
((['git', 'config', 'branch.master.gerritpatchset', '7'],), ''),
((['git', 'rev-parse', 'FETCH_HEAD'],), 'deadbeef'),
((['git', 'config', 'branch.master.last-upload-hash', 'deadbeef'],), ''),
((['git', 'config', 'branch.master.gerritsquashhash', 'deadbeef'],), ''),
]
self.assertEqual(git_cl.main(['patch', '--gerrit', '123456', '--force']), 0)
def test_patch_gerrit_guess_by_url(self):
self.calls += self._get_gerrit_codereview_server_calls(
'master', git_short_host='else', detect_server=False)
self._patch_common(
actual_codereview='gerrit', git_short_host='else',
codereview_in_url=True, detect_gerrit_server=False)
self.calls += [
((['git', 'fetch', 'https://else.googlesource.com/my/repo',
'refs/changes/56/123456/1'],), ''),
((['git', 'cherry-pick', 'FETCH_HEAD'],), ''),
((['git', 'config', 'branch.master.gerritissue', '123456'],),
''),
((['git', 'config', 'branch.master.gerritserver',
'https://else-review.googlesource.com'],), ''),
((['git', 'config', 'branch.master.gerritpatchset', '1'],), ''),
((['git', 'rev-parse', 'FETCH_HEAD'],), 'deadbeef'),
((['git', 'config', 'branch.master.last-upload-hash', 'deadbeef'],), ''),
((['git', 'config', 'branch.master.gerritsquashhash', 'deadbeef'],), ''),
]
self.assertEqual(git_cl.main(
['patch', 'https://else-review.googlesource.com/#/c/123456/1']), 0)
def test_patch_gerrit_guess_by_url_with_repo(self):
self.calls += self._get_gerrit_codereview_server_calls(
'master', git_short_host='else', detect_server=False)
self._patch_common(
actual_codereview='gerrit', git_short_host='else',
codereview_in_url=True, detect_gerrit_server=False)
self.calls += [
((['git', 'fetch', 'https://else.googlesource.com/my/repo',
'refs/changes/56/123456/1'],), ''),
((['git', 'cherry-pick', 'FETCH_HEAD'],), ''),
((['git', 'config', 'branch.master.gerritissue', '123456'],),
''),
((['git', 'config', 'branch.master.gerritserver',
'https://else-review.googlesource.com'],), ''),
((['git', 'config', 'branch.master.gerritpatchset', '1'],), ''),
((['git', 'rev-parse', 'FETCH_HEAD'],), 'deadbeef'),
((['git', 'config', 'branch.master.last-upload-hash', 'deadbeef'],), ''),
((['git', 'config', 'branch.master.gerritsquashhash', 'deadbeef'],), ''),
]
self.assertEqual(git_cl.main(
['patch', 'https://else-review.googlesource.com/c/my/repo/+/123456/1']),
0)
def test_patch_gerrit_conflict(self):
self._patch_common(detect_gerrit_server=True, git_short_host='chromium')
self.calls += [
((['git', 'fetch', 'https://chromium.googlesource.com/my/repo',
'refs/changes/56/123456/7'],), ''),
((['git', 'cherry-pick', 'FETCH_HEAD'],), CERR1),
((['DieWithError', 'Command "git cherry-pick FETCH_HEAD" failed.\n'],),
SystemExitMock()),
]
with self.assertRaises(SystemExitMock):
git_cl.main(['patch', '123456'])
def test_patch_gerrit_not_exists(self):
    def notExists(_issue, *_, **_kwargs):
raise git_cl.gerrit_util.GerritError(404, '')
self.mock(git_cl.gerrit_util, 'GetChangeDetail', notExists)
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.gerritissue'],), CERR1),
((['git', 'config', 'branch.master.gerritserver'],), CERR1),
((['git', 'config', 'branch.master.merge'],), 'refs/heads/master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/my/repo'),
((['DieWithError',
'change 123456 at https://chromium-review.googlesource.com does not '
'exist or you have no access to it'],), SystemExitMock()),
]
with self.assertRaises(SystemExitMock):
self.assertEqual(1, git_cl.main(['patch', '123456']))
def _checkout_calls(self):
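    """Returns the config scan used to map Gerrit issues to local branches."""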
return [
((['git', 'config', '--local', '--get-regexp',
'branch\\..*\\.gerritissue'], ),
('branch.ger-branch.gerritissue 123456\n'
'branch.gbranch654.gerritissue 654321\n')),
]
def test_checkout_gerrit(self):
"""Tests git cl checkout <issue>."""
self.calls = self._checkout_calls()
self.calls += [((['git', 'checkout', 'ger-branch'], ), '')]
self.assertEqual(0, git_cl.main(['checkout', '123456']))
def test_checkout_not_found(self):
"""Tests git cl checkout <issue>."""
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = self._checkout_calls()
self.assertEqual(1, git_cl.main(['checkout', '99999']))
def test_checkout_no_branch_issues(self):
"""Tests git cl checkout <issue>."""
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = [
((['git', 'config', '--local', '--get-regexp',
'branch\\..*\\.gerritissue'], ), CERR1),
]
self.assertEqual(1, git_cl.main(['checkout', '99999']))
def _test_gerrit_ensure_authenticated_common(self, auth,
skip_auth_check=False):
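    """Returns a Gerrit Changelist backed by the given .gitcookies auth map."""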
self.mock(git_cl.gerrit_util, 'CookiesAuthenticator',
CookiesAuthenticatorMockFactory(hosts_with_creds=auth))
self.mock(git_cl, 'DieWithError',
lambda msg, change=None: self._mocked_call(['DieWithError', msg]))
self.calls = self._gerrit_ensure_auth_calls(skip_auth_check=skip_auth_check)
cl = git_cl.Changelist(codereview='gerrit')
cl.branch = 'master'
cl.branchref = 'refs/heads/master'
cl.lookedup_issue = True
return cl
def test_gerrit_ensure_authenticated_missing(self):
cl = self._test_gerrit_ensure_authenticated_common(auth={
'chromium.googlesource.com': ('git-is.ok', '', 'but gerrit is missing'),
})
self.calls.append(
((['DieWithError',
'Credentials for the following hosts are required:\n'
' chromium-review.googlesource.com\n'
'These are read from ~/.gitcookies (or legacy ~/.netrc)\n'
'You can (re)generate your credentials by visiting '
'https://chromium-review.googlesource.com/new-password'],), ''),)
self.assertIsNone(cl.EnsureAuthenticated(force=False))
def test_gerrit_ensure_authenticated_conflict(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
cl = self._test_gerrit_ensure_authenticated_common(auth={
'chromium.googlesource.com':
('git-one.example.com', None, 'secret1'),
'chromium-review.googlesource.com':
('git-other.example.com', None, 'secret2'),
})
self.calls.append(
(('ask_for_data', 'If you know what you are doing '
'press Enter to continue, or Ctrl+C to abort'), ''))
self.assertIsNone(cl.EnsureAuthenticated(force=False))
def test_gerrit_ensure_authenticated_ok(self):
cl = self._test_gerrit_ensure_authenticated_common(auth={
'chromium.googlesource.com':
('git-same.example.com', None, 'secret'),
'chromium-review.googlesource.com':
('git-same.example.com', None, 'secret'),
})
self.assertIsNone(cl.EnsureAuthenticated(force=False))
def test_gerrit_ensure_authenticated_skipped(self):
cl = self._test_gerrit_ensure_authenticated_common(
auth={}, skip_auth_check=True)
self.assertIsNone(cl.EnsureAuthenticated(force=False))
def test_gerrit_ensure_authenticated_bearer_token(self):
cl = self._test_gerrit_ensure_authenticated_common(auth={
'chromium.googlesource.com':
('', None, 'secret'),
'chromium-review.googlesource.com':
('', None, 'secret'),
})
self.assertIsNone(cl.EnsureAuthenticated(force=False))
header = gerrit_util.CookiesAuthenticator().get_auth_header(
'chromium.googlesource.com')
self.assertTrue('Bearer' in header)
def _cmd_set_commit_gerrit_common(self, vote, notify=None):
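    """Sets up the calls for `git cl set-commit` posting the given CQ vote."""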
self.mock(git_cl.gerrit_util, 'SetReview',
lambda h, i, labels, notify=None:
self._mocked_call(['SetReview', h, i, labels, notify]))
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '123'),
((['git', 'config', 'branch.feature.gerritserver'],),
'https://chromium-review.googlesource.com'),
((['git', 'config', 'branch.feature.merge'],), 'refs/heads/master'),
((['git', 'config', 'branch.feature.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/infra/infra.git'),
((['SetReview', 'chromium-review.googlesource.com',
'infra%2Finfra~123',
{'Commit-Queue': vote}, notify],), ''),
]
def test_cmd_set_commit_gerrit_clear(self):
self._cmd_set_commit_gerrit_common(0)
self.assertEqual(0, git_cl.main(['set-commit', '-c']))
def test_cmd_set_commit_gerrit_dry(self):
self._cmd_set_commit_gerrit_common(1, notify=False)
self.assertEqual(0, git_cl.main(['set-commit', '-d']))
def test_cmd_set_commit_gerrit(self):
self._cmd_set_commit_gerrit_common(2)
self.assertEqual(0, git_cl.main(['set-commit']))
def test_description_display(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.mock(git_cl, 'Changelist', ChangelistMock)
ChangelistMock.desc = 'foo\n'
self.assertEqual(0, git_cl.main(['description', '-d']))
self.assertEqual('foo\n', out.getvalue())
def test_StatusFieldOverrideIssueMissingArgs(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stderr', out)
try:
self.assertEqual(git_cl.main(['status', '--issue', '1']), 0)
except SystemExit as ex:
self.assertEqual(ex.code, 2)
self.assertRegexpMatches(out.getvalue(), r'--issue must be specified')
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stderr', out)
try:
self.assertEqual(git_cl.main(['status', '--issue', '1', '--gerrit']), 0)
except SystemExit as ex:
self.assertEqual(ex.code, 2)
self.assertRegexpMatches(out.getvalue(), r'--field must be specified')
def test_StatusFieldOverrideIssue(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
def assertIssue(cl_self, *_args):
self.assertEquals(cl_self.issue, 1)
return 'foobar'
self.mock(git_cl.Changelist, 'GetDescription', assertIssue)
self.assertEqual(
git_cl.main(['status', '--issue', '1', '--gerrit', '--field', 'desc']),
0)
self.assertEqual(out.getvalue(), 'foobar\n')
def test_SetCloseOverrideIssue(self):
def assertIssue(cl_self, *_args):
self.assertEquals(cl_self.issue, 1)
return 'foobar'
self.mock(git_cl.Changelist, 'GetDescription', assertIssue)
self.mock(git_cl.Changelist, 'CloseIssue', lambda *_: None)
self.assertEqual(
git_cl.main(['set-close', '--issue', '1', '--gerrit']), 0)
def test_description(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.merge'],), 'feature'),
((['git', 'config', 'branch.feature.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/my/repo'),
(('GetChangeDetail', 'chromium-review.googlesource.com',
'my%2Frepo~123123', ['CURRENT_REVISION', 'CURRENT_COMMIT']),
{
'current_revision': 'sha1',
'revisions': {'sha1': {
'commit': {'message': 'foobar'},
}},
}),
]
self.assertEqual(0, git_cl.main([
'description',
'https://chromium-review.googlesource.com/c/my/repo/+/123123',
'-d']))
self.assertEqual('foobar\n', out.getvalue())
def test_description_set_raw(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.mock(git_cl, 'Changelist', ChangelistMock)
self.mock(git_cl.sys, 'stdin', StringIO.StringIO('hihi'))
self.assertEqual(0, git_cl.main(['description', '-n', 'hihi']))
self.assertEqual('hihi', ChangelistMock.desc)
def test_description_appends_bug_line(self):
current_desc = 'Some.\n\nChange-Id: xxx'
def RunEditor(desc, _, **kwargs):
self.assertEquals(
'# Enter a description of the change.\n'
'# This will be displayed on the codereview site.\n'
'# The first line will also be used as the subject of the review.\n'
'#--------------------This line is 72 characters long'
'--------------------\n'
'Some.\n\nChange-Id: xxx\nBug: ',
desc)
# Simulate user changing something.
return 'Some.\n\nChange-Id: xxx\nBug: 123'
def UpdateDescriptionRemote(_, desc, force=False):
self.assertEquals(desc, 'Some.\n\nChange-Id: xxx\nBug: 123')
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.mock(git_cl.Changelist, 'GetDescription',
lambda *args: current_desc)
self.mock(git_cl._GerritChangelistImpl, 'UpdateDescriptionRemote',
UpdateDescriptionRemote)
self.mock(git_cl.gclient_utils, 'RunEditor', RunEditor)
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '123'),
((['git', 'config', 'rietveld.autoupdate'],), CERR1),
((['git', 'config', 'rietveld.bug-prefix'],), CERR1),
((['git', 'config', 'core.editor'],), 'vi'),
]
self.assertEqual(0, git_cl.main(['description', '--gerrit']))
def test_description_set_stdin(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.mock(git_cl, 'Changelist', ChangelistMock)
self.mock(git_cl.sys, 'stdin', StringIO.StringIO('hi \r\n\t there\n\nman'))
self.assertEqual(0, git_cl.main(['description', '-n', '-']))
self.assertEqual('hi\n\t there\n\nman', ChangelistMock.desc)
def test_archive(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = \
[((['git', 'for-each-ref', '--format=%(refname)', 'refs/heads'],),
'refs/heads/master\nrefs/heads/foo\nrefs/heads/bar'),
((['git', 'config', 'branch.master.gerritissue'],), '456'),
((['git', 'config', 'branch.foo.gerritissue'],), CERR1),
((['git', 'config', 'branch.bar.gerritissue'],), '789'),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'tag', 'git-cl-archived-456-foo', 'foo'],), ''),
((['git', 'branch', '-D', 'foo'],), '')]
self.mock(git_cl, 'get_cl_statuses',
lambda branches, fine_grained, max_processes:
[(MockChangelistWithBranchAndIssue('master', 1), 'open'),
(MockChangelistWithBranchAndIssue('foo', 456), 'closed'),
(MockChangelistWithBranchAndIssue('bar', 789), 'open')])
self.assertEqual(0, git_cl.main(['archive', '-f']))
def test_archive_current_branch_fails(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = \
[((['git', 'for-each-ref', '--format=%(refname)', 'refs/heads'],),
'refs/heads/master'),
((['git', 'config', 'branch.master.gerritissue'],), '1'),
((['git', 'symbolic-ref', 'HEAD'],), 'master')]
self.mock(git_cl, 'get_cl_statuses',
lambda branches, fine_grained, max_processes:
[(MockChangelistWithBranchAndIssue('master', 1), 'closed')])
self.assertEqual(1, git_cl.main(['archive', '-f']))
def test_archive_dry_run(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = \
[((['git', 'for-each-ref', '--format=%(refname)', 'refs/heads'],),
'refs/heads/master\nrefs/heads/foo\nrefs/heads/bar'),
((['git', 'config', 'branch.master.gerritissue'],), '456'),
((['git', 'config', 'branch.foo.gerritissue'],), CERR1),
((['git', 'config', 'branch.bar.gerritissue'],), '789'),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),]
self.mock(git_cl, 'get_cl_statuses',
lambda branches, fine_grained, max_processes:
[(MockChangelistWithBranchAndIssue('master', 1), 'open'),
(MockChangelistWithBranchAndIssue('foo', 456), 'closed'),
(MockChangelistWithBranchAndIssue('bar', 789), 'open')])
self.assertEqual(0, git_cl.main(['archive', '-f', '--dry-run']))
def test_archive_no_tags(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = \
[((['git', 'for-each-ref', '--format=%(refname)', 'refs/heads'],),
'refs/heads/master\nrefs/heads/foo\nrefs/heads/bar'),
((['git', 'config', 'branch.master.gerritissue'],), '1'),
((['git', 'config', 'branch.foo.gerritissue'],), '456'),
((['git', 'config', 'branch.bar.gerritissue'],), CERR1),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'branch', '-D', 'foo'],), '')]
self.mock(git_cl, 'get_cl_statuses',
lambda branches, fine_grained, max_processes:
[(MockChangelistWithBranchAndIssue('master', 1), 'open'),
(MockChangelistWithBranchAndIssue('foo', 456), 'closed'),
(MockChangelistWithBranchAndIssue('bar', 789), 'open')])
self.assertEqual(0, git_cl.main(['archive', '-f', '--notags']))
def test_cmd_issue_erase_existing(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '123'),
      # Let this command raise an exception (retcode=1); it should be ignored.
((['git', 'config', '--unset', 'branch.feature.last-upload-hash'],),
CERR1),
((['git', 'config', '--unset', 'branch.feature.gerritissue'],), ''),
((['git', 'config', '--unset', 'branch.feature.gerritpatchset'],), ''),
((['git', 'config', '--unset', 'branch.feature.gerritserver'],), ''),
((['git', 'config', '--unset', 'branch.feature.gerritsquashhash'],),
''),
((['git', 'log', '-1', '--format=%B'],), 'This is a description'),
]
self.assertEqual(0, git_cl.main(['issue', '0']))
def test_cmd_issue_erase_existing_with_change_id(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.mock(git_cl.Changelist, 'GetDescription',
lambda _: 'This is a description\n\nChange-Id: Ideadbeef')
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '123'),
      # Let this command raise an exception (retcode=1); it should be ignored.
((['git', 'config', '--unset', 'branch.feature.last-upload-hash'],),
CERR1),
((['git', 'config', '--unset', 'branch.feature.gerritissue'],), ''),
((['git', 'config', '--unset', 'branch.feature.gerritpatchset'],), ''),
((['git', 'config', '--unset', 'branch.feature.gerritserver'],), ''),
((['git', 'config', '--unset', 'branch.feature.gerritsquashhash'],),
''),
((['git', 'log', '-1', '--format=%B'],),
'This is a description\n\nChange-Id: Ideadbeef'),
((['git', 'commit', '--amend', '-m', 'This is a description\n'],), ''),
]
self.assertEqual(0, git_cl.main(['issue', '0']))
def test_cmd_issue_json(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '123'),
((['git', 'config', 'branch.feature.gerritserver'],),
'https://chromium-review.googlesource.com'),
(('write_json', 'output.json',
{'issue': 123,
'issue_url': 'https://chromium-review.googlesource.com/123'}),
''),
]
self.assertEqual(0, git_cl.main(['issue', '--json', 'output.json']))
def test_git_cl_try_default_cq_dry_run_gerrit(self):
self.mock(git_cl.Changelist, 'GetChange',
lambda _, *a: (
self._mocked_call(['GetChange']+list(a))))
self.mock(git_cl.presubmit_support, 'DoGetTryMasters',
lambda *_, **__: (
self._mocked_call(['DoGetTryMasters'])))
self.mock(git_cl._GerritChangelistImpl, 'SetCQState',
lambda _, s: self._mocked_call(['SetCQState', s]))
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '123456'),
((['git', 'config', 'branch.feature.gerritserver'],),
'https://chromium-review.googlesource.com'),
((['git', 'config', 'branch.feature.merge'],), 'feature'),
((['git', 'config', 'branch.feature.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/depot_tools'),
(('GetChangeDetail', 'chromium-review.googlesource.com',
'depot_tools~123456',
['DETAILED_ACCOUNTS', 'ALL_REVISIONS', 'CURRENT_COMMIT']), {
'project': 'depot_tools',
'status': 'OPEN',
'owner': {'email': 'owner@e.mail'},
'revisions': {
'deadbeaf': {
'_number': 6,
},
'beeeeeef': {
'_number': 7,
'fetch': {'http': {
'url': 'https://chromium.googlesource.com/depot_tools',
'ref': 'refs/changes/56/123456/7'
}},
},
},
}),
((['git', 'config', 'branch.feature.merge'],), 'feature'),
((['git', 'config', 'branch.feature.remote'],), 'origin'),
((['get_or_create_merge_base', 'feature', 'feature'],),
'fake_ancestor_sha'),
((['GetChange', 'fake_ancestor_sha', None], ),
git_cl.presubmit_support.GitChange(
'', '', '', '', '', '', '', '')),
((['git', 'rev-parse', '--show-cdup'],), '../'),
((['DoGetTryMasters'], ), None),
((['SetCQState', git_cl._CQState.DRY_RUN], ), None),
]
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.assertEqual(0, git_cl.main(['try']))
self.assertEqual(
out.getvalue(),
'Scheduling CQ dry run on: '
'https://chromium-review.googlesource.com/123456\n')
def test_git_cl_try_buildbucket_with_properties_gerrit(self):
self.mock(git_cl.Changelist, 'GetMostRecentPatchset', lambda _: 7)
self.mock(git_cl.uuid, 'uuid4', lambda: 'uuid4')
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '123456'),
((['git', 'config', 'branch.feature.gerritserver'],),
'https://chromium-review.googlesource.com'),
((['git', 'config', 'branch.feature.merge'],), 'feature'),
((['git', 'config', 'branch.feature.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/depot_tools'),
(('GetChangeDetail', 'chromium-review.googlesource.com',
'depot_tools~123456',
['DETAILED_ACCOUNTS', 'ALL_REVISIONS', 'CURRENT_COMMIT']), {
'project': 'depot_tools',
'status': 'OPEN',
'owner': {'email': 'owner@e.mail'},
'revisions': {
'deadbeaf': {
'_number': 6,
},
'beeeeeef': {
'_number': 7,
'fetch': {'http': {
'url': 'https://chromium.googlesource.com/depot_tools',
'ref': 'refs/changes/56/123456/7'
}},
},
},
}),
]
def _buildbucket_retry(*_, **kw):
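      """Checks the JSON body of the buildbucket request being scheduled."""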
# self.maxDiff = 10000
body = json.loads(kw['body'])
self.assertEqual(len(body['builds']), 1)
build = body['builds'][0]
params = json.loads(build.pop('parameters_json'))
self.assertEqual(params, {
u'builder_name': u'win',
u'changes': [{u'author': {u'email': u'owner@e.mail'},
u'revision': None}],
u'properties': {
u'category': u'git_cl_try',
u'key': u'val',
u'json': [{u'a': 1}, None],
u'patch_gerrit_url':
u'https://chromium-review.googlesource.com',
u'patch_issue': 123456,
u'patch_project': u'depot_tools',
u'patch_ref': u'refs/changes/56/123456/7',
u'patch_repository_url':
u'https://chromium.googlesource.com/depot_tools',
u'patch_set': 7,
u'patch_storage': u'gerrit',
}
})
self.assertEqual(build, {
u'bucket': u'luci.chromium.try',
u'client_operation_id': u'uuid4',
u'tags': [
u'builder:win',
u'buildset:patch/gerrit/chromium-review.googlesource.com/123456/7',
u'user_agent:git_cl_try',
],
})
self.mock(git_cl, '_buildbucket_retry', _buildbucket_retry)
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.assertEqual(0, git_cl.main([
'try', '-B', 'luci.chromium.try', '-b', 'win',
'-p', 'key=val', '-p', 'json=[{"a":1}, null]']))
self.assertRegexpMatches(
git_cl.sys.stdout.getvalue(),
'Tried jobs on:\nBucket: luci.chromium.try')
def test_git_cl_try_bots_on_multiple_masters(self):
self.mock(git_cl.Changelist, 'GetMostRecentPatchset', lambda _: 7)
self.mock(git_cl.Changelist, 'GetChange',
lambda _, *a: (
self._mocked_call(['GetChange']+list(a))))
self.mock(git_cl.presubmit_support, 'DoGetTryMasters',
lambda *_, **__: (
self._mocked_call(['DoGetTryMasters'])))
self.mock(git_cl._GerritChangelistImpl, 'SetCQState',
lambda _, s: self._mocked_call(['SetCQState', s]))
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '123456'),
((['git', 'config', 'branch.feature.gerritserver'],),
'https://chromium-review.googlesource.com'),
((['git', 'config', 'branch.feature.merge'],), 'feature'),
((['git', 'config', 'branch.feature.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/depot_tools'),
(('GetChangeDetail', 'chromium-review.googlesource.com',
'depot_tools~123456',
['DETAILED_ACCOUNTS', 'ALL_REVISIONS', 'CURRENT_COMMIT']), {
'project': 'depot_tools',
'status': 'OPEN',
'owner': {'email': 'owner@e.mail'},
'revisions': {
'deadbeaf': {
'_number': 6,
},
'beeeeeef': {
'_number': 7,
'fetch': {'http': {
'url': 'https://chromium.googlesource.com/depot_tools',
'ref': 'refs/changes/56/123456/7'
}},
},
},
}),
]
def _buildbucket_retry(*_, **kw):
body = json.loads(kw['body'])
self.assertEqual(len(body['builds']), 2)
self.assertEqual(body['builds'][0]['bucket'], 'bucket1')
params = json.loads(body['builds'][0]['parameters_json'])
self.assertEqual(params['builder_name'], 'builder1')
self.assertEqual(body['builds'][1]['bucket'], 'bucket2')
params = json.loads(body['builds'][1]['parameters_json'])
self.assertEqual(params['builder_name'], 'builder2')
self.mock(git_cl, '_buildbucket_retry', _buildbucket_retry)
self.mock(git_cl.urllib2, 'urlopen', lambda _: StringIO.StringIO(
json.dumps({
'builder1': {'bucket': 'bucket1'},
'builder2': {'bucket': 'bucket2'},
})))
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.assertEqual(
0, git_cl.main(['try', '-b', 'builder1', '-b', 'builder2']))
self.assertEqual(
git_cl.sys.stdout.getvalue(),
'Tried jobs on:\n'
'Bucket: bucket1\n'
' builder1: []\n'
'Bucket: bucket2\n'
' builder2: []\n'
'To see results here, run: git cl try-results\n'
'To see results in browser, run: git cl web\n')
def _common_GerritCommitMsgHookCheck(self):
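    """Mocks filesystem helpers and returns a Changelist for hook checks."""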
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.mock(git_cl.os.path, 'abspath',
lambda path: self._mocked_call(['abspath', path]))
self.mock(git_cl.os.path, 'exists',
lambda path: self._mocked_call(['exists', path]))
self.mock(git_cl.gclient_utils, 'FileRead',
lambda path: self._mocked_call(['FileRead', path]))
self.mock(git_cl.gclient_utils, 'rm_file_or_tree',
lambda path: self._mocked_call(['rm_file_or_tree', path]))
self.calls = [
((['git', 'rev-parse', '--show-cdup'],), '../'),
((['abspath', '../'],), '/abs/git_repo_root'),
]
return git_cl.Changelist(codereview='gerrit', issue=123)
def test_GerritCommitMsgHookCheck_custom_hook(self):
cl = self._common_GerritCommitMsgHookCheck()
self.calls += [
((['exists', '/abs/git_repo_root/.git/hooks/commit-msg'],), True),
((['FileRead', '/abs/git_repo_root/.git/hooks/commit-msg'],),
'#!/bin/sh\necho "custom hook"')
]
cl._codereview_impl._GerritCommitMsgHookCheck(offer_removal=True)
def test_GerritCommitMsgHookCheck_not_exists(self):
cl = self._common_GerritCommitMsgHookCheck()
self.calls += [
((['exists', '/abs/git_repo_root/.git/hooks/commit-msg'],), False),
]
cl._codereview_impl._GerritCommitMsgHookCheck(offer_removal=True)
def test_GerritCommitMsgHookCheck(self):
cl = self._common_GerritCommitMsgHookCheck()
self.calls += [
((['exists', '/abs/git_repo_root/.git/hooks/commit-msg'],), True),
((['FileRead', '/abs/git_repo_root/.git/hooks/commit-msg'],),
'...\n# From Gerrit Code Review\n...\nadd_ChangeId()\n'),
(('ask_for_data', 'Do you want to remove it now? [Yes/No]: '), 'Yes'),
((['rm_file_or_tree', '/abs/git_repo_root/.git/hooks/commit-msg'],),
''),
]
cl._codereview_impl._GerritCommitMsgHookCheck(offer_removal=True)
def test_GerritCmdLand(self):
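    # `git cl land` on a Gerrit CL should submit the issue and report the
    # landed commit.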
self.calls += [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritsquashhash'],),
'deadbeaf'),
((['git', 'diff', 'deadbeaf'],), ''), # No diff.
((['git', 'config', 'branch.feature.gerritserver'],),
'chromium-review.googlesource.com'),
]
cl = git_cl.Changelist(issue=123, codereview='gerrit')
cl._codereview_impl._GetChangeDetail = lambda _: {
'labels': {},
'current_revision': 'deadbeaf',
}
cl._codereview_impl._GetChangeCommit = lambda: {
'commit': 'deadbeef',
'web_links': [{'name': 'gitiles',
'url': 'https://git.googlesource.com/test/+/deadbeef'}],
}
cl._codereview_impl.SubmitIssue = lambda wait_for_merge: None
out = StringIO.StringIO()
self.mock(sys, 'stdout', out)
self.assertEqual(0, cl.CMDLand(force=True,
bypass_hooks=True,
verbose=True,
parallel=False))
self.assertRegexpMatches(out.getvalue(), 'Issue.*123 has been submitted')
self.assertRegexpMatches(out.getvalue(), 'Landed as: .*deadbeef')
BUILDBUCKET_BUILDS_MAP = {
'9000': {
'id': '9000',
'bucket': 'master.x.y',
'created_by': 'user:someone@chromium.org',
'created_ts': '147200002222000',
'experimental': False,
'parameters_json': json.dumps({
'builder_name': 'my-bot',
'properties': {'category': 'cq'},
}),
'status': 'STARTED',
'tags': [
'build_address:x.y/my-bot/2',
'builder:my-bot',
'experimental:false',
'user_agent:cq',
],
'url': 'http://build.cr.org/p/x.y/builders/my-bot/builds/2',
},
'8000': {
'id': '8000',
'bucket': 'master.x.y',
'created_by': 'user:someone@chromium.org',
'created_ts': '147200001111000',
'experimental': False,
'failure_reason': 'BUILD_FAILURE',
'parameters_json': json.dumps({
'builder_name': 'my-bot',
'properties': {'category': 'cq'},
}),
'result_details_json': json.dumps({
'properties': {'buildnumber': 1},
}),
'result': 'FAILURE',
'status': 'COMPLETED',
'tags': [
'build_address:x.y/my-bot/1',
'builder:my-bot',
'experimental:false',
'user_agent:cq',
],
'url': 'http://build.cr.org/p/x.y/builders/my-bot/builds/1',
},
}
def test_write_try_results_json(self):
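    # write_try_results_json should flatten the buildbucket builds map into
    # the summary format passed to write_json.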
expected_output = [
{
'bucket': 'master.x.y',
'buildbucket_id': '8000',
'builder_name': 'my-bot',
'created_ts': '147200001111000',
'experimental': False,
'failure_reason': 'BUILD_FAILURE',
'result': 'FAILURE',
'status': 'COMPLETED',
'tags': [
'build_address:x.y/my-bot/1',
'builder:my-bot',
'experimental:false',
'user_agent:cq',
],
'url': 'http://build.cr.org/p/x.y/builders/my-bot/builds/1',
},
{
'bucket': 'master.x.y',
'buildbucket_id': '9000',
'builder_name': 'my-bot',
'created_ts': '147200002222000',
'experimental': False,
'failure_reason': None,
'result': None,
'status': 'STARTED',
'tags': [
'build_address:x.y/my-bot/2',
'builder:my-bot',
'experimental:false',
'user_agent:cq',
],
'url': 'http://build.cr.org/p/x.y/builders/my-bot/builds/2',
},
]
self.calls = [(('write_json', 'output.json', expected_output), '')]
git_cl.write_try_results_json('output.json', self.BUILDBUCKET_BUILDS_MAP)
def _setup_fetch_try_jobs(self, most_recent_patchset=20001):
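    # Mocks stdout, patchset lookup and buildbucket access shared by the
    # try-results tests.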
out = StringIO.StringIO()
self.mock(sys, 'stdout', out)
self.mock(git_cl.Changelist, 'GetMostRecentPatchset',
lambda *args: most_recent_patchset)
self.mock(git_cl.auth, 'get_authenticator_for_host', lambda host, _cfg:
self._mocked_call(['get_authenticator_for_host', host]))
self.mock(git_cl, '_buildbucket_retry', lambda *_, **__:
self._mocked_call(['_buildbucket_retry']))
def _setup_fetch_try_jobs_gerrit(self, *request_results):
self._setup_fetch_try_jobs(most_recent_patchset=13)
self.calls += [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '1'),
# TODO(tandrii): Uncomment the below if we decide to support checking
# patchsets for Gerrit.
# Simulate that Gerrit has more patchsets than local.
# ((['git', 'config', 'branch.feature.gerritpatchset'],), '12'),
((['git', 'config', 'branch.feature.gerritserver'],),
'https://x-review.googlesource.com'),
((['get_authenticator_for_host', 'x-review.googlesource.com'],),
AuthenticatorMock()),
] + [((['_buildbucket_retry'],), r) for r in request_results]
def test_fetch_try_jobs_none_gerrit(self):
self._setup_fetch_try_jobs_gerrit({})
self.assertEqual(0, git_cl.main(['try-results']))
# TODO(tandrii): Uncomment the below if we decide to support checking
# patchsets for Gerrit.
# self.assertRegexpMatches(
# sys.stdout.getvalue(),
# r'Warning: Codereview server has newer patchsets \(13\)')
self.assertRegexpMatches(sys.stdout.getvalue(), 'No try jobs')
def test_fetch_try_jobs_some_gerrit(self):
self._setup_fetch_try_jobs_gerrit({
'builds': self.BUILDBUCKET_BUILDS_MAP.values(),
})
# TODO(tandrii): Uncomment the below if we decide to support checking
# patchsets for Gerrit.
# self.calls.remove(
# ((['git', 'config', 'branch.feature.gerritpatchset'],), '12'))
self.assertEqual(0, git_cl.main(['try-results', '--patchset', '5']))
# ... and doesn't result in warning.
self.assertNotRegexpMatches(sys.stdout.getvalue(), 'Warning')
self.assertRegexpMatches(sys.stdout.getvalue(), '^Failures:')
self.assertRegexpMatches(sys.stdout.getvalue(), 'Started:')
self.assertRegexpMatches(sys.stdout.getvalue(), '2 try jobs')
def _mock_gerrit_changes_for_detail_cache(self):
self.mock(git_cl._GerritChangelistImpl, '_GetGerritHost', lambda _: 'host')
def test_gerrit_change_detail_cache_simple(self):
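    # _GetChangeDetail results are cached per change; no_cache=True forces
    # a refetch and refreshes the cache.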
self._mock_gerrit_changes_for_detail_cache()
self.calls = [
(('GetChangeDetail', 'host', 'my%2Frepo~1', []), 'a'),
(('GetChangeDetail', 'host', 'ab%2Frepo~2', []), 'b'),
(('GetChangeDetail', 'host', 'ab%2Frepo~2', []), 'b2'),
]
cl1 = git_cl.Changelist(issue=1, codereview='gerrit')
cl1._cached_remote_url = (
True, 'https://chromium.googlesource.com/a/my/repo.git/')
cl2 = git_cl.Changelist(issue=2, codereview='gerrit')
cl2._cached_remote_url = (
True, 'https://chromium.googlesource.com/ab/repo')
self.assertEqual(cl1._GetChangeDetail(), 'a') # Miss.
self.assertEqual(cl1._GetChangeDetail(), 'a')
self.assertEqual(cl2._GetChangeDetail(), 'b') # Miss.
self.assertEqual(cl2._GetChangeDetail(no_cache=True), 'b2') # Miss.
self.assertEqual(cl1._GetChangeDetail(), 'a')
self.assertEqual(cl2._GetChangeDetail(), 'b2')
def test_gerrit_change_detail_cache_options(self):
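    # A detail fetched with a superset of options should satisfy later
    # requests for any subset; no_cache invalidates all cached entries.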
self._mock_gerrit_changes_for_detail_cache()
self.calls = [
(('GetChangeDetail', 'host', 'repo~1', ['C', 'A', 'B']), 'cab'),
(('GetChangeDetail', 'host', 'repo~1', ['A', 'D']), 'ad'),
(('GetChangeDetail', 'host', 'repo~1', ['A']), 'a'), # no_cache=True
# no longer in cache.
(('GetChangeDetail', 'host', 'repo~1', ['B']), 'b'),
]
cl = git_cl.Changelist(issue=1, codereview='gerrit')
cl._cached_remote_url = (True, 'https://chromium.googlesource.com/repo/')
self.assertEqual(cl._GetChangeDetail(options=['C', 'A', 'B']), 'cab')
self.assertEqual(cl._GetChangeDetail(options=['A', 'B', 'C']), 'cab')
self.assertEqual(cl._GetChangeDetail(options=['B', 'A']), 'cab')
self.assertEqual(cl._GetChangeDetail(options=['C']), 'cab')
self.assertEqual(cl._GetChangeDetail(options=['A']), 'cab')
self.assertEqual(cl._GetChangeDetail(), 'cab')
self.assertEqual(cl._GetChangeDetail(options=['A', 'D']), 'ad')
self.assertEqual(cl._GetChangeDetail(options=['A']), 'cab')
self.assertEqual(cl._GetChangeDetail(options=['D']), 'ad')
self.assertEqual(cl._GetChangeDetail(), 'cab')
# Finally, no_cache should invalidate all caches for given change.
self.assertEqual(cl._GetChangeDetail(options=['A'], no_cache=True), 'a')
self.assertEqual(cl._GetChangeDetail(options=['B']), 'b')
def test_gerrit_description_caching(self):
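    # The description is cached after the first fetch; force=True refetches
    # it from Gerrit.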
def gen_detail(rev, desc):
return {
'current_revision': rev,
'revisions': {rev: {'commit': {'message': desc}}}
}
self.calls = [
(('GetChangeDetail', 'host', 'my%2Frepo~1',
['CURRENT_REVISION', 'CURRENT_COMMIT']),
gen_detail('rev1', 'desc1')),
(('GetChangeDetail', 'host', 'my%2Frepo~1',
['CURRENT_REVISION', 'CURRENT_COMMIT']),
gen_detail('rev2', 'desc2')),
]
self._mock_gerrit_changes_for_detail_cache()
cl = git_cl.Changelist(issue=1, codereview='gerrit')
cl._cached_remote_url = (
True, 'https://chromium.googlesource.com/a/my/repo.git/')
self.assertEqual(cl.GetDescription(), 'desc1')
self.assertEqual(cl.GetDescription(), 'desc1') # cache hit.
self.assertEqual(cl.GetDescription(force=True), 'desc2')
def test_print_current_creds(self):
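    # print_current_creds should list googlesource.com credentials from
    # .gitcookies and, when requested, .netrc as an aligned table.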
class CookiesAuthenticatorMock(object):
def __init__(self):
self.gitcookies = {
'host.googlesource.com': ('user', 'pass'),
'host-review.googlesource.com': ('user', 'pass'),
}
self.netrc = self
self.netrc.hosts = {
'github.com': ('user2', None, 'pass2'),
'host2.googlesource.com': ('user3', None, 'pass'),
}
self.mock(git_cl.gerrit_util, 'CookiesAuthenticator',
CookiesAuthenticatorMock)
self.mock(sys, 'stdout', StringIO.StringIO())
git_cl._GitCookiesChecker().print_current_creds(include_netrc=True)
self.assertEqual(list(sys.stdout.getvalue().splitlines()), [
' Host\t User\t Which file',
'============================\t=====\t===========',
'host-review.googlesource.com\t user\t.gitcookies',
' host.googlesource.com\t user\t.gitcookies',
' host2.googlesource.com\tuser3\t .netrc',
])
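    # Reset the captured output (StringIO.StringIO keeps its data in .buf).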
sys.stdout.buf = ''
git_cl._GitCookiesChecker().print_current_creds(include_netrc=False)
self.assertEqual(list(sys.stdout.getvalue().splitlines()), [
' Host\tUser\t Which file',
'============================\t====\t===========',
'host-review.googlesource.com\tuser\t.gitcookies',
' host.googlesource.com\tuser\t.gitcookies',
])
def _common_creds_check_mocks(self):
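    # Mocks os.path.exists (for .netrc/.gitcookies only) and stdout for the
    # creds-check tests.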
def exists_mock(path):
dirname = os.path.dirname(path)
if dirname == os.path.expanduser('~'):
dirname = '~'
base = os.path.basename(path)
if base in ('.netrc', '.gitcookies'):
return self._mocked_call('os.path.exists', '%s/%s' % (dirname, base))
      # git cl also checks for other files that aren't relevant to this test.
return None
self.mock(os.path, 'exists', exists_mock)
self.mock(sys, 'stdout', StringIO.StringIO())
def test_creds_check_gitcookies_not_configured(self):
self._common_creds_check_mocks()
self.mock(git_cl._GitCookiesChecker, 'get_hosts_with_creds',
lambda _, include_netrc=False: [])
self.calls = [
((['git', 'config', '--path', 'http.cookiefile'],), CERR1),
((['git', 'config', '--global', 'http.cookiefile'],), CERR1),
(('os.path.exists', '~/.netrc'), True),
(('ask_for_data', 'Press Enter to setup .gitcookies, '
'or Ctrl+C to abort'), ''),
((['git', 'config', '--global', 'http.cookiefile',
os.path.expanduser('~/.gitcookies')], ), ''),
]
self.assertEqual(0, git_cl.main(['creds-check']))
self.assertRegexpMatches(
sys.stdout.getvalue(),
'^You seem to be using outdated .netrc for git credentials:')
self.assertRegexpMatches(
sys.stdout.getvalue(),
'\nConfigured git to use .gitcookies from')
def test_creds_check_gitcookies_configured_custom_broken(self):
self._common_creds_check_mocks()
self.mock(git_cl._GitCookiesChecker, 'get_hosts_with_creds',
lambda _, include_netrc=False: [])
self.calls = [
((['git', 'config', '--path', 'http.cookiefile'],), CERR1),
((['git', 'config', '--global', 'http.cookiefile'],),
'/custom/.gitcookies'),
(('os.path.exists', '/custom/.gitcookies'), False),
(('ask_for_data', 'Reconfigure git to use default .gitcookies? '
'Press Enter to reconfigure, or Ctrl+C to abort'), ''),
((['git', 'config', '--global', 'http.cookiefile',
os.path.expanduser('~/.gitcookies')], ), ''),
]
self.assertEqual(0, git_cl.main(['creds-check']))
self.assertRegexpMatches(
sys.stdout.getvalue(),
'WARNING: You have configured custom path to .gitcookies: ')
self.assertRegexpMatches(
sys.stdout.getvalue(),
'However, your configured .gitcookies file is missing.')
def test_git_cl_comment_add_gerrit(self):
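    # `git cl comment -a <msg>` should post the message through
    # gerrit_util.SetReview.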
self.mock(git_cl.gerrit_util, 'SetReview',
lambda host, change, msg, ready:
self._mocked_call('SetReview', host, change, msg, ready))
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), CERR1),
((['git', 'symbolic-ref', 'HEAD'],), CERR1),
((['git', 'config', 'rietveld.upstream-branch'],), CERR1),
((['git', 'branch', '-r'],), 'origin/HEAD -> origin/master\n'
'origin/master'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/infra/infra'),
(('SetReview', 'chromium-review.googlesource.com', 'infra%2Finfra~10',
'msg', None),
None),
]
self.assertEqual(0, git_cl.main(['comment', '--gerrit', '-i', '10',
'-a', 'msg']))
def test_git_cl_comments_fetch_gerrit(self):
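    # `git cl comments` should merge change messages with inline file
    # comments into per-message summaries, and dump them as JSON with -j.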
self.mock(sys, 'stdout', StringIO.StringIO())
self.calls = [
((['git', 'config', 'branch.foo.gerritserver'],), ''),
((['git', 'config', 'branch.foo.merge'],), ''),
((['git', 'config', 'rietveld.upstream-branch'],), CERR1),
((['git', 'branch', '-r'],), 'origin/HEAD -> origin/master\n'
'origin/master'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/infra/infra'),
(('GetChangeDetail', 'chromium-review.googlesource.com',
'infra%2Finfra~1',
['MESSAGES', 'DETAILED_ACCOUNTS', 'CURRENT_REVISION',
'CURRENT_COMMIT']), {
'owner': {'email': 'owner@example.com'},
'current_revision': 'ba5eba11',
'revisions': {
'deadbeaf': {
'_number': 1,
},
'ba5eba11': {
'_number': 2,
},
},
'messages': [
{
u'_revision_number': 1,
u'author': {
u'_account_id': 1111084,
u'email': u'commit-bot@chromium.org',
u'name': u'Commit Bot'
},
u'date': u'2017-03-15 20:08:45.000000000',
u'id': u'f5a6c25ecbd3b3b54a43ae418ed97eff046dc50b',
u'message': u'Patch Set 1:\n\nDry run: CQ is trying the patch...',
u'tag': u'autogenerated:cq:dry-run'
},
{
u'_revision_number': 2,
u'author': {
u'_account_id': 11151243,
u'email': u'owner@example.com',
u'name': u'owner'
},
u'date': u'2017-03-16 20:00:41.000000000',
u'id': u'f5a6c25ecbd3b3b54a43ae418ed97eff046d1234',
u'message': u'PTAL',
},
{
u'_revision_number': 2,
u'author': {
              u'_account_id': 148512,
u'email': u'reviewer@example.com',
u'name': u'reviewer'
},
u'date': u'2017-03-17 05:19:37.500000000',
u'id': u'f5a6c25ecbd3b3b54a43ae418ed97eff046d4568',
u'message': u'Patch Set 2: Code-Review+1',
},
]
}),
(('GetChangeComments', 'chromium-review.googlesource.com',
'infra%2Finfra~1'), {
'/COMMIT_MSG': [
{
'author': {'email': u'reviewer@example.com'},
'updated': u'2017-03-17 05:19:37.500000000',
'patch_set': 2,
'side': 'REVISION',
'message': 'Please include a bug link',
},
],
'codereview.settings': [
{
'author': {'email': u'owner@example.com'},
'updated': u'2017-03-16 20:00:41.000000000',
'patch_set': 2,
'side': 'PARENT',
'line': 42,
'message': 'I removed this because it is bad',
},
]
}),
(('GetChangeRobotComments', 'chromium-review.googlesource.com',
'infra%2Finfra~1'), {}),
((['git', 'config', 'branch.foo.gerritpatchset', '2'],), ''),
] * 2 + [
(('write_json', 'output.json', [
{
u'date': u'2017-03-16 20:00:41.000000',
u'message': (
u'PTAL\n' +
u'\n' +
u'codereview.settings\n' +
u' Base, Line 42: https://chromium-review.googlesource.com/' +
u'c/1/2/codereview.settings#b42\n' +
u' I removed this because it is bad\n'),
u'autogenerated': False,
u'approval': False,
u'disapproval': False,
u'sender': u'owner@example.com'
}, {
u'date': u'2017-03-17 05:19:37.500000',
u'message': (
u'Patch Set 2: Code-Review+1\n' +
u'\n' +
u'/COMMIT_MSG\n' +
u' PS2, File comment: https://chromium-review.googlesource' +
u'.com/c/1/2//COMMIT_MSG#\n' +
u' Please include a bug link\n'),
u'autogenerated': False,
u'approval': False,
u'disapproval': False,
u'sender': u'reviewer@example.com'
}
]),'')
]
expected_comments_summary = [
git_cl._CommentSummary(
message=(
u'PTAL\n' +
u'\n' +
u'codereview.settings\n' +
u' Base, Line 42: https://chromium-review.googlesource.com/' +
u'c/1/2/codereview.settings#b42\n' +
u' I removed this because it is bad\n'),
date=datetime.datetime(2017, 3, 16, 20, 0, 41, 0),
autogenerated=False,
disapproval=False, approval=False, sender=u'owner@example.com'),
git_cl._CommentSummary(
message=(
u'Patch Set 2: Code-Review+1\n' +
u'\n' +
u'/COMMIT_MSG\n' +
u' PS2, File comment: https://chromium-review.googlesource.com/' +
u'c/1/2//COMMIT_MSG#\n' +
u' Please include a bug link\n'),
date=datetime.datetime(2017, 3, 17, 5, 19, 37, 500000),
autogenerated=False,
disapproval=False, approval=False, sender=u'reviewer@example.com'),
]
cl = git_cl.Changelist(
codereview='gerrit', issue=1, branchref='refs/heads/foo')
self.assertEqual(cl.GetCommentsSummary(), expected_comments_summary)
self.mock(git_cl.Changelist, 'GetBranch', lambda _: 'foo')
self.assertEqual(
0, git_cl.main(['comments', '-i', '1', '-j', 'output.json']))
def test_git_cl_comments_robot_comments(self):
# git cl comments also fetches robot comments (which are considered a type
# of autogenerated comment), and unlike other types of comments, only robot
# comments from the latest patchset are shown.
self.mock(sys, 'stdout', StringIO.StringIO())
self.calls = [
((['git', 'config', 'branch.foo.gerritserver'],), ''),
((['git', 'config', 'branch.foo.merge'],), ''),
((['git', 'config', 'rietveld.upstream-branch'],), CERR1),
((['git', 'branch', '-r'],), 'origin/HEAD -> origin/master\n'
'origin/master'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/infra/infra'),
(('GetChangeDetail', 'chromium-review.googlesource.com',
'infra%2Finfra~1',
['MESSAGES', 'DETAILED_ACCOUNTS', 'CURRENT_REVISION',
'CURRENT_COMMIT']), {
'owner': {'email': 'owner@example.com'},
'current_revision': 'ba5eba11',
'revisions': {
'deadbeaf': {
'_number': 1,
},
'ba5eba11': {
'_number': 2,
},
},
'messages': [
{
u'_revision_number': 1,
u'author': {
u'_account_id': 1111084,
u'email': u'commit-bot@chromium.org',
u'name': u'Commit Bot'
},
u'date': u'2017-03-15 20:08:45.000000000',
u'id': u'f5a6c25ecbd3b3b54a43ae418ed97eff046dc50b',
u'message': u'Patch Set 1:\n\nDry run: CQ is trying the patch...',
u'tag': u'autogenerated:cq:dry-run'
},
{
u'_revision_number': 1,
u'author': {
u'_account_id': 123,
u'email': u'tricium@serviceaccount.com',
u'name': u'Tricium'
},
u'date': u'2017-03-16 20:00:41.000000000',
u'id': u'f5a6c25ecbd3b3b54a43ae418ed97eff046d1234',
u'message': u'(1 comment)',
u'tag': u'autogenerated:tricium',
},
{
u'_revision_number': 1,
u'author': {
u'_account_id': 123,
u'email': u'tricium@serviceaccount.com',
u'name': u'Tricium'
},
u'date': u'2017-03-16 20:00:41.000000000',
u'id': u'f5a6c25ecbd3b3b54a43ae418ed97eff046d1234',
u'message': u'(1 comment)',
u'tag': u'autogenerated:tricium',
},
{
u'_revision_number': 2,
u'author': {
              u'_account_id': 123,
u'email': u'tricium@serviceaccount.com',
u'name': u'reviewer'
},
u'date': u'2017-03-17 05:30:37.000000000',
u'tag': u'autogenerated:tricium',
u'id': u'f5a6c25ecbd3b3b54a43ae418ed97eff046d4568',
u'message': u'(1 comment)',
},
]
}),
(('GetChangeComments', 'chromium-review.googlesource.com',
'infra%2Finfra~1'), {}),
(('GetChangeRobotComments', 'chromium-review.googlesource.com',
'infra%2Finfra~1'), {
'codereview.settings': [
{
u'author': {u'email': u'tricium@serviceaccount.com'},
u'updated': u'2017-03-17 05:30:37.000000000',
u'robot_run_id': u'5565031076855808',
u'robot_id': u'Linter/Category',
u'tag': u'autogenerated:tricium',
u'patch_set': 2,
u'side': u'REVISION',
u'message': u'Linter warning message text',
u'line': 32,
},
],
}),
((['git', 'config', 'branch.foo.gerritpatchset', '2'],), ''),
]
expected_comments_summary = [
git_cl._CommentSummary(date=datetime.datetime(2017, 3, 17, 5, 30, 37),
message=(
u'(1 comment)\n\ncodereview.settings\n'
u' PS2, Line 32: https://chromium-review.googlesource.com/'
u'c/1/2/codereview.settings#32\n'
u' Linter warning message text\n'),
sender=u'tricium@serviceaccount.com',
autogenerated=True, approval=False, disapproval=False)
]
cl = git_cl.Changelist(
codereview='gerrit', issue=1, branchref='refs/heads/foo')
self.assertEqual(cl.GetCommentsSummary(), expected_comments_summary)
def test_get_remote_url_with_mirror(self):
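    # When remote.origin.url points at a local cache directory, GetRemoteUrl
    # should resolve (and cache) the mirror's upstream URL.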
original_os_path_isdir = os.path.isdir
def selective_os_path_isdir_mock(path):
if path == '/cache/this-dir-exists':
return self._mocked_call('os.path.isdir', path)
return original_os_path_isdir(path)
self.mock(os.path, 'isdir', selective_os_path_isdir_mock)
url = 'https://chromium.googlesource.com/my/repo'
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.merge'],), 'master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'/cache/this-dir-exists'),
(('os.path.isdir', '/cache/this-dir-exists'),
True),
# Runs in /cache/this-dir-exists.
((['git', 'config', 'remote.origin.url'],),
url),
]
cl = git_cl.Changelist(codereview='gerrit', issue=1)
self.assertEqual(cl.GetRemoteUrl(), url)
self.assertEqual(cl.GetRemoteUrl(), url) # Must be cached.
def test_get_remote_url_non_existing_mirror(self):
original_os_path_isdir = os.path.isdir
def selective_os_path_isdir_mock(path):
if path == '/cache/this-dir-doesnt-exist':
return self._mocked_call('os.path.isdir', path)
return original_os_path_isdir(path)
self.mock(os.path, 'isdir', selective_os_path_isdir_mock)
self.mock(logging, 'error',
lambda fmt, *a: self._mocked_call('logging.error', fmt % a))
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.merge'],), 'master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'/cache/this-dir-doesnt-exist'),
(('os.path.isdir', '/cache/this-dir-doesnt-exist'),
False),
(('logging.error',
'Remote "origin" for branch "/cache/this-dir-doesnt-exist" points to'
' "master", but it doesn\'t exist.'), None),
]
cl = git_cl.Changelist(codereview='gerrit', issue=1)
self.assertIsNone(cl.GetRemoteUrl())
def test_get_remote_url_misconfigured_mirror(self):
original_os_path_isdir = os.path.isdir
def selective_os_path_isdir_mock(path):
if path == '/cache/this-dir-exists':
return self._mocked_call('os.path.isdir', path)
return original_os_path_isdir(path)
self.mock(os.path, 'isdir', selective_os_path_isdir_mock)
self.mock(logging, 'error',
lambda *a: self._mocked_call('logging.error', *a))
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.merge'],), 'master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'/cache/this-dir-exists'),
(('os.path.isdir', '/cache/this-dir-exists'), True),
# Runs in /cache/this-dir-exists.
((['git', 'config', 'remote.origin.url'],), ''),
(('logging.error',
'Remote "%(remote)s" for branch "%(branch)s" points to '
'"%(cache_path)s", but it is misconfigured.\n'
'"%(cache_path)s" must be a git repo and must have a remote named '
'"%(remote)s" pointing to the git host.', {
'remote': 'origin',
'cache_path': '/cache/this-dir-exists',
'branch': 'master'}
), None),
]
cl = git_cl.Changelist(codereview='gerrit', issue=1)
self.assertIsNone(cl.GetRemoteUrl())
def test_gerrit_change_identifier_with_project(self):
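    # The change identifier should include the URL-encoded project when the
    # remote URL is known.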
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.merge'],), 'master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/a/my/repo.git/'),
]
cl = git_cl.Changelist(codereview='gerrit', issue=123456)
self.assertEqual(cl._GerritChangeIdentifier(), 'my%2Frepo~123456')
def test_gerrit_change_identifier_without_project(self):
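    # Falls back to the bare issue number when the remote URL is unknown.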
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.merge'],), 'master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],), CERR1),
]
cl = git_cl.Changelist(codereview='gerrit', issue=123456)
self.assertEqual(cl._GerritChangeIdentifier(), '123456')
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG if '-v' in sys.argv else logging.ERROR)
unittest.main()
| 38.759643
| 80
| 0.577042
|
import contextlib
import datetime
import json
import logging
import os
import StringIO
import sys
import tempfile
import unittest
import urlparse
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from testing_support.auto_stub import TestCase
import metrics
metrics.DISABLE_METRICS_COLLECTION = True
import gerrit_util
import git_cl
import git_common
import git_footers
import subprocess2
def callError(code=1, cmd='', cwd='', stdout='', stderr=''):
return subprocess2.CalledProcessError(code, cmd, cwd, stdout, stderr)
CERR1 = callError(1)
def MakeNamedTemporaryFileMock(expected_content):
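  # Returns a NamedTemporaryFile stand-in whose write() asserts that the
  # written content matches expected_content.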
class NamedTemporaryFileMock(object):
def __init__(self, *args, **kwargs):
self.name = '/tmp/named'
self.expected_content = expected_content
def __enter__(self):
return self
def __exit__(self, _type, _value, _tb):
pass
def write(self, content):
if self.expected_content:
assert content == self.expected_content
def close(self):
pass
return NamedTemporaryFileMock
class ChangelistMock(object):
  # Class-level so tests can read the description even without access to the
  # instance that's being set.
desc = ""
def __init__(self, **kwargs):
pass
def GetIssue(self):
return 1
def GetDescription(self, force=False):
return ChangelistMock.desc
def UpdateDescription(self, desc, force=False):
ChangelistMock.desc = desc
class PresubmitMock(object):
def __init__(self, *args, **kwargs):
self.reviewers = []
self.more_cc = ['chromium-reviews+test-more-cc@chromium.org']
@staticmethod
def should_continue():
return True
class GitCheckoutMock(object):
def __init__(self, *args, **kwargs):
pass
@staticmethod
def reset():
GitCheckoutMock.conflict = False
def apply_patch(self, p):
if GitCheckoutMock.conflict:
raise Exception('failed')
class WatchlistsMock(object):
def __init__(self, _):
pass
@staticmethod
def GetWatchersForPaths(_):
return ['joe@example.com']
class CodereviewSettingsFileMock(object):
def __init__(self):
pass
def read(self):
return ("CODE_REVIEW_SERVER: gerrit.chromium.org\n" +
"GERRIT_HOST: True\n")
class AuthenticatorMock(object):
def __init__(self, *_args):
pass
def has_cached_credentials(self):
return True
def authorize(self, http):
return http
def CookiesAuthenticatorMockFactory(hosts_with_creds=None, same_auth=False):
class CookiesAuthenticatorMock(git_cl.gerrit_util.CookiesAuthenticator):
def __init__(self):
pass
@classmethod
def get_gitcookies_path(cls):
return '~/.gitcookies'
@classmethod
def get_netrc_path(cls):
return '~/.netrc'
def _get_auth_for_host(self, host):
if same_auth:
return same_auth
return (hosts_with_creds or {}).get(host)
return CookiesAuthenticatorMock
class MockChangelistWithBranchAndIssue():
def __init__(self, branch, issue):
self.branch = branch
self.issue = issue
def GetBranch(self):
return self.branch
def GetIssue(self):
return self.issue
class SystemExitMock(Exception):
pass
class TestGitClBasic(unittest.TestCase):
def test_get_description(self):
cl = git_cl.Changelist(issue=1, codereview='gerrit',
codereview_host='host')
cl.description = 'x'
cl.has_description = True
cl._codereview_impl.FetchDescription = lambda *a, **kw: 'y'
self.assertEquals(cl.GetDescription(), 'x')
self.assertEquals(cl.GetDescription(force=True), 'y')
self.assertEquals(cl.GetDescription(), 'y')
def test_description_footers(self):
cl = git_cl.Changelist(issue=1, codereview='gerrit',
codereview_host='host')
cl.description = '\n'.join([
'This is some message',
'',
'It has some lines',
'and, also',
'',
'Some: Really',
'Awesome: Footers',
])
cl.has_description = True
cl._codereview_impl.UpdateDescriptionRemote = lambda *a, **kw: 'y'
msg, footers = cl.GetDescriptionFooters()
self.assertEquals(
msg, ['This is some message', '', 'It has some lines', 'and, also'])
self.assertEquals(footers, [('Some', 'Really'), ('Awesome', 'Footers')])
msg.append('wut')
footers.append(('gnarly-dude', 'beans'))
cl.UpdateDescriptionFooters(msg, footers)
self.assertEquals(cl.GetDescription().splitlines(), [
'This is some message',
'',
'It has some lines',
'and, also',
      'wut',
'Some: Really',
'Awesome: Footers',
'Gnarly-Dude: beans',
])
def test_get_bug_line_values(self):
f = lambda p, bugs: list(git_cl._get_bug_line_values(p, bugs))
self.assertEqual(f('', ''), [])
self.assertEqual(f('', '123,v8:456'), ['123', 'v8:456'])
self.assertEqual(f('v8', '456'), ['v8:456'])
self.assertEqual(f('v8', 'chromium:123,456'), ['v8:456', 'chromium:123'])
self.assertEqual(f('v8', 'chromium:123,456,v8:123'),
['v8:456', 'chromium:123', 'v8:123'])
def _test_git_number(self, parent_msg, dest_ref, child_msg,
parent_hash='parenthash'):
desc = git_cl.ChangeDescription(child_msg)
desc.update_with_git_number_footers(parent_hash, parent_msg, dest_ref)
return desc.description
def assertEqualByLine(self, actual, expected):
self.assertEqual(actual.splitlines(), expected.splitlines())
def test_git_number_bad_parent(self):
with self.assertRaises(ValueError):
self._test_git_number('Parent', 'refs/heads/master', 'Child')
def test_git_number_bad_parent_footer(self):
with self.assertRaises(AssertionError):
self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: wrong',
'refs/heads/master', 'Child')
def test_git_number_bad_lineage_ignored(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#1}\n'
'Cr-Branched-From: mustBeReal40CharHash-branch@{#pos}',
'refs/heads/master', 'Child')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#2}\n'
'Cr-Branched-From: mustBeReal40CharHash-branch@{#pos}')
def test_git_number_same_branch(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#12}',
dest_ref='refs/heads/master',
child_msg='Child')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#13}')
def test_git_number_same_branch_mixed_footers(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#12}',
dest_ref='refs/heads/master',
child_msg='Child\n'
'\n'
'Broken-by: design\n'
'BUG=123')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Broken-by: design\n'
'BUG=123\n'
'Cr-Commit-Position: refs/heads/master@{#13}')
def test_git_number_same_branch_with_originals(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#12}',
dest_ref='refs/heads/master',
child_msg='Child\n'
'\n'
'Some users are smart and insert their own footers\n'
'\n'
'Cr-Whatever: value\n'
'Cr-Commit-Position: refs/copy/paste@{#22}')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Some users are smart and insert their own footers\n'
'\n'
'Cr-Original-Whatever: value\n'
'Cr-Original-Commit-Position: refs/copy/paste@{#22}\n'
'Cr-Commit-Position: refs/heads/master@{#13}')
def test_git_number_new_branch(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#12}',
dest_ref='refs/heads/branch',
child_msg='Child')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Cr-Commit-Position: refs/heads/branch@{#1}\n'
'Cr-Branched-From: parenthash-refs/heads/master@{#12}')
def test_git_number_lineage(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/branch@{#1}\n'
'Cr-Branched-From: somehash-refs/heads/master@{#12}',
dest_ref='refs/heads/branch',
child_msg='Child')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Cr-Commit-Position: refs/heads/branch@{#2}\n'
'Cr-Branched-From: somehash-refs/heads/master@{#12}')
def test_git_number_moooooooore_lineage(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/branch@{#5}\n'
'Cr-Branched-From: somehash-refs/heads/master@{#12}',
dest_ref='refs/heads/mooore',
child_msg='Child')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Cr-Commit-Position: refs/heads/mooore@{#1}\n'
'Cr-Branched-From: parenthash-refs/heads/branch@{#5}\n'
'Cr-Branched-From: somehash-refs/heads/master@{#12}')
def test_git_number_ever_moooooooore_lineage(self):
self.maxDiff = 10000
actual = self._test_git_number(
'CQ commit on fresh new branch + numbering.\n'
'\n'
'NOTRY=True\n'
'NOPRESUBMIT=True\n'
'BUG=\n'
'\n'
'Review-Url: https://codereview.chromium.org/2577703003\n'
'Cr-Commit-Position: refs/heads/gnumb-test/br@{#1}\n'
'Cr-Branched-From: 0749ff9edc-refs/heads/gnumb-test/cq@{#4}\n'
'Cr-Branched-From: 5c49df2da6-refs/heads/master@{#41618}',
dest_ref='refs/heads/gnumb-test/cl',
child_msg='git cl on fresh new branch + numbering.\n'
'\n'
'Review-Url: https://codereview.chromium.org/2575043003 .\n')
self.assertEqualByLine(
actual,
'git cl on fresh new branch + numbering.\n'
'\n'
'Review-Url: https://codereview.chromium.org/2575043003 .\n'
'Cr-Commit-Position: refs/heads/gnumb-test/cl@{#1}\n'
'Cr-Branched-From: parenthash-refs/heads/gnumb-test/br@{#1}\n'
'Cr-Branched-From: 0749ff9edc-refs/heads/gnumb-test/cq@{#4}\n'
'Cr-Branched-From: 5c49df2da6-refs/heads/master@{#41618}')
def test_git_number_cherry_pick(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/branch@{#1}\n'
'Cr-Branched-From: somehash-refs/heads/master@{#12}',
dest_ref='refs/heads/branch',
child_msg='Child, which is cherry-pick from master\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#100}\n'
'(cherry picked from commit deadbeef12345678deadbeef12345678deadbeef)')
self.assertEqualByLine(
actual,
'Child, which is cherry-pick from master\n'
'\n'
'(cherry picked from commit deadbeef12345678deadbeef12345678deadbeef)\n'
'\n'
'Cr-Original-Commit-Position: refs/heads/master@{#100}\n'
'Cr-Commit-Position: refs/heads/branch@{#2}\n'
'Cr-Branched-From: somehash-refs/heads/master@{#12}')
def test_gerrit_mirror_hack(self):
cr = 'chromium-review.googlesource.com'
url0 = 'https://%s/a/changes/x?a=b' % cr
origMirrors = git_cl.gerrit_util._GERRIT_MIRROR_PREFIXES
try:
git_cl.gerrit_util._GERRIT_MIRROR_PREFIXES = ['us1', 'us2']
url1 = git_cl.gerrit_util._UseGerritMirror(url0, cr)
url2 = git_cl.gerrit_util._UseGerritMirror(url1, cr)
url3 = git_cl.gerrit_util._UseGerritMirror(url2, cr)
self.assertNotEqual(url1, url2)
self.assertEqual(sorted((url1, url2)), [
'https://us1-mirror-chromium-review.googlesource.com/a/changes/x?a=b',
'https://us2-mirror-chromium-review.googlesource.com/a/changes/x?a=b'])
self.assertEqual(url1, url3)
finally:
git_cl.gerrit_util._GERRIT_MIRROR_PREFIXES = origMirrors
def test_valid_accounts(self):
mock_per_account = {
'u1': None,
'u2': {
'_account_id': 123124,
'avatars': [],
'email': 'u2@example.com',
'name': 'User Number 2',
'status': 'OOO',
},
'u3': git_cl.gerrit_util.GerritError(500, 'retries didn\'t help :('),
}
def GetAccountDetailsMock(_, account):
v = mock_per_account.pop(account)
if isinstance(v, Exception):
raise v
return v
original = git_cl.gerrit_util.GetAccountDetails
try:
git_cl.gerrit_util.GetAccountDetails = GetAccountDetailsMock
actual = git_cl.gerrit_util.ValidAccounts(
'host', ['u1', 'u2', 'u3'], max_threads=1)
finally:
git_cl.gerrit_util.GetAccountDetails = original
self.assertEqual(actual, {
'u2': {
'_account_id': 123124,
'avatars': [],
'email': 'u2@example.com',
'name': 'User Number 2',
'status': 'OOO',
},
})
class TestParseIssueURL(unittest.TestCase):
def _validate(self, parsed, issue=None, patchset=None, hostname=None,
codereview=None, fail=False):
self.assertIsNotNone(parsed)
if fail:
self.assertFalse(parsed.valid)
return
self.assertTrue(parsed.valid)
self.assertEqual(parsed.issue, issue)
self.assertEqual(parsed.patchset, patchset)
self.assertEqual(parsed.hostname, hostname)
self.assertEqual(parsed.codereview, codereview)
def _run_and_validate(self, func, url, *args, **kwargs):
result = func(urlparse.urlparse(url))
if kwargs.pop('fail', False):
self.assertIsNone(result)
return None
self._validate(result, *args, fail=False, **kwargs)
def test_gerrit(self):
def test(url, *args, **kwargs):
self._run_and_validate(git_cl._GerritChangelistImpl.ParseIssueURL, url,
*args, codereview='gerrit', **kwargs)
test('http://chrome-review.source.com/c/123',
123, None, 'chrome-review.source.com')
test('https://chrome-review.source.com/c/123/',
123, None, 'chrome-review.source.com')
test('https://chrome-review.source.com/c/123/4',
123, 4, 'chrome-review.source.com')
test('https://chrome-review.source.com/#/c/123/4',
123, 4, 'chrome-review.source.com')
test('https://chrome-review.source.com/c/123/4',
123, 4, 'chrome-review.source.com')
test('https://chrome-review.source.com/123',
123, None, 'chrome-review.source.com')
test('https://chrome-review.source.com/123/4',
123, 4, 'chrome-review.source.com')
test('https://chrome-review.source.com/c/123/1/whatisthis', fail=True)
test('https://chrome-review.source.com/c/abc/', fail=True)
test('ssh://chrome-review.source.com/c/123/1/', fail=True)
def test_ParseIssueNumberArgument(self):
def test(arg, *args, **kwargs):
codereview_hint = kwargs.pop('hint', None)
self._validate(git_cl.ParseIssueNumberArgument(arg, codereview_hint),
*args, **kwargs)
test('123', 123)
test('', fail=True)
test('abc', fail=True)
test('123/1', fail=True)
test('123a', fail=True)
test('ssh://chrome-review.source.com/#/c/123/4/', fail=True)
test('https://codereview.source.com/123',
123, None, 'codereview.source.com', 'gerrit',
hint='gerrit')
test('https://codereview.source.com/123',
123, None, 'codereview.source.com', 'gerrit')
test('https://chrome-review.source.com/c/123/4',
123, 4, 'chrome-review.source.com', 'gerrit')
test('https://chrome-review.source.com/bad/123/4', fail=True)
class GitCookiesCheckerTest(TestCase):
def setUp(self):
super(GitCookiesCheckerTest, self).setUp()
self.c = git_cl._GitCookiesChecker()
self.c._all_hosts = []
def mock_hosts_creds(self, subhost_identity_pairs):
def ensure_googlesource(h):
if not h.endswith(self.c._GOOGLESOURCE):
assert not h.endswith('.')
return h + '.' + self.c._GOOGLESOURCE
return h
self.c._all_hosts = [(ensure_googlesource(h), i, '.gitcookies')
for h, i in subhost_identity_pairs]
def test_identity_parsing(self):
self.assertEqual(self.c._parse_identity('ldap.google.com'),
('ldap', 'google.com'))
self.assertEqual(self.c._parse_identity('git-ldap.example.com'),
('ldap', 'example.com'))
self.assertEqual(self.c._parse_identity('git-note.period.chromium.org'),
('note.period', 'chromium.org'))
self.assertEqual(self.c._parse_identity('git-note.period.example.com'),
('note', 'period.example.com'))
def test_analysis_nothing(self):
self.c._all_hosts = []
self.assertFalse(self.c.has_generic_host())
self.assertEqual(set(), self.c.get_conflicting_hosts())
self.assertEqual(set(), self.c.get_duplicated_hosts())
self.assertEqual(set(), self.c.get_partially_configured_hosts())
self.assertEqual(set(), self.c.get_hosts_with_wrong_identities())
def test_analysis(self):
self.mock_hosts_creds([
('.googlesource.com', 'git-example.chromium.org'),
('chromium', 'git-example.google.com'),
('chromium-review', 'git-example.google.com'),
('chrome-internal', 'git-example.chromium.org'),
('chrome-internal-review', 'git-example.chromium.org'),
('conflict', 'git-example.google.com'),
('conflict-review', 'git-example.chromium.org'),
('dup', 'git-example.google.com'),
('dup', 'git-example.google.com'),
('dup-review', 'git-example.google.com'),
('partial', 'git-example.google.com'),
('gpartial-review', 'git-example.google.com'),
])
self.assertTrue(self.c.has_generic_host())
self.assertEqual(set(['conflict.googlesource.com']),
self.c.get_conflicting_hosts())
self.assertEqual(set(['dup.googlesource.com']),
self.c.get_duplicated_hosts())
self.assertEqual(set(['partial.googlesource.com',
'gpartial-review.googlesource.com']),
self.c.get_partially_configured_hosts())
self.assertEqual(set(['chromium.googlesource.com',
'chrome-internal.googlesource.com']),
self.c.get_hosts_with_wrong_identities())
def test_report_no_problems(self):
self.test_analysis_nothing()
self.mock(sys, 'stdout', StringIO.StringIO())
self.assertFalse(self.c.find_and_report_problems())
self.assertEqual(sys.stdout.getvalue(), '')
def test_report(self):
self.test_analysis()
self.mock(sys, 'stdout', StringIO.StringIO())
self.mock(git_cl.gerrit_util.CookiesAuthenticator, 'get_gitcookies_path',
classmethod(lambda _: '~/.gitcookies'))
self.assertTrue(self.c.find_and_report_problems())
with open(os.path.join(os.path.dirname(__file__),
'git_cl_creds_check_report.txt')) as f:
expected = f.read()
def by_line(text):
return [l.rstrip() for l in text.rstrip().splitlines()]
self.maxDiff = 10000
self.assertEqual(by_line(sys.stdout.getvalue().strip()), by_line(expected))
class TestGitCl(TestCase):
def setUp(self):
super(TestGitCl, self).setUp()
self.calls = []
self._calls_done = []
self.mock(git_cl, 'time_time',
lambda: self._mocked_call('time.time'))
self.mock(git_cl.metrics.collector, 'add_repeated',
lambda *a: self._mocked_call('add_repeated', *a))
self.mock(subprocess2, 'call', self._mocked_call)
self.mock(subprocess2, 'check_call', self._mocked_call)
self.mock(subprocess2, 'check_output', self._mocked_call)
self.mock(subprocess2, 'communicate',
lambda *a, **kw: ([self._mocked_call(*a, **kw), ''], 0))
self.mock(git_cl.gclient_utils, 'CheckCallAndFilter', self._mocked_call)
self.mock(git_common, 'is_dirty_git_tree', lambda x: False)
self.mock(git_common, 'get_or_create_merge_base',
lambda *a: (
self._mocked_call(['get_or_create_merge_base']+list(a))))
self.mock(git_cl, 'BranchExists', lambda _: True)
self.mock(git_cl, 'FindCodereviewSettingsFile', lambda: '')
self.mock(git_cl, 'SaveDescriptionBackup', lambda _:
self._mocked_call('SaveDescriptionBackup'))
self.mock(git_cl, 'ask_for_data', lambda *a, **k: self._mocked_call(
*(['ask_for_data'] + list(a)), **k))
self.mock(git_cl, 'write_json', lambda path, contents:
self._mocked_call('write_json', path, contents))
self.mock(git_cl.presubmit_support, 'DoPresubmitChecks', PresubmitMock)
self.mock(git_cl.checkout, 'GitCheckout', GitCheckoutMock)
GitCheckoutMock.reset()
self.mock(git_cl.watchlists, 'Watchlists', WatchlistsMock)
self.mock(git_cl.auth, 'get_authenticator_for_host', AuthenticatorMock)
self.mock(git_cl.gerrit_util, 'GetChangeDetail',
lambda *args, **kwargs: self._mocked_call(
'GetChangeDetail', *args, **kwargs))
self.mock(git_cl.gerrit_util, 'GetChangeComments',
lambda *args, **kwargs: self._mocked_call(
'GetChangeComments', *args, **kwargs))
self.mock(git_cl.gerrit_util, 'GetChangeRobotComments',
lambda *args, **kwargs: self._mocked_call(
'GetChangeRobotComments', *args, **kwargs))
self.mock(git_cl.gerrit_util, 'AddReviewers',
lambda h, i, reviewers, ccs, notify: self._mocked_call(
'AddReviewers', h, i, reviewers, ccs, notify))
self.mock(git_cl.gerrit_util, 'SetReview',
lambda h, i, msg=None, labels=None, notify=None:
self._mocked_call('SetReview', h, i, msg, labels, notify))
self.mock(git_cl.gerrit_util.LuciContextAuthenticator, 'is_luci',
staticmethod(lambda: False))
self.mock(git_cl.gerrit_util.GceAuthenticator, 'is_gce',
classmethod(lambda _: False))
self.mock(git_cl.gerrit_util, 'ValidAccounts',
lambda host, accounts:
self._mocked_call('ValidAccounts', host, accounts))
self.mock(git_cl, 'DieWithError',
lambda msg, change=None: self._mocked_call(['DieWithError', msg]))
git_cl.settings = None
def tearDown(self):
try:
self.assertEquals([], self.calls)
except AssertionError:
if not self.has_failed():
raise
# Sadly, has_failed() returns True if this OR any other tests before this
# one have failed.
git_cl.logging.error(
'!!!!!! IF YOU SEE THIS, READ BELOW, IT WILL SAVE YOUR TIME !!!!!\n'
'There are un-consumed self.calls after this test has finished.\n'
'If you don\'t know which test this is, run:\n'
' tests/git_cl_tests.py -v\n'
'If you are already running only this test, then **first** fix the '
'problem whose exception is emitted below by unittest runner.\n'
'Else, to be sure what\'s going on, run this test **alone** with \n'
' tests/git_cl_tests.py TestGitCl.<name>\n'
'and follow instructions above.\n' +
'=' * 80)
finally:
super(TestGitCl, self).tearDown()
def _mocked_call(self, *args, **_kwargs):
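    # Pops the next expected call from self.calls, fails with context if the
    # actual arguments differ, and returns (or raises) the canned result.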
self.assertTrue(
self.calls,
'@%d Expected: <Missing> Actual: %r' % (len(self._calls_done), args))
top = self.calls.pop(0)
expected_args, result = top
    # Also log the mismatch; otherwise it could get caught in a try/finally
    # and be hard to diagnose.
if expected_args != args:
N = 5
prior_calls = '\n '.join(
'@%d: %r' % (len(self._calls_done) - N + i, c[0])
for i, c in enumerate(self._calls_done[-N:]))
following_calls = '\n '.join(
'@%d: %r' % (len(self._calls_done) + i + 1, c[0])
for i, c in enumerate(self.calls[:N]))
extended_msg = (
'A few prior calls:\n %s\n\n'
'This (expected):\n @%d: %r\n'
'This (actual):\n @%d: %r\n\n'
'A few following expected calls:\n %s' %
(prior_calls, len(self._calls_done), expected_args,
len(self._calls_done), args, following_calls))
git_cl.logging.error(extended_msg)
self.fail('@%d\n'
' Expected: %r\n'
' Actual: %r' % (
len(self._calls_done), expected_args, args))
self._calls_done.append(top)
if isinstance(result, Exception):
raise result
return result
def test_ask_for_explicit_yes_true(self):
self.calls = [
(('ask_for_data', 'prompt [Yes/No]: '), 'blah'),
(('ask_for_data', 'Please, type yes or no: '), 'ye'),
]
self.assertTrue(git_cl.ask_for_explicit_yes('prompt'))
def test_LoadCodereviewSettingsFromFile_gerrit(self):
codereview_file = StringIO.StringIO('GERRIT_HOST: true')
self.calls = [
((['git', 'config', '--unset-all', 'rietveld.cc'],), CERR1),
((['git', 'config', '--unset-all', 'rietveld.tree-status-url'],), CERR1),
((['git', 'config', '--unset-all', 'rietveld.viewvc-url'],), CERR1),
((['git', 'config', '--unset-all', 'rietveld.bug-prefix'],), CERR1),
((['git', 'config', '--unset-all', 'rietveld.cpplint-regex'],), CERR1),
((['git', 'config', '--unset-all', 'rietveld.cpplint-ignore-regex'],),
CERR1),
((['git', 'config', '--unset-all', 'rietveld.run-post-upload-hook'],),
CERR1),
((['git', 'config', 'gerrit.host', 'true'],), ''),
]
self.assertIsNone(git_cl.LoadCodereviewSettingsFromFile(codereview_file))
@classmethod
def _is_gerrit_calls(cls, gerrit=False):
return [((['git', 'config', 'rietveld.autoupdate'],), ''),
((['git', 'config', 'gerrit.host'],), 'True' if gerrit else '')]
@classmethod
def _git_post_upload_calls(cls):
return [
((['git', 'rev-parse', 'HEAD'],), 'hash'),
((['git', 'symbolic-ref', 'HEAD'],), 'hash'),
((['git',
'config', 'branch.hash.last-upload-hash', 'hash'],), ''),
((['git', 'config', 'rietveld.run-post-upload-hook'],), ''),
]
@staticmethod
def _git_sanity_checks(diff_base, working_branch, get_remote_branch=True):
fake_ancestor = 'fake_ancestor'
fake_cl = 'fake_cl_for_patch'
return [
((['git',
'rev-parse', '--verify', diff_base],), fake_ancestor),
((['git',
'merge-base', fake_ancestor, 'HEAD'],), fake_ancestor),
((['git',
'rev-list', '^' + fake_ancestor, 'HEAD'],), fake_cl),
# Mock a config miss (error code 1)
((['git',
'config', 'gitcl.remotebranch'],), CERR1),
] + ([
# Call to GetRemoteBranch()
((['git',
'config', 'branch.%s.merge' % working_branch],),
'refs/heads/master'),
((['git',
'config', 'branch.%s.remote' % working_branch],), 'origin'),
] if get_remote_branch else []) + [
((['git', 'rev-list', '^' + fake_ancestor,
'refs/remotes/origin/master'],), ''),
]
@classmethod
def _gerrit_ensure_auth_calls(
cls, issue=None, skip_auth_check=False, short_hostname='chromium'):
cmd = ['git', 'config', '--bool', 'gerrit.skip-ensure-authenticated']
if skip_auth_check:
return [((cmd, ), 'true')]
calls = [((cmd, ), CERR1)]
if issue:
calls.extend([
((['git', 'config', 'branch.master.gerritserver'],), CERR1),
])
calls.extend([
((['git', 'config', 'branch.master.merge'],), 'refs/heads/master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://%s.googlesource.com/my/repo' % short_hostname),
])
return calls
@classmethod
def _gerrit_base_calls(cls, issue=None, fetched_description=None,
fetched_status=None, other_cl_owner=None,
custom_cl_base=None, short_hostname='chromium'):
calls = cls._is_gerrit_calls(True)
calls += [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.gerritissue'],),
CERR1 if issue is None else str(issue)),
]
if custom_cl_base:
ancestor_revision = custom_cl_base
else:
# Determine ancestor_revision to be merge base.
ancestor_revision = 'fake_ancestor_sha'
calls += [
((['git', 'config', 'branch.master.merge'],), 'refs/heads/master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['get_or_create_merge_base', 'master',
'refs/remotes/origin/master'],), ancestor_revision),
]
# Calls to verify branch point is ancestor
calls += cls._gerrit_ensure_auth_calls(
issue=issue, short_hostname=short_hostname)
if issue:
calls += [
(('GetChangeDetail', '%s-review.googlesource.com' % short_hostname,
'my%2Frepo~123456',
['DETAILED_ACCOUNTS', 'CURRENT_REVISION', 'CURRENT_COMMIT', 'LABELS']
),
{
'owner': {'email': (other_cl_owner or 'owner@example.com')},
'change_id': '123456789',
'current_revision': 'sha1_of_current_revision',
'revisions': { 'sha1_of_current_revision': {
'commit': {'message': fetched_description},
}},
'status': fetched_status or 'NEW',
}),
]
if fetched_status == 'ABANDONED':
calls += [
(('DieWithError', 'Change https://%s-review.googlesource.com/'
'123456 has been abandoned, new uploads are not '
'allowed' % short_hostname), SystemExitMock()),
]
return calls
if other_cl_owner:
calls += [
(('ask_for_data', 'Press Enter to upload, or Ctrl+C to abort'), ''),
]
calls += cls._git_sanity_checks(ancestor_revision, 'master',
get_remote_branch=False)
calls += [
((['git', 'rev-parse', '--show-cdup'],), ''),
((['git', 'rev-parse', 'HEAD'],), '12345'),
((['git', '-c', 'core.quotePath=false', 'diff', '--name-status',
'--no-renames', '-r', ancestor_revision + '...', '.'],),
'M\t.gitignore\n'),
((['git', 'config', 'branch.master.gerritpatchset'],), CERR1),
]
if not issue:
calls += [
((['git', 'log', '--pretty=format:%s%n%n%b',
ancestor_revision + '...'],),
'foo'),
]
calls += [
((['git', 'config', 'user.email'],), 'me@example.com'),
((['git', 'diff', '--no-ext-diff', '--stat', '-l100000', '-C50'] +
([custom_cl_base] if custom_cl_base else
[ancestor_revision, 'HEAD']),),
'+dat'),
]
return calls
@classmethod
def _gerrit_upload_calls(cls, description, reviewers, squash,
squash_mode='default',
expected_upstream_ref='origin/refs/heads/master',
title=None, notify=False,
post_amend_description=None, issue=None, cc=None,
custom_cl_base=None, tbr=None,
short_hostname='chromium',
labels=None):
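    # Returns the expected sequence of calls for the push/upload phase of
    # `git cl upload`, mirroring the options passed to the test.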
if post_amend_description is None:
post_amend_description = description
cc = cc or []
# Determined in `_gerrit_base_calls`.
determined_ancestor_revision = custom_cl_base or 'fake_ancestor_sha'
calls = []
if squash_mode == 'default':
calls.extend([
((['git', 'config', '--bool', 'gerrit.override-squash-uploads'],), ''),
((['git', 'config', '--bool', 'gerrit.squash-uploads'],), ''),
])
elif squash_mode in ('override_squash', 'override_nosquash'):
calls.extend([
((['git', 'config', '--bool', 'gerrit.override-squash-uploads'],),
'true' if squash_mode == 'override_squash' else 'false'),
])
else:
assert squash_mode in ('squash', 'nosquash')
# If issue is given, then description is fetched from Gerrit instead.
if issue is None:
calls += [
((['git', 'log', '--pretty=format:%s\n\n%b',
((custom_cl_base + '..') if custom_cl_base else
'fake_ancestor_sha..HEAD')],),
description),
]
if squash:
title = 'Initial_upload'
else:
if not title:
calls += [
((['git', 'show', '-s', '--format=%s', 'HEAD'],), ''),
(('ask_for_data', 'Title for patchset []: '), 'User input'),
]
title = 'User_input'
if not git_footers.get_footer_change_id(description) and not squash:
calls += [
(('DownloadGerritHook', False), ''),
# Amending of commit message to get the Change-Id.
((['git', 'log', '--pretty=format:%s\n\n%b',
determined_ancestor_revision + '..HEAD'],),
description),
((['git', 'commit', '--amend', '-m', description],), ''),
((['git', 'log', '--pretty=format:%s\n\n%b',
determined_ancestor_revision + '..HEAD'],),
post_amend_description)
]
if squash:
if not issue:
# Prompting to edit description on first upload.
calls += [
((['git', 'config', 'core.editor'],), ''),
((['RunEditor'],), description),
]
ref_to_push = 'abcdef0123456789'
calls += [
((['git', 'config', 'branch.master.merge'],), 'refs/heads/master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
]
if custom_cl_base is None:
calls += [
((['get_or_create_merge_base', 'master',
'refs/remotes/origin/master'],),
'origin/master'),
]
parent = 'origin/master'
else:
calls += [
((['git', 'merge-base', '--is-ancestor', custom_cl_base,
'refs/remotes/origin/master'],),
         callError(1)),  # Means not an ancestor.
(('ask_for_data',
'Do you take responsibility for cleaning up potential mess '
'resulting from proceeding with upload? Press Enter to upload, '
'or Ctrl+C to abort'), ''),
]
parent = custom_cl_base
calls += [
((['git', 'rev-parse', 'HEAD:'],), # `HEAD:` means HEAD's tree hash.
'0123456789abcdef'),
((['git', 'commit-tree', '0123456789abcdef', '-p', parent,
'-F', '/tmp/named'],),
ref_to_push),
]
else:
ref_to_push = 'HEAD'
calls += [
(('SaveDescriptionBackup',), None),
((['git', 'rev-list',
(custom_cl_base if custom_cl_base else expected_upstream_ref) + '..' +
ref_to_push],),
'1hashPerLine\n'),
]
metrics_arguments = []
if notify:
ref_suffix = '%ready,notify=ALL'
metrics_arguments += ['ready', 'notify=ALL']
else:
if not issue and squash:
ref_suffix = '%wip'
metrics_arguments.append('wip')
else:
ref_suffix = '%notify=NONE'
metrics_arguments.append('notify=NONE')
if title:
ref_suffix += ',m=' + title
metrics_arguments.append('m')
calls += [
((['git', 'config', 'rietveld.cc'],), ''),
]
if short_hostname == 'chromium':
for r in sorted(reviewers):
ref_suffix += ',r=%s' % r
metrics_arguments.append('r')
for c in sorted(['chromium-reviews+test-more-cc@chromium.org',
'joe@example.com'] + cc):
ref_suffix += ',cc=%s' % c
metrics_arguments.append('cc')
reviewers, cc = [], []
else:
calls += [
(('ValidAccounts', '%s-review.googlesource.com' % short_hostname,
sorted(reviewers) + ['joe@example.com',
'chromium-reviews+test-more-cc@chromium.org'] + cc),
{
e: {'email': e}
for e in (reviewers + ['joe@example.com'] + cc)
})
]
for r in sorted(reviewers):
if r != 'bad-account-or-email':
ref_suffix += ',r=%s' % r
metrics_arguments.append('r')
reviewers.remove(r)
for c in sorted(['joe@example.com'] + cc):
ref_suffix += ',cc=%s' % c
metrics_arguments.append('cc')
if c in cc:
cc.remove(c)
for k, v in sorted((labels or {}).items()):
ref_suffix += ',l=%s+%d' % (k, v)
metrics_arguments.append('l=%s+%d' % (k, v))
if tbr:
calls += [
(('GetCodeReviewTbrScore',
'%s-review.googlesource.com' % short_hostname,
'my/repo'),
2,),
]
calls += [
(('time.time',), 1000,),
((['git', 'push',
'https://%s.googlesource.com/my/repo' % short_hostname,
ref_to_push + ':refs/for/refs/heads/master' + ref_suffix],),
(('remote:\n'
'remote: Processing changes: (\)\n'
'remote: Processing changes: (|)\n'
'remote: Processing changes: (/)\n'
'remote: Processing changes: (-)\n'
'remote: Processing changes: new: 1 (/)\n'
'remote: Processing changes: new: 1, done\n'
'remote:\n'
'remote: New Changes:\n'
'remote: https://%s-review.googlesource.com/#/c/my/repo/+/123456'
' XXX\n'
'remote:\n'
'To https://%s.googlesource.com/my/repo\n'
' * [new branch] hhhh -> refs/for/refs/heads/master\n'
) % (short_hostname, short_hostname)),),
(('time.time',), 2000,),
(('add_repeated',
'sub_commands',
{
'execution_time': 1000,
'command': 'git push',
'exit_code': 0,
'arguments': sorted(metrics_arguments),
}),
None,),
]
if squash:
calls += [
((['git', 'config', 'branch.master.gerritissue', '123456'],),
''),
((['git', 'config', 'branch.master.gerritserver',
'https://chromium-review.googlesource.com'],), ''),
((['git', 'config', 'branch.master.gerritsquashhash',
'abcdef0123456789'],), ''),
]
if squash and short_hostname != 'chromium':
calls += [
(('AddReviewers',
'chromium-review.googlesource.com', 'my%2Frepo~123456',
sorted(reviewers),
cc + ['chromium-reviews+test-more-cc@chromium.org'],
notify),
''),
]
calls += cls._git_post_upload_calls()
return calls
def _run_gerrit_upload_test(
self,
upload_args,
description,
reviewers=None,
squash=True,
squash_mode=None,
expected_upstream_ref='origin/refs/heads/master',
title=None,
notify=False,
post_amend_description=None,
issue=None,
cc=None,
fetched_status=None,
other_cl_owner=None,
custom_cl_base=None,
tbr=None,
short_hostname='chromium',
labels=None):
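    # Drives `git cl upload` against the expected call sequences built by
    # _gerrit_base_calls and _gerrit_upload_calls.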
if squash_mode is None:
if '--no-squash' in upload_args:
squash_mode = 'nosquash'
elif '--squash' in upload_args:
squash_mode = 'squash'
else:
squash_mode = 'default'
reviewers = reviewers or []
cc = cc or []
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.mock(git_cl.gerrit_util, 'CookiesAuthenticator',
CookiesAuthenticatorMockFactory(
same_auth=('git-owner.example.com', '', 'pass')))
self.mock(git_cl._GerritChangelistImpl, '_GerritCommitMsgHookCheck',
lambda _, offer_removal: None)
self.mock(git_cl.gclient_utils, 'RunEditor',
lambda *_, **__: self._mocked_call(['RunEditor']))
self.mock(git_cl, 'DownloadGerritHook', lambda force: self._mocked_call(
'DownloadGerritHook', force))
self.calls = self._gerrit_base_calls(
issue=issue,
fetched_description=description,
fetched_status=fetched_status,
other_cl_owner=other_cl_owner,
custom_cl_base=custom_cl_base,
short_hostname=short_hostname)
if fetched_status != 'ABANDONED':
self.mock(tempfile, 'NamedTemporaryFile', MakeNamedTemporaryFileMock(
expected_content=description))
self.mock(os, 'remove', lambda _: True)
self.calls += self._gerrit_upload_calls(
description, reviewers, squash,
squash_mode=squash_mode,
expected_upstream_ref=expected_upstream_ref,
title=title, notify=notify,
post_amend_description=post_amend_description,
issue=issue, cc=cc,
custom_cl_base=custom_cl_base, tbr=tbr,
short_hostname=short_hostname,
labels=labels)
git_cl.main(['upload'] + upload_args)
def test_gerrit_upload_without_change_id(self):
self._run_gerrit_upload_test(
['--no-squash'],
'desc\n\nBUG=\n',
[],
squash=False,
post_amend_description='desc\n\nBUG=\n\nChange-Id: Ixxx')
def test_gerrit_upload_without_change_id_override_nosquash(self):
self._run_gerrit_upload_test(
[],
'desc\n\nBUG=\n',
[],
squash=False,
squash_mode='override_nosquash',
post_amend_description='desc\n\nBUG=\n\nChange-Id: Ixxx')
def test_gerrit_no_reviewer(self):
self._run_gerrit_upload_test(
[],
'desc\n\nBUG=\n\nChange-Id: I123456789\n',
[],
squash=False,
squash_mode='override_nosquash')
def test_gerrit_no_reviewer_non_chromium_host(self):
self._run_gerrit_upload_test(
[],
'desc\n\nBUG=\n\nChange-Id: I123456789\n',
[],
squash=False,
squash_mode='override_nosquash',
short_hostname='other')
def test_gerrit_patchset_title_special_chars(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self._run_gerrit_upload_test(
['-f', '-t', 'We\'ll escape ^_ ^ special chars...@{u}'],
'desc\n\nBUG=\n\nChange-Id: I123456789',
squash=False,
squash_mode='override_nosquash',
title='We%27ll_escape_%5E%5F_%5E_special_chars%2E%2E%2E%40%7Bu%7D')
def test_gerrit_reviewers_cmd_line(self):
self._run_gerrit_upload_test(
['-r', 'foo@example.com', '--send-mail'],
'desc\n\nBUG=\n\nChange-Id: I123456789',
['foo@example.com'],
squash=False,
squash_mode='override_nosquash',
notify=True)
def test_gerrit_reviewer_multiple(self):
self.mock(git_cl.gerrit_util, 'GetCodeReviewTbrScore',
lambda *a: self._mocked_call('GetCodeReviewTbrScore', *a))
self._run_gerrit_upload_test(
[],
'desc\nTBR=reviewer@example.com\nBUG=\nR=another@example.com\n'
'CC=more@example.com,people@example.com\n\n'
'Change-Id: 123456789',
['reviewer@example.com', 'another@example.com'],
expected_upstream_ref='origin/master',
cc=['more@example.com', 'people@example.com'],
tbr='reviewer@example.com',
labels={'Code-Review': 2})
def test_gerrit_upload_squash_first_is_default(self):
self._run_gerrit_upload_test(
[],
'desc\nBUG=\n\nChange-Id: 123456789',
[],
expected_upstream_ref='origin/master')
def test_gerrit_upload_squash_first(self):
self._run_gerrit_upload_test(
['--squash'],
'desc\nBUG=\n\nChange-Id: 123456789',
[],
squash=True,
expected_upstream_ref='origin/master')
def test_gerrit_upload_squash_first_with_labels(self):
self._run_gerrit_upload_test(
['--squash', '--cq-dry-run', '--enable-auto-submit'],
'desc\nBUG=\n\nChange-Id: 123456789',
[],
squash=True,
expected_upstream_ref='origin/master',
labels={'Commit-Queue': 1, 'Auto-Submit': 1})
def test_gerrit_upload_squash_first_against_rev(self):
custom_cl_base = 'custom_cl_base_rev_or_branch'
self._run_gerrit_upload_test(
['--squash', custom_cl_base],
'desc\nBUG=\n\nChange-Id: 123456789',
[],
squash=True,
expected_upstream_ref='origin/master',
custom_cl_base=custom_cl_base)
self.assertIn(
'If you proceed with upload, more than 1 CL may be created by Gerrit',
sys.stdout.getvalue())
def test_gerrit_upload_squash_reupload(self):
description = 'desc\nBUG=\n\nChange-Id: 123456789'
self._run_gerrit_upload_test(
['--squash'],
description,
[],
squash=True,
expected_upstream_ref='origin/master',
issue=123456)
def test_gerrit_upload_squash_reupload_to_abandoned(self):
self.mock(git_cl, 'DieWithError',
lambda msg, change=None: self._mocked_call('DieWithError', msg))
description = 'desc\nBUG=\n\nChange-Id: 123456789'
with self.assertRaises(SystemExitMock):
self._run_gerrit_upload_test(
['--squash'],
description,
[],
squash=True,
expected_upstream_ref='origin/master',
issue=123456,
fetched_status='ABANDONED')
def test_gerrit_upload_squash_reupload_to_not_owned(self):
self.mock(git_cl.gerrit_util, 'GetAccountDetails',
lambda *_, **__: {'email': 'yet-another@example.com'})
description = 'desc\nBUG=\n\nChange-Id: 123456789'
self._run_gerrit_upload_test(
['--squash'],
description,
[],
squash=True,
expected_upstream_ref='origin/master',
issue=123456,
other_cl_owner='other@example.com')
self.assertIn(
'WARNING: Change 123456 is owned by other@example.com, but you '
'authenticate to Gerrit as yet-another@example.com.\n'
'Uploading may fail due to lack of permissions',
git_cl.sys.stdout.getvalue())
def test_upload_branch_deps(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
def mock_run_git(*args, **_kwargs):
if args[0] == ['for-each-ref',
'--format=%(refname:short) %(upstream:short)',
'refs/heads']:
# Create a local branch dependency tree that looks like this:
# test1 -> test2 -> test3 -> test4 -> test5
# -> test3.1
        # test0 -> test6
branch_deps = [
'test2 test1', # test1 -> test2
'test3 test2', # test2 -> test3
'test3.1 test2', # test2 -> test3.1
'test4 test3', # test3 -> test4
'test5 test4', # test4 -> test5
'test6 test0', # test0 -> test6
'test7', # test7
]
return '\n'.join(branch_deps)
self.mock(git_cl, 'RunGit', mock_run_git)
class RecordCalls:
times_called = 0
record_calls = RecordCalls()
def mock_CMDupload(*args, **_kwargs):
record_calls.times_called += 1
return 0
self.mock(git_cl, 'CMDupload', mock_CMDupload)
self.calls = [
(('ask_for_data', 'This command will checkout all dependent branches '
'and run "git cl upload". Press Enter to continue, '
'or Ctrl+C to abort'), ''),
]
class MockChangelist():
def __init__(self):
pass
def GetBranch(self):
return 'test1'
def GetIssue(self):
return '123'
def GetPatchset(self):
return '1001'
def IsGerrit(self):
return False
ret = git_cl.upload_branch_deps(MockChangelist(), [])
# CMDupload should have been called 5 times because of 5 dependent branches.
self.assertEquals(5, record_calls.times_called)
self.assertEquals(0, ret)
def test_gerrit_change_id(self):
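    """Verifies GenerateGerritChangeId hashes a synthetic commit object and
    prefixes the resulting object hash with 'I'."""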
self.calls = [
((['git', 'write-tree'], ),
'hashtree'),
((['git', 'rev-parse', 'HEAD~0'], ),
'branch-parent'),
((['git', 'var', 'GIT_AUTHOR_IDENT'], ),
'A B <a@b.org> 1456848326 +0100'),
((['git', 'var', 'GIT_COMMITTER_IDENT'], ),
'C D <c@d.org> 1456858326 +0100'),
((['git', 'hash-object', '-t', 'commit', '--stdin'], ),
'hashchange'),
]
change_id = git_cl.GenerateGerritChangeId('line1\nline2\n')
self.assertEqual(change_id, 'Ihashchange')
  def test_description_append_footer(self):
for init_desc, footer_line, expected_desc in [
# Use unique desc first lines for easy test failure identification.
('foo', 'R=one', 'foo\n\nR=one'),
('foo\n\nR=one', 'BUG=', 'foo\n\nR=one\nBUG='),
('foo\n\nR=one', 'Change-Id: Ixx', 'foo\n\nR=one\n\nChange-Id: Ixx'),
('foo\n\nChange-Id: Ixx', 'R=one', 'foo\n\nR=one\n\nChange-Id: Ixx'),
('foo\n\nR=one\n\nChange-Id: Ixx', 'TBR=two',
'foo\n\nR=one\nTBR=two\n\nChange-Id: Ixx'),
('foo\n\nR=one\n\nChange-Id: Ixx', 'Foo-Bar: baz',
'foo\n\nR=one\n\nChange-Id: Ixx\nFoo-Bar: baz'),
('foo\n\nChange-Id: Ixx', 'Foo-Bak: baz',
'foo\n\nChange-Id: Ixx\nFoo-Bak: baz'),
('foo', 'Change-Id: Ixx', 'foo\n\nChange-Id: Ixx'),
]:
desc = git_cl.ChangeDescription(init_desc)
desc.append_footer(footer_line)
self.assertEqual(desc.description, expected_desc)
def test_update_reviewers(self):
data = [
('foo', [], [],
'foo'),
('foo\nR=xx', [], [],
'foo\nR=xx'),
('foo\nTBR=xx', [], [],
'foo\nTBR=xx'),
('foo', ['a@c'], [],
'foo\n\nR=a@c'),
('foo\nR=xx', ['a@c'], [],
'foo\n\nR=a@c, xx'),
('foo\nTBR=xx', ['a@c'], [],
'foo\n\nR=a@c\nTBR=xx'),
('foo\nTBR=xx\nR=yy', ['a@c'], [],
'foo\n\nR=a@c, yy\nTBR=xx'),
('foo\nBUG=', ['a@c'], [],
'foo\nBUG=\nR=a@c'),
('foo\nR=xx\nTBR=yy\nR=bar', ['a@c'], [],
'foo\n\nR=a@c, bar, xx\nTBR=yy'),
('foo', ['a@c', 'b@c'], [],
'foo\n\nR=a@c, b@c'),
('foo\nBar\n\nR=\nBUG=', ['c@c'], [],
'foo\nBar\n\nR=c@c\nBUG='),
('foo\nBar\n\nR=\nBUG=\nR=', ['c@c'], [],
'foo\nBar\n\nR=c@c\nBUG='),
        # Same as the line before, but padded with extra whitespace.
(
'foo\nBar\n\n R = \n BUG = \n R = ', ['c@c'], [],
'foo\nBar\n\nR=c@c\n BUG =',
),
        # Whitespace isn't interpreted as new lines.
('foo BUG=allo R=joe ', ['c@c'], [],
'foo BUG=allo R=joe\n\nR=c@c'),
('foo\n\nR=a@c\nTBR=t@c', ['b@c', 'a@c'], ['a@c', 't@c'],
'foo\n\nR=a@c, b@c\nTBR=t@c'),
]
expected = [i[-1] for i in data]
actual = []
for orig, reviewers, tbrs, _expected in data:
obj = git_cl.ChangeDescription(orig)
obj.update_reviewers(reviewers, tbrs)
actual.append(obj.description)
self.assertEqual(expected, actual)
def test_get_hash_tags(self):
cases = [
('', []),
('a', []),
('[a]', ['a']),
('[aa]', ['aa']),
('[a ]', ['a']),
('[a- ]', ['a']),
('[a- b]', ['a-b']),
('[a--b]', ['a-b']),
('[a', []),
('[a]x', ['a']),
('[aa]x', ['aa']),
('[a b]', ['a-b']),
('[a b]', ['a-b']),
('[a__b]', ['a-b']),
('[a] x', ['a']),
('[a][b]', ['a', 'b']),
('[a] [b]', ['a', 'b']),
('[a][b]x', ['a', 'b']),
('[a][b] x', ['a', 'b']),
('[a]\n[b]', ['a']),
('[a\nb]', []),
('[a][', ['a']),
('Revert "[a] feature"', ['a']),
('Reland "[a] feature"', ['a']),
('Revert: [a] feature', ['a']),
('Reland: [a] feature', ['a']),
('Revert "Reland: [a] feature"', ['a']),
('Foo: feature', ['foo']),
('Foo Bar: feature', ['foo-bar']),
('Revert "Foo bar: feature"', ['foo-bar']),
('Reland "Foo bar: feature"', ['foo-bar']),
]
for desc, expected in cases:
change_desc = git_cl.ChangeDescription(desc)
actual = change_desc.get_hash_tags()
self.assertEqual(
actual,
expected,
'GetHashTags(%r) == %r, expected %r' % (desc, actual, expected))
self.assertEqual(None, git_cl.GetTargetRef('origin', None, 'master'))
self.assertEqual(None, git_cl.GetTargetRef(None,
'refs/remotes/origin/master',
'master'))
self.assertEqual('refs/heads/master',
git_cl.GetTargetRef('origin', 'refs/remotes/origin/master',
None))
self.assertEqual('refs/heads/master',
git_cl.GetTargetRef('origin', 'refs/remotes/origin/lkgr',
None))
self.assertEqual('refs/heads/master',
git_cl.GetTargetRef('origin', 'refs/remotes/origin/lkcr',
None))
self.assertEqual('refs/branch-heads/123',
git_cl.GetTargetRef('origin',
'refs/remotes/branch-heads/123',
None))
self.assertEqual('refs/diff/test',
git_cl.GetTargetRef('origin',
'refs/remotes/origin/refs/diff/test',
None))
self.assertEqual('refs/heads/chrome/m42',
git_cl.GetTargetRef('origin',
'refs/remotes/origin/chrome/m42',
None))
for branch in ('branch-heads/123', 'remotes/branch-heads/123',
'refs/remotes/branch-heads/123'):
self.assertEqual('refs/branch-heads/123',
git_cl.GetTargetRef('origin',
'refs/remotes/origin/master',
branch))
for branch in ('origin/master', 'remotes/origin/master',
'refs/remotes/origin/master'):
self.assertEqual('refs/heads/master',
git_cl.GetTargetRef('origin',
'refs/remotes/branch-heads/123',
branch))
for branch in ('master', 'heads/master', 'refs/heads/master'):
self.assertEqual('refs/heads/master',
git_cl.GetTargetRef('origin',
'refs/remotes/branch-heads/123',
branch))
def test_patch_when_dirty(self):
self.mock(git_common, 'is_dirty_git_tree', lambda x: True)
self.assertNotEqual(git_cl.main(['patch', '123456']), 0)
@staticmethod
def _get_gerrit_codereview_server_calls(branch, value=None,
git_short_host='host',
detect_branch=True,
detect_server=True):
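    """Returns the expected calls for looking up the Gerrit server of |branch|,
    falling back to deriving it from the branch's remote URL when no
    gerritserver config entry exists."""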
calls = []
if detect_branch:
calls.append(((['git', 'symbolic-ref', 'HEAD'],), branch))
if detect_server:
calls.append(((['git', 'config', 'branch.' + branch + '.gerritserver'],),
CERR1 if value is None else value))
if value is None:
calls += [
((['git', 'config', 'branch.' + branch + '.merge'],),
'refs/heads' + branch),
((['git', 'config', 'branch.' + branch + '.remote'],),
'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://%s.googlesource.com/my/repo' % git_short_host),
]
return calls
def _patch_common(self, force_codereview=False,
new_branch=False, git_short_host='host',
detect_gerrit_server=False,
actual_codereview=None,
codereview_in_url=False):
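    """Sets up the mocks and expected calls shared by the 'git cl patch' tests,
    up to and including the GetChangeDetail lookup for the Gerrit change."""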
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.mock(git_cl, 'IsGitVersionAtLeast', lambda *args: True)
if new_branch:
self.calls = [((['git', 'new-branch', 'master'],), ''),]
if codereview_in_url and actual_codereview == 'rietveld':
self.calls += [
((['git', 'rev-parse', '--show-cdup'],), ''),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
]
if not force_codereview and not codereview_in_url:
self.calls += [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.gerritissue'],), CERR1),
]
if detect_gerrit_server:
self.calls += self._get_gerrit_codereview_server_calls(
'master', git_short_host=git_short_host,
detect_branch=not new_branch and force_codereview)
actual_codereview = 'gerrit'
if actual_codereview == 'gerrit':
self.calls += [
(('GetChangeDetail', git_short_host + '-review.googlesource.com',
'my%2Frepo~123456', ['ALL_REVISIONS', 'CURRENT_COMMIT']),
{
'current_revision': '7777777777',
'revisions': {
'1111111111': {
'_number': 1,
'fetch': {'http': {
'url': 'https://%s.googlesource.com/my/repo' % git_short_host,
'ref': 'refs/changes/56/123456/1',
}},
},
'7777777777': {
'_number': 7,
'fetch': {'http': {
'url': 'https://%s.googlesource.com/my/repo' % git_short_host,
'ref': 'refs/changes/56/123456/7',
}},
},
},
}),
]
def test_patch_gerrit_default(self):
self._patch_common(git_short_host='chromium', detect_gerrit_server=True)
self.calls += [
((['git', 'fetch', 'https://chromium.googlesource.com/my/repo',
'refs/changes/56/123456/7'],), ''),
((['git', 'cherry-pick', 'FETCH_HEAD'],), ''),
((['git', 'config', 'branch.master.gerritissue', '123456'],),
''),
((['git', 'config', 'branch.master.gerritserver',
'https://chromium-review.googlesource.com'],), ''),
((['git', 'config', 'branch.master.gerritpatchset', '7'],), ''),
((['git', 'rev-parse', 'FETCH_HEAD'],), 'deadbeef'),
((['git', 'config', 'branch.master.last-upload-hash', 'deadbeef'],), ''),
((['git', 'config', 'branch.master.gerritsquashhash', 'deadbeef'],), ''),
]
self.assertEqual(git_cl.main(['patch', '123456']), 0)
def test_patch_gerrit_new_branch(self):
self._patch_common(
git_short_host='chromium', detect_gerrit_server=True, new_branch=True)
self.calls += [
((['git', 'fetch', 'https://chromium.googlesource.com/my/repo',
'refs/changes/56/123456/7'],), ''),
((['git', 'cherry-pick', 'FETCH_HEAD'],), ''),
((['git', 'config', 'branch.master.gerritissue', '123456'],),
''),
((['git', 'config', 'branch.master.gerritserver',
'https://chromium-review.googlesource.com'],), ''),
((['git', 'config', 'branch.master.gerritpatchset', '7'],), ''),
((['git', 'rev-parse', 'FETCH_HEAD'],), 'deadbeef'),
((['git', 'config', 'branch.master.last-upload-hash', 'deadbeef'],), ''),
((['git', 'config', 'branch.master.gerritsquashhash', 'deadbeef'],), ''),
]
self.assertEqual(git_cl.main(['patch', '-b', 'master', '123456']), 0)
def test_patch_gerrit_force(self):
self._patch_common(
force_codereview=True, git_short_host='host', detect_gerrit_server=True)
self.calls += [
((['git', 'fetch', 'https://host.googlesource.com/my/repo',
'refs/changes/56/123456/7'],), ''),
((['git', 'reset', '--hard', 'FETCH_HEAD'],), ''),
((['git', 'config', 'branch.master.gerritissue', '123456'],),
''),
((['git', 'config', 'branch.master.gerritserver',
'https://host-review.googlesource.com'],), ''),
((['git', 'config', 'branch.master.gerritpatchset', '7'],), ''),
((['git', 'rev-parse', 'FETCH_HEAD'],), 'deadbeef'),
((['git', 'config', 'branch.master.last-upload-hash', 'deadbeef'],), ''),
((['git', 'config', 'branch.master.gerritsquashhash', 'deadbeef'],), ''),
]
self.assertEqual(git_cl.main(['patch', '--gerrit', '123456', '--force']), 0)
def test_patch_gerrit_guess_by_url(self):
self.calls += self._get_gerrit_codereview_server_calls(
'master', git_short_host='else', detect_server=False)
self._patch_common(
actual_codereview='gerrit', git_short_host='else',
codereview_in_url=True, detect_gerrit_server=False)
self.calls += [
((['git', 'fetch', 'https://else.googlesource.com/my/repo',
'refs/changes/56/123456/1'],), ''),
((['git', 'cherry-pick', 'FETCH_HEAD'],), ''),
((['git', 'config', 'branch.master.gerritissue', '123456'],),
''),
((['git', 'config', 'branch.master.gerritserver',
'https://else-review.googlesource.com'],), ''),
((['git', 'config', 'branch.master.gerritpatchset', '1'],), ''),
((['git', 'rev-parse', 'FETCH_HEAD'],), 'deadbeef'),
((['git', 'config', 'branch.master.last-upload-hash', 'deadbeef'],), ''),
((['git', 'config', 'branch.master.gerritsquashhash', 'deadbeef'],), ''),
]
self.assertEqual(git_cl.main(
['patch', 'https://else-review.googlesource.com/#/c/123456/1']), 0)
def test_patch_gerrit_guess_by_url_with_repo(self):
self.calls += self._get_gerrit_codereview_server_calls(
'master', git_short_host='else', detect_server=False)
self._patch_common(
actual_codereview='gerrit', git_short_host='else',
codereview_in_url=True, detect_gerrit_server=False)
self.calls += [
((['git', 'fetch', 'https://else.googlesource.com/my/repo',
'refs/changes/56/123456/1'],), ''),
((['git', 'cherry-pick', 'FETCH_HEAD'],), ''),
((['git', 'config', 'branch.master.gerritissue', '123456'],),
''),
((['git', 'config', 'branch.master.gerritserver',
'https://else-review.googlesource.com'],), ''),
((['git', 'config', 'branch.master.gerritpatchset', '1'],), ''),
((['git', 'rev-parse', 'FETCH_HEAD'],), 'deadbeef'),
((['git', 'config', 'branch.master.last-upload-hash', 'deadbeef'],), ''),
((['git', 'config', 'branch.master.gerritsquashhash', 'deadbeef'],), ''),
]
self.assertEqual(git_cl.main(
['patch', 'https://else-review.googlesource.com/c/my/repo/+/123456/1']),
0)
def test_patch_gerrit_conflict(self):
self._patch_common(detect_gerrit_server=True, git_short_host='chromium')
self.calls += [
((['git', 'fetch', 'https://chromium.googlesource.com/my/repo',
'refs/changes/56/123456/7'],), ''),
((['git', 'cherry-pick', 'FETCH_HEAD'],), CERR1),
((['DieWithError', 'Command "git cherry-pick FETCH_HEAD" failed.\n'],),
SystemExitMock()),
]
with self.assertRaises(SystemExitMock):
git_cl.main(['patch', '123456'])
def test_patch_gerrit_not_exists(self):
def notExists(_issue, *_, **kwargs):
raise git_cl.gerrit_util.GerritError(404, '')
self.mock(git_cl.gerrit_util, 'GetChangeDetail', notExists)
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.gerritissue'],), CERR1),
((['git', 'config', 'branch.master.gerritserver'],), CERR1),
((['git', 'config', 'branch.master.merge'],), 'refs/heads/master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/my/repo'),
((['DieWithError',
'change 123456 at https://chromium-review.googlesource.com does not '
'exist or you have no access to it'],), SystemExitMock()),
]
with self.assertRaises(SystemExitMock):
self.assertEqual(1, git_cl.main(['patch', '123456']))
def _checkout_calls(self):
return [
((['git', 'config', '--local', '--get-regexp',
'branch\\..*\\.gerritissue'], ),
('branch.ger-branch.gerritissue 123456\n'
'branch.gbranch654.gerritissue 654321\n')),
]
def test_checkout_gerrit(self):
self.calls = self._checkout_calls()
self.calls += [((['git', 'checkout', 'ger-branch'], ), '')]
self.assertEqual(0, git_cl.main(['checkout', '123456']))
def test_checkout_not_found(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = self._checkout_calls()
self.assertEqual(1, git_cl.main(['checkout', '99999']))
def test_checkout_no_branch_issues(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = [
((['git', 'config', '--local', '--get-regexp',
'branch\\..*\\.gerritissue'], ), CERR1),
]
self.assertEqual(1, git_cl.main(['checkout', '99999']))
def _test_gerrit_ensure_authenticated_common(self, auth,
skip_auth_check=False):
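    """Mocks gitcookies credentials and returns a Changelist on 'master' ready
    for EnsureAuthenticated() checks."""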
self.mock(git_cl.gerrit_util, 'CookiesAuthenticator',
CookiesAuthenticatorMockFactory(hosts_with_creds=auth))
self.mock(git_cl, 'DieWithError',
lambda msg, change=None: self._mocked_call(['DieWithError', msg]))
self.calls = self._gerrit_ensure_auth_calls(skip_auth_check=skip_auth_check)
cl = git_cl.Changelist(codereview='gerrit')
cl.branch = 'master'
cl.branchref = 'refs/heads/master'
cl.lookedup_issue = True
return cl
def test_gerrit_ensure_authenticated_missing(self):
cl = self._test_gerrit_ensure_authenticated_common(auth={
'chromium.googlesource.com': ('git-is.ok', '', 'but gerrit is missing'),
})
self.calls.append(
((['DieWithError',
'Credentials for the following hosts are required:\n'
' chromium-review.googlesource.com\n'
'These are read from ~/.gitcookies (or legacy ~/.netrc)\n'
'You can (re)generate your credentials by visiting '
'https://chromium-review.googlesource.com/new-password'],), ''),)
self.assertIsNone(cl.EnsureAuthenticated(force=False))
def test_gerrit_ensure_authenticated_conflict(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
cl = self._test_gerrit_ensure_authenticated_common(auth={
'chromium.googlesource.com':
('git-one.example.com', None, 'secret1'),
'chromium-review.googlesource.com':
('git-other.example.com', None, 'secret2'),
})
self.calls.append(
(('ask_for_data', 'If you know what you are doing '
'press Enter to continue, or Ctrl+C to abort'), ''))
self.assertIsNone(cl.EnsureAuthenticated(force=False))
def test_gerrit_ensure_authenticated_ok(self):
cl = self._test_gerrit_ensure_authenticated_common(auth={
'chromium.googlesource.com':
('git-same.example.com', None, 'secret'),
'chromium-review.googlesource.com':
('git-same.example.com', None, 'secret'),
})
self.assertIsNone(cl.EnsureAuthenticated(force=False))
def test_gerrit_ensure_authenticated_skipped(self):
cl = self._test_gerrit_ensure_authenticated_common(
auth={}, skip_auth_check=True)
self.assertIsNone(cl.EnsureAuthenticated(force=False))
def test_gerrit_ensure_authenticated_bearer_token(self):
cl = self._test_gerrit_ensure_authenticated_common(auth={
'chromium.googlesource.com':
('', None, 'secret'),
'chromium-review.googlesource.com':
('', None, 'secret'),
})
self.assertIsNone(cl.EnsureAuthenticated(force=False))
header = gerrit_util.CookiesAuthenticator().get_auth_header(
'chromium.googlesource.com')
self.assertTrue('Bearer' in header)
def _cmd_set_commit_gerrit_common(self, vote, notify=None):
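    """Sets up mocks so that 'git cl set-commit' issues a SetReview call with
    the given Commit-Queue vote."""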
self.mock(git_cl.gerrit_util, 'SetReview',
lambda h, i, labels, notify=None:
self._mocked_call(['SetReview', h, i, labels, notify]))
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '123'),
((['git', 'config', 'branch.feature.gerritserver'],),
'https://chromium-review.googlesource.com'),
((['git', 'config', 'branch.feature.merge'],), 'refs/heads/master'),
((['git', 'config', 'branch.feature.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/infra/infra.git'),
((['SetReview', 'chromium-review.googlesource.com',
'infra%2Finfra~123',
{'Commit-Queue': vote}, notify],), ''),
]
def test_cmd_set_commit_gerrit_clear(self):
self._cmd_set_commit_gerrit_common(0)
self.assertEqual(0, git_cl.main(['set-commit', '-c']))
def test_cmd_set_commit_gerrit_dry(self):
self._cmd_set_commit_gerrit_common(1, notify=False)
self.assertEqual(0, git_cl.main(['set-commit', '-d']))
def test_cmd_set_commit_gerrit(self):
self._cmd_set_commit_gerrit_common(2)
self.assertEqual(0, git_cl.main(['set-commit']))
def test_description_display(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.mock(git_cl, 'Changelist', ChangelistMock)
ChangelistMock.desc = 'foo\n'
self.assertEqual(0, git_cl.main(['description', '-d']))
self.assertEqual('foo\n', out.getvalue())
def test_StatusFieldOverrideIssueMissingArgs(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stderr', out)
try:
self.assertEqual(git_cl.main(['status', '--issue', '1']), 0)
except SystemExit as ex:
self.assertEqual(ex.code, 2)
self.assertRegexpMatches(out.getvalue(), r'--issue must be specified')
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stderr', out)
try:
self.assertEqual(git_cl.main(['status', '--issue', '1', '--gerrit']), 0)
except SystemExit as ex:
self.assertEqual(ex.code, 2)
self.assertRegexpMatches(out.getvalue(), r'--field must be specified')
def test_StatusFieldOverrideIssue(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
def assertIssue(cl_self, *_args):
self.assertEquals(cl_self.issue, 1)
return 'foobar'
self.mock(git_cl.Changelist, 'GetDescription', assertIssue)
self.assertEqual(
git_cl.main(['status', '--issue', '1', '--gerrit', '--field', 'desc']),
0)
self.assertEqual(out.getvalue(), 'foobar\n')
def test_SetCloseOverrideIssue(self):
def assertIssue(cl_self, *_args):
self.assertEquals(cl_self.issue, 1)
return 'foobar'
self.mock(git_cl.Changelist, 'GetDescription', assertIssue)
self.mock(git_cl.Changelist, 'CloseIssue', lambda *_: None)
self.assertEqual(
git_cl.main(['set-close', '--issue', '1', '--gerrit']), 0)
def test_description(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.merge'],), 'feature'),
((['git', 'config', 'branch.feature.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/my/repo'),
(('GetChangeDetail', 'chromium-review.googlesource.com',
'my%2Frepo~123123', ['CURRENT_REVISION', 'CURRENT_COMMIT']),
{
'current_revision': 'sha1',
'revisions': {'sha1': {
'commit': {'message': 'foobar'},
}},
}),
]
self.assertEqual(0, git_cl.main([
'description',
'https://chromium-review.googlesource.com/c/my/repo/+/123123',
'-d']))
self.assertEqual('foobar\n', out.getvalue())
def test_description_set_raw(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.mock(git_cl, 'Changelist', ChangelistMock)
self.mock(git_cl.sys, 'stdin', StringIO.StringIO('hihi'))
self.assertEqual(0, git_cl.main(['description', '-n', 'hihi']))
self.assertEqual('hihi', ChangelistMock.desc)
def test_description_appends_bug_line(self):
current_desc = 'Some.\n\nChange-Id: xxx'
def RunEditor(desc, _, **kwargs):
self.assertEquals(
'# Enter a description of the change.\n'
'# This will be displayed on the codereview site.\n'
'# The first line will also be used as the subject of the review.\n'
'#--------------------This line is 72 characters long'
'--------------------\n'
'Some.\n\nChange-Id: xxx\nBug: ',
desc)
return 'Some.\n\nChange-Id: xxx\nBug: 123'
def UpdateDescriptionRemote(_, desc, force=False):
self.assertEquals(desc, 'Some.\n\nChange-Id: xxx\nBug: 123')
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.mock(git_cl.Changelist, 'GetDescription',
lambda *args: current_desc)
self.mock(git_cl._GerritChangelistImpl, 'UpdateDescriptionRemote',
UpdateDescriptionRemote)
self.mock(git_cl.gclient_utils, 'RunEditor', RunEditor)
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '123'),
((['git', 'config', 'rietveld.autoupdate'],), CERR1),
((['git', 'config', 'rietveld.bug-prefix'],), CERR1),
((['git', 'config', 'core.editor'],), 'vi'),
]
self.assertEqual(0, git_cl.main(['description', '--gerrit']))
def test_description_set_stdin(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.mock(git_cl, 'Changelist', ChangelistMock)
self.mock(git_cl.sys, 'stdin', StringIO.StringIO('hi \r\n\t there\n\nman'))
self.assertEqual(0, git_cl.main(['description', '-n', '-']))
self.assertEqual('hi\n\t there\n\nman', ChangelistMock.desc)
def test_archive(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = \
[((['git', 'for-each-ref', '--format=%(refname)', 'refs/heads'],),
'refs/heads/master\nrefs/heads/foo\nrefs/heads/bar'),
((['git', 'config', 'branch.master.gerritissue'],), '456'),
((['git', 'config', 'branch.foo.gerritissue'],), CERR1),
((['git', 'config', 'branch.bar.gerritissue'],), '789'),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'tag', 'git-cl-archived-456-foo', 'foo'],), ''),
((['git', 'branch', '-D', 'foo'],), '')]
self.mock(git_cl, 'get_cl_statuses',
lambda branches, fine_grained, max_processes:
[(MockChangelistWithBranchAndIssue('master', 1), 'open'),
(MockChangelistWithBranchAndIssue('foo', 456), 'closed'),
(MockChangelistWithBranchAndIssue('bar', 789), 'open')])
self.assertEqual(0, git_cl.main(['archive', '-f']))
def test_archive_current_branch_fails(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = \
[((['git', 'for-each-ref', '--format=%(refname)', 'refs/heads'],),
'refs/heads/master'),
((['git', 'config', 'branch.master.gerritissue'],), '1'),
((['git', 'symbolic-ref', 'HEAD'],), 'master')]
self.mock(git_cl, 'get_cl_statuses',
lambda branches, fine_grained, max_processes:
[(MockChangelistWithBranchAndIssue('master', 1), 'closed')])
self.assertEqual(1, git_cl.main(['archive', '-f']))
def test_archive_dry_run(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = \
[((['git', 'for-each-ref', '--format=%(refname)', 'refs/heads'],),
'refs/heads/master\nrefs/heads/foo\nrefs/heads/bar'),
((['git', 'config', 'branch.master.gerritissue'],), '456'),
((['git', 'config', 'branch.foo.gerritissue'],), CERR1),
((['git', 'config', 'branch.bar.gerritissue'],), '789'),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),]
self.mock(git_cl, 'get_cl_statuses',
lambda branches, fine_grained, max_processes:
[(MockChangelistWithBranchAndIssue('master', 1), 'open'),
(MockChangelistWithBranchAndIssue('foo', 456), 'closed'),
(MockChangelistWithBranchAndIssue('bar', 789), 'open')])
self.assertEqual(0, git_cl.main(['archive', '-f', '--dry-run']))
def test_archive_no_tags(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = \
[((['git', 'for-each-ref', '--format=%(refname)', 'refs/heads'],),
'refs/heads/master\nrefs/heads/foo\nrefs/heads/bar'),
((['git', 'config', 'branch.master.gerritissue'],), '1'),
((['git', 'config', 'branch.foo.gerritissue'],), '456'),
((['git', 'config', 'branch.bar.gerritissue'],), CERR1),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'branch', '-D', 'foo'],), '')]
self.mock(git_cl, 'get_cl_statuses',
lambda branches, fine_grained, max_processes:
[(MockChangelistWithBranchAndIssue('master', 1), 'open'),
(MockChangelistWithBranchAndIssue('foo', 456), 'closed'),
(MockChangelistWithBranchAndIssue('bar', 789), 'open')])
self.assertEqual(0, git_cl.main(['archive', '-f', '--notags']))
def test_cmd_issue_erase_existing(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '123'),
((['git', 'config', '--unset', 'branch.feature.last-upload-hash'],),
CERR1),
((['git', 'config', '--unset', 'branch.feature.gerritissue'],), ''),
((['git', 'config', '--unset', 'branch.feature.gerritpatchset'],), ''),
((['git', 'config', '--unset', 'branch.feature.gerritserver'],), ''),
((['git', 'config', '--unset', 'branch.feature.gerritsquashhash'],),
''),
((['git', 'log', '-1', '--format=%B'],), 'This is a description'),
]
self.assertEqual(0, git_cl.main(['issue', '0']))
def test_cmd_issue_erase_existing_with_change_id(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.mock(git_cl.Changelist, 'GetDescription',
lambda _: 'This is a description\n\nChange-Id: Ideadbeef')
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '123'),
((['git', 'config', '--unset', 'branch.feature.last-upload-hash'],),
CERR1),
((['git', 'config', '--unset', 'branch.feature.gerritissue'],), ''),
((['git', 'config', '--unset', 'branch.feature.gerritpatchset'],), ''),
((['git', 'config', '--unset', 'branch.feature.gerritserver'],), ''),
((['git', 'config', '--unset', 'branch.feature.gerritsquashhash'],),
''),
((['git', 'log', '-1', '--format=%B'],),
'This is a description\n\nChange-Id: Ideadbeef'),
((['git', 'commit', '--amend', '-m', 'This is a description\n'],), ''),
]
self.assertEqual(0, git_cl.main(['issue', '0']))
def test_cmd_issue_json(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '123'),
((['git', 'config', 'branch.feature.gerritserver'],),
'https://chromium-review.googlesource.com'),
(('write_json', 'output.json',
{'issue': 123,
'issue_url': 'https://chromium-review.googlesource.com/123'}),
''),
]
self.assertEqual(0, git_cl.main(['issue', '--json', 'output.json']))
def test_git_cl_try_default_cq_dry_run_gerrit(self):
self.mock(git_cl.Changelist, 'GetChange',
lambda _, *a: (
self._mocked_call(['GetChange']+list(a))))
self.mock(git_cl.presubmit_support, 'DoGetTryMasters',
lambda *_, **__: (
self._mocked_call(['DoGetTryMasters'])))
self.mock(git_cl._GerritChangelistImpl, 'SetCQState',
lambda _, s: self._mocked_call(['SetCQState', s]))
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '123456'),
((['git', 'config', 'branch.feature.gerritserver'],),
'https://chromium-review.googlesource.com'),
((['git', 'config', 'branch.feature.merge'],), 'feature'),
((['git', 'config', 'branch.feature.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/depot_tools'),
(('GetChangeDetail', 'chromium-review.googlesource.com',
'depot_tools~123456',
['DETAILED_ACCOUNTS', 'ALL_REVISIONS', 'CURRENT_COMMIT']), {
'project': 'depot_tools',
'status': 'OPEN',
'owner': {'email': 'owner@e.mail'},
'revisions': {
'deadbeaf': {
'_number': 6,
},
'beeeeeef': {
'_number': 7,
'fetch': {'http': {
'url': 'https://chromium.googlesource.com/depot_tools',
'ref': 'refs/changes/56/123456/7'
}},
},
},
}),
((['git', 'config', 'branch.feature.merge'],), 'feature'),
((['git', 'config', 'branch.feature.remote'],), 'origin'),
((['get_or_create_merge_base', 'feature', 'feature'],),
'fake_ancestor_sha'),
((['GetChange', 'fake_ancestor_sha', None], ),
git_cl.presubmit_support.GitChange(
'', '', '', '', '', '', '', '')),
((['git', 'rev-parse', '--show-cdup'],), '../'),
((['DoGetTryMasters'], ), None),
((['SetCQState', git_cl._CQState.DRY_RUN], ), None),
]
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.assertEqual(0, git_cl.main(['try']))
self.assertEqual(
out.getvalue(),
'Scheduling CQ dry run on: '
'https://chromium-review.googlesource.com/123456\n')
def test_git_cl_try_buildbucket_with_properties_gerrit(self):
self.mock(git_cl.Changelist, 'GetMostRecentPatchset', lambda _: 7)
self.mock(git_cl.uuid, 'uuid4', lambda: 'uuid4')
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '123456'),
((['git', 'config', 'branch.feature.gerritserver'],),
'https://chromium-review.googlesource.com'),
((['git', 'config', 'branch.feature.merge'],), 'feature'),
((['git', 'config', 'branch.feature.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/depot_tools'),
(('GetChangeDetail', 'chromium-review.googlesource.com',
'depot_tools~123456',
['DETAILED_ACCOUNTS', 'ALL_REVISIONS', 'CURRENT_COMMIT']), {
'project': 'depot_tools',
'status': 'OPEN',
'owner': {'email': 'owner@e.mail'},
'revisions': {
'deadbeaf': {
'_number': 6,
},
'beeeeeef': {
'_number': 7,
'fetch': {'http': {
'url': 'https://chromium.googlesource.com/depot_tools',
'ref': 'refs/changes/56/123456/7'
}},
},
},
}),
]
def _buildbucket_retry(*_, **kw):
body = json.loads(kw['body'])
self.assertEqual(len(body['builds']), 1)
build = body['builds'][0]
params = json.loads(build.pop('parameters_json'))
self.assertEqual(params, {
u'builder_name': u'win',
u'changes': [{u'author': {u'email': u'owner@e.mail'},
u'revision': None}],
u'properties': {
u'category': u'git_cl_try',
u'key': u'val',
u'json': [{u'a': 1}, None],
u'patch_gerrit_url':
u'https://chromium-review.googlesource.com',
u'patch_issue': 123456,
u'patch_project': u'depot_tools',
u'patch_ref': u'refs/changes/56/123456/7',
u'patch_repository_url':
u'https://chromium.googlesource.com/depot_tools',
u'patch_set': 7,
u'patch_storage': u'gerrit',
}
})
self.assertEqual(build, {
u'bucket': u'luci.chromium.try',
u'client_operation_id': u'uuid4',
u'tags': [
u'builder:win',
u'buildset:patch/gerrit/chromium-review.googlesource.com/123456/7',
u'user_agent:git_cl_try',
],
})
self.mock(git_cl, '_buildbucket_retry', _buildbucket_retry)
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.assertEqual(0, git_cl.main([
'try', '-B', 'luci.chromium.try', '-b', 'win',
'-p', 'key=val', '-p', 'json=[{"a":1}, null]']))
self.assertRegexpMatches(
git_cl.sys.stdout.getvalue(),
'Tried jobs on:\nBucket: luci.chromium.try')
def test_git_cl_try_bots_on_multiple_masters(self):
self.mock(git_cl.Changelist, 'GetMostRecentPatchset', lambda _: 7)
self.mock(git_cl.Changelist, 'GetChange',
lambda _, *a: (
self._mocked_call(['GetChange']+list(a))))
self.mock(git_cl.presubmit_support, 'DoGetTryMasters',
lambda *_, **__: (
self._mocked_call(['DoGetTryMasters'])))
self.mock(git_cl._GerritChangelistImpl, 'SetCQState',
lambda _, s: self._mocked_call(['SetCQState', s]))
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '123456'),
((['git', 'config', 'branch.feature.gerritserver'],),
'https://chromium-review.googlesource.com'),
((['git', 'config', 'branch.feature.merge'],), 'feature'),
((['git', 'config', 'branch.feature.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/depot_tools'),
(('GetChangeDetail', 'chromium-review.googlesource.com',
'depot_tools~123456',
['DETAILED_ACCOUNTS', 'ALL_REVISIONS', 'CURRENT_COMMIT']), {
'project': 'depot_tools',
'status': 'OPEN',
'owner': {'email': 'owner@e.mail'},
'revisions': {
'deadbeaf': {
'_number': 6,
},
'beeeeeef': {
'_number': 7,
'fetch': {'http': {
'url': 'https://chromium.googlesource.com/depot_tools',
'ref': 'refs/changes/56/123456/7'
}},
},
},
}),
]
def _buildbucket_retry(*_, **kw):
body = json.loads(kw['body'])
self.assertEqual(len(body['builds']), 2)
self.assertEqual(body['builds'][0]['bucket'], 'bucket1')
params = json.loads(body['builds'][0]['parameters_json'])
self.assertEqual(params['builder_name'], 'builder1')
self.assertEqual(body['builds'][1]['bucket'], 'bucket2')
params = json.loads(body['builds'][1]['parameters_json'])
self.assertEqual(params['builder_name'], 'builder2')
self.mock(git_cl, '_buildbucket_retry', _buildbucket_retry)
self.mock(git_cl.urllib2, 'urlopen', lambda _: StringIO.StringIO(
json.dumps({
'builder1': {'bucket': 'bucket1'},
'builder2': {'bucket': 'bucket2'},
})))
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.assertEqual(
0, git_cl.main(['try', '-b', 'builder1', '-b', 'builder2']))
self.assertEqual(
git_cl.sys.stdout.getvalue(),
'Tried jobs on:\n'
'Bucket: bucket1\n'
' builder1: []\n'
'Bucket: bucket2\n'
' builder2: []\n'
'To see results here, run: git cl try-results\n'
'To see results in browser, run: git cl web\n')
def _common_GerritCommitMsgHookCheck(self):
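    """Mocks the filesystem and stdout helpers used by the commit-msg hook
    checks and returns a Gerrit Changelist for issue 123."""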
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.mock(git_cl.os.path, 'abspath',
lambda path: self._mocked_call(['abspath', path]))
self.mock(git_cl.os.path, 'exists',
lambda path: self._mocked_call(['exists', path]))
self.mock(git_cl.gclient_utils, 'FileRead',
lambda path: self._mocked_call(['FileRead', path]))
self.mock(git_cl.gclient_utils, 'rm_file_or_tree',
lambda path: self._mocked_call(['rm_file_or_tree', path]))
self.calls = [
((['git', 'rev-parse', '--show-cdup'],), '../'),
((['abspath', '../'],), '/abs/git_repo_root'),
]
return git_cl.Changelist(codereview='gerrit', issue=123)
def test_GerritCommitMsgHookCheck_custom_hook(self):
cl = self._common_GerritCommitMsgHookCheck()
self.calls += [
((['exists', '/abs/git_repo_root/.git/hooks/commit-msg'],), True),
((['FileRead', '/abs/git_repo_root/.git/hooks/commit-msg'],),
'#!/bin/sh\necho "custom hook"')
]
cl._codereview_impl._GerritCommitMsgHookCheck(offer_removal=True)
def test_GerritCommitMsgHookCheck_not_exists(self):
cl = self._common_GerritCommitMsgHookCheck()
self.calls += [
((['exists', '/abs/git_repo_root/.git/hooks/commit-msg'],), False),
]
cl._codereview_impl._GerritCommitMsgHookCheck(offer_removal=True)
def test_GerritCommitMsgHookCheck(self):
cl = self._common_GerritCommitMsgHookCheck()
self.calls += [
((['exists', '/abs/git_repo_root/.git/hooks/commit-msg'],), True),
((['FileRead', '/abs/git_repo_root/.git/hooks/commit-msg'],),
'...\n# From Gerrit Code Review\n...\nadd_ChangeId()\n'),
(('ask_for_data', 'Do you want to remove it now? [Yes/No]: '), 'Yes'),
((['rm_file_or_tree', '/abs/git_repo_root/.git/hooks/commit-msg'],),
''),
]
cl._codereview_impl._GerritCommitMsgHookCheck(offer_removal=True)
def test_GerritCmdLand(self):
self.calls += [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritsquashhash'],),
'deadbeaf'),
((['git', 'diff', 'deadbeaf'],), ''),
((['git', 'config', 'branch.feature.gerritserver'],),
'chromium-review.googlesource.com'),
]
cl = git_cl.Changelist(issue=123, codereview='gerrit')
cl._codereview_impl._GetChangeDetail = lambda _: {
'labels': {},
'current_revision': 'deadbeaf',
}
cl._codereview_impl._GetChangeCommit = lambda: {
'commit': 'deadbeef',
'web_links': [{'name': 'gitiles',
'url': 'https://git.googlesource.com/test/+/deadbeef'}],
}
cl._codereview_impl.SubmitIssue = lambda wait_for_merge: None
out = StringIO.StringIO()
self.mock(sys, 'stdout', out)
self.assertEqual(0, cl.CMDLand(force=True,
bypass_hooks=True,
verbose=True,
parallel=False))
self.assertRegexpMatches(out.getvalue(), 'Issue.*123 has been submitted')
self.assertRegexpMatches(out.getvalue(), 'Landed as: .*deadbeef')
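  # Canned Buildbucket search results keyed by build id: '9000' is a STARTED
  # build and '8000' a COMPLETED failure. Used by the try-results tests below.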
BUILDBUCKET_BUILDS_MAP = {
'9000': {
'id': '9000',
'bucket': 'master.x.y',
'created_by': 'user:someone@chromium.org',
'created_ts': '147200002222000',
'experimental': False,
'parameters_json': json.dumps({
'builder_name': 'my-bot',
'properties': {'category': 'cq'},
}),
'status': 'STARTED',
'tags': [
'build_address:x.y/my-bot/2',
'builder:my-bot',
'experimental:false',
'user_agent:cq',
],
'url': 'http://build.cr.org/p/x.y/builders/my-bot/builds/2',
},
'8000': {
'id': '8000',
'bucket': 'master.x.y',
'created_by': 'user:someone@chromium.org',
'created_ts': '147200001111000',
'experimental': False,
'failure_reason': 'BUILD_FAILURE',
'parameters_json': json.dumps({
'builder_name': 'my-bot',
'properties': {'category': 'cq'},
}),
'result_details_json': json.dumps({
'properties': {'buildnumber': 1},
}),
'result': 'FAILURE',
'status': 'COMPLETED',
'tags': [
'build_address:x.y/my-bot/1',
'builder:my-bot',
'experimental:false',
'user_agent:cq',
],
'url': 'http://build.cr.org/p/x.y/builders/my-bot/builds/1',
},
}
def test_write_try_results_json(self):
expected_output = [
{
'bucket': 'master.x.y',
'buildbucket_id': '8000',
'builder_name': 'my-bot',
'created_ts': '147200001111000',
'experimental': False,
'failure_reason': 'BUILD_FAILURE',
'result': 'FAILURE',
'status': 'COMPLETED',
'tags': [
'build_address:x.y/my-bot/1',
'builder:my-bot',
'experimental:false',
'user_agent:cq',
],
'url': 'http://build.cr.org/p/x.y/builders/my-bot/builds/1',
},
{
'bucket': 'master.x.y',
'buildbucket_id': '9000',
'builder_name': 'my-bot',
'created_ts': '147200002222000',
'experimental': False,
'failure_reason': None,
'result': None,
'status': 'STARTED',
'tags': [
'build_address:x.y/my-bot/2',
'builder:my-bot',
'experimental:false',
'user_agent:cq',
],
'url': 'http://build.cr.org/p/x.y/builders/my-bot/builds/2',
},
]
self.calls = [(('write_json', 'output.json', expected_output), '')]
git_cl.write_try_results_json('output.json', self.BUILDBUCKET_BUILDS_MAP)
def _setup_fetch_try_jobs(self, most_recent_patchset=20001):
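    """Mocks stdout, the patchset lookup and the Buildbucket auth/retry helpers
    used by the try-results tests."""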
out = StringIO.StringIO()
self.mock(sys, 'stdout', out)
self.mock(git_cl.Changelist, 'GetMostRecentPatchset',
lambda *args: most_recent_patchset)
self.mock(git_cl.auth, 'get_authenticator_for_host', lambda host, _cfg:
self._mocked_call(['get_authenticator_for_host', host]))
self.mock(git_cl, '_buildbucket_retry', lambda *_, **__:
self._mocked_call(['_buildbucket_retry']))
def _setup_fetch_try_jobs_gerrit(self, *request_results):
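    """Extends _setup_fetch_try_jobs with the Gerrit branch/issue lookups and
    queues one mocked Buildbucket response per entry in request_results."""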
self._setup_fetch_try_jobs(most_recent_patchset=13)
self.calls += [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '1'),
((['git', 'config', 'branch.feature.gerritserver'],),
'https://x-review.googlesource.com'),
((['get_authenticator_for_host', 'x-review.googlesource.com'],),
AuthenticatorMock()),
] + [((['_buildbucket_retry'],), r) for r in request_results]
def test_fetch_try_jobs_none_gerrit(self):
self._setup_fetch_try_jobs_gerrit({})
self.assertEqual(0, git_cl.main(['try-results']))
self.assertRegexpMatches(sys.stdout.getvalue(), 'No try jobs')
def test_fetch_try_jobs_some_gerrit(self):
self._setup_fetch_try_jobs_gerrit({
'builds': self.BUILDBUCKET_BUILDS_MAP.values(),
})
self.assertEqual(0, git_cl.main(['try-results', '--patchset', '5']))
self.assertNotRegexpMatches(sys.stdout.getvalue(), 'Warning')
self.assertRegexpMatches(sys.stdout.getvalue(), '^Failures:')
self.assertRegexpMatches(sys.stdout.getvalue(), 'Started:')
self.assertRegexpMatches(sys.stdout.getvalue(), '2 try jobs')
def _mock_gerrit_changes_for_detail_cache(self):
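    """Makes _GetGerritHost return a fixed 'host' so the detail-cache tests
    don't need to resolve a real remote."""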
self.mock(git_cl._GerritChangelistImpl, '_GetGerritHost', lambda _: 'host')
def test_gerrit_change_detail_cache_simple(self):
self._mock_gerrit_changes_for_detail_cache()
self.calls = [
(('GetChangeDetail', 'host', 'my%2Frepo~1', []), 'a'),
(('GetChangeDetail', 'host', 'ab%2Frepo~2', []), 'b'),
(('GetChangeDetail', 'host', 'ab%2Frepo~2', []), 'b2'),
]
cl1 = git_cl.Changelist(issue=1, codereview='gerrit')
cl1._cached_remote_url = (
True, 'https://chromium.googlesource.com/a/my/repo.git/')
cl2 = git_cl.Changelist(issue=2, codereview='gerrit')
cl2._cached_remote_url = (
True, 'https://chromium.googlesource.com/ab/repo')
self.assertEqual(cl1._GetChangeDetail(), 'a') # Miss.
self.assertEqual(cl1._GetChangeDetail(), 'a')
self.assertEqual(cl2._GetChangeDetail(), 'b') # Miss.
self.assertEqual(cl2._GetChangeDetail(no_cache=True), 'b2') # Miss.
self.assertEqual(cl1._GetChangeDetail(), 'a')
self.assertEqual(cl2._GetChangeDetail(), 'b2')
def test_gerrit_change_detail_cache_options(self):
self._mock_gerrit_changes_for_detail_cache()
self.calls = [
(('GetChangeDetail', 'host', 'repo~1', ['C', 'A', 'B']), 'cab'),
(('GetChangeDetail', 'host', 'repo~1', ['A', 'D']), 'ad'),
(('GetChangeDetail', 'host', 'repo~1', ['A']), 'a'), # no_cache=True
# no longer in cache.
(('GetChangeDetail', 'host', 'repo~1', ['B']), 'b'),
]
cl = git_cl.Changelist(issue=1, codereview='gerrit')
cl._cached_remote_url = (True, 'https://chromium.googlesource.com/repo/')
self.assertEqual(cl._GetChangeDetail(options=['C', 'A', 'B']), 'cab')
self.assertEqual(cl._GetChangeDetail(options=['A', 'B', 'C']), 'cab')
self.assertEqual(cl._GetChangeDetail(options=['B', 'A']), 'cab')
self.assertEqual(cl._GetChangeDetail(options=['C']), 'cab')
self.assertEqual(cl._GetChangeDetail(options=['A']), 'cab')
self.assertEqual(cl._GetChangeDetail(), 'cab')
self.assertEqual(cl._GetChangeDetail(options=['A', 'D']), 'ad')
self.assertEqual(cl._GetChangeDetail(options=['A']), 'cab')
self.assertEqual(cl._GetChangeDetail(options=['D']), 'ad')
self.assertEqual(cl._GetChangeDetail(), 'cab')
    # Finally, no_cache should invalidate all caches for the given change.
self.assertEqual(cl._GetChangeDetail(options=['A'], no_cache=True), 'a')
self.assertEqual(cl._GetChangeDetail(options=['B']), 'b')
def test_gerrit_description_caching(self):
def gen_detail(rev, desc):
return {
'current_revision': rev,
'revisions': {rev: {'commit': {'message': desc}}}
}
self.calls = [
(('GetChangeDetail', 'host', 'my%2Frepo~1',
['CURRENT_REVISION', 'CURRENT_COMMIT']),
gen_detail('rev1', 'desc1')),
(('GetChangeDetail', 'host', 'my%2Frepo~1',
['CURRENT_REVISION', 'CURRENT_COMMIT']),
gen_detail('rev2', 'desc2')),
]
self._mock_gerrit_changes_for_detail_cache()
cl = git_cl.Changelist(issue=1, codereview='gerrit')
cl._cached_remote_url = (
True, 'https://chromium.googlesource.com/a/my/repo.git/')
self.assertEqual(cl.GetDescription(), 'desc1')
self.assertEqual(cl.GetDescription(), 'desc1') # cache hit.
self.assertEqual(cl.GetDescription(force=True), 'desc2')
def test_print_current_creds(self):
class CookiesAuthenticatorMock(object):
def __init__(self):
self.gitcookies = {
'host.googlesource.com': ('user', 'pass'),
'host-review.googlesource.com': ('user', 'pass'),
}
self.netrc = self
self.netrc.hosts = {
'github.com': ('user2', None, 'pass2'),
'host2.googlesource.com': ('user3', None, 'pass'),
}
self.mock(git_cl.gerrit_util, 'CookiesAuthenticator',
CookiesAuthenticatorMock)
self.mock(sys, 'stdout', StringIO.StringIO())
git_cl._GitCookiesChecker().print_current_creds(include_netrc=True)
self.assertEqual(list(sys.stdout.getvalue().splitlines()), [
' Host\t User\t Which file',
'============================\t=====\t===========',
'host-review.googlesource.com\t user\t.gitcookies',
' host.googlesource.com\t user\t.gitcookies',
' host2.googlesource.com\tuser3\t .netrc',
])
sys.stdout.buf = ''
git_cl._GitCookiesChecker().print_current_creds(include_netrc=False)
self.assertEqual(list(sys.stdout.getvalue().splitlines()), [
' Host\tUser\t Which file',
'============================\t====\t===========',
'host-review.googlesource.com\tuser\t.gitcookies',
' host.googlesource.com\tuser\t.gitcookies',
])
def _common_creds_check_mocks(self):
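    """Mocks os.path.exists so that only .netrc and .gitcookies existence
    checks go through the mocked-call expectations, and captures stdout."""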
def exists_mock(path):
dirname = os.path.dirname(path)
if dirname == os.path.expanduser('~'):
dirname = '~'
base = os.path.basename(path)
if base in ('.netrc', '.gitcookies'):
return self._mocked_call('os.path.exists', '%s/%s' % (dirname, base))
      # git cl also checks for the existence of other files not relevant to
      # this test.
return None
self.mock(os.path, 'exists', exists_mock)
self.mock(sys, 'stdout', StringIO.StringIO())
def test_creds_check_gitcookies_not_configured(self):
self._common_creds_check_mocks()
self.mock(git_cl._GitCookiesChecker, 'get_hosts_with_creds',
lambda _, include_netrc=False: [])
self.calls = [
((['git', 'config', '--path', 'http.cookiefile'],), CERR1),
((['git', 'config', '--global', 'http.cookiefile'],), CERR1),
(('os.path.exists', '~/.netrc'), True),
(('ask_for_data', 'Press Enter to setup .gitcookies, '
'or Ctrl+C to abort'), ''),
((['git', 'config', '--global', 'http.cookiefile',
os.path.expanduser('~/.gitcookies')], ), ''),
]
self.assertEqual(0, git_cl.main(['creds-check']))
self.assertRegexpMatches(
sys.stdout.getvalue(),
'^You seem to be using outdated .netrc for git credentials:')
self.assertRegexpMatches(
sys.stdout.getvalue(),
'\nConfigured git to use .gitcookies from')
def test_creds_check_gitcookies_configured_custom_broken(self):
self._common_creds_check_mocks()
self.mock(git_cl._GitCookiesChecker, 'get_hosts_with_creds',
lambda _, include_netrc=False: [])
self.calls = [
((['git', 'config', '--path', 'http.cookiefile'],), CERR1),
((['git', 'config', '--global', 'http.cookiefile'],),
'/custom/.gitcookies'),
(('os.path.exists', '/custom/.gitcookies'), False),
(('ask_for_data', 'Reconfigure git to use default .gitcookies? '
'Press Enter to reconfigure, or Ctrl+C to abort'), ''),
((['git', 'config', '--global', 'http.cookiefile',
os.path.expanduser('~/.gitcookies')], ), ''),
]
self.assertEqual(0, git_cl.main(['creds-check']))
self.assertRegexpMatches(
sys.stdout.getvalue(),
'WARNING: You have configured custom path to .gitcookies: ')
self.assertRegexpMatches(
sys.stdout.getvalue(),
'However, your configured .gitcookies file is missing.')
def test_git_cl_comment_add_gerrit(self):
self.mock(git_cl.gerrit_util, 'SetReview',
lambda host, change, msg, ready:
self._mocked_call('SetReview', host, change, msg, ready))
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), CERR1),
((['git', 'symbolic-ref', 'HEAD'],), CERR1),
((['git', 'config', 'rietveld.upstream-branch'],), CERR1),
((['git', 'branch', '-r'],), 'origin/HEAD -> origin/master\n'
'origin/master'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/infra/infra'),
(('SetReview', 'chromium-review.googlesource.com', 'infra%2Finfra~10',
'msg', None),
None),
]
self.assertEqual(0, git_cl.main(['comment', '--gerrit', '-i', '10',
'-a', 'msg']))
def test_git_cl_comments_fetch_gerrit(self):
self.mock(sys, 'stdout', StringIO.StringIO())
self.calls = [
((['git', 'config', 'branch.foo.gerritserver'],), ''),
((['git', 'config', 'branch.foo.merge'],), ''),
((['git', 'config', 'rietveld.upstream-branch'],), CERR1),
((['git', 'branch', '-r'],), 'origin/HEAD -> origin/master\n'
'origin/master'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/infra/infra'),
(('GetChangeDetail', 'chromium-review.googlesource.com',
'infra%2Finfra~1',
['MESSAGES', 'DETAILED_ACCOUNTS', 'CURRENT_REVISION',
'CURRENT_COMMIT']), {
'owner': {'email': 'owner@example.com'},
'current_revision': 'ba5eba11',
'revisions': {
'deadbeaf': {
'_number': 1,
},
'ba5eba11': {
'_number': 2,
},
},
'messages': [
{
u'_revision_number': 1,
u'author': {
u'_account_id': 1111084,
u'email': u'commit-bot@chromium.org',
u'name': u'Commit Bot'
},
u'date': u'2017-03-15 20:08:45.000000000',
u'id': u'f5a6c25ecbd3b3b54a43ae418ed97eff046dc50b',
u'message': u'Patch Set 1:\n\nDry run: CQ is trying the patch...',
u'tag': u'autogenerated:cq:dry-run'
},
{
u'_revision_number': 2,
u'author': {
u'_account_id': 11151243,
u'email': u'owner@example.com',
u'name': u'owner'
},
u'date': u'2017-03-16 20:00:41.000000000',
u'id': u'f5a6c25ecbd3b3b54a43ae418ed97eff046d1234',
u'message': u'PTAL',
},
{
u'_revision_number': 2,
u'author': {
              u'_account_id': 148512,
u'email': u'reviewer@example.com',
u'name': u'reviewer'
},
u'date': u'2017-03-17 05:19:37.500000000',
u'id': u'f5a6c25ecbd3b3b54a43ae418ed97eff046d4568',
u'message': u'Patch Set 2: Code-Review+1',
},
]
}),
(('GetChangeComments', 'chromium-review.googlesource.com',
'infra%2Finfra~1'), {
'/COMMIT_MSG': [
{
'author': {'email': u'reviewer@example.com'},
'updated': u'2017-03-17 05:19:37.500000000',
'patch_set': 2,
'side': 'REVISION',
'message': 'Please include a bug link',
},
],
'codereview.settings': [
{
'author': {'email': u'owner@example.com'},
'updated': u'2017-03-16 20:00:41.000000000',
'patch_set': 2,
'side': 'PARENT',
'line': 42,
'message': 'I removed this because it is bad',
},
]
}),
(('GetChangeRobotComments', 'chromium-review.googlesource.com',
'infra%2Finfra~1'), {}),
((['git', 'config', 'branch.foo.gerritpatchset', '2'],), ''),
] * 2 + [
(('write_json', 'output.json', [
{
u'date': u'2017-03-16 20:00:41.000000',
u'message': (
u'PTAL\n' +
u'\n' +
u'codereview.settings\n' +
u' Base, Line 42: https://chromium-review.googlesource.com/' +
          u'c/1/2/codereview.settings\n' +
u' I removed this because it is bad\n'),
u'autogenerated': False,
u'approval': False,
u'disapproval': False,
u'sender': u'owner@example.com'
}, {
u'date': u'2017-03-17 05:19:37.500000',
u'message': (
u'Patch Set 2: Code-Review+1\n' +
u'\n' +
u'/COMMIT_MSG\n' +
u' PS2, File comment: https://chromium-review.googlesource' +
          u'.com/c/1/2//COMMIT_MSG\n' +
u' Please include a bug link\n'),
u'autogenerated': False,
u'approval': False,
u'disapproval': False,
u'sender': u'reviewer@example.com'
}
]),'')
]
expected_comments_summary = [
git_cl._CommentSummary(
message=(
u'PTAL\n' +
u'\n' +
u'codereview.settings\n' +
u' Base, Line 42: https://chromium-review.googlesource.com/' +
            u'c/1/2/codereview.settings\n' +
u' I removed this because it is bad\n'),
date=datetime.datetime(2017, 3, 16, 20, 0, 41, 0),
autogenerated=False,
disapproval=False, approval=False, sender=u'owner@example.com'),
git_cl._CommentSummary(
message=(
u'Patch Set 2: Code-Review+1\n' +
u'\n' +
u'/COMMIT_MSG\n' +
u' PS2, File comment: https://chromium-review.googlesource.com/' +
            u'c/1/2//COMMIT_MSG\n' +
u' Please include a bug link\n'),
date=datetime.datetime(2017, 3, 17, 5, 19, 37, 500000),
autogenerated=False,
disapproval=False, approval=False, sender=u'reviewer@example.com'),
]
cl = git_cl.Changelist(
codereview='gerrit', issue=1, branchref='refs/heads/foo')
self.assertEqual(cl.GetCommentsSummary(), expected_comments_summary)
self.mock(git_cl.Changelist, 'GetBranch', lambda _: 'foo')
self.assertEqual(
0, git_cl.main(['comments', '-i', '1', '-j', 'output.json']))
def test_git_cl_comments_robot_comments(self):
# git cl comments also fetches robot comments (which are considered a type
# of autogenerated comment), and unlike other types of comments, only robot
# comments from the latest patchset are shown.
self.mock(sys, 'stdout', StringIO.StringIO())
self.calls = [
((['git', 'config', 'branch.foo.gerritserver'],), ''),
((['git', 'config', 'branch.foo.merge'],), ''),
((['git', 'config', 'rietveld.upstream-branch'],), CERR1),
((['git', 'branch', '-r'],), 'origin/HEAD -> origin/master\n'
'origin/master'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/infra/infra'),
(('GetChangeDetail', 'chromium-review.googlesource.com',
'infra%2Finfra~1',
['MESSAGES', 'DETAILED_ACCOUNTS', 'CURRENT_REVISION',
'CURRENT_COMMIT']), {
'owner': {'email': 'owner@example.com'},
'current_revision': 'ba5eba11',
'revisions': {
'deadbeaf': {
'_number': 1,
},
'ba5eba11': {
'_number': 2,
},
},
'messages': [
{
u'_revision_number': 1,
u'author': {
u'_account_id': 1111084,
u'email': u'commit-bot@chromium.org',
u'name': u'Commit Bot'
},
u'date': u'2017-03-15 20:08:45.000000000',
u'id': u'f5a6c25ecbd3b3b54a43ae418ed97eff046dc50b',
u'message': u'Patch Set 1:\n\nDry run: CQ is trying the patch...',
u'tag': u'autogenerated:cq:dry-run'
},
{
u'_revision_number': 1,
u'author': {
u'_account_id': 123,
u'email': u'tricium@serviceaccount.com',
u'name': u'Tricium'
},
u'date': u'2017-03-16 20:00:41.000000000',
u'id': u'f5a6c25ecbd3b3b54a43ae418ed97eff046d1234',
u'message': u'(1 comment)',
u'tag': u'autogenerated:tricium',
},
{
u'_revision_number': 1,
u'author': {
u'_account_id': 123,
u'email': u'tricium@serviceaccount.com',
u'name': u'Tricium'
},
u'date': u'2017-03-16 20:00:41.000000000',
u'id': u'f5a6c25ecbd3b3b54a43ae418ed97eff046d1234',
u'message': u'(1 comment)',
u'tag': u'autogenerated:tricium',
},
{
u'_revision_number': 2,
u'author': {
           u'_account_id': 123,
u'email': u'tricium@serviceaccount.com',
u'name': u'reviewer'
},
u'date': u'2017-03-17 05:30:37.000000000',
u'tag': u'autogenerated:tricium',
u'id': u'f5a6c25ecbd3b3b54a43ae418ed97eff046d4568',
u'message': u'(1 comment)',
},
]
}),
(('GetChangeComments', 'chromium-review.googlesource.com',
'infra%2Finfra~1'), {}),
(('GetChangeRobotComments', 'chromium-review.googlesource.com',
'infra%2Finfra~1'), {
'codereview.settings': [
{
u'author': {u'email': u'tricium@serviceaccount.com'},
u'updated': u'2017-03-17 05:30:37.000000000',
u'robot_run_id': u'5565031076855808',
u'robot_id': u'Linter/Category',
u'tag': u'autogenerated:tricium',
u'patch_set': 2,
u'side': u'REVISION',
u'message': u'Linter warning message text',
u'line': 32,
},
],
}),
((['git', 'config', 'branch.foo.gerritpatchset', '2'],), ''),
]
expected_comments_summary = [
git_cl._CommentSummary(date=datetime.datetime(2017, 3, 17, 5, 30, 37),
message=(
u'(1 comment)\n\ncodereview.settings\n'
u' PS2, Line 32: https://chromium-review.googlesource.com/'
            u'c/1/2/codereview.settings\n'
u' Linter warning message text\n'),
sender=u'tricium@serviceaccount.com',
autogenerated=True, approval=False, disapproval=False)
]
cl = git_cl.Changelist(
codereview='gerrit', issue=1, branchref='refs/heads/foo')
self.assertEqual(cl.GetCommentsSummary(), expected_comments_summary)
def test_get_remote_url_with_mirror(self):
original_os_path_isdir = os.path.isdir
def selective_os_path_isdir_mock(path):
if path == '/cache/this-dir-exists':
return self._mocked_call('os.path.isdir', path)
return original_os_path_isdir(path)
self.mock(os.path, 'isdir', selective_os_path_isdir_mock)
url = 'https://chromium.googlesource.com/my/repo'
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.merge'],), 'master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'/cache/this-dir-exists'),
(('os.path.isdir', '/cache/this-dir-exists'),
True),
# Runs in /cache/this-dir-exists.
((['git', 'config', 'remote.origin.url'],),
url),
]
cl = git_cl.Changelist(codereview='gerrit', issue=1)
self.assertEqual(cl.GetRemoteUrl(), url)
self.assertEqual(cl.GetRemoteUrl(), url) # Must be cached.
def test_get_remote_url_non_existing_mirror(self):
original_os_path_isdir = os.path.isdir
def selective_os_path_isdir_mock(path):
if path == '/cache/this-dir-doesnt-exist':
return self._mocked_call('os.path.isdir', path)
return original_os_path_isdir(path)
self.mock(os.path, 'isdir', selective_os_path_isdir_mock)
self.mock(logging, 'error',
lambda fmt, *a: self._mocked_call('logging.error', fmt % a))
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.merge'],), 'master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'/cache/this-dir-doesnt-exist'),
(('os.path.isdir', '/cache/this-dir-doesnt-exist'),
False),
(('logging.error',
'Remote "origin" for branch "/cache/this-dir-doesnt-exist" points to'
' "master", but it doesn\'t exist.'), None),
]
cl = git_cl.Changelist(codereview='gerrit', issue=1)
self.assertIsNone(cl.GetRemoteUrl())
def test_get_remote_url_misconfigured_mirror(self):
original_os_path_isdir = os.path.isdir
def selective_os_path_isdir_mock(path):
if path == '/cache/this-dir-exists':
return self._mocked_call('os.path.isdir', path)
return original_os_path_isdir(path)
self.mock(os.path, 'isdir', selective_os_path_isdir_mock)
self.mock(logging, 'error',
lambda *a: self._mocked_call('logging.error', *a))
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.merge'],), 'master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'/cache/this-dir-exists'),
(('os.path.isdir', '/cache/this-dir-exists'), True),
((['git', 'config', 'remote.origin.url'],), ''),
(('logging.error',
'Remote "%(remote)s" for branch "%(branch)s" points to '
'"%(cache_path)s", but it is misconfigured.\n'
'"%(cache_path)s" must be a git repo and must have a remote named '
'"%(remote)s" pointing to the git host.', {
'remote': 'origin',
'cache_path': '/cache/this-dir-exists',
'branch': 'master'}
), None),
]
cl = git_cl.Changelist(codereview='gerrit', issue=1)
self.assertIsNone(cl.GetRemoteUrl())
def test_gerrit_change_identifier_with_project(self):
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.merge'],), 'master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/a/my/repo.git/'),
]
cl = git_cl.Changelist(codereview='gerrit', issue=123456)
self.assertEqual(cl._GerritChangeIdentifier(), 'my%2Frepo~123456')
def test_gerrit_change_identifier_without_project(self):
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.merge'],), 'master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],), CERR1),
]
cl = git_cl.Changelist(codereview='gerrit', issue=123456)
self.assertEqual(cl._GerritChangeIdentifier(), '123456')
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG if '-v' in sys.argv else logging.ERROR)
unittest.main()
| true
| true
|
f71a5da87d38b68176352916c419991c5e418c42
| 15,214
|
py
|
Python
|
treebuilder/partptr/train.py
|
NLP-Discourse-SoochowU/TDDiscourseParser
|
2f9c7cef85c564c47b368ee4935caf1fad7c598d
|
[
"Apache-2.0"
] | 9
|
2020-11-24T01:16:01.000Z
|
2022-01-26T09:37:00.000Z
|
treebuilder/partptr/train.py
|
NLP-Discourse-SoochowU/TDDiscourseParser
|
2f9c7cef85c564c47b368ee4935caf1fad7c598d
|
[
"Apache-2.0"
] | 2
|
2020-11-29T17:49:49.000Z
|
2021-05-20T02:53:25.000Z
|
treebuilder/partptr/train.py
|
NLP-Discourse-SoochowU/TDDiscourseParser
|
2f9c7cef85c564c47b368ee4935caf1fad7c598d
|
[
"Apache-2.0"
] | 1
|
2022-01-26T11:00:33.000Z
|
2022-01-26T11:00:33.000Z
|
# coding: UTF-8
import argparse
import logging
import random
import torch
import copy
import numpy as np
from dataset import CDTB
from collections import Counter
from itertools import chain
from structure.vocab import Vocab, Label
from structure.nodes import node_type_filter, EDU, Relation, Sentence, TEXT
from treebuilder.partptr.model import PartitionPtr
from treebuilder.partptr.parser import PartitionPtrParser
import torch.optim as optim
from util.eval import parse_eval, gen_parse_report
from tensorboardX import SummaryWriter
def build_vocab(dataset):
word_freq = Counter()
pos_freq = Counter()
nuc_freq = Counter()
rel_freq = Counter()
for paragraph in chain(*dataset):
for node in paragraph.iterfind(filter=node_type_filter([EDU, Relation])):
if isinstance(node, EDU):
word_freq.update(node.words)
pos_freq.update(node.tags)
elif isinstance(node, Relation):
nuc_freq[node.nuclear] += 1
rel_freq[node.ftype] += 1
word_vocab = Vocab("word", word_freq)
pos_vocab = Vocab("part of speech", pos_freq)
nuc_label = Label("nuclear", nuc_freq)
rel_label = Label("relation", rel_freq)
return word_vocab, pos_vocab, nuc_label, rel_label
def gen_decoder_data(root, edu2ids):
# splits s0 s1 s2 s3 s4 s5 s6
# edus s/ e0 e1 e2 e3 e4 e5 /s
splits = [] # [(0, 3, 6, NS), (0, 2, 3, SN), ...]
child_edus = [] # [edus]
if isinstance(root, EDU):
child_edus.append(root)
elif isinstance(root, Sentence):
for child in root:
_child_edus, _splits = gen_decoder_data(child, edu2ids)
child_edus.extend(_child_edus)
splits.extend(_splits)
elif isinstance(root, Relation):
children = [gen_decoder_data(child, edu2ids) for child in root]
if len(children) < 2:
raise ValueError("relation node should have at least 2 children")
while children:
left_child_edus, left_child_splits = children.pop(0)
if children:
last_child_edus, _ = children[-1]
start = edu2ids[left_child_edus[0]]
split = edu2ids[left_child_edus[-1]] + 1
end = edu2ids[last_child_edus[-1]] + 1
nuc = root.nuclear
rel = root.ftype
splits.append((start, split, end, nuc, rel))
child_edus.extend(left_child_edus)
splits.extend(left_child_splits)
return child_edus, splits
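# Illustrative sketch (not part of the original sources): for a paragraph with three
# EDUs e0..e2, edu2ids = {e0: 0, e1: 1, e2: 2}, and a tree Relation(Relation(e0, e1), e2),
# gen_decoder_data returns
#   child_edus = [e0, e1, e2]
#   splits     = [(0, 2, 3, outer_nuc, outer_rel), (0, 1, 2, inner_nuc, inner_rel)]
# i.e. each tuple records a span (start, end), the cut point (split), and the
# nuclearity/relation labels of the relation node that produced that cut.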
def numericalize(dataset, word_vocab, pos_vocab, nuc_label, rel_label):
instances = []
for paragraph in filter(lambda d: d.root_relation(), chain(*dataset)):
encoder_inputs = []
decoder_inputs = []
pred_splits = []
pred_nucs = []
pred_rels = []
edus = list(paragraph.edus())
for edu in edus:
edu_word_ids = [word_vocab[word] for word in edu.words]
edu_pos_ids = [pos_vocab[pos] for pos in edu.tags]
encoder_inputs.append((edu_word_ids, edu_pos_ids))
edu2ids = {edu: i for i, edu in enumerate(edus)}
_, splits = gen_decoder_data(paragraph.root_relation(), edu2ids)
for start, split, end, nuc, rel in splits:
decoder_inputs.append((start, end))
pred_splits.append(split)
pred_nucs.append(nuc_label[nuc])
pred_rels.append(rel_label[rel])
instances.append((encoder_inputs, decoder_inputs, pred_splits, pred_nucs, pred_rels))
return instances
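# Rough shape of one instance produced above (derived from the code, values are
# illustrative): encoder_inputs holds one ([word_id, ...], [pos_id, ...]) pair per EDU,
# decoder_inputs holds (start, end) spans, and pred_splits/pred_nucs/pred_rels are the
# aligned gold split positions, nuclearity ids and relation ids for those spans.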
def gen_batch_iter(instances, batch_size, use_gpu=False):
random_instances = np.random.permutation(instances)
num_instances = len(instances)
offset = 0
while offset < num_instances:
batch = random_instances[offset: min(num_instances, offset+batch_size)]
# find out max seqlen of edus and words of edus
num_batch = batch.shape[0]
max_edu_seqlen = 0
max_word_seqlen = 0
for encoder_inputs, decoder_inputs, pred_splits, pred_nucs, pred_rels in batch:
max_edu_seqlen = max_edu_seqlen if max_edu_seqlen >= len(encoder_inputs) else len(encoder_inputs)
for edu_word_ids, edu_pos_ids in encoder_inputs:
max_word_seqlen = max_word_seqlen if max_word_seqlen >= len(edu_word_ids) else len(edu_word_ids)
# batch to numpy
e_input_words = np.zeros([num_batch, max_edu_seqlen, max_word_seqlen], dtype=np.long)
e_input_poses = np.zeros([num_batch, max_edu_seqlen, max_word_seqlen], dtype=np.long)
e_masks = np.zeros([num_batch, max_edu_seqlen, max_word_seqlen], dtype=np.uint8)
d_inputs = np.zeros([num_batch, max_edu_seqlen-1, 2], dtype=np.long)
d_outputs = np.zeros([num_batch, max_edu_seqlen-1], dtype=np.long)
d_output_nucs = np.zeros([num_batch, max_edu_seqlen-1], dtype=np.long)
d_output_rels = np.zeros([num_batch, max_edu_seqlen - 1], dtype=np.long)
d_masks = np.zeros([num_batch, max_edu_seqlen-1, max_edu_seqlen+1], dtype=np.uint8)
for batchi, (encoder_inputs, decoder_inputs, pred_splits, pred_nucs, pred_rels) in enumerate(batch):
for edui, (edu_word_ids, edu_pos_ids) in enumerate(encoder_inputs):
word_seqlen = len(edu_word_ids)
e_input_words[batchi][edui][:word_seqlen] = edu_word_ids
e_input_poses[batchi][edui][:word_seqlen] = edu_pos_ids
e_masks[batchi][edui][:word_seqlen] = 1
for di, decoder_input in enumerate(decoder_inputs):
d_inputs[batchi][di] = decoder_input
d_masks[batchi][di][decoder_input[0]+1: decoder_input[1]] = 1
d_outputs[batchi][:len(pred_splits)] = pred_splits
d_output_nucs[batchi][:len(pred_nucs)] = pred_nucs
d_output_rels[batchi][:len(pred_rels)] = pred_rels
# numpy to torch
e_input_words = torch.from_numpy(e_input_words).long()
e_input_poses = torch.from_numpy(e_input_poses).long()
e_masks = torch.from_numpy(e_masks).byte()
d_inputs = torch.from_numpy(d_inputs).long()
d_outputs = torch.from_numpy(d_outputs).long()
d_output_nucs = torch.from_numpy(d_output_nucs).long()
d_output_rels = torch.from_numpy(d_output_rels).long()
d_masks = torch.from_numpy(d_masks).byte()
if use_gpu:
e_input_words = e_input_words.cuda()
e_input_poses = e_input_poses.cuda()
e_masks = e_masks.cuda()
d_inputs = d_inputs.cuda()
d_outputs = d_outputs.cuda()
d_output_nucs = d_output_nucs.cuda()
d_output_rels = d_output_rels.cuda()
d_masks = d_masks.cuda()
yield (e_input_words, e_input_poses, e_masks), (d_inputs, d_masks), (d_outputs, d_output_nucs, d_output_rels)
offset = offset + batch_size
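# Shapes of one yielded batch, as constructed above: e_input_words, e_input_poses and
# e_masks are [num_batch, max_edu_seqlen, max_word_seqlen]; d_inputs is
# [num_batch, max_edu_seqlen - 1, 2]; d_outputs, d_output_nucs and d_output_rels are
# [num_batch, max_edu_seqlen - 1]; d_masks is [num_batch, max_edu_seqlen - 1, max_edu_seqlen + 1].
# Positions beyond each instance's real length stay zero (padding).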
def parse_and_eval(dataset, model):
model.eval()
parser = PartitionPtrParser(model)
golds = list(filter(lambda d: d.root_relation(), chain(*dataset)))
num_instances = len(golds)
strips = []
for paragraph in golds:
edus = []
for edu in paragraph.edus():
edu_copy = EDU([TEXT(edu.text)])
setattr(edu_copy, "words", edu.words)
setattr(edu_copy, "tags", edu.tags)
edus.append(edu_copy)
strips.append(edus)
parses = []
for edus in strips:
parse = parser.parse(edus)
parses.append(parse)
return num_instances, parse_eval(parses, golds)
def model_score(scores):
eval_score = sum(score[2] for score in scores)
return eval_score
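# Sketch of how model_score is used (numbers are illustrative): the per-metric scores
# are treated as (precision, recall, f1) triples, since the validation/test logging
# below reads score[2] as an F1 value, so e.g.
#   scores = [(0.8, 0.7, 0.75), (0.6, 0.5, 0.55)]  ->  model_score(scores) == 1.30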
def main(args):
# set seed for reproducibility
random.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# load dataset
cdtb = CDTB(args.data, "TRAIN", "VALIDATE", "TEST", ctb_dir=args.ctb_dir, preprocess=True, cache_dir=args.cache_dir)
# build vocabulary
word_vocab, pos_vocab, nuc_label, rel_label = build_vocab(cdtb.train)
trainset = numericalize(cdtb.train, word_vocab, pos_vocab, nuc_label, rel_label)
logging.info("num of instances trainset: %d" % len(trainset))
logging.info("args: %s" % str(args))
# build model
model = PartitionPtr(hidden_size=args.hidden_size, dropout=args.dropout,
word_vocab=word_vocab, pos_vocab=pos_vocab, nuc_label=nuc_label, rel_label=rel_label,
pretrained=args.pretrained, w2v_size=args.w2v_size, w2v_freeze=args.w2v_freeze,
pos_size=args.pos_size,
split_mlp_size=args.split_mlp_size, nuc_mlp_size=args.nuc_mlp_size,
rel_mlp_size=args.rel_mlp_size,
use_gpu=args.use_gpu)
if args.use_gpu:
model.cuda()
logging.info("model:\n%s" % str(model))
# train and evaluate
niter = 0
log_splits_loss = 0.
log_nucs_loss = 0.
log_rels_loss = 0.
log_loss = 0.
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2)
writer = SummaryWriter(args.log_dir)
logging.info("hint: run 'tensorboard --logdir %s' to observe training status" % args.log_dir)
best_model = None
best_model_score = 0.
for nepoch in range(1, args.epoch + 1):
batch_iter = gen_batch_iter(trainset, args.batch_size, args.use_gpu)
for nbatch, (e_inputs, d_inputs, grounds) in enumerate(batch_iter, start=1):
niter += 1
model.train()
optimizer.zero_grad()
splits_loss, nucs_loss, rels_loss = model.loss(e_inputs, d_inputs, grounds)
loss = args.a_split_loss * splits_loss + args.a_nuclear_loss * nucs_loss + args.a_relation_loss * rels_loss
loss.backward()
optimizer.step()
log_splits_loss += splits_loss.item()
log_nucs_loss += nucs_loss.item()
log_rels_loss += rels_loss.item()
log_loss += loss.item()
if niter % args.log_every == 0:
logging.info("[iter %-6d]epoch: %-3d, batch %-5d,"
"train splits loss:%.5f, nuclear loss %.5f, relation loss %.5f, loss %.5f" %
(niter, nepoch, nbatch, log_splits_loss, log_nucs_loss, log_rels_loss, log_loss))
writer.add_scalar("train/split_loss", log_splits_loss, niter)
writer.add_scalar("train/nuclear_loss", log_nucs_loss, niter)
writer.add_scalar("train/relation_loss", log_rels_loss, niter)
writer.add_scalar("train/loss", log_loss, niter)
log_splits_loss = 0.
log_nucs_loss = 0.
log_rels_loss = 0.
log_loss = 0.
if niter % args.validate_every == 0:
num_instances, validate_scores = parse_and_eval(cdtb.validate, model)
logging.info("validation on %d instances" % num_instances)
logging.info(gen_parse_report(*validate_scores))
writer.add_scalar("validate/span_f1", validate_scores[0][2], niter)
writer.add_scalar("validate/nuclear_f1", validate_scores[1][2], niter)
writer.add_scalar("validate/coarse_relation_f1", validate_scores[2][2], niter)
writer.add_scalar("validate/fine_relation_f1", validate_scores[3][2], niter)
new_model_score = model_score(validate_scores)
if new_model_score > best_model_score:
# test on testset with new best model
best_model_score = new_model_score
best_model = copy.deepcopy(model)
logging.info("test on new best model")
num_instances, test_scores = parse_and_eval(cdtb.test, best_model)
logging.info("test on %d instances" % num_instances)
logging.info(gen_parse_report(*test_scores))
writer.add_scalar("test/span_f1", test_scores[0][2], niter)
writer.add_scalar("test/nuclear_f1", test_scores[1][2], niter)
writer.add_scalar("test/coarse_relation_f1", test_scores[2][2], niter)
writer.add_scalar("test/fine_relation_f1", test_scores[3][2], niter)
if best_model:
# evaluation and save best model
logging.info("final test result")
num_instances, test_scores = parse_and_eval(cdtb.test, best_model)
logging.info("test on %d instances" % num_instances)
logging.info(gen_parse_report(*test_scores))
logging.info("save best model to %s" % args.model_save)
with open(args.model_save, "wb+") as model_fd:
torch.save(best_model, model_fd)
writer.close()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
arg_parser = argparse.ArgumentParser()
# dataset parameters
arg_parser.add_argument("--data", default="data/CDTB")
arg_parser.add_argument("--ctb_dir", default="data/CTB")
arg_parser.add_argument("--cache_dir", default="data/cache")
# model parameters
arg_parser.add_argument("-hidden_size", default=512, type=int)
arg_parser.add_argument("-dropout", default=0.33, type=float)
# w2v_group = arg_parser.add_mutually_exclusive_group(required=True)
arg_parser.add_argument("-pretrained", default="data/pretrained/sgns.renmin.word")
arg_parser.add_argument("-w2v_size", type=int)
arg_parser.add_argument("-pos_size", default=30, type=int)
arg_parser.add_argument("-split_mlp_size", default=64, type=int)
arg_parser.add_argument("-nuc_mlp_size", default=32, type=int)
arg_parser.add_argument("-rel_mlp_size", default=128, type=int)
arg_parser.add_argument("--w2v_freeze", dest="w2v_freeze", action="store_true")
arg_parser.set_defaults(w2v_freeze=True)
# train parameters
arg_parser.add_argument("-epoch", default=20, type=int)
arg_parser.add_argument("-batch_size", default=64, type=int)
arg_parser.add_argument("-lr", default=0.001, type=float)
arg_parser.add_argument("-l2", default=0.0, type=float)
arg_parser.add_argument("-log_every", default=10, type=int)
arg_parser.add_argument("-validate_every", default=10, type=int)
arg_parser.add_argument("-a_split_loss", default=0.3, type=float)
arg_parser.add_argument("-a_nuclear_loss", default=1.0, type=float)
arg_parser.add_argument("-a_relation_loss", default=1.0, type=float)
arg_parser.add_argument("-log_dir", default="data/log")
arg_parser.add_argument("-model_save", default="data/models/treebuilder.partptr.model")
arg_parser.add_argument("--seed", default=21, type=int)
arg_parser.add_argument("--use_gpu", dest="use_gpu", action="store_true")
arg_parser.set_defaults(use_gpu=True)
main(arg_parser.parse_args())
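# Hedged example invocation (paths and flag values beyond the defaults above are assumptions):
#   python -m treebuilder.partptr.train --data data/CDTB --use_gpu --seed 21
# Unspecified options fall back to the defaults declared above (e.g. -batch_size 64,
# -lr 0.001); note the single-dash spelling used for the model/training flags.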
| 46.95679
| 121
| 0.63481
|
import argparse
import logging
import random
import torch
import copy
import numpy as np
from dataset import CDTB
from collections import Counter
from itertools import chain
from structure.vocab import Vocab, Label
from structure.nodes import node_type_filter, EDU, Relation, Sentence, TEXT
from treebuilder.partptr.model import PartitionPtr
from treebuilder.partptr.parser import PartitionPtrParser
import torch.optim as optim
from util.eval import parse_eval, gen_parse_report
from tensorboardX import SummaryWriter
def build_vocab(dataset):
word_freq = Counter()
pos_freq = Counter()
nuc_freq = Counter()
rel_freq = Counter()
for paragraph in chain(*dataset):
for node in paragraph.iterfind(filter=node_type_filter([EDU, Relation])):
if isinstance(node, EDU):
word_freq.update(node.words)
pos_freq.update(node.tags)
elif isinstance(node, Relation):
nuc_freq[node.nuclear] += 1
rel_freq[node.ftype] += 1
word_vocab = Vocab("word", word_freq)
pos_vocab = Vocab("part of speech", pos_freq)
nuc_label = Label("nuclear", nuc_freq)
rel_label = Label("relation", rel_freq)
return word_vocab, pos_vocab, nuc_label, rel_label
def gen_decoder_data(root, edu2ids):
splits = []
child_edus = []
if isinstance(root, EDU):
child_edus.append(root)
elif isinstance(root, Sentence):
for child in root:
_child_edus, _splits = gen_decoder_data(child, edu2ids)
child_edus.extend(_child_edus)
splits.extend(_splits)
elif isinstance(root, Relation):
children = [gen_decoder_data(child, edu2ids) for child in root]
if len(children) < 2:
raise ValueError("relation node should have at least 2 children")
while children:
left_child_edus, left_child_splits = children.pop(0)
if children:
last_child_edus, _ = children[-1]
start = edu2ids[left_child_edus[0]]
split = edu2ids[left_child_edus[-1]] + 1
end = edu2ids[last_child_edus[-1]] + 1
nuc = root.nuclear
rel = root.ftype
splits.append((start, split, end, nuc, rel))
child_edus.extend(left_child_edus)
splits.extend(left_child_splits)
return child_edus, splits
def numericalize(dataset, word_vocab, pos_vocab, nuc_label, rel_label):
instances = []
for paragraph in filter(lambda d: d.root_relation(), chain(*dataset)):
encoder_inputs = []
decoder_inputs = []
pred_splits = []
pred_nucs = []
pred_rels = []
edus = list(paragraph.edus())
for edu in edus:
edu_word_ids = [word_vocab[word] for word in edu.words]
edu_pos_ids = [pos_vocab[pos] for pos in edu.tags]
encoder_inputs.append((edu_word_ids, edu_pos_ids))
edu2ids = {edu: i for i, edu in enumerate(edus)}
_, splits = gen_decoder_data(paragraph.root_relation(), edu2ids)
for start, split, end, nuc, rel in splits:
decoder_inputs.append((start, end))
pred_splits.append(split)
pred_nucs.append(nuc_label[nuc])
pred_rels.append(rel_label[rel])
instances.append((encoder_inputs, decoder_inputs, pred_splits, pred_nucs, pred_rels))
return instances
def gen_batch_iter(instances, batch_size, use_gpu=False):
random_instances = np.random.permutation(instances)
num_instances = len(instances)
offset = 0
while offset < num_instances:
batch = random_instances[offset: min(num_instances, offset+batch_size)]
num_batch = batch.shape[0]
max_edu_seqlen = 0
max_word_seqlen = 0
for encoder_inputs, decoder_inputs, pred_splits, pred_nucs, pred_rels in batch:
max_edu_seqlen = max_edu_seqlen if max_edu_seqlen >= len(encoder_inputs) else len(encoder_inputs)
for edu_word_ids, edu_pos_ids in encoder_inputs:
max_word_seqlen = max_word_seqlen if max_word_seqlen >= len(edu_word_ids) else len(edu_word_ids)
e_input_words = np.zeros([num_batch, max_edu_seqlen, max_word_seqlen], dtype=np.long)
e_input_poses = np.zeros([num_batch, max_edu_seqlen, max_word_seqlen], dtype=np.long)
e_masks = np.zeros([num_batch, max_edu_seqlen, max_word_seqlen], dtype=np.uint8)
d_inputs = np.zeros([num_batch, max_edu_seqlen-1, 2], dtype=np.long)
d_outputs = np.zeros([num_batch, max_edu_seqlen-1], dtype=np.long)
d_output_nucs = np.zeros([num_batch, max_edu_seqlen-1], dtype=np.long)
d_output_rels = np.zeros([num_batch, max_edu_seqlen - 1], dtype=np.long)
d_masks = np.zeros([num_batch, max_edu_seqlen-1, max_edu_seqlen+1], dtype=np.uint8)
for batchi, (encoder_inputs, decoder_inputs, pred_splits, pred_nucs, pred_rels) in enumerate(batch):
for edui, (edu_word_ids, edu_pos_ids) in enumerate(encoder_inputs):
word_seqlen = len(edu_word_ids)
e_input_words[batchi][edui][:word_seqlen] = edu_word_ids
e_input_poses[batchi][edui][:word_seqlen] = edu_pos_ids
e_masks[batchi][edui][:word_seqlen] = 1
for di, decoder_input in enumerate(decoder_inputs):
d_inputs[batchi][di] = decoder_input
d_masks[batchi][di][decoder_input[0]+1: decoder_input[1]] = 1
d_outputs[batchi][:len(pred_splits)] = pred_splits
d_output_nucs[batchi][:len(pred_nucs)] = pred_nucs
d_output_rels[batchi][:len(pred_rels)] = pred_rels
e_input_words = torch.from_numpy(e_input_words).long()
e_input_poses = torch.from_numpy(e_input_poses).long()
e_masks = torch.from_numpy(e_masks).byte()
d_inputs = torch.from_numpy(d_inputs).long()
d_outputs = torch.from_numpy(d_outputs).long()
d_output_nucs = torch.from_numpy(d_output_nucs).long()
d_output_rels = torch.from_numpy(d_output_rels).long()
d_masks = torch.from_numpy(d_masks).byte()
if use_gpu:
e_input_words = e_input_words.cuda()
e_input_poses = e_input_poses.cuda()
e_masks = e_masks.cuda()
d_inputs = d_inputs.cuda()
d_outputs = d_outputs.cuda()
d_output_nucs = d_output_nucs.cuda()
d_output_rels = d_output_rels.cuda()
d_masks = d_masks.cuda()
yield (e_input_words, e_input_poses, e_masks), (d_inputs, d_masks), (d_outputs, d_output_nucs, d_output_rels)
offset = offset + batch_size
def parse_and_eval(dataset, model):
model.eval()
parser = PartitionPtrParser(model)
golds = list(filter(lambda d: d.root_relation(), chain(*dataset)))
num_instances = len(golds)
strips = []
for paragraph in golds:
edus = []
for edu in paragraph.edus():
edu_copy = EDU([TEXT(edu.text)])
setattr(edu_copy, "words", edu.words)
setattr(edu_copy, "tags", edu.tags)
edus.append(edu_copy)
strips.append(edus)
parses = []
for edus in strips:
parse = parser.parse(edus)
parses.append(parse)
return num_instances, parse_eval(parses, golds)
def model_score(scores):
eval_score = sum(score[2] for score in scores)
return eval_score
def main(args):
random.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
cdtb = CDTB(args.data, "TRAIN", "VALIDATE", "TEST", ctb_dir=args.ctb_dir, preprocess=True, cache_dir=args.cache_dir)
word_vocab, pos_vocab, nuc_label, rel_label = build_vocab(cdtb.train)
trainset = numericalize(cdtb.train, word_vocab, pos_vocab, nuc_label, rel_label)
logging.info("num of instances trainset: %d" % len(trainset))
logging.info("args: %s" % str(args))
model = PartitionPtr(hidden_size=args.hidden_size, dropout=args.dropout,
word_vocab=word_vocab, pos_vocab=pos_vocab, nuc_label=nuc_label, rel_label=rel_label,
pretrained=args.pretrained, w2v_size=args.w2v_size, w2v_freeze=args.w2v_freeze,
pos_size=args.pos_size,
split_mlp_size=args.split_mlp_size, nuc_mlp_size=args.nuc_mlp_size,
rel_mlp_size=args.rel_mlp_size,
use_gpu=args.use_gpu)
if args.use_gpu:
model.cuda()
logging.info("model:\n%s" % str(model))
niter = 0
log_splits_loss = 0.
log_nucs_loss = 0.
log_rels_loss = 0.
log_loss = 0.
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2)
writer = SummaryWriter(args.log_dir)
logging.info("hint: run 'tensorboard --logdir %s' to observe training status" % args.log_dir)
best_model = None
best_model_score = 0.
for nepoch in range(1, args.epoch + 1):
batch_iter = gen_batch_iter(trainset, args.batch_size, args.use_gpu)
for nbatch, (e_inputs, d_inputs, grounds) in enumerate(batch_iter, start=1):
niter += 1
model.train()
optimizer.zero_grad()
splits_loss, nucs_loss, rels_loss = model.loss(e_inputs, d_inputs, grounds)
loss = args.a_split_loss * splits_loss + args.a_nuclear_loss * nucs_loss + args.a_relation_loss * rels_loss
loss.backward()
optimizer.step()
log_splits_loss += splits_loss.item()
log_nucs_loss += nucs_loss.item()
log_rels_loss += rels_loss.item()
log_loss += loss.item()
if niter % args.log_every == 0:
logging.info("[iter %-6d]epoch: %-3d, batch %-5d,"
"train splits loss:%.5f, nuclear loss %.5f, relation loss %.5f, loss %.5f" %
(niter, nepoch, nbatch, log_splits_loss, log_nucs_loss, log_rels_loss, log_loss))
writer.add_scalar("train/split_loss", log_splits_loss, niter)
writer.add_scalar("train/nuclear_loss", log_nucs_loss, niter)
writer.add_scalar("train/relation_loss", log_rels_loss, niter)
writer.add_scalar("train/loss", log_loss, niter)
log_splits_loss = 0.
log_nucs_loss = 0.
log_rels_loss = 0.
log_loss = 0.
if niter % args.validate_every == 0:
num_instances, validate_scores = parse_and_eval(cdtb.validate, model)
logging.info("validation on %d instances" % num_instances)
logging.info(gen_parse_report(*validate_scores))
writer.add_scalar("validate/span_f1", validate_scores[0][2], niter)
writer.add_scalar("validate/nuclear_f1", validate_scores[1][2], niter)
writer.add_scalar("validate/coarse_relation_f1", validate_scores[2][2], niter)
writer.add_scalar("validate/fine_relation_f1", validate_scores[3][2], niter)
new_model_score = model_score(validate_scores)
if new_model_score > best_model_score:
best_model_score = new_model_score
best_model = copy.deepcopy(model)
logging.info("test on new best model")
num_instances, test_scores = parse_and_eval(cdtb.test, best_model)
logging.info("test on %d instances" % num_instances)
logging.info(gen_parse_report(*test_scores))
writer.add_scalar("test/span_f1", test_scores[0][2], niter)
writer.add_scalar("test/nuclear_f1", test_scores[1][2], niter)
writer.add_scalar("test/coarse_relation_f1", test_scores[2][2], niter)
writer.add_scalar("test/fine_relation_f1", test_scores[3][2], niter)
if best_model:
logging.info("final test result")
num_instances, test_scores = parse_and_eval(cdtb.test, best_model)
logging.info("test on %d instances" % num_instances)
logging.info(gen_parse_report(*test_scores))
logging.info("save best model to %s" % args.model_save)
with open(args.model_save, "wb+") as model_fd:
torch.save(best_model, model_fd)
writer.close()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("--data", default="data/CDTB")
arg_parser.add_argument("--ctb_dir", default="data/CTB")
arg_parser.add_argument("--cache_dir", default="data/cache")
arg_parser.add_argument("-hidden_size", default=512, type=int)
arg_parser.add_argument("-dropout", default=0.33, type=float)
arg_parser.add_argument("-pretrained", default="data/pretrained/sgns.renmin.word")
arg_parser.add_argument("-w2v_size", type=int)
arg_parser.add_argument("-pos_size", default=30, type=int)
arg_parser.add_argument("-split_mlp_size", default=64, type=int)
arg_parser.add_argument("-nuc_mlp_size", default=32, type=int)
arg_parser.add_argument("-rel_mlp_size", default=128, type=int)
arg_parser.add_argument("--w2v_freeze", dest="w2v_freeze", action="store_true")
arg_parser.set_defaults(w2v_freeze=True)
arg_parser.add_argument("-epoch", default=20, type=int)
arg_parser.add_argument("-batch_size", default=64, type=int)
arg_parser.add_argument("-lr", default=0.001, type=float)
arg_parser.add_argument("-l2", default=0.0, type=float)
arg_parser.add_argument("-log_every", default=10, type=int)
arg_parser.add_argument("-validate_every", default=10, type=int)
arg_parser.add_argument("-a_split_loss", default=0.3, type=float)
arg_parser.add_argument("-a_nuclear_loss", default=1.0, type=float)
arg_parser.add_argument("-a_relation_loss", default=1.0, type=float)
arg_parser.add_argument("-log_dir", default="data/log")
arg_parser.add_argument("-model_save", default="data/models/treebuilder.partptr.model")
arg_parser.add_argument("--seed", default=21, type=int)
arg_parser.add_argument("--use_gpu", dest="use_gpu", action="store_true")
arg_parser.set_defaults(use_gpu=True)
main(arg_parser.parse_args())
| true
| true
|
f71a5e69e97dfd4fa78fe7475a89e51f71597592
| 2,911
|
py
|
Python
|
migrations/env.py
|
kvshravan/sample-platform
|
f3cf050d21df9d8e4b3746a5a32d273d839c4898
|
[
"0BSD"
] | null | null | null |
migrations/env.py
|
kvshravan/sample-platform
|
f3cf050d21df9d8e4b3746a5a32d273d839c4898
|
[
"0BSD"
] | null | null | null |
migrations/env.py
|
kvshravan/sample-platform
|
f3cf050d21df9d8e4b3746a5a32d273d839c4898
|
[
"0BSD"
] | null | null | null |
from __future__ import with_statement
import logging
from logging.config import fileConfig
from alembic import context
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from flask import current_app
from sqlalchemy import engine_from_config, pool
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
logger = logging.getLogger('alembic.env')
config.set_main_option(
'sqlalchemy.url', current_app.config.get( # type: ignore
'SQLALCHEMY_DATABASE_URI').replace('%', '%%'))
target_metadata = current_app.extensions['migrate'].db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True
)
with context.begin_transaction():
context.run_migrations()
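# Hedged usage note (not part of the original file): offline mode is typically reached
# by asking Alembic/Flask-Migrate to emit SQL instead of executing it, e.g.
#   alembic upgrade head --sql        (or: flask db upgrade --sql)
# which makes context.is_offline_mode() true and routes through run_migrations_offline().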
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
# this callback is used to prevent an auto-migration from being generated
# when there are no changes to the schema
# reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html
def process_revision_directives(context, revision, directives):
if getattr(config.cmd_opts, 'autogenerate', False):
script = directives[0]
if script.upgrade_ops.is_empty():
directives[:] = []
logger.info('No changes in schema detected.')
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata,
process_revision_directives=process_revision_directives,
**current_app.extensions['migrate'].configure_args
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| 30.642105
| 77
| 0.710752
|
from __future__ import with_statement
import logging
from logging.config import fileConfig
from alembic import context
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from flask import current_app
from sqlalchemy import engine_from_config, pool
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
logger = logging.getLogger('alembic.env')
config.set_main_option(
'sqlalchemy.url', current_app.config.get( # type: ignore
'SQLALCHEMY_DATABASE_URI').replace('%', '%%'))
target_metadata = current_app.extensions['migrate'].db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
# this callback is used to prevent an auto-migration from being generated
# when there are no changes to the schema
# reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html
def process_revision_directives(context, revision, directives):
if getattr(config.cmd_opts, 'autogenerate', False):
script = directives[0]
if script.upgrade_ops.is_empty():
directives[:] = []
logger.info('No changes in schema detected.')
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata,
process_revision_directives=process_revision_directives,
**current_app.extensions['migrate'].configure_args
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| true
| true
|
f71a5edef5f756dd35102d293503bdea8a7fd387
| 973
|
py
|
Python
|
v1/blacklist.py
|
benjaveri/phonescreen
|
dd34df8e2f66c59032089f751223e651fd263cac
|
[
"BSD-3-Clause"
] | 1
|
2020-05-30T23:24:55.000Z
|
2020-05-30T23:24:55.000Z
|
v1/blacklist.py
|
benjaveri/phonescreen
|
dd34df8e2f66c59032089f751223e651fd263cac
|
[
"BSD-3-Clause"
] | null | null | null |
v1/blacklist.py
|
benjaveri/phonescreen
|
dd34df8e2f66c59032089f751223e651fd263cac
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python2
# BSD 3-Clause License -> see /LICENSE
# Copyright (c) 2017-2020 by Ben de Waal, All rights reserved.
#
import sys
from v1.interfaces import *
# blacklist.py [del|delete|remove] [number]
PRIMARY = "blacklist"
SECONDARY = "whitelist"
REMOVE = (len(sys.argv) > 2) and sys.argv[1] in ["del","delete","remove"]
NUMBER = (sys.argv[2] if REMOVE else sys.argv[1]) if len(sys.argv) > 1 else None
db = Database("numbers.sqlite")
if NUMBER:
with db as conn:
conn.execute("DELETE FROM %s WHERE number=?" % SECONDARY,(NUMBER,))
if REMOVE:
conn.execute("DELETE FROM %s WHERE number=?" % PRIMARY, (NUMBER,))
else:
conn.execute("INSERT OR REPLACE INTO %s(number) VALUES (?)" % PRIMARY,(NUMBER,))
print "%s:" % PRIMARY
with db as conn:
for row in conn.execute("SELECT DISTINCT p.number,h.name FROM %s AS p LEFT JOIN history AS h ON p.number=h.number" % PRIMARY):
print " %s %s" % (row[0], row[1])
| 33.551724
| 130
| 0.642343
|
import sys
from v1.interfaces import *
PRIMARY = "blacklist"
SECONDARY = "whitelist"
REMOVE = (len(sys.argv) > 2) and sys.argv[1] in ["del","delete","remove"]
NUMBER = (sys.argv[2] if REMOVE else sys.argv[1]) if len(sys.argv) > 1 else None
db = Database("numbers.sqlite")
if NUMBER:
with db as conn:
conn.execute("DELETE FROM %s WHERE number=?" % SECONDARY,(NUMBER,))
if REMOVE:
conn.execute("DELETE FROM %s WHERE number=?" % PRIMARY, (NUMBER,))
else:
conn.execute("INSERT OR REPLACE INTO %s(number) VALUES (?)" % PRIMARY,(NUMBER,))
print "%s:" % PRIMARY
with db as conn:
for row in conn.execute("SELECT DISTINCT p.number,h.name FROM %s AS p LEFT JOIN history AS h ON p.number=h.number" % PRIMARY):
print " %s %s" % (row[0], row[1])
| false
| true
|
f71a5f10643eea16f3e9e3317d0eb53ee89dcc29
| 4,484
|
py
|
Python
|
setup.py
|
btcdrak/mitmproxy
|
cacee3871c6a9f0be7127f3c790e09a1daaf8490
|
[
"MIT"
] | 1
|
2018-03-31T17:16:07.000Z
|
2018-03-31T17:16:07.000Z
|
setup.py
|
btcdrak/mitmproxy
|
cacee3871c6a9f0be7127f3c790e09a1daaf8490
|
[
"MIT"
] | null | null | null |
setup.py
|
btcdrak/mitmproxy
|
cacee3871c6a9f0be7127f3c790e09a1daaf8490
|
[
"MIT"
] | 4
|
2018-04-18T13:17:01.000Z
|
2021-02-21T17:08:33.000Z
|
from setuptools import setup, find_packages
from codecs import open
import os
from netlib import version
# Based on https://github.com/pypa/sampleproject/blob/master/setup.py
# and https://python-packaging-user-guide.readthedocs.org/
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name="mitmproxy",
version=version.VERSION,
description="An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.",
long_description=long_description,
url="http://mitmproxy.org",
author="Aldo Cortesi",
author_email="aldo@corte.si",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Environment :: Console :: Curses",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Security",
"Topic :: Internet",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: Proxy Servers",
"Topic :: Software Development :: Testing"
],
packages=find_packages(include=[
"mitmproxy", "mitmproxy.*",
"pathod", "pathod.*",
"netlib", "netlib.*"
]),
include_package_data=True,
entry_points={
'console_scripts': [
"mitmproxy = mitmproxy.main:mitmproxy",
"mitmdump = mitmproxy.main:mitmdump",
"mitmweb = mitmproxy.main:mitmweb",
"pathod = pathod.pathod_cmdline:go_pathod",
"pathoc = pathod.pathoc_cmdline:go_pathoc"
]
},
# https://packaging.python.org/en/latest/requirements/#install-requires
# It is not considered best practice to use install_requires to pin dependencies to specific versions.
install_requires=[
"backports.ssl_match_hostname>=3.5.0.1, <3.6",
"blinker>=1.4, <1.5",
"click>=6.2, <7.0",
"certifi>=2015.11.20.1", # no semver here - this should always be on the last release!
"configargparse>=0.10, <0.11",
"construct>=2.5.2, <2.6",
"cryptography>=1.3, <1.5",
"cssutils>=1.0.1, <1.1",
"Flask>=0.10.1, <0.12",
"h2>=2.4.0, <3",
"html2text>=2016.1.8, <=2016.5.29",
"hyperframe>=4.0.1, <5",
"jsbeautifier>=1.6.3, <1.7",
"lxml>=3.5.0, <=3.6.0", # no wheels for 3.6.1 yet.
"Pillow>=3.2, <3.4",
"passlib>=1.6.5, <1.7",
"pyasn1>=0.1.9, <0.2",
"pyOpenSSL>=16.0, <17.0",
"pyparsing>=2.1.3, <2.2",
"pyperclip>=1.5.22, <1.6",
"requests>=2.9.1, <2.12",
"six>=1.10, <1.11",
"tornado>=4.3, <4.5",
"urwid>=1.3.1, <1.4",
"watchdog>=0.8.3, <0.9",
"brotlipy>=0.3.0, <0.5",
],
extras_require={
':sys_platform == "win32"': [
"pydivert>=0.0.7, <0.1",
],
':sys_platform != "win32"': [
],
# Do not use a range operator here: https://bitbucket.org/pypa/setuptools/issues/380
# Ubuntu Trusty and other still ship with setuptools < 17.1
':python_version == "2.7"': [
"enum34>=1.0.4, <2",
"ipaddress>=1.0.15, <1.1",
"typing==3.5.2.2",
],
'dev': [
"tox>=2.3, <3",
"mock>=2.0, <2.1",
"pytest>=2.8.7, <3",
"pytest-cov>=2.2.1, <3",
"pytest-timeout>=1.0.0, <2",
"pytest-xdist>=1.14, <2",
"sphinx>=1.3.5, <1.5",
"sphinx-autobuild>=0.5.2, <0.7",
"sphinxcontrib-documentedlist>=0.4.0, <0.5",
"sphinx_rtd_theme>=0.1.9, <0.2",
],
'contentviews': [
# TODO: Find Python 3 replacements
# "protobuf>=2.6.1, <2.7",
# "pyamf>=0.8.0, <0.9",
],
'examples': [
"beautifulsoup4>=4.4.1, <4.6",
"pytz>=2015.07.0, <=2016.6.1",
]
}
)
| 35.587302
| 125
| 0.533898
|
from setuptools import setup, find_packages
from codecs import open
import os
from netlib import version
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name="mitmproxy",
version=version.VERSION,
description="An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.",
long_description=long_description,
url="http://mitmproxy.org",
author="Aldo Cortesi",
author_email="aldo@corte.si",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Environment :: Console :: Curses",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Security",
"Topic :: Internet",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: Proxy Servers",
"Topic :: Software Development :: Testing"
],
packages=find_packages(include=[
"mitmproxy", "mitmproxy.*",
"pathod", "pathod.*",
"netlib", "netlib.*"
]),
include_package_data=True,
entry_points={
'console_scripts': [
"mitmproxy = mitmproxy.main:mitmproxy",
"mitmdump = mitmproxy.main:mitmdump",
"mitmweb = mitmproxy.main:mitmweb",
"pathod = pathod.pathod_cmdline:go_pathod",
"pathoc = pathod.pathoc_cmdline:go_pathoc"
]
},
    install_requires=[
"backports.ssl_match_hostname>=3.5.0.1, <3.6",
"blinker>=1.4, <1.5",
"click>=6.2, <7.0",
"certifi>=2015.11.20.1",
"configargparse>=0.10, <0.11",
"construct>=2.5.2, <2.6",
"cryptography>=1.3, <1.5",
"cssutils>=1.0.1, <1.1",
"Flask>=0.10.1, <0.12",
"h2>=2.4.0, <3",
"html2text>=2016.1.8, <=2016.5.29",
"hyperframe>=4.0.1, <5",
"jsbeautifier>=1.6.3, <1.7",
"lxml>=3.5.0, <=3.6.0",
"Pillow>=3.2, <3.4",
"passlib>=1.6.5, <1.7",
"pyasn1>=0.1.9, <0.2",
"pyOpenSSL>=16.0, <17.0",
"pyparsing>=2.1.3, <2.2",
"pyperclip>=1.5.22, <1.6",
"requests>=2.9.1, <2.12",
"six>=1.10, <1.11",
"tornado>=4.3, <4.5",
"urwid>=1.3.1, <1.4",
"watchdog>=0.8.3, <0.9",
"brotlipy>=0.3.0, <0.5",
],
extras_require={
':sys_platform == "win32"': [
"pydivert>=0.0.7, <0.1",
],
':sys_platform != "win32"': [
],
':python_version == "2.7"': [
"enum34>=1.0.4, <2",
"ipaddress>=1.0.15, <1.1",
"typing==3.5.2.2",
],
'dev': [
"tox>=2.3, <3",
"mock>=2.0, <2.1",
"pytest>=2.8.7, <3",
"pytest-cov>=2.2.1, <3",
"pytest-timeout>=1.0.0, <2",
"pytest-xdist>=1.14, <2",
"sphinx>=1.3.5, <1.5",
"sphinx-autobuild>=0.5.2, <0.7",
"sphinxcontrib-documentedlist>=0.4.0, <0.5",
"sphinx_rtd_theme>=0.1.9, <0.2",
],
'contentviews': [
],
'examples': [
"beautifulsoup4>=4.4.1, <4.6",
"pytz>=2015.07.0, <=2016.6.1",
]
}
)
| true
| true
|
f71a5f3662e8e2e441c743a6c1f62a562f34d623
| 2,570
|
py
|
Python
|
homeassistant/components/fibaro/binary_sensor.py
|
VirtualL/home-assistant
|
301829d02be8d865ab46c8901ac046d060849320
|
[
"Apache-2.0"
] | 2
|
2017-10-26T19:43:55.000Z
|
2017-12-30T23:29:00.000Z
|
homeassistant/components/fibaro/binary_sensor.py
|
VirtualL/home-assistant
|
301829d02be8d865ab46c8901ac046d060849320
|
[
"Apache-2.0"
] | 3
|
2021-09-08T03:34:57.000Z
|
2022-03-12T00:59:48.000Z
|
homeassistant/components/fibaro/binary_sensor.py
|
VirtualL/home-assistant
|
301829d02be8d865ab46c8901ac046d060849320
|
[
"Apache-2.0"
] | 1
|
2019-06-19T07:43:11.000Z
|
2019-06-19T07:43:11.000Z
|
"""Support for Fibaro binary sensors."""
import logging
from homeassistant.components.binary_sensor import (
ENTITY_ID_FORMAT, BinarySensorDevice)
from homeassistant.const import CONF_DEVICE_CLASS, CONF_ICON
from . import FIBARO_DEVICES, FibaroDevice
DEPENDENCIES = ['fibaro']
_LOGGER = logging.getLogger(__name__)
SENSOR_TYPES = {
'com.fibaro.floodSensor': ['Flood', 'mdi:water', 'flood'],
'com.fibaro.motionSensor': ['Motion', 'mdi:run', 'motion'],
'com.fibaro.doorSensor': ['Door', 'mdi:window-open', 'door'],
'com.fibaro.windowSensor': ['Window', 'mdi:window-open', 'window'],
'com.fibaro.smokeSensor': ['Smoke', 'mdi:smoking', 'smoke'],
'com.fibaro.FGMS001': ['Motion', 'mdi:run', 'motion'],
'com.fibaro.heatDetector': ['Heat', 'mdi:fire', 'heat'],
}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Perform the setup for Fibaro controller devices."""
if discovery_info is None:
return
add_entities(
[FibaroBinarySensor(device)
for device in hass.data[FIBARO_DEVICES]['binary_sensor']], True)
class FibaroBinarySensor(FibaroDevice, BinarySensorDevice):
"""Representation of a Fibaro Binary Sensor."""
def __init__(self, fibaro_device):
"""Initialize the binary_sensor."""
self._state = None
super().__init__(fibaro_device)
self.entity_id = ENTITY_ID_FORMAT.format(self.ha_id)
stype = None
devconf = fibaro_device.device_config
if fibaro_device.type in SENSOR_TYPES:
stype = fibaro_device.type
elif fibaro_device.baseType in SENSOR_TYPES:
stype = fibaro_device.baseType
if stype:
self._device_class = SENSOR_TYPES[stype][2]
self._icon = SENSOR_TYPES[stype][1]
else:
self._device_class = None
self._icon = None
# device_config overrides:
self._device_class = devconf.get(CONF_DEVICE_CLASS,
self._device_class)
self._icon = devconf.get(CONF_ICON, self._icon)
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def device_class(self):
"""Return the device class of the sensor."""
return self._device_class
@property
def is_on(self):
"""Return true if sensor is on."""
return self._state
def update(self):
"""Get the latest data and update the state."""
self._state = self.current_binary_state
| 32.948718
| 73
| 0.643191
|
import logging
from homeassistant.components.binary_sensor import (
ENTITY_ID_FORMAT, BinarySensorDevice)
from homeassistant.const import CONF_DEVICE_CLASS, CONF_ICON
from . import FIBARO_DEVICES, FibaroDevice
DEPENDENCIES = ['fibaro']
_LOGGER = logging.getLogger(__name__)
SENSOR_TYPES = {
'com.fibaro.floodSensor': ['Flood', 'mdi:water', 'flood'],
'com.fibaro.motionSensor': ['Motion', 'mdi:run', 'motion'],
'com.fibaro.doorSensor': ['Door', 'mdi:window-open', 'door'],
'com.fibaro.windowSensor': ['Window', 'mdi:window-open', 'window'],
'com.fibaro.smokeSensor': ['Smoke', 'mdi:smoking', 'smoke'],
'com.fibaro.FGMS001': ['Motion', 'mdi:run', 'motion'],
'com.fibaro.heatDetector': ['Heat', 'mdi:fire', 'heat'],
}
def setup_platform(hass, config, add_entities, discovery_info=None):
if discovery_info is None:
return
add_entities(
[FibaroBinarySensor(device)
for device in hass.data[FIBARO_DEVICES]['binary_sensor']], True)
class FibaroBinarySensor(FibaroDevice, BinarySensorDevice):
def __init__(self, fibaro_device):
self._state = None
super().__init__(fibaro_device)
self.entity_id = ENTITY_ID_FORMAT.format(self.ha_id)
stype = None
devconf = fibaro_device.device_config
if fibaro_device.type in SENSOR_TYPES:
stype = fibaro_device.type
elif fibaro_device.baseType in SENSOR_TYPES:
stype = fibaro_device.baseType
if stype:
self._device_class = SENSOR_TYPES[stype][2]
self._icon = SENSOR_TYPES[stype][1]
else:
self._device_class = None
self._icon = None
self._device_class = devconf.get(CONF_DEVICE_CLASS,
self._device_class)
self._icon = devconf.get(CONF_ICON, self._icon)
@property
def icon(self):
return self._icon
@property
def device_class(self):
return self._device_class
@property
def is_on(self):
return self._state
def update(self):
self._state = self.current_binary_state
| true
| true
|
f71a5f6bde441477b83381af68fd302a858044d3
| 338
|
py
|
Python
|
fixture_packages/no_mp/setup.py
|
DuncanBetts/morepath
|
acad10489b051df9c512f6735a9338854745a599
|
[
"BSD-3-Clause"
] | null | null | null |
fixture_packages/no_mp/setup.py
|
DuncanBetts/morepath
|
acad10489b051df9c512f6735a9338854745a599
|
[
"BSD-3-Clause"
] | null | null | null |
fixture_packages/no_mp/setup.py
|
DuncanBetts/morepath
|
acad10489b051df9c512f6735a9338854745a599
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from setuptools import setup, find_packages
setup(name='no_mp',
version = '0.1.dev0',
description="No Mp Test Fixture",
author="Martijn Faassen",
author_email="faassen@startifact.com",
license="BSD",
packages=find_packages(),
zip_safe=False,
install_requires=[
]
)
| 22.533333
| 44
| 0.62426
|
import os
from setuptools import setup, find_packages
setup(name='no_mp',
version = '0.1.dev0',
description="No Mp Test Fixture",
author="Martijn Faassen",
author_email="faassen@startifact.com",
license="BSD",
packages=find_packages(),
zip_safe=False,
install_requires=[
]
)
| true
| true
|
f71a60c2e83e89f0d85d50940ea141974ce4e00d
| 5,431
|
py
|
Python
|
homeassistant/components/geo_rss_events/sensor.py
|
VirtualL/home-assistant
|
301829d02be8d865ab46c8901ac046d060849320
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/geo_rss_events/sensor.py
|
VirtualL/home-assistant
|
301829d02be8d865ab46c8901ac046d060849320
|
[
"Apache-2.0"
] | 3
|
2021-09-08T03:34:57.000Z
|
2022-03-12T00:59:48.000Z
|
homeassistant/components/geo_rss_events/sensor.py
|
VirtualL/home-assistant
|
301829d02be8d865ab46c8901ac046d060849320
|
[
"Apache-2.0"
] | null | null | null |
"""
Generic GeoRSS events service.
Retrieves current events (typically incidents or alerts) in GeoRSS format, and
shows information on events filtered by distance to the HA instance's location
and grouped by category.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.geo_rss_events/
"""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_UNIT_OF_MEASUREMENT, CONF_NAME,
CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_URL)
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['georss_generic_client==0.2']
_LOGGER = logging.getLogger(__name__)
ATTR_CATEGORY = 'category'
ATTR_DISTANCE = 'distance'
ATTR_TITLE = 'title'
CONF_CATEGORIES = 'categories'
DEFAULT_ICON = 'mdi:alert'
DEFAULT_NAME = "Event Service"
DEFAULT_RADIUS_IN_KM = 20.0
DEFAULT_UNIT_OF_MEASUREMENT = 'Events'
DOMAIN = 'geo_rss_events'
SCAN_INTERVAL = timedelta(minutes=5)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_URL): cv.string,
vol.Optional(CONF_LATITUDE): cv.latitude,
vol.Optional(CONF_LONGITUDE): cv.longitude,
vol.Optional(CONF_RADIUS, default=DEFAULT_RADIUS_IN_KM): vol.Coerce(float),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_CATEGORIES, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_UNIT_OF_MEASUREMENT,
default=DEFAULT_UNIT_OF_MEASUREMENT): cv.string,
})
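# Illustrative configuration accepted by the schema above (values are made up),
# placed under `sensor:` in configuration.yaml:
#   - platform: geo_rss_events
#     url: http://example.org/georss-feed.xml
#     radius: 25
#     categories:
#       - Earthquake
#     unit_of_measurement: Alerts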
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the GeoRSS component."""
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
url = config.get(CONF_URL)
radius_in_km = config.get(CONF_RADIUS)
name = config.get(CONF_NAME)
categories = config.get(CONF_CATEGORIES)
unit_of_measurement = config.get(CONF_UNIT_OF_MEASUREMENT)
_LOGGER.debug("latitude=%s, longitude=%s, url=%s, radius=%s",
latitude, longitude, url, radius_in_km)
# Create all sensors based on categories.
devices = []
if not categories:
device = GeoRssServiceSensor((latitude, longitude), url,
radius_in_km, None, name,
unit_of_measurement)
devices.append(device)
else:
for category in categories:
device = GeoRssServiceSensor((latitude, longitude), url,
radius_in_km, category, name,
unit_of_measurement)
devices.append(device)
add_entities(devices, True)
class GeoRssServiceSensor(Entity):
"""Representation of a Sensor."""
def __init__(self, coordinates, url, radius, category, service_name,
unit_of_measurement):
"""Initialize the sensor."""
self._category = category
self._service_name = service_name
self._state = None
self._state_attributes = None
self._unit_of_measurement = unit_of_measurement
from georss_client.generic_feed import GenericFeed
self._feed = GenericFeed(coordinates, url, filter_radius=radius,
filter_categories=None if not category
else [category])
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format(self._service_name,
'Any' if self._category is None
else self._category)
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
@property
def icon(self):
"""Return the default icon to use in the frontend."""
return DEFAULT_ICON
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._state_attributes
def update(self):
"""Update this sensor from the GeoRSS service."""
import georss_client
status, feed_entries = self._feed.update()
if status == georss_client.UPDATE_OK:
_LOGGER.debug("Adding events to sensor %s: %s", self.entity_id,
feed_entries)
self._state = len(feed_entries)
# And now compute the attributes from the filtered events.
matrix = {}
for entry in feed_entries:
matrix[entry.title] = '{:.0f}km'.format(
entry.distance_to_home)
self._state_attributes = matrix
elif status == georss_client.UPDATE_OK_NO_DATA:
_LOGGER.debug("Update successful, but no data received from %s",
self._feed)
# Don't change the state or state attributes.
else:
_LOGGER.warning("Update not successful, no data received from %s",
self._feed)
# If no events were found due to an error then just set state to
# zero.
self._state = 0
self._state_attributes = {}
| 35.730263
| 79
| 0.645369
|
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_UNIT_OF_MEASUREMENT, CONF_NAME,
CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_URL)
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['georss_generic_client==0.2']
_LOGGER = logging.getLogger(__name__)
ATTR_CATEGORY = 'category'
ATTR_DISTANCE = 'distance'
ATTR_TITLE = 'title'
CONF_CATEGORIES = 'categories'
DEFAULT_ICON = 'mdi:alert'
DEFAULT_NAME = "Event Service"
DEFAULT_RADIUS_IN_KM = 20.0
DEFAULT_UNIT_OF_MEASUREMENT = 'Events'
DOMAIN = 'geo_rss_events'
SCAN_INTERVAL = timedelta(minutes=5)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_URL): cv.string,
vol.Optional(CONF_LATITUDE): cv.latitude,
vol.Optional(CONF_LONGITUDE): cv.longitude,
vol.Optional(CONF_RADIUS, default=DEFAULT_RADIUS_IN_KM): vol.Coerce(float),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_CATEGORIES, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_UNIT_OF_MEASUREMENT,
default=DEFAULT_UNIT_OF_MEASUREMENT): cv.string,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
url = config.get(CONF_URL)
radius_in_km = config.get(CONF_RADIUS)
name = config.get(CONF_NAME)
categories = config.get(CONF_CATEGORIES)
unit_of_measurement = config.get(CONF_UNIT_OF_MEASUREMENT)
_LOGGER.debug("latitude=%s, longitude=%s, url=%s, radius=%s",
latitude, longitude, url, radius_in_km)
devices = []
if not categories:
device = GeoRssServiceSensor((latitude, longitude), url,
radius_in_km, None, name,
unit_of_measurement)
devices.append(device)
else:
for category in categories:
device = GeoRssServiceSensor((latitude, longitude), url,
radius_in_km, category, name,
unit_of_measurement)
devices.append(device)
add_entities(devices, True)
class GeoRssServiceSensor(Entity):
def __init__(self, coordinates, url, radius, category, service_name,
unit_of_measurement):
self._category = category
self._service_name = service_name
self._state = None
self._state_attributes = None
self._unit_of_measurement = unit_of_measurement
from georss_client.generic_feed import GenericFeed
self._feed = GenericFeed(coordinates, url, filter_radius=radius,
filter_categories=None if not category
else [category])
@property
def name(self):
return '{} {}'.format(self._service_name,
'Any' if self._category is None
else self._category)
@property
def state(self):
return self._state
@property
def unit_of_measurement(self):
return self._unit_of_measurement
@property
def icon(self):
return DEFAULT_ICON
@property
def device_state_attributes(self):
return self._state_attributes
def update(self):
import georss_client
status, feed_entries = self._feed.update()
if status == georss_client.UPDATE_OK:
_LOGGER.debug("Adding events to sensor %s: %s", self.entity_id,
feed_entries)
self._state = len(feed_entries)
matrix = {}
for entry in feed_entries:
matrix[entry.title] = '{:.0f}km'.format(
entry.distance_to_home)
self._state_attributes = matrix
elif status == georss_client.UPDATE_OK_NO_DATA:
_LOGGER.debug("Update successful, but no data received from %s",
self._feed)
else:
_LOGGER.warning("Update not successful, no data received from %s",
self._feed)
# If no events were found due to an error then just set state to
# zero.
self._state = 0
self._state_attributes = {}
| true
| true
|
f71a60d6ac54cd0f6a8035a072455dd7fe920d40
| 3,670
|
py
|
Python
|
akshare/stock/stock_rank_forecast.py
|
J-Z-Z/akshare
|
0a9ca71b381a272e2f56211e455ff2493dfed17a
|
[
"MIT"
] | 721
|
2021-09-21T12:10:33.000Z
|
2022-03-31T09:47:01.000Z
|
akshare/stock/stock_rank_forecast.py
|
J-Z-Z/akshare
|
0a9ca71b381a272e2f56211e455ff2493dfed17a
|
[
"MIT"
] | 135
|
2021-09-21T12:07:54.000Z
|
2022-03-31T14:15:36.000Z
|
akshare/stock/stock_rank_forecast.py
|
J-Z-Z/akshare
|
0a9ca71b381a272e2f56211e455ff2493dfed17a
|
[
"MIT"
] | 234
|
2021-09-21T12:16:27.000Z
|
2022-03-31T09:47:04.000Z
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/9/12 18:29
Desc: cninfo.com.cn - Data Center - Rating Forecasts - Investment Ratings
http://webapi.cninfo.com.cn/#/thematicStatistics?name=%E6%8A%95%E8%B5%84%E8%AF%84%E7%BA%A7
"""
import time
from py_mini_racer import py_mini_racer
import requests
import pandas as pd
js_str = """
function mcode(input) {
var keyStr = "ABCDEFGHIJKLMNOP" + "QRSTUVWXYZabcdef" + "ghijklmnopqrstuv" + "wxyz0123456789+/" + "=";
var output = "";
var chr1, chr2, chr3 = "";
var enc1, enc2, enc3, enc4 = "";
var i = 0;
do {
chr1 = input.charCodeAt(i++);
chr2 = input.charCodeAt(i++);
chr3 = input.charCodeAt(i++);
enc1 = chr1 >> 2;
enc2 = ((chr1 & 3) << 4) | (chr2 >> 4);
enc3 = ((chr2 & 15) << 2) | (chr3 >> 6);
enc4 = chr3 & 63;
if (isNaN(chr2)) {
enc3 = enc4 = 64;
} else if (isNaN(chr3)) {
enc4 = 64;
}
output = output + keyStr.charAt(enc1) + keyStr.charAt(enc2)
+ keyStr.charAt(enc3) + keyStr.charAt(enc4);
chr1 = chr2 = chr3 = "";
enc1 = enc2 = enc3 = enc4 = "";
} while (i < input.length);
return output;
}
"""
def stock_rank_forecast_cninfo(date: str = "20210910") -> pd.DataFrame:
"""
    cninfo.com.cn - Data Center - Rating Forecasts - Investment Ratings
http://webapi.cninfo.com.cn/#/thematicStatistics?name=%E6%8A%95%E8%B5%84%E8%AF%84%E7%BA%A7
    :param date: query date
:type date: str
    :return: investment ratings
:rtype: pandas.DataFrame
"""
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1089"
params = {"tdate": "-".join([date[:4], date[4:6], date[6:]])}
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
r = requests.post(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = [
"证券简称",
"发布日期",
"前一次投资评级",
"评级变化",
"目标价格-上限",
"是否首次评级",
"投资评级",
"研究员名称",
"研究机构简称",
"目标价格-下限",
"证券代码",
]
temp_df = temp_df[[
"证券代码",
"证券简称",
"发布日期",
"研究机构简称",
"研究员名称",
"投资评级",
"是否首次评级",
"评级变化",
"前一次投资评级",
"目标价格-下限",
"目标价格-上限",
]]
temp_df["目标价格-上限"] = pd.to_numeric(temp_df["目标价格-上限"], errors="coerce")
temp_df["目标价格-下限"] = pd.to_numeric(temp_df["目标价格-下限"], errors="coerce")
return temp_df
if __name__ == "__main__":
stock_rank_forecast_cninfo_df = stock_rank_forecast_cninfo(date="20210907")
print(stock_rank_forecast_cninfo_df)
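# --- Illustrative sketch (not part of the original module) ---
# For ASCII input such as the Unix timestamp string, the embedded JavaScript
# `mcode` above behaves like a plain Base64 encoder, so the same header value
# can be reproduced with the standard library.  This is only an observation
# and a sketch; the module itself keeps using py_mini_racer to stay faithful
# to the site's own JavaScript.
def _mcode_py(text: str) -> str:
    import base64
    return base64.b64encode(text.encode("ascii")).decode("ascii")
# e.g. _mcode_py("1631000000") should match what the JavaScript mcode returns
# for the same timestamp string.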
| 33.063063
| 139
| 0.495368
|
import time
from py_mini_racer import py_mini_racer
import requests
import pandas as pd
js_str = """
function mcode(input) {
var keyStr = "ABCDEFGHIJKLMNOP" + "QRSTUVWXYZabcdef" + "ghijklmnopqrstuv" + "wxyz0123456789+/" + "=";
var output = "";
var chr1, chr2, chr3 = "";
var enc1, enc2, enc3, enc4 = "";
var i = 0;
do {
chr1 = input.charCodeAt(i++);
chr2 = input.charCodeAt(i++);
chr3 = input.charCodeAt(i++);
enc1 = chr1 >> 2;
enc2 = ((chr1 & 3) << 4) | (chr2 >> 4);
enc3 = ((chr2 & 15) << 2) | (chr3 >> 6);
enc4 = chr3 & 63;
if (isNaN(chr2)) {
enc3 = enc4 = 64;
} else if (isNaN(chr3)) {
enc4 = 64;
}
output = output + keyStr.charAt(enc1) + keyStr.charAt(enc2)
+ keyStr.charAt(enc3) + keyStr.charAt(enc4);
chr1 = chr2 = chr3 = "";
enc1 = enc2 = enc3 = enc4 = "";
} while (i < input.length);
return output;
}
"""
def stock_rank_forecast_cninfo(date: str = "20210910") -> pd.DataFrame:
url = "http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1089"
params = {"tdate": "-".join([date[:4], date[4:6], date[6:]])}
random_time_str = str(int(time.time()))
js_code = py_mini_racer.MiniRacer()
js_code.eval(js_str)
mcode = js_code.call("mcode", random_time_str)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Content-Length": "0",
"Host": "webapi.cninfo.com.cn",
"mcode": mcode,
"Origin": "http://webapi.cninfo.com.cn",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "http://webapi.cninfo.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
r = requests.post(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["records"])
temp_df.columns = [
"证券简称",
"发布日期",
"前一次投资评级",
"评级变化",
"目标价格-上限",
"是否首次评级",
"投资评级",
"研究员名称",
"研究机构简称",
"目标价格-下限",
"证券代码",
]
temp_df = temp_df[[
"证券代码",
"证券简称",
"发布日期",
"研究机构简称",
"研究员名称",
"投资评级",
"是否首次评级",
"评级变化",
"前一次投资评级",
"目标价格-下限",
"目标价格-上限",
]]
temp_df["目标价格-上限"] = pd.to_numeric(temp_df["目标价格-上限"], errors="coerce")
temp_df["目标价格-下限"] = pd.to_numeric(temp_df["目标价格-下限"], errors="coerce")
return temp_df
if __name__ == "__main__":
stock_rank_forecast_cninfo_df = stock_rank_forecast_cninfo(date="20210907")
print(stock_rank_forecast_cninfo_df)
| true
| true
|
f71a61f85926c5c06fd0a3030685cd6256d6daab
| 7,369
|
py
|
Python
|
coremltools/converters/mil/mil/passes/conv_scale_fusion.py
|
LaudateCorpus1/coremltools
|
777a4460d6823e5e91dea4fa3eacb0b11c7d5dfc
|
[
"BSD-3-Clause"
] | null | null | null |
coremltools/converters/mil/mil/passes/conv_scale_fusion.py
|
LaudateCorpus1/coremltools
|
777a4460d6823e5e91dea4fa3eacb0b11c7d5dfc
|
[
"BSD-3-Clause"
] | null | null | null |
coremltools/converters/mil/mil/passes/conv_scale_fusion.py
|
LaudateCorpus1/coremltools
|
777a4460d6823e5e91dea4fa3eacb0b11c7d5dfc
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2021, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import numpy as np
from coremltools.converters.mil.mil.passes.pass_registry import register_pass
from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
from coremltools.converters.mil.mil import Builder as mb
def _try_to_transform(conv_op, scale_op, block):
# get the scale
if scale_op.x.val is None and scale_op.y.val is None:
return False
scale_var = scale_op.x if scale_op.x.val is not None else scale_op.y
scale = scale_var.val
# for the scalar case, the scalar can be either
# 1. a python int/float
# 2. a 0d numpy array
# 3. a 1d numpy array with shape (1,)
is_scalar = True
if isinstance(scale, np.ndarray):
if scale.shape == ():
scale = scale.tolist()
elif scale.shape == (1) or scale.shape == (1,):
scale = scale[0]
else:
is_scalar = False
# get weight and bias and groups from conv layer
if conv_op.weight.val is None:
return False
conv_weight = conv_op.weight.val
conv_bias = conv_op.bias
groups = conv_op.groups.val
# get type of the conv layer
is_deconv = conv_op.op_type == 'conv_transpose'
is_conv_1d = len(conv_weight.shape) == 3
# D_in denotes the spatial dimensions for conv kernel weight
# for conv_transpose, conv_weight has shape [Cin, Cout / groups, *D_in]
# for conv, conv_weight has shape [Cout, Cin / groups, *D_in]
if is_deconv:
Cout = conv_weight.shape[1] * groups
Cin = conv_weight.shape[0]
else:
Cout = conv_weight.shape[0]
Cin = conv_weight.shape[1] * groups
    # for the vector scale case, check if the shape is broadcastable
if not is_scalar:
if not np.product(scale.shape) == Cout:
return False
if len(scale.shape) == len(conv_weight.shape):
if not scale.shape[1] == Cout:
return False
elif len(scale.shape) == len(conv_weight.shape) - 1:
if not scale.shape[0] == Cout:
return False
else:
return False
# transform the scale to 1./scale for the real_div case
if scale_op.op_type == "real_div":
scale = 1./scale
# get the type of the conv weight
conv_weight_type = conv_weight.dtype
# create bias for conv if not exist
if conv_bias is None:
conv_bias = np.zeros(Cout)
else:
conv_bias = conv_bias.val
conv_bias = conv_bias.astype(conv_weight_type)
# get the original shape of weight and bias
origin_weight_shape = conv_weight.shape
origin_bias_shape = conv_bias.shape
# update the weight/bias for conv layer
if is_scalar:
new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type)
new_conv_weight = np.array(conv_weight * scale).astype(conv_weight_type)
else:
scale = np.reshape(scale, (Cout))
new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type)
new_conv_weight = []
if is_deconv:
conv_weight = np.transpose(conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3])
conv_weight = np.reshape(conv_weight, [Cout, Cin // groups] + list(conv_weight.shape[2:]))
for i in range(Cout):
_conv_weight = conv_weight[i] * scale[i]
new_conv_weight.append(_conv_weight)
new_conv_weight = np.array(new_conv_weight).astype(conv_weight_type)
if is_deconv:
new_conv_weight = np.reshape(new_conv_weight, [Cout // groups, Cin] + list(new_conv_weight.shape[2:]))
new_conv_weight = np.transpose(new_conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3])
# make sure the updated weight and bias have the same shape as the original ones
assert new_conv_weight.shape == origin_weight_shape, "conv weight should have the same shape before and after the fuse_conv_scale pass."
assert new_conv_bias.shape == origin_bias_shape, "conv bias should have the same shape before and after the fuse_conv_scale pass."
# create a new conv op with the new weight, bias value, copying rest of the attributes
out_name = scale_op.outputs[0].name
conv_kargs = {"weight": new_conv_weight, "bias": new_conv_bias, "name": out_name, "before_op": conv_op}
for k, v in conv_op.inputs.items():
if k in ["weight", "bias"]:
continue
conv_kargs[k] = v
if is_deconv:
x = mb.conv_transpose(**conv_kargs)
else:
x = mb.conv(**conv_kargs)
scale_op.enclosing_block.replace_uses_of_var_after_op(
anchor_op=scale_op, old_var=scale_op.outputs[0], new_var=x
)
# Remove all the ops at once
block.remove_ops([conv_op, scale_op])
return True
@register_pass(namespace="common")
class fuse_conv_scale(AbstractGraphPass):
"""
Fold mul/div into conv/conv_transpose by updating the weight/bias of the convolution layers.
    The scale const can be a single number (scalar) or a vector with a broadcastable shape,
for instance, if the output of the conv/deconv layer is (B, Cout, H, W),
const of shape (Cout, 1, 1) and (1, Cout, 1, 1) are allowed.
Given:
%2 = conv(%1)
...
%3 = mul(%2, constant) # where constant is the scale constant
...
Result:
%3 = conv(%1)
...
"""
def __init__(self):
self.ops_to_skip = set()
def set_ops_to_skip(self, prog):
pass
def _fuse_conv_scale_block(self, block):
def _match_pattern(op):
if op.op_type == "conv" or op.op_type == "conv_transpose":
# abort fusion if op output is also a block output
if op.outputs[0] in op.enclosing_block.outputs:
return None
# find batch_norm op
child_ops = op.outputs[0].child_ops
if len(child_ops) == 1:
scale_op_candidate = list(child_ops)[0]
if scale_op_candidate.op_type in ["mul", "real_div"]:
return scale_op_candidate
return None
fusion_occurred = False
for op in list(block.operations):
for b in op.blocks:
block_changed = True
while block_changed:
block_changed = self._fuse_conv_scale_block(b)
if len(op.blocks) > 0:
# This op can't be conv or conv_transpose
continue
scale_op = _match_pattern(op)
if op in self.ops_to_skip or scale_op in self.ops_to_skip:
continue
if scale_op is not None:
with block:
fusion_occurred = _try_to_transform(op, scale_op, block)
# has to break as the downstream iterator is affected.
if fusion_occurred:
return fusion_occurred
return fusion_occurred
def apply(self, prog):
self.set_ops_to_skip(prog)
for f in prog.functions.values():
block_changed = True
while block_changed:
block_changed = self._fuse_conv_scale_block(f)
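# --- Illustrative sketch (not part of the pass) ---
# Pure-numpy check of the algebra the pass relies on: scaling the output of an
# affine map (a 1x1 "convolution" written as a matrix multiply plus bias) is
# the same as scaling its weight and bias.  Shapes and values are arbitrary
# toy numbers.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x = rng.standard_normal((5, 3))      # 5 "pixels", Cin = 3
    w = rng.standard_normal((4, 3))      # Cout = 4, Cin = 3
    b = rng.standard_normal(4)
    scale = 2.5
    out_then_scale = (x @ w.T + b) * scale
    conv_with_folded_scale = x @ (w * scale).T + (b * scale)
    assert np.allclose(out_then_scale, conv_with_folded_scale)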
| 36.122549
| 140
| 0.627086
|
import numpy as np
from coremltools.converters.mil.mil.passes.pass_registry import register_pass
from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
from coremltools.converters.mil.mil import Builder as mb
def _try_to_transform(conv_op, scale_op, block):
if scale_op.x.val is None and scale_op.y.val is None:
return False
scale_var = scale_op.x if scale_op.x.val is not None else scale_op.y
scale = scale_var.val
is_scalar = True
if isinstance(scale, np.ndarray):
if scale.shape == ():
scale = scale.tolist()
elif scale.shape == (1) or scale.shape == (1,):
scale = scale[0]
else:
is_scalar = False
if conv_op.weight.val is None:
return False
conv_weight = conv_op.weight.val
conv_bias = conv_op.bias
groups = conv_op.groups.val
is_deconv = conv_op.op_type == 'conv_transpose'
is_conv_1d = len(conv_weight.shape) == 3
if is_deconv:
Cout = conv_weight.shape[1] * groups
Cin = conv_weight.shape[0]
else:
Cout = conv_weight.shape[0]
Cin = conv_weight.shape[1] * groups
if not is_scalar:
if not np.product(scale.shape) == Cout:
return False
if len(scale.shape) == len(conv_weight.shape):
if not scale.shape[1] == Cout:
return False
elif len(scale.shape) == len(conv_weight.shape) - 1:
if not scale.shape[0] == Cout:
return False
else:
return False
if scale_op.op_type == "real_div":
scale = 1./scale
conv_weight_type = conv_weight.dtype
if conv_bias is None:
conv_bias = np.zeros(Cout)
else:
conv_bias = conv_bias.val
conv_bias = conv_bias.astype(conv_weight_type)
origin_weight_shape = conv_weight.shape
origin_bias_shape = conv_bias.shape
if is_scalar:
new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type)
new_conv_weight = np.array(conv_weight * scale).astype(conv_weight_type)
else:
scale = np.reshape(scale, (Cout))
new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type)
new_conv_weight = []
if is_deconv:
conv_weight = np.transpose(conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3])
conv_weight = np.reshape(conv_weight, [Cout, Cin // groups] + list(conv_weight.shape[2:]))
for i in range(Cout):
_conv_weight = conv_weight[i] * scale[i]
new_conv_weight.append(_conv_weight)
new_conv_weight = np.array(new_conv_weight).astype(conv_weight_type)
if is_deconv:
new_conv_weight = np.reshape(new_conv_weight, [Cout // groups, Cin] + list(new_conv_weight.shape[2:]))
new_conv_weight = np.transpose(new_conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3])
assert new_conv_weight.shape == origin_weight_shape, "conv weight should have the same shape before and after the fuse_conv_scale pass."
assert new_conv_bias.shape == origin_bias_shape, "conv bias should have the same shape before and after the fuse_conv_scale pass."
out_name = scale_op.outputs[0].name
conv_kargs = {"weight": new_conv_weight, "bias": new_conv_bias, "name": out_name, "before_op": conv_op}
for k, v in conv_op.inputs.items():
if k in ["weight", "bias"]:
continue
conv_kargs[k] = v
if is_deconv:
x = mb.conv_transpose(**conv_kargs)
else:
x = mb.conv(**conv_kargs)
scale_op.enclosing_block.replace_uses_of_var_after_op(
anchor_op=scale_op, old_var=scale_op.outputs[0], new_var=x
)
block.remove_ops([conv_op, scale_op])
return True
@register_pass(namespace="common")
class fuse_conv_scale(AbstractGraphPass):
def __init__(self):
self.ops_to_skip = set()
def set_ops_to_skip(self, prog):
pass
def _fuse_conv_scale_block(self, block):
def _match_pattern(op):
if op.op_type == "conv" or op.op_type == "conv_transpose":
if op.outputs[0] in op.enclosing_block.outputs:
return None
child_ops = op.outputs[0].child_ops
if len(child_ops) == 1:
scale_op_candidate = list(child_ops)[0]
if scale_op_candidate.op_type in ["mul", "real_div"]:
return scale_op_candidate
return None
fusion_occurred = False
for op in list(block.operations):
for b in op.blocks:
block_changed = True
while block_changed:
block_changed = self._fuse_conv_scale_block(b)
if len(op.blocks) > 0:
continue
scale_op = _match_pattern(op)
if op in self.ops_to_skip or scale_op in self.ops_to_skip:
continue
if scale_op is not None:
with block:
fusion_occurred = _try_to_transform(op, scale_op, block)
# has to break as the downstream iterator is affected.
if fusion_occurred:
return fusion_occurred
return fusion_occurred
def apply(self, prog):
self.set_ops_to_skip(prog)
for f in prog.functions.values():
block_changed = True
while block_changed:
block_changed = self._fuse_conv_scale_block(f)
| true
| true
|
f71a6261577109f2928b029f3952cbc9f28b4dcc
| 997
|
py
|
Python
|
kubernetes/test/test_v1_ceph_fs_volume_source.py
|
scele/kubernetes-client-python
|
9e982cbdb5f19dc1a3935a75bdd92288f3b807fb
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1_ceph_fs_volume_source.py
|
scele/kubernetes-client-python
|
9e982cbdb5f19dc1a3935a75bdd92288f3b807fb
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1_ceph_fs_volume_source.py
|
scele/kubernetes-client-python
|
9e982cbdb5f19dc1a3935a75bdd92288f3b807fb
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_ceph_fs_volume_source import V1CephFSVolumeSource
class TestV1CephFSVolumeSource(unittest.TestCase):
""" V1CephFSVolumeSource unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1CephFSVolumeSource(self):
"""
Test V1CephFSVolumeSource
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_ceph_fs_volume_source.V1CephFSVolumeSource()
pass
if __name__ == '__main__':
unittest.main()
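# --- Illustrative sketch (not part of the generated test) ---
# One way the FIXME above could be filled in.  The attribute names below
# (monitors, path, user, secret_file, read_only) are assumed to match the
# generated V1CephFSVolumeSource model and the example values are made up,
# so treat this as a sketch rather than the official test:
#
#     def testV1CephFSVolumeSource(self):
#         model = V1CephFSVolumeSource(
#             monitors=["10.16.154.78:6789"],
#             path="/",
#             user="admin",
#             secret_file="/etc/ceph/admin.secret",
#             read_only=True,
#         )
#         self.assertEqual(["10.16.154.78:6789"], model.monitors)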
| 22.155556
| 105
| 0.719157
|
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_ceph_fs_volume_source import V1CephFSVolumeSource
class TestV1CephFSVolumeSource(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testV1CephFSVolumeSource(self):
pass
if __name__ == '__main__':
unittest.main()
| true
| true
|
f71a62b2ff79265703f83e0534fed29f3684b334
| 14,167
|
py
|
Python
|
notebooks/39.1-BDP-unbiased-clustering.py
|
zeou1/maggot_models
|
4e1b518c2981ab1ca9607099c3813e8429d94ca4
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/39.1-BDP-unbiased-clustering.py
|
zeou1/maggot_models
|
4e1b518c2981ab1ca9607099c3813e8429d94ca4
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/39.1-BDP-unbiased-clustering.py
|
zeou1/maggot_models
|
4e1b518c2981ab1ca9607099c3813e8429d94ca4
|
[
"BSD-3-Clause"
] | null | null | null |
# %% [markdown]
# # Imports
import json
import os
import warnings
from operator import itemgetter
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from joblib import Parallel, delayed
from joblib.parallel import Parallel, delayed
from sklearn.metrics import adjusted_rand_score
import networkx as nx
from graspy.cluster import GaussianCluster, AutoGMMCluster
from graspy.embed import AdjacencySpectralEmbed, OmnibusEmbed
from graspy.models import DCSBMEstimator, SBMEstimator
from graspy.plot import heatmap, pairplot
from graspy.utils import binarize, cartprod, get_lcc, pass_to_ranks
from src.data import load_everything
from src.utils import export_skeleton_json, savefig
from src.visualization import clustergram, palplot, sankey
from src.hierarchy import signal_flow
warnings.simplefilter("ignore", category=FutureWarning)
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
# %% [markdown]
# # Parameters
BRAIN_VERSION = "2019-12-09"
GRAPH_TYPES = ["Gad", "Gaa", "Gdd", "Gda"]
GRAPH_TYPE_LABELS = [r"A $\to$ D", r"A $\to$ A", r"D $\to$ D", r"D $\to$ A"]
N_GRAPH_TYPES = len(GRAPH_TYPES)
SAVEFIGS = True
DEFAULT_FMT = "png"
DEFUALT_DPI = 150
SAVESKELS = False
MIN_CLUSTERS = 8
MAX_CLUSTERS = 8
N_INIT = 50
PTR = True
ONLY_RIGHT = True
embed = "LSE"
cluster = "GMM"
n_components = 4
if cluster == "GMM":
gmm_params = {"n_init": N_INIT, "covariance_type": "all"}
elif cluster == "AutoGMM":
gmm_params = {"max_agglom_size": None}
np.random.seed(23409857)
def stashfig(name, **kws):
if SAVEFIGS:
savefig(name, foldername=FNAME, fmt=DEFAULT_FMT, dpi=DEFUALT_DPI, **kws)
def stashskel(name, ids, colors, palette=None, **kws):
if SAVESKELS:
return export_skeleton_json(
name, ids, colors, palette=palette, foldername=FNAME, **kws
)
def ase(adj, n_components):
if PTR:
adj = pass_to_ranks(adj)
ase = AdjacencySpectralEmbed(n_components=n_components)
latent = ase.fit_transform(adj)
latent = np.concatenate(latent, axis=-1)
return latent
def to_laplace(graph, form="DAD", regularizer=None):
r"""
A function to convert graph adjacency matrix to graph laplacian.
Currently supports I-DAD, DAD, and R-DAD laplacians, where D is the diagonal
matrix of degrees of each node raised to the -1/2 power, I is the
identity matrix, and A is the adjacency matrix.
R-DAD is regularized laplacian: where :math:`D_t = D + regularizer*I`.
Parameters
----------
graph: object
Either array-like, (n_vertices, n_vertices) numpy array,
or an object of type networkx.Graph.
form: {'I-DAD' (default), 'DAD', 'R-DAD'}, string, optional
- 'I-DAD'
Computes :math:`L = I - D*A*D`
- 'DAD'
Computes :math:`L = D*A*D`
- 'R-DAD'
Computes :math:`L = D_t*A*D_t` where :math:`D_t = D + regularizer*I`
regularizer: int, float or None, optional (default=None)
Constant to be added to the diagonal of degree matrix. If None, average
node degree is added. If int or float, must be >= 0. Only used when
``form`` == 'R-DAD'.
Returns
-------
L: numpy.ndarray
2D (n_vertices, n_vertices) array representing graph
laplacian of specified form
References
----------
.. [1] Qin, Tai, and Karl Rohe. "Regularized spectral clustering
under the degree-corrected stochastic blockmodel." In Advances
in Neural Information Processing Systems, pp. 3120-3128. 2013
"""
valid_inputs = ["I-DAD", "DAD", "R-DAD"]
if form not in valid_inputs:
raise TypeError("Unsuported Laplacian normalization")
A = graph
in_degree = np.sum(A, axis=0)
out_degree = np.sum(A, axis=1)
# regularize laplacian with parameter
# set to average degree
if form == "R-DAD":
if regularizer is None:
regularizer = 1
elif not isinstance(regularizer, (int, float)):
raise TypeError(
"Regularizer must be a int or float, not {}".format(type(regularizer))
)
elif regularizer < 0:
raise ValueError("Regularizer must be greater than or equal to 0")
regularizer = regularizer * np.mean(out_degree)
in_degree += regularizer
out_degree += regularizer
with np.errstate(divide="ignore"):
in_root = 1 / np.sqrt(in_degree) # this is 10x faster than ** -0.5
out_root = 1 / np.sqrt(out_degree)
in_root[np.isinf(in_root)] = 0
out_root[np.isinf(out_root)] = 0
in_root = np.diag(in_root) # just change to sparse diag for sparse support
out_root = np.diag(out_root)
if form == "I-DAD":
L = np.diag(in_degree) - A
L = in_root @ L @ in_root
elif form == "DAD" or form == "R-DAD":
L = out_root @ A @ in_root
# return symmetrize(L, method="avg") # sometimes machine prec. makes this necessary
return L
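# --- Illustrative sketch (not part of the original analysis) ---
# Tiny numeric check of the "DAD" form computed above: rows are rescaled by
# out-degree and columns by in-degree, i.e. L = D_out^{-1/2} A D_in^{-1/2}.
# The 3-node adjacency matrix below is made up.
_A_check = np.array([[0.0, 1.0, 1.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]])
_d_out = _A_check.sum(axis=1)
_d_in = _A_check.sum(axis=0)
_L_expected = np.diag(1 / np.sqrt(_d_out)) @ _A_check @ np.diag(1 / np.sqrt(_d_in))
assert np.allclose(to_laplace(_A_check, form="DAD"), _L_expected)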
def lse(adj, n_components, regularizer=None):
if PTR:
adj = pass_to_ranks(adj)
lap = to_laplace(adj, form="R-DAD")
ase = AdjacencySpectralEmbed(n_components=n_components)
latent = ase.fit_transform(lap)
latent = np.concatenate(latent, axis=-1)
return latent
def omni(adjs, n_components):
if PTR:
adjs = [pass_to_ranks(a) for a in adjs]
omni = OmnibusEmbed(n_components=n_components // len(adjs))
latent = omni.fit_transform(adjs)
latent = np.concatenate(latent, axis=-1) # first is for in/out
latent = np.concatenate(latent, axis=-1) # second is for concat. each graph
return latent
def ase_concatenate(adjs, n_components):
if PTR:
adjs = [pass_to_ranks(a) for a in adjs]
ase = AdjacencySpectralEmbed(n_components=n_components // len(adjs))
graph_latents = []
for a in adjs:
latent = ase.fit_transform(a)
latent = np.concatenate(latent, axis=-1)
graph_latents.append(latent)
latent = np.concatenate(graph_latents, axis=-1)
return latent
def sub_ari(known_inds, true_labels, pred_labels):
true_known_labels = true_labels[known_inds]
pred_known_labels = pred_labels[known_inds]
ari = adjusted_rand_score(true_known_labels, pred_known_labels)
return ari
# Set up plotting constants
plt.style.use("seaborn-white")
sns.set_palette("deep")
sns.set_context("talk", font_scale=1)
# %% [markdown]
# # Load the data
adj, class_labels, side_labels, skeleton_labels = load_everything(
"Gad",
version=BRAIN_VERSION,
return_keys=["Merge Class", "Hemisphere"],
return_ids=True,
)
# select the right hemisphere
if ONLY_RIGHT:
side = "right hemisphere"
right_inds = np.where(side_labels == "R")[0]
adj = adj[np.ix_(right_inds, right_inds)]
class_labels = class_labels[right_inds]
skeleton_labels = skeleton_labels[right_inds]
else:
side = "full brain"
# sort by number of synapses
degrees = adj.sum(axis=0) + adj.sum(axis=1)
sort_inds = np.argsort(degrees)[::-1]
adj = adj[np.ix_(sort_inds, sort_inds)]
class_labels = class_labels[sort_inds]
skeleton_labels = skeleton_labels[sort_inds]
# remove disconnected nodes
adj, lcc_inds = get_lcc(adj, return_inds=True)
class_labels = class_labels[lcc_inds]
skeleton_labels = skeleton_labels[lcc_inds]
# remove pendants
degrees = np.count_nonzero(adj, axis=0) + np.count_nonzero(adj, axis=1)
not_pendant_mask = degrees != 1
not_pendant_inds = np.array(range(len(degrees)))[not_pendant_mask]
adj = adj[np.ix_(not_pendant_inds, not_pendant_inds)]
class_labels = class_labels[not_pendant_inds]
skeleton_labels = skeleton_labels[not_pendant_inds]
# plot degree sequence
d_sort = np.argsort(degrees)[::-1]
degrees = degrees[d_sort]
plt.figure(figsize=(10, 5))
sns.scatterplot(x=range(len(degrees)), y=degrees, s=30, linewidth=0)
known_inds = np.where(class_labels != "Unk")[0]
# %% [markdown]
# # Run clustering using LSE on the sum graph
n_verts = adj.shape[0]
latent = lse(adj, n_components, regularizer=None)
pairplot(latent, labels=class_labels, title=embed)
k_list = list(range(MIN_CLUSTERS, MAX_CLUSTERS + 1))
n_runs = len(k_list)
out_dicts = []
bin_adj = binarize(adj)
last_pred_labels = np.zeros(n_verts)
if cluster == "GMM":
ClusterModel = GaussianCluster
elif cluster == "AutoGMM":
ClusterModel = AutoGMMCluster
for k in k_list:
run_name = f"k = {k}, {cluster}, {embed}, {side} (A to D), PTR, raw"
print(run_name)
print()
# Do clustering
# TODO: make this autogmm instead
gmm = ClusterModel(min_components=k, max_components=k, **gmm_params)
gmm.fit(latent)
pred_labels = gmm.predict(latent)
# Score unsupervised metrics
base_dict = {
"K": k,
"Cluster": cluster,
"Embed": embed,
"Method": f"{cluster} o {embed}",
}
# GMM likelihood
score = gmm.model_.score(latent)
temp_dict = base_dict.copy()
temp_dict["Metric"] = "GMM likelihood"
temp_dict["Score"] = score
out_dicts.append(temp_dict)
# GMM BIC
score = gmm.model_.bic(latent)
temp_dict = base_dict.copy()
temp_dict["Metric"] = "GMM BIC"
temp_dict["Score"] = score
out_dicts.append(temp_dict)
# SBM likelihood
sbm = SBMEstimator(directed=True, loops=False)
sbm.fit(bin_adj, y=pred_labels)
score = sbm.score(bin_adj)
temp_dict = base_dict.copy()
temp_dict["Metric"] = "SBM likelihood"
temp_dict["Score"] = score
out_dicts.append(temp_dict)
# DCSBM likelihood
dcsbm = DCSBMEstimator(directed=True, loops=False)
dcsbm.fit(bin_adj, y=pred_labels)
score = dcsbm.score(bin_adj)
temp_dict = base_dict.copy()
temp_dict["Metric"] = "DCSBM likelihood"
temp_dict["Score"] = score
out_dicts.append(temp_dict)
# ARI of the subset with labels
score = sub_ari(known_inds, class_labels, pred_labels)
temp_dict = base_dict.copy()
temp_dict["Metric"] = "Simple ARI"
temp_dict["Score"] = score
out_dicts.append(temp_dict)
# ARI vs K - 1
score = adjusted_rand_score(last_pred_labels, pred_labels)
temp_dict = base_dict.copy()
temp_dict["Metric"] = "K-1 ARI"
temp_dict["Score"] = score
out_dicts.append(temp_dict)
last_pred_labels = pred_labels
save_name = f"k{k}-{cluster}-{embed}-right-ad-PTR-raw"
# Plot embedding
# pairplot(latent, labels=pred_labels, title=run_name)
# stashfig("latent-" + save_name)
# Plot everything else
clustergram(adj, class_labels, pred_labels)
stashfig("clustergram-" + save_name)
# New plot
# - Compute signal flow
# - Get the centroid of each cluster and project to 1d
# - Alternatively, just take the first dimension
# - For each cluster plot as a node
# output skeletons
if SAVESKELS:
_, colormap, pal = stashskel(
save_name, skeleton_labels, pred_labels, palette="viridis", multiout=True
)
palplot(k, cmap="viridis")
stashfig("palplot-" + save_name)
# save dict colormapping
filename = (
Path("./maggot_models/notebooks/outs")
/ Path(FNAME)
/ str("colormap-" + save_name + ".json")
)
with open(filename, "w") as fout:
json.dump(colormap, fout)
stashskel(
save_name, skeleton_labels, pred_labels, palette="viridis", multiout=False
)
# %% [markdown]
# # Plot results of unsupervised metrics
result_df = pd.DataFrame(out_dicts)
fg = sns.FacetGrid(result_df, col="Metric", col_wrap=3, sharey=False, height=4)
fg.map(sns.lineplot, "K", "Score")
stashfig(f"metrics-{cluster}-{embed}-right-ad-PTR-raw")
# Modifications i need to make to the above
# - Increase the height of the sankey diagram overall
# - Look into color maps that could be better
# - Color the cluster labels by what gets written to the JSON
# - Plot the clusters as nodes in a small network
# %% [markdown]
# # try graph flow
node_signal_flow = signal_flow(adj)
mean_sf = np.zeros(k)
for i in np.unique(pred_labels):
inds = np.where(pred_labels == i)[0]
mean_sf[i] = np.mean(node_signal_flow[inds])
cluster_mean_latent = gmm.model_.means_[:, 0]
block_probs = SBMEstimator().fit(bin_adj, y=pred_labels).block_p_
block_prob_df = pd.DataFrame(data=block_probs, index=range(k), columns=range(k))
block_g = nx.from_pandas_adjacency(block_prob_df, create_using=nx.DiGraph)
plt.figure(figsize=(10, 10))
# don't ever let em tell you you're too pythonic
pos = dict(zip(range(k), zip(cluster_mean_latent, mean_sf)))
# nx.draw_networkx_nodes(block_g, pos=pos)
labels = nx.get_edge_attributes(block_g, "weight")
# nx.draw_networkx_edge_labels(block_g, pos, edge_labels=labels)
from matplotlib.cm import ScalarMappable
import matplotlib as mpl
norm = mpl.colors.LogNorm(vmin=0.01, vmax=0.1)
sm = ScalarMappable(cmap="Reds", norm=norm)
cmap = sm.to_rgba(np.array(list(labels.values())) + 0.01)
nx.draw_networkx(
block_g,
pos,
edge_cmap="Reds",
edge_color=cmap,
connectionstyle="arc3,rad=0.2",
width=1.5,
)
# %% [markdown]
# # signal flow marginals
signal_flow_marginal(adj, pred_labels)
# %% [markdown]
# #
def signal_flow_marginal(adj, labels, col_wrap=5, palette="tab20"):
sf = signal_flow(adj)
uni_labels = np.unique(labels)
medians = []
for i in uni_labels:
inds = np.where(labels == i)[0]
medians.append(np.median(sf[inds]))
sort_inds = np.argsort(medians)[::-1]
col_order = uni_labels[sort_inds]
plot_df = pd.DataFrame()
plot_df["Signal flow"] = sf
plot_df["Class"] = labels
fg = sns.FacetGrid(
plot_df,
col="Class",
aspect=1.5,
palette=palette,
col_order=col_order,
sharey=False,
col_wrap=col_wrap,
xlim=(-3, 3),
)
fg = fg.map(sns.distplot, "Signal flow") # bins=np.linspace(-2.2, 2.2))
fg.set(yticks=[], yticklabels=[])
plt.tight_layout()
return fg
signal_flow_marginal(adj, class_labels)
stashfig("known-class-sf-marginal")
# tomorrow
# DEFINITELY
# run with unsupervised metrics from k=2-50
# IF TIME
# run hgmm
| 29.150206
| 88
| 0.673678
|
import json
import os
import warnings
from operator import itemgetter
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from joblib import Parallel, delayed
from joblib.parallel import Parallel, delayed
from sklearn.metrics import adjusted_rand_score
import networkx as nx
from graspy.cluster import GaussianCluster, AutoGMMCluster
from graspy.embed import AdjacencySpectralEmbed, OmnibusEmbed
from graspy.models import DCSBMEstimator, SBMEstimator
from graspy.plot import heatmap, pairplot
from graspy.utils import binarize, cartprod, get_lcc, pass_to_ranks
from src.data import load_everything
from src.utils import export_skeleton_json, savefig
from src.visualization import clustergram, palplot, sankey
from src.hierarchy import signal_flow
warnings.simplefilter("ignore", category=FutureWarning)
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
ON = "2019-12-09"
GRAPH_TYPES = ["Gad", "Gaa", "Gdd", "Gda"]
GRAPH_TYPE_LABELS = [r"A $\to$ D", r"A $\to$ A", r"D $\to$ D", r"D $\to$ A"]
N_GRAPH_TYPES = len(GRAPH_TYPES)
SAVEFIGS = True
DEFAULT_FMT = "png"
DEFUALT_DPI = 150
SAVESKELS = False
MIN_CLUSTERS = 8
MAX_CLUSTERS = 8
N_INIT = 50
PTR = True
ONLY_RIGHT = True
embed = "LSE"
cluster = "GMM"
n_components = 4
if cluster == "GMM":
gmm_params = {"n_init": N_INIT, "covariance_type": "all"}
elif cluster == "AutoGMM":
gmm_params = {"max_agglom_size": None}
np.random.seed(23409857)
def stashfig(name, **kws):
if SAVEFIGS:
savefig(name, foldername=FNAME, fmt=DEFAULT_FMT, dpi=DEFUALT_DPI, **kws)
def stashskel(name, ids, colors, palette=None, **kws):
if SAVESKELS:
return export_skeleton_json(
name, ids, colors, palette=palette, foldername=FNAME, **kws
)
def ase(adj, n_components):
if PTR:
adj = pass_to_ranks(adj)
ase = AdjacencySpectralEmbed(n_components=n_components)
latent = ase.fit_transform(adj)
latent = np.concatenate(latent, axis=-1)
return latent
def to_laplace(graph, form="DAD", regularizer=None):
valid_inputs = ["I-DAD", "DAD", "R-DAD"]
if form not in valid_inputs:
raise TypeError("Unsuported Laplacian normalization")
A = graph
in_degree = np.sum(A, axis=0)
out_degree = np.sum(A, axis=1)
if form == "R-DAD":
if regularizer is None:
regularizer = 1
elif not isinstance(regularizer, (int, float)):
raise TypeError(
"Regularizer must be a int or float, not {}".format(type(regularizer))
)
elif regularizer < 0:
raise ValueError("Regularizer must be greater than or equal to 0")
regularizer = regularizer * np.mean(out_degree)
in_degree += regularizer
out_degree += regularizer
with np.errstate(divide="ignore"):
in_root = 1 / np.sqrt(in_degree)
out_root = 1 / np.sqrt(out_degree)
in_root[np.isinf(in_root)] = 0
out_root[np.isinf(out_root)] = 0
in_root = np.diag(in_root)
out_root = np.diag(out_root)
if form == "I-DAD":
L = np.diag(in_degree) - A
L = in_root @ L @ in_root
elif form == "DAD" or form == "R-DAD":
L = out_root @ A @ in_root
    return L
def lse(adj, n_components, regularizer=None):
if PTR:
adj = pass_to_ranks(adj)
lap = to_laplace(adj, form="R-DAD")
ase = AdjacencySpectralEmbed(n_components=n_components)
latent = ase.fit_transform(lap)
latent = np.concatenate(latent, axis=-1)
return latent
def omni(adjs, n_components):
if PTR:
adjs = [pass_to_ranks(a) for a in adjs]
omni = OmnibusEmbed(n_components=n_components // len(adjs))
latent = omni.fit_transform(adjs)
latent = np.concatenate(latent, axis=-1)
latent = np.concatenate(latent, axis=-1)
return latent
def ase_concatenate(adjs, n_components):
if PTR:
adjs = [pass_to_ranks(a) for a in adjs]
ase = AdjacencySpectralEmbed(n_components=n_components // len(adjs))
graph_latents = []
for a in adjs:
latent = ase.fit_transform(a)
latent = np.concatenate(latent, axis=-1)
graph_latents.append(latent)
latent = np.concatenate(graph_latents, axis=-1)
return latent
def sub_ari(known_inds, true_labels, pred_labels):
true_known_labels = true_labels[known_inds]
pred_known_labels = pred_labels[known_inds]
ari = adjusted_rand_score(true_known_labels, pred_known_labels)
return ari
plt.style.use("seaborn-white")
sns.set_palette("deep")
sns.set_context("talk", font_scale=1)
adj, class_labels, side_labels, skeleton_labels = load_everything(
"Gad",
version=BRAIN_VERSION,
return_keys=["Merge Class", "Hemisphere"],
return_ids=True,
)
if ONLY_RIGHT:
side = "right hemisphere"
right_inds = np.where(side_labels == "R")[0]
adj = adj[np.ix_(right_inds, right_inds)]
class_labels = class_labels[right_inds]
skeleton_labels = skeleton_labels[right_inds]
else:
side = "full brain"
degrees = adj.sum(axis=0) + adj.sum(axis=1)
sort_inds = np.argsort(degrees)[::-1]
adj = adj[np.ix_(sort_inds, sort_inds)]
class_labels = class_labels[sort_inds]
skeleton_labels = skeleton_labels[sort_inds]
adj, lcc_inds = get_lcc(adj, return_inds=True)
class_labels = class_labels[lcc_inds]
skeleton_labels = skeleton_labels[lcc_inds]
degrees = np.count_nonzero(adj, axis=0) + np.count_nonzero(adj, axis=1)
not_pendant_mask = degrees != 1
not_pendant_inds = np.array(range(len(degrees)))[not_pendant_mask]
adj = adj[np.ix_(not_pendant_inds, not_pendant_inds)]
class_labels = class_labels[not_pendant_inds]
skeleton_labels = skeleton_labels[not_pendant_inds]
d_sort = np.argsort(degrees)[::-1]
degrees = degrees[d_sort]
plt.figure(figsize=(10, 5))
sns.scatterplot(x=range(len(degrees)), y=degrees, s=30, linewidth=0)
known_inds = np.where(class_labels != "Unk")[0]
n_verts = adj.shape[0]
latent = lse(adj, n_components, regularizer=None)
pairplot(latent, labels=class_labels, title=embed)
k_list = list(range(MIN_CLUSTERS, MAX_CLUSTERS + 1))
n_runs = len(k_list)
out_dicts = []
bin_adj = binarize(adj)
last_pred_labels = np.zeros(n_verts)
if cluster == "GMM":
ClusterModel = GaussianCluster
elif cluster == "AutoGMM":
ClusterModel = AutoGMMCluster
for k in k_list:
run_name = f"k = {k}, {cluster}, {embed}, {side} (A to D), PTR, raw"
print(run_name)
print()
gmm = ClusterModel(min_components=k, max_components=k, **gmm_params)
gmm.fit(latent)
pred_labels = gmm.predict(latent)
base_dict = {
"K": k,
"Cluster": cluster,
"Embed": embed,
"Method": f"{cluster} o {embed}",
}
score = gmm.model_.score(latent)
temp_dict = base_dict.copy()
temp_dict["Metric"] = "GMM likelihood"
temp_dict["Score"] = score
out_dicts.append(temp_dict)
score = gmm.model_.bic(latent)
temp_dict = base_dict.copy()
temp_dict["Metric"] = "GMM BIC"
temp_dict["Score"] = score
out_dicts.append(temp_dict)
sbm = SBMEstimator(directed=True, loops=False)
sbm.fit(bin_adj, y=pred_labels)
score = sbm.score(bin_adj)
temp_dict = base_dict.copy()
temp_dict["Metric"] = "SBM likelihood"
temp_dict["Score"] = score
out_dicts.append(temp_dict)
dcsbm = DCSBMEstimator(directed=True, loops=False)
dcsbm.fit(bin_adj, y=pred_labels)
score = dcsbm.score(bin_adj)
temp_dict = base_dict.copy()
temp_dict["Metric"] = "DCSBM likelihood"
temp_dict["Score"] = score
out_dicts.append(temp_dict)
score = sub_ari(known_inds, class_labels, pred_labels)
temp_dict = base_dict.copy()
temp_dict["Metric"] = "Simple ARI"
temp_dict["Score"] = score
out_dicts.append(temp_dict)
score = adjusted_rand_score(last_pred_labels, pred_labels)
temp_dict = base_dict.copy()
temp_dict["Metric"] = "K-1 ARI"
temp_dict["Score"] = score
out_dicts.append(temp_dict)
last_pred_labels = pred_labels
save_name = f"k{k}-{cluster}-{embed}-right-ad-PTR-raw"
clustergram(adj, class_labels, pred_labels)
stashfig("clustergram-" + save_name)
if SAVESKELS:
_, colormap, pal = stashskel(
save_name, skeleton_labels, pred_labels, palette="viridis", multiout=True
)
palplot(k, cmap="viridis")
stashfig("palplot-" + save_name)
filename = (
Path("./maggot_models/notebooks/outs")
/ Path(FNAME)
/ str("colormap-" + save_name + ".json")
)
with open(filename, "w") as fout:
json.dump(colormap, fout)
stashskel(
save_name, skeleton_labels, pred_labels, palette="viridis", multiout=False
)
result_df = pd.DataFrame(out_dicts)
fg = sns.FacetGrid(result_df, col="Metric", col_wrap=3, sharey=False, height=4)
fg.map(sns.lineplot, "K", "Score")
stashfig(f"metrics-{cluster}-{embed}-right-ad-PTR-raw")
node_signal_flow = signal_flow(adj)
mean_sf = np.zeros(k)
for i in np.unique(pred_labels):
inds = np.where(pred_labels == i)[0]
mean_sf[i] = np.mean(node_signal_flow[inds])
cluster_mean_latent = gmm.model_.means_[:, 0]
block_probs = SBMEstimator().fit(bin_adj, y=pred_labels).block_p_
block_prob_df = pd.DataFrame(data=block_probs, index=range(k), columns=range(k))
block_g = nx.from_pandas_adjacency(block_prob_df, create_using=nx.DiGraph)
plt.figure(figsize=(10, 10))
pos = dict(zip(range(k), zip(cluster_mean_latent, mean_sf)))
labels = nx.get_edge_attributes(block_g, "weight")
from matplotlib.cm import ScalarMappable
import matplotlib as mpl
norm = mpl.colors.LogNorm(vmin=0.01, vmax=0.1)
sm = ScalarMappable(cmap="Reds", norm=norm)
cmap = sm.to_rgba(np.array(list(labels.values())) + 0.01)
nx.draw_networkx(
block_g,
pos,
edge_cmap="Reds",
edge_color=cmap,
connectionstyle="arc3,rad=0.2",
width=1.5,
)
signal_flow_marginal(adj, pred_labels)
def signal_flow_marginal(adj, labels, col_wrap=5, palette="tab20"):
sf = signal_flow(adj)
uni_labels = np.unique(labels)
medians = []
for i in uni_labels:
inds = np.where(labels == i)[0]
medians.append(np.median(sf[inds]))
sort_inds = np.argsort(medians)[::-1]
col_order = uni_labels[sort_inds]
plot_df = pd.DataFrame()
plot_df["Signal flow"] = sf
plot_df["Class"] = labels
fg = sns.FacetGrid(
plot_df,
col="Class",
aspect=1.5,
palette=palette,
col_order=col_order,
sharey=False,
col_wrap=col_wrap,
xlim=(-3, 3),
)
fg = fg.map(sns.distplot, "Signal flow")
fg.set(yticks=[], yticklabels=[])
plt.tight_layout()
return fg
signal_flow_marginal(adj, class_labels)
stashfig("known-class-sf-marginal")
| true
| true
|
f71a6368df82f8cba23fa6c4aacdc3254b4af1ca
| 702
|
py
|
Python
|
Cklib/Filter.py
|
kamphaus/HPCGrunner
|
1885ee87bf02bab51cc71d560d86217c79c5f46b
|
[
"MIT"
] | null | null | null |
Cklib/Filter.py
|
kamphaus/HPCGrunner
|
1885ee87bf02bab51cc71d560d86217c79c5f46b
|
[
"MIT"
] | null | null | null |
Cklib/Filter.py
|
kamphaus/HPCGrunner
|
1885ee87bf02bab51cc71d560d86217c79c5f46b
|
[
"MIT"
] | null | null | null |
import copy
def filterRemaining(remaining, environment):
returned = copy.copy(remaining)
for i in range(len(returned)-1, -1, -1):
r = returned[i]
if any(not(r[e]==environment[e]) for e in environment if e in r):
del returned[i]
else:
runs = copy.copy(r['runs'])
for j in range(len(runs)-1, -1, -1):
u = runs[j]
if any(not(u[e]==environment[e]) for e in environment):
del runs[j]
if len(runs)==0:
del returned[i]
else:
r = copy.deepcopy(r)
r['runs'] = runs
returned[i] = r
return returned
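# --- Illustrative sketch (not part of the original module) ---
# filterRemaining() drops entries whose own keys conflict with the given
# environment and prunes their runs down to the matching ones.  The toy data
# below is made up; the real structure of `remaining` comes from the rest of
# Cklib.
if __name__ == '__main__':
    remaining = [
        {'compiler': 'gcc', 'runs': [{'nodes': 1}, {'nodes': 2}]},
        {'compiler': 'icc', 'runs': [{'nodes': 1}]},
    ]
    environment = {'nodes': 1}
    print(filterRemaining(remaining, environment))
    # -> [{'compiler': 'gcc', 'runs': [{'nodes': 1}]},
    #     {'compiler': 'icc', 'runs': [{'nodes': 1}]}]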
| 30.521739
| 73
| 0.474359
|
import copy
def filterRemaining(remaining, environment):
returned = copy.copy(remaining)
for i in range(len(returned)-1, -1, -1):
r = returned[i]
if any(not(r[e]==environment[e]) for e in environment if e in r):
del returned[i]
else:
runs = copy.copy(r['runs'])
for j in range(len(runs)-1, -1, -1):
u = runs[j]
if any(not(u[e]==environment[e]) for e in environment):
del runs[j]
if len(runs)==0:
del returned[i]
else:
r = copy.deepcopy(r)
r['runs'] = runs
returned[i] = r
return returned
| true
| true
|
f71a637927490a1a25d4576addd9a32c1d6e1ce3
| 2,617
|
py
|
Python
|
acregnet/data.py
|
luoyi1hao/ACRN_Chest_X-ray_IA
|
b2ecaf88e6b1bb59101fd2d611bf9d1e6716367a
|
[
"MIT"
] | 1
|
2021-09-23T10:37:53.000Z
|
2021-09-23T10:37:53.000Z
|
acregnet/data.py
|
luoyi1hao/ACRN_Chest_X-ray_IA
|
b2ecaf88e6b1bb59101fd2d611bf9d1e6716367a
|
[
"MIT"
] | null | null | null |
acregnet/data.py
|
luoyi1hao/ACRN_Chest_X-ray_IA
|
b2ecaf88e6b1bb59101fd2d611bf9d1e6716367a
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
from sklearn.model_selection import train_test_split
import cv2
class DataHandler(object):
    @staticmethod
    def _load_data(im_fnames, add_channel_dim=True):
im0 = cv2.imread(im_fnames[0], 0)
im_batch = np.zeros((len(im_fnames),) + im0.shape)
im_batch[0] = im0
for i, fname in enumerate(im_fnames[1:], 1):
im_batch[i] = cv2.imread(fname, 0)
if add_channel_dim:
return np.expand_dims(im_batch, axis=-1)
return im_batch
@staticmethod
def load_images(_file, normalize=True):
im_fnames = list(np.loadtxt(_file, dtype='str'))
im_batch = DataHandler._load_data(im_fnames).astype(np.float32)
if normalize:
im_batch = im_batch / 255.
return im_batch, im_fnames
@staticmethod
def load_labels(_file):
lb_fnames = list(np.loadtxt(_file, dtype='str'))
lb_batch = DataHandler._load_data(lb_fnames).astype(np.int32)
cur_labels = np.unique(lb_batch)
new_labels = range(np.unique(lb_batch).shape[0])
if not np.array_equal(cur_labels, new_labels):
for cur_l, new_l in zip(cur_labels, new_labels):
lb_batch[lb_batch == cur_l] = new_l
return lb_batch, lb_fnames
@staticmethod
def train_test_split(data_dir, out_dir,
test_size=0.2, seed=1):
data_fnames = [
os.path.join(data_dir, f) for f in sorted(os.listdir(data_dir))]
train_fnames, test_fnames = train_test_split(
            data_fnames, test_size=test_size, shuffle=True, random_state=seed)
np.savetxt(os.path.join(out_dir, 'train_fnames'),
np.array(train_fnames), fmt='%s')
np.savetxt(os.path.join(out_dir, 'test_fnames'),
np.array(test_fnames), fmt='%s')
@staticmethod
def train_valid_test_split(data_dir, out_dir, valid_size=0.1,
test_size=0.2, seed=1):
data_fnames = [
os.path.join(data_dir, f) for f in sorted(os.listdir(data_dir))]
train_fnames, test_fnames = train_test_split(
            data_fnames, test_size=test_size, shuffle=True, random_state=seed)
train_fnames, valid_fnames = train_test_split(
            train_fnames, test_size=valid_size/(1 - test_size), shuffle=False, random_state=seed + 1)
np.savetxt(os.path.join(out_dir, 'train_fnames'),
np.array(train_fnames), fmt='%s')
np.savetxt(os.path.join(out_dir, 'valid_fnames'),
np.array(valid_fnames), fmt='%s')
np.savetxt(os.path.join(out_dir, 'test_fnames'),
np.array(test_fnames), fmt='%s')
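# --- Illustrative sketch (not part of the original module) ---
# load_labels() remaps arbitrary label values to a dense 0..K-1 range; the toy
# array below shows that remapping step in isolation (the values are made up).
if __name__ == '__main__':
    lb_batch = np.array([[0, 85], [170, 255]], dtype=np.int32)
    cur_labels = np.unique(lb_batch)             # [0, 85, 170, 255]
    new_labels = range(cur_labels.shape[0])      # 0..3
    for cur_l, new_l in zip(cur_labels, new_labels):
        lb_batch[lb_batch == cur_l] = new_l
    print(lb_batch)                              # [[0 1], [2 3]]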
| 34.893333
| 76
| 0.610623
|
import os
import numpy as np
from sklearn.model_selection import train_test_split
import cv2
class DataHandler(object):
    @staticmethod
    def _load_data(im_fnames, add_channel_dim=True):
im0 = cv2.imread(im_fnames[0], 0)
im_batch = np.zeros((len(im_fnames),) + im0.shape)
im_batch[0] = im0
for i, fname in enumerate(im_fnames[1:], 1):
im_batch[i] = cv2.imread(fname, 0)
if add_channel_dim:
return np.expand_dims(im_batch, axis=-1)
return im_batch
@staticmethod
def load_images(_file, normalize=True):
im_fnames = list(np.loadtxt(_file, dtype='str'))
im_batch = DataHandler._load_data(im_fnames).astype(np.float32)
if normalize:
im_batch = im_batch / 255.
return im_batch, im_fnames
@staticmethod
def load_labels(_file):
lb_fnames = list(np.loadtxt(_file, dtype='str'))
lb_batch = DataHandler._load_data(lb_fnames).astype(np.int32)
cur_labels = np.unique(lb_batch)
new_labels = range(np.unique(lb_batch).shape[0])
if not np.array_equal(cur_labels, new_labels):
for cur_l, new_l in zip(cur_labels, new_labels):
lb_batch[lb_batch == cur_l] = new_l
return lb_batch, lb_fnames
@staticmethod
def train_test_split(data_dir, out_dir,
test_size=0.2, seed=1):
data_fnames = [
os.path.join(data_dir, f) for f in sorted(os.listdir(data_dir))]
train_fnames, test_fnames = train_test_split(
            data_fnames, test_size=test_size, shuffle=True, random_state=seed)
np.savetxt(os.path.join(out_dir, 'train_fnames'),
np.array(train_fnames), fmt='%s')
np.savetxt(os.path.join(out_dir, 'test_fnames'),
np.array(test_fnames), fmt='%s')
@staticmethod
def train_valid_test_split(data_dir, out_dir, valid_size=0.1,
test_size=0.2, seed=1):
data_fnames = [
os.path.join(data_dir, f) for f in sorted(os.listdir(data_dir))]
train_fnames, test_fnames = train_test_split(
            data_fnames, test_size=test_size, shuffle=True, random_state=seed)
train_fnames, valid_fnames = train_test_split(
            train_fnames, test_size=valid_size/(1 - test_size), shuffle=False, random_state=seed + 1)
np.savetxt(os.path.join(out_dir, 'train_fnames'),
np.array(train_fnames), fmt='%s')
np.savetxt(os.path.join(out_dir, 'valid_fnames'),
np.array(valid_fnames), fmt='%s')
np.savetxt(os.path.join(out_dir, 'test_fnames'),
np.array(test_fnames), fmt='%s')
| true
| true
|
f71a63e9f0ba5a8f65374e8816ee7e58d28c35bf
| 4,706
|
py
|
Python
|
gatenlp/processing/annotator.py
|
gitter-badger/python-gatenlp
|
bfed863b404cfd62c98a6cb08ad287c3b4b6ccae
|
[
"Apache-2.0"
] | null | null | null |
gatenlp/processing/annotator.py
|
gitter-badger/python-gatenlp
|
bfed863b404cfd62c98a6cb08ad287c3b4b6ccae
|
[
"Apache-2.0"
] | null | null | null |
gatenlp/processing/annotator.py
|
gitter-badger/python-gatenlp
|
bfed863b404cfd62c98a6cb08ad287c3b4b6ccae
|
[
"Apache-2.0"
] | null | null | null |
"""
Module with the base class and supporting functions for all annotators.
Any callable that can be called by passing a document can be used as an annotator,
but the base class "Annotator" defined in here is designed to allow for a more
flexible approach to do things.
"""
from abc import ABC, abstractmethod
__pdoc__ = {"Annotator.__call__": True}
class Annotator(ABC):
@abstractmethod
def __call__(self, doc, **kwargs):
"""
This method MUST get implemented in a concrete subclass to do the actual processing
and annotation. It must accept a document and arbitrary keyword arguments and it must
return either a document which may be the same or a different object than the document passed,
or None or an empty list or a list of one or more documents. The method also may raise an
exception.
The semantics of returning None or an empty list are not strictly defined: this may be used to
handle processing errors where documents which cannot be processed are quietly ignored or
filtering.
The method must accept arbitrary keyword arguments which will be passed on to sub-annotators and
may be used to configure or parametrize processing.
NOTE: some annotators may set or use special document features in order to handle
document context or the document id when processing a corpus or streams where a document id
is important.
Args:
doc: the document to process
kwargs: any arguments to pass to the annotator or sub-annotators called by this annotator
Returns:
a document, None, or a possibly empty list of documents
"""
raise Exception("This method must be implemented!")
def pipe(self, documents, **kwargs):
"""
If this method gets overridden, it should take an iterable of documents and yield processed documents.
This allows for batching, caching, and other optimizations over streams of documents.
If with_context is True, then the documents parameter should be an iterable over tuples (document, context).
Args:
documents: an iterable over documents or (document, context) tuples if with_context=True
**kwargs: arbitrary other keyword arguments must be accepted
Yields:
processed documents
"""
for el in documents:
if el is not None:
doc = self.__call__(el, **kwargs)
yield doc
def start(self):
"""
A method that gets called when processing starts, e.g. before the first document in
corpus gets processed. This is invoked by an executor to initialize processing a batch
of documents.
This is different from initializing the Annotator: initializing may load large data which
can be reused even if the same annotator instance is run several times over documents.
"""
pass
def finish(self):
"""
A method that gets called when processing ends, e.g. when all documents of a corpus
have been processed. It should return some result for processing the whole batch of documents
it has seen - that result may be None.
Returns:
The overall result of processing all documents or None
"""
pass
def reduce(self, results):
"""
A method that should know how to combine the results passed on in some collection into a
single result. This method should behave like a static method, i.e. not make use of any
data that is specific to the concrete instance.
This can be used to combine corpus results obtained from several processes running on
different parts of a corpus.
This gets invoked by the executor if more than one instance of the annotator was run
over separate sets of documents. If only a single instance was used, the result returned
from finish is used directly.
Args:
results: an iterable of individual results over some documents each or None if no results are available.
If no results have been passed back from the finish method of any of the processes, the executor should
not call reduce, but if it does, reduce should accept None or an iterator of all None and return None.
Returns:
The combined overall result or None if there are no individual results
"""
return results
class AnnotatorFunction(Annotator):
def __init__(self, funct):
self.funct = funct
def __call__(self, doc, **kwargs):
return self.funct(doc, **kwargs)
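# --- Illustrative sketch (not part of the module) ---
# A minimal concrete Annotator, only to show how the protocol above fits
# together: __call__ does the per-document work, finish() returns the
# per-batch result and reduce() merges results from several workers.  The
# whitespace token counting is invented for the example and doc.text is only
# assumed to exist on the documents being processed.
class TokenCountAnnotator(Annotator):
    def __init__(self):
        self.total = 0

    def start(self):
        self.total = 0

    def __call__(self, doc, **kwargs):
        text = getattr(doc, "text", None)
        self.total += len(text.split()) if text else 0
        return doc

    def finish(self):
        return self.total

    def reduce(self, results):
        return sum(r for r in results if r is not None)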
| 41.280702
| 116
| 0.679558
|
from abc import ABC, abstractmethod
__pdoc__ = {"Annotator.__call__": True}
class Annotator(ABC):
@abstractmethod
def __call__(self, doc, **kwargs):
raise Exception("This method must be implemented!")
def pipe(self, documents, **kwargs):
for el in documents:
if el is not None:
doc = self.__call__(el, **kwargs)
yield doc
def start(self):
pass
def finish(self):
pass
def reduce(self, results):
return results
class AnnotatorFunction(Annotator):
def __init__(self, funct):
self.funct = funct
def __call__(self, doc, **kwargs):
return self.funct(doc, **kwargs)
| true
| true
|
f71a650b60dea15af020b9d6037cca6aa1d1b85d
| 3,943
|
py
|
Python
|
muti_thread.py
|
fanlushuai/jd-assistant
|
ac9fce2cc87d2a6702743c28d4a3eeb3ee99f9ac
|
[
"MIT"
] | 2
|
2021-01-13T00:16:30.000Z
|
2021-01-31T01:34:57.000Z
|
muti_thread.py
|
fanlushuai/jd-assistant
|
ac9fce2cc87d2a6702743c28d4a3eeb3ee99f9ac
|
[
"MIT"
] | null | null | null |
muti_thread.py
|
fanlushuai/jd-assistant
|
ac9fce2cc87d2a6702743c28d4a3eeb3ee99f9ac
|
[
"MIT"
] | 1
|
2020-12-16T12:10:06.000Z
|
2020-12-16T12:10:06.000Z
|
import functools
import queue
import random
import time
from concurrent.futures import ThreadPoolExecutor
from itertools import repeat
from log import logger
shut_down_pool_queue = queue.Queue()
# sys_thread_pool = ThreadPoolExecutor(max_workers=2)
def shutdown_listener():
for _ in repeat(None):
t_pool = shut_down_pool_queue.get()
t_pool.shutdown()
logger.info("shutdown")
# sys_thread_pool.submit(shutdown_listener)
# Estimated reasonable number of threads for the whole flow, assuming tasks do not wait on each other
no_task_wait_size_assessed = 35
concurrent_pool_assessed = ThreadPoolExecutor(max_workers=no_task_wait_size_assessed)
def do_nothing():
    # Sleep 5s so the pool creates new threads instead of reusing existing ones
time.sleep(5)
return
def pre_concurrent_pool():
    # Warm up the threads in the pool
t = time.perf_counter()
for i in range(no_task_wait_size_assessed):
concurrent_pool_assessed.submit(do_nothing)
        time.sleep(5)  # makes it easier to debug with expiry times
logger.info("预热线程池,耗时%s", time.perf_counter() - t)
def threads(concurrent_size=1, try_times=1, try_internal=0.05):
"""
    Concurrency tool.
    :param concurrent_size: number of concurrent tasks per retry cycle
    :param try_times: number of retry cycles
    :param try_internal: interval between retry cycles
    :return: among all tasks started across threads and retries, the first result obtained is returned; None if none of them produced a result
"""
def decorate(func):
@functools.wraps(func)
def wrapper(*args, **kw):
re = Job(concurrent_size, try_times, try_internal).run(func, *args, **kw)
logger.info("threads tool return %s", re)
return re
return wrapper
return decorate
class Job(object):
"""
    Concurrency helper.
    Fires the requests of one cycle concurrently, and the tasks of a previous cycle do not delay the next cycle.
    Concretely: if cycle 1 starts at time t1, cycle 2 starts at t2 = t1 + try_internal.
    Problem it solves:
    With a traditional for loop, cycle 2 would start at t2 = t1 + task duration + try_internal.
    (So the traditional approach brings no real concurrency: it is single-threaded retrying, and the retry interval is skewed by the previous task's execution time, which makes the interval parameter essentially meaningless, especially for I/O operations.)
"""
def __init__(self, concurrent_size=1, try_times=1, try_internal=0.05):
self.concurrent_size = concurrent_size
self.try_times = try_times
self.try_internal = try_internal
self.futures = []
        # The whole flow shares this single thread pool
self.thread_pool = concurrent_pool_assessed
self.loop = True
def run(self, fn, *args, **kwargs):
        # Kick this off asynchronously on a worker thread
self.thread_pool.submit(self._loop, fn, *args, **kwargs)
logger.info("同步等待结果……")
        # Synchronously wait for the return value
try_return_count = 0
for _ in repeat(None):
futures = self.futures
for future in futures:
if future.done():
re = future.result()
if re:
self.loop = False
                        # NOTE: the decorated function must return an explicit value (None or something else), otherwise this loop never stops
shut_down_pool_queue.put(self.thread_pool)
return re
else:
try_return_count += 1
futures.remove(future)
if try_return_count >= self.try_times * self.concurrent_size:
return None
def _loop(self, fn, *args, **kwargs):
for try_count in range(self.try_times):
for i in range(self.concurrent_size):
self.futures.append(self.thread_pool.submit(fn, *args, **kwargs))
logger.info("启动线程")
if not self.loop:
                # _loop keeps running until a result is obtained or the iterations (self.try_times * self.concurrent_size) are exhausted
logger.debug("获取到结果,结束")
return
if not self.loop:
logger.debug("获取到结果,结束")
            # _loop keeps running until a result is obtained or the iterations (self.try_times * self.concurrent_size) are exhausted
return
time.sleep(self.try_internal)
@threads(concurrent_size=3, try_times=100, try_internal=0.1)
def test_g():
t = random.choice([0.1, 0.2, 0.3, 0.4, 0.5, 1])
logger.info("run%s", t)
time.sleep(t)
return "java{}".format(t)
if __name__ == '__main__':
pre_concurrent_pool()
logger.info("拿到结果%s", test_g())
| 29.425373
| 88
| 0.609942
|
import functools
import queue
import random
import time
from concurrent.futures import ThreadPoolExecutor
from itertools import repeat
from log import logger
shut_down_pool_queue = queue.Queue()
def shutdown_listener():
for _ in repeat(None):
t_pool = shut_down_pool_queue.get()
t_pool.shutdown()
logger.info("shutdown")
no_task_wait_size_assessed = 35
concurrent_pool_assessed = ThreadPoolExecutor(max_workers=no_task_wait_size_assessed)
def do_nothing():
time.sleep(5)
return
def pre_concurrent_pool():
t = time.perf_counter()
for i in range(no_task_wait_size_assessed):
concurrent_pool_assessed.submit(do_nothing)
time.sleep(5)
logger.info("预热线程池,耗时%s", time.perf_counter() - t)
def threads(concurrent_size=1, try_times=1, try_internal=0.05):
def decorate(func):
@functools.wraps(func)
def wrapper(*args, **kw):
re = Job(concurrent_size, try_times, try_internal).run(func, *args, **kw)
logger.info("threads tool return %s", re)
return re
return wrapper
return decorate
class Job(object):
def __init__(self, concurrent_size=1, try_times=1, try_internal=0.05):
self.concurrent_size = concurrent_size
self.try_times = try_times
self.try_internal = try_internal
self.futures = []
self.thread_pool = concurrent_pool_assessed
self.loop = True
def run(self, fn, *args, **kwargs):
self.thread_pool.submit(self._loop, fn, *args, **kwargs)
logger.info("同步等待结果……")
try_return_count = 0
for _ in repeat(None):
futures = self.futures
for future in futures:
if future.done():
re = future.result()
if re:
self.loop = False
shut_down_pool_queue.put(self.thread_pool)
return re
else:
try_return_count += 1
futures.remove(future)
if try_return_count >= self.try_times * self.concurrent_size:
return None
def _loop(self, fn, *args, **kwargs):
for try_count in range(self.try_times):
for i in range(self.concurrent_size):
self.futures.append(self.thread_pool.submit(fn, *args, **kwargs))
logger.info("启动线程")
if not self.loop:
logger.debug("获取到结果,结束")
return
if not self.loop:
logger.debug("获取到结果,结束")
return
time.sleep(self.try_internal)
@threads(concurrent_size=3, try_times=100, try_internal=0.1)
def test_g():
t = random.choice([0.1, 0.2, 0.3, 0.4, 0.5, 1])
logger.info("run%s", t)
time.sleep(t)
return "java{}".format(t)
if __name__ == '__main__':
pre_concurrent_pool()
logger.info("拿到结果%s", test_g())
| true
| true
|
f71a66863303bb27d7b14ce461ffa23d7ac9b033
| 534
|
py
|
Python
|
web_api/api/migrations/0103_gateway_mqtt_password.py
|
IoT-BA/project_noe-backend
|
4b63b4604dd9f3d53a1bdb6ad8e6ad20fe53ebd9
|
[
"MIT"
] | 2
|
2017-02-27T07:41:18.000Z
|
2017-03-05T22:13:39.000Z
|
web_api/api/migrations/0103_gateway_mqtt_password.py
|
IoT-BA/lorawan-sk-backend
|
4b63b4604dd9f3d53a1bdb6ad8e6ad20fe53ebd9
|
[
"MIT"
] | null | null | null |
web_api/api/migrations/0103_gateway_mqtt_password.py
|
IoT-BA/lorawan-sk-backend
|
4b63b4604dd9f3d53a1bdb6ad8e6ad20fe53ebd9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-01-22 09:20
from __future__ import unicode_literals
import api.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0102_auto_20170121_2038'),
]
operations = [
migrations.AddField(
model_name='gateway',
name='mqtt_password',
field=models.CharField(blank=True, default=api.models.generate_mqtt_password, max_length=16, null=True),
),
]
| 24.272727
| 116
| 0.651685
|
from __future__ import unicode_literals
import api.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0102_auto_20170121_2038'),
]
operations = [
migrations.AddField(
model_name='gateway',
name='mqtt_password',
field=models.CharField(blank=True, default=api.models.generate_mqtt_password, max_length=16, null=True),
),
]
| true
| true
|
f71a677c5c16ac76e38db599d0a5eac2507bf63b
| 747
|
py
|
Python
|
ScriptEngine/app.py
|
daizhaolin/scriptengine
|
eb3aee0381193d5550d31b59574ca60a4706cb25
|
[
"BSD-3-Clause"
] | null | null | null |
ScriptEngine/app.py
|
daizhaolin/scriptengine
|
eb3aee0381193d5550d31b59574ca60a4706cb25
|
[
"BSD-3-Clause"
] | null | null | null |
ScriptEngine/app.py
|
daizhaolin/scriptengine
|
eb3aee0381193d5550d31b59574ca60a4706cb25
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: UTF-8 -*-
'''
Created on 2020-03-08
@author: daizhaolin
'''
from .config import Config
from .helper import cached_property
from .logging import create_logger
class ScriptEngine(object):
def __init__(self):
self.name = __name__
self.config = Config({
'DEBUG': False
})
self.extensions = dict()
self.controller_queue = list()
@property
def debug(self):
return self.config['DEBUG']
@cached_property
def logger(self):
return create_logger(self)
def register_controller(self, controller):
self.controller_queue.append(controller)
def run(self):
for controller in self.controller_queue:
controller(self)
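
# Illustrative sketch (not part of the original module): a controller is just a callable
# that receives the engine; hello_controller is a hypothetical example. Registered
# controllers are invoked in order by run().
def hello_controller(engine):
    engine.logger.info("engine %s running, debug=%s", engine.name, engine.debug)

# Typical usage (assumed, for illustration only):
#   app = ScriptEngine()
#   app.register_controller(hello_controller)
#   app.run()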
| 19.153846
| 48
| 0.630522
|
from .config import Config
from .helper import cached_property
from .logging import create_logger
class ScriptEngine(object):
def __init__(self):
self.name = __name__
self.config = Config({
'DEBUG': False
})
self.extensions = dict()
self.controller_queue = list()
@property
def debug(self):
return self.config['DEBUG']
@cached_property
def logger(self):
return create_logger(self)
def register_controller(self, controller):
self.controller_queue.append(controller)
def run(self):
for controller in self.controller_queue:
controller(self)
| true
| true
|
f71a679ff4b8d5cbe23ab5310c5a07b000075f19
| 8,622
|
py
|
Python
|
examples/tutorials/advanced/websockets-example-MNIST-parallel/run_websocket_client.py
|
theoptips/PySyft
|
4b68c3c6fbe0c18cdf87dfe6ddc3c2071a71f1cc
|
[
"Apache-2.0"
] | 1
|
2019-07-14T01:18:34.000Z
|
2019-07-14T01:18:34.000Z
|
examples/tutorials/advanced/websockets-example-MNIST-parallel/run_websocket_client.py
|
theoptips/PySyft
|
4b68c3c6fbe0c18cdf87dfe6ddc3c2071a71f1cc
|
[
"Apache-2.0"
] | null | null | null |
examples/tutorials/advanced/websockets-example-MNIST-parallel/run_websocket_client.py
|
theoptips/PySyft
|
4b68c3c6fbe0c18cdf87dfe6ddc3c2071a71f1cc
|
[
"Apache-2.0"
] | 1
|
2021-02-12T12:11:44.000Z
|
2021-02-12T12:11:44.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms, datasets
import logging
import argparse
import sys
import asyncio
import numpy as np
import syft as sy
from syft import workers
from syft.frameworks.torch.federated import utils
logger = logging.getLogger(__name__)
LOG_INTERVAL = 25
# Loss function
@torch.jit.script
def loss_fn(pred, target):
return F.nll_loss(input=pred, target=target)
# Model
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5, 1)
self.conv2 = nn.Conv2d(20, 50, 5, 1)
self.fc1 = nn.Linear(4 * 4 * 50, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(-1, 4 * 4 * 50)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def define_and_get_arguments(args=sys.argv[1:]):
parser = argparse.ArgumentParser(
description="Run federated learning using websocket client workers."
)
parser.add_argument("--batch_size", type=int, default=32, help="batch size of the training")
parser.add_argument(
"--test_batch_size", type=int, default=128, help="batch size used for the test data"
)
parser.add_argument(
"--training_rounds", type=int, default=40, help="number of federated learning rounds"
)
parser.add_argument(
"--federate_after_n_batches",
type=int,
default=10,
help="number of training steps performed on each remote worker before averaging",
)
parser.add_argument("--lr", type=float, default=0.1, help="learning rate")
parser.add_argument("--cuda", action="store_true", help="use cuda")
parser.add_argument("--seed", type=int, default=1, help="seed used for randomization")
parser.add_argument("--save_model", action="store_true", help="if set, model will be saved")
parser.add_argument(
"--verbose",
"-v",
action="store_true",
help="if set, websocket client workers will be started in verbose mode",
)
args = parser.parse_args(args=args)
return args
async def fit_model_on_worker(
worker: workers.WebsocketClientWorker,
traced_model: torch.jit.ScriptModule,
batch_size: int,
curr_round: int,
max_nr_batches: int,
lr: float,
):
"""Send the model to the worker and fit the model on the worker's training data.
Args:
worker: Remote location, where the model shall be trained.
traced_model: Model which shall be trained.
batch_size: Batch size of each training step.
curr_round: Index of the current training round (for logging purposes).
max_nr_batches: If > 0, training on worker will stop at min(max_nr_batches, nr_available_batches).
lr: Learning rate of each training step.
Returns:
A tuple containing:
* worker_id: Union[int, str], id of the worker.
* improved model: torch.jit.ScriptModule, model after training at the worker.
* loss: Loss on last training batch, torch.tensor.
"""
train_config = sy.TrainConfig(
model=traced_model,
loss_fn=loss_fn,
batch_size=batch_size,
shuffle=True,
max_nr_batches=max_nr_batches,
epochs=1,
lr=lr,
)
train_config.send(worker)
logger.info(
"Training round %s, calling fit on worker: %s, lr = %s",
curr_round,
worker.id,
"{:.3f}".format(train_config.lr),
)
loss = await worker.async_fit(dataset_key="mnist", return_ids=[0])
logger.info("Training round: %s, worker: %s, avg_loss: %s", curr_round, worker.id, loss.mean())
model = train_config.model_ptr.get().obj
return worker.id, model, loss
def evaluate_models_on_test_data(test_loader, results):
np.set_printoptions(formatter={"float": "{: .0f}".format})
for worker_id, worker_model, _ in results:
evaluate_model(worker_id, worker_model, "cpu", test_loader, print_target_hist=False)
def evaluate_model(worker_id, model, device, test_loader, print_target_hist=False):
model.eval()
test_loss = 0.0
correct = 0
hist_target = np.zeros(10)
hist_pred = np.zeros(10)
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
hist, _ = np.histogram(target, bins=10, range=(0, 10))
hist_target += hist
output = model(data)
test_loss += loss_fn(output, target).item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
hist, _ = np.histogram(pred, bins=10, range=(0, 10))
hist_pred += hist
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
if print_target_hist:
logger.info("Target histogram: %s", hist_target)
logger.info("Prediction hist.: %s", hist_pred)
logger.info(
"%s: Test set: Average loss: %s, Accuracy: %s/%s (%s)",
worker_id,
"{:.4f}".format(test_loss),
correct,
len(test_loader.dataset),
"{:.2f}".format(100.0 * correct / len(test_loader.dataset)),
)
async def main():
args = define_and_get_arguments()
hook = sy.TorchHook(torch)
kwargs_websocket = {"host": "localhost", "hook": hook, "verbose": args.verbose}
alice = workers.WebsocketClientWorker(id="alice", port=8777, **kwargs_websocket)
bob = workers.WebsocketClientWorker(id="bob", port=8778, **kwargs_websocket)
charlie = workers.WebsocketClientWorker(id="charlie", port=8779, **kwargs_websocket)
worker_instances = [alice, bob, charlie]
use_cuda = args.cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {"num_workers": 1, "pin_memory": True} if use_cuda else {}
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(
"../data",
train=False,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
),
batch_size=args.test_batch_size,
shuffle=False,
drop_last=False,
**kwargs,
)
model = Net().to(device)
    (data, target) = next(iter(test_loader))
traced_model = torch.jit.trace(model, data)
learning_rate = args.lr
for curr_round in range(1, args.training_rounds + 1):
logger.info("Starting training round %s/%s", curr_round, args.training_rounds)
results = await asyncio.gather(
*[
fit_model_on_worker(
worker=worker,
traced_model=traced_model,
batch_size=args.batch_size,
curr_round=curr_round,
max_nr_batches=args.federate_after_n_batches,
lr=learning_rate,
)
for worker in worker_instances
]
)
models = {}
loss_values = {}
test_models = curr_round % 10 == 1 or curr_round == args.training_rounds
if test_models:
evaluate_models_on_test_data(test_loader, results)
for worker_id, worker_model, worker_loss in results:
if worker_model is not None:
models[worker_id] = worker_model
loss_values[worker_id] = worker_loss
traced_model = utils.federated_avg(models)
if test_models:
evaluate_model(
"Federated model", traced_model, "cpu", test_loader, print_target_hist=True
)
# decay learning rate
learning_rate = max(0.98 * learning_rate, args.lr * 0.01)
if args.save_model:
torch.save(model.state_dict(), "mnist_cnn.pt")
if __name__ == "__main__":
# Logging setup
logger = logging.getLogger("run_websocket_server")
FORMAT = "%(asctime)s %(levelname)s %(filename)s(l:%(lineno)d, p:%(process)d) - %(message)s"
logging.basicConfig(format=FORMAT)
logger.setLevel(level=logging.DEBUG)
# Websockets setup
websockets_logger = logging.getLogger("websockets")
websockets_logger.setLevel(logging.INFO)
websockets_logger.addHandler(logging.StreamHandler())
# Run main
asyncio.get_event_loop().run_until_complete(main())
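
# Conceptual sketch (not part of the original script): utils.federated_avg combines the
# per-worker models into a single model; the usual idea is a parameter-wise mean over
# the state dicts. This simplified stand-in assumes every model shares the same
# architecture and ignores the TrainConfig/ScriptModule plumbing used above.
def naive_federated_avg(models):
    worker_ids = list(models)
    base = models[worker_ids[0]]
    avg_state = {k: v.clone().float() for k, v in base.state_dict().items()}
    for worker_id in worker_ids[1:]:
        for k, v in models[worker_id].state_dict().items():
            avg_state[k] += v.float()
    for k in avg_state:
        avg_state[k] /= len(worker_ids)
    base.load_state_dict(avg_state)
    return base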
| 33.034483
| 106
| 0.63164
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms, datasets
import logging
import argparse
import sys
import asyncio
import numpy as np
import syft as sy
from syft import workers
from syft.frameworks.torch.federated import utils
logger = logging.getLogger(__name__)
LOG_INTERVAL = 25
@torch.jit.script
def loss_fn(pred, target):
return F.nll_loss(input=pred, target=target)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5, 1)
self.conv2 = nn.Conv2d(20, 50, 5, 1)
self.fc1 = nn.Linear(4 * 4 * 50, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(-1, 4 * 4 * 50)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def define_and_get_arguments(args=sys.argv[1:]):
parser = argparse.ArgumentParser(
description="Run federated learning using websocket client workers."
)
parser.add_argument("--batch_size", type=int, default=32, help="batch size of the training")
parser.add_argument(
"--test_batch_size", type=int, default=128, help="batch size used for the test data"
)
parser.add_argument(
"--training_rounds", type=int, default=40, help="number of federated learning rounds"
)
parser.add_argument(
"--federate_after_n_batches",
type=int,
default=10,
help="number of training steps performed on each remote worker before averaging",
)
parser.add_argument("--lr", type=float, default=0.1, help="learning rate")
parser.add_argument("--cuda", action="store_true", help="use cuda")
parser.add_argument("--seed", type=int, default=1, help="seed used for randomization")
parser.add_argument("--save_model", action="store_true", help="if set, model will be saved")
parser.add_argument(
"--verbose",
"-v",
action="store_true",
help="if set, websocket client workers will be started in verbose mode",
)
args = parser.parse_args(args=args)
return args
async def fit_model_on_worker(
worker: workers.WebsocketClientWorker,
traced_model: torch.jit.ScriptModule,
batch_size: int,
curr_round: int,
max_nr_batches: int,
lr: float,
):
train_config = sy.TrainConfig(
model=traced_model,
loss_fn=loss_fn,
batch_size=batch_size,
shuffle=True,
max_nr_batches=max_nr_batches,
epochs=1,
lr=lr,
)
train_config.send(worker)
logger.info(
"Training round %s, calling fit on worker: %s, lr = %s",
curr_round,
worker.id,
"{:.3f}".format(train_config.lr),
)
loss = await worker.async_fit(dataset_key="mnist", return_ids=[0])
logger.info("Training round: %s, worker: %s, avg_loss: %s", curr_round, worker.id, loss.mean())
model = train_config.model_ptr.get().obj
return worker.id, model, loss
def evaluate_models_on_test_data(test_loader, results):
np.set_printoptions(formatter={"float": "{: .0f}".format})
for worker_id, worker_model, _ in results:
evaluate_model(worker_id, worker_model, "cpu", test_loader, print_target_hist=False)
def evaluate_model(worker_id, model, device, test_loader, print_target_hist=False):
model.eval()
test_loss = 0.0
correct = 0
hist_target = np.zeros(10)
hist_pred = np.zeros(10)
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
hist, _ = np.histogram(target, bins=10, range=(0, 10))
hist_target += hist
output = model(data)
test_loss += loss_fn(output, target).item()
pred = output.argmax(dim=1, keepdim=True)
hist, _ = np.histogram(pred, bins=10, range=(0, 10))
hist_pred += hist
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
if print_target_hist:
logger.info("Target histogram: %s", hist_target)
logger.info("Prediction hist.: %s", hist_pred)
logger.info(
"%s: Test set: Average loss: %s, Accuracy: %s/%s (%s)",
worker_id,
"{:.4f}".format(test_loss),
correct,
len(test_loader.dataset),
"{:.2f}".format(100.0 * correct / len(test_loader.dataset)),
)
async def main():
args = define_and_get_arguments()
hook = sy.TorchHook(torch)
kwargs_websocket = {"host": "localhost", "hook": hook, "verbose": args.verbose}
alice = workers.WebsocketClientWorker(id="alice", port=8777, **kwargs_websocket)
bob = workers.WebsocketClientWorker(id="bob", port=8778, **kwargs_websocket)
charlie = workers.WebsocketClientWorker(id="charlie", port=8779, **kwargs_websocket)
worker_instances = [alice, bob, charlie]
use_cuda = args.cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {"num_workers": 1, "pin_memory": True} if use_cuda else {}
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(
"../data",
train=False,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
),
batch_size=args.test_batch_size,
shuffle=False,
drop_last=False,
**kwargs,
)
model = Net().to(device)
    (data, target) = next(iter(test_loader))
traced_model = torch.jit.trace(model, data)
learning_rate = args.lr
for curr_round in range(1, args.training_rounds + 1):
logger.info("Starting training round %s/%s", curr_round, args.training_rounds)
results = await asyncio.gather(
*[
fit_model_on_worker(
worker=worker,
traced_model=traced_model,
batch_size=args.batch_size,
curr_round=curr_round,
max_nr_batches=args.federate_after_n_batches,
lr=learning_rate,
)
for worker in worker_instances
]
)
models = {}
loss_values = {}
test_models = curr_round % 10 == 1 or curr_round == args.training_rounds
if test_models:
evaluate_models_on_test_data(test_loader, results)
for worker_id, worker_model, worker_loss in results:
if worker_model is not None:
models[worker_id] = worker_model
loss_values[worker_id] = worker_loss
traced_model = utils.federated_avg(models)
if test_models:
evaluate_model(
"Federated model", traced_model, "cpu", test_loader, print_target_hist=True
)
learning_rate = max(0.98 * learning_rate, args.lr * 0.01)
if args.save_model:
torch.save(model.state_dict(), "mnist_cnn.pt")
if __name__ == "__main__":
logger = logging.getLogger("run_websocket_server")
FORMAT = "%(asctime)s %(levelname)s %(filename)s(l:%(lineno)d, p:%(process)d) - %(message)s"
logging.basicConfig(format=FORMAT)
logger.setLevel(level=logging.DEBUG)
websockets_logger = logging.getLogger("websockets")
websockets_logger.setLevel(logging.INFO)
websockets_logger.addHandler(logging.StreamHandler())
asyncio.get_event_loop().run_until_complete(main())
| true
| true
|
f71a67e87a44037f0e910996ddb201d1c1d0ca36
| 373
|
py
|
Python
|
Lib/site-packages/spyder/plugins/layout/__init__.py
|
hirorin-demon/hirorin-streamlit
|
03fbb6f03ec94f909d451e708a3b30b177607695
|
[
"0BSD"
] | 1
|
2021-06-20T14:52:40.000Z
|
2021-06-20T14:52:40.000Z
|
spyder/plugins/layout/__init__.py
|
Pancakerr/spyder
|
34a9878bba97f427fbdd7b4a6d77ac0651327565
|
[
"MIT"
] | 1
|
2020-11-02T21:11:19.000Z
|
2020-11-02T21:11:19.000Z
|
spyder/plugins/layout/__init__.py
|
Pancakerr/spyder
|
34a9878bba97f427fbdd7b4a6d77ac0651327565
|
[
"MIT"
] | 1
|
2020-06-14T07:03:50.000Z
|
2020-06-14T07:03:50.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
spyder.plugins.layout
=====================
Layout plugin.
"""
from spyder.plugins.layout.plugin import Layout
# The following statement is required to be able to grab internal plugins.
PLUGIN_CLASSES = [Layout]
| 20.722222
| 74
| 0.699732
|
from spyder.plugins.layout.plugin import Layout
PLUGIN_CLASSES = [Layout]
| true
| true
|
f71a685556aab5e675c6c3f4e360e0b1d91795d0
| 5,029
|
py
|
Python
|
nezzle/graphics/arrows/basearrow.py
|
dwgoon/nezzle
|
c69d111ae5e57ee2a7db85e14299c23d3b98a6d5
|
[
"MIT"
] | 2
|
2021-10-06T08:54:02.000Z
|
2021-10-06T16:17:18.000Z
|
nezzle/graphics/arrows/basearrow.py
|
dwgoon/nezzle
|
c69d111ae5e57ee2a7db85e14299c23d3b98a6d5
|
[
"MIT"
] | null | null | null |
nezzle/graphics/arrows/basearrow.py
|
dwgoon/nezzle
|
c69d111ae5e57ee2a7db85e14299c23d3b98a6d5
|
[
"MIT"
] | null | null | null |
from qtpy.QtCore import QPointF
from nezzle.utils import TriggerDict
class BaseArrow(object):
ITEM_TYPE = 'BASE_HEAD'
DEFAULT_OFFSET = 4
def __init__(self, width, height, offset):
self._attr = TriggerDict()
self._attr['ITEM_TYPE'] = self.ITEM_TYPE
self._offset = offset
self._height = height
self._width = width
self._attr.set_trigger('WIDTH', self._trigger_set_width, when='set')
self._attr.set_trigger('HEIGHT', self._trigger_set_height, when='set')
self._attr.set_trigger('OFFSET', self._trigger_set_offset, when='set')
self._attr['WIDTH'] = width
self._attr['HEIGHT'] = height
self._attr['OFFSET'] = offset
# Read-write properties
@property
def parent(self):
return self._parent
@parent.setter
def parent(self, obj):
self._parent = obj
@property
def width(self):
return self._width
@width.setter
def width(self, val):
self._attr['WIDTH'] = val
self.update()
def _trigger_set_width(self, key, value):
self._width = value
return value
@property
def height(self):
return self._height
@height.setter
def height(self, val):
self._attr['HEIGHT'] = val
self.update()
def _trigger_set_height(self, key, value):
self._height = value
return value
@property
def offset(self):
return self._offset
@offset.setter
def offset(self, val):
if not hasattr(self, "_parent") or not self._parent:
raise ValueError("A edge should be assigned for this arrow before setting offset.")
self._attr['OFFSET'] = val
self.update()
def _trigger_set_offset(self, key, value):
self._offset = value
return value
def update(self):
self.parent.update()
def identify_points(self, head, edge_body_width, angle=None):
raise NotImplementedError("identify_pos should be implemented!")
def to_dict(self):
dict_head = {}
dict_head['ITEM_TYPE'] = self.ITEM_TYPE
dict_head['WIDTH'] = self.width
dict_head['HEIGHT'] = self.height
dict_head['OFFSET'] = self.offset
dict_head.update(self._attr)
return dict_head
@classmethod
def from_dict(cls, dict_head):
width = dict_head['WIDTH']
height = dict_head['HEIGHT']
offset = dict_head['OFFSET']
return cls(width, height, offset=offset)
class Triangle(BaseArrow):
ITEM_TYPE = "TRIANGLE"
DEFAULT_WIDTH = 10
DEFAULT_HEIGHT = 10
DEFAULT_OFFSET = 4
def __init__(self, width=None, height=None, offset=None, *args, **kwargs):
if not width:
width = Triangle.DEFAULT_WIDTH
if not height:
height = Triangle.DEFAULT_HEIGHT
if not offset:
offset = Triangle.DEFAULT_OFFSET
super().__init__(width, height, offset, *args, **kwargs)
def identify_points(self, head, edge_body_width, transform=None):
neck1 = head + QPointF(0, -edge_body_width/2)
neck2 = head + QPointF(0, +edge_body_width/2)
face1 = head + QPointF(0.0, -self.width/2)
face2 = head + QPointF(0.0, +self.width/2)
top = head + QPointF(self.height, 0)
points = [neck1, face1, top, face2, neck2]
# transform is a callable object, which defines its own transformation in __call__.
if transform:
for i, pt in enumerate(points):
points[i] = transform(pt, head)
return points
    # end of def identify_points
def set_size_from_edge(self, edge_width):
self.width = 5*edge_width
self.height = 5*edge_width
self.parent.update()
class Hammer(BaseArrow):
ITEM_TYPE = "HAMMER"
DEFAULT_WIDTH = 14
DEFAULT_HEIGHT = 2
DEFAULT_OFFSET = 4
def __init__(self, width=None, height=None, offset=None, *args, **kwargs):
if not width:
width = Hammer.DEFAULT_WIDTH
if not height:
height = Hammer.DEFAULT_HEIGHT
if not offset:
offset = Hammer.DEFAULT_OFFSET
super().__init__(width, height, offset, *args, **kwargs)
def identify_points(self, head, edge_body_width, transform=None):
neck1 = head + QPointF(0, -edge_body_width/2)
neck2 = head + QPointF(0, +edge_body_width/2)
face1 = head + QPointF(0, -self.width/2)
face2 = head + QPointF(self.height, -self.width/2)
face3 = head + QPointF(self.height, +self.width/2)
face4 = head + QPointF(0, +self.width/2)
points = [neck1, face1, face2, face3, face4, neck2]
if transform:
for i, pt in enumerate(points):
points[i] = transform(pt, head)
return points
    # end of def identify_points
def set_size_from_edge(self, edge_width):
self.width = 7*edge_width
self.height = edge_width
self.parent.update()
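
# Illustrative sketch (not part of the original module): identify_points accepts an
# optional callable that maps each polygon point relative to the arrow head.
# RotateAboutHead is a hypothetical example that rotates the arrow by a fixed angle
# around the head point, using only math and QPointF (already imported above).
import math

class RotateAboutHead(object):
    def __init__(self, angle_deg):
        self._rad = math.radians(angle_deg)

    def __call__(self, pt, head):
        dx, dy = pt.x() - head.x(), pt.y() - head.y()
        cos_a, sin_a = math.cos(self._rad), math.sin(self._rad)
        return QPointF(head.x() + dx * cos_a - dy * sin_a,
                       head.y() + dx * sin_a + dy * cos_a)

# Example: triangle.identify_points(head, edge_width, transform=RotateAboutHead(30))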
| 26.329843
| 95
| 0.611652
|
from qtpy.QtCore import QPointF
from nezzle.utils import TriggerDict
class BaseArrow(object):
ITEM_TYPE = 'BASE_HEAD'
DEFAULT_OFFSET = 4
def __init__(self, width, height, offset):
self._attr = TriggerDict()
self._attr['ITEM_TYPE'] = self.ITEM_TYPE
self._offset = offset
self._height = height
self._width = width
self._attr.set_trigger('WIDTH', self._trigger_set_width, when='set')
self._attr.set_trigger('HEIGHT', self._trigger_set_height, when='set')
self._attr.set_trigger('OFFSET', self._trigger_set_offset, when='set')
self._attr['WIDTH'] = width
self._attr['HEIGHT'] = height
self._attr['OFFSET'] = offset
@property
def parent(self):
return self._parent
@parent.setter
def parent(self, obj):
self._parent = obj
@property
def width(self):
return self._width
@width.setter
def width(self, val):
self._attr['WIDTH'] = val
self.update()
def _trigger_set_width(self, key, value):
self._width = value
return value
@property
def height(self):
return self._height
@height.setter
def height(self, val):
self._attr['HEIGHT'] = val
self.update()
def _trigger_set_height(self, key, value):
self._height = value
return value
@property
def offset(self):
return self._offset
@offset.setter
def offset(self, val):
if not hasattr(self, "_parent") or not self._parent:
raise ValueError("A edge should be assigned for this arrow before setting offset.")
self._attr['OFFSET'] = val
self.update()
def _trigger_set_offset(self, key, value):
self._offset = value
return value
def update(self):
self.parent.update()
def identify_points(self, head, edge_body_width, angle=None):
raise NotImplementedError("identify_pos should be implemented!")
def to_dict(self):
dict_head = {}
dict_head['ITEM_TYPE'] = self.ITEM_TYPE
dict_head['WIDTH'] = self.width
dict_head['HEIGHT'] = self.height
dict_head['OFFSET'] = self.offset
dict_head.update(self._attr)
return dict_head
@classmethod
def from_dict(cls, dict_head):
width = dict_head['WIDTH']
height = dict_head['HEIGHT']
offset = dict_head['OFFSET']
return cls(width, height, offset=offset)
class Triangle(BaseArrow):
ITEM_TYPE = "TRIANGLE"
DEFAULT_WIDTH = 10
DEFAULT_HEIGHT = 10
DEFAULT_OFFSET = 4
def __init__(self, width=None, height=None, offset=None, *args, **kwargs):
if not width:
width = Triangle.DEFAULT_WIDTH
if not height:
height = Triangle.DEFAULT_HEIGHT
if not offset:
offset = Triangle.DEFAULT_OFFSET
super().__init__(width, height, offset, *args, **kwargs)
def identify_points(self, head, edge_body_width, transform=None):
neck1 = head + QPointF(0, -edge_body_width/2)
neck2 = head + QPointF(0, +edge_body_width/2)
face1 = head + QPointF(0.0, -self.width/2)
face2 = head + QPointF(0.0, +self.width/2)
top = head + QPointF(self.height, 0)
points = [neck1, face1, top, face2, neck2]
if transform:
for i, pt in enumerate(points):
points[i] = transform(pt, head)
return points
def set_size_from_edge(self, edge_width):
self.width = 5*edge_width
self.height = 5*edge_width
self.parent.update()
class Hammer(BaseArrow):
ITEM_TYPE = "HAMMER"
DEFAULT_WIDTH = 14
DEFAULT_HEIGHT = 2
DEFAULT_OFFSET = 4
def __init__(self, width=None, height=None, offset=None, *args, **kwargs):
if not width:
width = Hammer.DEFAULT_WIDTH
if not height:
height = Hammer.DEFAULT_HEIGHT
if not offset:
offset = Hammer.DEFAULT_OFFSET
super().__init__(width, height, offset, *args, **kwargs)
def identify_points(self, head, edge_body_width, transform=None):
neck1 = head + QPointF(0, -edge_body_width/2)
neck2 = head + QPointF(0, +edge_body_width/2)
face1 = head + QPointF(0, -self.width/2)
face2 = head + QPointF(self.height, -self.width/2)
face3 = head + QPointF(self.height, +self.width/2)
face4 = head + QPointF(0, +self.width/2)
points = [neck1, face1, face2, face3, face4, neck2]
if transform:
for i, pt in enumerate(points):
points[i] = transform(pt, head)
return points
def set_size_from_edge(self, edge_width):
self.width = 7*edge_width
self.height = edge_width
self.parent.update()
| true
| true
|
f71a69117f18301e660b95414a5b6b4799351cfc
| 14,078
|
py
|
Python
|
glance/tests/functional/test_api.py
|
ilay09/glance
|
60814cb577401c121d5d786980b3b801be5f4e9e
|
[
"Apache-2.0"
] | null | null | null |
glance/tests/functional/test_api.py
|
ilay09/glance
|
60814cb577401c121d5d786980b3b801be5f4e9e
|
[
"Apache-2.0"
] | null | null | null |
glance/tests/functional/test_api.py
|
ilay09/glance
|
60814cb577401c121d5d786980b3b801be5f4e9e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Version-independent api tests"""
import httplib2
from oslo_serialization import jsonutils
from six.moves import http_client
from glance.tests import functional
class TestApiVersions(functional.FunctionalTest):
def test_version_configurations(self):
"""Test that versioning is handled properly through all channels"""
# v1 and v2 api enabled
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
versions = {'versions': [
{
'id': 'v2.5',
'status': 'CURRENT',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.4',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.3',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.2',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.1',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.0',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v1.1',
'status': 'DEPRECATED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
{
'id': 'v1.0',
'status': 'DEPRECATED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
]}
versions_json = jsonutils.dumps(versions)
# Verify version choices returned.
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
self.assertEqual(versions_json, content)
def test_v2_api_configuration(self):
self.api_server.enable_v1_api = False
self.api_server.enable_v2_api = True
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
versions = {'versions': [
{
'id': 'v2.5',
'status': 'CURRENT',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.4',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.3',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.2',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.1',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.0',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
]}
versions_json = jsonutils.dumps(versions)
# Verify version choices returned.
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
self.assertEqual(versions_json, content)
def test_v1_api_configuration(self):
self.api_server.enable_v1_api = True
self.api_server.enable_v2_api = False
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
versions = {'versions': [
{
'id': 'v1.1',
'status': 'DEPRECATED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
{
'id': 'v1.0',
'status': 'DEPRECATED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
]}
versions_json = jsonutils.dumps(versions)
# Verify version choices returned.
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
self.assertEqual(versions_json, content)
class TestApiPaths(functional.FunctionalTest):
def setUp(self):
super(TestApiPaths, self).setUp()
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
versions = {'versions': [
{
'id': 'v2.5',
'status': 'CURRENT',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.4',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.3',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.2',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.1',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.0',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v1.1',
'status': 'DEPRECATED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
{
'id': 'v1.0',
'status': 'DEPRECATED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
]}
self.versions_json = jsonutils.dumps(versions)
images = {'images': []}
self.images_json = jsonutils.dumps(images)
def test_get_root_path(self):
"""Assert GET / with `no Accept:` header.
Verify version choices returned.
Bug lp:803260 no Accept header causes a 500 in glance-api
"""
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
self.assertEqual(self.versions_json, content)
def test_get_images_path(self):
"""Assert GET /images with `no Accept:` header.
Verify version choices returned.
"""
path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
self.assertEqual(self.versions_json, content)
def test_get_v1_images_path(self):
"""GET /v1/images with `no Accept:` header.
Verify empty images list returned.
"""
path = 'http://%s:%d/v1/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.OK, response.status)
def test_get_root_path_with_unknown_header(self):
"""Assert GET / with Accept: unknown header
Verify version choices returned. Verify message in API log about
unknown accept header.
"""
path = 'http://%s:%d/' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'unknown'}
response, content = http.request(path, 'GET', headers=headers)
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
self.assertEqual(self.versions_json, content)
def test_get_root_path_with_openstack_header(self):
"""Assert GET / with an Accept: application/vnd.openstack.images-v1
Verify empty image list returned
"""
path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'application/vnd.openstack.images-v1'}
response, content = http.request(path, 'GET', headers=headers)
self.assertEqual(http_client.OK, response.status)
self.assertEqual(self.images_json, content)
def test_get_images_path_with_openstack_header(self):
"""Assert GET /images with a
`Accept: application/vnd.openstack.compute-v1` header.
Verify version choices returned. Verify message in API log
about unknown accept header.
"""
path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'application/vnd.openstack.compute-v1'}
response, content = http.request(path, 'GET', headers=headers)
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
self.assertEqual(self.versions_json, content)
def test_get_v10_images_path(self):
"""Assert GET /v1.0/images with no Accept: header
Verify version choices returned
"""
path = 'http://%s:%d/v1.a/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
def test_get_v1a_images_path(self):
"""Assert GET /v1.a/images with no Accept: header
Verify version choices returned
"""
path = 'http://%s:%d/v1.a/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
def test_get_va1_images_path(self):
"""Assert GET /va.1/images with no Accept: header
Verify version choices returned
"""
path = 'http://%s:%d/va.1/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
self.assertEqual(self.versions_json, content)
def test_get_versions_path(self):
"""Assert GET /versions with no Accept: header
Verify version choices returned
"""
path = 'http://%s:%d/versions' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.OK, response.status)
self.assertEqual(self.versions_json, content)
def test_get_versions_path_with_openstack_header(self):
"""Assert GET /versions with the
`Accept: application/vnd.openstack.images-v1` header.
Verify version choices returned.
"""
path = 'http://%s:%d/versions' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'application/vnd.openstack.images-v1'}
response, content = http.request(path, 'GET', headers=headers)
self.assertEqual(http_client.OK, response.status)
self.assertEqual(self.versions_json, content)
def test_get_v1_versions_path(self):
"""Assert GET /v1/versions with `no Accept:` header
Verify 404 returned
"""
path = 'http://%s:%d/v1/versions' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.NOT_FOUND, response.status)
def test_get_versions_choices(self):
"""Verify version choices returned"""
path = 'http://%s:%d/v10' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
self.assertEqual(self.versions_json, content)
def test_get_images_path_with_openstack_v2_header(self):
"""Assert GET /images with a
        `Accept: application/vnd.openstack.images-v10` header.
Verify version choices returned. Verify message in API log
about unknown version in accept header.
"""
path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'application/vnd.openstack.images-v10'}
response, content = http.request(path, 'GET', headers=headers)
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
self.assertEqual(self.versions_json, content)
def test_get_v12_images_path(self):
"""Assert GET /v1.2/images with `no Accept:` header
Verify version choices returned
"""
path = 'http://%s:%d/v1.2/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
self.assertEqual(self.versions_json, content)
| 38.359673
| 78
| 0.533954
|
import httplib2
from oslo_serialization import jsonutils
from six.moves import http_client
from glance.tests import functional
class TestApiVersions(functional.FunctionalTest):
def test_version_configurations(self):
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
versions = {'versions': [
{
'id': 'v2.5',
'status': 'CURRENT',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.4',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.3',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.2',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.1',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.0',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v1.1',
'status': 'DEPRECATED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
{
'id': 'v1.0',
'status': 'DEPRECATED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
]}
versions_json = jsonutils.dumps(versions)
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
self.assertEqual(versions_json, content)
def test_v2_api_configuration(self):
self.api_server.enable_v1_api = False
self.api_server.enable_v2_api = True
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
versions = {'versions': [
{
'id': 'v2.5',
'status': 'CURRENT',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.4',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.3',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.2',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.1',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.0',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
]}
versions_json = jsonutils.dumps(versions)
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
self.assertEqual(versions_json, content)
def test_v1_api_configuration(self):
self.api_server.enable_v1_api = True
self.api_server.enable_v2_api = False
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
versions = {'versions': [
{
'id': 'v1.1',
'status': 'DEPRECATED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
{
'id': 'v1.0',
'status': 'DEPRECATED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
]}
versions_json = jsonutils.dumps(versions)
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
self.assertEqual(versions_json, content)
class TestApiPaths(functional.FunctionalTest):
def setUp(self):
super(TestApiPaths, self).setUp()
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
versions = {'versions': [
{
'id': 'v2.5',
'status': 'CURRENT',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.4',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.3',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.2',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.1',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.0',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v1.1',
'status': 'DEPRECATED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
{
'id': 'v1.0',
'status': 'DEPRECATED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
]}
self.versions_json = jsonutils.dumps(versions)
images = {'images': []}
self.images_json = jsonutils.dumps(images)
def test_get_root_path(self):
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
self.assertEqual(self.versions_json, content)
def test_get_images_path(self):
path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
self.assertEqual(self.versions_json, content)
def test_get_v1_images_path(self):
path = 'http://%s:%d/v1/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.OK, response.status)
def test_get_root_path_with_unknown_header(self):
path = 'http://%s:%d/' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'unknown'}
response, content = http.request(path, 'GET', headers=headers)
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
self.assertEqual(self.versions_json, content)
def test_get_root_path_with_openstack_header(self):
path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'application/vnd.openstack.images-v1'}
response, content = http.request(path, 'GET', headers=headers)
self.assertEqual(http_client.OK, response.status)
self.assertEqual(self.images_json, content)
def test_get_images_path_with_openstack_header(self):
path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'application/vnd.openstack.compute-v1'}
response, content = http.request(path, 'GET', headers=headers)
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
self.assertEqual(self.versions_json, content)
def test_get_v10_images_path(self):
path = 'http://%s:%d/v1.a/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
def test_get_v1a_images_path(self):
path = 'http://%s:%d/v1.a/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
def test_get_va1_images_path(self):
path = 'http://%s:%d/va.1/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
self.assertEqual(self.versions_json, content)
def test_get_versions_path(self):
path = 'http://%s:%d/versions' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.OK, response.status)
self.assertEqual(self.versions_json, content)
def test_get_versions_path_with_openstack_header(self):
path = 'http://%s:%d/versions' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'application/vnd.openstack.images-v1'}
response, content = http.request(path, 'GET', headers=headers)
self.assertEqual(http_client.OK, response.status)
self.assertEqual(self.versions_json, content)
def test_get_v1_versions_path(self):
path = 'http://%s:%d/v1/versions' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.NOT_FOUND, response.status)
def test_get_versions_choices(self):
path = 'http://%s:%d/v10' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
self.assertEqual(self.versions_json, content)
def test_get_images_path_with_openstack_v2_header(self):
path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'application/vnd.openstack.images-v10'}
response, content = http.request(path, 'GET', headers=headers)
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
self.assertEqual(self.versions_json, content)
def test_get_v12_images_path(self):
path = 'http://%s:%d/v1.2/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
self.assertEqual(self.versions_json, content)
| true
| true
|
f71a691a5ad95f1250cb884753aea776f113110d
| 7,737
|
py
|
Python
|
interface.py
|
Owiti-Charles/Password-Locker
|
3e2a0fd883d033fe784af387b52d7360a1157d34
|
[
"MIT"
] | 3
|
2019-08-31T08:48:15.000Z
|
2021-12-14T08:21:05.000Z
|
interface.py
|
Owiti-Charles/Password-Locker
|
3e2a0fd883d033fe784af387b52d7360a1157d34
|
[
"MIT"
] | null | null | null |
interface.py
|
Owiti-Charles/Password-Locker
|
3e2a0fd883d033fe784af387b52d7360a1157d34
|
[
"MIT"
] | 24
|
2020-03-09T10:42:17.000Z
|
2022-02-20T19:25:56.000Z
|
#!/usr/bin/env python3.6
from passlock import User, Credentials
def function():
print(" ____ _____ _ ")
print(" | _ \ / ____|| | ")
print(" | |_) ) ____ ___ ___ / ____ | |__ _____ _ _ ____ ")
print(" | __/ / _ |/ __ / __ \___ \ | __) / _ \| '_|/ __ \ ")
print(" | | / (_| |\__ \ \__ \ ___ / | |___ ( (_) ) | | ___/ ")
print(" |_| \_____| ___/ ___/ |____/ |_____) \_____/|_| \____ ")
function()
def create_new_user(username,password):
'''
Function to create a new user with a username and password
'''
new_user = User(username,password)
return new_user
def save_user(user):
'''
Function to save a new user
'''
user.save_user()
def display_user():
"""
Function to display existing user
"""
return User.display_user()
def login_user(username,password):
"""
    Function that checks whether a user exists and then logs the user in.
"""
check_user = Credentials.verify_user(username,password)
return check_user
def create_new_credential(account,userName,password):
"""
Function that creates new credentials for a given user account
"""
new_credential = Credentials(account,userName,password)
return new_credential
def save_credentials(credentials):
"""
Function to save Credentials to the credentials list
"""
    credentials.save_details()
def display_accounts_details():
"""
Function that returns all the saved credential.
"""
return Credentials.display_credentials()
def delete_credential(credentials):
"""
    Function to delete a credential from the credentials list
"""
credentials.delete_credentials()
def find_credential(account):
"""
    Function that finds a credential by account name and returns the credentials that belong to that account
"""
return Credentials.find_credential(account)
def check_credendtials(account):
"""
    Function that checks whether a credential exists with that account name and returns True or False
"""
return Credentials.if_credential_exist(account)
def generate_Password():
'''
generates a random password for the user.
'''
auto_password=Credentials.generatePassword()
return auto_password
def copy_password(account):
"""
    Function that copies the password for an account to the clipboard using the pyperclip framework.
"""
return Credentials.copy_password(account)
def passlocker():
print("Hello Welcome to your Accounts Password Store...\n Please enter one of the following to proceed.\n CA --- Create New Account \n LI --- Have An Account \n")
short_code=input("").lower().strip()
if short_code == "ca":
print("Sign Up")
print('*' * 50)
username = input("User_name: ")
while True:
print(" TP - To type your own pasword:\n GP - To generate random Password")
password_Choice = input().lower().strip()
if password_Choice == 'tp':
password = input("Enter Password\n")
break
elif password_Choice == 'gp':
password = generate_Password()
break
else:
print("Invalid password please try again")
save_user(create_new_user(username,password))
print("*"*85)
print(f"Hello {username}, Your account has been created succesfully! Your password is: {password}")
print("*"*85)
elif short_code == "li":
print("*"*50)
print("Enter your User name and your Password to log in:")
print('*' * 50)
username = input("User name: ")
password = input("password: ")
login = login_user(username,password)
        if login:
print(f"Hello {username}.Welcome To PassWord Locker Manager")
print('\n')
while True:
print("Use these short codes:\n CC - Create a new credential \n DC - Display Credentials \n FC - Find a credential \n GP - Generate A randomn password \n D - Delete credential \n EX - Exit the application \n")
short_code = input().lower().strip()
if short_code == "cc":
print("Create New Credential")
print("."*20)
print("Account name ....")
account = input().lower()
print("Your Account username")
userName = input()
while True:
print(" TP - To type your own pasword if you already have an account:\n GP - To generate random Password")
password_Choice = input().lower().strip()
if password_Choice == 'tp':
password = input("Enter Your Own Password\n")
break
elif password_Choice == 'gp':
password = generate_Password()
break
else:
print("Invalid password please try again")
save_credentials(create_new_credential(account,userName,password))
print('\n')
print(f"Account Credential for: {account} - UserName: {userName} - Password:{password} created succesfully")
print('\n')
elif short_code == "dc":
if display_accounts_details():
print("Here's your list of acoounts: ")
print('*' * 30)
print('_'* 30)
for account in display_accounts_details():
print(f" Account:{account.account} \n User Name:{username}\n Password:{password}")
print('_'* 30)
print('*' * 30)
else:
print("You don't have any credentials saved yet..........")
elif short_code == "fc":
print("Enter the Account Name you want to search for")
search_name = input().lower()
if find_credential(search_name):
search_credential = find_credential(search_name)
print(f"Account Name : {search_credential.account}")
print('-' * 50)
print(f"User Name: {search_credential.userName} Password :{search_credential.password}")
print('-' * 50)
else:
print("That Credential does not exist")
print('\n')
elif short_code == "d":
print("Enter the account name of the Credentials you want to delete")
search_name = input().lower()
if find_credential(search_name):
search_credential = find_credential(search_name)
print("_"*50)
search_credential.delete_credentials()
print('\n')
print(f"Your stored credentials for : {search_credential.account} successfully deleted!!!")
print('\n')
else:
print("That Credential you want to delete does not exist in your store yet")
elif short_code == 'gp':
password = generate_Password()
print(f" {password} Has been generated succesfull. You can proceed to use it to your account")
elif short_code == 'ex':
print("Thanks for using passwords store manager.. See you next time!")
break
else:
print("Wrong entry... Check your entry again and let it match those in the menu")
else:
print("Please enter a valid input to continue")
if __name__ == '__main__':
passlocker()
| 39.676923
| 217
| 0.569342
|
from passlock import User, Credentials
def function():
print(" ____ _____ _ ")
print(" | _ \ / ____|| | ")
print(" | |_) ) ____ ___ ___ / ____ | |__ _____ _ _ ____ ")
print(" | __/ / _ |/ __ / __ \___ \ | __) / _ \| '_|/ __ \ ")
print(" | | / (_| |\__ \ \__ \ ___ / | |___ ( (_) ) | | ___/ ")
print(" |_| \_____| ___/ ___/ |____/ |_____) \_____/|_| \____ ")
function()
def create_new_user(username,password):
new_user = User(username,password)
return new_user
def save_user(user):
user.save_user()
def display_user():
return User.display_user()
def login_user(username,password):
check_user = Credentials.verify_user(username,password)
return check_user
def create_new_credential(account,userName,password):
new_credential = Credentials(account,userName,password)
return new_credential
def save_credentials(credentials):
    credentials.save_details()
def display_accounts_details():
return Credentials.display_credentials()
def delete_credential(credentials):
credentials.delete_credentials()
def find_credential(account):
return Credentials.find_credential(account)
def check_credentials(account):
return Credentials.if_credential_exist(account)
def generate_Password():
auto_password=Credentials.generatePassword()
return auto_password
def copy_password(account):
return Credentials.copy_password(account)
def passlocker():
print("Hello Welcome to your Accounts Password Store...\n Please enter one of the following to proceed.\n CA --- Create New Account \n LI --- Have An Account \n")
short_code=input("").lower().strip()
if short_code == "ca":
print("Sign Up")
print('*' * 50)
username = input("User_name: ")
while True:
print(" TP - To type your own pasword:\n GP - To generate random Password")
password_Choice = input().lower().strip()
if password_Choice == 'tp':
password = input("Enter Password\n")
break
elif password_Choice == 'gp':
password = generate_Password()
break
else:
print("Invalid password please try again")
save_user(create_new_user(username,password))
print("*"*85)
print(f"Hello {username}, Your account has been created succesfully! Your password is: {password}")
print("*"*85)
elif short_code == "li":
print("*"*50)
print("Enter your User name and your Password to log in:")
print('*' * 50)
username = input("User name: ")
password = input("password: ")
login = login_user(username,password)
        if login:
            print(f"Hello {username}. Welcome To PassWord Locker Manager")
print('\n')
while True:
print("Use these short codes:\n CC - Create a new credential \n DC - Display Credentials \n FC - Find a credential \n GP - Generate A randomn password \n D - Delete credential \n EX - Exit the application \n")
short_code = input().lower().strip()
if short_code == "cc":
print("Create New Credential")
print("."*20)
print("Account name ....")
account = input().lower()
print("Your Account username")
userName = input()
while True:
print(" TP - To type your own pasword if you already have an account:\n GP - To generate random Password")
password_Choice = input().lower().strip()
if password_Choice == 'tp':
password = input("Enter Your Own Password\n")
break
elif password_Choice == 'gp':
password = generate_Password()
break
else:
print("Invalid password please try again")
save_credentials(create_new_credential(account,userName,password))
print('\n')
print(f"Account Credential for: {account} - UserName: {userName} - Password:{password} created succesfully")
print('\n')
elif short_code == "dc":
if display_accounts_details():
print("Here's your list of acoounts: ")
print('*' * 30)
print('_'* 30)
for account in display_accounts_details():
print(f" Account:{account.account} \n User Name:{username}\n Password:{password}")
print('_'* 30)
print('*' * 30)
else:
print("You don't have any credentials saved yet..........")
elif short_code == "fc":
print("Enter the Account Name you want to search for")
search_name = input().lower()
if find_credential(search_name):
search_credential = find_credential(search_name)
print(f"Account Name : {search_credential.account}")
print('-' * 50)
print(f"User Name: {search_credential.userName} Password :{search_credential.password}")
print('-' * 50)
else:
print("That Credential does not exist")
print('\n')
elif short_code == "d":
print("Enter the account name of the Credentials you want to delete")
search_name = input().lower()
if find_credential(search_name):
search_credential = find_credential(search_name)
print("_"*50)
search_credential.delete_credentials()
print('\n')
print(f"Your stored credentials for : {search_credential.account} successfully deleted!!!")
print('\n')
else:
print("That Credential you want to delete does not exist in your store yet")
elif short_code == 'gp':
password = generate_Password()
print(f" {password} Has been generated succesfull. You can proceed to use it to your account")
elif short_code == 'ex':
print("Thanks for using passwords store manager.. See you next time!")
break
else:
print("Wrong entry... Check your entry again and let it match those in the menu")
else:
print("Please enter a valid input to continue")
if __name__ == '__main__':
passlocker()
| true
| true
|
f71a697a4e4fb47cb796149291e6b50fd45b68f7
| 2,233
|
py
|
Python
|
v1.0.0.test/toontown/toon/NPCForceAcknowledge.py
|
TTOFFLINE-LEAK/ttoffline
|
bb0e91704a755d34983e94288d50288e46b68380
|
[
"MIT"
] | 4
|
2019-07-01T15:46:43.000Z
|
2021-07-23T16:26:48.000Z
|
v1.0.0.test/toontown/toon/NPCForceAcknowledge.py
|
TTOFFLINE-LEAK/ttoffline
|
bb0e91704a755d34983e94288d50288e46b68380
|
[
"MIT"
] | 1
|
2019-06-29T03:40:05.000Z
|
2021-06-13T01:15:16.000Z
|
v1.0.0.test/toontown/toon/NPCForceAcknowledge.py
|
TTOFFLINE-LEAK/ttoffline
|
bb0e91704a755d34983e94288d50288e46b68380
|
[
"MIT"
] | 4
|
2019-07-28T21:18:46.000Z
|
2021-02-25T06:37:25.000Z
|
from panda3d.core import *
from toontown.toontowngui import TTDialog
from toontown.toonbase import TTLocalizer
from direct.gui import DirectLabel
from toontown.quest import Quests
class NPCForceAcknowledge:
def __init__(self, doneEvent):
self.doneEvent = doneEvent
self.dialog = None
return
def enter(self):
doneStatus = {}
questHistory = base.localAvatar.getQuestHistory()
imgScale = 0.5
if questHistory != [] and questHistory != [1000] and questHistory != [101, 110]:
doneStatus['mode'] = 'complete'
messenger.send(self.doneEvent, [doneStatus])
elif len(base.localAvatar.quests) > 1 or len(base.localAvatar.quests) == 0:
doneStatus['mode'] = 'complete'
messenger.send(self.doneEvent, [doneStatus])
elif base.localAvatar.quests[0][0] != Quests.TROLLEY_QUEST_ID:
doneStatus['mode'] = 'complete'
messenger.send(self.doneEvent, [doneStatus])
else:
base.localAvatar.b_setAnimState('neutral', 1)
doneStatus['mode'] = 'incomplete'
self.doneStatus = doneStatus
imageModel = loader.loadModel('phase_4/models/gui/tfa_images')
if Quests.avatarHasTrolleyQuest(base.localAvatar):
if base.localAvatar.quests[0][4] != 0:
imgNodePath = imageModel.find('**/hq-dialog-image')
imgPos = (0, 0, -0.02)
msg = TTLocalizer.NPCForceAcknowledgeMessage2
else:
imgNodePath = imageModel.find('**/trolley-dialog-image')
imgPos = (0, 0, 0.04)
msg = TTLocalizer.NPCForceAcknowledgeMessage
self.dialog = TTDialog.TTDialog(text=msg, command=self.handleOk, style=TTDialog.Acknowledge)
imgLabel = DirectLabel.DirectLabel(parent=self.dialog, relief=None, pos=imgPos, scale=TTLocalizer.NPCFimgLabel, image=imgNodePath, image_scale=imgScale)
return
def exit(self):
if self.dialog:
self.dialog.cleanup()
self.dialog = None
return
def handleOk(self, value):
messenger.send(self.doneEvent, [self.doneStatus])
| 42.942308
| 164
| 0.617555
|
from panda3d.core import *
from toontown.toontowngui import TTDialog
from toontown.toonbase import TTLocalizer
from direct.gui import DirectLabel
from toontown.quest import Quests
class NPCForceAcknowledge:
def __init__(self, doneEvent):
self.doneEvent = doneEvent
self.dialog = None
return
def enter(self):
doneStatus = {}
questHistory = base.localAvatar.getQuestHistory()
imgScale = 0.5
if questHistory != [] and questHistory != [1000] and questHistory != [101, 110]:
doneStatus['mode'] = 'complete'
messenger.send(self.doneEvent, [doneStatus])
elif len(base.localAvatar.quests) > 1 or len(base.localAvatar.quests) == 0:
doneStatus['mode'] = 'complete'
messenger.send(self.doneEvent, [doneStatus])
elif base.localAvatar.quests[0][0] != Quests.TROLLEY_QUEST_ID:
doneStatus['mode'] = 'complete'
messenger.send(self.doneEvent, [doneStatus])
else:
base.localAvatar.b_setAnimState('neutral', 1)
doneStatus['mode'] = 'incomplete'
self.doneStatus = doneStatus
imageModel = loader.loadModel('phase_4/models/gui/tfa_images')
if Quests.avatarHasTrolleyQuest(base.localAvatar):
if base.localAvatar.quests[0][4] != 0:
imgNodePath = imageModel.find('**/hq-dialog-image')
imgPos = (0, 0, -0.02)
msg = TTLocalizer.NPCForceAcknowledgeMessage2
else:
imgNodePath = imageModel.find('**/trolley-dialog-image')
imgPos = (0, 0, 0.04)
msg = TTLocalizer.NPCForceAcknowledgeMessage
self.dialog = TTDialog.TTDialog(text=msg, command=self.handleOk, style=TTDialog.Acknowledge)
imgLabel = DirectLabel.DirectLabel(parent=self.dialog, relief=None, pos=imgPos, scale=TTLocalizer.NPCFimgLabel, image=imgNodePath, image_scale=imgScale)
return
def exit(self):
if self.dialog:
self.dialog.cleanup()
self.dialog = None
return
def handleOk(self, value):
messenger.send(self.doneEvent, [self.doneStatus])
| true
| true
|
f71a6b6d7ebfa629b63064b6a06dfb7bca79a040
| 2,157
|
py
|
Python
|
htmltreediff/edit_script_runner.py
|
nomadicfm/htmltreediff
|
02a27b2339d5a9a96902eed5d12bca1b755bb109
|
[
"BSD-3-Clause"
] | 3
|
2015-04-04T20:35:17.000Z
|
2021-08-06T16:51:09.000Z
|
htmltreediff/edit_script_runner.py
|
tex/htmltreediff
|
ce5a94edd0cfb05ed5130aaed3f06c63668df127
|
[
"BSD-3-Clause"
] | 14
|
2015-01-15T16:03:14.000Z
|
2020-03-23T16:29:02.000Z
|
htmltreediff/edit_script_runner.py
|
tex/htmltreediff
|
ce5a94edd0cfb05ed5130aaed3f06c63668df127
|
[
"BSD-3-Clause"
] | 2
|
2017-05-16T04:17:46.000Z
|
2018-04-30T20:05:32.000Z
|
from xml.dom import Node
from htmltreediff.util import (
get_child,
get_location,
remove_node,
insert_or_append,
)
class EditScriptRunner(object):
def __init__(self, dom, edit_script):
self.dom = dom
self.edit_script = edit_script
self.del_nodes = []
self.ins_nodes = []
# edit script actions #
def action_delete(self, node):
parent = node.parentNode
next_sibling = node.nextSibling
remove_node(node)
node.orig_parent = parent
node.orig_next_sibling = next_sibling
self.del_nodes.append(node)
def action_insert(
self,
parent,
child_index,
node_type=None,
node_name=None,
node_value=None,
attributes=None,
):
node = None
if node_type == Node.ELEMENT_NODE:
node = self.dom.createElement(node_name)
if attributes:
for key, value in attributes.items():
node.setAttribute(key, value)
elif node_type == Node.TEXT_NODE:
node = self.dom.createTextNode(node_value)
if node is not None:
self.action_insert_node(parent, child_index, node)
def action_insert_node(self, parent, child_index, node):
next_sibling = get_child(parent, child_index)
insert_or_append(parent, node, next_sibling)
# add node to ins_nodes
assert node.parentNode is not None
node.orig_parent = parent
node.orig_next_sibling = next_sibling
self.ins_nodes.append(node)
# script running #
def run_edit_script(self):
"""
Run an xml edit script, and return the new html produced.
"""
for action, location, properties in self.edit_script:
if action == 'delete':
node = get_location(self.dom, location)
self.action_delete(node)
elif action == 'insert':
parent = get_location(self.dom, location[:-1])
child_index = location[-1]
self.action_insert(parent, child_index, **properties)
return self.dom
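
# Illustrative usage (not part of the original module). run_edit_script()
# consumes an iterable of (action, location, properties) tuples, as the loop
# above shows; the concrete location paths and node values here are
# hypothetical and depend on htmltreediff.util.get_location's indexing scheme.
#
#     runner = EditScriptRunner(dom, [
#         ('delete', [0, 1, 0], {}),
#         ('insert', [0, 1, 0], {'node_type': Node.TEXT_NODE,
#                                'node_value': 'new text'}),
#     ])
#     new_dom = runner.run_edit_script()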
| 30.814286
| 69
| 0.601298
|
from xml.dom import Node
from htmltreediff.util import (
get_child,
get_location,
remove_node,
insert_or_append,
)
class EditScriptRunner(object):
def __init__(self, dom, edit_script):
self.dom = dom
self.edit_script = edit_script
self.del_nodes = []
self.ins_nodes = []
def action_delete(self, node):
parent = node.parentNode
next_sibling = node.nextSibling
remove_node(node)
node.orig_parent = parent
node.orig_next_sibling = next_sibling
self.del_nodes.append(node)
def action_insert(
self,
parent,
child_index,
node_type=None,
node_name=None,
node_value=None,
attributes=None,
):
node = None
if node_type == Node.ELEMENT_NODE:
node = self.dom.createElement(node_name)
if attributes:
for key, value in attributes.items():
node.setAttribute(key, value)
elif node_type == Node.TEXT_NODE:
node = self.dom.createTextNode(node_value)
if node is not None:
self.action_insert_node(parent, child_index, node)
def action_insert_node(self, parent, child_index, node):
next_sibling = get_child(parent, child_index)
insert_or_append(parent, node, next_sibling)
assert node.parentNode is not None
node.orig_parent = parent
node.orig_next_sibling = next_sibling
self.ins_nodes.append(node)
def run_edit_script(self):
for action, location, properties in self.edit_script:
if action == 'delete':
node = get_location(self.dom, location)
self.action_delete(node)
elif action == 'insert':
parent = get_location(self.dom, location[:-1])
child_index = location[-1]
self.action_insert(parent, child_index, **properties)
return self.dom
| true
| true
|
f71a6bcaeb8ae82f35824738ce05e63e951e4767
| 4,632
|
py
|
Python
|
archives/src/episode7/he_is_back.py
|
NovelBox/sherlock-no-adventure
|
9fe59ade8446d5c27e7bd390de9de42e26fc63a1
|
[
"MIT"
] | null | null | null |
archives/src/episode7/he_is_back.py
|
NovelBox/sherlock-no-adventure
|
9fe59ade8446d5c27e7bd390de9de42e26fc63a1
|
[
"MIT"
] | null | null | null |
archives/src/episode7/he_is_back.py
|
NovelBox/sherlock-no-adventure
|
9fe59ade8446d5c27e7bd390de9de42e26fc63a1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Episode 7-3
'''
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
sys.path.append('storybuilder')
from storybuilder.builder.world import World
# DEFINE
TITLE = "英雄の帰還"
# NOTE: outlines
ABSTRACT = """
変身して$sherlockたちを追い詰める$jake。しかし$sherlockの機転で工場に穴を開け、日光を浴びせかけることで$jakeは皮膚から大量に出血し、爆発した。
その爆音を聞いて$limeたちが駆けつける。$maryが身を挺して$sherlockを守っていたが、$maryは大怪我を負ってしまった。入院することになる$mary。
戻った$sherlockは、一旦$wilsonの家で$limeたちに事情を語る。
$morianoとの対決により滝壺に落下し、死を覚悟した$sherlockだったが、$maryが繕ってくれた服の裾が引っかかり、何とか死だけは免れた。
ただ大怪我をしており、そこを助けてくれたのが、$jackだった。彼女の別荘で回復するまで休養しながら各国の情報を集め、$moriano配下の動向を追いかけていた。
未だに$sherlockを探す動きが見えたので、おびき出すために空き家の件をでっち上げた。だがそれを利用した$jakeにより$maryがおびき出された、というのが今回の一件だった。
$sherlockは$maryに預けておいた$blue_stoneを取り戻す必要があると言う。
しかし$sherlockたちが病院に駆けつけると、$maryの姿が消えていた。
"""
# Episode
def main(w: World):
return w.episode(TITLE,
# NOTE
w.plot_setup("連続殺人犯$jakeは$maryを殺そうとする"),
w.plot_turnpoint("そこにホームレスが助けに入る"),
w.plot_develop("$sherlockは$jakeがどんな人生を歩んできたかを全て言い当て$jakeの牙を無力化しようとする"),
w.plot_turnpoint("$transformした$maryにより$sherlockが守られるが、彼女が負傷する"),
w.plot_resolve("$sherlockが呼んでおいた警察により$jakeは捕らえられた。$maryは入院し、$sherlockも治療を受ける"),
w.plot_turnpoint("入院している$maryから$blue_stoneを貰おうと思ったが$patosonにより連れ出された後だった"),
w.plot_note("$maryは病室で目覚める"),
w.plot_note("そこには$patsonの姿があった"),
w.plot_note("$maryは$sherlockは? と尋ねるが、わからないと言われる"),
w.plot_note("$patsonは$maryへの事情聴取を行う"),
w.plot_note("一体あそこで何を見たのか"),
w.plot_note("$maryはその黒焦げの遺体が、連続猟奇殺人事件の犯人だと証言した"),
w.plot_note("$patsonは$jakeがそう告白したのか? と尋ねた"),
#
w.plot_note("$limeは$ignesたちから$maryが爆発現場で発見されたと聞く"),
w.plot_note("その$ignesはホームレスと仲良さそうに話している"),
w.plot_note("その男こそ$sherlockだった"),
w.plot_note("$limeは驚き、事情を聞く"),
w.plot_note("$sherlockは実はずいぶん前に国内に戻ってきていて、$ignesは事情を知らされていた"),
w.plot_note("$sherlockを狙う連中をごまかすために、色々と嘘の情報をばらまいていた"),
w.plot_note("空き家情報も嘘のものだったが、それを使って猟奇殺人犯の$jakeが細工をし、$maryをおびき出した"),
w.plot_note("それを先導した人間が誰かいる、と$sherlockは言う"),
w.plot_note("滝壺から落ちたあと、$jackに助けられ、彼女の隠れ家で治療をしてもらっていた"),
w.plot_note("今回殺害されていた$ronaldが所有していた最後の$black_stoneが盗まれたことがわかり、戻ってきた"),
w.plot_note("四つ$stoneを揃えられるとまずい、と$shserlockは言う"),
w.plot_note("ひとまず$maryの様子を見に行くことにし、タクシーを拾う(これが$jack)"),
#
w.plot_note("病院にやってくると先に様子をみにきていた$refiがいる"),
w.plot_note("$refiは泣きそうになって、$maryを$patsonが連れ出したという"),
w.plot_note("$sherlockはそれで理解し、すぐに大聖堂に向かうと"),
w.plot_note("しかし$wilsonがいない。タクシー運転手に頼んで向かってもらう"),
#
w.plot_note("車内で説明する$sherlock"),
w.plot_note("四つの$stoneは$boss復活の儀式に必要な祭具だった"),
w.plot_note("かつて$bossを倒した$heroたちの神器にはまっていたものだが、$bossの力を吸収し、封じ込めたもの"),
w.plot_note("それが時代を経て、売られたり、盗まれたりし、行方不明になった"),
w.plot_note("今ある多くはレプリカだという"),
w.plot_note("実際に四つ揃え、かつての$boss城があった場所で儀式を行う"),
w.plot_note("それが大聖堂だという"),
w.plot_note("$boss城を封じる目的であの場所に建っていたのだ"),
w.plot_note("昨年春にあった地震は儀式の失敗だという"),
w.plot_note("その頃はまだ何が必要なのか、すべて判明していなかった。だが$stein教授により解明された"),
w.plot_note("その資料は$morianoにより盗まれ、紛失している"),
w.plot_note("実際にどういうものなのかは$sherlockも知らない"),
#
"$wilsonは最後に登場",
w.plot_note("大聖堂にやってくると、何があったのか警官($parkerたち)が警備していた"),
w.plot_note("巨大な爆弾が見つかったというのでみんなを避難させるように言われたと"),
w.plot_note("そこに$restradeもやってきて、困惑している"),
w.plot_note("一体何をやってるんだ、$patsonはと"),
w.plot_note("$sherlockはすぐ$patsonの家を調べるように言う。彼が$cultXの手先だった"),
w.plot_note("$sherlockは中に入る"),
#
w.plot_note("大聖堂の中は人がいなくなり、静まり返っていた"),
w.plot_note("聖堂を進む"),
w.plot_note("偉人たちの墓が並ぶ聖廟でもあった"),
w.plot_note("その一つが開けられている。中身はない"),
w.plot_note("扉があり、奥にいくと地下への階段"),
w.plot_note("地下に降りていく$sherlockたち"),
w.plot_note("そこには巨大なホールが広がっていた"),
w.plot_note("祭壇には四つの$stoneが供えられ、$patsonが儀式を始めようとしている"),
w.plot_note("誰も入れるなと言ったのに、と不敵な顔の$patson"),
w.plot_note("$maryは倒れていた。服が少し破れている。中に$stoneを身に着けていたからだ"),
w.plot_note("$sherlockがすぐにやめるように忠告する"),
w.plot_note("儀式は失敗すると言った"),
w.plot_note("しかし$patsonは儀式を行うべく、祝詞をとなえる"),
w.plot_note("その$patsonを現れた$wilsonが$gunで撃ち抜いた"),
w.plot_note("「間に合ってよかったよ」という$wilson"),
outline=ABSTRACT)
| 44.970874
| 91
| 0.655009
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
sys.path.append('storybuilder')
from storybuilder.builder.world import World
TITLE = "英雄の帰還"
ABSTRACT = """
変身して$sherlockたちを追い詰める$jake。しかし$sherlockの機転で工場に穴を開け、日光を浴びせかけることで$jakeは皮膚から大量に出血し、爆発した。
その爆音を聞いて$limeたちが駆けつける。$maryが身を挺して$sherlockを守っていたが、$maryは大怪我を負ってしまった。入院することになる$mary。
戻った$sherlockは、一旦$wilsonの家で$limeたちに事情を語る。
$morianoとの対決により滝壺に落下し、死を覚悟した$sherlockだったが、$maryが繕ってくれた服の裾が引っかかり、何とか死だけは免れた。
ただ大怪我をしており、そこを助けてくれたのが、$jackだった。彼女の別荘で回復するまで休養しながら各国の情報を集め、$moriano配下の動向を追いかけていた。
未だに$sherlockを探す動きが見えたので、おびき出すために空き家の件をでっち上げた。だがそれを利用した$jakeにより$maryがおびき出された、というのが今回の一件だった。
$sherlockは$maryに預けておいた$blue_stoneを取り戻す必要があると言う。
しかし$sherlockたちが病院に駆けつけると、$maryの姿が消えていた。
"""
def main(w: World):
return w.episode(TITLE,
w.plot_setup("連続殺人犯$jakeは$maryを殺そうとする"),
w.plot_turnpoint("そこにホームレスが助けに入る"),
w.plot_develop("$sherlockは$jakeがどんな人生を歩んできたかを全て言い当て$jakeの牙を無力化しようとする"),
w.plot_turnpoint("$transformした$maryにより$sherlockが守られるが、彼女が負傷する"),
w.plot_resolve("$sherlockが呼んでおいた警察により$jakeは捕らえられた。$maryは入院し、$sherlockも治療を受ける"),
w.plot_turnpoint("入院している$maryから$blue_stoneを貰おうと思ったが$patosonにより連れ出された後だった"),
w.plot_note("$maryは病室で目覚める"),
w.plot_note("そこには$patsonの姿があった"),
w.plot_note("$maryは$sherlockは? と尋ねるが、わからないと言われる"),
w.plot_note("$patsonは$maryへの事情聴取を行う"),
w.plot_note("一体あそこで何を見たのか"),
w.plot_note("$maryはその黒焦げの遺体が、連続猟奇殺人事件の犯人だと証言した"),
w.plot_note("$patsonは$jakeがそう告白したのか? と尋ねた"),
w.plot_note("$limeは$ignesたちから$maryが爆発現場で発見されたと聞く"),
w.plot_note("その$ignesはホームレスと仲良さそうに話している"),
w.plot_note("その男こそ$sherlockだった"),
w.plot_note("$limeは驚き、事情を聞く"),
w.plot_note("$sherlockは実はずいぶん前に国内に戻ってきていて、$ignesは事情を知らされていた"),
w.plot_note("$sherlockを狙う連中をごまかすために、色々と嘘の情報をばらまいていた"),
w.plot_note("空き家情報も嘘のものだったが、それを使って猟奇殺人犯の$jakeが細工をし、$maryをおびき出した"),
w.plot_note("それを先導した人間が誰かいる、と$sherlockは言う"),
w.plot_note("滝壺から落ちたあと、$jackに助けられ、彼女の隠れ家で治療をしてもらっていた"),
w.plot_note("今回殺害されていた$ronaldが所有していた最後の$black_stoneが盗まれたことがわかり、戻ってきた"),
w.plot_note("四つ$stoneを揃えられるとまずい、と$shserlockは言う"),
w.plot_note("ひとまず$maryの様子を見に行くことにし、タクシーを拾う(これが$jack)"),
w.plot_note("病院にやってくると先に様子をみにきていた$refiがいる"),
w.plot_note("$refiは泣きそうになって、$maryを$patsonが連れ出したという"),
w.plot_note("$sherlockはそれで理解し、すぐに大聖堂に向かうと"),
w.plot_note("しかし$wilsonがいない。タクシー運転手に頼んで向かってもらう"),
w.plot_note("車内で説明する$sherlock"),
w.plot_note("四つの$stoneは$boss復活の儀式に必要な祭具だった"),
w.plot_note("かつて$bossを倒した$heroたちの神器にはまっていたものだが、$bossの力を吸収し、封じ込めたもの"),
w.plot_note("それが時代を経て、売られたり、盗まれたりし、行方不明になった"),
w.plot_note("今ある多くはレプリカだという"),
w.plot_note("実際に四つ揃え、かつての$boss城があった場所で儀式を行う"),
w.plot_note("それが大聖堂だという"),
w.plot_note("$boss城を封じる目的であの場所に建っていたのだ"),
w.plot_note("昨年春にあった地震は儀式の失敗だという"),
w.plot_note("その頃はまだ何が必要なのか、すべて判明していなかった。だが$stein教授により解明された"),
w.plot_note("その資料は$morianoにより盗まれ、紛失している"),
w.plot_note("実際にどういうものなのかは$sherlockも知らない"),
"$wilsonは最後に登場",
w.plot_note("大聖堂にやってくると、何があったのか警官($parkerたち)が警備していた"),
w.plot_note("巨大な爆弾が見つかったというのでみんなを避難させるように言われたと"),
w.plot_note("そこに$restradeもやってきて、困惑している"),
w.plot_note("一体何をやってるんだ、$patsonはと"),
w.plot_note("$sherlockはすぐ$patsonの家を調べるように言う。彼が$cultXの手先だった"),
w.plot_note("$sherlockは中に入る"),
w.plot_note("大聖堂の中は人がいなくなり、静まり返っていた"),
w.plot_note("聖堂を進む"),
w.plot_note("偉人たちの墓が並ぶ聖廟でもあった"),
w.plot_note("その一つが開けられている。中身はない"),
w.plot_note("扉があり、奥にいくと地下への階段"),
w.plot_note("地下に降りていく$sherlockたち"),
w.plot_note("そこには巨大なホールが広がっていた"),
w.plot_note("祭壇には四つの$stoneが供えられ、$patsonが儀式を始めようとしている"),
w.plot_note("誰も入れるなと言ったのに、と不敵な顔の$patson"),
w.plot_note("$maryは倒れていた。服が少し破れている。中に$stoneを身に着けていたからだ"),
w.plot_note("$sherlockがすぐにやめるように忠告する"),
w.plot_note("儀式は失敗すると言った"),
w.plot_note("しかし$patsonは儀式を行うべく、祝詞をとなえる"),
w.plot_note("その$patsonを現れた$wilsonが$gunで撃ち抜いた"),
w.plot_note("「間に合ってよかったよ」という$wilson"),
outline=ABSTRACT)
| true
| true
|
f71a6d9110d6e2d9754fc6dd198852e4d0c18cb8
| 14,416
|
py
|
Python
|
tmapi/models/topic_map.py
|
ajenhl/django-tmapi
|
02f009e1b508218cf330ca7748c3a1dd110f3e8d
|
[
"Apache-2.0"
] | 2
|
2015-03-22T03:23:36.000Z
|
2017-01-08T10:57:18.000Z
|
tmapi/models/topic_map.py
|
ajenhl/django-tmapi
|
02f009e1b508218cf330ca7748c3a1dd110f3e8d
|
[
"Apache-2.0"
] | null | null | null |
tmapi/models/topic_map.py
|
ajenhl/django-tmapi
|
02f009e1b508218cf330ca7748c3a1dd110f3e8d
|
[
"Apache-2.0"
] | 1
|
2020-12-28T04:40:34.000Z
|
2020-12-28T04:40:34.000Z
|
# Copyright 2011 Jamie Norrish (jamie@artefact.org.nz)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.contrib.sites.models import Site
from django.db import models
from tmapi.exceptions import ModelConstraintException, \
UnsupportedOperationException
from tmapi.indices.literal_index import LiteralIndex
from tmapi.indices.scoped_index import ScopedIndex
from tmapi.indices.type_instance_index import TypeInstanceIndex
from association import Association
from construct_fields import BaseConstructFields
from identifier import Identifier
from item_identifier import ItemIdentifier
from locator import Locator
from reifiable import Reifiable
from subject_identifier import SubjectIdentifier
from subject_locator import SubjectLocator
from topic import Topic
from copy_utils import copy
class TopicMap (BaseConstructFields, Reifiable):
"""Represents a topic map item."""
topic_map_system = models.ForeignKey('TopicMapSystem',
related_name='topic_maps')
iri = models.CharField(max_length=512)
title = models.CharField(max_length=128, blank=True)
base_address = models.CharField(max_length=512, blank=True)
class Meta:
app_label = 'tmapi'
def __init__ (self, *args, **kwargs):
super(TopicMap, self).__init__(*args, **kwargs)
self._indices = {}
def create_association (self, association_type, scope=None,
proxy=Association):
"""Creates an `Association` in this topic map with the
specified type and scope.
:param association_type: the association type
:type association_type: `Topic`
:param scope: scope
:type scope: list of `Topic`s
:param proxy: Django proxy model class
:type proxy: class
:rtype: `Association`
"""
if association_type is None:
raise ModelConstraintException(self, 'The type may not be None')
if self != association_type.topic_map:
raise ModelConstraintException(
self, 'The type is not from this topic map')
association = proxy(type=association_type, topic_map=self)
association.save()
if scope is None:
scope = []
for topic in scope:
if self != topic.topic_map:
raise ModelConstraintException(
self, 'The theme is not from this topic map')
association.scope.add(topic)
return association
def create_empty_topic (self):
"""Returns a `Topic` instance with no other information.
:rtype: `Topic`
"""
topic = Topic(topic_map=self)
topic.save()
return topic
def create_locator (self, reference):
"""Returns a `Locator` instance representing the specified IRI
reference.
The specified IRI reference is assumed to be absolute.
:param reference: a string which uses the IRI notation
:type reference: string
:rtype: `Locator`
"""
return Locator(reference)
def create_topic (self, proxy=Topic):
"""Returns a `Topic` instance with an automatically generated
item identifier.
This method never returns an existing `Topic` but creates a
new one with an automatically generated item identifier.
Returns the newly created `Topic` instance with an automatically
generated item identifier.
:param proxy: Django proxy model class
:type proxy: class
:rtype: `Topic`
"""
topic = proxy(topic_map=self)
topic.save()
address = 'http://%s/tmapi/iid/auto/%d' % \
(Site.objects.get_current().domain, topic.id)
ii = ItemIdentifier(address=address, containing_topic_map=self)
ii.save()
topic.item_identifiers.add(ii)
return topic
def create_topic_by_item_identifier (self, item_identifier):
"""Returns a `Topic` instance with the specified item identifier.
This method returns either an existing `Topic` or creates a
new `Topic` instance with the specified item identifier.
If a topic with the specified item identifier exists in the
topic map, that topic is returned. If a topic with a subject
identifier equal to the specified item identifier exists, the
specified item identifier is added to that topic and the topic
is returned. If neither a topic with the specified item
identifier nor with a subject identifier equal to the subject
identifier exists, a topic with the item identifier is
created.
:param item_identifier: the item identifier the topic should contain
:type item_identifier: `Locator`
:rtype: `Topic`
"""
if item_identifier is None:
raise ModelConstraintException(
self, 'The item identifier may not be None')
reference = item_identifier.to_external_form()
try:
topic = self.topic_constructs.get(
item_identifiers__address=reference)
except Topic.DoesNotExist:
try:
topic = self.topic_constructs.get(
subject_identifiers__address=reference)
except Topic.DoesNotExist:
topic = Topic(topic_map=self)
topic.save()
ii = ItemIdentifier(address=reference, containing_topic_map=self)
ii.save()
topic.item_identifiers.add(ii)
return topic
def create_topic_by_subject_identifier (self, subject_identifier):
"""Returns a `Topic` instance with the specified subject identifier.
This method returns either an existing `Topic` or creates a
new `Topic` instance with the specified subject identifier.
If a topic with the specified subject identifier exists in
this topic map, that topic is returned. If a topic with an
item identifier equal to the specified subject identifier
exists, the specified subject identifier is added to that
topic and the topic is returned. If neither a topic with the
specified subject identifier nor with an item identifier equal
to the subject identifier exists, a topic with the subject
identifier is created.
:param subject_identifier: the subject identifier the topic
should contain
:type subject_identifier: `Locator`
:rtype: `Topic`
"""
if subject_identifier is None:
raise ModelConstraintException(
self, 'The subject identifier may not be None')
reference = subject_identifier.to_external_form()
try:
topic = self.topic_constructs.get(
subject_identifiers__address=reference)
except Topic.DoesNotExist:
try:
topic = self.topic_constructs.get(
item_identifiers__address=reference)
except Topic.DoesNotExist:
topic = Topic(topic_map=self)
topic.save()
si = SubjectIdentifier(topic=topic, address=reference,
containing_topic_map=self)
si.save()
topic.subject_identifiers.add(si)
return topic
def create_topic_by_subject_locator (self, subject_locator):
"""Returns a `Topic` instance with the specified subject locator.
This method returns either an existing `Topic` or creates a
new `Topic` instance with the specified subject locator.
:param subject_locator: the subject locator the topic should
contain
:type subject_locator: `Locator`
:rtype: `Topic`
"""
if subject_locator is None:
raise ModelConstraintException(
self, 'The subject locator may not be None')
reference = subject_locator.to_external_form()
try:
topic = self.topic_constructs.get(
subject_locators__address=reference)
except Topic.DoesNotExist:
topic = Topic(topic_map=self)
topic.save()
sl = SubjectLocator(topic=topic, address=reference,
containing_topic_map=self)
sl.save()
topic.subject_locators.add(sl)
return topic
def get_associations (self):
"""Returns all `Association`s contained in this topic map.
:rtype: `QuerySet` of `Association`s
"""
return self.association_constructs.all()
def get_construct_by_id (self, id, proxy=None):
"""Returns a `Construct` by its (system specific) identifier.
:param id: the identifier of the construct to be returned
:type id: string
:param proxy: Django proxy model
:type proxy: class
:rtype: `Construct`, proxy object, or None
"""
try:
identifier = Identifier.objects.get(pk=int(id),
containing_topic_map=self)
construct = identifier.get_construct()
if proxy is not None and construct is not None:
construct = proxy.objects.get(pk=construct.id)
except Identifier.DoesNotExist:
construct = None
return construct
def get_construct_by_item_identifier (self, item_identifier):
"""Returns a `Construct` by its item identifier.
:param item_identifier: the item identifier of the construct
to be returned
:type item_identifier: `Locator`
:rtype: a construct or None
"""
address = item_identifier.to_external_form()
try:
ii = ItemIdentifier.objects.get(address=address,
containing_topic_map=self)
construct = ii.get_construct()
except ItemIdentifier.DoesNotExist:
construct = None
return construct
def get_index (self, index_interface):
"""Returns the specified index.
:param index_interface: the index to return
:type index_interface: class
:rtype: `Index`
"""
if index_interface not in (LiteralIndex, ScopedIndex,
TypeInstanceIndex):
raise UnsupportedOperationException(
'This TMAPI implementation does not support that index')
if index_interface not in self._indices:
self._indices[index_interface] = index_interface(self)
return self._indices[index_interface]
def get_locator (self):
"""Returns the `Locator` that was used to create the topic map.
Note: The returned locator represents the storage address of
the topic map and implies no further semantics.
:rtype: `Locator`
"""
return Locator(self.iri)
def get_parent (self):
"""Returns None.
:rtype: None
"""
return None
def get_topics (self):
"""Returns all `Topic`s contained in this topic map.
:rtype: `QuerySet` of `Topic`s
"""
return self.topic_constructs.all()
def get_topic_by_subject_identifier (self, subject_identifier):
"""Returns a topic by its subject identifier.
If no topic with the specified subject identifier exists, this
method returns `None`.
:param subject_identifier: the subject identifier of the topic
to be returned
:type subject_identifier: `Locator`
:rtype: `Topic` or `None`
"""
reference = subject_identifier.to_external_form()
try:
topic = self.topic_constructs.get(
subject_identifiers__address=reference)
except Topic.DoesNotExist:
topic = None
return topic
def get_topic_by_subject_locator (self, subject_locator):
"""Returns a topic by its subject locator.
If no topic with the specified subject locator exists, this
method returns `None`.
:param subject_locator: the subject locator of the topic to be
returned
:type subject_locator: `Locator`
        :rtype: `Topic` or `None`
"""
reference = subject_locator.to_external_form()
try:
topic = self.topic_constructs.get(
subject_locators__address=reference)
except Topic.DoesNotExist:
topic = None
return topic
def get_topic_map (self):
"""Returns self.
:rtype: `TopicMap`
"""
return self
def merge_in (self, other):
"""Merges the topic map `other` into this topic map.
All `Topic`s and `Association`s and all of their contents in
`other` will be added to this topic map.
All information items in `other` will be merged into this
topic map as defined by the Topic Maps - Data Model (TMDM)
merging rules.
The merge process will not modify `other` in any way.
If this topic map equals `other`, no changes are made to the
topic map.
:param other: the topic map to be merged with this topic map
instance
:type other: `TopicMap`
"""
if other is None:
raise ModelConstraintException(
self, 'The topic map to merge in may not be None')
copy(other, self)
def remove (self):
self.delete()
def __eq__ (self, other):
if isinstance(other, TopicMap) and self.id == other.id:
return True
return False
def __ne__ (self, other):
return not(self.__eq__(other))
def __unicode__ (self):
name = self.title or 'Topic map'
return u'%s (%s)' % (name, self.iri)
| 34.821256
| 77
| 0.629231
|
from django.contrib.sites.models import Site
from django.db import models
from tmapi.exceptions import ModelConstraintException, \
UnsupportedOperationException
from tmapi.indices.literal_index import LiteralIndex
from tmapi.indices.scoped_index import ScopedIndex
from tmapi.indices.type_instance_index import TypeInstanceIndex
from association import Association
from construct_fields import BaseConstructFields
from identifier import Identifier
from item_identifier import ItemIdentifier
from locator import Locator
from reifiable import Reifiable
from subject_identifier import SubjectIdentifier
from subject_locator import SubjectLocator
from topic import Topic
from copy_utils import copy
class TopicMap (BaseConstructFields, Reifiable):
topic_map_system = models.ForeignKey('TopicMapSystem',
related_name='topic_maps')
iri = models.CharField(max_length=512)
title = models.CharField(max_length=128, blank=True)
base_address = models.CharField(max_length=512, blank=True)
class Meta:
app_label = 'tmapi'
def __init__ (self, *args, **kwargs):
super(TopicMap, self).__init__(*args, **kwargs)
self._indices = {}
def create_association (self, association_type, scope=None,
proxy=Association):
if association_type is None:
raise ModelConstraintException(self, 'The type may not be None')
if self != association_type.topic_map:
raise ModelConstraintException(
self, 'The type is not from this topic map')
association = proxy(type=association_type, topic_map=self)
association.save()
if scope is None:
scope = []
for topic in scope:
if self != topic.topic_map:
raise ModelConstraintException(
self, 'The theme is not from this topic map')
association.scope.add(topic)
return association
def create_empty_topic (self):
topic = Topic(topic_map=self)
topic.save()
return topic
def create_locator (self, reference):
return Locator(reference)
def create_topic (self, proxy=Topic):
topic = proxy(topic_map=self)
topic.save()
address = 'http://%s/tmapi/iid/auto/%d' % \
(Site.objects.get_current().domain, topic.id)
ii = ItemIdentifier(address=address, containing_topic_map=self)
ii.save()
topic.item_identifiers.add(ii)
return topic
def create_topic_by_item_identifier (self, item_identifier):
if item_identifier is None:
raise ModelConstraintException(
self, 'The item identifier may not be None')
reference = item_identifier.to_external_form()
try:
topic = self.topic_constructs.get(
item_identifiers__address=reference)
except Topic.DoesNotExist:
try:
topic = self.topic_constructs.get(
subject_identifiers__address=reference)
except Topic.DoesNotExist:
topic = Topic(topic_map=self)
topic.save()
ii = ItemIdentifier(address=reference, containing_topic_map=self)
ii.save()
topic.item_identifiers.add(ii)
return topic
def create_topic_by_subject_identifier (self, subject_identifier):
if subject_identifier is None:
raise ModelConstraintException(
self, 'The subject identifier may not be None')
reference = subject_identifier.to_external_form()
try:
topic = self.topic_constructs.get(
subject_identifiers__address=reference)
except Topic.DoesNotExist:
try:
topic = self.topic_constructs.get(
item_identifiers__address=reference)
except Topic.DoesNotExist:
topic = Topic(topic_map=self)
topic.save()
si = SubjectIdentifier(topic=topic, address=reference,
containing_topic_map=self)
si.save()
topic.subject_identifiers.add(si)
return topic
def create_topic_by_subject_locator (self, subject_locator):
if subject_locator is None:
raise ModelConstraintException(
self, 'The subject locator may not be None')
reference = subject_locator.to_external_form()
try:
topic = self.topic_constructs.get(
subject_locators__address=reference)
except Topic.DoesNotExist:
topic = Topic(topic_map=self)
topic.save()
sl = SubjectLocator(topic=topic, address=reference,
containing_topic_map=self)
sl.save()
topic.subject_locators.add(sl)
return topic
def get_associations (self):
return self.association_constructs.all()
def get_construct_by_id (self, id, proxy=None):
try:
identifier = Identifier.objects.get(pk=int(id),
containing_topic_map=self)
construct = identifier.get_construct()
if proxy is not None and construct is not None:
construct = proxy.objects.get(pk=construct.id)
except Identifier.DoesNotExist:
construct = None
return construct
def get_construct_by_item_identifier (self, item_identifier):
address = item_identifier.to_external_form()
try:
ii = ItemIdentifier.objects.get(address=address,
containing_topic_map=self)
construct = ii.get_construct()
except ItemIdentifier.DoesNotExist:
construct = None
return construct
def get_index (self, index_interface):
if index_interface not in (LiteralIndex, ScopedIndex,
TypeInstanceIndex):
raise UnsupportedOperationException(
'This TMAPI implementation does not support that index')
if index_interface not in self._indices:
self._indices[index_interface] = index_interface(self)
return self._indices[index_interface]
def get_locator (self):
return Locator(self.iri)
def get_parent (self):
return None
def get_topics (self):
return self.topic_constructs.all()
def get_topic_by_subject_identifier (self, subject_identifier):
reference = subject_identifier.to_external_form()
try:
topic = self.topic_constructs.get(
subject_identifiers__address=reference)
except Topic.DoesNotExist:
topic = None
return topic
def get_topic_by_subject_locator (self, subject_locator):
reference = subject_locator.to_external_form()
try:
topic = self.topic_constructs.get(
subject_locators__address=reference)
except Topic.DoesNotExist:
topic = None
return topic
def get_topic_map (self):
return self
def merge_in (self, other):
if other is None:
raise ModelConstraintException(
self, 'The topic map to merge in may not be None')
copy(other, self)
def remove (self):
self.delete()
def __eq__ (self, other):
if isinstance(other, TopicMap) and self.id == other.id:
return True
return False
def __ne__ (self, other):
return not(self.__eq__(other))
def __unicode__ (self):
name = self.title or 'Topic map'
return u'%s (%s)' % (name, self.iri)
| true
| true
|
f71a6db30e3de5c2849fe9a5b19812ba331899e0
| 2,275
|
py
|
Python
|
python-sdk/tutorials/automl-with-azureml/forecasting-recipes-univariate/forecasting_script.py
|
Ali-ry/azureml-examples
|
817ae89d2766dcafd70937a22cb3a80f100a2906
|
[
"MIT"
] | null | null | null |
python-sdk/tutorials/automl-with-azureml/forecasting-recipes-univariate/forecasting_script.py
|
Ali-ry/azureml-examples
|
817ae89d2766dcafd70937a22cb3a80f100a2906
|
[
"MIT"
] | null | null | null |
python-sdk/tutorials/automl-with-azureml/forecasting-recipes-univariate/forecasting_script.py
|
Ali-ry/azureml-examples
|
817ae89d2766dcafd70937a22cb3a80f100a2906
|
[
"MIT"
] | null | null | null |
"""
This is the script that is executed on the compute instance. It relies
on the model.pkl file which is uploaded along with this script to the
compute instance.
"""
import argparse
from azureml.core import Dataset, Run
from azureml.automl.core.shared.constants import TimeSeriesInternal
from sklearn.externals import joblib
parser = argparse.ArgumentParser()
parser.add_argument(
"--target_column_name",
type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument(
"--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
)
args = parser.parse_args()
target_column_name = args.target_column_name
test_dataset_id = args.test_dataset
run = Run.get_context()
ws = run.experiment.workspace
# get the input dataset by id
test_dataset = Dataset.get_by_id(ws, id=test_dataset_id)
X_test = (
test_dataset.drop_columns(columns=[target_column_name])
.to_pandas_dataframe()
.reset_index(drop=True)
)
y_test_df = (
test_dataset.with_timestamp_columns(None)
.keep_columns(columns=[target_column_name])
.to_pandas_dataframe()
)
# generate forecast
fitted_model = joblib.load("model.pkl")
# Default quantiles: the 2.5th, 50th and 97.5th percentiles, i.e. the median point forecast plus a 95% prediction interval
quantiles = [0.025, 0.5, 0.975]
predicted_column_name = "predicted"
PI = "prediction_interval"
fitted_model.quantiles = quantiles
pred_quantiles = fitted_model.forecast_quantiles(X_test)
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(
lambda x: "[{}, {}]".format(x[0], x[1]), axis=1
)
X_test[target_column_name] = y_test_df[target_column_name]
X_test[PI] = pred_quantiles[PI]
X_test[predicted_column_name] = pred_quantiles[0.5]
# drop rows where prediction or actuals are nan
# happens because of missing actuals
# or at edges of time due to lags/rolling windows
clean = X_test[
X_test[[target_column_name, predicted_column_name]].notnull().all(axis=1)
]
clean.rename(columns={target_column_name: "actual"}, inplace=True)
file_name = "outputs/predictions.csv"
export_csv = clean.to_csv(file_name, header=True, index=False)  # write without the DataFrame index
# Upload the predictions into artifacts
run.upload_file(name=file_name, path_or_stream=file_name)
| 32.042254
| 78
| 0.744176
|
import argparse
from azureml.core import Dataset, Run
from azureml.automl.core.shared.constants import TimeSeriesInternal
from sklearn.externals import joblib
parser = argparse.ArgumentParser()
parser.add_argument(
"--target_column_name",
type=str,
dest="target_column_name",
help="Target Column Name",
)
parser.add_argument(
"--test_dataset", type=str, dest="test_dataset", help="Test Dataset"
)
args = parser.parse_args()
target_column_name = args.target_column_name
test_dataset_id = args.test_dataset
run = Run.get_context()
ws = run.experiment.workspace
test_dataset = Dataset.get_by_id(ws, id=test_dataset_id)
X_test = (
test_dataset.drop_columns(columns=[target_column_name])
.to_pandas_dataframe()
.reset_index(drop=True)
)
y_test_df = (
test_dataset.with_timestamp_columns(None)
.keep_columns(columns=[target_column_name])
.to_pandas_dataframe()
)
fitted_model = joblib.load("model.pkl")
quantiles = [0.025, 0.5, 0.975]
predicted_column_name = "predicted"
PI = "prediction_interval"
fitted_model.quantiles = quantiles
pred_quantiles = fitted_model.forecast_quantiles(X_test)
pred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(
lambda x: "[{}, {}]".format(x[0], x[1]), axis=1
)
X_test[target_column_name] = y_test_df[target_column_name]
X_test[PI] = pred_quantiles[PI]
X_test[predicted_column_name] = pred_quantiles[0.5]
clean = X_test[
X_test[[target_column_name, predicted_column_name]].notnull().all(axis=1)
]
clean.rename(columns={target_column_name: "actual"}, inplace=True)
file_name = "outputs/predictions.csv"
export_csv = clean.to_csv(file_name, header=True, index=False)
run.upload_file(name=file_name, path_or_stream=file_name)
| true
| true
|
f71a6e91a09965fe94395d5877040ab4bd936107
| 4,760
|
py
|
Python
|
matching/matching.py
|
nielsbril/best
|
8a902293605f1bee1abf3ca66ae3708706658772
|
[
"MIT"
] | 21
|
2019-07-02T05:54:22.000Z
|
2021-04-07T13:52:50.000Z
|
matching/matching.py
|
nielsbril/best
|
8a902293605f1bee1abf3ca66ae3708706658772
|
[
"MIT"
] | 55
|
2019-07-03T18:59:26.000Z
|
2020-12-15T08:10:00.000Z
|
matching/matching.py
|
nielsbril/best
|
8a902293605f1bee1abf3ca66ae3708706658772
|
[
"MIT"
] | 9
|
2019-09-10T13:38:46.000Z
|
2021-09-01T08:02:42.000Z
|
import pandas as pd
import argparse
import logging
import sys
import json
def get_best_logger(log_file, verbose):
# Setup logger - (Python logger breaks PEP8 by default)
logger = logging.getLogger(__name__)
if verbose:
logger.setLevel('DEBUG')
# file_handler logs to file, stream_handler to console
file_handler = logging.FileHandler(log_file)
stream_handler = logging.StreamHandler()
# formatter sets log format
formatter = logging.Formatter(
'%(asctime)s - %(name)s : %(levelname)s - %(message)s')
# add formatter to both handlers
file_handler.setFormatter(formatter)
stream_handler.setFormatter(formatter)
# add both handlers to logger
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
return logger
def compare_addresses(args):
"""Compare the addresses of two files
"""
logger.info('Started reading BOSA address file')
try:
bosa = pd.read_csv(args.input_file_1)
logger.info('Read the BOSA address file')
except IOError as io:
logger.fatal(io)
sys.exit(1)
logger.info('Started reading comparison file')
try:
comparison = pd.read_csv(args.input_file_2)
logger.info('Read the comparison file')
except IOError as io:
logger.fatal(io)
sys.exit(1)
comp_keys = []
bosa_ids = []
for comp_key, bosa_key in args.mapping.items():
try:
comp_keys.append(comp_key)
bosa_ids.append(bosa.columns.get_loc(bosa_key))
except KeyError as ke:
logger.error(
'Column %s of column mapping (%s -> %s) not found in BOSA file', ke, comp_key, bosa_key)
sys.exit(1)
address_dict = {}
logger.info('Building data structure to perform matching')
for i, row in enumerate(bosa.values):
if i % 50_000 == 0:
logger.info('Processed %i / %i addresses', i, len(bosa))
address_dict[tuple(el.lower() if type(
el) == str else el for el in row[bosa_ids])] = row
extended = perform_exact_matching(
bosa, comparison, address_dict, comp_keys)
try:
extended.to_csv(args.output_file, index=False)
except IOError as io:
logger.fatal(io)
sys.exit(1)
def perform_exact_matching(bosa, comparison, address_dict, comp_keys):
"""Match the addresses in the comparison file and add address_id and coordinates when matched
"""
addr_id = bosa.columns.get_loc('address_id')
lon_id = bosa.columns.get_loc('EPSG:4326_lon')
lat_id = bosa.columns.get_loc('EPSG:4326_lat')
extended = []
logger.info('Performing matching')
for i, row in comparison.iterrows():
if i % 50_000 == 0:
logger.info('Matched %i / %i addresses', i, len(comparison))
try:
key = tuple(el.lower() if type(el) ==
str else el for el in row[comp_keys])
except KeyError as ke:
logger.error('Column %s not found in the comparison file', ke)
sys.exit(1)
if key in address_dict:
# If the address is matched add address_id and coordinates to it
data = address_dict[key]
row['address_id'] = data[addr_id]
row['EPSG:4326_lon'] = data[lon_id]
row['EPSG:4326_lat'] = data[lat_id]
extended.append(row)
extended = pd.DataFrame(extended)
# Convert column to int type that can handle NaN
extended['address_id'] = extended['address_id'].astype('Int64')
return extended
if __name__ == "__main__":
# Setup argument parser
parser = argparse.ArgumentParser(
description='Compare addresses between two csv files.')
parser.add_argument(
'input_file_1', help='BOSA address file, in csv format')
parser.add_argument(
'input_file_2', help='Address file to compare to BOSA address file, in csv format')
parser.add_argument('output_file', help='Name of file to write output to')
parser.add_argument('--mode', default='exact',
choices=['exact'], help='How to compare the addresses.')
parser.add_argument(
'--mapping', default={}, type=json.loads, help='Column names to consider in the comparison and how they map to the \
column names of the BOSA address file. (as a json dict of {comparison_key: bosa_key})')
parser.add_argument('--log_name', default="compare.log",
help='name of the log file')
parser.add_argument('--verbose', action="store_true",
help="toggle verbose output", default=False)
args = parser.parse_args()
logger = get_best_logger(args.log_name, args.verbose)
compare_addresses(args)
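
# Illustrative invocation (an assumption, not part of the original script):
#
#     python matching.py bosa_addresses.csv other_addresses.csv matched.csv \
#         --mapping '{"street": "streetname", "number": "house_number"}'
#
# The file names and the column names in the --mapping JSON are hypothetical;
# the mapping maps comparison-file columns to the corresponding BOSA
# address-file columns, as described in the argparse help above.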
| 36.060606
| 124
| 0.640336
|
import pandas as pd
import argparse
import logging
import sys
import json
def get_best_logger(log_file, verbose):
logger = logging.getLogger(__name__)
if verbose:
logger.setLevel('DEBUG')
file_handler = logging.FileHandler(log_file)
stream_handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s - %(name)s : %(levelname)s - %(message)s')
file_handler.setFormatter(formatter)
stream_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
return logger
def compare_addresses(args):
logger.info('Started reading BOSA address file')
try:
bosa = pd.read_csv(args.input_file_1)
logger.info('Read the BOSA address file')
except IOError as io:
logger.fatal(io)
sys.exit(1)
logger.info('Started reading comparison file')
try:
comparison = pd.read_csv(args.input_file_2)
logger.info('Read the comparison file')
except IOError as io:
logger.fatal(io)
sys.exit(1)
comp_keys = []
bosa_ids = []
for comp_key, bosa_key in args.mapping.items():
try:
comp_keys.append(comp_key)
bosa_ids.append(bosa.columns.get_loc(bosa_key))
except KeyError as ke:
logger.error(
'Column %s of column mapping (%s -> %s) not found in BOSA file', ke, comp_key, bosa_key)
sys.exit(1)
address_dict = {}
logger.info('Building data structure to perform matching')
for i, row in enumerate(bosa.values):
if i % 50_000 == 0:
logger.info('Processed %i / %i addresses', i, len(bosa))
address_dict[tuple(el.lower() if type(
el) == str else el for el in row[bosa_ids])] = row
extended = perform_exact_matching(
bosa, comparison, address_dict, comp_keys)
try:
extended.to_csv(args.output_file, index=False)
except IOError as io:
logger.fatal(io)
sys.exit(1)
def perform_exact_matching(bosa, comparison, address_dict, comp_keys):
addr_id = bosa.columns.get_loc('address_id')
lon_id = bosa.columns.get_loc('EPSG:4326_lon')
lat_id = bosa.columns.get_loc('EPSG:4326_lat')
extended = []
logger.info('Performing matching')
for i, row in comparison.iterrows():
if i % 50_000 == 0:
logger.info('Matched %i / %i addresses', i, len(comparison))
try:
key = tuple(el.lower() if type(el) ==
str else el for el in row[comp_keys])
except KeyError as ke:
logger.error('Column %s not found in the comparison file', ke)
sys.exit(1)
if key in address_dict:
data = address_dict[key]
row['address_id'] = data[addr_id]
row['EPSG:4326_lon'] = data[lon_id]
row['EPSG:4326_lat'] = data[lat_id]
extended.append(row)
extended = pd.DataFrame(extended)
extended['address_id'] = extended['address_id'].astype('Int64')
return extended
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Compare addresses between two csv files.')
parser.add_argument(
'input_file_1', help='BOSA address file, in csv format')
parser.add_argument(
'input_file_2', help='Address file to compare to BOSA address file, in csv format')
parser.add_argument('output_file', help='Name of file to write output to')
parser.add_argument('--mode', default='exact',
choices=['exact'], help='How to compare the addresses.')
parser.add_argument(
'--mapping', default={}, type=json.loads, help='Column names to consider in the comparison and how they map to the \
column names of the BOSA address file. (as a json dict of {comparison_key: bosa_key})')
parser.add_argument('--log_name', default="compare.log",
help='name of the log file')
parser.add_argument('--verbose', action="store_true",
help="toggle verbose output", default=False)
args = parser.parse_args()
logger = get_best_logger(args.log_name, args.verbose)
compare_addresses(args)
| true
| true
|
f71a6f98576f957a645a7ce60612e5c8ac44efe1
| 3,987
|
py
|
Python
|
islykill2/parser.py
|
sindrig/islykill2
|
2ad9e0d249637d7bb03a3535f4e054f3570427b2
|
[
"MIT"
] | 1
|
2019-08-24T23:59:32.000Z
|
2019-08-24T23:59:32.000Z
|
islykill2/parser.py
|
sindrig/islykill2
|
2ad9e0d249637d7bb03a3535f4e054f3570427b2
|
[
"MIT"
] | null | null | null |
islykill2/parser.py
|
sindrig/islykill2
|
2ad9e0d249637d7bb03a3535f4e054f3570427b2
|
[
"MIT"
] | 1
|
2021-06-25T11:15:23.000Z
|
2021-06-25T11:15:23.000Z
|
import os
import traceback
import base64
import datetime
import logging
from xml.etree.ElementTree import XML
from signxml import xmldsig
__all__ = ['AuthenticationError', 'parse_saml']
def decode_response(resp):
return base64.b64decode(resp.encode('utf8'))
# Getters
def get_xmldoc(xmlstring):
return XML(xmlstring)
def get_assertion(doc):
return doc.find('{urn:oasis:names:tc:SAML:2.0:assertion}Assertion')
def get_assertion_attributes(assertion):
ns = '{urn:oasis:names:tc:SAML:2.0:assertion}'
attributes = {}
for attr in assertion.find(
'{}AttributeStatement'.format(ns)).getchildren():
val = attr.find('{}AttributeValue'.format(ns))
attributes[attr.attrib['Name']] = val.text
return attributes
def get_conditions(assertion):
ns = '{urn:oasis:names:tc:SAML:2.0:assertion}'
return assertion.find('{}Conditions'.format(ns))
def strptime(dtstr):
# Example dtstr: 2014-01-18T11:10:44.9568516Z
return datetime.datetime.strptime(dtstr.split('.')[0], '%Y-%m-%dT%H:%M:%S')
# Verifications
def verify_ip(reported_ip, client_ip):
logger = logging.getLogger('islykill')
logger.debug('Reported ip "%s" - client_ip "%s"',
reported_ip, client_ip)
return reported_ip == client_ip
def verify_date_is_after(reported_date, current_date):
return reported_date < current_date
def verify_date_is_before(reported_date, current_date):
return reported_date > current_date
# Helper methods for import
class AuthenticationError(Exception):
pass
class SAMLResponse(object):
def __init__(self, kt):
self.kt = kt
def parse_saml(saml, ip, disable_checks=[], decode=True):
logger = logging.getLogger('islykill')
logger.debug('Starting SAML authentication process')
logger.debug(saml)
try:
logger.debug(saml.__class__)
if decode:
dec_resp = decode_response(saml)
else:
dec_resp = saml
logger.debug(dec_resp.__class__)
ca_pem_loc = os.path.dirname(os.path.abspath(__file__))
ca_pem_file = os.path.join(ca_pem_loc, 'Oll_kedjan.pem')
logger.debug('Using ca_pem_file: %s' % ca_pem_file)
xmldsig(dec_resp).verify(ca_pem_file=ca_pem_file)
logger.debug('verify OK')
xml = get_xmldoc(dec_resp)
assertion = get_assertion(xml)
attributes = get_assertion_attributes(assertion)
conditions = get_conditions(assertion)
logger.debug('all XML fetched...')
now = datetime.datetime.now()
if not verify_ip(attributes['IPAddress'], ip):
checkError('verify_ip failed', disable_checks)
if not verify_date_is_after(
strptime(conditions.attrib['NotBefore']), now):
checkError('verify_date_is_after failed', disable_checks)
if not verify_date_is_before(
strptime(conditions.attrib['NotOnOrAfter']), now):
checkError('verify_date_is_before', disable_checks)
logger.warning(
'NotOnOrAfter: %s',
conditions.attrib['NotOnOrAfter'])
logger.warning(
'Parsed date: %s',
strptime(conditions.attrib['NotOnOrAfter']))
logger.warning(
'Current date: %s',
now)
kt = attributes['UserSSN']
logger.debug('authenticated successfully: %s', kt)
return SAMLResponse(kt)
except AuthenticationError as e:
        logger.error('AuthenticationError: %s', e)
raise e
except Exception:
logger.error('Unknown error occurred:')
logger.error(traceback.format_exc())
from django.core.mail import mail_admins
mail_admins('SAML authentication error', traceback.format_exc())
checkError('Unknown error', disable_checks)
def checkError(name, disable_checks=[]):
if name not in disable_checks:
raise AuthenticationError(name)
| 28.683453
| 79
| 0.658641
|
import os
import traceback
import base64
import datetime
import logging
from xml.etree.ElementTree import XML
from signxml import xmldsig
__all__ = ['AuthenticationError', 'parse_saml']
def decode_response(resp):
return base64.b64decode(resp.encode('utf8'))
def get_xmldoc(xmlstring):
return XML(xmlstring)
def get_assertion(doc):
return doc.find('{urn:oasis:names:tc:SAML:2.0:assertion}Assertion')
def get_assertion_attributes(assertion):
ns = '{urn:oasis:names:tc:SAML:2.0:assertion}'
attributes = {}
for attr in assertion.find(
'{}AttributeStatement'.format(ns)).getchildren():
val = attr.find('{}AttributeValue'.format(ns))
attributes[attr.attrib['Name']] = val.text
return attributes
def get_conditions(assertion):
ns = '{urn:oasis:names:tc:SAML:2.0:assertion}'
return assertion.find('{}Conditions'.format(ns))
def strptime(dtstr):
return datetime.datetime.strptime(dtstr.split('.')[0], '%Y-%m-%dT%H:%M:%S')
def verify_ip(reported_ip, client_ip):
logger = logging.getLogger('islykill')
logger.debug('Reported ip "%s" - client_ip "%s"',
reported_ip, client_ip)
return reported_ip == client_ip
def verify_date_is_after(reported_date, current_date):
return reported_date < current_date
def verify_date_is_before(reported_date, current_date):
return reported_date > current_date
class AuthenticationError(Exception):
pass
class SAMLResponse(object):
def __init__(self, kt):
self.kt = kt
def parse_saml(saml, ip, disable_checks=[], decode=True):
logger = logging.getLogger('islykill')
logger.debug('Starting SAML authentication process')
logger.debug(saml)
try:
logger.debug(saml.__class__)
if decode:
dec_resp = decode_response(saml)
else:
dec_resp = saml
logger.debug(dec_resp.__class__)
ca_pem_loc = os.path.dirname(os.path.abspath(__file__))
ca_pem_file = os.path.join(ca_pem_loc, 'Oll_kedjan.pem')
logger.debug('Using ca_pem_file: %s' % ca_pem_file)
xmldsig(dec_resp).verify(ca_pem_file=ca_pem_file)
logger.debug('verify OK')
xml = get_xmldoc(dec_resp)
assertion = get_assertion(xml)
attributes = get_assertion_attributes(assertion)
conditions = get_conditions(assertion)
logger.debug('all XML fetched...')
now = datetime.datetime.now()
if not verify_ip(attributes['IPAddress'], ip):
checkError('verify_ip failed', disable_checks)
if not verify_date_is_after(
strptime(conditions.attrib['NotBefore']), now):
checkError('verify_date_is_after failed', disable_checks)
if not verify_date_is_before(
strptime(conditions.attrib['NotOnOrAfter']), now):
checkError('verify_date_is_before', disable_checks)
logger.warning(
'NotOnOrAfter: %s',
conditions.attrib['NotOnOrAfter'])
logger.warning(
'Parsed date: %s',
strptime(conditions.attrib['NotOnOrAfter']))
logger.warning(
'Current date: %s',
now)
kt = attributes['UserSSN']
logger.debug('authenticated successfully: %s', kt)
return SAMLResponse(kt)
except AuthenticationError as e:
        logger.error('AuthenticationError: %s', e)
raise e
except Exception:
logger.error('Unknown error occurred:')
logger.error(traceback.format_exc())
from django.core.mail import mail_admins
mail_admins('SAML authentication error', traceback.format_exc())
checkError('Unknown error', disable_checks)
def checkError(name, disable_checks=[]):
if name not in disable_checks:
raise AuthenticationError(name)
| true
| true
|
f71a703f2090876a8e79cf5a51d2bb5e3344842c
| 153,793
|
py
|
Python
|
spyke/sort.py
|
spyke/spyke
|
20934521de9c557924911cf6190690ac1c6f8e80
|
[
"CNRI-Python"
] | 22
|
2015-06-01T03:31:00.000Z
|
2022-03-18T09:12:28.000Z
|
spyke/sort.py
|
spyke/spyke
|
20934521de9c557924911cf6190690ac1c6f8e80
|
[
"CNRI-Python"
] | 3
|
2017-03-24T19:16:02.000Z
|
2021-01-27T14:34:30.000Z
|
spyke/sort.py
|
spyke/spyke
|
20934521de9c557924911cf6190690ac1c6f8e80
|
[
"CNRI-Python"
] | 6
|
2015-07-10T15:28:08.000Z
|
2022-03-17T19:30:45.000Z
|
"""Spike sorting classes and window"""
from __future__ import division
from __future__ import print_function
__authors__ = ['Martin Spacek', 'Reza Lotun']
import os
import sys
import time
import datetime
from copy import copy
import operator
import random
import shutil
import hashlib
import multiprocessing as mp
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QAction, QIcon, QApplication
import numpy as np
import scipy
import scipy.signal
#from scipy.cluster.hierarchy import fclusterdata
import pylab as pl
import pyximport
pyximport.install(build_in_temp=False, inplace=True)
from . import util # .pyx file
from . import core
from .core import (WaveForm, Gaussian, MAXLONGLONG, R, toiter, intround, printflush, lstrip,
rstrip, lrstrip, pad, td2days, SpykeToolWindow, NList, NSList, dist,
USList, ClusterChange, SpikeSelectionSlider, lrrep2Darrstripis, rollwin2D)
from .detect import DEBUG
from .surf import EPOCH
from .plot import SpikeSortPanel, CLUSTERCOLOURDICT, WHITE
from .__version__ import __version__
#MAXCHANTOLERANCE = 100 # um
NSLISTWIDTH = 70 # minimize nslist width, enough for 7 digit spike IDs
PANELWIDTHPERCOLUMN = 120 # sort panel width per column of channels
PANELHEIGHTPERROW = 50 # sort panel height per row of channels
VSCROLLBARWIDTH = 14 # hack
SORTWINDOWHEIGHT = 1035 # TODO: this should be set programmatically
MINSORTWINDOWWIDTH = 566
MEANWAVEMAXSAMPLES = 2000
NPCSPERCHAN = 7
PCALIB = 'mdp'
ICALIB = 'sklearn'
DEFMINISI = 50 # default minimum ISI to check for on export, us
MAXGROUPISI = 100000 # us (100 ms)
MAXGROUPDT = 100000000 # us (100 s)
class Sort(object):
"""A spike sorting session, in which you can detect spikes and sort them into Neurons.
A .sort file is a single Python2-pickled Sort object. A .json file is a
jsonpickle-pickled Sort object"""
def __init__(self, detector=None, stream=None, tw=None):
self.__version__ = __version__
self.fname = ''
self.user = ''
self.notes = ''
self.detector = detector # this Sort's current Detector object
self.tw = tw # time window (us) relative to spike time
self.stream = stream
self.probe = stream.probe # only one probe design per sort allowed
self.converter = stream.converter
self.neurons = {}
self.clusters = {} # neurons with multidm params scaled for plotting
self.norder = [] # stores order of neuron ids display in nlist
self.npcsperchan = NPCSPERCHAN
def get_nextnid(self):
"""nextnid is used to retrieve the next unique single unit ID"""
nids = list(self.neurons)
if len(nids) == 0:
return 1 # single unit nids start at 1
else:
return max(max(nids) + 1, 1) # at least 1
nextnid = property(get_nextnid)
def get_nextmuid(self):
"""nextmuid is used to retrieve the next unique multiunit ID"""
nids = list(self.neurons)
if len(nids) == 0:
return -1 # multiunit ids start at -1
else:
return min(min(nids) - 1, -1) # at most -1
nextmuid = property(get_nextmuid)
def get_good(self):
"""Return array of nids marked by user as 'good'"""
good = []
for neuron in self.neurons.values():
try:
if neuron.good:
good.append(neuron.id)
except AttributeError: # neuron is from older sort, no .good attrib
neuron.good = False
return np.asarray(good)
def set_good(self, good):
"""Set good flag to True for nids in good, False otherwise"""
nids = list(self.neurons)
assert np.all([ nid in nids for nid in good ]) # make sure all nids in good exist
notgood = np.setdiff1d(nids, good)
for nid in notgood:
neuron = self.neurons[nid]
neuron.good = False
for nid in good:
neuron = self.neurons[nid]
neuron.good = True
good = property(get_good, set_good)
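    # Usage sketch (hypothetical nids): assigning sort.good = [1, 3, 7] marks those
    # units as good and clears the flag on every other neuron; reading sort.good then
    # returns an array containing exactly those ids.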
def get_stream(self):
try:
return self._stream
except AttributeError:
# this is likely a brand new sort, has yet to be assigned a Stream
return None
def set_stream(self, stream=None):
"""Check stream type and name and probe type, and restore filtmeth, car, sampfreq and
shcorrect to stream when binding/modifying stream to self"""
oldstream = self.stream
if stream != None and oldstream != None:
# do stream types match?
if type(stream) != type(oldstream):
raise ValueError("Stream types don't match: %s, %s"
% (type(oldstream), type(stream)))
# do stream probe types match?
if type(stream.probe) != type(oldstream.probe):
raise ValueError("Stream probe types don't match: %s, %s"
% (type(oldstream.probe), type(stream.probe)))
# is one stream fname a superset of the other?
if (stream.fname not in oldstream.fname) and (oldstream.fname not in stream.fname):
raise ValueError("Stream file names are not supersets of each other: %s, %s"
% (oldstream.fname, stream.fname))
else:
print('Stream file names are similar enough to proceed: %s, %s'
% (stream.fname, oldstream.fname))
try:
stream.filtmeth = self.filtmeth
stream.car = self.car
stream.sampfreq = self.sampfreq
stream.shcorrect = self.shcorrect
except AttributeError:
pass # one of the above aren't bound
self._stream = stream # set it
print('Bound stream %r to sort %r' % (stream.fname, self.fname))
# now that tres is known, calculate window timepoints wrt spike time:
self.calc_twts_twi()
stream = property(get_stream, set_stream)
def calc_twts_twi(self):
"""Calculate temporal window timepoints wrt spike time, and the indices of these
timepoints wrt spike time"""
tres = self.tres
tw = self.tw
twts = np.arange(tw[0], tw[1], tres)
twts += twts[0] % tres # get rid of mod, so twts go through zero
self.twts = twts
self.twi = intround(twts[0] / tres), intround(twts[-1] / tres)
#info('twi = %s' % (self.twi,))
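        # Example (hypothetical values): with tw=(-500, 500) us and tres=20 us, twts
        # runs from -500 to 480 us in 20 us steps and twi == (-25, 24), i.e. sample
        # indices from 25 timepoints before the t=0 alignment point to 24 after it.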
def update_tw(self, tw):
"""Update tw and everything that depends on it. Note that this shouldn't
be called directly by the user. Call SpykeWindow.update_spiketw() instead"""
oldtw = self.tw
self.tw = tw
self.calc_twts_twi()
dtw = np.asarray(tw) - np.asarray(oldtw) # new minus old
self.spikes['t0'] += dtw[0]
self.spikes['t1'] += dtw[1]
self.spikes['tis'] = self.spikes['tis'] - intround(dtw[0] / self.tres)
# recalculate any existing templates:
for neuron in self.neurons.values():
            if neuron.wave.data is not None: # identity check; elementwise '!=' fails on arrays in newer numpy
neuron.update_wave()
print('WARNING: all spike waveforms need to be reloaded!')
def get_tres(self):
return self.stream.tres
tres = property(get_tres)
def __getstate__(self):
"""Get object state for pickling"""
# copy it cuz we'll be making changes, this is fast because it's just a shallow copy
d = self.__dict__.copy()
# Spikes and wavedata arrays are (potentially) saved separately.
# usids and PCs/ICs can be regenerated from the spikes array.
for attr in ['spikes', 'wavedata', 'usids', 'X', 'Xhash']:
# keep _stream during normal pickling for multiprocessing, but remove it
# manually when pickling to sort file
try: del d[attr]
except KeyError: pass
return d
def get_nspikes(self):
try: return len(self.spikes)
except AttributeError: return 0
nspikes = property(get_nspikes)
def update_usids(self):
"""Update usids, which is an array of indices of unsorted spikes"""
nids = self.spikes['nid']
self.usids, = np.where(nids == 0) # 0 means unclustered
def get_spikes_sortedby(self, attr='id'):
"""Return array of all spikes, sorted by attribute 'attr'"""
vals = self.spikes[attr]
spikes = self.spikes[vals.argsort()]
return spikes
def get_wave(self, sid):
"""Return WaveForm corresponding to spike sid"""
spikes = self.spikes
nchans = spikes['nchans'][sid]
chans = spikes['chans'][sid, :nchans]
t0 = spikes['t0'][sid]
t1 = spikes['t1'][sid]
wavedata = self.wavedata[sid, 0:nchans]
ts = np.arange(t0, t1, self.tres) # build them up
return WaveForm(data=wavedata, ts=ts, chans=chans, tres=self.tres)
def get_maxchan_wavedata(self, sid=None, nid=None):
"""Return wavedata of maxchan of spike sid or neuron nid"""
if sid != None:
assert nid == None
chani = self.spikes['chani'][sid]
return self.wavedata[sid, chani]
elif nid != None:
assert sid == None
neuron = self.neurons[nid]
chani, = np.where(neuron.chans == neuron.chan)
assert len(chani) == 1
chani = chani[0] # pull out of length 1 array
return neuron.wave.data[chani]
def get_mean_wave(self, sids, nid=None):
"""Return the mean and std waveform of spike waveforms in sids"""
spikes = self.spikes
nsids = len(sids)
if nsids > MEANWAVEMAXSAMPLES:
step = nsids // MEANWAVEMAXSAMPLES + 1
s = ("get_mean_wave() sampling every %d spikes instead of all %d"
% (step, nsids))
if nid != None:
s = "neuron %d: " % nid + s
print(s)
sids = sids[::step]
nsids = len(sids) # update
chanss = spikes['chans'][sids]
nchanss = spikes['nchans'][sids]
chanslist = [ chans[:nchans] for chans, nchans in zip(chanss, nchanss) ] # list of arrays
chanpopulation = np.concatenate(chanslist)
groupchans = np.unique(chanpopulation) # comes out sorted
wavedata = self.wavedata[sids]
if wavedata.ndim == 2: # should be 3, get only 2 if nsids == 1
wavedata.shape = 1, wavedata.shape[0], wavedata.shape[1] # give it a singleton 3rd dim
nt = wavedata.shape[-1]
maxnchans = len(groupchans)
data = np.zeros((maxnchans, nt))
# all spikes have same nt, but not necessarily same nchans, keep track of
# how many spikes contributed to each of the group's chans
nspikes = np.zeros((maxnchans, 1), dtype=int)
for chans, wd in zip(chanslist, wavedata):
chanis = groupchans.searchsorted(chans) # each spike's chans is a subset of groupchans
data[chanis] += wd[:len(chans)] # accumulate
nspikes[chanis] += 1 # inc spike count for this spike's chans
#t0 = time.time()
data /= nspikes # normalize all data points appropriately, this is now the mean
var = np.zeros((maxnchans, nt))
for chans, wd in zip(chanslist, wavedata):
chanis = groupchans.searchsorted(chans) # each spike's chans is a subset of groupchans
var[chanis] += (wd[:len(chans)] - data[chanis]) ** 2 # accumulate 2nd moment
var /= nspikes # normalize all data points appropriately, this is now the variance
std = np.sqrt(var)
# keep only those chans that at least 1/2 the spikes contributed to
bins = list(groupchans) + [np.inf] # concatenate rightmost bin edge
hist, bins = np.histogram(chanpopulation, bins=bins)
chans = groupchans[hist >= nsids/2]
chanis = groupchans.searchsorted(chans)
data = data[chanis]
std = std[chanis]
return WaveForm(data=data, std=std, chans=chans)
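    # Usage sketch (hypothetical neuron): wave = sort.get_mean_wave(neuron.sids, nid=neuron.id)
    # returns a WaveForm whose .data and .std hold the mean and stdev waveforms on the
    # chans that at least half of the (possibly subsampled) spikes have in common.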
def check_ISIs(self, nids='good'):
"""Check that interspike intervals of spikes in each nid never fall below DEFMINISI"""
print('Checking inter-spike intervals')
if nids == 'good':
nids = self.good
elif nids == 'all':
nids = sorted(self.neurons)
for nid in nids:
neuron = self.neurons[nid]
spikets = self.spikes['t'][neuron.sids] # should be a sorted copy
assert spikets.flags['OWNDATA'] # safe to modify in place
spikets.sort() # just in case it isn't perfectly sorted
ndupl = (np.diff(spikets) < DEFMINISI).sum()
if ndupl > 0:
msg = ('n%d has %d duplicate spikes (given DEFMINISI=%d us).\n'
'Remove duplicate spikes with the ISI tool in the Verify tab'
% (nid, ndupl, DEFMINISI))
raise RuntimeError(msg)
def check_wavealign(self, nids='good', maxdti=1):
"""Check that each neurons's primary peak on the max chan is no more than +/- maxdti
timepoints away from the t=0 alignment timepoint"""
print('Checking neuron mean waveform alignment')
if nids == 'good':
nids = self.good
elif nids == 'all':
nids = sorted(self.neurons)
nt = self.twi[1] - self.twi[0] + 1 # expected number of points of each chan's wavedata
for nid in nids:
neuron = self.neurons[nid]
wd = self.get_maxchan_wavedata(nid=nid)
assert len(wd) == nt
# find biggest positive and negative peaks, check which comes first, ensure
# the primary peak is within maxdti of t=0 alignment timepoint:
ppeakis, _ = scipy.signal.find_peaks(wd) # positive peak indices
npeakis, _ = scipy.signal.find_peaks(-wd) # negative peak indices
pmaxi = ppeakis[wd[ppeakis].argmax()] # max positive peak index
nmaxi = npeakis[wd[npeakis].argmin()] # max negative peak index
if nmaxi < pmaxi: # usual case: -ve then +ve peak
peak1i = nmaxi
else: # less common: +ve then -ve peak, make sure +ve peak is worthy of alignment
pmax, nmax = wd[pmaxi], wd[nmaxi]
if pmax > abs(nmax): # +ve peak is bigger than -ve peak, align to +ve peak
peak1i = pmaxi
else:
peak1i = nmaxi # default to -ve peak
alignti = 0 - self.twi[0] # +ve
dti = peak1i - alignti
#print("n%d: dti=%d" % (nid, dti))
if abs(dti) > maxdti:
peak1uV = self.converter.AD2uV(wd[peak1i])
peak1us = intround(self.tres*(peak1i-alignti))
msg = ('Primary peak (%+d uV @ t=%d us) of n%d is %+d timepoints away from '
'the t=0 us alignment point. Shift it closer and try again'
% (peak1uV, peak1us, nid, dti))
raise RuntimeError(msg)
def check_wavepadding(self, nids='good', npad=2):
"""Check if any spikes are edge padded, presumably due to being shifted but not
reloaded. For robustness, check for consistent signs of padding across all channels.
An edge is considered padded if it does not change over npad datapoints"""
print('Checking spike waveform padding')
assert npad >= 2 # need at least 2 points to do a diff
if nids == 'good':
nids = self.good
elif nids == 'all':
nids = sorted(self.neurons)
for nid in nids:
neuron = self.neurons[nid]
for sid in neuron.sids:
wd = self.wavedata[sid] # multichannel waveform data
# are left and right edges of wavedata identical for npad number of points?
l, r = wd[:, :npad], wd[:, -npad:] # shape (nchans, npad)
leftpadded = (np.diff(l, axis=1) == 0).all()
rightpadded = (np.diff(r, axis=1) == 0).all()
# handle case where spike is right after or right before a 0-padded
# region of data due to gaps between experiments:
if leftpadded:
if (wd[:, 0] == 0).all():
leftpadded = False
if rightpadded:
if (wd[:, -1] == 0).all():
rightpadded = False
if leftpadded or rightpadded:
msg = ('n%d has s%d that looks like it has been padded.\n'
'leftpadded, rightpadded = %r, %r\n'
'Reload s%d or n%d or all spikes and try again'
% (nid, sid, leftpadded, rightpadded, sid, nid))
raise RuntimeError(msg)
def check_contiguous_nids(self):
"""Check that neuron IDs are contiguous (no gaps)"""
print('Checking that neuron IDs are contiguous')
nids = np.array(list(self.neurons))
nids = nids[nids > 0] # only consider +ve nids
nids.sort()
if (np.diff(nids) != 1).any():
raise RuntimeError('Neuron IDs are not contiguous, renumber all and try again')
def exportptcsfiles(self, basepath, sortpath, user='', notes=''):
"""Export spike data to binary .ptcs files under basepath, one file per recording"""
# First check to make sure various things are OK before exporting:
self.check_ISIs()
self.check_wavealign()
self.check_wavepadding()
self.check_contiguous_nids()
spikes = self.spikes
exportdt = str(datetime.datetime.now()) # get an export datetime stamp
exportdt = exportdt.split('.')[0] # ditch the us
if self.stream.is_multi(): # self.stream is a MultiStream
streams = self.stream.streams
else: # self.stream is a single Stream
streams = [self.stream]
print('Exporting "good" clusters to:')
# do a separate export for each recording:
# absolute start and stop times of all streams, rounded to nearest raw timepoint:
tranges = self.stream.tranges
t0 = tranges[0, 0] # absolute start time of first stream
for stream, trange in zip(streams, tranges):
abst0 = trange[0] # absolute start time of this stream relative to t0
# time delta between this stream and first stream, to nearest raw timepoint, us:
dt = abst0 - t0
dt = intround(dt) # to nearest int us
self.exportptcsfile(stream, basepath, dt, exportdt, sortpath,
user=user, notes=notes)
def exportptcsfile(self, stream, basepath, dt, exportdt, sortpath, user='', notes=''):
"""Export spike data of all "good" spikes to binary .ptcs file in basepath.
Constrain to spikes in stream, and undo any time delta in spike times.
dt is the integer time difference between start of stream and start of first stream in
the track, rounded to the nearest us (spike times are stored as int64 us in .ptcs)"""
# build up list of PTCSNeuronRecords that have spikes in this stream,
# and tally their spikes
nsamplebytes = 4 # float32
nrecs = []
nspikes = 0
# only export neurons marked as "good", could be single or multi unit:
for nid in sorted(self.good):
neuron = self.neurons[nid]
spikets = self.spikes['t'][neuron.sids] # should be a sorted copy
assert spikets.flags['OWNDATA'] # safe to modify in place
spikets.sort() # just in case it isn't perfectly sorted
spikets -= dt # export spike times relative to t=0 of this recording
# only include spikes that occurred during this recording
lo, hi = spikets.searchsorted([stream.t0, stream.t1])
spikets = spikets[lo:hi]
if len(spikets) == 0:
continue # don't save empty neurons
nrec = PTCSNeuronRecord(neuron, spikets, nsamplebytes, descr='')
nrecs.append(nrec)
nspikes += len(spikets)
nneurons = len(nrecs)
# create the header and write everything to file:
path = os.path.join(basepath, stream.srcfnameroot)
try: os.mkdir(path)
except OSError: pass # path already exists?
fname = stream.srcfnameroot + '.ptcs'
fullfname = os.path.join(path, fname)
header = PTCSHeader(self, sortpath, stream, nneurons, nspikes, nsamplebytes,
fullfname, exportdt, user=user, notes=notes)
with open(fullfname, 'wb') as f:
header.write(f)
for nrec in nrecs:
nrec.write(f)
print(fullfname)
def exportcsv(self, fname):
"""Export all "good" spikes to a .csv file with time (s), nid, and maxchan as the
columns"""
sids = []
#chans = []
for nid in sorted(self.good):
neuron = self.neurons[nid]
sids.append(neuron.sids)
# the alternative is to export each spike's unit's channel:
#chans.append(np.tile(neuron.chan, neuron.nspikes))
sids = np.hstack(sids)
spikes = self.spikes[sids]
tsecs = spikes['t'] / 1e6 # convert from us to s
nids = spikes['nid']
chans = spikes['chan']
#chans = np.hstack(chans)
data = np.column_stack([tsecs, nids, chans])
print('Exporting (tsec, nid, chan) of all spikes marked as "good" to %s' % fname)
np.savetxt(fname, data, fmt='%.6f, %d, %d')
def exporttschid(self, basepath):
"""Export int64 (timestamp, channel, neuron id) 3 tuples to binary file"""
raise NotImplementedError('Needs to be redone to work with multiple streams')
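        # NOTE (added): everything below this raise is unreachable as written and still
        # references srffnameroot and path, which are undefined in this method; it is
        # kept only as a sketch of the intended export format.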
spikes = self.spikes[self.spikes['nid'] > 0] # don't export unsorted/multiunit spikes
dt = str(datetime.datetime.now()) # get an export timestamp
dt = dt.split('.')[0] # ditch the us
dt = dt.replace(' ', '_')
dt = dt.replace(':', '.')
srffnameroot = srffnameroot.replace(' ', '_')
tschidfname = dt + '_' + srffnameroot + '.tschid'
tschid = np.empty((len(spikes), 3), dtype=np.int64)
tschid[:, 0] = spikes['t']
tschid[:, 1] = spikes['chan']
tschid[:, 2] = spikes['nid']
tschid.tofile(os.path.join(path, tschidfname)) # save it
print(tschidfname)
def exportdin(self, basepath):
"""Export stimulus din(s) to binary .din file(s) in basepath"""
if self.stream.is_multi(): # self.stream is a MultiStream
streams = self.stream.streams
else: # self.stream is a single Stream
streams = [self.stream]
dinfiledtype=[('TimeStamp', '<i8'), ('SVal', '<i8')] # pairs of int64s
print('Exporting DIN(s) to:')
for stream in streams:
try: # neither of these attribs should exist for recordings with no stimuli:
svrecs = stream.srff.digitalsvalrecords
dsprecs = stream.srff.displayrecords
except AttributeError:
continue # no din to export for this stream
if len(svrecs) == 0 or stream.srff.ndigitalsvalrecords == 0:
raise ValueError("digitalsvalrecords are empty for stream %r. Attribute "
"shouldn't exist" % stream.fname)
path = os.path.join(basepath, stream.srcfnameroot)
try: os.mkdir(path)
except OSError: pass # path already exists?
# upcast SVal field from uint16 to int64, creates a copy,
# but it's not too expensive:
svrecs = svrecs.astype(dinfiledtype)
# convert to normal n x 2 int64 array
svrecs = svrecs.view(np.int64).reshape(-1, 2)
# Some old recordings (<= ptc15) contain multiple experiments.
# To deal with this, iterate over stream.srff.displayrecords, export one .din
# per displayrecord. Append experiment ID to each .din filename, if necessary.
svrects = svrecs[:, 0]
dsprects = [ dsprec.TimeStamp for dsprec in dsprecs ]
svalrecis = svrects.searchsorted(dsprects)
assert svalrecis[0] == 0
svalrecis = svalrecis[1:] # exclude the trivial 0 index
# split sval records according to displayrecord timestamps:
dins = np.split(svrecs, svalrecis)
assert len(dins) == len(dsprecs)
for eid, din in enumerate(dins):
if eid == 0 and len(dins) == 1:
eidstr = ''
elif len(dins) < 10:
eidstr = '.%d' % eid
else: # include leading zero to maintain alphabetical fname order
eidstr = '.%02d' % eid
dinfname = stream.srcfnameroot + eidstr + '.din'
fullfname = os.path.join(path, dinfname)
din.tofile(fullfname) # save it
print(fullfname)
def exporttextheader(self, basepath):
"""Export stimulus text header(s) to .textheader file(s) in basepath"""
if self.stream.is_multi(): # self.stream is a MultiStream
streams = self.stream.streams
else: # self.stream is a single Stream
streams = [self.stream]
print('Exporting text header(s) to:')
for stream in streams:
try:
dsprecs = stream.srff.displayrecords
except AttributeError: # no textheader to export for this stream
continue
if len(dsprecs) == 0:
raise ValueError("displayrecords are empty for stream %r. Attribute "
"shouldn't exist" % stream.fname)
path = os.path.join(basepath, stream.srcfnameroot)
try: os.mkdir(path)
except OSError: pass # path already exists?
# Some old recordings (<= ptc15) contain multiple experiments.
# To deal with this, iterate over stream.srff.displayrecords, export one
# .textheader per displayrecord. Append experiment ID to each .textheader
# filename, if necessary.
for eid, dsprec in enumerate(dsprecs):
textheader = dsprec.Header.python_tbl
if eid == 0 and len(dsprecs) == 1:
eidstr = ''
elif len(dsprecs) < 10:
eidstr = '.%d' % eid
else: # include leading zero to maintain alphabetical fname order
eidstr = '.%02d' % eid
textheaderfname = stream.srcfnameroot + eidstr + '.textheader'
fullfname = os.path.join(path, textheaderfname)
with open(fullfname, 'w') as f:
f.write(textheader) # save it
print(fullfname)
def exportall(self, basepath, sortpath):
"""Export spike data, stimulus din and textheader to basepath"""
self.exportptcsfiles(basepath, sortpath)
self.exportdin(basepath)
self.exporttextheader(basepath)
def exportspikewaves(self, sids, selchans, tis, fname, format):
"""Export spike waveform data of selected sids, selchans and tis to binary
.spikes.zip file or text .spikes.csv file"""
nspikes = len(sids)
chans, chanslist = self.get_common_chans(sids, selchans)
nchans = len(chans)
ti0, ti1 = tis
nt = ti1 - ti0
# fill in 3D data array:
dtype = self.wavedata.dtype
data = np.zeros((nspikes, nchans, nt), dtype=dtype)
for sii, sid in enumerate(sids):
spikechans = chanslist[sii]
spikechanis = spikechans.searchsorted(chans)
data[sii] = self.wavedata[sid][spikechanis, ti0:ti1]
if format == 'text': # flatten timepoints of all chans into columns
data.shape = nspikes, nchans*nt
stream = self.stream
assert stream.kind == 'highpass' # should be the only type ever saved to self
if format == 'binary':
nids = self.spikes['nid'][sids]
spiketimes = self.spikes['t'][sids]
chanpos = stream.probe.siteloc_arr()
uVperAD = stream.converter.AD2uV(1) # convert 1 AD unit to uV
with open(fname, 'wb') as f:
np.savez_compressed(f, data=data, sids=sids, nids=nids,
spiketimes=spiketimes, chans=chans, tis=tis,
chanpos=chanpos, uVperAD=uVperAD)
elif format == 'text':
np.savetxt(fname, data, fmt='%d', delimiter=',') # data should be int
else:
raise ValueError('Unknown format: %r' % format)
print('Exported %d spikes on chans=%r and tis=%r to %s'
% (nspikes, list(chans), list(tis), fname))
def get_param_matrix(self, kind=None, sids=None, tis=None, selchans=None, norm=False,
dims=None, scale=True):
"""Organize dims parameters from sids into a data matrix, each column
        corresponding to a dim. To do PCA/ICA clustering on all spikes, one maxchan at
        a time, the caller needs to call this multiple times, once for each set of
        spikes that share the same maxchan."""
spikes = self.spikes
dtypefields = list(spikes.dtype.fields)
if sids is None:
sids = spikes['id'] # default to all spikes
comps = [ dim for dim in dims if dim.startswith('c') and dim[-1].isdigit() ]
rmserror = np.any([ dim == 'RMSerror' for dim in dims ])
ncomp = len(comps)
hascomps = ncomp > 0
if hascomps:
X = self.get_component_matrix(kind, sids, tis=tis, chans=selchans,
minncomp=ncomp, norm=norm)
if rmserror:
rms = self.get_rms_error(sids, tis=tis, chans=selchans)
data = []
for dim in dims:
if dim in dtypefields:
data.append( np.float32(spikes[dim][sids]) )
elif dim.startswith('c') and dim[-1].isdigit():
compid = int(lstrip(dim, 'c'))
data.append( np.float32(X[:, compid]) )
elif dim == 'RMSerror':
data.append( np.float32(rms) )
else:
raise RuntimeError('Unknown dim %r' % dim)
# np.column_stack returns a copy, not modifying the original array
data = np.column_stack(data)
if scale:
# ensure 0 mean, and unit variance/stdev
for dim, d in zip(dims, data.T): # d iterates over columns
d -= d.mean()
if dim in ['x0', 'y0'] and self.probe.ncols > 1:
try: x0std # normalize spatial params by x0 std
except NameError: x0std = spikes['x0'].std()
if x0std != 0.0:
d /= x0std
#elif dim == 't': # the longer the recording in hours, the greater the
# # scaling in time
# trange = d.max() - d.min()
# tscale = trange / (60*60*1e6)
# d *= tscale / d.std()
else: # normalize all other dims by their std
dstd = d.std()
if dstd != 0.0:
d /= dstd
return data
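    # Usage sketch (hypothetical dims; 'x0'/'y0' are spike dtype fields, 'c0'/'c1' are
    # component columns from get_component_matrix):
    #   data = sort.get_param_matrix(kind='PCA', sids=sids, dims=['x0', 'y0', 'c0', 'c1'])
    #   # data is a float32 array of shape (len(sids), 4), zero-mean and scaled per column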
def get_component_matrix(self, kind, sids, tis=None, chans=None, minncomp=None,
norm=False):
"""Find set of chans common to all sids, and do PCA/ICA on those waveforms. Or,
if chans are specified, limit PCA/ICA to them. Return component matrix with at
least minncomp dimensions"""
spikes = self.spikes
nt = self.wavedata.shape[2]
if tis is None: # use full waveform
tis = np.asarray([0, nt])
#print('tis: %r' % (tis,))
ti0, ti1 = tis
assert ti0 < ti1 <= nt
nt = ti1 - ti0
chans, chanslist = self.get_common_chans(sids, chans)
nchans = len(chans)
nspikes = len(sids)
if nspikes < 2:
raise RuntimeError("Need at least 2 spikes for %s" % kind)
if nchans == 0:
raise RuntimeError("Spikes have no common chans for %s" % kind)
# check if desired components have already been calculated (cache hit):
Xhash = self.get_Xhash(kind, sids, tis, chans, self.npcsperchan, norm)
self.Xhash = Xhash # save as key to most recent component matrix in self.X
try: self.X
except AttributeError: self.X = {} # init the dimension reduction cache attrib
if Xhash in self.X:
print('Cache hit, using cached %ss from tis=%r, chans=%r of %d spikes' %
(kind[:-1], list(tis), list(chans), nspikes))
return self.X[Xhash] # no need to recalculate
print('Cache miss, (re)calculating %ss' % kind[:-1])
# collect data between tis from chans from all spikes:
print('Doing %s on tis=%r, chans=%r of %d spikes' %
(kind, list(tis), list(chans), nspikes))
# MDP complains of roundoff errors with float32 for large covariance matrices
data = np.zeros((nspikes, nchans, nt), dtype=np.float64)
for sii, sid in enumerate(sids):
spikechans = chanslist[sii]
spikechanis = spikechans.searchsorted(chans)
spikedata = self.wavedata[sid][spikechanis, ti0:ti1]
if norm:
# normalize by Vpp of chan with max Vpp:
maxptp = spikedata.ptp(axis=1).max()
if maxptp != 0: # prevent div by 0
spikedata = spikedata / maxptp
data[sii] = spikedata
print('Input shape for %s: %r' % (kind, data.shape))
t0 = time.time()
data.shape = nspikes, nchans*nt # flatten timepoints of all chans into columns
print('Reshaped input for %s: %r' % (kind, data.shape))
if kind == 'PCA': # principal components analysis
if PCALIB == 'mdp':
import mdp # delay as late as possible
X = mdp.pca(data, output_dim=5, svd=False) # svd=False is default
elif PCALIB == 'sklearn':
# sklearn's PCA is about 8x slower than mdp.pca, I think because it
# doesn't tap into scipy.linalg.eig compiled code. RandomizedPCA is faster
# than PCA, but isn't deterministic, and is still 2-3x slower than mdp.pca
from sklearn.decomposition import PCA
pca = PCA(n_components=5)
X = pca.fit_transform(data) # do both the fit and the transform
else:
raise ValueError('Invalid PCALIB %r' % PCALIB)
if X.shape[1] < minncomp:
raise RuntimeError("Can't satisfy minncomp=%d request" % minncomp)
elif kind == 'sPCA': # sparse principal components analysis
from sklearn.decomposition import SparsePCA
n_components = 5
alpha = 1 # sparseness parameter
n_jobs = mp.cpu_count()
spca = SparsePCA(n_components=n_components, alpha=alpha, n_jobs=n_jobs)
X = spca.fit_transform(data) # do both the fit and the transform
elif kind == 'mbsPCA': # mini batch sparse principal components analysis
from sklearn.decomposition import MiniBatchSparsePCA
n_components = 5
alpha = 1 # sparseness parameter
n_jobs = mp.cpu_count()
mbspca = MiniBatchSparsePCA(n_components=n_components, alpha=alpha, n_jobs=n_jobs)
X = mbspca.fit_transform(data) # do both the fit and the transform
elif kind == 'NMF': # non-negative matrix factorization
from sklearn.decomposition import NMF
n_components = 5
init = None # 'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'
nmf = NMF(n_components=n_components, init=init)
X = nmf.fit_transform(data) # do both the fit and the transform
elif kind == 'tSNE': # t-distributed stochastic neighbor embedding
            # limit number of PCs to feed into t-SNE, keep up to npcsperchan components
            # per chan on average:
ncomp = min((self.npcsperchan*nchans, data.shape[1]))
print('ncomp: %d' % ncomp)
import mdp # delay as late as possible
            # do PCA first, to reduce dimensionality and speed up t-SNE:
data = mdp.pca(data, output_dim=ncomp)
from sklearn.manifold import TSNE
n_components = 3 # not suited for any more than 3, according to the paper
#init = 'random', 'pca'
tsne = TSNE(n_components=n_components)
X = tsne.fit_transform(data) # do both the fit and the transform
elif kind == 'ICA': # independent components analysis
# ensure nspikes >= ndims**2 for good ICA convergence
maxncomp = intround(np.sqrt(nspikes))
if maxncomp < minncomp:
raise RuntimeError("Can't satisfy minncomp=%d request" % minncomp)
if data.shape[0] <= data.shape[1]:
raise RuntimeError('Need more observations than dimensions for ICA')
# limit number of PCs to feed into ICA, keep up to npcsperchan components per
# chan on average:
ncomp = min((self.npcsperchan*nchans, maxncomp, data.shape[1]))
if ICALIB == 'mdp':
import mdp # delay as late as possible
# do PCA first, to reduce dimensionality and speed up ICA:
print('ncomp: %d' % ncomp)
data = mdp.pca(data, output_dim=ncomp)
# nonlinearity g='pow3', ie x**3. tanh seems to separate better,
# but is a bit slower. gaus seems to be slower still, and no better
# than tanh, but these are just vague impressions.
# defaults to whitened=False, ie assumes data isn't whitened
node = mdp.nodes.FastICANode(g='pow3')
X = node(data)
pm = node.get_projmatrix()
X = X[:, np.any(pm, axis=0)] # keep only the non zero columns
elif ICALIB == 'sklearn':
from sklearn.decomposition import FastICA
# when whiten=True (default), FastICA preprocesses the data using PCA, and
# n_components is the number of PCs that are kept before doing ICA.
alg = 'parallel' # parallel or deflation, default is parallel
fun = 'logcosh' # logcosh, exp, or cube, default is logcosh
maxiter = 100 # default is 200
tol = 0.5 # default is 0.0001, seems need >~ 0.1 to exit faster
## TODO: make FastICA algorithm (parallel, deflation), nonlinearity (logcosh,
## exp, cube) and IC sort method (abs(kurtosis) vs. negentropy) GUI options
print('ncomp=%d, alg=%r, fun=%r, maxiter=%d, tol=%g'
% (ncomp, alg, fun, maxiter, tol))
fastica = FastICA(n_components=ncomp, algorithm=alg,
whiten=True, fun=fun, fun_args=None,
max_iter=maxiter, tol=tol, w_init=None,
random_state=None)
X = fastica.fit_transform(data) # do both the fit and the transform
#pm = fastica.components_
print('fastica niters: %d' % (fastica.n_iter_))
else:
raise ValueError('Invalid ICALIB %r' % ICALIB)
if X.shape[1] < 3:
raise RuntimeError('Need at least 3 columns')
# Sort ICs by decreasing kurtosis or negentropy. For kurtosis, see Scholz2004 (or
# rather, opposite to their approach, which picked ICs with most negative
# kurtosis). For methods of estimating negentropy, see Hyvarinen1997.
'''
# sort by abs(kurtosis) of each IC (column)
k = scipy.stats.kurtosis(X, axis=0)
ki = abs(k).argsort()[::-1] # decreasing order of abs(kurtosis)
print('Sort by abs(kurtosis):')
print(k[ki])
X = X[:, ki] # sort the ICs
'''
# sort by negentropy of each IC (column), this seems to work better than kurtosis
# at separating clusters of similar size:
ne = core.negentropy(X, axis=0)
assert (ne > 0).all()
nei = ne.argsort()[::-1] # decreasing order of negentropy
print('Sort by negentropy:')
print(ne[nei])
X = X[:, nei] # sort the ICs
'''
import pylab as pl
pl.figure()
pl.imshow(pm)
pl.colorbar()
pl.title('original projmatrix')
pl.figure()
pl.imshow(pm[:, ki])
pl.colorbar()
pl.title('decreasing abs(kurtosis) projmatrix')
pl.figure()
pl.imshow(pm[:, nei])
pl.colorbar()
pl.title('decreasing negentropy projmatrix')
'''
else:
raise ValueError('Unknown kind %r' % kind)
print('Output shape for %s: %r' % (kind, X.shape))
self.X[Xhash] = X # cache for fast future retrieval
print('%s took %.3f sec' % (kind, time.time()-t0))
unids = list(np.unique(spikes['nid'][sids])) # set of all nids that sids span
for nid in unids:
# don't update pos of junk cluster, if any, since it might not have any chans
# common to all its spikes, and therefore can't have PCA/ICA done on it
if nid != 0:
self.clusters[nid].update_comppos(X, sids)
return X
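    # Usage sketch (hypothetical objects/values, not from this file):
    #   sids = np.where(sort.spikes['nid'] == 3)[0]
    #   X = sort.get_component_matrix('PCA', sids, tis=(10, 40), minncomp=3)
    #   # for the mdp PCA path, X has 5 columns; the result is cached in sort.X keyed
    #   # by get_Xhash(), so an identical second call is a cache hit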
def get_rms_error(self, sids, tis=None, chans=None):
"""Calculate RMS error of spike waveforms (all from the same cluster) relative to
their cluster's mean waveform. Consider only selected tis and chans"""
spikes = self.spikes
nids = np.unique(spikes['nid'][sids])
nid = nids[0]
if len(nids) > 1 or nid == 0:
raise RuntimeError("Spikes must all belong to the same (non-junk) cluster for "
"RMS error calculation")
nt = self.wavedata.shape[2]
if tis is None: # use full waveform
tis = np.asarray([0, nt])
#print('tis: %r' % (tis,))
ti0, ti1 = tis
assert ti0 < ti1 <= nt
nt = ti1 - ti0
chans, chanslist = self.get_common_chans(sids, chans)
nchans = len(chans)
nspikes = len(sids)
if nchans == 0:
raise RuntimeError("Spikes have no common chans for RMS error")
# collect data between tis from chans from all spikes:
print('Getting RMS error on tis=%r, chans=%r of %d spikes' %
(list(tis), list(chans), nspikes))
data = np.zeros((nspikes, nchans, nt), dtype=np.float64)
for sii, sid in enumerate(sids):
spikechans = chanslist[sii]
spikechanis = spikechans.searchsorted(chans)
data[sii] = self.wavedata[sid][spikechanis, ti0:ti1]
# get cluster mean waveform between tis on chans:
wave = self.neurons[nid].get_wave()
chanis = wave.chans.searchsorted(chans)
meandata = np.float64(wave.data[chanis, ti0:ti1])
# calculate RMS error between each spike and the cluster mean waveform:
se = (data - meandata) ** 2 # squared error
# take mean across timepoints and chans, but not across spikes:
mse = se.mean(axis=2).mean(axis=1) # mean squared error
return np.sqrt(mse)
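    # Note (added): the result is a 1D array of length len(sids), one RMS value per
    # spike, in raw AD units (wavedata and the cluster mean are both unconverted here).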
def get_common_chans(self, sids, chans=None):
"""Find channels common to all sids, and optionally to chans as well. Also,
return chanslist, ie list of arrays of chans of sids"""
spikes = self.spikes
chanss = spikes['chans'][sids]
nchanss = spikes['nchans'][sids]
#t0 = time.time()
chanslist = [ cs[:ncs] for cs, ncs in zip(chanss, nchanss) ] # list of arrays
#print('Building chanslist took %.3f sec' % (time.time()-t0))
commonchans = util.intersect1d_uint8(chanslist) # find intersection
if chans is not None and len(chans) > 0:
# values in chans but not in commonchans:
diffchans = np.setdiff1d(chans, commonchans)
commonchans = np.intersect1d(chans, commonchans) # values in both
if len(diffchans) > 0:
print('WARNING: ignored chans %r not common to all spikes' % list(diffchans))
return commonchans, chanslist
def get_Xhash(self, kind, sids, tis, chans, npcsperchan, norm):
"""Return MD5 hex digest of args, for uniquely identifying the matrix resulting
from dimension reduction of spike data"""
h = hashlib.md5()
h.update(kind.encode())
h.update(sids)
h.update(tis)
h.update(chans)
if kind == 'ICA': # consider npcsperchan only if doing ICA
h.update(str(npcsperchan).encode())
h.update(str(norm).encode())
return h.hexdigest()
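    # Note (added): hashlib is fed the raw bytes of the sids/tis/chans arrays, so the
    # digest depends on their dtypes and byte order as well as their values; within a
    # single session this uniquely keys the cached component matrices in self.X.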
def create_neuron(self, id=None, inserti=None):
"""Create and return a new Neuron with a unique ID"""
if id == None:
id = self.nextnid
if id in self.neurons:
raise RuntimeError('Neuron %d already exists' % id)
id = int(id) # get rid of numpy ints
neuron = Neuron(self, id)
# add neuron to self
self.neurons[neuron.id] = neuron
if inserti == None:
self.norder.append(neuron.id)
else:
self.norder.insert(inserti, neuron.id)
return neuron
def remove_neuron(self, id):
try:
del self.neurons[id] # may already be removed due to recursive call
del self.clusters[id]
self.norder.remove(id)
except (KeyError, ValueError):
pass
def shift(self, sids, nt):
"""Shift sid waveforms by nt timepoints: -ve shifts waveforms left, +ve shifts right.
For speed, pad waveforms with edge values at the appropriate end"""
spikes = self.spikes
wd = self.wavedata
for sid in sids: # maybe there's a more efficient way than iterating over sids
core.shiftpad(wd[sid], nt) # modifies wd in-place
# update spike parameters:
dt = intround(nt * self.tres) # amount of time to shift by, signed, in us
# so we can later reload the wavedata accurately, shifting the waveform right and
# padding it on its left requires decrementing the associated timepoints
# (and vice versa)
spikes['t'][sids] -= dt
spikes['t0'][sids] -= dt
spikes['t1'][sids] -= dt
# might result in some out of bounds tis because the original peaks
# have shifted off the ends. Opposite sign wrt timepoints above, referencing within
# wavedata:
spikes['tis'][sids] = spikes['tis'][sids] + nt
# this in-place operation raises a TypeError in numpy 1.11.2, something related to
# subtracting an int from an unsigned int:
#spikes['tis'][sid] += nt
# caller should treat all sids as dirty
'''
# replaced by util.alignbest_cy():
def alignbest(self, sids, tis, chans):
"""Align all sids between tis on chans by best fit according to mean squared error.
chans are assumed to be a subset of channels of sids. Return sids
that were actually moved and therefore need to be marked as dirty"""
spikes = self.spikes
nspikes = len(sids)
nchans = len(chans)
wd = self.wavedata
nt = wd.shape[2] # num timepoints in each waveform
ti0, ti1 = tis
subnt = ti1 - ti0 # num timepoints to slice from each waveform
# TODO: make maxshift a f'n of interpolation factor
maxshift = 2 # shift +/- this many timepoints
subntdiv2 = subnt // 2
#print('subntdiv2 on either side of t=0: %d' % subntdiv2)
if subntdiv2 < maxshift:
raise ValueError("Selected waveform duration too short")
#maxshiftus = maxshift * self.stream.tres
# NOTE: in this case, it may be faster to keep shifts and sti0s and sti1s as lists
# of ints instead of np int arrays, maybe because their values are faster to iterate
# over or index with in python loops and lists:
shifts = range(-maxshift, maxshift+1) # from -maxshift to maxshift, inclusive
nshifts = len(shifts)
sti0s = [ ti0+shifti for shifti in range(nshifts) ] # shifted ti0 values
sti1s = [ ti1+shifti for shifti in range(nshifts) ] # shifted ti1 values
sti0ssti1s = zip(sti0s, sti1s)
print("Padding waveforms with up to +/- %d points of fake data" % maxshift)
# not worth subsampling here while calculating meandata, since all this
# stuff in this loop is needed in the shift loop below
subsd = np.zeros((nspikes, nchans, subnt), dtype=wd.dtype) # subset of spike data
spikechanis = np.zeros((nspikes, nchans), dtype=np.int64)
t0 = time.time()
for sidi, sid in enumerate(sids):
spike = spikes[sid]
nspikechans = spike['nchans']
spikechans = spike['chans'][:nspikechans]
spikechanis[sidi] = spikechans.searchsorted(chans)
subsd[sidi] = wd[sid, spikechanis[sidi], ti0:ti1]
print('Mean prep loop for best shift took %.3f sec' % (time.time()-t0))
t0 = time.time()
meandata = subsd.mean(axis=0) # float64
print('Mean for best shift took %.3f sec' % (time.time()-t0))
# choose best shifted waveform for each spike
# widesd holds current spike data plus padding on either side
# to allow for full width slicing for all time shifts:
maxnchans = spikes['nchans'].max() # of all spikes in sort
widesd = np.zeros((maxnchans, maxshift+nt+maxshift), dtype=wd.dtype)
shiftedsubsd = subsd.copy() # init
tempsubshifts = np.zeros((nshifts, nchans, subnt), dtype=wd.dtype)
dirtysids = []
t0 = time.time()
for sidi, sid in enumerate(sids):
# for speed, instead of adding real data, pad start and end with fake values
chanis = spikechanis[sidi]
sd = wd[sid] # sid's spike data
widesd[:, maxshift:-maxshift] = sd # 2D
widesd[:, :maxshift] = sd[:, 0, None] # pad start with first point per chan
widesd[:, -maxshift:] = sd[:, -1, None] # pad end with last point per chan
wideshortsd = widesd[chanis] # sid's padded spike data on chanis, 2D
# keep this inner loop as fast as possible:
for shifti, (sti0, sti1) in enumerate(sti0ssti1s):
tempsubshifts[shifti] = wideshortsd[:, sti0:sti1] # len: subnt
errors = tempsubshifts - meandata # (nshifts, nchans, subnt) - (nchans, subnt)
# get sum squared errors by taking sum across highest two dims - for purpose
# of error comparison, don't need to take mean or square root. Also, order
# of summation along axes doesn't matter, as long as it's done on the highest two:
sserrors = (errors**2).sum(axis=2).sum(axis=1) # nshifts long
bestshifti = sserrors.argmin()
bestshift = shifts[bestshifti]
if bestshift != 0: # no need to update sort.wavedata[sid] if there's no shift
# update time values:
dt = bestshift * self.tres # time to shift by, signed, in us
spikes['t'][sid] += dt # should remain halfway between t0 and t1
spikes['t0'][sid] += dt
spikes['t1'][sid] += dt
# might result in some out of bounds tis because the original peaks
# have shifted off the ends. Opposite sign, referencing within wavedata:
spikes['tis'][sid] -= bestshift
# update sort.wavedata
wd[sid] = widesd[:, bestshifti:bestshifti+nt]
shiftedsubsd[sidi] = tempsubshifts[bestshifti]
dirtysids.append(sid) # mark sid as dirty
print('Shifting loop took %.3f sec' % (time.time()-t0))
AD2uV = self.converter.AD2uV
stdevbefore = AD2uV(subsd.std(axis=0).mean())
stdevafter = AD2uV(shiftedsubsd.std(axis=0).mean())
print('stdev went from %.3f to %.3f uV' % (stdevbefore, stdevafter))
return dirtysids
'''
def alignminmax(self, sids, to):
"""Align sids by their min or max. Return those that were actually moved
and therefore need to be marked as dirty"""
if not self.stream.is_open():
raise RuntimeError("No open stream to reload spikes from")
spikes = self.spikes
V0s = spikes['V0'][sids]
V1s = spikes['V1'][sids]
Vss = np.column_stack((V0s, V1s))
alignis = spikes['aligni'][sids]
b = np.column_stack((alignis==0, alignis==1)) # 2D boolean array
if to == 'min':
i = Vss[b] > 0 # indices into sids of spikes aligned to the max peak
elif to == 'max':
i = Vss[b] < 0 # indices into sids of spikes aligned to the min peak
else:
raise ValueError('Unknown to %r' % to)
sids = sids[i] # sids that need realigning
nspikes = len(sids)
print("Realigning %d spikes" % nspikes)
if nspikes == 0: # nothing to do
return [] # no sids to mark as dirty
multichantis = spikes['tis'][sids] # nspikes x nchans x 2 arr
chanis = spikes['chani'][sids] # nspikes arr of max chanis
# peak tis on max chan of each spike, convert from uint8 to int32 for safe math
tis = np.int32(multichantis[np.arange(nspikes), chanis]) # nspikes x 2 arr
# NOTE: tis aren't always in temporal order!
dpeaktis = tis[:, 1] - tis[:, 0] # could be +ve or -ve
dpeaks = spikes['dt'][sids] # stored as +ve
# for each spike, decide whether to add or subtract dpeak to/from its temporal values
ordered = dpeaktis > 0 # in temporal order
reversed = dpeaktis < 0 # in reversed temporal order
alignis = spikes['aligni'][sids]
alignis0 = alignis == 0
alignis1 = alignis == 1
dpeaki = np.zeros(nspikes, dtype=int)
# add dpeak to temporal values to align to later peak
dpeaki[ordered & alignis0 | reversed & alignis1] = 1
        # subtract dpeak from temporal values to align to earlier peak
dpeaki[ordered & alignis1 | reversed & alignis0] = -1
# upcast aligni from 1 byte to an int before doing arithmetic on it:
#dalignis = -np.int32(alignis)*2 + 1
dts = dpeaki * dpeaks
dtis = -dpeaki * abs(dpeaktis)
# shift values
spikes['t'][sids] += dts
spikes['t0'][sids] += dts
spikes['t1'][sids] += dts
spikes['tis'][sids] = spikes['tis'][sids] + dtis[:, None, None] # update wrt new t0i
spikes['aligni'][sids[alignis0]] = 1
spikes['aligni'][sids[alignis1]] = 0
# update wavedata for each shifted spike
self.reload_spikes(sids)
return sids # mark all sids as dirty
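    # Note (added): Vss[b] picks, per spike, the voltage of the peak it is currently
    # aligned to; when to == 'min' the boolean mask selects spikes whose aligned peak
    # is positive (i.e. currently aligned to the max peak), and vice versa for 'max'.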
def choose_new_meanchans(self, sids):
"""Get mean waveform of all sids, then find the mean's chan with max Vpp, then
choose det.maxnchansperspike channels around that maxchan.
Return meanchans, furthestchan, and furthestchani"""
print('Choosing new channel set for all selected spikes')
det = self.detector
meanwave = self.get_mean_wave(sids)
# mean chan with max Vpp:
maxchan = meanwave.chans[meanwave.data.ptp(axis=1).argmax()]
maxchani = det.chans.searchsorted(maxchan)
distances = det.dm.data[maxchani]
# keep the maxnchansperspike closest chans to maxchan, including maxchan:
chanis = distances.argsort()[:det.maxnchansperspike]
meanchans = det.chans[chanis]
meanchans.sort() # keep them sorted
print('meanchans: %r' % list(meanchans))
furthestchan = det.chans[chanis[-1]]
print('furthestchan: %d' % furthestchan)
furthestchani = meanchans.searchsorted(furthestchan)
# sanity checks:
assert len(meanchans) == det.maxnchansperspike
assert maxchan in meanchans
return meanchans, furthestchan, furthestchani
def reload_spikes(self, sids, usemeanchans=False):
"""Update wavedata of designated spikes from stream. Optionally fix incorrect
time values from .sort 0.3 files. Optionally choose new set of channels for all
sids based on the chans closest to the mean of the sids. It's the caller's
responsibility to mark sids as dirty and trigger resaving of .wave file"""
## TODO: add findmaxchan=False and recenteronmaxchan=False kwargs
nsids = len(sids)
print('(Re)loading %d spikes' % nsids)
stream = self.stream
if not stream.is_open():
raise RuntimeError("No open stream to reload spikes from")
spikes = self.spikes
det = self.detector
ver_lte_03 = float(self.__version__) <= 0.3
if ver_lte_03:
print('Fixing potentially incorrect time values during spike reloading')
nfixed = 0
treload = time.time()
if usemeanchans:
if ver_lte_03:
raise RuntimeError("Best not to choose new chans from mean until after "
"converting to .sort >= 0.4")
meanchans, furthestchan, furthestchani = self.choose_new_meanchans(sids)
nmeanchans = len(meanchans)
# split up sids into groups efficient for loading from stream:
ts = spikes[sids]['t'] # noncontig, not a copy
# ensure they're in temporal order:
if not (np.diff(ts) >= 0).all():
print("Selected sids aren't in temporal order, sorting by time...")
tsis = ts.argsort()
sids = sids[tsis]
print("Done sorting sids by time")
# break up spikes by ISIs >= MAXGROUPISI:
splitis = np.where(np.diff(ts) >= MAXGROUPISI)[0] + 1
groups = np.split(sids, splitis)
# limit each group of sids to no more than MAXGROUPDT:
groupi = 0
while groupi < len(groups):
group = groups[groupi] # group of sids all with ISIs < MAXGROUPISI
## TODO: not a copy: is this the optimal way to get the times in this case?
relts = spikes[group]['t'] - spikes[group[0]]['t']
splitis = np.where(np.diff(relts // MAXGROUPDT) > 0)[0] + 1
nsubgroups = len(splitis) + 1
if nsubgroups > 1:
# del original group, replace with subgroups
del groups[groupi]
subgroups = np.split(group, splitis)
groups[groupi:groupi] = subgroups
groupi += len(subgroups)
else:
groupi += 1
print('ngroups: %d' % len(groups))
# process each group:
sidi = 0 # init sid index across all groups, used as status counter
for groupi, group in enumerate(groups):
printflush('<%d>' % groupi, end='')
assert len(group) > 0 # otherwise something went wrong above
t0 = spikes[group[0]]['t0']
t1 = spikes[group[-1]]['t1']
if ver_lte_03:
# load a little extra, in case we need to reload misaligned first and/or
# last spike in this group
t0 -= 5000 # -5 ms
t1 += 5000 # +5 ms
"""
Find union of chans of sids in this group, ask Stream for only those such that no
unnecessary resampling takes place on unneeded chans. Note that this doesn't make
a difference when CAR is enabled in the stream, because the full set of enabled
chans have to be maintained in Stream.__call__ until the very end. Don't bother
cutting out the correct nchans for each sid. At worst, chan 0 (the "empty" chans
array value) will be unnecessarily added to unionchans, and we'll retrieve one
extra chan when creating tempwave, which will then later be discarded:
"""
unionchans = np.unique(spikes['chans'][group])
if usemeanchans:
# now that we have the original unionchans of this group,
# update this group's spikes array entries with meanchans:
spikes['nchans'][group] = nmeanchans
# we're using the max num chans, so assign the full array:
spikes['chans'][group] = meanchans
# now update unionchans as well:
unionchans = np.unique(np.hstack((unionchans, meanchans)))
if 0 not in stream.chans: # if chan 0 is disabled in stream
# remove 0 from unionchans, otherwise an error would be raised when
# calling stream()
unionchans = unionchans[unionchans != 0]
# load and resample only what's needed for this group:
tempwave = stream(t0, t1, unionchans)
# slice out each spike's reloaded data from tempwave:
for sid in group:
# print status:
if sidi % 10000 == 0:
printflush(sidi, end='')
elif sidi % 1000 == 0:
printflush('.', end='')
if usemeanchans: # already checked above that ver_lte_03 == False
# this spike's chans have been set to meanchans, now
# check that each spike's maxchan is in meanchans:
chan = spikes[sid]['chan']
if chan not in meanchans:
# replace furthest chan with spike's maxchan:
print("spike %d: replacing furthestchan %d with spike's maxchan %d"
% (sid, furthestchan, chan))
nchans = spikes[sid]['nchans']
chans = spikes[sid]['chans'][:nchans]
# replace furthest chan with max chan, modifies spikes array in-place:
chans[furthestchani] = chan
# make sure chans remain sorted:
chans.sort()
# this isn't necessary, because all the above was in-place:
#spikes['chans'][sid][:nchans] = chans
spike = spikes[sid]
nchans = spike['nchans']
chans = spike['chans'][:nchans]
rd = tempwave[spike['t0']:spike['t1']][chans].data # reloaded data
if ver_lte_03: # fix potentially incorrect spike tis
result = self.reload_spike_ver_lte_03(sid, nchans, tempwave, rd)
if result == None:
sidi += 1 # inc status counter
continue # rollwin2D won't work, skip to next sid
else:
rd, fixed = result
if fixed:
nfixed += 1
nt = rd.shape[1]
self.wavedata[sid, :nchans, :nt] = rd # update wavedata
sidi += 1 # inc status counter
print()
if ver_lte_03:
print('Fixed time values of %d spikes' % nfixed)
print('(Re)loaded %d spikes, took %.3f sec' % (len(sids), time.time()-treload))
def reload_spike_ver_lte_03(self, sid, nchans, tempwave, rd):
"""In sort.__version__ <= 0.3, t, t0, t1, and tis were not updated
during alignbest() calls. To fix this, load new data with old potentially
incorrect t0 and t1 values, and compare this new data to existing old data
in wavedata array. Find where the non-repeating parts of the old data fits
into the new, and calculate the correction needed to fix the time values.
Finally, reload new data according to these corrected time values."""
#print('Reloading sid from ver_lte_03: %d' % sid)
od = self.wavedata[sid, :nchans] # old data
# indices that strip const values from left and right ends:
lefti, righti = lrrep2Darrstripis(od)
od = od[:, lefti:righti] # stripped old data
# reloaded data rd uses old incorrect t0 and t1, but they should be
# wide enough to encompass the non-repeating parts of the old data
width = od.shape[1] # rolling window width
if not width <= rd.shape[1]:
print('') # newline
print("WARNING: od.shape[1]=%d > rd.shape[1]=%d for sid %d" %
(od.shape[1], rd.shape[1], sid))
#import pdb; pdb.set_trace()
return
odinndis = np.where((rollwin2D(rd, width) == od).all(axis=1).all(axis=1))[0]
if len(odinndis) == 0: # no hits of old data in new
dnt = 0 # reload data based on current timepoints
elif len(odinndis) == 1: # exactly 1 hit of old data in new
odinndi = odinndis[0] # pull it out
dnt = odinndi - lefti # num timepoints to correct by, signed
else:
raise RuntimeError("Multiple hits of old data in new, don't know "
"how to reload spike %d" % sid)
newrd, fixed = rd, False
if dnt != 0:
            spikes = self.spikes # bind locally; 'spikes' was previously referenced here without being defined
            dt = intround(dnt * self.tres) # time to correct by, signed, in us
spikes['t'][sid] += dt # should remain halfway between t0 and t1
spikes['t0'][sid] += dt
spikes['t1'][sid] += dt
# might result in some out of bounds tis because the original peaks
# have shifted off the ends. Use opposite sign because we're
# referencing within wavedata:
# in versions <= 0.3, 'tis' were named 'phasetis':
spikes['phasetis'][sid] = spikes['phasetis'][sid] - dnt
spike = spikes[sid]
# reslice tempwave again now that t0 and t1 have changed
            chans = spike['chans'][:nchans] # 'chans' was previously referenced here without being defined
            newrd = tempwave[spike['t0']:spike['t1']][chans].data
fixed = True
#printflush('F', end='')
return newrd, fixed
def reload_spikes_and_templates(self, sids, usemeanchans=False):
self.reload_spikes(sids, usemeanchans=usemeanchans)
# update neuron templates:
unids = np.unique(self.spikes['nid'][sids])
unids = unids[unids != 0] # exclude junk cluster, which doesn't have a neuron
neurons = [ self.neurons[nid] for nid in unids ]
for neuron in neurons:
neuron.update_wave() # update affected mean waveforms
def init_spike_alignment(self):
"""Set initial spike alignment points according to alignment points of each
spike's neuron"""
print('Setting initial spike alignment points')
ntis, nalignis = {}, {} # tis and aligni derived from each neuron's mean waveform
for neuron in self.neurons.values():
nwave = neuron.get_wave() # update and return mean waveform
mintis = nwave.data.argmin(axis=1)
maxtis = nwave.data.argmax(axis=1)
ntis[neuron.id] = np.column_stack([mintis, maxtis])
# choose aligni with least variance:
nalignis[neuron.id] = np.argmin([mintis.std(), maxtis.std()])
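            # e.g. if mintis.std()=0.4 and maxtis.std()=1.7, argmin returns 0, i.e. the
            # trough (min) timepoints vary least across chans, so they're used for
            # alignment instead of the peak (max) timepoints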
AD2uV = self.converter.AD2uV
for s, wd in zip(self.spikes, self.wavedata):
sid = s['id']
# print out progress on a regular basis:
if sid % 100000 == 0:
printflush(sid, end='')
elif sid % 10000 == 0:
printflush('.', end='')
nid = s['nid']
#chan = s['chan']
nchans = s['nchans']
chans = s['chans'][:nchans]
neuronchans = self.neurons[nid].wave.chans
assert (chans == neuronchans).all()
s['tis'][:nchans] = ntis[nid] # set according to its neuron, wrt t0i=0
s['aligni'] = nalignis[nid] # set according to its neuron
maxchani = s['chani']
t0i, t1i = int(s['tis'][maxchani, 0]), int(s['tis'][maxchani, 1])
s['dt'] = abs(t1i - t0i) / self.sampfreq * 1e6 # us
# note that V0 and V1 might not be of opposite sign, because tis are derived
# from mean neuron waveform, not from each individual spike:
            s['V0'], s['V1'] = AD2uV(wd[maxchani, t0i]), AD2uV(wd[maxchani, t1i]) # uV
s['Vpp'] = abs(s['V1'] - s['V0']) # uV
print()
def spatially_localize_spikes(self, sortwin, method='fit'):
"""Assuming that wavedata have been extracted and neuron mean waveforms calculated,
find tis and perform spatial localization of every spike in self"""
det = self.detector
weights2f = self.extractor.weights2spatial
weights2spatialmean = self.extractor.weights2spatialmean
f = self.extractor.f
nreject = 0 # number spikes rejected during spatial localization
print('Running spatial localization on all %d spikes' % self.nspikes)
tstart = time.clock()
        ## TODO: can this be multithreaded/processed?
for s, wd in zip(self.spikes, self.wavedata):
# Get Vpp at each inclchan's tis, use as spatial weights:
# see core.rowtake() or util.rowtake_cy() for indexing explanation:
sid = s['id']
# print out progress on a regular basis:
if sid % 10000 == 0:
printflush(sid, end='')
elif sid % 1000 == 0:
printflush('.', end='')
chan = s['chan']
nchans = s['nchans']
chans = s['chans'][:nchans]
maxchani = s['chani']
chanis = det.chans.searchsorted(chans)
w = np.float32(wd[np.arange(s['nchans'])[:, None], s['tis'][:nchans]]) # nchans x 2
w = abs(w).sum(axis=1) # Vpp for each chan, measured at t0i and t1i
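            # e.g. with nchans=3 and tis=[[10, 25], [12, 27], [9, 24]], the fancy indexing
            # above picks wd[0, [10, 25]], wd[1, [12, 27]] and wd[2, [9, 24]], giving a
            # (3, 2) array of V at each chan's t0i and t1i; abs().sum(axis=1) then
            # approximates Vpp per chan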
x = det.siteloc[chanis, 0] # 1D array (row)
y = det.siteloc[chanis, 1]
if method == 'fit':
# localize by fitting extractor.f function to wavedata
params = weights2f(f, w, x, y, maxchani)
elif method == 'mean':
# set localization to Vpp-weighted spatial mean and 0 sigma:
x0, y0 = weights2spatialmean(w, x, y)
# a very ad-hoc guess for spatial sigma:
sx = 2 * dist((x0, y0), self.probe.SiteLoc[chan])
params = x0, y0, sx, sx
            else:
                raise ValueError('Unknown method %r' % method)
            if params is None: # presumably a non-localizable many-channel noise event
#printflush('X', end='') # to indicate a rejected spike
if DEBUG:
spiket = intround(s['t']) # nearest us
det.log("Reject spike %d at t=%d based on fit params" % (sid, spiket))
neuron = self.neurons[s['nid']]
# remove from its neuron, add to unsorted list of spikes:
sortwin.MoveSpikes2List(neuron, [sid], update=False)
# manually set localization params to Vpp-weighted spatial mean and 0 sigma:
x0, y0 = weights2spatialmean(w, x, y)
# set sigma to 0 um, and then later round lockr up to 1 um so that only one
# raster tick shows up for each rejected spike, reducing clutter
params = x0, y0, 0, 0
nreject += 1
# Save spatial fit params, and "lockout" only the channels within lockrx*sx
# of the fit spatial location of the spike, up to a max of inclr. "Lockout"
# in this case only refers to which channels are highlighted with a raster tick
# for each spike:
s['x0'], s['y0'], s['sx'], s['sy'] = params
x0, y0 = s['x0'], s['y0']
# lockout radius for this spike:
lockr = min(det.lockrx*s['sx'], det.inclr) # in um
lockr = max(lockr, 1) # at least 1 um, so at least the maxchan gets a tick
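            # e.g. with illustrative values lockrx=1.5, sx=20 um and inclr=100 um:
            # lockr = max(min(1.5*20, 100), 1) = 30 um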
# test y coords of chans in y array, ylockchaniis can be used to index
# into x, y and chans:
ylockchaniis, = np.where(np.abs(y - y0) <= lockr) # convert bool arr to int
# test Euclid distance from x0, y0 for each ylockchani:
lockchaniis = ylockchaniis.copy()
for ylockchanii in ylockchaniis:
if dist((x[ylockchanii], y[ylockchanii]), (x0, y0)) > lockr:
# Euclidean distance is too great, remove ylockchanii from lockchaniis:
lockchaniis = lockchaniis[lockchaniis != ylockchanii]
lockchans = chans[lockchaniis]
nlockchans = len(lockchans)
s['lockchans'][:nlockchans], s['nlockchans'] = lockchans, nlockchans
print('Spatial localization of spikes took %.3f s' % (time.clock() - tstart))
return nreject
'''
def get_component_matrix(self, dims=None, weighting=None):
"""Convert spike param matrix into pca/ica data for clustering"""
import mdp # can't delay this any longer
X = self.get_param_matrix(dims=dims)
if weighting == None:
return X
if weighting.lower() == 'ica':
node = mdp.nodes.FastICANode()
elif weighting.lower() == 'pca':
node = mdp.nodes.PCANode()
else:
            raise ValueError('unknown weighting %r' % weighting)
node.train(X)
features = node.execute(X) # returns all available components
#self.node = node
#self.weighting = weighting
#self.features = features
return features
def get_ids(self, cids, spikes):
"""Convert a list of cluster ids into 2 dicts: n2sids maps neuron IDs to
spike IDs; s2nids maps spike IDs to neuron IDs"""
cids = np.asarray(cids)
cids = cids - cids.min() # make sure cluster IDs are 0-based
uniquecids = set(cids)
nclusters = len(uniquecids)
# neuron ID to spike IDs (plural) mapping
n2sids = dict(zip(uniquecids, [ [] for i in range(nclusters) ]))
s2nids = {} # spike ID to neuron ID mapping
for spike, nid in zip(spikes, cids):
s2nids[spike['id']] = nid
n2sids[nid].append(spike['id'])
return n2sids, s2nids
def write_spc_input(self):
"""Generate input data file to SPC"""
X = self.get_component_matrix()
# write to space-delimited .dat file. Each row is a spike, each column a param
spykedir = os.path.dirname(__file__)
dt = str(datetime.datetime.now())
dt = dt.split('.')[0] # ditch the us
dt = dt.replace(' ', '_')
dt = dt.replace(':', '.')
self.spcdatfname = os.path.join(spykedir, 'spc', dt+'.dat')
# not sure why spc adds the dg_01 part:
self.spclabfname = os.path.join(spykedir, 'spc', dt+'.dg_01.lab')
f = open(self.spcdatfname, 'w')
for params in X: # write text data to file, one row at a time
params.tofile(f, sep=' ', format='%.6f')
f.write('\n')
f.close()
def parse_spc_lab_file(self, fname=None):
"""Parse output .lab file from SPC. Each row in the file is the assignment of each
spin (datapoint) to a cluster, one row per temperature datapoint. First column is
temperature run number (0-based). 2nd column is the temperature. All remaining
columns correspond to the datapoints in the order presented in the input .dat file.
Returns (Ts, cids)"""
#spikes = self.get_spikes_sortedby('id')
if fname == None:
defaultDir = r"C:\Documents and Settings\Administrator\Desktop\Charlie\From"
dlg = wx.FileDialog(None, message="Open SPC .lab file",
defaultDir=defaultDir, defaultFile='',
wildcard="All files (*.*)|*.*|.lab files (*.lab)|*.lab|",
style=wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
fname = dlg.GetPath()
dlg.Destroy()
data = np.loadtxt(fname, dtype=np.float32)
Ts = data[:, 1] # 2nd column
cids = np.int32(data[:, 2:]) # 3rd column on
print('Parsed %r' % fname)
return Ts, cids
def parse_charlies_output(self, fname=None):
if fname == None:
            fname = (r'C:\Documents and Settings\Administrator\Desktop\Charlie'
                     r'\From\2009-07-20\clustered_events_coiflet_T0.125.txt')
nids = np.loadtxt(fname, dtype=int) # one neuron id per spike
return nids
def write_spc_app_input(self):
"""Generate input data file to spc_app"""
spikes = self.get_spikes_sortedby('id')
X = self.get_component_matrix()
# write to tab-delimited data file. Each row is a param, each column a spike
# (this is the transpose of X)
# first row has labels "AFFX", "NAME", and then spike ids
# first col has labels "AFFX", and then param names
f = open(r'C:\home\mspacek\Desktop\Work\SPC\Weizmann\spc_app\spc_app_input.txt', 'w')
f.write('AFFX\tNAME\t')
for spike in spikes:
f.write('s%d\t' % spike['id'])
f.write('\n')
for parami, param in enumerate(['Vpp', 'dt', 'x0', 'y0', 'sx', 'sy', 'theta']):
f.write(param+'\t'+param+'\t')
for val in X[:, parami]:
f.write('%f\t' % val)
f.write('\n')
f.close()
def hcluster(self, t=1.0):
"""Hierarchically cluster self.spikes
TODO: consider doing multiple cluster runs. First, cluster by spatial location (x0,
y0). Then split those clusters up by Vpp. Then those by spatial distrib (sy/sx,
theta), then by temporal distrib (dt, s1, s2). This will ensure that the lousier
params will only be considered after the best ones already have, and therefore that
you start off with pretty good clusters that are then only slightly refined using
the lousy params
"""
spikes = self.get_spikes_sortedby('id')
X = self.get_component_matrix()
print(X)
# try 'weighted' or 'average' with 'mahalanobis'
cids = fclusterdata(X, t=t, method='single', metric='euclidean')
n2sids, s2nids = self.get_ids(cids, spikes)
return n2sids
def export2Charlie(self, fname='spike_data', onlymaxchan=False, nchans=3, npoints=32):
"""Export spike data to a text file, one spike per row.
Columns are x0, y0, followed by most prominent npoints datapoints
(1/4, 3/4 wrt spike time) of each nearest nchans. This is to
give to Charlie to do WPD and SPC on"""
if onlymaxchan:
nchans = 1
assert np.log2(npoints) % 1 == 0, 'npoints is not a power of 2'
# get ti - time index each spike is assumed to be centered on
self.spikes[0].update_wave(self.stream) # make sure it has a wave
ti = intround(self.spikes[0].wave.data.shape[-1] / 4) # 13 for 50 kHz, 6 for 25 kHz
dims = self.nspikes, 2+nchans*npoints
output = np.empty(dims, dtype=np.float32)
dm = self.detector.dm
chanis = np.arange(len(dm.data))
coords = np.asarray(dm.coords)
xcoords = coords[:, 0]
ycoords = coords[:, 1]
sids = list(self.spikes) # self.spikes is a dict!
sids.sort()
for sid in sids:
spike = self.spikes[sid]
chani = spike.chani # max chani
x0, y0 = spike.x0, spike.y0
if onlymaxchan:
nearestchanis = np.asarray([chani])
else:
# find closest chans to x0, y0
d2s = (xcoords - x0)**2 + (ycoords - y0)**2 # squared distances
sortis = d2s.argsort()
nearestchanis = chanis[sortis][0:nchans] # pick the first nchan nearest chans
if chani not in nearestchanis:
print("WARNING: max chani %d is not among the %d chanis nearest "
"(x0, y0) = (%.1f, %.1f) for spike %d at t=%d"
% (chani, nchans, x0, y0, sid, spike.t))
if spike.wave.data is None:
spike.update_wave(self.stream)
row = [x0, y0]
for chani in nearestchanis:
chan = dm.chans[chani] # dereference
try:
data = spike.wave[chan].data[0] # pull out singleton dimension
except IndexError: # empty array
data = np.zeros(data.shape[-1], data.dtype)
row.extend(data[ti-npoints/4:ti+npoints*3/4])
output[sid] = row
dt = str(datetime.datetime.now())
dt = dt.split('.')[0] # ditch the us
dt = dt.replace(' ', '_')
dt = dt.replace(':', '.')
fname += '.' + dt + '.txt'
np.savetxt(fname, output, fmt='%.1f', delimiter=' ')
def match(self, templates=None, weighting='signal', sort=True):
"""Match templates to all .spikes with nearby maxchans,
save error values to respective templates.
Note: slowest step by far is loading in the wave data from disk.
(First match is slow, subsequent ones are ~ 15X faster.)
Unless something's done about that in advance, don't bother optimizing here much.
Right now, once waves are loaded, performance is roughly 20000 matches/sec
TODO: Nick's alternative to gaussian distance weighting: have two templates: a mean
template, and an stdev template, and weight the error between each matched
spike and the mean on each chan at each timepoint by the corresponding stdev value
(divide the error by the stdev, so that timepoints with low stdev are more sensitive
to error)
TODO: looks like I still need to make things more nonlinear - errors at high signal
values aren't penalized enough, while errors at small signal values are penalized
too much. Try cubing both signals, then taking sum(err**2)
DONE: maybe even better, instead of doing an elaborate cubing of signal, followed by
a rather elaborate gaussian spatiotemporal weighting of errors, just take difference
of signals, and weight the error according to the abs(template_signal) at each point
in time and across chans. That way, error in parts of the signal far from zero are
considered more important than deviance of perhaps similar absolute value for signal
close to zero
"""
# None defaults to matching all templates:
templates = templates or self.templates.values()
sys.stdout.write('matching')
t0 = time.time()
nspikes = len(self.spikes)
dm = self.detector.dm
for template in templates:
template.err = [] # overwrite any existing .err attrib
tw = template.tw
templatewave = template.wave[template.chans] # pull out template's enabled chans
#stdev = template.get_stdev()[template.chans] # pull out template's enabled chans
# replace any 0s with 1s - TODO: what's best way to avoid singularities?:
#stdev[stdev == 0] = 1
# Gaussian weighting in space and/or time:
weights = template.get_weights(weighting=weighting, sstdev=self.detector.slock/2,
tstdev=self.detector.tlock/2)
for spike in self.spikes.values():
# check if spike.maxchan is outside some minimum distance from template.maxchan
if dm[template.maxchan, spike.maxchan] > MAXCHANTOLERANCE: # um
continue # don't even bother
if spike.wave.data is None or template.tw != TW: # make sure their data line up
spike.update_wave(tw) # this slows things down a lot, but is necessary
# slice template's enabled chans out of spike, calculate sum of
# squared weighted error
# first impression is that dividing by stdev makes separation worse, not better
# low stdev means more sensitive to error:
#err = (templatewave.data - spike.wave[template.chans].data) / stdev * weights
# pull out template's enabled chans from spike:
spikewave = spike.wave[template.chans]
if weighting == 'signal':
tsdata = np.asarray([templatewave.data, spikewave.data])
# take elementwise max of abs of template and spike data:
weights = np.abs(tsdata).max(axis=0)
err = (templatewave.data - spikewave.data) * weights # weighted error
err = (err**2).sum(axis=None) # sum of squared weighted error
template.err.append((spike.id, intround(err)))
template.err = np.asarray(template.err, dtype=np.int64)
if sort and len(template.err) != 0:
i = template.err[:, 1].argsort() # row indices that sort by error
template.err = template.err[i]
sys.stdout.write('.')
print('\nmatch took %.3f sec' % (time.time()-t0))
'''
class Neuron(object):
"""A collection of spikes that have been deemed somehow, whether manually
or automatically, to have come from the same cell. A Neuron's waveform
is the mean of its member spikes"""
def __init__(self, sort, id=None):
self.sort = sort
self.id = id # neuron id
self.wave = WaveForm() # init to empty waveform
self.sids = np.array([], dtype=int) # indices of spikes that make up this neuron
# relative reference timestamp, here for symmetry with fellow spike rec
# (obj.t comes up sometimes):
self.t = 0
self.plt = None # Plot currently holding self
self.cluster = None
self.good = False # user can mark this neuron as "good" if so desired
#self.fname # not here, let's allow neurons to have spikes from different files?
def get_chans(self):
if self.wave.data is None:
self.update_wave()
return self.wave.chans # self.chans just refers to self.wave.chans
chans = property(get_chans)
def get_chan(self):
if self.wave.data is None:
self.update_wave()
return self.wave.chans[self.wave.data.ptp(axis=1).argmax()] # chan with max Vpp
chan = property(get_chan)
def get_nspikes(self):
return len(self.sids)
nspikes = property(get_nspikes)
def __getstate__(self):
"""Get object state for pickling"""
d = self.__dict__.copy()
# don't save any calculated PCs/ICs:
#d.pop('X', None)
#d.pop('Xhash', None)
# don't save plot self is assigned to, since that'll change anyway on unpickle
d['plt'] = None
return d
def get_wave(self):
"""Check for valid mean and std waveform before returning it"""
# many neuron waveforms saved in old .sort files won't have a wave.std field:
try:
self.wave.std
except AttributeError:
return self.update_wave()
        if self.wave is None or self.wave.data is None or self.wave.std is None:
return self.update_wave()
else:
return self.wave # return existing waveform
def update_wave(self):
"""Update mean and std of self's waveform"""
sort = self.sort
spikes = sort.spikes
if len(self.sids) == 0: # no member spikes, perhaps I should be deleted?
raise RuntimeError("n%d has no spikes and its waveform can't be updated" % self.id)
meanwave = sort.get_mean_wave(self.sids, nid=self.id)
# update self's Waveform object
self.wave.data = meanwave.data
self.wave.std = meanwave.std
self.wave.ts = sort.twts.copy() # meanwave has no .ts, copy for clean jsonpickle
self.wave.chans = meanwave.chans
self.wave.tres = sort.tres # meanwave has no .tres
return self.wave
def __sub__(self, other):
"""Return difference array between self and other neurons' waveforms
on common channels"""
selfwavedata, otherwavedata = self.getCommonWaveData(other.chan, other.chans,
other.wave.data)
return selfwavedata - otherwavedata
def getCommonWaveData(self, otherchan, otherchans, otherwavedata):
"""Return waveform data common to self's chans and otherchans, while
requiring that both include the other's maxchan"""
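        # e.g. self.chans=[2, 3, 5, 7] and otherchans=[3, 5, 7, 9] give common
        # chans=[3, 5, 7]; searchsorted below maps those common chans back to row
        # indices into each wavedata array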
chans = np.intersect1d(self.chans, otherchans, assume_unique=True)
if len(chans) == 0:
raise ValueError('No common chans')
if self.chan not in chans or otherchan not in chans:
raise ValueError("maxchans aren't part of common chans")
selfchanis = self.chans.searchsorted(chans)
otherchanis = otherchans.searchsorted(chans)
return self.wave.data[selfchanis], otherwavedata[otherchanis]
'''
def get_stdev(self):
"""Return 2D array of stddev of each timepoint of each chan of member spikes.
Assumes self.update_wave has already been called"""
data = []
# TODO: speed this up by pre-allocating memory and then filling in the array
for spike in self.spikes:
data.append(spike.wave.data) # collect spike's data
stdev = np.asarray(data).std(axis=0)
return stdev
def get_weights(self, weighting=None, sstdev=None, tstdev=None):
"""Returns unity, spatial, temporal, or spatiotemporal Gaussian weights
for self's enabled chans in self.wave.data, given spatial and temporal
stdevs"""
nchans = len(self.wave.chans)
nt = len(self.wave.data[0]) # assume all chans have the same number of timepoints
if weighting == None:
weights = 1
elif weighting == 'spatial':
weights = self.get_gaussian_spatial_weights(sstdev) # vector
elif weighting == 'temporal':
weights = self.get_gaussian_temporal_weights(tstdev) # vector
elif weighting == 'spatiotemporal':
sweights = self.get_gaussian_spatial_weights(sstdev)
tweights = self.get_gaussian_temporal_weights(tstdev)
weights = np.outer(sweights, tweights) # matrix, outer product of the two
elif weighting == 'signal':
weights = None # this is handled by caller
#print('\nweights:\n%r' % weights)
return weights
def get_gaussian_spatial_weights(self, stdev):
"""Return a vector that weights self.chans according to a 2D gaussian
centered on self.maxchan with standard deviation stdev in um"""
g = Gaussian(mean=0, stdev=stdev)
# distances between maxchan and all enabled chans:
d = self.sort.detector.dm[self.maxchan, self.chans]
weights = g[d]
weights.shape = (-1, 1) # vertical vector with nchans rows, 1 column
return weights
def get_gaussian_temporal_weights(self, stdev):
"""Return a vector that weights timepoints in self's mean waveform
by a gaussian centered on t=0, with standard deviation stdev in us"""
g = Gaussian(mean=0, stdev=stdev)
ts = self.wave.ts # template mean timepoints relative to t=0 spike time
weights = g[ts] # horizontal vector with 1 row, nt timepoints
return weights
'''
class PTCSHeader(object):
"""
Polytrode clustered spikes file header:
formatversion: int64 (currently version 3)
ndescrbytes: uint64 (nbytes, keep as multiple of 8 for nice alignment)
descr: ndescrbytes of ASCII text
(padded with null bytes if needed for 8 byte alignment)
nneurons: uint64 (number of neurons)
nspikes: uint64 (total number of spikes)
nsamplebytes: uint64 (number of bytes per template waveform sample)
samplerate: uint64 (Hz)
npttypebytes: uint64 (nbytes, keep as multiple of 8 for nice alignment)
pttype: npttypebytes of ASCII text
(padded with null bytes if needed for 8 byte alignment)
nptchans: uint64 (total num chans in polytrode)
chanpos: nptchans * 2 * float64
(array of (x, y) positions, in um, relative to top of polytrode,
indexed by 0-based channel IDs)
nsrcfnamebytes: uint64 (nbytes, keep as multiple of 8 for nice alignment)
srcfname: nsrcfnamebytes of ASCII text
(source file name, probably .srf, padded with null bytes if needed for
8 byte alignment)
datetime: float64
(absolute datetime corresponding to t=0 us timestamp, stored as days since
epoch: December 30, 1899 at 00:00)
ndatetimestrbytes: uint64
datetimestr: ndatetimestrbytes of ASCII text
        (human readable string representation of datetime, preferably ISO 8601,
padded with null bytes if needed for 8 byte alignment)
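
    A minimal reader sketch (an assumption, not part of spyke itself) for the first few
    fields above, given a file object f opened in binary mode:

        formatversion = int(np.fromfile(f, dtype=np.int64, count=1)[0])
        ndescrbytes = int(np.fromfile(f, dtype=np.uint64, count=1)[0])
        descr = f.read(ndescrbytes) # null-padded ASCII text, strip trailing nulls
        nneurons, nspikes, nsamplebytes, samplerate = np.fromfile(f, dtype=np.uint64,
                                                                  count=4)

    and so on, field by field, in the order documented above.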
"""
FORMATVERSION = 3 # overall .ptcs file format version, not header format version
def __init__(self, sort, sortpath, stream, nneurons, nspikes, nsamplebytes,
fullfname, exportdt, user='', notes=''):
self.sort = sort
self.stream = stream
self.nneurons = nneurons
self.nspikes = nspikes
self.nsamplebytes = nsamplebytes
homelessfullfname = lstrip(fullfname, os.path.expanduser('~'))
sortfname = sort.fname
sortfullfname = os.path.join(sortpath, sortfname)
sortfmoddt = str(datetime.datetime.fromtimestamp(os.path.getmtime(sortfullfname)))
sortfmoddt = sortfmoddt.split('.')[0] # ditch the us
sortfsize = os.path.getsize(sortfullfname) # in bytes
d = {'file_type': '.ptcs (polytrode clustered spikes) file',
'original_fname': homelessfullfname, 'export_time': exportdt,
'sort': {'fname': sortfname, 'path': sortpath,
'fmtime': sortfmoddt, 'fsize': sortfsize},
'user': user, 'notes': notes}
descr = str(d)
self.descr = pad(descr, align=8)
self.srcfname = pad(lstrip(stream.fname, '../'), align=8)
self.pttype = pad(stream.probe.name, align=8)
self.dt = stream.datetime
self.dtstr = pad(self.dt.isoformat(), align=8)
def write(self, f):
s = self.sort
np.int64(self.FORMATVERSION).tofile(f) # formatversion
np.uint64(len(self.descr)).tofile(f) # ndescrbytes
f.write(self.descr) # descr
np.uint64(self.nneurons).tofile(f) # nneurons
np.uint64(self.nspikes).tofile(f) # nspikes
np.uint64(self.nsamplebytes).tofile(f) # nsamplebytes
np.uint64(s.sampfreq).tofile(f) # samplerate
np.uint64(len(self.pttype)).tofile(f) # npttypebytes
f.write(self.pttype) # pttype
np.uint64(s.stream.probe.nchans).tofile(f) # nptchans
np.float64(s.stream.probe.siteloc_arr()).tofile(f) # chanpos
np.uint64(len(self.srcfname)).tofile(f) # nsrcfnamebytes
f.write(self.srcfname) # srcfname
np.float64(td2days(self.dt - EPOCH)).tofile(f) # datetime (in days)
np.uint64(len(self.dtstr)).tofile(f) # ndatetimestrbytes
        f.write(self.dtstr) # datetimestr
class PTCSNeuronRecord(object):
"""
Polytrode clustered spikes file neuron record:
nid: int64 (signed neuron id, could be -ve, could be non-contiguous with previous)
ndescrbytes: uint64 (nbytes, keep as multiple of 8 for nice alignment, defaults to 0)
descr: ndescrbytes of ASCII text
(padded with null bytes if needed for 8 byte alignment)
clusterscore: float64
xpos: float64 (um)
ypos: float64 (um)
sigma: float64 (um) (Gaussian spatial sigma)
nchans: uint64 (num chans in template waveforms)
chanids: nchans * uint64 (0 based IDs of channels in template waveforms)
maxchanid: uint64 (0 based ID of max channel in template waveforms)
nt: uint64 (num timepoints per template waveform channel)
nwavedatabytes: uint64 (nbytes, keep as multiple of 8 for nice alignment)
wavedata: nwavedatabytes of nsamplebytes sized floats
(template waveform data, laid out as nchans * nt, in uV,
padded with null bytes if needed for 8 byte alignment)
nwavestdbytes: uint64 (nbytes, keep as multiple of 8 for nice alignment)
wavestd: nwavestdbytes of nsamplebytes sized floats
(template waveform standard deviation, laid out as nchans * nt, in uV,
padded with null bytes if needed for 8 byte alignment)
nspikes: uint64 (number of spikes in this neuron)
spike timestamps: nspikes * uint64 (us, should be sorted)
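
    A minimal reader sketch (an assumption, not part of spyke itself) for the template
    waveform, once nchans, nt, nwavedatabytes and the sample dtype wavedtype have been
    read from the record:

        nfloats = nwavedatabytes // np.dtype(wavedtype).itemsize
        wavedata = np.fromfile(f, dtype=wavedtype, count=nfloats)[:nchans*nt]
        wavedata.shape = nchans, nt # one row per chan, in uV; alignment padding discarded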
"""
def __init__(self, neuron, spikets=None, nsamplebytes=None, descr=''):
n = neuron
AD2uV = n.sort.converter.AD2uV
self.neuron = neuron
self.spikets = spikets # constrained to stream range, may be < neuron.sids
self.wavedtype = {2: np.float16, 4: np.float32, 8: np.float64}[nsamplebytes]
if n.wave.data is None or n.wave.std is None: # some may have never been displayed
n.update_wave()
# wavedata and wavestd are nchans * nt * nsamplebytes long:
self.wavedata = pad(self.wavedtype(AD2uV(n.wave.data)), align=8)
self.wavestd = pad(self.wavedtype(AD2uV(n.wave.std)), align=8)
self.descr = pad(descr, align=8)
def write(self, f):
n = self.neuron
np.int64(n.id).tofile(f) # nid
np.uint64(len(self.descr)).tofile(f) # ndescrbytes
f.write(self.descr) # descr, bytes
np.float64(np.nan).tofile(f) # clusterscore
np.float64(n.cluster.pos['x0']).tofile(f) # xpos (um)
np.float64(n.cluster.pos['y0']).tofile(f) # ypos (um)
np.float64(n.cluster.pos['sx']).tofile(f) # sigma (um)
np.uint64(len(n.wave.chans)).tofile(f) # nchans
np.uint64(n.wave.chans).tofile(f) # chanids
np.uint64(n.chan).tofile(f) # maxchanid
np.uint64(len(n.wave.ts)).tofile(f) # nt
np.uint64(self.wavedata.nbytes).tofile(f) # nwavedatabytes
self.wavedata.tofile(f) # wavedata
np.uint64(self.wavestd.nbytes).tofile(f) # nwavestdbytes
self.wavestd.tofile(f) # wavestd
np.uint64(len(self.spikets)).tofile(f) # nspikes
np.uint64(self.spikets).tofile(f) # spike timestamps (us)
class PanelScrollArea(QtGui.QScrollArea):
"""A scroll area for the spikesortpanel"""
def keyPressEvent(self, event):
key = event.key()
        # seems the ENTER key needs to be handled to directly call plot, unlike in sortwin
# where the event is passed on to be handled by the list widgets
if key in [Qt.Key_Enter, Qt.Key_Return]:
sortwin = self.topLevelWidget()
sortwin.parent().ui.plotButton.click()
else:
QtGui.QScrollArea.keyPressEvent(self, event) # pass it on
class SortWindow(SpykeToolWindow):
"""Sort window"""
def __init__(self, parent, pos=None):
SpykeToolWindow.__init__(self, parent, flags=QtCore.Qt.Tool)
self.spykewindow = parent
ncols = self.sort.probe.ncols
nrows = self.sort.probe.nrows
        # try to allow the same amount of horizontal space per column for 2 and 3 col probes:
if ncols <= 2:
self.MAINSPLITTERPOS = 300
else:
self.MAINSPLITTERPOS = 265 # move it more to the left
# make horizontal sort slider use as little vertical space as possible
self.VSPLITTERPOS = 1
panelwidth = PANELWIDTHPERCOLUMN * ncols
panelheight = PANELHEIGHTPERROW * nrows
width = max(self.MAINSPLITTERPOS + panelwidth + VSCROLLBARWIDTH, MINSORTWINDOWWIDTH)
size = (width, SORTWINDOWHEIGHT)
self.setWindowTitle('Sort Window')
self.move(*pos)
self.resize(*size)
self._source = None # source cluster for comparison
self.slider = SpikeSelectionSlider(Qt.Horizontal, self)
self.slider.setInvertedControls(True)
self.slider.setToolTip('Position of sliding spike selection time window')
self.connect(self.slider, QtCore.SIGNAL('valueChanged(int)'),
self.on_slider_valueChanged)
self.connect(self.slider, QtCore.SIGNAL('sliderPressed()'),
self.on_slider_sliderPressed)
self.nlist = NList(self)
self.nlist.setToolTip('Neuron list')
self.nslist = NSList(self)
self.nslist.setToolTip('Sorted spike list')
self.uslist = USList(self) # should really be multicolumn tableview
self.uslist.setToolTip('Unsorted spike list')
tw = self.spykewindow.sort.tw
self.panel = SpikeSortPanel(self, tw=tw)
self.panel.setMinimumSize(QtCore.QSize(panelwidth, panelheight))
self.panelscrollarea = PanelScrollArea(self)
self.panelscrollarea.setWidget(self.panel)
self.panelscrollarea.setMinimumWidth(panelwidth + VSCROLLBARWIDTH)
self.panelscrollarea.setWidgetResizable(True) # allows panel to size bigger than min
self.vsplitter = QtGui.QSplitter(Qt.Vertical)
self.vsplitter.addWidget(self.slider)
self.vsplitter.addWidget(self.nlist)
self.vsplitter.addWidget(self.nslist)
self.vsplitter.addWidget(self.uslist)
self.mainsplitter = QtGui.QSplitter(Qt.Horizontal)
self.mainsplitter.addWidget(self.vsplitter)
self.mainsplitter.addWidget(self.panelscrollarea)
self.layout = QtGui.QVBoxLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.addWidget(self.mainsplitter)
mainwidget = QtGui.QWidget(self)
mainwidget.setLayout(self.layout)
self.setCentralWidget(mainwidget)
self.toolbar = self.setupToolbar()
self.addToolBar(self.toolbar)
def setupToolbar(self):
toolbar = QtGui.QToolBar(self)
toolbar.setObjectName('toolbar')
toolbar.setFloatable(True)
toolbar.setIconSize(QtCore.QSize(16, 16)) # like in main spyke window
actionDelete = QAction(QIcon('res/edit-delete.svg'), 'Del', self)
tt = ('<nobr><b>Del</b> Delete selected spikes or clusters</nobr>\n'
'<nobr><b>CTRL+Del</b> Delete selected spikes</nobr>')
actionDelete.setToolTip(tt)
self.connect(actionDelete, QtCore.SIGNAL('triggered()'),
self.on_actionDelete_triggered)
toolbar.addAction(actionDelete)
actionMergeClusters = QAction('M', self)
tt = '<nobr><b>M</b> Merge clusters</nobr>'
actionMergeClusters.setToolTip(tt)
self.connect(actionMergeClusters, QtCore.SIGNAL('triggered()'),
self.on_actionMergeClusters_triggered)
toolbar.addAction(actionMergeClusters)
#actionToggleClustersGood = QAction(QIcon('res/dialog-apply.svg'), 'G', self)
actionToggleClustersGood = QAction('G', self)
tt = '<nobr><b>G</b> Toggle clusters as "good"</nobr>'
actionToggleClustersGood.setToolTip(tt)
self.connect(actionToggleClustersGood, QtCore.SIGNAL('triggered()'),
self.on_actionToggleClustersGood_triggered)
toolbar.addAction(actionToggleClustersGood)
actionSplit = QAction('+', self)
tt = '<nobr><b>+</b> Split off selected spikes</nobr>'
actionSplit.setToolTip(tt)
self.connect(actionSplit, QtCore.SIGNAL('triggered()'),
self.on_actionSplit_triggered)
toolbar.addAction(actionSplit)
actionLabelMultiunit = QAction('-', self)
tt = '<nobr><b>-</b> Label clusters as multiunit</nobr>'
actionLabelMultiunit.setToolTip(tt)
self.connect(actionLabelMultiunit, QtCore.SIGNAL('triggered()'),
self.on_actionLabelMultiunit_triggered)
toolbar.addAction(actionLabelMultiunit)
actionChanSplitClusters = QAction('/', self)
tt = '<nobr><b>/</b> Split clusters by channels</nobr>'
actionChanSplitClusters.setToolTip(tt)
self.connect(actionChanSplitClusters, QtCore.SIGNAL('triggered()'),
self.on_actionChanSplitClusters_triggered)
toolbar.addAction(actionChanSplitClusters)
actionDensitySplit = QAction('P', self)
tt = ('<nobr><b>P</b> Split cluster pair by density along line between '
'their centers</nobr>')
actionDensitySplit.setToolTip(tt)
self.connect(actionDensitySplit, QtCore.SIGNAL('triggered()'),
self.on_actionDensitySplit_triggered)
toolbar.addAction(actionDensitySplit)
actionRandomSplit = QAction('\\', self)
tt = ('<nobr><b>\\</b> Randomly split each selected cluster in half</nobr>')
actionRandomSplit.setToolTip(tt)
self.connect(actionRandomSplit, QtCore.SIGNAL('triggered()'),
self.on_actionRandomSplit_triggered)
toolbar.addAction(actionRandomSplit)
#actionRenumber = QAction(QIcon('res/gtk-edit.svg'), '#', self)
actionRenumber = QAction('#', self)
tt = ('<nobr><b>#</b> Renumber all clusters in vertical spatial order</nobr>\n'
'<nobr><b>CTRL+#</b> Renumber selected cluster</nobr>')
actionRenumber.setToolTip(tt)
self.connect(actionRenumber, QtCore.SIGNAL('triggered()'),
self.on_actionRenumber_triggered)
toolbar.addAction(actionRenumber)
actionFind = QAction(QIcon('res/edit-find.svg'), 'Find', self)
tt = ('<nobr><b>CTRL+F</b> Find spike in cluster plot</nobr>')
actionFind.setToolTip(tt)
self.connect(actionFind, QtCore.SIGNAL('triggered()'),
self.on_actionFind_triggered)
toolbar.addAction(actionFind)
actionSelectRandomSpikes = QAction('R', self)
tt = '<nobr><b>R</b> Select random sample of spikes of current clusters</nobr>'
actionSelectRandomSpikes.setToolTip(tt)
self.connect(actionSelectRandomSpikes, QtCore.SIGNAL('triggered()'),
self.on_actionSelectRandomSpikes_triggered)
toolbar.addAction(actionSelectRandomSpikes)
actionToggleErrors = QAction('E', self)
actionToggleErrors.setCheckable(True)
actionToggleErrors.setChecked(self.panel.enable_fills)
tt = '<nobr><b>CTRL+E</b> Toggle visibility of template error limits</nobr>'
actionToggleErrors.setToolTip(tt)
self.connect(actionToggleErrors, QtCore.SIGNAL('toggled(bool)'),
self.on_actionToggleErrors_toggled)
toolbar.addAction(actionToggleErrors)
self.actionToggleErrors = actionToggleErrors
nsamplesComboBox = QtGui.QComboBox(self)
nsamplesComboBox.setToolTip('Number of spikes per cluster to randomly select')
nsamplesComboBox.setFocusPolicy(Qt.NoFocus)
nsamplesComboBox.addItems(['100', '50', '20', '10', '5', '1'])
nsamplesComboBox.setCurrentIndex(2)
toolbar.addWidget(nsamplesComboBox)
self.connect(nsamplesComboBox, QtCore.SIGNAL('activated(int)'),
self.on_actionSelectRandomSpikes_triggered)
self.nsamplesComboBox = nsamplesComboBox
gainComboBox = QtGui.QComboBox(self)
gainComboBox.setToolTip('Waveform gain (default: 1.5)')
gainComboBox.setFocusPolicy(Qt.NoFocus)
gainComboBox.addItems(['4', '3.75', '3.5', '3.25', '3', '2.75', '2.5', '2.25', '2',
'1.75', '1.5', '1.25', '1', '0.75', '0.5', '0.25'])
gainComboBox.setCurrentIndex(3)
toolbar.addWidget(gainComboBox)
self.connect(gainComboBox, QtCore.SIGNAL('activated(int)'),
self.on_gainComboBox_triggered)
self.gainComboBox = gainComboBox
#actionAlignMin = QAction(QIcon('res/go-bottom.svg'), 'Min', self)
actionAlignMin = QAction('Min', self)
actionAlignMin.setToolTip('Align selected spikes to min')
self.connect(actionAlignMin, QtCore.SIGNAL('triggered()'),
self.on_actionAlignMin_triggered)
toolbar.addAction(actionAlignMin)
#actionAlignMax = QAction(QIcon('res/go-top.svg'), 'Max', self)
actionAlignMax = QAction('Max', self)
actionAlignMax.setToolTip('Align selected spikes to max')
self.connect(actionAlignMax, QtCore.SIGNAL('triggered()'),
self.on_actionAlignMax_triggered)
toolbar.addAction(actionAlignMax)
#actionAlignBest = QAction(QIcon('res/emblem-OK.png'), 'Best', self)
actionAlignBest = QAction('B', self)
tt = '<nobr><b>B</b> Align selected spikes by best fit</nobr>'
actionAlignBest.setToolTip(tt)
self.connect(actionAlignBest, QtCore.SIGNAL('triggered()'),
self.on_actionAlignBest_triggered)
toolbar.addAction(actionAlignBest)
actionShiftLeft = QAction('[', self)
tt = ('<nobr><b>[</b> Shift selected spikes 2 points left</nobr>\n'
'<nobr><b>CTRL+[</b> Shift selected spikes 1 point left</nobr>')
actionShiftLeft.setToolTip(tt)
self.connect(actionShiftLeft, QtCore.SIGNAL('triggered()'),
self.on_actionShiftLeft_triggered)
toolbar.addAction(actionShiftLeft)
actionShiftRight = QAction(']', self)
tt = ('<nobr><b>]</b> Shift selected spikes 2 points right</nobr>\n'
'<nobr><b>CTRL+]</b> Shift selected spikes 1 point right</nobr>')
actionShiftRight.setToolTip(tt)
self.connect(actionShiftRight, QtCore.SIGNAL('triggered()'),
self.on_actionShiftRight_triggered)
toolbar.addAction(actionShiftRight)
incltComboBox = QtGui.QComboBox(self)
incltComboBox.setToolTip("Waveform duration (us) to include for component "
"analysis,\nasymmetric around spike time")
incltComboBox.setFocusPolicy(Qt.NoFocus)
dtw = self.sort.tw[1] - self.sort.tw[0] # spike time window width
incltstep = intround(dtw / 10) # evenly spaced inclt values
incltvals = np.arange(dtw, 0, -incltstep)
incltComboBox.addItems([ str(incltval) for incltval in incltvals ])
incltComboBox.setCurrentIndex(0)
toolbar.addWidget(incltComboBox)
self.connect(incltComboBox, QtCore.SIGNAL('activated(int)'),
self.on_incltComboBox_triggered)
self.incltComboBox = incltComboBox
#incltunitsLabel = QtGui.QLabel('us', self)
#toolbar.addWidget(incltunitsLabel)
nPCsPerChanSpinBox = QtGui.QSpinBox(self)
nPCsPerChanSpinBox.setToolTip("Number of PCs to use per channel to feed into ICA")
nPCsPerChanSpinBox.setFocusPolicy(Qt.NoFocus)
toolbar.addWidget(nPCsPerChanSpinBox)
nPCsPerChanSpinBox.setMinimum(1)
self.connect(nPCsPerChanSpinBox, QtCore.SIGNAL('valueChanged(int)'),
self.on_nPCsPerChanSpinBox_valueChanged)
nPCsPerChanSpinBox.setValue(self.sort.npcsperchan)
self.nPCsPerChanSpinBox = nPCsPerChanSpinBox
#actionFindPrevMostSimilar = QAction(QIcon('res/go-previous.svg'), '<', self)
actionFindPrevMostSimilar = QAction('<', self)
tt = '<nobr><b><</b> Find previous most similar cluster</nobr>'
actionFindPrevMostSimilar.setToolTip(tt)
self.connect(actionFindPrevMostSimilar, QtCore.SIGNAL('triggered()'),
self.on_actionFindPrevMostSimilar_triggered)
toolbar.addAction(actionFindPrevMostSimilar)
#actionFindNextMostSimilar = QAction(QIcon('res/go-next.svg'), '>', self)
actionFindNextMostSimilar = QAction('>', self)
tt = '<nobr><b>></b> Find next most similar cluster</nobr>'
actionFindNextMostSimilar.setToolTip(tt)
self.connect(actionFindNextMostSimilar, QtCore.SIGNAL('triggered()'),
self.on_actionFindNextMostSimilar_triggered)
toolbar.addAction(actionFindNextMostSimilar)
actionReloadSpikes = QAction(QIcon('res/view-refresh.svg'), 'Reload', self)
tt = ('<nobr><b>F5</b> Reload waveforms of selected spikes. '
'If none selected, reload all</nobr>\n'
'<nobr><b>CTRL+F5</b> Use mean waveform to choose chans to reload</nobr>')
actionReloadSpikes.setToolTip(tt)
self.connect(actionReloadSpikes, QtCore.SIGNAL('triggered()'),
self.on_actionReloadSpikes_triggered)
toolbar.addAction(actionReloadSpikes)
actionSave = QAction(QIcon('res/document-save.svg'), '&Save', self)
actionSave.setToolTip('Save sort panel to file')
self.connect(actionSave, QtCore.SIGNAL('triggered()'),
self.on_actionSave_triggered)
toolbar.addAction(actionSave)
return toolbar
def get_sort(self):
return self.spykewindow.sort
sort = property(get_sort) # make this a property for proper behaviour after unpickling
def closeEvent(self, event):
self.spykewindow.HideWindow('Sort')
def mousePressEvent(self, event):
"""These are mostly passed on up from spyke list views and sort panel. Left
clicks are (or should be) filtered out"""
buttons = event.buttons()
if buttons == QtCore.Qt.MiddleButton:
#self.on_actionSelectRandomSpikes_triggered()
self.spykewindow.ui.plotButton.click() # same as hitting ENTER in nslist
elif buttons == QtCore.Qt.RightButton:
self.clear()
def keyPressEvent(self, event):
"""Alpha character keypresses are by default caught by the child lists for quickly
scrolling down to and selecting list items. However, the appropriate alpha
keypresses have been set in the child lists to be ignored, so they propagate
up to here"""
key = event.key()
modifiers = event.modifiers()
ctrl = modifiers & Qt.ControlModifier # ctrl is down
spw = self.spykewindow
if key == Qt.Key_A: # ignored in SpykeListViews
spw.ui.plotButton.click() # same as hitting ENTER in nslist
elif key == Qt.Key_X: # ignored in SpykeListViews
spw.ui.plotXcorrsButton.click()
elif key == Qt.Key_N: # ignored in SpykeListViews
spw.ui.normButton.click()
elif key == Qt.Key_Escape: # deselect all spikes and all clusters
self.clear()
elif key == Qt.Key_Delete:
self.on_actionDelete_triggered()
elif key == Qt.Key_M: # ignored in SpykeListViews
self.on_actionMergeClusters_triggered()
elif key == Qt.Key_G: # ignored in SpykeListViews
self.on_actionToggleClustersGood_triggered()
elif key == Qt.Key_Equal: # ignored in SpykeListViews
self.on_actionSplit_triggered()
elif key == Qt.Key_Minus: # ignored in SpykeListViews
self.on_actionLabelMultiunit_triggered()
elif key == Qt.Key_Slash: # ignored in SpykeListViews
self.on_actionChanSplitClusters_triggered()
elif key == Qt.Key_P: # ignored in SpykeListViews
self.on_actionDensitySplit_triggered()
elif key == Qt.Key_Backslash: # ignored in SpykeListViews
self.on_actionRandomSplit_triggered()
elif key == Qt.Key_NumberSign: # ignored in SpykeListViews
self.on_actionRenumber_triggered()
elif key == Qt.Key_F: # ignored in SpykeListViews
if ctrl:
self.FindSpike()
else:
self.FindCluster()
elif key == Qt.Key_R: # ignored in SpykeListViews
self.on_actionSelectRandomSpikes_triggered()
elif key == Qt.Key_Space: # ignored in SpykeListViews
if ctrl:
SpykeToolWindow.keyPressEvent(self, event) # pass it on
else:
spw.on_clusterButton_clicked()
elif key == Qt.Key_B: # ignored in SpykeListViews
self.on_actionAlignBest_triggered()
elif key == Qt.Key_BracketLeft: # ignored in SpykeListViews
self.on_actionShiftLeft_triggered()
elif key == Qt.Key_BracketRight: # ignored in SpykeListViews
self.on_actionShiftRight_triggered()
elif key == Qt.Key_Comma: # ignored in SpykeListViews
self.on_actionFindPrevMostSimilar_triggered()
elif key == Qt.Key_Period: # ignored in SpykeListViews
self.on_actionFindNextMostSimilar_triggered()
elif key == Qt.Key_F5: # ignored in SpykeListViews
self.on_actionReloadSpikes_triggered()
elif key == Qt.Key_E: # ignored in SpykeListViews
if ctrl:
self.actionToggleErrors.toggle()
else:
self.clear() # E is synonymous with ESC
elif key == Qt.Key_C: # toggle between PCA and ICA, ignored in SpykeListViews
c = str(spw.ui.componentAnalysisComboBox.currentText())
if c == 'PCA':
index = spw.ui.componentAnalysisComboBox.findText('ICA')
spw.ui.componentAnalysisComboBox.setCurrentIndex(index)
elif c == 'ICA':
index = spw.ui.componentAnalysisComboBox.findText('PCA')
spw.ui.componentAnalysisComboBox.setCurrentIndex(index)
spw.on_plotButton_clicked()
elif key == Qt.Key_T: # toggle plotting against time, ignored in SpykeListViews
z = str(spw.ui.zDimComboBox.currentText())
if z == 't':
spw.on_c0c1c2Button_clicked() # plot in pure component analysis space
else:
spw.on_c0c1tButton_clicked() # plot against time
elif key == Qt.Key_W: # toggle plotting against RMSError, ignored in SpykeListViews
z = str(spw.ui.zDimComboBox.currentText())
if z == 'RMSerror':
spw.on_c0c1c2Button_clicked() # plot in pure component analysis space
else:
spw.ui.zDimComboBox.setCurrentIndex(3)
spw.on_plotButton_clicked() # plot against RMSError
elif key in [Qt.Key_Enter, Qt.Key_Return]:
# this is handled at a lower level by on_actionItem_triggered
# in the various listview controls
pass
else:
SpykeToolWindow.keyPressEvent(self, event) # pass it on
def clear(self):
"""Clear selections in this order: unsorted spikes, sorted spikes,
cluster automatically selected for comparison, cluster 0, clusters"""
spw = self.spykewindow
clusters = spw.GetClusters()
if len(self.uslist.selectedIndexes()) > 0:
self.uslist.clearSelection()
elif self.nslist.nrowsSelected > 0:
self.nslist.clearSelection()
elif len(clusters) == 2 and self._source in clusters:
clusters.remove(self._source)
spw.SelectClusters(clusters, on=False)
elif 0 in spw.GetClusterIDs():
for cluster in spw.GetClusters():
if cluster.id == 0:
spw.SelectClusters([cluster], on=False)
break
else:
self.nlist.clearSelection()
# reset colours in cluster plot:
gw = spw.windows['Cluster'].glWidget
gw.colour()
gw.updateGL()
    def on_actionDelete_triggered(self):
        """Delete explicitly selected spikes, or clusters"""
selsids = self.spykewindow.GetSpikes() # IDs of explicitly selected spikes
nselsids = len(selsids)
if (QApplication.instance().keyboardModifiers() & Qt.ControlModifier
or nselsids > 0):
self.delete_spikes()
else:
self.delete_clusters()
def delete_clusters(self):
"""Del button press/click"""
spw = self.spykewindow
clusters = spw.GetClusters()
s = self.sort
spikes = s.spikes
sids = []
for cluster in clusters:
sids.append(cluster.neuron.sids)
sids = np.concatenate(sids)
# save some undo/redo stuff
message = 'delete clusters %r' % [ c.id for c in clusters ]
cc = ClusterChange(sids, spikes, message)
cc.save_old(clusters, s.norder, s.good)
# deselect and delete clusters
spw.DelClusters(clusters)
if len(s.clusters) > 0:
# select cluster that replaces the first of the deleted clusters in norder
selrows = [ cc.oldnorder.index(oldunid) for oldunid in cc.oldunids ]
if len(selrows) > 0:
selrow = selrows[0]
nlist = spw.windows['Sort'].nlist
nlist.selectRows(selrow) # TODO: this sets selection, but not focus
#else: # first of deleted clusters was last in norder, don't select anything
# save more undo/redo stuff
newclusters = []
cc.save_new(newclusters, s.norder, s.good)
spw.AddClusterChangeToStack(cc)
print(cc.message)
def delete_spikes(self):
"""CTRL+Del button press/click"""
self.spykewindow.SplitSpikes(delete=True)
def on_actionSplit_triggered(self):
"""+ button click. Split off selected clusters into their own cluster"""
self.spykewindow.SplitSpikes(delete=False)
def on_actionMergeClusters_triggered(self):
"""Merge button (M) click. Merge selected clusters. Easier to use than
running gac() on selected clusters using a really big sigma to force
them to all merge"""
spw = self.spykewindow
clusters = spw.GetClusters()
s = self.sort
spikes = s.spikes
sids = [] # spikes to merge
for cluster in clusters:
sids.append(cluster.neuron.sids)
# merge any selected usids as well
sids.append(spw.GetUnsortedSpikes())
sids = np.concatenate(sids)
if len(sids) == 0:
return
# save some undo/redo stuff
message = 'merge clusters %r' % [ c.id for c in clusters ]
cc = ClusterChange(sids, spikes, message)
cc.save_old(clusters, s.norder, s.good)
# decide on newnid and where to insert it into norder
newnid = None # merge by default into a new highest numbered nid
inserti = None # order new cluster by default to end of nlist
if len(clusters) == 1:
# keep same position of this one nid in norder, regardless of whether it's
# single-unit, multiunit, or junk
inserti = s.norder.index(clusters[0].id)
elif len(clusters) > 1:
oldunids = np.asarray(cc.oldunids)
suids = oldunids[oldunids > 0] # selected single unit nids
if len(suids) > 0: # merge into largest selected single unit nid:
spikecounts = np.asarray([ s.neurons[suid].nspikes for suid in suids ])
newnid = suids[spikecounts.argmax()]
inserti = s.norder.index(newnid)
# correct for shift due to deletion of oldunids that precede newnid in norder:
inserti -= sum([ s.norder.index(oldunid) < inserti for oldunid in oldunids])
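                # e.g. with norder=[1, 2, 3, 4], merging clusters 1 and 3 into newnid=3
                # starts with inserti=2; only cluster 1 precedes newnid in norder, so
                # inserti becomes 1, keeping the merged cluster right after cluster 2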
# delete selected clusters and deselect selected usids
spw.DelClusters(clusters, update=False)
self.uslist.clearSelection()
# create new cluster
#t0 = time.time()
newcluster = spw.CreateCluster(update=False, id=newnid, inserti=inserti)
neuron = newcluster.neuron
self.MoveSpikes2Neuron(sids, neuron, update=False)
plotdims = spw.GetClusterPlotDims()
newcluster.update_pos()
# save more undo/redo stuff
cc.save_new([newcluster], s.norder, s.good)
spw.AddClusterChangeToStack(cc)
# now do some final updates
spw.UpdateClustersGUI()
spw.ColourPoints(newcluster)
#print('applying clusters to plot took %.3f sec' % (time.time()-t0))
# select newly created cluster
spw.SelectClusters(newcluster)
cc.message += ' into cluster %d' % newcluster.id
print(cc.message)
def on_actionToggleClustersGood_triggered(self):
"""'Good' button (G) click. Toggle 'good' flag of all selected clusters"""
spw = self.spykewindow
clusters = spw.GetClusters()
cids = []
for cluster in clusters:
cluster.neuron.good = not cluster.neuron.good
cids.append(cluster.id)
self.nlist.updateAll() # nlist item colouring will change as a result
print("Toggled 'good' flag of clusters %r" % cids)
def on_actionLabelMultiunit_triggered(self):
"""- button click. Label all selected clusters as multiunit by deleting them
and creating new ones with -ve IDs"""
spw = self.spykewindow
clusters = spw.GetClusters()
s = self.sort
spikes = s.spikes
# only relabel single unit clusters:
clusters = [ cluster for cluster in clusters if cluster.id > 0 ]
if len(clusters) == 0:
return
sids = []
for cluster in clusters:
sids.append(cluster.neuron.sids)
sids = np.concatenate(sids)
# save some undo/redo stuff
message = 'label as multiunit clusters %r' % [ c.id for c in clusters ]
cc = ClusterChange(sids, spikes, message)
cc.save_old(clusters, s.norder, s.good)
# delete old clusters
inserti = s.norder.index(clusters[0].id)
# collect cluster sids before cluster deletion
sidss = [ cluster.neuron.sids for cluster in clusters ]
spw.DelClusters(clusters, update=False)
# create new multiunit clusters
newclusters = []
for sids in sidss:
muid = s.get_nextmuid()
newcluster = spw.CreateCluster(update=False, id=muid, inserti=inserti)
neuron = newcluster.neuron
self.MoveSpikes2Neuron(sids, neuron, update=False)
newcluster.update_pos()
newclusters.append(newcluster)
inserti += 1
# select newly labelled multiunit clusters
spw.SelectClusters(newclusters)
# save more undo/redo stuff
cc.save_new(newclusters, s.norder, s.good)
spw.AddClusterChangeToStack(cc)
print(cc.message)
def on_actionChanSplitClusters_triggered(self):
"""Split by channels button (/) click"""
## TODO: make sure this works on .srf files! Why was chancombosplit being used?
self.spykewindow.maxchansplit()
#self.spykewindow.chancombosplit()
def on_actionDensitySplit_triggered(self):
"""Split cluster pair by density along line between their centers"""
self.spykewindow.densitysplit()
def on_actionRandomSplit_triggered(self):
"""Randomly split each selected cluster in half"""
self.spykewindow.randomsplit()
def on_actionRenumber_triggered(self):
if QApplication.instance().keyboardModifiers() & Qt.ControlModifier:
self.renumber_selected_cluster()
else:
self.renumber_all_clusters()
def renumber_selected_cluster(self):
"""Renumber a single selected cluster to whatever free ID the user wants, for
colouring purposes"""
spw = self.spykewindow
s = self.sort
spikes = s.spikes
cluster = spw.GetCluster() # exactly one selected cluster
oldid = cluster.id
newid = max(s.norder) + 1
newid, ok = QtGui.QInputDialog.getInt(self, "Renumber cluster",
"This will clear the undo/redo stack, and is not undoable.\n"
"Enter new ID:", value=newid)
if not ok:
return
if newid in s.norder:
print("Choose a non-existing nid to renumber to")
return
# deselect cluster
spw.SelectClusters(cluster, on=False)
# rename to newid
cluster.id = newid # this indirectly updates neuron.id
# update cluster and neuron dicts, and spikes array
s.clusters[newid] = cluster
s.neurons[newid] = cluster.neuron
sids = cluster.neuron.sids
spikes['nid'][sids] = newid
# remove duplicate oldid dict entries
del s.clusters[oldid]
del s.neurons[oldid]
# replace oldid with newid in norder
s.norder[s.norder.index(oldid)] = newid
# update colour of any relevant points in cluster plot
spw.ColourPoints(cluster)
# reselect cluster
spw.SelectClusters(cluster)
# some cluster changes in stack may no longer be applicable, reset cchanges
del spw.cchanges[:]
spw.cci = -1
print('Renumbered neuron %d to %d' % (oldid, newid))
def renumber_all_clusters(self):
"""Renumber single unit clusters consecutively from 1, ordered by y position. Do the
same for multiunit (-ve number) clusters, starting from -1. Sorting by y position
makes user inspection of clusters more orderly, makes the presence of duplicate
clusters more obvious, and allows for maximal spatial separation between clusters of
the same colour, reducing colour conflicts"""
val = QtGui.QMessageBox.question(self.panel, "Renumber all clusters",
"Are you sure? This will clear the undo/redo stack, and is not undoable.",
QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if val == QtGui.QMessageBox.No:
return
spw = self.spykewindow
s = self.sort
spikes = s.spikes
# get spatially and numerically ordered lists of new ids
oldids = np.asarray(s.norder)
oldsuids = oldids[oldids > 0]
oldmuids = oldids[oldids < 0]
# this is a bit confusing: find indices that would sort old ids by y pos, but then
# what you really want is to find the y pos *rank* of each old id, so you need to
# take argsort again:
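        # e.g. for y0 positions [300, 100, 200], argsort() gives [1, 2, 0] (the order that
        # would sort them) and argsort().argsort() gives [2, 0, 1] (each cluster's rank by
        # depth), so after +1 the shallowest cluster gets id 1 and the deepest gets id 3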
newsuids = np.asarray([ s.clusters[cid].pos['y0']
for cid in oldsuids ]).argsort().argsort() + 1
newmuids = np.asarray([ s.clusters[cid].pos['y0']
for cid in oldmuids ]).argsort().argsort() + 1
newmuids = -newmuids
# multiunit, followed by single unit, no 0 junk cluster. Can't seem to do it the other
# way around as of Qt 4.7.2 - it seems QListViews don't like having a -ve value in
# the last entry. Doing so causes all 2 digit values in the list to become blank,
# suggests a spacing calculation bug. Reproduce by making last entry multiunit,
# undoing then redoing. Actually, maybe the bug is it doesn't like having a number
# in the last entry with fewer digits than the preceding entry. Only seems to be a
# problem when setting self.setUniformItemSizes(True).
newids = np.concatenate([newmuids, newsuids])
# test
if np.all(oldids == newids):
print('Nothing to renumber: cluster IDs already ordered in y0 and contiguous')
return
# update for replacing oldids with newids
oldids = np.concatenate([oldmuids, oldsuids])
# deselect current selections
selclusters = spw.GetClusters()
oldselids = [ cluster.id for cluster in selclusters ]
spw.SelectClusters(selclusters, on=False)
# delete junk cluster, if it exists
if 0 in s.clusters:
s.remove_neuron(0)
print('Deleted junk cluster 0')
if 0 in oldselids:
oldselids.remove(0)
# replace old ids with new ids
cw = spw.windows['Cluster']
oldclusters = s.clusters.copy() # no need to deepcopy, just copy refs, not clusters
dims = spw.GetClusterPlotDims()
for oldid, newid in zip(oldids, newids):
newid = int(newid) # keep as Python int, not numpy int
if oldid == newid:
continue # no need to waste time removing and recreating this cluster
            # change all occurrences of oldid to newid
cluster = oldclusters[oldid]
cluster.id = newid # this indirectly updates neuron.id
# update cluster and neuron dicts
s.clusters[newid] = cluster
s.neurons[newid] = cluster.neuron
sids = cluster.neuron.sids
spikes['nid'][sids] = newid
# remove any orphaned cluster ids
for oldid in oldids:
if oldid not in newids:
del s.clusters[oldid]
del s.neurons[oldid]
# reset norder
s.norder = []
s.norder.extend(sorted([ int(newid) for newid in newmuids ])[::-1])
s.norder.extend(sorted([ int(newid) for newid in newsuids ]))
# now do some final updates
spw.UpdateClustersGUI()
spw.ColourPoints(s.clusters.values())
# reselect the previously selected (but now renumbered) clusters,
# helps user keep track
oldiis = [ list(oldids).index(oldselid) for oldselid in oldselids ]
newselids = newids[oldiis]
spw.SelectClusters([s.clusters[cid] for cid in newselids])
# all cluster changes in stack are no longer applicable, reset cchanges
del spw.cchanges[:]
spw.cci = -1
print('Renumbering complete')
def on_actionFind_triggered(self):
"""Find current cluster or spike"""
ctrl = QApplication.instance().keyboardModifiers() & Qt.ControlModifier
if ctrl:
self.FindSpike()
else:
self.FindCluster()
def FindCluster(self):
"""Move focus to location of currently selected (single) cluster"""
spw = self.spykewindow
try:
cluster = spw.GetCluster()
except RuntimeError as err:
print(err)
return
gw = spw.windows['Cluster'].glWidget
dims = spw.GetClusterPlotDims()
gw.focus = np.float32([ cluster.normpos[dim] for dim in dims ])
gw.panTo() # pan to new focus
gw.updateGL()
def FindSpike(self):
"""Move focus to location of currently selected (single) spike"""
spw = self.spykewindow
try:
sid = spw.GetSpike()
except RuntimeError as err:
print(err)
return
gw = spw.windows['Cluster'].glWidget
pointis = gw.sids.searchsorted(sid)
gw.focus = gw.points[pointis]
gw.panTo() # pan to new focus
gw.updateGL()
def on_actionSelectRandomSpikes_triggered(self):
"""Select random sample of spikes in current cluster(s), or random sample
        of unsorted spikes if no cluster(s) selected"""
nsamples = int(self.nsamplesComboBox.currentText())
if len(self.nslist.neurons) > 0:
slist = self.nslist
else:
slist = self.uslist
slist.clearSelection() # emits selectionChanged signal, .reset() doesn't
slist.selectRandom(nsamples)
def on_gainComboBox_triggered(self):
"""Set gain of panel based on gainComboBox selection"""
panel = self.panel
panel.gain = float(self.gainComboBox.currentText())
panel.do_layout() # resets axes lims and recalcs panel.pos
panel._update_scale()
panel.draw_refs()
panel.updateAllItems()
def on_actionAlignMin_triggered(self):
self.Align('min')
def on_actionAlignMax_triggered(self):
self.Align('max')
def on_actionAlignBest_triggered(self):
self.Align('best')
def on_actionShiftLeft_triggered(self):
if QApplication.instance().keyboardModifiers() & Qt.ControlModifier:
nt = -1
else:
nt = -2
self.Shift(nt)
def on_actionShiftRight_triggered(self):
if QApplication.instance().keyboardModifiers() & Qt.ControlModifier:
nt = 1
else:
nt = 2
self.Shift(nt)
def on_incltComboBox_triggered(self):
"""Change length of chan selection lines, optionally trigger cluster replot"""
self.panel.update_selvrefs()
self.panel.draw_refs()
#self.spykewindow.ui.plotButton.click()
def get_inclt(self):
"""Return inclt value in incltComboBox"""
return float(self.incltComboBox.currentText()) # us
inclt = property(get_inclt)
def get_tis(self):
"""Return tis (start and end timepoint indices) of duration inclt, asymmetric around
t=0 spike time. Note that any changes to the code here should also be made in the
timepoint selection display code in SortPanel.update_selvrefs()"""
s = self.sort
inclt = self.inclt # duration to include, asymmetric around t=0 spike time (us)
tw = self.panel.tw
dtw = tw[1] - tw[0] # spike time window width
left = intround(abs(tw[0]) / dtw * inclt) # left fraction wrt t=0 spike time
right = inclt - left # right fraction wrt t=0 spike time
tis = s.twts.searchsorted([-left, right])
return tis
tis = property(get_tis)
def on_nPCsPerChanSpinBox_valueChanged(self, val):
self.sort.npcsperchan = val
def on_actionReloadSpikes_triggered(self):
spw = self.spykewindow
sids = spw.GetAllSpikes()
sort = self.sort
if len(sids) == 0:
# if no spikes specified, reload all spikes
sids = sort.spikes['id']
usemeanchans = False
if QApplication.instance().keyboardModifiers() & Qt.ControlModifier:
usemeanchans = True
sort.reload_spikes_and_templates(sids, usemeanchans=usemeanchans)
# add sids to the set of dirtysids to be resaved to .wave file:
spw.update_dirtysids(sids)
# auto-refresh all plots:
self.panel.updateAllItems()
def on_actionFindPrevMostSimilar_triggered(self):
self.findMostSimilarCluster('previous')
def on_actionFindNextMostSimilar_triggered(self):
self.findMostSimilarCluster('next')
def on_actionToggleErrors_toggled(self, checked):
self.panel.showFills(checked)
def on_slider_valueChanged(self, slideri):
self.nslist.clearSelection() # emits selectionChanged signal, .reset() doesn't
if self.nslist.model().sliding == False:
self.nslist.model().sids.sort() # change from nid order to sid order
self.nslist.updateAll() # update to reflect new ordering
self.nslist.model().sliding = True
nsamples = int(self.nsamplesComboBox.currentText())
rows = np.arange(slideri, slideri+nsamples)
self.nslist.selectRows(rows)
def on_slider_sliderPressed(self):
"""Make slider click (without movement) highlight the first nsamples
or fewer spikes when slider is at 0 position"""
slideri = self.slider.value()
if slideri == 0:
nsamples = int(self.nsamplesComboBox.currentText())
nsamples = min(nsamples, self.nslist.model().nspikes)
rows = np.arange(nsamples)
self.nslist.selectRows(rows)
def update_slider(self):
"""Update slider limits and step sizes"""
nsamples = int(self.nsamplesComboBox.currentText())
nsids = len(self.nslist.sids)
ulim = max(nsids-nsamples, 1) # upper limit
self.slider.setRange(0, ulim)
self.slider.setSingleStep(1)
self.slider.setPageStep(nsamples)
def findMostSimilarCluster(self, which='next'):
"""If no chans selected, compare source to next or previous most similar cluster
based on chans the two have in common, while requiring the two have each others'
max chans in common. If chans have been selected, use them as a starting set of
chans to compare on. Also, use only the timepoint range selected in incltComboBox"""
try:
source = self.getClusterComparisonSource()
except RuntimeError as err:
print(err)
return
destinations = list(self.sort.clusters.values())
destinations.remove(source)
selchans = np.sort(self.panel.chans_selected)
if len(selchans) > 0:
srcchans = np.intersect1d(source.neuron.wave.chans, selchans)
if len(srcchans) == 0:
print("Source cluster doesn't overlap with selected chans")
return
else:
srcchans = source.neuron.wave.chans
if self.spykewindow.ui.normButton.isChecked():
print("NOTE: findMostSimilarCluster() doesn't currently take spike amplitude "
"normalization into account. To see the true amplitudes used to compare "
"neuron pairs, turn off normalization")
errors = []
dests = []
t0i, t1i = self.tis # timepoint range selected in incltComboBox
# try and compare source neuron waveform to all destination neuron waveforms
for dest in destinations:
if dest.neuron.wave.data is None: # hasn't been calculated yet
dest.neuron.update_wave()
dstchans = dest.neuron.wave.chans
if len(selchans) > 0:
if not set(selchans).issubset(dstchans):
continue
dstchans = selchans
cmpchans = np.intersect1d(srcchans, dstchans)
if len(cmpchans) == 0: # not comparable
continue
# ensure maxchan of both source and dest neuron are both in cmpchans
if source.neuron.chan not in cmpchans or dest.neuron.chan not in cmpchans:
continue
srcwavedata = source.neuron.wave[cmpchans].data[:, t0i:t1i]
dstwavedata = dest.neuron.wave[cmpchans].data[:, t0i:t1i]
error = core.rms(srcwavedata - dstwavedata)
errors.append(error)
dests.append(dest)
if len(errors) == 0:
print("No sufficiently overlapping clusters on selected chans to compare to")
return
errors = np.asarray(errors)
dests = np.asarray(dests)
desterrsortis = errors.argsort()
if which == 'next':
self._cmpid += 1
elif which == 'previous':
self._cmpid -= 1
else: raise ValueError('Unknown which: %r' % which)
self._cmpid = max(self._cmpid, 0)
self._cmpid = min(self._cmpid, len(dests)-1)
dest = dests[desterrsortis][self._cmpid]
self.spykewindow.SelectClusters(dest)
desterr = errors[desterrsortis][self._cmpid]
print('n%d to n%d rmserror: %.2f uV' %
(source.id, dest.id, self.sort.converter.AD2uV(desterr)))
def getClusterComparisonSource(self):
selclusters = self.spykewindow.GetClusters()
errmsg = 'unclear which cluster to use as source for comparison'
if len(selclusters) == 1:
source = selclusters[0]
self._source = source
self._cmpid = -1 # init/reset
elif len(selclusters) == 2:
source = self._source
if source not in selclusters:
raise RuntimeError(errmsg)
# deselect old destination cluster:
selclusters.remove(source)
self.spykewindow.SelectClusters(selclusters, on=False)
else:
self._source = None # reset for tidiness
raise RuntimeError(errmsg)
return source
def Shift(self, nt):
"""Shift selected sids by nt timepoints"""
s = self.sort
spikes = s.spikes
spw = self.spykewindow
sids = np.concatenate((spw.GetClusterSpikes(), spw.GetUnsortedSpikes()))
self.sort.shift(sids, nt)
print('Shifted %d spikes by %d timepoints' % (len(sids), nt))
unids = np.unique(spikes['nid'][sids])
neurons = [ s.neurons[nid] for nid in unids ]
for neuron in neurons:
neuron.update_wave() # update affected mean waveforms
# add dirtysids to the set to be resaved to .wave file:
spw.update_dirtysids(sids)
# auto-refresh all plots
self.panel.updateAllItems()
def Align(self, to):
"""Align all implicitly selected spikes to min or max, or best fit
on selected chans"""
s = self.sort
spikes = s.spikes
spw = self.spykewindow
sids = np.concatenate((spw.GetClusterSpikes(), spw.GetUnsortedSpikes()))
if to == 'best':
tis = self.tis
# find which chans are common to all sids:
commonchans = s.get_common_chans(sids)[0]
# check selected chans
selchans = spw.get_selchans(sids)
for selchan in selchans:
if selchan not in commonchans:
print("Chan %d not common to all spikes, pick from %r"
% (selchan, list(commonchans)))
return
print('Best fit aligning %d spikes between tis=%r on chans=%r' %
(len(sids), list(tis), selchans))
# numpy implementation:
#dirtysids = s.alignbest(sids, tis, selchans)
# cython implementation:
dirtysids = util.alignbest_cy(s, sids, tis, np.int64(selchans))
else: # to in ['min', 'max']
print('Aligning %d spikes to %s' % (len(sids), to))
dirtysids = s.alignminmax(sids, to)
paligned = len(dirtysids) / len(sids) * 100
print('Aligned %d/%d (%.1f%%) spikes' % (len(dirtysids), len(sids), paligned))
unids = np.unique(spikes['nid'][dirtysids])
neurons = [ s.neurons[nid] for nid in unids ]
for neuron in neurons:
neuron.update_wave() # update affected mean waveforms
# add dirtysids to the set to be resaved to .wave file:
spw.update_dirtysids(dirtysids)
# auto-refresh all plots:
self.panel.updateAllItems()
def RemoveNeuron(self, neuron, update=True):
"""Remove neuron and all its spikes from the GUI and the Sort"""
self.MoveSpikes2List(neuron, neuron.sids, update=update)
self.sort.remove_neuron(neuron.id)
if update:
self.nlist.updateAll()
def MoveSpikes2Neuron(self, sids, neuron=None, update=True):
"""Assign spikes from sort.spikes to a neuron, and trigger eventual update of
mean wave. If neuron is None, create a new one"""
sids = toiter(sids)
spikes = self.sort.spikes
if neuron == None:
neuron = self.sort.create_neuron()
neuron.sids = np.union1d(neuron.sids, sids) # update
spikes['nid'][sids] = neuron.id
if update:
self.sort.update_usids()
self.uslist.updateAll()
if neuron in self.nslist.neurons:
self.nslist.neurons = self.nslist.neurons # trigger nslist refresh
# TODO: selection doesn't seem to be working, always jumps to top of list
#self.uslist.Select(row) # automatically select the new item at that position
neuron.wave.data = None # trigger template mean update
return neuron
def MoveSpikes2List(self, neuron, sids, update=True):
"""Move spikes from a neuron back to the unsorted spike list control"""
sids = toiter(sids)
if len(sids) == 0:
return # nothing to do
spikes = self.sort.spikes
neuron.sids = np.setdiff1d(neuron.sids, sids) # return what's in 1st arr and not in 2nd
spikes['nid'][sids] = 0 # unbind neuron id of sids in spikes struct array
if update:
self.sort.update_usids()
self.uslist.updateAll()
# this only makes sense if the neuron is currently selected in the nlist:
if neuron in self.nslist.neurons:
self.nslist.neurons = self.nslist.neurons # this triggers a refresh
neuron.wave.data = None # triggers an update when it's actually needed
def PlotClusterHistogram(self, X, nids):
"""Plot histogram of given clusters along a single dimension. If two clusters are
given, project them onto axis connecting their centers, and calculate separation
indices between them. Otherwise, plot the distribution of all given clusters
(up to a limit) along the first dimension in X."""
spw = self.spykewindow
mplw = spw.OpenWindow('MPL')
unids = np.unique(nids) # each unid corresponds to a cluster, except possibly unid 0
nclusters = len(unids)
if nclusters == 0:
mplw.ax.clear()
mplw.figurecanvas.draw()
print("No spikes selected")
return
elif nclusters > 5: # to prevent slowdowns, don't plot too many
mplw.ax.clear()
mplw.figurecanvas.draw()
print("Too many clusters selected for cluster histogram")
return
elif nclusters == 2:
calc_measures = True
else:
calc_measures = False
projdimi = 0
ndims = X.shape[1]
points = [] # list of projection of each cluster's points onto dimi
for unid in unids:
sidis, = np.where(nids == unid)
# don't seem to need contig points for NDsepmetric, no need for copy:
points.append(X[sidis])
#points.append(np.ascontiguousarray(X[sidis]))
if calc_measures:
t0 = time.time()
NDsep = util.NDsepmetric(*points, Nmax=20000)
print('NDsep calc took %.3f sec' % (time.time()-t0))
# centers of both clusters, use median:
c0 = np.median(points[0], axis=0) # ndims vector
c1 = np.median(points[1], axis=0)
# line connecting the centers of the two clusters, wrt c0
line = c1-c0
line /= np.linalg.norm(line) # make it unit length
#print('c0=%r, c1=%r, line=%r' % (c0, c1, line))
else:
line = np.zeros(ndims)
line[projdimi] = 1.0 # pick out just the one component
c0 = 0.0 # set origin at 0
# calculate projection of each cluster's points onto line
projs = []
for cpoints in points:
projs.append(np.dot(cpoints-c0, line))
if calc_measures:
d = np.median(projs[1]) - np.median(projs[0])
# measure whether centers are at least 3 of the bigger stdevs away from
# each other:
maxstd = max(projs[0].std(), projs[1].std())
if maxstd == 0:
oneDsep = 0 # not sure if this is ideal
else:
oneDsep = d / (3 * maxstd)
#print('std0=%f, std1=%f, d=%f' % (projs[0].std(), projs[1].std(), d))
proj = np.concatenate(projs)
nbins = max(intround(np.sqrt(len(proj))), 2) # seems like a good heuristic
#print('nbins = %d' % nbins)
edges = np.histogram(proj, bins=nbins)[1]
hists = []
for i in range(nclusters):
hists.append(np.histogram(projs[i], bins=edges)[0])
hist = np.concatenate([hists]) # one cluster hist per row
masses = np.asarray([ h.sum() for h in hist ])
sortedmassis = masses.argsort()
# Take the fraction of area that the two distribs overlap.
# At each bin, take min value of the two distribs. Add up all those min values,
# and divide by the mass of the smaller distrib.
if calc_measures:
overlaparearatio = hist.min(axis=0).sum() / masses[sortedmassis[0]]
djs = core.DJS(hists[0], hists[1])
# plotting:
ledges = edges[:-1] # keep just the left edges, discard the last right edge
assert len(ledges) == nbins
binwidth = ledges[1] - ledges[0]
# plot:
a = mplw.ax
a.clear()
windowtitle = "clusters %r" % list(unids)
print(windowtitle)
mplw.setWindowTitle(windowtitle)
if calc_measures:
#title = ("sep index=%.3f, overlap area ratio=%.3f, DJS=%.3f, sqrt(DJS)=%.3f"
# % (oneDsep, overlaparearatio, djs, np.sqrt(djs)))
title = ("%dDsep=%.3f, 1Dsep=%.3f, OAR=%.3f, DJS=%.3f"
% (ndims, NDsep, oneDsep, overlaparearatio, djs))
print(title)
a.set_title(title)
cs = [ CLUSTERCOLOURDICT[unid] for unid in unids ]
for i, c in enumerate(cs):
# due to white background, replace white clusters with black:
if c == WHITE:
cs[i] = 'black'
# plot the smaller cluster last, to maximize visibility:
for i in sortedmassis[::-1]:
a.bar(ledges, hist[i], width=binwidth, color=cs[i], edgecolor=cs[i])
## TODO: tight_layout call needs updating for MPL 2.2:
#mplw.f.tight_layout(pad=0.3) # crop figure to contents
mplw.figurecanvas.draw()
from __future__ import division
from __future__ import print_function
__authors__ = ['Martin Spacek', 'Reza Lotun']
import os
import sys
import time
import datetime
from copy import copy
import operator
import random
import shutil
import hashlib
import multiprocessing as mp
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QAction, QIcon, QApplication
import numpy as np
import scipy
import scipy.signal
import pylab as pl
import pyximport
pyximport.install(build_in_temp=False, inplace=True)
from . import util
from . import core
from .core import (WaveForm, Gaussian, MAXLONGLONG, R, toiter, intround, printflush, lstrip,
rstrip, lrstrip, pad, td2days, SpykeToolWindow, NList, NSList, dist,
USList, ClusterChange, SpikeSelectionSlider, lrrep2Darrstripis, rollwin2D)
from .detect import DEBUG
from .surf import EPOCH
from .plot import SpikeSortPanel, CLUSTERCOLOURDICT, WHITE
from .__version__ import __version__
LISTWIDTH = 70
PANELWIDTHPERCOLUMN = 120
PANELHEIGHTPERROW = 50
VSCROLLBARWIDTH = 14
SORTWINDOWHEIGHT = 1035
MINSORTWINDOWWIDTH = 566
MEANWAVEMAXSAMPLES = 2000
NPCSPERCHAN = 7
PCALIB = 'mdp'
ICALIB = 'sklearn'
DEFMINISI = 50
MAXGROUPISI = 100000
MAXGROUPDT = 100000000
class Sort(object):
def __init__(self, detector=None, stream=None, tw=None):
self.__version__ = __version__
self.fname = ''
self.user = ''
self.notes = ''
self.detector = detector
self.tw = tw # time window (us) relative to spike time
self.stream = stream
self.probe = stream.probe # only one probe design per sort allowed
self.converter = stream.converter
self.neurons = {}
self.clusters = {} # neurons with multidm params scaled for plotting
self.norder = [] # stores order of neuron ids display in nlist
self.npcsperchan = NPCSPERCHAN
def get_nextnid(self):
nids = list(self.neurons)
if len(nids) == 0:
return 1 # single unit nids start at 1
else:
return max(max(nids) + 1, 1) # at least 1
nextnid = property(get_nextnid)
def get_nextmuid(self):
nids = list(self.neurons)
if len(nids) == 0:
return -1 # multiunit ids start at -1
else:
return min(min(nids) - 1, -1) # at most -1
nextmuid = property(get_nextmuid)
def get_good(self):
good = []
for neuron in self.neurons.values():
try:
if neuron.good:
good.append(neuron.id)
except AttributeError: # neuron is from older sort, no .good attrib
neuron.good = False
return np.asarray(good)
def set_good(self, good):
nids = list(self.neurons)
assert np.all([ nid in nids for nid in good ]) # make sure all nids in good exist
notgood = np.setdiff1d(nids, good)
for nid in notgood:
neuron = self.neurons[nid]
neuron.good = False
for nid in good:
neuron = self.neurons[nid]
neuron.good = True
good = property(get_good, set_good)
def get_stream(self):
try:
return self._stream
except AttributeError:
# this is likely a brand new sort, has yet to be assigned a Stream
return None
def set_stream(self, stream=None):
oldstream = self.stream
if stream != None and oldstream != None:
# do stream types match?
if type(stream) != type(oldstream):
raise ValueError("Stream types don't match: %s, %s"
% (type(oldstream), type(stream)))
if type(stream.probe) != type(oldstream.probe):
raise ValueError("Stream probe types don't match: %s, %s"
% (type(oldstream.probe), type(stream.probe)))
# is one stream fname a superset of the other?
if (stream.fname not in oldstream.fname) and (oldstream.fname not in stream.fname):
raise ValueError("Stream file names are not supersets of each other: %s, %s"
% (oldstream.fname, stream.fname))
else:
print('Stream file names are similar enough to proceed: %s, %s'
% (stream.fname, oldstream.fname))
try:
stream.filtmeth = self.filtmeth
stream.car = self.car
stream.sampfreq = self.sampfreq
stream.shcorrect = self.shcorrect
except AttributeError:
pass # one of the above aren't bound
self._stream = stream
print('Bound stream %r to sort %r' % (stream.fname, self.fname))
self.calc_twts_twi()
stream = property(get_stream, set_stream)
def calc_twts_twi(self):
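        # Derive the template waveform timepoints (twts) spanning the spike time window
        # self.tw at the stream's tres resolution, and cache the first and last timepoint
        # indices relative to t=0 as self.twi.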
tres = self.tres
tw = self.tw
twts = np.arange(tw[0], tw[1], tres)
twts += twts[0] % tres
self.twts = twts
self.twi = intround(twts[0] / tres), intround(twts[-1] / tres)
def update_tw(self, tw):
oldtw = self.tw
self.tw = tw
self.calc_twts_twi()
dtw = np.asarray(tw) - np.asarray(oldtw)
self.spikes['t0'] += dtw[0]
self.spikes['t1'] += dtw[1]
self.spikes['tis'] = self.spikes['tis'] - intround(dtw[0] / self.tres)
for neuron in self.neurons.values():
            if neuron.wave.data is not None:
neuron.update_wave()
print('WARNING: all spike waveforms need to be reloaded!')
def get_tres(self):
return self.stream.tres
tres = property(get_tres)
def __getstate__(self):
d = self.__dict__.copy()
for attr in ['spikes', 'wavedata', 'usids', 'X', 'Xhash']:
try: del d[attr]
except KeyError: pass
return d
def get_nspikes(self):
try: return len(self.spikes)
except AttributeError: return 0
nspikes = property(get_nspikes)
def update_usids(self):
nids = self.spikes['nid']
self.usids, = np.where(nids == 0)
def get_spikes_sortedby(self, attr='id'):
vals = self.spikes[attr]
spikes = self.spikes[vals.argsort()]
return spikes
def get_wave(self, sid):
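        # Reconstruct a WaveForm for spike sid from its stored wavedata,
        # its channel list, and its [t0, t1) time range.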
spikes = self.spikes
nchans = spikes['nchans'][sid]
chans = spikes['chans'][sid, :nchans]
t0 = spikes['t0'][sid]
t1 = spikes['t1'][sid]
wavedata = self.wavedata[sid, 0:nchans]
ts = np.arange(t0, t1, self.tres)
return WaveForm(data=wavedata, ts=ts, chans=chans, tres=self.tres)
def get_maxchan_wavedata(self, sid=None, nid=None):
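        # Return the waveform data on the max channel, either of a single spike (sid)
        # or of a neuron's mean waveform (nid); exactly one of sid or nid must be given.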
if sid != None:
assert nid == None
chani = self.spikes['chani'][sid]
return self.wavedata[sid, chani]
elif nid != None:
assert sid == None
neuron = self.neurons[nid]
chani, = np.where(neuron.chans == neuron.chan)
assert len(chani) == 1
chani = chani[0]
return neuron.wave.data[chani]
def get_mean_wave(self, sids, nid=None):
spikes = self.spikes
nsids = len(sids)
if nsids > MEANWAVEMAXSAMPLES:
step = nsids // MEANWAVEMAXSAMPLES + 1
s = ("get_mean_wave() sampling every %d spikes instead of all %d"
% (step, nsids))
if nid != None:
s = "neuron %d: " % nid + s
print(s)
sids = sids[::step]
nsids = len(sids)
chanss = spikes['chans'][sids]
nchanss = spikes['nchans'][sids]
chanslist = [ chans[:nchans] for chans, nchans in zip(chanss, nchanss) ]
chanpopulation = np.concatenate(chanslist)
groupchans = np.unique(chanpopulation)
wavedata = self.wavedata[sids]
if wavedata.ndim == 2:
wavedata.shape = 1, wavedata.shape[0], wavedata.shape[1]
nt = wavedata.shape[-1]
maxnchans = len(groupchans)
data = np.zeros((maxnchans, nt))
nspikes = np.zeros((maxnchans, 1), dtype=int)
for chans, wd in zip(chanslist, wavedata):
chanis = groupchans.searchsorted(chans) # each spike's chans is a subset of groupchans
data[chanis] += wd[:len(chans)]
nspikes[chanis] += 1
#t0 = time.time()
data /= nspikes # normalize all data points appropriately, this is now the mean
var = np.zeros((maxnchans, nt))
for chans, wd in zip(chanslist, wavedata):
chanis = groupchans.searchsorted(chans) # each spike's chans is a subset of groupchans
var[chanis] += (wd[:len(chans)] - data[chanis]) ** 2
var /= nspikes
std = np.sqrt(var)
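        # Keep only those channels used by at least half of the (sampled) spikes;
        # the histogram counts how many spikes include each channel in groupchans.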
bins = list(groupchans) + [np.inf]
hist, bins = np.histogram(chanpopulation, bins=bins)
chans = groupchans[hist >= nsids/2]
chanis = groupchans.searchsorted(chans)
data = data[chanis]
std = std[chanis]
return WaveForm(data=data, std=std, chans=chans)
def check_ISIs(self, nids='good'):
print('Checking inter-spike intervals')
if nids == 'good':
nids = self.good
elif nids == 'all':
nids = sorted(self.neurons)
for nid in nids:
neuron = self.neurons[nid]
spikets = self.spikes['t'][neuron.sids]
assert spikets.flags['OWNDATA']
spikets.sort()
ndupl = (np.diff(spikets) < DEFMINISI).sum()
if ndupl > 0:
msg = ('n%d has %d duplicate spikes (given DEFMINISI=%d us).\n'
'Remove duplicate spikes with the ISI tool in the Verify tab'
% (nid, ndupl, DEFMINISI))
raise RuntimeError(msg)
def check_wavealign(self, nids='good', maxdti=1):
print('Checking neuron mean waveform alignment')
if nids == 'good':
nids = self.good
elif nids == 'all':
nids = sorted(self.neurons)
nt = self.twi[1] - self.twi[0] + 1 # expected number of points of each chan's wavedata
for nid in nids:
neuron = self.neurons[nid]
wd = self.get_maxchan_wavedata(nid=nid)
assert len(wd) == nt
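            # Find positive and negative peaks on the maxchan mean waveform and pick the
            # "primary" peak: the negative peak if it comes first, otherwise whichever of
            # the two has the larger absolute amplitude.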
ppeakis, _ = scipy.signal.find_peaks(wd)
npeakis, _ = scipy.signal.find_peaks(-wd)
pmaxi = ppeakis[wd[ppeakis].argmax()]
nmaxi = npeakis[wd[npeakis].argmin()]
if nmaxi < pmaxi:
peak1i = nmaxi
else:
pmax, nmax = wd[pmaxi], wd[nmaxi]
if pmax > abs(nmax):
peak1i = pmaxi
else:
peak1i = nmaxi
alignti = 0 - self.twi[0]
dti = peak1i - alignti
if abs(dti) > maxdti:
peak1uV = self.converter.AD2uV(wd[peak1i])
peak1us = intround(self.tres*(peak1i-alignti))
msg = ('Primary peak (%+d uV @ t=%d us) of n%d is %+d timepoints away from '
'the t=0 us alignment point. Shift it closer and try again'
% (peak1uV, peak1us, nid, dti))
raise RuntimeError(msg)
def check_wavepadding(self, nids='good', npad=2):
print('Checking spike waveform padding')
assert npad >= 2
if nids == 'good':
nids = self.good
elif nids == 'all':
nids = sorted(self.neurons)
for nid in nids:
neuron = self.neurons[nid]
for sid in neuron.sids:
wd = self.wavedata[sid]
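                # A spike looks padded if its first or last npad columns are constant;
                # constant all-zero edges are not counted as padding.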
l, r = wd[:, :npad], wd[:, -npad:]
leftpadded = (np.diff(l, axis=1) == 0).all()
rightpadded = (np.diff(r, axis=1) == 0).all()
if leftpadded:
if (wd[:, 0] == 0).all():
leftpadded = False
if rightpadded:
if (wd[:, -1] == 0).all():
rightpadded = False
if leftpadded or rightpadded:
msg = ('n%d has s%d that looks like it has been padded.\n'
'leftpadded, rightpadded = %r, %r\n'
'Reload s%d or n%d or all spikes and try again'
% (nid, sid, leftpadded, rightpadded, sid, nid))
raise RuntimeError(msg)
def check_contiguous_nids(self):
print('Checking that neuron IDs are contiguous')
nids = np.array(list(self.neurons))
nids = nids[nids > 0]
nids.sort()
if (np.diff(nids) != 1).any():
raise RuntimeError('Neuron IDs are not contiguous, renumber all and try again')
def exportptcsfiles(self, basepath, sortpath, user='', notes=''):
self.check_ISIs()
self.check_wavealign()
self.check_wavepadding()
self.check_contiguous_nids()
spikes = self.spikes
exportdt = str(datetime.datetime.now())
exportdt = exportdt.split('.')[0]
if self.stream.is_multi():
streams = self.stream.streams
else:
streams = [self.stream]
print('Exporting "good" clusters to:')
tranges = self.stream.tranges
t0 = tranges[0, 0]
for stream, trange in zip(streams, tranges):
abst0 = trange[0]
dt = abst0 - t0
dt = intround(dt)
self.exportptcsfile(stream, basepath, dt, exportdt, sortpath,
user=user, notes=notes)
def exportptcsfile(self, stream, basepath, dt, exportdt, sortpath, user='', notes=''):
nsamplebytes = 4
nrecs = []
nspikes = 0
for nid in sorted(self.good):
neuron = self.neurons[nid]
spikets = self.spikes['t'][neuron.sids]
assert spikets.flags['OWNDATA']
spikets.sort()
spikets -= dt # export spike times relative to t=0 of this recording
# only include spikes that occurred during this recording
lo, hi = spikets.searchsorted([stream.t0, stream.t1])
spikets = spikets[lo:hi]
if len(spikets) == 0:
continue # don't save empty neurons
nrec = PTCSNeuronRecord(neuron, spikets, nsamplebytes, descr='')
nrecs.append(nrec)
nspikes += len(spikets)
nneurons = len(nrecs)
path = os.path.join(basepath, stream.srcfnameroot)
try: os.mkdir(path)
except OSError: pass
fname = stream.srcfnameroot + '.ptcs'
fullfname = os.path.join(path, fname)
header = PTCSHeader(self, sortpath, stream, nneurons, nspikes, nsamplebytes,
fullfname, exportdt, user=user, notes=notes)
with open(fullfname, 'wb') as f:
header.write(f)
for nrec in nrecs:
nrec.write(f)
print(fullfname)
def exportcsv(self, fname):
sids = []
for nid in sorted(self.good):
neuron = self.neurons[nid]
sids.append(neuron.sids)
sids = np.hstack(sids)
spikes = self.spikes[sids]
tsecs = spikes['t'] / 1e6
nids = spikes['nid']
chans = spikes['chan']
data = np.column_stack([tsecs, nids, chans])
print('Exporting (tsec, nid, chan) of all spikes marked as "good" to %s' % fname)
np.savetxt(fname, data, fmt='%.6f, %d, %d')
def exporttschid(self, basepath):
raise NotImplementedError('Needs to be redone to work with multiple streams')
spikes = self.spikes[self.spikes['nid'] > 0]
dt = str(datetime.datetime.now()) # get an export timestamp
dt = dt.split('.')[0] # ditch the us
dt = dt.replace(' ', '_')
dt = dt.replace(':', '.')
srffnameroot = srffnameroot.replace(' ', '_')
tschidfname = dt + '_' + srffnameroot + '.tschid'
tschid = np.empty((len(spikes), 3), dtype=np.int64)
tschid[:, 0] = spikes['t']
tschid[:, 1] = spikes['chan']
tschid[:, 2] = spikes['nid']
tschid.tofile(os.path.join(path, tschidfname)) # save it
print(tschidfname)
def exportdin(self, basepath):
if self.stream.is_multi(): # self.stream is a MultiStream
streams = self.stream.streams
else: # self.stream is a single Stream
streams = [self.stream]
dinfiledtype=[('TimeStamp', '<i8'), ('SVal', '<i8')] # pairs of int64s
print('Exporting DIN(s) to:')
for stream in streams:
try: # neither of these attribs should exist for recordings with no stimuli:
svrecs = stream.srff.digitalsvalrecords
dsprecs = stream.srff.displayrecords
except AttributeError:
continue # no din to export for this stream
if len(svrecs) == 0 or stream.srff.ndigitalsvalrecords == 0:
raise ValueError("digitalsvalrecords are empty for stream %r. Attribute "
"shouldn't exist" % stream.fname)
path = os.path.join(basepath, stream.srcfnameroot)
try: os.mkdir(path)
except OSError: pass
svrecs = svrecs.astype(dinfiledtype)
# convert to normal n x 2 int64 array
svrecs = svrecs.view(np.int64).reshape(-1, 2)
# Some old recordings (<= ptc15) contain multiple experiments.
# To deal with this, iterate over stream.srff.displayrecords, export one .din
# per displayrecord. Append experiment ID to each .din filename, if necessary.
svrects = svrecs[:, 0]
dsprects = [ dsprec.TimeStamp for dsprec in dsprecs ]
svalrecis = svrects.searchsorted(dsprects)
assert svalrecis[0] == 0
svalrecis = svalrecis[1:] # exclude the trivial 0 index
# split sval records according to displayrecord timestamps:
dins = np.split(svrecs, svalrecis)
assert len(dins) == len(dsprecs)
for eid, din in enumerate(dins):
if eid == 0 and len(dins) == 1:
eidstr = ''
elif len(dins) < 10:
eidstr = '.%d' % eid
else: # include leading zero to maintain alphabetical fname order
eidstr = '.%02d' % eid
dinfname = stream.srcfnameroot + eidstr + '.din'
fullfname = os.path.join(path, dinfname)
din.tofile(fullfname) # save it
print(fullfname)
def exporttextheader(self, basepath):
if self.stream.is_multi(): # self.stream is a MultiStream
streams = self.stream.streams
else: # self.stream is a single Stream
streams = [self.stream]
print('Exporting text header(s) to:')
for stream in streams:
try:
dsprecs = stream.srff.displayrecords
except AttributeError: # no textheader to export for this stream
continue
if len(dsprecs) == 0:
raise ValueError("displayrecords are empty for stream %r. Attribute "
"shouldn't exist" % stream.fname)
path = os.path.join(basepath, stream.srcfnameroot)
try: os.mkdir(path)
except OSError: pass
for eid, dsprec in enumerate(dsprecs):
textheader = dsprec.Header.python_tbl
if eid == 0 and len(dsprecs) == 1:
eidstr = ''
elif len(dsprecs) < 10:
eidstr = '.%d' % eid
else:
eidstr = '.%02d' % eid
textheaderfname = stream.srcfnameroot + eidstr + '.textheader'
fullfname = os.path.join(path, textheaderfname)
with open(fullfname, 'w') as f:
f.write(textheader)
print(fullfname)
def exportall(self, basepath, sortpath):
self.exportptcsfiles(basepath, sortpath)
self.exportdin(basepath)
self.exporttextheader(basepath)
def exportspikewaves(self, sids, selchans, tis, fname, format):
nspikes = len(sids)
chans, chanslist = self.get_common_chans(sids, selchans)
nchans = len(chans)
ti0, ti1 = tis
nt = ti1 - ti0
dtype = self.wavedata.dtype
data = np.zeros((nspikes, nchans, nt), dtype=dtype)
for sii, sid in enumerate(sids):
spikechans = chanslist[sii]
spikechanis = spikechans.searchsorted(chans)
data[sii] = self.wavedata[sid][spikechanis, ti0:ti1]
if format == 'text':
data.shape = nspikes, nchans*nt
stream = self.stream
assert stream.kind == 'highpass'
if format == 'binary':
nids = self.spikes['nid'][sids]
spiketimes = self.spikes['t'][sids]
chanpos = stream.probe.siteloc_arr()
uVperAD = stream.converter.AD2uV(1)
with open(fname, 'wb') as f:
np.savez_compressed(f, data=data, sids=sids, nids=nids,
spiketimes=spiketimes, chans=chans, tis=tis,
chanpos=chanpos, uVperAD=uVperAD)
elif format == 'text':
np.savetxt(fname, data, fmt='%d', delimiter=',')
else:
raise ValueError('Unknown format: %r' % format)
print('Exported %d spikes on chans=%r and tis=%r to %s'
% (nspikes, list(chans), list(tis), fname))
def get_param_matrix(self, kind=None, sids=None, tis=None, selchans=None, norm=False,
dims=None, scale=True):
spikes = self.spikes
dtypefields = list(spikes.dtype.fields)
if sids is None:
sids = spikes['id']
comps = [ dim for dim in dims if dim.startswith('c') and dim[-1].isdigit() ]
rmserror = np.any([ dim == 'RMSerror' for dim in dims ])
ncomp = len(comps)
hascomps = ncomp > 0
if hascomps:
X = self.get_component_matrix(kind, sids, tis=tis, chans=selchans,
minncomp=ncomp, norm=norm)
if rmserror:
rms = self.get_rms_error(sids, tis=tis, chans=selchans)
data = []
for dim in dims:
if dim in dtypefields:
data.append( np.float32(spikes[dim][sids]) )
elif dim.startswith('c') and dim[-1].isdigit():
compid = int(lstrip(dim, 'c'))
data.append( np.float32(X[:, compid]) )
elif dim == 'RMSerror':
data.append( np.float32(rms) )
else:
raise RuntimeError('Unknown dim %r' % dim)
data = np.column_stack(data)
if scale:
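            # Scale each dimension to zero mean and (roughly) unit variance; on multicolumn
            # probes x0 and y0 are both divided by the std of x0, so their relative spatial
            # scaling is preserved.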
for dim, d in zip(dims, data.T):
d -= d.mean()
if dim in ['x0', 'y0'] and self.probe.ncols > 1:
try: x0std
except NameError: x0std = spikes['x0'].std()
if x0std != 0.0:
d /= x0std
else:
dstd = d.std()
if dstd != 0.0:
d /= dstd
return data
def get_component_matrix(self, kind, sids, tis=None, chans=None, minncomp=None,
norm=False):
spikes = self.spikes
nt = self.wavedata.shape[2]
if tis is None:
tis = np.asarray([0, nt])
ti0, ti1 = tis
assert ti0 < ti1 <= nt
nt = ti1 - ti0
chans, chanslist = self.get_common_chans(sids, chans)
nchans = len(chans)
nspikes = len(sids)
if nspikes < 2:
raise RuntimeError("Need at least 2 spikes for %s" % kind)
if nchans == 0:
raise RuntimeError("Spikes have no common chans for %s" % kind)
Xhash = self.get_Xhash(kind, sids, tis, chans, self.npcsperchan, norm)
self.Xhash = Xhash
try: self.X
except AttributeError: self.X = {}
if Xhash in self.X:
print('Cache hit, using cached %ss from tis=%r, chans=%r of %d spikes' %
(kind[:-1], list(tis), list(chans), nspikes))
return self.X[Xhash]
print('Cache miss, (re)calculating %ss' % kind[:-1])
print('Doing %s on tis=%r, chans=%r of %d spikes' %
(kind, list(tis), list(chans), nspikes))
data = np.zeros((nspikes, nchans, nt), dtype=np.float64)
for sii, sid in enumerate(sids):
spikechans = chanslist[sii]
spikechanis = spikechans.searchsorted(chans)
spikedata = self.wavedata[sid][spikechanis, ti0:ti1]
if norm:
maxptp = spikedata.ptp(axis=1).max()
if maxptp != 0:
spikedata = spikedata / maxptp
data[sii] = spikedata
print('Input shape for %s: %r' % (kind, data.shape))
t0 = time.time()
data.shape = nspikes, nchans*nt
print('Reshaped input for %s: %r' % (kind, data.shape))
if kind == 'PCA':
if PCALIB == 'mdp':
import mdp
X = mdp.pca(data, output_dim=5, svd=False)
elif PCALIB == 'sklearn':
# doesn't tap into scipy.linalg.eig compiled code. RandomizedPCA is faster
from sklearn.decomposition import PCA
pca = PCA(n_components=5)
X = pca.fit_transform(data) # do both the fit and the transform
else:
raise ValueError('Invalid PCALIB %r' % PCALIB)
if X.shape[1] < minncomp:
raise RuntimeError("Can't satisfy minncomp=%d request" % minncomp)
elif kind == 'sPCA':
from sklearn.decomposition import SparsePCA
n_components = 5
alpha = 1
n_jobs = mp.cpu_count()
spca = SparsePCA(n_components=n_components, alpha=alpha, n_jobs=n_jobs)
X = spca.fit_transform(data)
elif kind == 'mbsPCA':
from sklearn.decomposition import MiniBatchSparsePCA
n_components = 5
alpha = 1
n_jobs = mp.cpu_count()
mbspca = MiniBatchSparsePCA(n_components=n_components, alpha=alpha, n_jobs=n_jobs)
X = mbspca.fit_transform(data)
elif kind == 'NMF':
from sklearn.decomposition import NMF
n_components = 5
init = None
nmf = NMF(n_components=n_components, init=init)
X = nmf.fit_transform(data)
elif kind == 'tSNE':
ncomp = min((self.npcsperchan*nchans, data.shape[1]))
print('ncomp: %d' % ncomp)
import mdp
data = mdp.pca(data, output_dim=ncomp)
from sklearn.manifold import TSNE
n_components = 3
tsne = TSNE(n_components=n_components)
X = tsne.fit_transform(data)
elif kind == 'ICA':
maxncomp = intround(np.sqrt(nspikes))
if maxncomp < minncomp:
raise RuntimeError("Can't satisfy minncomp=%d request" % minncomp)
if data.shape[0] <= data.shape[1]:
raise RuntimeError('Need more observations than dimensions for ICA')
# limit number of PCs to feed into ICA, keep up to npcsperchan components per
# chan on average:
ncomp = min((self.npcsperchan*nchans, maxncomp, data.shape[1]))
if ICALIB == 'mdp':
import mdp # delay as late as possible
# do PCA first, to reduce dimensionality and speed up ICA:
print('ncomp: %d' % ncomp)
data = mdp.pca(data, output_dim=ncomp)
# nonlinearity g='pow3', ie x**3. tanh seems to separate better,
# but is a bit slower. gaus seems to be slower still, and no better
# than tanh, but these are just vague impressions.
# defaults to whitened=False, ie assumes data isn't whitened
node = mdp.nodes.FastICANode(g='pow3')
X = node(data)
pm = node.get_projmatrix()
X = X[:, np.any(pm, axis=0)]
elif ICALIB == 'sklearn':
from sklearn.decomposition import FastICA
alg = 'parallel'
fun = 'logcosh'
maxiter = 100
tol = 0.5
fastica = FastICA(n_components=ncomp, algorithm=alg,
whiten=True, fun=fun, fun_args=None,
max_iter=maxiter, tol=tol, w_init=None,
random_state=None)
X = fastica.fit_transform(data)
print('fastica niters: %d' % (fastica.n_iter_))
else:
raise ValueError('Invalid ICALIB %r' % ICALIB)
if X.shape[1] < 3:
raise RuntimeError('Need at least 3 columns')
'''
# sort by abs(kurtosis) of each IC (column)
k = scipy.stats.kurtosis(X, axis=0)
ki = abs(k).argsort()[::-1] # decreasing order of abs(kurtosis)
print('Sort by abs(kurtosis):')
print(k[ki])
X = X[:, ki] # sort the ICs
'''
ne = core.negentropy(X, axis=0)
assert (ne > 0).all()
nei = ne.argsort()[::-1]
print('Sort by negentropy:')
print(ne[nei])
X = X[:, nei]
'''
import pylab as pl
pl.figure()
pl.imshow(pm)
pl.colorbar()
pl.title('original projmatrix')
pl.figure()
pl.imshow(pm[:, ki])
pl.colorbar()
pl.title('decreasing abs(kurtosis) projmatrix')
pl.figure()
pl.imshow(pm[:, nei])
pl.colorbar()
pl.title('decreasing negentropy projmatrix')
'''
else:
raise ValueError('Unknown kind %r' % kind)
print('Output shape for %s: %r' % (kind, X.shape))
self.X[Xhash] = X
print('%s took %.3f sec' % (kind, time.time()-t0))
unids = list(np.unique(spikes['nid'][sids]))
for nid in unids:
            # skip the junk cluster (nid 0): it might not have any chans common to all
            # its spikes, and therefore can't have PCA/ICA done on it
if nid != 0:
self.clusters[nid].update_comppos(X, sids)
return X
def get_rms_error(self, sids, tis=None, chans=None):
spikes = self.spikes
nids = np.unique(spikes['nid'][sids])
nid = nids[0]
if len(nids) > 1 or nid == 0:
raise RuntimeError("Spikes must all belong to the same (non-junk) cluster for "
"RMS error calculation")
nt = self.wavedata.shape[2]
if tis is None:
tis = np.asarray([0, nt])
ti0, ti1 = tis
assert ti0 < ti1 <= nt
nt = ti1 - ti0
chans, chanslist = self.get_common_chans(sids, chans)
nchans = len(chans)
nspikes = len(sids)
if nchans == 0:
raise RuntimeError("Spikes have no common chans for RMS error")
print('Getting RMS error on tis=%r, chans=%r of %d spikes' %
(list(tis), list(chans), nspikes))
data = np.zeros((nspikes, nchans, nt), dtype=np.float64)
for sii, sid in enumerate(sids):
spikechans = chanslist[sii]
spikechanis = spikechans.searchsorted(chans)
data[sii] = self.wavedata[sid][spikechanis, ti0:ti1]
wave = self.neurons[nid].get_wave()
chanis = wave.chans.searchsorted(chans)
meandata = np.float64(wave.data[chanis, ti0:ti1])
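        # RMS error of each spike relative to the neuron's mean waveform,
        # averaged over timepoints and channels: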
se = (data - meandata) ** 2
mse = se.mean(axis=2).mean(axis=1)
return np.sqrt(mse)
def get_common_chans(self, sids, chans=None):
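        # Return the channels common to all sids (optionally restricted to a user-supplied
        # chan subset), along with each spike's own channel list.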
spikes = self.spikes
chanss = spikes['chans'][sids]
nchanss = spikes['nchans'][sids]
chanslist = [ cs[:ncs] for cs, ncs in zip(chanss, nchanss) ]
commonchans = util.intersect1d_uint8(chanslist)
if chans is not None and len(chans) > 0:
diffchans = np.setdiff1d(chans, commonchans)
commonchans = np.intersect1d(chans, commonchans)
if len(diffchans) > 0:
print('WARNING: ignored chans %r not common to all spikes' % list(diffchans))
return commonchans, chanslist
def get_Xhash(self, kind, sids, tis, chans, npcsperchan, norm):
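        # Build an MD5 digest over all parameters that determine a component matrix,
        # used as the cache key in self.X.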
h = hashlib.md5()
h.update(kind.encode())
h.update(sids)
h.update(tis)
h.update(chans)
if kind == 'ICA':
h.update(str(npcsperchan).encode())
h.update(str(norm).encode())
return h.hexdigest()
def create_neuron(self, id=None, inserti=None):
if id == None:
id = self.nextnid
if id in self.neurons:
raise RuntimeError('Neuron %d already exists' % id)
id = int(id)
neuron = Neuron(self, id)
self.neurons[neuron.id] = neuron
if inserti == None:
self.norder.append(neuron.id)
else:
self.norder.insert(inserti, neuron.id)
return neuron
def remove_neuron(self, id):
try:
del self.neurons[id]
del self.clusters[id]
self.norder.remove(id)
except (KeyError, ValueError):
pass
def shift(self, sids, nt):
spikes = self.spikes
wd = self.wavedata
for sid in sids:
core.shiftpad(wd[sid], nt) # modifies wd in-place
# update spike parameters:
dt = intround(nt * self.tres) # amount of time to shift by, signed, in us
# so we can later reload the wavedata accurately, shifting the waveform right and
# padding it on its left requires decrementing the associated timepoints
# (and vice versa)
spikes['t'][sids] -= dt
spikes['t0'][sids] -= dt
spikes['t1'][sids] -= dt
# might result in some out of bounds tis because the original peaks
# have shifted off the ends. Opposite sign wrt timepoints above, referencing within
# wavedata:
spikes['tis'][sids] = spikes['tis'][sids] + nt
# this in-place operation raises a TypeError in numpy 1.11.2, something related to
# subtracting an int from an unsigned int:
#spikes['tis'][sid] += nt
# caller should treat all sids as dirty
def alignminmax(self, sids, to):
if not self.stream.is_open():
raise RuntimeError("No open stream to reload spikes from")
spikes = self.spikes
V0s = spikes['V0'][sids]
V1s = spikes['V1'][sids]
Vss = np.column_stack((V0s, V1s))
alignis = spikes['aligni'][sids]
b = np.column_stack((alignis==0, alignis==1)) # 2D boolean array
if to == 'min':
i = Vss[b] > 0 # indices into sids of spikes aligned to the max peak
elif to == 'max':
i = Vss[b] < 0 # indices into sids of spikes aligned to the min peak
else:
raise ValueError('Unknown to %r' % to)
sids = sids[i] # sids that need realigning
nspikes = len(sids)
print("Realigning %d spikes" % nspikes)
if nspikes == 0: # nothing to do
return [] # no sids to mark as dirty
multichantis = spikes['tis'][sids] # nspikes x nchans x 2 arr
chanis = spikes['chani'][sids] # nspikes arr of max chanis
# peak tis on max chan of each spike, convert from uint8 to int32 for safe math
tis = np.int32(multichantis[np.arange(nspikes), chanis]) # nspikes x 2 arr
# NOTE: tis aren't always in temporal order!
dpeaktis = tis[:, 1] - tis[:, 0]
dpeaks = spikes['dt'][sids]
ordered = dpeaktis > 0
reversed = dpeaktis < 0
alignis = spikes['aligni'][sids]
alignis0 = alignis == 0
alignis1 = alignis == 1
dpeaki = np.zeros(nspikes, dtype=int)
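        # dpeaki is +1 for spikes that need realigning from the temporally earlier peak to
        # the later one, and -1 for the opposite direction; spike times then shift by the
        # signed inter-peak interval, while tis shift by the opposite sign within wavedata.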
dpeaki[ordered & alignis0 | reversed & alignis1] = 1
dpeaki[ordered & alignis1 | reversed & alignis0] = -1
dts = dpeaki * dpeaks
dtis = -dpeaki * abs(dpeaktis)
spikes['t'][sids] += dts
spikes['t0'][sids] += dts
spikes['t1'][sids] += dts
spikes['tis'][sids] = spikes['tis'][sids] + dtis[:, None, None]
spikes['aligni'][sids[alignis0]] = 1
spikes['aligni'][sids[alignis1]] = 0
self.reload_spikes(sids)
return sids
def choose_new_meanchans(self, sids):
print('Choosing new channel set for all selected spikes')
det = self.detector
meanwave = self.get_mean_wave(sids)
maxchan = meanwave.chans[meanwave.data.ptp(axis=1).argmax()]
maxchani = det.chans.searchsorted(maxchan)
distances = det.dm.data[maxchani]
chanis = distances.argsort()[:det.maxnchansperspike]
meanchans = det.chans[chanis]
meanchans.sort()
print('meanchans: %r' % list(meanchans))
furthestchan = det.chans[chanis[-1]]
print('furthestchan: %d' % furthestchan)
furthestchani = meanchans.searchsorted(furthestchan)
assert len(meanchans) == det.maxnchansperspike
assert maxchan in meanchans
return meanchans, furthestchan, furthestchani
def reload_spikes(self, sids, usemeanchans=False):
        nsids = len(sids)
        print('(Re)loading %d spikes' % nsids)
stream = self.stream
if not stream.is_open():
raise RuntimeError("No open stream to reload spikes from")
spikes = self.spikes
det = self.detector
ver_lte_03 = float(self.__version__) <= 0.3
if ver_lte_03:
print('Fixing potentially incorrect time values during spike reloading')
nfixed = 0
treload = time.time()
if usemeanchans:
if ver_lte_03:
raise RuntimeError("Best not to choose new chans from mean until after "
"converting to .sort >= 0.4")
meanchans, furthestchan, furthestchani = self.choose_new_meanchans(sids)
nmeanchans = len(meanchans)
ts = spikes[sids]['t']
if not (np.diff(ts) >= 0).all():
print("Selected sids aren't in temporal order, sorting by time...")
tsis = ts.argsort()
sids = sids[tsis]
print("Done sorting sids by time")
splitis = np.where(np.diff(ts) >= MAXGROUPISI)[0] + 1
groups = np.split(sids, splitis)
groupi = 0
while groupi < len(groups):
group = groups[groupi]
            # spike times relative to the first spike in this group:
            relts = spikes[group]['t'] - spikes[group[0]]['t']
            splitis = np.where(np.diff(relts // MAXGROUPDT) > 0)[0] + 1
nsubgroups = len(splitis) + 1
if nsubgroups > 1:
del groups[groupi]
subgroups = np.split(group, splitis)
groups[groupi:groupi] = subgroups
groupi += len(subgroups)
else:
groupi += 1
print('ngroups: %d' % len(groups))
sidi = 0
for groupi, group in enumerate(groups):
printflush('<%d>' % groupi, end='')
assert len(group) > 0
t0 = spikes[group[0]]['t0']
t1 = spikes[group[-1]]['t1']
if ver_lte_03:
t0 -= 5000
t1 += 5000
unionchans = np.unique(spikes['chans'][group])
if usemeanchans:
spikes['nchans'][group] = nmeanchans
# we're using the max num chans, so assign the full array:
spikes['chans'][group] = meanchans
unionchans = np.unique(np.hstack((unionchans, meanchans)))
if 0 not in stream.chans:
unionchans = unionchans[unionchans != 0]
tempwave = stream(t0, t1, unionchans)
# slice out each spike's reloaded data from tempwave:
for sid in group:
if sidi % 10000 == 0:
printflush(sidi, end='')
elif sidi % 1000 == 0:
printflush('.', end='')
if usemeanchans:
# check that each spike's maxchan is in meanchans:
chan = spikes[sid]['chan']
if chan not in meanchans:
print("spike %d: replacing furthestchan %d with spike's maxchan %d"
% (sid, furthestchan, chan))
nchans = spikes[sid]['nchans']
chans = spikes[sid]['chans'][:nchans]
chans[furthestchani] = chan
chans.sort()
#spikes['chans'][sid][:nchans] = chans
spike = spikes[sid]
nchans = spike['nchans']
chans = spike['chans'][:nchans]
rd = tempwave[spike['t0']:spike['t1']][chans].data # reloaded data
if ver_lte_03: # fix potentially incorrect spike tis
result = self.reload_spike_ver_lte_03(sid, nchans, tempwave, rd)
if result == None:
sidi += 1 # inc status counter
continue # rollwin2D won't work, skip to next sid
else:
rd, fixed = result
if fixed:
nfixed += 1
nt = rd.shape[1]
self.wavedata[sid, :nchans, :nt] = rd
sidi += 1
print()
if ver_lte_03:
print('Fixed time values of %d spikes' % nfixed)
print('(Re)loaded %d spikes, took %.3f sec' % (len(sids), time.time()-treload))
def reload_spike_ver_lte_03(self, sid, nchans, tempwave, rd):
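        # For sorts saved with version <= 0.3, a spike's t/t0/t1/phasetis may be stale:
        # locate the old (constant-stripped) waveform inside the freshly reloaded data and,
        # if it sits at an offset, correct the spike's time values before reslicing.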
        spikes = self.spikes
        od = self.wavedata[sid, :nchans]  # old data
lefti, righti = lrrep2Darrstripis(od)
od = od[:, lefti:righti]
width = od.shape[1]
if not width <= rd.shape[1]:
print('')
print("WARNING: od.shape[1]=%d > rd.shape[1]=%d for sid %d" %
(od.shape[1], rd.shape[1], sid))
return
odinndis = np.where((rollwin2D(rd, width) == od).all(axis=1).all(axis=1))[0]
if len(odinndis) == 0:
dnt = 0
elif len(odinndis) == 1:
odinndi = odinndis[0]
dnt = odinndi - lefti
else:
raise RuntimeError("Multiple hits of old data in new, don't know "
"how to reload spike %d" % sid)
newrd, fixed = rd, False
if dnt != 0:
dt = intround(dnt * self.tres) # time to correct by, signed, in us
spikes['t'][sid] += dt # should remain halfway between t0 and t1
spikes['t0'][sid] += dt
spikes['t1'][sid] += dt
# might result in some out of bounds tis because the original peaks
            # have shifted off the ends. Use opposite sign because we're referencing
            # within wavedata:
spikes['phasetis'][sid] = spikes['phasetis'][sid] - dnt
            spike = spikes[sid]
            chans = spike['chans'][:nchans]  # this spike's channels
            # reslice tempwave now that t0 and t1 have changed:
            newrd = tempwave[spike['t0']:spike['t1']][chans].data
fixed = True
return newrd, fixed
def reload_spikes_and_templates(self, sids, usemeanchans=False):
self.reload_spikes(sids, usemeanchans=usemeanchans)
unids = np.unique(self.spikes['nid'][sids])
unids = unids[unids != 0]
neurons = [ self.neurons[nid] for nid in unids ]
for neuron in neurons:
neuron.update_wave() # update affected mean waveforms
def init_spike_alignment(self):
print('Setting initial spike alignment points')
ntis, nalignis = {}, {} # tis and aligni derived from each neuron's mean waveform
for neuron in self.neurons.values():
nwave = neuron.get_wave()
mintis = nwave.data.argmin(axis=1)
maxtis = nwave.data.argmax(axis=1)
ntis[neuron.id] = np.column_stack([mintis, maxtis])
nalignis[neuron.id] = np.argmin([mintis.std(), maxtis.std()])
AD2uV = self.converter.AD2uV
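        # Copy each neuron's template-derived peak timepoints (tis) and alignment index
        # onto all of its member spikes, then recompute dt, V0, V1 and Vpp from each
        # spike's own waveform at those timepoints.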
for s, wd in zip(self.spikes, self.wavedata):
sid = s['id']
if sid % 100000 == 0:
printflush(sid, end='')
elif sid % 10000 == 0:
printflush('.', end='')
nid = s['nid']
nchans = s['nchans']
chans = s['chans'][:nchans]
neuronchans = self.neurons[nid].wave.chans
assert (chans == neuronchans).all()
s['tis'][:nchans] = ntis[nid]
s['aligni'] = nalignis[nid]
maxchani = s['chani']
t0i, t1i = int(s['tis'][maxchani, 0]), int(s['tis'][maxchani, 1])
s['dt'] = abs(t1i - t0i) / self.sampfreq * 1e6
            s['V0'], s['V1'] = AD2uV(wd[maxchani, t0i]), AD2uV(wd[maxchani, t1i])
s['Vpp'] = abs(s['V1'] - s['V0'])
print()
def spatially_localize_spikes(self, sortwin, method='fit'):
det = self.detector
weights2f = self.extractor.weights2spatial
weights2spatialmean = self.extractor.weights2spatialmean
f = self.extractor.f
nreject = 0
print('Running spatial localization on all %d spikes' % self.nspikes)
tstart = time.clock()
        for s, wd in zip(self.spikes, self.wavedata):
# see core.rowtake() or util.rowtake_cy() for indexing explanation:
sid = s['id']
# print out progress on a regular basis:
if sid % 10000 == 0:
printflush(sid, end='')
elif sid % 1000 == 0:
printflush('.', end='')
chan = s['chan']
nchans = s['nchans']
chans = s['chans'][:nchans]
maxchani = s['chani']
chanis = det.chans.searchsorted(chans)
w = np.float32(wd[np.arange(s['nchans'])[:, None], s['tis'][:nchans]]) # nchans x 2
w = abs(w).sum(axis=1) # Vpp for each chan, measured at t0i and t1i
x = det.siteloc[chanis, 0] # 1D array (row)
y = det.siteloc[chanis, 1]
if method == 'fit':
# localize by fitting extractor.f function to wavedata
params = weights2f(f, w, x, y, maxchani)
elif method == 'mean':
# set localization to Vpp-weighted spatial mean and 0 sigma:
x0, y0 = weights2spatialmean(w, x, y)
# a very ad-hoc guess for spatial sigma:
sx = 2 * dist((x0, y0), self.probe.SiteLoc[chan])
params = x0, y0, sx, sx
else:
print('Unknown method %r' % method)
if params == None: # presumably a non-localizable many-channel noise event
#printflush('X', end='') # to indicate a rejected spike
if DEBUG:
spiket = intround(s['t']) # nearest us
det.log("Reject spike %d at t=%d based on fit params" % (sid, spiket))
neuron = self.neurons[s['nid']]
# remove from its neuron, add to unsorted list of spikes:
sortwin.MoveSpikes2List(neuron, [sid], update=False)
# manually set localization params to Vpp-weighted spatial mean and 0 sigma:
x0, y0 = weights2spatialmean(w, x, y)
# set sigma to 0 um, and then later round lockr up to 1 um so that only one
# raster tick shows up for each rejected spike, reducing clutter
params = x0, y0, 0, 0
nreject += 1
# Save spatial fit params, and "lockout" only the channels within lockrx*sx
# of the fit spatial location of the spike, up to a max of inclr. "Lockout"
# in this case only refers to which channels are highlighted with a raster tick
# for each spike:
s['x0'], s['y0'], s['sx'], s['sy'] = params
x0, y0 = s['x0'], s['y0']
# lockout radius for this spike:
lockr = min(det.lockrx*s['sx'], det.inclr) # in um
lockr = max(lockr, 1) # at least 1 um, so at least the maxchan gets a tick
# test y coords of chans in y array, ylockchaniis can be used to index
# into x, y and chans:
ylockchaniis, = np.where(np.abs(y - y0) <= lockr) # convert bool arr to int
# test Euclid distance from x0, y0 for each ylockchani:
lockchaniis = ylockchaniis.copy()
for ylockchanii in ylockchaniis:
if dist((x[ylockchanii], y[ylockchanii]), (x0, y0)) > lockr:
# Euclidean distance is too great, remove ylockchanii from lockchaniis:
lockchaniis = lockchaniis[lockchaniis != ylockchanii]
lockchans = chans[lockchaniis]
nlockchans = len(lockchans)
s['lockchans'][:nlockchans], s['nlockchans'] = lockchans, nlockchans
print('Spatial localization of spikes took %.3f s' % (time.clock() - tstart))
return nreject
class Neuron(object):
def __init__(self, sort, id=None):
self.sort = sort
self.id = id # neuron id
self.wave = WaveForm() # init to empty waveform
self.sids = np.array([], dtype=int) # indices of spikes that make up this neuron
# relative reference timestamp, here for symmetry with fellow spike rec
# (obj.t comes up sometimes):
self.t = 0
self.plt = None # Plot currently holding self
self.cluster = None
self.good = False # user can mark this neuron as "good" if so desired
#self.fname # not here, let's allow neurons to have spikes from different files?
def get_chans(self):
if self.wave.data is None:
self.update_wave()
return self.wave.chans
chans = property(get_chans)
def get_chan(self):
if self.wave.data is None:
self.update_wave()
return self.wave.chans[self.wave.data.ptp(axis=1).argmax()]
chan = property(get_chan)
def get_nspikes(self):
return len(self.sids)
nspikes = property(get_nspikes)
def __getstate__(self):
d = self.__dict__.copy()
#d.pop('X', None)
#d.pop('Xhash', None)
# don't save plot self is assigned to, since that'll change anyway on unpickle
d['plt'] = None
return d
def get_wave(self):
# many neuron waveforms saved in old .sort files won't have a wave.std field:
try:
self.wave.std
except AttributeError:
return self.update_wave()
if self.wave == None or self.wave.data is None or self.wave.std is None:
return self.update_wave()
else:
return self.wave
def update_wave(self):
sort = self.sort
spikes = sort.spikes
if len(self.sids) == 0:
raise RuntimeError("n%d has no spikes and its waveform can't be updated" % self.id)
meanwave = sort.get_mean_wave(self.sids, nid=self.id)
# update self's Waveform object
self.wave.data = meanwave.data
self.wave.std = meanwave.std
self.wave.ts = sort.twts.copy()
self.wave.chans = meanwave.chans
self.wave.tres = sort.tres
return self.wave
def __sub__(self, other):
selfwavedata, otherwavedata = self.getCommonWaveData(other.chan, other.chans,
other.wave.data)
return selfwavedata - otherwavedata
def getCommonWaveData(self, otherchan, otherchans, otherwavedata):
chans = np.intersect1d(self.chans, otherchans, assume_unique=True)
if len(chans) == 0:
raise ValueError('No common chans')
if self.chan not in chans or otherchan not in chans:
raise ValueError("maxchans aren't part of common chans")
selfchanis = self.chans.searchsorted(chans)
otherchanis = otherchans.searchsorted(chans)
return self.wave.data[selfchanis], otherwavedata[otherchanis]
class PTCSHeader(object):
FORMATVERSION = 3 # overall .ptcs file format version, not header format version
def __init__(self, sort, sortpath, stream, nneurons, nspikes, nsamplebytes,
fullfname, exportdt, user='', notes=''):
self.sort = sort
self.stream = stream
self.nneurons = nneurons
self.nspikes = nspikes
self.nsamplebytes = nsamplebytes
homelessfullfname = lstrip(fullfname, os.path.expanduser('~'))
sortfname = sort.fname
sortfullfname = os.path.join(sortpath, sortfname)
sortfmoddt = str(datetime.datetime.fromtimestamp(os.path.getmtime(sortfullfname)))
sortfmoddt = sortfmoddt.split('.')[0] # ditch the us
sortfsize = os.path.getsize(sortfullfname) # in bytes
d = {'file_type': '.ptcs (polytrode clustered spikes) file',
'original_fname': homelessfullfname, 'export_time': exportdt,
'sort': {'fname': sortfname, 'path': sortpath,
'fmtime': sortfmoddt, 'fsize': sortfsize},
'user': user, 'notes': notes}
descr = str(d)
self.descr = pad(descr, align=8)
self.srcfname = pad(lstrip(stream.fname, '../'), align=8)
self.pttype = pad(stream.probe.name, align=8)
self.dt = stream.datetime
self.dtstr = pad(self.dt.isoformat(), align=8)
def write(self, f):
s = self.sort
np.int64(self.FORMATVERSION).tofile(f) # formatversion
np.uint64(len(self.descr)).tofile(f) # ndescrbytes
f.write(self.descr) # descr
np.uint64(self.nneurons).tofile(f) # nneurons
np.uint64(self.nspikes).tofile(f) # nspikes
np.uint64(self.nsamplebytes).tofile(f) # nsamplebytes
np.uint64(s.sampfreq).tofile(f) # samplerate
np.uint64(len(self.pttype)).tofile(f) # npttypebytes
f.write(self.pttype) # pttype
np.uint64(s.stream.probe.nchans).tofile(f) # nptchans
np.float64(s.stream.probe.siteloc_arr()).tofile(f) # chanpos
np.uint64(len(self.srcfname)).tofile(f) # nsrcfnamebytes
f.write(self.srcfname) # srcfname
np.float64(td2days(self.dt - EPOCH)).tofile(f) # datetime (in days)
np.uint64(len(self.dtstr)).tofile(f) # ndatetimestrbytes
f.write(self.dtstr)
class PTCSNeuronRecord(object):
def __init__(self, neuron, spikets=None, nsamplebytes=None, descr=''):
n = neuron
AD2uV = n.sort.converter.AD2uV
self.neuron = neuron
self.spikets = spikets # constrained to stream range, may be < neuron.sids
self.wavedtype = {2: np.float16, 4: np.float32, 8: np.float64}[nsamplebytes]
if n.wave.data is None or n.wave.std is None: # some may have never been displayed
n.update_wave()
# wavedata and wavestd are nchans * nt * nsamplebytes long:
self.wavedata = pad(self.wavedtype(AD2uV(n.wave.data)), align=8)
self.wavestd = pad(self.wavedtype(AD2uV(n.wave.std)), align=8)
self.descr = pad(descr, align=8)
def write(self, f):
n = self.neuron
np.int64(n.id).tofile(f) # nid
np.uint64(len(self.descr)).tofile(f) # ndescrbytes
f.write(self.descr) # descr, bytes
np.float64(np.nan).tofile(f) # clusterscore
np.float64(n.cluster.pos['x0']).tofile(f) # xpos (um)
np.float64(n.cluster.pos['y0']).tofile(f) # ypos (um)
np.float64(n.cluster.pos['sx']).tofile(f) # sigma (um)
np.uint64(len(n.wave.chans)).tofile(f) # nchans
np.uint64(n.wave.chans).tofile(f) # chanids
np.uint64(n.chan).tofile(f) # maxchanid
np.uint64(len(n.wave.ts)).tofile(f) # nt
np.uint64(self.wavedata.nbytes).tofile(f) # nwavedatabytes
self.wavedata.tofile(f) # wavedata
np.uint64(self.wavestd.nbytes).tofile(f) # nwavestdbytes
self.wavestd.tofile(f) # wavestd
np.uint64(len(self.spikets)).tofile(f) # nspikes
np.uint64(self.spikets).tofile(f) # spike timestamps (us)
class PanelScrollArea(QtGui.QScrollArea):
def keyPressEvent(self, event):
key = event.key()
# seems the ENTER key needs be handled to directly call plot, unlike in sortwin
# where the event is passed on to be handled by the list widgets
if key in [Qt.Key_Enter, Qt.Key_Return]:
sortwin = self.topLevelWidget()
sortwin.parent().ui.plotButton.click()
else:
QtGui.QScrollArea.keyPressEvent(self, event) # pass it on
class SortWindow(SpykeToolWindow):
def __init__(self, parent, pos=None):
SpykeToolWindow.__init__(self, parent, flags=QtCore.Qt.Tool)
self.spykewindow = parent
ncols = self.sort.probe.ncols
nrows = self.sort.probe.nrows
# try and allow the same amount of horizontal space per column for 2 and 3 col probes:
if ncols <= 2:
self.MAINSPLITTERPOS = 300
else:
self.MAINSPLITTERPOS = 265 # move it more to the left
# make horizontal sort slider use as little vertical space as possible
self.VSPLITTERPOS = 1
panelwidth = PANELWIDTHPERCOLUMN * ncols
panelheight = PANELHEIGHTPERROW * nrows
width = max(self.MAINSPLITTERPOS + panelwidth + VSCROLLBARWIDTH, MINSORTWINDOWWIDTH)
size = (width, SORTWINDOWHEIGHT)
self.setWindowTitle('Sort Window')
self.move(*pos)
self.resize(*size)
self._source = None # source cluster for comparison
self.slider = SpikeSelectionSlider(Qt.Horizontal, self)
self.slider.setInvertedControls(True)
self.slider.setToolTip('Position of sliding spike selection time window')
self.connect(self.slider, QtCore.SIGNAL('valueChanged(int)'),
self.on_slider_valueChanged)
self.connect(self.slider, QtCore.SIGNAL('sliderPressed()'),
self.on_slider_sliderPressed)
self.nlist = NList(self)
self.nlist.setToolTip('Neuron list')
self.nslist = NSList(self)
self.nslist.setToolTip('Sorted spike list')
self.uslist = USList(self) # should really be multicolumn tableview
self.uslist.setToolTip('Unsorted spike list')
tw = self.spykewindow.sort.tw
self.panel = SpikeSortPanel(self, tw=tw)
self.panel.setMinimumSize(QtCore.QSize(panelwidth, panelheight))
self.panelscrollarea = PanelScrollArea(self)
self.panelscrollarea.setWidget(self.panel)
self.panelscrollarea.setMinimumWidth(panelwidth + VSCROLLBARWIDTH)
self.panelscrollarea.setWidgetResizable(True) # allows panel to size bigger than min
self.vsplitter = QtGui.QSplitter(Qt.Vertical)
self.vsplitter.addWidget(self.slider)
self.vsplitter.addWidget(self.nlist)
self.vsplitter.addWidget(self.nslist)
self.vsplitter.addWidget(self.uslist)
self.mainsplitter = QtGui.QSplitter(Qt.Horizontal)
self.mainsplitter.addWidget(self.vsplitter)
self.mainsplitter.addWidget(self.panelscrollarea)
self.layout = QtGui.QVBoxLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.addWidget(self.mainsplitter)
mainwidget = QtGui.QWidget(self)
mainwidget.setLayout(self.layout)
self.setCentralWidget(mainwidget)
self.toolbar = self.setupToolbar()
self.addToolBar(self.toolbar)
def setupToolbar(self):
toolbar = QtGui.QToolBar(self)
toolbar.setObjectName('toolbar')
toolbar.setFloatable(True)
toolbar.setIconSize(QtCore.QSize(16, 16)) # like in main spyke window
actionDelete = QAction(QIcon('res/edit-delete.svg'), 'Del', self)
tt = ('<nobr><b>Del</b> Delete selected spikes or clusters</nobr>\n'
'<nobr><b>CTRL+Del</b> Delete selected spikes</nobr>')
actionDelete.setToolTip(tt)
self.connect(actionDelete, QtCore.SIGNAL('triggered()'),
self.on_actionDelete_triggered)
toolbar.addAction(actionDelete)
actionMergeClusters = QAction('M', self)
tt = '<nobr><b>M</b> Merge clusters</nobr>'
actionMergeClusters.setToolTip(tt)
self.connect(actionMergeClusters, QtCore.SIGNAL('triggered()'),
self.on_actionMergeClusters_triggered)
toolbar.addAction(actionMergeClusters)
#actionToggleClustersGood = QAction(QIcon('res/dialog-apply.svg'), 'G', self)
actionToggleClustersGood = QAction('G', self)
tt = '<nobr><b>G</b> Toggle clusters as "good"</nobr>'
actionToggleClustersGood.setToolTip(tt)
self.connect(actionToggleClustersGood, QtCore.SIGNAL('triggered()'),
self.on_actionToggleClustersGood_triggered)
toolbar.addAction(actionToggleClustersGood)
actionSplit = QAction('+', self)
tt = '<nobr><b>+</b> Split off selected spikes</nobr>'
actionSplit.setToolTip(tt)
self.connect(actionSplit, QtCore.SIGNAL('triggered()'),
self.on_actionSplit_triggered)
toolbar.addAction(actionSplit)
actionLabelMultiunit = QAction('-', self)
tt = '<nobr><b>-</b> Label clusters as multiunit</nobr>'
actionLabelMultiunit.setToolTip(tt)
self.connect(actionLabelMultiunit, QtCore.SIGNAL('triggered()'),
self.on_actionLabelMultiunit_triggered)
toolbar.addAction(actionLabelMultiunit)
actionChanSplitClusters = QAction('/', self)
tt = '<nobr><b>/</b> Split clusters by channels</nobr>'
actionChanSplitClusters.setToolTip(tt)
self.connect(actionChanSplitClusters, QtCore.SIGNAL('triggered()'),
self.on_actionChanSplitClusters_triggered)
toolbar.addAction(actionChanSplitClusters)
actionDensitySplit = QAction('P', self)
tt = ('<nobr><b>P</b> Split cluster pair by density along line between '
'their centers</nobr>')
actionDensitySplit.setToolTip(tt)
self.connect(actionDensitySplit, QtCore.SIGNAL('triggered()'),
self.on_actionDensitySplit_triggered)
toolbar.addAction(actionDensitySplit)
actionRandomSplit = QAction('\\', self)
tt = ('<nobr><b>\\</b> Randomly split each selected cluster in half</nobr>')
actionRandomSplit.setToolTip(tt)
self.connect(actionRandomSplit, QtCore.SIGNAL('triggered()'),
self.on_actionRandomSplit_triggered)
toolbar.addAction(actionRandomSplit)
        #actionRenumber = QAction(QIcon('res/gtk-edit.svg'), '#', self)
        actionRenumber = QAction('#', self)
        tt = ('<nobr><b>#</b> Renumber all clusters in vertical spatial order</nobr>\n'
              '<nobr><b>CTRL+#</b> Renumber selected cluster</nobr>')
actionRenumber.setToolTip(tt)
self.connect(actionRenumber, QtCore.SIGNAL('triggered()'),
self.on_actionRenumber_triggered)
toolbar.addAction(actionRenumber)
actionFind = QAction(QIcon('res/edit-find.svg'), 'Find', self)
tt = ('<nobr><b>CTRL+F</b> Find spike in cluster plot</nobr>')
actionFind.setToolTip(tt)
self.connect(actionFind, QtCore.SIGNAL('triggered()'),
self.on_actionFind_triggered)
toolbar.addAction(actionFind)
actionSelectRandomSpikes = QAction('R', self)
tt = '<nobr><b>R</b> Select random sample of spikes of current clusters</nobr>'
actionSelectRandomSpikes.setToolTip(tt)
self.connect(actionSelectRandomSpikes, QtCore.SIGNAL('triggered()'),
self.on_actionSelectRandomSpikes_triggered)
toolbar.addAction(actionSelectRandomSpikes)
actionToggleErrors = QAction('E', self)
actionToggleErrors.setCheckable(True)
actionToggleErrors.setChecked(self.panel.enable_fills)
tt = '<nobr><b>CTRL+E</b> Toggle visibility of template error limits</nobr>'
actionToggleErrors.setToolTip(tt)
self.connect(actionToggleErrors, QtCore.SIGNAL('toggled(bool)'),
self.on_actionToggleErrors_toggled)
toolbar.addAction(actionToggleErrors)
self.actionToggleErrors = actionToggleErrors
nsamplesComboBox = QtGui.QComboBox(self)
nsamplesComboBox.setToolTip('Number of spikes per cluster to randomly select')
nsamplesComboBox.setFocusPolicy(Qt.NoFocus)
nsamplesComboBox.addItems(['100', '50', '20', '10', '5', '1'])
nsamplesComboBox.setCurrentIndex(2)
toolbar.addWidget(nsamplesComboBox)
self.connect(nsamplesComboBox, QtCore.SIGNAL('activated(int)'),
self.on_actionSelectRandomSpikes_triggered)
self.nsamplesComboBox = nsamplesComboBox
gainComboBox = QtGui.QComboBox(self)
gainComboBox.setToolTip('Waveform gain (default: 1.5)')
gainComboBox.setFocusPolicy(Qt.NoFocus)
gainComboBox.addItems(['4', '3.75', '3.5', '3.25', '3', '2.75', '2.5', '2.25', '2',
'1.75', '1.5', '1.25', '1', '0.75', '0.5', '0.25'])
gainComboBox.setCurrentIndex(3)
toolbar.addWidget(gainComboBox)
self.connect(gainComboBox, QtCore.SIGNAL('activated(int)'),
self.on_gainComboBox_triggered)
self.gainComboBox = gainComboBox
#actionAlignMin = QAction(QIcon('res/go-bottom.svg'), 'Min', self)
actionAlignMin = QAction('Min', self)
actionAlignMin.setToolTip('Align selected spikes to min')
self.connect(actionAlignMin, QtCore.SIGNAL('triggered()'),
self.on_actionAlignMin_triggered)
toolbar.addAction(actionAlignMin)
#actionAlignMax = QAction(QIcon('res/go-top.svg'), 'Max', self)
actionAlignMax = QAction('Max', self)
actionAlignMax.setToolTip('Align selected spikes to max')
self.connect(actionAlignMax, QtCore.SIGNAL('triggered()'),
self.on_actionAlignMax_triggered)
toolbar.addAction(actionAlignMax)
#actionAlignBest = QAction(QIcon('res/emblem-OK.png'), 'Best', self)
actionAlignBest = QAction('B', self)
tt = '<nobr><b>B</b> Align selected spikes by best fit</nobr>'
actionAlignBest.setToolTip(tt)
self.connect(actionAlignBest, QtCore.SIGNAL('triggered()'),
self.on_actionAlignBest_triggered)
toolbar.addAction(actionAlignBest)
actionShiftLeft = QAction('[', self)
tt = ('<nobr><b>[</b> Shift selected spikes 2 points left</nobr>\n'
'<nobr><b>CTRL+[</b> Shift selected spikes 1 point left</nobr>')
actionShiftLeft.setToolTip(tt)
self.connect(actionShiftLeft, QtCore.SIGNAL('triggered()'),
self.on_actionShiftLeft_triggered)
toolbar.addAction(actionShiftLeft)
actionShiftRight = QAction(']', self)
tt = ('<nobr><b>]</b> Shift selected spikes 2 points right</nobr>\n'
'<nobr><b>CTRL+]</b> Shift selected spikes 1 point right</nobr>')
actionShiftRight.setToolTip(tt)
self.connect(actionShiftRight, QtCore.SIGNAL('triggered()'),
self.on_actionShiftRight_triggered)
toolbar.addAction(actionShiftRight)
incltComboBox = QtGui.QComboBox(self)
incltComboBox.setToolTip("Waveform duration (us) to include for component "
"analysis,\nasymmetric around spike time")
incltComboBox.setFocusPolicy(Qt.NoFocus)
dtw = self.sort.tw[1] - self.sort.tw[0] # spike time window width
incltstep = intround(dtw / 10) # evenly spaced inclt values
incltvals = np.arange(dtw, 0, -incltstep)
incltComboBox.addItems([ str(incltval) for incltval in incltvals ])
incltComboBox.setCurrentIndex(0)
toolbar.addWidget(incltComboBox)
self.connect(incltComboBox, QtCore.SIGNAL('activated(int)'),
self.on_incltComboBox_triggered)
self.incltComboBox = incltComboBox
#incltunitsLabel = QtGui.QLabel('us', self)
#toolbar.addWidget(incltunitsLabel)
nPCsPerChanSpinBox = QtGui.QSpinBox(self)
nPCsPerChanSpinBox.setToolTip("Number of PCs to use per channel to feed into ICA")
nPCsPerChanSpinBox.setFocusPolicy(Qt.NoFocus)
toolbar.addWidget(nPCsPerChanSpinBox)
nPCsPerChanSpinBox.setMinimum(1)
self.connect(nPCsPerChanSpinBox, QtCore.SIGNAL('valueChanged(int)'),
self.on_nPCsPerChanSpinBox_valueChanged)
nPCsPerChanSpinBox.setValue(self.sort.npcsperchan)
self.nPCsPerChanSpinBox = nPCsPerChanSpinBox
#actionFindPrevMostSimilar = QAction(QIcon('res/go-previous.svg'), '<', self)
actionFindPrevMostSimilar = QAction('<', self)
tt = '<nobr><b><</b> Find previous most similar cluster</nobr>'
actionFindPrevMostSimilar.setToolTip(tt)
self.connect(actionFindPrevMostSimilar, QtCore.SIGNAL('triggered()'),
self.on_actionFindPrevMostSimilar_triggered)
toolbar.addAction(actionFindPrevMostSimilar)
#actionFindNextMostSimilar = QAction(QIcon('res/go-next.svg'), '>', self)
actionFindNextMostSimilar = QAction('>', self)
tt = '<nobr><b>></b> Find next most similar cluster</nobr>'
actionFindNextMostSimilar.setToolTip(tt)
self.connect(actionFindNextMostSimilar, QtCore.SIGNAL('triggered()'),
self.on_actionFindNextMostSimilar_triggered)
toolbar.addAction(actionFindNextMostSimilar)
actionReloadSpikes = QAction(QIcon('res/view-refresh.svg'), 'Reload', self)
tt = ('<nobr><b>F5</b> Reload waveforms of selected spikes. '
'If none selected, reload all</nobr>\n'
'<nobr><b>CTRL+F5</b> Use mean waveform to choose chans to reload</nobr>')
actionReloadSpikes.setToolTip(tt)
self.connect(actionReloadSpikes, QtCore.SIGNAL('triggered()'),
self.on_actionReloadSpikes_triggered)
toolbar.addAction(actionReloadSpikes)
actionSave = QAction(QIcon('res/document-save.svg'), '&Save', self)
actionSave.setToolTip('Save sort panel to file')
self.connect(actionSave, QtCore.SIGNAL('triggered()'),
self.on_actionSave_triggered)
toolbar.addAction(actionSave)
return toolbar
def get_sort(self):
return self.spykewindow.sort
sort = property(get_sort) # make this a property for proper behaviour after unpickling
def closeEvent(self, event):
self.spykewindow.HideWindow('Sort')
def mousePressEvent(self, event):
buttons = event.buttons()
if buttons == QtCore.Qt.MiddleButton:
#self.on_actionSelectRandomSpikes_triggered()
self.spykewindow.ui.plotButton.click() # same as hitting ENTER in nslist
elif buttons == QtCore.Qt.RightButton:
self.clear()
def keyPressEvent(self, event):
key = event.key()
modifiers = event.modifiers()
ctrl = modifiers & Qt.ControlModifier # ctrl is down
spw = self.spykewindow
if key == Qt.Key_A: # ignored in SpykeListViews
spw.ui.plotButton.click() # same as hitting ENTER in nslist
elif key == Qt.Key_X: # ignored in SpykeListViews
spw.ui.plotXcorrsButton.click()
elif key == Qt.Key_N: # ignored in SpykeListViews
spw.ui.normButton.click()
elif key == Qt.Key_Escape: # deselect all spikes and all clusters
self.clear()
elif key == Qt.Key_Delete:
self.on_actionDelete_triggered()
elif key == Qt.Key_M: # ignored in SpykeListViews
self.on_actionMergeClusters_triggered()
elif key == Qt.Key_G: # ignored in SpykeListViews
self.on_actionToggleClustersGood_triggered()
elif key == Qt.Key_Equal: # ignored in SpykeListViews
self.on_actionSplit_triggered()
elif key == Qt.Key_Minus: # ignored in SpykeListViews
self.on_actionLabelMultiunit_triggered()
elif key == Qt.Key_Slash: # ignored in SpykeListViews
self.on_actionChanSplitClusters_triggered()
elif key == Qt.Key_P: # ignored in SpykeListViews
self.on_actionDensitySplit_triggered()
elif key == Qt.Key_Backslash: # ignored in SpykeListViews
self.on_actionRandomSplit_triggered()
elif key == Qt.Key_NumberSign: # ignored in SpykeListViews
self.on_actionRenumber_triggered()
elif key == Qt.Key_F: # ignored in SpykeListViews
if ctrl:
self.FindSpike()
else:
self.FindCluster()
elif key == Qt.Key_R: # ignored in SpykeListViews
self.on_actionSelectRandomSpikes_triggered()
elif key == Qt.Key_Space: # ignored in SpykeListViews
if ctrl:
SpykeToolWindow.keyPressEvent(self, event) # pass it on
else:
spw.on_clusterButton_clicked()
elif key == Qt.Key_B: # ignored in SpykeListViews
self.on_actionAlignBest_triggered()
elif key == Qt.Key_BracketLeft: # ignored in SpykeListViews
self.on_actionShiftLeft_triggered()
elif key == Qt.Key_BracketRight: # ignored in SpykeListViews
self.on_actionShiftRight_triggered()
elif key == Qt.Key_Comma: # ignored in SpykeListViews
self.on_actionFindPrevMostSimilar_triggered()
elif key == Qt.Key_Period: # ignored in SpykeListViews
self.on_actionFindNextMostSimilar_triggered()
elif key == Qt.Key_F5: # ignored in SpykeListViews
self.on_actionReloadSpikes_triggered()
elif key == Qt.Key_E: # ignored in SpykeListViews
if ctrl:
self.actionToggleErrors.toggle()
else:
self.clear() # E is synonymous with ESC
elif key == Qt.Key_C: # toggle between PCA and ICA, ignored in SpykeListViews
c = str(spw.ui.componentAnalysisComboBox.currentText())
if c == 'PCA':
index = spw.ui.componentAnalysisComboBox.findText('ICA')
spw.ui.componentAnalysisComboBox.setCurrentIndex(index)
elif c == 'ICA':
index = spw.ui.componentAnalysisComboBox.findText('PCA')
spw.ui.componentAnalysisComboBox.setCurrentIndex(index)
spw.on_plotButton_clicked()
elif key == Qt.Key_T: # toggle plotting against time, ignored in SpykeListViews
z = str(spw.ui.zDimComboBox.currentText())
if z == 't':
spw.on_c0c1c2Button_clicked() # plot in pure component analysis space
else:
spw.on_c0c1tButton_clicked() # plot against time
elif key == Qt.Key_W: # toggle plotting against RMSError, ignored in SpykeListViews
z = str(spw.ui.zDimComboBox.currentText())
if z == 'RMSerror':
spw.on_c0c1c2Button_clicked() # plot in pure component analysis space
else:
spw.ui.zDimComboBox.setCurrentIndex(3)
spw.on_plotButton_clicked() # plot against RMSError
elif key in [Qt.Key_Enter, Qt.Key_Return]:
# this is handled at a lower level by on_actionItem_triggered
# in the various listview controls
pass
else:
SpykeToolWindow.keyPressEvent(self, event) # pass it on
def clear(self):
spw = self.spykewindow
clusters = spw.GetClusters()
if len(self.uslist.selectedIndexes()) > 0:
self.uslist.clearSelection()
elif self.nslist.nrowsSelected > 0:
self.nslist.clearSelection()
elif len(clusters) == 2 and self._source in clusters:
clusters.remove(self._source)
spw.SelectClusters(clusters, on=False)
elif 0 in spw.GetClusterIDs():
for cluster in spw.GetClusters():
if cluster.id == 0:
spw.SelectClusters([cluster], on=False)
break
else:
self.nlist.clearSelection()
# reset colours in cluster plot:
gw = spw.windows['Cluster'].glWidget
gw.colour()
gw.updateGL()
def on_actionDelete_triggered(self):
selsids = self.spykewindow.GetSpikes() # IDs of explicitly selected spikes
nselsids = len(selsids)
if (QApplication.instance().keyboardModifiers() & Qt.ControlModifier
or nselsids > 0):
self.delete_spikes()
else:
self.delete_clusters()
def delete_clusters(self):
spw = self.spykewindow
clusters = spw.GetClusters()
s = self.sort
spikes = s.spikes
sids = []
for cluster in clusters:
sids.append(cluster.neuron.sids)
sids = np.concatenate(sids)
# save some undo/redo stuff
message = 'delete clusters %r' % [ c.id for c in clusters ]
cc = ClusterChange(sids, spikes, message)
cc.save_old(clusters, s.norder, s.good)
# deselect and delete clusters
spw.DelClusters(clusters)
if len(s.clusters) > 0:
# select cluster that replaces the first of the deleted clusters in norder
selrows = [ cc.oldnorder.index(oldunid) for oldunid in cc.oldunids ]
if len(selrows) > 0:
selrow = selrows[0]
nlist = spw.windows['Sort'].nlist
nlist.selectRows(selrow) # TODO: this sets selection, but not focus
#else: # first of deleted clusters was last in norder, don't select anything
newclusters = []
cc.save_new(newclusters, s.norder, s.good)
spw.AddClusterChangeToStack(cc)
print(cc.message)
def delete_spikes(self):
self.spykewindow.SplitSpikes(delete=True)
def on_actionSplit_triggered(self):
self.spykewindow.SplitSpikes(delete=False)
def on_actionMergeClusters_triggered(self):
spw = self.spykewindow
clusters = spw.GetClusters()
s = self.sort
spikes = s.spikes
sids = []
for cluster in clusters:
sids.append(cluster.neuron.sids)
sids.append(spw.GetUnsortedSpikes())
sids = np.concatenate(sids)
if len(sids) == 0:
return
message = 'merge clusters %r' % [ c.id for c in clusters ]
cc = ClusterChange(sids, spikes, message)
cc.save_old(clusters, s.norder, s.good)
newnid = None
inserti = None
if len(clusters) == 1:
# single-unit, multiunit, or junk
inserti = s.norder.index(clusters[0].id)
elif len(clusters) > 1:
oldunids = np.asarray(cc.oldunids)
suids = oldunids[oldunids > 0] # selected single unit nids
if len(suids) > 0: # merge into largest selected single unit nid:
spikecounts = np.asarray([ s.neurons[suid].nspikes for suid in suids ])
newnid = suids[spikecounts.argmax()]
inserti = s.norder.index(newnid)
# correct for shift due to deletion of oldunids that precede newnid in norder:
inserti -= sum([ s.norder.index(oldunid) < inserti for oldunid in oldunids])
# delete selected clusters and deselect selected usids
spw.DelClusters(clusters, update=False)
self.uslist.clearSelection()
# create new cluster
#t0 = time.time()
newcluster = spw.CreateCluster(update=False, id=newnid, inserti=inserti)
neuron = newcluster.neuron
self.MoveSpikes2Neuron(sids, neuron, update=False)
plotdims = spw.GetClusterPlotDims()
newcluster.update_pos()
# save more undo/redo stuff
cc.save_new([newcluster], s.norder, s.good)
spw.AddClusterChangeToStack(cc)
# now do some final updates
spw.UpdateClustersGUI()
spw.ColourPoints(newcluster)
#print('applying clusters to plot took %.3f sec' % (time.time()-t0))
# select newly created cluster
spw.SelectClusters(newcluster)
cc.message += ' into cluster %d' % newcluster.id
print(cc.message)
def on_actionToggleClustersGood_triggered(self):
spw = self.spykewindow
clusters = spw.GetClusters()
cids = []
for cluster in clusters:
cluster.neuron.good = not cluster.neuron.good
cids.append(cluster.id)
self.nlist.updateAll() # nlist item colouring will change as a result
print("Toggled 'good' flag of clusters %r" % cids)
def on_actionLabelMultiunit_triggered(self):
spw = self.spykewindow
clusters = spw.GetClusters()
s = self.sort
spikes = s.spikes
# only relabel single unit clusters:
clusters = [ cluster for cluster in clusters if cluster.id > 0 ]
if len(clusters) == 0:
return
sids = []
for cluster in clusters:
sids.append(cluster.neuron.sids)
sids = np.concatenate(sids)
# save some undo/redo stuff
message = 'label as multiunit clusters %r' % [ c.id for c in clusters ]
cc = ClusterChange(sids, spikes, message)
cc.save_old(clusters, s.norder, s.good)
# delete old clusters
inserti = s.norder.index(clusters[0].id)
# collect cluster sids before cluster deletion
sidss = [ cluster.neuron.sids for cluster in clusters ]
spw.DelClusters(clusters, update=False)
# create new multiunit clusters
newclusters = []
for sids in sidss:
muid = s.get_nextmuid()
newcluster = spw.CreateCluster(update=False, id=muid, inserti=inserti)
neuron = newcluster.neuron
self.MoveSpikes2Neuron(sids, neuron, update=False)
newcluster.update_pos()
newclusters.append(newcluster)
inserti += 1
# select newly labelled multiunit clusters
spw.SelectClusters(newclusters)
# save more undo/redo stuff
cc.save_new(newclusters, s.norder, s.good)
spw.AddClusterChangeToStack(cc)
print(cc.message)
def on_actionChanSplitClusters_triggered(self):
## TODO: make sure this works on .srf files! Why was chancombosplit being used?
self.spykewindow.maxchansplit()
#self.spykewindow.chancombosplit()
def on_actionDensitySplit_triggered(self):
self.spykewindow.densitysplit()
def on_actionRandomSplit_triggered(self):
self.spykewindow.randomsplit()
def on_actionRenumber_triggered(self):
if QApplication.instance().keyboardModifiers() & Qt.ControlModifier:
self.renumber_selected_cluster()
else:
self.renumber_all_clusters()
def renumber_selected_cluster(self):
spw = self.spykewindow
s = self.sort
spikes = s.spikes
cluster = spw.GetCluster() # exactly one selected cluster
oldid = cluster.id
newid = max(s.norder) + 1
newid, ok = QtGui.QInputDialog.getInt(self, "Renumber cluster",
"This will clear the undo/redo stack, and is not undoable.\n"
"Enter new ID:", value=newid)
if not ok:
return
if newid in s.norder:
print("Choose a non-existing nid to renumber to")
return
# deselect cluster
spw.SelectClusters(cluster, on=False)
# rename to newid
cluster.id = newid # this indirectly updates neuron.id
# update cluster and neuron dicts, and spikes array
s.clusters[newid] = cluster
s.neurons[newid] = cluster.neuron
sids = cluster.neuron.sids
spikes['nid'][sids] = newid
# remove duplicate oldid dict entries
del s.clusters[oldid]
del s.neurons[oldid]
# replace oldid with newid in norder
s.norder[s.norder.index(oldid)] = newid
# update colour of any relevant points in cluster plot
spw.ColourPoints(cluster)
# reselect cluster
spw.SelectClusters(cluster)
# some cluster changes in stack may no longer be applicable, reset cchanges
del spw.cchanges[:]
spw.cci = -1
print('Renumbered neuron %d to %d' % (oldid, newid))
def renumber_all_clusters(self):
val = QtGui.QMessageBox.question(self.panel, "Renumber all clusters",
"Are you sure? This will clear the undo/redo stack, and is not undoable.",
QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if val == QtGui.QMessageBox.No:
return
spw = self.spykewindow
s = self.sort
spikes = s.spikes
# get spatially and numerically ordered lists of new ids
oldids = np.asarray(s.norder)
oldsuids = oldids[oldids > 0]
oldmuids = oldids[oldids < 0]
# this is a bit confusing: find indices that would sort old ids by y pos, but then
# what you really want is to find the y pos *rank* of each old id, so you need to
# take argsort again:
newsuids = np.asarray([ s.clusters[cid].pos['y0']
for cid in oldsuids ]).argsort().argsort() + 1
newmuids = np.asarray([ s.clusters[cid].pos['y0']
for cid in oldmuids ]).argsort().argsort() + 1
newmuids = -newmuids
        # multiunit, followed by single unit, no 0 junk cluster. Can't seem to do it the other
        # way around: the list view doesn't seem to like a negative value in the last entry.
        # Doing so causes all 2 digit values in the list to become blank, which suggests a
        # spacing calculation bug. Reproduce by making the last entry multiunit, then undoing
        # and redoing. Actually, maybe the bug is it doesn't like having a number with fewer
        # digits than the preceding entry in the last position.
newids = np.concatenate([newmuids, newsuids])
if np.all(oldids == newids):
print('Nothing to renumber: cluster IDs already ordered in y0 and contiguous')
return
oldids = np.concatenate([oldmuids, oldsuids])
selclusters = spw.GetClusters()
oldselids = [ cluster.id for cluster in selclusters ]
spw.SelectClusters(selclusters, on=False)
if 0 in s.clusters:
s.remove_neuron(0)
print('Deleted junk cluster 0')
if 0 in oldselids:
oldselids.remove(0)
cw = spw.windows['Cluster']
oldclusters = s.clusters.copy()
dims = spw.GetClusterPlotDims()
for oldid, newid in zip(oldids, newids):
newid = int(newid)
if oldid == newid:
continue
cluster = oldclusters[oldid]
cluster.id = newid
s.clusters[newid] = cluster
s.neurons[newid] = cluster.neuron
sids = cluster.neuron.sids
spikes['nid'][sids] = newid
for oldid in oldids:
if oldid not in newids:
del s.clusters[oldid]
del s.neurons[oldid]
s.norder = []
s.norder.extend(sorted([ int(newid) for newid in newmuids ])[::-1])
s.norder.extend(sorted([ int(newid) for newid in newsuids ]))
spw.UpdateClustersGUI()
spw.ColourPoints(s.clusters.values())
oldiis = [ list(oldids).index(oldselid) for oldselid in oldselids ]
newselids = newids[oldiis]
spw.SelectClusters([s.clusters[cid] for cid in newselids])
del spw.cchanges[:]
spw.cci = -1
print('Renumbering complete')
def on_actionFind_triggered(self):
ctrl = QApplication.instance().keyboardModifiers() & Qt.ControlModifier
if ctrl:
self.FindSpike()
else:
self.FindCluster()
def FindCluster(self):
spw = self.spykewindow
try:
cluster = spw.GetCluster()
except RuntimeError as err:
print(err)
return
gw = spw.windows['Cluster'].glWidget
dims = spw.GetClusterPlotDims()
gw.focus = np.float32([ cluster.normpos[dim] for dim in dims ])
gw.panTo()
gw.updateGL()
def FindSpike(self):
spw = self.spykewindow
try:
sid = spw.GetSpike()
except RuntimeError as err:
print(err)
return
gw = spw.windows['Cluster'].glWidget
pointis = gw.sids.searchsorted(sid)
gw.focus = gw.points[pointis]
gw.panTo()
gw.updateGL()
def on_actionSelectRandomSpikes_triggered(self):
nsamples = int(self.nsamplesComboBox.currentText())
if len(self.nslist.neurons) > 0:
slist = self.nslist
else:
slist = self.uslist
slist.clearSelection()
slist.selectRandom(nsamples)
def on_gainComboBox_triggered(self):
panel = self.panel
panel.gain = float(self.gainComboBox.currentText())
panel.do_layout() # resets axes lims and recalcs panel.pos
panel._update_scale()
panel.draw_refs()
panel.updateAllItems()
def on_actionAlignMin_triggered(self):
self.Align('min')
def on_actionAlignMax_triggered(self):
self.Align('max')
def on_actionAlignBest_triggered(self):
self.Align('best')
def on_actionShiftLeft_triggered(self):
if QApplication.instance().keyboardModifiers() & Qt.ControlModifier:
nt = -1
else:
nt = -2
self.Shift(nt)
def on_actionShiftRight_triggered(self):
if QApplication.instance().keyboardModifiers() & Qt.ControlModifier:
nt = 1
else:
nt = 2
self.Shift(nt)
def on_incltComboBox_triggered(self):
self.panel.update_selvrefs()
self.panel.draw_refs()
#self.spykewindow.ui.plotButton.click()
def get_inclt(self):
return float(self.incltComboBox.currentText()) # us
inclt = property(get_inclt)
def get_tis(self):
s = self.sort
inclt = self.inclt # duration to include, asymmetric around t=0 spike time (us)
tw = self.panel.tw
dtw = tw[1] - tw[0] # spike time window width
left = intround(abs(tw[0]) / dtw * inclt) # left fraction wrt t=0 spike time
right = inclt - left # right fraction wrt t=0 spike time
tis = s.twts.searchsorted([-left, right])
return tis
tis = property(get_tis)
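    # Worked example for get_tis() above (values are illustrative, not taken from this file):
    # with panel.tw = (-400, 600) us and inclt = 500 us, dtw = 1000, so left = 400/1000 * 500
    # = 200 us and right = 300 us, i.e. tis brackets the samples from -200 us to +300 us
    # around the t=0 spike time.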
def on_nPCsPerChanSpinBox_valueChanged(self, val):
self.sort.npcsperchan = val
def on_actionReloadSpikes_triggered(self):
spw = self.spykewindow
sids = spw.GetAllSpikes()
sort = self.sort
if len(sids) == 0:
# if no spikes specified, reload all spikes
sids = sort.spikes['id']
usemeanchans = False
if QApplication.instance().keyboardModifiers() & Qt.ControlModifier:
usemeanchans = True
sort.reload_spikes_and_templates(sids, usemeanchans=usemeanchans)
# add sids to the set of dirtysids to be resaved to .wave file:
spw.update_dirtysids(sids)
# auto-refresh all plots:
self.panel.updateAllItems()
def on_actionFindPrevMostSimilar_triggered(self):
self.findMostSimilarCluster('previous')
def on_actionFindNextMostSimilar_triggered(self):
self.findMostSimilarCluster('next')
def on_actionToggleErrors_toggled(self, checked):
self.panel.showFills(checked)
def on_slider_valueChanged(self, slideri):
self.nslist.clearSelection() # emits selectionChanged signal, .reset() doesn't
if self.nslist.model().sliding == False:
self.nslist.model().sids.sort()
self.nslist.updateAll()
self.nslist.model().sliding = True
nsamples = int(self.nsamplesComboBox.currentText())
rows = np.arange(slideri, slideri+nsamples)
self.nslist.selectRows(rows)
def on_slider_sliderPressed(self):
slideri = self.slider.value()
if slideri == 0:
nsamples = int(self.nsamplesComboBox.currentText())
nsamples = min(nsamples, self.nslist.model().nspikes)
rows = np.arange(nsamples)
self.nslist.selectRows(rows)
def update_slider(self):
nsamples = int(self.nsamplesComboBox.currentText())
nsids = len(self.nslist.sids)
ulim = max(nsids-nsamples, 1)
self.slider.setRange(0, ulim)
self.slider.setSingleStep(1)
self.slider.setPageStep(nsamples)
def findMostSimilarCluster(self, which='next'):
try:
source = self.getClusterComparisonSource()
except RuntimeError as err:
print(err)
return
destinations = list(self.sort.clusters.values())
destinations.remove(source)
selchans = np.sort(self.panel.chans_selected)
if len(selchans) > 0:
srcchans = np.intersect1d(source.neuron.wave.chans, selchans)
if len(srcchans) == 0:
print("Source cluster doesn't overlap with selected chans")
return
else:
srcchans = source.neuron.wave.chans
if self.spykewindow.ui.normButton.isChecked():
print("NOTE: findMostSimilarCluster() doesn't currently take spike amplitude "
"normalization into account. To see the true amplitudes used to compare "
"neuron pairs, turn off normalization")
errors = []
dests = []
t0i, t1i = self.tis
for dest in destinations:
if dest.neuron.wave.data is None:
dest.neuron.update_wave()
dstchans = dest.neuron.wave.chans
if len(selchans) > 0:
if not set(selchans).issubset(dstchans):
continue
dstchans = selchans
cmpchans = np.intersect1d(srcchans, dstchans)
if len(cmpchans) == 0: # not comparable
continue
# ensure maxchan of both source and dest neuron are both in cmpchans
if source.neuron.chan not in cmpchans or dest.neuron.chan not in cmpchans:
continue
srcwavedata = source.neuron.wave[cmpchans].data[:, t0i:t1i]
dstwavedata = dest.neuron.wave[cmpchans].data[:, t0i:t1i]
error = core.rms(srcwavedata - dstwavedata)
errors.append(error)
dests.append(dest)
if len(errors) == 0:
print("No sufficiently overlapping clusters on selected chans to compare to")
return
errors = np.asarray(errors)
dests = np.asarray(dests)
desterrsortis = errors.argsort()
if which == 'next':
self._cmpid += 1
elif which == 'previous':
self._cmpid -= 1
else: raise ValueError('Unknown which: %r' % which)
self._cmpid = max(self._cmpid, 0)
self._cmpid = min(self._cmpid, len(dests)-1)
dest = dests[desterrsortis][self._cmpid]
self.spykewindow.SelectClusters(dest)
desterr = errors[desterrsortis][self._cmpid]
print('n%d to n%d rmserror: %.2f uV' %
(source.id, dest.id, self.sort.converter.AD2uV(desterr)))
def getClusterComparisonSource(self):
selclusters = self.spykewindow.GetClusters()
errmsg = 'unclear which cluster to use as source for comparison'
if len(selclusters) == 1:
source = selclusters[0]
self._source = source
self._cmpid = -1 # init/reset
elif len(selclusters) == 2:
source = self._source
if source not in selclusters:
raise RuntimeError(errmsg)
# deselect old destination cluster:
selclusters.remove(source)
self.spykewindow.SelectClusters(selclusters, on=False)
else:
self._source = None # reset for tidiness
raise RuntimeError(errmsg)
return source
def Shift(self, nt):
s = self.sort
spikes = s.spikes
spw = self.spykewindow
sids = np.concatenate((spw.GetClusterSpikes(), spw.GetUnsortedSpikes()))
self.sort.shift(sids, nt)
print('Shifted %d spikes by %d timepoints' % (len(sids), nt))
unids = np.unique(spikes['nid'][sids])
neurons = [ s.neurons[nid] for nid in unids ]
for neuron in neurons:
neuron.update_wave() # update affected mean waveforms
# add dirtysids to the set to be resaved to .wave file:
spw.update_dirtysids(sids)
# auto-refresh all plots
self.panel.updateAllItems()
def Align(self, to):
s = self.sort
spikes = s.spikes
spw = self.spykewindow
sids = np.concatenate((spw.GetClusterSpikes(), spw.GetUnsortedSpikes()))
if to == 'best':
tis = self.tis
# find which chans are common to all sids:
commonchans = s.get_common_chans(sids)[0]
# check selected chans
selchans = spw.get_selchans(sids)
for selchan in selchans:
if selchan not in commonchans:
print("Chan %d not common to all spikes, pick from %r"
% (selchan, list(commonchans)))
return
print('Best fit aligning %d spikes between tis=%r on chans=%r' %
(len(sids), list(tis), selchans))
# numpy implementation:
#dirtysids = s.alignbest(sids, tis, selchans)
# cython implementation:
dirtysids = util.alignbest_cy(s, sids, tis, np.int64(selchans))
else: # to in ['min', 'max']
print('Aligning %d spikes to %s' % (len(sids), to))
dirtysids = s.alignminmax(sids, to)
paligned = len(dirtysids) / len(sids) * 100
print('Aligned %d/%d (%.1f%%) spikes' % (len(dirtysids), len(sids), paligned))
unids = np.unique(spikes['nid'][dirtysids])
neurons = [ s.neurons[nid] for nid in unids ]
for neuron in neurons:
neuron.update_wave() # update affected mean waveforms
# add dirtysids to the set to be resaved to .wave file:
spw.update_dirtysids(dirtysids)
# auto-refresh all plots:
self.panel.updateAllItems()
def RemoveNeuron(self, neuron, update=True):
self.MoveSpikes2List(neuron, neuron.sids, update=update)
self.sort.remove_neuron(neuron.id)
if update:
self.nlist.updateAll()
def MoveSpikes2Neuron(self, sids, neuron=None, update=True):
sids = toiter(sids)
spikes = self.sort.spikes
if neuron == None:
neuron = self.sort.create_neuron()
neuron.sids = np.union1d(neuron.sids, sids) # update
spikes['nid'][sids] = neuron.id
if update:
self.sort.update_usids()
self.uslist.updateAll()
if neuron in self.nslist.neurons:
self.nslist.neurons = self.nslist.neurons # trigger nslist refresh
# TODO: selection doesn't seem to be working, always jumps to top of list
        return neuron
def MoveSpikes2List(self, neuron, sids, update=True):
sids = toiter(sids)
if len(sids) == 0:
return
spikes = self.sort.spikes
neuron.sids = np.setdiff1d(neuron.sids, sids)
spikes['nid'][sids] = 0 # unbind neuron id of sids in spikes struct array
if update:
self.sort.update_usids()
self.uslist.updateAll()
# this only makes sense if the neuron is currently selected in the nlist:
if neuron in self.nslist.neurons:
self.nslist.neurons = self.nslist.neurons # this triggers a refresh
neuron.wave.data = None # triggers an update when it's actually needed
def PlotClusterHistogram(self, X, nids):
spw = self.spykewindow
mplw = spw.OpenWindow('MPL')
unids = np.unique(nids)
nclusters = len(unids)
if nclusters == 0:
mplw.ax.clear()
mplw.figurecanvas.draw()
print("No spikes selected")
return
elif nclusters > 5:
mplw.ax.clear()
mplw.figurecanvas.draw()
print("Too many clusters selected for cluster histogram")
return
elif nclusters == 2:
calc_measures = True
else:
calc_measures = False
projdimi = 0
ndims = X.shape[1]
points = [] # list of projection of each cluster's points onto dimi
for unid in unids:
sidis, = np.where(nids == unid)
points.append(X[sidis])
#points.append(np.ascontiguousarray(X[sidis]))
if calc_measures:
t0 = time.time()
NDsep = util.NDsepmetric(*points, Nmax=20000)
print('NDsep calc took %.3f sec' % (time.time()-t0))
# centers of both clusters, use median:
c0 = np.median(points[0], axis=0) # ndims vector
c1 = np.median(points[1], axis=0)
# line connecting the centers of the two clusters, wrt c0
line = c1-c0
line /= np.linalg.norm(line) # make it unit length
#print('c0=%r, c1=%r, line=%r' % (c0, c1, line))
else:
line = np.zeros(ndims)
line[projdimi] = 1.0 # pick out just the one component
c0 = 0.0 # set origin at 0
# calculate projection of each cluster's points onto line
projs = []
for cpoints in points:
projs.append(np.dot(cpoints-c0, line))
if calc_measures:
d = np.median(projs[1]) - np.median(projs[0])
maxstd = max(projs[0].std(), projs[1].std())
if maxstd == 0:
oneDsep = 0
else:
oneDsep = d / (3 * maxstd)
proj = np.concatenate(projs)
nbins = max(intround(np.sqrt(len(proj))), 2)
edges = np.histogram(proj, bins=nbins)[1]
hists = []
for i in range(nclusters):
hists.append(np.histogram(projs[i], bins=edges)[0])
hist = np.concatenate([hists])
masses = np.asarray([ h.sum() for h in hist ])
sortedmassis = masses.argsort()
if calc_measures:
overlaparearatio = hist.min(axis=0).sum() / masses[sortedmassis[0]]
djs = core.DJS(hists[0], hists[1])
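            # Reading the measures computed above: NDsep (util.NDsepmetric) works on the full
            # N-dimensional points; oneDsep is the distance between the two clusters' projected
            # medians in units of 3x the larger projected std; overlaparearatio is the
            # overlapping histogram mass divided by the total mass of the smaller cluster; and
            # djs (per core.DJS) compares the two projection histograms.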
ledges = edges[:-1]
assert len(ledges) == nbins
binwidth = ledges[1] - ledges[0]
a = mplw.ax
a.clear()
windowtitle = "clusters %r" % list(unids)
print(windowtitle)
mplw.setWindowTitle(windowtitle)
if calc_measures:
title = ("%dDsep=%.3f, 1Dsep=%.3f, OAR=%.3f, DJS=%.3f"
% (ndims, NDsep, oneDsep, overlaparearatio, djs))
print(title)
a.set_title(title)
cs = [ CLUSTERCOLOURDICT[unid] for unid in unids ]
for i, c in enumerate(cs):
if c == WHITE:
cs[i] = 'black'
for i in sortedmassis[::-1]:
a.bar(ledges, hist[i], width=binwidth, color=cs[i], edgecolor=cs[i])
| true
| true
|
f71a7085403e8ce0a19e0672e598aeec15a4a023
| 899
|
py
|
Python
|
examples/show_debug.py
|
Matuiss2/python-sc2
|
dd93215d8b09b7ddacfd5c3cc4e9f43641d3f953
|
[
"MIT"
] | 2
|
2019-01-23T19:11:53.000Z
|
2019-04-05T17:45:49.000Z
|
examples/show_debug.py
|
Matuiss2/python-sc2
|
dd93215d8b09b7ddacfd5c3cc4e9f43641d3f953
|
[
"MIT"
] | null | null | null |
examples/show_debug.py
|
Matuiss2/python-sc2
|
dd93215d8b09b7ddacfd5c3cc4e9f43641d3f953
|
[
"MIT"
] | 1
|
2019-04-24T13:31:20.000Z
|
2019-04-24T13:31:20.000Z
|
import sc2
from sc2 import run_game, maps, Race, Difficulty
from sc2.player import Bot, Computer
class MyBot(sc2.BotAI):
async def on_step(self, iteration):
for structure in self.structures:
self._client.debug_text_world(
"\n".join([
f"{structure.type_id.name}:{structure.type_id.value}",
f"({structure.position.x:.2f},{structure.position.y:.2f})",
f"{structure.build_progress:.2f}",
] + [repr(x) for x in structure.orders]),
structure.position3d,
color=(0, 255, 0),
size=12,
)
await self._client.send_debug()
def main():
run_game(maps.get("Abyssal Reef LE"), [
Bot(Race.Terran, MyBot()),
Computer(Race.Protoss, Difficulty.Medium)
], realtime=True)
if __name__ == '__main__':
main()
| 31
| 79
| 0.558398
| true
| true
|
f71a7119f0a598c0a33db2eb55c1805b7e234b08
| 21,798
|
py
|
Python
|
archive/reuUpdated.py
|
emmettmeinzer/hmwgen
|
cd47733b5a34a6a3a9b56026eb5e73069e398033
|
[
"MIT"
] | null | null | null |
archive/reuUpdated.py
|
emmettmeinzer/hmwgen
|
cd47733b5a34a6a3a9b56026eb5e73069e398033
|
[
"MIT"
] | null | null | null |
archive/reuUpdated.py
|
emmettmeinzer/hmwgen
|
cd47733b5a34a6a3a9b56026eb5e73069e398033
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 11 13:41:14 2019
@author: Emmett & Binyang
"""
from pprint import pprint
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktTrainer
##Let’s first build a corpus to train our tokenizer on. We’ll use stuff available in NLTK:
from nltk.corpus import gutenberg
# print (dir(gutenberg))
# print (gutenberg.fileids())
text = ""
for file_id in gutenberg.fileids():
text += gutenberg.raw(file_id)
print (len(text))
##a function that converts a list to a string
def listToString(s):
# initialize an empty string
str1 = ""
# traverse in the string
for ele in s:
str1 += ele
# return string
return str1
##extract sentences from samples for following sentiment analysis
sampNum = 1
sent_df = pd.DataFrame()
i = 0
while (sampNum < 186):
    with open("sample"+str(sampNum)+".txt", "r") as fileOpen:
        temp = fileOpen.readlines()
    temp = listToString(temp)
trainer = PunktTrainer()
trainer.INCLUDE_ALL_COLLOCS = True
trainer.train(text)
tokenizer = PunktSentenceTokenizer(trainer.get_params())
##Adding more abbreviations
tokenizer._params.abbrev_types.add('dr')
    sents = tokenizer.tokenize(temp)
    for sent in sents:
sent_df.loc[i, 'sent'] = sent
sent_df.loc[i, 'sample'] = sampNum
i += 1
sampNum += 1
##NLTK's built-in Vader Sentiment Analyzer will simply rank a piece of text as positive, negative or neutral
##using a lexicon of positive and negative words.
##We can utilize this tool by first creating a Sentiment Intensity Analyzer (SIA) to categorize our sentences,
##then we'll use the polarity_scores method to get the sentiment.
##We'll write each score (neg, neu, pos and compound) straight into the sentence dataframe:
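##For example, a single call like sia.polarity_scores("great crew, smooth flight") returns a dict
##of the form {'neg': 0.0, 'neu': 0.4, 'pos': 0.6, 'compound': 0.8} (values here are illustrative).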
from nltk.sentiment.vader import SentimentIntensityAnalyzer as SIA
sia = SIA()
results = []
for idx, row in sent_df.iterrows():
line = row['sent']
score = sia.polarity_scores(line)
sent_df.loc[idx, 'neg'] = score.get('neg')
sent_df.loc[idx, 'neu'] = score.get('neu')
sent_df.loc[idx, 'pos'] = score.get('pos')
sent_df.loc[idx, 'compound'] = score.get('compound')
# pprint(results[:10], width=100)
##We will consider sentences with a compound value greater than 0.3 as positive and less than -0.3 as negative.
##There's some testing and experimentation that goes with choosing these ranges, and there is a trade-off to be
##made here. If you choose a higher threshold, you might get more compact results (fewer false positives and
##false negatives), but the size of the results will decrease significantly.
sent_df['label'] = 0
sent_df.loc[sent_df['compound'] > 0.3, 'label'] = 1
sent_df.loc[sent_df['compound'] < -0.3, 'label'] = -1
# sent_df.head()
##We have all the data we need to save, so let's do that:
sent_df.to_csv('sentiment analysis.csv', mode='a', encoding='utf-8', index=False)
##We can now keep appending to this csv, but note that if you rerun the analysis on the same samples you could
##get duplicates. Maybe add a more advanced saving function that reads and removes duplicates before saving, as
##sketched below.
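##A minimal sketch of such a helper (illustrative only -- the name save_dedup and the choice to
##de-duplicate on the 'sent' column are assumptions, not part of the original analysis):
def save_dedup(df, path='sentiment analysis.csv'):
    """Append df to the csv at path, keeping only one row per unique sentence."""
    try:
        old = pd.read_csv(path, encoding='utf-8')
        combined = pd.concat([old, df], ignore_index=True)
    except FileNotFoundError:
        combined = df
    combined = combined.drop_duplicates(subset='sent')
    combined.to_csv(path, encoding='utf-8', index=False)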
#Let's first take a peek at a few positive and negative sentences:
print("Positive sentences:\n")
pprint(list(sent_df[sent_df['label'] == 1].sent)[:5], width=200)
print("\nNegative sentences:\n")
pprint(list(sent_df[sent_df['label'] == -1].sent)[:5], width=200)
##Now let's check how many total positives and negatives we have in this dataset:
print(sent_df.label.value_counts())
print(sent_df.label.value_counts(normalize=True) * 100)
##The first line gives us raw value counts of the labels, whereas the second line provides percentages
##with the normalize keyword.
##For fun, let's plot a bar chart:
"""
fig, ax = plt.subplots(figsize=(8, 8))
counts = sent_df.label.value_counts(normalize=True) * 100
sns.barplot(x=counts.index, y=counts, ax=ax)
ax.set_xticklabels(['Negative', 'Neutral', 'Positive'])
ax.set_ylabel("Percentage")
plt.show()
"""
##record the number of words in each sentence (so short sentences can be filtered later if desired)
for idx, row in sent_df.iterrows():
sentence = row['sent']
sent_df.loc[idx, 'len_sent'] = len(sentence.split())
##split positive and other sentences
pos = sent_df[sent_df['label'] == 1]
neg = sent_df[sent_df['label'] != 1]
import gensim
from gensim.parsing.preprocessing import strip_non_alphanum
from gensim.parsing.preprocessing import strip_punctuation
from gensim.parsing.preprocessing import strip_multiple_whitespaces
from gensim.parsing.preprocessing import stem_text
corpus_full = []
for idx, row in sent_df.iterrows():
temp = row['sent']
temp1 = strip_non_alphanum(str(temp))
temp2 = strip_punctuation(temp1)
temp3 = strip_multiple_whitespaces(temp2)
final = stem_text(temp3)
corpus_full.append(final)
corpus_pos = []
for idx, row in pos.iterrows():
temp = row['sent']
temp1 = strip_non_alphanum(str(temp))
temp2 = strip_punctuation(temp1)
temp3 = strip_multiple_whitespaces(temp2)
final = stem_text(temp3)
corpus_pos.append(final)
corpus_neg = []
for idx, row in neg.iterrows():
temp = row['sent']
temp1 = strip_non_alphanum(str(temp))
temp2 = strip_punctuation(temp1)
temp3 = strip_multiple_whitespaces(temp2)
final = stem_text(temp3)
corpus_neg.append(final)
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
stoplist = set('a about above after again against all am an and any are arent\
as also at be because been before being below between both but\
by cant cannot could couldnt did didnt do does doesnt doing dont\
down during each els few for from further had hadnt has have havent\
having he hed hes her here heres hers herself him himself his\
how hows i id ill im ive if in into is isnt it its itself lets\
me more most mustnt my myself no nor not of off on once only or\
other ought our ours ourselves out over own same shant she shes\
should shouldnt so some such than that thats the their theirs\
them themselves then there theres these they theyd theyll theyre\
theyve this those through to too under until up very was wasnt\
we wed were weve were werent what whats when whens which while\
who whos whom why whys with wont would wouldnt you youd youll\
youre youve your yours yourself yourselves ll ve s ar mayb ha re\
us thi isn a b c d e f g h i j k l m n o p q r s t u v w x y z\
hi will can get back go don wa let atc ok ani mi thei whenev make\
just take aw know sai good baltimor jetblu lol thank thanks like\
vari might less highest billion nice probabl lot fuck shit sure\
feel dure befor realli work veri chanc see awai onc onli dy aren\
100 someth thing even happen becaus wai everi much help want think\
fear flight plane fly mai time dai\
1 2 3 4 5 6 7 8 9 10'.split())
print (len(stoplist))
stoplist.update(stop_words)
print(len(stop_words))
print(len(stoplist))
#standardize text -- makes all characters lowercase and removes common stop words
text_full = [[word for word in document.lower().split() if word not in stoplist]
for document in corpus_full]
print(text_full)
text_pos = [[word for word in document.lower().split() if word not in stoplist]
for document in corpus_pos]
text_neg = [[word for word in document.lower().split() if word not in stoplist]
for document in corpus_neg]
#count number of times that word appears in corpus
#pair frequency with respective word in new array
from collections import defaultdict
frequency = defaultdict(int)
for text in text_full:
for token in text:
frequency[token] += 1
corpus_removeOne_full = [[token for token in text if frequency[token]>1] for text in text_full]
frequency = defaultdict(int)
for text in text_pos:
for token in text:
frequency[token] += 1
corpus_removeOne_pos = [[token for token in text if frequency[token]>1] for text in text_pos]
frequency = defaultdict(int)
for text in text_neg:
for token in text:
frequency[token] += 1
corpus_removeOne_neg = [[token for token in text if frequency[token]>1] for text in text_neg]
from gensim import corpora
#add corpora to dictionary
dictionary_full = corpora.Dictionary(corpus_removeOne_full)
dictionary_pos = corpora.Dictionary(corpus_removeOne_pos)
dictionary_neg = corpora.Dictionary(corpus_removeOne_neg)
#save dictionary for future reference
dictionary_full.save('redditTest_full.dict')
dictionary_pos.save('redditTest_pos.dict') #location of document in computer
dictionary_neg.save('redditTest_neg.dict')
#dict = gensim.corpora.Dictionary.load('redditTest.dict')
#assign numeric id to each token in dictionary
dictID_full = dictionary_full.token2id
dictID_pos = dictionary_pos.token2id
dictID_neg = dictionary_neg.token2id
#remove empty sentences
corpus_removeOne_full = [text for text in corpus_removeOne_full if len(text) > 0]
corpus_removeOne_pos = [text for text in corpus_removeOne_pos if len(text) > 0]
corpus_removeOne_neg = [text for text in corpus_removeOne_neg if len(text) > 0]
#convert each document into a bag-of-words vector of (token_id, count) pairs
#Bag of Word Corpus of Full Sentiment
bow_corpus_full = [dictionary_full.doc2bow(text) for text in corpus_removeOne_full]
corpora.MmCorpus.serialize('redditTest_full.mm', bow_corpus_full)
corp_full = gensim.corpora.MmCorpus('redditTest_full.mm')
from gensim import models
tfidf_full = models.TfidfModel(bow_corpus_full)
corpus_tfidf_full = tfidf_full[bow_corpus_full]
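##Illustrative note: each entry of bow_corpus_full is a list of (token_id, count) pairs, e.g.
##[(0, 1), (12, 2), ...] (ids made up here), which dictionary_full maps back to words; the
##tf-idf model re-weights those counts by how informative each token is across documents.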
#Bag of Word Corpus of Positive Sentiment
bow_corpus_pos = [dictionary_pos.doc2bow(text) for text in corpus_removeOne_pos]
corpora.MmCorpus.serialize('redditTest_pos.mm', bow_corpus_pos)
corp_pos = gensim.corpora.MmCorpus('redditTest_pos.mm')
from gensim import models
tfidf_pos = models.TfidfModel(bow_corpus_pos)
corpus_tfidf_pos = tfidf_pos[bow_corpus_pos]
#Bag of Word Corpus of Negative Sentiment
bow_corpus_neg = [dictionary_neg.doc2bow(text) for text in corpus_removeOne_neg]
corpora.MmCorpus.serialize('redditTest_neg.mm', bow_corpus_neg)
corp_neg = gensim.corpora.MmCorpus('redditTest_neg.mm')
from gensim import models
tfidf_neg = models.TfidfModel(bow_corpus_neg)
corpus_tfidf_neg = tfidf_neg[bow_corpus_neg]
#LDA Mallet for full corpus
mallet_path = '/Users/emmet/.spyder-py3-dev/REU_Project/mallet-2.0.8/bin/mallet'
lda_full = gensim.models.wrappers.LdaMallet(mallet_path, corpus=bow_corpus_full, num_topics=9, id2word=dictionary_full, workers=1, alpha=110, random_seed=109, iterations=50)
corpus_LDA_full = lda_full[bow_corpus_full]
lda_full.print_topics(9)
#LDA Mallet for positive corpus
mallet_path = '/Users/emmet/.spyder-py3-dev/REU_Project/mallet-2.0.8/bin/mallet'
lda_pos = gensim.models.wrappers.LdaMallet(mallet_path, corpus=bow_corpus_pos, num_topics=9, id2word=dictionary_pos, workers=1, alpha=110, random_seed=109, iterations=50)
corpus_LDA_pos = lda_pos[bow_corpus_pos]
lda_pos.print_topics(9)
#LDA Mallet for negative corpus
mallet_path = '/Users/emmet/.spyder-py3-dev/REU_Project/mallet-2.0.8/bin/mallet'
lda_neg = gensim.models.wrappers.LdaMallet(mallet_path, corpus=bow_corpus_neg, num_topics=9, id2word=dictionary_neg, workers=1, alpha=110, random_seed=109, iterations=50)
corpus_LDA_neg = lda_neg[bow_corpus_neg]
lda_neg.print_topics(9)
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from sklearn.manifold import TSNE
colors = np.array([color for name, color in mcolors.TABLEAU_COLORS.items()])
#t-SNE plot for full corpus
n_topics = 9
topic_weights_full = []
for row_list in lda_full[bow_corpus_full]:
tmp = np.zeros(n_topics)
for i, w in row_list:
tmp[i] = w
topic_weights_full.append(tmp)
arr_full = pd.DataFrame(topic_weights_full).fillna(0).values
topic_num_full = np.argmax(arr_full, axis=1)
tsne_model_full = TSNE(n_components=3, random_state=None, method='barnes_hut',
angle=0.5, init='pca')
tsne_lda_full = tsne_model_full.fit_transform(arr_full)
sub = str.maketrans("0123456789", "₀₁₂₃₄₅₆₇₈₉")
plt.xlabel('t-SNE1'.translate(sub))
plt.ylabel('t-SNE2'.translate(sub))
plt.title('t-SNE Plot of Topics within Full Corpus')
tsne_full = plt.scatter(x=tsne_lda_full[:,0], y=tsne_lda_full[:,1])
plt.show()
"""
#t-SNE plot for positive corpus
n_topics = 9
topic_weights_pos = []
for row_list in lda_pos[bow_corpus_pos]:
tmp = np.zeros(n_topics)
for i, w in row_list:
tmp[i] = w
topic_weights_pos.append(tmp)
arr_pos = pd.DataFrame(topic_weights_pos).fillna(0).values
topic_num_pos = np.argmax(arr_pos, axis=1)
tsne_model_pos = TSNE(n_components=3, random_state=None, method='barnes_hut',
angle=0.5, init='pca')
tsne_lda_pos = tsne_model_pos.fit_transform(arr_pos)
sub = str.maketrans("0123456789", "₀₁₂₃₄₅₆₇₈₉")
plt.xlabel('t-SNE1'.translate(sub))
plt.ylabel('t-SNE2'.translate(sub))
plt.title('t-SNE Plot of Topics within Positive Sentiment Corpus')
tsne_pos = plt.scatter(x=tsne_lda_pos[:,0], y=tsne_lda_pos[:,1])
#plt.show(tsne_pos)
#t-SNE plot for negative corpus
n_topics = 9
topic_weights_neg = []
for row_list in lda_neg[bow_corpus_neg]:
tmp = np.zeros(n_topics)
for i, w in row_list:
tmp[i] = w
topic_weights_neg.append(tmp)
arr_neg = pd.DataFrame(topic_weights_neg).fillna(0).values
topic_num_neg = np.argmax(arr_neg, axis=1)
tsne_model_neg = TSNE(n_components=3, random_state=None, method='barnes_hut',
angle=0.5, init='pca')
tsne_lda_neg = tsne_model_neg.fit_transform(arr_neg)
sub = str.maketrans("0123456789", "₀₁₂₃₄₅₆₇₈₉")
plt.xlabel('t-SNE1'.translate(sub))
plt.ylabel('t-SNE2'.translate(sub))
plt.title('t-SNE Plot of Topics within Negative Sentiment Corpus')
tsne_neg = plt.scatter(tsne_lda_neg[:,0], tsne_lda_neg[:,1])
#plt.show(tsne_neg)
"""
from collections import Counter
#Word Count & Keyword for Full Corpus
topics_full = lda_full.show_topics(formatted=False)
flatten_full = [w for w_list in corpus_removeOne_full for w in w_list]  # flatten token lists so word strings get counted
counter_full = Counter(flatten_full)
topic_weight_full = []
for i, topic in topics_full:
for word, weight in topic:
topic_weight_full.append([word, i , weight, counter_full[word]])
data_frame_full = pd.DataFrame(topic_weight_full, columns=['word', 'topic_id', 'importance', 'word_count'])
fig, axes = plt.subplots(3, 3, figsize=(10,6), sharey=True, dpi=160)
for i, ax in enumerate(axes.flatten()):
ax.bar(x='word', height="word_count", data=data_frame_full.loc[data_frame_full.topic_id==i, :], color=colors[i], width=0.5, alpha=0.3, label='Word Count')
ax_twin = ax.twinx()
ax_twin.bar(x='word', height="importance", data=data_frame_full.loc[data_frame_full.topic_id==i, :], color=colors[i], width=0.2, label='Weights')
ax.set_ylabel('Word Count', color=colors[i])
ax_twin.set_ylim(0, 0.5); ax.set_ylim(0, 100)
ax.set_title('Topic: ' + str(i+1), color=colors[i], fontsize=8)
ax.tick_params(axis='y', left=False)
ax.set_xticklabels(data_frame_full.loc[data_frame_full.topic_id==i, 'word'], rotation=90, horizontalalignment= 'center')
ax.legend(loc='upper left'); ax_twin.legend(loc='upper right')
fig.tight_layout(w_pad=2)
plt.show()
"""
#Word Count & Keyword for Positive Corpus
topics_pos = lda_pos.show_topics(formatted=False)
flatten_pos = [w for w_list in bow_corpus_pos for w in w_list]
counter_pos = Counter(flatten_pos)
topic_weight_pos = []
for i, topic in topics_pos:
for word, weight in topic:
topic_weight_pos.append([word, i , weight, counter_pos[word]])
data_frame_pos = pd.DataFrame(topic_weight_pos, columns=['word', 'topic_id', 'importance', 'word_count'])
fig, axes = plt.subplots(3, 3, figsize=(10,6), sharey=True, dpi=160)
for i, ax in enumerate(axes.flatten()):
ax.bar(x='word', height="word_count", data=data_frame_pos.loc[data_frame_pos.topic_id==i, :], color=colors[i], width=0.5, alpha=0.3, label='Word Count')
ax_twin = ax.twinx()
ax_twin.bar(x='word', height="importance", data=data_frame_pos.loc[data_frame_pos.topic_id==i, :], color=colors[i], width=0.2, label='Weights')
ax.set_ylabel('Word Count', color=colors[i])
ax_twin.set_ylim(0, 0.5); ax.set_ylim(0, 100)
ax.set_title('Topic: ' + str(i+1), color=colors[i], fontsize=8)
ax.tick_params(axis='y', left=False)
ax.set_xticklabels(data_frame_pos.loc[data_frame_pos.topic_id==i, 'word'], rotation=90, horizontalalignment= 'center')
ax.legend(loc='upper left'); ax_twin.legend(loc='upper right')
fig.tight_layout(w_pad=2)
plt.show()
#Word Count & Keyword for Negative Corpus
topics_neg = lda_neg.show_topics(formatted=False)
flatten_neg = [w for w_list in bow_corpus_neg for w in w_list]
counter_neg = Counter(flatten_neg)
topic_weight_neg = []
for i, topic in topics_neg:
for word, weight in topic:
topic_weight_neg.append([word, i , weight, counter_neg[word]])
data_frame_neg = pd.DataFrame(topic_weight_neg, columns=['word', 'topic_id', 'importance', 'word_count'])
fig, axes = plt.subplots(3, 3, figsize=(10,6), sharey=True, dpi=160)
for i, ax in enumerate(axes.flatten()):
ax.bar(x='word', height="word_count", data=data_frame_neg.loc[data_frame_neg.topic_id==i, :], color=colors[i], width=0.5, alpha=0.3, label='Word Count')
ax_twin = ax.twinx()
ax_twin.bar(x='word', height="importance", data=data_frame_neg.loc[data_frame_neg.topic_id==i, :], color=colors[i], width=0.2, label='Weights')
ax.set_ylabel('Word Count', color=colors[i])
ax_twin.set_ylim(0, 0.5); ax.set_ylim(0, 100)
ax.set_title('Topic: ' + str(i+1), color=colors[i], fontsize=8)
ax.tick_params(axis='y', left=False)
ax.set_xticklabels(data_frame_neg.loc[data_frame_neg.topic_id==i, 'word'], rotation=90, horizontalalignment= 'center')
ax.legend(loc='upper left'); ax_twin.legend(loc='upper right')
fig.tight_layout(w_pad=2)
plt.show()
"""
from wordcloud import WordCloud
import matplotlib.colors as mcolors
#Word Cloud Display for Full Corpus
cloud = WordCloud(stopwords=stoplist, background_color='white', width=2500, height=1800, max_words=7, colormap='tab10', color_func=lambda *args, **kwargs: colors[i], prefer_horizontal=1.0)
topics_full = lda_full.show_topics(formatted=False)
fig, axes = plt.subplots(3, 3, figsize=(10, 6))
for i, ax in enumerate(axes.flatten()):
fig.add_subplot(ax)
topic_words_full = dict(topics_full[i][1])
cloud.generate_from_frequencies(topic_words_full, max_font_size=300)
plt.gca().imshow(cloud)
plt.gca().set_title('Topic ' + str(i+1), fontdict=dict(size=10))
plt.gca().axis('off')
plt.axis('off')
plt.tight_layout()
plt.show()
"""
#Word Cloud Display for Positive Corpus
cloud = WordCloud(stopwords=stoplist, background_color='white', width=2500, height=1800, max_words=7, colormap='tab10', color_func=lambda *args, **kwargs: colors[i], prefer_horizontal=1.0)
topics_pos = lda_pos.show_topics(formatted=False)
fig, axes = plt.subplots(3, 3, figsize=(10, 6))
for i, ax in enumerate(axes.flatten()):
fig.add_subplot(ax)
topic_words_pos = dict(topics_pos[i][1])
cloud.generate_from_frequencies(topic_words_pos, max_font_size=300)
plt.gca().imshow(cloud)
plt.gca().set_title('Topic ' + str(i+1), fontdict=dict(size=10))
plt.gca().axis('off')
plt.axis('off')
plt.tight_layout()
plt.show()
#Word Cloud Display for Negative Corpus
cloud = WordCloud(stopwords=stoplist, background_color='white', width=2500, height=1800, max_words=7, colormap='tab10', color_func=lambda *args, **kwargs: colors[i], prefer_horizontal=1.0)
topics_neg = lda_neg.show_topics(formatted=False)
fig, axes = plt.subplots(3, 3, figsize=(10, 6))
for i, ax in enumerate(axes.flatten()):
fig.add_subplot(ax)
topic_words_neg = dict(topics_neg[i][1])
cloud.generate_from_frequencies(topic_words_neg, max_font_size=300)
plt.gca().imshow(cloud)
plt.gca().set_title('Topic ' + str(i+1), fontdict=dict(size=10))
plt.gca().axis('off')
plt.axis('off')
plt.tight_layout()
plt.show()
"""
import pyLDAvis.gensim
import pyLDAvis
import gensim
#LDA Mallet pyLDAvis for Full Corpus
mallet2lda_full = gensim.models.wrappers.ldamallet.malletmodel2ldamodel(lda_full)
visualizeLDA_full = pyLDAvis.gensim.prepare(mallet2lda_full, bow_corpus_full, dictionary_full)
pyLDAvis.show(visualizeLDA_full)
"""
#LDA Mallet pyLDAvis for Postiive Corpus
mallet2lda_pos = gensim.models.wrappers.ldamallet.malletmodel2ldamodel(lda_pos)
visualizeLDA_pos = pyLDAvis.gensim.prepare(mallet2lda_pos, bow_corpus_pos, dictionary_pos)
pyLDAvis.show(visualizeLDA_pos)
#LDA Mallet pyLDAvis for Negative Corpus
mallet2lda_neg = gensim.models.wrappers.ldamallet.malletmodel2ldamodel(lda_neg)
visualizeLDA_neg = pyLDAvis.gensim.prepare(mallet2lda_neg, bow_corpus_neg, dictionary_neg)
pyLDAvis.show(visualizeLDA_neg)
"""
| 38.376761
| 189
| 0.708551
|
from pprint import pprint
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktTrainer
from nltk.corpus import gutenberg
text = ""
for file_id in gutenberg.fileids():
    text += gutenberg.raw(file_id)
print(len(text))
= ""
for ele in s:
str1 += ele
return str1
for sampNum in range(1, 186):
fileOpen = open("sample"+str(sampNum)+".txt","r")
temp = fileOpen.readlines()
temp = listToString(temp)
trainer = PunktTrainer()
trainer.INCLUDE_ALL_COLLOCS = True
trainer.train(text)
tokenizer = PunktSentenceTokenizer(trainer.get_params())
tokenizer._params.abbrev_types.add('dr')
sent = tokenizer.tokenize(temp)
for sent in sent:
sent_df.loc[i, 'sent'] = sent
sent_df.loc[i, 'sample'] = sampNum
i += 1
sampNum += 1
    sent_df.loc[idx, 'neg'] = score.get('neg')
sent_df.loc[idx, 'neu'] = score.get('neu')
sent_df.loc[idx, 'pos'] = score.get('pos')
sent_df.loc[idx, 'compound'] = score.get('compound')
# pprint(results[:10], width=100)
##We will consider posts with a compound value greater than 0.2 as positive and less than -0.2 as negative.
##There's some testing and experimentation that goes with choosing these ranges, and there is a trade-off to be made here.
##You can keep appending to this csv, but just make sure that if you reassign the headlines set, you could get
##duplicates. Maybe add a more advanced saving function that reads and removes duplicates before saving.
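#A minimal sketch of the +/-0.2 compound-score thresholding described above; the
#original assignment lines are not shown here, so this is an assumed reconstruction
#rather than the author's exact code.
sent_df['label'] = 0
sent_df.loc[sent_df['compound'] > 0.2, 'label'] = 1
sent_df.loc[sent_df['compound'] < -0.2, 'label'] = -1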
#Let's first take a peek at a few positive and negative headlines:
print("Positive headlines:\n")
pprint(list(sent_df[sent_df['label'] == 1].sent)[:5], width=200)
print("\nNegative headlines:\n")
pprint(list(sent_df[sent_df['label'] == -1].sent)[:5], width=200)
print(sent_df.label.value_counts())
print(sent_df.label.value_counts(normalize=True) * 100)
##The first line gives us raw value counts of the labels, whereas the second line provides percentages
##with the normalize keyword.
##For fun, let's plot a bar chart:
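#A possible version of the bar chart mentioned above (assumed sketch, not the
#author's exact plotting code):
fig, ax = plt.subplots(figsize=(8, 6))
sent_df.label.value_counts(normalize=True).plot(kind='bar', ax=ax)
ax.set_xlabel('label'); ax.set_ylabel('fraction of sentences')
plt.show()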
for idx, row in sent_df.iterrows():
    sentence = row['sent']
sent_df.loc[idx, 'len_sent'] = len(sentence.split())
pos = sent_df[sent_df['label'] == 1]
neg = sent_df[sent_df['label'] != 1]
import gensim
from gensim.parsing.preprocessing import strip_non_alphanum
from gensim.parsing.preprocessing import strip_punctuation
from gensim.parsing.preprocessing import strip_multiple_whitespaces
from gensim.parsing.preprocessing import stem_text
corpus_full = []
for idx, row in sent_df.iterrows():
temp = row['sent']
temp1 = strip_non_alphanum(str(temp))
temp2 = strip_punctuation(temp1)
temp3 = strip_multiple_whitespaces(temp2)
final = stem_text(temp3)
corpus_full.append(final)
corpus_pos = []
for idx, row in pos.iterrows():
temp = row['sent']
temp1 = strip_non_alphanum(str(temp))
temp2 = strip_punctuation(temp1)
temp3 = strip_multiple_whitespaces(temp2)
final = stem_text(temp3)
corpus_pos.append(final)
corpus_neg = []
for idx, row in neg.iterrows():
temp = row['sent']
temp1 = strip_non_alphanum(str(temp))
temp2 = strip_punctuation(temp1)
temp3 = strip_multiple_whitespaces(temp2)
final = stem_text(temp3)
corpus_neg.append(final)
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
stoplist = set('a about above after again against all am an and any are arent\
as also at be because been before being below between both but\
by cant cannot could couldnt did didnt do does doesnt doing dont\
down during each els few for from further had hadnt has have havent\
having he hed hes her here heres hers herself him himself his\
how hows i id ill im ive if in into is isnt it its itself lets\
me more most mustnt my myself no nor not of off on once only or\
other ought our ours ourselves out over own same shant she shes\
should shouldnt so some such than that thats the their theirs\
them themselves then there theres these they theyd theyll theyre\
theyve this those through to too under until up very was wasnt\
we wed were weve were werent what whats when whens which while\
who whos whom why whys with wont would wouldnt you youd youll\
youre youve your yours yourself yourselves ll ve s ar mayb ha re\
us thi isn a b c d e f g h i j k l m n o p q r s t u v w x y z\
hi will can get back go don wa let atc ok ani mi thei whenev make\
just take aw know sai good baltimor jetblu lol thank thanks like\
vari might less highest billion nice probabl lot fuck shit sure\
feel dure befor realli work veri chanc see awai onc onli dy aren\
100 someth thing even happen becaus wai everi much help want think\
fear flight plane fly mai time dai\
1 2 3 4 5 6 7 8 9 10'.split())
print (len(stoplist))
stoplist.update(stop_words)
print(len(stop_words))
print(len(stoplist))
text_full = [[word for word in document.lower().split() if word not in stoplist]
for document in corpus_full]
print(text_full)
text_pos = [[word for word in document.lower().split() if word not in stoplist]
for document in corpus_pos]
text_neg = [[word for word in document.lower().split() if word not in stoplist]
for document in corpus_neg]
from collections import defaultdict
frequency = defaultdict(int)
for text in text_full:
for token in text:
frequency[token] += 1
corpus_removeOne_full = [[token for token in text if frequency[token]>1] for text in text_full]
frequency = defaultdict(int)
for text in text_pos:
for token in text:
frequency[token] += 1
corpus_removeOne_pos = [[token for token in text if frequency[token]>1] for text in text_pos]
frequency = defaultdict(int)
for text in text_neg:
for token in text:
frequency[token] += 1
corpus_removeOne_neg = [[token for token in text if frequency[token]>1] for text in text_neg]
from gensim import corpora
dictionary_full = corpora.Dictionary(corpus_removeOne_full)
dictionary_pos = corpora.Dictionary(corpus_removeOne_pos)
dictionary_neg = corpora.Dictionary(corpus_removeOne_neg)
dictionary_full.save('redditTest_full.dict')
dictionary_pos.save('redditTest_pos.dict')
dictionary_neg.save('redditTest_neg.dict')
dictID_full = dictionary_full.token2id
dictID_pos = dictionary_pos.token2id
dictID_neg = dictionary_neg.token2id
for text in corpus_removeOne_full:
if len(text) == 0:
corpus_removeOne_full.remove(text)
for text in corpus_removeOne_pos:
if len(text) == 0:
corpus_removeOne_pos.remove(text)
for text in corpus_removeOne_neg:
if len(text) == 0:
corpus_removeOne_neg.remove(text)
bow_corpus_full = [dictionary_full.doc2bow(text) for text in corpus_removeOne_full]
corpora.MmCorpus.serialize('redditTest_full.mm', bow_corpus_full)
corp_full = gensim.corpora.MmCorpus('redditTest_full.mm')
from gensim import models
tfidf_pos = models.TfidfModel(bow_corpus_full)
corpus_tfidf_full = tfidf_pos[bow_corpus_full]
bow_corpus_pos = [dictionary_pos.doc2bow(text) for text in corpus_removeOne_pos]
corpora.MmCorpus.serialize('redditTest_pos.mm', bow_corpus_pos)
corp_pos = gensim.corpora.MmCorpus('redditTest_pos.mm')
from gensim import models
tfidf_pos = models.TfidfModel(bow_corpus_pos)
corpus_tfidf_pos = tfidf_pos[bow_corpus_pos]
bow_corpus_neg = [dictionary_neg.doc2bow(text) for text in corpus_removeOne_neg]
corpora.MmCorpus.serialize('redditTest_neg.mm', bow_corpus_neg)
corp_neg = gensim.corpora.MmCorpus('redditTest_neg.mm')
from gensim import models
tfidf_neg = models.TfidfModel(bow_corpus_neg)
corpus_tfidf_neg = tfidf_neg[bow_corpus_neg]
mallet_path = '/Users/emmet/.spyder-py3-dev/REU_Project/mallet-2.0.8/bin/mallet'
lda_full = gensim.models.wrappers.LdaMallet(mallet_path, corpus=bow_corpus_full, num_topics=9, id2word=dictionary_full, workers=1, alpha=110, random_seed=109, iterations=50)
corpus_LDA_full = lda_full[bow_corpus_full]
lda_full.print_topics(9)
mallet_path = '/Users/emmet/.spyder-py3-dev/REU_Project/mallet-2.0.8/bin/mallet'
lda_pos = gensim.models.wrappers.LdaMallet(mallet_path, corpus=bow_corpus_pos, num_topics=9, id2word=dictionary_pos, workers=1, alpha=110, random_seed=109, iterations=50)
corpus_LDA_pos = lda_pos[bow_corpus_pos]
lda_pos.print_topics(9)
mallet_path = '/Users/emmet/.spyder-py3-dev/REU_Project/mallet-2.0.8/bin/mallet'
lda_neg = gensim.models.wrappers.LdaMallet(mallet_path, corpus=bow_corpus_neg, num_topics=9, id2word=dictionary_neg, workers=1, alpha=110, random_seed=109, iterations=50)
corpus_LDA_neg = lda_neg[bow_corpus_neg]
lda_neg.print_topics(9)
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from sklearn.manifold import TSNE
colors = np.array([color for name, color in mcolors.TABLEAU_COLORS.items()])
n_topics = 9
topic_weights_full = []
for row_list in lda_full[bow_corpus_full]:
tmp = np.zeros(n_topics)
for i, w in row_list:
tmp[i] = w
topic_weights_full.append(tmp)
arr_full = pd.DataFrame(topic_weights_full).fillna(9).values
topic_num_full = np.argmax(arr_full, axis=1)
tsne_model_full = TSNE(n_components=3, random_state=None, method='barnes_hut',
angle=0.5, init='pca')
tsne_lda_full = tsne_model_full.fit_transform(arr_full)
sub = str.maketrans("0123456789", "₀₁₂₃₄₅₆₇₈₉")
plt.xlabel('t-SNE1'.translate(sub))
plt.ylabel('t-SNE2'.translate(sub))
plt.title('t-SNE Plot of Topics within Positive Sentiment Corpus')
tsne_full = plt.scatter(x=tsne_lda_full[:,0], y=tsne_lda_full[:,1])
plt.show(tsne_full)
from collections import Counter
topics_full = lda_full.show_topics(formatted=False)
flatten_full = [w for w_list in bow_corpus_full for w in w_list]
counter_full = Counter(flatten_full)
topic_weight_full = []
for i, topic in topics_full:
for word, weight in topic:
topic_weight_full.append([word, i , weight, counter_full[word]])
data_frame_full = pd.DataFrame(topic_weight_full, columns=['word', 'topic_id', 'importance', 'word_count'])
fig, axes = plt.subplots(3, 3, figsize=(10,6), sharey=True, dpi=160)
for i, ax in enumerate(axes.flatten()):
ax.bar(x='word', height="word_count", data=data_frame_full.loc[data_frame_full.topic_id==i, :], color=colors[i], width=0.5, alpha=0.3, label='Word Count')
ax_twin = ax.twinx()
ax_twin.bar(x='word', height="importance", data=data_frame_full.loc[data_frame_full.topic_id==i, :], color=colors[i], width=0.2, label='Weights')
ax.set_ylabel('Word Count', color=colors[i])
ax_twin.set_ylim(0, 0.5); ax.set_ylim(0, 100)
ax.set_title('Topic: ' + str(i+1), color=colors[i], fontsize=8)
ax.tick_params(axis='y', left=False)
ax.set_xticklabels(data_frame_full.loc[data_frame_full.topic_id==i, 'word'], rotation=90, horizontalalignment= 'center')
ax.legend(loc='upper left'); ax_twin.legend(loc='upper right')
fig.tight_layout(w_pad=2)
plt.show()
from wordcloud import WordCloud
import matplotlib.colors as mcolors
cloud = WordCloud(stopwords=stoplist, background_color='white', width=2500, height=1800, max_words=7, colormap='tab10', color_func=lambda *args, **kwargs: colors[i], prefer_horizontal=1.0)
topics_full = lda_full.show_topics(formatted=False)
fig, axes = plt.subplots(3, 3, figsize=(10, 6))
for i, ax in enumerate(axes.flatten()):
fig.add_subplot(ax)
topic_words_full = dict(topics_full[i][1])
cloud.generate_from_frequencies(topic_words_full, max_font_size=300)
plt.gca().imshow(cloud)
plt.gca().set_title('Topic ' + str(i+1), fontdict=dict(size=10))
plt.gca().axis('off')
plt.axis('off')
plt.tight_layout()
plt.show()
import pyLDAvis.gensim
import pyLDAvis
import gensim
mallet2lda_full = gensim.models.wrappers.ldamallet.malletmodel2ldamodel(lda_full)
visualizeLDA_full = pyLDAvis.gensim.prepare(mallet2lda_full, bow_corpus_full, dictionary_full)
pyLDAvis.show(visualizeLDA_full)
| true
| true
|
f71a71c02c39541a49fbe5ad95d204ca99999495
| 1,129
|
py
|
Python
|
migrations/versions/0076_add_intl_flag_to_provider.py
|
cds-snc/notifier-api
|
90b385ec49efbaee7e607516fc7d9f08991af813
|
[
"MIT"
] | 41
|
2019-11-28T16:58:41.000Z
|
2022-01-28T21:11:16.000Z
|
migrations/versions/0076_add_intl_flag_to_provider.py
|
cds-snc/notification-api
|
b1c1064f291eb860b494c3fa65ac256ad70bf47c
|
[
"MIT"
] | 1,083
|
2019-07-08T12:57:24.000Z
|
2022-03-08T18:53:40.000Z
|
migrations/versions/0076_add_intl_flag_to_provider.py
|
cds-snc/notifier-api
|
90b385ec49efbaee7e607516fc7d9f08991af813
|
[
"MIT"
] | 9
|
2020-01-24T19:56:43.000Z
|
2022-01-27T21:36:53.000Z
|
"""empty message
Revision ID: 0076_add_intl_flag_to_provider
Revises: 0075_create_rates_table
Create Date: 2017-04-25 09:44:13.194164
"""
# revision identifiers, used by Alembic.
revision = "0076_add_intl_flag_to_provider"
down_revision = "0075_create_rates_table"
import sqlalchemy as sa
from alembic import op
def upgrade():
op.add_column(
"provider_details",
sa.Column(
"supports_international",
sa.Boolean(),
nullable=False,
server_default=sa.false(),
),
)
op.add_column(
"provider_details_history",
sa.Column(
"supports_international",
sa.Boolean(),
nullable=False,
server_default=sa.false(),
),
)
op.execute("UPDATE provider_details SET supports_international=True WHERE identifier='mmg'")
op.execute("UPDATE provider_details_history SET supports_international=True WHERE identifier='mmg'")
def downgrade():
op.drop_column("provider_details_history", "supports_international")
op.drop_column("provider_details", "supports_international")
| 25.659091
| 104
| 0.675819
|
revision = "0076_add_intl_flag_to_provider"
down_revision = "0075_create_rates_table"
import sqlalchemy as sa
from alembic import op
def upgrade():
op.add_column(
"provider_details",
sa.Column(
"supports_international",
sa.Boolean(),
nullable=False,
server_default=sa.false(),
),
)
op.add_column(
"provider_details_history",
sa.Column(
"supports_international",
sa.Boolean(),
nullable=False,
server_default=sa.false(),
),
)
op.execute("UPDATE provider_details SET supports_international=True WHERE identifier='mmg'")
op.execute("UPDATE provider_details_history SET supports_international=True WHERE identifier='mmg'")
def downgrade():
op.drop_column("provider_details_history", "supports_international")
op.drop_column("provider_details", "supports_international")
| true
| true
|
f71a721c1a9432964c02aa7cb35a51f05080d90d
| 1,983
|
py
|
Python
|
openbb_terminal/cryptocurrency/onchain/whale_alert_view.py
|
joshuabuildsthings/GamestonkTerminal
|
385d12803ae1725a22b0a440c3b88bffa974edcd
|
[
"MIT"
] | 255
|
2022-03-29T16:43:51.000Z
|
2022-03-31T23:57:08.000Z
|
openbb_terminal/cryptocurrency/onchain/whale_alert_view.py
|
joshuabuildsthings/GamestonkTerminal
|
385d12803ae1725a22b0a440c3b88bffa974edcd
|
[
"MIT"
] | 14
|
2022-03-29T14:20:33.000Z
|
2022-03-31T23:39:20.000Z
|
openbb_terminal/cryptocurrency/onchain/whale_alert_view.py
|
joshuabuildsthings/GamestonkTerminal
|
385d12803ae1725a22b0a440c3b88bffa974edcd
|
[
"MIT"
] | 24
|
2022-03-29T15:28:56.000Z
|
2022-03-31T23:54:15.000Z
|
"""Whale Alert view"""
__docformat__ = "numpy"
import logging
import os
from openbb_terminal.cryptocurrency.onchain import whale_alert_model
from openbb_terminal.decorators import check_api_key
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
lambda_long_number_format,
print_rich_table,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_WHALE_ALERT_KEY"])
def display_whales_transactions(
min_value: int = 800000,
top: int = 100,
sortby: str = "date",
descend: bool = False,
show_address: bool = False,
export: str = "",
) -> None:
"""Display huge value transactions from major blockchains. [Source: https://docs.whale-alert.io/]
Parameters
----------
min_value: int
Minimum value of trade to track.
top: int
Limit of transactions. Maximum 100
sortby: str
Key to sort by.
    descend: bool
Sort in descending order.
show_address: bool
Flag to show addresses of transactions.
export : str
Export dataframe data to csv,json,xlsx file
"""
df = whale_alert_model.get_whales_transactions(min_value)
if df.empty:
console.print("Failed to retrieve data.")
return
df_data = df.copy()
df = df.sort_values(by=sortby, ascending=descend)
if not show_address:
df = df.drop(["from_address", "to_address"], axis=1)
else:
df = df.drop(["from", "to", "blockchain"], axis=1)
for col in ["amount_usd", "amount"]:
df[col] = df[col].apply(lambda x: lambda_long_number_format(x))
print_rich_table(
df.head(top),
headers=list(df.columns),
show_index=False,
title="Large Value Transactions",
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"whales",
df_data,
)
| 25.101266
| 101
| 0.660111
|
__docformat__ = "numpy"
import logging
import os
from openbb_terminal.cryptocurrency.onchain import whale_alert_model
from openbb_terminal.decorators import check_api_key
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
lambda_long_number_format,
print_rich_table,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_WHALE_ALERT_KEY"])
def display_whales_transactions(
min_value: int = 800000,
top: int = 100,
sortby: str = "date",
descend: bool = False,
show_address: bool = False,
export: str = "",
) -> None:
df = whale_alert_model.get_whales_transactions(min_value)
if df.empty:
console.print("Failed to retrieve data.")
return
df_data = df.copy()
df = df.sort_values(by=sortby, ascending=descend)
if not show_address:
df = df.drop(["from_address", "to_address"], axis=1)
else:
df = df.drop(["from", "to", "blockchain"], axis=1)
for col in ["amount_usd", "amount"]:
df[col] = df[col].apply(lambda x: lambda_long_number_format(x))
print_rich_table(
df.head(top),
headers=list(df.columns),
show_index=False,
title="Large Value Transactions",
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"whales",
df_data,
)
| true
| true
|
f71a7324585ada53dbc92d0b00bc1d9b2653e2ad
| 78,121
|
py
|
Python
|
deepspeed/runtime/engine.py
|
LatencyTDH/DeepSpeed
|
eecef309cb12528cfa78d932a6f073afb43847e5
|
[
"MIT"
] | 1
|
2021-04-21T01:14:32.000Z
|
2021-04-21T01:14:32.000Z
|
deepspeed/runtime/engine.py
|
LatencyTDH/DeepSpeed
|
eecef309cb12528cfa78d932a6f073afb43847e5
|
[
"MIT"
] | null | null | null |
deepspeed/runtime/engine.py
|
LatencyTDH/DeepSpeed
|
eecef309cb12528cfa78d932a6f073afb43847e5
|
[
"MIT"
] | null | null | null |
'''
Copyright 2019 The Microsoft DeepSpeed Team
'''
import os
import stat
import torch
import warnings
import hashlib
import torch.distributed as dist
from collections import OrderedDict
from shutil import copyfile
from torch.nn.modules import Module
from torch.distributed.distributed_c10d import _get_global_rank
from tensorboardX import SummaryWriter
from deepspeed.runtime.utils import see_memory_usage
from deepspeed.runtime.zero.stage2 import FP16_DeepSpeedZeroOptimizer
from deepspeed.runtime.zero.stage1 import FP16_DeepSpeedZeroOptimizer_Stage1
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
from deepspeed.runtime.zero.utils import is_zero_supported_optimizer
from deepspeed.runtime.activation_checkpointing import checkpointing as activation_checkpointing
from deepspeed.runtime.fp16.fused_optimizer import FP16_Optimizer
from deepspeed.runtime.fp16.unfused_optimizer import FP16_UnfusedOptimizer
from deepspeed.runtime.config import DeepSpeedConfig, DEEPSPEED_OPTIMIZERS, \
ADAM_OPTIMIZER, ADAMW_OPTIMIZER, LAMB_OPTIMIZER, ONEBIT_ADAM_OPTIMIZER, \
TORCH_ADAM_PARAM, ADAM_W_MODE, ADAM_W_MODE_DEFAULT
from deepspeed.runtime.dataloader import DeepSpeedDataLoader
from deepspeed.runtime.constants import \
ROUTE_TRAIN, ROUTE_PREDICT, ROUTE_EVAL, \
PLD_THETA, PLD_GAMMA
from deepspeed.runtime.zero.constants import \
ZERO_OPTIMIZATION_OPTIMIZER_STATES, ZERO_OPTIMIZATION_GRADIENTS, ZERO_OPTIMIZATION_WEIGHTS
from deepspeed.runtime.csr_tensor import CSRTensor
import deepspeed.runtime.lr_schedules as lr_schedules
from deepspeed.utils import logger, log_dist, init_distributed
from deepspeed.utils.timer import ThroughputTimer, SynchronizedWallClockTimer
from deepspeed.runtime.progressive_layer_drop import ProgressiveLayerDrop
from .pipe.module import PipelineModule
from .utils import ensure_directory_exists
from ..ops.op_builder import UtilsBuilder
from ..ops.adam import DeepSpeedCPUAdam
from ..ops.adam import FusedAdam
from deepspeed.profiling.flops_profiler.profiler import FlopsProfiler
MEMORY_OPT_ALLREDUCE_SIZE = 500000000
try:
from apex import amp
except ImportError:
# Fail silently so we don't spam logs unnecessarily if user isn't using amp
pass
def split_half_float_double_csr(tensors):
dtypes = [
"torch.cuda.HalfTensor",
"torch.cuda.FloatTensor",
"torch.cuda.DoubleTensor",
CSRTensor.type()
]
buckets = []
for i, dtype in enumerate(dtypes):
bucket = [t for t in tensors if t.type() == dtype]
if bucket:
buckets.append((dtype, bucket))
return buckets
def _initialize_parameter_parallel_groups(parameter_parallel_size=None):
data_parallel_size = int(dist.get_world_size())
if parameter_parallel_size is None:
parameter_parallel_size = int(data_parallel_size)
logger.info("data_parallel_size: %s, parameter_parallel_size: %s",
data_parallel_size,
parameter_parallel_size)
assert data_parallel_size % parameter_parallel_size == 0, \
'world size should be divisible by parameter parallel size'
rank = dist.get_rank()
my_group = None
for i in range(dist.get_world_size() // parameter_parallel_size):
ranks = range(i * parameter_parallel_size, (i + 1) * parameter_parallel_size)
group = torch.distributed.new_group(ranks)
if rank in ranks:
my_group = group
return my_group
def print_configuration(args, name):
logger.info('{}:'.format(name))
for arg in sorted(vars(args)):
dots = '.' * (29 - len(arg))
logger.info(' {} {} {}'.format(arg, dots, getattr(args, arg)))
class DeepSpeedEngine(Module):
r"""DeepSpeed engine for training.
"""
def __init__(self,
args,
model,
optimizer=None,
model_parameters=None,
training_data=None,
lr_scheduler=None,
mpu=None,
dist_init_required=None,
collate_fn=None,
config_params=None,
dont_change_device=False):
super(DeepSpeedEngine, self).__init__()
self.dont_change_device = dont_change_device
self.client_optimizer = optimizer
self.client_model_parameters = model_parameters
self.client_lr_scheduler = lr_scheduler
self.training_data = training_data
self.collate_fn = collate_fn
self.mpu = mpu
self.data_parallel_group = None
self.global_steps = 0
self.global_samples = 0
self.micro_steps = 0
self.skipped_steps = 0
self.gradient_average = True
self.warn_unscaled_loss = True
self.config_params = config_params
self.loaded_checkpoint_mp_world_size = None
self.loaded_checkpoint_dp_world_size = None
self.enable_backward_allreduce = True
self.progressive_layer_drop = None
self.dist_backend = "nccl"
if dist_init_required is None:
dist_init_required = not dist.is_initialized()
if dist_init_required is False:
assert dist.is_initialized() is True, "Torch distributed not initialized. Please set dist_init_required to True or initialize before calling deepspeed.initialize()"
else:
# Initialize torch distributed if needed
init_distributed(dist_backend=self.dist_backend)
see_memory_usage(f"DeepSpeed Engine: Before args sanity test")
self._do_args_sanity_check(args)
self._configure_with_arguments(args, mpu)
self._do_sanity_check()
if mpu is not None:
assert not self.elasticity_enabled(), "Elasticity is not currently supported" \
" with model parallelism."
self._set_distributed_vars()
if self.tensorboard_enabled() and self.global_rank == 0:
self.summary_writer = self.get_summary_writer()
see_memory_usage(f"DeepSpeed Engine: Before configure distributed model")
# Configure distributed model
self._configure_distributed_model(model)
see_memory_usage(f"DeepSpeed Engine: After configure distributed model")
# Configure wall clock timer
self.timers = SynchronizedWallClockTimer()
# Throughput timer
self.tput_timer = ThroughputTimer(
batch_size=self.train_micro_batch_size_per_gpu(),
num_workers=self.dp_world_size,
steps_per_output=self.steps_per_print(),
monitor_memory=False)
if training_data:
self.training_dataloader = self.deepspeed_io(training_data)
else:
self.training_dataloader = None
# Configure optimizer and scheduler
self.optimizer = None
self.lr_scheduler = None
if model_parameters or optimizer:
self._configure_optimizer(optimizer, model_parameters)
self._configure_lr_scheduler(lr_scheduler)
self._report_progress(0)
# Bookkeeping for csr support
self.csr_tensor_module_names = set()
if self.sparse_gradients_enabled():
for name, module in self.module.named_modules():
if isinstance(module, torch.nn.Embedding):
self.csr_tensor_module_names.add(name + ".weight")
logger.info("Will convert {} to sparse (csr) "
"tensor during training".format(name))
self.save_non_zero_checkpoint = False
self.save_zero_checkpoint = False
self._configure_checkpointing(dist_init_required)
if self.pld_enabled():
self.progressive_layer_drop = self._configure_progressive_layer_drop()
if self.global_rank == 0:
self._config.print('DeepSpeedEngine configuration')
if self.dump_state():
print_configuration(self, 'DeepSpeedEngine')
# Load pre-installed or JIT compile (un)flatten ops
util_ops = UtilsBuilder().load()
self.flatten = util_ops.flatten
self.unflatten = util_ops.unflatten
def get_batch_info(self):
""" Get all training batch related settings.
Returns:
train_batch_size (int): The effective training batch size. This is the amount of data
samples that leads to one step of model update.
train_micro_batch_size_per_gpu (int): Batch size to be processed by one GPU in one
step (without gradient accumulation).
gradient_accumulation_steps (int): Number of training steps to accumulate gradients
before averaging and applying them.
"""
return self.train_batch_size, self.train_micro_batch_size_per_gpu, self.gradient_accumulation_steps
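    # Illustrative note (not part of the original source): the three values returned
    # above are related by train_batch_size = train_micro_batch_size_per_gpu
    # * gradient_accumulation_steps * data-parallel world size, e.g. a micro batch of 4
    # with 8 accumulation steps on 2 data-parallel GPUs gives an effective batch of 64.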
def checkpoint_tag_validation_enabled(self):
return self._config.checkpoint_tag_validation_enabled
def checkpoint_tag_validation_fail(self):
return self._config.checkpoint_tag_validation_fail
def elasticity_enabled(self):
return self._config.elasticity_enabled
def pld_enabled(self):
return self._config.pld_enabled
def pld_params(self):
return self._config.pld_params
def pld_theta(self):
return self.pld_params()[PLD_THETA]
def pld_gamma(self):
return self.pld_params()[PLD_GAMMA]
def tensorboard_enabled(self):
return self._config.tensorboard_enabled
def tensorboard_output_path(self):
return self._config.tensorboard_output_path
def tensorboard_job_name(self):
return self._config.tensorboard_job_name
def get_summary_writer(self,
name="DeepSpeedJobName",
base=os.path.join(os.path.expanduser("~"),
"tensorboard")):
if self.tensorboard_output_path():
base_dir = self.tensorboard_output_path()
job_name = self.tensorboard_job_name()
log_dir = os.path.join(base_dir, job_name)
else:
if self.tensorboard_job_name():
name = self.tensorboard_job_name()
# Infrastructure-specific job-id
if 'DLWS_JOB_ID' in os.environ:
infra_job_id = os.environ['DLWS_JOB_ID']
elif 'DLTS_JOB_ID' in os.environ:
infra_job_id = os.environ['DLTS_JOB_ID']
else:
infra_job_id = 'unknown-job-id'
summary_writer_dir_name = os.path.join(infra_job_id, "logs")
log_dir = os.path.join(base, summary_writer_dir_name, name)
os.makedirs(log_dir, exist_ok=True)
return SummaryWriter(log_dir=log_dir)
def wall_clock_breakdown(self):
return self._config.wall_clock_breakdown
def flops_profiler_enabled(self):
return self._config.flops_profiler_config.enabled
def flops_profiler_profile_step(self):
return self._config.flops_profiler_config.profile_step
def flops_profiler_module_depth(self):
return self._config.flops_profiler_config.module_depth
def flops_profiler_top_modules(self):
return self._config.flops_profiler_config.top_modules
def flops_profiler_detailed(self):
return self._config.flops_profiler_config.detailed
def memory_breakdown(self):
return self._config.memory_breakdown
def sparse_gradients_enabled(self):
return self._config.sparse_gradients_enabled
def train_batch_size(self):
return self._config.train_batch_size
def train_micro_batch_size_per_gpu(self):
return self._config.train_micro_batch_size_per_gpu
def optimizer_name(self):
return self.client_optimizer.__class__.__name__ if self.client_optimizer else self._config.optimizer_name
def optimizer_params(self):
return self._config.optimizer_params
def optimizer_legacy_fusion(self):
return self._config.optimizer_legacy_fusion
def scheduler_name(self):
return self._config.scheduler_name
def scheduler_params(self):
return self._config.scheduler_params
def zero_optimization(self):
return self._config.zero_enabled
def zero_allow_untested_optimizer(self):
return self._config.zero_allow_untested_optimizer
def zero_reduce_scatter(self):
return self._config.zero_config.reduce_scatter
def zero_overlap_comm(self):
return self._config.zero_config.overlap_comm
def zero_offload_optimizer(self):
return self._config.zero_config.offload_optimizer
def zero_offload_param(self):
return self._config.zero_config.offload_param
def zero_cpu_offload(self):
return self._config.zero_config.offload_optimizer is not None
def zero_sub_group_size(self):
return self._config.zero_config.sub_group_size
def zero_optimization_stage(self):
return self._config.zero_optimization_stage
def zero_reduce_bucket_size(self):
return self._config.zero_config.reduce_bucket_size
def zero_allgather_bucket_size(self):
return self._config.zero_config.allgather_bucket_size
def zero_optimization_partition_gradients(self):
return self.zero_optimization_stage() >= ZERO_OPTIMIZATION_GRADIENTS
def zero_optimization_partition_weights(self):
return self.zero_optimization_stage() >= ZERO_OPTIMIZATION_WEIGHTS
def zero_contiguous_gradients(self):
return self._config.zero_config.contiguous_gradients
def zero_load_from_fp32_weights(self):
return self._config.zero_config.load_from_fp32_weights
def zero_elastic_checkpoint(self):
return self._config.zero_config.elastic_checkpoint
def zero_max_live_parameters(self):
return self._config.zero_config.max_live_parameters
def zero_max_reuse_distance(self):
return self._config.zero_config.max_reuse_distance
def zero_prefetch_bucket_size(self):
return self._config.zero_config.prefetch_bucket_size
def zero_param_persistence_threshold(self):
return self._config.zero_config.param_persistence_threshold
def zero_gather_fp16_weights_on_model_save(self):
return self._config.zero_config.gather_fp16_weights_on_model_save
def fp16_enabled(self):
return self._config.fp16_enabled
def amp_enabled(self):
return self._config.amp_enabled
def amp_params(self):
return self._config.amp_params
def loss_scale(self):
return self._config.loss_scale
def gradient_accumulation_steps(self):
return self._config.gradient_accumulation_steps
def allreduce_always_fp32(self):
return self._config.allreduce_always_fp32
def postscale_gradients(self):
return not self._config.prescale_gradients
def gradient_predivide_factor(self):
return self._config.gradient_predivide_factor
def steps_per_print(self):
return self._config.steps_per_print
def zero_allgather_partitions(self):
return self._config.zero_config.allgather_partitions
def dump_state(self):
return self._config.dump_state
def gradient_clipping(self):
return self._config.gradient_clipping
def dynamic_loss_scale(self):
return self._config.loss_scale == 0
def initial_dynamic_scale(self):
return self._config.initial_dynamic_scale
def dynamic_loss_scale_args(self):
return self._config.dynamic_loss_scale_args
def swap_tensor_config(self):
return self._config.swap_tensor_config
def aio_config(self):
return self._config.aio_config
def _configure_lr_scheduler(self, client_lr_scheduler):
# First check for scheduler in json configuration
lr_scheduler = self._scheduler_from_config(self.optimizer)
if lr_scheduler:
if self.global_rank == 0:
logger.info(
f'DeepSpeed using configured LR scheduler = {self.scheduler_name()}')
self.lr_scheduler = lr_scheduler
else:
if self.global_rank == 0:
logger.info('DeepSpeed using client LR scheduler')
self.lr_scheduler = client_lr_scheduler
log_dist(f'DeepSpeed LR Scheduler = {self.lr_scheduler}', ranks=[0])
def _configure_checkpointing(self, dist_init_required):
dp_rank = self.global_rank
if self.mpu:
dp_rank = self.mpu.get_data_parallel_rank()
# only the first data parallel process needs to store the model checkpoint
self.save_non_zero_checkpoint = (
dp_rank == 0) or self.zero_optimization_partition_weights()
if self.zero_optimization():
param_rank = torch.distributed.get_rank(
group=self.optimizer.dp_process_group)
# Only the first parameter parallel process needs to store the
# optimizer state checkpoints for zero
self.save_zero_checkpoint = (param_rank == dp_rank)
def _scheduler_from_config(self, optimizer):
scheduler_name = self.scheduler_name()
if scheduler_name is not None:
if hasattr(lr_schedules, scheduler_name):
scheduler = getattr(lr_schedules, scheduler_name)
else:
assert hasattr(torch.optim.lr_scheduler, scheduler_name), \
f"DeepSpeed does not recognize LR scheduler {scheduler_name}"
scheduler = getattr(torch.optim.lr_scheduler, scheduler_name)
scheduler_params = self.scheduler_params()
instantiated_scheduler = scheduler(optimizer, **scheduler_params)
return instantiated_scheduler
else:
return None
def _set_distributed_vars(self):
if self.local_rank >= 0:
torch.cuda.set_device(self.local_rank)
self.device = torch.device("cuda", self.local_rank)
self.world_size = dist.get_world_size()
self.global_rank = dist.get_rank()
else:
self.world_size = 1
self.global_rank = 0
self.device = torch.device("cuda")
# Configure based on command line arguments
def _configure_with_arguments(self, args, mpu):
# After the distributed backend is initialized we are guaranteed the LOCAL_RANK
# environment variable is set. We must align args.local_rank to this value for
        # backwards compatibility with scripts relying on [args|self].local_rank containing
# the correct local rank info. _do_args_sanity_check will ensure this is the case.
self.local_rank = int(os.environ['LOCAL_RANK'])
if hasattr(args, 'local_rank'):
args.local_rank = self.local_rank
config_file = args.deepspeed_config if hasattr(args,
'deepspeed_config') else None
self._config = DeepSpeedConfig(config_file, mpu, param_dict=self.config_params)
# Validate command line arguments
def _do_args_sanity_check(self, args):
if hasattr(args, 'deepscale_config') and args.deepscale_config is not None:
logger.warning(
"************ --deepscale_config is deprecated, please use --deepspeed_config ************"
)
if hasattr(args, 'deepspeed_config'):
assert args.deepspeed_config is None, "Not sure how to proceed, we were given both a deepscale_config and deepspeed_config"
args.deepspeed_config = args.deepscale_config
assert "LOCAL_RANK" in os.environ, "DeepSpeed requires the LOCAL_RANK environment variable, it is set by the deepspeed launcher, " \
"deepspeed.init_distributed, or the torch.distributed launcher. If using a different launcher please ensure LOCAL_RANK is set prior to initializing deepspeed."
if hasattr(args, 'local_rank') and args.local_rank != None:
assert isinstance(args.local_rank, int), f"args.local_rank of {args.local_rank} is an unknown type {type(args.local_rank)}"
if args.local_rank >= 0:
env_local_rank = int(os.environ.get("LOCAL_RANK"))
assert env_local_rank == args.local_rank, \
f"Mismatch in local rank setting, args.local_rank={args.local_rank} but env['LOCAL_RANK']={env_local_rank}."
if self.config_params is None:
assert hasattr(args, 'deepspeed_config') and args.deepspeed_config is not None, \
'DeepSpeed requires --deepspeed_config to specify configuration file'
assert os.path.isfile(args.deepspeed_config), \
'DeepSpeed configuration file: {} is not an existing file'.format(args.deepspeed_config)
def _is_supported_optimizer(self, optimizer_name):
return optimizer_name in DEEPSPEED_OPTIMIZERS or \
getattr(torch.optim, optimizer_name, None) is not None
# Validate configuration based on command line arguments
def _do_sanity_check(self):
if not self.client_optimizer:
if self.optimizer_name() is not None:
assert self._is_supported_optimizer(self.optimizer_name()), \
'{} is not a supported DeepSpeed Optimizer'.format(self.optimizer_name())
if self.optimizer_name() == LAMB_OPTIMIZER:
assert self.dynamic_loss_scale(), \
'DeepSpeed {} optimizer requires dynamic loss scaling'.format(self.optimizer_name())
def _broadcast_model(self):
def is_replicated(p):
if hasattr(p, 'ds_status') and p.ds_status is not ZeroParamStatus.AVAILABLE:
return False
return True
for p in self.module.parameters():
if torch.is_tensor(p) and is_replicated(p):
dist.broadcast(p,
self.broadcast_src_rank,
group=self.data_parallel_group)
def _configure_distributed_model(self, model):
self.module = model
if self.fp16_enabled():
self.module.half()
if not self.dont_change_device:
self.module.to(self.device)
if self.mpu is None:
self.data_parallel_group = _initialize_parameter_parallel_groups()
self.dp_world_size = dist.get_world_size()
self.mp_world_size = 1
self.broadcast_src_rank = 0
else:
self.data_parallel_group = self.mpu.get_data_parallel_group()
self.dp_world_size = self.mpu.get_data_parallel_world_size()
self.mp_world_size = self.mpu.get_model_parallel_world_size()
self.broadcast_src_rank = _get_global_rank(
self.mpu.get_data_parallel_group(),
0)
if not self.amp_enabled():
self._broadcast_model()
# Configure optimizer
def _configure_optimizer(self, client_optimizer, model_parameters):
if client_optimizer is not None:
client_optimizer.param_groups[:] = [
pg for pg in client_optimizer.param_groups if len(pg["params"]) != 0
]
if self.global_rank == 0:
logger.info(
"Removing param_group that has no 'params' in the client Optimizer")
basic_optimizer = client_optimizer
if self.global_rank == 0:
logger.info('Using client Optimizer as basic optimizer')
else:
basic_optimizer = self._configure_basic_optimizer(model_parameters)
if self.global_rank == 0:
logger.info(
'Using DeepSpeed Optimizer param name {} as basic optimizer'.format(
self.optimizer_name()))
if self.global_rank == 0:
logger.info('DeepSpeed Basic Optimizer = {}'.format(
basic_optimizer.__class__.__name__))
if self.zero_optimization():
assert not self.amp_enabled(), "Amp and ZeRO are not currently compatible, please use (legacy) fp16 mode which performs similar to amp opt_mode=O2"
if not is_zero_supported_optimizer(basic_optimizer):
assert self.zero_allow_untested_optimizer(), \
'You are using an untested ZeRO Optimizer. Please add <"zero_allow_untested_optimizer": true> in the configuration file to use it.'
if self.global_rank == 0:
logger.warning(
"**** You are using ZeRO with an untested optimizer, proceed with caution *****"
)
self.optimizer = self._configure_zero_optimizer(basic_optimizer)
elif self.amp_enabled():
assert not self.fp16_enabled(), "Cannot enable both amp with (legacy) fp16 mode"
amp_params = self.amp_params()
if self.global_rank == 0:
logger.info(f"Initializing AMP with these params: {amp_params}")
try:
logger.info("Initializing Apex amp from: {}".format(amp.__path__))
except NameError:
# If apex/amp is available it will be imported above
raise RuntimeError(
"Unable to import apex/amp, please make sure it is installed")
self.module, self.optimizer = amp.initialize(self.module, basic_optimizer, **amp_params)
self._broadcast_model()
elif self.fp16_enabled():
self.optimizer = self._configure_fp16_optimizer(basic_optimizer)
else:
self.optimizer = basic_optimizer
log_dist('DeepSpeed Final Optimizer = {}'.format(self.optimizer_name()),
ranks=[0])
def _configure_basic_optimizer(self, model_parameters):
optimizer_parameters = self.optimizer_params()
# print(optimizer_parameters.keys())
if 'max_grad_norm' in optimizer_parameters.keys():
raise ValueError(
"'max_grad_norm' is not supported as an optimizer parameter, please switch to using the deepspeed parameter 'gradient_clipping' see: https://www.deepspeed.ai/docs/config-json/#gradient-clipping for more details"
)
if self.optimizer_name() in [ADAM_OPTIMIZER, ADAMW_OPTIMIZER]:
torch_adam = optimizer_parameters.pop(TORCH_ADAM_PARAM, False)
adam_w_mode = optimizer_parameters.pop(ADAM_W_MODE, ADAM_W_MODE_DEFAULT)
            # Optimizer name of Adam forces AdamW logic unless adam_w_mode is explicitly set
effective_adam_w_mode = self.optimizer_name(
) == ADAMW_OPTIMIZER or adam_w_mode
if torch_adam:
if not effective_adam_w_mode:
optimizer = torch.optim.Adam(model_parameters,
**optimizer_parameters)
else:
optimizer = torch.optim.AdamW(model_parameters,
**optimizer_parameters)
else:
if self.zero_cpu_offload():
from deepspeed.ops.adam import DeepSpeedCPUAdam
optimizer = DeepSpeedCPUAdam(model_parameters,
**optimizer_parameters,
adamw_mode=effective_adam_w_mode)
else:
from deepspeed.ops.adam import FusedAdam
optimizer = FusedAdam(model_parameters,
**optimizer_parameters,
adam_w_mode=effective_adam_w_mode)
elif self.optimizer_name() == LAMB_OPTIMIZER:
from deepspeed.ops.lamb import FusedLamb
optimizer = FusedLamb(model_parameters, **optimizer_parameters)
elif self.optimizer_name() == ONEBIT_ADAM_OPTIMIZER:
from deepspeed.runtime.fp16.onebit.adam import OnebitAdam
optimizer = OnebitAdam(model_parameters, self, **optimizer_parameters)
if not self.fp16_enabled():
logger.warning(
f'Currently the convergence of 1-bit Adam is only verified under FP16'
)
else:
torch_optimizer = getattr(torch.optim, self.optimizer_name())
optimizer = torch_optimizer(model_parameters, **optimizer_parameters)
return optimizer
def _configure_fp16_optimizer(self, optimizer):
initial_dynamic_scale = self.initial_dynamic_scale()
dynamic_loss_args = self.dynamic_loss_scale_args()
clip_grad = self.gradient_clipping()
if isinstance(optimizer,
FusedAdam) or self.optimizer_name() == ONEBIT_ADAM_OPTIMIZER:
if self.dynamic_loss_scale():
log_dist('Creating fp16 optimizer with dynamic loss scale', ranks=[0])
timers = self.timers if self.wall_clock_breakdown() else None
optimizer = FP16_Optimizer(
optimizer,
dynamic_loss_scale=True,
initial_dynamic_scale=initial_dynamic_scale,
dynamic_loss_args=dynamic_loss_args,
mpu=self.mpu,
clip_grad=clip_grad,
fused_adam_legacy=self.optimizer_legacy_fusion(),
timers=timers)
else:
log_dist('Creating fp16 optimizer with static loss scale: {}'.format(
self.loss_scale()),
ranks=[0])
optimizer = FP16_Optimizer(
optimizer,
static_loss_scale=self.loss_scale(),
mpu=self.mpu,
clip_grad=clip_grad,
fused_adam_legacy=self.optimizer_legacy_fusion())
else:
log_dist('Creating fp16 unfused optimizer with dynamic loss scale',
ranks=[0])
optimizer = FP16_UnfusedOptimizer(
optimizer,
static_loss_scale=self.loss_scale(),
dynamic_loss_scale=self.dynamic_loss_scale(),
dynamic_loss_args=dynamic_loss_args,
mpu=self.mpu,
clip_grad=clip_grad,
fused_lamb_legacy=self.optimizer_name() == LAMB_OPTIMIZER)
return optimizer
def _configure_zero_optimizer(self, optimizer):
zero_stage = self.zero_optimization_stage()
log_dist('Creating fp16 ZeRO stage {} optimizer'.format(zero_stage), ranks=[0])
assert not self.allreduce_always_fp32(), "ZeRO does not support 'fp32_allreduce': true"
timers = self.timers if self.wall_clock_breakdown() else None
if zero_stage == ZERO_OPTIMIZATION_OPTIMIZER_STATES:
assert self.zero_reduce_scatter(), 'Stage 1 only supports reduce scatter mode'
optimizer = FP16_DeepSpeedZeroOptimizer_Stage1(
optimizer,
static_loss_scale=self.loss_scale(),
dynamic_loss_scale=self.dynamic_loss_scale(),
dynamic_loss_args=self.dynamic_loss_scale_args(),
clip_grad=self.gradient_clipping(),
all_gather_partitions=self.zero_allgather_partitions(),
allgather_size=self.zero_allgather_bucket_size(),
max_elements_per_comm=self.zero_reduce_bucket_size(),
dp_process_group=self.data_parallel_group,
elastic_checkpoint=self.zero_elastic_checkpoint(),
mpu=self.mpu)
elif zero_stage == ZERO_OPTIMIZATION_GRADIENTS:
optimizer = FP16_DeepSpeedZeroOptimizer(
optimizer,
timers=timers,
static_loss_scale=self.loss_scale(),
dynamic_loss_scale=self.dynamic_loss_scale(),
dynamic_loss_args=self.dynamic_loss_scale_args(),
clip_grad=self.gradient_clipping(),
contiguous_gradients=self.zero_contiguous_gradients(),
reduce_bucket_size=self.zero_reduce_bucket_size(),
allgather_bucket_size=self.zero_allgather_bucket_size(),
dp_process_group=self.data_parallel_group,
reduce_scatter=self.zero_reduce_scatter(),
overlap_comm=self.zero_overlap_comm(),
cpu_offload=self.zero_cpu_offload(),
mpu=self.mpu,
postscale_gradients=self.postscale_gradients(),
gradient_predivide_factor=self.gradient_predivide_factor(),
gradient_accumulation_steps=self.gradient_accumulation_steps())
elif zero_stage == ZERO_OPTIMIZATION_WEIGHTS:
print("Initializing ZeRO Stage 3") if dist.get_rank() == 0 else None
from deepspeed.runtime.zero.stage3 import FP16_DeepSpeedZeroOptimizer_Stage3
optimizer = FP16_DeepSpeedZeroOptimizer_Stage3(
self.module,
optimizer,
timers=timers,
static_loss_scale=self.loss_scale(),
dynamic_loss_scale=self.dynamic_loss_scale(),
dynamic_loss_args=self.dynamic_loss_scale_args(),
clip_grad=self.gradient_clipping(),
contiguous_gradients=self.zero_contiguous_gradients(),
reduce_bucket_size=self.zero_reduce_bucket_size(),
prefetch_bucket_size=self.zero_prefetch_bucket_size(),
max_reuse_distance=self.zero_max_reuse_distance(),
max_live_parameters=self.zero_max_live_parameters(),
param_persistence_threshold=self.zero_param_persistence_threshold(),
dp_process_group=self.data_parallel_group,
reduce_scatter=self.zero_reduce_scatter(),
overlap_comm=self.zero_overlap_comm(),
offload_optimizer_config=self.zero_offload_optimizer(),
offload_param_config=self.zero_offload_param(),
sub_group_size=self.zero_sub_group_size(),
mpu=self.mpu,
postscale_gradients=self.postscale_gradients(),
gradient_predivide_factor=self.gradient_predivide_factor(),
gradient_accumulation_steps=self.gradient_accumulation_steps(),
aio_config=self.aio_config())
else:
raise NotImplementedError("ZeRO stage {} not implemented".format(zero_stage))
return optimizer
def _configure_progressive_layer_drop(self):
pld = ProgressiveLayerDrop(theta=self.pld_theta(), gamma=self.pld_gamma())
return pld
def deepspeed_io(self,
dataset,
batch_size=None,
route=ROUTE_TRAIN,
pin_memory=True,
data_sampler=None,
collate_fn=None,
num_local_io_workers=None):
if not isinstance(dataset, torch.utils.data.Dataset):
raise ValueError("Training data must be a torch Dataset")
if data_sampler is None and (route == ROUTE_PREDICT or route == ROUTE_EVAL):
data_sampler = torch.utils.data.SequentialSampler(dataset)
if batch_size is None:
batch_size = self.train_micro_batch_size_per_gpu()
if collate_fn is None:
collate_fn = self.collate_fn
# Currently we only use timer in train route
deepspeed_io_timer = None
if route == ROUTE_TRAIN:
deepspeed_io_timer = self.tput_timer
        # If mpu is provided, forward world size and parallel rank to sampler.
data_parallel_world_size = None
data_parallel_rank = None
if self.mpu is not None:
data_parallel_world_size = self.mpu.get_data_parallel_world_size()
data_parallel_rank = self.mpu.get_data_parallel_rank()
return DeepSpeedDataLoader(dataset=dataset,
batch_size=batch_size,
pin_memory=pin_memory,
collate_fn=collate_fn,
local_rank=self.local_rank,
tput_timer=deepspeed_io_timer,
num_local_io_workers=num_local_io_workers,
data_sampler=data_sampler,
data_parallel_world_size=data_parallel_world_size,
data_parallel_rank=data_parallel_rank)
def train(self, mode=True):
r"""
"""
self.warn_unscaled_loss = True
self.module.train(mode)
def eval(self):
r"""
"""
self.warn_unscaled_loss = True
self.module.train(False)
def _scale_loss(self, prescaled_loss):
if isinstance(prescaled_loss, torch.Tensor):
scaled_loss = prescaled_loss / self.gradient_accumulation_steps()
elif isinstance(prescaled_loss, tuple) or isinstance(prescaled_loss, list):
scaled_loss = []
for l in prescaled_loss:
if isinstance(l, torch.Tensor):
scaled_loss.append(l / self.gradient_accumulation_steps())
else:
scaled_loss.append(l)
else:
scaled_loss = prescaled_loss
if self.warn_unscaled_loss:
logger.warning(
f'DeepSpeed unable to scale loss because of type: {type(prescaled_loss)}'
)
self.warn_unscaled_loss = False
return scaled_loss
def forward(self, *inputs, **kwargs):
r"""Execute forward propagation
Arguments:
*inputs: Variable length input list
**kwargs: variable length keyword arguments
"""
if self.flops_profiler_enabled(
) and self.global_steps == self.flops_profiler_profile_step(
) and self.global_rank == 0:
self.flops_profiler = FlopsProfiler(self.module)
self.flops_profiler.start_profile(ignore_list=None)
if self.module.training and self.progressive_layer_drop:
kwargs.update(self.progressive_layer_drop.get_state())
if self.zero_optimization_partition_weights():
# Enable automated discovery of external parameters by indicating that
# we are in a forward pass.
for module in self.module.modules():
module._parameters._in_forward = True
pass
if self.wall_clock_breakdown():
self.timers('forward_microstep').start()
self.timers('forward').start()
if self.training_dataloader is None:
self.tput_timer.start()
loss = self.module(*inputs, **kwargs)
if self.zero_optimization_partition_weights():
# Reset the ZeRO-3 state if we are only doing forward-passes (ie evaluation).
if not torch._C.is_grad_enabled():
self.optimizer.param_coordinator.reset_step()
# Disable automated discovery of external parameters
for module in self.module.modules():
module._parameters._in_forward = False
if self.wall_clock_breakdown():
self.timers('forward').stop()
self.timers('forward_microstep').stop()
if self.flops_profiler_enabled(
) and self.global_steps == self.flops_profiler_profile_step(
) and self.global_rank == 0:
self.flops_profiler.print_model_profile(
profile_step=self.global_steps,
module_depth=self.flops_profiler_module_depth(),
top_modules=self.flops_profiler_top_modules(),
detailed=self.flops_profiler_detailed())
self.flops_profiler.end_profile()
return loss
def allreduce_gradients(self, bucket_size=MEMORY_OPT_ALLREDUCE_SIZE):
#Zero stage 2 communicates during non gradient accumulation boundaries as well
if self.zero_optimization_partition_gradients():
self.optimizer.overlapping_partition_gradients_reduce_epilogue()
#Communicate only at gradient accumulation boundaries
elif self.is_gradient_accumulation_boundary():
if self.zero_optimization_stage() == ZERO_OPTIMIZATION_OPTIMIZER_STATES:
assert self.zero_reduce_scatter()
self.optimizer.reduce_scatter_gradients(
postscale_gradients=self.postscale_gradients(),
gradient_predivide_factor=self.gradient_predivide_factor(),
gradient_average=self.gradient_average)
else:
self.buffered_allreduce_fallback(elements_per_buffer=bucket_size)
def backward(self, loss, allreduce_gradients=True, release_loss=False):
r"""Execute backward pass on the loss
Arguments:
loss: Torch tensor on which to execute backward propagation
            allreduce_gradients: is deprecated, ignored, and will soon be removed
"""
if not allreduce_gradients:
logger.warning(
f'Argument `allreduce_gradients` is deprecated, ignored, and will soon be removed'
)
# scale loss w.r.t. gradient accumulation if needed
if self.gradient_accumulation_steps() > 1:
loss = self._scale_loss(loss.float())
# Log training Loss
if self.tensorboard_enabled():
if self.is_gradient_accumulation_boundary():
if self.global_rank == 0:
self.summary_events = [
(f'Train/Samples/train_loss',
loss.mean().item() * self.gradient_accumulation_steps(),
self.global_samples)
]
for event in self.summary_events: # write_summary_events
self.summary_writer.add_scalar(event[0], event[1], event[2])
self.summary_writer.flush()
if self.wall_clock_breakdown():
self.timers('backward_microstep').start()
self.timers('backward').start()
assert self.optimizer is not None, "must provide optimizer during " \
"init in order to use backward"
if self.wall_clock_breakdown():
self.timers('backward_inner_microstep').start()
self.timers('backward_inner').start()
if self.zero_optimization():
self.optimizer.is_gradient_accumulation_boundary = self.is_gradient_accumulation_boundary(
)
self.optimizer.backward(loss)
elif self.amp_enabled():
# AMP requires delaying unscale when inside gradient accumulation boundaries
# https://nvidia.github.io/apex/advanced.html#gradient-accumulation-across-iterations
delay_unscale = not self.is_gradient_accumulation_boundary()
with amp.scale_loss(loss,
self.optimizer,
delay_unscale=delay_unscale) as scaled_loss:
scaled_loss.backward()
elif self.fp16_enabled():
self.optimizer.backward(loss)
else:
loss.backward()
if self.wall_clock_breakdown():
self.timers('backward_inner').stop()
self.timers('backward_inner_microstep').stop()
if self.wall_clock_breakdown():
self.timers('backward_allreduce_microstep').start()
self.timers('backward_allreduce').start()
if self.enable_backward_allreduce:
self.allreduce_gradients()
if self.wall_clock_breakdown():
self.timers('backward_allreduce').stop()
self.timers('backward_allreduce_microstep').stop()
self.timers('backward').stop()
self.timers('backward_microstep').stop()
if release_loss:
# loss.data = None
pass
return loss
def is_gradient_accumulation_boundary(self):
"""Query whether the current micro-batch is at the boundary of
gradient accumulation, and thus will trigger gradient reductions and
an optimizer step.
Returns:
bool: if the current step is a gradient accumulation boundary.
"""
return (self.micro_steps + 1) % \
self.gradient_accumulation_steps() == 0
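    # Example (illustrative, not from the original source): with
    # gradient_accumulation_steps() == 4, micro_steps 0, 1 and 2 return False here,
    # while micro_step 3 returns True and triggers the reduction/optimizer step.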
def zero_grad(self):
"""
Zero parameter grads.
"""
for param_name, param in self.module.named_parameters():
param.grad = None
def clip_fp32_gradients(self):
torch.nn.utils.clip_grad_norm_(parameters=self.module.parameters(),
max_norm=self.gradient_clipping())
def _take_model_step(self, lr_kwargs):
if self.gradient_clipping() > 0.0:
if not self.fp16_enabled() and not self.amp_enabled():
self.clip_fp32_gradients()
elif self.amp_enabled():
# AMP's recommended way of doing clipping
# https://nvidia.github.io/apex/advanced.html#gradient-clipping
master_params = amp.master_params(self.optimizer)
torch.nn.utils.clip_grad_norm_(parameters=master_params,
max_norm=self.gradient_clipping())
self.optimizer.step()
#zero grad in basic optimizer could be unreliable and may not exhibit
#the behaviour that we want
if not self.zero_optimization() and not self.fp16_enabled(
) and not self.amp_enabled():
self.zero_grad()
else:
self.optimizer.zero_grad()
report_progress = self.global_rank == 0 if self.global_rank else True
        # Check overflow here since in DS fp16 optimizer, the overflow is updated in above step() function.
overflow = False
if hasattr(self.optimizer, 'overflow'):
overflow = self.optimizer.overflow
if overflow:
self.skipped_steps += 1
else:
if self.lr_scheduler is not None:
self.lr_scheduler.step(**(lr_kwargs or {}))
if report_progress and (self.global_steps + 1) % self.steps_per_print() == 0:
self._report_progress(self.global_steps + 1)
self.global_steps += 1
self.global_samples += self.train_batch_size()
def step(self, lr_kwargs=None):
r"""Execute the weight update step after forward and backward propagation
on effective_train_batch.
"""
if self.wall_clock_breakdown():
self.timers('step_microstep').start()
self.timers('step').start()
assert self.optimizer is not None, "must provide optimizer during " \
"init in order to use step"
report_progress = self.global_rank == 0 if self.global_rank else True
# Update the model when we reach gradient accumulation boundaries
if self.is_gradient_accumulation_boundary():
if self.progressive_layer_drop:
self.progressive_layer_drop.update_state(self.global_steps)
self._take_model_step(lr_kwargs)
self.tput_timer.stop(report_progress)
# Log learning rate
if self.tensorboard_enabled():
if self.is_gradient_accumulation_boundary():
if self.global_rank == 0:
self.summary_events = [(f'Train/Samples/lr',
self.get_lr()[0],
self.global_samples)]
for event in self.summary_events: # write_summary_events
self.summary_writer.add_scalar(event[0], event[1], event[2])
if self.fp16_enabled() and hasattr(self.optimizer, 'cur_scale'):
self.summary_events.append((f'Train/Samples/loss_scale',
self.optimizer.cur_scale,
self.global_samples))
for event in self.summary_events: # write_summary_events
self.summary_writer.add_scalar(event[0], event[1], event[2])
self.summary_writer.flush()
if self.wall_clock_breakdown():
self.timers('step').stop()
self.timers('step_microstep').stop()
timer_names = [
'forward_microstep',
'backward_microstep',
'backward_inner_microstep',
'backward_allreduce_microstep',
'step_microstep'
]
self.timers.log(names=timer_names, memory_breakdown=self.memory_breakdown())
# Log timing
if self.is_gradient_accumulation_boundary():
if self.tensorboard_enabled():
if self.global_rank == 0:
self.summary_events = [
(f'Train/Samples/elapsed_time_ms_forward',
self.timers('forward').elapsed(reset=False) * 1000.0,
self.global_samples),
(f'Train/Samples/elapsed_time_ms_backward',
self.timers('backward').elapsed(reset=False) * 1000.0,
self.global_samples),
(f'Train/Samples/elapsed_time_ms_backward_inner',
self.timers('backward_inner').elapsed(reset=False) * 1000.0,
self.global_samples),
(f'Train/Samples/elapsed_time_ms_backward_allreduce',
self.timers('backward_allreduce').elapsed(reset=False) *
1000.0,
self.global_samples),
(f'Train/Samples/elapsed_time_ms_step',
self.timers('step').elapsed(reset=False) * 1000.0,
self.global_samples)
]
for event in self.summary_events: # write_summary_events
self.summary_writer.add_scalar(event[0], event[1], event[2])
self.summary_writer.flush()
if self.wall_clock_breakdown():
self.timers.log([
'forward',
'backward',
'backward_inner',
'backward_allreduce',
'step'
])
self.micro_steps += 1
def _get_optimizer_param(self, param_name):
result = []
if not self.optimizer:
return result
for group in self.optimizer.param_groups:
if param_name in group:
result.append(group[param_name])
else:
result.append(0.0)
return result
def get_lr(self):
return self._get_optimizer_param('lr')
def get_type(self):
return self._get_optimizer_param('type')
def get_mom(self):
if self.optimizer_name() in ['SGD', 'RMSprop']:
return self._get_optimizer_param('momentum')
else:
return self._get_optimizer_param('betas')
def get_pld_theta(self):
if self.progressive_layer_drop:
return self.progressive_layer_drop.get_theta()
else:
return None
def _report_progress(self, step):
lr = self.get_lr()
mom = self.get_mom()
log_dist(f'step={step}, skipped={self.skipped_steps}, lr={lr}, mom={mom}',
ranks=[0])
def allreduce_bucket(self, bucket):
tensor = self.flatten(bucket)
tensor_to_allreduce = tensor
if self.allreduce_always_fp32():
tensor_to_allreduce = tensor.float()
if self.postscale_gradients():
if self.gradient_predivide_factor() != 1.0:
tensor_to_allreduce.mul_(1. / self.gradient_predivide_factor())
dist.all_reduce(tensor_to_allreduce, group=self.data_parallel_group)
if self.gradient_average:
if self.gradient_predivide_factor() != self.dp_world_size:
tensor_to_allreduce.mul_(self.gradient_predivide_factor() /
self.dp_world_size)
else:
tensor_to_allreduce.div_(self.dp_world_size)
dist.all_reduce(tensor_to_allreduce, group=self.data_parallel_group)
if self.allreduce_always_fp32() and tensor is not tensor_to_allreduce:
tensor.copy_(tensor_to_allreduce)
return tensor
def allreduce_and_copy(self, small_bucket):
allreduced = self.allreduce_bucket(small_bucket)
for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)):
buf.copy_(synced)
def allreduce_no_retain(self, bucket, numel_per_bucket=500000000):
small_bucket = []
numel = 0
for tensor in bucket:
small_bucket.append(tensor)
numel = numel + tensor.numel()
if numel > numel_per_bucket:
self.allreduce_and_copy(small_bucket)
small_bucket = []
numel = 0
if len(small_bucket) > 0:
self.allreduce_and_copy(small_bucket)
def buffered_allreduce_fallback(self, grads=None, elements_per_buffer=500000000):
grads = []
for param_name, param in self.module.named_parameters():
if param.grad is None:
# In cases where there is an imbalance of empty grads across
# ranks we must create empty grads; this ensures that every
# rank is reducing the same size. In some cases it may make
# sense in the future to support the ability to average not
# w.r.t. world size but with a different value.
param.grad = torch.zeros(param.size(),
dtype=param.dtype,
device=param.device)
grads.append(param.grad.data)
else:
grad_data = param.grad.data
if self.sparse_gradients_enabled(
) and param_name in self.csr_tensor_module_names:
grads.append(CSRTensor(grad_data))
else:
grads.append(grad_data)
split_buckets = split_half_float_double_csr(grads)
for i, bucket_tuple in enumerate(split_buckets):
bucket_type, bucket = bucket_tuple
if bucket_type == CSRTensor.type():
self.csr_allreduce_no_retain(bucket)
else:
self.allreduce_no_retain(bucket, numel_per_bucket=elements_per_buffer)
def csr_allreduce_no_retain(self, bucket):
allreduced_csrs = self.csr_allreduce_bucket(bucket)
# Densify csr tensor and copy back to original location
for csr in allreduced_csrs:
dense_tensor = csr.to_dense()
csr.orig_dense_tensor.copy_(dense_tensor)
def csr_allreduce_bucket(self, bucket):
csr_list = []
for csr in bucket:
csr_list.append(self.csr_allreduce(csr))
return csr_list
def csr_allreduce(self, csr):
# Pre-divide for fp16 stability
csr.values.div_(self.dp_world_size)
indices_device_list = self.csr_all_gather(csr.indices)
values_device_list = self.csr_all_gather(csr.values)
csr.indices = torch.cat(indices_device_list)
csr.values = torch.cat(values_device_list)
return csr
def csr_all_gather(self, value):
my_size = torch.LongTensor([value.size()[0]]).to(self.device)
all_sizes = self.all_gather_scalar(my_size)
max_size = torch.cat(all_sizes).max()
fill_size = (max_size - my_size)
assert value.dim() in [1, 2]
if value.dim() == 1:
if fill_size > 0:
value = torch.cat([value, value.new_zeros(fill_size)])
tensor_list = [value.new_zeros(max_size) for _ in range(self.dp_world_size)]
else:
if fill_size > 0:
value = torch.cat([value, value.new_zeros(fill_size, value.size()[1])])
tensor_list = [
value.new_zeros(max_size,
value.size()[1]) for _ in range(self.dp_world_size)
]
dist.all_gather(tensor_list, value, group=self.data_parallel_group)
tensors = []
for dev_idx, t in enumerate(tensor_list):
size = all_sizes[dev_idx][0]
tensors.append(
t.index_select(0,
torch.LongTensor(range(size)).to(self.device)))
return tensors
def all_gather_scalar(self, value):
tensor_list = [value.new_zeros(value.size()) for _ in range(self.dp_world_size)]
dist.all_gather(tensor_list, value, group=self.data_parallel_group)
return tensor_list
def module_state_dict(self, destination=None, prefix='', keep_vars=False):
sd = self.module.state_dict(destination, prefix, keep_vars)
return sd
def load_module_state_dict(self, state_dict, strict=True):
self.module.load_state_dict(state_dict, strict=strict)
def _get_rank_zero_ckpt_name(self, checkpoints_path, tag, mp_rank, dp_rank):
filename = 'zero_pp_rank_{}'.format(dp_rank)
zero_ckpt_name = os.path.join(
checkpoints_path,
str(tag),
filename + '_mp_rank_{:02d}'.format(mp_rank) + '_optim_states.pt')
return zero_ckpt_name
def _get_zero_ckpt_name(self, checkpoints_path, tag):
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
pp_rank = torch.distributed.get_rank(group=self.optimizer.dp_process_group)
return self._get_rank_zero_ckpt_name(checkpoints_path, tag, mp_rank, pp_rank)
def _get_ckpt_name(self, checkpoints_path, tag):
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
if self.zero_optimization_partition_weights():
filename = 'zero_pp_rank_{}'.format(
torch.distributed.get_rank(group=self.optimizer.dp_process_group))
ckpt_name = os.path.join(
checkpoints_path,
str(tag),
filename + '_mp_rank_{:02d}'.format(mp_rank) + '_model_states.pt')
else:
ckpt_name = os.path.join(
checkpoints_path,
str(tag),
'mp_rank_{:02d}'.format(mp_rank) + '_model_states.pt')
return ckpt_name
def load_checkpoint(self,
load_dir,
tag=None,
load_module_strict=True,
load_optimizer_states=True,
load_lr_scheduler_states=True):
"""Load training checkpoint
Arguments:
load_dir: Required. Directory to load the checkpoint from
tag: Optional. Checkpoint tag used as a unique identifier for the checkpoint; if not provided, the tag stored in the 'latest' file is loaded.
load_module_strict: Optional. Boolean to strictly enforce that the keys in the state_dict of the module and the checkpoint match.
load_optimizer_states: Optional. Boolean to load the training optimizer states from the checkpoint, e.g. ADAM's momentum and variance.
load_lr_scheduler_states: Optional. Boolean to load the learning rate scheduler states from the checkpoint.
Returns:
A tuple of ``load_path`` and ``client_state``.
* ``load_path``: Path of the loaded checkpoint. ``None`` if loading the checkpoint failed.
* ``client_state``: State dictionary used for loading required training states in the client code.
"""
if tag is None:
latest_path = os.path.join(load_dir, 'latest')
if os.path.isfile(latest_path):
with open(latest_path, 'r') as fd:
tag = fd.read().strip()
else:
logger.warning(f"Unable to find latest file at {latest_path}, if trying to load latest " \
"checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.")
return None, None
load_path, client_states = self._load_checkpoint(load_dir,
tag,
load_module_strict=load_module_strict,
load_optimizer_states=load_optimizer_states,
load_lr_scheduler_states=load_lr_scheduler_states)
if self.zero_optimization() and load_path is not None:
self._load_zero_checkpoint(load_dir,
tag,
load_optimizer_states=load_optimizer_states)
return load_path, client_states
def _load_checkpoint(self,
load_dir,
tag,
load_module_strict=True,
load_optimizer_states=True,
load_lr_scheduler_states=True):
load_path = self._get_ckpt_name(load_dir, tag)
if not os.path.exists(load_path):
logger.warn(
'Client provided checkpoint load path: {} does not exist ... skip checkpoint load'
.format(load_path))
return None, None
logger.info(f'rank: {self.global_rank} loading checkpoint: {load_path}')
checkpoint = torch.load(load_path, map_location=lambda storage, loc: storage)
if isinstance(self.module, PipelineModule):
# Pipeline parallelism uses this to load its own checkpoint files.
self._curr_ckpt_path = os.path.join(load_dir, tag)
self.load_module_state_dict(state_dict=checkpoint['module'],
strict=load_module_strict)
if self.optimizer is not None and not self.zero_optimization():
if self.fp16_enabled():
self.optimizer.load_state_dict(
checkpoint['optimizer'],
load_optimizer_states=load_optimizer_states)
elif load_optimizer_states:
self.optimizer.load_state_dict(checkpoint['optimizer'])
if load_lr_scheduler_states and self.lr_scheduler is not None:
self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
self.csr_tensor_module_names = checkpoint['csr_tensor_module_names']
self.global_steps = checkpoint['global_steps']
self.global_samples = checkpoint.get('global_samples',
self.global_steps * self.train_batch_size())
self.skipped_steps = checkpoint['skipped_steps']
self.loaded_checkpoint_mp_world_size = checkpoint['mp_world_size']
self.loaded_checkpoint_dp_world_size = checkpoint['dp_world_size']
deepspeed_states = [
'module',
'optimizer',
'lr_scheduler',
'csr_tensor_module_names',
'skipped_steps',
'global_steps',
'dp_world_size',
'mp_world_size'
]
client_state = {
key: value
for key,
value in checkpoint.items() if not key in deepspeed_states
}
return load_path, client_state
def _load_zero_checkpoint(self, load_dir, tag, load_optimizer_states=True):
zero_sd_list = self._get_all_zero_checkpoints(load_dir, tag)
if zero_sd_list is None:
return
self.optimizer.load_state_dict(
state_dict_list=zero_sd_list,
load_optimizer_states=load_optimizer_states,
load_from_fp32_weights=self.zero_load_from_fp32_weights())
print(
f'loading {len(zero_sd_list)} zero partition checkpoints for rank {self.global_rank}'
)
def _get_mp_rank_zero_checkpoint_names(self, load_dir, tag, mp_rank, dp_world_size):
zero_ckpt_names = []
for dp_rank in range(dp_world_size):
ckpt_name = self._get_rank_zero_ckpt_name(checkpoints_path=load_dir,
tag=tag,
mp_rank=mp_rank,
dp_rank=dp_rank)
zero_ckpt_names.append(ckpt_name)
return zero_ckpt_names
def _get_all_zero_checkpoint_names(self,
load_dir,
tag,
mp_world_size,
dp_world_size):
zero_ckpt_names = []
for mp_rank in range(mp_world_size):
mp_rank_ckpt_names = self._get_mp_rank_zero_checkpoint_names(
load_dir=load_dir,
tag=tag,
mp_rank=mp_rank,
dp_world_size=dp_world_size)
zero_ckpt_names += mp_rank_ckpt_names
return zero_ckpt_names
def _get_all_zero_checkpoints(self, load_dir, tag):
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
zero_ckpt_names = self._get_mp_rank_zero_checkpoint_names(
load_dir=load_dir,
tag=tag,
mp_rank=mp_rank,
dp_world_size=self.loaded_checkpoint_dp_world_size)
invalid_zero_ckpt_paths = []
for i, ckpt_name in enumerate(zero_ckpt_names):
if not os.path.exists(ckpt_name):
# transparently handle the old file pattern for optim_states
if 'optim_states.pt' in ckpt_name:
ckpt_name_try = ckpt_name.replace("_optim_states.pt",
"optim_states.pt")
if os.path.exists(ckpt_name_try):
zero_ckpt_names[i] = ckpt_name_try
continue
invalid_zero_ckpt_paths.append(ckpt_name)
if len(invalid_zero_ckpt_paths) > 0:
logger.warn(
f"The following zero checkpoints paths are missing: {invalid_zero_ckpt_paths}"
)
return None
zero_sd_list = []
for ckpt_name in zero_ckpt_names:
zero_sd_list.append(torch.load(ckpt_name, map_location='cpu'))
zero_optimizer_sd = [sd['optimizer_state_dict'] for sd in zero_sd_list]
print(
f"successfully loaded {len(zero_optimizer_sd)} ZeRO state_dicts for rank {self.global_rank}"
)
return zero_optimizer_sd
def _checkpoint_tag_validation(self, tag):
if self.checkpoint_tag_validation_enabled():
s_hash = hashlib.sha1(tag.encode())
bhash = torch.ByteTensor([s_hash.digest()]).flatten().to(self.device)
max_bhash = bhash.clone()
min_bhash = bhash.clone()
dist.all_reduce(max_bhash, op=torch.distributed.ReduceOp.MAX)
dist.all_reduce(min_bhash, op=torch.distributed.ReduceOp.MIN)
valid = all(min_bhash == bhash) and all(max_bhash == bhash)
msg = f"[rank={dist.get_rank()}] The checkpoint tag name '{tag}' is not consistent across " \
"all ranks. Including rank unique information in checkpoint tag could cause issues when " \
"restoring with different world sizes."
if self.checkpoint_tag_validation_fail():
assert valid, msg
elif not valid:
logger.warning(msg)
def save_checkpoint(self, save_dir, tag=None, client_state={}, save_latest=True):
r"""Save training checkpoint
Arguments:
save_dir: Required. Directory for saving the checkpoint
tag: Optional. Checkpoint tag used as a unique identifier for the checkpoint, global step is
used if not provided. Tag name must be the same across all ranks.
client_state: Optional. State dictionary used for saving required training states in the client code.
save_latest: Optional. Save a file 'latest' pointing to the latest saved checkpoint.
Important: all processes must call this method, not just the process with rank 0, because
each process needs to save its master weights and scheduler+optimizer states. This method
will hang waiting to synchronize with the other processes if it is called only by the
process with rank 0.
"""
if self.zero_optimization_partition_weights():
# Prepare for state_dict() by ensuring all parameters are partitioned
self.optimizer.save_checkpoint_prologue()
# This is to make sure the checkpoint names are created without collision;
# there seems to be an issue creating them in parallel.
# Ensure save_dir directory exists
os.makedirs(save_dir, exist_ok=True)
if tag is None:
tag = f"global_step{self.global_steps}"
# Ensure tag is a string
tag = str(tag)
# Ensure checkpoint tag is consistent across ranks
self._checkpoint_tag_validation(tag)
if self.save_non_zero_checkpoint:
self._create_checkpoint_file(save_dir, tag, False)
self._save_checkpoint(save_dir, tag, client_state=client_state)
if self.save_zero_checkpoint:
self._create_zero_checkpoint_files(save_dir, tag)
self._save_zero_checkpoint(save_dir, tag)
# Save latest checkpoint tag
if save_latest:
with open(os.path.join(save_dir, 'latest'), 'w') as fd:
fd.write(tag)
if self.zero_optimization_partition_weights():
self.optimizer.save_checkpoint_epilogue()
return True
def _create_checkpoint_file(self, save_dir, tag, zero_checkpoint):
name_function = self._get_zero_ckpt_name if zero_checkpoint else self._get_ckpt_name
try:
checkpoint_name = name_function(save_dir, tag)
ensure_directory_exists(checkpoint_name)
except:
logger.error(f'Failed saving model checkpoint to {save_dir} with tag {tag}')
return False
return True
def _create_zero_checkpoint_files(self, save_dir, tag):
success = True
# zero checkpoint files are created sequentially
for rank in range(self.world_size):
if rank == self.global_rank:
success = self._create_checkpoint_file(save_dir, tag, True)
dist.barrier()
return success
def _save_checkpoint(self, save_dir, tag, client_state={}):
save_path = self._get_ckpt_name(save_dir, tag)
# A hack to save the checkpointing directory. Pipeline parallelism overrides
# module_state_dict() and uses this path to save the model. module_state_dict()
# then instead just returns None.
self._curr_ckpt_path = os.path.join(save_dir, tag)
state = dict(
module=self.module_state_dict(),
optimizer=self.optimizer.state_dict()
if self.optimizer and not self.zero_optimization() else None,
lr_scheduler=self.lr_scheduler.state_dict()
if self.lr_scheduler is not None else None,
csr_tensor_module_names=self.csr_tensor_module_names,
skipped_steps=self.skipped_steps,
global_steps=self.global_steps,
global_samples=self.global_samples,
dp_world_size=self.dp_world_size,
mp_world_size=self.mp_world_size,
)
state.update(client_state)
log_dist(message=f'Saving model checkpoint: {save_path}', ranks=[0])
#logger.info('Saving model checkpoint: {}'.format(save_path))
torch.save(state, save_path)
self._curr_save_path = None
def _get_param_shapes(self):
param_shapes = OrderedDict()
for name, param in self.module.named_parameters():
param_shapes[name] = param.ds_shape if hasattr(param,
"ds_shape") else param.shape
# print(f"saving param {name} {param_shapes[name]}")
return param_shapes
def _copy_recovery_script(self, save_path):
base_dir = os.path.dirname(os.path.dirname(__file__))
script = "zero_to_fp32.py"
src = os.path.join(base_dir, "utils", script)
dst = os.path.join(save_path, script)
logger.info(f"creating recovery script {dst}")
copyfile(src, dst)
# make executable
os.chmod(dst, os.stat(dst).st_mode | stat.S_IEXEC)
def _save_zero_checkpoint(self, save_path, tag):
zero_checkpoint_name = self._get_zero_ckpt_name(save_path, tag)
zero_sd = dict(
optimizer_state_dict=self.optimizer.state_dict(),
param_shapes=self._get_param_shapes(),
)
torch.save(zero_sd, zero_checkpoint_name)
self._copy_recovery_script(save_path)
logger.info('zero checkpoint saved {}'.format(zero_checkpoint_name))
def _zero3_consolidated_fp16_state_dict(self):
"""
Get a full non-partitioned state_dict with fp16 weights on cpu.
Important: this function must be called on all ranks and not just rank 0.
This is similar to nn.Module.state_dict (modelled after _save_to_state_dict), but:
1. consolidates the weights from different partitions on gpu0
2. works on one layer at a time to require as little gpu0 memory as possible, by
moving the already consolidated weights to cpu
3. takes care to keep the shared params shared when gradually copying the params to cpu
Returns:
a consolidated fp16 ``state_dict`` on cpu on rank 0, ``None`` on other ranks
"""
import deepspeed
if not self.zero_optimization_partition_weights():
raise ValueError("this function requires ZeRO-3 mode")
state_dict = OrderedDict() if torch.distributed.get_rank() == 0 else None
shared_weights = {}
def get_layer_state_dict(module, prefix=""):
# gather one layer at a time to be memory-efficient
with deepspeed.zero.GatheredParameters(list(
module.parameters(recurse=False))):
if torch.distributed.get_rank() == 0:
for name, param in module.named_parameters(recurse=False):
if param is None:
continue
key = prefix + name
# for shared weights we want to make sure not to unshare them when copying to cpu
data_ptr_id = param.storage().data_ptr()
if data_ptr_id in shared_weights:
# shared weights
# print(f"`{key}` is shared with `{shared_weights[data_ptr_id]}`")
state_dict[key] = state_dict[shared_weights[data_ptr_id]]
else:
state_dict[key] = param.detach().cpu()
shared_weights[data_ptr_id] = key
#print(f"param {name} {param.shape}")
#print(f"param {key} {param.shape} {state_dict[key].storage().data_ptr()}")
# now buffers - not sure if we need to take care of potentially shared weights here
for name, buf in module.named_buffers(recurse=False):
if buf is not None and name not in module._non_persistent_buffers_set:
state_dict[prefix + name] = buf.detach().cpu()
for name, child in module.named_children():
if child is not None:
get_layer_state_dict(child, prefix + name + ".")
see_memory_usage("before get_layer_state_dict", force=False)
get_layer_state_dict(self.module, prefix="")
see_memory_usage("after get_layer_state_dict", force=False)
return state_dict
def save_fp16_model(self, save_dir, save_filename="pytorch_model.bin"):
r"""Save fp16 model weights
This method saves the fp16 model weights at the desired destination.
Arguments:
save_dir: Required. Directory for saving the model
save_filename: Optional. Filename to save to. Defaults to ``pytorch_model.bin``
Important: all processes must call this method, not just the process with rank 0, because
the processes need to work in sync to gather the weights. This method will hang waiting
to synchronize with the other processes if it is called only by the process with rank 0.
"""
path = os.path.join(save_dir, save_filename)
if self.zero_optimization_partition_weights():
if self.zero_gather_fp16_weights_on_model_save():
# consolidation is expensive in time and memory and therefore isn't a default
state_dict = self._zero3_consolidated_fp16_state_dict()
else:
# the model will be bogus if not consolidated so don't confuse the user by saving it
logger.info(
f"Did not save the model {path} because `stage3_gather_fp16_weights_on_model_save` is False"
)
return
else:
state_dict = self.module.state_dict()
if torch.distributed.get_rank() == 0:
os.makedirs(save_dir, exist_ok=True)
logger.info(f"Saving model weights to {path}")
torch.save(state_dict, path)
| 42.735777
| 227
| 0.624493
|
import os
import stat
import torch
import warnings
import hashlib
import torch.distributed as dist
from collections import OrderedDict
from shutil import copyfile
from torch.nn.modules import Module
from torch.distributed.distributed_c10d import _get_global_rank
from tensorboardX import SummaryWriter
from deepspeed.runtime.utils import see_memory_usage
from deepspeed.runtime.zero.stage2 import FP16_DeepSpeedZeroOptimizer
from deepspeed.runtime.zero.stage1 import FP16_DeepSpeedZeroOptimizer_Stage1
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
from deepspeed.runtime.zero.utils import is_zero_supported_optimizer
from deepspeed.runtime.activation_checkpointing import checkpointing as activation_checkpointing
from deepspeed.runtime.fp16.fused_optimizer import FP16_Optimizer
from deepspeed.runtime.fp16.unfused_optimizer import FP16_UnfusedOptimizer
from deepspeed.runtime.config import DeepSpeedConfig, DEEPSPEED_OPTIMIZERS, \
ADAM_OPTIMIZER, ADAMW_OPTIMIZER, LAMB_OPTIMIZER, ONEBIT_ADAM_OPTIMIZER, \
TORCH_ADAM_PARAM, ADAM_W_MODE, ADAM_W_MODE_DEFAULT
from deepspeed.runtime.dataloader import DeepSpeedDataLoader
from deepspeed.runtime.constants import \
ROUTE_TRAIN, ROUTE_PREDICT, ROUTE_EVAL, \
PLD_THETA, PLD_GAMMA
from deepspeed.runtime.zero.constants import \
ZERO_OPTIMIZATION_OPTIMIZER_STATES, ZERO_OPTIMIZATION_GRADIENTS, ZERO_OPTIMIZATION_WEIGHTS
from deepspeed.runtime.csr_tensor import CSRTensor
import deepspeed.runtime.lr_schedules as lr_schedules
from deepspeed.utils import logger, log_dist, init_distributed
from deepspeed.utils.timer import ThroughputTimer, SynchronizedWallClockTimer
from deepspeed.runtime.progressive_layer_drop import ProgressiveLayerDrop
from .pipe.module import PipelineModule
from .utils import ensure_directory_exists
from ..ops.op_builder import UtilsBuilder
from ..ops.adam import DeepSpeedCPUAdam
from ..ops.adam import FusedAdam
from deepspeed.profiling.flops_profiler.profiler import FlopsProfiler
MEMORY_OPT_ALLREDUCE_SIZE = 500000000
try:
from apex import amp
except ImportError:
pass
def split_half_float_double_csr(tensors):
dtypes = [
"torch.cuda.HalfTensor",
"torch.cuda.FloatTensor",
"torch.cuda.DoubleTensor",
CSRTensor.type()
]
buckets = []
for i, dtype in enumerate(dtypes):
bucket = [t for t in tensors if t.type() == dtype]
if bucket:
buckets.append((dtype, bucket))
return buckets
def _initialize_parameter_parallel_groups(parameter_parallel_size=None):
data_parallel_size = int(dist.get_world_size())
if parameter_parallel_size is None:
parameter_parallel_size = int(data_parallel_size)
logger.info("data_parallel_size: %s, parameter_parallel_size: %s",
data_parallel_size,
parameter_parallel_size)
assert data_parallel_size % parameter_parallel_size == 0, \
'world size should be divisible by parameter parallel size'
rank = dist.get_rank()
my_group = None
for i in range(dist.get_world_size() // parameter_parallel_size):
ranks = range(i * parameter_parallel_size, (i + 1) * parameter_parallel_size)
group = torch.distributed.new_group(ranks)
if rank in ranks:
my_group = group
return my_group
def print_configuration(args, name):
logger.info('{}:'.format(name))
for arg in sorted(vars(args)):
dots = '.' * (29 - len(arg))
logger.info(' {} {} {}'.format(arg, dots, getattr(args, arg)))
class DeepSpeedEngine(Module):
def __init__(self,
args,
model,
optimizer=None,
model_parameters=None,
training_data=None,
lr_scheduler=None,
mpu=None,
dist_init_required=None,
collate_fn=None,
config_params=None,
dont_change_device=False):
super(DeepSpeedEngine, self).__init__()
self.dont_change_device = dont_change_device
self.client_optimizer = optimizer
self.client_model_parameters = model_parameters
self.client_lr_scheduler = lr_scheduler
self.training_data = training_data
self.collate_fn = collate_fn
self.mpu = mpu
self.data_parallel_group = None
self.global_steps = 0
self.global_samples = 0
self.micro_steps = 0
self.skipped_steps = 0
self.gradient_average = True
self.warn_unscaled_loss = True
self.config_params = config_params
self.loaded_checkpoint_mp_world_size = None
self.loaded_checkpoint_dp_world_size = None
self.enable_backward_allreduce = True
self.progressive_layer_drop = None
self.dist_backend = "nccl"
if dist_init_required is None:
dist_init_required = not dist.is_initialized()
if dist_init_required is False:
assert dist.is_initialized() is True, "Torch distributed not initialized. Please set dist_init_required to True or initialize before calling deepspeed.initialize()"
else:
init_distributed(dist_backend=self.dist_backend)
see_memory_usage(f"DeepSpeed Engine: Before args sanity test")
self._do_args_sanity_check(args)
self._configure_with_arguments(args, mpu)
self._do_sanity_check()
if mpu is not None:
assert not self.elasticity_enabled(), "Elasticity is not currently supported" \
" with model parallelism."
self._set_distributed_vars()
if self.tensorboard_enabled() and self.global_rank == 0:
self.summary_writer = self.get_summary_writer()
see_memory_usage(f"DeepSpeed Engine: Before configure distributed model")
self._configure_distributed_model(model)
see_memory_usage(f"DeepSpeed Engine: After configure distributed model")
self.timers = SynchronizedWallClockTimer()
self.tput_timer = ThroughputTimer(
batch_size=self.train_micro_batch_size_per_gpu(),
num_workers=self.dp_world_size,
steps_per_output=self.steps_per_print(),
monitor_memory=False)
if training_data:
self.training_dataloader = self.deepspeed_io(training_data)
else:
self.training_dataloader = None
self.optimizer = None
self.lr_scheduler = None
if model_parameters or optimizer:
self._configure_optimizer(optimizer, model_parameters)
self._configure_lr_scheduler(lr_scheduler)
self._report_progress(0)
self.csr_tensor_module_names = set()
if self.sparse_gradients_enabled():
for name, module in self.module.named_modules():
if isinstance(module, torch.nn.Embedding):
self.csr_tensor_module_names.add(name + ".weight")
logger.info("Will convert {} to sparse (csr) "
"tensor during training".format(name))
self.save_non_zero_checkpoint = False
self.save_zero_checkpoint = False
self._configure_checkpointing(dist_init_required)
if self.pld_enabled():
self.progressive_layer_drop = self._configure_progressive_layer_drop()
if self.global_rank == 0:
self._config.print('DeepSpeedEngine configuration')
if self.dump_state():
print_configuration(self, 'DeepSpeedEngine')
util_ops = UtilsBuilder().load()
self.flatten = util_ops.flatten
self.unflatten = util_ops.unflatten
def get_batch_info(self):
return self.train_batch_size, self.train_micro_batch_size_per_gpu, self.gradient_accumulation_steps
def checkpoint_tag_validation_enabled(self):
return self._config.checkpoint_tag_validation_enabled
def checkpoint_tag_validation_fail(self):
return self._config.checkpoint_tag_validation_fail
def elasticity_enabled(self):
return self._config.elasticity_enabled
def pld_enabled(self):
return self._config.pld_enabled
def pld_params(self):
return self._config.pld_params
def pld_theta(self):
return self.pld_params()[PLD_THETA]
def pld_gamma(self):
return self.pld_params()[PLD_GAMMA]
def tensorboard_enabled(self):
return self._config.tensorboard_enabled
def tensorboard_output_path(self):
return self._config.tensorboard_output_path
def tensorboard_job_name(self):
return self._config.tensorboard_job_name
def get_summary_writer(self,
name="DeepSpeedJobName",
base=os.path.join(os.path.expanduser("~"),
"tensorboard")):
if self.tensorboard_output_path():
base_dir = self.tensorboard_output_path()
job_name = self.tensorboard_job_name()
log_dir = os.path.join(base_dir, job_name)
else:
if self.tensorboard_job_name():
name = self.tensorboard_job_name()
if 'DLWS_JOB_ID' in os.environ:
infra_job_id = os.environ['DLWS_JOB_ID']
elif 'DLTS_JOB_ID' in os.environ:
infra_job_id = os.environ['DLTS_JOB_ID']
else:
infra_job_id = 'unknown-job-id'
summary_writer_dir_name = os.path.join(infra_job_id, "logs")
log_dir = os.path.join(base, summary_writer_dir_name, name)
os.makedirs(log_dir, exist_ok=True)
return SummaryWriter(log_dir=log_dir)
def wall_clock_breakdown(self):
return self._config.wall_clock_breakdown
def flops_profiler_enabled(self):
return self._config.flops_profiler_config.enabled
def flops_profiler_profile_step(self):
return self._config.flops_profiler_config.profile_step
def flops_profiler_module_depth(self):
return self._config.flops_profiler_config.module_depth
def flops_profiler_top_modules(self):
return self._config.flops_profiler_config.top_modules
def flops_profiler_detailed(self):
return self._config.flops_profiler_config.detailed
def memory_breakdown(self):
return self._config.memory_breakdown
def sparse_gradients_enabled(self):
return self._config.sparse_gradients_enabled
def train_batch_size(self):
return self._config.train_batch_size
def train_micro_batch_size_per_gpu(self):
return self._config.train_micro_batch_size_per_gpu
def optimizer_name(self):
return self.client_optimizer.__class__.__name__ if self.client_optimizer else self._config.optimizer_name
def optimizer_params(self):
return self._config.optimizer_params
def optimizer_legacy_fusion(self):
return self._config.optimizer_legacy_fusion
def scheduler_name(self):
return self._config.scheduler_name
def scheduler_params(self):
return self._config.scheduler_params
def zero_optimization(self):
return self._config.zero_enabled
def zero_allow_untested_optimizer(self):
return self._config.zero_allow_untested_optimizer
def zero_reduce_scatter(self):
return self._config.zero_config.reduce_scatter
def zero_overlap_comm(self):
return self._config.zero_config.overlap_comm
def zero_offload_optimizer(self):
return self._config.zero_config.offload_optimizer
def zero_offload_param(self):
return self._config.zero_config.offload_param
def zero_cpu_offload(self):
return self._config.zero_config.offload_optimizer is not None
def zero_sub_group_size(self):
return self._config.zero_config.sub_group_size
def zero_optimization_stage(self):
return self._config.zero_optimization_stage
def zero_reduce_bucket_size(self):
return self._config.zero_config.reduce_bucket_size
def zero_allgather_bucket_size(self):
return self._config.zero_config.allgather_bucket_size
def zero_optimization_partition_gradients(self):
return self.zero_optimization_stage() >= ZERO_OPTIMIZATION_GRADIENTS
def zero_optimization_partition_weights(self):
return self.zero_optimization_stage() >= ZERO_OPTIMIZATION_WEIGHTS
def zero_contiguous_gradients(self):
return self._config.zero_config.contiguous_gradients
def zero_load_from_fp32_weights(self):
return self._config.zero_config.load_from_fp32_weights
def zero_elastic_checkpoint(self):
return self._config.zero_config.elastic_checkpoint
def zero_max_live_parameters(self):
return self._config.zero_config.max_live_parameters
def zero_max_reuse_distance(self):
return self._config.zero_config.max_reuse_distance
def zero_prefetch_bucket_size(self):
return self._config.zero_config.prefetch_bucket_size
def zero_param_persistence_threshold(self):
return self._config.zero_config.param_persistence_threshold
def zero_gather_fp16_weights_on_model_save(self):
return self._config.zero_config.gather_fp16_weights_on_model_save
def fp16_enabled(self):
return self._config.fp16_enabled
def amp_enabled(self):
return self._config.amp_enabled
def amp_params(self):
return self._config.amp_params
def loss_scale(self):
return self._config.loss_scale
def gradient_accumulation_steps(self):
return self._config.gradient_accumulation_steps
def allreduce_always_fp32(self):
return self._config.allreduce_always_fp32
def postscale_gradients(self):
return not self._config.prescale_gradients
def gradient_predivide_factor(self):
return self._config.gradient_predivide_factor
def steps_per_print(self):
return self._config.steps_per_print
def zero_allgather_partitions(self):
return self._config.zero_config.allgather_partitions
def dump_state(self):
return self._config.dump_state
def gradient_clipping(self):
return self._config.gradient_clipping
def dynamic_loss_scale(self):
return self._config.loss_scale == 0
def initial_dynamic_scale(self):
return self._config.initial_dynamic_scale
def dynamic_loss_scale_args(self):
return self._config.dynamic_loss_scale_args
def swap_tensor_config(self):
return self._config.swap_tensor_config
def aio_config(self):
return self._config.aio_config
def _configure_lr_scheduler(self, client_lr_scheduler):
lr_scheduler = self._scheduler_from_config(self.optimizer)
if lr_scheduler:
if self.global_rank == 0:
logger.info(
f'DeepSpeed using configured LR scheduler = {self.scheduler_name()}')
self.lr_scheduler = lr_scheduler
else:
if self.global_rank == 0:
logger.info('DeepSpeed using client LR scheduler')
self.lr_scheduler = client_lr_scheduler
log_dist(f'DeepSpeed LR Scheduler = {self.lr_scheduler}', ranks=[0])
def _configure_checkpointing(self, dist_init_required):
dp_rank = self.global_rank
if self.mpu:
dp_rank = self.mpu.get_data_parallel_rank()
self.save_non_zero_checkpoint = (
dp_rank == 0) or self.zero_optimization_partition_weights()
if self.zero_optimization():
param_rank = torch.distributed.get_rank(
group=self.optimizer.dp_process_group)
self.save_zero_checkpoint = (param_rank == dp_rank)
def _scheduler_from_config(self, optimizer):
scheduler_name = self.scheduler_name()
if scheduler_name is not None:
if hasattr(lr_schedules, scheduler_name):
scheduler = getattr(lr_schedules, scheduler_name)
else:
assert hasattr(torch.optim.lr_scheduler, scheduler_name), \
f"DeepSpeed does not recognize LR scheduler {scheduler_name}"
scheduler = getattr(torch.optim.lr_scheduler, scheduler_name)
scheduler_params = self.scheduler_params()
instantiated_scheduler = scheduler(optimizer, **scheduler_params)
return instantiated_scheduler
else:
return None
def _set_distributed_vars(self):
if self.local_rank >= 0:
torch.cuda.set_device(self.local_rank)
self.device = torch.device("cuda", self.local_rank)
self.world_size = dist.get_world_size()
self.global_rank = dist.get_rank()
else:
self.world_size = 1
self.global_rank = 0
self.device = torch.device("cuda")
def _configure_with_arguments(self, args, mpu):
self.local_rank = int(os.environ['LOCAL_RANK'])
if hasattr(args, 'local_rank'):
args.local_rank = self.local_rank
config_file = args.deepspeed_config if hasattr(args,
'deepspeed_config') else None
self._config = DeepSpeedConfig(config_file, mpu, param_dict=self.config_params)
def _do_args_sanity_check(self, args):
if hasattr(args, 'deepscale_config') and args.deepscale_config is not None:
logger.warning(
"************ --deepscale_config is deprecated, please use --deepspeed_config ************"
)
if hasattr(args, 'deepspeed_config'):
assert args.deepspeed_config is None, "Not sure how to proceed, we were given both a deepscale_config and deepspeed_config"
args.deepspeed_config = args.deepscale_config
assert "LOCAL_RANK" in os.environ, "DeepSpeed requires the LOCAL_RANK environment variable, it is set by the deepspeed launcher, " \
"deepspeed.init_distributed, or the torch.distributed launcher. If using a different launcher please ensure LOCAL_RANK is set prior to initializing deepspeed."
if hasattr(args, 'local_rank') and args.local_rank is not None:
assert isinstance(args.local_rank, int), f"args.local_rank of {args.local_rank} is an unknown type {type(args.local_rank)}"
if args.local_rank >= 0:
env_local_rank = int(os.environ.get("LOCAL_RANK"))
assert env_local_rank == args.local_rank, \
f"Mismatch in local rank setting, args.local_rank={args.local_rank} but env['LOCAL_RANK']={env_local_rank}."
if self.config_params is None:
assert hasattr(args, 'deepspeed_config') and args.deepspeed_config is not None, \
'DeepSpeed requires --deepspeed_config to specify configuration file'
assert os.path.isfile(args.deepspeed_config), \
'DeepSpeed configuration file: {} is not an existing file'.format(args.deepspeed_config)
def _is_supported_optimizer(self, optimizer_name):
return optimizer_name in DEEPSPEED_OPTIMIZERS or \
getattr(torch.optim, optimizer_name, None) is not None
def _do_sanity_check(self):
if not self.client_optimizer:
if self.optimizer_name() is not None:
assert self._is_supported_optimizer(self.optimizer_name()), \
'{} is not a supported DeepSpeed Optimizer'.format(self.optimizer_name())
if self.optimizer_name() == LAMB_OPTIMIZER:
assert self.dynamic_loss_scale(), \
'DeepSpeed {} optimizer requires dynamic loss scaling'.format(self.optimizer_name())
def _broadcast_model(self):
def is_replicated(p):
if hasattr(p, 'ds_status') and p.ds_status is not ZeroParamStatus.AVAILABLE:
return False
return True
for p in self.module.parameters():
if torch.is_tensor(p) and is_replicated(p):
dist.broadcast(p,
self.broadcast_src_rank,
group=self.data_parallel_group)
def _configure_distributed_model(self, model):
self.module = model
if self.fp16_enabled():
self.module.half()
if not self.dont_change_device:
self.module.to(self.device)
if self.mpu is None:
self.data_parallel_group = _initialize_parameter_parallel_groups()
self.dp_world_size = dist.get_world_size()
self.mp_world_size = 1
self.broadcast_src_rank = 0
else:
self.data_parallel_group = self.mpu.get_data_parallel_group()
self.dp_world_size = self.mpu.get_data_parallel_world_size()
self.mp_world_size = self.mpu.get_model_parallel_world_size()
self.broadcast_src_rank = _get_global_rank(
self.mpu.get_data_parallel_group(),
0)
if not self.amp_enabled():
self._broadcast_model()
def _configure_optimizer(self, client_optimizer, model_parameters):
if client_optimizer is not None:
client_optimizer.param_groups[:] = [
pg for pg in client_optimizer.param_groups if len(pg["params"]) != 0
]
if self.global_rank == 0:
logger.info(
"Removing param_group that has no 'params' in the client Optimizer")
basic_optimizer = client_optimizer
if self.global_rank == 0:
logger.info('Using client Optimizer as basic optimizer')
else:
basic_optimizer = self._configure_basic_optimizer(model_parameters)
if self.global_rank == 0:
logger.info(
'Using DeepSpeed Optimizer param name {} as basic optimizer'.format(
self.optimizer_name()))
if self.global_rank == 0:
logger.info('DeepSpeed Basic Optimizer = {}'.format(
basic_optimizer.__class__.__name__))
if self.zero_optimization():
assert not self.amp_enabled(), "Amp and ZeRO are not currently compatible, please use (legacy) fp16 mode which performs similar to amp opt_mode=O2"
if not is_zero_supported_optimizer(basic_optimizer):
assert self.zero_allow_untested_optimizer(), \
'You are using an untested ZeRO Optimizer. Please add <"zero_allow_untested_optimizer": true> in the configuration file to use it.'
if self.global_rank == 0:
logger.warning(
"**** You are using ZeRO with an untested optimizer, proceed with caution *****"
)
self.optimizer = self._configure_zero_optimizer(basic_optimizer)
elif self.amp_enabled():
assert not self.fp16_enabled(), "Cannot enable both amp with (legacy) fp16 mode"
amp_params = self.amp_params()
if self.global_rank == 0:
logger.info(f"Initializing AMP with these params: {amp_params}")
try:
logger.info("Initializing Apex amp from: {}".format(amp.__path__))
except NameError:
raise RuntimeError(
"Unable to import apex/amp, please make sure it is installed")
self.module, self.optimizer = amp.initialize(self.module, basic_optimizer, **amp_params)
self._broadcast_model()
elif self.fp16_enabled():
self.optimizer = self._configure_fp16_optimizer(basic_optimizer)
else:
self.optimizer = basic_optimizer
log_dist('DeepSpeed Final Optimizer = {}'.format(self.optimizer_name()),
ranks=[0])
def _configure_basic_optimizer(self, model_parameters):
optimizer_parameters = self.optimizer_params()
if 'max_grad_norm' in optimizer_parameters.keys():
raise ValueError(
"'max_grad_norm' is not supported as an optimizer parameter, please switch to using the deepspeed parameter 'gradient_clipping' see: https://www.deepspeed.ai/docs/config-json/#gradient-clipping for more details"
)
if self.optimizer_name() in [ADAM_OPTIMIZER, ADAMW_OPTIMIZER]:
torch_adam = optimizer_parameters.pop(TORCH_ADAM_PARAM, False)
adam_w_mode = optimizer_parameters.pop(ADAM_W_MODE, ADAM_W_MODE_DEFAULT)
effective_adam_w_mode = self.optimizer_name(
) == ADAMW_OPTIMIZER or adam_w_mode
if torch_adam:
if not effective_adam_w_mode:
optimizer = torch.optim.Adam(model_parameters,
**optimizer_parameters)
else:
optimizer = torch.optim.AdamW(model_parameters,
**optimizer_parameters)
else:
if self.zero_cpu_offload():
from deepspeed.ops.adam import DeepSpeedCPUAdam
optimizer = DeepSpeedCPUAdam(model_parameters,
**optimizer_parameters,
adamw_mode=effective_adam_w_mode)
else:
from deepspeed.ops.adam import FusedAdam
optimizer = FusedAdam(model_parameters,
**optimizer_parameters,
adam_w_mode=effective_adam_w_mode)
elif self.optimizer_name() == LAMB_OPTIMIZER:
from deepspeed.ops.lamb import FusedLamb
optimizer = FusedLamb(model_parameters, **optimizer_parameters)
elif self.optimizer_name() == ONEBIT_ADAM_OPTIMIZER:
from deepspeed.runtime.fp16.onebit.adam import OnebitAdam
optimizer = OnebitAdam(model_parameters, self, **optimizer_parameters)
if not self.fp16_enabled():
logger.warning(
f'Currently the convergence of 1-bit Adam is only verified under FP16'
)
else:
torch_optimizer = getattr(torch.optim, self.optimizer_name())
optimizer = torch_optimizer(model_parameters, **optimizer_parameters)
return optimizer
def _configure_fp16_optimizer(self, optimizer):
initial_dynamic_scale = self.initial_dynamic_scale()
dynamic_loss_args = self.dynamic_loss_scale_args()
clip_grad = self.gradient_clipping()
if isinstance(optimizer,
FusedAdam) or self.optimizer_name() == ONEBIT_ADAM_OPTIMIZER:
if self.dynamic_loss_scale():
log_dist('Creating fp16 optimizer with dynamic loss scale', ranks=[0])
timers = self.timers if self.wall_clock_breakdown() else None
optimizer = FP16_Optimizer(
optimizer,
dynamic_loss_scale=True,
initial_dynamic_scale=initial_dynamic_scale,
dynamic_loss_args=dynamic_loss_args,
mpu=self.mpu,
clip_grad=clip_grad,
fused_adam_legacy=self.optimizer_legacy_fusion(),
timers=timers)
else:
log_dist('Creating fp16 optimizer with static loss scale: {}'.format(
self.loss_scale()),
ranks=[0])
optimizer = FP16_Optimizer(
optimizer,
static_loss_scale=self.loss_scale(),
mpu=self.mpu,
clip_grad=clip_grad,
fused_adam_legacy=self.optimizer_legacy_fusion())
else:
log_dist('Creating fp16 unfused optimizer with dynamic loss scale',
ranks=[0])
optimizer = FP16_UnfusedOptimizer(
optimizer,
static_loss_scale=self.loss_scale(),
dynamic_loss_scale=self.dynamic_loss_scale(),
dynamic_loss_args=dynamic_loss_args,
mpu=self.mpu,
clip_grad=clip_grad,
fused_lamb_legacy=self.optimizer_name() == LAMB_OPTIMIZER)
return optimizer
def _configure_zero_optimizer(self, optimizer):
zero_stage = self.zero_optimization_stage()
log_dist('Creating fp16 ZeRO stage {} optimizer'.format(zero_stage), ranks=[0])
assert not self.allreduce_always_fp32(), "ZeRO does not support 'fp32_allreduce': true"
timers = self.timers if self.wall_clock_breakdown() else None
if zero_stage == ZERO_OPTIMIZATION_OPTIMIZER_STATES:
assert self.zero_reduce_scatter(), 'Stage 1 only supports reduce scatter mode'
optimizer = FP16_DeepSpeedZeroOptimizer_Stage1(
optimizer,
static_loss_scale=self.loss_scale(),
dynamic_loss_scale=self.dynamic_loss_scale(),
dynamic_loss_args=self.dynamic_loss_scale_args(),
clip_grad=self.gradient_clipping(),
all_gather_partitions=self.zero_allgather_partitions(),
allgather_size=self.zero_allgather_bucket_size(),
max_elements_per_comm=self.zero_reduce_bucket_size(),
dp_process_group=self.data_parallel_group,
elastic_checkpoint=self.zero_elastic_checkpoint(),
mpu=self.mpu)
elif zero_stage == ZERO_OPTIMIZATION_GRADIENTS:
optimizer = FP16_DeepSpeedZeroOptimizer(
optimizer,
timers=timers,
static_loss_scale=self.loss_scale(),
dynamic_loss_scale=self.dynamic_loss_scale(),
dynamic_loss_args=self.dynamic_loss_scale_args(),
clip_grad=self.gradient_clipping(),
contiguous_gradients=self.zero_contiguous_gradients(),
reduce_bucket_size=self.zero_reduce_bucket_size(),
allgather_bucket_size=self.zero_allgather_bucket_size(),
dp_process_group=self.data_parallel_group,
reduce_scatter=self.zero_reduce_scatter(),
overlap_comm=self.zero_overlap_comm(),
cpu_offload=self.zero_cpu_offload(),
mpu=self.mpu,
postscale_gradients=self.postscale_gradients(),
gradient_predivide_factor=self.gradient_predivide_factor(),
gradient_accumulation_steps=self.gradient_accumulation_steps())
elif zero_stage == ZERO_OPTIMIZATION_WEIGHTS:
print("Initializing ZeRO Stage 3") if dist.get_rank() == 0 else None
from deepspeed.runtime.zero.stage3 import FP16_DeepSpeedZeroOptimizer_Stage3
optimizer = FP16_DeepSpeedZeroOptimizer_Stage3(
self.module,
optimizer,
timers=timers,
static_loss_scale=self.loss_scale(),
dynamic_loss_scale=self.dynamic_loss_scale(),
dynamic_loss_args=self.dynamic_loss_scale_args(),
clip_grad=self.gradient_clipping(),
contiguous_gradients=self.zero_contiguous_gradients(),
reduce_bucket_size=self.zero_reduce_bucket_size(),
prefetch_bucket_size=self.zero_prefetch_bucket_size(),
max_reuse_distance=self.zero_max_reuse_distance(),
max_live_parameters=self.zero_max_live_parameters(),
param_persistence_threshold=self.zero_param_persistence_threshold(),
dp_process_group=self.data_parallel_group,
reduce_scatter=self.zero_reduce_scatter(),
overlap_comm=self.zero_overlap_comm(),
offload_optimizer_config=self.zero_offload_optimizer(),
offload_param_config=self.zero_offload_param(),
sub_group_size=self.zero_sub_group_size(),
mpu=self.mpu,
postscale_gradients=self.postscale_gradients(),
gradient_predivide_factor=self.gradient_predivide_factor(),
gradient_accumulation_steps=self.gradient_accumulation_steps(),
aio_config=self.aio_config())
else:
raise NotImplementedError("ZeRO stage {} not implemented".format(zero_stage))
return optimizer
def _configure_progressive_layer_drop(self):
pld = ProgressiveLayerDrop(theta=self.pld_theta(), gamma=self.pld_gamma())
return pld
def deepspeed_io(self,
dataset,
batch_size=None,
route=ROUTE_TRAIN,
pin_memory=True,
data_sampler=None,
collate_fn=None,
num_local_io_workers=None):
if not isinstance(dataset, torch.utils.data.Dataset):
raise ValueError("Training data must be a torch Dataset")
if data_sampler is None and (route == ROUTE_PREDICT or route == ROUTE_EVAL):
data_sampler = torch.utils.data.SequentialSampler(dataset)
if batch_size is None:
batch_size = self.train_micro_batch_size_per_gpu()
if collate_fn is None:
collate_fn = self.collate_fn
deepspeed_io_timer = None
if route == ROUTE_TRAIN:
deepspeed_io_timer = self.tput_timer
data_parallel_world_size = None
data_parallel_rank = None
if self.mpu is not None:
data_parallel_world_size = self.mpu.get_data_parallel_world_size()
data_parallel_rank = self.mpu.get_data_parallel_rank()
return DeepSpeedDataLoader(dataset=dataset,
batch_size=batch_size,
pin_memory=pin_memory,
collate_fn=collate_fn,
local_rank=self.local_rank,
tput_timer=deepspeed_io_timer,
num_local_io_workers=num_local_io_workers,
data_sampler=data_sampler,
data_parallel_world_size=data_parallel_world_size,
data_parallel_rank=data_parallel_rank)
def train(self, mode=True):
self.warn_unscaled_loss = True
self.module.train(mode)
def eval(self):
self.warn_unscaled_loss = True
self.module.train(False)
def _scale_loss(self, prescaled_loss):
if isinstance(prescaled_loss, torch.Tensor):
scaled_loss = prescaled_loss / self.gradient_accumulation_steps()
elif isinstance(prescaled_loss, tuple) or isinstance(prescaled_loss, list):
scaled_loss = []
for l in prescaled_loss:
if isinstance(l, torch.Tensor):
scaled_loss.append(l / self.gradient_accumulation_steps())
else:
scaled_loss.append(l)
else:
scaled_loss = prescaled_loss
if self.warn_unscaled_loss:
logger.warning(
f'DeepSpeed unable to scale loss because of type: {type(prescaled_loss)}'
)
self.warn_unscaled_loss = False
return scaled_loss
def forward(self, *inputs, **kwargs):
if self.flops_profiler_enabled(
) and self.global_steps == self.flops_profiler_profile_step(
) and self.global_rank == 0:
self.flops_profiler = FlopsProfiler(self.module)
self.flops_profiler.start_profile(ignore_list=None)
if self.module.training and self.progressive_layer_drop:
kwargs.update(self.progressive_layer_drop.get_state())
if self.zero_optimization_partition_weights():
for module in self.module.modules():
module._parameters._in_forward = True
pass
if self.wall_clock_breakdown():
self.timers('forward_microstep').start()
self.timers('forward').start()
if self.training_dataloader is None:
self.tput_timer.start()
loss = self.module(*inputs, **kwargs)
if self.zero_optimization_partition_weights():
if not torch._C.is_grad_enabled():
self.optimizer.param_coordinator.reset_step()
for module in self.module.modules():
module._parameters._in_forward = False
if self.wall_clock_breakdown():
self.timers('forward').stop()
self.timers('forward_microstep').stop()
if self.flops_profiler_enabled(
) and self.global_steps == self.flops_profiler_profile_step(
) and self.global_rank == 0:
self.flops_profiler.print_model_profile(
profile_step=self.global_steps,
module_depth=self.flops_profiler_module_depth(),
top_modules=self.flops_profiler_top_modules(),
detailed=self.flops_profiler_detailed())
self.flops_profiler.end_profile()
return loss
def allreduce_gradients(self, bucket_size=MEMORY_OPT_ALLREDUCE_SIZE):
if self.zero_optimization_partition_gradients():
self.optimizer.overlapping_partition_gradients_reduce_epilogue()
elif self.is_gradient_accumulation_boundary():
if self.zero_optimization_stage() == ZERO_OPTIMIZATION_OPTIMIZER_STATES:
assert self.zero_reduce_scatter()
self.optimizer.reduce_scatter_gradients(
postscale_gradients=self.postscale_gradients(),
gradient_predivide_factor=self.gradient_predivide_factor(),
gradient_average=self.gradient_average)
else:
self.buffered_allreduce_fallback(elements_per_buffer=bucket_size)
def backward(self, loss, allreduce_gradients=True, release_loss=False):
if not allreduce_gradients:
logger.warning(
f'Argument `allreduce_gradients` is deprecated, ignored, and will soon be removed'
)
if self.gradient_accumulation_steps() > 1:
loss = self._scale_loss(loss.float())
if self.tensorboard_enabled():
if self.is_gradient_accumulation_boundary():
if self.global_rank == 0:
self.summary_events = [
(f'Train/Samples/train_loss',
loss.mean().item() * self.gradient_accumulation_steps(),
self.global_samples)
]
for event in self.summary_events:
self.summary_writer.add_scalar(event[0], event[1], event[2])
self.summary_writer.flush()
if self.wall_clock_breakdown():
self.timers('backward_microstep').start()
self.timers('backward').start()
assert self.optimizer is not None, "must provide optimizer during " \
"init in order to use backward"
if self.wall_clock_breakdown():
self.timers('backward_inner_microstep').start()
self.timers('backward_inner').start()
if self.zero_optimization():
self.optimizer.is_gradient_accumulation_boundary = self.is_gradient_accumulation_boundary(
)
self.optimizer.backward(loss)
elif self.amp_enabled():
delay_unscale = not self.is_gradient_accumulation_boundary()
with amp.scale_loss(loss,
self.optimizer,
delay_unscale=delay_unscale) as scaled_loss:
scaled_loss.backward()
elif self.fp16_enabled():
self.optimizer.backward(loss)
else:
loss.backward()
if self.wall_clock_breakdown():
self.timers('backward_inner').stop()
self.timers('backward_inner_microstep').stop()
if self.wall_clock_breakdown():
self.timers('backward_allreduce_microstep').start()
self.timers('backward_allreduce').start()
if self.enable_backward_allreduce:
self.allreduce_gradients()
if self.wall_clock_breakdown():
self.timers('backward_allreduce').stop()
self.timers('backward_allreduce_microstep').stop()
self.timers('backward').stop()
self.timers('backward_microstep').stop()
if release_loss:
pass
return loss
def is_gradient_accumulation_boundary(self):
return (self.micro_steps + 1) % \
self.gradient_accumulation_steps() == 0
def zero_grad(self):
for param_name, param in self.module.named_parameters():
param.grad = None
def clip_fp32_gradients(self):
torch.nn.utils.clip_grad_norm_(parameters=self.module.parameters(),
max_norm=self.gradient_clipping())
def _take_model_step(self, lr_kwargs):
if self.gradient_clipping() > 0.0:
if not self.fp16_enabled() and not self.amp_enabled():
self.clip_fp32_gradients()
elif self.amp_enabled():
# https://nvidia.github.io/apex/advanced.html#gradient-clipping
master_params = amp.master_params(self.optimizer)
torch.nn.utils.clip_grad_norm_(parameters=master_params,
max_norm=self.gradient_clipping())
self.optimizer.step()
#zero grad in basic optimizer could be unreliable and may not exhibit
#the behaviour that we want
if not self.zero_optimization() and not self.fp16_enabled(
) and not self.amp_enabled():
self.zero_grad()
else:
self.optimizer.zero_grad()
report_progress = self.global_rank == 0 if self.global_rank else True
# Check overflow here since in the DS fp16 optimizer, the overflow flag is updated in the step() call above.
overflow = False
if hasattr(self.optimizer, 'overflow'):
overflow = self.optimizer.overflow
if overflow:
self.skipped_steps += 1
else:
if self.lr_scheduler is not None:
self.lr_scheduler.step(**(lr_kwargs or {}))
if report_progress and (self.global_steps + 1) % self.steps_per_print() == 0:
self._report_progress(self.global_steps + 1)
self.global_steps += 1
self.global_samples += self.train_batch_size()
def step(self, lr_kwargs=None):
if self.wall_clock_breakdown():
self.timers('step_microstep').start()
self.timers('step').start()
assert self.optimizer is not None, "must provide optimizer during " \
"init in order to use step"
report_progress = self.global_rank == 0 if self.global_rank else True
# Update the model when we reach gradient accumulation boundaries
if self.is_gradient_accumulation_boundary():
if self.progressive_layer_drop:
self.progressive_layer_drop.update_state(self.global_steps)
self._take_model_step(lr_kwargs)
self.tput_timer.stop(report_progress)
# Log learning rate
if self.tensorboard_enabled():
if self.is_gradient_accumulation_boundary():
if self.global_rank == 0:
self.summary_events = [(f'Train/Samples/lr',
self.get_lr()[0],
self.global_samples)]
for event in self.summary_events: # write_summary_events
self.summary_writer.add_scalar(event[0], event[1], event[2])
if self.fp16_enabled() and hasattr(self.optimizer, 'cur_scale'):
self.summary_events.append((f'Train/Samples/loss_scale',
self.optimizer.cur_scale,
self.global_samples))
for event in self.summary_events: # write_summary_events
self.summary_writer.add_scalar(event[0], event[1], event[2])
self.summary_writer.flush()
if self.wall_clock_breakdown():
self.timers('step').stop()
self.timers('step_microstep').stop()
timer_names = [
'forward_microstep',
'backward_microstep',
'backward_inner_microstep',
'backward_allreduce_microstep',
'step_microstep'
]
self.timers.log(names=timer_names, memory_breakdown=self.memory_breakdown())
# Log timing
if self.is_gradient_accumulation_boundary():
if self.tensorboard_enabled():
if self.global_rank == 0:
self.summary_events = [
(f'Train/Samples/elapsed_time_ms_forward',
self.timers('forward').elapsed(reset=False) * 1000.0,
self.global_samples),
(f'Train/Samples/elapsed_time_ms_backward',
self.timers('backward').elapsed(reset=False) * 1000.0,
self.global_samples),
(f'Train/Samples/elapsed_time_ms_backward_inner',
self.timers('backward_inner').elapsed(reset=False) * 1000.0,
self.global_samples),
(f'Train/Samples/elapsed_time_ms_backward_allreduce',
self.timers('backward_allreduce').elapsed(reset=False) *
1000.0,
self.global_samples),
(f'Train/Samples/elapsed_time_ms_step',
self.timers('step').elapsed(reset=False) * 1000.0,
self.global_samples)
]
for event in self.summary_events: # write_summary_events
self.summary_writer.add_scalar(event[0], event[1], event[2])
self.summary_writer.flush()
if self.wall_clock_breakdown():
self.timers.log([
'forward',
'backward',
'backward_inner',
'backward_allreduce',
'step'
])
self.micro_steps += 1
def _get_optimizer_param(self, param_name):
result = []
if not self.optimizer:
return result
for group in self.optimizer.param_groups:
if param_name in group:
result.append(group[param_name])
else:
result.append(0.0)
return result
def get_lr(self):
return self._get_optimizer_param('lr')
def get_type(self):
return self._get_optimizer_param('type')
def get_mom(self):
if self.optimizer_name() in ['SGD', 'RMSprop']:
return self._get_optimizer_param('momentum')
else:
return self._get_optimizer_param('betas')
def get_pld_theta(self):
if self.progressive_layer_drop:
return self.progressive_layer_drop.get_theta()
else:
return None
def _report_progress(self, step):
lr = self.get_lr()
mom = self.get_mom()
log_dist(f'step={step}, skipped={self.skipped_steps}, lr={lr}, mom={mom}',
ranks=[0])
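    # Flattens a bucket of gradients into one tensor and all-reduces it across the
    # data-parallel group, optionally in fp32 and with pre- or post-division by the
    # configured predivide factor / world size.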
def allreduce_bucket(self, bucket):
tensor = self.flatten(bucket)
tensor_to_allreduce = tensor
if self.allreduce_always_fp32():
tensor_to_allreduce = tensor.float()
if self.postscale_gradients():
if self.gradient_predivide_factor() != 1.0:
tensor_to_allreduce.mul_(1. / self.gradient_predivide_factor())
dist.all_reduce(tensor_to_allreduce, group=self.data_parallel_group)
if self.gradient_average:
if self.gradient_predivide_factor() != self.dp_world_size:
tensor_to_allreduce.mul_(self.gradient_predivide_factor() /
self.dp_world_size)
else:
tensor_to_allreduce.div_(self.dp_world_size)
dist.all_reduce(tensor_to_allreduce, group=self.data_parallel_group)
if self.allreduce_always_fp32() and tensor is not tensor_to_allreduce:
tensor.copy_(tensor_to_allreduce)
return tensor
def allreduce_and_copy(self, small_bucket):
allreduced = self.allreduce_bucket(small_bucket)
for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)):
buf.copy_(synced)
def allreduce_no_retain(self, bucket, numel_per_bucket=500000000):
small_bucket = []
numel = 0
for tensor in bucket:
small_bucket.append(tensor)
numel = numel + tensor.numel()
if numel > numel_per_bucket:
self.allreduce_and_copy(small_bucket)
small_bucket = []
numel = 0
if len(small_bucket) > 0:
self.allreduce_and_copy(small_bucket)
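    # Gathers gradients from all parameters (creating zero grads where missing so every
    # rank reduces tensors of the same size), splits them into half/float/double/CSR
    # buckets and reduces each bucket with the appropriate dense or sparse path.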
def buffered_allreduce_fallback(self, grads=None, elements_per_buffer=500000000):
grads = []
for param_name, param in self.module.named_parameters():
if param.grad is None:
                # In cases where there is an imbalance of empty grads across
                # ranks, we must create empty grads; this ensures that every
                # rank is reducing the same size. In some cases it may make
                # sense in the future to support the ability to average not
                # w.r.t. world size but with a different value.
param.grad = torch.zeros(param.size(),
dtype=param.dtype,
device=param.device)
grads.append(param.grad.data)
else:
grad_data = param.grad.data
if self.sparse_gradients_enabled(
) and param_name in self.csr_tensor_module_names:
grads.append(CSRTensor(grad_data))
else:
grads.append(grad_data)
split_buckets = split_half_float_double_csr(grads)
for i, bucket_tuple in enumerate(split_buckets):
bucket_type, bucket = bucket_tuple
if bucket_type == CSRTensor.type():
self.csr_allreduce_no_retain(bucket)
else:
self.allreduce_no_retain(bucket, numel_per_bucket=elements_per_buffer)
def csr_allreduce_no_retain(self, bucket):
allreduced_csrs = self.csr_allreduce_bucket(bucket)
# Densify csr tensor and copy back to original location
for csr in allreduced_csrs:
dense_tensor = csr.to_dense()
csr.orig_dense_tensor.copy_(dense_tensor)
def csr_allreduce_bucket(self, bucket):
csr_list = []
for csr in bucket:
csr_list.append(self.csr_allreduce(csr))
return csr_list
def csr_allreduce(self, csr):
# Pre-divide for fp16 stability
csr.values.div_(self.dp_world_size)
indices_device_list = self.csr_all_gather(csr.indices)
values_device_list = self.csr_all_gather(csr.values)
csr.indices = torch.cat(indices_device_list)
csr.values = torch.cat(values_device_list)
return csr
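    # all_gather requires equal-sized tensors, so each rank zero-pads its tensor to the
    # largest gathered size; results are then trimmed back to each source rank's true
    # length.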
def csr_all_gather(self, value):
my_size = torch.LongTensor([value.size()[0]]).to(self.device)
all_sizes = self.all_gather_scalar(my_size)
max_size = torch.cat(all_sizes).max()
fill_size = (max_size - my_size)
assert value.dim() in [1, 2]
if value.dim() == 1:
if fill_size > 0:
value = torch.cat([value, value.new_zeros(fill_size)])
tensor_list = [value.new_zeros(max_size) for _ in range(self.dp_world_size)]
else:
if fill_size > 0:
value = torch.cat([value, value.new_zeros(fill_size, value.size()[1])])
tensor_list = [
value.new_zeros(max_size,
value.size()[1]) for _ in range(self.dp_world_size)
]
dist.all_gather(tensor_list, value, group=self.data_parallel_group)
tensors = []
for dev_idx, t in enumerate(tensor_list):
size = all_sizes[dev_idx][0]
tensors.append(
t.index_select(0,
torch.LongTensor(range(size)).to(self.device)))
return tensors
def all_gather_scalar(self, value):
tensor_list = [value.new_zeros(value.size()) for _ in range(self.dp_world_size)]
dist.all_gather(tensor_list, value, group=self.data_parallel_group)
return tensor_list
def module_state_dict(self, destination=None, prefix='', keep_vars=False):
sd = self.module.state_dict(destination, prefix, keep_vars)
return sd
def load_module_state_dict(self, state_dict, strict=True):
self.module.load_state_dict(state_dict, strict=strict)
def _get_rank_zero_ckpt_name(self, checkpoints_path, tag, mp_rank, dp_rank):
filename = 'zero_pp_rank_{}'.format(dp_rank)
zero_ckpt_name = os.path.join(
checkpoints_path,
str(tag),
filename + '_mp_rank_{:02d}'.format(mp_rank) + '_optim_states.pt')
return zero_ckpt_name
def _get_zero_ckpt_name(self, checkpoints_path, tag):
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
pp_rank = torch.distributed.get_rank(group=self.optimizer.dp_process_group)
return self._get_rank_zero_ckpt_name(checkpoints_path, tag, mp_rank, pp_rank)
def _get_ckpt_name(self, checkpoints_path, tag):
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
if self.zero_optimization_partition_weights():
filename = 'zero_pp_rank_{}'.format(
torch.distributed.get_rank(group=self.optimizer.dp_process_group))
ckpt_name = os.path.join(
checkpoints_path,
str(tag),
filename + '_mp_rank_{:02d}'.format(mp_rank) + '_model_states.pt')
else:
ckpt_name = os.path.join(
checkpoints_path,
str(tag),
'mp_rank_{:02d}'.format(mp_rank) + '_model_states.pt')
return ckpt_name
def load_checkpoint(self,
load_dir,
tag=None,
load_module_strict=True,
load_optimizer_states=True,
load_lr_scheduler_states=True):
if tag is None:
latest_path = os.path.join(load_dir, 'latest')
if os.path.isfile(latest_path):
with open(latest_path, 'r') as fd:
tag = fd.read().strip()
else:
                logger.warning(f"Unable to find latest file at {latest_path}; if trying to load the latest " \
                    "checkpoint, please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.")
return None, None
load_path, client_states = self._load_checkpoint(load_dir,
tag,
load_module_strict=load_module_strict,
load_optimizer_states=load_optimizer_states,
load_lr_scheduler_states=load_lr_scheduler_states)
if self.zero_optimization() and load_path is not None:
self._load_zero_checkpoint(load_dir,
tag,
load_optimizer_states=load_optimizer_states)
return load_path, client_states
def _load_checkpoint(self,
load_dir,
tag,
load_module_strict=True,
load_optimizer_states=True,
load_lr_scheduler_states=True):
load_path = self._get_ckpt_name(load_dir, tag)
if not os.path.exists(load_path):
logger.warn(
'Client provided checkpoint load path: {} does not exist ... skip checkpoint load'
.format(load_path))
return None, None
logger.info(f'rank: {self.global_rank} loading checkpoint: {load_path}')
checkpoint = torch.load(load_path, map_location=lambda storage, loc: storage)
if isinstance(self.module, PipelineModule):
# Pipeline parallelism uses this to load its own checkpoint files.
self._curr_ckpt_path = os.path.join(load_dir, tag)
self.load_module_state_dict(state_dict=checkpoint['module'],
strict=load_module_strict)
if self.optimizer is not None and not self.zero_optimization():
if self.fp16_enabled():
self.optimizer.load_state_dict(
checkpoint['optimizer'],
load_optimizer_states=load_optimizer_states)
elif load_optimizer_states:
self.optimizer.load_state_dict(checkpoint['optimizer'])
if load_lr_scheduler_states and self.lr_scheduler is not None:
self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
self.csr_tensor_module_names = checkpoint['csr_tensor_module_names']
self.global_steps = checkpoint['global_steps']
self.global_samples = checkpoint.get('global_samples',
self.global_steps * self.train_batch_size())
self.skipped_steps = checkpoint['skipped_steps']
self.loaded_checkpoint_mp_world_size = checkpoint['mp_world_size']
self.loaded_checkpoint_dp_world_size = checkpoint['dp_world_size']
deepspeed_states = [
'module',
'optimizer',
'lr_scheduler',
'csr_tensor_module_names',
'skipped_steps',
'global_steps',
'dp_world_size',
'mp_world_size'
]
client_state = {
key: value
for key,
value in checkpoint.items() if not key in deepspeed_states
}
return load_path, client_state
def _load_zero_checkpoint(self, load_dir, tag, load_optimizer_states=True):
zero_sd_list = self._get_all_zero_checkpoints(load_dir, tag)
if zero_sd_list is None:
return
self.optimizer.load_state_dict(
state_dict_list=zero_sd_list,
load_optimizer_states=load_optimizer_states,
load_from_fp32_weights=self.zero_load_from_fp32_weights())
print(
f'loading {len(zero_sd_list)} zero partition checkpoints for rank {self.global_rank}'
)
def _get_mp_rank_zero_checkpoint_names(self, load_dir, tag, mp_rank, dp_world_size):
zero_ckpt_names = []
for dp_rank in range(dp_world_size):
ckpt_name = self._get_rank_zero_ckpt_name(checkpoints_path=load_dir,
tag=tag,
mp_rank=mp_rank,
dp_rank=dp_rank)
zero_ckpt_names.append(ckpt_name)
return zero_ckpt_names
def _get_all_zero_checkpoint_names(self,
load_dir,
tag,
mp_world_size,
dp_world_size):
zero_ckpt_names = []
for mp_rank in range(mp_world_size):
mp_rank_ckpt_names = self._get_mp_rank_zero_checkpoint_names(
load_dir=load_dir,
tag=tag,
mp_rank=mp_rank,
dp_world_size=dp_world_size)
zero_ckpt_names += mp_rank_ckpt_names
return zero_ckpt_names
def _get_all_zero_checkpoints(self, load_dir, tag):
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
zero_ckpt_names = self._get_mp_rank_zero_checkpoint_names(
load_dir=load_dir,
tag=tag,
mp_rank=mp_rank,
dp_world_size=self.loaded_checkpoint_dp_world_size)
invalid_zero_ckpt_paths = []
for i, ckpt_name in enumerate(zero_ckpt_names):
if not os.path.exists(ckpt_name):
# transparently handle the old file pattern for optim_states
if 'optim_states.pt' in ckpt_name:
ckpt_name_try = ckpt_name.replace("_optim_states.pt",
"optim_states.pt")
if os.path.exists(ckpt_name_try):
zero_ckpt_names[i] = ckpt_name_try
continue
invalid_zero_ckpt_paths.append(ckpt_name)
if len(invalid_zero_ckpt_paths) > 0:
logger.warn(
f"The following zero checkpoints paths are missing: {invalid_zero_ckpt_paths}"
)
return None
zero_sd_list = []
for ckpt_name in zero_ckpt_names:
zero_sd_list.append(torch.load(ckpt_name, map_location='cpu'))
zero_optimizer_sd = [sd['optimizer_state_dict'] for sd in zero_sd_list]
print(
f"successfully loaded {len(zero_optimizer_sd)} ZeRO state_dicts for rank {self.global_rank}"
)
return zero_optimizer_sd
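    # Verifies the checkpoint tag is identical on every rank by all-reducing the MIN and
    # MAX of the tag's SHA1 hash and comparing both against the local hash.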
def _checkpoint_tag_validation(self, tag):
if self.checkpoint_tag_validation_enabled():
s_hash = hashlib.sha1(tag.encode())
bhash = torch.ByteTensor([s_hash.digest()]).flatten().to(self.device)
max_bhash = bhash.clone()
min_bhash = bhash.clone()
dist.all_reduce(max_bhash, op=torch.distributed.ReduceOp.MAX)
dist.all_reduce(min_bhash, op=torch.distributed.ReduceOp.MIN)
valid = all(min_bhash == bhash) and all(max_bhash == bhash)
msg = f"[rank={dist.get_rank()}] The checkpoint tag name '{tag}' is not consistent across " \
"all ranks. Including rank unique information in checkpoint tag could cause issues when " \
"restoring with different world sizes."
if self.checkpoint_tag_validation_fail():
assert valid, msg
elif not valid:
logger.warning(msg)
def save_checkpoint(self, save_dir, tag=None, client_state={}, save_latest=True):
if self.zero_optimization_partition_weights():
# Prepare for state_dict() by ensuring all parameters are partitioned
self.optimizer.save_checkpoint_prologue()
# This is to make sure the checkpoint names are created without collision
        # There seems to be an issue creating them in parallel
# Ensure save_dir directory exists
os.makedirs(save_dir, exist_ok=True)
if tag is None:
tag = f"global_step{self.global_steps}"
# Ensure tag is a string
tag = str(tag)
# Ensure checkpoint tag is consistent across ranks
self._checkpoint_tag_validation(tag)
if self.save_non_zero_checkpoint:
self._create_checkpoint_file(save_dir, tag, False)
self._save_checkpoint(save_dir, tag, client_state=client_state)
if self.save_zero_checkpoint:
self._create_zero_checkpoint_files(save_dir, tag)
self._save_zero_checkpoint(save_dir, tag)
# Save latest checkpoint tag
if save_latest:
with open(os.path.join(save_dir, 'latest'), 'w') as fd:
fd.write(tag)
if self.zero_optimization_partition_weights():
self.optimizer.save_checkpoint_epilogue()
return True
def _create_checkpoint_file(self, save_dir, tag, zero_checkpoint):
name_function = self._get_zero_ckpt_name if zero_checkpoint else self._get_ckpt_name
try:
checkpoint_name = name_function(save_dir, tag)
ensure_directory_exists(checkpoint_name)
except:
logger.error(f'Failed saving model checkpoint to {save_dir} with tag {tag}')
return False
return True
def _create_zero_checkpoint_files(self, save_dir, tag):
success = True
# zero checkpoint files are created sequentially
for rank in range(self.world_size):
if rank == self.global_rank:
success = self._create_checkpoint_file(save_dir, tag, True)
dist.barrier()
return success
def _save_checkpoint(self, save_dir, tag, client_state={}):
save_path = self._get_ckpt_name(save_dir, tag)
# A hack to save the checkpointing directory. Pipeline parallelism overrides
# module_state_dict() and uses this path to save the model. module_state_dict()
# then instead just returns None.
self._curr_ckpt_path = os.path.join(save_dir, tag)
state = dict(
module=self.module_state_dict(),
optimizer=self.optimizer.state_dict()
if self.optimizer and not self.zero_optimization() else None,
lr_scheduler=self.lr_scheduler.state_dict()
if self.lr_scheduler is not None else None,
csr_tensor_module_names=self.csr_tensor_module_names,
skipped_steps=self.skipped_steps,
global_steps=self.global_steps,
global_samples=self.global_samples,
dp_world_size=self.dp_world_size,
mp_world_size=self.mp_world_size,
)
state.update(client_state)
log_dist(message=f'Saving model checkpoint: {save_path}', ranks=[0])
#logger.info('Saving model checkpoint: {}'.format(save_path))
torch.save(state, save_path)
self._curr_save_path = None
def _get_param_shapes(self):
param_shapes = OrderedDict()
for name, param in self.module.named_parameters():
param_shapes[name] = param.ds_shape if hasattr(param,
"ds_shape") else param.shape
# print(f"saving param {name} {param_shapes[name]}")
return param_shapes
def _copy_recovery_script(self, save_path):
base_dir = os.path.dirname(os.path.dirname(__file__))
script = "zero_to_fp32.py"
src = os.path.join(base_dir, "utils", script)
dst = os.path.join(save_path, script)
logger.info(f"creating recovery script {dst}")
copyfile(src, dst)
# make executable
os.chmod(dst, os.stat(dst).st_mode | stat.S_IEXEC)
def _save_zero_checkpoint(self, save_path, tag):
zero_checkpoint_name = self._get_zero_ckpt_name(save_path, tag)
zero_sd = dict(
optimizer_state_dict=self.optimizer.state_dict(),
param_shapes=self._get_param_shapes(),
)
torch.save(zero_sd, zero_checkpoint_name)
self._copy_recovery_script(save_path)
logger.info('zero checkpoint saved {}'.format(zero_checkpoint_name))
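    # Builds a full (un-partitioned) fp16 state_dict on rank 0 by gathering ZeRO-3
    # partitioned parameters one module at a time to limit peak memory, preserving
    # shared weights via their storage pointers.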
def _zero3_consolidated_fp16_state_dict(self):
import deepspeed
if not self.zero_optimization_partition_weights():
raise ValueError("this function requires ZeRO-3 mode")
state_dict = OrderedDict() if torch.distributed.get_rank() == 0 else None
shared_weights = {}
def get_layer_state_dict(module, prefix=""):
# gather one layer at a time to be memory-efficient
with deepspeed.zero.GatheredParameters(list(
module.parameters(recurse=False))):
if torch.distributed.get_rank() == 0:
for name, param in module.named_parameters(recurse=False):
if param is None:
continue
key = prefix + name
# for shared weights we want to make sure not to unshare them when copying to cpu
data_ptr_id = param.storage().data_ptr()
if data_ptr_id in shared_weights:
# shared weights
# print(f"`{key}` is shared with `{shared_weights[data_ptr_id]}`")
state_dict[key] = state_dict[shared_weights[data_ptr_id]]
else:
state_dict[key] = param.detach().cpu()
shared_weights[data_ptr_id] = key
#print(f"param {name} {param.shape}")
#print(f"param {key} {param.shape} {state_dict[key].storage().data_ptr()}")
            # now buffers - not sure if we need to take care of potentially shared weights here
for name, buf in module.named_buffers(recurse=False):
if buf is not None and name not in module._non_persistent_buffers_set:
state_dict[prefix + name] = buf.detach().cpu()
for name, child in module.named_children():
if child is not None:
get_layer_state_dict(child, prefix + name + ".")
see_memory_usage("before get_layer_state_dict", force=False)
get_layer_state_dict(self.module, prefix="")
see_memory_usage("after get_layer_state_dict", force=False)
return state_dict
def save_fp16_model(self, save_dir, save_filename="pytorch_model.bin"):
path = os.path.join(save_dir, save_filename)
if self.zero_optimization_partition_weights():
if self.zero_gather_fp16_weights_on_model_save():
# consolidation is expensive in time and memory and therefore isn't a default
state_dict = self._zero3_consolidated_fp16_state_dict()
else:
logger.info(
f"Did not save the model {path} because `stage3_gather_fp16_weights_on_model_save` is False"
)
return
else:
state_dict = self.module.state_dict()
if torch.distributed.get_rank() == 0:
os.makedirs(save_dir, exist_ok=True)
logger.info(f"Saving model weights to {path}")
torch.save(state_dict, path)
| true
| true
|
f71a744b58bcf58f5653e87192017fca4a93e074
| 580
|
py
|
Python
|
code/py/test_statsrecorder.py
|
notmatthancock/notmatthancock.github.io
|
abcd91cc7c2653c5243fe96ba2fd681ec03930bb
|
[
"MIT"
] | null | null | null |
code/py/test_statsrecorder.py
|
notmatthancock/notmatthancock.github.io
|
abcd91cc7c2653c5243fe96ba2fd681ec03930bb
|
[
"MIT"
] | null | null | null |
code/py/test_statsrecorder.py
|
notmatthancock/notmatthancock.github.io
|
abcd91cc7c2653c5243fe96ba2fd681ec03930bb
|
[
"MIT"
] | null | null | null |
import numpy as np
import statsrecorder as sr
rs = np.random.RandomState(323)
mystats = sr.StatsRecorder()
# Hold all observations in "data" to check for correctness.
ndims = 42
data = np.empty((0, ndims))
for i in range(1000):
nobserv = rs.randint(10,101)
newdata = rs.randn(nobserv, ndims)
data = np.vstack((data, newdata))
# Update stats recorder object
mystats.update(newdata)
# Check stats recorder object is doing its business right.
assert np.allclose(mystats.mean, data.mean(axis=0))
assert np.allclose(mystats.std, data.std(axis=0))
| 25.217391
| 62
| 0.705172
|
import numpy as np
import statsrecorder as sr
rs = np.random.RandomState(323)
mystats = sr.StatsRecorder()
ndims = 42
data = np.empty((0, ndims))
for i in range(1000):
nobserv = rs.randint(10,101)
newdata = rs.randn(nobserv, ndims)
data = np.vstack((data, newdata))
mystats.update(newdata)
assert np.allclose(mystats.mean, data.mean(axis=0))
assert np.allclose(mystats.std, data.std(axis=0))
| true
| true
|
f71a746bad402ab1d91d173ac40a919ce1f67c52
| 40,695
|
py
|
Python
|
sscanss/ui/dialogs/insert.py
|
samtygier-stfc/SScanSS-2
|
0df2160c32fdc533f7d391735bd55d524e253f4d
|
[
"BSD-3-Clause"
] | null | null | null |
sscanss/ui/dialogs/insert.py
|
samtygier-stfc/SScanSS-2
|
0df2160c32fdc533f7d391735bd55d524e253f4d
|
[
"BSD-3-Clause"
] | null | null | null |
sscanss/ui/dialogs/insert.py
|
samtygier-stfc/SScanSS-2
|
0df2160c32fdc533f7d391735bd55d524e253f4d
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from PyQt5 import QtCore, QtGui, QtWidgets
from sscanss.config import path_for, settings
from sscanss.core.math import Plane, Matrix33, Vector3, clamp, map_range, trunc, VECTOR_EPS
from sscanss.core.geometry import mesh_plane_intersection
from sscanss.core.util import Primitives, DockFlag, StrainComponents, PointType, PlaneOptions, Attributes
from sscanss.ui.widgets import (FormGroup, FormControl, GraphicsView, GraphicsScene, create_tool_button, FormTitle,
create_scroll_area, CompareValidator, GraphicsPointItem, Grid, create_icon)
from .managers import PointManager
class InsertPrimitiveDialog(QtWidgets.QWidget):
    """Provides UI for inserting a primitive model e.g. tube, sphere, cylinder or cuboid
:param primitive: primitive type
:type primitive: Primitives
:param parent: Main window
:type parent: MainWindow
"""
dock_flag = DockFlag.Upper
def __init__(self, primitive, parent):
super().__init__(parent)
self.parent = parent
self.parent_model = self.parent.presenter.model
self.parent.scenes.switchToSampleScene()
self.primitive = primitive
self.main_layout = QtWidgets.QVBoxLayout()
self.textboxes = {}
name = self.parent_model.uniqueKey(self.primitive.value)
self.mesh_args = {'name': name}
if self.primitive == Primitives.Tube:
self.mesh_args.update({'outer_radius': 100.000, 'inner_radius': 50.000, 'height': 200.000})
elif self.primitive == Primitives.Sphere:
self.mesh_args.update({'radius': 100.000})
elif self.primitive == Primitives.Cylinder:
self.mesh_args.update({'radius': 100.000, 'height': 200.000})
else:
self.mesh_args.update({'width': 50.000, 'height': 100.000, 'depth': 200.000})
self.createPrimitiveSwitcher()
self.createFormInputs()
button_layout = QtWidgets.QHBoxLayout()
self.create_primitive_button = QtWidgets.QPushButton('Create')
self.create_primitive_button.clicked.connect(self.createPrimiviteButtonClicked)
button_layout.addWidget(self.create_primitive_button)
button_layout.addStretch(1)
self.main_layout.addLayout(button_layout)
self.main_layout.addStretch(1)
self.setLayout(self.main_layout)
self.title = 'Insert {}'.format(self.primitive.value)
self.setMinimumWidth(450)
self.textboxes['name'].setFocus()
def createPrimitiveSwitcher(self):
switcher_layout = QtWidgets.QHBoxLayout()
switcher = create_tool_button(style_name='MenuButton', status_tip='Open dialog for a different primitive')
switcher.setArrowType(QtCore.Qt.DownArrow)
switcher.setPopupMode(QtWidgets.QToolButton.InstantPopup)
switcher.setMenu(self.parent.primitives_menu)
switcher_layout.addStretch(1)
switcher_layout.addWidget(switcher)
self.main_layout.addLayout(switcher_layout)
def createFormInputs(self):
self.form_group = FormGroup()
for key, value in self.mesh_args.items():
pretty_label = key.replace('_', ' ').title()
if key == 'name':
control = FormControl(pretty_label, value, required=True)
control.form_lineedit.textChanged.connect(self.nameCheck)
else:
control = FormControl(pretty_label, value, desc='mm', required=True, number=True)
control.range(0, None, min_exclusive=True)
self.textboxes[key] = control
self.form_group.addControl(control)
if self.primitive == Primitives.Tube:
outer_radius = self.textboxes['outer_radius']
inner_radius = self.textboxes['inner_radius']
outer_radius.compareWith(inner_radius, CompareValidator.Operator.Greater)
inner_radius.compareWith(outer_radius, CompareValidator.Operator.Less)
self.main_layout.addWidget(self.form_group)
self.form_group.groupValidation.connect(self.formValidation)
def nameCheck(self, value):
if self.parent_model.all_sample_key == value:
self.textboxes['name'].isInvalid(f'"{self.parent_model.all_sample_key}" is a reserved name')
def formValidation(self, is_valid):
if is_valid:
self.create_primitive_button.setEnabled(True)
else:
self.create_primitive_button.setDisabled(True)
def createPrimiviteButtonClicked(self):
for key, textbox in self.textboxes.items():
value = textbox.value
self.mesh_args[key] = value
self.parent.presenter.addPrimitive(self.primitive, self.mesh_args)
new_name = self.parent_model.uniqueKey(self.primitive.value)
self.textboxes['name'].value = new_name
class InsertPointDialog(QtWidgets.QWidget):
"""Provides UI for typing in measurement/fiducial points
:param point_type: point type
:type point_type: PointType
:param parent: Main window
:type parent: MainWindow
"""
dock_flag = DockFlag.Upper
def __init__(self, point_type, parent):
super().__init__(parent)
self.parent = parent
self.parent_model = parent.presenter.model
self.parent.scenes.switchToSampleScene()
self.point_type = point_type
self.title = 'Add {} Point'.format(point_type.value)
self.main_layout = QtWidgets.QVBoxLayout()
unit = 'mm'
self.form_group = FormGroup()
self.x_axis = FormControl('X', 0.0, required=True, desc=unit, number=True)
self.y_axis = FormControl('Y', 0.0, required=True, desc=unit, number=True)
self.z_axis = FormControl('Z', 0.0, required=True, desc=unit, number=True)
self.form_group.addControl(self.x_axis)
self.form_group.addControl(self.y_axis)
self.form_group.addControl(self.z_axis)
self.form_group.groupValidation.connect(self.formValidation)
button_layout = QtWidgets.QHBoxLayout()
self.execute_button = QtWidgets.QPushButton(self.title)
self.execute_button.clicked.connect(self.executeButtonClicked)
button_layout.addWidget(self.execute_button)
button_layout.addStretch(1)
self.main_layout.addWidget(self.form_group)
self.main_layout.addLayout(button_layout)
self.main_layout.addStretch(1)
self.setLayout(self.main_layout)
self.setMinimumWidth(450)
def formValidation(self, is_valid):
if is_valid:
self.execute_button.setEnabled(True)
else:
self.execute_button.setDisabled(True)
def executeButtonClicked(self):
point = [self.x_axis.value, self.y_axis.value, self.z_axis.value]
self.parent.presenter.addPoints([(point, True)], self.point_type)
class InsertVectorDialog(QtWidgets.QWidget):
"""Provides UI for adding measurement vectors using a variety of methods
:param parent: Main window
:type parent: MainWindow
"""
dock_flag = DockFlag.Upper
def __init__(self, parent):
super().__init__(parent)
self.parent = parent
self.parent_model = parent.presenter.model
self.parent.scenes.switchToSampleScene()
self.title = 'Add Measurement Vectors'
self.main_layout = QtWidgets.QVBoxLayout()
spacing = 10
self.main_layout.addSpacing(spacing)
self.main_layout.addWidget(QtWidgets.QLabel('Measurement Point:'))
self.points_combobox = QtWidgets.QComboBox()
self.points_combobox.setView(QtWidgets.QListView())
self.main_layout.addWidget(self.points_combobox)
self.updatePointList()
self.main_layout.addSpacing(spacing)
layout = QtWidgets.QHBoxLayout()
alignment_layout = QtWidgets.QVBoxLayout()
alignment_layout.addWidget(QtWidgets.QLabel('Alignment:'))
self.alignment_combobox = QtWidgets.QComboBox()
self.alignment_combobox.setView(QtWidgets.QListView())
self.alignment_combobox.setInsertPolicy(QtWidgets.QComboBox.InsertAtCurrent)
self.updateAlignment()
self.alignment_combobox.activated.connect(self.addNewAlignment)
self.alignment_combobox.currentIndexChanged.connect(self.changeRenderedAlignment)
alignment_layout.addWidget(self.alignment_combobox)
alignment_layout.addSpacing(spacing)
layout.addLayout(alignment_layout)
self.detector_combobox = QtWidgets.QComboBox()
self.detector_combobox.setView(QtWidgets.QListView())
self.detector_combobox.addItems(list(self.parent_model.instrument.detectors.keys()))
if len(self.parent_model.instrument.detectors) > 1:
detector_layout = QtWidgets.QVBoxLayout()
detector_layout.addWidget(QtWidgets.QLabel('Detector:'))
detector_layout.addWidget(self.detector_combobox)
size = self.detector_combobox.iconSize()
self.detector_combobox.setItemIcon(0, create_icon(settings.value(settings.Key.Vector_1_Colour), size))
self.detector_combobox.setItemIcon(1, create_icon(settings.value(settings.Key.Vector_2_Colour), size))
detector_layout.addSpacing(spacing)
layout.addSpacing(spacing)
layout.addLayout(detector_layout)
self.main_layout.addLayout(layout)
self.main_layout.addWidget(QtWidgets.QLabel('Strain Component:'))
self.component_combobox = QtWidgets.QComboBox()
self.component_combobox.setView(QtWidgets.QListView())
strain_components = [s.value for s in StrainComponents]
self.component_combobox.addItems(strain_components)
self.component_combobox.currentTextChanged.connect(self.toggleKeyInBox)
self.main_layout.addWidget(self.component_combobox)
self.main_layout.addSpacing(spacing)
button_layout = QtWidgets.QHBoxLayout()
self.execute_button = QtWidgets.QPushButton(self.title)
self.execute_button.clicked.connect(self.executeButtonClicked)
button_layout.addWidget(self.execute_button)
button_layout.addStretch(1)
self.createKeyInBox()
self.reverse_checkbox = QtWidgets.QCheckBox('Reverse Direction of Vector')
self.main_layout.addWidget(self.reverse_checkbox)
self.main_layout.addSpacing(spacing)
self.main_layout.addLayout(button_layout)
self.main_layout.addStretch(1)
self.setLayout(self.main_layout)
self.parent_model.measurement_points_changed.connect(self.updatePointList)
self.parent_model.measurement_vectors_changed.connect(self.updateAlignment)
self.parent.scenes.rendered_alignment_changed.connect(self.alignment_combobox.setCurrentIndex)
self.setMinimumWidth(450)
def updatePointList(self):
self.points_combobox.clear()
point_list = ['All Points']
point_list.extend(['{}'.format(i+1) for i in range(self.parent_model.measurement_points.size)])
self.points_combobox.addItems(point_list)
def updateAlignment(self):
align_count = self.parent_model.measurement_vectors.shape[2]
if align_count != self.alignment_combobox.count() - 1:
self.alignment_combobox.clear()
alignment_list = ['{}'.format(i + 1) for i in range(align_count)]
alignment_list.append('Add New...')
self.alignment_combobox.addItems(alignment_list)
self.alignment_combobox.setCurrentIndex(self.parent.scenes.rendered_alignment)
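    # Selecting the trailing 'Add New...' entry inserts a new numbered alignment in its
    # place and makes it the current selection.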
def addNewAlignment(self, index):
if index == self.alignment_combobox.count() - 1:
self.alignment_combobox.insertItem(index, '{}'.format(index + 1))
self.alignment_combobox.setCurrentIndex(index)
def changeRenderedAlignment(self, index):
align_count = self.parent_model.measurement_vectors.shape[2]
if 0 <= index < align_count:
self.parent.scenes.changeRenderedAlignment(index)
elif index >= align_count:
self.parent.scenes.changeVisibility(Attributes.Vectors, False)
def toggleKeyInBox(self, selected_text):
strain_component = StrainComponents(selected_text)
if strain_component == StrainComponents.custom:
self.key_in_box.setVisible(True)
self.form_group.validateGroup()
else:
self.key_in_box.setVisible(False)
self.execute_button.setEnabled(True)
def createKeyInBox(self):
self.key_in_box = QtWidgets.QWidget(self)
layout = QtWidgets.QVBoxLayout()
self.form_group = FormGroup(FormGroup.Layout.Horizontal)
self.x_axis = FormControl('X', 1.0, required=True, number=True, decimals=7)
self.x_axis.range(-1.0, 1.0)
self.y_axis = FormControl('Y', 0.0, required=True, number=True, decimals=7)
self.y_axis.range(-1.0, 1.0)
self.z_axis = FormControl('Z', 0.0, required=True, number=True, decimals=7)
self.z_axis.range(-1.0, 1.0)
self.form_group.addControl(self.x_axis)
self.form_group.addControl(self.y_axis)
self.form_group.addControl(self.z_axis)
self.form_group.groupValidation.connect(self.formValidation)
layout.addWidget(self.form_group)
self.key_in_box.setLayout(layout)
self.main_layout.addWidget(self.key_in_box)
self.toggleKeyInBox(self.component_combobox.currentText())
def formValidation(self, is_valid):
self.execute_button.setDisabled(True)
if is_valid:
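            # A keyed-in vector must have a norm above VECTOR_EPS; near-zero vectors do
            # not define a usable direction and are flagged as 'Bad Normal'.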
if np.linalg.norm([self.x_axis.value, self.y_axis.value, self.z_axis.value]) > VECTOR_EPS:
self.x_axis.validation_label.setText('')
self.execute_button.setEnabled(True)
else:
self.x_axis.validation_label.setText('Bad Normal')
def executeButtonClicked(self):
points = self.points_combobox.currentIndex() - 1
selected_text = self.component_combobox.currentText()
strain_component = StrainComponents(selected_text)
alignment = self.alignment_combobox.currentIndex()
detector = self.detector_combobox.currentIndex()
check_state = self.reverse_checkbox.checkState()
reverse = True if check_state == QtCore.Qt.Checked else False
if strain_component == StrainComponents.custom:
vector = [self.x_axis.value, self.y_axis.value, self.z_axis.value]
else:
vector = None
self.parent.presenter.addVectors(points, strain_component, alignment, detector,
key_in=vector, reverse=reverse)
# New vectors are drawn by the scene manager after function ends
self.parent.scenes._rendered_alignment = alignment
def closeEvent(self, event):
self.parent.scenes.changeRenderedAlignment(0)
event.accept()
class PickPointDialog(QtWidgets.QWidget):
"""Provides UI for selecting measurement points on a cross section of the sample
:param parent: Main window
:type parent: MainWindow
"""
dock_flag = DockFlag.Full
def __init__(self, parent):
super().__init__(parent)
self.parent = parent
self.parent_model = parent.presenter.model
self.parent.scenes.switchToSampleScene()
self.title = 'Add Measurement Points Graphically'
self.setMinimumWidth(500)
self.plane_offset_range = (-1., 1.)
self.slider_range = (-10000000, 10000000)
self.sample_scale = 20
self.path_pen = QtGui.QPen(QtGui.QColor(255, 0, 0), 0)
self.point_pen = QtGui.QPen(QtGui.QColor(200, 0, 0), 0)
self.main_layout = QtWidgets.QVBoxLayout()
self.setLayout(self.main_layout)
button_layout = QtWidgets.QHBoxLayout()
self.help_button = create_tool_button(tooltip='Help', style_name='ToolButton',
status_tip='Display shortcuts for the cross-section view',
icon_path=path_for('question.png'))
self.help_button.clicked.connect(self.showHelp)
self.reset_button = create_tool_button(tooltip='Reset View', style_name='ToolButton',
status_tip='Reset camera transformation of the cross-section view',
icon_path=path_for('refresh.png'))
self.execute_button = QtWidgets.QPushButton('Add Points')
self.execute_button.clicked.connect(self.addPoints)
button_layout.addWidget(self.help_button)
button_layout.addWidget(self.reset_button)
button_layout.addStretch(1)
button_layout.addWidget(self.execute_button)
self.main_layout.addLayout(button_layout)
self.splitter = QtWidgets.QSplitter(QtCore.Qt.Vertical)
self.splitter.setChildrenCollapsible(False)
self.main_layout.addWidget(self.splitter)
self.createGraphicsView()
self.reset_button.clicked.connect(self.view.reset)
self.createControlPanel()
self.prepareMesh()
self.parent_model.sample_changed.connect(self.prepareMesh)
self.parent_model.measurement_points_changed.connect(self.updateCrossSection)
self.initializing = True
def showEvent(self, event):
if self.initializing:
self.view.fitInView(self.view.anchor, QtCore.Qt.KeepAspectRatio)
self.initializing = False
super().showEvent(event)
def closeEvent(self, event):
self.parent.scenes.removePlane()
event.accept()
def prepareMesh(self):
self.mesh = None
samples = self.parent_model.sample
for _, sample in samples.items():
if self.mesh is None:
self.mesh = sample.copy()
else:
self.mesh.append(sample)
self.scene.clear()
self.tabs.setEnabled(self.mesh is not None)
if self.mesh is not None:
self.setPlane(self.plane_combobox.currentText())
else:
self.parent.scenes.removePlane()
self.view.reset()
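    # Converts the cursor position from view pixels back to sample coordinates (via the
    # inverse scene transform, the sample scale and the section view matrix) and shows
    # it in the main window's cursor label; the label is cleared outside the view.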
def updateStatusBar(self, point):
if self.view.rect().contains(point):
transform = self.view.scene_transform.inverted()[0]
scene_pt = transform.map(self.view.mapToScene(point)) / self.sample_scale
world_pt = [scene_pt.x(), scene_pt.y(), -self.old_distance] @ self.matrix.transpose()
cursor_text = f'X: {world_pt[0]:.3f} Y: {world_pt[1]:.3f} Z: {world_pt[2]:.3f}'
self.parent.cursor_label.setText(cursor_text)
else:
self.parent.cursor_label.clear()
def createGraphicsView(self):
self.scene = GraphicsScene(self.sample_scale, self)
self.view = GraphicsView(self.scene)
self.view.mouse_moved.connect(self.updateStatusBar)
self.view.setMinimumHeight(350)
self.splitter.addWidget(self.view)
def createControlPanel(self):
self.tabs = QtWidgets.QTabWidget()
self.tabs.setMinimumHeight(250)
self.tabs.setTabPosition(QtWidgets.QTabWidget.South)
self.splitter.addWidget(self.tabs)
self.createPlaneTab()
self.createSelectionToolsTab()
self.createGridOptionsTab()
point_manager = PointManager(PointType.Measurement, self.parent)
self.tabs.addTab(create_scroll_area(point_manager), 'Point Manager')
def createPlaneTab(self):
layout = QtWidgets.QVBoxLayout()
layout.addWidget(QtWidgets.QLabel('Specify Plane:'))
self.plane_combobox = QtWidgets.QComboBox()
self.plane_combobox.setView(QtWidgets.QListView())
self.plane_combobox.addItems([p.value for p in PlaneOptions])
self.plane_combobox.currentTextChanged.connect(self.setPlane)
self.createCustomPlaneBox()
layout.addWidget(self.plane_combobox)
layout.addWidget(self.custom_plane_widget)
layout.addSpacing(20)
slider_layout = QtWidgets.QHBoxLayout()
slider_layout.addWidget(QtWidgets.QLabel('Plane Distance from Origin (mm):'))
self.plane_lineedit = QtWidgets.QLineEdit()
validator = QtGui.QDoubleValidator(self.plane_lineedit)
validator.setNotation(QtGui.QDoubleValidator.StandardNotation)
validator.setDecimals(3)
self.plane_lineedit.setValidator(validator)
self.plane_lineedit.textEdited.connect(self.updateSlider)
self.plane_lineedit.editingFinished.connect(self.movePlane)
slider_layout.addStretch(1)
slider_layout.addWidget(self.plane_lineedit)
layout.addLayout(slider_layout)
self.plane_slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self.plane_slider.setMinimum(self.slider_range[0])
self.plane_slider.setMaximum(self.slider_range[1])
self.plane_slider.setFocusPolicy(QtCore.Qt.StrongFocus)
self.plane_slider.setSingleStep(1)
self.plane_slider.sliderMoved.connect(self.updateLineEdit)
self.plane_slider.sliderReleased.connect(self.movePlane)
layout.addWidget(self.plane_slider)
layout.addStretch(1)
plane_tab = QtWidgets.QWidget()
plane_tab.setLayout(layout)
self.tabs.addTab(create_scroll_area(plane_tab), 'Define Plane')
def createSelectionToolsTab(self):
layout = QtWidgets.QVBoxLayout()
selector_layout = QtWidgets.QHBoxLayout()
selector_layout.addWidget(QtWidgets.QLabel('Select Geometry of Points: '))
self.button_group = QtWidgets.QButtonGroup()
self.button_group.buttonClicked[int].connect(self.changeSceneMode)
self.object_selector = create_tool_button(checkable=True, checked=True, tooltip='Select Points',
status_tip='Select movable points from the cross-section view',
style_name='MidToolButton', icon_path=path_for('select.png'))
self.point_selector = create_tool_button(checkable=True, tooltip='Draw a Point',
status_tip='Draw a single point at the selected position',
style_name='MidToolButton', icon_path=path_for('point.png'))
self.line_selector = create_tool_button(checkable=True, tooltip='Draw Points on Line',
status_tip='Draw equally spaced points on the selected line',
style_name='MidToolButton', icon_path=path_for('line_tool.png'))
self.area_selector = create_tool_button(checkable=True, tooltip='Draw Points on Area',
status_tip='Draw a grid of points on the selected area',
style_name='MidToolButton', icon_path=path_for('area_tool.png'))
self.button_group.addButton(self.object_selector, GraphicsScene.Mode.Select.value)
self.button_group.addButton(self.point_selector, GraphicsScene.Mode.Draw_point.value)
self.button_group.addButton(self.line_selector, GraphicsScene.Mode.Draw_line.value)
self.button_group.addButton(self.area_selector, GraphicsScene.Mode.Draw_area.value)
selector_layout.addWidget(self.object_selector)
selector_layout.addWidget(self.point_selector)
selector_layout.addWidget(self.line_selector)
selector_layout.addWidget(self.area_selector)
selector_layout.addStretch(1)
self.createLineToolWidget()
self.createAreaToolWidget()
layout.addLayout(selector_layout)
layout.addWidget(self.line_tool_widget)
layout.addWidget(self.area_tool_widget)
layout.addStretch(1)
select_tab = QtWidgets.QWidget()
select_tab.setLayout(layout)
self.tabs.addTab(create_scroll_area(select_tab), 'Selection Tools')
def createGridOptionsTab(self):
layout = QtWidgets.QVBoxLayout()
self.show_grid_checkbox = QtWidgets.QCheckBox('Show Grid')
self.show_grid_checkbox.stateChanged.connect(self.showGrid)
self.snap_to_grid_checkbox = QtWidgets.QCheckBox('Snap Selection to Grid')
self.snap_to_grid_checkbox.stateChanged.connect(self.snapToGrid)
self.snap_to_grid_checkbox.setEnabled(self.view.show_grid)
layout.addWidget(self.show_grid_checkbox)
layout.addWidget(self.snap_to_grid_checkbox)
self.createGridWidget()
layout.addWidget(self.grid_widget)
layout.addStretch(1)
grid_tab = QtWidgets.QWidget()
grid_tab.setLayout(layout)
self.tabs.addTab(create_scroll_area(grid_tab), 'Grid Options')
def createCustomPlaneBox(self):
self.custom_plane_widget = QtWidgets.QWidget(self)
layout = QtWidgets.QVBoxLayout()
self.form_group = FormGroup(FormGroup.Layout.Horizontal)
self.x_axis = FormControl('X', 1.0, required=True, number=True)
self.x_axis.range(-1.0, 1.0)
self.y_axis = FormControl('Y', 0.0, required=True, number=True)
self.y_axis.range(-1.0, 1.0)
self.z_axis = FormControl('Z', 0.0, required=True, number=True)
self.z_axis.range(-1.0, 1.0)
self.form_group.addControl(self.x_axis)
self.form_group.addControl(self.y_axis)
self.form_group.addControl(self.z_axis)
self.form_group.groupValidation.connect(self.setCustomPlane)
layout.addWidget(self.form_group)
self.custom_plane_widget.setLayout(layout)
def createLineToolWidget(self):
self.line_tool_widget = QtWidgets.QWidget(self)
layout = QtWidgets.QHBoxLayout()
layout.setContentsMargins(0, 20, 0, 0)
layout.addWidget(QtWidgets.QLabel('Number of Points: '))
self.line_point_count_spinbox = QtWidgets.QSpinBox()
self.line_point_count_spinbox.setValue(self.scene.line_tool_size)
self.line_point_count_spinbox.setRange(2, 100)
self.line_point_count_spinbox.valueChanged.connect(self.scene.setLineToolSize)
layout.addWidget(self.line_point_count_spinbox)
self.line_tool_widget.setVisible(False)
self.line_tool_widget.setLayout(layout)
def createAreaToolWidget(self):
self.area_tool_widget = QtWidgets.QWidget(self)
layout = QtWidgets.QHBoxLayout()
layout.setContentsMargins(0, 20, 0, 0)
layout.addWidget(QtWidgets.QLabel('Number of Points: '))
self.area_x_spinbox = QtWidgets.QSpinBox()
self.area_x_spinbox.setValue(self.scene.area_tool_size[0])
self.area_x_spinbox.setRange(2, 100)
self.area_y_spinbox = QtWidgets.QSpinBox()
self.area_y_spinbox.setValue(self.scene.area_tool_size[1])
self.area_y_spinbox.setRange(2, 100)
stretch_factor = 3
layout.addStretch(1)
layout.addWidget(QtWidgets.QLabel('X: '))
self.area_x_spinbox.valueChanged.connect(lambda: self.scene.setAreaToolSize(self.area_x_spinbox.value(),
self.area_y_spinbox.value()))
layout.addWidget(self.area_x_spinbox, stretch_factor)
layout.addStretch(1)
layout.addWidget(QtWidgets.QLabel('Y: '))
self.area_y_spinbox.valueChanged.connect(lambda: self.scene.setAreaToolSize(self.area_x_spinbox.value(),
self.area_y_spinbox.value()))
layout.addWidget(self.area_y_spinbox, stretch_factor)
self.area_tool_widget.setVisible(False)
self.area_tool_widget.setLayout(layout)
def createGridWidget(self):
self.grid_widget = QtWidgets.QWidget(self)
main_layout = QtWidgets.QVBoxLayout()
main_layout.setContentsMargins(0, 20, 0, 0)
layout = QtWidgets.QHBoxLayout()
layout.addWidget(QtWidgets.QLabel('Grid Type: '))
grid_combobox = QtWidgets.QComboBox()
grid_combobox.setView(QtWidgets.QListView())
grid_combobox.addItems([g.value for g in Grid.Type])
grid_combobox.currentTextChanged.connect(lambda value: self.setGridType(Grid.Type(value)))
layout.addWidget(grid_combobox)
main_layout.addLayout(layout)
main_layout.addSpacing(20)
layout = QtWidgets.QHBoxLayout()
layout.addWidget(QtWidgets.QLabel('Grid Size: '))
self.grid_x_label = QtWidgets.QLabel('')
self.grid_x_spinbox = QtWidgets.QDoubleSpinBox()
self.grid_x_spinbox.setDecimals(1)
self.grid_x_spinbox.setSingleStep(0.1)
self.grid_x_spinbox.valueChanged.connect(self.changeGridSize)
self.grid_y_label = QtWidgets.QLabel('')
self.grid_y_spinbox = QtWidgets.QDoubleSpinBox()
self.grid_y_spinbox.setDecimals(1)
self.grid_y_spinbox.setSingleStep(0.1)
self.grid_y_spinbox.valueChanged.connect(self.changeGridSize)
stretch_factor = 3
layout.addStretch(1)
layout.addWidget(self.grid_x_label)
layout.addWidget(self.grid_x_spinbox, stretch_factor)
layout.addStretch(1)
layout.addWidget(self.grid_y_label)
layout.addWidget(self.grid_y_spinbox, stretch_factor)
main_layout.addLayout(layout)
self.setGridType(self.view.grid.type)
self.grid_widget.setVisible(False)
self.grid_widget.setLayout(main_layout)
def changeGridSize(self):
if self.view.grid.type == Grid.Type.Box:
grid_x = int(self.grid_x_spinbox.value() * self.sample_scale)
grid_y = int(self.grid_y_spinbox.value() * self.sample_scale)
else:
grid_x = int(self.grid_x_spinbox.value() * self.sample_scale)
grid_y = self.grid_y_spinbox.value()
self.view.setGridSize((grid_x, grid_y))
def setGridType(self, grid_type):
self.view.setGridType(grid_type)
size = self.view.grid.size
if grid_type == Grid.Type.Box:
self.grid_x_label.setText('X (mm): ')
self.grid_y_label.setText('Y (mm): ')
self.grid_x_spinbox.setValue(size[0])
self.grid_y_spinbox.setValue(size[1])
self.grid_x_spinbox.setRange(0.1, 1000)
self.grid_y_spinbox.setRange(0.1, 1000)
else:
self.grid_x_label.setText('Radius (mm): ')
self.grid_y_label.setText('Angle (degree): ')
self.grid_x_spinbox.setValue(size[0])
self.grid_y_spinbox.setValue(size[1])
self.grid_x_spinbox.setRange(0.1, 1000)
self.grid_y_spinbox.setRange(0.1, 360)
def changeSceneMode(self, button_id):
self.scene.mode = GraphicsScene.Mode(button_id)
self.line_tool_widget.setVisible(self.scene.mode == GraphicsScene.Mode.Draw_line)
self.area_tool_widget.setVisible(self.scene.mode == GraphicsScene.Mode.Draw_area)
def showHelp(self):
self.view.show_help = False if self.view.has_foreground else True
self.scene.update()
def showGrid(self, state):
self.view.show_grid = True if state == QtCore.Qt.Checked else False
self.snap_to_grid_checkbox.setEnabled(self.view.show_grid)
self.grid_widget.setVisible(self.view.show_grid)
self.scene.update()
def snapToGrid(self, state):
self.view.snap_to_grid = True if state == QtCore.Qt.Checked else False
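    # The distance line edit and slider mirror one another: editing either moves the
    # rendered plane by the offset from the last committed distance, and movePlane()
    # commits the new plane (and cross-section) when editing finishes or the slider is
    # released.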
def updateSlider(self, value):
if not self.plane_lineedit.hasAcceptableInput():
return
new_distance = clamp(float(value), *self.plane_offset_range)
slider_value = int(map_range(*self.plane_offset_range, *self.slider_range, new_distance))
self.plane_slider.setValue(slider_value)
offset = new_distance - self.old_distance
self.parent.scenes.movePlane(offset * self.plane.normal)
self.old_distance = new_distance
def updateLineEdit(self, value):
new_distance = trunc(map_range(*self.slider_range, *self.plane_offset_range, value), 3)
self.plane_lineedit.setText('{:.3f}'.format(new_distance))
offset = new_distance - self.old_distance
self.parent.scenes.movePlane(offset * self.plane.normal)
self.old_distance = new_distance
def movePlane(self):
distance = clamp(float(self.plane_lineedit.text()), *self.plane_offset_range)
self.plane_lineedit.setText('{:.3f}'.format(distance))
point = distance * self.plane.normal
self.plane = Plane(self.plane.normal, point)
self.updateCrossSection()
def setCustomPlane(self, is_valid):
if is_valid:
normal = np.array([self.x_axis.value, self.y_axis.value, self.z_axis.value])
try:
self.initializePlane(normal, self.mesh.bounding_box.center)
except ValueError:
self.x_axis.validation_label.setText('Bad Normal')
def setPlane(self, selected_text):
if selected_text == PlaneOptions.Custom.value:
self.custom_plane_widget.setVisible(True)
self.form_group.validateGroup()
return
else:
self.custom_plane_widget.setVisible(False)
if selected_text == PlaneOptions.XY.value:
plane_normal = np.array([0., 0., 1.])
elif selected_text == PlaneOptions.XZ.value:
plane_normal = np.array([0., 1., 0.])
else:
plane_normal = np.array([1., 0., 0.])
self.initializePlane(plane_normal, self.mesh.bounding_box.center)
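    # Creates the cross-section plane through the given point, sizes the offset range to
    # the mesh's bounding radius, and builds the view matrix used to project the 3D
    # cross-section into the 2D scene.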
def initializePlane(self, plane_normal, plane_point):
self.plane = Plane(plane_normal, plane_point)
plane_size = self.mesh.bounding_box.radius
self.parent.scenes.drawPlane(self.plane, 2 * plane_size, 2 * plane_size)
distance = self.plane.distanceFromOrigin()
self.plane_offset_range = (distance - plane_size, distance + plane_size)
slider_value = int(map_range(*self.plane_offset_range, *self.slider_range, distance))
self.plane_slider.setValue(slider_value)
self.plane_lineedit.setText('{:.3f}'.format(distance))
self.old_distance = distance
# inverted the normal so that the y-axis is flipped
self.matrix = self.__lookAt(-Vector3(self.plane.normal))
self.view.resetTransform()
self.updateCrossSection()
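    # Recomputes the 2D cross-section: intersects the mesh with the current plane,
    # draws the resulting segments as a path, and re-projects any measurement points
    # lying on the plane as fixed (non-movable) point items.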
def updateCrossSection(self):
self.scene.clear()
segments = mesh_plane_intersection(self.mesh, self.plane)
if len(segments) == 0:
return
segments = np.array(segments)
item = QtWidgets.QGraphicsPathItem()
cross_section_path = QtGui.QPainterPath()
rotated_segments = self.sample_scale * (segments @ self.matrix)
for i in range(0, rotated_segments.shape[0], 2):
start = rotated_segments[i, :]
cross_section_path.moveTo(start[0], start[1])
end = rotated_segments[i + 1, :]
cross_section_path.lineTo(end[0], end[1])
item.setPath(cross_section_path)
item.setPen(self.path_pen)
item.setTransform(self.view.scene_transform)
self.scene.addItem(item)
rect = item.boundingRect()
anchor = rect.center()
ab = self.plane.point - self.parent_model.measurement_points.points
d = np.einsum('ij,ij->i', np.expand_dims(self.plane.normal, axis=0), ab)
index = np.where(np.abs(d) < VECTOR_EPS)[0]
rotated_points = self.parent_model.measurement_points.points[index, :]
rotated_points = rotated_points @ self.matrix
for i, p in zip(index, rotated_points):
point = QtCore.QPointF(p[0], p[1]) * self.sample_scale
point = self.view.scene_transform.map(point)
item = GraphicsPointItem(point, size=self.scene.point_size)
item.setToolTip(f'Point {i + 1}')
item.fixed = True
item.makeControllable(self.scene.mode == GraphicsScene.Mode.Select)
item.setPen(self.point_pen)
self.scene.addItem(item)
rect = rect.united(item.boundingRect().translated(point))
# calculate new rectangle that encloses original rect with a different anchor
        rect = rect.united(rect.translated(anchor - rect.center()))
self.view.setSceneRect(rect)
self.view.fitInView(rect, QtCore.Qt.KeepAspectRatio)
self.view.anchor = rect
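    # Builds a rotation matrix with columns (left, up, forward) for viewing along the
    # given forward direction; the initial up vector is chosen to avoid being parallel
    # to forward when the direction is close to the y-axis.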
@staticmethod
def __lookAt(forward):
rot_matrix = Matrix33.identity()
up = Vector3([0., -1., 0.]) if -VECTOR_EPS < forward[1] < VECTOR_EPS else Vector3([0., 0., 1.])
left = up ^ forward
left.normalize()
up = forward ^ left
rot_matrix.c1[:3] = left
rot_matrix.c2[:3] = up
rot_matrix.c3[:3] = forward
return rot_matrix
def addPoints(self):
if len(self.scene.items()) < 2:
return
points_2d = []
transform = self.view.scene_transform.inverted()[0]
for item in self.scene.items():
if isinstance(item, GraphicsPointItem) and not item.fixed:
pos = transform.map(item.pos()) / self.sample_scale
# negate distance due to inverted normal when creating matrix
points_2d.append([pos.x(), pos.y(), -self.old_distance])
self.scene.removeItem(item)
if not points_2d:
return
points = points_2d[::-1] @ self.matrix.transpose()
enabled = [True] * points.shape[0]
self.parent.presenter.addPoints(list(zip(points, enabled)), PointType.Measurement, False)
class AlignSample(QtWidgets.QWidget):
"""Provides UI for aligning sample on instrument with 6D pose
:param parent: Main window
:type parent: MainWindow
"""
dock_flag = DockFlag.Upper
def __init__(self, parent):
super().__init__(parent)
self.parent = parent
self.parent.scenes.switchToInstrumentScene()
self.title = 'Align Sample with 6D pose'
self.setMinimumWidth(450)
self.main_layout = QtWidgets.QVBoxLayout()
self.setLayout(self.main_layout)
self.main_layout.addSpacing(20)
self.main_layout.addWidget(FormTitle('Create Transformation for Alignment'))
self.main_layout.addSpacing(10)
self.main_layout.addWidget(QtWidgets.QLabel('Translation along the X, Y, and Z axis (mm):'))
self.position_form_group = FormGroup(FormGroup.Layout.Horizontal)
self.x_position = FormControl('X', 0.0, required=True, number=True)
self.y_position = FormControl('Y', 0.0, required=True, number=True)
self.z_position = FormControl('Z', 0.0, required=True, number=True)
self.position_form_group.addControl(self.x_position)
self.position_form_group.addControl(self.y_position)
self.position_form_group.addControl(self.z_position)
self.position_form_group.groupValidation.connect(self.formValidation)
self.main_layout.addWidget(self.position_form_group)
self.main_layout.addWidget(QtWidgets.QLabel('Rotation around the X, Y, and Z axis (degrees):'))
self.orientation_form_group = FormGroup(FormGroup.Layout.Horizontal)
self.x_rotation = FormControl('X', 0.0, required=True, number=True)
self.x_rotation.range(-360.0, 360.0)
self.y_rotation = FormControl('Y', 0.0, required=True, number=True)
self.y_rotation.range(-360.0, 360.0)
self.z_rotation = FormControl('Z', 0.0, required=True, number=True)
self.z_rotation.range(-360.0, 360.0)
self.orientation_form_group.addControl(self.x_rotation)
self.orientation_form_group.addControl(self.y_rotation)
self.orientation_form_group.addControl(self.z_rotation)
self.orientation_form_group.groupValidation.connect(self.formValidation)
self.main_layout.addWidget(self.orientation_form_group)
button_layout = QtWidgets.QHBoxLayout()
self.execute_button = QtWidgets.QPushButton('Align Sample')
self.execute_button.clicked.connect(self.executeButtonClicked)
button_layout.addWidget(self.execute_button)
button_layout.addStretch(1)
self.main_layout.addLayout(button_layout)
self.main_layout.addStretch(1)
def formValidation(self):
if self.position_form_group.valid and self.orientation_form_group.valid:
self.execute_button.setEnabled(True)
else:
self.execute_button.setDisabled(True)
def executeButtonClicked(self):
pose = [self.x_position.value, self.y_position.value, self.z_position.value,
self.z_rotation.value, self.y_rotation.value, self.x_rotation.value]
self.parent.presenter.alignSampleWithPose(pose)
| 44.966851
| 115
| 0.672589
|
import numpy as np
from PyQt5 import QtCore, QtGui, QtWidgets
from sscanss.config import path_for, settings
from sscanss.core.math import Plane, Matrix33, Vector3, clamp, map_range, trunc, VECTOR_EPS
from sscanss.core.geometry import mesh_plane_intersection
from sscanss.core.util import Primitives, DockFlag, StrainComponents, PointType, PlaneOptions, Attributes
from sscanss.ui.widgets import (FormGroup, FormControl, GraphicsView, GraphicsScene, create_tool_button, FormTitle,
create_scroll_area, CompareValidator, GraphicsPointItem, Grid, create_icon)
from .managers import PointManager
class InsertPrimitiveDialog(QtWidgets.QWidget):
dock_flag = DockFlag.Upper
def __init__(self, primitive, parent):
super().__init__(parent)
self.parent = parent
self.parent_model = self.parent.presenter.model
self.parent.scenes.switchToSampleScene()
self.primitive = primitive
self.main_layout = QtWidgets.QVBoxLayout()
self.textboxes = {}
name = self.parent_model.uniqueKey(self.primitive.value)
self.mesh_args = {'name': name}
if self.primitive == Primitives.Tube:
self.mesh_args.update({'outer_radius': 100.000, 'inner_radius': 50.000, 'height': 200.000})
elif self.primitive == Primitives.Sphere:
self.mesh_args.update({'radius': 100.000})
elif self.primitive == Primitives.Cylinder:
self.mesh_args.update({'radius': 100.000, 'height': 200.000})
else:
self.mesh_args.update({'width': 50.000, 'height': 100.000, 'depth': 200.000})
self.createPrimitiveSwitcher()
self.createFormInputs()
button_layout = QtWidgets.QHBoxLayout()
self.create_primitive_button = QtWidgets.QPushButton('Create')
self.create_primitive_button.clicked.connect(self.createPrimiviteButtonClicked)
button_layout.addWidget(self.create_primitive_button)
button_layout.addStretch(1)
self.main_layout.addLayout(button_layout)
self.main_layout.addStretch(1)
self.setLayout(self.main_layout)
self.title = 'Insert {}'.format(self.primitive.value)
self.setMinimumWidth(450)
self.textboxes['name'].setFocus()
def createPrimitiveSwitcher(self):
switcher_layout = QtWidgets.QHBoxLayout()
switcher = create_tool_button(style_name='MenuButton', status_tip='Open dialog for a different primitive')
switcher.setArrowType(QtCore.Qt.DownArrow)
switcher.setPopupMode(QtWidgets.QToolButton.InstantPopup)
switcher.setMenu(self.parent.primitives_menu)
switcher_layout.addStretch(1)
switcher_layout.addWidget(switcher)
self.main_layout.addLayout(switcher_layout)
def createFormInputs(self):
self.form_group = FormGroup()
for key, value in self.mesh_args.items():
pretty_label = key.replace('_', ' ').title()
if key == 'name':
control = FormControl(pretty_label, value, required=True)
control.form_lineedit.textChanged.connect(self.nameCheck)
else:
control = FormControl(pretty_label, value, desc='mm', required=True, number=True)
control.range(0, None, min_exclusive=True)
self.textboxes[key] = control
self.form_group.addControl(control)
if self.primitive == Primitives.Tube:
outer_radius = self.textboxes['outer_radius']
inner_radius = self.textboxes['inner_radius']
outer_radius.compareWith(inner_radius, CompareValidator.Operator.Greater)
inner_radius.compareWith(outer_radius, CompareValidator.Operator.Less)
self.main_layout.addWidget(self.form_group)
self.form_group.groupValidation.connect(self.formValidation)
def nameCheck(self, value):
if self.parent_model.all_sample_key == value:
self.textboxes['name'].isInvalid(f'"{self.parent_model.all_sample_key}" is a reserved name')
def formValidation(self, is_valid):
if is_valid:
self.create_primitive_button.setEnabled(True)
else:
self.create_primitive_button.setDisabled(True)
def createPrimiviteButtonClicked(self):
for key, textbox in self.textboxes.items():
value = textbox.value
self.mesh_args[key] = value
self.parent.presenter.addPrimitive(self.primitive, self.mesh_args)
new_name = self.parent_model.uniqueKey(self.primitive.value)
self.textboxes['name'].value = new_name
class InsertPointDialog(QtWidgets.QWidget):
dock_flag = DockFlag.Upper
def __init__(self, point_type, parent):
super().__init__(parent)
self.parent = parent
self.parent_model = parent.presenter.model
self.parent.scenes.switchToSampleScene()
self.point_type = point_type
self.title = 'Add {} Point'.format(point_type.value)
self.main_layout = QtWidgets.QVBoxLayout()
unit = 'mm'
self.form_group = FormGroup()
self.x_axis = FormControl('X', 0.0, required=True, desc=unit, number=True)
self.y_axis = FormControl('Y', 0.0, required=True, desc=unit, number=True)
self.z_axis = FormControl('Z', 0.0, required=True, desc=unit, number=True)
self.form_group.addControl(self.x_axis)
self.form_group.addControl(self.y_axis)
self.form_group.addControl(self.z_axis)
self.form_group.groupValidation.connect(self.formValidation)
button_layout = QtWidgets.QHBoxLayout()
self.execute_button = QtWidgets.QPushButton(self.title)
self.execute_button.clicked.connect(self.executeButtonClicked)
button_layout.addWidget(self.execute_button)
button_layout.addStretch(1)
self.main_layout.addWidget(self.form_group)
self.main_layout.addLayout(button_layout)
self.main_layout.addStretch(1)
self.setLayout(self.main_layout)
self.setMinimumWidth(450)
def formValidation(self, is_valid):
if is_valid:
self.execute_button.setEnabled(True)
else:
self.execute_button.setDisabled(True)
def executeButtonClicked(self):
point = [self.x_axis.value, self.y_axis.value, self.z_axis.value]
self.parent.presenter.addPoints([(point, True)], self.point_type)
class InsertVectorDialog(QtWidgets.QWidget):
dock_flag = DockFlag.Upper
def __init__(self, parent):
super().__init__(parent)
self.parent = parent
self.parent_model = parent.presenter.model
self.parent.scenes.switchToSampleScene()
self.title = 'Add Measurement Vectors'
self.main_layout = QtWidgets.QVBoxLayout()
spacing = 10
self.main_layout.addSpacing(spacing)
self.main_layout.addWidget(QtWidgets.QLabel('Measurement Point:'))
self.points_combobox = QtWidgets.QComboBox()
self.points_combobox.setView(QtWidgets.QListView())
self.main_layout.addWidget(self.points_combobox)
self.updatePointList()
self.main_layout.addSpacing(spacing)
layout = QtWidgets.QHBoxLayout()
alignment_layout = QtWidgets.QVBoxLayout()
alignment_layout.addWidget(QtWidgets.QLabel('Alignment:'))
self.alignment_combobox = QtWidgets.QComboBox()
self.alignment_combobox.setView(QtWidgets.QListView())
self.alignment_combobox.setInsertPolicy(QtWidgets.QComboBox.InsertAtCurrent)
self.updateAlignment()
self.alignment_combobox.activated.connect(self.addNewAlignment)
self.alignment_combobox.currentIndexChanged.connect(self.changeRenderedAlignment)
alignment_layout.addWidget(self.alignment_combobox)
alignment_layout.addSpacing(spacing)
layout.addLayout(alignment_layout)
self.detector_combobox = QtWidgets.QComboBox()
self.detector_combobox.setView(QtWidgets.QListView())
self.detector_combobox.addItems(list(self.parent_model.instrument.detectors.keys()))
if len(self.parent_model.instrument.detectors) > 1:
detector_layout = QtWidgets.QVBoxLayout()
detector_layout.addWidget(QtWidgets.QLabel('Detector:'))
detector_layout.addWidget(self.detector_combobox)
size = self.detector_combobox.iconSize()
self.detector_combobox.setItemIcon(0, create_icon(settings.value(settings.Key.Vector_1_Colour), size))
self.detector_combobox.setItemIcon(1, create_icon(settings.value(settings.Key.Vector_2_Colour), size))
detector_layout.addSpacing(spacing)
layout.addSpacing(spacing)
layout.addLayout(detector_layout)
self.main_layout.addLayout(layout)
self.main_layout.addWidget(QtWidgets.QLabel('Strain Component:'))
self.component_combobox = QtWidgets.QComboBox()
self.component_combobox.setView(QtWidgets.QListView())
strain_components = [s.value for s in StrainComponents]
self.component_combobox.addItems(strain_components)
self.component_combobox.currentTextChanged.connect(self.toggleKeyInBox)
self.main_layout.addWidget(self.component_combobox)
self.main_layout.addSpacing(spacing)
button_layout = QtWidgets.QHBoxLayout()
self.execute_button = QtWidgets.QPushButton(self.title)
self.execute_button.clicked.connect(self.executeButtonClicked)
button_layout.addWidget(self.execute_button)
button_layout.addStretch(1)
self.createKeyInBox()
self.reverse_checkbox = QtWidgets.QCheckBox('Reverse Direction of Vector')
self.main_layout.addWidget(self.reverse_checkbox)
self.main_layout.addSpacing(spacing)
self.main_layout.addLayout(button_layout)
self.main_layout.addStretch(1)
self.setLayout(self.main_layout)
self.parent_model.measurement_points_changed.connect(self.updatePointList)
self.parent_model.measurement_vectors_changed.connect(self.updateAlignment)
self.parent.scenes.rendered_alignment_changed.connect(self.alignment_combobox.setCurrentIndex)
self.setMinimumWidth(450)
def updatePointList(self):
self.points_combobox.clear()
point_list = ['All Points']
point_list.extend(['{}'.format(i+1) for i in range(self.parent_model.measurement_points.size)])
self.points_combobox.addItems(point_list)
def updateAlignment(self):
align_count = self.parent_model.measurement_vectors.shape[2]
if align_count != self.alignment_combobox.count() - 1:
self.alignment_combobox.clear()
alignment_list = ['{}'.format(i + 1) for i in range(align_count)]
alignment_list.append('Add New...')
self.alignment_combobox.addItems(alignment_list)
self.alignment_combobox.setCurrentIndex(self.parent.scenes.rendered_alignment)
def addNewAlignment(self, index):
if index == self.alignment_combobox.count() - 1:
self.alignment_combobox.insertItem(index, '{}'.format(index + 1))
self.alignment_combobox.setCurrentIndex(index)
def changeRenderedAlignment(self, index):
align_count = self.parent_model.measurement_vectors.shape[2]
if 0 <= index < align_count:
self.parent.scenes.changeRenderedAlignment(index)
elif index >= align_count:
self.parent.scenes.changeVisibility(Attributes.Vectors, False)
def toggleKeyInBox(self, selected_text):
strain_component = StrainComponents(selected_text)
if strain_component == StrainComponents.custom:
self.key_in_box.setVisible(True)
self.form_group.validateGroup()
else:
self.key_in_box.setVisible(False)
self.execute_button.setEnabled(True)
def createKeyInBox(self):
self.key_in_box = QtWidgets.QWidget(self)
layout = QtWidgets.QVBoxLayout()
self.form_group = FormGroup(FormGroup.Layout.Horizontal)
self.x_axis = FormControl('X', 1.0, required=True, number=True, decimals=7)
self.x_axis.range(-1.0, 1.0)
self.y_axis = FormControl('Y', 0.0, required=True, number=True, decimals=7)
self.y_axis.range(-1.0, 1.0)
self.z_axis = FormControl('Z', 0.0, required=True, number=True, decimals=7)
self.z_axis.range(-1.0, 1.0)
self.form_group.addControl(self.x_axis)
self.form_group.addControl(self.y_axis)
self.form_group.addControl(self.z_axis)
self.form_group.groupValidation.connect(self.formValidation)
layout.addWidget(self.form_group)
self.key_in_box.setLayout(layout)
self.main_layout.addWidget(self.key_in_box)
self.toggleKeyInBox(self.component_combobox.currentText())
def formValidation(self, is_valid):
self.execute_button.setDisabled(True)
if is_valid:
if np.linalg.norm([self.x_axis.value, self.y_axis.value, self.z_axis.value]) > VECTOR_EPS:
self.x_axis.validation_label.setText('')
self.execute_button.setEnabled(True)
else:
self.x_axis.validation_label.setText('Bad Normal')
def executeButtonClicked(self):
points = self.points_combobox.currentIndex() - 1
selected_text = self.component_combobox.currentText()
strain_component = StrainComponents(selected_text)
alignment = self.alignment_combobox.currentIndex()
detector = self.detector_combobox.currentIndex()
check_state = self.reverse_checkbox.checkState()
reverse = True if check_state == QtCore.Qt.Checked else False
if strain_component == StrainComponents.custom:
vector = [self.x_axis.value, self.y_axis.value, self.z_axis.value]
else:
vector = None
self.parent.presenter.addVectors(points, strain_component, alignment, detector,
key_in=vector, reverse=reverse)
self.parent.scenes._rendered_alignment = alignment
def closeEvent(self, event):
self.parent.scenes.changeRenderedAlignment(0)
event.accept()
class PickPointDialog(QtWidgets.QWidget):
dock_flag = DockFlag.Full
def __init__(self, parent):
super().__init__(parent)
self.parent = parent
self.parent_model = parent.presenter.model
self.parent.scenes.switchToSampleScene()
self.title = 'Add Measurement Points Graphically'
self.setMinimumWidth(500)
self.plane_offset_range = (-1., 1.)
self.slider_range = (-10000000, 10000000)
self.sample_scale = 20
self.path_pen = QtGui.QPen(QtGui.QColor(255, 0, 0), 0)
self.point_pen = QtGui.QPen(QtGui.QColor(200, 0, 0), 0)
self.main_layout = QtWidgets.QVBoxLayout()
self.setLayout(self.main_layout)
button_layout = QtWidgets.QHBoxLayout()
self.help_button = create_tool_button(tooltip='Help', style_name='ToolButton',
status_tip='Display shortcuts for the cross-section view',
icon_path=path_for('question.png'))
self.help_button.clicked.connect(self.showHelp)
self.reset_button = create_tool_button(tooltip='Reset View', style_name='ToolButton',
status_tip='Reset camera transformation of the cross-section view',
icon_path=path_for('refresh.png'))
self.execute_button = QtWidgets.QPushButton('Add Points')
self.execute_button.clicked.connect(self.addPoints)
button_layout.addWidget(self.help_button)
button_layout.addWidget(self.reset_button)
button_layout.addStretch(1)
button_layout.addWidget(self.execute_button)
self.main_layout.addLayout(button_layout)
self.splitter = QtWidgets.QSplitter(QtCore.Qt.Vertical)
self.splitter.setChildrenCollapsible(False)
self.main_layout.addWidget(self.splitter)
self.createGraphicsView()
self.reset_button.clicked.connect(self.view.reset)
self.createControlPanel()
self.prepareMesh()
self.parent_model.sample_changed.connect(self.prepareMesh)
self.parent_model.measurement_points_changed.connect(self.updateCrossSection)
self.initializing = True
def showEvent(self, event):
if self.initializing:
self.view.fitInView(self.view.anchor, QtCore.Qt.KeepAspectRatio)
self.initializing = False
super().showEvent(event)
def closeEvent(self, event):
self.parent.scenes.removePlane()
event.accept()
def prepareMesh(self):
self.mesh = None
samples = self.parent_model.sample
for _, sample in samples.items():
if self.mesh is None:
self.mesh = sample.copy()
else:
self.mesh.append(sample)
self.scene.clear()
self.tabs.setEnabled(self.mesh is not None)
if self.mesh is not None:
self.setPlane(self.plane_combobox.currentText())
else:
self.parent.scenes.removePlane()
self.view.reset()
def updateStatusBar(self, point):
if self.view.rect().contains(point):
transform = self.view.scene_transform.inverted()[0]
scene_pt = transform.map(self.view.mapToScene(point)) / self.sample_scale
world_pt = [scene_pt.x(), scene_pt.y(), -self.old_distance] @ self.matrix.transpose()
cursor_text = f'X: {world_pt[0]:.3f} Y: {world_pt[1]:.3f} Z: {world_pt[2]:.3f}'
self.parent.cursor_label.setText(cursor_text)
else:
self.parent.cursor_label.clear()
def createGraphicsView(self):
self.scene = GraphicsScene(self.sample_scale, self)
self.view = GraphicsView(self.scene)
self.view.mouse_moved.connect(self.updateStatusBar)
self.view.setMinimumHeight(350)
self.splitter.addWidget(self.view)
def createControlPanel(self):
self.tabs = QtWidgets.QTabWidget()
self.tabs.setMinimumHeight(250)
self.tabs.setTabPosition(QtWidgets.QTabWidget.South)
self.splitter.addWidget(self.tabs)
self.createPlaneTab()
self.createSelectionToolsTab()
self.createGridOptionsTab()
point_manager = PointManager(PointType.Measurement, self.parent)
self.tabs.addTab(create_scroll_area(point_manager), 'Point Manager')
def createPlaneTab(self):
layout = QtWidgets.QVBoxLayout()
layout.addWidget(QtWidgets.QLabel('Specify Plane:'))
self.plane_combobox = QtWidgets.QComboBox()
self.plane_combobox.setView(QtWidgets.QListView())
self.plane_combobox.addItems([p.value for p in PlaneOptions])
self.plane_combobox.currentTextChanged.connect(self.setPlane)
self.createCustomPlaneBox()
layout.addWidget(self.plane_combobox)
layout.addWidget(self.custom_plane_widget)
layout.addSpacing(20)
slider_layout = QtWidgets.QHBoxLayout()
slider_layout.addWidget(QtWidgets.QLabel('Plane Distance from Origin (mm):'))
self.plane_lineedit = QtWidgets.QLineEdit()
validator = QtGui.QDoubleValidator(self.plane_lineedit)
validator.setNotation(QtGui.QDoubleValidator.StandardNotation)
validator.setDecimals(3)
self.plane_lineedit.setValidator(validator)
self.plane_lineedit.textEdited.connect(self.updateSlider)
self.plane_lineedit.editingFinished.connect(self.movePlane)
slider_layout.addStretch(1)
slider_layout.addWidget(self.plane_lineedit)
layout.addLayout(slider_layout)
self.plane_slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self.plane_slider.setMinimum(self.slider_range[0])
self.plane_slider.setMaximum(self.slider_range[1])
self.plane_slider.setFocusPolicy(QtCore.Qt.StrongFocus)
self.plane_slider.setSingleStep(1)
self.plane_slider.sliderMoved.connect(self.updateLineEdit)
self.plane_slider.sliderReleased.connect(self.movePlane)
layout.addWidget(self.plane_slider)
layout.addStretch(1)
plane_tab = QtWidgets.QWidget()
plane_tab.setLayout(layout)
self.tabs.addTab(create_scroll_area(plane_tab), 'Define Plane')
def createSelectionToolsTab(self):
layout = QtWidgets.QVBoxLayout()
selector_layout = QtWidgets.QHBoxLayout()
selector_layout.addWidget(QtWidgets.QLabel('Select Geometry of Points: '))
self.button_group = QtWidgets.QButtonGroup()
self.button_group.buttonClicked[int].connect(self.changeSceneMode)
self.object_selector = create_tool_button(checkable=True, checked=True, tooltip='Select Points',
status_tip='Select movable points from the cross-section view',
style_name='MidToolButton', icon_path=path_for('select.png'))
self.point_selector = create_tool_button(checkable=True, tooltip='Draw a Point',
status_tip='Draw a single point at the selected position',
style_name='MidToolButton', icon_path=path_for('point.png'))
self.line_selector = create_tool_button(checkable=True, tooltip='Draw Points on Line',
status_tip='Draw equally spaced points on the selected line',
style_name='MidToolButton', icon_path=path_for('line_tool.png'))
self.area_selector = create_tool_button(checkable=True, tooltip='Draw Points on Area',
status_tip='Draw a grid of points on the selected area',
style_name='MidToolButton', icon_path=path_for('area_tool.png'))
self.button_group.addButton(self.object_selector, GraphicsScene.Mode.Select.value)
self.button_group.addButton(self.point_selector, GraphicsScene.Mode.Draw_point.value)
self.button_group.addButton(self.line_selector, GraphicsScene.Mode.Draw_line.value)
self.button_group.addButton(self.area_selector, GraphicsScene.Mode.Draw_area.value)
selector_layout.addWidget(self.object_selector)
selector_layout.addWidget(self.point_selector)
selector_layout.addWidget(self.line_selector)
selector_layout.addWidget(self.area_selector)
selector_layout.addStretch(1)
self.createLineToolWidget()
self.createAreaToolWidget()
layout.addLayout(selector_layout)
layout.addWidget(self.line_tool_widget)
layout.addWidget(self.area_tool_widget)
layout.addStretch(1)
select_tab = QtWidgets.QWidget()
select_tab.setLayout(layout)
self.tabs.addTab(create_scroll_area(select_tab), 'Selection Tools')
def createGridOptionsTab(self):
layout = QtWidgets.QVBoxLayout()
self.show_grid_checkbox = QtWidgets.QCheckBox('Show Grid')
self.show_grid_checkbox.stateChanged.connect(self.showGrid)
self.snap_to_grid_checkbox = QtWidgets.QCheckBox('Snap Selection to Grid')
self.snap_to_grid_checkbox.stateChanged.connect(self.snapToGrid)
self.snap_to_grid_checkbox.setEnabled(self.view.show_grid)
layout.addWidget(self.show_grid_checkbox)
layout.addWidget(self.snap_to_grid_checkbox)
self.createGridWidget()
layout.addWidget(self.grid_widget)
layout.addStretch(1)
grid_tab = QtWidgets.QWidget()
grid_tab.setLayout(layout)
self.tabs.addTab(create_scroll_area(grid_tab), 'Grid Options')
def createCustomPlaneBox(self):
self.custom_plane_widget = QtWidgets.QWidget(self)
layout = QtWidgets.QVBoxLayout()
self.form_group = FormGroup(FormGroup.Layout.Horizontal)
self.x_axis = FormControl('X', 1.0, required=True, number=True)
self.x_axis.range(-1.0, 1.0)
self.y_axis = FormControl('Y', 0.0, required=True, number=True)
self.y_axis.range(-1.0, 1.0)
self.z_axis = FormControl('Z', 0.0, required=True, number=True)
self.z_axis.range(-1.0, 1.0)
self.form_group.addControl(self.x_axis)
self.form_group.addControl(self.y_axis)
self.form_group.addControl(self.z_axis)
self.form_group.groupValidation.connect(self.setCustomPlane)
layout.addWidget(self.form_group)
self.custom_plane_widget.setLayout(layout)
def createLineToolWidget(self):
self.line_tool_widget = QtWidgets.QWidget(self)
layout = QtWidgets.QHBoxLayout()
layout.setContentsMargins(0, 20, 0, 0)
layout.addWidget(QtWidgets.QLabel('Number of Points: '))
self.line_point_count_spinbox = QtWidgets.QSpinBox()
self.line_point_count_spinbox.setValue(self.scene.line_tool_size)
self.line_point_count_spinbox.setRange(2, 100)
self.line_point_count_spinbox.valueChanged.connect(self.scene.setLineToolSize)
layout.addWidget(self.line_point_count_spinbox)
self.line_tool_widget.setVisible(False)
self.line_tool_widget.setLayout(layout)
def createAreaToolWidget(self):
self.area_tool_widget = QtWidgets.QWidget(self)
layout = QtWidgets.QHBoxLayout()
layout.setContentsMargins(0, 20, 0, 0)
layout.addWidget(QtWidgets.QLabel('Number of Points: '))
self.area_x_spinbox = QtWidgets.QSpinBox()
self.area_x_spinbox.setValue(self.scene.area_tool_size[0])
self.area_x_spinbox.setRange(2, 100)
self.area_y_spinbox = QtWidgets.QSpinBox()
self.area_y_spinbox.setValue(self.scene.area_tool_size[1])
self.area_y_spinbox.setRange(2, 100)
stretch_factor = 3
layout.addStretch(1)
layout.addWidget(QtWidgets.QLabel('X: '))
self.area_x_spinbox.valueChanged.connect(lambda: self.scene.setAreaToolSize(self.area_x_spinbox.value(),
self.area_y_spinbox.value()))
layout.addWidget(self.area_x_spinbox, stretch_factor)
layout.addStretch(1)
layout.addWidget(QtWidgets.QLabel('Y: '))
self.area_y_spinbox.valueChanged.connect(lambda: self.scene.setAreaToolSize(self.area_x_spinbox.value(),
self.area_y_spinbox.value()))
layout.addWidget(self.area_y_spinbox, stretch_factor)
self.area_tool_widget.setVisible(False)
self.area_tool_widget.setLayout(layout)
def createGridWidget(self):
self.grid_widget = QtWidgets.QWidget(self)
main_layout = QtWidgets.QVBoxLayout()
main_layout.setContentsMargins(0, 20, 0, 0)
layout = QtWidgets.QHBoxLayout()
layout.addWidget(QtWidgets.QLabel('Grid Type: '))
grid_combobox = QtWidgets.QComboBox()
grid_combobox.setView(QtWidgets.QListView())
grid_combobox.addItems([g.value for g in Grid.Type])
grid_combobox.currentTextChanged.connect(lambda value: self.setGridType(Grid.Type(value)))
layout.addWidget(grid_combobox)
main_layout.addLayout(layout)
main_layout.addSpacing(20)
layout = QtWidgets.QHBoxLayout()
layout.addWidget(QtWidgets.QLabel('Grid Size: '))
self.grid_x_label = QtWidgets.QLabel('')
self.grid_x_spinbox = QtWidgets.QDoubleSpinBox()
self.grid_x_spinbox.setDecimals(1)
self.grid_x_spinbox.setSingleStep(0.1)
self.grid_x_spinbox.valueChanged.connect(self.changeGridSize)
self.grid_y_label = QtWidgets.QLabel('')
self.grid_y_spinbox = QtWidgets.QDoubleSpinBox()
self.grid_y_spinbox.setDecimals(1)
self.grid_y_spinbox.setSingleStep(0.1)
self.grid_y_spinbox.valueChanged.connect(self.changeGridSize)
stretch_factor = 3
layout.addStretch(1)
layout.addWidget(self.grid_x_label)
layout.addWidget(self.grid_x_spinbox, stretch_factor)
layout.addStretch(1)
layout.addWidget(self.grid_y_label)
layout.addWidget(self.grid_y_spinbox, stretch_factor)
main_layout.addLayout(layout)
self.setGridType(self.view.grid.type)
self.grid_widget.setVisible(False)
self.grid_widget.setLayout(main_layout)
def changeGridSize(self):
if self.view.grid.type == Grid.Type.Box:
grid_x = int(self.grid_x_spinbox.value() * self.sample_scale)
grid_y = int(self.grid_y_spinbox.value() * self.sample_scale)
else:
grid_x = int(self.grid_x_spinbox.value() * self.sample_scale)
grid_y = self.grid_y_spinbox.value()
self.view.setGridSize((grid_x, grid_y))
def setGridType(self, grid_type):
self.view.setGridType(grid_type)
size = self.view.grid.size
if grid_type == Grid.Type.Box:
self.grid_x_label.setText('X (mm): ')
self.grid_y_label.setText('Y (mm): ')
self.grid_x_spinbox.setValue(size[0])
self.grid_y_spinbox.setValue(size[1])
self.grid_x_spinbox.setRange(0.1, 1000)
self.grid_y_spinbox.setRange(0.1, 1000)
else:
self.grid_x_label.setText('Radius (mm): ')
self.grid_y_label.setText('Angle (degree): ')
self.grid_x_spinbox.setValue(size[0])
self.grid_y_spinbox.setValue(size[1])
self.grid_x_spinbox.setRange(0.1, 1000)
self.grid_y_spinbox.setRange(0.1, 360)
def changeSceneMode(self, button_id):
self.scene.mode = GraphicsScene.Mode(button_id)
self.line_tool_widget.setVisible(self.scene.mode == GraphicsScene.Mode.Draw_line)
self.area_tool_widget.setVisible(self.scene.mode == GraphicsScene.Mode.Draw_area)
def showHelp(self):
self.view.show_help = False if self.view.has_foreground else True
self.scene.update()
def showGrid(self, state):
self.view.show_grid = True if state == QtCore.Qt.Checked else False
self.snap_to_grid_checkbox.setEnabled(self.view.show_grid)
self.grid_widget.setVisible(self.view.show_grid)
self.scene.update()
def snapToGrid(self, state):
self.view.snap_to_grid = True if state == QtCore.Qt.Checked else False
def updateSlider(self, value):
if not self.plane_lineedit.hasAcceptableInput():
return
new_distance = clamp(float(value), *self.plane_offset_range)
slider_value = int(map_range(*self.plane_offset_range, *self.slider_range, new_distance))
self.plane_slider.setValue(slider_value)
offset = new_distance - self.old_distance
self.parent.scenes.movePlane(offset * self.plane.normal)
self.old_distance = new_distance
def updateLineEdit(self, value):
new_distance = trunc(map_range(*self.slider_range, *self.plane_offset_range, value), 3)
self.plane_lineedit.setText('{:.3f}'.format(new_distance))
offset = new_distance - self.old_distance
self.parent.scenes.movePlane(offset * self.plane.normal)
self.old_distance = new_distance
def movePlane(self):
distance = clamp(float(self.plane_lineedit.text()), *self.plane_offset_range)
self.plane_lineedit.setText('{:.3f}'.format(distance))
point = distance * self.plane.normal
self.plane = Plane(self.plane.normal, point)
self.updateCrossSection()
def setCustomPlane(self, is_valid):
if is_valid:
normal = np.array([self.x_axis.value, self.y_axis.value, self.z_axis.value])
try:
self.initializePlane(normal, self.mesh.bounding_box.center)
except ValueError:
self.x_axis.validation_label.setText('Bad Normal')
def setPlane(self, selected_text):
if selected_text == PlaneOptions.Custom.value:
self.custom_plane_widget.setVisible(True)
self.form_group.validateGroup()
return
else:
self.custom_plane_widget.setVisible(False)
if selected_text == PlaneOptions.XY.value:
plane_normal = np.array([0., 0., 1.])
elif selected_text == PlaneOptions.XZ.value:
plane_normal = np.array([0., 1., 0.])
else:
plane_normal = np.array([1., 0., 0.])
self.initializePlane(plane_normal, self.mesh.bounding_box.center)
def initializePlane(self, plane_normal, plane_point):
self.plane = Plane(plane_normal, plane_point)
plane_size = self.mesh.bounding_box.radius
self.parent.scenes.drawPlane(self.plane, 2 * plane_size, 2 * plane_size)
distance = self.plane.distanceFromOrigin()
self.plane_offset_range = (distance - plane_size, distance + plane_size)
slider_value = int(map_range(*self.plane_offset_range, *self.slider_range, distance))
self.plane_slider.setValue(slider_value)
self.plane_lineedit.setText('{:.3f}'.format(distance))
self.old_distance = distance
self.matrix = self.__lookAt(-Vector3(self.plane.normal))
self.view.resetTransform()
self.updateCrossSection()
def updateCrossSection(self):
self.scene.clear()
segments = mesh_plane_intersection(self.mesh, self.plane)
if len(segments) == 0:
return
segments = np.array(segments)
item = QtWidgets.QGraphicsPathItem()
cross_section_path = QtGui.QPainterPath()
rotated_segments = self.sample_scale * (segments @ self.matrix)
for i in range(0, rotated_segments.shape[0], 2):
start = rotated_segments[i, :]
cross_section_path.moveTo(start[0], start[1])
end = rotated_segments[i + 1, :]
cross_section_path.lineTo(end[0], end[1])
item.setPath(cross_section_path)
item.setPen(self.path_pen)
item.setTransform(self.view.scene_transform)
self.scene.addItem(item)
rect = item.boundingRect()
anchor = rect.center()
ab = self.plane.point - self.parent_model.measurement_points.points
d = np.einsum('ij,ij->i', np.expand_dims(self.plane.normal, axis=0), ab)
index = np.where(np.abs(d) < VECTOR_EPS)[0]
rotated_points = self.parent_model.measurement_points.points[index, :]
rotated_points = rotated_points @ self.matrix
for i, p in zip(index, rotated_points):
point = QtCore.QPointF(p[0], p[1]) * self.sample_scale
point = self.view.scene_transform.map(point)
item = GraphicsPointItem(point, size=self.scene.point_size)
item.setToolTip(f'Point {i + 1}')
item.fixed = True
item.makeControllable(self.scene.mode == GraphicsScene.Mode.Select)
item.setPen(self.point_pen)
self.scene.addItem(item)
rect = rect.united(item.boundingRect().translated(point))
        rect = rect.united(rect.translated(anchor - rect.center()))
self.view.setSceneRect(rect)
self.view.fitInView(rect, QtCore.Qt.KeepAspectRatio)
self.view.anchor = rect
@staticmethod
def __lookAt(forward):
rot_matrix = Matrix33.identity()
up = Vector3([0., -1., 0.]) if -VECTOR_EPS < forward[1] < VECTOR_EPS else Vector3([0., 0., 1.])
left = up ^ forward
left.normalize()
up = forward ^ left
rot_matrix.c1[:3] = left
rot_matrix.c2[:3] = up
rot_matrix.c3[:3] = forward
return rot_matrix
def addPoints(self):
if len(self.scene.items()) < 2:
return
points_2d = []
transform = self.view.scene_transform.inverted()[0]
for item in self.scene.items():
if isinstance(item, GraphicsPointItem) and not item.fixed:
pos = transform.map(item.pos()) / self.sample_scale
points_2d.append([pos.x(), pos.y(), -self.old_distance])
self.scene.removeItem(item)
if not points_2d:
return
points = points_2d[::-1] @ self.matrix.transpose()
enabled = [True] * points.shape[0]
self.parent.presenter.addPoints(list(zip(points, enabled)), PointType.Measurement, False)
class AlignSample(QtWidgets.QWidget):
dock_flag = DockFlag.Upper
def __init__(self, parent):
super().__init__(parent)
self.parent = parent
self.parent.scenes.switchToInstrumentScene()
self.title = 'Align Sample with 6D pose'
self.setMinimumWidth(450)
self.main_layout = QtWidgets.QVBoxLayout()
self.setLayout(self.main_layout)
self.main_layout.addSpacing(20)
self.main_layout.addWidget(FormTitle('Create Transformation for Alignment'))
self.main_layout.addSpacing(10)
self.main_layout.addWidget(QtWidgets.QLabel('Translation along the X, Y, and Z axis (mm):'))
self.position_form_group = FormGroup(FormGroup.Layout.Horizontal)
self.x_position = FormControl('X', 0.0, required=True, number=True)
self.y_position = FormControl('Y', 0.0, required=True, number=True)
self.z_position = FormControl('Z', 0.0, required=True, number=True)
self.position_form_group.addControl(self.x_position)
self.position_form_group.addControl(self.y_position)
self.position_form_group.addControl(self.z_position)
self.position_form_group.groupValidation.connect(self.formValidation)
self.main_layout.addWidget(self.position_form_group)
self.main_layout.addWidget(QtWidgets.QLabel('Rotation around the X, Y, and Z axis (degrees):'))
self.orientation_form_group = FormGroup(FormGroup.Layout.Horizontal)
self.x_rotation = FormControl('X', 0.0, required=True, number=True)
self.x_rotation.range(-360.0, 360.0)
self.y_rotation = FormControl('Y', 0.0, required=True, number=True)
self.y_rotation.range(-360.0, 360.0)
self.z_rotation = FormControl('Z', 0.0, required=True, number=True)
self.z_rotation.range(-360.0, 360.0)
self.orientation_form_group.addControl(self.x_rotation)
self.orientation_form_group.addControl(self.y_rotation)
self.orientation_form_group.addControl(self.z_rotation)
self.orientation_form_group.groupValidation.connect(self.formValidation)
self.main_layout.addWidget(self.orientation_form_group)
button_layout = QtWidgets.QHBoxLayout()
self.execute_button = QtWidgets.QPushButton('Align Sample')
self.execute_button.clicked.connect(self.executeButtonClicked)
button_layout.addWidget(self.execute_button)
button_layout.addStretch(1)
self.main_layout.addLayout(button_layout)
self.main_layout.addStretch(1)
def formValidation(self):
if self.position_form_group.valid and self.orientation_form_group.valid:
self.execute_button.setEnabled(True)
else:
self.execute_button.setDisabled(True)
def executeButtonClicked(self):
pose = [self.x_position.value, self.y_position.value, self.z_position.value,
self.z_rotation.value, self.y_rotation.value, self.x_rotation.value]
self.parent.presenter.alignSampleWithPose(pose)
| true
| true
|
f71a74d3749d44e5926e7af02f116135904cbcf5
| 3,574
|
py
|
Python
|
src/vacuum/webserver.py
|
nesyamun/vacuum
|
e58c24e4ff9f88d674e75b17a96c705d88189422
|
[
"MIT"
] | 2
|
2021-03-15T15:44:23.000Z
|
2021-04-08T20:58:24.000Z
|
src/vacuum/webserver.py
|
nesyamun/vacuum
|
e58c24e4ff9f88d674e75b17a96c705d88189422
|
[
"MIT"
] | null | null | null |
src/vacuum/webserver.py
|
nesyamun/vacuum
|
e58c24e4ff9f88d674e75b17a96c705d88189422
|
[
"MIT"
] | 3
|
2021-03-15T15:44:37.000Z
|
2022-03-05T03:44:23.000Z
|
from asyncio import AbstractEventLoop, Task, get_event_loop
from dataclasses import asdict
from datetime import datetime
from functools import wraps
from typing import Callable, Optional, Tuple
from quart import Quart, request
from werkzeug.exceptions import HTTPException
from .config import config
from .logger import get_logger, set_quart_logger_formatter
from .postgres import POSTGRES_HEALTHCHECK_TASK_NAME, postgres_healthcheck
from .state import state
from .streamer import STREAMING_TASK_NAME, stream
logger = get_logger(__name__)
app = Quart(__name__)
set_quart_logger_formatter()
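# Response decorator: merges the handler's return value (defaulting to
# {"success": True}) with the shared state dataclass and per-request metadata.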
def response(func: Callable) -> Callable:
@wraps(func)
async def inner(*args, **kwargs) -> dict:
extra: Optional[dict] = await func(*args, **kwargs)
if not extra:
extra = {"success": True}
return {
**asdict(state),
**{
"server_time": datetime.now(),
"path": request.path,
"method": request.method,
"status": "200 OK",
"status_code": 200,
},
**extra,
}
return inner
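# Error-handler factory: produces a decorator whose wrapped handler returns the
# request metadata tagged with the given HTTP status code and status text.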
def error(code: int, status: str) -> Callable:
def wrapper(func: Callable) -> Callable:
@wraps(func)
async def inner(*args, **kwargs) -> Tuple[dict, int]:
extra: Optional[dict] = await func(*args, **kwargs)
if not extra:
extra = {}
return (
{
**{
"server_time": datetime.now(),
"success": False,
"path": request.path,
"method": request.method,
"status": f"{code} {status}",
"status_code": code,
},
**extra,
},
code,
)
return inner
return wrapper
@app.route("/healthz", methods=["GET"])
async def healthz() -> Tuple[str, int]:
return "", 200
@app.route("/status", methods=["GET"])
@response
async def status() -> None:
pass
@app.route("/start", methods=["POST"])
@response
async def start() -> dict:
logger.info("starting")
if state.streaming:
return {"success": True, "message": "Currently streaming"}
if not state.postgres:
return {"success": False, "message": "Postgres not available"}
loop: AbstractEventLoop = get_event_loop()
loop.create_task(stream(), name=STREAMING_TASK_NAME)
state.streaming = True
return {"success": True, "message": "Started streaming"}
@app.route("/stop", methods=["POST"])
@response
async def stop() -> dict:
logger.info("stopping")
if not state.streaming:
return {"success": True, "message": "Not currently streaming"}
for task in Task.all_tasks():
if task.get_name() == STREAMING_TASK_NAME:
task.cancel()
break
state.streaming = False
return {"success": True, "message": "Stopped streaming"}
@app.errorhandler(404)
@error(404, "Not Found")
async def page_not_found(e: HTTPException) -> None:
pass
@app.errorhandler(405)
@error(405, "Method Not Allowed")
async def method_not_allowed(e: HTTPException) -> None:
pass
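# Start the Postgres health-check loop as a background task once the app begins serving.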
@app.before_serving
async def startup() -> None:
loop: AbstractEventLoop = get_event_loop()
loop.create_task(postgres_healthcheck(), name=POSTGRES_HEALTHCHECK_TASK_NAME)
def webserver() -> None:
app.run(host=config["webserver"]["host"], port=config["webserver"]["port"])
| 25.528571
| 81
| 0.592334
|
from asyncio import AbstractEventLoop, Task, get_event_loop
from dataclasses import asdict
from datetime import datetime
from functools import wraps
from typing import Callable, Optional, Tuple
from quart import Quart, request
from werkzeug.exceptions import HTTPException
from .config import config
from .logger import get_logger, set_quart_logger_formatter
from .postgres import POSTGRES_HEALTHCHECK_TASK_NAME, postgres_healthcheck
from .state import state
from .streamer import STREAMING_TASK_NAME, stream
logger = get_logger(__name__)
app = Quart(__name__)
set_quart_logger_formatter()
def response(func: Callable) -> Callable:
@wraps(func)
async def inner(*args, **kwargs) -> dict:
extra: Optional[dict] = await func(*args, **kwargs)
if not extra:
extra = {"success": True}
return {
**asdict(state),
**{
"server_time": datetime.now(),
"path": request.path,
"method": request.method,
"status": "200 OK",
"status_code": 200,
},
**extra,
}
return inner
def error(code: int, status: str) -> Callable:
def wrapper(func: Callable) -> Callable:
@wraps(func)
async def inner(*args, **kwargs) -> Tuple[dict, int]:
extra: Optional[dict] = await func(*args, **kwargs)
if not extra:
extra = {}
return (
{
**{
"server_time": datetime.now(),
"success": False,
"path": request.path,
"method": request.method,
"status": f"{code} {status}",
"status_code": code,
},
**extra,
},
code,
)
return inner
return wrapper
@app.route("/healthz", methods=["GET"])
async def healthz() -> Tuple[str, int]:
return "", 200
@app.route("/status", methods=["GET"])
@response
async def status() -> None:
pass
@app.route("/start", methods=["POST"])
@response
async def start() -> dict:
logger.info("starting")
if state.streaming:
return {"success": True, "message": "Currently streaming"}
if not state.postgres:
return {"success": False, "message": "Postgres not available"}
loop: AbstractEventLoop = get_event_loop()
loop.create_task(stream(), name=STREAMING_TASK_NAME)
state.streaming = True
return {"success": True, "message": "Started streaming"}
@app.route("/stop", methods=["POST"])
@response
async def stop() -> dict:
logger.info("stopping")
if not state.streaming:
return {"success": True, "message": "Not currently streaming"}
for task in Task.all_tasks():
if task.get_name() == STREAMING_TASK_NAME:
task.cancel()
break
state.streaming = False
return {"success": True, "message": "Stopped streaming"}
@app.errorhandler(404)
@error(404, "Not Found")
async def page_not_found(e: HTTPException) -> None:
pass
@app.errorhandler(405)
@error(405, "Method Not Allowed")
async def method_not_allowed(e: HTTPException) -> None:
pass
@app.before_serving
async def startup() -> None:
loop: AbstractEventLoop = get_event_loop()
loop.create_task(postgres_healthcheck(), name=POSTGRES_HEALTHCHECK_TASK_NAME)
def webserver() -> None:
app.run(host=config["webserver"]["host"], port=config["webserver"]["port"])
| true
| true
|
f71a75c9c5f86132584053248cbb481ec3e2449c
| 6,138
|
py
|
Python
|
poetry/console/config/application_config.py
|
michielboekhoff/poetry
|
92b1e61c45f13868ffab663fa3e9be2e26e8c368
|
[
"MIT"
] | null | null | null |
poetry/console/config/application_config.py
|
michielboekhoff/poetry
|
92b1e61c45f13868ffab663fa3e9be2e26e8c368
|
[
"MIT"
] | null | null | null |
poetry/console/config/application_config.py
|
michielboekhoff/poetry
|
92b1e61c45f13868ffab663fa3e9be2e26e8c368
|
[
"MIT"
] | null | null | null |
import logging
from cleo.config import ApplicationConfig as BaseApplicationConfig
from clikit.api.event import PRE_HANDLE
from clikit.api.event import PreHandleEvent
from clikit.api.formatter import Style
from clikit.api.io import Input
from clikit.api.io import InputStream
from clikit.api.io import Output
from clikit.api.io import OutputStream
from clikit.api.io.flags import DEBUG
from clikit.api.io.flags import VERBOSE
from clikit.api.io.flags import VERY_VERBOSE
from clikit.formatter import AnsiFormatter
from clikit.formatter import PlainFormatter
from clikit.io.input_stream import StandardInputStream
from clikit.io.output_stream import ErrorOutputStream
from clikit.io.output_stream import StandardOutputStream
from poetry.console.commands.command import Command
from poetry.console.commands.env_command import EnvCommand
from poetry.console.logging import IOFormatter
from poetry.console.logging import IOHandler
class ApplicationConfig(BaseApplicationConfig):
def configure(self):
super(ApplicationConfig, self).configure()
self.add_style(Style("c1").fg("cyan"))
self.add_style(Style("info").fg("blue"))
self.add_style(Style("comment").fg("green"))
self.add_style(Style("error").fg("red").bold())
self.add_style(Style("warning").fg("yellow"))
self.add_style(Style("debug").fg("black").bold())
self.add_event_listener(PRE_HANDLE, self.register_command_loggers)
self.add_event_listener(PRE_HANDLE, self.set_env)
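    # Attach an IO-backed logging handler to every logger the command declares
    # and pick the log level from the CLI verbosity flags.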
def register_command_loggers(
self, event, event_name, _ # type: PreHandleEvent # type: str
): # type: (...) -> None
command = event.command.config.handler
if not isinstance(command, Command):
return
io = event.io
if not command.loggers:
return
handler = IOHandler(io)
handler.setFormatter(IOFormatter())
for logger in command.loggers:
logger = logging.getLogger(logger)
logger.handlers = [handler]
logger.propagate = False
level = logging.WARNING
if io.is_debug():
level = logging.DEBUG
elif io.is_very_verbose() or io.is_verbose():
level = logging.INFO
logger.setLevel(level)
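    # Create or reuse the project's virtualenv and attach it to env-aware commands.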
def set_env(self, event, event_name, _): # type: (PreHandleEvent, str, _) -> None
from poetry.semver import parse_constraint
from poetry.utils.env import EnvManager
command = event.command.config.handler # type: EnvCommand
if not isinstance(command, EnvCommand):
return
io = event.io
poetry = command.poetry
env_manager = EnvManager(poetry)
env = env_manager.create_venv(io)
if env.is_venv() and io.is_verbose():
io.write_line("Using virtualenv: <comment>{}</>".format(env.path))
command.set_env(env)
def resolve_help_command(
self, event, event_name, dispatcher
): # type: (PreResolveEvent, str, EventDispatcher) -> None
args = event.raw_args
application = event.application
if args.has_option_token("-h") or args.has_option_token("--help"):
from clikit.api.resolver import ResolvedCommand
resolved_command = self.command_resolver.resolve(args, application)
# If the current command is the run one, skip option
# check and interpret them as part of the executed command
if resolved_command.command.name == "run":
event.set_resolved_command(resolved_command)
return event.stop_propagation()
command = application.get_command("help")
# Enable lenient parsing
parsed_args = command.parse(args, True)
event.set_resolved_command(ResolvedCommand(command, parsed_args))
event.stop_propagation()
def create_io(
self,
application,
args,
input_stream=None,
output_stream=None,
error_stream=None,
): # type: (Application, RawArgs, InputStream, OutputStream, OutputStream) -> IO
if input_stream is None:
input_stream = StandardInputStream()
if output_stream is None:
output_stream = StandardOutputStream()
if error_stream is None:
error_stream = ErrorOutputStream()
style_set = application.config.style_set
if output_stream.supports_ansi():
output_formatter = AnsiFormatter(style_set)
else:
output_formatter = PlainFormatter(style_set)
if error_stream.supports_ansi():
error_formatter = AnsiFormatter(style_set)
else:
error_formatter = PlainFormatter(style_set)
io = self.io_class(
Input(input_stream),
Output(output_stream, output_formatter),
Output(error_stream, error_formatter),
)
resolved_command = application.resolve_command(args)
# If the current command is the run one, skip option
# check and interpret them as part of the executed command
if resolved_command.command.name == "run":
return io
if args.has_option_token("--no-ansi"):
formatter = PlainFormatter(style_set)
io.output.set_formatter(formatter)
io.error_output.set_formatter(formatter)
elif args.has_option_token("--ansi"):
formatter = AnsiFormatter(style_set, True)
io.output.set_formatter(formatter)
io.error_output.set_formatter(formatter)
if args.has_option_token("-vvv") or self.is_debug():
io.set_verbosity(DEBUG)
elif args.has_option_token("-vv"):
io.set_verbosity(VERY_VERBOSE)
elif args.has_option_token("-v"):
io.set_verbosity(VERBOSE)
if args.has_option_token("--quiet") or args.has_option_token("-q"):
io.set_quiet(True)
if args.has_option_token("--no-interaction") or args.has_option_token("-n"):
io.set_interactive(False)
return io
| 34.677966
| 86
| 0.655914
|
import logging
from cleo.config import ApplicationConfig as BaseApplicationConfig
from clikit.api.event import PRE_HANDLE
from clikit.api.event import PreHandleEvent
from clikit.api.formatter import Style
from clikit.api.io import Input
from clikit.api.io import InputStream
from clikit.api.io import Output
from clikit.api.io import OutputStream
from clikit.api.io.flags import DEBUG
from clikit.api.io.flags import VERBOSE
from clikit.api.io.flags import VERY_VERBOSE
from clikit.formatter import AnsiFormatter
from clikit.formatter import PlainFormatter
from clikit.io.input_stream import StandardInputStream
from clikit.io.output_stream import ErrorOutputStream
from clikit.io.output_stream import StandardOutputStream
from poetry.console.commands.command import Command
from poetry.console.commands.env_command import EnvCommand
from poetry.console.logging import IOFormatter
from poetry.console.logging import IOHandler
class ApplicationConfig(BaseApplicationConfig):
def configure(self):
super(ApplicationConfig, self).configure()
self.add_style(Style("c1").fg("cyan"))
self.add_style(Style("info").fg("blue"))
self.add_style(Style("comment").fg("green"))
self.add_style(Style("error").fg("red").bold())
self.add_style(Style("warning").fg("yellow"))
self.add_style(Style("debug").fg("black").bold())
self.add_event_listener(PRE_HANDLE, self.register_command_loggers)
self.add_event_listener(PRE_HANDLE, self.set_env)
def register_command_loggers(
        self, event, event_name, _
    ):
        command = event.command.config.handler
if not isinstance(command, Command):
return
io = event.io
if not command.loggers:
return
handler = IOHandler(io)
handler.setFormatter(IOFormatter())
for logger in command.loggers:
logger = logging.getLogger(logger)
logger.handlers = [handler]
logger.propagate = False
level = logging.WARNING
if io.is_debug():
level = logging.DEBUG
elif io.is_very_verbose() or io.is_verbose():
level = logging.INFO
logger.setLevel(level)
def set_env(self, event, event_name, _):
from poetry.semver import parse_constraint
from poetry.utils.env import EnvManager
command = event.command.config.handler
if not isinstance(command, EnvCommand):
return
io = event.io
poetry = command.poetry
env_manager = EnvManager(poetry)
env = env_manager.create_venv(io)
if env.is_venv() and io.is_verbose():
io.write_line("Using virtualenv: <comment>{}</>".format(env.path))
command.set_env(env)
def resolve_help_command(
self, event, event_name, dispatcher
):
args = event.raw_args
application = event.application
if args.has_option_token("-h") or args.has_option_token("--help"):
from clikit.api.resolver import ResolvedCommand
resolved_command = self.command_resolver.resolve(args, application)
if resolved_command.command.name == "run":
event.set_resolved_command(resolved_command)
return event.stop_propagation()
command = application.get_command("help")
parsed_args = command.parse(args, True)
event.set_resolved_command(ResolvedCommand(command, parsed_args))
event.stop_propagation()
def create_io(
self,
application,
args,
input_stream=None,
output_stream=None,
error_stream=None,
):
if input_stream is None:
input_stream = StandardInputStream()
if output_stream is None:
output_stream = StandardOutputStream()
if error_stream is None:
error_stream = ErrorOutputStream()
style_set = application.config.style_set
if output_stream.supports_ansi():
output_formatter = AnsiFormatter(style_set)
else:
output_formatter = PlainFormatter(style_set)
if error_stream.supports_ansi():
error_formatter = AnsiFormatter(style_set)
else:
error_formatter = PlainFormatter(style_set)
io = self.io_class(
Input(input_stream),
Output(output_stream, output_formatter),
Output(error_stream, error_formatter),
)
resolved_command = application.resolve_command(args)
if resolved_command.command.name == "run":
return io
if args.has_option_token("--no-ansi"):
formatter = PlainFormatter(style_set)
io.output.set_formatter(formatter)
io.error_output.set_formatter(formatter)
elif args.has_option_token("--ansi"):
formatter = AnsiFormatter(style_set, True)
io.output.set_formatter(formatter)
io.error_output.set_formatter(formatter)
if args.has_option_token("-vvv") or self.is_debug():
io.set_verbosity(DEBUG)
elif args.has_option_token("-vv"):
io.set_verbosity(VERY_VERBOSE)
elif args.has_option_token("-v"):
io.set_verbosity(VERBOSE)
if args.has_option_token("--quiet") or args.has_option_token("-q"):
io.set_quiet(True)
if args.has_option_token("--no-interaction") or args.has_option_token("-n"):
io.set_interactive(False)
return io
| true
| true
|
f71a76296b3a7b1e16734137964be646122469c5
| 8,766
|
py
|
Python
|
userbot/__init__.py
|
PratikGoswamiPM/OpenUserBot
|
1ba7845522a5d5619d2705421a303aa82ce35abb
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2021-07-18T06:57:28.000Z
|
2021-07-18T06:57:28.000Z
|
userbot/__init__.py
|
PratikGoswamiPM/OpenUserBot
|
1ba7845522a5d5619d2705421a303aa82ce35abb
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/__init__.py
|
PratikGoswamiPM/OpenUserBot
|
1ba7845522a5d5619d2705421a303aa82ce35abb
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
# thanks to penn5 for bug fixing
""" Userbot initialization. """
import os
from sys import version_info
from logging import basicConfig, getLogger, INFO, DEBUG
from distutils.util import strtobool as sb
from pymongo import MongoClient
from redis import StrictRedis
from pylast import LastFMNetwork, md5
from pySmartDL import SmartDL
from dotenv import load_dotenv
from requests import get
from telethon import TelegramClient
from telethon.sessions import StringSession
load_dotenv("config.env")
# Bot Logs setup:
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False"))
if CONSOLE_LOGGER_VERBOSE:
basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=DEBUG,
)
else:
basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=INFO)
LOGS = getLogger(__name__)
if version_info < (3, 8):
    LOGS.info("You MUST have a python version of at least 3.8. "
              "Multiple features depend on this. Bot quitting.")
quit(1)
# Check if the config was edited by using the already used variable.
# Basically, its the 'virginity check' for the config file ;)
CONFIG_CHECK = os.environ.get(
"___________PLOX_______REMOVE_____THIS_____LINE__________", None)
if CONFIG_CHECK:
LOGS.info(
"Please remove the line mentioned in the first hashtag from the config.env file"
)
quit(1)
# Telegram App KEY and HASH
API_KEY = os.environ.get("API_KEY", None)
API_HASH = os.environ.get("API_HASH", None)
# Photo Chat - Get this value from http://antiddos.systems
API_TOKEN = os.environ.get("API_TOKEN", None)
API_URL = os.environ.get("API_URL", "http://antiddos.systems")
# Userbot Session String
STRING_SESSION = os.environ.get("STRING_SESSION", None)
# Logging channel/group ID configuration.
BOTLOG_CHATID = int(os.environ.get("BOTLOG_CHATID", None) or 0)
# set to True if you want to log PMs to your PM_LOGGR_BOT_API_ID
NC_LOG_P_M_S = sb(os.environ.get("NC_LOG_P_M_S", "False"))
# send .get_id in any channel to forward all your NEW PMs to this group
PM_LOGGR_BOT_API_ID = int(os.environ.get("PM_LOGGR_BOT_API_ID", "-100"))
# Userbot logging feature switch.
BOTLOG = sb(os.environ.get("BOTLOG", "False"))
LOGSPAMMER = sb(os.environ.get("LOGSPAMMER", "False"))
# Bleep Blop, this is a bot ;)
PM_AUTO_BAN = sb(os.environ.get("PM_AUTO_BAN", "False"))
# Heroku Credentials for updater.
HEROKU_MEMEZ = sb(os.environ.get("HEROKU_MEMEZ", "False"))
HEROKU_APP_NAME = os.environ.get("HEROKU_APP_NAME", None)
HEROKU_API_KEY = os.environ.get("HEROKU_API_KEY", None)
# Github Credentials for updater and Gitupload.
GIT_REPO_NAME = os.environ.get("GIT_REPO_NAME", None)
GITHUB_ACCESS_TOKEN = os.environ.get("GITHUB_ACCESS_TOKEN", None)
# Custom (forked) repo URL for updater.
UPSTREAM_REPO_URL = os.environ.get(
"UPSTREAM_REPO_URL",
"https://github.com/mkaraniya/OpenUserBot.git")
# Console verbose logging
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False"))
# SQL Database URI
DB_URI = os.environ.get("DATABASE_URL", None)
# For MONGO based DataBase
MONGO_URI = os.environ.get("MONGO_URI", None)
# OCR API key
OCR_SPACE_API_KEY = os.environ.get("OCR_SPACE_API_KEY", None)
# remove.bg API key
REM_BG_API_KEY = os.environ.get("REM_BG_API_KEY", None)
# Chrome Driver and Headless Google Chrome Binaries
CHROME_DRIVER = os.environ.get("CHROME_DRIVER", None)
GOOGLE_CHROME_BIN = os.environ.get("GOOGLE_CHROME_BIN", None)
# OpenWeatherMap API Key
OPEN_WEATHER_MAP_APPID = os.environ.get("OPEN_WEATHER_MAP_APPID", None)
WEATHER_DEFCITY = os.environ.get("WEATHER_DEFCITY", None)
# Lydia API
LYDIA_API_KEY = os.environ.get("LYDIA_API_KEY", None)
# set blacklist_chats where you do not want userbot's features
UB_BLACK_LIST_CHAT = os.environ.get("UB_BLACK_LIST_CHAT", "")
# Telegraph
TELEGRAPH_SHORT_NAME = os.environ.get("TELEGRAPH_SHORT_NAME", None)
# Anti Spambot Config
ANTI_SPAMBOT = sb(os.environ.get("ANTI_SPAMBOT", "False"))
ANTI_SPAMBOT_SHOUT = sb(os.environ.get("ANTI_SPAMBOT_SHOUT", "False"))
# Youtube API key
YOUTUBE_API_KEY = os.environ.get("YOUTUBE_API_KEY", None)
# Default .alive name
ALIVE_NAME = os.environ.get("ALIVE_NAME", None)
# Time & Date - Country and Time Zone
COUNTRY = str(os.environ.get("COUNTRY", ""))
TZ_NUMBER = int(os.environ.get("TZ_NUMBER", 1))
TERM_ALIAS = os.environ.get("TERM_ALIAS", "OUB")
# Clean Welcome
CLEAN_WELCOME = sb(os.environ.get("CLEAN_WELCOME", "True"))
# Last.fm Module
BIO_PREFIX = os.environ.get("BIO_PREFIX", None)
DEFAULT_BIO = os.environ.get("DEFAULT_BIO", None)
LASTFM_API = os.environ.get("LASTFM_API", None)
LASTFM_SECRET = os.environ.get("LASTFM_SECRET", None)
LASTFM_USERNAME = os.environ.get("LASTFM_USERNAME", None)
LASTFM_PASSWORD_PLAIN = os.environ.get("LASTFM_PASSWORD", None)
LASTFM_PASS = md5(LASTFM_PASSWORD_PLAIN) if LASTFM_PASSWORD_PLAIN else None
if LASTFM_API and LASTFM_SECRET and LASTFM_USERNAME and LASTFM_PASS:
lastfm = LastFMNetwork(api_key=LASTFM_API,
api_secret=LASTFM_SECRET,
username=LASTFM_USERNAME,
password_hash=LASTFM_PASS)
else:
lastfm = None
# Google Drive Module
G_DRIVE_DATA = os.environ.get("G_DRIVE_DATA", None)
G_DRIVE_CLIENT_ID = os.environ.get("G_DRIVE_CLIENT_ID", None)
G_DRIVE_CLIENT_SECRET = os.environ.get("G_DRIVE_CLIENT_SECRET", None)
G_DRIVE_AUTH_TOKEN_DATA = os.environ.get("G_DRIVE_AUTH_TOKEN_DATA", None)
GDRIVE_FOLDER_ID = os.environ.get("GDRIVE_FOLDER_ID", None)
TEMP_DOWNLOAD_DIRECTORY = os.environ.get("TMP_DOWNLOAD_DIRECTORY",
"./downloads")
# Genius lyrics get this value from https://genius.com/developers both has same values
GENIUS_API_TOKEN = os.environ.get("GENIUS", None)
# Genius lyrics get this value from https://genius.com/developers both has same values
GENIUS = os.environ.get("GENIUS_API_TOKEN", None)
# Init Mongo
MONGOCLIENT = MongoClient(MONGO_URI, 27017, serverSelectionTimeoutMS=1)
MONGO = MONGOCLIENT.userbot
# bit.ly module
BITLY_TOKEN = os.environ.get("BITLY_TOKEN", None)
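# Report whether the Mongo server responds within the configured selection timeout.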
def is_mongo_alive():
try:
MONGOCLIENT.server_info()
except BaseException:
return False
return True
# Init Redis
# Redis will be hosted inside the docker container that hosts the bot
# We need redis for just caching, so we just leave it to non-persistent
REDIS = StrictRedis(host='localhost', port=6379, db=0)
def is_redis_alive():
try:
REDIS.ping()
return True
except BaseException:
return False
# Setting Up CloudMail.ru and MEGA.nz extractor binaries,
# and giving them correct perms to work properly.
if not os.path.exists('bin'):
os.mkdir('bin')
binaries = {
"https://raw.githubusercontent.com/adekmaulana/megadown/master/megadown":
"bin/megadown",
"https://raw.githubusercontent.com/yshalsager/cmrudl.py/master/cmrudl.py":
"bin/cmrudl"
}
for binary, path in binaries.items():
downloader = SmartDL(binary, path, progress_bar=False)
downloader.start()
os.chmod(path, 0o755)
# 'bot' variable
if STRING_SESSION:
# pylint: disable=invalid-name
bot = TelegramClient(StringSession(STRING_SESSION), API_KEY, API_HASH)
else:
# pylint: disable=invalid-name
bot = TelegramClient("userbot", API_KEY, API_HASH)
async def check_botlog_chatid():
if not BOTLOG_CHATID and LOGSPAMMER:
LOGS.info(
"You must set up the BOTLOG_CHATID variable in the config.env or environment variables, for the private error log storage to work."
)
quit(1)
elif not BOTLOG_CHATID and BOTLOG:
LOGS.info(
"You must set up the BOTLOG_CHATID variable in the config.env or environment variables, for the userbot logging feature to work."
)
quit(1)
elif not BOTLOG or not LOGSPAMMER:
return
entity = await bot.get_entity(BOTLOG_CHATID)
if entity.default_banned_rights.send_messages:
LOGS.info(
"Your account doesn't have rights to send messages to BOTLOG_CHATID "
"group. Check if you typed the Chat ID correctly.")
quit(1)
with bot:
try:
bot.loop.run_until_complete(check_botlog_chatid())
except:
LOGS.info(
"BOTLOG_CHATID environment variable isn't a "
"valid entity. Check your environment variables/config.env file.")
quit(1)
# Global Variables
COUNT_MSG = 0
USERS = {}
COUNT_PM = {}
LASTMSG = {}
ENABLE_KILLME = True
CMD_HELP = {}
ISAFK = False
AFKREASON = None
| 31.876364
| 143
| 0.722222
|
import os
from sys import version_info
from logging import basicConfig, getLogger, INFO, DEBUG
from distutils.util import strtobool as sb
from pymongo import MongoClient
from redis import StrictRedis
from pylast import LastFMNetwork, md5
from pySmartDL import SmartDL
from dotenv import load_dotenv
from requests import get
from telethon import TelegramClient
from telethon.sessions import StringSession
load_dotenv("config.env")
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False"))
if CONSOLE_LOGGER_VERBOSE:
basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=DEBUG,
)
else:
basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=INFO)
LOGS = getLogger(__name__)
if version_info < (3, 8):
    LOGS.info("You MUST have a python version of at least 3.8. "
              "Multiple features depend on this. Bot quitting.")
quit(1)
CONFIG_CHECK = os.environ.get(
"___________PLOX_______REMOVE_____THIS_____LINE__________", None)
if CONFIG_CHECK:
LOGS.info(
"Please remove the line mentioned in the first hashtag from the config.env file"
)
quit(1)
API_KEY = os.environ.get("API_KEY", None)
API_HASH = os.environ.get("API_HASH", None)
API_TOKEN = os.environ.get("API_TOKEN", None)
API_URL = os.environ.get("API_URL", "http://antiddos.systems")
STRING_SESSION = os.environ.get("STRING_SESSION", None)
BOTLOG_CHATID = int(os.environ.get("BOTLOG_CHATID", None))
NC_LOG_P_M_S = bool(os.environ.get("NC_LOG_P_M_S", False))
PM_LOGGR_BOT_API_ID = int(os.environ.get("PM_LOGGR_BOT_API_ID", "-100"))
BOTLOG = sb(os.environ.get("BOTLOG", "False"))
LOGSPAMMER = sb(os.environ.get("LOGSPAMMER", "False"))
PM_AUTO_BAN = sb(os.environ.get("PM_AUTO_BAN", "False"))
HEROKU_MEMEZ = sb(os.environ.get("HEROKU_MEMEZ", "False"))
HEROKU_APP_NAME = os.environ.get("HEROKU_APP_NAME", None)
HEROKU_API_KEY = os.environ.get("HEROKU_API_KEY", None)
GIT_REPO_NAME = os.environ.get("GIT_REPO_NAME", None)
GITHUB_ACCESS_TOKEN = os.environ.get("GITHUB_ACCESS_TOKEN", None)
UPSTREAM_REPO_URL = os.environ.get(
"UPSTREAM_REPO_URL",
"https://github.com/mkaraniya/OpenUserBot.git")
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False"))
DB_URI = os.environ.get("DATABASE_URL", None)
MONGO_URI = os.environ.get("MONGO_URI", None)
OCR_SPACE_API_KEY = os.environ.get("OCR_SPACE_API_KEY", None)
REM_BG_API_KEY = os.environ.get("REM_BG_API_KEY", None)
CHROME_DRIVER = os.environ.get("CHROME_DRIVER", None)
GOOGLE_CHROME_BIN = os.environ.get("GOOGLE_CHROME_BIN", None)
OPEN_WEATHER_MAP_APPID = os.environ.get("OPEN_WEATHER_MAP_APPID", None)
WEATHER_DEFCITY = os.environ.get("WEATHER_DEFCITY", None)
LYDIA_API_KEY = os.environ.get("LYDIA_API_KEY", None)
UB_BLACK_LIST_CHAT = os.environ.get("UB_BLACK_LIST_CHAT", "")
# Telegraph
TELEGRAPH_SHORT_NAME = os.environ.get("TELEGRAPH_SHORT_NAME", None)
# Anti Spambot Config
ANTI_SPAMBOT = sb(os.environ.get("ANTI_SPAMBOT", "False"))
ANTI_SPAMBOT_SHOUT = sb(os.environ.get("ANTI_SPAMBOT_SHOUT", "False"))
# Youtube API key
YOUTUBE_API_KEY = os.environ.get("YOUTUBE_API_KEY", None)
# Default .alive name
ALIVE_NAME = os.environ.get("ALIVE_NAME", None)
# Time & Date - Country and Time Zone
COUNTRY = str(os.environ.get("COUNTRY", ""))
TZ_NUMBER = int(os.environ.get("TZ_NUMBER", 1))
TERM_ALIAS = os.environ.get("TERM_ALIAS", "OUB")
# Clean Welcome
CLEAN_WELCOME = sb(os.environ.get("CLEAN_WELCOME", "True"))
# Last.fm Module
BIO_PREFIX = os.environ.get("BIO_PREFIX", None)
DEFAULT_BIO = os.environ.get("DEFAULT_BIO", None)
LASTFM_API = os.environ.get("LASTFM_API", None)
LASTFM_SECRET = os.environ.get("LASTFM_SECRET", None)
LASTFM_USERNAME = os.environ.get("LASTFM_USERNAME", None)
LASTFM_PASSWORD_PLAIN = os.environ.get("LASTFM_PASSWORD", None)
LASTFM_PASS = md5(LASTFM_PASSWORD_PLAIN)
if LASTFM_API and LASTFM_SECRET and LASTFM_USERNAME and LASTFM_PASS:
lastfm = LastFMNetwork(api_key=LASTFM_API,
api_secret=LASTFM_SECRET,
username=LASTFM_USERNAME,
password_hash=LASTFM_PASS)
else:
lastfm = None
# Google Drive Module
G_DRIVE_DATA = os.environ.get("G_DRIVE_DATA", None)
G_DRIVE_CLIENT_ID = os.environ.get("G_DRIVE_CLIENT_ID", None)
G_DRIVE_CLIENT_SECRET = os.environ.get("G_DRIVE_CLIENT_SECRET", None)
G_DRIVE_AUTH_TOKEN_DATA = os.environ.get("G_DRIVE_AUTH_TOKEN_DATA", None)
GDRIVE_FOLDER_ID = os.environ.get("GDRIVE_FOLDER_ID", None)
TEMP_DOWNLOAD_DIRECTORY = os.environ.get("TMP_DOWNLOAD_DIRECTORY",
"./downloads")
# Genius lyrics: get this value from https://genius.com/developers (both variables share the same value)
GENIUS_API_TOKEN = os.environ.get("GENIUS", None)
# Genius lyrics: get this value from https://genius.com/developers (both variables share the same value)
GENIUS = os.environ.get("GENIUS_API_TOKEN", None)
# Init Mongo
MONGOCLIENT = MongoClient(MONGO_URI, 27017, serverSelectionTimeoutMS=1)
MONGO = MONGOCLIENT.userbot
# bit.ly module
BITLY_TOKEN = os.environ.get("BITLY_TOKEN", None)
def is_mongo_alive():
try:
MONGOCLIENT.server_info()
except BaseException:
return False
return True
# Init Redis
# Redis will be hosted inside the docker container that hosts the bot
# We need redis for just caching, so we just leave it to non-persistent
REDIS = StrictRedis(host='localhost', port=6379, db=0)
def is_redis_alive():
try:
REDIS.ping()
return True
except BaseException:
return False
# Setting Up CloudMail.ru and MEGA.nz extractor binaries,
# and giving them correct perms to work properly.
if not os.path.exists('bin'):
os.mkdir('bin')
binaries = {
"https://raw.githubusercontent.com/adekmaulana/megadown/master/megadown":
"bin/megadown",
"https://raw.githubusercontent.com/yshalsager/cmrudl.py/master/cmrudl.py":
"bin/cmrudl"
}
for binary, path in binaries.items():
downloader = SmartDL(binary, path, progress_bar=False)
downloader.start()
os.chmod(path, 0o755)
# 'bot' variable
if STRING_SESSION:
# pylint: disable=invalid-name
bot = TelegramClient(StringSession(STRING_SESSION), API_KEY, API_HASH)
else:
# pylint: disable=invalid-name
bot = TelegramClient("userbot", API_KEY, API_HASH)
async def check_botlog_chatid():
if not BOTLOG_CHATID and LOGSPAMMER:
LOGS.info(
"You must set up the BOTLOG_CHATID variable in the config.env or environment variables, for the private error log storage to work."
)
quit(1)
elif not BOTLOG_CHATID and BOTLOG:
LOGS.info(
"You must set up the BOTLOG_CHATID variable in the config.env or environment variables, for the userbot logging feature to work."
)
quit(1)
elif not BOTLOG or not LOGSPAMMER:
return
entity = await bot.get_entity(BOTLOG_CHATID)
if entity.default_banned_rights.send_messages:
LOGS.info(
"Your account doesn't have rights to send messages to BOTLOG_CHATID "
"group. Check if you typed the Chat ID correctly.")
quit(1)
with bot:
try:
bot.loop.run_until_complete(check_botlog_chatid())
except:
LOGS.info(
"BOTLOG_CHATID environment variable isn't a "
"valid entity. Check your environment variables/config.env file.")
quit(1)
# Global Variables
COUNT_MSG = 0
USERS = {}
COUNT_PM = {}
LASTMSG = {}
ENABLE_KILLME = True
CMD_HELP = {}
ISAFK = False
AFKREASON = None
| true
| true
|
f71a763946c4caf38418e8a819b9202fc549a816
| 15,744
|
py
|
Python
|
superset/connectors/druid/views.py
|
whelan9453/incubator-superset
|
4e3cea45a5136a28442eea50fddc6cf423a9ddd5
|
[
"Apache-2.0"
] | null | null | null |
superset/connectors/druid/views.py
|
whelan9453/incubator-superset
|
4e3cea45a5136a28442eea50fddc6cf423a9ddd5
|
[
"Apache-2.0"
] | 2
|
2019-11-11T11:16:32.000Z
|
2019-12-13T07:12:09.000Z
|
superset/connectors/druid/views.py
|
whelan9453/incubator-superset
|
4e3cea45a5136a28442eea50fddc6cf423a9ddd5
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
import json
import logging
from datetime import datetime
from flask import flash, Markup, redirect
from flask_appbuilder import CompactCRUDMixin, expose
from flask_appbuilder.fieldwidgets import Select2Widget
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.security.decorators import has_access
from flask_babel import gettext as __, lazy_gettext as _
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from superset import appbuilder, db, security_manager
from superset.connectors.base.views import DatasourceModelView
from superset.connectors.connector_registry import ConnectorRegistry
from superset.utils import core as utils
from superset.views.base import (
BaseSupersetView,
DatasourceFilter,
DeleteMixin,
get_datasource_exist_error_msg,
ListWidgetWithCheckboxes,
SupersetModelView,
validate_json,
YamlExportMixin,
)
from . import models
class DruidColumnInlineView(CompactCRUDMixin, SupersetModelView):
datamodel = SQLAInterface(models.DruidColumn)
list_title = _("Columns")
show_title = _("Show Druid Column")
add_title = _("Add Druid Column")
edit_title = _("Edit Druid Column")
list_widget = ListWidgetWithCheckboxes
edit_columns = [
"column_name",
"verbose_name",
"description",
"dimension_spec_json",
"datasource",
"groupby",
"filterable",
]
add_columns = edit_columns
list_columns = ["column_name", "verbose_name", "type", "groupby", "filterable"]
can_delete = False
page_size = 500
label_columns = {
"column_name": _("Column"),
"type": _("Type"),
"datasource": _("Datasource"),
"groupby": _("Groupable"),
"filterable": _("Filterable"),
}
description_columns = {
"filterable": _(
"Whether this column is exposed in the `Filters` section "
"of the explore view."
),
"dimension_spec_json": utils.markdown(
"this field can be used to specify "
"a `dimensionSpec` as documented [here]"
"(http://druid.io/docs/latest/querying/dimensionspecs.html). "
"Make sure to input valid JSON and that the "
"`outputName` matches the `column_name` defined "
"above.",
True,
),
}
add_form_extra_fields = {
"datasource": QuerySelectField(
"Datasource",
query_factory=lambda: db.session().query(models.DruidDatasource),
allow_blank=True,
widget=Select2Widget(extra_classes="readonly"),
)
}
edit_form_extra_fields = add_form_extra_fields
def pre_update(self, col):
# If a dimension spec JSON is given, ensure that it is
# valid JSON and that `outputName` is specified
if col.dimension_spec_json:
try:
dimension_spec = json.loads(col.dimension_spec_json)
except ValueError as e:
raise ValueError("Invalid Dimension Spec JSON: " + str(e))
if not isinstance(dimension_spec, dict):
raise ValueError("Dimension Spec must be a JSON object")
if "outputName" not in dimension_spec:
raise ValueError("Dimension Spec does not contain `outputName`")
if "dimension" not in dimension_spec:
raise ValueError("Dimension Spec is missing `dimension`")
# `outputName` should be the same as the `column_name`
if dimension_spec["outputName"] != col.column_name:
raise ValueError(
"`outputName` [{}] unequal to `column_name` [{}]".format(
dimension_spec["outputName"], col.column_name
)
)
def post_update(self, col):
col.refresh_metrics()
def post_add(self, col):
self.post_update(col)
appbuilder.add_view_no_menu(DruidColumnInlineView)
class DruidMetricInlineView(CompactCRUDMixin, SupersetModelView):
datamodel = SQLAInterface(models.DruidMetric)
list_title = _("Metrics")
show_title = _("Show Druid Metric")
add_title = _("Add Druid Metric")
edit_title = _("Edit Druid Metric")
list_columns = ["metric_name", "verbose_name", "metric_type"]
edit_columns = [
"metric_name",
"description",
"verbose_name",
"metric_type",
"json",
"datasource",
"d3format",
"warning_text",
]
add_columns = edit_columns
page_size = 500
validators_columns = {"json": [validate_json]}
description_columns = {
"metric_type": utils.markdown(
"use `postagg` as the metric type if you are defining a "
"[Druid Post Aggregation]"
"(http://druid.io/docs/latest/querying/post-aggregations.html)",
True,
)
}
label_columns = {
"metric_name": _("Metric"),
"description": _("Description"),
"verbose_name": _("Verbose Name"),
"metric_type": _("Type"),
"json": _("JSON"),
"datasource": _("Druid Datasource"),
"warning_text": _("Warning Message"),
}
add_form_extra_fields = {
"datasource": QuerySelectField(
"Datasource",
query_factory=lambda: db.session().query(models.DruidDatasource),
allow_blank=True,
widget=Select2Widget(extra_classes="readonly"),
)
}
edit_form_extra_fields = add_form_extra_fields
appbuilder.add_view_no_menu(DruidMetricInlineView)
class DruidClusterModelView(SupersetModelView, DeleteMixin, YamlExportMixin):
datamodel = SQLAInterface(models.DruidCluster)
list_title = _("Druid Clusters")
show_title = _("Show Druid Cluster")
add_title = _("Add Druid Cluster")
edit_title = _("Edit Druid Cluster")
add_columns = [
"verbose_name",
"broker_host",
"broker_port",
"broker_user",
"broker_pass",
"broker_endpoint",
"cache_timeout",
"cluster_name",
]
edit_columns = add_columns
list_columns = ["cluster_name", "metadata_last_refreshed"]
search_columns = ("cluster_name",)
label_columns = {
"cluster_name": _("Cluster"),
"broker_host": _("Broker Host"),
"broker_port": _("Broker Port"),
"broker_user": _("Broker Username"),
"broker_pass": _("Broker Password"),
"broker_endpoint": _("Broker Endpoint"),
"verbose_name": _("Verbose Name"),
"cache_timeout": _("Cache Timeout"),
"metadata_last_refreshed": _("Metadata Last Refreshed"),
}
description_columns = {
"cache_timeout": _(
"Duration (in seconds) of the caching timeout for this cluster. "
"A timeout of 0 indicates that the cache never expires. "
"Note this defaults to the global timeout if undefined."
),
"broker_user": _(
"Druid supports basic authentication. See "
"[auth](http://druid.io/docs/latest/design/auth.html) and "
"druid-basic-security extension"
),
"broker_pass": _(
"Druid supports basic authentication. See "
"[auth](http://druid.io/docs/latest/design/auth.html) and "
"druid-basic-security extension"
),
}
yaml_dict_key = "databases"
edit_form_extra_fields = {
"cluster_name": QuerySelectField(
"Cluster",
query_factory=lambda: db.session().query(models.DruidCluster),
widget=Select2Widget(extra_classes="readonly"),
)
}
def pre_add(self, cluster):
security_manager.add_permission_view_menu("database_access", cluster.perm)
def pre_update(self, cluster):
self.pre_add(cluster)
def _delete(self, pk):
DeleteMixin._delete(self, pk)
appbuilder.add_view(
DruidClusterModelView,
name="Druid Clusters",
label=__("Druid Clusters"),
icon="fa-cubes",
category="Sources",
category_label=__("Sources"),
category_icon="fa-database",
)
class DruidDatasourceModelView(DatasourceModelView, DeleteMixin, YamlExportMixin):
datamodel = SQLAInterface(models.DruidDatasource)
list_title = _("Druid Datasources")
show_title = _("Show Druid Datasource")
add_title = _("Add Druid Datasource")
edit_title = _("Edit Druid Datasource")
list_columns = ["datasource_link", "cluster", "changed_by_", "modified"]
order_columns = ["datasource_link", "modified"]
related_views = [DruidColumnInlineView, DruidMetricInlineView]
edit_columns = [
"datasource_name",
"cluster",
"description",
"owners",
"is_hidden",
"filter_select_enabled",
"fetch_values_from",
"default_endpoint",
"offset",
"cache_timeout",
]
search_columns = ("datasource_name", "cluster", "description", "owners")
add_columns = edit_columns
show_columns = add_columns + ["perm", "slices"]
page_size = 500
base_order = ("datasource_name", "asc")
description_columns = {
"slices": _(
"The list of charts associated with this table. By "
"altering this datasource, you may change how these associated "
"charts behave. "
"Also note that charts need to point to a datasource, so "
"this form will fail at saving if removing charts from a "
"datasource. If you want to change the datasource for a chart, "
"overwrite the chart from the 'explore view'"
),
"offset": _("Timezone offset (in hours) for this datasource"),
"description": Markup(
'Supports <a href="'
'https://daringfireball.net/projects/markdown/">markdown</a>'
),
"fetch_values_from": _(
"Time expression to use as a predicate when retrieving "
"distinct values to populate the filter component. "
"Only applies when `Enable Filter Select` is on. If "
"you enter `7 days ago`, the distinct list of values in "
"the filter will be populated based on the distinct value over "
"the past week"
),
"filter_select_enabled": _(
"Whether to populate the filter's dropdown in the explore "
"view's filter section with a list of distinct values fetched "
"from the backend on the fly"
),
"default_endpoint": _(
"Redirects to this endpoint when clicking on the datasource "
"from the datasource list"
),
"cache_timeout": _(
"Duration (in seconds) of the caching timeout for this datasource. "
"A timeout of 0 indicates that the cache never expires. "
"Note this defaults to the cluster timeout if undefined."
),
}
base_filters = [["id", DatasourceFilter, lambda: []]]
label_columns = {
"slices": _("Associated Charts"),
"datasource_link": _("Data Source"),
"cluster": _("Cluster"),
"description": _("Description"),
"owners": _("Owners"),
"is_hidden": _("Is Hidden"),
"filter_select_enabled": _("Enable Filter Select"),
"default_endpoint": _("Default Endpoint"),
"offset": _("Time Offset"),
"cache_timeout": _("Cache Timeout"),
"datasource_name": _("Datasource Name"),
"fetch_values_from": _("Fetch Values From"),
"changed_by_": _("Changed By"),
"modified": _("Modified"),
}
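    # Refuse to add a datasource whose name already exists on the same cluster.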
def pre_add(self, datasource):
with db.session.no_autoflush:
query = db.session.query(models.DruidDatasource).filter(
models.DruidDatasource.datasource_name == datasource.datasource_name,
models.DruidDatasource.cluster_name == datasource.cluster.id,
)
if db.session.query(query.exists()).scalar():
raise Exception(get_datasource_exist_error_msg(datasource.full_name))
def post_add(self, datasource):
datasource.refresh_metrics()
security_manager.add_permission_view_menu(
"datasource_access", datasource.get_perm()
)
if datasource.schema:
security_manager.add_permission_view_menu(
"schema_access", datasource.schema_perm
)
def post_update(self, datasource):
self.post_add(datasource)
def _delete(self, pk):
DeleteMixin._delete(self, pk)
appbuilder.add_view(
DruidDatasourceModelView,
"Druid Datasources",
label=__("Druid Datasources"),
category="Sources",
category_label=__("Sources"),
icon="fa-cube",
)
class Druid(BaseSupersetView):
"""The base views for Superset!"""
@has_access
@expose("/refresh_datasources/")
def refresh_datasources(self, refresh_all=True):
"""endpoint that refreshes druid datasources metadata"""
session = db.session()
DruidCluster = ConnectorRegistry.sources["druid"].cluster_class
for cluster in session.query(DruidCluster).all():
cluster_name = cluster.cluster_name
valid_cluster = True
try:
cluster.refresh_datasources(refresh_all=refresh_all)
except Exception as e:
valid_cluster = False
flash(
"Error while processing cluster '{}'\n{}".format(
cluster_name, utils.error_msg_from_exception(e)
),
"danger",
)
logging.exception(e)
pass
if valid_cluster:
cluster.metadata_last_refreshed = datetime.now()
flash(
_("Refreshed metadata from cluster [{}]").format(
cluster.cluster_name
),
"info",
)
session.commit()
return redirect("/druiddatasourcemodelview/list/")
@has_access
@expose("/scan_new_datasources/")
def scan_new_datasources(self):
"""
Calling this endpoint will cause a scan for new
datasources only and add them.
"""
return self.refresh_datasources(refresh_all=False)
appbuilder.add_view_no_menu(Druid)
appbuilder.add_link(
"Scan New Datasources",
label=__("Scan New Datasources"),
href="/druid/scan_new_datasources/",
category="Sources",
category_label=__("Sources"),
category_icon="fa-database",
icon="fa-refresh",
)
appbuilder.add_link(
"Refresh Druid Metadata",
label=__("Refresh Druid Metadata"),
href="/druid/refresh_datasources/",
category="Sources",
category_label=__("Sources"),
category_icon="fa-database",
icon="fa-cog",
)
appbuilder.add_separator("Sources")
| 34.151844
| 85
| 0.624111
|
import json
import logging
from datetime import datetime
from flask import flash, Markup, redirect
from flask_appbuilder import CompactCRUDMixin, expose
from flask_appbuilder.fieldwidgets import Select2Widget
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.security.decorators import has_access
from flask_babel import gettext as __, lazy_gettext as _
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from superset import appbuilder, db, security_manager
from superset.connectors.base.views import DatasourceModelView
from superset.connectors.connector_registry import ConnectorRegistry
from superset.utils import core as utils
from superset.views.base import (
BaseSupersetView,
DatasourceFilter,
DeleteMixin,
get_datasource_exist_error_msg,
ListWidgetWithCheckboxes,
SupersetModelView,
validate_json,
YamlExportMixin,
)
from . import models
class DruidColumnInlineView(CompactCRUDMixin, SupersetModelView):
datamodel = SQLAInterface(models.DruidColumn)
list_title = _("Columns")
show_title = _("Show Druid Column")
add_title = _("Add Druid Column")
edit_title = _("Edit Druid Column")
list_widget = ListWidgetWithCheckboxes
edit_columns = [
"column_name",
"verbose_name",
"description",
"dimension_spec_json",
"datasource",
"groupby",
"filterable",
]
add_columns = edit_columns
list_columns = ["column_name", "verbose_name", "type", "groupby", "filterable"]
can_delete = False
page_size = 500
label_columns = {
"column_name": _("Column"),
"type": _("Type"),
"datasource": _("Datasource"),
"groupby": _("Groupable"),
"filterable": _("Filterable"),
}
description_columns = {
"filterable": _(
"Whether this column is exposed in the `Filters` section "
"of the explore view."
),
"dimension_spec_json": utils.markdown(
"this field can be used to specify "
"a `dimensionSpec` as documented [here]"
"(http://druid.io/docs/latest/querying/dimensionspecs.html). "
"Make sure to input valid JSON and that the "
"`outputName` matches the `column_name` defined "
"above.",
True,
),
}
add_form_extra_fields = {
"datasource": QuerySelectField(
"Datasource",
query_factory=lambda: db.session().query(models.DruidDatasource),
allow_blank=True,
widget=Select2Widget(extra_classes="readonly"),
)
}
edit_form_extra_fields = add_form_extra_fields
def pre_update(self, col):
if col.dimension_spec_json:
try:
dimension_spec = json.loads(col.dimension_spec_json)
except ValueError as e:
raise ValueError("Invalid Dimension Spec JSON: " + str(e))
if not isinstance(dimension_spec, dict):
raise ValueError("Dimension Spec must be a JSON object")
if "outputName" not in dimension_spec:
raise ValueError("Dimension Spec does not contain `outputName`")
if "dimension" not in dimension_spec:
raise ValueError("Dimension Spec is missing `dimension`")
if dimension_spec["outputName"] != col.column_name:
raise ValueError(
"`outputName` [{}] unequal to `column_name` [{}]".format(
dimension_spec["outputName"], col.column_name
)
)
def post_update(self, col):
col.refresh_metrics()
def post_add(self, col):
self.post_update(col)
appbuilder.add_view_no_menu(DruidColumnInlineView)
class DruidMetricInlineView(CompactCRUDMixin, SupersetModelView):
datamodel = SQLAInterface(models.DruidMetric)
list_title = _("Metrics")
show_title = _("Show Druid Metric")
add_title = _("Add Druid Metric")
edit_title = _("Edit Druid Metric")
list_columns = ["metric_name", "verbose_name", "metric_type"]
edit_columns = [
"metric_name",
"description",
"verbose_name",
"metric_type",
"json",
"datasource",
"d3format",
"warning_text",
]
add_columns = edit_columns
page_size = 500
validators_columns = {"json": [validate_json]}
description_columns = {
"metric_type": utils.markdown(
"use `postagg` as the metric type if you are defining a "
"[Druid Post Aggregation]"
"(http://druid.io/docs/latest/querying/post-aggregations.html)",
True,
)
}
label_columns = {
"metric_name": _("Metric"),
"description": _("Description"),
"verbose_name": _("Verbose Name"),
"metric_type": _("Type"),
"json": _("JSON"),
"datasource": _("Druid Datasource"),
"warning_text": _("Warning Message"),
}
add_form_extra_fields = {
"datasource": QuerySelectField(
"Datasource",
query_factory=lambda: db.session().query(models.DruidDatasource),
allow_blank=True,
widget=Select2Widget(extra_classes="readonly"),
)
}
edit_form_extra_fields = add_form_extra_fields
appbuilder.add_view_no_menu(DruidMetricInlineView)
class DruidClusterModelView(SupersetModelView, DeleteMixin, YamlExportMixin):
datamodel = SQLAInterface(models.DruidCluster)
list_title = _("Druid Clusters")
show_title = _("Show Druid Cluster")
add_title = _("Add Druid Cluster")
edit_title = _("Edit Druid Cluster")
add_columns = [
"verbose_name",
"broker_host",
"broker_port",
"broker_user",
"broker_pass",
"broker_endpoint",
"cache_timeout",
"cluster_name",
]
edit_columns = add_columns
list_columns = ["cluster_name", "metadata_last_refreshed"]
search_columns = ("cluster_name",)
label_columns = {
"cluster_name": _("Cluster"),
"broker_host": _("Broker Host"),
"broker_port": _("Broker Port"),
"broker_user": _("Broker Username"),
"broker_pass": _("Broker Password"),
"broker_endpoint": _("Broker Endpoint"),
"verbose_name": _("Verbose Name"),
"cache_timeout": _("Cache Timeout"),
"metadata_last_refreshed": _("Metadata Last Refreshed"),
}
description_columns = {
"cache_timeout": _(
"Duration (in seconds) of the caching timeout for this cluster. "
"A timeout of 0 indicates that the cache never expires. "
"Note this defaults to the global timeout if undefined."
),
"broker_user": _(
"Druid supports basic authentication. See "
"[auth](http://druid.io/docs/latest/design/auth.html) and "
"druid-basic-security extension"
),
"broker_pass": _(
"Druid supports basic authentication. See "
"[auth](http://druid.io/docs/latest/design/auth.html) and "
"druid-basic-security extension"
),
}
yaml_dict_key = "databases"
edit_form_extra_fields = {
"cluster_name": QuerySelectField(
"Cluster",
query_factory=lambda: db.session().query(models.DruidCluster),
widget=Select2Widget(extra_classes="readonly"),
)
}
def pre_add(self, cluster):
security_manager.add_permission_view_menu("database_access", cluster.perm)
def pre_update(self, cluster):
self.pre_add(cluster)
def _delete(self, pk):
DeleteMixin._delete(self, pk)
appbuilder.add_view(
DruidClusterModelView,
name="Druid Clusters",
label=__("Druid Clusters"),
icon="fa-cubes",
category="Sources",
category_label=__("Sources"),
category_icon="fa-database",
)
class DruidDatasourceModelView(DatasourceModelView, DeleteMixin, YamlExportMixin):
datamodel = SQLAInterface(models.DruidDatasource)
list_title = _("Druid Datasources")
show_title = _("Show Druid Datasource")
add_title = _("Add Druid Datasource")
edit_title = _("Edit Druid Datasource")
list_columns = ["datasource_link", "cluster", "changed_by_", "modified"]
order_columns = ["datasource_link", "modified"]
related_views = [DruidColumnInlineView, DruidMetricInlineView]
edit_columns = [
"datasource_name",
"cluster",
"description",
"owners",
"is_hidden",
"filter_select_enabled",
"fetch_values_from",
"default_endpoint",
"offset",
"cache_timeout",
]
search_columns = ("datasource_name", "cluster", "description", "owners")
add_columns = edit_columns
show_columns = add_columns + ["perm", "slices"]
page_size = 500
base_order = ("datasource_name", "asc")
description_columns = {
"slices": _(
"The list of charts associated with this table. By "
"altering this datasource, you may change how these associated "
"charts behave. "
"Also note that charts need to point to a datasource, so "
"this form will fail at saving if removing charts from a "
"datasource. If you want to change the datasource for a chart, "
"overwrite the chart from the 'explore view'"
),
"offset": _("Timezone offset (in hours) for this datasource"),
"description": Markup(
'Supports <a href="'
'https://daringfireball.net/projects/markdown/">markdown</a>'
),
"fetch_values_from": _(
"Time expression to use as a predicate when retrieving "
"distinct values to populate the filter component. "
"Only applies when `Enable Filter Select` is on. If "
"you enter `7 days ago`, the distinct list of values in "
"the filter will be populated based on the distinct value over "
"the past week"
),
"filter_select_enabled": _(
"Whether to populate the filter's dropdown in the explore "
"view's filter section with a list of distinct values fetched "
"from the backend on the fly"
),
"default_endpoint": _(
"Redirects to this endpoint when clicking on the datasource "
"from the datasource list"
),
"cache_timeout": _(
"Duration (in seconds) of the caching timeout for this datasource. "
"A timeout of 0 indicates that the cache never expires. "
"Note this defaults to the cluster timeout if undefined."
),
}
base_filters = [["id", DatasourceFilter, lambda: []]]
label_columns = {
"slices": _("Associated Charts"),
"datasource_link": _("Data Source"),
"cluster": _("Cluster"),
"description": _("Description"),
"owners": _("Owners"),
"is_hidden": _("Is Hidden"),
"filter_select_enabled": _("Enable Filter Select"),
"default_endpoint": _("Default Endpoint"),
"offset": _("Time Offset"),
"cache_timeout": _("Cache Timeout"),
"datasource_name": _("Datasource Name"),
"fetch_values_from": _("Fetch Values From"),
"changed_by_": _("Changed By"),
"modified": _("Modified"),
}
def pre_add(self, datasource):
with db.session.no_autoflush:
query = db.session.query(models.DruidDatasource).filter(
models.DruidDatasource.datasource_name == datasource.datasource_name,
models.DruidDatasource.cluster_name == datasource.cluster.id,
)
if db.session.query(query.exists()).scalar():
raise Exception(get_datasource_exist_error_msg(datasource.full_name))
def post_add(self, datasource):
datasource.refresh_metrics()
security_manager.add_permission_view_menu(
"datasource_access", datasource.get_perm()
)
if datasource.schema:
security_manager.add_permission_view_menu(
"schema_access", datasource.schema_perm
)
def post_update(self, datasource):
self.post_add(datasource)
def _delete(self, pk):
DeleteMixin._delete(self, pk)
appbuilder.add_view(
DruidDatasourceModelView,
"Druid Datasources",
label=__("Druid Datasources"),
category="Sources",
category_label=__("Sources"),
icon="fa-cube",
)
class Druid(BaseSupersetView):
@has_access
@expose("/refresh_datasources/")
def refresh_datasources(self, refresh_all=True):
session = db.session()
DruidCluster = ConnectorRegistry.sources["druid"].cluster_class
for cluster in session.query(DruidCluster).all():
cluster_name = cluster.cluster_name
valid_cluster = True
try:
cluster.refresh_datasources(refresh_all=refresh_all)
except Exception as e:
valid_cluster = False
flash(
"Error while processing cluster '{}'\n{}".format(
cluster_name, utils.error_msg_from_exception(e)
),
"danger",
)
logging.exception(e)
pass
if valid_cluster:
cluster.metadata_last_refreshed = datetime.now()
flash(
_("Refreshed metadata from cluster [{}]").format(
cluster.cluster_name
),
"info",
)
session.commit()
return redirect("/druiddatasourcemodelview/list/")
@has_access
@expose("/scan_new_datasources/")
def scan_new_datasources(self):
return self.refresh_datasources(refresh_all=False)
appbuilder.add_view_no_menu(Druid)
appbuilder.add_link(
"Scan New Datasources",
label=__("Scan New Datasources"),
href="/druid/scan_new_datasources/",
category="Sources",
category_label=__("Sources"),
category_icon="fa-database",
icon="fa-refresh",
)
appbuilder.add_link(
"Refresh Druid Metadata",
label=__("Refresh Druid Metadata"),
href="/druid/refresh_datasources/",
category="Sources",
category_label=__("Sources"),
category_icon="fa-database",
icon="fa-cog",
)
appbuilder.add_separator("Sources")
| true
| true
|
f71a774da43e92d9a5b2ea6f28b39201e558710f
| 2,045
|
py
|
Python
|
speech_recognition.py
|
pmaen/biopython
|
b6cafe09b3670762d0768cbf2df36fb21b4bd5af
|
[
"MIT"
] | 1
|
2020-12-24T13:06:31.000Z
|
2020-12-24T13:06:31.000Z
|
speech_recognition.py
|
pmaen/biopython
|
b6cafe09b3670762d0768cbf2df36fb21b4bd5af
|
[
"MIT"
] | null | null | null |
speech_recognition.py
|
pmaen/biopython
|
b6cafe09b3670762d0768cbf2df36fb21b4bd5af
|
[
"MIT"
] | null | null | null |
import os.path
import speech_recognition as sr
import moviepy.editor as mp
from pydub import AudioSegment
from pydub.utils import make_chunks
import time
import glob
import re
import math
from pathlib import Path
import soundfile as sf
lang = input("Please choose the language for voice recognition by language code. (deutsch: de-DE)\n")
filename = input("Please enter the whole file path including the extension:\n")
fileaudio = filename + ".wav"
title = input("What's the topic?\n")
start_time = time.time()
clip = mp.VideoFileClip(filename)
clip.audio.write_audiofile(fileaudio)
myaudio = AudioSegment.from_file(fileaudio, "wav")
chunk_length_ms = 60000 # pydub calculates in millisec
chunks = make_chunks(myaudio,chunk_length_ms)
r = sr.Recognizer()
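# Export each ~60 s chunk to its own WAV file and transcribe it with Google's recognizer, skipping silent chunks.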
for i, chunk in enumerate(chunks):
chunk_name = "{0}.wav".format(i)
print ("exporting", chunk_name)
chunk.export(chunk_name, format="wav")
audio = sr.AudioFile(chunk_name)
x, fs = sf.read(chunk_name)
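    # Crude silence check: x.max() - x.min() is the peak-to-peak amplitude (not a true RMS);
    # the threshold 6.103515625e-05 equals 2/32768, i.e. two LSBs of 16-bit audio.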
vol_rms = x.max() - x.min()
if vol_rms <= 6.103515625e-05:
os.remove(chunk_name)
        print(chunk_name + " was empty and therefore deleted.")
else:
with audio as source:
audio_file = r.record(source)
result = r.recognize_google(audio_file, language=lang)
        with open(chunk_name + ".rectext", mode='w') as file:
file.write(result)
print("Part " + str(i) + " finished.")
        os.remove(chunk_name)
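# Stitch the per-chunk transcripts back together in numeric chunk order (0.wav.rectext, 1.wav.rectext, ...).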
file_pattern = re.compile(r'.*?(\d+).*?')
def get_order(file):
match = file_pattern.match(Path(file).name)
if not match:
return math.inf
return int(match.groups()[0])
read_files = sorted(glob.glob("*.rectext"), key=get_order)
with open(filename + "_transcript.txt", "w") as outfile:
for f in read_files:
with open(f, "r") as infile:
outfile.write(infile.read())
outfile.write("\n")
cleanup = glob.glob("*.rectext")
for rectextfile in cleanup:
os.remove(rectextfile)
print("Done after %.2f seconds."% (time.time() - start_time))
| 31.953125
| 101
| 0.681174
|
import os.path
import speech_recognition as sr
import moviepy.editor as mp
from pydub import AudioSegment
from pydub.utils import make_chunks
import time
import glob
import re
import math
from pathlib import Path
import soundfile as sf
lang = input("Please choose the language for voice recognition by language code. (deutsch: de-DE)\n")
filename = input("Please enter the whole file path including the extension:\n")
fileaudio = filename + ".wav"
title = input("What's the topic?\n")
start_time = time.time()
clip = mp.VideoFileClip(filename)
clip.audio.write_audiofile(fileaudio)
myaudio = AudioSegment.from_file(fileaudio, "wav")
chunk_length_ms = 60000 # pydub calculates in millisec
chunks = make_chunks(myaudio,chunk_length_ms)
r = sr.Recognizer()
for i, chunk in enumerate(chunks):
chunk_name = "{0}.wav".format(i)
print ("exporting", chunk_name)
chunk.export(chunk_name, format="wav")
audio = sr.AudioFile(chunk_name)
x, fs = sf.read(chunk_name)
vol_rms = x.max() - x.min()
if vol_rms <= 6.103515625e-05:
os.remove(chunk_name)
        print(chunk_name + " was empty and therefore deleted.")
else:
with audio as source:
audio_file = r.record(source)
result = r.recognize_google(audio_file, language=lang)
        with open(chunk_name + ".rectext", mode='w') as file:
file.write(result)
print("Part " + str(i) + " finished.")
        os.remove(chunk_name)
file_pattern = re.compile(r'.*?(\d+).*?')
def get_order(file):
match = file_pattern.match(Path(file).name)
if not match:
return math.inf
return int(match.groups()[0])
read_files = sorted(glob.glob("*.rectext"), key=get_order)
with open(filename + "_transcript.txt", "w") as outfile:
for f in read_files:
with open(f, "r") as infile:
outfile.write(infile.read())
outfile.write("\n")
cleanup = glob.glob("*.rectext")
for rectextfile in cleanup:
os.remove(rectextfile)
print("Done after %.2f seconds."% (time.time() - start_time))
| true
| true
|
f71a77a49def227a97ac06d0cce2532e8e039b8f
| 2,042
|
py
|
Python
|
code/game/goldspinner.py
|
LordZagreus/LodeRunner
|
68aab36be47cabe31e52f3ee43520bdafcdf3c95
|
[
"MIT"
] | 1
|
2017-10-31T22:26:22.000Z
|
2017-10-31T22:26:22.000Z
|
code/game/goldspinner.py
|
team-sparrow/LodeRunner
|
68aab36be47cabe31e52f3ee43520bdafcdf3c95
|
[
"MIT"
] | 2
|
2019-07-05T03:17:18.000Z
|
2019-07-08T16:15:29.000Z
|
code/game/goldspinner.py
|
team-sparrow/LodeRunner
|
68aab36be47cabe31e52f3ee43520bdafcdf3c95
|
[
"MIT"
] | 1
|
2020-10-15T09:03:20.000Z
|
2020-10-15T09:03:20.000Z
|
import math
from particle import Particle
#from glfunctions import draw_sprite
from code.constants.common import GOLD_SPINNER_LIFESPAN, TILE_WIDTH, TILE_HEIGHT
from code.controllers.intervalcontroller import IntervalController
class GoldSpinner(Particle):
def __init__(self, x, y, dest_x, dest_y):
Particle.__init__(self, x, y, 0, 0, 0) # I don't care about tile index / particle index stuff
# No alpha delay
self.alpha_wait = 0
# These things don't have gravity...
self.gravity = 0
self.max_gravity = 0
# Calculate the distance between spawn and target
distance = math.sqrt( ((x - dest_x) * (x - dest_x)) + ((y - dest_y) * (y - dest_y)) )
# Calculate the angle between the spawn location and the target location...
radians = (math.pi / 4)
# Prevent division by 0
if (dest_x != x):
radians = math.atan( float(abs(dest_y - y)) / float(abs(dest_x - x)) )
        # The gold spinner has a given lifespan. We must cross the distance in that duration...
speed = float(distance) / float(GOLD_SPINNER_LIFESPAN)
# Define rate of movement
self.dx = int( math.cos(radians) * speed )
self.dy = int( math.sin(radians) * speed )
# Adjust +/- for the direction this gold is headed...
if (x > dest_x):
self.dx *= -1
if (y > dest_y):
self.dy *= -1
# Based on destination coordinates and the time this particle is allowed to exist,
# calculate an appropriate alpha fade speed...
self.alpha_controller.set_speed_out( (1 / float(GOLD_SPINNER_LIFESPAN)) )
# Define a rotational speed
self.rotational_speed = -10
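    # Render the spinning gold sprite at the particle's current position, applying the alpha fade and rotation.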
def render(self, sx, sy, gold_sprite, window_controller):
window_controller.get_gfx_controller().draw_sprite(sx + self.get_x(), sy + self.get_y(), TILE_WIDTH, TILE_HEIGHT, gold_sprite, frame = 0, gl_color = (1, 1, 1, self.alpha_controller.get_interval()), degrees = self.degrees)
| 31.90625
| 229
| 0.639079
|
import math
from particle import Particle
from code.constants.common import GOLD_SPINNER_LIFESPAN, TILE_WIDTH, TILE_HEIGHT
from code.controllers.intervalcontroller import IntervalController
class GoldSpinner(Particle):
def __init__(self, x, y, dest_x, dest_y):
Particle.__init__(self, x, y, 0, 0, 0)
# No alpha delay
self.alpha_wait = 0
# These things don't have gravity...
self.gravity = 0
self.max_gravity = 0
distance = math.sqrt( ((x - dest_x) * (x - dest_x)) + ((y - dest_y) * (y - dest_y)) )
radians = (math.pi / 4)
if (dest_x != x):
radians = math.atan( float(abs(dest_y - y)) / float(abs(dest_x - x)) )
speed = float(distance) / float(GOLD_SPINNER_LIFESPAN)
self.dx = int( math.cos(radians) * speed )
self.dy = int( math.sin(radians) * speed )
if (x > dest_x):
self.dx *= -1
if (y > dest_y):
self.dy *= -1
self.alpha_controller.set_speed_out( (1 / float(GOLD_SPINNER_LIFESPAN)) )
self.rotational_speed = -10
def render(self, sx, sy, gold_sprite, window_controller):
window_controller.get_gfx_controller().draw_sprite(sx + self.get_x(), sy + self.get_y(), TILE_WIDTH, TILE_HEIGHT, gold_sprite, frame = 0, gl_color = (1, 1, 1, self.alpha_controller.get_interval()), degrees = self.degrees)
| true
| true
|
f71a77c1632e053843e6fa96b6402b20781b54ae
| 561
|
py
|
Python
|
audiovisual/indico_audiovisual/blueprint.py
|
pferreir/indico-plugins-cern
|
0fc2eb6b1aa3c3083a813477886a6632f148a4d9
|
[
"MIT"
] | null | null | null |
audiovisual/indico_audiovisual/blueprint.py
|
pferreir/indico-plugins-cern
|
0fc2eb6b1aa3c3083a813477886a6632f148a4d9
|
[
"MIT"
] | null | null | null |
audiovisual/indico_audiovisual/blueprint.py
|
pferreir/indico-plugins-cern
|
0fc2eb6b1aa3c3083a813477886a6632f148a4d9
|
[
"MIT"
] | null | null | null |
# This file is part of the CERN Indico plugins.
# Copyright (C) 2014 - 2019 CERN
#
# The CERN Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License; see
# the LICENSE file for more details.
from __future__ import unicode_literals
from indico.core.plugins import IndicoPluginBlueprint
from indico_audiovisual.controllers import RHRequestList
blueprint = IndicoPluginBlueprint('audiovisual', __name__, url_prefix='/service/audiovisual')
blueprint.add_url_rule('/', 'request_list', RHRequestList)
| 33
| 93
| 0.798574
|
from __future__ import unicode_literals
from indico.core.plugins import IndicoPluginBlueprint
from indico_audiovisual.controllers import RHRequestList
blueprint = IndicoPluginBlueprint('audiovisual', __name__, url_prefix='/service/audiovisual')
blueprint.add_url_rule('/', 'request_list', RHRequestList)
| true
| true
|
f71a787e6cf602bff2ff9c173e3363f87c7e53c4
| 42,908
|
py
|
Python
|
ForgeSVN/forgesvn/tests/model/test_repository.py
|
rohankumardubey/allura
|
9c490a051ca912d28b81ce656441d6fed100cb24
|
[
"Apache-2.0"
] | 113
|
2015-03-25T10:33:37.000Z
|
2022-02-16T20:55:06.000Z
|
ForgeSVN/forgesvn/tests/model/test_repository.py
|
rohankumardubey/allura
|
9c490a051ca912d28b81ce656441d6fed100cb24
|
[
"Apache-2.0"
] | 4
|
2017-08-04T16:19:07.000Z
|
2020-06-08T19:01:33.000Z
|
ForgeSVN/forgesvn/tests/model/test_repository.py
|
rohankumardubey/allura
|
9c490a051ca912d28b81ce656441d6fed100cb24
|
[
"Apache-2.0"
] | 36
|
2015-08-14T16:27:39.000Z
|
2022-02-16T20:54:35.000Z
|
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import os
import shutil
import unittest
from unittest import skipUnless
import pkg_resources
from itertools import count, product
from datetime import datetime
from zipfile import ZipFile
from io import BytesIO
from collections import defaultdict
from tg import tmpl_context as c, app_globals as g
import mock
from alluratest.tools import assert_equal, assert_in
from datadiff.tools import assert_equals
import tg
import ming
from ming.base import Object
from ming.orm import session, ThreadLocalORMSession
from testfixtures import TempDirectory
from alluratest.controller import setup_basic_test, setup_global_objects
from allura import model as M
from allura.model.repo_refresh import send_notifications
from allura.lib import helpers as h
from allura.webhooks import RepoPushWebhookSender
from allura.tests.model.test_repo import RepoImplTestBase
from forgesvn import model as SM
from forgesvn.model.svn import svn_path_exists
from forgesvn.tests import with_svn
from allura.tests.decorators import with_tool
import six
from io import open
from six.moves import range
class TestNewRepo(unittest.TestCase):
def setUp(self):
setup_basic_test()
self.setup_with_tools()
@with_svn
def setup_with_tools(self):
setup_global_objects()
h.set_context('test', 'src', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
c.app.repo.name = 'testsvn'
c.app.repo.fs_path = repo_dir
self.repo = c.app.repo
self.repo.refresh()
self.rev = self.repo.commit('HEAD')
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
def test_last_commit_for(self):
tree = self.rev.tree
for row in tree.ls():
assert row['last_commit']['author'] is not None
def test_commit(self):
latest_rev = 7
assert self.rev.primary() is self.rev
assert self.rev.index_id().startswith('allura/model/repo/Commit#')
self.rev.author_url
self.rev.committer_url
assert_equal(self.rev.tree._id, self.rev.tree_id)
assert_equal(self.rev.shorthand_id(), '[r{}]'.format(latest_rev))
assert_equal(self.rev.symbolic_ids, ([], []))
assert_equal(self.rev.url(), '/p/test/src/{}/'.format(latest_rev))
all_cis = list(self.repo.log(self.rev._id, limit=25))
assert_equal(len(all_cis), latest_rev)
self.rev.tree.ls()
assert_equal(self.rev.tree.readme(), ('README', 'This is readme\nAnother Line\n'))
assert_equal(self.rev.tree.path(), '/')
assert_equal(self.rev.tree.url(), '/p/test/src/{}/tree/'.format(latest_rev))
self.rev.tree.by_name['README']
assert self.rev.tree.is_blob('README') is True
assert_equal(self.rev.tree['a']['b']['c'].ls(), [])
self.assertRaises(KeyError, lambda: self.rev.tree['a']['b']['d'])
assert_equal(self.rev.authored_user, None)
assert_equal(self.rev.committed_user, None)
assert_equal(
sorted(self.rev.webhook_info.keys()),
sorted(['id', 'url', 'timestamp', 'message', 'author',
'committer', 'added', 'removed', 'renamed', 'modified', 'copied']))
class TestSVNRepo(unittest.TestCase, RepoImplTestBase):
def setUp(self):
setup_basic_test()
self.setup_with_tools()
@with_svn
@with_tool('test', 'SVN', 'svn-tags', 'SVN with tags')
def setup_with_tools(self):
setup_global_objects()
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
with h.push_context('test', 'src', neighborhood='Projects'):
c.app.repo.name = 'testsvn'
c.app.repo.fs_path = repo_dir
self.repo = c.app.repo
self.repo.refresh()
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
with h.push_context('test', 'svn-tags', neighborhood='Projects'):
c.app.repo.name = 'testsvn-trunk-tags-branches'
c.app.repo.fs_path = repo_dir
self.svn_tags = c.app.repo
self.svn_tags.refresh()
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
h.set_context('test', 'src', neighborhood='Projects')
def test_init(self):
repo = SM.Repository(
name='testsvn',
fs_path=g.tmpdir + '/',
url_path='/test/',
tool='svn',
status='creating')
dirname = os.path.join(repo.fs_path, repo.name)
if os.path.exists(dirname):
shutil.rmtree(dirname)
repo.init()
shutil.rmtree(dirname)
def test_fork(self):
repo = SM.Repository(
name='testsvn',
fs_path=g.tmpdir + '/',
url_path='/test/',
tool='svn',
status='creating')
repo_path = pkg_resources.resource_filename(
'forgesvn', 'tests/data/testsvn')
dirname = os.path.join(repo.fs_path, repo.name)
if os.path.exists(dirname):
shutil.rmtree(dirname)
repo.init()
repo._impl.clone_from('file://' + repo_path)
assert not os.path.exists(
os.path.join(g.tmpdir, 'testsvn/hooks/pre-revprop-change'))
assert os.path.exists(
os.path.join(g.tmpdir, 'testsvn/hooks/post-commit'))
assert os.access(
os.path.join(g.tmpdir, 'testsvn/hooks/post-commit'), os.X_OK)
with open(os.path.join(g.tmpdir, 'testsvn/hooks/post-commit')) as f:
hook_data = f.read()
self.assertIn(
'curl -s http://localhost/auth/refresh_repo/p/test/src/\n',
hook_data)
self.assertIn('exec $DIR/post-commit-user "$@"\n', hook_data)
repo.refresh(notify=False)
assert len(list(repo.log(limit=100)))
shutil.rmtree(dirname)
@mock.patch('forgesvn.model.svn.tg')
def test_can_hotcopy(self, tg):
from forgesvn.model.svn import SVNImplementation
func = SVNImplementation.can_hotcopy
obj = mock.Mock(spec=SVNImplementation)
for combo in product(
['file:///myfile', 'http://myfile'],
[True, False],
['version 1.7', 'version 1.6', 'version 2.0.3']):
source_url = combo[0]
tg.config = {'scm.svn.hotcopy': combo[1]}
stdout = combo[2]
obj.check_call.return_value = stdout, '', 0
expected = (source_url.startswith('file://') and
tg.config['scm.svn.hotcopy'] and
stdout != 'version 1.6')
result = func(obj, source_url)
assert result == expected
@mock.patch('forgesvn.model.svn.g.post_event')
def test_clone(self, post_event):
repo = SM.Repository(
name='testsvn',
fs_path=g.tmpdir + '/',
url_path='/test/',
tool='svn',
status='creating')
repo_path = pkg_resources.resource_filename(
'forgesvn', 'tests/data/testsvn')
dirname = os.path.join(repo.fs_path, repo.name)
if os.path.exists(dirname):
shutil.rmtree(dirname)
repo.init()
repo._impl.clone_from('file://' + repo_path)
assert not os.path.exists(
os.path.join(g.tmpdir, 'testsvn/hooks/pre-revprop-change'))
assert os.path.exists(
os.path.join(g.tmpdir, 'testsvn/hooks/post-commit'))
assert os.access(
os.path.join(g.tmpdir, 'testsvn/hooks/post-commit'), os.X_OK)
with open(os.path.join(g.tmpdir, 'testsvn/hooks/post-commit')) as f:
c = f.read()
self.assertIn(
'curl -s http://localhost/auth/refresh_repo/p/test/src/\n', c)
self.assertIn('exec $DIR/post-commit-user "$@"\n', c)
repo.refresh(notify=False)
assert len(list(repo.log(limit=100)))
shutil.rmtree(dirname)
def test_index(self):
i = self.repo.index()
assert i['type_s'] == 'SVN Repository', i
def test_log_id_only(self):
entries = list(self.repo.log(id_only=True, limit=25))
assert_equal(entries, [7, 6, 5, 4, 3, 2, 1])
def test_log(self):
entries = list(self.repo.log(id_only=False, limit=25))
assert_equal(entries[len(entries)-6:], # only 6, so this test doesn't have to change when commits added
[
{'parents': [5],
'refs': [],
'committed': {
'date': datetime(2013, 11, 8, 13, 38, 11, 152821),
'name': 'coldmind', 'email': ''},
'message': '',
'rename_details': {},
'id': 6,
'authored': {
'date': datetime(2013, 11, 8, 13, 38, 11, 152821),
'name': 'coldmind',
'email': ''
}, 'size': None},
{'parents': [4],
'refs': [],
'committed': {
'date': datetime(2010, 11, 18, 20, 14, 21, 515743),
'name': 'rick446',
'email': ''},
'message': 'Copied a => b',
'rename_details': {},
'id': 5,
'authored': {
'date': datetime(2010, 11, 18, 20, 14, 21, 515743),
'name': 'rick446',
'email': ''},
'size': None},
{'parents': [3],
'refs': [],
'committed': {
'date': datetime(2010, 10, 8, 15, 32, 59, 383719),
'name': 'rick446',
'email': ''},
'message': 'Remove hello.txt',
'rename_details': {},
'id': 4,
'authored': {
'date': datetime(2010, 10, 8, 15, 32, 59, 383719),
'name': 'rick446',
'email': ''},
'size': None},
{'parents': [2],
'refs': [],
'committed': {
'date': datetime(2010, 10, 8, 15, 32, 48, 272296),
'name': 'rick446',
'email': ''},
'message': 'Modify readme',
'rename_details': {},
'id': 3,
'authored':
{'date': datetime(2010, 10, 8, 15, 32, 48, 272296),
'name': 'rick446',
'email': ''},
'size': None},
{'parents': [1],
'refs': [],
'committed': {
'date': datetime(2010, 10, 8, 15, 32, 36, 221863),
'name': 'rick446',
'email': ''},
'message': 'Add path',
'rename_details': {},
'id': 2,
'authored': {
'date': datetime(2010, 10, 8, 15, 32, 36, 221863),
'name': 'rick446',
'email': ''},
'size': None},
{'parents': [],
'refs': [],
'committed': {
'date': datetime(2010, 10, 8, 15, 32, 7, 238375),
'name': 'rick446',
'email': ''},
'message': 'Create readme',
'rename_details': {},
'id': 1,
'authored': {
'date': datetime(2010, 10, 8, 15, 32, 7, 238375),
'name': 'rick446',
'email': ''},
'size': None}])
def test_log_file(self):
entries = list(self.repo.log(path='/README', id_only=False, limit=25))
assert_equal(entries, [
{'authored': {'date': datetime(2010, 10, 8, 15, 32, 48, 272296),
'email': '',
'name': 'rick446'},
'committed': {'date': datetime(2010, 10, 8, 15, 32, 48, 272296),
'email': '',
'name': 'rick446'},
'id': 3,
'message': 'Modify readme',
'parents': [2],
'refs': [],
'size': 28,
'rename_details': {}},
{'authored': {'date': datetime(2010, 10, 8, 15, 32, 7, 238375),
'email': '',
'name': 'rick446'},
'committed': {'date': datetime(2010, 10, 8, 15, 32, 7, 238375),
'email': '',
'name': 'rick446'},
'id': 1,
'message': 'Create readme',
'parents': [],
'refs': [],
'size': 15,
'rename_details': {}},
])
def test_is_file(self):
assert self.repo.is_file('/README')
assert not self.repo.is_file('/a')
def test_paged_diffs(self):
entry = self.repo.commit(next(self.repo.log(2, id_only=True, limit=1)))
self.assertEqual(entry.diffs, entry.paged_diffs())
self.assertEqual(entry.diffs, entry.paged_diffs(start=0))
added_expected = entry.diffs.added[1:3]
expected = dict(
copied=[], changed=[], removed=[], renamed=[],
added=added_expected, total=4)
actual = entry.paged_diffs(start=1, end=3)
self.assertEqual(expected, actual)
fake_id = self.repo._impl._oid(100)
empty = M.repository.Commit(_id=fake_id, repo=self.repo).paged_diffs()
self.assertEqual(sorted(actual.keys()), sorted(empty.keys()))
def test_diff_create_file(self):
entry = self.repo.commit(next(self.repo.log(1, id_only=True, limit=1)))
self.assertEqual(
entry.diffs, dict(
copied=[], changed=[], renamed=[],
removed=[], added=['/README'], total=1))
def test_diff_create_path(self):
entry = self.repo.commit(next(self.repo.log(2, id_only=True, limit=1)))
actual = entry.diffs
actual.added = sorted(actual.added)
self.assertEqual(
entry.diffs, dict(
copied=[], changed=[], removed=[], renamed=[],
added=sorted([
'/a', '/a/b', '/a/b/c',
'/a/b/c/hello.txt']), total=4))
def test_diff_modify_file(self):
entry = self.repo.commit(next(self.repo.log(3, id_only=True, limit=1)))
self.assertEqual(
entry.diffs, dict(
copied=[], changed=['/README'], renamed=[],
removed=[], added=[], total=1))
def test_diff_delete(self):
entry = self.repo.commit(next(self.repo.log(4, id_only=True, limit=1)))
self.assertEqual(
entry.diffs, dict(
copied=[], changed=[], renamed=[],
removed=['/a/b/c/hello.txt'], added=[], total=1))
def test_diff_copy(self):
entry = self.repo.commit(next(self.repo.log(5, id_only=True, limit=1)))
assert_equals(dict(entry.diffs), dict(
copied=[{'new': '/b', 'old': '/a', 'ratio': 1}], renamed=[],
changed=[], removed=[], added=[], total=1))
def test_commit(self):
entry = self.repo.commit(1)
assert entry.committed.name == 'rick446'
assert entry.message
def test_svn_path_exists(self):
repo_path = pkg_resources.resource_filename(
'forgesvn', 'tests/data/testsvn')
assert svn_path_exists("file://%s/a" % repo_path)
assert svn_path_exists("file://%s" % repo_path)
assert not svn_path_exists("file://%s/badpath" % repo_path)
with mock.patch('forgesvn.model.svn.pysvn') as pysvn:
svn_path_exists('dummy')
pysvn.Client.return_value.info2.assert_called_once_with(
'dummy',
revision=pysvn.Revision.return_value,
recurse=False)
@skipUnless(os.path.exists(tg.config.get('scm.repos.tarball.zip_binary', '/usr/bin/zip')), 'zip binary is missing')
def test_tarball(self):
tmpdir = tg.config['scm.repos.tarball.root']
assert_equal(self.repo.tarball_path,
os.path.join(tmpdir, 'svn/t/te/test/testsvn'))
assert_equal(self.repo.tarball_url('1'),
'file:///svn/t/te/test/testsvn/test-src-r1.zip')
self.repo.tarball('1')
assert os.path.isfile(
os.path.join(tmpdir, "svn/t/te/test/testsvn/test-src-r1.zip"))
tarball_zip = ZipFile(
os.path.join(tmpdir, 'svn/t/te/test/testsvn/test-src-r1.zip'), 'r')
assert_equal(tarball_zip.namelist(),
['test-src-r1/', 'test-src-r1/README'])
shutil.rmtree(self.repo.tarball_path.encode('utf-8'),
ignore_errors=True)
@skipUnless(os.path.exists(tg.config.get('scm.repos.tarball.zip_binary', '/usr/bin/zip')), 'zip binary is missing')
def test_tarball_paths(self):
rev = '19'
h.set_context('test', 'svn-tags', neighborhood='Projects')
tmpdir = tg.config['scm.repos.tarball.root']
tarball_path = os.path.join(tmpdir, 'svn/t/te/test/testsvn-trunk-tags-branches/')
# a tag
self.svn_tags.tarball(rev, '/tags/tag-1.0/')
fn = tarball_path + 'test-svn-tags-r19-tags-tag-1.0.zip'
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
tag_content = sorted(['test-svn-tags-r19-tags-tag-1.0/',
'test-svn-tags-r19-tags-tag-1.0/svn-commit.tmp',
'test-svn-tags-r19-tags-tag-1.0/README'])
assert_equal(sorted(snapshot.namelist()), tag_content)
os.remove(fn)
# a directory (of tags)
self.svn_tags.tarball(rev, '/tags/')
fn = tarball_path + 'test-svn-tags-r19-tags.zip'
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
tags_content = sorted(['test-svn-tags-r19-tags/',
'test-svn-tags-r19-tags/tag-1.0/',
'test-svn-tags-r19-tags/tag-1.0/svn-commit.tmp',
'test-svn-tags-r19-tags/tag-1.0/README'])
assert_equal(sorted(snapshot.namelist()), tags_content)
os.remove(fn)
# no path, but there are trunk in the repo
# expect snapshot of trunk
self.svn_tags.tarball(rev)
fn = tarball_path + 'test-svn-tags-r19-trunk.zip'
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
trunk_content = sorted(['test-svn-tags-r19-trunk/',
'test-svn-tags-r19-trunk/aaa.txt',
'test-svn-tags-r19-trunk/bbb.txt',
'test-svn-tags-r19-trunk/ccc.txt',
'test-svn-tags-r19-trunk/README'])
assert_equal(sorted(snapshot.namelist()), trunk_content)
os.remove(fn)
# no path, and no trunk dir
# expect snapshot of repo root
h.set_context('test', 'src', neighborhood='Projects')
fn = os.path.join(tmpdir, 'svn/t/te/test/testsvn/test-src-r1.zip')
self.repo.tarball('1')
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
assert_equal(snapshot.namelist(), ['test-src-r1/', 'test-src-r1/README'])
shutil.rmtree(os.path.join(tmpdir, 'svn/t/te/test/testsvn/'),
ignore_errors=True)
shutil.rmtree(tarball_path, ignore_errors=True)
def test_is_empty(self):
assert not self.repo.is_empty()
with TempDirectory() as d:
repo2 = SM.Repository(
name='test',
fs_path=d.path,
url_path='/test/',
tool='svn',
status='creating')
repo2.init()
assert repo2.is_empty()
repo2.refresh()
ThreadLocalORMSession.flush_all()
assert repo2.is_empty()
def test_webhook_payload(self):
sender = RepoPushWebhookSender()
all_commits = list(self.repo.all_commit_ids())
start = len(all_commits) - 6 # only get a few so test doesn't have to change after new testdata commits
cids = all_commits[start:start+2]
payload = sender.get_payload(commit_ids=cids)
expected_payload = {
'size': 2,
'after': 'r6',
'before': 'r4',
'commits': [{
'id': 'r6',
'url': 'http://localhost/p/test/src/6/',
'timestamp': datetime(2013, 11, 8, 13, 38, 11, 152000),
'message': '',
'author': {'name': 'coldmind',
'email': '',
'username': ''},
'committer': {'name': 'coldmind',
'email': '',
'username': ''},
'added': ['/ЗРЯЧИЙ_ТА_ПОБАЧИТЬ'],
'removed': [],
'modified': [],
'copied': [],
'renamed': [],
}, {
'id': 'r5',
'url': 'http://localhost/p/test/src/5/',
'timestamp': datetime(2010, 11, 18, 20, 14, 21, 515000),
'message': 'Copied a => b',
'author': {'name': 'rick446',
'email': '',
'username': ''},
'committer': {'name': 'rick446',
'email': '',
'username': ''},
'added': [],
'removed': [],
'modified': [],
'copied': [
{'new': '/b', 'old': '/a', 'ratio': 1},
],
'renamed': [],
}],
'repository': {
'name': 'SVN',
'full_name': '/p/test/src/',
'url': 'http://localhost/p/test/src/',
},
}
assert_equals(payload, expected_payload)
class TestSVNRev(unittest.TestCase):
def setUp(self):
setup_basic_test()
self.setup_with_tools()
@with_svn
def setup_with_tools(self):
setup_global_objects()
h.set_context('test', 'src', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
c.app.repo.name = 'testsvn'
c.app.repo.fs_path = repo_dir
self.repo = c.app.repo
self.repo.refresh()
self.rev = self.repo.commit(1)
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
def test_url(self):
assert self.rev.url().endswith('/1/')
def test_primary(self):
assert self.rev.primary() == self.rev
def test_shorthand(self):
assert self.rev.shorthand_id() == '[r1]'
def test_diff(self):
diffs = (self.rev.diffs.added
+ self.rev.diffs.removed
+ self.rev.diffs.changed
+ self.rev.diffs.copied)
for d in diffs:
print(d)
def _oid(self, rev_id):
return '%s:%s' % (self.repo._id, rev_id)
def test_log(self):
# path only
commits = list(self.repo.log(self.repo.head, id_only=True, limit=25))
assert_equal(commits, [7, 6, 5, 4, 3, 2, 1])
commits = list(self.repo.log(self.repo.head, 'README', id_only=True, limit=25))
assert_equal(commits, [3, 1])
commits = list(self.repo.log(1, 'README', id_only=True, limit=25))
assert_equal(commits, [1])
commits = list(self.repo.log(self.repo.head, 'a/b/c/', id_only=True, limit=25))
assert_equal(commits, [4, 2])
commits = list(self.repo.log(3, 'a/b/c/', id_only=True, limit=25))
assert_equal(commits, [2])
assert_equal(
list(self.repo.log(self.repo.head, 'does/not/exist', id_only=True, limit=25)), [])
def test_notification_email(self):
setup_global_objects()
h.set_context('test', 'src', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
self.repo = SM.Repository(
name='testsvn',
fs_path=repo_dir,
url_path='/test/',
tool='svn',
status='creating')
self.repo.refresh()
ThreadLocalORMSession.flush_all()
send_notifications(self.repo, [self.repo.rev_to_commit_id(1)])
ThreadLocalORMSession.flush_all()
n = M.Notification.query.find({'subject': '[test:src] New commit [r1] by rick446'}).first()
assert n
assert_in('By rick446', n.text)
assert_in('Create readme', n.text)
class _Test(unittest.TestCase):
idgen = ('obj_%d' % i for i in count())
def _make_tree(self, object_id, **kwargs):
t, isnew = M.repository.Tree.upsert(object_id)
repo = getattr(self, 'repo', None)
t.repo = repo
for k, v in six.iteritems(kwargs):
if isinstance(v, six.string_types):
obj = M.repository.Blob(
t, k, next(self.idgen))
t.blob_ids.append(Object(
name=k, id=obj._id))
else:
obj = self._make_tree(next(self.idgen), **v)
t.tree_ids.append(Object(
name=k, id=obj._id))
session(t).flush()
return t
def _make_commit(self, object_id, **tree_parts):
ci, isnew = M.repository.Commit.upsert(object_id)
if isnew:
ci.committed.email = c.user.email_addresses[0]
ci.authored.email = c.user.email_addresses[0]
dt = datetime.utcnow()
# BSON datetime resolution is to 1 millisecond, not 1 microsecond
# like Python. Round this now so it'll match the value that's
# pulled from MongoDB in the tests.
ci.authored.date = dt.replace(microsecond=dt.microsecond // 1000 * 1000)
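            # e.g. dt.microsecond == 123456 is rounded down to 123000 here (illustrative)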
        ci.message = 'summary\n\ndescription'
ci.set_context(self.repo)
ci.tree_id = 't_' + object_id
ci.tree = self._make_tree(ci.tree_id, **tree_parts)
return ci, isnew
def _make_log(self, ci):
session(ci).flush(ci)
def setUp(self):
setup_basic_test()
setup_global_objects()
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
self.prefix = tg.config.get('scm.repos.root', '/')
class _TestWithRepo(_Test):
def setUp(self):
super(_TestWithRepo, self).setUp()
h.set_context('test', neighborhood='Projects')
c.project.install_app('svn', 'test1')
h.set_context('test', 'test1', neighborhood='Projects')
self.repo = M.Repository(name='test1', tool='svn')
self.repo._impl = mock.Mock(spec=M.RepositoryImplementation())
self.repo._impl.shorthand_for_commit = M.RepositoryImplementation.shorthand_for_commit
self.repo._impl.url_for_commit = (
lambda *a, **kw: M.RepositoryImplementation.url_for_commit(
self.repo._impl, *a, **kw))
self.repo._impl._repo = self.repo
self.repo._impl.all_commit_ids = lambda *a, **kw: []
self.repo._impl.commit().symbolic_ids = None
ThreadLocalORMSession.flush_all()
class _TestWithRepoAndCommit(_TestWithRepo):
def setUp(self):
super(_TestWithRepoAndCommit, self).setUp()
self.ci, isnew = self._make_commit('foo')
ThreadLocalORMSession.flush_all()
# ThreadLocalORMSession.close_all()
class TestRepo(_TestWithRepo):
def test_create(self):
assert self.repo.fs_path == os.path.join(self.prefix, 'svn/p/test/')
assert self.repo.url_path == '/p/test/'
assert self.repo.full_fs_path == os.path.join(
self.prefix, 'svn/p/test/test1')
def test_passthrough(self):
argless = ['init']
for fn in argless:
getattr(self.repo, fn)()
getattr(self.repo._impl, fn).assert_called_with()
unary = ['commit', 'open_blob']
for fn in unary:
getattr(self.repo, fn)('foo')
getattr(self.repo._impl, fn).assert_called_with('foo')
def test_shorthand_for_commit(self):
self.assertEqual(
self.repo.shorthand_for_commit('a' * 40),
'[aaaaaa]')
def test_url_for_commit(self):
self.assertEqual(
self.repo.url_for_commit('a' * 40),
'/p/test/test1/ci/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/')
@mock.patch('allura.model.repository.g.post_event')
def test_init_as_clone(self, post_event):
self.repo.init_as_clone('srcpath', 'srcname', 'srcurl')
assert self.repo.upstream_repo.name == 'srcname'
assert self.repo.upstream_repo.url == 'srcurl'
assert self.repo._impl.clone_from.called_with('srcpath')
post_event.assert_called_once_with('repo_cloned', 'srcurl', 'srcpath')
def test_latest(self):
ci = mock.Mock()
self.repo._impl.commit = mock.Mock(return_value=ci)
assert self.repo.latest() is ci
def test_index(self):
i = self.repo.index()
assert i['type_s'] == 'Repository', i
assert i['name_s'] == 'test1', i
def test_scm_host_url(self):
assert_equal(self.repo.clone_url('rw', 'nobody'),
'svn+ssh://nobody@localhost:8022/scm-repo/p/test/test1/')
assert_equal(self.repo.clone_url('https', 'nobody'),
'https://nobody@localhost:8022/scm-repo/p/test/test1/')
with h.push_config(self.repo.app.config.options, external_checkout_url='https://$username@foo.com/'):
assert_equal(self.repo.clone_url('https', 'user'),
'https://user@foo.com/')
def test_guess_type(self):
assert self.repo.guess_type('foo.txt') == ('text/plain', None)
assert self.repo.guess_type('foo.gbaer') == (
'application/octet-stream', None)
assert self.repo.guess_type('foo.html') == ('text/html', None)
assert self.repo.guess_type('.gitignore') == ('text/plain', None)
def test_refresh(self):
committer_name = 'Test Committer'
committer_email = 'test@example.com'
ci = mock.Mock()
ci.authored.name = committer_name
ci.committed.name = committer_name
ci.committed.email = committer_email
ci.author_url = '/u/test-committer/'
ci.activity_name = '[deadbeef]'
ci.activity_url = 'url'
ci.activity_extras = {}
del ci.node_id
self.repo._impl.commit = mock.Mock(return_value=ci)
self.repo._impl.new_commits = mock.Mock(
return_value=['foo%d' % i for i in range(100)])
self.repo._impl.all_commit_ids = mock.Mock(
return_value=['foo%d' % i for i in range(100)])
self.repo.symbolics_for_commit = mock.Mock(
return_value=[['master', 'branch'], []])
def refresh_commit_info(oid, seen, lazy=False):
M.repository.CommitDoc(dict(
authored=dict(
name=committer_name,
date=datetime(2010, 10, 8, 15, 32, 48, 0),
email=committer_email),
_id=oid)).m.insert()
self.repo._impl.refresh_commit_info = refresh_commit_info
_id = lambda oid: getattr(oid, '_id', str(oid))
self.repo.shorthand_for_commit = lambda oid: '[' + _id(oid) + ']'
self.repo.url_for_commit = lambda oid: '/ci/' + _id(oid) + '/'
self.repo.refresh()
ThreadLocalORMSession.flush_all()
notifications = M.Notification.query.find().all()
for n in notifications:
if '100 new commits' in n.subject:
assert_in('By Test Committer on 10/08/2010 15:32', n.text)
assert_in('http://localhost/ci/foo99/', n.text)
break
else:
assert False, 'Did not find notification'
assert M.Feed.query.find(dict(
author_name=committer_name)).count() == 100
def test_refresh_private(self):
ci = mock.Mock()
self.repo._impl.commit = mock.Mock(return_value=ci)
self.repo._impl.new_commits = mock.Mock(
return_value=['foo%d' % i for i in range(100)])
# make unreadable by *anonymous, so additional notification logic
# executes
self.repo.acl = []
c.project.acl = []
self.repo.refresh()
def test_push_upstream_context(self):
self.repo.init_as_clone('srcpath', '/p/test/svn/', '/p/test/svn/')
old_app_instance = M.Project.app_instance
try:
M.Project.app_instance = mock.Mock(return_value=ming.base.Object(
config=ming.base.Object(_id=None)))
with self.repo.push_upstream_context():
assert c.project.shortname == 'test'
finally:
M.Project.app_instance = old_app_instance
def test_pending_upstream_merges(self):
self.repo.init_as_clone('srcpath', '/p/test/svn/', '/p/test/svn/')
old_app_instance = M.Project.app_instance
try:
M.Project.app_instance = mock.Mock(return_value=ming.base.Object(
config=ming.base.Object(_id=None)))
self.repo.pending_upstream_merges()
finally:
M.Project.app_instance = old_app_instance
class TestRepoObject(_TestWithRepoAndCommit):
def test_upsert(self):
obj0, isnew0 = M.repository.Tree.upsert('foo1')
obj1, isnew1 = M.repository.Tree.upsert('foo1')
assert obj0 is obj1
assert isnew0 and not isnew1
def test_artifact_methods(self):
assert self.ci.index_id(
) == 'allura/model/repo/Commit#foo', self.ci.index_id()
assert self.ci.primary() is self.ci, self.ci.primary()
class TestCommit(_TestWithRepo):
def setUp(self):
super(TestCommit, self).setUp()
self.ci, isnew = self._make_commit(
'foo',
a=dict(
a=dict(
a='',
b='',),
b=''))
self.tree = self.ci.tree
impl = M.RepositoryImplementation()
impl._repo = self.repo
self.repo._impl.shorthand_for_commit = impl.shorthand_for_commit
self.repo._impl.url_for_commit = impl.url_for_commit
def test_upsert(self):
obj0, isnew0 = M.repository.Commit.upsert('foo')
obj1, isnew1 = M.repository.Commit.upsert('foo')
assert obj0 is obj1
assert not isnew1
u = M.User.by_username('test-admin')
assert self.ci.author_url == u.url()
assert self.ci.committer_url == u.url()
assert self.ci.tree is self.tree
assert self.ci.summary == 'summary'
assert self.ci.shorthand_id() == '[foo]'
assert self.ci.url() == '/p/test/test1/ci/foo/'
def test_get_path(self):
b = self.ci.get_path('a/a/a')
assert isinstance(b, M.repository.Blob)
x = self.ci.get_path('a/a')
assert isinstance(x, M.repository.Tree)
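    # _unique_blobs builds a fake open_blob callable: the counter closure maps
    # each distinct blob path to a unique integer, so two blobs read as equal
    # only when they share the same path.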
def _unique_blobs(self):
def counter():
counter.i += 1
return counter.i
counter.i = 0
blobs = defaultdict(counter)
        return lambda blob: BytesIO(str(blobs[blob.path()]).encode('utf-8'))
def test_diffs_file_renames(self):
def open_blob(blob):
blobs = {
'a': 'Leia',
'/b/a/a': 'Darth Vader',
'/b/a/b': 'Luke Skywalker',
'/b/b': 'Death Star will destroy you',
'/b/c': 'Luke Skywalker', # moved from /b/a/b
# moved from /b/b and modified
'/b/a/z': 'Death Star will destroy you\nALL',
}
            return BytesIO(blobs.get(blob.path(), '').encode('utf-8'))
self.repo._impl.open_blob = open_blob
self.repo._impl.commit = mock.Mock(return_value=self.ci)
self.repo._impl.paged_diffs.return_value = {
'added': ['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'],
'changed': [],
'copied': [],
'renamed': [],
'removed': [],
'total': 5,
}
assert_equal(self.ci.diffs.added,
['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'])
assert (self.ci.diffs.copied
== self.ci.diffs.changed
== self.ci.diffs.removed
== [])
ci, isnew = self._make_commit(
'bar',
b=dict(
a=dict(
a='',
b='',),
b=''))
ci.parent_ids = ['foo']
self._make_log(ci)
self.repo._impl.paged_diffs.return_value = {
'added': ['b', 'b/a', 'b/a/a', 'b/a/b', 'b/b'],
'renamed': [],
'copied': [],
'changed': [],
'removed': ['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'],
'total': 10,
}
assert_equal(ci.diffs.added, ['b', 'b/a', 'b/a/a', 'b/a/b', 'b/b'])
assert_equal(ci.diffs.removed, ['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'])
assert (ci.diffs.copied
== ci.diffs.changed
== [])
ci, isnew = self._make_commit(
'baz',
b=dict(
a=dict(
z=''),
c=''))
ci.parent_ids = ['bar']
self._make_log(ci)
self.repo._impl.paged_diffs.return_value = {
'added': ['b/c', 'b/a/z'],
'removed': ['/b/a/b', 'b/b'],
'changed': [],
'copied': [
{
'new': 'b/c',
'old': 'b/a/b',
'ratio': 1,
'diff': '',
},
{
'new': 'b/a/z',
'old': 'b/b',
'ratio': 1,
'diff': '',
},
],
'renamed': [],
'total': 2
}
assert_equal(ci.diffs.added, ['b/a/z', 'b/c'])
assert_equal(ci.diffs.changed, [])
assert_equal(ci.diffs.removed, ['/b/a/b', 'b/b'])
# see mock for open_blob
assert_equal(len(ci.diffs.copied), 2)
assert_equal(ci.diffs.copied[1]['old'], 'b/a/b')
assert_equal(ci.diffs.copied[1]['new'], 'b/c')
assert_equal(ci.diffs.copied[1]['ratio'], 1)
assert_equal(ci.diffs.copied[1]['diff'], '')
assert_equal(ci.diffs.copied[0]['old'], 'b/b')
assert_equal(ci.diffs.copied[0]['new'], 'b/a/z')
def test_context(self):
self.ci.context()
class TestRename(unittest.TestCase):
def setUp(self):
setup_basic_test()
self.setup_with_tools()
@with_svn
def setup_with_tools(self):
setup_global_objects()
h.set_context('test', 'src', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
c.app.repo.name = 'testsvn-rename'
c.app.repo.fs_path = repo_dir
self.repo = c.app.repo
self.repo.refresh()
self.rev = self.repo.commit('HEAD')
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
def test_log_file_with_rename(self):
entry = list(self.repo.log(path='/dir/b.txt', id_only=False, limit=1))[0]
assert_equal(entry['id'], 3)
assert_equal(entry['rename_details']['path'], '/dir/a.txt')
assert_equal(
entry['rename_details']['commit_url'],
self.repo.url_for_commit(2) # previous revision
)
def test_check_changed_path(self):
changed_path = {'copyfrom_path': '/test/path', 'path': '/test/path2'}
result = self.repo._impl._check_changed_path(
changed_path, '/test/path2/file.txt')
assert_equal({'path': '/test/path2/file.txt',
'copyfrom_path': '/test/path/file.txt'}, result)
class TestDirectRepoAccess(object):
def setUp(self):
setup_basic_test()
self.setup_with_tools()
@with_svn
def setup_with_tools(self):
setup_global_objects()
h.set_context('test', 'src', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
c.app.repo.name = 'testsvn'
c.app.repo.fs_path = repo_dir
self.repo = c.app.repo
self.repo.refresh()
self.rev = self.repo.commit('HEAD')
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
def test_paged_diffs(self):
_id = self.repo._impl._oid(6)
diffs = self.repo.commit(_id).diffs
expected = {
'added': ['/ЗРЯЧИЙ_ТА_ПОБАЧИТЬ'],
'removed': [],
'changed': [],
'copied': [],
'renamed': [],
'total': 1,
}
assert_equals(diffs, expected)
_id = self.repo._impl._oid(2)
diffs = self.repo.commit(_id).diffs
expected = {
'added': ['/a', '/a/b', '/a/b/c', '/a/b/c/hello.txt'],
'removed': [],
'changed': [],
'renamed': [],
'copied': [],
'total': 4,
}
assert_equals(diffs, expected)
_id = self.repo._impl._oid(3)
diffs = self.repo.commit(_id).diffs
expected = {
'added': [],
'removed': [],
'renamed': [],
'changed': ['/README'],
'copied': [],
'total': 1,
}
assert_equals(diffs, expected)
_id = self.repo._impl._oid(4)
diffs = self.repo.commit(_id).diffs
expected = {
'added': [],
'removed': ['/a/b/c/hello.txt'],
'changed': [],
'renamed': [],
'copied': [],
'total': 1,
}
assert_equals(diffs, expected)
| 37.837743
| 119
| 0.53969
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import os
import shutil
import unittest
from unittest import skipUnless
import pkg_resources
from itertools import count, product
from datetime import datetime
from zipfile import ZipFile
from io import BytesIO
from collections import defaultdict
from tg import tmpl_context as c, app_globals as g
import mock
from alluratest.tools import assert_equal, assert_in
from datadiff.tools import assert_equals
import tg
import ming
from ming.base import Object
from ming.orm import session, ThreadLocalORMSession
from testfixtures import TempDirectory
from alluratest.controller import setup_basic_test, setup_global_objects
from allura import model as M
from allura.model.repo_refresh import send_notifications
from allura.lib import helpers as h
from allura.webhooks import RepoPushWebhookSender
from allura.tests.model.test_repo import RepoImplTestBase
from forgesvn import model as SM
from forgesvn.model.svn import svn_path_exists
from forgesvn.tests import with_svn
from allura.tests.decorators import with_tool
import six
from io import open
from six.moves import range
class TestNewRepo(unittest.TestCase):
def setUp(self):
setup_basic_test()
self.setup_with_tools()
@with_svn
def setup_with_tools(self):
setup_global_objects()
h.set_context('test', 'src', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
c.app.repo.name = 'testsvn'
c.app.repo.fs_path = repo_dir
self.repo = c.app.repo
self.repo.refresh()
self.rev = self.repo.commit('HEAD')
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
def test_last_commit_for(self):
tree = self.rev.tree
for row in tree.ls():
assert row['last_commit']['author'] is not None
def test_commit(self):
latest_rev = 7
assert self.rev.primary() is self.rev
assert self.rev.index_id().startswith('allura/model/repo/Commit#')
self.rev.author_url
self.rev.committer_url
assert_equal(self.rev.tree._id, self.rev.tree_id)
assert_equal(self.rev.shorthand_id(), '[r{}]'.format(latest_rev))
assert_equal(self.rev.symbolic_ids, ([], []))
assert_equal(self.rev.url(), '/p/test/src/{}/'.format(latest_rev))
all_cis = list(self.repo.log(self.rev._id, limit=25))
assert_equal(len(all_cis), latest_rev)
self.rev.tree.ls()
assert_equal(self.rev.tree.readme(), ('README', 'This is readme\nAnother Line\n'))
assert_equal(self.rev.tree.path(), '/')
assert_equal(self.rev.tree.url(), '/p/test/src/{}/tree/'.format(latest_rev))
self.rev.tree.by_name['README']
assert self.rev.tree.is_blob('README') is True
assert_equal(self.rev.tree['a']['b']['c'].ls(), [])
self.assertRaises(KeyError, lambda: self.rev.tree['a']['b']['d'])
assert_equal(self.rev.authored_user, None)
assert_equal(self.rev.committed_user, None)
assert_equal(
sorted(self.rev.webhook_info.keys()),
sorted(['id', 'url', 'timestamp', 'message', 'author',
'committer', 'added', 'removed', 'renamed', 'modified', 'copied']))
class TestSVNRepo(unittest.TestCase, RepoImplTestBase):
def setUp(self):
setup_basic_test()
self.setup_with_tools()
@with_svn
@with_tool('test', 'SVN', 'svn-tags', 'SVN with tags')
def setup_with_tools(self):
setup_global_objects()
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
with h.push_context('test', 'src', neighborhood='Projects'):
c.app.repo.name = 'testsvn'
c.app.repo.fs_path = repo_dir
self.repo = c.app.repo
self.repo.refresh()
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
with h.push_context('test', 'svn-tags', neighborhood='Projects'):
c.app.repo.name = 'testsvn-trunk-tags-branches'
c.app.repo.fs_path = repo_dir
self.svn_tags = c.app.repo
self.svn_tags.refresh()
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
h.set_context('test', 'src', neighborhood='Projects')
def test_init(self):
repo = SM.Repository(
name='testsvn',
fs_path=g.tmpdir + '/',
url_path='/test/',
tool='svn',
status='creating')
dirname = os.path.join(repo.fs_path, repo.name)
if os.path.exists(dirname):
shutil.rmtree(dirname)
repo.init()
shutil.rmtree(dirname)
def test_fork(self):
repo = SM.Repository(
name='testsvn',
fs_path=g.tmpdir + '/',
url_path='/test/',
tool='svn',
status='creating')
repo_path = pkg_resources.resource_filename(
'forgesvn', 'tests/data/testsvn')
dirname = os.path.join(repo.fs_path, repo.name)
if os.path.exists(dirname):
shutil.rmtree(dirname)
repo.init()
repo._impl.clone_from('file://' + repo_path)
assert not os.path.exists(
os.path.join(g.tmpdir, 'testsvn/hooks/pre-revprop-change'))
assert os.path.exists(
os.path.join(g.tmpdir, 'testsvn/hooks/post-commit'))
assert os.access(
os.path.join(g.tmpdir, 'testsvn/hooks/post-commit'), os.X_OK)
with open(os.path.join(g.tmpdir, 'testsvn/hooks/post-commit')) as f:
hook_data = f.read()
self.assertIn(
'curl -s http://localhost/auth/refresh_repo/p/test/src/\n',
hook_data)
self.assertIn('exec $DIR/post-commit-user "$@"\n', hook_data)
repo.refresh(notify=False)
assert len(list(repo.log(limit=100)))
shutil.rmtree(dirname)
@mock.patch('forgesvn.model.svn.tg')
def test_can_hotcopy(self, tg):
from forgesvn.model.svn import SVNImplementation
func = SVNImplementation.can_hotcopy
obj = mock.Mock(spec=SVNImplementation)
for combo in product(
['file:///myfile', 'http://myfile'],
[True, False],
['version 1.7', 'version 1.6', 'version 2.0.3']):
source_url = combo[0]
tg.config = {'scm.svn.hotcopy': combo[1]}
stdout = combo[2]
obj.check_call.return_value = stdout, '', 0
expected = (source_url.startswith('file://') and
tg.config['scm.svn.hotcopy'] and
stdout != 'version 1.6')
result = func(obj, source_url)
assert result == expected
@mock.patch('forgesvn.model.svn.g.post_event')
def test_clone(self, post_event):
repo = SM.Repository(
name='testsvn',
fs_path=g.tmpdir + '/',
url_path='/test/',
tool='svn',
status='creating')
repo_path = pkg_resources.resource_filename(
'forgesvn', 'tests/data/testsvn')
dirname = os.path.join(repo.fs_path, repo.name)
if os.path.exists(dirname):
shutil.rmtree(dirname)
repo.init()
repo._impl.clone_from('file://' + repo_path)
assert not os.path.exists(
os.path.join(g.tmpdir, 'testsvn/hooks/pre-revprop-change'))
assert os.path.exists(
os.path.join(g.tmpdir, 'testsvn/hooks/post-commit'))
assert os.access(
os.path.join(g.tmpdir, 'testsvn/hooks/post-commit'), os.X_OK)
with open(os.path.join(g.tmpdir, 'testsvn/hooks/post-commit')) as f:
c = f.read()
self.assertIn(
'curl -s http://localhost/auth/refresh_repo/p/test/src/\n', c)
self.assertIn('exec $DIR/post-commit-user "$@"\n', c)
repo.refresh(notify=False)
assert len(list(repo.log(limit=100)))
shutil.rmtree(dirname)
def test_index(self):
i = self.repo.index()
assert i['type_s'] == 'SVN Repository', i
def test_log_id_only(self):
entries = list(self.repo.log(id_only=True, limit=25))
assert_equal(entries, [7, 6, 5, 4, 3, 2, 1])
def test_log(self):
entries = list(self.repo.log(id_only=False, limit=25))
assert_equal(entries[len(entries)-6:],
[
{'parents': [5],
'refs': [],
'committed': {
'date': datetime(2013, 11, 8, 13, 38, 11, 152821),
'name': 'coldmind', 'email': ''},
'message': '',
'rename_details': {},
'id': 6,
'authored': {
'date': datetime(2013, 11, 8, 13, 38, 11, 152821),
'name': 'coldmind',
'email': ''
}, 'size': None},
{'parents': [4],
'refs': [],
'committed': {
'date': datetime(2010, 11, 18, 20, 14, 21, 515743),
'name': 'rick446',
'email': ''},
'message': 'Copied a => b',
'rename_details': {},
'id': 5,
'authored': {
'date': datetime(2010, 11, 18, 20, 14, 21, 515743),
'name': 'rick446',
'email': ''},
'size': None},
{'parents': [3],
'refs': [],
'committed': {
'date': datetime(2010, 10, 8, 15, 32, 59, 383719),
'name': 'rick446',
'email': ''},
'message': 'Remove hello.txt',
'rename_details': {},
'id': 4,
'authored': {
'date': datetime(2010, 10, 8, 15, 32, 59, 383719),
'name': 'rick446',
'email': ''},
'size': None},
{'parents': [2],
'refs': [],
'committed': {
'date': datetime(2010, 10, 8, 15, 32, 48, 272296),
'name': 'rick446',
'email': ''},
'message': 'Modify readme',
'rename_details': {},
'id': 3,
'authored':
{'date': datetime(2010, 10, 8, 15, 32, 48, 272296),
'name': 'rick446',
'email': ''},
'size': None},
{'parents': [1],
'refs': [],
'committed': {
'date': datetime(2010, 10, 8, 15, 32, 36, 221863),
'name': 'rick446',
'email': ''},
'message': 'Add path',
'rename_details': {},
'id': 2,
'authored': {
'date': datetime(2010, 10, 8, 15, 32, 36, 221863),
'name': 'rick446',
'email': ''},
'size': None},
{'parents': [],
'refs': [],
'committed': {
'date': datetime(2010, 10, 8, 15, 32, 7, 238375),
'name': 'rick446',
'email': ''},
'message': 'Create readme',
'rename_details': {},
'id': 1,
'authored': {
'date': datetime(2010, 10, 8, 15, 32, 7, 238375),
'name': 'rick446',
'email': ''},
'size': None}])
def test_log_file(self):
entries = list(self.repo.log(path='/README', id_only=False, limit=25))
assert_equal(entries, [
{'authored': {'date': datetime(2010, 10, 8, 15, 32, 48, 272296),
'email': '',
'name': 'rick446'},
'committed': {'date': datetime(2010, 10, 8, 15, 32, 48, 272296),
'email': '',
'name': 'rick446'},
'id': 3,
'message': 'Modify readme',
'parents': [2],
'refs': [],
'size': 28,
'rename_details': {}},
{'authored': {'date': datetime(2010, 10, 8, 15, 32, 7, 238375),
'email': '',
'name': 'rick446'},
'committed': {'date': datetime(2010, 10, 8, 15, 32, 7, 238375),
'email': '',
'name': 'rick446'},
'id': 1,
'message': 'Create readme',
'parents': [],
'refs': [],
'size': 15,
'rename_details': {}},
])
def test_is_file(self):
assert self.repo.is_file('/README')
assert not self.repo.is_file('/a')
def test_paged_diffs(self):
entry = self.repo.commit(next(self.repo.log(2, id_only=True, limit=1)))
self.assertEqual(entry.diffs, entry.paged_diffs())
self.assertEqual(entry.diffs, entry.paged_diffs(start=0))
added_expected = entry.diffs.added[1:3]
expected = dict(
copied=[], changed=[], removed=[], renamed=[],
added=added_expected, total=4)
actual = entry.paged_diffs(start=1, end=3)
self.assertEqual(expected, actual)
fake_id = self.repo._impl._oid(100)
empty = M.repository.Commit(_id=fake_id, repo=self.repo).paged_diffs()
self.assertEqual(sorted(actual.keys()), sorted(empty.keys()))
def test_diff_create_file(self):
entry = self.repo.commit(next(self.repo.log(1, id_only=True, limit=1)))
self.assertEqual(
entry.diffs, dict(
copied=[], changed=[], renamed=[],
removed=[], added=['/README'], total=1))
def test_diff_create_path(self):
entry = self.repo.commit(next(self.repo.log(2, id_only=True, limit=1)))
actual = entry.diffs
actual.added = sorted(actual.added)
self.assertEqual(
entry.diffs, dict(
copied=[], changed=[], removed=[], renamed=[],
added=sorted([
'/a', '/a/b', '/a/b/c',
'/a/b/c/hello.txt']), total=4))
def test_diff_modify_file(self):
entry = self.repo.commit(next(self.repo.log(3, id_only=True, limit=1)))
self.assertEqual(
entry.diffs, dict(
copied=[], changed=['/README'], renamed=[],
removed=[], added=[], total=1))
def test_diff_delete(self):
entry = self.repo.commit(next(self.repo.log(4, id_only=True, limit=1)))
self.assertEqual(
entry.diffs, dict(
copied=[], changed=[], renamed=[],
removed=['/a/b/c/hello.txt'], added=[], total=1))
def test_diff_copy(self):
entry = self.repo.commit(next(self.repo.log(5, id_only=True, limit=1)))
assert_equals(dict(entry.diffs), dict(
copied=[{'new': '/b', 'old': '/a', 'ratio': 1}], renamed=[],
changed=[], removed=[], added=[], total=1))
def test_commit(self):
entry = self.repo.commit(1)
assert entry.committed.name == 'rick446'
assert entry.message
def test_svn_path_exists(self):
repo_path = pkg_resources.resource_filename(
'forgesvn', 'tests/data/testsvn')
assert svn_path_exists("file://%s/a" % repo_path)
assert svn_path_exists("file://%s" % repo_path)
assert not svn_path_exists("file://%s/badpath" % repo_path)
with mock.patch('forgesvn.model.svn.pysvn') as pysvn:
svn_path_exists('dummy')
pysvn.Client.return_value.info2.assert_called_once_with(
'dummy',
revision=pysvn.Revision.return_value,
recurse=False)
@skipUnless(os.path.exists(tg.config.get('scm.repos.tarball.zip_binary', '/usr/bin/zip')), 'zip binary is missing')
def test_tarball(self):
tmpdir = tg.config['scm.repos.tarball.root']
assert_equal(self.repo.tarball_path,
os.path.join(tmpdir, 'svn/t/te/test/testsvn'))
assert_equal(self.repo.tarball_url('1'),
'file:///svn/t/te/test/testsvn/test-src-r1.zip')
self.repo.tarball('1')
assert os.path.isfile(
os.path.join(tmpdir, "svn/t/te/test/testsvn/test-src-r1.zip"))
tarball_zip = ZipFile(
os.path.join(tmpdir, 'svn/t/te/test/testsvn/test-src-r1.zip'), 'r')
assert_equal(tarball_zip.namelist(),
['test-src-r1/', 'test-src-r1/README'])
shutil.rmtree(self.repo.tarball_path.encode('utf-8'),
ignore_errors=True)
@skipUnless(os.path.exists(tg.config.get('scm.repos.tarball.zip_binary', '/usr/bin/zip')), 'zip binary is missing')
def test_tarball_paths(self):
rev = '19'
h.set_context('test', 'svn-tags', neighborhood='Projects')
tmpdir = tg.config['scm.repos.tarball.root']
tarball_path = os.path.join(tmpdir, 'svn/t/te/test/testsvn-trunk-tags-branches/')
# a tag
self.svn_tags.tarball(rev, '/tags/tag-1.0/')
fn = tarball_path + 'test-svn-tags-r19-tags-tag-1.0.zip'
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
tag_content = sorted(['test-svn-tags-r19-tags-tag-1.0/',
'test-svn-tags-r19-tags-tag-1.0/svn-commit.tmp',
'test-svn-tags-r19-tags-tag-1.0/README'])
assert_equal(sorted(snapshot.namelist()), tag_content)
os.remove(fn)
# a directory (of tags)
self.svn_tags.tarball(rev, '/tags/')
fn = tarball_path + 'test-svn-tags-r19-tags.zip'
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
tags_content = sorted(['test-svn-tags-r19-tags/',
'test-svn-tags-r19-tags/tag-1.0/',
'test-svn-tags-r19-tags/tag-1.0/svn-commit.tmp',
'test-svn-tags-r19-tags/tag-1.0/README'])
assert_equal(sorted(snapshot.namelist()), tags_content)
os.remove(fn)
        # no path, but there is a trunk in the repo
# expect snapshot of trunk
self.svn_tags.tarball(rev)
fn = tarball_path + 'test-svn-tags-r19-trunk.zip'
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
trunk_content = sorted(['test-svn-tags-r19-trunk/',
'test-svn-tags-r19-trunk/aaa.txt',
'test-svn-tags-r19-trunk/bbb.txt',
'test-svn-tags-r19-trunk/ccc.txt',
'test-svn-tags-r19-trunk/README'])
assert_equal(sorted(snapshot.namelist()), trunk_content)
os.remove(fn)
# no path, and no trunk dir
# expect snapshot of repo root
h.set_context('test', 'src', neighborhood='Projects')
fn = os.path.join(tmpdir, 'svn/t/te/test/testsvn/test-src-r1.zip')
self.repo.tarball('1')
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
assert_equal(snapshot.namelist(), ['test-src-r1/', 'test-src-r1/README'])
shutil.rmtree(os.path.join(tmpdir, 'svn/t/te/test/testsvn/'),
ignore_errors=True)
shutil.rmtree(tarball_path, ignore_errors=True)
def test_is_empty(self):
assert not self.repo.is_empty()
with TempDirectory() as d:
repo2 = SM.Repository(
name='test',
fs_path=d.path,
url_path='/test/',
tool='svn',
status='creating')
repo2.init()
assert repo2.is_empty()
repo2.refresh()
ThreadLocalORMSession.flush_all()
assert repo2.is_empty()
def test_webhook_payload(self):
sender = RepoPushWebhookSender()
all_commits = list(self.repo.all_commit_ids())
start = len(all_commits) - 6 # only get a few so test doesn't have to change after new testdata commits
cids = all_commits[start:start+2]
payload = sender.get_payload(commit_ids=cids)
expected_payload = {
'size': 2,
'after': 'r6',
'before': 'r4',
'commits': [{
'id': 'r6',
'url': 'http://localhost/p/test/src/6/',
'timestamp': datetime(2013, 11, 8, 13, 38, 11, 152000),
'message': '',
'author': {'name': 'coldmind',
'email': '',
'username': ''},
'committer': {'name': 'coldmind',
'email': '',
'username': ''},
'added': ['/ЗРЯЧИЙ_ТА_ПОБАЧИТЬ'],
'removed': [],
'modified': [],
'copied': [],
'renamed': [],
}, {
'id': 'r5',
'url': 'http://localhost/p/test/src/5/',
'timestamp': datetime(2010, 11, 18, 20, 14, 21, 515000),
'message': 'Copied a => b',
'author': {'name': 'rick446',
'email': '',
'username': ''},
'committer': {'name': 'rick446',
'email': '',
'username': ''},
'added': [],
'removed': [],
'modified': [],
'copied': [
{'new': '/b', 'old': '/a', 'ratio': 1},
],
'renamed': [],
}],
'repository': {
'name': 'SVN',
'full_name': '/p/test/src/',
'url': 'http://localhost/p/test/src/',
},
}
assert_equals(payload, expected_payload)
class TestSVNRev(unittest.TestCase):
def setUp(self):
setup_basic_test()
self.setup_with_tools()
@with_svn
def setup_with_tools(self):
setup_global_objects()
h.set_context('test', 'src', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
c.app.repo.name = 'testsvn'
c.app.repo.fs_path = repo_dir
self.repo = c.app.repo
self.repo.refresh()
self.rev = self.repo.commit(1)
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
def test_url(self):
assert self.rev.url().endswith('/1/')
def test_primary(self):
assert self.rev.primary() == self.rev
def test_shorthand(self):
assert self.rev.shorthand_id() == '[r1]'
def test_diff(self):
diffs = (self.rev.diffs.added
+ self.rev.diffs.removed
+ self.rev.diffs.changed
+ self.rev.diffs.copied)
for d in diffs:
print(d)
def _oid(self, rev_id):
return '%s:%s' % (self.repo._id, rev_id)
def test_log(self):
commits = list(self.repo.log(self.repo.head, id_only=True, limit=25))
assert_equal(commits, [7, 6, 5, 4, 3, 2, 1])
commits = list(self.repo.log(self.repo.head, 'README', id_only=True, limit=25))
assert_equal(commits, [3, 1])
commits = list(self.repo.log(1, 'README', id_only=True, limit=25))
assert_equal(commits, [1])
commits = list(self.repo.log(self.repo.head, 'a/b/c/', id_only=True, limit=25))
assert_equal(commits, [4, 2])
commits = list(self.repo.log(3, 'a/b/c/', id_only=True, limit=25))
assert_equal(commits, [2])
assert_equal(
list(self.repo.log(self.repo.head, 'does/not/exist', id_only=True, limit=25)), [])
def test_notification_email(self):
setup_global_objects()
h.set_context('test', 'src', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
self.repo = SM.Repository(
name='testsvn',
fs_path=repo_dir,
url_path='/test/',
tool='svn',
status='creating')
self.repo.refresh()
ThreadLocalORMSession.flush_all()
send_notifications(self.repo, [self.repo.rev_to_commit_id(1)])
ThreadLocalORMSession.flush_all()
n = M.Notification.query.find({'subject': '[test:src] New commit [r1] by rick446'}).first()
assert n
assert_in('By rick446', n.text)
assert_in('Create readme', n.text)
class _Test(unittest.TestCase):
idgen = ('obj_%d' % i for i in count())
def _make_tree(self, object_id, **kwargs):
t, isnew = M.repository.Tree.upsert(object_id)
repo = getattr(self, 'repo', None)
t.repo = repo
for k, v in six.iteritems(kwargs):
if isinstance(v, six.string_types):
obj = M.repository.Blob(
t, k, next(self.idgen))
t.blob_ids.append(Object(
name=k, id=obj._id))
else:
obj = self._make_tree(next(self.idgen), **v)
t.tree_ids.append(Object(
name=k, id=obj._id))
session(t).flush()
return t
def _make_commit(self, object_id, **tree_parts):
ci, isnew = M.repository.Commit.upsert(object_id)
if isnew:
ci.committed.email = c.user.email_addresses[0]
ci.authored.email = c.user.email_addresses[0]
dt = datetime.utcnow()
ci.authored.date = dt.replace(microsecond=dt.microsecond // 1000 * 1000)
        ci.message = 'summary\n\ndescription'
ci.set_context(self.repo)
ci.tree_id = 't_' + object_id
ci.tree = self._make_tree(ci.tree_id, **tree_parts)
return ci, isnew
def _make_log(self, ci):
session(ci).flush(ci)
def setUp(self):
setup_basic_test()
setup_global_objects()
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
self.prefix = tg.config.get('scm.repos.root', '/')
class _TestWithRepo(_Test):
def setUp(self):
super(_TestWithRepo, self).setUp()
h.set_context('test', neighborhood='Projects')
c.project.install_app('svn', 'test1')
h.set_context('test', 'test1', neighborhood='Projects')
self.repo = M.Repository(name='test1', tool='svn')
self.repo._impl = mock.Mock(spec=M.RepositoryImplementation())
self.repo._impl.shorthand_for_commit = M.RepositoryImplementation.shorthand_for_commit
self.repo._impl.url_for_commit = (
lambda *a, **kw: M.RepositoryImplementation.url_for_commit(
self.repo._impl, *a, **kw))
self.repo._impl._repo = self.repo
self.repo._impl.all_commit_ids = lambda *a, **kw: []
self.repo._impl.commit().symbolic_ids = None
ThreadLocalORMSession.flush_all()
class _TestWithRepoAndCommit(_TestWithRepo):
def setUp(self):
super(_TestWithRepoAndCommit, self).setUp()
self.ci, isnew = self._make_commit('foo')
ThreadLocalORMSession.flush_all()
class TestRepo(_TestWithRepo):
def test_create(self):
assert self.repo.fs_path == os.path.join(self.prefix, 'svn/p/test/')
assert self.repo.url_path == '/p/test/'
assert self.repo.full_fs_path == os.path.join(
self.prefix, 'svn/p/test/test1')
def test_passthrough(self):
argless = ['init']
for fn in argless:
getattr(self.repo, fn)()
getattr(self.repo._impl, fn).assert_called_with()
unary = ['commit', 'open_blob']
for fn in unary:
getattr(self.repo, fn)('foo')
getattr(self.repo._impl, fn).assert_called_with('foo')
def test_shorthand_for_commit(self):
self.assertEqual(
self.repo.shorthand_for_commit('a' * 40),
'[aaaaaa]')
def test_url_for_commit(self):
self.assertEqual(
self.repo.url_for_commit('a' * 40),
'/p/test/test1/ci/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/')
@mock.patch('allura.model.repository.g.post_event')
def test_init_as_clone(self, post_event):
self.repo.init_as_clone('srcpath', 'srcname', 'srcurl')
assert self.repo.upstream_repo.name == 'srcname'
assert self.repo.upstream_repo.url == 'srcurl'
assert self.repo._impl.clone_from.called_with('srcpath')
post_event.assert_called_once_with('repo_cloned', 'srcurl', 'srcpath')
def test_latest(self):
ci = mock.Mock()
self.repo._impl.commit = mock.Mock(return_value=ci)
assert self.repo.latest() is ci
def test_index(self):
i = self.repo.index()
assert i['type_s'] == 'Repository', i
assert i['name_s'] == 'test1', i
def test_scm_host_url(self):
assert_equal(self.repo.clone_url('rw', 'nobody'),
'svn+ssh://nobody@localhost:8022/scm-repo/p/test/test1/')
assert_equal(self.repo.clone_url('https', 'nobody'),
'https://nobody@localhost:8022/scm-repo/p/test/test1/')
with h.push_config(self.repo.app.config.options, external_checkout_url='https://$username@foo.com/'):
assert_equal(self.repo.clone_url('https', 'user'),
'https://user@foo.com/')
def test_guess_type(self):
assert self.repo.guess_type('foo.txt') == ('text/plain', None)
assert self.repo.guess_type('foo.gbaer') == (
'application/octet-stream', None)
assert self.repo.guess_type('foo.html') == ('text/html', None)
assert self.repo.guess_type('.gitignore') == ('text/plain', None)
def test_refresh(self):
committer_name = 'Test Committer'
committer_email = 'test@example.com'
ci = mock.Mock()
ci.authored.name = committer_name
ci.committed.name = committer_name
ci.committed.email = committer_email
ci.author_url = '/u/test-committer/'
ci.activity_name = '[deadbeef]'
ci.activity_url = 'url'
ci.activity_extras = {}
del ci.node_id
self.repo._impl.commit = mock.Mock(return_value=ci)
self.repo._impl.new_commits = mock.Mock(
return_value=['foo%d' % i for i in range(100)])
self.repo._impl.all_commit_ids = mock.Mock(
return_value=['foo%d' % i for i in range(100)])
self.repo.symbolics_for_commit = mock.Mock(
return_value=[['master', 'branch'], []])
def refresh_commit_info(oid, seen, lazy=False):
M.repository.CommitDoc(dict(
authored=dict(
name=committer_name,
date=datetime(2010, 10, 8, 15, 32, 48, 0),
email=committer_email),
_id=oid)).m.insert()
self.repo._impl.refresh_commit_info = refresh_commit_info
_id = lambda oid: getattr(oid, '_id', str(oid))
self.repo.shorthand_for_commit = lambda oid: '[' + _id(oid) + ']'
self.repo.url_for_commit = lambda oid: '/ci/' + _id(oid) + '/'
self.repo.refresh()
ThreadLocalORMSession.flush_all()
notifications = M.Notification.query.find().all()
for n in notifications:
if '100 new commits' in n.subject:
assert_in('By Test Committer on 10/08/2010 15:32', n.text)
assert_in('http://localhost/ci/foo99/', n.text)
break
else:
assert False, 'Did not find notification'
assert M.Feed.query.find(dict(
author_name=committer_name)).count() == 100
def test_refresh_private(self):
ci = mock.Mock()
self.repo._impl.commit = mock.Mock(return_value=ci)
self.repo._impl.new_commits = mock.Mock(
return_value=['foo%d' % i for i in range(100)])
self.repo.acl = []
c.project.acl = []
self.repo.refresh()
def test_push_upstream_context(self):
self.repo.init_as_clone('srcpath', '/p/test/svn/', '/p/test/svn/')
old_app_instance = M.Project.app_instance
try:
M.Project.app_instance = mock.Mock(return_value=ming.base.Object(
config=ming.base.Object(_id=None)))
with self.repo.push_upstream_context():
assert c.project.shortname == 'test'
finally:
M.Project.app_instance = old_app_instance
def test_pending_upstream_merges(self):
self.repo.init_as_clone('srcpath', '/p/test/svn/', '/p/test/svn/')
old_app_instance = M.Project.app_instance
try:
M.Project.app_instance = mock.Mock(return_value=ming.base.Object(
config=ming.base.Object(_id=None)))
self.repo.pending_upstream_merges()
finally:
M.Project.app_instance = old_app_instance
class TestRepoObject(_TestWithRepoAndCommit):
def test_upsert(self):
obj0, isnew0 = M.repository.Tree.upsert('foo1')
obj1, isnew1 = M.repository.Tree.upsert('foo1')
assert obj0 is obj1
assert isnew0 and not isnew1
def test_artifact_methods(self):
assert self.ci.index_id(
) == 'allura/model/repo/Commit#foo', self.ci.index_id()
assert self.ci.primary() is self.ci, self.ci.primary()
class TestCommit(_TestWithRepo):
def setUp(self):
super(TestCommit, self).setUp()
self.ci, isnew = self._make_commit(
'foo',
a=dict(
a=dict(
a='',
b='',),
b=''))
self.tree = self.ci.tree
impl = M.RepositoryImplementation()
impl._repo = self.repo
self.repo._impl.shorthand_for_commit = impl.shorthand_for_commit
self.repo._impl.url_for_commit = impl.url_for_commit
def test_upsert(self):
obj0, isnew0 = M.repository.Commit.upsert('foo')
obj1, isnew1 = M.repository.Commit.upsert('foo')
assert obj0 is obj1
assert not isnew1
u = M.User.by_username('test-admin')
assert self.ci.author_url == u.url()
assert self.ci.committer_url == u.url()
assert self.ci.tree is self.tree
assert self.ci.summary == 'summary'
assert self.ci.shorthand_id() == '[foo]'
assert self.ci.url() == '/p/test/test1/ci/foo/'
def test_get_path(self):
b = self.ci.get_path('a/a/a')
assert isinstance(b, M.repository.Blob)
x = self.ci.get_path('a/a')
assert isinstance(x, M.repository.Tree)
def _unique_blobs(self):
def counter():
counter.i += 1
return counter.i
counter.i = 0
blobs = defaultdict(counter)
        return lambda blob: BytesIO(str(blobs[blob.path()]).encode('utf-8'))
def test_diffs_file_renames(self):
def open_blob(blob):
blobs = {
'a': 'Leia',
'/b/a/a': 'Darth Vader',
'/b/a/b': 'Luke Skywalker',
'/b/b': 'Death Star will destroy you',
'/b/c': 'Luke Skywalker',
'/b/a/z': 'Death Star will destroy you\nALL',
}
            return BytesIO(blobs.get(blob.path(), '').encode('utf-8'))
self.repo._impl.open_blob = open_blob
self.repo._impl.commit = mock.Mock(return_value=self.ci)
self.repo._impl.paged_diffs.return_value = {
'added': ['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'],
'changed': [],
'copied': [],
'renamed': [],
'removed': [],
'total': 5,
}
assert_equal(self.ci.diffs.added,
['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'])
assert (self.ci.diffs.copied
== self.ci.diffs.changed
== self.ci.diffs.removed
== [])
ci, isnew = self._make_commit(
'bar',
b=dict(
a=dict(
a='',
b='',),
b=''))
ci.parent_ids = ['foo']
self._make_log(ci)
self.repo._impl.paged_diffs.return_value = {
'added': ['b', 'b/a', 'b/a/a', 'b/a/b', 'b/b'],
'renamed': [],
'copied': [],
'changed': [],
'removed': ['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'],
'total': 10,
}
assert_equal(ci.diffs.added, ['b', 'b/a', 'b/a/a', 'b/a/b', 'b/b'])
assert_equal(ci.diffs.removed, ['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'])
assert (ci.diffs.copied
== ci.diffs.changed
== [])
ci, isnew = self._make_commit(
'baz',
b=dict(
a=dict(
z=''),
c=''))
ci.parent_ids = ['bar']
self._make_log(ci)
self.repo._impl.paged_diffs.return_value = {
'added': ['b/c', 'b/a/z'],
'removed': ['/b/a/b', 'b/b'],
'changed': [],
'copied': [
{
'new': 'b/c',
'old': 'b/a/b',
'ratio': 1,
'diff': '',
},
{
'new': 'b/a/z',
'old': 'b/b',
'ratio': 1,
'diff': '',
},
],
'renamed': [],
'total': 2
}
assert_equal(ci.diffs.added, ['b/a/z', 'b/c'])
assert_equal(ci.diffs.changed, [])
assert_equal(ci.diffs.removed, ['/b/a/b', 'b/b'])
assert_equal(len(ci.diffs.copied), 2)
assert_equal(ci.diffs.copied[1]['old'], 'b/a/b')
assert_equal(ci.diffs.copied[1]['new'], 'b/c')
assert_equal(ci.diffs.copied[1]['ratio'], 1)
assert_equal(ci.diffs.copied[1]['diff'], '')
assert_equal(ci.diffs.copied[0]['old'], 'b/b')
assert_equal(ci.diffs.copied[0]['new'], 'b/a/z')
def test_context(self):
self.ci.context()
class TestRename(unittest.TestCase):
def setUp(self):
setup_basic_test()
self.setup_with_tools()
@with_svn
def setup_with_tools(self):
setup_global_objects()
h.set_context('test', 'src', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
c.app.repo.name = 'testsvn-rename'
c.app.repo.fs_path = repo_dir
self.repo = c.app.repo
self.repo.refresh()
self.rev = self.repo.commit('HEAD')
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
def test_log_file_with_rename(self):
entry = list(self.repo.log(path='/dir/b.txt', id_only=False, limit=1))[0]
assert_equal(entry['id'], 3)
assert_equal(entry['rename_details']['path'], '/dir/a.txt')
assert_equal(
entry['rename_details']['commit_url'],
self.repo.url_for_commit(2)
)
def test_check_changed_path(self):
changed_path = {'copyfrom_path': '/test/path', 'path': '/test/path2'}
result = self.repo._impl._check_changed_path(
changed_path, '/test/path2/file.txt')
assert_equal({'path': '/test/path2/file.txt',
'copyfrom_path': '/test/path/file.txt'}, result)
class TestDirectRepoAccess(object):
def setUp(self):
setup_basic_test()
self.setup_with_tools()
@with_svn
def setup_with_tools(self):
setup_global_objects()
h.set_context('test', 'src', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
c.app.repo.name = 'testsvn'
c.app.repo.fs_path = repo_dir
self.repo = c.app.repo
self.repo.refresh()
self.rev = self.repo.commit('HEAD')
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
def test_paged_diffs(self):
_id = self.repo._impl._oid(6)
diffs = self.repo.commit(_id).diffs
expected = {
'added': ['/ЗРЯЧИЙ_ТА_ПОБАЧИТЬ'],
'removed': [],
'changed': [],
'copied': [],
'renamed': [],
'total': 1,
}
assert_equals(diffs, expected)
_id = self.repo._impl._oid(2)
diffs = self.repo.commit(_id).diffs
expected = {
'added': ['/a', '/a/b', '/a/b/c', '/a/b/c/hello.txt'],
'removed': [],
'changed': [],
'renamed': [],
'copied': [],
'total': 4,
}
assert_equals(diffs, expected)
_id = self.repo._impl._oid(3)
diffs = self.repo.commit(_id).diffs
expected = {
'added': [],
'removed': [],
'renamed': [],
'changed': ['/README'],
'copied': [],
'total': 1,
}
assert_equals(diffs, expected)
_id = self.repo._impl._oid(4)
diffs = self.repo.commit(_id).diffs
expected = {
'added': [],
'removed': ['/a/b/c/hello.txt'],
'changed': [],
'renamed': [],
'copied': [],
'total': 1,
}
assert_equals(diffs, expected)
| true
| true
|
f71a788ec1af6202640b3afb171e260ba38421a6
| 18,525
|
py
|
Python
|
lib/spack/spack/cmd/install.py
|
padamson/spack
|
d3f67a48552691b4846ccc4a10f76740b154090c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
lib/spack/spack/cmd/install.py
|
padamson/spack
|
d3f67a48552691b4846ccc4a10f76740b154090c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11
|
2021-03-15T09:26:41.000Z
|
2022-02-28T15:08:23.000Z
|
lib/spack/spack/cmd/install.py
|
padamson/spack
|
d3f67a48552691b4846ccc4a10f76740b154090c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import argparse
import os
import shutil
import sys
import textwrap
import llnl.util.filesystem as fs
import llnl.util.tty as tty
import spack.build_environment
import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.environment as ev
import spack.fetch_strategy
import spack.monitor
import spack.paths
import spack.report
from spack.error import SpackError
from spack.installer import PackageInstaller
description = "build and install packages"
section = "build"
level = "short"
def update_kwargs_from_args(args, kwargs):
"""Parse cli arguments and construct a dictionary
that will be passed to the package installer."""
kwargs.update({
'fail_fast': args.fail_fast,
'keep_prefix': args.keep_prefix,
'keep_stage': args.keep_stage,
'restage': not args.dont_restage,
'install_source': args.install_source,
'verbose': args.verbose,
'fake': args.fake,
'dirty': args.dirty,
'use_cache': args.use_cache,
'cache_only': args.cache_only,
'include_build_deps': args.include_build_deps,
'explicit': True, # Always true for install command
'stop_at': args.until,
'unsigned': args.unsigned,
'full_hash_match': args.full_hash_match,
})
kwargs.update({
'install_deps': ('dependencies' in args.things_to_install),
'install_package': ('package' in args.things_to_install)
})
if hasattr(args, 'setup'):
setups = set()
for arglist_s in args.setup:
for arg in [x.strip() for x in arglist_s.split(',')]:
setups.add(arg)
kwargs['setup'] = setups
tty.msg('Setup={0}'.format(kwargs['setup']))
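# Illustrative example: for a plain `spack install <spec>` run with default
# flags, the kwargs assembled above include entries such as
#   {'fail_fast': False, 'keep_prefix': False, 'keep_stage': False,
#    'restage': True, 'explicit': True, 'install_deps': True,
#    'install_package': True, 'stop_at': None, ...}
# (the actual values depend on the command-line flags passed).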
def setup_parser(subparser):
subparser.add_argument(
'--only',
default='package,dependencies',
dest='things_to_install',
choices=['package', 'dependencies'],
help="""select the mode of installation.
the default is to install the package along with all its dependencies.
alternatively one can decide to install only the package or only
the dependencies"""
)
subparser.add_argument(
'-u', '--until', type=str, dest='until', default=None,
help="phase to stop after when installing (default None)")
arguments.add_common_arguments(subparser, ['jobs'])
subparser.add_argument(
'--overwrite', action='store_true',
help="reinstall an existing spec, even if it has dependents")
subparser.add_argument(
'--fail-fast', action='store_true',
help="stop all builds if any build fails (default is best effort)")
subparser.add_argument(
'--keep-prefix', action='store_true',
help="don't remove the install prefix if installation fails")
subparser.add_argument(
'--keep-stage', action='store_true',
help="don't remove the build stage if installation succeeds")
subparser.add_argument(
'--dont-restage', action='store_true',
help="if a partial install is detected, don't delete prior state")
cache_group = subparser.add_mutually_exclusive_group()
cache_group.add_argument(
'--use-cache', action='store_true', dest='use_cache', default=True,
help="check for pre-built Spack packages in mirrors (default)")
cache_group.add_argument(
'--no-cache', action='store_false', dest='use_cache', default=True,
help="do not check for pre-built Spack packages in mirrors")
cache_group.add_argument(
'--cache-only', action='store_true', dest='cache_only', default=False,
help="only install package from binary mirrors")
monitor_group = spack.monitor.get_monitor_group(subparser) # noqa
subparser.add_argument(
'--include-build-deps', action='store_true', dest='include_build_deps',
default=False, help="""include build deps when installing from cache,
which is useful for CI pipeline troubleshooting""")
subparser.add_argument(
'--no-check-signature', action='store_true',
dest='unsigned', default=False,
help="do not check signatures of binary packages")
subparser.add_argument(
'--require-full-hash-match', action='store_true',
dest='full_hash_match', default=False, help="""when installing from
binary mirrors, do not install binary package unless the full hash of the
remote spec matches that of the local spec""")
subparser.add_argument(
'--show-log-on-error', action='store_true',
help="print full build log to stderr if build fails")
subparser.add_argument(
'--source', action='store_true', dest='install_source',
help="install source files in prefix")
arguments.add_common_arguments(subparser, ['no_checksum', 'deprecated'])
subparser.add_argument(
'-v', '--verbose', action='store_true',
help="display verbose build output while installing")
subparser.add_argument(
'--fake', action='store_true',
help="fake install for debug purposes.")
subparser.add_argument(
'--only-concrete', action='store_true', default=False,
help='(with environment) only install already concretized specs')
subparser.add_argument(
'--no-add', action='store_true', default=False,
help="""(with environment) only install specs provided as argument
if they are already in the concretized environment""")
subparser.add_argument(
'-f', '--file', action='append', default=[],
dest='specfiles', metavar='SPEC_YAML_FILE',
help="install from file. Read specs to install from .yaml files")
cd_group = subparser.add_mutually_exclusive_group()
arguments.add_common_arguments(cd_group, ['clean', 'dirty'])
testing = subparser.add_mutually_exclusive_group()
testing.add_argument(
'--test', default=None,
choices=['root', 'all'],
help="""If 'root' is chosen, run package tests during
installation for top-level packages (but skip tests for dependencies).
If 'all' is chosen, run package tests during installation for all
packages. If neither is chosen, don't run tests for any packages."""
)
testing.add_argument(
'--run-tests', action='store_true',
help='run package tests during installation (same as --test=all)'
)
subparser.add_argument(
'--log-format',
default=None,
choices=spack.report.valid_formats,
help="format to be used for log files"
)
subparser.add_argument(
'--log-file',
default=None,
help="filename for the log file. if not passed a default will be used"
)
subparser.add_argument(
'--help-cdash',
action='store_true',
help="Show usage instructions for CDash reporting"
)
arguments.add_cdash_args(subparser, False)
arguments.add_common_arguments(subparser, ['yes_to_all', 'spec'])
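# A few illustrative command lines exercising the options registered above
# (package names are placeholders):
#   spack install zlib
#   spack install --only dependencies hdf5
#   spack install --test=root --keep-stage hdf5
#   spack install --no-cache --fail-fast mypackage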
def default_log_file(spec):
"""Computes the default filename for the log file and creates
the corresponding directory if not present
"""
fmt = 'test-{x.name}-{x.version}-{hash}.xml'
basename = fmt.format(x=spec, hash=spec.dag_hash())
dirname = fs.os.path.join(spack.paths.reports_path, 'junit')
fs.mkdirp(dirname)
return fs.os.path.join(dirname, basename)
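# For example (illustrative name, version and hash), a spec 'zlib' at version
# '1.2.11' with dag hash 'abcdef' gets a report file like
#   <spack.paths.reports_path>/junit/test-zlib-1.2.11-abcdef.xml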
def install_specs(cli_args, kwargs, specs):
"""Do the actual installation.
Args:
cli_args (argparse.Namespace): argparse namespace with command arguments
kwargs (dict): keyword arguments
specs (list): list of (abstract, concrete) spec tuples
"""
# handle active environment, if any
env = ev.get_env(cli_args, 'install')
try:
if env:
specs_to_install = []
specs_to_add = []
for abstract, concrete in specs:
                # This won't find specs added to the env since the last
                # concretize; should we therefore consider enforcing
                # concretization of the env before allowing specs to be
                # installed?
m_spec = env.matching_spec(abstract)
# If there is any ambiguity in the above call to matching_spec
# (i.e. if more than one spec in the environment matches), then
                # SpackEnvironmentError is raised, with a message listing
                # the matches. Getting to this point means there were either
# no matches or exactly one match.
if not m_spec:
tty.debug('{0} matched nothing in the env'.format(
abstract.name))
# no matches in the env
if cli_args.no_add:
msg = ('You asked to install {0} without adding it ' +
'(--no-add), but no such spec exists in ' +
'environment').format(abstract.name)
tty.die(msg)
else:
tty.debug('adding {0} as a root'.format(abstract.name))
specs_to_add.append((abstract, concrete))
continue
tty.debug('exactly one match for {0} in env -> {1}'.format(
m_spec.name, m_spec.dag_hash()))
if m_spec in env.roots() or cli_args.no_add:
# either the single match is a root spec (and --no-add is
                    # the default for roots) or --no-add was stated explicitly
tty.debug('just install {0}'.format(m_spec.name))
specs_to_install.append(m_spec)
else:
# the single match is not a root (i.e. it's a dependency),
# and --no-add was not specified, so we'll add it as a
# root before installing
tty.debug('add {0} then install it'.format(m_spec.name))
specs_to_add.append((abstract, concrete))
if specs_to_add:
tty.debug('Adding the following specs as roots:')
for abstract, concrete in specs_to_add:
tty.debug(' {0}'.format(abstract.name))
with env.write_transaction():
specs_to_install.append(
env.concretize_and_add(abstract, concrete))
env.write(regenerate=False)
# Install the validated list of cli specs
if specs_to_install:
tty.debug('Installing the following cli specs:')
for s in specs_to_install:
tty.debug(' {0}'.format(s.name))
env.install_specs(specs_to_install, args=cli_args, **kwargs)
else:
installs = [(concrete.package, kwargs) for _, concrete in specs]
builder = PackageInstaller(installs)
builder.install()
except spack.build_environment.InstallError as e:
if cli_args.show_log_on_error:
e.print_context()
if not os.path.exists(e.pkg.build_log_path):
tty.error("'spack install' created no log.")
else:
sys.stderr.write('Full build log:\n')
with open(e.pkg.build_log_path) as log:
shutil.copyfileobj(log, sys.stderr)
raise
def install(parser, args, **kwargs):
if args.help_cdash:
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent('''\
environment variables:
SPACK_CDASH_AUTH_TOKEN
authentication token to present to CDash
'''))
arguments.add_cdash_args(parser, True)
parser.print_help()
return
# The user wants to monitor builds using github.com/spack/spack-monitor
if args.use_monitor:
monitor = spack.monitor.get_client(
host=args.monitor_host,
prefix=args.monitor_prefix,
disable_auth=args.monitor_disable_auth,
tags=args.monitor_tags,
save_local=args.monitor_save_local,
)
reporter = spack.report.collect_info(
spack.package.PackageInstaller, '_install_task', args.log_format, args)
if args.log_file:
reporter.filename = args.log_file
if args.run_tests:
tty.warn("Deprecated option: --run-tests: use --test=all instead")
def get_tests(specs):
if args.test == 'all' or args.run_tests:
return True
elif args.test == 'root':
return [spec.name for spec in specs]
else:
return False
if not args.spec and not args.specfiles:
# if there are no args but an active environment
# then install the packages from it.
env = ev.get_env(args, 'install')
if env:
tests = get_tests(env.user_specs)
kwargs['tests'] = tests
if not args.only_concrete:
with env.write_transaction():
concretized_specs = env.concretize(tests=tests)
ev.display_specs(concretized_specs)
# save view regeneration for later, so that we only do it
# once, as it can be slow.
env.write(regenerate=False)
specs = env.all_specs()
if not args.log_file and not reporter.filename:
reporter.filename = default_log_file(specs[0])
reporter.specs = specs
# Tell the monitor about the specs
if args.use_monitor and specs:
monitor.new_configuration(specs)
tty.msg("Installing environment {0}".format(env.name))
with reporter('build'):
env.install_all(args, **kwargs)
tty.debug("Regenerating environment views for {0}"
.format(env.name))
with env.write_transaction():
# write env to trigger view generation and modulefile
# generation
env.write()
return
else:
msg = "install requires a package argument or active environment"
if 'spack.yaml' in os.listdir(os.getcwd()):
# There's a spack.yaml file in the working dir, the user may
# have intended to use that
msg += "\n\n"
msg += "Did you mean to install using the `spack.yaml`"
msg += " in this directory? Try: \n"
msg += " spack env activate .\n"
msg += " spack install\n"
msg += " OR\n"
msg += " spack --env . install"
tty.die(msg)
if args.no_checksum:
spack.config.set('config:checksum', False, scope='command_line')
if args.deprecated:
spack.config.set('config:deprecated', True, scope='command_line')
# Parse cli arguments and construct a dictionary
# that will be passed to the package installer
update_kwargs_from_args(args, kwargs)
# 1. Abstract specs from cli
abstract_specs = spack.cmd.parse_specs(args.spec)
tests = get_tests(abstract_specs)
kwargs['tests'] = tests
try:
specs = spack.cmd.parse_specs(
args.spec, concretize=True, tests=tests)
except SpackError as e:
tty.debug(e)
reporter.concretization_report(e.message)
raise
# 2. Concrete specs from yaml files
for file in args.specfiles:
with open(file, 'r') as f:
s = spack.spec.Spec.from_yaml(f)
concretized = s.concretized()
if concretized.dag_hash() != s.dag_hash():
msg = 'skipped invalid file "{0}". '
msg += 'The file does not contain a concrete spec.'
tty.warn(msg.format(file))
continue
abstract_specs.append(s)
specs.append(concretized)
if len(specs) == 0:
tty.die('The `spack install` command requires a spec to install.')
if not args.log_file and not reporter.filename:
reporter.filename = default_log_file(specs[0])
reporter.specs = specs
with reporter('build'):
if args.overwrite:
installed = list(filter(lambda x: x,
map(spack.store.db.query_one, specs)))
if not args.yes_to_all:
display_args = {
'long': True,
'show_flags': True,
'variants': True
}
if installed:
tty.msg('The following package specs will be '
'reinstalled:\n')
spack.cmd.display_specs(installed, **display_args)
not_installed = list(filter(lambda x: x not in installed,
specs))
if not_installed:
tty.msg('The following package specs are not installed and'
' the --overwrite flag was given. The package spec'
' will be newly installed:\n')
spack.cmd.display_specs(not_installed, **display_args)
# We have some specs, so one of the above must have been true
answer = tty.get_yes_or_no(
'Do you want to proceed?', default=False
)
if not answer:
tty.die('Reinstallation aborted.')
# overwrite all concrete explicit specs from this build
kwargs['overwrite'] = [spec.dag_hash() for spec in specs]
# Update install_args with the monitor args, needed for build task
kwargs.update({
"monitor_disable_auth": args.monitor_disable_auth,
"monitor_keep_going": args.monitor_keep_going,
"monitor_host": args.monitor_host,
"use_monitor": args.use_monitor,
"monitor_prefix": args.monitor_prefix,
})
    # If we are using the monitor, send the configs and create the build.
    # The full_hash is the main package id; the build_hash is used for the others.
if args.use_monitor and specs:
monitor.new_configuration(specs)
install_specs(args, kwargs, zip(abstract_specs, specs))
| 39.33121
| 80
| 0.601404
|
import argparse
import os
import shutil
import sys
import textwrap
import llnl.util.filesystem as fs
import llnl.util.tty as tty
import spack.build_environment
import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.environment as ev
import spack.fetch_strategy
import spack.monitor
import spack.paths
import spack.report
from spack.error import SpackError
from spack.installer import PackageInstaller
description = "build and install packages"
section = "build"
level = "short"
def update_kwargs_from_args(args, kwargs):
kwargs.update({
'fail_fast': args.fail_fast,
'keep_prefix': args.keep_prefix,
'keep_stage': args.keep_stage,
'restage': not args.dont_restage,
'install_source': args.install_source,
'verbose': args.verbose,
'fake': args.fake,
'dirty': args.dirty,
'use_cache': args.use_cache,
'cache_only': args.cache_only,
'include_build_deps': args.include_build_deps,
'explicit': True,
'stop_at': args.until,
'unsigned': args.unsigned,
'full_hash_match': args.full_hash_match,
})
kwargs.update({
'install_deps': ('dependencies' in args.things_to_install),
'install_package': ('package' in args.things_to_install)
})
if hasattr(args, 'setup'):
setups = set()
for arglist_s in args.setup:
for arg in [x.strip() for x in arglist_s.split(',')]:
setups.add(arg)
kwargs['setup'] = setups
tty.msg('Setup={0}'.format(kwargs['setup']))
def setup_parser(subparser):
subparser.add_argument(
'--only',
default='package,dependencies',
dest='things_to_install',
choices=['package', 'dependencies'],
help="""select the mode of installation.
the default is to install the package along with all its dependencies.
alternatively one can decide to install only the package or only
the dependencies"""
)
subparser.add_argument(
'-u', '--until', type=str, dest='until', default=None,
help="phase to stop after when installing (default None)")
arguments.add_common_arguments(subparser, ['jobs'])
subparser.add_argument(
'--overwrite', action='store_true',
help="reinstall an existing spec, even if it has dependents")
subparser.add_argument(
'--fail-fast', action='store_true',
help="stop all builds if any build fails (default is best effort)")
subparser.add_argument(
'--keep-prefix', action='store_true',
help="don't remove the install prefix if installation fails")
subparser.add_argument(
'--keep-stage', action='store_true',
help="don't remove the build stage if installation succeeds")
subparser.add_argument(
'--dont-restage', action='store_true',
help="if a partial install is detected, don't delete prior state")
cache_group = subparser.add_mutually_exclusive_group()
cache_group.add_argument(
'--use-cache', action='store_true', dest='use_cache', default=True,
help="check for pre-built Spack packages in mirrors (default)")
cache_group.add_argument(
'--no-cache', action='store_false', dest='use_cache', default=True,
help="do not check for pre-built Spack packages in mirrors")
cache_group.add_argument(
'--cache-only', action='store_true', dest='cache_only', default=False,
help="only install package from binary mirrors")
monitor_group = spack.monitor.get_monitor_group(subparser) # noqa
subparser.add_argument(
'--include-build-deps', action='store_true', dest='include_build_deps',
default=False, help="""include build deps when installing from cache,
which is useful for CI pipeline troubleshooting""")
subparser.add_argument(
'--no-check-signature', action='store_true',
dest='unsigned', default=False,
help="do not check signatures of binary packages")
subparser.add_argument(
'--require-full-hash-match', action='store_true',
dest='full_hash_match', default=False, help="""when installing from
binary mirrors, do not install binary package unless the full hash of the
remote spec matches that of the local spec""")
subparser.add_argument(
'--show-log-on-error', action='store_true',
help="print full build log to stderr if build fails")
subparser.add_argument(
'--source', action='store_true', dest='install_source',
help="install source files in prefix")
arguments.add_common_arguments(subparser, ['no_checksum', 'deprecated'])
subparser.add_argument(
'-v', '--verbose', action='store_true',
help="display verbose build output while installing")
subparser.add_argument(
'--fake', action='store_true',
help="fake install for debug purposes.")
subparser.add_argument(
'--only-concrete', action='store_true', default=False,
help='(with environment) only install already concretized specs')
subparser.add_argument(
'--no-add', action='store_true', default=False,
help="""(with environment) only install specs provided as argument
if they are already in the concretized environment""")
subparser.add_argument(
'-f', '--file', action='append', default=[],
dest='specfiles', metavar='SPEC_YAML_FILE',
help="install from file. Read specs to install from .yaml files")
cd_group = subparser.add_mutually_exclusive_group()
arguments.add_common_arguments(cd_group, ['clean', 'dirty'])
testing = subparser.add_mutually_exclusive_group()
testing.add_argument(
'--test', default=None,
choices=['root', 'all'],
help="""If 'root' is chosen, run package tests during
installation for top-level packages (but skip tests for dependencies).
If 'all' is chosen, run package tests during installation for all
packages. If neither is chosen, don't run tests for any packages."""
)
testing.add_argument(
'--run-tests', action='store_true',
help='run package tests during installation (same as --test=all)'
)
subparser.add_argument(
'--log-format',
default=None,
choices=spack.report.valid_formats,
help="format to be used for log files"
)
subparser.add_argument(
'--log-file',
default=None,
help="filename for the log file. if not passed a default will be used"
)
subparser.add_argument(
'--help-cdash',
action='store_true',
help="Show usage instructions for CDash reporting"
)
arguments.add_cdash_args(subparser, False)
arguments.add_common_arguments(subparser, ['yes_to_all', 'spec'])
def default_log_file(spec):
fmt = 'test-{x.name}-{x.version}-{hash}.xml'
basename = fmt.format(x=spec, hash=spec.dag_hash())
dirname = fs.os.path.join(spack.paths.reports_path, 'junit')
fs.mkdirp(dirname)
return fs.os.path.join(dirname, basename)
def install_specs(cli_args, kwargs, specs):
env = ev.get_env(cli_args, 'install')
try:
if env:
specs_to_install = []
specs_to_add = []
for abstract, concrete in specs:
                # This won't find specs added to the env since last
                # concretize, therefore should we consider enforcing
# concretization of the env before allowing to install
# specs?
m_spec = env.matching_spec(abstract)
# If there is any ambiguity in the above call to matching_spec
# (i.e. if more than one spec in the environment matches), then
                # SpackEnvironmentError is raised, with a message listing
                # the matches. Getting to this point means there were either
# no matches or exactly one match.
if not m_spec:
tty.debug('{0} matched nothing in the env'.format(
abstract.name))
# no matches in the env
if cli_args.no_add:
msg = ('You asked to install {0} without adding it ' +
'(--no-add), but no such spec exists in ' +
                               'the environment').format(abstract.name)
tty.die(msg)
else:
tty.debug('adding {0} as a root'.format(abstract.name))
specs_to_add.append((abstract, concrete))
continue
tty.debug('exactly one match for {0} in env -> {1}'.format(
m_spec.name, m_spec.dag_hash()))
if m_spec in env.roots() or cli_args.no_add:
# either the single match is a root spec (and --no-add is
                    # the default for roots) or --no-add was stated explicitly
tty.debug('just install {0}'.format(m_spec.name))
specs_to_install.append(m_spec)
else:
# the single match is not a root (i.e. it's a dependency),
                    # and --no-add was not specified, so we'll add it as a
                    # root before installing
tty.debug('add {0} then install it'.format(m_spec.name))
specs_to_add.append((abstract, concrete))
if specs_to_add:
tty.debug('Adding the following specs as roots:')
for abstract, concrete in specs_to_add:
tty.debug(' {0}'.format(abstract.name))
with env.write_transaction():
specs_to_install.append(
env.concretize_and_add(abstract, concrete))
env.write(regenerate=False)
# Install the validated list of cli specs
if specs_to_install:
tty.debug('Installing the following cli specs:')
for s in specs_to_install:
tty.debug(' {0}'.format(s.name))
env.install_specs(specs_to_install, args=cli_args, **kwargs)
else:
installs = [(concrete.package, kwargs) for _, concrete in specs]
builder = PackageInstaller(installs)
builder.install()
except spack.build_environment.InstallError as e:
if cli_args.show_log_on_error:
e.print_context()
if not os.path.exists(e.pkg.build_log_path):
tty.error("'spack install' created no log.")
else:
sys.stderr.write('Full build log:\n')
with open(e.pkg.build_log_path) as log:
shutil.copyfileobj(log, sys.stderr)
raise
def install(parser, args, **kwargs):
if args.help_cdash:
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent('''\
environment variables:
SPACK_CDASH_AUTH_TOKEN
authentication token to present to CDash
'''))
arguments.add_cdash_args(parser, True)
parser.print_help()
return
# The user wants to monitor builds using github.com/spack/spack-monitor
if args.use_monitor:
monitor = spack.monitor.get_client(
host=args.monitor_host,
prefix=args.monitor_prefix,
disable_auth=args.monitor_disable_auth,
tags=args.monitor_tags,
save_local=args.monitor_save_local,
)
reporter = spack.report.collect_info(
spack.package.PackageInstaller, '_install_task', args.log_format, args)
if args.log_file:
reporter.filename = args.log_file
if args.run_tests:
tty.warn("Deprecated option: --run-tests: use --test=all instead")
def get_tests(specs):
if args.test == 'all' or args.run_tests:
return True
elif args.test == 'root':
return [spec.name for spec in specs]
else:
return False
if not args.spec and not args.specfiles:
# if there are no args but an active environment
# then install the packages from it.
env = ev.get_env(args, 'install')
if env:
tests = get_tests(env.user_specs)
kwargs['tests'] = tests
if not args.only_concrete:
with env.write_transaction():
concretized_specs = env.concretize(tests=tests)
ev.display_specs(concretized_specs)
# save view regeneration for later, so that we only do it
# once, as it can be slow.
env.write(regenerate=False)
specs = env.all_specs()
if not args.log_file and not reporter.filename:
reporter.filename = default_log_file(specs[0])
reporter.specs = specs
# Tell the monitor about the specs
if args.use_monitor and specs:
monitor.new_configuration(specs)
tty.msg("Installing environment {0}".format(env.name))
with reporter('build'):
env.install_all(args, **kwargs)
tty.debug("Regenerating environment views for {0}"
.format(env.name))
with env.write_transaction():
# write env to trigger view generation and modulefile
# generation
env.write()
return
else:
msg = "install requires a package argument or active environment"
if 'spack.yaml' in os.listdir(os.getcwd()):
            # There's a spack.yaml file in the working dir, the user may
            # have intended to use that
msg += "\n\n"
msg += "Did you mean to install using the `spack.yaml`"
msg += " in this directory? Try: \n"
msg += " spack env activate .\n"
msg += " spack install\n"
msg += " OR\n"
msg += " spack --env . install"
tty.die(msg)
if args.no_checksum:
spack.config.set('config:checksum', False, scope='command_line')
if args.deprecated:
spack.config.set('config:deprecated', True, scope='command_line')
update_kwargs_from_args(args, kwargs)
abstract_specs = spack.cmd.parse_specs(args.spec)
tests = get_tests(abstract_specs)
kwargs['tests'] = tests
try:
specs = spack.cmd.parse_specs(
args.spec, concretize=True, tests=tests)
except SpackError as e:
tty.debug(e)
reporter.concretization_report(e.message)
raise
for file in args.specfiles:
with open(file, 'r') as f:
s = spack.spec.Spec.from_yaml(f)
concretized = s.concretized()
if concretized.dag_hash() != s.dag_hash():
msg = 'skipped invalid file "{0}". '
msg += 'The file does not contain a concrete spec.'
tty.warn(msg.format(file))
continue
abstract_specs.append(s)
specs.append(concretized)
if len(specs) == 0:
tty.die('The `spack install` command requires a spec to install.')
if not args.log_file and not reporter.filename:
reporter.filename = default_log_file(specs[0])
reporter.specs = specs
with reporter('build'):
if args.overwrite:
installed = list(filter(lambda x: x,
map(spack.store.db.query_one, specs)))
if not args.yes_to_all:
display_args = {
'long': True,
'show_flags': True,
'variants': True
}
if installed:
tty.msg('The following package specs will be '
'reinstalled:\n')
spack.cmd.display_specs(installed, **display_args)
not_installed = list(filter(lambda x: x not in installed,
specs))
if not_installed:
tty.msg('The following package specs are not installed and'
' the --overwrite flag was given. The package spec'
' will be newly installed:\n')
spack.cmd.display_specs(not_installed, **display_args)
answer = tty.get_yes_or_no(
'Do you want to proceed?', default=False
)
if not answer:
tty.die('Reinstallation aborted.')
kwargs['overwrite'] = [spec.dag_hash() for spec in specs]
kwargs.update({
"monitor_disable_auth": args.monitor_disable_auth,
"monitor_keep_going": args.monitor_keep_going,
"monitor_host": args.monitor_host,
"use_monitor": args.use_monitor,
"monitor_prefix": args.monitor_prefix,
})
if args.use_monitor and specs:
monitor.new_configuration(specs)
install_specs(args, kwargs, zip(abstract_specs, specs))
| true
| true
|
f71a797ad5205b8689b1860ffa5202d4a2793de5
| 4,727
|
py
|
Python
|
serene_index/tests/test_make_solr_document.py
|
NICTA/serene-etl
|
1d446012c0d08a95b8fbbbe8237735320a2fe2a4
|
[
"Apache-2.0"
] | null | null | null |
serene_index/tests/test_make_solr_document.py
|
NICTA/serene-etl
|
1d446012c0d08a95b8fbbbe8237735320a2fe2a4
|
[
"Apache-2.0"
] | null | null | null |
serene_index/tests/test_make_solr_document.py
|
NICTA/serene-etl
|
1d446012c0d08a95b8fbbbe8237735320a2fe2a4
|
[
"Apache-2.0"
] | null | null | null |
from unittest import TestCase
import importlib
import ast
import json
import logging
from ConfigParser import ConfigParser
from io import StringIO
from mock import patch
#ignore pycountry debug logging
quiet = logging.getLogger('pycountry.db')
quiet.setLevel(logging.ERROR)
class TestMake_solr_document(TestCase):
record = {
"src_file_rec": "travel/travel.csv:1",
"dob": "17/04/1979",
"name": "George Jetson",
"passport_no": "99999999",
"passport_country": "NZ",
"departure_port": "SYD",
"arrival_port": "AKL"
}
meta = {
"src_file_cid": 10
}
result = {
'Airport.country_ss': [u'NZ', u'AU'],
'Airport.geoloc': [u'-37.008056,174.791667', u'-33.946111,151.177222'],
'Airport_ss': [u'Sydney Intl (SYD)', u'Auckland Intl (AKL)'],
'Departed.timestamp_ss': [u'2013-12-10T00:00:00Z'],
'Entity_ss': [u'GEORGE JETSON'],
'Event_ss': [u'UNKNOWN'],
'Flight_ss': [u'UNKNOWN'],
'IssuedDocument.country_ss': [u'NZ'],
'IssuedDocument_ss': [u'99999999'],
'Location.geoloc': [u'-37.008056,174.791667', u'-33.946111,151.177222'],
'Location_ss': [u'Sydney Intl (SYD)', u'Auckland Intl (AKL)'],
'Passport.country_ss': [u'NZ'],
'Passport_ss': [u'99999999'],
'Person_ss': [u'GEORGE JETSON'],
'Port.geoloc': [u'-37.008056,174.791667', u'-33.946111,151.177222'],
'Port_ss': [u'Sydney Intl (SYD)', u'Auckland Intl (AKL)'],
'Travelled.timestamp_ss': [u'2013-12-10T00:00:00Z'],
'arrival_port_ss': [u'AKL'],
'attr_types': [u'Airport.geoloc', u'Departed.timestamp', u'Airport.country', u'city', u'dob', u'country',
u'IssuedDocument.country', u'Location.geoloc', u'arrival_port', u'departure_port', u'geoloc',
u'Travelled.timestamp', u'timestamp', u'Passport.country',u'Port.geoloc'],
'city_ss': [u'Sydney', u'Auckland'],
'country_ss': [u'NZ', u'AU'],
'data': u'["Person","GEORGE JETSON",[["dob","1979-04-17T00:00:00Z"]],[["Holds",[],[["Passport","99999999",'
'[["country","NZ"]],[["Travelled",[["timestamp","2013-12-10T00:00:00Z"]],[["Flight","UNKNOWN",'
'[["departure_port","SYD"],["arrival_port","AKL"]],[["Departed",[["timestamp",'
'"2013-12-10T00:00:00Z"]],[["Airport","Sydney Intl (SYD)",[["country","AU"],'
'["geoloc","-33.946111,151.177222"],["city","Sydney"]],[]]]],["Arrived",[],[["Airport",'
'"Auckland Intl (AKL)",[["country","NZ"],["geoloc","-37.008056,174.791667"],["city","Auckland"]],'
'[]]]]]]]]]]]]]]',
'departure_port_ss': [u'SYD'],
'dob_dts': [u'1979-04-17T00:00:00Z'],
'geoloc': [u'-37.008056,174.791667', u'-33.946111,151.177222'],
'id': u'6daf5bc24d75fe36d25700633359fbe7c166d66ef19fc8eba236fa8670e7fa40',
'link_types': [u'Travelled', u'Holds', u'Arrived', u'Departed'],
'object_types': [u'Flight', u'Entity', u'Person', u'Airport', u'Location', u'Passport', u'IssuedDocument',
u'Port', u'Event'],
'raw': u'{"arrival_port":"AKL","departure_port":"SYD","dob":"17/04/1979","name":"George Jetson",' +
'"passport_country":"NZ","passport_no":"99999999"}',
'src_file_cid': 10,
'src_file_rec': [u'test/file_1:1'],
'timestamp_ss': [u'2013-12-10T00:00:00Z']
}
@patch('serene_metadata.config.SereneConfig')
def setUp(self, mock_config):
from serene_index.helpers.index_helpers import mk_error_counter, make_solr_document
from serene_metadata import generate_example_metadata
self.error_counter = mk_error_counter()
module = importlib.import_module('serene_index.modules.module_flight')
self.builder = getattr(module, 'record_builder', None)
generated_metadata = generate_example_metadata()
print json.dumps(generated_metadata, indent=1)
self.meta.update(generated_metadata)
self.result.update(generated_metadata)
self.solr_doc = make_solr_document(r=self.record, builder=self.builder, base=self.meta, debug=False, error_counter=self.error_counter)
def tearDown(self):
self.error_counter = None
def test_make_solr_document(self):
self.maxDiff = None
self.assertDictEqual(self.result, self.solr_doc)
# a = json.dumps(self.solr_doc, indent=1, sort_keys=True)
# b = json.dumps(self.result, indent=1, sort_keys=True)
def test_json_correct(self):
rec = json.dumps(self.solr_doc)
self.assertTrue(json.loads(rec), 'Solr document does not parse as json')
| 43.768519
| 142
| 0.606727
|
from unittest import TestCase
import importlib
import ast
import json
import logging
from ConfigParser import ConfigParser
from io import StringIO
from mock import patch
quiet = logging.getLogger('pycountry.db')
quiet.setLevel(logging.ERROR)
class TestMake_solr_document(TestCase):
record = {
"src_file_rec": "travel/travel.csv:1",
"dob": "17/04/1979",
"name": "George Jetson",
"passport_no": "99999999",
"passport_country": "NZ",
"departure_port": "SYD",
"arrival_port": "AKL"
}
meta = {
"src_file_cid": 10
}
result = {
'Airport.country_ss': [u'NZ', u'AU'],
'Airport.geoloc': [u'-37.008056,174.791667', u'-33.946111,151.177222'],
'Airport_ss': [u'Sydney Intl (SYD)', u'Auckland Intl (AKL)'],
'Departed.timestamp_ss': [u'2013-12-10T00:00:00Z'],
'Entity_ss': [u'GEORGE JETSON'],
'Event_ss': [u'UNKNOWN'],
'Flight_ss': [u'UNKNOWN'],
'IssuedDocument.country_ss': [u'NZ'],
'IssuedDocument_ss': [u'99999999'],
'Location.geoloc': [u'-37.008056,174.791667', u'-33.946111,151.177222'],
'Location_ss': [u'Sydney Intl (SYD)', u'Auckland Intl (AKL)'],
'Passport.country_ss': [u'NZ'],
'Passport_ss': [u'99999999'],
'Person_ss': [u'GEORGE JETSON'],
'Port.geoloc': [u'-37.008056,174.791667', u'-33.946111,151.177222'],
'Port_ss': [u'Sydney Intl (SYD)', u'Auckland Intl (AKL)'],
'Travelled.timestamp_ss': [u'2013-12-10T00:00:00Z'],
'arrival_port_ss': [u'AKL'],
'attr_types': [u'Airport.geoloc', u'Departed.timestamp', u'Airport.country', u'city', u'dob', u'country',
u'IssuedDocument.country', u'Location.geoloc', u'arrival_port', u'departure_port', u'geoloc',
u'Travelled.timestamp', u'timestamp', u'Passport.country',u'Port.geoloc'],
'city_ss': [u'Sydney', u'Auckland'],
'country_ss': [u'NZ', u'AU'],
'data': u'["Person","GEORGE JETSON",[["dob","1979-04-17T00:00:00Z"]],[["Holds",[],[["Passport","99999999",'
'[["country","NZ"]],[["Travelled",[["timestamp","2013-12-10T00:00:00Z"]],[["Flight","UNKNOWN",'
'[["departure_port","SYD"],["arrival_port","AKL"]],[["Departed",[["timestamp",'
'"2013-12-10T00:00:00Z"]],[["Airport","Sydney Intl (SYD)",[["country","AU"],'
'["geoloc","-33.946111,151.177222"],["city","Sydney"]],[]]]],["Arrived",[],[["Airport",'
'"Auckland Intl (AKL)",[["country","NZ"],["geoloc","-37.008056,174.791667"],["city","Auckland"]],'
'[]]]]]]]]]]]]]]',
'departure_port_ss': [u'SYD'],
'dob_dts': [u'1979-04-17T00:00:00Z'],
'geoloc': [u'-37.008056,174.791667', u'-33.946111,151.177222'],
'id': u'6daf5bc24d75fe36d25700633359fbe7c166d66ef19fc8eba236fa8670e7fa40',
'link_types': [u'Travelled', u'Holds', u'Arrived', u'Departed'],
'object_types': [u'Flight', u'Entity', u'Person', u'Airport', u'Location', u'Passport', u'IssuedDocument',
u'Port', u'Event'],
'raw': u'{"arrival_port":"AKL","departure_port":"SYD","dob":"17/04/1979","name":"George Jetson",' +
'"passport_country":"NZ","passport_no":"99999999"}',
'src_file_cid': 10,
'src_file_rec': [u'test/file_1:1'],
'timestamp_ss': [u'2013-12-10T00:00:00Z']
}
@patch('serene_metadata.config.SereneConfig')
def setUp(self, mock_config):
from serene_index.helpers.index_helpers import mk_error_counter, make_solr_document
from serene_metadata import generate_example_metadata
self.error_counter = mk_error_counter()
module = importlib.import_module('serene_index.modules.module_flight')
self.builder = getattr(module, 'record_builder', None)
generated_metadata = generate_example_metadata()
print json.dumps(generated_metadata, indent=1)
self.meta.update(generated_metadata)
self.result.update(generated_metadata)
self.solr_doc = make_solr_document(r=self.record, builder=self.builder, base=self.meta, debug=False, error_counter=self.error_counter)
def tearDown(self):
self.error_counter = None
def test_make_solr_document(self):
self.maxDiff = None
self.assertDictEqual(self.result, self.solr_doc)
def test_json_correct(self):
rec = json.dumps(self.solr_doc)
self.assertTrue(json.loads(rec), 'Solr document does not parse as json')
| false
| true
|
f71a7a522882e618e8873734efaa5c00541a1526
| 2,196
|
py
|
Python
|
onnx/backend/test/case/node/batchnorm.py
|
cnheider/onnx
|
8e9c7d57f7c5aa6f6eb7ee7abb0ba2a243781933
|
[
"MIT"
] | 137
|
2020-04-28T12:28:32.000Z
|
2022-03-18T10:48:25.000Z
|
onnx/backend/test/case/node/batchnorm.py
|
cnheider/onnx
|
8e9c7d57f7c5aa6f6eb7ee7abb0ba2a243781933
|
[
"MIT"
] | 24
|
2020-05-06T08:06:42.000Z
|
2021-12-31T07:46:13.000Z
|
Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/onnx-tensorrt/third_party/onnx/onnx/backend/test/case/node/batchnorm.py
|
lablup/training_results_v0.7
|
f5bb59aa0f8b18b602763abe47d1d24d0d54b197
|
[
"Apache-2.0"
] | 51
|
2019-07-12T05:10:25.000Z
|
2021-07-28T16:19:06.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class BatchNormalization(Base):
@staticmethod
def export(): # type: () -> None
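        # Reference (test-mode) batch normalization used to build the expected
        # outputs below:
        #     y = s * (x - mean) / sqrt(var + epsilon) + bias
        # with the per-channel parameters reshaped so they broadcast over an
        # N x C x ... input.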
def _batchnorm_test_mode(x, s, bias, mean, var, epsilon=1e-5): # type: ignore
dims_x = len(x.shape)
dim_ones = (1,) * (dims_x - 2)
s = s.reshape(-1, *dim_ones)
bias = bias.reshape(-1, *dim_ones)
mean = mean.reshape(-1, *dim_ones)
var = var.reshape(-1, *dim_ones)
return s * (x - mean) / np.sqrt(var + epsilon) + bias
# input size: (1, 2, 1, 3)
x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)
s = np.array([1.0, 1.5]).astype(np.float32)
bias = np.array([0, 1]).astype(np.float32)
mean = np.array([0, 3]).astype(np.float32)
var = np.array([1, 1.5]).astype(np.float32)
y = _batchnorm_test_mode(x, s, bias, mean, var).astype(np.float32)
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 's', 'bias', 'mean', 'var'],
outputs=['y'],
)
# output size: (1, 2, 1, 3)
expect(node, inputs=[x, s, bias, mean, var], outputs=[y],
name='test_batchnorm_example')
# input size: (2, 3, 4, 5)
x = np.random.randn(2, 3, 4, 5).astype(np.float32)
s = np.random.randn(3).astype(np.float32)
bias = np.random.randn(3).astype(np.float32)
mean = np.random.randn(3).astype(np.float32)
var = np.random.rand(3).astype(np.float32)
epsilon = 1e-2
y = _batchnorm_test_mode(x, s, bias, mean, var, epsilon).astype(np.float32)
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 's', 'bias', 'mean', 'var'],
outputs=['y'],
epsilon=epsilon,
)
# output size: (2, 3, 4, 5)
expect(node, inputs=[x, s, bias, mean, var], outputs=[y],
name='test_batchnorm_epsilon')
| 34.857143
| 86
| 0.551002
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import onnx
from ..base import Base
from . import expect
class BatchNormalization(Base):
@staticmethod
def export():
def _batchnorm_test_mode(x, s, bias, mean, var, epsilon=1e-5):
dims_x = len(x.shape)
dim_ones = (1,) * (dims_x - 2)
s = s.reshape(-1, *dim_ones)
bias = bias.reshape(-1, *dim_ones)
mean = mean.reshape(-1, *dim_ones)
var = var.reshape(-1, *dim_ones)
return s * (x - mean) / np.sqrt(var + epsilon) + bias
x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)
s = np.array([1.0, 1.5]).astype(np.float32)
bias = np.array([0, 1]).astype(np.float32)
mean = np.array([0, 3]).astype(np.float32)
var = np.array([1, 1.5]).astype(np.float32)
y = _batchnorm_test_mode(x, s, bias, mean, var).astype(np.float32)
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 's', 'bias', 'mean', 'var'],
outputs=['y'],
)
expect(node, inputs=[x, s, bias, mean, var], outputs=[y],
name='test_batchnorm_example')
x = np.random.randn(2, 3, 4, 5).astype(np.float32)
s = np.random.randn(3).astype(np.float32)
bias = np.random.randn(3).astype(np.float32)
mean = np.random.randn(3).astype(np.float32)
var = np.random.rand(3).astype(np.float32)
epsilon = 1e-2
y = _batchnorm_test_mode(x, s, bias, mean, var, epsilon).astype(np.float32)
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 's', 'bias', 'mean', 'var'],
outputs=['y'],
epsilon=epsilon,
)
expect(node, inputs=[x, s, bias, mean, var], outputs=[y],
name='test_batchnorm_epsilon')
| true
| true
|
f71a7adb1bcafed077beb9dfb10755cea07d0e0b
| 1,239
|
py
|
Python
|
examples/example.py
|
Mizux/cmake-pybind11
|
d3b89746546734990eae5a86532674bf3462a2f3
|
[
"Apache-2.0"
] | null | null | null |
examples/example.py
|
Mizux/cmake-pybind11
|
d3b89746546734990eae5a86532674bf3462a2f3
|
[
"Apache-2.0"
] | null | null | null |
examples/example.py
|
Mizux/cmake-pybind11
|
d3b89746546734990eae5a86532674bf3462a2f3
|
[
"Apache-2.0"
] | null | null | null |
import cmakepybind11
from cmakepybind11.foo import pyFoo
from cmakepybind11.bar import pyBar
from cmakepybind11.foobar import pyFooBar
print(f'version: {cmakepybind11.__version__}')
# foo
print(f'Foo: {dir(pyFoo.Foo)}')
pyFoo.free_function(2147483647) # max int
pyFoo.free_function(2147483647+1) # max int + 1
f = pyFoo.Foo()
print(f'class Foo: {dir(f)}')
f.static_function(1)
f.static_function(2147483647)
f.static_function(2147483647+1)
f.int = 13
assert(f.int == 13)
f.int64 = 31
assert(f.int64 == 31)
# bar
print(f'Bar: {dir(pyBar.Bar)}')
pyBar.free_function(2147483647) # max int
pyBar.free_function(2147483647+1) # max int + 1
b = pyBar.Bar()
print(f'class Bar: {dir(b)}')
b.static_function(1)
b.static_function(2147483647)
b.static_function(2147483647+1)
b.int = 13
assert(b.int == 13)
b.int64 = 31
assert(b.int64 == 31)
# foobar
print(f'FooBar: {dir(pyFooBar.FooBar)}')
pyFooBar.free_function(2147483647) # max int
pyFooBar.free_function(2147483647+1) # max int + 1
fb = pyFooBar.FooBar()
print(f'class FooBar: {dir(fb)}')
fb.static_function(1)
fb.static_function(2147483647)
fb.static_function(2147483647+1)
fb.foo_int = 13
fb.bar_int = 17
assert(fb.int == 30)
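# Judging by the asserts, FooBar's int property aggregates its parts:
# foo_int + bar_int == 13 + 17 == 30; the int64 checks below follow suit.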
fb.foo_int64 = 31
fb.bar_int64 = 37
assert(fb.int64 == 68)
| 21.736842
| 50
| 0.736885
|
import cmakepybind11
from cmakepybind11.foo import pyFoo
from cmakepybind11.bar import pyBar
from cmakepybind11.foobar import pyFooBar
print(f'version: {cmakepybind11.__version__}')
print(f'Foo: {dir(pyFoo.Foo)}')
pyFoo.free_function(2147483647)
pyFoo.free_function(2147483647+1)
f = pyFoo.Foo()
print(f'class Foo: {dir(f)}')
f.static_function(1)
f.static_function(2147483647)
f.static_function(2147483647+1)
f.int = 13
assert(f.int == 13)
f.int64 = 31
assert(f.int64 == 31)
print(f'Bar: {dir(pyBar.Bar)}')
pyBar.free_function(2147483647)
pyBar.free_function(2147483647+1)
b = pyBar.Bar()
print(f'class Bar: {dir(b)}')
b.static_function(1)
b.static_function(2147483647)
b.static_function(2147483647+1)
b.int = 13
assert(b.int == 13)
b.int64 = 31
assert(b.int64 == 31)
print(f'FooBar: {dir(pyFooBar.FooBar)}')
pyFooBar.free_function(2147483647)
pyFooBar.free_function(2147483647+1)
fb = pyFooBar.FooBar()
print(f'class FooBar: {dir(fb)}')
fb.static_function(1)
fb.static_function(2147483647)
fb.static_function(2147483647+1)
fb.foo_int = 13
fb.bar_int = 17
assert(fb.int == 30)
fb.foo_int64 = 31
fb.bar_int64 = 37
assert(fb.int64 == 68)
| true
| true
|
f71a7b8e9aca170c7b5bfc6407b13285000df309
| 19,338
|
py
|
Python
|
.history/implementations/pixelda/pixelda_20190101224024.py
|
Napkin-DL/PyTorch-GAN
|
4668fb434a74a4e4771631944e4abfb0ec1c8795
|
[
"MIT"
] | null | null | null |
.history/implementations/pixelda/pixelda_20190101224024.py
|
Napkin-DL/PyTorch-GAN
|
4668fb434a74a4e4771631944e4abfb0ec1c8795
|
[
"MIT"
] | null | null | null |
.history/implementations/pixelda/pixelda_20190101224024.py
|
Napkin-DL/PyTorch-GAN
|
4668fb434a74a4e4771631944e4abfb0ec1c8795
|
[
"MIT"
] | null | null | null |
import argparse
import os
import numpy as np
import math
import itertools
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from mnistm import MNISTM
import torch.nn as nn
import torch.nn.functional as F
import torch
os.makedirs('images', exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')
parser.add_argument('--batch_size', type=int, default=64, help='size of the batches')
parser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')
parser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')
parser.add_argument('--b2', type=float, default=0.999, help='adam: decay of second order momentum of gradient')
parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')
parser.add_argument('--n_residual_blocks', type=int, default=1, help='number of residual blocks in generator')
parser.add_argument('--latent_dim', type=int, default=10, help='dimensionality of the noise input')
parser.add_argument('--img_size', type=int, default=32, help='size of each image dimension')
parser.add_argument('--channels', type=int, default=3, help='number of image channels')
parser.add_argument('--n_classes', type=int, default=10, help='number of classes in the dataset')
parser.add_argument('--sample_interval', type=int, default=300, help='interval betwen image samples')
opt = parser.parse_args()
print(opt)
# Calculate output of image discriminator (PatchGAN)
patch = int(opt.img_size / 2**4)
patch = (1, patch, patch)
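# The discriminator below downsamples by 2 four times (hence 2**4), so for
# img_size=32 the PatchGAN output is a (1, 2, 2) grid of per-patch scores.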
cuda = True if torch.cuda.is_available() else False
print("cuda : {}".format(cuda))
def weights_init_normal(m):
classname = m.__class__.__name__
print("classname : {}".format(classname))
if classname.find('Conv') != -1:
torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
torch.nn.init.constant_(m.bias.data, 0.0)
class ResidualBlock_back(nn.Module):
def __init__(self, in_features=64, out_features=64):
        super(ResidualBlock_back, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_features, in_features, 3, 1, 1),
nn.BatchNorm2d(in_features),
nn.ReLU(inplace=True),
nn.Conv2d(in_features, in_features, 3, 1, 1),
nn.BatchNorm2d(in_features)
)
def forward(self, x):
return x + self.block(x)
class sencode_ResidualBlock(nn.Module):
def __init__(self, in_features=64, out_features=64):
super(sencode_ResidualBlock, self).__init__()
### ENCODER
self.sencode_block = nn.Sequential(
nn.Conv2d(in_channels=1*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2),padding=0),
nn.BatchNorm2d(4*in_features),
nn.LeakyReLU(inplace=True),
nn.Conv2d(in_channels=4*in_features,out_channels=8*in_features,kernel_size=(3, 3),stride=(2, 2),padding=1),
nn.BatchNorm2d(8*in_features),
nn.LeakyReLU(inplace=True)
)
def forward(self, x):
encode_x = self.sencode_block(x)
return x, encode_x
class sdecode_ResidualBlock(nn.Module):
def __init__(self, in_features=64, out_features=64):
super(sdecode_ResidualBlock, self).__init__()
self.sdecode_block = nn.Sequential(
nn.ConvTranspose2d(in_channels=8*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2), padding=0),
nn.BatchNorm2d(4*in_features),
nn.LeakyReLU(inplace=True),
nn.ConvTranspose2d(in_channels=4*in_features,out_channels=1*in_features,kernel_size=(3, 3),stride=(2, 2),padding=1),
nn.BatchNorm2d(1*in_features),
nn.LeakyReLU(inplace=True),
)
def forward(self, encode_x):
decode_x = self.sdecode_block(encode_x)
decode_x = decode_x[:, :, :-1, :-1]
decode_x = F.sigmoid(decode_x)
return decode_x
class tencode_ResidualBlock(nn.Module):
def __init__(self, in_features=64, out_features=64):
super(tencode_ResidualBlock, self).__init__()
### ENCODER
self.tencode_block = nn.Sequential(
nn.Conv2d(in_channels=1*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2),padding=0),
nn.BatchNorm2d(4*in_features),
nn.LeakyReLU(inplace=True),
nn.Conv2d(in_channels=4*in_features,out_channels=8*in_features,kernel_size=(3, 3),stride=(2, 2),padding=1),
nn.BatchNorm2d(8*in_features),
nn.LeakyReLU(inplace=True)
)
def forward(self, x):
encode_x = self.tencode_block(x)
return x, encode_x
class tdecode_ResidualBlock(nn.Module):
def __init__(self, in_features=64, out_features=64):
super(tdecode_ResidualBlock, self).__init__()
self.tdecode_block = nn.Sequential(
nn.ConvTranspose2d(in_channels=8*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2), padding=0),
nn.BatchNorm2d(4*in_features),
nn.LeakyReLU(inplace=True),
nn.ConvTranspose2d(in_channels=4*in_features,out_channels=1*in_features,kernel_size=(3, 3),stride=(2, 2),padding=1),
nn.BatchNorm2d(1*in_features),
nn.LeakyReLU(inplace=True),
)
def forward(self, encode_x):
decode_x = self.tdecode_block(encode_x)
decode_x = decode_x[:, :, :-1, :-1]
decode_x = F.sigmoid(decode_x)
return decode_x
class target_encode_Generator(nn.Module):
def __init__(self):
super(target_encode_Generator, self).__init__()
# Fully-connected layer which constructs image channel shaped output from noise
self.tfc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)
self.tl1 = nn.Sequential(nn.Conv2d(opt.channels*2, 64, 3, 1, 1), nn.ReLU(inplace=True))
resblocks = []
for _ in range(opt.n_residual_blocks):
resblocks.append(tencode_ResidualBlock())
self.tencode_resblocks = nn.Sequential(*resblocks)
def forward(self, img, z):
gen_input = torch.cat((img, self.tfc(z).view(*img.shape)), 1)
out = self.tl1(gen_input)
x, encode_out = self.tencode_resblocks(out)
return x, encode_out
class source_encode_Generator(nn.Module):
def __init__(self):
super(source_encode_Generator, self).__init__()
# Fully-connected layer which constructs image channel shaped output from noise
self.sfc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)
self.sl1 = nn.Sequential(nn.Conv2d(opt.channels*2, 64, 3, 1, 1), nn.ReLU(inplace=True))
resblocks = []
for _ in range(opt.n_residual_blocks):
resblocks.append(sencode_ResidualBlock())
self.sencode_resblocks = nn.Sequential(*resblocks)
def forward(self, img, z):
gen_input = torch.cat((img, self.sfc(z).view(*img.shape)), 1)
out = self.sl1(gen_input)
x, encode_out = self.sencode_resblocks(out)
return x, encode_out
class target_decode_Generator(nn.Module):
def __init__(self):
super(target_decode_Generator, self).__init__()
resblocks = []
for _ in range(opt.n_residual_blocks):
resblocks.append(tdecode_ResidualBlock())
self.target_decode_resblocks = nn.Sequential(*resblocks)
self.tl2 = nn.Sequential(nn.Conv2d(64, opt.channels, 3, 1, 1), nn.Tanh())
def forward(self, img, encode_out):
out = img + self.target_decode_resblocks(encode_out)
img_ = self.tl2(out)
return img_
class source_decode_Generator(nn.Module):
def __init__(self):
super(source_decode_Generator, self).__init__()
resblocks = []
for _ in range(opt.n_residual_blocks):
resblocks.append(sdecode_ResidualBlock())
self.source_decode_resblocks = nn.Sequential(*resblocks)
self.sl2 = nn.Sequential(nn.Conv2d(64, opt.channels, 3, 1, 1), nn.Tanh())
def forward(self, img, encode_out):
out = img + self.source_decode_resblocks(encode_out)
img_ = self.sl2(out)
return img_
class encode_Discriminator(nn.Module):
def __init__(self):
super(encode_Discriminator, self).__init__()
def block(in_features, out_features, normalization=True):
"""Discriminator block"""
layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True) ]
if normalization:
layers.append(nn.InstanceNorm2d(out_features))
return layers
self.model = nn.Sequential(
*block(256, 512, normalization=False),
*block(512, 1024),
nn.Conv2d(1024, 1, 3, 1, 1)
)
def forward(self, encode_x):
validity = self.model(encode_x)
return validity
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
def block(in_features, out_features, normalization=True):
"""Discriminator block"""
layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True) ]
if normalization:
layers.append(nn.InstanceNorm2d(out_features))
return layers
self.model = nn.Sequential(
*block(opt.channels, 64, normalization=False),
*block(64, 128),
*block(128, 256),
*block(256, 512),
nn.Conv2d(512, 1, 3, 1, 1)
)
def forward(self, img):
validity = self.model(img)
return validity
class encode_Classifier(nn.Module):
def __init__(self):
super(encode_Classifier, self).__init__()
def block(in_features, out_features, normalization=True):
"""Classifier block"""
layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True) ]
if normalization:
layers.append(nn.InstanceNorm2d(out_features))
return layers
self.model = nn.Sequential(
*block(256, 512, normalization=False),
            *block(512, 1024),
*block(1024, 2048)
)
input_size = opt.img_size // 2**4
self.output_layer = nn.Sequential(
nn.Linear(2048*input_size**2, opt.n_classes),
nn.Softmax()
)
def forward(self, img):
feature_repr = self.model(img)
feature_repr = feature_repr.view(feature_repr.size(0), -1)
label = self.output_layer(feature_repr)
return label
class Classifier(nn.Module):
def __init__(self):
super(Classifier, self).__init__()
def block(in_features, out_features, normalization=True):
"""Classifier block"""
layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True) ]
if normalization:
layers.append(nn.InstanceNorm2d(out_features))
return layers
self.model = nn.Sequential(
*block(opt.channels, 64, normalization=False),
*block(64, 128),
*block(128, 256),
*block(256, 512)
)
input_size = opt.img_size // 2**4
self.output_layer = nn.Sequential(
nn.Linear(512*input_size**2, opt.n_classes),
nn.Softmax()
)
def forward(self, img):
feature_repr = self.model(img)
feature_repr = feature_repr.view(feature_repr.size(0), -1)
label = self.output_layer(feature_repr)
return label
# Loss function
adversarial_loss = torch.nn.MSELoss()
encode_adversarial_loss = torch.nn.MSELoss()
task_loss = torch.nn.CrossEntropyLoss()
# Loss weights
lambda_adv = 1
lambda_task = 0.1
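# Relative weights of the adversarial and task (classification) terms in the
# generator objective (see g_loss in the training loop).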
# Initialize generator and discriminator
target_encode_generator = target_encode_Generator()
target_decode_generator = target_decode_Generator()
source_encode_generator = source_encode_Generator()
source_decode_generator = source_decode_Generator()
encode_discriminator = encode_Discriminator()
discriminator = Discriminator()
classifier = Classifier()
if cuda:
target_encode_generator.cuda()
target_decode_generator.cuda()
source_encode_generator.cuda()
source_decode_generator.cuda()
encode_discriminator.cuda()
discriminator.cuda()
classifier.cuda()
adversarial_loss.cuda()
encode_adversarial_loss.cuda()
task_loss.cuda()
# Initialize weights
target_encode_generator.apply(weights_init_normal)
target_decode_generator.apply(weights_init_normal)
source_encode_generator.apply(weights_init_normal)
source_decode_generator.apply(weights_init_normal)
encode_discriminator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
classifier.apply(weights_init_normal)
# Configure data loader
os.makedirs('../../data/mnist', exist_ok=True)
dataloader_A = torch.utils.data.DataLoader(
datasets.MNIST('../../data/mnist', train=True, download=True,
transform=transforms.Compose([
transforms.Resize(opt.img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])),
batch_size=opt.batch_size, shuffle=True)
os.makedirs('../../data/mnistm', exist_ok=True)
dataloader_B = torch.utils.data.DataLoader(
MNISTM('../../data/mnistm', train=True, download=True,
transform=transforms.Compose([
transforms.Resize(opt.img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])),
batch_size=opt.batch_size, shuffle=True)
# Optimizers
optimizer_G = torch.optim.Adam( itertools.chain(target_encode_generator.parameters(),
source_encode_generator.parameters(), target_decode_generator.parameters(),
source_decode_generator.parameters(),
classifier.parameters()),
lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(itertools.chain(encode_discriminator.parameters(), discriminator.parameters()), lr=opt.lr, betas=(opt.b1, opt.b2))
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor
# ----------
# Training
# ----------
# Keeps 100 accuracy measurements
task_performance = []
target_performance = []
for epoch in range(opt.n_epochs):
for i, ((imgs_A, labels_A), (imgs_B, labels_B)) in enumerate(zip(dataloader_A, dataloader_B)):
batch_size = imgs_A.size(0)
# Adversarial ground truths
valid = Variable(FloatTensor(batch_size, *patch).fill_(1.0), requires_grad=False)
fake = Variable(FloatTensor(batch_size, *patch).fill_(0.0), requires_grad=False)
# Configure input
imgs_A = Variable(imgs_A.type(FloatTensor).expand(batch_size, 3, opt.img_size, opt.img_size))
labels_A = Variable(labels_A.type(LongTensor))
imgs_B = Variable(imgs_B.type(FloatTensor))
# -----------------
# Train Generator
# -----------------
optimizer_G.zero_grad()
# Sample noise
z = Variable(FloatTensor(np.random.uniform(-1, 1, (batch_size, opt.latent_dim))))
# Generate a batch of images
imgs_A_x, encode_fake_B = source_encode_generator(imgs_A, z)
decode_fake_B = source_decode_generator(imgs_A_x, encode_fake_B)
# Perform task on translated source image
label_pred = classifier(decode_fake_B)
# Calculate the task loss
task_loss_ = (task_loss(label_pred, labels_A) + \
task_loss(classifier(imgs_A), labels_A)) / 2
# Loss measures generator's ability to fool the discriminator
g_loss = lambda_adv * adversarial_loss(discriminator(decode_fake_B), valid) + \
0.1 * encode_adversarial_loss(encode_discriminator(encode_fake_B), valid) + \
lambda_task * task_loss_
g_loss.backward()
optimizer_G.step()
# ---------------------
# Train Discriminator
# ---------------------
optimizer_D.zero_grad()
imgs_B_x, encode_real_B = target_encode_generator(imgs_B, z)
decode_real_B = target_decode_generator(imgs_B_x, encode_real_B)
# Measure discriminator's ability to classify real from generated samples
encode_real_loss = adversarial_loss(encode_discriminator(encode_real_B), valid)
encode_fake_loss = adversarial_loss(encode_discriminator(encode_fake_B.detach()), fake)
decode_real_loss = adversarial_loss(discriminator(decode_real_B), valid)
decode_fake_loss = adversarial_loss(discriminator(decode_fake_B.detach()), fake)
encode_d_loss = (encode_real_loss + encode_fake_loss) / 2
decode_d_loss = (decode_real_loss + decode_fake_loss) / 2
d_loss = encode_d_loss + decode_d_loss
d_loss.backward()
optimizer_D.step()
# ---------------------------------------
# Evaluate Performance on target domain
# ---------------------------------------
# Evaluate performance on translated Domain A
acc = np.mean(np.argmax(label_pred.data.cpu().numpy(), axis=1) == labels_A.data.cpu().numpy())
task_performance.append(acc)
if len(task_performance) > 100:
task_performance.pop(0)
# Evaluate performance on Domain B
pred_B = classifier(imgs_B)
target_acc = np.mean(np.argmax(pred_B.data.cpu().numpy(), axis=1) == labels_B.numpy())
target_performance.append(target_acc)
if len(target_performance) > 100:
target_performance.pop(0)
print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] [CLF acc: %3d%% (%3d%%), target_acc: %3d%% (%3d%%)]" %
(epoch, opt.n_epochs,
i, len(dataloader_A),
d_loss.item(), g_loss.item(),
100*acc, 100*np.mean(task_performance),
100*target_acc, 100*np.mean(target_performance)))
batches_done = len(dataloader_A) * epoch + i
if batches_done % opt.sample_interval == 0:
            sample = torch.cat((imgs_A.data[:5], decode_fake_B.data[:5], imgs_B.data[:5]), -2)
save_image(sample, 'images/%d.png' % batches_done, nrow=int(math.sqrt(batch_size)), normalize=True)
| 37.917647
| 145
| 0.631554
|
import argparse
import os
import numpy as np
import math
import itertools
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from mnistm import MNISTM
import torch.nn as nn
import torch.nn.functional as F
import torch
os.makedirs('images', exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')
parser.add_argument('--batch_size', type=int, default=64, help='size of the batches')
parser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')
parser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')
parser.add_argument('--b2', type=float, default=0.999, help='adam: decay of second order momentum of gradient')
parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')
parser.add_argument('--n_residual_blocks', type=int, default=1, help='number of residual blocks in generator')
parser.add_argument('--latent_dim', type=int, default=10, help='dimensionality of the noise input')
parser.add_argument('--img_size', type=int, default=32, help='size of each image dimension')
parser.add_argument('--channels', type=int, default=3, help='number of image channels')
parser.add_argument('--n_classes', type=int, default=10, help='number of classes in the dataset')
parser.add_argument('--sample_interval', type=int, default=300, help='interval betwen image samples')
opt = parser.parse_args()
print(opt)
patch = int(opt.img_size / 2**4)
patch = (1, patch, patch)
cuda = True if torch.cuda.is_available() else False
print("cuda : {}".format(cuda))
def weights_init_normal(m):
classname = m.__class__.__name__
print("classname : {}".format(classname))
if classname.find('Conv') != -1:
torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
torch.nn.init.constant_(m.bias.data, 0.0)
class ResidualBlock_back(nn.Module):
def __init__(self, in_features=64, out_features=64):
        super(ResidualBlock_back, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_features, in_features, 3, 1, 1),
nn.BatchNorm2d(in_features),
nn.ReLU(inplace=True),
nn.Conv2d(in_features, in_features, 3, 1, 1),
nn.BatchNorm2d(in_features)
)
def forward(self, x):
return x + self.block(x)
class sencode_ResidualBlock(nn.Module):
def __init__(self, in_features=64, out_features=64):
super(sencode_ResidualBlock, self).__init__()
        self.sencode_block = nn.Sequential(
nn.Conv2d(in_channels=1*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2),padding=0),
nn.BatchNorm2d(4*in_features),
nn.LeakyReLU(inplace=True),
nn.Conv2d(in_channels=4*in_features,out_channels=8*in_features,kernel_size=(3, 3),stride=(2, 2),padding=1),
nn.BatchNorm2d(8*in_features),
nn.LeakyReLU(inplace=True)
)
def forward(self, x):
encode_x = self.sencode_block(x)
return x, encode_x
class sdecode_ResidualBlock(nn.Module):
def __init__(self, in_features=64, out_features=64):
super(sdecode_ResidualBlock, self).__init__()
self.sdecode_block = nn.Sequential(
nn.ConvTranspose2d(in_channels=8*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2), padding=0),
nn.BatchNorm2d(4*in_features),
nn.LeakyReLU(inplace=True),
nn.ConvTranspose2d(in_channels=4*in_features,out_channels=1*in_features,kernel_size=(3, 3),stride=(2, 2),padding=1),
nn.BatchNorm2d(1*in_features),
nn.LeakyReLU(inplace=True),
)
def forward(self, encode_x):
decode_x = self.sdecode_block(encode_x)
decode_x = decode_x[:, :, :-1, :-1]
decode_x = F.sigmoid(decode_x)
return decode_x
class tencode_ResidualBlock(nn.Module):
def __init__(self, in_features=64, out_features=64):
super(tencode_ResidualBlock, self).__init__()
        self.tencode_block = nn.Sequential(
nn.Conv2d(in_channels=1*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2),padding=0),
nn.BatchNorm2d(4*in_features),
nn.LeakyReLU(inplace=True),
nn.Conv2d(in_channels=4*in_features,out_channels=8*in_features,kernel_size=(3, 3),stride=(2, 2),padding=1),
nn.BatchNorm2d(8*in_features),
nn.LeakyReLU(inplace=True)
)
def forward(self, x):
encode_x = self.tencode_block(x)
return x, encode_x
class tdecode_ResidualBlock(nn.Module):
def __init__(self, in_features=64, out_features=64):
super(tdecode_ResidualBlock, self).__init__()
self.tdecode_block = nn.Sequential(
nn.ConvTranspose2d(in_channels=8*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2), padding=0),
nn.BatchNorm2d(4*in_features),
nn.LeakyReLU(inplace=True),
nn.ConvTranspose2d(in_channels=4*in_features,out_channels=1*in_features,kernel_size=(3, 3),stride=(2, 2),padding=1),
nn.BatchNorm2d(1*in_features),
nn.LeakyReLU(inplace=True),
)
def forward(self, encode_x):
decode_x = self.tdecode_block(encode_x)
decode_x = decode_x[:, :, :-1, :-1]
decode_x = F.sigmoid(decode_x)
return decode_x
class target_encode_Generator(nn.Module):
def __init__(self):
super(target_encode_Generator, self).__init__()
self.tfc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)
self.tl1 = nn.Sequential(nn.Conv2d(opt.channels*2, 64, 3, 1, 1), nn.ReLU(inplace=True))
resblocks = []
for _ in range(opt.n_residual_blocks):
resblocks.append(tencode_ResidualBlock())
self.tencode_resblocks = nn.Sequential(*resblocks)
def forward(self, img, z):
gen_input = torch.cat((img, self.tfc(z).view(*img.shape)), 1)
out = self.tl1(gen_input)
x, encode_out = self.tencode_resblocks(out)
return x, encode_out
class source_encode_Generator(nn.Module):
def __init__(self):
super(source_encode_Generator, self).__init__()
self.sfc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)
self.sl1 = nn.Sequential(nn.Conv2d(opt.channels*2, 64, 3, 1, 1), nn.ReLU(inplace=True))
resblocks = []
for _ in range(opt.n_residual_blocks):
resblocks.append(sencode_ResidualBlock())
self.sencode_resblocks = nn.Sequential(*resblocks)
def forward(self, img, z):
gen_input = torch.cat((img, self.sfc(z).view(*img.shape)), 1)
out = self.sl1(gen_input)
x, encode_out = self.sencode_resblocks(out)
return x, encode_out
class target_decode_Generator(nn.Module):
def __init__(self):
super(target_decode_Generator, self).__init__()
resblocks = []
for _ in range(opt.n_residual_blocks):
resblocks.append(tdecode_ResidualBlock())
self.target_decode_resblocks = nn.Sequential(*resblocks)
self.tl2 = nn.Sequential(nn.Conv2d(64, opt.channels, 3, 1, 1), nn.Tanh())
def forward(self, img, encode_out):
out = img + self.target_decode_resblocks(encode_out)
img_ = self.tl2(out)
return img_
class source_decode_Generator(nn.Module):
def __init__(self):
super(source_decode_Generator, self).__init__()
resblocks = []
for _ in range(opt.n_residual_blocks):
resblocks.append(sdecode_ResidualBlock())
self.source_decode_resblocks = nn.Sequential(*resblocks)
self.sl2 = nn.Sequential(nn.Conv2d(64, opt.channels, 3, 1, 1), nn.Tanh())
def forward(self, img, encode_out):
out = img + self.source_decode_resblocks(encode_out)
img_ = self.sl2(out)
return img_
class encode_Discriminator(nn.Module):
def __init__(self):
super(encode_Discriminator, self).__init__()
def block(in_features, out_features, normalization=True):
layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True) ]
if normalization:
layers.append(nn.InstanceNorm2d(out_features))
return layers
self.model = nn.Sequential(
*block(256, 512, normalization=False),
*block(512, 1024),
nn.Conv2d(1024, 1, 3, 1, 1)
)
def forward(self, encode_x):
validity = self.model(encode_x)
return validity
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
def block(in_features, out_features, normalization=True):
layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True) ]
if normalization:
layers.append(nn.InstanceNorm2d(out_features))
return layers
self.model = nn.Sequential(
*block(opt.channels, 64, normalization=False),
*block(64, 128),
*block(128, 256),
*block(256, 512),
nn.Conv2d(512, 1, 3, 1, 1)
)
def forward(self, img):
validity = self.model(img)
return validity
class encode_Classifier(nn.Module):
def __init__(self):
super(encode_Classifier, self).__init__()
def block(in_features, out_features, normalization=True):
layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True) ]
if normalization:
layers.append(nn.InstanceNorm2d(out_features))
return layers
self.model = nn.Sequential(
*block(256, 512, normalization=False),
            *block(512, 1024),
            *block(1024, 2048)
)
input_size = opt.img_size // 2**4
self.output_layer = nn.Sequential(
nn.Linear(2048*input_size**2, opt.n_classes),
nn.Softmax()
)
def forward(self, img):
feature_repr = self.model(img)
feature_repr = feature_repr.view(feature_repr.size(0), -1)
label = self.output_layer(feature_repr)
return label
class Classifier(nn.Module):
def __init__(self):
super(Classifier, self).__init__()
def block(in_features, out_features, normalization=True):
layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True) ]
if normalization:
layers.append(nn.InstanceNorm2d(out_features))
return layers
self.model = nn.Sequential(
*block(opt.channels, 64, normalization=False),
*block(64, 128),
*block(128, 256),
*block(256, 512)
)
input_size = opt.img_size // 2**4
self.output_layer = nn.Sequential(
nn.Linear(512*input_size**2, opt.n_classes),
nn.Softmax()
)
def forward(self, img):
feature_repr = self.model(img)
feature_repr = feature_repr.view(feature_repr.size(0), -1)
label = self.output_layer(feature_repr)
return label
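# Loss functions: MSE gives LSGAN-style adversarial objectives for both the image and
# feature (encode) discriminators; cross-entropy is used for the classification task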
adversarial_loss = torch.nn.MSELoss()
encode_adversarial_loss = torch.nn.MSELoss()
task_loss = torch.nn.CrossEntropyLoss()
lambda_adv = 1
lambda_task = 0.1
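# Initialize the encoder/decoder generators for both domains, the two discriminators
# and the task classifier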
target_encode_generator = target_encode_Generator()
target_decode_generator = target_decode_Generator()
source_encode_generator = source_encode_Generator()
source_decode_generator = source_decode_Generator()
encode_discriminator = encode_Discriminator()
discriminator = Discriminator()
classifier = Classifier()
if cuda:
target_encode_generator.cuda()
target_decode_generator.cuda()
source_encode_generator.cuda()
source_decode_generator.cuda()
encode_discriminator.cuda()
discriminator.cuda()
classifier.cuda()
adversarial_loss.cuda()
encode_adversarial_loss.cuda()
task_loss.cuda()
target_encode_generator.apply(weights_init_normal)
target_decode_generator.apply(weights_init_normal)
source_encode_generator.apply(weights_init_normal)
source_decode_generator.apply(weights_init_normal)
encode_discriminator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
classifier.apply(weights_init_normal)
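# Configure data loaders: MNIST is the source domain (A), MNIST-M the target domain (B)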
os.makedirs('../../data/mnist', exist_ok=True)
dataloader_A = torch.utils.data.DataLoader(
datasets.MNIST('../../data/mnist', train=True, download=True,
transform=transforms.Compose([
transforms.Resize(opt.img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])),
batch_size=opt.batch_size, shuffle=True)
os.makedirs('../../data/mnistm', exist_ok=True)
dataloader_B = torch.utils.data.DataLoader(
MNISTM('../../data/mnistm', train=True, download=True,
transform=transforms.Compose([
transforms.Resize(opt.img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])),
batch_size=opt.batch_size, shuffle=True)
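# Optimizers: the generators and the classifier share one optimizer,
# the two discriminators share another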
optimizer_G = torch.optim.Adam( itertools.chain(target_encode_generator.parameters(),
source_encode_generator.parameters(), target_decode_generator.parameters(),
source_decode_generator.parameters(),
classifier.parameters()),
lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(itertools.chain(encode_discriminator.parameters(), discriminator.parameters()), lr=opt.lr, betas=(opt.b1, opt.b2))
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor
task_performance = []
target_performance = []
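# ----------
#  Training
# ----------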
for epoch in range(opt.n_epochs):
for i, ((imgs_A, labels_A), (imgs_B, labels_B)) in enumerate(zip(dataloader_A, dataloader_B)):
batch_size = imgs_A.size(0)
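        # Adversarial ground truths (`patch`, the discriminator output shape, is assumed
        # to be defined earlier in the script)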
valid = Variable(FloatTensor(batch_size, *patch).fill_(1.0), requires_grad=False)
fake = Variable(FloatTensor(batch_size, *patch).fill_(0.0), requires_grad=False)
imgs_A = Variable(imgs_A.type(FloatTensor).expand(batch_size, 3, opt.img_size, opt.img_size))
labels_A = Variable(labels_A.type(LongTensor))
imgs_B = Variable(imgs_B.type(FloatTensor))
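        # -----------------
        #  Train Generators
        # -----------------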
optimizer_G.zero_grad()
z = Variable(FloatTensor(np.random.uniform(-1, 1, (batch_size, opt.latent_dim))))
imgs_A_x, encode_fake_B = source_encode_generator(imgs_A, z)
decode_fake_B = source_decode_generator(imgs_A_x, encode_fake_B)
label_pred = classifier(decode_fake_B)
task_loss_ = (task_loss(label_pred, labels_A) + \
task_loss(classifier(imgs_A), labels_A)) / 2
g_loss = lambda_adv * adversarial_loss(discriminator(decode_fake_B), valid) + \
0.1 * encode_adversarial_loss(encode_discriminator(encode_fake_B), valid) + \
lambda_task * task_loss_
g_loss.backward()
optimizer_G.step()
# ---------------------
# Train Discriminator
# ---------------------
optimizer_D.zero_grad()
imgs_B_x, encode_real_B = target_encode_generator(imgs_B, z)
decode_real_B = target_decode_generator(imgs_B_x, encode_real_B)
# Measure discriminator's ability to classify real from generated samples
encode_real_loss = adversarial_loss(encode_discriminator(encode_real_B), valid)
encode_fake_loss = adversarial_loss(encode_discriminator(encode_fake_B.detach()), fake)
decode_real_loss = adversarial_loss(discriminator(decode_real_B), valid)
decode_fake_loss = adversarial_loss(discriminator(decode_fake_B.detach()), fake)
encode_d_loss = (encode_real_loss + encode_fake_loss) / 2
decode_d_loss = (decode_real_loss + decode_fake_loss) / 2
d_loss = encode_d_loss + decode_d_loss
d_loss.backward()
optimizer_D.step()
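        # ---------------------------------------------
        #  Evaluate classifier on source / target data
        # ---------------------------------------------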
acc = np.mean(np.argmax(label_pred.data.cpu().numpy(), axis=1) == labels_A.data.cpu().numpy())
task_performance.append(acc)
if len(task_performance) > 100:
task_performance.pop(0)
pred_B = classifier(imgs_B)
target_acc = np.mean(np.argmax(pred_B.data.cpu().numpy(), axis=1) == labels_B.numpy())
target_performance.append(target_acc)
if len(target_performance) > 100:
target_performance.pop(0)
print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] [CLF acc: %3d%% (%3d%%), target_acc: %3d%% (%3d%%)]" %
(epoch, opt.n_epochs,
i, len(dataloader_A),
d_loss.item(), g_loss.item(),
100*acc, 100*np.mean(task_performance),
100*target_acc, 100*np.mean(target_performance)))
batches_done = len(dataloader_A) * epoch + i
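        # Periodically save a grid of source images, their translated versions and real target images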
if batches_done % opt.sample_interval == 0:
            sample = torch.cat((imgs_A.data[:5], decode_fake_B.data[:5], imgs_B.data[:5]), -2)
save_image(sample, 'images/%d.png' % batches_done, nrow=int(math.sqrt(batch_size)), normalize=True)
| true
| true
|
f71a7bc3530cae6fe552775aa2d6f0317c406877
| 481
|
py
|
Python
|
ocean_utils/http_requests/requests_session.py
|
oceanprotocol/common-utils-py
|
f577f4762841496584e114baaec0d476e73c700e
|
[
"Apache-2.0"
] | 1
|
2020-12-02T13:49:43.000Z
|
2020-12-02T13:49:43.000Z
|
common_utils_py/http_requests/requests_session.py
|
nevermined-io/common-utils-py
|
4a02843d4f4771935b6f057badac844fef6f6f13
|
[
"Apache-2.0"
] | 2
|
2021-08-24T13:14:47.000Z
|
2021-12-01T17:06:29.000Z
|
common_utils_py/http_requests/requests_session.py
|
nevermined-io/common-utils-py
|
4a02843d4f4771935b6f057badac844fef6f6f13
|
[
"Apache-2.0"
] | null | null | null |
import requests
from requests.adapters import HTTPAdapter
def get_requests_session():
"""
Set connection pool maxsize and block value to avoid `connection pool full` warnings.
:return: requests session
"""
session = requests.sessions.Session()
session.mount('http://', HTTPAdapter(pool_connections=25, pool_maxsize=25, pool_block=True))
session.mount('https://', HTTPAdapter(pool_connections=25, pool_maxsize=25, pool_block=True))
return session
| 32.066667
| 97
| 0.738046
|
import requests
from requests.adapters import HTTPAdapter
def get_requests_session():
session = requests.sessions.Session()
session.mount('http://', HTTPAdapter(pool_connections=25, pool_maxsize=25, pool_block=True))
session.mount('https://', HTTPAdapter(pool_connections=25, pool_maxsize=25, pool_block=True))
return session
| true
| true
|
f71a7d056b1aa807f43b720faed6745239c9c75f
| 1,872
|
py
|
Python
|
app.py
|
ssvfx41/tk-houdini-geometrynode
|
03454d3c6773b0a48531ab24ace60928f11c4a4e
|
[
"MIT"
] | null | null | null |
app.py
|
ssvfx41/tk-houdini-geometrynode
|
03454d3c6773b0a48531ab24ace60928f11c4a4e
|
[
"MIT"
] | null | null | null |
app.py
|
ssvfx41/tk-houdini-geometrynode
|
03454d3c6773b0a48531ab24ace60928f11c4a4e
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015 Pixomondo
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the MIT License included in this
# distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the MIT License. All rights
# not expressly granted therein are reserved by Pixomondo.
"""
Geometry Output App for Houdini
"""
import sgtk
class GeometryOutputNode(sgtk.platform.Application):
def init_app(self):
module = self.import_module("tk_houdini_geometrynode")
self.handler = module.ToolkitGeometryNodeHandler(self)
def convert_to_geometry_nodes(self):
"""
Convert all Shotgun Geometry nodes found in the current Script to regular
Geometry nodes. Additional toolkit information will be stored in
user data named 'tk_*'
"""
self.handler.convert_sg_to_geometry_nodes()
def convert_from_geometry_nodes(self):
"""
Convert all regular Geometry nodes that have previously been converted
from Shotgun Geometry nodes, back into Shotgun Geometry nodes.
"""
self.handler.convert_geometry_to_sg_nodes()
def get_nodes(self):
"""
Returns a list of hou.node objects for each tk alembic node.
Example usage::
>>> import sgtk
>>> eng = sgtk.platform.current_engine()
>>> app = eng.apps["tk-houdini-geometrynode"]
>>> tk_alembic_nodes = app.get_nodes()
"""
self.log_debug("Retrieving tk-houdini-geometrynode nodes...")
tk_houdini_geometrynode = self.import_module("tk_houdini_geometrynode")
nodes = tk_houdini_geometrynode.ToolkitGeometryNodeHandler.\
get_all_tk_geometry_nodes()
self.log_debug("Found %s tk-houdini-geometrynode nodes." % (len(nodes),))
return nodes
| 33.428571
| 81
| 0.6875
|
import sgtk
class GeometryOutputNode(sgtk.platform.Application):
def init_app(self):
module = self.import_module("tk_houdini_geometrynode")
self.handler = module.ToolkitGeometryNodeHandler(self)
def convert_to_geometry_nodes(self):
self.handler.convert_sg_to_geometry_nodes()
def convert_from_geometry_nodes(self):
self.handler.convert_geometry_to_sg_nodes()
def get_nodes(self):
self.log_debug("Retrieving tk-houdini-geometrynode nodes...")
tk_houdini_geometrynode = self.import_module("tk_houdini_geometrynode")
nodes = tk_houdini_geometrynode.ToolkitGeometryNodeHandler.\
get_all_tk_geometry_nodes()
self.log_debug("Found %s tk-houdini-geometrynode nodes." % (len(nodes),))
return nodes
| true
| true
|