Dataset schema (column name: type, observed value range):

hexsha: stringlengths 40 to 40
size: int64, 4 to 996k
ext: stringclasses, 8 values
lang: stringclasses, 1 value
max_stars_repo_path: stringlengths 4 to 245
max_stars_repo_name: stringlengths 6 to 130
max_stars_repo_head_hexsha: stringlengths 40 to 40
max_stars_repo_licenses: listlengths 1 to 10
max_stars_count: int64, 1 to 191k
max_stars_repo_stars_event_min_datetime: stringlengths 24 to 24
max_stars_repo_stars_event_max_datetime: stringlengths 24 to 24
max_issues_repo_path: stringlengths 4 to 245
max_issues_repo_name: stringlengths 6 to 130
max_issues_repo_head_hexsha: stringlengths 40 to 40
max_issues_repo_licenses: listlengths 1 to 10
max_issues_count: int64, 1 to 67k
max_issues_repo_issues_event_min_datetime: stringlengths 24 to 24
max_issues_repo_issues_event_max_datetime: stringlengths 24 to 24
max_forks_repo_path: stringlengths 4 to 245
max_forks_repo_name: stringlengths 6 to 130
max_forks_repo_head_hexsha: stringlengths 40 to 40
max_forks_repo_licenses: listlengths 1 to 10
max_forks_count: int64, 1 to 105k
max_forks_repo_forks_event_min_datetime: stringlengths 24 to 24
max_forks_repo_forks_event_max_datetime: stringlengths 24 to 24
content: stringlengths 4 to 996k
avg_line_length: float64, 1.33 to 58.2k
max_line_length: int64, 2 to 323k
alphanum_fraction: float64, 0 to 0.97
content_no_comment: stringlengths 0 to 946k
is_comment_constant_removed: bool, 2 classes
is_sharp_comment_removed: bool, 1 class
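Each record therefore carries one source file (content, content_no_comment) plus repository and popularity metadata. A minimal sketch of how such a dataset could be inspected, assuming it is published as a Hugging Face dataset; the path "org/python-code-dataset" is a placeholder, not the real identifier:

# Sketch only: stream a few records and print some of the columns listed above.
from itertools import islice

from datasets import load_dataset

ds = load_dataset("org/python-code-dataset", split="train", streaming=True)  # placeholder path
for row in islice(ds, 3):
    print(row["max_stars_repo_name"], row["size"], row["alphanum_fraction"])
    print(row["content"][:200])  # first 200 characters of the file text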
hexsha: 7902d8e807adae1a3a78bab4487a5f5792dbc4b9
size: 1,985
ext: py
lang: Python
max_stars_repo_path: step_exec_lib/utils/git.py
max_stars_repo_name: giantswarm/step-exec-lib
max_stars_repo_head_hexsha: 74866d7eb7b149256d27e75fab56d4be662a1c63
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2021-09-20T20:16:27.000Z
max_stars_repo_stars_event_max_datetime: 2021-09-20T20:16:27.000Z
max_issues_repo_path: step_exec_lib/utils/git.py
max_issues_repo_name: giantswarm/step-exec-lib
max_issues_repo_head_hexsha: 74866d7eb7b149256d27e75fab56d4be662a1c63
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: 32
max_issues_repo_issues_event_min_datetime: 2021-06-02T11:36:55.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-03T10:26:35.000Z
max_forks_repo_path: step_exec_lib/utils/git.py
max_forks_repo_name: giantswarm/step-exec-lib
max_forks_repo_head_hexsha: 74866d7eb7b149256d27e75fab56d4be662a1c63
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2021-10-20T18:42:29.000Z
max_forks_repo_forks_event_max_datetime: 2021-10-20T18:42:29.000Z
"""Module with git related utilities.""" import git class GitRepoVersionInfo: """ Provides application versions information based on the tags and commits in the repo """ def __init__(self, path: str): """ Create an instance of GitRepoVersionInfo :param path: The path to search for git information. It searches for '.git' in this folder or any parent folder. """ self._is_repo = False try: self._repo = git.Repo(path, search_parent_directories=True) self._is_repo = True except git.exc.InvalidGitRepositoryError: self._repo = None @property def is_git_repo(self) -> bool: """ Checks if the path given in constructor is a sub-path of a valid git repo. :return: Boolean true, if repo was found. """ return self._is_repo def get_git_version(self, strip_v_in_version: bool = True) -> str: """ Gets application version in the format [last-tag]-[last-commit-sha]. :param strip_v_in_version: If the version tag starts with 'v' (like 'v1.2.3), this chooses if the 'v' should be stripped, so the resulting tag is '1.2.3'. If there's a "-", "." or "_" separator after "v", it is removed as well. :return: The version string """ if not self._is_repo: raise git.exc.InvalidGitRepositoryError() tags = sorted(self._repo.tags, key=lambda t: t.commit.committed_date) latest_tag = None if len(tags) == 0 else tags[-1] ver = "0.0.0" if latest_tag is None else latest_tag.name if strip_v_in_version and ver.startswith("v"): txt_ver = ver.lstrip("v") txt_ver = txt_ver.lstrip("-_.") else: txt_ver = ver sha = self._repo.head.commit.hexsha if latest_tag is not None and sha == latest_tag.commit.hexsha: return txt_ver return f"{txt_ver}-{sha}"
avg_line_length: 37.45283
max_line_length: 112
alphanum_fraction: 0.608564
content_no_comment:
import git


class GitRepoVersionInfo:
    def __init__(self, path: str):
        self._is_repo = False
        try:
            self._repo = git.Repo(path, search_parent_directories=True)
            self._is_repo = True
        except git.exc.InvalidGitRepositoryError:
            self._repo = None

    @property
    def is_git_repo(self) -> bool:
        return self._is_repo

    def get_git_version(self, strip_v_in_version: bool = True) -> str:
        if not self._is_repo:
            raise git.exc.InvalidGitRepositoryError()
        tags = sorted(self._repo.tags, key=lambda t: t.commit.committed_date)
        latest_tag = None if len(tags) == 0 else tags[-1]
        ver = "0.0.0" if latest_tag is None else latest_tag.name
        if strip_v_in_version and ver.startswith("v"):
            txt_ver = ver.lstrip("v")
            txt_ver = txt_ver.lstrip("-_.")
        else:
            txt_ver = ver
        sha = self._repo.head.commit.hexsha
        if latest_tag is not None and sha == latest_tag.commit.hexsha:
            return txt_ver
        return f"{txt_ver}-{sha}"
is_comment_constant_removed: true
is_sharp_comment_removed: true
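A short usage sketch for the GitRepoVersionInfo class in this record; the import path follows max_stars_repo_path above, the package is assumed to be installed, and passing "." assumes the working directory sits inside a git checkout.

from step_exec_lib.utils.git import GitRepoVersionInfo

info = GitRepoVersionInfo(".")
if info.is_git_repo:
    # Prints e.g. "1.2.3" when HEAD is exactly the commit tagged v1.2.3,
    # or "1.2.3-<head sha>" when there are commits after the latest tag.
    print(info.get_git_version(strip_v_in_version=True))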
hexsha: 7902d8ebff36d5e5e69aff22bc91fb65ad7633e6
size: 1,481
ext: py
lang: Python
max_stars_repo_path: persister/observations/serializers.py
max_stars_repo_name: City-of-Helsinki/hel-data-pipe
max_stars_repo_head_hexsha: e473237cd00a54a791337ac611e99556dc37ea35
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2021-02-25T14:21:41.000Z
max_stars_repo_stars_event_max_datetime: 2021-02-25T14:21:41.000Z
max_issues_repo_path: persister/observations/serializers.py
max_issues_repo_name: City-of-Helsinki/hel-data-pipe
max_issues_repo_head_hexsha: e473237cd00a54a791337ac611e99556dc37ea35
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 9
max_issues_repo_issues_event_min_datetime: 2020-11-23T11:56:56.000Z
max_issues_repo_issues_event_max_datetime: 2021-02-25T12:20:05.000Z
max_forks_repo_path: persister/observations/serializers.py
max_forks_repo_name: City-of-Helsinki/hel-data-pipe
max_forks_repo_head_hexsha: e473237cd00a54a791337ac611e99556dc37ea35
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2021-07-25T12:16:53.000Z
max_forks_repo_forks_event_max_datetime: 2021-07-25T12:16:53.000Z
content:
from rest_framework import serializers
from rest_framework.reverse import reverse

from .models import Channel, Datasource, Value


class ChannelSerializer(serializers.HyperlinkedModelSerializer):
    values = serializers.SerializerMethodField()
    latest = serializers.SerializerMethodField()

    def get_values(self, obj):
        request = self.context.get("request")
        return request.build_absolute_uri(
            reverse(
                "datasource-channel-values-list",
                kwargs={"datasource_pk": obj.datasource_id, "channel_pk": obj.id},
            )
        )

    def get_latest(self, obj):
        request = self.context.get("request")
        return request.build_absolute_uri(
            reverse(
                "datasource-channel-values-latest",
                kwargs={"datasource_pk": obj.datasource_id, "channel_pk": obj.id},
            )
        )

    class Meta:
        model = Channel
        fields = ("id", "uniquename", "name", "values", "latest")


class ValueSerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = Value
        fields = ("id", "time", "value")


class DatasourceSerializer(serializers.HyperlinkedModelSerializer):
    channels = serializers.HyperlinkedIdentityField(
        view_name="datasource-channels-list", lookup_url_kwarg="datasource_pk"
    )

    class Meta:
        model = Datasource
        fields = ("id", "devid", "name", "description", "lat", "lon", "channels")
avg_line_length: 30.22449
max_line_length: 82
alphanum_fraction: 0.643484
content_no_comment:
from rest_framework import serializers
from rest_framework.reverse import reverse

from .models import Channel, Datasource, Value


class ChannelSerializer(serializers.HyperlinkedModelSerializer):
    values = serializers.SerializerMethodField()
    latest = serializers.SerializerMethodField()

    def get_values(self, obj):
        request = self.context.get("request")
        return request.build_absolute_uri(
            reverse(
                "datasource-channel-values-list",
                kwargs={"datasource_pk": obj.datasource_id, "channel_pk": obj.id},
            )
        )

    def get_latest(self, obj):
        request = self.context.get("request")
        return request.build_absolute_uri(
            reverse(
                "datasource-channel-values-latest",
                kwargs={"datasource_pk": obj.datasource_id, "channel_pk": obj.id},
            )
        )

    class Meta:
        model = Channel
        fields = ("id", "uniquename", "name", "values", "latest")


class ValueSerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = Value
        fields = ("id", "time", "value")


class DatasourceSerializer(serializers.HyperlinkedModelSerializer):
    channels = serializers.HyperlinkedIdentityField(
        view_name="datasource-channels-list", lookup_url_kwarg="datasource_pk"
    )

    class Meta:
        model = Datasource
        fields = ("id", "devid", "name", "description", "lat", "lon", "channels")
is_comment_constant_removed: true
is_sharp_comment_removed: true
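A minimal sketch of how the ChannelSerializer above could be used inside a Django REST Framework view; the import path mirrors the repo path (persister/observations/serializers.py), and the request object is assumed to be the one DRF hands to the view, since get_values/get_latest build absolute URIs from it.

from observations.serializers import ChannelSerializer

def channel_payload(channel, request):
    # "request" must be in the serializer context because the values/latest
    # fields call request.build_absolute_uri().
    serializer = ChannelSerializer(channel, context={"request": request})
    return serializer.data  # dict with id, uniquename, name, values, latest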
hexsha: 7902dc375978742d4c188ae2eca723f620e2598c
size: 28,296
ext: py
lang: Python
max_stars_repo_path: moto/ec2/responses/instances.py
max_stars_repo_name: adtsys-cloud/moto-aws-mock
max_stars_repo_head_hexsha: 666cb97c508fc4e8a8bb52e70e4fb43b49af6812
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: moto/ec2/responses/instances.py
max_issues_repo_name: adtsys-cloud/moto-aws-mock
max_issues_repo_head_hexsha: 666cb97c508fc4e8a8bb52e70e4fb43b49af6812
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: moto/ec2/responses/instances.py
max_forks_repo_name: adtsys-cloud/moto-aws-mock
max_forks_repo_head_hexsha: 666cb97c508fc4e8a8bb52e70e4fb43b49af6812
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2019-08-14T14:14:14.000Z
max_forks_repo_forks_event_max_datetime: 2019-08-14T14:14:14.000Z
from __future__ import unicode_literals from boto.ec2.instancetype import InstanceType from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores from moto.ec2.utils import instance_ids_from_querystring, filters_from_querystring, \ dict_from_querystring, optional_from_querystring class InstanceResponse(BaseResponse): def describe_instances(self): filter_dict = filters_from_querystring(self.querystring) instance_ids = instance_ids_from_querystring(self.querystring) if instance_ids: reservations = self.ec2_backend.get_reservations_by_instance_ids( instance_ids, filters=filter_dict) else: reservations = self.ec2_backend.all_reservations( make_copy=True, filters=filter_dict) template = self.response_template(EC2_DESCRIBE_INSTANCES) return template.render(reservations=reservations) def run_instances(self): min_count = int(self.querystring.get('MinCount', ['1'])[0]) image_id = self.querystring.get('ImageId')[0] user_data = self.querystring.get('UserData') security_group_names = self._get_multi_param('SecurityGroup') security_group_ids = self._get_multi_param('SecurityGroupId') nics = dict_from_querystring("NetworkInterface", self.querystring) instance_type = self.querystring.get("InstanceType", ["m1.small"])[0] placement = self.querystring.get( "Placement.AvailabilityZone", [None])[0] subnet_id = self.querystring.get("SubnetId", [None])[0] private_ip = self.querystring.get("PrivateIpAddress", [None])[0] associate_public_ip = self.querystring.get( "AssociatePublicIpAddress", [None])[0] key_name = self.querystring.get("KeyName", [None])[0] if self.is_not_dryrun('RunInstance'): new_reservation = self.ec2_backend.add_instances( image_id, min_count, user_data, security_group_names, instance_type=instance_type, placement=placement, subnet_id=subnet_id, key_name=key_name, security_group_ids=security_group_ids, nics=nics, private_ip=private_ip, associate_public_ip=associate_public_ip) template = self.response_template(EC2_RUN_INSTANCES) return template.render(reservation=new_reservation) def terminate_instances(self): instance_ids = instance_ids_from_querystring(self.querystring) if self.is_not_dryrun('TerminateInstance'): instances = self.ec2_backend.terminate_instances(instance_ids) template = self.response_template(EC2_TERMINATE_INSTANCES) return template.render(instances=instances) def reboot_instances(self): instance_ids = instance_ids_from_querystring(self.querystring) if self.is_not_dryrun('RebootInstance'): instances = self.ec2_backend.reboot_instances(instance_ids) template = self.response_template(EC2_REBOOT_INSTANCES) return template.render(instances=instances) def stop_instances(self): instance_ids = instance_ids_from_querystring(self.querystring) if self.is_not_dryrun('StopInstance'): instances = self.ec2_backend.stop_instances(instance_ids) template = self.response_template(EC2_STOP_INSTANCES) return template.render(instances=instances) def start_instances(self): instance_ids = instance_ids_from_querystring(self.querystring) if self.is_not_dryrun('StartInstance'): instances = self.ec2_backend.start_instances(instance_ids) template = self.response_template(EC2_START_INSTANCES) return template.render(instances=instances) def describe_instance_status(self): instance_ids = instance_ids_from_querystring(self.querystring) include_all_instances = optional_from_querystring('IncludeAllInstances', self.querystring) == 'true' if instance_ids: instances = self.ec2_backend.get_multi_instances_by_id( instance_ids) elif include_all_instances: instances = 
self.ec2_backend.all_instances() else: instances = self.ec2_backend.all_running_instances() template = self.response_template(EC2_INSTANCE_STATUS) return template.render(instances=instances) def describe_instance_types(self): instance_types = [InstanceType( name='t1.micro', cores=1, memory=644874240, disk=0)] template = self.response_template(EC2_DESCRIBE_INSTANCE_TYPES) return template.render(instance_types=instance_types) def describe_instance_attribute(self): # TODO this and modify below should raise IncorrectInstanceState if # instance not in stopped state attribute = self.querystring.get("Attribute")[0] key = camelcase_to_underscores(attribute) instance_ids = instance_ids_from_querystring(self.querystring) instance_id = instance_ids[0] instance, value = self.ec2_backend.describe_instance_attribute( instance_id, key) if key == "group_set": template = self.response_template( EC2_DESCRIBE_INSTANCE_GROUPSET_ATTRIBUTE) else: template = self.response_template(EC2_DESCRIBE_INSTANCE_ATTRIBUTE) return template.render(instance=instance, attribute=attribute, value=value) def modify_instance_attribute(self): handlers = [self._dot_value_instance_attribute_handler, self._block_device_mapping_handler, self._security_grp_instance_attribute_handler] for handler in handlers: success = handler() if success: return success msg = "This specific call to ModifyInstanceAttribute has not been" \ " implemented in Moto yet. Feel free to open an issue at" \ " https://github.com/spulec/moto/issues" raise NotImplementedError(msg) def _block_device_mapping_handler(self): """ Handles requests which are generated by code similar to: instance.modify_attribute('blockDeviceMapping', {'/dev/sda1': True}) The querystring contains information similar to: BlockDeviceMapping.1.Ebs.DeleteOnTermination : ['true'] BlockDeviceMapping.1.DeviceName : ['/dev/sda1'] For now we only support the "BlockDeviceMapping.1.Ebs.DeleteOnTermination" configuration, but it should be trivial to add anything else. 
""" mapping_counter = 1 mapping_device_name_fmt = 'BlockDeviceMapping.%s.DeviceName' mapping_del_on_term_fmt = 'BlockDeviceMapping.%s.Ebs.DeleteOnTermination' while True: mapping_device_name = mapping_device_name_fmt % mapping_counter if mapping_device_name not in self.querystring.keys(): break mapping_del_on_term = mapping_del_on_term_fmt % mapping_counter del_on_term_value_str = self.querystring[mapping_del_on_term][0] del_on_term_value = True if 'true' == del_on_term_value_str else False device_name_value = self.querystring[mapping_device_name][0] instance_ids = instance_ids_from_querystring(self.querystring) instance_id = instance_ids[0] instance = self.ec2_backend.get_instance(instance_id) if self.is_not_dryrun('ModifyInstanceAttribute'): block_device_type = instance.block_device_mapping[ device_name_value] block_device_type.delete_on_termination = del_on_term_value # +1 for the next device mapping_counter += 1 if mapping_counter > 1: return EC2_MODIFY_INSTANCE_ATTRIBUTE def _dot_value_instance_attribute_handler(self): attribute_key = None for key, value in self.querystring.items(): if '.Value' in key: attribute_key = key break if not attribute_key: return if self.is_not_dryrun('Modify' + attribute_key.split(".")[0]): value = self.querystring.get(attribute_key)[0] normalized_attribute = camelcase_to_underscores( attribute_key.split(".")[0]) instance_ids = instance_ids_from_querystring(self.querystring) instance_id = instance_ids[0] self.ec2_backend.modify_instance_attribute( instance_id, normalized_attribute, value) return EC2_MODIFY_INSTANCE_ATTRIBUTE def _security_grp_instance_attribute_handler(self): new_security_grp_list = [] for key, value in self.querystring.items(): if 'GroupId.' in key: new_security_grp_list.append(self.querystring.get(key)[0]) instance_ids = instance_ids_from_querystring(self.querystring) instance_id = instance_ids[0] if self.is_not_dryrun('ModifyInstanceSecurityGroups'): self.ec2_backend.modify_instance_security_groups( instance_id, new_security_grp_list) return EC2_MODIFY_INSTANCE_ATTRIBUTE EC2_RUN_INSTANCES = """<RunInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/"> <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId> <reservationId>{{ reservation.id }}</reservationId> <ownerId>123456789012</ownerId> <groupSet> <item> <groupId>sg-245f6a01</groupId> <groupName>default</groupName> </item> </groupSet> <instancesSet> {% for instance in reservation.instances %} <item> <instanceId>{{ instance.id }}</instanceId> <imageId>{{ instance.image_id }}</imageId> <instanceState> <code>0</code> <name>pending</name> </instanceState> <privateDnsName>{{ instance.private_dns }}</privateDnsName> <publicDnsName>{{ instance.public_dns }}</publicDnsName> <dnsName>{{ instance.public_dns }}</dnsName> <reason/> <keyName>{{ instance.key_name }}</keyName> <amiLaunchIndex>0</amiLaunchIndex> <instanceType>{{ instance.instance_type }}</instanceType> <launchTime>{{ instance.launch_time }}</launchTime> <placement> <availabilityZone>{{ instance.placement}}</availabilityZone> <groupName/> <tenancy>default</tenancy> </placement> <monitoring> <state>enabled</state> </monitoring> {% if instance.nics %} {% if instance.nics[0].subnet %} <subnetId>{{ instance.nics[0].subnet.id }}</subnetId> <vpcId>{{ instance.nics[0].subnet.vpc_id }}</vpcId> {% endif %} <privateIpAddress>{{ instance.private_ip }}</privateIpAddress> {% if instance.public_ip %} <ipAddress>{{ instance.public_ip }}</ipAddress> {% endif %} {% else %} <subnetId>{{ instance.subnet_id }}</subnetId> {% endif %} 
<sourceDestCheck>{{ instance.source_dest_check }}</sourceDestCheck> <groupSet> {% for group in instance.dynamic_group_list %} <item> <groupId>{{ group.id }}</groupId> <groupName>{{ group.name }}</groupName> </item> {% endfor %} </groupSet> {% if instance.platform %} <platform>{{ instance.platform }}</platform> {% endif %} <virtualizationType>{{ instance.virtualization_type }}</virtualizationType> <architecture>{{ instance.architecture }}</architecture> <kernelId>{{ instance.kernel }}</kernelId> <clientToken/> <hypervisor>xen</hypervisor> <ebsOptimized>false</ebsOptimized> <networkInterfaceSet> {% for nic in instance.nics.values() %} <item> <networkInterfaceId>{{ nic.id }}</networkInterfaceId> {% if nic.subnet %} <subnetId>{{ nic.subnet.id }}</subnetId> <vpcId>{{ nic.subnet.vpc_id }}</vpcId> {% endif %} <description>Primary network interface</description> <ownerId>123456789012</ownerId> <status>in-use</status> <macAddress>1b:2b:3c:4d:5e:6f</macAddress> <privateIpAddress>{{ nic.private_ip_address }}</privateIpAddress> <sourceDestCheck>{{ instance.source_dest_check }}</sourceDestCheck> <groupSet> {% for group in nic.group_set %} <item> <groupId>{{ group.id }}</groupId> <groupName>{{ group.name }}</groupName> </item> {% endfor %} </groupSet> <attachment> <attachmentId>{{ nic.attachment_id }}</attachmentId> <deviceIndex>{{ nic.device_index }}</deviceIndex> <status>attached</status> <attachTime>2015-01-01T00:00:00Z</attachTime> <deleteOnTermination>true</deleteOnTermination> </attachment> {% if nic.public_ip %} <association> <publicIp>{{ nic.public_ip }}</publicIp> <ipOwnerId>123456789012</ipOwnerId> </association> {% endif %} <privateIpAddressesSet> <item> <privateIpAddress>{{ nic.private_ip_address }}</privateIpAddress> <primary>true</primary> {% if nic.public_ip %} <association> <publicIp>{{ nic.public_ip }}</publicIp> <ipOwnerId>123456789012</ipOwnerId> </association> {% endif %} </item> </privateIpAddressesSet> </item> {% endfor %} </networkInterfaceSet> </item> {% endfor %} </instancesSet> </RunInstancesResponse>""" EC2_DESCRIBE_INSTANCES = """<DescribeInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/"> <requestId>fdcdcab1-ae5c-489e-9c33-4637c5dda355</requestId> <reservationSet> {% for reservation in reservations %} <item> <reservationId>{{ reservation.id }}</reservationId> <ownerId>123456789012</ownerId> <groupSet> {% for group in reservation.dynamic_group_list %} <item> {% if group.id %} <groupId>{{ group.id }}</groupId> <groupName>{{ group.name }}</groupName> {% else %} <groupId>{{ group }}</groupId> {% endif %} </item> {% endfor %} </groupSet> <instancesSet> {% for instance in reservation.instances %} <item> <instanceId>{{ instance.id }}</instanceId> <imageId>{{ instance.image_id }}</imageId> <instanceState> <code>{{ instance._state.code }}</code> <name>{{ instance._state.name }}</name> </instanceState> <privateDnsName>{{ instance.private_dns }}</privateDnsName> <publicDnsName>{{ instance.public_dns }}</publicDnsName> <dnsName>{{ instance.public_dns }}</dnsName> <reason>{{ instance._reason }}</reason> <keyName>{{ instance.key_name }}</keyName> <amiLaunchIndex>0</amiLaunchIndex> <productCodes/> <instanceType>{{ instance.instance_type }}</instanceType> <launchTime>{{ instance.launch_time }}</launchTime> <placement> <availabilityZone>{{ instance.placement }}</availabilityZone> <groupName/> <tenancy>default</tenancy> </placement> {% if instance.platform %} <platform>{{ instance.platform }}</platform> {% endif %} <monitoring> <state>disabled</state> </monitoring> {% if 
instance.nics %} {% if instance.nics[0].subnet %} <subnetId>{{ instance.nics[0].subnet.id }}</subnetId> <vpcId>{{ instance.nics[0].subnet.vpc_id }}</vpcId> {% endif %} <privateIpAddress>{{ instance.private_ip }}</privateIpAddress> {% if instance.nics[0].public_ip %} <ipAddress>{{ instance.nics[0].public_ip }}</ipAddress> {% endif %} {% endif %} <sourceDestCheck>{{ instance.source_dest_check }}</sourceDestCheck> <groupSet> {% for group in instance.dynamic_group_list %} <item> {% if group.id %} <groupId>{{ group.id }}</groupId> <groupName>{{ group.name }}</groupName> {% else %} <groupId>{{ group }}</groupId> {% endif %} </item> {% endfor %} </groupSet> <stateReason> <code>{{ instance._state_reason.code }}</code> <message>{{ instance._state_reason.message }}</message> </stateReason> <architecture>{{ instance.architecture }}</architecture> <kernelId>{{ instance.kernel }}</kernelId> <rootDeviceType>ebs</rootDeviceType> <rootDeviceName>/dev/sda1</rootDeviceName> <blockDeviceMapping> {% for device_name,deviceobject in instance.get_block_device_mapping %} <item> <deviceName>{{ device_name }}</deviceName> <ebs> <volumeId>{{ deviceobject.volume_id }}</volumeId> <status>{{ deviceobject.status }}</status> <attachTime>{{ deviceobject.attach_time }}</attachTime> <deleteOnTermination>{{ deviceobject.delete_on_termination }}</deleteOnTermination> <size>{{deviceobject.size}}</size> </ebs> </item> {% endfor %} </blockDeviceMapping> <virtualizationType>{{ instance.virtualization_type }}</virtualizationType> <clientToken>ABCDE1234567890123</clientToken> <tagSet> {% for tag in instance.get_tags() %} <item> <resourceId>{{ tag.resource_id }}</resourceId> <resourceType>{{ tag.resource_type }}</resourceType> <key>{{ tag.key }}</key> <value>{{ tag.value }}</value> </item> {% endfor %} </tagSet> <hypervisor>xen</hypervisor> <networkInterfaceSet> {% for nic in instance.nics.values() %} <item> <networkInterfaceId>{{ nic.id }}</networkInterfaceId> {% if nic.subnet %} <subnetId>{{ nic.subnet.id }}</subnetId> <vpcId>{{ nic.subnet.vpc_id }}</vpcId> {% endif %} <description>Primary network interface</description> <ownerId>123456789012</ownerId> <status>in-use</status> <macAddress>1b:2b:3c:4d:5e:6f</macAddress> <privateIpAddress>{{ nic.private_ip_address }}</privateIpAddress> <sourceDestCheck>{{ instance.source_dest_check }}</sourceDestCheck> <groupSet> {% for group in nic.group_set %} <item> {% if group.id %} <groupId>{{ group.id }}</groupId> <groupName>{{ group.name }}</groupName> {% else %} <groupId>{{ group }}</groupId> {% endif %} </item> {% endfor %} </groupSet> <attachment> <attachmentId>{{ nic.attachment_id }}</attachmentId> <deviceIndex>{{ nic.device_index }}</deviceIndex> <status>attached</status> <attachTime>2015-01-01T00:00:00Z</attachTime> <deleteOnTermination>true</deleteOnTermination> </attachment> {% if nic.public_ip %} <association> <publicIp>{{ nic.public_ip }}</publicIp> <ipOwnerId>123456789012</ipOwnerId> </association> {% endif %} <privateIpAddressesSet> <item> <privateIpAddress>{{ nic.private_ip_address }}</privateIpAddress> <primary>true</primary> {% if nic.public_ip %} <association> <publicIp>{{ nic.public_ip }}</publicIp> <ipOwnerId>123456789012</ipOwnerId> </association> {% endif %} </item> </privateIpAddressesSet> </item> {% endfor %} </networkInterfaceSet> </item> {% endfor %} </instancesSet> </item> {% endfor %} </reservationSet> </DescribeInstancesResponse>""" EC2_TERMINATE_INSTANCES = """ <TerminateInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/"> 
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId> <instancesSet> {% for instance in instances %} <item> <instanceId>{{ instance.id }}</instanceId> <previousState> <code>16</code> <name>running</name> </previousState> <currentState> <code>32</code> <name>shutting-down</name> </currentState> </item> {% endfor %} </instancesSet> </TerminateInstancesResponse>""" EC2_STOP_INSTANCES = """ <StopInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/"> <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId> <instancesSet> {% for instance in instances %} <item> <instanceId>{{ instance.id }}</instanceId> <previousState> <code>16</code> <name>running</name> </previousState> <currentState> <code>64</code> <name>stopping</name> </currentState> </item> {% endfor %} </instancesSet> </StopInstancesResponse>""" EC2_START_INSTANCES = """ <StartInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/"> <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId> <instancesSet> {% for instance in instances %} <item> <instanceId>{{ instance.id }}</instanceId> <previousState> <code>16</code> <name>running</name> </previousState> <currentState> <code>0</code> <name>pending</name> </currentState> </item> {% endfor %} </instancesSet> </StartInstancesResponse>""" EC2_REBOOT_INSTANCES = """<RebootInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/"> <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId> <return>true</return> </RebootInstancesResponse>""" EC2_DESCRIBE_INSTANCE_ATTRIBUTE = """<DescribeInstanceAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/"> <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId> <instanceId>{{ instance.id }}</instanceId> <{{ attribute }}> <value>{{ value }}</value> </{{ attribute }}> </DescribeInstanceAttributeResponse>""" EC2_DESCRIBE_INSTANCE_GROUPSET_ATTRIBUTE = """<DescribeInstanceAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/"> <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId> <instanceId>{{ instance.id }}</instanceId> <{{ attribute }}> {% for sg_id in value %} <item> <groupId>{{ sg_id }}</groupId> </item> {% endfor %} </{{ attribute }}> </DescribeInstanceAttributeResponse>""" EC2_MODIFY_INSTANCE_ATTRIBUTE = """<ModifyInstanceAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/"> <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId> <return>true</return> </ModifyInstanceAttributeResponse>""" EC2_INSTANCE_STATUS = """<?xml version="1.0" encoding="UTF-8"?> <DescribeInstanceStatusResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/"> <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId> <instanceStatusSet> {% for instance in instances %} <item> <instanceId>{{ instance.id }}</instanceId> <availabilityZone>{{ instance.placement }}</availabilityZone> <instanceState> <code>{{ instance.state_code }}</code> <name>{{ instance.state }}</name> </instanceState> {% if instance.state_code == 16 %} <systemStatus> <status>ok</status> <details> <item> <name>reachability</name> <status>passed</status> </item> </details> </systemStatus> <instanceStatus> <status>ok</status> <details> <item> <name>reachability</name> <status>passed</status> </item> </details> </instanceStatus> {% else %} <systemStatus> <status>not-applicable</status> </systemStatus> <instanceStatus> <status>not-applicable</status> </instanceStatus> {% endif %} </item> {% endfor %} </instanceStatusSet> </DescribeInstanceStatusResponse>""" EC2_DESCRIBE_INSTANCE_TYPES = """<?xml version="1.0" 
encoding="UTF-8"?> <DescribeInstanceTypesResponse xmlns="http://api.outscale.com/wsdl/fcuext/2014-04-15/"> <requestId>f8b86168-d034-4e65-b48d-3b84c78e64af</requestId> <instanceTypeSet> {% for instance_type in instance_types %} <item> <name>{{ instance_type.name }}</name> <vcpu>{{ instance_type.cores }}</vcpu> <memory>{{ instance_type.memory }}</memory> <storageSize>{{ instance_type.disk }}</storageSize> <storageCount>{{ instance_type.storageCount }}</storageCount> <maxIpAddresses>{{ instance_type.maxIpAddresses }}</maxIpAddresses> <ebsOptimizedAvailable>{{ instance_type.ebsOptimizedAvailable }}</ebsOptimizedAvailable> </item> {% endfor %} </instanceTypeSet> </DescribeInstanceTypesResponse>"""
avg_line_length: 43.666667
max_line_length: 130
alphanum_fraction: 0.555556
from __future__ import unicode_literals from boto.ec2.instancetype import InstanceType from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores from moto.ec2.utils import instance_ids_from_querystring, filters_from_querystring, \ dict_from_querystring, optional_from_querystring class InstanceResponse(BaseResponse): def describe_instances(self): filter_dict = filters_from_querystring(self.querystring) instance_ids = instance_ids_from_querystring(self.querystring) if instance_ids: reservations = self.ec2_backend.get_reservations_by_instance_ids( instance_ids, filters=filter_dict) else: reservations = self.ec2_backend.all_reservations( make_copy=True, filters=filter_dict) template = self.response_template(EC2_DESCRIBE_INSTANCES) return template.render(reservations=reservations) def run_instances(self): min_count = int(self.querystring.get('MinCount', ['1'])[0]) image_id = self.querystring.get('ImageId')[0] user_data = self.querystring.get('UserData') security_group_names = self._get_multi_param('SecurityGroup') security_group_ids = self._get_multi_param('SecurityGroupId') nics = dict_from_querystring("NetworkInterface", self.querystring) instance_type = self.querystring.get("InstanceType", ["m1.small"])[0] placement = self.querystring.get( "Placement.AvailabilityZone", [None])[0] subnet_id = self.querystring.get("SubnetId", [None])[0] private_ip = self.querystring.get("PrivateIpAddress", [None])[0] associate_public_ip = self.querystring.get( "AssociatePublicIpAddress", [None])[0] key_name = self.querystring.get("KeyName", [None])[0] if self.is_not_dryrun('RunInstance'): new_reservation = self.ec2_backend.add_instances( image_id, min_count, user_data, security_group_names, instance_type=instance_type, placement=placement, subnet_id=subnet_id, key_name=key_name, security_group_ids=security_group_ids, nics=nics, private_ip=private_ip, associate_public_ip=associate_public_ip) template = self.response_template(EC2_RUN_INSTANCES) return template.render(reservation=new_reservation) def terminate_instances(self): instance_ids = instance_ids_from_querystring(self.querystring) if self.is_not_dryrun('TerminateInstance'): instances = self.ec2_backend.terminate_instances(instance_ids) template = self.response_template(EC2_TERMINATE_INSTANCES) return template.render(instances=instances) def reboot_instances(self): instance_ids = instance_ids_from_querystring(self.querystring) if self.is_not_dryrun('RebootInstance'): instances = self.ec2_backend.reboot_instances(instance_ids) template = self.response_template(EC2_REBOOT_INSTANCES) return template.render(instances=instances) def stop_instances(self): instance_ids = instance_ids_from_querystring(self.querystring) if self.is_not_dryrun('StopInstance'): instances = self.ec2_backend.stop_instances(instance_ids) template = self.response_template(EC2_STOP_INSTANCES) return template.render(instances=instances) def start_instances(self): instance_ids = instance_ids_from_querystring(self.querystring) if self.is_not_dryrun('StartInstance'): instances = self.ec2_backend.start_instances(instance_ids) template = self.response_template(EC2_START_INSTANCES) return template.render(instances=instances) def describe_instance_status(self): instance_ids = instance_ids_from_querystring(self.querystring) include_all_instances = optional_from_querystring('IncludeAllInstances', self.querystring) == 'true' if instance_ids: instances = self.ec2_backend.get_multi_instances_by_id( instance_ids) elif include_all_instances: instances = 
self.ec2_backend.all_instances() else: instances = self.ec2_backend.all_running_instances() template = self.response_template(EC2_INSTANCE_STATUS) return template.render(instances=instances) def describe_instance_types(self): instance_types = [InstanceType( name='t1.micro', cores=1, memory=644874240, disk=0)] template = self.response_template(EC2_DESCRIBE_INSTANCE_TYPES) return template.render(instance_types=instance_types) def describe_instance_attribute(self): attribute = self.querystring.get("Attribute")[0] key = camelcase_to_underscores(attribute) instance_ids = instance_ids_from_querystring(self.querystring) instance_id = instance_ids[0] instance, value = self.ec2_backend.describe_instance_attribute( instance_id, key) if key == "group_set": template = self.response_template( EC2_DESCRIBE_INSTANCE_GROUPSET_ATTRIBUTE) else: template = self.response_template(EC2_DESCRIBE_INSTANCE_ATTRIBUTE) return template.render(instance=instance, attribute=attribute, value=value) def modify_instance_attribute(self): handlers = [self._dot_value_instance_attribute_handler, self._block_device_mapping_handler, self._security_grp_instance_attribute_handler] for handler in handlers: success = handler() if success: return success msg = "This specific call to ModifyInstanceAttribute has not been" \ " implemented in Moto yet. Feel free to open an issue at" \ " https://github.com/spulec/moto/issues" raise NotImplementedError(msg) def _block_device_mapping_handler(self): mapping_counter = 1 mapping_device_name_fmt = 'BlockDeviceMapping.%s.DeviceName' mapping_del_on_term_fmt = 'BlockDeviceMapping.%s.Ebs.DeleteOnTermination' while True: mapping_device_name = mapping_device_name_fmt % mapping_counter if mapping_device_name not in self.querystring.keys(): break mapping_del_on_term = mapping_del_on_term_fmt % mapping_counter del_on_term_value_str = self.querystring[mapping_del_on_term][0] del_on_term_value = True if 'true' == del_on_term_value_str else False device_name_value = self.querystring[mapping_device_name][0] instance_ids = instance_ids_from_querystring(self.querystring) instance_id = instance_ids[0] instance = self.ec2_backend.get_instance(instance_id) if self.is_not_dryrun('ModifyInstanceAttribute'): block_device_type = instance.block_device_mapping[ device_name_value] block_device_type.delete_on_termination = del_on_term_value mapping_counter += 1 if mapping_counter > 1: return EC2_MODIFY_INSTANCE_ATTRIBUTE def _dot_value_instance_attribute_handler(self): attribute_key = None for key, value in self.querystring.items(): if '.Value' in key: attribute_key = key break if not attribute_key: return if self.is_not_dryrun('Modify' + attribute_key.split(".")[0]): value = self.querystring.get(attribute_key)[0] normalized_attribute = camelcase_to_underscores( attribute_key.split(".")[0]) instance_ids = instance_ids_from_querystring(self.querystring) instance_id = instance_ids[0] self.ec2_backend.modify_instance_attribute( instance_id, normalized_attribute, value) return EC2_MODIFY_INSTANCE_ATTRIBUTE def _security_grp_instance_attribute_handler(self): new_security_grp_list = [] for key, value in self.querystring.items(): if 'GroupId.' 
in key: new_security_grp_list.append(self.querystring.get(key)[0]) instance_ids = instance_ids_from_querystring(self.querystring) instance_id = instance_ids[0] if self.is_not_dryrun('ModifyInstanceSecurityGroups'): self.ec2_backend.modify_instance_security_groups( instance_id, new_security_grp_list) return EC2_MODIFY_INSTANCE_ATTRIBUTE EC2_RUN_INSTANCES = """<RunInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/"> <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId> <reservationId>{{ reservation.id }}</reservationId> <ownerId>123456789012</ownerId> <groupSet> <item> <groupId>sg-245f6a01</groupId> <groupName>default</groupName> </item> </groupSet> <instancesSet> {% for instance in reservation.instances %} <item> <instanceId>{{ instance.id }}</instanceId> <imageId>{{ instance.image_id }}</imageId> <instanceState> <code>0</code> <name>pending</name> </instanceState> <privateDnsName>{{ instance.private_dns }}</privateDnsName> <publicDnsName>{{ instance.public_dns }}</publicDnsName> <dnsName>{{ instance.public_dns }}</dnsName> <reason/> <keyName>{{ instance.key_name }}</keyName> <amiLaunchIndex>0</amiLaunchIndex> <instanceType>{{ instance.instance_type }}</instanceType> <launchTime>{{ instance.launch_time }}</launchTime> <placement> <availabilityZone>{{ instance.placement}}</availabilityZone> <groupName/> <tenancy>default</tenancy> </placement> <monitoring> <state>enabled</state> </monitoring> {% if instance.nics %} {% if instance.nics[0].subnet %} <subnetId>{{ instance.nics[0].subnet.id }}</subnetId> <vpcId>{{ instance.nics[0].subnet.vpc_id }}</vpcId> {% endif %} <privateIpAddress>{{ instance.private_ip }}</privateIpAddress> {% if instance.public_ip %} <ipAddress>{{ instance.public_ip }}</ipAddress> {% endif %} {% else %} <subnetId>{{ instance.subnet_id }}</subnetId> {% endif %} <sourceDestCheck>{{ instance.source_dest_check }}</sourceDestCheck> <groupSet> {% for group in instance.dynamic_group_list %} <item> <groupId>{{ group.id }}</groupId> <groupName>{{ group.name }}</groupName> </item> {% endfor %} </groupSet> {% if instance.platform %} <platform>{{ instance.platform }}</platform> {% endif %} <virtualizationType>{{ instance.virtualization_type }}</virtualizationType> <architecture>{{ instance.architecture }}</architecture> <kernelId>{{ instance.kernel }}</kernelId> <clientToken/> <hypervisor>xen</hypervisor> <ebsOptimized>false</ebsOptimized> <networkInterfaceSet> {% for nic in instance.nics.values() %} <item> <networkInterfaceId>{{ nic.id }}</networkInterfaceId> {% if nic.subnet %} <subnetId>{{ nic.subnet.id }}</subnetId> <vpcId>{{ nic.subnet.vpc_id }}</vpcId> {% endif %} <description>Primary network interface</description> <ownerId>123456789012</ownerId> <status>in-use</status> <macAddress>1b:2b:3c:4d:5e:6f</macAddress> <privateIpAddress>{{ nic.private_ip_address }}</privateIpAddress> <sourceDestCheck>{{ instance.source_dest_check }}</sourceDestCheck> <groupSet> {% for group in nic.group_set %} <item> <groupId>{{ group.id }}</groupId> <groupName>{{ group.name }}</groupName> </item> {% endfor %} </groupSet> <attachment> <attachmentId>{{ nic.attachment_id }}</attachmentId> <deviceIndex>{{ nic.device_index }}</deviceIndex> <status>attached</status> <attachTime>2015-01-01T00:00:00Z</attachTime> <deleteOnTermination>true</deleteOnTermination> </attachment> {% if nic.public_ip %} <association> <publicIp>{{ nic.public_ip }}</publicIp> <ipOwnerId>123456789012</ipOwnerId> </association> {% endif %} <privateIpAddressesSet> <item> <privateIpAddress>{{ nic.private_ip_address 
}}</privateIpAddress> <primary>true</primary> {% if nic.public_ip %} <association> <publicIp>{{ nic.public_ip }}</publicIp> <ipOwnerId>123456789012</ipOwnerId> </association> {% endif %} </item> </privateIpAddressesSet> </item> {% endfor %} </networkInterfaceSet> </item> {% endfor %} </instancesSet> </RunInstancesResponse>""" EC2_DESCRIBE_INSTANCES = """<DescribeInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/"> <requestId>fdcdcab1-ae5c-489e-9c33-4637c5dda355</requestId> <reservationSet> {% for reservation in reservations %} <item> <reservationId>{{ reservation.id }}</reservationId> <ownerId>123456789012</ownerId> <groupSet> {% for group in reservation.dynamic_group_list %} <item> {% if group.id %} <groupId>{{ group.id }}</groupId> <groupName>{{ group.name }}</groupName> {% else %} <groupId>{{ group }}</groupId> {% endif %} </item> {% endfor %} </groupSet> <instancesSet> {% for instance in reservation.instances %} <item> <instanceId>{{ instance.id }}</instanceId> <imageId>{{ instance.image_id }}</imageId> <instanceState> <code>{{ instance._state.code }}</code> <name>{{ instance._state.name }}</name> </instanceState> <privateDnsName>{{ instance.private_dns }}</privateDnsName> <publicDnsName>{{ instance.public_dns }}</publicDnsName> <dnsName>{{ instance.public_dns }}</dnsName> <reason>{{ instance._reason }}</reason> <keyName>{{ instance.key_name }}</keyName> <amiLaunchIndex>0</amiLaunchIndex> <productCodes/> <instanceType>{{ instance.instance_type }}</instanceType> <launchTime>{{ instance.launch_time }}</launchTime> <placement> <availabilityZone>{{ instance.placement }}</availabilityZone> <groupName/> <tenancy>default</tenancy> </placement> {% if instance.platform %} <platform>{{ instance.platform }}</platform> {% endif %} <monitoring> <state>disabled</state> </monitoring> {% if instance.nics %} {% if instance.nics[0].subnet %} <subnetId>{{ instance.nics[0].subnet.id }}</subnetId> <vpcId>{{ instance.nics[0].subnet.vpc_id }}</vpcId> {% endif %} <privateIpAddress>{{ instance.private_ip }}</privateIpAddress> {% if instance.nics[0].public_ip %} <ipAddress>{{ instance.nics[0].public_ip }}</ipAddress> {% endif %} {% endif %} <sourceDestCheck>{{ instance.source_dest_check }}</sourceDestCheck> <groupSet> {% for group in instance.dynamic_group_list %} <item> {% if group.id %} <groupId>{{ group.id }}</groupId> <groupName>{{ group.name }}</groupName> {% else %} <groupId>{{ group }}</groupId> {% endif %} </item> {% endfor %} </groupSet> <stateReason> <code>{{ instance._state_reason.code }}</code> <message>{{ instance._state_reason.message }}</message> </stateReason> <architecture>{{ instance.architecture }}</architecture> <kernelId>{{ instance.kernel }}</kernelId> <rootDeviceType>ebs</rootDeviceType> <rootDeviceName>/dev/sda1</rootDeviceName> <blockDeviceMapping> {% for device_name,deviceobject in instance.get_block_device_mapping %} <item> <deviceName>{{ device_name }}</deviceName> <ebs> <volumeId>{{ deviceobject.volume_id }}</volumeId> <status>{{ deviceobject.status }}</status> <attachTime>{{ deviceobject.attach_time }}</attachTime> <deleteOnTermination>{{ deviceobject.delete_on_termination }}</deleteOnTermination> <size>{{deviceobject.size}}</size> </ebs> </item> {% endfor %} </blockDeviceMapping> <virtualizationType>{{ instance.virtualization_type }}</virtualizationType> <clientToken>ABCDE1234567890123</clientToken> <tagSet> {% for tag in instance.get_tags() %} <item> <resourceId>{{ tag.resource_id }}</resourceId> <resourceType>{{ tag.resource_type }}</resourceType> <key>{{ tag.key 
}}</key> <value>{{ tag.value }}</value> </item> {% endfor %} </tagSet> <hypervisor>xen</hypervisor> <networkInterfaceSet> {% for nic in instance.nics.values() %} <item> <networkInterfaceId>{{ nic.id }}</networkInterfaceId> {% if nic.subnet %} <subnetId>{{ nic.subnet.id }}</subnetId> <vpcId>{{ nic.subnet.vpc_id }}</vpcId> {% endif %} <description>Primary network interface</description> <ownerId>123456789012</ownerId> <status>in-use</status> <macAddress>1b:2b:3c:4d:5e:6f</macAddress> <privateIpAddress>{{ nic.private_ip_address }}</privateIpAddress> <sourceDestCheck>{{ instance.source_dest_check }}</sourceDestCheck> <groupSet> {% for group in nic.group_set %} <item> {% if group.id %} <groupId>{{ group.id }}</groupId> <groupName>{{ group.name }}</groupName> {% else %} <groupId>{{ group }}</groupId> {% endif %} </item> {% endfor %} </groupSet> <attachment> <attachmentId>{{ nic.attachment_id }}</attachmentId> <deviceIndex>{{ nic.device_index }}</deviceIndex> <status>attached</status> <attachTime>2015-01-01T00:00:00Z</attachTime> <deleteOnTermination>true</deleteOnTermination> </attachment> {% if nic.public_ip %} <association> <publicIp>{{ nic.public_ip }}</publicIp> <ipOwnerId>123456789012</ipOwnerId> </association> {% endif %} <privateIpAddressesSet> <item> <privateIpAddress>{{ nic.private_ip_address }}</privateIpAddress> <primary>true</primary> {% if nic.public_ip %} <association> <publicIp>{{ nic.public_ip }}</publicIp> <ipOwnerId>123456789012</ipOwnerId> </association> {% endif %} </item> </privateIpAddressesSet> </item> {% endfor %} </networkInterfaceSet> </item> {% endfor %} </instancesSet> </item> {% endfor %} </reservationSet> </DescribeInstancesResponse>""" EC2_TERMINATE_INSTANCES = """ <TerminateInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/"> <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId> <instancesSet> {% for instance in instances %} <item> <instanceId>{{ instance.id }}</instanceId> <previousState> <code>16</code> <name>running</name> </previousState> <currentState> <code>32</code> <name>shutting-down</name> </currentState> </item> {% endfor %} </instancesSet> </TerminateInstancesResponse>""" EC2_STOP_INSTANCES = """ <StopInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/"> <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId> <instancesSet> {% for instance in instances %} <item> <instanceId>{{ instance.id }}</instanceId> <previousState> <code>16</code> <name>running</name> </previousState> <currentState> <code>64</code> <name>stopping</name> </currentState> </item> {% endfor %} </instancesSet> </StopInstancesResponse>""" EC2_START_INSTANCES = """ <StartInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/"> <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId> <instancesSet> {% for instance in instances %} <item> <instanceId>{{ instance.id }}</instanceId> <previousState> <code>16</code> <name>running</name> </previousState> <currentState> <code>0</code> <name>pending</name> </currentState> </item> {% endfor %} </instancesSet> </StartInstancesResponse>""" EC2_REBOOT_INSTANCES = """<RebootInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/"> <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId> <return>true</return> </RebootInstancesResponse>""" EC2_DESCRIBE_INSTANCE_ATTRIBUTE = """<DescribeInstanceAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/"> <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId> <instanceId>{{ instance.id }}</instanceId> <{{ attribute }}> <value>{{ 
value }}</value> </{{ attribute }}> </DescribeInstanceAttributeResponse>""" EC2_DESCRIBE_INSTANCE_GROUPSET_ATTRIBUTE = """<DescribeInstanceAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/"> <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId> <instanceId>{{ instance.id }}</instanceId> <{{ attribute }}> {% for sg_id in value %} <item> <groupId>{{ sg_id }}</groupId> </item> {% endfor %} </{{ attribute }}> </DescribeInstanceAttributeResponse>""" EC2_MODIFY_INSTANCE_ATTRIBUTE = """<ModifyInstanceAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/"> <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId> <return>true</return> </ModifyInstanceAttributeResponse>""" EC2_INSTANCE_STATUS = """<?xml version="1.0" encoding="UTF-8"?> <DescribeInstanceStatusResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/"> <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId> <instanceStatusSet> {% for instance in instances %} <item> <instanceId>{{ instance.id }}</instanceId> <availabilityZone>{{ instance.placement }}</availabilityZone> <instanceState> <code>{{ instance.state_code }}</code> <name>{{ instance.state }}</name> </instanceState> {% if instance.state_code == 16 %} <systemStatus> <status>ok</status> <details> <item> <name>reachability</name> <status>passed</status> </item> </details> </systemStatus> <instanceStatus> <status>ok</status> <details> <item> <name>reachability</name> <status>passed</status> </item> </details> </instanceStatus> {% else %} <systemStatus> <status>not-applicable</status> </systemStatus> <instanceStatus> <status>not-applicable</status> </instanceStatus> {% endif %} </item> {% endfor %} </instanceStatusSet> </DescribeInstanceStatusResponse>""" EC2_DESCRIBE_INSTANCE_TYPES = """<?xml version="1.0" encoding="UTF-8"?> <DescribeInstanceTypesResponse xmlns="http://api.outscale.com/wsdl/fcuext/2014-04-15/"> <requestId>f8b86168-d034-4e65-b48d-3b84c78e64af</requestId> <instanceTypeSet> {% for instance_type in instance_types %} <item> <name>{{ instance_type.name }}</name> <vcpu>{{ instance_type.cores }}</vcpu> <memory>{{ instance_type.memory }}</memory> <storageSize>{{ instance_type.disk }}</storageSize> <storageCount>{{ instance_type.storageCount }}</storageCount> <maxIpAddresses>{{ instance_type.maxIpAddresses }}</maxIpAddresses> <ebsOptimizedAvailable>{{ instance_type.ebsOptimizedAvailable }}</ebsOptimizedAvailable> </item> {% endfor %} </instanceTypeSet> </DescribeInstanceTypesResponse>"""
is_comment_constant_removed: true
is_sharp_comment_removed: true
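A hedged sketch of exercising the RunInstances/DescribeInstances handlers from this record through moto's EC2 mock together with boto3; it assumes a moto build that still ships the mock_ec2 decorator, and the AMI id is a dummy value.

import boto3
from moto import mock_ec2

@mock_ec2
def launch_and_list():
    ec2 = boto3.client("ec2", region_name="us-east-1")
    # Served by the run_instances handler and EC2_RUN_INSTANCES template above.
    ec2.run_instances(ImageId="ami-12345678", MinCount=1, MaxCount=1)
    # Served by describe_instances and EC2_DESCRIBE_INSTANCES.
    reservations = ec2.describe_instances()["Reservations"]
    return [i["InstanceId"] for r in reservations for i in r["Instances"]]

print(launch_and_list())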
hexsha: 7902dc777d5ed3f5fa0b3b74cc379243016a2542
size: 1,362
ext: py
lang: Python
max_stars_repo_path: pcat2py/class/20de4144-5cc5-11e4-af55-00155d01fe08.py
max_stars_repo_name: phnomcobra/PCAT2PY
max_stars_repo_head_hexsha: 937c3b365cdc5ac69b78f59070be0a21bdb53db0
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: pcat2py/class/20de4144-5cc5-11e4-af55-00155d01fe08.py
max_issues_repo_name: phnomcobra/PCAT2PY
max_issues_repo_head_hexsha: 937c3b365cdc5ac69b78f59070be0a21bdb53db0
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: pcat2py/class/20de4144-5cc5-11e4-af55-00155d01fe08.py
max_forks_repo_name: phnomcobra/PCAT2PY
max_forks_repo_head_hexsha: 937c3b365cdc5ac69b78f59070be0a21bdb53db0
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
#!/usr/bin/python
################################################################################
# 20de4144-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################

class Finding:
    def __init__(self):
        self.output = []
        self.is_compliant = False
        self.uuid = "20de4144-5cc5-11e4-af55-00155d01fe08"

    def check(self, cli):
        # Initialize Compliance
        self.is_compliant = False

        # Get Registry DWORD
        dword = cli.get_reg_dword(r'HKLM:\Software\Policies\Microsoft\EMET\SysSettings', 'ASLR')

        # Output Lines
        self.output = [r'HKLM:\Software\Policies\Microsoft\EMET\SysSettings', ('ASLR=' + str(dword))]

        if dword == 3:
            self.is_compliant = True

        return self.is_compliant

    def fix(self, cli):
        cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft'")
        cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft\EMET'")
        cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft\EMET\SysSettings'")
        cli.powershell(r"Set-ItemProperty -path 'HKLM:\Software\Policies\Microsoft\EMET\SysSettings' -name 'ASLR' -value 3 -Type DWord")
avg_line_length: 35.842105
max_line_length: 136
alphanum_fraction: 0.586637
is_comment_constant_removed: true
is_sharp_comment_removed: true
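A purely illustrative sketch of driving the Finding class in this record; StubCli is a made-up stand-in for the registry/PowerShell helper object that the real PCAT2PY framework passes in as cli.

class StubCli:
    def get_reg_dword(self, key, name):
        return 3  # pretend the ASLR DWORD already holds the compliant value

    def powershell(self, command):
        print("would run:", command)

finding = Finding()
print(finding.check(StubCli()))   # True, because the stub returns 3
finding.fix(StubCli())            # prints the PowerShell commands it would issue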
hexsha: 7902dca67a3ff3802d9c0e607da4999164f9281f
size: 30
ext: py
lang: Python
max_stars_repo_path: __init__.py
max_stars_repo_name: captainalan/linglosspy
max_stars_repo_head_hexsha: 3043478d2b4ac3071f80355736a3c72c56b75a81
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: __init__.py
max_issues_repo_name: captainalan/linglosspy
max_issues_repo_head_hexsha: 3043478d2b4ac3071f80355736a3c72c56b75a81
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: __init__.py
max_forks_repo_name: captainalan/linglosspy
max_forks_repo_head_hexsha: 3043478d2b4ac3071f80355736a3c72c56b75a81
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content: from lingloss import Lingloss
avg_line_length: 15
max_line_length: 29
alphanum_fraction: 0.866667
content_no_comment: from lingloss import Lingloss
is_comment_constant_removed: true
is_sharp_comment_removed: true
hexsha: 7902dd0410910126b5adddb1a52fce449cbc5d4b
size: 32,348
ext: py
lang: Python
max_stars_repo_path: pandas/core/arrays/period.py
max_stars_repo_name: aidanmontare-edu/pandas
max_stars_repo_head_hexsha: 41aac9f2bccfc9b20cb2e9d0c839d8b7393e2b08
max_stars_repo_licenses: [ "PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "MIT-0", "ECL-2.0", "BSD-3-Clause" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2020-06-24T09:01:49.000Z
max_stars_repo_stars_event_max_datetime: 2020-06-24T09:01:49.000Z
max_issues_repo_path: pandas/core/arrays/period.py
max_issues_repo_name: aidanmontare-edu/pandas
max_issues_repo_head_hexsha: 41aac9f2bccfc9b20cb2e9d0c839d8b7393e2b08
max_issues_repo_licenses: [ "PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "MIT-0", "ECL-2.0", "BSD-3-Clause" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: pandas/core/arrays/period.py
max_forks_repo_name: aidanmontare-edu/pandas
max_forks_repo_head_hexsha: 41aac9f2bccfc9b20cb2e9d0c839d8b7393e2b08
max_forks_repo_licenses: [ "PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "MIT-0", "ECL-2.0", "BSD-3-Clause" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
from datetime import timedelta import operator from typing import Any, Callable, List, Optional, Sequence, Type, Union import numpy as np from pandas._libs.tslibs import ( NaT, NaTType, Timedelta, delta_to_nanoseconds, iNaT, period as libperiod, to_offset, ) from pandas._libs.tslibs.dtypes import FreqGroup from pandas._libs.tslibs.fields import isleapyear_arr from pandas._libs.tslibs.offsets import Tick, delta_to_tick from pandas._libs.tslibs.period import ( DIFFERENT_FREQ, IncompatibleFrequency, Period, PeriodMixin, get_period_field_arr, period_asfreq_arr, ) from pandas._typing import AnyArrayLike from pandas.util._decorators import cache_readonly from pandas.core.dtypes.common import ( TD64NS_DTYPE, ensure_object, is_datetime64_dtype, is_float_dtype, is_period_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.generic import ( ABCIndexClass, ABCPeriodIndex, ABCSeries, ABCTimedeltaArray, ) from pandas.core.dtypes.missing import isna, notna import pandas.core.algorithms as algos from pandas.core.arrays import datetimelike as dtl import pandas.core.common as com from pandas.tseries.offsets import DateOffset def _field_accessor(name: str, docstring=None): def f(self): base = self.freq._period_dtype_code result = get_period_field_arr(name, self.asi8, base) return result f.__name__ = name f.__doc__ = docstring return property(f) class PeriodArray(PeriodMixin, dtl.DatetimeLikeArrayMixin, dtl.DatelikeOps): """ Pandas ExtensionArray for storing Period data. Users should use :func:`period_array` to create new instances. Parameters ---------- values : Union[PeriodArray, Series[period], ndarray[int], PeriodIndex] The data to store. These should be arrays that can be directly converted to ordinals without inference or copy (PeriodArray, ndarray[int64]), or a box around such an array (Series[period], PeriodIndex). freq : str or DateOffset The `freq` to use for the array. Mostly applicable when `values` is an ndarray of integers, when `freq` is required. When `values` is a PeriodArray (or box around), it's checked that ``values.freq`` matches `freq`. dtype : PeriodDtype, optional A PeriodDtype instance from which to extract a `freq`. If both `freq` and `dtype` are specified, then the frequencies must match. copy : bool, default False Whether to copy the ordinals before storing. Attributes ---------- None Methods ------- None See Also -------- period_array : Create a new PeriodArray. PeriodIndex : Immutable Index for period data. Notes ----- There are two components to a PeriodArray - ordinals : integer ndarray - freq : pd.tseries.offsets.Offset The values are physically stored as a 1-D ndarray of integers. These are called "ordinals" and represent some kind of offset from a base. The `freq` indicates the span covered by each element of the array. All elements in the PeriodArray have the same `freq`. 
""" # array priority higher than numpy scalars __array_priority__ = 1000 _typ = "periodarray" # ABCPeriodArray _scalar_type = Period _recognized_scalars = (Period,) _is_recognized_dtype = is_period_dtype # Names others delegate to us _other_ops: List[str] = [] _bool_ops = ["is_leap_year"] _object_ops = ["start_time", "end_time", "freq"] _field_ops = [ "year", "month", "day", "hour", "minute", "second", "weekofyear", "weekday", "week", "dayofweek", "dayofyear", "quarter", "qyear", "days_in_month", "daysinmonth", ] _datetimelike_ops = _field_ops + _object_ops + _bool_ops _datetimelike_methods = ["strftime", "to_timestamp", "asfreq"] # -------------------------------------------------------------------- # Constructors def __init__(self, values, freq=None, dtype=None, copy=False): freq = validate_dtype_freq(dtype, freq) if freq is not None: freq = Period._maybe_convert_freq(freq) if isinstance(values, ABCSeries): values = values._values if not isinstance(values, type(self)): raise TypeError("Incorrect dtype") elif isinstance(values, ABCPeriodIndex): values = values._values if isinstance(values, type(self)): if freq is not None and freq != values.freq: raise raise_on_incompatible(values, freq) values, freq = values._data, values.freq values = np.array(values, dtype="int64", copy=copy) self._data = values if freq is None: raise ValueError("freq is not specified and cannot be inferred") self._dtype = PeriodDtype(freq) @classmethod def _simple_new(cls, values: np.ndarray, freq=None, **kwargs) -> "PeriodArray": # alias for PeriodArray.__init__ assertion_msg = "Should be numpy array of type i8" assert isinstance(values, np.ndarray) and values.dtype == "i8", assertion_msg return cls(values, freq=freq, **kwargs) @classmethod def _from_sequence( cls: Type["PeriodArray"], scalars: Union[Sequence[Optional[Period]], AnyArrayLike], dtype: Optional[PeriodDtype] = None, copy: bool = False, ) -> "PeriodArray": if dtype: freq = dtype.freq else: freq = None if isinstance(scalars, cls): validate_dtype_freq(scalars.dtype, freq) if copy: scalars = scalars.copy() return scalars periods = np.asarray(scalars, dtype=object) if copy: periods = periods.copy() freq = freq or libperiod.extract_freq(periods) ordinals = libperiod.extract_ordinals(periods, freq) return cls(ordinals, freq=freq) @classmethod def _from_sequence_of_strings( cls, strings, dtype=None, copy=False ) -> "PeriodArray": return cls._from_sequence(strings, dtype, copy) @classmethod def _from_datetime64(cls, data, freq, tz=None) -> "PeriodArray": """ Construct a PeriodArray from a datetime64 array Parameters ---------- data : ndarray[datetime64[ns], datetime64[ns, tz]] freq : str or Tick tz : tzinfo, optional Returns ------- PeriodArray[freq] """ data, freq = dt64arr_to_periodarr(data, freq, tz) return cls(data, freq=freq) @classmethod def _generate_range(cls, start, end, periods, freq, fields): periods = dtl.validate_periods(periods) if freq is not None: freq = Period._maybe_convert_freq(freq) field_count = len(fields) if start is not None or end is not None: if field_count > 0: raise ValueError( "Can either instantiate from fields or endpoints, but not both" ) subarr, freq = _get_ordinal_range(start, end, periods, freq) elif field_count > 0: subarr, freq = _range_from_fields(freq=freq, **fields) else: raise ValueError("Not enough parameters to construct Period range") return subarr, freq # ----------------------------------------------------------------- # DatetimeLike Interface def _unbox_scalar(self, value: Union[Period, NaTType]) -> int: if value is 
NaT: return value.value elif isinstance(value, self._scalar_type): self._check_compatible_with(value) return value.ordinal else: raise ValueError(f"'value' should be a Period. Got '{value}' instead.") def _scalar_from_string(self, value: str) -> Period: return Period(value, freq=self.freq) def _check_compatible_with(self, other, setitem: bool = False): if other is NaT: return if self.freqstr != other.freqstr: raise raise_on_incompatible(self, other) # -------------------------------------------------------------------- # Data / Attributes @cache_readonly def dtype(self) -> PeriodDtype: return self._dtype # error: Read-only property cannot override read-write property [misc] @property # type: ignore def freq(self) -> DateOffset: """ Return the frequency object for this PeriodArray. """ return self.dtype.freq def __array__(self, dtype=None) -> np.ndarray: if dtype == "i8": return self.asi8 elif dtype == bool: return ~self._isnan # This will raise TypeError for non-object dtypes return np.array(list(self), dtype=object) def __arrow_array__(self, type=None): """ Convert myself into a pyarrow Array. """ import pyarrow from pandas.core.arrays._arrow_utils import ArrowPeriodType if type is not None: if pyarrow.types.is_integer(type): return pyarrow.array(self._data, mask=self.isna(), type=type) elif isinstance(type, ArrowPeriodType): # ensure we have the same freq if self.freqstr != type.freq: raise TypeError( "Not supported to convert PeriodArray to array with different " f"'freq' ({self.freqstr} vs {type.freq})" ) else: raise TypeError( f"Not supported to convert PeriodArray to '{type}' type" ) period_type = ArrowPeriodType(self.freqstr) storage_array = pyarrow.array(self._data, mask=self.isna(), type="int64") return pyarrow.ExtensionArray.from_storage(period_type, storage_array) # -------------------------------------------------------------------- # Vectorized analogues of Period properties year = _field_accessor( "year", """ The year of the period. """, ) month = _field_accessor( "month", """ The month as January=1, December=12. """, ) day = _field_accessor( "day", """ The days of the period. """, ) hour = _field_accessor( "hour", """ The hour of the period. """, ) minute = _field_accessor( "minute", """ The minute of the period. """, ) second = _field_accessor( "second", """ The second of the period. """, ) weekofyear = _field_accessor( "week", """ The week ordinal of the year. """, ) week = weekofyear dayofweek = _field_accessor( "weekday", """ The day of the week with Monday=0, Sunday=6. """, ) weekday = dayofweek dayofyear = day_of_year = _field_accessor( "day_of_year", """ The ordinal day of the year. """, ) quarter = _field_accessor( "quarter", """ The quarter of the date. """, ) qyear = _field_accessor("qyear") days_in_month = _field_accessor( "days_in_month", """ The number of days in the month. """, ) daysinmonth = days_in_month @property def is_leap_year(self) -> np.ndarray: """ Logical indicating if the date belongs to a leap year. """ return isleapyear_arr(np.asarray(self.year)) @property def start_time(self): return self.to_timestamp(how="start") @property def end_time(self): return self.to_timestamp(how="end") def to_timestamp(self, freq=None, how="start"): """ Cast to DatetimeArray/Index. Parameters ---------- freq : str or DateOffset, optional Target frequency. The default is 'D' for week or longer, 'S' otherwise. how : {'s', 'e', 'start', 'end'} Whether to use the start or end of the time period being converted. 
Returns ------- DatetimeArray/Index """ from pandas.core.arrays import DatetimeArray how = libperiod.validate_end_alias(how) end = how == "E" if end: if freq == "B" or self.freq == "B": # roll forward to ensure we land on B date adjust = Timedelta(1, "D") - Timedelta(1, "ns") return self.to_timestamp(how="start") + adjust else: adjust = Timedelta(1, "ns") return (self + self.freq).to_timestamp(how="start") - adjust if freq is None: freq = self._get_to_timestamp_base() base = freq else: freq = Period._maybe_convert_freq(freq) base = freq._period_dtype_code new_data = self.asfreq(freq, how=how) new_data = libperiod.periodarr_to_dt64arr(new_data.asi8, base) return DatetimeArray(new_data)._with_freq("infer") # -------------------------------------------------------------------- def _time_shift(self, periods, freq=None): """ Shift each value by `periods`. Note this is different from ExtensionArray.shift, which shifts the *position* of each element, padding the end with missing values. Parameters ---------- periods : int Number of periods to shift by. freq : pandas.DateOffset, pandas.Timedelta, or str Frequency increment to shift by. """ if freq is not None: raise TypeError( "`freq` argument is not supported for " f"{type(self).__name__}._time_shift" ) values = self.asi8 + periods * self.freq.n if self._hasnans: values[self._isnan] = iNaT return type(self)(values, freq=self.freq) @property def _box_func(self): return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq) def asfreq(self, freq=None, how: str = "E") -> "PeriodArray": """ Convert the Period Array/Index to the specified frequency `freq`. Parameters ---------- freq : str A frequency. how : str {'E', 'S'} Whether the elements should be aligned to the end or start within pa period. * 'E', 'END', or 'FINISH' for end, * 'S', 'START', or 'BEGIN' for start. January 31st ('END') vs. January 1st ('START') for example. Returns ------- Period Array/Index Constructed with the new frequency. 
Examples -------- >>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A') >>> pidx PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'], dtype='period[A-DEC]', freq='A-DEC') >>> pidx.asfreq('M') PeriodIndex(['2010-12', '2011-12', '2012-12', '2013-12', '2014-12', '2015-12'], dtype='period[M]', freq='M') >>> pidx.asfreq('M', how='S') PeriodIndex(['2010-01', '2011-01', '2012-01', '2013-01', '2014-01', '2015-01'], dtype='period[M]', freq='M') """ how = libperiod.validate_end_alias(how) freq = Period._maybe_convert_freq(freq) base1 = self.freq._period_dtype_code base2 = freq._period_dtype_code asi8 = self.asi8 # self.freq.n can't be negative or 0 end = how == "E" if end: ordinal = asi8 + self.freq.n - 1 else: ordinal = asi8 new_data = period_asfreq_arr(ordinal, base1, base2, end) if self._hasnans: new_data[self._isnan] = iNaT return type(self)(new_data, freq=freq) # ------------------------------------------------------------------ # Rendering Methods def _formatter(self, boxed: bool = False): if boxed: return str return "'{}'".format def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs): """ actually format my specific types """ values = self.astype(object) if date_format: formatter = lambda dt: dt.strftime(date_format) else: formatter = lambda dt: str(dt) if self._hasnans: mask = self._isnan values[mask] = na_rep imask = ~mask values[imask] = np.array([formatter(dt) for dt in values[imask]]) else: values = np.array([formatter(dt) for dt in values]) return values # ------------------------------------------------------------------ def astype(self, dtype, copy: bool = True): # We handle Period[T] -> Period[U] # Our parent handles everything else. dtype = pandas_dtype(dtype) if is_period_dtype(dtype): return self.asfreq(dtype.freq) return super().astype(dtype, copy=copy) # ------------------------------------------------------------------ # Arithmetic Methods def _sub_datelike(self, other): assert other is not NaT return NotImplemented def _sub_period(self, other): # If the operation is well-defined, we return an object-Index # of DateOffsets. Null entries are filled with pd.NaT self._check_compatible_with(other) asi8 = self.asi8 new_data = asi8 - other.ordinal new_data = np.array([self.freq * x for x in new_data]) if self._hasnans: new_data[self._isnan] = NaT return new_data def _sub_period_array(self, other): """ Subtract a Period Array/Index from self. This is only valid if self is itself a Period Array/Index, raises otherwise. Both objects must have the same frequency. Parameters ---------- other : PeriodIndex or PeriodArray Returns ------- result : np.ndarray[object] Array of DateOffset objects; nulls represented by NaT. """ if self.freq != other.freq: msg = DIFFERENT_FREQ.format( cls=type(self).__name__, own_freq=self.freqstr, other_freq=other.freqstr ) raise IncompatibleFrequency(msg) new_values = algos.checked_add_with_arr( self.asi8, -other.asi8, arr_mask=self._isnan, b_mask=other._isnan ) new_values = np.array([self.freq.base * x for x in new_values]) if self._hasnans or other._hasnans: mask = (self._isnan) | (other._isnan) new_values[mask] = NaT return new_values def _addsub_int_array( self, other: np.ndarray, op: Callable[[Any, Any], Any], ) -> "PeriodArray": """ Add or subtract array of integers; equivalent to applying `_time_shift` pointwise. 
Parameters ---------- other : np.ndarray[integer-dtype] op : {operator.add, operator.sub} Returns ------- result : PeriodArray """ assert op in [operator.add, operator.sub] if op is operator.sub: other = -other res_values = algos.checked_add_with_arr(self.asi8, other, arr_mask=self._isnan) res_values = res_values.view("i8") res_values[self._isnan] = iNaT return type(self)(res_values, freq=self.freq) def _add_offset(self, other: DateOffset): assert not isinstance(other, Tick) if other.base != self.freq.base: raise raise_on_incompatible(self, other) # Note: when calling parent class's _add_timedeltalike_scalar, # it will call delta_to_nanoseconds(delta). Because delta here # is an integer, delta_to_nanoseconds will return it unchanged. result = super()._add_timedeltalike_scalar(other.n) return type(self)(result, freq=self.freq) def _add_timedeltalike_scalar(self, other): """ Parameters ---------- other : timedelta, Tick, np.timedelta64 Returns ------- PeriodArray """ if not isinstance(self.freq, Tick): # We cannot add timedelta-like to non-tick PeriodArray raise raise_on_incompatible(self, other) if notna(other): # special handling for np.timedelta64("NaT"), avoid calling # _check_timedeltalike_freq_compat as that would raise TypeError other = self._check_timedeltalike_freq_compat(other) # Note: when calling parent class's _add_timedeltalike_scalar, # it will call delta_to_nanoseconds(delta). Because delta here # is an integer, delta_to_nanoseconds will return it unchanged. return super()._add_timedeltalike_scalar(other) def _add_timedelta_arraylike(self, other): """ Parameters ---------- other : TimedeltaArray or ndarray[timedelta64] Returns ------- result : ndarray[int64] """ if not isinstance(self.freq, Tick): # We cannot add timedelta-like to non-tick PeriodArray raise TypeError( f"Cannot add or subtract timedelta64[ns] dtype from {self.dtype}" ) if not np.all(isna(other)): delta = self._check_timedeltalike_freq_compat(other) else: # all-NaT TimedeltaIndex is equivalent to a single scalar td64 NaT return self + np.timedelta64("NaT") ordinals = self._addsub_int_array(delta, operator.add).asi8 return type(self)(ordinals, dtype=self.dtype) def _check_timedeltalike_freq_compat(self, other): """ Arithmetic operations with timedelta-like scalars or array `other` are only valid if `other` is an integer multiple of `self.freq`. If the operation is valid, find that integer multiple. Otherwise, raise because the operation is invalid. Parameters ---------- other : timedelta, np.timedelta64, Tick, ndarray[timedelta64], TimedeltaArray, TimedeltaIndex Returns ------- multiple : int or ndarray[int64] Raises ------ IncompatibleFrequency """ assert isinstance(self.freq, Tick) # checked by calling function base_nanos = self.freq.base.nanos if isinstance(other, (timedelta, np.timedelta64, Tick)): nanos = delta_to_nanoseconds(other) elif isinstance(other, np.ndarray): # numpy timedelta64 array; all entries must be compatible assert other.dtype.kind == "m" if other.dtype != TD64NS_DTYPE: # i.e. non-nano unit # TODO: disallow unit-less timedelta64 other = other.astype(TD64NS_DTYPE) nanos = other.view("i8") else: # TimedeltaArray/Index nanos = other.asi8 if np.all(nanos % base_nanos == 0): # nanos being added is an integer multiple of the # base-frequency to self.freq delta = nanos // base_nanos # delta is the integer (or integer-array) number of periods # by which will be added to self. 
return delta raise raise_on_incompatible(self, other) def raise_on_incompatible(left, right): """ Helper function to render a consistent error message when raising IncompatibleFrequency. Parameters ---------- left : PeriodArray right : None, DateOffset, Period, ndarray, or timedelta-like Returns ------- IncompatibleFrequency Exception to be raised by the caller. """ # GH#24283 error message format depends on whether right is scalar if isinstance(right, (np.ndarray, ABCTimedeltaArray)) or right is None: other_freq = None elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period, DateOffset)): other_freq = right.freqstr else: other_freq = delta_to_tick(Timedelta(right)).freqstr msg = DIFFERENT_FREQ.format( cls=type(left).__name__, own_freq=left.freqstr, other_freq=other_freq ) return IncompatibleFrequency(msg) # ------------------------------------------------------------------- # Constructor Helpers def period_array( data: Union[Sequence[Optional[Period]], AnyArrayLike], freq: Optional[Union[str, Tick]] = None, copy: bool = False, ) -> PeriodArray: """ Construct a new PeriodArray from a sequence of Period scalars. Parameters ---------- data : Sequence of Period objects A sequence of Period objects. These are required to all have the same ``freq.`` Missing values can be indicated by ``None`` or ``pandas.NaT``. freq : str, Tick, or Offset The frequency of every element of the array. This can be specified to avoid inferring the `freq` from `data`. copy : bool, default False Whether to ensure a copy of the data is made. Returns ------- PeriodArray See Also -------- PeriodArray pandas.PeriodIndex Examples -------- >>> period_array([pd.Period('2017', freq='A'), ... pd.Period('2018', freq='A')]) <PeriodArray> ['2017', '2018'] Length: 2, dtype: period[A-DEC] >>> period_array([pd.Period('2017', freq='A'), ... pd.Period('2018', freq='A'), ... pd.NaT]) <PeriodArray> ['2017', '2018', 'NaT'] Length: 3, dtype: period[A-DEC] Integers that look like years are handled >>> period_array([2000, 2001, 2002], freq='D') <PeriodArray> ['2000-01-01', '2001-01-01', '2002-01-01'] Length: 3, dtype: period[D] Datetime-like strings may also be passed >>> period_array(['2000-Q1', '2000-Q2', '2000-Q3', '2000-Q4'], freq='Q') <PeriodArray> ['2000Q1', '2000Q2', '2000Q3', '2000Q4'] Length: 4, dtype: period[Q-DEC] """ data_dtype = getattr(data, "dtype", None) if is_datetime64_dtype(data_dtype): return PeriodArray._from_datetime64(data, freq) if is_period_dtype(data_dtype): return PeriodArray(data, freq) # other iterable of some kind if not isinstance(data, (np.ndarray, list, tuple, ABCSeries)): data = list(data) data = np.asarray(data) dtype: Optional[PeriodDtype] if freq: dtype = PeriodDtype(freq) else: dtype = None if is_float_dtype(data) and len(data) > 0: raise TypeError("PeriodIndex does not allow floating point in construction") data = ensure_object(data) return PeriodArray._from_sequence(data, dtype=dtype) def validate_dtype_freq(dtype, freq): """ If both a dtype and a freq are available, ensure they match. If only dtype is available, extract the implied freq. 
Parameters ---------- dtype : dtype freq : DateOffset or None Returns ------- freq : DateOffset Raises ------ ValueError : non-period dtype IncompatibleFrequency : mismatch between dtype and freq """ if freq is not None: freq = to_offset(freq) if dtype is not None: dtype = pandas_dtype(dtype) if not is_period_dtype(dtype): raise ValueError("dtype must be PeriodDtype") if freq is None: freq = dtype.freq elif freq != dtype.freq: raise IncompatibleFrequency("specified freq and dtype are different") return freq def dt64arr_to_periodarr(data, freq, tz=None): """ Convert an datetime-like array to values Period ordinals. Parameters ---------- data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64ns]] freq : Optional[Union[str, Tick]] Must match the `freq` on the `data` if `data` is a DatetimeIndex or Series. tz : Optional[tzinfo] Returns ------- ordinals : ndarray[int] freq : Tick The frequency extracted from the Series or DatetimeIndex if that's used. """ if data.dtype != np.dtype("M8[ns]"): raise ValueError(f"Wrong dtype: {data.dtype}") if freq is None: if isinstance(data, ABCIndexClass): data, freq = data._values, data.freq elif isinstance(data, ABCSeries): data, freq = data._values, data.dt.freq freq = Period._maybe_convert_freq(freq) if isinstance(data, (ABCIndexClass, ABCSeries)): data = data._values base = freq._period_dtype_code return libperiod.dt64arr_to_periodarr(data.view("i8"), base, tz), freq def _get_ordinal_range(start, end, periods, freq, mult=1): if com.count_not_none(start, end, periods) != 2: raise ValueError( "Of the three parameters: start, end, and periods, " "exactly two must be specified" ) if freq is not None: freq = to_offset(freq) mult = freq.n if start is not None: start = Period(start, freq) if end is not None: end = Period(end, freq) is_start_per = isinstance(start, Period) is_end_per = isinstance(end, Period) if is_start_per and is_end_per and start.freq != end.freq: raise ValueError("start and end must have same freq") if start is NaT or end is NaT: raise ValueError("start and end must not be NaT") if freq is None: if is_start_per: freq = start.freq elif is_end_per: freq = end.freq else: # pragma: no cover raise ValueError("Could not infer freq from start/end") if periods is not None: periods = periods * mult if start is None: data = np.arange( end.ordinal - periods + mult, end.ordinal + 1, mult, dtype=np.int64 ) else: data = np.arange( start.ordinal, start.ordinal + periods, mult, dtype=np.int64 ) else: data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64) return data, freq def _range_from_fields( year=None, month=None, quarter=None, day=None, hour=None, minute=None, second=None, freq=None, ): if hour is None: hour = 0 if minute is None: minute = 0 if second is None: second = 0 if day is None: day = 1 ordinals = [] if quarter is not None: if freq is None: freq = to_offset("Q") base = FreqGroup.FR_QTR else: freq = to_offset(freq) base = libperiod.freq_to_dtype_code(freq) if base != FreqGroup.FR_QTR: raise AssertionError("base must equal FR_QTR") year, quarter = _make_field_arrays(year, quarter) for y, q in zip(year, quarter): y, m = libperiod.quarter_to_myear(y, q, freq) val = libperiod.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base) ordinals.append(val) else: freq = to_offset(freq) base = libperiod.freq_to_dtype_code(freq) arrays = _make_field_arrays(year, month, day, hour, minute, second) for y, mth, d, h, mn, s in zip(*arrays): ordinals.append(libperiod.period_ordinal(y, mth, d, h, mn, s, 0, 0, base)) return np.array(ordinals, 
dtype=np.int64), freq def _make_field_arrays(*fields): length = None for x in fields: if isinstance(x, (list, np.ndarray, ABCSeries)): if length is not None and len(x) != length: raise ValueError("Mismatched Period array lengths") elif length is None: length = len(x) arrays = [ np.asarray(x) if isinstance(x, (np.ndarray, list, ABCSeries)) else np.repeat(x, length) for x in fields ] return arrays
30.260056
88
0.579912
from datetime import timedelta import operator from typing import Any, Callable, List, Optional, Sequence, Type, Union import numpy as np from pandas._libs.tslibs import ( NaT, NaTType, Timedelta, delta_to_nanoseconds, iNaT, period as libperiod, to_offset, ) from pandas._libs.tslibs.dtypes import FreqGroup from pandas._libs.tslibs.fields import isleapyear_arr from pandas._libs.tslibs.offsets import Tick, delta_to_tick from pandas._libs.tslibs.period import ( DIFFERENT_FREQ, IncompatibleFrequency, Period, PeriodMixin, get_period_field_arr, period_asfreq_arr, ) from pandas._typing import AnyArrayLike from pandas.util._decorators import cache_readonly from pandas.core.dtypes.common import ( TD64NS_DTYPE, ensure_object, is_datetime64_dtype, is_float_dtype, is_period_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.generic import ( ABCIndexClass, ABCPeriodIndex, ABCSeries, ABCTimedeltaArray, ) from pandas.core.dtypes.missing import isna, notna import pandas.core.algorithms as algos from pandas.core.arrays import datetimelike as dtl import pandas.core.common as com from pandas.tseries.offsets import DateOffset def _field_accessor(name: str, docstring=None): def f(self): base = self.freq._period_dtype_code result = get_period_field_arr(name, self.asi8, base) return result f.__name__ = name f.__doc__ = docstring return property(f) class PeriodArray(PeriodMixin, dtl.DatetimeLikeArrayMixin, dtl.DatelikeOps): __array_priority__ = 1000 _typ = "periodarray" _scalar_type = Period _recognized_scalars = (Period,) _is_recognized_dtype = is_period_dtype _other_ops: List[str] = [] _bool_ops = ["is_leap_year"] _object_ops = ["start_time", "end_time", "freq"] _field_ops = [ "year", "month", "day", "hour", "minute", "second", "weekofyear", "weekday", "week", "dayofweek", "dayofyear", "quarter", "qyear", "days_in_month", "daysinmonth", ] _datetimelike_ops = _field_ops + _object_ops + _bool_ops _datetimelike_methods = ["strftime", "to_timestamp", "asfreq"] def __init__(self, values, freq=None, dtype=None, copy=False): freq = validate_dtype_freq(dtype, freq) if freq is not None: freq = Period._maybe_convert_freq(freq) if isinstance(values, ABCSeries): values = values._values if not isinstance(values, type(self)): raise TypeError("Incorrect dtype") elif isinstance(values, ABCPeriodIndex): values = values._values if isinstance(values, type(self)): if freq is not None and freq != values.freq: raise raise_on_incompatible(values, freq) values, freq = values._data, values.freq values = np.array(values, dtype="int64", copy=copy) self._data = values if freq is None: raise ValueError("freq is not specified and cannot be inferred") self._dtype = PeriodDtype(freq) @classmethod def _simple_new(cls, values: np.ndarray, freq=None, **kwargs) -> "PeriodArray": assertion_msg = "Should be numpy array of type i8" assert isinstance(values, np.ndarray) and values.dtype == "i8", assertion_msg return cls(values, freq=freq, **kwargs) @classmethod def _from_sequence( cls: Type["PeriodArray"], scalars: Union[Sequence[Optional[Period]], AnyArrayLike], dtype: Optional[PeriodDtype] = None, copy: bool = False, ) -> "PeriodArray": if dtype: freq = dtype.freq else: freq = None if isinstance(scalars, cls): validate_dtype_freq(scalars.dtype, freq) if copy: scalars = scalars.copy() return scalars periods = np.asarray(scalars, dtype=object) if copy: periods = periods.copy() freq = freq or libperiod.extract_freq(periods) ordinals = libperiod.extract_ordinals(periods, freq) return cls(ordinals, 
freq=freq) @classmethod def _from_sequence_of_strings( cls, strings, dtype=None, copy=False ) -> "PeriodArray": return cls._from_sequence(strings, dtype, copy) @classmethod def _from_datetime64(cls, data, freq, tz=None) -> "PeriodArray": data, freq = dt64arr_to_periodarr(data, freq, tz) return cls(data, freq=freq) @classmethod def _generate_range(cls, start, end, periods, freq, fields): periods = dtl.validate_periods(periods) if freq is not None: freq = Period._maybe_convert_freq(freq) field_count = len(fields) if start is not None or end is not None: if field_count > 0: raise ValueError( "Can either instantiate from fields or endpoints, but not both" ) subarr, freq = _get_ordinal_range(start, end, periods, freq) elif field_count > 0: subarr, freq = _range_from_fields(freq=freq, **fields) else: raise ValueError("Not enough parameters to construct Period range") return subarr, freq def _unbox_scalar(self, value: Union[Period, NaTType]) -> int: if value is NaT: return value.value elif isinstance(value, self._scalar_type): self._check_compatible_with(value) return value.ordinal else: raise ValueError(f"'value' should be a Period. Got '{value}' instead.") def _scalar_from_string(self, value: str) -> Period: return Period(value, freq=self.freq) def _check_compatible_with(self, other, setitem: bool = False): if other is NaT: return if self.freqstr != other.freqstr: raise raise_on_incompatible(self, other) @cache_readonly def dtype(self) -> PeriodDtype: return self._dtype @property def freq(self) -> DateOffset: return self.dtype.freq def __array__(self, dtype=None) -> np.ndarray: if dtype == "i8": return self.asi8 elif dtype == bool: return ~self._isnan return np.array(list(self), dtype=object) def __arrow_array__(self, type=None): import pyarrow from pandas.core.arrays._arrow_utils import ArrowPeriodType if type is not None: if pyarrow.types.is_integer(type): return pyarrow.array(self._data, mask=self.isna(), type=type) elif isinstance(type, ArrowPeriodType): if self.freqstr != type.freq: raise TypeError( "Not supported to convert PeriodArray to array with different " f"'freq' ({self.freqstr} vs {type.freq})" ) else: raise TypeError( f"Not supported to convert PeriodArray to '{type}' type" ) period_type = ArrowPeriodType(self.freqstr) storage_array = pyarrow.array(self._data, mask=self.isna(), type="int64") return pyarrow.ExtensionArray.from_storage(period_type, storage_array) year = _field_accessor( "year", """ The year of the period. """, ) month = _field_accessor( "month", """ The month as January=1, December=12. """, ) day = _field_accessor( "day", """ The days of the period. """, ) hour = _field_accessor( "hour", """ The hour of the period. """, ) minute = _field_accessor( "minute", """ The minute of the period. """, ) second = _field_accessor( "second", """ The second of the period. """, ) weekofyear = _field_accessor( "week", """ The week ordinal of the year. """, ) week = weekofyear dayofweek = _field_accessor( "weekday", """ The day of the week with Monday=0, Sunday=6. """, ) weekday = dayofweek dayofyear = day_of_year = _field_accessor( "day_of_year", """ The ordinal day of the year. """, ) quarter = _field_accessor( "quarter", """ The quarter of the date. """, ) qyear = _field_accessor("qyear") days_in_month = _field_accessor( "days_in_month", """ The number of days in the month. 
""", ) daysinmonth = days_in_month @property def is_leap_year(self) -> np.ndarray: return isleapyear_arr(np.asarray(self.year)) @property def start_time(self): return self.to_timestamp(how="start") @property def end_time(self): return self.to_timestamp(how="end") def to_timestamp(self, freq=None, how="start"): from pandas.core.arrays import DatetimeArray how = libperiod.validate_end_alias(how) end = how == "E" if end: if freq == "B" or self.freq == "B": adjust = Timedelta(1, "D") - Timedelta(1, "ns") return self.to_timestamp(how="start") + adjust else: adjust = Timedelta(1, "ns") return (self + self.freq).to_timestamp(how="start") - adjust if freq is None: freq = self._get_to_timestamp_base() base = freq else: freq = Period._maybe_convert_freq(freq) base = freq._period_dtype_code new_data = self.asfreq(freq, how=how) new_data = libperiod.periodarr_to_dt64arr(new_data.asi8, base) return DatetimeArray(new_data)._with_freq("infer") def _time_shift(self, periods, freq=None): if freq is not None: raise TypeError( "`freq` argument is not supported for " f"{type(self).__name__}._time_shift" ) values = self.asi8 + periods * self.freq.n if self._hasnans: values[self._isnan] = iNaT return type(self)(values, freq=self.freq) @property def _box_func(self): return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq) def asfreq(self, freq=None, how: str = "E") -> "PeriodArray": how = libperiod.validate_end_alias(how) freq = Period._maybe_convert_freq(freq) base1 = self.freq._period_dtype_code base2 = freq._period_dtype_code asi8 = self.asi8 end = how == "E" if end: ordinal = asi8 + self.freq.n - 1 else: ordinal = asi8 new_data = period_asfreq_arr(ordinal, base1, base2, end) if self._hasnans: new_data[self._isnan] = iNaT return type(self)(new_data, freq=freq) # ------------------------------------------------------------------ # Rendering Methods def _formatter(self, boxed: bool = False): if boxed: return str return "'{}'".format def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs): values = self.astype(object) if date_format: formatter = lambda dt: dt.strftime(date_format) else: formatter = lambda dt: str(dt) if self._hasnans: mask = self._isnan values[mask] = na_rep imask = ~mask values[imask] = np.array([formatter(dt) for dt in values[imask]]) else: values = np.array([formatter(dt) for dt in values]) return values # ------------------------------------------------------------------ def astype(self, dtype, copy: bool = True): # We handle Period[T] -> Period[U] # Our parent handles everything else. dtype = pandas_dtype(dtype) if is_period_dtype(dtype): return self.asfreq(dtype.freq) return super().astype(dtype, copy=copy) # ------------------------------------------------------------------ # Arithmetic Methods def _sub_datelike(self, other): assert other is not NaT return NotImplemented def _sub_period(self, other): # If the operation is well-defined, we return an object-Index # of DateOffsets. 
Null entries are filled with pd.NaT self._check_compatible_with(other) asi8 = self.asi8 new_data = asi8 - other.ordinal new_data = np.array([self.freq * x for x in new_data]) if self._hasnans: new_data[self._isnan] = NaT return new_data def _sub_period_array(self, other): if self.freq != other.freq: msg = DIFFERENT_FREQ.format( cls=type(self).__name__, own_freq=self.freqstr, other_freq=other.freqstr ) raise IncompatibleFrequency(msg) new_values = algos.checked_add_with_arr( self.asi8, -other.asi8, arr_mask=self._isnan, b_mask=other._isnan ) new_values = np.array([self.freq.base * x for x in new_values]) if self._hasnans or other._hasnans: mask = (self._isnan) | (other._isnan) new_values[mask] = NaT return new_values def _addsub_int_array( self, other: np.ndarray, op: Callable[[Any, Any], Any], ) -> "PeriodArray": assert op in [operator.add, operator.sub] if op is operator.sub: other = -other res_values = algos.checked_add_with_arr(self.asi8, other, arr_mask=self._isnan) res_values = res_values.view("i8") res_values[self._isnan] = iNaT return type(self)(res_values, freq=self.freq) def _add_offset(self, other: DateOffset): assert not isinstance(other, Tick) if other.base != self.freq.base: raise raise_on_incompatible(self, other) # Note: when calling parent class's _add_timedeltalike_scalar, result = super()._add_timedeltalike_scalar(other.n) return type(self)(result, freq=self.freq) def _add_timedeltalike_scalar(self, other): if not isinstance(self.freq, Tick): raise raise_on_incompatible(self, other) if notna(other): other = self._check_timedeltalike_freq_compat(other) # it will call delta_to_nanoseconds(delta). Because delta here # is an integer, delta_to_nanoseconds will return it unchanged. return super()._add_timedeltalike_scalar(other) def _add_timedelta_arraylike(self, other): if not isinstance(self.freq, Tick): # We cannot add timedelta-like to non-tick PeriodArray raise TypeError( f"Cannot add or subtract timedelta64[ns] dtype from {self.dtype}" ) if not np.all(isna(other)): delta = self._check_timedeltalike_freq_compat(other) else: # all-NaT TimedeltaIndex is equivalent to a single scalar td64 NaT return self + np.timedelta64("NaT") ordinals = self._addsub_int_array(delta, operator.add).asi8 return type(self)(ordinals, dtype=self.dtype) def _check_timedeltalike_freq_compat(self, other): assert isinstance(self.freq, Tick) # checked by calling function base_nanos = self.freq.base.nanos if isinstance(other, (timedelta, np.timedelta64, Tick)): nanos = delta_to_nanoseconds(other) elif isinstance(other, np.ndarray): # numpy timedelta64 array; all entries must be compatible assert other.dtype.kind == "m" if other.dtype != TD64NS_DTYPE: # i.e. non-nano unit # TODO: disallow unit-less timedelta64 other = other.astype(TD64NS_DTYPE) nanos = other.view("i8") else: # TimedeltaArray/Index nanos = other.asi8 if np.all(nanos % base_nanos == 0): # nanos being added is an integer multiple of the # base-frequency to self.freq delta = nanos // base_nanos # delta is the integer (or integer-array) number of periods # by which will be added to self. 
return delta raise raise_on_incompatible(self, other) def raise_on_incompatible(left, right): # GH#24283 error message format depends on whether right is scalar if isinstance(right, (np.ndarray, ABCTimedeltaArray)) or right is None: other_freq = None elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period, DateOffset)): other_freq = right.freqstr else: other_freq = delta_to_tick(Timedelta(right)).freqstr msg = DIFFERENT_FREQ.format( cls=type(left).__name__, own_freq=left.freqstr, other_freq=other_freq ) return IncompatibleFrequency(msg) # ------------------------------------------------------------------- # Constructor Helpers def period_array( data: Union[Sequence[Optional[Period]], AnyArrayLike], freq: Optional[Union[str, Tick]] = None, copy: bool = False, ) -> PeriodArray: data_dtype = getattr(data, "dtype", None) if is_datetime64_dtype(data_dtype): return PeriodArray._from_datetime64(data, freq) if is_period_dtype(data_dtype): return PeriodArray(data, freq) # other iterable of some kind if not isinstance(data, (np.ndarray, list, tuple, ABCSeries)): data = list(data) data = np.asarray(data) dtype: Optional[PeriodDtype] if freq: dtype = PeriodDtype(freq) else: dtype = None if is_float_dtype(data) and len(data) > 0: raise TypeError("PeriodIndex does not allow floating point in construction") data = ensure_object(data) return PeriodArray._from_sequence(data, dtype=dtype) def validate_dtype_freq(dtype, freq): if freq is not None: freq = to_offset(freq) if dtype is not None: dtype = pandas_dtype(dtype) if not is_period_dtype(dtype): raise ValueError("dtype must be PeriodDtype") if freq is None: freq = dtype.freq elif freq != dtype.freq: raise IncompatibleFrequency("specified freq and dtype are different") return freq def dt64arr_to_periodarr(data, freq, tz=None): if data.dtype != np.dtype("M8[ns]"): raise ValueError(f"Wrong dtype: {data.dtype}") if freq is None: if isinstance(data, ABCIndexClass): data, freq = data._values, data.freq elif isinstance(data, ABCSeries): data, freq = data._values, data.dt.freq freq = Period._maybe_convert_freq(freq) if isinstance(data, (ABCIndexClass, ABCSeries)): data = data._values base = freq._period_dtype_code return libperiod.dt64arr_to_periodarr(data.view("i8"), base, tz), freq def _get_ordinal_range(start, end, periods, freq, mult=1): if com.count_not_none(start, end, periods) != 2: raise ValueError( "Of the three parameters: start, end, and periods, " "exactly two must be specified" ) if freq is not None: freq = to_offset(freq) mult = freq.n if start is not None: start = Period(start, freq) if end is not None: end = Period(end, freq) is_start_per = isinstance(start, Period) is_end_per = isinstance(end, Period) if is_start_per and is_end_per and start.freq != end.freq: raise ValueError("start and end must have same freq") if start is NaT or end is NaT: raise ValueError("start and end must not be NaT") if freq is None: if is_start_per: freq = start.freq elif is_end_per: freq = end.freq else: # pragma: no cover raise ValueError("Could not infer freq from start/end") if periods is not None: periods = periods * mult if start is None: data = np.arange( end.ordinal - periods + mult, end.ordinal + 1, mult, dtype=np.int64 ) else: data = np.arange( start.ordinal, start.ordinal + periods, mult, dtype=np.int64 ) else: data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64) return data, freq def _range_from_fields( year=None, month=None, quarter=None, day=None, hour=None, minute=None, second=None, freq=None, ): if hour is None: hour = 0 if minute 
is None: minute = 0 if second is None: second = 0 if day is None: day = 1 ordinals = [] if quarter is not None: if freq is None: freq = to_offset("Q") base = FreqGroup.FR_QTR else: freq = to_offset(freq) base = libperiod.freq_to_dtype_code(freq) if base != FreqGroup.FR_QTR: raise AssertionError("base must equal FR_QTR") year, quarter = _make_field_arrays(year, quarter) for y, q in zip(year, quarter): y, m = libperiod.quarter_to_myear(y, q, freq) val = libperiod.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base) ordinals.append(val) else: freq = to_offset(freq) base = libperiod.freq_to_dtype_code(freq) arrays = _make_field_arrays(year, month, day, hour, minute, second) for y, mth, d, h, mn, s in zip(*arrays): ordinals.append(libperiod.period_ordinal(y, mth, d, h, mn, s, 0, 0, base)) return np.array(ordinals, dtype=np.int64), freq def _make_field_arrays(*fields): length = None for x in fields: if isinstance(x, (list, np.ndarray, ABCSeries)): if length is not None and len(x) != length: raise ValueError("Mismatched Period array lengths") elif length is None: length = len(x) arrays = [ np.asarray(x) if isinstance(x, (np.ndarray, list, ABCSeries)) else np.repeat(x, length) for x in fields ] return arrays
true
true
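A minimal usage sketch of the asfreq and to_timestamp behaviour documented in the PeriodArray record above, written against the public pandas API rather than the internal module (the annual 'A' frequency alias and the printed values follow the record's own docstring example; this is an illustration, not part of the record):

import pandas as pd

# Annual periods, as in the asfreq docstring above.
pidx = pd.period_range("2010", "2012", freq="A")

# Re-anchor each annual period to monthly frequency at the start of the period.
print(pidx.asfreq("M", how="S"))       # ['2010-01', '2011-01', '2012-01']

# Cast to timestamps anchored at the start of each period.
print(pidx.to_timestamp(how="start"))  # ['2010-01-01', '2011-01-01', '2012-01-01']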
7902de16b8a23928735bfe12b81d82e40cd8d2cd
1048
py
Python
shadowsocksr/encrypt_test.py
hcaijin/ssrspeedtest
9d353d4e43d861c3e1359221ac3faea7b11cb8ef
[ "Apache-2.0" ]
null
null
null
shadowsocksr/encrypt_test.py
hcaijin/ssrspeedtest
9d353d4e43d861c3e1359221ac3faea7b11cb8ef
[ "Apache-2.0" ]
null
null
null
shadowsocksr/encrypt_test.py
hcaijin/ssrspeedtest
9d353d4e43d861c3e1359221ac3faea7b11cb8ef
[ "Apache-2.0" ]
null
null
null
from __future__ import absolute_import, division, print_function, \ with_statement import sys import os sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../')) from shadowsocksr.crypto import rc4_md5 from shadowsocksr.crypto import openssl from shadowsocksr.crypto import sodium from shadowsocksr.crypto import table def run(func): try: func() except: pass def run_n(func, name): try: func(name) except: pass def main(): print("\n""rc4_md5") rc4_md5.test() print("\n""aes-256-cfb") openssl.test_aes_256_cfb() print("\n""aes-128-cfb") openssl.test_aes_128_cfb() print("\n""bf-cfb") run(openssl.test_bf_cfb) print("\n""camellia-128-cfb") run_n(openssl.run_method, "camellia-128-cfb") print("\n""cast5-cfb") run_n(openssl.run_method, "cast5-cfb") print("\n""idea-cfb") run_n(openssl.run_method, "idea-cfb") print("\n""seed-cfb") run_n(openssl.run_method, "seed-cfb") print("\n""salsa20") run(sodium.test_salsa20) print("\n""chacha20") run(sodium.test_chacha20) if __name__ == '__main__': main()
20.54902
67
0.714695
from __future__ import absolute_import, division, print_function, \ with_statement import sys import os sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../')) from shadowsocksr.crypto import rc4_md5 from shadowsocksr.crypto import openssl from shadowsocksr.crypto import sodium from shadowsocksr.crypto import table def run(func): try: func() except: pass def run_n(func, name): try: func(name) except: pass def main(): print("\n""rc4_md5") rc4_md5.test() print("\n""aes-256-cfb") openssl.test_aes_256_cfb() print("\n""aes-128-cfb") openssl.test_aes_128_cfb() print("\n""bf-cfb") run(openssl.test_bf_cfb) print("\n""camellia-128-cfb") run_n(openssl.run_method, "camellia-128-cfb") print("\n""cast5-cfb") run_n(openssl.run_method, "cast5-cfb") print("\n""idea-cfb") run_n(openssl.run_method, "idea-cfb") print("\n""seed-cfb") run_n(openssl.run_method, "seed-cfb") print("\n""salsa20") run(sodium.test_salsa20) print("\n""chacha20") run(sodium.test_chacha20) if __name__ == '__main__': main()
true
true
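The bare run()/run_n() wrappers in the encrypt_test.py record above exist so that ciphers absent from the local crypto libraries (camellia, cast5, idea, seed) only skip their own self-test instead of aborting the whole sweep. A small generic sketch of the same pattern that also reports why a test was skipped (labels and calls are illustrative):

def try_cipher_test(label, func, *args):
    # Tolerate ciphers that the local OpenSSL/libsodium build does not provide.
    try:
        func(*args)
        print(label, "ok")
    except Exception as exc:
        print(label, "skipped:", exc)

# Example calls mirroring main() above (commented out because they need the
# shadowsocksr.crypto modules on the import path):
# try_cipher_test("camellia-128-cfb", openssl.run_method, "camellia-128-cfb")
# try_cipher_test("salsa20", sodium.test_salsa20)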
7902de46893fda5ca73b5b77691185ab7f392a83
6261
py
Python
Benchmarking/bsds500/bsds/thin.py
cristi161/eecvf
519c488bd47f697ef51e88823f7a751a52677b88
[ "MIT" ]
1
2021-04-02T15:33:12.000Z
2021-04-02T15:33:12.000Z
Benchmarking/bsds500/bsds/thin.py
cristi161/eecvf
519c488bd47f697ef51e88823f7a751a52677b88
[ "MIT" ]
null
null
null
Benchmarking/bsds500/bsds/thin.py
cristi161/eecvf
519c488bd47f697ef51e88823f7a751a52677b88
[ "MIT" ]
1
2021-08-14T09:07:22.000Z
2021-08-14T09:07:22.000Z
import numpy as np # Thinning morphological operation applied using lookup tables. # We convert the 3x3 neighbourhood surrounding a pixel to an index # used to lookup the output in a lookup table. # Bit masks for each neighbour # 1 2 4 # 8 16 32 # 64 128 256 NEIGH_MASK_EAST = 32 NEIGH_MASK_NORTH_EAST = 4 NEIGH_MASK_NORTH = 2 NEIGH_MASK_NORTH_WEST = 1 NEIGH_MASK_WEST = 8 NEIGH_MASK_SOUTH_WEST = 64 NEIGH_MASK_SOUTH = 128 NEIGH_MASK_SOUTH_EAST = 256 NEIGH_MASK_CENTRE = 16 # Masks in a list # MASKS[0] = centre # MASKS[1..8] = start from east, counter-clockwise MASKS = [NEIGH_MASK_CENTRE, NEIGH_MASK_EAST, NEIGH_MASK_NORTH_EAST, NEIGH_MASK_NORTH, NEIGH_MASK_NORTH_WEST, NEIGH_MASK_WEST, NEIGH_MASK_SOUTH_WEST, NEIGH_MASK_SOUTH, NEIGH_MASK_SOUTH_EAST, ] # Constant listing all indices _LUT_INDS = np.arange(512) def binary_image_to_lut_indices(x): """ Convert a binary image to an index image that can be used with a lookup table to perform morphological operations. Non-zero elements in the image are interpreted as 1, zero elements as 0 :param x: a 2D NumPy array. :return: a 2D NumPy array, same shape as x """ if x.ndim != 2: raise ValueError('x should have 2 dimensions, not {}'.format(x.ndim)) # If the dtype of x is not bool, convert if x.dtype != np.bool: x = x != 0 # Add x = np.pad(x, [(1, 1), (1, 1)], mode='constant') # Convert to LUT indices lut_indices = x[:-2, :-2] * NEIGH_MASK_NORTH_WEST + \ x[:-2, 1:-1] * NEIGH_MASK_NORTH + \ x[:-2, 2:] * NEIGH_MASK_NORTH_EAST + \ x[1:-1, :-2] * NEIGH_MASK_WEST + \ x[1:-1, 1:-1] * NEIGH_MASK_CENTRE + \ x[1:-1, 2:] * NEIGH_MASK_EAST + \ x[2:, :-2] * NEIGH_MASK_SOUTH_WEST + \ x[2:, 1:-1] * NEIGH_MASK_SOUTH + \ x[2:, 2:] * NEIGH_MASK_SOUTH_EAST return lut_indices.astype(np.int32) def apply_lut(x, lut): """ Perform a morphological operation on the binary image x using the supplied lookup table :param x: :param lut: :return: """ if lut.ndim != 1: raise ValueError('lut should have 1 dimension, not {}'.format(lut.ndim)) if lut.shape[0] != 512: raise ValueError('lut should have 512 entries, not {}'.format(lut.shape[0])) lut_indices = binary_image_to_lut_indices(x) return lut[lut_indices] def identity_lut(): """ Create identity lookup tablef :return: """ lut = np.zeros((512,), dtype=bool) inds = np.arange(512) lut[(inds & NEIGH_MASK_CENTRE) != 0] = True return lut def _lut_mutate_mask(lut): """ Get a mask that shows which neighbourhood shapes result in changes to the image :param lut: lookup table :return: mask indicating which lookup indices result in changes """ return lut != identity_lut() def lut_masks_zero(neigh): """ Create a LUT index mask for which the specified neighbour is 0 :param neigh: neighbour index; counter-clockwise from 1 staring at the eastern neighbour :return: a LUT index mask """ if neigh > 8: neigh -= 8 return (_LUT_INDS & MASKS[neigh]) == 0 def lut_masks_one(neigh): """ Create a LUT index mask for which the specified neighbour is 1 :param neigh: neighbour index; counter-clockwise from 1 staring at the eastern neighbour :return: a LUT index mask """ if neigh > 8: neigh -= 8 return (_LUT_INDS & MASKS[neigh]) != 0 def _thin_cond_g1(): """ Thinning morphological operation; condition G1 :return: a LUT index mask """ b = np.zeros(512, dtype=int) for i in range(1, 5): b += lut_masks_zero(2 * i - 1) & (lut_masks_one(2 * i) | lut_masks_one(2 * i + 1)) return b == 1 def _thin_cond_g2(): """ Thinning morphological operation; condition G2 :return: a LUT index mask """ n1 = np.zeros(512, dtype=int) n2 = np.zeros(512, dtype=int) for k in range(1, 5): n1 += 
(lut_masks_one(2 * k - 1) | lut_masks_one(2 * k)) n2 += (lut_masks_one(2 * k) | lut_masks_one(2 * k + 1)) m = np.minimum(n1, n2) return (m >= 2) & (m <= 3) def _thin_cond_g3(): """ Thinning morphological operation; condition G3 :return: a LUT index mask """ return ((lut_masks_one(2) | lut_masks_one(3) | lut_masks_zero(8)) & lut_masks_one(1)) == 0 def _thin_cond_g3_prime(): """ Thinning morphological operation; condition G3' :return: a LUT index mask """ return ((lut_masks_one(6) | lut_masks_one(7) | lut_masks_zero(4)) & lut_masks_one(5)) == 0 def _thin_iter_1_lut(): """ Thinning morphological operation; lookup table for iteration 1 :return: lookup table """ lut = identity_lut() cond = _thin_cond_g1() & _thin_cond_g2() & _thin_cond_g3() lut[cond] = False return lut def _thin_iter_2_lut(): """ Thinning morphological operation; lookup table for iteration 2 :return: lookup table """ lut = identity_lut() cond = _thin_cond_g1() & _thin_cond_g2() & _thin_cond_g3_prime() lut[cond] = False return lut def binary_thin(x, max_iter=None): """ Binary thinning morphological operation :param x: a binary image, or an image that is to be converted to a binary image :param max_iter: maximum number of iterations; default is `None` that results in an infinite number of iterations (note that `binary_thin` will automatically terminate when no more changes occur) :return: """ thin1 = _thin_iter_1_lut() thin2 = _thin_iter_2_lut() thin1_mut = _lut_mutate_mask(thin1) thin2_mut = _lut_mutate_mask(thin2) iter_count = 0 while max_iter is None or iter_count < max_iter: # Iter 1 lut_indices = binary_image_to_lut_indices(x) x_mut = thin1_mut[lut_indices] if x_mut.sum() == 0: break x = thin1[lut_indices] # Iter 2 lut_indices = binary_image_to_lut_indices(x) x_mut = thin2_mut[lut_indices] if x_mut.sum() == 0: break x = thin2[lut_indices] iter_count += 1 return x
27.70354
106
0.632487
import numpy as np NEIGH_MASK_EAST = 32 NEIGH_MASK_NORTH_EAST = 4 NEIGH_MASK_NORTH = 2 NEIGH_MASK_NORTH_WEST = 1 NEIGH_MASK_WEST = 8 NEIGH_MASK_SOUTH_WEST = 64 NEIGH_MASK_SOUTH = 128 NEIGH_MASK_SOUTH_EAST = 256 NEIGH_MASK_CENTRE = 16 MASKS = [NEIGH_MASK_CENTRE, NEIGH_MASK_EAST, NEIGH_MASK_NORTH_EAST, NEIGH_MASK_NORTH, NEIGH_MASK_NORTH_WEST, NEIGH_MASK_WEST, NEIGH_MASK_SOUTH_WEST, NEIGH_MASK_SOUTH, NEIGH_MASK_SOUTH_EAST, ] _LUT_INDS = np.arange(512) def binary_image_to_lut_indices(x): if x.ndim != 2: raise ValueError('x should have 2 dimensions, not {}'.format(x.ndim)) if x.dtype != np.bool: x = x != 0 x = np.pad(x, [(1, 1), (1, 1)], mode='constant') lut_indices = x[:-2, :-2] * NEIGH_MASK_NORTH_WEST + \ x[:-2, 1:-1] * NEIGH_MASK_NORTH + \ x[:-2, 2:] * NEIGH_MASK_NORTH_EAST + \ x[1:-1, :-2] * NEIGH_MASK_WEST + \ x[1:-1, 1:-1] * NEIGH_MASK_CENTRE + \ x[1:-1, 2:] * NEIGH_MASK_EAST + \ x[2:, :-2] * NEIGH_MASK_SOUTH_WEST + \ x[2:, 1:-1] * NEIGH_MASK_SOUTH + \ x[2:, 2:] * NEIGH_MASK_SOUTH_EAST return lut_indices.astype(np.int32) def apply_lut(x, lut): if lut.ndim != 1: raise ValueError('lut should have 1 dimension, not {}'.format(lut.ndim)) if lut.shape[0] != 512: raise ValueError('lut should have 512 entries, not {}'.format(lut.shape[0])) lut_indices = binary_image_to_lut_indices(x) return lut[lut_indices] def identity_lut(): lut = np.zeros((512,), dtype=bool) inds = np.arange(512) lut[(inds & NEIGH_MASK_CENTRE) != 0] = True return lut def _lut_mutate_mask(lut): return lut != identity_lut() def lut_masks_zero(neigh): if neigh > 8: neigh -= 8 return (_LUT_INDS & MASKS[neigh]) == 0 def lut_masks_one(neigh): if neigh > 8: neigh -= 8 return (_LUT_INDS & MASKS[neigh]) != 0 def _thin_cond_g1(): b = np.zeros(512, dtype=int) for i in range(1, 5): b += lut_masks_zero(2 * i - 1) & (lut_masks_one(2 * i) | lut_masks_one(2 * i + 1)) return b == 1 def _thin_cond_g2(): n1 = np.zeros(512, dtype=int) n2 = np.zeros(512, dtype=int) for k in range(1, 5): n1 += (lut_masks_one(2 * k - 1) | lut_masks_one(2 * k)) n2 += (lut_masks_one(2 * k) | lut_masks_one(2 * k + 1)) m = np.minimum(n1, n2) return (m >= 2) & (m <= 3) def _thin_cond_g3(): return ((lut_masks_one(2) | lut_masks_one(3) | lut_masks_zero(8)) & lut_masks_one(1)) == 0 def _thin_cond_g3_prime(): return ((lut_masks_one(6) | lut_masks_one(7) | lut_masks_zero(4)) & lut_masks_one(5)) == 0 def _thin_iter_1_lut(): lut = identity_lut() cond = _thin_cond_g1() & _thin_cond_g2() & _thin_cond_g3() lut[cond] = False return lut def _thin_iter_2_lut(): lut = identity_lut() cond = _thin_cond_g1() & _thin_cond_g2() & _thin_cond_g3_prime() lut[cond] = False return lut def binary_thin(x, max_iter=None): thin1 = _thin_iter_1_lut() thin2 = _thin_iter_2_lut() thin1_mut = _lut_mutate_mask(thin1) thin2_mut = _lut_mutate_mask(thin2) iter_count = 0 while max_iter is None or iter_count < max_iter: lut_indices = binary_image_to_lut_indices(x) x_mut = thin1_mut[lut_indices] if x_mut.sum() == 0: break x = thin1[lut_indices] lut_indices = binary_image_to_lut_indices(x) x_mut = thin2_mut[lut_indices] if x_mut.sum() == 0: break x = thin2[lut_indices] iter_count += 1 return x
true
true
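The thin.py record above drives binary thinning entirely through 512-entry lookup tables indexed by the 3x3 neighbourhood bit masks it defines. A minimal usage sketch, assuming the file is importable as `thin` and a NumPy old enough that the record's np.bool alias still exists:

import numpy as np
import thin  # assumed import name for the module in the record above

# A 9x9 binary image containing a 3-pixel-thick horizontal bar.
img = np.zeros((9, 9), dtype=bool)
img[3:6, 1:8] = True

skeleton = thin.binary_thin(img)   # iterate until no more pixels change
print(skeleton.astype(int))        # the bar collapses toward a 1-pixel-wide line

# max_iter bounds the number of two-pass iterations if early termination is wanted:
partially_thinned = thin.binary_thin(img, max_iter=1)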
7902de95884c5145b41ae43eb77adbda086cf022
860
py
Python
virtual_filesystem/localization.py
joaovitor123jv/rontext
0e4d2bb9df879a0331037af9bf49e39e5a6b4e24
[ "MIT" ]
1
2022-02-21T03:54:44.000Z
2022-02-21T03:54:44.000Z
virtual_filesystem/localization.py
joaovitor123jv/rontext
0e4d2bb9df879a0331037af9bf49e39e5a6b4e24
[ "MIT" ]
null
null
null
virtual_filesystem/localization.py
joaovitor123jv/rontext
0e4d2bb9df879a0331037af9bf49e39e5a6b4e24
[ "MIT" ]
null
null
null
import time import threading import subprocess import helpers from settings import Settings def listener(): global data_source print("**** SIDE_THREAD ID == ", threading.get_ident()) while True: return_data = subprocess.run([data_source.settings.loaded['localization_bin']], stdout=subprocess.PIPE) parsed_return = helpers.parse_yaml_string(return_data.stdout.decode('utf8')) data_source.settings.add_runtime('localization', parsed_return) time.sleep(data_source.settings.loaded['localization_plugin_wait_time']) # Waits 1 second till the next localization check def start_plugin(data_source_received): global data_source data_source = data_source_received try: thread = threading.Thread(target=listener) thread.start() except: print("Failed to start localization plugin")
34.4
130
0.737209
import time import threading import subprocess import helpers from settings import Settings def listener(): global data_source print("**** SIDE_THREAD ID == ", threading.get_ident()) while True: return_data = subprocess.run([data_source.settings.loaded['localization_bin']], stdout=subprocess.PIPE) parsed_return = helpers.parse_yaml_string(return_data.stdout.decode('utf8')) data_source.settings.add_runtime('localization', parsed_return) time.sleep(data_source.settings.loaded['localization_plugin_wait_time']) def start_plugin(data_source_received): global data_source data_source = data_source_received try: thread = threading.Thread(target=listener) thread.start() except: print("Failed to start localization plugin")
true
true
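start_plugin() in the localization.py record above takes a data_source whose shape is only implied: it must expose settings.loaded entries for 'localization_bin' and 'localization_plugin_wait_time', plus a settings.add_runtime() method. A hedged stand-in sketching that contract (all concrete values here are assumptions, not taken from the rontext project):

class StubSettings:
    def __init__(self):
        self.loaded = {
            "localization_bin": "/usr/local/bin/locate-device",  # hypothetical binary
            "localization_plugin_wait_time": 5,                   # seconds between polls
        }
        self.runtime = {}

    def add_runtime(self, key, value):
        # The listener stores each parsed localization reading under this key.
        self.runtime[key] = value


class StubDataSource:
    def __init__(self):
        self.settings = StubSettings()

# start_plugin(StubDataSource()) would spawn the non-daemon polling thread that
# repeatedly runs the binary, parses its YAML output, and records it as 'localization'.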
7902deadd2fdfe52af38f809cd59af4ffb799d42
1308
py
Python
python/examples/Custom Shaders/Health Bars/main.py
Thaigun/Griddly
de5972a608a2928172510a0ac81a977c48af6b1f
[ "MIT" ]
null
null
null
python/examples/Custom Shaders/Health Bars/main.py
Thaigun/Griddly
de5972a608a2928172510a0ac81a977c48af6b1f
[ "MIT" ]
null
null
null
python/examples/Custom Shaders/Health Bars/main.py
Thaigun/Griddly
de5972a608a2928172510a0ac81a977c48af6b1f
[ "MIT" ]
null
null
null
import os from griddly import GymWrapperFactory, gd, GymWrapper from griddly.RenderTools import VideoRecorder, RenderToFile if __name__ == "__main__": wrapper = GymWrapperFactory() name = "projectiles_env" current_path = os.path.dirname(os.path.realpath(__file__)) env = GymWrapper( "health_bars.yaml", shader_path="shaders", player_observer_type=gd.ObserverType.SPRITE_2D, global_observer_type=gd.ObserverType.SPRITE_2D, level=0, ) env.reset() reset_global_obs = env.render(observer="global", mode="rgb_array") reset_player_obs = env.render(mode="rgb_array") render_to_file = RenderToFile() render_to_file.render(reset_global_obs, "reset_global.png") render_to_file.render(reset_player_obs, "reset_partial.png") global_recorder = VideoRecorder() global_visualization = env.render(observer="global", mode="rgb_array") global_recorder.start("global_video_test.mp4", global_visualization.shape) for i in range(1000): obs, reward, done, info = env.step(env.action_space.sample()) env.render(observer="global") frame = env.render(observer="global", mode="rgb_array") global_recorder.add_frame(frame) if done: env.reset() global_recorder.close()
30.418605
78
0.700306
import os from griddly import GymWrapperFactory, gd, GymWrapper from griddly.RenderTools import VideoRecorder, RenderToFile if __name__ == "__main__": wrapper = GymWrapperFactory() name = "projectiles_env" current_path = os.path.dirname(os.path.realpath(__file__)) env = GymWrapper( "health_bars.yaml", shader_path="shaders", player_observer_type=gd.ObserverType.SPRITE_2D, global_observer_type=gd.ObserverType.SPRITE_2D, level=0, ) env.reset() reset_global_obs = env.render(observer="global", mode="rgb_array") reset_player_obs = env.render(mode="rgb_array") render_to_file = RenderToFile() render_to_file.render(reset_global_obs, "reset_global.png") render_to_file.render(reset_player_obs, "reset_partial.png") global_recorder = VideoRecorder() global_visualization = env.render(observer="global", mode="rgb_array") global_recorder.start("global_video_test.mp4", global_visualization.shape) for i in range(1000): obs, reward, done, info = env.step(env.action_space.sample()) env.render(observer="global") frame = env.render(observer="global", mode="rgb_array") global_recorder.add_frame(frame) if done: env.reset() global_recorder.close()
true
true
7902ded1dec499c7be66619652159225c6af6d1b
8899
py
Python
third_party/android_platform/development/scripts/stack.py
lianhuaren/webrtc
096f18c11d8acb0d92820f75fdf934607f424cfc
[ "DOC", "BSD-3-Clause" ]
2151
2020-04-18T07:31:17.000Z
2022-03-31T08:39:18.000Z
third_party/android_platform/development/scripts/stack.py
lianhuaren/webrtc
096f18c11d8acb0d92820f75fdf934607f424cfc
[ "DOC", "BSD-3-Clause" ]
395
2020-04-18T08:22:18.000Z
2021-12-08T13:04:49.000Z
third_party/android_platform/development/scripts/stack.py
lianhuaren/webrtc
096f18c11d8acb0d92820f75fdf934607f424cfc
[ "DOC", "BSD-3-Clause" ]
338
2020-04-18T08:03:10.000Z
2022-03-29T12:33:22.000Z
#!/usr/bin/env python # # Copyright (C) 2013 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """stack symbolizes native crash dumps.""" import getopt import glob import logging import os import sys import stack_core import stack_libs import subprocess import symbol import sys sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir, 'build', 'android')) from pylib import constants sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir, 'tools', 'python')) import llvm_symbolizer DEFAULT_SYMROOT='/tmp/symbols' # From: https://source.android.com/source/build-numbers.html _ANDROID_M_MAJOR_VERSION=6 def PrintUsage(): """Print usage and exit with error.""" # pylint: disable-msg=C6310 print print " usage: " + sys.argv[0] + " [options] [FILE]" print print " --symbols-dir=path" print " the path to a symbols dir, such as =/tmp/out/target/product/dream/symbols" print print " --chrome-symbols-dir=path" print " the path to a Chrome symbols dir (can be absolute or relative" print " to src), such as =out/Debug/lib.unstripped" print print " --output-directory=path" print " the path to the build output directory, such as out/Debug." print " Ignored if --chrome-symbols-dir is passed." print print " --packed-relocation-adjustments" print " --no-packed-relocation-adjustments" print " turn packed relocation adjustment on and off (default is off)" print " If running on pre-M Android and the stack trace appears to" print " make no sense, try turning this feature on." print print " --symbols-zip=path" print " the path to a symbols zip file, such as =dream-symbols-12345.zip" print print " --more-info" print " --less-info" print " Change the level of detail in the output." print " --more-info is slower and more verbose, but more functions will" print " be fully qualified with namespace/classname and have full" print " argument information. Also, the 'stack data' section will be" print " printed." print print " --arch=arm|arm64|x64|x86|mips" print " the target architecture" print print " --fallback-monochrome" print " fallback to monochrome instead of chrome if fail to detect" print " shared lib which is loaded from APK, this doesn't work for" print " component build." print print " --verbose" print " enable extra logging, particularly for debugging failed symbolization" print print " FILE should contain a stack trace in it somewhere" print " the tool will find that and re-print it with" print " source files and line numbers. If you don't" print " pass FILE, or if file is -, it reads from" print " stdin." print # pylint: enable-msg=C6310 sys.exit(1) def UnzipSymbols(symbolfile, symdir=None): """Unzips a file to DEFAULT_SYMROOT and returns the unzipped location. Args: symbolfile: The .zip file to unzip symdir: Optional temporary directory to use for extraction Returns: A tuple containing (the directory into which the zip file was unzipped, the path to the "symbols" directory in the unzipped file). 
To clean up, the caller can delete the first element of the tuple. Raises: SymbolDownloadException: When the unzip fails. """ if not symdir: symdir = "%s/%s" % (DEFAULT_SYMROOT, hash(symbolfile)) if not os.path.exists(symdir): os.makedirs(symdir) print "extracting %s..." % symbolfile saveddir = os.getcwd() os.chdir(symdir) try: unzipcode = subprocess.call(["unzip", "-qq", "-o", symbolfile]) if unzipcode > 0: os.remove(symbolfile) raise SymbolDownloadException("failed to extract symbol files (%s)." % symbolfile) finally: os.chdir(saveddir) android_symbols = glob.glob("%s/out/target/product/*/symbols" % symdir) if android_symbols: return (symdir, android_symbols[0]) else: # This is a zip of Chrome symbols, so symbol.CHROME_SYMBOLS_DIR needs to be # updated to point here. symbol.CHROME_SYMBOLS_DIR = symdir return (symdir, symdir) def main(argv): try: options, arguments = getopt.getopt(argv, "", ["packed-relocation-adjustments", "no-packed-relocation-adjustments", "more-info", "less-info", "chrome-symbols-dir=", "output-directory=", "symbols-dir=", "symbols-zip=", "packed-lib=", "arch=", "fallback-monochrome", "verbose", "help"]) except getopt.GetoptError, unused_error: PrintUsage() zip_arg = None more_info = False fallback_monochrome = False arch_defined = False packed_libs = [] for option, value in options: if option == "--help": PrintUsage() elif option == "--symbols-dir": symbol.SYMBOLS_DIR = os.path.expanduser(value) elif option == "--symbols-zip": zip_arg = os.path.expanduser(value) elif option == "--arch": symbol.ARCH = value arch_defined = True elif option == "--chrome-symbols-dir": symbol.CHROME_SYMBOLS_DIR = os.path.join(constants.DIR_SOURCE_ROOT, value) elif option == "--output-directory": constants.SetOutputDirectory(value) elif option == "--packed-lib": packed_libs.append(os.path.expanduser(value)) elif option == "--more-info": more_info = True elif option == "--less-info": more_info = False elif option == "--fallback-monochrome": fallback_monochrome = True elif option == "--verbose": logging.basicConfig(level=logging.DEBUG) elif option in ( '--packed-relocation-adjustments', '--no-packed-relocation-adjustments'): print ('--[no-]packed-relocation-adjustments options are deprecated. ' 'Specify packed libs directory instead.') if len(arguments) > 1: PrintUsage() # Do an up-front test that the output directory is known. if not symbol.CHROME_SYMBOLS_DIR: constants.CheckOutputDirectory() if not arguments or arguments[0] == "-": print "Reading native crash info from stdin" f = sys.stdin else: print "Searching for native crashes in: " + os.path.realpath(arguments[0]) f = open(arguments[0], "r") lines = f.readlines() f.close() rootdir = None if zip_arg: rootdir, symbol.SYMBOLS_DIR = UnzipSymbols(zip_arg) version = stack_libs.GetTargetAndroidVersionNumber(lines) if version is None: print ("Unknown Android release, " "consider passing --packed-lib.") elif version < _ANDROID_M_MAJOR_VERSION and not packed_libs: print ("Pre-M Android release detected, " "but --packed-lib not specified. 
Stack symbolization may fail.") if (version is None or version < _ANDROID_M_MAJOR_VERSION) and packed_libs: load_vaddrs = stack_libs.GetLoadVaddrs(stripped_libs=packed_libs) else: load_vaddrs = {} print ("Reading Android symbols from: " + os.path.normpath(symbol.SYMBOLS_DIR)) chrome_search_path = symbol.GetLibrarySearchPaths() with llvm_symbolizer.LLVMSymbolizer() as symbolizer: print ("Searching for Chrome symbols from within: " + ':'.join((os.path.normpath(d) for d in chrome_search_path))) stack_core.ConvertTrace(lines, load_vaddrs, more_info, fallback_monochrome, arch_defined, symbolizer) if rootdir: # be a good citizen and clean up...os.rmdir and os.removedirs() don't work cmd = "rm -rf \"%s\"" % rootdir print "\ncleaning up (%s)" % cmd os.system(cmd) if __name__ == "__main__": sys.exit(main(sys.argv[1:])) # vi: ts=2 sw=2
35.454183
90
0.627374
"""stack symbolizes native crash dumps.""" import getopt import glob import logging import os import sys import stack_core import stack_libs import subprocess import symbol import sys sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir, 'build', 'android')) from pylib import constants sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir, 'tools', 'python')) import llvm_symbolizer DEFAULT_SYMROOT='/tmp/symbols' _ANDROID_M_MAJOR_VERSION=6 def PrintUsage(): """Print usage and exit with error.""" print print " usage: " + sys.argv[0] + " [options] [FILE]" print print " --symbols-dir=path" print " the path to a symbols dir, such as =/tmp/out/target/product/dream/symbols" print print " --chrome-symbols-dir=path" print " the path to a Chrome symbols dir (can be absolute or relative" print " to src), such as =out/Debug/lib.unstripped" print print " --output-directory=path" print " the path to the build output directory, such as out/Debug." print " Ignored if --chrome-symbols-dir is passed." print print " --packed-relocation-adjustments" print " --no-packed-relocation-adjustments" print " turn packed relocation adjustment on and off (default is off)" print " If running on pre-M Android and the stack trace appears to" print " make no sense, try turning this feature on." print print " --symbols-zip=path" print " the path to a symbols zip file, such as =dream-symbols-12345.zip" print print " --more-info" print " --less-info" print " Change the level of detail in the output." print " --more-info is slower and more verbose, but more functions will" print " be fully qualified with namespace/classname and have full" print " argument information. Also, the 'stack data' section will be" print " printed." print print " --arch=arm|arm64|x64|x86|mips" print " the target architecture" print print " --fallback-monochrome" print " fallback to monochrome instead of chrome if fail to detect" print " shared lib which is loaded from APK, this doesn't work for" print " component build." print print " --verbose" print " enable extra logging, particularly for debugging failed symbolization" print print " FILE should contain a stack trace in it somewhere" print " the tool will find that and re-print it with" print " source files and line numbers. If you don't" print " pass FILE, or if file is -, it reads from" print " stdin." print sys.exit(1) def UnzipSymbols(symbolfile, symdir=None): """Unzips a file to DEFAULT_SYMROOT and returns the unzipped location. Args: symbolfile: The .zip file to unzip symdir: Optional temporary directory to use for extraction Returns: A tuple containing (the directory into which the zip file was unzipped, the path to the "symbols" directory in the unzipped file). To clean up, the caller can delete the first element of the tuple. Raises: SymbolDownloadException: When the unzip fails. """ if not symdir: symdir = "%s/%s" % (DEFAULT_SYMROOT, hash(symbolfile)) if not os.path.exists(symdir): os.makedirs(symdir) print "extracting %s..." % symbolfile saveddir = os.getcwd() os.chdir(symdir) try: unzipcode = subprocess.call(["unzip", "-qq", "-o", symbolfile]) if unzipcode > 0: os.remove(symbolfile) raise SymbolDownloadException("failed to extract symbol files (%s)." 
% symbolfile) finally: os.chdir(saveddir) android_symbols = glob.glob("%s/out/target/product/*/symbols" % symdir) if android_symbols: return (symdir, android_symbols[0]) else: symbol.CHROME_SYMBOLS_DIR = symdir return (symdir, symdir) def main(argv): try: options, arguments = getopt.getopt(argv, "", ["packed-relocation-adjustments", "no-packed-relocation-adjustments", "more-info", "less-info", "chrome-symbols-dir=", "output-directory=", "symbols-dir=", "symbols-zip=", "packed-lib=", "arch=", "fallback-monochrome", "verbose", "help"]) except getopt.GetoptError, unused_error: PrintUsage() zip_arg = None more_info = False fallback_monochrome = False arch_defined = False packed_libs = [] for option, value in options: if option == "--help": PrintUsage() elif option == "--symbols-dir": symbol.SYMBOLS_DIR = os.path.expanduser(value) elif option == "--symbols-zip": zip_arg = os.path.expanduser(value) elif option == "--arch": symbol.ARCH = value arch_defined = True elif option == "--chrome-symbols-dir": symbol.CHROME_SYMBOLS_DIR = os.path.join(constants.DIR_SOURCE_ROOT, value) elif option == "--output-directory": constants.SetOutputDirectory(value) elif option == "--packed-lib": packed_libs.append(os.path.expanduser(value)) elif option == "--more-info": more_info = True elif option == "--less-info": more_info = False elif option == "--fallback-monochrome": fallback_monochrome = True elif option == "--verbose": logging.basicConfig(level=logging.DEBUG) elif option in ( '--packed-relocation-adjustments', '--no-packed-relocation-adjustments'): print ('--[no-]packed-relocation-adjustments options are deprecated. ' 'Specify packed libs directory instead.') if len(arguments) > 1: PrintUsage() if not symbol.CHROME_SYMBOLS_DIR: constants.CheckOutputDirectory() if not arguments or arguments[0] == "-": print "Reading native crash info from stdin" f = sys.stdin else: print "Searching for native crashes in: " + os.path.realpath(arguments[0]) f = open(arguments[0], "r") lines = f.readlines() f.close() rootdir = None if zip_arg: rootdir, symbol.SYMBOLS_DIR = UnzipSymbols(zip_arg) version = stack_libs.GetTargetAndroidVersionNumber(lines) if version is None: print ("Unknown Android release, " "consider passing --packed-lib.") elif version < _ANDROID_M_MAJOR_VERSION and not packed_libs: print ("Pre-M Android release detected, " "but --packed-lib not specified. Stack symbolization may fail.") if (version is None or version < _ANDROID_M_MAJOR_VERSION) and packed_libs: load_vaddrs = stack_libs.GetLoadVaddrs(stripped_libs=packed_libs) else: load_vaddrs = {} print ("Reading Android symbols from: " + os.path.normpath(symbol.SYMBOLS_DIR)) chrome_search_path = symbol.GetLibrarySearchPaths() with llvm_symbolizer.LLVMSymbolizer() as symbolizer: print ("Searching for Chrome symbols from within: " + ':'.join((os.path.normpath(d) for d in chrome_search_path))) stack_core.ConvertTrace(lines, load_vaddrs, more_info, fallback_monochrome, arch_defined, symbolizer) if rootdir: cmd = "rm -rf \"%s\"" % rootdir print "\ncleaning up (%s)" % cmd os.system(cmd) if __name__ == "__main__": sys.exit(main(sys.argv[1:])) # vi: ts=2 sw=2
false
true
7902df876873263fef39fb1c740dddee39dc9c9d
1,732
py
Python
eveparser/parsers/assets.py
Nothing4You/eveparser
f327a9609b91f1ac7afd2a8392f2d62e3d81b422
[ "MIT" ]
null
null
null
eveparser/parsers/assets.py
Nothing4You/eveparser
f327a9609b91f1ac7afd2a8392f2d62e3d81b422
[ "MIT" ]
1
2015-10-09T18:30:26.000Z
2015-10-09T18:31:37.000Z
eveparser/parsers/assets.py
Nothing4You/eveparser
f327a9609b91f1ac7afd2a8392f2d62e3d81b422
[ "MIT" ]
1
2021-08-28T01:15:15.000Z
2021-08-28T01:15:15.000Z
""" eveparser.parsers.assets ~~~~~~~~~~~~~~~~~~~~~~~ Parse eve online asset lists. This also invludes inventory listings. """ import re from eveparser.utils import regex_match_lines, f_int ASSET_LIST_RE = re.compile(r"""^([\S ]*) # name \t([\d,'\.]*) # quantity (\t([\S ]*))? # group (\t([\S ]*))? # category (\t(XLarge|Large|Medium|Small|))? # size (\t(High|Medium|Low|Rigs|[\d ]*))? # slot (\t([\d ,\.]* m3))? # volume (\t([\d]+|))? # meta level (\t([\d]+|))?$ # tech level """, re.X) def parse_assets(lines): """ Parse asset list :param string paste_string: An asset list string """ matches, bad_lines = regex_match_lines(ASSET_LIST_RE, lines) result = [{'name': name, 'quantity': f_int(quantity) or 1, 'group': group, 'category': category, 'size': size, 'slot': slot, 'volume': volume, 'meta_level': meta_level, 'tech_level': tech_level} for (name, quantity, _, group, _, category, _, size, _, slot, _, volume, _, meta_level, _, tech_level) in matches] return result, bad_lines
35.346939
79
0.363164
import re from eveparser.utils import regex_match_lines, f_int ASSET_LIST_RE = re.compile(r"""^([\S ]*) # name \t([\d,'\.]*) # quantity (\t([\S ]*))? # group (\t([\S ]*))? # category (\t(XLarge|Large|Medium|Small|))? # size (\t(High|Medium|Low|Rigs|[\d ]*))? # slot (\t([\d ,\.]* m3))? # volume (\t([\d]+|))? # meta level (\t([\d]+|))?$ # tech level """, re.X) def parse_assets(lines): matches, bad_lines = regex_match_lines(ASSET_LIST_RE, lines) result = [{'name': name, 'quantity': f_int(quantity) or 1, 'group': group, 'category': category, 'size': size, 'slot': slot, 'volume': volume, 'meta_level': meta_level, 'tech_level': tech_level} for (name, quantity, _, group, _, category, _, size, _, slot, _, volume, _, meta_level, _, tech_level) in matches] return result, bad_lines
true
true
7902e12e6925b1486db81a0b8f056078bc2938eb
18,676
py
Python
t/test.py
mapcollab/python-tftpy
b4a649dd499ba0add7583d1a1d6557dc13664a68
[ "MIT" ]
null
null
null
t/test.py
mapcollab/python-tftpy
b4a649dd499ba0add7583d1a1d6557dc13664a68
[ "MIT" ]
null
null
null
t/test.py
mapcollab/python-tftpy
b4a649dd499ba0add7583d1a1d6557dc13664a68
[ "MIT" ]
null
null
null
"""Unit tests for tftpy.""" import unittest import logging import tftpy import os import time import threading from errno import EINTR from multiprocessing import Queue log = tftpy.log class TestTftpyClasses(unittest.TestCase): def setUp(self): tftpy.setLogLevel(logging.DEBUG) def testTftpPacketRRQ(self): log.debug("===> Running testcase testTftpPacketRRQ") options = {} rrq = tftpy.TftpPacketRRQ() rrq.filename = 'myfilename' rrq.mode = 'octet' rrq.options = options rrq.encode() self.assert_(rrq.buffer != None, "Buffer populated") rrq.decode() self.assertEqual(rrq.filename, b"myfilename", "Filename correct") self.assertEqual(rrq.mode, b"octet", "Mode correct") self.assertEqual(rrq.options, options, "Options correct") # repeat test with options rrq.options = { 'blksize': '1024' } rrq.filename = 'myfilename' rrq.mode = 'octet' rrq.encode() self.assert_(rrq.buffer != None, "Buffer populated") rrq.decode() self.assertEqual(rrq.filename, b"myfilename", "Filename correct") self.assertEqual(rrq.mode, b"octet", "Mode correct") self.assertEqual(rrq.options['blksize'], '1024', "Blksize correct") def testTftpPacketWRQ(self): log.debug("===> Running test case testTftpPacketWRQ") options = {} wrq = tftpy.TftpPacketWRQ() wrq.filename = 'myfilename' wrq.mode = 'octet' wrq.options = options wrq.encode() self.assert_(wrq.buffer != None, "Buffer populated") wrq.decode() self.assertEqual(wrq.opcode, 2, "Opcode correct") self.assertEqual(wrq.filename, b"myfilename", "Filename correct") self.assertEqual(wrq.mode, b"octet", "Mode correct") self.assertEqual(wrq.options, options, "Options correct") # repeat test with options wrq.options = { 'blksize': '1024' } wrq.filename = 'myfilename' wrq.mode = 'octet' wrq.encode() self.assert_(wrq.buffer != None, "Buffer populated") wrq.decode() self.assertEqual(wrq.opcode, 2, "Opcode correct") self.assertEqual(wrq.filename, b"myfilename", "Filename correct") self.assertEqual(wrq.mode, b"octet", "Mode correct") self.assertEqual(wrq.options['blksize'], '1024', "Blksize correct") def testTftpPacketDAT(self): log.debug("===> Running testcase testTftpPacketDAT") dat = tftpy.TftpPacketDAT() dat.blocknumber = 5 data = "this is some data" dat.data = data dat.encode() self.assert_(dat.buffer != None, "Buffer populated") dat.decode() self.assertEqual(dat.opcode, 3, "DAT opcode is correct") self.assertEqual(dat.blocknumber, 5, "Block number is correct") self.assertEqual(dat.data, data, "DAT data is correct") def testTftpPacketACK(self): log.debug("===> Running testcase testTftpPacketACK") ack = tftpy.TftpPacketACK() ack.blocknumber = 6 ack.encode() self.assert_(ack.buffer != None, "Buffer populated") ack.decode() self.assertEqual(ack.opcode, 4, "ACK opcode is correct") self.assertEqual(ack.blocknumber, 6, "ACK blocknumber correct") def testTftpPacketERR(self): log.debug("===> Running testcase testTftpPacketERR") err = tftpy.TftpPacketERR() err.errorcode = 4 err.encode() self.assert_(err.buffer != None, "Buffer populated") err.decode() self.assertEqual(err.opcode, 5, "ERR opcode is correct") self.assertEqual(err.errorcode, 4, "ERR errorcode is correct") def testTftpPacketOACK(self): log.debug("===> Running testcase testTftpPacketOACK") oack = tftpy.TftpPacketOACK() # Test that if we make blksize a number, it comes back a string. 
oack.options = { 'blksize': 2048 } oack.encode() self.assert_(oack.buffer != None, "Buffer populated") oack.decode() self.assertEqual(oack.opcode, 6, "OACK opcode is correct") self.assertEqual(oack.options['blksize'], '2048', "OACK blksize option is correct") # Test string to string oack.options = { 'blksize': '4096' } oack.encode() self.assert_(oack.buffer != None, "Buffer populated") oack.decode() self.assertEqual(oack.opcode, 6, "OACK opcode is correct") self.assertEqual(oack.options['blksize'], '4096', "OACK blksize option is correct") def testTftpPacketFactory(self): log.debug("===> Running testcase testTftpPacketFactory") # Make sure that the correct class is created for the correct opcode. classes = { 1: tftpy.TftpPacketRRQ, 2: tftpy.TftpPacketWRQ, 3: tftpy.TftpPacketDAT, 4: tftpy.TftpPacketACK, 5: tftpy.TftpPacketERR, 6: tftpy.TftpPacketOACK } factory = tftpy.TftpPacketFactory() for opcode in classes: self.assert_(isinstance(factory._TftpPacketFactory__create(opcode), classes[opcode]), "opcode %d returns the correct class" % opcode) class TestTftpyState(unittest.TestCase): def setUp(self): tftpy.setLogLevel(logging.DEBUG) def clientServerUploadOptions(self, options, input=None, transmitname=None, server_kwargs=None): """Fire up a client and a server and do an upload.""" root = '/tmp' home = os.path.dirname(os.path.abspath(__file__)) filename = '640KBFILE' input_path = os.path.join(home, filename) if not input: input = input_path if transmitname: filename = transmitname server_kwargs = server_kwargs or {} server = tftpy.TftpServer(root, **server_kwargs) client = tftpy.TftpClient('localhost', 20001, options) # Fork a server and run the client in this process. child_pid = os.fork() if child_pid: # parent - let the server start try: time.sleep(1) client.upload(filename, input) finally: os.kill(child_pid, 15) os.waitpid(child_pid, 0) else: server.listen('localhost', 20001) def clientServerDownloadOptions(self, options, output='/tmp/out'): """Fire up a client and a server and do a download.""" root = os.path.dirname(os.path.abspath(__file__)) server = tftpy.TftpServer(root) client = tftpy.TftpClient('localhost', 20001, options) # Fork a server and run the client in this process. 
child_pid = os.fork() if child_pid: # parent - let the server start try: time.sleep(1) client.download('640KBFILE', output) finally: os.kill(child_pid, 15) os.waitpid(child_pid, 0) else: server.listen('localhost', 20001) def testClientServerNoOptions(self): self.clientServerDownloadOptions({}) def testClientServerTsizeOptions(self): self.clientServerDownloadOptions({'tsize': 64*1024}) def testClientFileObject(self): output = open('/tmp/out', 'w') self.clientServerDownloadOptions({}, output) def testClientServerBlksize(self): for blksize in [512, 1024, 2048, 4096]: self.clientServerDownloadOptions({'blksize': blksize}) def testClientServerUploadNoOptions(self): self.clientServerUploadOptions({}) def testClientServerUploadFileObj(self): fileobj = open('t/640KBFILE', 'r') self.clientServerUploadOptions({}, input=fileobj) def testClientServerUploadWithSubdirs(self): self.clientServerUploadOptions({}, transmitname='foo/bar/640KBFILE') def testClientServerUploadStartingSlash(self): self.clientServerUploadOptions({}, transmitname='/foo/bar/640KBFILE') def testClientServerUploadOptions(self): for blksize in [512, 1024, 2048, 4096]: self.clientServerUploadOptions({'blksize': blksize}) def customUploadHelper(self, return_func): q = Queue() def upload_open(path, context): q.put('called') return return_func(path) self.clientServerUploadOptions( {}, server_kwargs={'upload_open': upload_open}) self.assertEqual(q.get(True, 1), 'called') def testClientServerUploadCustomOpen(self): self.customUploadHelper(lambda p: open(p, 'wb')) def testClientServerUploadCustomOpenForbids(self): with self.assertRaisesRegexp(tftpy.TftpException, 'Access violation'): self.customUploadHelper(lambda p: None) def testClientServerUploadTsize(self): self.clientServerUploadOptions({'tsize': 64*1024}, transmitname='/foo/bar/640KBFILE') def testClientServerNoOptionsDelay(self): tftpy.TftpStates.DELAY_BLOCK = 10 self.clientServerDownloadOptions({}) tftpy.TftpStates.DELAY_BLOCK = 0 def testServerNoOptions(self): raddress = '127.0.0.2' rport = 10000 timeout = 5 root = os.path.dirname(os.path.abspath(__file__)) # Testing without the dyn_func_file set. serverstate = tftpy.TftpContextServer(raddress, rport, timeout, root) self.assertTrue( isinstance(serverstate, tftpy.TftpContextServer) ) rrq = tftpy.TftpPacketRRQ() rrq.filename = '640KBFILE' rrq.mode = 'octet' rrq.options = {} # Start the download. serverstate.start(rrq.encode().buffer) # At a 512 byte blocksize, this should be 1280 packets exactly. for block in range(1, 1281): # Should be in expectack state. self.assertTrue( isinstance(serverstate.state, tftpy.TftpStateExpectACK) ) ack = tftpy.TftpPacketACK() ack.blocknumber = block % 65536 serverstate.state = serverstate.state.handle(ack, raddress, rport) # The last DAT packet should be empty, indicating a completed # transfer. ack = tftpy.TftpPacketACK() ack.blocknumber = 1281 % 65536 finalstate = serverstate.state.handle(ack, raddress, rport) self.assertTrue( finalstate is None ) def testServerNoOptionsSubdir(self): raddress = '127.0.0.2' rport = 10000 timeout = 5 root = os.path.dirname(os.path.abspath(__file__)) # Testing without the dyn_func_file set. serverstate = tftpy.TftpContextServer(raddress, rport, timeout, root) self.assertTrue( isinstance(serverstate, tftpy.TftpContextServer) ) rrq = tftpy.TftpPacketRRQ() rrq.filename = '640KBFILE' rrq.mode = 'octet' rrq.options = {} # Start the download. serverstate.start(rrq.encode().buffer) # At a 512 byte blocksize, this should be 1280 packets exactly. 
for block in range(1, 1281): # Should be in expectack state, or None self.assertTrue( isinstance(serverstate.state, tftpy.TftpStateExpectACK) ) ack = tftpy.TftpPacketACK() ack.blocknumber = block % 65536 serverstate.state = serverstate.state.handle(ack, raddress, rport) # The last DAT packet should be empty, indicating a completed # transfer. ack = tftpy.TftpPacketACK() ack.blocknumber = 1281 % 65536 finalstate = serverstate.state.handle(ack, raddress, rport) self.assertTrue( finalstate is None ) def testServerInsecurePath(self): raddress = '127.0.0.2' rport = 10000 timeout = 5 root = os.path.dirname(os.path.abspath(__file__)) serverstate = tftpy.TftpContextServer(raddress, rport, timeout, root) rrq = tftpy.TftpPacketRRQ() rrq.filename = '../setup.py' rrq.mode = 'octet' rrq.options = {} # Start the download. self.assertRaises(tftpy.TftpException, serverstate.start, rrq.encode().buffer) def testServerSecurePath(self): raddress = '127.0.0.2' rport = 10000 timeout = 5 root = os.path.dirname(os.path.abspath(__file__)) serverstate = tftpy.TftpContextServer(raddress, rport, timeout, root) rrq = tftpy.TftpPacketRRQ() rrq.filename = '640KBFILE' rrq.mode = 'octet' rrq.options = {} # Start the download. serverstate.start(rrq.encode().buffer) # Should be in expectack state. self.assertTrue(isinstance(serverstate.state, tftpy.TftpStateExpectACK)) def testServerDownloadWithStopNow(self, output='/tmp/out'): log.debug("===> Running testcase testServerDownloadWithStopNow") root = os.path.dirname(os.path.abspath(__file__)) server = tftpy.TftpServer(root) client = tftpy.TftpClient('localhost', 20001, {}) # Fork a server and run the client in this process. child_pid = os.fork() if child_pid: try: # parent - let the server start stopped_early = False time.sleep(1) def delay_hook(pkt): time.sleep(0.005) # 5ms client.download('640KBFILE', output, delay_hook) except: log.warn("client threw exception as expected") stopped_early = True finally: os.kill(child_pid, 15) os.waitpid(child_pid, 0) self.assertTrue( stopped_early == True, "Server should not exit early" ) else: import signal def handlealarm(signum, frame): server.stop(now=True) signal.signal(signal.SIGALRM, handlealarm) signal.alarm(2) try: server.listen('localhost', 20001) log.error("server didn't throw exception") except Exception as err: log.error("server got unexpected exception %s" % err) # Wait until parent kills us while True: time.sleep(1) def testServerDownloadWithStopNotNow(self, output='/tmp/out'): log.debug("===> Running testcase testServerDownloadWithStopNotNow") root = os.path.dirname(os.path.abspath(__file__)) server = tftpy.TftpServer(root) client = tftpy.TftpClient('localhost', 20001, {}) # Fork a server and run the client in this process. 
child_pid = os.fork() if child_pid: try: stopped_early = True # parent - let the server start time.sleep(1) def delay_hook(pkt): time.sleep(0.005) # 5ms client.download('640KBFILE', output, delay_hook) stopped_early = False except: log.warn("client threw exception as expected") finally: os.kill(child_pid, 15) os.waitpid(child_pid, 0) self.assertTrue( stopped_early == False, "Server should not exit early" ) else: import signal def handlealarm(signum, frame): server.stop(now=False) signal.signal(signal.SIGALRM, handlealarm) signal.alarm(2) try: server.listen('localhost', 20001) except Exception as err: log.error("server threw exception %s" % err) # Wait until parent kills us while True: time.sleep(1) def testServerDownloadWithDynamicPort(self, output='/tmp/out'): log.debug("===> Running testcase testServerDownloadWithDynamicPort") root = os.path.dirname(os.path.abspath(__file__)) server = tftpy.TftpServer(root) server_thread = threading.Thread(target=server.listen, kwargs={'listenip': 'localhost', 'listenport': 0}) server_thread.start() try: server.is_running.wait() client = tftpy.TftpClient('localhost', server.listenport, {}) time.sleep(1) client.download('640KBFILE', output) finally: server.stop(now=False) server_thread.join() class TestTftpyLoggers(unittest.TestCase): def setUp(self): tftpy.setLogLevel(logging.DEBUG) def testStreamLogger(self): # Not sure how best to test this. Maybe configure the loggers and look # for any complaints. try: tftpy.addHandler(tftpy.create_streamhandler()) self.assertTrue( True ) except: self.assertTrue( False ) def testFileLogger(self): # Same as previous. try: tftpy.addHandler(tftpy.create_rotatingfilehandler('/tmp/log')) self.assertTrue( True ) except: self.assertTrue( False ) if __name__ == '__main__': unittest.main()
37.277445
93
0.557775
import unittest import logging import tftpy import os import time import threading from errno import EINTR from multiprocessing import Queue log = tftpy.log class TestTftpyClasses(unittest.TestCase): def setUp(self): tftpy.setLogLevel(logging.DEBUG) def testTftpPacketRRQ(self): log.debug("===> Running testcase testTftpPacketRRQ") options = {} rrq = tftpy.TftpPacketRRQ() rrq.filename = 'myfilename' rrq.mode = 'octet' rrq.options = options rrq.encode() self.assert_(rrq.buffer != None, "Buffer populated") rrq.decode() self.assertEqual(rrq.filename, b"myfilename", "Filename correct") self.assertEqual(rrq.mode, b"octet", "Mode correct") self.assertEqual(rrq.options, options, "Options correct") rrq.options = { 'blksize': '1024' } rrq.filename = 'myfilename' rrq.mode = 'octet' rrq.encode() self.assert_(rrq.buffer != None, "Buffer populated") rrq.decode() self.assertEqual(rrq.filename, b"myfilename", "Filename correct") self.assertEqual(rrq.mode, b"octet", "Mode correct") self.assertEqual(rrq.options['blksize'], '1024', "Blksize correct") def testTftpPacketWRQ(self): log.debug("===> Running test case testTftpPacketWRQ") options = {} wrq = tftpy.TftpPacketWRQ() wrq.filename = 'myfilename' wrq.mode = 'octet' wrq.options = options wrq.encode() self.assert_(wrq.buffer != None, "Buffer populated") wrq.decode() self.assertEqual(wrq.opcode, 2, "Opcode correct") self.assertEqual(wrq.filename, b"myfilename", "Filename correct") self.assertEqual(wrq.mode, b"octet", "Mode correct") self.assertEqual(wrq.options, options, "Options correct") wrq.options = { 'blksize': '1024' } wrq.filename = 'myfilename' wrq.mode = 'octet' wrq.encode() self.assert_(wrq.buffer != None, "Buffer populated") wrq.decode() self.assertEqual(wrq.opcode, 2, "Opcode correct") self.assertEqual(wrq.filename, b"myfilename", "Filename correct") self.assertEqual(wrq.mode, b"octet", "Mode correct") self.assertEqual(wrq.options['blksize'], '1024', "Blksize correct") def testTftpPacketDAT(self): log.debug("===> Running testcase testTftpPacketDAT") dat = tftpy.TftpPacketDAT() dat.blocknumber = 5 data = "this is some data" dat.data = data dat.encode() self.assert_(dat.buffer != None, "Buffer populated") dat.decode() self.assertEqual(dat.opcode, 3, "DAT opcode is correct") self.assertEqual(dat.blocknumber, 5, "Block number is correct") self.assertEqual(dat.data, data, "DAT data is correct") def testTftpPacketACK(self): log.debug("===> Running testcase testTftpPacketACK") ack = tftpy.TftpPacketACK() ack.blocknumber = 6 ack.encode() self.assert_(ack.buffer != None, "Buffer populated") ack.decode() self.assertEqual(ack.opcode, 4, "ACK opcode is correct") self.assertEqual(ack.blocknumber, 6, "ACK blocknumber correct") def testTftpPacketERR(self): log.debug("===> Running testcase testTftpPacketERR") err = tftpy.TftpPacketERR() err.errorcode = 4 err.encode() self.assert_(err.buffer != None, "Buffer populated") err.decode() self.assertEqual(err.opcode, 5, "ERR opcode is correct") self.assertEqual(err.errorcode, 4, "ERR errorcode is correct") def testTftpPacketOACK(self): log.debug("===> Running testcase testTftpPacketOACK") oack = tftpy.TftpPacketOACK() oack.options = { 'blksize': 2048 } oack.encode() self.assert_(oack.buffer != None, "Buffer populated") oack.decode() self.assertEqual(oack.opcode, 6, "OACK opcode is correct") self.assertEqual(oack.options['blksize'], '2048', "OACK blksize option is correct") oack.options = { 'blksize': '4096' } oack.encode() self.assert_(oack.buffer != None, "Buffer populated") oack.decode() self.assertEqual(oack.opcode, 
6, "OACK opcode is correct") self.assertEqual(oack.options['blksize'], '4096', "OACK blksize option is correct") def testTftpPacketFactory(self): log.debug("===> Running testcase testTftpPacketFactory") classes = { 1: tftpy.TftpPacketRRQ, 2: tftpy.TftpPacketWRQ, 3: tftpy.TftpPacketDAT, 4: tftpy.TftpPacketACK, 5: tftpy.TftpPacketERR, 6: tftpy.TftpPacketOACK } factory = tftpy.TftpPacketFactory() for opcode in classes: self.assert_(isinstance(factory._TftpPacketFactory__create(opcode), classes[opcode]), "opcode %d returns the correct class" % opcode) class TestTftpyState(unittest.TestCase): def setUp(self): tftpy.setLogLevel(logging.DEBUG) def clientServerUploadOptions(self, options, input=None, transmitname=None, server_kwargs=None): root = '/tmp' home = os.path.dirname(os.path.abspath(__file__)) filename = '640KBFILE' input_path = os.path.join(home, filename) if not input: input = input_path if transmitname: filename = transmitname server_kwargs = server_kwargs or {} server = tftpy.TftpServer(root, **server_kwargs) client = tftpy.TftpClient('localhost', 20001, options) child_pid = os.fork() if child_pid: try: time.sleep(1) client.upload(filename, input) finally: os.kill(child_pid, 15) os.waitpid(child_pid, 0) else: server.listen('localhost', 20001) def clientServerDownloadOptions(self, options, output='/tmp/out'): root = os.path.dirname(os.path.abspath(__file__)) server = tftpy.TftpServer(root) client = tftpy.TftpClient('localhost', 20001, options) child_pid = os.fork() if child_pid: try: time.sleep(1) client.download('640KBFILE', output) finally: os.kill(child_pid, 15) os.waitpid(child_pid, 0) else: server.listen('localhost', 20001) def testClientServerNoOptions(self): self.clientServerDownloadOptions({}) def testClientServerTsizeOptions(self): self.clientServerDownloadOptions({'tsize': 64*1024}) def testClientFileObject(self): output = open('/tmp/out', 'w') self.clientServerDownloadOptions({}, output) def testClientServerBlksize(self): for blksize in [512, 1024, 2048, 4096]: self.clientServerDownloadOptions({'blksize': blksize}) def testClientServerUploadNoOptions(self): self.clientServerUploadOptions({}) def testClientServerUploadFileObj(self): fileobj = open('t/640KBFILE', 'r') self.clientServerUploadOptions({}, input=fileobj) def testClientServerUploadWithSubdirs(self): self.clientServerUploadOptions({}, transmitname='foo/bar/640KBFILE') def testClientServerUploadStartingSlash(self): self.clientServerUploadOptions({}, transmitname='/foo/bar/640KBFILE') def testClientServerUploadOptions(self): for blksize in [512, 1024, 2048, 4096]: self.clientServerUploadOptions({'blksize': blksize}) def customUploadHelper(self, return_func): q = Queue() def upload_open(path, context): q.put('called') return return_func(path) self.clientServerUploadOptions( {}, server_kwargs={'upload_open': upload_open}) self.assertEqual(q.get(True, 1), 'called') def testClientServerUploadCustomOpen(self): self.customUploadHelper(lambda p: open(p, 'wb')) def testClientServerUploadCustomOpenForbids(self): with self.assertRaisesRegexp(tftpy.TftpException, 'Access violation'): self.customUploadHelper(lambda p: None) def testClientServerUploadTsize(self): self.clientServerUploadOptions({'tsize': 64*1024}, transmitname='/foo/bar/640KBFILE') def testClientServerNoOptionsDelay(self): tftpy.TftpStates.DELAY_BLOCK = 10 self.clientServerDownloadOptions({}) tftpy.TftpStates.DELAY_BLOCK = 0 def testServerNoOptions(self): raddress = '127.0.0.2' rport = 10000 timeout = 5 root = os.path.dirname(os.path.abspath(__file__)) serverstate 
= tftpy.TftpContextServer(raddress, rport, timeout, root) self.assertTrue( isinstance(serverstate, tftpy.TftpContextServer) ) rrq = tftpy.TftpPacketRRQ() rrq.filename = '640KBFILE' rrq.mode = 'octet' rrq.options = {} serverstate.start(rrq.encode().buffer) for block in range(1, 1281): self.assertTrue( isinstance(serverstate.state, tftpy.TftpStateExpectACK) ) ack = tftpy.TftpPacketACK() ack.blocknumber = block % 65536 serverstate.state = serverstate.state.handle(ack, raddress, rport) ack = tftpy.TftpPacketACK() ack.blocknumber = 1281 % 65536 finalstate = serverstate.state.handle(ack, raddress, rport) self.assertTrue( finalstate is None ) def testServerNoOptionsSubdir(self): raddress = '127.0.0.2' rport = 10000 timeout = 5 root = os.path.dirname(os.path.abspath(__file__)) serverstate = tftpy.TftpContextServer(raddress, rport, timeout, root) self.assertTrue( isinstance(serverstate, tftpy.TftpContextServer) ) rrq = tftpy.TftpPacketRRQ() rrq.filename = '640KBFILE' rrq.mode = 'octet' rrq.options = {} serverstate.start(rrq.encode().buffer) for block in range(1, 1281): self.assertTrue( isinstance(serverstate.state, tftpy.TftpStateExpectACK) ) ack = tftpy.TftpPacketACK() ack.blocknumber = block % 65536 serverstate.state = serverstate.state.handle(ack, raddress, rport) ack = tftpy.TftpPacketACK() ack.blocknumber = 1281 % 65536 finalstate = serverstate.state.handle(ack, raddress, rport) self.assertTrue( finalstate is None ) def testServerInsecurePath(self): raddress = '127.0.0.2' rport = 10000 timeout = 5 root = os.path.dirname(os.path.abspath(__file__)) serverstate = tftpy.TftpContextServer(raddress, rport, timeout, root) rrq = tftpy.TftpPacketRRQ() rrq.filename = '../setup.py' rrq.mode = 'octet' rrq.options = {} self.assertRaises(tftpy.TftpException, serverstate.start, rrq.encode().buffer) def testServerSecurePath(self): raddress = '127.0.0.2' rport = 10000 timeout = 5 root = os.path.dirname(os.path.abspath(__file__)) serverstate = tftpy.TftpContextServer(raddress, rport, timeout, root) rrq = tftpy.TftpPacketRRQ() rrq.filename = '640KBFILE' rrq.mode = 'octet' rrq.options = {} serverstate.start(rrq.encode().buffer) self.assertTrue(isinstance(serverstate.state, tftpy.TftpStateExpectACK)) def testServerDownloadWithStopNow(self, output='/tmp/out'): log.debug("===> Running testcase testServerDownloadWithStopNow") root = os.path.dirname(os.path.abspath(__file__)) server = tftpy.TftpServer(root) client = tftpy.TftpClient('localhost', 20001, {}) child_pid = os.fork() if child_pid: try: stopped_early = False time.sleep(1) def delay_hook(pkt): time.sleep(0.005) client.download('640KBFILE', output, delay_hook) except: log.warn("client threw exception as expected") stopped_early = True finally: os.kill(child_pid, 15) os.waitpid(child_pid, 0) self.assertTrue( stopped_early == True, "Server should not exit early" ) else: import signal def handlealarm(signum, frame): server.stop(now=True) signal.signal(signal.SIGALRM, handlealarm) signal.alarm(2) try: server.listen('localhost', 20001) log.error("server didn't throw exception") except Exception as err: log.error("server got unexpected exception %s" % err) # Wait until parent kills us while True: time.sleep(1) def testServerDownloadWithStopNotNow(self, output='/tmp/out'): log.debug("===> Running testcase testServerDownloadWithStopNotNow") root = os.path.dirname(os.path.abspath(__file__)) server = tftpy.TftpServer(root) client = tftpy.TftpClient('localhost', 20001, {}) # Fork a server and run the client in this process. 
child_pid = os.fork() if child_pid: try: stopped_early = True # parent - let the server start time.sleep(1) def delay_hook(pkt): time.sleep(0.005) # 5ms client.download('640KBFILE', output, delay_hook) stopped_early = False except: log.warn("client threw exception as expected") finally: os.kill(child_pid, 15) os.waitpid(child_pid, 0) self.assertTrue( stopped_early == False, "Server should not exit early" ) else: import signal def handlealarm(signum, frame): server.stop(now=False) signal.signal(signal.SIGALRM, handlealarm) signal.alarm(2) try: server.listen('localhost', 20001) except Exception as err: log.error("server threw exception %s" % err) # Wait until parent kills us while True: time.sleep(1) def testServerDownloadWithDynamicPort(self, output='/tmp/out'): log.debug("===> Running testcase testServerDownloadWithDynamicPort") root = os.path.dirname(os.path.abspath(__file__)) server = tftpy.TftpServer(root) server_thread = threading.Thread(target=server.listen, kwargs={'listenip': 'localhost', 'listenport': 0}) server_thread.start() try: server.is_running.wait() client = tftpy.TftpClient('localhost', server.listenport, {}) time.sleep(1) client.download('640KBFILE', output) finally: server.stop(now=False) server_thread.join() class TestTftpyLoggers(unittest.TestCase): def setUp(self): tftpy.setLogLevel(logging.DEBUG) def testStreamLogger(self): # Not sure how best to test this. Maybe configure the loggers and look # for any complaints. try: tftpy.addHandler(tftpy.create_streamhandler()) self.assertTrue( True ) except: self.assertTrue( False ) def testFileLogger(self): # Same as previous. try: tftpy.addHandler(tftpy.create_rotatingfilehandler('/tmp/log')) self.assertTrue( True ) except: self.assertTrue( False ) if __name__ == '__main__': unittest.main()
true
true
7902e14b8974d257576b24fae44d938c6e17498b
7,586
py
Python
tests/integration-tests/tests/tags/test_tag_propagation.py
eshpc/aws-parallelcluster
8cc6169a12661ce1c0025c93ebd9019c26e7219e
[ "Apache-2.0" ]
null
null
null
tests/integration-tests/tests/tags/test_tag_propagation.py
eshpc/aws-parallelcluster
8cc6169a12661ce1c0025c93ebd9019c26e7219e
[ "Apache-2.0" ]
108
2021-10-11T09:12:06.000Z
2022-03-28T09:28:39.000Z
tests/integration-tests/tests/tags/test_tag_propagation.py
yuleiwan/aws-parallelcluster
aad2a3019ef4ad08d702f5acf41b152b3f7a0b46
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "LICENSE.txt" file accompanying this file. # This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. # See the License for the specific language governing permissions and limitations under the License. # import logging import json import logging import subprocess as sp import boto3 import pytest from assertpy import assert_that from utils import get_root_volume_id @pytest.mark.regions(["ap-southeast-1"]) @pytest.mark.instances(["c5.xlarge"]) @pytest.mark.oss(["alinux2"]) @pytest.mark.schedulers(["slurm", "awsbatch"]) @pytest.mark.usefixtures("region", "instance") def test_tag_propagation(pcluster_config_reader, clusters_factory, scheduler, os): """ Verify tags from various sources are propagated to the expected resources. The following resources are checked for tags: - main CFN stack - head node - head node's root EBS volume - compute node (traditional schedulers) - compute node's root EBS volume (traditional schedulers) - shared EBS volume """ config_file_tags = {"ConfigFileTag": "ConfigFileTagValue"} version_tags = {"parallelcluster:version": get_pcluster_version()} cluster_config = pcluster_config_reader() cluster = clusters_factory(cluster_config) cluster_name_tags = {"parallelcluster:cluster-name": cluster.name} test_cases = [ { "resource": "Main CloudFormation Stack", "tag_getter": get_main_stack_tags, "expected_tags": (version_tags, config_file_tags), }, { "resource": "Head Node", "tag_getter": get_head_node_tags, "expected_tags": ( cluster_name_tags, {"Name": "HeadNode", "parallelcluster:node-type": "HeadNode"}, ), }, { "resource": "Head Node Root Volume", "tag_getter": get_head_node_root_volume_tags, "expected_tags": (cluster_name_tags, {"parallelcluster:node-type": "HeadNode"}), "tag_getter_kwargs": {"cluster": cluster, "os": os}, }, { "resource": "Compute Node", "tag_getter": get_compute_node_tags, "expected_tags": ( cluster_name_tags, {"Name": "Compute", "parallelcluster:node-type": "Compute"}, config_file_tags, ), "skip": scheduler == "awsbatch", }, { "resource": "Compute Node Root Volume", "tag_getter": get_compute_node_root_volume_tags, "expected_tags": ( cluster_name_tags, {"parallelcluster:node-type": "Compute"}, config_file_tags if scheduler == "slurm" else {}, ), "tag_getter_kwargs": {"cluster": cluster, "os": os}, "skip": scheduler == "awsbatch", }, { "resource": "Shared EBS Volume", "tag_getter": get_shared_volume_tags, "expected_tags": (version_tags, config_file_tags), }, ] for test_case in test_cases: if test_case.get("skip"): continue logging.info("Verifying tags were propagated to %s", test_case.get("resource")) tag_getter = test_case.get("tag_getter") # Assume tag getters use lone cluster object arg if none explicitly given tag_getter_args = test_case.get("tag_getter_kwargs", {"cluster": cluster}) observed_tags = tag_getter(**tag_getter_args) expected_tags = test_case["expected_tags"] assert_that(observed_tags).contains(*convert_tags_dicts_to_tags_list(expected_tags)) def convert_tags_dicts_to_tags_list(tags_dicts): """Convert dicts of the form {key: value} to a list like [{"Key": key, "Value": value}].""" tags_list = [] for tags_dict in tags_dicts: tags_list.extend([{"Key": key, "Value": value} for key, value in 
tags_dict.items()]) return tags_list def get_cloudformation_tags(region, stack_name): """ Return the tags for the CFN stack with the given name. The returned value is a list like the following: [ {'Key': 'Key2', 'Value': 'Value2'}, {'Key': 'Key1', 'Value': 'Value1'}, ] """ cfn_client = boto3.client("cloudformation", region_name=region) response = cfn_client.describe_stacks(StackName=stack_name) return response["Stacks"][0]["Tags"] def get_main_stack_tags(cluster): """Return the tags for the cluster's main CFN stack.""" return get_cloudformation_tags(cluster.region, cluster.cfn_name) def get_head_node_instance_id(cluster): """Return the given cluster's head node's instance ID.""" return cluster.cfn_resources.get("HeadNode") def get_ec2_instance_tags(instance_id, region): """Return a list of tags associated with the given EC2 instance.""" logging.info("Getting tags for instance %s", instance_id) return ( boto3.client("ec2", region_name=region) .describe_instances(InstanceIds=[instance_id]) .get("Reservations")[0] .get("Instances")[0] .get("Tags") ) def get_tags_for_volume(volume_id, region): """Return the tags attached to the given EBS volume.""" logging.info("Getting tags for volume %s", volume_id) return boto3.client("ec2", region_name=region).describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0].get("Tags") def get_head_node_root_volume_tags(cluster, os): """Return the given cluster's head node's root volume's tags.""" head_node_instance_id = get_head_node_instance_id(cluster) root_volume_id = get_root_volume_id(head_node_instance_id, cluster.region, os) return get_tags_for_volume(root_volume_id, cluster.region) def get_head_node_tags(cluster): """Return the given cluster's head node's tags.""" head_node_instance_id = get_head_node_instance_id(cluster) return get_ec2_instance_tags(head_node_instance_id, cluster.region) def get_compute_node_root_volume_tags(cluster, os): """Return the given cluster's compute node's root volume's tags.""" compute_nodes = cluster.get_cluster_instance_ids(node_type="Compute") assert_that(compute_nodes).is_length(1) root_volume_id = get_root_volume_id(compute_nodes[0], cluster.region, os) return get_tags_for_volume(root_volume_id, cluster.region) def get_compute_node_tags(cluster): """Return the given cluster's compute node's tags.""" compute_nodes = cluster.get_cluster_instance_ids(node_type="Compute") assert_that(compute_nodes).is_length(1) return get_ec2_instance_tags(compute_nodes[0], cluster.region) def get_ebs_volume_tags(volume_id, region): """Return the tags associated with the given EBS volume.""" return boto3.client("ec2", region_name=region).describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0].get("Tags") def get_shared_volume_tags(cluster): """Return the given cluster's EBS volume's tags.""" shared_volume = cluster.cfn_resources.get("EBS0") return get_ebs_volume_tags(shared_volume, cluster.region) def get_pcluster_version(): """Return the installed version of the pcluster CLI.""" return json.loads(sp.check_output("pcluster version".split()).decode().strip()).get("version")
38.313131
120
0.676246
import json import logging import subprocess as sp import boto3 import pytest from assertpy import assert_that from utils import get_root_volume_id @pytest.mark.regions(["ap-southeast-1"]) @pytest.mark.instances(["c5.xlarge"]) @pytest.mark.oss(["alinux2"]) @pytest.mark.schedulers(["slurm", "awsbatch"]) @pytest.mark.usefixtures("region", "instance") def test_tag_propagation(pcluster_config_reader, clusters_factory, scheduler, os): config_file_tags = {"ConfigFileTag": "ConfigFileTagValue"} version_tags = {"parallelcluster:version": get_pcluster_version()} cluster_config = pcluster_config_reader() cluster = clusters_factory(cluster_config) cluster_name_tags = {"parallelcluster:cluster-name": cluster.name} test_cases = [ { "resource": "Main CloudFormation Stack", "tag_getter": get_main_stack_tags, "expected_tags": (version_tags, config_file_tags), }, { "resource": "Head Node", "tag_getter": get_head_node_tags, "expected_tags": ( cluster_name_tags, {"Name": "HeadNode", "parallelcluster:node-type": "HeadNode"}, ), }, { "resource": "Head Node Root Volume", "tag_getter": get_head_node_root_volume_tags, "expected_tags": (cluster_name_tags, {"parallelcluster:node-type": "HeadNode"}), "tag_getter_kwargs": {"cluster": cluster, "os": os}, }, { "resource": "Compute Node", "tag_getter": get_compute_node_tags, "expected_tags": ( cluster_name_tags, {"Name": "Compute", "parallelcluster:node-type": "Compute"}, config_file_tags, ), "skip": scheduler == "awsbatch", }, { "resource": "Compute Node Root Volume", "tag_getter": get_compute_node_root_volume_tags, "expected_tags": ( cluster_name_tags, {"parallelcluster:node-type": "Compute"}, config_file_tags if scheduler == "slurm" else {}, ), "tag_getter_kwargs": {"cluster": cluster, "os": os}, "skip": scheduler == "awsbatch", }, { "resource": "Shared EBS Volume", "tag_getter": get_shared_volume_tags, "expected_tags": (version_tags, config_file_tags), }, ] for test_case in test_cases: if test_case.get("skip"): continue logging.info("Verifying tags were propagated to %s", test_case.get("resource")) tag_getter = test_case.get("tag_getter") tag_getter_args = test_case.get("tag_getter_kwargs", {"cluster": cluster}) observed_tags = tag_getter(**tag_getter_args) expected_tags = test_case["expected_tags"] assert_that(observed_tags).contains(*convert_tags_dicts_to_tags_list(expected_tags)) def convert_tags_dicts_to_tags_list(tags_dicts): tags_list = [] for tags_dict in tags_dicts: tags_list.extend([{"Key": key, "Value": value} for key, value in tags_dict.items()]) return tags_list def get_cloudformation_tags(region, stack_name): cfn_client = boto3.client("cloudformation", region_name=region) response = cfn_client.describe_stacks(StackName=stack_name) return response["Stacks"][0]["Tags"] def get_main_stack_tags(cluster): return get_cloudformation_tags(cluster.region, cluster.cfn_name) def get_head_node_instance_id(cluster): return cluster.cfn_resources.get("HeadNode") def get_ec2_instance_tags(instance_id, region): logging.info("Getting tags for instance %s", instance_id) return ( boto3.client("ec2", region_name=region) .describe_instances(InstanceIds=[instance_id]) .get("Reservations")[0] .get("Instances")[0] .get("Tags") ) def get_tags_for_volume(volume_id, region): logging.info("Getting tags for volume %s", volume_id) return boto3.client("ec2", region_name=region).describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0].get("Tags") def get_head_node_root_volume_tags(cluster, os): head_node_instance_id = get_head_node_instance_id(cluster) root_volume_id = 
get_root_volume_id(head_node_instance_id, cluster.region, os) return get_tags_for_volume(root_volume_id, cluster.region) def get_head_node_tags(cluster): head_node_instance_id = get_head_node_instance_id(cluster) return get_ec2_instance_tags(head_node_instance_id, cluster.region) def get_compute_node_root_volume_tags(cluster, os): compute_nodes = cluster.get_cluster_instance_ids(node_type="Compute") assert_that(compute_nodes).is_length(1) root_volume_id = get_root_volume_id(compute_nodes[0], cluster.region, os) return get_tags_for_volume(root_volume_id, cluster.region) def get_compute_node_tags(cluster): compute_nodes = cluster.get_cluster_instance_ids(node_type="Compute") assert_that(compute_nodes).is_length(1) return get_ec2_instance_tags(compute_nodes[0], cluster.region) def get_ebs_volume_tags(volume_id, region): return boto3.client("ec2", region_name=region).describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0].get("Tags") def get_shared_volume_tags(cluster): shared_volume = cluster.cfn_resources.get("EBS0") return get_ebs_volume_tags(shared_volume, cluster.region) def get_pcluster_version(): return json.loads(sp.check_output("pcluster version".split()).decode().strip()).get("version")
true
true
7902e254b527a3bf21be52288df5e342c0d9e0af
2,074
py
Python
examples/manual-control-opencv.py
elizabethhng/DJITelloPy
6a84675d531431484f6aee6a111fa2796be55628
[ "MIT" ]
null
null
null
examples/manual-control-opencv.py
elizabethhng/DJITelloPy
6a84675d531431484f6aee6a111fa2796be55628
[ "MIT" ]
null
null
null
examples/manual-control-opencv.py
elizabethhng/DJITelloPy
6a84675d531431484f6aee6a111fa2796be55628
[ "MIT" ]
null
null
null
# Simple example demonstrating how to control a Tello using your keyboard. # For a more fully featured example see manual-control-pygame.py # # Use W, A, S, D for moving and E, Q for rotating. R and F switch between the # forward and downward camera, and SPACE saves a snapshot. Takeoff and landing # are commented out in this variant; press ESC to exit the script. from djitellopy import Tello import cv2, math, time tello = Tello() tello.connect() tello.streamon() frame_read = tello.get_frame_read() height, width, _ = frame_read.frame.shape # tello.takeoff() nSnap = 0 # w = cap.get(cv2.CAP_PROP_FRAME_WIDTH) # h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) w = width h = height folder = "." name = "snapshot" fileName = "%s/%s_%d_%d_" %(folder, name, w, h) while True: # In reality you want to display frames in a separate thread. Otherwise # they will freeze while the drone moves. img = frame_read.frame cv2.imshow("drone", img) # height, width, _ = frame_read.frame.shape # video = cv2.VideoWriter('video.avi', cv2.VideoWriter_fourcc(*'XVID'), 30, (width, height)) key = cv2.waitKey(1) & 0xff if key == 27: # ESC break elif key == ord('w'): tello.move_forward(30) elif key == ord('s'): tello.move_back(30) elif key == ord('a'): tello.move_left(30) elif key == ord('d'): tello.move_right(30) elif key == ord('e'): tello.rotate_clockwise(30) elif key == ord('q'): tello.rotate_counter_clockwise(30) elif key == ord('r'): tello.send_command_with_return('downvision 0') frame_read = tello.get_frame_read() elif key == ord('f'): tello.send_command_with_return('downvision 1') frame_read = tello.get_frame_read() elif key == ord(' '): print("Saving image ", nSnap) cv2.imwrite("%s%d.jpg"%(fileName, nSnap), img) nSnap += 1 # tello.land()
30.057971
96
0.644648
from djitellopy import Tello import cv2, math, time tello = Tello() tello.connect() tello.streamon() frame_read = tello.get_frame_read() height, width, _ = frame_read.frame.shape nSnap = 0 w = width h = height folder = "." name = "snapshot" fileName = "%s/%s_%d_%d_" %(folder, name, w, h) while True: img = frame_read.frame cv2.imshow("drone", img) key = cv2.waitKey(1) & 0xff if key == 27: break elif key == ord('w'): tello.move_forward(30) elif key == ord('s'): tello.move_back(30) elif key == ord('a'): tello.move_left(30) elif key == ord('d'): tello.move_right(30) elif key == ord('e'): tello.rotate_clockwise(30) elif key == ord('q'): tello.rotate_counter_clockwise(30) elif key == ord('r'): tello.send_command_with_return('downvision 0') frame_read = tello.get_frame_read() elif key == ord('f'): tello.send_command_with_return('downvision 1') frame_read = tello.get_frame_read() elif key == ord(' '): print("Saving image ", nSnap) cv2.imwrite("%s%d.jpg"%(fileName, nSnap), img) nSnap += 1
true
true
7902e2f3b6480d92b7cf134af3054cd65eb1621c
718
py
Python
code/optimizer/schedulers.py
HS-YN/PanoAVQA
657b83421ce64ea18b3e79fb580afc7034403ccc
[ "MIT" ]
3
2022-01-22T17:58:22.000Z
2022-03-30T04:41:50.000Z
code/optimizer/schedulers.py
HS-YN/PanoAVQA
657b83421ce64ea18b3e79fb580afc7034403ccc
[ "MIT" ]
1
2022-01-22T18:02:06.000Z
2022-01-22T18:02:06.000Z
code/optimizer/schedulers.py
HS-YN/PanoAVQA
657b83421ce64ea18b3e79fb580afc7034403ccc
[ "MIT" ]
1
2022-01-29T03:38:13.000Z
2022-01-29T03:38:13.000Z
from torch.optim.lr_scheduler import LambdaLR from transformers import get_linear_schedule_with_warmup from exp import ex def get_no_scheduler(optimizer, num_warmup_steps, num_training_steps): def lr_lambda(current_step): return 1 return LambdaLR(optimizer, lr_lambda) sched_dict = { 'linear': get_linear_schedule_with_warmup, 'none': get_no_scheduler } @ex.capture() def get_scheduler(optimizer, t_total, warmup, scheduler_name, grad_acc_steps): warmup_steps = int(t_total * warmup) scheduler = sched_dict[scheduler_name](optimizer, warmup_steps, t_total) scheduler.accumulated = 0 scheduler.grad_acc_steps = grad_acc_steps return scheduler
27.615385
79
0.747911
from torch.optim.lr_scheduler import LambdaLR from transformers import get_linear_schedule_with_warmup from exp import ex def get_no_scheduler(optimizer, num_warmup_steps, num_training_steps): def lr_lambda(current_step): return 1 return LambdaLR(optimizer, lr_lambda) sched_dict = { 'linear': get_linear_schedule_with_warmup, 'none': get_no_scheduler } @ex.capture() def get_scheduler(optimizer, t_total, warmup, scheduler_name, grad_acc_steps): warmup_steps = int(t_total * warmup) scheduler = sched_dict[scheduler_name](optimizer, warmup_steps, t_total) scheduler.accumulated = 0 scheduler.grad_acc_steps = grad_acc_steps return scheduler
true
true
7902e3b76ef5b2e16744e4cc534a151fafd042b6
665
py
Python
django-website/home/migrations/0012_auto_20170727_1408.py
evonove/evonove
5f5a27245a46a98502f182e3c75aa8e77aa62d42
[ "BSD-3-Clause" ]
9
2016-01-07T14:57:55.000Z
2019-06-25T11:30:57.000Z
django-website/home/migrations/0012_auto_20170727_1408.py
evonove/evonove
5f5a27245a46a98502f182e3c75aa8e77aa62d42
[ "BSD-3-Clause" ]
64
2015-10-20T21:23:56.000Z
2022-01-12T10:03:28.000Z
django-website/home/migrations/0012_auto_20170727_1408.py
evonove/evonove
5f5a27245a46a98502f182e3c75aa8e77aa62d42
[ "BSD-3-Clause" ]
3
2016-08-06T14:29:00.000Z
2021-01-27T10:16:53.000Z
# -*- coding: utf-8 -*- # Generated by Django 1.11.3 on 2017-07-27 14:08 from __future__ import unicode_literals from django.db import migrations import wagtail.core.fields class Migration(migrations.Migration): dependencies = [ ('home', '0011_auto_20170727_1324'), ] operations = [ migrations.AddField( model_name='homepage', name='partner_subtitle', field=wagtail.core.fields.RichTextField(blank=True), ), migrations.AddField( model_name='homepage', name='partner_title', field=wagtail.core.fields.RichTextField(blank=True), ), ]
24.62963
64
0.618045
from __future__ import unicode_literals

from django.db import migrations
import wagtail.core.fields


class Migration(migrations.Migration):

    dependencies = [
        ('home', '0011_auto_20170727_1324'),
    ]

    operations = [
        migrations.AddField(
            model_name='homepage',
            name='partner_subtitle',
            field=wagtail.core.fields.RichTextField(blank=True),
        ),
        migrations.AddField(
            model_name='homepage',
            name='partner_title',
            field=wagtail.core.fields.RichTextField(blank=True),
        ),
    ]
true
true
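The migration in this record adds two Wagtail RichTextField columns to a homepage model. A sketch of roughly what the corresponding model declaration in home/models.py would contain; the HomePage class body is inferred from the AddField operations, not taken from the repository.

from wagtail.core.models import Page
from wagtail.core.fields import RichTextField


class HomePage(Page):
    # Fields matching the two AddField operations in the migration above.
    partner_subtitle = RichTextField(blank=True)
    partner_title = RichTextField(blank=True)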
7902e3c36705410abe0c552e3e763ddccb4a35e8
6,927
py
Python
Chapter10/Ch10/friendslist.py
henrryyanez/Tkinter-GUI-Programming-by-Example
c8a326d6034b5e54f77605a8ec840cb8fac89412
[ "MIT" ]
127
2018-08-27T16:34:43.000Z
2022-03-22T19:20:53.000Z
Chapter10/Ch10/friendslist.py
PiotrAdaszewski/Tkinter-GUI-Programming-by-Example
c8a326d6034b5e54f77605a8ec840cb8fac89412
[ "MIT" ]
8
2019-04-11T06:47:36.000Z
2022-03-11T23:23:42.000Z
Chapter10/Ch10/friendslist.py
PiotrAdaszewski/Tkinter-GUI-Programming-by-Example
c8a326d6034b5e54f77605a8ec840cb8fac89412
[ "MIT" ]
85
2018-04-30T19:42:21.000Z
2022-03-30T01:22:54.000Z
import base64 import os import tkinter as tk import tkinter.messagebox as msg import tkinter.ttk as ttk from functools import partial from chatwindow import ChatWindow from requester import Requester from avatarwindow import AvatarWindow from addfriendwindow import AddFriendWindow friend_avatars_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "images/friends")) default_avatar_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "images/default.png")) class FriendsList(tk.Tk): def __init__(self, **kwargs): super().__init__(**kwargs) self.title('Tk Chat') self.geometry('700x500') self.menu = tk.Menu(self, bg="lightgrey", fg="black", tearoff=0) self.friends_menu = tk.Menu(self.menu, fg="black", bg="lightgrey", tearoff=0) self.friends_menu.add_command(label="Add Friend", command=self.show_add_friend_window) self.avatar_menu = tk.Menu(self.menu, fg="black", bg="lightgrey", tearoff=0) self.avatar_menu.add_command(label="Change Avatar", command=self.change_avatar) self.menu.add_cascade(label="Friends", menu=self.friends_menu) self.menu.add_cascade(label="Avatar", menu=self.avatar_menu) self.requester = Requester() self.show_login_screen() def show_login_screen(self): self.login_frame = ttk.Frame(self) username_label = ttk.Label(self.login_frame, text="Username") self.username_entry = ttk.Entry(self.login_frame) self.username_entry.focus_force() real_name_label = ttk.Label(self.login_frame, text="Real Name") self.real_name_entry = ttk.Entry(self.login_frame) login_button = ttk.Button(self.login_frame, text="Login", command=self.login) create_account_button = ttk.Button(self.login_frame, text="Create Account", command=self.create_account) username_label.grid(row=0, column=0, sticky='e') self.username_entry.grid(row=0, column=1) real_name_label.grid(row=1, column=0, sticky='e') self.real_name_entry.grid(row=1, column=1) login_button.grid(row=2, column=0, sticky='e') create_account_button.grid(row=2, column=1) for i in range(3): tk.Grid.rowconfigure(self.login_frame, i, weight=1) tk.Grid.columnconfigure(self.login_frame, i, weight=1) self.login_frame.pack(fill=tk.BOTH, expand=1) self.login_event = self.bind("<Return>", self.login) def login(self, event=None): username = self.username_entry.get() real_name = self.real_name_entry.get() if self.requester.login(username, real_name): self.username = username self.real_name = real_name self.unbind("<Return>", self.login_event) self.show_friends() else: msg.showerror("Failed", f"Could not log in as {username}") def create_account(self): username = self.username_entry.get() real_name = self.real_name_entry.get() if self.requester.create_account(username, real_name): self.username = username self.real_name = real_name self.show_friends() else: msg.showerror("Failed", "Account already exists!") def show_friends(self): self.configure(menu=self.menu) self.login_frame.pack_forget() self.canvas = tk.Canvas(self, bg="white") self.canvas_frame = tk.Frame(self.canvas) self.scrollbar = ttk.Scrollbar(self, orient="vertical", command=self.canvas.yview) self.canvas.configure(yscrollcommand=self.scrollbar.set) self.scrollbar.pack(side=tk.LEFT, fill=tk.Y) self.canvas.pack(side=tk.LEFT, expand=1, fill=tk.BOTH) self.friends_area = self.canvas.create_window((0, 0), window=self.canvas_frame, anchor="nw") self.bind_events() self.load_friends() def bind_events(self): self.bind('<Configure>', self.on_frame_resized) self.canvas.bind('<Configure>', self.friends_width) def friends_width(self, event): canvas_width = event.width 
self.canvas.itemconfig(self.friends_area, width=canvas_width) def on_frame_resized(self, event=None): self.canvas.configure(scrollregion=self.canvas.bbox("all")) def load_friends(self): my_friends = self.requester.get_friends(self.username) for user in my_friends["friends"]: if user['username'] != self.username: friend_frame = ttk.Frame(self.canvas_frame) friend_avatar_path = os.path.join(friend_avatars_dir, f"{user['username']}.png") if user["avatar"]: with open(friend_avatar_path, 'wb') as friend_avatar: img = base64.urlsafe_b64decode(user['avatar']) friend_avatar.write(img) else: friend_avatar_path = default_avatar_path profile_photo = tk.PhotoImage(file=friend_avatar_path) profile_photo_label = ttk.Label(friend_frame, image=profile_photo) profile_photo_label.image = profile_photo friend_name = ttk.Label(friend_frame, text=user['real_name'], anchor=tk.W) message_this_friend = partial(self.open_chat_window, username=user["username"], real_name=user["real_name"], avatar=friend_avatar_path) block_this_friend = partial(self.block_friend, username=user["username"]) message_button = ttk.Button(friend_frame, text="Chat", command=message_this_friend) block_button = ttk.Button(friend_frame, text="Block", command=block_this_friend) profile_photo_label.pack(side=tk.LEFT) friend_name.pack(side=tk.LEFT) message_button.pack(side=tk.RIGHT) block_button.pack(side=tk.RIGHT, padx=(0, 30)) friend_frame.pack(fill=tk.X, expand=1) def reload_friends(self): for child in self.canvas_frame.winfo_children(): child.pack_forget() self.load_friends() def show_add_friend_window(self): AddFriendWindow(self) def add_friend(self, username): if self.requester.add_friend(self.username, username): msg.showinfo("Friend Added", "Friend Added") success = True self.reload_friends() else: msg.showerror("Add Failed", "Friend was not found") success = False return success def open_chat_window(self, username, real_name, avatar): cw = ChatWindow(self, real_name, username, avatar) def block_friend(self, username): self.requester.block_friend(self.username, username) self.reload_friends() def change_avatar(self): AvatarWindow(self) if __name__ == '__main__': f = FriendsList() f.mainloop()
35.891192
151
0.651653
import base64 import os import tkinter as tk import tkinter.messagebox as msg import tkinter.ttk as ttk from functools import partial from chatwindow import ChatWindow from requester import Requester from avatarwindow import AvatarWindow from addfriendwindow import AddFriendWindow friend_avatars_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "images/friends")) default_avatar_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "images/default.png")) class FriendsList(tk.Tk): def __init__(self, **kwargs): super().__init__(**kwargs) self.title('Tk Chat') self.geometry('700x500') self.menu = tk.Menu(self, bg="lightgrey", fg="black", tearoff=0) self.friends_menu = tk.Menu(self.menu, fg="black", bg="lightgrey", tearoff=0) self.friends_menu.add_command(label="Add Friend", command=self.show_add_friend_window) self.avatar_menu = tk.Menu(self.menu, fg="black", bg="lightgrey", tearoff=0) self.avatar_menu.add_command(label="Change Avatar", command=self.change_avatar) self.menu.add_cascade(label="Friends", menu=self.friends_menu) self.menu.add_cascade(label="Avatar", menu=self.avatar_menu) self.requester = Requester() self.show_login_screen() def show_login_screen(self): self.login_frame = ttk.Frame(self) username_label = ttk.Label(self.login_frame, text="Username") self.username_entry = ttk.Entry(self.login_frame) self.username_entry.focus_force() real_name_label = ttk.Label(self.login_frame, text="Real Name") self.real_name_entry = ttk.Entry(self.login_frame) login_button = ttk.Button(self.login_frame, text="Login", command=self.login) create_account_button = ttk.Button(self.login_frame, text="Create Account", command=self.create_account) username_label.grid(row=0, column=0, sticky='e') self.username_entry.grid(row=0, column=1) real_name_label.grid(row=1, column=0, sticky='e') self.real_name_entry.grid(row=1, column=1) login_button.grid(row=2, column=0, sticky='e') create_account_button.grid(row=2, column=1) for i in range(3): tk.Grid.rowconfigure(self.login_frame, i, weight=1) tk.Grid.columnconfigure(self.login_frame, i, weight=1) self.login_frame.pack(fill=tk.BOTH, expand=1) self.login_event = self.bind("<Return>", self.login) def login(self, event=None): username = self.username_entry.get() real_name = self.real_name_entry.get() if self.requester.login(username, real_name): self.username = username self.real_name = real_name self.unbind("<Return>", self.login_event) self.show_friends() else: msg.showerror("Failed", f"Could not log in as {username}") def create_account(self): username = self.username_entry.get() real_name = self.real_name_entry.get() if self.requester.create_account(username, real_name): self.username = username self.real_name = real_name self.show_friends() else: msg.showerror("Failed", "Account already exists!") def show_friends(self): self.configure(menu=self.menu) self.login_frame.pack_forget() self.canvas = tk.Canvas(self, bg="white") self.canvas_frame = tk.Frame(self.canvas) self.scrollbar = ttk.Scrollbar(self, orient="vertical", command=self.canvas.yview) self.canvas.configure(yscrollcommand=self.scrollbar.set) self.scrollbar.pack(side=tk.LEFT, fill=tk.Y) self.canvas.pack(side=tk.LEFT, expand=1, fill=tk.BOTH) self.friends_area = self.canvas.create_window((0, 0), window=self.canvas_frame, anchor="nw") self.bind_events() self.load_friends() def bind_events(self): self.bind('<Configure>', self.on_frame_resized) self.canvas.bind('<Configure>', self.friends_width) def friends_width(self, event): canvas_width = event.width 
self.canvas.itemconfig(self.friends_area, width=canvas_width) def on_frame_resized(self, event=None): self.canvas.configure(scrollregion=self.canvas.bbox("all")) def load_friends(self): my_friends = self.requester.get_friends(self.username) for user in my_friends["friends"]: if user['username'] != self.username: friend_frame = ttk.Frame(self.canvas_frame) friend_avatar_path = os.path.join(friend_avatars_dir, f"{user['username']}.png") if user["avatar"]: with open(friend_avatar_path, 'wb') as friend_avatar: img = base64.urlsafe_b64decode(user['avatar']) friend_avatar.write(img) else: friend_avatar_path = default_avatar_path profile_photo = tk.PhotoImage(file=friend_avatar_path) profile_photo_label = ttk.Label(friend_frame, image=profile_photo) profile_photo_label.image = profile_photo friend_name = ttk.Label(friend_frame, text=user['real_name'], anchor=tk.W) message_this_friend = partial(self.open_chat_window, username=user["username"], real_name=user["real_name"], avatar=friend_avatar_path) block_this_friend = partial(self.block_friend, username=user["username"]) message_button = ttk.Button(friend_frame, text="Chat", command=message_this_friend) block_button = ttk.Button(friend_frame, text="Block", command=block_this_friend) profile_photo_label.pack(side=tk.LEFT) friend_name.pack(side=tk.LEFT) message_button.pack(side=tk.RIGHT) block_button.pack(side=tk.RIGHT, padx=(0, 30)) friend_frame.pack(fill=tk.X, expand=1) def reload_friends(self): for child in self.canvas_frame.winfo_children(): child.pack_forget() self.load_friends() def show_add_friend_window(self): AddFriendWindow(self) def add_friend(self, username): if self.requester.add_friend(self.username, username): msg.showinfo("Friend Added", "Friend Added") success = True self.reload_friends() else: msg.showerror("Add Failed", "Friend was not found") success = False return success def open_chat_window(self, username, real_name, avatar): cw = ChatWindow(self, real_name, username, avatar) def block_friend(self, username): self.requester.block_friend(self.username, username) self.reload_friends() def change_avatar(self): AvatarWindow(self) if __name__ == '__main__': f = FriendsList() f.mainloop()
true
true
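friendslist.py above drives all server interaction through a Requester object: login, create_account, get_friends, add_friend and block_friend. The in-memory stub below is a guess at that surface area, inferred only from the calls visible in the record, and is meant purely as a local stand-in for experimenting with the Tk GUI; the real class presumably talks to the chat server over HTTP.

class FakeRequester:
    """In-memory stand-in for the HTTP-backed Requester used by FriendsList."""

    def __init__(self):
        self.users = {}

    def login(self, username, real_name):
        return username in self.users

    def create_account(self, username, real_name):
        if username in self.users:
            return False
        self.users[username] = {"username": username, "real_name": real_name,
                                "avatar": None, "friends": []}
        return True

    def get_friends(self, username):
        # FriendsList expects a dict with a "friends" list of user records.
        return {"friends": [self.users[f] for f in self.users[username]["friends"]]}

    def add_friend(self, username, friend):
        if friend not in self.users:
            return False
        self.users[username]["friends"].append(friend)
        return True

    def block_friend(self, username, friend):
        if friend in self.users[username]["friends"]:
            self.users[username]["friends"].remove(friend)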
7902e3eb23e546d2efa9ea4c1c5b1e405ac5d863
441
py
Python
problems/day-18/part_2.py
stringham/advent-of-code-2020
01cfad88b2d70969976f44efdb66245470d5f925
[ "MIT" ]
1
2020-12-21T10:56:07.000Z
2020-12-21T10:56:07.000Z
problems/day-18/part_2.py
stringham/advent-of-code-2020
01cfad88b2d70969976f44efdb66245470d5f925
[ "MIT" ]
null
null
null
problems/day-18/part_2.py
stringham/advent-of-code-2020
01cfad88b2d70969976f44efdb66245470d5f925
[ "MIT" ]
1
2020-12-23T20:15:16.000Z
2020-12-23T20:15:16.000Z
#!/usr/bin/env python3

import sys
import re


class Num:
    def __init__(self, value):
        self.value = value

    def __add__(self, num):
        return Num(self.value * num.value)

    def __mul__(self, num):
        return Num(self.value + num.value)


s = 0
for line in sys.stdin:
    line = line.replace("+", "$").replace("*", "+").replace("$", "*")
    line = re.sub(r"(\d)", r"Num(\1)", line)
    s += eval(line).value

print(s)
18.375
69
0.564626
import sys
import re


class Num:
    def __init__(self, value):
        self.value = value

    def __add__(self, num):
        return Num(self.value * num.value)

    def __mul__(self, num):
        return Num(self.value + num.value)


s = 0
for line in sys.stdin:
    line = line.replace("+", "$").replace("*", "+").replace("$", "*")
    line = re.sub(r"(\d)", r"Num(\1)", line)
    s += eval(line).value

print(s)
true
true
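part_2.py solves Advent of Code 2020 day 18 part 2 by swapping the + and * symbols in each expression and then letting Python's own operator precedence do the work: the Num wrapper's __add__ multiplies and its __mul__ adds, so after the swap, Python's "multiplication binds tighter than addition" becomes the puzzle's "addition binds tighter than multiplication". A self-contained walk-through of one expression; the sample expression is mine.

import re


class Num:
    def __init__(self, value):
        self.value = value

    def __add__(self, num):
        return Num(self.value * num.value)   # '+' now multiplies

    def __mul__(self, num):
        return Num(self.value + num.value)   # '*' now adds


# "1 + 2 * 3 + 4" under part-2 precedence means (1 + 2) * (3 + 4) = 21.
line = "1 + 2 * 3 + 4"
line = line.replace("+", "$").replace("*", "+").replace("$", "*")
line = re.sub(r"(\d)", r"Num(\1)", line)   # puzzle input uses single digits
print(eval(line).value)                    # -> 21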
7902e4426faf0ddd660b06b21330b01263d90354
9,805
py
Python
test-framework/test-suites/integration/tests/fixtures/add_data.py
anooprajendra/stacki
5e3f51c928ff5367a7441f07bf28f0121e7abdff
[ "BSD-3-Clause" ]
null
null
null
test-framework/test-suites/integration/tests/fixtures/add_data.py
anooprajendra/stacki
5e3f51c928ff5367a7441f07bf28f0121e7abdff
[ "BSD-3-Clause" ]
null
null
null
test-framework/test-suites/integration/tests/fixtures/add_data.py
anooprajendra/stacki
5e3f51c928ff5367a7441f07bf28f0121e7abdff
[ "BSD-3-Clause" ]
null
null
null
import json import subprocess import ipaddress import pytest @pytest.fixture def add_host(): def _inner(hostname, rack, rank, appliance): cmd = f'stack add host {hostname} rack={rack} rank={rank} appliance={appliance}' result = subprocess.run(cmd.split()) if result.returncode != 0: pytest.fail('unable to add a dummy host') # First use of the fixture adds backend-0-0 _inner('backend-0-0', '0', '0', 'backend') # Then return the inner function, so we can call it inside the test # to get more hosts added return _inner @pytest.fixture def add_host_with_interface(): def _inner(hostname, rack, rank, appliance, interface): cmd = f'stack add host {hostname} rack={rack} rank={rank} appliance={appliance}' result = subprocess.run(cmd.split()) if result.returncode != 0: pytest.fail('unable to add a dummy host') cmd = f'stack add host interface {hostname} interface={interface}' result = subprocess.run(cmd.split()) if result.returncode != 0: pytest.fail('unable to add a dummy interface') _inner('backend-0-0', '0', '0', 'backend', 'eth0') return _inner @pytest.fixture def add_ib_switch(): def _inner(hostname, rack, rank, appliance, make, model, sw_type): cmd = f'stack add host {hostname} rack={rack} rank={rank} appliance={appliance}' result = subprocess.run(cmd.split()) if result.returncode != 0: pytest.fail('unable to add a dummy host') cmd = f'stack set host attr {hostname} attr=component.make value={make}' result = subprocess.run(cmd.split()) if result.returncode != 0: pytest.fail('unable to set make') cmd = f'stack set host attr {hostname} attr=component.model value={model}' result = subprocess.run(cmd.split()) if result.returncode != 0: pytest.fail('unable to set model') cmd = f'stack set host attr {hostname} attr=switch_type value={sw_type}' result = subprocess.run(cmd.split()) if result.returncode != 0: pytest.fail('unable to set switch type') _inner('switch-0-0', '0', '0', 'switch', 'Mellanox', 'm7800', 'infiniband') return _inner @pytest.fixture def add_ib_switch_partition(): def _inner(switch_name, partition_name, options): cmd = f'stack add switch partition {switch_name} name={partition_name} ' if options is not None: cmd += f'options={options}' result = subprocess.run(cmd.split()) if result.returncode != 0: pytest.fail('unable to add a dummy switch partition') _inner('switch-0-0', 'Default', '') return _inner @pytest.fixture def add_switch(): def _inner(hostname, rack, rank, appliance, make, model): cmd = f'stack add host {hostname} rack={rack} rank={rank} appliance={appliance}' result = subprocess.run(cmd.split()) if result.returncode != 0: pytest.fail('unable to add a dummy host') cmd = f'stack set host attr {hostname} attr=component.make value={make}' result = subprocess.run(cmd.split()) if result.returncode != 0: pytest.fail('unable to set make') cmd = f'stack set host attr {hostname} attr=component.model value={model}' result = subprocess.run(cmd.split()) if result.returncode != 0: pytest.fail('unable to set model') _inner('switch-0-0', '0', '0', 'switch', 'fake', 'unrl') return _inner @pytest.fixture def add_appliance(host): def _inner(name): result = host.run(f'stack add appliance {name}') if result.rc != 0: pytest.fail(f'unable to add dummy appliance "{name}"') # First use of the fixture adds appliance "test" _inner('test') # Then return the inner function, so we can call it inside the test # to get more appliances added return _inner @pytest.fixture def add_box(host): def _inner(name): result = host.run(f'stack add box {name}') if result.rc != 0: pytest.fail(f'unable to add dummy box 
"{name}"') # First use of the fixture adds box "test" _inner('test') # Then return the inner function, so we can call it inside the test # to get more boxes added return _inner @pytest.fixture def add_cart(host): def _inner(name): result = host.run(f'stack add cart {name}') if result.rc != 0: pytest.fail(f'unable to add dummy cart "{name}"') # First use of the fixture adds cart "test" _inner('test') # Then return the inner function, so we can call it inside the test # to get more carts added return _inner @pytest.fixture def add_environment(host): def _inner(name): result = host.run(f'stack add environment {name}') if result.rc != 0: pytest.fail(f'unable to add dummy environment "{name}"') # First use of the fixture adds environment "test" _inner('test') # Then return the inner function, so we can call it inside the test # to get more environments added return _inner @pytest.fixture def add_group(host): def _inner(name): result = host.run(f'stack add group {name}') if result.rc != 0: pytest.fail(f'unable to add dummy group "{name}"') # First use of the fixture adds group "test" _inner('test') # Then return the inner function, so we can call it inside the test # to get more groups added return _inner @pytest.fixture def add_network(host): """Adds a network to the stacki db. For historical reasons the first test network this creates is pxe=False.""" def _inner(name, address, pxe = False): result = host.run( f'stack add network {name} address={address} mask=255.255.255.0 pxe={pxe}' ) if result.rc != 0: pytest.fail(f'unable to add dummy network "{name}"') # First use of the fixture adds network "test" _inner('test', '192.168.0.0') # Then return the inner function, so we can call it inside the test # to get more networks added return _inner @pytest.fixture def add_host_with_net(host, add_host_with_interface, add_network): """Adds a host with a network. The first network this adds defaults to pxe=True.""" def _inner(hostname, rack, rank, appliance, interface, ip, network, address, pxe): # Add the host with an interface. add_host_with_interface(hostname = hostname, rack = rack, rank = rank, appliance = appliance, interface = interface) # Add the network. add_network(name = network, address = address, pxe = pxe) # Associate it to the interface. result = host.run(f"stack set host interface network {hostname} network={network} interface={interface}") assert result.rc == 0 # Set the interface IP result = host.run(f"stack set host interface ip {hostname} ip={ip} network={network}") assert result.rc == 0 # Add it to the frontend, because a lot of things in stacki expect backends to share networks with # frontends. result = host.run("stack list host interface a:frontend output-format=json") assert result.rc == 0 # Try to figure out if the frontend has an interface on this network already. interface_on_network = False for frontend_interface in json.loads(result.stdout): if frontend_interface["network"] == network: interface_on_network = True break if interface_on_network: return # Need to add an interface to the frontend on this network. Make sure we choose the next latest # interface name so we don't clash with other interface names. 
latest_interface = max(frontend_interface["interface"] for frontend_interface in json.loads(result.stdout)) # This should be a string, so we tokenize it into characters new_interface = list(latest_interface) new_interface[-1] = str(int(new_interface[-1]) + 1) new_interface = "".join(new_interface) result = host.run(f"stack add host interface a:frontend interface={new_interface} network={network} ip={ipaddress.ip_address(ip) + 1}") assert result.rc == 0 # First use of the add_host_with_interface fixture adds backend-0-0 with interface eth0. # The first use of add_network adds a network called test, but that's not PXE so we don't want to use it. # So the first call of this fixture needs to remove the test network, recreate it as a PXE network, and # associate the network with the host's interface. result = host.run(f"stack remove network test") assert result.rc == 0 add_network(name = "test", address = "192.168.0.0", pxe = True) result = host.run(f"stack set host interface network backend-0-0 network=test interface=eth0 ip=192.168.0.3") assert result.rc == 0 # Add a frontend interface on the network. result = host.run(f"stack add host interface a:frontend interface=eth2 network=test ip=192.168.0.2") assert result.rc == 0 return _inner @pytest.fixture( params = ( ("", "exec=True"), ("", "| bash -x"), ("document=", "exec=True"), ("document=", "| bash -x"), ), ids = ("stack_load_exec", "stack_load_bash", "stack_load_document_exec", "stack_load_document_bash"), ) def stack_load(request, host): """This fixture is used to run `stack load` on the host during integration tests. There are 4 essentially equivalent ways of loading and running a dump.json. Using this test fixture ensures that all 4 are tested. I.E: stack load dump_file exec=True stack load document=dump_file exec=True stack load dump_file | bash -x stack load document=dump_file | bash -x """ param_string, exec_string = request.param def _load(dump_file, **kwargs): if "exec" in kwargs: raise ValueError("Cannot pass exec param to this fixture. It handles it for you.") if "document" in kwargs: raise ValueError("Cannot pass document param to this fixture. It handles it for you.") kwargs_string = " ".join(f"{key}={value}" for key, value in kwargs.items()) return host.run(f"stack load {param_string}{dump_file} {exec_string} {kwargs_string}") return _load @pytest.fixture def fake_local_firmware_file(tmp_path_factory): """Creates a fake local firmware file and returns a pathlib.Path object that points to it.""" # Add a fake piece of firmware. fake_firmware_file = tmp_path_factory.mktemp("fake_firmware") / "foo.img" fake_firmware_file.write_text("foofakefirmware") return fake_firmware_file
33.694158
137
0.715553
import json import subprocess import ipaddress import pytest @pytest.fixture def add_host(): def _inner(hostname, rack, rank, appliance): cmd = f'stack add host {hostname} rack={rack} rank={rank} appliance={appliance}' result = subprocess.run(cmd.split()) if result.returncode != 0: pytest.fail('unable to add a dummy host') _inner('backend-0-0', '0', '0', 'backend') return _inner @pytest.fixture def add_host_with_interface(): def _inner(hostname, rack, rank, appliance, interface): cmd = f'stack add host {hostname} rack={rack} rank={rank} appliance={appliance}' result = subprocess.run(cmd.split()) if result.returncode != 0: pytest.fail('unable to add a dummy host') cmd = f'stack add host interface {hostname} interface={interface}' result = subprocess.run(cmd.split()) if result.returncode != 0: pytest.fail('unable to add a dummy interface') _inner('backend-0-0', '0', '0', 'backend', 'eth0') return _inner @pytest.fixture def add_ib_switch(): def _inner(hostname, rack, rank, appliance, make, model, sw_type): cmd = f'stack add host {hostname} rack={rack} rank={rank} appliance={appliance}' result = subprocess.run(cmd.split()) if result.returncode != 0: pytest.fail('unable to add a dummy host') cmd = f'stack set host attr {hostname} attr=component.make value={make}' result = subprocess.run(cmd.split()) if result.returncode != 0: pytest.fail('unable to set make') cmd = f'stack set host attr {hostname} attr=component.model value={model}' result = subprocess.run(cmd.split()) if result.returncode != 0: pytest.fail('unable to set model') cmd = f'stack set host attr {hostname} attr=switch_type value={sw_type}' result = subprocess.run(cmd.split()) if result.returncode != 0: pytest.fail('unable to set switch type') _inner('switch-0-0', '0', '0', 'switch', 'Mellanox', 'm7800', 'infiniband') return _inner @pytest.fixture def add_ib_switch_partition(): def _inner(switch_name, partition_name, options): cmd = f'stack add switch partition {switch_name} name={partition_name} ' if options is not None: cmd += f'options={options}' result = subprocess.run(cmd.split()) if result.returncode != 0: pytest.fail('unable to add a dummy switch partition') _inner('switch-0-0', 'Default', '') return _inner @pytest.fixture def add_switch(): def _inner(hostname, rack, rank, appliance, make, model): cmd = f'stack add host {hostname} rack={rack} rank={rank} appliance={appliance}' result = subprocess.run(cmd.split()) if result.returncode != 0: pytest.fail('unable to add a dummy host') cmd = f'stack set host attr {hostname} attr=component.make value={make}' result = subprocess.run(cmd.split()) if result.returncode != 0: pytest.fail('unable to set make') cmd = f'stack set host attr {hostname} attr=component.model value={model}' result = subprocess.run(cmd.split()) if result.returncode != 0: pytest.fail('unable to set model') _inner('switch-0-0', '0', '0', 'switch', 'fake', 'unrl') return _inner @pytest.fixture def add_appliance(host): def _inner(name): result = host.run(f'stack add appliance {name}') if result.rc != 0: pytest.fail(f'unable to add dummy appliance "{name}"') _inner('test') return _inner @pytest.fixture def add_box(host): def _inner(name): result = host.run(f'stack add box {name}') if result.rc != 0: pytest.fail(f'unable to add dummy box "{name}"') _inner('test') return _inner @pytest.fixture def add_cart(host): def _inner(name): result = host.run(f'stack add cart {name}') if result.rc != 0: pytest.fail(f'unable to add dummy cart "{name}"') _inner('test') return _inner @pytest.fixture def add_environment(host): def 
_inner(name): result = host.run(f'stack add environment {name}') if result.rc != 0: pytest.fail(f'unable to add dummy environment "{name}"') _inner('test') return _inner @pytest.fixture def add_group(host): def _inner(name): result = host.run(f'stack add group {name}') if result.rc != 0: pytest.fail(f'unable to add dummy group "{name}"') _inner('test') return _inner @pytest.fixture def add_network(host): def _inner(name, address, pxe = False): result = host.run( f'stack add network {name} address={address} mask=255.255.255.0 pxe={pxe}' ) if result.rc != 0: pytest.fail(f'unable to add dummy network "{name}"') _inner('test', '192.168.0.0') return _inner @pytest.fixture def add_host_with_net(host, add_host_with_interface, add_network): def _inner(hostname, rack, rank, appliance, interface, ip, network, address, pxe): add_host_with_interface(hostname = hostname, rack = rack, rank = rank, appliance = appliance, interface = interface) add_network(name = network, address = address, pxe = pxe) result = host.run(f"stack set host interface network {hostname} network={network} interface={interface}") assert result.rc == 0 result = host.run(f"stack set host interface ip {hostname} ip={ip} network={network}") assert result.rc == 0 result = host.run("stack list host interface a:frontend output-format=json") assert result.rc == 0 interface_on_network = False for frontend_interface in json.loads(result.stdout): if frontend_interface["network"] == network: interface_on_network = True break if interface_on_network: return latest_interface = max(frontend_interface["interface"] for frontend_interface in json.loads(result.stdout)) # This should be a string, so we tokenize it into characters new_interface = list(latest_interface) new_interface[-1] = str(int(new_interface[-1]) + 1) new_interface = "".join(new_interface) result = host.run(f"stack add host interface a:frontend interface={new_interface} network={network} ip={ipaddress.ip_address(ip) + 1}") assert result.rc == 0 # First use of the add_host_with_interface fixture adds backend-0-0 with interface eth0. # The first use of add_network adds a network called test, but that's not PXE so we don't want to use it. # So the first call of this fixture needs to remove the test network, recreate it as a PXE network, and # associate the network with the host's interface. result = host.run(f"stack remove network test") assert result.rc == 0 add_network(name = "test", address = "192.168.0.0", pxe = True) result = host.run(f"stack set host interface network backend-0-0 network=test interface=eth0 ip=192.168.0.3") assert result.rc == 0 result = host.run(f"stack add host interface a:frontend interface=eth2 network=test ip=192.168.0.2") assert result.rc == 0 return _inner @pytest.fixture( params = ( ("", "exec=True"), ("", "| bash -x"), ("document=", "exec=True"), ("document=", "| bash -x"), ), ids = ("stack_load_exec", "stack_load_bash", "stack_load_document_exec", "stack_load_document_bash"), ) def stack_load(request, host): param_string, exec_string = request.param def _load(dump_file, **kwargs): if "exec" in kwargs: raise ValueError("Cannot pass exec param to this fixture. It handles it for you.") if "document" in kwargs: raise ValueError("Cannot pass document param to this fixture. 
It handles it for you.") kwargs_string = " ".join(f"{key}={value}" for key, value in kwargs.items()) return host.run(f"stack load {param_string}{dump_file} {exec_string} {kwargs_string}") return _load @pytest.fixture def fake_local_firmware_file(tmp_path_factory): fake_firmware_file = tmp_path_factory.mktemp("fake_firmware") / "foo.img" fake_firmware_file.write_text("foofakefirmware") return fake_firmware_file
true
true
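The add_data.py fixtures above pre-create one object (for example backend-0-0) and then return the inner helper so a test can add more. A minimal sketch of a test that consumes add_host together with the testinfra host fixture; the second hostname and the assertion on the command output are illustrative, not taken from the test suite.

import json


def test_added_host_is_listed(host, add_host):
    # add_host has already created backend-0-0; add a second backend.
    add_host('backend-0-1', '0', '1', 'backend')

    result = host.run('stack list host')
    assert result.rc == 0
    assert 'backend-0-0' in result.stdout
    assert 'backend-0-1' in result.stdout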
7902e53e75ed2e7ff4746ee7dab879397772e6f7
318
py
Python
oops/rules/fix_alt_space.py
lardnicus/oops
2cabcdb6726f4583f954d5f3671574bd18c7fdf2
[ "MIT" ]
null
null
null
oops/rules/fix_alt_space.py
lardnicus/oops
2cabcdb6726f4583f954d5f3671574bd18c7fdf2
[ "MIT" ]
null
null
null
oops/rules/fix_alt_space.py
lardnicus/oops
2cabcdb6726f4583f954d5f3671574bd18c7fdf2
[ "MIT" ]
null
null
null
# -*- encoding: utf-8 -*-

import re

from oops.utils import sudo_support


@sudo_support
def match(command, settings):
    return ('command not found' in command.stderr.lower()
            and u'\xa0' in command.script)


@sudo_support
def get_new_command(command, settings):
    return re.sub(u'\xa0', ' ', command.script)
19.875
57
0.679245
import re

from oops.utils import sudo_support


@sudo_support
def match(command, settings):
    return ('command not found' in command.stderr.lower()
            and u'\xa0' in command.script)


@sudo_support
def get_new_command(command, settings):
    return re.sub(u'\xa0', ' ', command.script)
true
true
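The fix_alt_space rule above fires when a "command not found" error is caused by a non-breaking space (U+00A0) typed instead of a normal space, and rewrites the command with plain spaces. A quick sketch of what the rule reacts to, using a namedtuple as a stand-in for the real command object; only the .script and .stderr attributes matter here, and the sample command is invented.

from collections import namedtuple
from oops.rules.fix_alt_space import match, get_new_command

Command = namedtuple('Command', ['script', 'stderr'])

# The arguments are separated by U+00A0, which the shell treats as part of the word.
broken = Command(script=u'ls\xa0-la',
                 stderr=u'bash: ls\xa0-la: command not found')

assert match(broken, settings=None)
print(get_new_command(broken, settings=None))   # -> 'ls -la'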
7902e57ecd4a3ee6ec22340d68150fa75db62d2e
4,321
py
Python
PyStacks/test/templates/test_route53.py
0xack13/PyStacks
13136c43089c241680beb216a233d1846119dd7c
[ "MIT" ]
11
2018-02-15T04:27:05.000Z
2020-10-02T11:20:08.000Z
PyStacks/test/templates/test_route53.py
0xack13/PyStacks
13136c43089c241680beb216a233d1846119dd7c
[ "MIT" ]
3
2018-02-15T05:46:54.000Z
2018-03-05T04:46:51.000Z
PyStacks/test/templates/test_route53.py
0xack13/PyStacks
13136c43089c241680beb216a233d1846119dd7c
[ "MIT" ]
8
2018-03-05T04:40:41.000Z
2021-02-22T08:07:58.000Z
import unittest from PyStacks.PyStacks.template import templateCF class TestTemplate(unittest.TestCase): def test_templateCF_Route53Zone(self): resources = { 'route53_zone': { 'testr53zone': { 'name': 'example.com', 'comment': 'testzonecomment', 'hostedzone': { 'Name': 'testname', 'Tag2': 'testtagstuff' }, 'vpcs': { 'vpc-12345678': 'ap-southeast-2', 'vpc-87654321': 'us-west-2' } } } } expected = { 'testr53zone': { 'Type': 'AWS::Route53::HostedZone', 'Properties': { 'HostedZoneConfig': { 'Comment': 'testzonecomment' }, 'HostedZoneTags': [ { 'Key': 'Name', 'Value': 'testname' }, { 'Key': 'Tag2', 'Value': 'testtagstuff' } ], 'VPCs': [ { 'VPCId': 'vpc-87654321', 'VPCRegion': 'us-west-2' }, { 'VPCId': 'vpc-12345678', 'VPCRegion': 'ap-southeast-2' } ], 'Name': 'example.com' } } } actual = templateCF(resources, 'resources') self.assertDictEqual(actual, expected) def test_templateCF_Route53Record(self): resources = { 'route53_record': { 'testr53record': { 'comment': 'testcomment', 'zoneid': 'testzoneid', 'recordsets': [ [ 'atest', 'A', '1.2.3.4', '900', '0', 'base' ], [ 'cnametest', 'CNAME', 'example.com', '900', '0', 'base' ] ] } } } expected = { 'testr53record': { 'Type': 'AWS::Route53::RecordSetGroup', 'Properties': { 'Comment': 'testcomment', 'HostedZoneId': { 'Fn::ImportValue': { 'Fn::Sub': [ '${DNSStack}-Route53-testzoneid-Zone', { 'DNSStack': { 'Ref': 'DNSStack' } } ] } }, 'RecordSets': [ { 'Name': 'atest', 'Type': 'A', 'ResourceRecords': ['1.2.3.4'], 'TTL': '900', 'Weight': '0', 'SetIdentifier': 'base' }, { 'Name': 'cnametest', 'Type': 'CNAME', 'ResourceRecords': ['example.com'], 'TTL': '900', 'Weight': '0', 'SetIdentifier': 'base' } ] } } } actual = templateCF(resources, 'resources') self.assertDictEqual(actual, expected) if __name__ == '__main__': unittest.main()
32.734848
70
0.265448
import unittest from PyStacks.PyStacks.template import templateCF class TestTemplate(unittest.TestCase): def test_templateCF_Route53Zone(self): resources = { 'route53_zone': { 'testr53zone': { 'name': 'example.com', 'comment': 'testzonecomment', 'hostedzone': { 'Name': 'testname', 'Tag2': 'testtagstuff' }, 'vpcs': { 'vpc-12345678': 'ap-southeast-2', 'vpc-87654321': 'us-west-2' } } } } expected = { 'testr53zone': { 'Type': 'AWS::Route53::HostedZone', 'Properties': { 'HostedZoneConfig': { 'Comment': 'testzonecomment' }, 'HostedZoneTags': [ { 'Key': 'Name', 'Value': 'testname' }, { 'Key': 'Tag2', 'Value': 'testtagstuff' } ], 'VPCs': [ { 'VPCId': 'vpc-87654321', 'VPCRegion': 'us-west-2' }, { 'VPCId': 'vpc-12345678', 'VPCRegion': 'ap-southeast-2' } ], 'Name': 'example.com' } } } actual = templateCF(resources, 'resources') self.assertDictEqual(actual, expected) def test_templateCF_Route53Record(self): resources = { 'route53_record': { 'testr53record': { 'comment': 'testcomment', 'zoneid': 'testzoneid', 'recordsets': [ [ 'atest', 'A', '1.2.3.4', '900', '0', 'base' ], [ 'cnametest', 'CNAME', 'example.com', '900', '0', 'base' ] ] } } } expected = { 'testr53record': { 'Type': 'AWS::Route53::RecordSetGroup', 'Properties': { 'Comment': 'testcomment', 'HostedZoneId': { 'Fn::ImportValue': { 'Fn::Sub': [ '${DNSStack}-Route53-testzoneid-Zone', { 'DNSStack': { 'Ref': 'DNSStack' } } ] } }, 'RecordSets': [ { 'Name': 'atest', 'Type': 'A', 'ResourceRecords': ['1.2.3.4'], 'TTL': '900', 'Weight': '0', 'SetIdentifier': 'base' }, { 'Name': 'cnametest', 'Type': 'CNAME', 'ResourceRecords': ['example.com'], 'TTL': '900', 'Weight': '0', 'SetIdentifier': 'base' } ] } } } actual = templateCF(resources, 'resources') self.assertDictEqual(actual, expected) if __name__ == '__main__': unittest.main()
true
true
7902e5fb1d85ee01c08088d7910953865f94a68e
3,290
py
Python
mycalendar/settings.py
spralja/mycalendar
a96a70c646d09a39282a51fea53e0faf0e715284
[ "MIT" ]
null
null
null
mycalendar/settings.py
spralja/mycalendar
a96a70c646d09a39282a51fea53e0faf0e715284
[ "MIT" ]
null
null
null
mycalendar/settings.py
spralja/mycalendar
a96a70c646d09a39282a51fea53e0faf0e715284
[ "MIT" ]
null
null
null
""" Django settings for mycalendar project. Generated by 'django-admin startproject' using Django 3.2.8. For more information on this file, see https://docs.djangoproject.com/en/3.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.2/ref/settings/ """ from pathlib import Path from decouple import config # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = config('SECRET_KEY') # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'main', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', #'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'mycalendar.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [BASE_DIR / 'templates'] , 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'mycalendar.wsgi.application' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': BASE_DIR / 'db.sqlite3', } } # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'Europe/Copenhagen' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.2/howto/static-files/ STATIC_URL = '/static/' # Default primary key field type # https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
25.307692
91
0.700912
from pathlib import Path
from decouple import config

BASE_DIR = Path(__file__).resolve().parent.parent

SECRET_KEY = config('SECRET_KEY')

DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'main',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    #'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'mycalendar.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR / 'templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'mycalendar.wsgi.application'


# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}


# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'Europe/Copenhagen'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_URL = '/static/'

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
true
true
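The settings module above reads SECRET_KEY through python-decouple, so the secret lives in an untracked .env file or an environment variable rather than in the repository. A minimal sketch of that pattern; the .env contents and the extra DEBUG line are illustrative, not part of the project.

# .env file placed next to manage.py (placeholder values):
#
#   SECRET_KEY=change-me
#   DEBUG=False

from decouple import config

SECRET_KEY = config('SECRET_KEY')                  # raises if the key is missing
DEBUG = config('DEBUG', default=False, cast=bool)  # optional, with a typed default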
7902e5fc584d2617fa4b511750a57b7ae6329745
39,612
py
Python
lib/spack/spack/binary_distribution.py
Mohib-hub/spack
9a350e945ebd8a662151692791d50a62bdf73e90
[ "ECL-2.0", "Apache-2.0", "MIT" ]
null
null
null
lib/spack/spack/binary_distribution.py
Mohib-hub/spack
9a350e945ebd8a662151692791d50a62bdf73e90
[ "ECL-2.0", "Apache-2.0", "MIT" ]
null
null
null
lib/spack/spack/binary_distribution.py
Mohib-hub/spack
9a350e945ebd8a662151692791d50a62bdf73e90
[ "ECL-2.0", "Apache-2.0", "MIT" ]
1
2020-09-21T14:35:49.000Z
2020-09-21T14:35:49.000Z
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) import codecs import os import re import tarfile import shutil import tempfile import hashlib import glob import platform from contextlib import closing import ruamel.yaml as yaml import json from six.moves.urllib.error import URLError import llnl.util.tty as tty from llnl.util.filesystem import mkdirp import spack.cmd import spack.config as config import spack.fetch_strategy as fs import spack.util.gpg import spack.relocate as relocate import spack.util.spack_yaml as syaml import spack.mirror import spack.util.url as url_util import spack.util.web as web_util from spack.spec import Spec from spack.stage import Stage from spack.util.gpg import Gpg import spack.architecture as architecture _build_cache_relative_path = 'build_cache' BUILD_CACHE_INDEX_TEMPLATE = ''' <html> <head> <title>{title}</title> </head> <body> <ul> {path_list} </ul> </body> </html> ''' BUILD_CACHE_INDEX_ENTRY_TEMPLATE = ' <li><a href="{path}">{path}</a></li>' class NoOverwriteException(spack.error.SpackError): """ Raised when a file exists and must be overwritten. """ def __init__(self, file_path): err_msg = "\n%s\nexists\n" % file_path err_msg += "Use -f option to overwrite." super(NoOverwriteException, self).__init__(err_msg) class NoGpgException(spack.error.SpackError): """ Raised when gpg2 is not in PATH """ def __init__(self, msg): super(NoGpgException, self).__init__(msg) class NoKeyException(spack.error.SpackError): """ Raised when gpg has no default key added. """ def __init__(self, msg): super(NoKeyException, self).__init__(msg) class PickKeyException(spack.error.SpackError): """ Raised when multiple keys can be used to sign. """ def __init__(self, keys): err_msg = "Multiple keys available for signing\n%s\n" % keys err_msg += "Use spack buildcache create -k <key hash> to pick a key." super(PickKeyException, self).__init__(err_msg) class NoVerifyException(spack.error.SpackError): """ Raised if file fails signature verification. """ pass class NoChecksumException(spack.error.SpackError): """ Raised if file fails checksum verification. """ pass class NewLayoutException(spack.error.SpackError): """ Raised if directory layout is different from buildcache. """ def __init__(self, msg): super(NewLayoutException, self).__init__(msg) def build_cache_relative_path(): return _build_cache_relative_path def build_cache_prefix(prefix): return os.path.join(prefix, build_cache_relative_path()) def buildinfo_file_name(prefix): """ Filename of the binary package meta-data file """ name = os.path.join(prefix, ".spack/binary_distribution") return name def read_buildinfo_file(prefix): """ Read buildinfo file """ filename = buildinfo_file_name(prefix) with open(filename, 'r') as inputfile: content = inputfile.read() buildinfo = yaml.load(content) return buildinfo def write_buildinfo_file(spec, workdir, rel=False): """ Create a cache file containing information required for the relocation """ prefix = spec.prefix text_to_relocate = [] binary_to_relocate = [] link_to_relocate = [] blacklist = (".spack", "man") prefix_to_hash = dict() prefix_to_hash[str(spec.package.prefix)] = spec.dag_hash() deps = spack.build_environment.get_rpath_deps(spec.package) for d in deps: prefix_to_hash[str(d.prefix)] = d.dag_hash() # Do this at during tarball creation to save time when tarball unpacked. 
# Used by make_package_relative to determine binaries to change. for root, dirs, files in os.walk(prefix, topdown=True): dirs[:] = [d for d in dirs if d not in blacklist] for filename in files: path_name = os.path.join(root, filename) m_type, m_subtype = relocate.mime_type(path_name) if os.path.islink(path_name): link = os.readlink(path_name) if os.path.isabs(link): # Relocate absolute links into the spack tree if link.startswith(spack.store.layout.root): rel_path_name = os.path.relpath(path_name, prefix) link_to_relocate.append(rel_path_name) else: msg = 'Absolute link %s to %s ' % (path_name, link) msg += 'outside of prefix %s ' % prefix msg += 'should not be relocated.' tty.warn(msg) if relocate.needs_binary_relocation(m_type, m_subtype): if not filename.endswith('.o'): rel_path_name = os.path.relpath(path_name, prefix) binary_to_relocate.append(rel_path_name) if relocate.needs_text_relocation(m_type, m_subtype): rel_path_name = os.path.relpath(path_name, prefix) text_to_relocate.append(rel_path_name) # Create buildinfo data and write it to disk buildinfo = {} buildinfo['relative_rpaths'] = rel buildinfo['buildpath'] = spack.store.layout.root buildinfo['spackprefix'] = spack.paths.prefix buildinfo['relative_prefix'] = os.path.relpath( prefix, spack.store.layout.root) buildinfo['relocate_textfiles'] = text_to_relocate buildinfo['relocate_binaries'] = binary_to_relocate buildinfo['relocate_links'] = link_to_relocate buildinfo['prefix_to_hash'] = prefix_to_hash filename = buildinfo_file_name(workdir) with open(filename, 'w') as outfile: outfile.write(syaml.dump(buildinfo, default_flow_style=True)) def tarball_directory_name(spec): """ Return name of the tarball directory according to the convention <os>-<architecture>/<compiler>/<package>-<version>/ """ return "%s/%s/%s-%s" % (spec.architecture, str(spec.compiler).replace("@", "-"), spec.name, spec.version) def tarball_name(spec, ext): """ Return the name of the tarfile according to the convention <os>-<architecture>-<package>-<dag_hash><ext> """ return "%s-%s-%s-%s-%s%s" % (spec.architecture, str(spec.compiler).replace("@", "-"), spec.name, spec.version, spec.dag_hash(), ext) def tarball_path_name(spec, ext): """ Return the full path+name for a given spec according to the convention <tarball_directory_name>/<tarball_name> """ return os.path.join(tarball_directory_name(spec), tarball_name(spec, ext)) def checksum_tarball(file): # calculate sha256 hash of tar file block_size = 65536 hasher = hashlib.sha256() with open(file, 'rb') as tfile: buf = tfile.read(block_size) while len(buf) > 0: hasher.update(buf) buf = tfile.read(block_size) return hasher.hexdigest() def sign_tarball(key, force, specfile_path): # Sign the packages if keys available if spack.util.gpg.Gpg.gpg() is None: raise NoGpgException( "gpg2 is not available in $PATH .\n" "Use spack install gnupg and spack load gnupg.") if key is None: keys = Gpg.signing_keys() if len(keys) == 1: key = keys[0] if len(keys) > 1: raise PickKeyException(str(keys)) if len(keys) == 0: msg = "No default key available for signing.\n" msg += "Use spack gpg init and spack gpg create" msg += " to create a default key." raise NoKeyException(msg) if os.path.exists('%s.asc' % specfile_path): if force: os.remove('%s.asc' % specfile_path) else: raise NoOverwriteException('%s.asc' % specfile_path) Gpg.sign(key, specfile_path, '%s.asc' % specfile_path) def generate_package_index(cache_prefix): """Create the build cache index page. 
Creates (or replaces) the "index.html" page at the location given in cache_prefix. This page contains a link for each binary package (*.yaml) and public key (*.key) under cache_prefix. """ tmpdir = tempfile.mkdtemp() try: index_html_path = os.path.join(tmpdir, 'index.html') file_list = ( entry for entry in web_util.list_url(cache_prefix) if (entry.endswith('.yaml') or entry.endswith('.key'))) with open(index_html_path, 'w') as f: f.write(BUILD_CACHE_INDEX_TEMPLATE.format( title='Spack Package Index', path_list='\n'.join( BUILD_CACHE_INDEX_ENTRY_TEMPLATE.format(path=path) for path in file_list))) web_util.push_to_url( index_html_path, url_util.join(cache_prefix, 'index.html'), keep_original=False, extra_args={'ContentType': 'text/html'}) finally: shutil.rmtree(tmpdir) def build_tarball(spec, outdir, force=False, rel=False, unsigned=False, allow_root=False, key=None, regenerate_index=False): """ Build a tarball from given spec and put it into the directory structure used at the mirror (following <tarball_directory_name>). """ if not spec.concrete: raise ValueError('spec must be concrete to build tarball') # set up some paths tmpdir = tempfile.mkdtemp() cache_prefix = build_cache_prefix(tmpdir) tarfile_name = tarball_name(spec, '.tar.gz') tarfile_dir = os.path.join(cache_prefix, tarball_directory_name(spec)) tarfile_path = os.path.join(tarfile_dir, tarfile_name) spackfile_path = os.path.join( cache_prefix, tarball_path_name(spec, '.spack')) remote_spackfile_path = url_util.join( outdir, os.path.relpath(spackfile_path, tmpdir)) mkdirp(tarfile_dir) if web_util.url_exists(remote_spackfile_path): if force: web_util.remove_url(remote_spackfile_path) else: raise NoOverwriteException(url_util.format(remote_spackfile_path)) # need to copy the spec file so the build cache can be downloaded # without concretizing with the current spack packages # and preferences spec_file = os.path.join(spec.prefix, ".spack", "spec.yaml") specfile_name = tarball_name(spec, '.spec.yaml') specfile_path = os.path.realpath( os.path.join(cache_prefix, specfile_name)) remote_specfile_path = url_util.join( outdir, os.path.relpath(specfile_path, os.path.realpath(tmpdir))) if web_util.url_exists(remote_specfile_path): if force: web_util.remove_url(remote_specfile_path) else: raise NoOverwriteException(url_util.format(remote_specfile_path)) # make a copy of the install directory to work with workdir = os.path.join(tmpdir, os.path.basename(spec.prefix)) # install_tree copies hardlinks # create a temporary tarfile from prefix and exract it to workdir # tarfile preserves hardlinks temp_tarfile_name = tarball_name(spec, '.tar') temp_tarfile_path = os.path.join(tarfile_dir, temp_tarfile_name) with closing(tarfile.open(temp_tarfile_path, 'w')) as tar: tar.add(name='%s' % spec.prefix, arcname='.') with closing(tarfile.open(temp_tarfile_path, 'r')) as tar: tar.extractall(workdir) os.remove(temp_tarfile_path) # create info for later relocation and create tar write_buildinfo_file(spec, workdir, rel) # optionally make the paths in the binaries relative to each other # in the spack install tree before creating tarball if rel: try: make_package_relative(workdir, spec, allow_root) except Exception as e: shutil.rmtree(workdir) shutil.rmtree(tarfile_dir) shutil.rmtree(tmpdir) tty.die(e) else: try: check_package_relocatable(workdir, spec, allow_root) except Exception as e: shutil.rmtree(workdir) shutil.rmtree(tarfile_dir) shutil.rmtree(tmpdir) tty.die(e) # create gzip compressed tarball of the install prefix with 
closing(tarfile.open(tarfile_path, 'w:gz')) as tar: tar.add(name='%s' % workdir, arcname='%s' % os.path.basename(spec.prefix)) # remove copy of install directory shutil.rmtree(workdir) # get the sha256 checksum of the tarball checksum = checksum_tarball(tarfile_path) # add sha256 checksum to spec.yaml with open(spec_file, 'r') as inputfile: content = inputfile.read() spec_dict = yaml.load(content) bchecksum = {} bchecksum['hash_algorithm'] = 'sha256' bchecksum['hash'] = checksum spec_dict['binary_cache_checksum'] = bchecksum # Add original install prefix relative to layout root to spec.yaml. # This will be used to determine is the directory layout has changed. buildinfo = {} buildinfo['relative_prefix'] = os.path.relpath( spec.prefix, spack.store.layout.root) buildinfo['relative_rpaths'] = rel spec_dict['buildinfo'] = buildinfo spec_dict['full_hash'] = spec.full_hash() tty.debug('The full_hash ({0}) of {1} will be written into {2}'.format( spec_dict['full_hash'], spec.name, url_util.format(remote_specfile_path))) tty.debug(spec.tree()) with open(specfile_path, 'w') as outfile: outfile.write(syaml.dump(spec_dict)) # sign the tarball and spec file with gpg if not unsigned: sign_tarball(key, force, specfile_path) # put tarball, spec and signature files in .spack archive with closing(tarfile.open(spackfile_path, 'w')) as tar: tar.add(name=tarfile_path, arcname='%s' % tarfile_name) tar.add(name=specfile_path, arcname='%s' % specfile_name) if not unsigned: tar.add(name='%s.asc' % specfile_path, arcname='%s.asc' % specfile_name) # cleanup file moved to archive os.remove(tarfile_path) if not unsigned: os.remove('%s.asc' % specfile_path) web_util.push_to_url( spackfile_path, remote_spackfile_path, keep_original=False) web_util.push_to_url( specfile_path, remote_specfile_path, keep_original=False) tty.msg('Buildache for "%s" written to \n %s' % (spec, remote_spackfile_path)) try: # create an index.html for the build_cache directory so specs can be # found if regenerate_index: generate_package_index(url_util.join( outdir, os.path.relpath(cache_prefix, tmpdir))) finally: shutil.rmtree(tmpdir) return None def download_tarball(spec): """ Download binary tarball for given package into stage area Return True if successful """ if not spack.mirror.MirrorCollection(): tty.die("Please add a spack mirror to allow " + "download of pre-compiled packages.") tarball = tarball_path_name(spec, '.spack') for mirror in spack.mirror.MirrorCollection().values(): url = url_util.join( mirror.fetch_url, _build_cache_relative_path, tarball) # stage the tarball into standard place stage = Stage(url, name="build_cache", keep=True) try: stage.fetch() return stage.save_filename except fs.FetchError: continue return None def make_package_relative(workdir, spec, allow_root): """ Change paths in binaries to relative paths. Change absolute symlinks to relative symlinks. 
""" prefix = spec.prefix buildinfo = read_buildinfo_file(workdir) old_layout_root = buildinfo['buildpath'] orig_path_names = list() cur_path_names = list() for filename in buildinfo['relocate_binaries']: orig_path_names.append(os.path.join(prefix, filename)) cur_path_names.append(os.path.join(workdir, filename)) if (spec.architecture.platform == 'darwin' or spec.architecture.platform == 'test' and platform.system().lower() == 'darwin'): relocate.make_macho_binaries_relative(cur_path_names, orig_path_names, old_layout_root) if (spec.architecture.platform == 'linux' or spec.architecture.platform == 'test' and platform.system().lower() == 'linux'): relocate.make_elf_binaries_relative(cur_path_names, orig_path_names, old_layout_root) relocate.check_files_relocatable(cur_path_names, allow_root) orig_path_names = list() cur_path_names = list() for linkname in buildinfo.get('relocate_links', []): orig_path_names.append(os.path.join(prefix, linkname)) cur_path_names.append(os.path.join(workdir, linkname)) relocate.make_link_relative(cur_path_names, orig_path_names) def check_package_relocatable(workdir, spec, allow_root): """ Check if package binaries are relocatable. Change links to placeholder links. """ buildinfo = read_buildinfo_file(workdir) cur_path_names = list() for filename in buildinfo['relocate_binaries']: cur_path_names.append(os.path.join(workdir, filename)) relocate.check_files_relocatable(cur_path_names, allow_root) def relocate_package(spec, allow_root): """ Relocate the given package """ workdir = str(spec.prefix) buildinfo = read_buildinfo_file(workdir) new_layout_root = str(spack.store.layout.root) new_prefix = str(spec.prefix) new_rel_prefix = str(os.path.relpath(new_prefix, new_layout_root)) new_spack_prefix = str(spack.paths.prefix) old_layout_root = str(buildinfo['buildpath']) old_spack_prefix = str(buildinfo.get('spackprefix')) old_rel_prefix = buildinfo.get('relative_prefix') old_prefix = os.path.join(old_layout_root, old_rel_prefix) rel = buildinfo.get('relative_rpaths') prefix_to_hash = buildinfo.get('prefix_to_hash', None) if (old_rel_prefix != new_rel_prefix and not prefix_to_hash): msg = "Package tarball was created from an install " msg += "prefix with a different directory layout and an older " msg += "buildcache create implementation. It cannot be relocated." raise NewLayoutException(msg) # older buildcaches do not have the prefix_to_hash dictionary # need to set an empty dictionary and add one entry to # prefix_to_prefix to reproduce the old behavior if not prefix_to_hash: prefix_to_hash = dict() hash_to_prefix = dict() hash_to_prefix[spec.format('{hash}')] = str(spec.package.prefix) new_deps = spack.build_environment.get_rpath_deps(spec.package) for d in new_deps: hash_to_prefix[d.format('{hash}')] = str(d.prefix) prefix_to_prefix = dict() for orig_prefix, hash in prefix_to_hash.items(): prefix_to_prefix[orig_prefix] = hash_to_prefix.get(hash, None) prefix_to_prefix[old_prefix] = new_prefix prefix_to_prefix[old_layout_root] = new_layout_root tty.debug("Relocating package from", "%s to %s." % (old_layout_root, new_layout_root)) def is_backup_file(file): return file.endswith('~') # Text files containing the prefix text text_names = list() for filename in buildinfo['relocate_textfiles']: text_name = os.path.join(workdir, filename) # Don't add backup files generated by filter_file during install step. 
if not is_backup_file(text_name): text_names.append(text_name) # If we are installing back to the same location don't replace anything if old_layout_root != new_layout_root: paths_to_relocate = [old_spack_prefix, old_layout_root] paths_to_relocate.extend(prefix_to_hash.keys()) files_to_relocate = list(filter( lambda pathname: not relocate.file_is_relocatable( pathname, paths_to_relocate=paths_to_relocate), map(lambda filename: os.path.join(workdir, filename), buildinfo['relocate_binaries']))) # If the buildcache was not created with relativized rpaths # do the relocation of path in binaries if (spec.architecture.platform == 'darwin' or spec.architecture.platform == 'test' and platform.system().lower() == 'darwin'): relocate.relocate_macho_binaries(files_to_relocate, old_layout_root, new_layout_root, prefix_to_prefix, rel, old_prefix, new_prefix) if (spec.architecture.platform == 'linux' or spec.architecture.platform == 'test' and platform.system().lower() == 'linux'): relocate.relocate_elf_binaries(files_to_relocate, old_layout_root, new_layout_root, prefix_to_prefix, rel, old_prefix, new_prefix) # Relocate links to the new install prefix link_names = [linkname for linkname in buildinfo.get('relocate_links', [])] relocate.relocate_links(link_names, old_layout_root, new_layout_root, old_prefix, new_prefix, prefix_to_prefix) # For all buildcaches # relocate the install prefixes in text files including dependencies relocate.relocate_text(text_names, old_layout_root, new_layout_root, old_prefix, new_prefix, old_spack_prefix, new_spack_prefix, prefix_to_prefix) # relocate the install prefixes in binary files including dependencies relocate.relocate_text_bin(files_to_relocate, old_layout_root, new_layout_root, old_prefix, new_prefix, old_spack_prefix, new_spack_prefix, prefix_to_prefix) def extract_tarball(spec, filename, allow_root=False, unsigned=False, force=False): """ extract binary tarball for given package into install area """ if os.path.exists(spec.prefix): if force: shutil.rmtree(spec.prefix) else: raise NoOverwriteException(str(spec.prefix)) tmpdir = tempfile.mkdtemp() stagepath = os.path.dirname(filename) spackfile_name = tarball_name(spec, '.spack') spackfile_path = os.path.join(stagepath, spackfile_name) tarfile_name = tarball_name(spec, '.tar.gz') tarfile_path = os.path.join(tmpdir, tarfile_name) specfile_name = tarball_name(spec, '.spec.yaml') specfile_path = os.path.join(tmpdir, specfile_name) with closing(tarfile.open(spackfile_path, 'r')) as tar: tar.extractall(tmpdir) # some buildcache tarfiles use bzip2 compression if not os.path.exists(tarfile_path): tarfile_name = tarball_name(spec, '.tar.bz2') tarfile_path = os.path.join(tmpdir, tarfile_name) if not unsigned: if os.path.exists('%s.asc' % specfile_path): try: suppress = config.get('config:suppress_gpg_warnings', False) Gpg.verify('%s.asc' % specfile_path, specfile_path, suppress) except Exception as e: shutil.rmtree(tmpdir) raise e else: shutil.rmtree(tmpdir) raise NoVerifyException( "Package spec file failed signature verification.\n" "Use spack buildcache keys to download " "and install a key for verification from the mirror.") # get the sha256 checksum of the tarball checksum = checksum_tarball(tarfile_path) # get the sha256 checksum recorded at creation spec_dict = {} with open(specfile_path, 'r') as inputfile: content = inputfile.read() spec_dict = syaml.load(content) bchecksum = spec_dict['binary_cache_checksum'] # if the checksums don't match don't install if bchecksum['hash'] != checksum: shutil.rmtree(tmpdir) 
raise NoChecksumException( "Package tarball failed checksum verification.\n" "It cannot be installed.") new_relative_prefix = str(os.path.relpath(spec.prefix, spack.store.layout.root)) # if the original relative prefix is in the spec file use it buildinfo = spec_dict.get('buildinfo', {}) old_relative_prefix = buildinfo.get('relative_prefix', new_relative_prefix) rel = buildinfo.get('relative_rpaths') # if the original relative prefix and new relative prefix differ the # directory layout has changed and the buildcache cannot be installed # if it was created with relative rpaths info = 'old relative prefix %s\nnew relative prefix %s\nrelative rpaths %s' tty.debug(info % (old_relative_prefix, new_relative_prefix, rel)) # if (old_relative_prefix != new_relative_prefix and (rel)): # shutil.rmtree(tmpdir) # msg = "Package tarball was created from an install " # msg += "prefix with a different directory layout. " # msg += "It cannot be relocated because it " # msg += "uses relative rpaths." # raise NewLayoutException(msg) # extract the tarball in a temp directory with closing(tarfile.open(tarfile_path, 'r')) as tar: tar.extractall(path=tmpdir) # get the parent directory of the file .spack/binary_distribution # this should the directory unpacked from the tarball whose # name is unknown because the prefix naming is unknown bindist_file = glob.glob('%s/*/.spack/binary_distribution' % tmpdir)[0] workdir = re.sub('/.spack/binary_distribution$', '', bindist_file) tty.debug('workdir %s' % workdir) # install_tree copies hardlinks # create a temporary tarfile from prefix and exract it to workdir # tarfile preserves hardlinks temp_tarfile_name = tarball_name(spec, '.tar') temp_tarfile_path = os.path.join(tmpdir, temp_tarfile_name) with closing(tarfile.open(temp_tarfile_path, 'w')) as tar: tar.add(name='%s' % workdir, arcname='.') with closing(tarfile.open(temp_tarfile_path, 'r')) as tar: tar.extractall(spec.prefix) os.remove(temp_tarfile_path) # cleanup os.remove(tarfile_path) os.remove(specfile_path) try: relocate_package(spec, allow_root) except Exception as e: shutil.rmtree(spec.prefix) raise e else: manifest_file = os.path.join(spec.prefix, spack.store.layout.metadata_dir, spack.store.layout.manifest_file_name) if not os.path.exists(manifest_file): spec_id = spec.format('{name}/{hash:7}') tty.warn('No manifest file in tarball for spec %s' % spec_id) finally: shutil.rmtree(tmpdir) if os.path.exists(filename): os.remove(filename) # Internal cache for downloaded specs _cached_specs = set() def try_download_specs(urls=None, force=False): ''' Try to download the urls and cache them ''' global _cached_specs if urls is None: return {} for link in urls: with Stage(link, name="build_cache", keep=True) as stage: if force and os.path.exists(stage.save_filename): os.remove(stage.save_filename) if not os.path.exists(stage.save_filename): try: stage.fetch() except fs.FetchError: continue with open(stage.save_filename, 'r') as f: # read the spec from the build cache file. All specs # in build caches are concrete (as they are built) so # we need to mark this spec concrete on read-in. 
spec = Spec.from_yaml(f) spec._mark_concrete() _cached_specs.add(spec) return _cached_specs def get_spec(spec=None, force=False): """ Check if spec.yaml exists on mirrors and return it if it does """ global _cached_specs urls = set() if spec is None: return {} specfile_name = tarball_name(spec, '.spec.yaml') if not spack.mirror.MirrorCollection(): tty.debug("No Spack mirrors are currently configured") return {} if _cached_specs and spec in _cached_specs: return _cached_specs for mirror in spack.mirror.MirrorCollection().values(): fetch_url_build_cache = url_util.join( mirror.fetch_url, _build_cache_relative_path) mirror_dir = url_util.local_file_path(fetch_url_build_cache) if mirror_dir: tty.msg("Finding buildcaches in %s" % mirror_dir) link = url_util.join(fetch_url_build_cache, specfile_name) urls.add(link) else: tty.msg("Finding buildcaches at %s" % url_util.format(fetch_url_build_cache)) link = url_util.join(fetch_url_build_cache, specfile_name) urls.add(link) return try_download_specs(urls=urls, force=force) def get_specs(force=False, allarch=False): """ Get spec.yaml's for build caches available on mirror """ arch = architecture.Arch(architecture.platform(), 'default_os', 'default_target') arch_pattern = ('([^-]*-[^-]*-[^-]*)') if not allarch: arch_pattern = '(%s-%s-[^-]*)' % (arch.platform, arch.os) regex_pattern = '%s(.*)(spec.yaml$)' % (arch_pattern) arch_re = re.compile(regex_pattern) if not spack.mirror.MirrorCollection(): tty.debug("No Spack mirrors are currently configured") return {} urls = set() for mirror in spack.mirror.MirrorCollection().values(): fetch_url_build_cache = url_util.join( mirror.fetch_url, _build_cache_relative_path) mirror_dir = url_util.local_file_path(fetch_url_build_cache) if mirror_dir: tty.msg("Finding buildcaches in %s" % mirror_dir) if os.path.exists(mirror_dir): files = os.listdir(mirror_dir) for file in files: m = arch_re.search(file) if m: link = url_util.join(fetch_url_build_cache, file) urls.add(link) else: tty.msg("Finding buildcaches at %s" % url_util.format(fetch_url_build_cache)) p, links = web_util.spider( url_util.join(fetch_url_build_cache, 'index.html')) for link in links: m = arch_re.search(link) if m: urls.add(link) return try_download_specs(urls=urls, force=force) def get_keys(install=False, trust=False, force=False): """ Get pgp public keys available on mirror with suffix .key or .pub """ if not spack.mirror.MirrorCollection(): tty.die("Please add a spack mirror to allow " + "download of build caches.") keys = set() for mirror in spack.mirror.MirrorCollection().values(): fetch_url_build_cache = url_util.join( mirror.fetch_url, _build_cache_relative_path) mirror_dir = url_util.local_file_path(fetch_url_build_cache) if mirror_dir: tty.msg("Finding public keys in %s" % mirror_dir) files = os.listdir(str(mirror_dir)) for file in files: if re.search(r'\.key', file) or re.search(r'\.pub', file): link = url_util.join(fetch_url_build_cache, file) keys.add(link) else: tty.msg("Finding public keys at %s" % url_util.format(fetch_url_build_cache)) # For s3 mirror need to request index.html directly p, links = web_util.spider( url_util.join(fetch_url_build_cache, 'index.html'), depth=1) for link in links: if re.search(r'\.key', link) or re.search(r'\.pub', link): keys.add(link) for link in keys: with Stage(link, name="build_cache", keep=True) as stage: if os.path.exists(stage.save_filename) and force: os.remove(stage.save_filename) if not os.path.exists(stage.save_filename): try: stage.fetch() except fs.FetchError: continue tty.msg('Found key %s' 
% link) if install: if trust: Gpg.trust(stage.save_filename) tty.msg('Added this key to trusted keys.') else: tty.msg('Will not add this key to trusted keys.' 'Use -t to install all downloaded keys') def needs_rebuild(spec, mirror_url, rebuild_on_errors=False): if not spec.concrete: raise ValueError('spec must be concrete to check against mirror') pkg_name = spec.name pkg_version = spec.version pkg_hash = spec.dag_hash() pkg_full_hash = spec.full_hash() tty.debug('Checking {0}-{1}, dag_hash = {2}, full_hash = {3}'.format( pkg_name, pkg_version, pkg_hash, pkg_full_hash)) tty.debug(spec.tree()) # Try to retrieve the .spec.yaml directly, based on the known # format of the name, in order to determine if the package # needs to be rebuilt. cache_prefix = build_cache_prefix(mirror_url) spec_yaml_file_name = tarball_name(spec, '.spec.yaml') file_path = os.path.join(cache_prefix, spec_yaml_file_name) result_of_error = 'Package ({0}) will {1}be rebuilt'.format( spec.short_spec, '' if rebuild_on_errors else 'not ') try: _, _, yaml_file = web_util.read_from_url(file_path) yaml_contents = codecs.getreader('utf-8')(yaml_file).read() except (URLError, web_util.SpackWebError) as url_err: err_msg = [ 'Unable to determine whether {0} needs rebuilding,', ' caught exception attempting to read from {1}.', ] tty.error(''.join(err_msg).format(spec.short_spec, file_path)) tty.debug(url_err) tty.warn(result_of_error) return rebuild_on_errors if not yaml_contents: tty.error('Reading {0} returned nothing'.format(file_path)) tty.warn(result_of_error) return rebuild_on_errors spec_yaml = syaml.load(yaml_contents) # If either the full_hash didn't exist in the .spec.yaml file, or it # did, but didn't match the one we computed locally, then we should # just rebuild. This can be simplified once the dag_hash and the # full_hash become the same thing. if ('full_hash' not in spec_yaml or spec_yaml['full_hash'] != pkg_full_hash): if 'full_hash' in spec_yaml: reason = 'hash mismatch, remote = {0}, local = {1}'.format( spec_yaml['full_hash'], pkg_full_hash) else: reason = 'full_hash was missing from remote spec.yaml' tty.msg('Rebuilding {0}, reason: {1}'.format( spec.short_spec, reason)) tty.msg(spec.tree()) return True return False def check_specs_against_mirrors(mirrors, specs, output_file=None, rebuild_on_errors=False): """Check all the given specs against buildcaches on the given mirrors and determine if any of the specs need to be rebuilt. Reasons for needing to rebuild include binary cache for spec isn't present on a mirror, or it is present but the full_hash has changed since last time spec was built. Arguments: mirrors (dict): Mirrors to check against specs (iterable): Specs to check against mirrors output_file (string): Path to output file to be written. If provided, mirrors with missing or out-of-date specs will be formatted as a JSON object and written to this file. rebuild_on_errors (boolean): Treat any errors encountered while checking specs as a signal to rebuild package. Returns: 1 if any spec was out-of-date on any mirror, 0 otherwise. 
""" rebuilds = {} for mirror in spack.mirror.MirrorCollection(mirrors).values(): tty.msg('Checking for built specs at %s' % mirror.fetch_url) rebuild_list = [] for spec in specs: if needs_rebuild(spec, mirror.fetch_url, rebuild_on_errors): rebuild_list.append({ 'short_spec': spec.short_spec, 'hash': spec.dag_hash() }) if rebuild_list: rebuilds[mirror.fetch_url] = { 'mirrorName': mirror.name, 'mirrorUrl': mirror.fetch_url, 'rebuildSpecs': rebuild_list } if output_file: with open(output_file, 'w') as outf: outf.write(json.dumps(rebuilds)) return 1 if rebuilds else 0 def _download_buildcache_entry(mirror_root, descriptions): for description in descriptions: description_url = os.path.join(mirror_root, description['url']) path = description['path'] fail_if_missing = description['required'] mkdirp(path) stage = Stage( description_url, name="build_cache", path=path, keep=True) try: stage.fetch() except fs.FetchError as e: tty.debug(e) if fail_if_missing: tty.error('Failed to download required url {0}'.format( description_url)) return False return True def download_buildcache_entry(file_descriptions, mirror_url=None): if not mirror_url and not spack.mirror.MirrorCollection(): tty.die("Please provide or add a spack mirror to allow " + "download of buildcache entries.") if mirror_url: mirror_root = os.path.join( mirror_url, _build_cache_relative_path) return _download_buildcache_entry(mirror_root, file_descriptions) for mirror in spack.mirror.MirrorCollection().values(): mirror_root = os.path.join( mirror.fetch_url, _build_cache_relative_path) if _download_buildcache_entry(mirror_root, file_descriptions): return True else: continue return False
36.542435
79
0.624735
import codecs import os import re import tarfile import shutil import tempfile import hashlib import glob import platform from contextlib import closing import ruamel.yaml as yaml import json from six.moves.urllib.error import URLError import llnl.util.tty as tty from llnl.util.filesystem import mkdirp import spack.cmd import spack.config as config import spack.fetch_strategy as fs import spack.util.gpg import spack.relocate as relocate import spack.util.spack_yaml as syaml import spack.mirror import spack.util.url as url_util import spack.util.web as web_util from spack.spec import Spec from spack.stage import Stage from spack.util.gpg import Gpg import spack.architecture as architecture _build_cache_relative_path = 'build_cache' BUILD_CACHE_INDEX_TEMPLATE = ''' <html> <head> <title>{title}</title> </head> <body> <ul> {path_list} </ul> </body> </html> ''' BUILD_CACHE_INDEX_ENTRY_TEMPLATE = ' <li><a href="{path}">{path}</a></li>' class NoOverwriteException(spack.error.SpackError): def __init__(self, file_path): err_msg = "\n%s\nexists\n" % file_path err_msg += "Use -f option to overwrite." super(NoOverwriteException, self).__init__(err_msg) class NoGpgException(spack.error.SpackError): def __init__(self, msg): super(NoGpgException, self).__init__(msg) class NoKeyException(spack.error.SpackError): def __init__(self, msg): super(NoKeyException, self).__init__(msg) class PickKeyException(spack.error.SpackError): def __init__(self, keys): err_msg = "Multiple keys available for signing\n%s\n" % keys err_msg += "Use spack buildcache create -k <key hash> to pick a key." super(PickKeyException, self).__init__(err_msg) class NoVerifyException(spack.error.SpackError): pass class NoChecksumException(spack.error.SpackError): pass class NewLayoutException(spack.error.SpackError): def __init__(self, msg): super(NewLayoutException, self).__init__(msg) def build_cache_relative_path(): return _build_cache_relative_path def build_cache_prefix(prefix): return os.path.join(prefix, build_cache_relative_path()) def buildinfo_file_name(prefix): name = os.path.join(prefix, ".spack/binary_distribution") return name def read_buildinfo_file(prefix): filename = buildinfo_file_name(prefix) with open(filename, 'r') as inputfile: content = inputfile.read() buildinfo = yaml.load(content) return buildinfo def write_buildinfo_file(spec, workdir, rel=False): prefix = spec.prefix text_to_relocate = [] binary_to_relocate = [] link_to_relocate = [] blacklist = (".spack", "man") prefix_to_hash = dict() prefix_to_hash[str(spec.package.prefix)] = spec.dag_hash() deps = spack.build_environment.get_rpath_deps(spec.package) for d in deps: prefix_to_hash[str(d.prefix)] = d.dag_hash() for root, dirs, files in os.walk(prefix, topdown=True): dirs[:] = [d for d in dirs if d not in blacklist] for filename in files: path_name = os.path.join(root, filename) m_type, m_subtype = relocate.mime_type(path_name) if os.path.islink(path_name): link = os.readlink(path_name) if os.path.isabs(link): if link.startswith(spack.store.layout.root): rel_path_name = os.path.relpath(path_name, prefix) link_to_relocate.append(rel_path_name) else: msg = 'Absolute link %s to %s ' % (path_name, link) msg += 'outside of prefix %s ' % prefix msg += 'should not be relocated.' 
tty.warn(msg) if relocate.needs_binary_relocation(m_type, m_subtype): if not filename.endswith('.o'): rel_path_name = os.path.relpath(path_name, prefix) binary_to_relocate.append(rel_path_name) if relocate.needs_text_relocation(m_type, m_subtype): rel_path_name = os.path.relpath(path_name, prefix) text_to_relocate.append(rel_path_name) buildinfo = {} buildinfo['relative_rpaths'] = rel buildinfo['buildpath'] = spack.store.layout.root buildinfo['spackprefix'] = spack.paths.prefix buildinfo['relative_prefix'] = os.path.relpath( prefix, spack.store.layout.root) buildinfo['relocate_textfiles'] = text_to_relocate buildinfo['relocate_binaries'] = binary_to_relocate buildinfo['relocate_links'] = link_to_relocate buildinfo['prefix_to_hash'] = prefix_to_hash filename = buildinfo_file_name(workdir) with open(filename, 'w') as outfile: outfile.write(syaml.dump(buildinfo, default_flow_style=True)) def tarball_directory_name(spec): return "%s/%s/%s-%s" % (spec.architecture, str(spec.compiler).replace("@", "-"), spec.name, spec.version) def tarball_name(spec, ext): return "%s-%s-%s-%s-%s%s" % (spec.architecture, str(spec.compiler).replace("@", "-"), spec.name, spec.version, spec.dag_hash(), ext) def tarball_path_name(spec, ext): return os.path.join(tarball_directory_name(spec), tarball_name(spec, ext)) def checksum_tarball(file): block_size = 65536 hasher = hashlib.sha256() with open(file, 'rb') as tfile: buf = tfile.read(block_size) while len(buf) > 0: hasher.update(buf) buf = tfile.read(block_size) return hasher.hexdigest() def sign_tarball(key, force, specfile_path): if spack.util.gpg.Gpg.gpg() is None: raise NoGpgException( "gpg2 is not available in $PATH .\n" "Use spack install gnupg and spack load gnupg.") if key is None: keys = Gpg.signing_keys() if len(keys) == 1: key = keys[0] if len(keys) > 1: raise PickKeyException(str(keys)) if len(keys) == 0: msg = "No default key available for signing.\n" msg += "Use spack gpg init and spack gpg create" msg += " to create a default key." 
raise NoKeyException(msg) if os.path.exists('%s.asc' % specfile_path): if force: os.remove('%s.asc' % specfile_path) else: raise NoOverwriteException('%s.asc' % specfile_path) Gpg.sign(key, specfile_path, '%s.asc' % specfile_path) def generate_package_index(cache_prefix): tmpdir = tempfile.mkdtemp() try: index_html_path = os.path.join(tmpdir, 'index.html') file_list = ( entry for entry in web_util.list_url(cache_prefix) if (entry.endswith('.yaml') or entry.endswith('.key'))) with open(index_html_path, 'w') as f: f.write(BUILD_CACHE_INDEX_TEMPLATE.format( title='Spack Package Index', path_list='\n'.join( BUILD_CACHE_INDEX_ENTRY_TEMPLATE.format(path=path) for path in file_list))) web_util.push_to_url( index_html_path, url_util.join(cache_prefix, 'index.html'), keep_original=False, extra_args={'ContentType': 'text/html'}) finally: shutil.rmtree(tmpdir) def build_tarball(spec, outdir, force=False, rel=False, unsigned=False, allow_root=False, key=None, regenerate_index=False): if not spec.concrete: raise ValueError('spec must be concrete to build tarball') tmpdir = tempfile.mkdtemp() cache_prefix = build_cache_prefix(tmpdir) tarfile_name = tarball_name(spec, '.tar.gz') tarfile_dir = os.path.join(cache_prefix, tarball_directory_name(spec)) tarfile_path = os.path.join(tarfile_dir, tarfile_name) spackfile_path = os.path.join( cache_prefix, tarball_path_name(spec, '.spack')) remote_spackfile_path = url_util.join( outdir, os.path.relpath(spackfile_path, tmpdir)) mkdirp(tarfile_dir) if web_util.url_exists(remote_spackfile_path): if force: web_util.remove_url(remote_spackfile_path) else: raise NoOverwriteException(url_util.format(remote_spackfile_path)) spec_file = os.path.join(spec.prefix, ".spack", "spec.yaml") specfile_name = tarball_name(spec, '.spec.yaml') specfile_path = os.path.realpath( os.path.join(cache_prefix, specfile_name)) remote_specfile_path = url_util.join( outdir, os.path.relpath(specfile_path, os.path.realpath(tmpdir))) if web_util.url_exists(remote_specfile_path): if force: web_util.remove_url(remote_specfile_path) else: raise NoOverwriteException(url_util.format(remote_specfile_path)) workdir = os.path.join(tmpdir, os.path.basename(spec.prefix)) temp_tarfile_name = tarball_name(spec, '.tar') temp_tarfile_path = os.path.join(tarfile_dir, temp_tarfile_name) with closing(tarfile.open(temp_tarfile_path, 'w')) as tar: tar.add(name='%s' % spec.prefix, arcname='.') with closing(tarfile.open(temp_tarfile_path, 'r')) as tar: tar.extractall(workdir) os.remove(temp_tarfile_path) write_buildinfo_file(spec, workdir, rel) if rel: try: make_package_relative(workdir, spec, allow_root) except Exception as e: shutil.rmtree(workdir) shutil.rmtree(tarfile_dir) shutil.rmtree(tmpdir) tty.die(e) else: try: check_package_relocatable(workdir, spec, allow_root) except Exception as e: shutil.rmtree(workdir) shutil.rmtree(tarfile_dir) shutil.rmtree(tmpdir) tty.die(e) with closing(tarfile.open(tarfile_path, 'w:gz')) as tar: tar.add(name='%s' % workdir, arcname='%s' % os.path.basename(spec.prefix)) shutil.rmtree(workdir) checksum = checksum_tarball(tarfile_path) with open(spec_file, 'r') as inputfile: content = inputfile.read() spec_dict = yaml.load(content) bchecksum = {} bchecksum['hash_algorithm'] = 'sha256' bchecksum['hash'] = checksum spec_dict['binary_cache_checksum'] = bchecksum buildinfo = {} buildinfo['relative_prefix'] = os.path.relpath( spec.prefix, spack.store.layout.root) buildinfo['relative_rpaths'] = rel spec_dict['buildinfo'] = buildinfo spec_dict['full_hash'] = spec.full_hash() tty.debug('The 
full_hash ({0}) of {1} will be written into {2}'.format( spec_dict['full_hash'], spec.name, url_util.format(remote_specfile_path))) tty.debug(spec.tree()) with open(specfile_path, 'w') as outfile: outfile.write(syaml.dump(spec_dict)) if not unsigned: sign_tarball(key, force, specfile_path) with closing(tarfile.open(spackfile_path, 'w')) as tar: tar.add(name=tarfile_path, arcname='%s' % tarfile_name) tar.add(name=specfile_path, arcname='%s' % specfile_name) if not unsigned: tar.add(name='%s.asc' % specfile_path, arcname='%s.asc' % specfile_name) os.remove(tarfile_path) if not unsigned: os.remove('%s.asc' % specfile_path) web_util.push_to_url( spackfile_path, remote_spackfile_path, keep_original=False) web_util.push_to_url( specfile_path, remote_specfile_path, keep_original=False) tty.msg('Buildache for "%s" written to \n %s' % (spec, remote_spackfile_path)) try: if regenerate_index: generate_package_index(url_util.join( outdir, os.path.relpath(cache_prefix, tmpdir))) finally: shutil.rmtree(tmpdir) return None def download_tarball(spec): if not spack.mirror.MirrorCollection(): tty.die("Please add a spack mirror to allow " + "download of pre-compiled packages.") tarball = tarball_path_name(spec, '.spack') for mirror in spack.mirror.MirrorCollection().values(): url = url_util.join( mirror.fetch_url, _build_cache_relative_path, tarball) stage = Stage(url, name="build_cache", keep=True) try: stage.fetch() return stage.save_filename except fs.FetchError: continue return None def make_package_relative(workdir, spec, allow_root): prefix = spec.prefix buildinfo = read_buildinfo_file(workdir) old_layout_root = buildinfo['buildpath'] orig_path_names = list() cur_path_names = list() for filename in buildinfo['relocate_binaries']: orig_path_names.append(os.path.join(prefix, filename)) cur_path_names.append(os.path.join(workdir, filename)) if (spec.architecture.platform == 'darwin' or spec.architecture.platform == 'test' and platform.system().lower() == 'darwin'): relocate.make_macho_binaries_relative(cur_path_names, orig_path_names, old_layout_root) if (spec.architecture.platform == 'linux' or spec.architecture.platform == 'test' and platform.system().lower() == 'linux'): relocate.make_elf_binaries_relative(cur_path_names, orig_path_names, old_layout_root) relocate.check_files_relocatable(cur_path_names, allow_root) orig_path_names = list() cur_path_names = list() for linkname in buildinfo.get('relocate_links', []): orig_path_names.append(os.path.join(prefix, linkname)) cur_path_names.append(os.path.join(workdir, linkname)) relocate.make_link_relative(cur_path_names, orig_path_names) def check_package_relocatable(workdir, spec, allow_root): buildinfo = read_buildinfo_file(workdir) cur_path_names = list() for filename in buildinfo['relocate_binaries']: cur_path_names.append(os.path.join(workdir, filename)) relocate.check_files_relocatable(cur_path_names, allow_root) def relocate_package(spec, allow_root): workdir = str(spec.prefix) buildinfo = read_buildinfo_file(workdir) new_layout_root = str(spack.store.layout.root) new_prefix = str(spec.prefix) new_rel_prefix = str(os.path.relpath(new_prefix, new_layout_root)) new_spack_prefix = str(spack.paths.prefix) old_layout_root = str(buildinfo['buildpath']) old_spack_prefix = str(buildinfo.get('spackprefix')) old_rel_prefix = buildinfo.get('relative_prefix') old_prefix = os.path.join(old_layout_root, old_rel_prefix) rel = buildinfo.get('relative_rpaths') prefix_to_hash = buildinfo.get('prefix_to_hash', None) if (old_rel_prefix != new_rel_prefix and not 
prefix_to_hash): msg = "Package tarball was created from an install " msg += "prefix with a different directory layout and an older " msg += "buildcache create implementation. It cannot be relocated." raise NewLayoutException(msg) if not prefix_to_hash: prefix_to_hash = dict() hash_to_prefix = dict() hash_to_prefix[spec.format('{hash}')] = str(spec.package.prefix) new_deps = spack.build_environment.get_rpath_deps(spec.package) for d in new_deps: hash_to_prefix[d.format('{hash}')] = str(d.prefix) prefix_to_prefix = dict() for orig_prefix, hash in prefix_to_hash.items(): prefix_to_prefix[orig_prefix] = hash_to_prefix.get(hash, None) prefix_to_prefix[old_prefix] = new_prefix prefix_to_prefix[old_layout_root] = new_layout_root tty.debug("Relocating package from", "%s to %s." % (old_layout_root, new_layout_root)) def is_backup_file(file): return file.endswith('~') text_names = list() for filename in buildinfo['relocate_textfiles']: text_name = os.path.join(workdir, filename) if not is_backup_file(text_name): text_names.append(text_name) # If we are installing back to the same location don't replace anything if old_layout_root != new_layout_root: paths_to_relocate = [old_spack_prefix, old_layout_root] paths_to_relocate.extend(prefix_to_hash.keys()) files_to_relocate = list(filter( lambda pathname: not relocate.file_is_relocatable( pathname, paths_to_relocate=paths_to_relocate), map(lambda filename: os.path.join(workdir, filename), buildinfo['relocate_binaries']))) if (spec.architecture.platform == 'darwin' or spec.architecture.platform == 'test' and platform.system().lower() == 'darwin'): relocate.relocate_macho_binaries(files_to_relocate, old_layout_root, new_layout_root, prefix_to_prefix, rel, old_prefix, new_prefix) if (spec.architecture.platform == 'linux' or spec.architecture.platform == 'test' and platform.system().lower() == 'linux'): relocate.relocate_elf_binaries(files_to_relocate, old_layout_root, new_layout_root, prefix_to_prefix, rel, old_prefix, new_prefix) link_names = [linkname for linkname in buildinfo.get('relocate_links', [])] relocate.relocate_links(link_names, old_layout_root, new_layout_root, old_prefix, new_prefix, prefix_to_prefix) relocate.relocate_text(text_names, old_layout_root, new_layout_root, old_prefix, new_prefix, old_spack_prefix, new_spack_prefix, prefix_to_prefix) relocate.relocate_text_bin(files_to_relocate, old_layout_root, new_layout_root, old_prefix, new_prefix, old_spack_prefix, new_spack_prefix, prefix_to_prefix) def extract_tarball(spec, filename, allow_root=False, unsigned=False, force=False): if os.path.exists(spec.prefix): if force: shutil.rmtree(spec.prefix) else: raise NoOverwriteException(str(spec.prefix)) tmpdir = tempfile.mkdtemp() stagepath = os.path.dirname(filename) spackfile_name = tarball_name(spec, '.spack') spackfile_path = os.path.join(stagepath, spackfile_name) tarfile_name = tarball_name(spec, '.tar.gz') tarfile_path = os.path.join(tmpdir, tarfile_name) specfile_name = tarball_name(spec, '.spec.yaml') specfile_path = os.path.join(tmpdir, specfile_name) with closing(tarfile.open(spackfile_path, 'r')) as tar: tar.extractall(tmpdir) if not os.path.exists(tarfile_path): tarfile_name = tarball_name(spec, '.tar.bz2') tarfile_path = os.path.join(tmpdir, tarfile_name) if not unsigned: if os.path.exists('%s.asc' % specfile_path): try: suppress = config.get('config:suppress_gpg_warnings', False) Gpg.verify('%s.asc' % specfile_path, specfile_path, suppress) except Exception as e: shutil.rmtree(tmpdir) raise e else: shutil.rmtree(tmpdir) raise 
NoVerifyException( "Package spec file failed signature verification.\n" "Use spack buildcache keys to download " "and install a key for verification from the mirror.") checksum = checksum_tarball(tarfile_path) spec_dict = {} with open(specfile_path, 'r') as inputfile: content = inputfile.read() spec_dict = syaml.load(content) bchecksum = spec_dict['binary_cache_checksum'] if bchecksum['hash'] != checksum: shutil.rmtree(tmpdir) raise NoChecksumException( "Package tarball failed checksum verification.\n" "It cannot be installed.") new_relative_prefix = str(os.path.relpath(spec.prefix, spack.store.layout.root)) buildinfo = spec_dict.get('buildinfo', {}) old_relative_prefix = buildinfo.get('relative_prefix', new_relative_prefix) rel = buildinfo.get('relative_rpaths') info = 'old relative prefix %s\nnew relative prefix %s\nrelative rpaths %s' tty.debug(info % (old_relative_prefix, new_relative_prefix, rel)) with closing(tarfile.open(tarfile_path, 'r')) as tar: tar.extractall(path=tmpdir) bindist_file = glob.glob('%s/*/.spack/binary_distribution' % tmpdir)[0] workdir = re.sub('/.spack/binary_distribution$', '', bindist_file) tty.debug('workdir %s' % workdir) temp_tarfile_name = tarball_name(spec, '.tar') temp_tarfile_path = os.path.join(tmpdir, temp_tarfile_name) with closing(tarfile.open(temp_tarfile_path, 'w')) as tar: tar.add(name='%s' % workdir, arcname='.') with closing(tarfile.open(temp_tarfile_path, 'r')) as tar: tar.extractall(spec.prefix) os.remove(temp_tarfile_path) os.remove(tarfile_path) os.remove(specfile_path) try: relocate_package(spec, allow_root) except Exception as e: shutil.rmtree(spec.prefix) raise e else: manifest_file = os.path.join(spec.prefix, spack.store.layout.metadata_dir, spack.store.layout.manifest_file_name) if not os.path.exists(manifest_file): spec_id = spec.format('{name}/{hash:7}') tty.warn('No manifest file in tarball for spec %s' % spec_id) finally: shutil.rmtree(tmpdir) if os.path.exists(filename): os.remove(filename) _cached_specs = set() def try_download_specs(urls=None, force=False): global _cached_specs if urls is None: return {} for link in urls: with Stage(link, name="build_cache", keep=True) as stage: if force and os.path.exists(stage.save_filename): os.remove(stage.save_filename) if not os.path.exists(stage.save_filename): try: stage.fetch() except fs.FetchError: continue with open(stage.save_filename, 'r') as f: spec = Spec.from_yaml(f) spec._mark_concrete() _cached_specs.add(spec) return _cached_specs def get_spec(spec=None, force=False): global _cached_specs urls = set() if spec is None: return {} specfile_name = tarball_name(spec, '.spec.yaml') if not spack.mirror.MirrorCollection(): tty.debug("No Spack mirrors are currently configured") return {} if _cached_specs and spec in _cached_specs: return _cached_specs for mirror in spack.mirror.MirrorCollection().values(): fetch_url_build_cache = url_util.join( mirror.fetch_url, _build_cache_relative_path) mirror_dir = url_util.local_file_path(fetch_url_build_cache) if mirror_dir: tty.msg("Finding buildcaches in %s" % mirror_dir) link = url_util.join(fetch_url_build_cache, specfile_name) urls.add(link) else: tty.msg("Finding buildcaches at %s" % url_util.format(fetch_url_build_cache)) link = url_util.join(fetch_url_build_cache, specfile_name) urls.add(link) return try_download_specs(urls=urls, force=force) def get_specs(force=False, allarch=False): arch = architecture.Arch(architecture.platform(), 'default_os', 'default_target') arch_pattern = ('([^-]*-[^-]*-[^-]*)') if not allarch: arch_pattern = 
'(%s-%s-[^-]*)' % (arch.platform, arch.os) regex_pattern = '%s(.*)(spec.yaml$)' % (arch_pattern) arch_re = re.compile(regex_pattern) if not spack.mirror.MirrorCollection(): tty.debug("No Spack mirrors are currently configured") return {} urls = set() for mirror in spack.mirror.MirrorCollection().values(): fetch_url_build_cache = url_util.join( mirror.fetch_url, _build_cache_relative_path) mirror_dir = url_util.local_file_path(fetch_url_build_cache) if mirror_dir: tty.msg("Finding buildcaches in %s" % mirror_dir) if os.path.exists(mirror_dir): files = os.listdir(mirror_dir) for file in files: m = arch_re.search(file) if m: link = url_util.join(fetch_url_build_cache, file) urls.add(link) else: tty.msg("Finding buildcaches at %s" % url_util.format(fetch_url_build_cache)) p, links = web_util.spider( url_util.join(fetch_url_build_cache, 'index.html')) for link in links: m = arch_re.search(link) if m: urls.add(link) return try_download_specs(urls=urls, force=force) def get_keys(install=False, trust=False, force=False): if not spack.mirror.MirrorCollection(): tty.die("Please add a spack mirror to allow " + "download of build caches.") keys = set() for mirror in spack.mirror.MirrorCollection().values(): fetch_url_build_cache = url_util.join( mirror.fetch_url, _build_cache_relative_path) mirror_dir = url_util.local_file_path(fetch_url_build_cache) if mirror_dir: tty.msg("Finding public keys in %s" % mirror_dir) files = os.listdir(str(mirror_dir)) for file in files: if re.search(r'\.key', file) or re.search(r'\.pub', file): link = url_util.join(fetch_url_build_cache, file) keys.add(link) else: tty.msg("Finding public keys at %s" % url_util.format(fetch_url_build_cache)) p, links = web_util.spider( url_util.join(fetch_url_build_cache, 'index.html'), depth=1) for link in links: if re.search(r'\.key', link) or re.search(r'\.pub', link): keys.add(link) for link in keys: with Stage(link, name="build_cache", keep=True) as stage: if os.path.exists(stage.save_filename) and force: os.remove(stage.save_filename) if not os.path.exists(stage.save_filename): try: stage.fetch() except fs.FetchError: continue tty.msg('Found key %s' % link) if install: if trust: Gpg.trust(stage.save_filename) tty.msg('Added this key to trusted keys.') else: tty.msg('Will not add this key to trusted keys.' 
'Use -t to install all downloaded keys') def needs_rebuild(spec, mirror_url, rebuild_on_errors=False): if not spec.concrete: raise ValueError('spec must be concrete to check against mirror') pkg_name = spec.name pkg_version = spec.version pkg_hash = spec.dag_hash() pkg_full_hash = spec.full_hash() tty.debug('Checking {0}-{1}, dag_hash = {2}, full_hash = {3}'.format( pkg_name, pkg_version, pkg_hash, pkg_full_hash)) tty.debug(spec.tree()) cache_prefix = build_cache_prefix(mirror_url) spec_yaml_file_name = tarball_name(spec, '.spec.yaml') file_path = os.path.join(cache_prefix, spec_yaml_file_name) result_of_error = 'Package ({0}) will {1}be rebuilt'.format( spec.short_spec, '' if rebuild_on_errors else 'not ') try: _, _, yaml_file = web_util.read_from_url(file_path) yaml_contents = codecs.getreader('utf-8')(yaml_file).read() except (URLError, web_util.SpackWebError) as url_err: err_msg = [ 'Unable to determine whether {0} needs rebuilding,', ' caught exception attempting to read from {1}.', ] tty.error(''.join(err_msg).format(spec.short_spec, file_path)) tty.debug(url_err) tty.warn(result_of_error) return rebuild_on_errors if not yaml_contents: tty.error('Reading {0} returned nothing'.format(file_path)) tty.warn(result_of_error) return rebuild_on_errors spec_yaml = syaml.load(yaml_contents) # did, but didn't match the one we computed locally, then we should if ('full_hash' not in spec_yaml or spec_yaml['full_hash'] != pkg_full_hash): if 'full_hash' in spec_yaml: reason = 'hash mismatch, remote = {0}, local = {1}'.format( spec_yaml['full_hash'], pkg_full_hash) else: reason = 'full_hash was missing from remote spec.yaml' tty.msg('Rebuilding {0}, reason: {1}'.format( spec.short_spec, reason)) tty.msg(spec.tree()) return True return False def check_specs_against_mirrors(mirrors, specs, output_file=None, rebuild_on_errors=False): rebuilds = {} for mirror in spack.mirror.MirrorCollection(mirrors).values(): tty.msg('Checking for built specs at %s' % mirror.fetch_url) rebuild_list = [] for spec in specs: if needs_rebuild(spec, mirror.fetch_url, rebuild_on_errors): rebuild_list.append({ 'short_spec': spec.short_spec, 'hash': spec.dag_hash() }) if rebuild_list: rebuilds[mirror.fetch_url] = { 'mirrorName': mirror.name, 'mirrorUrl': mirror.fetch_url, 'rebuildSpecs': rebuild_list } if output_file: with open(output_file, 'w') as outf: outf.write(json.dumps(rebuilds)) return 1 if rebuilds else 0 def _download_buildcache_entry(mirror_root, descriptions): for description in descriptions: description_url = os.path.join(mirror_root, description['url']) path = description['path'] fail_if_missing = description['required'] mkdirp(path) stage = Stage( description_url, name="build_cache", path=path, keep=True) try: stage.fetch() except fs.FetchError as e: tty.debug(e) if fail_if_missing: tty.error('Failed to download required url {0}'.format( description_url)) return False return True def download_buildcache_entry(file_descriptions, mirror_url=None): if not mirror_url and not spack.mirror.MirrorCollection(): tty.die("Please provide or add a spack mirror to allow " + "download of buildcache entries.") if mirror_url: mirror_root = os.path.join( mirror_url, _build_cache_relative_path) return _download_buildcache_entry(mirror_root, file_descriptions) for mirror in spack.mirror.MirrorCollection().values(): mirror_root = os.path.join( mirror.fetch_url, _build_cache_relative_path) if _download_buildcache_entry(mirror_root, file_descriptions): return True else: continue return False
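get_specs() above filters mirror listings by architecture before downloading any spec.yaml, relying on the same <arch>-<compiler>-<name>-<version>-<dag_hash> naming that tarball_name() produces. The snippet below reproduces only that filename filter; the platform, OS and example file names are invented values for illustration.

import re

def spec_yaml_filter(platform, os_name, allarch=False):
    # Same pattern construction as get_specs(): restrict to one platform/os
    # unless all architectures are requested.
    arch_pattern = '([^-]*-[^-]*-[^-]*)' if allarch \
        else '(%s-%s-[^-]*)' % (platform, os_name)
    return re.compile('%s(.*)(spec.yaml$)' % arch_pattern)

arch_re = spec_yaml_filter('linux', 'ubuntu18.04')
kept = 'linux-ubuntu18.04-x86_64-gcc-9.3.0-zlib-1.2.11-abcdef123456.spec.yaml'
skipped = 'darwin-mojave-x86_64-clang-11.0.0-zlib-1.2.11-abcdef123456.spec.yaml'
print(bool(arch_re.search(kept)))     # True: matches this platform/os
print(bool(arch_re.search(skipped)))  # False: filtered out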
true
true
7902e613e027a6d682c3b4828e2c9594bdfafd45
831
py
Python
xlsxwriter/test/comparison/test_image29.py
edparcell/XlsxWriter
d6a5df232ac0091017ae5c65f592bcc776d296ea
[ "BSD-2-Clause-FreeBSD" ]
1
2019-01-09T19:43:43.000Z
2019-01-09T19:43:43.000Z
xlsxwriter/test/comparison/test_image29.py
edparcell/XlsxWriter
d6a5df232ac0091017ae5c65f592bcc776d296ea
[ "BSD-2-Clause-FreeBSD" ]
null
null
null
xlsxwriter/test/comparison/test_image29.py
edparcell/XlsxWriter
d6a5df232ac0091017ae5c65f592bcc776d296ea
[ "BSD-2-Clause-FreeBSD" ]
null
null
null
############################################################################### # # Tests for XlsxWriter. # # Copyright (c), 2013-2018, John McNamara, jmcnamara@cpan.org # from ..excel_comparsion_test import ExcelComparisonTest from ...workbook import Workbook class TestCompareXLSXFiles(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename('image29.xlsx') def test_create_file(self): """Test the creation of a simple XlsxWriter file with image(s).""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() worksheet.insert_image(0, 10, self.image_dir + 'red_208.png', {'x_offset': -210, 'y_offset': 1}) workbook.close() self.assertExcelEqual()
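The test above pins down how insert_image() treats a negative x_offset: the image is anchored at row 0, column 10 and then shifted 210 pixels to the left of that cell. A minimal usage sketch of the same option dictionary follows; the workbook and image file names are placeholders.

from xlsxwriter import Workbook

workbook = Workbook('demo.xlsx')        # placeholder output name
worksheet = workbook.add_worksheet()
# Anchor the image at (row 0, col 10), then nudge it 210px left and 1px down.
worksheet.insert_image(0, 10, 'red_208.png',
                       {'x_offset': -210, 'y_offset': 1})
workbook.close()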
24.441176
104
0.612515
true
true
7902e64ce6e679080f9662b47802e49d079bf34f
19,230
py
Python
lingvo/core/conv_layers_with_time_padding.py
zhoudoufu/lingvo
bd0f89809942fd0508ff43bd4b6bca1b598220cb
[ "Apache-2.0" ]
null
null
null
lingvo/core/conv_layers_with_time_padding.py
zhoudoufu/lingvo
bd0f89809942fd0508ff43bd4b6bca1b598220cb
[ "Apache-2.0" ]
null
null
null
lingvo/core/conv_layers_with_time_padding.py
zhoudoufu/lingvo
bd0f89809942fd0508ff43bd4b6bca1b598220cb
[ "Apache-2.0" ]
null
null
null
# Lint as: python2, python3 # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Common conv layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from lingvo.core import base_layer from lingvo.core import bn_layers from lingvo.core import py_utils from lingvo.core import tshape def ComputeConvOutputShape(in_shape, t_stride, f_stride, outc=None, padding='SAME'): """Computes output shape for convolution and pooling layers. If `in_shape` is a dynamic shape, the output will be Tensors, while if `in_shape` is a list of ints then the output will also be a list of ints. Args: in_shape: A length 4 Tensor or list representing the input shape. t_stride: The stride along the time dimension. f_stride: The stride along the frequency dimension. outc: The expected output channel. If None, will use the input channel. padding: 'SAME' or 'VALID'. Returns: The expected output shape. """ # In the order of batch, time, frequency, channel n = in_shape[0] t = in_shape[1] f = in_shape[2] c = in_shape[3] # Last two dimensions has to be specified. assert f is not None and c is not None if padding == 'VALID': if t: t -= t_stride - 1 f -= f_stride - 1 ot = t if ot is not None: ot = (ot + t_stride - 1) // t_stride of = (f + f_stride - 1) // f_stride if outc is None: outc = c return [n, ot, of, outc] def ComputeConvOutputPadding(paddings, window, stride, padding_algorithm='SAME'): """Computes paddings for convolution and pooling output. out_padding[i] == 1 iff any in_padding corresponding to that output is 1. Args: paddings: The paddings tensor. It is expected to be of shape [batch, time]. window: The size of the windows. stride: The time-stride between adjacent windows. padding_algorithm: 'SAME' or 'VALID'. Returns: out_padding, The new padding tensor of size [batch, ceil(time / stride)]. """ if stride == 1: return paddings # Pad so input_length divides stride. input_length = py_utils.GetShape(paddings)[1] pad_len = (input_length + stride - 1) // stride * stride - input_length paddings = tf.pad(paddings, [[0, 0], [0, pad_len]], constant_values=1.0) out_padding = tf.nn.pool( tf.expand_dims(paddings, -1), [window], 'MAX', padding_algorithm, strides=[stride], ) return tf.squeeze(out_padding, -1) class BaseConv2DLayerWithPadding(base_layer.BaseLayer): """Base class for 2D convolution layers.""" @classmethod def Params(cls): p = super(BaseConv2DLayerWithPadding, cls).Params() p.Define( 'filter_shape', (0, 0, 0, 0), 'Filter shape. Must be a sequence of length 4. Elements are in' ' the order of height (time), width (frequency), in_channel,' ' out_channel. For causal convolution, filter_shape[0]' ' is the actual number of trained weights in the time dimension' ' of the kernel.') p.Define( 'filter_stride', (1, 1), 'Filter stride to use. Must be a pair of ints. 
The first int' ' specifies the stride on the time dimension. The second int' ' specifies the stride on the frequency dimension.') p.Define( 'dilation_rate', (1, 1), 'If > 1, dilation rate for atrous convolution. ' 'Must be a pair of ints. ' 'The first int specifies the dilation rate on the time dimension. ' 'The second int specifies the dilation rate on the frequency ' 'dimension. ' 'If any value of dilation_rate is > 1, then all values of strides ' 'must be 1.') p.Define( 'weight_norm', False, 'If true, apply weight normalization to weights as proposed by' ' Salimans and Kingma, 2016: https://arxiv.org/abs/1602.07868') return p @base_layer.initializer def __init__(self, params): super(BaseConv2DLayerWithPadding, self).__init__(params) p = self.params assert p.name assert len(p.filter_shape) == 4 assert len(p.filter_stride) == 2 assert all(x > 0 for x in p.filter_shape) assert all(x > 0 for x in p.filter_stride) assert len(p.dilation_rate) == 2 assert all(x > 0 for x in p.dilation_rate) # Dilation and stride can't be combined. if any(x > 1 for x in p.dilation_rate): assert all(x == 1 for x in p.filter_stride) @property def output_channels(self): """The number of output channels for this conv layer.""" raise NotImplementedError() @property def input_channels(self): """The number of input channels for this conv layer.""" return self.params.filter_shape[2] def OutShape(self, in_shape): """Compute the output shape given the input shape.""" p = self.params return ComputeConvOutputShape(in_shape, p.filter_stride[0], p.filter_stride[1], self.output_channels) def FProp(self, theta, inputs, paddings): """Apply convolution to inputs. Args: theta: A `.NestedMap` object containing weights' values of this layer and its children layers. inputs: The inputs tensor. It is expected to be of shape [batch, time, frequency, channel]. The time dimension corresponds to the height dimension as in images and the frequency dimension corresponds to the width dimension as in images. paddings: The paddings tensor, expected to be of shape [batch, time]. Returns: outputs, out_paddings pair. """ p = self.params with tf.name_scope(p.name): inputs = py_utils.with_dependencies([ py_utils.assert_shape_match(tf.shape(paddings), [-1, -1]), py_utils.assert_shape_match( tf.shape(inputs), tf.concat([tf.shape(paddings), [-1, self.input_channels]], 0)) ], inputs) def _ApplyPadding(tensor_in, padding_in): padding_expanded = tf.expand_dims(tf.expand_dims(padding_in, -1), -1) return tensor_in * (1.0 - padding_expanded) # Zeroing out padded inputs. inputs = _ApplyPadding(inputs, paddings) # Evaluate the conv kernel on 'inputs'. out = self._EvaluateConvKernel(theta, inputs) # NOTE: this may be slightly inaccurate when p.dilation_rate[0] > 1. # But there's likely no real problems. Trying to set it gives an error: # pooling with SAME padding is not implemented for dilation_rate > 1. # NOTE: we use window=p.filter_stride[0] to be compatible with legacy # implementation. Consider updating it to be the actual shape. conv_padding = ComputeConvOutputPadding( paddings, window=p.filter_stride[0], stride=p.filter_stride[0]) # Assuming padded nodes will be properly zero-ed out if necessary by # sub-sequent layers. 
# out = _ApplyPadding(out, conv_padding) out = py_utils.HasShape(out, self.OutShape(tf.shape(inputs))) return out, conv_padding def _EvaluateConvKernel(self, theta, conv_input): """Evaluate the convolution kernel on input 'conv_input'.""" raise NotImplementedError class Conv2DLayerWithPadding(BaseConv2DLayerWithPadding): """Conv2D layer.""" @base_layer.initializer def __init__(self, params): super(Conv2DLayerWithPadding, self).__init__(params) p = self.params assert p.name w_pc = py_utils.WeightParams( shape=p.filter_shape, init=p.params_init, dtype=p.dtype, collections=[self.__class__.__name__ + '_vars']) with tf.variable_scope(p.name): self.CreateVariable('w', w_pc) if p.weight_norm: self.CreateVariable( 'g', py_utils.WeightParams( shape=[p.filter_shape[-1]], init=py_utils.WeightInit.Constant(0.0), dtype=p.dtype, collections=[self.__class__.__name__ + '_vars'])) @property def output_channels(self): """The number of output channels for this conv layer.""" p = self.params return p.filter_shape[-1] def _GetWeight(self, theta): p = self.params if p.weight_norm: # Normalize along the last dim (standard conv). filter_w = tf.nn.l2_normalize(theta.w, [0, 1, 2]) * tf.reshape( (theta.g + 1.0), [1, 1, 1, p.filter_shape[-1]]) else: filter_w = theta.w return filter_w def _EvaluateConvKernel(self, theta, inputs): """Apply convolution to inputs.""" p = self.params filter_w = self._GetWeight(theta) return tf.nn.convolution( inputs, filter_w, strides=p.filter_stride, dilation_rate=p.dilation_rate, data_format='NHWC', padding='SAME') class CausalConv2DLayerWithPadding(Conv2DLayerWithPadding): """2D conv layer with causal dependency on the time axis.""" @base_layer.initializer def __init__(self, params): super(CausalConv2DLayerWithPadding, self).__init__(params) p = self.params assert p.filter_shape[1] == 1, 'Only 1d causal convolution is supported.' def _EvaluateConvKernel(self, theta, inputs): """Apply convolution to inputs.""" p = self.params assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.' # Use VALID padding and shift the inputs to the right to ensure that the # first output only depends on the first input and so on. The output is # the same size as the input, as if the convolution used SAME padding. padding_algorithm = 'VALID' # The effective spatial filter width for dilated convolutions is # (kernel_width - 1) * dilation_rate + 1 as according to # https://www.tensorflow.org/api_docs/python/tf/nn/convolution. causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0] inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]]) filter_w = self._GetWeight(theta) return tf.nn.convolution( inputs, filter_w, strides=p.filter_stride, dilation_rate=p.dilation_rate, data_format='NHWC', padding=padding_algorithm) class DepthwiseConv2DLayer(BaseConv2DLayerWithPadding): """Depthwise conv 2D layer. paper: https://arxiv.org/abs/1610.02357 """ @classmethod def Params(cls): p = super(DepthwiseConv2DLayer, cls).Params() # Redefine 'filter_shape' since the semantic of shape elements is different # from regular Conv2D. p.Delete('filter_shape') p.Define( 'filter_shape', (0, 0, 0, 0), 'Filter shape. Must be a sequence of length 4. Elements are in' ' the order of height (time), width (frequency), in_channel,' ' channel_multipliers. 
') return p @base_layer.initializer def __init__(self, params): super(DepthwiseConv2DLayer, self).__init__(params) p = self.params assert p.name w_pc = py_utils.WeightParams( shape=p.filter_shape, init=p.params_init, dtype=p.dtype, collections=[self.__class__.__name__ + '_vars']) with tf.variable_scope(p.name): self.CreateVariable('w', w_pc) if p.weight_norm: self.CreateVariable( 'g', py_utils.WeightParams( shape=[p.filter_shape[2], p.filter_shape[3]], init=py_utils.WeightInit.Constant(0.0), dtype=p.dtype, collections=[self.__class__.__name__ + '_vars'])) @property def output_channels(self): """The number of output channels for this conv layer.""" p = self.params # Depthwise convolution filter shape is: # [..., in_channels, channel_multiplier]. return p.filter_shape[2] * p.filter_shape[3] def _GetWeight(self, theta): p = self.params if p.weight_norm: # Normalize along the last two dims. filter_w = tf.nn.l2_normalize(theta.w, [0, 1]) * tf.reshape( (theta.g + 1.0), [1, 1, p.filter_shape[2], p.filter_shape[3]]) else: filter_w = theta.w return filter_w def _EvaluateConvKernel(self, theta, inputs): """Apply convolution to inputs.""" p = self.params filter_w = self._GetWeight(theta) return tf.nn.depthwise_conv2d( inputs, filter_w, strides=[1, p.filter_stride[0], p.filter_stride[1], 1], rate=p.dilation_rate, data_format='NHWC', padding='SAME') class CausalDepthwiseConv2DLayer(DepthwiseConv2DLayer): """Depthwise conv layer with causal dependency on the time axis.""" @base_layer.initializer def __init__(self, params): super(CausalDepthwiseConv2DLayer, self).__init__(params) p = self.params assert p.filter_shape[1] == 1, 'Only 1d causal convolution is supported.' def _EvaluateConvKernel(self, theta, inputs): """Apply convolution to inputs.""" p = self.params assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.' # Use VALID padding and shift the inputs to the right to ensure that the # first output only depends on the first input and so on. The output is # the same size as the input, as if the convolution used SAME padding. padding_algorithm = 'VALID' # The effective spatial filter width for dilated convolutions is # (kernel_width - 1) * dilation_rate + 1 as according to # https://www.tensorflow.org/api_docs/python/tf/nn/convolution. causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0] inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]]) filter_w = self._GetWeight(theta) return tf.nn.depthwise_conv2d( inputs, filter_w, strides=[1, p.filter_stride[0], p.filter_stride[1], 1], rate=p.dilation_rate, data_format='NHWC', padding=padding_algorithm) class NormalizedDepthwiseConv2DLayer(DepthwiseConv2DLayer): """DepthwiseConv2DLayer where weights are normalized over the time dim. https://arxiv.org/abs/1901.10430 """ @classmethod def Params(cls): p = super(NormalizedDepthwiseConv2DLayer, cls).Params() p.Define('dropconnect_prob', 0.0, 'Prob at which DropConnect regularization is performed.') p.Define('deterministic_dropout', False, 'Use determnisitc dropout or not.') p.Define('temperature', 1.0, 'Temperature for the softmax normalization of the weights.') p.Define('weight_tiling_factor', 1, 'Number of times weights are tiled over the input channels.') return p @base_layer.initializer def __init__(self, params): super(NormalizedDepthwiseConv2DLayer, self).__init__(params) p = self.params assert p.filter_shape[1] == 1, 'Only 1d convolution is supported.' assert p.temperature > 0.0, 'Absolute zero temperature is not possible.' 
@property def output_channels(self): """The number of output channels for this conv layer.""" p = self.params # Depthwise convolution filter shape is: # [kernel_size, 1, in_channels, channel_multiplier]. return p.filter_shape[2] * p.filter_shape[3] * p.weight_tiling_factor @property def input_channels(self): """The number of output channels for this conv layer.""" p = self.params return p.filter_shape[2] * p.weight_tiling_factor def _GetWeight(self, theta): p = self.params filter_w = theta.w # First normalize filter_w over the temporal dimension here. filter_w = tf.nn.softmax(filter_w / p.temperature, axis=0) # Add dropconnect on the weights for regularization. if p.dropconnect_prob > 0.0 and not p.is_eval: if p.deterministic_dropout: filter_w = py_utils.DeterministicDropout( filter_w, 1.0 - p.dropconnect_prob, py_utils.GenerateStepSeedPair(p, theta.global_step)) else: filter_w = tf.nn.dropout( filter_w, 1.0 - p.dropconnect_prob, seed=p.random_seed) # Tie the parameters of every subsequent number of weight_tiling_factor # channels. filter_w = tf.tile(filter_w, [1, 1, p.weight_tiling_factor, 1]) return filter_w @classmethod def FPropMeta(cls, p, inputs, paddings): py_utils.CheckShapes((inputs, paddings)) b, t, f, ic = inputs assert f == 1 oc = p.filter_shape[2] * p.filter_shape[3] * p.weight_tiling_factor outputs = tshape.Shape([b, t, f, oc]) flops = b * t * f * p.filter_shape[0] * ic * oc * 5 return py_utils.NestedMap(flops=flops, out_shapes=(outputs, paddings)) class CausalNormalizedDepthwiseConv2DLayer(NormalizedDepthwiseConv2DLayer): """Depthwise conv layer with causal dependency on the time axis.""" def _EvaluateConvKernel(self, theta, inputs): """Apply convolution to inputs.""" # Same as CausalDepthwiseConv2DLayer. p = self.params assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.' padding_algorithm = 'VALID' causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0] inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]]) filter_w = self._GetWeight(theta) return tf.nn.depthwise_conv2d( inputs, filter_w, strides=[1, p.filter_stride[0], p.filter_stride[1], 1], rate=p.dilation_rate, data_format='NHWC', padding=padding_algorithm) class ConvBatchNormLayer(bn_layers.BatchNormLayer): """A wrapper around regular BatchNormLayer that pass around the ... paddings layers. """ def FProp(self, theta, inputs, paddings): paddings_expanded = tf.expand_dims(tf.expand_dims(paddings, -1), -1) bned = super(ConvBatchNormLayer, self).FProp( theta, inputs, paddings_expanded) return bned, paddings # Supported activation functions. _ACTIVATIONS = { 'RELU': tf.nn.relu, 'RELU6': tf.nn.relu6, 'SIGMOID': tf.sigmoid, 'TANH': tf.tanh, 'SWISH': tf.nn.swish, 'NONE': tf.identity, } class ActivationLayer(base_layer.BaseLayer): """Applies activation function to the inputs.""" @classmethod def Params(cls): p = super(ActivationLayer, cls).Params() p.Define('activation', 'RELU', 'The activation function to apply') return p def FProp(self, theta, inputs, paddings): p = self.params out = _ACTIVATIONS[p.activation](inputs) return out, paddings class PaddingLayer(base_layer.BaseLayer): """Zeros out padded positions.""" def FProp(self, theta, inputs, paddings): paddings_expanded = tf.expand_dims(tf.expand_dims(paddings, -1), -1) return inputs * (1.0 - paddings_expanded), paddings
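Two small size rules drive most of the layer logic above: ComputeConvOutputShape() ceil-divides the time and frequency lengths by the strides under SAME padding, and the causal variants left-pad by (kernel_width - 1) * dilation_rate before running a VALID convolution. The plain-Python check below restates both rules; the concrete sizes are arbitrary examples, not values taken from the library.

def same_padding_out_len(length, stride):
    # ceil(length / stride), as used for both the time and frequency axes.
    return (length + stride - 1) // stride

def causal_pad_size(kernel_width, dilation_rate):
    # Left padding applied by the causal layers so output[t] only depends on
    # input[:t + 1].
    return (kernel_width - 1) * dilation_rate

print(same_padding_out_len(10, 3))   # 4 output steps for 10 input steps
print(causal_pad_size(3, 2))         # pad 4 steps for a dilated 3-tap kernel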
35.155393
80
0.663599
from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from lingvo.core import base_layer from lingvo.core import bn_layers from lingvo.core import py_utils from lingvo.core import tshape def ComputeConvOutputShape(in_shape, t_stride, f_stride, outc=None, padding='SAME'): n = in_shape[0] t = in_shape[1] f = in_shape[2] c = in_shape[3] assert f is not None and c is not None if padding == 'VALID': if t: t -= t_stride - 1 f -= f_stride - 1 ot = t if ot is not None: ot = (ot + t_stride - 1) // t_stride of = (f + f_stride - 1) // f_stride if outc is None: outc = c return [n, ot, of, outc] def ComputeConvOutputPadding(paddings, window, stride, padding_algorithm='SAME'): if stride == 1: return paddings input_length = py_utils.GetShape(paddings)[1] pad_len = (input_length + stride - 1) // stride * stride - input_length paddings = tf.pad(paddings, [[0, 0], [0, pad_len]], constant_values=1.0) out_padding = tf.nn.pool( tf.expand_dims(paddings, -1), [window], 'MAX', padding_algorithm, strides=[stride], ) return tf.squeeze(out_padding, -1) class BaseConv2DLayerWithPadding(base_layer.BaseLayer): @classmethod def Params(cls): p = super(BaseConv2DLayerWithPadding, cls).Params() p.Define( 'filter_shape', (0, 0, 0, 0), 'Filter shape. Must be a sequence of length 4. Elements are in' ' the order of height (time), width (frequency), in_channel,' ' out_channel. For causal convolution, filter_shape[0]' ' is the actual number of trained weights in the time dimension' ' of the kernel.') p.Define( 'filter_stride', (1, 1), 'Filter stride to use. Must be a pair of ints. The first int' ' specifies the stride on the time dimension. The second int' ' specifies the stride on the frequency dimension.') p.Define( 'dilation_rate', (1, 1), 'If > 1, dilation rate for atrous convolution. ' 'Must be a pair of ints. ' 'The first int specifies the dilation rate on the time dimension. ' 'The second int specifies the dilation rate on the frequency ' 'dimension. ' 'If any value of dilation_rate is > 1, then all values of strides ' 'must be 1.') p.Define( 'weight_norm', False, 'If true, apply weight normalization to weights as proposed by' ' Salimans and Kingma, 2016: https://arxiv.org/abs/1602.07868') return p @base_layer.initializer def __init__(self, params): super(BaseConv2DLayerWithPadding, self).__init__(params) p = self.params assert p.name assert len(p.filter_shape) == 4 assert len(p.filter_stride) == 2 assert all(x > 0 for x in p.filter_shape) assert all(x > 0 for x in p.filter_stride) assert len(p.dilation_rate) == 2 assert all(x > 0 for x in p.dilation_rate) if any(x > 1 for x in p.dilation_rate): assert all(x == 1 for x in p.filter_stride) @property def output_channels(self): raise NotImplementedError() @property def input_channels(self): return self.params.filter_shape[2] def OutShape(self, in_shape): p = self.params return ComputeConvOutputShape(in_shape, p.filter_stride[0], p.filter_stride[1], self.output_channels) def FProp(self, theta, inputs, paddings): p = self.params with tf.name_scope(p.name): inputs = py_utils.with_dependencies([ py_utils.assert_shape_match(tf.shape(paddings), [-1, -1]), py_utils.assert_shape_match( tf.shape(inputs), tf.concat([tf.shape(paddings), [-1, self.input_channels]], 0)) ], inputs) def _ApplyPadding(tensor_in, padding_in): padding_expanded = tf.expand_dims(tf.expand_dims(padding_in, -1), -1) return tensor_in * (1.0 - padding_expanded) # Zeroing out padded inputs. 
inputs = _ApplyPadding(inputs, paddings) # Evaluate the conv kernel on 'inputs'. out = self._EvaluateConvKernel(theta, inputs) # NOTE: this may be slightly inaccurate when p.dilation_rate[0] > 1. # But there's likely no real problems. Trying to set it gives an error: conv_padding = ComputeConvOutputPadding( paddings, window=p.filter_stride[0], stride=p.filter_stride[0]) out = py_utils.HasShape(out, self.OutShape(tf.shape(inputs))) return out, conv_padding def _EvaluateConvKernel(self, theta, conv_input): raise NotImplementedError class Conv2DLayerWithPadding(BaseConv2DLayerWithPadding): @base_layer.initializer def __init__(self, params): super(Conv2DLayerWithPadding, self).__init__(params) p = self.params assert p.name w_pc = py_utils.WeightParams( shape=p.filter_shape, init=p.params_init, dtype=p.dtype, collections=[self.__class__.__name__ + '_vars']) with tf.variable_scope(p.name): self.CreateVariable('w', w_pc) if p.weight_norm: self.CreateVariable( 'g', py_utils.WeightParams( shape=[p.filter_shape[-1]], init=py_utils.WeightInit.Constant(0.0), dtype=p.dtype, collections=[self.__class__.__name__ + '_vars'])) @property def output_channels(self): p = self.params return p.filter_shape[-1] def _GetWeight(self, theta): p = self.params if p.weight_norm: filter_w = tf.nn.l2_normalize(theta.w, [0, 1, 2]) * tf.reshape( (theta.g + 1.0), [1, 1, 1, p.filter_shape[-1]]) else: filter_w = theta.w return filter_w def _EvaluateConvKernel(self, theta, inputs): p = self.params filter_w = self._GetWeight(theta) return tf.nn.convolution( inputs, filter_w, strides=p.filter_stride, dilation_rate=p.dilation_rate, data_format='NHWC', padding='SAME') class CausalConv2DLayerWithPadding(Conv2DLayerWithPadding): @base_layer.initializer def __init__(self, params): super(CausalConv2DLayerWithPadding, self).__init__(params) p = self.params assert p.filter_shape[1] == 1, 'Only 1d causal convolution is supported.' def _EvaluateConvKernel(self, theta, inputs): p = self.params assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.' padding_algorithm = 'VALID' causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0] inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]]) filter_w = self._GetWeight(theta) return tf.nn.convolution( inputs, filter_w, strides=p.filter_stride, dilation_rate=p.dilation_rate, data_format='NHWC', padding=padding_algorithm) class DepthwiseConv2DLayer(BaseConv2DLayerWithPadding): @classmethod def Params(cls): p = super(DepthwiseConv2DLayer, cls).Params() p.Delete('filter_shape') p.Define( 'filter_shape', (0, 0, 0, 0), 'Filter shape. Must be a sequence of length 4. Elements are in' ' the order of height (time), width (frequency), in_channel,' ' channel_multipliers. 
') return p @base_layer.initializer def __init__(self, params): super(DepthwiseConv2DLayer, self).__init__(params) p = self.params assert p.name w_pc = py_utils.WeightParams( shape=p.filter_shape, init=p.params_init, dtype=p.dtype, collections=[self.__class__.__name__ + '_vars']) with tf.variable_scope(p.name): self.CreateVariable('w', w_pc) if p.weight_norm: self.CreateVariable( 'g', py_utils.WeightParams( shape=[p.filter_shape[2], p.filter_shape[3]], init=py_utils.WeightInit.Constant(0.0), dtype=p.dtype, collections=[self.__class__.__name__ + '_vars'])) @property def output_channels(self): p = self.params return p.filter_shape[2] * p.filter_shape[3] def _GetWeight(self, theta): p = self.params if p.weight_norm: filter_w = tf.nn.l2_normalize(theta.w, [0, 1]) * tf.reshape( (theta.g + 1.0), [1, 1, p.filter_shape[2], p.filter_shape[3]]) else: filter_w = theta.w return filter_w def _EvaluateConvKernel(self, theta, inputs): p = self.params filter_w = self._GetWeight(theta) return tf.nn.depthwise_conv2d( inputs, filter_w, strides=[1, p.filter_stride[0], p.filter_stride[1], 1], rate=p.dilation_rate, data_format='NHWC', padding='SAME') class CausalDepthwiseConv2DLayer(DepthwiseConv2DLayer): @base_layer.initializer def __init__(self, params): super(CausalDepthwiseConv2DLayer, self).__init__(params) p = self.params assert p.filter_shape[1] == 1, 'Only 1d causal convolution is supported.' def _EvaluateConvKernel(self, theta, inputs): p = self.params assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.' padding_algorithm = 'VALID' causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0] inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]]) filter_w = self._GetWeight(theta) return tf.nn.depthwise_conv2d( inputs, filter_w, strides=[1, p.filter_stride[0], p.filter_stride[1], 1], rate=p.dilation_rate, data_format='NHWC', padding=padding_algorithm) class NormalizedDepthwiseConv2DLayer(DepthwiseConv2DLayer): @classmethod def Params(cls): p = super(NormalizedDepthwiseConv2DLayer, cls).Params() p.Define('dropconnect_prob', 0.0, 'Prob at which DropConnect regularization is performed.') p.Define('deterministic_dropout', False, 'Use determnisitc dropout or not.') p.Define('temperature', 1.0, 'Temperature for the softmax normalization of the weights.') p.Define('weight_tiling_factor', 1, 'Number of times weights are tiled over the input channels.') return p @base_layer.initializer def __init__(self, params): super(NormalizedDepthwiseConv2DLayer, self).__init__(params) p = self.params assert p.filter_shape[1] == 1, 'Only 1d convolution is supported.' assert p.temperature > 0.0, 'Absolute zero temperature is not possible.' 
@property def output_channels(self): p = self.params return p.filter_shape[2] * p.filter_shape[3] * p.weight_tiling_factor @property def input_channels(self): p = self.params return p.filter_shape[2] * p.weight_tiling_factor def _GetWeight(self, theta): p = self.params filter_w = theta.w filter_w = tf.nn.softmax(filter_w / p.temperature, axis=0) if p.dropconnect_prob > 0.0 and not p.is_eval: if p.deterministic_dropout: filter_w = py_utils.DeterministicDropout( filter_w, 1.0 - p.dropconnect_prob, py_utils.GenerateStepSeedPair(p, theta.global_step)) else: filter_w = tf.nn.dropout( filter_w, 1.0 - p.dropconnect_prob, seed=p.random_seed) filter_w = tf.tile(filter_w, [1, 1, p.weight_tiling_factor, 1]) return filter_w @classmethod def FPropMeta(cls, p, inputs, paddings): py_utils.CheckShapes((inputs, paddings)) b, t, f, ic = inputs assert f == 1 oc = p.filter_shape[2] * p.filter_shape[3] * p.weight_tiling_factor outputs = tshape.Shape([b, t, f, oc]) flops = b * t * f * p.filter_shape[0] * ic * oc * 5 return py_utils.NestedMap(flops=flops, out_shapes=(outputs, paddings)) class CausalNormalizedDepthwiseConv2DLayer(NormalizedDepthwiseConv2DLayer): def _EvaluateConvKernel(self, theta, inputs): p = self.params assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.' padding_algorithm = 'VALID' causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0] inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]]) filter_w = self._GetWeight(theta) return tf.nn.depthwise_conv2d( inputs, filter_w, strides=[1, p.filter_stride[0], p.filter_stride[1], 1], rate=p.dilation_rate, data_format='NHWC', padding=padding_algorithm) class ConvBatchNormLayer(bn_layers.BatchNormLayer): def FProp(self, theta, inputs, paddings): paddings_expanded = tf.expand_dims(tf.expand_dims(paddings, -1), -1) bned = super(ConvBatchNormLayer, self).FProp( theta, inputs, paddings_expanded) return bned, paddings _ACTIVATIONS = { 'RELU': tf.nn.relu, 'RELU6': tf.nn.relu6, 'SIGMOID': tf.sigmoid, 'TANH': tf.tanh, 'SWISH': tf.nn.swish, 'NONE': tf.identity, } class ActivationLayer(base_layer.BaseLayer): @classmethod def Params(cls): p = super(ActivationLayer, cls).Params() p.Define('activation', 'RELU', 'The activation function to apply') return p def FProp(self, theta, inputs, paddings): p = self.params out = _ACTIVATIONS[p.activation](inputs) return out, paddings class PaddingLayer(base_layer.BaseLayer): def FProp(self, theta, inputs, paddings): paddings_expanded = tf.expand_dims(tf.expand_dims(paddings, -1), -1) return inputs * (1.0 - paddings_expanded), paddings
true
true
7902e655dea2c020e0a3db4dd83bf1dbf7cddacd
192
py
Python
django_app/students/serializers.py
RodrigoBLima/app-django-react
749c1ed206334d2e17ce22b5737c1d212f9f4ae7
[ "MIT" ]
null
null
null
django_app/students/serializers.py
RodrigoBLima/app-django-react
749c1ed206334d2e17ce22b5737c1d212f9f4ae7
[ "MIT" ]
2
2022-02-13T08:35:03.000Z
2022-02-27T01:23:53.000Z
django_app/students/serializers.py
RodrigoBLima/app-django-react
749c1ed206334d2e17ce22b5737c1d212f9f4ae7
[ "MIT" ]
1
2021-06-30T02:39:59.000Z
2021-06-30T02:39:59.000Z
from rest_framework import serializers from .models import Student class StudentSerializer(serializers.ModelSerializer): class Meta: model = Student fields = ('__all__')
21.333333
53
0.729167
from rest_framework import serializers from .models import Student class StudentSerializer(serializers.ModelSerializer): class Meta: model = Student fields = ('__all__')
true
true
7902e69fa7db7f459fe2c262ca5e329f7363bc68
1,353
py
Python
interpolML/interpolML/model/model.py
MiguelMque/interpolML
980d55583285ba1d289de69b5c05c65fc34097f5
[ "MIT" ]
null
null
null
interpolML/interpolML/model/model.py
MiguelMque/interpolML
980d55583285ba1d289de69b5c05c65fc34097f5
[ "MIT" ]
null
null
null
interpolML/interpolML/model/model.py
MiguelMque/interpolML
980d55583285ba1d289de69b5c05c65fc34097f5
[ "MIT" ]
null
null
null
from typing import Any from copy import deepcopy class Model: def __init__(self, name: str, model, freq: str): self.name = name self.model = model self.freq = freq self.train = None self.test = None self.prediction = None self.pred_col = "prediction" self.y_col = "y" self.date_col = "ds" def fit(self, train_dataset): "Performs model training with standard settings" self.train = deepcopy(train_dataset) if "orbit" in self.name: self.model.fit(self.train) elif "nprophet" in self.name: self.model.fit(self.train, validate_each_epoch=True, valid_p=0.2, freq=self.freq, plot_live_loss=True, epochs=100) def predict(self, dataset: Any): "Performs prediction" self.test = deepcopy(dataset) if "orbit" in self.name: prediction = self.model.predict(self.test) elif "nprophet" in self.name: future = self.model.make_future_dataframe(self.train, periods=len(self.test)) prediction = self.model.predict(future).rename(columns={"yhat1": self.pred_col}) prediction = prediction[[self.date_col, self.pred_col]] self.prediction = prediction return self.prediction
27.612245
92
0.597931
from typing import Any from copy import deepcopy class Model: def __init__(self, name: str, model, freq: str): self.name = name self.model = model self.freq = freq self.train = None self.test = None self.prediction = None self.pred_col = "prediction" self.y_col = "y" self.date_col = "ds" def fit(self, train_dataset): self.train = deepcopy(train_dataset) if "orbit" in self.name: self.model.fit(self.train) elif "nprophet" in self.name: self.model.fit(self.train, validate_each_epoch=True, valid_p=0.2, freq=self.freq, plot_live_loss=True, epochs=100) def predict(self, dataset: Any): self.test = deepcopy(dataset) if "orbit" in self.name: prediction = self.model.predict(self.test) elif "nprophet" in self.name: future = self.model.make_future_dataframe(self.train, periods=len(self.test)) prediction = self.model.predict(future).rename(columns={"yhat1": self.pred_col}) prediction = prediction[[self.date_col, self.pred_col]] self.prediction = prediction return self.prediction
true
true
7902e6d7b86cab4565e6529cb165981907947b27
1,754
py
Python
root/plugins/main_filter.py
KoshikKumar17/TG-RenameBot
3e6d20544fb45156c77f8a2b81c3e1eb8c9143ec
[ "MIT" ]
null
null
null
root/plugins/main_filter.py
KoshikKumar17/TG-RenameBot
3e6d20544fb45156c77f8a2b81c3e1eb8c9143ec
[ "MIT" ]
null
null
null
root/plugins/main_filter.py
KoshikKumar17/TG-RenameBot
3e6d20544fb45156c77f8a2b81c3e1eb8c9143ec
[ "MIT" ]
null
null
null
''' RenameBot This file is a part of mrvishal2k2 rename repo Dont kang !!! © Mrvishal2k2 ''' import pyrogram from pyrogram import Client, filters from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup import logging logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') log = logging.getLogger(__name__) @Client.on_message(filters.document | filters.video | filters.audio | filters.voice | filters.video_note | filters.animation) async def rename_filter(c,m): media = m.document or m.video or m.audio or m.voice or m.video_note or m.animation ## couldn't add photo bcoz i want all photos to use as thumb.. text = "" button = [] try: filename = media.file_name text += f"FileName:\n{filename}\n" except: # some files dont gib name .. filename = None text += "Select the desired Option" button.append([InlineKeyboardButton("Rename as File", callback_data="rename_file")]) # Thanks to albert for mime_type suggestion if media.mime_type.startswith("video/"): ## how the f the other formats can be uploaded as video button.append([InlineKeyboardButton("Rename as Video",callback_data="rename_video")]) button.append([InlineKeyboardButton("Convert as File",callback_data="convert_file")]) button.append([InlineKeyboardButton("Convert as Video",callback_data="convert_video")]) button.append([InlineKeyboardButton("Cancel ❌",callback_data="cancel")]) markup = InlineKeyboardMarkup(button) try: await c.send_chat_action(m.chat.id, "typing") await m.reply_text(text,quote=True,reply_markup=markup,parse_mode="markdown",disable_web_page_preview=True) except Exception as e: log.info(str(e))
38.977778
126
0.72805
import pyrogram from pyrogram import Client, filters from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup import logging logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') log = logging.getLogger(__name__) @Client.on_message(filters.document | filters.video | filters.audio | filters.voice | filters.video_note | filters.animation) async def rename_filter(c,m): media = m.document or m.video or m.audio or m.voice or m.video_note or m.animation text = "" button = [] try: filename = media.file_name text += f"FileName:\n{filename}\n" except: # some files dont gib name .. filename = None text += "Select the desired Option" button.append([InlineKeyboardButton("Rename as File", callback_data="rename_file")]) # Thanks to albert for mime_type suggestion if media.mime_type.startswith("video/"): ## how the f the other formats can be uploaded as video button.append([InlineKeyboardButton("Rename as Video",callback_data="rename_video")]) button.append([InlineKeyboardButton("Convert as File",callback_data="convert_file")]) button.append([InlineKeyboardButton("Convert as Video",callback_data="convert_video")]) button.append([InlineKeyboardButton("Cancel ❌",callback_data="cancel")]) markup = InlineKeyboardMarkup(button) try: await c.send_chat_action(m.chat.id, "typing") await m.reply_text(text,quote=True,reply_markup=markup,parse_mode="markdown",disable_web_page_preview=True) except Exception as e: log.info(str(e))
true
true
7902e7902d198e136e0b578083f411a15efb50e3
60
py
Python
pghoard/__main__.py
pellcorp/pghoard
8a83960ea9ee99fbe2bffa8753e7b7356f7c4417
[ "Apache-2.0" ]
731
2018-06-01T21:48:43.000Z
2022-03-29T08:21:42.000Z
pghoard/__main__.py
pellcorp/pghoard
8a83960ea9ee99fbe2bffa8753e7b7356f7c4417
[ "Apache-2.0" ]
124
2018-06-19T05:59:50.000Z
2022-03-31T18:17:59.000Z
pghoard/__main__.py
pellcorp/pghoard
8a83960ea9ee99fbe2bffa8753e7b7356f7c4417
[ "Apache-2.0" ]
64
2018-06-26T14:12:53.000Z
2022-03-20T07:33:33.000Z
import sys from . import pghoard sys.exit(pghoard.main())
10
24
0.733333
import sys from . import pghoard sys.exit(pghoard.main())
true
true
7902e7f0e2374bc0f2cb04301a2bc9bd459668ca
630
py
Python
modules/Exceptions.py
david-0609/OpenCV-Hand-Gesture-Control
2690216b9d997acc66bfd5f1185a49c9ca7354f5
[ "MIT" ]
1
2022-02-17T15:30:32.000Z
2022-02-17T15:30:32.000Z
modules/Exceptions.py
david-0609/OpenCV-Hand-Gesture-Control
2690216b9d997acc66bfd5f1185a49c9ca7354f5
[ "MIT" ]
null
null
null
modules/Exceptions.py
david-0609/OpenCV-Hand-Gesture-Control
2690216b9d997acc66bfd5f1185a49c9ca7354f5
[ "MIT" ]
null
null
null
class DirectionNotDetermined(BaseException): def __init__(self, message="Hand did not move in only one direction. Direction of movement cannot be determined."): self.message = message super().__init__(self.message) class ConfigError(BaseException): def __init__(self, message="Possible error in syntax of config."): self.message = message super().__init__(self.message) class GestureNotDetermined(BaseException): def __init__(self, message="The Gesture did not match with any registered gestures"): self.message = message super().__init__(self.message)
35
119
0.703175
class DirectionNotDetermined(BaseException): def __init__(self, message="Hand did not move in only one direction. Direction of movement cannot be determined."): self.message = message super().__init__(self.message) class ConfigError(BaseException): def __init__(self, message="Possible error in syntax of config."): self.message = message super().__init__(self.message) class GestureNotDetermined(BaseException): def __init__(self, message="The Gesture did not match with any registered gestures"): self.message = message super().__init__(self.message)
true
true
7902e8b66999407abb3cddccbea2ccc2a760a719
4,984
py
Python
core/track.py
yycho0108/monovo
9f2b5cf15f97e467c8e6e94ee16bb785ed6c7edd
[ "MIT" ]
null
null
null
core/track.py
yycho0108/monovo
9f2b5cf15f97e467c8e6e94ee16bb785ed6c7edd
[ "MIT" ]
null
null
null
core/track.py
yycho0108/monovo
9f2b5cf15f97e467c8e6e94ee16bb785ed6c7edd
[ "MIT" ]
null
null
null
import time import cv2 import numpy as np from collections import defaultdict class Tracker(object): def __init__(self, pLK=None): if pLK is None: # default LK param pLK = self.pLK0() self.lk_ = cv2.SparsePyrLKOpticalFlow_create( **pLK) self.tmp_ = defaultdict(lambda:None) def pLK0(self): """ Default LK Params. """ return dict( winSize = (12,6), maxLevel = 4, # == effective winsize up to 32*(2**4) = 512x256 crit= (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 100, 0.03), flags = 0, minEigThreshold = 1e-3 # TODO : disable eig? ) def __call__(self, img1, img2, pt1, pt2=None, thresh=2.0, return_msk=False ): """ Arguments: img1(np.ndarray) : previous image. (color/mono) (HxWx?) img2(np.ndarray) : current image (color/mono) (HxWx?) pt1(np.ndarray) : previous points. (Mx2) pt2(np.ndarray) : [Optional] current points estimate (Mx2) thresh(float) : Flow Back-projection Error threshold Returns: pt2(np.ndarray) : current points. (Mx2) idx(np.ndarray) : valid tracked indices from pt1 & pt2. """ if pt1.size <= 0: # soft fail pt2 = np.empty([0,2], dtype=np.float32) if return_msk: msk = np.empty([0], dtype=np.bool) return pt2, msk idx = np.empty([0], dtype=np.int32) return pt2, idx # stat img h, w = np.shape(img2)[:2] # convert to grayscale # TODO : check if already gray/mono if (np.ndim(img1) == 2) or img1.shape[2] == 1: # already monochromatic img1_gray = img1 img2_gray = img2 else: # handle image # 1 + pre-allocated data cache if self.tmp_['img1g'] is not None: cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY, self.tmp_['img1g']) img1_gray = self.tmp_['img1g'] else: img1_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY) self.tmp_['img1g'] = np.empty_like(img1_gray) # handle image # 2 + pre-allocated data cache if self.tmp_['img2g'] is not None: cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY, self.tmp_['img2g']) img2_gray = self.tmp_['img2g'] else: img2_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) self.tmp_['img2g'] = np.empty_like(img2_gray) # forward flow if pt2 is not None: # set initial flow flags self.lk_.setFlags(self.lk_.getFlags() | cv2.OPTFLOW_USE_INITIAL_FLOW ) pt2, st, _ = self.lk_.calc( img1_gray, img2_gray, pt1, pt2 ) else: pt2, st, _ = self.lk_.calc( img1_gray, img2_gray, pt1, None ) st_fw = st[:,0].astype(np.bool) # backward flow # unset initial flow flags self.lk_.setFlags(self.lk_.getFlags() & ~cv2.OPTFLOW_USE_INITIAL_FLOW ) pt1_r, st, _ = self.lk_.calc( img2_gray, img1_gray, pt2, None ) st_bw = st[:,0].astype(np.bool) # override error with reprojection error # (default error doesn't make much sense anyways) err = np.linalg.norm(pt1 - pt1_r, axis=-1) # apply mask msk = np.logical_and.reduce([ # error check err < thresh, # bounds check 0 <= pt2[:,0], 0 <= pt2[:,1], pt2[:,0] < w, pt2[:,1] < h, # status check st_fw, st_bw, ]) if return_msk: return pt2, msk else: idx = np.where(msk)[0] return pt2, idx def main(): from matplotlib import pyplot as plt # params w = 2*640 h = 2*480 n = 2*1024 di = 8 dj = 32 track = Tracker() img1 = np.random.randint(0, 255, size=(h,w,3), dtype=np.uint8) #img2 = np.random.randint(0, 255, size=(480,640,3), dtype=np.uint8) img2 = np.roll(img1, di, axis=0) img2 = np.roll(img2, dj, axis=1) #img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY) #img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) pt1 = np.random.uniform((0,0), (w,h), size=(n,2)).astype(np.float32) pt2, idx = track(img1, img2, pt1) #pt2, idx = track(img1, img2, pt1, pt2) fig, ax = plt.subplots(1,2) ax[0].imshow(img1, alpha=0.5) ax[0].plot(pt1[:,0], pt1[:,1], 'r+') ax[1].imshow(img2, alpha=0.5) ax[1].plot(pt1[:,0], pt1[:,1], 'bx') 
ax[1].plot(pt2[:,0], pt2[:,1], 'r+') plt.show() if __name__ == "__main__": main()
31.15
83
0.505618
import time import cv2 import numpy as np from collections import defaultdict class Tracker(object): def __init__(self, pLK=None): if pLK is None: pLK = self.pLK0() self.lk_ = cv2.SparsePyrLKOpticalFlow_create( **pLK) self.tmp_ = defaultdict(lambda:None) def pLK0(self): return dict( winSize = (12,6), maxLevel = 4, crit= (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 100, 0.03), flags = 0, minEigThreshold = 1e-3 ) def __call__(self, img1, img2, pt1, pt2=None, thresh=2.0, return_msk=False ): if pt1.size <= 0: pt2 = np.empty([0,2], dtype=np.float32) if return_msk: msk = np.empty([0], dtype=np.bool) return pt2, msk idx = np.empty([0], dtype=np.int32) return pt2, idx h, w = np.shape(img2)[:2] if (np.ndim(img1) == 2) or img1.shape[2] == 1: img1_gray = img1 img2_gray = img2 else: if self.tmp_['img1g'] is not None: cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY, self.tmp_['img1g']) img1_gray = self.tmp_['img1g'] else: img1_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY) self.tmp_['img1g'] = np.empty_like(img1_gray) if self.tmp_['img2g'] is not None: cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY, self.tmp_['img2g']) img2_gray = self.tmp_['img2g'] else: img2_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) self.tmp_['img2g'] = np.empty_like(img2_gray) if pt2 is not None: self.lk_.setFlags(self.lk_.getFlags() | cv2.OPTFLOW_USE_INITIAL_FLOW ) pt2, st, _ = self.lk_.calc( img1_gray, img2_gray, pt1, pt2 ) else: pt2, st, _ = self.lk_.calc( img1_gray, img2_gray, pt1, None ) st_fw = st[:,0].astype(np.bool) self.lk_.setFlags(self.lk_.getFlags() & ~cv2.OPTFLOW_USE_INITIAL_FLOW ) pt1_r, st, _ = self.lk_.calc( img2_gray, img1_gray, pt2, None ) st_bw = st[:,0].astype(np.bool) err = np.linalg.norm(pt1 - pt1_r, axis=-1) # apply mask msk = np.logical_and.reduce([ # error check err < thresh, # bounds check 0 <= pt2[:,0], 0 <= pt2[:,1], pt2[:,0] < w, pt2[:,1] < h, # status check st_fw, st_bw, ]) if return_msk: return pt2, msk else: idx = np.where(msk)[0] return pt2, idx def main(): from matplotlib import pyplot as plt # params w = 2*640 h = 2*480 n = 2*1024 di = 8 dj = 32 track = Tracker() img1 = np.random.randint(0, 255, size=(h,w,3), dtype=np.uint8) #img2 = np.random.randint(0, 255, size=(480,640,3), dtype=np.uint8) img2 = np.roll(img1, di, axis=0) img2 = np.roll(img2, dj, axis=1) #img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY) #img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) pt1 = np.random.uniform((0,0), (w,h), size=(n,2)).astype(np.float32) pt2, idx = track(img1, img2, pt1) #pt2, idx = track(img1, img2, pt1, pt2) fig, ax = plt.subplots(1,2) ax[0].imshow(img1, alpha=0.5) ax[0].plot(pt1[:,0], pt1[:,1], 'r+') ax[1].imshow(img2, alpha=0.5) ax[1].plot(pt1[:,0], pt1[:,1], 'bx') ax[1].plot(pt2[:,0], pt2[:,1], 'r+') plt.show() if __name__ == "__main__": main()
true
true
7902e942ac1ee9ebc0b17f459eb8215a678c1135
171,356
py
Python
lib/rucio/core/replica.py
dynamic-entropy/rucio
a0823b3ec19ff1f98639361a14b628ca6c908651
[ "Apache-2.0" ]
2
2021-05-19T08:55:40.000Z
2021-05-19T08:55:43.000Z
lib/rucio/core/replica.py
dynamic-entropy/rucio
a0823b3ec19ff1f98639361a14b628ca6c908651
[ "Apache-2.0" ]
null
null
null
lib/rucio/core/replica.py
dynamic-entropy/rucio
a0823b3ec19ff1f98639361a14b628ca6c908651
[ "Apache-2.0" ]
1
2018-06-25T19:12:53.000Z
2018-06-25T19:12:53.000Z
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <vincent.garonne@cern.ch>, 2013-2018 # - Cedric Serfon <cedric.serfon@cern.ch>, 2013-2020 # - Ralph Vigne <ralph.vigne@cern.ch>, 2013-2014 # - Martin Barisits <martin.barisits@cern.ch>, 2013-2021 # - Mario Lassnig <mario.lassnig@cern.ch>, 2014-2021 # - David Cameron <david.cameron@cern.ch>, 2014 # - Thomas Beermann <thomas.beermann@cern.ch>, 2014-2021 # - Wen Guan <wen.guan@cern.ch>, 2014-2015 # - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019 # - Dimitrios Christidis <dimitrios.christidis@cern.ch>, 2019-2021 # - Robert Illingworth <illingwo@fnal.gov>, 2019 # - James Perry <j.perry@epcc.ed.ac.uk>, 2019 # - Jaroslav Guenther <jaroslav.guenther@cern.ch>, 2019 # - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019 # - Ilija Vukotic <ivukotic@cern.ch>, 2020-2021 # - Brandon White <bjwhite@fnal.gov>, 2019 # - Tomas Javurek <tomas.javurek@cern.ch>, 2020 # - Luc Goossens <luc.goossens@cern.ch>, 2020 # - Eli Chadwick <eli.chadwick@stfc.ac.uk>, 2020 # - Patrick Austin <patrick.austin@stfc.ac.uk>, 2020 # - Eric Vaandering <ewv@fnal.gov>, 2020-2021 # - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020-2021 # - Radu Carpa <radu.carpa@cern.ch>, 2021 # - Gabriele Fronzé <sucre.91@hotmail.it>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def 
get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
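    The node is picked deterministically: the first 8 bytes of the sha256 digest of the filename
    are mapped to a float in [0, 1) and compared against the ranges configured for the cache
    site, so a given filename always resolves to the same cache server.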
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for nondeterministic rse). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True is successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable' 'for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
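    # The `none_value` variable lets the query below express "tombstone IS NOT NULL" through the
    # case() construct without writing a literal `!= None` comparison (which pep8/flake8 rejects);
    # the case(...) == rse_id filter is presumably shaped this way to match the function-based
    # tombstone index hinted above.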
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
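    Each replica dictionary is expected to provide at least 'rse_id', 'scope', 'name' and 'state'
    (a ReplicaState value or its string value); 'path' and, for failed transfers, 'error_message',
    'broken_rule_id' and 'broken_message' are optional.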
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
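    The replica dictionary must provide 'rse_id', 'scope' and 'name'; 'accessed_at' is optional
    and defaults to the current UTC time.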
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replica: A dictionary {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use.
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an R&D function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5  # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at an RSE. :param rse_id: the RSE id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: maximum number of results to return. :param session: Database session to use.
:returns: A list of dataset replica dictionaries """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql':  # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update requests for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum number of rows to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name,  # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name,  # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.'
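# Descriptive note (added): the raw DELETE below keeps, per (scope, name, did_type, rse_id) group, only the row with the smallest rowid and removes the other duplicates; rows with rse_id IS NULL are grouped together.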
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the updated_col_rep table.
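A minimal sketch of the intended call pattern; the requests are rows returned by
get_cleaned_updated_collection_replicas() above (worker values are hypothetical)::

    for request in get_cleaned_updated_collection_replicas(total_workers=1, worker_number=0):
        update_collection_replica(request)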
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. :returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query = query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True if successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of PFNs to delete. :param session: The database session in use. :returns: True if successful.
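A minimal usage sketch (the PFN is hypothetical)::

    bulk_delete_bad_pfns(['srm://se.example.org:8443/rucio/user/jdoe/file_1.root'])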
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the states as keys and the lists of rse_ids as values """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}, ...]
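A usage sketch (the RSE expression is hypothetical; the keyword values mirror
the defaults documented above)::

    suspicious = get_suspicious_files('type=SCRATCHDISK',
                                      younger_than=datetime.now() - timedelta(days=3),
                                      nattempts=2,
                                      exclude_states=['B', 'R', 'D'])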
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
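# Usage sketch (added; the identifiers are hypothetical): set_tombstone raises
# ReplicaIsLocked when a replica lock exists and ReplicaNotFound when the
# replica is not registered.
#
#     set_tombstone(rse_id=rse_id, scope=scope, name='file_1.root')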
avg_line_length: 51.691101
max_line_length: 289
alphanum_fraction: 0.600376
from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, session=None): result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): already_declared = False if path: 
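# Descriptive note (added): replica paths may be stored with or without a leading '/', so the clause below matches the given path in both forms.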
path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX)", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) result = list(set(reps)) return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): for did
in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and the replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE, check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here it means that the file has not been processed by the necromancer yet. Just pass pass except NoResultFound: # We end up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completeness; it shouldn't happen.
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: try: update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == 
'/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX)", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query =
session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). # If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. 
files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): if not nrandom: yield from generator return selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) 
NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) 
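# Cache the failure marker so the next calls skip the VP endpoint lookup; the warning below is logged and an empty prefix is returned.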
logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), ) original_domain = deepcopy(domain) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], 
operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: root_proxy_internal = config_get('root-proxy-internal', client_location['site'], default='', session=session) if root_proxy_internal: if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass else: pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 
'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity 
constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): nbfiles, bytes = 0, 0 condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 
'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', 
protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable' 'for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name 
in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, 
models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). 
\ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). \ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in 
row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): none_value = None query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ 
update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, 
synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, 
models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ 
.with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS 
CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == 
models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. 
Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): if update_request['rse_id'] is not None: ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], 
name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) 
if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states 
@read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' 
% (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
true
true
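A note on the replica-management code in the record above: long lists of SQLAlchemy OR-clauses are repeatedly processed in groups via chunks(clause, 100) before issuing queries. A minimal, self-contained sketch of that batching pattern follows; the chunks helper here is a stand-in written for illustration only, not imported from the record.

# Minimal stand-in for the chunking pattern used above; `chunks` here is a
# hypothetical local helper, not the Rucio utility.
def chunks(seq, n):
    for i in range(0, len(seq), n):
        yield seq[i:i + n]

clauses = list(range(250))
print([len(c) for c in chunks(clauses, 100)])  # [100, 100, 50]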
7902e95a547b34bba8e07ed95d68b660f737a29b
883
py
Python
064_minimum_path_sum.py
Sanster/LeetCode
5a0c448928b216b49c127c4542ae3cd74c797782
[ "MIT" ]
2
2018-10-13T15:12:55.000Z
2020-06-07T09:35:40.000Z
064_minimum_path_sum.py
Sanster/PyLeeCode
5a0c448928b216b49c127c4542ae3cd74c797782
[ "MIT" ]
null
null
null
064_minimum_path_sum.py
Sanster/PyLeeCode
5a0c448928b216b49c127c4542ae3cd74c797782
[ "MIT" ]
null
null
null
from typing import List


class Solution:
    def minPathSum(self, grid: List[List[int]]) -> int:
        m = len(grid)
        if m == 0:
            return 0

        if m == 1:
            return sum(grid[0])

        n = len(grid[0])

        dp = [[0] * n for _ in range(m)]
        for x in range(n):
            for y in range(m):
                if x == y == 0:
                    dp[0][0] = grid[0][0]
                elif x == 0:
                    dp[y][x] = dp[y - 1][x] + grid[y][x]
                elif y == 0:
                    dp[y][x] = dp[y][x - 1] + grid[y][x]
                else:
                    dp[y][x] = min(dp[y][x - 1], dp[y - 1][x]) + grid[y][x]

        return dp[-1][-1]


s = Solution()
print(s.minPathSum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))
print(s.minPathSum([[1, 3, 1]]))
print(s.minPathSum([]))
print(s.minPathSum([[1, 2, 5], [3, 2, 1]]))
24.527778
75
0.383918
from typing import List


class Solution:
    def minPathSum(self, grid: List[List[int]]) -> int:
        m = len(grid)
        if m == 0:
            return 0

        if m == 1:
            return sum(grid[0])

        n = len(grid[0])

        dp = [[0] * n for _ in range(m)]
        for x in range(n):
            for y in range(m):
                if x == y == 0:
                    dp[0][0] = grid[0][0]
                elif x == 0:
                    dp[y][x] = dp[y - 1][x] + grid[y][x]
                elif y == 0:
                    dp[y][x] = dp[y][x - 1] + grid[y][x]
                else:
                    dp[y][x] = min(dp[y][x - 1], dp[y - 1][x]) + grid[y][x]

        return dp[-1][-1]


s = Solution()
print(s.minPathSum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))
print(s.minPathSum([[1, 3, 1]]))
print(s.minPathSum([]))
print(s.minPathSum([[1, 2, 5], [3, 2, 1]]))
true
true
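The solution in the record above fills dp[y][x] = grid[y][x] + min(dp[y - 1][x], dp[y][x - 1]) and returns the bottom-right cell. As an illustrative sketch only (not part of the original file), the same recurrence can be run with a single reused row:

from typing import List

# Hypothetical one-row variant of the same DP, shown for illustration only;
# it is not part of the dataset record above.
def min_path_sum_1d(grid: List[List[int]]) -> int:
    if not grid:
        return 0
    n = len(grid[0])
    row = [0] * n
    for y, line in enumerate(grid):
        for x, val in enumerate(line):
            if y == 0 and x == 0:
                row[x] = val
            elif y == 0:
                row[x] = row[x - 1] + val
            elif x == 0:
                row[x] = row[x] + val
            else:
                row[x] = min(row[x], row[x - 1]) + val
    return row[-1]

# Example: the 3x3 grid from the record evaluates to 7 (path 1-3-1-1-1).
print(min_path_sum_1d([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7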
7902e9c82770fbbbd899b0a3af1e68b5cd29c31a
5,740
py
Python
google/cloud/compute_v1/services/target_instances/pagers.py
vam-google/python-compute
799f2f55e5e205317862a17ca7ed548ce2ca66e5
[ "Apache-2.0" ]
19
2021-02-10T21:17:20.000Z
2022-02-20T03:16:36.000Z
google/cloud/compute_v1/services/target_instances/pagers.py
vam-google/python-compute
799f2f55e5e205317862a17ca7ed548ce2ca66e5
[ "Apache-2.0" ]
121
2021-01-08T23:46:58.000Z
2022-03-26T04:34:36.000Z
google/cloud/compute_v1/services/target_instances/pagers.py
vam-google/python-compute
799f2f55e5e205317862a17ca7ed548ce2ca66e5
[ "Apache-2.0" ]
20
2021-01-08T23:14:16.000Z
2022-02-25T01:27:20.000Z
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from typing import ( Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional, ) from google.cloud.compute_v1.types import compute class AggregatedListPager: """A pager for iterating through ``aggregated_list`` requests. This class thinly wraps an initial :class:`google.cloud.compute_v1.types.TargetInstanceAggregatedList` object, and provides an ``__iter__`` method to iterate through its ``items`` field. If there are more pages, the ``__iter__`` method will make additional ``AggregatedList`` requests and continue to iterate through the ``items`` field on the corresponding responses. All the usual :class:`google.cloud.compute_v1.types.TargetInstanceAggregatedList` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ def __init__( self, method: Callable[..., compute.TargetInstanceAggregatedList], request: compute.AggregatedListTargetInstancesRequest, response: compute.TargetInstanceAggregatedList, *, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. Args: method (Callable): The method that was originally called, and which instantiated this pager. request (google.cloud.compute_v1.types.AggregatedListTargetInstancesRequest): The initial request object. response (google.cloud.compute_v1.types.TargetInstanceAggregatedList): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = compute.AggregatedListTargetInstancesRequest(request) self._response = response self._metadata = metadata def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property def pages(self) -> Iterable[compute.TargetInstanceAggregatedList]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response def __iter__(self) -> Iterable[Tuple[str, compute.TargetInstancesScopedList]]: for page in self.pages: yield from page.items.items() def get(self, key: str) -> Optional[compute.TargetInstancesScopedList]: return self._response.items.get(key) def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) class ListPager: """A pager for iterating through ``list`` requests. This class thinly wraps an initial :class:`google.cloud.compute_v1.types.TargetInstanceList` object, and provides an ``__iter__`` method to iterate through its ``items`` field. If there are more pages, the ``__iter__`` method will make additional ``List`` requests and continue to iterate through the ``items`` field on the corresponding responses. All the usual :class:`google.cloud.compute_v1.types.TargetInstanceList` attributes are available on the pager. 
If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ def __init__( self, method: Callable[..., compute.TargetInstanceList], request: compute.ListTargetInstancesRequest, response: compute.TargetInstanceList, *, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. Args: method (Callable): The method that was originally called, and which instantiated this pager. request (google.cloud.compute_v1.types.ListTargetInstancesRequest): The initial request object. response (google.cloud.compute_v1.types.TargetInstanceList): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = compute.ListTargetInstancesRequest(request) self._response = response self._metadata = metadata def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property def pages(self) -> Iterable[compute.TargetInstanceList]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response def __iter__(self) -> Iterable[compute.TargetInstance]: for page in self.pages: yield from page.items def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
37.032258
89
0.675958
from typing import ( Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional, ) from google.cloud.compute_v1.types import compute class AggregatedListPager: def __init__( self, method: Callable[..., compute.TargetInstanceAggregatedList], request: compute.AggregatedListTargetInstancesRequest, response: compute.TargetInstanceAggregatedList, *, metadata: Sequence[Tuple[str, str]] = () ): self._method = method self._request = compute.AggregatedListTargetInstancesRequest(request) self._response = response self._metadata = metadata def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property def pages(self) -> Iterable[compute.TargetInstanceAggregatedList]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response def __iter__(self) -> Iterable[Tuple[str, compute.TargetInstancesScopedList]]: for page in self.pages: yield from page.items.items() def get(self, key: str) -> Optional[compute.TargetInstancesScopedList]: return self._response.items.get(key) def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) class ListPager: def __init__( self, method: Callable[..., compute.TargetInstanceList], request: compute.ListTargetInstancesRequest, response: compute.TargetInstanceList, *, metadata: Sequence[Tuple[str, str]] = () ): self._method = method self._request = compute.ListTargetInstancesRequest(request) self._response = response self._metadata = metadata def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property def pages(self) -> Iterable[compute.TargetInstanceList]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response def __iter__(self) -> Iterable[compute.TargetInstance]: for page in self.pages: yield from page.items def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
true
true
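The pager classes in the record above follow a common pagination pattern: yield the current response, then keep issuing requests while next_page_token is set. A self-contained sketch of that pattern using plain-Python stand-ins (FakeResponse and fake_method are invented for illustration; the real classes operate on compute_v1 request and response objects):

# Self-contained sketch of the pagination pattern; all names are stand-ins.
class FakeResponse:
    def __init__(self, items, next_page_token=""):
        self.items = items
        self.next_page_token = next_page_token

pages = {"": FakeResponse([1, 2], "tok1"), "tok1": FakeResponse([3], "")}

def fake_method(request, metadata=()):
    return pages[request["page_token"]]

def iterate(method, request, response):
    # Mirrors ListPager.pages / __iter__: follow next_page_token until empty.
    while True:
        yield from response.items
        if not response.next_page_token:
            break
        request["page_token"] = response.next_page_token
        response = method(request)

print(list(iterate(fake_method, {"page_token": ""}, pages[""])))  # [1, 2, 3]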
7902ea2e9f78819df7c8377f2a396299278f16c8
288
py
Python
setup.py
jorepstein1/bidict
71c01dbf01963c9dc99d0119b63b4a668088982a
[ "MIT" ]
null
null
null
setup.py
jorepstein1/bidict
71c01dbf01963c9dc99d0119b63b4a668088982a
[ "MIT" ]
null
null
null
setup.py
jorepstein1/bidict
71c01dbf01963c9dc99d0119b63b4a668088982a
[ "MIT" ]
null
null
null
from distutils.core import setup

setup(name='bidict',
      version='0.1',
      description='A bi-directional dictionary API',
      author='Jordan Epstein',
      author_email='jorepstein1@gmail.com',
      url='https://github.com/jorepstein1/bidict',
      packages=['bidict'],
      )
28.8
52
0.652778
from distutils.core import setup

setup(name='bidict',
      version='0.1',
      description='A bi-directional dictionary API',
      author='Jordan Epstein',
      author_email='jorepstein1@gmail.com',
      url='https://github.com/jorepstein1/bidict',
      packages=['bidict'],
      )
true
true
7902ea5a5b945062bf7c46b4bcc4ea01608a3184
618
py
Python
producer_server.py
estarguars113/udacity-spark-project
310b8ca315d9e84f7645eacfff58715814a3fd3a
[ "MIT" ]
null
null
null
producer_server.py
estarguars113/udacity-spark-project
310b8ca315d9e84f7645eacfff58715814a3fd3a
[ "MIT" ]
null
null
null
producer_server.py
estarguars113/udacity-spark-project
310b8ca315d9e84f7645eacfff58715814a3fd3a
[ "MIT" ]
null
null
null
from kafka import KafkaProducer
from json import dumps as json_dumps, load as json_load
import time


class ProducerServer(KafkaProducer):

    def __init__(self, input_file, topic, **kwargs):
        super().__init__(**kwargs)
        self.input_file = input_file
        self.topic = topic

    def generate_data(self):
        with open(self.input_file) as f:
            data = json_load(f)
            for line in data:
                message = self.dict_to_binary(line)
                self.send(self.topic, message)

    def dict_to_binary(self, json_dict):
        return json_dumps(json_dict).encode('utf-8')
29.428571
55
0.645631
from kafka import KafkaProducer
from json import dumps as json_dumps, load as json_load
import time


class ProducerServer(KafkaProducer):

    def __init__(self, input_file, topic, **kwargs):
        super().__init__(**kwargs)
        self.input_file = input_file
        self.topic = topic

    def generate_data(self):
        with open(self.input_file) as f:
            data = json_load(f)
            for line in data:
                message = self.dict_to_binary(line)
                self.send(self.topic, message)

    def dict_to_binary(self, json_dict):
        return json_dumps(json_dict).encode('utf-8')
true
true
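A hedged usage sketch for the ProducerServer class in the record above; the broker address, topic name, input file, and client id are assumptions, and a reachable Kafka broker is required for this to run.

# Hypothetical usage sketch; none of these argument values come from the record.
producer = ProducerServer(
    input_file="police-department-calls.json",
    topic="service-calls",
    bootstrap_servers="localhost:9092",
    client_id="producer-sketch",
)
producer.generate_data()   # reads the JSON list and sends one message per entry
producer.flush()           # inherited KafkaProducer method; drain buffered messages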
7902eb3980d11a9a96c34cebaff2390898a077d5
2,842
py
Python
results_processing/ABC/csv_processing.py
multimodalspectroscopy/hypothermia-bayescmd
94307593de7697140f7563f1b449f1f6165cd79b
[ "MIT" ]
null
null
null
results_processing/ABC/csv_processing.py
multimodalspectroscopy/hypothermia-bayescmd
94307593de7697140f7563f1b449f1f6165cd79b
[ "MIT" ]
5
2020-04-02T16:59:17.000Z
2021-12-13T19:58:50.000Z
results_processing/ABC/csv_processing.py
multimodalspectroscopy/hypothermia-bayescmd
94307593de7697140f7563f1b449f1f6165cd79b
[ "MIT" ]
null
null
null
import os
import pandas as pd
import re


def sort_human(l):
    """Sort a list of strings by numerical."""
    def convert(text):
        return float(text) if text.isdigit() else text

    def alphanum(key):
        return [convert(c) for c in re.split('([-+]?[0-9]*\.?[0-9]*)', key)]

    l.sort(key=alphanum)
    return l


def data_merge_by_batch(parent_directory, verbose=True):
    """Merge a set of parameters.csv files into one.

    This is intended for use with batch processes from Legion, with each
    batch being 1000 runs long and numbered with integer values.

    Parameters
    ----------
    parent_directory : :obj:`list` of :obj:`str`
        Parent directory to a set of directories each containing model runs
        and a parameters.csv file.
    verbose : :obj:`boolean`, optional
        Boolean indicator of whether to print extra information.

    Returns
    -------
    None
        Concatenated will be written to file in `parent_directory`
    """
    dirs = [os.path.abspath(os.path.join(parent_directory, d))
            for d in os.listdir(parent_directory)
            if os.path.isdir(os.path.abspath(
                os.path.join(parent_directory, d))) and d != 'archives']
    dirs = sort_human(dirs)
    if verbose:
        print(dirs)
    dfs = []
    for d in dirs:
        try:
            dfs.append(pd.read_csv(os.path.join(d, 'parameters.csv')))
            ii = len(dfs) - 1
            print("Processing parameter file {}".format(ii))
            if ii is not 0:
                dfs[ii]['ix'] = dfs[ii].index.values + \
                    dfs[ii - 1]['ix'].values[-1] + 1
            else:
                dfs[ii]['ix'] = dfs[ii].index.values

            if os.path.split(d)[1].split('_')[-1].isdigit():
                print(os.path.split(d)[1].split('_')[-1])
                dfs[ii]['Batch'] = int(os.path.split(d)[1].split('_')[-1])
            else:
                print("Batch number not found for {}".format(d))
                continue
        except FileNotFoundError:
            print("No parameters file in {}".format(d))
            continue
    if verbose:
        print("{} dataframes to be joined".format(len(dfs)))
    # for ii in range(len(dfs)):
    #     if ii is not 0:
    #         dfs[ii]['ix'] = dfs[ii].index.values + dfs[ii - 1]['ix'].values[-1]
    #     else:
    #         dfs[ii]['ix'] = dfs[ii].index.values
    #     if os.path.split(dirs[ii])[1][:4].isdigit():
    #         print(os.path.split(dirs[ii])[1][:4])
    #         dfs[ii]['Start Time'] = os.path.split(dirs[ii])[1][:4]
    #     else:
    #         continue
    df = pd.concat(dfs)
    df.index = range(len(df))
    output_file = os.path.join(parent_directory, 'all_parameters.csv')
    df.to_csv(output_file, index=False)

    return output_file
34.240964
81
0.54715
import os
import pandas as pd
import re


def sort_human(l):
    def convert(text):
        return float(text) if text.isdigit() else text

    def alphanum(key):
        return [convert(c) for c in re.split('([-+]?[0-9]*\.?[0-9]*)', key)]

    l.sort(key=alphanum)
    return l


def data_merge_by_batch(parent_directory, verbose=True):
    dirs = [os.path.abspath(os.path.join(parent_directory, d))
            for d in os.listdir(parent_directory)
            if os.path.isdir(os.path.abspath(
                os.path.join(parent_directory, d))) and d != 'archives']
    dirs = sort_human(dirs)
    if verbose:
        print(dirs)
    dfs = []
    for d in dirs:
        try:
            dfs.append(pd.read_csv(os.path.join(d, 'parameters.csv')))
            ii = len(dfs) - 1
            print("Processing parameter file {}".format(ii))
            if ii is not 0:
                dfs[ii]['ix'] = dfs[ii].index.values + \
                    dfs[ii - 1]['ix'].values[-1] + 1
            else:
                dfs[ii]['ix'] = dfs[ii].index.values

            if os.path.split(d)[1].split('_')[-1].isdigit():
                print(os.path.split(d)[1].split('_')[-1])
                dfs[ii]['Batch'] = int(os.path.split(d)[1].split('_')[-1])
            else:
                print("Batch number not found for {}".format(d))
                continue
        except FileNotFoundError:
            print("No parameters file in {}".format(d))
            continue
    if verbose:
        print("{} dataframes to be joined".format(len(dfs)))

    df = pd.concat(dfs)
    df.index = range(len(df))
    output_file = os.path.join(parent_directory, 'all_parameters.csv')
    df.to_csv(output_file, index=False)

    return output_file
true
true
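A small check of the sort_human helper defined in the record above, assuming Python 3.7+ (earlier versions handle empty regex matches in re.split differently):

# Uses sort_human from the record above; directory-style names sort by their
# numeric suffix rather than lexicographically.
batches = ["run_10", "run_2", "run_1"]
print(sort_human(batches))  # expected: ['run_1', 'run_2', 'run_10'] on Python 3.7+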
7902ebeeae60d0a2ddc98e1cda2e776a13b5a7be
326
py
Python
osquery_rules/osquery_outdated.py
designing-penguin/panther-analysis
26034cea4504f43227f8d3789225f6ca7b35dfe0
[ "Apache-2.0" ]
null
null
null
osquery_rules/osquery_outdated.py
designing-penguin/panther-analysis
26034cea4504f43227f8d3789225f6ca7b35dfe0
[ "Apache-2.0" ]
null
null
null
osquery_rules/osquery_outdated.py
designing-penguin/panther-analysis
26034cea4504f43227f8d3789225f6ca7b35dfe0
[ "Apache-2.0" ]
null
null
null
LATEST_VERSION = '4.2.0'


def rule(event):
    return (event['name'] == 'pack_it-compliance_osquery_info' and
            event['columns']['version'] != LATEST_VERSION and
            event['action'] == 'added')


def title(event):
    return 'Osquery Version {} is Outdated'.format(
        event['columns'].get('version'))
25.076923
66
0.613497
LATEST_VERSION = '4.2.0'


def rule(event):
    return (event['name'] == 'pack_it-compliance_osquery_info' and
            event['columns']['version'] != LATEST_VERSION and
            event['action'] == 'added')


def title(event):
    return 'Osquery Version {} is Outdated'.format(
        event['columns'].get('version'))
true
true
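An illustrative event for the rule in the record above; the field values are invented and not taken from a real osquery log.

# Hypothetical event shape, assuming rule() and title() from the record above.
event = {
    "name": "pack_it-compliance_osquery_info",
    "action": "added",
    "columns": {"version": "4.1.2"},
}
print(rule(event))   # True, because the reported version differs from LATEST_VERSION
print(title(event))  # 'Osquery Version 4.1.2 is Outdated'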
7902ece0e7c02acca8d73e7f9069ffe6977407ee
1,684
py
Python
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/unsafe/numbers.py
BadDevCode/lumberyard
3d688932f919dbf5821f0cb8a210ce24abe39e9e
[ "AML" ]
1,738
2017-09-21T10:59:12.000Z
2022-03-31T21:05:46.000Z
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/unsafe/numbers.py
olivier-be/lumberyard
3d688932f919dbf5821f0cb8a210ce24abe39e9e
[ "AML" ]
427
2017-09-29T22:54:36.000Z
2022-02-15T19:26:50.000Z
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/unsafe/numbers.py
olivier-be/lumberyard
3d688932f919dbf5821f0cb8a210ce24abe39e9e
[ "AML" ]
671
2017-09-21T08:04:01.000Z
2022-03-29T14:30:07.000Z
""" This module provides the unsafe things for targets/numbers.py """ from .. import types from ..extending import intrinsic from llvmlite import ir @intrinsic def viewer(tyctx, val, viewty): """ Bitcast a scalar 'val' to the given type 'viewty'. """ bits = val.bitwidth if isinstance(viewty.dtype, types.Integer): bitcastty = ir.IntType(bits) elif isinstance(viewty.dtype, types.Float): bitcastty = ir.FloatType() if bits == 32 else ir.DoubleType() else: assert 0, "unreachable" def codegen(cgctx, builder, typ, args): flt = args[0] return builder.bitcast(flt, bitcastty) retty = viewty.dtype sig = retty(val, viewty) return sig, codegen @intrinsic def trailing_zeros(typeingctx, src): """Counts trailing zeros in the binary representation of an integer.""" if not isinstance(src, types.Integer): raise TypeError( "trailing_zeros is only defined for integers, but passed value was" " '{}'.".format(src) ) def codegen(context, builder, signature, args): [src] = args return builder.cttz(src, ir.Constant(ir.IntType(1), 0)) return src(src), codegen @intrinsic def leading_zeros(typeingctx, src): """Counts leading zeros in the binary representation of an integer.""" if not isinstance(src, types.Integer): raise TypeError( "leading_zeros is only defined for integers, but passed value was " "'{}'.".format(src) ) def codegen(context, builder, signature, args): [src] = args return builder.ctlz(src, ir.Constant(ir.IntType(1), 0)) return src(src), codegen
30.071429
79
0.643705
from .. import types
from ..extending import intrinsic
from llvmlite import ir


@intrinsic
def viewer(tyctx, val, viewty):
    bits = val.bitwidth
    if isinstance(viewty.dtype, types.Integer):
        bitcastty = ir.IntType(bits)
    elif isinstance(viewty.dtype, types.Float):
        bitcastty = ir.FloatType() if bits == 32 else ir.DoubleType()
    else:
        assert 0, "unreachable"

    def codegen(cgctx, builder, typ, args):
        flt = args[0]
        return builder.bitcast(flt, bitcastty)
    retty = viewty.dtype
    sig = retty(val, viewty)
    return sig, codegen


@intrinsic
def trailing_zeros(typeingctx, src):
    if not isinstance(src, types.Integer):
        raise TypeError(
            "trailing_zeros is only defined for integers, but passed value was"
            " '{}'.".format(src)
        )

    def codegen(context, builder, signature, args):
        [src] = args
        return builder.cttz(src, ir.Constant(ir.IntType(1), 0))
    return src(src), codegen


@intrinsic
def leading_zeros(typeingctx, src):
    if not isinstance(src, types.Integer):
        raise TypeError(
            "leading_zeros is only defined for integers, but passed value was "
            "'{}'.".format(src)
        )

    def codegen(context, builder, signature, args):
        [src] = args
        return builder.ctlz(src, ir.Constant(ir.IntType(1), 0))
    return src(src), codegen
true
true
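The two intrinsics in the record above lower to LLVM's cttz and ctlz instructions. A plain-Python sketch of what those count for a 64-bit value follows; it is an illustration only, not the numba code path.

# Illustrative pure-Python equivalents of the bit counts; width=64 is an
# assumption made for the example.
def trailing_zeros_py(x, width=64):
    return (x & -x).bit_length() - 1 if x else width

def leading_zeros_py(x, width=64):
    return width - x.bit_length()

print(trailing_zeros_py(0b101000))      # 3
print(leading_zeros_py(1, width=64))    # 63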
7902edb247cc73e6dd6c9166c0f1e2a21d07d657
6,202
py
Python
baselines/imit/memory.py
DanielTakeshi/baselines-fork
7ac6f52ff21f43c519e01179740c019bbe1c55bf
[ "MIT" ]
3
2020-10-25T20:32:22.000Z
2021-03-29T01:51:12.000Z
baselines/imit/memory.py
DanielTakeshi/baselines-fork
7ac6f52ff21f43c519e01179740c019bbe1c55bf
[ "MIT" ]
1
2020-02-11T22:56:54.000Z
2020-02-20T00:21:34.000Z
baselines/imit/memory.py
DanielTakeshi/baselines-fork
7ac6f52ff21f43c519e01179740c019bbe1c55bf
[ "MIT" ]
null
null
null
"""Similar to DDPG except we only need obs and act, not the reward, etc. """ import numpy as np class RingBuffer(object): def __init__(self, maxlen, shape, dtype='float32'): self.maxlen = maxlen self.start = 0 self.length = 0 if dtype == 'uint8': # Daniel: special case with our XP replay. Force memory allocation # right away by the += 0 op, to check that system has enough RAM. # Might not be good for speed so we'll have to time it. self.data = np.zeros((maxlen,) + shape, dtype=np.uint8) print("Allocating data of size {} ...".format(self.data.shape)) self.data += 0 else: self.data = np.zeros((maxlen,) + shape).astype(dtype) # Daniel: avoid over-writing teacher samples. self.teach_idx = 0 def __len__(self): return self.length def __getitem__(self, idx): # Daniel: we shouldn't be calling this if it's using our DDPG/IMIT. assert self.teach_idx == 0, \ 'Something went wrong, why are we calling this method?' if idx < 0 or idx >= self.length: raise KeyError() return self.data[(self.start + idx) % self.maxlen] def get_batch(self, idxs): #return self.data[(self.start + idxs) % self.maxlen] # Daniel: seems like it's just fine to do this. It's the responsibility # of the caller to call a valid set of indices. And we do that with # randint in the memory class later. Here we avoid headaches with # `self.start` because I restrict it to be at least the teach_idx. return self.data[idxs] def append(self, v, is_teacher=False): if self.length < self.maxlen: # We have space, simply increase the length. self.length += 1 if is_teacher: self.teach_idx += 1 elif self.length == self.maxlen: # No space, "remove" the first item. #self.start = (self.start + 1) % self.maxlen self.start = max(self.teach_idx, (self.start + 1) % self.maxlen) else: # This should never happen. raise RuntimeError() self.data[(self.start + self.length - 1) % self.maxlen] = v def array_min2d(x): x = np.array(x) if x.ndim >= 2: return x return x.reshape(-1, 1) class Memory(object): def __init__(self, limit, action_shape, observation_shape, dtype='float32', do_valid=False): """Daniel: careful about RAM usage. See: https://github.com/BerkeleyAutomation/baselines-fork/issues/9 For this we can assume that in the replay buffer, the teacher samples come first, and are fixed ahead of time, so our 'starting' index for adding into the replay buffer should be offset by this quantity. """ self.limit = limit self.do_valid = do_valid if self.do_valid: self.valid_frac = 0.2 self.nb_valid_items = 0 # will adjust later self.observations0 = RingBuffer(limit, shape=observation_shape, dtype=dtype) self.actions = RingBuffer(limit, shape=action_shape) self.nb_teach = 0 self.done_adding_teach = False def sample(self, batch_size): # Draw such that we always have a proceeding element. # TODO(Daniel): the -2 doesn't make sense, we don't need a proceeding # element because the next observation is in a separate ring buffer?? I # think it should be nb_entries, so we are in practice not sampling the # last two items in this replay buffer. I'm switching to -1, should do # 0 later if I'm confident we're not ignoring anything else ... if self.do_valid: # If we're doing validation, which should NOT normally be true, # ignore the first few items, which we assign to be in validation. 
batch_idxs = np.random.randint(self.nb_valid_items, self.nb_entries-1, size=batch_size) else: batch_idxs = np.random.randint(self.nb_entries-1, size=batch_size) obs0_batch = self.observations0.get_batch(batch_idxs) action_batch = self.actions.get_batch(batch_idxs) # Assume `x < self.nb_teach` (not equality!) is a teacher sample. flag_teacher = (batch_idxs < self.nb_teach).astype(np.float32) result = { 'obs0': array_min2d(obs0_batch), 'actions': array_min2d(action_batch), 'flag_teacher': array_min2d(flag_teacher), } return result def append(self, obs0, action, is_teacher=False, training=True): """Keep separate copies of obs0, obs1. So it's not memory efficient. """ if not training: return if is_teacher: assert not self.done_adding_teach, self.nb_teach assert self.nb_teach < self.limit, self.nb_teach self.nb_teach += 1 self.observations0.append(obs0, is_teacher) self.actions.append(action, is_teacher) def set_teacher_idx(self): """Call from IMIT so we do not over-write teacher data. """ self.done_adding_teach = True def set_valid_idx(self): """Set the validation index. """ assert self.done_adding_teach self.nb_valid_items = int(self.valid_frac * self.nb_entries) @property def nb_entries(self): return len(self.observations0) @property def nb_teach_entries(self): return self.nb_teach @property def nb_valid(self): return self.nb_valid_items def get_valid_obs(self, s_idx, e_idx): """Get a validation minibatch with fixed starting and ending indices. """ assert self.do_valid batch_idxs = np.arange(s_idx, e_idx) obs0_batch = self.observations0.get_batch(batch_idxs) action_batch = self.actions.get_batch(batch_idxs) result = { 'obs0': array_min2d(obs0_batch), 'actions': array_min2d(action_batch), } return result
38.04908
84
0.60803
import numpy as np


class RingBuffer(object):

    def __init__(self, maxlen, shape, dtype='float32'):
        self.maxlen = maxlen
        self.start = 0
        self.length = 0
        if dtype == 'uint8':
            self.data = np.zeros((maxlen,) + shape, dtype=np.uint8)
            print("Allocating data of size {} ...".format(self.data.shape))
            self.data += 0
        else:
            self.data = np.zeros((maxlen,) + shape).astype(dtype)
        # Daniel: avoid over-writing teacher samples.
        self.teach_idx = 0

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        # Daniel: we shouldn't be calling this if it's using our DDPG/IMIT.
        assert self.teach_idx == 0, \
            'Something went wrong, why are we calling this method?'
        if idx < 0 or idx >= self.length:
            raise KeyError()
        return self.data[(self.start + idx) % self.maxlen]

    def get_batch(self, idxs):
        #return self.data[(self.start + idxs) % self.maxlen]
        # Daniel: seems like it's just fine to do this. It's the responsibility
        # of the caller to call a valid set of indices. And we do that with
        # randint in the memory class later. Here we avoid headaches with
        # `self.start` because I restrict it to be at least the teach_idx.
        return self.data[idxs]

    def append(self, v, is_teacher=False):
        if self.length < self.maxlen:
            # We have space, simply increase the length.
            self.length += 1
            if is_teacher:
                self.teach_idx += 1
        elif self.length == self.maxlen:
            # No space, "remove" the first item.
            #self.start = (self.start + 1) % self.maxlen
            self.start = max(self.teach_idx, (self.start + 1) % self.maxlen)
        else:
            # This should never happen.
            raise RuntimeError()
        self.data[(self.start + self.length - 1) % self.maxlen] = v


def array_min2d(x):
    x = np.array(x)
    if x.ndim >= 2:
        return x
    return x.reshape(-1, 1)


class Memory(object):

    def __init__(self, limit, action_shape, observation_shape, dtype='float32',
                 do_valid=False):
        self.limit = limit
        self.do_valid = do_valid
        if self.do_valid:
            self.valid_frac = 0.2
            self.nb_valid_items = 0  # will adjust later
        self.observations0 = RingBuffer(limit, shape=observation_shape, dtype=dtype)
        self.actions = RingBuffer(limit, shape=action_shape)
        self.nb_teach = 0
        self.done_adding_teach = False

    def sample(self, batch_size):
        # Draw such that we always have a proceeding element.
        # TODO(Daniel): the -2 doesn't make sense, we don't need a proceeding
        # element because the next observation is in a separate ring buffer?? I
        # think it should be nb_entries, so we are in practice not sampling the
        # last two items in this replay buffer. I'm switching to -1, should do
        if self.do_valid:
            # ignore the first few items, which we assign to be in validation.
            batch_idxs = np.random.randint(self.nb_valid_items,
                                           self.nb_entries-1,
                                           size=batch_size)
        else:
            batch_idxs = np.random.randint(self.nb_entries-1, size=batch_size)

        obs0_batch = self.observations0.get_batch(batch_idxs)
        action_batch = self.actions.get_batch(batch_idxs)

        # Assume `x < self.nb_teach` (not equality!) is a teacher sample.
        flag_teacher = (batch_idxs < self.nb_teach).astype(np.float32)

        result = {
            'obs0': array_min2d(obs0_batch),
            'actions': array_min2d(action_batch),
            'flag_teacher': array_min2d(flag_teacher),
        }
        return result

    def append(self, obs0, action, is_teacher=False, training=True):
        if not training:
            return
        if is_teacher:
            assert not self.done_adding_teach, self.nb_teach
            assert self.nb_teach < self.limit, self.nb_teach
            self.nb_teach += 1
        self.observations0.append(obs0, is_teacher)
        self.actions.append(action, is_teacher)

    def set_teacher_idx(self):
        self.done_adding_teach = True

    def set_valid_idx(self):
        assert self.done_adding_teach
        self.nb_valid_items = int(self.valid_frac * self.nb_entries)

    @property
    def nb_entries(self):
        return len(self.observations0)

    @property
    def nb_teach_entries(self):
        return self.nb_teach

    @property
    def nb_valid(self):
        return self.nb_valid_items

    def get_valid_obs(self, s_idx, e_idx):
        assert self.do_valid
        batch_idxs = np.arange(s_idx, e_idx)
        obs0_batch = self.observations0.get_batch(batch_idxs)
        action_batch = self.actions.get_batch(batch_idxs)
        result = {
            'obs0': array_min2d(obs0_batch),
            'actions': array_min2d(action_batch),
        }
        return result
true
true
7902eeefc3a986adfd0736c7bdb30a35c2e8c211
87,662
py
Python
python/pyspark/ml/regression.py
AjithShetty2489/spark
0c6bd3bd0b95d17bc1eebb503269eda43df90394
[ "BSD-2-Clause", "Apache-2.0", "CC0-1.0", "MIT", "MIT-0", "ECL-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
python/pyspark/ml/regression.py
AjithShetty2489/spark
0c6bd3bd0b95d17bc1eebb503269eda43df90394
[ "BSD-2-Clause", "Apache-2.0", "CC0-1.0", "MIT", "MIT-0", "ECL-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
1
2019-03-26T05:30:37.000Z
2019-03-26T05:43:39.000Z
python/pyspark/ml/regression.py
ajithme/spark
0c6bd3bd0b95d17bc1eebb503269eda43df90394
[ "BSD-2-Clause", "Apache-2.0", "CC0-1.0", "MIT", "MIT-0", "ECL-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import sys from pyspark import since, keyword_only from pyspark.ml.param.shared import * from pyspark.ml.tree import _DecisionTreeModel, _DecisionTreeParams, \ _TreeEnsembleModel, _TreeEnsembleParams, _RandomForestParams, _GBTParams, \ _HasVarianceImpurity, _TreeRegressorParams from pyspark.ml.util import * from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams, \ JavaPredictor, JavaPredictionModel, _JavaPredictorParams, JavaWrapper from pyspark.ml.common import inherit_doc from pyspark.sql import DataFrame __all__ = ['AFTSurvivalRegression', 'AFTSurvivalRegressionModel', 'DecisionTreeRegressor', 'DecisionTreeRegressionModel', 'GBTRegressor', 'GBTRegressionModel', 'GeneralizedLinearRegression', 'GeneralizedLinearRegressionModel', 'GeneralizedLinearRegressionSummary', 'GeneralizedLinearRegressionTrainingSummary', 'IsotonicRegression', 'IsotonicRegressionModel', 'LinearRegression', 'LinearRegressionModel', 'LinearRegressionSummary', 'LinearRegressionTrainingSummary', 'RandomForestRegressor', 'RandomForestRegressionModel', 'FMRegressor', 'FMRegressionModel'] class _LinearRegressionParams(_JavaPredictorParams, HasRegParam, HasElasticNetParam, HasMaxIter, HasTol, HasFitIntercept, HasStandardization, HasWeightCol, HasSolver, HasAggregationDepth, HasLoss): """ Params for :py:class:`LinearRegression` and :py:class:`LinearRegressionModel`. .. versionadded:: 3.0.0 """ solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " + "options: auto, normal, l-bfgs.", typeConverter=TypeConverters.toString) loss = Param(Params._dummy(), "loss", "The loss function to be optimized. Supported " + "options: squaredError, huber.", typeConverter=TypeConverters.toString) epsilon = Param(Params._dummy(), "epsilon", "The shape parameter to control the amount of " + "robustness. Must be > 1.0. Only valid when loss is huber", typeConverter=TypeConverters.toFloat) @since("2.3.0") def getEpsilon(self): """ Gets the value of epsilon or its default value. """ return self.getOrDefault(self.epsilon) @inherit_doc class LinearRegression(JavaPredictor, _LinearRegressionParams, JavaMLWritable, JavaMLReadable): """ Linear regression. The learning objective is to minimize the specified loss function, with regularization. This supports two kinds of loss: * squaredError (a.k.a squared loss) * huber (a hybrid of squared error for relatively small errors and absolute error for \ relatively large ones, and we estimate the scale parameter from training data) This supports multiple types of regularization: * none (a.k.a. ordinary least squares) * L2 (ridge regression) * L1 (Lasso) * L2 + L1 (elastic net) Note: Fitting with huber loss only supports none and L2 regularization. 
>>> from pyspark.ml.linalg import Vectors >>> df = spark.createDataFrame([ ... (1.0, 2.0, Vectors.dense(1.0)), ... (0.0, 2.0, Vectors.sparse(1, [], []))], ["label", "weight", "features"]) >>> lr = LinearRegression(regParam=0.0, solver="normal", weightCol="weight") >>> lr.setMaxIter(5) LinearRegression... >>> lr.getMaxIter() 5 >>> lr.setRegParam(0.1) LinearRegression... >>> lr.getRegParam() 0.1 >>> lr.setRegParam(0.0) LinearRegression... >>> model = lr.fit(df) >>> model.setFeaturesCol("features") LinearRegressionModel... >>> model.setPredictionCol("newPrediction") LinearRegressionModel... >>> model.getMaxIter() 5 >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"]) >>> abs(model.predict(test0.head().features) - (-1.0)) < 0.001 True >>> abs(model.transform(test0).head().newPrediction - (-1.0)) < 0.001 True >>> abs(model.coefficients[0] - 1.0) < 0.001 True >>> abs(model.intercept - 0.0) < 0.001 True >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"]) >>> abs(model.transform(test1).head().newPrediction - 1.0) < 0.001 True >>> lr.setParams("vector") Traceback (most recent call last): ... TypeError: Method setParams forces keyword arguments. >>> lr_path = temp_path + "/lr" >>> lr.save(lr_path) >>> lr2 = LinearRegression.load(lr_path) >>> lr2.getMaxIter() 5 >>> model_path = temp_path + "/lr_model" >>> model.save(model_path) >>> model2 = LinearRegressionModel.load(model_path) >>> model.coefficients[0] == model2.coefficients[0] True >>> model.intercept == model2.intercept True >>> model.numFeatures 1 >>> model.write().format("pmml").save(model_path + "_2") .. versionadded:: 1.4.0 """ @keyword_only def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, standardization=True, solver="auto", weightCol=None, aggregationDepth=2, loss="squaredError", epsilon=1.35): """ __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \ standardization=True, solver="auto", weightCol=None, aggregationDepth=2, \ loss="squaredError", epsilon=1.35) """ super(LinearRegression, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.regression.LinearRegression", self.uid) self._setDefault(maxIter=100, regParam=0.0, tol=1e-6, loss="squaredError", epsilon=1.35) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.4.0") def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, standardization=True, solver="auto", weightCol=None, aggregationDepth=2, loss="squaredError", epsilon=1.35): """ setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \ standardization=True, solver="auto", weightCol=None, aggregationDepth=2, \ loss="squaredError", epsilon=1.35) Sets params for linear regression. """ kwargs = self._input_kwargs return self._set(**kwargs) def _create_model(self, java_model): return LinearRegressionModel(java_model) @since("2.3.0") def setEpsilon(self, value): """ Sets the value of :py:attr:`epsilon`. """ return self._set(epsilon=value) def setMaxIter(self, value): """ Sets the value of :py:attr:`maxIter`. 
""" return self._set(maxIter=value) def setRegParam(self, value): """ Sets the value of :py:attr:`regParam`. """ return self._set(regParam=value) def setTol(self, value): """ Sets the value of :py:attr:`tol`. """ return self._set(tol=value) def setElasticNetParam(self, value): """ Sets the value of :py:attr:`elasticNetParam`. """ return self._set(elasticNetParam=value) def setFitIntercept(self, value): """ Sets the value of :py:attr:`fitIntercept`. """ return self._set(fitIntercept=value) def setStandardization(self, value): """ Sets the value of :py:attr:`standardization`. """ return self._set(standardization=value) def setWeightCol(self, value): """ Sets the value of :py:attr:`weightCol`. """ return self._set(weightCol=value) def setSolver(self, value): """ Sets the value of :py:attr:`solver`. """ return self._set(solver=value) def setAggregationDepth(self, value): """ Sets the value of :py:attr:`aggregationDepth`. """ return self._set(aggregationDepth=value) def setLoss(self, value): """ Sets the value of :py:attr:`loss`. """ return self._set(lossType=value) class LinearRegressionModel(JavaPredictionModel, _LinearRegressionParams, GeneralJavaMLWritable, JavaMLReadable, HasTrainingSummary): """ Model fitted by :class:`LinearRegression`. .. versionadded:: 1.4.0 """ @property @since("2.0.0") def coefficients(self): """ Model coefficients. """ return self._call_java("coefficients") @property @since("1.4.0") def intercept(self): """ Model intercept. """ return self._call_java("intercept") @property @since("2.3.0") def scale(self): r""" The value by which :math:`\|y - X'w\|` is scaled down when loss is "huber", otherwise 1.0. """ return self._call_java("scale") @property @since("2.0.0") def summary(self): """ Gets summary (e.g. residuals, mse, r-squared ) of model on training set. An exception is thrown if `trainingSummary is None`. """ if self.hasSummary: return LinearRegressionTrainingSummary(super(LinearRegressionModel, self).summary) else: raise RuntimeError("No training summary available for this %s" % self.__class__.__name__) @since("2.0.0") def evaluate(self, dataset): """ Evaluates the model on a test dataset. :param dataset: Test dataset to evaluate model on, where dataset is an instance of :py:class:`pyspark.sql.DataFrame` """ if not isinstance(dataset, DataFrame): raise ValueError("dataset must be a DataFrame but got %s." % type(dataset)) java_lr_summary = self._call_java("evaluate", dataset) return LinearRegressionSummary(java_lr_summary) class LinearRegressionSummary(JavaWrapper): """ Linear regression results evaluated on a dataset. .. versionadded:: 2.0.0 """ @property @since("2.0.0") def predictions(self): """ Dataframe outputted by the model's `transform` method. """ return self._call_java("predictions") @property @since("2.0.0") def predictionCol(self): """ Field in "predictions" which gives the predicted value of the label at each instance. """ return self._call_java("predictionCol") @property @since("2.0.0") def labelCol(self): """ Field in "predictions" which gives the true label of each instance. """ return self._call_java("labelCol") @property @since("2.0.0") def featuresCol(self): """ Field in "predictions" which gives the features of each instance as a vector. """ return self._call_java("featuresCol") @property @since("2.0.0") def explainedVariance(self): r""" Returns the explained variance regression score. explainedVariance = :math:`1 - \frac{variance(y - \hat{y})}{variance(y)}` .. 
seealso:: `Wikipedia explain variation <http://en.wikipedia.org/wiki/Explained_variation>`_ .. note:: This ignores instance weights (setting all to 1.0) from `LinearRegression.weightCol`. This will change in later Spark versions. """ return self._call_java("explainedVariance") @property @since("2.0.0") def meanAbsoluteError(self): """ Returns the mean absolute error, which is a risk function corresponding to the expected value of the absolute error loss or l1-norm loss. .. note:: This ignores instance weights (setting all to 1.0) from `LinearRegression.weightCol`. This will change in later Spark versions. """ return self._call_java("meanAbsoluteError") @property @since("2.0.0") def meanSquaredError(self): """ Returns the mean squared error, which is a risk function corresponding to the expected value of the squared error loss or quadratic loss. .. note:: This ignores instance weights (setting all to 1.0) from `LinearRegression.weightCol`. This will change in later Spark versions. """ return self._call_java("meanSquaredError") @property @since("2.0.0") def rootMeanSquaredError(self): """ Returns the root mean squared error, which is defined as the square root of the mean squared error. .. note:: This ignores instance weights (setting all to 1.0) from `LinearRegression.weightCol`. This will change in later Spark versions. """ return self._call_java("rootMeanSquaredError") @property @since("2.0.0") def r2(self): """ Returns R^2, the coefficient of determination. .. seealso:: `Wikipedia coefficient of determination <http://en.wikipedia.org/wiki/Coefficient_of_determination>`_ .. note:: This ignores instance weights (setting all to 1.0) from `LinearRegression.weightCol`. This will change in later Spark versions. """ return self._call_java("r2") @property @since("2.4.0") def r2adj(self): """ Returns Adjusted R^2, the adjusted coefficient of determination. .. seealso:: `Wikipedia coefficient of determination, Adjusted R^2 <https://en.wikipedia.org/wiki/Coefficient_of_determination#Adjusted_R2>`_ .. note:: This ignores instance weights (setting all to 1.0) from `LinearRegression.weightCol`. This will change in later Spark versions. """ return self._call_java("r2adj") @property @since("2.0.0") def residuals(self): """ Residuals (label - predicted value) """ return self._call_java("residuals") @property @since("2.0.0") def numInstances(self): """ Number of instances in DataFrame predictions """ return self._call_java("numInstances") @property @since("2.2.0") def degreesOfFreedom(self): """ Degrees of freedom. """ return self._call_java("degreesOfFreedom") @property @since("2.0.0") def devianceResiduals(self): """ The weighted residuals, the usual residuals rescaled by the square root of the instance weights. """ return self._call_java("devianceResiduals") @property @since("2.0.0") def coefficientStandardErrors(self): """ Standard error of estimated coefficients and intercept. This value is only available when using the "normal" solver. If :py:attr:`LinearRegression.fitIntercept` is set to True, then the last element returned corresponds to the intercept. .. seealso:: :py:attr:`LinearRegression.solver` """ return self._call_java("coefficientStandardErrors") @property @since("2.0.0") def tValues(self): """ T-statistic of estimated coefficients and intercept. This value is only available when using the "normal" solver. If :py:attr:`LinearRegression.fitIntercept` is set to True, then the last element returned corresponds to the intercept. .. 
seealso:: :py:attr:`LinearRegression.solver` """ return self._call_java("tValues") @property @since("2.0.0") def pValues(self): """ Two-sided p-value of estimated coefficients and intercept. This value is only available when using the "normal" solver. If :py:attr:`LinearRegression.fitIntercept` is set to True, then the last element returned corresponds to the intercept. .. seealso:: :py:attr:`LinearRegression.solver` """ return self._call_java("pValues") @inherit_doc class LinearRegressionTrainingSummary(LinearRegressionSummary): """ Linear regression training results. Currently, the training summary ignores the training weights except for the objective trace. .. versionadded:: 2.0.0 """ @property @since("2.0.0") def objectiveHistory(self): """ Objective function (scaled loss + regularization) at each iteration. This value is only available when using the "l-bfgs" solver. .. seealso:: :py:attr:`LinearRegression.solver` """ return self._call_java("objectiveHistory") @property @since("2.0.0") def totalIterations(self): """ Number of training iterations until termination. This value is only available when using the "l-bfgs" solver. .. seealso:: :py:attr:`LinearRegression.solver` """ return self._call_java("totalIterations") class _IsotonicRegressionParams(HasFeaturesCol, HasLabelCol, HasPredictionCol, HasWeightCol): """ Params for :py:class:`IsotonicRegression` and :py:class:`IsotonicRegressionModel`. .. versionadded:: 3.0.0 """ isotonic = Param( Params._dummy(), "isotonic", "whether the output sequence should be isotonic/increasing (true) or" + "antitonic/decreasing (false).", typeConverter=TypeConverters.toBoolean) featureIndex = Param( Params._dummy(), "featureIndex", "The index of the feature if featuresCol is a vector column, no effect otherwise.", typeConverter=TypeConverters.toInt) def getIsotonic(self): """ Gets the value of isotonic or its default value. """ return self.getOrDefault(self.isotonic) def getFeatureIndex(self): """ Gets the value of featureIndex or its default value. """ return self.getOrDefault(self.featureIndex) @inherit_doc class IsotonicRegression(JavaEstimator, _IsotonicRegressionParams, HasWeightCol, JavaMLWritable, JavaMLReadable): """ Currently implemented using parallelized pool adjacent violators algorithm. Only univariate (single feature) algorithm supported. >>> from pyspark.ml.linalg import Vectors >>> df = spark.createDataFrame([ ... (1.0, Vectors.dense(1.0)), ... (0.0, Vectors.sparse(1, [], []))], ["label", "features"]) >>> ir = IsotonicRegression() >>> model = ir.fit(df) >>> model.setFeaturesCol("features") IsotonicRegressionModel... >>> model.numFeatures() 1 >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"]) >>> model.transform(test0).head().prediction 0.0 >>> model.predict(test0.head().features[model.getFeatureIndex()]) 0.0 >>> model.boundaries DenseVector([0.0, 1.0]) >>> ir_path = temp_path + "/ir" >>> ir.save(ir_path) >>> ir2 = IsotonicRegression.load(ir_path) >>> ir2.getIsotonic() True >>> model_path = temp_path + "/ir_model" >>> model.save(model_path) >>> model2 = IsotonicRegressionModel.load(model_path) >>> model.boundaries == model2.boundaries True >>> model.predictions == model2.predictions True .. 
versionadded:: 1.6.0 """ @keyword_only def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", weightCol=None, isotonic=True, featureIndex=0): """ __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ weightCol=None, isotonic=True, featureIndex=0): """ super(IsotonicRegression, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.regression.IsotonicRegression", self.uid) self._setDefault(isotonic=True, featureIndex=0) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", weightCol=None, isotonic=True, featureIndex=0): """ setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ weightCol=None, isotonic=True, featureIndex=0): Set the params for IsotonicRegression. """ kwargs = self._input_kwargs return self._set(**kwargs) def _create_model(self, java_model): return IsotonicRegressionModel(java_model) def setIsotonic(self, value): """ Sets the value of :py:attr:`isotonic`. """ return self._set(isotonic=value) def setFeatureIndex(self, value): """ Sets the value of :py:attr:`featureIndex`. """ return self._set(featureIndex=value) @since("1.6.0") def setFeaturesCol(self, value): """ Sets the value of :py:attr:`featuresCol`. """ return self._set(featuresCol=value) @since("1.6.0") def setPredictionCol(self, value): """ Sets the value of :py:attr:`predictionCol`. """ return self._set(predictionCol=value) @since("1.6.0") def setLabelCol(self, value): """ Sets the value of :py:attr:`labelCol`. """ return self._set(labelCol=value) @since("1.6.0") def setWeightCol(self, value): """ Sets the value of :py:attr:`weightCol`. """ return self._set(weightCol=value) class IsotonicRegressionModel(JavaModel, _IsotonicRegressionParams, JavaMLWritable, JavaMLReadable): """ Model fitted by :class:`IsotonicRegression`. .. versionadded:: 1.6.0 """ @since("3.0.0") def setFeaturesCol(self, value): """ Sets the value of :py:attr:`featuresCol`. """ return self._set(featuresCol=value) @since("3.0.0") def setPredictionCol(self, value): """ Sets the value of :py:attr:`predictionCol`. """ return self._set(predictionCol=value) def setFeatureIndex(self, value): """ Sets the value of :py:attr:`featureIndex`. """ return self._set(featureIndex=value) @property @since("1.6.0") def boundaries(self): """ Boundaries in increasing order for which predictions are known. """ return self._call_java("boundaries") @property @since("1.6.0") def predictions(self): """ Predictions associated with the boundaries at the same index, monotone because of isotonic regression. """ return self._call_java("predictions") @since("3.0.0") def numFeatures(self): """ Returns the number of features the model was trained on. If unknown, returns -1 """ return self._call_java("numFeatures") @since("3.0.0") def predict(self, value): """ Predict label for the given features. """ return self._call_java("predict", value) class _DecisionTreeRegressorParams(_DecisionTreeParams, _TreeRegressorParams, HasVarianceCol): """ Params for :py:class:`DecisionTreeRegressor` and :py:class:`DecisionTreeRegressionModel`. .. versionadded:: 3.0.0 """ pass @inherit_doc class DecisionTreeRegressor(JavaPredictor, _DecisionTreeRegressorParams, JavaMLWritable, JavaMLReadable): """ `Decision tree <http://en.wikipedia.org/wiki/Decision_tree_learning>`_ learning algorithm for regression. It supports both continuous and categorical features. 
>>> from pyspark.ml.linalg import Vectors >>> df = spark.createDataFrame([ ... (1.0, Vectors.dense(1.0)), ... (0.0, Vectors.sparse(1, [], []))], ["label", "features"]) >>> dt = DecisionTreeRegressor(maxDepth=2) >>> dt.setVarianceCol("variance") DecisionTreeRegressor... >>> model = dt.fit(df) >>> model.getVarianceCol() 'variance' >>> model.setLeafCol("leafId") DecisionTreeRegressionModel... >>> model.depth 1 >>> model.numNodes 3 >>> model.featureImportances SparseVector(1, {0: 1.0}) >>> model.numFeatures 1 >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"]) >>> model.predict(test0.head().features) 0.0 >>> result = model.transform(test0).head() >>> result.prediction 0.0 >>> model.predictLeaf(test0.head().features) 0.0 >>> result.leafId 0.0 >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"]) >>> model.transform(test1).head().prediction 1.0 >>> dtr_path = temp_path + "/dtr" >>> dt.save(dtr_path) >>> dt2 = DecisionTreeRegressor.load(dtr_path) >>> dt2.getMaxDepth() 2 >>> model_path = temp_path + "/dtr_model" >>> model.save(model_path) >>> model2 = DecisionTreeRegressionModel.load(model_path) >>> model.numNodes == model2.numNodes True >>> model.depth == model2.depth True >>> model.transform(test1).head().variance 0.0 >>> df3 = spark.createDataFrame([ ... (1.0, 0.2, Vectors.dense(1.0)), ... (1.0, 0.8, Vectors.dense(1.0)), ... (0.0, 1.0, Vectors.sparse(1, [], []))], ["label", "weight", "features"]) >>> dt3 = DecisionTreeRegressor(maxDepth=2, weightCol="weight", varianceCol="variance") >>> model3 = dt3.fit(df3) >>> print(model3.toDebugString) DecisionTreeRegressionModel...depth=1, numNodes=3... .. versionadded:: 1.4.0 """ @keyword_only def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance", seed=None, varianceCol=None, weightCol=None, leafCol="", minWeightFractionPerNode=0.0): """ __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \ maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \ impurity="variance", seed=None, varianceCol=None, weightCol=None, \ leafCol="", minWeightFractionPerNode=0.0) """ super(DecisionTreeRegressor, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.regression.DecisionTreeRegressor", self.uid) self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance", leafCol="", minWeightFractionPerNode=0.0) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.4.0") def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance", seed=None, varianceCol=None, weightCol=None, leafCol="", minWeightFractionPerNode=0.0): """ setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \ maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \ impurity="variance", seed=None, varianceCol=None, weightCol=None, \ leafCol="", minWeightFractionPerNode=0.0) Sets params for the DecisionTreeRegressor. 
""" kwargs = self._input_kwargs return self._set(**kwargs) def _create_model(self, java_model): return DecisionTreeRegressionModel(java_model) @since("1.4.0") def setMaxDepth(self, value): """ Sets the value of :py:attr:`maxDepth`. """ return self._set(maxDepth=value) @since("1.4.0") def setMaxBins(self, value): """ Sets the value of :py:attr:`maxBins`. """ return self._set(maxBins=value) @since("1.4.0") def setMinInstancesPerNode(self, value): """ Sets the value of :py:attr:`minInstancesPerNode`. """ return self._set(minInstancesPerNode=value) @since("3.0.0") def setMinWeightFractionPerNode(self, value): """ Sets the value of :py:attr:`minWeightFractionPerNode`. """ return self._set(minWeightFractionPerNode=value) @since("1.4.0") def setMinInfoGain(self, value): """ Sets the value of :py:attr:`minInfoGain`. """ return self._set(minInfoGain=value) @since("1.4.0") def setMaxMemoryInMB(self, value): """ Sets the value of :py:attr:`maxMemoryInMB`. """ return self._set(maxMemoryInMB=value) @since("1.4.0") def setCacheNodeIds(self, value): """ Sets the value of :py:attr:`cacheNodeIds`. """ return self._set(cacheNodeIds=value) @since("1.4.0") def setImpurity(self, value): """ Sets the value of :py:attr:`impurity`. """ return self._set(impurity=value) @since("1.4.0") def setCheckpointInterval(self, value): """ Sets the value of :py:attr:`checkpointInterval`. """ return self._set(checkpointInterval=value) def setSeed(self, value): """ Sets the value of :py:attr:`seed`. """ return self._set(seed=value) @since("3.0.0") def setWeightCol(self, value): """ Sets the value of :py:attr:`weightCol`. """ return self._set(weightCol=value) @since("2.0.0") def setVarianceCol(self, value): """ Sets the value of :py:attr:`varianceCol`. """ return self._set(varianceCol=value) @inherit_doc class DecisionTreeRegressionModel(_DecisionTreeModel, _DecisionTreeRegressorParams, JavaMLWritable, JavaMLReadable): """ Model fitted by :class:`DecisionTreeRegressor`. .. versionadded:: 1.4.0 """ @since("3.0.0") def setVarianceCol(self, value): """ Sets the value of :py:attr:`varianceCol`. """ return self._set(varianceCol=value) @property @since("2.0.0") def featureImportances(self): """ Estimate of the importance of each feature. This generalizes the idea of "Gini" importance to other losses, following the explanation of Gini importance from "Random Forests" documentation by Leo Breiman and Adele Cutler, and following the implementation from scikit-learn. This feature importance is calculated as follows: - importance(feature j) = sum (over nodes which split on feature j) of the gain, where gain is scaled by the number of instances passing through node - Normalize importances for tree to sum to 1. .. note:: Feature importance for single decision trees can have high variance due to correlated predictor variables. Consider using a :py:class:`RandomForestRegressor` to determine feature importance instead. """ return self._call_java("featureImportances") class _RandomForestRegressorParams(_RandomForestParams, _TreeRegressorParams): """ Params for :py:class:`RandomForestRegressor` and :py:class:`RandomForestRegressionModel`. .. versionadded:: 3.0.0 """ pass @inherit_doc class RandomForestRegressor(JavaPredictor, _RandomForestRegressorParams, JavaMLWritable, JavaMLReadable): """ `Random Forest <http://en.wikipedia.org/wiki/Random_forest>`_ learning algorithm for regression. It supports both continuous and categorical features. 
>>> from numpy import allclose >>> from pyspark.ml.linalg import Vectors >>> df = spark.createDataFrame([ ... (1.0, Vectors.dense(1.0)), ... (0.0, Vectors.sparse(1, [], []))], ["label", "features"]) >>> rf = RandomForestRegressor(numTrees=2, maxDepth=2) >>> rf.getMinWeightFractionPerNode() 0.0 >>> rf.setSeed(42) RandomForestRegressor... >>> model = rf.fit(df) >>> model.getSeed() 42 >>> model.setLeafCol("leafId") RandomForestRegressionModel... >>> model.featureImportances SparseVector(1, {0: 1.0}) >>> allclose(model.treeWeights, [1.0, 1.0]) True >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"]) >>> model.predict(test0.head().features) 0.0 >>> model.predictLeaf(test0.head().features) DenseVector([0.0, 0.0]) >>> result = model.transform(test0).head() >>> result.prediction 0.0 >>> result.leafId DenseVector([0.0, 0.0]) >>> model.numFeatures 1 >>> model.trees [DecisionTreeRegressionModel...depth=..., DecisionTreeRegressionModel...] >>> model.getNumTrees 2 >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"]) >>> model.transform(test1).head().prediction 0.5 >>> rfr_path = temp_path + "/rfr" >>> rf.save(rfr_path) >>> rf2 = RandomForestRegressor.load(rfr_path) >>> rf2.getNumTrees() 2 >>> model_path = temp_path + "/rfr_model" >>> model.save(model_path) >>> model2 = RandomForestRegressionModel.load(model_path) >>> model.featureImportances == model2.featureImportances True .. versionadded:: 1.4.0 """ @keyword_only def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0, weightCol=None): """ __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \ maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \ impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, \ featureSubsetStrategy="auto", leafCol=", minWeightFractionPerNode=0.0", \ weightCol=None) """ super(RandomForestRegressor, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.regression.RandomForestRegressor", self.uid) self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance", subsamplingRate=1.0, numTrees=20, featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.4.0") def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0, weightCol=None): """ setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \ maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \ impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, \ featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0, \ weightCol=None) Sets params for linear regression. 
""" kwargs = self._input_kwargs return self._set(**kwargs) def _create_model(self, java_model): return RandomForestRegressionModel(java_model) def setMaxDepth(self, value): """ Sets the value of :py:attr:`maxDepth`. """ return self._set(maxDepth=value) def setMaxBins(self, value): """ Sets the value of :py:attr:`maxBins`. """ return self._set(maxBins=value) def setMinInstancesPerNode(self, value): """ Sets the value of :py:attr:`minInstancesPerNode`. """ return self._set(minInstancesPerNode=value) def setMinInfoGain(self, value): """ Sets the value of :py:attr:`minInfoGain`. """ return self._set(minInfoGain=value) def setMaxMemoryInMB(self, value): """ Sets the value of :py:attr:`maxMemoryInMB`. """ return self._set(maxMemoryInMB=value) def setCacheNodeIds(self, value): """ Sets the value of :py:attr:`cacheNodeIds`. """ return self._set(cacheNodeIds=value) @since("1.4.0") def setImpurity(self, value): """ Sets the value of :py:attr:`impurity`. """ return self._set(impurity=value) @since("1.4.0") def setNumTrees(self, value): """ Sets the value of :py:attr:`numTrees`. """ return self._set(numTrees=value) @since("1.4.0") def setSubsamplingRate(self, value): """ Sets the value of :py:attr:`subsamplingRate`. """ return self._set(subsamplingRate=value) @since("2.4.0") def setFeatureSubsetStrategy(self, value): """ Sets the value of :py:attr:`featureSubsetStrategy`. """ return self._set(featureSubsetStrategy=value) def setCheckpointInterval(self, value): """ Sets the value of :py:attr:`checkpointInterval`. """ return self._set(checkpointInterval=value) def setSeed(self, value): """ Sets the value of :py:attr:`seed`. """ return self._set(seed=value) @since("3.0.0") def setWeightCol(self, value): """ Sets the value of :py:attr:`weightCol`. """ return self._set(weightCol=value) @since("3.0.0") def setMinWeightFractionPerNode(self, value): """ Sets the value of :py:attr:`minWeightFractionPerNode`. """ return self._set(minWeightFractionPerNode=value) class RandomForestRegressionModel(_TreeEnsembleModel, _RandomForestRegressorParams, JavaMLWritable, JavaMLReadable): """ Model fitted by :class:`RandomForestRegressor`. .. versionadded:: 1.4.0 """ @property @since("2.0.0") def trees(self): """Trees in this ensemble. Warning: These have null parent Estimators.""" return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))] @property @since("2.0.0") def featureImportances(self): """ Estimate of the importance of each feature. Each feature's importance is the average of its importance across all trees in the ensemble The importance vector is normalized to sum to 1. This method is suggested by Hastie et al. (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.) and follows the implementation from scikit-learn. .. seealso:: :py:attr:`DecisionTreeRegressionModel.featureImportances` """ return self._call_java("featureImportances") class _GBTRegressorParams(_GBTParams, _TreeRegressorParams): """ Params for :py:class:`GBTRegressor` and :py:class:`GBTRegressorModel`. .. versionadded:: 3.0.0 """ supportedLossTypes = ["squared", "absolute"] lossType = Param(Params._dummy(), "lossType", "Loss function which GBT tries to minimize (case-insensitive). " + "Supported options: " + ", ".join(supportedLossTypes), typeConverter=TypeConverters.toString) @since("1.4.0") def getLossType(self): """ Gets the value of lossType or its default value. 
""" return self.getOrDefault(self.lossType) @inherit_doc class GBTRegressor(JavaPredictor, _GBTRegressorParams, JavaMLWritable, JavaMLReadable): """ `Gradient-Boosted Trees (GBTs) <http://en.wikipedia.org/wiki/Gradient_boosting>`_ learning algorithm for regression. It supports both continuous and categorical features. >>> from numpy import allclose >>> from pyspark.ml.linalg import Vectors >>> df = spark.createDataFrame([ ... (1.0, Vectors.dense(1.0)), ... (0.0, Vectors.sparse(1, [], []))], ["label", "features"]) >>> gbt = GBTRegressor(maxDepth=2, seed=42, leafCol="leafId") >>> gbt.setMaxIter(5) GBTRegressor... >>> gbt.setMinWeightFractionPerNode(0.049) GBTRegressor... >>> gbt.getMaxIter() 5 >>> print(gbt.getImpurity()) variance >>> print(gbt.getFeatureSubsetStrategy()) all >>> model = gbt.fit(df) >>> model.featureImportances SparseVector(1, {0: 1.0}) >>> model.numFeatures 1 >>> allclose(model.treeWeights, [1.0, 0.1, 0.1, 0.1, 0.1]) True >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"]) >>> model.predict(test0.head().features) 0.0 >>> model.predictLeaf(test0.head().features) DenseVector([0.0, 0.0, 0.0, 0.0, 0.0]) >>> result = model.transform(test0).head() >>> result.prediction 0.0 >>> result.leafId DenseVector([0.0, 0.0, 0.0, 0.0, 0.0]) >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"]) >>> model.transform(test1).head().prediction 1.0 >>> gbtr_path = temp_path + "gbtr" >>> gbt.save(gbtr_path) >>> gbt2 = GBTRegressor.load(gbtr_path) >>> gbt2.getMaxDepth() 2 >>> model_path = temp_path + "gbtr_model" >>> model.save(model_path) >>> model2 = GBTRegressionModel.load(model_path) >>> model.featureImportances == model2.featureImportances True >>> model.treeWeights == model2.treeWeights True >>> model.trees [DecisionTreeRegressionModel...depth=..., DecisionTreeRegressionModel...] >>> validation = spark.createDataFrame([(0.0, Vectors.dense(-1.0))], ... ["label", "features"]) >>> model.evaluateEachIteration(validation, "squared") [0.0, 0.0, 0.0, 0.0, 0.0] >>> gbt = gbt.setValidationIndicatorCol("validationIndicator") >>> gbt.getValidationIndicatorCol() 'validationIndicator' >>> gbt.getValidationTol() 0.01 .. 
versionadded:: 1.4.0 """ @keyword_only def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, impurity="variance", featureSubsetStrategy="all", validationTol=0.01, validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, weightCol=None): """ __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \ maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, \ checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, \ impurity="variance", featureSubsetStrategy="all", validationTol=0.01, \ validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, weightCol=None) """ super(GBTRegressor, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.regression.GBTRegressor", self.uid) self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, impurity="variance", featureSubsetStrategy="all", validationTol=0.01, leafCol="", minWeightFractionPerNode=0.0) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.4.0") def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, impuriy="variance", featureSubsetStrategy="all", validationTol=0.01, validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, weightCol=None): """ setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \ maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, \ checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, \ impurity="variance", featureSubsetStrategy="all", validationTol=0.01, \ validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, \ weightCol=None) Sets params for Gradient Boosted Tree Regression. """ kwargs = self._input_kwargs return self._set(**kwargs) def _create_model(self, java_model): return GBTRegressionModel(java_model) @since("1.4.0") def setMaxDepth(self, value): """ Sets the value of :py:attr:`maxDepth`. """ return self._set(maxDepth=value) @since("1.4.0") def setMaxBins(self, value): """ Sets the value of :py:attr:`maxBins`. """ return self._set(maxBins=value) @since("1.4.0") def setMinInstancesPerNode(self, value): """ Sets the value of :py:attr:`minInstancesPerNode`. """ return self._set(minInstancesPerNode=value) @since("1.4.0") def setMinInfoGain(self, value): """ Sets the value of :py:attr:`minInfoGain`. """ return self._set(minInfoGain=value) @since("1.4.0") def setMaxMemoryInMB(self, value): """ Sets the value of :py:attr:`maxMemoryInMB`. """ return self._set(maxMemoryInMB=value) @since("1.4.0") def setCacheNodeIds(self, value): """ Sets the value of :py:attr:`cacheNodeIds`. """ return self._set(cacheNodeIds=value) @since("1.4.0") def setImpurity(self, value): """ Sets the value of :py:attr:`impurity`. 
""" return self._set(impurity=value) @since("1.4.0") def setLossType(self, value): """ Sets the value of :py:attr:`lossType`. """ return self._set(lossType=value) @since("1.4.0") def setSubsamplingRate(self, value): """ Sets the value of :py:attr:`subsamplingRate`. """ return self._set(subsamplingRate=value) @since("2.4.0") def setFeatureSubsetStrategy(self, value): """ Sets the value of :py:attr:`featureSubsetStrategy`. """ return self._set(featureSubsetStrategy=value) @since("3.0.0") def setValidationIndicatorCol(self, value): """ Sets the value of :py:attr:`validationIndicatorCol`. """ return self._set(validationIndicatorCol=value) @since("1.4.0") def setMaxIter(self, value): """ Sets the value of :py:attr:`maxIter`. """ return self._set(maxIter=value) @since("1.4.0") def setCheckpointInterval(self, value): """ Sets the value of :py:attr:`checkpointInterval`. """ return self._set(checkpointInterval=value) @since("1.4.0") def setSeed(self, value): """ Sets the value of :py:attr:`seed`. """ return self._set(seed=value) @since("1.4.0") def setStepSize(self, value): """ Sets the value of :py:attr:`stepSize`. """ return self._set(stepSize=value) @since("3.0.0") def setWeightCol(self, value): """ Sets the value of :py:attr:`weightCol`. """ return self._set(weightCol=value) @since("3.0.0") def setMinWeightFractionPerNode(self, value): """ Sets the value of :py:attr:`minWeightFractionPerNode`. """ return self._set(minWeightFractionPerNode=value) class GBTRegressionModel(_TreeEnsembleModel, _GBTRegressorParams, JavaMLWritable, JavaMLReadable): """ Model fitted by :class:`GBTRegressor`. .. versionadded:: 1.4.0 """ @property @since("2.0.0") def featureImportances(self): """ Estimate of the importance of each feature. Each feature's importance is the average of its importance across all trees in the ensemble The importance vector is normalized to sum to 1. This method is suggested by Hastie et al. (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.) and follows the implementation from scikit-learn. .. seealso:: :py:attr:`DecisionTreeRegressionModel.featureImportances` """ return self._call_java("featureImportances") @property @since("2.0.0") def trees(self): """Trees in this ensemble. Warning: These have null parent Estimators.""" return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))] @since("2.4.0") def evaluateEachIteration(self, dataset, loss): """ Method to compute error or loss for every iteration of gradient boosting. :param dataset: Test dataset to evaluate model on, where dataset is an instance of :py:class:`pyspark.sql.DataFrame` :param loss: The loss function used to compute error. Supported options: squared, absolute """ return self._call_java("evaluateEachIteration", dataset, loss) class _AFTSurvivalRegressionParams(_JavaPredictorParams, HasMaxIter, HasTol, HasFitIntercept, HasAggregationDepth): """ Params for :py:class:`AFTSurvivalRegression` and :py:class:`AFTSurvivalRegressionModel`. .. versionadded:: 3.0.0 """ censorCol = Param( Params._dummy(), "censorCol", "censor column name. The value of this column could be 0 or 1. " + "If the value is 1, it means the event has occurred i.e. " + "uncensored; otherwise censored.", typeConverter=TypeConverters.toString) quantileProbabilities = Param( Params._dummy(), "quantileProbabilities", "quantile probabilities array. 
Values of the quantile probabilities array " + "should be in the range (0, 1) and the array should be non-empty.", typeConverter=TypeConverters.toListFloat) quantilesCol = Param( Params._dummy(), "quantilesCol", "quantiles column name. This column will output quantiles of " + "corresponding quantileProbabilities if it is set.", typeConverter=TypeConverters.toString) @since("1.6.0") def getCensorCol(self): """ Gets the value of censorCol or its default value. """ return self.getOrDefault(self.censorCol) @since("1.6.0") def getQuantileProbabilities(self): """ Gets the value of quantileProbabilities or its default value. """ return self.getOrDefault(self.quantileProbabilities) @since("1.6.0") def getQuantilesCol(self): """ Gets the value of quantilesCol or its default value. """ return self.getOrDefault(self.quantilesCol) @inherit_doc class AFTSurvivalRegression(JavaPredictor, _AFTSurvivalRegressionParams, JavaMLWritable, JavaMLReadable): """ Accelerated Failure Time (AFT) Model Survival Regression Fit a parametric AFT survival regression model based on the Weibull distribution of the survival time. .. seealso:: `AFT Model <https://en.wikipedia.org/wiki/Accelerated_failure_time_model>`_ >>> from pyspark.ml.linalg import Vectors >>> df = spark.createDataFrame([ ... (1.0, Vectors.dense(1.0), 1.0), ... (1e-40, Vectors.sparse(1, [], []), 0.0)], ["label", "features", "censor"]) >>> aftsr = AFTSurvivalRegression() >>> aftsr.setMaxIter(10) AFTSurvivalRegression... >>> aftsr.getMaxIter() 10 >>> aftsr.clear(aftsr.maxIter) >>> model = aftsr.fit(df) >>> model.setFeaturesCol("features") AFTSurvivalRegressionModel... >>> model.predict(Vectors.dense(6.3)) 1.0 >>> model.predictQuantiles(Vectors.dense(6.3)) DenseVector([0.0101, 0.0513, 0.1054, 0.2877, 0.6931, 1.3863, 2.3026, 2.9957, 4.6052]) >>> model.transform(df).show() +-------+---------+------+----------+ | label| features|censor|prediction| +-------+---------+------+----------+ | 1.0| [1.0]| 1.0| 1.0| |1.0E-40|(1,[],[])| 0.0| 1.0| +-------+---------+------+----------+ ... >>> aftsr_path = temp_path + "/aftsr" >>> aftsr.save(aftsr_path) >>> aftsr2 = AFTSurvivalRegression.load(aftsr_path) >>> aftsr2.getMaxIter() 100 >>> model_path = temp_path + "/aftsr_model" >>> model.save(model_path) >>> model2 = AFTSurvivalRegressionModel.load(model_path) >>> model.coefficients == model2.coefficients True >>> model.intercept == model2.intercept True >>> model.scale == model2.scale True .. 
versionadded:: 1.6.0 """ @keyword_only def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", quantileProbabilities=list([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]), quantilesCol=None, aggregationDepth=2): """ __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", \ quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], \ quantilesCol=None, aggregationDepth=2) """ super(AFTSurvivalRegression, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.regression.AFTSurvivalRegression", self.uid) self._setDefault(censorCol="censor", quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], maxIter=100, tol=1E-6) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.6.0") def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", quantileProbabilities=list([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]), quantilesCol=None, aggregationDepth=2): """ setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", \ quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], \ quantilesCol=None, aggregationDepth=2): """ kwargs = self._input_kwargs return self._set(**kwargs) def _create_model(self, java_model): return AFTSurvivalRegressionModel(java_model) @since("1.6.0") def setCensorCol(self, value): """ Sets the value of :py:attr:`censorCol`. """ return self._set(censorCol=value) @since("1.6.0") def setQuantileProbabilities(self, value): """ Sets the value of :py:attr:`quantileProbabilities`. """ return self._set(quantileProbabilities=value) @since("1.6.0") def setQuantilesCol(self, value): """ Sets the value of :py:attr:`quantilesCol`. """ return self._set(quantilesCol=value) @since("1.6.0") def setMaxIter(self, value): """ Sets the value of :py:attr:`maxIter`. """ return self._set(maxIter=value) @since("1.6.0") def setTol(self, value): """ Sets the value of :py:attr:`tol`. """ return self._set(tol=value) @since("1.6.0") def setFitIntercept(self, value): """ Sets the value of :py:attr:`fitIntercept`. """ return self._set(fitIntercept=value) @since("2.1.0") def setAggregationDepth(self, value): """ Sets the value of :py:attr:`aggregationDepth`. """ return self._set(aggregationDepth=value) class AFTSurvivalRegressionModel(JavaPredictionModel, _AFTSurvivalRegressionParams, JavaMLWritable, JavaMLReadable): """ Model fitted by :class:`AFTSurvivalRegression`. .. versionadded:: 1.6.0 """ @since("3.0.0") def setQuantileProbabilities(self, value): """ Sets the value of :py:attr:`quantileProbabilities`. """ return self._set(quantileProbabilities=value) @since("3.0.0") def setQuantilesCol(self, value): """ Sets the value of :py:attr:`quantilesCol`. """ return self._set(quantilesCol=value) @property @since("2.0.0") def coefficients(self): """ Model coefficients. """ return self._call_java("coefficients") @property @since("1.6.0") def intercept(self): """ Model intercept. """ return self._call_java("intercept") @property @since("1.6.0") def scale(self): """ Model scale parameter. 
""" return self._call_java("scale") @since("2.0.0") def predictQuantiles(self, features): """ Predicted Quantiles """ return self._call_java("predictQuantiles", features) class _GeneralizedLinearRegressionParams(_JavaPredictorParams, HasFitIntercept, HasMaxIter, HasTol, HasRegParam, HasWeightCol, HasSolver, HasAggregationDepth): """ Params for :py:class:`GeneralizedLinearRegression` and :py:class:`GeneralizedLinearRegressionModel`. .. versionadded:: 3.0.0 """ family = Param(Params._dummy(), "family", "The name of family which is a description of " + "the error distribution to be used in the model. Supported options: " + "gaussian (default), binomial, poisson, gamma and tweedie.", typeConverter=TypeConverters.toString) link = Param(Params._dummy(), "link", "The name of link function which provides the " + "relationship between the linear predictor and the mean of the distribution " + "function. Supported options: identity, log, inverse, logit, probit, cloglog " + "and sqrt.", typeConverter=TypeConverters.toString) linkPredictionCol = Param(Params._dummy(), "linkPredictionCol", "link prediction (linear " + "predictor) column name", typeConverter=TypeConverters.toString) variancePower = Param(Params._dummy(), "variancePower", "The power in the variance function " + "of the Tweedie distribution which characterizes the relationship " + "between the variance and mean of the distribution. Only applicable " + "for the Tweedie family. Supported values: 0 and [1, Inf).", typeConverter=TypeConverters.toFloat) linkPower = Param(Params._dummy(), "linkPower", "The index in the power link function. " + "Only applicable to the Tweedie family.", typeConverter=TypeConverters.toFloat) solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " + "options: irls.", typeConverter=TypeConverters.toString) offsetCol = Param(Params._dummy(), "offsetCol", "The offset column name. If this is not set " + "or empty, we treat all instance offsets as 0.0", typeConverter=TypeConverters.toString) @since("2.0.0") def getFamily(self): """ Gets the value of family or its default value. """ return self.getOrDefault(self.family) @since("2.0.0") def getLinkPredictionCol(self): """ Gets the value of linkPredictionCol or its default value. """ return self.getOrDefault(self.linkPredictionCol) @since("2.0.0") def getLink(self): """ Gets the value of link or its default value. """ return self.getOrDefault(self.link) @since("2.2.0") def getVariancePower(self): """ Gets the value of variancePower or its default value. """ return self.getOrDefault(self.variancePower) @since("2.2.0") def getLinkPower(self): """ Gets the value of linkPower or its default value. """ return self.getOrDefault(self.linkPower) @since("2.3.0") def getOffsetCol(self): """ Gets the value of offsetCol or its default value. """ return self.getOrDefault(self.offsetCol) @inherit_doc class GeneralizedLinearRegression(JavaPredictor, _GeneralizedLinearRegressionParams, JavaMLWritable, JavaMLReadable): """ Generalized Linear Regression. Fit a Generalized Linear Model specified by giving a symbolic description of the linear predictor (link function) and a description of the error distribution (family). It supports "gaussian", "binomial", "poisson", "gamma" and "tweedie" as family. Valid link functions for each family is listed below. The first link function of each family is the default one. 
* "gaussian" -> "identity", "log", "inverse" * "binomial" -> "logit", "probit", "cloglog" * "poisson" -> "log", "identity", "sqrt" * "gamma" -> "inverse", "identity", "log" * "tweedie" -> power link function specified through "linkPower". \ The default link power in the tweedie family is 1 - variancePower. .. seealso:: `GLM <https://en.wikipedia.org/wiki/Generalized_linear_model>`_ >>> from pyspark.ml.linalg import Vectors >>> df = spark.createDataFrame([ ... (1.0, Vectors.dense(0.0, 0.0)), ... (1.0, Vectors.dense(1.0, 2.0)), ... (2.0, Vectors.dense(0.0, 0.0)), ... (2.0, Vectors.dense(1.0, 1.0)),], ["label", "features"]) >>> glr = GeneralizedLinearRegression(family="gaussian", link="identity", linkPredictionCol="p") >>> glr.setRegParam(0.1) GeneralizedLinearRegression... >>> glr.getRegParam() 0.1 >>> glr.clear(glr.regParam) >>> glr.setMaxIter(10) GeneralizedLinearRegression... >>> glr.getMaxIter() 10 >>> glr.clear(glr.maxIter) >>> model = glr.fit(df) >>> model.setFeaturesCol("features") GeneralizedLinearRegressionModel... >>> model.getMaxIter() 25 >>> model.getAggregationDepth() 2 >>> transformed = model.transform(df) >>> abs(transformed.head().prediction - 1.5) < 0.001 True >>> abs(transformed.head().p - 1.5) < 0.001 True >>> model.coefficients DenseVector([1.5..., -1.0...]) >>> model.numFeatures 2 >>> abs(model.intercept - 1.5) < 0.001 True >>> glr_path = temp_path + "/glr" >>> glr.save(glr_path) >>> glr2 = GeneralizedLinearRegression.load(glr_path) >>> glr.getFamily() == glr2.getFamily() True >>> model_path = temp_path + "/glr_model" >>> model.save(model_path) >>> model2 = GeneralizedLinearRegressionModel.load(model_path) >>> model.intercept == model2.intercept True >>> model.coefficients[0] == model2.coefficients[0] True .. versionadded:: 2.0.0 """ @keyword_only def __init__(self, labelCol="label", featuresCol="features", predictionCol="prediction", family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2): """ __init__(self, labelCol="label", featuresCol="features", predictionCol="prediction", \ family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, \ regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, \ variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2) """ super(GeneralizedLinearRegression, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.regression.GeneralizedLinearRegression", self.uid) self._setDefault(family="gaussian", maxIter=25, tol=1e-6, regParam=0.0, solver="irls", variancePower=0.0, aggregationDepth=2) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("2.0.0") def setParams(self, labelCol="label", featuresCol="features", predictionCol="prediction", family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2): """ setParams(self, labelCol="label", featuresCol="features", predictionCol="prediction", \ family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, \ regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, \ variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2) Sets params for generalized linear regression. 
""" kwargs = self._input_kwargs return self._set(**kwargs) def _create_model(self, java_model): return GeneralizedLinearRegressionModel(java_model) @since("2.0.0") def setFamily(self, value): """ Sets the value of :py:attr:`family`. """ return self._set(family=value) @since("2.0.0") def setLinkPredictionCol(self, value): """ Sets the value of :py:attr:`linkPredictionCol`. """ return self._set(linkPredictionCol=value) @since("2.0.0") def setLink(self, value): """ Sets the value of :py:attr:`link`. """ return self._set(link=value) @since("2.2.0") def setVariancePower(self, value): """ Sets the value of :py:attr:`variancePower`. """ return self._set(variancePower=value) @since("2.2.0") def setLinkPower(self, value): """ Sets the value of :py:attr:`linkPower`. """ return self._set(linkPower=value) @since("2.3.0") def setOffsetCol(self, value): """ Sets the value of :py:attr:`offsetCol`. """ return self._set(offsetCol=value) @since("2.0.0") def setMaxIter(self, value): """ Sets the value of :py:attr:`maxIter`. """ return self._set(maxIter=value) @since("2.0.0") def setRegParam(self, value): """ Sets the value of :py:attr:`regParam`. """ return self._set(regParam=value) @since("2.0.0") def setTol(self, value): """ Sets the value of :py:attr:`tol`. """ return self._set(tol=value) @since("2.2.0") def setFitIntercept(self, value): """ Sets the value of :py:attr:`fitIntercept`. """ return self._set(fitIntercept=value) @since("2.0.0") def setWeightCol(self, value): """ Sets the value of :py:attr:`weightCol`. """ return self._set(weightCol=value) @since("2.0.0") def setSolver(self, value): """ Sets the value of :py:attr:`solver`. """ return self._set(solver=value) @since("3.0.0") def setAggregationDepth(self, value): """ Sets the value of :py:attr:`aggregationDepth`. """ return self._set(aggregationDepth=value) class GeneralizedLinearRegressionModel(JavaPredictionModel, _GeneralizedLinearRegressionParams, JavaMLWritable, JavaMLReadable, HasTrainingSummary): """ Model fitted by :class:`GeneralizedLinearRegression`. .. versionadded:: 2.0.0 """ @since("3.0.0") def setLinkPredictionCol(self, value): """ Sets the value of :py:attr:`linkPredictionCol`. """ return self._set(linkPredictionCol=value) @property @since("2.0.0") def coefficients(self): """ Model coefficients. """ return self._call_java("coefficients") @property @since("2.0.0") def intercept(self): """ Model intercept. """ return self._call_java("intercept") @property @since("2.0.0") def summary(self): """ Gets summary (e.g. residuals, deviance, pValues) of model on training set. An exception is thrown if `trainingSummary is None`. """ if self.hasSummary: return GeneralizedLinearRegressionTrainingSummary( super(GeneralizedLinearRegressionModel, self).summary) else: raise RuntimeError("No training summary available for this %s" % self.__class__.__name__) @since("2.0.0") def evaluate(self, dataset): """ Evaluates the model on a test dataset. :param dataset: Test dataset to evaluate model on, where dataset is an instance of :py:class:`pyspark.sql.DataFrame` """ if not isinstance(dataset, DataFrame): raise ValueError("dataset must be a DataFrame but got %s." % type(dataset)) java_glr_summary = self._call_java("evaluate", dataset) return GeneralizedLinearRegressionSummary(java_glr_summary) class GeneralizedLinearRegressionSummary(JavaWrapper): """ Generalized linear regression results evaluated on a dataset. .. versionadded:: 2.0.0 """ @property @since("2.0.0") def predictions(self): """ Predictions output by the model's `transform` method. 
""" return self._call_java("predictions") @property @since("2.0.0") def predictionCol(self): """ Field in :py:attr:`predictions` which gives the predicted value of each instance. This is set to a new column name if the original model's `predictionCol` is not set. """ return self._call_java("predictionCol") @property @since("2.2.0") def numInstances(self): """ Number of instances in DataFrame predictions. """ return self._call_java("numInstances") @property @since("2.0.0") def rank(self): """ The numeric rank of the fitted linear model. """ return self._call_java("rank") @property @since("2.0.0") def degreesOfFreedom(self): """ Degrees of freedom. """ return self._call_java("degreesOfFreedom") @property @since("2.0.0") def residualDegreeOfFreedom(self): """ The residual degrees of freedom. """ return self._call_java("residualDegreeOfFreedom") @property @since("2.0.0") def residualDegreeOfFreedomNull(self): """ The residual degrees of freedom for the null model. """ return self._call_java("residualDegreeOfFreedomNull") @since("2.0.0") def residuals(self, residualsType="deviance"): """ Get the residuals of the fitted model by type. :param residualsType: The type of residuals which should be returned. Supported options: deviance (default), pearson, working, and response. """ return self._call_java("residuals", residualsType) @property @since("2.0.0") def nullDeviance(self): """ The deviance for the null model. """ return self._call_java("nullDeviance") @property @since("2.0.0") def deviance(self): """ The deviance for the fitted model. """ return self._call_java("deviance") @property @since("2.0.0") def dispersion(self): """ The dispersion of the fitted model. It is taken as 1.0 for the "binomial" and "poisson" families, and otherwise estimated by the residual Pearson's Chi-Squared statistic (which is defined as sum of the squares of the Pearson residuals) divided by the residual degrees of freedom. """ return self._call_java("dispersion") @property @since("2.0.0") def aic(self): """ Akaike's "An Information Criterion"(AIC) for the fitted model. """ return self._call_java("aic") @inherit_doc class GeneralizedLinearRegressionTrainingSummary(GeneralizedLinearRegressionSummary): """ Generalized linear regression training results. .. versionadded:: 2.0.0 """ @property @since("2.0.0") def numIterations(self): """ Number of training iterations. """ return self._call_java("numIterations") @property @since("2.0.0") def solver(self): """ The numeric solver used for training. """ return self._call_java("solver") @property @since("2.0.0") def coefficientStandardErrors(self): """ Standard error of estimated coefficients and intercept. If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True, then the last element returned corresponds to the intercept. """ return self._call_java("coefficientStandardErrors") @property @since("2.0.0") def tValues(self): """ T-statistic of estimated coefficients and intercept. If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True, then the last element returned corresponds to the intercept. """ return self._call_java("tValues") @property @since("2.0.0") def pValues(self): """ Two-sided p-value of estimated coefficients and intercept. If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True, then the last element returned corresponds to the intercept. 
""" return self._call_java("pValues") def __repr__(self): return self._call_java("toString") class _FactorizationMachinesParams(_JavaPredictorParams, HasMaxIter, HasStepSize, HasTol, HasSolver, HasSeed, HasFitIntercept, HasRegParam): """ Params for :py:class:`FMRegressor`, :py:class:`FMRegressionModel`, :py:class:`FMClassifier` and :py:class:`FMClassifierModel`. .. versionadded:: 3.0.0 """ factorSize = Param(Params._dummy(), "factorSize", "Dimensionality of the factor vectors, " + "which are used to get pairwise interactions between variables", typeConverter=TypeConverters.toInt) fitLinear = Param(Params._dummy(), "fitLinear", "whether to fit linear term (aka 1-way term)", typeConverter=TypeConverters.toBoolean) miniBatchFraction = Param(Params._dummy(), "miniBatchFraction", "fraction of the input data " + "set that should be used for one iteration of gradient descent", typeConverter=TypeConverters.toFloat) initStd = Param(Params._dummy(), "initStd", "standard deviation of initial coefficients", typeConverter=TypeConverters.toFloat) solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " + "options: gd, adamW. (Default adamW)", typeConverter=TypeConverters.toString) @since("3.0.0") def getFactorSize(self): """ Gets the value of factorSize or its default value. """ return self.getOrDefault(self.factorSize) @since("3.0.0") def getFitLinear(self): """ Gets the value of fitLinear or its default value. """ return self.getOrDefault(self.fitLinear) @since("3.0.0") def getMiniBatchFraction(self): """ Gets the value of miniBatchFraction or its default value. """ return self.getOrDefault(self.miniBatchFraction) @since("3.0.0") def getInitStd(self): """ Gets the value of initStd or its default value. """ return self.getOrDefault(self.initStd) @inherit_doc class FMRegressor(JavaPredictor, _FactorizationMachinesParams, JavaMLWritable, JavaMLReadable): """ Factorization Machines learning algorithm for regression. solver Supports: * gd (normal mini-batch gradient descent) * adamW (default) >>> from pyspark.ml.linalg import Vectors >>> from pyspark.ml.regression import FMRegressor >>> df = spark.createDataFrame([ ... (2.0, Vectors.dense(2.0)), ... (1.0, Vectors.dense(1.0)), ... (0.0, Vectors.sparse(1, [], []))], ["label", "features"]) >>> >>> fm = FMRegressor(factorSize=2) >>> fm.setSeed(16) FMRegressor... >>> model = fm.fit(df) >>> model.getMaxIter() 100 >>> test0 = spark.createDataFrame([ ... (Vectors.dense(-2.0),), ... (Vectors.dense(0.5),), ... (Vectors.dense(1.0),), ... (Vectors.dense(4.0),)], ["features"]) >>> model.transform(test0).show(10, False) +--------+-------------------+ |features|prediction | +--------+-------------------+ |[-2.0] |-1.9989237712341565| |[0.5] |0.4956682219523814 | |[1.0] |0.994586620589689 | |[4.0] |3.9880970124135344 | +--------+-------------------+ ... >>> model.intercept -0.0032501766849261557 >>> model.linear DenseVector([0.9978]) >>> model.factors DenseMatrix(1, 2, [0.0173, 0.0021], 1) .. 
versionadded:: 3.0.0 """ factorSize = Param(Params._dummy(), "factorSize", "Dimensionality of the factor vectors, " + "which are used to get pairwise interactions between variables", typeConverter=TypeConverters.toInt) fitLinear = Param(Params._dummy(), "fitLinear", "whether to fit linear term (aka 1-way term)", typeConverter=TypeConverters.toBoolean) miniBatchFraction = Param(Params._dummy(), "miniBatchFraction", "fraction of the input data " + "set that should be used for one iteration of gradient descent", typeConverter=TypeConverters.toFloat) initStd = Param(Params._dummy(), "initStd", "standard deviation of initial coefficients", typeConverter=TypeConverters.toFloat) solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " + "options: gd, adamW. (Default adamW)", typeConverter=TypeConverters.toString) @keyword_only def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, tol=1e-6, solver="adamW", seed=None): """ __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, \ miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, \ tol=1e-6, solver="adamW", seed=None) """ super(FMRegressor, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.regression.FMRegressor", self.uid) self._setDefault(factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, tol=1e-6, solver="adamW") kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("3.0.0") def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, tol=1e-6, solver="adamW", seed=None): """ setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, \ miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, \ tol=1e-6, solver="adamW", seed=None) Sets Params for FMRegressor. """ kwargs = self._input_kwargs return self._set(**kwargs) def _create_model(self, java_model): return FMRegressionModel(java_model) @since("3.0.0") def setFactorSize(self, value): """ Sets the value of :py:attr:`factorSize`. """ return self._set(factorSize=value) @since("3.0.0") def setFitLinear(self, value): """ Sets the value of :py:attr:`fitLinear`. """ return self._set(fitLinear=value) @since("3.0.0") def setMiniBatchFraction(self, value): """ Sets the value of :py:attr:`miniBatchFraction`. """ return self._set(miniBatchFraction=value) @since("3.0.0") def setInitStd(self, value): """ Sets the value of :py:attr:`initStd`. """ return self._set(initStd=value) @since("3.0.0") def setMaxIter(self, value): """ Sets the value of :py:attr:`maxIter`. """ return self._set(maxIter=value) @since("3.0.0") def setStepSize(self, value): """ Sets the value of :py:attr:`stepSize`. """ return self._set(stepSize=value) @since("3.0.0") def setTol(self, value): """ Sets the value of :py:attr:`tol`. """ return self._set(tol=value) @since("3.0.0") def setSolver(self, value): """ Sets the value of :py:attr:`solver`. """ return self._set(solver=value) @since("3.0.0") def setSeed(self, value): """ Sets the value of :py:attr:`seed`. 
""" return self._set(seed=value) @since("3.0.0") def setFitIntercept(self, value): """ Sets the value of :py:attr:`fitIntercept`. """ return self._set(fitIntercept=value) @since("3.0.0") def setRegParam(self, value): """ Sets the value of :py:attr:`regParam`. """ return self._set(regParam=value) class FMRegressionModel(JavaPredictionModel, _FactorizationMachinesParams, JavaMLWritable, JavaMLReadable): """ Model fitted by :class:`FMRegressor`. .. versionadded:: 3.0.0 """ @property @since("3.0.0") def intercept(self): """ Model intercept. """ return self._call_java("intercept") @property @since("3.0.0") def linear(self): """ Model linear term. """ return self._call_java("linear") @property @since("3.0.0") def factors(self): """ Model factor term. """ return self._call_java("factors") if __name__ == "__main__": import doctest import pyspark.ml.regression from pyspark.sql import SparkSession globs = pyspark.ml.regression.__dict__.copy() # The small batch size here ensures that we see multiple batches, # even in these small test examples: spark = SparkSession.builder\ .master("local[2]")\ .appName("ml.regression tests")\ .getOrCreate() sc = spark.sparkContext globs['sc'] = sc globs['spark'] = spark import tempfile temp_path = tempfile.mkdtemp() globs['temp_path'] = temp_path try: (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS) spark.stop() finally: from shutil import rmtree try: rmtree(temp_path) except OSError: pass if failure_count: sys.exit(-1)
33.820216
100
0.605371
import sys from pyspark import since, keyword_only from pyspark.ml.param.shared import * from pyspark.ml.tree import _DecisionTreeModel, _DecisionTreeParams, \ _TreeEnsembleModel, _TreeEnsembleParams, _RandomForestParams, _GBTParams, \ _HasVarianceImpurity, _TreeRegressorParams from pyspark.ml.util import * from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams, \ JavaPredictor, JavaPredictionModel, _JavaPredictorParams, JavaWrapper from pyspark.ml.common import inherit_doc from pyspark.sql import DataFrame __all__ = ['AFTSurvivalRegression', 'AFTSurvivalRegressionModel', 'DecisionTreeRegressor', 'DecisionTreeRegressionModel', 'GBTRegressor', 'GBTRegressionModel', 'GeneralizedLinearRegression', 'GeneralizedLinearRegressionModel', 'GeneralizedLinearRegressionSummary', 'GeneralizedLinearRegressionTrainingSummary', 'IsotonicRegression', 'IsotonicRegressionModel', 'LinearRegression', 'LinearRegressionModel', 'LinearRegressionSummary', 'LinearRegressionTrainingSummary', 'RandomForestRegressor', 'RandomForestRegressionModel', 'FMRegressor', 'FMRegressionModel'] class _LinearRegressionParams(_JavaPredictorParams, HasRegParam, HasElasticNetParam, HasMaxIter, HasTol, HasFitIntercept, HasStandardization, HasWeightCol, HasSolver, HasAggregationDepth, HasLoss): solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " + "options: auto, normal, l-bfgs.", typeConverter=TypeConverters.toString) loss = Param(Params._dummy(), "loss", "The loss function to be optimized. Supported " + "options: squaredError, huber.", typeConverter=TypeConverters.toString) epsilon = Param(Params._dummy(), "epsilon", "The shape parameter to control the amount of " + "robustness. Must be > 1.0. Only valid when loss is huber", typeConverter=TypeConverters.toFloat) @since("2.3.0") def getEpsilon(self): return self.getOrDefault(self.epsilon) @inherit_doc class LinearRegression(JavaPredictor, _LinearRegressionParams, JavaMLWritable, JavaMLReadable): @keyword_only def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, standardization=True, solver="auto", weightCol=None, aggregationDepth=2, loss="squaredError", epsilon=1.35): super(LinearRegression, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.regression.LinearRegression", self.uid) self._setDefault(maxIter=100, regParam=0.0, tol=1e-6, loss="squaredError", epsilon=1.35) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.4.0") def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, standardization=True, solver="auto", weightCol=None, aggregationDepth=2, loss="squaredError", epsilon=1.35): kwargs = self._input_kwargs return self._set(**kwargs) def _create_model(self, java_model): return LinearRegressionModel(java_model) @since("2.3.0") def setEpsilon(self, value): return self._set(epsilon=value) def setMaxIter(self, value): return self._set(maxIter=value) def setRegParam(self, value): return self._set(regParam=value) def setTol(self, value): return self._set(tol=value) def setElasticNetParam(self, value): return self._set(elasticNetParam=value) def setFitIntercept(self, value): return self._set(fitIntercept=value) def setStandardization(self, value): return self._set(standardization=value) def setWeightCol(self, value): return self._set(weightCol=value) def 
setSolver(self, value): return self._set(solver=value) def setAggregationDepth(self, value): return self._set(aggregationDepth=value) def setLoss(self, value): return self._set(loss=value) class LinearRegressionModel(JavaPredictionModel, _LinearRegressionParams, GeneralJavaMLWritable, JavaMLReadable, HasTrainingSummary): @property @since("2.0.0") def coefficients(self): return self._call_java("coefficients") @property @since("1.4.0") def intercept(self): return self._call_java("intercept") @property @since("2.3.0") def scale(self): return self._call_java("scale") @property @since("2.0.0") def summary(self): if self.hasSummary: return LinearRegressionTrainingSummary(super(LinearRegressionModel, self).summary) else: raise RuntimeError("No training summary available for this %s" % self.__class__.__name__) @since("2.0.0") def evaluate(self, dataset): if not isinstance(dataset, DataFrame): raise ValueError("dataset must be a DataFrame but got %s." % type(dataset)) java_lr_summary = self._call_java("evaluate", dataset) return LinearRegressionSummary(java_lr_summary) class LinearRegressionSummary(JavaWrapper): @property @since("2.0.0") def predictions(self): return self._call_java("predictions") @property @since("2.0.0") def predictionCol(self): return self._call_java("predictionCol") @property @since("2.0.0") def labelCol(self): return self._call_java("labelCol") @property @since("2.0.0") def featuresCol(self): return self._call_java("featuresCol") @property @since("2.0.0") def explainedVariance(self): return self._call_java("explainedVariance") @property @since("2.0.0") def meanAbsoluteError(self): return self._call_java("meanAbsoluteError") @property @since("2.0.0") def meanSquaredError(self): return self._call_java("meanSquaredError") @property @since("2.0.0") def rootMeanSquaredError(self): return self._call_java("rootMeanSquaredError") @property @since("2.0.0") def r2(self): return self._call_java("r2") @property @since("2.4.0") def r2adj(self): return self._call_java("r2adj") @property @since("2.0.0") def residuals(self): return self._call_java("residuals") @property @since("2.0.0") def numInstances(self): return self._call_java("numInstances") @property @since("2.2.0") def degreesOfFreedom(self): return self._call_java("degreesOfFreedom") @property @since("2.0.0") def devianceResiduals(self): return self._call_java("devianceResiduals") @property @since("2.0.0") def coefficientStandardErrors(self): return self._call_java("coefficientStandardErrors") @property @since("2.0.0") def tValues(self): return self._call_java("tValues") @property @since("2.0.0") def pValues(self): return self._call_java("pValues") @inherit_doc class LinearRegressionTrainingSummary(LinearRegressionSummary): @property @since("2.0.0") def objectiveHistory(self): return self._call_java("objectiveHistory") @property @since("2.0.0") def totalIterations(self): return self._call_java("totalIterations") class _IsotonicRegressionParams(HasFeaturesCol, HasLabelCol, HasPredictionCol, HasWeightCol): isotonic = Param( Params._dummy(), "isotonic", "whether the output sequence should be isotonic/increasing (true) or" + "antitonic/decreasing (false).", typeConverter=TypeConverters.toBoolean) featureIndex = Param( Params._dummy(), "featureIndex", "The index of the feature if featuresCol is a vector column, no effect otherwise.", typeConverter=TypeConverters.toInt) def getIsotonic(self): return self.getOrDefault(self.isotonic) def getFeatureIndex(self): return self.getOrDefault(self.featureIndex) @inherit_doc class
IsotonicRegression(JavaEstimator, _IsotonicRegressionParams, HasWeightCol, JavaMLWritable, JavaMLReadable): @keyword_only def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", weightCol=None, isotonic=True, featureIndex=0): super(IsotonicRegression, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.regression.IsotonicRegression", self.uid) self._setDefault(isotonic=True, featureIndex=0) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", weightCol=None, isotonic=True, featureIndex=0): kwargs = self._input_kwargs return self._set(**kwargs) def _create_model(self, java_model): return IsotonicRegressionModel(java_model) def setIsotonic(self, value): return self._set(isotonic=value) def setFeatureIndex(self, value): return self._set(featureIndex=value) @since("1.6.0") def setFeaturesCol(self, value): return self._set(featuresCol=value) @since("1.6.0") def setPredictionCol(self, value): return self._set(predictionCol=value) @since("1.6.0") def setLabelCol(self, value): return self._set(labelCol=value) @since("1.6.0") def setWeightCol(self, value): return self._set(weightCol=value) class IsotonicRegressionModel(JavaModel, _IsotonicRegressionParams, JavaMLWritable, JavaMLReadable): @since("3.0.0") def setFeaturesCol(self, value): return self._set(featuresCol=value) @since("3.0.0") def setPredictionCol(self, value): return self._set(predictionCol=value) def setFeatureIndex(self, value): return self._set(featureIndex=value) @property @since("1.6.0") def boundaries(self): return self._call_java("boundaries") @property @since("1.6.0") def predictions(self): return self._call_java("predictions") @since("3.0.0") def numFeatures(self): return self._call_java("numFeatures") @since("3.0.0") def predict(self, value): return self._call_java("predict", value) class _DecisionTreeRegressorParams(_DecisionTreeParams, _TreeRegressorParams, HasVarianceCol): pass @inherit_doc class DecisionTreeRegressor(JavaPredictor, _DecisionTreeRegressorParams, JavaMLWritable, JavaMLReadable): @keyword_only def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance", seed=None, varianceCol=None, weightCol=None, leafCol="", minWeightFractionPerNode=0.0): super(DecisionTreeRegressor, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.regression.DecisionTreeRegressor", self.uid) self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance", leafCol="", minWeightFractionPerNode=0.0) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.4.0") def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance", seed=None, varianceCol=None, weightCol=None, leafCol="", minWeightFractionPerNode=0.0): kwargs = self._input_kwargs return self._set(**kwargs) def _create_model(self, java_model): return DecisionTreeRegressionModel(java_model) @since("1.4.0") def setMaxDepth(self, value): return self._set(maxDepth=value) @since("1.4.0") def setMaxBins(self, value): return self._set(maxBins=value) 
@since("1.4.0") def setMinInstancesPerNode(self, value): return self._set(minInstancesPerNode=value) @since("3.0.0") def setMinWeightFractionPerNode(self, value): return self._set(minWeightFractionPerNode=value) @since("1.4.0") def setMinInfoGain(self, value): return self._set(minInfoGain=value) @since("1.4.0") def setMaxMemoryInMB(self, value): return self._set(maxMemoryInMB=value) @since("1.4.0") def setCacheNodeIds(self, value): return self._set(cacheNodeIds=value) @since("1.4.0") def setImpurity(self, value): return self._set(impurity=value) @since("1.4.0") def setCheckpointInterval(self, value): return self._set(checkpointInterval=value) def setSeed(self, value): return self._set(seed=value) @since("3.0.0") def setWeightCol(self, value): return self._set(weightCol=value) @since("2.0.0") def setVarianceCol(self, value): return self._set(varianceCol=value) @inherit_doc class DecisionTreeRegressionModel(_DecisionTreeModel, _DecisionTreeRegressorParams, JavaMLWritable, JavaMLReadable): @since("3.0.0") def setVarianceCol(self, value): return self._set(varianceCol=value) @property @since("2.0.0") def featureImportances(self): return self._call_java("featureImportances") class _RandomForestRegressorParams(_RandomForestParams, _TreeRegressorParams): pass @inherit_doc class RandomForestRegressor(JavaPredictor, _RandomForestRegressorParams, JavaMLWritable, JavaMLReadable): @keyword_only def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0, weightCol=None): super(RandomForestRegressor, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.regression.RandomForestRegressor", self.uid) self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance", subsamplingRate=1.0, numTrees=20, featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.4.0") def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0, weightCol=None): kwargs = self._input_kwargs return self._set(**kwargs) def _create_model(self, java_model): return RandomForestRegressionModel(java_model) def setMaxDepth(self, value): return self._set(maxDepth=value) def setMaxBins(self, value): return self._set(maxBins=value) def setMinInstancesPerNode(self, value): return self._set(minInstancesPerNode=value) def setMinInfoGain(self, value): return self._set(minInfoGain=value) def setMaxMemoryInMB(self, value): return self._set(maxMemoryInMB=value) def setCacheNodeIds(self, value): return self._set(cacheNodeIds=value) @since("1.4.0") def setImpurity(self, value): return self._set(impurity=value) @since("1.4.0") def setNumTrees(self, value): return self._set(numTrees=value) @since("1.4.0") def setSubsamplingRate(self, value): return self._set(subsamplingRate=value) @since("2.4.0") def setFeatureSubsetStrategy(self, value): return 
self._set(featureSubsetStrategy=value) def setCheckpointInterval(self, value): return self._set(checkpointInterval=value) def setSeed(self, value): return self._set(seed=value) @since("3.0.0") def setWeightCol(self, value): return self._set(weightCol=value) @since("3.0.0") def setMinWeightFractionPerNode(self, value): return self._set(minWeightFractionPerNode=value) class RandomForestRegressionModel(_TreeEnsembleModel, _RandomForestRegressorParams, JavaMLWritable, JavaMLReadable): @property @since("2.0.0") def trees(self): return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))] @property @since("2.0.0") def featureImportances(self): return self._call_java("featureImportances") class _GBTRegressorParams(_GBTParams, _TreeRegressorParams): supportedLossTypes = ["squared", "absolute"] lossType = Param(Params._dummy(), "lossType", "Loss function which GBT tries to minimize (case-insensitive). " + "Supported options: " + ", ".join(supportedLossTypes), typeConverter=TypeConverters.toString) @since("1.4.0") def getLossType(self): return self.getOrDefault(self.lossType) @inherit_doc class GBTRegressor(JavaPredictor, _GBTRegressorParams, JavaMLWritable, JavaMLReadable): @keyword_only def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, impurity="variance", featureSubsetStrategy="all", validationTol=0.01, validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, weightCol=None): super(GBTRegressor, self).__init__() self._java_obj = self._new_java_obj("org.apache.spark.ml.regression.GBTRegressor", self.uid) self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, impurity="variance", featureSubsetStrategy="all", validationTol=0.01, leafCol="", minWeightFractionPerNode=0.0) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.4.0") def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, impurity="variance", featureSubsetStrategy="all", validationTol=0.01, validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, weightCol=None): kwargs = self._input_kwargs return self._set(**kwargs) def _create_model(self, java_model): return GBTRegressionModel(java_model) @since("1.4.0") def setMaxDepth(self, value): return self._set(maxDepth=value) @since("1.4.0") def setMaxBins(self, value): return self._set(maxBins=value) @since("1.4.0") def setMinInstancesPerNode(self, value): return self._set(minInstancesPerNode=value) @since("1.4.0") def setMinInfoGain(self, value): return self._set(minInfoGain=value) @since("1.4.0") def setMaxMemoryInMB(self, value): return self._set(maxMemoryInMB=value) @since("1.4.0") def setCacheNodeIds(self, value): return self._set(cacheNodeIds=value) @since("1.4.0") def setImpurity(self, value): return self._set(impurity=value) @since("1.4.0") def setLossType(self, value): return self._set(lossType=value) @since("1.4.0") def setSubsamplingRate(self, value): return self._set(subsamplingRate=value)
@since("2.4.0") def setFeatureSubsetStrategy(self, value): return self._set(featureSubsetStrategy=value) @since("3.0.0") def setValidationIndicatorCol(self, value): return self._set(validationIndicatorCol=value) @since("1.4.0") def setMaxIter(self, value): return self._set(maxIter=value) @since("1.4.0") def setCheckpointInterval(self, value): return self._set(checkpointInterval=value) @since("1.4.0") def setSeed(self, value): return self._set(seed=value) @since("1.4.0") def setStepSize(self, value): return self._set(stepSize=value) @since("3.0.0") def setWeightCol(self, value): return self._set(weightCol=value) @since("3.0.0") def setMinWeightFractionPerNode(self, value): return self._set(minWeightFractionPerNode=value) class GBTRegressionModel(_TreeEnsembleModel, _GBTRegressorParams, JavaMLWritable, JavaMLReadable): @property @since("2.0.0") def featureImportances(self): return self._call_java("featureImportances") @property @since("2.0.0") def trees(self): return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))] @since("2.4.0") def evaluateEachIteration(self, dataset, loss): return self._call_java("evaluateEachIteration", dataset, loss) class _AFTSurvivalRegressionParams(_JavaPredictorParams, HasMaxIter, HasTol, HasFitIntercept, HasAggregationDepth): censorCol = Param( Params._dummy(), "censorCol", "censor column name. The value of this column could be 0 or 1. " + "If the value is 1, it means the event has occurred i.e. " + "uncensored; otherwise censored.", typeConverter=TypeConverters.toString) quantileProbabilities = Param( Params._dummy(), "quantileProbabilities", "quantile probabilities array. Values of the quantile probabilities array " + "should be in the range (0, 1) and the array should be non-empty.", typeConverter=TypeConverters.toListFloat) quantilesCol = Param( Params._dummy(), "quantilesCol", "quantiles column name. 
This column will output quantiles of " + "corresponding quantileProbabilities if it is set.", typeConverter=TypeConverters.toString) @since("1.6.0") def getCensorCol(self): return self.getOrDefault(self.censorCol) @since("1.6.0") def getQuantileProbabilities(self): return self.getOrDefault(self.quantileProbabilities) @since("1.6.0") def getQuantilesCol(self): return self.getOrDefault(self.quantilesCol) @inherit_doc class AFTSurvivalRegression(JavaPredictor, _AFTSurvivalRegressionParams, JavaMLWritable, JavaMLReadable): @keyword_only def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", quantileProbabilities=list([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]), quantilesCol=None, aggregationDepth=2): super(AFTSurvivalRegression, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.regression.AFTSurvivalRegression", self.uid) self._setDefault(censorCol="censor", quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], maxIter=100, tol=1E-6) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.6.0") def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", quantileProbabilities=list([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]), quantilesCol=None, aggregationDepth=2): kwargs = self._input_kwargs return self._set(**kwargs) def _create_model(self, java_model): return AFTSurvivalRegressionModel(java_model) @since("1.6.0") def setCensorCol(self, value): return self._set(censorCol=value) @since("1.6.0") def setQuantileProbabilities(self, value): return self._set(quantileProbabilities=value) @since("1.6.0") def setQuantilesCol(self, value): return self._set(quantilesCol=value) @since("1.6.0") def setMaxIter(self, value): return self._set(maxIter=value) @since("1.6.0") def setTol(self, value): return self._set(tol=value) @since("1.6.0") def setFitIntercept(self, value): return self._set(fitIntercept=value) @since("2.1.0") def setAggregationDepth(self, value): return self._set(aggregationDepth=value) class AFTSurvivalRegressionModel(JavaPredictionModel, _AFTSurvivalRegressionParams, JavaMLWritable, JavaMLReadable): @since("3.0.0") def setQuantileProbabilities(self, value): return self._set(quantileProbabilities=value) @since("3.0.0") def setQuantilesCol(self, value): return self._set(quantilesCol=value) @property @since("2.0.0") def coefficients(self): return self._call_java("coefficients") @property @since("1.6.0") def intercept(self): return self._call_java("intercept") @property @since("1.6.0") def scale(self): return self._call_java("scale") @since("2.0.0") def predictQuantiles(self, features): return self._call_java("predictQuantiles", features) class _GeneralizedLinearRegressionParams(_JavaPredictorParams, HasFitIntercept, HasMaxIter, HasTol, HasRegParam, HasWeightCol, HasSolver, HasAggregationDepth): family = Param(Params._dummy(), "family", "The name of family which is a description of " + "the error distribution to be used in the model. Supported options: " + "gaussian (default), binomial, poisson, gamma and tweedie.", typeConverter=TypeConverters.toString) link = Param(Params._dummy(), "link", "The name of link function which provides the " + "relationship between the linear predictor and the mean of the distribution " + "function. 
Supported options: identity, log, inverse, logit, probit, cloglog " + "and sqrt.", typeConverter=TypeConverters.toString) linkPredictionCol = Param(Params._dummy(), "linkPredictionCol", "link prediction (linear " + "predictor) column name", typeConverter=TypeConverters.toString) variancePower = Param(Params._dummy(), "variancePower", "The power in the variance function " + "of the Tweedie distribution which characterizes the relationship " + "between the variance and mean of the distribution. Only applicable " + "for the Tweedie family. Supported values: 0 and [1, Inf).", typeConverter=TypeConverters.toFloat) linkPower = Param(Params._dummy(), "linkPower", "The index in the power link function. " + "Only applicable to the Tweedie family.", typeConverter=TypeConverters.toFloat) solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " + "options: irls.", typeConverter=TypeConverters.toString) offsetCol = Param(Params._dummy(), "offsetCol", "The offset column name. If this is not set " + "or empty, we treat all instance offsets as 0.0", typeConverter=TypeConverters.toString) @since("2.0.0") def getFamily(self): return self.getOrDefault(self.family) @since("2.0.0") def getLinkPredictionCol(self): return self.getOrDefault(self.linkPredictionCol) @since("2.0.0") def getLink(self): return self.getOrDefault(self.link) @since("2.2.0") def getVariancePower(self): return self.getOrDefault(self.variancePower) @since("2.2.0") def getLinkPower(self): return self.getOrDefault(self.linkPower) @since("2.3.0") def getOffsetCol(self): return self.getOrDefault(self.offsetCol) @inherit_doc class GeneralizedLinearRegression(JavaPredictor, _GeneralizedLinearRegressionParams, JavaMLWritable, JavaMLReadable): @keyword_only def __init__(self, labelCol="label", featuresCol="features", predictionCol="prediction", family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2): super(GeneralizedLinearRegression, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.regression.GeneralizedLinearRegression", self.uid) self._setDefault(family="gaussian", maxIter=25, tol=1e-6, regParam=0.0, solver="irls", variancePower=0.0, aggregationDepth=2) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("2.0.0") def setParams(self, labelCol="label", featuresCol="features", predictionCol="prediction", family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2): kwargs = self._input_kwargs return self._set(**kwargs) def _create_model(self, java_model): return GeneralizedLinearRegressionModel(java_model) @since("2.0.0") def setFamily(self, value): return self._set(family=value) @since("2.0.0") def setLinkPredictionCol(self, value): return self._set(linkPredictionCol=value) @since("2.0.0") def setLink(self, value): return self._set(link=value) @since("2.2.0") def setVariancePower(self, value): return self._set(variancePower=value) @since("2.2.0") def setLinkPower(self, value): return self._set(linkPower=value) @since("2.3.0") def setOffsetCol(self, value): return self._set(offsetCol=value) @since("2.0.0") def setMaxIter(self, value): return self._set(maxIter=value) @since("2.0.0") def setRegParam(self, value): return self._set(regParam=value) @since("2.0.0") def 
setTol(self, value): return self._set(tol=value) @since("2.2.0") def setFitIntercept(self, value): return self._set(fitIntercept=value) @since("2.0.0") def setWeightCol(self, value): return self._set(weightCol=value) @since("2.0.0") def setSolver(self, value): return self._set(solver=value) @since("3.0.0") def setAggregationDepth(self, value): return self._set(aggregationDepth=value) class GeneralizedLinearRegressionModel(JavaPredictionModel, _GeneralizedLinearRegressionParams, JavaMLWritable, JavaMLReadable, HasTrainingSummary): @since("3.0.0") def setLinkPredictionCol(self, value): return self._set(linkPredictionCol=value) @property @since("2.0.0") def coefficients(self): return self._call_java("coefficients") @property @since("2.0.0") def intercept(self): return self._call_java("intercept") @property @since("2.0.0") def summary(self): if self.hasSummary: return GeneralizedLinearRegressionTrainingSummary( super(GeneralizedLinearRegressionModel, self).summary) else: raise RuntimeError("No training summary available for this %s" % self.__class__.__name__) @since("2.0.0") def evaluate(self, dataset): if not isinstance(dataset, DataFrame): raise ValueError("dataset must be a DataFrame but got %s." % type(dataset)) java_glr_summary = self._call_java("evaluate", dataset) return GeneralizedLinearRegressionSummary(java_glr_summary) class GeneralizedLinearRegressionSummary(JavaWrapper): @property @since("2.0.0") def predictions(self): return self._call_java("predictions") @property @since("2.0.0") def predictionCol(self): return self._call_java("predictionCol") @property @since("2.2.0") def numInstances(self): return self._call_java("numInstances") @property @since("2.0.0") def rank(self): return self._call_java("rank") @property @since("2.0.0") def degreesOfFreedom(self): return self._call_java("degreesOfFreedom") @property @since("2.0.0") def residualDegreeOfFreedom(self): return self._call_java("residualDegreeOfFreedom") @property @since("2.0.0") def residualDegreeOfFreedomNull(self): return self._call_java("residualDegreeOfFreedomNull") @since("2.0.0") def residuals(self, residualsType="deviance"): return self._call_java("residuals", residualsType) @property @since("2.0.0") def nullDeviance(self): return self._call_java("nullDeviance") @property @since("2.0.0") def deviance(self): return self._call_java("deviance") @property @since("2.0.0") def dispersion(self): return self._call_java("dispersion") @property @since("2.0.0") def aic(self): return self._call_java("aic") @inherit_doc class GeneralizedLinearRegressionTrainingSummary(GeneralizedLinearRegressionSummary): @property @since("2.0.0") def numIterations(self): return self._call_java("numIterations") @property @since("2.0.0") def solver(self): return self._call_java("solver") @property @since("2.0.0") def coefficientStandardErrors(self): return self._call_java("coefficientStandardErrors") @property @since("2.0.0") def tValues(self): return self._call_java("tValues") @property @since("2.0.0") def pValues(self): return self._call_java("pValues") def __repr__(self): return self._call_java("toString") class _FactorizationMachinesParams(_JavaPredictorParams, HasMaxIter, HasStepSize, HasTol, HasSolver, HasSeed, HasFitIntercept, HasRegParam): factorSize = Param(Params._dummy(), "factorSize", "Dimensionality of the factor vectors, " + "which are used to get pairwise interactions between variables", typeConverter=TypeConverters.toInt) fitLinear = Param(Params._dummy(), "fitLinear", "whether to fit linear term (aka 1-way term)", 
typeConverter=TypeConverters.toBoolean) miniBatchFraction = Param(Params._dummy(), "miniBatchFraction", "fraction of the input data " + "set that should be used for one iteration of gradient descent", typeConverter=TypeConverters.toFloat) initStd = Param(Params._dummy(), "initStd", "standard deviation of initial coefficients", typeConverter=TypeConverters.toFloat) solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " + "options: gd, adamW. (Default adamW)", typeConverter=TypeConverters.toString) @since("3.0.0") def getFactorSize(self): return self.getOrDefault(self.factorSize) @since("3.0.0") def getFitLinear(self): return self.getOrDefault(self.fitLinear) @since("3.0.0") def getMiniBatchFraction(self): return self.getOrDefault(self.miniBatchFraction) @since("3.0.0") def getInitStd(self): return self.getOrDefault(self.initStd) @inherit_doc class FMRegressor(JavaPredictor, _FactorizationMachinesParams, JavaMLWritable, JavaMLReadable): factorSize = Param(Params._dummy(), "factorSize", "Dimensionality of the factor vectors, " + "which are used to get pairwise interactions between variables", typeConverter=TypeConverters.toInt) fitLinear = Param(Params._dummy(), "fitLinear", "whether to fit linear term (aka 1-way term)", typeConverter=TypeConverters.toBoolean) miniBatchFraction = Param(Params._dummy(), "miniBatchFraction", "fraction of the input data " + "set that should be used for one iteration of gradient descent", typeConverter=TypeConverters.toFloat) initStd = Param(Params._dummy(), "initStd", "standard deviation of initial coefficients", typeConverter=TypeConverters.toFloat) solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " + "options: gd, adamW. (Default adamW)", typeConverter=TypeConverters.toString) @keyword_only def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, tol=1e-6, solver="adamW", seed=None): super(FMRegressor, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.regression.FMRegressor", self.uid) self._setDefault(factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, tol=1e-6, solver="adamW") kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("3.0.0") def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, tol=1e-6, solver="adamW", seed=None): kwargs = self._input_kwargs return self._set(**kwargs) def _create_model(self, java_model): return FMRegressionModel(java_model) @since("3.0.0") def setFactorSize(self, value): return self._set(factorSize=value) @since("3.0.0") def setFitLinear(self, value): return self._set(fitLinear=value) @since("3.0.0") def setMiniBatchFraction(self, value): return self._set(miniBatchFraction=value) @since("3.0.0") def setInitStd(self, value): return self._set(initStd=value) @since("3.0.0") def setMaxIter(self, value): return self._set(maxIter=value) @since("3.0.0") def setStepSize(self, value): return self._set(stepSize=value) @since("3.0.0") def setTol(self, value): return self._set(tol=value) @since("3.0.0") def setSolver(self, value): return self._set(solver=value) @since("3.0.0") def setSeed(self, value): return 
self._set(seed=value) @since("3.0.0") def setFitIntercept(self, value): return self._set(fitIntercept=value) @since("3.0.0") def setRegParam(self, value): return self._set(regParam=value) class FMRegressionModel(JavaPredictionModel, _FactorizationMachinesParams, JavaMLWritable, JavaMLReadable): @property @since("3.0.0") def intercept(self): return self._call_java("intercept") @property @since("3.0.0") def linear(self): return self._call_java("linear") @property @since("3.0.0") def factors(self): return self._call_java("factors") if __name__ == "__main__": import doctest import pyspark.ml.regression from pyspark.sql import SparkSession globs = pyspark.ml.regression.__dict__.copy() spark = SparkSession.builder\ .master("local[2]")\ .appName("ml.regression tests")\ .getOrCreate() sc = spark.sparkContext globs['sc'] = sc globs['spark'] = spark import tempfile temp_path = tempfile.mkdtemp() globs['temp_path'] = temp_path try: (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS) spark.stop() finally: from shutil import rmtree try: rmtree(temp_path) except OSError: pass if failure_count: sys.exit(-1)
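The GeneralizedLinearRegression parameters above describe the tweedie family, whose link is given through linkPower rather than a link name and defaults to 1 - variancePower, yet neither copy of the file demonstrates it. A brief hedged sketch, reusing the four-row toy frame from the doctest earlier and assuming an active `spark` session:

from pyspark.ml.linalg import Vectors
from pyspark.ml.regression import GeneralizedLinearRegression

df = spark.createDataFrame([
    (1.0, Vectors.dense(0.0, 0.0)),
    (1.0, Vectors.dense(1.0, 2.0)),
    (2.0, Vectors.dense(0.0, 0.0)),
    (2.0, Vectors.dense(1.0, 1.0))], ["label", "features"])
# linkPower is left unset, so the effective link power is 1 - 1.6 = -0.6.
glr = GeneralizedLinearRegression(family="tweedie", variancePower=1.6)
model = glr.fit(df)
print(model.coefficients, model.intercept)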
true
true
7902ef834b1c58761c7a961bde269e3d05dccb1b
495
py
Python
python/engine/functions/orbit_derived_parameters_test.py
RomainEndelin/keplerian_orbits
3380e5d9a1006e73580cf3a86cb10845196c405d
[ "MIT" ]
null
null
null
python/engine/functions/orbit_derived_parameters_test.py
RomainEndelin/keplerian_orbits
3380e5d9a1006e73580cf3a86cb10845196c405d
[ "MIT" ]
null
null
null
python/engine/functions/orbit_derived_parameters_test.py
RomainEndelin/keplerian_orbits
3380e5d9a1006e73580cf3a86cb10845196c405d
[ "MIT" ]
null
null
null
import pytest

from engine.constants import G
from pytest import param as p

from .orbit_derived_parameters import OrbitalPeriod


@pytest.mark.parametrize(
    ("primary_mass", "secondary_mass", "semimajor_axis", "expected"),
    [p(10e10, 100, 10, 76.9102, id="arbitrary period")],
)
def test_orbital_period(primary_mass, secondary_mass, semimajor_axis, expected):
    assert OrbitalPeriod(
        primary_mass, secondary_mass, semimajor_axis, G
    ).evalf() == pytest.approx(expected, 1e-3)
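OrbitalPeriod itself is defined elsewhere in the repo, so as a sanity check on the expected value, here is a plain-Python sketch under the assumption that it implements Kepler's third law and that engine.constants.G is the SI gravitational constant (neither is shown in this record):

import math

G = 6.674e-11  # assumed value of engine.constants.G (SI units, not shown here)

def orbital_period(primary_mass, secondary_mass, semimajor_axis, g=G):
    # Kepler's third law for a two-body system: T = 2*pi*sqrt(a**3 / (G*(m1 + m2)))
    return 2 * math.pi * math.sqrt(
        semimajor_axis ** 3 / (g * (primary_mass + secondary_mass)))

print(orbital_period(10e10, 100, 10))  # ~76.91, matching the expected 76.9102 above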
30.9375
80
0.747475
import pytest

from engine.constants import G
from pytest import param as p

from .orbit_derived_parameters import OrbitalPeriod


@pytest.mark.parametrize(
    ("primary_mass", "secondary_mass", "semimajor_axis", "expected"),
    [p(10e10, 100, 10, 76.9102, id="arbitrary period")],
)
def test_orbital_period(primary_mass, secondary_mass, semimajor_axis, expected):
    assert OrbitalPeriod(
        primary_mass, secondary_mass, semimajor_axis, G
    ).evalf() == pytest.approx(expected, 1e-3)
true
true
7902efe6839b721a246a0971ff8d89aa568d6aa5
821
py
Python
stonesoup/types/particle.py
Isaac-JenkinsRA/Stone-Soup
54c9c7dca8162dadaa58e85933cf10a0f86ce1e1
[ "MIT" ]
1
2021-12-02T00:17:21.000Z
2021-12-02T00:17:21.000Z
stonesoup/types/particle.py
Isaac-JenkinsRA/Stone-Soup
54c9c7dca8162dadaa58e85933cf10a0f86ce1e1
[ "MIT" ]
null
null
null
stonesoup/types/particle.py
Isaac-JenkinsRA/Stone-Soup
54c9c7dca8162dadaa58e85933cf10a0f86ce1e1
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
from ..base import Property
from .array import StateVector
from .base import Type


class Particle(Type):
    """
    Particle type

    A particle type which contains a state and weight
    """
    state_vector: StateVector = Property(doc="State vector")
    weight: float = Property(doc='Weight of particle')
    parent: 'Particle' = Property(default=None, doc='Parent particle')

    def __init__(self, state_vector, weight, parent=None, *args, **kwargs):
        if parent:
            parent.parent = None
        if state_vector is not None and not isinstance(state_vector, StateVector):
            state_vector = StateVector(state_vector)
        super().__init__(state_vector, weight, parent, *args, **kwargs)

    @property
    def ndim(self):
        return self.state_vector.shape[0]
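A short usage sketch may help here; the import path follows the repo layout recorded above, and the state values are invented:

import numpy as np

from stonesoup.types.particle import Particle  # path per stonesoup/types/particle.py

parent = Particle(np.array([[0.0], [1.0]]), weight=0.5)
child = Particle([[0.1], [1.1]], weight=0.5, parent=parent)  # plain list is coerced to a StateVector
assert child.ndim == 2               # first dimension of the state vector
assert child.parent.parent is None   # __init__ drops the grandparent link, keeping one level of history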
29.321429
82
0.666261
from ..base import Property
from .array import StateVector
from .base import Type


class Particle(Type):
    state_vector: StateVector = Property(doc="State vector")
    weight: float = Property(doc='Weight of particle')
    parent: 'Particle' = Property(default=None, doc='Parent particle')

    def __init__(self, state_vector, weight, parent=None, *args, **kwargs):
        if parent:
            parent.parent = None
        if state_vector is not None and not isinstance(state_vector, StateVector):
            state_vector = StateVector(state_vector)
        super().__init__(state_vector, weight, parent, *args, **kwargs)

    @property
    def ndim(self):
        return self.state_vector.shape[0]
true
true
7902f16b5ae36d13d542e5602e3cf18b4356c040
4,957
py
Python
bokeh/server/tests/test_tornado.py
chinasaur/bokeh
d3662a871679adf2cc8f95b80a51120db4dcccd4
[ "BSD-3-Clause" ]
6
2018-02-13T11:06:40.000Z
2020-10-23T09:30:46.000Z
bokeh/server/tests/test_tornado.py
chinasaur/bokeh
d3662a871679adf2cc8f95b80a51120db4dcccd4
[ "BSD-3-Clause" ]
6
2018-11-29T15:40:20.000Z
2021-05-07T14:59:30.000Z
bokeh/server/tests/test_tornado.py
chinasaur/bokeh
d3662a871679adf2cc8f95b80a51120db4dcccd4
[ "BSD-3-Clause" ]
3
2018-06-20T11:43:40.000Z
2021-12-21T06:51:56.000Z
from __future__ import absolute_import, print_function

import logging

import bokeh.server.tornado as tornado
from bokeh.application import Application
from bokeh.client import pull_session
from bokeh.server.views.static_handler import StaticHandler

from .utils import ManagedServerLoop, url

logging.basicConfig(level=logging.DEBUG)

def test_check_whitelist_rejects_port_mismatch():
    assert False == tornado.check_whitelist("foo:100", ["foo:101", "foo:102"])

def test_check_whitelist_rejects_name_mismatch():
    assert False == tornado.check_whitelist("foo:100", ["bar:100", "baz:100"])

def test_check_whitelist_accepts_name_port_match():
    assert True == tornado.check_whitelist("foo:100", ["foo:100", "baz:100"])

def test_check_whitelist_accepts_implicit_port_80():
    assert True == tornado.check_whitelist("foo", ["foo:80"])

def test_check_whitelist_accepts_all_on_star():
    assert True == tornado.check_whitelist("192.168.0.1", ['*'])
    assert True == tornado.check_whitelist("192.168.0.1:80", ['*'])
    assert True == tornado.check_whitelist("192.168.0.1:5006", ['*'])
    assert True == tornado.check_whitelist("192.168.0.1:80", ['*:80'])
    assert False == tornado.check_whitelist("192.168.0.1:80", ['*:81'])
    assert True == tornado.check_whitelist("192.168.0.1:5006", ['*:*'])
    assert True == tornado.check_whitelist("192.168.0.1", ['192.168.0.*'])
    assert True == tornado.check_whitelist("192.168.0.1:5006", ['192.168.0.*'])
    assert False == tornado.check_whitelist("192.168.1.1", ['192.168.0.*'])
    assert True == tornado.check_whitelist("foobarbaz", ['*'])
    assert True == tornado.check_whitelist("192.168.0.1", ['192.168.0.*'])
    assert False == tornado.check_whitelist("192.168.1.1", ['192.168.0.*'])
    assert False == tornado.check_whitelist("192.168.0.1", ['192.168.0.*:5006'])
    assert True == tornado.check_whitelist("192.168.0.1", ['192.168.0.*:80'])
    assert True == tornado.check_whitelist("foobarbaz", ['*'])
    assert True == tornado.check_whitelist("foobarbaz", ['*:*'])
    assert True == tornado.check_whitelist("foobarbaz", ['*:80'])
    assert False == tornado.check_whitelist("foobarbaz", ['*:5006'])
    assert True == tornado.check_whitelist("foobarbaz:5006", ['*'])
    assert True == tornado.check_whitelist("foobarbaz:5006", ['*:*'])
    assert True == tornado.check_whitelist("foobarbaz:5006", ['*:5006'])

def test_default_resources():
    application = Application()
    with ManagedServerLoop(application) as server:
        r = server._tornado.resources()
        assert r.mode == "server"
        assert r.root_url == ""
        assert r.path_versioner == StaticHandler.append_version

    with ManagedServerLoop(application, prefix="/foo/") as server:
        r = server._tornado.resources()
        assert r.mode == "server"
        assert r.root_url == "/foo/"
        assert r.path_versioner == StaticHandler.append_version

    with ManagedServerLoop(application, prefix="foo/") as server:
        r = server._tornado.resources()
        assert r.mode == "server"
        assert r.root_url == "/foo/"
        assert r.path_versioner == StaticHandler.append_version

    with ManagedServerLoop(application, prefix="foo") as server:
        r = server._tornado.resources()
        assert r.mode == "server"
        assert r.root_url == "/foo/"
        assert r.path_versioner == StaticHandler.append_version

    with ManagedServerLoop(application, prefix="/foo") as server:
        r = server._tornado.resources()
        assert r.mode == "server"
        assert r.root_url == "/foo/"
        assert r.path_versioner == StaticHandler.append_version

    with ManagedServerLoop(application, prefix="/foo/bar") as server:
        r = server._tornado.resources()
        assert r.mode == "server"
        assert r.root_url == "/foo/bar/"
        assert r.path_versioner == StaticHandler.append_version

def test_default_app_paths():
    app = Application()
    t = tornado.BokehTornado({}, "", [])
    assert t.app_paths == set()

    t = tornado.BokehTornado({"/": app}, "", [])
    assert t.app_paths == { "/" }

    t = tornado.BokehTornado({"/": app, "/foo": app}, "", [])
    assert t.app_paths == { "/", "/foo"}

# tried to use capsys to test what's actually logged and it wasn't
# working, in the meantime at least this tests that log_stats
# doesn't crash in various scenarios
def test_log_stats():
    application = Application()
    with ManagedServerLoop(application) as server:
        server._tornado.log_stats()
        session1 = pull_session(session_id='session1',
                                url=url(server),
                                io_loop=server.io_loop)
        session2 = pull_session(session_id='session2',
                                url=url(server),
                                io_loop=server.io_loop)
        server._tornado.log_stats()
        session1.close()
        session2.close()
        server._tornado.log_stats()
42.732759
80
0.65463
from __future__ import absolute_import, print_function

import logging

import bokeh.server.tornado as tornado
from bokeh.application import Application
from bokeh.client import pull_session
from bokeh.server.views.static_handler import StaticHandler

from .utils import ManagedServerLoop, url

logging.basicConfig(level=logging.DEBUG)

def test_check_whitelist_rejects_port_mismatch():
    assert False == tornado.check_whitelist("foo:100", ["foo:101", "foo:102"])

def test_check_whitelist_rejects_name_mismatch():
    assert False == tornado.check_whitelist("foo:100", ["bar:100", "baz:100"])

def test_check_whitelist_accepts_name_port_match():
    assert True == tornado.check_whitelist("foo:100", ["foo:100", "baz:100"])

def test_check_whitelist_accepts_implicit_port_80():
    assert True == tornado.check_whitelist("foo", ["foo:80"])

def test_check_whitelist_accepts_all_on_star():
    assert True == tornado.check_whitelist("192.168.0.1", ['*'])
    assert True == tornado.check_whitelist("192.168.0.1:80", ['*'])
    assert True == tornado.check_whitelist("192.168.0.1:5006", ['*'])
    assert True == tornado.check_whitelist("192.168.0.1:80", ['*:80'])
    assert False == tornado.check_whitelist("192.168.0.1:80", ['*:81'])
    assert True == tornado.check_whitelist("192.168.0.1:5006", ['*:*'])
    assert True == tornado.check_whitelist("192.168.0.1", ['192.168.0.*'])
    assert True == tornado.check_whitelist("192.168.0.1:5006", ['192.168.0.*'])
    assert False == tornado.check_whitelist("192.168.1.1", ['192.168.0.*'])
    assert True == tornado.check_whitelist("foobarbaz", ['*'])
    assert True == tornado.check_whitelist("192.168.0.1", ['192.168.0.*'])
    assert False == tornado.check_whitelist("192.168.1.1", ['192.168.0.*'])
    assert False == tornado.check_whitelist("192.168.0.1", ['192.168.0.*:5006'])
    assert True == tornado.check_whitelist("192.168.0.1", ['192.168.0.*:80'])
    assert True == tornado.check_whitelist("foobarbaz", ['*'])
    assert True == tornado.check_whitelist("foobarbaz", ['*:*'])
    assert True == tornado.check_whitelist("foobarbaz", ['*:80'])
    assert False == tornado.check_whitelist("foobarbaz", ['*:5006'])
    assert True == tornado.check_whitelist("foobarbaz:5006", ['*'])
    assert True == tornado.check_whitelist("foobarbaz:5006", ['*:*'])
    assert True == tornado.check_whitelist("foobarbaz:5006", ['*:5006'])

def test_default_resources():
    application = Application()
    with ManagedServerLoop(application) as server:
        r = server._tornado.resources()
        assert r.mode == "server"
        assert r.root_url == ""
        assert r.path_versioner == StaticHandler.append_version

    with ManagedServerLoop(application, prefix="/foo/") as server:
        r = server._tornado.resources()
        assert r.mode == "server"
        assert r.root_url == "/foo/"
        assert r.path_versioner == StaticHandler.append_version

    with ManagedServerLoop(application, prefix="foo/") as server:
        r = server._tornado.resources()
        assert r.mode == "server"
        assert r.root_url == "/foo/"
        assert r.path_versioner == StaticHandler.append_version

    with ManagedServerLoop(application, prefix="foo") as server:
        r = server._tornado.resources()
        assert r.mode == "server"
        assert r.root_url == "/foo/"
        assert r.path_versioner == StaticHandler.append_version

    with ManagedServerLoop(application, prefix="/foo") as server:
        r = server._tornado.resources()
        assert r.mode == "server"
        assert r.root_url == "/foo/"
        assert r.path_versioner == StaticHandler.append_version

    with ManagedServerLoop(application, prefix="/foo/bar") as server:
        r = server._tornado.resources()
        assert r.mode == "server"
        assert r.root_url == "/foo/bar/"
        assert r.path_versioner == StaticHandler.append_version

def test_default_app_paths():
    app = Application()
    t = tornado.BokehTornado({}, "", [])
    assert t.app_paths == set()

    t = tornado.BokehTornado({"/": app}, "", [])
    assert t.app_paths == { "/" }

    t = tornado.BokehTornado({"/": app, "/foo": app}, "", [])
    assert t.app_paths == { "/", "/foo"}

def test_log_stats():
    application = Application()
    with ManagedServerLoop(application) as server:
        server._tornado.log_stats()
        session1 = pull_session(session_id='session1',
                                url=url(server),
                                io_loop=server.io_loop)
        session2 = pull_session(session_id='session2',
                                url=url(server),
                                io_loop=server.io_loop)
        server._tornado.log_stats()
        session1.close()
        session2.close()
        server._tornado.log_stats()
true
true
7902f20ba68d27112a108b05cc9e3030605587d8
943
py
Python
autorenamer.py
hletrd/PyAstrophotography
27dbcae347705b0f73933cb39940f5d9fcce0f93
[ "MIT" ]
4
2019-01-17T17:58:13.000Z
2021-07-29T23:14:16.000Z
autorenamer.py
hletrd/PyAstrophotography
27dbcae347705b0f73933cb39940f5d9fcce0f93
[ "MIT" ]
null
null
null
autorenamer.py
hletrd/PyAstrophotography
27dbcae347705b0f73933cb39940f5d9fcce0f93
[ "MIT" ]
1
2017-06-22T07:53:39.000Z
2017-06-22T07:53:39.000Z
import astropy.io.fits as fits
import argparse, os, re, copy

parser = argparse.ArgumentParser()
parser.add_argument('--list', default='list.list')
parser.add_argument('--rename_by', default='DATA-TYP')
parser.add_argument('--reparse', default=0, type=int)

args = parser.parse_args()

def log(description):
    print(description)

try:
    lst_f = open(args.list, 'r')
except:
    error("List file list not found: " + args.list)

lst = lst_f.read()
lst = lst.replace('\r\n', '\n')
lst = lst.replace('\r', '\n')
lst = lst.split('\n')

log("Loading file(s)...")

for i in lst:
    try:
    #if True:
        hdulist = fits.open(i)
        hdulist.verify('fix')
        log("Loading file: " + i)
        headers = dict(hdulist[0].header)
        typ = headers[args.rename_by].strip()
        if args.reparse == 1:
            newname = typ + '_' + i.split('_')[-1]
        else:
            newname = typ + '_' + i
        log("Renamed to " + newname)
        os.rename(i, newname)
    except:
        log("Error while reading file " + i)
21.431818
54
0.645811
import astropy.io.fits as fits
import argparse, os, re, copy

parser = argparse.ArgumentParser()
parser.add_argument('--list', default='list.list')
parser.add_argument('--rename_by', default='DATA-TYP')
parser.add_argument('--reparse', default=0, type=int)

args = parser.parse_args()

def log(description):
    print(description)

try:
    lst_f = open(args.list, 'r')
except:
    error("List file list not found: " + args.list)

lst = lst_f.read()
lst = lst.replace('\r\n', '\n')
lst = lst.replace('\r', '\n')
lst = lst.split('\n')

log("Loading file(s)...")

for i in lst:
    try:
        hdulist = fits.open(i)
        hdulist.verify('fix')
        log("Loading file: " + i)
        headers = dict(hdulist[0].header)
        typ = headers[args.rename_by].strip()
        if args.reparse == 1:
            newname = typ + '_' + i.split('_')[-1]
        else:
            newname = typ + '_' + i
        log("Renamed to " + newname)
        os.rename(i, newname)
    except:
        log("Error while reading file " + i)
true
true
7902f2cd319fe51657f75734ec29382ba427d8ea
1,195
py
Python
view/libs/colorDialog.py
jsk1107/UDK_labeler
cd9071c775658dbb6056d3c65bcbc626a18f1f02
[ "Apache-2.0" ]
6
2021-03-08T02:29:05.000Z
2022-02-18T13:04:51.000Z
view/libs/colorDialog.py
7eta/udk_labeler
8cd8a86bc1a78647c0aaf81ca78e6e518fb86ceb
[ "Apache-2.0" ]
6
2021-03-03T14:18:46.000Z
2022-03-08T06:55:57.000Z
view/libs/colorDialog.py
7eta/udk_labeler
8cd8a86bc1a78647c0aaf81ca78e6e518fb86ceb
[ "Apache-2.0" ]
1
2021-03-08T02:29:07.000Z
2021-03-08T02:29:07.000Z
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QColorDialog, QDialogButtonBox

BB = QDialogButtonBox


class ColorDialog(QColorDialog):

    def __init__(self, parent=None):
        super(ColorDialog, self).__init__(parent)
        self.setOption(QColorDialog.ShowAlphaChannel)
        # The Mac native dialog does not support our restore button.
        self.setOption(QColorDialog.DontUseNativeDialog)

        # Add a restore defaults button.
        # The default is set at invocation time, so that it
        # works across dialogs for different elements.
        self.default = None
        self.bb = self.layout().itemAt(1).widget()
        self.bb.addButton(BB.RestoreDefaults)
        self.bb.clicked.connect(self.checkRestore)

    def getColor(self, value=None, title=None, default=None):
        self.default = default
        if title:
            self.setWindowTitle(title)
        if value:
            self.setCurrentColor(value)
        return self.currentColor() if self.exec_() else None

    def checkRestore(self, button):
        if self.bb.buttonRole(button) & BB.ResetRole and self.default:
            self.setCurrentColor(self.default)
35.147059
70
0.680335
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QColorDialog, QDialogButtonBox

BB = QDialogButtonBox


class ColorDialog(QColorDialog):

    def __init__(self, parent=None):
        super(ColorDialog, self).__init__(parent)
        self.setOption(QColorDialog.ShowAlphaChannel)
        self.setOption(QColorDialog.DontUseNativeDialog)

        self.default = None
        self.bb = self.layout().itemAt(1).widget()
        self.bb.addButton(BB.RestoreDefaults)
        self.bb.clicked.connect(self.checkRestore)

    def getColor(self, value=None, title=None, default=None):
        self.default = default
        if title:
            self.setWindowTitle(title)
        if value:
            self.setCurrentColor(value)
        return self.currentColor() if self.exec_() else None

    def checkRestore(self, button):
        if self.bb.buttonRole(button) & BB.ResetRole and self.default:
            self.setCurrentColor(self.default)
true
true
7902f30e420cc51a9b42e3cee0042d4c19b7a707
9,545
py
Python
lib/python2.7/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py
nishaero/wifi-userseg-ryu
1132f2c813b79eff755bdd1a9e73e7ad3980af7c
[ "Apache-2.0" ]
15
2018-04-26T08:17:18.000Z
2021-03-05T08:44:13.000Z
lib/python2.7/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py
nishaero/wifi-userseg-ryu
1132f2c813b79eff755bdd1a9e73e7ad3980af7c
[ "Apache-2.0" ]
71
2015-01-05T16:50:55.000Z
2020-09-30T19:17:47.000Z
lib/python2.7/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py
nishaero/wifi-userseg-ryu
1132f2c813b79eff755bdd1a9e73e7ad3980af7c
[ "Apache-2.0" ]
14
2015-02-15T22:19:18.000Z
2020-09-30T18:54:54.000Z
""" Current-flow betweenness centrality measures for subsets of nodes. """ # Copyright (C) 2010-2011 by # Aric Hagberg <hagberg@lanl.gov> # Dan Schult <dschult@colgate.edu> # Pieter Swart <swart@lanl.gov> # All rights reserved. # BSD license. __author__ = """Aric Hagberg (hagberg@lanl.gov)""" __all__ = ['current_flow_betweenness_centrality_subset', 'edge_current_flow_betweenness_centrality_subset'] import itertools import networkx as nx from networkx.algorithms.centrality.flow_matrix import * def current_flow_betweenness_centrality_subset(G,sources,targets, normalized=True, weight='weight', dtype=float, solver='lu'): r"""Compute current-flow betweenness centrality for subsets of nodes. Current-flow betweenness centrality uses an electrical current model for information spreading in contrast to betweenness centrality which uses shortest paths. Current-flow betweenness centrality is also known as random-walk betweenness centrality [2]_. Parameters ---------- G : graph A NetworkX graph sources: list of nodes Nodes to use as sources for current targets: list of nodes Nodes to use as sinks for current normalized : bool, optional (default=True) If True the betweenness values are normalized by b=b/(n-1)(n-2) where n is the number of nodes in G. weight : string or None, optional (default='weight') Key for edge data used as the edge weight. If None, then use 1 as each edge weight. dtype: data type (float) Default data type for internal matrices. Set to np.float32 for lower memory consumption. solver: string (default='lu') Type of linear solver to use for computing the flow matrix. Options are "full" (uses most memory), "lu" (recommended), and "cg" (uses least memory). Returns ------- nodes : dictionary Dictionary of nodes with betweenness centrality as the value. See Also -------- approximate_current_flow_betweenness_centrality betweenness_centrality edge_betweenness_centrality edge_current_flow_betweenness_centrality Notes ----- Current-flow betweenness can be computed in `O(I(n-1)+mn \log n)` time [1]_, where `I(n-1)` is the time needed to compute the inverse Laplacian. For a full matrix this is `O(n^3)` but using sparse methods you can achieve `O(nm{\sqrt k})` where `k` is the Laplacian matrix condition number. The space required is `O(nw) where `w` is the width of the sparse Laplacian matrix. Worse case is `w=n` for `O(n^2)`. If the edges have a 'weight' attribute they will be used as weights in this algorithm. Unspecified weights are set to 1. References ---------- .. [1] Centrality Measures Based on Current Flow. Ulrik Brandes and Daniel Fleischer, Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). LNCS 3404, pp. 533-544. Springer-Verlag, 2005. http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf .. [2] A measure of betweenness centrality based on random walks, M. E. J. Newman, Social Networks 27, 39-54 (2005). 
""" from networkx.utils import reverse_cuthill_mckee_ordering try: import numpy as np except ImportError: raise ImportError('current_flow_betweenness_centrality requires NumPy ', 'http://scipy.org/') try: import scipy except ImportError: raise ImportError('current_flow_betweenness_centrality requires SciPy ', 'http://scipy.org/') if G.is_directed(): raise nx.NetworkXError('current_flow_betweenness_centrality() ', 'not defined for digraphs.') if not nx.is_connected(G): raise nx.NetworkXError("Graph not connected.") n = G.number_of_nodes() ordering = list(reverse_cuthill_mckee_ordering(G)) # make a copy with integer labels according to rcm ordering # this could be done without a copy if we really wanted to mapping=dict(zip(ordering,range(n))) H = nx.relabel_nodes(G,mapping) betweenness = dict.fromkeys(H,0.0) # b[v]=0 for v in H for row,(s,t) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver): for ss in sources: i=mapping[ss] for tt in targets: j=mapping[tt] betweenness[s]+=0.5*np.abs(row[i]-row[j]) betweenness[t]+=0.5*np.abs(row[i]-row[j]) if normalized: nb=(n-1.0)*(n-2.0) # normalization factor else: nb=2.0 for v in H: betweenness[v]=betweenness[v]/nb+1.0/(2-n) return dict((ordering[k],v) for k,v in betweenness.items()) def edge_current_flow_betweenness_centrality_subset(G, sources, targets, normalized=True, weight='weight', dtype=float, solver='lu'): """Compute current-flow betweenness centrality for edges using subsets of nodes. Current-flow betweenness centrality uses an electrical current model for information spreading in contrast to betweenness centrality which uses shortest paths. Current-flow betweenness centrality is also known as random-walk betweenness centrality [2]_. Parameters ---------- G : graph A NetworkX graph sources: list of nodes Nodes to use as sources for current targets: list of nodes Nodes to use as sinks for current normalized : bool, optional (default=True) If True the betweenness values are normalized by b=b/(n-1)(n-2) where n is the number of nodes in G. weight : string or None, optional (default='weight') Key for edge data used as the edge weight. If None, then use 1 as each edge weight. dtype: data type (float) Default data type for internal matrices. Set to np.float32 for lower memory consumption. solver: string (default='lu') Type of linear solver to use for computing the flow matrix. Options are "full" (uses most memory), "lu" (recommended), and "cg" (uses least memory). Returns ------- nodes : dictionary Dictionary of edge tuples with betweenness centrality as the value. See Also -------- betweenness_centrality edge_betweenness_centrality current_flow_betweenness_centrality Notes ----- Current-flow betweenness can be computed in `O(I(n-1)+mn \log n)` time [1]_, where `I(n-1)` is the time needed to compute the inverse Laplacian. For a full matrix this is `O(n^3)` but using sparse methods you can achieve `O(nm{\sqrt k})` where `k` is the Laplacian matrix condition number. The space required is `O(nw) where `w` is the width of the sparse Laplacian matrix. Worse case is `w=n` for `O(n^2)`. If the edges have a 'weight' attribute they will be used as weights in this algorithm. Unspecified weights are set to 1. References ---------- .. [1] Centrality Measures Based on Current Flow. Ulrik Brandes and Daniel Fleischer, Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). LNCS 3404, pp. 533-544. Springer-Verlag, 2005. http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf .. 
[2] A measure of betweenness centrality based on random walks, M. E. J. Newman, Social Networks 27, 39-54 (2005). """ from networkx.utils import reverse_cuthill_mckee_ordering try: import numpy as np except ImportError: raise ImportError('current_flow_betweenness_centrality requires NumPy ', 'http://scipy.org/') try: import scipy except ImportError: raise ImportError('current_flow_betweenness_centrality requires SciPy ', 'http://scipy.org/') if G.is_directed(): raise nx.NetworkXError('edge_current_flow_betweenness_centrality ', 'not defined for digraphs.') if not nx.is_connected(G): raise nx.NetworkXError("Graph not connected.") n = G.number_of_nodes() ordering = list(reverse_cuthill_mckee_ordering(G)) # make a copy with integer labels according to rcm ordering # this could be done without a copy if we really wanted to mapping=dict(zip(ordering,range(n))) H = nx.relabel_nodes(G,mapping) betweenness=(dict.fromkeys(H.edges(),0.0)) if normalized: nb=(n-1.0)*(n-2.0) # normalization factor else: nb=2.0 for row,(e) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver): for ss in sources: i=mapping[ss] for tt in targets: j=mapping[tt] betweenness[e]+=0.5*np.abs(row[i]-row[j]) betweenness[e]/=nb return dict(((ordering[s],ordering[t]),v) for (s,t),v in betweenness.items()) # fixture for nose tests def setup_module(module): from nose import SkipTest try: import numpy import scipy except: raise SkipTest("NumPy not available")
36.155303
80
0.629649
__author__ = """Aric Hagberg (hagberg@lanl.gov)""" __all__ = ['current_flow_betweenness_centrality_subset', 'edge_current_flow_betweenness_centrality_subset'] import itertools import networkx as nx from networkx.algorithms.centrality.flow_matrix import * def current_flow_betweenness_centrality_subset(G,sources,targets, normalized=True, weight='weight', dtype=float, solver='lu'): from networkx.utils import reverse_cuthill_mckee_ordering try: import numpy as np except ImportError: raise ImportError('current_flow_betweenness_centrality requires NumPy ', 'http://scipy.org/') try: import scipy except ImportError: raise ImportError('current_flow_betweenness_centrality requires SciPy ', 'http://scipy.org/') if G.is_directed(): raise nx.NetworkXError('current_flow_betweenness_centrality() ', 'not defined for digraphs.') if not nx.is_connected(G): raise nx.NetworkXError("Graph not connected.") n = G.number_of_nodes() ordering = list(reverse_cuthill_mckee_ordering(G)) mapping=dict(zip(ordering,range(n))) H = nx.relabel_nodes(G,mapping) betweenness = dict.fromkeys(H,0.0) for row,(s,t) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver): for ss in sources: i=mapping[ss] for tt in targets: j=mapping[tt] betweenness[s]+=0.5*np.abs(row[i]-row[j]) betweenness[t]+=0.5*np.abs(row[i]-row[j]) if normalized: nb=(n-1.0)*(n-2.0) else: nb=2.0 for v in H: betweenness[v]=betweenness[v]/nb+1.0/(2-n) return dict((ordering[k],v) for k,v in betweenness.items()) def edge_current_flow_betweenness_centrality_subset(G, sources, targets, normalized=True, weight='weight', dtype=float, solver='lu'): from networkx.utils import reverse_cuthill_mckee_ordering try: import numpy as np except ImportError: raise ImportError('current_flow_betweenness_centrality requires NumPy ', 'http://scipy.org/') try: import scipy except ImportError: raise ImportError('current_flow_betweenness_centrality requires SciPy ', 'http://scipy.org/') if G.is_directed(): raise nx.NetworkXError('edge_current_flow_betweenness_centrality ', 'not defined for digraphs.') if not nx.is_connected(G): raise nx.NetworkXError("Graph not connected.") n = G.number_of_nodes() ordering = list(reverse_cuthill_mckee_ordering(G)) mapping=dict(zip(ordering,range(n))) H = nx.relabel_nodes(G,mapping) betweenness=(dict.fromkeys(H.edges(),0.0)) if normalized: nb=(n-1.0)*(n-2.0) else: nb=2.0 for row,(e) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver): for ss in sources: i=mapping[ss] for tt in targets: j=mapping[tt] betweenness[e]+=0.5*np.abs(row[i]-row[j]) betweenness[e]/=nb return dict(((ordering[s],ordering[t]),v) for (s,t),v in betweenness.items()) def setup_module(module): from nose import SkipTest try: import numpy import scipy except: raise SkipTest("NumPy not available")
true
true
7902f31209e73eb3224b41d7307ffa1377d9fc51
6,479
py
Python
runBatch/run_FlapGainSweep_BAR.py
ptrbortolotti/pCrunch
df2488891d8a0d884cb90edd5bb0412ac0af248f
[ "Apache-2.0" ]
5
2020-06-30T14:23:18.000Z
2021-09-02T08:06:24.000Z
runBatch/run_FlapGainSweep_BAR.py
ptrbortolotti/pCrunch
df2488891d8a0d884cb90edd5bb0412ac0af248f
[ "Apache-2.0" ]
6
2021-03-30T21:17:35.000Z
2022-01-10T16:50:44.000Z
runBatch/run_FlapGainSweep_BAR.py
ptrbortolotti/pCrunch
df2488891d8a0d884cb90edd5bb0412ac0af248f
[ "Apache-2.0" ]
9
2020-05-18T14:33:18.000Z
2022-01-05T08:38:18.000Z
""" A python script to run a parameter sweep """ # Python tools import numpy as np import yaml import os # WISDEM tools from wisdem.aeroelasticse import runFAST_pywrapper, CaseGen_General from wisdem.aeroelasticse.Util import FileTools # ROSCO tools from ROSCO_toolbox import controller as ROSCO_controller from ROSCO_toolbox import turbine as ROSCO_turbine from ROSCO_toolbox import utilities as ROSCO_utilities from pCrunch import CaseGen_Control, Analysis, Processing # FLAGS eagle = True multi = False # Controller tuning yaml if eagle: parameter_filename = '/home/nabbas/Documents/TurbineModels/ControllerYamls/BAR.yaml' else: parameter_filename = '../../Turbine_Tuning/BAR/BAR.yaml' # Generate case inputs for control related stuff input_params = ['zeta_flp', 'omega_flp'] DISCON_params = ['Flp_Kp', 'Flp_Ki'] # values = [[0.7], [2.73]] values = [np.around(np.arange(0.5, 2.5, 0.05), decimals=3), # use np.around to avoid precision issues np.around(np.arange(2.2, 3.5, 0.05) , decimals=3)] group = 1 # Some path specifics/ if eagle: FAST_InputFile = 'BAR_10p_75s.fst' # FAST input file (ext=.fst) FAST_directory = '/projects/bar/nabbas/TurbineModels/BAR_10p_75s' FAST_runDirectory = '/projects/bar/nabbas/batch_GainSweep_10p_75s_2' wind_dir = '/projects/bar/nabbas/TurbineModels/wind' dll_filename = '/home/nabbas/ROSCO_toolbox/ROSCO/build/libdiscon.so' Turbsim_exe = 'turbsim' FAST_exe = 'openfast' else: FAST_InputFile = 'OpenFAST_BAR_10.fst' # FAST input file (ext=.fst) FAST_directory = '/Users/nabbas/Documents/TurbineModels/BAR/OpenFAST_Models/BAR_10/' FAST_runDirectory = 'temp' wind_dir = '/Users/nabbas/Documents/TurbineModels/BAR/wind' dll_filename = '/Users/nabbas/Documents/TurbineModels/TurbineControllers/FortranControllers/ROSCO/build/libdiscon.dylib' Turbsim_exe = 'turbsim_dev' FAST_exe = 'openfast_dev' case_name_base = 'BAR_10p_75s' debug_level = 2 # Wind WindType = [3] Uref = [8.25, 10.25] seeds = [13428, 1524] # Time TMax = 330 # Turbine Definition D = 206 # Rotor Diameter z_hub = 137 # Tower Height # Multiprocessing/Eagle related if eagle: cores = 36 else: cores = 4 # Initialize CaseGen cgc = CaseGen_Control.CaseGen_Control(parameter_filename) # Modify some parameters cgc.path_params['FAST_InputFile'] = FAST_InputFile cgc.path_params['FAST_directory'] = FAST_directory cgc.AnalysisTime = TMax cgc.case_name_base = case_name_base cgc.D = D cgc.z_hub = z_hub cgc.debug_level = debug_level cgc.overwrite = True # Generate wind speeds cgc.seeds = seeds cgc.wind_dir = wind_dir cgc.Turbsim_exe = Turbsim_exe wind_file, wind_file_type = cgc.gen_turbwind(Uref) # Generate control case inputs # NOTE: Usually, group=1 is easiest. Then some baseline characteristics in group 0, etc... 
case_inputs, tuning_inputs = cgc.gen_control_cases(input_params, DISCON_params, values, group) # Add time specification if group 0 if group == 0: ci_key = list(case_inputs.keys())[0] TMax_list = [TMax]*len(case_inputs[ci_key]['vals']) case_inputs[("Fst", "TMax")] = {'vals': TMax_list, 'group': 0} else: case_inputs[("Fst", "TMax")] = {'vals': [TMax], 'group': 0} # DISCON case_inputs[('ServoDyn', 'DLL_FileName')] = {'vals': [dll_filename], 'group': 0} # Wind case_inputs[("InflowWind", "WindType")] = {'vals': [wind_file_type], 'group': 0} case_inputs[("InflowWind", "Filename")] = {'vals': [wind_file], 'group': 0} # FAST details fastBatch = runFAST_pywrapper.runFAST_pywrapper_batch(FAST_ver='OpenFAST', dev_branch=True) fastBatch.FAST_exe = FAST_exe # Path to executable fastBatch.FAST_InputFile = FAST_InputFile fastBatch.FAST_directory = FAST_directory fastBatch.FAST_runDirectory = FAST_runDirectory fastBatch.debug_level = debug_level # Generate cases case_list, case_name_list = CaseGen_General.CaseGen_General( case_inputs, dir_matrix=fastBatch.FAST_runDirectory, namebase=case_name_base) # Append case matrix with controller tuning parameters for file in os.listdir(fastBatch.FAST_runDirectory): if file.endswith(".yaml"): yfile = file yamldata = FileTools.load_yaml(os.path.join(fastBatch.FAST_runDirectory, yfile), package=1) CaseGen_Control.append_case_matrix_yaml( fastBatch.FAST_runDirectory, yfile, tuning_inputs, 'tuning_inputs') # Make sure flags are on var_out = [ # ElastoDyn "BldPitch1", "BldPitch2", "BldPitch3", "Azimuth", "RotSpeed", "GenSpeed", "NacYaw", "OoPDefl1", "IPDefl1", "TwstDefl1", "OoPDefl2", "IPDefl2", "TwstDefl2", "OoPDefl3", "IPDefl3", "TwstDefl3", "TwrClrnc1", "TwrClrnc2", "TwrClrnc3", "NcIMUTAxs", "NcIMUTAys", "NcIMUTAzs", "TTDspFA", "TTDspSS", "TTDspTwst", "PtfmSurge", "PtfmSway", "PtfmHeave", "PtfmRoll", "PtfmPitch", "PtfmYaw", "PtfmTAxt", "PtfmTAyt", "PtfmTAzt", "RootFxc1", "RootFyc1", "RootFzc1", "RootMxc1", "RootMyc1", "RootMzc1", "RootFxc2", "RootFyc2", "RootFzc2", "RootMxc2", "RootMyc2", "RootMzc2", "RootFxc3", "RootFyc3", "RootFzc3", "RootMxc3", "RootMyc3", "RootMzc3", "Spn1MLxb1", "Spn1MLyb1", "Spn1MLzb1", "Spn1MLxb2", "Spn1MLyb2", "Spn1MLzb2", "Spn1MLxb3", "Spn1MLyb3", "Spn1MLzb3", "RotThrust", "LSSGagFya", "LSSGagFza", "RotTorq", "LSSGagMya", "LSSGagMza", "YawBrFxp", "YawBrFyp", "YawBrFzp", "YawBrMxp", "YawBrMyp", "YawBrMzp", "TwrBsFxt", "TwrBsFyt", "TwrBsFzt", "TwrBsMxt", "TwrBsMyt", "TwrBsMzt", "TwHt1MLxt", "TwHt1MLyt", "TwHt1MLzt", # ServoDyn "GenPwr", "GenTq", # AeroDyn15 "RtArea", "RtVAvgxh", "B1N3Clrnc", "B2N3Clrnc", "B3N3Clrnc", "RtAeroCp", 'RtAeroCq', 'RtAeroCt', 'RtTSR', # InflowWind "Wind1VelX", "Wind1VelY", "Wind1VelZ", # FLAPS # "BLFLAP1", "BLFLAP2", "BLFLAP3", "RtVAvgxh", "OoPDefl1") ] channels = {} for var in var_out: channels[var] = True fastBatch.channels = channels fastBatch.case_list = case_list fastBatch.case_name_list = case_name_list if multi: fastBatch.run_multi(cores) # fastBatch.run_mpi() else: fastBatch.run_serial() # Post processing case_info = FileTools.load_yaml(FAST_runDirectory + '/case_matrix.yaml', package=1) outfiles = [FAST_runDirectory + fname + '.outb' for fname in case_info['Case_Name']] fp = Processing.FAST_Processing() fp.OpenFAST_outfile_list = outfiles fp.t0 = 30 fp.parallel_analysis = True fp.verbose=True fp.results_dir = os.path.join(run_dir,'stats') fp.save_LoadRanking = True fp.save_SummaryStats = True stats, load_ranking = fp.batch_processing()
34.280423
124
0.722334
import numpy as np
import yaml
import os
from wisdem.aeroelasticse import runFAST_pywrapper, CaseGen_General
from wisdem.aeroelasticse.Util import FileTools
from ROSCO_toolbox import controller as ROSCO_controller
from ROSCO_toolbox import turbine as ROSCO_turbine
from ROSCO_toolbox import utilities as ROSCO_utilities
from pCrunch import CaseGen_Control, Analysis, Processing

eagle = True
multi = False

if eagle:
    parameter_filename = '/home/nabbas/Documents/TurbineModels/ControllerYamls/BAR.yaml'
else:
    parameter_filename = '../../Turbine_Tuning/BAR/BAR.yaml'

input_params = ['zeta_flp', 'omega_flp']
DISCON_params = ['Flp_Kp', 'Flp_Ki']
values = [np.around(np.arange(0.5, 2.5, 0.05), decimals=3),
          np.around(np.arange(2.2, 3.5, 0.05), decimals=3)]
group = 1

if eagle:
    FAST_InputFile = 'BAR_10p_75s.fst'
    FAST_directory = '/projects/bar/nabbas/TurbineModels/BAR_10p_75s'
    FAST_runDirectory = '/projects/bar/nabbas/batch_GainSweep_10p_75s_2'
    wind_dir = '/projects/bar/nabbas/TurbineModels/wind'
    dll_filename = '/home/nabbas/ROSCO_toolbox/ROSCO/build/libdiscon.so'
    Turbsim_exe = 'turbsim'
    FAST_exe = 'openfast'
else:
    FAST_InputFile = 'OpenFAST_BAR_10.fst'
    FAST_directory = '/Users/nabbas/Documents/TurbineModels/BAR/OpenFAST_Models/BAR_10/'
    FAST_runDirectory = 'temp'
    wind_dir = '/Users/nabbas/Documents/TurbineModels/BAR/wind'
    dll_filename = '/Users/nabbas/Documents/TurbineModels/TurbineControllers/FortranControllers/ROSCO/build/libdiscon.dylib'
    Turbsim_exe = 'turbsim_dev'
    FAST_exe = 'openfast_dev'

case_name_base = 'BAR_10p_75s'
debug_level = 2

WindType = [3]
Uref = [8.25, 10.25]
seeds = [13428, 1524]

TMax = 330

D = 206
z_hub = 137

if eagle:
    cores = 36
else:
    cores = 4

cgc = CaseGen_Control.CaseGen_Control(parameter_filename)

cgc.path_params['FAST_InputFile'] = FAST_InputFile
cgc.path_params['FAST_directory'] = FAST_directory
cgc.AnalysisTime = TMax
cgc.case_name_base = case_name_base
cgc.D = D
cgc.z_hub = z_hub
cgc.debug_level = debug_level
cgc.overwrite = True

cgc.seeds = seeds
cgc.wind_dir = wind_dir
cgc.Turbsim_exe = Turbsim_exe
wind_file, wind_file_type = cgc.gen_turbwind(Uref)

case_inputs, tuning_inputs = cgc.gen_control_cases(input_params, DISCON_params, values, group)

if group == 0:
    ci_key = list(case_inputs.keys())[0]
    TMax_list = [TMax]*len(case_inputs[ci_key]['vals'])
    case_inputs[("Fst", "TMax")] = {'vals': TMax_list, 'group': 0}
else:
    case_inputs[("Fst", "TMax")] = {'vals': [TMax], 'group': 0}

case_inputs[('ServoDyn', 'DLL_FileName')] = {'vals': [dll_filename], 'group': 0}

case_inputs[("InflowWind", "WindType")] = {'vals': [wind_file_type], 'group': 0}
case_inputs[("InflowWind", "Filename")] = {'vals': [wind_file], 'group': 0}

fastBatch = runFAST_pywrapper.runFAST_pywrapper_batch(FAST_ver='OpenFAST', dev_branch=True)
fastBatch.FAST_exe = FAST_exe
fastBatch.FAST_InputFile = FAST_InputFile
fastBatch.FAST_directory = FAST_directory
fastBatch.FAST_runDirectory = FAST_runDirectory
fastBatch.debug_level = debug_level

case_list, case_name_list = CaseGen_General.CaseGen_General(
    case_inputs, dir_matrix=fastBatch.FAST_runDirectory, namebase=case_name_base)

for file in os.listdir(fastBatch.FAST_runDirectory):
    if file.endswith(".yaml"):
        yfile = file
yamldata = FileTools.load_yaml(os.path.join(fastBatch.FAST_runDirectory, yfile), package=1)

CaseGen_Control.append_case_matrix_yaml(
    fastBatch.FAST_runDirectory, yfile, tuning_inputs, 'tuning_inputs')

var_out = [
    "BldPitch1", "BldPitch2", "BldPitch3", "Azimuth", "RotSpeed", "GenSpeed", "NacYaw",
    "OoPDefl1", "IPDefl1", "TwstDefl1", "OoPDefl2", "IPDefl2", "TwstDefl2", "OoPDefl3",
    "IPDefl3", "TwstDefl3", "TwrClrnc1", "TwrClrnc2", "TwrClrnc3", "NcIMUTAxs", "NcIMUTAys",
    "NcIMUTAzs", "TTDspFA", "TTDspSS", "TTDspTwst", "PtfmSurge", "PtfmSway", "PtfmHeave",
    "PtfmRoll", "PtfmPitch", "PtfmYaw", "PtfmTAxt", "PtfmTAyt", "PtfmTAzt", "RootFxc1",
    "RootFyc1", "RootFzc1", "RootMxc1", "RootMyc1", "RootMzc1", "RootFxc2", "RootFyc2",
    "RootFzc2", "RootMxc2", "RootMyc2", "RootMzc2", "RootFxc3", "RootFyc3", "RootFzc3",
    "RootMxc3", "RootMyc3", "RootMzc3", "Spn1MLxb1", "Spn1MLyb1", "Spn1MLzb1", "Spn1MLxb2",
    "Spn1MLyb2", "Spn1MLzb2", "Spn1MLxb3", "Spn1MLyb3", "Spn1MLzb3", "RotThrust", "LSSGagFya",
    "LSSGagFza", "RotTorq", "LSSGagMya", "LSSGagMza", "YawBrFxp", "YawBrFyp", "YawBrFzp",
    "YawBrMxp", "YawBrMyp", "YawBrMzp", "TwrBsFxt", "TwrBsFyt", "TwrBsFzt", "TwrBsMxt",
    "TwrBsMyt", "TwrBsMzt", "TwHt1MLxt", "TwHt1MLyt", "TwHt1MLzt",
    "GenPwr", "GenTq",
    "RtArea", "RtVAvgxh", "B1N3Clrnc", "B2N3Clrnc", "B3N3Clrnc", "RtAeroCp", 'RtAeroCq',
    'RtAeroCt', 'RtTSR',
    "Wind1VelX", "Wind1VelY", "Wind1VelZ",
]

channels = {}
for var in var_out:
    channels[var] = True
fastBatch.channels = channels

fastBatch.case_list = case_list
fastBatch.case_name_list = case_name_list

if multi:
    fastBatch.run_multi(cores)
else:
    fastBatch.run_serial()


case_info = FileTools.load_yaml(FAST_runDirectory + '/case_matrix.yaml', package=1)
outfiles = [FAST_runDirectory + fname + '.outb' for fname in case_info['Case_Name']]

fp = Processing.FAST_Processing()
fp.OpenFAST_outfile_list = outfiles
fp.t0 = 30
fp.parallel_analysis = True
fp.verbose=True
fp.results_dir = os.path.join(run_dir,'stats')
fp.save_LoadRanking = True
fp.save_SummaryStats = True
stats, load_ranking = fp.batch_processing()
true
true
7902f36de311e71b3344188407ed5835b6f51866
2,924
py
Python
PyBank/main.py
abelgk/python-challenge
fbdc3344aac8702dff74d5a14f730dc7f0d105ff
[ "MIT" ]
null
null
null
PyBank/main.py
abelgk/python-challenge
fbdc3344aac8702dff74d5a14f730dc7f0d105ff
[ "MIT" ]
null
null
null
PyBank/main.py
abelgk/python-challenge
fbdc3344aac8702dff74d5a14f730dc7f0d105ff
[ "MIT" ]
null
null
null
import csv

source_file = "Resources/budget_data.csv"
output_file = "Resources/budget_data_analysis.txt"

#initialize months counter, total income, decrease and increase in revenue amounts
number_of_months = 0 # to track the total number of months
income_total = 0 #variable to hold total income as we iterate through the csv
previous_income = 0 #variable to hold previously eveluated value from csv
greatest_profit_increase = ["",0] #list to hold the greatest profit increase, inaitialized to lowest value 0
greatest_loss_decrease = ["",1000000000000] #list to hold the greatest loss decrease, inaitialized to highest value
change_in_pl = [] #list to hold change in profit/loss as we iterate through the csv
change_in_income = 0
#print (revenue_decrease)

with open(source_file) as budget_data:
    csv_reader = csv.DictReader(budget_data)
    for row in csv_reader:
        number_of_months = number_of_months + 1
        #print(row["Profit/Losses"])
        income_total = income_total + int(row["Profit/Losses"])
        #print(row)
        #trace the changes in amount
        change_in_income = int(row["Profit/Losses"]) - previous_income
        #print(change_in_income)
        #reinitiate the value to the record we completed evaluating
        previous_income = int(row["Profit/Losses"])
        #print(previous_income)
        #greatest increase
        if(change_in_income > greatest_profit_increase[1]):
            greatest_profit_increase[0] = row["Date"]
            greatest_profit_increase[1] = change_in_income
        #greatest decrease
        if(change_in_income < greatest_loss_decrease[1]):
            greatest_loss_decrease[0] = row["Date"]
            greatest_loss_decrease[1] = change_in_income
        #append to the change_in_pl for sum calculations
        #print(int(row['Profit/Losses']))
        change_in_pl.append(int(row['Profit/Losses']))

#calculate net profit or loss
net_profit = sum(change_in_pl)
#print(net_profit)

print()
print('Financial Anlysis')
print('--------------------------')
print("Total Months: " + str(number_of_months))
print("Total Income: " + "$" + str(net_profit))
print("Greatest Increase in Profits: " + str(greatest_profit_increase[0]) + " $" + str(greatest_profit_increase[1]))
print("Greatest Decrease in Profits: " + str(greatest_loss_decrease[0]) + " $" + str(greatest_loss_decrease[1]))

#write outup to text file
with open(output_file,"w") as results:
    results.write("Total Months: " + str(number_of_months))
    results.write("\n")
    results.write("Total Income: " + "$" + str(net_profit))
    results.write("\n")
    results.write("Greatest Increase in Profits: " + str(greatest_profit_increase[0]) + " $" + str(greatest_profit_increase[1]))
    results.write("\n")
    results.write("Greatest Decrease in Profits: " + str(greatest_loss_decrease[0]) + " $" + str(greatest_loss_decrease[1]))
45.6875
128
0.691518
import csv

source_file = "Resources/budget_data.csv"
output_file = "Resources/budget_data_analysis.txt"

number_of_months = 0
income_total = 0
previous_income = 0
greatest_profit_increase = ["",0]
greatest_loss_decrease = ["",1000000000000]
change_in_pl = []
change_in_income = 0

with open(source_file) as budget_data:
    csv_reader = csv.DictReader(budget_data)
    for row in csv_reader:
        number_of_months = number_of_months + 1
        income_total = income_total + int(row["Profit/Losses"])
        change_in_income = int(row["Profit/Losses"]) - previous_income
        previous_income = int(row["Profit/Losses"])
        if(change_in_income > greatest_profit_increase[1]):
            greatest_profit_increase[0] = row["Date"]
            greatest_profit_increase[1] = change_in_income
        if(change_in_income < greatest_loss_decrease[1]):
            greatest_loss_decrease[0] = row["Date"]
            greatest_loss_decrease[1] = change_in_income
        change_in_pl.append(int(row['Profit/Losses']))

net_profit = sum(change_in_pl)

print()
print('Financial Anlysis')
print('--------------------------')
print("Total Months: " + str(number_of_months))
print("Total Income: " + "$" + str(net_profit))
print("Greatest Increase in Profits: " + str(greatest_profit_increase[0]) + " $" + str(greatest_profit_increase[1]))
print("Greatest Decrease in Profits: " + str(greatest_loss_decrease[0]) + " $" + str(greatest_loss_decrease[1]))

with open(output_file,"w") as results:
    results.write("Total Months: " + str(number_of_months))
    results.write("\n")
    results.write("Total Income: " + "$" + str(net_profit))
    results.write("\n")
    results.write("Greatest Increase in Profits: " + str(greatest_profit_increase[0]) + " $" + str(greatest_profit_increase[1]))
    results.write("\n")
    results.write("Greatest Decrease in Profits: " + str(greatest_loss_decrease[0]) + " $" + str(greatest_loss_decrease[1]))
true
true
7902f3a437a67873a6e0d630b67eb7b60de96916
502
py
Python
it_purchase_project/it_purchase_app/migrations/0031_auto_20180607_1031.py
gokhankaraboga/test
6d01ab00b5c03f56e817283dce42330d22e29820
[ "MIT" ]
null
null
null
it_purchase_project/it_purchase_app/migrations/0031_auto_20180607_1031.py
gokhankaraboga/test
6d01ab00b5c03f56e817283dce42330d22e29820
[ "MIT" ]
null
null
null
it_purchase_project/it_purchase_app/migrations/0031_auto_20180607_1031.py
gokhankaraboga/test
6d01ab00b5c03f56e817283dce42330d22e29820
[ "MIT" ]
null
null
null
# Generated by Django 2.0.5 on 2018-06-07 10:31

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('it_purchase_app', '0030_auto_20180607_1020'),
    ]

    operations = [
        migrations.AlterField(
            model_name='purchase',
            name='manager_approval',
            field=models.CharField(blank=True, choices=[('Not Decided', 'Not Decided'), ('Yes', 'Yes'), ('No', 'No')], max_length=500, null=True),
        ),
    ]
26.421053
146
0.609562
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('it_purchase_app', '0030_auto_20180607_1020'),
    ]

    operations = [
        migrations.AlterField(
            model_name='purchase',
            name='manager_approval',
            field=models.CharField(blank=True, choices=[('Not Decided', 'Not Decided'), ('Yes', 'Yes'), ('No', 'No')], max_length=500, null=True),
        ),
    ]
true
true
7902f3bb9e15286cf85d566adf8d86dd4b7ac876
42,104
py
Python
app.py
Lwnlcks/Vivato
316bd4ee58b9d56e061aacf0757f69a1e1e7c4d0
[ "BSD-3-Clause" ]
6
2020-05-20T00:19:41.000Z
2021-05-09T09:43:24.000Z
app.py
BlueWhiteDev/VientoEngine
9ab061576e5a98aafa2875e99f1e466e40a8c961
[ "BSD-3-Clause" ]
8
2021-01-13T15:37:30.000Z
2021-08-11T15:20:14.000Z
app.py
BlueWhiteDev/VientoEngine
9ab061576e5a98aafa2875e99f1e466e40a8c961
[ "BSD-3-Clause" ]
5
2020-05-30T06:50:49.000Z
2021-07-31T13:33:22.000Z
from sanic import Sanic, response, Blueprint
from sanic.request import RequestParameters
from sanic_jinja2 import SanicJinja2
from sanic_session import Session, AIORedisSessionInterface

import aiosqlite
import aiofiles
import aioredis

import asyncio
import json
import html
import sys
import os
import re

from route.tool.tool import *
from route.mark.py.namumark import *

setting_data = json.loads(open('data/setting.json', encoding = 'utf8').read())

version_load = json.loads(open('data/version.json', encoding='utf-8').read())
engine_version = version_load["main"]["engine_version"]
markup_version = version_load["main"]["markup_version"]
build_count = version_load["main"]["build_count"]
renew_count = version_load["main"]["renew_count"]

print('')
print('VientoEngine')
print('engine_version : ' + engine_version)
print('markup_version : ' + markup_version)
print('build_count : ' + build_count)
print('renew_count : ' + renew_count)
print('')

for route_file in os.listdir("route"):
    py_file = re.search(r"(.+)\.py$", route_file)
    if py_file:
        py_file = py_file.groups()[0]
        exec("from route." + py_file + " import *")

## Wiki settings
async def run():
    server_setting = {
        "host" : {
            "setting": "host",
            "default": "0.0.0.0"
        },
        "port" : {
            "setting": "port",
            "default": "3000"
        },
        "lang" : {
            "setting": "lang",
            "default": "ko-KR",
            "list" : ["ko-KR", "en-US"]
        },
        "encode" : {
            "setting": "encode",
            "default": "pbkdf2-sha512",
            "list" : ["sha3", "sha256", "pbkdf2-sha512"]
        }
    }

    try:
        async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
            setting_data = json.loads(await f.read())

        if not 'db_type' and 'db_name' and 'host' and 'port' in setting_data:
            try:
                os.remove('data/setting.json')
            except:
                print('Error : Please delete data/setting.json')
            raise
        else:
            print('db_type : ' + setting_data['db_type'])
            print('db_name : ' + setting_data['db_name'])
            print('\n', end='')
            print('host : ' + setting_data['host'])
            print('port : ' + setting_data['port'])
    except:
        setting_json = ['sqlite', '', '', '']
        db_type = ['sqlite']
        print('db_type : sqlite')
        print('db_name : ', end = '')
        setting_json[1] = str(input())
        if setting_json[1] == '':
            setting_json[1] = 'data'
        print('\n', end='')
        print('host (' + server_setting['host']['default'] + ') : ', end = '')
        setting_json[2] = str(input())
        if setting_json[2] == '':
            setting_json[2] = server_setting['host']['default']
        print('port (' + server_setting['port']['default'] + ') : ', end = '')
        setting_json[3] = str(input())
        if setting_json[3] == '':
            setting_json[3] = server_setting['port']['default']
        async with aiofiles.open('data/setting.json', 'w', encoding = 'utf8') as f:
            await f.write('{ "db_name" : "' + setting_json[1] + '", "db_type" : "' + setting_json[0] + '", "host" : "' + setting_json[2] + '", "port" : "' + setting_json[3] + '" }')
        async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
            setting_data = json.loads(await f.read())

    db = await aiosqlite.connect(setting_data['db_name'] + '.db')

    db_create = {}
    db_create['table'] = ['doc', 'doc_cac', 'doc_his', 'rec_dis', 'rec_ban', 'rec_log', 'mbr', 'mbr_set', 'mbr_log', 'ban', 'dis', 'dis_log', 'acl', 'backlink', 'wiki_set', 'list_per', 'list_fil', 'html_fil', 'list_alarm', 'list_watch', 'list_inter']
    for i in db_create['table']:
        try:
            await db.execute('select test from ' + i + ' limit 1')
        except:
            try:
                await db.execute('create table ' + i + '(test longtext)')
            except:
                await db.execute("alter table " + i + " add test longtext default ''")

    db_setup = 0
    try:
        db_ver = await db.execute('select data from wiki_set where name = "db_ver"')
        db_ver = await db_ver.fetchall()
        if not db_ver:
            db_setup = 1
        else:
            if int(version_load['main']['renew_count']) > int(db_ver[0][0]):
                db_setup = 1
    except:
        db_setup = 1

    if db_setup != 0:
        db_create['doc'] = ['title', 'data']
        db_create['doc_cac'] = ['title', 'data']
        db_create['doc_his'] = ['id', 'title', 'data', 'date', 'ip', 'send', 'leng', 'hide', 'type']
        db_create['rec_dis'] = ['title', 'sub', 'date', 'band', 'stop', 'agree']
        db_create['rec_ban'] = ['block', 'end', 'today', 'blocker', 'why', 'band']
        db_create['rec_log'] = ['who', 'what', 'time']
        db_create['mbr'] = ['id', 'pw', 'acl', 'date', 'email']
        db_create['mbr_set'] = ['name', 'id', 'data']
        db_create['mbr_log'] = ['name', 'ip', 'ua', 'today', 'sub']
        db_create['ban'] = ['block', 'end', 'why', 'band', 'login']
        db_create['dis'] = ['doc', 'title', 'id', 'state', 'date', 'agree']
        db_create['dis_log'] = ['id', 'data', 'date', 'ip', 'block', 'top', 'code', 'doc']
        db_create['acl'] = ['title', 'decu', 'dis', 'view', 'why']
        db_create['backlink'] = ['title', 'link', 'type']
        db_create['wiki_set'] = ['name', 'data', 'coverage']
        db_create['list_per'] = ['name', 'acl']
        db_create['list_fil'] = ['name', 'regex', 'sub']
        db_create['html_fil'] = ['html', 'kind', 'plus']
        db_create['list_alarm'] = ['name', 'data', 'date']
        db_create['list_watch'] = ['user', 'title']
        db_create['list_inter'] = ['title', 'link', 'icon']

        for create_table in db_create['table']:
            for create in db_create[create_table]:
                try:
                    await db.execute('select ' + create + ' from ' + create_table + ' limit 1')
                except:
                    await db.execute("alter table " + create_table + " add " + create + " longtext default ''")
                try:
                    await db.execute('create index index_' + create_table + '_' + create + ' on ' + create_table + '(' + create + ')')
                except:
                    pass

        await db.execute('delete from wiki_set where name = "db_ver"')
        await db.execute('insert into wiki_set (name, data) values (?, ?)', ["db_ver", version_load['main']['renew_count']])
        await db.commit()

    first_setup = await db.execute('select data from wiki_set where name = "lang"')
    first_setup = await first_setup.fetchall()
    if not first_setup:
        lang = server_setting['lang']['list'][0] + ', ' + server_setting['lang']['list'][1]
        print('lang [' + lang + '] (' + server_setting['lang']['default'] + ') : ', end = '')
        setting_lang = str(input())
        if setting_lang == '':
            setting_lang = server_setting['lang']['default']
        await db.execute('insert into wiki_set (name, data) values (?, ?)', ['lang', setting_lang])
        encode = server_setting['encode']['list'][0] + ', ' + server_setting['encode']['list'][1] + ', ' + server_setting['encode']['list'][2]
        print('encode [' + encode + '] (' + server_setting['encode']['default'] + ') : ', end = '')
        setting_encode = str(input())
        if setting_encode == '':
            setting_encode = server_setting['encode']['default']
        await db.execute('insert into wiki_set (name, data) values (?, ?)', ['encode', setting_encode])
        await db.commit()
    else:
        encode_check = await db.execute('select data from wiki_set where name = "encode"')
        encode_check = await encode_check.fetchall()
        print('lang : ' + first_setup[0][0])
        print('encode : ' + encode_check[0][0])

    print("\n", end='')

loop = asyncio.get_event_loop()
loop.run_until_complete(run())

app = Sanic(__name__)
jinja = SanicJinja2(app, pkg_path='skins')
session = Session(app)
app.static('/skins', './skins')

## Route settings
'''@app.listener('before_server_start')
async def server_init(app, loop):
    app.redis = await aioredis.create_pool(
        ('localhost', 6379),
        minsize=5,
        maxsize=10,
        loop=loop
    )
    session.init_app(app, interface=AIORedisSessionInterface(app.redis))'''

@app.route('/')
async def wiki_frontpage(request):
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    data_get = await db.execute("select data from wiki_set where name = ?", ['frontpage'])
    data_get = await data_get.fetchall()
    if data_get:
        return response.redirect('/w/' + data_get[0][0])
    else:
        return response.redirect('/w/FrontPage')

@app.route("/w/<name:string>")
async def wiki_read(request, name):
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    data = await db.execute("select data from doc where title = ?", [name])
    data = await data.fetchall()
    if data:
        return jinja.render("index.html", request,
            wiki_set = await wiki_set(request, name),
            data = await namumark(data[0][0]),
            title = name,
            sub = 0,
            menu = [['edit/' + name, '편집'], ['discuss/' + name, '토론'], ['backlink/' + name, '역링크'], ['history/' + name, '역사'], ['acl/' + name, 'ACL']]
        )
    else:
        return jinja.render("index.html", request,
            wiki_set = await wiki_set(request, name),
            data = "해당 문서를 찾을 수 없습니다.",
            title = name,
            sub = 0,
            menu = [['edit/' + name, '편집'], ['discuss/' + name, '토론'], ['backlink/' + name, '역링크'], ['history/' + name, '역사'], ['acl/' + name, 'ACL']]
        )

@app.route("/edit/<name:string>", methods=['POST', 'GET'])
async def wiki_edit(request, name):
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    data_get = await db.execute("select data from doc where title = ? ", [name])
    data_get = await data_get.fetchall()
    data = ""
    olddata = ''
    if data_get:
        data = data_get[0][0]
        olddata = data
    if request.method == 'POST':
        data = request.form.get('wiki_edit_textarea_1', '')
        send = request.form.get('wiki_edit_textbox_1', '')
        if data_get:
            if data_get[0][0] == data:
                return response.redirect("/w/" + name)
            else:
                data = re.sub('\n', '<br>', data)
                await db.execute("update doc set data = ? where title = ?", [data, name])
                await db.commit()
                await history_add(name, data, await date_time(), await user_name(request), send, str(len(data) - len(olddata)))
                return response.redirect("/w/" + name)
        else:
            data = re.sub('\n', '<br>', data)
            await db.execute("insert into doc (title, data) values (?, ?)", [name, data])
            await db.commit()
            await history_add(name, data, await date_time(), await user_name(request), send, str(len(data)))
            return response.redirect("/w/" + name)
    return jinja.render("index.html", request,
        wiki_set = await wiki_set(request, name),
        data = '''
            <form method="post">
                <textarea rows="25" class="wiki_textarea" name="wiki_edit_textarea_1">''' + html.escape(re.sub('<br>', '\n', data)) + '''</textarea>
                <hr class="wiki_hr">
                <input type="text" placeholder="요약" class="wiki_textbox" name="wiki_edit_textbox_1">
                <hr class="wiki_hr">
                <button type="submit" class="wiki_button" name="wiki_edit_button_1">저장</button>
            </form>
        ''',
        title = name,
        sub = '편집',
        menu = [['delete/' + name, '삭제'], ['move/' + name, '이동'], ['w/' + name, '문서']]
    )

@app.route("/history/<name:string>")
async def wiki_history(request, name):
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    data = '''
        <table class="wiki_history_table">
            <tr class="wiki_history_table_top">
                <td class="wiki_table_history_top">문서</td>
                <td class="wiki_table_history_top">편집자</td>
                <td class="wiki_table_history_top">시간</td>
            </tr>
    '''
    data_get = await db.execute("select id, title, date, ip, send, leng from doc_his where title = ? order by id + 0 desc limit 30", [name])
    data_get = await data_get.fetchall()
    for history_data in data_get:
        if data_get:
            data += '''
                <tr class="wiki_history_table_middle">
                    <td class="wiki_table_history"><a href="/w/''' + history_data[1] + '''">''' + history_data[1] + '''</a> (''' + history_data[5] + ''')</td>
                    <td class="wiki_table_history">''' + await user_link(history_data[3]) + '''</td>
                    <td class="wiki_table_history">''' + history_data[2] + '''
                </tr>
                <tr>
                    <td colspan="3" class="wiki_table_history">''' + history_data[4] + '''</td>
                </tr>
            '''
    data += '</table>'
    return jinja.render("index.html", request,
        wiki_set = await wiki_set(request, name),
        data = data,
        title = name,
        sub = '역사',
        menu = [['w/' + name, '문서']]
    )

@app.route("/delete/<name:string>", methods=['POST', 'GET'])
async def wiki_delete(request, name):
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    data_get = await db.execute("select data from doc where title = ? ", [name])
    data_get = await data_get.fetchall()
    if request.method == 'POST':
        send = request.form.get('wiki_delete_textbox_1', '')
        await db.execute("delete from doc where title = ?", [name])
        await db.commit()
        await history_add(name, '', await date_time(), await user_name(request), send, '0')
        return response.redirect("/w/" + name)
    if data_get:
        return jinja.render("index.html", request,
            wiki_set = await wiki_set(request, name),
            data = '''
                <form method="post">
                    <textarea class="wiki_textarea" name="wiki_dekete_textarea_1" readonly>''' + data_get[0][0] + '''</textarea>
                    <input type="text" placeholder="요약" class="wiki_textbox" name="wiki_delete_textbox_1">
                    <hr class="wiki_hr">
                    <button type="submit" class="wiki_button" name="wiki_delete_button_1">확인</button>
                </form>
            ''',
            title = name,
            sub = '삭제',
            menu = [['w/' + name, '문서']]
        )
    else:
        return response.redirect("/error/") # error page still to be implemented

@app.route("/move/<name:string>", methods=['POST', 'GET'])
async def wiki_move(request, name):
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    data_get = await db.execute("select data from doc where title = ? ", [name])
    data_get = await data_get.fetchall()
    if request.method == 'POST':
        change_name = request.form.get('wiki_move_textbox_1', '')
        send = request.form.get('wiki_move_textbox_2', '')
        await db.execute("update doc set title = ? where title = ?", [change_name, name])
        await db.execute("update doc_his set title = ? where title = ?", [change_name, name])
        await db.commit()
        await history_add(change_name, '', await date_time(), await user_name(request), send, '0')
        return response.redirect("/w/" + change_name)
    if data_get:
        return jinja.render("index.html", request,
            wiki_set = await wiki_set(request, name),
            data = '''
                <form method="post">
                    <input type="text" value="''' + name + '''" class="wiki_textbox" name="wiki_move_textbox_1">
                    <hr class="wiki_hr">
                    <input type="text" placeholder="요약" class="wiki_textbox" name="wiki_move_textbox_2">
                    <hr class="wiki_hr">
                    <button type="submit" class="wiki_button" name="wiki_move_button_1">확인</button>
                </form>
            ''',
            title = name,
            sub = '이동',
            menu = [['w/' + name, '문서']]
        )
    else:
        return response.redirect("/error/") # error page still to be implemented

@app.route("/revert/<name:string>", methods=['POST', 'GET'])
async def wiki_revert(request, name):
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    args = RequestParameters()
    num = request.args.get('num', '1')
    dbdata = await db.execute("select data from doc_his order by cast(id as integer) desc limit 1")
    dbdata = await dbdata.fetchall()
    current = dbdata[0][0]
    data_get = await db.execute("select data from doc_his where id = ?", [num])
    data_get = await data_get.fetchall()
    data_get = data_get[0][0]
    if request.method == 'POST':
        send = request.form.get('wiki_revert_textbox_2', '')
        data_get = re.sub('\n', '<br>', data_get)
        await db.execute("update doc set data = ? where title = ?", [data_get, name])
        await db.commit()
        await history_add(name, data_get, await date_time(), await user_name(request), send, str(len(current) - len(data_get)))
        return response.redirect("/w/" + name)
    if data_get:
        return jinja.render("index.html", request,
            wiki_set = await wiki_set(request, name),
            data = '''
                <form method="post">
                    <textarea rows="25" class="wiki_textarea" name="wiki_revert_textarea_1" readonly>''' + data_get + '''</textarea>
                    <hr class="wiki_hr">
                    <input type="text" placeholder="요약" class="wiki_textbox" name="wiki_revert_textbox_2">
                    <hr class="wiki_hr">
                    <button type="submit" class="wiki_button" name="wiki_revert_button_1">확인</button>
                </form>
            ''',
            title = name,
            sub = 'r' + num + ' 복구',
            menu = [['w/' + name, '문서']]
        )
    else:
        return response.redirect("/error/") # error page still to be implemented

@app.route("/member/signup", methods=['POST', 'GET'])
async def wiki_signup(request):
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    if request.ctx.session.get('id') == 1:
        return response.redirect('/')
    if request.method == 'POST':
        signup_id = request.form.get('wiki_signup_textbox_1', '')
        signup_password_1 = request.form.get('wiki_signup_textbox_2', '')
        signup_password_2 = request.form.get('wiki_signup_textbox_3', '')
        if not signup_password_1 and not signup_password_2:
            return response.redirect("/error/") # error page still to be implemented
        if signup_password_1 != signup_password_2:
            return response.redirect("/error/") # error page still to be implemented
        if re.search("(?:[^A-Za-z0-9가-힣])", signup_id):
            return response.redirect("/error/") # error page still to be implemented
        if len(signup_id) > 24 or len(signup_id) < 3:
            return response.redirect("/error/") # error page still to be implemented
        id_check = await db.execute("select id from mbr where id = ?", [signup_id])
        id_check = await id_check.fetchall()
        if id_check:
            return response.redirect("/error/")
        encode_password = await password_encode(signup_password_1, signup_id)
        first_check = await db.execute("select * from mbr limit 1")
        first_check = await first_check.fetchall()
        if not first_check:
            await db.execute("insert into mbr (id, pw, acl, date, email) values (?, ?, ?, ?, ?)", [signup_id, encode_password, 'owner', await date_time(), ''])
            await db.execute("insert into mbr_log (name, ip, ua, today) values (?, ?, ?, ?)", [signup_id, '0', '0', await date_time()])
            await db.commit()
            return response.redirect("/member/login")
        else:
            await db.execute("insert into mbr (id, pw, acl, date, email) values (?, ?, ?, ?, ?)", [signup_id, encode_password, 'member', await date_time(), '']) # when permissions are reworked later, change this from member to a directly selected role.
            await db.execute("insert into mbr_log (name, ip, ua, today) values (?, ?, ?, ?)", [signup_id, '0', '0', await date_time()])
            await db.commit()
            return response.redirect("/member/login")
    return jinja.render("index.html", request,
        wiki_set = await wiki_set(request, 0),
        data = '''
            <form method="post">
                <input type="text" placeholder="아이디" class="wiki_textbox" name="wiki_signup_textbox_1">
                <hr class="wiki_hr">
                <input type="password" placeholder="비밀번호" class="wiki_textbox" name="wiki_signup_textbox_2">
                <hr class="wiki_hr">
                <input type="password" placeholder="비밀번호 확인" class="wiki_textbox" name="wiki_signup_textbox_3">
                <hr class="wiki_hr">
                <button type="submit" class="wiki_button" name="wiki_signup_button_1">확인</button>
            </form>
        ''',
        title = '계정 만들기',
        sub = 0,
        menu = 0
    )

@app.route("/member/login", methods=['POST', 'GET'])
async def wiki_login(request):
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    if request.ctx.session.get('id') == 1:
        return response.redirect('/')
    if request.method == 'POST':
        wiki_id = request.form.get('wiki_login_textbox_1', '')
        wiki_password = request.form.get('wiki_login_textbox_2', '')
        wiki_pass_check = await VerifyAuth(wiki_id, wiki_password, 0)
        if wiki_pass_check == 1:
            request.ctx.session['id'] = wiki_id
            return response.redirect("/")
        else:
            return response.redirect('/error/') # error page still to be implemented
    return jinja.render("index.html", request,
        wiki_set = await wiki_set(request, 0),
        data = '''
            <form method="post">
                <input type="text" placeholder="아이디" class="wiki_textbox" name="wiki_login_textbox_1">
                <hr class="wiki_hr">
                <input type="password" placeholder="비밀번호" class="wiki_textbox" name="wiki_login_textbox_2">
                <hr class="wiki_hr">
                <button type="submit" class="wiki_button" name="wiki_login_button_1">확인</button>
            </form>
        ''',
        title = '로그인',
        sub = 0,
        menu = 0
    )

@app.route("/member/logout", methods=['POST', 'GET'])
async def wiki_logout(request):
    if not request.ctx.session.get('id') or request.ctx.session.get('id') == 0:
        return response.redirect('/')
    request.ctx.session['id'] = 0
    return response.redirect("/")

@app.route("/discuss/<name:string>", methods=['POST', 'GET'])
async def wiki_discuss(request, name):
    async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
        setting_data = json.loads(await f.read())
    db = await aiosqlite.connect(setting_data['db_name'] + '.db')
    data = ''
    discuss_get = await db.execute("select title, id, state, date, agree from dis where doc = ?", [name])
    discuss_get = await discuss_get.fetchall()
    if discuss_get:
        for discuss in discuss_get:
            data += '<h2><a href="/discuss/' + name + '/' + discuss[1] + '">' + discuss[1] + '. ' + discuss[0] + '</a></h2><hr class="wiki_hr">'
    if request.method == "POST":
        discuss_title = request.form.get('wiki_discuss_textbox_1', '')
        discuss_data = request.form.get('wiki_discuss_textarea_1', '')
        if discuss_title == '' or discuss_data == '':
            return response.redirect("/error/") # error handling still to be implemented
        discuss_number = await db.execute("select id from dis where doc = ?
order by id desc", [name]) discuss_number = await discuss_number.fetchall() if not discuss_number: discuss_id = '1' else: discuss_id = str(int(discuss_number[0][0]) + 1) await db.execute("insert into dis (doc, title, id, state, date, agree) values (?, ?, ?, 'normal', ?, '0')", [name, discuss_title, discuss_id, await date_time()]) await db.execute("insert into dis_log (id, data, date, ip, block, code, doc) values (?, ?, ?, ?, '0', ?, ?)", ['1', discuss_data, await date_time(), await user_name(request), discuss_id, name]) await db.commit() return response.redirect("/discuss/" + name + '/' + discuss_id) return jinja.render("index.html", request, wiki_set = await wiki_set(request, name), data = data + ''' <form method="post"> <input type="text" placeholder="토론 제목" class="wiki_textbox" name="wiki_discuss_textbox_1"> <hr class="wiki_hr"> <textarea placeholder="토론 내용" class="wiki_textarea" name="wiki_discuss_textarea_1"></textarea> <hr class="wiki_hr"> <button type="submit" class="wiki_button" name="wiki_discuss_button_1">확인</button> </form> ''', title = name, sub = '토론', menu = [['w/' + name, '문서']] ) @app.route("/discuss/<name:string>/<num:int>", methods=['POST', 'GET']) async def wiki_discuss_thread(request, name, num): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') data = '' thread_list = await db.execute("select id, data, date, ip, block, top from dis_log where code = ? and doc = ?", [num, name]) thread_list = await thread_list.fetchall() thread_user = await db.execute("select ip from dis_log where id = '1'") thread_user = await thread_user.fetchall() if not thread_list: return response.redirect("/error/") # 오류 구현 필요 for thread_data in thread_list: # 비효율적인 구조, 추후 개선 예정. if thread_data[3] != '1' and thread_user[0][0] == thread_data[3]: data += ''' <div class="wiki_thread_table_first"> <div class="wiki_thread_table_top"> ''' + thread_data[0] + ''' ''' + thread_data[3] + ''' ''' + thread_data[4] + ''' </div> <div class="wiki_thread_table_bottom"> ''' + thread_data[1] + ''' </div> </div> ''' elif thread_data[3] != '1' and thread_user[0][0] != thread_data[3]: data += ''' <div class="wiki_thread_table_other"> <div class="wiki_thread_table_top"> ''' + thread_data[0] + ''' ''' + thread_data[3] + ''' ''' + thread_data[4] + ''' </div> <div class="wiki_thread_table_bottom"> ''' + thread_data[1] + ''' </div> </div> ''' elif thread_data[3] == '1' and thread_user[0][0] == thread_data[3]: data += ''' <div class="wiki_thread_table_first_blind"> <div class="wiki_thread_table_top"> ''' + thread_data[0] + ''' ''' + thread_data[3] + ''' ''' + thread_data[2] + ''' </div> <div class="wiki_thread_table_bottom"> 블라인드된 스레드입니다. </div> </div> ''' else: data += ''' <div class="wiki_thread_table_other_blind"> <div class="wiki_thread_table_top"> ''' + thread_data[0] + ''' ''' + thread_data[3] + ''' ''' + thread_data[2] + ''' </div> <div class="wiki_thread_table_bottom"> 블라인드된 스레드입니다. </div> </div> ''' if request.method == "POST": textarea_data = request.form.get('wiki_thread_textarea_1') if not textarea_data: return response.redirect("/error/") discuss_num = await db.execute("select id from dis_log where doc = ? 
order by id desc", [name]) discuss_num = await discuss_num.fetchall() discuss_num = int(discuss_num[0][0]) + 1 await db.execute("insert into dis_log (id, data, date, ip, block, top, code, doc) values (?, ?, ?, ?, '0', '0', ?, ?)", [discuss_num, textarea_data, await date_time(), await user_name(request), num, name]) await db.commit() return response.redirect("/discuss/" + name + "/" + str(num)) return jinja.render("index.html", request, wiki_set = await wiki_set(request, name), data = data + ''' <form method="post"> <textarea class="wiki_textarea" name="wiki_thread_textarea_1"></textarea> <hr class="wiki_hr"> <button type="submit" class="wiki_button" name="wiki_thread_button_1">확인</button> </form> ''', title = name, sub = '토론', menu = [['w/' + name, '문서']] ) @app.route("/discuss/<name:string>/<num:int>/setting", methods=['POST', 'GET']) async def wiki_discuss_thread_setting(request, name, num): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') discuss_title = await db.execute("select title from dis where doc = ? and id = ?", [name, num]) discuss_title = await discuss_title.fetchall() discuss_doc = await db.execute("select doc from dis where doc = ? and id = ?", [name, num]) discuss_doc = await discuss_doc.fetchall() if request.method == 'POST': change_title = request.form.get('wiki_thread_textbox_setting_1', '') change_doc = request.form.get('wiki_thread_textbox_setting_2', '') if change_title == '' or change_doc == '': return response.redirect("/error/") if change_title == discuss_title[0][0] and change_doc == discuss_doc[0][0]: return response.redirect("setting") if change_title != discuss_title[0][0]: await db.execute("update dis set title = ? where doc = ? and id = ?", [change_title, discuss_doc[0][0], str(num)]) await db.commit() return response.redirect("/discuss/" + discuss_doc[0][0] + "/" + str(num) + "/setting") if change_doc != discuss_doc[0][0]: number_check = await db.execute("select id from dis where doc = ? and id = ?", [change_doc, str(num)]) number_check = await number_check.fetchall() if number_check: discuss_renew_num = await db.execute("select id from dis where doc = ? order by id desc", [change_doc]) discuss_renew_num = await discuss_renew_num.fetchall() discuss_renew_num = str(int(discuss_renew_num[0][0]) + 1) await db.execute("update dis set doc = ?, id = ? where doc = ? and id = ?", [change_doc, discuss_renew_num, discuss_doc[0][0], str(num)]) await db.execute("update dis_log set code = ?, doc = ? where code = ? and doc = ?", [discuss_renew_num, change_doc, str(num), discuss_doc[0][0]]) await db.commit() return response.redirect("/discuss/" + change_doc + "/" + discuss_renew_num + "/setting") else: await db.execute("update dis set doc = ? where doc = ?", [change_doc, discuss_doc[0][0]]) await db.execute("update dis_log set doc = ? 
where doc = ?", [change_doc, discuss_doc[0][0]]) await db.commit() return response.redirect("/discuss/" + change_doc + "/" + str(num) + "/setting") return jinja.render("index.html", request, wiki_set = await wiki_set(request, name), data = ''' <form method="post"> <input class="wiki_textbox" name="wiki_thread_textbox_setting_1" value="''' + discuss_title[0][0] + '''"> <hr class="wiki_hr"> <input class="wiki_textbox" name="wiki_thread_textbox_setting_2" value="''' + discuss_doc[0][0] + '''"> <hr class="wiki_hr"> <button type="submit" class="wiki_button" name="wiki_thread_button_setting_1">확인</button> </form> ''', title = name, sub = '토론', menu = [['w/' + name, '문서']] ) @app.route("/recent/changes") async def wiki_recent_changes(request): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') data = ''' <table class="wiki_changes_table"> <tr class="wiki_changes_table_top"> <td class="wiki_table_changes_top">문서</td> <td class="wiki_table_changes_top">편집자</td> <td class="wiki_table_changes_top">시간</td> </tr> ''' data_get = await db.execute("select id, title, date, ip, send, leng from doc_his order by id + 0 desc limit 30") data_get = await data_get.fetchall() for history_data in data_get: if data_get: data += ''' <tr class="wiki_changes_table_middle"> <td class="wiki_table_changes"><a href="/w/''' + history_data[1] + '''">''' + history_data[1] + '''</a> (''' + history_data[5] + ''')</td> <td class="wiki_table_changes">''' + await user_link(history_data[3]) + '''</td> <td class="wiki_table_changes">''' + history_data[2] + ''' </tr> <tr> <td colspan="3" class="wiki_table_changes">''' + history_data[4] + '''</td> </tr> ''' data += '</table>' return jinja.render("index.html", request, wiki_set = await wiki_set(request, 0), data = data, title = '최근 변경', sub = 0, menu = 0 ) @app.route("/recent/discuss") async def wiki_recent_discuss(request): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') data = ''' <table class="wiki_discuss_table"> <tr class="wiki_discuss_table_top"> <td class="wiki_table_discuss_top">토론</td> <td class="wiki_table_discuss_top">문서명</td> <td class="wiki_table_discuss_top">시간</td> </tr> ''' data_get = await db.execute("select doc, title, id, date from dis where state = ? order by date desc limit 30", ['normal']) data_get = await data_get.fetchall() for discuss_data in data_get: if data_get: data += ''' <tr class="wiki_discuss_table_middle"> <td class="wiki_table_discuss"><a href="/discuss/''' + discuss_data[0] + '''/''' + discuss_data[2] + '''">''' + discuss_data[1] + '''</a></td> <td class="wiki_table_discuss"><a href="/w/''' + discuss_data[0] + '''">''' + discuss_data[0] + '''</a></td> <td class="wiki_table_discuss">''' + discuss_data[3] + '''</td> </tr> ''' data += '</table>' return jinja.render("index.html", request, wiki_set = await wiki_set(request, 0), data = data, title = '최근 토론', sub = 0, menu = 0 ) @app.route("/raw/<name:string>") async def wiki_raw(request, name): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') args = RequestParameters() num = request.args.get('num', '1') raw_data = await db.execute("select data from doc_his where id = ? 
and title = ?", [num, name]) raw_data = await raw_data.fetchall() if raw_data: return jinja.render("index.html", request, wiki_set = await wiki_set(request, 0), data = '<textarea class="wiki_textarea" id="wiki_textarea_raw_1" readonly>' + raw_data[0][0] + '</textarea>', title = name, sub = 'r' + num + ' RAW', menu = [['w/' + name, '문서']] ) else: return response.redirect("/error/") @app.route("/diff/<name:string>") async def wiki_diff(request, name): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') args = RequestParameters() num1 = request.args.get('first', '1') num2 = request.args.get('second', '2') data_get = await db.execute("") @app.route("/manage") async def wiki_manage(request): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') @app.route("/manage/group") async def wiki_manage_group(request): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') data = '' li = '' permission_get = await db.execute("select name from list_per") permission_get = await permission_get.fetchall() if request.method == 'POST': return 0 for first in permission_get: li += '<li class="wiki_li" style="margin-left: 20px;"><a href="/manage/group/' + first[0] + '">' + first[0] + '</a></li>' return jinja.render("index.html", request, wiki_set = await wiki_set(request, 0), data = li, title = '권한 그룹', sub = 0, menu = [['manage', '이전']] ) @app.route("/manage/grant") async def wiki_manage_grant(request): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') @app.route("/manage/namespace") async def wiki_manage_namespace(request): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') @app.route("/manage/restart") async def wiki_manage_restart(request): try: os.execl(sys.executable, sys.executable, *sys.argv) except: try: os.execl(sys.executable, '"' + sys.executable + '"', *sys.argv) except: return response.redirect("/error/") @app.route("/manage/engine") async def wiki_manage_engine(request): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') @app.route("/manage/edit_filter") async def wiki_manage_edit_filter(request): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') @app.route("/manage/inter_wiki") async def wiki_manage_inter_wiki(request): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') if __name__ == "__main__": app.run(debug=False, access_log=False, host=setting_data['host'], port=setting_data['port'])
44.087958
251
0.548713
from sanic import Sanic, response, Blueprint from sanic.request import RequestParameters from sanic_jinja2 import SanicJinja2 from sanic_session import Session, AIORedisSessionInterface import aiosqlite import aiofiles import aioredis import asyncio import json import html import sys import os import re from route.tool.tool import * from route.mark.py.namumark import * setting_data = json.loads(open('data/setting.json', encoding = 'utf8').read()) version_load = json.loads(open('data/version.json', encoding='utf-8').read()) engine_version = version_load["main"]["engine_version"] markup_version = version_load["main"]["markup_version"] build_count = version_load["main"]["build_count"] renew_count = version_load["main"]["renew_count"] print('') print('VientoEngine') print('engine_version : ' + engine_version) print('markup_version : ' + markup_version) print('build_count : ' + build_count) print('renew_count : ' + renew_count) print('') for route_file in os.listdir("route"): py_file = re.search(r"(.+)\.py$", route_file) if py_file: py_file = py_file.groups()[0] exec("from route." + py_file + " import *") def run(): server_setting = { "host" : { "setting": "host", "default": "0.0.0.0" }, "port" : { "setting": "port", "default": "3000" }, "lang" : { "setting": "lang", "default": "ko-KR", "list" : ["ko-KR", "en-US"] }, "encode" : { "setting": "encode", "default": "pbkdf2-sha512", "list" : ["sha3", "sha256", "pbkdf2-sha512"] } } try: async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) if not 'db_type' and 'db_name' and 'host' and 'port' in setting_data: try: os.remove('data/setting.json') except: print('Error : Please delete data/setting.json') raise else: print('db_type : ' + setting_data['db_type']) print('db_name : ' + setting_data['db_name']) print('\n', end='') print('host : ' + setting_data['host']) print('port : ' + setting_data['port']) except: setting_json = ['sqlite', '', '', ''] db_type = ['sqlite'] print('db_type : sqlite') print('db_name : ', end = '') setting_json[1] = str(input()) if setting_json[1] == '': setting_json[1] = 'data' print('\n', end='') print('host (' + server_setting['host']['default'] + ') : ', end = '') setting_json[2] = str(input()) if setting_json[2] == '': setting_json[2] = server_setting['host']['default'] print('port (' + server_setting['port']['default'] + ') : ', end = '') setting_json[3] = str(input()) if setting_json[3] == '': setting_json[3] = server_setting['port']['default'] async with aiofiles.open('data/setting.json', 'w', encoding = 'utf8') as f: await f.write('{ "db_name" : "' + setting_json[1] + '", "db_type" : "' + setting_json[0] + '", "host" : "' + setting_json[2] + '", "port" : "' + setting_json[3] + '" }') async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') db_create = {} db_create['table'] = ['doc', 'doc_cac', 'doc_his', 'rec_dis', 'rec_ban', 'rec_log', 'mbr', 'mbr_set', 'mbr_log', 'ban', 'dis', 'dis_log', 'acl', 'backlink', 'wiki_set', 'list_per', 'list_fil', 'html_fil', 'list_alarm', 'list_watch', 'list_inter'] for i in db_create['table']: try: await db.execute('select test from ' + i + ' limit 1') except: try: await db.execute('create table ' + i + '(test longtext)') except: await db.execute("alter table " + i + " add test longtext default ''") db_setup = 0 try: db_ver = await db.execute('select data from wiki_set where name = "db_ver"') db_ver = await 
db_ver.fetchall() if not db_ver: db_setup = 1 else: if int(version_load['main']['renew_count']) > int(db_ver[0][0]): db_setup = 1 except: db_setup = 1 if db_setup != 0: db_create['doc'] = ['title', 'data'] db_create['doc_cac'] = ['title', 'data'] db_create['doc_his'] = ['id', 'title', 'data', 'date', 'ip', 'send', 'leng', 'hide', 'type'] db_create['rec_dis'] = ['title', 'sub', 'date', 'band', 'stop', 'agree'] db_create['rec_ban'] = ['block', 'end', 'today', 'blocker', 'why', 'band'] db_create['rec_log'] = ['who', 'what', 'time'] db_create['mbr'] = ['id', 'pw', 'acl', 'date', 'email'] db_create['mbr_set'] = ['name', 'id', 'data'] db_create['mbr_log'] = ['name', 'ip', 'ua', 'today', 'sub'] db_create['ban'] = ['block', 'end', 'why', 'band', 'login'] db_create['dis'] = ['doc', 'title', 'id', 'state', 'date', 'agree'] db_create['dis_log'] = ['id', 'data', 'date', 'ip', 'block', 'top', 'code', 'doc'] db_create['acl'] = ['title', 'decu', 'dis', 'view', 'why'] db_create['backlink'] = ['title', 'link', 'type'] db_create['wiki_set'] = ['name', 'data', 'coverage'] db_create['list_per'] = ['name', 'acl'] db_create['list_fil'] = ['name', 'regex', 'sub'] db_create['html_fil'] = ['html', 'kind', 'plus'] db_create['list_alarm'] = ['name', 'data', 'date'] db_create['list_watch'] = ['user', 'title'] db_create['list_inter'] = ['title', 'link', 'icon'] for create_table in db_create['table']: for create in db_create[create_table]: try: await db.execute('select ' + create + ' from ' + create_table + ' limit 1') except: await db.execute("alter table " + create_table + " add " + create + " longtext default ''") try: await db.execute('create index index_' + create_table + '_' + create + ' on ' + create_table + '(' + create + ')') except: pass await db.execute('delete from wiki_set where name = "db_ver"') await db.execute('insert into wiki_set (name, data) values (?, ?)', ["db_ver", version_load['main']['renew_count']]) await db.commit() first_setup = await db.execute('select data from wiki_set where name = "lang"') first_setup = await first_setup.fetchall() if not first_setup: lang = server_setting['lang']['list'][0] + ', ' + server_setting['lang']['list'][1] print('lang [' + lang + '] (' + server_setting['lang']['default'] + ') : ', end = '') setting_lang = str(input()) if setting_lang == '': setting_lang = server_setting['lang']['default'] await db.execute('insert into wiki_set (name, data) values (?, ?)', ['lang', setting_lang]) encode = server_setting['encode']['list'][0] + ', ' + server_setting['encode']['list'][1] + ', ' + server_setting['encode']['list'][2] print('encode [' + encode + '] (' + server_setting['encode']['default'] + ') : ', end = '') setting_encode = str(input()) if setting_encode == '': setting_encode = server_setting['encode']['default'] await db.execute('insert into wiki_set (name, data) values (?, ?)', ['encode', setting_encode]) await db.commit() else: encode_check = await db.execute('select data from wiki_set where name = "encode"') encode_check = await encode_check.fetchall() print('lang : ' + first_setup[0][0]) print('encode : ' + encode_check[0][0]) print("\n", end='') loop = asyncio.get_event_loop() loop.run_until_complete(run()) app = Sanic(__name__) jinja = SanicJinja2(app, pkg_path='skins') session = Session(app) app.static('/skins', './skins') p.route('/') async def wiki_frontpage(request): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') data_get = await 
db.execute("select data from wiki_set where name = ?", ['frontpage']) data_get = await data_get.fetchall() if data_get: return response.redirect('/w/' + data_get[0][0]) else: return response.redirect('/w/FrontPage') @app.route("/w/<name:string>") async def wiki_read(request, name): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') data = await db.execute("select data from doc where title = ?", [name]) data = await data.fetchall() if data: return jinja.render("index.html", request, wiki_set = await wiki_set(request, name), data = await namumark(data[0][0]), title = name, sub = 0, menu = [['edit/' + name, '편집'], ['discuss/' + name, '토론'], ['backlink/' + name, '역링크'], ['history/' + name, '역사'], ['acl/' + name, 'ACL']] ) else: return jinja.render("index.html", request, wiki_set = await wiki_set(request, name), data = "해당 문서를 찾을 수 없습니다.", title = name, sub = 0, menu = [['edit/' + name, '편집'], ['discuss/' + name, '토론'], ['backlink/' + name, '역링크'], ['history/' + name, '역사'], ['acl/' + name, 'ACL']] ) @app.route("/edit/<name:string>", methods=['POST', 'GET']) async def wiki_edit(request, name): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') data_get = await db.execute("select data from doc where title = ? ", [name]) data_get = await data_get.fetchall() data = "" olddata = '' if data_get: data = data_get[0][0] olddata = data if request.method == 'POST': data = request.form.get('wiki_edit_textarea_1', '') send = request.form.get('wiki_edit_textbox_1', '') if data_get: if data_get[0][0] == data: return response.redirect("/w/" + name) else: data = re.sub('\n', '<br>', data) await db.execute("update doc set data = ? where title = ?", [data, name]) await db.commit() await history_add(name, data, await date_time(), await user_name(request), send, str(len(data) - len(olddata))) return response.redirect("/w/" + name) else: data = re.sub('\n', '<br>', data) await db.execute("insert into doc (title, data) values (?, ?)", [name, data]) await db.commit() await history_add(name, data, await date_time(), await user_name(request), send, str(len(data))) return response.redirect("/w/" + name) return jinja.render("index.html", request, wiki_set = await wiki_set(request, name), data = ''' <form method="post"> <textarea rows="25" class="wiki_textarea" name="wiki_edit_textarea_1">''' + html.escape(re.sub('<br>', '\n', data)) + '''</textarea> <hr class="wiki_hr"> <input type="text" placeholder="요약" class="wiki_textbox" name="wiki_edit_textbox_1"> <hr class="wiki_hr"> <button type="submit" class="wiki_button" name="wiki_edit_button_1">저장</button> </form> ''', title = name, sub = '편집', menu = [['delete/' + name, '삭제'], ['move/' + name, '이동'], ['w/' + name, '문서']] ) @app.route("/history/<name:string>") async def wiki_history(request, name): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') data = ''' <table class="wiki_history_table"> <tr class="wiki_history_table_top"> <td class="wiki_table_history_top">문서</td> <td class="wiki_table_history_top">편집자</td> <td class="wiki_table_history_top">시간</td> </tr> ''' data_get = await db.execute("select id, title, date, ip, send, leng from doc_his where title = ? 
order by id + 0 desc limit 30", [name]) data_get = await data_get.fetchall() for history_data in data_get: if data_get: data += ''' <tr class="wiki_history_table_middle"> <td class="wiki_table_history"><a href="/w/''' + history_data[1] + '''">''' + history_data[1] + '''</a> (''' + history_data[5] + ''')</td> <td class="wiki_table_history">''' + await user_link(history_data[3]) + '''</td> <td class="wiki_table_history">''' + history_data[2] + ''' </tr> <tr> <td colspan="3" class="wiki_table_history">''' + history_data[4] + '''</td> </tr> ''' data += '</table>' return jinja.render("index.html", request, wiki_set = await wiki_set(request, name), data = data, title = name, sub = '역사', menu = [['w/' + name, '문서']] ) @app.route("/delete/<name:string>", methods=['POST', 'GET']) async def wiki_delete(request, name): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') data_get = await db.execute("select data from doc where title = ? ", [name]) data_get = await data_get.fetchall() if request.method == 'POST': send = request.form.get('wiki_delete_textbox_1', '') await db.execute("delete from doc where title = ?", [name]) await db.commit() await history_add(name, '', await date_time(), await user_name(request), send, '0') return response.redirect("/w/" + name) if data_get: return jinja.render("index.html", request, wiki_set = await wiki_set(request, name), data = ''' <form method="post"> <textarea class="wiki_textarea" name="wiki_dekete_textarea_1" readonly>''' + data_get[0][0] + '''</textarea> <input type="text" placeholder="요약" class="wiki_textbox" name="wiki_delete_textbox_1"> <hr class="wiki_hr"> <button type="submit" class="wiki_button" name="wiki_delete_button_1">확인</button> </form> ''', title = name, sub = '삭제', menu = [['w/' + name, '문서']] ) else: return response.redirect("/error/") @app.route("/move/<name:string>", methods=['POST', 'GET']) async def wiki_move(request, name): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') data_get = await db.execute("select data from doc where title = ? ", [name]) data_get = await data_get.fetchall() if request.method == 'POST': change_name = request.form.get('wiki_move_textbox_1', '') send = request.form.get('wiki_move_textbox_2', '') await db.execute("update doc set title = ? where title = ?", [change_name, name]) await db.execute("update doc_his set title = ? 
where title = ?", [change_name, name]) await db.commit() await history_add(change_name, '', await date_time(), await user_name(request), send, '0') return response.redirect("/w/" + change_name) if data_get: return jinja.render("index.html", request, wiki_set = await wiki_set(request, name), data = ''' <form method="post"> <input type="text" value="''' + name + '''" class="wiki_textbox" name="wiki_move_textbox_1"> <hr class="wiki_hr"> <input type="text" placeholder="요약" class="wiki_textbox" name="wiki_move_textbox_2"> <hr class="wiki_hr"> <button type="submit" class="wiki_button" name="wiki_move_button_1">확인</button> </form> ''', title = name, sub = '이동', menu = [['w/' + name, '문서']] ) else: return response.redirect("/error/") @app.route("/revert/<name:string>", methods=['POST', 'GET']) async def wiki_revert(request, name): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') args = RequestParameters() num = request.args.get('num', '1') dbdata = await db.execute("select data from doc_his order by cast(id as integer) desc limit 1") dbdata = await dbdata.fetchall() current = dbdata[0][0] data_get = await db.execute("select data from doc_his where id = ?", [num]) data_get = await data_get.fetchall() data_get = data_get[0][0] if request.method == 'POST': send = request.form.get('wiki_revert_textbox_2', '') data_get = re.sub('\n', '<br>', data_get) await db.execute("update doc set data = ? where title = ?", [data_get, name]) await db.commit() await history_add(name, data_get, await date_time(), await user_name(request), send, str(len(current) - len(data_get))) return response.redirect("/w/" + name) if data_get: return jinja.render("index.html", request, wiki_set = await wiki_set(request, name), data = ''' <form method="post"> <textarea rows="25" class="wiki_textarea" name="wiki_revert_textarea_1" readonly>''' + data_get + '''</textarea> <hr class="wiki_hr"> <input type="text" placeholder="요약" class="wiki_textbox" name="wiki_revert_textbox_2"> <hr class="wiki_hr"> <button type="submit" class="wiki_button" name="wiki_revert_button_1">확인</button> </form> ''', title = name, sub = 'r' + num + ' 복구', menu = [['w/' + name, '문서']] ) else: return response.redirect("/error/") @app.route("/member/signup", methods=['POST', 'GET']) async def wiki_signup(request): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') if request.ctx.session.get('id') == 1: return response.redirect('/') if request.method == 'POST': signup_id = request.form.get('wiki_signup_textbox_1', '') signup_password_1 = request.form.get('wiki_signup_textbox_2', '') signup_password_2 = request.form.get('wiki_signup_textbox_3', '') if not signup_password_1 and not signup_password_2: return response.redirect("/error/") if signup_password_1 != signup_password_2: return response.redirect("/error/") if re.search("(?:[^A-Za-z0-9가-힣])", signup_id): return response.redirect("/error/") if len(signup_id) > 24 or len(signup_id) < 3: return response.redirect("/error/") id_check = await db.execute("select id from mbr where id = ?", [signup_id]) id_check = await id_check.fetchall() if id_check: return response.redirect("/error/") encode_password = await password_encode(signup_password_1, signup_id) first_check = await db.execute("select * from mbr limit 1") first_check = await first_check.fetchall() if not 
first_check: await db.execute("insert into mbr (id, pw, acl, date, email) values (?, ?, ?, ?, ?)", [signup_id, encode_password, 'owner', await date_time(), '']) await db.execute("insert into mbr_log (name, ip, ua, today) values (?, ?, ?, ?)", [signup_id, '0', '0', await date_time()]) await db.commit() return response.redirect("/member/login") else: await db.execute("insert into mbr (id, pw, acl, date, email) values (?, ?, ?, ?, ?)", [signup_id, encode_password, 'member', await date_time(), '']) await db.execute("insert into mbr_log (name, ip, ua, today) values (?, ?, ?, ?)", [signup_id, '0', '0', await date_time()]) await db.commit() return response.redirect("/member/login") return jinja.render("index.html", request, wiki_set = await wiki_set(request, 0), data = ''' <form method="post"> <input type="text" placeholder="아이디" class="wiki_textbox" name="wiki_signup_textbox_1"> <hr class="wiki_hr"> <input type="password" placeholder="비밀번호" class="wiki_textbox" name="wiki_signup_textbox_2"> <hr class="wiki_hr"> <input type="password" placeholder="비밀번호 확인" class="wiki_textbox" name="wiki_signup_textbox_3"> <hr class="wiki_hr"> <button type="submit" class="wiki_button" name="wiki_signup_button_1">확인</button> </form> ''', title = '계정 만들기', sub = 0, menu = 0 ) @app.route("/member/login", methods=['POST', 'GET']) async def wiki_login(request): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') if request.ctx.session.get('id') == 1: return response.redirect('/') if request.method == 'POST': wiki_id = request.form.get('wiki_login_textbox_1', '') wiki_password = request.form.get('wiki_login_textbox_2', '') wiki_pass_check = await VerifyAuth(wiki_id, wiki_password, 0) if wiki_pass_check == 1: request.ctx.session['id'] = wiki_id return response.redirect("/") else: return response.redirect('/error/') return jinja.render("index.html", request, wiki_set = await wiki_set(request, 0), data = ''' <form method="post"> <input type="text" placeholder="아이디" class="wiki_textbox" name="wiki_login_textbox_1"> <hr class="wiki_hr"> <input type="password" placeholder="비밀번호" class="wiki_textbox" name="wiki_login_textbox_2"> <hr class="wiki_hr"> <button type="submit" class="wiki_button" name="wiki_login_button_1">확인</button> </form> ''', title = '로그인', sub = 0, menu = 0 ) @app.route("/member/logout", methods=['POST', 'GET']) async def wiki_logout(request): if not request.ctx.session.get('id') or request.ctx.session.get('id') == 0: return response.redirect('/') request.ctx.session['id'] = 0 return response.redirect("/") @app.route("/discuss/<name:string>", methods=['POST', 'GET']) async def wiki_discuss(request, name): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') data = '' discuss_get = await db.execute("select title, id, state, date, agree from dis where doc = ?", [name]) discuss_get = await discuss_get.fetchall() if discuss_get: for discuss in discuss_get: data += '<h2><a href="/discuss/' + name + '/' + discuss[1] + '">' + discuss[1] + '. 
' + discuss[0] + '</a></h2><hr class="wiki_hr">' if request.method == "POST": discuss_title = request.form.get('wiki_discuss_textbox_1', '') discuss_data = request.form.get('wiki_discuss_textarea_1', '') if discuss_title == '' or discuss_data == '': return response.redirect("/error/") discuss_number = await db.execute("select id from dis where doc = ? order by id desc", [name]) discuss_number = await discuss_number.fetchall() if not discuss_number: discuss_id = '1' else: discuss_id = str(int(discuss_number[0][0]) + 1) await db.execute("insert into dis (doc, title, id, state, date, agree) values (?, ?, ?, 'normal', ?, '0')", [name, discuss_title, discuss_id, await date_time()]) await db.execute("insert into dis_log (id, data, date, ip, block, code, doc) values (?, ?, ?, ?, '0', ?, ?)", ['1', discuss_data, await date_time(), await user_name(request), discuss_id, name]) await db.commit() return response.redirect("/discuss/" + name + '/' + discuss_id) return jinja.render("index.html", request, wiki_set = await wiki_set(request, name), data = data + ''' <form method="post"> <input type="text" placeholder="토론 제목" class="wiki_textbox" name="wiki_discuss_textbox_1"> <hr class="wiki_hr"> <textarea placeholder="토론 내용" class="wiki_textarea" name="wiki_discuss_textarea_1"></textarea> <hr class="wiki_hr"> <button type="submit" class="wiki_button" name="wiki_discuss_button_1">확인</button> </form> ''', title = name, sub = '토론', menu = [['w/' + name, '문서']] ) @app.route("/discuss/<name:string>/<num:int>", methods=['POST', 'GET']) async def wiki_discuss_thread(request, name, num): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') data = '' thread_list = await db.execute("select id, data, date, ip, block, top from dis_log where code = ? and doc = ?", [num, name]) thread_list = await thread_list.fetchall() thread_user = await db.execute("select ip from dis_log where id = '1'") thread_user = await thread_user.fetchall() if not thread_list: return response.redirect("/error/") for thread_data in thread_list: if thread_data[3] != '1' and thread_user[0][0] == thread_data[3]: data += ''' <div class="wiki_thread_table_first"> <div class="wiki_thread_table_top"> ''' + thread_data[0] + ''' ''' + thread_data[3] + ''' ''' + thread_data[4] + ''' </div> <div class="wiki_thread_table_bottom"> ''' + thread_data[1] + ''' </div> </div> ''' elif thread_data[3] != '1' and thread_user[0][0] != thread_data[3]: data += ''' <div class="wiki_thread_table_other"> <div class="wiki_thread_table_top"> ''' + thread_data[0] + ''' ''' + thread_data[3] + ''' ''' + thread_data[4] + ''' </div> <div class="wiki_thread_table_bottom"> ''' + thread_data[1] + ''' </div> </div> ''' elif thread_data[3] == '1' and thread_user[0][0] == thread_data[3]: data += ''' <div class="wiki_thread_table_first_blind"> <div class="wiki_thread_table_top"> ''' + thread_data[0] + ''' ''' + thread_data[3] + ''' ''' + thread_data[2] + ''' </div> <div class="wiki_thread_table_bottom"> 블라인드된 스레드입니다. </div> </div> ''' else: data += ''' <div class="wiki_thread_table_other_blind"> <div class="wiki_thread_table_top"> ''' + thread_data[0] + ''' ''' + thread_data[3] + ''' ''' + thread_data[2] + ''' </div> <div class="wiki_thread_table_bottom"> 블라인드된 스레드입니다. 
</div> </div> ''' if request.method == "POST": textarea_data = request.form.get('wiki_thread_textarea_1') if not textarea_data: return response.redirect("/error/") discuss_num = await db.execute("select id from dis_log where doc = ? order by id desc", [name]) discuss_num = await discuss_num.fetchall() discuss_num = int(discuss_num[0][0]) + 1 await db.execute("insert into dis_log (id, data, date, ip, block, top, code, doc) values (?, ?, ?, ?, '0', '0', ?, ?)", [discuss_num, textarea_data, await date_time(), await user_name(request), num, name]) await db.commit() return response.redirect("/discuss/" + name + "/" + str(num)) return jinja.render("index.html", request, wiki_set = await wiki_set(request, name), data = data + ''' <form method="post"> <textarea class="wiki_textarea" name="wiki_thread_textarea_1"></textarea> <hr class="wiki_hr"> <button type="submit" class="wiki_button" name="wiki_thread_button_1">확인</button> </form> ''', title = name, sub = '토론', menu = [['w/' + name, '문서']] ) @app.route("/discuss/<name:string>/<num:int>/setting", methods=['POST', 'GET']) async def wiki_discuss_thread_setting(request, name, num): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') discuss_title = await db.execute("select title from dis where doc = ? and id = ?", [name, num]) discuss_title = await discuss_title.fetchall() discuss_doc = await db.execute("select doc from dis where doc = ? and id = ?", [name, num]) discuss_doc = await discuss_doc.fetchall() if request.method == 'POST': change_title = request.form.get('wiki_thread_textbox_setting_1', '') change_doc = request.form.get('wiki_thread_textbox_setting_2', '') if change_title == '' or change_doc == '': return response.redirect("/error/") if change_title == discuss_title[0][0] and change_doc == discuss_doc[0][0]: return response.redirect("setting") if change_title != discuss_title[0][0]: await db.execute("update dis set title = ? where doc = ? and id = ?", [change_title, discuss_doc[0][0], str(num)]) await db.commit() return response.redirect("/discuss/" + discuss_doc[0][0] + "/" + str(num) + "/setting") if change_doc != discuss_doc[0][0]: number_check = await db.execute("select id from dis where doc = ? and id = ?", [change_doc, str(num)]) number_check = await number_check.fetchall() if number_check: discuss_renew_num = await db.execute("select id from dis where doc = ? order by id desc", [change_doc]) discuss_renew_num = await discuss_renew_num.fetchall() discuss_renew_num = str(int(discuss_renew_num[0][0]) + 1) await db.execute("update dis set doc = ?, id = ? where doc = ? and id = ?", [change_doc, discuss_renew_num, discuss_doc[0][0], str(num)]) await db.execute("update dis_log set code = ?, doc = ? where code = ? and doc = ?", [discuss_renew_num, change_doc, str(num), discuss_doc[0][0]]) await db.commit() return response.redirect("/discuss/" + change_doc + "/" + discuss_renew_num + "/setting") else: await db.execute("update dis set doc = ? where doc = ?", [change_doc, discuss_doc[0][0]]) await db.execute("update dis_log set doc = ? 
where doc = ?", [change_doc, discuss_doc[0][0]]) await db.commit() return response.redirect("/discuss/" + change_doc + "/" + str(num) + "/setting") return jinja.render("index.html", request, wiki_set = await wiki_set(request, name), data = ''' <form method="post"> <input class="wiki_textbox" name="wiki_thread_textbox_setting_1" value="''' + discuss_title[0][0] + '''"> <hr class="wiki_hr"> <input class="wiki_textbox" name="wiki_thread_textbox_setting_2" value="''' + discuss_doc[0][0] + '''"> <hr class="wiki_hr"> <button type="submit" class="wiki_button" name="wiki_thread_button_setting_1">확인</button> </form> ''', title = name, sub = '토론', menu = [['w/' + name, '문서']] ) @app.route("/recent/changes") async def wiki_recent_changes(request): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') data = ''' <table class="wiki_changes_table"> <tr class="wiki_changes_table_top"> <td class="wiki_table_changes_top">문서</td> <td class="wiki_table_changes_top">편집자</td> <td class="wiki_table_changes_top">시간</td> </tr> ''' data_get = await db.execute("select id, title, date, ip, send, leng from doc_his order by id + 0 desc limit 30") data_get = await data_get.fetchall() for history_data in data_get: if data_get: data += ''' <tr class="wiki_changes_table_middle"> <td class="wiki_table_changes"><a href="/w/''' + history_data[1] + '''">''' + history_data[1] + '''</a> (''' + history_data[5] + ''')</td> <td class="wiki_table_changes">''' + await user_link(history_data[3]) + '''</td> <td class="wiki_table_changes">''' + history_data[2] + ''' </tr> <tr> <td colspan="3" class="wiki_table_changes">''' + history_data[4] + '''</td> </tr> ''' data += '</table>' return jinja.render("index.html", request, wiki_set = await wiki_set(request, 0), data = data, title = '최근 변경', sub = 0, menu = 0 ) @app.route("/recent/discuss") async def wiki_recent_discuss(request): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') data = ''' <table class="wiki_discuss_table"> <tr class="wiki_discuss_table_top"> <td class="wiki_table_discuss_top">토론</td> <td class="wiki_table_discuss_top">문서명</td> <td class="wiki_table_discuss_top">시간</td> </tr> ''' data_get = await db.execute("select doc, title, id, date from dis where state = ? order by date desc limit 30", ['normal']) data_get = await data_get.fetchall() for discuss_data in data_get: if data_get: data += ''' <tr class="wiki_discuss_table_middle"> <td class="wiki_table_discuss"><a href="/discuss/''' + discuss_data[0] + '''/''' + discuss_data[2] + '''">''' + discuss_data[1] + '''</a></td> <td class="wiki_table_discuss"><a href="/w/''' + discuss_data[0] + '''">''' + discuss_data[0] + '''</a></td> <td class="wiki_table_discuss">''' + discuss_data[3] + '''</td> </tr> ''' data += '</table>' return jinja.render("index.html", request, wiki_set = await wiki_set(request, 0), data = data, title = '최근 토론', sub = 0, menu = 0 ) @app.route("/raw/<name:string>") async def wiki_raw(request, name): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') args = RequestParameters() num = request.args.get('num', '1') raw_data = await db.execute("select data from doc_his where id = ? 
and title = ?", [num, name]) raw_data = await raw_data.fetchall() if raw_data: return jinja.render("index.html", request, wiki_set = await wiki_set(request, 0), data = '<textarea class="wiki_textarea" id="wiki_textarea_raw_1" readonly>' + raw_data[0][0] + '</textarea>', title = name, sub = 'r' + num + ' RAW', menu = [['w/' + name, '문서']] ) else: return response.redirect("/error/") @app.route("/diff/<name:string>") async def wiki_diff(request, name): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') args = RequestParameters() num1 = request.args.get('first', '1') num2 = request.args.get('second', '2') data_get = await db.execute("") @app.route("/manage") async def wiki_manage(request): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') @app.route("/manage/group") async def wiki_manage_group(request): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') data = '' li = '' permission_get = await db.execute("select name from list_per") permission_get = await permission_get.fetchall() if request.method == 'POST': return 0 for first in permission_get: li += '<li class="wiki_li" style="margin-left: 20px;"><a href="/manage/group/' + first[0] + '">' + first[0] + '</a></li>' return jinja.render("index.html", request, wiki_set = await wiki_set(request, 0), data = li, title = '권한 그룹', sub = 0, menu = [['manage', '이전']] ) @app.route("/manage/grant") async def wiki_manage_grant(request): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') @app.route("/manage/namespace") async def wiki_manage_namespace(request): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') @app.route("/manage/restart") async def wiki_manage_restart(request): try: os.execl(sys.executable, sys.executable, *sys.argv) except: try: os.execl(sys.executable, '"' + sys.executable + '"', *sys.argv) except: return response.redirect("/error/") @app.route("/manage/engine") async def wiki_manage_engine(request): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') @app.route("/manage/edit_filter") async def wiki_manage_edit_filter(request): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') @app.route("/manage/inter_wiki") async def wiki_manage_inter_wiki(request): async with aiofiles.open('data/setting.json', encoding = 'utf8') as f: setting_data = json.loads(await f.read()) db = await aiosqlite.connect(setting_data['db_name'] + '.db') if __name__ == "__main__": app.run(debug=False, access_log=False, host=setting_data['host'], port=setting_data['port'])
true
true
7902f492f731db00642041633d79b41eaaf7f05a
2,298
py
Python
utils/FocalLoss.py
lphxx6222712/MSAN_Retina
f23502603fc2e68afd66d7801f16aad286949b4d
[ "Apache-2.0" ]
1
2022-03-16T12:08:40.000Z
2022-03-16T12:08:40.000Z
utils/FocalLoss.py
86236291/MSAN_Retina
7723fbfe7c6fcd4e310beb8b776a9057af62a2f1
[ "Apache-2.0" ]
1
2022-03-31T05:16:21.000Z
2022-03-31T05:16:21.000Z
utils/FocalLoss.py
86236291/MSAN_Retina
7723fbfe7c6fcd4e310beb8b776a9057af62a2f1
[ "Apache-2.0" ]
1
2022-03-16T12:08:35.000Z
2022-03-16T12:08:35.000Z
import torch
import torch.nn as nn


def clip_by_tensor(t, t_min, t_max):
    result = (t >= t_min) * t + (t < t_min) * t_min
    result = (result <= t_max) * result + (result > t_max) * t_max
    return result


class FocalLoss(nn.Module):
    def __init__(self, gamma=2, alpha=0.25):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha

    def forward(self, prediction_tensor, target_tensor):
        alpha = self.alpha
        gamma = self.gamma
        # input size is M*2. M is the batch number
        """Compute focal loss for predictions.
            Multi-label focal loss formula:
                FL = -alpha * (z-p)^gamma * log(p) - (1-alpha) * p^gamma * log(1-p)
            where alpha = 0.25, gamma = 2, p = sigmoid(x), z = target_tensor.
        Args:
            prediction_tensor: A float tensor of shape [batch_size, num_anchors, num_classes]
                representing the predicted logits for each class
            target_tensor: A float tensor of shape [batch_size, num_anchors, num_classes]
                representing one-hot encoded classification targets
            weights: A float tensor of shape [batch_size, num_anchors]
            alpha: A scalar tensor for the focal loss alpha hyper-parameter
            gamma: A scalar tensor for the focal loss gamma hyper-parameter
        Returns:
            loss: A (scalar) tensor representing the value of the loss function
        """
        sigmoid_p = torch.sigmoid(prediction_tensor)
        zeros = torch.zeros_like(sigmoid_p, dtype=sigmoid_p.dtype)
        # For positive predictions, only the front part of the loss matters; the back part is 0.
        # target_tensor > zeros <=> z=1, so the positive coefficient is z - p.
        pos_p_sub = torch.where(target_tensor > zeros, target_tensor - sigmoid_p, zeros)
        # For negative predictions, only the back part of the loss matters; the front part is 0.
        # target_tensor > zeros <=> z=1, so the negative coefficient is 0 there.
        neg_p_sub = torch.where(target_tensor > zeros, zeros, sigmoid_p)
        per_entry_cross_ent = - alpha * (pos_p_sub ** gamma) * torch.log(clip_by_tensor(sigmoid_p, 1e-8, 1.0)) \
                              - (1 - alpha) * (neg_p_sub ** gamma) * torch.log(clip_by_tensor(1.0 - sigmoid_p, 1e-8, 1.0))
        return per_entry_cross_ent.mean()
46.897959
122
0.639252
import torch import torch.nn as nn def clip_by_tensor(t, t_min, t_max): result = (t>=t_min)*t+(t<t_min)*t_min result = (result<=t_max)*result+(result>t_max)*t_max return result class FocalLoss(nn.Module): def __init__(self, gamma=2, alpha=0.25): super(FocalLoss, self).__init__() self.gamma = gamma self.alpha = alpha def forward(self, prediction_tensor, target_tensor): alpha = self.alpha gamma = self.gamma sigmoid_p = torch.sigmoid(prediction_tensor) zeros = torch.zeros_like(sigmoid_p, dtype=sigmoid_p.dtype) pos_p_sub = torch.where(target_tensor > zeros, target_tensor - sigmoid_p, zeros) neg_p_sub = torch.where(target_tensor > zeros, zeros, sigmoid_p) per_entry_cross_ent = - alpha * (pos_p_sub ** gamma) * torch.log(clip_by_tensor(sigmoid_p, 1e-8, 1.0)) \ - (1 - alpha) * (neg_p_sub ** gamma) * torch.log(clip_by_tensor(1.0 - sigmoid_p, 1e-8, 1.0)) return per_entry_cross_ent.mean()
true
true
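A minimal sketch of the focal-loss formula that the FocalLoss.py sample above implements, FL = -alpha * (z-p)^gamma * log(p) - (1-alpha) * p^gamma * log(1-p) with p = sigmoid(x) and z the target. The tensor shapes, the seed and the variable names below are illustrative assumptions and are not taken from the sample itself.

import torch

# Illustrative shapes only; any [batch, num_classes] logits/targets behave the same way.
torch.manual_seed(0)
logits = torch.randn(4, 5)                      # raw scores x
targets = torch.randint(0, 2, (4, 5)).float()   # multi-hot labels z

alpha, gamma, eps = 0.25, 2.0, 1e-8
p = torch.sigmoid(logits)
# positive part: coefficient (z - p) where z = 1, else 0
pos = torch.where(targets > 0, targets - p, torch.zeros_like(p))
# negative part: coefficient p where z = 0, else 0
neg = torch.where(targets > 0, torch.zeros_like(p), p)
focal_loss = -alpha * pos ** gamma * torch.log(p.clamp(eps, 1.0)) \
             - (1 - alpha) * neg ** gamma * torch.log((1 - p).clamp(eps, 1.0))
print(focal_loss.mean())  # should match FocalLoss(gamma=2, alpha=0.25)(logits, targets) from the sample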
7902f49c40844113abadadb659ecd46f82f4ce99
10,315
py
Python
preprocessing.py
MTC-ETH/RecommenderSystems
ede5aa961740348a68210f271397e1924c5f7cf6
[ "Apache-2.0" ]
null
null
null
preprocessing.py
MTC-ETH/RecommenderSystems
ede5aa961740348a68210f271397e1924c5f7cf6
[ "Apache-2.0" ]
2
2020-11-12T09:12:59.000Z
2020-11-12T09:16:46.000Z
preprocessing.py
MTC-ETH/RecommenderSystems
ede5aa961740348a68210f271397e1924c5f7cf6
[ "Apache-2.0" ]
null
null
null
# Copyright 2021 ETH Zurich, Media Technology Center # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import os import pandas as pd """ This module is mainly used to transform the data from the partners into our desired format. In the and only load_data and get_metadata is used in the algorithms. """ def load_data(folder, input_path='user_item', cut=40,high_cut=1000000, seed=None): """ loads the training,validation,test set from the folder, restricts the users with at least "cut" read articles and returns the sets. The Format of the sets is pd.Series with index the UserID and value a list of ArticleIDs :param folder/input_path: {folder}/{input_path} is the path to look for the *_train.pkl files :param cut: value to cut off users with less than "cut" read articles :return: three pd.Series. Index of each series is the UserID. The value is a list of ArticleIDs. (look in create_split to see how the split is defines) """ # cut cuts off users that read less than cut articles user_item_train, user_item_test, user_item_validation = pd.read_pickle( f'{folder}/{input_path}_train.pkl'), pd.read_pickle(f'{folder}/{input_path}_test.pkl'), pd.read_pickle( f'{folder}/{input_path}_validation.pkl') user_item_train = user_item_train[user_item_train.str.len() > cut * 0.7] user_item_train = user_item_train[user_item_train.str.len() < high_cut * 0.7] user_item_test = user_item_test.loc[user_item_train.index] user_item_validation = user_item_validation.loc[user_item_train.index] return user_item_train, user_item_test, user_item_validation def load_data_vertical(folder, input_path='user_item_vertical', cut=40): """ loads the training,validation,test set from the folder, restricts the users with at least "cut" read articles and returns the sets. The Format of the sets is pd.Series with index the UserID and value a list of ArticleIDs :param folder/input_path: {folder}/{input_path} is the path to look for the *_train.pkl files :param cut: value to cut off users with less than "cut" read articles :return: three pd.Series. Index of each series is the UserID. The value is a list of ArticleIDs. 
(look in create_split to see how the split is defined) """ # cut cuts off users that read less than cut articles user_item_train, user_item_test, user_item_validation = pd.read_parquet( f'{folder}/{input_path}_train.pq'), pd.read_parquet(f'{folder}/{input_path}_test.pq'), pd.read_parquet( f'{folder}/{input_path}_validation.pq') user_item_train = user_item_train[user_item_train['count'] >cut] user_item_test =user_item_test[user_item_test['count'] >cut] user_item_validation = user_item_validation[user_item_validation['count'] >cut] user_item_train['resource_id']=user_item_train['article_id'] user_item_test['resource_id']=user_item_test['article_id'] user_item_validation['resource_id']=user_item_validation['article_id'] return user_item_train, user_item_test, user_item_validation def load_data_cv(folder, input_path='user_item', cut=40, high_cut=100000,seed=1): """ Same as load_data but only returns random 80% of the training set """ # cut cuts off users that read less than cut articles user_item_train, user_item_test, user_item_validation = load_data(folder, input_path=input_path, cut=cut,high_cut=high_cut) user_item_train = user_item_train.sample(frac=0.8,random_state=seed) user_item_test = user_item_test.sample(frac=1, random_state=seed) return user_item_train, user_item_test, user_item_validation def load_data_vertical_cv(folder, input_path='user_item_vertical', cut=40, high_cut=100000,seed=1): """ Same as load_data but only returns random 80% of the training set """ # cut cuts off users that read less than cut articles user_item_train, user_item_test, user_item_validation = load_data_vertical(folder, input_path=input_path, cut=cut) user_item_train = user_item_train.sample(frac=0.8,random_state=seed) user_item_test = user_item_test.sample(frac=1, random_state=seed) return user_item_train, user_item_test, user_item_validation def get_metadata(folder, usecols=[]): """ Loads and returns the article metadata. The algorithms expect the format to be a Dataframe with two columns: - "resource_id": unique id for the article - "text": full text of the article (without html tags) """ if not usecols: usecols = ['text', 'resource_id'] metadata = pd.read_csv(f"{folder}/meta.csv", usecols=usecols) return metadata.dropna(subset=['text']) def transform_item_matrix_to_horizontal_format(folder, output_path='user_item_matrix.pkl', input_path='user_item_matrix_vertical.pq', sortby='ts'): """ Transforms vertical User-Item matrix where each row is one click into a horizontal User-item matrix where we have one row for each user and each row contains a (sorted) list of articles she/he clicked on. :param folder: Input folder :param output_path: Filename/path for outputfile :param input_path: Filename/path for inputfile. This parquet file contains a DataFrame with three columns: "user_ix": the UserID and "article_id" the ArticleID and "<sortby>" which should be timestamp to sort by. Each UserID ArticleID pair indicates a click of the user on the article at a time.
:param sortby: Columnname of the timestamp column to sort by :return: returns a Series where the index is the UserID and the value is the timestamp-sorted list of clicked ArticleIDs """ now = datetime.datetime.now() matrices = pd.read_parquet(f"{folder}/{input_path}") grouped = matrices.sort_values(sortby).groupby(['user_ix']).apply(lambda x: list(x['article_id'])) grouped.to_pickle(f"{folder}/{output_path}") print(f"Data transformed {datetime.datetime.now() - now}") def create_split(folder, input_path='user_item_matrix.pkl', ouput_path='user_item', cut_dump=10): """ Loads the horizontal user item data from folder and creates a user-wise 70% train, 20% validation, 10% test split. This means for each user the first 70% of read articles are in the train set, the next 20% in validation and the last 10% read articles in the test set. We remove users with less than 10 clicked articles. This is the data that is loaded to train/test the models in the end. """ now = datetime.datetime.now() user_item = pd.read_pickle(f"{folder}/{input_path}") user_item = user_item[user_item.str.len() > (cut_dump)] user_item_train = user_item.apply(lambda x: x[:int(len(x) * 0.7)]) user_item_test = user_item.apply(lambda x: x[int(len(x) * 0.7):int(len(x) * 0.9)]) user_item_validation = user_item.apply(lambda x: x[int(len(x) * 0.9):]) user_item_train.name = 'article_id' user_item_test.name = 'article_id' user_item_validation.name = 'article_id' user_item_train.to_pickle(f'{folder}/{ouput_path}_train.pkl') user_item_test.to_pickle(f'{folder}/{ouput_path}_test.pkl') user_item_validation.to_pickle(f'{folder}/{ouput_path}_validation.pkl') print(f"Split created {datetime.datetime.now() - now}") def create_split_vertical(folder, input_path='user_item_matrix_vertical.pq', ouput_path='user_item_vertical', cut_dump=10,time_column='ts'): """ Loads the vertical user item data from folder and creates a user-wise 70% train, 20% validation, 10% test split. This means for each user the first 70% of read articles are in the train set, the next 20% in validation and the last 10% read articles in the test set. We remove users with less than 10 clicked articles. This is the data that is loaded to train/test the models in the end. """ now = datetime.datetime.now() user_item = pd.read_parquet(f"{folder}/{input_path}").sort_values(time_column) user_item['count']=user_item.groupby(['user_ix']).article_id.transform('count') user_item = user_item[user_item['count']>cut_dump] grouped = user_item.groupby(['user_ix']) user_item['percentile'] = (grouped.article_id.cumcount() + 1) / grouped.article_id.transform('count') user_item_train = user_item[user_item['percentile']<=0.7] user_item_test = user_item[(user_item['percentile']>0.7) & (user_item['percentile']<0.9)] user_item_validation = user_item[user_item['percentile']>0.9] user_item_train.to_parquet(f'{folder}/{ouput_path}_train.pq') user_item_test.to_parquet(f'{folder}/{ouput_path}_test.pq') user_item_validation.to_parquet(f'{folder}/{ouput_path}_validation.pq') print(f"Split created {datetime.datetime.now() - now}") def transform_horizontal_to_vertical(df): """ Transforms the horizontal format into vertical format :param df: :return: """ return df.explode().reset_index() if __name__ == "__main__": import pandas as pd folder = os.getenv('DATA_FOLDER','processed') # Transforms the user-item-matrix into a user-series. For each user we store the articles read as one sorted list. # Save the new format. # This format is more convenient for creating the split and for training some of the algorithms.
transform_item_matrix_to_horizontal_format(folder=folder) # Create a train,test,validation split. 70%,10%,20% and save it create_split(folder=folder, cut_dump=10) create_split_vertical(folder=folder, cut_dump=10) # loads the saved train,validation,test split train, test, validation = load_data(folder=folder, cut=40) # # if you wish to transform into normal user-item-format # train_vertical = transform_horizontal_to_vertical(train)
49.591346
140
0.728938
import datetime import os import pandas as pd def load_data(folder, input_path='user_item', cut=40,high_cut=1000000, seed=None): user_item_train, user_item_test, user_item_validation = pd.read_pickle( f'{folder}/{input_path}_train.pkl'), pd.read_pickle(f'{folder}/{input_path}_test.pkl'), pd.read_pickle( f'{folder}/{input_path}_validation.pkl') user_item_train = user_item_train[user_item_train.str.len() > cut * 0.7] user_item_train = user_item_train[user_item_train.str.len() < high_cut * 0.7] user_item_test = user_item_test.loc[user_item_train.index] user_item_validation = user_item_validation.loc[user_item_train.index] return user_item_train, user_item_test, user_item_validation def load_data_vertical(folder, input_path='user_item_vertical', cut=40): user_item_train, user_item_test, user_item_validation = pd.read_parquet( f'{folder}/{input_path}_train.pq'), pd.read_parquet(f'{folder}/{input_path}_test.pq'), pd.read_parquet( f'{folder}/{input_path}_validation.pq') user_item_train = user_item_train[user_item_train['count'] >cut] user_item_test =user_item_test[user_item_test['count'] >cut] user_item_validation = user_item_validation[user_item_validation['count'] >cut] user_item_train['resource_id']=user_item_train['article_id'] user_item_test['resource_id']=user_item_test['article_id'] user_item_validation['resource_id']=user_item_validation['article_id'] return user_item_train, user_item_test, user_item_validation def load_data_cv(folder, input_path='user_item', cut=40, high_cut=100000,seed=1): user_item_train, user_item_test, user_item_validation = load_data(folder, input_path=input_path, cut=cut,high_cut=high_cut) user_item_train = user_item_train.sample(frac=0.8,random_state=seed) user_item_test = user_item_test.sample(frac=1, random_state=seed) return user_item_train, user_item_test, user_item_validation def load_data_vertical_cv(folder, input_path='user_item_vertical', cut=40, high_cut=100000,seed=1): user_item_train, user_item_test, user_item_validation = load_data_vertical(folder, input_path=input_path, cut=cut) user_item_train = user_item_train.sample(frac=0.8,random_state=seed) user_item_test = user_item_test.sample(frac=1, random_state=seed) return user_item_train, user_item_test, user_item_validation def get_metadata(folder, usecols=[]): if not usecols: usecols = ['text', 'resource_id'] metadata = pd.read_csv(f"{folder}/meta.csv", usecols=usecols) return metadata.dropna(subset=['text']) def transform_item_matrix_to_horizontal_format(folder, output_path='user_item_matrix.pkl', input_path='user_item_matrix_vertical.pq', sortby='ts'): now = datetime.datetime.now() matrices = pd.read_parquet(f"{folder}/{input_path}") grouped = matrices.sort_values(sortby).groupby(['user_ix']).apply(lambda x: list(x['article_id'])) grouped.to_pickle(f"{folder}/{output_path}") print(f"Data transformed {datetime.datetime.now() - now}") def create_split(folder, input_path='user_item_matrix.pkl', ouput_path='user_item', cut_dump=10): now = datetime.datetime.now() user_item = pd.read_pickle(f"{folder}/{input_path}") user_item = user_item[user_item.str.len() > (cut_dump)] user_item_train = user_item.apply(lambda x: x[:int(len(x) * 0.7)]) user_item_test = user_item.apply(lambda x: x[int(len(x) * 0.7):int(len(x) * 0.9)]) user_item_validation = user_item.apply(lambda x: x[int(len(x) * 0.9):]) user_item_train.name = 'article_id' user_item_test.name = 'article_id' user_item_validation.name = 'article_id' user_item_train.to_pickle(f'{folder}/{ouput_path}_train.pkl') 
user_item_test.to_pickle(f'{folder}/{ouput_path}_test.pkl') user_item_validation.to_pickle(f'{folder}/{ouput_path}_validation.pkl') print(f"Split created {datetime.datetime.now() - now}") def create_split_vertical(folder, input_path='user_item_matrix_vertical.pq', ouput_path='user_item_vertical', cut_dump=10,time_column='ts'): now = datetime.datetime.now() user_item = pd.read_parquet(f"{folder}/{input_path}").sort_values(time_column) user_item['count']=user_item.groupby(['user_ix']).article_id.transform('count') user_item = user_item[user_item['count']>cut_dump] grouped = user_item.groupby(['user_ix']) user_item['percentile'] = (grouped.article_id.cumcount() + 1) / grouped.article_id.transform('count') user_item_train = user_item[user_item['percentile']<=0.7] user_item_test = user_item[(user_item['percentile']>0.7) & (user_item['percentile']<0.9)] user_item_validation = user_item[user_item['percentile']>0.9] user_item_train.to_parquet(f'{folder}/{ouput_path}_train.pq') user_item_test.to_parquet(f'{folder}/{ouput_path}_test.pq') user_item_validation.to_parquet(f'{folder}/{ouput_path}_validation.pq') print(f"Split created {datetime.datetime.now() - now}") def transform_horizontal_to_vertical(df): return df.explode().reset_index() if __name__ == "__main__": import pandas as pd folder = os.getenv('DATA_FOLDER','processed') transform_item_matrix_to_horizontal_format(folder=folder) create_split(folder=folder, cut_dump=10) create_split_vertical(folder=folder, cut_dump=10) train, test, validation = load_data(folder=folder, cut=40)
true
true
7902f5679cd3a07f4c6afe32c6fadf126ee0ff76
170
py
Python
maxwell/doer.py
maxwell-dev/maxwell-client-python
9d68ae57974c29d7454f4e95ff3c103e45ac48c7
[ "Apache-2.0" ]
1
2020-11-18T14:39:19.000Z
2020-11-18T14:39:19.000Z
maxwell/doer.py
maxwell-dev/maxwell-client-python
9d68ae57974c29d7454f4e95ff3c103e45ac48c7
[ "Apache-2.0" ]
null
null
null
maxwell/doer.py
maxwell-dev/maxwell-client-python
9d68ae57974c29d7454f4e95ff3c103e45ac48c7
[ "Apache-2.0" ]
1
2021-07-15T01:59:19.000Z
2021-07-15T01:59:19.000Z
class Doer(object): def __init__(self, frontend): self.__frontend = frontend async def do(self, action): return await self.__frontend.do(action)
24.285714
47
0.664706
class Doer(object): def __init__(self, frontend): self.__frontend = frontend async def do(self, action): return await self.__frontend.do(action)
true
true
7902f74fc821350a2773f857e2d3735b48e4a3e4
1,063
py
Python
texaslan/voting/migrations/0003_auto_20170223_1207.py
hsmeans/texaslan.org
a981e7835381e77320e39536a619981ba9d03451
[ "MIT" ]
2
2018-02-06T06:24:03.000Z
2018-03-20T03:32:13.000Z
texaslan/voting/migrations/0003_auto_20170223_1207.py
hsmeans/texaslan.org
a981e7835381e77320e39536a619981ba9d03451
[ "MIT" ]
32
2017-02-21T20:01:43.000Z
2020-02-08T21:52:16.000Z
texaslan/voting/migrations/0003_auto_20170223_1207.py
hsmeans/texaslan.org
a981e7835381e77320e39536a619981ba9d03451
[ "MIT" ]
6
2017-03-21T21:16:40.000Z
2020-02-08T20:46:20.000Z
# -*- coding: utf-8 -*- # Generated by Django 1.10 on 2017-02-23 18:07 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('voting', '0002_auto_20170223_1054'), ] operations = [ migrations.RemoveField( model_name='voteballot', name='vote_candidate', ), migrations.AddField( model_name='voteballot', name='candidates', field=models.ManyToManyField(related_name='vote_ballot', to='voting.Candidate', verbose_name="Vote's Candidate"), ), migrations.AddField( model_name='voteballot', name='position', field=models.CharField(choices=[('P', 'President'), ('A', 'VP of Administration'), ('T', 'Treasurer'), ('S', 'VP of Service'), ('N', 'VP of New Member Services'), ('O', 'VP of Social Affairs'), ('J', 'VP of Standards'), ('R', 'Risk Management'), ('B', 'Standards Board')], default='P', max_length=1), ), ]
35.433333
312
0.594544
from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('voting', '0002_auto_20170223_1054'), ] operations = [ migrations.RemoveField( model_name='voteballot', name='vote_candidate', ), migrations.AddField( model_name='voteballot', name='candidates', field=models.ManyToManyField(related_name='vote_ballot', to='voting.Candidate', verbose_name="Vote's Candidate"), ), migrations.AddField( model_name='voteballot', name='position', field=models.CharField(choices=[('P', 'President'), ('A', 'VP of Administration'), ('T', 'Treasurer'), ('S', 'VP of Service'), ('N', 'VP of New Member Services'), ('O', 'VP of Social Affairs'), ('J', 'VP of Standards'), ('R', 'Risk Management'), ('B', 'Standards Board')], default='P', max_length=1), ), ]
true
true
7902f7e79e29a3cc621fbd2fea33ae1f59e18a46
1,836
py
Python
bdaybot/snailmail.py
Ahsoka/bdaybot
3aec9509119c28b294efdc0c76e8ee7fda7e8553
[ "MIT" ]
5
2021-01-21T07:49:50.000Z
2022-02-09T17:13:09.000Z
bdaybot/snailmail.py
Ahsoka/bdaybot
3aec9509119c28b294efdc0c76e8ee7fda7e8553
[ "MIT" ]
22
2020-12-30T02:13:32.000Z
2022-02-24T05:52:54.000Z
bdaybot/snailmail.py
Ahsoka/bdaybot
3aec9509119c28b294efdc0c76e8ee7fda7e8553
[ "MIT" ]
3
2021-02-02T05:21:14.000Z
2021-08-20T01:16:30.000Z
import os import pathlib from dotenv import load_dotenv, find_dotenv from fpdf import FPDF # envelope size: 110 by 145 mm # Elliot Torres # 4321 Loser Road # La Crescenta, CA 91214 # # Ryan Lee # 1234 Boomer Road # La Crescenta, CA 91214 load_dotenv(find_dotenv()) # types out address on envelope def sendmail( FULLNAME, ADDRESS_LINE_ONE, CITY, STATE, ZIPCODE, PERSON=None, ADDRESS_LINE_TWO=None ): if PERSON is None: sender_name = os.environ['sender_name'] sender_addr1 = os.environ['sender_addr1'] sender_addr2 = os.environ['sender_addr2'] else: sender_name = PERSON.fullname sender_addr1 = f'{PERSON.addrline1}' sender_addr2 = f'{PERSON.city}, {PERSON.state} {PERSON.zipcode}' pdf = FPDF('L', 'mm', (110, 145)) pdf.add_page() pdf.set_font('Times', '', 9.8) pdf.set_margins(0, 0, 0) pdf.text(7, 7.5, sender_name) pdf.text(7, 10.5, sender_addr1) pdf.text(7, 13.5, sender_addr2) pdf.set_font('Times', '', 14) if ADDRESS_LINE_TWO is None: pdf.text(44, 78, FULLNAME) pdf.text(44, 82, ADDRESS_LINE_ONE) pdf.text(44, 86, f'{CITY}, {STATE} {ZIPCODE}') else: pdf.text(44, 78, FULLNAME) pdf.text(44, 82, f'{ADDRESS_LINE_ONE}, {ADDRESS_LINE_TWO}') pdf.text(44, 86, f'{CITY}, {STATE} {ZIPCODE}') # types out message on back of envelope pdf.add_page() pdf.set_margins(0, 0, 0) pdf.text(10, 78, f"Happy Birthday {FULLNAME}!") pdf.text(10, 82, "Have a wonderful day and enjoy your sweet!") pdf.text(10, 86, "-CVHS Bday Team") envelope_file = pathlib.Path('envelope.pdf') if envelope_file.exists(): envelope_file.unlink() pdf.output('envelope.pdf', dest='F').encode('latin-1') os.system("lp -d printer envelope.pdf")
25.5
72
0.634532
import os import pathlib from dotenv import load_dotenv, find_dotenv from fpdf import FPDF load_dotenv(find_dotenv()) def sendmail( FULLNAME, ADDRESS_LINE_ONE, CITY, STATE, ZIPCODE, PERSON=None, ADDRESS_LINE_TWO=None ): if PERSON is None: sender_name = os.environ['sender_name'] sender_addr1 = os.environ['sender_addr1'] sender_addr2 = os.environ['sender_addr2'] else: sender_name = PERSON.fullname sender_addr1 = f'{PERSON.addrline1}' sender_addr2 = f'{PERSON.city}, {PERSON.state} {PERSON.zipcode}' pdf = FPDF('L', 'mm', (110, 145)) pdf.add_page() pdf.set_font('Times', '', 9.8) pdf.set_margins(0, 0, 0) pdf.text(7, 7.5, sender_name) pdf.text(7, 10.5, sender_addr1) pdf.text(7, 13.5, sender_addr2) pdf.set_font('Times', '', 14) if ADDRESS_LINE_TWO is None: pdf.text(44, 78, FULLNAME) pdf.text(44, 82, ADDRESS_LINE_ONE) pdf.text(44, 86, f'{CITY}, {STATE} {ZIPCODE}') else: pdf.text(44, 78, FULLNAME) pdf.text(44, 82, f'{ADDRESS_LINE_ONE}, {ADDRESS_LINE_TWO}') pdf.text(44, 86, f'{CITY}, {STATE} {ZIPCODE}') pdf.add_page() pdf.set_margins(0, 0, 0) pdf.text(10, 78, f"Happy Birthday {FULLNAME}!") pdf.text(10, 82, "Have a wonderful day and enjoy your sweet!") pdf.text(10, 86, "-CVHS Bday Team") envelope_file = pathlib.Path('envelope.pdf') if envelope_file.exists(): envelope_file.unlink() pdf.output('envelope.pdf', dest='F').encode('latin-1') os.system("lp -d printer envelope.pdf")
true
true
7902f803c0ccd4f78c6e5630fda57ecfb85a50ba
481
py
Python
output/models/ms_data/regex/re_g20_xsd/re_g20.py
tefra/xsdata-w3c-tests
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
[ "MIT" ]
1
2021-08-14T17:59:21.000Z
2021-08-14T17:59:21.000Z
output/models/ms_data/regex/re_g18_xsd/re_g18.py
tefra/xsdata-w3c-tests
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
[ "MIT" ]
4
2020-02-12T21:30:44.000Z
2020-04-15T20:06:46.000Z
output/models/ms_data/regex/re_g20_xsd/re_g20.py
tefra/xsdata-w3c-tests
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
[ "MIT" ]
null
null
null
from dataclasses import dataclass, field from typing import List, Optional @dataclass class Regex: att: Optional[str] = field( default=None, metadata={ "type": "Attribute", "pattern": r"[\C\?a-c\?]+", } ) @dataclass class Doc: class Meta: name = "doc" elem: List[Regex] = field( default_factory=list, metadata={ "type": "Element", "namespace": "", } )
17.178571
40
0.503119
from dataclasses import dataclass, field from typing import List, Optional @dataclass class Regex: att: Optional[str] = field( default=None, metadata={ "type": "Attribute", "pattern": r"[\C\?a-c\?]+", } ) @dataclass class Doc: class Meta: name = "doc" elem: List[Regex] = field( default_factory=list, metadata={ "type": "Element", "namespace": "", } )
true
true
7902f91df2e8b13c906f753f86fe4e48e8fd4868
5,249
py
Python
miniimgnet/KD-gan/task_generator.py
WendyBaiYunwei/FSL
e20470872d52332efdb1449b4593445c5d94e4fb
[ "MIT" ]
null
null
null
miniimgnet/KD-gan/task_generator.py
WendyBaiYunwei/FSL
e20470872d52332efdb1449b4593445c5d94e4fb
[ "MIT" ]
null
null
null
miniimgnet/KD-gan/task_generator.py
WendyBaiYunwei/FSL
e20470872d52332efdb1449b4593445c5d94e4fb
[ "MIT" ]
null
null
null
# code is based on https://github.com/katerakelly/pytorch-maml import torchvision import torchvision.datasets as dset import torchvision.transforms as transforms import torch from torch.utils.data import DataLoader,Dataset import random import os from PIL import Image import matplotlib.pyplot as plt import numpy as np from torch.utils.data.sampler import Sampler def imshow(img): npimg = img.numpy() plt.axis("off") plt.imshow(np.transpose(npimg,(1,2,0))) plt.show() class Rotate(object): def __init__(self, angle): self.angle = angle def __call__(self, x, mode="reflect"): x = x.rotate(self.angle) return x def mini_imagenet_folders(): train_folder = './train' test_folder = './test' metatrain_folders = [os.path.join(train_folder, label) \ for label in os.listdir(train_folder) \ if os.path.isdir(os.path.join(train_folder, label)) \ ] metatest_folders = [os.path.join(test_folder, label) \ for label in os.listdir(test_folder) \ if os.path.isdir(os.path.join(test_folder, label)) \ ] random.seed(1) random.shuffle(metatrain_folders) random.shuffle(metatest_folders) return metatrain_folders,metatest_folders class MiniImagenetTask(object): def __init__(self, character_folders, num_classes, train_num,test_num): self.character_folders = character_folders self.num_classes = num_classes self.train_num = train_num self.test_num = test_num class_folders = random.sample(self.character_folders,self.num_classes) labels = np.array(range(len(class_folders))) labels = dict(zip(class_folders, labels)) samples = dict() self.train_roots = [] self.test_roots = [] for c in class_folders: temp = [os.path.join(c, x) for x in os.listdir(c)] samples[c] = random.sample(temp, len(temp)) random.shuffle(samples[c]) self.train_roots += samples[c][:train_num] self.test_roots += samples[c][train_num:train_num+test_num] self.train_labels = [labels[self.get_class(x)] for x in self.train_roots] self.test_labels = [labels[self.get_class(x)] for x in self.test_roots] def get_class(self, sample): return os.path.join(*sample.split('/')[:-1]) class FewShotDataset(Dataset): def __init__(self, task, split='train', transform=None, target_transform=None): self.transform = transform # Torch operations on the input image self.target_transform = target_transform self.task = task self.split = split self.image_roots = self.task.train_roots if self.split == 'train' else self.task.test_roots self.labels = self.task.train_labels if self.split == 'train' else self.task.test_labels def __len__(self): return len(self.image_roots) def __getitem__(self, idx): raise NotImplementedError("This is an abstract class. 
Subclass this class for your particular dataset.") class MiniImagenet(FewShotDataset): def __init__(self, *args, **kwargs): super(MiniImagenet, self).__init__(*args, **kwargs) def __getitem__(self, idx): image_root = self.image_roots[idx] image = Image.open(image_root) image = image.convert('RGB') if self.transform is not None: image = self.transform(image) label = self.labels[idx] if self.target_transform is not None: label = self.target_transform(label) return image, label, image_root class ClassBalancedSampler(Sampler): ''' Samples 'num_inst' examples each from 'num_cl' pools of examples of size 'num_per_class' ''' def __init__(self, num_per_class, num_cl, num_inst,shuffle=True): self.num_per_class = num_per_class self.num_cl = num_cl self.num_inst = num_inst self.shuffle = shuffle def __iter__(self): # return a single list of indices, assuming that items will be grouped by class if self.shuffle: batch = [[i+j*self.num_inst for i in torch.randperm(self.num_inst)[:self.num_per_class]] for j in range(self.num_cl)] else: batch = [[i+j*self.num_inst for i in range(self.num_inst)[:self.num_per_class]] for j in range(self.num_cl)] batch = [item for sublist in batch for item in sublist] if self.shuffle: random.shuffle(batch) return iter(batch) def __len__(self): return 1 def get_mini_imagenet_data_loader(task, num_per_class=1, split='train',shuffle = False): normalize = transforms.Normalize(mean=[0.92206, 0.92206, 0.92206], std=[0.08426, 0.08426, 0.08426]) dataset = MiniImagenet(task,split=split,transform=transforms.Compose([transforms.ToTensor(),normalize])) if split == 'train': sampler = ClassBalancedSampler(num_per_class, task.num_classes, task.train_num,shuffle=shuffle) else: sampler = ClassBalancedSampler(num_per_class, task.num_classes, task.test_num,shuffle=shuffle) loader = DataLoader(dataset, batch_size=num_per_class*task.num_classes, sampler=sampler) return loader
34.761589
129
0.668508
import torchvision import torchvision.datasets as dset import torchvision.transforms as transforms import torch from torch.utils.data import DataLoader,Dataset import random import os from PIL import Image import matplotlib.pyplot as plt import numpy as np from torch.utils.data.sampler import Sampler def imshow(img): npimg = img.numpy() plt.axis("off") plt.imshow(np.transpose(npimg,(1,2,0))) plt.show() class Rotate(object): def __init__(self, angle): self.angle = angle def __call__(self, x, mode="reflect"): x = x.rotate(self.angle) return x def mini_imagenet_folders(): train_folder = './train' test_folder = './test' metatrain_folders = [os.path.join(train_folder, label) \ for label in os.listdir(train_folder) \ if os.path.isdir(os.path.join(train_folder, label)) \ ] metatest_folders = [os.path.join(test_folder, label) \ for label in os.listdir(test_folder) \ if os.path.isdir(os.path.join(test_folder, label)) \ ] random.seed(1) random.shuffle(metatrain_folders) random.shuffle(metatest_folders) return metatrain_folders,metatest_folders class MiniImagenetTask(object): def __init__(self, character_folders, num_classes, train_num,test_num): self.character_folders = character_folders self.num_classes = num_classes self.train_num = train_num self.test_num = test_num class_folders = random.sample(self.character_folders,self.num_classes) labels = np.array(range(len(class_folders))) labels = dict(zip(class_folders, labels)) samples = dict() self.train_roots = [] self.test_roots = [] for c in class_folders: temp = [os.path.join(c, x) for x in os.listdir(c)] samples[c] = random.sample(temp, len(temp)) random.shuffle(samples[c]) self.train_roots += samples[c][:train_num] self.test_roots += samples[c][train_num:train_num+test_num] self.train_labels = [labels[self.get_class(x)] for x in self.train_roots] self.test_labels = [labels[self.get_class(x)] for x in self.test_roots] def get_class(self, sample): return os.path.join(*sample.split('/')[:-1]) class FewShotDataset(Dataset): def __init__(self, task, split='train', transform=None, target_transform=None): self.transform = transform self.target_transform = target_transform self.task = task self.split = split self.image_roots = self.task.train_roots if self.split == 'train' else self.task.test_roots self.labels = self.task.train_labels if self.split == 'train' else self.task.test_labels def __len__(self): return len(self.image_roots) def __getitem__(self, idx): raise NotImplementedError("This is an abstract class. 
Subclass this class for your particular dataset.") class MiniImagenet(FewShotDataset): def __init__(self, *args, **kwargs): super(MiniImagenet, self).__init__(*args, **kwargs) def __getitem__(self, idx): image_root = self.image_roots[idx] image = Image.open(image_root) image = image.convert('RGB') if self.transform is not None: image = self.transform(image) label = self.labels[idx] if self.target_transform is not None: label = self.target_transform(label) return image, label, image_root class ClassBalancedSampler(Sampler): def __init__(self, num_per_class, num_cl, num_inst,shuffle=True): self.num_per_class = num_per_class self.num_cl = num_cl self.num_inst = num_inst self.shuffle = shuffle def __iter__(self): if self.shuffle: batch = [[i+j*self.num_inst for i in torch.randperm(self.num_inst)[:self.num_per_class]] for j in range(self.num_cl)] else: batch = [[i+j*self.num_inst for i in range(self.num_inst)[:self.num_per_class]] for j in range(self.num_cl)] batch = [item for sublist in batch for item in sublist] if self.shuffle: random.shuffle(batch) return iter(batch) def __len__(self): return 1 def get_mini_imagenet_data_loader(task, num_per_class=1, split='train',shuffle = False): normalize = transforms.Normalize(mean=[0.92206, 0.92206, 0.92206], std=[0.08426, 0.08426, 0.08426]) dataset = MiniImagenet(task,split=split,transform=transforms.Compose([transforms.ToTensor(),normalize])) if split == 'train': sampler = ClassBalancedSampler(num_per_class, task.num_classes, task.train_num,shuffle=shuffle) else: sampler = ClassBalancedSampler(num_per_class, task.num_classes, task.test_num,shuffle=shuffle) loader = DataLoader(dataset, batch_size=num_per_class*task.num_classes, sampler=sampler) return loader
true
true
7902f9afffe60bf396230930977afefb58a2a6f2
3,288
py
Python
applications/ShapeOptimizationApplication/python_scripts/value_logger_steepest_descent.py
Jacklwln/Kratos
12ffe332622d7e8ea3e4a10bc061beb9d8e6e8de
[ "BSD-4-Clause" ]
1
2019-08-01T09:01:08.000Z
2019-08-01T09:01:08.000Z
applications/ShapeOptimizationApplication/python_scripts/value_logger_steepest_descent.py
Jacklwln/Kratos
12ffe332622d7e8ea3e4a10bc061beb9d8e6e8de
[ "BSD-4-Clause" ]
null
null
null
applications/ShapeOptimizationApplication/python_scripts/value_logger_steepest_descent.py
Jacklwln/Kratos
12ffe332622d7e8ea3e4a10bc061beb9d8e6e8de
[ "BSD-4-Clause" ]
null
null
null
# ============================================================================== # KratosShapeOptimizationApplication # # License: BSD License # license: ShapeOptimizationApplication/license.txt # # Main authors: Baumgaertner Daniel, https://github.com/dbaumgaertner # Geiser Armin, https://github.com/armingeiser # # ============================================================================== # Kratos Core and Apps from KratosMultiphysics import * from KratosMultiphysics.ShapeOptimizationApplication import * # Import logger base classes from value_logger_base import ValueLogger # Import additional libraries import csv from custom_timer import Timer # ============================================================================== class ValueLoggerSteepestDescent( ValueLogger ): # -------------------------------------------------------------------------- def InitializeLogging( self ): with open(self.complete_log_file_name, 'w') as csvfile: historyWriter = csv.writer(csvfile, delimiter=',',quotechar='|',quoting=csv.QUOTE_MINIMAL) row = [] row.append("{:>4s}".format("itr")) row.append("{:>13s}".format("f")) row.append("{:>13s}".format("df_abs[%]")) row.append("{:>13s}".format("df_rel[%]")) row.append("{:>13s}".format("norm_df")) row.append("{:>13s}".format("step_size")) row.append("{:>25s}".format("time_stamp")) historyWriter.writerow(row) # -------------------------------------------------------------------------- def _WriteCurrentValuesToConsole( self ): objective_id = self.objectives[0]["identifier"].GetString() print("\n> Current value of objective = ", "{:> .5E}".format(self.history["response_value"][objective_id][self.current_index])) print("> Absolut change of objective = ","{:> .5E}".format(self.history["abs_change_objective"][self.current_index])," [%]") print("> Relative change of objective = ","{:> .5E}".format(self.history["rel_change_objective"][self.current_index])," [%]\n") # -------------------------------------------------------------------------- def _WriteCurrentValuesToFile( self ): with open(self.complete_log_file_name, 'a') as csvfile: historyWriter = csv.writer(csvfile, delimiter=',',quotechar='|',quoting=csv.QUOTE_MINIMAL) row = [] row.append("{:>4d}".format(self.current_index)) objective_id = self.objectives[0]["identifier"].GetString() row.append(" {:> .5E}".format(self.history["response_value"][objective_id][self.current_index])) row.append(" {:> .5E}".format(self.history["abs_change_objective"][self.current_index])) row.append(" {:> .5E}".format(self.history["rel_change_objective"][self.current_index])) row.append(" {:> .5E}".format(self.history["norm_objective_gradient"][self.current_index])) row.append(" {:> .5E}".format(self.history["step_size"][self.current_index])) row.append("{:>25}".format(Timer().GetTimeStamp())) historyWriter.writerow(row) # ==============================================================================
50.584615
135
0.528893
from KratosMultiphysics import * from KratosMultiphysics.ShapeOptimizationApplication import * from value_logger_base import ValueLogger import csv from custom_timer import Timer class ValueLoggerSteepestDescent( ValueLogger ): def InitializeLogging( self ): with open(self.complete_log_file_name, 'w') as csvfile: historyWriter = csv.writer(csvfile, delimiter=',',quotechar='|',quoting=csv.QUOTE_MINIMAL) row = [] row.append("{:>4s}".format("itr")) row.append("{:>13s}".format("f")) row.append("{:>13s}".format("df_abs[%]")) row.append("{:>13s}".format("df_rel[%]")) row.append("{:>13s}".format("norm_df")) row.append("{:>13s}".format("step_size")) row.append("{:>25s}".format("time_stamp")) historyWriter.writerow(row) def _WriteCurrentValuesToConsole( self ): objective_id = self.objectives[0]["identifier"].GetString() print("\n> Current value of objective = ", "{:> .5E}".format(self.history["response_value"][objective_id][self.current_index])) print("> Absolut change of objective = ","{:> .5E}".format(self.history["abs_change_objective"][self.current_index])," [%]") print("> Relative change of objective = ","{:> .5E}".format(self.history["rel_change_objective"][self.current_index])," [%]\n") def _WriteCurrentValuesToFile( self ): with open(self.complete_log_file_name, 'a') as csvfile: historyWriter = csv.writer(csvfile, delimiter=',',quotechar='|',quoting=csv.QUOTE_MINIMAL) row = [] row.append("{:>4d}".format(self.current_index)) objective_id = self.objectives[0]["identifier"].GetString() row.append(" {:> .5E}".format(self.history["response_value"][objective_id][self.current_index])) row.append(" {:> .5E}".format(self.history["abs_change_objective"][self.current_index])) row.append(" {:> .5E}".format(self.history["rel_change_objective"][self.current_index])) row.append(" {:> .5E}".format(self.history["norm_objective_gradient"][self.current_index])) row.append(" {:> .5E}".format(self.history["step_size"][self.current_index])) row.append("{:>25}".format(Timer().GetTimeStamp())) historyWriter.writerow(row)
true
true
7902fa697f6ac2fe46c7e67612dce698a7632d97
6,931
py
Python
tensorflow/python/autograph/impl/conversion_test.py
ranbir/tensorflow
46924b2f7bc4262b2c4b36841d393741113594ca
[ "Apache-2.0" ]
36
2016-12-17T15:25:25.000Z
2022-01-29T21:50:53.000Z
tensorflow/python/autograph/impl/conversion_test.py
ranbir/tensorflow
46924b2f7bc4262b2c4b36841d393741113594ca
[ "Apache-2.0" ]
30
2016-10-04T15:38:08.000Z
2020-07-16T12:09:33.000Z
tensorflow/python/autograph/impl/conversion_test.py
ranbir/tensorflow
46924b2f7bc4262b2c4b36841d393741113594ca
[ "Apache-2.0" ]
36
2017-07-27T21:12:40.000Z
2022-02-03T16:45:56.000Z
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for conversion module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import gast from tensorflow.python.autograph import utils from tensorflow.python.autograph.core import converter from tensorflow.python.autograph.impl import api from tensorflow.python.autograph.impl import conversion from tensorflow.python.autograph.pyct import compiler from tensorflow.python.framework import constant_op from tensorflow.python.keras.engine import training from tensorflow.python.platform import test class ConversionTest(test.TestCase): def _simple_program_ctx(self): return converter.ProgramContext( options=converter.ConversionOptions(recursive=True), autograph_module=api) def test_is_whitelisted_for_graph(self): def test_fn(): return constant_op.constant(1) self.assertFalse(conversion.is_whitelisted_for_graph(test_fn)) self.assertTrue(conversion.is_whitelisted_for_graph(utils)) self.assertTrue(conversion.is_whitelisted_for_graph(constant_op.constant)) def test_entity_to_graph_unsupported_types(self): with self.assertRaises(NotImplementedError): program_ctx = self._simple_program_ctx() conversion.entity_to_graph('dummy', program_ctx, None, None) def test_entity_to_graph_callable(self): b = 2 def f(a): return a + b program_ctx = self._simple_program_ctx() nodes, name, ns = conversion.entity_to_graph(f, program_ctx, None, None) fn_node, _ = nodes self.assertIsInstance(fn_node, gast.FunctionDef) self.assertEqual('tf__f', name) self.assertIs(ns['b'], b) def test_entity_to_graph_function_with_defaults(self): b = 2 c = 1 def f(a, d=c + 1): return a + b + d program_ctx = self._simple_program_ctx() nodes, name, _ = conversion.entity_to_graph(f, program_ctx, None, None) fn_node, _ = nodes self.assertIsInstance(fn_node, gast.FunctionDef) self.assertEqual('tf__f', name) self.assertEqual( compiler.ast_to_source(fn_node.args.defaults[0]).strip(), 'None') def test_entity_to_graph_call_tree(self): def g(a): return a def f(a): return g(a) program_ctx = self._simple_program_ctx() nodes, _, _ = conversion.entity_to_graph(f, program_ctx, None, None) f_node = nodes[0] self.assertEqual('tf__f', f_node.name) def test_entity_to_graph_class_hierarchy(self): class TestBase(object): def __init__(self, x='base'): self.x = x def foo(self): return self.x def bar(self): return self.x class TestSubclass(TestBase): def __init__(self, y): super(TestSubclass, self).__init__('sub') self.y = y def foo(self): return self.y def baz(self): return self.y program_ctx = self._simple_program_ctx() with self.assertRaisesRegex(NotImplementedError, 'classes.*whitelisted'): conversion.entity_to_graph(TestSubclass, program_ctx, None, None) def test_entity_to_graph_class_hierarchy_whitelisted(self): class TestSubclass(training.Model): def __init__(self, y): super(TestSubclass, self).__init__() self.built = 
False def call(self, x): return 3 * x program_ctx = self._simple_program_ctx() nodes, name, _ = conversion.entity_to_graph(TestSubclass, program_ctx, None, None) class_node = nodes[-2] # TODO(mdan): This is brittle. self.assertEqual(name, 'TfTestSubclass') self.assertEqual(class_node.name, 'TfTestSubclass') def test_entity_to_graph_lambda(self): b = 2 f = lambda x: b * x if x > 0 else -x program_ctx = self._simple_program_ctx() nodes, name, ns = conversion.entity_to_graph(f, program_ctx, None, None) fn_node, _ = nodes self.assertIsInstance(fn_node, gast.Assign) self.assertIsInstance(fn_node.value, gast.Lambda) self.assertEqual('tf__lambda', name) self.assertIs(ns['b'], b) def test_entity_to_graph_multiple_lambdas(self): a, b = 1, 2 f, _ = (lambda x: a * x, lambda y: b * y) program_ctx = self._simple_program_ctx() nodes, name, ns = conversion.entity_to_graph(f, program_ctx, None, None) fn_node, _ = nodes self.assertIsInstance(fn_node, gast.Assign) self.assertIsInstance(fn_node.value, gast.Lambda) self.assertEqual('tf__lambda', name) self.assertIs(ns['a'], a) def test_entity_to_graph_multiple_lambdas_ambiguous_definitions(self): a, b = 1, 2 f, _ = (lambda x: a * x, lambda x: b * x) program_ctx = self._simple_program_ctx() with self.assertRaises(ValueError): conversion.entity_to_graph(f, program_ctx, None, None) def test_entity_to_graph_lambda_code_with_garbage(self): # pylint:disable=g-long-lambda f = ( # intentional wrap lambda x: (x # intentional wrap + 1),)[0] # pylint:enable=g-long-lambda program_ctx = self._simple_program_ctx() nodes, name, _ = conversion.entity_to_graph(f, program_ctx, None, None) fn_node, _ = nodes self.assertIsInstance(fn_node, gast.Assign) self.assertIsInstance(fn_node.value, gast.Lambda) self.assertEqual('tf__lambda', name) def test_entity_to_graph_nested_functions(self): b = 2 def f(x): def g(x): return b * x return g(x) program_ctx = self._simple_program_ctx() nodes, name, ns = conversion.entity_to_graph(f, program_ctx, None, None) fn_node, _ = nodes self.assertIsInstance(fn_node, gast.FunctionDef) self.assertEqual(fn_node.name, 'tf__f') self.assertEqual('tf__f', name) self.assertIs(ns['b'], b) def test_ag_module_cached(self): def callee(): return range(3) def caller(a): return a() program_ctx = self._simple_program_ctx() _, _, callee_ns = conversion.entity_to_graph(callee, program_ctx, None, None) _, _, caller_ns = conversion.entity_to_graph(caller, program_ctx, None, None) self.assertTrue(callee_ns['ag__'] is caller_ns['ag__']) if __name__ == '__main__': test.main()
31.361991
80
0.68273
from __future__ import absolute_import from __future__ import division from __future__ import print_function import gast from tensorflow.python.autograph import utils from tensorflow.python.autograph.core import converter from tensorflow.python.autograph.impl import api from tensorflow.python.autograph.impl import conversion from tensorflow.python.autograph.pyct import compiler from tensorflow.python.framework import constant_op from tensorflow.python.keras.engine import training from tensorflow.python.platform import test class ConversionTest(test.TestCase): def _simple_program_ctx(self): return converter.ProgramContext( options=converter.ConversionOptions(recursive=True), autograph_module=api) def test_is_whitelisted_for_graph(self): def test_fn(): return constant_op.constant(1) self.assertFalse(conversion.is_whitelisted_for_graph(test_fn)) self.assertTrue(conversion.is_whitelisted_for_graph(utils)) self.assertTrue(conversion.is_whitelisted_for_graph(constant_op.constant)) def test_entity_to_graph_unsupported_types(self): with self.assertRaises(NotImplementedError): program_ctx = self._simple_program_ctx() conversion.entity_to_graph('dummy', program_ctx, None, None) def test_entity_to_graph_callable(self): b = 2 def f(a): return a + b program_ctx = self._simple_program_ctx() nodes, name, ns = conversion.entity_to_graph(f, program_ctx, None, None) fn_node, _ = nodes self.assertIsInstance(fn_node, gast.FunctionDef) self.assertEqual('tf__f', name) self.assertIs(ns['b'], b) def test_entity_to_graph_function_with_defaults(self): b = 2 c = 1 def f(a, d=c + 1): return a + b + d program_ctx = self._simple_program_ctx() nodes, name, _ = conversion.entity_to_graph(f, program_ctx, None, None) fn_node, _ = nodes self.assertIsInstance(fn_node, gast.FunctionDef) self.assertEqual('tf__f', name) self.assertEqual( compiler.ast_to_source(fn_node.args.defaults[0]).strip(), 'None') def test_entity_to_graph_call_tree(self): def g(a): return a def f(a): return g(a) program_ctx = self._simple_program_ctx() nodes, _, _ = conversion.entity_to_graph(f, program_ctx, None, None) f_node = nodes[0] self.assertEqual('tf__f', f_node.name) def test_entity_to_graph_class_hierarchy(self): class TestBase(object): def __init__(self, x='base'): self.x = x def foo(self): return self.x def bar(self): return self.x class TestSubclass(TestBase): def __init__(self, y): super(TestSubclass, self).__init__('sub') self.y = y def foo(self): return self.y def baz(self): return self.y program_ctx = self._simple_program_ctx() with self.assertRaisesRegex(NotImplementedError, 'classes.*whitelisted'): conversion.entity_to_graph(TestSubclass, program_ctx, None, None) def test_entity_to_graph_class_hierarchy_whitelisted(self): class TestSubclass(training.Model): def __init__(self, y): super(TestSubclass, self).__init__() self.built = False def call(self, x): return 3 * x program_ctx = self._simple_program_ctx() nodes, name, _ = conversion.entity_to_graph(TestSubclass, program_ctx, None, None) class_node = nodes[-2] self.assertEqual(name, 'TfTestSubclass') self.assertEqual(class_node.name, 'TfTestSubclass') def test_entity_to_graph_lambda(self): b = 2 f = lambda x: b * x if x > 0 else -x program_ctx = self._simple_program_ctx() nodes, name, ns = conversion.entity_to_graph(f, program_ctx, None, None) fn_node, _ = nodes self.assertIsInstance(fn_node, gast.Assign) self.assertIsInstance(fn_node.value, gast.Lambda) self.assertEqual('tf__lambda', name) self.assertIs(ns['b'], b) def test_entity_to_graph_multiple_lambdas(self): a, b = 1, 2 f, _ = 
(lambda x: a * x, lambda y: b * y) program_ctx = self._simple_program_ctx() nodes, name, ns = conversion.entity_to_graph(f, program_ctx, None, None) fn_node, _ = nodes self.assertIsInstance(fn_node, gast.Assign) self.assertIsInstance(fn_node.value, gast.Lambda) self.assertEqual('tf__lambda', name) self.assertIs(ns['a'], a) def test_entity_to_graph_multiple_lambdas_ambiguous_definitions(self): a, b = 1, 2 f, _ = (lambda x: a * x, lambda x: b * x) program_ctx = self._simple_program_ctx() with self.assertRaises(ValueError): conversion.entity_to_graph(f, program_ctx, None, None) def test_entity_to_graph_lambda_code_with_garbage(self): f = ( lambda x: (x + 1),)[0] program_ctx = self._simple_program_ctx() nodes, name, _ = conversion.entity_to_graph(f, program_ctx, None, None) fn_node, _ = nodes self.assertIsInstance(fn_node, gast.Assign) self.assertIsInstance(fn_node.value, gast.Lambda) self.assertEqual('tf__lambda', name) def test_entity_to_graph_nested_functions(self): b = 2 def f(x): def g(x): return b * x return g(x) program_ctx = self._simple_program_ctx() nodes, name, ns = conversion.entity_to_graph(f, program_ctx, None, None) fn_node, _ = nodes self.assertIsInstance(fn_node, gast.FunctionDef) self.assertEqual(fn_node.name, 'tf__f') self.assertEqual('tf__f', name) self.assertIs(ns['b'], b) def test_ag_module_cached(self): def callee(): return range(3) def caller(a): return a() program_ctx = self._simple_program_ctx() _, _, callee_ns = conversion.entity_to_graph(callee, program_ctx, None, None) _, _, caller_ns = conversion.entity_to_graph(caller, program_ctx, None, None) self.assertTrue(callee_ns['ag__'] is caller_ns['ag__']) if __name__ == '__main__': test.main()
true
true
7902fae55d57433b120b53b363455acee96728b0
236
py
Python
days/day101/Bite 18. Find the most common word/test_harry.py
alex-vegan/100daysofcode-with-python-course
b6c12316abe18274b7963371b8f0ed2fd549ef07
[ "MIT" ]
2
2018-10-28T17:12:37.000Z
2018-10-28T17:12:39.000Z
days/day101/Bite 18. Find the most common word/test_harry.py
alex-vegan/100daysofcode-with-python-course
b6c12316abe18274b7963371b8f0ed2fd549ef07
[ "MIT" ]
3
2018-10-28T17:11:04.000Z
2018-10-29T22:36:36.000Z
days/day101/Bite 18. Find the most common word/test_harry.py
alex-vegan/100daysofcode-with-python-course
b6c12316abe18274b7963371b8f0ed2fd549ef07
[ "MIT" ]
null
null
null
from harry import get_harry_most_common_word def test_get_harry_most_common_word(): top_word = get_harry_most_common_word() assert type(top_word) == tuple assert top_word[0] == 'dursley' assert top_word[1] == 45
29.5
45
0.724576
from harry import get_harry_most_common_word def test_get_harry_most_common_word(): top_word = get_harry_most_common_word() assert type(top_word) == tuple assert top_word[0] == 'dursley' assert top_word[1] == 45
true
true
7902fb8b9b62d3e2631f484ad462b2cd25c1434c
10,492
py
Python
install/app_store/tk-multi-shotgunpanel/v1.4.8/python/app/work_area_button.py
JoanAzpeitia/lp_sg
e0ee79555e419dd2ae3a5f31e5515b3f40b22a62
[ "MIT" ]
null
null
null
install/app_store/tk-multi-shotgunpanel/v1.4.8/python/app/work_area_button.py
JoanAzpeitia/lp_sg
e0ee79555e419dd2ae3a5f31e5515b3f40b22a62
[ "MIT" ]
null
null
null
install/app_store/tk-multi-shotgunpanel/v1.4.8/python/app/work_area_button.py
JoanAzpeitia/lp_sg
e0ee79555e419dd2ae3a5f31e5515b3f40b22a62
[ "MIT" ]
1
2020-02-15T10:42:56.000Z
2020-02-15T10:42:56.000Z
# Copyright (c) 2015 Shotgun Software Inc. # # CONFIDENTIAL AND PROPRIETARY # # This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit # Source Code License included in this distribution package. See LICENSE. # By accessing, using, copying or modifying this work you indicate your # agreement to the Shotgun Pipeline Toolkit Source Code License. All rights # not expressly granted therein are reserved by Shotgun Software Inc. from sgtk.platform.qt import QtCore, QtGui import sgtk class WorkAreaButton(QtGui.QToolButton): """ UX for switching work area. This displays a "change work area" button which a user can interact with The button is designed to expand so that it is subtle until a user hovers over it. :signal clicked(str, int): Fires when someone clicks the change work area button. Arguments passed are the entity type and entity id """ WIDGET_WIDTH_COLLAPSED = 30 WIDGET_HEIGHT = 30 NON_WORK_AREA_TYPES = [ "PublishedFile", "Project", "TankPublishedFile", "Version", "Note", "Group", "HumanUser", "ScriptUser", "ApiUser", "ClientUser", "Department", "Cut", "CutItem", "Delivery", "Playlist", "Ticket" ] change_work_area = QtCore.Signal(str, int) def __init__(self, parent): """ :param parent: The model parent. :type parent: :class:`~PySide.QtGui.QObject` """ super(WorkAreaButton, self).__init__(parent) # an icon to represent all items which # aren't the current work area self._normal_icon = QtGui.QIcon() self._normal_icon.addPixmap( QtGui.QPixmap(":/tk_multi_infopanel/pin.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off ) # an icon to represent the current work area self._current_work_area_icon = QtGui.QIcon() self._current_work_area_icon.addPixmap( QtGui.QPixmap(":/tk_multi_infopanel/pin_blue.png"), QtGui.QIcon.Disabled, QtGui.QIcon.Off ) self.setIcon(self._normal_icon) self.setIconSize(QtCore.QSize(self.WIDGET_WIDTH_COLLAPSED, self.WIDGET_HEIGHT)) self._bundle = sgtk.platform.current_bundle() self._entity_type = None self._entity_id = None self._is_static = False self._caption = "Set Work Area" self._width = 120 self.clicked.connect(self._on_click) self.setVisible(False) def set_up(self, entity_type, entity_id): """ Sets up the button for a given entity. :param entity_type: Entity type to set up button for :param entity_id: Entity id to set up button for """ self._entity_id = entity_id self._entity_type = entity_type if not self._bundle.get_setting("enable_context_switch"): # context switch button not enabled return # figure out if this is the current project context = self._bundle.context context_entity = context.task or context.entity or context.project or None self.setVisible(True) self.setEnabled(True) self.setIcon(self._normal_icon) self._is_static = False if context_entity and context_entity["type"] == entity_type and context_entity["id"] == entity_id: # the current work area self.setPopupMode(QtGui.QToolButton.DelayedPopup) self.setToolTip( "This is your current work area.\n" "The work you do will be associated with this item in Shotgun." 
) # set blue icon self.setIcon(self._current_work_area_icon) # disable the button self.setEnabled(False) # make sure it doesn't pop on mouseover self._is_static = True elif entity_type in self.NON_WORK_AREA_TYPES: # don't show the ctx selector for some types self.setToolTip("This cannot be a work area.") # disable the button self.setEnabled(False) # make sure it doesn't pop on mouse over self._is_static = True else: if entity_type == "Task": self._caption = "Set Work Area" self.setToolTip("Click to set your work area to the current task.") else: self._caption = "Pick Work Area" self.setToolTip("Click to select a task.") self._init_default_state() def _init_default_state(self): """ Sets up the default collapsed state of the button """ self.setText("") self.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly) self.setMinimumSize(QtCore.QSize(self.WIDGET_WIDTH_COLLAPSED, self.WIDGET_HEIGHT)) self.setMaximumSize(QtCore.QSize(self.WIDGET_WIDTH_COLLAPSED, self.WIDGET_HEIGHT)) # tell the style sheet to adjust self.setProperty("is_expanded", False) self.style().unpolish(self) self.style().polish(self) def _on_click(self): """ Executed when the button is clicked """ self.change_work_area.emit(self._entity_type, self._entity_id) def enterEvent(self, evt): """ QT Mouse enter event """ if not self._is_static: # not the current work area. so expand the button self.setText(self._caption) self.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon) self.setMinimumSize(QtCore.QSize(self._width, self.WIDGET_HEIGHT)) self.setMaximumSize(QtCore.QSize(self._width, self.WIDGET_HEIGHT)) # tell the style sheet to adjust self.setProperty("is_expanded", True) self.style().unpolish(self) self.style().polish(self) return super(WorkAreaButton, self).enterEvent(evt) def leaveEvent(self, evt): """ QT Mouse leave event """ if not self._is_static: # collapse button after a delay QtCore.QTimer.singleShot(300, self._init_default_state) return super(WorkAreaButton, self).leaveEvent(evt) class FloatingWorkAreaButton(WorkAreaButton): """ UX for switching work area. This displays a "change work area" button which a user can interact with The button is designed to expand so that it is subtle until a user hovers over it. Derives from :class:`WorkAreaButton` and positions the widget relative to the bottom-right corner of the parent widget. :signal clicked(str, int): Fires when someone clicks the change work area button. Arguments passed are the entity type and entity id """ RIGHT_OFFSET = 6 BOTTOM_OFFSET = 6 def __init__(self, parent): """ :param right_side_offset: Right hand side offset in pixels :param bottom_offset: Bottom offset in pixels :param parent: The model parent. :type parent: :class:`~PySide.QtGui.QObject` """ super(FloatingWorkAreaButton, self).__init__(parent) # hook up a listener to the parent window so this widget # follows along when the parent window changes size filter = ResizeEventFilter(parent) filter.resized.connect(self._on_parent_resized) parent.installEventFilter(filter) def set_up(self, entity_type, entity_id): """ Sets up the button for a given entity. :param entity_type: Entity type to set up button for :param entity_id: Entity id to set up button for """ if entity_type in self.NON_WORK_AREA_TYPES: # hide the widget self.setVisible(False) else: # base class implementation super(FloatingWorkAreaButton, self).set_up(entity_type, entity_id) def __position_widget(self): """ Moves the widget to the bottom-right corner of the parent widget. 
""" self.move( self.parentWidget().width() - self.width() - self.RIGHT_OFFSET, self.parentWidget().height() - self.height() - self.BOTTOM_OFFSET ) def _init_default_state(self): """ Sets up the default collapsed state of the button """ super(FloatingWorkAreaButton, self)._init_default_state() self.__position_widget() def enterEvent(self, evt): """ QT Mouse enter event """ status = super(FloatingWorkAreaButton, self).enterEvent(evt) if not self._is_static: self.__position_widget() return status def _on_parent_resized(self): """ Special slot hooked up to the event filter. When associated widget is resized this slot is being called. """ self.__position_widget() class ResizeEventFilter(QtCore.QObject): """ Utility and helper. Event filter which emits a resized signal whenever the monitored widget resizes. You use it like this: # create the filter object. Typically, it's # it's easiest to parent it to the object that is # being monitored (in this case self.ui.thumbnail) filter = ResizeEventFilter(self.ui.thumbnail) # now set up a signal/slot connection so that the # __on_thumb_resized slot gets called every time # the widget is resized filter.resized.connect(self.__on_thumb_resized) # finally, install the event filter into the QT # event system self.ui.thumbnail.installEventFilter(filter) """ resized = QtCore.Signal() def eventFilter(self, obj, event): """ Event filter implementation. For information, see the QT docs: http://doc.qt.io/qt-4.8/qobject.html#eventFilter This will emit the resized signal (in this class) whenever the linked up object is being resized. :param obj: The object that is being watched for events :param event: Event object that the object has emitted :returns: Always returns False to indicate that no events should ever be discarded by the filter. """ # peek at the message if event.type() == QtCore.QEvent.Resize: # re-broadcast any resize events self.resized.emit() # pass it on! return False
32.583851
106
0.629527
from sgtk.platform.qt import QtCore, QtGui import sgtk class WorkAreaButton(QtGui.QToolButton): WIDGET_WIDTH_COLLAPSED = 30 WIDGET_HEIGHT = 30 NON_WORK_AREA_TYPES = [ "PublishedFile", "Project", "TankPublishedFile", "Version", "Note", "Group", "HumanUser", "ScriptUser", "ApiUser", "ClientUser", "Department", "Cut", "CutItem", "Delivery", "Playlist", "Ticket" ] change_work_area = QtCore.Signal(str, int) def __init__(self, parent): super(WorkAreaButton, self).__init__(parent) self._normal_icon = QtGui.QIcon() self._normal_icon.addPixmap( QtGui.QPixmap(":/tk_multi_infopanel/pin.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off ) # an icon to represent the current work area self._current_work_area_icon = QtGui.QIcon() self._current_work_area_icon.addPixmap( QtGui.QPixmap(":/tk_multi_infopanel/pin_blue.png"), QtGui.QIcon.Disabled, QtGui.QIcon.Off ) self.setIcon(self._normal_icon) self.setIconSize(QtCore.QSize(self.WIDGET_WIDTH_COLLAPSED, self.WIDGET_HEIGHT)) self._bundle = sgtk.platform.current_bundle() self._entity_type = None self._entity_id = None self._is_static = False self._caption = "Set Work Area" self._width = 120 self.clicked.connect(self._on_click) self.setVisible(False) def set_up(self, entity_type, entity_id): self._entity_id = entity_id self._entity_type = entity_type if not self._bundle.get_setting("enable_context_switch"): # context switch button not enabled return # figure out if this is the current project context = self._bundle.context context_entity = context.task or context.entity or context.project or None self.setVisible(True) self.setEnabled(True) self.setIcon(self._normal_icon) self._is_static = False if context_entity and context_entity["type"] == entity_type and context_entity["id"] == entity_id: # the current work area self.setPopupMode(QtGui.QToolButton.DelayedPopup) self.setToolTip( "This is your current work area.\n" "The work you do will be associated with this item in Shotgun." 
) # set blue icon self.setIcon(self._current_work_area_icon) # disable the button self.setEnabled(False) # make sure it doesn't pop on mouseover self._is_static = True elif entity_type in self.NON_WORK_AREA_TYPES: self.setToolTip("This cannot be a work area.") # disable the button self.setEnabled(False) # make sure it doesn't pop on mouse over self._is_static = True else: if entity_type == "Task": self._caption = "Set Work Area" self.setToolTip("Click to set your work area to the current task.") else: self._caption = "Pick Work Area" self.setToolTip("Click to select a task.") self._init_default_state() def _init_default_state(self): self.setText("") self.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly) self.setMinimumSize(QtCore.QSize(self.WIDGET_WIDTH_COLLAPSED, self.WIDGET_HEIGHT)) self.setMaximumSize(QtCore.QSize(self.WIDGET_WIDTH_COLLAPSED, self.WIDGET_HEIGHT)) self.setProperty("is_expanded", False) self.style().unpolish(self) self.style().polish(self) def _on_click(self): self.change_work_area.emit(self._entity_type, self._entity_id) def enterEvent(self, evt): if not self._is_static: self.setText(self._caption) self.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon) self.setMinimumSize(QtCore.QSize(self._width, self.WIDGET_HEIGHT)) self.setMaximumSize(QtCore.QSize(self._width, self.WIDGET_HEIGHT)) self.setProperty("is_expanded", True) self.style().unpolish(self) self.style().polish(self) return super(WorkAreaButton, self).enterEvent(evt) def leaveEvent(self, evt): if not self._is_static: QtCore.QTimer.singleShot(300, self._init_default_state) return super(WorkAreaButton, self).leaveEvent(evt) class FloatingWorkAreaButton(WorkAreaButton): RIGHT_OFFSET = 6 BOTTOM_OFFSET = 6 def __init__(self, parent): super(FloatingWorkAreaButton, self).__init__(parent) filter = ResizeEventFilter(parent) filter.resized.connect(self._on_parent_resized) parent.installEventFilter(filter) def set_up(self, entity_type, entity_id): if entity_type in self.NON_WORK_AREA_TYPES: self.setVisible(False) else: super(FloatingWorkAreaButton, self).set_up(entity_type, entity_id) def __position_widget(self): self.move( self.parentWidget().width() - self.width() - self.RIGHT_OFFSET, self.parentWidget().height() - self.height() - self.BOTTOM_OFFSET ) def _init_default_state(self): super(FloatingWorkAreaButton, self)._init_default_state() self.__position_widget() def enterEvent(self, evt): status = super(FloatingWorkAreaButton, self).enterEvent(evt) if not self._is_static: self.__position_widget() return status def _on_parent_resized(self): self.__position_widget() class ResizeEventFilter(QtCore.QObject): resized = QtCore.Signal() def eventFilter(self, obj, event): if event.type() == QtCore.QEvent.Resize: self.resized.emit() return False
true
true
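Editor's note: the ResizeEventFilter docstring in the record above already sketches how the filter is wired up inside the Shotgun panel. For readers without sgtk, the following is a minimal, self-contained sketch of the same pattern using plain PySide2 (an assumption; any Qt binding exposing the same QtCore/QtWidgets API works). It re-pins a child button to its parent's bottom-right corner on every parent resize, mirroring FloatingWorkAreaButton.__position_widget and its 6-pixel offsets.

# Illustrative sketch only (not part of the dataset record above).
# Assumes PySide2 is installed; the sgtk.platform.qt wrapper in the record
# re-exports an equivalent QtCore API.
from PySide2 import QtCore, QtWidgets


class ResizeEventFilter(QtCore.QObject):
    """Re-broadcast parent resize events as a Qt signal."""

    resized = QtCore.Signal()

    def eventFilter(self, obj, event):
        if event.type() == QtCore.QEvent.Resize:
            self.resized.emit()
        return False  # never swallow the event


app = QtWidgets.QApplication([])
parent = QtWidgets.QWidget()
button = QtWidgets.QPushButton("work area", parent)


def reposition():
    # same arithmetic as FloatingWorkAreaButton.__position_widget (offsets of 6 px)
    button.move(parent.width() - button.width() - 6,
                parent.height() - button.height() - 6)


event_filter = ResizeEventFilter(parent)
event_filter.resized.connect(reposition)
parent.installEventFilter(event_filter)

parent.resize(400, 300)
parent.show()
reposition()
app.exec_()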
7902fc15fa979fdab469120dbb1c690152b62290
3,355
py
Python
btcmagic/test_transaction.py
Dirbaio/btcmagic
be8d195dc019a0fdb5d352beb7ad555198f23dfb
[ "MIT" ]
null
null
null
btcmagic/test_transaction.py
Dirbaio/btcmagic
be8d195dc019a0fdb5d352beb7ad555198f23dfb
[ "MIT" ]
null
null
null
btcmagic/test_transaction.py
Dirbaio/btcmagic
be8d195dc019a0fdb5d352beb7ad555198f23dfb
[ "MIT" ]
null
null
null
from unittest import TestCase from btcmagic import transaction, convert import os import json class TestTransaction(TestCase): def setUp(self): self.tx_bin = convert.hex_to_bytes( '0100000001637aaf20d708fcff67bb688af6e41d1807e6883f736c50eacb6042bf6e6c829c010000008c493046022100da1e59d78bb88ca7c3e13a4a6f4e259d5dd8cb177d5f79199bf024b1f57121d50221008d1d9838606a62ed4bd011a6ce8a2042ae2dc38fd05381b50aa388a1c8bd9150014104d3b615c609e48ae81389f6617b50473bf4c93f63c9853cd038aa4f00a989ebd62ae8253555e24c88b939817da18cd4e7263fda6a0e815097589bb90a5a6b3ff1ffffffff03b9000000000000001976a9149fe14d50c95abd6ecddc5d61255cfe5aebeba7e988ac57300f00000000001976a914c0492db5f283a22274ef378cdffbe5ecbe29862b88ac00000000000000000a6a0810e2cdc1af05180100000000') self.tx_obj = { 'ins': [ { 'sequence': 4294967295, 'script': b'I0F\x02!\x00\xda\x1eY\xd7\x8b\xb8\x8c\xa7\xc3\xe1:JoN%\x9d]\xd8\xcb\x17}_y\x19\x9b\xf0$\xb1\xf5q!\xd5\x02!\x00\x8d\x1d\x988`jb\xedK\xd0\x11\xa6\xce\x8a B\xae-\xc3\x8f\xd0S\x81\xb5\n\xa3\x88\xa1\xc8\xbd\x91P\x01A\x04\xd3\xb6\x15\xc6\t\xe4\x8a\xe8\x13\x89\xf6a{PG;\xf4\xc9?c\xc9\x85<\xd08\xaaO\x00\xa9\x89\xeb\xd6*\xe8%5U\xe2L\x88\xb99\x81}\xa1\x8c\xd4\xe7&?\xdaj\x0e\x81P\x97X\x9b\xb9\nZk?\xf1', 'outpoint': {'index': 1, 'hash': b'\x9c\x82ln\xbfB`\xcb\xeaPls?\x88\xe6\x07\x18\x1d\xe4\xf6\x8ah\xbbg\xff\xfc\x08\xd7 \xafzc'} } ], 'locktime': 0, 'version': 1, 'outs': [ { 'value': 185, 'script': b'v\xa9\x14\x9f\xe1MP\xc9Z\xbdn\xcd\xdc]a%\\\xfeZ\xeb\xeb\xa7\xe9\x88\xac' }, { 'value': 995415, 'script': b'v\xa9\x14\xc0I-\xb5\xf2\x83\xa2"t\xef7\x8c\xdf\xfb\xe5\xec\xbe)\x86+\x88\xac' }, { 'value': 0, 'script': b'j\x08\x10\xe2\xcd\xc1\xaf\x05\x18\x01' } ] } def test_deserialization(self): tx_obj = transaction.deserialize(self.tx_bin) self.assertEqual(tx_obj, self.tx_obj) def test_serialization(self): tx_bin = transaction.serialize(self.tx_obj) self.assertEqual(tx_bin, self.tx_bin) class TestSighash(TestCase): def setUp(self): loc = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) with open(os.path.join(loc, 'sighash.json')) as f: self.data = json.load(f) def test_sighash(self): first = True for vector in self.data: # Ignore first header row in the JSON. if first: first = False continue tx = transaction.deserialize(convert.hex_to_bytes(vector[0])) script = convert.hex_to_bytes(vector[1]) index = int(vector[2]) hashtype = int(vector[3]) & 0xffffffff # This must be unsigned int sighash = convert.hex_to_bytes(vector[4])[::-1] # It's reversed for some reason? my_sighash = transaction.sighash(tx, index, script, hashtype) self.assertEqual( sighash, my_sighash, 'hashtype = {:x}'.format(hashtype) )
44.733333
571
0.613115
from unittest import TestCase from btcmagic import transaction, convert import os import json class TestTransaction(TestCase): def setUp(self): self.tx_bin = convert.hex_to_bytes( '0100000001637aaf20d708fcff67bb688af6e41d1807e6883f736c50eacb6042bf6e6c829c010000008c493046022100da1e59d78bb88ca7c3e13a4a6f4e259d5dd8cb177d5f79199bf024b1f57121d50221008d1d9838606a62ed4bd011a6ce8a2042ae2dc38fd05381b50aa388a1c8bd9150014104d3b615c609e48ae81389f6617b50473bf4c93f63c9853cd038aa4f00a989ebd62ae8253555e24c88b939817da18cd4e7263fda6a0e815097589bb90a5a6b3ff1ffffffff03b9000000000000001976a9149fe14d50c95abd6ecddc5d61255cfe5aebeba7e988ac57300f00000000001976a914c0492db5f283a22274ef378cdffbe5ecbe29862b88ac00000000000000000a6a0810e2cdc1af05180100000000') self.tx_obj = { 'ins': [ { 'sequence': 4294967295, 'script': b'I0F\x02!\x00\xda\x1eY\xd7\x8b\xb8\x8c\xa7\xc3\xe1:JoN%\x9d]\xd8\xcb\x17}_y\x19\x9b\xf0$\xb1\xf5q!\xd5\x02!\x00\x8d\x1d\x988`jb\xedK\xd0\x11\xa6\xce\x8a B\xae-\xc3\x8f\xd0S\x81\xb5\n\xa3\x88\xa1\xc8\xbd\x91P\x01A\x04\xd3\xb6\x15\xc6\t\xe4\x8a\xe8\x13\x89\xf6a{PG;\xf4\xc9?c\xc9\x85<\xd08\xaaO\x00\xa9\x89\xeb\xd6*\xe8%5U\xe2L\x88\xb99\x81}\xa1\x8c\xd4\xe7&?\xdaj\x0e\x81P\x97X\x9b\xb9\nZk?\xf1', 'outpoint': {'index': 1, 'hash': b'\x9c\x82ln\xbfB`\xcb\xeaPls?\x88\xe6\x07\x18\x1d\xe4\xf6\x8ah\xbbg\xff\xfc\x08\xd7 \xafzc'} } ], 'locktime': 0, 'version': 1, 'outs': [ { 'value': 185, 'script': b'v\xa9\x14\x9f\xe1MP\xc9Z\xbdn\xcd\xdc]a%\\\xfeZ\xeb\xeb\xa7\xe9\x88\xac' }, { 'value': 995415, 'script': b'v\xa9\x14\xc0I-\xb5\xf2\x83\xa2"t\xef7\x8c\xdf\xfb\xe5\xec\xbe)\x86+\x88\xac' }, { 'value': 0, 'script': b'j\x08\x10\xe2\xcd\xc1\xaf\x05\x18\x01' } ] } def test_deserialization(self): tx_obj = transaction.deserialize(self.tx_bin) self.assertEqual(tx_obj, self.tx_obj) def test_serialization(self): tx_bin = transaction.serialize(self.tx_obj) self.assertEqual(tx_bin, self.tx_bin) class TestSighash(TestCase): def setUp(self): loc = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) with open(os.path.join(loc, 'sighash.json')) as f: self.data = json.load(f) def test_sighash(self): first = True for vector in self.data: # Ignore first header row in the JSON. if first: first = False continue tx = transaction.deserialize(convert.hex_to_bytes(vector[0])) script = convert.hex_to_bytes(vector[1]) index = int(vector[2]) hashtype = int(vector[3]) & 0xffffffff # This must be unsigned int sighash = convert.hex_to_bytes(vector[4])[::-1] # It's reversed for some reason? my_sighash = transaction.sighash(tx, index, script, hashtype) self.assertEqual( sighash, my_sighash, 'hashtype = {:x}'.format(hashtype) )
true
true
7902fc527c8669cfd316d84932dece02136067cd
761
py
Python
src/gnome_extensions_cli/utils.py
essembeh/gnome-extensions-cli
4ce70b7db0411d752aaea2c1fef64fa6f553db83
[ "Apache-2.0" ]
12
2020-11-22T09:00:22.000Z
2022-01-22T20:12:12.000Z
src/gnome_extensions_cli/utils.py
essembeh/gnome-extensions-cli
4ce70b7db0411d752aaea2c1fef64fa6f553db83
[ "Apache-2.0" ]
2
2020-12-22T03:55:30.000Z
2022-02-14T18:34:05.000Z
src/gnome_extensions_cli/utils.py
essembeh/gnome-extensions-cli
4ce70b7db0411d752aaea2c1fef64fa6f553db83
[ "Apache-2.0" ]
null
null
null
import subprocess
import sys
from distutils.version import LooseVersion
from re import fullmatch


def get_shell_version():
    try:
        for line in (
            subprocess.check_output(["gnome-shell", "--version"]).decode().splitlines()
        ):
            m = fullmatch(r"GNOME Shell (?P<version>[0-9.]+)", line)
            if m:
                return m.group("version")
    except BaseException:
        print("Warning, cannot retrieve current Gnome Shell version", file=sys.stderr)


def version_comparator(a, b):
    if a == b:
        return 0
    if a is None:
        return 1
    if b is None:
        return -1
    a, b = LooseVersion(str(a)), LooseVersion(str(b))
    if a < b:
        return 1
    if a > b:
        return -1
    return 0
23.78125
87
0.576873
import subprocess
import sys
from distutils.version import LooseVersion
from re import fullmatch


def get_shell_version():
    try:
        for line in (
            subprocess.check_output(["gnome-shell", "--version"]).decode().splitlines()
        ):
            m = fullmatch(r"GNOME Shell (?P<version>[0-9.]+)", line)
            if m:
                return m.group("version")
    except BaseException:
        print("Warning, cannot retrieve current Gnome Shell version", file=sys.stderr)


def version_comparator(a, b):
    if a == b:
        return 0
    if a is None:
        return 1
    if b is None:
        return -1
    a, b = LooseVersion(str(a)), LooseVersion(str(b))
    if a < b:
        return 1
    if a > b:
        return -1
    return 0
true
true
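Editor's note: version_comparator in the record above orders versions newest-first (it returns 1 when a is older than b) and treats None as the oldest value. A minimal usage sketch follows; the function is repeated verbatim from the record so the snippet runs standalone, and the sample version strings are invented for illustration.

# Illustrative sketch only (not part of the dataset record above).
from functools import cmp_to_key

from distutils.version import LooseVersion


def version_comparator(a, b):
    # Copied from src/gnome_extensions_cli/utils.py above: newest-first ordering.
    if a == b:
        return 0
    if a is None:
        return 1
    if b is None:
        return -1
    a, b = LooseVersion(str(a)), LooseVersion(str(b))
    if a < b:
        return 1
    if a > b:
        return -1
    return 0


versions = ["3.36.4", None, "40.1", "3.38.2"]
print(sorted(versions, key=cmp_to_key(version_comparator)))
# -> ['40.1', '3.38.2', '3.36.4', None]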
7902fd84010547b574e549c0b484519222d451e3
19,748
py
Python
plugins/modules/oci_waas_access_rules_facts.py
slmjy/oci-ansible-collection
349c91e2868bf4706a6e3d6fb3b47fc622bfe11b
[ "Apache-2.0" ]
108
2020-05-19T20:46:10.000Z
2022-03-25T14:10:01.000Z
plugins/modules/oci_waas_access_rules_facts.py
slmjy/oci-ansible-collection
349c91e2868bf4706a6e3d6fb3b47fc622bfe11b
[ "Apache-2.0" ]
90
2020-06-14T22:07:11.000Z
2022-03-07T05:40:29.000Z
plugins/modules/oci_waas_access_rules_facts.py
slmjy/oci-ansible-collection
349c91e2868bf4706a6e3d6fb3b47fc622bfe11b
[ "Apache-2.0" ]
42
2020-08-30T23:09:12.000Z
2022-03-25T16:58:01.000Z
#!/usr/bin/python # Copyright (c) 2020, 2021 Oracle and/or its affiliates. # This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Apache License v2.0 # See LICENSE.TXT for details. # GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { "metadata_version": "1.1", "status": ["preview"], "supported_by": "community", } DOCUMENTATION = """ --- module: oci_waas_access_rules_facts short_description: Fetches details about one or multiple AccessRules resources in Oracle Cloud Infrastructure description: - Fetches details about one or multiple AccessRules resources in Oracle Cloud Infrastructure - Gets the currently configured access rules for the Web Application Firewall configuration of a specified WAAS policy. The order of the access rules is important. The rules will be checked in the order they are specified and the first matching rule will be used. version_added: "2.9.0" author: Oracle (@oracle) options: waas_policy_id: description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the WAAS policy. type: str required: true extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_name_option ] """ EXAMPLES = """ - name: List access_rules oci_waas_access_rules_facts: # required waas_policy_id: "ocid1.waaspolicy.oc1..xxxxxxEXAMPLExxxxxx" """ RETURN = """ access_rules: description: - List of AccessRules resources returned: on success type: complex contains: name: description: - The unique name of the access rule. returned: on success type: str sample: name_example criteria: description: - The list of access rule criteria. The rule would be applied only for the requests that matched all the listed conditions. returned: on success type: complex contains: condition: description: - "The criteria the access rule and JavaScript Challenge uses to determine if action should be taken on a request. - **URL_IS:** Matches if the concatenation of request URL path and query is identical to the contents of the `value` field. URL must start with a `/`. - **URL_IS_NOT:** Matches if the concatenation of request URL path and query is not identical to the contents of the `value` field. URL must start with a `/`. - **URL_STARTS_WITH:** Matches if the concatenation of request URL path and query starts with the contents of the `value` field. URL must start with a `/`. - **URL_PART_ENDS_WITH:** Matches if the concatenation of request URL path and query ends with the contents of the `value` field. - **URL_PART_CONTAINS:** Matches if the concatenation of request URL path and query contains the contents of the `value` field. - **URL_REGEX:** Matches if the concatenation of request URL path and query is described by the regular expression in the value field. The value must be a valid regular expression recognized by the PCRE library in Nginx (https://www.pcre.org). - **URL_DOES_NOT_MATCH_REGEX:** Matches if the concatenation of request URL path and query is not described by the regular expression in the `value` field. The value must be a valid regular expression recognized by the PCRE library in Nginx (https://www.pcre.org). - **URL_DOES_NOT_START_WITH:** Matches if the concatenation of request URL path and query does not start with the contents of the `value` field. 
- **URL_PART_DOES_NOT_CONTAIN:** Matches if the concatenation of request URL path and query does not contain the contents of the `value` field. - **URL_PART_DOES_NOT_END_WITH:** Matches if the concatenation of request URL path and query does not end with the contents of the `value` field. - **IP_IS:** Matches if the request originates from one of the IP addresses contained in the defined address list. The `value` in this case is string with one or multiple IPs or CIDR notations separated by new line symbol \\\\n *Example:* \\"1.1.1.1\\\\n1.1.1.2\\\\n1.2.2.1/30\\" - **IP_IS_NOT:** Matches if the request does not originate from any of the IP addresses contained in the defined address list. The `value` in this case is string with one or multiple IPs or CIDR notations separated by new line symbol \\\\n *Example:* \\"1.1.1.1\\\\n1.1.1.2\\\\n1.2.2.1/30\\" - **IP_IN_LIST:** Matches if the request originates from one of the IP addresses contained in the referenced address list. The `value` in this case is OCID of the address list. - **IP_NOT_IN_LIST:** Matches if the request does not originate from any IP address contained in the referenced address list. The `value` field in this case is OCID of the address list. - **HTTP_HEADER_CONTAINS:** The HTTP_HEADER_CONTAINS criteria is defined using a compound value separated by a colon: a header field name and a header field value. `host:test.example.com` is an example of a criteria value where `host` is the header field name and `test.example.com` is the header field value. A request matches when the header field name is a case insensitive match and the header field value is a case insensitive, substring match. *Example:* With a criteria value of `host:test.example.com`, where `host` is the name of the field and `test.example.com` is the value of the host field, a request with the header values, `Host: www.test.example.com` will match, where as a request with header values of `host: www.example.com` or `host: test.sub.example.com` will not match. - **HTTP_METHOD_IS:** Matches if the request method is identical to one of the values listed in field. The `value` in this case is string with one or multiple HTTP methods separated by new line symbol \\\\n The list of available methods: `GET`, `HEAD`, `POST`, `PUT`, `DELETE`, `CONNECT`, `OPTIONS`, `TRACE`, `PATCH`" - "*Example:* \\"GET\\\\nPOST\\"" - "- **HTTP_METHOD_IS_NOT:** Matches if the request is not identical to any of the contents of the `value` field. The `value` in this case is string with one or multiple HTTP methods separated by new line symbol \\\\n The list of available methods: `GET`, `HEAD`, `POST`, `PUT`, `DELETE`, `CONNECT`, `OPTIONS`, `TRACE`, `PATCH`" - "*Example:* \\"GET\\\\nPOST\\"" - "- **COUNTRY_IS:** Matches if the request originates from one of countries in the `value` field. The `value` in this case is string with one or multiple countries separated by new line symbol \\\\n Country codes are in ISO 3166-1 alpha-2 format. For a list of codes, see L(ISO's website,https://www.iso.org/obp/ui/#search/code/). *Example:* \\"AL\\\\nDZ\\\\nAM\\" - **COUNTRY_IS_NOT:** Matches if the request does not originate from any of countries in the `value` field. The `value` in this case is string with one or multiple countries separated by new line symbol \\\\n Country codes are in ISO 3166-1 alpha-2 format. For a list of codes, see L(ISO's website,https://www.iso.org/obp/ui/#search/code/). 
*Example:* \\"AL\\\\nDZ\\\\nAM\\" - **USER_AGENT_IS:** Matches if the requesting user agent is identical to the contents of the `value` field. *Example:* `Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:35.0) Gecko/20100101 Firefox/35.0` - **USER_AGENT_IS_NOT:** Matches if the requesting user agent is not identical to the contents of the `value` field. *Example:* `Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:35.0) Gecko/20100101 Firefox/35.0`" returned: on success type: str sample: URL_IS value: description: - The criteria value. returned: on success type: str sample: value_example is_case_sensitive: description: - When enabled, the condition will be matched with case-sensitive rules. returned: on success type: bool sample: true action: description: - The action to take when the access criteria are met for a rule. If unspecified, defaults to `ALLOW`. - "- **ALLOW:** Takes no action, just logs the request." - "- **DETECT:** Takes no action, but creates an alert for the request." - "- **BLOCK:** Blocks the request by returning specified response code or showing error page." - "- **BYPASS:** Bypasses some or all challenges." - "- **REDIRECT:** Redirects the request to the specified URL. These fields are required when `REDIRECT` is selected: `redirectUrl`, `redirectResponseCode`." - "- **SHOW_CAPTCHA:** Show a CAPTCHA Challenge page instead of the requested page." - Regardless of action, no further rules are processed once a rule is matched. returned: on success type: str sample: ALLOW block_action: description: - The method used to block requests if `action` is set to `BLOCK` and the access criteria are met. If unspecified, defaults to `SET_RESPONSE_CODE`. returned: on success type: str sample: SET_RESPONSE_CODE block_response_code: description: - "The response status code to return when `action` is set to `BLOCK`, `blockAction` is set to `SET_RESPONSE_CODE`, and the access criteria are met. If unspecified, defaults to `403`. The list of available response codes: `200`, `201`, `202`, `204`, `206`, `300`, `301`, `302`, `303`, `304`, `307`, `400`, `401`, `403`, `404`, `405`, `408`, `409`, `411`, `412`, `413`, `414`, `415`, `416`, `422`, `444`, `494`, `495`, `496`, `497`, `499`, `500`, `501`, `502`, `503`, `504`, `507`." returned: on success type: int sample: 56 block_error_page_message: description: - The message to show on the error page when `action` is set to `BLOCK`, `blockAction` is set to `SHOW_ERROR_PAGE`, and the access criteria are met. If unspecified, defaults to 'Access to the website is blocked.' returned: on success type: str sample: block_error_page_message_example block_error_page_code: description: - The error code to show on the error page when `action` is set to `BLOCK`, `blockAction` is set to `SHOW_ERROR_PAGE`, and the access criteria are met. If unspecified, defaults to 'Access rules'. returned: on success type: str sample: block_error_page_code_example block_error_page_description: description: - The description text to show on the error page when `action` is set to `BLOCK`, `blockAction` is set to `SHOW_ERROR_PAGE`, and the access criteria are met. If unspecified, defaults to 'Access blocked by website owner. Please contact support.' returned: on success type: str sample: block_error_page_description_example bypass_challenges: description: - The list of challenges to bypass when `action` is set to `BYPASS`. If unspecified or empty, all challenges are bypassed. - "- **JS_CHALLENGE:** Bypasses JavaScript Challenge." 
- "- **DEVICE_FINGERPRINT_CHALLENGE:** Bypasses Device Fingerprint Challenge." - "- **HUMAN_INTERACTION_CHALLENGE:** Bypasses Human Interaction Challenge." - "- **CAPTCHA:** Bypasses CAPTCHA Challenge." returned: on success type: list sample: [] redirect_url: description: - The target to which the request should be redirected, represented as a URI reference. Required when `action` is `REDIRECT`. returned: on success type: str sample: redirect_url_example redirect_response_code: description: - The response status code to return when `action` is set to `REDIRECT`. - "- **MOVED_PERMANENTLY:** Used for designating the permanent movement of a page (numerical code - 301)." - "- **FOUND:** Used for designating the temporary movement of a page (numerical code - 302)." returned: on success type: str sample: MOVED_PERMANENTLY captcha_title: description: - The title used when showing a CAPTCHA challenge when `action` is set to `SHOW_CAPTCHA` and the request is challenged. returned: on success type: str sample: captcha_title_example captcha_header: description: - The text to show in the header when showing a CAPTCHA challenge when `action` is set to `SHOW_CAPTCHA` and the request is challenged. returned: on success type: str sample: captcha_header_example captcha_footer: description: - The text to show in the footer when showing a CAPTCHA challenge when `action` is set to `SHOW_CAPTCHA` and the request is challenged. returned: on success type: str sample: captcha_footer_example captcha_submit_label: description: - The text to show on the label of the CAPTCHA challenge submit button when `action` is set to `SHOW_CAPTCHA` and the request is challenged. returned: on success type: str sample: captcha_submit_label_example response_header_manipulation: description: - An object that represents an action to apply to an HTTP response headers if all rule criteria will be matched regardless of `action` value. returned: on success type: complex contains: action: description: - "" returned: on success type: str sample: EXTEND_HTTP_RESPONSE_HEADER header: description: - A header field name that conforms to RFC 7230. - "Example: `example_header_name`" returned: on success type: str sample: header_example value: description: - A header field value that conforms to RFC 7230. 
- "Example: `example_value`" returned: on success type: str sample: value_example sample: [{ "name": "name_example", "criteria": [{ "condition": "URL_IS", "value": "value_example", "is_case_sensitive": true }], "action": "ALLOW", "block_action": "SET_RESPONSE_CODE", "block_response_code": 56, "block_error_page_message": "block_error_page_message_example", "block_error_page_code": "block_error_page_code_example", "block_error_page_description": "block_error_page_description_example", "bypass_challenges": [], "redirect_url": "redirect_url_example", "redirect_response_code": "MOVED_PERMANENTLY", "captcha_title": "captcha_title_example", "captcha_header": "captcha_header_example", "captcha_footer": "captcha_footer_example", "captcha_submit_label": "captcha_submit_label_example", "response_header_manipulation": [{ "action": "EXTEND_HTTP_RESPONSE_HEADER", "header": "header_example", "value": "value_example" }] }] """ from ansible.module_utils.basic import AnsibleModule from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import ( OCIResourceFactsHelperBase, get_custom_class, ) try: from oci.waas import WaasClient HAS_OCI_PY_SDK = True except ImportError: HAS_OCI_PY_SDK = False class AccessRulesFactsHelperGen(OCIResourceFactsHelperBase): """Supported operations: list""" def get_required_params_for_list(self): return [ "waas_policy_id", ] def list_resources(self): optional_list_method_params = [ "name", ] optional_kwargs = dict( (param, self.module.params[param]) for param in optional_list_method_params if self.module.params.get(param) is not None ) return oci_common_utils.list_all_resources( self.client.list_access_rules, waas_policy_id=self.module.params.get("waas_policy_id"), **optional_kwargs ) AccessRulesFactsHelperCustom = get_custom_class("AccessRulesFactsHelperCustom") class ResourceFactsHelper(AccessRulesFactsHelperCustom, AccessRulesFactsHelperGen): pass def main(): module_args = oci_common_utils.get_common_arg_spec() module_args.update( dict(waas_policy_id=dict(type="str", required=True), name=dict(type="str"),) ) module = AnsibleModule(argument_spec=module_args) if not HAS_OCI_PY_SDK: module.fail_json(msg="oci python sdk required for this module.") resource_facts_helper = ResourceFactsHelper( module=module, resource_type="access_rules", service_client_class=WaasClient, namespace="waas", ) result = [] if resource_facts_helper.is_list(): result = resource_facts_helper.list() else: resource_facts_helper.fail() module.exit_json(access_rules=result) if __name__ == "__main__": main()
53.663043
160
0.597731
from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { "metadata_version": "1.1", "status": ["preview"], "supported_by": "community", } DOCUMENTATION = """ --- module: oci_waas_access_rules_facts short_description: Fetches details about one or multiple AccessRules resources in Oracle Cloud Infrastructure description: - Fetches details about one or multiple AccessRules resources in Oracle Cloud Infrastructure - Gets the currently configured access rules for the Web Application Firewall configuration of a specified WAAS policy. The order of the access rules is important. The rules will be checked in the order they are specified and the first matching rule will be used. version_added: "2.9.0" author: Oracle (@oracle) options: waas_policy_id: description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the WAAS policy. type: str required: true extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_name_option ] """ EXAMPLES = """ - name: List access_rules oci_waas_access_rules_facts: # required waas_policy_id: "ocid1.waaspolicy.oc1..xxxxxxEXAMPLExxxxxx" """ RETURN = """ access_rules: description: - List of AccessRules resources returned: on success type: complex contains: name: description: - The unique name of the access rule. returned: on success type: str sample: name_example criteria: description: - The list of access rule criteria. The rule would be applied only for the requests that matched all the listed conditions. returned: on success type: complex contains: condition: description: - "The criteria the access rule and JavaScript Challenge uses to determine if action should be taken on a request. - **URL_IS:** Matches if the concatenation of request URL path and query is identical to the contents of the `value` field. URL must start with a `/`. - **URL_IS_NOT:** Matches if the concatenation of request URL path and query is not identical to the contents of the `value` field. URL must start with a `/`. - **URL_STARTS_WITH:** Matches if the concatenation of request URL path and query starts with the contents of the `value` field. URL must start with a `/`. - **URL_PART_ENDS_WITH:** Matches if the concatenation of request URL path and query ends with the contents of the `value` field. - **URL_PART_CONTAINS:** Matches if the concatenation of request URL path and query contains the contents of the `value` field. - **URL_REGEX:** Matches if the concatenation of request URL path and query is described by the regular expression in the value field. The value must be a valid regular expression recognized by the PCRE library in Nginx (https://www.pcre.org). - **URL_DOES_NOT_MATCH_REGEX:** Matches if the concatenation of request URL path and query is not described by the regular expression in the `value` field. The value must be a valid regular expression recognized by the PCRE library in Nginx (https://www.pcre.org). - **URL_DOES_NOT_START_WITH:** Matches if the concatenation of request URL path and query does not start with the contents of the `value` field. - **URL_PART_DOES_NOT_CONTAIN:** Matches if the concatenation of request URL path and query does not contain the contents of the `value` field. - **URL_PART_DOES_NOT_END_WITH:** Matches if the concatenation of request URL path and query does not end with the contents of the `value` field. - **IP_IS:** Matches if the request originates from one of the IP addresses contained in the defined address list. 
The `value` in this case is string with one or multiple IPs or CIDR notations separated by new line symbol \\\\n *Example:* \\"1.1.1.1\\\\n1.1.1.2\\\\n1.2.2.1/30\\" - **IP_IS_NOT:** Matches if the request does not originate from any of the IP addresses contained in the defined address list. The `value` in this case is string with one or multiple IPs or CIDR notations separated by new line symbol \\\\n *Example:* \\"1.1.1.1\\\\n1.1.1.2\\\\n1.2.2.1/30\\" - **IP_IN_LIST:** Matches if the request originates from one of the IP addresses contained in the referenced address list. The `value` in this case is OCID of the address list. - **IP_NOT_IN_LIST:** Matches if the request does not originate from any IP address contained in the referenced address list. The `value` field in this case is OCID of the address list. - **HTTP_HEADER_CONTAINS:** The HTTP_HEADER_CONTAINS criteria is defined using a compound value separated by a colon: a header field name and a header field value. `host:test.example.com` is an example of a criteria value where `host` is the header field name and `test.example.com` is the header field value. A request matches when the header field name is a case insensitive match and the header field value is a case insensitive, substring match. *Example:* With a criteria value of `host:test.example.com`, where `host` is the name of the field and `test.example.com` is the value of the host field, a request with the header values, `Host: www.test.example.com` will match, where as a request with header values of `host: www.example.com` or `host: test.sub.example.com` will not match. - **HTTP_METHOD_IS:** Matches if the request method is identical to one of the values listed in field. The `value` in this case is string with one or multiple HTTP methods separated by new line symbol \\\\n The list of available methods: `GET`, `HEAD`, `POST`, `PUT`, `DELETE`, `CONNECT`, `OPTIONS`, `TRACE`, `PATCH`" - "*Example:* \\"GET\\\\nPOST\\"" - "- **HTTP_METHOD_IS_NOT:** Matches if the request is not identical to any of the contents of the `value` field. The `value` in this case is string with one or multiple HTTP methods separated by new line symbol \\\\n The list of available methods: `GET`, `HEAD`, `POST`, `PUT`, `DELETE`, `CONNECT`, `OPTIONS`, `TRACE`, `PATCH`" - "*Example:* \\"GET\\\\nPOST\\"" - "- **COUNTRY_IS:** Matches if the request originates from one of countries in the `value` field. The `value` in this case is string with one or multiple countries separated by new line symbol \\\\n Country codes are in ISO 3166-1 alpha-2 format. For a list of codes, see L(ISO's website,https://www.iso.org/obp/ui/#search/code/). *Example:* \\"AL\\\\nDZ\\\\nAM\\" - **COUNTRY_IS_NOT:** Matches if the request does not originate from any of countries in the `value` field. The `value` in this case is string with one or multiple countries separated by new line symbol \\\\n Country codes are in ISO 3166-1 alpha-2 format. For a list of codes, see L(ISO's website,https://www.iso.org/obp/ui/#search/code/). *Example:* \\"AL\\\\nDZ\\\\nAM\\" - **USER_AGENT_IS:** Matches if the requesting user agent is identical to the contents of the `value` field. *Example:* `Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:35.0) Gecko/20100101 Firefox/35.0` - **USER_AGENT_IS_NOT:** Matches if the requesting user agent is not identical to the contents of the `value` field. 
*Example:* `Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:35.0) Gecko/20100101 Firefox/35.0`" returned: on success type: str sample: URL_IS value: description: - The criteria value. returned: on success type: str sample: value_example is_case_sensitive: description: - When enabled, the condition will be matched with case-sensitive rules. returned: on success type: bool sample: true action: description: - The action to take when the access criteria are met for a rule. If unspecified, defaults to `ALLOW`. - "- **ALLOW:** Takes no action, just logs the request." - "- **DETECT:** Takes no action, but creates an alert for the request." - "- **BLOCK:** Blocks the request by returning specified response code or showing error page." - "- **BYPASS:** Bypasses some or all challenges." - "- **REDIRECT:** Redirects the request to the specified URL. These fields are required when `REDIRECT` is selected: `redirectUrl`, `redirectResponseCode`." - "- **SHOW_CAPTCHA:** Show a CAPTCHA Challenge page instead of the requested page." - Regardless of action, no further rules are processed once a rule is matched. returned: on success type: str sample: ALLOW block_action: description: - The method used to block requests if `action` is set to `BLOCK` and the access criteria are met. If unspecified, defaults to `SET_RESPONSE_CODE`. returned: on success type: str sample: SET_RESPONSE_CODE block_response_code: description: - "The response status code to return when `action` is set to `BLOCK`, `blockAction` is set to `SET_RESPONSE_CODE`, and the access criteria are met. If unspecified, defaults to `403`. The list of available response codes: `200`, `201`, `202`, `204`, `206`, `300`, `301`, `302`, `303`, `304`, `307`, `400`, `401`, `403`, `404`, `405`, `408`, `409`, `411`, `412`, `413`, `414`, `415`, `416`, `422`, `444`, `494`, `495`, `496`, `497`, `499`, `500`, `501`, `502`, `503`, `504`, `507`." returned: on success type: int sample: 56 block_error_page_message: description: - The message to show on the error page when `action` is set to `BLOCK`, `blockAction` is set to `SHOW_ERROR_PAGE`, and the access criteria are met. If unspecified, defaults to 'Access to the website is blocked.' returned: on success type: str sample: block_error_page_message_example block_error_page_code: description: - The error code to show on the error page when `action` is set to `BLOCK`, `blockAction` is set to `SHOW_ERROR_PAGE`, and the access criteria are met. If unspecified, defaults to 'Access rules'. returned: on success type: str sample: block_error_page_code_example block_error_page_description: description: - The description text to show on the error page when `action` is set to `BLOCK`, `blockAction` is set to `SHOW_ERROR_PAGE`, and the access criteria are met. If unspecified, defaults to 'Access blocked by website owner. Please contact support.' returned: on success type: str sample: block_error_page_description_example bypass_challenges: description: - The list of challenges to bypass when `action` is set to `BYPASS`. If unspecified or empty, all challenges are bypassed. - "- **JS_CHALLENGE:** Bypasses JavaScript Challenge." - "- **DEVICE_FINGERPRINT_CHALLENGE:** Bypasses Device Fingerprint Challenge." - "- **HUMAN_INTERACTION_CHALLENGE:** Bypasses Human Interaction Challenge." - "- **CAPTCHA:** Bypasses CAPTCHA Challenge." returned: on success type: list sample: [] redirect_url: description: - The target to which the request should be redirected, represented as a URI reference. Required when `action` is `REDIRECT`. 
returned: on success type: str sample: redirect_url_example redirect_response_code: description: - The response status code to return when `action` is set to `REDIRECT`. - "- **MOVED_PERMANENTLY:** Used for designating the permanent movement of a page (numerical code - 301)." - "- **FOUND:** Used for designating the temporary movement of a page (numerical code - 302)." returned: on success type: str sample: MOVED_PERMANENTLY captcha_title: description: - The title used when showing a CAPTCHA challenge when `action` is set to `SHOW_CAPTCHA` and the request is challenged. returned: on success type: str sample: captcha_title_example captcha_header: description: - The text to show in the header when showing a CAPTCHA challenge when `action` is set to `SHOW_CAPTCHA` and the request is challenged. returned: on success type: str sample: captcha_header_example captcha_footer: description: - The text to show in the footer when showing a CAPTCHA challenge when `action` is set to `SHOW_CAPTCHA` and the request is challenged. returned: on success type: str sample: captcha_footer_example captcha_submit_label: description: - The text to show on the label of the CAPTCHA challenge submit button when `action` is set to `SHOW_CAPTCHA` and the request is challenged. returned: on success type: str sample: captcha_submit_label_example response_header_manipulation: description: - An object that represents an action to apply to an HTTP response headers if all rule criteria will be matched regardless of `action` value. returned: on success type: complex contains: action: description: - "" returned: on success type: str sample: EXTEND_HTTP_RESPONSE_HEADER header: description: - A header field name that conforms to RFC 7230. - "Example: `example_header_name`" returned: on success type: str sample: header_example value: description: - A header field value that conforms to RFC 7230. 
- "Example: `example_value`" returned: on success type: str sample: value_example sample: [{ "name": "name_example", "criteria": [{ "condition": "URL_IS", "value": "value_example", "is_case_sensitive": true }], "action": "ALLOW", "block_action": "SET_RESPONSE_CODE", "block_response_code": 56, "block_error_page_message": "block_error_page_message_example", "block_error_page_code": "block_error_page_code_example", "block_error_page_description": "block_error_page_description_example", "bypass_challenges": [], "redirect_url": "redirect_url_example", "redirect_response_code": "MOVED_PERMANENTLY", "captcha_title": "captcha_title_example", "captcha_header": "captcha_header_example", "captcha_footer": "captcha_footer_example", "captcha_submit_label": "captcha_submit_label_example", "response_header_manipulation": [{ "action": "EXTEND_HTTP_RESPONSE_HEADER", "header": "header_example", "value": "value_example" }] }] """ from ansible.module_utils.basic import AnsibleModule from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import ( OCIResourceFactsHelperBase, get_custom_class, ) try: from oci.waas import WaasClient HAS_OCI_PY_SDK = True except ImportError: HAS_OCI_PY_SDK = False class AccessRulesFactsHelperGen(OCIResourceFactsHelperBase): def get_required_params_for_list(self): return [ "waas_policy_id", ] def list_resources(self): optional_list_method_params = [ "name", ] optional_kwargs = dict( (param, self.module.params[param]) for param in optional_list_method_params if self.module.params.get(param) is not None ) return oci_common_utils.list_all_resources( self.client.list_access_rules, waas_policy_id=self.module.params.get("waas_policy_id"), **optional_kwargs ) AccessRulesFactsHelperCustom = get_custom_class("AccessRulesFactsHelperCustom") class ResourceFactsHelper(AccessRulesFactsHelperCustom, AccessRulesFactsHelperGen): pass def main(): module_args = oci_common_utils.get_common_arg_spec() module_args.update( dict(waas_policy_id=dict(type="str", required=True), name=dict(type="str"),) ) module = AnsibleModule(argument_spec=module_args) if not HAS_OCI_PY_SDK: module.fail_json(msg="oci python sdk required for this module.") resource_facts_helper = ResourceFactsHelper( module=module, resource_type="access_rules", service_client_class=WaasClient, namespace="waas", ) result = [] if resource_facts_helper.is_list(): result = resource_facts_helper.list() else: resource_facts_helper.fail() module.exit_json(access_rules=result) if __name__ == "__main__": main()
true
true
7902fe0e814a91c3eb4a76ee910bfd916912235b
496
py
Python
packages/python/plotly/plotly/validators/funnel/_insidetextanchor.py
mastermind88/plotly.py
efa70710df1af22958e1be080e105130042f1839
[ "MIT" ]
null
null
null
packages/python/plotly/plotly/validators/funnel/_insidetextanchor.py
mastermind88/plotly.py
efa70710df1af22958e1be080e105130042f1839
[ "MIT" ]
null
null
null
packages/python/plotly/plotly/validators/funnel/_insidetextanchor.py
mastermind88/plotly.py
efa70710df1af22958e1be080e105130042f1839
[ "MIT" ]
null
null
null
import _plotly_utils.basevalidators


class InsidetextanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    def __init__(self, plotly_name="insidetextanchor", parent_name="funnel", **kwargs):
        super(InsidetextanchorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "plot"),
            values=kwargs.pop("values", ["end", "middle", "start"]),
            **kwargs,
        )
38.153846
87
0.669355
import _plotly_utils.basevalidators


class InsidetextanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    def __init__(self, plotly_name="insidetextanchor", parent_name="funnel", **kwargs):
        super(InsidetextanchorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "plot"),
            values=kwargs.pop("values", ["end", "middle", "start"]),
            **kwargs,
        )
true
true
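Editor's note: the validator in the record above constrains funnel.insidetextanchor to one of three strings. A hedged usage sketch follows; it assumes plotly is installed, that the generated package re-exports InsidetextanchorValidator from plotly.validators.funnel, and that EnumeratedValidator exposes validate_coerce(), as plotly's other validators do.

# Illustrative sketch only (not part of the dataset record above).
from plotly.validators.funnel import InsidetextanchorValidator

validator = InsidetextanchorValidator()
print(validator.validate_coerce("middle"))  # -> "middle"

try:
    validator.validate_coerce("top")  # not in ["end", "middle", "start"]
except ValueError as exc:
    print("rejected:", exc)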
7902feb30271f5231dd4fcf6f88405f6572b5532
205
py
Python
calender/calender/doctype/alaqoal/test_alaqoal.py
bahaaabed/AumAlqura
9d12d7917225d9e82b4a480c3bc8f7acf8edab77
[ "MIT" ]
null
null
null
calender/calender/doctype/alaqoal/test_alaqoal.py
bahaaabed/AumAlqura
9d12d7917225d9e82b4a480c3bc8f7acf8edab77
[ "MIT" ]
null
null
null
calender/calender/doctype/alaqoal/test_alaqoal.py
bahaaabed/AumAlqura
9d12d7917225d9e82b4a480c3bc8f7acf8edab77
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
# Copyright (c) 2022, bahaa and Contributors
# See license.txt
from __future__ import unicode_literals

# import frappe
import unittest


class TestAlaqoal(unittest.TestCase):
	pass
18.636364
44
0.756098
from __future__ import unicode_literals

import unittest


class TestAlaqoal(unittest.TestCase):
	pass
true
true
79030111cc4867fd53a652d3f11ae14e8ac7efab
26,292
py
Python
artifacts/old_dataset_versions/original_commits_v02/pennylane/pennylane#385/after/test_tf.py
MattePalte/Bugs-Quantum-Computing-Platforms
0c1c805fd5dfce465a8955ee3faf81037023a23e
[ "MIT" ]
3
2021-11-08T11:46:42.000Z
2021-12-27T10:13:38.000Z
artifacts/old_dataset_versions/minimal_commits/pennylane/pennylane#385/after/test_tf.py
MattePalte/Bugs-Quantum-Computing-Platforms
0c1c805fd5dfce465a8955ee3faf81037023a23e
[ "MIT" ]
2
2021-11-09T14:57:09.000Z
2022-01-12T12:35:58.000Z
artifacts/old_dataset_versions/original_commits_v02/pennylane/pennylane#385/after/test_tf.py
MattePalte/Bugs-Quantum-Computing-Platforms
0c1c805fd5dfce465a8955ee3faf81037023a23e
[ "MIT" ]
null
null
null
# Copyright 2018 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Unit tests for the :mod:`pennylane.interface.tf` QNode interface. """ import pytest import numpy as np try: import tensorflow as tf if tf.__version__[0] == "1": import tensorflow.contrib.eager as tfe tf.enable_eager_execution() Variable = tfe.Variable else: from tensorflow import Variable except ImportError as e: pass import pennylane as qml from pennylane.qnode import _flatten, unflatten, QNode, QuantumFunctionError from pennylane.plugins.default_qubit import CNOT, Rotx, Roty, Rotz, I, Y, Z from pennylane._device import DeviceError def expZ(state): return np.abs(state[0]) ** 2 - np.abs(state[1]) ** 2 @pytest.fixture(scope='module') def tf_support(): """Boolean fixture for TensorFlow support""" try: import tensorflow as tf tf_support = True except ImportError as e: tf_support = False return tf_support @pytest.fixture() def skip_if_no_tf_support(tf_support): if not tf_support: pytest.skip("Skipped, no tf support") @pytest.mark.usefixtures("skip_if_no_tf_support") class TestTFQNodeExceptions(): """TFQNode basic tests.""" def test_qnode_fails_on_wrong_return_type(self, qubit_device_2_wires): """The qfunc must return only Expectations""" @qml.qnode(qubit_device_2_wires, interface='tf') def qf(x): qml.RX(x, wires=[0]) return qml.expval(qml.PauliZ(0)), 0.3 with pytest.raises(QuantumFunctionError, match='must return either'): qf(Variable(0.5)) def test_qnode_fails_on_expval_not_returned(self, qubit_device_2_wires): """All expectation values in the qfunc must be returned""" @qml.qnode(qubit_device_2_wires, interface='tf') def qf(x): qml.RX(x, wires=[0]) ex = qml.expval(qml.PauliZ(1)) return qml.expval(qml.PauliZ(0)) with pytest.raises(QuantumFunctionError, match='All measured observables'): qf(Variable(0.5)) def test_qnode_fails_on_wrong_expval_order(self, qubit_device_2_wires): """Expvals must be returned in the order they were created in""" @qml.qnode(qubit_device_2_wires, interface='tf') def qf(x): qml.RX(x, wires=[0]) ex = qml.expval(qml.PauliZ(1)) return qml.expval(qml.PauliZ(0)), ex with pytest.raises(QuantumFunctionError, match='All measured observables'): qf(Variable(0.5)) def test_qnode_fails_on_gates_after_measurements(self, qubit_device_2_wires): """Gates have to precede measurements""" @qml.qnode(qubit_device_2_wires, interface='tf') def qf(x): qml.RX(x, wires=[0]) ev = qml.expval(qml.PauliZ(1)) qml.RY(0.5, wires=[0]) return ev with pytest.raises(QuantumFunctionError, match='gates must precede'): qf(Variable(0.5)) def test_qnode_fails_on_multiple_measurements_of_same_wire(self, qubit_device_2_wires): """A wire can only be measured once""" @qml.qnode(qubit_device_2_wires, interface='tf') def qf(x): qml.RX(x, wires=[0]) qml.CNOT(wires=[0, 1]) return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)), qml.expval(qml.PauliX(0)) with pytest.raises(QuantumFunctionError, match='can only be measured once'): qf(Variable(0.5)) def test_qnode_fails_on_qfunc_with_too_many_wires(self, qubit_device_2_wires): """The 
device must have sufficient wires for the qfunc""" @qml.qnode(qubit_device_2_wires, interface='tf') def qf(x): qml.RX(x, wires=[0]) qml.CNOT(wires=[0, 2]) return qml.expval(qml.PauliZ(0)) with pytest.raises(QuantumFunctionError, match='applied to invalid wire'): qf(Variable(0.5)) def test_qnode_fails_on_combination_of_cv_and_qbit_ops(self, qubit_device_1_wire): """CV and discrete operations must not be mixed""" @qml.qnode(qubit_device_1_wire, interface='tf') def qf(x): qml.RX(x, wires=[0]) qml.Displacement(0.5, 0, wires=[0]) return qml.expval(qml.PauliZ(0)) with pytest.raises(QuantumFunctionError, match='Continuous and discrete'): qf(Variable(0.5)) def test_qnode_fails_for_cv_ops_on_qubit_device(self, qubit_device_1_wire): """A qubit device cannot execute CV operations""" @qml.qnode(qubit_device_1_wire, interface='tf') def qf(x): qml.Displacement(0.5, 0, wires=[0]) return qml.expval(qml.X(0)) with pytest.raises(DeviceError, match='Gate [a-zA-Z]+ not supported on device'): qf(Variable(0.5)) def test_qnode_fails_for_cv_observables_on_qubit_device(self, qubit_device_1_wire): """A qubit device cannot measure CV observables""" @qml.qnode(qubit_device_1_wire, interface='tf') def qf(x): return qml.expval(qml.X(0)) with pytest.raises(DeviceError, match='Observable [a-zA-Z]+ not supported on device'): qf(Variable(0.5)) @pytest.mark.usefixtures("skip_if_no_tf_support") class TestTFQNodeParameterHandling: """Test that the TFQNode properly handles the parameters of qfuncs""" def test_qnode_fanout(self, qubit_device_1_wire, tol): """Tests that qnodes can compute the correct function when the same parameter is used in multiple gates.""" @qml.qnode(qubit_device_1_wire, interface='tf') def circuit(reused_param, other_param): qml.RX(reused_param, wires=[0]) qml.RZ(other_param, wires=[0]) qml.RX(reused_param, wires=[0]) return qml.expval(qml.PauliZ(0)) thetas = tf.linspace(-2*np.pi, 2*np.pi, 7) for reused_param in thetas: for theta in thetas: other_param = theta ** 2 / 11 y_eval = circuit(reused_param, other_param) Rx = Rotx(reused_param.numpy()) Rz = Rotz(other_param.numpy()) zero_state = np.array([1.,0.]) final_state = (Rx @ Rz @ Rx @ zero_state) y_true = expZ(final_state) assert np.allclose(y_eval, y_true, atol=tol, rtol=0) def test_qnode_array_parameters_scalar_return(self, qubit_device_1_wire, tol): """Test that QNode can take arrays as input arguments, and that they interact properly with TensorFlow. Test case for a circuit that returns a scalar.""" # The objective of this test is not to check if the results are correctly calculated, # but to check that the interoperability of the different return types works. 
@qml.qnode(qubit_device_1_wire, interface='tf') def circuit(dummy1, array, dummy2): qml.RY(0.5 * array[0,1], wires=0) qml.RY(-0.5 * array[1,1], wires=0) return qml.expval(qml.PauliX(0)) # returns a scalar grad_target = (np.array(1.), np.array([[0.5, 0.43879, 0], [0, -0.43879, 0]]), np.array(-0.4)) cost_target = 1.03257 args = (Variable(0.46), Variable([[2., 3., 0.3], [7., 4., 2.1]]), Variable(-0.13)) def cost(x, array, y): c = tf.cast(circuit(tf.constant(0.111), array, tf.constant(4.5)), tf.float32) return c +0.5*array[0,0] +x -0.4*y with tf.GradientTape() as tape: cost_res = cost(*args) grad_res = np.array([i.numpy() for i in tape.gradient(cost_res, [args[0], args[2]])]) assert np.allclose(cost_res.numpy(), cost_target, atol=tol, rtol=0) assert np.allclose(grad_res, np.fromiter(grad_target[::2], dtype=np.float32), atol=tol, rtol=0) def test_qnode_array_parameters_1_vector_return(self, qubit_device_1_wire, tol): """Test that QNode can take arrays as input arguments, and that they interact properly with TensorFlow Test case for a circuit that returns a 1-vector.""" # The objective of this test is not to check if the results are correctly calculated, # but to check that the interoperability of the different return types works. @qml.qnode(qubit_device_1_wire, interface='tf') def circuit(dummy1, array, dummy2): qml.RY(0.5 * array[0,1], wires=0) qml.RY(-0.5 * array[1,1], wires=0) return qml.expval(qml.PauliX(0)), # note the comma, returns a 1-vector grad_target = (np.array(1.), np.array([[0.5, 0.43879, 0], [0, -0.43879, 0]]), np.array(-0.4)) cost_target = 1.03257 args = (Variable(0.46), Variable([[2., 3., 0.3], [7., 4., 2.1]]), Variable(-0.13)) def cost(x, array, y): c = tf.cast(circuit(tf.constant(0.111), array, tf.constant(4.5)), tf.float32) c = c[0] # get a scalar return c +0.5*array[0,0] +x -0.4*y with tf.GradientTape() as tape: cost_res = cost(*args) grad_res = np.array([i.numpy() for i in tape.gradient(cost_res, [args[0], args[2]])]) assert np.allclose(cost_res.numpy(), cost_target, atol=tol, rtol=0) assert np.allclose(grad_res, np.fromiter(grad_target[::2], dtype=np.float32), atol=tol, rtol=0) def test_qnode_array_parameters_2_vector_return(self, qubit_device_2_wires, tol): """Test that QNode can take arrays as input arguments, and that they interact properly with TensorFlow Test case for a circuit that returns a 2-vector.""" # The objective of this test is not to check if the results are correctly calculated, # but to check that the interoperability of the different return types works. 
@qml.qnode(qubit_device_2_wires, interface='tf') def circuit(dummy1, array, dummy2): qml.RY(0.5 * array[0,1], wires=0) qml.RY(-0.5 * array[1,1], wires=0) qml.RY(array[1,0], wires=1) return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1)) # returns a 2-vector grad_target = (np.array(1.), np.array([[0.5, 0.43879, 0], [0, -0.43879, 0]]), np.array(-0.4)) cost_target = 1.03257 args = (Variable(0.46), Variable([[2., 3., 0.3], [7., 4., 2.1]]), Variable(-0.13)) def cost(x, array, y): c = tf.cast(circuit(tf.constant(0.111), array, tf.constant(4.5)), tf.float32) c = c[0] # get a scalar return c +0.5*array[0,0] +x -0.4*y with tf.GradientTape() as tape: cost_res = cost(*args) grad_res = np.array([i.numpy() for i in tape.gradient(cost_res, [args[0], args[2]])]) assert np.allclose(cost_res.numpy(), cost_target, atol=tol, rtol=0) assert np.allclose(grad_res, np.fromiter(grad_target[::2], dtype=np.float32), atol=tol, rtol=0) def test_array_parameters_evaluate(self, qubit_device_2_wires, tol): """Test that array parameters gives same result as positional arguments.""" a, b, c = tf.constant(0.5), tf.constant(0.54), tf.constant(0.3) def ansatz(x, y, z): qml.QubitStateVector(np.array([1, 0, 1, 1])/np.sqrt(3), wires=[0, 1]) qml.Rot(x, y, z, wires=0) qml.CNOT(wires=[0, 1]) return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1)) @qml.qnode(qubit_device_2_wires, interface='tf') def circuit1(x, y, z): return ansatz(x, y, z) @qml.qnode(qubit_device_2_wires, interface='tf') def circuit2(x, array): return ansatz(x, array[0], array[1]) @qml.qnode(qubit_device_2_wires, interface='tf') def circuit3(array): return ansatz(*array) positional_res = circuit1(a, b, c) array_res1 = circuit2(a, Variable([b, c])) array_res2 = circuit3(Variable([a, b, c])) assert np.allclose(positional_res.numpy(), array_res1.numpy(), atol=tol, rtol=0) assert np.allclose(positional_res.numpy(), array_res2.numpy(), atol=tol, rtol=0) def test_multiple_expectation_different_wires(self, qubit_device_2_wires, tol): """Tests that qnodes return multiple expectation values.""" a, b, c = Variable(0.5), Variable(0.54), Variable(0.3) @qml.qnode(qubit_device_2_wires, interface='tf') def circuit(x, y, z): qml.RX(x, wires=[0]) qml.RZ(y, wires=[0]) qml.CNOT(wires=[0, 1]) qml.RY(y, wires=[0]) qml.RX(z, wires=[0]) return qml.expval(qml.PauliY(0)), qml.expval(qml.PauliZ(1)) res = circuit(a, b, c) out_state = np.kron(Rotx(c.numpy()), I) @ np.kron(Roty(b.numpy()), I) @ CNOT \ @ np.kron(Rotz(b.numpy()), I) @ np.kron(Rotx(a.numpy()), I) @ np.array([1, 0, 0, 0]) ex0 = np.vdot(out_state, np.kron(Y, I) @ out_state) ex1 = np.vdot(out_state, np.kron(I, Z) @ out_state) ex = np.array([ex0, ex1]) assert np.allclose(ex, res.numpy(), atol=tol, rtol=0) def test_multiple_keywordargs_used(self, qubit_device_2_wires, tol): """Tests that qnodes use multiple keyword arguments.""" @qml.qnode(qubit_device_2_wires, interface='tf') def circuit(w, x=None, y=None): qml.RX(x, wires=[0]) qml.RX(y, wires=[1]) return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)) c = circuit(tf.constant(1.), x=np.pi, y=np.pi) assert np.allclose(c.numpy(), [-1., -1.], atol=tol, rtol=0) def test_multidimensional_keywordargs_used(self, qubit_device_2_wires, tol): """Tests that qnodes use multi-dimensional keyword arguments.""" def circuit(w, x=None): qml.RX(x[0], wires=[0]) qml.RX(x[1], wires=[1]) return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)) circuit = qml.QNode(circuit, qubit_device_2_wires).to_tf() c = circuit(tf.constant(1.), x=[np.pi, np.pi]) assert np.allclose(c.numpy(), [-1., 
-1.], atol=tol, rtol=0) def test_keywordargs_for_wires(self, qubit_device_2_wires, tol): """Tests that wires can be passed as keyword arguments.""" default_q = 0 def circuit(x, q=default_q): qml.RY(x, wires=0) return qml.expval(qml.PauliZ(q)) circuit = qml.QNode(circuit, qubit_device_2_wires).to_tf() c = circuit(tf.constant(np.pi), q=1) assert np.allclose(c, 1., atol=tol, rtol=0) c = circuit(tf.constant(np.pi)) assert np.allclose(c.numpy(), -1., atol=tol, rtol=0) def test_keywordargs_used(self, qubit_device_1_wire, tol): """Tests that qnodes use keyword arguments.""" def circuit(w, x=None): qml.RX(x, wires=[0]) return qml.expval(qml.PauliZ(0)) circuit = qml.QNode(circuit, qubit_device_1_wire).to_tf() c = circuit(tf.constant(1.), x=np.pi) assert np.allclose(c.numpy(), -1., atol=tol, rtol=0) def test_mixture_numpy_tensors(self, qubit_device_2_wires, tol): """Tests that qnodes work with python types and tensors.""" @qml.qnode(qubit_device_2_wires, interface='tf') def circuit(w, x, y): qml.RX(x, wires=[0]) qml.RX(y, wires=[1]) return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)) c = circuit(tf.constant(1.), np.pi, np.pi).numpy() assert np.allclose(c, [-1., -1.], atol=tol, rtol=0) def test_keywordarg_updated_in_multiple_calls(self, qubit_device_2_wires): """Tests that qnodes update keyword arguments in consecutive calls.""" def circuit(w, x=None): qml.RX(w, wires=[0]) qml.RX(x, wires=[1]) return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)) circuit = qml.QNode(circuit, qubit_device_2_wires).to_tf() c1 = circuit(tf.constant(0.1), x=tf.constant(0.)) c2 = circuit(tf.constant(0.1), x=np.pi) assert c1[1] != c2[1] def test_keywordarg_passes_through_classicalnode(self, qubit_device_2_wires, tol): """Tests that qnodes' keyword arguments pass through classical nodes.""" def circuit(w, x=None): qml.RX(w, wires=[0]) qml.RX(x, wires=[1]) return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)) circuit = qml.QNode(circuit, qubit_device_2_wires).to_tf() def classnode(w, x=None): return circuit(w, x=x) c = classnode(tf.constant(0.), x=np.pi) assert np.allclose(c.numpy(), [1., -1.], atol=tol, rtol=0) def test_keywordarg_gradient(self, qubit_device_2_wires, tol): """Tests that qnodes' keyword arguments work with gradients""" def circuit(x, y, input_state=np.array([0, 0])): qml.BasisState(input_state, wires=[0, 1]) qml.RX(x, wires=[0]) qml.RY(y, wires=[0]) return qml.expval(qml.PauliZ(0)) circuit = qml.QNode(circuit, qubit_device_2_wires).to_tf() x = 0.543 y = 0.45632 expected_grad = np.array([np.sin(x)*np.cos(y), np.sin(y)*np.cos(x)]) x_t = Variable(x) y_t = Variable(y) # test first basis state against analytic result with tf.GradientTape() as tape: c = circuit(x_t, y_t, input_state=np.array([0, 0])) grads = np.array(tape.gradient(c, [x_t, y_t])) assert np.allclose(grads, -expected_grad, atol=tol, rtol=0) # test third basis state against analytic result with tf.GradientTape() as tape: c = circuit(x_t, y_t, input_state=np.array([1, 0])) grads = np.array(tape.gradient(c, [x_t, y_t])) assert np.allclose(grads, expected_grad, atol=tol, rtol=0) # test first basis state via the default keyword argument against analytic result with tf.GradientTape() as tape: c = circuit(x_t, y_t) grads = np.array(tape.gradient(c, [x_t, y_t])) assert np.allclose(grads, -expected_grad, atol=tol, rtol=0) @pytest.mark.usefixtures("skip_if_no_tf_support") class TestIntegration: """Integration tests to ensure the TensorFlow QNode agrees with the NumPy QNode""" def test_qnode_evaluation_agrees(self, qubit_device_2_wires, 
tol): """Tests that simple example is consistent.""" @qml.qnode(qubit_device_2_wires, interface='autograd') def circuit(phi, theta): qml.RX(phi[0], wires=0) qml.RY(phi[1], wires=1) qml.CNOT(wires=[0, 1]) qml.PhaseShift(theta[0], wires=0) return qml.expval(qml.PauliZ(0)) @qml.qnode(qubit_device_2_wires, interface='tf') def circuit_tf(phi, theta): qml.RX(phi[0], wires=0) qml.RY(phi[1], wires=1) qml.CNOT(wires=[0, 1]) qml.PhaseShift(theta[0], wires=0) return qml.expval(qml.PauliZ(0)) phi = [0.5, 0.1] theta = [0.2] phi_t = Variable(phi) theta_t = Variable(theta) autograd_eval = circuit(phi, theta) tf_eval = circuit_tf(phi_t, theta_t) assert np.allclose(autograd_eval, tf_eval.numpy(), atol=tol, rtol=0) def test_qnode_gradient_agrees(self, qubit_device_2_wires, tol): """Tests that simple gradient example is consistent.""" @qml.qnode(qubit_device_2_wires, interface='autograd') def circuit(phi, theta): qml.RX(phi[0], wires=0) qml.RY(phi[1], wires=1) qml.CNOT(wires=[0, 1]) qml.PhaseShift(theta[0], wires=0) return qml.expval(qml.PauliZ(0)) @qml.qnode(qubit_device_2_wires, interface='tf') def circuit_tf(phi, theta): qml.RX(phi[0], wires=0) qml.RY(phi[1], wires=1) qml.CNOT(wires=[0, 1]) qml.PhaseShift(theta[0], wires=0) return qml.expval(qml.PauliZ(0)) phi = [0.5, 0.1] theta = [0.2] phi_t = Variable(phi) theta_t = Variable(theta) dcircuit = qml.grad(circuit, [0, 1]) autograd_grad = dcircuit(phi, theta) with tf.GradientTape() as g: g.watch([phi_t, theta_t]) y = circuit_tf(phi_t, theta_t) tf_grad = g.gradient(y, [phi_t, theta_t]) assert np.allclose(autograd_grad[0], tf_grad[0], atol=tol, rtol=0) assert np.allclose(autograd_grad[1], tf_grad[1], atol=tol, rtol=0) gradient_test_data = [ (0.5, -0.1), (0.0, np.pi), (-3.6, -3.6), (1.0, 2.5), ] @pytest.mark.usefixtures("skip_if_no_tf_support") class TestTFGradients: """Integration tests involving gradients of QNodes and hybrid computations using the tf interface""" @pytest.fixture def qnodes(self): """Two QNodes to be used for the gradient tests""" dev = qml.device("default.qubit", wires=2) @qml.qnode(dev, interface="tf") def f(x): qml.RX(x, wires=0) return qml.expval(qml.PauliZ(0)) @qml.qnode(dev, interface="tf") def g(y): qml.RY(y, wires=0) return qml.expval(qml.PauliX(0)) return f, g @pytest.mark.parametrize("x, y", gradient_test_data) def test_addition_qnodes_gradient(self, qnodes, x, y): """Test the gradient of addition of two QNode circuits""" f, g = qnodes def add(a, b): return a + b xt = Variable(x) yt = Variable(y) # addition with tf.GradientTape() as tape: tape.watch([xt, yt]) a = f(xt) b = g(yt) y = add(a, b) grad = tape.gradient(y, [a, b]) assert grad[0].numpy() == 1.0 assert grad[1].numpy() == 1.0 # same tensor added to itself with tf.GradientTape() as tape: tape.watch([xt, yt]) a = f(xt) y = add(a, a) grad = tape.gradient(y, [a, a]) assert grad[0].numpy() == 2.0 assert grad[1].numpy() == 2.0 # different qnodes with same input parameter added together with tf.GradientTape() as tape: tape.watch([xt, yt]) a = f(xt) b = g(xt) y = add(a, b) grad = tape.gradient(y, [a, b]) assert grad[0].numpy() == 1.0 assert grad[1].numpy() == 1.0 @pytest.mark.parametrize("x, y", gradient_test_data) def test_subtraction_qnodes_gradient(self, qnodes, x, y): """Test the gradient of subtraction of two QNode circuits""" f, g = qnodes def subtract(a, b): return a - b xt = Variable(x) yt = Variable(y) # subtraction with tf.GradientTape() as tape: tape.watch([xt, yt]) a = f(xt) b = g(yt) y = subtract(a, b) grad = tape.gradient(y, [a, b]) assert grad[0].numpy() == 1.0 assert 
grad[1].numpy() == -1.0 @pytest.mark.parametrize("x, y", gradient_test_data) def test_multiplication_qnodes_gradient(self, qnodes, x, y): """Test the gradient of multiplication of two QNode circuits""" f, g = qnodes def mult(a, b): return a * b xt = Variable(x) yt = Variable(y) # multiplication with tf.GradientTape() as tape: tape.watch([xt, yt]) a = f(xt) b = g(yt) y = mult(a, b) grad = tape.gradient(y, [a, b]) assert grad[0].numpy() == b.numpy() assert grad[1].numpy() == a.numpy() @pytest.mark.parametrize("x, y", gradient_test_data) def test_division_qnodes_gradient(self, qnodes, x, y, tol): """Test the gradient of division of two QNode circuits""" f, g = qnodes def div(a, b): return a / b xt = Variable(x) yt = Variable(y) # division with tf.GradientTape() as tape: tape.watch([xt, yt]) a = f(xt) b = g(yt) y = div(a, b) grad = tape.gradient(y, [a, b]) assert grad[0].numpy() == 1 / b.numpy() assert np.allclose(grad[1].numpy(), -a.numpy() / b.numpy() ** 2, atol=tol, rtol=0) @pytest.mark.parametrize("x, y", gradient_test_data) def test_composition_qnodes_gradient(self, qnodes, x, y): """Test the gradient of composition of two QNode circuits""" f, g = qnodes xt = Variable(x) yt = Variable(y) # compose function with xt as input with tf.GradientTape() as tape: tape.watch([xt]) y = f(xt) grad1 = tape.gradient(y, xt) with tf.GradientTape() as tape: tape.watch([xt]) y = f(xt) grad2 = tape.gradient(y, xt) assert tf.equal(grad1, grad2) # compose function with a as input with tf.GradientTape() as tape: tape.watch([xt]) a = f(xt) y = f(a) grad1 = tape.gradient(y, a) with tf.GradientTape() as tape: tape.watch([xt]) a = f(xt) y = f(a) grad2 = tape.gradient(y, a) assert tf.equal(grad1, grad2) # compose function with b as input with tf.GradientTape() as tape: tape.watch([xt]) b = g(xt) y = g(b) grad1 = tape.gradient(y, b) with tf.GradientTape() as tape: tape.watch([xt]) b = g(xt) y = g(b) grad2 = tape.gradient(y, b) assert tf.equal(grad1, grad2)
35.577808
115
0.588924
import pytest import numpy as np try: import tensorflow as tf if tf.__version__[0] == "1": import tensorflow.contrib.eager as tfe tf.enable_eager_execution() Variable = tfe.Variable else: from tensorflow import Variable except ImportError as e: pass import pennylane as qml from pennylane.qnode import _flatten, unflatten, QNode, QuantumFunctionError from pennylane.plugins.default_qubit import CNOT, Rotx, Roty, Rotz, I, Y, Z from pennylane._device import DeviceError def expZ(state): return np.abs(state[0]) ** 2 - np.abs(state[1]) ** 2 @pytest.fixture(scope='module') def tf_support(): try: import tensorflow as tf tf_support = True except ImportError as e: tf_support = False return tf_support @pytest.fixture() def skip_if_no_tf_support(tf_support): if not tf_support: pytest.skip("Skipped, no tf support") @pytest.mark.usefixtures("skip_if_no_tf_support") class TestTFQNodeExceptions(): def test_qnode_fails_on_wrong_return_type(self, qubit_device_2_wires): @qml.qnode(qubit_device_2_wires, interface='tf') def qf(x): qml.RX(x, wires=[0]) return qml.expval(qml.PauliZ(0)), 0.3 with pytest.raises(QuantumFunctionError, match='must return either'): qf(Variable(0.5)) def test_qnode_fails_on_expval_not_returned(self, qubit_device_2_wires): @qml.qnode(qubit_device_2_wires, interface='tf') def qf(x): qml.RX(x, wires=[0]) ex = qml.expval(qml.PauliZ(1)) return qml.expval(qml.PauliZ(0)) with pytest.raises(QuantumFunctionError, match='All measured observables'): qf(Variable(0.5)) def test_qnode_fails_on_wrong_expval_order(self, qubit_device_2_wires): @qml.qnode(qubit_device_2_wires, interface='tf') def qf(x): qml.RX(x, wires=[0]) ex = qml.expval(qml.PauliZ(1)) return qml.expval(qml.PauliZ(0)), ex with pytest.raises(QuantumFunctionError, match='All measured observables'): qf(Variable(0.5)) def test_qnode_fails_on_gates_after_measurements(self, qubit_device_2_wires): @qml.qnode(qubit_device_2_wires, interface='tf') def qf(x): qml.RX(x, wires=[0]) ev = qml.expval(qml.PauliZ(1)) qml.RY(0.5, wires=[0]) return ev with pytest.raises(QuantumFunctionError, match='gates must precede'): qf(Variable(0.5)) def test_qnode_fails_on_multiple_measurements_of_same_wire(self, qubit_device_2_wires): @qml.qnode(qubit_device_2_wires, interface='tf') def qf(x): qml.RX(x, wires=[0]) qml.CNOT(wires=[0, 1]) return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)), qml.expval(qml.PauliX(0)) with pytest.raises(QuantumFunctionError, match='can only be measured once'): qf(Variable(0.5)) def test_qnode_fails_on_qfunc_with_too_many_wires(self, qubit_device_2_wires): @qml.qnode(qubit_device_2_wires, interface='tf') def qf(x): qml.RX(x, wires=[0]) qml.CNOT(wires=[0, 2]) return qml.expval(qml.PauliZ(0)) with pytest.raises(QuantumFunctionError, match='applied to invalid wire'): qf(Variable(0.5)) def test_qnode_fails_on_combination_of_cv_and_qbit_ops(self, qubit_device_1_wire): @qml.qnode(qubit_device_1_wire, interface='tf') def qf(x): qml.RX(x, wires=[0]) qml.Displacement(0.5, 0, wires=[0]) return qml.expval(qml.PauliZ(0)) with pytest.raises(QuantumFunctionError, match='Continuous and discrete'): qf(Variable(0.5)) def test_qnode_fails_for_cv_ops_on_qubit_device(self, qubit_device_1_wire): @qml.qnode(qubit_device_1_wire, interface='tf') def qf(x): qml.Displacement(0.5, 0, wires=[0]) return qml.expval(qml.X(0)) with pytest.raises(DeviceError, match='Gate [a-zA-Z]+ not supported on device'): qf(Variable(0.5)) def test_qnode_fails_for_cv_observables_on_qubit_device(self, qubit_device_1_wire): @qml.qnode(qubit_device_1_wire, interface='tf') def 
qf(x): return qml.expval(qml.X(0)) with pytest.raises(DeviceError, match='Observable [a-zA-Z]+ not supported on device'): qf(Variable(0.5)) @pytest.mark.usefixtures("skip_if_no_tf_support") class TestTFQNodeParameterHandling: def test_qnode_fanout(self, qubit_device_1_wire, tol): @qml.qnode(qubit_device_1_wire, interface='tf') def circuit(reused_param, other_param): qml.RX(reused_param, wires=[0]) qml.RZ(other_param, wires=[0]) qml.RX(reused_param, wires=[0]) return qml.expval(qml.PauliZ(0)) thetas = tf.linspace(-2*np.pi, 2*np.pi, 7) for reused_param in thetas: for theta in thetas: other_param = theta ** 2 / 11 y_eval = circuit(reused_param, other_param) Rx = Rotx(reused_param.numpy()) Rz = Rotz(other_param.numpy()) zero_state = np.array([1.,0.]) final_state = (Rx @ Rz @ Rx @ zero_state) y_true = expZ(final_state) assert np.allclose(y_eval, y_true, atol=tol, rtol=0) def test_qnode_array_parameters_scalar_return(self, qubit_device_1_wire, tol): @qml.qnode(qubit_device_1_wire, interface='tf') def circuit(dummy1, array, dummy2): qml.RY(0.5 * array[0,1], wires=0) qml.RY(-0.5 * array[1,1], wires=0) return qml.expval(qml.PauliX(0)) grad_target = (np.array(1.), np.array([[0.5, 0.43879, 0], [0, -0.43879, 0]]), np.array(-0.4)) cost_target = 1.03257 args = (Variable(0.46), Variable([[2., 3., 0.3], [7., 4., 2.1]]), Variable(-0.13)) def cost(x, array, y): c = tf.cast(circuit(tf.constant(0.111), array, tf.constant(4.5)), tf.float32) return c +0.5*array[0,0] +x -0.4*y with tf.GradientTape() as tape: cost_res = cost(*args) grad_res = np.array([i.numpy() for i in tape.gradient(cost_res, [args[0], args[2]])]) assert np.allclose(cost_res.numpy(), cost_target, atol=tol, rtol=0) assert np.allclose(grad_res, np.fromiter(grad_target[::2], dtype=np.float32), atol=tol, rtol=0) def test_qnode_array_parameters_1_vector_return(self, qubit_device_1_wire, tol): @qml.qnode(qubit_device_1_wire, interface='tf') def circuit(dummy1, array, dummy2): qml.RY(0.5 * array[0,1], wires=0) qml.RY(-0.5 * array[1,1], wires=0) return qml.expval(qml.PauliX(0)), grad_target = (np.array(1.), np.array([[0.5, 0.43879, 0], [0, -0.43879, 0]]), np.array(-0.4)) cost_target = 1.03257 args = (Variable(0.46), Variable([[2., 3., 0.3], [7., 4., 2.1]]), Variable(-0.13)) def cost(x, array, y): c = tf.cast(circuit(tf.constant(0.111), array, tf.constant(4.5)), tf.float32) c = c[0] return c +0.5*array[0,0] +x -0.4*y with tf.GradientTape() as tape: cost_res = cost(*args) grad_res = np.array([i.numpy() for i in tape.gradient(cost_res, [args[0], args[2]])]) assert np.allclose(cost_res.numpy(), cost_target, atol=tol, rtol=0) assert np.allclose(grad_res, np.fromiter(grad_target[::2], dtype=np.float32), atol=tol, rtol=0) def test_qnode_array_parameters_2_vector_return(self, qubit_device_2_wires, tol): @qml.qnode(qubit_device_2_wires, interface='tf') def circuit(dummy1, array, dummy2): qml.RY(0.5 * array[0,1], wires=0) qml.RY(-0.5 * array[1,1], wires=0) qml.RY(array[1,0], wires=1) return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1)) grad_target = (np.array(1.), np.array([[0.5, 0.43879, 0], [0, -0.43879, 0]]), np.array(-0.4)) cost_target = 1.03257 args = (Variable(0.46), Variable([[2., 3., 0.3], [7., 4., 2.1]]), Variable(-0.13)) def cost(x, array, y): c = tf.cast(circuit(tf.constant(0.111), array, tf.constant(4.5)), tf.float32) c = c[0] return c +0.5*array[0,0] +x -0.4*y with tf.GradientTape() as tape: cost_res = cost(*args) grad_res = np.array([i.numpy() for i in tape.gradient(cost_res, [args[0], args[2]])]) assert np.allclose(cost_res.numpy(), 
cost_target, atol=tol, rtol=0) assert np.allclose(grad_res, np.fromiter(grad_target[::2], dtype=np.float32), atol=tol, rtol=0) def test_array_parameters_evaluate(self, qubit_device_2_wires, tol): a, b, c = tf.constant(0.5), tf.constant(0.54), tf.constant(0.3) def ansatz(x, y, z): qml.QubitStateVector(np.array([1, 0, 1, 1])/np.sqrt(3), wires=[0, 1]) qml.Rot(x, y, z, wires=0) qml.CNOT(wires=[0, 1]) return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1)) @qml.qnode(qubit_device_2_wires, interface='tf') def circuit1(x, y, z): return ansatz(x, y, z) @qml.qnode(qubit_device_2_wires, interface='tf') def circuit2(x, array): return ansatz(x, array[0], array[1]) @qml.qnode(qubit_device_2_wires, interface='tf') def circuit3(array): return ansatz(*array) positional_res = circuit1(a, b, c) array_res1 = circuit2(a, Variable([b, c])) array_res2 = circuit3(Variable([a, b, c])) assert np.allclose(positional_res.numpy(), array_res1.numpy(), atol=tol, rtol=0) assert np.allclose(positional_res.numpy(), array_res2.numpy(), atol=tol, rtol=0) def test_multiple_expectation_different_wires(self, qubit_device_2_wires, tol): a, b, c = Variable(0.5), Variable(0.54), Variable(0.3) @qml.qnode(qubit_device_2_wires, interface='tf') def circuit(x, y, z): qml.RX(x, wires=[0]) qml.RZ(y, wires=[0]) qml.CNOT(wires=[0, 1]) qml.RY(y, wires=[0]) qml.RX(z, wires=[0]) return qml.expval(qml.PauliY(0)), qml.expval(qml.PauliZ(1)) res = circuit(a, b, c) out_state = np.kron(Rotx(c.numpy()), I) @ np.kron(Roty(b.numpy()), I) @ CNOT \ @ np.kron(Rotz(b.numpy()), I) @ np.kron(Rotx(a.numpy()), I) @ np.array([1, 0, 0, 0]) ex0 = np.vdot(out_state, np.kron(Y, I) @ out_state) ex1 = np.vdot(out_state, np.kron(I, Z) @ out_state) ex = np.array([ex0, ex1]) assert np.allclose(ex, res.numpy(), atol=tol, rtol=0) def test_multiple_keywordargs_used(self, qubit_device_2_wires, tol): @qml.qnode(qubit_device_2_wires, interface='tf') def circuit(w, x=None, y=None): qml.RX(x, wires=[0]) qml.RX(y, wires=[1]) return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)) c = circuit(tf.constant(1.), x=np.pi, y=np.pi) assert np.allclose(c.numpy(), [-1., -1.], atol=tol, rtol=0) def test_multidimensional_keywordargs_used(self, qubit_device_2_wires, tol): def circuit(w, x=None): qml.RX(x[0], wires=[0]) qml.RX(x[1], wires=[1]) return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)) circuit = qml.QNode(circuit, qubit_device_2_wires).to_tf() c = circuit(tf.constant(1.), x=[np.pi, np.pi]) assert np.allclose(c.numpy(), [-1., -1.], atol=tol, rtol=0) def test_keywordargs_for_wires(self, qubit_device_2_wires, tol): default_q = 0 def circuit(x, q=default_q): qml.RY(x, wires=0) return qml.expval(qml.PauliZ(q)) circuit = qml.QNode(circuit, qubit_device_2_wires).to_tf() c = circuit(tf.constant(np.pi), q=1) assert np.allclose(c, 1., atol=tol, rtol=0) c = circuit(tf.constant(np.pi)) assert np.allclose(c.numpy(), -1., atol=tol, rtol=0) def test_keywordargs_used(self, qubit_device_1_wire, tol): def circuit(w, x=None): qml.RX(x, wires=[0]) return qml.expval(qml.PauliZ(0)) circuit = qml.QNode(circuit, qubit_device_1_wire).to_tf() c = circuit(tf.constant(1.), x=np.pi) assert np.allclose(c.numpy(), -1., atol=tol, rtol=0) def test_mixture_numpy_tensors(self, qubit_device_2_wires, tol): @qml.qnode(qubit_device_2_wires, interface='tf') def circuit(w, x, y): qml.RX(x, wires=[0]) qml.RX(y, wires=[1]) return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)) c = circuit(tf.constant(1.), np.pi, np.pi).numpy() assert np.allclose(c, [-1., -1.], atol=tol, rtol=0) def 
test_keywordarg_updated_in_multiple_calls(self, qubit_device_2_wires): def circuit(w, x=None): qml.RX(w, wires=[0]) qml.RX(x, wires=[1]) return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)) circuit = qml.QNode(circuit, qubit_device_2_wires).to_tf() c1 = circuit(tf.constant(0.1), x=tf.constant(0.)) c2 = circuit(tf.constant(0.1), x=np.pi) assert c1[1] != c2[1] def test_keywordarg_passes_through_classicalnode(self, qubit_device_2_wires, tol): def circuit(w, x=None): qml.RX(w, wires=[0]) qml.RX(x, wires=[1]) return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)) circuit = qml.QNode(circuit, qubit_device_2_wires).to_tf() def classnode(w, x=None): return circuit(w, x=x) c = classnode(tf.constant(0.), x=np.pi) assert np.allclose(c.numpy(), [1., -1.], atol=tol, rtol=0) def test_keywordarg_gradient(self, qubit_device_2_wires, tol): def circuit(x, y, input_state=np.array([0, 0])): qml.BasisState(input_state, wires=[0, 1]) qml.RX(x, wires=[0]) qml.RY(y, wires=[0]) return qml.expval(qml.PauliZ(0)) circuit = qml.QNode(circuit, qubit_device_2_wires).to_tf() x = 0.543 y = 0.45632 expected_grad = np.array([np.sin(x)*np.cos(y), np.sin(y)*np.cos(x)]) x_t = Variable(x) y_t = Variable(y) with tf.GradientTape() as tape: c = circuit(x_t, y_t, input_state=np.array([0, 0])) grads = np.array(tape.gradient(c, [x_t, y_t])) assert np.allclose(grads, -expected_grad, atol=tol, rtol=0) with tf.GradientTape() as tape: c = circuit(x_t, y_t, input_state=np.array([1, 0])) grads = np.array(tape.gradient(c, [x_t, y_t])) assert np.allclose(grads, expected_grad, atol=tol, rtol=0) with tf.GradientTape() as tape: c = circuit(x_t, y_t) grads = np.array(tape.gradient(c, [x_t, y_t])) assert np.allclose(grads, -expected_grad, atol=tol, rtol=0) @pytest.mark.usefixtures("skip_if_no_tf_support") class TestIntegration: def test_qnode_evaluation_agrees(self, qubit_device_2_wires, tol): @qml.qnode(qubit_device_2_wires, interface='autograd') def circuit(phi, theta): qml.RX(phi[0], wires=0) qml.RY(phi[1], wires=1) qml.CNOT(wires=[0, 1]) qml.PhaseShift(theta[0], wires=0) return qml.expval(qml.PauliZ(0)) @qml.qnode(qubit_device_2_wires, interface='tf') def circuit_tf(phi, theta): qml.RX(phi[0], wires=0) qml.RY(phi[1], wires=1) qml.CNOT(wires=[0, 1]) qml.PhaseShift(theta[0], wires=0) return qml.expval(qml.PauliZ(0)) phi = [0.5, 0.1] theta = [0.2] phi_t = Variable(phi) theta_t = Variable(theta) autograd_eval = circuit(phi, theta) tf_eval = circuit_tf(phi_t, theta_t) assert np.allclose(autograd_eval, tf_eval.numpy(), atol=tol, rtol=0) def test_qnode_gradient_agrees(self, qubit_device_2_wires, tol): @qml.qnode(qubit_device_2_wires, interface='autograd') def circuit(phi, theta): qml.RX(phi[0], wires=0) qml.RY(phi[1], wires=1) qml.CNOT(wires=[0, 1]) qml.PhaseShift(theta[0], wires=0) return qml.expval(qml.PauliZ(0)) @qml.qnode(qubit_device_2_wires, interface='tf') def circuit_tf(phi, theta): qml.RX(phi[0], wires=0) qml.RY(phi[1], wires=1) qml.CNOT(wires=[0, 1]) qml.PhaseShift(theta[0], wires=0) return qml.expval(qml.PauliZ(0)) phi = [0.5, 0.1] theta = [0.2] phi_t = Variable(phi) theta_t = Variable(theta) dcircuit = qml.grad(circuit, [0, 1]) autograd_grad = dcircuit(phi, theta) with tf.GradientTape() as g: g.watch([phi_t, theta_t]) y = circuit_tf(phi_t, theta_t) tf_grad = g.gradient(y, [phi_t, theta_t]) assert np.allclose(autograd_grad[0], tf_grad[0], atol=tol, rtol=0) assert np.allclose(autograd_grad[1], tf_grad[1], atol=tol, rtol=0) gradient_test_data = [ (0.5, -0.1), (0.0, np.pi), (-3.6, -3.6), (1.0, 2.5), ] 
@pytest.mark.usefixtures("skip_if_no_tf_support") class TestTFGradients: @pytest.fixture def qnodes(self): dev = qml.device("default.qubit", wires=2) @qml.qnode(dev, interface="tf") def f(x): qml.RX(x, wires=0) return qml.expval(qml.PauliZ(0)) @qml.qnode(dev, interface="tf") def g(y): qml.RY(y, wires=0) return qml.expval(qml.PauliX(0)) return f, g @pytest.mark.parametrize("x, y", gradient_test_data) def test_addition_qnodes_gradient(self, qnodes, x, y): f, g = qnodes def add(a, b): return a + b xt = Variable(x) yt = Variable(y) with tf.GradientTape() as tape: tape.watch([xt, yt]) a = f(xt) b = g(yt) y = add(a, b) grad = tape.gradient(y, [a, b]) assert grad[0].numpy() == 1.0 assert grad[1].numpy() == 1.0 with tf.GradientTape() as tape: tape.watch([xt, yt]) a = f(xt) y = add(a, a) grad = tape.gradient(y, [a, a]) assert grad[0].numpy() == 2.0 assert grad[1].numpy() == 2.0 with tf.GradientTape() as tape: tape.watch([xt, yt]) a = f(xt) b = g(xt) y = add(a, b) grad = tape.gradient(y, [a, b]) assert grad[0].numpy() == 1.0 assert grad[1].numpy() == 1.0 @pytest.mark.parametrize("x, y", gradient_test_data) def test_subtraction_qnodes_gradient(self, qnodes, x, y): f, g = qnodes def subtract(a, b): return a - b xt = Variable(x) yt = Variable(y) with tf.GradientTape() as tape: tape.watch([xt, yt]) a = f(xt) b = g(yt) y = subtract(a, b) grad = tape.gradient(y, [a, b]) assert grad[0].numpy() == 1.0 assert grad[1].numpy() == -1.0 @pytest.mark.parametrize("x, y", gradient_test_data) def test_multiplication_qnodes_gradient(self, qnodes, x, y): f, g = qnodes def mult(a, b): return a * b xt = Variable(x) yt = Variable(y) with tf.GradientTape() as tape: tape.watch([xt, yt]) a = f(xt) b = g(yt) y = mult(a, b) grad = tape.gradient(y, [a, b]) assert grad[0].numpy() == b.numpy() assert grad[1].numpy() == a.numpy() @pytest.mark.parametrize("x, y", gradient_test_data) def test_division_qnodes_gradient(self, qnodes, x, y, tol): f, g = qnodes def div(a, b): return a / b xt = Variable(x) yt = Variable(y) with tf.GradientTape() as tape: tape.watch([xt, yt]) a = f(xt) b = g(yt) y = div(a, b) grad = tape.gradient(y, [a, b]) assert grad[0].numpy() == 1 / b.numpy() assert np.allclose(grad[1].numpy(), -a.numpy() / b.numpy() ** 2, atol=tol, rtol=0) @pytest.mark.parametrize("x, y", gradient_test_data) def test_composition_qnodes_gradient(self, qnodes, x, y): f, g = qnodes xt = Variable(x) yt = Variable(y) with tf.GradientTape() as tape: tape.watch([xt]) y = f(xt) grad1 = tape.gradient(y, xt) with tf.GradientTape() as tape: tape.watch([xt]) y = f(xt) grad2 = tape.gradient(y, xt) assert tf.equal(grad1, grad2) with tf.GradientTape() as tape: tape.watch([xt]) a = f(xt) y = f(a) grad1 = tape.gradient(y, a) with tf.GradientTape() as tape: tape.watch([xt]) a = f(xt) y = f(a) grad2 = tape.gradient(y, a) assert tf.equal(grad1, grad2) with tf.GradientTape() as tape: tape.watch([xt]) b = g(xt) y = g(b) grad1 = tape.gradient(y, b) with tf.GradientTape() as tape: tape.watch([xt]) b = g(xt) y = g(b) grad2 = tape.gradient(y, b) assert tf.equal(grad1, grad2)
true
true
7903025f1f9a9404dc70aaab4d2f4d39ef35b4fc
3,729
py
Python
CircuitPython_101/basic_data_structures/song_book/code.py
billagee/Adafruit_Learning_System_Guides
6e90bd839161573780ab9937c3deaa115deca055
[ "MIT" ]
1
2018-10-17T19:37:08.000Z
2018-10-17T19:37:08.000Z
CircuitPython_101/basic_data_structures/song_book/code.py
billagee/Adafruit_Learning_System_Guides
6e90bd839161573780ab9937c3deaa115deca055
[ "MIT" ]
null
null
null
CircuitPython_101/basic_data_structures/song_book/code.py
billagee/Adafruit_Learning_System_Guides
6e90bd839161573780ab9937c3deaa115deca055
[ "MIT" ]
1
2018-07-16T15:47:52.000Z
2018-07-16T15:47:52.000Z
import time import board import debouncer import busio as io import digitalio import pulseio import adafruit_ssd1306 i2c = io.I2C(board.SCL, board.SDA) reset_pin = digitalio.DigitalInOut(board.D11) oled = adafruit_ssd1306.SSD1306_I2C(128, 32, i2c, reset=reset_pin) button_select = debouncer.Debouncer(board.D7, mode=digitalio.Pull.UP) button_play = debouncer.Debouncer(board.D9, mode=digitalio.Pull.UP) C4 = 261 C_SH_4 = 277 D4 = 293 D_SH_4 = 311 E4 = 329 F4 = 349 F_SH_4 = 369 G4 = 392 G_SH_4 = 415 A4 = 440 A_SH_4 = 466 B4 = 493 # pylint: disable=line-too-long songbook = {'Twinkle Twinkle': [(C4, 0.5), (C4, 0.5), (G4, 0.5), (G4, 0.5), (A4, 0.5), (A4, 0.5), (G4, 1.0), (0, 0.5), (F4, 0.5), (F4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (D4, 0.5), (C4, 0.5), (0, 0.5), (G4, 0.5), (G4, 0.5), (F4, 0.5), (F4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (0, 0.5), (G4, 0.5), (G4, 0.5), (F4, 0.5), (F4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (0, 0.5), (C4, 0.5), (C4, 0.5), (G4, 0.5), (G4, 0.5), (A4, 0.5), (A4, 0.5), (G4, 1.0), (0, 0.5), (F4, 0.5), (F4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (D4, 0.5), (C4, 0.5), (0, 0.5)], 'ItsyBitsy Spider': [(G4, 0.5), (C4, 0.5), (C4, 0.5), (C4, 0.5), (D4, 0.5), (E4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (C4, 0.5), (D4, 0.5), (E4, 0.5), (C4, 0.5), (0, 0.5), (E4, 0.5), (E4, 0.5), (F4, 0.5), (G4, 0.5), (G4, 0.5), (F4, 0.5), (E4, 0.5), (F4, 0.5), (G4, 0.5), (E4, 0.5), (0, 0.5)], 'Old MacDonald': [(G4, 0.5), (G4, 0.5), (G4, 0.5), (D4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (0, 0.5), (B4, 0.5), (B4, 0.5), (A4, 0.5), (A4, 0.5), (G4, 0.5), (0, 0.5), (D4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (D4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (0, 0.5), (B4, 0.5), (B4, 0.5), (A4, 0.5), (A4, 0.5), (G4, 0.5), (0, 0.5), (D4, 0.5), (D4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (D4, 0.5), (D4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (0, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (0, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (0, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (D4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (0, 0.5), (B4, 0.5), (B4, 0.5), (A4, 0.5), (A4, 0.5), (G4, 0.5), (0, 0.5)] } # pylint: enable=line-too-long def play_note(note): if note[0] != 0: pwm = pulseio.PWMOut(board.D12, duty_cycle = 0, frequency=note[0]) # Hex 7FFF (binary 0111111111111111) is half of the largest value for a 16-bit int, # i.e. 50% pwm.duty_cycle = 0x7FFF time.sleep(note[1]) if note[0] != 0: pwm.deinit() def play_song(songname): for note in songbook[songname]: play_note(note) def update(songnames, selected): oled.fill(0) line = 0 for songname in songnames: if line == selected: oled.text(">", 0, line * 8) oled.text(songname, 10, line * 8) line += 1 oled.show() selected_song = 0 song_names = sorted(list(songbook.keys())) while True: button_select.update() button_play.update() update(song_names, selected_song) if button_select.fell: print("select") selected_song = (selected_song + 1) % len(songbook) elif button_play.fell: print("play") play_song(song_names[selected_song])
41.433333
185
0.448914
import time import board import debouncer import busio as io import digitalio import pulseio import adafruit_ssd1306 i2c = io.I2C(board.SCL, board.SDA) reset_pin = digitalio.DigitalInOut(board.D11) oled = adafruit_ssd1306.SSD1306_I2C(128, 32, i2c, reset=reset_pin) button_select = debouncer.Debouncer(board.D7, mode=digitalio.Pull.UP) button_play = debouncer.Debouncer(board.D9, mode=digitalio.Pull.UP) C4 = 261 C_SH_4 = 277 D4 = 293 D_SH_4 = 311 E4 = 329 F4 = 349 F_SH_4 = 369 G4 = 392 G_SH_4 = 415 A4 = 440 A_SH_4 = 466 B4 = 493 songbook = {'Twinkle Twinkle': [(C4, 0.5), (C4, 0.5), (G4, 0.5), (G4, 0.5), (A4, 0.5), (A4, 0.5), (G4, 1.0), (0, 0.5), (F4, 0.5), (F4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (D4, 0.5), (C4, 0.5), (0, 0.5), (G4, 0.5), (G4, 0.5), (F4, 0.5), (F4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (0, 0.5), (G4, 0.5), (G4, 0.5), (F4, 0.5), (F4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (0, 0.5), (C4, 0.5), (C4, 0.5), (G4, 0.5), (G4, 0.5), (A4, 0.5), (A4, 0.5), (G4, 1.0), (0, 0.5), (F4, 0.5), (F4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (D4, 0.5), (C4, 0.5), (0, 0.5)], 'ItsyBitsy Spider': [(G4, 0.5), (C4, 0.5), (C4, 0.5), (C4, 0.5), (D4, 0.5), (E4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (C4, 0.5), (D4, 0.5), (E4, 0.5), (C4, 0.5), (0, 0.5), (E4, 0.5), (E4, 0.5), (F4, 0.5), (G4, 0.5), (G4, 0.5), (F4, 0.5), (E4, 0.5), (F4, 0.5), (G4, 0.5), (E4, 0.5), (0, 0.5)], 'Old MacDonald': [(G4, 0.5), (G4, 0.5), (G4, 0.5), (D4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (0, 0.5), (B4, 0.5), (B4, 0.5), (A4, 0.5), (A4, 0.5), (G4, 0.5), (0, 0.5), (D4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (D4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (0, 0.5), (B4, 0.5), (B4, 0.5), (A4, 0.5), (A4, 0.5), (G4, 0.5), (0, 0.5), (D4, 0.5), (D4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (D4, 0.5), (D4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (0, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (0, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (0, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (D4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (0, 0.5), (B4, 0.5), (B4, 0.5), (A4, 0.5), (A4, 0.5), (G4, 0.5), (0, 0.5)] } def play_note(note): if note[0] != 0: pwm = pulseio.PWMOut(board.D12, duty_cycle = 0, frequency=note[0]) pwm.duty_cycle = 0x7FFF time.sleep(note[1]) if note[0] != 0: pwm.deinit() def play_song(songname): for note in songbook[songname]: play_note(note) def update(songnames, selected): oled.fill(0) line = 0 for songname in songnames: if line == selected: oled.text(">", 0, line * 8) oled.text(songname, 10, line * 8) line += 1 oled.show() selected_song = 0 song_names = sorted(list(songbook.keys())) while True: button_select.update() button_play.update() update(song_names, selected_song) if button_select.fell: print("select") selected_song = (selected_song + 1) % len(songbook) elif button_play.fell: print("play") play_song(song_names[selected_song])
true
true
790303274af75b585a2a75ace0861b4caedf2d12
2,101
py
Python
tabkit/miniast.py
yandex-tabkit/tabkit
5d4cf05682c3435a89e85902b5ea1f1565aeff58
[ "X11" ]
9
2016-02-27T11:37:24.000Z
2020-04-02T09:13:35.000Z
tabkit/miniast.py
yandex-tabkit/tabkit
5d4cf05682c3435a89e85902b5ea1f1565aeff58
[ "X11" ]
4
2016-02-27T11:45:41.000Z
2020-04-24T10:50:37.000Z
tabkit/miniast.py
yandex-tabkit/tabkit
5d4cf05682c3435a89e85902b5ea1f1565aeff58
[ "X11" ]
2
2016-08-09T11:43:01.000Z
2020-04-23T18:43:52.000Z
from _ast import *
from _ast import __version__


def parse(expr, filename='<unknown>', mode='exec'):
    """
    Parse an expression into an AST node.
    Equivalent to compile(expr, filename, mode, PyCF_ONLY_AST).
    """
    return compile(expr, filename, mode, PyCF_ONLY_AST)


def dump(node, annotate_fields=True, include_attributes=False):
    """
    Return a formatted dump of the tree in *node*. This is mainly useful for
    debugging purposes. The returned string will show the names and the values
    for fields. This makes the code impossible to evaluate, so if evaluation is
    wanted *annotate_fields* must be set to False. Attributes such as line
    numbers and column offsets are not dumped by default. If this is wanted,
    *include_attributes* can be set to True.
    """
    def _format(node):
        if isinstance(node, AST):
            fields = [(a, _format(b)) for a, b in iter_fields(node)]
            if annotate_fields:
                rv = '%s(%s' % (node.__class__.__name__, ', '.join(
                    ('%s=%s' % field for field in fields)
                ))
            else:
                rv = '%s(%s' % (node.__class__.__name__, ', '.join(
                    (b for a, b in fields)
                ))
            if include_attributes and node._attributes:
                rv += fields and ', ' or ' '
                rv += ', '.join('%s=%s' % (a, _format(getattr(node, a)))
                                for a in node._attributes)
            return rv + ')'
        elif isinstance(node, list):
            return '[%s]' % ', '.join(_format(x) for x in node)
        return repr(node)
    if not isinstance(node, AST):
        raise TypeError('expected AST, got %r' % node.__class__.__name__)
    return _format(node)


def iter_fields(node):
    """
    Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields``
    that is present on *node*.
    """
    if node._fields:
        for field in node._fields:
            try:
                yield field, getattr(node, field)
            except AttributeError:
                pass
37.517857
80
0.569729
from _ast import *
from _ast import __version__


def parse(expr, filename='<unknown>', mode='exec'):
    return compile(expr, filename, mode, PyCF_ONLY_AST)


def dump(node, annotate_fields=True, include_attributes=False):
    def _format(node):
        if isinstance(node, AST):
            fields = [(a, _format(b)) for a, b in iter_fields(node)]
            if annotate_fields:
                rv = '%s(%s' % (node.__class__.__name__, ', '.join(
                    ('%s=%s' % field for field in fields)
                ))
            else:
                rv = '%s(%s' % (node.__class__.__name__, ', '.join(
                    (b for a, b in fields)
                ))
            if include_attributes and node._attributes:
                rv += fields and ', ' or ' '
                rv += ', '.join('%s=%s' % (a, _format(getattr(node, a)))
                                for a in node._attributes)
            return rv + ')'
        elif isinstance(node, list):
            return '[%s]' % ', '.join(_format(x) for x in node)
        return repr(node)
    if not isinstance(node, AST):
        raise TypeError('expected AST, got %r' % node.__class__.__name__)
    return _format(node)


def iter_fields(node):
    if node._fields:
        for field in node._fields:
            try:
                yield field, getattr(node, field)
            except AttributeError:
                pass
true
true
7903046d64dbf2bd872e9a48d137c1877ae46763
2,264
py
Python
main.py
circlestarzero/GenshinMapAutoMarkTools
0d959938a93b43f0d9c082164ee1c5346752e364
[ "MIT" ]
167
2022-02-15T09:55:36.000Z
2022-03-31T08:10:34.000Z
main.py
ChengYang1998/GenshinMapAutoMarkTools
0d959938a93b43f0d9c082164ee1c5346752e364
[ "MIT" ]
5
2022-02-18T07:51:15.000Z
2022-03-12T11:19:19.000Z
main.py
ChengYang1998/GenshinMapAutoMarkTools
0d959938a93b43f0d9c082164ee1c5346752e364
[ "MIT" ]
30
2022-02-15T16:04:36.000Z
2022-03-25T08:18:58.000Z
import sys
import win32gui
import sys
import re
import os
import delete_mark
import keyboard
import win32com.client
import pythoncom
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QImage
import map_insert
import pic_locate
import delete_mark

base_dir = os.path.dirname(os.path.abspath(__file__))
app = QApplication(sys.argv)

def JudgeWindowSize(hwnd):
    screen = QApplication.primaryScreen()
    pix = screen.grabWindow(hwnd).toImage().convertToFormat(QImage.Format.Format_RGBA8888)
    print((pix.width(),pix.height()))
    if pix.width()==1920 and pix.height()==1080:
        return 1
    if pix.width()==1366 and pix.height()==768:
        return 1
    if pix.width()==1280 and pix.height()==720:
        return 1
    return 0

def OpenSearchBox():
    st=os.system(r'python {0}\search_box.py'.format(base_dir))

def ImportConfig():
    config_list=[]
    with open('{0}\config.ini'.format(base_dir), 'r', encoding='UTF-8') as f:
        temp=f.readlines()
        print(temp)
        for i in range(5):
            gp=re.search(r'(:|:)\s*?(\S.*)\s*?', temp[i])
            if gp==None or len(gp.groups())<2:config_list.append('')
            else:
                config_list.append(gp.group(2))
    return config_list

if __name__=='__main__':
    pythoncom.CoInitialize()
    shell = win32com.client.Dispatch("WScript.Shell")
    shell.SendKeys('%')
    hwnd = win32gui.FindWindow('UnityWndClass', None)
    if JudgeWindowSize(hwnd)==0:
        print('分辨率尚未适配')
    else:
        win32gui.SetForegroundWindow(hwnd)
        config=ImportConfig()
        if config[0]!='':
            keyboard.add_hotkey(config[0],OpenSearchBox,suppress = False)
        if config[1]!='':
            keyboard.add_hotkey(config[1],delete_mark.DeleteCenterMark,(map_insert.kp2,map_insert.des2),suppress = False)
        if config[2]!='':
            keyboard.add_hotkey(config[2],delete_mark.DeleteMouseMark,(map_insert.kp2,map_insert.des2),suppress = False)
        if config[3]!='':
            keyboard.add_hotkey(config[3],delete_mark.GetMarkInfo,(hwnd,map_insert.kp2,map_insert.des2),suppress = False)
        if config[4]!='':
            keyboard.add_hotkey(config[4],pic_locate.DeleteAllMarks,(hwnd,),suppress = False)
        keyboard.wait()
37.733333
121
0.658127
import sys
import win32gui
import sys
import re
import os
import delete_mark
import keyboard
import win32com.client
import pythoncom
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QImage
import map_insert
import pic_locate
import delete_mark

base_dir = os.path.dirname(os.path.abspath(__file__))
app = QApplication(sys.argv)

def JudgeWindowSize(hwnd):
    screen = QApplication.primaryScreen()
    pix = screen.grabWindow(hwnd).toImage().convertToFormat(QImage.Format.Format_RGBA8888)
    print((pix.width(),pix.height()))
    if pix.width()==1920 and pix.height()==1080:
        return 1
    if pix.width()==1366 and pix.height()==768:
        return 1
    if pix.width()==1280 and pix.height()==720:
        return 1
    return 0

def OpenSearchBox():
    st=os.system(r'python {0}\search_box.py'.format(base_dir))

def ImportConfig():
    config_list=[]
    with open('{0}\config.ini'.format(base_dir), 'r', encoding='UTF-8') as f:
        temp=f.readlines()
        print(temp)
        for i in range(5):
            gp=re.search(r'(:|:)\s*?(\S.*)\s*?', temp[i])
            if gp==None or len(gp.groups())<2:config_list.append('')
            else:
                config_list.append(gp.group(2))
    return config_list

if __name__=='__main__':
    pythoncom.CoInitialize()
    shell = win32com.client.Dispatch("WScript.Shell")
    shell.SendKeys('%')
    hwnd = win32gui.FindWindow('UnityWndClass', None)
    if JudgeWindowSize(hwnd)==0:
        print('分辨率尚未适配')
    else:
        win32gui.SetForegroundWindow(hwnd)
        config=ImportConfig()
        if config[0]!='':
            keyboard.add_hotkey(config[0],OpenSearchBox,suppress = False)
        if config[1]!='':
            keyboard.add_hotkey(config[1],delete_mark.DeleteCenterMark,(map_insert.kp2,map_insert.des2),suppress = False)
        if config[2]!='':
            keyboard.add_hotkey(config[2],delete_mark.DeleteMouseMark,(map_insert.kp2,map_insert.des2),suppress = False)
        if config[3]!='':
            keyboard.add_hotkey(config[3],delete_mark.GetMarkInfo,(hwnd,map_insert.kp2,map_insert.des2),suppress = False)
        if config[4]!='':
            keyboard.add_hotkey(config[4],pic_locate.DeleteAllMarks,(hwnd,),suppress = False)
        keyboard.wait()
true
true
7903048cb97d4d601b4f027fedda272a57825a7a
316
py
Python
examples/partial_path_process.py
mikelolasagasti/bandit
3dca7820158ea04668878186a29d34e81bbd7fe3
[ "Apache-2.0" ]
4,016
2018-04-26T13:01:54.000Z
2022-03-31T15:57:41.000Z
examples/partial_path_process.py
lweslen/bandit
2bd1ffaae1b83102af8587dcbeecd0b402dbec4e
[ "Apache-2.0" ]
570
2018-04-26T10:07:41.000Z
2022-03-29T18:55:09.000Z
examples/partial_path_process.py
lweslen/bandit
2bd1ffaae1b83102af8587dcbeecd0b402dbec4e
[ "Apache-2.0" ]
484
2018-04-26T09:59:46.000Z
2022-03-31T18:00:50.000Z
from subprocess import Popen as pop

pop('gcc --version', shell=False)
pop('/bin/gcc --version', shell=False)
pop(var, shell=False)

pop(['ls', '-l'], shell=False)
pop(['/bin/ls', '-l'], shell=False)

pop('../ls -l', shell=False)

pop('c:\\hello\\something', shell=False)
pop('c:/hello/something_else', shell=False)
22.571429
43
0.655063
from subprocess import Popen as pop

pop('gcc --version', shell=False)
pop('/bin/gcc --version', shell=False)
pop(var, shell=False)

pop(['ls', '-l'], shell=False)
pop(['/bin/ls', '-l'], shell=False)

pop('../ls -l', shell=False)

pop('c:\\hello\\something', shell=False)
pop('c:/hello/something_else', shell=False)
true
true
790304d3eb24049b4ad2a8328d9ce8cbafc80e03
2,532
py
Python
src/bot.py
paradox4280/Erimx
bb7a85d56927c0aa3195f9a1bfc9cef160a28066
[ "MIT" ]
null
null
null
src/bot.py
paradox4280/Erimx
bb7a85d56927c0aa3195f9a1bfc9cef160a28066
[ "MIT" ]
null
null
null
src/bot.py
paradox4280/Erimx
bb7a85d56927c0aa3195f9a1bfc9cef160a28066
[ "MIT" ]
null
null
null
#ERIMX Made By Paradox4280 aka c2FI, x2Fi, RG9t

import discord, base64, codecs, requests, urllib.parse, datetime, asyncio, sys, praw
import random, aiohttp, io, json, os, string, platform, time, bs4, colorama
from discord.ext import (
    commands
)
from discord.voice_client import VoiceClient
# from discord.ext.commands import bot
from bs4 import BeautifulSoup as bs4
from colorama import Fore, Style
from discord import Permissions
from discord.utils import get
from discord import User
from os import system

with open('config.json') as f:
    config = json.load(f)

def get_prefix(paradox, message):
    with open('prefixes.json', 'r') as f:
        prefixes = json.load(f)

paradox = commands.Bot(command_prefix = get_prefix, case_Insensitive = True)

[paradox.load_extension(f"cogs.{cog[:-3]}") for cog in os.listdir("cogs") if cog.endswith(".py")]

@paradox.event
async def on_ready():
    await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name="Her"))
    print(f'\n{Fore.GREEN}[>] {Fore.RESET}{Fore.CYAN}Logged in as{Fore.RESET} {Fore.YELLOW}{paradox.user.name}#{paradox.user.discriminator}\n')
    print(f'\n{Fore.GREEN}[>]{Fore.RESET} {Fore.CYAN}User ID:{Fore.RESET} {Fore.YELLOW}{paradox.user.id}\n')
    print(f'\n{Fore.GREEN}[>]{Fore.RESET} {Fore.CYAN}Version:{Fore.RESET} {Fore.YELLOW}{discord.__version__}\n')

@paradox.event
async def on_command_error(ctx, error):
    embed = discord.Embed(description=f'Error. Try =help ({error})', color = 16202876)
    await ctx.send(embed = embed)

@paradox.event
async def on_guild_join(guild):
    with open('prefixes.json', 'r') as f:
        prefixes = json.load(f)

    prefixes[str(guild.id)] = '='

    with open('prefixes.json', 'w') as f:
        json.dump(prefixes, f, indent=4)

@paradox.event
async def on_guild_remove(guild):
    with open('prefixes.json', 'r') as f:
        prefixes = json.load(f)

    prefixes.pop(str(guild.id))

    with open('prefixes.json', 'w') as f:
        json.dump(prefixes, f, indent=4)

@paradox.command()
async def changeprefix(ctx, prefix):
    with open('prefixes.json', 'r') as f:
        prefixes = json.load(f)

    prefixes[str(ctx.guild.id)] = prefix

    with open('prefixes.json', 'w') as f:
        json.dump(prefixes, f, indent=4)

    embed = discord.Embed(description = f'prefix changed to: {prefix}', color = 16202876)
    await ctx.send(embed = embed)

paradox.run(os.getenv('BOT_TOKEN'))
33.76
144
0.669431
import discord, base64, codecs, requests, urllib.parse, datetime, asyncio, sys, praw
import random, aiohttp, io, json, os, string, platform, time, bs4, colorama
from discord.ext import (
    commands
)
from discord.voice_client import VoiceClient
from bs4 import BeautifulSoup as bs4
from colorama import Fore, Style
from discord import Permissions
from discord.utils import get
from discord import User
from os import system

with open('config.json') as f:
    config = json.load(f)

def get_prefix(paradox, message):
    with open('prefixes.json', 'r') as f:
        prefixes = json.load(f)

paradox = commands.Bot(command_prefix = get_prefix, case_Insensitive = True)

[paradox.load_extension(f"cogs.{cog[:-3]}") for cog in os.listdir("cogs") if cog.endswith(".py")]

@paradox.event
async def on_ready():
    await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name="Her"))
    print(f'\n{Fore.GREEN}[>] {Fore.RESET}{Fore.CYAN}Logged in as{Fore.RESET} {Fore.YELLOW}{paradox.user.name}#{paradox.user.discriminator}\n')
    print(f'\n{Fore.GREEN}[>]{Fore.RESET} {Fore.CYAN}User ID:{Fore.RESET} {Fore.YELLOW}{paradox.user.id}\n')
    print(f'\n{Fore.GREEN}[>]{Fore.RESET} {Fore.CYAN}Version:{Fore.RESET} {Fore.YELLOW}{discord.__version__}\n')

@paradox.event
async def on_command_error(ctx, error):
    embed = discord.Embed(description=f'Error. Try =help ({error})', color = 16202876)
    await ctx.send(embed = embed)

@paradox.event
async def on_guild_join(guild):
    with open('prefixes.json', 'r') as f:
        prefixes = json.load(f)

    prefixes[str(guild.id)] = '='

    with open('prefixes.json', 'w') as f:
        json.dump(prefixes, f, indent=4)

@paradox.event
async def on_guild_remove(guild):
    with open('prefixes.json', 'r') as f:
        prefixes = json.load(f)

    prefixes.pop(str(guild.id))

    with open('prefixes.json', 'w') as f:
        json.dump(prefixes, f, indent=4)

@paradox.command()
async def changeprefix(ctx, prefix):
    with open('prefixes.json', 'r') as f:
        prefixes = json.load(f)

    prefixes[str(ctx.guild.id)] = prefix

    with open('prefixes.json', 'w') as f:
        json.dump(prefixes, f, indent=4)

    embed = discord.Embed(description = f'prefix changed to: {prefix}', color = 16202876)
    await ctx.send(embed = embed)

paradox.run(os.getenv('BOT_TOKEN'))
true
true
790305dd7cfb2800098189ccedfd45a30fe7dc03
9,287
py
Python
kuryr_kubernetes/tests/unit/cni/test_binding.py
dulek/kuryr-kubernetes
d76a9dad18320ecd57b7735aed34806aa07f4091
[ "Apache-2.0" ]
null
null
null
kuryr_kubernetes/tests/unit/cni/test_binding.py
dulek/kuryr-kubernetes
d76a9dad18320ecd57b7735aed34806aa07f4091
[ "Apache-2.0" ]
null
null
null
kuryr_kubernetes/tests/unit/cni/test_binding.py
dulek/kuryr-kubernetes
d76a9dad18320ecd57b7735aed34806aa07f4091
[ "Apache-2.0" ]
null
null
null
# Copyright 2017 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import uuid from os_vif import objects as osv_objects from oslo_config import cfg from kuryr_kubernetes.cni.binding import base from kuryr_kubernetes import objects from kuryr_kubernetes.tests import base as test_base from kuryr_kubernetes.tests import fake CONF = cfg.CONF class TestDriverMixin(test_base.TestCase): def setUp(self): super(TestDriverMixin, self).setUp() self.instance_info = osv_objects.instance_info.InstanceInfo( uuid=uuid.uuid4(), name='foo') self.ifname = 'c_interface' self.netns = '/proc/netns/1234' # Mock IPDB context managers self.ipdbs = {} self.m_bridge_iface = mock.Mock(__exit__=mock.Mock(return_value=None)) self.m_c_iface = mock.Mock() self.m_h_iface = mock.Mock() self.h_ipdb, self.h_ipdb_exit = self._mock_ipdb_context_manager(None) self.c_ipdb, self.c_ipdb_exit = self._mock_ipdb_context_manager( self.netns) self.m_create = mock.Mock() self.h_ipdb.create = mock.Mock( return_value=mock.Mock( __enter__=mock.Mock(return_value=self.m_create), __exit__=mock.Mock(return_value=None))) self.c_ipdb.create = mock.Mock( return_value=mock.Mock( __enter__=mock.Mock(return_value=self.m_create), __exit__=mock.Mock(return_value=None))) def _mock_ipdb_context_manager(self, netns): mock_ipdb = mock.Mock( interfaces={ 'bridge': mock.Mock( __enter__=mock.Mock(return_value=self.m_bridge_iface), __exit__=mock.Mock(return_value=None), ), 'c_interface': mock.Mock( __enter__=mock.Mock(return_value=self.m_c_iface), __exit__=mock.Mock(return_value=None), ), 'h_interface': mock.Mock( __enter__=mock.Mock(return_value=self.m_h_iface), __exit__=mock.Mock(return_value=None), ), } ) mock_exit = mock.Mock(return_value=None) mock_ipdb.__exit__ = mock_exit mock_ipdb.__enter__ = mock.Mock(return_value=mock_ipdb) self.ipdbs[netns] = mock_ipdb return mock_ipdb, mock_exit @mock.patch('kuryr_kubernetes.cni.binding.base.get_ipdb') @mock.patch('os_vif.plug') def _test_connect(self, m_vif_plug, m_get_ipdb, report=None): def get_ipdb(netns=None): return self.ipdbs[netns] m_get_ipdb.side_effect = get_ipdb base.connect(self.vif, self.instance_info, self.ifname, self.netns, report) m_vif_plug.assert_called_once_with(self.vif, self.instance_info) self.m_c_iface.add_ip.assert_called_once_with('192.168.0.2/24') if report: report.assert_called_once() @mock.patch('os_vif.unplug') def _test_disconnect(self, m_vif_unplug, report=None): base.disconnect(self.vif, self.instance_info, self.ifname, self.netns, report) m_vif_unplug.assert_called_once_with(self.vif, self.instance_info) if report: report.assert_called_once() class TestOpenVSwitchDriver(TestDriverMixin, test_base.TestCase): def setUp(self): super(TestOpenVSwitchDriver, self).setUp() self.vif = fake._fake_vif(osv_objects.vif.VIFOpenVSwitch) @mock.patch('kuryr_kubernetes.cni.plugins.k8s_cni_registry.' 
'K8sCNIRegistryPlugin.report_drivers_health') @mock.patch('os.getpid', mock.Mock(return_value=123)) @mock.patch('kuryr_kubernetes.linux_net_utils.create_ovs_vif_port') def test_connect(self, mock_create_ovs, m_report): self._test_connect(report=m_report) self.assertEqual(3, self.h_ipdb_exit.call_count) self.assertEqual(2, self.c_ipdb_exit.call_count) self.c_ipdb.create.assert_called_once_with( ifname=self.ifname, peer='h_interface', kind='veth') self.assertEqual(1, self.m_create.mtu) self.assertEqual(str(self.vif.address), self.m_create.address) self.m_create.up.assert_called_once_with() self.assertEqual(123, self.m_h_iface.net_ns_pid) self.assertEqual(1, self.m_h_iface.mtu) self.m_h_iface.up.assert_called_once_with() mock_create_ovs.assert_called_once_with( 'bridge', 'h_interface', '89eccd45-43e9-43d8-b4cc-4c13db13f782', '3e:94:b7:31:a0:83', 'kuryr') @mock.patch('kuryr_kubernetes.cni.plugins.k8s_cni_registry.' 'K8sCNIRegistryPlugin.report_drivers_health') @mock.patch('kuryr_kubernetes.linux_net_utils.delete_ovs_vif_port') def test_disconnect(self, mock_delete_ovs, m_report): self._test_disconnect(report=m_report) mock_delete_ovs.assert_called_once_with('bridge', 'h_interface') class TestBridgeDriver(TestDriverMixin, test_base.TestCase): def setUp(self): super(TestBridgeDriver, self).setUp() self.vif = fake._fake_vif(osv_objects.vif.VIFBridge) @mock.patch('os.getpid', mock.Mock(return_value=123)) def test_connect(self): self._test_connect() self.m_h_iface.remove.assert_called_once_with() self.assertEqual(3, self.h_ipdb_exit.call_count) self.assertEqual(2, self.c_ipdb_exit.call_count) self.c_ipdb.create.assert_called_once_with( ifname=self.ifname, peer='h_interface', kind='veth') self.assertEqual(1, self.m_create.mtu) self.assertEqual(str(self.vif.address), self.m_create.address) self.m_create.up.assert_called_once_with() self.assertEqual(123, self.m_h_iface.net_ns_pid) self.assertEqual(1, self.m_h_iface.mtu) self.m_h_iface.up.assert_called_once_with() self.m_bridge_iface.add_port.assert_called_once_with('h_interface') def test_disconnect(self): self._test_disconnect() class TestNestedVlanDriver(TestDriverMixin, test_base.TestCase): def setUp(self): super(TestNestedVlanDriver, self).setUp() self.vif = fake._fake_vif(objects.vif.VIFVlanNested) self.vif.vlan_id = 7 CONF.set_override('link_iface', 'bridge', group='binding') self.addCleanup(CONF.clear_override, 'link_iface', group='binding') def test_connect(self): self._test_connect() self.assertEqual(1, self.h_ipdb_exit.call_count) self.assertEqual(2, self.c_ipdb_exit.call_count) self.assertEqual(self.ifname, self.m_h_iface.ifname) self.assertEqual(1, self.m_h_iface.mtu) self.assertEqual(str(self.vif.address), self.m_h_iface.address) self.m_h_iface.up.assert_called_once_with() def test_disconnect(self): self._test_disconnect() class TestNestedMacvlanDriver(TestDriverMixin, test_base.TestCase): def setUp(self): super(TestNestedMacvlanDriver, self).setUp() self.vif = fake._fake_vif(objects.vif.VIFMacvlanNested) CONF.set_override('link_iface', 'bridge', group='binding') self.addCleanup(CONF.clear_override, 'link_iface', group='binding') def test_connect(self): self._test_connect() self.assertEqual(1, self.h_ipdb_exit.call_count) self.assertEqual(2, self.c_ipdb_exit.call_count) self.assertEqual(self.ifname, self.m_h_iface.ifname) self.assertEqual(1, self.m_h_iface.mtu) self.assertEqual(str(self.vif.address), self.m_h_iface.address) self.m_h_iface.up.assert_called_once_with() def test_disconnect(self): self._test_disconnect() class 
TestSriovDriver(TestDriverMixin, test_base.TestCase): def setUp(self): super(TestSriovDriver, self).setUp() self.vif = fake._fake_vif(objects.vif.VIFSriov) self.vif.physnet = 'test_physnet' @mock.patch('kuryr_kubernetes.cni.binding.sriov.VIFSriovDriver.' '_get_host_pf_names') @mock.patch('kuryr_kubernetes.cni.binding.sriov.VIFSriovDriver.' '_get_available_vf_info') def test_connect(self, m_avail_vf_info, m_host_pf_names): m_avail_vf_info.return_value = [self.ifname, 1, 'h_interface'] m_host_pf_names.return_value = 'h_interface' self._test_connect() self.assertEqual(self.ifname, self.m_c_iface.ifname) self.assertEqual(1, self.m_c_iface.mtu) self.assertEqual(str(self.vif.address), self.m_c_iface.address) self.m_c_iface.up.assert_called_once_with() def test_disconnect(self): self._test_disconnect()
39.688034
78
0.676214
import mock import uuid from os_vif import objects as osv_objects from oslo_config import cfg from kuryr_kubernetes.cni.binding import base from kuryr_kubernetes import objects from kuryr_kubernetes.tests import base as test_base from kuryr_kubernetes.tests import fake CONF = cfg.CONF class TestDriverMixin(test_base.TestCase): def setUp(self): super(TestDriverMixin, self).setUp() self.instance_info = osv_objects.instance_info.InstanceInfo( uuid=uuid.uuid4(), name='foo') self.ifname = 'c_interface' self.netns = '/proc/netns/1234' self.ipdbs = {} self.m_bridge_iface = mock.Mock(__exit__=mock.Mock(return_value=None)) self.m_c_iface = mock.Mock() self.m_h_iface = mock.Mock() self.h_ipdb, self.h_ipdb_exit = self._mock_ipdb_context_manager(None) self.c_ipdb, self.c_ipdb_exit = self._mock_ipdb_context_manager( self.netns) self.m_create = mock.Mock() self.h_ipdb.create = mock.Mock( return_value=mock.Mock( __enter__=mock.Mock(return_value=self.m_create), __exit__=mock.Mock(return_value=None))) self.c_ipdb.create = mock.Mock( return_value=mock.Mock( __enter__=mock.Mock(return_value=self.m_create), __exit__=mock.Mock(return_value=None))) def _mock_ipdb_context_manager(self, netns): mock_ipdb = mock.Mock( interfaces={ 'bridge': mock.Mock( __enter__=mock.Mock(return_value=self.m_bridge_iface), __exit__=mock.Mock(return_value=None), ), 'c_interface': mock.Mock( __enter__=mock.Mock(return_value=self.m_c_iface), __exit__=mock.Mock(return_value=None), ), 'h_interface': mock.Mock( __enter__=mock.Mock(return_value=self.m_h_iface), __exit__=mock.Mock(return_value=None), ), } ) mock_exit = mock.Mock(return_value=None) mock_ipdb.__exit__ = mock_exit mock_ipdb.__enter__ = mock.Mock(return_value=mock_ipdb) self.ipdbs[netns] = mock_ipdb return mock_ipdb, mock_exit @mock.patch('kuryr_kubernetes.cni.binding.base.get_ipdb') @mock.patch('os_vif.plug') def _test_connect(self, m_vif_plug, m_get_ipdb, report=None): def get_ipdb(netns=None): return self.ipdbs[netns] m_get_ipdb.side_effect = get_ipdb base.connect(self.vif, self.instance_info, self.ifname, self.netns, report) m_vif_plug.assert_called_once_with(self.vif, self.instance_info) self.m_c_iface.add_ip.assert_called_once_with('192.168.0.2/24') if report: report.assert_called_once() @mock.patch('os_vif.unplug') def _test_disconnect(self, m_vif_unplug, report=None): base.disconnect(self.vif, self.instance_info, self.ifname, self.netns, report) m_vif_unplug.assert_called_once_with(self.vif, self.instance_info) if report: report.assert_called_once() class TestOpenVSwitchDriver(TestDriverMixin, test_base.TestCase): def setUp(self): super(TestOpenVSwitchDriver, self).setUp() self.vif = fake._fake_vif(osv_objects.vif.VIFOpenVSwitch) @mock.patch('kuryr_kubernetes.cni.plugins.k8s_cni_registry.' 
'K8sCNIRegistryPlugin.report_drivers_health') @mock.patch('os.getpid', mock.Mock(return_value=123)) @mock.patch('kuryr_kubernetes.linux_net_utils.create_ovs_vif_port') def test_connect(self, mock_create_ovs, m_report): self._test_connect(report=m_report) self.assertEqual(3, self.h_ipdb_exit.call_count) self.assertEqual(2, self.c_ipdb_exit.call_count) self.c_ipdb.create.assert_called_once_with( ifname=self.ifname, peer='h_interface', kind='veth') self.assertEqual(1, self.m_create.mtu) self.assertEqual(str(self.vif.address), self.m_create.address) self.m_create.up.assert_called_once_with() self.assertEqual(123, self.m_h_iface.net_ns_pid) self.assertEqual(1, self.m_h_iface.mtu) self.m_h_iface.up.assert_called_once_with() mock_create_ovs.assert_called_once_with( 'bridge', 'h_interface', '89eccd45-43e9-43d8-b4cc-4c13db13f782', '3e:94:b7:31:a0:83', 'kuryr') @mock.patch('kuryr_kubernetes.cni.plugins.k8s_cni_registry.' 'K8sCNIRegistryPlugin.report_drivers_health') @mock.patch('kuryr_kubernetes.linux_net_utils.delete_ovs_vif_port') def test_disconnect(self, mock_delete_ovs, m_report): self._test_disconnect(report=m_report) mock_delete_ovs.assert_called_once_with('bridge', 'h_interface') class TestBridgeDriver(TestDriverMixin, test_base.TestCase): def setUp(self): super(TestBridgeDriver, self).setUp() self.vif = fake._fake_vif(osv_objects.vif.VIFBridge) @mock.patch('os.getpid', mock.Mock(return_value=123)) def test_connect(self): self._test_connect() self.m_h_iface.remove.assert_called_once_with() self.assertEqual(3, self.h_ipdb_exit.call_count) self.assertEqual(2, self.c_ipdb_exit.call_count) self.c_ipdb.create.assert_called_once_with( ifname=self.ifname, peer='h_interface', kind='veth') self.assertEqual(1, self.m_create.mtu) self.assertEqual(str(self.vif.address), self.m_create.address) self.m_create.up.assert_called_once_with() self.assertEqual(123, self.m_h_iface.net_ns_pid) self.assertEqual(1, self.m_h_iface.mtu) self.m_h_iface.up.assert_called_once_with() self.m_bridge_iface.add_port.assert_called_once_with('h_interface') def test_disconnect(self): self._test_disconnect() class TestNestedVlanDriver(TestDriverMixin, test_base.TestCase): def setUp(self): super(TestNestedVlanDriver, self).setUp() self.vif = fake._fake_vif(objects.vif.VIFVlanNested) self.vif.vlan_id = 7 CONF.set_override('link_iface', 'bridge', group='binding') self.addCleanup(CONF.clear_override, 'link_iface', group='binding') def test_connect(self): self._test_connect() self.assertEqual(1, self.h_ipdb_exit.call_count) self.assertEqual(2, self.c_ipdb_exit.call_count) self.assertEqual(self.ifname, self.m_h_iface.ifname) self.assertEqual(1, self.m_h_iface.mtu) self.assertEqual(str(self.vif.address), self.m_h_iface.address) self.m_h_iface.up.assert_called_once_with() def test_disconnect(self): self._test_disconnect() class TestNestedMacvlanDriver(TestDriverMixin, test_base.TestCase): def setUp(self): super(TestNestedMacvlanDriver, self).setUp() self.vif = fake._fake_vif(objects.vif.VIFMacvlanNested) CONF.set_override('link_iface', 'bridge', group='binding') self.addCleanup(CONF.clear_override, 'link_iface', group='binding') def test_connect(self): self._test_connect() self.assertEqual(1, self.h_ipdb_exit.call_count) self.assertEqual(2, self.c_ipdb_exit.call_count) self.assertEqual(self.ifname, self.m_h_iface.ifname) self.assertEqual(1, self.m_h_iface.mtu) self.assertEqual(str(self.vif.address), self.m_h_iface.address) self.m_h_iface.up.assert_called_once_with() def test_disconnect(self): self._test_disconnect() class 
TestSriovDriver(TestDriverMixin, test_base.TestCase): def setUp(self): super(TestSriovDriver, self).setUp() self.vif = fake._fake_vif(objects.vif.VIFSriov) self.vif.physnet = 'test_physnet' @mock.patch('kuryr_kubernetes.cni.binding.sriov.VIFSriovDriver.' '_get_host_pf_names') @mock.patch('kuryr_kubernetes.cni.binding.sriov.VIFSriovDriver.' '_get_available_vf_info') def test_connect(self, m_avail_vf_info, m_host_pf_names): m_avail_vf_info.return_value = [self.ifname, 1, 'h_interface'] m_host_pf_names.return_value = 'h_interface' self._test_connect() self.assertEqual(self.ifname, self.m_c_iface.ifname) self.assertEqual(1, self.m_c_iface.mtu) self.assertEqual(str(self.vif.address), self.m_c_iface.address) self.m_c_iface.up.assert_called_once_with() def test_disconnect(self): self._test_disconnect()
true
true
79030645f283270e550da64e73d9bf2c2f6c7fe1
51,131
py
Python
tensorflow/python/ops/nn.py
calebchoo/modulabs
10fbaf0581700641fc9b38b1bd722044bfb7c638
[ "Apache-2.0" ]
2
2020-06-30T05:52:37.000Z
2021-01-21T04:16:39.000Z
tensorflow/python/ops/nn.py
alainrk/tensorflow
314d9cd9b607460f8bfea80fc828b1521ca18443
[ "Apache-2.0" ]
null
null
null
tensorflow/python/ops/nn.py
alainrk/tensorflow
314d9cd9b607460f8bfea80fc828b1521ca18443
[ "Apache-2.0" ]
2
2018-03-14T03:10:40.000Z
2018-09-13T13:59:40.000Z
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= # pylint: disable=unused-import,g-bad-import-order """## Activation Functions The activation ops provide different types of nonlinearities for use in neural networks. These include smooth nonlinearities (`sigmoid`, `tanh`, `elu`, `softplus`, and `softsign`), continuous but not everywhere differentiable functions (`relu`, `relu6`, and `relu_x`), and random regularization (`dropout`). All activation ops apply componentwise, and produce a tensor of the same shape as the input tensor. @@relu @@relu6 @@elu @@softplus @@softsign @@dropout @@bias_add @@sigmoid @@tanh ## Convolution The convolution ops sweep a 2-D filter over a batch of images, applying the filter to each window of each image of the appropriate size. The different ops trade off between generic vs. specific filters: * `conv2d`: Arbitrary filters that can mix channels together. * `depthwise_conv2d`: Filters that operate on each channel independently. * `separable_conv2d`: A depthwise spatial filter followed by a pointwise filter. Note that although these ops are called "convolution", they are strictly speaking "cross-correlation" since the filter is combined with an input window without reversing the filter. For details, see [the properties of cross-correlation](https://en.wikipedia.org/wiki/Cross-correlation#Properties). The filter is applied to image patches of the same size as the filter and strided according to the `strides` argument. `strides = [1, 1, 1, 1]` applies the filter to a patch at every offset, `strides = [1, 2, 2, 1]` applies the filter to every other image patch in each dimension, etc. Ignoring channels for the moment, and assume that the 4-D `input` has shape `[batch, in_height, in_width, ...]` and the 4-D `filter` has shape `[filter_height, filter_width, ...]`, then the spatial semantics of the convolution ops are as follows: first, according to the padding scheme chosen as `'SAME'` or `'VALID'`, the output size and the padding pixels are computed. For the `'SAME'` padding, the output height and width are computed as: out_height = ceil(float(in_height) / float(strides[1])) out_width = ceil(float(in_width) / float(strides[2])) and the padding on the top and left are computed as: pad_along_height = ((out_height - 1) * strides[1] + filter_height - in_height) pad_along_width = ((out_width - 1) * strides[2] + filter_width - in_width) pad_top = pad_along_height / 2 pad_left = pad_along_width / 2 Note that the division by 2 means that there might be cases when the padding on both sides (top vs bottom, right vs left) are off by one. In this case, the bottom and right sides always get the one additional padded pixel. For example, when `pad_along_height` is 5, we pad 2 pixels at the top and 3 pixels at the bottom. 
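As a quick sanity check of the 'SAME' padding arithmetic spelled out above, the sketch below recomputes the output size and the top/bottom, left/right split in plain Python. same_padding is a hypothetical helper written only for illustration, not a TensorFlow API, and the max(..., 0) clamp is an added assumption for filters narrower than the stride.

import math

def same_padding(in_height, in_width, filter_height, filter_width, strides):
    # Output size under 'SAME' padding, as given in the formulas above.
    out_height = int(math.ceil(float(in_height) / float(strides[1])))
    out_width = int(math.ceil(float(in_width) / float(strides[2])))
    # Total padding required along each spatial dimension.
    pad_along_height = max((out_height - 1) * strides[1] + filter_height - in_height, 0)
    pad_along_width = max((out_width - 1) * strides[2] + filter_width - in_width, 0)
    # When the total is odd, the extra pixel goes to the bottom/right.
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    return (out_height, out_width), (pad_top, pad_bottom, pad_left, pad_right)

# pad_along_height works out to 5 here: 2 pixels on top, 3 on the bottom.
print(same_padding(in_height=10, in_width=10, filter_height=7, filter_width=7,
                   strides=[1, 2, 2, 1]))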
Note that this is different from existing libraries such as cuDNN and Caffe, which explicitly specify the number of padded pixels and always pad the same number of pixels on both sides. For the `'VALID`' padding, the output height and width are computed as: out_height = ceil(float(in_height - filter_height + 1) / float(strides[1])) out_width = ceil(float(in_width - filter_width + 1) / float(strides[2])) and the padding values are always zero. The output is then computed as output[b, i, j, :] = sum_{di, dj} input[b, strides[1] * i + di - pad_top, strides[2] * j + dj - pad_left, ...] * filter[di, dj, ...] where any value outside the original input image region are considered zero ( i.e. we pad zero values around the border of the image). Since `input` is 4-D, each `input[b, i, j, :]` is a vector. For `conv2d`, these vectors are multiplied by the `filter[di, dj, :, :]` matrices to produce new vectors. For `depthwise_conv_2d`, each scalar component `input[b, i, j, k]` is multiplied by a vector `filter[di, dj, k]`, and all the vectors are concatenated. @@conv2d @@depthwise_conv2d @@separable_conv2d @@atrous_conv2d @@conv2d_transpose @@conv3d ## Pooling The pooling ops sweep a rectangular window over the input tensor, computing a reduction operation for each window (average, max, or max with argmax). Each pooling op uses rectangular windows of size `ksize` separated by offset `strides`. For example, if `strides` is all ones every window is used, if `strides` is all twos every other window is used in each dimension, etc. In detail, the output is output[i] = reduce(value[strides * i:strides * i + ksize]) where the indices also take into consideration the padding values. Please refer to the `Convolution` section for details about the padding calculation. @@avg_pool @@max_pool @@max_pool_with_argmax @@avg_pool3d @@max_pool3d ## Morphological filtering Morphological operators are non-linear filters used in image processing. [Greyscale morphological dilation] (https://en.wikipedia.org/wiki/Dilation_(morphology)) is the max-sum counterpart of standard sum-product convolution: output[b, y, x, c] = max_{dy, dx} input[b, strides[1] * y + rates[1] * dy, strides[2] * x + rates[2] * dx, c] + filter[dy, dx, c] The `filter` is usually called structuring function. Max-pooling is a special case of greyscale morphological dilation when the filter assumes all-zero values (a.k.a. flat structuring function). [Greyscale morphological erosion] (https://en.wikipedia.org/wiki/Erosion_(morphology)) is the min-sum counterpart of standard sum-product convolution: output[b, y, x, c] = min_{dy, dx} input[b, strides[1] * y - rates[1] * dy, strides[2] * x - rates[2] * dx, c] - filter[dy, dx, c] Dilation and erosion are dual to each other. The dilation of the input signal `f` by the structuring signal `g` is equal to the negation of the erosion of `-f` by the reflected `g`, and vice versa. Striding and padding is carried out in exactly the same way as in standard convolution. Please refer to the `Convolution` section for details. @@dilation2d @@erosion2d ## Normalization Normalization is useful to prevent neurons from saturating when inputs may have varying scale, and to aid generalization. @@l2_normalize @@local_response_normalization @@sufficient_statistics @@normalize_moments @@moments ## Losses The loss ops measure error between two tensors, or between a tensor and zero. These can be used for measuring accuracy of a network in a regression task or for regularization purposes (weight decay). 
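The max-sum dilation formula above is easy to reproduce for a 1-D signal. dilate1d below is a made-up NumPy helper (not the dilation2d op, and 'VALID' padding only); it also demonstrates the special case mentioned in the text, where an all-zero structuring function turns dilation into max-pooling.

import numpy as np

def dilate1d(signal, filt, stride=1, rate=1):
    # Greyscale morphological dilation, 1-D analogue of the formula above:
    # output[y] = max_dy signal[stride * y + rate * dy] + filt[dy]
    k = len(filt)
    span = rate * (k - 1) + 1                     # extent of the dilated filter
    out_len = (len(signal) - span) // stride + 1  # 'VALID' output length
    return np.array([
        max(signal[stride * y + rate * dy] + filt[dy] for dy in range(k))
        for y in range(out_len)
    ])

x = np.array([1.0, 5.0, 2.0, 8.0, 3.0, 0.0])
# With an all-zero (flat) structuring function, dilation is just max-pooling.
print(dilate1d(x, filt=[0.0, 0.0], stride=2))     # [5. 8. 3.]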
@@l2_loss ## Classification TensorFlow provides several operations that help you perform classification. @@sigmoid_cross_entropy_with_logits @@softmax @@log_softmax @@softmax_cross_entropy_with_logits @@sparse_softmax_cross_entropy_with_logits @@weighted_cross_entropy_with_logits ## Embeddings TensorFlow provides library support for looking up values in embedding tensors. @@embedding_lookup @@embedding_lookup_sparse ## Recurrent Neural Networks TensorFlow provides a number of methods for constructing Recurrent Neural Networks. Most accept an `RNNCell`-subclassed object (see the documentation for `tf.nn.rnn_cell`). @@dynamic_rnn @@rnn @@state_saving_rnn @@bidirectional_rnn ## Conectionist Temporal Classification (CTC) @@ctc_loss @@ctc_greedy_decoder @@ctc_beam_search_decoder ## Evaluation The evaluation ops are useful for measuring the performance of a network. Since they are nondifferentiable, they are typically used at evaluation time. @@top_k @@in_top_k ## Candidate Sampling Do you want to train a multiclass or multilabel model with thousands or millions of output classes (for example, a language model with a large vocabulary)? Training with a full Softmax is slow in this case, since all of the classes are evaluated for every training example. Candidate Sampling training algorithms can speed up your step times by only considering a small randomly-chosen subset of contrastive classes (called candidates) for each batch of training examples. See our [Candidate Sampling Algorithms Reference] (../../extras/candidate_sampling.pdf) ### Sampled Loss Functions TensorFlow provides the following sampled loss functions for faster training. @@nce_loss @@sampled_softmax_loss ### Candidate Samplers TensorFlow provides the following samplers for randomly sampling candidate classes when using one of the sampled loss functions above. @@uniform_candidate_sampler @@log_uniform_candidate_sampler @@learned_unigram_candidate_sampler @@fixed_unigram_candidate_sampler ### Miscellaneous candidate sampling utilities @@compute_accidental_hits """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import candidate_sampling_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_grad from tensorflow.python.ops import nn_ops from tensorflow.python.ops import numerics from tensorflow.python.ops import random_ops from tensorflow.python.ops import rnn_cell from tensorflow.python.ops import seq2seq from tensorflow.python.ops import sparse_ops from tensorflow.python.ops import variable_scope as vs from tensorflow.python.ops.math_ops import sigmoid from tensorflow.python.ops.math_ops import tanh from tensorflow.python.util.all_util import make_all # Bring more nn-associated functionality into this package. 
# go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.python.ops.ctc_ops import * from tensorflow.python.ops.nn_ops import * from tensorflow.python.ops.candidate_sampling_ops import * from tensorflow.python.ops.embedding_ops import * from tensorflow.python.ops.rnn import * # pylint: enable=wildcard-import def sigmoid_cross_entropy_with_logits(logits, targets, name=None): """Computes sigmoid cross entropy given `logits`. Measures the probability error in discrete classification tasks in which each class is independent and not mutually exclusive. For instance, one could perform multilabel classification where a picture can contain both an elephant and a dog at the same time. For brevity, let `x = logits`, `z = targets`. The logistic loss is z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x))) = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)) = (1 - z) * x + log(1 + exp(-x)) = x - x * z + log(1 + exp(-x)) For x < 0, to avoid overflow in exp(-x), we reformulate the above x - x * z + log(1 + exp(-x)) = log(exp(x)) - x * z + log(1 + exp(-x)) = - x * z + log(1 + exp(x)) Hence, to ensure stability and avoid overflow, the implementation uses this equivalent formulation max(x, 0) - x * z + log(1 + exp(-abs(x))) `logits` and `targets` must have the same type and shape. Args: logits: A `Tensor` of type `float32` or `float64`. targets: A `Tensor` of the same type and shape as `logits`. name: A name for the operation (optional). Returns: A `Tensor` of the same shape as `logits` with the componentwise logistic losses. Raises: ValueError: If `logits` and `targets` do not have the same shape. """ with ops.op_scope([logits, targets], name, "logistic_loss") as name: logits = ops.convert_to_tensor(logits, name="logits") targets = ops.convert_to_tensor(targets, name="targets") try: targets.get_shape().merge_with(logits.get_shape()) except ValueError: raise ValueError( "logits and targets must have the same shape (%s vs %s)" % (logits.get_shape(), targets.get_shape())) # The logistic loss formula from above is # x - x * z + log(1 + exp(-x)) # For x < 0, a more numerically stable formula is # -x * z + log(1 + exp(x)) # Note that these two expressions can be combined into the following: # max(x, 0) - x * z + log(1 + exp(-abs(x))) # To allow computing gradients at zero, we define custom versions of max and # abs functions. zeros = array_ops.zeros_like(logits, dtype=logits.dtype) cond = (logits >= zeros) relu_logits = math_ops.select(cond, logits, zeros) neg_abs_logits = math_ops.select(cond, -logits, logits) return math_ops.add(relu_logits - logits * targets, math_ops.log(1 + math_ops.exp(neg_abs_logits)), name=name) def weighted_cross_entropy_with_logits(logits, targets, pos_weight, name=None): """Computes a weighted cross entropy. This is like `sigmoid_cross_entropy_with_logits()` except that `pos_weight`, allows one to trade off recall and precision by up- or down-weighting the cost of a positive error relative to a negative error. The usual cross-entropy cost is defined as: targets * -log(sigmoid(logits)) + (1 - targets) * -log(1 - sigmoid(logits)) The argument `pos_weight` is used as a multiplier for the positive targets: targets * -log(sigmoid(logits)) * pos_weight + (1 - targets) * -log(1 - sigmoid(logits)) For brevity, let `x = logits`, `z = targets`, `q = pos_weight`. 
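The numerically stable formulation derived in the sigmoid_cross_entropy_with_logits docstring can be checked directly with NumPy. Both helpers below are illustrative stand-ins rather than the TensorFlow op: the two forms agree for moderate logits, and the stable one stays finite where the naive one overflows.

import numpy as np

def naive_logistic_loss(x, z):
    # Direct transcription of z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x));
    # loses precision and overflows for large |x|.
    s = 1.0 / (1.0 + np.exp(-x))
    return z * -np.log(s) + (1 - z) * -np.log(1 - s)

def stable_logistic_loss(x, z):
    # The reformulation used above: max(x, 0) - x * z + log(1 + exp(-abs(x)))
    return np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))

x = np.array([-3.0, -0.5, 0.0, 2.0, 4.0])
z = np.array([0.0, 1.0, 1.0, 0.0, 1.0])
print(np.allclose(naive_logistic_loss(x, z), stable_logistic_loss(x, z)))  # True
print(stable_logistic_loss(np.array([1000.0]), np.array([0.0])))           # [1000.] -- finite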
The loss is: qz * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) = qz * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x))) = qz * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) = qz * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)) = (1 - z) * x + (qz + 1 - z) * log(1 + exp(-x)) = (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x)) Setting `l = (1 + (q - 1) * z)`, to ensure stability and avoid overflow, the implementation uses (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0)) `logits` and `targets` must have the same type and shape. Args: logits: A `Tensor` of type `float32` or `float64`. targets: A `Tensor` of the same type and shape as `logits`. pos_weight: A coefficient to use on the positive examples. name: A name for the operation (optional). Returns: A `Tensor` of the same shape as `logits` with the componentwise weightedlogistic losses. Raises: ValueError: If `logits` and `targets` do not have the same shape. """ with ops.op_scope([logits, targets], name, "logistic_loss") as name: logits = ops.convert_to_tensor(logits, name="logits") targets = ops.convert_to_tensor(targets, name="targets") try: targets.get_shape().merge_with(logits.get_shape()) except ValueError: raise ValueError( "logits and targets must have the same shape (%s vs %s)" % (logits.get_shape(), targets.get_shape())) # The logistic loss formula from above is # (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x)) # For x < 0, a more numerically stable formula is # (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(x)) - l * x # To avoid branching, we use the combined version # (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0)) log_weight = 1 + (pos_weight - 1) * targets return math_ops.add( (1 - targets) * logits, log_weight * (math_ops.log(1 + math_ops.exp(-math_ops.abs(logits))) + nn_ops.relu(-logits)), name=name) def relu_layer(x, weights, biases, name=None): """Computes Relu(x * weight + biases). Args: x: a 2D tensor. Dimensions typically: batch, in_units weights: a 2D tensor. Dimensions typically: in_units, out_units biases: a 1D tensor. Dimensions: out_units name: A name for the operation (optional). If not specified "nn_relu_layer" is used. Returns: A 2-D Tensor computing relu(matmul(x, weights) + biases). Dimensions typically: batch, out_units. """ with ops.op_scope([x, weights, biases], name, "relu_layer") as name: x = ops.convert_to_tensor(x, name="x") weights = ops.convert_to_tensor(weights, name="weights") biases = ops.convert_to_tensor(biases, name="biases") xw_plus_b = nn_ops.bias_add(math_ops.matmul(x, weights), biases) return nn_ops.relu(xw_plus_b, name=name) def l2_normalize(x, dim, epsilon=1e-12, name=None): """Normalizes along dimension `dim` using an L2 norm. For a 1-D tensor with `dim = 0`, computes output = x / sqrt(max(sum(x**2), epsilon)) For `x` with more dimensions, independently normalizes each 1-D slice along dimension `dim`. Args: x: A `Tensor`. dim: Dimension along which to normalize. epsilon: A lower bound value for the norm. Will use `sqrt(epsilon)` as the divisor if `norm < sqrt(epsilon)`. name: A name for this operation (optional). Returns: A `Tensor` with the same shape as `x`. """ with ops.op_scope([x], name, "l2_normalize") as name: x = ops.convert_to_tensor(x, name="x") square_sum = math_ops.reduce_sum(math_ops.square(x), [dim], keep_dims=True) x_inv_norm = math_ops.rsqrt(math_ops.maximum(square_sum, epsilon)) return math_ops.mul(x, x_inv_norm, name=name) def zero_fraction(value, name=None): """Returns the fraction of zeros in `value`. 
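A NumPy transcription of the l2_normalize computation above (l2_normalize_np is a hypothetical helper, not the TensorFlow function) makes the role of the epsilon floor visible: an all-zero slice comes back as zeros instead of NaNs.

import numpy as np

def l2_normalize_np(x, dim, epsilon=1e-12):
    # output = x / sqrt(max(sum(x**2), epsilon)), normalized along `dim`.
    square_sum = np.sum(np.square(x), axis=dim, keepdims=True)
    return x * (1.0 / np.sqrt(np.maximum(square_sum, epsilon)))

x = np.array([[3.0, 4.0],
              [0.0, 0.0]])           # the zero row exercises the epsilon floor
y = l2_normalize_np(x, dim=1)
print(y)                             # [[0.6 0.8] [0. 0.]]
print(np.linalg.norm(y[0]))          # 1.0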
If `value` is empty, the result is `nan`. This is useful in summaries to measure and report sparsity. For example, z = tf.Relu(...) summ = tf.scalar_summary('sparsity', tf.nn.zero_fraction(z)) Args: value: A tensor of numeric type. name: A name for the operation (optional). Returns: The fraction of zeros in `value`, with type `float32`. """ with ops.op_scope([value], name, "zero_fraction"): value = ops.convert_to_tensor(value, name="value") zero = constant_op.constant(0, dtype=value.dtype, name="zero") return math_ops.reduce_mean(math_ops.cast(math_ops.equal(value, zero), dtypes.float32)) def depthwise_conv2d(input, filter, strides, padding, name=None): """Depthwise 2-D convolution. Given an input tensor of shape `[batch, in_height, in_width, in_channels]` and a filter tensor of shape `[filter_height, filter_width, in_channels, channel_multiplier]` containing `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies a different filter to each input channel (expanding from 1 channel to `channel_multiplier` channels for each), then concatenates the results together. The output has `in_channels * channel_multiplier` channels. In detail, output[b, i, j, k * channel_multiplier + q] = sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] * filter[di, dj, k, q] Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`. Args: input: 4-D with shape `[batch, in_height, in_width, in_channels]`. filter: 4-D with shape `[filter_height, filter_width, in_channels, channel_multiplier]`. strides: 1-D of size 4. The stride of the sliding window for each dimension of `input`. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the [comment here](https://www.tensorflow.org/api_docs/python/nn.html#convolution) name: A name for this operation (optional). Returns: A 4-D `Tensor` of shape `[batch, out_height, out_width, in_channels * channel_multiplier].` """ with ops.op_scope([input, filter], name, "depthwise") as name: input = ops.convert_to_tensor(input, name="tensor_in") filter = ops.convert_to_tensor(filter, name="filter_in") # A shape is required to statically compute the number of separable filters. if filter.get_shape().ndims is not None: assert len(filter.get_shape()) == 4 in_channels = filter.get_shape()[2] # Sanity checks, if shape information is available for the inputs. if input.get_shape().ndims is not None: assert len(input.get_shape()) == 4 assert input.get_shape()[3] == in_channels, ( "Mismatched input depth %d and number of depthwise filters %d." % ( input.get_shape()[3].value, in_channels)) else: assert input.get_shape().ndims is not None, ( "Either tensor must provide static shape information.") assert input.get_shape().ndims == 4 in_channels = input.get_shape()[3] if in_channels == 1: return nn_ops.conv2d(input, filter, strides, padding, name=name) else: return nn_ops.depthwise_conv2d_native(input, filter, strides, padding, name=name) def separable_conv2d(input, depthwise_filter, pointwise_filter, strides, padding, name=None): """2-D convolution with separable filters. Performs a depthwise convolution that acts separately on channels followed by a pointwise convolution that mixes channels. Note that this is separability between dimensions `[1, 2]` and `3`, not spatial separability between dimensions `1` and `2`. 
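The channel layout in the depthwise_conv2d docstring -- output channel k * channel_multiplier + q -- can be reproduced with a deliberately naive NumPy loop. depthwise_conv2d_np is an illustrative sketch restricted to stride 1 and 'VALID' padding; it is not the native kernel.

import numpy as np

def depthwise_conv2d_np(x, f):
    # x: [batch, H, W, in_channels]; f: [fh, fw, in_channels, channel_multiplier]
    # output[b, i, j, k * cm + q] = sum_{di, dj} x[b, i+di, j+dj, k] * f[di, dj, k, q]
    b, H, W, C = x.shape
    fh, fw, _, cm = f.shape
    oh, ow = H - fh + 1, W - fw + 1
    out = np.zeros((b, oh, ow, C * cm))
    for k in range(C):                 # each input channel is filtered independently
        for q in range(cm):            # ...into channel_multiplier output channels
            for i in range(oh):
                for j in range(ow):
                    patch = x[:, i:i + fh, j:j + fw, k]
                    out[:, i, j, k * cm + q] = np.sum(patch * f[:, :, k, q], axis=(1, 2))
    return out

x = np.random.rand(1, 5, 5, 3)
f = np.random.rand(2, 2, 3, 2)
print(depthwise_conv2d_np(x, f).shape)   # (1, 4, 4, 6): in_channels * channel_multiplier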
In detail, output[b, i, j, k] = sum_{di, dj, q, r] input[b, strides[1] * i + di, strides[2] * j + dj, q] * depthwise_filter[di, dj, q, r] * pointwise_filter[0, 0, q * channel_multiplier + r, k] `strides` controls the strides for the depthwise convolution only, since the pointwise convolution has implicit strides of `[1, 1, 1, 1]`. Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`. Args: input: 4-D `Tensor` with shape `[batch, in_height, in_width, in_channels]`. depthwise_filter: 4-D `Tensor` with shape `[filter_height, filter_width, in_channels, channel_multiplier]`. Contains `in_channels` convolutional filters of depth 1. pointwise_filter: 4-D `Tensor` with shape `[1, 1, channel_multiplier * in_channels, out_channels]`. Pointwise filter to mix channels after `depthwise_filter` has convolved spatially. strides: 1-D of size 4. The strides for the depthwise convolution for each dimension of `input`. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the [comment here](https://www.tensorflow.org/api_docs/python/nn.html#convolution) name: A name for this operation (optional). Returns: A 4-D `Tensor` of shape `[batch, out_height, out_width, out_channels]`. Raises: ValueError: If channel_multiplier * in_channels > out_channels, which means that the separable convolution is overparameterized. """ with ops.op_scope([input, depthwise_filter, pointwise_filter], name, "separable_conv2d") as name: input = ops.convert_to_tensor(input, name="tensor_in") depthwise_filter = ops.convert_to_tensor(depthwise_filter, name="depthwise_filter") pointwise_filter = ops.convert_to_tensor(pointwise_filter, name="pointwise_filter") if pointwise_filter.get_shape().ndims is not None: assert len(pointwise_filter.get_shape()) == 4 assert pointwise_filter.get_shape()[0] == 1 assert pointwise_filter.get_shape()[1] == 1 if depthwise_filter.get_shape().ndims and input.get_shape().ndims: channel_multiplier = depthwise_filter.get_shape()[3] in_channels = input.get_shape()[3] out_channels = pointwise_filter.get_shape()[3] if channel_multiplier * in_channels > out_channels: raise ValueError( ("Refusing to perform an overparameterized separable " "convolution: channel_multiplier * in_channels = " "%d * %d = %d > %d = out_channels" % (channel_multiplier, in_channels, channel_multiplier * in_channels, out_channels))) # The layout of the ops in the graph are expected to be as follows: # depthwise_conv2d // Conv2D op corresponding to native deptwise conv. # separable_conv2d // Conv2D op corresponding to the pointwise conv. depthwise = nn_ops.depthwise_conv2d_native(input, depthwise_filter, strides, padding, name="depthwise") return nn_ops.conv2d(depthwise, pointwise_filter, [1, 1, 1, 1], padding="VALID", name=name) def sufficient_statistics(x, axes, shift=None, keep_dims=False, name=None): """Calculate the sufficient statistics for the mean and variance of `x`. These sufficient statistics are computed using the one pass algorithm on an input that's optionally shifted. See: https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data Args: x: A `Tensor`. axes: Array of ints. Axes along which to compute mean and variance. shift: A `Tensor` containing the value by which to shift the data for numerical stability, or `None` if no shift is to be performed. A shift close to the true mean provides the most numerically stable results. 
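One way to see why the depthwise/pointwise factorization is attractive, and what the overparameterization check above guards against, is to count parameters. The figures below are purely illustrative.

filter_height, filter_width = 3, 3
in_channels, channel_multiplier, out_channels = 32, 1, 64

# A full conv2d filter mixes space and channels in one tensor.
full_conv_params = filter_height * filter_width * in_channels * out_channels
# The separable form uses a spatial depthwise filter plus a 1x1 pointwise filter.
separable_params = (filter_height * filter_width * in_channels * channel_multiplier
                    + 1 * 1 * in_channels * channel_multiplier * out_channels)
print(full_conv_params, separable_params)                 # 18432 2336
# The ValueError above fires when channel_multiplier * in_channels > out_channels.
print(channel_multiplier * in_channels <= out_channels)   # True: not overparameterized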
keep_dims: produce statistics with the same dimensionality as the input. name: Name used to scope the operations that compute the sufficient stats. Returns: Four `Tensor` objects of the same type as `x`: * the count (number of elements to average over). * the (possibly shifted) sum of the elements in the array. * the (possibly shifted) sum of squares of the elements in the array. * the shift by which the mean must be corrected or None if `shift` is None. """ with ops.op_scope([x, axes, shift], name, "sufficient_statistics"): x = ops.convert_to_tensor(x, name="x") x_shape = x.get_shape() if x_shape.is_fully_defined(): counts = 1 m_shape = [] for d in xrange(x_shape.ndims): dim = x_shape[d].value if d in set(axes): counts *= dim dim = 1 m_shape.append(dim) counts = constant_op.constant(counts, dtype=x.dtype) else: # shape needs to be inferred at runtime. x_shape = array_ops.shape(x) select_axes = sparse_ops.sparse_to_dense(axes, array_ops.shape(x_shape), True, False) m_shape = math_ops.select(select_axes, array_ops.ones_like(x_shape), x_shape) counts = math_ops.cast( math_ops.reduce_prod(x_shape / m_shape), x.dtype, name="count") if shift is not None: shift = ops.convert_to_tensor(shift, name="shift") m_ss = math_ops.sub(x, shift) v_ss = math_ops.squared_difference(x, shift) else: # no shift. m_ss = x v_ss = math_ops.square(x) m_ss = math_ops.reduce_sum(m_ss, axes, keep_dims=keep_dims, name="mean_ss") v_ss = math_ops.reduce_sum(v_ss, axes, keep_dims=keep_dims, name="var_ss") return counts, m_ss, v_ss, shift def normalize_moments(counts, mean_ss, variance_ss, shift, name=None): """Calculate the mean and variance of based on the sufficient statistics. Args: counts: A `Tensor` containing a the total count of the data (one value). mean_ss: A `Tensor` containing the mean sufficient statistics: the (possibly shifted) sum of the elements to average over. variance_ss: A `Tensor` containing the variance sufficient statistics: the (possibly shifted) squared sum of the data to compute the variance over. shift: A `Tensor` containing the value by which the data is shifted for numerical stability, or `None` if no shift was performed. name: Name used to scope the operations that compute the moments. Returns: Two `Tensor` objects: `mean` and `variance`. """ with ops.op_scope([counts, mean_ss, variance_ss, shift], name, "normalize"): divisor = math_ops.inv(counts, name="divisor") if shift is not None: shifted_mean = math_ops.mul(mean_ss, divisor, name="shifted_mean") mean = math_ops.add(shifted_mean, shift, name="mean") else: # no shift. shifted_mean = math_ops.mul(mean_ss, divisor, name="mean") mean = shifted_mean variance = math_ops.sub( math_ops.mul(variance_ss, divisor), math_ops.square(shifted_mean), name="variance") return (mean, variance) def moments(x, axes, shift=None, name=None, keep_dims=False): """Calculate the mean and variance of `x`. The mean and variance are calculated by aggregating the contents of `x` across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean and variance of a vector. When using these moments for batch normalization (see `tf.nn.batch_normalization`): * for so-called "global normalization", used with convolutional filters with shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`. * for simple batch normalization pass `axes=[0]` (batch only). Args: x: A `Tensor`. axes: array of ints. Axes along which to compute mean and variance. 
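The relationship between sufficient_statistics and normalize_moments is easy to verify numerically: the shifted one-pass statistics recover the ordinary mean and variance. Both helpers below are NumPy sketches written for illustration, not the TensorFlow functions.

import numpy as np

def sufficient_statistics_np(x, axes, shift=None):
    # Count, (possibly shifted) sum, and (possibly shifted) sum of squares.
    counts = np.prod([x.shape[a] for a in axes])
    centered = x - shift if shift is not None else x
    m_ss = centered.sum(axis=tuple(axes))
    v_ss = np.square(centered).sum(axis=tuple(axes))
    return counts, m_ss, v_ss, shift

def normalize_moments_np(counts, mean_ss, variance_ss, shift):
    divisor = 1.0 / counts
    shifted_mean = mean_ss * divisor
    mean = shifted_mean + shift if shift is not None else shifted_mean
    variance = variance_ss * divisor - np.square(shifted_mean)
    return mean, variance

x = np.random.rand(4, 3)
mean, var = normalize_moments_np(*sufficient_statistics_np(x, axes=[0], shift=0.5))
print(np.allclose(mean, x.mean(axis=0)), np.allclose(var, x.var(axis=0)))  # True True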
shift: A `Tensor` containing the value by which to shift the data for numerical stability, or `None` if no shift is to be performed. A shift close to the true mean provides the most numerically stable results. keep_dims: produce moments with the same dimensionality as the input. name: Name used to scope the operations that compute the moments. Returns: Two `Tensor` objects: `mean` and `variance`. """ with ops.op_scope([x, axes, shift], name, "moments"): # The dynamic range of fp16 is too limited to support the collection of # sufficient statistics. As a workaround we simply perform the operations # on 32-bit floats before converting the mean and variance back to fp16 y = math_ops.cast(x, dtypes.float32) if x.dtype == dtypes.float16 else x counts, m_ss, v_ss, shift = sufficient_statistics(y, axes, shift=shift, keep_dims=keep_dims, name=name) with ops.control_dependencies([counts, m_ss, v_ss]): mean, variance = normalize_moments(counts, m_ss, v_ss, shift, name=name) if x.dtype == dtypes.float16: return (math_ops.cast(mean, dtypes.float16), math_ops.cast( variance, dtypes.float16)) else: return (mean, variance) def batch_normalization(x, mean, variance, offset, scale, variance_epsilon, name=None): """Batch normalization. As described in http://arxiv.org/abs/1502.03167. Normalizes a tensor by `mean` and `variance`, and applies (optionally) a `scale` \\\\(\gamma\\\\) to it, as well as an `offset` \\\\(\\beta\\\\): \\\\(\\frac{\gamma(x-\mu)}{\sigma}+\\beta\\\\) `mean`, `variance`, `offset` and `scale` are all expected to be of one of two shapes: * In all generality, they can have the same number of dimensions as the input `x`, with identical sizes as `x` for the dimensions that are not normalized over (the 'depth' dimension(s)), and dimension 1 for the others which are being normalized over. `mean` and `variance` in this case would typically be the outputs of `tf.nn.moments(..., keep_dims=True)` during training, or running averages thereof during inference. * In the common case where the 'depth' dimension is the last dimension in the input tensor `x`, they may be one dimensional tensors of the same size as the 'depth' dimension. This is the case for example for the common `[batch, depth]` layout of fully-connected layers, and `[batch, height, width, depth]` for convolutions. `mean` and `variance` in this case would typically be the outputs of `tf.nn.moments(..., keep_dims=False)` during training, or running averages thereof during inference. Args: x: Input `Tensor` of arbitrary dimensionality. mean: A mean `Tensor`. variance: A variance `Tensor`. offset: An offset `Tensor`, often denoted \\\\(\\beta\\\\) in equations, or None. If present, will be added to the normalized tensor. scale: A scale `Tensor`, often denoted \\\\(\gamma\\\\) in equations, or `None`. If present, the scale is applied to the normalized tensor. variance_epsilon: A small float number to avoid dividing by 0. name: A name for this operation (optional). Returns: the normalized, scaled, offset tensor. """ with ops.op_scope([x, mean, variance, scale, offset], name, "batchnorm"): inv = math_ops.rsqrt(variance + variance_epsilon) if scale is not None: inv *= scale return x * inv + ( offset - mean * inv if offset is not None else -mean * inv) def batch_norm_with_global_normalization(t, m, v, beta, gamma, variance_epsilon, scale_after_normalization, name=None): """Batch normalization. This op is deprecated. See `tf.nn.batch_normalization`. Args: t: A 4D input Tensor. m: A 1D mean Tensor with size matching the last dimension of t. 
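The batch_normalization arithmetic above factors everything through a single reciprocal square root; the NumPy sketch below (a hypothetical helper, not the TensorFlow op) shows the common [batch, depth] case, with moments taken over the batch axis as the docstring suggests.

import numpy as np

def batch_normalization_np(x, mean, variance, offset, scale, variance_epsilon):
    # y = scale * (x - mean) / sqrt(variance + eps) + offset, with inv folded in once.
    inv = 1.0 / np.sqrt(variance + variance_epsilon)
    if scale is not None:
        inv = inv * scale
    return x * inv + (offset - mean * inv if offset is not None else -mean * inv)

x = np.random.rand(32, 8)                      # [batch, depth]
mean, var = x.mean(axis=0), x.var(axis=0)      # what moments(x, [0]) would produce
y = batch_normalization_np(x, mean, var, offset=None, scale=None,
                           variance_epsilon=1e-3)
print(y.mean(axis=0).round(6))                 # ~0 per depth column
# The per-column variance lands slightly below 1 because of variance_epsilon.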
This is the first output from tf.nn.moments, or a saved moving average thereof. v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof. beta: A 1D beta Tensor with size matching the last dimension of t. An offset to be added to the normalized tensor. gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this tensor will be multiplied with the normalized tensor. variance_epsilon: A small float number to avoid dividing by 0. scale_after_normalization: A bool indicating whether the resulted tensor needs to be multiplied with gamma. name: A name for this operation (optional). Returns: A batch-normalized `t`. """ return batch_normalization(t, m, v, beta, gamma if scale_after_normalization else None, variance_epsilon, name) def _sum_rows(x): """Returns a vector summing up each row of the matrix x.""" # _sum_rows(x) is equivalent to math_ops.reduce_sum(x, 1) when x is # a matrix. The gradient of _sum_rows(x) is more efficient than # reduce_sum(x, 1)'s gradient in today's implementation. Therefore, # we use _sum_rows(x) in the nce_loss() computation since the loss # is mostly used for training. cols = array_ops.shape(x)[1] ones_shape = array_ops.pack([cols, 1]) ones = array_ops.ones(ones_shape, x.dtype) return array_ops.reshape(math_ops.matmul(x, ones), [-1]) def _compute_sampled_logits(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, subtract_log_q=True, remove_accidental_hits=False, partition_strategy="mod", name=None): """Helper function for nce_loss and sampled_softmax_loss functions. Computes sampled output training logits and labels suitable for implementing e.g. noise-contrastive estimation (see nce_loss) or sampled softmax (see sampled_softmax_loss). Note: In the case where num_true > 1, we assign to each target class the target probability 1 / num_true so that the target probabilities sum to 1 per-example. Args: weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor` objects whose concatenation along dimension 0 has shape `[num_classes, dim]`. The (possibly-partitioned) class embeddings. biases: A `Tensor` of shape `[num_classes]`. The class biases. inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network. labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. Note that this format differs from the `labels` argument of `nn.softmax_cross_entropy_with_logits`. num_sampled: An `int`. The number of classes to randomly sample per batch. num_classes: An `int`. The number of possible classes. num_true: An `int`. The number of target classes per training example. sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`, `sampled_expected_count`) returned by a `*_candidate_sampler` function. (if None, we default to `log_uniform_candidate_sampler`) subtract_log_q: A `bool`. whether to subtract the log expected count of the labels in the sample to get the logits of the true labels. Default is True. Turn off for Negative Sampling. remove_accidental_hits: A `bool`. whether to remove "accidental hits" where a sampled class equals one of the target classes. Default is False. partition_strategy: A string specifying the partitioning strategy, relevant if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported. Default is `"mod"`. See `tf.nn.embedding_lookup` for more details. 
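The identity that _sum_rows relies on -- multiplying by a column of ones and reshaping gives the same numbers as reducing over axis 1 -- is a two-line NumPy check; the comment's point about gradient efficiency is the motivation for the matmul form, not something demonstrated here.

import numpy as np

x = np.arange(12, dtype=np.float64).reshape(3, 4)
ones = np.ones((x.shape[1], 1), dtype=x.dtype)
matmul_rows = np.matmul(x, ones).reshape(-1)        # the _sum_rows-style computation
print(np.array_equal(matmul_rows, x.sum(axis=1)))   # True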
name: A name for the operation (optional). Returns: out_logits, out_labels: `Tensor` objects each with shape `[batch_size, num_true + num_sampled]`, for passing to either `nn.sigmoid_cross_entropy_with_logits` (NCE) or `nn.softmax_cross_entropy_with_logits` (sampled softmax). """ if not isinstance(weights, list): weights = [weights] with ops.op_scope( weights + [biases, inputs, labels], name, "compute_sampled_logits"): if labels.dtype != dtypes.int64: labels = math_ops.cast(labels, dtypes.int64) labels_flat = array_ops.reshape(labels, [-1]) # Sample the negative labels. # sampled shape: [num_sampled] tensor # true_expected_count shape = [batch_size, 1] tensor # sampled_expected_count shape = [num_sampled] tensor if sampled_values is None: sampled_values = candidate_sampling_ops.log_uniform_candidate_sampler( true_classes=labels, num_true=num_true, num_sampled=num_sampled, unique=True, range_max=num_classes) # NOTE: pylint cannot tell that 'sampled_values' is a sequence # pylint: disable=unpacking-non-sequence sampled, true_expected_count, sampled_expected_count = sampled_values # pylint: enable=unpacking-non-sequence # labels_flat is a [batch_size * num_true] tensor # sampled is a [num_sampled] int tensor all_ids = array_ops.concat(0, [labels_flat, sampled]) # weights shape is [num_classes, dim] all_w = embedding_ops.embedding_lookup( weights, all_ids, partition_strategy=partition_strategy) all_b = embedding_ops.embedding_lookup(biases, all_ids) # true_w shape is [batch_size * num_true, dim] # true_b is a [batch_size * num_true] tensor true_w = array_ops.slice( all_w, [0, 0], array_ops.pack([array_ops.shape(labels_flat)[0], -1])) true_b = array_ops.slice(all_b, [0], array_ops.shape(labels_flat)) # inputs shape is [batch_size, dim] # true_w shape is [batch_size * num_true, dim] # row_wise_dots is [batch_size, num_true, dim] dim = array_ops.shape(true_w)[1:2] new_true_w_shape = array_ops.concat(0, [[-1, num_true], dim]) row_wise_dots = math_ops.mul( array_ops.expand_dims(inputs, 1), array_ops.reshape(true_w, new_true_w_shape)) # We want the row-wise dot plus biases which yields a # [batch_size, num_true] tensor of true_logits. dots_as_matrix = array_ops.reshape(row_wise_dots, array_ops.concat(0, [[-1], dim])) true_logits = array_ops.reshape(_sum_rows(dots_as_matrix), [-1, num_true]) true_b = array_ops.reshape(true_b, [-1, num_true]) true_logits += true_b # Lookup weights and biases for sampled labels. # sampled_w shape is [num_sampled, dim] # sampled_b is a [num_sampled] float tensor sampled_w = array_ops.slice( all_w, array_ops.pack([array_ops.shape(labels_flat)[0], 0]), [-1, -1]) sampled_b = array_ops.slice(all_b, array_ops.shape(labels_flat), [-1]) # inputs has shape [batch_size, dim] # sampled_w has shape [num_sampled, dim] # sampled_b has shape [num_sampled] # Apply X*W'+B, which yields [batch_size, num_sampled] sampled_logits = math_ops.matmul(inputs, sampled_w, transpose_b=True) + sampled_b if remove_accidental_hits: acc_hits = candidate_sampling_ops.compute_accidental_hits( labels, sampled, num_true=num_true) acc_indices, acc_ids, acc_weights = acc_hits # This is how SparseToDense expects the indices. 
acc_indices_2d = array_ops.reshape(acc_indices, [-1, 1]) acc_ids_2d_int32 = array_ops.reshape(math_ops.cast( acc_ids, dtypes.int32), [-1, 1]) sparse_indices = array_ops.concat( 1, [acc_indices_2d, acc_ids_2d_int32], "sparse_indices") # Create sampled_logits_shape = [batch_size, num_sampled] sampled_logits_shape = array_ops.concat( 0, [array_ops.shape(labels)[:1], array_ops.expand_dims(num_sampled, 0)]) if sampled_logits.dtype != acc_weights.dtype: acc_weights = math_ops.cast(acc_weights, sampled_logits.dtype) sampled_logits += sparse_ops.sparse_to_dense( sparse_indices, sampled_logits_shape, acc_weights, default_value=0.0, validate_indices=False) if subtract_log_q: # Subtract log of Q(l), prior probability that l appears in sampled. true_logits -= math_ops.log(true_expected_count) sampled_logits -= math_ops.log(sampled_expected_count) # Construct output logits and labels. The true labels/logits start at col 0. out_logits = array_ops.concat(1, [true_logits, sampled_logits]) # true_logits is a float tensor, ones_like(true_logits) is a float tensor # of ones. We then divide by num_true to ensure the per-example labels sum # to 1.0, i.e. form a proper probability distribution. out_labels = array_ops.concat( 1, [array_ops.ones_like(true_logits) / num_true, array_ops.zeros_like(sampled_logits)]) return out_logits, out_labels def nce_loss(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, remove_accidental_hits=False, partition_strategy="mod", name="nce_loss"): """Computes and returns the noise-contrastive estimation training loss. See [Noise-contrastive estimation: A new estimation principle for unnormalized statistical models] (http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf). Also see our [Candidate Sampling Algorithms Reference] (../../extras/candidate_sampling.pdf) Note: In the case where `num_true` > 1, we assign to each target class the target probability 1 / `num_true` so that the target probabilities sum to 1 per-example. Note: It would be useful to allow a variable number of target classes per example. We hope to provide this functionality in a future release. For now, if you have a variable number of target classes, you can pad them out to a constant number by either repeating them or by padding with an otherwise unused class. Args: weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor` objects whose concatenation along dimension 0 has shape [num_classes, dim]. The (possibly-partitioned) class embeddings. biases: A `Tensor` of shape `[num_classes]`. The class biases. inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network. labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. num_sampled: An `int`. The number of classes to randomly sample per batch. num_classes: An `int`. The number of possible classes. num_true: An `int`. The number of target classes per training example. sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`, `sampled_expected_count`) returned by a `*_candidate_sampler` function. (if None, we default to `log_uniform_candidate_sampler`) remove_accidental_hits: A `bool`. Whether to remove "accidental hits" where a sampled class equals one of the target classes. If set to `True`, this is a "Sampled Logistic" loss instead of NCE, and we are learning to generate log-odds instead of log probabilities. See our [Candidate Sampling Algorithms Reference] (../../extras/candidate_sampling.pdf). 
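The label construction at the end of _compute_sampled_logits is worth seeing concretely: the true classes occupy the first num_true columns with probability 1 / num_true each, so every row of out_labels sums to 1. The values below are arbitrary illustrative stand-ins.

import numpy as np

batch_size, num_true, num_sampled = 3, 2, 5
true_logits = np.random.randn(batch_size, num_true)
sampled_logits = np.random.randn(batch_size, num_sampled)
# True labels/logits go first, then the sampled ones, mirroring the concat above.
out_logits = np.concatenate([true_logits, sampled_logits], axis=1)
out_labels = np.concatenate([np.ones_like(true_logits) / num_true,
                             np.zeros_like(sampled_logits)], axis=1)
print(out_logits.shape, out_labels.sum(axis=1))   # (3, 7) [1. 1. 1.]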
Default is False. partition_strategy: A string specifying the partitioning strategy, relevant if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported. Default is `"mod"`. See `tf.nn.embedding_lookup` for more details. name: A name for the operation (optional). Returns: A `batch_size` 1-D tensor of per-example NCE losses. """ logits, labels = _compute_sampled_logits( weights, biases, inputs, labels, num_sampled, num_classes, num_true=num_true, sampled_values=sampled_values, subtract_log_q=True, remove_accidental_hits=remove_accidental_hits, partition_strategy=partition_strategy, name=name) sampled_losses = sigmoid_cross_entropy_with_logits(logits, labels, name="sampled_losses") # sampled_losses is batch_size x {true_loss, sampled_losses...} # We sum out true and sampled losses. return _sum_rows(sampled_losses) def sampled_softmax_loss(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, remove_accidental_hits=True, partition_strategy="mod", name="sampled_softmax_loss"): """Computes and returns the sampled softmax training loss. This is a faster way to train a softmax classifier over a huge number of classes. This operation is for training only. It is generally an underestimate of the full softmax loss. At inference time, you can compute full softmax probabilities with the expression `tf.nn.softmax(tf.matmul(inputs, tf.transpose(weights)) + biases)`. See our [Candidate Sampling Algorithms Reference] (../../extras/candidate_sampling.pdf) Also see Section 3 of [Jean et al., 2014](http://arxiv.org/abs/1412.2007) ([pdf](http://arxiv.org/pdf/1412.2007.pdf)) for the math. Args: weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor` objects whose concatenation along dimension 0 has shape [num_classes, dim]. The (possibly-sharded) class embeddings. biases: A `Tensor` of shape `[num_classes]`. The class biases. inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network. labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. Note that this format differs from the `labels` argument of `nn.softmax_cross_entropy_with_logits`. num_sampled: An `int`. The number of classes to randomly sample per batch. num_classes: An `int`. The number of possible classes. num_true: An `int`. The number of target classes per training example. sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`, `sampled_expected_count`) returned by a `*_candidate_sampler` function. (if None, we default to `log_uniform_candidate_sampler`) remove_accidental_hits: A `bool`. whether to remove "accidental hits" where a sampled class equals one of the target classes. Default is True. partition_strategy: A string specifying the partitioning strategy, relevant if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported. Default is `"mod"`. See `tf.nn.embedding_lookup` for more details. name: A name for the operation (optional). Returns: A `batch_size` 1-D tensor of per-example sampled softmax losses. """ logits, labels = _compute_sampled_logits( weights, biases, inputs, labels, num_sampled, num_classes, num_true=num_true, sampled_values=sampled_values, subtract_log_q=True, remove_accidental_hits=remove_accidental_hits, partition_strategy=partition_strategy, name=name) sampled_losses = nn_ops.softmax_cross_entropy_with_logits(logits, labels) # sampled_losses is a [batch_size] tensor. return sampled_losses # TODO(cwhipkey): sigmoid and tanh should not be exposed from tf.nn. 
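The sampled_softmax_loss docstring says full probabilities are recovered at inference time with softmax(matmul(inputs, transpose(weights)) + biases). Below is a NumPy sketch of that expression; the max-subtraction before exponentiating is an added stabilization, not part of the quoted formula.

import numpy as np

def full_softmax_probs(inputs, weights, biases):
    # Logits over all classes, then a row-wise softmax.
    logits = np.matmul(inputs, weights.T) + biases
    logits = logits - logits.max(axis=1, keepdims=True)   # keep exp() in range
    p = np.exp(logits)
    return p / p.sum(axis=1, keepdims=True)

num_classes, dim, batch = 1000, 16, 4
weights = np.random.randn(num_classes, dim)    # [num_classes, dim], as in the Args
biases = np.random.randn(num_classes)
inputs = np.random.randn(batch, dim)
probs = full_softmax_probs(inputs, weights, biases)
print(probs.shape, np.allclose(probs.sum(axis=1), 1.0))   # (4, 1000) True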
__all__ = make_all(__name__) __all__.append("zero_fraction") # documented in training.py # Modules whitelisted for reference through tf.nn. # TODO(cwhipkey): migrate callers to use the submodule directly. __all__.extend(["nn_ops", "rnn_cell", "seq2seq"]) # Symbols whitelisted for export without documentation. # TODO(cwhipkey): review these and move to contrib or expose through # documentation. __all__.extend([ "all_candidate_sampler", "batch_norm_with_global_normalization", "batch_normalization", "conv2d_backprop_filter", "conv2d_backprop_input", "depthwise_conv2d_native", "lrn", "relu_layer", "xw_plus_b", ])
42.152514
92
0.678258
from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import xrange from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import candidate_sampling_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_grad from tensorflow.python.ops import nn_ops from tensorflow.python.ops import numerics from tensorflow.python.ops import random_ops from tensorflow.python.ops import rnn_cell from tensorflow.python.ops import seq2seq from tensorflow.python.ops import sparse_ops from tensorflow.python.ops import variable_scope as vs from tensorflow.python.ops.math_ops import sigmoid from tensorflow.python.ops.math_ops import tanh from tensorflow.python.util.all_util import make_all from tensorflow.python.ops.ctc_ops import * from tensorflow.python.ops.nn_ops import * from tensorflow.python.ops.candidate_sampling_ops import * from tensorflow.python.ops.embedding_ops import * from tensorflow.python.ops.rnn import * def sigmoid_cross_entropy_with_logits(logits, targets, name=None): with ops.op_scope([logits, targets], name, "logistic_loss") as name: logits = ops.convert_to_tensor(logits, name="logits") targets = ops.convert_to_tensor(targets, name="targets") try: targets.get_shape().merge_with(logits.get_shape()) except ValueError: raise ValueError( "logits and targets must have the same shape (%s vs %s)" % (logits.get_shape(), targets.get_shape())) zeros = array_ops.zeros_like(logits, dtype=logits.dtype) cond = (logits >= zeros) relu_logits = math_ops.select(cond, logits, zeros) neg_abs_logits = math_ops.select(cond, -logits, logits) return math_ops.add(relu_logits - logits * targets, math_ops.log(1 + math_ops.exp(neg_abs_logits)), name=name) def weighted_cross_entropy_with_logits(logits, targets, pos_weight, name=None): with ops.op_scope([logits, targets], name, "logistic_loss") as name: logits = ops.convert_to_tensor(logits, name="logits") targets = ops.convert_to_tensor(targets, name="targets") try: targets.get_shape().merge_with(logits.get_shape()) except ValueError: raise ValueError( "logits and targets must have the same shape (%s vs %s)" % (logits.get_shape(), targets.get_shape())) log_weight = 1 + (pos_weight - 1) * targets return math_ops.add( (1 - targets) * logits, log_weight * (math_ops.log(1 + math_ops.exp(-math_ops.abs(logits))) + nn_ops.relu(-logits)), name=name) def relu_layer(x, weights, biases, name=None): with ops.op_scope([x, weights, biases], name, "relu_layer") as name: x = ops.convert_to_tensor(x, name="x") weights = ops.convert_to_tensor(weights, name="weights") biases = ops.convert_to_tensor(biases, name="biases") xw_plus_b = nn_ops.bias_add(math_ops.matmul(x, weights), biases) return nn_ops.relu(xw_plus_b, name=name) def l2_normalize(x, dim, epsilon=1e-12, name=None): with ops.op_scope([x], name, "l2_normalize") as name: x = ops.convert_to_tensor(x, name="x") square_sum = math_ops.reduce_sum(math_ops.square(x), [dim], keep_dims=True) x_inv_norm = math_ops.rsqrt(math_ops.maximum(square_sum, epsilon)) return math_ops.mul(x, x_inv_norm, name=name) def zero_fraction(value, name=None): with ops.op_scope([value], name, "zero_fraction"): value 
= ops.convert_to_tensor(value, name="value") zero = constant_op.constant(0, dtype=value.dtype, name="zero") return math_ops.reduce_mean(math_ops.cast(math_ops.equal(value, zero), dtypes.float32)) def depthwise_conv2d(input, filter, strides, padding, name=None): with ops.op_scope([input, filter], name, "depthwise") as name: input = ops.convert_to_tensor(input, name="tensor_in") filter = ops.convert_to_tensor(filter, name="filter_in") if filter.get_shape().ndims is not None: assert len(filter.get_shape()) == 4 in_channels = filter.get_shape()[2] if input.get_shape().ndims is not None: assert len(input.get_shape()) == 4 assert input.get_shape()[3] == in_channels, ( "Mismatched input depth %d and number of depthwise filters %d." % ( input.get_shape()[3].value, in_channels)) else: assert input.get_shape().ndims is not None, ( "Either tensor must provide static shape information.") assert input.get_shape().ndims == 4 in_channels = input.get_shape()[3] if in_channels == 1: return nn_ops.conv2d(input, filter, strides, padding, name=name) else: return nn_ops.depthwise_conv2d_native(input, filter, strides, padding, name=name) def separable_conv2d(input, depthwise_filter, pointwise_filter, strides, padding, name=None): with ops.op_scope([input, depthwise_filter, pointwise_filter], name, "separable_conv2d") as name: input = ops.convert_to_tensor(input, name="tensor_in") depthwise_filter = ops.convert_to_tensor(depthwise_filter, name="depthwise_filter") pointwise_filter = ops.convert_to_tensor(pointwise_filter, name="pointwise_filter") if pointwise_filter.get_shape().ndims is not None: assert len(pointwise_filter.get_shape()) == 4 assert pointwise_filter.get_shape()[0] == 1 assert pointwise_filter.get_shape()[1] == 1 if depthwise_filter.get_shape().ndims and input.get_shape().ndims: channel_multiplier = depthwise_filter.get_shape()[3] in_channels = input.get_shape()[3] out_channels = pointwise_filter.get_shape()[3] if channel_multiplier * in_channels > out_channels: raise ValueError( ("Refusing to perform an overparameterized separable " "convolution: channel_multiplier * in_channels = " "%d * %d = %d > %d = out_channels" % (channel_multiplier, in_channels, channel_multiplier * in_channels, out_channels))) depthwise = nn_ops.depthwise_conv2d_native(input, depthwise_filter, strides, padding, name="depthwise") return nn_ops.conv2d(depthwise, pointwise_filter, [1, 1, 1, 1], padding="VALID", name=name) def sufficient_statistics(x, axes, shift=None, keep_dims=False, name=None): with ops.op_scope([x, axes, shift], name, "sufficient_statistics"): x = ops.convert_to_tensor(x, name="x") x_shape = x.get_shape() if x_shape.is_fully_defined(): counts = 1 m_shape = [] for d in xrange(x_shape.ndims): dim = x_shape[d].value if d in set(axes): counts *= dim dim = 1 m_shape.append(dim) counts = constant_op.constant(counts, dtype=x.dtype) else: x_shape = array_ops.shape(x) select_axes = sparse_ops.sparse_to_dense(axes, array_ops.shape(x_shape), True, False) m_shape = math_ops.select(select_axes, array_ops.ones_like(x_shape), x_shape) counts = math_ops.cast( math_ops.reduce_prod(x_shape / m_shape), x.dtype, name="count") if shift is not None: shift = ops.convert_to_tensor(shift, name="shift") m_ss = math_ops.sub(x, shift) v_ss = math_ops.squared_difference(x, shift) else: m_ss = x v_ss = math_ops.square(x) m_ss = math_ops.reduce_sum(m_ss, axes, keep_dims=keep_dims, name="mean_ss") v_ss = math_ops.reduce_sum(v_ss, axes, keep_dims=keep_dims, name="var_ss") return counts, m_ss, v_ss, shift def normalize_moments(counts, 
mean_ss, variance_ss, shift, name=None): with ops.op_scope([counts, mean_ss, variance_ss, shift], name, "normalize"): divisor = math_ops.inv(counts, name="divisor") if shift is not None: shifted_mean = math_ops.mul(mean_ss, divisor, name="shifted_mean") mean = math_ops.add(shifted_mean, shift, name="mean") else: shifted_mean = math_ops.mul(mean_ss, divisor, name="mean") mean = shifted_mean variance = math_ops.sub( math_ops.mul(variance_ss, divisor), math_ops.square(shifted_mean), name="variance") return (mean, variance) def moments(x, axes, shift=None, name=None, keep_dims=False): with ops.op_scope([x, axes, shift], name, "moments"): y = math_ops.cast(x, dtypes.float32) if x.dtype == dtypes.float16 else x counts, m_ss, v_ss, shift = sufficient_statistics(y, axes, shift=shift, keep_dims=keep_dims, name=name) with ops.control_dependencies([counts, m_ss, v_ss]): mean, variance = normalize_moments(counts, m_ss, v_ss, shift, name=name) if x.dtype == dtypes.float16: return (math_ops.cast(mean, dtypes.float16), math_ops.cast( variance, dtypes.float16)) else: return (mean, variance) def batch_normalization(x, mean, variance, offset, scale, variance_epsilon, name=None): with ops.op_scope([x, mean, variance, scale, offset], name, "batchnorm"): inv = math_ops.rsqrt(variance + variance_epsilon) if scale is not None: inv *= scale return x * inv + ( offset - mean * inv if offset is not None else -mean * inv) def batch_norm_with_global_normalization(t, m, v, beta, gamma, variance_epsilon, scale_after_normalization, name=None): return batch_normalization(t, m, v, beta, gamma if scale_after_normalization else None, variance_epsilon, name) def _sum_rows(x): cols = array_ops.shape(x)[1] ones_shape = array_ops.pack([cols, 1]) ones = array_ops.ones(ones_shape, x.dtype) return array_ops.reshape(math_ops.matmul(x, ones), [-1]) def _compute_sampled_logits(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, subtract_log_q=True, remove_accidental_hits=False, partition_strategy="mod", name=None): if not isinstance(weights, list): weights = [weights] with ops.op_scope( weights + [biases, inputs, labels], name, "compute_sampled_logits"): if labels.dtype != dtypes.int64: labels = math_ops.cast(labels, dtypes.int64) labels_flat = array_ops.reshape(labels, [-1]) if sampled_values is None: sampled_values = candidate_sampling_ops.log_uniform_candidate_sampler( true_classes=labels, num_true=num_true, num_sampled=num_sampled, unique=True, range_max=num_classes) sampled, true_expected_count, sampled_expected_count = sampled_values all_ids = array_ops.concat(0, [labels_flat, sampled]) all_w = embedding_ops.embedding_lookup( weights, all_ids, partition_strategy=partition_strategy) all_b = embedding_ops.embedding_lookup(biases, all_ids) true_w = array_ops.slice( all_w, [0, 0], array_ops.pack([array_ops.shape(labels_flat)[0], -1])) true_b = array_ops.slice(all_b, [0], array_ops.shape(labels_flat)) dim = array_ops.shape(true_w)[1:2] new_true_w_shape = array_ops.concat(0, [[-1, num_true], dim]) row_wise_dots = math_ops.mul( array_ops.expand_dims(inputs, 1), array_ops.reshape(true_w, new_true_w_shape)) dots_as_matrix = array_ops.reshape(row_wise_dots, array_ops.concat(0, [[-1], dim])) true_logits = array_ops.reshape(_sum_rows(dots_as_matrix), [-1, num_true]) true_b = array_ops.reshape(true_b, [-1, num_true]) true_logits += true_b sampled_w = array_ops.slice( all_w, array_ops.pack([array_ops.shape(labels_flat)[0], 0]), [-1, -1]) sampled_b = array_ops.slice(all_b, array_ops.shape(labels_flat), 
[-1]) sampled_logits = math_ops.matmul(inputs, sampled_w, transpose_b=True) + sampled_b if remove_accidental_hits: acc_hits = candidate_sampling_ops.compute_accidental_hits( labels, sampled, num_true=num_true) acc_indices, acc_ids, acc_weights = acc_hits # This is how SparseToDense expects the indices. acc_indices_2d = array_ops.reshape(acc_indices, [-1, 1]) acc_ids_2d_int32 = array_ops.reshape(math_ops.cast( acc_ids, dtypes.int32), [-1, 1]) sparse_indices = array_ops.concat( 1, [acc_indices_2d, acc_ids_2d_int32], "sparse_indices") # Create sampled_logits_shape = [batch_size, num_sampled] sampled_logits_shape = array_ops.concat( 0, [array_ops.shape(labels)[:1], array_ops.expand_dims(num_sampled, 0)]) if sampled_logits.dtype != acc_weights.dtype: acc_weights = math_ops.cast(acc_weights, sampled_logits.dtype) sampled_logits += sparse_ops.sparse_to_dense( sparse_indices, sampled_logits_shape, acc_weights, default_value=0.0, validate_indices=False) if subtract_log_q: # Subtract log of Q(l), prior probability that l appears in sampled. true_logits -= math_ops.log(true_expected_count) sampled_logits -= math_ops.log(sampled_expected_count) # Construct output logits and labels. The true labels/logits start at col 0. out_logits = array_ops.concat(1, [true_logits, sampled_logits]) # true_logits is a float tensor, ones_like(true_logits) is a float tensor # of ones. We then divide by num_true to ensure the per-example labels sum # to 1.0, i.e. form a proper probability distribution. out_labels = array_ops.concat( 1, [array_ops.ones_like(true_logits) / num_true, array_ops.zeros_like(sampled_logits)]) return out_logits, out_labels def nce_loss(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, remove_accidental_hits=False, partition_strategy="mod", name="nce_loss"): logits, labels = _compute_sampled_logits( weights, biases, inputs, labels, num_sampled, num_classes, num_true=num_true, sampled_values=sampled_values, subtract_log_q=True, remove_accidental_hits=remove_accidental_hits, partition_strategy=partition_strategy, name=name) sampled_losses = sigmoid_cross_entropy_with_logits(logits, labels, name="sampled_losses") # sampled_losses is batch_size x {true_loss, sampled_losses...} # We sum out true and sampled losses. return _sum_rows(sampled_losses) def sampled_softmax_loss(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, remove_accidental_hits=True, partition_strategy="mod", name="sampled_softmax_loss"): logits, labels = _compute_sampled_logits( weights, biases, inputs, labels, num_sampled, num_classes, num_true=num_true, sampled_values=sampled_values, subtract_log_q=True, remove_accidental_hits=remove_accidental_hits, partition_strategy=partition_strategy, name=name) sampled_losses = nn_ops.softmax_cross_entropy_with_logits(logits, labels) # sampled_losses is a [batch_size] tensor. return sampled_losses # TODO(cwhipkey): sigmoid and tanh should not be exposed from tf.nn. __all__ = make_all(__name__) __all__.append("zero_fraction") # documented in training.py # Modules whitelisted for reference through tf.nn. # TODO(cwhipkey): migrate callers to use the submodule directly. __all__.extend(["nn_ops", "rnn_cell", "seq2seq"]) # Symbols whitelisted for export without documentation. # TODO(cwhipkey): review these and move to contrib or expose through # documentation. 
__all__.extend([ "all_candidate_sampler", "batch_norm_with_global_normalization", "batch_normalization", "conv2d_backprop_filter", "conv2d_backprop_input", "depthwise_conv2d_native", "lrn", "relu_layer", "xw_plus_b", ])
true
true
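The batch_normalization helper in the record above folds the normalization into one multiply-add: it precomputes inv = rsqrt(variance + epsilon), optionally multiplies it by scale, and returns x * inv + (offset - mean * inv). A minimal NumPy sketch of that algebra, with invented helper names and data (this is not the TensorFlow code, only a check that the rearranged form matches the textbook formula):

import numpy as np

def batchnorm_reference(x, mean, var, offset, scale, eps):
    # Textbook form: scale * (x - mean) / sqrt(var + eps) + offset
    return scale * (x - mean) / np.sqrt(var + eps) + offset

def batchnorm_fused(x, mean, var, offset, scale, eps):
    # Rearranged the way the snippet does it:
    # inv = rsqrt(var + eps) * scale, then x * inv + (offset - mean * inv)
    inv = scale / np.sqrt(var + eps)
    return x * inv + (offset - mean * inv)

x = np.array([0.5, 1.5, -2.0])
mean, var = x.mean(), x.var()
a = batchnorm_reference(x, mean, var, offset=0.1, scale=2.0, eps=1e-3)
b = batchnorm_fused(x, mean, var, offset=0.1, scale=2.0, eps=1e-3)
assert np.allclose(a, b)

The same rearrangement is what lets the original drop the offset term entirely when offset is None.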
7903076f9289940c5666a3312dc3329eddfa0677
8,230
py
Python
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_06_01/aio/operations/_vpn_sites_configuration_operations.py
rsdoherty/azure-sdk-for-python
6bba5326677468e6660845a703686327178bb7b1
[ "MIT" ]
3
2020-06-23T02:25:27.000Z
2021-09-07T18:48:11.000Z
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_06_01/aio/operations/_vpn_sites_configuration_operations.py
rsdoherty/azure-sdk-for-python
6bba5326677468e6660845a703686327178bb7b1
[ "MIT" ]
510
2019-07-17T16:11:19.000Z
2021-08-02T08:38:32.000Z
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_06_01/aio/operations/_vpn_sites_configuration_operations.py
rsdoherty/azure-sdk-for-python
6bba5326677468e6660845a703686327178bb7b1
[ "MIT" ]
5
2019-09-04T12:51:37.000Z
2020-09-16T07:28:40.000Z
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class VpnSitesConfigurationOperations: """VpnSitesConfigurationOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2020_06_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config async def _download_initial( self, resource_group_name: str, virtual_wan_name: str, request: "_models.GetVpnSitesConfigurationRequest", **kwargs ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-06-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._download_initial.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(request, 'GetVpnSitesConfigurationRequest') body_content_kwargs['content'] = body_content request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _download_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWANName}/vpnConfiguration'} # type: ignore async def begin_download( self, resource_group_name: str, virtual_wan_name: str, request: "_models.GetVpnSitesConfigurationRequest", **kwargs ) -> AsyncLROPoller[None]: """Gives the sas-url to download the configurations for vpn-sites in a resource group. :param resource_group_name: The resource group name. :type resource_group_name: str :param virtual_wan_name: The name of the VirtualWAN for which configuration of all vpn-sites is needed. :type virtual_wan_name: str :param request: Parameters supplied to download vpn-sites configuration. :type request: ~azure.mgmt.network.v2020_06_01.models.GetVpnSitesConfigurationRequest :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method, False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._download_initial( resource_group_name=resource_group_name, virtual_wan_name=virtual_wan_name, request=request, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_download.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWANName}/vpnConfiguration'} # type: ignore
50.490798
200
0.685541
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class VpnSitesConfigurationOperations: models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config async def _download_initial( self, resource_group_name: str, virtual_wan_name: str, request: "_models.GetVpnSitesConfigurationRequest", **kwargs ) -> None: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-06-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" url = self._download_initial.metadata['url'] path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') header_parameters = {} header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} body_content = self._serialize.body(request, 'GetVpnSitesConfigurationRequest') body_content_kwargs['content'] = body_content request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _download_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWANName}/vpnConfiguration'} async def begin_download( self, resource_group_name: str, virtual_wan_name: str, request: "_models.GetVpnSitesConfigurationRequest", **kwargs ) -> AsyncLROPoller[None]: polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) if cont_token is None: raw_result = await self._download_initial( resource_group_name=resource_group_name, virtual_wan_name=virtual_wan_name, request=request, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def 
get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_download.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWANName}/vpnConfiguration'}
true
true
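The begin_download docstring above spells out the polling keywords; callers usually reach this operation group through the multi-API client rather than instantiating the operations class directly. A hedged usage sketch, assuming azure-identity and azure-mgmt-network are installed; the angle-bracket placeholders and the exact models import path are assumptions and may differ between package versions:

import asyncio
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.network.aio import NetworkManagementClient
from azure.mgmt.network.models import GetVpnSitesConfigurationRequest

async def main():
    async with DefaultAzureCredential() as credential:
        async with NetworkManagementClient(credential, "<subscription-id>") as client:
            request = GetVpnSitesConfigurationRequest(
                vpn_sites=["<vpn-site-resource-id>"],
                output_blob_sas_url="<writable-blob-sas-url>",
            )
            poller = await client.vpn_sites_configuration.begin_download(
                "<resource-group>", "<virtual-wan-name>", request
            )
            await poller.result()   # returns None; the configuration is written to the blob

asyncio.run(main())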
790307f07fa4a5f5524d0083c9cf6207573d15d4
383
py
Python
sample_settings.py
avinassh/damaris
8c8f7922e4519f66124ff26ac7b107257f162185
[ "MIT" ]
null
null
null
sample_settings.py
avinassh/damaris
8c8f7922e4519f66124ff26ac7b107257f162185
[ "MIT" ]
null
null
null
sample_settings.py
avinassh/damaris
8c8f7922e4519f66124ff26ac7b107257f162185
[ "MIT" ]
null
null
null
# Telegram settings TG_CLI = '/opt/tg/bin/telegram-cli' TG_PUBKEY = '/opt/tg/tg-server.pub' RECEPIENT = '@your-tg-recepient' # Reddit App settings REDDIT_APP_KEY = 'c...w' REDDIT_APP_SECRET = 'T...c' REDDIT_USER_AGENT = ('Damaris Bot, v0.1. Read only bot to read posts from' '/r/cats') # Sample Captions CAPTIONS = [ "some", "random", "strings", ]
21.277778
74
0.631854
TG_CLI = '/opt/tg/bin/telegram-cli' TG_PUBKEY = '/opt/tg/tg-server.pub' RECEPIENT = '@your-tg-recepient' REDDIT_APP_KEY = 'c...w' REDDIT_APP_SECRET = 'T...c' REDDIT_USER_AGENT = ('Damaris Bot, v0.1. Read only bot to read posts from' '/r/cats') CAPTIONS = [ "some", "random", "strings", ]
true
true
79030847982518e44efd97a2103c064babe7d8b0
279
py
Python
cart/urls.py
knkemree/django_ecommerce_website
19876976bc872cf4835778d12d82756c573cf3b9
[ "bzip2-1.0.6" ]
null
null
null
cart/urls.py
knkemree/django_ecommerce_website
19876976bc872cf4835778d12d82756c573cf3b9
[ "bzip2-1.0.6" ]
8
2020-06-06T01:22:59.000Z
2022-01-13T02:14:24.000Z
cart/urls.py
knkemree/django_ecommerce_website
19876976bc872cf4835778d12d82756c573cf3b9
[ "bzip2-1.0.6" ]
null
null
null
from django.urls import path from . import views app_name = 'cart' urlpatterns = [ path('', views.cart_detail, name='cart_detail'), path('add/<int:product_id>/',views.cart_add,name='cart_add'), path('remove/<int:product_id>/',views.cart_remove,name='cart_remove'), ]
31
74
0.702509
from django.urls import path from . import views app_name = 'cart' urlpatterns = [ path('', views.cart_detail, name='cart_detail'), path('add/<int:product_id>/',views.cart_add,name='cart_add'), path('remove/<int:product_id>/',views.cart_remove,name='cart_remove'), ]
true
true
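Because the URLconf above sets app_name = 'cart', links back to these routes are normally built with namespaced reverse() calls. A small sketch, assuming a configured Django project that includes this module under the /cart/ prefix:

from django.urls import reverse

reverse('cart:cart_detail')              # '/cart/'
reverse('cart:cart_add', args=[42])      # '/cart/add/42/'
reverse('cart:cart_remove', args=[42])   # '/cart/remove/42/'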
79030b771a82130fd565bc3651c300b549c8e2a4
733
py
Python
soft/template.qiskit.ansatz/python_code/tiny2/custom_ansatz.py
ctuning/qiskit
33126f7c2e00331303727b712717ded6c7420e18
[ "BSD-3-Clause" ]
7
2018-05-05T09:47:54.000Z
2019-04-04T10:00:38.000Z
soft/template.qiskit.ansatz/python_code/tiny2/custom_ansatz.py
ctuning/qiskit
33126f7c2e00331303727b712717ded6c7420e18
[ "BSD-3-Clause" ]
7
2018-03-13T15:38:12.000Z
2018-12-21T15:15:20.000Z
soft/template.qiskit.ansatz/python_code/tiny2/custom_ansatz.py
ctuning/qiskit
33126f7c2e00331303727b712717ded6c7420e18
[ "BSD-3-Clause" ]
2
2018-10-05T11:58:06.000Z
2020-01-19T21:10:17.000Z
#!/usr/bin/env python3 import numpy as np import qiskit num_params = 2 # make sure you set this correctly to the number of parameters used by the ansatz ## Previously used for Helium VQE in Rigetti implementation # def tiny_ansatz_2(current_params): q = qiskit.QuantumRegister(2, "q") qc = qiskit.QuantumCircuit(q, qiskit.ClassicalRegister(2, "c")) qc.x(q[0]) qc.x(q[1]) qc.rx( np.pi/2, q[0]) qc.h(q[1]) qc.cx(q[0], q[1]) qc.rz(current_params[0], q[1]) qc.cx(q[0], q[1]) qc.rx(-np.pi/2, q[0]) qc.h(q[1]) qc.h(q[0]) qc.rx( np.pi/2, q[1]) qc.cx(q[0], q[1]) qc.rz(current_params[1], q[1]) qc.cx(q[0], q[1]) qc.h(q[0]) qc.rx(-np.pi/2, q[1]) return qc
22.90625
101
0.581173
import numpy as np import qiskit num_params = 2 q = qiskit.QuantumRegister(2, "q") qc = qiskit.QuantumCircuit(q, qiskit.ClassicalRegister(2, "c")) qc.x(q[0]) qc.x(q[1]) qc.rx( np.pi/2, q[0]) qc.h(q[1]) qc.cx(q[0], q[1]) qc.rz(current_params[0], q[1]) qc.cx(q[0], q[1]) qc.rx(-np.pi/2, q[0]) qc.h(q[1]) qc.h(q[0]) qc.rx( np.pi/2, q[1]) qc.cx(q[0], q[1]) qc.rz(current_params[1], q[1]) qc.cx(q[0], q[1]) qc.h(q[0]) qc.rx(-np.pi/2, q[1]) return qc
true
true
79030be3f52ee8c3da903478362c1c99f6620bcb
6,599
py
Python
tests-cuda/test_0345-cuda-num.py
colesbury/awkward-1.0
d036ab18eb54de8a2571d9f179d315ac8ee22119
[ "BSD-3-Clause" ]
null
null
null
tests-cuda/test_0345-cuda-num.py
colesbury/awkward-1.0
d036ab18eb54de8a2571d9f179d315ac8ee22119
[ "BSD-3-Clause" ]
null
null
null
tests-cuda/test_0345-cuda-num.py
colesbury/awkward-1.0
d036ab18eb54de8a2571d9f179d315ac8ee22119
[ "BSD-3-Clause" ]
null
null
null
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE import pytest # noqa: F401 import numpy as np # noqa: F401 import cupy as cp # noqa: F401 import awkward as ak # noqa: F401 def test_num_1(): content = ak.Array( ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine"] ).layout bitmask = ak.layout.IndexU8(np.array([40, 34], dtype=np.uint8)) array = ak.Array(ak.layout.BitMaskedArray(bitmask, content, False, 9, False)) cuda_array = ak.to_kernels(array, "cuda") assert ak.num(cuda_array, 0) == ak.num(array, 0) assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist() def test_num_2(): content = ak.Array( ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine"] ).layout bytemask = ak.layout.Index8(np.array([False, True, False], dtype=np.bool)) array = ak.Array(ak.layout.ByteMaskedArray(bytemask, content, True)) cuda_array = ak.to_kernels(array, "cuda") assert ak.num(cuda_array, 0) == ak.num(array, 0) assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist() def test_num_3(): array = ak.Array(ak.layout.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5]))) cuda_array = ak.to_kernels(array, "cuda") assert ak.num(cuda_array, 0) == ak.num(array, 0) def test_num_4(): array = ak.Array( ak.layout.NumpyArray(np.array([[0.0, 1.1], [2.2, 3.3], [4.4, 5.5]])) ) cuda_array = ak.to_kernels(array, "cuda") assert ak.num(cuda_array, 0) == ak.num(array, 0) assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist() def test_num_5(): array = ak.Array(ak.layout.EmptyArray()) cuda_array = ak.to_kernels(array, "cuda") assert ak.num(cuda_array, 0) == ak.num(array, 0) def test_num_6(): content = ak.layout.NumpyArray( np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]) ) offsets = ak.layout.Index64(np.array([0, 3, 3, 5, 6, 9])) array = ak.Array(ak.layout.ListOffsetArray64(offsets, content)) cuda_array = ak.to_kernels(array, "cuda") assert ak.num(cuda_array, 0) == ak.num(array, 0) assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist() def test_num_7(): content = ak.layout.NumpyArray( np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]) ) offsets = ak.layout.IndexU32(np.array([0, 3, 3, 5, 6, 9])) array = ak.Array(ak.layout.ListOffsetArrayU32(offsets, content)) cuda_array = ak.to_kernels(array, "cuda") assert ak.num(cuda_array, 0) == ak.num(array, 0) assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist() def test_num_8(): content = ak.layout.NumpyArray( np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10]) ) offsets = ak.layout.Index64(np.array([0, 3, 3, 5, 6, 10, 10])) listoffsetarray = ak.layout.ListOffsetArray64(offsets, content) regulararray = ak.layout.RegularArray(listoffsetarray, 2) starts = ak.layout.Index64(np.array([0, 1])) stops = ak.layout.Index64(np.array([2, 3])) listarray = ak.layout.ListArray64(starts, stops, regulararray) cuda_listoffsetarray = ak.to_kernels(listoffsetarray, "cuda") assert ak.num(cuda_listoffsetarray, 0) == ak.num(ak.Array(listoffsetarray), 0) assert ( ak.num(cuda_listoffsetarray, 1).tolist() == ak.num(ak.Array(listoffsetarray), 1).tolist() ) cuda_regulararray = ak.to_kernels(regulararray, "cuda") assert ak.num(cuda_regulararray, 0) == ak.num(ak.Array(regulararray), 0) assert ( ak.num(cuda_regulararray, 1).tolist() == ak.num(ak.Array(regulararray), 1).tolist() ) cuda_listarray = ak.to_kernels(listarray, "cuda") assert ak.num(cuda_listarray, 0) == ak.num(ak.Array(listarray), 0) assert ak.num(cuda_listarray, 1).tolist() == ak.num(ak.Array(listarray), 
1).tolist() content1 = ak.layout.NumpyArray(np.array([1, 2, 3, 4, 5])) content2 = ak.layout.NumpyArray( np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]) ) offsets = ak.layout.Index32(np.array([0, 3, 3, 5, 6, 9])) recordarray = ak.Array( ak.layout.RecordArray( [content1, listoffsetarray, content2, content1], keys=["one", "two", "2", "wonky"], ) ) cuda_recordarray = ak.to_kernels(recordarray, "cuda") assert ak.num(cuda_recordarray, 0).tolist() == ak.num(recordarray, 0).tolist() content0 = ak.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]]).layout content = ak.Array( ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine"] ).layout tags = ak.layout.Index8(np.array([1, 1, 0, 0, 1, 0, 1, 1], dtype=np.int8)) index = ak.layout.Index32(np.array([0, 1, 0, 1, 2, 2, 4, 3], dtype=np.int32)) unionarray = ak.Array(ak.layout.UnionArray8_32(tags, index, [content0, content1])) cuda_unionarray = ak.to_kernels(unionarray, "cuda") assert ak.num(cuda_unionarray, 0) == ak.num(unionarray, 0) def test_num_9(): content = ak.layout.NumpyArray( np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]) ) index = ak.layout.Index32(np.array([0, 2, 4, 6, 8, 9, 7, 5], dtype=np.int64)) indexedarray = ak.Array(ak.layout.IndexedArray32(index, content)) cuda_indexedarray = ak.to_kernels(indexedarray, "cuda") assert ak.num(cuda_indexedarray, 0) == ak.num(indexedarray, 0) ioa = ak.Array( ak.layout.IndexedOptionArray32( ak.layout.Index32([-30, 19, 6, 7, -3, 21, 13, 22, 17, 9, -12, 16]), ak.layout.NumpyArray( np.array( [ 5.2, 1.7, 6.7, -0.4, 4.0, 7.8, 3.8, 6.8, 4.2, 0.3, 4.6, 6.2, 6.9, -0.7, 3.9, 1.6, 8.7, -0.7, 3.2, 4.3, 4.0, 5.8, 4.2, 7.0, 5.6, 3.8, ] ) ), ) ) cuda_ioa = ak.to_kernels(ioa, "cuda") ak.to_kernels(cuda_ioa, "cpu") assert ak.num(cuda_ioa, 0) == ak.num(ioa, 0)
36.258242
88
0.54387
import pytest import numpy as np import cupy as cp import awkward as ak def test_num_1(): content = ak.Array( ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine"] ).layout bitmask = ak.layout.IndexU8(np.array([40, 34], dtype=np.uint8)) array = ak.Array(ak.layout.BitMaskedArray(bitmask, content, False, 9, False)) cuda_array = ak.to_kernels(array, "cuda") assert ak.num(cuda_array, 0) == ak.num(array, 0) assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist() def test_num_2(): content = ak.Array( ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine"] ).layout bytemask = ak.layout.Index8(np.array([False, True, False], dtype=np.bool)) array = ak.Array(ak.layout.ByteMaskedArray(bytemask, content, True)) cuda_array = ak.to_kernels(array, "cuda") assert ak.num(cuda_array, 0) == ak.num(array, 0) assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist() def test_num_3(): array = ak.Array(ak.layout.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5]))) cuda_array = ak.to_kernels(array, "cuda") assert ak.num(cuda_array, 0) == ak.num(array, 0) def test_num_4(): array = ak.Array( ak.layout.NumpyArray(np.array([[0.0, 1.1], [2.2, 3.3], [4.4, 5.5]])) ) cuda_array = ak.to_kernels(array, "cuda") assert ak.num(cuda_array, 0) == ak.num(array, 0) assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist() def test_num_5(): array = ak.Array(ak.layout.EmptyArray()) cuda_array = ak.to_kernels(array, "cuda") assert ak.num(cuda_array, 0) == ak.num(array, 0) def test_num_6(): content = ak.layout.NumpyArray( np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]) ) offsets = ak.layout.Index64(np.array([0, 3, 3, 5, 6, 9])) array = ak.Array(ak.layout.ListOffsetArray64(offsets, content)) cuda_array = ak.to_kernels(array, "cuda") assert ak.num(cuda_array, 0) == ak.num(array, 0) assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist() def test_num_7(): content = ak.layout.NumpyArray( np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]) ) offsets = ak.layout.IndexU32(np.array([0, 3, 3, 5, 6, 9])) array = ak.Array(ak.layout.ListOffsetArrayU32(offsets, content)) cuda_array = ak.to_kernels(array, "cuda") assert ak.num(cuda_array, 0) == ak.num(array, 0) assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist() def test_num_8(): content = ak.layout.NumpyArray( np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10]) ) offsets = ak.layout.Index64(np.array([0, 3, 3, 5, 6, 10, 10])) listoffsetarray = ak.layout.ListOffsetArray64(offsets, content) regulararray = ak.layout.RegularArray(listoffsetarray, 2) starts = ak.layout.Index64(np.array([0, 1])) stops = ak.layout.Index64(np.array([2, 3])) listarray = ak.layout.ListArray64(starts, stops, regulararray) cuda_listoffsetarray = ak.to_kernels(listoffsetarray, "cuda") assert ak.num(cuda_listoffsetarray, 0) == ak.num(ak.Array(listoffsetarray), 0) assert ( ak.num(cuda_listoffsetarray, 1).tolist() == ak.num(ak.Array(listoffsetarray), 1).tolist() ) cuda_regulararray = ak.to_kernels(regulararray, "cuda") assert ak.num(cuda_regulararray, 0) == ak.num(ak.Array(regulararray), 0) assert ( ak.num(cuda_regulararray, 1).tolist() == ak.num(ak.Array(regulararray), 1).tolist() ) cuda_listarray = ak.to_kernels(listarray, "cuda") assert ak.num(cuda_listarray, 0) == ak.num(ak.Array(listarray), 0) assert ak.num(cuda_listarray, 1).tolist() == ak.num(ak.Array(listarray), 1).tolist() content1 = ak.layout.NumpyArray(np.array([1, 2, 3, 4, 5])) content2 = ak.layout.NumpyArray( np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 
7.7, 8.8, 9.9]) ) offsets = ak.layout.Index32(np.array([0, 3, 3, 5, 6, 9])) recordarray = ak.Array( ak.layout.RecordArray( [content1, listoffsetarray, content2, content1], keys=["one", "two", "2", "wonky"], ) ) cuda_recordarray = ak.to_kernels(recordarray, "cuda") assert ak.num(cuda_recordarray, 0).tolist() == ak.num(recordarray, 0).tolist() content0 = ak.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]]).layout content = ak.Array( ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine"] ).layout tags = ak.layout.Index8(np.array([1, 1, 0, 0, 1, 0, 1, 1], dtype=np.int8)) index = ak.layout.Index32(np.array([0, 1, 0, 1, 2, 2, 4, 3], dtype=np.int32)) unionarray = ak.Array(ak.layout.UnionArray8_32(tags, index, [content0, content1])) cuda_unionarray = ak.to_kernels(unionarray, "cuda") assert ak.num(cuda_unionarray, 0) == ak.num(unionarray, 0) def test_num_9(): content = ak.layout.NumpyArray( np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]) ) index = ak.layout.Index32(np.array([0, 2, 4, 6, 8, 9, 7, 5], dtype=np.int64)) indexedarray = ak.Array(ak.layout.IndexedArray32(index, content)) cuda_indexedarray = ak.to_kernels(indexedarray, "cuda") assert ak.num(cuda_indexedarray, 0) == ak.num(indexedarray, 0) ioa = ak.Array( ak.layout.IndexedOptionArray32( ak.layout.Index32([-30, 19, 6, 7, -3, 21, 13, 22, 17, 9, -12, 16]), ak.layout.NumpyArray( np.array( [ 5.2, 1.7, 6.7, -0.4, 4.0, 7.8, 3.8, 6.8, 4.2, 0.3, 4.6, 6.2, 6.9, -0.7, 3.9, 1.6, 8.7, -0.7, 3.2, 4.3, 4.0, 5.8, 4.2, 7.0, 5.6, 3.8, ] ) ), ) ) cuda_ioa = ak.to_kernels(ioa, "cuda") ak.to_kernels(cuda_ioa, "cpu") assert ak.num(cuda_ioa, 0) == ak.num(ioa, 0)
true
true
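The ListOffsetArray assertions above are easier to follow once ak.num is unpacked: at axis 1 it is the difference of consecutive offsets, and at axis 0 it is the number of outer lists. A plain NumPy sketch of that bookkeeping, illustrative only and not the awkward or CUDA kernels themselves:

import numpy as np

content = np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
offsets = np.array([0, 3, 3, 5, 6, 9])

# ak.num(array, 1) on a ListOffsetArray: the length of each sublist.
lengths = offsets[1:] - offsets[:-1]
print(lengths.tolist())        # [3, 0, 2, 1, 3]

# ak.num(array, 0): how many sublists there are.
print(len(offsets) - 1)        # 5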
79030c8e38c8d5bd7994b0e47923280d40e16904
4,714
py
Python
exercises/city_temperature_prediction.py
noamwino/IML.HUJI
0b1b6f333a16200fa7717af1be12e5f38694b74c
[ "MIT" ]
null
null
null
exercises/city_temperature_prediction.py
noamwino/IML.HUJI
0b1b6f333a16200fa7717af1be12e5f38694b74c
[ "MIT" ]
null
null
null
exercises/city_temperature_prediction.py
noamwino/IML.HUJI
0b1b6f333a16200fa7717af1be12e5f38694b74c
[ "MIT" ]
null
null
null
import os.path import IMLearn.learners.regressors.linear_regression from IMLearn.learners.regressors import PolynomialFitting from IMLearn.utils import split_train_test import numpy as np import pandas as pd import plotly.express as px import plotly.io as pio pio.templates.default = "simple_white" from IMLearn.metrics.loss_functions import mean_square_error CITY_TEMPERATURE_DATA_PATH = os.path.join(os.path.curdir, "..", "datasets", "City_Temperature.csv") def load_data(filename: str) -> pd.DataFrame: """ Load city daily temperature dataset and preprocess data. Parameters ---------- filename: str Path to house prices dataset Returns ------- Design matrix and response vector (Temp) """ data = pd.read_csv(filename, parse_dates=["Date"]).drop_duplicates() data = data.drop(data[data["Temp"] < -70].index) # invalid Temp data["DayOfYear"] = data['Date'].dt.dayofyear return data def question_2(data): """ Exploring data specifically in Israel """ data = data.copy() data = data[data["Country"] == "Israel"] data["Year"] = data["Year"].astype(str) fig = px.scatter(data, x="DayOfYear", y="Temp", color="Year", width=1500, height=700, labels={"DayOfYear": "Day of Year", "Temp": "Temperature"}, title="Q2(1) The relation between the day in the year and the temperature in Israel") fig.update_xaxes(range=[0, 365], tick0=0, dtick=20) fig.show() std_by_month = data.groupby("Month").std().reset_index() fig = px.bar(std_by_month, x="Month", y="Temp", width=1500, height=700, labels={"Temp": "Std of the daily temperatures"}, title="Q2(2) The Standard Deviation of the Daily Temperatures Per Month in Israel") fig.data[-1].text = np.round(std_by_month["Temp"], 3) fig.update_xaxes(tick0=1, dtick=1) fig.update_traces(textposition='outside') fig.show() def question_3(data): """ Exploring differences between countries""" agg_data_mean = data.groupby(["Country", "Month"]).mean().reset_index() agg_data_std = data.groupby(["Country", "Month"]).std().reset_index() fig = px.line(agg_data_mean, x="Month", y="Temp", color="Country", error_y=agg_data_std["Temp"], width=1500, height=700, labels={"Temp": "Averaged Temperature"}, title="Q3 The Average Monthly Temperatures in Different Countries") fig.update_xaxes(tick0=1, dtick=1) fig.show() def question_4(data): """ Fitting model for different values of `k` """ data = data[data["Country"] == "Israel"] train_X, train_y, test_X, test_y = split_train_test(data["DayOfYear"], data["Temp"]) losses = np.array([]) for k in range(1, 11): poly_fit = PolynomialFitting(k) poly_fit.fit(train_X.to_numpy(), train_y.to_numpy()) loss = poly_fit.loss(test_X.to_numpy(), test_y.to_numpy()) losses = np.append(losses, round(loss, 2)) print(k, loss) fig = px.bar(x=range(1, 11), y=losses, width=1500, height=700, labels={"x": "Polynomials Degrees (k)", "y": "Test Error (MSE)"}, title="Q4 Test Errors for Different Polynomials Degrees (k)") fig.data[-1].text = losses fig.update_xaxes(tick0=1, dtick=1) fig.update_traces(textposition="outside") fig.show() def question_5(data): """ Evaluating fitted model on different countries """ data_israel = data[data["Country"] == "Israel"] poly_fit = PolynomialFitting(k=5) poly_fit.fit(data_israel["DayOfYear"], data_israel["Temp"]) other_countries = ["Jordan", "South Africa", "The Netherlands"] losses = np.array([]) for country in other_countries: country_data = data[data["Country"] == country] loss = poly_fit.loss(country_data["DayOfYear"], country_data["Temp"]) losses = np.append(losses, loss) fig = px.bar(x=np.array(other_countries), y=losses, width=700, height=700, 
labels={"x": "Country", "y": "Losses (MSE)"}, title="Q5 Losses (MSE) per Country With k=5") fig.data[-1].text = np.round(losses, 3) fig.update_traces(textposition="outside") fig.show() if __name__ == '__main__': np.random.seed(0) # Question 1 - Load and preprocessing of city temperature dataset data = load_data(CITY_TEMPERATURE_DATA_PATH) # Question 2 - Exploring data for specific country question_2(data) # Question 3 - Exploring differences between countries question_3(data) # Question 4 - Fitting model for different values of `k` question_4(data) # Question 5 - Evaluating fitted model on different countries question_5(data)
35.179104
108
0.65507
import os.path import IMLearn.learners.regressors.linear_regression from IMLearn.learners.regressors import PolynomialFitting from IMLearn.utils import split_train_test import numpy as np import pandas as pd import plotly.express as px import plotly.io as pio pio.templates.default = "simple_white" from IMLearn.metrics.loss_functions import mean_square_error CITY_TEMPERATURE_DATA_PATH = os.path.join(os.path.curdir, "..", "datasets", "City_Temperature.csv") def load_data(filename: str) -> pd.DataFrame: data = pd.read_csv(filename, parse_dates=["Date"]).drop_duplicates() data = data.drop(data[data["Temp"] < -70].index) data["DayOfYear"] = data['Date'].dt.dayofyear return data def question_2(data): data = data.copy() data = data[data["Country"] == "Israel"] data["Year"] = data["Year"].astype(str) fig = px.scatter(data, x="DayOfYear", y="Temp", color="Year", width=1500, height=700, labels={"DayOfYear": "Day of Year", "Temp": "Temperature"}, title="Q2(1) The relation between the day in the year and the temperature in Israel") fig.update_xaxes(range=[0, 365], tick0=0, dtick=20) fig.show() std_by_month = data.groupby("Month").std().reset_index() fig = px.bar(std_by_month, x="Month", y="Temp", width=1500, height=700, labels={"Temp": "Std of the daily temperatures"}, title="Q2(2) The Standard Deviation of the Daily Temperatures Per Month in Israel") fig.data[-1].text = np.round(std_by_month["Temp"], 3) fig.update_xaxes(tick0=1, dtick=1) fig.update_traces(textposition='outside') fig.show() def question_3(data): agg_data_mean = data.groupby(["Country", "Month"]).mean().reset_index() agg_data_std = data.groupby(["Country", "Month"]).std().reset_index() fig = px.line(agg_data_mean, x="Month", y="Temp", color="Country", error_y=agg_data_std["Temp"], width=1500, height=700, labels={"Temp": "Averaged Temperature"}, title="Q3 The Average Monthly Temperatures in Different Countries") fig.update_xaxes(tick0=1, dtick=1) fig.show() def question_4(data): data = data[data["Country"] == "Israel"] train_X, train_y, test_X, test_y = split_train_test(data["DayOfYear"], data["Temp"]) losses = np.array([]) for k in range(1, 11): poly_fit = PolynomialFitting(k) poly_fit.fit(train_X.to_numpy(), train_y.to_numpy()) loss = poly_fit.loss(test_X.to_numpy(), test_y.to_numpy()) losses = np.append(losses, round(loss, 2)) print(k, loss) fig = px.bar(x=range(1, 11), y=losses, width=1500, height=700, labels={"x": "Polynomials Degrees (k)", "y": "Test Error (MSE)"}, title="Q4 Test Errors for Different Polynomials Degrees (k)") fig.data[-1].text = losses fig.update_xaxes(tick0=1, dtick=1) fig.update_traces(textposition="outside") fig.show() def question_5(data): data_israel = data[data["Country"] == "Israel"] poly_fit = PolynomialFitting(k=5) poly_fit.fit(data_israel["DayOfYear"], data_israel["Temp"]) other_countries = ["Jordan", "South Africa", "The Netherlands"] losses = np.array([]) for country in other_countries: country_data = data[data["Country"] == country] loss = poly_fit.loss(country_data["DayOfYear"], country_data["Temp"]) losses = np.append(losses, loss) fig = px.bar(x=np.array(other_countries), y=losses, width=700, height=700, labels={"x": "Country", "y": "Losses (MSE)"}, title="Q5 Losses (MSE) per Country With k=5") fig.data[-1].text = np.round(losses, 3) fig.update_traces(textposition="outside") fig.show() if __name__ == '__main__': np.random.seed(0) data = load_data(CITY_TEMPERATURE_DATA_PATH) question_2(data) question_3(data) question_4(data) question_5(data)
true
true
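The model-selection loop in question_4 (fit a degree-k polynomial on a train split, score MSE on the test split, compare across k) can be reproduced in miniature with NumPy alone. The synthetic data below is an assumption for illustration; the IMLearn classes are not involved:

import numpy as np

rng = np.random.default_rng(0)
# Synthetic stand-in for (DayOfYear, Temp): a seasonal curve plus noise.
day = rng.integers(1, 366, size=500)
temp = 20 + 8 * np.sin(2 * np.pi * (day - 100) / 365) + rng.normal(0, 2, size=day.size)

# Simple 75/25 split, mirroring split_train_test in spirit.
idx = rng.permutation(day.size)
train, test = idx[:375], idx[375:]

for k in range(1, 6):
    coeffs = np.polyfit(day[train], temp[train], deg=k)
    pred = np.polyval(coeffs, day[test])
    mse = np.mean((temp[test] - pred) ** 2)
    print(f"k={k}: test MSE={mse:.2f}")

Low degrees underfit the seasonal curve, which is the effect the bar chart in the original script visualizes across k.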
79030dfbe3d093524733e1c0dff423f969d07f9f
1,060
py
Python
quantum.py
duboviy/misc
4cd8cbcf12fc29dd2f12699fbd2f3dd738b5e4b5
[ "MIT" ]
10
2016-12-19T19:22:17.000Z
2021-03-08T21:12:36.000Z
quantum.py
duboviy/misc
4cd8cbcf12fc29dd2f12699fbd2f3dd738b5e4b5
[ "MIT" ]
null
null
null
quantum.py
duboviy/misc
4cd8cbcf12fc29dd2f12699fbd2f3dd738b5e4b5
[ "MIT" ]
6
2016-12-20T18:12:21.000Z
2017-05-15T06:09:21.000Z
"""Simple quantum computations simulation.""" import numpy as np def I(): """Identity operator.""" return np.identity(2) def X(): """X-rotation, negation operator.""" return np.identity(2)[..., ::-1] def H(): """Adamara operator, superposition.""" return np.array([[1, 1], [1, -1]]) / np.sqrt(2) def SWAP(): """Swap 2 qubits""" m = np.identity(4) m[[1, 2]] = m[[2, 1]] return m def CX(): """Controlled negation.""" m = np.identity(4) m[[3, 2]] = m[[2, 3]] return m def apply(v, *gates): m = gates[0] gates = gates[1:] for gate in gates: m = np.kron(gate, m) return m.dot(v) def observe(v): v2 = np.absolute(v) ** 2 c = np.random.choice(v.size, 1, p=v2) return c[0] # Usage example # create 3 qubits in state 000, array size 2 ^ n a = np.array([1, 0, 0, 0, 0, 0, 0, 0]) # transform the 2nd qubit into a superposition of 0 and 1 a = apply(a, I(), H(), I()) # entangle the 1st and 2nd qubit a = apply(a, CX(), I()) # swap the 2nd and 3rd qubit a = apply(a, I(), SWAP()) # observe the state observe(a)
17.966102
57
0.580189
import numpy as np def I(): return np.identity(2) def X(): return np.identity(2)[..., ::-1] def H(): return np.array([[1, 1], [1, -1]]) / np.sqrt(2) def SWAP(): m = np.identity(4) m[[1, 2]] = m[[2, 1]] return m def CX(): m = np.identity(4) m[[3, 2]] = m[[2, 3]] return m def apply(v, *gates): m = gates[0] gates = gates[1:] for gate in gates: m = np.kron(gate, m) return m.dot(v) def observe(v): v2 = np.absolute(v) ** 2 c = np.random.choice(v.size, 1, p=v2) return c[0] a = np.array([1, 0, 0, 0, 0, 0, 0, 0]) a = apply(a, I(), H(), I()) a = apply(a, CX(), I()) a = apply(a, I(), SWAP()) observe(a)
true
true
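quantum.py ends with a three-qubit walkthrough; a two-qubit follow-on makes the entanglement step concrete. This sketch assumes the I, H, CX, apply and observe helpers defined in the file are in scope (for example by pasting them above it); it prepares a Bell state and checks that only the correlated outcomes 0 (|00>) and 3 (|11>) are ever observed:

import numpy as np

bell = np.array([1.0, 0.0, 0.0, 0.0])   # two qubits in state |00>
bell = apply(bell, I(), H())             # superpose the second (high-order) qubit
bell = apply(bell, CX())                 # entangle it with the first qubit
samples = [observe(bell) for _ in range(1000)]
assert set(samples) <= {0, 3}            # never 1 (|01>) or 2 (|10>)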
79030e1e3ca6bf67306602b801266594f48148c1
2,766
py
Python
homeassistant/components/iaqualink/light.py
basicpail/core
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
[ "Apache-2.0" ]
11
2018-02-16T15:35:47.000Z
2020-01-14T15:20:00.000Z
homeassistant/components/iaqualink/light.py
jagadeeshvenkatesh/core
1bd982668449815fee2105478569f8e4b5670add
[ "Apache-2.0" ]
87
2020-07-06T22:22:54.000Z
2022-03-31T06:01:46.000Z
homeassistant/components/iaqualink/light.py
jagadeeshvenkatesh/core
1bd982668449815fee2105478569f8e4b5670add
[ "Apache-2.0" ]
11
2020-12-16T13:48:14.000Z
2022-02-01T00:28:05.000Z
"""Support for Aqualink pool lights.""" from iaqualink import AqualinkLightEffect from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_EFFECT, DOMAIN, SUPPORT_BRIGHTNESS, SUPPORT_EFFECT, LightEntity, ) from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant from . import AqualinkEntity, refresh_system from .const import DOMAIN as AQUALINK_DOMAIN PARALLEL_UPDATES = 0 async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities ) -> None: """Set up discovered lights.""" devs = [] for dev in hass.data[AQUALINK_DOMAIN][DOMAIN]: devs.append(HassAqualinkLight(dev)) async_add_entities(devs, True) class HassAqualinkLight(AqualinkEntity, LightEntity): """Representation of a light.""" @property def name(self) -> str: """Return the name of the light.""" return self.dev.label @property def is_on(self) -> bool: """Return whether the light is on or off.""" return self.dev.is_on @refresh_system async def async_turn_on(self, **kwargs) -> None: """Turn on the light. This handles brightness and light effects for lights that do support them. """ brightness = kwargs.get(ATTR_BRIGHTNESS) effect = kwargs.get(ATTR_EFFECT) # For now I'm assuming lights support either effects or brightness. if effect: effect = AqualinkLightEffect[effect].value await self.dev.set_effect(effect) elif brightness: # Aqualink supports percentages in 25% increments. pct = int(round(brightness * 4.0 / 255)) * 25 await self.dev.set_brightness(pct) else: await self.dev.turn_on() @refresh_system async def async_turn_off(self, **kwargs) -> None: """Turn off the light.""" await self.dev.turn_off() @property def brightness(self) -> int: """Return current brightness of the light. The scale needs converting between 0-100 and 0-255. """ return self.dev.brightness * 255 / 100 @property def effect(self) -> str: """Return the current light effect if supported.""" return AqualinkLightEffect(self.dev.effect).name @property def effect_list(self) -> list: """Return supported light effects.""" return list(AqualinkLightEffect.__members__) @property def supported_features(self) -> int: """Return the list of features supported by the light.""" if self.dev.is_dimmer: return SUPPORT_BRIGHTNESS if self.dev.is_color: return SUPPORT_EFFECT return 0
28.22449
76
0.649313
from iaqualink import AqualinkLightEffect from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_EFFECT, DOMAIN, SUPPORT_BRIGHTNESS, SUPPORT_EFFECT, LightEntity, ) from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant from . import AqualinkEntity, refresh_system from .const import DOMAIN as AQUALINK_DOMAIN PARALLEL_UPDATES = 0 async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities ) -> None: devs = [] for dev in hass.data[AQUALINK_DOMAIN][DOMAIN]: devs.append(HassAqualinkLight(dev)) async_add_entities(devs, True) class HassAqualinkLight(AqualinkEntity, LightEntity): @property def name(self) -> str: return self.dev.label @property def is_on(self) -> bool: return self.dev.is_on @refresh_system async def async_turn_on(self, **kwargs) -> None: brightness = kwargs.get(ATTR_BRIGHTNESS) effect = kwargs.get(ATTR_EFFECT) if effect: effect = AqualinkLightEffect[effect].value await self.dev.set_effect(effect) elif brightness: # Aqualink supports percentages in 25% increments. pct = int(round(brightness * 4.0 / 255)) * 25 await self.dev.set_brightness(pct) else: await self.dev.turn_on() @refresh_system async def async_turn_off(self, **kwargs) -> None: await self.dev.turn_off() @property def brightness(self) -> int: return self.dev.brightness * 255 / 100 @property def effect(self) -> str: return AqualinkLightEffect(self.dev.effect).name @property def effect_list(self) -> list: return list(AqualinkLightEffect.__members__) @property def supported_features(self) -> int: if self.dev.is_dimmer: return SUPPORT_BRIGHTNESS if self.dev.is_color: return SUPPORT_EFFECT return 0
true
true
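The only arithmetic in the light entity above is the brightness mapping between Home Assistant's 0-255 scale and Aqualink's 25% steps. A standalone sketch with invented function names, using the same expressions as async_turn_on and the brightness property:

def ha_to_aqualink_pct(brightness: int) -> int:
    # Home Assistant brightness (0-255) snapped to Aqualink's 25% increments.
    return int(round(brightness * 4.0 / 255)) * 25

def aqualink_pct_to_ha(pct: int) -> float:
    # Aqualink percentage (0-100) back to the 0-255 scale.
    return pct * 255 / 100

for b in (0, 64, 128, 191, 255):
    pct = ha_to_aqualink_pct(b)
    print(f"{b:3d} -> {pct:3d}% -> {aqualink_pct_to_ha(pct):.0f}")

The round trip is lossy by design: every brightness inside a 25% band maps back to the same representative value.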
79030f39862224134730805990b7b4e2beb8bd18
1,930
py
Python
src/apscheduler/eventbrokers/async_local.py
sasirajpuvvada/apscheduler
8b68b6c5d1c63faae1ba3769b6475b396328e3a3
[ "MIT" ]
4,294
2015-12-25T19:52:20.000Z
2022-03-31T19:40:12.000Z
src/apscheduler/eventbrokers/async_local.py
sasirajpuvvada/apscheduler
8b68b6c5d1c63faae1ba3769b6475b396328e3a3
[ "MIT" ]
505
2015-12-03T13:57:22.000Z
2022-03-31T00:32:56.000Z
src/apscheduler/eventbrokers/async_local.py
sasirajpuvvada/apscheduler
8b68b6c5d1c63faae1ba3769b6475b396328e3a3
[ "MIT" ]
692
2015-12-24T22:54:56.000Z
2022-03-29T09:32:02.000Z
from __future__ import annotations from asyncio import iscoroutine from contextlib import AsyncExitStack from typing import Any, Callable import attr from anyio import create_task_group from anyio.abc import TaskGroup from ..abc import AsyncEventBroker from ..events import Event from ..util import reentrant from .base import BaseEventBroker @reentrant @attr.define(eq=False) class LocalAsyncEventBroker(AsyncEventBroker, BaseEventBroker): _task_group: TaskGroup = attr.field(init=False) _exit_stack: AsyncExitStack = attr.field(init=False) async def __aenter__(self) -> LocalAsyncEventBroker: self._exit_stack = AsyncExitStack() self._task_group = create_task_group() await self._exit_stack.enter_async_context(self._task_group) return self async def __aexit__(self, exc_type, exc_val, exc_tb): await self._exit_stack.__aexit__(exc_type, exc_val, exc_tb) del self._task_group async def publish(self, event: Event) -> None: await self.publish_local(event) async def publish_local(self, event: Event) -> None: event_type = type(event) one_shot_tokens: list[object] = [] for token, subscription in self._subscriptions.items(): if subscription.event_types is None or event_type in subscription.event_types: self._task_group.start_soon(self._deliver_event, subscription.callback, event) if subscription.one_shot: one_shot_tokens.append(subscription.token) for token in one_shot_tokens: super().unsubscribe(token) async def _deliver_event(self, func: Callable[[Event], Any], event: Event) -> None: try: retval = func(event) if iscoroutine(retval): await retval except BaseException: self._logger.exception('Error delivering %s event', event.__class__.__name__)
33.859649
94
0.701036
from __future__ import annotations from asyncio import iscoroutine from contextlib import AsyncExitStack from typing import Any, Callable import attr from anyio import create_task_group from anyio.abc import TaskGroup from ..abc import AsyncEventBroker from ..events import Event from ..util import reentrant from .base import BaseEventBroker @reentrant @attr.define(eq=False) class LocalAsyncEventBroker(AsyncEventBroker, BaseEventBroker): _task_group: TaskGroup = attr.field(init=False) _exit_stack: AsyncExitStack = attr.field(init=False) async def __aenter__(self) -> LocalAsyncEventBroker: self._exit_stack = AsyncExitStack() self._task_group = create_task_group() await self._exit_stack.enter_async_context(self._task_group) return self async def __aexit__(self, exc_type, exc_val, exc_tb): await self._exit_stack.__aexit__(exc_type, exc_val, exc_tb) del self._task_group async def publish(self, event: Event) -> None: await self.publish_local(event) async def publish_local(self, event: Event) -> None: event_type = type(event) one_shot_tokens: list[object] = [] for token, subscription in self._subscriptions.items(): if subscription.event_types is None or event_type in subscription.event_types: self._task_group.start_soon(self._deliver_event, subscription.callback, event) if subscription.one_shot: one_shot_tokens.append(subscription.token) for token in one_shot_tokens: super().unsubscribe(token) async def _deliver_event(self, func: Callable[[Event], Any], event: Event) -> None: try: retval = func(event) if iscoroutine(retval): await retval except BaseException: self._logger.exception('Error delivering %s event', event.__class__.__name__)
true
true
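LocalAsyncEventBroker above combines three ideas: fan-out to subscribers, support for both plain and coroutine callbacks via the iscoroutine check, and one-shot subscriptions that are dropped after their first delivery. A stripped-down asyncio sketch of the same pattern; the class and names are invented, there is no anyio task group, and events are delivered inline rather than via start_soon:

import asyncio
from asyncio import iscoroutine

class MiniBroker:
    def __init__(self):
        self._subs = {}          # token -> (callback, one_shot)
        self._next_token = 0

    def subscribe(self, callback, one_shot=False):
        token = self._next_token
        self._next_token += 1
        self._subs[token] = (callback, one_shot)
        return token

    async def publish(self, event):
        for token, (callback, one_shot) in list(self._subs.items()):
            result = callback(event)
            if iscoroutine(result):     # same check as _deliver_event above
                await result
            if one_shot:
                self._subs.pop(token, None)

async def demo():
    broker = MiniBroker()
    broker.subscribe(lambda e: print("every time:", e))
    broker.subscribe(lambda e: print("only once:", e), one_shot=True)
    await broker.publish("first")
    await broker.publish("second")

asyncio.run(demo())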
7903105d025e30fcc0fce2293a672e902b226ce1
3,923
py
Python
tests/parsers/esedb_plugins/msie_webcache.py
roshanmaskey/plaso
637856f578eb4bc81f62b97d7f483f69314e7f47
[ "Apache-2.0" ]
1,253
2015-01-02T13:58:02.000Z
2022-03-31T08:43:39.000Z
tests/parsers/esedb_plugins/msie_webcache.py
roshanmaskey/plaso
637856f578eb4bc81f62b97d7f483f69314e7f47
[ "Apache-2.0" ]
3,388
2015-01-02T11:17:58.000Z
2022-03-30T10:21:45.000Z
tests/parsers/esedb_plugins/msie_webcache.py
roshanmaskey/plaso
637856f578eb4bc81f62b97d7f483f69314e7f47
[ "Apache-2.0" ]
376
2015-01-20T07:04:54.000Z
2022-03-04T23:53:00.000Z
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """Tests for the Microsoft Internet Explorer WebCache database.""" import unittest from plaso.lib import definitions from plaso.parsers.esedb_plugins import msie_webcache from tests.parsers.esedb_plugins import test_lib class MsieWebCacheESEDBPluginTest(test_lib.ESEDBPluginTestCase): """Tests for the MSIE WebCache ESE database plugin.""" # pylint: disable=protected-access def testConvertHeadersValues(self): """Tests the _ConvertHeadersValues function.""" plugin = msie_webcache.MsieWebCacheESEDBPlugin() binary_value = ( b'HTTP/1.1 200 OK\r\nContent-Type: image/png\r\n' b'X-Content-Type-Options: nosniff\r\nContent-Length: 2759\r\n' b'X-XSS-Protection: 1; mode=block\r\n' b'Alternate-Protocol: 80:quic\r\n\r\n') expected_headers_value = ( '[HTTP/1.1 200 OK; Content-Type: image/png; ' 'X-Content-Type-Options: nosniff; Content-Length: 2759; ' 'X-XSS-Protection: 1; mode=block; ' 'Alternate-Protocol: 80:quic]') headers_value = plugin._ConvertHeadersValues(binary_value) self.assertEqual(headers_value, expected_headers_value) def testProcessOnDatabaseWithPartitionsTable(self): """Tests the Process function on database with a Partitions table.""" plugin = msie_webcache.MsieWebCacheESEDBPlugin() storage_writer = self._ParseESEDBFileWithPlugin(['WebCacheV01.dat'], plugin) self.assertEqual(storage_writer.number_of_events, 1372) self.assertEqual(storage_writer.number_of_extraction_warnings, 0) self.assertEqual(storage_writer.number_of_recovery_warnings, 0) # The order in which ESEDBPlugin._GetRecordValues() generates events is # nondeterministic hence we sort the events. events = list(storage_writer.GetSortedEvents()) expected_event_values = { 'container_identifier': 1, 'data_type': 'msie:webcache:containers', 'date_time': '2014-05-12 07:30:25.4861987', 'directory': ( 'C:\\Users\\test\\AppData\\Local\\Microsoft\\Windows\\' 'INetCache\\IE\\'), 'name': 'Content', 'set_identifier': 0, 'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_ACCESS} self.CheckEventValues(storage_writer, events[573], expected_event_values) def testProcessOnDatabaseWithPartitionsExTable(self): """Tests the Process function on database with a PartitionsEx table.""" plugin = msie_webcache.MsieWebCacheESEDBPlugin() storage_writer = self._ParseESEDBFileWithPlugin( ['PartitionsEx-WebCacheV01.dat'], plugin) self.assertEqual(storage_writer.number_of_events, 4200) self.assertEqual(storage_writer.number_of_extraction_warnings, 3) self.assertEqual(storage_writer.number_of_recovery_warnings, 0) # The order in which ESEDBPlugin._GetRecordValues() generates events is # nondeterministic hence we sort the events. events = list(storage_writer.GetSortedEvents()) expected_event_values = { 'access_count': 5, 'cache_identifier': 0, 'cached_file_size': 726, 'cached_filename': 'b83d57c0[1].svg', 'container_identifier': 14, 'data_type': 'msie:webcache:container', 'date_time': '2019-03-20 17:22:14.0000000', 'entry_identifier': 63, 'sync_count': 0, 'response_headers': ( '[HTTP/1.1 200; content-length: 726; content-type: image/svg+xml; ' 'x-cache: TCP_HIT; x-msedge-ref: Ref A: 3CD5FCBC8EAD4E0A80FA41A62' 'FBC8CCC Ref B: PRAEDGE0910 Ref C: 2019-12-16T20:55:28Z; date: ' 'Mon, 16 Dec 2019 20:55:28 GMT]'), 'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION, 'url': 'https://www.bing.com/rs/3R/kD/ic/878ca0cd/b83d57c0.svg'} self.CheckEventValues(storage_writer, events[100], expected_event_values) if __name__ == '__main__': unittest.main()
39.23
80
0.702014
import unittest from plaso.lib import definitions from plaso.parsers.esedb_plugins import msie_webcache from tests.parsers.esedb_plugins import test_lib class MsieWebCacheESEDBPluginTest(test_lib.ESEDBPluginTestCase): def testConvertHeadersValues(self): plugin = msie_webcache.MsieWebCacheESEDBPlugin() binary_value = ( b'HTTP/1.1 200 OK\r\nContent-Type: image/png\r\n' b'X-Content-Type-Options: nosniff\r\nContent-Length: 2759\r\n' b'X-XSS-Protection: 1; mode=block\r\n' b'Alternate-Protocol: 80:quic\r\n\r\n') expected_headers_value = ( '[HTTP/1.1 200 OK; Content-Type: image/png; ' 'X-Content-Type-Options: nosniff; Content-Length: 2759; ' 'X-XSS-Protection: 1; mode=block; ' 'Alternate-Protocol: 80:quic]') headers_value = plugin._ConvertHeadersValues(binary_value) self.assertEqual(headers_value, expected_headers_value) def testProcessOnDatabaseWithPartitionsTable(self): plugin = msie_webcache.MsieWebCacheESEDBPlugin() storage_writer = self._ParseESEDBFileWithPlugin(['WebCacheV01.dat'], plugin) self.assertEqual(storage_writer.number_of_events, 1372) self.assertEqual(storage_writer.number_of_extraction_warnings, 0) self.assertEqual(storage_writer.number_of_recovery_warnings, 0) events = list(storage_writer.GetSortedEvents()) expected_event_values = { 'container_identifier': 1, 'data_type': 'msie:webcache:containers', 'date_time': '2014-05-12 07:30:25.4861987', 'directory': ( 'C:\\Users\\test\\AppData\\Local\\Microsoft\\Windows\\' 'INetCache\\IE\\'), 'name': 'Content', 'set_identifier': 0, 'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_ACCESS} self.CheckEventValues(storage_writer, events[573], expected_event_values) def testProcessOnDatabaseWithPartitionsExTable(self): plugin = msie_webcache.MsieWebCacheESEDBPlugin() storage_writer = self._ParseESEDBFileWithPlugin( ['PartitionsEx-WebCacheV01.dat'], plugin) self.assertEqual(storage_writer.number_of_events, 4200) self.assertEqual(storage_writer.number_of_extraction_warnings, 3) self.assertEqual(storage_writer.number_of_recovery_warnings, 0) events = list(storage_writer.GetSortedEvents()) expected_event_values = { 'access_count': 5, 'cache_identifier': 0, 'cached_file_size': 726, 'cached_filename': 'b83d57c0[1].svg', 'container_identifier': 14, 'data_type': 'msie:webcache:container', 'date_time': '2019-03-20 17:22:14.0000000', 'entry_identifier': 63, 'sync_count': 0, 'response_headers': ( '[HTTP/1.1 200; content-length: 726; content-type: image/svg+xml; ' 'x-cache: TCP_HIT; x-msedge-ref: Ref A: 3CD5FCBC8EAD4E0A80FA41A62' 'FBC8CCC Ref B: PRAEDGE0910 Ref C: 2019-12-16T20:55:28Z; date: ' 'Mon, 16 Dec 2019 20:55:28 GMT]'), 'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION, 'url': 'https://www.bing.com/rs/3R/kD/ic/878ca0cd/b83d57c0.svg'} self.CheckEventValues(storage_writer, events[100], expected_event_values) if __name__ == '__main__': unittest.main()
true
true
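The expected_headers_value in the first test shows that _ConvertHeadersValues essentially joins the CRLF-separated header lines into one bracketed string. A few lines of plain Python reproduce the shape of that output; this approximates the format only and is not the plugin code:

raw = (b'HTTP/1.1 200 OK\r\nContent-Type: image/png\r\n'
       b'Content-Length: 2759\r\n\r\n')
lines = [line for line in raw.decode('ascii').split('\r\n') if line]
print('[{0:s}]'.format('; '.join(lines)))
# [HTTP/1.1 200 OK; Content-Type: image/png; Content-Length: 2759]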
790311018db12a006971d64e0c9032f3195ee66e
8,652
py
Python
dummydf/sql/dataframe.py
moriyoshi/dummydf
39d82f0022ea9d072ce56724f16bf363a37b1bbf
[ "MIT" ]
null
null
null
dummydf/sql/dataframe.py
moriyoshi/dummydf
39d82f0022ea9d072ce56724f16bf363a37b1bbf
[ "MIT" ]
null
null
null
dummydf/sql/dataframe.py
moriyoshi/dummydf
39d82f0022ea9d072ce56724f16bf363a37b1bbf
[ "MIT" ]
null
null
null
# coding: utf-8 # # Copyright 2018 Moriyoshi Koizumi # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import six import pandas from .column import _DataFrameColumn, _Function, _Literal, eval_column, infer_data_type, compile_to_raf, resolve_alias from .functions import SimpleAggregationFunctionSpec from .group import GroupedData from .types import StructType, StructField class _Raw(object): def __init__(self, pdf): self.pdf = pdf def __call__(self, df): return self.pdf class _Filter(object): def __init__(self, df, expr): self.df = df self.expr = expr def __call__(self, df): raf = compile_to_raf(df, self.expr) pdf = self.df._yield_pdf() return pdf.loc[raf] class _Aggregation(object): def __init__(self, grouped_data, agg_cols): self.grouped_data = grouped_data self.agg_cols = agg_cols def __call__(self, df): pdf = self.grouped_data.df._yield_pdf() agg_fn_cols = [] agg_variations = set() const_cols = [] resolved_cols = [] for col in self.agg_cols: col = resolve_alias(col) resolved_cols.append(col) if isinstance(col, _Function): agg_fn_cols.append(col) if isinstance(col.spec, SimpleAggregationFunctionSpec): agg_variations.add(col.spec.fn) else: raise TypeError() elif isinstance(col, _Literal): const_cols.append(col) else: raise TypeError(col.__class__) if len(self.grouped_data.cols) > 0: pg = pdf.groupby( by=[pdf.iloc[:,col.index] for col in self.grouped_data.cols] ) agg_result = pg.aggregate(list(agg_variations)) agg_result = pandas.concat([agg_result.index.to_frame(), agg_result], axis=1) # convert columns to a set of series agg_result_index = agg_result.index.to_frame() series_set = [ agg_result_index[col].rename(i) for i, col in enumerate(agg_result_index.columns) ] for col in resolved_cols: if isinstance(col, _Function): if isinstance(col.spec, SimpleAggregationFunctionSpec): series = agg_result[col.operands[0].index, col.spec.fn].rename(len(series_set)) else: # should never get here; already validated in the above loop assert False elif isinstance(col, _Literal): series = pandas.Series([col.value], name=len(series_set)) else: # should never get here; already validated in the above loop assert False series_set.append(series) else: agg_result = pdf.aggregate(list(agg_variations)) # convert columns to a set of series series_set = [] for col in self.agg_cols: if isinstance(col, _Function): if isinstance(col.spec, SimpleAggregationFunctionSpec): series = pandas.Series([agg_result[col.operands[0].index][col.spec.fn]], name=len(series_set)) else: # should never get here; already validated in the above loop 
assert False elif isinstance(col, _Literal): series = pandas.Series([col.value], name=len(series_set)) else: # should never get here; already validated in the above loop assert False series_set.append(series) return pandas.concat(series_set, axis=1) class _WithColumns(object): def __init__(self, df, name_col_pairs): self.df = df self.name_col_pairs = name_col_pairs def __call__(self, df): extra_fields = df.schema.fields[len(self.df.schema.fields):] lhs = self.df._yield_pdf() return pandas.concat( [lhs] + [ eval_column(df, lhs, col).rename(i) for i, (_, col) in enumerate(self.name_col_pairs, len(self.df.columns)) ], axis=1 ) class _Union(object): def __init__(self, df, following): self.df = df self.following = following def __call__(self, df): return pandas.concat([self.df._yield_pdf(), self.following._yield_pdf()], axis=0) class _OrderBy(object): def __init__(self, df, cols, ascending=None): self.df = df self.cols = cols self.ascending = ascending def __call__(self, df): assert all(isinstance(col, _DataFrameColumn) for col in self.cols) return self.df._yield_pdf().sort_values(by=[col.index for col in self.cols], ascending=self.ascending) class Row(object): def __init__(self, pdf, schema, i, name_to_column_map): self.pdf = pdf self.schema = schema self.i = i self.name_to_column_map = name_to_column_map def __str__(self): return str(self.pdf.iloc[self.i]) def __getitem__(self, i): if isinstance(i, six.string_types): return self.pdf.iloc[self.i][self.name_to_column_map[i].index] else: return self.pdf.iloc[self.i][i] class DataFrame(object): def __init__(self, sql_ctx, schema, modifier=None): self.sql_ctx = sql_ctx self.schema = schema self.modifier = modifier self._columns = [ _DataFrameColumn(self, f, i) for i, f in enumerate(schema.fields) ] self._name_to_column_map = { f.name: c for f, c in zip(schema.fields, self._columns) } def __getitem__(self, i): if isinstance(i, six.string_types): return self._name_to_column_map[i] elif isinstance(i, (int, long)): return self._columns[i] else: raise TypeError() def filter(self, cond): return DataFrame( self.sql_ctx, self.schema, _Filter(self, cond) ) def groupBy(self, *cols): return GroupedData(self, cols) def agg(self, *exprs): return self.groupBy().agg(*exprs) def withColumn(self, name, col): return self._with_columns([(name, col)]) def unionAll(self, following): return DataFrame( self.sql_ctx, self.schema, _Union(self, following) ) def orderBy(self, *cols, **kwargs): ascending = kwargs.pop('ascending', None) return DataFrame( self.sql_ctx, self.schema, _OrderBy(self, cols, ascending) ) @property def columns(self): return [col.field.name for col in self._columns] def _with_columns(self, name_col_pairs): return DataFrame( self.sql_ctx, StructType( fields=self.schema.fields + [ StructField( name, infer_data_type(col) ) for name, col in name_col_pairs ] ), _WithColumns(self, name_col_pairs) ) def _yield_pdf(self): return self.modifier(self) def collect(self): pdf = self._yield_pdf() return [ Row(pdf, self.schema, i, self._name_to_column_map) for i in range(0, len(pdf)) ]
33.796875
118
0.589112
import six import pandas from .column import _DataFrameColumn, _Function, _Literal, eval_column, infer_data_type, compile_to_raf, resolve_alias from .functions import SimpleAggregationFunctionSpec from .group import GroupedData from .types import StructType, StructField class _Raw(object): def __init__(self, pdf): self.pdf = pdf def __call__(self, df): return self.pdf class _Filter(object): def __init__(self, df, expr): self.df = df self.expr = expr def __call__(self, df): raf = compile_to_raf(df, self.expr) pdf = self.df._yield_pdf() return pdf.loc[raf] class _Aggregation(object): def __init__(self, grouped_data, agg_cols): self.grouped_data = grouped_data self.agg_cols = agg_cols def __call__(self, df): pdf = self.grouped_data.df._yield_pdf() agg_fn_cols = [] agg_variations = set() const_cols = [] resolved_cols = [] for col in self.agg_cols: col = resolve_alias(col) resolved_cols.append(col) if isinstance(col, _Function): agg_fn_cols.append(col) if isinstance(col.spec, SimpleAggregationFunctionSpec): agg_variations.add(col.spec.fn) else: raise TypeError() elif isinstance(col, _Literal): const_cols.append(col) else: raise TypeError(col.__class__) if len(self.grouped_data.cols) > 0: pg = pdf.groupby( by=[pdf.iloc[:,col.index] for col in self.grouped_data.cols] ) agg_result = pg.aggregate(list(agg_variations)) agg_result = pandas.concat([agg_result.index.to_frame(), agg_result], axis=1) agg_result_index = agg_result.index.to_frame() series_set = [ agg_result_index[col].rename(i) for i, col in enumerate(agg_result_index.columns) ] for col in resolved_cols: if isinstance(col, _Function): if isinstance(col.spec, SimpleAggregationFunctionSpec): series = agg_result[col.operands[0].index, col.spec.fn].rename(len(series_set)) else: assert False elif isinstance(col, _Literal): series = pandas.Series([col.value], name=len(series_set)) else: assert False series_set.append(series) else: agg_result = pdf.aggregate(list(agg_variations)) series_set = [] for col in self.agg_cols: if isinstance(col, _Function): if isinstance(col.spec, SimpleAggregationFunctionSpec): series = pandas.Series([agg_result[col.operands[0].index][col.spec.fn]], name=len(series_set)) else: assert False elif isinstance(col, _Literal): series = pandas.Series([col.value], name=len(series_set)) else: assert False series_set.append(series) return pandas.concat(series_set, axis=1) class _WithColumns(object): def __init__(self, df, name_col_pairs): self.df = df self.name_col_pairs = name_col_pairs def __call__(self, df): extra_fields = df.schema.fields[len(self.df.schema.fields):] lhs = self.df._yield_pdf() return pandas.concat( [lhs] + [ eval_column(df, lhs, col).rename(i) for i, (_, col) in enumerate(self.name_col_pairs, len(self.df.columns)) ], axis=1 ) class _Union(object): def __init__(self, df, following): self.df = df self.following = following def __call__(self, df): return pandas.concat([self.df._yield_pdf(), self.following._yield_pdf()], axis=0) class _OrderBy(object): def __init__(self, df, cols, ascending=None): self.df = df self.cols = cols self.ascending = ascending def __call__(self, df): assert all(isinstance(col, _DataFrameColumn) for col in self.cols) return self.df._yield_pdf().sort_values(by=[col.index for col in self.cols], ascending=self.ascending) class Row(object): def __init__(self, pdf, schema, i, name_to_column_map): self.pdf = pdf self.schema = schema self.i = i self.name_to_column_map = name_to_column_map def __str__(self): return str(self.pdf.iloc[self.i]) def __getitem__(self, i): if isinstance(i, 
six.string_types): return self.pdf.iloc[self.i][self.name_to_column_map[i].index] else: return self.pdf.iloc[self.i][i] class DataFrame(object): def __init__(self, sql_ctx, schema, modifier=None): self.sql_ctx = sql_ctx self.schema = schema self.modifier = modifier self._columns = [ _DataFrameColumn(self, f, i) for i, f in enumerate(schema.fields) ] self._name_to_column_map = { f.name: c for f, c in zip(schema.fields, self._columns) } def __getitem__(self, i): if isinstance(i, six.string_types): return self._name_to_column_map[i] elif isinstance(i, (int, long)): return self._columns[i] else: raise TypeError() def filter(self, cond): return DataFrame( self.sql_ctx, self.schema, _Filter(self, cond) ) def groupBy(self, *cols): return GroupedData(self, cols) def agg(self, *exprs): return self.groupBy().agg(*exprs) def withColumn(self, name, col): return self._with_columns([(name, col)]) def unionAll(self, following): return DataFrame( self.sql_ctx, self.schema, _Union(self, following) ) def orderBy(self, *cols, **kwargs): ascending = kwargs.pop('ascending', None) return DataFrame( self.sql_ctx, self.schema, _OrderBy(self, cols, ascending) ) @property def columns(self): return [col.field.name for col in self._columns] def _with_columns(self, name_col_pairs): return DataFrame( self.sql_ctx, StructType( fields=self.schema.fields + [ StructField( name, infer_data_type(col) ) for name, col in name_col_pairs ] ), _WithColumns(self, name_col_pairs) ) def _yield_pdf(self): return self.modifier(self) def collect(self): pdf = self._yield_pdf() return [ Row(pdf, self.schema, i, self._name_to_column_map) for i in range(0, len(pdf)) ]
true
true
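A minimal usage sketch of the DataFrame wrapper in the record above. The types module path and IntegerType are assumptions inferred from the record's relative imports, and sql_ctx is stored but never used by the code shown, so None is passed here.

import pandas
from dummydf.sql.dataframe import DataFrame, _Raw
# Assumed module path and type class; the record only shows "from .types import StructType, StructField".
from dummydf.sql.types import StructType, StructField, IntegerType

# Positional integer column labels, matching how Row looks values up by column index.
pdf = pandas.DataFrame({0: [1, 2, 3], 1: [30, 10, 20]})

schema = StructType(fields=[
    StructField('id', IntegerType()),
    StructField('value', IntegerType()),
])

df = DataFrame(None, schema, _Raw(pdf))  # sql_ctx is unused by the paths exercised here

ordered = df.orderBy(df['value'], ascending=False)
for row in ordered.collect():
    print(row['id'], row['value'])  # 1 30, then 3 20, then 2 10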
790311fbfeca18a7586a061b27aa05ed3a16fc1d
29,143
py
Python
pommerman/forward_model.py
psyoblade/playground
28e60c24004a84d2fd70907988b06bd46d0446ca
[ "Apache-2.0" ]
2
2018-08-26T05:41:10.000Z
2018-09-09T04:36:48.000Z
pommerman/forward_model.py
psyoblade/playground
28e60c24004a84d2fd70907988b06bd46d0446ca
[ "Apache-2.0" ]
null
null
null
pommerman/forward_model.py
psyoblade/playground
28e60c24004a84d2fd70907988b06bd46d0446ca
[ "Apache-2.0" ]
2
2018-08-26T05:41:12.000Z
2018-09-09T04:37:11.000Z
'''Module to manage and advanced game state''' from collections import defaultdict import numpy as np from . import constants from . import characters from . import utility class ForwardModel(object): """Class for helping with the [forward] modeling of the game state.""" def run(self, num_times, board, agents, bombs, items, flames, is_partially_observable, agent_view_size, action_space, training_agent=None, is_communicative=False): """Run the forward model. Args: num_times: The number of times to run it for. This is a maximum and it will stop early if we reach a done. board: The board state to run it from. agents: The agents to use to run it. bombs: The starting bombs. items: The starting items. flames: The starting flames. is_partially_observable: Whether the board is partially observable or not. Only applies to TeamRadio. agent_view_size: If it's partially observable, then the size of the square that the agent can view. action_space: The actions that each agent can take. training_agent: The training agent to pass to done. is_communicative: Whether the action depends on communication observations as well. Returns: steps: The list of step results, which are each a dict of "obs", "next_obs", "reward", "action". board: Updated board. agents: Updated agents, same models though. bombs: Updated bombs. items: Updated items. flames: Updated flames. done: Whether we completed the game in these steps. info: The result of the game if it's completed. """ steps = [] for _ in num_times: obs = self.get_observations( board, agents, bombs, is_partially_observable, agent_view_size) actions = self.act( agents, obs, action_space, is_communicative=is_communicative) board, agents, bombs, items, flames = self.step( actions, board, agents, bombs, items, flames) next_obs = self.get_observations( board, agents, bombs, is_partially_observable, agent_view_size) reward = self.get_rewards(agents, game_type, step_count, max_steps) done = self.get_done(agents, game_type, step_count, max_steps, training_agent) info = self.get_info(done, rewards, game_type, agents) steps.append({ "obs": obs, "next_obs": next_obs, "reward": reward, "actions": actions, }) if done: # Callback to let the agents know that the game has ended. for agent in agents: agent.episode_end(reward[agent.agent_id]) break return steps, board, agents, bombs, items, flames, done, info @staticmethod def act(agents, obs, action_space, is_communicative=False): """Returns actions for each agent in this list. Args: agents: A list of agent objects. obs: A list of matching observations per agent. action_space: The action space for the environment using this model. is_communicative: Whether the action depends on communication observations as well. Returns a list of actions. 
""" def act_ex_communication(agent): '''Handles agent's move without communication''' if agent.is_alive: return agent.act(obs[agent.agent_id], action_space=action_space) else: return constants.Action.Stop.value def act_with_communication(agent): '''Handles agent's move with communication''' if agent.is_alive: action = agent.act( obs[agent.agent_id], action_space=action_space) if type(action) == int: action = [action] + [0, 0] assert (type(action) == list) return action else: return [constants.Action.Stop.value, 0, 0] ret = [] for agent in agents: if is_communicative: ret.append(act_with_communication(agent)) else: ret.append(act_ex_communication(agent)) return ret @staticmethod def step(actions, curr_board, curr_agents, curr_bombs, curr_items, curr_flames, max_blast_strength=10): board_size = len(curr_board) # Tick the flames. Replace any dead ones with passages. If there is an # item there, then reveal that item. flames = [] for flame in curr_flames: position = flame.position if flame.is_dead(): item_value = curr_items.get(position) if item_value: del curr_items[position] else: item_value = constants.Item.Passage.value curr_board[position] = item_value else: flame.tick() flames.append(flame) curr_flames = flames # Redraw all current flames # Multiple flames may share a position and the map should contain # a flame until all flames are dead to avoid issues with bomb # movements and explosions. for flame in curr_flames: curr_board[flame.position] = constants.Item.Flames.value # Step the living agents and moving bombs. # If two agents try to go to the same spot, they should bounce back to # their previous spots. This is complicated with one example being when # there are three agents all in a row. If the one in the middle tries # to go to the left and bounces with the one on the left, and then the # one on the right tried to go to the middle one's position, she should # also bounce. A way of doing this is to gather all the new positions # before taking any actions. Then, if there are disputes, correct those # disputes iteratively. # Additionally, if two agents try to switch spots by moving into each # Figure out desired next position for alive agents alive_agents = [agent for agent in curr_agents if agent.is_alive] desired_agent_positions = [agent.position for agent in alive_agents] for num_agent, agent in enumerate(alive_agents): position = agent.position # We change the curr_board here as a safeguard. We will later # update the agent's new position. curr_board[position] = constants.Item.Passage.value action = actions[agent.agent_id] if action == constants.Action.Stop.value: pass elif action == constants.Action.Bomb.value: position = agent.position if not utility.position_is_bomb(curr_bombs, position): bomb = agent.maybe_lay_bomb() if bomb: curr_bombs.append(bomb) elif utility.is_valid_direction(curr_board, position, action): desired_agent_positions[num_agent] = agent.get_next_position( action) # Gather desired next positions for moving bombs. Handle kicks later. 
desired_bomb_positions = [bomb.position for bomb in curr_bombs] for num_bomb, bomb in enumerate(curr_bombs): curr_board[bomb.position] = constants.Item.Passage.value if bomb.is_moving(): desired_position = utility.get_next_position( bomb.position, bomb.moving_direction) if utility.position_on_board(curr_board, desired_position) \ and not utility.position_is_powerup(curr_board, desired_position) \ and not utility.position_is_wall(curr_board, desired_position): desired_bomb_positions[num_bomb] = desired_position # Position switches: # Agent <-> Agent => revert both to previous position. # Bomb <-> Bomb => revert both to previous position. # Agent <-> Bomb => revert Bomb to previous position. crossings = {} def crossing(current, desired): '''Checks to see if an agent is crossing paths''' current_x, current_y = current desired_x, desired_y = desired if current_x != desired_x: assert current_y == desired_y return ('X', min(current_x, desired_x), current_y) assert current_x == desired_x return ('Y', current_x, min(current_y, desired_y)) for num_agent, agent in enumerate(alive_agents): if desired_agent_positions[num_agent] != agent.position: desired_position = desired_agent_positions[num_agent] border = crossing(agent.position, desired_position) if border in crossings: # Crossed another agent - revert both to prior positions. desired_agent_positions[num_agent] = agent.position num_agent2, _ = crossings[border] desired_agent_positions[num_agent2] = alive_agents[ num_agent2].position else: crossings[border] = (num_agent, True) for num_bomb, bomb in enumerate(curr_bombs): if desired_bomb_positions[num_bomb] != bomb.position: desired_position = desired_bomb_positions[num_bomb] border = crossing(bomb.position, desired_position) if border in crossings: # Crossed - revert to prior position. desired_bomb_positions[num_bomb] = bomb.position num, is_agent = crossings[border] if not is_agent: # Crossed bomb - revert that to prior position as well. desired_bomb_positions[num] = curr_bombs[num].position else: crossings[border] = (num_bomb, False) # Deal with multiple agents or multiple bomb collisions on desired next # position by resetting desired position to current position for # everyone involved in the collision. agent_occupancy = defaultdict(int) bomb_occupancy = defaultdict(int) for desired_position in desired_agent_positions: agent_occupancy[desired_position] += 1 for desired_position in desired_bomb_positions: bomb_occupancy[desired_position] += 1 # Resolve >=2 agents or >=2 bombs trying to occupy the same space. change = True while change: change = False for num_agent, agent in enumerate(alive_agents): desired_position = desired_agent_positions[num_agent] curr_position = agent.position # Either another agent is going to this position or more than # one bomb is going to this position. In both scenarios, revert # to the original position. if desired_position != curr_position and \ (agent_occupancy[desired_position] > 1 or bomb_occupancy[desired_position] > 1): desired_agent_positions[num_agent] = curr_position agent_occupancy[curr_position] += 1 change = True for num_bomb, bomb in enumerate(curr_bombs): desired_position = desired_bomb_positions[num_bomb] curr_position = bomb.position if desired_position != curr_position and \ (bomb_occupancy[desired_position] > 1 or agent_occupancy[desired_position] > 1): desired_bomb_positions[num_bomb] = curr_position bomb_occupancy[curr_position] += 1 change = True # Handle kicks. 
agent_indexed_by_kicked_bomb = {} kicked_bomb_indexed_by_agent = {} delayed_bomb_updates = [] delayed_agent_updates = [] # Loop through all bombs to see if they need a good kicking or cause # collisions with an agent. for num_bomb, bomb in enumerate(curr_bombs): desired_position = desired_bomb_positions[num_bomb] if agent_occupancy[desired_position] == 0: # There was never an agent around to kick or collide. continue agent_list = [ (num_agent, agent) for (num_agent, agent) in enumerate(alive_agents) \ if desired_position == desired_agent_positions[num_agent]] if not agent_list: # Agents moved from collision. continue # The agent_list should contain a single element at this point. assert (len(agent_list) == 1) num_agent, agent = agent_list[0] if desired_position == agent.position: # Agent did not move if desired_position != bomb.position: # Bomb moved, but agent did not. The bomb should revert # and stop. delayed_bomb_updates.append((num_bomb, bomb.position)) continue # NOTE: At this point, we have that the agent in question tried to # move into this position. if not agent.can_kick: # If we move the agent at this point, then we risk having two # agents on a square in future iterations of the loop. So we # push this change to the next stage instead. delayed_bomb_updates.append((num_bomb, bomb.position)) delayed_agent_updates.append((num_agent, agent.position)) continue # Agent moved and can kick - see if the target for the kick never had anyhing on it direction = constants.Action(actions[agent.agent_id]) target_position = utility.get_next_position(desired_position, direction) if utility.position_on_board(curr_board, target_position) and \ agent_occupancy[target_position] == 0 and \ bomb_occupancy[target_position] == 0 and \ not utility.position_is_powerup(curr_board, target_position) and \ not utility.position_is_wall(curr_board, target_position): # Ok to update bomb desired location as we won't iterate over it again here # but we can not update bomb_occupancy on target position and need to check it again # However we need to set the bomb count on the current position to zero so # that the agent can stay on this position. bomb_occupancy[desired_position] = 0 delayed_bomb_updates.append((num_bomb, target_position)) agent_indexed_by_kicked_bomb[num_bomb] = num_agent kicked_bomb_indexed_by_agent[num_agent] = num_bomb bomb.moving_direction = direction # Bombs may still collide and we then need to reverse bomb and agent .. else: delayed_bomb_updates.append((num_bomb, bomb.position)) delayed_agent_updates.append((num_agent, agent.position)) for (num_bomb, bomb_position) in delayed_bomb_updates: desired_bomb_positions[num_bomb] = bomb_position bomb_occupancy[bomb_position] += 1 change = True for (num_agent, agent_position) in delayed_agent_updates: desired_agent_positions[num_agent] = agent_position agent_occupancy[agent_position] += 1 change = True while change: change = False for num_agent, agent in enumerate(alive_agents): desired_position = desired_agent_positions[num_agent] curr_position = agent.position # Agents and bombs can only share a square if they are both in their # original position (Agent dropped bomb and has not moved) if desired_position != curr_position and \ (agent_occupancy[desired_position] > 1 or bomb_occupancy[desired_position] != 0): # Late collisions resulting from failed kicks force this agent to stay at the # original position. Check if this agent successfully kicked a bomb above and undo # the kick. 
if num_agent in kicked_bomb_indexed_by_agent: num_bomb = kicked_bomb_indexed_by_agent[num_agent] bomb = curr_bombs[num_bomb] desired_bomb_positions[num_bomb] = bomb.position bomb_occupancy[bomb.position] += 1 del agent_indexed_by_kicked_bomb[num_bomb] del kicked_bomb_indexed_by_agent[num_agent] desired_agent_positions[num_agent] = curr_position agent_occupancy[curr_position] += 1 change = True for num_bomb, bomb in enumerate(curr_bombs): desired_position = desired_bomb_positions[num_bomb] curr_position = bomb.position # This bomb may be a boomerang, i.e. it was kicked back to the # original location it moved from. If it is blocked now, it # can't be kicked and the agent needs to move back to stay # consistent with other movements. if desired_position == curr_position and num_bomb not in agent_indexed_by_kicked_bomb: continue bomb_occupancy_ = bomb_occupancy[desired_position] agent_occupancy_ = agent_occupancy[desired_position] # Agents and bombs can only share a square if they are both in their # original position (Agent dropped bomb and has not moved) if bomb_occupancy_ > 1 or agent_occupancy_ != 0: desired_bomb_positions[num_bomb] = curr_position bomb_occupancy[curr_position] += 1 num_agent = agent_indexed_by_kicked_bomb.get(num_bomb) if num_agent is not None: agent = alive_agents[num_agent] desired_agent_positions[num_agent] = agent.position agent_occupancy[agent.position] += 1 del kicked_bomb_indexed_by_agent[num_agent] del agent_indexed_by_kicked_bomb[num_bomb] change = True for num_bomb, bomb in enumerate(curr_bombs): if desired_bomb_positions[num_bomb] == bomb.position and \ not num_bomb in agent_indexed_by_kicked_bomb: # Bomb was not kicked this turn and its desired position is its # current location. Stop it just in case it was moving before. bomb.stop() else: # Move bomb to the new position. # NOTE: We already set the moving direction up above. bomb.position = desired_bomb_positions[num_bomb] for num_agent, agent in enumerate(alive_agents): if desired_agent_positions[num_agent] != agent.position: agent.move(actions[agent.agent_id]) if utility.position_is_powerup(curr_board, agent.position): agent.pick_up( constants.Item(curr_board[agent.position]), max_blast_strength=max_blast_strength) # Explode bombs. exploded_map = np.zeros_like(curr_board) has_new_explosions = False for bomb in curr_bombs: bomb.tick() if bomb.exploded(): has_new_explosions = True elif curr_board[bomb.position] == constants.Item.Flames.value: bomb.fire() has_new_explosions = True # Chain the explosions. while has_new_explosions: next_bombs = [] has_new_explosions = False for bomb in curr_bombs: if not bomb.exploded(): next_bombs.append(bomb) continue bomb.bomber.incr_ammo() for _, indices in bomb.explode().items(): for r, c in indices: if not all( [r >= 0, c >= 0, r < board_size, c < board_size]): break if curr_board[r][c] == constants.Item.Rigid.value: break exploded_map[r][c] = 1 if curr_board[r][c] == constants.Item.Wood.value: break curr_bombs = next_bombs for bomb in curr_bombs: if bomb.in_range(exploded_map): bomb.fire() has_new_explosions = True # Update the board's bombs. for bomb in curr_bombs: curr_board[bomb.position] = constants.Item.Bomb.value # Update the board's flames. flame_positions = np.where(exploded_map == 1) for row, col in zip(flame_positions[0], flame_positions[1]): curr_flames.append(characters.Flame((row, col))) for flame in curr_flames: curr_board[flame.position] = constants.Item.Flames.value # Kill agents on flames. Otherwise, update position on curr_board. 
for agent in alive_agents: if curr_board[agent.position] == constants.Item.Flames.value: agent.die() else: curr_board[agent.position] = utility.agent_value(agent.agent_id) return curr_board, curr_agents, curr_bombs, curr_items, curr_flames def get_observations(self, curr_board, agents, bombs, is_partially_observable, agent_view_size, game_type, game_env): """Gets the observations as an np.array of the visible squares. The agent gets to choose whether it wants to keep the fogged part in memory. """ board_size = len(curr_board) def make_bomb_maps(position): ''' Makes an array of an agents bombs and the bombs attributes ''' blast_strengths = np.zeros((board_size, board_size)) life = np.zeros((board_size, board_size)) for bomb in bombs: x, y = bomb.position if not is_partially_observable \ or in_view_range(position, x, y): blast_strengths[(x, y)] = bomb.blast_strength life[(x, y)] = bomb.life return blast_strengths, life def in_view_range(position, v_row, v_col): '''Checks to see if a tile is in an agents viewing area''' row, col = position return all([ row >= v_row - agent_view_size, row <= v_row + agent_view_size, col >= v_col - agent_view_size, col <= v_col + agent_view_size ]) attrs = [ 'position', 'blast_strength', 'can_kick', 'teammate', 'ammo', 'enemies' ] alive_agents = [ utility.agent_value(agent.agent_id) for agent in agents if agent.is_alive ] observations = [] for agent in agents: agent_obs = {'alive': alive_agents} board = curr_board if is_partially_observable: board = board.copy() for row in range(board_size): for col in range(board_size): if not in_view_range(agent.position, row, col): board[row, col] = constants.Item.Fog.value agent_obs['board'] = board bomb_blast_strengths, bomb_life = make_bomb_maps(agent.position) agent_obs['bomb_blast_strength'] = bomb_blast_strengths agent_obs['bomb_life'] = bomb_life agent_obs['game_type'] = game_type.value agent_obs['game_env'] = game_env for attr in attrs: assert hasattr(agent, attr) agent_obs[attr] = getattr(agent, attr) observations.append(agent_obs) return observations @staticmethod def get_done(agents, step_count, max_steps, game_type, training_agent): # print('get_done called...', training_agent) alive = [agent for agent in agents if agent.is_alive] alive_ids = sorted([agent.agent_id for agent in alive]) if step_count >= max_steps: print('gameover : max timestep over') return True elif game_type == constants.GameType.FFA: if training_agent is not None and training_agent not in alive_ids: print('gameover : ffa training_agent has died') return True if len(alive) <= 1: print('checkout : ffa only %s player survived' % len(alive)) return len(alive) <= 1 elif len(alive_ids) <= 1: print('gameover : only one player survived') return True elif alive_ids == [0, 2]: print('gameover : [0,2] team won') return True elif any([ alive_ids == [1, 3] ]): print('gameover : [1,3] team won') return True return False @staticmethod def get_info(done, rewards, game_type, agents): if game_type == constants.GameType.FFA: alive = [agent for agent in agents if agent.is_alive] if done: if len(alive) != 1: # Either we have more than 1 alive (reached max steps) or # we have 0 alive (last agents died at the same time). return { 'result': constants.Result.Tie, } else: return { 'result': constants.Result.Win, 'winners': [num for num, reward in enumerate(rewards) \ if reward == 1] } else: return { 'result': constants.Result.Incomplete, } elif done: # We are playing a team game. 
if rewards == [-1] * 4: return { 'result': constants.Result.Tie, } else: return { 'result': constants.Result.Win, 'winners': [num for num, reward in enumerate(rewards) \ if reward == 1], } else: return { 'result': constants.Result.Incomplete, } @staticmethod def get_rewards(agents, game_type, step_count, max_steps): print('get_rewards called..', self.training_agent) def any_lst_equal(lst, values): '''Checks if list are equal''' return any([lst == v for v in values]) alive_agents = [num for num, agent in enumerate(agents) \ if agent.is_alive] if game_type == constants.GameType.FFA: if len(alive_agents) == 1: # An agent won. Give them +1, others -1. return [2 * int(agent.is_alive) - 1 for agent in agents] elif step_count >= max_steps: # Game is over from time. Everyone gets -1. return [-1] * 4 else: # Game running: 0 for alive, -1 for dead. return [int(agent.is_alive) - 1 for agent in agents] else: # We are playing a team game. if any_lst_equal(alive_agents, [[0, 2], [0], [2]]): # Team [0, 2] wins. return [1, -1, 1, -1] elif any_lst_equal(alive_agents, [[1, 3], [1], [3]]): # Team [1, 3] wins. return [-1, 1, -1, 1] elif step_count >= max_steps: # Game is over by max_steps. All agents tie. return [-1] * 4 elif len(alive_agents) == 0: # Everyone's dead. All agents tie. return [-1] * 4 else: # No team has yet won or lost. return [0] * 4
44.561162
103
0.564801
from collections import defaultdict import numpy as np from . import constants from . import characters from . import utility class ForwardModel(object): def run(self, num_times, board, agents, bombs, items, flames, is_partially_observable, agent_view_size, action_space, training_agent=None, is_communicative=False): steps = [] for _ in num_times: obs = self.get_observations( board, agents, bombs, is_partially_observable, agent_view_size) actions = self.act( agents, obs, action_space, is_communicative=is_communicative) board, agents, bombs, items, flames = self.step( actions, board, agents, bombs, items, flames) next_obs = self.get_observations( board, agents, bombs, is_partially_observable, agent_view_size) reward = self.get_rewards(agents, game_type, step_count, max_steps) done = self.get_done(agents, game_type, step_count, max_steps, training_agent) info = self.get_info(done, rewards, game_type, agents) steps.append({ "obs": obs, "next_obs": next_obs, "reward": reward, "actions": actions, }) if done: for agent in agents: agent.episode_end(reward[agent.agent_id]) break return steps, board, agents, bombs, items, flames, done, info @staticmethod def act(agents, obs, action_space, is_communicative=False): def act_ex_communication(agent): if agent.is_alive: return agent.act(obs[agent.agent_id], action_space=action_space) else: return constants.Action.Stop.value def act_with_communication(agent): if agent.is_alive: action = agent.act( obs[agent.agent_id], action_space=action_space) if type(action) == int: action = [action] + [0, 0] assert (type(action) == list) return action else: return [constants.Action.Stop.value, 0, 0] ret = [] for agent in agents: if is_communicative: ret.append(act_with_communication(agent)) else: ret.append(act_ex_communication(agent)) return ret @staticmethod def step(actions, curr_board, curr_agents, curr_bombs, curr_items, curr_flames, max_blast_strength=10): board_size = len(curr_board) flames = [] for flame in curr_flames: position = flame.position if flame.is_dead(): item_value = curr_items.get(position) if item_value: del curr_items[position] else: item_value = constants.Item.Passage.value curr_board[position] = item_value else: flame.tick() flames.append(flame) curr_flames = flames for flame in curr_flames: curr_board[flame.position] = constants.Item.Flames.value # also bounce. A way of doing this is to gather all the new positions # before taking any actions. Then, if there are disputes, correct those # disputes iteratively. # Additionally, if two agents try to switch spots by moving into each # Figure out desired next position for alive agents alive_agents = [agent for agent in curr_agents if agent.is_alive] desired_agent_positions = [agent.position for agent in alive_agents] for num_agent, agent in enumerate(alive_agents): position = agent.position # We change the curr_board here as a safeguard. We will later # update the agent's new position. 
curr_board[position] = constants.Item.Passage.value action = actions[agent.agent_id] if action == constants.Action.Stop.value: pass elif action == constants.Action.Bomb.value: position = agent.position if not utility.position_is_bomb(curr_bombs, position): bomb = agent.maybe_lay_bomb() if bomb: curr_bombs.append(bomb) elif utility.is_valid_direction(curr_board, position, action): desired_agent_positions[num_agent] = agent.get_next_position( action) desired_bomb_positions = [bomb.position for bomb in curr_bombs] for num_bomb, bomb in enumerate(curr_bombs): curr_board[bomb.position] = constants.Item.Passage.value if bomb.is_moving(): desired_position = utility.get_next_position( bomb.position, bomb.moving_direction) if utility.position_on_board(curr_board, desired_position) \ and not utility.position_is_powerup(curr_board, desired_position) \ and not utility.position_is_wall(curr_board, desired_position): desired_bomb_positions[num_bomb] = desired_position crossings = {} def crossing(current, desired): current_x, current_y = current desired_x, desired_y = desired if current_x != desired_x: assert current_y == desired_y return ('X', min(current_x, desired_x), current_y) assert current_x == desired_x return ('Y', current_x, min(current_y, desired_y)) for num_agent, agent in enumerate(alive_agents): if desired_agent_positions[num_agent] != agent.position: desired_position = desired_agent_positions[num_agent] border = crossing(agent.position, desired_position) if border in crossings: desired_agent_positions[num_agent] = agent.position num_agent2, _ = crossings[border] desired_agent_positions[num_agent2] = alive_agents[ num_agent2].position else: crossings[border] = (num_agent, True) for num_bomb, bomb in enumerate(curr_bombs): if desired_bomb_positions[num_bomb] != bomb.position: desired_position = desired_bomb_positions[num_bomb] border = crossing(bomb.position, desired_position) if border in crossings: desired_bomb_positions[num_bomb] = bomb.position num, is_agent = crossings[border] if not is_agent: desired_bomb_positions[num] = curr_bombs[num].position else: crossings[border] = (num_bomb, False) agent_occupancy = defaultdict(int) bomb_occupancy = defaultdict(int) for desired_position in desired_agent_positions: agent_occupancy[desired_position] += 1 for desired_position in desired_bomb_positions: bomb_occupancy[desired_position] += 1 change = True while change: change = False for num_agent, agent in enumerate(alive_agents): desired_position = desired_agent_positions[num_agent] curr_position = agent.position if desired_position != curr_position and \ (agent_occupancy[desired_position] > 1 or bomb_occupancy[desired_position] > 1): desired_agent_positions[num_agent] = curr_position agent_occupancy[curr_position] += 1 change = True for num_bomb, bomb in enumerate(curr_bombs): desired_position = desired_bomb_positions[num_bomb] curr_position = bomb.position if desired_position != curr_position and \ (bomb_occupancy[desired_position] > 1 or agent_occupancy[desired_position] > 1): desired_bomb_positions[num_bomb] = curr_position bomb_occupancy[curr_position] += 1 change = True agent_indexed_by_kicked_bomb = {} kicked_bomb_indexed_by_agent = {} delayed_bomb_updates = [] delayed_agent_updates = [] for num_bomb, bomb in enumerate(curr_bombs): desired_position = desired_bomb_positions[num_bomb] if agent_occupancy[desired_position] == 0: continue agent_list = [ (num_agent, agent) for (num_agent, agent) in enumerate(alive_agents) \ if desired_position == desired_agent_positions[num_agent]] if not 
agent_list: continue assert (len(agent_list) == 1) num_agent, agent = agent_list[0] if desired_position == agent.position: if desired_position != bomb.position: delayed_bomb_updates.append((num_bomb, bomb.position)) continue if not agent.can_kick: delayed_bomb_updates.append((num_bomb, bomb.position)) delayed_agent_updates.append((num_agent, agent.position)) continue direction = constants.Action(actions[agent.agent_id]) target_position = utility.get_next_position(desired_position, direction) if utility.position_on_board(curr_board, target_position) and \ agent_occupancy[target_position] == 0 and \ bomb_occupancy[target_position] == 0 and \ not utility.position_is_powerup(curr_board, target_position) and \ not utility.position_is_wall(curr_board, target_position): # but we can not update bomb_occupancy on target position and need to check it again # However we need to set the bomb count on the current position to zero so # that the agent can stay on this position. bomb_occupancy[desired_position] = 0 delayed_bomb_updates.append((num_bomb, target_position)) agent_indexed_by_kicked_bomb[num_bomb] = num_agent kicked_bomb_indexed_by_agent[num_agent] = num_bomb bomb.moving_direction = direction # Bombs may still collide and we then need to reverse bomb and agent .. else: delayed_bomb_updates.append((num_bomb, bomb.position)) delayed_agent_updates.append((num_agent, agent.position)) for (num_bomb, bomb_position) in delayed_bomb_updates: desired_bomb_positions[num_bomb] = bomb_position bomb_occupancy[bomb_position] += 1 change = True for (num_agent, agent_position) in delayed_agent_updates: desired_agent_positions[num_agent] = agent_position agent_occupancy[agent_position] += 1 change = True while change: change = False for num_agent, agent in enumerate(alive_agents): desired_position = desired_agent_positions[num_agent] curr_position = agent.position # Agents and bombs can only share a square if they are both in their # original position (Agent dropped bomb and has not moved) if desired_position != curr_position and \ (agent_occupancy[desired_position] > 1 or bomb_occupancy[desired_position] != 0): # Late collisions resulting from failed kicks force this agent to stay at the # original position. Check if this agent successfully kicked a bomb above and undo # the kick. if num_agent in kicked_bomb_indexed_by_agent: num_bomb = kicked_bomb_indexed_by_agent[num_agent] bomb = curr_bombs[num_bomb] desired_bomb_positions[num_bomb] = bomb.position bomb_occupancy[bomb.position] += 1 del agent_indexed_by_kicked_bomb[num_bomb] del kicked_bomb_indexed_by_agent[num_agent] desired_agent_positions[num_agent] = curr_position agent_occupancy[curr_position] += 1 change = True for num_bomb, bomb in enumerate(curr_bombs): desired_position = desired_bomb_positions[num_bomb] curr_position = bomb.position # This bomb may be a boomerang, i.e. it was kicked back to the # original location it moved from. 
If it is blocked now, it # can't be kicked and the agent needs to move back to stay if desired_position == curr_position and num_bomb not in agent_indexed_by_kicked_bomb: continue bomb_occupancy_ = bomb_occupancy[desired_position] agent_occupancy_ = agent_occupancy[desired_position] if bomb_occupancy_ > 1 or agent_occupancy_ != 0: desired_bomb_positions[num_bomb] = curr_position bomb_occupancy[curr_position] += 1 num_agent = agent_indexed_by_kicked_bomb.get(num_bomb) if num_agent is not None: agent = alive_agents[num_agent] desired_agent_positions[num_agent] = agent.position agent_occupancy[agent.position] += 1 del kicked_bomb_indexed_by_agent[num_agent] del agent_indexed_by_kicked_bomb[num_bomb] change = True for num_bomb, bomb in enumerate(curr_bombs): if desired_bomb_positions[num_bomb] == bomb.position and \ not num_bomb in agent_indexed_by_kicked_bomb: bomb.stop() else: bomb.position = desired_bomb_positions[num_bomb] for num_agent, agent in enumerate(alive_agents): if desired_agent_positions[num_agent] != agent.position: agent.move(actions[agent.agent_id]) if utility.position_is_powerup(curr_board, agent.position): agent.pick_up( constants.Item(curr_board[agent.position]), max_blast_strength=max_blast_strength) exploded_map = np.zeros_like(curr_board) has_new_explosions = False for bomb in curr_bombs: bomb.tick() if bomb.exploded(): has_new_explosions = True elif curr_board[bomb.position] == constants.Item.Flames.value: bomb.fire() has_new_explosions = True while has_new_explosions: next_bombs = [] has_new_explosions = False for bomb in curr_bombs: if not bomb.exploded(): next_bombs.append(bomb) continue bomb.bomber.incr_ammo() for _, indices in bomb.explode().items(): for r, c in indices: if not all( [r >= 0, c >= 0, r < board_size, c < board_size]): break if curr_board[r][c] == constants.Item.Rigid.value: break exploded_map[r][c] = 1 if curr_board[r][c] == constants.Item.Wood.value: break curr_bombs = next_bombs for bomb in curr_bombs: if bomb.in_range(exploded_map): bomb.fire() has_new_explosions = True for bomb in curr_bombs: curr_board[bomb.position] = constants.Item.Bomb.value # Update the board's flames. 
flame_positions = np.where(exploded_map == 1) for row, col in zip(flame_positions[0], flame_positions[1]): curr_flames.append(characters.Flame((row, col))) for flame in curr_flames: curr_board[flame.position] = constants.Item.Flames.value for agent in alive_agents: if curr_board[agent.position] == constants.Item.Flames.value: agent.die() else: curr_board[agent.position] = utility.agent_value(agent.agent_id) return curr_board, curr_agents, curr_bombs, curr_items, curr_flames def get_observations(self, curr_board, agents, bombs, is_partially_observable, agent_view_size, game_type, game_env): board_size = len(curr_board) def make_bomb_maps(position): blast_strengths = np.zeros((board_size, board_size)) life = np.zeros((board_size, board_size)) for bomb in bombs: x, y = bomb.position if not is_partially_observable \ or in_view_range(position, x, y): blast_strengths[(x, y)] = bomb.blast_strength life[(x, y)] = bomb.life return blast_strengths, life def in_view_range(position, v_row, v_col): row, col = position return all([ row >= v_row - agent_view_size, row <= v_row + agent_view_size, col >= v_col - agent_view_size, col <= v_col + agent_view_size ]) attrs = [ 'position', 'blast_strength', 'can_kick', 'teammate', 'ammo', 'enemies' ] alive_agents = [ utility.agent_value(agent.agent_id) for agent in agents if agent.is_alive ] observations = [] for agent in agents: agent_obs = {'alive': alive_agents} board = curr_board if is_partially_observable: board = board.copy() for row in range(board_size): for col in range(board_size): if not in_view_range(agent.position, row, col): board[row, col] = constants.Item.Fog.value agent_obs['board'] = board bomb_blast_strengths, bomb_life = make_bomb_maps(agent.position) agent_obs['bomb_blast_strength'] = bomb_blast_strengths agent_obs['bomb_life'] = bomb_life agent_obs['game_type'] = game_type.value agent_obs['game_env'] = game_env for attr in attrs: assert hasattr(agent, attr) agent_obs[attr] = getattr(agent, attr) observations.append(agent_obs) return observations @staticmethod def get_done(agents, step_count, max_steps, game_type, training_agent): alive = [agent for agent in agents if agent.is_alive] alive_ids = sorted([agent.agent_id for agent in alive]) if step_count >= max_steps: print('gameover : max timestep over') return True elif game_type == constants.GameType.FFA: if training_agent is not None and training_agent not in alive_ids: print('gameover : ffa training_agent has died') return True if len(alive) <= 1: print('checkout : ffa only %s player survived' % len(alive)) return len(alive) <= 1 elif len(alive_ids) <= 1: print('gameover : only one player survived') return True elif alive_ids == [0, 2]: print('gameover : [0,2] team won') return True elif any([ alive_ids == [1, 3] ]): print('gameover : [1,3] team won') return True return False @staticmethod def get_info(done, rewards, game_type, agents): if game_type == constants.GameType.FFA: alive = [agent for agent in agents if agent.is_alive] if done: if len(alive) != 1: return { 'result': constants.Result.Tie, } else: return { 'result': constants.Result.Win, 'winners': [num for num, reward in enumerate(rewards) \ if reward == 1] } else: return { 'result': constants.Result.Incomplete, } elif done: if rewards == [-1] * 4: return { 'result': constants.Result.Tie, } else: return { 'result': constants.Result.Win, 'winners': [num for num, reward in enumerate(rewards) \ if reward == 1], } else: return { 'result': constants.Result.Incomplete, } @staticmethod def get_rewards(agents, game_type, step_count, 
max_steps): print('get_rewards called..', self.training_agent) def any_lst_equal(lst, values): return any([lst == v for v in values]) alive_agents = [num for num, agent in enumerate(agents) \ if agent.is_alive] if game_type == constants.GameType.FFA: if len(alive_agents) == 1: return [2 * int(agent.is_alive) - 1 for agent in agents] elif step_count >= max_steps: return [-1] * 4 else: return [int(agent.is_alive) - 1 for agent in agents] else: if any_lst_equal(alive_agents, [[0, 2], [0], [2]]): return [1, -1, 1, -1] elif any_lst_equal(alive_agents, [[1, 3], [1], [3]]): return [-1, 1, -1, 1] elif step_count >= max_steps: return [-1] * 4 elif len(alive_agents) == 0: return [-1] * 4 else: # No team has yet won or lost. return [0] * 4
true
true
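One detail of the forward model above that is easy to miss is how step() detects two movers swapping cells: the crossing() helper maps any move between adjacent cells to a key for the border being crossed, so a swap produces the same key twice. A standalone copy of that helper, for illustration only:

def crossing(current, desired):
    """Return a hashable key for the border crossed between two adjacent cells."""
    current_x, current_y = current
    desired_x, desired_y = desired
    if current_x != desired_x:
        # Move along the first axis: key the border by the smaller coordinate.
        assert current_y == desired_y
        return ('X', min(current_x, desired_x), current_y)
    # Move along the second axis.
    assert current_x == desired_x
    return ('Y', current_x, min(current_y, desired_y))


print(crossing((2, 3), (3, 3)))  # ('X', 2, 3)
print(crossing((3, 3), (2, 3)))  # ('X', 2, 3) -- same border, so a swap is detected and reverted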
7903133b41fa1b97b42a57086a2ea357c2af4943
160
py
Python
civet/__init__.py
PMKielstra/Civet
1731a1b1e670a082dfcbf545f3431a79d7954411
[ "MIT" ]
3
2020-06-27T21:33:53.000Z
2020-07-03T07:39:46.000Z
civet/__init__.py
PMKielstra/Civet
1731a1b1e670a082dfcbf545f3431a79d7954411
[ "MIT" ]
null
null
null
civet/__init__.py
PMKielstra/Civet
1731a1b1e670a082dfcbf545f3431a79d7954411
[ "MIT" ]
null
null
null
from .civet import Civet
from .building_blocks import *
from .builtin_scenario_sources import *
from .builtin_analyzers import *
from .builtin_outputs import *
26.666667
39
0.81875
from .civet import Civet
from .building_blocks import *
from .builtin_scenario_sources import *
from .builtin_analyzers import *
from .builtin_outputs import *
true
true
79031374fd0c3d90fc7647c75e2c9ee9cc97362f
1,534
py
Python
widgets/lv_arc/lv_arc.py
ndrogness/lvgl_micropython_examples
ea2245a47af8bb8545d465d3b6611b85b9ca948b
[ "MIT" ]
1
2020-11-20T00:48:38.000Z
2020-11-20T00:48:38.000Z
widgets/lv_arc/lv_arc.py
ndrogness/lvgl_micropython_examples
ea2245a47af8bb8545d465d3b6611b85b9ca948b
[ "MIT" ]
null
null
null
widgets/lv_arc/lv_arc.py
ndrogness/lvgl_micropython_examples
ea2245a47af8bb8545d465d3b6611b85b9ca948b
[ "MIT" ]
null
null
null
import lvgl as lv
import styles


def lv_arc(screen):
    # Create the arc object on lv screen, ie a lv.scr() object
    arc = lv.arc(screen)

    # Set arc size
    arc.set_size(150, 150)

    # Set arc background style color blue
    arc.add_style(arc.PART.BG, styles.gstyle_bg1)

    # Set arc indicator (i.e. line) style to color red
    arc.add_style(arc.PART.INDIC, styles.gstyle_line1)

    # Setup Angles, from docs:
    # Zero degree is at the middle right (3 o'clock) of the object and the degrees are increasing
    # in a clockwise direction. The angles should be in [0;360] range.
    #
    # Get background angle start and end in degrees
    start = arc.get_bg_angle_start()  # default is 135
    end = arc.get_bg_angle_end()  # default is 45

    # Set background angles
    #arc.set_bg_angles(180,max)

    # Set start angle of the arc (0-360 degrees)
    #arc.set_start_angle(0)

    # Get current value of arc
    # print(arc.get_value())  # default is 0
    # print(arc.get_min_value())  # default is 0
    # print(arc.get_max_value())  # default is 100

    # Set the current value (0-100)
    # A percentage of the arc foreground that is filled
    # Note: This doesn't work on micropython?
    # Examples:
    # 50 is 50% filled
    # 100 is 100% filled
    # arc.set_value(5)
    #
    # Or set the value based on end angle (0-360) degrees
    # Set end angle of the arc (0-360 degrees)
    arc.set_end_angle(200)


if __name__ == '__main__':
    lv.init()
    scr = lv.obj()
    lv.scr_load(scr)
    lv_arc(scr)
29.5
98
0.653846
import lvgl as lv
import styles


def lv_arc(screen):
    arc = lv.arc(screen)
    arc.set_size(150, 150)
    arc.add_style(arc.PART.BG, styles.gstyle_bg1)
    arc.add_style(arc.PART.INDIC, styles.gstyle_line1)

    # in a clockwise direction. The angles should be in [0;360] range.
    #
    # Get background angle start and end in degrees
    start = arc.get_bg_angle_start()  # default is 135
    end = arc.get_bg_angle_end()  # default is 45

    # Set background angles
    #arc.set_bg_angles(180,max)

    # Set start angle of the arc (0-360 degrees)
    #arc.set_start_angle(0)

    # Get current value of arc
    # print(arc.get_value())  # default is 0
    # print(arc.get_min_value())  # default is 0
    # print(arc.get_max_value())  # default is 100

    # Set the current value (0-100)
    # A percentage of the arc foreground that is filled
    # Note: This doesn't work on micropython?
    # Examples:
    # 50 is 50% filled
    # 100 is 100% filled
    # arc.set_value(5)
    #
    # Or set the value based on end angle (0-360) degrees
    # Set end angle of the arc (0-360 degrees)
    arc.set_end_angle(200)


if __name__ == '__main__':
    lv.init()
    scr = lv.obj()
    lv.scr_load(scr)
    lv_arc(scr)
true
true
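The comments in the record above note that arc.set_value() did not work on the author's MicroPython build and that set_end_angle() is used instead. A hedged sketch of how a 0-100 value could be mapped onto an end angle, assuming the default background angles (start 135, end 45, a 270 degree sweep); this is an illustration, not part of the original file.

def set_arc_percent(arc, percent, bg_start=135, bg_end=45):
    # 270 degrees of sweep for the default background angles
    sweep = (bg_end - bg_start) % 360
    end_angle = (bg_start + sweep * percent / 100) % 360
    arc.set_end_angle(int(end_angle))

# set_arc_percent(arc, 50) sets the end angle to 270, i.e. half of the sweep past the
# 135 degree start, which fills roughly half of the arc background.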
7903138967210dfe1c31193e8ec853099fe942e5
4,968
py
Python
diy/api.py
sodrooome/diy
1e7e087ad6608ed6770d89aa06ebb347ce5c665c
[ "BSD-3-Clause" ]
6
2019-07-25T07:36:52.000Z
2019-10-31T13:08:01.000Z
diy/api.py
sodrooome/diy
1e7e087ad6608ed6770d89aa06ebb347ce5c665c
[ "BSD-3-Clause" ]
4
2019-06-26T04:25:34.000Z
2020-01-06T19:43:41.000Z
diy/api.py
sodrooome/diy
1e7e087ad6608ed6770d89aa06ebb347ce5c665c
[ "BSD-3-Clause" ]
null
null
null
from __future__ import unicode_literals

import os
import inspect

from webob import Request, Response
from parse import parse
from jinja2 import FileSystemLoader, Environment
from requests import session as RequestsSession
from wsgiadapter import WSGIAdapter as RequestWSGIAdapter


class API:
    """
    this is just for examples to get reponse Hello World in web browser
    """
    def __call__(self, environ, start_response):
        response_body = b"Hello World!"
        status = "200 OK"
        start_response(status, headers=[])
        return iter([response_body])


class RequestAPI:
    """
    this is just for examples to get reponse Hello World in web browser
    """
    def __call__(self, environ, start_response):
        request = Request(environ)
        response = Response()
        response.text = "Hello, World!"
        return response(environ, start_response)


class UserRequest:
    def __call__(self, environ, start_response):
        request = Request(environ)
        response = self.handle_request(request)
        return response(environ, start_response)

    def handle_request(self, request):
        """
        based on mozilla documentation
        """
        user = request.environ.get("HTTP_USER_AGENT", "No User Agent Found")
        response = Response()
        response.text = f"This is {user}"
        return response


class UserRequestHandler:
    def __init__(self):
        self.routes = {}

    def __call__(self, environ, start_response):
        request = Request(environ)
        response = self.handle_request(request)
        return response(environ, start_response)

    def route_url(self, path, handler):
        assert path not in self.routes, "Routes Already Exists"
        self.routes[path] = handler

    def route(self, path):
        def wrapper(handler):
            self.route_url(path, handler)
            return handler
        return wrapper

    def default_response(self, response):
        response.status_code = 404
        response.text = "Page Not Found."

    def handle_request(self, request):
        response = Response()
        handler, kwargs = self.find_handler(request_path=request.path)
        if handler is not None:
            handler(request, response, **kwargs)
        else:
            self.default_response(response)
        return response

    def find_handler(self, request_path):
        for path, handler in self.routes.items():
            parse_result = parse(path, request_path)
            if parse_result is not None:
                return handler, parse_result.named
        return None, None


class UserRequestBasedHandler:
    """
    class for implemented alternative route using class-based handlers
    """
    def __init__(self, templates_dirs="templates"):
        self.routes = {}
        self.templates_env = Environment(
            loader=FileSystemLoader(os.path.abspath(templates_dirs)))

    def __call__(self, environ, start_response):
        request = Request(environ)
        response = self.class_based_request(request)
        return response(environ, start_response)

    def url(self, path, handler):
        assert path not in self.routes, "Routes Already Exists"
        self.routes[path] = handler

    def route(self, path):
        def wrapper(handler):
            self.url(path, handler)
            return handler
        return wrapper

    def default_response(self, response):
        response.status_code = 404
        response.text = "Page Not Found"

    def class_based_request(self, request):
        """
        class based views such as Django already implemented
        """
        response = Response()
        handler, kwargs = self.find_handler_request(request_path=request.path)
        if handler is not None:
            if inspect.isclass(handler):
                handler = getattr(handler(), request.method.lower(), None)
                if handler is None:
                    raise AttributeError("Method now allowed", request.method)
            handler(request, response, **kwargs)
        else:
            self.default_response(response)
        return response

    def find_handler_request(self, request_path):
        for path, handler in self.routes.items():
            parse_result = parse(path, request_path)
            if parse_result is not None:
                return handler, parse_result.named
        return None, None

    def template(self, template_name, context=None):
        if context is None:
            context = {}
        return self.templates_env.get_template(template_name).render(**context)

    def session(self, base_url="http://baseserver"):
        """
        mount it to session object
        any request will start using URL given by prefix base_url
        """
        session = RequestsSession()
        session.mount(prefix=base_url, adapter=RequestWSGIAdapter(self))
        return session
25.218274
79
0.633655
from __future__ import unicode_literals

import os
import inspect

from webob import Request, Response
from parse import parse
from jinja2 import FileSystemLoader, Environment
from requests import session as RequestsSession
from wsgiadapter import WSGIAdapter as RequestWSGIAdapter


class API:
    def __call__(self, environ, start_response):
        response_body = b"Hello World!"
        status = "200 OK"
        start_response(status, headers=[])
        return iter([response_body])


class RequestAPI:
    def __call__(self, environ, start_response):
        request = Request(environ)
        response = Response()
        response.text = "Hello, World!"
        return response(environ, start_response)


class UserRequest:
    def __call__(self, environ, start_response):
        request = Request(environ)
        response = self.handle_request(request)
        return response(environ, start_response)

    def handle_request(self, request):
        user = request.environ.get("HTTP_USER_AGENT", "No User Agent Found")
        response = Response()
        response.text = f"This is {user}"
        return response


class UserRequestHandler:
    def __init__(self):
        self.routes = {}

    def __call__(self, environ, start_response):
        request = Request(environ)
        response = self.handle_request(request)
        return response(environ, start_response)

    def route_url(self, path, handler):
        assert path not in self.routes, "Routes Already Exists"
        self.routes[path] = handler

    def route(self, path):
        def wrapper(handler):
            self.route_url(path, handler)
            return handler
        return wrapper

    def default_response(self, response):
        response.status_code = 404
        response.text = "Page Not Found."

    def handle_request(self, request):
        response = Response()
        handler, kwargs = self.find_handler(request_path=request.path)
        if handler is not None:
            handler(request, response, **kwargs)
        else:
            self.default_response(response)
        return response

    def find_handler(self, request_path):
        for path, handler in self.routes.items():
            parse_result = parse(path, request_path)
            if parse_result is not None:
                return handler, parse_result.named
        return None, None


class UserRequestBasedHandler:
    def __init__(self, templates_dirs="templates"):
        self.routes = {}
        self.templates_env = Environment(
            loader=FileSystemLoader(os.path.abspath(templates_dirs)))

    def __call__(self, environ, start_response):
        request = Request(environ)
        response = self.class_based_request(request)
        return response(environ, start_response)

    def url(self, path, handler):
        assert path not in self.routes, "Routes Already Exists"
        self.routes[path] = handler

    def route(self, path):
        def wrapper(handler):
            self.url(path, handler)
            return handler
        return wrapper

    def default_response(self, response):
        response.status_code = 404
        response.text = "Page Not Found"

    def class_based_request(self, request):
        response = Response()
        handler, kwargs = self.find_handler_request(request_path=request.path)
        if handler is not None:
            if inspect.isclass(handler):
                handler = getattr(handler(), request.method.lower(), None)
                if handler is None:
                    raise AttributeError("Method now allowed", request.method)
            handler(request, response, **kwargs)
        else:
            self.default_response(response)
        return response

    def find_handler_request(self, request_path):
        for path, handler in self.routes.items():
            parse_result = parse(path, request_path)
            if parse_result is not None:
                return handler, parse_result.named
        return None, None

    def template(self, template_name, context=None):
        if context is None:
            context = {}
        return self.templates_env.get_template(template_name).render(**context)

    def session(self, base_url="http://baseserver"):
        session = RequestsSession()
        session.mount(prefix=base_url, adapter=RequestWSGIAdapter(self))
        return session
true
true
790313c768903bf599eb99df2b3f8f8da465457d
1,083
py
Python
python_basics/Method_ function/mixed_function_guess_game.py
alok8765/basic_python_practicse
9bd61f0b03fc1e703a75df39862a24692bb3fdb7
[ "MIT" ]
null
null
null
python_basics/Method_ function/mixed_function_guess_game.py
alok8765/basic_python_practicse
9bd61f0b03fc1e703a75df39862a24692bb3fdb7
[ "MIT" ]
null
null
null
python_basics/Method_ function/mixed_function_guess_game.py
alok8765/basic_python_practicse
9bd61f0b03fc1e703a75df39862a24692bb3fdb7
[ "MIT" ]
null
null
null
# First create a Shuffle list
my_shuffle_list = [1,2,3,4,5]

# Now Import shuffle
from random import shuffle

shuffle(my_shuffle_list)
print(my_shuffle_list) # check wether shuffle is working or not

# Now let's create Guess Game. First create a list
mylist = ['','o','']

# Define function which will used further
def shuffle_list(mylist):
    shuffle(mylist)
    return mylist

print(mylist) # First check your mylist without shuffle
print(shuffle_list(mylist)) # Now check that function for shuffle worning or not

# Now create function for user to take input as guess number
def user_guess():
    guess = ''
    while guess not in ['0','1','2']:
        guess = input("Pick a number : 0, 1 or 2 : ")
    return int(guess)

print(user_guess())

def check_guess(mylist,guess):
    if mylist[guess] == 'o':
        print('Correct Guess')
    else:
        print('Wrong Better luck next Time')

# Initial list
mylist = ['','o','']

#shuffle list
mixedup_list = shuffle_list(mylist)

# Get user guess
guess = user_guess()

check_guess(mixedup_list,guess)
27.075
84
0.6759
my_shuffle_list = [1,2,3,4,5]

from random import shuffle

shuffle(my_shuffle_list)
print(my_shuffle_list)

mylist = ['','o','']

# Define function which will used further
def shuffle_list(mylist):
    shuffle(mylist)
    return mylist

print(mylist) # First check your mylist without shuffle
print(shuffle_list(mylist)) # Now check that function for shuffle worning or not

# Now create function for user to take input as guess number
def user_guess():
    guess = ''
    while guess not in ['0','1','2']:
        guess = input("Pick a number : 0, 1 or 2 : ")
    return int(guess)

print(user_guess())

def check_guess(mylist,guess):
    if mylist[guess] == 'o':
        print('Correct Guess')
    else:
        print('Wrong Better luck next Time')

# Initial list
mylist = ['','o','']

#shuffle list
mixedup_list = shuffle_list(mylist)

# Get user guess
guess = user_guess()

check_guess(mixedup_list,guess)
true
true
790313d43c91c5b782960900908cf29f70b61a60
232
py
Python
fieldbillard/__init__.py
DFNaiff/FieldBillard
0cbdfbe3e0ee516f5820b2dfa27d9c4ca10aaba4
[ "BSD-3-Clause" ]
null
null
null
fieldbillard/__init__.py
DFNaiff/FieldBillard
0cbdfbe3e0ee516f5820b2dfa27d9c4ca10aaba4
[ "BSD-3-Clause" ]
null
null
null
fieldbillard/__init__.py
DFNaiff/FieldBillard
0cbdfbe3e0ee516f5820b2dfa27d9c4ca10aaba4
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*-

from . import fields
from . import integrators
from . import points
from . import system
from . import utils
from . import visualizer

from .system import NBodySystem
from .visualizer import Visualizer, run
19.333333
39
0.75
from . import fields
from . import integrators
from . import points
from . import system
from . import utils
from . import visualizer

from .system import NBodySystem
from .visualizer import Visualizer, run
true
true
790313efffec6d2ea46eff668ac1614bdf07855b
23,135
py
Python
index.py
markschrik/syncarr
5a0a252538964d0bc53a8142ab7d8106db51f635
[ "MIT" ]
1
2022-02-20T03:22:35.000Z
2022-02-20T03:22:35.000Z
index.py
markschrik/syncarr
5a0a252538964d0bc53a8142ab7d8106db51f635
[ "MIT" ]
null
null
null
index.py
markschrik/syncarr
5a0a252538964d0bc53a8142ab7d8106db51f635
[ "MIT" ]
null
null
null
#!/usr/bin/env python import os import logging import requests import json import configparser import sys import time import re from os.path import dirname from config import ( instanceA_url, instanceA_key, instanceA_path, instanceA_profile, instanceA_profile_id, instanceA_profile_filter, instanceA_profile_filter_id, instanceA_language_id, instanceA_language, instanceA_quality_match, instanceA_tag_filter_id, instanceA_tag_filter, instanceA_blacklist, instanceB_url, instanceB_key, instanceB_path, instanceB_profile, instanceB_profile_id, instanceB_profile_filter, instanceB_profile_filter_id, instanceB_language_id, instanceB_language, instanceB_quality_match, instanceB_tag_filter_id, instanceB_tag_filter, instanceB_blacklist, content_id_key, logger, is_sonarr, is_radarr, is_lidarr, get_status_path, get_content_path, get_profile_path, get_language_path, get_tag_path, get_content_put_path, is_in_docker, instance_sync_interval_seconds, sync_bidirectionally, auto_search, skip_missing, monitor_new_content, api_version, is_test_run, sync_monitor ) def get_content_details(content, instance_path, instance_profile_id, instance_url, instance_language_id=None): """gets details of a content item""" global monitor_new_content, auto_search images = content.get('images') for image in images: image['url'] = '{0}{1}'.format(instance_url, image.get('url')) monitored = content.get('monitored') if monitor_new_content is not None: monitored = True if monitor_new_content else False payload = { content_id_key: content.get(content_id_key), 'qualityProfileId': int(instance_profile_id or content.get('qualityProfileId')), 'monitored': monitored, 'rootFolderPath': instance_path, 'images': images, } add_options = content.get('addOptions', {}) search_missing = True if auto_search else False if is_sonarr: payload['title'] = content.get('title') payload['titleSlug'] = content.get('titleSlug') payload['seasons'] = content.get('seasons') payload['year'] = content.get('year') payload['tvRageId'] = content.get('tvRageId') payload['seasonFolder'] = content.get('seasonFolder') payload['languageProfileId'] = instance_language_id if instance_language_id else content.get( 'languageProfileId') payload['tags'] = content.get('tags') payload['seriesType'] = content.get('seriesType') payload['useSceneNumbering'] = content.get('useSceneNumbering') payload['addOptions'] = { **add_options, **{'searchForMissingEpisodes': search_missing} } elif is_radarr: payload['title'] = content.get('title') payload['year'] = content.get('year') payload['tmdbId'] = content.get('tmdbId') payload['titleSlug'] = content.get('titleSlug') payload['addOptions'] = { **add_options, **{'searchForMovie': search_missing} } elif is_lidarr: payload['artistName'] = content.get('artistName') payload['albumFolder'] = content.get('albumFolder') payload['metadataProfileId'] = content.get('metadataProfileId') payload['addOptions'] = { **add_options, **{ "monitored": monitored, "searchForMissingAlbums": search_missing } } logger.debug(payload) return payload def get_quality_profiles(instance_session, instance_url, instance_key): instance_profile_url = get_profile_path(instance_url, instance_key) profiles_response = instance_session.get(instance_profile_url) if profiles_response.status_code != 200: logger.error(f'Could not get profile id from {instance_profile_url}') exit_system() instance_profiles = None try: instance_profiles = profiles_response.json() return instance_profiles except: logger.error(f'Could not decode profile id from {instance_profile_url}') exit_system() def 
get_profile_from_id(instance_session, instance_url, instance_key, instance_profile, instance_name=''): instance_profiles = get_quality_profiles(instance_session=instance_session, instance_url=instance_url, instance_key=instance_key) profile = next((item for item in instance_profiles if item["name"].lower() == instance_profile.lower()), False) if not profile: logger.error('Could not find profile_id for instance {} profile {}'.format(instance_name, instance_profile)) exit_system() instance_profile_id = profile.get('id') logger.debug(f'found profile_id (instance{instance_name}) "{instance_profile_id}" from profile "{instance_profile}"') return instance_profile_id def get_tag_from_id(instance_session, instance_url, instance_key, instance_tag, instance_name=''): instance_tag_url = get_tag_path(instance_url, instance_key) tag_response = instance_session.get(instance_tag_url) if tag_response.status_code != 200: logger.error(f'Could not get tag id from (instance{instance_name}) {instance_tag_url} - only works on Sonarr') exit_system() instance_tags = None try: instance_tags = tag_response.json() except: logger.error(f'Could not decode tag id from {instance_tag_url}') exit_system() tag_ids = [] for item in instance_tags: for instance_item in instance_tag: if item.get('label').lower() == instance_item.lower(): tag_ids.append(item) if not tag_ids: logger.error(f'Could not find tag_id for instance {instance_name} and tag {instance_tags}') exit_system() instance_tag_ids = [tag.get('id') for tag in tag_ids] logger.debug(f'found id "{instance_tag_ids}" from tag "{instance_tag}" for instance {instance_name}') if instance_tag_ids is None: logger.error(f'tag_id is None for instance {instance_name} and tag {instance_tag}') exit_system() return instance_tag_ids def get_language_from_id(instance_session, instance_url, instance_key, instance_language, instance_name=''): instance_language_url = get_language_path(instance_url, instance_key) language_response = instance_session.get(instance_language_url) if language_response.status_code != 200: logger.error(f'Could not get language id from (instance{instance_name}) {instance_language_url} - only works on sonarr v3') exit_system() instance_languages = None try: instance_languages = language_response.json() except: logger.error(f'Could not decode language id from {instance_language_url}') exit_system() instance_languages = instance_languages[0]['languages'] language = next((item for item in instance_languages if item.get('language', {}).get('name').lower() == instance_language.lower()), False) if not language: logger.error(f'Could not find language_id for instance {instance_name} and language {instance_language}') exit_system() instance_language_id = language.get('language', {}).get('id') logger.debug(f'found id "{instance_language_id}" from language "{instance_language}" for instance {instance_name}') if instance_language_id is None: logger.error(f'language_id is None for instance {instance_name} and language {instance_language}') exit_system() return instance_language_id def sync_servers(instanceA_contents, instanceB_language_id, instanceB_contentIds, instanceB_path, instanceB_profile_id, instanceA_profile_filter_id, instanceB_session, instanceB_url, instanceB_key, instanceA_quality_match, instanceA_tag_filter_id, instanceA_blacklist, instanceB_contents): global is_radarr, is_sonarr, is_test_run, sync_monitor search_ids = [] # if given instance A profile id then we want to filter out content without that id if instanceA_profile_filter_id: logging.info(f'only 
filtering content with instanceA_profile_filter_id {instanceA_profile_filter_id}') # for each content id in instance A, check if it needs to be synced to instance B for content in instanceA_contents: content_not_synced = content[content_id_key] not in instanceB_contentIds # only skip alrerady synced items if we arent syncing monitoring as well if content_not_synced or sync_monitor: title = content.get('title') or content.get('artistName') instance_path = instanceB_path or dirname(content.get('path')) # if skipping missing files, we want to skip any that don't have files if is_radarr and skip_missing: content_has_file = content.get('hasFile') if not content_has_file: logging.debug(f'Skipping content {title} - file missing') continue # if given this, we want to filter from instance by profile id if instanceA_profile_filter_id: quality_profile_id = content.get('qualityProfileId') if instanceA_profile_filter_id != quality_profile_id: logging.debug(f'Skipping content {title} - mismatched quality_profile_id {quality_profile_id} with instanceA_profile_filter_id {instanceA_profile_filter_id}') continue # if given quality filter we want to filter if quality from instanceA isnt high enough yet if is_radarr and instanceA_quality_match: content_quality = content.get('movieFile', {}).get('quality', {}).get('quality', {}).get('name', '') if content_quality and not re.match(instanceA_quality_match, content_quality): logging.debug(f'Skipping content {title} - mismatched content_quality {content_quality} with instanceA_quality_match {instanceA_quality_match}') continue # if given tag filter then filter by tag - (Sonarr/Radarr v3 only) if (is_sonarr or is_radarr) and instanceA_tag_filter_id: content_tag_ids = content.get('tags') if not (set(content_tag_ids) & set(instanceA_tag_filter_id)): logging.debug(f'Skipping content {title} - mismatched content_tag_ids {content_tag_ids} with instanceA_tag_filter_id {instanceA_tag_filter_id}') continue # if black list given then dont sync matching slugs/ids if instanceA_blacklist: title_slug = content.get('titleSlug') or content.get('foreignArtistId') if title_slug in instanceA_blacklist: logging.debug(f'Skipping content {title} - blacklist slug: {title_slug}') continue content_id = str(content.get('id')) if content_id in instanceA_blacklist: logging.debug(f'Skipping content {title} - blacklist ID: {content_id}') continue # generate content from instance A to sync into instance B formatted_content = get_content_details( content=dict(content), instance_path=instance_path, instance_profile_id=instanceB_profile_id, instance_url=instanceB_url, instance_language_id=instanceB_language_id, ) instanceB_content_url = get_content_path(instanceB_url, instanceB_key) if is_test_run: logging.info('content title "{0}" synced successfully (test only)'.format(title)) elif content_not_synced: # sync content if not synced logging.info(f'syncing content title "{title}"') sync_response = instanceB_session.post(instanceB_content_url, json=formatted_content) # check response and save content id for searching later on if success if sync_response.status_code != 201 and sync_response.status_code != 200: logger.error(f'server sync error for {title} - response: {sync_response.text}') else: try: search_ids.append(int(sync_response.json()['id'])) except: logger.error(f'Could not decode sync response from {instanceB_content_url}') logging.info('content title "{0}" synced successfully'.format(title)) elif sync_monitor: # else if is already synced and we want to sync monitoring then sync that now # 
find matching content from instance B to check monitored status matching_content_instanceB = list(filter(lambda content_instanceB: content_instanceB['titleSlug'] == content.get('titleSlug'), instanceB_contents)) if(len(matching_content_instanceB) == 1): matching_content_instanceB = matching_content_instanceB[0] # if we found a content match from instance B, then check monitored status - if different then sync from A to B if matching_content_instanceB['monitored'] != content['monitored']: matching_content_instanceB['monitored'] = content['monitored'] instanceB_content_url = get_content_put_path(instanceB_url, instanceB_key, matching_content_instanceB.get('id')) sync_response = instanceB_session.put(instanceB_content_url, json=matching_content_instanceB) # check response and save content id for searching later on if success if sync_response.status_code != 202: logger.error(f'server monitoring sync error for {title} - response: {sync_response.text}') else: try: search_ids.append(int(sync_response.json()['id'])) except: logger.error(f'Could not decode sync response from {instanceB_content_url}') logging.info('content title "{0}" monitoring synced successfully'.format(title)) logging.info(f'{len(search_ids)} contents synced successfully') def get_instance_contents(instance_url, instance_key, instance_session, instance_name=''): instance_contentIds = [] instance_content_url = get_content_path(instance_url, instance_key) instance_contents = instance_session.get(instance_content_url) if instance_contents.status_code != 200: logger.error('instance{} server error - response {}'.format(instance_name, instance_contents.status_code)) exit_system() else: try: instance_contents = instance_contents.json() except: logger.error(f'Could not decode contents from {instance_content_url}') exit_system() for content_to_sync in instance_contents: instance_contentIds.append(content_to_sync[content_id_key]) logger.debug('{} contents in instance {}'.format(len(instance_contentIds), instance_name)) return instance_contents, instance_contentIds def check_status(instance_session, instance_url, instance_key, instance_name=''): global api_version instance_status_url = get_status_path(instance_url, instance_key) error_message = f'Could not connect to instance{instance_name}: {instance_status_url}' status_response = None try: status_response = instance_session.get(instance_status_url) if status_response.status_code != 200: logger.error(error_message) exit_system() except: logger.error(error_message) exit_system() if status_response is None: logger.error(error_message) exit_system() else: try: status_response = status_response.json() except Exception as error: if not isinstance(status_response, dict): logger.error( f"Could not retrieve status for {instance_status_url}: {status_response} - {error}") exit_system() if(status_response.get('error')): logger.error(f"{instance_status_url} error {status_response.get('error')}") exit_system() logger.debug(f"{instance_status_url} version {status_response.get('version')}") return status_response def sync_content(): global instanceA_profile_id, instanceA_profile, instanceB_profile_id, instanceB_profile, instanceA_profile_filter, instanceA_profile_filter_id, instanceB_profile_filter, instanceB_profile_filter_id, tested_api_version, instanceA_language_id, instanceA_language, instanceB_language_id, instanceB_language, instanceA_quality_match, instanceB_quality_match, is_sonarr, instanceA_tag_filter_id, instanceA_tag_filter, instanceB_tag_filter_id, instanceB_tag_filter, is_radarr, 
instanceA_blacklist, instanceB_blacklist # get sessions instanceA_session = requests.Session() instanceA_session.trust_env = False instanceB_session = requests.Session() instanceB_session.trust_env = False # if given a profile instead of a profile id then try to find the profile id if not instanceA_profile_id and instanceA_profile: instanceA_profile_id = get_profile_from_id(instanceA_session, instanceA_url, instanceA_key, instanceA_profile, 'A') if not instanceB_profile_id and instanceB_profile: instanceB_profile_id = get_profile_from_id(instanceB_session, instanceB_url, instanceB_key, instanceB_profile, 'B') logger.debug({ 'instanceA_profile_id': instanceA_profile_id, 'instanceA_profile': instanceA_profile, 'instanceB_profile_id': instanceB_profile_id, 'instanceB_profile': instanceB_profile, }) # do the same for profile id filters if they exist if not instanceA_profile_filter_id and instanceA_profile_filter: instanceA_profile_filter_id = get_profile_from_id(instanceA_session, instanceA_url, instanceA_key, instanceA_profile_filter, 'A') if not instanceB_profile_filter_id and instanceB_profile_filter: instanceB_profile_filter_id = get_profile_from_id(instanceB_session, instanceB_url, instanceB_key, instanceB_profile_filter, 'B') logger.debug({ 'instanceAprofile_filter_id': instanceA_profile_filter_id, 'instanceAprofile_filter': instanceA_profile_filter, 'instanceBprofile_filter_id': instanceB_profile_filter_id, 'instanceBprofile_filter': instanceB_profile_filter, }) # do the same for tag id filters if they exist - (only Sonarr) if is_sonarr or is_radarr: if not instanceA_tag_filter_id and instanceA_tag_filter: instanceA_tag_filter_id = get_tag_from_id(instanceA_session, instanceA_url, instanceA_key, instanceA_tag_filter, 'A') if not instanceB_tag_filter_id and instanceB_tag_filter: instanceB_tag_filter_id = get_tag_from_id(instanceB_session, instanceB_url, instanceB_key, instanceA_tag_filter, 'B') logger.debug({ 'instanceA_tag_filter': instanceA_tag_filter, 'instanceA_profile_filter': instanceA_profile_filter, 'instanceB_tag_filter_id': instanceB_tag_filter_id, 'instanceB_tag_filter': instanceB_tag_filter, }) # if given language instead of language id then try to find the lanaguage id - (only Sonarr v3) if is_sonarr: if not instanceA_language_id and instanceA_language: instanceA_language_id = get_language_from_id( instance_session=instanceA_session, instance_url=instanceA_url, instance_key=instanceA_key, instance_language=instanceA_language, instance_name='A' ) if not instanceB_language_id and instanceB_language: instanceB_language_id = get_language_from_id( instance_session=instanceB_session, instance_url=instanceB_url, instance_key=instanceB_key, instance_language=instanceB_language, instance_name='B' ) logger.debug({ 'instanceA_language_id': instanceA_language_id, 'instanceA_language': instanceA_language, 'instanceB_language_id': instanceB_language_id, 'instanceB_language': instanceB_language, 'is_sonarr': is_sonarr, 'api_version': api_version, }) # get contents to compare instanceA_contents, instanceA_contentIds = get_instance_contents(instanceA_url, instanceA_key, instanceA_session, instance_name='A') instanceB_contents, instanceB_contentIds = get_instance_contents(instanceB_url, instanceB_key, instanceB_session, instance_name='B') logger.info('syncing content from instance A to instance B') sync_servers( instanceA_contents=instanceA_contents, instanceB_contents=instanceB_contents, instanceB_contentIds=instanceB_contentIds, instanceB_language_id=instanceB_language_id, 
instanceB_path=instanceB_path, instanceB_profile_id=instanceB_profile_id, instanceB_session=instanceB_session, instanceB_url=instanceB_url, instanceA_profile_filter_id=instanceA_profile_filter_id, instanceB_key=instanceB_key, instanceA_quality_match=instanceA_quality_match, instanceA_tag_filter_id=instanceA_tag_filter_id, instanceA_blacklist=instanceA_blacklist ) # if given bidirectional flag then sync from instance B to instance A if sync_bidirectionally: logger.info('syncing content from instance B to instance A') sync_servers( instanceA_contents=instanceB_contents, instanceB_contents=instanceA_contents, instanceB_contentIds=instanceA_contentIds, instanceB_language_id=instanceA_language_id, instanceB_path=instanceA_path, instanceB_profile_id=instanceA_profile_id, instanceB_session=instanceA_session, instanceB_url=instanceA_url, instanceA_profile_filter_id=instanceB_profile_filter_id, instanceB_key=instanceA_key, instanceA_quality_match=instanceB_quality_match, instanceA_tag_filter_id=instanceB_tag_filter_id, instanceA_blacklist=instanceB_blacklist ) ######################################################################################################################## def exit_system(): """we dont want to exit if in docker""" if is_in_docker: raise Exception else: sys.exit(0) if is_in_docker: logger.info('syncing every {} seconds'.format(instance_sync_interval_seconds)) sync_content() if is_in_docker: while True: try: time.sleep(instance_sync_interval_seconds) sync_content() except Exception as inst: d = inst
46.085657
515
0.684504
import os import logging import requests import json import configparser import sys import time import re from os.path import dirname from config import ( instanceA_url, instanceA_key, instanceA_path, instanceA_profile, instanceA_profile_id, instanceA_profile_filter, instanceA_profile_filter_id, instanceA_language_id, instanceA_language, instanceA_quality_match, instanceA_tag_filter_id, instanceA_tag_filter, instanceA_blacklist, instanceB_url, instanceB_key, instanceB_path, instanceB_profile, instanceB_profile_id, instanceB_profile_filter, instanceB_profile_filter_id, instanceB_language_id, instanceB_language, instanceB_quality_match, instanceB_tag_filter_id, instanceB_tag_filter, instanceB_blacklist, content_id_key, logger, is_sonarr, is_radarr, is_lidarr, get_status_path, get_content_path, get_profile_path, get_language_path, get_tag_path, get_content_put_path, is_in_docker, instance_sync_interval_seconds, sync_bidirectionally, auto_search, skip_missing, monitor_new_content, api_version, is_test_run, sync_monitor ) def get_content_details(content, instance_path, instance_profile_id, instance_url, instance_language_id=None): global monitor_new_content, auto_search images = content.get('images') for image in images: image['url'] = '{0}{1}'.format(instance_url, image.get('url')) monitored = content.get('monitored') if monitor_new_content is not None: monitored = True if monitor_new_content else False payload = { content_id_key: content.get(content_id_key), 'qualityProfileId': int(instance_profile_id or content.get('qualityProfileId')), 'monitored': monitored, 'rootFolderPath': instance_path, 'images': images, } add_options = content.get('addOptions', {}) search_missing = True if auto_search else False if is_sonarr: payload['title'] = content.get('title') payload['titleSlug'] = content.get('titleSlug') payload['seasons'] = content.get('seasons') payload['year'] = content.get('year') payload['tvRageId'] = content.get('tvRageId') payload['seasonFolder'] = content.get('seasonFolder') payload['languageProfileId'] = instance_language_id if instance_language_id else content.get( 'languageProfileId') payload['tags'] = content.get('tags') payload['seriesType'] = content.get('seriesType') payload['useSceneNumbering'] = content.get('useSceneNumbering') payload['addOptions'] = { **add_options, **{'searchForMissingEpisodes': search_missing} } elif is_radarr: payload['title'] = content.get('title') payload['year'] = content.get('year') payload['tmdbId'] = content.get('tmdbId') payload['titleSlug'] = content.get('titleSlug') payload['addOptions'] = { **add_options, **{'searchForMovie': search_missing} } elif is_lidarr: payload['artistName'] = content.get('artistName') payload['albumFolder'] = content.get('albumFolder') payload['metadataProfileId'] = content.get('metadataProfileId') payload['addOptions'] = { **add_options, **{ "monitored": monitored, "searchForMissingAlbums": search_missing } } logger.debug(payload) return payload def get_quality_profiles(instance_session, instance_url, instance_key): instance_profile_url = get_profile_path(instance_url, instance_key) profiles_response = instance_session.get(instance_profile_url) if profiles_response.status_code != 200: logger.error(f'Could not get profile id from {instance_profile_url}') exit_system() instance_profiles = None try: instance_profiles = profiles_response.json() return instance_profiles except: logger.error(f'Could not decode profile id from {instance_profile_url}') exit_system() def get_profile_from_id(instance_session, instance_url, 
instance_key, instance_profile, instance_name=''): instance_profiles = get_quality_profiles(instance_session=instance_session, instance_url=instance_url, instance_key=instance_key) profile = next((item for item in instance_profiles if item["name"].lower() == instance_profile.lower()), False) if not profile: logger.error('Could not find profile_id for instance {} profile {}'.format(instance_name, instance_profile)) exit_system() instance_profile_id = profile.get('id') logger.debug(f'found profile_id (instance{instance_name}) "{instance_profile_id}" from profile "{instance_profile}"') return instance_profile_id def get_tag_from_id(instance_session, instance_url, instance_key, instance_tag, instance_name=''): instance_tag_url = get_tag_path(instance_url, instance_key) tag_response = instance_session.get(instance_tag_url) if tag_response.status_code != 200: logger.error(f'Could not get tag id from (instance{instance_name}) {instance_tag_url} - only works on Sonarr') exit_system() instance_tags = None try: instance_tags = tag_response.json() except: logger.error(f'Could not decode tag id from {instance_tag_url}') exit_system() tag_ids = [] for item in instance_tags: for instance_item in instance_tag: if item.get('label').lower() == instance_item.lower(): tag_ids.append(item) if not tag_ids: logger.error(f'Could not find tag_id for instance {instance_name} and tag {instance_tags}') exit_system() instance_tag_ids = [tag.get('id') for tag in tag_ids] logger.debug(f'found id "{instance_tag_ids}" from tag "{instance_tag}" for instance {instance_name}') if instance_tag_ids is None: logger.error(f'tag_id is None for instance {instance_name} and tag {instance_tag}') exit_system() return instance_tag_ids def get_language_from_id(instance_session, instance_url, instance_key, instance_language, instance_name=''): instance_language_url = get_language_path(instance_url, instance_key) language_response = instance_session.get(instance_language_url) if language_response.status_code != 200: logger.error(f'Could not get language id from (instance{instance_name}) {instance_language_url} - only works on sonarr v3') exit_system() instance_languages = None try: instance_languages = language_response.json() except: logger.error(f'Could not decode language id from {instance_language_url}') exit_system() instance_languages = instance_languages[0]['languages'] language = next((item for item in instance_languages if item.get('language', {}).get('name').lower() == instance_language.lower()), False) if not language: logger.error(f'Could not find language_id for instance {instance_name} and language {instance_language}') exit_system() instance_language_id = language.get('language', {}).get('id') logger.debug(f'found id "{instance_language_id}" from language "{instance_language}" for instance {instance_name}') if instance_language_id is None: logger.error(f'language_id is None for instance {instance_name} and language {instance_language}') exit_system() return instance_language_id def sync_servers(instanceA_contents, instanceB_language_id, instanceB_contentIds, instanceB_path, instanceB_profile_id, instanceA_profile_filter_id, instanceB_session, instanceB_url, instanceB_key, instanceA_quality_match, instanceA_tag_filter_id, instanceA_blacklist, instanceB_contents): global is_radarr, is_sonarr, is_test_run, sync_monitor search_ids = [] if instanceA_profile_filter_id: logging.info(f'only filtering content with instanceA_profile_filter_id {instanceA_profile_filter_id}') for content in instanceA_contents: content_not_synced = 
content[content_id_key] not in instanceB_contentIds if content_not_synced or sync_monitor: title = content.get('title') or content.get('artistName') instance_path = instanceB_path or dirname(content.get('path')) if is_radarr and skip_missing: content_has_file = content.get('hasFile') if not content_has_file: logging.debug(f'Skipping content {title} - file missing') continue # if given this, we want to filter from instance by profile id if instanceA_profile_filter_id: quality_profile_id = content.get('qualityProfileId') if instanceA_profile_filter_id != quality_profile_id: logging.debug(f'Skipping content {title} - mismatched quality_profile_id {quality_profile_id} with instanceA_profile_filter_id {instanceA_profile_filter_id}') continue # if given quality filter we want to filter if quality from instanceA isnt high enough yet if is_radarr and instanceA_quality_match: content_quality = content.get('movieFile', {}).get('quality', {}).get('quality', {}).get('name', '') if content_quality and not re.match(instanceA_quality_match, content_quality): logging.debug(f'Skipping content {title} - mismatched content_quality {content_quality} with instanceA_quality_match {instanceA_quality_match}') continue # if given tag filter then filter by tag - (Sonarr/Radarr v3 only) if (is_sonarr or is_radarr) and instanceA_tag_filter_id: content_tag_ids = content.get('tags') if not (set(content_tag_ids) & set(instanceA_tag_filter_id)): logging.debug(f'Skipping content {title} - mismatched content_tag_ids {content_tag_ids} with instanceA_tag_filter_id {instanceA_tag_filter_id}') continue # if black list given then dont sync matching slugs/ids if instanceA_blacklist: title_slug = content.get('titleSlug') or content.get('foreignArtistId') if title_slug in instanceA_blacklist: logging.debug(f'Skipping content {title} - blacklist slug: {title_slug}') continue content_id = str(content.get('id')) if content_id in instanceA_blacklist: logging.debug(f'Skipping content {title} - blacklist ID: {content_id}') continue # generate content from instance A to sync into instance B formatted_content = get_content_details( content=dict(content), instance_path=instance_path, instance_profile_id=instanceB_profile_id, instance_url=instanceB_url, instance_language_id=instanceB_language_id, ) instanceB_content_url = get_content_path(instanceB_url, instanceB_key) if is_test_run: logging.info('content title "{0}" synced successfully (test only)'.format(title)) elif content_not_synced: # sync content if not synced logging.info(f'syncing content title "{title}"') sync_response = instanceB_session.post(instanceB_content_url, json=formatted_content) # check response and save content id for searching later on if success if sync_response.status_code != 201 and sync_response.status_code != 200: logger.error(f'server sync error for {title} - response: {sync_response.text}') else: try: search_ids.append(int(sync_response.json()['id'])) except: logger.error(f'Could not decode sync response from {instanceB_content_url}') logging.info('content title "{0}" synced successfully'.format(title)) elif sync_monitor: # else if is already synced and we want to sync monitoring then sync that now # find matching content from instance B to check monitored status matching_content_instanceB = list(filter(lambda content_instanceB: content_instanceB['titleSlug'] == content.get('titleSlug'), instanceB_contents)) if(len(matching_content_instanceB) == 1): matching_content_instanceB = matching_content_instanceB[0] # if we found a content match from instance B, then 
check monitored status - if different then sync from A to B if matching_content_instanceB['monitored'] != content['monitored']: matching_content_instanceB['monitored'] = content['monitored'] instanceB_content_url = get_content_put_path(instanceB_url, instanceB_key, matching_content_instanceB.get('id')) sync_response = instanceB_session.put(instanceB_content_url, json=matching_content_instanceB) # check response and save content id for searching later on if success if sync_response.status_code != 202: logger.error(f'server monitoring sync error for {title} - response: {sync_response.text}') else: try: search_ids.append(int(sync_response.json()['id'])) except: logger.error(f'Could not decode sync response from {instanceB_content_url}') logging.info('content title "{0}" monitoring synced successfully'.format(title)) logging.info(f'{len(search_ids)} contents synced successfully') def get_instance_contents(instance_url, instance_key, instance_session, instance_name=''): instance_contentIds = [] instance_content_url = get_content_path(instance_url, instance_key) instance_contents = instance_session.get(instance_content_url) if instance_contents.status_code != 200: logger.error('instance{} server error - response {}'.format(instance_name, instance_contents.status_code)) exit_system() else: try: instance_contents = instance_contents.json() except: logger.error(f'Could not decode contents from {instance_content_url}') exit_system() for content_to_sync in instance_contents: instance_contentIds.append(content_to_sync[content_id_key]) logger.debug('{} contents in instance {}'.format(len(instance_contentIds), instance_name)) return instance_contents, instance_contentIds def check_status(instance_session, instance_url, instance_key, instance_name=''): global api_version instance_status_url = get_status_path(instance_url, instance_key) error_message = f'Could not connect to instance{instance_name}: {instance_status_url}' status_response = None try: status_response = instance_session.get(instance_status_url) if status_response.status_code != 200: logger.error(error_message) exit_system() except: logger.error(error_message) exit_system() if status_response is None: logger.error(error_message) exit_system() else: try: status_response = status_response.json() except Exception as error: if not isinstance(status_response, dict): logger.error( f"Could not retrieve status for {instance_status_url}: {status_response} - {error}") exit_system() if(status_response.get('error')): logger.error(f"{instance_status_url} error {status_response.get('error')}") exit_system() logger.debug(f"{instance_status_url} version {status_response.get('version')}") return status_response def sync_content(): global instanceA_profile_id, instanceA_profile, instanceB_profile_id, instanceB_profile, instanceA_profile_filter, instanceA_profile_filter_id, instanceB_profile_filter, instanceB_profile_filter_id, tested_api_version, instanceA_language_id, instanceA_language, instanceB_language_id, instanceB_language, instanceA_quality_match, instanceB_quality_match, is_sonarr, instanceA_tag_filter_id, instanceA_tag_filter, instanceB_tag_filter_id, instanceB_tag_filter, is_radarr, instanceA_blacklist, instanceB_blacklist # get sessions instanceA_session = requests.Session() instanceA_session.trust_env = False instanceB_session = requests.Session() instanceB_session.trust_env = False # if given a profile instead of a profile id then try to find the profile id if not instanceA_profile_id and instanceA_profile: instanceA_profile_id = 
get_profile_from_id(instanceA_session, instanceA_url, instanceA_key, instanceA_profile, 'A') if not instanceB_profile_id and instanceB_profile: instanceB_profile_id = get_profile_from_id(instanceB_session, instanceB_url, instanceB_key, instanceB_profile, 'B') logger.debug({ 'instanceA_profile_id': instanceA_profile_id, 'instanceA_profile': instanceA_profile, 'instanceB_profile_id': instanceB_profile_id, 'instanceB_profile': instanceB_profile, }) # do the same for profile id filters if they exist if not instanceA_profile_filter_id and instanceA_profile_filter: instanceA_profile_filter_id = get_profile_from_id(instanceA_session, instanceA_url, instanceA_key, instanceA_profile_filter, 'A') if not instanceB_profile_filter_id and instanceB_profile_filter: instanceB_profile_filter_id = get_profile_from_id(instanceB_session, instanceB_url, instanceB_key, instanceB_profile_filter, 'B') logger.debug({ 'instanceAprofile_filter_id': instanceA_profile_filter_id, 'instanceAprofile_filter': instanceA_profile_filter, 'instanceBprofile_filter_id': instanceB_profile_filter_id, 'instanceBprofile_filter': instanceB_profile_filter, }) # do the same for tag id filters if they exist - (only Sonarr) if is_sonarr or is_radarr: if not instanceA_tag_filter_id and instanceA_tag_filter: instanceA_tag_filter_id = get_tag_from_id(instanceA_session, instanceA_url, instanceA_key, instanceA_tag_filter, 'A') if not instanceB_tag_filter_id and instanceB_tag_filter: instanceB_tag_filter_id = get_tag_from_id(instanceB_session, instanceB_url, instanceB_key, instanceA_tag_filter, 'B') logger.debug({ 'instanceA_tag_filter': instanceA_tag_filter, 'instanceA_profile_filter': instanceA_profile_filter, 'instanceB_tag_filter_id': instanceB_tag_filter_id, 'instanceB_tag_filter': instanceB_tag_filter, }) # if given language instead of language id then try to find the lanaguage id - (only Sonarr v3) if is_sonarr: if not instanceA_language_id and instanceA_language: instanceA_language_id = get_language_from_id( instance_session=instanceA_session, instance_url=instanceA_url, instance_key=instanceA_key, instance_language=instanceA_language, instance_name='A' ) if not instanceB_language_id and instanceB_language: instanceB_language_id = get_language_from_id( instance_session=instanceB_session, instance_url=instanceB_url, instance_key=instanceB_key, instance_language=instanceB_language, instance_name='B' ) logger.debug({ 'instanceA_language_id': instanceA_language_id, 'instanceA_language': instanceA_language, 'instanceB_language_id': instanceB_language_id, 'instanceB_language': instanceB_language, 'is_sonarr': is_sonarr, 'api_version': api_version, }) # get contents to compare instanceA_contents, instanceA_contentIds = get_instance_contents(instanceA_url, instanceA_key, instanceA_session, instance_name='A') instanceB_contents, instanceB_contentIds = get_instance_contents(instanceB_url, instanceB_key, instanceB_session, instance_name='B') logger.info('syncing content from instance A to instance B') sync_servers( instanceA_contents=instanceA_contents, instanceB_contents=instanceB_contents, instanceB_contentIds=instanceB_contentIds, instanceB_language_id=instanceB_language_id, instanceB_path=instanceB_path, instanceB_profile_id=instanceB_profile_id, instanceB_session=instanceB_session, instanceB_url=instanceB_url, instanceA_profile_filter_id=instanceA_profile_filter_id, instanceB_key=instanceB_key, instanceA_quality_match=instanceA_quality_match, instanceA_tag_filter_id=instanceA_tag_filter_id, instanceA_blacklist=instanceA_blacklist ) # if 
given bidirectional flag then sync from instance B to instance A if sync_bidirectionally: logger.info('syncing content from instance B to instance A') sync_servers( instanceA_contents=instanceB_contents, instanceB_contents=instanceA_contents, instanceB_contentIds=instanceA_contentIds, instanceB_language_id=instanceA_language_id, instanceB_path=instanceA_path, instanceB_profile_id=instanceA_profile_id, instanceB_session=instanceA_session, instanceB_url=instanceA_url, instanceA_profile_filter_id=instanceB_profile_filter_id, instanceB_key=instanceA_key, instanceA_quality_match=instanceB_quality_match, instanceA_tag_filter_id=instanceB_tag_filter_id, instanceA_blacklist=instanceB_blacklist ) ######################################################################################################################## def exit_system(): if is_in_docker: raise Exception else: sys.exit(0) if is_in_docker: logger.info('syncing every {} seconds'.format(instance_sync_interval_seconds)) sync_content() if is_in_docker: while True: try: time.sleep(instance_sync_interval_seconds) sync_content() except Exception as inst: d = inst
true
true
7903150dcac929651d658babe370557022ca5561
18,651
py
Python
opensilexClientToolsPython/models/study_details_dto.py
OpenSILEX/opensilexClientToolsPython
41b1e7e707670ecf1b2c06d79bdd9749945788cb
[ "RSA-MD" ]
null
null
null
opensilexClientToolsPython/models/study_details_dto.py
OpenSILEX/opensilexClientToolsPython
41b1e7e707670ecf1b2c06d79bdd9749945788cb
[ "RSA-MD" ]
7
2021-05-25T14:06:04.000Z
2021-11-05T15:42:14.000Z
opensilexClientToolsPython/models/study_details_dto.py
OpenSILEX/opensilexClientToolsPython
41b1e7e707670ecf1b2c06d79bdd9749945788cb
[ "RSA-MD" ]
null
null
null
# coding: utf-8 """ OpenSilex API No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501 OpenAPI spec version: INSTANCE-SNAPSHOT Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six class StudyDetailsDTO(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'active': 'str', 'additional_info': 'dict(str, object)', 'common_crop_name': 'str', 'documentation_url': 'str', 'end_date': 'str', 'location_db_id': 'str', 'location_name': 'str', 'name': 'str', 'program_db_id': 'str', 'program_name': 'str', 'seasons': 'list[Season]', 'start_date': 'str', 'study_db_id': 'str', 'study_name': 'str', 'study_type': 'str', 'study_type_db_id': 'str', 'study_type_name': 'str', 'trial_db_id': 'str', 'trial_name': 'str', 'contacts': 'list[Contact]', 'datalinks': 'list[DataLink]', 'location': 'Location' } attribute_map = { 'active': 'active', 'additional_info': 'additionalInfo', 'common_crop_name': 'commonCropName', 'documentation_url': 'documentationURL', 'end_date': 'endDate', 'location_db_id': 'locationDbId', 'location_name': 'locationName', 'name': 'name', 'program_db_id': 'programDbId', 'program_name': 'programName', 'seasons': 'seasons', 'start_date': 'startDate', 'study_db_id': 'studyDbId', 'study_name': 'studyName', 'study_type': 'studyType', 'study_type_db_id': 'studyTypeDbId', 'study_type_name': 'studyTypeName', 'trial_db_id': 'trialDbId', 'trial_name': 'trialName', 'contacts': 'contacts', 'datalinks': 'datalinks', 'location': 'location' } def __init__(self, active=None, additional_info=None, common_crop_name=None, documentation_url=None, end_date=None, location_db_id=None, location_name=None, name=None, program_db_id=None, program_name=None, seasons=None, start_date=None, study_db_id=None, study_name=None, study_type=None, study_type_db_id=None, study_type_name=None, trial_db_id=None, trial_name=None, contacts=None, datalinks=None, location=None): # noqa: E501 """StudyDetailsDTO - a model defined in Swagger""" # noqa: E501 self._active = None self._additional_info = None self._common_crop_name = None self._documentation_url = None self._end_date = None self._location_db_id = None self._location_name = None self._name = None self._program_db_id = None self._program_name = None self._seasons = None self._start_date = None self._study_db_id = None self._study_name = None self._study_type = None self._study_type_db_id = None self._study_type_name = None self._trial_db_id = None self._trial_name = None self._contacts = None self._datalinks = None self._location = None self.discriminator = None if active is not None: self.active = active if additional_info is not None: self.additional_info = additional_info if common_crop_name is not None: self.common_crop_name = common_crop_name if documentation_url is not None: self.documentation_url = documentation_url if end_date is not None: self.end_date = end_date if location_db_id is not None: self.location_db_id = location_db_id if location_name is not None: self.location_name = location_name if name is not None: self.name = name if program_db_id is not None: self.program_db_id = program_db_id if program_name is not None: self.program_name = program_name if seasons is not 
None: self.seasons = seasons if start_date is not None: self.start_date = start_date if study_db_id is not None: self.study_db_id = study_db_id if study_name is not None: self.study_name = study_name if study_type is not None: self.study_type = study_type if study_type_db_id is not None: self.study_type_db_id = study_type_db_id if study_type_name is not None: self.study_type_name = study_type_name if trial_db_id is not None: self.trial_db_id = trial_db_id if trial_name is not None: self.trial_name = trial_name if contacts is not None: self.contacts = contacts if datalinks is not None: self.datalinks = datalinks if location is not None: self.location = location @property def active(self): """Gets the active of this StudyDetailsDTO. # noqa: E501 :return: The active of this StudyDetailsDTO. # noqa: E501 :rtype: str """ return self._active @active.setter def active(self, active): """Sets the active of this StudyDetailsDTO. :param active: The active of this StudyDetailsDTO. # noqa: E501 :type: str """ self._active = active @property def additional_info(self): """Gets the additional_info of this StudyDetailsDTO. # noqa: E501 :return: The additional_info of this StudyDetailsDTO. # noqa: E501 :rtype: dict(str, object) """ return self._additional_info @additional_info.setter def additional_info(self, additional_info): """Sets the additional_info of this StudyDetailsDTO. :param additional_info: The additional_info of this StudyDetailsDTO. # noqa: E501 :type: dict(str, object) """ self._additional_info = additional_info @property def common_crop_name(self): """Gets the common_crop_name of this StudyDetailsDTO. # noqa: E501 :return: The common_crop_name of this StudyDetailsDTO. # noqa: E501 :rtype: str """ return self._common_crop_name @common_crop_name.setter def common_crop_name(self, common_crop_name): """Sets the common_crop_name of this StudyDetailsDTO. :param common_crop_name: The common_crop_name of this StudyDetailsDTO. # noqa: E501 :type: str """ self._common_crop_name = common_crop_name @property def documentation_url(self): """Gets the documentation_url of this StudyDetailsDTO. # noqa: E501 :return: The documentation_url of this StudyDetailsDTO. # noqa: E501 :rtype: str """ return self._documentation_url @documentation_url.setter def documentation_url(self, documentation_url): """Sets the documentation_url of this StudyDetailsDTO. :param documentation_url: The documentation_url of this StudyDetailsDTO. # noqa: E501 :type: str """ self._documentation_url = documentation_url @property def end_date(self): """Gets the end_date of this StudyDetailsDTO. # noqa: E501 :return: The end_date of this StudyDetailsDTO. # noqa: E501 :rtype: str """ return self._end_date @end_date.setter def end_date(self, end_date): """Sets the end_date of this StudyDetailsDTO. :param end_date: The end_date of this StudyDetailsDTO. # noqa: E501 :type: str """ self._end_date = end_date @property def location_db_id(self): """Gets the location_db_id of this StudyDetailsDTO. # noqa: E501 :return: The location_db_id of this StudyDetailsDTO. # noqa: E501 :rtype: str """ return self._location_db_id @location_db_id.setter def location_db_id(self, location_db_id): """Sets the location_db_id of this StudyDetailsDTO. :param location_db_id: The location_db_id of this StudyDetailsDTO. # noqa: E501 :type: str """ self._location_db_id = location_db_id @property def location_name(self): """Gets the location_name of this StudyDetailsDTO. # noqa: E501 :return: The location_name of this StudyDetailsDTO. 
# noqa: E501 :rtype: str """ return self._location_name @location_name.setter def location_name(self, location_name): """Sets the location_name of this StudyDetailsDTO. :param location_name: The location_name of this StudyDetailsDTO. # noqa: E501 :type: str """ self._location_name = location_name @property def name(self): """Gets the name of this StudyDetailsDTO. # noqa: E501 :return: The name of this StudyDetailsDTO. # noqa: E501 :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this StudyDetailsDTO. :param name: The name of this StudyDetailsDTO. # noqa: E501 :type: str """ self._name = name @property def program_db_id(self): """Gets the program_db_id of this StudyDetailsDTO. # noqa: E501 :return: The program_db_id of this StudyDetailsDTO. # noqa: E501 :rtype: str """ return self._program_db_id @program_db_id.setter def program_db_id(self, program_db_id): """Sets the program_db_id of this StudyDetailsDTO. :param program_db_id: The program_db_id of this StudyDetailsDTO. # noqa: E501 :type: str """ self._program_db_id = program_db_id @property def program_name(self): """Gets the program_name of this StudyDetailsDTO. # noqa: E501 :return: The program_name of this StudyDetailsDTO. # noqa: E501 :rtype: str """ return self._program_name @program_name.setter def program_name(self, program_name): """Sets the program_name of this StudyDetailsDTO. :param program_name: The program_name of this StudyDetailsDTO. # noqa: E501 :type: str """ self._program_name = program_name @property def seasons(self): """Gets the seasons of this StudyDetailsDTO. # noqa: E501 :return: The seasons of this StudyDetailsDTO. # noqa: E501 :rtype: list[Season] """ return self._seasons @seasons.setter def seasons(self, seasons): """Sets the seasons of this StudyDetailsDTO. :param seasons: The seasons of this StudyDetailsDTO. # noqa: E501 :type: list[Season] """ self._seasons = seasons @property def start_date(self): """Gets the start_date of this StudyDetailsDTO. # noqa: E501 :return: The start_date of this StudyDetailsDTO. # noqa: E501 :rtype: str """ return self._start_date @start_date.setter def start_date(self, start_date): """Sets the start_date of this StudyDetailsDTO. :param start_date: The start_date of this StudyDetailsDTO. # noqa: E501 :type: str """ self._start_date = start_date @property def study_db_id(self): """Gets the study_db_id of this StudyDetailsDTO. # noqa: E501 :return: The study_db_id of this StudyDetailsDTO. # noqa: E501 :rtype: str """ return self._study_db_id @study_db_id.setter def study_db_id(self, study_db_id): """Sets the study_db_id of this StudyDetailsDTO. :param study_db_id: The study_db_id of this StudyDetailsDTO. # noqa: E501 :type: str """ self._study_db_id = study_db_id @property def study_name(self): """Gets the study_name of this StudyDetailsDTO. # noqa: E501 :return: The study_name of this StudyDetailsDTO. # noqa: E501 :rtype: str """ return self._study_name @study_name.setter def study_name(self, study_name): """Sets the study_name of this StudyDetailsDTO. :param study_name: The study_name of this StudyDetailsDTO. # noqa: E501 :type: str """ self._study_name = study_name @property def study_type(self): """Gets the study_type of this StudyDetailsDTO. # noqa: E501 :return: The study_type of this StudyDetailsDTO. # noqa: E501 :rtype: str """ return self._study_type @study_type.setter def study_type(self, study_type): """Sets the study_type of this StudyDetailsDTO. :param study_type: The study_type of this StudyDetailsDTO. 
# noqa: E501 :type: str """ self._study_type = study_type @property def study_type_db_id(self): """Gets the study_type_db_id of this StudyDetailsDTO. # noqa: E501 :return: The study_type_db_id of this StudyDetailsDTO. # noqa: E501 :rtype: str """ return self._study_type_db_id @study_type_db_id.setter def study_type_db_id(self, study_type_db_id): """Sets the study_type_db_id of this StudyDetailsDTO. :param study_type_db_id: The study_type_db_id of this StudyDetailsDTO. # noqa: E501 :type: str """ self._study_type_db_id = study_type_db_id @property def study_type_name(self): """Gets the study_type_name of this StudyDetailsDTO. # noqa: E501 :return: The study_type_name of this StudyDetailsDTO. # noqa: E501 :rtype: str """ return self._study_type_name @study_type_name.setter def study_type_name(self, study_type_name): """Sets the study_type_name of this StudyDetailsDTO. :param study_type_name: The study_type_name of this StudyDetailsDTO. # noqa: E501 :type: str """ self._study_type_name = study_type_name @property def trial_db_id(self): """Gets the trial_db_id of this StudyDetailsDTO. # noqa: E501 :return: The trial_db_id of this StudyDetailsDTO. # noqa: E501 :rtype: str """ return self._trial_db_id @trial_db_id.setter def trial_db_id(self, trial_db_id): """Sets the trial_db_id of this StudyDetailsDTO. :param trial_db_id: The trial_db_id of this StudyDetailsDTO. # noqa: E501 :type: str """ self._trial_db_id = trial_db_id @property def trial_name(self): """Gets the trial_name of this StudyDetailsDTO. # noqa: E501 :return: The trial_name of this StudyDetailsDTO. # noqa: E501 :rtype: str """ return self._trial_name @trial_name.setter def trial_name(self, trial_name): """Sets the trial_name of this StudyDetailsDTO. :param trial_name: The trial_name of this StudyDetailsDTO. # noqa: E501 :type: str """ self._trial_name = trial_name @property def contacts(self): """Gets the contacts of this StudyDetailsDTO. # noqa: E501 :return: The contacts of this StudyDetailsDTO. # noqa: E501 :rtype: list[Contact] """ return self._contacts @contacts.setter def contacts(self, contacts): """Sets the contacts of this StudyDetailsDTO. :param contacts: The contacts of this StudyDetailsDTO. # noqa: E501 :type: list[Contact] """ self._contacts = contacts @property def datalinks(self): """Gets the datalinks of this StudyDetailsDTO. # noqa: E501 :return: The datalinks of this StudyDetailsDTO. # noqa: E501 :rtype: list[DataLink] """ return self._datalinks @datalinks.setter def datalinks(self, datalinks): """Sets the datalinks of this StudyDetailsDTO. :param datalinks: The datalinks of this StudyDetailsDTO. # noqa: E501 :type: list[DataLink] """ self._datalinks = datalinks @property def location(self): """Gets the location of this StudyDetailsDTO. # noqa: E501 :return: The location of this StudyDetailsDTO. # noqa: E501 :rtype: Location """ return self._location @location.setter def location(self, location): """Sets the location of this StudyDetailsDTO. :param location: The location of this StudyDetailsDTO. 
# noqa: E501 :type: Location """ self._location = location def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(StudyDetailsDTO, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, StudyDetailsDTO): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
28.173716
434
0.609458
import pprint import re import six class StudyDetailsDTO(object): swagger_types = { 'active': 'str', 'additional_info': 'dict(str, object)', 'common_crop_name': 'str', 'documentation_url': 'str', 'end_date': 'str', 'location_db_id': 'str', 'location_name': 'str', 'name': 'str', 'program_db_id': 'str', 'program_name': 'str', 'seasons': 'list[Season]', 'start_date': 'str', 'study_db_id': 'str', 'study_name': 'str', 'study_type': 'str', 'study_type_db_id': 'str', 'study_type_name': 'str', 'trial_db_id': 'str', 'trial_name': 'str', 'contacts': 'list[Contact]', 'datalinks': 'list[DataLink]', 'location': 'Location' } attribute_map = { 'active': 'active', 'additional_info': 'additionalInfo', 'common_crop_name': 'commonCropName', 'documentation_url': 'documentationURL', 'end_date': 'endDate', 'location_db_id': 'locationDbId', 'location_name': 'locationName', 'name': 'name', 'program_db_id': 'programDbId', 'program_name': 'programName', 'seasons': 'seasons', 'start_date': 'startDate', 'study_db_id': 'studyDbId', 'study_name': 'studyName', 'study_type': 'studyType', 'study_type_db_id': 'studyTypeDbId', 'study_type_name': 'studyTypeName', 'trial_db_id': 'trialDbId', 'trial_name': 'trialName', 'contacts': 'contacts', 'datalinks': 'datalinks', 'location': 'location' } def __init__(self, active=None, additional_info=None, common_crop_name=None, documentation_url=None, end_date=None, location_db_id=None, location_name=None, name=None, program_db_id=None, program_name=None, seasons=None, start_date=None, study_db_id=None, study_name=None, study_type=None, study_type_db_id=None, study_type_name=None, trial_db_id=None, trial_name=None, contacts=None, datalinks=None, location=None): self._active = None self._additional_info = None self._common_crop_name = None self._documentation_url = None self._end_date = None self._location_db_id = None self._location_name = None self._name = None self._program_db_id = None self._program_name = None self._seasons = None self._start_date = None self._study_db_id = None self._study_name = None self._study_type = None self._study_type_db_id = None self._study_type_name = None self._trial_db_id = None self._trial_name = None self._contacts = None self._datalinks = None self._location = None self.discriminator = None if active is not None: self.active = active if additional_info is not None: self.additional_info = additional_info if common_crop_name is not None: self.common_crop_name = common_crop_name if documentation_url is not None: self.documentation_url = documentation_url if end_date is not None: self.end_date = end_date if location_db_id is not None: self.location_db_id = location_db_id if location_name is not None: self.location_name = location_name if name is not None: self.name = name if program_db_id is not None: self.program_db_id = program_db_id if program_name is not None: self.program_name = program_name if seasons is not None: self.seasons = seasons if start_date is not None: self.start_date = start_date if study_db_id is not None: self.study_db_id = study_db_id if study_name is not None: self.study_name = study_name if study_type is not None: self.study_type = study_type if study_type_db_id is not None: self.study_type_db_id = study_type_db_id if study_type_name is not None: self.study_type_name = study_type_name if trial_db_id is not None: self.trial_db_id = trial_db_id if trial_name is not None: self.trial_name = trial_name if contacts is not None: self.contacts = contacts if datalinks is not None: self.datalinks = datalinks if location is not None: 
self.location = location @property def active(self): return self._active @active.setter def active(self, active): self._active = active @property def additional_info(self): return self._additional_info @additional_info.setter def additional_info(self, additional_info): self._additional_info = additional_info @property def common_crop_name(self): return self._common_crop_name @common_crop_name.setter def common_crop_name(self, common_crop_name): self._common_crop_name = common_crop_name @property def documentation_url(self): return self._documentation_url @documentation_url.setter def documentation_url(self, documentation_url): self._documentation_url = documentation_url @property def end_date(self): return self._end_date @end_date.setter def end_date(self, end_date): self._end_date = end_date @property def location_db_id(self): return self._location_db_id @location_db_id.setter def location_db_id(self, location_db_id): self._location_db_id = location_db_id @property def location_name(self): return self._location_name @location_name.setter def location_name(self, location_name): self._location_name = location_name @property def name(self): return self._name @name.setter def name(self, name): self._name = name @property def program_db_id(self): return self._program_db_id @program_db_id.setter def program_db_id(self, program_db_id): self._program_db_id = program_db_id @property def program_name(self): return self._program_name @program_name.setter def program_name(self, program_name): self._program_name = program_name @property def seasons(self): return self._seasons @seasons.setter def seasons(self, seasons): self._seasons = seasons @property def start_date(self): return self._start_date @start_date.setter def start_date(self, start_date): self._start_date = start_date @property def study_db_id(self): return self._study_db_id @study_db_id.setter def study_db_id(self, study_db_id): self._study_db_id = study_db_id @property def study_name(self): return self._study_name @study_name.setter def study_name(self, study_name): self._study_name = study_name @property def study_type(self): return self._study_type @study_type.setter def study_type(self, study_type): self._study_type = study_type @property def study_type_db_id(self): return self._study_type_db_id @study_type_db_id.setter def study_type_db_id(self, study_type_db_id): self._study_type_db_id = study_type_db_id @property def study_type_name(self): return self._study_type_name @study_type_name.setter def study_type_name(self, study_type_name): self._study_type_name = study_type_name @property def trial_db_id(self): return self._trial_db_id @trial_db_id.setter def trial_db_id(self, trial_db_id): self._trial_db_id = trial_db_id @property def trial_name(self): return self._trial_name @trial_name.setter def trial_name(self, trial_name): self._trial_name = trial_name @property def contacts(self): return self._contacts @contacts.setter def contacts(self, contacts): self._contacts = contacts @property def datalinks(self): return self._datalinks @datalinks.setter def datalinks(self, datalinks): self._datalinks = datalinks @property def location(self): return self._location @location.setter def location(self, location): self._location = location def to_dict(self): result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif 
isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(StudyDetailsDTO, dict): for key, value in self.items(): result[key] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str() def __eq__(self, other): if not isinstance(other, StudyDetailsDTO): return False return self.__dict__ == other.__dict__ def __ne__(self, other): return not self == other
true
true
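The record above is a swagger-codegen model class; the sketch below shows how such a generated model is typically constructed and serialized. It is a minimal illustration only: the import path is hypothetical (the record does not show the package name), and it assumes `six` is installed alongside the generated client.

from swagger_client.models.study_details_dto import StudyDetailsDTO  # hypothetical import path

study = StudyDetailsDTO(
    study_db_id="1001",
    study_name="Yield trial 2020",
    common_crop_name="wheat",
)

# Attribute access goes through the generated getters/setters.
print(study.study_name)

# to_dict() walks swagger_types, so the keys are the Python attribute names
# (e.g. 'study_db_id'), with nested models serialized recursively.
print(study.to_dict()["study_db_id"])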
79031584beddb09002af962a76bc1dbb6e773115
371
py
Python
metaci/cumulusci/utils.py
abhishekalgo/metaci
cd62473b3fb85fb0f39623f9fb2850993ff708a5
[ "BSD-3-Clause" ]
null
null
null
metaci/cumulusci/utils.py
abhishekalgo/metaci
cd62473b3fb85fb0f39623f9fb2850993ff708a5
[ "BSD-3-Clause" ]
null
null
null
metaci/cumulusci/utils.py
abhishekalgo/metaci
cd62473b3fb85fb0f39623f9fb2850993ff708a5
[ "BSD-3-Clause" ]
1
2018-12-07T09:51:07.000Z
2018-12-07T09:51:07.000Z
from cumulusci.core.config import ConnectedAppOAuthConfig
from django.conf import settings


def get_connected_app():
    return ConnectedAppOAuthConfig(
        {
            "callback_url": settings.CONNECTED_APP_CALLBACK_URL,
            "client_id": settings.CONNECTED_APP_CLIENT_ID,
            "client_secret": settings.CONNECTED_APP_CLIENT_SECRET,
        }
    )
28.538462
66
0.708895
from cumulusci.core.config import ConnectedAppOAuthConfig
from django.conf import settings


def get_connected_app():
    return ConnectedAppOAuthConfig(
        {
            "callback_url": settings.CONNECTED_APP_CALLBACK_URL,
            "client_id": settings.CONNECTED_APP_CLIENT_ID,
            "client_secret": settings.CONNECTED_APP_CLIENT_SECRET,
        }
    )
true
true
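A hedged sketch of how the helper in the record above would be called standalone: Django settings must be configured first, the three values are placeholders rather than real credentials, and the `.config` attribute is assumed to follow cumulusci's BaseConfig convention (not shown in the record).

from django.conf import settings
from cumulusci.core.config import ConnectedAppOAuthConfig

if not settings.configured:
    settings.configure(
        CONNECTED_APP_CALLBACK_URL="http://localhost:8080/callback",
        CONNECTED_APP_CLIENT_ID="placeholder-client-id",
        CONNECTED_APP_CLIENT_SECRET="placeholder-client-secret",
    )

# Same construction as get_connected_app() in the record above.
app_config = ConnectedAppOAuthConfig(
    {
        "callback_url": settings.CONNECTED_APP_CALLBACK_URL,
        "client_id": settings.CONNECTED_APP_CLIENT_ID,
        "client_secret": settings.CONNECTED_APP_CLIENT_SECRET,
    }
)
print(app_config.config["callback_url"])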
790315e08da77a2a1c9248ca5b30294a6922ce54
7,492
py
Python
data_loaders/KLens.py
klens-codes/MaskFlownet-Pytorch
94d41fd20f774845a1b2df7f77ec95c44217af94
[ "MIT" ]
null
null
null
data_loaders/KLens.py
klens-codes/MaskFlownet-Pytorch
94d41fd20f774845a1b2df7f77ec95c44217af94
[ "MIT" ]
null
null
null
data_loaders/KLens.py
klens-codes/MaskFlownet-Pytorch
94d41fd20f774845a1b2df7f77ec95c44217af94
[ "MIT" ]
null
null
null
import os import re import struct import glob import numpy as np import frame_utils import skimage import skimage.io import torch from torch.utils.data import Dataset class KLens(Dataset): #def __init__(self,raft_path="/data2/opticalflow/rnd/opticalflow/RAFT/out_klens_raft_chairs", root_path="/data2/opticalflow/KLENS/images/",root_path2="/data2/opticalflow/KLENS/pins/",filenumberlist=["0030","1106","1113","1132","1134","1167","1173"],split="train",ref="",meas=""): def __init__(self,raft_path="/data2/opticalflow/algo_comp/flownet2/out/", root_path="/data2/opticalflow/KLENS/images/",root_path2="/data2/opticalflow/KLENS/pins/",filenumberlist=["0030","1106","1113","1132","1134","1167","1173"],split="train",ref="",meas=""): super(KLens, self).__init__() self.split = split raftflowpaths = glob.glob(os.path.join(raft_path,"*.flo")) file_list = {} file_list['train'] = [] file_list['valid'] = [] file_list['test'] = [] file_list['train+valid'] = [] for filenum in filenumberlist: for raftflowpath in raftflowpaths: #print(raftflowpath) if "KLE_"+filenum in raftflowpath: file_list['train'].append([os.path.join(root_path,"KLE_"+filenum+".jpg3.png"),os.path.join(root_path,"KLE_"+filenum+".jpg5.png"),raftflowpath]) file_list["train"].extend([[os.path.join(root_path,"KLE_0309_exp_sub5.jpg"),os.path.join(root_path,"KLE_0309_exp_sub6.jpg")],[os.path.join(root_path,"KLE_0730_sub5.jpg"),os.path.join(root_path,"KLE_0730_sub6.jpg")],[os.path.join(root_path,"KLE_0747_sub5.jpg"),os.path.join(root_path,"KLE_0747_sub6.jpg")],[os.path.join(root_path,"KLE_9797clean_sub5.jpg"),os.path.join(root_path,"KLE_9797clean_sub6.jpg")],[os.path.join(root_path,"KLE_9803clean_sub5.jpg"),os.path.join(root_path,"KLE_9803clean_sub6.jpg")],[os.path.join(root_path,"NKM_0063_sub5.jpg"),os.path.join(root_path,"NKM_0063_sub6.jpg")],[os.path.join(root_path,"NKM_0109_sub5.jpg"),os.path.join(root_path,"NKM_0109_sub6.jpg")],[os.path.join(root_path,"scene_1_sub5.jpg"),os.path.join(root_path,"scene_1_sub6.jpg")]]) file_list["valid"].extend([[os.path.join(root_path,"KLE_0309_exp_sub5.jpg"),os.path.join(root_path,"KLE_0309_exp_sub6.jpg")],[os.path.join(root_path,"KLE_0730_sub5.jpg"),os.path.join(root_path,"KLE_0730_sub6.jpg")],[os.path.join(root_path,"KLE_0747_sub5.jpg"),os.path.join(root_path,"KLE_0747_sub6.jpg")],[os.path.join(root_path,"KLE_9797clean_sub5.jpg"),os.path.join(root_path,"KLE_9797clean_sub6.jpg")],[os.path.join(root_path,"KLE_9803clean_sub5.jpg"),os.path.join(root_path,"KLE_9803clean_sub6.jpg")],[os.path.join(root_path,"NKM_0063_sub5.jpg"),os.path.join(root_path,"NKM_0063_sub6.jpg")],[os.path.join(root_path,"NKM_0109_sub5.jpg"),os.path.join(root_path,"NKM_0109_sub6.jpg")],[os.path.join(root_path,"scene_1_sub5.jpg"),os.path.join(root_path,"scene_1_sub6.jpg")]]) file_list["test"].extend([[os.path.join(root_path,"KLE_0309_exp_sub5.jpg"),os.path.join(root_path,"KLE_0309_exp_sub6.jpg")],[os.path.join(root_path,"KLE_0730_sub5.jpg"),os.path.join(root_path,"KLE_0730_sub6.jpg")],[os.path.join(root_path,"KLE_0747_sub5.jpg"),os.path.join(root_path,"KLE_0747_sub6.jpg")],[os.path.join(root_path,"KLE_9797clean_sub5.jpg"),os.path.join(root_path,"KLE_9797clean_sub6.jpg")],[os.path.join(root_path,"KLE_9803clean_sub5.jpg"),os.path.join(root_path,"KLE_9803clean_sub6.jpg")],[os.path.join(root_path,"NKM_0063_sub5.jpg"),os.path.join(root_path,"NKM_0063_sub6.jpg")],[os.path.join(root_path,"NKM_0109_sub5.jpg"),os.path.join(root_path,"NKM_0109_sub6.jpg")],[os.path.join(root_path,"scene_1_sub5.jpg"),os.path.join(root_path,"scene_1_sub6.jpg")]]) 
file_list["train+valid"].extend([[os.path.join(root_path,"KLE_0309_exp_sub5.jpg"),os.path.join(root_path,"KLE_0309_exp_sub6.jpg")],[os.path.join(root_path,"KLE_0730_sub5.jpg"),os.path.join(root_path,"KLE_0730_sub6.jpg")],[os.path.join(root_path,"KLE_0747_sub5.jpg"),os.path.join(root_path,"KLE_0747_sub6.jpg")],[os.path.join(root_path,"KLE_9797clean_sub5.jpg"),os.path.join(root_path,"KLE_9797clean_sub6.jpg")],[os.path.join(root_path,"KLE_9803clean_sub5.jpg"),os.path.join(root_path,"KLE_9803clean_sub6.jpg")],[os.path.join(root_path,"NKM_0063_sub5.jpg"),os.path.join(root_path,"NKM_0063_sub6.jpg")],[os.path.join(root_path,"NKM_0109_sub5.jpg"),os.path.join(root_path,"NKM_0109_sub6.jpg")],[os.path.join(root_path,"scene_1_sub5.jpg"),os.path.join(root_path,"scene_1_sub6.jpg")]]) #file_list["train"].extend([[os.path.join(root_path2,"9-AIT_pins_2.jpg"),os.path.join(root_path2,"9-AIT_pins_3.jpg")],[os.path.join(root_path2,"10-Hela_2.jpg"),os.path.join(root_path2,"10-Hela_3.jpg")],[os.path.join(root_path2,"11-Hela_1_2.jpg"),os.path.join(root_path2,"11-Hela_1_3.jpg")],]) #file_list["train"].extend([[os.path.join(root_path2,"9-AIT_pins_2.jpg"),os.path.join(root_path2,"9-AIT_pins_0.jpg")],[os.path.join(root_path2,"10-Hela_2.jpg"),os.path.join(root_path2,"10-Hela_0.jpg")],[os.path.join(root_path2,"11-Hela_1_2.jpg"),os.path.join(root_path2,"11-Hela_1_0.jpg")],]) #file_list["train"].extend([[os.path.join(root_path2,"9-AIT_pins_2.jpg"),os.path.join(root_path2,"9-AIT_pins_1.jpg")],[os.path.join(root_path2,"10-Hela_2.jpg"),os.path.join(root_path2,"10-Hela_1.jpg")],[os.path.join(root_path2,"11-Hela_1_2.jpg"),os.path.join(root_path2,"11-Hela_1_1.jpg")],]) #file_list["train"].extend([[os.path.join(root_path2,"9-AIT_pins_2.jpg"),os.path.join(root_path2,"9-AIT_pins_4.jpg")],[os.path.join(root_path2,"10-Hela_2.jpg"),os.path.join(root_path2,"10-Hela_4.jpg")],[os.path.join(root_path2,"11-Hela_1_2.jpg"),os.path.join(root_path2,"11-Hela_1_4.jpg")],]) self.dataset = file_list def __len__(self): return len(self.dataset[self.split]) def __getitem__(self, idx): try: im0_path, im1_path, raftflow_path = self.dataset[self.split][idx] raftflow = frame_utils.readFlow(raftflow_path) except: im0_path, im1_path = self.dataset[self.split][idx] raftflow = np.array([]) img0 = skimage.io.imread(im0_path) img1 = skimage.io.imread(im1_path) img0 = torch.tensor(img0/255.).float() img1 = torch.tensor(img1/255.).float() return img0, img1,np.array([]),np.array([]), [im0_path , im1_path],raftflow class Flo: def __init__(self, w, h): self.__floec1__ = float(202021.25) self.__floec2__ = int(w) self.__floec3__ = int(h) self.__floheader__ = struct.pack('fii', self.__floec1__, self.__floec2__, self.__floec3__) self.__floheaderlen__ = len(self.__floheader__) self.__flow__ = w self.__floh__ = h self.__floshape__ = [self.__floh__, self.__flow__, 2] if self.__floheader__[:4] != b'PIEH': raise Exception('Expect machine to be LE.') def load(self, file): with open(file, 'rb') as fp: if fp.read(self.__floheaderlen__) != self.__floheader__: raise Exception('Bad flow header: ' + file) result = np.ndarray(shape=self.__floshape__, dtype=np.float32, buffer=fp.read(), order='C') return result def save(self, arr, fname): with open(fname, 'wb') as fp: fp.write(self.__floheader__) fp.write(arr.astype(np.float32).tobytes())
85.136364
788
0.695275
import os import re import struct import glob import numpy as np import frame_utils import skimage import skimage.io import torch from torch.utils.data import Dataset class KLens(Dataset): def __init__(self,raft_path="/data2/opticalflow/algo_comp/flownet2/out/", root_path="/data2/opticalflow/KLENS/images/",root_path2="/data2/opticalflow/KLENS/pins/",filenumberlist=["0030","1106","1113","1132","1134","1167","1173"],split="train",ref="",meas=""): super(KLens, self).__init__() self.split = split raftflowpaths = glob.glob(os.path.join(raft_path,"*.flo")) file_list = {} file_list['train'] = [] file_list['valid'] = [] file_list['test'] = [] file_list['train+valid'] = [] for filenum in filenumberlist: for raftflowpath in raftflowpaths: if "KLE_"+filenum in raftflowpath: file_list['train'].append([os.path.join(root_path,"KLE_"+filenum+".jpg3.png"),os.path.join(root_path,"KLE_"+filenum+".jpg5.png"),raftflowpath]) file_list["train"].extend([[os.path.join(root_path,"KLE_0309_exp_sub5.jpg"),os.path.join(root_path,"KLE_0309_exp_sub6.jpg")],[os.path.join(root_path,"KLE_0730_sub5.jpg"),os.path.join(root_path,"KLE_0730_sub6.jpg")],[os.path.join(root_path,"KLE_0747_sub5.jpg"),os.path.join(root_path,"KLE_0747_sub6.jpg")],[os.path.join(root_path,"KLE_9797clean_sub5.jpg"),os.path.join(root_path,"KLE_9797clean_sub6.jpg")],[os.path.join(root_path,"KLE_9803clean_sub5.jpg"),os.path.join(root_path,"KLE_9803clean_sub6.jpg")],[os.path.join(root_path,"NKM_0063_sub5.jpg"),os.path.join(root_path,"NKM_0063_sub6.jpg")],[os.path.join(root_path,"NKM_0109_sub5.jpg"),os.path.join(root_path,"NKM_0109_sub6.jpg")],[os.path.join(root_path,"scene_1_sub5.jpg"),os.path.join(root_path,"scene_1_sub6.jpg")]]) file_list["valid"].extend([[os.path.join(root_path,"KLE_0309_exp_sub5.jpg"),os.path.join(root_path,"KLE_0309_exp_sub6.jpg")],[os.path.join(root_path,"KLE_0730_sub5.jpg"),os.path.join(root_path,"KLE_0730_sub6.jpg")],[os.path.join(root_path,"KLE_0747_sub5.jpg"),os.path.join(root_path,"KLE_0747_sub6.jpg")],[os.path.join(root_path,"KLE_9797clean_sub5.jpg"),os.path.join(root_path,"KLE_9797clean_sub6.jpg")],[os.path.join(root_path,"KLE_9803clean_sub5.jpg"),os.path.join(root_path,"KLE_9803clean_sub6.jpg")],[os.path.join(root_path,"NKM_0063_sub5.jpg"),os.path.join(root_path,"NKM_0063_sub6.jpg")],[os.path.join(root_path,"NKM_0109_sub5.jpg"),os.path.join(root_path,"NKM_0109_sub6.jpg")],[os.path.join(root_path,"scene_1_sub5.jpg"),os.path.join(root_path,"scene_1_sub6.jpg")]]) file_list["test"].extend([[os.path.join(root_path,"KLE_0309_exp_sub5.jpg"),os.path.join(root_path,"KLE_0309_exp_sub6.jpg")],[os.path.join(root_path,"KLE_0730_sub5.jpg"),os.path.join(root_path,"KLE_0730_sub6.jpg")],[os.path.join(root_path,"KLE_0747_sub5.jpg"),os.path.join(root_path,"KLE_0747_sub6.jpg")],[os.path.join(root_path,"KLE_9797clean_sub5.jpg"),os.path.join(root_path,"KLE_9797clean_sub6.jpg")],[os.path.join(root_path,"KLE_9803clean_sub5.jpg"),os.path.join(root_path,"KLE_9803clean_sub6.jpg")],[os.path.join(root_path,"NKM_0063_sub5.jpg"),os.path.join(root_path,"NKM_0063_sub6.jpg")],[os.path.join(root_path,"NKM_0109_sub5.jpg"),os.path.join(root_path,"NKM_0109_sub6.jpg")],[os.path.join(root_path,"scene_1_sub5.jpg"),os.path.join(root_path,"scene_1_sub6.jpg")]]) 
file_list["train+valid"].extend([[os.path.join(root_path,"KLE_0309_exp_sub5.jpg"),os.path.join(root_path,"KLE_0309_exp_sub6.jpg")],[os.path.join(root_path,"KLE_0730_sub5.jpg"),os.path.join(root_path,"KLE_0730_sub6.jpg")],[os.path.join(root_path,"KLE_0747_sub5.jpg"),os.path.join(root_path,"KLE_0747_sub6.jpg")],[os.path.join(root_path,"KLE_9797clean_sub5.jpg"),os.path.join(root_path,"KLE_9797clean_sub6.jpg")],[os.path.join(root_path,"KLE_9803clean_sub5.jpg"),os.path.join(root_path,"KLE_9803clean_sub6.jpg")],[os.path.join(root_path,"NKM_0063_sub5.jpg"),os.path.join(root_path,"NKM_0063_sub6.jpg")],[os.path.join(root_path,"NKM_0109_sub5.jpg"),os.path.join(root_path,"NKM_0109_sub6.jpg")],[os.path.join(root_path,"scene_1_sub5.jpg"),os.path.join(root_path,"scene_1_sub6.jpg")]]) self.dataset = file_list def __len__(self): return len(self.dataset[self.split]) def __getitem__(self, idx): try: im0_path, im1_path, raftflow_path = self.dataset[self.split][idx] raftflow = frame_utils.readFlow(raftflow_path) except: im0_path, im1_path = self.dataset[self.split][idx] raftflow = np.array([]) img0 = skimage.io.imread(im0_path) img1 = skimage.io.imread(im1_path) img0 = torch.tensor(img0/255.).float() img1 = torch.tensor(img1/255.).float() return img0, img1,np.array([]),np.array([]), [im0_path , im1_path],raftflow class Flo: def __init__(self, w, h): self.__floec1__ = float(202021.25) self.__floec2__ = int(w) self.__floec3__ = int(h) self.__floheader__ = struct.pack('fii', self.__floec1__, self.__floec2__, self.__floec3__) self.__floheaderlen__ = len(self.__floheader__) self.__flow__ = w self.__floh__ = h self.__floshape__ = [self.__floh__, self.__flow__, 2] if self.__floheader__[:4] != b'PIEH': raise Exception('Expect machine to be LE.') def load(self, file): with open(file, 'rb') as fp: if fp.read(self.__floheaderlen__) != self.__floheader__: raise Exception('Bad flow header: ' + file) result = np.ndarray(shape=self.__floshape__, dtype=np.float32, buffer=fp.read(), order='C') return result def save(self, arr, fname): with open(fname, 'wb') as fp: fp.write(self.__floheader__) fp.write(arr.astype(np.float32).tobytes())
true
true
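The Flo class in the record above reads and writes Middlebury-style .flo optical-flow files; the sketch below round-trips a random flow field through it. It assumes a little-endian machine (the class raises otherwise) and that the module is importable under the path shown in the record; note that importing it also pulls in torch, skimage and frame_utils, so copying the Flo class into a small standalone module may be more convenient.

import numpy as np
from data_loaders.KLens import Flo  # path taken from the record; adjust to your layout

w, h = 64, 48
flow = np.random.randn(h, w, 2).astype(np.float32)  # .flo stores an (h, w, 2) float32 array

flo = Flo(w, h)
flo.save(flow, "example.flo")     # writes the 'PIEH' header, then the raw float32 payload
loaded = flo.load("example.flo")  # validates the header, then reshapes the payload

assert loaded.shape == (h, w, 2)
assert np.allclose(flow, loaded)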
790316507df3bccaae1a4f1bba883ca7ede96d29
27,963
py
Python
pycoin/cmds/tx.py
mewald55/pycoin_ypub-zpub
8c7933802288e2ab33bf611659447fe24b96ada5
[ "MIT" ]
null
null
null
pycoin/cmds/tx.py
mewald55/pycoin_ypub-zpub
8c7933802288e2ab33bf611659447fe24b96ada5
[ "MIT" ]
null
null
null
pycoin/cmds/tx.py
mewald55/pycoin_ypub-zpub
8c7933802288e2ab33bf611659447fe24b96ada5
[ "MIT" ]
null
null
null
#!/usr/bin/env python from __future__ import print_function import argparse import calendar import codecs import datetime import io import os.path import re import subprocess import sys from pycoinzpub.convention import tx_fee, satoshi_to_mbtc from pycoinzpub.encoding import hash160 from pycoinzpub.key import Key from pycoinzpub.key.validate import is_address_valid from pycoinzpub.networks import address_prefix_for_netcode, full_network_name_for_netcode, network_codes from pycoinzpub.networks.default import get_current_netcode from pycoinzpub.serialize import b2h_rev, h2b, h2b_rev, stream_to_bytes from pycoinzpub.services import spendables_for_address, get_tx_db from pycoinzpub.services.providers import message_about_tx_cache_env, \ message_about_tx_for_tx_hash_env, message_about_spendables_for_address_env from pycoinzpub.tx.exceptions import BadSpendableError from pycoinzpub.tx.script.tools import opcode_list, disassemble_for_opcode_data from pycoinzpub.tx.script.check_signature import parse_signature_blob from pycoinzpub.tx.script.der import UnexpectedDER from pycoinzpub.tx.script.disassemble import disassemble_scripts, sighash_type_to_string from pycoinzpub.tx.tx_utils import distribute_from_split_pool, sign_tx from pycoinzpub.tx.Tx import Spendable, Tx, TxOut from pycoinzpub.ui import standard_tx_out_script DEFAULT_VERSION = 1 DEFAULT_LOCK_TIME = 0 LOCKTIME_THRESHOLD = 500000000 def range_int(min, max, name): def cast(v): v = int(v) if not (min <= v <= max): raise ValueError() return v cast.__name__ = name return cast def validate_bitcoind(tx, tx_db, bitcoind_url): try: from pycoinzpub.services.bitcoind import bitcoind_agrees_on_transaction_validity if bitcoind_agrees_on_transaction_validity(bitcoind_url, tx): print("interop test passed for %s" % tx.id(), file=sys.stderr) else: print("tx ==> %s FAILED interop test" % tx.id(), file=sys.stderr) except ImportError: print("warning: can't talk to bitcoind due to missing library") def dump_header(tx): tx_bin = stream_to_bytes(tx.stream) print("Version: %2d tx hash %s %d bytes" % (tx.version, tx.id(), len(tx_bin))) if tx.has_witness_data(): print(" segwit tx hash %s" % tx.w_id()) print("TxIn count: %d; TxOut count: %d" % (len(tx.txs_in), len(tx.txs_out))) if tx.lock_time == 0: meaning = "valid anytime" elif tx.lock_time < LOCKTIME_THRESHOLD: meaning = "valid after block index %d" % tx.lock_time else: when = datetime.datetime.utcfromtimestamp(tx.lock_time) meaning = "valid on or after %s utc" % when.isoformat() print("Lock time: %d (%s)" % (tx.lock_time, meaning)) print("Input%s:" % ('s' if len(tx.txs_in) != 1 else '')) def make_trace_script(do_trace, use_pdb): if not (do_trace or use_pdb): return None def trace_script(pc, opcode, data, stack, altstack, if_condition_stack, is_signature): from pycoinzpub.serialize import b2h print("stack: [%s]" % ' '.join(b2h(s) for s in stack)) if len(altstack) > 0: print("altstack: %s" % altstack) print("condition stack: %s" % if_condition_stack) print("%3d : %02x %s" % (pc, opcode, disassemble_for_opcode_data(opcode, data))) if use_pdb: import pdb pdb.set_trace() return trace_script def dump_inputs(tx, netcode, verbose_signature, address_prefix, traceback_f, disassembly_level): def signature_for_hash_type_f(hash_type, script): return tx.signature_hash(script, idx, hash_type) for idx, tx_in in enumerate(tx.txs_in): if tx.is_coinbase(): print("%4d: COINBASE %12.5f mBTC" % (idx, satoshi_to_mbtc(tx.total_in()))) continue suffix = "" if tx.missing_unspent(idx): tx_out = None address = 
tx_in.bitcoin_address(address_prefix=address_prefix) else: tx_out = tx.unspents[idx] sig_result = " sig ok" if tx.is_signature_ok(idx, traceback_f=traceback_f) else " BAD SIG" suffix = " %12.5f mBTC %s" % (satoshi_to_mbtc(tx_out.coin_value), sig_result) address = tx_out.bitcoin_address(netcode=netcode) t = "%4d: %34s from %s:%-4d%s" % (idx, address, b2h_rev(tx_in.previous_hash), tx_in.previous_index, suffix) print(t.rstrip()) if disassembly_level > 0: dump_disassembly(tx_in, tx_out, tx.lock_time, signature_for_hash_type_f) if verbose_signature: dump_signatures(tx, tx_in, tx_out, idx, netcode, address_prefix, traceback_f, disassembly_level) def dump_disassembly(tx_in, tx_out, lock_time, signature_for_hash_type_f): out_script = b'' if tx_out: out_script = tx_out.script for (pre_annotations, pc, opcode, instruction, post_annotations) in \ disassemble_scripts( tx_in.script, out_script, lock_time, signature_for_hash_type_f): for l in pre_annotations: print(" %s" % l) if 1: print(" %4x: %02x %s" % (pc, opcode, instruction)) for l in post_annotations: print(" %s" % l) def dump_signatures(tx, tx_in, tx_out, idx, netcode, address_prefix, traceback_f, disassembly_level): signatures = [] for opcode in opcode_list(tx_in.script): if not opcode.startswith("OP_"): try: signatures.append(parse_signature_blob(h2b(opcode[1:-1]))) except UnexpectedDER: pass if signatures: sig_types_identical = ( tuple(zip(*signatures))[1].count(signatures[0][1]) == len(signatures)) i = 1 if len(signatures) > 1 else '' for sig_pair, sig_type in signatures: print(" r{0}: {1:#x}\n s{0}: {2:#x}".format(i, *sig_pair)) if not sig_types_identical and tx_out: print(" z{}: {:#x} {}".format(i, tx.signature_hash(tx_out.script, idx, sig_type), sighash_type_to_string(sig_type))) if i: i += 1 if sig_types_identical and tx_out: print(" z:{} {:#x} {}".format(' ' if i else '', tx.signature_hash( tx_out.script, idx, sig_type), sighash_type_to_string(sig_type))) def dump_footer(tx, missing_unspents): if not missing_unspents: print("Total input %12.5f mBTC" % satoshi_to_mbtc(tx.total_in())) if 1: print("Total output %12.5f mBTC" % satoshi_to_mbtc(tx.total_out())) if not missing_unspents: print("Total fees %12.5f mBTC" % satoshi_to_mbtc(tx.fee())) def dump_tx(tx, netcode, verbose_signature, disassembly_level, do_trace, use_pdb): address_prefix = address_prefix_for_netcode(netcode) missing_unspents = tx.missing_unspents() traceback_f = make_trace_script(do_trace, use_pdb) dump_header(tx) dump_inputs(tx, netcode, verbose_signature, address_prefix, traceback_f, disassembly_level) def signature_for_hash_type_f(hash_type, script): return tx.signature_hash(script, idx, hash_type) print("Output%s:" % ('s' if len(tx.txs_out) != 1 else '')) for idx, tx_out in enumerate(tx.txs_out): amount_mbtc = satoshi_to_mbtc(tx_out.coin_value) address = tx_out.bitcoin_address(netcode=netcode) or "(unknown)" print("%4d: %34s receives %12.5f mBTC" % (idx, address, amount_mbtc)) if disassembly_level > 0: for (pre_annotations, pc, opcode, instruction, post_annotations) in \ disassemble_scripts(b'', tx_out.script, tx.lock_time, signature_for_hash_type_f): for l in pre_annotations: print(" %s" % l) if 1: print(" %4x: %02x %s" % (pc, opcode, instruction)) for l in post_annotations: print(" %s" % l) dump_footer(tx, missing_unspents) def check_fees(tx): total_in, total_out = tx.total_in(), tx.total_out() actual_tx_fee = total_in - total_out recommended_tx_fee = tx_fee.recommended_fee_for_tx(tx) print("warning: transaction fees recommendations casually calculated and estimates 
may be incorrect", file=sys.stderr) if actual_tx_fee > recommended_tx_fee: print("warning: transaction fee of %s exceeds expected value of %s mBTC" % (satoshi_to_mbtc(actual_tx_fee), satoshi_to_mbtc(recommended_tx_fee)), file=sys.stderr) elif actual_tx_fee < 0: print("not enough source coins (%s mBTC) for destination (%s mBTC)." " Short %s mBTC" % (satoshi_to_mbtc(total_in), satoshi_to_mbtc(total_out), satoshi_to_mbtc(-actual_tx_fee)), file=sys.stderr) elif actual_tx_fee < recommended_tx_fee: print("warning: transaction fee lower than (casually calculated)" " expected value of %s mBTC, transaction might not propogate" % satoshi_to_mbtc(recommended_tx_fee), file=sys.stderr) return actual_tx_fee EARLIEST_DATE = datetime.datetime(year=2009, month=1, day=1) def parse_locktime(s): s = re.sub(r"[ ,:\-]+", r"-", s) for fmt1 in ["%Y-%m-%dT", "%Y-%m-%d", "%b-%d-%Y", "%b-%d-%y", "%B-%d-%Y", "%B-%d-%y"]: for fmt2 in ["T%H-%M-%S", "T%H-%M", "-%H-%M-%S", "-%H-%M", ""]: fmt = fmt1 + fmt2 try: when = datetime.datetime.strptime(s, fmt) if when < EARLIEST_DATE: raise ValueError("invalid date: must be after %s" % EARLIEST_DATE) return calendar.timegm(when.timetuple()) except ValueError: pass return int(s) parse_locktime.__name__ = 'locktime' def parse_fee(fee): if fee in ["standard"]: return fee return int(fee) def create_parser(): codes = network_codes() EPILOG = ('Files are binary by default unless they end with the suffix ".hex". ' + 'Known networks codes:\n ' + ', '.join(['%s (%s)' % (i, full_network_name_for_netcode(i)) for i in codes])) parser = argparse.ArgumentParser( description="Manipulate bitcoin (or alt coin) transactions.", epilog=EPILOG) parser.add_argument('-t', "--transaction-version", type=range_int(0, 255, "version"), help='Transaction version, either 1 (default) or 3 (not yet supported).') parser.add_argument('-l', "--lock-time", type=parse_locktime, help='Lock time; either a block' 'index, or a date/time (example: "2014-01-01T15:00:00"') parser.add_argument('-n', "--network", default=get_current_netcode(), choices=codes, help='Define network code (BTC=Bitcoin mainnet, XTN=Bitcoin testnet).') parser.add_argument('-a', "--augment", action='store_true', help='augment tx by adding any missing spendable metadata by fetching' ' inputs from cache and/or web services') parser.add_argument('-s', "--verbose-signature", action='store_true', help='Display technical signature details.') parser.add_argument("-i", "--fetch-spendables", metavar="address", action="append", help='Add all unspent spendables for the given bitcoin address. This information' ' is fetched from web services. With no outputs, incoming spendables will be printed.') parser.add_argument('-f', "--private-key-file", metavar="path-to-private-keys", action="append", default=[], help='file containing WIF or BIP0032 private keys. If file name ends with .gpg, ' '"gpg -d" will be invoked automatically. 
File is read one line at a time, and if ' 'the file contains only one WIF per line, it will also be scanned for a bitcoin ' 'address, and any addresses found will be assumed to be public keys for the given' ' private key.', type=argparse.FileType('r')) parser.add_argument('-g', "--gpg-argument", help='argument to pass to gpg (besides -d).', default='') parser.add_argument("--remove-tx-in", metavar="tx_in_index_to_delete", action="append", type=int, help='remove a tx_in') parser.add_argument("--remove-tx-out", metavar="tx_out_index_to_delete", action="append", type=int, help='remove a tx_out') parser.add_argument('-F', "--fee", help='fee, in satoshis, to pay on transaction, or ' '"standard" to auto-calculate. This is only useful if the "split pool" ' 'is used; otherwise, the fee is automatically set to the unclaimed funds.', default="standard", metavar="transaction-fee", type=parse_fee) parser.add_argument('-C', "--cache", help='force the resultant transaction into the transaction cache.' ' Mostly for testing.', action='store_true'), parser.add_argument("--db", type=Tx.from_hex, help='force the transaction expressed by the given hex ' 'into a RAM-based transaction cache. Mostly for testing.', action="append"), parser.add_argument('-u', "--show-unspents", action='store_true', help='show TxOut items for this transaction in Spendable form.') parser.add_argument('-b', "--bitcoind-url", help='URL to bitcoind instance to validate against (http://user:pass@host:port).') parser.add_argument('-o', "--output-file", metavar="path-to-output-file", type=argparse.FileType('wb'), help='file to write transaction to. This supresses most other output.') parser.add_argument('-d', "--disassemble", action='store_true', help='Disassemble scripts.') parser.add_argument("--pdb", action="store_true", help='Enter PDB debugger on each script instruction.') parser.add_argument("--trace", action='store_true', help='Trace scripts.') parser.add_argument('-p', "--pay-to-script", metavar="pay-to-script", action="append", help='a hex version of a script required for a pay-to-script' 'input (a bitcoin address that starts with 3)') parser.add_argument('-P', "--pay-to-script-file", metavar="pay-to-script-file", nargs=1, type=argparse.FileType('r'), help='a file containing hex scripts ' '(one per line) corresponding to pay-to-script inputs') parser.add_argument("argument", nargs="*", help='generic argument: can be a hex transaction id ' '(exactly 64 characters) to be fetched from cache or a web service;' ' a transaction as a hex string; a path name to a transaction to be loaded;' ' a spendable 4-tuple of the form tx_id/tx_out_idx/script_hex/satoshi_count ' 'to be added to TxIn list; an address/satoshi_count to be added to the TxOut ' 'list; an address to be added to the TxOut list and placed in the "split' ' pool".') return parser def replace_with_gpg_pipe(args, f): gpg_args = ["gpg", "-d"] if args.gpg_argument: gpg_args.extend(args.gpg_argument.split()) gpg_args.append(f.name) popen = subprocess.Popen(gpg_args, stdout=subprocess.PIPE) return popen.stdout def parse_private_key_file(args, key_list): wif_re = re.compile(r"[1-9a-km-zA-LMNP-Z]{51,111}") # address_re = re.compile(r"[1-9a-kmnp-zA-KMNP-Z]{27-31}") for f in args.private_key_file: if f.name.endswith(".gpg"): f = replace_with_gpg_pipe(args, f) for line in f.readlines(): # decode if isinstance(line, bytes): line = line.decode("utf8") # look for WIFs possible_keys = wif_re.findall(line) def make_key(x): try: return Key.from_text(x) except Exception: return None keys = 
[make_key(x) for x in possible_keys] for key in keys: if key: key_list.append((k.wif() for k in key.subkeys(""))) # if len(keys) == 1 and key.hierarchical_wallet() is None: # # we have exactly 1 WIF. Let's look for an address # potential_addresses = address_re.findall(line) TX_ID_RE = re.compile(r"^[0-9a-fA-F]{64}$") def parse_tx(arg, parser, tx_db, network): # hex transaction id tx = None if TX_ID_RE.match(arg): if tx_db is None: tx_db = create_tx_db(network) tx = tx_db.get(h2b_rev(arg)) if not tx: parser.error("can't find Tx with id %s" % arg) return tx, tx_db # hex transaction data try: return Tx.from_hex(arg), tx_db except Exception: pass if os.path.exists(arg): try: with open(arg, "rb") as f: if f.name.endswith("hex"): f = io.BytesIO(codecs.getreader("hex_codec")(f).read()) tx = Tx.parse(f) tx.parse_unspents(f) except Exception: pass return tx, tx_db def parse_scripts(args): scripts = [] warnings = [] for p2s in args.pay_to_script or []: try: scripts.append(h2b(p2s)) except Exception: warnings.append("warning: error parsing pay-to-script value %s" % p2s) hex_re = re.compile(r"[0-9a-fA-F]+") for f in args.pay_to_script_file or []: count = 0 for l in f: try: m = hex_re.search(l) if m: p2s = m.group(0) scripts.append(h2b(p2s)) count += 1 except Exception: warnings.append("warning: error parsing pay-to-script file %s" % f.name) if count == 0: warnings.append("warning: no scripts found in %s" % f.name) return scripts, warnings def build_p2sh_lookup(args): scripts, warnings = parse_scripts(args) for w in warnings: print(w) p2sh_lookup = {} for script in scripts: p2sh_lookup[hash160(script)] = script return p2sh_lookup def create_tx_db(network): tx_db = get_tx_db(network) tx_db.warning_tx_cache = message_about_tx_cache_env() tx_db.warning_tx_for_tx_hash = message_about_tx_for_tx_hash_env(network) return tx_db def parse_parts(arg, spendables, payables, network): parts = arg.split("/") if 4 <= len(parts) <= 7: # spendable try: spendables.append(Spendable.from_text(arg)) return True except Exception: pass if len(parts) == 2 and is_address_valid(parts[0], allowable_netcodes=[network]): try: payables.append(parts) return True except ValueError: pass def key_found(arg, payables, key_iters): try: key = Key.from_text(arg) # TODO: check network if key.wif() is None: payables.append((key.address(), 0)) return True key_iters.append(iter([key.wif()])) return True except Exception: pass return False def parse_context(args, parser): # we create the tx_db lazily tx_db = None if args.db: the_ram_tx_db = dict((tx.hash(), tx) for tx in args.db) if tx_db is None: tx_db = create_tx_db(args.network) tx_db.lookup_methods.append(the_ram_tx_db.get) # defaults txs = [] spendables = [] payables = [] key_iters = [] # there are a few warnings we might optionally print out, but only if # they are relevant. We don't want to print them out multiple times, so we # collect them here and print them at the end if they ever kick in. 
warning_spendables = None for arg in args.argument: if is_address_valid(arg, allowable_netcodes=[args.network], allowable_types=[ "address", "pay_to_script", "segwit"]): payables.append((arg, 0)) continue if key_found(arg, payables, key_iters): continue tx, tx_db = parse_tx(arg, parser, tx_db, args.network) if tx: txs.append(tx) continue if parse_parts(arg, spendables, payables, args.network): continue parser.error("can't parse %s" % arg) parse_private_key_file(args, key_iters) if args.fetch_spendables: warning_spendables = message_about_spendables_for_address_env(args.network) for address in args.fetch_spendables: spendables.extend(spendables_for_address(address, args.network)) return (txs, spendables, payables, key_iters, tx_db, warning_spendables) def merge_txs(txs, spendables, payables): txs_in = [] txs_out = [] unspents = [] # we use a clever trick here to keep each tx_in corresponding with its tx_out for tx in txs: smaller = min(len(tx.txs_in), len(tx.txs_out)) txs_in.extend(tx.txs_in[:smaller]) txs_out.extend(tx.txs_out[:smaller]) unspents.extend(tx.unspents[:smaller]) for tx in txs: smaller = min(len(tx.txs_in), len(tx.txs_out)) txs_in.extend(tx.txs_in[smaller:]) txs_out.extend(tx.txs_out[smaller:]) unspents.extend(tx.unspents[smaller:]) for spendable in spendables: txs_in.append(spendable.tx_in()) unspents.append(spendable) for address, coin_value in payables: script = standard_tx_out_script(address) txs_out.append(TxOut(coin_value, script)) return txs_in, txs_out, unspents def calculate_lock_time_and_version(args, txs): # if no lock_time is explicitly set, inherit from the first tx or use default lock_time = args.lock_time if lock_time is None: if txs: lock_time = txs[0].lock_time else: lock_time = DEFAULT_LOCK_TIME # if no version is explicitly set, inherit from the first tx or use default version = args.transaction_version if version is None: if txs: version = txs[0].version else: version = DEFAULT_VERSION return lock_time, version def remove_indices(items, indices): if indices: s = set(indices) items = [i for idx, i in enumerate(items) if idx not in s] return items def wif_iter(iters): while len(iters) > 0: for idx, iter in enumerate(iters): try: wif = next(iter) yield wif except StopIteration: iters = iters[:idx] + iters[idx+1:] break def generate_tx(txs, spendables, payables, args): txs_in, txs_out, unspents = merge_txs(txs, spendables, payables) lock_time, version = calculate_lock_time_and_version(args, txs) if len(unspents) == len(txs_in): unspents = remove_indices(unspents, args.remove_tx_in) txs_in = remove_indices(txs_in, args.remove_tx_in) txs_out = remove_indices(txs_out, args.remove_tx_out) tx = Tx(txs_in=txs_in, txs_out=txs_out, lock_time=lock_time, version=version, unspents=unspents) fee = args.fee try: if len(payables) > 0: distribute_from_split_pool(tx, fee) except ValueError as ex: print("warning: %s" % ex.args[0], file=sys.stderr) return tx def print_output(tx, include_unspents, output_file, show_unspents, network, verbose_signature, disassemble, trace, pdb): if len(tx.txs_in) == 0: print("warning: transaction has no inputs", file=sys.stderr) if len(tx.txs_out) == 0: print("warning: transaction has no outputs", file=sys.stderr) tx_as_hex = tx.as_hex(include_unspents=include_unspents) if output_file: f = output_file if f.name.endswith(".hex"): f.write(tx_as_hex.encode("utf8")) else: tx.stream(f, include_unspents=include_unspents) f.close() elif show_unspents: for spendable in tx.tx_outs_as_spendable(): print(spendable.as_text()) elif len(tx.txs_out) == 0: for 
spendable in tx.unspents: print(spendable.as_text()) else: if not tx.missing_unspents(): check_fees(tx) dump_tx(tx, network, verbose_signature, disassemble, trace, pdb) if include_unspents: print("including unspents in hex dump since transaction not fully signed") print(tx_as_hex) def do_signing(tx, key_iters, p2sh_lookup, netcode): unsigned_before = tx.bad_signature_count() unsigned_after = unsigned_before if unsigned_before > 0 and key_iters: print("signing...", file=sys.stderr) sign_tx(tx, wif_iter(key_iters), p2sh_lookup=p2sh_lookup, netcode=netcode) unsigned_after = tx.bad_signature_count() if unsigned_after > 0: print("warning: %d TxIn items still unsigned" % unsigned_after, file=sys.stderr) return unsigned_after == 0 def cache_result(tx, tx_db, cache, network): if cache: if tx_db is None: tx_db = create_tx_db(network) tx_db.put(tx) return tx_db def validate_tx(tx, tx_db, network): if not tx.txs_out: return if tx.missing_unspents(): print("\n** can't validate transaction as source transactions missing", file=sys.stderr) else: try: if tx_db is None: tx_db = create_tx_db(network) tx.validate_unspents(tx_db) print('all incoming transaction values validated') except BadSpendableError as ex: print("\n**** ERROR: FEES INCORRECTLY STATED: %s" % ex.args[0], file=sys.stderr) except Exception as ex: print("\n*** can't validate source transactions as untampered: %s" % ex.args[0], file=sys.stderr) def validate_against_bitcoind(tx, tx_db, network, bitcoind_url): if bitcoind_url: if tx_db is None: tx_db = create_tx_db(network) validate_bitcoind(tx, tx_db, bitcoind_url) return tx_db def tx(args, parser): (txs, spendables, payables, key_iters, tx_db, warning_spendables) = parse_context(args, parser) for tx in txs: if tx.missing_unspents() and (args.augment or tx_db): if tx_db is None: tx_db = create_tx_db(args.network) tx.unspents_from_db(tx_db, ignore_missing=True) # build p2sh_lookup p2sh_lookup = build_p2sh_lookup(args) tx = generate_tx(txs, spendables, payables, args) is_fully_signed = do_signing(tx, key_iters, p2sh_lookup, args.network) include_unspents = not is_fully_signed print_output(tx, include_unspents, args.output_file, args.show_unspents, args.network, args.verbose_signature, args.disassemble, args.trace, args.pdb) tx_db = cache_result(tx, tx_db, args.cache, args.network) tx_db = validate_against_bitcoind(tx, tx_db, args.network, args.bitcoind_url) if not args.show_unspents: tx_db = validate_tx(tx, tx_db, args.network) # print warnings if tx_db: for m in [tx_db.warning_tx_cache, tx_db.warning_tx_for_tx_hash]: if m: print("warning: %s" % m, file=sys.stderr) if warning_spendables: print("warning: %s" % warning_spendables, file=sys.stderr) def main(): parser = create_parser() args = parser.parse_args() tx(args, parser) if __name__ == '__main__': main()
37.086207
120
0.620999
from __future__ import print_function import argparse import calendar import codecs import datetime import io import os.path import re import subprocess import sys from pycoinzpub.convention import tx_fee, satoshi_to_mbtc from pycoinzpub.encoding import hash160 from pycoinzpub.key import Key from pycoinzpub.key.validate import is_address_valid from pycoinzpub.networks import address_prefix_for_netcode, full_network_name_for_netcode, network_codes from pycoinzpub.networks.default import get_current_netcode from pycoinzpub.serialize import b2h_rev, h2b, h2b_rev, stream_to_bytes from pycoinzpub.services import spendables_for_address, get_tx_db from pycoinzpub.services.providers import message_about_tx_cache_env, \ message_about_tx_for_tx_hash_env, message_about_spendables_for_address_env from pycoinzpub.tx.exceptions import BadSpendableError from pycoinzpub.tx.script.tools import opcode_list, disassemble_for_opcode_data from pycoinzpub.tx.script.check_signature import parse_signature_blob from pycoinzpub.tx.script.der import UnexpectedDER from pycoinzpub.tx.script.disassemble import disassemble_scripts, sighash_type_to_string from pycoinzpub.tx.tx_utils import distribute_from_split_pool, sign_tx from pycoinzpub.tx.Tx import Spendable, Tx, TxOut from pycoinzpub.ui import standard_tx_out_script DEFAULT_VERSION = 1 DEFAULT_LOCK_TIME = 0 LOCKTIME_THRESHOLD = 500000000 def range_int(min, max, name): def cast(v): v = int(v) if not (min <= v <= max): raise ValueError() return v cast.__name__ = name return cast def validate_bitcoind(tx, tx_db, bitcoind_url): try: from pycoinzpub.services.bitcoind import bitcoind_agrees_on_transaction_validity if bitcoind_agrees_on_transaction_validity(bitcoind_url, tx): print("interop test passed for %s" % tx.id(), file=sys.stderr) else: print("tx ==> %s FAILED interop test" % tx.id(), file=sys.stderr) except ImportError: print("warning: can't talk to bitcoind due to missing library") def dump_header(tx): tx_bin = stream_to_bytes(tx.stream) print("Version: %2d tx hash %s %d bytes" % (tx.version, tx.id(), len(tx_bin))) if tx.has_witness_data(): print(" segwit tx hash %s" % tx.w_id()) print("TxIn count: %d; TxOut count: %d" % (len(tx.txs_in), len(tx.txs_out))) if tx.lock_time == 0: meaning = "valid anytime" elif tx.lock_time < LOCKTIME_THRESHOLD: meaning = "valid after block index %d" % tx.lock_time else: when = datetime.datetime.utcfromtimestamp(tx.lock_time) meaning = "valid on or after %s utc" % when.isoformat() print("Lock time: %d (%s)" % (tx.lock_time, meaning)) print("Input%s:" % ('s' if len(tx.txs_in) != 1 else '')) def make_trace_script(do_trace, use_pdb): if not (do_trace or use_pdb): return None def trace_script(pc, opcode, data, stack, altstack, if_condition_stack, is_signature): from pycoinzpub.serialize import b2h print("stack: [%s]" % ' '.join(b2h(s) for s in stack)) if len(altstack) > 0: print("altstack: %s" % altstack) print("condition stack: %s" % if_condition_stack) print("%3d : %02x %s" % (pc, opcode, disassemble_for_opcode_data(opcode, data))) if use_pdb: import pdb pdb.set_trace() return trace_script def dump_inputs(tx, netcode, verbose_signature, address_prefix, traceback_f, disassembly_level): def signature_for_hash_type_f(hash_type, script): return tx.signature_hash(script, idx, hash_type) for idx, tx_in in enumerate(tx.txs_in): if tx.is_coinbase(): print("%4d: COINBASE %12.5f mBTC" % (idx, satoshi_to_mbtc(tx.total_in()))) continue suffix = "" if tx.missing_unspent(idx): tx_out = None address = 
tx_in.bitcoin_address(address_prefix=address_prefix) else: tx_out = tx.unspents[idx] sig_result = " sig ok" if tx.is_signature_ok(idx, traceback_f=traceback_f) else " BAD SIG" suffix = " %12.5f mBTC %s" % (satoshi_to_mbtc(tx_out.coin_value), sig_result) address = tx_out.bitcoin_address(netcode=netcode) t = "%4d: %34s from %s:%-4d%s" % (idx, address, b2h_rev(tx_in.previous_hash), tx_in.previous_index, suffix) print(t.rstrip()) if disassembly_level > 0: dump_disassembly(tx_in, tx_out, tx.lock_time, signature_for_hash_type_f) if verbose_signature: dump_signatures(tx, tx_in, tx_out, idx, netcode, address_prefix, traceback_f, disassembly_level) def dump_disassembly(tx_in, tx_out, lock_time, signature_for_hash_type_f): out_script = b'' if tx_out: out_script = tx_out.script for (pre_annotations, pc, opcode, instruction, post_annotations) in \ disassemble_scripts( tx_in.script, out_script, lock_time, signature_for_hash_type_f): for l in pre_annotations: print(" %s" % l) if 1: print(" %4x: %02x %s" % (pc, opcode, instruction)) for l in post_annotations: print(" %s" % l) def dump_signatures(tx, tx_in, tx_out, idx, netcode, address_prefix, traceback_f, disassembly_level): signatures = [] for opcode in opcode_list(tx_in.script): if not opcode.startswith("OP_"): try: signatures.append(parse_signature_blob(h2b(opcode[1:-1]))) except UnexpectedDER: pass if signatures: sig_types_identical = ( tuple(zip(*signatures))[1].count(signatures[0][1]) == len(signatures)) i = 1 if len(signatures) > 1 else '' for sig_pair, sig_type in signatures: print(" r{0}: {1:#x}\n s{0}: {2:#x}".format(i, *sig_pair)) if not sig_types_identical and tx_out: print(" z{}: {:#x} {}".format(i, tx.signature_hash(tx_out.script, idx, sig_type), sighash_type_to_string(sig_type))) if i: i += 1 if sig_types_identical and tx_out: print(" z:{} {:#x} {}".format(' ' if i else '', tx.signature_hash( tx_out.script, idx, sig_type), sighash_type_to_string(sig_type))) def dump_footer(tx, missing_unspents): if not missing_unspents: print("Total input %12.5f mBTC" % satoshi_to_mbtc(tx.total_in())) if 1: print("Total output %12.5f mBTC" % satoshi_to_mbtc(tx.total_out())) if not missing_unspents: print("Total fees %12.5f mBTC" % satoshi_to_mbtc(tx.fee())) def dump_tx(tx, netcode, verbose_signature, disassembly_level, do_trace, use_pdb): address_prefix = address_prefix_for_netcode(netcode) missing_unspents = tx.missing_unspents() traceback_f = make_trace_script(do_trace, use_pdb) dump_header(tx) dump_inputs(tx, netcode, verbose_signature, address_prefix, traceback_f, disassembly_level) def signature_for_hash_type_f(hash_type, script): return tx.signature_hash(script, idx, hash_type) print("Output%s:" % ('s' if len(tx.txs_out) != 1 else '')) for idx, tx_out in enumerate(tx.txs_out): amount_mbtc = satoshi_to_mbtc(tx_out.coin_value) address = tx_out.bitcoin_address(netcode=netcode) or "(unknown)" print("%4d: %34s receives %12.5f mBTC" % (idx, address, amount_mbtc)) if disassembly_level > 0: for (pre_annotations, pc, opcode, instruction, post_annotations) in \ disassemble_scripts(b'', tx_out.script, tx.lock_time, signature_for_hash_type_f): for l in pre_annotations: print(" %s" % l) if 1: print(" %4x: %02x %s" % (pc, opcode, instruction)) for l in post_annotations: print(" %s" % l) dump_footer(tx, missing_unspents) def check_fees(tx): total_in, total_out = tx.total_in(), tx.total_out() actual_tx_fee = total_in - total_out recommended_tx_fee = tx_fee.recommended_fee_for_tx(tx) print("warning: transaction fees recommendations casually calculated and estimates 
may be incorrect", file=sys.stderr) if actual_tx_fee > recommended_tx_fee: print("warning: transaction fee of %s exceeds expected value of %s mBTC" % (satoshi_to_mbtc(actual_tx_fee), satoshi_to_mbtc(recommended_tx_fee)), file=sys.stderr) elif actual_tx_fee < 0: print("not enough source coins (%s mBTC) for destination (%s mBTC)." " Short %s mBTC" % (satoshi_to_mbtc(total_in), satoshi_to_mbtc(total_out), satoshi_to_mbtc(-actual_tx_fee)), file=sys.stderr) elif actual_tx_fee < recommended_tx_fee: print("warning: transaction fee lower than (casually calculated)" " expected value of %s mBTC, transaction might not propogate" % satoshi_to_mbtc(recommended_tx_fee), file=sys.stderr) return actual_tx_fee EARLIEST_DATE = datetime.datetime(year=2009, month=1, day=1) def parse_locktime(s): s = re.sub(r"[ ,:\-]+", r"-", s) for fmt1 in ["%Y-%m-%dT", "%Y-%m-%d", "%b-%d-%Y", "%b-%d-%y", "%B-%d-%Y", "%B-%d-%y"]: for fmt2 in ["T%H-%M-%S", "T%H-%M", "-%H-%M-%S", "-%H-%M", ""]: fmt = fmt1 + fmt2 try: when = datetime.datetime.strptime(s, fmt) if when < EARLIEST_DATE: raise ValueError("invalid date: must be after %s" % EARLIEST_DATE) return calendar.timegm(when.timetuple()) except ValueError: pass return int(s) parse_locktime.__name__ = 'locktime' def parse_fee(fee): if fee in ["standard"]: return fee return int(fee) def create_parser(): codes = network_codes() EPILOG = ('Files are binary by default unless they end with the suffix ".hex". ' + 'Known networks codes:\n ' + ', '.join(['%s (%s)' % (i, full_network_name_for_netcode(i)) for i in codes])) parser = argparse.ArgumentParser( description="Manipulate bitcoin (or alt coin) transactions.", epilog=EPILOG) parser.add_argument('-t', "--transaction-version", type=range_int(0, 255, "version"), help='Transaction version, either 1 (default) or 3 (not yet supported).') parser.add_argument('-l', "--lock-time", type=parse_locktime, help='Lock time; either a block' 'index, or a date/time (example: "2014-01-01T15:00:00"') parser.add_argument('-n', "--network", default=get_current_netcode(), choices=codes, help='Define network code (BTC=Bitcoin mainnet, XTN=Bitcoin testnet).') parser.add_argument('-a', "--augment", action='store_true', help='augment tx by adding any missing spendable metadata by fetching' ' inputs from cache and/or web services') parser.add_argument('-s', "--verbose-signature", action='store_true', help='Display technical signature details.') parser.add_argument("-i", "--fetch-spendables", metavar="address", action="append", help='Add all unspent spendables for the given bitcoin address. This information' ' is fetched from web services. With no outputs, incoming spendables will be printed.') parser.add_argument('-f', "--private-key-file", metavar="path-to-private-keys", action="append", default=[], help='file containing WIF or BIP0032 private keys. If file name ends with .gpg, ' '"gpg -d" will be invoked automatically. 
File is read one line at a time, and if ' 'the file contains only one WIF per line, it will also be scanned for a bitcoin ' 'address, and any addresses found will be assumed to be public keys for the given' ' private key.', type=argparse.FileType('r')) parser.add_argument('-g', "--gpg-argument", help='argument to pass to gpg (besides -d).', default='') parser.add_argument("--remove-tx-in", metavar="tx_in_index_to_delete", action="append", type=int, help='remove a tx_in') parser.add_argument("--remove-tx-out", metavar="tx_out_index_to_delete", action="append", type=int, help='remove a tx_out') parser.add_argument('-F', "--fee", help='fee, in satoshis, to pay on transaction, or ' '"standard" to auto-calculate. This is only useful if the "split pool" ' 'is used; otherwise, the fee is automatically set to the unclaimed funds.', default="standard", metavar="transaction-fee", type=parse_fee) parser.add_argument('-C', "--cache", help='force the resultant transaction into the transaction cache.' ' Mostly for testing.', action='store_true'), parser.add_argument("--db", type=Tx.from_hex, help='force the transaction expressed by the given hex ' 'into a RAM-based transaction cache. Mostly for testing.', action="append"), parser.add_argument('-u', "--show-unspents", action='store_true', help='show TxOut items for this transaction in Spendable form.') parser.add_argument('-b', "--bitcoind-url", help='URL to bitcoind instance to validate against (http://user:pass@host:port).') parser.add_argument('-o', "--output-file", metavar="path-to-output-file", type=argparse.FileType('wb'), help='file to write transaction to. This supresses most other output.') parser.add_argument('-d', "--disassemble", action='store_true', help='Disassemble scripts.') parser.add_argument("--pdb", action="store_true", help='Enter PDB debugger on each script instruction.') parser.add_argument("--trace", action='store_true', help='Trace scripts.') parser.add_argument('-p', "--pay-to-script", metavar="pay-to-script", action="append", help='a hex version of a script required for a pay-to-script' 'input (a bitcoin address that starts with 3)') parser.add_argument('-P', "--pay-to-script-file", metavar="pay-to-script-file", nargs=1, type=argparse.FileType('r'), help='a file containing hex scripts ' '(one per line) corresponding to pay-to-script inputs') parser.add_argument("argument", nargs="*", help='generic argument: can be a hex transaction id ' '(exactly 64 characters) to be fetched from cache or a web service;' ' a transaction as a hex string; a path name to a transaction to be loaded;' ' a spendable 4-tuple of the form tx_id/tx_out_idx/script_hex/satoshi_count ' 'to be added to TxIn list; an address/satoshi_count to be added to the TxOut ' 'list; an address to be added to the TxOut list and placed in the "split' ' pool".') return parser def replace_with_gpg_pipe(args, f): gpg_args = ["gpg", "-d"] if args.gpg_argument: gpg_args.extend(args.gpg_argument.split()) gpg_args.append(f.name) popen = subprocess.Popen(gpg_args, stdout=subprocess.PIPE) return popen.stdout def parse_private_key_file(args, key_list): wif_re = re.compile(r"[1-9a-km-zA-LMNP-Z]{51,111}") # address_re = re.compile(r"[1-9a-kmnp-zA-KMNP-Z]{27-31}") for f in args.private_key_file: if f.name.endswith(".gpg"): f = replace_with_gpg_pipe(args, f) for line in f.readlines(): # decode if isinstance(line, bytes): line = line.decode("utf8") # look for WIFs possible_keys = wif_re.findall(line) def make_key(x): try: return Key.from_text(x) except Exception: return None keys = 
[make_key(x) for x in possible_keys] for key in keys: if key: key_list.append((k.wif() for k in key.subkeys(""))) # if len(keys) == 1 and key.hierarchical_wallet() is None: # # we have exactly 1 WIF. Let's look for an address TX_ID_RE = re.compile(r"^[0-9a-fA-F]{64}$") def parse_tx(arg, parser, tx_db, network): tx = None if TX_ID_RE.match(arg): if tx_db is None: tx_db = create_tx_db(network) tx = tx_db.get(h2b_rev(arg)) if not tx: parser.error("can't find Tx with id %s" % arg) return tx, tx_db # hex transaction data try: return Tx.from_hex(arg), tx_db except Exception: pass if os.path.exists(arg): try: with open(arg, "rb") as f: if f.name.endswith("hex"): f = io.BytesIO(codecs.getreader("hex_codec")(f).read()) tx = Tx.parse(f) tx.parse_unspents(f) except Exception: pass return tx, tx_db def parse_scripts(args): scripts = [] warnings = [] for p2s in args.pay_to_script or []: try: scripts.append(h2b(p2s)) except Exception: warnings.append("warning: error parsing pay-to-script value %s" % p2s) hex_re = re.compile(r"[0-9a-fA-F]+") for f in args.pay_to_script_file or []: count = 0 for l in f: try: m = hex_re.search(l) if m: p2s = m.group(0) scripts.append(h2b(p2s)) count += 1 except Exception: warnings.append("warning: error parsing pay-to-script file %s" % f.name) if count == 0: warnings.append("warning: no scripts found in %s" % f.name) return scripts, warnings def build_p2sh_lookup(args): scripts, warnings = parse_scripts(args) for w in warnings: print(w) p2sh_lookup = {} for script in scripts: p2sh_lookup[hash160(script)] = script return p2sh_lookup def create_tx_db(network): tx_db = get_tx_db(network) tx_db.warning_tx_cache = message_about_tx_cache_env() tx_db.warning_tx_for_tx_hash = message_about_tx_for_tx_hash_env(network) return tx_db def parse_parts(arg, spendables, payables, network): parts = arg.split("/") if 4 <= len(parts) <= 7: # spendable try: spendables.append(Spendable.from_text(arg)) return True except Exception: pass if len(parts) == 2 and is_address_valid(parts[0], allowable_netcodes=[network]): try: payables.append(parts) return True except ValueError: pass def key_found(arg, payables, key_iters): try: key = Key.from_text(arg) # TODO: check network if key.wif() is None: payables.append((key.address(), 0)) return True key_iters.append(iter([key.wif()])) return True except Exception: pass return False def parse_context(args, parser): # we create the tx_db lazily tx_db = None if args.db: the_ram_tx_db = dict((tx.hash(), tx) for tx in args.db) if tx_db is None: tx_db = create_tx_db(args.network) tx_db.lookup_methods.append(the_ram_tx_db.get) # defaults txs = [] spendables = [] payables = [] key_iters = [] # there are a few warnings we might optionally print out, but only if # they are relevant. 
We don't want to print them out multiple times, so we warning_spendables = None for arg in args.argument: if is_address_valid(arg, allowable_netcodes=[args.network], allowable_types=[ "address", "pay_to_script", "segwit"]): payables.append((arg, 0)) continue if key_found(arg, payables, key_iters): continue tx, tx_db = parse_tx(arg, parser, tx_db, args.network) if tx: txs.append(tx) continue if parse_parts(arg, spendables, payables, args.network): continue parser.error("can't parse %s" % arg) parse_private_key_file(args, key_iters) if args.fetch_spendables: warning_spendables = message_about_spendables_for_address_env(args.network) for address in args.fetch_spendables: spendables.extend(spendables_for_address(address, args.network)) return (txs, spendables, payables, key_iters, tx_db, warning_spendables) def merge_txs(txs, spendables, payables): txs_in = [] txs_out = [] unspents = [] # we use a clever trick here to keep each tx_in corresponding with its tx_out for tx in txs: smaller = min(len(tx.txs_in), len(tx.txs_out)) txs_in.extend(tx.txs_in[:smaller]) txs_out.extend(tx.txs_out[:smaller]) unspents.extend(tx.unspents[:smaller]) for tx in txs: smaller = min(len(tx.txs_in), len(tx.txs_out)) txs_in.extend(tx.txs_in[smaller:]) txs_out.extend(tx.txs_out[smaller:]) unspents.extend(tx.unspents[smaller:]) for spendable in spendables: txs_in.append(spendable.tx_in()) unspents.append(spendable) for address, coin_value in payables: script = standard_tx_out_script(address) txs_out.append(TxOut(coin_value, script)) return txs_in, txs_out, unspents def calculate_lock_time_and_version(args, txs): # if no lock_time is explicitly set, inherit from the first tx or use default lock_time = args.lock_time if lock_time is None: if txs: lock_time = txs[0].lock_time else: lock_time = DEFAULT_LOCK_TIME # if no version is explicitly set, inherit from the first tx or use default version = args.transaction_version if version is None: if txs: version = txs[0].version else: version = DEFAULT_VERSION return lock_time, version def remove_indices(items, indices): if indices: s = set(indices) items = [i for idx, i in enumerate(items) if idx not in s] return items def wif_iter(iters): while len(iters) > 0: for idx, iter in enumerate(iters): try: wif = next(iter) yield wif except StopIteration: iters = iters[:idx] + iters[idx+1:] break def generate_tx(txs, spendables, payables, args): txs_in, txs_out, unspents = merge_txs(txs, spendables, payables) lock_time, version = calculate_lock_time_and_version(args, txs) if len(unspents) == len(txs_in): unspents = remove_indices(unspents, args.remove_tx_in) txs_in = remove_indices(txs_in, args.remove_tx_in) txs_out = remove_indices(txs_out, args.remove_tx_out) tx = Tx(txs_in=txs_in, txs_out=txs_out, lock_time=lock_time, version=version, unspents=unspents) fee = args.fee try: if len(payables) > 0: distribute_from_split_pool(tx, fee) except ValueError as ex: print("warning: %s" % ex.args[0], file=sys.stderr) return tx def print_output(tx, include_unspents, output_file, show_unspents, network, verbose_signature, disassemble, trace, pdb): if len(tx.txs_in) == 0: print("warning: transaction has no inputs", file=sys.stderr) if len(tx.txs_out) == 0: print("warning: transaction has no outputs", file=sys.stderr) tx_as_hex = tx.as_hex(include_unspents=include_unspents) if output_file: f = output_file if f.name.endswith(".hex"): f.write(tx_as_hex.encode("utf8")) else: tx.stream(f, include_unspents=include_unspents) f.close() elif show_unspents: for spendable in tx.tx_outs_as_spendable(): 
print(spendable.as_text()) elif len(tx.txs_out) == 0: for spendable in tx.unspents: print(spendable.as_text()) else: if not tx.missing_unspents(): check_fees(tx) dump_tx(tx, network, verbose_signature, disassemble, trace, pdb) if include_unspents: print("including unspents in hex dump since transaction not fully signed") print(tx_as_hex) def do_signing(tx, key_iters, p2sh_lookup, netcode): unsigned_before = tx.bad_signature_count() unsigned_after = unsigned_before if unsigned_before > 0 and key_iters: print("signing...", file=sys.stderr) sign_tx(tx, wif_iter(key_iters), p2sh_lookup=p2sh_lookup, netcode=netcode) unsigned_after = tx.bad_signature_count() if unsigned_after > 0: print("warning: %d TxIn items still unsigned" % unsigned_after, file=sys.stderr) return unsigned_after == 0 def cache_result(tx, tx_db, cache, network): if cache: if tx_db is None: tx_db = create_tx_db(network) tx_db.put(tx) return tx_db def validate_tx(tx, tx_db, network): if not tx.txs_out: return if tx.missing_unspents(): print("\n** can't validate transaction as source transactions missing", file=sys.stderr) else: try: if tx_db is None: tx_db = create_tx_db(network) tx.validate_unspents(tx_db) print('all incoming transaction values validated') except BadSpendableError as ex: print("\n**** ERROR: FEES INCORRECTLY STATED: %s" % ex.args[0], file=sys.stderr) except Exception as ex: print("\n*** can't validate source transactions as untampered: %s" % ex.args[0], file=sys.stderr) def validate_against_bitcoind(tx, tx_db, network, bitcoind_url): if bitcoind_url: if tx_db is None: tx_db = create_tx_db(network) validate_bitcoind(tx, tx_db, bitcoind_url) return tx_db def tx(args, parser): (txs, spendables, payables, key_iters, tx_db, warning_spendables) = parse_context(args, parser) for tx in txs: if tx.missing_unspents() and (args.augment or tx_db): if tx_db is None: tx_db = create_tx_db(args.network) tx.unspents_from_db(tx_db, ignore_missing=True) # build p2sh_lookup p2sh_lookup = build_p2sh_lookup(args) tx = generate_tx(txs, spendables, payables, args) is_fully_signed = do_signing(tx, key_iters, p2sh_lookup, args.network) include_unspents = not is_fully_signed print_output(tx, include_unspents, args.output_file, args.show_unspents, args.network, args.verbose_signature, args.disassemble, args.trace, args.pdb) tx_db = cache_result(tx, tx_db, args.cache, args.network) tx_db = validate_against_bitcoind(tx, tx_db, args.network, args.bitcoind_url) if not args.show_unspents: tx_db = validate_tx(tx, tx_db, args.network) # print warnings if tx_db: for m in [tx_db.warning_tx_cache, tx_db.warning_tx_for_tx_hash]: if m: print("warning: %s" % m, file=sys.stderr) if warning_spendables: print("warning: %s" % warning_spendables, file=sys.stderr) def main(): parser = create_parser() args = parser.parse_args() tx(args, parser) if __name__ == '__main__': main()
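# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original pycoin source): typical ways to
# invoke this "tx" command-line tool, inferred from the argparse options
# defined above.  Placeholders in angle brackets are illustrative only.
#
#   tx -i <bitcoin-address>
#       print incoming spendables fetched from web services for an address
#   tx <64-hex-char-tx-id> -a
#       fetch a transaction by id, augment missing unspents, and dump it
#   tx <tx_id>/<tx_out_idx>/<script_hex>/<satoshi_count> <dest-address> \
#       -f keys.wif -o signed_tx.bin
#       spend a coin: add it as an input, pay the destination address,
#       sign with keys read from a file, and write the result to a file
#
# parse_locktime() accepts either a block index (e.g. "400000" -> 400000) or a
# date/time string such as "2014-01-01T15:00:00", which is normalized with
# re.sub and converted to a POSIX timestamp via calendar.timegm().
# ---------------------------------------------------------------------------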
true
true
790317158e85223b1ef9cc2e4ede51c7c865500d
165803
py
Python
regolith/schemas.py
priyankaanehra/regolith
393c8a88eb7657d1ae5ea017e2cd0b72ed981e8f
[ "CC0-1.0" ]
null
null
null
regolith/schemas.py
priyankaanehra/regolith
393c8a88eb7657d1ae5ea017e2cd0b72ed981e8f
[ "CC0-1.0" ]
null
null
null
regolith/schemas.py
priyankaanehra/regolith
393c8a88eb7657d1ae5ea017e2cd0b72ed981e8f
[ "CC0-1.0" ]
null
null
null
"""Database schemas, examples, and tools""" import copy from warnings import warn from cerberus import Validator from .sorters import POSITION_LEVELS SORTED_POSITION = sorted(POSITION_LEVELS.keys(), key=POSITION_LEVELS.get) ACTIVITIES_TYPE = ["teaching", "research"] AGENCIES = ["nsf", "doe"] APPOINTMENTS_TYPE = ["gra", "ss", "pd", "ug"] COMMITTEES_TYPE = ["phdoral", "phddefense", "phdproposal", "promotion"] COMMITTEES_LEVEL = ["department", "school", "university", "external"] EXPENSES_TYPE = ["unsubmitted", "submitted", "reimbursed"] FACILITIES_TYPE = ["teaching", "research", "shared", "other", "teaching_wish", "research_wish"] POSITION_STATUS = ["pi", "adjunct", "high-school", "undergrad","ms", "phd", "postdoc","visitor-supported","visitor-unsupported"] PRESENTATION_TYPE = ["award", "colloquium", "contributed_oral", "invited", "keynote", "plenary", "poster", "seminar", "tutorial"] PRESENTATION_STATUS = ["in-prep", "submitted", "accepted", "declined", "cancelled", "postponed"] PROJECT_TYPE = ["ossoftware", "funded"] PROPOSAL_STATI = ["pending", "declined", "accepted", "inprep", "submitted"] PUBLICITY_TYPE = ["online", "article"] REVIEW_STATI = ["invited", "accepted", "declined", "downloaded", "inprogress", "submitted", "cancelled"] REVIEW_RECOMMENDATION = ["reject", "asis", "smalledits", "diffjournal", "majoredits"] SERVICE_TYPE = ["profession", "university", "school", "department"] EXEMPLARS = { "abstracts": { "_id": "Mouginot.Model", "coauthors": "P.P.H. Wilson", "email": "mouginot@wisc.edu", "firstname": "Baptiste", "institution": "University of Wisconsin-Madison", "lastname": "Mouginot", "references": "[1] B. MOUGINOT, “cyCLASS: CLASS " "models for Cyclus,”, Figshare, " "https://dx.doi.org/10.6084/" "m9.figshare.3468671.v2 (2016).", "text": "The CLASS team has developed high " "quality predictors based on pre-trained " "neural network...", "timestamp": "5/5/2017 13:15:59", "title": "Model Performance Analysis", }, "assignments": { "_id": "hw01-rx-power", "category": "homework", "courses": ["EMCH-558-2016-S", "EMCH-758-2016-S"], "points": [1, 2, 3], "questions": ["1-9", "1-10", "1-12"], }, "beamplan": { '_id': "test", 'beamtime': '2020-1-XPD', 'begin_date': '2020-01-01', 'end_date': '2020-01-02', 'devices': ['cryostream'], 'exp_plan': ['load samples on the holder', 'scan the holder to locate the samples', 'take room temperature measurement of sample and the subtrate', 'ramp down temperature to 100K', 'ramp up, measure PDF at temperature 100K ~ 300K, 10K stepsize, 1 min exposure'], 'holder': 'film holder (1 cm * 1 cm * 1 mm)', 'measurement': 'Tramp', 'objective': 'temperature ramping PDF of one WO3 film (100, 300K, 10K)', 'pipeline': 'usual', 'prep_plan': ['films will be made by kriti'], 'project': '20ks_wo3', 'project_lead': 'kseth', 'samples': ['WO3 film', 'glass subtrate'], 'scanplan': ['Scanplan(bt, Tramp, 30, 80, 500, 10)'], 'ship_plan': ['seal and ship to CU', 'carry to the beamline'], 'time': 190, 'todo': ["todo something"]}, "beamtime": { "_id": "2020-1-XPD", "begin_date": "2020-02-14", "begin_time": "8:00 am", "end_date": "2020-02-17", "end_time": "8:00 am" }, "blog": { "_id": "my-vision", "author": "Anthony Scopatz", "day": 18, "month": "September", "original": "https://scopatz.com/my-vision/", "post": "I would like see things move forward. 
Deep, I know!", "title": "My Vision", "year": 2015, }, "citations": { "_id": "meurer2016sympy", "author": [ "Meurer, Aaron", "Smith, Christopher P", "Paprocki, Mateusz", "{\\v{C}}ert{\\'\\i}k, Ond{\\v{r}}ej", "Rocklin, Matthew", "Kumar, AMiT", "Ivanov, Sergiu", "Moore, Jason K", "Singh, Sartaj", "Rathnayake, Thilina", "Sean Vig", "Brian E Granger", "Richard P Muller", "Francesco Bonazzi", "Harsh Gupta", "Shivam Vats", "Fredrik Johansson", "Fabian Pedregosa", "Matthew J Curry", "Ashutosh Saboo", "Isuru Fernando", "Sumith Kulal", "Robert Cimrman", "Anthony Scopatz", ], "doi": "10.1021/nn501591g", "entrytype": "article", "journal": "PeerJ Computer Science", "month": "Jan", "pages": "e103", "publisher": "PeerJ Inc. San Francisco, USA", "synopsis": "The description of symbolic computing in Python", "tags": "pdf", "title": "SymPy: Symbolic computing in Python", "volume": "4", "year": "2017", }, "contacts": { "_id": "afriend", "aka": [ "A. B. Friend", "AB Friend", "Tony Friend" ], "department": "physics", "email": "friend@deed.com", "institution": "columbiau", "name": "Anthony B Friend", "notes": ["The guy I meet for coffee sometimes"], "title": "Mr.", "month": "January", "year": 2020, "day": 15, "uuid": "76f2a4c7-aa63-4fa3-88b5-396b0c15d368", }, "courses": { "_id": "EMCH-552-2016-F", "active": False, "department": "EMCH", "number": 552, "scale": [ [0.875, "A"], [0.8125, "B+"], [0.75, "B"], [0.6875, "C+"], [0.625, "C"], [0.5625, "D+"], [0.5, "D"], [-1.0, "F"], ], "season": "F", "students": ["Human A. Person", "Human B. Person"], "syllabus": "emch552-2016-f-syllabus.pdf", "weights": { "class-notes": 0.15, "final": 0.3, "homework": 0.35, "midterm": 0.2, }, "year": 2016, }, "expenses": { "_id": "test", "expense_type": "business", "grant_percentages": ["50", "50"], "grants": ["dmref15", "SymPy-1.1"], "itemized_expenses": [ { "day": i, "month": "Jan", "year": 2018, "purpose": "test", "unsegregated_expense": 10 * i, "segregated_expense": 0, } for i in range(1, 11) ], "payee": "scopatz", "project": "Cyclus", "overall_purpose": "testing the databallectionsse", }, "grades": { "_id": "Human A. 
Person-rx-power-hw02-EMCH-758-2017-S", "student": "hap", "assignment": "2017-rx-power-hw02", "course": "EMCH-758-2017-S", "scores": [1, 1.6, 3], }, "grants": [ { "_id": "SymPy-1.1", "amount": 3000.0, "alias": "sym", "begin_day": 1, "begin_month": "May", "begin_year": 2030, "call_for_proposals": "https://groups.google.com/d/msg" "/numfocus/wPjhdm8NJiA/S8JL1_NZDQAJ", "end_day": 31, "end_month": "December", "end_year": 2030, "funder": "NumFOCUS", "narrative": "https://docs.google.com/document/d/1nZxqoL" "-Ucni_aXLWmXtRDd3IWqW0mZBO65CEvDrsXZM/edit?usp" "=sharing", "program": "Small Development Grants", "team": [ { "institution": "University of South Carolina", "name": "Anthony Scopatz", "position": "pi", }, { "institution": "University of South Carolina", "name": "Aaron Meurer", "position": "researcher", }, ], "status": "pending", "title": "SymPy 1.1 Release Support", "budget": [ {"begin_date": "2030-05-01", "end_date": "2030-06-30", "student_months": 0.5, "postdoc_months": 0.0, "ss_months": 1.0, "amount": 1000.0, }, {"begin_date": "2030-07-01", "end_date": "2030-09-30", "student_months": 1.5, "postdoc_months": 0.0, "ss_months": 2.0, "amount": 1000.0, }, {"begin_date": "2030-10-01", "end_date": "2030-12-31", "student_months": 3.0, "postdoc_months": 0.0, "ss_months": 0.0, "amount": 1000.0, }, ], "proposal_id": "SymPy-1.1" }, { "_id": "SymPy-2.0", "amount": 3000.0, "alias": "sym2.0", "begin_day": 1, "begin_month": 6, "begin_year": 2019, "call_for_proposals": "https://groups.google.com/d/msg" "/numfocus/wPjhdm8NJiA/S8JL1_NZDQAJ", "end_day": 31, "end_month": "December", "end_year": 2030, "funder": "NumFOCUS", "narrative": "https://docs.google.com/document/d/1nZxqoL" "-Ucni_aXLWmXtRDd3IWqW0mZBO65CEvDrsXZM/edit?usp" "=sharing", "program": "Small Development Grants", "team": [ { "institution": "University of South Carolina", "name": "Anthony Scopatz", "position": "pi", }, { "institution": "University of South Carolina", "name": "Aaron Meurer", "position": "researcher", }, ], "status": "pending", "title": "SymPy 1.1 Release Support", "budget": [ {"begin_date": "2019-06-01", "end_date": "2024-12-31", "student_months": 12.0, "postdoc_months": 24.0, "ss_months": 14.0, "amount": 1500.0, }, {"begin_date": "2025-01-01", "end_date": "2030-12-31", "student_months": 12.0, "postdoc_months": 24.0, "ss_months": 0.0, "amount": 1500.0, }, ], "proposal_id": "SymPy-2.0", }, { "_id": "dmref15", "alias": "dmref15", "account": "GG012345", "amount": 982785.0, "funder": "NSF", "grant_id": "DMREF-1534910", "institution": "Columbia University", "notes": "Designing Materials to Revolutionize and Engineer our " "Future (DMREF)", "person_months_academic": 0.0, "person_months_summer": 0.25, "program": "DMREF", "scope": "This grant is to develop complex modeling methods for regularizing " "ill-posed nanostructure inverse problems using data analytic and " "machine learning based approaches. 
This does not overlap with any " "other grant.", "team": [ { "institution": "Columbia University", "name": "qdu", "position": "co-pi", }, { "institution": "Columbia University", "name": "dhsu", "position": "co-pi", }, { "institution": "Columbia University", "name": "Anthony Scopatz", "position": "pi", "subaward_amount": 330000.0, }, ], "title": "DMREF: Novel, data validated, nanostructure determination " "methods for accelerating materials discovery", "budget": [ {"begin_date": "2018-05-01", "end_date": "2018-09-30", "student_months": 12.0, "postdoc_months": 0.0, "ss_months": 6.0, "amount": 327595.0, }, {"begin_date": "2018-10-01", "end_date": "2019-01-30", "student_months": 8.0, "postdoc_months": 0.0, "ss_months": 12.0, "amount": 327595.0, }, {"begin_date": "2019-02-01", "end_date": "2019-05-01", "student_months": 12.0, "postdoc_months": 0.0, "ss_months": 6.0, "amount": 327595.0, }, ], "proposal_id": "dmref15" }, {"_id": "abc42", "alias": "abc42", "amount": 42000.0, "begin_date": "2020-06-01", "end_date": "2020-12-31", "funder": "Life", "program": "Metaphysical Grants", "team": [ {"institution": "University of Pedagogy", "name": "Chief Pedagogue", "position": "pi" }, {"institution": "University of Pedagogy", "name": "Pedagogue Jr.", "position": "co-pi" }, ], "title": "The answer to life, the universe, and everything", "budget": [ {"begin_date": "2020-06-01", "end_date": "2020-12-31", "student_months": 0.0, "postdoc_months": 0.0, "ss_months": 1.0, "amount": 42000.0, } ], "proposal_id": "abc42", }, {"_id": "ta", "amount": 0.0, "begin_date": "2020-06-01", "end_date": "2020-12-31", "funder": "Life", "program": "Underground Grants", "team": [ {"institution": "Ministry of Magic", "name": "Chief Witch", "position": "pi" }, {"institution": "Ministry of Magic", "name": "Chief Wizard", "position": "co-pi" }, ], "title": "Support for teaching assistants", "budget": [ {"begin_date": "2020-06-01", "end_date": "2020-08-30", "student_months": 0.0, "postdoc_months": 0.0, "ss_months": 0.0, "amount": 0.0, } ] }, ], "groups": { "_id": "ergs", "pi_name": "Anthony Scopatz", "department": "Mechanical Engineering", "institution": "University of South Carolina", "name": "ERGS", "aka": ["Energy Research Group Something", "Scopatz Group"], "website": "www.ergs.sc.edu", "mission_statement": """<b>ERGS</b>, or <i>Energy Research Group: Scopatz</i>, is the Computational <a href="http://www.me.sc.edu/nuclear/">Nuclear Engineering</a> research group at the <a href="http://sc.edu/">University of South Carolina</a>. Our focus is on uncertainty quantification & predictive modeling, nuclear fuel cycle simulation, and improving nuclear engineering techniques through automation. We are committed to open & accessible research tools and methods.""", "projects": """ERGS is involved in a large number of computational projects. Please visit the <a href="projects.html">projects page</a> for more information! """, "email": "<b>scopatz</b> <i>(AT)</i> <b>cec.sc.edu</b>", }, "institutions": [{ "_id": "columbiau", "aka": ["Columbia University", "Columbia"], "city": "New York", "country": "USA", "day": 30, "departments": { "physics": { "name": "Department of Physics", "aka": ["Dept. of Physics", "Physics"], }, "chemistry": { "name": "Department of Chemistry", "aka": ["Chemistry", "Dept. 
of Chemistry"], }, "apam": { "name": "Department of Applied Physics " "and Applied Mathematics", "aka": ["APAM"], }, }, "month": "May", "name": "Columbia University", "schools": { "seas": { "name": "School of Engineering and " "Applied Science", "aka": [ "SEAS", "Columbia Engineering", "Fu Foundation School of Engineering " "and Applied Science", ], } }, "state": "NY", "street": "500 W 120th St", "updated": "2020-05-30", "uuid": "avacazdraca345rfsvwre", "year": 2020, "zip": "10027", }, { "_id": "usouthcarolina", "aka": ["The University of South Carolina"], "city": "Columbia", "country": "USA", "day": 30, "departments": { "physics": { "name": "Department of Physics", "aka": ["Dept. of Physics", "Physics"], }, "chemistry": { "name": "Department of Chemistry", "aka": ["Chemistry", "Dept. of Chemistry"], }, "apam": { "name": "Department of Applied Physics" "and Applied Mathematics", "aka": ["APAM"], }, "mechanical engineering": { "name": "Department of Mechanical Engineering", "aka": ["Mechanical", "Dept. of Mechanical"], } }, "month": "May", "name": "The University of South Carolina", "schools": { "cec": { "name": "College of Engineering and" "Computing", "aka": [ "CEC", "College of Engineering and Computing", ], } }, "state": "SC", "street": "1716 College Street", "updated": "2020-06-30", "uuid": "4E89A0DD-19AE-45CC-BCB4-83A2D84545E3", "year": 2020, "zip": "29208", }, ], "jobs": { "_id": "0004", "background_fields": [ "Data Science", "Data Engineering", "Computer Engineering", "Computer Science", "Applied Mathematics", "Physics", "Nuclear Engineering", "Mechanical Engineering", "Or similar", ], "compensation": [ "Salary and compensation will be based on prior work " "experience." ], "contact": "Please send CV or resume to Prof. Scopatz at " "scopatzATcec.sc.edu.", "day": 1, "description": "<p>We are seeking a dedicated individual to " "help to aid in ...", "month": "July", "open": False, "positions": ["Scientific Software Developer", "Programmer"], "start_date": "ASAP", "title": "Open Source Scientific Software Maintainer", "year": 2015, }, "meetings": [{ "_id": "grp1000-01-01", "actions": [ "(Everyone) Update overdue milestones", "(Professor Billinge) Explore, and plan a machine learning project for DSI" "(Professor Billinge, Emil, Yevgeny, Songsheng) Come up with a Kaggle competition for this DSI project" "(Emil) Set up the slack channel for the DSI project" ], "agenda": ["Review actions", "Fargo is not free on any streaming platforms", "Review Airtable for deliverables and celebrate", "Mention diversity action initiative", "Songsheng's journal club presentation", "(Vivian and Zicheng) Finish rest of crystallography presentation next week", "Emil's 7th inning Yoga Stretch", "Crystallography talk", "Presentation"], "buddies": [ " Jaylyn C. Umana, " " Simon J. L. 
Billinge", " Long Yang, " " Emil Kjaer", " Sani Harouna-Mayer," " Akshay Choudhry", " Vivian Lin, " " Songsheng Tao", " Ran Gu, " " Adiba Ejaz", " Zach Thatcher, " " Yevgeny Rakita", " Zicheng 'Taylor' Liu, " " Eric Shen ", " Hung Vuong, " " Daniela Hikari Yano", " Ahmed Shaaban, " " Jiawei Zang", " Berrak Ozer, " " Michael Winitch", " Shomik Ghose", ], "day": 1, "journal_club": { "doi": "10.1107/S2053273319005606", "presenter": "sbillinge", }, "lead": "sbillinge", "minutes": [ "Talked about eyesight and prescription lenses", "Professor Billinge tells everyone a Logician/Mathematician joke", "Mentioned pyjokes, a package in Python that lists bad jokes", "Jaylyn greets everyone", "Reviewed action items from last time", "Talked about fargo, and the merits (or lack thereof) of the Dakotas", "Celebrated finished prums", "Songhsheng holds journal club presentation on Machine Learning techniques", "Discussed Linear Classification, Gradient Descent, Perceptrons, Convolution and other ML topics", "Discussed how we can derive scientific meaning from ML algorithms", "Discussed real space versus reciprocal space", "Finished journal club, had to postpone Akshay's presentation, and the Yoga session to next week", ], "month": 1, "place": "Mudd 1106", "presentation": { "title": "PDF Distance Extraction", "link": "2007ac_grpmtg", "presenter": "sbillinge", }, "scribe": "sbillinge", "time": '0', "updated": "2020-07-31 23:27:50.764475", "uuid": "3fbee8d9-e283-48e7-948f-eecfc2a123b7", "year": 1000 }, { "_id": "grp2020-07-31", "actions": [ "(Everyone) Update overdue milestones", "(Professor Billinge) Explore, and plan a machine learning project for DSI" "(Professor Billinge, Emil, Yevgeny, Songsheng) Come up with a Kaggle competition for this DSI project" "(Emil) Set up the slack channel for the DSI project" ], "agenda": ["Review actions", "Fargo is not free on any streaming platforms", "Review Airtable for deliverables and celebrate", "Mention diversity action initiative", "Songsheng's journal club presentation", "(Vivian and Zicheng) Finish rest of crystallography presentation next week", "Emil's 7th inning Yoga Stretch", "Crystallography talk", "Presentation"], "buddies": [ " Jaylyn C. Umana, " " Simon J. L. 
Billinge", " Long Yang, " " Emil Kjaer", " Sani Harouna-Mayer," " Akshay Choudhry", " Vivian Lin, " " Songsheng Tao", " Ran Gu, " " Adiba Ejaz", " Zach Thatcher, " " Yevgeny Rakita", " Zicheng 'Taylor' Liu, " " Eric Shen ", " Hung Vuong, " " Daniela Hikari Yano", " Ahmed Shaaban, " " Jiawei Zang", " Berrak Ozer, " " Michael Winitch", " Shomik Ghose", ], "day": 1, "journal_club": { "doi": "10.1107/S2053273319005606", "presenter": "sbillinge", }, "lead": "sbillinge", "minutes": [ "Talked about eyesight and prescription lenses", "Professor Billinge tells everyone a Logician/Mathematician joke", "Mentioned pyjokes, a package in Python that lists bad jokes", "Jaylyn greets everyone", "Reviewed action items from last time", "Talked about fargo, and the merits (or lack thereof) of the Dakotas", "Celebrated finished prums", "Songhsheng holds journal club presentation on Machine Learning techniques", "Discussed Linear Classification, Gradient Descent, Perceptrons, Convolution and other ML topics", "Discussed how we can derive scientific meaning from ML algorithms", "Discussed real space versus reciprocal space", "Finished journal club, had to postpone Akshay's presentation, and the Yoga session to next week", ], "month": 1, "place": "Mudd 1106", "presentation": { "title": "PDF Distance Extraction", "link": "2007ac_grpmtg", "presenter": "sbillinge", }, "scribe": "sbillinge", "time": '0', "updated": "2020-07-31 23:27:50.764475", "uuid": "3fbee8d9-e283-48e7-948f-eecfc2a123b7", "year": 7000 } ], "news": { "_id": "56b4eb6d421aa921504ef2a9", "author": "Anthony Scopatz", "body": "Dr. Robert Flanagan joined ERGS as a post-doctoral " "scholar.", "day": 1, "month": "February", "year": 2016, }, "people": [{ "_id": "scopatz", "aka": [ "Scopatz", "Scopatz, A", "Scopatz, A.", "Scopatz, A M", "Anthony Michael Scopatz", ], "avatar": "https://avatars1.githubusercontent.com/u/320553?v" "=3&s=200", "appointments": { "f19": { "begin_year": 2019, "begin_month": 9, "begin_day": 1, "end_year": 2019, "end_month": 10, "end_day": 31, "grant": "dmref15", "type": "pd", "loading": 0.75, "status": "finalized", "notes": ["forgetmenot"] }, "s20": { "begin_date": "2020-01-01", "end_date": "2020-05-15", "grant": "sym", "type": "pd", "loading": 1.0, "status": "finalized", "notes": ["fully appointed", "outdated grant"] }, "ss20": { "begin_date": "2020-06-01", "end_date": "2020-08-31", "grant": "abc42", "type": "ss", "loading": 0.8, "status": "proposed", "notes": [] } }, "bio": "Anthony Scopatz is currently an Assistant Professor", "bios": ["Anthony Scopatz is currently an Assistant Professor but will go on to do great things"], "committees": [{ "name": "Heather Stanford", "type": "phdoral", "year": 2020, "month": 3, "day": 1, "level": "department", "unit": "apam" }, {"name": "Heather Stanford", "type": "promotion", "year": 2020, "month": 3, "day": 1, "level": "school", "unit": "seas" }, {"name": "Heather Stanford", "type": "phddefense", "year": 2020, "month": 3, "day": 1, "notes": "something else to remember about it, not published", "level": "external", "unit": "U Denmark" }, {"name": "Heather Stanford", "type": "promotion", "year": 2020, "month": 3, "day": 1, "unit": "columbiau", "level": "university", }], "education": [ { "advisor": "ascopatz", "begin_year": 2008, "degree": "Ph.D. Mechanical Engineering, " "Nuclear and Radiation Engineering " "Program", "end_year": 2011, "group": "ergs", "institution": "The University of Texas at Austin", "department": "apam", "location": "Austin, TX", "other": [ "Adviser: Erich A. 
Schneider", "Dissertation: Essential Physics for Fuel Cycle " "Modeling & Analysis", ], }, { "begin_year": 2006, "degree": "M.S.E. Mechanical Engineering, Nuclear and " "Radiation Engineering Program", "end_year": 2007, "institution": "The University of Texas at Austin", "location": "Austin, TX", "other": [ "Adviser: Erich A. Schneider", "Thesis: Recyclable Uranium Options under the Global " "Nuclear Energy Partnership", ], }, { "begin_year": 2002, "begin_month": "Sep", "begin_day": 1, "degree": "B.S. Physics", "end_year": 2006, "end_month": 5, "end_day": 20, "institution": "University of California, Santa Barbara", "location": "Santa Barbara, CA", "other": [ "Graduated with a Major in Physics and a Minor in " "Mathematics" ], }, { "begin_year": 2008, "degree": "ongoing", "group": "life", "institution": "solar system", "department": "earth", "location": "land, mostly", }, ], "email": "scopatz@cec.sc.edu", "employment": [ { "advisor": "ascopatz", "begin_year": 2015, "coworkers": ["afriend"], "group": "ergs", "location": "Columbia, SC", "organization": "The University of South Carolina", "other": [ "Cyclus: An agent-based, discrete time nuclear fuel " "cycle simulator.", "PyNE: The Nuclear Engineering Toolkit.", "Website: http://www.ergs.sc.edu/", ], "permanent": True, "position": "assistant professor", "position_full": "Assistant Professor, Mechanical Engineering " "Department", }, { "begin_year": 2013, "begin_month": "Jun", "begin_day": 1, "end_year": 2015, "end_month": 3, "end_day": 15, "location": "Madison, WI", "organization": "CNERG, The University of " "Wisconsin-Madison", "department": "Physics", "other": [ "Cyclus: An agent-based, discrete time nuclear fuel " "cycle simulator.", "PyNE: The Nuclear Engineering Toolkit.", "Website: https://cnerg.github.io/", ], "position": "associate scientist", "position_full": "Associate Scientist, Engineering Physics " "Department", }, { "begin_day": 1, "begin_month": "Nov", "begin_year": 2011, "end_month": "May", "end_year": 2013, "location": "Chicago, IL", "organization": "The FLASH Center, The University of " "Chicago", "other": [ "NIF: Simulation of magnetic field generation from " "neutral plasmas using FLASH.", "CosmoB: Simulation of magnetic field generation " "from neutral plasmas using FLASH.", "FLASH4: High-energy density physics capabilities " "and utilities.", "Simulated Diagnostics: Schlieren, shadowgraphy, " "Langmuir probes, etc. 
from FLASH.", "OpacPlot: HDF5-based equation of state and opacity " "file format.", "Website: http://flash.uchicago.edu/site/", ], "position": "post-doctoral scholar", "position_full": "Research Scientist, Postdoctoral Scholar", "status": "pi" }, ], "funding": [ { "name": "Omega Laser User's Group Travel Award", "value": 1100, "year": 2013, }, {"name": "NIF User's Group Travel Award", "value": 1150, "year": 2013}, ], "google_scholar_url": "https://scholar.google.com/citations?user=dRm8f", "github_id": "ascopatz", "hindex": [{ "h": 25, "h_last_five": 46, "citations": 19837, "citations_last_five": 9419, "origin": "Google Scholar", "since": 1991, "year": 2020, "month": 2, "day": 19 }], "home_address": { "street": "123 Wallabe Ln", "city": "The big apple", "state": "plasma", "zip": "007", }, "initials": "AMS", "membership": [ { "begin_year": 2006, "organization": "American Nuclear Society", "position": "Member", }, { "begin_year": 2013, "organization": "Python Software Foundation", "position": "Fellow", }, ], "name": "Anthony Scopatz", "orcid_id": "0000-0002-9432-4248", "position": "professor", "research_focus_areas": [ {"begin_year": 2010, "description": "software applied to nuclear " "engineering and life"} ], "service": [{ "name": "International Steering Committee", "role": "chair", "type": "profession", "year": 2020, "month": 3, "notes": ["something"], }, { "name": "National Steering Committee", "type": "profession", "begin_year": 2018, "end_year": 2021, "notes": "something", }, ], "skills": [ {"category": "Programming Languages", "level": "expert", "name": "Python"}, {"category": "Programming Languages", "level": "expert", "name": "Cython"}, ], "teaching": [ { "course": "EMCH 552: Intro to Nuclear Engineering", "courseid": "EMCH 552", "description": "This course is an introduction to nuclear " "physics.", "enrollment": "tbd", "month": "August", "organization": "University of South Carolina", "position": "professor", "semester": "Spring", "syllabus": "https://drive.google.com/open?id" "=0BxUpd34yizZreDBCMEJNY2FUbnc", "year": 2017, }, { "course": "EMCH 558/758: Reactor Power Systems", "courseid": "EMCH 558", "description": "This course covers conventional " "reactors.", "enrollment": 28, "evaluation": { "response_rate": 66.76, "amount_learned": 3.5, "appropriateness_workload": 3.15, "course_overall": 3.67, "fairness_grading": 3.54, "organization": 3.25, "classroom_delivery": 4, "approachability": 4.3, "instructor_overall": 3.5, "comments": ["super duper", "dandy"] }, "month": "January", "organization": "University of South Carolina", "position": "professor", "syllabus": "https://docs.google.com/document/d" "/1uMAx_KFZK9ugYyF6wWtLLWgITVhaTBkAf8" "-PxiboYdM/edit?usp=sharing", "year": 2017, }, ], "title": "Dr.", }, { "_id": "sbillinge", "active": True, "activities": [{ "type": "teaching", "name": "course development", "year": 2018, "other": "Developed a new course for Materials Science" }], "aka": [ "Billinge", ], "avatar": "https://avatars1.githubusercontent.com/u/320553?v" "=3&s=200", "bio": "Simon teaches and does research", "committees": [{ "name": "Same Old", "type": "phddefense", "year": 2018, "unit": "Materials Science", "level": "department", "notes": "something" }], "education": [ { "begin_year": 2008, "degree": "Ph.D. Mechanical Engineering, " "Nuclear and Radiation Engineering " "Program", "end_year": 2011, "group": "ergs", "institution": "The University of Texas at Austin", "department": "apam", "location": "Austin, TX", "other": [ "Adviser: Erich A. 
Schneider", "Dissertation: Essential Physics for Fuel Cycle " "Modeling & Analysis", ], }, ], "email": "sb2896@columbia.edu", "employment": [ { "begin_year": 2015, "group": "ergs", "location": "Columbia, SC", "organization": "The University of South Carolina", "other": [ "Cyclus: An agent-based, discrete time nuclear fuel " "cycle simulator.", "PyNE: The Nuclear Engineering Toolkit.", "Website: http://www.ergs.sc.edu/", ], "position": "assistant professor", }, ], "facilities": [{ "type": "other", "name": "Shared {Habanero} compute cluster", "begin_year": 2015 }, { "type": "research_wish", "name": "Shared access to wet lab", "begin_year": 2015 }, { "type": "teaching", "name": "Courseworks2", "begin_year": 2017 }, { "type": "teaching_wish", "name": "nothing right now", "begin_year": 2019 }, { "type": "research", "name": "I don't have one", "begin_year": 2008 }, ], "funding": [ { "name": "Omega Laser User's Group Travel Award", "value": 1100, "year": 2013, }, {"name": "NIF User's Group Travel Award", "value": 1150, "year": 2013}, ], "google_scholar_url": "https://scholar.google.com/citations?user=dRm8f", "grp_mtg_active": True, "hindex": [{ "h": 65, "h_last_five": 43, "citations": 17890, "citations_last_five": 8817, "origin": "Google Scholar", "since": 1991, "year": 2019, "month": "May", "day": 12, }], "office": "1105 Seely W. Mudd Building (inner office)", "home_address": { "street": "123 Wallabe Ln", "city": "The big apple", "state": "plasma", "zip": "007", }, "initials": "SJLB", "membership": [ { "begin_year": 2006, "organization": "American Nuclear Society", "position": "Member", }, ], "miscellaneous": { "metrics_for_success": [ "publications(quality, quantity)", "invite talks", "funding", "citations", ], }, "name": "Simon J. L. Billinge", "orcid_id": "0000-0002-9432-4248", "position": "professor", "publicity": [{ "type": "online", "publication": "Brookhaven National Laboratory Web Story", "topic": "LDRD Provenance project", "title": "An awesome project and well worth the money", "day": 24, "month": "Jul", "year": 2019, "grant": "bnlldrd18", "url": "http://www.google.com" }, ], "research_focus_areas": [ {"begin_year": 2010, "description": "software applied to materials " "engineering and life"} ], "service": [ { "type": "profession", "name": "Master of Ceremonies and Organizer Brown University " '"Chemistry: Believe it or Not" public chemistry ' "demonstration", "year": 2017, "month": "August" }, { "type": "department", "name": "Applied Physics program committee", "year": 2018, "month": 1 }, { "type": "school", "name": "Ad hoc tenure committee", "year": 2017, "month": 6, "notes": "Albert Einstein" }, { "type": "profession", "name": "Co-organizer JUAMI", "year": 2017, "month": 12, "role": "co-organizer", "other": "great way to meet people", }, ], "skills": [ {"category": "Programming Languages", "level": "expert", "name": "Python"}, ], "teaching": [ { "course": 'MSAE-3010: Introduction to Materials Science', "courseid": "f17-3010", "description": "This course is an introduction to nuclear " "physics.", "enrollment": 18, "evaluation": { "response_rate": 58.33, "amount_learned": 4.57, "appropriateness_workload": 4.29, "fairness_grading": 4.57, "course_overall": 4.43, "organization": 4.0, "classroom_delivery": 4.29, "approachability": 4.86, "instructor_overall": 4.43, "comments": [ "Great teacher but disorganized", "Wears pink pants. 
Why?", ]}, "month": "August", "organization": "Columbia University", "position": "professor", "semester": "Fall", "syllabus": "https://drive.google.com/open?id" "=0BxUpd34yizZreDBCMEJNY2FUbnc", "year": 2016, }, { "course": 'MSAE-3010: Introduction to Materials Science', "courseid": "f17-3010", "description": "This course is an introduction to nuclear " "physics.", "enrollment": 18, "evaluation": { "response_rate": 58.33, "amount_learned": 4.57, "appropriateness_workload": 4.29, "fairness_grading": 4.57, "course_overall": 4.43, "organization": 4.0, "classroom_delivery": 4.29, "approachability": 4.86, "instructor_overall": 4.43, "comments": [ "Great teacher but disorganized", "Wears pink pants. Why?", ]}, "month": "August", "organization": "Columbia University", "position": "professor", "semester": "Fall", "syllabus": "https://drive.google.com/open?id" "=0BxUpd34yizZreDBCMEJNY2FUbnc", "year": 2017, }, { "course": 'MSAE-3010: Introduction to Materials Science', "courseid": "s17-3010", "description": "This course is an introduction to nuclear " "physics.", "enrollment": 18, "evaluation": { "response_rate": 58.33, "amount_learned": 4.57, "appropriateness_workload": 4.29, "fairness_grading": 4.57, "course_overall": 4.43, "organization": 4.0, "classroom_delivery": 4.29, "approachability": 4.86, "instructor_overall": 4.43, "comments": [ "Great teacher but disorganized", "Wears pink pants. Why?", ]}, "month": "Jan", "organization": "Columbia University", "position": "professor", "semester": "Spring", "syllabus": "https://drive.google.com/open?id" "=0BxUpd34yizZreDBCMEJNY2FUbnc", "year": 2018, }, { "course": 'MSAE-3010: Introduction to Materials Science', "courseid": "s17-3010", "description": "This course is an introduction to nuclear " "physics.", "enrollment": 18, "evaluation": { "response_rate": 58.33, "amount_learned": 4.57, "appropriateness_workload": 4.29, "fairness_grading": 4.57, "course_overall": 4.43, "organization": 4.0, "classroom_delivery": 4.29, "approachability": 4.86, "instructor_overall": 4.43, "comments": [ "Great teacher but disorganized", "Wears pink pants. Why?", ]}, "month": "Jan", "organization": "Columbia University", "position": "professor", "semester": "Spring", "syllabus": "https://drive.google.com/open?id" "=0BxUpd34yizZreDBCMEJNY2FUbnc", "year": 2017, }, { "course": 'MSAE-3010: Introduction to Materials Science', "courseid": "s17-3010", "description": "This course is an introduction to nuclear " "physics.", "enrollment": 18, "month": "Jan", "organization": "Columbia University", "position": "professor", "semester": "Spring", "syllabus": "https://drive.google.com/open?id" "=0BxUpd34yizZreDBCMEJNY2FUbnc", "year": 2019, }, { "course": 'MSAE-3010: Introduction to Materials Science', "courseid": "f18-3010", "description": "This course is an introduction to nuclear " "physics.", "enrollment": 18, "evaluation": { "response_rate": 58.33, "amount_learned": 4.57, "appropriateness_workload": 4.29, "fairness_grading": 4.57, "course_overall": 4.43, "organization": 4.0, "classroom_delivery": 4.29, "approachability": 4.86, "instructor_overall": 4.43, "comments": [ "Great teacher but disorganized", "Wears pink pants. 
Why?", ]}, "month": "August", "organization": "Columbia University", "position": "professor", "semester": "Fall", "syllabus": "https://drive.google.com/open?id" "=0BxUpd34yizZreDBCMEJNY2FUbnc", "year": 2018, }, { "course": 'MSAE-3010: Introduction to Materials Science', "courseid": "f19-3010", "description": "This course is an introduction to nuclear " "physics.", "month": "August", "organization": "Columbia University", "position": "professor", "semester": "Fall", "syllabus": "https://drive.google.com/open?id" "=0BxUpd34yizZreDBCMEJNY2FUbnc", "year": 2019, }, ], "title": "Dr.", "todos": [ {"description": "read paper", "due_date": "2020-07-19", "begin_date": "2020-06-15", "duration": 60.0, "importance": 2, "status": "started", "assigned_by": "scopatz", "running_index": 1 }, {"description": "prepare the presentation", "due_date": "2020-07-29", "begin_date": "2020-06-22", "duration": 30.0, "importance": 0, "status": "started", "notes": ["about 10 minutes", "don't forget to upload to the website"], "assigned_by": "sbillinge", "running_index": 2 } ], }, {"_id": "abeing", "active": False, "aka": ["being", "human", "person"], "avatar": "https://xkcd.com/1221/", "bio": "Abstract Being is an exemplar human existence", "education": [ {"degree": "bachelors", "institution": "University of Laughs", "begin_year": 2010}, ], "employment": [ {"group": "bg", "begin_date": "2015-06-01", "end_date": "2015-08-31", "organization": "columbiau", "position": "intern"}, {"group": "agroup", "begin_date": "2020-01-01", "end_date": "2030-12-31", "organization": "usouthcarolina", "position": "intern"}, {"group": "bg", "begin_date": "2010-06-01", "end_date": "2012-08-31", "organization": "columbiau", "position": "intern"}, {"group": "bg", "begin_date": "2017-06-01", "end_date": "2019-08-31", "organization": "columbiau", "position": "intern"}, ], "position": "intern", "name": "Abstract Being", } ], "presentations": [ { "_id": "18sb_this_and_that", "abstract": "We pulled apart graphite with tape", "authors": ["scopatz", "afriend"], "begin_year": 2018, "begin_month": 5, "begin_day": 22, "department": "apam", "institution": "columbiau", "location": "Upton NY", "meeting_name": "Meeting to check flexibility on dates", "notes": [ "We hope the weather will be sunny", "if the weather is nice we will go to the " "beach", ], "project": "18sob_clustermining", "status": "accepted", "title": "Graphitic Dephenestration", "type": "award", "webinar": False, }, { "_id": "18sb_nslsii", "abstract": "We pulled apart graphite with tape", "authors": ["scopatz"], "begin_year": 2018, "begin_month": 5, "begin_day": 22, "department": "apam", "end_year": 2018, "end_month": 5, "end_day": 22, "institution": "columbiau", "location": "Upton NY", "meeting_name": "2018 NSLS-II and CFN Users Meeting", "notes": [ "We hope the weather will be sunny", "if the weather is nice we will go to the " "beach", ], "project": "18sob_clustermining", "status": "accepted", "title": "ClusterMining: extracting core structures of " "metallic nanoparticles from the atomic pair " "distribution function", "type": "poster", }, { "_id": "18sb04_kentstate", "abstract": "We made the case for local structure", "authors": ["scopatz"], "begin_year": 2018, "begin_month": "May", "begin_day": 22, "department": "physics", "end_year": 2018, "end_month": 5, "end_day": 22, "institution": "columbiau", "notes": ["what a week!"], "project": "18kj_conservation", "status": "accepted", "title": "Nanostructure challenges and successes from " "16th Century warships to 21st Century energy", 
"type": "colloquium", "webinar": True, }, ], "projecta": { "_id": "sb_firstprojectum", "begin_date": "2020-04-28", "collaborators": ["aeinstein", "pdirac"], "deliverable": { "audience": ["beginning grad in chemistry"], "due_date": "2021-05-05", "success_def": "audience is happy", "scope": ["UCs that are supported or some other scope description " "if it is software", "sketch of science story if it is paper" ], "platform": "description of how and where the audience will access " "the deliverable. Journal if it is a paper", "roll_out": [ "steps that the audience will take to access and interact with " "the deliverable", "not needed for paper submissions"], "notes": ["deliverable note"], "status": "proposed" }, "description": "My first projectum", "end_date": "2020-06-05", "grants": "SymPy-1.1", "group_members": ["ascopatz"], "kickoff": { "date": "2020-05-05", "due_date": "2020-05-06", "name": "Kick off meeting", "objective": "introduce project to the lead", "audience": ["lead", "pi", "group_members"], "notes": ["kickoff note"], "status": "finished" }, "lead": "ascopatz", "log_url": "https://docs.google.com/document/d/1YC_wtW5Q", "milestones": [{ 'due_date': '2020-05-20', 'name': 'Project lead presentation', 'notes': ["do background reading", "understand math"], 'objective': 'lead presents background reading and ' 'initial project plan', 'audience': ['lead', 'pi', 'group_members'], 'status': 'proposed', 'type': 'meeting' }, {'due_date': '2020-05-27', 'name': 'planning meeting', 'objective': 'develop a detailed plan with dates', 'audience': ['lead', 'pi', 'group_members'], 'status': 'proposed', 'type': 'pr', }], "name": "First Projectum", "pi_id": "scopatz", "status": "started" }, "projects": { "_id": "Cyclus", "name": "Cyclus", "description": "Agent-Based Nuclear Fuel Cycle Simulator", "group": "ergs", "highlights": [ {"year": 2020, "month": 5, "description": "high profile pub in Nature"} ], "logo": "http://fuelcycle.org/_static/big_c.png", "other": [ "Discrete facilities with discrete material transactions", "Low barrier to entry, rapid payback to adoption", ], "repo": "https://github.com/cyclus/cyclus/", "team": [ { "begin_month": "June", "begin_year": 2013, "end_month": "July", "end_year": 2015, "name": "Anthony Scopatz", "position": "Project Lead", } ], "type": "funded", "website": "http://fuelcycle.org/", "grant": "dmref15", }, "proposalReviews": [ { "_id": "1906doeExample", "adequacy_of_resources": [ "The resources available to the PI seem adequate" ], "agency": "doe", "competency_of_team": ["super competent!"], "doe_appropriateness_of_approach": [ "The proposed approach is highly innovative" ], "doe_reasonableness_of_budget": [ "They could do it with half the money"], "doe_relevance_to_program_mission": ["super relevant"], "does_how": [ "they will find the cause of Malaria", "when they find it they will determine a cure", ], "due_date": "2020-04-10", "does_what": "Find a cure for Malaria", "freewrite": [ "I can put extra things here, such as special instructions from the", "program officer", ], "goals": [ "The goals of the proposal are to put together a team to find a cure" "for Malaria, and then to find it" ], "importance": ["save lives", "lift people from poverty"], "institutions": "columbiau", "month": "May", "names": ["B. Cause", "A.N. 
Effect"], "nsf_broader_impacts": [], "nsf_create_original_transformative": [], "nsf_plan_good": [], "nsf_pot_to_advance_knowledge": [], "nsf_pot_to_benefit_society": [], "requester": "Lane Wilson", "reviewer": "sbillinge", "status": "submitted", "summary": "dynamite proposal", "title": "A stunning new way to cure Malaria", "year": 2019, }, { "_id": "1906nsfExample", "adequacy_of_resources": [ "The resources available to the PI seem adequate" ], "agency": "nsf", "competency_of_team": ["super competent!"], "doe_appropriateness_of_approach": [], "doe_reasonableness_of_budget": [], "doe_relevance_to_program_mission": [], "does_how": [ "they will find the cause of Poverty", "when they find it they will determine a cure", ], "does_what": "Find a cure for Poverty", "due_date": "2020-04-10", "freewrite": [ "I can put extra things here, such as special instructions from the", "program officer", ], "goals": [ "The goals of the proposal are to put together a team to find a cure" "for Poverty, and then to find it" ], "importance": ["save lives", "lift people from poverty"], "institutions": "upenn", "month": "May", "names": ["A Genius"], "nsf_broader_impacts": ["Poor people will be made unpoor"], "nsf_create_original_transformative": [ "transformative because lives will be transformed" ], "nsf_plan_good": [ "I don't see any issues with the plan", "it should be very straightforward", ], "nsf_pot_to_advance_knowledge": [ "This won't advance knowledge at all"], "nsf_pot_to_benefit_society": [ "Society will benefit by poor people being made unpoor if they want " "to be" ], "requester": "Tessemer Guebre", "reviewer": "sbillinge", "status": "submitted", "summary": "dynamite proposal", "title": "A stunning new way to cure Poverty", "year": 2019, }, ], "proposals": [ { "_id": "mypropsal", "amount": 1000000.0, "authors": ["Anthony Scopatz", "Robert Flanagan"], "begin_day": 1, "begin_month": "May", "begin_year": 2030, "currency": "USD", "submitted_day": 18, "duration": 3, "end_day": 31, "end_month": "December", "end_year": 2030, "full": { "benefit_of_collaboration": "http://pdf.com" "/benefit_of_collaboration", "cv": ["http://pdf.com/scopatz-cv", "http://pdf.com/flanagan-cv"], "narrative": "http://some.com/pdf", }, "submitted_month": "Aug", "notes": "Quite an idea", "pi": "Anthony Scopatz", "pre": { "benefit_of_collaboration": "http://pdf.com" "/benefit_of_collaboration", "cv": ["http://pdf.com/scopatz-cv", "http://pdf.com/flanagan-cv"], "day": 2, "month": "Aug", "narrative": "http://some.com/pdf", "year": 1998, }, "status": "submitted", "title": "A very fine proposal indeed", "submitted_year": 1999, }, { "_id": "dmref15", "amount": 982785.0, "authors": ["qdu", "dhsu", "sbillinge"], "call_for_proposals": "http://www.nsf.gov/pubs/2014/nsf14591/" "nsf14591.htm", "begin_day": 1, "begin_month": "May", "begin_year": 2018, "cpp_info": { "cppflag": True, "other_agencies_submitted": "None", "institution": "Columbia University", "person_months_academic": 0, "person_months_summer": 1, "project_scope": "lots to do but it doesn't overlap with any " "other of my grants", "single_pi": True }, "currency": "USD", "submitted_date": "2015-02-02", "duration": 3, "end_day": 1, "end_month": "May", "end_year": 2019, "funder": "NSF", "notes": "Quite an idea", "pi": "Simon Billinge", "status": "accepted", "team": [ { "institution": "Columbia University", "name": "qdu", "position": "co-pi", }, { "institution": "Columbia University", "name": "dhsu", "position": "co-pi", }, { "institution": "Columbia University", "name": "sbillinge", 
"position": "pi", "subaward_amount": 330000.0, }, ], "title": "DMREF: Novel, data validated, nanostructure determination " "methods for accelerating materials discovery", "title_short": "DMREF nanostructure", }, { "_id": "SymPy-1.1", "amount": 3000.0, "begin_date": "2030-05-01", "end_date": "2030-12-31", "cpp_info": { "cppflag": True, "other_agencies_submitted": "None", "institution": "Columbia University", "person_months_academic": 0, "person_months_summer": 1, "project_scope": "" }, "currency": "USD", "pi": "sbillinge", "status": "submitted", "title": "SymPy 1.1 Release Support", }, { "_id": "SymPy-2.0", "amount": 3000.0, "begin_date": "2019-06-01", "end_date": "2030-12-31", "cpp_info": { "cppflag": True, "other_agencies_submitted": "None", "institution": "Columbia University", "person_months_academic": 0, "person_months_summer": 1, "project_scope": "" }, "currency": "USD", "pi": "sbillinge", "status": "submitted", "title": "SymPy 1.1 Release Support", }, { "_id": "abc42", "amount": 42000.0, "begin_date": "2020-06-01", "end_date": "2020-12-31", "cpp_info": { "cppflag": True, "other_agencies_submitted": "None", "institution": "Columbia University", "person_months_academic": 0, "person_months_summer": 1, "project_scope": "" }, "currency": "USD", "pi": "sbillinge", "status": "submitted", "title": "The answer to life, the universe, and everything", } ], "reading_lists": { "_id": "getting_started_with_pdf", "day": "15", "month": "12", "papers": [{"doi": "10.1107/97809553602060000935", "text": "Very basic, but brief, intro to powder diffraction in general"}, {"doi": "10.1039/9781847558237-00464", "text": "Lightest weight overview of PDF analysis around. Good starting point" }, {"url": "http://www.diffpy.org", "text": "Download and install PDFgui software and run through the step by step tutorial under the help tab"} ], "purpose": "Beginning reading about PDF", "title": "A step-by-step pathway towards PDF understanding. It is recommended to read the papers in the order they are listed here.", "year": 2019, }, "refereeReports": { "_id": "1902nature", "claimed_found_what": ["gravity waves"], "claimed_why_important": ["more money for ice cream"], "did_how": ["measured with a ruler"], "did_what": ["found a much cheaper way to measure gravity waves"], "due_date": '2020-04-11', "editor_eyes_only": "to be honest, I don't believe a word of it", "final_assessment": ["The authors should really start over"], "first_author_last_name": "Wingit", "freewrite": "this comment didn't fit anywhere above", "journal": "Nature", "recommendation": "reject", "requester": "Max Planck", "reviewer": "sbillinge", "status": "submitted", "submitted_date": "2019-01-01", "title": "a ruler approach to measuring gravity waves", "validity_assessment": ["complete rubbish"], "year": 2019, }, "students": { "_id": "Human A. Person", "aka": ["H. A. Person"], "email": "haperson@uni.edu", "university_id": "HAP42", }, } SCHEMAS = { "abstracts": { "_description": { "description": "Abstracts for a conference or workshop. This is " "generally public information" }, "_id": { "description": "Unique identifier for submission. 
This generally " "includes the author name and part of the title.", "required": True, "type": "string", }, "coauthors": { "description": "names of coauthors", "required": False, "type": "string", }, "email": { "description": "contact email for the author.", "required": True, "type": "string", }, "firstname": { "description": "first name of the author.", "required": True, "type": "string", }, "institution": { "description": "name of the institution", "required": True, "type": "string", }, "lastname": { "description": "last name of the author.", "required": True, "type": "string", }, "references": { "description": "HTML string of reference for the abstract itself", "required": False, "type": "string", }, "text": { "description": "HTML string of the abstract.", "required": True, "type": "string", }, "timestamp": { "description": "The time when the abstract was submitted.", "required": True, "type": "string", }, "title": { "description": "title of the presentation/paper.", "required": True, "type": "string", }, }, "assignments": { "_description": { "description": "Information about assignments for classes."}, "_id": { "description": "A unique id for the assignment, such as " "HW01-EMCH-558-2016-S", "required": True, "type": "string", }, "category": { "description": "such as 'homework' or 'final'", "required": True, "type": "string", }, "courses": { "description": "ids of the courses that have this assignment", "required": True, "anyof_type": ["string", "list"], }, "file": { "description": "path to assignment file in store", "required": False, "type": "string", }, "points": { "description": "list of number of points possible for each " "question. Length is the number of questions", "required": True, "type": "list", "schema": {"anyof_type": ["integer", "float"]}, }, "questions": { "description": "titles for the questions on this assignment", "required": False, "type": "list", }, "solution": { "description": "path to solution file in store", "required": False, "type": "string", }, }, "beamplan": { "_id": { "description": "Unique identifier for the experiment plan. It should have a format '{year:2d}{month:2d}{people_id:s}_{plan_name:s}'", "required": True, "type": "string" }, "_description": { "description": "Information about the experiment plan for the beamtime."}, "project_lead": { "description": "The id for person who put out this plan. It should be inside the people.yml.", "required": True, "type": "string" }, "project": { "description": "The id for the project which the plan belongs to. It should be on airtable.", "required": True, "type": "string" }, "begin_date": { "description": "The begin date of the beam time.", "required": True, "anyof_type": ["string", "datetime", "date"] }, "end_date": { "description": "The end date of the beam time.", "required": True, "anyof_type": ["string", "datetime", "date"] }, "beamtime": { "description": "The id for the beamtime. Check the Airtable.", "required": True, "type": "string" }, "holder": { "description": "Sample holder used during the measurement, e. g. 3 mm OD tubes holder.", "required": True, "type": "string" }, "devices": { "description": "The dictionary of devices used in the measurement e. g. ", "required": True, "type": "list", "schema": { "type": "string" } }, "measurement": { "description": "What data to be measured, e. g. PDF, XRD, SAXS. 
This will determine the setup.", "required": True, "type": "string" }, "samples": { "description": "The list of samples to be measured.", "required": True, "type": "list", "schema": { "type": "string" } }, "time": { "description": "The total time of executing the exp_plan. Unit: min.", "required": True, "type": "integer" }, "objective": { "description": "What to study in the experiments. What goal to achieve.", "required": True, "type": "string" }, "prep_plan": { "description": "Steps to prepare the samples. Do NOT need details.", "required": True, "type": "list", "schema": { "type": "string" } }, "ship_plan": { "description": "Steps to carry the samples from the producer to the BNL. Do NOT need details.", "required": True, "type": "list", "schema": { "type": "string" } }, "exp_plan": { "description": "Steps to carry out the experiments at BNL. Need details", "required": True, "type": "list", "schema": { "type": "string" } }, "scanplan": { "description": "The scanplan for the experiment, e. g. tseries, Tramp, ct.", "required": True, "type": "list", "schema": { "type": "string" } }, "pipeline": { "description": "The analysis pipeline for the experiment. If no new pipeline is needed, use 'usual'.", "required": True, "type": "string", "default": "usual" }, "todo": { "description": "The TODO list before the beamtime.", "required": True, "type": "list", "schema": { "type": "string" } }, "notes": { "description": "Notes of the plan, e. g. the preferred time.", "required": False, "anyof_type": [ "list", "string" ], "schema": { "type": "string" } } }, "blog": { "_description": { "description": "This collection represents blog posts written by " "the members of the research group." }, "_id": { "description": "short representation, such as this-is-my-title", "required": True, "type": "string", }, "author": { "description": "name or AKA of author", "required": True, "type": "string", }, "day": {"description": "Publication day", "required": True, "type": "integer"}, "month": { "description": "Publication month", "required": True, "anyof_type": ["string", "integer"], }, "original": { "description": "URL of original post, if this is a repost", "required": False, "type": "string", }, "post": { "description": "actual contents of the post", "required": True, "type": "string", }, "title": { "description": "full human readable title", "required": True, "type": "string", }, "year": { "description": "Publication year", "required": True, "type": "integer", }, }, "contacts": { "_description": {"description": "a lighter version of people. Fewer required fields" "for capturing people who are less tightly coupled" }, "_id": { "description": "id of the person, e.g., first letter first name " "plus last name, but unique", "required": True, }, "aka": { "required": False, "type": "list", "description": "other names for the person", }, "date": { "description": "date when the entry was created in ISO format", "required": False, "anyof_type": ["string", "date"], }, 'day': { "description": "day when the entry was created", "required": False, "type": "integer", }, "department": { "description": "Department at the institution", "type": "string", "required": False, }, "email": { "description": "Contact email for the contact", "type": "string", "required": False, }, "institution": { "description": "the institution where they are located. This is" "required for building a COI list of coauthors, but" "not in general. 
It can be institute id or anything" "in the aka or name", "required": False, "type": "string" }, 'month': { "description": "month when the entry was created", "required": False, "anyof_type": ["string", "integer"], }, "name": { "description": "the person's canonical name", "required": True, "type": "string", }, "notes": { "description": "notes about the person", "required": False, "anyof_type": ["list", "string"] }, "title": { "description": "how the person is addressed", "required": False, "type": "string", }, 'updated': { "description": "most recently updated", "required": False, "anyof_type": ["string", "datetime", "date"], }, 'year': { "description": "year when the entry was created", "required": False, "type": "integer", }, 'uuid': { "description": "universally unique identifier", "required": False, "type": "string", }, }, "expenses": { "_description": { "description": "This collection records expenses for the " "group. It should most likely be private" }, "_id": { "description": "short representation, such as this-is-my-name", "required": True, "type": "string", }, "begin_date": { "description": "begin date in YYYY-MM-DD", "anyof_type": ["string", "date"], }, "end_date": { "description": "end date in YYYY-MM-DD", "anyof_type": ["string", "date"], }, "grant_percentages": { "description": "the percentage of the reimbursement amount to put " "on each grant. This list must be the same length as" "the grants list and the percentages placed in the " "order that the grants appear in that list", "required": False, "type": "list", }, "grants": { "description": "the grants in a list, or a string if only one grant", "required": True, "anyof_type": ["string", "list"], }, "project": { "description": "project or list of projects that this " "presentation is associated with. Should " "be discoverable in projects collection", "anyof_type": ["string", "list"], }, "payee": { "description": "The name or id of the payee filing the expense", "required": True, "type": "string", }, "itemized_expenses": { "type": "list", "schema": { "type": "dict", "schema": { "day": { "description": "Expense day", "required": False, "type": "integer", }, "date": { "description": "Expense date", "required": False, "anyof_type": ["string", "date"], }, "month": { "description": "Expense month", "required": False, "anyof_type": ["string", "integer"], }, "year": { "description": "Expense year", "required": False, "type": "integer", }, "purpose": { "description": "reason for expense", "type": "string", "required": True, }, "unsegregated_expense": { "description": "The allowed expenses", "type": "float", }, "segregated_expense": { "description": "The unallowed expenses", "type": "float", }, "original_currency": { "description": "The currency the payment was made in", "type": "float", }, }, }, }, "overall_purpose": { "description": "The reason for the expenses", "type": "string", "required": True, }, "notes": { "description": "Notes about the expense", "type": "list", }, "status": { "description": "The status of the expense", "eallowed": EXPENSES_TYPE, "type": "string" }, "reimbursements": { "description": "Reimbursements for the expense", "schema": { "schema": { 'amount': {"description": 'amount for reimbursements', "type": "float", }, 'date': {"description": "date of reimbursement", "anyof_type": ["string", "date"], }, 'submission_date': {"description": "date of submission", "anyof_type": ["string", "date"], }, 'submission_day': {"description": "day of submission. 
deprecated but here for " "backwards compatibility", "type": "integer", }, 'submission_month': {"description": "month of submission. deprecated but here for " "backwards compatibility", "anyof_type": ["integer", "string"], }, 'submission_year': {"description": "year of submission. deprecated but here for " "backwards compatibility", "type": "integer", }, 'day': {"description": "day of reimbursement. deprecated but here for " "backwards compatibility", "type": "integer", }, 'month': {"description": "month of reimbursement. deprecated but here for " "backwards compatibility", "anyof_type": ["string", "integer"], }, 'year': {"description": "year of reimbursement. deprecated but here for " "backwards compatibility", "type": "integer", }, 'where': {"description": 'where the reimbursement has been sent', "type": 'string', }, }, "type": "dict" }, "type": "list" }, "expense_type": { "description": "The type of expense", "allowed": ["travel", "business"], "required": True, }, }, "grades": { "_description": { "description": "The grade for a student on an assignment. This " "information should be private." }, "_id": { "description": "unique id, typically the " "student-assignment-course", "required": True, "type": "string", }, "assignment": { "description": "assignment id", "required": True, "type": "string", }, "course": {"description": "course id", "required": True, "type": "string"}, "filename": { "description": "path to file in store", "required": False, "type": "string", }, "scores": { "description": "the number of points earned on each question", "required": True, "type": "list", "schema": {"anyof_type": ["integer", "float"]}, }, "student": {"description": "student id", "required": True, "type": "string"}, }, "grants": { "_description": { "description": "This collection represents grants that have been " "awarded to the group." }, "_id": { "description": "short representation, such as this-is-my-name", "required": True, "type": ("string", "integer", "float"), }, "account": { "description": "the account number which holds the funds", "required": False, "type": "string", }, "admin": { "description": "the group administering the grant", "type": "string", "required": False, }, "alias": { "description": "the alias of the grant", "type": "string", "required": False, }, "amount": { "description": "value of award", "required": True, "type": ("integer", "float"), }, "begin_date": { "description": "start date of the grant (if string, in format YYYY-MM-DD)", "required": False, "anyof_type": ["string", "date"] }, "begin_day": { "description": "start day of the grant", "required": False, "type": "integer", }, "begin_month": { "description": "start month of the grant", "required": False, "anyof_type": ["string", "integer"], }, "begin_year": { "description": "start year of the grant", "required": False, "type": "integer", }, "benefit_of_collaboration": { "description": "", "required": False, "type": "string", }, # TODO: maybe this should move to proposals? 
"call_for_proposals": {"description": "", "required": False, "type": "string"}, "currency": { "description": "typically '$' or 'USD'", "required": False, "type": "string", }, "end_date": { "description": "start date of the grant (if string, in format YYYY-MM-DD)", "required": False, "anyof_type": ["string", "date"] }, "end_day": { "description": "end day of the grant", "required": False, "type": ("string", "integer"), }, "end_month": { "description": "end month of the grant", "required": False, "anyof_type": ["string", "integer"], }, "end_year": { "description": "end year of the grant", "required": False, "type": "integer", }, "funder": { "description": "the agency funding the work", "required": True, "type": "string", }, "grant_id": { "description": "the identifier for this work", "required": False, "type": "string", }, "institution": { "description": "the host institution for the grant", "type": "string", "required": False, }, "narrative": {"description": "", "required": False, "type": "string"}, "notes": { "description": "notes about the grant", "required": False, "type": "string", }, "person_months_academic": { "description": "Number of months of funding during the academic" "year", "required": False, "anyof_type": ["integer", "float"], }, "person_months_summer": { "description": "Number of months of funding during the summer", "required": False, "anyof_type": ["integer", "float"], }, "program": { "description": "the program the work was funded under", "required": True, "type": "string", }, # TODO: maybe this should be moved to proposals? "status": { "allowed": ["pending", "declined", "accepted", "in-prep"], "description": "status of the grant", "required": False, "type": "string", }, "scope": { "description": "The scope of the grant, answers the prompt: " '"Describe Research Including Synergies and ' 'Delineation with Respect to this Proposal/Award:"', "required": False, "type": "string", }, # TODO: maybe this should be duplicated in proposals? 
"team": { "description": "information about the team members participating " "in the grant.", "required": True, "schema": { "schema": { "cv": {"required": False, "type": "string"}, "institution": {"required": True, "type": "string"}, "name": {"required": True, "type": "string"}, "position": {"required": True, "type": "string"}, "subaward_amount": { "required": False, "type": ("integer", "float"), }, }, "type": "dict", }, "type": "list", }, "title": { "description": "actual title of proposal / grant", "required": True, "type": "string", }, "budget": { "description": "budget periods of grant", "required": False, "schema": { "schema": { "begin_date": { "description": "start date of the budget period in format YYYY-MM-DD", "required": False, "anyof_type": ["string", "date"], }, "end_date": { "description": "end date of the budget period in format YYYY-MM-DD", "required": False, "anyof_type": ["string", "date"], }, "student_months": { "description": "number of months of funding for student members during the academic year", "required": False, "anyof_type": ["float", "integer"] }, "postdoc_months": { "description": "number of months of funding for postdoc members during the academic year", "required": False, "anyof_type": ["float", "integer"] }, "ss_months": { "description": "number of months of funding for the summer", "required": False, "anyof_type": ["float", "integer"] }, "amount": { "description": "subaward for this budget period", "required": False, "anyof_type": ["float", "integer"] } }, "type": "dict", }, "type": "list", }, "proposal_id": { "description": "initial proposal made for grant", "required": False, "type": "string", } }, "groups": { "_description": { "description": "Information about the research group" "this is generally public information" }, "_id": { "description": "Unique identifier for submission. 
This generally "
                           "includes the author name and part of the title.",
            "required": True,
            "type": "string",
        },
        "aka": {
            "required": True,
            "type": "list",
            "description": "other names for the group",
        },
        "banner": {
            "required": False,
            "type": "string",
            "description": "name of image file with the group banner",
        },
        "pi_name": {
            "description": "The name of the Principal Investigator",
            "required": True,
            "type": "string",
        },
        "department": {
            "description": "Name of host department",
            "required": True,
            "type": "string",
        },
        "institution": {
            "description": "Name of the host institution",
            "required": True,
            "type": "string",
        },
        "name": {
            "description": "Name of the group",
            "required": True,
            "type": "string",
        },
        "website": {"description": "URL to group webpage", "type": "string"},
        "mission_statement": {
            "description": "Mission statement of the group",
            "type": "string",
        },
        "projects": {
            "description": "About line for projects",
            "type": "string",
            "required": True,
        },
        "email": {
            "description": "Contact email for the group",
            "type": "string",
            "required": True,
        },
    },
    "institutions": {
        "_description": {
            "description": "This collection will contain all the institutions "
                           "in the world and their departments and addresses"
        },
        "_id": {
            "description": "unique identifier for the institution.",
            "required": True,
            "type": "string",
        },
        "aka": {
            "description": "list of all the different names this "
                           "institution is known by",
            "required": False,
            "type": "list",
        },
        "city": {
            "description": "the city where the institution is",
            "required": True,
            "type": "string",
        },
        "country": {
            "description": "The country where the institution is",
            "required": True,
            "type": "string",
        },
        "date": {
            "description": "the date the entry was created",
            "required": False,
            "anyof_type": ["string", "date"],
        },
        "day": {
            "description": "the day the entry was created",
            "required": False,
            "type": "integer",
        },
        "departments": {
            "description": "all the departments and centers and "
                           "various units in the institution",
            "required": False,
            "type": "dict",
            # Allow unknown department names, but check their content
            "valuesrules": {
                "type": "dict",
                "schema": {
                    "name": {
                        "description": "The canonical name",
                        "required": True,
                        "type": "string",
                    },
                    "aka": {"required": False, "type": "list"},
                },
            },
        },
        "month": {
            "description": "the month the entry was created",
            "required": False,
            "anyof_type": ["string", "integer"]
        },
        "name": {
            "description": "the canonical name of the institution",
            "required": True,
            "type": "string",
        },
        "schools": {
            "description": "this is more for universities, but it can "
                           "be used for larger divisions in big "
                           "organizations",
            "required": False,
            "type": "dict",
            "valuesrules": {
                "type": "dict",
                "schema": {
                    "name": {
                        "description": "The canonical name",
                        "required": True,
                        "type": "string",
                    },
                    "aka": {"required": False, "type": "list"},
                },
            },
        },
        "state": {
            "description": "the state where the institution is",
            "required": False,
            "type": "string",
        },
        "street": {
            "description": "the street where the institution is",
            "required": False,
            "type": "string",
        },
        "updated": {
            "description": "a datetime when the entry was updated",
            "required": False,
            "anyof_type": ["string", "datetime", "date"]
        },
        "uuid": {
            "description": "a uuid for the entry",
            "required": False,
            "type": "string",
        },
        "year": {
            "description": "the year the entry was created",
            "required": False,
            "type": "integer",
        },
        "zip": {
            "description": "the zip or postal code of the institution",
            "required": False,
            "anyof_type": ["integer", "string"],
        },
    },
    "meetings": {
        "_id": {
            "description": "unique identifier for the date of the "
                           "group meeting",
            "required": True,
            "type": "string",
        },
        "_description": {
            "description": "the group meeting."
        },
        "actions": {
            "description": "action items expected from the group members "
                           "for that particular meeting week",
            "required": False,
            "type": "list",
        },
        "agenda": {
            "description": "schedule of the current meeting",
            "required": False,
            "type": "list",
        },
        "buddies": {
            "description": "list of pairs of group members that are "
                           "selected for the buddy round robin",
            "required": False,
            "type": "list",
        },
        "day": {
            "description": "day of the group meeting",
            "required": False,
            "type": "integer",
        },
        "journal_club": {
            "description": "the doi of the journal-club article and the "
                           "group member who will present it",
            "required": False,
            "type": "dict",
        },
        "lead": {
            "description": "person who will be leading the meeting of "
                           "the current week",
            "required": False,
            "type": "string",
        },
        "minutes": {
            "description": "meeting notes in a chronological order "
                           "according to comments made by the group members",
            "required": False,
            "type": "list",
        },
        "month": {
            "description": "month in which the meeting is taking place",
            "required": False,
            "anyof_type": ["string", "integer"]
        },
        "place": {
            "description": "location where the meeting is taking place "
                           "on campus",
            "required": False,
            "type": "string",
        },
        "presentation": {
            "description": "the title of the presentation along with "
                           "the link and the presenter",
            "required": False,
            "type": "dict",
        },
        "scribe": {
            "description": "person who will be taking notes and updating "
                           "minutes accordingly",
            "required": False,
            "type": "string",
        },
        "time": {
            "description": "the time the meeting takes place. "
                           "If an integer, it is minutes past midnight, "
                           "so 13:30 is 810, for example.",
            "required": False,
            "anyof_type": ["string", "integer"]
        },
        "updated": {
            "description": "the datetime when the entry was last updated",
            "required": False,
            "anyof_type": ["string", "datetime", "date"],
        },
        "uuid": {
            "description": "a universally unique identifier for the entry",
            "required": False,
            "type": "string",
        },
        "year": {
            "description": "the year in which the meeting takes place",
            "required": False,
            "type": "integer",
        },
    },
    "people": {
        "_description": {
            "description": "This collection describes the members of the "
                           "research group. This is normally public data."
}, "_id": { "description": "unique identifier for the group member", "required": True, "type": "string", }, "active": { "description": "If the person is an active member, default True.", "required": False, "type": "boolean", }, "aka": { "description": "list of aliases (also-known-as), useful for " "identifying the group member in citations or " "elsewhere.", "required": True, "type": ["string", "list"], }, "appointments": { "type": "dict", "required": False, "description": "begin and end date, grant loading status and notes about appointments" }, "activities": { "type": "list", "required": False, "description": "activities may be teaching or research things", "schema": { "type": "dict", "schema": { "day": { "required": False, "description": "the day the activity took place", "type": "integer", }, "type": { "required": True, "description": "the type of the acitivity", "type": "string", "eallowed": ACTIVITIES_TYPE }, "month": { "required": False, "description": "the month the activity took place", "anyof_type": ["integer", "string"], }, "name": { "required": True, "description": "brief statement of the activity", "type": "string", }, "other": { "required": False, "description": "longer statement of the activity", "type": "string", }, "year": { "required": True, "description": "the year the activity took place", "type": "integer", }, } } }, "avatar": {"description": "URL to avatar", "required": True, "type": "string"}, "bio": { "description": "short biographical text", "required": True, "type": "string", }, "bios": { "description": "longer biographical text if needed", "required": False, "anyof_type": ["string", "list"] }, "collab": { "description": "If the person is a collaborator, default False.", "required": False, "type": "boolean", }, "committees": { "description": "Committees that are served on", "required": False, "schema": { "type": "dict", "schema": { "name": {"required": True, "type": "string", "description": "name of committee, or person if it " "is a phd committee"}, "day": {"required": False, "type": "integer"}, "month": {"required": False, "anyof_type": ["string", "integer"], }, "notes": {"required": False, "description": "extra things you want to record about the thing", "anyof_type": ["string", "list"], }, "year": {"required": True, "type": "integer"}, "unit": {"required": False, "type": "string", "description": "name of department or school etc."}, "type": {"required": False, "type": "string", "description": "type of committee, department, school, university, external", "eallowed": COMMITTEES_TYPE}, "level": { "required": True, "type": "string", "description": "department or school or university, or external", "eallowed": COMMITTEES_LEVEL }, "group": { "required": False, "type": "string", "description": "this employment is/was in" "a group in groups coll", }, }, }, "type": "list", }, "education": { "description": "This contains the educational information for " "the group member.", "required": True, "schema": { "type": "dict", "schema": { "advisor": {"required": False, "type": "string", "description": "name or id of advisor for this degree"}, "begin_day": {"required": False, "type": "integer"}, "begin_month": {"required": False, "anyof_type": ["string", "integer"], }, "begin_year": {"required": True, "type": "integer"}, "degree": {"required": True, "type": "string"}, "department": { "required": False, "type": "string", "description": "department within" "the institution", }, "group": { "required": False, "type": "string", "description": "this employment is/was in" "a group in 
groups coll", }, "end_day": {"required": False, "type": "integer"}, "end_month": {"required": False, "anyof_type": ["string", "integer"], }, # Could be ongoing with undefined end "end_year": {"required": False, "type": "integer"}, "gpa": {"required": False, "type": ("float", "string")}, "institution": {"required": True, "type": "string"}, "location": {"required": False, "type": "string"}, "other": {"required": False, "type": "list"}, }, }, "type": "list", }, "email": { "description": "email address of the group member", "required": False, "type": "string", }, "employment": { "description": "Employment information, similar to educational " "information.", "required": False, "type": "list", "schema": { "type": "dict", "schema": { "advisor": {"required": False, "type": "string", "description": "name or id of " "advisor/mentor/manager"}, "begin_day": {"required": False, "type": "integer"}, "begin_month": {"required": False, "anyof_type": ["string", "integer"], }, "begin_year": {"required": False, "type": "integer"}, "begin_date": {"required": False, "anyof_type": ["string", "date", "datetime"], "description": "begin date of employment in format YYYY-MM-DD"}, "coworkers": {"required": False, "type": "list", "description": "list of coworkers. If" "position is editor, these are " "assumed to be coeditors in" "conflict of interest builder"}, "department": {"required": False, "type": "string"}, "end_day": {"required": False, "type": "integer"}, "end_month": {"required": False, }, "end_year": {"required": False, "type": "integer"}, "end_date": {"required": False, "anyof_type": ["string", "date", "datetime"], "description": "end date of employment in format YYYY-MM-DD"}, "group": { "required": False, "type": "string", "description": "this employment is/was in" "a group in groups coll", }, "location": {"required": False, "type": "string"}, "organization": {"required": True, "type": "string"}, "other": {"required": False, "type": "list"}, "permanent": {"required": False, "type": "boolean", "description": "true if the position is open " \ "ended and has no fixed end-date"}, "position": {"required": True, "type": "string", "eallowed": list(SORTED_POSITION)}, "position_full": { "description": "The full on title of the position. This will be " "typeset if it is here, or if not Position will be " "used. Position will be used for sorting and must " "come from a fixed list of positions", "required": False, "type": "string", }, "status": {"required": False, "type": "string", "eallowed": POSITION_STATUS, }, }, }, }, "facilities": { "type": "list", "required": False, "description": "facilities may be teaching or research things", "schema": { "type": "dict", "schema": { "begin_day": { "required": False, "description": "the day facility, or the wish for the " "facility, started", "type": "integer", }, "end_day": { "required": False, "description": "the day facility started", "type": "integer", }, "type": { "required": True, "description": "the type of the facility. 
Columbia asks "
                                   "for wished-for facilities, so there are "
                                   "teaching-wish and research-wish fields.",
                    "type": "string",
                    "eallowed": FACILITIES_TYPE
                },
                "begin_month": {
                    "required": False,
                    "description": "the month the facility (or wish) started",
                    "anyof_type": ["integer", "string"],
                },
                "end_month": {
                    "required": False,
                    "description": "the month the facility went away",
                    "anyof_type": ["integer", "string"],
                },
                "name": {
                    "required": True,
                    "description": "description of the facility",
                    "type": "string",
                },
                "notes": {
                    "required": False,
                    "description": "anything else you want to jot down",
                    "anyof_type": ["string", "list"]
                },
                "begin_year": {
                    "required": True,
                    "description": "the year the facility (or wish) started",
                    "type": "integer",
                },
                "end_year": {
                    "required": False,
                    "description": "the year the facility (or wish) went away",
                    "type": "integer",
                },
            }
        }
    },
        "funding": {
            "description": "Funding and scholarship that the group member "
                           "has individually obtained in the past. "
                           "**WARNING:** this is not to be confused with the "
                           "**grants** collection",
            "required": False,
            "schema": {
                "type": "dict",
                "schema": {
                    "currency": {"required": False, "type": "string"},
                    "duration": {"required": False, "type": "string"},
                    "month": {"required": False,
                              "anyof_type": ["string", "integer"], },
                    "name": {"required": True, "type": "string"},
                    "value": {"required": True, "type": ("float", "integer")},
                    "year": {"required": True, "type": "integer"},
                },
            },
            "type": "list",
        },
        "github_id": {"required": False, "type": "string",
                      "description": "Your GitHub ID"},
        "google_scholar_url": {"required": False, "type": "string",
                               "description": "URL of your Google Scholar "
                                              "profile"},
        "grp_mtg_active": {"required": False, "type": "boolean",
                           "description": "Whether to schedule tasks at "
                                          "group meeting or not"},
        "hindex": {
            "description": "details of hindex pulled on a certain date",
            "required": False,
            "schema": {
                "type": "dict",
                "schema": {
                    "h": {"description": "the value of the h index",
                          "required": True, "type": "integer"},
                    "h_last_five": {"description": "h index over past 5 years",
                                    "required": False, "type": "integer"},
                    "citations": {"description": "total number of citations",
                                  "required": False, "type": "integer"},
                    "citations_last_five": {"description": "number of citations "
                                                           "in the past 5 years",
                                            "required": False,
                                            "type": "integer"},
                    "origin": {"description": "where the numbers came from",
                               "required": False, "type": "string"},
                    "since": {"description": "year of first citation",
                              "required": False, "type": "integer"},
                    "year": {"description": "year when the data were pulled",
                             "required": False, "type": "integer"},
                    "month": {"description": "month when the data were pulled",
                              "required": False,
                              "anyof_type": ["string", "integer"]},
                    "day": {"description": "day when the data were pulled",
                            "required": False, "type": "integer"},
                }
            },
            "type": "list",
        },
        "home_address": {
            "description": "The person's home address",
            "type": "dict",
            "schema": {
                "street": {"type": "string", "description": "street address"},
                "city": {"type": "string", "description": "name of home city"},
                "state": {"type": "string", "description": "name of home state"},
                "zip": {"type": "string", "description": "zip code"},
            },
        },
        "honors": {
            "description": "Honors that have been awarded to this "
                           "group member",
            "required": False,
            "schema": {
                "type": "dict",
                "schema": {
                    "description": {"required": False, "type": "string"},
                    "month": {"required": False,
                              "anyof_type": ["string", "integer"], },
                    "name": {"required": True, "type": "string"},
                    "year": {"required": True, "type": "integer"},
                },
            },
"type": "list", }, "initials": { "description": "The canonical initials for this group member", "required": False, "type": "string", }, # TODO: include `link` "membership": { "description": "Professional organizations this member is " "a part of", "required": False, "schema": { "type": "dict", "schema": { "begin_month": {"required": False, "anyof_type": ["string", "integer"], }, "begin_year": {"required": True, "type": "integer"}, "description": {"required": False, "type": "string"}, "end_month": {"required": False, "anyof_type": ["string", "integer"], }, "end_year": {"required": False, "type": "integer"}, "organization": {"required": True, "type": "string"}, "position": {"required": True, "type": "string"}, "website": {"required": False, "type": "string"}, }, }, "type": "list", }, "miscellaneous": { "description": "Place to put weird things needed for special reporta", "required": False, "type": "dict", "schema": { "metrics_for_success": { "description": "How do I want to be judged", "required": False, "type": "list", }, }, }, "name": { "description": "Full, canonical name for the person", "required": True, "type": "string", }, "office": { "description": "The person's office", "type": "string", "required": False }, "orcid_id": { "description": "The ORCID ID of the person", "required": False, "type": "string", }, "position": { "description": "such as professor, graduate student, or scientist", "required": False, "type": "string", "eallowed": list(SORTED_POSITION), }, "position_full": { "description": "The full on title of the position. This will be " "typeset if it is here, or if not Position will be " "used. Position will be used for sorting and must " "come from a fixed list of positions", "required": False, "type": "string", }, "publicity": { "description": "summary of publicity that person has received", "required": False, "schema": { "type": "dict", "schema": { "type": {"required": True, "type": "string", "eallowed": PUBLICITY_TYPE}, "topic": {"required": False, "type": "string", "description": "The short sentence of what the " "publicity was about", }, "title": {"required": True, "type": "string", "description": "The title of the piece", }, "day": {"required": False, "type": "integer", "description": "The day the piece appeared" }, "month": {"required": False, "anyof_type": ["string", "integer"], "description": "The month the piece appeared" }, "publication": {"required": False, "type": "string", "description": "The place where the " "publicity was placed" }, "text": {"required": False, "type": "string", "description": "The text of the publicity", }, "url": {"required": False, "type": "string", "description": "The URL where the piece may be found" }, "year": {"required": True, "type": "integer", "description": "The year the piece appeared" }, "grant": {"required": True, "type": "string", "description": "The identifier of the grant " "associated with the piece" }, }, }, "type": "list" }, "research_focus_areas": { "description": "summary of research projects that are ongoing. 
Used" "in Annual appraisal for example", "required": False, "schema": { "type": "dict", "schema": { "begin_year": {"required": False, "type": "integer"}, "end_year": {"required": False, "type": "integer"}, "description": {"required": False, "type": "string"} }, }, "type": "list" }, "research_summary": { "description": "Brief summary of overarching research goals", "required": False, "type": "string", }, "service": { "description": "Service that this group member has provided", "required": False, "schema": { "type": "dict", "schema": { "description": {"required": False, "type": "string"}, "duration": {"required": False, "type": "string"}, "month": {"description": "Use month and year if the service" "doesn't extend more than one year." "Otherwise use begin_year and end_year", "required": False, "anyof_type": ["string", "integer"] }, "name": {"required": True, "type": "string"}, "role": {"required": False, "type": "string", "description": "the role played in the activity, e.g., co-chair"}, "notes": {"required": False, "anyof_type": ["string", "list"]}, "year": {"required": False, "type": "integer"}, "begin_year": {"required": False, "type": "integer"}, "begin_day": {"required": False, "type": "integer"}, "begin_month": {"description": "Use month and year if the service" "doesn't extend more than one year." "Otherwise use begin_year/month and end_year/month", "required": False, "anyof_type": ["string", "integer"] }, "end_year": {"required": False, "type": "integer"}, "end_month": {"description": "Use month and year if the service" "doesn't extend more than one year." "Otherwise use begin_year and end_year", "required": False, "anyof_type": ["string", "integer"] }, "end_day": {"required": False, "type": "integer"}, "other": {"required": False, "anyof_type": ["string", "list"]}, "type": {"required": True, "type": "string", "description": "profession, department, school, university", "eallowed": SERVICE_TYPE}, }, }, "type": "list", }, "skills": { "description": "Skill the group member has", "required": False, "schema": { "type": "dict", "schema": { "category": {"required": True, "type": "string"}, "level": {"required": True, "type": "string"}, "name": {"required": True, "type": "string"}, }, }, "type": "list", }, "teaching": { "description": "Courses that this group member has taught, if any", "required": False, "type": "list", "schema": { "type": "dict", "schema": { "course": {"required": True, "type": "string"}, "courseid": {"required": True, "type": "string"}, "description": {"required": False, "type": "string"}, "end_month": {"required": False, "anyof_type": ["string", "integer"]}, "end_year": {"required": False, "type": "integer"}, "enrollment": {"required": False, "anyof_type": ["integer", "string"]}, "evaluation": { "type": "dict", "required": False, "schema": { "response_rate": {"type": "number", "required": True}, "amount_learned": {"type": "number", "required": True}, "appropriateness_workload": {"type": "number", "required": True}, "course_overall": {"type": "number", "required": True}, "fairness_grading": {"type": "number", "required": True}, "organization": {"type": "number", "required": True}, "classroom_delivery": {"type": "number", "required": True}, "approachability": {"type": "number", "required": True}, "instructor_overall": {"type": "number", "required": True}, "comments": {"type": "list", "required": False, "description": "student comments"}, }, }, "materials": {"required": False, "type": "string"}, "month": {"required": False, "anyof_type": ["string", "integer"], }, 
"organization": {"required": True, "type": "string"}, "position": {"required": True, "type": "string"}, "semester": {"required": False, "type": "string"}, "syllabus": {"required": False, "type": "string"}, "video": {"required": False, "type": "string"}, "website": {"required": False, "type": "string"}, "year": {"required": True, "type": "integer"}, }, }, }, "title": { "description": "for example, Dr., etc.", "required": False, "type": "string", }, "todos": { "description": "a list of the todo tasks", "required": False, "type": "list", "schema": { "type": "dict", "schema": { "description": {"description": "the description of the to-do task", "required": True, "type": "string"}, "due_date": {"description": "the due date", "required": False, "anyof_type": ["string", "date"]}, "begin_date": {"description": "the begin date", "required": False, "anyof_type": ["string", "date"]}, "end_date": {"description": "the end date", "required": False, "anyof_type": ["string", "date"]}, "duration": { "description": "the size of the task/ the estimated duration it will take to finish the task. Unit: miniutes.", "required": False, "type": "float"}, "importance": { "description": "the importance, from 0 to 2", "required": False, "type": "integer"}, "status": {"description": "the status: started/finished/cancelled", "required": True, "type": "string"}, "notes": {"description": "additional notes for this task", "required": False, "type": "list", "schema": {"type": "string"} }, "running_index": { "description": "Index of a certain task used to update that task in the enumerated todo list.", "required": False, "type": "integer"}, "assigned_by": { "description": "ID of the member that assigns the task", "required": False, "type": "string"}, } } }, }, "presentations": { "_description": { "description": "This collection describes presentations that group" "members make at conferences, symposia, seminars and" "so on." }, "_id": { "description": "unique id for the presentation", "required": True, "type": "string", }, "abstract": { "description": "abstract of the presentation", "required": False, "type": "string", }, "authors": { "description": "Author list.", "required": True, "anyof_type": ["string", "list"], }, "begin_date": { "description": "begin date in YYYY-MM-DD", "anyof_type": ["date", "string"], }, "end_date": { "description": "end_date in YYYY-MM-DD", "anyof_type": ["date", "string"], }, "begin_year": { "description": "year the conference or trip begins.", "required": False, "type": "integer", }, "begin_month": {"required": False, "anyof_type": ["string", "integer"], }, "begin_day": {"required": False, "type": "integer"}, "department": { "description": "department of the institution where the" "presentation will be made, if " "applicable. should be discoverable in " "institutions.", "required": False, "type": "string", }, "end_year": { "description": "year the conference or trip ends", "required": False, "type": "integer", }, "end_month": {"required": False, "anyof_type": ["string", "integer"], }, "end_day": {"required": False, "type": "integer"}, "institution": { "description": "institution where the" "presentation will be made, if " "applicable.", "required": False, "type": "string", }, "meeting_name": { "description": "full name of the conference or " "meeting. If it is a departmental " "seminar or colloquium, write Seminar" "or Colloquium and fill in department " "and institution fields", "required": False, "type": "string", }, # TODO: conditional validation. 
If type=colloq or seminar, required is # institution and department, otherwise location "location": { "description": "city and {state or country} of meeting", "required": False, "type": "string", }, "notes": { "description": "any reminder or memory aid about anything", "required": False, "anyof_type": ["list", "string"], }, "project": { "description": "project or list of projects that this " "presentation is associated with. Should " "be discoverable in projects collection", "required": False, "anyof_type": ["string", "list"], }, "status": { "description": "Is the application in prep or submitted, " "was the invitation accepted or declined, was " "the trip cancelled?", "required": True, "type": "string", "eallowed": PRESENTATION_STATUS, }, "title": { "description": "title of the presentation", "required": True, "type": "string", }, "type": { "description": "type of presentation", "eallowed": PRESENTATION_TYPE, "required": True, "type": "string", }, "webinar": { "description": "true if a webinar. Default to False", "required": False, "type": "boolean", }, }, "projects": { "_description": { "description": "This collection describes the research group " "projects. This is normally public data." }, "_id": { "description": "Unique project identifier.", "required": True, "type": "string", }, "active": { "description": "true if the project is active", "required": False, "anyof_type": ["string", "boolean"], }, "description": { "description": "brief project description.", "required": True, "type": "string", }, "grant": { "description": "Grant id if there is a grant supporting this " "project", "required": False, "type": "string", }, "group": { "description": "id for the group in the groups collection whose project this is", "required": False, "type": "string", }, "highlights": { "description": "list of things to highlight in a report or website, such as releases for for software or high profile publications", "required": False, "type": "list", "schema": { "type": "dict", "schema": { "year": {"description": "the year of the highlight", "required": True, "type": "integer"}, "month": {"description": "the month of the highlight", "required": True, "anyof_type": ["string", "integer"]}, "description": {"description": "the highlight", "required": True, "type": "string"}, } } }, "logo": { "description": "URL to the project logo", "required": False, "type": "string", }, "name": { "description": "name of the project.", "required": True, "type": "string", }, "other": { "description": "other information about the project", "required": False, "type": ["list", "string"], }, "repo": { "description": "URL of the source code repo, if available", "required": False, "type": "string", }, "team": { "description": "People who are/have been working on this project.", "required": True, "schema": { "type": "dict", "schema": { "begin_month": {"required": False, "anyof_type": ["string", "integer"], }, "begin_year": {"required": True, "type": "integer"}, "end_month": {"required": False, "anyof_type": ["string", "integer"], }, "end_year": {"required": False, "type": "integer"}, "name": {"required": True, "type": "string"}, "position": {"required": True, "type": "string"}, }, }, "type": "list", }, "type": { "description": "The type of project", "required": False, "anyof_type": ["string"], "eallowed": PROJECT_TYPE }, "website": { "description": "URL of the website.", "required": False, "type": "string", }, }, "proposalReviews": { "_description": { "description": "This collection contains reviews of funding proposals" }, "_id": { 
"description": "ID, e.g. 1906_doe_example", "required": True, "type": ("string", "integer", "float"), }, "adequacy_of_resources": { "description": "Are the resources of the PI adequate", "required": True, "type": "list", }, "agency": { "description": "currently nsf or doe", "type": "string", "eallowed": AGENCIES, }, "competency_of_team": { "description": "Is the team competent", "required": True, "type": "list", }, "doe_appropriateness_of_approach": { "description": "Appropriateness of Research. only used if agency is doe.", "required": False, "type": "list", }, "doe_reasonableness_of_budget": { "description": "Reasonableness of budget. only used if agency is doe.", "required": False, "type": "list", }, "doe_relevance_to_program_mission": { "description": "Relevance to program mission. only used if agency is doe.", "required": False, "type": "list", }, "does_how": { "description": "How will the research be done", "required": True, "type": "list", }, "does_what": { "description": "What will the team do", "required": True, "type": "string", }, "due_date": { "description": "date the review is due in ISO format", "required": True, "anyof_type": ["string", "date"], }, "freewrite": { "description": "Anything and this will appear in the built document" "right before the summary. This section often used " "for extra review criteria for the particular proposal", "required": False, "type": "list", }, "goals": { "description": "What are the main goals of the proposed research", "required": True, "type": "list", }, "importance": { "description": "The importance of the Research", "required": True, "type": "list", }, "institutions": { "description": "The institutions of the authors in the same order", "required": True, "anyof_type": ["string", "list"] }, "month": { "description": "The month the review was submitted", "required": True, "anyof_type": ["string", "integer"], }, "names": { "description": "The names of the PIs", "required": True, "anyof_type": ["list", "string"], }, "nsf_broader_impacts": { "description": "The broader impacts of the research. Only used if " "agency is nsf", "required": False, "type": "list", }, "nsf_create_original_transformative": { "description": "Answer to the question how the work is creative, " "original or transformative. Only used if agency is " "nsf", "required": False, "type": "list", }, "nsf_plan_good": { "description": "Is the plan good? Only used if agency is nsf", "required": False, "type": "list", }, "nsf_pot_to_advance_knowledge": { "description": "Answer to the question how the work will advance" "knowledge. Only used if agency is nsf", "required": False, "type": "list", }, "nsf_pot_to_benefit_society": { "description": "Answer to the question how the work has the potential" "to benefit society. Only used if agency is nsf", "required": False, "type": "list", }, "requester": { "description": "Name of the program officer who requested the review", "required": True, "type": "string", }, "reviewer": { "description": "short name of the reviewer. 
Will be used in the " "filename of the resulting text file", "required": True, "type": "string", }, "status": { "description": "the status of the review", "type": "string", "eallowed": REVIEW_STATI, }, "summary": { "description": "Summary statement", "required": True, "type": "string", }, "title": { "description": "The title of the proposal", "required": True, "type": "string", }, "year": { "description": "The year the review was submitted", "required": True, "type": "integer", }, }, "proposals": { "_description": { "description": "This collection represents proposals that have " "been submitted by the group." }, "_id": { "description": "short representation, such as this-is-my-name", "required": True, "type": ("string", "integer", "float"), }, "amount": { "description": "value of award", "required": True, "type": ("integer", "float"), }, "authors": { "description": "other investigator names", "required": False, "anyof_type": ["list", "string"], }, "begin_date": { "description": "start date of the proposed grant in format YYYY-MM-DD", "required": False, "anyof_type": ["string", "date"] }, "begin_day": { "description": "start day of the proposed grant", "required": False, "type": "integer", }, "begin_month": { "description": "start month of the proposed grant", "required": False, "anyof_type": ["string", "integer"] }, "begin_year": { "description": "start year of the proposed grant", "required": False, "type": "integer", }, "call_for_proposals": { "description": "", "required": False, "type": "string", }, "cpp_info": { "description": "extra information needed for building current and " "pending form ", "required": False, "schema": { "cppflag": {"required": False, "type": "boolean"}, "other_agencies_submitted": {"required": False, "anyof_type": ["string", "boolean"]}, "institution": {"required": False, "type": "string", "description": "place where the proposed grant will be located"}, "person_months_academic": {"required": False, "anyof_type": ["float", "integer"]}, "person_months_summer": {"required": False, "anyof_type": ["float", "integer"]}, "project_scope": {"required": False, "type": "string"}, "single_pi": {"required": False, "type": "boolean", "description": "set to true if there are no co-pi's"}, }, "type": "dict", }, "currency": { "description": "typically '$' or 'USD'", "required": True, "type": "string", }, "due_date": { "description": "day that the proposal is due", "required": False, "anyof_type": ["string", "date"], }, "duration": { "description": "number of years", "required": False, "type": ("integer", "float"), }, "end_date": { "description": "end date of the proposed grant in format YYYY-MM-DD", "required": False, "anyof_type": ["string", "date"] }, "end_day": { "description": "end day of the proposed grant", "required": False, "type": ("string", "integer"), }, "end_month": { "description": "end month of the proposed grant", "required": False, "anyof_type": ["string", "integer"] }, "end_year": { "description": "end year of the proposed grant", "required": False, "type": "integer", }, "funder": { "description": "who will fund the proposal" "as funder in grants", "required": False, "type": "string", }, "full": { "description": "full body of the proposal", "required": False, "type": "dict", }, "notes": { "description": "anything you want to note", "required": False, "anyof_type": ["string", "list"], }, "pi": { "description": "principal investigator name", "required": True, "type": "string", }, "pre": { "description": "Information about the pre-proposal", "required": False, 
"type": "dict", }, "status": { "description": "e.g. 'pending', 'accepted', 'declined'", "required": True, "type": "string", "eallowed": PROPOSAL_STATI, }, "submitted_date": { "description": "date that the proposal was submitted", "required": False, "anyof_type": ["string", "date"], }, "submitted_day": { "description": "day that the proposal was submitted", "required": False, "type": "integer", }, "submitted_month": { "description": "month that the proposal was submitted", "required": False, "anyof_type": ["string", "integer"] }, "submitted_year": { "description": "Year that the proposal was submitted", "required": False, "type": "integer", }, "team": { "description": "information about the team members participating " "in the grant.", "required": False, "schema": { "schema": { "cv": {"required": False, "type": "string"}, "email": {"required": False, "type": "string"}, "institution": {"required": False, "type": "string"}, "name": {"required": False, "type": "string"}, "position": {"required": False, "type": "string"}, "subaward_amount": { "required": False, "type": ("integer", "float"), }, }, "type": "dict", }, "type": "list", }, "title": { "description": "actual title of proposal", "required": True, "type": "string", }, "title_short": { "description": "short title of proposal", "required": False, "type": "string", }, }, "refereeReports": { "_description": { "description": "This is a collection of information that will be " "be used to build a referee report. This should probably be private." }, "_id": {"description": "the ID", "required": True, "type": "string"}, "claimed_found_what": { "description": "What the authors claim to have found", "required": True, "schema": {"type": "string", "required": True}, "type": "list", }, "claimed_why_important": { "description": "What importance the authors claim", "required": True, "schema": {"type": "string", "required": True}, "type": "list", }, "did_how": { "description": "How the study was done", "required": True, "schema": {"type": "string", "required": True}, "type": "list", }, "did_what": { "description": "What the study was", "required": True, "schema": {"type": "string", "required": True}, "type": "list", }, "due_date": { "description": "date the review is due in ISO format", "required": True, "anyof_type": ["string", "date"], }, "editor_eyes_only": { "description": "Comments you don't want passed to the author", "required": False, "type": "string", }, "final_assessment": { "description": "Summary of impressions of the study", "required": True, "schema": {"type": "string", "required": True}, "type": "list", }, "first_author_last_name": { "description": "Last name of first author will be referred to " "with et al.", "required": True, "type": "string", }, "freewrite": { "description": "Things that you want to add that don't fit into " "any category above", "required": False, "type": "string", }, "journal": { "description": "name of the journal", "required": True, "type": "string", }, "month": { "description": "the month the entry was created", "required": False, "anyof_type": ["string", "integer"] }, "recommendation": { "description": "Your publication recommendation", "required": True, "type": "string", "eallowed": REVIEW_RECOMMENDATION, }, "requester": { "description": "Name of the program officer who requested the review", "required": True, "type": "string", }, "reviewer": { "description": "name of person reviewing the paper", "required": True, "type": "string", }, "status": { "description": "Where you are with the review", "required": True, 
"type": "string", "eallowed": REVIEW_STATI, }, "submitted_date": { "description": "submitted date in ISO YYYY-MM-DD format", "required": True, "anyof_type": ["string", "date"], }, "title": { "description": "title of the paper under review", "required": True, "type": "string", }, "validity_assessment": { "description": "List of impressions of the validity of the claims", "required": True, "schema": {"type": "string", "required": True}, "type": "list", }, "year": { "description": "year when the review is being done", "required": True, "anyof_type": ["string", "integer"], }, }, "students": { "_description": { "description": "This is a collection of student names and " "metadata. This should probably be private." }, "_id": { "description": "short representation, such as this-is-my-name", "required": True, "type": "string", }, "aka": { "description": "list of aliases", "required": False, "schema": {"type": "string"}, "type": ("list", "string"), }, "email": {"description": "email address", "required": False, "type": "string"}, "university_id": { "description": "The university identifier for the student", "required": False, "type": "string", }, }, } for s in SCHEMAS: SCHEMAS[s]["files"] = { "description": "Files associated with the document", # TODO: fix this since this is currently comming out a CommentedMap # "type": "list", # "schema": {"type": "string"}, "required": False, } class NoDescriptionValidator(Validator): def _validate_description(self, description, field, value): """Don't validate descriptions The rule's arguments are validated against this schema: {'type': 'string'}""" if False: pass def _validate_eallowed(self, eallowed, field, value): """Test if value is in list The rule's arguments are validated against this schema: {'type': 'list'} """ if value not in eallowed: warn( '"{}" is not in the preferred entries for "{}", please ' "consider changing this entry to conform or add this to the " "``eallowed`` field in the schema.".format(value, field) ) def validate(coll, record, schemas): """Validate a record for a given db Parameters ---------- coll : str The name of the db in question record : dict The record to be validated schemas : dict The schema to validate against Returns ------- rtn : bool True is valid errors: dict The errors encountered (if any) """ if coll in schemas: schema = copy.deepcopy(schemas[coll]) v = NoDescriptionValidator(schema) return v.validate(record), v.errors else: return True, ()
39.058422
145
0.424287
import copy from warnings import warn from cerberus import Validator from .sorters import POSITION_LEVELS SORTED_POSITION = sorted(POSITION_LEVELS.keys(), key=POSITION_LEVELS.get) ACTIVITIES_TYPE = ["teaching", "research"] AGENCIES = ["nsf", "doe"] APPOINTMENTS_TYPE = ["gra", "ss", "pd", "ug"] COMMITTEES_TYPE = ["phdoral", "phddefense", "phdproposal", "promotion"] COMMITTEES_LEVEL = ["department", "school", "university", "external"] EXPENSES_TYPE = ["unsubmitted", "submitted", "reimbursed"] FACILITIES_TYPE = ["teaching", "research", "shared", "other", "teaching_wish", "research_wish"] POSITION_STATUS = ["pi", "adjunct", "high-school", "undergrad","ms", "phd", "postdoc","visitor-supported","visitor-unsupported"] PRESENTATION_TYPE = ["award", "colloquium", "contributed_oral", "invited", "keynote", "plenary", "poster", "seminar", "tutorial"] PRESENTATION_STATUS = ["in-prep", "submitted", "accepted", "declined", "cancelled", "postponed"] PROJECT_TYPE = ["ossoftware", "funded"] PROPOSAL_STATI = ["pending", "declined", "accepted", "inprep", "submitted"] PUBLICITY_TYPE = ["online", "article"] REVIEW_STATI = ["invited", "accepted", "declined", "downloaded", "inprogress", "submitted", "cancelled"] REVIEW_RECOMMENDATION = ["reject", "asis", "smalledits", "diffjournal", "majoredits"] SERVICE_TYPE = ["profession", "university", "school", "department"] EXEMPLARS = { "abstracts": { "_id": "Mouginot.Model", "coauthors": "P.P.H. Wilson", "email": "mouginot@wisc.edu", "firstname": "Baptiste", "institution": "University of Wisconsin-Madison", "lastname": "Mouginot", "references": "[1] B. MOUGINOT, “cyCLASS: CLASS " "models for Cyclus,”, Figshare, " "https://dx.doi.org/10.6084/" "m9.figshare.3468671.v2 (2016).", "text": "The CLASS team has developed high " "quality predictors based on pre-trained " "neural network...", "timestamp": "5/5/2017 13:15:59", "title": "Model Performance Analysis", }, "assignments": { "_id": "hw01-rx-power", "category": "homework", "courses": ["EMCH-558-2016-S", "EMCH-758-2016-S"], "points": [1, 2, 3], "questions": ["1-9", "1-10", "1-12"], }, "beamplan": { '_id': "test", 'beamtime': '2020-1-XPD', 'begin_date': '2020-01-01', 'end_date': '2020-01-02', 'devices': ['cryostream'], 'exp_plan': ['load samples on the holder', 'scan the holder to locate the samples', 'take room temperature measurement of sample and the subtrate', 'ramp down temperature to 100K', 'ramp up, measure PDF at temperature 100K ~ 300K, 10K stepsize, 1 min exposure'], 'holder': 'film holder (1 cm * 1 cm * 1 mm)', 'measurement': 'Tramp', 'objective': 'temperature ramping PDF of one WO3 film (100, 300K, 10K)', 'pipeline': 'usual', 'prep_plan': ['films will be made by kriti'], 'project': '20ks_wo3', 'project_lead': 'kseth', 'samples': ['WO3 film', 'glass subtrate'], 'scanplan': ['Scanplan(bt, Tramp, 30, 80, 500, 10)'], 'ship_plan': ['seal and ship to CU', 'carry to the beamline'], 'time': 190, 'todo': ["todo something"]}, "beamtime": { "_id": "2020-1-XPD", "begin_date": "2020-02-14", "begin_time": "8:00 am", "end_date": "2020-02-17", "end_time": "8:00 am" }, "blog": { "_id": "my-vision", "author": "Anthony Scopatz", "day": 18, "month": "September", "original": "https://scopatz.com/my-vision/", "post": "I would like see things move forward. 
Deep, I know!", "title": "My Vision", "year": 2015, }, "citations": { "_id": "meurer2016sympy", "author": [ "Meurer, Aaron", "Smith, Christopher P", "Paprocki, Mateusz", "{\\v{C}}ert{\\'\\i}k, Ond{\\v{r}}ej", "Rocklin, Matthew", "Kumar, AMiT", "Ivanov, Sergiu", "Moore, Jason K", "Singh, Sartaj", "Rathnayake, Thilina", "Sean Vig", "Brian E Granger", "Richard P Muller", "Francesco Bonazzi", "Harsh Gupta", "Shivam Vats", "Fredrik Johansson", "Fabian Pedregosa", "Matthew J Curry", "Ashutosh Saboo", "Isuru Fernando", "Sumith Kulal", "Robert Cimrman", "Anthony Scopatz", ], "doi": "10.1021/nn501591g", "entrytype": "article", "journal": "PeerJ Computer Science", "month": "Jan", "pages": "e103", "publisher": "PeerJ Inc. San Francisco, USA", "synopsis": "The description of symbolic computing in Python", "tags": "pdf", "title": "SymPy: Symbolic computing in Python", "volume": "4", "year": "2017", }, "contacts": { "_id": "afriend", "aka": [ "A. B. Friend", "AB Friend", "Tony Friend" ], "department": "physics", "email": "friend@deed.com", "institution": "columbiau", "name": "Anthony B Friend", "notes": ["The guy I meet for coffee sometimes"], "title": "Mr.", "month": "January", "year": 2020, "day": 15, "uuid": "76f2a4c7-aa63-4fa3-88b5-396b0c15d368", }, "courses": { "_id": "EMCH-552-2016-F", "active": False, "department": "EMCH", "number": 552, "scale": [ [0.875, "A"], [0.8125, "B+"], [0.75, "B"], [0.6875, "C+"], [0.625, "C"], [0.5625, "D+"], [0.5, "D"], [-1.0, "F"], ], "season": "F", "students": ["Human A. Person", "Human B. Person"], "syllabus": "emch552-2016-f-syllabus.pdf", "weights": { "class-notes": 0.15, "final": 0.3, "homework": 0.35, "midterm": 0.2, }, "year": 2016, }, "expenses": { "_id": "test", "expense_type": "business", "grant_percentages": ["50", "50"], "grants": ["dmref15", "SymPy-1.1"], "itemized_expenses": [ { "day": i, "month": "Jan", "year": 2018, "purpose": "test", "unsegregated_expense": 10 * i, "segregated_expense": 0, } for i in range(1, 11) ], "payee": "scopatz", "project": "Cyclus", "overall_purpose": "testing the databallectionsse", }, "grades": { "_id": "Human A. 
Person-rx-power-hw02-EMCH-758-2017-S", "student": "hap", "assignment": "2017-rx-power-hw02", "course": "EMCH-758-2017-S", "scores": [1, 1.6, 3], }, "grants": [ { "_id": "SymPy-1.1", "amount": 3000.0, "alias": "sym", "begin_day": 1, "begin_month": "May", "begin_year": 2030, "call_for_proposals": "https://groups.google.com/d/msg" "/numfocus/wPjhdm8NJiA/S8JL1_NZDQAJ", "end_day": 31, "end_month": "December", "end_year": 2030, "funder": "NumFOCUS", "narrative": "https://docs.google.com/document/d/1nZxqoL" "-Ucni_aXLWmXtRDd3IWqW0mZBO65CEvDrsXZM/edit?usp" "=sharing", "program": "Small Development Grants", "team": [ { "institution": "University of South Carolina", "name": "Anthony Scopatz", "position": "pi", }, { "institution": "University of South Carolina", "name": "Aaron Meurer", "position": "researcher", }, ], "status": "pending", "title": "SymPy 1.1 Release Support", "budget": [ {"begin_date": "2030-05-01", "end_date": "2030-06-30", "student_months": 0.5, "postdoc_months": 0.0, "ss_months": 1.0, "amount": 1000.0, }, {"begin_date": "2030-07-01", "end_date": "2030-09-30", "student_months": 1.5, "postdoc_months": 0.0, "ss_months": 2.0, "amount": 1000.0, }, {"begin_date": "2030-10-01", "end_date": "2030-12-31", "student_months": 3.0, "postdoc_months": 0.0, "ss_months": 0.0, "amount": 1000.0, }, ], "proposal_id": "SymPy-1.1" }, { "_id": "SymPy-2.0", "amount": 3000.0, "alias": "sym2.0", "begin_day": 1, "begin_month": 6, "begin_year": 2019, "call_for_proposals": "https://groups.google.com/d/msg" "/numfocus/wPjhdm8NJiA/S8JL1_NZDQAJ", "end_day": 31, "end_month": "December", "end_year": 2030, "funder": "NumFOCUS", "narrative": "https://docs.google.com/document/d/1nZxqoL" "-Ucni_aXLWmXtRDd3IWqW0mZBO65CEvDrsXZM/edit?usp" "=sharing", "program": "Small Development Grants", "team": [ { "institution": "University of South Carolina", "name": "Anthony Scopatz", "position": "pi", }, { "institution": "University of South Carolina", "name": "Aaron Meurer", "position": "researcher", }, ], "status": "pending", "title": "SymPy 1.1 Release Support", "budget": [ {"begin_date": "2019-06-01", "end_date": "2024-12-31", "student_months": 12.0, "postdoc_months": 24.0, "ss_months": 14.0, "amount": 1500.0, }, {"begin_date": "2025-01-01", "end_date": "2030-12-31", "student_months": 12.0, "postdoc_months": 24.0, "ss_months": 0.0, "amount": 1500.0, }, ], "proposal_id": "SymPy-2.0", }, { "_id": "dmref15", "alias": "dmref15", "account": "GG012345", "amount": 982785.0, "funder": "NSF", "grant_id": "DMREF-1534910", "institution": "Columbia University", "notes": "Designing Materials to Revolutionize and Engineer our " "Future (DMREF)", "person_months_academic": 0.0, "person_months_summer": 0.25, "program": "DMREF", "scope": "This grant is to develop complex modeling methods for regularizing " "ill-posed nanostructure inverse problems using data analytic and " "machine learning based approaches. 
This does not overlap with any " "other grant.", "team": [ { "institution": "Columbia University", "name": "qdu", "position": "co-pi", }, { "institution": "Columbia University", "name": "dhsu", "position": "co-pi", }, { "institution": "Columbia University", "name": "Anthony Scopatz", "position": "pi", "subaward_amount": 330000.0, }, ], "title": "DMREF: Novel, data validated, nanostructure determination " "methods for accelerating materials discovery", "budget": [ {"begin_date": "2018-05-01", "end_date": "2018-09-30", "student_months": 12.0, "postdoc_months": 0.0, "ss_months": 6.0, "amount": 327595.0, }, {"begin_date": "2018-10-01", "end_date": "2019-01-30", "student_months": 8.0, "postdoc_months": 0.0, "ss_months": 12.0, "amount": 327595.0, }, {"begin_date": "2019-02-01", "end_date": "2019-05-01", "student_months": 12.0, "postdoc_months": 0.0, "ss_months": 6.0, "amount": 327595.0, }, ], "proposal_id": "dmref15" }, {"_id": "abc42", "alias": "abc42", "amount": 42000.0, "begin_date": "2020-06-01", "end_date": "2020-12-31", "funder": "Life", "program": "Metaphysical Grants", "team": [ {"institution": "University of Pedagogy", "name": "Chief Pedagogue", "position": "pi" }, {"institution": "University of Pedagogy", "name": "Pedagogue Jr.", "position": "co-pi" }, ], "title": "The answer to life, the universe, and everything", "budget": [ {"begin_date": "2020-06-01", "end_date": "2020-12-31", "student_months": 0.0, "postdoc_months": 0.0, "ss_months": 1.0, "amount": 42000.0, } ], "proposal_id": "abc42", }, {"_id": "ta", "amount": 0.0, "begin_date": "2020-06-01", "end_date": "2020-12-31", "funder": "Life", "program": "Underground Grants", "team": [ {"institution": "Ministry of Magic", "name": "Chief Witch", "position": "pi" }, {"institution": "Ministry of Magic", "name": "Chief Wizard", "position": "co-pi" }, ], "title": "Support for teaching assistants", "budget": [ {"begin_date": "2020-06-01", "end_date": "2020-08-30", "student_months": 0.0, "postdoc_months": 0.0, "ss_months": 0.0, "amount": 0.0, } ] }, ], "groups": { "_id": "ergs", "pi_name": "Anthony Scopatz", "department": "Mechanical Engineering", "institution": "University of South Carolina", "name": "ERGS", "aka": ["Energy Research Group Something", "Scopatz Group"], "website": "www.ergs.sc.edu", "mission_statement": """<b>ERGS</b>, or <i>Energy Research Group: Scopatz</i>, is the Computational <a href="http://www.me.sc.edu/nuclear/">Nuclear Engineering</a> research group at the <a href="http://sc.edu/">University of South Carolina</a>. Our focus is on uncertainty quantification & predictive modeling, nuclear fuel cycle simulation, and improving nuclear engineering techniques through automation. We are committed to open & accessible research tools and methods.""", "projects": """ERGS is involved in a large number of computational projects. Please visit the <a href="projects.html">projects page</a> for more information! """, "email": "<b>scopatz</b> <i>(AT)</i> <b>cec.sc.edu</b>", }, "institutions": [{ "_id": "columbiau", "aka": ["Columbia University", "Columbia"], "city": "New York", "country": "USA", "day": 30, "departments": { "physics": { "name": "Department of Physics", "aka": ["Dept. of Physics", "Physics"], }, "chemistry": { "name": "Department of Chemistry", "aka": ["Chemistry", "Dept. 
of Chemistry"], }, "apam": { "name": "Department of Applied Physics " "and Applied Mathematics", "aka": ["APAM"], }, }, "month": "May", "name": "Columbia University", "schools": { "seas": { "name": "School of Engineering and " "Applied Science", "aka": [ "SEAS", "Columbia Engineering", "Fu Foundation School of Engineering " "and Applied Science", ], } }, "state": "NY", "street": "500 W 120th St", "updated": "2020-05-30", "uuid": "avacazdraca345rfsvwre", "year": 2020, "zip": "10027", }, { "_id": "usouthcarolina", "aka": ["The University of South Carolina"], "city": "Columbia", "country": "USA", "day": 30, "departments": { "physics": { "name": "Department of Physics", "aka": ["Dept. of Physics", "Physics"], }, "chemistry": { "name": "Department of Chemistry", "aka": ["Chemistry", "Dept. of Chemistry"], }, "apam": { "name": "Department of Applied Physics" "and Applied Mathematics", "aka": ["APAM"], }, "mechanical engineering": { "name": "Department of Mechanical Engineering", "aka": ["Mechanical", "Dept. of Mechanical"], } }, "month": "May", "name": "The University of South Carolina", "schools": { "cec": { "name": "College of Engineering and" "Computing", "aka": [ "CEC", "College of Engineering and Computing", ], } }, "state": "SC", "street": "1716 College Street", "updated": "2020-06-30", "uuid": "4E89A0DD-19AE-45CC-BCB4-83A2D84545E3", "year": 2020, "zip": "29208", }, ], "jobs": { "_id": "0004", "background_fields": [ "Data Science", "Data Engineering", "Computer Engineering", "Computer Science", "Applied Mathematics", "Physics", "Nuclear Engineering", "Mechanical Engineering", "Or similar", ], "compensation": [ "Salary and compensation will be based on prior work " "experience." ], "contact": "Please send CV or resume to Prof. Scopatz at " "scopatzATcec.sc.edu.", "day": 1, "description": "<p>We are seeking a dedicated individual to " "help to aid in ...", "month": "July", "open": False, "positions": ["Scientific Software Developer", "Programmer"], "start_date": "ASAP", "title": "Open Source Scientific Software Maintainer", "year": 2015, }, "meetings": [{ "_id": "grp1000-01-01", "actions": [ "(Everyone) Update overdue milestones", "(Professor Billinge) Explore, and plan a machine learning project for DSI" "(Professor Billinge, Emil, Yevgeny, Songsheng) Come up with a Kaggle competition for this DSI project" "(Emil) Set up the slack channel for the DSI project" ], "agenda": ["Review actions", "Fargo is not free on any streaming platforms", "Review Airtable for deliverables and celebrate", "Mention diversity action initiative", "Songsheng's journal club presentation", "(Vivian and Zicheng) Finish rest of crystallography presentation next week", "Emil's 7th inning Yoga Stretch", "Crystallography talk", "Presentation"], "buddies": [ " Jaylyn C. Umana, " " Simon J. L. 
Billinge", " Long Yang, " " Emil Kjaer", " Sani Harouna-Mayer," " Akshay Choudhry", " Vivian Lin, " " Songsheng Tao", " Ran Gu, " " Adiba Ejaz", " Zach Thatcher, " " Yevgeny Rakita", " Zicheng 'Taylor' Liu, " " Eric Shen ", " Hung Vuong, " " Daniela Hikari Yano", " Ahmed Shaaban, " " Jiawei Zang", " Berrak Ozer, " " Michael Winitch", " Shomik Ghose", ], "day": 1, "journal_club": { "doi": "10.1107/S2053273319005606", "presenter": "sbillinge", }, "lead": "sbillinge", "minutes": [ "Talked about eyesight and prescription lenses", "Professor Billinge tells everyone a Logician/Mathematician joke", "Mentioned pyjokes, a package in Python that lists bad jokes", "Jaylyn greets everyone", "Reviewed action items from last time", "Talked about fargo, and the merits (or lack thereof) of the Dakotas", "Celebrated finished prums", "Songhsheng holds journal club presentation on Machine Learning techniques", "Discussed Linear Classification, Gradient Descent, Perceptrons, Convolution and other ML topics", "Discussed how we can derive scientific meaning from ML algorithms", "Discussed real space versus reciprocal space", "Finished journal club, had to postpone Akshay's presentation, and the Yoga session to next week", ], "month": 1, "place": "Mudd 1106", "presentation": { "title": "PDF Distance Extraction", "link": "2007ac_grpmtg", "presenter": "sbillinge", }, "scribe": "sbillinge", "time": '0', "updated": "2020-07-31 23:27:50.764475", "uuid": "3fbee8d9-e283-48e7-948f-eecfc2a123b7", "year": 1000 }, { "_id": "grp2020-07-31", "actions": [ "(Everyone) Update overdue milestones", "(Professor Billinge) Explore, and plan a machine learning project for DSI" "(Professor Billinge, Emil, Yevgeny, Songsheng) Come up with a Kaggle competition for this DSI project" "(Emil) Set up the slack channel for the DSI project" ], "agenda": ["Review actions", "Fargo is not free on any streaming platforms", "Review Airtable for deliverables and celebrate", "Mention diversity action initiative", "Songsheng's journal club presentation", "(Vivian and Zicheng) Finish rest of crystallography presentation next week", "Emil's 7th inning Yoga Stretch", "Crystallography talk", "Presentation"], "buddies": [ " Jaylyn C. Umana, " " Simon J. L. 
Billinge", " Long Yang, " " Emil Kjaer", " Sani Harouna-Mayer," " Akshay Choudhry", " Vivian Lin, " " Songsheng Tao", " Ran Gu, " " Adiba Ejaz", " Zach Thatcher, " " Yevgeny Rakita", " Zicheng 'Taylor' Liu, " " Eric Shen ", " Hung Vuong, " " Daniela Hikari Yano", " Ahmed Shaaban, " " Jiawei Zang", " Berrak Ozer, " " Michael Winitch", " Shomik Ghose", ], "day": 1, "journal_club": { "doi": "10.1107/S2053273319005606", "presenter": "sbillinge", }, "lead": "sbillinge", "minutes": [ "Talked about eyesight and prescription lenses", "Professor Billinge tells everyone a Logician/Mathematician joke", "Mentioned pyjokes, a package in Python that lists bad jokes", "Jaylyn greets everyone", "Reviewed action items from last time", "Talked about fargo, and the merits (or lack thereof) of the Dakotas", "Celebrated finished prums", "Songhsheng holds journal club presentation on Machine Learning techniques", "Discussed Linear Classification, Gradient Descent, Perceptrons, Convolution and other ML topics", "Discussed how we can derive scientific meaning from ML algorithms", "Discussed real space versus reciprocal space", "Finished journal club, had to postpone Akshay's presentation, and the Yoga session to next week", ], "month": 1, "place": "Mudd 1106", "presentation": { "title": "PDF Distance Extraction", "link": "2007ac_grpmtg", "presenter": "sbillinge", }, "scribe": "sbillinge", "time": '0', "updated": "2020-07-31 23:27:50.764475", "uuid": "3fbee8d9-e283-48e7-948f-eecfc2a123b7", "year": 7000 } ], "news": { "_id": "56b4eb6d421aa921504ef2a9", "author": "Anthony Scopatz", "body": "Dr. Robert Flanagan joined ERGS as a post-doctoral " "scholar.", "day": 1, "month": "February", "year": 2016, }, "people": [{ "_id": "scopatz", "aka": [ "Scopatz", "Scopatz, A", "Scopatz, A.", "Scopatz, A M", "Anthony Michael Scopatz", ], "avatar": "https://avatars1.githubusercontent.com/u/320553?v" "=3&s=200", "appointments": { "f19": { "begin_year": 2019, "begin_month": 9, "begin_day": 1, "end_year": 2019, "end_month": 10, "end_day": 31, "grant": "dmref15", "type": "pd", "loading": 0.75, "status": "finalized", "notes": ["forgetmenot"] }, "s20": { "begin_date": "2020-01-01", "end_date": "2020-05-15", "grant": "sym", "type": "pd", "loading": 1.0, "status": "finalized", "notes": ["fully appointed", "outdated grant"] }, "ss20": { "begin_date": "2020-06-01", "end_date": "2020-08-31", "grant": "abc42", "type": "ss", "loading": 0.8, "status": "proposed", "notes": [] } }, "bio": "Anthony Scopatz is currently an Assistant Professor", "bios": ["Anthony Scopatz is currently an Assistant Professor but will go on to do great things"], "committees": [{ "name": "Heather Stanford", "type": "phdoral", "year": 2020, "month": 3, "day": 1, "level": "department", "unit": "apam" }, {"name": "Heather Stanford", "type": "promotion", "year": 2020, "month": 3, "day": 1, "level": "school", "unit": "seas" }, {"name": "Heather Stanford", "type": "phddefense", "year": 2020, "month": 3, "day": 1, "notes": "something else to remember about it, not published", "level": "external", "unit": "U Denmark" }, {"name": "Heather Stanford", "type": "promotion", "year": 2020, "month": 3, "day": 1, "unit": "columbiau", "level": "university", }], "education": [ { "advisor": "ascopatz", "begin_year": 2008, "degree": "Ph.D. Mechanical Engineering, " "Nuclear and Radiation Engineering " "Program", "end_year": 2011, "group": "ergs", "institution": "The University of Texas at Austin", "department": "apam", "location": "Austin, TX", "other": [ "Adviser: Erich A. 
Schneider", "Dissertation: Essential Physics for Fuel Cycle " "Modeling & Analysis", ], }, { "begin_year": 2006, "degree": "M.S.E. Mechanical Engineering, Nuclear and " "Radiation Engineering Program", "end_year": 2007, "institution": "The University of Texas at Austin", "location": "Austin, TX", "other": [ "Adviser: Erich A. Schneider", "Thesis: Recyclable Uranium Options under the Global " "Nuclear Energy Partnership", ], }, { "begin_year": 2002, "begin_month": "Sep", "begin_day": 1, "degree": "B.S. Physics", "end_year": 2006, "end_month": 5, "end_day": 20, "institution": "University of California, Santa Barbara", "location": "Santa Barbara, CA", "other": [ "Graduated with a Major in Physics and a Minor in " "Mathematics" ], }, { "begin_year": 2008, "degree": "ongoing", "group": "life", "institution": "solar system", "department": "earth", "location": "land, mostly", }, ], "email": "scopatz@cec.sc.edu", "employment": [ { "advisor": "ascopatz", "begin_year": 2015, "coworkers": ["afriend"], "group": "ergs", "location": "Columbia, SC", "organization": "The University of South Carolina", "other": [ "Cyclus: An agent-based, discrete time nuclear fuel " "cycle simulator.", "PyNE: The Nuclear Engineering Toolkit.", "Website: http://www.ergs.sc.edu/", ], "permanent": True, "position": "assistant professor", "position_full": "Assistant Professor, Mechanical Engineering " "Department", }, { "begin_year": 2013, "begin_month": "Jun", "begin_day": 1, "end_year": 2015, "end_month": 3, "end_day": 15, "location": "Madison, WI", "organization": "CNERG, The University of " "Wisconsin-Madison", "department": "Physics", "other": [ "Cyclus: An agent-based, discrete time nuclear fuel " "cycle simulator.", "PyNE: The Nuclear Engineering Toolkit.", "Website: https://cnerg.github.io/", ], "position": "associate scientist", "position_full": "Associate Scientist, Engineering Physics " "Department", }, { "begin_day": 1, "begin_month": "Nov", "begin_year": 2011, "end_month": "May", "end_year": 2013, "location": "Chicago, IL", "organization": "The FLASH Center, The University of " "Chicago", "other": [ "NIF: Simulation of magnetic field generation from " "neutral plasmas using FLASH.", "CosmoB: Simulation of magnetic field generation " "from neutral plasmas using FLASH.", "FLASH4: High-energy density physics capabilities " "and utilities.", "Simulated Diagnostics: Schlieren, shadowgraphy, " "Langmuir probes, etc. 
from FLASH.", "OpacPlot: HDF5-based equation of state and opacity " "file format.", "Website: http://flash.uchicago.edu/site/", ], "position": "post-doctoral scholar", "position_full": "Research Scientist, Postdoctoral Scholar", "status": "pi" }, ], "funding": [ { "name": "Omega Laser User's Group Travel Award", "value": 1100, "year": 2013, }, {"name": "NIF User's Group Travel Award", "value": 1150, "year": 2013}, ], "google_scholar_url": "https://scholar.google.com/citations?user=dRm8f", "github_id": "ascopatz", "hindex": [{ "h": 25, "h_last_five": 46, "citations": 19837, "citations_last_five": 9419, "origin": "Google Scholar", "since": 1991, "year": 2020, "month": 2, "day": 19 }], "home_address": { "street": "123 Wallabe Ln", "city": "The big apple", "state": "plasma", "zip": "007", }, "initials": "AMS", "membership": [ { "begin_year": 2006, "organization": "American Nuclear Society", "position": "Member", }, { "begin_year": 2013, "organization": "Python Software Foundation", "position": "Fellow", }, ], "name": "Anthony Scopatz", "orcid_id": "0000-0002-9432-4248", "position": "professor", "research_focus_areas": [ {"begin_year": 2010, "description": "software applied to nuclear " "engineering and life"} ], "service": [{ "name": "International Steering Committee", "role": "chair", "type": "profession", "year": 2020, "month": 3, "notes": ["something"], }, { "name": "National Steering Committee", "type": "profession", "begin_year": 2018, "end_year": 2021, "notes": "something", }, ], "skills": [ {"category": "Programming Languages", "level": "expert", "name": "Python"}, {"category": "Programming Languages", "level": "expert", "name": "Cython"}, ], "teaching": [ { "course": "EMCH 552: Intro to Nuclear Engineering", "courseid": "EMCH 552", "description": "This course is an introduction to nuclear " "physics.", "enrollment": "tbd", "month": "August", "organization": "University of South Carolina", "position": "professor", "semester": "Spring", "syllabus": "https://drive.google.com/open?id" "=0BxUpd34yizZreDBCMEJNY2FUbnc", "year": 2017, }, { "course": "EMCH 558/758: Reactor Power Systems", "courseid": "EMCH 558", "description": "This course covers conventional " "reactors.", "enrollment": 28, "evaluation": { "response_rate": 66.76, "amount_learned": 3.5, "appropriateness_workload": 3.15, "course_overall": 3.67, "fairness_grading": 3.54, "organization": 3.25, "classroom_delivery": 4, "approachability": 4.3, "instructor_overall": 3.5, "comments": ["super duper", "dandy"] }, "month": "January", "organization": "University of South Carolina", "position": "professor", "syllabus": "https://docs.google.com/document/d" "/1uMAx_KFZK9ugYyF6wWtLLWgITVhaTBkAf8" "-PxiboYdM/edit?usp=sharing", "year": 2017, }, ], "title": "Dr.", }, { "_id": "sbillinge", "active": True, "activities": [{ "type": "teaching", "name": "course development", "year": 2018, "other": "Developed a new course for Materials Science" }], "aka": [ "Billinge", ], "avatar": "https://avatars1.githubusercontent.com/u/320553?v" "=3&s=200", "bio": "Simon teaches and does research", "committees": [{ "name": "Same Old", "type": "phddefense", "year": 2018, "unit": "Materials Science", "level": "department", "notes": "something" }], "education": [ { "begin_year": 2008, "degree": "Ph.D. Mechanical Engineering, " "Nuclear and Radiation Engineering " "Program", "end_year": 2011, "group": "ergs", "institution": "The University of Texas at Austin", "department": "apam", "location": "Austin, TX", "other": [ "Adviser: Erich A. 
Schneider", "Dissertation: Essential Physics for Fuel Cycle " "Modeling & Analysis", ], }, ], "email": "sb2896@columbia.edu", "employment": [ { "begin_year": 2015, "group": "ergs", "location": "Columbia, SC", "organization": "The University of South Carolina", "other": [ "Cyclus: An agent-based, discrete time nuclear fuel " "cycle simulator.", "PyNE: The Nuclear Engineering Toolkit.", "Website: http://www.ergs.sc.edu/", ], "position": "assistant professor", }, ], "facilities": [{ "type": "other", "name": "Shared {Habanero} compute cluster", "begin_year": 2015 }, { "type": "research_wish", "name": "Shared access to wet lab", "begin_year": 2015 }, { "type": "teaching", "name": "Courseworks2", "begin_year": 2017 }, { "type": "teaching_wish", "name": "nothing right now", "begin_year": 2019 }, { "type": "research", "name": "I don't have one", "begin_year": 2008 }, ], "funding": [ { "name": "Omega Laser User's Group Travel Award", "value": 1100, "year": 2013, }, {"name": "NIF User's Group Travel Award", "value": 1150, "year": 2013}, ], "google_scholar_url": "https://scholar.google.com/citations?user=dRm8f", "grp_mtg_active": True, "hindex": [{ "h": 65, "h_last_five": 43, "citations": 17890, "citations_last_five": 8817, "origin": "Google Scholar", "since": 1991, "year": 2019, "month": "May", "day": 12, }], "office": "1105 Seely W. Mudd Building (inner office)", "home_address": { "street": "123 Wallabe Ln", "city": "The big apple", "state": "plasma", "zip": "007", }, "initials": "SJLB", "membership": [ { "begin_year": 2006, "organization": "American Nuclear Society", "position": "Member", }, ], "miscellaneous": { "metrics_for_success": [ "publications(quality, quantity)", "invite talks", "funding", "citations", ], }, "name": "Simon J. L. Billinge", "orcid_id": "0000-0002-9432-4248", "position": "professor", "publicity": [{ "type": "online", "publication": "Brookhaven National Laboratory Web Story", "topic": "LDRD Provenance project", "title": "An awesome project and well worth the money", "day": 24, "month": "Jul", "year": 2019, "grant": "bnlldrd18", "url": "http://www.google.com" }, ], "research_focus_areas": [ {"begin_year": 2010, "description": "software applied to materials " "engineering and life"} ], "service": [ { "type": "profession", "name": "Master of Ceremonies and Organizer Brown University " '"Chemistry: Believe it or Not" public chemistry ' "demonstration", "year": 2017, "month": "August" }, { "type": "department", "name": "Applied Physics program committee", "year": 2018, "month": 1 }, { "type": "school", "name": "Ad hoc tenure committee", "year": 2017, "month": 6, "notes": "Albert Einstein" }, { "type": "profession", "name": "Co-organizer JUAMI", "year": 2017, "month": 12, "role": "co-organizer", "other": "great way to meet people", }, ], "skills": [ {"category": "Programming Languages", "level": "expert", "name": "Python"}, ], "teaching": [ { "course": 'MSAE-3010: Introduction to Materials Science', "courseid": "f17-3010", "description": "This course is an introduction to nuclear " "physics.", "enrollment": 18, "evaluation": { "response_rate": 58.33, "amount_learned": 4.57, "appropriateness_workload": 4.29, "fairness_grading": 4.57, "course_overall": 4.43, "organization": 4.0, "classroom_delivery": 4.29, "approachability": 4.86, "instructor_overall": 4.43, "comments": [ "Great teacher but disorganized", "Wears pink pants. 
Why?", ]}, "month": "August", "organization": "Columbia University", "position": "professor", "semester": "Fall", "syllabus": "https://drive.google.com/open?id" "=0BxUpd34yizZreDBCMEJNY2FUbnc", "year": 2016, }, { "course": 'MSAE-3010: Introduction to Materials Science', "courseid": "f17-3010", "description": "This course is an introduction to nuclear " "physics.", "enrollment": 18, "evaluation": { "response_rate": 58.33, "amount_learned": 4.57, "appropriateness_workload": 4.29, "fairness_grading": 4.57, "course_overall": 4.43, "organization": 4.0, "classroom_delivery": 4.29, "approachability": 4.86, "instructor_overall": 4.43, "comments": [ "Great teacher but disorganized", "Wears pink pants. Why?", ]}, "month": "August", "organization": "Columbia University", "position": "professor", "semester": "Fall", "syllabus": "https://drive.google.com/open?id" "=0BxUpd34yizZreDBCMEJNY2FUbnc", "year": 2017, }, { "course": 'MSAE-3010: Introduction to Materials Science', "courseid": "s17-3010", "description": "This course is an introduction to nuclear " "physics.", "enrollment": 18, "evaluation": { "response_rate": 58.33, "amount_learned": 4.57, "appropriateness_workload": 4.29, "fairness_grading": 4.57, "course_overall": 4.43, "organization": 4.0, "classroom_delivery": 4.29, "approachability": 4.86, "instructor_overall": 4.43, "comments": [ "Great teacher but disorganized", "Wears pink pants. Why?", ]}, "month": "Jan", "organization": "Columbia University", "position": "professor", "semester": "Spring", "syllabus": "https://drive.google.com/open?id" "=0BxUpd34yizZreDBCMEJNY2FUbnc", "year": 2018, }, { "course": 'MSAE-3010: Introduction to Materials Science', "courseid": "s17-3010", "description": "This course is an introduction to nuclear " "physics.", "enrollment": 18, "evaluation": { "response_rate": 58.33, "amount_learned": 4.57, "appropriateness_workload": 4.29, "fairness_grading": 4.57, "course_overall": 4.43, "organization": 4.0, "classroom_delivery": 4.29, "approachability": 4.86, "instructor_overall": 4.43, "comments": [ "Great teacher but disorganized", "Wears pink pants. Why?", ]}, "month": "Jan", "organization": "Columbia University", "position": "professor", "semester": "Spring", "syllabus": "https://drive.google.com/open?id" "=0BxUpd34yizZreDBCMEJNY2FUbnc", "year": 2017, }, { "course": 'MSAE-3010: Introduction to Materials Science', "courseid": "s17-3010", "description": "This course is an introduction to nuclear " "physics.", "enrollment": 18, "month": "Jan", "organization": "Columbia University", "position": "professor", "semester": "Spring", "syllabus": "https://drive.google.com/open?id" "=0BxUpd34yizZreDBCMEJNY2FUbnc", "year": 2019, }, { "course": 'MSAE-3010: Introduction to Materials Science', "courseid": "f18-3010", "description": "This course is an introduction to nuclear " "physics.", "enrollment": 18, "evaluation": { "response_rate": 58.33, "amount_learned": 4.57, "appropriateness_workload": 4.29, "fairness_grading": 4.57, "course_overall": 4.43, "organization": 4.0, "classroom_delivery": 4.29, "approachability": 4.86, "instructor_overall": 4.43, "comments": [ "Great teacher but disorganized", "Wears pink pants. 
Why?", ]}, "month": "August", "organization": "Columbia University", "position": "professor", "semester": "Fall", "syllabus": "https://drive.google.com/open?id" "=0BxUpd34yizZreDBCMEJNY2FUbnc", "year": 2018, }, { "course": 'MSAE-3010: Introduction to Materials Science', "courseid": "f19-3010", "description": "This course is an introduction to nuclear " "physics.", "month": "August", "organization": "Columbia University", "position": "professor", "semester": "Fall", "syllabus": "https://drive.google.com/open?id" "=0BxUpd34yizZreDBCMEJNY2FUbnc", "year": 2019, }, ], "title": "Dr.", "todos": [ {"description": "read paper", "due_date": "2020-07-19", "begin_date": "2020-06-15", "duration": 60.0, "importance": 2, "status": "started", "assigned_by": "scopatz", "running_index": 1 }, {"description": "prepare the presentation", "due_date": "2020-07-29", "begin_date": "2020-06-22", "duration": 30.0, "importance": 0, "status": "started", "notes": ["about 10 minutes", "don't forget to upload to the website"], "assigned_by": "sbillinge", "running_index": 2 } ], }, {"_id": "abeing", "active": False, "aka": ["being", "human", "person"], "avatar": "https://xkcd.com/1221/", "bio": "Abstract Being is an exemplar human existence", "education": [ {"degree": "bachelors", "institution": "University of Laughs", "begin_year": 2010}, ], "employment": [ {"group": "bg", "begin_date": "2015-06-01", "end_date": "2015-08-31", "organization": "columbiau", "position": "intern"}, {"group": "agroup", "begin_date": "2020-01-01", "end_date": "2030-12-31", "organization": "usouthcarolina", "position": "intern"}, {"group": "bg", "begin_date": "2010-06-01", "end_date": "2012-08-31", "organization": "columbiau", "position": "intern"}, {"group": "bg", "begin_date": "2017-06-01", "end_date": "2019-08-31", "organization": "columbiau", "position": "intern"}, ], "position": "intern", "name": "Abstract Being", } ], "presentations": [ { "_id": "18sb_this_and_that", "abstract": "We pulled apart graphite with tape", "authors": ["scopatz", "afriend"], "begin_year": 2018, "begin_month": 5, "begin_day": 22, "department": "apam", "institution": "columbiau", "location": "Upton NY", "meeting_name": "Meeting to check flexibility on dates", "notes": [ "We hope the weather will be sunny", "if the weather is nice we will go to the " "beach", ], "project": "18sob_clustermining", "status": "accepted", "title": "Graphitic Dephenestration", "type": "award", "webinar": False, }, { "_id": "18sb_nslsii", "abstract": "We pulled apart graphite with tape", "authors": ["scopatz"], "begin_year": 2018, "begin_month": 5, "begin_day": 22, "department": "apam", "end_year": 2018, "end_month": 5, "end_day": 22, "institution": "columbiau", "location": "Upton NY", "meeting_name": "2018 NSLS-II and CFN Users Meeting", "notes": [ "We hope the weather will be sunny", "if the weather is nice we will go to the " "beach", ], "project": "18sob_clustermining", "status": "accepted", "title": "ClusterMining: extracting core structures of " "metallic nanoparticles from the atomic pair " "distribution function", "type": "poster", }, { "_id": "18sb04_kentstate", "abstract": "We made the case for local structure", "authors": ["scopatz"], "begin_year": 2018, "begin_month": "May", "begin_day": 22, "department": "physics", "end_year": 2018, "end_month": 5, "end_day": 22, "institution": "columbiau", "notes": ["what a week!"], "project": "18kj_conservation", "status": "accepted", "title": "Nanostructure challenges and successes from " "16th Century warships to 21st Century energy", 
"type": "colloquium", "webinar": True, }, ], "projecta": { "_id": "sb_firstprojectum", "begin_date": "2020-04-28", "collaborators": ["aeinstein", "pdirac"], "deliverable": { "audience": ["beginning grad in chemistry"], "due_date": "2021-05-05", "success_def": "audience is happy", "scope": ["UCs that are supported or some other scope description " "if it is software", "sketch of science story if it is paper" ], "platform": "description of how and where the audience will access " "the deliverable. Journal if it is a paper", "roll_out": [ "steps that the audience will take to access and interact with " "the deliverable", "not needed for paper submissions"], "notes": ["deliverable note"], "status": "proposed" }, "description": "My first projectum", "end_date": "2020-06-05", "grants": "SymPy-1.1", "group_members": ["ascopatz"], "kickoff": { "date": "2020-05-05", "due_date": "2020-05-06", "name": "Kick off meeting", "objective": "introduce project to the lead", "audience": ["lead", "pi", "group_members"], "notes": ["kickoff note"], "status": "finished" }, "lead": "ascopatz", "log_url": "https://docs.google.com/document/d/1YC_wtW5Q", "milestones": [{ 'due_date': '2020-05-20', 'name': 'Project lead presentation', 'notes': ["do background reading", "understand math"], 'objective': 'lead presents background reading and ' 'initial project plan', 'audience': ['lead', 'pi', 'group_members'], 'status': 'proposed', 'type': 'meeting' }, {'due_date': '2020-05-27', 'name': 'planning meeting', 'objective': 'develop a detailed plan with dates', 'audience': ['lead', 'pi', 'group_members'], 'status': 'proposed', 'type': 'pr', }], "name": "First Projectum", "pi_id": "scopatz", "status": "started" }, "projects": { "_id": "Cyclus", "name": "Cyclus", "description": "Agent-Based Nuclear Fuel Cycle Simulator", "group": "ergs", "highlights": [ {"year": 2020, "month": 5, "description": "high profile pub in Nature"} ], "logo": "http://fuelcycle.org/_static/big_c.png", "other": [ "Discrete facilities with discrete material transactions", "Low barrier to entry, rapid payback to adoption", ], "repo": "https://github.com/cyclus/cyclus/", "team": [ { "begin_month": "June", "begin_year": 2013, "end_month": "July", "end_year": 2015, "name": "Anthony Scopatz", "position": "Project Lead", } ], "type": "funded", "website": "http://fuelcycle.org/", "grant": "dmref15", }, "proposalReviews": [ { "_id": "1906doeExample", "adequacy_of_resources": [ "The resources available to the PI seem adequate" ], "agency": "doe", "competency_of_team": ["super competent!"], "doe_appropriateness_of_approach": [ "The proposed approach is highly innovative" ], "doe_reasonableness_of_budget": [ "They could do it with half the money"], "doe_relevance_to_program_mission": ["super relevant"], "does_how": [ "they will find the cause of Malaria", "when they find it they will determine a cure", ], "due_date": "2020-04-10", "does_what": "Find a cure for Malaria", "freewrite": [ "I can put extra things here, such as special instructions from the", "program officer", ], "goals": [ "The goals of the proposal are to put together a team to find a cure" "for Malaria, and then to find it" ], "importance": ["save lives", "lift people from poverty"], "institutions": "columbiau", "month": "May", "names": ["B. Cause", "A.N. 
Effect"], "nsf_broader_impacts": [], "nsf_create_original_transformative": [], "nsf_plan_good": [], "nsf_pot_to_advance_knowledge": [], "nsf_pot_to_benefit_society": [], "requester": "Lane Wilson", "reviewer": "sbillinge", "status": "submitted", "summary": "dynamite proposal", "title": "A stunning new way to cure Malaria", "year": 2019, }, { "_id": "1906nsfExample", "adequacy_of_resources": [ "The resources available to the PI seem adequate" ], "agency": "nsf", "competency_of_team": ["super competent!"], "doe_appropriateness_of_approach": [], "doe_reasonableness_of_budget": [], "doe_relevance_to_program_mission": [], "does_how": [ "they will find the cause of Poverty", "when they find it they will determine a cure", ], "does_what": "Find a cure for Poverty", "due_date": "2020-04-10", "freewrite": [ "I can put extra things here, such as special instructions from the", "program officer", ], "goals": [ "The goals of the proposal are to put together a team to find a cure" "for Poverty, and then to find it" ], "importance": ["save lives", "lift people from poverty"], "institutions": "upenn", "month": "May", "names": ["A Genius"], "nsf_broader_impacts": ["Poor people will be made unpoor"], "nsf_create_original_transformative": [ "transformative because lives will be transformed" ], "nsf_plan_good": [ "I don't see any issues with the plan", "it should be very straightforward", ], "nsf_pot_to_advance_knowledge": [ "This won't advance knowledge at all"], "nsf_pot_to_benefit_society": [ "Society will benefit by poor people being made unpoor if they want " "to be" ], "requester": "Tessemer Guebre", "reviewer": "sbillinge", "status": "submitted", "summary": "dynamite proposal", "title": "A stunning new way to cure Poverty", "year": 2019, }, ], "proposals": [ { "_id": "mypropsal", "amount": 1000000.0, "authors": ["Anthony Scopatz", "Robert Flanagan"], "begin_day": 1, "begin_month": "May", "begin_year": 2030, "currency": "USD", "submitted_day": 18, "duration": 3, "end_day": 31, "end_month": "December", "end_year": 2030, "full": { "benefit_of_collaboration": "http://pdf.com" "/benefit_of_collaboration", "cv": ["http://pdf.com/scopatz-cv", "http://pdf.com/flanagan-cv"], "narrative": "http://some.com/pdf", }, "submitted_month": "Aug", "notes": "Quite an idea", "pi": "Anthony Scopatz", "pre": { "benefit_of_collaboration": "http://pdf.com" "/benefit_of_collaboration", "cv": ["http://pdf.com/scopatz-cv", "http://pdf.com/flanagan-cv"], "day": 2, "month": "Aug", "narrative": "http://some.com/pdf", "year": 1998, }, "status": "submitted", "title": "A very fine proposal indeed", "submitted_year": 1999, }, { "_id": "dmref15", "amount": 982785.0, "authors": ["qdu", "dhsu", "sbillinge"], "call_for_proposals": "http://www.nsf.gov/pubs/2014/nsf14591/" "nsf14591.htm", "begin_day": 1, "begin_month": "May", "begin_year": 2018, "cpp_info": { "cppflag": True, "other_agencies_submitted": "None", "institution": "Columbia University", "person_months_academic": 0, "person_months_summer": 1, "project_scope": "lots to do but it doesn't overlap with any " "other of my grants", "single_pi": True }, "currency": "USD", "submitted_date": "2015-02-02", "duration": 3, "end_day": 1, "end_month": "May", "end_year": 2019, "funder": "NSF", "notes": "Quite an idea", "pi": "Simon Billinge", "status": "accepted", "team": [ { "institution": "Columbia University", "name": "qdu", "position": "co-pi", }, { "institution": "Columbia University", "name": "dhsu", "position": "co-pi", }, { "institution": "Columbia University", "name": "sbillinge", 
"position": "pi", "subaward_amount": 330000.0, }, ], "title": "DMREF: Novel, data validated, nanostructure determination " "methods for accelerating materials discovery", "title_short": "DMREF nanostructure", }, { "_id": "SymPy-1.1", "amount": 3000.0, "begin_date": "2030-05-01", "end_date": "2030-12-31", "cpp_info": { "cppflag": True, "other_agencies_submitted": "None", "institution": "Columbia University", "person_months_academic": 0, "person_months_summer": 1, "project_scope": "" }, "currency": "USD", "pi": "sbillinge", "status": "submitted", "title": "SymPy 1.1 Release Support", }, { "_id": "SymPy-2.0", "amount": 3000.0, "begin_date": "2019-06-01", "end_date": "2030-12-31", "cpp_info": { "cppflag": True, "other_agencies_submitted": "None", "institution": "Columbia University", "person_months_academic": 0, "person_months_summer": 1, "project_scope": "" }, "currency": "USD", "pi": "sbillinge", "status": "submitted", "title": "SymPy 1.1 Release Support", }, { "_id": "abc42", "amount": 42000.0, "begin_date": "2020-06-01", "end_date": "2020-12-31", "cpp_info": { "cppflag": True, "other_agencies_submitted": "None", "institution": "Columbia University", "person_months_academic": 0, "person_months_summer": 1, "project_scope": "" }, "currency": "USD", "pi": "sbillinge", "status": "submitted", "title": "The answer to life, the universe, and everything", } ], "reading_lists": { "_id": "getting_started_with_pdf", "day": "15", "month": "12", "papers": [{"doi": "10.1107/97809553602060000935", "text": "Very basic, but brief, intro to powder diffraction in general"}, {"doi": "10.1039/9781847558237-00464", "text": "Lightest weight overview of PDF analysis around. Good starting point" }, {"url": "http://www.diffpy.org", "text": "Download and install PDFgui software and run through the step by step tutorial under the help tab"} ], "purpose": "Beginning reading about PDF", "title": "A step-by-step pathway towards PDF understanding. It is recommended to read the papers in the order they are listed here.", "year": 2019, }, "refereeReports": { "_id": "1902nature", "claimed_found_what": ["gravity waves"], "claimed_why_important": ["more money for ice cream"], "did_how": ["measured with a ruler"], "did_what": ["found a much cheaper way to measure gravity waves"], "due_date": '2020-04-11', "editor_eyes_only": "to be honest, I don't believe a word of it", "final_assessment": ["The authors should really start over"], "first_author_last_name": "Wingit", "freewrite": "this comment didn't fit anywhere above", "journal": "Nature", "recommendation": "reject", "requester": "Max Planck", "reviewer": "sbillinge", "status": "submitted", "submitted_date": "2019-01-01", "title": "a ruler approach to measuring gravity waves", "validity_assessment": ["complete rubbish"], "year": 2019, }, "students": { "_id": "Human A. Person", "aka": ["H. A. Person"], "email": "haperson@uni.edu", "university_id": "HAP42", }, } SCHEMAS = { "abstracts": { "_description": { "description": "Abstracts for a conference or workshop. This is " "generally public information" }, "_id": { "description": "Unique identifier for submission. 
This generally " "includes the author name and part of the title.", "required": True, "type": "string", }, "coauthors": { "description": "names of coauthors", "required": False, "type": "string", }, "email": { "description": "contact email for the author.", "required": True, "type": "string", }, "firstname": { "description": "first name of the author.", "required": True, "type": "string", }, "institution": { "description": "name of the institution", "required": True, "type": "string", }, "lastname": { "description": "last name of the author.", "required": True, "type": "string", }, "references": { "description": "HTML string of reference for the abstract itself", "required": False, "type": "string", }, "text": { "description": "HTML string of the abstract.", "required": True, "type": "string", }, "timestamp": { "description": "The time when the abstract was submitted.", "required": True, "type": "string", }, "title": { "description": "title of the presentation/paper.", "required": True, "type": "string", }, }, "assignments": { "_description": { "description": "Information about assignments for classes."}, "_id": { "description": "A unique id for the assignment, such as " "HW01-EMCH-558-2016-S", "required": True, "type": "string", }, "category": { "description": "such as 'homework' or 'final'", "required": True, "type": "string", }, "courses": { "description": "ids of the courses that have this assignment", "required": True, "anyof_type": ["string", "list"], }, "file": { "description": "path to assignment file in store", "required": False, "type": "string", }, "points": { "description": "list of number of points possible for each " "question. Length is the number of questions", "required": True, "type": "list", "schema": {"anyof_type": ["integer", "float"]}, }, "questions": { "description": "titles for the questions on this assignment", "required": False, "type": "list", }, "solution": { "description": "path to solution file in store", "required": False, "type": "string", }, }, "beamplan": { "_id": { "description": "Unique identifier for the experiment plan. It should have a format '{year:2d}{month:2d}{people_id:s}_{plan_name:s}'", "required": True, "type": "string" }, "_description": { "description": "Information about the experiment plan for the beamtime."}, "project_lead": { "description": "The id for person who put out this plan. It should be inside the people.yml.", "required": True, "type": "string" }, "project": { "description": "The id for the project which the plan belongs to. It should be on airtable.", "required": True, "type": "string" }, "begin_date": { "description": "The begin date of the beam time.", "required": True, "anyof_type": ["string", "datetime", "date"] }, "end_date": { "description": "The end date of the beam time.", "required": True, "anyof_type": ["string", "datetime", "date"] }, "beamtime": { "description": "The id for the beamtime. Check the Airtable.", "required": True, "type": "string" }, "holder": { "description": "Sample holder used during the measurement, e. g. 3 mm OD tubes holder.", "required": True, "type": "string" }, "devices": { "description": "The dictionary of devices used in the measurement e. g. ", "required": True, "type": "list", "schema": { "type": "string" } }, "measurement": { "description": "What data to be measured, e. g. PDF, XRD, SAXS. 
This will determine the setup.", "required": True, "type": "string" }, "samples": { "description": "The list of samples to be measured.", "required": True, "type": "list", "schema": { "type": "string" } }, "time": { "description": "The total time of executing the exp_plan. Unit: min.", "required": True, "type": "integer" }, "objective": { "description": "What to study in the experiments. What goal to achieve.", "required": True, "type": "string" }, "prep_plan": { "description": "Steps to prepare the samples. Do NOT need details.", "required": True, "type": "list", "schema": { "type": "string" } }, "ship_plan": { "description": "Steps to carry the samples from the producer to the BNL. Do NOT need details.", "required": True, "type": "list", "schema": { "type": "string" } }, "exp_plan": { "description": "Steps to carry out the experiments at BNL. Need details", "required": True, "type": "list", "schema": { "type": "string" } }, "scanplan": { "description": "The scanplan for the experiment, e. g. tseries, Tramp, ct.", "required": True, "type": "list", "schema": { "type": "string" } }, "pipeline": { "description": "The analysis pipeline for the experiment. If no new pipeline is needed, use 'usual'.", "required": True, "type": "string", "default": "usual" }, "todo": { "description": "The TODO list before the beamtime.", "required": True, "type": "list", "schema": { "type": "string" } }, "notes": { "description": "Notes of the plan, e. g. the preferred time.", "required": False, "anyof_type": [ "list", "string" ], "schema": { "type": "string" } } }, "blog": { "_description": { "description": "This collection represents blog posts written by " "the members of the research group." }, "_id": { "description": "short representation, such as this-is-my-title", "required": True, "type": "string", }, "author": { "description": "name or AKA of author", "required": True, "type": "string", }, "day": {"description": "Publication day", "required": True, "type": "integer"}, "month": { "description": "Publication month", "required": True, "anyof_type": ["string", "integer"], }, "original": { "description": "URL of original post, if this is a repost", "required": False, "type": "string", }, "post": { "description": "actual contents of the post", "required": True, "type": "string", }, "title": { "description": "full human readable title", "required": True, "type": "string", }, "year": { "description": "Publication year", "required": True, "type": "integer", }, }, "contacts": { "_description": {"description": "a lighter version of people. Fewer required fields" "for capturing people who are less tightly coupled" }, "_id": { "description": "id of the person, e.g., first letter first name " "plus last name, but unique", "required": True, }, "aka": { "required": False, "type": "list", "description": "other names for the person", }, "date": { "description": "date when the entry was created in ISO format", "required": False, "anyof_type": ["string", "date"], }, 'day': { "description": "day when the entry was created", "required": False, "type": "integer", }, "department": { "description": "Department at the institution", "type": "string", "required": False, }, "email": { "description": "Contact email for the contact", "type": "string", "required": False, }, "institution": { "description": "the institution where they are located. This is" "required for building a COI list of coauthors, but" "not in general. 
It can be institute id or anything" "in the aka or name", "required": False, "type": "string" }, 'month': { "description": "month when the entry was created", "required": False, "anyof_type": ["string", "integer"], }, "name": { "description": "the person's canonical name", "required": True, "type": "string", }, "notes": { "description": "notes about the person", "required": False, "anyof_type": ["list", "string"] }, "title": { "description": "how the person is addressed", "required": False, "type": "string", }, 'updated': { "description": "most recently updated", "required": False, "anyof_type": ["string", "datetime", "date"], }, 'year': { "description": "year when the entry was created", "required": False, "type": "integer", }, 'uuid': { "description": "universally unique identifier", "required": False, "type": "string", }, }, "expenses": { "_description": { "description": "This collection records expenses for the " "group. It should most likely be private" }, "_id": { "description": "short representation, such as this-is-my-name", "required": True, "type": "string", }, "begin_date": { "description": "begin date in YYYY-MM-DD", "anyof_type": ["string", "date"], }, "end_date": { "description": "end date in YYYY-MM-DD", "anyof_type": ["string", "date"], }, "grant_percentages": { "description": "the percentage of the reimbursement amount to put " "on each grant. This list must be the same length as" "the grants list and the percentages placed in the " "order that the grants appear in that list", "required": False, "type": "list", }, "grants": { "description": "the grants in a list, or a string if only one grant", "required": True, "anyof_type": ["string", "list"], }, "project": { "description": "project or list of projects that this " "presentation is associated with. Should " "be discoverable in projects collection", "anyof_type": ["string", "list"], }, "payee": { "description": "The name or id of the payee filing the expense", "required": True, "type": "string", }, "itemized_expenses": { "type": "list", "schema": { "type": "dict", "schema": { "day": { "description": "Expense day", "required": False, "type": "integer", }, "date": { "description": "Expense date", "required": False, "anyof_type": ["string", "date"], }, "month": { "description": "Expense month", "required": False, "anyof_type": ["string", "integer"], }, "year": { "description": "Expense year", "required": False, "type": "integer", }, "purpose": { "description": "reason for expense", "type": "string", "required": True, }, "unsegregated_expense": { "description": "The allowed expenses", "type": "float", }, "segregated_expense": { "description": "The unallowed expenses", "type": "float", }, "original_currency": { "description": "The currency the payment was made in", "type": "float", }, }, }, }, "overall_purpose": { "description": "The reason for the expenses", "type": "string", "required": True, }, "notes": { "description": "Notes about the expense", "type": "list", }, "status": { "description": "The status of the expense", "eallowed": EXPENSES_TYPE, "type": "string" }, "reimbursements": { "description": "Reimbursements for the expense", "schema": { "schema": { 'amount': {"description": 'amount for reimbursements', "type": "float", }, 'date': {"description": "date of reimbursement", "anyof_type": ["string", "date"], }, 'submission_date': {"description": "date of submission", "anyof_type": ["string", "date"], }, 'submission_day': {"description": "day of submission. 
deprecated but here for " "backwards compatibility", "type": "integer", }, 'submission_month': {"description": "month of submission. deprecated but here for " "backwards compatibility", "anyof_type": ["integer", "string"], }, 'submission_year': {"description": "year of submission. deprecated but here for " "backwards compatibility", "type": "integer", }, 'day': {"description": "day of reimbursement. deprecated but here for " "backwards compatibility", "type": "integer", }, 'month': {"description": "month of reimbursement. deprecated but here for " "backwards compatibility", "anyof_type": ["string", "integer"], }, 'year': {"description": "year of reimbursement. deprecated but here for " "backwards compatibility", "type": "integer", }, 'where': {"description": 'where the reimbursement has been sent', "type": 'string', }, }, "type": "dict" }, "type": "list" }, "expense_type": { "description": "The type of expense", "allowed": ["travel", "business"], "required": True, }, }, "grades": { "_description": { "description": "The grade for a student on an assignment. This " "information should be private." }, "_id": { "description": "unique id, typically the " "student-assignment-course", "required": True, "type": "string", }, "assignment": { "description": "assignment id", "required": True, "type": "string", }, "course": {"description": "course id", "required": True, "type": "string"}, "filename": { "description": "path to file in store", "required": False, "type": "string", }, "scores": { "description": "the number of points earned on each question", "required": True, "type": "list", "schema": {"anyof_type": ["integer", "float"]}, }, "student": {"description": "student id", "required": True, "type": "string"}, }, "grants": { "_description": { "description": "This collection represents grants that have been " "awarded to the group." }, "_id": { "description": "short representation, such as this-is-my-name", "required": True, "type": ("string", "integer", "float"), }, "account": { "description": "the account number which holds the funds", "required": False, "type": "string", }, "admin": { "description": "the group administering the grant", "type": "string", "required": False, }, "alias": { "description": "the alias of the grant", "type": "string", "required": False, }, "amount": { "description": "value of award", "required": True, "type": ("integer", "float"), }, "begin_date": { "description": "start date of the grant (if string, in format YYYY-MM-DD)", "required": False, "anyof_type": ["string", "date"] }, "begin_day": { "description": "start day of the grant", "required": False, "type": "integer", }, "begin_month": { "description": "start month of the grant", "required": False, "anyof_type": ["string", "integer"], }, "begin_year": { "description": "start year of the grant", "required": False, "type": "integer", }, "benefit_of_collaboration": { "description": "", "required": False, "type": "string", }, # TODO: maybe this should move to proposals? 
"call_for_proposals": {"description": "", "required": False, "type": "string"}, "currency": { "description": "typically '$' or 'USD'", "required": False, "type": "string", }, "end_date": { "description": "start date of the grant (if string, in format YYYY-MM-DD)", "required": False, "anyof_type": ["string", "date"] }, "end_day": { "description": "end day of the grant", "required": False, "type": ("string", "integer"), }, "end_month": { "description": "end month of the grant", "required": False, "anyof_type": ["string", "integer"], }, "end_year": { "description": "end year of the grant", "required": False, "type": "integer", }, "funder": { "description": "the agency funding the work", "required": True, "type": "string", }, "grant_id": { "description": "the identifier for this work", "required": False, "type": "string", }, "institution": { "description": "the host institution for the grant", "type": "string", "required": False, }, "narrative": {"description": "", "required": False, "type": "string"}, "notes": { "description": "notes about the grant", "required": False, "type": "string", }, "person_months_academic": { "description": "Number of months of funding during the academic" "year", "required": False, "anyof_type": ["integer", "float"], }, "person_months_summer": { "description": "Number of months of funding during the summer", "required": False, "anyof_type": ["integer", "float"], }, "program": { "description": "the program the work was funded under", "required": True, "type": "string", }, # TODO: maybe this should be moved to proposals? "status": { "allowed": ["pending", "declined", "accepted", "in-prep"], "description": "status of the grant", "required": False, "type": "string", }, "scope": { "description": "The scope of the grant, answers the prompt: " '"Describe Research Including Synergies and ' 'Delineation with Respect to this Proposal/Award:"', "required": False, "type": "string", }, # TODO: maybe this should be duplicated in proposals? 
"team": { "description": "information about the team members participating " "in the grant.", "required": True, "schema": { "schema": { "cv": {"required": False, "type": "string"}, "institution": {"required": True, "type": "string"}, "name": {"required": True, "type": "string"}, "position": {"required": True, "type": "string"}, "subaward_amount": { "required": False, "type": ("integer", "float"), }, }, "type": "dict", }, "type": "list", }, "title": { "description": "actual title of proposal / grant", "required": True, "type": "string", }, "budget": { "description": "budget periods of grant", "required": False, "schema": { "schema": { "begin_date": { "description": "start date of the budget period in format YYYY-MM-DD", "required": False, "anyof_type": ["string", "date"], }, "end_date": { "description": "end date of the budget period in format YYYY-MM-DD", "required": False, "anyof_type": ["string", "date"], }, "student_months": { "description": "number of months of funding for student members during the academic year", "required": False, "anyof_type": ["float", "integer"] }, "postdoc_months": { "description": "number of months of funding for postdoc members during the academic year", "required": False, "anyof_type": ["float", "integer"] }, "ss_months": { "description": "number of months of funding for the summer", "required": False, "anyof_type": ["float", "integer"] }, "amount": { "description": "subaward for this budget period", "required": False, "anyof_type": ["float", "integer"] } }, "type": "dict", }, "type": "list", }, "proposal_id": { "description": "initial proposal made for grant", "required": False, "type": "string", } }, "groups": { "_description": { "description": "Information about the research group" "this is generally public information" }, "_id": { "description": "Unique identifier for submission. 
This generally " "includes the author name and part of the title.", "required": True, "type": "string", }, "aka": { "required": True, "type": "list", "description": "other names for the group", }, "banner": { "required": False, "type": "string", "description": "name of image file with the group banner", }, "pi_name": { "description": "The name of the Principle Investigator", "required": True, "type": "string", }, "department": { "description": "Name of host department", "required": True, "type": "string", }, "institution": { "description": "Name of the host institution", "required": True, "type": "string", }, "name": { "description": "Name of the group", "required": True, "type": "string", }, "website": {"description": "URL to group webpage", "type": "string"}, "mission_statement": { "description": "Mission statement of the group", "type": "string", }, "projects": { "description": "About line for projects", "type": "string", "required": True, }, "email": { "description": "Contact email for the group", "type": "string", "required": True, }, }, "institutions": { "_description": { "description": "This collection will contain all the institutions" "in the world and their departments and addresses" }, "_id": { "description": "unique identifier for the institution.", "required": True, "type": "string", }, "aka": { "description": "list of all the different names this " "the institution is known by", "required": False, "type": "list", }, "city": { "description": "the city where the institution is", "required": True, "type": "string", }, "country": { "description": "The country where the institution is", "required": True, "type": "string", }, "date": { "description": "Expense date", "required": False, "anyof_type": ["string", "date"], }, "day": { "description": "the day the entry was created", "required": False, "type": "integer", }, "departments": { "description": "all the departments and centers and" "various units in the institution", "required": False, "type": "dict", # Allow unkown department names, but check their content "valuesrules": { "type": "dict", "schema": { "name": { "description": "The canonical name", "required": True, "type": "string", }, "aka": {"required": False, "type": "list"}, }, }, }, "month": { "description": "the month the entry was created", "required": False, "anyof_type": ["string", "integer"] }, "name": { "description": "the canonical name of the institutions", "required": True, "type": "string", }, "schools": { "description": "this is more for universities, but it " "be used for larger divisions in big " "organizations", "required": False, "type": "dict", "valuesrules": { "type": "dict", "schema": { "name": { "description": "The canonical name", "required": True, "type": "string", }, "aka": {"required": False, "type": "list"}, }, }, }, "state": { "description": "the state where the institution is", "required": False, "type": "string", }, "street": { "description": "the street where the institution is", "required": False, "type": "string", }, "updated": { "description": "a datetime when the entry was updated", "required": False, "anyof_type": ["string", "datetime", "date"] }, "uuid": { "description": "a uuid for the entry", "required": False, "type": "string", }, "year": { "description": "the year the entry was created", "required": False, "type": "integer", }, "zip": { "description": "the zip or postal code of the institution", "required": False, "anyof_type": ["integer", "string"], }, }, "meetings": { "_id": { "description": "unique identifier for the date of the 
group meeting", "required": True, "type": "string", }, "_description": { "description": "the group meeting." }, "actions": { "description": "action items expected from the group members for that particular meeting week", "required": False, "type": "list", }, "agenda": { "description": "schedule of the current meeting", "required": False, "type": "list", }, "buddies": { "description": "list of pairs of group members that are selected for the buddy round robin", "required": False, "type": "list", }, "day": { "description": "day of the group meeting", "required": False, "type": "integer", }, "journal_club": { "description": "indicating the doi of the journal and the presenting group member as the presenter", "required": False, "type": "dict", }, "lead": { "description": "person who will be leading the meeting of the current week", "required": False, "type": "string", }, "minutes": { "description": "meeting notes in a chronological order according to comments made by the group members", "required": False, "type": "list", }, "month": { "description": "month in which the meeting is taking place", "required": False, "anyof_type": ["string", "integer"] }, "place": { "description": "location where the meeting is taking place on campus", "required": False, "type": "string", }, "presentation": { "description": "indicating the title of the presentation along with the link and the presenter ", "required": False, "type": "dict", }, "scribe": { "description": "person who will be taking notes and updating minutes accordingly", "required": False, "type": "string", }, "time": { "description": "person who will be taking notes and updating minutes accordingly" "If an integer is minutes past midnight, so 13:30 is 810 for" "example.", "required": False, "anyof_type": ["string", "integer"] }, "updated": { "description": "person who will be taking notes and updating minutes accordingly", "required": False, "anyof_type": ["string", "datetime", "date"], }, "uuid": { "description": "person who will be taking notes and updating minutes accordingly", "required": False, "type": "string", }, "year": { "description": "person who will be taking notes and updating minutes accordingly", "required": False, "type": "integer", }, }, "people": { "_description": { "description": "This collection describes the members of the " "research group. This is normally public data." 
}, "_id": { "description": "unique identifier for the group member", "required": True, "type": "string", }, "active": { "description": "If the person is an active member, default True.", "required": False, "type": "boolean", }, "aka": { "description": "list of aliases (also-known-as), useful for " "identifying the group member in citations or " "elsewhere.", "required": True, "type": ["string", "list"], }, "appointments": { "type": "dict", "required": False, "description": "begin and end date, grant loading status and notes about appointments" }, "activities": { "type": "list", "required": False, "description": "activities may be teaching or research things", "schema": { "type": "dict", "schema": { "day": { "required": False, "description": "the day the activity took place", "type": "integer", }, "type": { "required": True, "description": "the type of the acitivity", "type": "string", "eallowed": ACTIVITIES_TYPE }, "month": { "required": False, "description": "the month the activity took place", "anyof_type": ["integer", "string"], }, "name": { "required": True, "description": "brief statement of the activity", "type": "string", }, "other": { "required": False, "description": "longer statement of the activity", "type": "string", }, "year": { "required": True, "description": "the year the activity took place", "type": "integer", }, } } }, "avatar": {"description": "URL to avatar", "required": True, "type": "string"}, "bio": { "description": "short biographical text", "required": True, "type": "string", }, "bios": { "description": "longer biographical text if needed", "required": False, "anyof_type": ["string", "list"] }, "collab": { "description": "If the person is a collaborator, default False.", "required": False, "type": "boolean", }, "committees": { "description": "Committees that are served on", "required": False, "schema": { "type": "dict", "schema": { "name": {"required": True, "type": "string", "description": "name of committee, or person if it " "is a phd committee"}, "day": {"required": False, "type": "integer"}, "month": {"required": False, "anyof_type": ["string", "integer"], }, "notes": {"required": False, "description": "extra things you want to record about the thing", "anyof_type": ["string", "list"], }, "year": {"required": True, "type": "integer"}, "unit": {"required": False, "type": "string", "description": "name of department or school etc."}, "type": {"required": False, "type": "string", "description": "type of committee, department, school, university, external", "eallowed": COMMITTEES_TYPE}, "level": { "required": True, "type": "string", "description": "department or school or university, or external", "eallowed": COMMITTEES_LEVEL }, "group": { "required": False, "type": "string", "description": "this employment is/was in" "a group in groups coll", }, }, }, "type": "list", }, "education": { "description": "This contains the educational information for " "the group member.", "required": True, "schema": { "type": "dict", "schema": { "advisor": {"required": False, "type": "string", "description": "name or id of advisor for this degree"}, "begin_day": {"required": False, "type": "integer"}, "begin_month": {"required": False, "anyof_type": ["string", "integer"], }, "begin_year": {"required": True, "type": "integer"}, "degree": {"required": True, "type": "string"}, "department": { "required": False, "type": "string", "description": "department within" "the institution", }, "group": { "required": False, "type": "string", "description": "this employment is/was in" "a group in 
groups coll", }, "end_day": {"required": False, "type": "integer"}, "end_month": {"required": False, "anyof_type": ["string", "integer"], }, # Could be ongoing with undefined end "end_year": {"required": False, "type": "integer"}, "gpa": {"required": False, "type": ("float", "string")}, "institution": {"required": True, "type": "string"}, "location": {"required": False, "type": "string"}, "other": {"required": False, "type": "list"}, }, }, "type": "list", }, "email": { "description": "email address of the group member", "required": False, "type": "string", }, "employment": { "description": "Employment information, similar to educational " "information.", "required": False, "type": "list", "schema": { "type": "dict", "schema": { "advisor": {"required": False, "type": "string", "description": "name or id of " "advisor/mentor/manager"}, "begin_day": {"required": False, "type": "integer"}, "begin_month": {"required": False, "anyof_type": ["string", "integer"], }, "begin_year": {"required": False, "type": "integer"}, "begin_date": {"required": False, "anyof_type": ["string", "date", "datetime"], "description": "begin date of employment in format YYYY-MM-DD"}, "coworkers": {"required": False, "type": "list", "description": "list of coworkers. If" "position is editor, these are " "assumed to be coeditors in" "conflict of interest builder"}, "department": {"required": False, "type": "string"}, "end_day": {"required": False, "type": "integer"}, "end_month": {"required": False, }, "end_year": {"required": False, "type": "integer"}, "end_date": {"required": False, "anyof_type": ["string", "date", "datetime"], "description": "end date of employment in format YYYY-MM-DD"}, "group": { "required": False, "type": "string", "description": "this employment is/was in" "a group in groups coll", }, "location": {"required": False, "type": "string"}, "organization": {"required": True, "type": "string"}, "other": {"required": False, "type": "list"}, "permanent": {"required": False, "type": "boolean", "description": "true if the position is open " \ "ended and has no fixed end-date"}, "position": {"required": True, "type": "string", "eallowed": list(SORTED_POSITION)}, "position_full": { "description": "The full on title of the position. This will be " "typeset if it is here, or if not Position will be " "used. Position will be used for sorting and must " "come from a fixed list of positions", "required": False, "type": "string", }, "status": {"required": False, "type": "string", "eallowed": POSITION_STATUS, }, }, }, }, "facilities": { "type": "list", "required": False, "description": "facilities may be teaching or research things", "schema": { "type": "dict", "schema": { "begin_day": { "required": False, "description": "the day facility, or the wish for the " "facility, started", "type": "integer", }, "end_day": { "required": False, "description": "the day facility started", "type": "integer", }, "type": { "required": True, "description": "the type of the facility. 
Columbia asks" "for wished-for facilities, so there are " "teaching-wish and research-wish fields.", "type": "string", "eallowed": FACILITIES_TYPE }, "begin_month": { "required": False, "description": "the month the facility (or wish) started", "anyof_type": ["integer", "string"], }, "end_month": { "required": False, "description": "the month the faclity went away", "anyof_type": ["integer", "string"], }, "name": { "required": True, "description": "description of the facility", "type": "string", }, "notes": { "required": False, "description": "anything else you want to jot down", "anyof_type": ["string", "list"] }, "begin_year": { "required": True, "description": "the year the facility (or wish) started", "type": "integer", }, "end_year": { "required": False, "description": "the year the facility (or wish) went away", "type": "integer", }, } } }, "funding": { "description": "Funding and scholarship that the group member " "has individually obtained in the past. " "**WARNING:** this is not to be confused with the " "**grants** collection", "required": False, "schema": { "type": "dict", "schema": { "currency": {"required": False, "type": "string"}, "duration": {"required": False, "type": "string"}, "month": {"required": False, "anyof_type": ["string", "integer"], }, "name": {"required": True, "type": "string"}, "value": {"required": True, "type": ("float", "integer")}, "year": {"required": True, "type": "integer"}, }, }, "type": "list", }, "github_id": {"required": False, "type": "string", "description": "Your GitHub ID"}, "google_scholar_url": {"required": False, "type": "string", "description": "URL of your Google Scholar " "rofile"}, "grp_mtg_active": {"required": False, "type": "boolean", "description": "Whether to schedule tasks at group meeting " "or not"}, "hindex": { "description": "details of hindex pulled on a certain date", "required": False, "schema": { "type": "dict", "schema": { "h": {"description": "the value of the h index", "required": True, "type": "integer"}, "h_last_five": {"description": "h index over past 5 years", "required": False, "type": "integer"}, "citations": {"description": "total number of citations", "required": False, "type": "integer"}, "citations_last_five": {"description": "number of citations" "in the past 5 years", "required": False, "type": "integer"}, "origin": {"description": "where the numbers came from", "required": False, "type": "string"}, "since": {"description": "year of first citation", "required": False, "type": "integer"}, "year": {"description": "year when the data were pulled", "required": False, "type": "integer"}, "month": {"description": "month when the data were pulled", "required": False, "anyof_type": ["string", "integer"]}, "day": {"description": "day when the data were pulled", "required": False, "type": "integer"}, } }, "type": "list", }, "home_address": { "description": "The person's home address", "type": "dict", "schema": { "street": {"type": "string", "description": "street address"}, "city": {"type": "string", "description": "name of home city"}, "state": {"type": "string", "description": "name o home state"}, "zip": {"type": "string", "description": "zip code"}, }, }, "honors": { "description": "Honors that have been awarded to this " "group member", "required": False, "schema": { "type": "dict", "schema": { "description": {"required": False, "type": "string"}, "month": {"required": False, "anyof_type": ["string", "integer"], }, "name": {"required": True, "type": "string"}, "year": {"required": True, "type": "integer"}, }, }, 
"type": "list", }, "initials": { "description": "The canonical initials for this group member", "required": False, "type": "string", }, "membership": { "description": "Professional organizations this member is " "a part of", "required": False, "schema": { "type": "dict", "schema": { "begin_month": {"required": False, "anyof_type": ["string", "integer"], }, "begin_year": {"required": True, "type": "integer"}, "description": {"required": False, "type": "string"}, "end_month": {"required": False, "anyof_type": ["string", "integer"], }, "end_year": {"required": False, "type": "integer"}, "organization": {"required": True, "type": "string"}, "position": {"required": True, "type": "string"}, "website": {"required": False, "type": "string"}, }, }, "type": "list", }, "miscellaneous": { "description": "Place to put weird things needed for special reporta", "required": False, "type": "dict", "schema": { "metrics_for_success": { "description": "How do I want to be judged", "required": False, "type": "list", }, }, }, "name": { "description": "Full, canonical name for the person", "required": True, "type": "string", }, "office": { "description": "The person's office", "type": "string", "required": False }, "orcid_id": { "description": "The ORCID ID of the person", "required": False, "type": "string", }, "position": { "description": "such as professor, graduate student, or scientist", "required": False, "type": "string", "eallowed": list(SORTED_POSITION), }, "position_full": { "description": "The full on title of the position. This will be " "typeset if it is here, or if not Position will be " "used. Position will be used for sorting and must " "come from a fixed list of positions", "required": False, "type": "string", }, "publicity": { "description": "summary of publicity that person has received", "required": False, "schema": { "type": "dict", "schema": { "type": {"required": True, "type": "string", "eallowed": PUBLICITY_TYPE}, "topic": {"required": False, "type": "string", "description": "The short sentence of what the " "publicity was about", }, "title": {"required": True, "type": "string", "description": "The title of the piece", }, "day": {"required": False, "type": "integer", "description": "The day the piece appeared" }, "month": {"required": False, "anyof_type": ["string", "integer"], "description": "The month the piece appeared" }, "publication": {"required": False, "type": "string", "description": "The place where the " "publicity was placed" }, "text": {"required": False, "type": "string", "description": "The text of the publicity", }, "url": {"required": False, "type": "string", "description": "The URL where the piece may be found" }, "year": {"required": True, "type": "integer", "description": "The year the piece appeared" }, "grant": {"required": True, "type": "string", "description": "The identifier of the grant " "associated with the piece" }, }, }, "type": "list" }, "research_focus_areas": { "description": "summary of research projects that are ongoing. 
Used" "in Annual appraisal for example", "required": False, "schema": { "type": "dict", "schema": { "begin_year": {"required": False, "type": "integer"}, "end_year": {"required": False, "type": "integer"}, "description": {"required": False, "type": "string"} }, }, "type": "list" }, "research_summary": { "description": "Brief summary of overarching research goals", "required": False, "type": "string", }, "service": { "description": "Service that this group member has provided", "required": False, "schema": { "type": "dict", "schema": { "description": {"required": False, "type": "string"}, "duration": {"required": False, "type": "string"}, "month": {"description": "Use month and year if the service" "doesn't extend more than one year." "Otherwise use begin_year and end_year", "required": False, "anyof_type": ["string", "integer"] }, "name": {"required": True, "type": "string"}, "role": {"required": False, "type": "string", "description": "the role played in the activity, e.g., co-chair"}, "notes": {"required": False, "anyof_type": ["string", "list"]}, "year": {"required": False, "type": "integer"}, "begin_year": {"required": False, "type": "integer"}, "begin_day": {"required": False, "type": "integer"}, "begin_month": {"description": "Use month and year if the service" "doesn't extend more than one year." "Otherwise use begin_year/month and end_year/month", "required": False, "anyof_type": ["string", "integer"] }, "end_year": {"required": False, "type": "integer"}, "end_month": {"description": "Use month and year if the service" "doesn't extend more than one year." "Otherwise use begin_year and end_year", "required": False, "anyof_type": ["string", "integer"] }, "end_day": {"required": False, "type": "integer"}, "other": {"required": False, "anyof_type": ["string", "list"]}, "type": {"required": True, "type": "string", "description": "profession, department, school, university", "eallowed": SERVICE_TYPE}, }, }, "type": "list", }, "skills": { "description": "Skill the group member has", "required": False, "schema": { "type": "dict", "schema": { "category": {"required": True, "type": "string"}, "level": {"required": True, "type": "string"}, "name": {"required": True, "type": "string"}, }, }, "type": "list", }, "teaching": { "description": "Courses that this group member has taught, if any", "required": False, "type": "list", "schema": { "type": "dict", "schema": { "course": {"required": True, "type": "string"}, "courseid": {"required": True, "type": "string"}, "description": {"required": False, "type": "string"}, "end_month": {"required": False, "anyof_type": ["string", "integer"]}, "end_year": {"required": False, "type": "integer"}, "enrollment": {"required": False, "anyof_type": ["integer", "string"]}, "evaluation": { "type": "dict", "required": False, "schema": { "response_rate": {"type": "number", "required": True}, "amount_learned": {"type": "number", "required": True}, "appropriateness_workload": {"type": "number", "required": True}, "course_overall": {"type": "number", "required": True}, "fairness_grading": {"type": "number", "required": True}, "organization": {"type": "number", "required": True}, "classroom_delivery": {"type": "number", "required": True}, "approachability": {"type": "number", "required": True}, "instructor_overall": {"type": "number", "required": True}, "comments": {"type": "list", "required": False, "description": "student comments"}, }, }, "materials": {"required": False, "type": "string"}, "month": {"required": False, "anyof_type": ["string", "integer"], }, 
"organization": {"required": True, "type": "string"}, "position": {"required": True, "type": "string"}, "semester": {"required": False, "type": "string"}, "syllabus": {"required": False, "type": "string"}, "video": {"required": False, "type": "string"}, "website": {"required": False, "type": "string"}, "year": {"required": True, "type": "integer"}, }, }, }, "title": { "description": "for example, Dr., etc.", "required": False, "type": "string", }, "todos": { "description": "a list of the todo tasks", "required": False, "type": "list", "schema": { "type": "dict", "schema": { "description": {"description": "the description of the to-do task", "required": True, "type": "string"}, "due_date": {"description": "the due date", "required": False, "anyof_type": ["string", "date"]}, "begin_date": {"description": "the begin date", "required": False, "anyof_type": ["string", "date"]}, "end_date": {"description": "the end date", "required": False, "anyof_type": ["string", "date"]}, "duration": { "description": "the size of the task/ the estimated duration it will take to finish the task. Unit: miniutes.", "required": False, "type": "float"}, "importance": { "description": "the importance, from 0 to 2", "required": False, "type": "integer"}, "status": {"description": "the status: started/finished/cancelled", "required": True, "type": "string"}, "notes": {"description": "additional notes for this task", "required": False, "type": "list", "schema": {"type": "string"} }, "running_index": { "description": "Index of a certain task used to update that task in the enumerated todo list.", "required": False, "type": "integer"}, "assigned_by": { "description": "ID of the member that assigns the task", "required": False, "type": "string"}, } } }, }, "presentations": { "_description": { "description": "This collection describes presentations that group" "members make at conferences, symposia, seminars and" "so on." }, "_id": { "description": "unique id for the presentation", "required": True, "type": "string", }, "abstract": { "description": "abstract of the presentation", "required": False, "type": "string", }, "authors": { "description": "Author list.", "required": True, "anyof_type": ["string", "list"], }, "begin_date": { "description": "begin date in YYYY-MM-DD", "anyof_type": ["date", "string"], }, "end_date": { "description": "end_date in YYYY-MM-DD", "anyof_type": ["date", "string"], }, "begin_year": { "description": "year the conference or trip begins.", "required": False, "type": "integer", }, "begin_month": {"required": False, "anyof_type": ["string", "integer"], }, "begin_day": {"required": False, "type": "integer"}, "department": { "description": "department of the institution where the" "presentation will be made, if " "applicable. should be discoverable in " "institutions.", "required": False, "type": "string", }, "end_year": { "description": "year the conference or trip ends", "required": False, "type": "integer", }, "end_month": {"required": False, "anyof_type": ["string", "integer"], }, "end_day": {"required": False, "type": "integer"}, "institution": { "description": "institution where the" "presentation will be made, if " "applicable.", "required": False, "type": "string", }, "meeting_name": { "description": "full name of the conference or " "meeting. 
If it is a departmental " "seminar or colloquium, write Seminar" "or Colloquium and fill in department " "and institution fields", "required": False, "type": "string", }, "location": { "description": "city and {state or country} of meeting", "required": False, "type": "string", }, "notes": { "description": "any reminder or memory aid about anything", "required": False, "anyof_type": ["list", "string"], }, "project": { "description": "project or list of projects that this " "presentation is associated with. Should " "be discoverable in projects collection", "required": False, "anyof_type": ["string", "list"], }, "status": { "description": "Is the application in prep or submitted, " "was the invitation accepted or declined, was " "the trip cancelled?", "required": True, "type": "string", "eallowed": PRESENTATION_STATUS, }, "title": { "description": "title of the presentation", "required": True, "type": "string", }, "type": { "description": "type of presentation", "eallowed": PRESENTATION_TYPE, "required": True, "type": "string", }, "webinar": { "description": "true if a webinar. Default to False", "required": False, "type": "boolean", }, }, "projects": { "_description": { "description": "This collection describes the research group " "projects. This is normally public data." }, "_id": { "description": "Unique project identifier.", "required": True, "type": "string", }, "active": { "description": "true if the project is active", "required": False, "anyof_type": ["string", "boolean"], }, "description": { "description": "brief project description.", "required": True, "type": "string", }, "grant": { "description": "Grant id if there is a grant supporting this " "project", "required": False, "type": "string", }, "group": { "description": "id for the group in the groups collection whose project this is", "required": False, "type": "string", }, "highlights": { "description": "list of things to highlight in a report or website, such as releases for for software or high profile publications", "required": False, "type": "list", "schema": { "type": "dict", "schema": { "year": {"description": "the year of the highlight", "required": True, "type": "integer"}, "month": {"description": "the month of the highlight", "required": True, "anyof_type": ["string", "integer"]}, "description": {"description": "the highlight", "required": True, "type": "string"}, } } }, "logo": { "description": "URL to the project logo", "required": False, "type": "string", }, "name": { "description": "name of the project.", "required": True, "type": "string", }, "other": { "description": "other information about the project", "required": False, "type": ["list", "string"], }, "repo": { "description": "URL of the source code repo, if available", "required": False, "type": "string", }, "team": { "description": "People who are/have been working on this project.", "required": True, "schema": { "type": "dict", "schema": { "begin_month": {"required": False, "anyof_type": ["string", "integer"], }, "begin_year": {"required": True, "type": "integer"}, "end_month": {"required": False, "anyof_type": ["string", "integer"], }, "end_year": {"required": False, "type": "integer"}, "name": {"required": True, "type": "string"}, "position": {"required": True, "type": "string"}, }, }, "type": "list", }, "type": { "description": "The type of project", "required": False, "anyof_type": ["string"], "eallowed": PROJECT_TYPE }, "website": { "description": "URL of the website.", "required": False, "type": "string", }, }, "proposalReviews": { "_description": { 
"description": "This collection contains reviews of funding proposals" }, "_id": { "description": "ID, e.g. 1906_doe_example", "required": True, "type": ("string", "integer", "float"), }, "adequacy_of_resources": { "description": "Are the resources of the PI adequate", "required": True, "type": "list", }, "agency": { "description": "currently nsf or doe", "type": "string", "eallowed": AGENCIES, }, "competency_of_team": { "description": "Is the team competent", "required": True, "type": "list", }, "doe_appropriateness_of_approach": { "description": "Appropriateness of Research. only used if agency is doe.", "required": False, "type": "list", }, "doe_reasonableness_of_budget": { "description": "Reasonableness of budget. only used if agency is doe.", "required": False, "type": "list", }, "doe_relevance_to_program_mission": { "description": "Relevance to program mission. only used if agency is doe.", "required": False, "type": "list", }, "does_how": { "description": "How will the research be done", "required": True, "type": "list", }, "does_what": { "description": "What will the team do", "required": True, "type": "string", }, "due_date": { "description": "date the review is due in ISO format", "required": True, "anyof_type": ["string", "date"], }, "freewrite": { "description": "Anything and this will appear in the built document" "right before the summary. This section often used " "for extra review criteria for the particular proposal", "required": False, "type": "list", }, "goals": { "description": "What are the main goals of the proposed research", "required": True, "type": "list", }, "importance": { "description": "The importance of the Research", "required": True, "type": "list", }, "institutions": { "description": "The institutions of the authors in the same order", "required": True, "anyof_type": ["string", "list"] }, "month": { "description": "The month the review was submitted", "required": True, "anyof_type": ["string", "integer"], }, "names": { "description": "The names of the PIs", "required": True, "anyof_type": ["list", "string"], }, "nsf_broader_impacts": { "description": "The broader impacts of the research. Only used if " "agency is nsf", "required": False, "type": "list", }, "nsf_create_original_transformative": { "description": "Answer to the question how the work is creative, " "original or transformative. Only used if agency is " "nsf", "required": False, "type": "list", }, "nsf_plan_good": { "description": "Is the plan good? Only used if agency is nsf", "required": False, "type": "list", }, "nsf_pot_to_advance_knowledge": { "description": "Answer to the question how the work will advance" "knowledge. Only used if agency is nsf", "required": False, "type": "list", }, "nsf_pot_to_benefit_society": { "description": "Answer to the question how the work has the potential" "to benefit society. Only used if agency is nsf", "required": False, "type": "list", }, "requester": { "description": "Name of the program officer who requested the review", "required": True, "type": "string", }, "reviewer": { "description": "short name of the reviewer. 
Will be used in the " "filename of the resulting text file", "required": True, "type": "string", }, "status": { "description": "the status of the review", "type": "string", "eallowed": REVIEW_STATI, }, "summary": { "description": "Summary statement", "required": True, "type": "string", }, "title": { "description": "The title of the proposal", "required": True, "type": "string", }, "year": { "description": "The year the review was submitted", "required": True, "type": "integer", }, }, "proposals": { "_description": { "description": "This collection represents proposals that have " "been submitted by the group." }, "_id": { "description": "short representation, such as this-is-my-name", "required": True, "type": ("string", "integer", "float"), }, "amount": { "description": "value of award", "required": True, "type": ("integer", "float"), }, "authors": { "description": "other investigator names", "required": False, "anyof_type": ["list", "string"], }, "begin_date": { "description": "start date of the proposed grant in format YYYY-MM-DD", "required": False, "anyof_type": ["string", "date"] }, "begin_day": { "description": "start day of the proposed grant", "required": False, "type": "integer", }, "begin_month": { "description": "start month of the proposed grant", "required": False, "anyof_type": ["string", "integer"] }, "begin_year": { "description": "start year of the proposed grant", "required": False, "type": "integer", }, "call_for_proposals": { "description": "", "required": False, "type": "string", }, "cpp_info": { "description": "extra information needed for building current and " "pending form ", "required": False, "schema": { "cppflag": {"required": False, "type": "boolean"}, "other_agencies_submitted": {"required": False, "anyof_type": ["string", "boolean"]}, "institution": {"required": False, "type": "string", "description": "place where the proposed grant will be located"}, "person_months_academic": {"required": False, "anyof_type": ["float", "integer"]}, "person_months_summer": {"required": False, "anyof_type": ["float", "integer"]}, "project_scope": {"required": False, "type": "string"}, "single_pi": {"required": False, "type": "boolean", "description": "set to true if there are no co-pi's"}, }, "type": "dict", }, "currency": { "description": "typically '$' or 'USD'", "required": True, "type": "string", }, "due_date": { "description": "day that the proposal is due", "required": False, "anyof_type": ["string", "date"], }, "duration": { "description": "number of years", "required": False, "type": ("integer", "float"), }, "end_date": { "description": "end date of the proposed grant in format YYYY-MM-DD", "required": False, "anyof_type": ["string", "date"] }, "end_day": { "description": "end day of the proposed grant", "required": False, "type": ("string", "integer"), }, "end_month": { "description": "end month of the proposed grant", "required": False, "anyof_type": ["string", "integer"] }, "end_year": { "description": "end year of the proposed grant", "required": False, "type": "integer", }, "funder": { "description": "who will fund the proposal" "as funder in grants", "required": False, "type": "string", }, "full": { "description": "full body of the proposal", "required": False, "type": "dict", }, "notes": { "description": "anything you want to note", "required": False, "anyof_type": ["string", "list"], }, "pi": { "description": "principal investigator name", "required": True, "type": "string", }, "pre": { "description": "Information about the pre-proposal", "required": False, 
"type": "dict", }, "status": { "description": "e.g. 'pending', 'accepted', 'declined'", "required": True, "type": "string", "eallowed": PROPOSAL_STATI, }, "submitted_date": { "description": "date that the proposal was submitted", "required": False, "anyof_type": ["string", "date"], }, "submitted_day": { "description": "day that the proposal was submitted", "required": False, "type": "integer", }, "submitted_month": { "description": "month that the proposal was submitted", "required": False, "anyof_type": ["string", "integer"] }, "submitted_year": { "description": "Year that the proposal was submitted", "required": False, "type": "integer", }, "team": { "description": "information about the team members participating " "in the grant.", "required": False, "schema": { "schema": { "cv": {"required": False, "type": "string"}, "email": {"required": False, "type": "string"}, "institution": {"required": False, "type": "string"}, "name": {"required": False, "type": "string"}, "position": {"required": False, "type": "string"}, "subaward_amount": { "required": False, "type": ("integer", "float"), }, }, "type": "dict", }, "type": "list", }, "title": { "description": "actual title of proposal", "required": True, "type": "string", }, "title_short": { "description": "short title of proposal", "required": False, "type": "string", }, }, "refereeReports": { "_description": { "description": "This is a collection of information that will be " "be used to build a referee report. This should probably be private." }, "_id": {"description": "the ID", "required": True, "type": "string"}, "claimed_found_what": { "description": "What the authors claim to have found", "required": True, "schema": {"type": "string", "required": True}, "type": "list", }, "claimed_why_important": { "description": "What importance the authors claim", "required": True, "schema": {"type": "string", "required": True}, "type": "list", }, "did_how": { "description": "How the study was done", "required": True, "schema": {"type": "string", "required": True}, "type": "list", }, "did_what": { "description": "What the study was", "required": True, "schema": {"type": "string", "required": True}, "type": "list", }, "due_date": { "description": "date the review is due in ISO format", "required": True, "anyof_type": ["string", "date"], }, "editor_eyes_only": { "description": "Comments you don't want passed to the author", "required": False, "type": "string", }, "final_assessment": { "description": "Summary of impressions of the study", "required": True, "schema": {"type": "string", "required": True}, "type": "list", }, "first_author_last_name": { "description": "Last name of first author will be referred to " "with et al.", "required": True, "type": "string", }, "freewrite": { "description": "Things that you want to add that don't fit into " "any category above", "required": False, "type": "string", }, "journal": { "description": "name of the journal", "required": True, "type": "string", }, "month": { "description": "the month the entry was created", "required": False, "anyof_type": ["string", "integer"] }, "recommendation": { "description": "Your publication recommendation", "required": True, "type": "string", "eallowed": REVIEW_RECOMMENDATION, }, "requester": { "description": "Name of the program officer who requested the review", "required": True, "type": "string", }, "reviewer": { "description": "name of person reviewing the paper", "required": True, "type": "string", }, "status": { "description": "Where you are with the review", "required": True, 
"type": "string", "eallowed": REVIEW_STATI, }, "submitted_date": { "description": "submitted date in ISO YYYY-MM-DD format", "required": True, "anyof_type": ["string", "date"], }, "title": { "description": "title of the paper under review", "required": True, "type": "string", }, "validity_assessment": { "description": "List of impressions of the validity of the claims", "required": True, "schema": {"type": "string", "required": True}, "type": "list", }, "year": { "description": "year when the review is being done", "required": True, "anyof_type": ["string", "integer"], }, }, "students": { "_description": { "description": "This is a collection of student names and " "metadata. This should probably be private." }, "_id": { "description": "short representation, such as this-is-my-name", "required": True, "type": "string", }, "aka": { "description": "list of aliases", "required": False, "schema": {"type": "string"}, "type": ("list", "string"), }, "email": {"description": "email address", "required": False, "type": "string"}, "university_id": { "description": "The university identifier for the student", "required": False, "type": "string", }, }, } for s in SCHEMAS: SCHEMAS[s]["files"] = { "description": "Files associated with the document", # TODO: fix this since this is currently comming out a CommentedMap # "type": "list", # "schema": {"type": "string"}, "required": False, } class NoDescriptionValidator(Validator): def _validate_description(self, description, field, value): if False: pass def _validate_eallowed(self, eallowed, field, value): if value not in eallowed: warn( '"{}" is not in the preferred entries for "{}", please ' "consider changing this entry to conform or add this to the " "``eallowed`` field in the schema.".format(value, field) ) def validate(coll, record, schemas): if coll in schemas: schema = copy.deepcopy(schemas[coll]) v = NoDescriptionValidator(schema) return v.validate(record), v.errors else: return True, ()
true
true
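A minimal usage sketch, not part of the dumped file above: one way the validate() helper defined at its end might be called for a record in the "groups" collection. Only the field names are taken from the SCHEMAS dict; the record values are invented placeholders, and the comment about the errors object assumes cerberus-style reporting.

# Hypothetical "groups" record; every value below is a placeholder.
record = {
    "_id": "example-group",
    "aka": ["eg"],
    "pi_name": "Jane Doe",
    "department": "Chemistry",
    "institution": "Example University",
    "name": "Example Group",
    "projects": "One-line summary of the group's projects",
    "email": "group@example.edu",
}

ok, errors = validate("groups", record, SCHEMAS)
if not ok:
    # errors is expected to map field names to lists of messages
    print(errors)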
79031751b8223ad4874b9d60640bb7cc5ead7850
18,724
py
Python
plugins/modules/oci_waas_certificate_facts.py
hanielburton/oci-ansible-collection
dfdffde637f746d346ba35569be8c3a3407022f2
[ "Apache-2.0" ]
null
null
null
plugins/modules/oci_waas_certificate_facts.py
hanielburton/oci-ansible-collection
dfdffde637f746d346ba35569be8c3a3407022f2
[ "Apache-2.0" ]
null
null
null
plugins/modules/oci_waas_certificate_facts.py
hanielburton/oci-ansible-collection
dfdffde637f746d346ba35569be8c3a3407022f2
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python # Copyright (c) 2017, 2021 Oracle and/or its affiliates. # This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Apache License v2.0 # See LICENSE.TXT for details. # GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { "metadata_version": "1.1", "status": ["preview"], "supported_by": "community", } DOCUMENTATION = """ --- module: oci_waas_certificate_facts short_description: Fetches details about one or multiple WaasCertificate resources in Oracle Cloud Infrastructure description: - Fetches details about one or multiple WaasCertificate resources in Oracle Cloud Infrastructure - Gets a list of SSL certificates that can be used in a WAAS policy. - If I(certificate_id) is specified, the details of a single WaasCertificate will be returned. version_added: "2.9" author: Oracle (@oracle) options: certificate_id: description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the SSL certificate used in the WAAS policy. This number is generated when the certificate is added to the policy. - Required to get a specific waas_certificate. type: str aliases: ["id"] compartment_id: description: - The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment. This number is generated when the compartment is created. - Required to list multiple waas_certificates. type: str sort_by: description: - The value by which certificate summaries are sorted in a paginated 'List' call. If unspecified, defaults to `timeCreated`. type: str choices: - "id" - "compartmentId" - "displayName" - "notValidAfter" - "timeCreated" sort_order: description: - The value of the sorting direction of resources in a paginated 'List' call. If unspecified, defaults to `DESC`. type: str choices: - "ASC" - "DESC" display_name: description: - Filter certificates using a list of display names. type: list aliases: ["name"] lifecycle_state: description: - Filter certificates using a list of lifecycle states. type: list choices: - "CREATING" - "ACTIVE" - "FAILED" - "UPDATING" - "DELETING" - "DELETED" time_created_greater_than_or_equal_to: description: - A filter that matches certificates created on or after the specified date-time. type: str time_created_less_than: description: - A filter that matches certificates created before the specified date-time. type: str extends_documentation_fragment: [ oracle.oci.oracle ] """ EXAMPLES = """ - name: List waas_certificates oci_waas_certificate_facts: compartment_id: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx - name: Get a specific waas_certificate oci_waas_certificate_facts: certificate_id: ocid1.certificate.oc1..xxxxxxEXAMPLExxxxxx """ RETURN = """ waas_certificates: description: - List of WaasCertificate resources returned: on success type: complex contains: id: description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the certificate. returned: on success type: string sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx compartment_id: description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the certificate's compartment. 
returned: on success type: string sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx display_name: description: - The user-friendly name of the certificate. returned: on success type: string sample: display_name_example issued_by: description: - "" returned: on success type: string sample: issued_by_example subject_name: description: - "" returned: on success type: complex contains: country: description: - ISO 3166-1 alpha-2 code of the country where the organization is located. For a list of codes, see L(ISO's website,https://www.iso.org/obp/ui/#search/code/). returned: on success type: string sample: country_example state_province: description: - The province where the organization is located. returned: on success type: string sample: state_province_example locality: description: - The city in which the organization is located. returned: on success type: string sample: locality_example organization: description: - The organization name. returned: on success type: string sample: organization_example organizational_unit: description: - The field to differentiate between divisions within an organization. returned: on success type: string sample: organizational_unit_example common_name: description: - The fully qualified domain name used for DNS lookups of the server. returned: on success type: string sample: common_name_example email_address: description: - The email address of the server's administrator. returned: on success type: string sample: email_address_example issuer_name: description: - "" returned: on success type: complex contains: country: description: - ISO 3166-1 alpha-2 code of the country where the organization is located. For a list of codes, see L(ISO's website,https://www.iso.org/obp/ui/#search/code/). returned: on success type: string sample: country_example state_province: description: - The province where the organization is located. returned: on success type: string sample: state_province_example locality: description: - The city in which the organization is located. returned: on success type: string sample: locality_example organization: description: - The organization name. returned: on success type: string sample: organization_example organizational_unit: description: - The field to differentiate between divisions within an organization. returned: on success type: string sample: organizational_unit_example common_name: description: - The Certificate Authority (CA) name. returned: on success type: string sample: common_name_example email_address: description: - The email address of the server's administrator. returned: on success type: string sample: email_address_example serial_number: description: - A unique, positive integer assigned by the Certificate Authority (CA). The issuer name and serial number identify a unique certificate. returned: on success type: string sample: serial_number_example version: description: - The version of the encoded certificate. returned: on success type: int sample: 56 signature_algorithm: description: - The identifier for the cryptographic algorithm used by the Certificate Authority (CA) to sign this certificate. returned: on success type: string sample: signature_algorithm_example time_not_valid_before: description: - The date and time the certificate will become valid, expressed in RFC 3339 timestamp format. returned: on success type: string sample: 2018-11-16T21:10:29Z time_not_valid_after: description: - The date and time the certificate will expire, expressed in RFC 3339 timestamp format. 
returned: on success type: string sample: 2018-11-16T21:10:29Z public_key_info: description: - "" returned: on success type: complex contains: algorithm: description: - The algorithm identifier and parameters for the public key. returned: on success type: string sample: algorithm_example exponent: description: - The private key exponent. returned: on success type: int sample: 56 key_size: description: - The number of bits in a key used by a cryptographic algorithm. returned: on success type: int sample: 56 extensions: description: - Additional attributes associated with users or public keys for managing relationships between Certificate Authorities. returned: on success type: complex contains: name: description: - The certificate extension name. returned: on success type: string sample: name_example is_critical: description: - The critical flag of the extension. Critical extensions must be processed, non-critical extensions can be ignored. returned: on success type: bool sample: true value: description: - The certificate extension value. returned: on success type: string sample: value_example freeform_tags: description: - Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). - "Example: `{\\"Department\\": \\"Finance\\"}`" returned: on success type: dict sample: {'Department': 'Finance'} defined_tags: description: - Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). - "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`" returned: on success type: dict sample: {'Operations': {'CostCenter': 'US'}} lifecycle_state: description: - The current lifecycle state of the SSL certificate. returned: on success type: string sample: CREATING time_created: description: - The date and time the certificate was created, expressed in RFC 3339 timestamp format. 
returned: on success type: string sample: 2018-11-16T21:10:29Z sample: [{ "id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx", "compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx", "display_name": "display_name_example", "issued_by": "issued_by_example", "subject_name": { "country": "country_example", "state_province": "state_province_example", "locality": "locality_example", "organization": "organization_example", "organizational_unit": "organizational_unit_example", "common_name": "common_name_example", "email_address": "email_address_example" }, "issuer_name": { "country": "country_example", "state_province": "state_province_example", "locality": "locality_example", "organization": "organization_example", "organizational_unit": "organizational_unit_example", "common_name": "common_name_example", "email_address": "email_address_example" }, "serial_number": "serial_number_example", "version": 56, "signature_algorithm": "signature_algorithm_example", "time_not_valid_before": "2018-11-16T21:10:29Z", "time_not_valid_after": "2018-11-16T21:10:29Z", "public_key_info": { "algorithm": "algorithm_example", "exponent": 56, "key_size": 56 }, "extensions": [{ "name": "name_example", "is_critical": true, "value": "value_example" }], "freeform_tags": {'Department': 'Finance'}, "defined_tags": {'Operations': {'CostCenter': 'US'}}, "lifecycle_state": "CREATING", "time_created": "2018-11-16T21:10:29Z" }] """ from ansible.module_utils.basic import AnsibleModule from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import ( OCIResourceFactsHelperBase, get_custom_class, ) try: from oci.waas import WaasClient HAS_OCI_PY_SDK = True except ImportError: HAS_OCI_PY_SDK = False class WaasCertificateFactsHelperGen(OCIResourceFactsHelperBase): """Supported operations: get, list""" def get_required_params_for_get(self): return [ "certificate_id", ] def get_required_params_for_list(self): return [ "compartment_id", ] def get_resource(self): return oci_common_utils.call_with_backoff( self.client.get_certificate, certificate_id=self.module.params.get("certificate_id"), ) def list_resources(self): optional_list_method_params = [ "sort_by", "sort_order", "display_name", "lifecycle_state", "time_created_greater_than_or_equal_to", "time_created_less_than", ] optional_kwargs = dict( (param, self.module.params[param]) for param in optional_list_method_params if self.module.params.get(param) is not None ) return oci_common_utils.list_all_resources( self.client.list_certificates, compartment_id=self.module.params.get("compartment_id"), **optional_kwargs ) WaasCertificateFactsHelperCustom = get_custom_class("WaasCertificateFactsHelperCustom") class ResourceFactsHelper( WaasCertificateFactsHelperCustom, WaasCertificateFactsHelperGen ): pass def main(): module_args = oci_common_utils.get_common_arg_spec() module_args.update( dict( certificate_id=dict(aliases=["id"], type="str"), compartment_id=dict(type="str"), sort_by=dict( type="str", choices=[ "id", "compartmentId", "displayName", "notValidAfter", "timeCreated", ], ), sort_order=dict(type="str", choices=["ASC", "DESC"]), display_name=dict(aliases=["name"], type="list"), lifecycle_state=dict( type="list", choices=[ "CREATING", "ACTIVE", "FAILED", "UPDATING", "DELETING", "DELETED", ], ), time_created_greater_than_or_equal_to=dict(type="str"), time_created_less_than=dict(type="str"), ) ) module = AnsibleModule(argument_spec=module_args) if not HAS_OCI_PY_SDK: 
        module.fail_json(msg="oci python sdk required for this module.")

    resource_facts_helper = ResourceFactsHelper(
        module=module,
        resource_type="waas_certificate",
        service_client_class=WaasClient,
        namespace="waas",
    )

    result = []

    # Dispatch on the parameters supplied: a certificate_id selects a single
    # GET, a compartment_id selects a LIST, and anything else is a usage error.
    if resource_facts_helper.is_get():
        result = [resource_facts_helper.get()]
    elif resource_facts_helper.is_list():
        result = resource_facts_helper.list()
    else:
        resource_facts_helper.fail()

    module.exit_json(waas_certificates=result)


if __name__ == "__main__":
    main()
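# A hedged usage sketch (editorial addition, not part of the generated
# module): one way this facts module might be driven from a playbook,
# assuming the oracle.oci collection is installed and OCI API authentication
# is already configured. All OCIDs and filter values are illustrative
# placeholders; option names and the registered `waas_certificates` key come
# straight from the argument spec and RETURN documentation above.
#
#   - name: List ACTIVE certificates in a compartment, newest first
#     oci_waas_certificate_facts:
#       compartment_id: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
#       lifecycle_state: ["ACTIVE"]
#       sort_by: timeCreated
#       sort_order: DESC
#     register: cert_result
#
#   - name: Show the expiry of each returned certificate
#     debug:
#       msg: "{{ item.display_name }} expires {{ item.time_not_valid_after }}"
#     loop: "{{ cert_result.waas_certificates }}"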
37.224652
159
0.553354
from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { "metadata_version": "1.1", "status": ["preview"], "supported_by": "community", } DOCUMENTATION = """ --- module: oci_waas_certificate_facts short_description: Fetches details about one or multiple WaasCertificate resources in Oracle Cloud Infrastructure description: - Fetches details about one or multiple WaasCertificate resources in Oracle Cloud Infrastructure - Gets a list of SSL certificates that can be used in a WAAS policy. - If I(certificate_id) is specified, the details of a single WaasCertificate will be returned. version_added: "2.9" author: Oracle (@oracle) options: certificate_id: description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the SSL certificate used in the WAAS policy. This number is generated when the certificate is added to the policy. - Required to get a specific waas_certificate. type: str aliases: ["id"] compartment_id: description: - The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment. This number is generated when the compartment is created. - Required to list multiple waas_certificates. type: str sort_by: description: - The value by which certificate summaries are sorted in a paginated 'List' call. If unspecified, defaults to `timeCreated`. type: str choices: - "id" - "compartmentId" - "displayName" - "notValidAfter" - "timeCreated" sort_order: description: - The value of the sorting direction of resources in a paginated 'List' call. If unspecified, defaults to `DESC`. type: str choices: - "ASC" - "DESC" display_name: description: - Filter certificates using a list of display names. type: list aliases: ["name"] lifecycle_state: description: - Filter certificates using a list of lifecycle states. type: list choices: - "CREATING" - "ACTIVE" - "FAILED" - "UPDATING" - "DELETING" - "DELETED" time_created_greater_than_or_equal_to: description: - A filter that matches certificates created on or after the specified date-time. type: str time_created_less_than: description: - A filter that matches certificates created before the specified date-time. type: str extends_documentation_fragment: [ oracle.oci.oracle ] """ EXAMPLES = """ - name: List waas_certificates oci_waas_certificate_facts: compartment_id: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx - name: Get a specific waas_certificate oci_waas_certificate_facts: certificate_id: ocid1.certificate.oc1..xxxxxxEXAMPLExxxxxx """ RETURN = """ waas_certificates: description: - List of WaasCertificate resources returned: on success type: complex contains: id: description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the certificate. returned: on success type: string sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx compartment_id: description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the certificate's compartment. returned: on success type: string sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx display_name: description: - The user-friendly name of the certificate. returned: on success type: string sample: display_name_example issued_by: description: - "" returned: on success type: string sample: issued_by_example subject_name: description: - "" returned: on success type: complex contains: country: description: - ISO 3166-1 alpha-2 code of the country where the organization is located. 
For a list of codes, see L(ISO's website,https://www.iso.org/obp/ui/#search/code/). returned: on success type: string sample: country_example state_province: description: - The province where the organization is located. returned: on success type: string sample: state_province_example locality: description: - The city in which the organization is located. returned: on success type: string sample: locality_example organization: description: - The organization name. returned: on success type: string sample: organization_example organizational_unit: description: - The field to differentiate between divisions within an organization. returned: on success type: string sample: organizational_unit_example common_name: description: - The fully qualified domain name used for DNS lookups of the server. returned: on success type: string sample: common_name_example email_address: description: - The email address of the server's administrator. returned: on success type: string sample: email_address_example issuer_name: description: - "" returned: on success type: complex contains: country: description: - ISO 3166-1 alpha-2 code of the country where the organization is located. For a list of codes, see L(ISO's website,https://www.iso.org/obp/ui/#search/code/). returned: on success type: string sample: country_example state_province: description: - The province where the organization is located. returned: on success type: string sample: state_province_example locality: description: - The city in which the organization is located. returned: on success type: string sample: locality_example organization: description: - The organization name. returned: on success type: string sample: organization_example organizational_unit: description: - The field to differentiate between divisions within an organization. returned: on success type: string sample: organizational_unit_example common_name: description: - The Certificate Authority (CA) name. returned: on success type: string sample: common_name_example email_address: description: - The email address of the server's administrator. returned: on success type: string sample: email_address_example serial_number: description: - A unique, positive integer assigned by the Certificate Authority (CA). The issuer name and serial number identify a unique certificate. returned: on success type: string sample: serial_number_example version: description: - The version of the encoded certificate. returned: on success type: int sample: 56 signature_algorithm: description: - The identifier for the cryptographic algorithm used by the Certificate Authority (CA) to sign this certificate. returned: on success type: string sample: signature_algorithm_example time_not_valid_before: description: - The date and time the certificate will become valid, expressed in RFC 3339 timestamp format. returned: on success type: string sample: 2018-11-16T21:10:29Z time_not_valid_after: description: - The date and time the certificate will expire, expressed in RFC 3339 timestamp format. returned: on success type: string sample: 2018-11-16T21:10:29Z public_key_info: description: - "" returned: on success type: complex contains: algorithm: description: - The algorithm identifier and parameters for the public key. returned: on success type: string sample: algorithm_example exponent: description: - The private key exponent. returned: on success type: int sample: 56 key_size: description: - The number of bits in a key used by a cryptographic algorithm. 
returned: on success type: int sample: 56 extensions: description: - Additional attributes associated with users or public keys for managing relationships between Certificate Authorities. returned: on success type: complex contains: name: description: - The certificate extension name. returned: on success type: string sample: name_example is_critical: description: - The critical flag of the extension. Critical extensions must be processed, non-critical extensions can be ignored. returned: on success type: bool sample: true value: description: - The certificate extension value. returned: on success type: string sample: value_example freeform_tags: description: - Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). - "Example: `{\\"Department\\": \\"Finance\\"}`" returned: on success type: dict sample: {'Department': 'Finance'} defined_tags: description: - Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). - "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`" returned: on success type: dict sample: {'Operations': {'CostCenter': 'US'}} lifecycle_state: description: - The current lifecycle state of the SSL certificate. returned: on success type: string sample: CREATING time_created: description: - The date and time the certificate was created, expressed in RFC 3339 timestamp format. returned: on success type: string sample: 2018-11-16T21:10:29Z sample: [{ "id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx", "compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx", "display_name": "display_name_example", "issued_by": "issued_by_example", "subject_name": { "country": "country_example", "state_province": "state_province_example", "locality": "locality_example", "organization": "organization_example", "organizational_unit": "organizational_unit_example", "common_name": "common_name_example", "email_address": "email_address_example" }, "issuer_name": { "country": "country_example", "state_province": "state_province_example", "locality": "locality_example", "organization": "organization_example", "organizational_unit": "organizational_unit_example", "common_name": "common_name_example", "email_address": "email_address_example" }, "serial_number": "serial_number_example", "version": 56, "signature_algorithm": "signature_algorithm_example", "time_not_valid_before": "2018-11-16T21:10:29Z", "time_not_valid_after": "2018-11-16T21:10:29Z", "public_key_info": { "algorithm": "algorithm_example", "exponent": 56, "key_size": 56 }, "extensions": [{ "name": "name_example", "is_critical": true, "value": "value_example" }], "freeform_tags": {'Department': 'Finance'}, "defined_tags": {'Operations': {'CostCenter': 'US'}}, "lifecycle_state": "CREATING", "time_created": "2018-11-16T21:10:29Z" }] """ from ansible.module_utils.basic import AnsibleModule from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import ( OCIResourceFactsHelperBase, get_custom_class, ) try: from oci.waas import WaasClient HAS_OCI_PY_SDK = True except ImportError: HAS_OCI_PY_SDK = False class WaasCertificateFactsHelperGen(OCIResourceFactsHelperBase): def get_required_params_for_get(self): return [ 
"certificate_id", ] def get_required_params_for_list(self): return [ "compartment_id", ] def get_resource(self): return oci_common_utils.call_with_backoff( self.client.get_certificate, certificate_id=self.module.params.get("certificate_id"), ) def list_resources(self): optional_list_method_params = [ "sort_by", "sort_order", "display_name", "lifecycle_state", "time_created_greater_than_or_equal_to", "time_created_less_than", ] optional_kwargs = dict( (param, self.module.params[param]) for param in optional_list_method_params if self.module.params.get(param) is not None ) return oci_common_utils.list_all_resources( self.client.list_certificates, compartment_id=self.module.params.get("compartment_id"), **optional_kwargs ) WaasCertificateFactsHelperCustom = get_custom_class("WaasCertificateFactsHelperCustom") class ResourceFactsHelper( WaasCertificateFactsHelperCustom, WaasCertificateFactsHelperGen ): pass def main(): module_args = oci_common_utils.get_common_arg_spec() module_args.update( dict( certificate_id=dict(aliases=["id"], type="str"), compartment_id=dict(type="str"), sort_by=dict( type="str", choices=[ "id", "compartmentId", "displayName", "notValidAfter", "timeCreated", ], ), sort_order=dict(type="str", choices=["ASC", "DESC"]), display_name=dict(aliases=["name"], type="list"), lifecycle_state=dict( type="list", choices=[ "CREATING", "ACTIVE", "FAILED", "UPDATING", "DELETING", "DELETED", ], ), time_created_greater_than_or_equal_to=dict(type="str"), time_created_less_than=dict(type="str"), ) ) module = AnsibleModule(argument_spec=module_args) if not HAS_OCI_PY_SDK: module.fail_json(msg="oci python sdk required for this module.") resource_facts_helper = ResourceFactsHelper( module=module, resource_type="waas_certificate", service_client_class=WaasClient, namespace="waas", ) result = [] if resource_facts_helper.is_get(): result = [resource_facts_helper.get()] elif resource_facts_helper.is_list(): result = resource_facts_helper.list() else: resource_facts_helper.fail() module.exit_json(waas_certificates=result) if __name__ == "__main__": main()
true
true