hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72a84f3844befc8aa1a322a61d1a30d58921cb7 | 5,638 | py | Python | apps/challenges/tests/test_challenges.py | gene1wood/spark | 071d6da19076ee047530220223d7beab3d31abab | [
"BSD-3-Clause"
] | 3 | 2015-12-09T15:02:03.000Z | 2017-10-05T16:54:14.000Z | apps/challenges/tests/test_challenges.py | gene1wood/spark | 071d6da19076ee047530220223d7beab3d31abab | [
"BSD-3-Clause"
] | 2 | 2019-02-17T17:29:25.000Z | 2019-03-28T03:40:58.000Z | apps/challenges/tests/test_challenges.py | gene1wood/spark | 071d6da19076ee047530220223d7beab3d31abab | [
"BSD-3-Clause"
] | 3 | 2019-03-28T03:41:01.000Z | 2020-04-29T09:47:21.000Z | import logging
from datetime import datetime, timedelta
from spark.tests import TestCase
from nose.tools import eq_
from geo.continents import (AFRICA, ASIA, EUROPE, NORTH_AMERICA, SOUTH_AMERICA,
OCEANIA, ANTARCTICA)
from users.models import User
from stats.models import SharingHistory
from challenges.challenges import all_challenges
class ChallengesTestCase(TestCase):
    """Verify challenge completion logic against the boost/challenges fixtures.

    Challenge ids are "<level>_<index>" keys into ``all_challenges``; each test
    drives the relevant profile state (shares, logins, geo data) and asserts
    that ``is_completed_by`` flips accordingly.
    """

    fixtures = ['boost.json', 'challenges.json']

    def get_profile(self, username):
        # Re-fetch from the DB so computed fields (e.g. total_shares) are fresh.
        return User.objects.get(username=username).profile

    def assert_completion(self, profile, challenge_id):
        """Assert that `profile` has completed the given challenge."""
        eq_(True, all_challenges[challenge_id].is_completed_by(profile))

    def assert_non_completion(self, profile, challenge_id):
        """Assert that `profile` has NOT completed the given challenge."""
        eq_(False, all_challenges[challenge_id].is_completed_by(profile))

    def test_complete_1_1(self):
        # A single share is enough to complete challenge 1_1.
        profile = self.get_profile('bob')
        eq_(0, profile.total_shares)
        SharingHistory.add_share(profile)
        profile = self.get_profile('bob')
        eq_(1, profile.total_shares)
        self.assert_completion(profile, '1_1')

    def test_complete_1_2(self):
        # franck has completed Boost 1/2
        profile = self.get_profile('franck')
        self.assert_completion(profile, '1_2')

    def test_complete_1_3(self):
        # franck has completed Boost 2/2
        profile = self.get_profile('franck')
        self.assert_completion(profile, '1_3')

    def test_complete_2_1(self):
        # Sharing via Facebook completes 2_1.
        profile = self.get_profile('bob')
        eq_(0, profile.total_shares)
        SharingHistory.add_share_from_facebook(profile)
        self.assert_completion(profile, '2_1')

    def test_complete_2_2(self):
        # Sharing via Twitter completes 2_2.
        profile = self.get_profile('bob')
        eq_(0, profile.total_shares)
        SharingHistory.add_share_from_twitter(profile)
        self.assert_completion(profile, '2_2')

    def test_complete_2_3(self):
        # A desktop login completes 2_3; the flag is only set in memory, which
        # is apparently sufficient for is_completed_by — TODO confirm no save needed.
        profile = self.get_profile('bob')
        eq_(False, profile.login_desktop)
        profile.login_desktop = True
        self.assert_completion(profile, '2_3')

    def test_complete_2_4(self):
        # Sharing via QR code completes 2_4.
        profile = self.get_profile('bob')
        eq_(0, profile.total_shares)
        SharingHistory.add_share_from_qr_code(profile)
        self.assert_completion(profile, '2_4')

    def test_complete_2_5(self):
        # 2_5 appears to require a boost1-completed child profile located far
        # from the parent (nearby Paris suburb fails, Barcelona succeeds, which
        # matches a ~100km threshold) — TODO confirm exact distance rule.
        profile = self.get_profile('batman')
        child = profile.children_profiles[0]
        self.assert_non_completion(profile, '2_5')
        # Paris
        profile.boost1_completed = True
        profile.latitude = 48.857487002645485
        profile.longitude = 2.3291015625
        profile.save()
        # Close to Paris (< 100km)
        child.boost1_completed = True
        child.latitude = 48.821332549646634
        child.longitude = 2.4993896484375
        child.save()
        self.assert_non_completion(profile, '2_5')
        # Barcelona
        child.boost1_completed = True
        child.latitude = 41.387917
        child.longitude = 2.169918
        child.save()
        self.assert_completion(profile, '2_5')

    def test_complete_2_6(self):
        # 2_6 requires a child profile in a different country than the parent.
        profile = self.get_profile('batman')
        eq_(None, profile.country_code)
        profile.country_code = 'US'
        profile.save()
        self.assert_non_completion(profile, '2_6')
        child = profile.children_profiles[0]
        child.boost1_completed = True
        child.country_code = 'US'
        child.save()
        self.assert_non_completion(profile, '2_6')
        child.country_code = 'MX'
        child.save()
        self.assert_completion(profile, '2_6')

    def test_complete_2_7(self):
        # Thirteen shares complete 2_7.
        profile = self.get_profile('bob')
        eq_(0, profile.total_shares)
        for i in range(13):
            SharingHistory.add_share(profile)
        eq_(13, profile.total_shares)
        self.assert_completion(profile, '2_7')

    def test_complete_3_2(self):
        # Sharing via poster completes 3_2.
        profile = self.get_profile('bob')
        eq_(0, profile.total_shares)
        SharingHistory.add_share_from_poster(profile)
        self.assert_completion(profile, '3_2')

    def test_complete_3_3(self):
        # 3_3 requires a child profile on a different continent.
        profile = self.get_profile('batman')
        profile.boost1_completed = True
        profile.country_code = 'US'
        eq_(NORTH_AMERICA, profile.continent_code)
        child = profile.children_profiles[0]
        child.boost1_completed = True
        child.country_code = 'CA'
        child.save()
        eq_(NORTH_AMERICA, child.continent_code)
        self.assert_non_completion(profile, '3_3')
        child.country_code = 'FR'
        child.save()
        eq_(EUROPE, child.continent_code)
        self.assert_completion(profile, '3_3')

    def test_complete_3_4(self):
        # 3_4 seems to depend on how shares are spread over time: three shares
        # at 15h/8h/3h ago are not enough, adding one at 11h ago completes it.
        # TODO confirm the exact time-bucketing rule against the challenge code.
        profile = self.get_profile('bob')
        eq_(0, profile.total_shares)
        now = datetime.now()
        _create_share(profile, now - timedelta(hours=15))
        _create_share(profile, now - timedelta(hours=8))
        _create_share(profile, now - timedelta(hours=3))
        eq_(3, profile.total_shares)
        self.assert_non_completion(profile, '3_4')
        _create_share(profile, now - timedelta(hours=11))
        eq_(4, profile.total_shares)
        self.assert_completion(profile, '3_4')

    def test_complete_3_5(self):
        # Not implemented yet.
        pass
def _create_share(profile, date):
    """Insert a SharingHistory row for *profile*, backdated to *date*.

    date_shared is assigned after creation (presumably to override an
    auto-set timestamp — TODO confirm), which is why the row is saved twice.
    """
    record = SharingHistory.objects.create(parent=profile)
    record.date_shared = date
    record.save()
| 27.773399 | 79 | 0.631075 | import logging
from datetime import datetime, timedelta
from spark.tests import TestCase
from nose.tools import eq_
from geo.continents import (AFRICA, ASIA, EUROPE, NORTH_AMERICA, SOUTH_AMERICA,
OCEANIA, ANTARCTICA)
from users.models import User
from stats.models import SharingHistory
from challenges.challenges import all_challenges
class ChallengesTestCase(TestCase):
fixtures = ['boost.json', 'challenges.json']
def get_profile(self, username):
return User.objects.get(username=username).profile
def assert_completion(self, profile, challenge_id):
eq_(True, all_challenges[challenge_id].is_completed_by(profile))
def assert_non_completion(self, profile, challenge_id):
eq_(False, all_challenges[challenge_id].is_completed_by(profile))
def test_complete_1_1(self):
profile = self.get_profile('bob')
eq_(0, profile.total_shares)
SharingHistory.add_share(profile)
profile = self.get_profile('bob')
eq_(1, profile.total_shares)
self.assert_completion(profile, '1_1')
def test_complete_1_2(self):
profile = self.get_profile('franck')
self.assert_completion(profile, '1_2')
def test_complete_1_3(self):
profile = self.get_profile('franck')
self.assert_completion(profile, '1_3')
def test_complete_2_1(self):
profile = self.get_profile('bob')
eq_(0, profile.total_shares)
SharingHistory.add_share_from_facebook(profile)
self.assert_completion(profile, '2_1')
def test_complete_2_2(self):
profile = self.get_profile('bob')
eq_(0, profile.total_shares)
SharingHistory.add_share_from_twitter(profile)
self.assert_completion(profile, '2_2')
def test_complete_2_3(self):
profile = self.get_profile('bob')
eq_(False, profile.login_desktop)
profile.login_desktop = True
self.assert_completion(profile, '2_3')
def test_complete_2_4(self):
profile = self.get_profile('bob')
eq_(0, profile.total_shares)
SharingHistory.add_share_from_qr_code(profile)
self.assert_completion(profile, '2_4')
def test_complete_2_5(self):
profile = self.get_profile('batman')
child = profile.children_profiles[0]
self.assert_non_completion(profile, '2_5')
profile.boost1_completed = True
profile.latitude = 48.857487002645485
profile.longitude = 2.3291015625
profile.save()
child.boost1_completed = True
child.latitude = 48.821332549646634
child.longitude = 2.4993896484375
child.save()
self.assert_non_completion(profile, '2_5')
child.boost1_completed = True
child.latitude = 41.387917
child.longitude = 2.169918
child.save()
self.assert_completion(profile, '2_5')
def test_complete_2_6(self):
profile = self.get_profile('batman')
eq_(None, profile.country_code)
profile.country_code = 'US'
profile.save()
self.assert_non_completion(profile, '2_6')
child = profile.children_profiles[0]
child.boost1_completed = True
child.country_code = 'US'
child.save()
self.assert_non_completion(profile, '2_6')
child.country_code = 'MX'
child.save()
self.assert_completion(profile, '2_6')
def test_complete_2_7(self):
profile = self.get_profile('bob')
eq_(0, profile.total_shares)
for i in range(13):
SharingHistory.add_share(profile)
eq_(13, profile.total_shares)
self.assert_completion(profile, '2_7')
def test_complete_3_2(self):
profile = self.get_profile('bob')
eq_(0, profile.total_shares)
SharingHistory.add_share_from_poster(profile)
self.assert_completion(profile, '3_2')
def test_complete_3_3(self):
profile = self.get_profile('batman')
profile.boost1_completed = True
profile.country_code = 'US'
eq_(NORTH_AMERICA, profile.continent_code)
child = profile.children_profiles[0]
child.boost1_completed = True
child.country_code = 'CA'
child.save()
eq_(NORTH_AMERICA, child.continent_code)
self.assert_non_completion(profile, '3_3')
child.country_code = 'FR'
child.save()
eq_(EUROPE, child.continent_code)
self.assert_completion(profile, '3_3')
def test_complete_3_4(self):
profile = self.get_profile('bob')
eq_(0, profile.total_shares)
now = datetime.now()
_create_share(profile, now - timedelta(hours=15))
_create_share(profile, now - timedelta(hours=8))
_create_share(profile, now - timedelta(hours=3))
eq_(3, profile.total_shares)
self.assert_non_completion(profile, '3_4')
_create_share(profile, now - timedelta(hours=11))
eq_(4, profile.total_shares)
self.assert_completion(profile, '3_4')
def test_complete_3_5(self):
pass
def _create_share(profile, date):
share = SharingHistory.objects.create(parent=profile)
share.date_shared = date
share.save()
| true | true |
f72a8508eb15e1b3e71b5ca8ef252bc825472afa | 15,226 | py | Python | tests/integration-tests/tests/storage/test_ebs.py | eshpc/aws-parallelcluster | 8cc6169a12661ce1c0025c93ebd9019c26e7219e | [
"Apache-2.0"
] | null | null | null | tests/integration-tests/tests/storage/test_ebs.py | eshpc/aws-parallelcluster | 8cc6169a12661ce1c0025c93ebd9019c26e7219e | [
"Apache-2.0"
] | 108 | 2021-10-11T09:12:06.000Z | 2022-03-28T09:28:39.000Z | tests/integration-tests/tests/storage/test_ebs.py | yuleiwan/aws-parallelcluster | aad2a3019ef4ad08d702f5acf41b152b3f7a0b46 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
import boto3
import pytest
import utils
from assertpy import assert_that
from remote_command_executor import RemoteCommandExecutor
from tests.common.schedulers_common import get_scheduler_commands
from tests.storage.kms_key_factory import KMSKeyFactory
from tests.storage.snapshots_factory import EBSSnapshotsFactory
from tests.storage.storage_common import verify_directory_correctly_shared
@pytest.mark.regions(["eu-west-3", "cn-north-1", "us-gov-west-1"])
@pytest.mark.instances(["c4.xlarge", "c5.xlarge"])
@pytest.mark.schedulers(["slurm"])
@pytest.mark.usefixtures("instance")
def test_ebs_single(scheduler, pcluster_config_reader, clusters_factory, kms_key_factory, region, os):
    """Single shared EBS volume with a customer-managed KMS key.

    Creates a cluster whose config references a freshly created KMS key, then
    verifies the 35G volume is mounted, shared with compute nodes, encrypted
    with that key, and that the root volume is encrypted as well.
    """
    mount_dir = "ebs_mount_dir"
    kms_key_id = kms_key_factory.create_kms_key(region)
    cluster_config = pcluster_config_reader(
        mount_dir=mount_dir, ec2_iam_role=kms_key_factory.iam_role_arn, ebs_kms_key_id=kms_key_id
    )
    cluster = clusters_factory(cluster_config)
    remote_command_executor = RemoteCommandExecutor(cluster)
    mount_dir = "/" + mount_dir
    scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)
    # Only one volume is configured, so the first id is the one to check.
    volume_id = get_ebs_volume_ids(cluster, region)[0]
    _test_ebs_correctly_mounted(remote_command_executor, mount_dir, volume_size=35)
    _test_ebs_correctly_shared(remote_command_executor, mount_dir, scheduler_commands)
    _test_ebs_encrypted_with_kms(volume_id, region, encrypted=True, kms_key_id=kms_key_id)
    _test_root_volume_encryption(cluster, os, region, scheduler, encrypted=True)
@pytest.mark.dimensions("ap-northeast-2", "c5.xlarge", "alinux2", "slurm")
@pytest.mark.dimensions("cn-northwest-1", "c4.xlarge", "ubuntu1804", "slurm")
@pytest.mark.dimensions("eu-west-1", "c5.xlarge", "slurm")
@pytest.mark.usefixtures("os", "instance")
def test_ebs_snapshot(
    request, vpc_stacks, region, scheduler, pcluster_config_reader, snapshots_factory, clusters_factory
):
    """Volume created from a snapshot: the snapshot's content survives, and the
    volume can be declared larger (21G) than its 10G source snapshot."""
    logging.info("Testing ebs snapshot")
    mount_dir = "ebs_mount_dir"
    volume_size = 21
    # This volume_size is set to be larger than snapshot size(10G), to test create volumes larger than its snapshot size
    logging.info("Creating snapshot")
    snapshot_id = snapshots_factory.create_snapshot(request, vpc_stacks[region].cfn_outputs["PublicSubnetId"], region)
    logging.info("Snapshot id: %s" % snapshot_id)
    cluster_config = pcluster_config_reader(mount_dir=mount_dir, volume_size=volume_size, snapshot_id=snapshot_id)
    cluster = clusters_factory(cluster_config)
    remote_command_executor = RemoteCommandExecutor(cluster)
    mount_dir = "/" + mount_dir
    scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)
    # The filesystem keeps the snapshot's ~10G size; _test_ebs_resize checks
    # the underlying block device is actually 21G.
    _test_ebs_correctly_mounted(remote_command_executor, mount_dir, volume_size="9.8")
    _test_ebs_resize(remote_command_executor, mount_dir, volume_size=volume_size)
    _test_ebs_correctly_shared(remote_command_executor, mount_dir, scheduler_commands)
    # Checks for test data
    result = remote_command_executor.run_remote_command("cat {}/test.txt".format(mount_dir))
    assert_that(result.stdout.strip()).is_equal_to("hello world")
# cn-north-1 does not support KMS
@pytest.mark.dimensions("ca-central-1", "c5.xlarge", "alinux2", "awsbatch")
@pytest.mark.dimensions("ca-central-1", "c5.xlarge", "ubuntu1804", "slurm")
@pytest.mark.dimensions("eu-west-2", "c5.xlarge", "slurm")
@pytest.mark.usefixtures("instance")
def test_ebs_multiple(scheduler, pcluster_config_reader, clusters_factory, region, os):
    """Five shared EBS volumes of different types/sizes.

    Verifies each volume's mount, sharing with compute nodes, volume type,
    iops (io1/io2/gp3 only), per-volume encryption settings, and finally the
    root volume's (un)encryption and declared RootVolume configuration.
    """
    mount_dirs = ["/ebs_mount_dir_{0}".format(i) for i in range(0, 5)]
    volume_sizes = [15 + 5 * i for i in range(0, 5)]
    # for volume type sc1 and st1, the minimum volume sizes are 500G
    volume_sizes[3] = 500
    volume_sizes[4] = 500
    cluster_config = pcluster_config_reader(mount_dirs=mount_dirs, volume_sizes=volume_sizes)
    cluster = clusters_factory(cluster_config)
    remote_command_executor = RemoteCommandExecutor(cluster)
    scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)
    for mount_dir, volume_size in zip(mount_dirs, volume_sizes):
        # for volume size equal to 500G, the filesystem size is only about 492G
        # This is because the file systems use some of the total space available on a device for storing internal
        # structures and data (the file system's metadata). The overhead of the XFS filesystem is around 0.5%.
        # If we test with small volume size(eg: 40G), the number is not large enough to show the gap between the
        # partition size and the filesystem size. For sc1 and st1, the minimum size is 500G, so there will be a size
        # difference.
        _test_ebs_correctly_mounted(
            remote_command_executor, mount_dir, volume_size if volume_size != 500 else "49[0-9]"
        )
        _test_ebs_correctly_shared(remote_command_executor, mount_dir, scheduler_commands)
    volume_ids = get_ebs_volume_ids(cluster, region)
    for i in range(len(volume_ids)):
        # test different volume types
        # Config storage entries are assumed to be named ebs1..ebs5 in volume order.
        volume_id = volume_ids[i]
        ebs_settings = _get_ebs_settings_by_name(cluster.config, f"ebs{i+1}")
        volume_type = ebs_settings["VolumeType"]
        volume = describe_volume(volume_id, region)
        assert_that(volume[0]).is_equal_to(volume_type)
        encrypted = ebs_settings.get("Encrypted")
        if encrypted is None:
            # Default encryption if not specified
            encrypted = True
        _test_ebs_encrypted_with_kms(volume_id, region, encrypted=encrypted, kms_key_id=ebs_settings.get("KmsKeyId"))
        # test different iops
        # only io1, io2, gp3 can configure iops
        if volume_type in ["io1", "io2", "gp3"]:
            volume_iops = ebs_settings["Iops"]
            assert_that(volume[1]).is_equal_to(int(volume_iops))
    _test_root_volume_encryption(cluster, os, region, scheduler, encrypted=False)
    _assert_root_volume_configuration(cluster, os, region, scheduler)
def _get_ebs_settings_by_name(config, name):
for shared_storage in config["SharedStorage"]:
if shared_storage["Name"] == name:
return shared_storage["EbsSettings"]
@pytest.mark.dimensions("ap-northeast-2", "c5.xlarge", "centos7", "slurm")
@pytest.mark.usefixtures("os", "instance")
def test_ebs_existing(
    request, vpc_stacks, region, scheduler, pcluster_config_reader, snapshots_factory, clusters_factory
):
    """Pre-existing EBS volume attached to a cluster.

    Verifies the volume's content is visible from the cluster, and that the
    volume is left intact ("available") after the cluster is deleted.
    """
    logging.info("Testing ebs existing")
    existing_mount_dir = "existing_mount_dir"
    logging.info("Creating volume")
    volume_id = snapshots_factory.create_existing_volume(
        request, vpc_stacks[region].cfn_outputs["PublicSubnetId"], region
    )
    logging.info("Existing Volume id: %s" % volume_id)
    cluster_config = pcluster_config_reader(volume_id=volume_id, existing_mount_dir=existing_mount_dir)
    cluster = clusters_factory(cluster_config)
    remote_command_executor = RemoteCommandExecutor(cluster)
    scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)
    existing_mount_dir = "/" + existing_mount_dir
    _test_ebs_correctly_mounted(remote_command_executor, existing_mount_dir, volume_size="9.8")
    _test_ebs_correctly_shared(remote_command_executor, existing_mount_dir, scheduler_commands)
    # Checks for test data
    result = remote_command_executor.run_remote_command("cat {}/test.txt".format(existing_mount_dir))
    assert_that(result.stdout.strip()).is_equal_to("hello world")
    # delete the cluster before detaching the EBS volume
    cluster.delete()
    # check the volume still exists after deleting the cluster
    _assert_volume_exist(volume_id, region)
def _test_ebs_correctly_mounted(remote_command_executor, mount_dir, volume_size):
    """Assert an ext4 filesystem of the expected size is mounted at mount_dir.

    volume_size may be an int, a decimal string, or a regex fragment (e.g.
    "49[0-9]"); it is interpolated into the size-matching pattern. Also checks
    /etc/fstab has a UUID-based _netdev entry for the mount point.
    """
    logging.info("Testing ebs {0} is correctly mounted".format(mount_dir))
    df_output = remote_command_executor.run_remote_command(
        f"df -h -t ext4 | tail -n +2 | awk '{{print $2, $6}}' | grep '{mount_dir}'"
    )
    assert_that(df_output.stdout).matches(rf"{volume_size}G {mount_dir}")
    fstab_output = remote_command_executor.run_remote_command("cat /etc/fstab")
    assert_that(fstab_output.stdout).matches(rf"UUID=.* {mount_dir} ext4 _netdev 0 0")
def _test_ebs_correctly_shared(remote_command_executor, mount_dir, scheduler_commands):
    """Check mount_dir is shared with compute nodes (delegates the actual
    read/write verification to verify_directory_correctly_shared)."""
    logging.info("Testing ebs correctly mounted on compute nodes")
    verify_directory_correctly_shared(remote_command_executor, mount_dir, scheduler_commands)
def _test_home_correctly_shared(remote_command_executor, scheduler_commands):
    """Check /home is shared with compute nodes.

    NOTE(review): not referenced by any test visible in this module.
    """
    logging.info("Testing home dir correctly mounted on compute nodes")
    verify_directory_correctly_shared(remote_command_executor, "/home", scheduler_commands)
def _test_ebs_resize(remote_command_executor, mount_dir, volume_size):
    """
    This test verifies the following case:
    If the volume is created from a snapshot with a size larger than the snapshot, the size of the volume is correct.

    It resolves mount_dir -> filesystem device -> parent block device, then
    asserts the block device (not the filesystem) reports `volume_size`G.
    """
    logging.info("Testing ebs has correct volume size")
    # get the filesystem that the shared_dir is mounted on
    # example output of "df -h -t ext4"
    # Filesystem Size Used Avail Use% Mounted on
    # /dev/nvme1n1p1 9.8G 37M 9.3G 1% /ebs_mount_dir
    # /dev/nvme2n1p1 9.8G 37M 9.3G 1% /existing_mount_dir
    filesystem_name = remote_command_executor.run_remote_command(
        "df -h -t ext4 | tail -n +2 |grep '{mount_dir}' | awk '{{print $1}}'".format(mount_dir=mount_dir)
    ).stdout
    # get the volume name given the filesystem name
    # example input: /dev/nvme1n1p1
    # example output: nvme1n1
    volume_name = remote_command_executor.run_remote_command(
        "lsblk -no pkname {filesystem_name}".format(filesystem_name=filesystem_name)
    ).stdout
    # get the volume size of the volume
    # example output of "lsblk"
    # NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
    # nvme0n1 259:0 0 25G 0 disk
    # ├─nvme0n1p1 259:1 0 25G 0 part /
    # └─nvme0n1p128 259:2 0 1M 0 part
    # nvme1n1 259:3 0 21G 0 disk
    # └─nvme1n1p1 259:4 0 10G 0 part /ebs_mount_dir
    # nvme2n1 259:5 0 10G 0 disk
    # └─nvme2n1p1 259:6 0 10G 0 part /existing_mount_dir
    # NOTE(review): the trailing '' after '1p' below is an empty shell word
    # (harmless no-op) — looks like leftover quoting that could be cleaned up.
    result = remote_command_executor.run_remote_command(
        "lsblk | tail -n +2 | grep {volume_name}| awk '{{print $4}}' | sed -n '1p'''".format(volume_name=volume_name)
    )
    assert_that(result.stdout).matches(r"{size}G".format(size=volume_size))
def get_ebs_volume_ids(cluster, region):
    """Return the configured EBS volume ids for the cluster.

    Parsed from the comma-separated "EBSIds" CloudFormation stack output,
    e.g. ['vol-000', 'vol-001', 'vol-002'].
    """
    stack_outputs = utils.retrieve_cfn_outputs(cluster.cfn_name, region)
    return stack_outputs.get("EBSIds").split(",")
def describe_volume(volume_id, region):
    """Return a (volume_type, iops) tuple for the given EBS volume.

    Queries the EC2 DescribeVolumes API; iops is None for volume types that
    do not report it.
    """
    ec2_client = boto3.client("ec2", region_name=region)
    volume = ec2_client.describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0]
    return volume.get("VolumeType"), volume.get("Iops")
def _assert_volume_exist(volume_id, region):
    """Assert the volume still exists and is detached (state "available")."""
    ec2_client = boto3.client("ec2", region_name=region)
    volume = ec2_client.describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0]
    assert_that(volume.get("State")).is_equal_to("available")
def _test_ebs_encrypted_with_kms(volume_id, region, encrypted, kms_key_id=None):
    """Assert the volume's encryption state (and KMS key, if given).

    Fix: compare the Encrypted flag with is_equal_to instead of asserting a
    pre-computed boolean is_true(), so a failure reports the actual vs
    expected value rather than an uninformative "expected True but was False".
    """
    logging.info("Getting Encrypted information from DescribeVolumes API.")
    volume_info = boto3.client("ec2", region_name=region).describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0]
    assert_that(volume_info.get("Encrypted")).is_equal_to(encrypted)
    if kms_key_id:
        assert_that(volume_info.get("KmsKeyId")).matches(kms_key_id)
def _test_root_volume_encryption(cluster, os, region, scheduler, encrypted):
    """Assert root volume encryption state for the cluster's instances.

    slurm: every cluster instance (head and compute) is checked.
    other schedulers (awsbatch): only the head node root volume is checked.
    """
    logging.info("Testing root volume encryption.")
    if scheduler != "slurm":
        # Only the head node's root volume can be encrypted with awsbatch.
        head_root_volume = utils.get_root_volume_id(cluster.cfn_resources["HeadNode"], region, os)
        _test_ebs_encrypted_with_kms(head_root_volume, region, encrypted=encrypted)
        return
    for instance_id in cluster.get_cluster_instance_ids():
        root_volume = utils.get_root_volume_id(instance_id, region, os)
        _test_ebs_encrypted_with_kms(root_volume, region, encrypted=encrypted)
def _assert_root_volume_configuration(cluster, os, region, scheduler):
    """Check root volumes against the RootVolume settings in the cluster config.

    Head node settings come from HeadNode/LocalStorage/RootVolume; for slurm,
    compute nodes are checked against the first SlurmQueue's ComputeSettings.
    Each check is skipped when the corresponding config key is absent.
    """
    logging.info("Testing root volume type, iops, throughput.")
    # Test root volume of head node
    head_node = cluster.cfn_resources["HeadNode"]
    if utils.dict_has_nested_key(cluster.config, ("HeadNode", "LocalStorage", "RootVolume")):
        logging.info("Checking head node root volume settings")
        root_volume_id = utils.get_root_volume_id(head_node, region, os)
        expected_settings = cluster.config["HeadNode"]["LocalStorage"]["RootVolume"]
        _assert_volume_configuration(expected_settings, root_volume_id, region)
    if scheduler == "slurm":
        # Only if the scheduler is slurm, root volumes both on compute can be configured
        instance_ids = cluster.get_cluster_instance_ids()
        for instance in instance_ids:
            if instance == head_node:
                # head node is already checked
                continue
            root_volume_id = utils.get_root_volume_id(instance, region, os)
            # NOTE(review): only queue 0's settings are consulted — assumes a
            # single-queue test config; confirm if multi-queue configs appear.
            if utils.dict_has_nested_key(
                cluster.config, ("Scheduling", "SlurmQueues", 0, "ComputeSettings", "LocalStorage", "RootVolume")
            ):
                logging.info("Checking compute node root volume settings")
                expected_settings = cluster.config["Scheduling"]["SlurmQueues"][0]["ComputeSettings"]["LocalStorage"][
                    "RootVolume"
                ]
                _assert_volume_configuration(expected_settings, root_volume_id, region)
def _assert_volume_configuration(expected_settings, volume_id, region):
    """Assert each expected key/value pair matches the volume's actual
    DescribeVolumes attributes (e.g. VolumeType, Iops, Throughput)."""
    ec2_client = boto3.client("ec2", region_name=region)
    actual_settings = ec2_client.describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0]
    for key, expected_value in expected_settings.items():
        assert_that(actual_settings[key]).is_equal_to(expected_value)
@pytest.fixture(scope="class")
def snapshots_factory():
    """Class-scoped factory for EBS snapshots/volumes; tears down everything it
    created via release_all() after the test class finishes."""
    factory = EBSSnapshotsFactory()
    yield factory
    factory.release_all()
@pytest.fixture(scope="module")
def kms_key_factory():
    """Module-scoped factory for KMS keys (and the associated IAM role); tears
    down everything it created via release_all() after the module finishes."""
    factory = KMSKeyFactory()
    yield factory
    factory.release_all()
| 46.705521 | 120 | 0.732825 |
import logging
import boto3
import pytest
import utils
from assertpy import assert_that
from remote_command_executor import RemoteCommandExecutor
from tests.common.schedulers_common import get_scheduler_commands
from tests.storage.kms_key_factory import KMSKeyFactory
from tests.storage.snapshots_factory import EBSSnapshotsFactory
from tests.storage.storage_common import verify_directory_correctly_shared
@pytest.mark.regions(["eu-west-3", "cn-north-1", "us-gov-west-1"])
@pytest.mark.instances(["c4.xlarge", "c5.xlarge"])
@pytest.mark.schedulers(["slurm"])
@pytest.mark.usefixtures("instance")
def test_ebs_single(scheduler, pcluster_config_reader, clusters_factory, kms_key_factory, region, os):
mount_dir = "ebs_mount_dir"
kms_key_id = kms_key_factory.create_kms_key(region)
cluster_config = pcluster_config_reader(
mount_dir=mount_dir, ec2_iam_role=kms_key_factory.iam_role_arn, ebs_kms_key_id=kms_key_id
)
cluster = clusters_factory(cluster_config)
remote_command_executor = RemoteCommandExecutor(cluster)
mount_dir = "/" + mount_dir
scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)
volume_id = get_ebs_volume_ids(cluster, region)[0]
_test_ebs_correctly_mounted(remote_command_executor, mount_dir, volume_size=35)
_test_ebs_correctly_shared(remote_command_executor, mount_dir, scheduler_commands)
_test_ebs_encrypted_with_kms(volume_id, region, encrypted=True, kms_key_id=kms_key_id)
_test_root_volume_encryption(cluster, os, region, scheduler, encrypted=True)
@pytest.mark.dimensions("ap-northeast-2", "c5.xlarge", "alinux2", "slurm")
@pytest.mark.dimensions("cn-northwest-1", "c4.xlarge", "ubuntu1804", "slurm")
@pytest.mark.dimensions("eu-west-1", "c5.xlarge", "slurm")
@pytest.mark.usefixtures("os", "instance")
def test_ebs_snapshot(
request, vpc_stacks, region, scheduler, pcluster_config_reader, snapshots_factory, clusters_factory
):
logging.info("Testing ebs snapshot")
mount_dir = "ebs_mount_dir"
volume_size = 21
logging.info("Creating snapshot")
snapshot_id = snapshots_factory.create_snapshot(request, vpc_stacks[region].cfn_outputs["PublicSubnetId"], region)
logging.info("Snapshot id: %s" % snapshot_id)
cluster_config = pcluster_config_reader(mount_dir=mount_dir, volume_size=volume_size, snapshot_id=snapshot_id)
cluster = clusters_factory(cluster_config)
remote_command_executor = RemoteCommandExecutor(cluster)
mount_dir = "/" + mount_dir
scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)
_test_ebs_correctly_mounted(remote_command_executor, mount_dir, volume_size="9.8")
_test_ebs_resize(remote_command_executor, mount_dir, volume_size=volume_size)
_test_ebs_correctly_shared(remote_command_executor, mount_dir, scheduler_commands)
result = remote_command_executor.run_remote_command("cat {}/test.txt".format(mount_dir))
assert_that(result.stdout.strip()).is_equal_to("hello world")
@pytest.mark.dimensions("ca-central-1", "c5.xlarge", "alinux2", "awsbatch")
@pytest.mark.dimensions("ca-central-1", "c5.xlarge", "ubuntu1804", "slurm")
@pytest.mark.dimensions("eu-west-2", "c5.xlarge", "slurm")
@pytest.mark.usefixtures("instance")
def test_ebs_multiple(scheduler, pcluster_config_reader, clusters_factory, region, os):
mount_dirs = ["/ebs_mount_dir_{0}".format(i) for i in range(0, 5)]
volume_sizes = [15 + 5 * i for i in range(0, 5)]
volume_sizes[3] = 500
volume_sizes[4] = 500
cluster_config = pcluster_config_reader(mount_dirs=mount_dirs, volume_sizes=volume_sizes)
cluster = clusters_factory(cluster_config)
remote_command_executor = RemoteCommandExecutor(cluster)
scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)
for mount_dir, volume_size in zip(mount_dirs, volume_sizes):
# If we test with small volume size(eg: 40G), the number is not large enough to show the gap between the
# partition size and the filesystem size. For sc1 and st1, the minimum size is 500G, so there will be a size
# difference.
_test_ebs_correctly_mounted(
remote_command_executor, mount_dir, volume_size if volume_size != 500 else "49[0-9]"
)
_test_ebs_correctly_shared(remote_command_executor, mount_dir, scheduler_commands)
volume_ids = get_ebs_volume_ids(cluster, region)
for i in range(len(volume_ids)):
# test different volume types
volume_id = volume_ids[i]
ebs_settings = _get_ebs_settings_by_name(cluster.config, f"ebs{i+1}")
volume_type = ebs_settings["VolumeType"]
volume = describe_volume(volume_id, region)
assert_that(volume[0]).is_equal_to(volume_type)
encrypted = ebs_settings.get("Encrypted")
if encrypted is None:
# Default encryption if not specified
encrypted = True
_test_ebs_encrypted_with_kms(volume_id, region, encrypted=encrypted, kms_key_id=ebs_settings.get("KmsKeyId"))
# test different iops
# only io1, io2, gp3 can configure iops
if volume_type in ["io1", "io2", "gp3"]:
volume_iops = ebs_settings["Iops"]
assert_that(volume[1]).is_equal_to(int(volume_iops))
_test_root_volume_encryption(cluster, os, region, scheduler, encrypted=False)
_assert_root_volume_configuration(cluster, os, region, scheduler)
def _get_ebs_settings_by_name(config, name):
for shared_storage in config["SharedStorage"]:
if shared_storage["Name"] == name:
return shared_storage["EbsSettings"]
@pytest.mark.dimensions("ap-northeast-2", "c5.xlarge", "centos7", "slurm")
@pytest.mark.usefixtures("os", "instance")
def test_ebs_existing(
request, vpc_stacks, region, scheduler, pcluster_config_reader, snapshots_factory, clusters_factory
):
logging.info("Testing ebs existing")
existing_mount_dir = "existing_mount_dir"
logging.info("Creating volume")
volume_id = snapshots_factory.create_existing_volume(
request, vpc_stacks[region].cfn_outputs["PublicSubnetId"], region
)
logging.info("Existing Volume id: %s" % volume_id)
cluster_config = pcluster_config_reader(volume_id=volume_id, existing_mount_dir=existing_mount_dir)
cluster = clusters_factory(cluster_config)
remote_command_executor = RemoteCommandExecutor(cluster)
scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)
existing_mount_dir = "/" + existing_mount_dir
_test_ebs_correctly_mounted(remote_command_executor, existing_mount_dir, volume_size="9.8")
_test_ebs_correctly_shared(remote_command_executor, existing_mount_dir, scheduler_commands)
# Checks for test data
result = remote_command_executor.run_remote_command("cat {}/test.txt".format(existing_mount_dir))
assert_that(result.stdout.strip()).is_equal_to("hello world")
# delete the cluster before detaching the EBS volume
cluster.delete()
# check the volume still exists after deleting the cluster
_assert_volume_exist(volume_id, region)
def _test_ebs_correctly_mounted(remote_command_executor, mount_dir, volume_size):
    """Check that ``mount_dir`` is an ext4 mount of the expected size and is persisted in /etc/fstab."""
    logging.info("Testing ebs {0} is correctly mounted".format(mount_dir))
    df_command = "df -h -t ext4 | tail -n +2 | awk '{{print $2, $6}}' | grep '{0}'".format(mount_dir)
    df_output = remote_command_executor.run_remote_command(df_command).stdout
    assert_that(df_output).matches(r"{size}G {mount_dir}".format(size=volume_size, mount_dir=mount_dir))
    # The mount must also be configured to come back after a reboot.
    fstab_output = remote_command_executor.run_remote_command("cat /etc/fstab").stdout
    assert_that(fstab_output).matches(r"UUID=.* {mount_dir} ext4 _netdev 0 0".format(mount_dir=mount_dir))
def _test_ebs_correctly_shared(remote_command_executor, mount_dir, scheduler_commands):
    """Verify ``mount_dir`` is correctly shared with the compute nodes (delegates to verify_directory_correctly_shared)."""
    logging.info("Testing ebs correctly mounted on compute nodes")
    verify_directory_correctly_shared(remote_command_executor, mount_dir, scheduler_commands)
def _test_home_correctly_shared(remote_command_executor, scheduler_commands):
    """Verify /home is correctly shared with the compute nodes (delegates to verify_directory_correctly_shared)."""
    logging.info("Testing home dir correctly mounted on compute nodes")
    verify_directory_correctly_shared(remote_command_executor, "/home", scheduler_commands)
def _test_ebs_resize(remote_command_executor, mount_dir, volume_size):
    """Check via lsblk that the volume backing ``mount_dir`` reports the expected size ``volume_size`` (in GiB)."""
    logging.info("Testing ebs has correct volume size")
    # get the filesystem that the shared_dir is mounted on
    # example output of "df -h -t ext4"
    # Filesystem Size Used Avail Use% Mounted on
    # /dev/nvme1n1p1 9.8G 37M 9.3G 1% /ebs_mount_dir
    # /dev/nvme2n1p1 9.8G 37M 9.3G 1% /existing_mount_dir
    filesystem_name = remote_command_executor.run_remote_command(
        "df -h -t ext4 | tail -n +2 |grep '{mount_dir}' | awk '{{print $1}}'".format(mount_dir=mount_dir)
    ).stdout
    # get the volume name given the filesystem name
    # example input: /dev/nvme1n1p1
    # example output: nvme1n1
    volume_name = remote_command_executor.run_remote_command(
        "lsblk -no pkname {filesystem_name}".format(filesystem_name=filesystem_name)
    ).stdout
    # get the volume size of the volume
    # example output of "lsblk"
    # NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
    # nvme0n1 259:0 0 25G 0 disk
    # ├─nvme0n1p1 259:1 0 25G 0 part /
    # └─nvme0n1p128 259:2 0 1M 0 part
    # nvme1n1 259:3 0 21G 0 disk
    # └─nvme1n1p1 259:4 0 10G 0 part /ebs_mount_dir
    # nvme2n1 259:5 0 10G 0 disk
    # └─nvme2n1p1 259:6 0 10G 0 part /existing_mount_dir
    # NOTE(review): the trailing '' after sed's '1p' is an empty shell-string
    # concatenation - harmless, but looks accidental.
    result = remote_command_executor.run_remote_command(
        "lsblk | tail -n +2 | grep {volume_name}| awk '{{print $4}}' | sed -n '1p'''".format(volume_name=volume_name)
    )
    assert_that(result.stdout).matches(r"{size}G".format(size=volume_size))
def get_ebs_volume_ids(cluster, region):
    """Return the configured EBS volume ids, e.g. ['vol-000', 'vol-001', 'vol-002'].

    The ids are published by the cluster's CloudFormation stack as a
    comma-separated "EBSIds" output.
    """
    stack_outputs = utils.retrieve_cfn_outputs(cluster.cfn_name, region)
    return stack_outputs.get("EBSIds").split(",")
def describe_volume(volume_id, region):
    """Return the (VolumeType, Iops) pair of the EBS volume as reported by EC2."""
    ec2 = boto3.client("ec2", region_name=region)
    volume = ec2.describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0]
    return volume.get("VolumeType"), volume.get("Iops")
def _assert_volume_exist(volume_id, region):
    """Assert that ``volume_id`` still exists and is detached (EC2 state "available")."""
    ec2 = boto3.client("ec2", region_name=region)
    volume = ec2.describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0]
    assert_that(volume.get("State")).is_equal_to("available")
def _test_ebs_encrypted_with_kms(volume_id, region, encrypted, kms_key_id=None):
    """Assert the encryption state (and optionally the KMS key) of an EBS volume.

    Args:
        volume_id: id of the volume to inspect.
        region: AWS region hosting the volume.
        encrypted: expected value of the volume's "Encrypted" flag.
        kms_key_id: if given, the volume's KmsKeyId must match this pattern.
    """
    logging.info("Getting Encrypted information from DescribeVolumes API.")
    volume_info = boto3.client("ec2", region_name=region).describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0]
    # Assert the equality directly instead of "(x == y).is_true()" so a failure
    # reports the actual and expected values rather than just "False".
    assert_that(volume_info.get("Encrypted")).is_equal_to(encrypted)
    if kms_key_id:
        assert_that(volume_info.get("KmsKeyId")).matches(kms_key_id)
def _test_root_volume_encryption(cluster, os, region, scheduler, encrypted):
    """Check the root-volume encryption flag on every instance the scheduler lets us configure."""
    logging.info("Testing root volume encryption.")
    if scheduler == "slurm":
        # With slurm, root volumes of both head and compute nodes can be encrypted.
        instances_to_check = cluster.get_cluster_instance_ids()
    else:
        # With awsbatch, only the head node root volume can be encrypted.
        instances_to_check = [cluster.cfn_resources["HeadNode"]]
    for instance in instances_to_check:
        root_volume_id = utils.get_root_volume_id(instance, region, os)
        _test_ebs_encrypted_with_kms(root_volume_id, region, encrypted=encrypted)
def _assert_root_volume_configuration(cluster, os, region, scheduler):
    """Verify the configured root volume settings (type, iops, throughput) on head and compute nodes."""
    logging.info("Testing root volume type, iops, throughput.")
    # Test root volume of head node
    head_node = cluster.cfn_resources["HeadNode"]
    if utils.dict_has_nested_key(cluster.config, ("HeadNode", "LocalStorage", "RootVolume")):
        logging.info("Checking head node root volume settings")
        root_volume_id = utils.get_root_volume_id(head_node, region, os)
        expected_settings = cluster.config["HeadNode"]["LocalStorage"]["RootVolume"]
        _assert_volume_configuration(expected_settings, root_volume_id, region)
    if scheduler == "slurm":
        # Only if the scheduler is slurm, root volumes both on compute can be configured
        instance_ids = cluster.get_cluster_instance_ids()
        for instance in instance_ids:
            if instance == head_node:
                # head node is already checked
                continue
            root_volume_id = utils.get_root_volume_id(instance, region, os)
            # Expected compute settings are taken from the first Slurm queue only.
            if utils.dict_has_nested_key(
                cluster.config, ("Scheduling", "SlurmQueues", 0, "ComputeSettings", "LocalStorage", "RootVolume")
            ):
                logging.info("Checking compute node root volume settings")
                expected_settings = cluster.config["Scheduling"]["SlurmQueues"][0]["ComputeSettings"]["LocalStorage"][
                    "RootVolume"
                ]
                _assert_volume_configuration(expected_settings, root_volume_id, region)
def _assert_volume_configuration(expected_settings, volume_id, region):
    """Assert that every key in ``expected_settings`` matches the volume's actual attributes."""
    ec2 = boto3.client("ec2", region_name=region)
    actual_settings = ec2.describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0]
    for key, expected_value in expected_settings.items():
        assert_that(actual_settings[key]).is_equal_to(expected_value)
@pytest.fixture(scope="class")
def snapshots_factory():
    """Yield an EBSSnapshotsFactory; release all of its resources on teardown."""
    ebs_factory = EBSSnapshotsFactory()
    yield ebs_factory
    ebs_factory.release_all()
@pytest.fixture(scope="module")
def kms_key_factory():
    """Yield a KMSKeyFactory; release all of its resources on teardown."""
    kms_factory = KMSKeyFactory()
    yield kms_factory
    kms_factory.release_all()
| true | true |
f72a864d02a1615e377a438d9b2868959da2187a | 8,179 | py | Python | tensorflow_text/python/ops/bert_tokenizer.py | kornesh/text | f762def9dbb14f8f182936dd25af154af79f366e | [
"Apache-2.0"
] | null | null | null | tensorflow_text/python/ops/bert_tokenizer.py | kornesh/text | f762def9dbb14f8f182936dd25af154af79f366e | [
"Apache-2.0"
] | null | null | null | tensorflow_text/python/ops/bert_tokenizer.py | kornesh/text | f762def9dbb14f8f182936dd25af154af79f366e | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2019 TF.Text Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic tokenization ops for BERT preprocessing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import string_ops
from tensorflow_text.python.ops import regex_split_ops
from tensorflow_text.python.ops.normalize_ops import case_fold_utf8
from tensorflow_text.python.ops.normalize_ops import normalize_utf8
from tensorflow_text.python.ops.tokenization import TokenizerWithOffsets
from tensorflow_text.python.ops.wordpiece_tokenizer import WordpieceTokenizer
# Delimiters for the basic tokenizer.  In order: runs of whitespace,
# punctuation (ASCII punctuation ranges plus the Unicode punctuation
# category \p{P}), and CJK ideographs.  The ideograph ranges are the Unicode
# blocks CJK Unified Ideographs, Extensions A-E, and the two
# compatibility-ideograph blocks, so each ideograph splits into its own token.
_DELIM_REGEX = [
    r"\s+",
    r"|".join([
        r"[!-/]",
        r"[:-@]",
        r"[\[-`]",
        r"[{-~]",
        r"[\p{P}]",
    ]),
    r"|".join([
        r"[\x{4E00}-\x{9FFF}]",
        r"[\x{3400}-\x{4DBF}]",
        r"[\x{20000}-\x{2A6DF}]",
        r"[\x{2A700}-\x{2B73F}]",
        r"[\x{2B740}-\x{2B81F}]",
        r"[\x{2B820}-\x{2CEAF}]",
        r"[\x{F900}-\x{FAFF}]",
        r"[\x{2F800}-\x{2FA1F}]",
    ]),
]
# Pattern matching every delimiter above.
_DELIM_REGEX_PATTERN = "|".join(_DELIM_REGEX)
# Delimiters to keep (i.e. still emit as tokens) when whitespace is stripped:
# everything except the whitespace pattern.
_KEEP_DELIM_NO_WHITESPACE = copy.deepcopy(_DELIM_REGEX)
_KEEP_DELIM_NO_WHITESPACE.remove(r"\s+")
_KEEP_DELIM_NO_WHITESPACE_PATTERN = "|".join(_KEEP_DELIM_NO_WHITESPACE)
class BasicTokenizer(TokenizerWithOffsets):
  """Basic tokenizer for tokenizing text.

  A basic tokenizer that tokenizes using some deterministic rules:
  - For most languages, this tokenizer will split on whitespace.
  - For Chinese, Japanese, and Korean characters, this tokenizer will split on
    Unicode characters.

  Attributes:
    lower_case: bool - If true, a preprocessing step is added to lowercase the
      text, apply NFD normalization, and strip accents characters.
    keep_whitespace: bool - If true, preserves whitespace characters instead of
      stripping them away.
    normalization_form: If true and lower_case=False, the input text will be
      normalized to `normalization_form`. See normalize_utf8() op for a list of
      valid values.
  """

  def __init__(self,
               lower_case=False,
               keep_whitespace=False,
               normalization_form=None):
    self._lower_case = lower_case
    # When whitespace is kept it must also be emitted as tokens, so the
    # whitespace pattern stays in the keep-delimiter set.
    self._keep_delim_regex_pattern = (
        _DELIM_REGEX_PATTERN
        if keep_whitespace else _KEEP_DELIM_NO_WHITESPACE_PATTERN)
    self._normalization_form = normalization_form

  def tokenize(self, text_input):
    """Tokenizes text, discarding the offsets computed alongside the tokens."""
    tokens, _, _ = self.tokenize_with_offsets(text_input)
    return tokens

  def tokenize_with_offsets(self, text_input):
    """Performs basic word tokenization for BERT.

    Args:
      text_input: A `Tensor` or `RaggedTensor` of untokenized UTF-8 strings.

    Returns:
      A `RaggedTensor` of tokenized strings from text_input.
    """
    if self._lower_case:
      # Case-fold, then strip accents: NFD-decompose and drop the combining
      # marks (Unicode category Mn).
      text_input = case_fold_utf8(text_input)
      text_input = normalize_utf8(text_input, "NFD")
      text_input = string_ops.regex_replace(text_input, r"\p{Mn}", "")
    elif self._normalization_form is not None:
      # utf8 normalization
      text_input = normalize_utf8(text_input, self._normalization_form)
    # Replace control and format characters with spaces before splitting.
    text_input = string_ops.regex_replace(text_input, r"\p{Cc}|\p{Cf}", " ")
    return regex_split_ops.regex_split_with_offsets(
        text_input, _DELIM_REGEX_PATTERN, self._keep_delim_regex_pattern,
        "BertBasicTokenizer")
class BertTokenizer(TokenizerWithOffsets):
  """Tokenizer used for BERT.

  This tokenizer applies an end-to-end, text string to wordpiece tokenization:
  basic tokenization first, followed by wordpiece tokenization.

  See BasicTokenizer and WordpieceTokenizer for their respective details.

  Attributes:
    vocab_lookup_table: A lookup table implementing the LookupInterface
      containing the vocabulary of subwords, or a string giving the file path
      to the vocab.txt file.
    suffix_indicator: (optional) The characters prepended to a wordpiece to
      indicate that it is a suffix to another subword. Default is '##'.
    max_bytes_per_word: (optional) Max size of input token. Default is 100.
    max_chars_per_token: (optional) Max size of subwords, excluding suffix
      indicator. If known, providing this improves the efficiency of decoding
      long words.
    token_out_type: (optional) The type of the token to return. This can be
      `tf.int64` IDs, or `tf.string` subwords. The default is `tf.int64`.
    unknown_token: (optional) The value to use when an unknown token is found.
      Default is "[UNK]". If this is set to a string, and `token_out_type` is
      `tf.int64`, the `vocab_lookup_table` is used to convert the
      `unknown_token` to an integer. If this is set to `None`,
      out-of-vocabulary tokens are left as is.
    split_unknown_characters: (optional) Whether to split out single unknown
      characters as subtokens. If False (default), words containing unknown
      characters will be treated as single unknown tokens.
    lower_case: bool - If true, a preprocessing step is added to lowercase the
      text, apply NFD normalization, and strip accents characters.
    keep_whitespace: bool - If true, preserves whitespace characters instead of
      stripping them away.
    normalization_form: If true and lower_case=False, the input text will be
      normalized to `normalization_form`. See normalize_utf8() op for a list
      of valid values.
  """

  def __init__(self,
               vocab_lookup_table,
               suffix_indicator="##",
               max_bytes_per_word=100,
               max_chars_per_token=None,
               token_out_type=dtypes.int64,
               unknown_token="[UNK]",
               split_unknown_characters=False,
               lower_case=False,
               keep_whitespace=False,
               normalization_form=None):
    if isinstance(vocab_lookup_table, str):
      # A vocab file path was given: build a static vocabulary table from it,
      # with a single out-of-vocabulary bucket.
      table_initializer = lookup_ops.TextFileIdTableInitializer(
          vocab_lookup_table)
      vocab_lookup_table = lookup_ops.StaticVocabularyTableV1(
          table_initializer, num_oov_buckets=1, lookup_key_dtype=dtypes.string)
    self._basic_tokenizer = BasicTokenizer(lower_case, keep_whitespace,
                                           normalization_form)
    self._wordpiece_tokenizer = WordpieceTokenizer(
        vocab_lookup_table, suffix_indicator, max_bytes_per_word,
        max_chars_per_token, token_out_type, unknown_token,
        split_unknown_characters)

  def tokenize_with_offsets(self, text_input):
    """Tokenizes text into wordpieces together with absolute byte offsets."""
    words, word_begin, _ = self._basic_tokenizer.tokenize_with_offsets(
        text_input)
    wordpieces, wp_begin, wp_end = (
        self._wordpiece_tokenizer.tokenize_with_offsets(words))
    # Wordpiece offsets are relative to their word; add the word's own offset
    # to express them relative to the original string.
    word_begin_expanded = array_ops.expand_dims(word_begin, axis=2)
    return (wordpieces,
            word_begin_expanded + wp_begin,
            word_begin_expanded + wp_end)

  def tokenize(self, text_input):
    """Performs untokenized text to wordpiece tokenization for BERT.

    Args:
      text_input: input: A `Tensor` or `RaggedTensor` of untokenized UTF-8
        strings.

    Returns:
      A `RaggedTensor` of tokens where `tokens[i1...iN, j]` is the string
      contents (or ID in the vocab_lookup_table representing that string)
      of the `jth` token in `input[i1...iN]`
    """
    words = self._basic_tokenizer.tokenize(text_input)
    return self._wordpiece_tokenizer.tokenize(words)
| 40.093137 | 80 | 0.711334 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import string_ops
from tensorflow_text.python.ops import regex_split_ops
from tensorflow_text.python.ops.normalize_ops import case_fold_utf8
from tensorflow_text.python.ops.normalize_ops import normalize_utf8
from tensorflow_text.python.ops.tokenization import TokenizerWithOffsets
from tensorflow_text.python.ops.wordpiece_tokenizer import WordpieceTokenizer
# Delimiters for the basic tokenizer: runs of whitespace, punctuation (ASCII
# ranges plus the Unicode punctuation category \p{P}), and CJK ideographs
# (Unicode blocks CJK Unified Ideographs, Extensions A-E, and the two
# compatibility-ideograph blocks), so each ideograph splits into its own token.
_DELIM_REGEX = [
    r"\s+",
    r"|".join([
        r"[!-/]",
        r"[:-@]",
        r"[\[-`]",
        r"[{-~]",
        r"[\p{P}]",
    ]),
    r"|".join([
        r"[\x{4E00}-\x{9FFF}]",
        r"[\x{3400}-\x{4DBF}]",
        r"[\x{20000}-\x{2A6DF}]",
        r"[\x{2A700}-\x{2B73F}]",
        r"[\x{2B740}-\x{2B81F}]",
        r"[\x{2B820}-\x{2CEAF}]",
        r"[\x{F900}-\x{FAFF}]",
        r"[\x{2F800}-\x{2FA1F}]",
    ]),
]
# Pattern matching every delimiter above.
_DELIM_REGEX_PATTERN = "|".join(_DELIM_REGEX)
# Delimiters to keep as tokens when whitespace is stripped: everything
# except the whitespace pattern.
_KEEP_DELIM_NO_WHITESPACE = copy.deepcopy(_DELIM_REGEX)
_KEEP_DELIM_NO_WHITESPACE.remove(r"\s+")
_KEEP_DELIM_NO_WHITESPACE_PATTERN = "|".join(_KEEP_DELIM_NO_WHITESPACE)
class BasicTokenizer(TokenizerWithOffsets):
  """Basic tokenizer that splits UTF-8 text on whitespace, punctuation and CJK characters.

  Attributes:
    lower_case: If true, lowercase the text, apply NFD normalization, and
      strip accent characters before splitting.
    keep_whitespace: If true, whitespace characters are preserved as tokens
      instead of being stripped away.
    normalization_form: When lower_case is False, normalize the input to this
      form (see normalize_utf8() for valid values).
  """
  def __init__(self,
               lower_case=False,
               keep_whitespace=False,
               normalization_form=None):
    self._lower_case = lower_case
    # Whitespace stays in the keep-delimiter set only when it must be
    # emitted as tokens.
    if not keep_whitespace:
      self._keep_delim_regex_pattern = _KEEP_DELIM_NO_WHITESPACE_PATTERN
    else:
      self._keep_delim_regex_pattern = _DELIM_REGEX_PATTERN
    self._normalization_form = normalization_form
  def tokenize(self, text_input):
    """Tokenizes text, discarding the offsets."""
    tokens, _, _ = self.tokenize_with_offsets(text_input)
    return tokens
  def tokenize_with_offsets(self, text_input):
    """Splits UTF-8 strings into tokens plus begin/end byte offsets."""
    if self._lower_case:
      # Case-fold, then strip accents: NFD-decompose and drop the combining
      # marks (Unicode category Mn).
      text_input = case_fold_utf8(text_input)
      text_input = normalize_utf8(text_input, "NFD")
      text_input = string_ops.regex_replace(text_input, r"\p{Mn}", "")
    else:
      if self._normalization_form is not None:
        text_input = normalize_utf8(text_input, self._normalization_form)
    # Replace control/format characters with spaces before splitting.
    text_input = string_ops.regex_replace(text_input, r"\p{Cc}|\p{Cf}", " ")
    return regex_split_ops.regex_split_with_offsets(
        text_input, _DELIM_REGEX_PATTERN, self._keep_delim_regex_pattern,
        "BertBasicTokenizer")
class BertTokenizer(TokenizerWithOffsets):
  """End-to-end BERT tokenizer: basic tokenization followed by wordpiece tokenization.

  See BasicTokenizer and WordpieceTokenizer for the meaning of the
  constructor parameters they receive.
  """
  def __init__(self,
               vocab_lookup_table,
               suffix_indicator="##",
               max_bytes_per_word=100,
               max_chars_per_token=None,
               token_out_type=dtypes.int64,
               unknown_token="[UNK]",
               split_unknown_characters=False,
               lower_case=False,
               keep_whitespace=False,
               normalization_form=None):
    if isinstance(vocab_lookup_table, str):
      # A vocab file path was given: build a static vocabulary table from it,
      # with a single out-of-vocabulary bucket.
      init = lookup_ops.TextFileIdTableInitializer(vocab_lookup_table)
      vocab_lookup_table = lookup_ops.StaticVocabularyTableV1(
          init, num_oov_buckets=1, lookup_key_dtype=dtypes.string)
    self._basic_tokenizer = BasicTokenizer(lower_case, keep_whitespace,
                                           normalization_form)
    self._wordpiece_tokenizer = WordpieceTokenizer(
        vocab_lookup_table, suffix_indicator, max_bytes_per_word,
        max_chars_per_token, token_out_type, unknown_token,
        split_unknown_characters)
  def tokenize_with_offsets(self, text_input):
    """Tokenizes text into wordpieces plus absolute begin/end byte offsets."""
    tokens, begin, _ = self._basic_tokenizer.tokenize_with_offsets(text_input)
    wordpieces, wp_begin, wp_end = (
        self._wordpiece_tokenizer.tokenize_with_offsets(tokens))
    # Wordpiece offsets are relative to their word; shift them by the word's
    # own offset to make them absolute in the original string.
    begin_expanded = array_ops.expand_dims(begin, axis=2)
    final_begin = begin_expanded + wp_begin
    final_end = begin_expanded + wp_end
    return wordpieces, final_begin, final_end
  def tokenize(self, text_input):
    """Tokenizes untokenized UTF-8 text all the way to wordpieces."""
    tokens = self._basic_tokenizer.tokenize(text_input)
    return self._wordpiece_tokenizer.tokenize(tokens)
| true | true |
f72a86580356f7077caa16bc6708c93567dae160 | 6,746 | py | Python | py_algorithms/data_structures/heap.py | rlishtaba/py-algorithms-playground | ce7cf332483e01d05bcad98921d736c33a33a66c | [
"MIT"
] | 31 | 2017-09-17T06:29:15.000Z | 2022-03-11T14:45:40.000Z | py_algorithms/data_structures/heap.py | MindaugasVaitkus2/py-algorithms | ce7cf332483e01d05bcad98921d736c33a33a66c | [
"MIT"
] | null | null | null | py_algorithms/data_structures/heap.py | MindaugasVaitkus2/py-algorithms | ce7cf332483e01d05bcad98921d736c33a33a66c | [
"MIT"
] | 11 | 2017-11-01T20:33:20.000Z | 2022-02-13T16:54:21.000Z | import sys
from typing import Any
from typing import Callable
from typing import List
from typing import Union
from ..utils import test_iterable
class _HeapNode:
    """A heap tree node linked into a circular doubly-linked sibling ring."""

    def __init__(self, key: Any, value: Any):
        self.key = key
        self.value = value
        # A fresh node is an unmarked root with no children whose sibling
        # ring contains only itself.
        self.degree = 0
        self.marked = False
        self.left = self.right = self
        self.parent = None
        self.child = None

    def is_marked(self) -> bool:
        """Return True when the node is currently marked."""
        return self.marked is True
class Heap:
    """Mergeable priority queue over (key, value) pairs.

    ``comparator_f2(a, b)`` must return True when key ``a`` belongs nearer the
    top than key ``b`` (so ``lambda a, b: a < b`` yields a min-heap).  Roots
    are kept in a circular doubly-linked list and merged by degree on pop.
    Duplicate keys are allowed: ``_stored`` maps each key to the list of live
    nodes carrying it.
    """

    # Sentinel marking an empty slot in the consolidation degree table.
    MAX_MIN = -sys.maxsize

    def __init__(self, comparator_f2: Callable[[Any, Any], bool], xs: List[Any] = ()):
        """Create a heap, optionally seeded from ``xs`` (each item is both key and value)."""
        test_iterable(xs)
        self._size = 0
        self._comparator_f2 = comparator_f2
        self._next = None  # top-most root node, or None when the heap is empty
        self._stored = {}  # key -> list of live nodes carrying that key
        # default initial values
        for x in xs:
            self.push(x, x)

    @staticmethod
    def _get_by_index(array, index) -> Union[None, Any]:
        """Return ``array[index]``, or None when ``index`` is out of range."""
        try:
            return array[index]
        except IndexError:
            return None

    @classmethod
    def _set_entry_by_index(cls, array, index, value):
        """Set ``array[index] = value``, growing ``array`` with MAX_MIN fillers as needed."""
        if cls._get_by_index(array, index) == cls.MAX_MIN:
            array[index] = value
            return array
        else:
            array.extend([cls.MAX_MIN] * (index - len(array) + 1))
            return cls._set_entry_by_index(array, index, value)

    @property
    def size(self):
        """Number of entries currently in the heap."""
        return self._size

    @property
    def next(self) -> Union[Any, None]:
        """Value of the top entry without removing it, or None when empty."""
        if self._next:
            return self._next.value
        return None

    @property
    def next_key(self) -> Union[Any, None]:
        """Key of the top entry without removing it, or None when empty."""
        if self._next:
            return self._next.key
        return None

    @property
    def is_empty(self) -> bool:
        return self._next is None

    def clear(self) -> 'Heap':
        """Drop every entry and return self."""
        self._next = None
        self._size = 0
        self._stored = {}
        return self

    def contains_key(self, key) -> bool:
        """Return True when at least one live entry with ``key`` is in the heap."""
        # A single lookup replaces the redundant double ``get`` this used to
        # do; an empty node list counts as absent.
        return bool(self._stored.get(key))

    def push(self, key: Any, value: Any) -> Any:
        """Insert ``value`` under ``key`` and return ``value``.

        Raises:
            RuntimeError: when ``key`` is None.
        """
        if key is None:
            raise RuntimeError('Could not process heap keys equal to Null.')
        node = _HeapNode(key, value)
        if self._next:
            # Splice the new node into the circular root list next to the
            # current top, promoting it when it wins the comparison.
            node.right = self._next
            node.left = self._next.left
            node.left.right = node
            self._next.left = node
            if self._comparator_f2(key, self._next.key):
                self._next = node
        else:
            self._next = node
        self._size += 1
        # (An O(n) side-effect-free walk of the root ring that used to sit
        # here has been removed.)
        self._stored.setdefault(key, []).append(node)
        return value

    def pop(self) -> Any:
        """Remove and return the top value, or None when the heap is empty."""
        if not self._next:
            return None
        popped = self._next
        if self._size == 1:
            # Last entry: resetting wholesale also clears the bookkeeping dict.
            self.clear()
            return popped.value
        # Promote the popped root's children (if any) into the root list.
        if self._next.child:
            self._next.child.parent = None
            sibling = self._next.child.right
            while not sibling == self._next.child:
                sibling.parent = None
                sibling = sibling.right
            # Merge children into root.
            # If next is a singular root node,
            # make its child pointer the next node
            if self._next.right == self._next:
                self._next = self._next.child
            else:
                next_left, next_right = self._next.left, self._next.right
                current_child = self._next.child
                self._next.right.left = current_child
                self._next.left.right = current_child.right
                current_child.right.left = next_left
                current_child.right = next_right
                self._next = self._next.right
        else:
            # No children: just unlink the popped root from the ring.
            self._next.left.right = self._next.right
            self._next.right.left = self._next.left
            self._next = self._next.right
        self._consolidate()
        # Remove the popped node from the bookkeeping dict so contains_key()
        # stays accurate and the node can be garbage collected.  (Previously
        # the node was only looked up here, never removed, so contains_key()
        # kept reporting popped keys as present.)
        nodes_with_key = self._stored.get(popped.key)
        if not nodes_with_key:
            raise RuntimeError("Could not delete a heap entry.")
        try:
            # list.remove compares with ==, which for _HeapNode falls back to
            # identity, so exactly the popped node is removed.
            nodes_with_key.remove(popped)
        except ValueError:
            raise RuntimeError("Could not delete a heap entry.") from None
        if not nodes_with_key:
            del self._stored[popped.key]
        self._size -= 1
        return popped.value

    def _consolidate(self):
        """Merge roots of equal degree until all degrees are unique, then re-find the top."""
        roots = []
        root = self._next
        _min = root
        while True:  # find the nodes in the list
            roots.append(root)
            root = root.right
            if root == self._next:
                break
        degrees = []
        for root in roots:
            if self._comparator_f2(root.key, _min.key):
                _min = root
            # check if we need to merge
            if not self._get_by_index(degrees, root.degree):
                self._set_entry_by_index(degrees, root.degree, root)
            else:
                # there is another node(s) with the same degree,
                # we'll try to consolidate them
                degree = root.degree
                while not (self._get_by_index(degrees, degree) in [self.MAX_MIN, None]):
                    other_root_with_degree = degrees[degree]
                    if self._comparator_f2(root.key, other_root_with_degree.key):
                        # determine which node is the parent, which one is the
                        # child
                        smaller, larger = root, other_root_with_degree
                    else:
                        smaller, larger = other_root_with_degree, root
                    self._link_nodes(larger, smaller)
                    degrees[degree] = self.MAX_MIN
                    root = smaller
                    degree += 1
                self._set_entry_by_index(degrees, degree, root)
            # make sure duplicate keys in the right order
            if _min.key == root.key:
                _min = root
        self._next = _min

    @staticmethod
    def _link_nodes(child, parent) -> None:
        """Make ``child`` a child of ``parent``."""
        # link the child's siblings
        child.left.right = child.right
        child.right.left = child.left
        child.parent = parent
        # if parent doesn't have children, make new child its only child
        if not parent.child:
            parent.child = child.right = child.left = child
        # otherwise insert new child into parent's children list
        else:
            current_child = parent.child
            child.left = current_child
            child.right = current_child.right
            current_child.right.left = child
            current_child.right = child
        parent.degree += 1
        child.marked = False
| 29.982222 | 88 | 0.546991 | import sys
from typing import Any
from typing import Callable
from typing import List
from typing import Union
from ..utils import test_iterable
class _HeapNode:
    """A heap tree node linked into a circular doubly-linked sibling ring."""
    def __init__(self, key: Any, value: Any):
        self.key = key
        self.value = value
        self.degree = 0  # number of direct children
        self.marked = False
        self.right = self  # a new node forms a one-element sibling ring
        self.left = self
        self.parent = None
        self.child = None
    def is_marked(self) -> bool:
        """Return True when the node is currently marked."""
        return self.marked is True
class Heap:
    """Mergeable priority queue over (key, value) pairs.

    ``comparator_f2(a, b)`` must return True when key ``a`` belongs nearer the
    top than key ``b`` (so ``lambda a, b: a < b`` yields a min-heap).  Roots
    are kept in a circular doubly-linked list and merged by degree on pop.
    """
    # Sentinel marking an empty slot in the consolidation degree table.
    MAX_MIN = -sys.maxsize
    def __init__(self, comparator_f2: Callable[[Any, Any], bool], xs: List[Any] = ()):
        """Create a heap, optionally seeded from ``xs`` (each item is both key and value)."""
        test_iterable(xs)
        self._size = 0
        self._comparator_f2 = comparator_f2
        self._next = None  # top-most root node, or None when the heap is empty
        self._stored = {}  # key -> list of nodes pushed with that key
        for x in xs:
            self.push(x, x)
    @staticmethod
    def _get_by_index(array, index) -> Union[None, Any]:
        """Return ``array[index]``, or None when ``index`` is out of range."""
        try:
            return array[index]
        except IndexError:
            return None
    @classmethod
    def _set_entry_by_index(cls, array, index, value):
        """Set ``array[index] = value``, growing ``array`` with MAX_MIN fillers as needed."""
        if cls._get_by_index(array, index) == cls.MAX_MIN:
            array[index] = value
            return array
        else:
            array.extend([cls.MAX_MIN] * (index - len(array) + 1))
            return cls._set_entry_by_index(array, index, value)
    @property
    def size(self):
        # Number of entries currently in the heap.
        return self._size
    @property
    def next(self) -> Union[Any, None]:
        # Value of the top entry without removing it; None when empty.
        if self._next:
            return self._next.value
        return None
    @property
    def next_key(self) -> Union[Any, None]:
        # Key of the top entry without removing it; None when empty.
        if self._next:
            return self._next.key
        return None
    @property
    def is_empty(self) -> bool:
        return self._next is None
    def clear(self) -> 'Heap':
        """Drop every entry and return self."""
        self._next = None
        self._size = 0
        self._stored = {}
        return self
    def contains_key(self, key) -> bool:
        """Return True when ``key`` has a non-empty entry in the bookkeeping dict.

        NOTE(review): pop() never removes nodes from ``_stored``, so this keeps
        returning True for keys whose entries have all been popped - confirm
        whether that is intended.
        """
        if self._stored.get(key, None) and self._stored.get(key):
            return True
        return False
    def push(self, key: Any, value: Any) -> Any:
        """Insert ``value`` under ``key`` into the root list and return ``value``.

        Raises RuntimeError when ``key`` is None.
        """
        if key is None:
            raise RuntimeError('Could not process heap keys equal to Null.')
        node = _HeapNode(key, value)
        if self._next:
            # Splice the new node into the circular root list next to the
            # current top, promoting it when it wins the comparison.
            node.right = self._next
            node.left = self._next.left
            node.left.right = node
            self._next.left = node
            if self._comparator_f2(key, self._next.key):
                self._next = node
        else:
            self._next = node
        self._size += 1
        # NOTE(review): this walk around the root ring has no side effects -
        # it looks like leftover debugging code (an O(n) no-op per push).
        w = self._next.right
        while w is not self._next:
            w = w.right
        if not self._stored.get(key, None):
            self._stored[key] = []
        self._stored[key].append(node)
        return value
    def pop(self) -> Any:
        """Remove and return the top value, or None when the heap is empty."""
        if not self._next:
            return None
        popped = self._next
        if self._size == 1:
            self.clear()
            return popped.value
        # Promote the popped root's children (if any) into the root list.
        if self._next.child:
            self._next.child.parent = None
            sibling = self._next.child.right
            while not sibling == self._next.child:
                sibling.parent = None
                sibling = sibling.right
            # Merge children into root.
            # If next is a singular root node,
            # make its child pointer the next node
            if self._next.right == self._next:
                self._next = self._next.child
            else:
                next_left, next_right = self._next.left, self._next.right
                current_child = self._next.child
                self._next.right.left = current_child
                self._next.left.right = current_child.right
                current_child.right.left = next_left
                current_child.right = next_right
                self._next = self._next.right
        else:
            # No children: just unlink the popped root from the ring.
            self._next.left.right = self._next.right
            self._next.right.left = self._next.left
            self._next = self._next.right
        self._consolidate()
        # NOTE(review): the popped node is only looked up here, never removed
        # from _stored, so the bookkeeping dict only grows.
        if not self._stored.get(popped.key, None):
            raise RuntimeError("Could not delete a heap entry.")
        self._size -= 1
        return popped.value
    def _consolidate(self):
        """Merge roots of equal degree until all degrees are unique, then re-find the top."""
        roots = []
        root = self._next
        _min = root
        while True:  # find the nodes in the list
            roots.append(root)
            root = root.right
            if root == self._next:
                break
        degrees = []
        for root in roots:
            if self._comparator_f2(root.key, _min.key):
                _min = root
            # check if we need to merge
            if not self._get_by_index(degrees, root.degree):
                self._set_entry_by_index(degrees, root.degree, root)
            else:
                # there is another node(s) with the same degree,
                # we'll try to consolidate them
                degree = root.degree
                while not (self._get_by_index(degrees, degree) in [self.MAX_MIN, None]):
                    other_root_with_degree = degrees[degree]
                    if self._comparator_f2(root.key, other_root_with_degree.key):
                        smaller, larger = root, other_root_with_degree
                    else:
                        smaller, larger = other_root_with_degree, root
                    self._link_nodes(larger, smaller)
                    degrees[degree] = self.MAX_MIN
                    root = smaller
                    degree += 1
                self._set_entry_by_index(degrees, degree, root)
            # keep _min pointing at the latest root carrying the top key
            if _min.key == root.key:
                _min = root
        self._next = _min
    @staticmethod
    def _link_nodes(child, parent) -> None:
        """Make ``child`` a child of ``parent``."""
        # Detach the child from its sibling ring.
        child.left.right = child.right
        child.right.left = child.left
        child.parent = parent
        # if parent doesn't have children, make new child its only child
        if not parent.child:
            parent.child = child.right = child.left = child
        else:
            # Otherwise insert the new child into the parent's children ring.
            current_child = parent.child
            child.left = current_child
            child.right = current_child.right
            current_child.right.left = child
            current_child.right = child
        parent.degree += 1
        child.marked = False
| true | true |
f72a885aba97b22f715ae7119075a6a258e3e0a9 | 3,885 | py | Python | func.py | Abner0627/IPRV_Optical-Flow | 85c0650f671ad44c8bbe1d820a761be42cbe56d0 | [
"MIT"
] | null | null | null | func.py | Abner0627/IPRV_Optical-Flow | 85c0650f671ad44c8bbe1d820a761be42cbe56d0 | [
"MIT"
] | null | null | null | func.py | Abner0627/IPRV_Optical-Flow | 85c0650f671ad44c8bbe1d820a761be42cbe56d0 | [
"MIT"
] | null | null | null | import cv2
import os
import numpy as np
import matplotlib.pyplot as plt
# %%
def _pick(L, ty, path):
    """Load with cv2.imread every file in ``L`` whose name prefix before '_' equals ``ty``."""
    selected = [name for name in L if name.split('_')[0] == ty]
    # Read the selected image files from ``path``.
    return [cv2.imread(os.path.join(path, name)) for name in selected]
def _gray(img):
    """Convert a BGR image (OpenCV's default channel order) to grayscale."""
    return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def _Pos(img, idx):
    """Show ``img`` and let the user click two points; clicks are saved to ./npy/loc_<idx>.npy."""
    def on_press(event):
        L.append(np.array([int(event.xdata), int(event.ydata)]))
        # Record the clicked (x, y) position.
        if len(L)>=2:
            plt.close()
            # Close the window once two points have been clicked.
        np.save('./npy/loc_' + idx + '.npy', np.array(L))
        # Persist the clicked points (re-saved after every click).
    fig = plt.figure()
    plt.imshow(img, animated= True)
    L = []
    fig.canvas.mpl_connect('button_press_event', on_press)
    # Display an interactive figure so the user can pick the target points.
    plt.show()
def _PlotPos(img, idx):
    """Reload the two points saved for ``idx``, print them, and return a copy of ``img`` with them drawn."""
    annotated = np.copy(img)
    # Load the previously saved click coordinates.
    src = np.load('./npy/loc_' + idx + '.npy')
    print('Choose point 1: ({}, {})'.format(src[0, 0], src[0, 1]))
    print('Choose point 2: ({}, {})'.format(src[1, 0], src[1, 1]))
    # Mark both points on the copy so the original image stays untouched.
    marker_color = (0, 38, 255)
    for x, y in ((src[0, 0], src[0, 1]), (src[1, 0], src[1, 1])):
        cv2.circle(annotated, (x, y), 3, marker_color, -1)
    return annotated
# def _flow(pre_img, nxt_img, pt_x, pt_y, param, init_flow=None):
# XL, YL = [0], [0]
# PX, PY = [pt_x], [pt_y]
# flow = init_flow
# ep = 1000
# i=0
# while ep>1e-2:
# if i==0:
# fg = 0
# else:
# fg = cv2.OPTFLOW_USE_INITIAL_FLOW
# flow = cv2.calcOpticalFlowFarneback(pre_img, nxt_img, flow=flow, flags=fg, **param)
# XL.append(flow[pt_y, pt_x, 0])
# YL.append(flow[pt_y, pt_x, 1])
# PX.append(int(pt_x + flow[pt_y, pt_x, 0]))
# PY.append(int(pt_y + flow[pt_y, pt_x, 1]))
# print('iter:{}, ep:{}\nu = {:.4f}, v = {:.4f}'.format(i, ep, XL[i], YL[i]))
# print('x = {:.4f}, y = {:.4f}'.format(PX[i], PY[i]))
# print('======================')
# i+=1
# if i>0:
# ep = np.sum(np.abs(XL[i-1] - XL[i])) + np.sum(np.abs(YL[i-1] - YL[i]))
# return PX, PY
def _LKflow(pre_img, nxt_img, pt_x, pt_y, lk_params):
    """Iteratively track (pt_x, pt_y) from pre_img to nxt_img with pyramidal Lucas-Kanade.

    Returns the lists of x and y positions recorded at every iteration
    (starting with the initial point).
    """
    p0 = np.array([[pt_x, pt_y]]).astype(np.float32)
    i = 0
    PX, PY = [pt_x], [pt_y]
    XL, YL = [], []
    ep = 1e3
    # Initialize the bookkeeping: positions, displacements, convergence measure.
    while ep>1e-2:
        if i==0:
            p1, _, _ = cv2.calcOpticalFlowPyrLK(pre_img, nxt_img, p0, None, **lk_params)
        else:
            p1, _, _ = cv2.calcOpticalFlowPyrLK(pre_img, nxt_img, p0, p1, flags=cv2.OPTFLOW_USE_INITIAL_FLOW, **lk_params)
        # Compute the tracked position once per iteration, feeding the previous
        # estimate back in as the initial flow after the first pass.
        PX.append(p1[0][0])
        PY.append(p1[0][1])
        XL.append(PX[i] - PX[i+1])
        YL.append(PY[i] - PY[i+1])
        # Record the output position and the displacement vector.
        if i>0:
            ep = np.sum(np.abs(XL[i-1] - XL[i])) + np.sum(np.abs(YL[i-1] - YL[i]))
            # Difference from the previous iteration's displacement vector;
            # the loop stops once this falls below 0.01.
        print('iter:{}, ep:{}\nu = {:.4f}, v = {:.4f}'.format(i, ep, XL[i], YL[i]))
        print('x = {:.4f}, y = {:.4f}'.format(PX[i+1], PY[i+1]))
        print('======================')
        i+=1
    return PX, PY
def _plot(img, PX, PY):
    """Draw the tracked path on ``img``: segments between successive points plus markers.

    The first point, last point and intermediate points get distinct BGR
    colors.  ``img`` is modified in place and also returned.
    """
    # ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # ``int`` is the exact equivalent (np.int was just an alias for it).
    PX = np.array(PX).astype(int)
    PY = np.array(PY).astype(int)
    # Connect successive positions with line segments.
    for j in range(1, len(PX)):
        cv2.line(img, (PX[j - 1], PY[j - 1]), (PX[j], PY[j]), (250, 5, 216), 2)
    # Mark the point produced by every iteration.
    for k in range(len(PX)):
        if k == 0:
            c = (0, 38, 255)
        elif k == len(PX) - 1:
            c = (182, 255, 0)
        else:
            c = (255, 0, 0)
        cv2.circle(img, (PX[k], PY[k]), 3, c, -1)
    return img
# param = dict(pyr_scale=0.8,
# levels=25,
# iterations=1,
# winsize=5,
# poly_n=5,
# poly_sigma=1.1)
lk_params = dict(winSize = (15, 15),
maxLevel = 3,
criteria = (cv2.TERM_CRITERIA_COUNT, 1, 0.03)) | 32.107438 | 122 | 0.488288 | import cv2
import os
import numpy as np
import matplotlib.pyplot as plt
def _pick(L, ty, path):
    """Load with cv2.imread every file in ``L`` whose name prefix before '_' equals ``ty``."""
    L_ = [cv2.imread(os.path.join(path, i)) for i in L if i.split('_')[0]==ty]
    return L_
def _gray(img):
    """Convert a BGR image (OpenCV's default channel order) to grayscale."""
    return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def _Pos(img, idx):
    """Show ``img`` and record mouse clicks; points are saved to ./npy/loc_<idx>.npy."""
    def on_press(event):
        # Record each clicked (x, y); close the window after the second click.
        L.append(np.array([int(event.xdata), int(event.ydata)]))
        if len(L)>=2:
            plt.close()
        # The clicked points are re-saved after every click.
        np.save('./npy/loc_' + idx + '.npy', np.array(L))
    fig = plt.figure()
    plt.imshow(img, animated= True)
    L = []
    fig.canvas.mpl_connect('button_press_event', on_press)
    plt.show()
def _PlotPos(img, idx):
    """Reload the saved points for ``idx``, print them, and return a copy of ``img`` with them drawn."""
    img_c = np.copy(img)
    # Load the click coordinates previously saved under the same idx.
    src = np.load('./npy/loc_' + idx + '.npy')
    print('Choose point 1: ({}, {})'.format(src[0, 0], src[0, 1]))
    print('Choose point 2: ({}, {})'.format(src[1, 0], src[1, 1]))
    # Draw filled circles on the copy; the input image is left untouched.
    cv2.circle(img_c, (src[0, 0], src[0, 1]), 3, (0, 38, 255), -1)
    cv2.circle(img_c, (src[1, 0], src[1, 1]), 3, (0, 38, 255), -1)
    return img_c
def _LKflow(pre_img, nxt_img, pt_x, pt_y, lk_params, max_iter=None):
    """Iteratively refine Lucas-Kanade optical flow for one point.

    Each pass runs a single LK iteration (lk_params limits cv2 to one
    internal iteration); after the first pass the previous estimate is fed
    back via OPTFLOW_USE_INITIAL_FLOW.  The loop stops when the change of
    the displacement vector between consecutive passes drops below 0.01.

    Args:
        pre_img, nxt_img: previous / next frame (grayscale images).
        pt_x, pt_y: starting coordinate of the tracked point.
        lk_params: keyword arguments for cv2.calcOpticalFlowPyrLK.
        max_iter: optional safety cap on the number of passes; None keeps
            the original unbounded behavior.

    Returns:
        (PX, PY): lists of the x and y coordinates after every pass,
        starting with the input coordinate.
    """
    p0 = np.array([[pt_x, pt_y]]).astype(np.float32)
    i = 0
    PX, PY = [pt_x], [pt_y]
    XL, YL = [], []
    ep = 1e3
    while ep > 1e-2:
        # Robustness: optionally bound the loop so non-converging points
        # cannot hang the program.
        if max_iter is not None and i >= max_iter:
            break
        if i == 0:
            p1, _, _ = cv2.calcOpticalFlowPyrLK(pre_img, nxt_img, p0, None, **lk_params)
        else:
            p1, _, _ = cv2.calcOpticalFlowPyrLK(pre_img, nxt_img, p0, p1, flags=cv2.OPTFLOW_USE_INITIAL_FLOW,
                                                **lk_params)
        # Record this pass's coordinate and displacement vector.
        PX.append(p1[0][0])
        PY.append(p1[0][1])
        XL.append(PX[i] - PX[i+1])
        YL.append(PY[i] - PY[i+1])
        if i > 0:
            # Difference of displacement vectors between consecutive passes.
            ep = np.sum(np.abs(XL[i-1] - XL[i])) + np.sum(np.abs(YL[i-1] - YL[i]))
        print('iter:{}, ep:{}\nu = {:.4f}, v = {:.4f}'.format(i, ep, XL[i], YL[i]))
        print('x = {:.4f}, y = {:.4f}'.format(PX[i+1], PY[i+1]))
        print('======================')
        i += 1
    return PX, PY
def _plot(img, PX, PY):
    """Draw the iteration trajectory on `img` in place and return it.

    Lines connect successive points; dots mark each point (BGR colors:
    red start, green end, blue intermediate).
    """
    # Bug fix: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` is the documented replacement.
    PX = np.array(PX).astype(int)
    PY = np.array(PY).astype(int)
    for j in range(len(PX)):
        if j != 0:
            cv2.line(img, (PX[j-1], PY[j-1]), (PX[j], PY[j]), (250, 5, 216), 2)
    for k in range(len(PX)):
        if k == 0:
            c = (0, 38, 255)      # first point: red
        elif k == len(PX)-1:
            c = (182, 255, 0)     # last point: green
        else:
            c = (255, 0, 0)       # intermediate points: blue
        cv2.circle(img, (PX[k], PY[k]), 3, c, -1)
    return img
# Lucas-Kanade parameters: 15x15 search window, 3 pyramid levels, and a
# termination criterion limited to one LK iteration per call so that the
# explicit loop in _LKflow controls convergence.
lk_params = dict(winSize = (15, 15),
                 maxLevel = 3,
                 criteria = (cv2.TERM_CRITERIA_COUNT, 1, 0.03))
f72a8a9e767b0990ec36270d87dafab2e37e3e27 | 276,837 | py | Python | mindspore/ops/operations/nn_ops.py | Rossil2012/mindspore | 8a20b5d784b3fec6d32e058581ec56ec553a06a0 | [
"Apache-2.0"
] | 1 | 2021-04-23T06:35:18.000Z | 2021-04-23T06:35:18.000Z | mindspore/ops/operations/nn_ops.py | nudt-eddie/mindspore | 55372b41fdfae6d2b88d7078971e06d537f6c558 | [
"Apache-2.0"
] | null | null | null | mindspore/ops/operations/nn_ops.py | nudt-eddie/mindspore | 55372b41fdfae6d2b88d7078971e06d537f6c558 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Operators for nn."""
import math
import operator
from functools import reduce
import numpy as np
from ... import context
from .. import signature as sig
from ..._checkparam import Validator as validator
from ..._checkparam import Rel
from ...common import dtype as mstype
from ..primitive import Primitive, PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register
from ..operations.math_ops import _infer_shape_reduce
def _check_positive_int_or_tuple(arg_name, arg_value, prim_name, allow_four=False, ret_four=False):
    """
    Checks whether an argument is a positive int or tuple with 2 or 4(when allow_four is True) positive int elements.

    Returns the value normalized to a 2-tuple, or a 4-tuple when `ret_four`
    is True; raises ValueError for anything else.
    """
    def _raise_message():
        raise ValueError(f"For '{prim_name}' attr '{arg_name}' should be an positive int number or a tuple of two "
                         f"{'or four ' if allow_four else ''}positive int numbers, but got {arg_value}")

    def _normalize():
        # Map every accepted input form onto the canonical 2- or 4-tuple.
        if isinstance(arg_value, int):
            return (1, 1, arg_value, arg_value) if ret_four else (arg_value, arg_value)
        if len(arg_value) == 2:
            return (1, 1, arg_value[0], arg_value[1]) if ret_four else arg_value
        if len(arg_value) == 4 and allow_four:
            return arg_value if ret_four else (arg_value[2], arg_value[3])
        _raise_message()

    validator.check_value_type(arg_name, arg_value, (int, tuple), prim_name)
    ret_value = _normalize()
    # Every element of the normalized tuple must be a positive int.
    for element in ret_value:
        if not (isinstance(element, int) and element > 0):
            _raise_message()
    return ret_value
class Flatten(PrimitiveWithInfer):
    r"""
    Flattens a tensor without changing its batch size on the 0-th axis.
    Inputs:
        - **input_x** (Tensor) - Tensor of shape :math:`(N, \ldots)` to be flattened.
    Outputs:
        Tensor, the shape of the output tensor is :math:`(N, X)`, where :math:`X` is
        the product of the remaining dimension.
    Examples:
        >>> input_tensor = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32)
        >>> flatten = P.Flatten()
        >>> output = flatten(input_tensor)
        >>> assert output.shape == (1, 24)
    """
    @prim_attr_register
    def __init__(self):
        pass
    def infer_shape(self, input_x):
        # Keep dim 0 (batch) and collapse all remaining dims into their
        # product; a rank-1 input flattens to (N, 1).
        validator.check_integer('input_x rank', len(input_x), 1, Rel.GE, self.name)
        prod = 1 if len(input_x) == 1 else reduce(operator.mul, input_x[1:])
        return input_x[0], prod
    def infer_dtype(self, input_x):
        # Dtype passes through unchanged; any tensor type is accepted.
        validator.check_subclass("input_x", input_x, mstype.tensor, self.name)
        return input_x
class Softmax(PrimitiveWithInfer):
    r"""
    Softmax operation.
    Applies the Softmax operation to the input tensor on the specified axis.
    Suppose a slice in the given axis :math:`x` then for each element :math:`x_i`
    the Softmax function is shown as follows:
    .. math::
        \text{output}(x_i) = \frac{exp(x_i)}{\sum_{j = 0}^{N-1}\exp(x_j)},
    where :math:`N` is the length of the tensor.
    Args:
        axis (Union[int, tuple]): The axis to do the Softmax operation. Default: -1.
    Inputs:
        - **logits** (Tensor) - The input of Softmax, with float16 or float32 data type.
    Outputs:
        Tensor, with the same type and shape as the logits.
    Examples:
        >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
        >>> softmax = P.Softmax()
        >>> softmax(input_x)
        [0.01165623, 0.03168492, 0.08612854, 0.23412167, 0.6364086]
    """
    @prim_attr_register
    def __init__(self, axis=-1):
        self.init_prim_io_names(inputs=['x'], outputs=['output'])
        validator.check_value_type("axis", axis, [int, tuple], self.name)
        # Normalize an int axis to a one-element tuple attribute so the
        # checks below can always iterate.
        if isinstance(axis, int):
            self.add_prim_attr('axis', (axis,))
        for item in self.axis:
            validator.check_value_type("item of axis", item, [int], self.name)
    def infer_shape(self, logits):
        validator.check_integer("length of axis", len(self.axis), 1, Rel.GE, self.name)
        rank = len(logits)
        # Each axis must be in [-rank, rank); shape is unchanged.
        for axis_v in self.axis:
            validator.check_int_range("axis", axis_v, -rank, rank, Rel.INC_LEFT, self.name)
        return logits
    def infer_dtype(self, logits):
        validator.check_subclass("logits", logits, mstype.tensor, self.name)
        validator.check_tensor_type_same({"logits": logits}, mstype.float_type, self.name)
        return logits
class LogSoftmax(PrimitiveWithInfer):
    r"""
    Log Softmax activation function.
    Applies the Log Softmax function to the input tensor on the specified axis.
    Suppose a slice in the given axis :math:`x` then for each element :math:`x_i`
    the Log Softmax function is shown as follows:
    .. math::
        \text{output}(x_i) = \log \left(\frac{exp(x_i)} {\sum_{j = 0}^{N-1}\exp(x_j)}\right),
    where :math:`N` is the length of the Tensor.
    Args:
        axis (int): The axis to do the Log softmax operation. Default: -1.
    Inputs:
        - **logits** (Tensor) - The input of Log Softmax, with float16 or float32 data type.
    Outputs:
        Tensor, with the same type and shape as the logits.
    Examples:
        >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
        >>> log_softmax = P.LogSoftmax()
        >>> log_softmax(input_x)
        [-4.4519143, -3.4519143, -2.4519143, -1.4519144, -0.4519144]
    """
    @prim_attr_register
    def __init__(self, axis=-1):
        # Unlike Softmax, only a single int axis is accepted here.
        validator.check_value_type("axis", axis, [int], self.name)
    def infer_shape(self, logits):
        rank = len(logits)
        # Axis must be in [-rank, rank); shape is unchanged.
        validator.check_int_range('axis', self.axis, -rank, rank, Rel.INC_LEFT, self.name)
        return logits
    def infer_dtype(self, logits):
        validator.check_subclass("logits", logits, mstype.tensor, self.name)
        validator.check_tensor_type_same({"logits": logits}, mstype.float_type, self.name)
        return logits
class Softplus(PrimitiveWithInfer):
    r"""
    Softplus activation function.
    Softplus is a smooth approximation to the ReLU function.
    The function is shown as follows:
    .. math::
        \text{output} = \log(1 + \exp(\text{input_x})),
    Inputs:
        - **input_x** (Tensor) - The input tensor whose data type should be float.
    Outputs:
        Tensor, with the same type and shape as the `input_x`.
    Examples:
        >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
        >>> softplus = P.Softplus()
        >>> softplus(input_x)
        [1.3132615, 2.126928, 3.0485873, 4.01815, 5.0067153]
    """
    @prim_attr_register
    def __init__(self):
        """init Softplus"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])
    def infer_shape(self, input_x):
        # Element-wise op: shape passes through unchanged.
        return input_x
    def infer_dtype(self, input_x):
        # Any float dtype is accepted and preserved.
        validator.check_tensor_type_same({'input_x': input_x}, mstype.float_type, self.name)
        return input_x
class Softsign(PrimitiveWithInfer):
    r"""
    Softsign activation function.
    The function is shown as follows:
    .. math::
        \text{output} = \frac{\text{input_x}}{1 + \left| \text{input_x} \right|},
    Inputs:
        - **input_x** (Tensor) - The input tensor whose data type should be float16 or float32.
    Outputs:
        Tensor, with the same type and shape as the `input_x`.
    Examples:
        >>> input_x = Tensor(np.array([0, -1, 2, 30, -30]), mindspore.float32)
        >>> softsign = P.Softsign()
        >>> softsign(input_x)
        [0. -0.5 0.6666667 0.9677419 -0.9677419]
    """
    @prim_attr_register
    def __init__(self):
        """init Softsign"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])
    def infer_shape(self, input_x):
        # Element-wise op: shape passes through unchanged.
        return input_x
    def infer_dtype(self, input_x):
        # Restricted to float16/float32 (narrower than Softplus).
        validator.check_tensor_type_same({'input_x': input_x}, [mstype.float16, mstype.float32], self.name)
        return input_x
class ReLU(PrimitiveWithInfer):
    r"""
    Computes ReLU(Rectified Linear Unit) of input tensor element-wise.
    It returns :math:`\max(x,\ 0)` element-wise.
    Inputs:
        - **input_x** (Tensor) - The input tensor.
    Outputs:
        Tensor, with the same type and shape as the `input_x`.
    Examples:
        >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
        >>> relu = P.ReLU()
        >>> result = relu(input_x)
        [[0, 4.0, 0.0], [2.0, 0.0, 9.0]]
    """
    @prim_attr_register
    def __init__(self):
        """init ReLU"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])
    def infer_shape(self, input_x):
        # Element-wise op: shape passes through unchanged.
        return input_x
    def infer_dtype(self, input_x):
        # Accepts any numeric dtype (broader than ReLU6's float-only check).
        validator.check_tensor_type_same({'input_x': input_x}, mstype.number_type, self.name)
        return input_x
class ReLU6(PrimitiveWithInfer):
    r"""
    Computes ReLU(Rectified Linear Unit) upper bounded by 6 of input tensor element-wise.
    It returns :math:`\min(\max(0,x), 6)` element-wise.
    Inputs:
        - **input_x** (Tensor) - The input tensor. With float16 or float32 data type.
    Outputs:
        Tensor, with the same type and shape as the `input_x`.
    Examples:
        >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
        >>> relu6 = P.ReLU6()
        >>> result = relu6(input_x)
    """
    @prim_attr_register
    def __init__(self):
        """init ReLU6"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])
    def infer_shape(self, input_x):
        # Element-wise op: shape passes through unchanged.
        return input_x
    def infer_dtype(self, input_x):
        # Restricted to float16/float32, unlike plain ReLU.
        validator.check_tensor_type_same({'input_x': input_x}, (mstype.float16, mstype.float32), self.name)
        return input_x
class ReLUV2(PrimitiveWithInfer):
    r"""
    Computes ReLU(Rectified Linear Unit) of input tensor element-wise.
    It returns :math:`\max(x,\ 0)` element-wise.
    Inputs:
        - **input_x** (Tensor) - The input tensor should be a 4-D tensor.
    Outputs:
        - **output** (Tensor) - Has the same type and shape as the `input_x`.
        - **mask** (Tensor) - A tensor whose data type must be uint8.
    Examples:
        >>> input_x = Tensor(np.array([[[[1, -2], [-3, 4]], [[-5, 6], [7, -8]]]]), mindspore.float32)
        >>> relu_v2 = P.ReLUV2()
        >>> output = relu_v2(input_x)
        ([[[[1., 0.], [0., 4.]], [[0., 6.], [7., 0.]]]],
        [[[[1, 0], [2, 0]], [[2, 0], [1, 0]]]])
    """
    @prim_attr_register
    def __init__(self):
        """init ReLUV2"""
        self.init_prim_io_names(inputs=['x'], outputs=['output', 'mask'])
    def __infer__(self, input_x):
        input_shape = list(input_x['shape'])
        input_dtype = input_x['dtype']
        mask_shape = []
        if len(input_shape) != 4:
            raise ValueError("The `input_x` should be a 4-D tensor, "
                             f"but got a {len(input_shape)}-D tensor whose shape is {input_shape}")
        for i in enumerate(input_shape):
            if i[0] == 1:
                # The channel dim of the mask is packed: for 8-bit inputs 32
                # elements share a lane, otherwise 16.
                # Bug fix: the original tested `== uint8 and == int8`, which is
                # a contradiction (always False); membership is the intended test.
                if input_dtype in (mstype.uint8, mstype.int8):
                    mask_shape.append((input_shape[1] + 31) // 32)
                else:
                    mask_shape.append((input_shape[1] + 15) // 16)
            else:
                mask_shape.append(i[1])
        # Trailing packing dimension: 4 lanes for 8-bit dtypes, 2 otherwise
        # (same always-False bug fixed here as well).
        if input_dtype in (mstype.uint8, mstype.int8):
            mask_shape.append(4)
        else:
            mask_shape.append(2)
        output_shape = (input_x['shape'], mask_shape)
        validator.check_subclass("input_x", input_dtype, mstype.tensor, self.name)
        validator.check_tensor_type_same({'input_x': input_dtype}, mstype.number_type, self.name)
        mask_dtype = mstype.uint8
        output_dtype = (input_dtype, mask_dtype)
        return {'shape': output_shape,
                'dtype': output_dtype,
                'value': None}
class Elu(PrimitiveWithInfer):
    r"""
    Computes exponential linear: `alpha * (exp(x) - 1)` if x < 0, `x` otherwise.
    The data type of input tensor should be float.
    Args:
        alpha (float): The coefficient of negative factor whose type is float,
            only support '1.0' currently. Default: 1.0.
    Inputs:
        - **input_x** (Tensor) - The input tensor whose data type should be float.
    Outputs:
        Tensor, has the same shape and data type as `input_x`.
    Examples:
        >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
        >>> elu = P.Elu()
        >>> result = elu(input_x)
        Tensor([[-0.632 4.0 -0.999]
                [2.0 -0.993 9.0 ]], shape=(2, 3), dtype=mindspore.float32)
    """
    @prim_attr_register
    def __init__(self, alpha=1.0):
        """Init Elu"""
        validator.check_value_type("alpha", alpha, [float], self.name)
        # Only alpha == 1.0 is supported; any other value is rejected here.
        validator.check_number("alpha", alpha, 1.0, Rel.EQ, self.name)
    def infer_shape(self, input_x):
        # Element-wise op: shape passes through unchanged.
        return input_x
    def infer_dtype(self, input_x):
        validator.check_tensor_type_same({'input_x': input_x}, mstype.float_type, self.name)
        return input_x
class HSwish(PrimitiveWithInfer):
    r"""
    Hard swish activation function.
    Applies hswish-type activation element-wise. The input is a Tensor with any valid shape.
    Hard swish is defined as:
    .. math::
        \text{hswish}(x_{i}) = x_{i} * \frac{ReLU6(x_{i} + 3)}{6},
    where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.
    Inputs:
        - **input_data** (Tensor) - The input of HSwish, data type should be float16 or float32.
    Outputs:
        Tensor, with the same type and shape as the `input_data`.
    Examples:
        >>> hswish = P.HSwish()
        >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
        >>> result = hswish(input_x)
    """
    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(inputs=['x'], outputs=['output'])
    def infer_shape(self, xshape):
        # Element-wise op: shape passes through unchanged.
        return xshape
    def infer_dtype(self, x_dtype):
        validator.check_tensor_type_same({"x": x_dtype}, (mstype.float16, mstype.float32), self.name)
        return x_dtype
class Sigmoid(PrimitiveWithInfer):
    r"""
    Sigmoid activation function.
    Computes Sigmoid of input element-wise. The Sigmoid function is defined as:
    .. math::
        \text{sigmoid}(x_i) = \frac{1}{1 + exp(-x_i)},
    where :math:`x_i` is the element of the input.
    Inputs:
        - **input_x** (Tensor) - The input of Sigmoid, data type should be float16 or float32.
    Outputs:
        Tensor, with the same type and shape as the input_x.
    Examples:
        >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
        >>> sigmoid = P.Sigmoid()
        >>> sigmoid(input_x)
        [0.73105866, 0.880797, 0.9525742, 0.98201376, 0.9933071]
    """
    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(inputs=['x'], outputs=['output'])
    def infer_shape(self, input_x):
        # Element-wise op: shape passes through unchanged.
        return input_x
    def infer_dtype(self, input_x):
        validator.check_tensor_type_same({"input_x": input_x}, (mstype.float16, mstype.float32), self.name)
        return input_x
class HSigmoid(PrimitiveWithInfer):
    r"""
    Hard sigmoid activation function.
    Applies hard sigmoid activation element-wise. The input is a Tensor with any valid shape.
    Hard sigmoid is defined as:
    .. math::
        \text{hsigmoid}(x_{i}) = max(0, min(1, \frac{x_{i} + 3}{6})),
    where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.
    Inputs:
        - **input_data** (Tensor) - The input of HSigmoid, data type should be float16 or float32.
    Outputs:
        Tensor, with the same type and shape as the `input_data`.
    Examples:
        >>> hsigmoid = P.HSigmoid()
        >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
        >>> result = hsigmoid(input_x)
    """
    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(inputs=['x'], outputs=['output'])
    def infer_shape(self, x_shape):
        # Element-wise op: shape passes through unchanged.
        return x_shape
    def infer_dtype(self, x_dtype):
        validator.check_tensor_type_same({"x": x_dtype}, (mstype.float16, mstype.float32), self.name)
        return x_dtype
class Tanh(PrimitiveWithInfer):
    r"""
    Tanh activation function.
    Computes hyperbolic tangent of input element-wise. The Tanh function is defined as:
    .. math::
        tanh(x_i) = \frac{\exp(x_i) - \exp(-x_i)}{\exp(x_i) + \exp(-x_i)} = \frac{\exp(2x_i) - 1}{\exp(2x_i) + 1},
    where :math:`x_i` is an element of the input Tensor.
    Inputs:
        - **input_x** (Tensor) - The input of Tanh.
    Outputs:
        Tensor, with the same type and shape as the input_x.
    Examples:
        >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
        >>> tanh = P.Tanh()
        >>> tanh(input_x)
        [0.7615941, 0.9640276, 0.9950548, 0.9993293, 0.99990916]
    """
    @prim_attr_register
    def __init__(self):
        pass
    def infer_shape(self, input_x):
        # Element-wise op: shape passes through unchanged.
        return input_x
    def infer_dtype(self, input_x):
        # Unlike Sigmoid, only tensor-ness is checked here — no dtype restriction.
        validator.check_subclass("input_x", input_x, mstype.tensor, self.name)
        return input_x
class FusedBatchNorm(Primitive):
    r"""
    FusedBatchNorm is a BatchNorm that moving mean and moving variance will be computed instead of being loaded.
    Batch Normalization is widely used in convolutional networks. This operation applies
    Batch Normalization over input to avoid internal covariate shift as described in the
    paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal
    Covariate Shift <https://arxiv.org/abs/1502.03167>`_. It rescales and recenters the
    feature using a mini-batch of data and the learned parameters which can be described
    in the following formula.
    .. math::
        y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
    where :math:`\gamma` is scale, :math:`\beta` is bias, :math:`\epsilon` is epsilon.
    Args:
        mode (int): Mode of batch normalization, value is 0 or 1. Default: 0.
        epsilon (float): A small value added for numerical stability. Default: 1e-5.
        momentum (float): The hyper parameter to compute moving average for running_mean and running_var
            (e.g. :math:`new\_running\_mean = momentum * running\_mean + (1 - momentum) * current\_mean`).
            Momentum value should be [0, 1]. Default: 0.1.
    Inputs:
        - **input_x** (Tensor) - Tensor of shape :math:`(N, C)`.
        - **scale** (Tensor) - Tensor of shape :math:`(C,)`.
        - **bias** (Tensor) - Tensor of shape :math:`(C,)`.
        - **mean** (Tensor) - Tensor of shape :math:`(C,)`.
        - **variance** (Tensor) - Tensor of shape :math:`(C,)`.
    Outputs:
        Tuple of 5 Tensor, the normalized input and the updated parameters.
        - **output_x** (Tensor) - The same type and shape as the `input_x`.
        - **updated_scale** (Tensor) - Tensor of shape :math:`(C,)`.
        - **updated_bias** (Tensor) - Tensor of shape :math:`(C,)`.
        - **updated_moving_mean** (Tensor) - Tensor of shape :math:`(C,)`.
        - **updated_moving_variance** (Tensor) - Tensor of shape :math:`(C,)`.
    Examples:
        >>> input_x = Tensor(np.ones([128, 64, 32, 64]), mindspore.float32)
        >>> scale = Tensor(np.ones([64]), mindspore.float32)
        >>> bias = Tensor(np.ones([64]), mindspore.float32)
        >>> mean = Tensor(np.ones([64]), mindspore.float32)
        >>> variance = Tensor(np.ones([64]), mindspore.float32)
        >>> op = P.FusedBatchNorm()
        >>> output = op(input_x, scale, bias, mean, variance)
    """
    @prim_attr_register
    def __init__(self, mode=0, epsilon=1e-5, momentum=0.1):
        # NOTE(review): the docstring previously claimed momentum default 0.9;
        # the actual keyword default is 0.1 as seen here.
        self.init_prim_io_names(inputs=['x', 'scale', 'b', 'mean', 'variance'],
                                outputs=['y', 'running_mean', 'running_variance', 'save_mean', 'save_inv_variance'])
        self.mode = validator.check_integer('mode', mode, [0, 1], Rel.IN, self.name)
        self.epsilon = validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, self.name)
        self.momentum = validator.check_number_range('momentum', momentum, 0, 1, Rel.INC_BOTH, self.name)
        # Flag consumed elsewhere in the framework — presumably marks that this
        # op updates its Parameter inputs in place; confirm against usages.
        self._update_parameter = True
class FusedBatchNormEx(PrimitiveWithInfer):
    r"""
    FusedBatchNormEx is an extension of FusedBatchNorm, FusedBatchNormEx has one more output(output reserve)
    than FusedBatchNorm, reserve will be used in backpropagation phase. FusedBatchNorm is a BatchNorm that
    moving mean and moving variance will be computed instead of being loaded.
    Batch Normalization is widely used in convolutional networks. This operation applies
    Batch Normalization over input to avoid internal covariate shift as described in the
    paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal
    Covariate Shift <https://arxiv.org/abs/1502.03167>`_. It rescales and recenters the
    feature using a mini-batch of data and the learned parameters which can be described
    in the following formula.
    .. math::
        y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
    where :math:`\gamma` is scale, :math:`\beta` is bias, :math:`\epsilon` is epsilon.
    Args:
        mode (int): Mode of batch normalization, value is 0 or 1. Default: 0.
        epsilon (float): A small value added for numerical stability. Default: 1e-5.
        momentum (float): The hyper parameter to compute moving average for running_mean and running_var
            (e.g. :math:`new\_running\_mean = momentum * running\_mean + (1 - momentum) * current\_mean`).
            Momentum value should be [0, 1]. Default: 0.1.
    Inputs:
        - **input_x** (Tensor) - The input of FusedBatchNormEx, Tensor of shape :math:`(N, C)`,
          data type: float16 or float32.
        - **scale** (Tensor) - Parameter scale, same with gamma above-mentioned, Tensor of shape :math:`(C,)`,
          data type: float32.
        - **bias** (Tensor) - Parameter bias, same with beta above-mentioned, Tensor of shape :math:`(C,)`,
          data type: float32.
        - **mean** (Tensor) - mean value, Tensor of shape :math:`(C,)`, data type: float32.
        - **variance** (Tensor) - variance value, Tensor of shape :math:`(C,)`, data type: float32.
    Outputs:
        Tuple of 6 Tensors, the normalized input, the updated parameters and reserve.
        - **output_x** (Tensor) - The input of FusedBatchNormEx, same type and shape as the `input_x`.
        - **updated_scale** (Tensor) - Updated parameter scale, Tensor of shape :math:`(C,)`, data type: float32.
        - **updated_bias** (Tensor) - Updated parameter bias, Tensor of shape :math:`(C,)`, data type: float32.
        - **updated_moving_mean** (Tensor) - Updated mean value, Tensor of shape :math:`(C,)`, data type: float32.
        - **updated_moving_variance** (Tensor) - Updated variance value, Tensor of shape :math:`(C,)`,
          data type: float32.
        - **reserve** (Tensor) - reserve space, Tensor of shape :math:`(C,)`, data type: float32.
    Examples:
        >>> input_x = Tensor(np.ones([128, 64, 32, 64]), mindspore.float32)
        >>> scale = Tensor(np.ones([64]), mindspore.float32)
        >>> bias = Tensor(np.ones([64]), mindspore.float32)
        >>> mean = Tensor(np.ones([64]), mindspore.float32)
        >>> variance = Tensor(np.ones([64]), mindspore.float32)
        >>> op = P.FusedBatchNormEx()
        >>> output = op(input_x, scale, bias, mean, variance)
    """
    # scale/bias/mean/variance are writable Parameters; input_x may differ in
    # dtype (T2) from the float32 statistics (T).
    __mindspore_signature__ = (
        sig.make_sig('input_x', dtype=sig.sig_dtype.T2),
        sig.make_sig('scale', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('bias', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('mean', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('variance', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
    )
    @prim_attr_register
    def __init__(self, mode=0, epsilon=1e-5, momentum=0.1):
        # NOTE(review): the docstring previously claimed momentum default 0.9;
        # the actual keyword default is 0.1 as seen here.
        self.init_prim_io_names(inputs=['x', 'scale', 'b', 'mean', 'variance'],
                                outputs=['y', 'save_scale', 'save_bias', 'save_mean', 'save_inv_variance', 'reserve'])
        self.mode = validator.check_integer('mode', mode, [0, 1], Rel.IN, self.name)
        self.epsilon = validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, self.name)
        self.momentum = validator.check_number_range('momentum', momentum, 0, 1, Rel.INC_BOTH, self.name)
        self._update_parameter = True
        self.add_prim_attr('data_format', "NCHW")
    def infer_shape(self, input_x, scale, bias, mean, variance):
        # scale/bias/mean/variance are all 1-D of length C == input_x shape[1].
        validator.check_integer("scale rank", len(scale), 1, Rel.EQ, self.name)
        validator.check("scale shape", scale, "bias shape", bias, Rel.EQ, self.name)
        validator.check("scale shape[0]", scale[0], "input_x shape[1]", input_x[1], Rel.EQ, self.name)
        validator.check_integer("mean rank", len(mean), 1, Rel.EQ, self.name)
        validator.check("mean shape", mean, "variance shape", variance, Rel.EQ, self.name)
        validator.check("mean shape", mean, "scale shape", scale, Rel.EQ, self.name)
        return (input_x, scale, scale, scale, scale, scale)
    def infer_dtype(self, input_x, scale, bias, mean, variance):
        # The statistics must be float32 even when input_x is float16.
        validator.check_tensor_type_same({"input_x": input_x}, [mstype.float16, mstype.float32], self.name)
        args = {"scale": scale, "bias": bias}
        validator.check_tensor_type_same(args, [mstype.float32], self.name)
        args_moving = {"mean": mean, "variance": variance}
        valid_types = [mstype.tensor_type(mstype.float32)]
        validator.check_type_same(args_moving, valid_types, self.name)
        return (input_x, scale, scale, scale, scale, scale)
class BNTrainingReduce(PrimitiveWithInfer):
    """
    reduce sum at axis [0, 2, 3].
    Inputs:
        - **x** (Tensor) - Tensor of shape :math:`(N, C)`.
    Outputs:
        - **sum** (Tensor) - Tensor of shape :math:`(C,)`.
        - **square_sum** (Tensor) - Tensor of shape :math:`(C,)`.
    """
    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(inputs=['x'], outputs=['sum', 'square_sum'])
    def infer_shape(self, x_shape):
        # Input must be 4-D; both outputs are (C,) with C = x_shape[1].
        validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
        return ([x_shape[1]], [x_shape[1]])
    def infer_dtype(self, x_type):
        # Both outputs keep the input dtype.
        return (x_type, x_type)
class BNTrainingUpdate(PrimitiveWithInfer):
    """
    The primitive operator of the register and info descriptor in bn_training_update.
    """
    @prim_attr_register
    def __init__(self, isRef=True, epsilon=1e-5, factor=0.1):
        self.init_prim_io_names(inputs=['x', 'sum', 'square_sum', 'scale', 'b', 'mean', 'variance'],
                                outputs=['y', 'running_mean', 'running_variance', 'save_mean', 'save_inv_variance'])
        # NOTE(review): isRef is accepted but not validated/stored (see the
        # disabled line below); confirm whether it is consumed by the backend.
        #self.isRef = validator.check_integer('isRef', isRef, [0, 1], Rel.IN)
        self.epsilon = validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, 'BNTrainingUpdate')
        self.factor = validator.check_number_range('factor', factor, 0, 1, Rel.INC_BOTH, 'BNTrainingUpdate')
    def infer_shape(self, x, sum, square_sum, scale, b, mean, variance):
        # No validation here: y keeps x's shape, the four statistics outputs
        # keep variance's shape.  (Parameter `sum` shadows the builtin.)
        return (x, variance, variance, variance, variance)
    def infer_dtype(self, x, sum, square_sum, scale, b, mean, variance):
        # Dtypes mirror the shapes above.
        return (x, variance, variance, variance, variance)
class BatchNorm(PrimitiveWithInfer):
    r"""
    Batch Normalization for input data and updated parameters.
    Batch Normalization is widely used in convolutional neural networks. This operation
    applies Batch Normalization over input to avoid internal covariate shift as described
    in the paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal
    Covariate Shift <https://arxiv.org/abs/1502.03167>`_. It rescales and recenters the
    features using a mini-batch of data and the learned parameters which can be described
    in the following formula,
    .. math::
        y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
    where :math:`\gamma` is scale, :math:`\beta` is bias, :math:`\epsilon` is epsilon.
    Args:
        is_training (bool): If `is_training` is True, `mean` and `variance` are computed during training.
            If `is_training` is False, they're loaded from checkpoint during inference. Default: False.
        epsilon (float): A small value added for numerical stability. Default: 1e-5.
    Inputs:
        - **input_x** (Tensor) - Tensor of shape :math:`(N, C)`, with float16 or float32 data type.
        - **scale** (Tensor) - Tensor of shape :math:`(C,)`, with float16 or float32 data type.
        - **bias** (Tensor) - Tensor of shape :math:`(C,)`, has the same data type with `scale`.
        - **mean** (Tensor) - Tensor of shape :math:`(C,)`, with float16 or float32 data type.
        - **variance** (Tensor) - Tensor of shape :math:`(C,)`, has the same data type with `mean`.
    Outputs:
        Tuple of 5 Tensor, the normalized inputs and the updated parameters.
        - **output_x** (Tensor) - The same type and shape as the input_x. The shape is :math:`(N, C)`.
        - **updated_scale** (Tensor) - Tensor of shape :math:`(C,)`.
        - **updated_bias** (Tensor) - Tensor of shape :math:`(C,)`.
        - **reserve_space_1** (Tensor) - Tensor of shape :math:`(C,)`.
        - **reserve_space_2** (Tensor) - Tensor of shape :math:`(C,)`.
    Examples:
        >>> input_x = Tensor(np.ones([128, 64, 32, 64]), mindspore.float32)
        >>> scale = Tensor(np.ones([64]), mindspore.float32)
        >>> bias = Tensor(np.ones([64]), mindspore.float32)
        >>> mean = Tensor(np.ones([64]), mindspore.float32)
        >>> variance = Tensor(np.ones([64]), mindspore.float32)
        >>> batch_norm = P.BatchNorm()
        >>> output = batch_norm(input_x, scale, bias, mean, variance)
    """
    @prim_attr_register
    def __init__(self, is_training=False, epsilon=1e-5):
        validator.check_value_type('is_training', is_training, (bool,), self.name)
        validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, self.name)
        self.add_prim_attr('data_format', "NCHW")
        self.init_prim_io_names(inputs=['x', 'scale', 'offset', 'mean', 'variance'],
                                outputs=['y', 'batch_mean', 'batch_variance', 'reserve_space_1', 'reserve_space_2'])
    def infer_shape(self, input_x, scale, bias, mean, variance):
        validator.check_integer("scale rank", len(scale), 1, Rel.EQ, self.name)
        validator.check("scale shape", scale, "bias shape", bias, Rel.EQ, self.name)
        validator.check("scale shape[0]", scale[0], "input_x shape[1]", input_x[1], Rel.EQ, self.name)
        # In inference mode, mean/variance must be supplied and match scale;
        # during training they are computed from the batch instead.
        if not self.is_training:
            validator.check_integer("mean rank", len(mean), 1, Rel.EQ, self.name)
            validator.check("mean shape", mean, "variance shape", variance, Rel.EQ, self.name)
            validator.check("mean shape", mean, "scale shape", scale, Rel.EQ, self.name)
        return (input_x, scale, scale, scale, scale)
    def infer_dtype(self, input_x, scale, bias, mean, variance):
        validator.check_tensor_type_same({"input_x": input_x}, [mstype.float16, mstype.float32], self.name)
        args = {"scale": scale, "bias": bias}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        args_moving = {"mean": mean, "variance": variance}
        if self.is_training:
            # None is permitted during training — statistics may be absent.
            valid_types = [mstype.tensor_type(mstype.float16), mstype.tensor_type(mstype.float32), None]
            validator.check_type_same(args_moving, valid_types, self.name)
        else:
            args_moving = {"mean": mean, "variance": variance}
            validator.check_tensor_type_same(args_moving, [mstype.float16, mstype.float32], self.name)
        return (input_x, scale, bias, input_x, input_x)
class Conv2D(PrimitiveWithInfer):
    r"""
    2D convolution layer.
    Applies a 2D convolution over an input tensor which is typically of shape :math:`(N, C_{in}, H_{in}, W_{in})`,
    where :math:`N` is batch size and :math:`C_{in}` is channel number. For each batch of shape
    :math:`(C_{in}, H_{in}, W_{in})`, the formula is defined as:
    .. math::
        out_j = \sum_{i=0}^{C_{in} - 1} ccor(W_{ij}, X_i) + b_j,
    where :math:`ccor` is the cross correlation operator, :math:`C_{in}` is the input channel number, :math:`j` ranges
    from :math:`0` to :math:`C_{out} - 1`, :math:`W_{ij}` corresponds to the :math:`i`-th channel of the :math:`j`-th
    filter and :math:`out_{j}` corresponds to the :math:`j`-th channel of the output. :math:`W_{ij}` is a slice
    of kernel and it has shape :math:`(\text{ks_h}, \text{ks_w})`, where :math:`\text{ks_h}` and
    :math:`\text{ks_w}` are the height and width of the convolution kernel. The full kernel has shape
    :math:`(C_{out}, C_{in} // \text{group}, \text{ks_h}, \text{ks_w})`, where group is the group number
    to split the input in the channel dimension.
    If the 'pad_mode' is set to be "valid", the output height and width will be
    :math:`\left \lfloor{1 + \frac{H_{in} + 2 \times \text{padding} - \text{ks_h} -
    (\text{ks_h} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` and
    :math:`\left \lfloor{1 + \frac{W_{in} + 2 \times \text{padding} - \text{ks_w} -
    (\text{ks_w} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` respectively.
    The first introduction can be found in paper `Gradient Based Learning Applied to Document Recognition
    <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_. More detailed introduction can be found here:
    http://cs231n.github.io/convolutional-networks/.
    Args:
        out_channel (int): The dimension of the output.
        kernel_size (Union[int, tuple[int]]): The kernel size of the 2D convolution.
        mode (int): Modes for different convolutions. 0 Math convolution, 1 cross-correlation convolution ,
                    2 deconvolution, 3 depthwise convolution. Default: 1.
        pad_mode (str): Modes to fill padding. It could be "valid", "same", or "pad". Default: "valid".
        pad (Union(int, tuple[int])): The pad value to be filled. Default: 0. If `pad` is an integer, the paddings of
                    top, bottom, left and right are the same, equal to pad. If `pad` is a tuple of four integers, the
                    padding of top, bottom, left and right equal to pad[0], pad[1], pad[2], and pad[3] correspondingly.
        stride (Union(int, tuple[int])): The stride to be applied to the convolution filter. Default: 1.
        dilation (Union(int, tuple[int])): Specify the space to use between kernel elements. Default: 1.
        group (int): Split input into groups. Default: 1.
    Returns:
        Tensor, the value that applied 2D convolution.
    Inputs:
        - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
        - **weight** (Tensor) - Set size of kernel is :math:`(K_1, K_2)`, then the shape is
          :math:`(C_{out}, C_{in}, K_1, K_2)`.
    Outputs:
        Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
    Examples:
        >>> input = Tensor(np.ones([10, 32, 32, 32]), mindspore.float32)
        >>> weight = Tensor(np.ones([32, 32, 3, 3]), mindspore.float32)
        >>> conv2d = P.Conv2D(out_channel=32, kernel_size=3)
        >>> conv2d(input, weight)
    """
    @prim_attr_register
    def __init__(self,
                 out_channel,
                 kernel_size,
                 mode=1,
                 pad_mode="valid",
                 pad=0,
                 stride=1,
                 dilation=1,
                 group=1):
        """init Conv2D"""
        self.init_prim_io_names(inputs=['x', 'w'], outputs=['output'])
        # kernel_size normalizes to a 2-tuple; stride/dilation to 4-tuples (N, C, H, W).
        self.kernel_size = _check_positive_int_or_tuple('kernel_size', kernel_size, self.name)
        self.stride = _check_positive_int_or_tuple('stride', stride, self.name, allow_four=True, ret_four=True)
        self.add_prim_attr('stride', self.stride)
        self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name, allow_four=True, ret_four=True)
        self.add_prim_attr('dilation', self.dilation)
        validator.check_value_type('pad', pad, (int, tuple), self.name)
        if isinstance(pad, int):
            pad = (pad,) * 4  # same amount on top, bottom, left, right
        else:
            validator.check_integer('pad size', len(pad), 4, Rel.EQ, self.name)
        self.padding = pad
        self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad'], self.name)
        # Explicit padding values are only meaningful in 'pad' mode.
        if pad_mode != 'pad' and pad != (0, 0, 0, 0):
            raise ValueError(f"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.")
        if self.pad_mode == 'pad':
            for item in pad:
                validator.check_integer('pad item', item, 0, Rel.GE, self.name)
        # Only mode 1 (cross-correlation) is supported by this primitive.
        self.mode = validator.check_integer('mode', mode, 1, Rel.EQ, self.name)
        self.add_prim_attr('data_format', "NCHW")
        self.out_channel = validator.check_integer('out_channel', out_channel, 0, Rel.GT, self.name)
        self.group = validator.check_integer('group', group, 0, Rel.GT, self.name)
        self.add_prim_attr('offset_a', 0)
    def infer_shape(self, x_shape, w_shape, b_shape=None):
        """Validate x/w shapes and compute the NCHW output shape for the current pad mode.

        Also records the resolved per-side padding as the 'pad_list' attribute.
        """
        validator.check_integer("weight rank", len(w_shape), 4, Rel.EQ, self.name)
        validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
        # Grouped convolution: each group convolves C_in/group input channels.
        validator.check(f"x_shape[1] / group", x_shape[1] // self.group, "w_shape[1]", w_shape[1], Rel.EQ, self.name)
        validator.check('out_channel', self.out_channel, 'w_shape[0]', w_shape[0], Rel.EQ, self.name)
        validator.check('kernel_size', self.kernel_size, 'w_shape[2:4]', tuple(w_shape[2:4]), Rel.EQ, self.name)
        kernel_size_h = w_shape[2]
        kernel_size_w = w_shape[3]
        stride_h = self.stride[2]
        stride_w = self.stride[3]
        dilation_h = self.dilation[2]
        dilation_w = self.dilation[3]
        if self.pad_mode == "valid":
            # No padding: only fully-covered windows contribute to the output.
            h_out = math.ceil((x_shape[2] - dilation_h * (kernel_size_h - 1)) / stride_h)
            w_out = math.ceil((x_shape[3] - dilation_w * (kernel_size_w - 1)) / stride_w)
            pad_top, pad_bottom, pad_left, pad_right = 0, 0, 0, 0
        elif self.pad_mode == "same":
            # Pad so that out = ceil(in / stride); any odd padding goes to bottom/right.
            h_out = math.ceil(x_shape[2] / stride_h)
            w_out = math.ceil(x_shape[3] / stride_w)
            pad_needed_h = max(0, (h_out - 1) * stride_h + dilation_h * (kernel_size_h - 1) + 1 - x_shape[2])
            pad_top = math.floor(pad_needed_h / 2)
            pad_bottom = pad_needed_h - pad_top
            pad_needed_w = max(0, (w_out - 1) * stride_w + dilation_w * (kernel_size_w - 1) + 1 - x_shape[3])
            pad_left = math.floor(pad_needed_w / 2)
            pad_right = pad_needed_w - pad_left
        elif self.pad_mode == 'pad':
            # Explicit padding: standard convolution output-size formula.
            pad_top, pad_bottom, pad_left, pad_right = self.padding
            h_out = 1 + (x_shape[2] + pad_top + pad_bottom - kernel_size_h - (kernel_size_h - 1) * (dilation_h - 1)) \
                / stride_h
            w_out = 1 + (x_shape[3] + pad_left + pad_right - kernel_size_w - (kernel_size_w - 1) * (dilation_w - 1)) \
                / stride_w
            h_out = math.floor(h_out)
            w_out = math.floor(w_out)
        self.pad_list = [pad_top, pad_bottom, pad_left, pad_right]
        self.add_prim_attr('pad_list', (pad_top, pad_bottom, pad_left, pad_right))
        out_channel = self.out_channel
        out_shape = [x_shape[0], out_channel, h_out, w_out]
        return out_shape
    def infer_dtype(self, x_dtype, w_dtype, b_dtype=None):
        """Validate input dtypes; int8 inputs accumulate into an int32 output."""
        args = {'x': x_dtype, 'w': w_dtype}
        valid_types = [mstype.int8, mstype.int32, mstype.float16, mstype.float32]
        validator.check_tensor_type_same(args, valid_types, self.name)
        if x_dtype.element_type() == mstype.int8:
            return mstype.tensor_type(mstype.int32)
        return x_dtype
class DepthwiseConv2dNative(PrimitiveWithInfer):
    r"""
    Returns the depth-wise convolution value for the input.
    Applies depthwise conv2d for the input, which will generate more channels with channel_multiplier.
    Given an input tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` where :math:`N` is the batch size and a
    filter tensor with kernel size :math:`(ks_{h}, ks_{w})`, containing :math:`C_{in} * \text{channel_multiplier}`
    convolutional filters of depth 1; it applies different filters to each input channel (channel_multiplier channels
    for each input channel has the default value 1), then concatenates the results together. The output has
    :math:`\text{in_channels} * \text{channel_multiplier}` channels.
    Args:
        channel_multiplier (int): The multiplier for the original output convolution. Its value must be greater than 0.
        kernel_size (Union[int, tuple[int]]): The size of the convolution kernel.
        mode (int): Modes for different convolutions. 0 Math convolution, 1 cross-correlation convolution ,
                    2 deconvolution, 3 depthwise convolution. Default: 3.
        pad_mode (str): Modes to fill padding. It could be "valid", "same", or "pad". Default: "valid".
        pad (Union[int, tuple[int]]): The pad value to be filled. If `pad` is an integer, the paddings of
                    top, bottom, left and right are the same, equal to pad. If `pad` is a tuple of four integers, the padding
                    of top, bottom, left and right equal to pad[0], pad[1], pad[2], and pad[3] correspondingly. Default: 0.
        stride (Union[int, tuple[int]]): The stride to be applied to the convolution filter. Default: 1.
        dilation (Union[int, tuple[int]]): Specifies the dilation rate to be used for the dilated convolution.
            Default: 1.
        group (int): Splits input into groups. Default: 1.
    Inputs:
        - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
        - **weight** (Tensor) - Set the size of kernel as :math:`(K_1, K_2)`, then the shape is
          :math:`(K, C_{in}, K_1, K_2)`, `K` must be 1.
    Outputs:
        Tensor of shape :math:`(N, C_{in} * \text{channel_multiplier}, H_{out}, W_{out})`.
    Examples:
        >>> input = Tensor(np.ones([10, 32, 32, 32]), mindspore.float32)
        >>> weight = Tensor(np.ones([1, 32, 3, 3]), mindspore.float32)
        >>> depthwise_conv2d = P.DepthwiseConv2dNative(channel_multiplier = 3, kernel_size = (3, 3))
        >>> output = depthwise_conv2d(input, weight)
        >>> output.shape == (10, 96, 30, 30)
    """
    @prim_attr_register
    def __init__(self,
                 channel_multiplier,
                 kernel_size,
                 mode=3,
                 pad_mode="valid",
                 pad=0,
                 stride=1,
                 dilation=1,
                 group=1):
        """init DepthwiseConv2dNative"""
        self.init_prim_io_names(inputs=['x', 'w'], outputs=['output'])
        self.kernel_size = _check_positive_int_or_tuple('kernel_size', kernel_size, self.name)
        # Unlike Conv2D, stride/dilation must be square (equal height and width).
        self.stride = _check_positive_int_or_tuple('stride', stride, self.name)
        if self.stride[0] != self.stride[1]:
            raise ValueError("The height and width of stride should be equal,"
                             f"but got height:{self.stride[0]},  width:{self.stride[1]}")
        self.add_prim_attr('stride', (1, 1, self.stride[0], self.stride[1]))
        self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name)
        if self.dilation[0] != self.dilation[1]:
            raise ValueError("The height and width of dilation should be equal,"
                             f"but got height:{self.dilation[0]},  width:{self.dilation[1]}")
        self.add_prim_attr('dilation', (1, 1, self.dilation[0], self.dilation[1]))
        validator.check_value_type('pad', pad, (int, tuple), self.name)
        if isinstance(pad, int):
            pad = (pad,) * 4  # same amount on top, bottom, left, right
        else:
            validator.check_integer('pad size', len(pad), 4, Rel.EQ, self.name)
        self.padding = pad
        self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad'], self.name)
        # Explicit padding values are only meaningful in 'pad' mode.
        if pad_mode != 'pad' and pad != (0, 0, 0, 0):
            raise ValueError(f"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.")
        if self.pad_mode == 'pad':
            for item in pad:
                validator.check_integer('pad item', item, 0, Rel.GE, self.name)
        # Only mode 3 (depthwise) is supported by this primitive.
        self.mode = validator.check_integer("mode", mode, 3, Rel.EQ, self.name)
        self.add_prim_attr('data_format', "NCHW")
        self.channel_multiplier = validator.check_integer("channel_multiplier", channel_multiplier, 0, Rel.GT,
                                                          self.name)
        self.group = validator.check_integer("group", group, 0, Rel.GT, self.name)
        self.add_prim_attr('offset_a', 0)
    def infer_shape(self, x_shape, w_shape, b_shape=None):
        """Validate shapes and compute the NCHW output shape; records the 'pads' attribute."""
        validator.check_integer("weight rank", len(w_shape), 4, Rel.EQ, self.name)
        validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
        validator.check("x_shape[1]", x_shape[1], "w_shape[1]", w_shape[1], Rel.EQ, self.name)
        validator.check('kernel_size', self.kernel_size, 'w_shape[2:4]', tuple(w_shape[2:4]), Rel.EQ, self.name)
        kernel_size_n, _, kernel_size_h, kernel_size_w = w_shape
        _, _, stride_h, stride_w = self.stride
        _, _, dilation_h, dilation_w = self.dilation
        # Depthwise weight has a batch dim of 1: one filter stack per input channel.
        if kernel_size_n != 1:
            raise ValueError(f"The batch of input weight should be 1, but got {kernel_size_n}")
        if self.pad_mode == "valid":
            # No padding: only fully-covered windows contribute to the output.
            h_out = math.ceil((x_shape[2] - dilation_h * (kernel_size_h - 1)) / stride_h)
            w_out = math.ceil((x_shape[3] - dilation_w * (kernel_size_w - 1)) / stride_w)
            pad_top, pad_bottom, pad_left, pad_right = 0, 0, 0, 0
        elif self.pad_mode == "same":
            # Pad so that out = ceil(in / stride); any odd padding goes to bottom/right.
            h_out = math.ceil(x_shape[2] / stride_h)
            w_out = math.ceil(x_shape[3] / stride_w)
            pad_needed_h = max(0, (h_out - 1) * stride_h + dilation_h * (kernel_size_h - 1) + 1 - x_shape[2])
            pad_top = math.floor(pad_needed_h / 2)
            pad_bottom = pad_needed_h - pad_top
            pad_needed_w = max(0, (w_out - 1) * stride_w + dilation_w * (kernel_size_w - 1) + 1 - x_shape[3])
            pad_left = math.floor(pad_needed_w / 2)
            pad_right = pad_needed_w - pad_left
        elif self.pad_mode == 'pad':
            # Explicit padding: standard convolution output-size formula.
            pad_top, pad_bottom, pad_left, pad_right = self.padding
            h_out = 1 + (x_shape[2] + pad_top + pad_bottom - kernel_size_h - (kernel_size_h - 1) * (dilation_h - 1)) \
                / stride_h
            w_out = 1 + (x_shape[3] + pad_left + pad_right - kernel_size_w - (kernel_size_w - 1) * (dilation_w - 1)) \
                / stride_w
            h_out = math.floor(h_out)
            w_out = math.floor(w_out)
        self.pad_list = (pad_top, pad_bottom, pad_left, pad_right)
        self.add_prim_attr('pads', self.pad_list)
        # Each input channel produces channel_multiplier output channels.
        out_channel = self.channel_multiplier * x_shape[1]
        out_shape = [x_shape[0], out_channel, h_out, w_out]
        return out_shape
    def infer_dtype(self, x_dtype, w_dtype, b_dtype=None):
        """Validate input dtypes; int8 inputs accumulate into an int32 output."""
        args = {'x': x_dtype, 'w': w_dtype}
        validator.check_tensor_type_same(args, mstype.number_type, self.name)
        if x_dtype.element_type() == mstype.int8:
            return mstype.tensor_type(mstype.int32)
        return x_dtype
class _Pool(PrimitiveWithInfer):
    r"""
    Performs max/avg pooling operation.

    Shared base for MaxPool, MaxPoolWithArgmax and AvgPool; the concrete
    behavior is selected by the subclass's primitive name.

    Args:
        ksize (Union[int, tuple[int]]): The size of the kernel, that should be a tuple
            of two `int` for height and width. Default: 1.
        strides (Union[int, tuple[int]]): The stride of the window, that should be
            a tuple of two `int` for height and width. Default: 1.
        padding (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
            Default: "valid".
    """
    @prim_attr_register
    def __init__(self, ksize=1, strides=1, padding="valid"):
        self.init_prim_io_names(inputs=['x'], outputs=['output'])
        validator.check_value_type('ksize', ksize, [int, tuple], self.name)
        validator.check_value_type('strides', strides, [int, tuple], self.name)
        # padding is normalized to upper case; only 'VALID'/'SAME' are accepted.
        self.padding = validator.check_string('padding', padding.upper(), ['VALID', 'SAME'], self.name)
        self.add_prim_attr("padding", self.padding)
        # MaxPoolWithArgmax expects (1, h, w, 1) attribute layout instead of
        # (1, 1, h, w) — presumably what its backend kernel requires; TODO confirm.
        self.is_maxpoolwithargmax = (self.name == "MaxPoolWithArgmax")
        if not self.is_maxpoolwithargmax:
            self.add_prim_attr('data_format', "NCHW")
        self.ksize = _check_positive_int_or_tuple("ksize", ksize, self.name, allow_four=False, ret_four=True)
        if self.is_maxpoolwithargmax:
            self.ksize = (1, self.ksize[-2], self.ksize[-1], 1)
        self.add_prim_attr("ksize", self.ksize)
        self.strides = _check_positive_int_or_tuple("strides", strides, self.name, allow_four=False, ret_four=True)
        if self.is_maxpoolwithargmax:
            self.strides = (1, self.strides[-2], self.strides[-1], 1)
        self.add_prim_attr("strides", self.strides)
    def infer_shape(self, x_shape):
        """Compute the pooled NCHW output shape for VALID or SAME padding."""
        validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
        batch, channel, input_h, input_w = x_shape
        # Kernel/stride tuples use different layouts depending on the subclass.
        if self.is_maxpoolwithargmax:
            _, kernel_h, kernel_w, _ = self.ksize
            _, stride_h, stride_w, _ = self.strides
        else:
            _, _, kernel_h, kernel_w = self.ksize
            _, _, stride_h, stride_w = self.strides
        if self.padding == "VALID":
            # Only windows fully inside the input contribute.
            out_h = math.ceil((input_h - (kernel_h - 1)) / stride_h)
            out_w = math.ceil((input_w - (kernel_w - 1)) / stride_w)
        elif self.padding == "SAME":
            out_h = math.ceil(input_h / stride_h)
            out_w = math.ceil(input_w / stride_w)
        out_shape = [batch, channel, out_h, out_w]
        # A kernel larger than the input would yield a non-positive dimension.
        for shape_value in out_shape:
            if shape_value <= 0:
                raise ValueError(f"For '{self.name}' The kernel size is not valid, "
                                 f"please check it if is larger than data's shape size.")
        return out_shape
    def infer_dtype(self, x_dtype):
        """Pooling preserves the input dtype; input must be a tensor."""
        validator.check_subclass("input", x_dtype, mstype.tensor, self.name)
        return x_dtype
class MaxPool(_Pool):
    r"""
    Max pooling operation.

    Applies a 2D max pooling over an input Tensor regarded as a stack of 2D planes.
    For an input of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, MaxPool outputs the
    regional maximum over the :math:`(H_{in}, W_{in})` dimensions. With kernel size
    :math:`ks = (h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1)`:

    .. math::
        \text{output}(N_i, C_j, h, w) = \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1}
        \text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)

    Args:
        ksize (Union[int, tuple[int]]): Size of the pooling window; a single int applies
            to both height and width, a 2-tuple gives (height, width). Default: 1.
        strides (Union[int, tuple[int]]): Window movement; a single int applies to both
            directions, a 2-tuple gives (height, width) strides. Default: 1.
        padding (str): Pad mode, "same" or "valid" (case insensitive). Default: "valid".

            - same: Output height/width equal ceil(input / stride); padding is spread
              evenly on both sides, with any extra row/column added at bottom/right.
            - valid: No padding; only complete windows contribute, extra pixels are
              discarded.

    Inputs:
        - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.

    Outputs:
        Tensor, with shape :math:`(N, C_{out}, H_{out}, W_{out})`.

    Examples:
        >>> input_tensor = Tensor(np.arange(1 * 3 * 3 * 4).reshape((1, 3, 3, 4)), mindspore.float32)
        >>> maxpool_op = P.MaxPool(padding="VALID", ksize=2, strides=1)
        >>> output_tensor = maxpool_op(input_tensor)
    """
    @prim_attr_register
    def __init__(self, ksize=1, strides=1, padding="valid"):
        # All behavior lives in the shared _Pool base; this subclass only names it.
        super(MaxPool, self).__init__(ksize, strides, padding)
class MaxPoolWithArgmax(_Pool):
    r"""
    Performs max pooling on the input Tensor and returns both the max values and
    a mask describing where they came from.

    For an input of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})` the pooling is the
    regional maximum over :math:`(H_{in}, W_{in})`. With kernel size
    :math:`ks = (h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1)`:

    .. math::
        \text{output}(N_i, C_j, h, w) = \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1}
        \text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)

    Args:
        ksize (Union[int, tuple[int]]): Size of the pooling window; a single int applies
            to both height and width, a 2-tuple gives (height, width). Default: 1.
        strides (Union[int, tuple[int]]): Window movement; a single int applies to both
            directions, a 2-tuple gives (height, width) strides. Default: 1.
        padding (str): Pad mode, "same" or "valid" (case insensitive). Default: "valid".

            - same: Output height/width equal ceil(input / stride); padding is spread
              evenly on both sides, with any extra row/column added at bottom/right.
            - valid: No padding; only complete windows contribute, extra pixels are
              discarded.

    Inputs:
        - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
          Data type should be float16 or float32.

    Outputs:
        Tuple of 2 Tensor, the maxpool result and where max values come from.

        - **output** (Tensor) - Maxpooling result, with shape :math:`(N, C_{out}, H_{out}, W_{out})`.
        - **mask** (Tensor) - Max values' index represented by the mask.

    Examples:
        >>> input_tensor = Tensor(np.arange(1 * 3 * 3 * 4).reshape((1, 3, 3, 4)), mindspore.float32)
        >>> maxpool_arg_op = P.MaxPoolWithArgmax(padding="VALID", ksize=2, strides=1)
        >>> output_tensor, argmax = maxpool_arg_op(input_tensor)
    """
    def __init__(self, ksize=1, strides=1, padding="valid"):
        super(MaxPoolWithArgmax, self).__init__(ksize, strides, padding)
        # The mask layout and dtype depend on the backend.
        device = context.get_context("device_target")
        self.is_tbe = device == "Ascend"
        self.is_gpu = device == "GPU"
    def infer_shape(self, x_shape):
        """Pooled output shape plus the backend-specific argmax/mask shape."""
        out_shape = _Pool.infer_shape(self, x_shape)
        _, _, out_h, out_w = out_shape
        _, kernel_h, kernel_w, _ = self.ksize
        if self.is_tbe:
            # TBE packs the mask: dim 2 is one plane per kernel position, dim 3 is
            # the output area in 16-wide units plus one — presumably the Ascend
            # kernel's packed-uint16 layout; TODO confirm.
            argmax_shape = [x_shape[0],
                            x_shape[1],
                            kernel_h * kernel_w,
                            math.ceil(out_h * out_w / 16) + 1]
        else:
            # Other backends produce one index per pooled element.
            argmax_shape = out_shape
        return out_shape, argmax_shape
    def infer_dtype(self, x_dtype):
        """Values keep the input dtype; the mask dtype depends on the backend."""
        validator.check_tensor_type_same({"x": x_dtype}, (mstype.float16, mstype.float32), self.name)
        argmax_dtype = mstype.int32 if self.is_gpu else mstype.uint16
        return x_dtype, argmax_dtype
class AvgPool(_Pool):
    r"""
    Average pooling operation.

    Applies a 2D average pooling over an input Tensor regarded as a stack of 2D
    planes. For an input of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, AvgPool
    outputs the regional average over the :math:`(H_{in}, W_{in})` dimensions. With
    kernel size :math:`ks = (h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1)`:

    .. math::
        \text{output}(N_i, C_j, h, w) = \frac{1}{h_{ker} * w_{ker}} \sum_{m=0}^{h_{ker}-1} \sum_{n=0}^{w_{ker}-1}
        \text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)

    Args:
        ksize (Union[int, tuple[int]]): Size of the averaging window; a single int
            applies to both height and width, a 2-tuple gives (height, width).
            Default: 1.
        strides (Union[int, tuple[int]]): Window movement; a single int applies to
            both directions, a 2-tuple gives (height, width) strides. Default: 1.
        padding (str): Pad mode, "same" or "valid" (case insensitive). Default: "valid".

            - same: Output height/width equal ceil(input / stride); padding is spread
              evenly on both sides, with any extra row/column added at bottom/right.
            - valid: No padding; only complete windows contribute, extra pixels are
              discarded.

    Inputs:
        - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.

    Outputs:
        Tensor, with shape :math:`(N, C_{out}, H_{out}, W_{out})`.

    Examples:
        >>> input_x = Tensor(np.arange(1 * 3 * 3 * 4).reshape(1, 3, 3, 4), mindspore.float32)
        >>> avgpool_op = P.AvgPool(padding="VALID", ksize=2, strides=1)
        >>> result = avgpool_op(input_x)
    """
    @prim_attr_register
    def __init__(self, ksize=1, strides=1, padding="valid"):
        # Record which backend flavor is in use; checked in the same order as
        # before: GPU device first, then the GE flag, else a generic target.
        if context.get_context("device_target") == "GPU":
            self.target = "GPU"
        elif context.get_context("enable_ge"):
            self.target = "GE"
        else:
            self.target = "OTHER"
        super(AvgPool, self).__init__(ksize, strides, padding)
class Conv2DBackpropInput(PrimitiveWithInfer):
    """
    Computes the gradients of convolution with respect to the input.
    Args:
        out_channel (int): The dimensionality of the output space.
        kernel_size (Union[int, tuple[int]]): The size of the convolution window.
        pad_mode (str): Modes to fill padding. It could be "valid", "same", or "pad". Default: "valid".
        pad (Union[int, tuple[int]]): The pad value to be filled. Default: 0. If `pad` is an integer, the paddings of
                    top, bottom, left and right are the same, equal to pad. If `pad` is a tuple of four integers, the
                    padding of top, bottom, left and right equal to pad[0], pad[1], pad[2], and pad[3] correspondingly.
        mode (int): Modes for different convolutions. 0 Math convolution, 1 cross-correlation convolution ,
                       2 deconvolution, 3 depthwise convolution. Default: 1.
        stride (Union[int. tuple[int]]): The stride to be applied to the convolution filter. Default: 1.
        dilation (Union[int. tuple[int]]): Specifies the dilation rate to be used for the dilated convolution.
            Default: 1.
        group (int): Splits input into groups. Default: 1.
    Returns:
        Tensor, the gradients of convolution.
    Examples:
        >>> dout = Tensor(np.ones([10, 32, 30, 30]), mindspore.float32)
        >>> weight = Tensor(np.ones([32, 32, 3, 3]), mindspore.float32)
        >>> x = Tensor(np.ones([10, 32, 32, 32]))
        >>> conv2d_backprop_input = P.Conv2DBackpropInput(out_channel=32, kernel_size=3)
        >>> conv2d_backprop_input(dout, weight, F.shape(x))
    """
    @prim_attr_register
    def __init__(self,
                 out_channel,
                 kernel_size,
                 pad_mode="valid",
                 pad=0,
                 pad_list=None,
                 mode=1,
                 stride=1,
                 dilation=1,
                 group=1):
        """init Conv2DBackpropInput"""
        self.init_prim_io_names(inputs=['out_backprop', 'filter', 'input_sizes'], outputs=['output'])
        self.out_channel = validator.check_integer('out_channel', out_channel, 0, Rel.GT, self.name)
        self.kernel_size = _check_positive_int_or_tuple('kernel_size', kernel_size, self.name)
        # Note: unlike Conv2D, stride stays a 2-tuple (ret_four=False) while
        # dilation is expanded to a 4-tuple; __infer__ indexes them accordingly.
        self.stride = _check_positive_int_or_tuple('stride', stride, self.name, allow_four=True, ret_four=False)
        self.add_prim_attr('stride', self.stride)
        self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name, allow_four=True, ret_four=True)
        self.add_prim_attr('dilation', self.dilation)
        validator.check_value_type('pad', pad, (int, tuple), self.name)
        if isinstance(pad, int):
            pad = (pad,) * 4  # same amount on top, bottom, left, right
        else:
            validator.check_integer('pad size', len(pad), 4, Rel.EQ, self.name)
        self.padding = pad
        self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad'], self.name)
        # Explicit padding values are only meaningful in 'pad' mode.
        if pad_mode != 'pad' and pad != (0, 0, 0, 0):
            raise ValueError(f"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.")
        if self.pad_mode == 'pad':
            for item in pad:
                validator.check_integer('pad item', item, 0, Rel.GE, self.name)
        # pad_mode is re-registered upper-cased; __infer__ compares against
        # 'SAME'/'PAD' because check_string above bound the lower-case value
        # to self.pad_mode while prim_attr_register re-syncs the attribute.
        pad_mode = pad_mode.upper()
        self.add_prim_attr('pad_mode', pad_mode)
        self.mode = validator.check_integer('mode', mode, 1, Rel.EQ, self.name)
        self.group = validator.check_integer('group', group, 0, Rel.GT, self.name)
        self.add_prim_attr('data_format', "NCHW")
        if pad_list:
            for x in pad_list:
                validator.check_integer('element of pad_list', x, 0, Rel.GE, self.name)
        self.pad_list = pad_list
    def __infer__(self, doutput, w, x_size):
        """Validate dtypes and produce the gradient shape (= the original input size).

        Also resolves and registers the per-side 'pad_list' attribute used by
        the backward kernel.
        """
        x_size_v = x_size['value']
        # input_sizes must be a constant tuple of ints at graph-compile time.
        validator.check_value_type('x_size', x_size_v, [tuple], self.name)
        for i, dim_len in enumerate(x_size_v):
            validator.check_value_type("x_size[%d]" % i, dim_len, [int], self.name)
        args = {'doutput': doutput['dtype'], 'w': w['dtype']}
        valid_types = [mstype.int8, mstype.int32, mstype.float16, mstype.float32]
        validator.check_tensor_type_same(args, valid_types, self.name)
        # infer shape
        dout_shape = doutput['shape']
        kernel_h = self.kernel_size[0]
        kernel_w = self.kernel_size[1]
        stride_h = self.stride[0]
        stride_w = self.stride[1]
        dilation_h = self.dilation[2]
        dilation_w = self.dilation[3]
        # default pad mode is valid
        pad_list = (0, 0, 0, 0)
        # An explicit pad_list overrides the pad-mode-derived padding.
        if self.pad_list:
            pad_list = tuple(self.pad_list)
        elif self.pad_mode == "SAME":
            # NOTE(review): self.pad_mode appears to hold the upper-cased value via
            # the 'pad_mode' attribute registered in __init__ — verify; otherwise
            # these branches would never fire.
            pad_needed_h = max(0, (dout_shape[2] - 1) * stride_h + dilation_h * (kernel_h - 1) + 1 - x_size_v[2])
            pad_top = math.floor(pad_needed_h / 2)
            pad_bottom = pad_needed_h - pad_top
            pad_needed_w = max(0, (dout_shape[3] - 1) * stride_w + dilation_w * (kernel_w - 1) + 1 - x_size_v[3])
            pad_left = math.floor(pad_needed_w / 2)
            pad_right = pad_needed_w - pad_left
            pad_list = (pad_top, pad_bottom, pad_left, pad_right)
        elif self.pad_mode == 'PAD':
            pad_list = self.padding
        self.add_prim_attr('pad_list', pad_list)
        out = {
            'value': None,
            'shape': x_size_v,
            'dtype': doutput['dtype'],
        }
        return out
class BiasAdd(PrimitiveWithInfer):
    r"""
    Returns the sum of the input tensor and a bias tensor.

    The 1-D bias is added along the channel axis (dimension 1 of an NCHW
    layout) and broadcast over every other axis.

    Inputs:
        - **input_x** (Tensor) - The input tensor. The shape can be 2-4 dimensions.
        - **bias** (Tensor) - The bias tensor, with shape :math:`(C)`.
          The shape of `bias` must be the same as `input_x` in the second dimension.

    Outputs:
        Tensor, with the same shape and type as `input_x`.

    Examples:
        >>> input_x = Tensor(np.arange(6).reshape((2, 3)), mindspore.float32)
        >>> bias = Tensor(np.random.random(3).reshape((3,)), mindspore.float32)
        >>> bias_add = P.BiasAdd()
        >>> bias_add(input_x, bias)
    """
    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(inputs=['x', 'b'], outputs=['output'])
        self.add_prim_attr('data_format', 'NCHW')
    def infer_shape(self, x_shape, b_shape):
        """Input keeps its shape; bias must be 1-D and match the channel dim."""
        validator.check_integer("x rank", len(x_shape), 2, Rel.GE, self.name)
        validator.check_integer("bias rank", len(b_shape), 1, Rel.EQ, self.name)
        validator.check("b_shape[0]", b_shape[0], "x_shape[1]", x_shape[1], Rel.EQ, self.name)
        return x_shape
    def infer_dtype(self, x_type, b_type):
        """Both operands must be matching numeric tensors; output keeps x's dtype."""
        validator.check_tensor_type_same({"input_x": x_type, "bias": b_type}, mstype.number_type, self.name)
        return x_type
class TopK(PrimitiveWithInfer):
    """
    Finds the values and indices of the `k` largest entries along the last dimension.

    Args:
        sorted (bool): If true, the resulting elements will
            be sorted by the values in descending order. Default: False.

    Inputs:
        - **input_x** (Tensor) - Input to be computed, data type should be float16, float32 or int32.
        - **k** (int) - Number of top elements to be computed along the last dimension,
          constant input is needed.

    Outputs:
        Tuple of 2 Tensor, the values and the indices.

        - **values** (Tensor) - The `k` largest elements along each last dimensional slice.
        - **indices** (Tensor) - The indices of values within the last dimension of input.

    Examples:
        >>> topk = P.TopK(sorted=True)
        >>> input_x = Tensor([1, 2, 3, 4, 5], mindspore.float16)
        >>> k = 3
        >>> values, indices = topk(input_x, k)
        >>> assert values == Tensor(np.array([5, 4, 3]), mstype.float16)
        >>> assert indices == Tensor(np.array([4, 3, 2]), mstype.int32)
    """
    @prim_attr_register
    def __init__(self, sorted=False):
        validator.check_value_type("sorted", sorted, [bool], self.name)
        self.init_prim_io_names(inputs=['input', 'k'],
                                outputs=['values', 'indices'])
    def __infer__(self, input_x, k):
        """Both outputs take x's shape with the last dim replaced by k."""
        x_dtype = input_x['dtype']
        validator.check_tensor_type_same({'x': x_dtype},
                                         (mstype.int32, mstype.float16, mstype.float32),
                                         self.name)
        k_value = k['value']
        # k must be a compile-time constant int.
        validator.check_value_type('k', k_value, (int,), self.name)
        out_shape = list(input_x['shape'])
        out_shape[-1] = k_value
        return {'shape': (out_shape, out_shape),
                'dtype': (x_dtype, mstype.int32),
                'value': None}
class SoftmaxCrossEntropyWithLogits(PrimitiveWithInfer):
    r"""
    Gets the softmax cross-entropy value between logits and labels which should be one-hot encoding.

    Note:
        Sets input logits as `X`, input label as `Y`, output as `loss`. Then,

        .. math::
            p_{ij} = softmax(X_{ij}) = \frac{exp(x_i)}{\sum_{j = 0}^{N-1}\exp(x_j)}

        .. math::
            loss_{ij} = -\sum_j{Y_{ij} * ln(p_{ij})}

    Inputs:
        - **logits** (Tensor) - Input logits, with shape :math:`(N, C)`. Data type should be float16 or float32.
        - **labels** (Tensor) - Ground truth labels, with shape :math:`(N, C)`, has the same data type with `logits`.

    Outputs:
        Tuple of 2 Tensor, the loss shape is `(N,)`, and the dlogits with the same shape as `logits`.

    Examples:
        >>> logits = Tensor([[2, 4, 1, 4, 5], [2, 1, 2, 4, 3]], mindspore.float32)
        >>> labels = Tensor([[0, 0, 0, 0, 1], [0, 0, 0, 1, 0]], mindspore.float32)
        >>> softmax_cross = P.SoftmaxCrossEntropyWithLogits()
        >>> loss, backprop = softmax_cross(logits, labels)
    """
    @prim_attr_register
    def __init__(self):
        pass
    def infer_shape(self, logits_shape, labels_shape):
        """Loss is one value per sample; dlogits mirrors the logits shape."""
        validator.check("logits_shape", logits_shape, "labels_shape", labels_shape, Rel.EQ, self.name)
        return ([logits_shape[0]], logits_shape)
    def infer_dtype(self, logits_type, labels_type):
        """Logits and labels must share a float16/float32 dtype, which both outputs keep."""
        validator.check_tensor_type_same({"logits": logits_type, "labels": labels_type},
                                         (mstype.float16, mstype.float32), self.name)
        return (logits_type, logits_type)
class SparseSoftmaxCrossEntropyWithLogits(PrimitiveWithInfer):
    r"""
    Computes the softmax cross-entropy value between logits and sparse encoding labels.
    Note:
        Sets input logits as `X`, input label as `Y`, output as `loss`. Then,
        .. math::
            p_{ij} = softmax(X_{ij}) = \frac{exp(x_i)}{\sum_{j = 0}^{N-1}\exp(x_j)}
        .. math::
            loss_{ij} = \begin{cases} -ln(p_{ij}), &j = y_i \cr -ln(1 - p_{ij}), & j \neq y_i \end{cases}
        .. math::
            loss = \sum_{ij} loss_{ij}
    Args:
        is_grad (bool): If it's true, this operation returns the computed gradient. Default: False.
    Inputs:
        - **logits** (Tensor) - Input logits, with shape :math:`(N, C)`. Data type should be float16 or float32.
        - **labels** (Tensor) - Ground truth labels, with shape :math:`(N)`.
          Data type should be int32 or int64.
    Outputs:
        Tensor, if `is_grad` is False, the output tensor is the value of loss which is a scalar tensor;
        if `is_grad` is True, the output tensor is the gradient of input with the same shape as `logits`.
    Examples:
        Please refer to the usage in nn.SoftmaxCrossEntropyWithLogits source code.
    """
    @prim_attr_register
    def __init__(self, is_grad=False):
        self.init_prim_io_names(inputs=['features', 'labels'], outputs=['output'])
        # When is_grad is True the same primitive doubles as its own backward op.
        self.is_grad = is_grad
        # 'sens' is the gradient sensitivity scale consumed by the backend kernel.
        self.add_prim_attr('sens', 1.0)
    def infer_shape(self, logits_shape, labels_shape):
        # Only the batch dimensions must agree; labels carry class indices, not one-hot rows.
        validator.check("logits_shape[0]", logits_shape[0], "labels_shape[0]", labels_shape[0], Rel.EQ, self.name)
        loss_shape = []
        if self.is_grad:
            # Gradient mode: output has the shape of the logits.
            return logits_shape
        # Loss mode: output is a scalar (empty shape).
        return loss_shape
    def infer_dtype(self, logits_type, labels_type):
        validator.check_tensor_type_same({"logits": logits_type}, (mstype.float16, mstype.float32), self.name)
        validator.check_tensor_type_same({"labels": labels_type}, (mstype.int32, mstype.int64), self.name)
        return logits_type
class ApplyMomentum(PrimitiveWithInfer):
    """
    Optimizer that implements the Momentum algorithm.
    Refer to the paper `On the importance of initialization and momentum in deep
    learning <https://dl.acm.org/doi/10.5555/3042817.3043064>`_ for more details.
    Inputs of `variable`, `accumulation` and `gradient` comply with the implicit type conversion rules
    to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    Data type conversion of Parameter is not supported. RuntimeError exception will be thrown.
    Args:
        use_locking (bool): Enable a lock to protect the update of variable and accumlation tensors. Default: False.
        use_nesterov (bool): Enable Nesterov momentum. Default: False.
        gradient_scale (float): The scale of the gradient. Default: 1.0.
    Inputs:
        - **variable** (Parameter) - Weights to be updated. data type should be float.
        - **accumulation** (Parameter) - Accumulated gradient value by moment weight.
          Has the same data type with `variable`.
        - **learning_rate** (Union[Number, Tensor]) - The learning rate value, should be a float number or
          a scalar tensor with float data type.
        - **gradient** (Tensor) - Gradients, has the same data type as `variable`.
        - **momentum** (Union[Number, Tensor]) - Momentum, should be a float number or
          a scalar tensor with float data type.
    Outputs:
        Tensor, parameters to be updated.
    Examples:
        Please refer to the usage in nn.ApplyMomentum.
    """
    # Signature declares read-write parameters and the dtype groups used for
    # implicit type promotion (T for var/accum/grad, T1/T2 for scalars).
    __mindspore_signature__ = (
        sig.make_sig('variable', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accumulation', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('learning_rate', dtype=sig.sig_dtype.T1),
        sig.make_sig('gradient', dtype=sig.sig_dtype.T),
        sig.make_sig('momentum', dtype=sig.sig_dtype.T2),
    )
    @prim_attr_register
    def __init__(self, use_nesterov=False, use_locking=False, gradient_scale=1.0):
        self.init_prim_io_names(inputs=['variable', 'accumulation', 'learning_rate', 'gradient', 'momentum'],
                                outputs=['output'])
        # The Ascend TBE kernel (outside GE mode) emits two outputs; cache the flags here.
        self.is_tbe = context.get_context("device_target") == "Ascend"
        self.is_ge = context.get_context("enable_ge")
    def infer_shape(self, v_shape, a_shape, l_shape, g_shape, m_shape):
        if not self.is_ge and self.is_tbe:
            # TBE kernel returns (updated var, updated accumulation).
            return v_shape, v_shape
        return v_shape
    def infer_dtype(self, v_dtype, a_dtype, l_dtype, g_dtype, m_dtype):
        valid_types = [mstype.float16, mstype.float32, mstype.float64]
        # RefKey inputs are resolved by the framework, so only check real tensors.
        if v_dtype != mstype.type_refkey and a_dtype != mstype.type_refkey:
            validator.check_tensor_type_same({"v": v_dtype}, valid_types, self.name)
            validator.check_tensor_type_same({"a": a_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"l_dtype": l_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"g_dtype": g_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"m_dtype": m_dtype}, valid_types, self.name)
        if not self.is_ge and self.is_tbe:
            return g_dtype, g_dtype
        return g_dtype
class SmoothL1Loss(PrimitiveWithInfer):
    r"""
    Computes smooth L1 loss, a robust L1 loss.
    SmoothL1Loss is a Loss similar to MSELoss but less sensitive to outliers as described in the
    `Fast R-CNN <https://arxiv.org/abs/1504.08083>`_ by Ross Girshick.
    Note:
        Sets input prediction as `X`, input target as `Y`, output as `loss`. Then,
        .. math::
            \text{SmoothL1Loss} = \begin{cases} \frac{0.5 x^{2}}{\text{beta}}, &if \left |x \right | < \text{beta} \cr
            \left |x \right|-0.5 \text{beta}, &\text{otherwise}\end{cases}
    Args:
        beta (float): A parameter used to control the point where the function will change from
            quadratic to linear. Default: 1.0.
    Inputs:
        - **prediction** (Tensor) - Predict data. Data type should be float16 or float32.
        - **target** (Tensor) - Ground truth data, with the same type and shape as `prediction`.
    Outputs:
        Tensor, with the same type and shape as `prediction`.
    Examples:
        >>> loss = P.SmoothL1Loss()
        >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
        >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)
        >>> loss(input_data, target_data)
        [0, 0, 0.5]
    """
    @prim_attr_register
    def __init__(self, beta=1.0):
        validator.check_value_type('beta', beta, [float], self.name)
        # beta is the quadratic/linear switch point and must be strictly positive.
        validator.check('beta', beta, '', 0, Rel.GT, self.name)
        self.init_prim_io_names(inputs=['prediction', 'target'], outputs=['output'])
    def infer_shape(self, prediction, target):
        # Elementwise loss: prediction and target shapes must match exactly.
        validator.check('prediction shape', prediction, 'target shape', target, Rel.EQ, self.name)
        return prediction
    def infer_dtype(self, prediction, target):
        args = {"prediction": prediction, "target": target}
        validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)
        return prediction
class L2Loss(PrimitiveWithInfer):
    """
    Calculates half of the L2 norm of a tensor without using the `sqrt`.
    Set `input_x` as x and output as loss.
    .. math::
        loss = sum(x ** 2) / 2
    Inputs:
        - **input_x** (Tensor) - A input Tensor. Data type should be float16 or float32.
    Outputs:
        Tensor, has the same dtype as `input_x`. The output tensor is the value of loss which is a scalar tensor.
    Examples:
        >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float16)
        >>> l2_loss = P.L2Loss()
        >>> l2_loss(input_x)
        7.0
    """
    @prim_attr_register
    def __init__(self):
        """init L2Loss"""
    def infer_shape(self, input_x):
        # The loss is a scalar regardless of the input shape.
        loss_shape = []
        return loss_shape
    def infer_dtype(self, x_type):
        validator.check_subclass("x_type", x_type, mstype.tensor, self.name)
        valid_types = [mstype.float16, mstype.float32]
        validator.check_tensor_type_same({'x_type': x_type}, valid_types, self.name)
        return x_type
class DataFormatDimMap(PrimitiveWithInfer):
    """
    Returns the dimension index in the destination data format given in the source data format.
    Args:
        src_format (string): An optional value for source data format. Default: 'NHWC'.
        dst_format (string): An optional value for destination data format. Default: 'NCHW'.
    Inputs:
        - **input_x** (Tensor) - A Tensor with each element as a dimension index in source data format.
          The suggested values is in the range [-4, 4). It's type is int32.
    Outputs:
        Tensor, has the same type as the `input_x`.
    Examples:
        >>> x = Tensor([0, 1, 2, 3], mindspore.int32)
        >>> dfdm = P.DataFormatDimMap()
        >>> dfdm(x)
        [0 3 1 2]
    """
    @prim_attr_register
    def __init__(self, src_format='NHWC', dst_format='NCHW'):
        # Only 4-D image layouts are supported as source/destination formats.
        valid_values = ['NHWC', 'NCHW']
        self.src_format = validator.check_string("src_format", src_format, valid_values, self.name)
        self.dst_format = validator.check_string("dst_format", dst_format, valid_values, self.name)
        self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
    def infer_shape(self, x_shape):
        # Elementwise index remap: output shape equals input shape.
        return x_shape
    def infer_dtype(self, x_type):
        validator.check_subclass("x", x_type, mstype.tensor, self.name)
        valid_types = [mstype.int32]
        validator.check_tensor_type_same({"x": x_type}, valid_types, self.name)
        return x_type
class RNNTLoss(PrimitiveWithInfer):
    """
    Computes the RNNTLoss and its gradient with respect to the softmax outputs.
    Args:
        blank_label (int): blank label. Default: 0.
    Inputs:
        - **acts** (Tensor) - Tensor of shape :math:`(B, T, U, V)`. Data type should be float16 or float32.
        - **labels** (Tensor[int32]) - Tensor of shape :math:`(B, U-1)`.
        - **input_lengths** (Tensor[int32]) - Tensor of shape :math:`(B,)`.
        - **label_lebgths** (Tensor[int32]) - Tensor of shape :math:`(B,)`.
    Outputs:
        - **costs** (Tensor[int32]) - Tensor of shape :math:`(B,)`.
        - **grads** (Tensor[int32]) - Has the same shape as `acts`.
    Examples:
        >>> B, T, U, V = 1, 2, 3, 5
        >>> acts = np.random.random((B, T, U, V)).astype(np.float32)
        >>> labels = np.array([[1, 2]]).astype(np.int32)
        >>> input_length = np.array([T] * B).astype(np.int32)
        >>> label_length = np.array([len(l) for l in labels]).astype(np.int32)
        >>> rnnt_loss = P.RNNTLoss(blank_label=0)
        >>> costs, grads = rnnt_loss(Tensor(acts), Tensor(labels), Tensor(input_length), Tensor(label_length))
    """
    @prim_attr_register
    def __init__(self, blank_label=0):
        validator.check_value_type('blank_label', blank_label, [int], self.name)
        self.init_prim_io_names(inputs=['acts', 'labels', 'input_length', 'label_length'],
                                outputs=['costs', 'grads'])
    def infer_shape(self, acts_shape, labels_shape, input_length_shape, label_length_shape):
        # acts is (B, T, U, V); labels is (B, U-1); both length vectors are (B,).
        validator.check_integer('acts_rank', len(acts_shape), 4, Rel.EQ, self.name)
        validator.check_integer('labels_rank', len(labels_shape), 2, Rel.EQ, self.name)
        validator.check_integer('input_length_rank', len(input_length_shape), 1, Rel.EQ, self.name)
        validator.check_integer('label_length_rank', len(label_length_shape), 1, Rel.EQ, self.name)
        validator.check('labels shape[0]', labels_shape[0], 'acts shape[0]', acts_shape[0], Rel.EQ, self.name)
        validator.check('labels shape[1]', labels_shape[1], 'acts shape[2]-1', acts_shape[2]-1, Rel.EQ, self.name)
        validator.check('input_length size', input_length_shape[0], 'acts shape[0]', acts_shape[0], Rel.EQ, self.name)
        validator.check('label_length size', label_length_shape[0], 'acts shape[0]', acts_shape[0], Rel.EQ, self.name)
        # One cost per batch element, plus a gradient matching acts.
        costs_shape = (acts_shape[0],)
        return (costs_shape, acts_shape)
    def infer_dtype(self, acts_type, labels_type, input_length_type, label_length_type):
        validator.check_subclass("acts_type", acts_type, mstype.tensor, self.name)
        validator.check_subclass("labels_type", labels_type, mstype.tensor, self.name)
        validator.check_subclass("input_length_type", input_length_type, mstype.tensor, self.name)
        validator.check_subclass("label_length_type", label_length_type, mstype.tensor, self.name)
        validator.check_tensor_type_same({"acts_type": acts_type}, [mstype.float32, mstype.float16], self.name)
        validator.check_tensor_type_same({"labels_type": labels_type}, [mstype.int32], self.name)
        validator.check_tensor_type_same({"input_length_type": input_length_type}, [mstype.int32], self.name)
        validator.check_tensor_type_same({"label_length_type": label_length_type}, [mstype.int32], self.name)
        return (acts_type, acts_type)
class SGD(PrimitiveWithInfer):
    """
    Computes stochastic gradient descent (optionally with momentum).
    Nesterov momentum is based on the formula from On the importance of
    initialization and momentum in deep learning.
    Note:
        For details, please refer to `nn.SGD` source code.
    Args:
        dampening (float): The dampening for momentum. Default: 0.0.
        weight_decay (float): Weight decay (L2 penalty). Default: 0.0.
        nesterov (bool): Enable Nesterov momentum. Default: False.
    Inputs:
        - **parameters** (Tensor) - Parameters to be updated. With float16 or float32 data type.
        - **gradient** (Tensor) - Gradients. With float16 or float32 data type.
        - **learning_rate** (Tensor) - Learning rate, a scalar tensor with float16 or float32 data type.
          e.g. Tensor(0.1, mindspore.float32)
        - **accum** (Tensor) - Accum(velocity) to be updated. With float16 or float32 data type.
        - **momentum** (Tensor) - Momentum, a scalar tensor with float16 or float32 data type.
          e.g. Tensor(0.1, mindspore.float32).
        - **stat** (Tensor) - States to be updated with the same shape as gradient. With float16 or float32 data type.
    Outputs:
        Tensor, parameters to be updated.
    Raises:
        ValueError: If `nesterov` is True while `dampening` is not zero.
    Examples:
        >>> sgd = P.SGD()
        >>> parameters = Tensor(np.array([2, -0.5, 1.7, 4]), mindspore.float32)
        >>> gradient = Tensor(np.array([1, -1, 0.5, 2]), mindspore.float32)
        >>> learning_rate = Tensor(0.01, mindspore.float32)
        >>> accum = Tensor(np.array([0.1, 0.3, -0.2, -0.1]), mindspore.float32)
        >>> momentum = Tensor(0.1, mindspore.float32)
        >>> stat = Tensor(np.array([1.5, -0.3, 0.2, -0.7]), mindspore.float32)
        >>> result = sgd(parameters, gradient, learning_rate, accum, momentum, stat)
    """
    @prim_attr_register
    def __init__(self, dampening=0.0, weight_decay=0.0, nesterov=False):
        validator.check_value_type("nesterov", nesterov, [bool], self.name)
        if nesterov and dampening != 0:
            # Nesterov momentum is only well defined with zero dampening.
            # (Fixed: messages below were needless f-strings with no placeholders.)
            raise ValueError("Nesterov requires zero dampening!")
        self.init_prim_io_names(inputs=['parameters', 'gradient', 'learning_rate', 'accum', 'momentum', 'stat'],
                                outputs=['output'])
    def infer_shape(self, parameters_shape, gradient_shape, learning_rate_shape,
                    accum_shape, momentum_shape, stat_shape):
        """Validate ranks; `parameters`/`accum` must be non-scalar; output has the parameters' shape."""
        validator.check_integer('parameters rank', len(parameters_shape), 0, Rel.GT, self.name)
        validator.check_integer('gradient rank', len(gradient_shape), 0, Rel.GE, self.name)
        validator.check_integer('learning rate rank', len(learning_rate_shape), 0, Rel.GE, self.name)
        validator.check_integer('accumulation rank', len(accum_shape), 0, Rel.GT, self.name)
        validator.check_integer('momentum rank', len(momentum_shape), 0, Rel.GE, self.name)
        validator.check_integer('stat rank', len(stat_shape), 0, Rel.GE, self.name)
        # stat mirrors the gradient (one state flag per gradient element).
        validator.check("gradient shape", gradient_shape, "stat shape", stat_shape, Rel.EQ, self.name)
        return parameters_shape
    def infer_dtype(self, parameters_dtype, gradient_dtype, learning_rate_dtype,
                    accum_dtype, momentum_dtype, stat_dtype):
        """All six inputs must be float16/float32 tensors; output dtype follows `parameters`."""
        valid_types = [mstype.float16, mstype.float32]
        validator.check_tensor_type_same({"parameters": parameters_dtype}, valid_types, self.name)
        validator.check_tensor_type_same({"gradient": gradient_dtype}, valid_types, self.name)
        validator.check_tensor_type_same({"learning_rate": learning_rate_dtype}, valid_types, self.name)
        validator.check_tensor_type_same({"accum": accum_dtype}, valid_types, self.name)
        validator.check_tensor_type_same({"momentum": momentum_dtype}, valid_types, self.name)
        validator.check_tensor_type_same({"stat": stat_dtype}, valid_types, self.name)
        return parameters_dtype
class ApplyRMSProp(PrimitiveWithInfer):
    """
    Optimizer that implements the Root Mean Square prop(RMSProp) algorithm.
    Please refer to the usage in source code of `nn.RMSProp`.
    Note:
        Update `var` according to the RMSProp algorithm.
        .. math::
            s_{t} = \\rho s_{t-1} + (1 - \\rho)(\\nabla Q_{i}(w))^2
        .. math::
            m_{t} = \\beta m_{t-1} + \\frac{\\eta} {\\sqrt{s_{t} + \\epsilon}} \\nabla Q_{i}(w)
        .. math::
            w = w - m_{t}
        where :math:`w` represents `var`, which will be updated.
        :math:`s_{t}` represents `mean_square`, :math:`s_{t-1}` is the last momentent of :math:`s_{t}`,
        :math:`m_{t}` represents `moment`, :math:`m_{t-1}` is the last momentent of :math:`m_{t}`.
        :math:`\\rho` represents `decay`. :math:`\\beta` is the momentum term, represents `momentum`.
        :math:`\\epsilon` is a smoothing term to avoid division by zero, represents `epsilon`.
        :math:`\\eta` represents `learning_rate`. :math:`\\nabla Q_{i}(w)` represents `grad`.
    Args:
        use_locking (bool): Enable a lock to protect the update of variable tensors. Default: False.
    Inputs:
        - **var** (Tensor) - Weights to be update.
        - **mean_square** (Tensor) - Mean square gradients, must have the same type as `var`.
        - **moment** (Tensor) - Delta of `var`, must have the same type as `var`.
        - **learning_rate** (Union[Number, Tensor]) - Learning rate. Should be a float number or
          a scalar tensor with float16 or float32 data type.
        - **grad** (Tensor) - Gradients, must have the same type as `var`.
        - **decay** (float) - Decay rate. Only constant value is allowed.
        - **momentum** (float) - Momentum. Only constant value is allowed.
        - **epsilon** (float) - Ridge term. Only constant value is allowed.
    Outputs:
        Tensor, parameters to be update.
    Examples:
        >>> apply_rms = P.ApplyRMSProp()
        >>> input_x = Tensor(1., mindspore.float32)
        >>> mean_square = Tensor(2., mindspore.float32)
        >>> moment = Tensor(1., mindspore.float32)
        >>> grad = Tensor(2., mindspore.float32 )
        >>> learning_rate = Tensor(0.9, mindspore.float32)
        >>> decay = 0.0
        >>> momentum = 1e-10
        >>> epsilon = 0.001
        >>> result = apply_rms(input_x, mean_square, moment, learning_rate, grad, decay, momentum, epsilon)
        (-2.9977674, 0.80999994, 1.9987665)
    """
    @prim_attr_register
    def __init__(self, use_locking=False):
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
        self.init_prim_io_names(inputs=['var', 'mean_square', 'moment', 'learning_rate', 'grad',
                                        'rho', 'momentum', 'epsilon'], outputs=['output'])
        # The Ascend kernel (outside GE mode) returns (var, mean_square, moment).
        self.is_ge = context.get_context("enable_ge")
        self.is_d = context.get_context("device_target") == "Ascend"
    def infer_shape(self, var_shape, mean_square_shape, moment_shape, learning_rate_shape, grad_shape, decay_shape,
                    momentum_shape, epsilon_shape):
        # All stateful tensors must match the variable's shape elementwise.
        validator.check("var_shape", var_shape, "mean_square_shape", mean_square_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "moment_shape", moment_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "grad_shape", grad_shape, Rel.EQ, self.name)
        if not self.is_ge and self.is_d:
            return var_shape, var_shape, var_shape
        return var_shape
    def infer_dtype(self, var_dtype, mean_square_dtype, moment_dtype, learning_rate_dtype, grad_dtype, decay_dtype,
                    momentum_dtype, epsilon_dtype):
        args = {"var": var_dtype, "mean_square": mean_square_dtype, "moment": moment_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, mstype.number_type, self.name)
        valid_types = [mstype.float16, mstype.float32]
        args_decay = {"decay": decay_dtype, 'momentum': momentum_dtype, "epsilon": epsilon_dtype}
        validator.check_type_same(args_decay, valid_types, self.name)
        args_lr = {"learning_rate": learning_rate_dtype, "decay": decay_dtype}
        validator.check_scalar_or_tensor_type_same(args_lr, valid_types, self.name, allow_mix=True)
        if not self.is_ge and self.is_d:
            return var_dtype, var_dtype, var_dtype
        return var_dtype
    def infer_value(self, var, mean_square, moment, learning_rate, grad, decay, momentum, epsilon):
        # Only enforces that the hyper-parameters are compile-time constants;
        # implicitly returns None, i.e. the result is not constant-foldable.
        if decay is None or momentum is None or epsilon is None:
            raise ValueError(f"For {self.name}, decay, momentum, epsilon must be const.")
class ApplyCenteredRMSProp(PrimitiveWithInfer):
    """
    Optimizer that implements the centered RMSProp algorithm.
    Please refer to the usage in source code of `nn.RMSProp`.
    Note:
        Update `var` according to the centered RMSProp algorithm.
        .. math::
            g_{t} = \\rho g_{t-1} + (1 - \\rho)\\nabla Q_{i}(w)
        .. math::
            s_{t} = \\rho s_{t-1} + (1 - \\rho)(\\nabla Q_{i}(w))^2
        .. math::
            m_{t} = \\beta m_{t-1} + \\frac{\\eta} {\\sqrt{s_{t} - g_{t}^2 + \\epsilon}} \\nabla Q_{i}(w)
        .. math::
            w = w - m_{t}
        where :math:`w` represents `var`, which will be updated.
        :math:`g_{t}` represents `mean_gradient`, :math:`g_{t-1}` is the last momentent of :math:`g_{t}`.
        :math:`s_{t}` represents `mean_square`, :math:`s_{t-1}` is the last momentent of :math:`s_{t}`,
        :math:`m_{t}` represents `moment`, :math:`m_{t-1}` is the last momentent of :math:`m_{t}`.
        :math:`\\rho` represents `decay`. :math:`\\beta` is the momentum term, represents `momentum`.
        :math:`\\epsilon` is a smoothing term to avoid division by zero, represents `epsilon`.
        :math:`\\eta` represents `learning_rate`. :math:`\\nabla Q_{i}(w)` represents `grad`.
    Args:
        use_locking (bool): Enable a lock to protect the update of variable tensors. Default: False.
    Inputs:
        - **var** (Tensor) - Weights to be update.
        - **mean_gradient** (Tensor) - Mean gradients, must have the same type as `var`.
        - **mean_square** (Tensor) - Mean square gradients, must have the same type as `var`.
        - **moment** (Tensor) - Delta of `var`, must have the same type as `var`.
        - **grad** (Tensor) - Gradients, must have the same type as `var`.
        - **learning_rate** (Union[Number, Tensor]) - Learning rate. Should be a float number or
          a scalar tensor with float16 or float32 data type.
        - **decay** (float) - Decay rate.
        - **momentum** (float) - Momentum.
        - **epsilon** (float) - Ridge term.
    Outputs:
        Tensor, parameters to be update.
    Examples:
        >>> centered_rms_prop = P.ApplyCenteredRMSProp()
        >>> input_x = Tensor(np.arange(-6, 6).astype(np.float32).reshape(2, 3, 2), mindspore.float32)
        >>> mean_grad = Tensor(np.arange(12).astype(np.float32).reshape(2, 3, 2), mindspore.float32)
        >>> mean_square = Tensor(np.arange(-8, 4).astype(np.float32).reshape(2, 3, 2), mindspore.float32)
        >>> moment = Tensor(np.arange(12).astype(np.float32).reshape(2, 3, 2), mindspore.float32)
        >>> grad = Tensor(np.arange(12).astype(np.float32).reshape(2, 3, 2), mindspore.float32)
        >>> learning_rate = Tensor(0.9, mindspore.float32)
        >>> decay = 0.0
        >>> momentum = 1e-10
        >>> epsilon = 0.05
        >>> result = centered_rms_prop(input_x, mean_grad, mean_square, moment, grad,
        >>>                            learning_rate, decay, momentum, epsilon)
        [[[ -6.        -9.024922]
          [-12.049845 -15.074766]
          [-18.09969  -21.124613]]
         [[-24.149532 -27.174456]
          [-30.199379 -33.2243  ]
          [-36.249226 -39.274143]]]
    """
    @prim_attr_register
    def __init__(self, use_locking=False):
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
        # On Ascend the kernel emits all four updated state tensors.
        self.is_ascend = context.get_context("device_target") == "Ascend"
    def infer_shape(self, var_shape, mean_gradient_shape, mean_square_shape, moment_shape, grad_shape,
                    learning_rate_shape, decay_shape, momentum_shape, epsilon_shape):
        # All stateful tensors must match the variable's shape elementwise.
        validator.check("var_shape", var_shape, "mean_gradient_shape", mean_gradient_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "mean_square_shape", mean_square_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "moment_shape", moment_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "grad_shape", grad_shape, Rel.EQ, self.name)
        if self.is_ascend:
            return var_shape, mean_gradient_shape, mean_square_shape, moment_shape
        return var_shape
    def infer_dtype(self, var_dtype, mean_gradient_dtype, mean_square_dtype, moment_dtype, grad_dtype,
                    learning_rate_dtype, rho_dtype, momentum_dtype, epsilon_dtype):
        args = {"var": var_dtype, "mean_gradient": mean_gradient_dtype,
                "mean_square": mean_square_dtype, "moment": moment_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, mstype.number_type, self.name)
        valid_types = [mstype.float16, mstype.float32]
        args_rho = {"rho": rho_dtype, 'momentum': momentum_dtype, "epsilon": epsilon_dtype}
        validator.check_type_same(args_rho, valid_types, self.name)
        args_lr = {"learning_rate": learning_rate_dtype, "rho": rho_dtype}
        validator.check_scalar_or_tensor_type_same(args_lr, valid_types, self.name, allow_mix=True)
        if self.is_ascend:
            return var_dtype, mean_gradient_dtype, mean_square_dtype, moment_dtype
        return var_dtype
class LayerNorm(Primitive):
    r"""
    Applies the Layer Normalization to the input tensor.
    This operator will normalize the input tensor on given axis. LayerNorm is described in the paper
    `Layer Normalization <https://arxiv.org/abs/1607.06450>`_.
    .. math::
        y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
    where :math:`\gamma` is scale, :math:`\beta` is bias, :math:`\epsilon` is epsilon.
    Args:
        begin_norm_axis (int): The begin axis of the `input_x` to apply LayerNorm,
            the value should be in [-1, rank(input)). Default: 1.
        begin_params_axis (int): The begin axis of the parameter input (`gamma`, `beta`) to
            apply LayerNorm, the value should be in [-1, rank(input)). Default: 1.
        epsilon (float): A value added to the denominator for numerical stability. Default: 1e-7.
    Inputs:
        - **input_x** (Tensor) - Tensor of shape :math:`(N, \ldots)`.
          The input of LayerNorm.
        - **gamma** (Tensor) - Tensor of shape :math:`(P_0, \ldots, P_\text{begin_params_axis})`.
          The learnable parameter `gamma` as the scale on norm.
        - **beta** (Tensor) - Tensor of shape :math:`(P_0, \ldots, P_\text{begin_params_axis})`.
          The learnable parameter `beta` as the scale on norm.
    Outputs:
        tuple[Tensor], tuple of 3 tensors, the normalized input and the updated parameters.
        - **output_x** (Tensor) - The normalized input, has the same type and shape as the `input_x`.
          The shape is :math:`(N, C)`.
        - **mean** (Tensor) - Tensor of shape :math:`(C,)`.
        - **variance** (Tensor) - Tensor of shape :math:`(C,)`.
    Examples:
        >>> input_x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]), mindspore.float32)
        >>> gamma = Tensor(np.ones([3]), mindspore.float32)
        >>> beta = Tensor(np.ones([3]), mindspore.float32)
        >>> layer_norm = P.LayerNorm()
        >>> output = layer_norm(input_x, gamma, beta)
        ([[-0.22474492, 1., 2.2247488], [-0.22474492, 1., 2.2247488]],
         [[2.], [2.]], [[0.6666667], [0.6666667]])
    """
    @prim_attr_register
    def __init__(self, begin_norm_axis=1, begin_params_axis=1, epsilon=1e-7):
        # Plain Primitive (no Python infer): shape/type inference is done by the backend,
        # so only argument types are validated here; values are registered as prim attrs.
        validator.check_value_type('begin_norm_axis', begin_norm_axis, [int], self.name)
        validator.check_value_type('begin_params_axis', begin_params_axis, [int], self.name)
        validator.check_value_type('epsilon', epsilon, [float], self.name)
class L2Normalize(PrimitiveWithInfer):
    r"""
    L2 normalization Operator.
    This operator will normalizes the input using the given axis. The function is shown as follows:
    .. math::
        \text{output} = \frac{x}{\sqrt{\text{max}(\text{sum} (\text{input_x}^2), \epsilon)}},
    where :math:`\epsilon` is epsilon.
    Args:
        axis (int): The begin axis for the input to apply L2 normalize. Default: 0.
        epsilon (float): A small value added for numerical stability. Default: 1e-4.
    Inputs:
        - **input_x** (Tensor) - Input to compute the normalization. Data type should be float16 or float32.
    Outputs:
        Tensor, with the same type and shape as the input.
    Examples:
        >>> l2_normalize = P.L2Normalize()
        >>> input_x = Tensor(np.random.randint(-256, 256, (2, 3, 4)), mindspore.float32)
        >>> result = l2_normalize(input_x)
        [[[-0.47247353   -0.30934513   -0.4991462  0.8185567 ]
          [-0.08070751   -0.9961299    -0.5741758  0.09262337]
          [-0.9916556    -0.3049123    0.5730487  -0.40579924]
        [[-0.88134485   0.9509498    -0.86651784  0.57442576]
          [ 0.99673784   0.08789381   -0.8187321   0.9957012 ]
          [ 0.12891524   -0.9523804   -0.81952125  0.91396334]]]
    """
    @prim_attr_register
    def __init__(self, axis=0, epsilon=1e-4):
        validator.check_value_type('axis', axis, [int], self.name)
        validator.check_value_type('epsilon', epsilon, [int, float], self.name)
    def infer_shape(self, input_x):
        # self.axis is auto-registered from __init__ by @prim_attr_register;
        # it must be a valid axis of the input (negative indexing allowed).
        dim = len(input_x)
        validator.check_int_range('axis value', self.axis, -dim, dim, Rel.INC_LEFT, self.name)
        return input_x
    def infer_dtype(self, input_x):
        validator.check_subclass("x", input_x, mstype.tensor, self.name)
        validator.check_tensor_type_same({"input_x": input_x}, [mstype.float16, mstype.float32], self.name)
        return input_x
class DropoutGenMask(Primitive):
    """
    Generates the mask value for the input shape.
    Args:
        Seed0 (int): Seed0 value for random generating. Default: 0.
        Seed1 (int): Seed1 value for random generating. Default: 0.
    Inputs:
        - **shape** (tuple[int]) - The shape of target mask.
        - **keep_prob** (Tensor) - The keep rate, between 0 and 1, e.g. keep_prob = 0.9,
          means dropping out 10% of input units.
    Outputs:
        Tensor, the value of generated mask for input shape.
    Examples:
        >>> dropout_gen_mask = P.DropoutGenMask()
        >>> shape = (20, 16, 50)
        >>> keep_prob = Tensor(0.5, mindspore.float32)
        >>> mask = dropout_gen_mask(shape, keep_prob)
    """
    @prim_attr_register
    def __init__(self, Seed0=0, Seed1=0):
        self.init_prim_io_names(inputs=['shape', 'keep_prob'], outputs=['output'])
        validator.check_value_type("Seed0", Seed0, [int], self.name)
        validator.check_value_type("Seed1", Seed1, [int], self.name)
        # Mark the op as having a random side effect so the compiler never
        # de-duplicates or caches its result across executions.
        self.add_prim_attr("_random_effect", True)
class DropoutDoMask(PrimitiveWithInfer):
    """
    Applies dropout mask on the input tensor.
    Take the mask output of DropoutGenMask as input, and apply dropout on the input.
    Inputs:
        - **input_x** (Tensor) - The input tensor.
        - **mask** (Tensor) - The mask to be applied on `input_x`, which is the output of `DropoutGenMask`. And the
          shape of `input_x` must be the same as the value of `DropoutGenMask`'s input `shape`. If input wrong `mask`,
          the output of `DropoutDoMask` are unpredictable.
        - **keep_prob** (Tensor) - The keep rate, between 0 and 1, e.g. keep_prob = 0.9,
          means dropping out 10% of input units. The value of `keep_prob` is the same as the input `keep_prob` of
          `DropoutGenMask`.
    Outputs:
        Tensor, the value that applied dropout on.
    Examples:
        >>> x = Tensor(np.ones([20, 16, 50]), mindspore.float32)
        >>> shape = (20, 16, 50)
        >>> keep_prob = Tensor(0.5, mindspore.float32)
        >>> dropout_gen_mask = P.DropoutGenMask()
        >>> dropout_do_mask = P.DropoutDoMask()
        >>> mask = dropout_gen_mask(shape, keep_prob)
        >>> output = dropout_do_mask(x, mask, keep_prob)
        >>> assert output.shape == (20, 16, 50)
    """
    @prim_attr_register
    def __init__(self):
        pass
    def __infer__(self, input_x, mask, keep_prob):
        """Infer shape/dtype; validates that the uint8 mask covers every element of `input_x`."""
        input_x_shape = input_x['shape']
        mask_shape = mask['shape']
        keep_prob_shape = keep_prob['shape']
        validator.check("keep_prob's dim", len(keep_prob_shape), '0(scalar)', 0, Rel.EQ, self.name)
        # Total number of elements the mask must cover.
        size_x = reduce(lambda x, y: x * y, input_x_shape)
        if len(mask_shape) != 1:
            raise ValueError("DropoutDoMask mask shape should be 1-dimension.")
        # Each uint8 byte of the mask packs 8 one-bit element flags.
        size_y = mask_shape[0] * 8
        if size_x > size_y:
            # Bug fix: the second string fragment was not an f-string, so the
            # "{input_x_shape}" / "{mask_shape}" placeholders were printed literally.
            raise ValueError(f"DropoutDoMask y mask does not match input input_x shape: "
                             f"{input_x_shape}, mask shape: {mask_shape}.")
        validator.check_tensor_type_same({"input_x": input_x['dtype']}, [mstype.float32, mstype.float16, mstype.int32],
                                         self.name)
        validator.check_tensor_type_same({"input_mask": mask['dtype']}, [mstype.uint8], self.name)
        keep_prob_v = keep_prob['value']
        if keep_prob_v is not None:
            # keep_prob is a probability and must lie in [0, 1].
            validator.check_number_range('keep_prob', keep_prob_v.asnumpy(), 0, 1, Rel.INC_BOTH, self.name)
        out = {'shape': input_x_shape,
               'dtype': input_x['dtype'],
               'value': None}
        return out
class ResizeBilinear(PrimitiveWithInfer):
    r"""
    Resizes the image to certain size using bilinear interpolation.
    The resizing only affects the lower two dimensions which represent the height and width. The input images
    can be represented by different data types, but the data types of output images are always float32.
    Args:
        size (tuple[int]): A tuple of 2 int elements `(new_height, new_width)`, the new size for the images.
        align_corners (bool): If it's true, rescale input by `(new_height - 1) / (height - 1)`,
                       which exactly aligns the 4 corners of images and resized images. If it's false,
                       rescale by `new_height / height`. Default: False.
    Inputs:
        - **input** (Tensor) - Image to be resized. Tensor of shape `(N_i, ..., N_n, height, width)`,
          with data type of float32 or float16.
    Outputs:
        Tensor, resized image. Tensor of shape `(N_i, ..., N_n, new_height, new_width)` in `float32`.
    Examples:
        >>> tensor = Tensor([[[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]]], mindspore.float32)
        >>> resize_bilinear = P.ResizeBilinear((5, 5))
        >>> result = resize_bilinear(tensor)
        >>> assert result.shape == (1, 1, 5, 5)
    """
    @prim_attr_register
    def __init__(self, size, align_corners=False):
        # `size` and `align_corners` are auto-registered as prim attrs by
        # @prim_attr_register; infer_shape reads them back via self.size.
        pass
    def infer_shape(self, input_shape):
        # Input is expected as NCHW; the trailing H and W are replaced by `size`.
        input_shape = list(input_shape)
        batch, channel, _, _ = input_shape
        out_shape = [batch, channel]
        for i in self.size:
            out_shape.append(int(i))
        return out_shape
    def infer_dtype(self, input_dtype):
        validator.check_tensor_type_same({'input_dtype': input_dtype}, [mstype.float16, mstype.float32], self.name)
        # Output is always float32 regardless of the input dtype.
        return mstype.tensor_type(mstype.float32)
class OneHot(PrimitiveWithInfer):
    r"""
    Computes a one-hot tensor.

    Makes a new tensor, whose locations represented by indices in `indices` take value `on_value`, while all
    other locations take value `off_value`.

    Note:
        If the input indices is rank `N`, the output will have rank `N+1`. The new axis is created at dimension `axis`.

    Args:
        axis (int): Position to insert the value. e.g. If `indices` shape is [n, c], and `axis` is `-1` the output shape
            will be [n, c, depth], If `axis` is `0` the output shape will be [depth, n, c]. Default: -1.

    Inputs:
        - **indices** (Tensor) - A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
          Data type must be int32.
        - **depth** (int) - A scalar defining the depth of the one hot dimension.
        - **on_value** (Tensor) - A value to fill in output when `indices[j] = i`. With data type of float16 or float32.
        - **off_value** (Tensor) - A value to fill in output when `indices[j] != i`.
          Has the same data type with as `on_value`.

    Outputs:
        Tensor, one_hot tensor. Tensor of shape :math:`(X_0, \ldots, X_{axis}, \text{depth} ,X_{axis+1}, \ldots, X_n)`.

    Examples:
        >>> indices = Tensor(np.array([0, 1, 2]), mindspore.int32)
        >>> depth, on_value, off_value = 3, Tensor(1.0, mindspore.float32), Tensor(0.0, mindspore.float32)
        >>> onehot = P.OneHot()
        >>> result = onehot(indices, depth, on_value, off_value)
        [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
    """

    @prim_attr_register
    def __init__(self, axis=-1):
        """init OneHot"""
        self.init_prim_io_names(inputs=['indices', 'depth', 'on_value', 'off_value'], outputs=['output'])
        validator.check_value_type("axis", axis, [int], self.name)

    def __infer__(self, indices, depth, on_value, off_value):
        # dtype checks: indices must be int32; on/off values share one float dtype.
        validator.check_tensor_type_same({"indices": indices['dtype']}, (mstype.int32,), self.name)
        validator.check_type_name("depth", depth['dtype'], mstype.int_type, self.name)
        value_args = {"on_value": on_value['dtype'], "off_value": off_value['dtype']}
        validator.check_tensor_type_same(value_args, (mstype.float16, mstype.float32), self.name)

        # shape checks: axis may address any existing dim or -1 (append).
        indices_shp = indices['shape']
        validator.check_int_range("axis", self.axis, -1, len(indices_shp), Rel.INC_BOTH, self.name)
        depth_val = depth['value']
        validator.check_integer("depth", depth_val, 0, Rel.GE, self.name)

        # Grow the shape in place: insert the depth dimension at `axis`,
        # or append it at the end when axis == -1.
        if self.axis >= 0:
            indices_shp.insert(self.axis, depth_val)
        else:
            indices_shp.append(depth_val)
        return {'shape': indices_shp,
                'dtype': on_value['dtype'],
                'value': None}
class Gelu(PrimitiveWithInfer):
    r"""
    Gaussian Error Linear Units activation function.

    GeLU is described in the paper `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_.
    And also please refer to `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding
    <https://arxiv.org/abs/1810.04805>`_.

    Gelu is defined as follows:

    .. math::
        \text{output} = 0.5 * x * (1 + erf(x / \sqrt{2})),

    where :math:`erf` is the "Gauss error function" .

    Inputs:
        - **input_x** (Tensor) - Input to compute the Gelu with data type of float16 or float32.

    Outputs:
        Tensor, with the same type and shape as input.

    Examples:
        >>> tensor = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
        >>> gelu = P.Gelu()
        >>> result = gelu(tensor)
    """

    @prim_attr_register
    def __init__(self):
        """init GeLU"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, input_x):
        # Elementwise activation: the output shape is exactly the input shape.
        return input_x

    def infer_dtype(self, input_x):
        valid_types = (mstype.float16, mstype.float32)
        validator.check_tensor_type_same({"input_x": input_x}, valid_types, self.name)
        # dtype is preserved.
        return input_x
class GetNext(PrimitiveWithInfer):
    """
    Returns the next element in the dataset queue.

    Note:
        The GetNext operation needs to be associated with network and it also depends on the init_dataset interface,
        it can't be used directly as a single operation.
        For details, please refer to `nn.DataWrapper` source code.

    Args:
        types (list[:class:`mindspore.dtype`]): The type of the outputs.
        shapes (list[tuple[int]]): The dimensionality of the outputs.
        output_num (int): The output number, length of `types` and `shapes`.
        shared_name (str): The queue name of `init_dataset` interface.

    Inputs:
        No inputs.

    Outputs:
        tuple[Tensor], the output of Dataset. The shape is described in `shapes`
        and the type is described in `types`.

    Examples:
        >>> get_next = P.GetNext([mindspore.float32, mindspore.int32], [[32, 1, 28, 28], [10]], 2, 'shared_name')
        >>> feature, label = get_next()
    """

    @prim_attr_register
    def __init__(self, types, shapes, output_num, shared_name):
        """init GetNext"""
        # `types` and `shapes` must be parallel sequences of equal length.
        validator.check_value_type("types", types, [list, tuple], self.name)
        validator.check_value_type("shapes", shapes, [list, tuple], self.name)
        validator.check("types length", len(types), "shapes length", len(shapes), Rel.EQ, self.name)
        validator.check_value_type("output_num", output_num, [int], self.name)

    def infer_shape(self):
        # Output shapes come straight from the attributes registered at construction.
        return tuple(self.shapes)

    def infer_dtype(self):
        # Output dtypes come straight from the attributes registered at construction.
        return tuple(self.types)
class PReLU(PrimitiveWithInfer):
    r"""
    Parametric Rectified Linear Unit activation function.

    PReLU is described in the paper `Delving Deep into Rectifiers: Surpassing Human-Level Performance on
    ImageNet Classification <https://arxiv.org/abs/1502.01852>`_. Defined as follows:

    .. math::
        prelu(x_i)= \max(0, x_i) + \min(0, w * x_i),

    where :math:`x_i` is an element of an channel of the input.

    Note:
        1-dimensional input_x is not supported.

    Inputs:
        - **input_x** (Tensor) - Float tensor, representing the output of the preview layer.
          With data type of float16 or float32.
        - **weight** (Tensor) - Float Tensor, w > 0, there is only two shapes are legitimate,
          1 or the number of channels at input. With data type of float16 or float32.

    Outputs:
        Tensor, with the same type as `input_x`.

    Detailed information, please refer to `nn.PReLU`.

    Examples:
        >>> import mindspore
        >>> import mindspore.nn as nn
        >>> import numpy as np
        >>> from mindspore import Tensor
        >>> from mindspore.ops import operations as P
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.prelu = P.PReLU()
        >>>     def construct(self, input_x, weight):
        >>>         result = self.prelu(input_x, weight)
        >>>         return result
        >>>
        >>> input_x = Tensor(np.random.randint(-3, 3, (2, 3, 2)), mindspore.float32)
        >>> weight = Tensor(np.array([0.1, 0.6, -0.3]), mindspore.float32)
        >>> net = Net()
        >>> result = net(input_x, weight)
        [[[-0.1 1. ]
          [ 0. 2. ]
          [0. 0. ]]
         [[-0.2 -0.1 ]
          [2. -1.8000001]
          [0.6 0.6 ]]]
    """

    @prim_attr_register
    def __init__(self):
        """init PReLU"""

    def infer_shape(self, input_x_shape, weight_shape):
        # Guard clauses: rank-1 inputs are not supported, and the weight must
        # be a 1-D tensor that either has one element or matches the channel dim.
        x_rank = len(input_x_shape)
        w_rank = len(weight_shape)
        if x_rank == 1:
            raise ValueError(f'For \'{self.name}\' input_x rank 1 is not supported.')
        if w_rank != 1:
            raise ValueError(f'For \'{self.name}\' weight_dim must be 1, while weight_dim is {w_rank}.')
        if weight_shape[0] not in (input_x_shape[1], 1):
            raise ValueError(f'For \'{self.name}\' channel of input_x and weight must be matched,'
                             f' while channel of input_x is {input_x_shape[1]},'
                             f' weight_shape[0] is {weight_shape[0]}.')
        return input_x_shape

    def infer_dtype(self, input_x_dtype, weight_dtype):
        valid_types = (mstype.float16, mstype.float32)
        for arg_name, arg_dtype in (("input_x", input_x_dtype), ("weight", weight_dtype)):
            validator.check_tensor_type_same({arg_name: arg_dtype}, valid_types, self.name)
        # The activation preserves the input dtype.
        return input_x_dtype
class LSTM(PrimitiveWithInfer):
    """
    Performs the long short term memory(LSTM) on the input.

    Detailed information, please refer to `nn.LSTM`.
    """

    @prim_attr_register
    def __init__(self, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout):
        """Init LSTM: validate and register the hyper-parameters."""
        self.input_size = validator.check_integer("input_size", input_size, 0, Rel.GT, self.name)
        self.hidden_size = validator.check_integer("hidden_size", hidden_size, 0, Rel.GT, self.name)
        self.num_layers = validator.check_integer("num_layers", num_layers, 0, Rel.GT, self.name)
        self.has_bias = validator.check_value_type("has_bias", has_bias, (bool,), self.name)
        self.bidirectional = validator.check_value_type("bidirectional", bidirectional, (bool,), self.name)
        self.dropout = validator.check_value_type("dropout", dropout, [float], self.name)
        self.dropout = validator.check_number_range('dropout', dropout, 0, 1, Rel.INC_BOTH, self.name)

        # A bidirectional LSTM runs one forward and one backward pass per layer.
        if bidirectional:
            self.num_directions = 2
        else:
            self.num_directions = 1

    def infer_shape(self, x_shape, h_shape, c_shape, w_shape):
        """Validate input shapes and compute (y, h, c, reserve, state) shapes.

        x is (seq_len, batch_size, input_size); h and c are
        (num_layers * num_directions, batch_size, hidden_size).
        """
        # (seq, batch_size, feature)
        validator.check_integer("x rank", len(x_shape), 3, Rel.EQ, self.name)
        validator.check_integer("x[2]", x_shape[2], self.input_size, Rel.EQ, self.name)

        # h and c should be same shape
        validator.check_integer("h rank", len(h_shape), 3, Rel.EQ, self.name)
        validator.check("h_shape", h_shape, "c_shape", c_shape, Rel.EQ, self.name)

        # (num_layers * num_directions, batch, hidden_size)
        validator.check_integer("h[0]", h_shape[0], self.num_layers * self.num_directions, Rel.EQ, self.name)
        validator.check_integer("h[1]", h_shape[1], x_shape[1], Rel.EQ, self.name)
        validator.check_integer("h[2]", h_shape[2], self.hidden_size, Rel.EQ, self.name)

        y_shape = (x_shape[0], x_shape[1], self.hidden_size * self.num_directions)

        # set arbitrary shape for reserved space; the sizes below mirror the
        # oneDNN-style RNN workspace layout (gates / states / cell states /
        # diff states), each region page-aligned.
        type_size = 4
        gates_ws_ld = self.get_good_ld(self.hidden_size * 4, type_size)
        states_ws_ld = self.get_good_ld(max(self.hidden_size, self.input_size), type_size)
        self.ws_gates_size = self.num_layers * self.num_directions * x_shape[0] * x_shape[1] * gates_ws_ld * type_size
        self.ws_states_size = (self.num_layers + 1) * self.num_directions * (x_shape[0] + 1) * x_shape[
            1] * states_ws_ld * type_size
        self.ws_c_states_size = (self.num_layers + 1) * self.num_directions * (x_shape[0] + 1) * x_shape[
            1] * states_ws_ld * type_size
        self.ws_diff_states_size = (self.num_layers + 1) * self.num_directions * (x_shape[0] + 1) * (2 + 1) * x_shape[
            1] * states_ws_ld * type_size
        self.ws_grid_comp_size = 0
        self.page_size = 4096
        current_offset = 0
        current_offset += self.ws_gates_size
        current_offset = self.rnd_up(current_offset, self.page_size)
        current_offset += self.ws_states_size
        current_offset = self.rnd_up(current_offset, self.page_size)
        current_offset += self.ws_c_states_size
        current_offset = self.rnd_up(current_offset, self.page_size)
        current_offset += self.ws_diff_states_size
        current_offset = self.rnd_up(current_offset, self.page_size)
        current_offset += self.ws_grid_comp_size
        reserved_shape = (current_offset, 1)
        state_shape = (1, 1)
        return (y_shape, h_shape, c_shape, reserved_shape, state_shape)

    def infer_dtype(self, x_dtype, h_dtype, c_dtype, w_dtype):
        """All inputs must share one float dtype, which all outputs inherit."""
        args = {'x': x_dtype, 'h': h_dtype, 'c': c_dtype, 'w': w_dtype}
        validator.check_tensor_type_same(args, (mstype.float32, mstype.float16), self.name)
        return (x_dtype, x_dtype, x_dtype, x_dtype, x_dtype)

    def rnd_up(self, current_offset, page_size):
        """Round `current_offset` up to the next multiple of `page_size`."""
        return ((current_offset + page_size - 1) // page_size) * page_size

    def get_good_ld(self, dim, type_size):
        """Return a leading dimension (in elements) for `dim`, rounded up to a
        64-byte cache line and bumped by one cache line when it lands on an
        exact multiple of 256 (cf. oneDNN's get_good_ld), to avoid pathological
        cache aliasing.
        """
        ld = self.rnd_up(dim, 64 // type_size)
        # Bug fix: the original condition was `ld * 256 == 0`, which can never
        # hold for a positive `ld`; the intended test is divisibility by 256.
        if ld % 256 == 0:
            return ld + 64 // type_size
        return ld
class SigmoidCrossEntropyWithLogits(PrimitiveWithInfer):
    r"""
    Uses the given logits to compute sigmoid cross entropy.

    Note:
        Sets input logits as `X`, input label as `Y`, output as `loss`. Then,

        .. math::
            p_{ij} = sigmoid(X_{ij}) = \frac{1}{1 + e^{-X_{ij}}}

        .. math::
            loss_{ij} = -[Y_{ij} * ln(p_{ij}) + (1 - Y_{ij})ln(1 - p_{ij})]

    Inputs:
        - **logits** (Tensor) - Input logits.
        - **label** (Tensor) - Ground truth label.

    Outputs:
        Tensor, with the same shape and type as input `logits`.

    Examples:
        >>> logits = Tensor(np.random.randn(2, 3).astype(np.float16))
        >>> labels = Tensor(np.random.randn(2, 3).astype(np.float16))
        >>> sigmoid = P.SigmoidCrossEntropyWithLogits()
        >>> sigmoid(logits, labels)
    """

    @prim_attr_register
    def __init__(self):
        """Init SigmoidCrossEntropyWithLogits"""
        self.init_prim_io_names(inputs=['predict', 'target'], outputs=['loss'])

    def infer_shape(self, x_shape, y_shape):
        # Elementwise loss: logits and labels must have identical shapes.
        validator.check("x_shape", x_shape, "y_shape", y_shape, Rel.EQ, self.name)
        return x_shape

    def infer_dtype(self, x_dtype, y_dtype):
        # Logits and labels must also share one numeric dtype.
        validator.check_tensor_type_same({"x_dtype": x_dtype, "y_dtype": y_dtype}, mstype.number_type, self.name)
        return x_dtype
class Pad(PrimitiveWithInfer):
    """
    Pads input tensor according to the paddings.

    Args:
        paddings (tuple): The shape of parameter `paddings` is (N, 2). N is the rank of input data. All elements of
            paddings are int type. For the input in `D` th dimension, paddings[D, 0] indicates how many sizes to be
            extended ahead of the input tensor in the `D` th dimension, and paddings[D, 1] indicates how many sizes to
            be extended behind of the input tensor in the `D` th dimension.

    Inputs:
        - **input_x** (Tensor) - The input tensor.

    Outputs:
        Tensor, the tensor after padding.

    Examples:
        >>> input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
        >>> pad_op = P.Pad(((1, 2), (2, 1)))
        >>> output_tensor = pad_op(input_tensor)
        >>> assert output_tensor == Tensor(np.array([[ 0. , 0. , 0. , 0. , 0. , 0. ],
        >>>                                          [ 0. , 0. , -0.1, 0.3, 3.6, 0. ],
        >>>                                          [ 0. , 0. , 0.4, 0.5, -3.2, 0. ],
        >>>                                          [ 0. , 0. , 0. , 0. , 0. , 0. ],
        >>>                                          [ 0. , 0. , 0. , 0. , 0. , 0. ]]), mindspore.float32)
    """

    @prim_attr_register
    def __init__(self, paddings):
        """Init Pad"""
        self.init_prim_io_names(inputs=['x'], outputs=['y'])
        if not isinstance(paddings, tuple):
            raise TypeError('Paddings must be tuple type.')
        # Every entry must be a (before, after) pair.
        if any(len(pair) != 2 for pair in paddings):
            raise ValueError('The shape of paddings must be (n, 2).')
        self.paddings = paddings

    def infer_shape(self, x):
        paddings = np.array(self.paddings)
        # Exactly one (before, after) pair is required per input dimension.
        validator.check_integer('paddings.shape', paddings.size, len(x) * 2, Rel.EQ, self.name)
        if not np.all(paddings >= 0):
            raise ValueError('All elements of paddings must be >= 0.')
        # Each output dimension grows by the padding added before and after it.
        return tuple(x[i] + paddings[i, 0] + paddings[i, 1] for i in range(paddings.size // 2))

    def infer_dtype(self, x):
        validator.check_subclass("input_x", x, mstype.tensor, self.name)
        return x
class MirrorPad(PrimitiveWithInfer):
    """
    Pads the input tensor according to the paddings and mode.

    Args:
        mode (str): Specifies padding mode. The optional values are "REFLECT", "SYMMETRIC".
            Default: "REFLECT".

    Inputs:
        - **input_x** (Tensor) - The input tensor.
        - **paddings** (Tensor) - The paddings tensor. The value of `paddings` is a matrix(list),
          and its shape is (N, 2). N is the rank of input data. All elements of paddings
          are int type. For the input in `D` th dimension, paddings[D, 0] indicates how many sizes to be
          extended ahead of the input tensor in the `D` th dimension, and paddings[D, 1] indicates how many sizes to
          be extended behind of the input tensor in the `D` th dimension.

    Outputs:
        Tensor, the tensor after padding.

        - If `mode` is "REFLECT", it uses a way of symmetrical copying through the axis of symmetry to fill in.
          If the `input_x` is [[1,2,3],[4,5,6],[7,8,9]] and `paddings` is [[1,1],[2,2]], then the
          Outputs is [[6,5,4,5,6,5,4],[3,2,1,2,3,2,1],[6,5,4,5,6,5,4],[9,8,7,8,9,8,7],[6,5,4,5,6,5,4]].
        - If `mode` is "SYMMETRIC", the filling method is similar to the "REFLECT". It is also copied
          according to the symmetry axis, except that it includes the symmetry axis. If the `input_x`
          is [[1,2,3],[4,5,6],[7,8,9]] and `paddings` is [[1,1],[2,2]], then the Outputs is
          [[2,1,1,2,3,3,2],[2,1,1,2,3,3,2],[5,4,4,5,6,6,5],[8,7,7,8,9,9,8],[8,7,7,8,9,9,8]].

    Examples:
        >>> from mindspore import Tensor
        >>> from mindspore.ops import operations as P
        >>> import mindspore.nn as nn
        >>> import numpy as np
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.pad = P.MirrorPad(mode="REFLECT")
        >>>     def construct(self, x, paddings):
        >>>         return self.pad(x, paddings)
        >>> x = np.random.random(size=(2, 3)).astype(np.float32)
        >>> paddings = Tensor([[1,1],[2,2]])
        >>> pad = Net()
        >>> ms_output = pad(Tensor(x), paddings)
    """

    @prim_attr_register
    def __init__(self, mode='REFLECT'):
        """Init MirrorPad"""
        validator.check_string('mode', mode, ['REFLECT', 'SYMMETRIC'], self.name)
        self.mode = mode
        # `paddings` (input index 1) must be a constant known at infer time so
        # the output shape can be computed below from its concrete value.
        self.set_const_input_indexes([1])

    def __infer__(self, input_x, paddings):
        validator.check_subclass("input_x", input_x['dtype'], mstype.tensor, self.name)
        validator.check_subclass("paddings", paddings['dtype'], mstype.tensor, self.name)
        x_shape = list(input_x['shape'])
        # Constant input: the concrete padding matrix is available as a numpy array.
        paddings_value = paddings['value'].asnumpy()
        paddings_size = paddings_value.size
        # One (before, after) pair per input dimension.
        validator.check_integer('paddings.shape', paddings_size, len(x_shape) * 2, Rel.EQ, self.name)
        if not np.all(paddings_value >= 0):
            raise ValueError('All elements of paddings must be >= 0.')
        # REFLECT excludes the edge element, so each pad must be < dim size;
        # SYMMETRIC includes it, so pads up to the dim size (adjust = 1) are legal.
        adjust = 0
        if self.mode == 'SYMMETRIC':
            adjust = 1
        for i in range(0, int(paddings_size / 2)):
            if (paddings_value[i, 0] >= x_shape[i] + adjust) or (paddings_value[i, 1] >= x_shape[i] + adjust):
                raise ValueError('At least one dim has too high a padding value for this input and mode')
        # Each output dimension grows by its before/after padding amounts.
        y_shape = ()
        for i in range(0, int(paddings_size / 2)):
            y_shape += ((x_shape[i] + paddings_value[i, 0] + paddings_value[i, 1]),)
        return {'shape': y_shape,
                'dtype': input_x['dtype'],
                'value': None}
class ROIAlign(PrimitiveWithInfer):
    """
    Computes Region of Interest (RoI) Align operator.

    The operator computes the value of each sampling point by bilinear interpolation from the nearby grid points on the
    feature map. No quantization is performed on any coordinates involved in the RoI, its bins, or the sampling
    points. The details of (RoI) Align operator are described in `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_.

    Args:
        pooled_height (int): The output features' height.
        pooled_width (int): The output features' width.
        spatial_scale (float): A scaling factor that maps the raw image coordinates to the input
            feature map coordinates. Suppose the height of a RoI is `ori_h` in the raw image and `fea_h` in the
            input feature map, the `spatial_scale` should be `fea_h / ori_h`.
        sample_num (int): Number of sampling points. Default: 2.
        roi_end_mode (int): Number must be 0 or 1. Default: 1.

    Inputs:
        - **features** (Tensor) - The input features, whose shape should be `(N, C, H, W)`.
        - **rois** (Tensor) - The shape is `(rois_n, 5)`. With data type of float16 or float32.
          `rois_n` represents the number of RoI. The size of the second dimension should be `5` and the `5` colunms
          are `(image_index, top_left_x, top_left_y, bottom_right_x, bottom_right_y)`. `image_index` represents the
          index of image. `top_left_x` and `top_left_y` represent the `x, y` coordinates of the top left corner
          of corresponding RoI, respectively. `bottom_right_x` and `bottom_right_y` represent the `x, y`
          coordinates of the bottom right corner of corresponding RoI, respectively.

    Outputs:
        Tensor, the shape is `(rois_n, C, pooled_height, pooled_width)`.

    Examples:
        >>> input_tensor = Tensor(np.array([[[[1., 2.], [3., 4.]]]]), mindspore.float32)
        >>> rois = Tensor(np.array([[0, 0.2, 0.3, 0.2, 0.3]]), mindspore.float32)
        >>> roi_align = P.ROIAlign(2, 2, 0.5, 2)
        >>> output_tensor = roi_align(input_tensor, rois)
        >>> assert output_tensor == Tensor(np.array([[[[2.15]]]]), mindspore.float32)
    """

    @prim_attr_register
    def __init__(self, pooled_height, pooled_width, spatial_scale, sample_num=2, roi_end_mode=1):
        """init ROIAlign"""
        # Validate every scalar argument before registering it as an attribute.
        for arg_name, arg_value, arg_types in (("pooled_height", pooled_height, [int]),
                                               ("pooled_width", pooled_width, [int]),
                                               ("spatial_scale", spatial_scale, [float]),
                                               ("sample_num", sample_num, [int]),
                                               ("roi_end_mode", roi_end_mode, [int])):
            validator.check_value_type(arg_name, arg_value, arg_types, self.name)
        validator.check_int_range("roi_end_mode", roi_end_mode, 0, 1, Rel.INC_BOTH, self.name)
        self.pooled_height = pooled_height
        self.pooled_width = pooled_width
        self.spatial_scale = spatial_scale
        self.sample_num = sample_num
        self.roi_end_mode = roi_end_mode

    def infer_shape(self, inputs_shape, rois_shape):
        # One pooled (pooled_height x pooled_width) map per RoI per input channel.
        return [rois_shape[0], inputs_shape[1], self.pooled_height, self.pooled_width]

    def infer_dtype(self, inputs_type, rois_type):
        valid_types = (mstype.float16, mstype.float32)
        for arg_name, arg_type in (("inputs_type", inputs_type), ("rois_type", rois_type)):
            validator.check_tensor_type_same({arg_name: arg_type}, valid_types, self.name)
        return inputs_type
class Adam(PrimitiveWithInfer):
    r"""
    Updates gradients by Adaptive Moment Estimation (Adam) algorithm.

    The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.

    The updating formulas are as follows,

    .. math::
        \begin{array}{ll} \\
            m = \beta_1 * m + (1 - \beta_1) * g \\
            v = \beta_2 * v + (1 - \beta_2) * g * g \\
            l = \alpha * \frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} \\
            w = w - l * \frac{m}{\sqrt{v} + \epsilon}
        \end{array}

    :math:`m` represents the 1st moment vector, :math:`v` represents the 2nd moment vector, :math:`g` represents
    `gradient`, :math:`l` represents scaling factor `lr`, :math:`\beta_1, \beta_2` represent `beta1` and `beta2`,
    :math:`t` represents updating step while :math:`beta_1^t` and :math:`beta_2^t` represent `beta1_power` and
    `beta2_power`, :math:`\alpha` represents `learning_rate`, :math:`w` represents `var`, :math:`\epsilon` represents
    `epsilon`.

    Args:
        use_locking (bool): Whether to enable a lock to protect variable tensors from being updated.
            If true, updates of the var, m, and v tensors will be protected by a lock.
            If false, the result is unpredictable. Default: False.
        use_nesterov (bool): Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients.
            If true, update the gradients using NAG.
            If false, update the gradients without using NAG. Default: False.

    Inputs:
        - **var** (Tensor) - Weights to be updated.
        - **m** (Tensor) - The 1st moment vector in the updating formula, has the same type as `var`.
        - **v** (Tensor) - the 2nd moment vector in the updating formula.
          Mean square gradients with the same type as `var`.
        - **beta1_power** (float) - :math:`beta_1^t` in the updating formula.
        - **beta2_power** (float) - :math:`beta_2^t` in the updating formula.
        - **lr** (float) - :math:`l` in the updating formula.
        - **beta1** (float) - The exponential decay rate for the 1st moment estimations.
        - **beta2** (float) - The exponential decay rate for the 2nd moment estimations.
        - **epsilon** (float) - Term added to the denominator to improve numerical stability.
        - **gradient** (Tensor) - Gradients, has the same type as `var`.

    Outputs:
        Tuple of 3 Tensor, the updated parameters.

        - **var** (Tensor) - The same shape and data type as `var`.
        - **m** (Tensor) - The same shape and data type as `m`.
        - **v** (Tensor) - The same shape and data type as `v`.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.apply_adam = P.Adam()
        >>>         self.var = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="var")
        >>>         self.m = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="m")
        >>>         self.v = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="v")
        >>>     def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad):
        >>>         out = self.apply_adam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1, beta2,
        >>>                               epsilon, grad)
        >>>         return out
        >>> net = Net()
        >>> gradient = Tensor(np.random.rand(3, 3, 3).astype(np.float32))
        >>> result = net(0.9, 0.999, 0.001, 0.9, 0.999, 1e-8, gradient)
    """

    @prim_attr_register
    def __init__(self, use_locking=False, use_nesterov=False):
        """Init Adam: validate the boolean behaviour flags."""
        validator.check_value_type("use_locking", use_locking, [bool], self.name)
        validator.check_value_type("use_nesterov", use_nesterov, [bool], self.name)

    def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, beta2_power_shape, lr_shape,
                    beta1_shape, beta2_shape, epsilon_shape, grad_shape):
        # var, m, v and grad must all match elementwise; scalar hyper-parameter
        # shapes are accepted as-is.
        validator.check("var_shape", var_shape, "m_shape", m_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "v_shape", v_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "grad_shape", grad_shape, Rel.EQ, self.name)
        return var_shape, m_shape, v_shape

    def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, beta2_power_dtype, lr_dtype,
                    beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype):
        # State tensors share one numeric dtype; hyper-parameters may be scalars
        # or tensors but must be float16/float32.
        args = {"var": var_dtype, "m": m_dtype, "v": v_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, mstype.number_type, self.name)
        args = {"beta1_power": beta1_power_dtype, "beta2_power": beta2_power_dtype, 'lr': lr_dtype,
                "beta1": beta1_dtype, "beta2": beta2_dtype, "epsilon": epsilon_dtype}
        validator.check_scalar_or_tensor_type_same(args, [mstype.float16, mstype.float32], self.name, True)
        return var_dtype, m_dtype, v_dtype
class FusedSparseAdam(PrimitiveWithInfer):
    r"""
    Merge the duplicate value of the gradient and then update parameters by Adaptive Moment Estimation (Adam)
    algorithm. This operator is used when the gradient is sparse.

    The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.

    The updating formulas are as follows,

    .. math::
        \begin{array}{ll} \\
            m = \beta_1 * m + (1 - \beta_1) * g \\
            v = \beta_2 * v + (1 - \beta_2) * g * g \\
            l = \alpha * \frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} \\
            w = w - l * \frac{m}{\sqrt{v} + \epsilon}
        \end{array}

    :math:`m` represents the 1st moment vector, :math:`v` represents the 2nd moment vector, :math:`g` represents
    `gradient`, :math:`l` represents scaling factor `lr`, :math:`\beta_1, \beta_2` represent `beta1` and `beta2`,
    :math:`t` represents updating step while :math:`beta_1^t` and :math:`beta_2^t` represent `beta1_power` and
    `beta2_power`, :math:`\alpha` represents `learning_rate`, :math:`w` represents `var`, :math:`\epsilon` represents
    `epsilon`.

    All of inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Args:
        use_locking (bool): Whether to enable a lock to protect variable tensors from being updated.
            If true, updates of the var, m, and v tensors will be protected by a lock.
            If false, the result is unpredictable. Default: False.
        use_nesterov (bool): Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients.
            If true, update the gradients using NAG.
            If false, update the gradients without using NAG. Default: False.

    Inputs:
        - **var** (Parameter) - Parameters to be updated with float32 data type.
        - **m** (Parameter) - The 1st moment vector in the updating formula, has the same type as `var` with
          float32 data type.
        - **v** (Parameter) - The 2nd moment vector in the updating formula. Mean square gradients, has the same type as
          `var` with float32 data type.
        - **beta1_power** (Tensor) - :math:`beta_1^t` in the updating formula with float32 data type.
        - **beta2_power** (Tensor) - :math:`beta_2^t` in the updating formula with float32 data type.
        - **lr** (Tensor) - :math:`l` in the updating formula. With float32 data type.
        - **beta1** (Tensor) - The exponential decay rate for the 1st moment estimations with float32 data type.
        - **beta2** (Tensor) - The exponential decay rate for the 2nd moment estimations with float32 data type.
        - **epsilon** (Tensor) - Term added to the denominator to improve numerical stability with float32 data type.
        - **gradient** (Tensor) - Gradient value with float32 data type.
        - **indices** (Tensor) - Gradient indices with int32 data type.

    Outputs:
        Tuple of 3 Tensors, this operator will update the input parameters directly, the outputs are useless.

        - **var** (Tensor) - A Tensor with shape (1,).
        - **m** (Tensor) - A Tensor with shape (1,).
        - **v** (Tensor) - A Tensor with shape (1,).

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> import mindspore.common.dtype as mstype
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.sparse_apply_adam = P.FusedSparseAdam()
        >>>         self.var = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="var")
        >>>         self.m = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="m")
        >>>         self.v = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="v")
        >>>     def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, indices):
        >>>         out = self.sparse_apply_adam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1, beta2,
        >>>                                      epsilon, grad, indices)
        >>>         return out
        >>> net = Net()
        >>> beta1_power = Tensor(0.9, mstype.float32)
        >>> beta2_power = Tensor(0.999, mstype.float32)
        >>> lr = Tensor(0.001, mstype.float32)
        >>> beta1 = Tensor(0.9, mstype.float32)
        >>> beta2 = Tensor(0.999, mstype.float32)
        >>> epsilon = Tensor(1e-8, mstype.float32)
        >>> gradient = Tensor(np.random.rand(2, 1, 2), mstype.float32)
        >>> indices = Tensor([0, 1], mstype.int32)
        >>> result = net(beta1_power, beta2_power, lr, beta1, beta2, epsilon, gradient, indices)
    """
    # Signature: var/m/v are written in place; all float inputs share dtype T,
    # while indices use an independent dtype T1.
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('beta1_power', dtype=sig.sig_dtype.T),
        sig.make_sig('beta2_power', dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T),
        sig.make_sig('beta1', dtype=sig.sig_dtype.T),
        sig.make_sig('beta2', dtype=sig.sig_dtype.T),
        sig.make_sig('epsilon', dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )

    @prim_attr_register
    def __init__(self, use_locking=False, use_nesterov=False):
        """Init FusedSparseAdam: validate the boolean behaviour flags."""
        validator.check_value_type("use_locking", use_locking, [bool], self.name)
        validator.check_value_type("use_nesterov", use_nesterov, [bool], self.name)
        self.init_prim_io_names(inputs=['var', 'm', 'v', 'beta1_power', 'beta2_power', 'lr', 'beta1', 'beta2',
                                        'epsilon', 'grad', 'indices'],
                                outputs=['var', 'm', 'v'])

    def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, beta2_power_shape, lr_shape,
                    beta1_shape, beta2_shape, epsilon_shape, grad_shape, indices_shape):
        validator.check("var_shape", var_shape, "m_shape", m_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "v_shape", v_shape, Rel.EQ, self.name)
        # indices is a 1-D row selector; grad must supply one slice per index.
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
        # NOTE(review): the message below says "updates" but refers to `grad`.
        if len(var_shape) > 1 and grad_shape != indices_shape + var_shape[1:]:
            raise ValueError(f"For '{self.name}', the shape of updates should be [] or "
                             f"grad_shape = indices_shape + var_shape[1:], but got var_shape: {var_shape}, "
                             f"indices_shape: {indices_shape}, grad_shape: {grad_shape}.")
        # Parameters are updated in place; the declared outputs are placeholders.
        return [1], [1], [1]

    def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, beta2_power_dtype, lr_dtype,
                    beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype, indices_dtype):
        args = {"var": var_dtype, "m": m_dtype, "v": v_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, mstype.number_type, self.name)
        args = {"beta1_power": beta1_power_dtype, "beta2_power": beta2_power_dtype, 'lr': lr_dtype,
                "beta1": beta1_dtype, "beta2": beta2_dtype, "epsilon": epsilon_dtype}
        validator.check_scalar_or_tensor_type_same(args, [mstype.float16, mstype.float32], self.name, True)
        validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
        return var_dtype, m_dtype, v_dtype
class FusedSparseLazyAdam(PrimitiveWithInfer):
    r"""
    Merge the duplicate value of the gradient and then update parameters by Adaptive Moment Estimation (Adam)
    algorithm. This operator is used when the gradient is sparse. The behavior is not equivalent to the
    original Adam algorithm, as only the current indices parameters will be updated.

    The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.

    The updating formulas are as follows,

    .. math::
        \begin{array}{ll} \\
            m = \beta_1 * m + (1 - \beta_1) * g \\
            v = \beta_2 * v + (1 - \beta_2) * g * g \\
            l = \alpha * \frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} \\
            w = w - l * \frac{m}{\sqrt{v} + \epsilon}
        \end{array}

    :math:`m` represents the 1st moment vector, :math:`v` represents the 2nd moment vector, :math:`g` represents
    `gradient`, :math:`l` represents scaling factor `lr`, :math:`\beta_1, \beta_2` represent `beta1` and `beta2`,
    :math:`t` represents updating step while :math:`beta_1^t` and :math:`beta_2^t` represent `beta1_power` and
    `beta2_power`, :math:`\alpha` represents `learning_rate`, :math:`w` represents `var`, :math:`\epsilon` represents
    `epsilon`.

    All of inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Args:
        use_locking (bool): Whether to enable a lock to protect variable tensors from being updated.
            If true, updates of the var, m, and v tensors will be protected by a lock.
            If false, the result is unpredictable. Default: False.
        use_nesterov (bool): Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients.
            If true, update the gradients using NAG.
            If false, update the gradients without using NAG. Default: False.

    Inputs:
        - **var** (Parameter) - Parameters to be updated with float32 data type.
        - **m** (Parameter) - The 1st moment vector in the updating formula, has the same type as `var` with
          float32 data type.
        - **v** (Parameter) - The 2nd moment vector in the updating formula. Mean square gradients, has the same type as
          `var` with float32 data type.
        - **beta1_power** (Tensor) - :math:`beta_1^t` in the updating formula with float32 data type.
        - **beta2_power** (Tensor) - :math:`beta_2^t` in the updating formula with float32 data type.
        - **lr** (Tensor) - :math:`l` in the updating formula with float32 data type.
        - **beta1** (Tensor) - The exponential decay rate for the 1st moment estimations with float32 data type.
        - **beta2** (Tensor) - The exponential decay rate for the 2nd moment estimations with float32 data type.
        - **epsilon** (Tensor) - Term added to the denominator to improve numerical stability with float32 data type.
        - **gradient** (Tensor) - Gradient value with float32 data type.
        - **indices** (Tensor) - Gradient indices with int32 data type.

    Outputs:
        Tuple of 3 Tensors, this operator will update the input parameters directly, the outputs are useless.

        - **var** (Tensor) - A Tensor with shape (1,).
        - **m** (Tensor) - A Tensor with shape (1,).
        - **v** (Tensor) - A Tensor with shape (1,).

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> import mindspore.common.dtype as mstype
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.sparse_apply_lazyadam = P.FusedSparseLazyAdam()
        >>>         self.var = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="var")
        >>>         self.m = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="m")
        >>>         self.v = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="v")
        >>>     def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, indices):
        >>>         out = self.sparse_apply_lazyadam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1,
        >>>                                          beta2, epsilon, grad, indices)
        >>>         return out
        >>> net = Net()
        >>> beta1_power = Tensor(0.9, mstype.float32)
        >>> beta2_power = Tensor(0.999, mstype.float32)
        >>> lr = Tensor(0.001, mstype.float32)
        >>> beta1 = Tensor(0.9, mstype.float32)
        >>> beta2 = Tensor(0.999, mstype.float32)
        >>> epsilon = Tensor(1e-8, mstype.float32)
        >>> gradient = Tensor(np.random.rand(2, 1, 2), mstype.float32)
        >>> indices = Tensor([0, 1], mstype.int32)
        >>> result = net(beta1_power, beta2_power, lr, beta1, beta2, epsilon, gradient, indices)
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('beta1_power', dtype=sig.sig_dtype.T),
        sig.make_sig('beta2_power', dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T),
        sig.make_sig('beta1', dtype=sig.sig_dtype.T),
        sig.make_sig('beta2', dtype=sig.sig_dtype.T),
        sig.make_sig('epsilon', dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )

    @prim_attr_register
    def __init__(self, use_locking=False, use_nesterov=False):
        # Both flags must be plain Python bools.
        validator.check_value_type("use_locking", use_locking, [bool], self.name)
        validator.check_value_type("use_nesterov", use_nesterov, [bool], self.name)
        # Register the primitive's 11 inputs and the 3 in-place-updated outputs.
        self.init_prim_io_names(inputs=['var', 'm', 'v', 'beta1_power', 'beta2_power', 'lr', 'beta1', 'beta2',
                                        'epsilon', 'grad', 'indices'],
                                outputs=['var', 'm', 'v'])

    def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, beta2_power_shape, lr_shape,
                    beta1_shape, beta2_shape, epsilon_shape, grad_shape, indices_shape):
        """Validate input shapes; the returned (1,) shapes are placeholders (updates are in place)."""
        # The optimizer state tensors must share the variable's shape.
        validator.check("var_shape", var_shape, "m_shape", m_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "v_shape", v_shape, Rel.EQ, self.name)
        # `indices` is a rank-1 vector whose length matches grad's leading dimension.
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
        # For multi-dimensional var the gradient must be indices_shape + var_shape[1:].
        if len(var_shape) > 1 and grad_shape != indices_shape + var_shape[1:]:
            raise ValueError(f"For '{self.name}', the shape of updates should be [] or "
                             f"grad_shape = indices_shape + var_shape[1:], but got var_shape: {var_shape}, "
                             f"indices_shape: {indices_shape}, grad_shape: {grad_shape}.")
        return [1], [1], [1]

    def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, beta2_power_dtype, lr_dtype,
                    beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype, indices_dtype):
        """Validate input dtypes; outputs keep the state tensors' dtypes."""
        # var/m/v/grad must share one numeric tensor dtype.
        args = {"var": var_dtype, "m": m_dtype, "v": v_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, mstype.number_type, self.name)
        # Hyper-parameters may be scalars or tensors but must be float16/float32.
        args = {"beta1_power": beta1_power_dtype, "beta2_power": beta2_power_dtype, 'lr': lr_dtype,
                "beta1": beta1_dtype, "beta2": beta2_dtype, "epsilon": epsilon_dtype}
        validator.check_scalar_or_tensor_type_same(args, [mstype.float16, mstype.float32], self.name, True)
        # Sparse indices are restricted to int32.
        validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
        return var_dtype, m_dtype, v_dtype
class FusedSparseFtrl(PrimitiveWithInfer):
    """
    Merge the duplicate value of the gradient and then update relevant entries according to the FTRL-proximal scheme.

    All of inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Args:
        lr (float): The learning rate value, must be positive.
        l1 (float): l1 regularization strength, must be greater than or equal to zero.
        l2 (float): l2 regularization strength, must be greater than or equal to zero.
        lr_power (float): Learning rate power controls how the learning rate decreases during training,
            must be less than or equal to zero. Use fixed learning rate if `lr_power` is zero.
        use_locking (bool): Use locks for updating operation if True . Default: False.

    Inputs:
        - **var** (Parameter) - The variable to be updated. The data type must be float32.
        - **accum** (Parameter) - The accumulation to be updated, must be same type and shape as `var`.
        - **linear** (Parameter) - the linear coefficient to be updated, must be same type and shape as `var`.
        - **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.
        - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`. The shape
          of `indices` must be the same as `grad` in first dimension. The type must be int32.

    Outputs:
        Tuple of 3 Tensor, this operator will update the input parameters directly, the outputs are useless.

        - **var** (Tensor) - A Tensor with shape (1,).
        - **accum** (Tensor) - A Tensor with shape (1,).
        - **linear** (Tensor) - A Tensor with shape (1,).

    Examples:
        >>> import mindspore
        >>> import mindspore.nn as nn
        >>> import numpy as np
        >>> from mindspore import Parameter
        >>> from mindspore import Tensor
        >>> from mindspore.ops import operations as P
        >>> class SparseApplyFtrlNet(nn.Cell):
        >>>     def __init__(self):
        >>>         super(SparseApplyFtrlNet, self).__init__()
        >>>         self.sparse_apply_ftrl = P.FusedSparseFtrl(lr=0.01, l1=0.0, l2=0.0, lr_power=-0.5)
        >>>         self.var = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="var")
        >>>         self.accum = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="accum")
        >>>         self.linear = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="linear")
        >>>
        >>>     def construct(self, grad, indices):
        >>>         out = self.sparse_apply_ftrl(self.var, self.accum, self.linear, grad, indices)
        >>>         return out
        >>>
        >>> net = SparseApplyFtrlNet()
        >>> grad = Tensor(np.random.rand(2, 1, 2).astype(np.float32))
        >>> indices = Tensor(np.array([0, 1]).astype(np.int32))
        >>> output = net(grad, indices)
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('linear', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )

    @prim_attr_register
    def __init__(self, lr, l1, l2, lr_power, use_locking=False):
        # NOTE(review): outputs here is named 'output' although three values are
        # returned; sibling fused-sparse ops register outputs=['var', 'm', 'v'].
        # Left unchanged — renaming registered IO names may affect kernel lookup.
        self.init_prim_io_names(inputs=['var', 'accum', 'linear', 'grad', 'indices'],
                                outputs=['output'])
        # Type checks first, then range checks on the same values.
        validator.check_value_type("lr", lr, [float], self.name)
        validator.check_value_type("l1", l1, [float], self.name)
        validator.check_value_type("l2", l2, [float], self.name)
        validator.check_value_type("lr_power", lr_power, [float], self.name)
        # lr strictly positive; l1/l2 non-negative; lr_power non-positive.
        self.lr = validator.check_number_range("lr", lr, 0.0, float("inf"), Rel.INC_NEITHER, self.name)
        self.l1 = validator.check_number_range("l1", l1, 0.0, float("inf"), Rel.INC_LEFT, self.name)
        self.l2 = validator.check_number_range("l2", l2, 0.0, float("inf"), Rel.INC_LEFT, self.name)
        self.lr_power = validator.check_number("lr_power", lr_power, 0, Rel.LE, self.name)
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)

    def infer_shape(self, var_shape, accum_shape, linear_shape, grad_shape, indices_shape):
        """Validate input shapes; the returned (1,) shapes are placeholders (updates are in place)."""
        # accum and linear mirror var's shape exactly.
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)
        # For multi-dimensional var, grad matches var in all trailing dimensions.
        if len(var_shape) > 1:
            validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
        # `indices` is a rank-1 vector whose length matches grad's leading dimension.
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
        return [1], [1], [1]

    def infer_dtype(self, var_dtype, accum_dtype, linear_dtype, grad_dtype, indices_dtype):
        """Validate input dtypes; outputs keep the state tensors' dtypes."""
        # All dense inputs are restricted to float32; indices to int32.
        args = {"var_dtype": var_dtype, "accum_dtype": accum_dtype,
                "linear_dtype": linear_dtype, "grad_dtype": grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float32], self.name)
        validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
        return var_dtype, accum_dtype, linear_dtype
class FusedSparseProximalAdagrad(PrimitiveWithInfer):
    r"""
    Merge the duplicate value of the gradient and then update relevant entries according to the proximal adagrad
    algorithm.

    .. math::
        accum += grad * grad
    .. math::
        \text{prox_v} = var - lr * grad * \frac{1}{\sqrt{accum}}
    .. math::
        var = \frac{sign(\text{prox_v})}{1 + lr * l2} * \max(\left| \text{prox_v} \right| - lr * l1, 0)

    All of inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Args:
        use_locking (bool): If true, the variable and accumulation tensors will be protected from being updated.
            Default: False.

    Inputs:
        - **var** (Parameter) - Variable tensor to be updated. The data type must be float32.
        - **accum** (Parameter) - Variable tensor to be updated, has the same dtype as `var`.
        - **lr** (Tensor) - The learning rate value. The data type must be float32.
        - **l1** (Tensor) - l1 regularization strength. The data type must be float32.
        - **l2** (Tensor) - l2 regularization strength. The data type must be float32.
        - **grad** (Tensor) - A tensor of the same type as `var`, for the gradient. The data type must be float32.
        - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`. The data type
          must be one of: int16, int32, int64, uint16, uint32, uint64.

    Outputs:
        Tuple of 2 Tensors, this operator will update the input parameters directly, the outputs are useless.

        - **var** (Tensor) - A Tensor with shape (1,).
        - **accum** (Tensor) - A Tensor with shape (1,).

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.sparse_apply_proximal_adagrad = P.FusedSparseProximalAdagrad()
        >>>         self.var = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="var")
        >>>         self.accum = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="accum")
        >>>         self.lr = Tensor(0.01, mstype.float32)
        >>>         self.l1 = Tensor(0.0, mstype.float32)
        >>>         self.l2 = Tensor(0.0, mstype.float32)
        >>>     def construct(self, grad, indices):
        >>>         out = self.sparse_apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1,
        >>>                                                  self.l2, grad, indices)
        >>>         return out
        >>> net = Net()
        >>> grad = Tensor(np.random.rand(2, 1, 2).astype(np.float32))
        >>> indices = Tensor(np.array([0, 1]).astype(np.int32))
        >>> output = net(grad, indices)
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T),
        sig.make_sig('l1', dtype=sig.sig_dtype.T),
        sig.make_sig('l2', dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )

    @prim_attr_register
    def __init__(self, use_locking=False):
        # NOTE(review): outputs is named 'output' although two values are returned;
        # left unchanged because the registered IO names may matter for kernel lookup.
        self.init_prim_io_names(inputs=['var', 'accum', 'lr', 'l1', 'l2', 'grad', 'indices'],
                                outputs=['output'])
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)

    def infer_shape(self, var_shape, accum_shape, lr_shape, l1_shape, l2_shape, grad_shape, indices_shape):
        """Validate that `indices` is rank 1; the returned (1,) shapes are placeholders (updates are in place)."""
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        return [1], [1]

    def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, l1_dtype, l2_dtype, grad_dtype, indices_dtype):
        """Validate input dtypes; outputs keep the state tensors' dtypes."""
        # var/accum/grad must all be float32 tensors.
        args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float32], self.name)
        # Hyper-parameters may be scalars or tensors but must be float32.
        validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, [mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({"l1": l1_dtype}, [mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({"l2": l2_dtype}, [mstype.float32], self.name)
        # Unlike FusedSparseFtrl (int32 only), this op accepts a wide set of integer index types.
        valid_types = [mstype.int16, mstype.int32, mstype.int64,
                       mstype.uint16, mstype.uint32, mstype.uint64]
        validator.check_tensor_type_same({'indices': indices_dtype}, valid_types, self.name)
        return var_dtype, accum_dtype
class KLDivLoss(PrimitiveWithInfer):
    r"""
    Computes the Kullback-Leibler divergence between the target and the output.

    Note:
        With input :math:`x`, label :math:`y` and output :math:`\ell(x, y)`, let

        .. math::
            L = \{l_1,\dots,l_N\}^\top, \quad
            l_n = y_n \cdot (\log y_n - x_n)

        then

        .. math::
            \ell(x, y) = \begin{cases}
            L, & \text{if reduction} = \text{`none';}\\
            \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
            \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
            \end{cases}

    Args:
        reduction (str): How the elementwise losses are combined; one of 'none',
            'mean' or 'sum'. Default: 'mean'.

    Inputs:
        - **input_x** (Tensor) - The input Tensor. The data type must be float32.
        - **input_y** (Tensor) - The label Tensor, same shape as `input_x`, float32.

    Outputs:
        Tensor with the same shape as `input_x` when `reduction` is 'none',
        otherwise a scalar.

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor
        >>> from mindspore.ops import operations as P
        >>> kldiv_loss = P.KLDivLoss()
        >>> input_x = Tensor(np.array([0.2, 0.7, 0.1]), mindspore.float32)
        >>> input_y = Tensor(np.array([0., 1., 0.]), mindspore.float32)
        >>> result = kldiv_loss(input_x, input_y)
    """

    @prim_attr_register
    def __init__(self, reduction='mean'):
        # Reject any reduction mode outside the supported set.
        self.reduction = validator.check_string('reduction', reduction, ['none', 'mean', 'sum'], self.name)

    def infer_shape(self, x_shape, y_shape):
        # Input and label must agree elementwise.
        validator.check('x_shape', x_shape, 'y_shape', y_shape, Rel.EQ, self.name)
        # Reduced modes produce a scalar; 'none' keeps the elementwise shape.
        return [] if self.reduction in ('mean', 'sum') else x_shape

    def infer_dtype(self, x_type, y_type):
        # Both tensors must share a floating-point dtype; output keeps it.
        validator.check_tensor_type_same({'x': x_type, 'y': y_type},
                                         (mstype.float16, mstype.float32), self.name)
        return x_type
class BinaryCrossEntropy(PrimitiveWithInfer):
    r"""
    Computes the Binary Cross Entropy between the target and the output.

    Note:
        With input :math:`x`, label :math:`y` and output :math:`\ell(x, y)`, let

        .. math::
            L = \{l_1,\dots,l_N\}^\top, \quad
            l_n = - w_n \left[ y_n \cdot \log x_n + (1 - y_n) \cdot \log (1 - x_n) \right]

        then

        .. math::
            \ell(x, y) = \begin{cases}
            L, & \text{if reduction} = \text{`none';}\\
            \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
            \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
            \end{cases}

    Args:
        reduction (str): How the elementwise losses are combined; one of 'none',
            'mean' or 'sum'. Default: 'mean'.

    Inputs:
        - **input_x** (Tensor) - The input Tensor, of type float16 or float32.
        - **input_y** (Tensor) - The label Tensor, same shape and dtype as `input_x`.
        - **weight** (Tensor, optional) - Per-element rescaling weight for the loss,
          same shape and dtype as `input_x`. Default: None.

    Outputs:
        Tensor with the same shape as `input_x` when `reduction` is 'none',
        otherwise a scalar.

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor
        >>> from mindspore.ops import operations as P
        >>> binary_cross_entropy = P.BinaryCrossEntropy()
        >>> input_x = Tensor(np.array([0.2, 0.7, 0.1]), mindspore.float32)
        >>> input_y = Tensor(np.array([0., 1., 0.]), mindspore.float32)
        >>> weight = Tensor(np.array([1, 2, 2]), mindspore.float32)
        >>> result = binary_cross_entropy(input_x, input_y, weight)
        0.38240486
    """

    @prim_attr_register
    def __init__(self, reduction='mean'):
        # Reject any reduction mode outside the supported set.
        self.reduction = validator.check_string('reduction', reduction, ['none', 'mean', 'sum'], self.name)

    def infer_shape(self, x_shape, y_shape, weight_shape):
        # Input and label must agree elementwise; weight too, when supplied.
        validator.check('x_shape', x_shape, 'y_shape', y_shape, Rel.EQ, self.name)
        if weight_shape:
            validator.check('y_shape', y_shape, 'weight_shape', weight_shape, Rel.EQ, self.name)
        # Reduced modes produce a scalar; 'none' keeps the elementwise shape.
        return [] if self.reduction in ('mean', 'sum') else x_shape

    def infer_dtype(self, x_type, y_type, weight_type):
        # All tensors must share a floating-point dtype; output keeps it.
        floating = (mstype.float16, mstype.float32)
        validator.check_tensor_type_same({'x': x_type, 'y': y_type}, floating, self.name)
        if weight_type:
            validator.check_tensor_type_same({'x': x_type, 'weight': weight_type}, floating, self.name)
        return x_type
class ApplyAdaMax(PrimitiveWithInfer):
    r"""
    Update relevant entries according to the adamax scheme.

    The updating formulas are as follows,

    .. math::
        \begin{array}{ll} \\
            m_{t} = \beta_1 * m_{t-1} + (1 - \beta_1) * g \\
            v_{t} = \max(\beta_2 * v_{t-1}, \left| g \right|) \\
            var = var - \frac{l}{1 - \beta_1^t} * \frac{m_{t}}{v_{t} + \epsilon}
        \end{array}

    :math:`t` represents updating step while :math:`m` represents the 1st moment vector, :math:`m_{t-1}`
    is the last moment of :math:`m_{t}`, :math:`v` represents the 2nd moment vector, :math:`v_{t-1}`
    is the last moment of :math:`v_{t}`, :math:`l` represents scaling factor `lr`,
    :math:`g` represents `grad`, :math:`\beta_1, \beta_2` represent `beta1` and `beta2`,
    :math:`beta_1^t` represents `beta1_power`, :math:`var` represents the variable to be updated,
    :math:`\epsilon` represents `epsilon`.

    Inputs of `var`, `m`, `v` and `grad` comply with the implicit type conversion rules
    to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Inputs:
        - **var** (Parameter) - Variable to be updated. With float32 or float16 data type.
        - **m** (Parameter) - The 1st moment vector in the updating formula, has the same shape and type as `var`.
          With float32 or float16 data type.
        - **v** (Parameter) - The 2nd moment vector in the updating formula. Mean square gradients
          with the same shape and type as `var`. With float32 or float16 data type.
        - **beta1_power** (Union[Number, Tensor]) - :math:`beta_1^t` in the updating formula, should be scalar.
          With float32 or float16 data type.
        - **lr** (Union[Number, Tensor]) - Learning rate, :math:`l` in the updating formula, should be scalar.
          With float32 or float16 data type.
        - **beta1** (Union[Number, Tensor]) - The exponential decay rate for the 1st moment estimations,
          should be scalar. With float32 or float16 data type.
        - **beta2** (Union[Number, Tensor]) - The exponential decay rate for the 2nd moment estimations,
          should be scalar. With float32 or float16 data type.
        - **epsilon** (Union[Number, Tensor]) - A small value added for numerical stability, should be scalar.
          With float32 or float16 data type.
        - **grad** (Tensor) - A tensor for gradient, has the same shape and type as `var`.
          With float32 or float16 data type.

    Outputs:
        Tuple of 3 Tensor, the updated parameters.

        - **var** (Tensor) - The same shape and data type as `var`.
        - **m** (Tensor) - The same shape and data type as `m`.
        - **v** (Tensor) - The same shape and data type as `v`.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> import mindspore.common.dtype as mstype
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.apply_ada_max = P.ApplyAdaMax()
        >>>         self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        >>>         self.m = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="m")
        >>>         self.v = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="v")
        >>>     def construct(self, beta1_power, lr, beta1, beta2, epsilon, grad):
        >>>         out = self.apply_ada_max(self.var, self.m, self.v, beta1_power, lr, beta1, beta2, epsilon, grad)
        >>>         return out
        >>> net = Net()
        >>> beta1_power = Tensor(0.9, mstype.float32)
        >>> lr = Tensor(0.001, mstype.float32)
        >>> beta1 = Tensor(0.9, mstype.float32)
        >>> beta2 = Tensor(0.99, mstype.float32)
        >>> epsilon = Tensor(1e-10, mstype.float32)
        >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
        >>> result = net(beta1_power, lr, beta1, beta2, epsilon, grad)
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('beta1_power', dtype=sig.sig_dtype.T1),
        sig.make_sig('lr', dtype=sig.sig_dtype.T2),
        sig.make_sig('beta1', dtype=sig.sig_dtype.T3),
        sig.make_sig('beta2', dtype=sig.sig_dtype.T4),
        sig.make_sig('epsilon', dtype=sig.sig_dtype.T5),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self):
        """init ApplyAdaMax"""

    def _check_scalar_shape(self, rank_arg_name, elem_arg_name, shape):
        """Check that a hyper-parameter shape is scalar-like: rank 0, or rank 1 with a single element.

        The original error-message strings are passed in so diagnostics are unchanged.
        """
        rank = len(shape)
        validator.check_integer(rank_arg_name, rank, 1, Rel.LE, self.name)
        if rank == 1:
            validator.check_integer(elem_arg_name, shape[0], 1, Rel.EQ, self.name)

    def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, lr_shape,
                    beta1_shape, beta2_shape, epsilon_shape, grad_shape):
        """Validate input shapes; outputs keep the state tensors' shapes."""
        # The state tensors and the gradient must share the variable's shape.
        validator.check("m_shape", m_shape, "var_shape", var_shape, Rel.EQ, self.name)
        validator.check("v_shape", v_shape, "var_shape", var_shape, Rel.EQ, self.name)
        validator.check("grad_shape", grad_shape, "var_shape", var_shape, Rel.EQ, self.name)
        # Each hyper-parameter must be a scalar (rank 0 or a 1-element vector).
        self._check_scalar_shape("beta1 power's rank", "beta1_power_shape[0]", beta1_power_shape)
        self._check_scalar_shape("lr's rank", "lr_shape[0]", lr_shape)
        self._check_scalar_shape("beta1's rank", "beta1_shape[0]", beta1_shape)
        self._check_scalar_shape("beta2's rank", "beta2_shape[0]", beta2_shape)
        self._check_scalar_shape("epsilon's rank", "epsilon_shape[0]", epsilon_shape)
        return var_shape, m_shape, v_shape

    def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, lr_dtype,
                    beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype):
        """Validate input dtypes; outputs keep the state tensors' dtypes."""
        valid_types = [mstype.float16, mstype.float32]
        # State tensors and gradient must share one floating-point dtype.
        args = {"var": var_dtype, "m": m_dtype, "v": v_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, valid_types, self.name)
        # Hyper-parameters may be scalars or tensors but must be float16/float32.
        validator.check_scalar_or_tensor_type_same({"beta1_power": beta1_power_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"beta1": beta1_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"beta2": beta2_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"epsilon": epsilon_dtype}, valid_types, self.name)
        return var_dtype, m_dtype, v_dtype
class ApplyAdadelta(PrimitiveWithInfer):
    r"""
    Update relevant entries according to the adadelta scheme.

    .. math::
        accum = \rho * accum + (1 - \rho) * grad^2
    .. math::
        \text{update} = \sqrt{\text{accum_update} + \epsilon} * \frac{grad}{\sqrt{accum + \epsilon}}
    .. math::
        \text{accum_update} = \rho * \text{accum_update} + (1 - \rho) * update^2
    .. math::
        var -= lr * update

    Inputs of `var`, `accum`, `accum_update` and `grad` comply with the implicit type conversion rules
    to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Inputs:
        - **var** (Parameter) - Weights to be updated. With float32 or float16 data type.
        - **accum** (Parameter) - Accumulation to be updated, has the same shape and type as `var`.
          With float32 or float16 data type.
        - **accum_update** (Parameter) - Accum_update to be updated, has the same shape and type as `var`.
          With float32 or float16 data type.
        - **lr** (Union[Number, Tensor]) - Learning rate, should be scalar. With float32 or float16 data type.
        - **rho** (Union[Number, Tensor]) - Decay rate, should be scalar. With float32 or float16 data type.
        - **epsilon** (Union[Number, Tensor]) - A small value added for numerical stability, should be scalar.
          With float32 or float16 data type.
        - **grad** (Tensor) - Gradients, has the same shape and type as `var`. With float32 or float16 data type.

    Outputs:
        Tuple of 3 Tensor, the updated parameters.

        - **var** (Tensor) - The same shape and data type as `var`.
        - **accum** (Tensor) - The same shape and data type as `accum`.
        - **accum_update** (Tensor) - The same shape and data type as `accum_update`.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> import mindspore.common.dtype as mstype
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.apply_adadelta = P.ApplyAdadelta()
        >>>         self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        >>>         self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
        >>>         self.accum_update = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum_update")
        >>>     def construct(self, lr, rho, epsilon, grad):
        >>>         out = self.apply_adadelta(self.var, self.accum, self.accum_update, lr, rho, epsilon, grad)
        >>>         return out
        >>> net = Net()
        >>> lr = Tensor(0.001, mstype.float32)
        >>> rho = Tensor(0.0, mstype.float32)
        >>> epsilon = Tensor(1e-6, mstype.float32)
        >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
        >>> result = net(lr, rho, epsilon, grad)
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum_update', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T1),
        sig.make_sig('rho', dtype=sig.sig_dtype.T2),
        sig.make_sig('epsilon', dtype=sig.sig_dtype.T3),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self):
        """init ApplyAdadelta"""

    def _check_scalar_shape(self, rank_arg_name, elem_arg_name, shape):
        """Check that a hyper-parameter shape is scalar-like: rank 0, or rank 1 with a single element."""
        rank = len(shape)
        validator.check_integer(rank_arg_name, rank, 1, Rel.LE, self.name)
        if rank == 1:
            validator.check_integer(elem_arg_name, shape[0], 1, Rel.EQ, self.name)

    def infer_shape(self, var_shape, accum_shape, accum_update_shape, lr_shape, rho_shape,
                    epsilon_shape, grad_shape):
        """Validate input shapes; outputs keep the state tensors' shapes."""
        # The accumulators and the gradient must share the variable's shape.
        validator.check("accum_shape", accum_shape, "var_shape", var_shape, Rel.EQ, self.name)
        validator.check("accum_update_shape", accum_update_shape, "var_shape", var_shape, Rel.EQ, self.name)
        validator.check("grad_shape", grad_shape, "var_shape", var_shape, Rel.EQ, self.name)
        # Each hyper-parameter must be a scalar (rank 0 or a 1-element vector).
        self._check_scalar_shape("lr's rank", "lr_shape[0]", lr_shape)
        self._check_scalar_shape("rho's rank", "rho_shape[0]", rho_shape)
        # Fixed typo: the original error-message string read "lepsilon's rank".
        self._check_scalar_shape("epsilon's rank", "epsilon_shape[0]", epsilon_shape)
        return var_shape, accum_shape, accum_update_shape

    def infer_dtype(self, var_dtype, accum_dtype, accum_update_dtype, lr_dtype, rho_dtype,
                    epsilon_dtype, grad_dtype):
        """Validate input dtypes; outputs keep the state tensors' dtypes."""
        valid_types = [mstype.float16, mstype.float32]
        # State tensors and gradient must share one floating-point dtype.
        args = {"var": var_dtype, "accum": accum_dtype, "accum_update": accum_update_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, valid_types, self.name)
        # Hyper-parameters may be scalars or tensors but must be float16/float32.
        validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"rho": rho_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"epsilon": epsilon_dtype}, valid_types, self.name)
        return var_dtype, accum_dtype, accum_update_dtype
class ApplyAdagrad(PrimitiveWithInfer):
    r"""
    Update relevant entries according to the adagrad scheme.

    .. math::
            accum += grad * grad
    .. math::
            var -= lr * grad * \frac{1}{\sqrt{accum}}

    Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules
    to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Args:
        update_slots (bool): If `True`, `accum` will be updated. Default: True.

    Inputs:
        - **var** (Parameter) - Variable to be updated. With float32 or float16 data type.
        - **accum** (Parameter) - Accumulation to be updated. The shape and dtype should be the same as `var`.
          With float32 or float16 data type.
        - **lr** (Union[Number, Tensor]) - The learning rate value, should be scalar. With float32 or float16 data type.
        - **grad** (Tensor) - A tensor for gradient. The shape and dtype should be the same as `var`.
          With float32 or float16 data type.

    Outputs:
        Tuple of 2 Tensor, the updated parameters.

        - **var** (Tensor) - The same shape and data type as `var`.
        - **accum** (Tensor) - The same shape and data type as `accum`.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> import mindspore.common.dtype as mstype
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.apply_adagrad = P.ApplyAdagrad()
        >>>         self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        >>>         self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
        >>>     def construct(self, lr, grad):
        >>>         out = self.apply_adagrad(self.var, self.accum, lr, grad)
        >>>         return out
        >>> net = Net()
        >>> lr = Tensor(0.001, mstype.float32)
        >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
        >>> result = net(lr, grad)
    """
    # var, accum and grad share one dtype group; lr may differ (scalar group T1).
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T1),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self, update_slots=True):
        """Initialize ApplyAdagrad; `update_slots` selects whether `accum` is refreshed."""
        validator.check_value_type("update_slots", update_slots, [bool], self.name)

    def infer_shape(self, var_shape, accum_shape, lr_shape, grad_shape):
        """`accum` and `grad` must match `var`; `lr` must be scalar-like."""
        validator.check('accum shape', accum_shape, 'var shape', var_shape, Rel.EQ, self.name)
        validator.check('grad shape', grad_shape, 'var shape', var_shape, Rel.EQ, self.name)
        lr_rank = len(lr_shape)
        validator.check_integer("lr's rank", lr_rank, 1, Rel.LE, self.name)
        if lr_rank == 1:
            validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
        return var_shape, accum_shape

    def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, grad_dtype):
        """All inputs must be float16/float32 with consistent dtypes."""
        permitted = [mstype.float16, mstype.float32]
        validator.check_tensor_type_same(
            {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}, permitted, self.name)
        validator.check_scalar_or_tensor_type_same({'lr': lr_dtype}, permitted, self.name)
        return var_dtype, accum_dtype
class ApplyAdagradV2(PrimitiveWithInfer):
    r"""
    Update relevant entries according to the adagradv2 scheme.

    .. math::
            accum += grad * grad
    .. math::
            var -= lr * grad * \frac{1}{\sqrt{accum} + \epsilon}

    Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules
    to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Args:
        epsilon (float): A small value added for numerical stability.
        update_slots (bool): If `True`, `accum` will be updated. Default: True.

    Inputs:
        - **var** (Parameter) - Variable to be updated. With float16 or float32 data type.
        - **accum** (Parameter) - Accumulation to be updated. The shape and dtype should be the same as `var`.
          With float16 or float32 data type.
        - **lr** (Union[Number, Tensor]) - The learning rate value, should be a float number or
          a scalar tensor with float16 or float32 data type.
        - **grad** (Tensor) - A tensor for gradient. The shape and dtype should be the same as `var`.
          With float16 or float32 data type.

    Outputs:
        Tuple of 2 Tensor, the updated parameters.

        - **var** (Tensor) - The same shape and data type as `var`.
        - **accum** (Tensor) - The same shape and data type as `accum`.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> import mindspore.common.dtype as mstype
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.apply_adagrad_v2 = P.ApplyAdagradV2(epsilon=1e-6)
        >>>         self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        >>>         self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
        >>>     def construct(self, lr, grad):
        >>>         out = self.apply_adagrad_v2(self.var, self.accum, lr, grad)
        >>>         return out
        >>> net = Net()
        >>> lr = Tensor(0.001, mstype.float32)
        >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
        >>> result = net(lr, grad)
    """
    # var, accum and grad share one dtype group; lr gets its own scalar group.
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T1),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )
    @prim_attr_register
    def __init__(self, epsilon, update_slots=True):
        """Initialize ApplyAdagradV2 and validate `epsilon` / `update_slots` attribute types."""
        validator.check_value_type("epsilon", epsilon, [float], self.name)
        validator.check_value_type("update_slots", update_slots, [bool], self.name)
    def infer_shape(self, var_shape, accum_shape, lr_shape, grad_shape):
        """`accum` and `grad` must match `var`'s shape; `lr` must be scalar-like."""
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('var shape', var_shape, 'grad shape', grad_shape, Rel.EQ, self.name)
        lr_shp_len = len(lr_shape)
        validator.check_integer("lr's rank", lr_shp_len, 1, Rel.LE, self.name)
        if lr_shp_len == 1:
            validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
        return var_shape, accum_shape
    def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, grad_dtype):
        """All inputs must be float16/float32; returns the dtypes of (var, accum)."""
        args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({'lr': lr_dtype}, [mstype.float16, mstype.float32], self.name)
        return var_dtype, accum_dtype
class SparseApplyAdagrad(PrimitiveWithInfer):
    r"""
    Update relevant entries according to the adagrad scheme.

    .. math::
            accum += grad * grad
    .. math::
            var -= lr * grad * (1 / sqrt(accum))

    Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules
    to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Args:
        lr (float): Learning rate.
        update_slots (bool): If `True`, `accum` will be updated. Default: True.
        use_locking (bool): If true, the var and accumulation tensors will be protected from being updated.
            Default: False.

    Inputs:
        - **var** (Parameter) - Variable to be updated. The data type must be float16 or float32.
        - **accum** (Parameter) - Accumulation to be updated. The shape and dtype should be the same as `var`.
        - **grad** (Tensor) - Gradient. The shape must be the same as `var`'s shape except first dimension.
          Has the same data type as `var`.
        - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.
          The shape of `indices` must be the same as `grad` in first dimension, the type must be int32.

    Outputs:
        Tuple of 2 Tensor, the updated parameters.

        - **var** (Tensor) - The same shape and data type as `var`.
        - **accum** (Tensor) - The same shape and data type as `accum`.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> import mindspore.common.dtype as mstype
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.sparse_apply_adagrad = P.SparseApplyAdagrad(lr=1e-8)
        >>>         self.var = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="var")
        >>>         self.accum = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="accum")
        >>>     def construct(self, grad, indices):
        >>>         out = self.sparse_apply_adagrad(self.var, self.accum, grad, indices)
        >>>         return out
        >>> net = Net()
        >>> grad = Tensor(np.random.rand(3, 3, 3).astype(np.float32))
        >>> indices = Tensor([0, 1, 2], mstype.int32)
        >>> result = net(grad, indices)
    """
    # var, accum and grad share one dtype group; indices are typed independently.
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )

    @prim_attr_register
    def __init__(self, lr, update_slots=True, use_locking=False):
        """Initialize SparseApplyAdagrad; `lr` must be a finite float."""
        validator.check_value_type("lr", lr, [float], self.name)
        # Rel.INC_NEITHER on the open interval (-inf, inf) rejects infinite lr values.
        validator.check_number_range("lr", lr, float("-inf"), float("inf"), Rel.INC_NEITHER, self.name)
        validator.check_value_type("update_slots", update_slots, [bool], self.name)
        validator.check_value_type("use_locking", use_locking, [bool], self.name)

    def infer_shape(self, var_shape, accum_shape, grad_shape, indices_shape):
        """Check shape relations and return the output shapes of (var, accum)."""
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        var_rank = len(var_shape)
        validator.check('len of var shape', var_rank, 'len of grad shape', len(grad_shape), Rel.EQ, self.name)
        if var_rank > 1:
            # grad matches var on every axis except the (indexed) first one.
            validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
        return var_shape, accum_shape

    def infer_dtype(self, var_type, accum_type, grad_type, indices_type):
        """Float inputs must share a float16/float32 dtype; indices must be int32."""
        float_kinds = [mstype.float16, mstype.float32]
        validator.check_tensor_type_same(
            {'var': var_type, 'accum': accum_type, 'grad': grad_type}, float_kinds, self.name)
        validator.check_tensor_type_same({'indices': indices_type}, [mstype.int32], self.name)
        return var_type, accum_type
class SparseApplyAdagradV2(PrimitiveWithInfer):
    r"""
    Update relevant entries according to the adagrad scheme.

    .. math::
            accum += grad * grad
    .. math::
            var -= lr * grad * \frac{1}{\sqrt{accum} + \epsilon}

    Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules
    to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Args:
        lr (float): Learning rate.
        epsilon (float): A small value added for numerical stability.
        use_locking (bool): If `True`, the var and accumulation tensors will be protected from being updated.
            Default: False.
        update_slots (bool): If `True`, the computation logic will be different to `False`. Default: True.

    Inputs:
        - **var** (Parameter) - Variable to be updated. The data type must be float16 or float32.
        - **accum** (Parameter) - Accumulation to be updated. The shape and dtype should be the same as `var`.
        - **grad** (Tensor) - Gradient. The shape must be the same as `var`'s shape except first dimension.
          Has the same data type as `var`.
        - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.
          The shape of `indices` must be the same as `grad` in first dimension, the type must be int32.

    Outputs:
        Tuple of 2 Tensor, the updated parameters.

        - **var** (Tensor) - The same shape and data type as `var`.
        - **accum** (Tensor) - The same shape and data type as `accum`.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> import mindspore.common.dtype as mstype
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.sparse_apply_adagrad_v2 = P.SparseApplyAdagradV2(lr=1e-8, epsilon=1e-6)
        >>>         self.var = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="var")
        >>>         self.accum = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="accum")
        >>>
        >>>     def construct(self, grad, indices):
        >>>         out = self.sparse_apply_adagrad_v2(self.var, self.accum, grad, indices)
        >>>         return out
        >>> net = Net()
        >>> grad = Tensor(np.random.rand(3, 3, 3).astype(np.float32))
        >>> indices = Tensor([0, 1, 2], mstype.int32)
        >>> result = net(grad, indices)
    """
    # var, accum and grad share one dtype group; indices are typed independently.
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )
    @prim_attr_register
    def __init__(self, lr, epsilon, use_locking=False, update_slots=True):
        """Initialize SparseApplyAdagradV2 and validate the scalar attributes."""
        self.lr = validator.check_value_type("lr", lr, [float], self.name)
        self.epsilon = validator.check_value_type("epsilon", epsilon, [float], self.name)
        # Bug fix: these two assignments previously had their sources swapped —
        # self.use_locking was set from `update_slots` (validated under the label
        # "update_slots") and self.update_slots from `use_locking`. Each attribute
        # now stores its own, correctly-labelled argument.
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
        self.update_slots = validator.check_value_type("update_slots", update_slots, [bool], self.name)
    def infer_shape(self, var_shape, accum_shape, grad_shape, indices_shape):
        """Check shape relations and return the output shapes of (var, accum)."""
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('len of var shape', len(var_shape), 'len of grad shape', len(grad_shape), Rel.EQ, self.name)
        if len(var_shape) > 1:
            # grad matches var on every axis except the (indexed) first one.
            validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
        return var_shape, accum_shape
    def infer_dtype(self, var_type, accum_type, grad_type, indices_type):
        """Float inputs must share a float16/float32 dtype; indices must be int32."""
        args = {'var': var_type, 'accum': accum_type, 'grad': grad_type}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        validator.check_tensor_type_same({'indices': indices_type}, [mstype.int32], self.name)
        return var_type, accum_type
class ApplyProximalAdagrad(PrimitiveWithInfer):
    r"""
    Update relevant entries according to the proximal adagrad algorithm.

    .. math::
            accum += grad * grad
    .. math::
            \text{prox_v} = var - lr * grad * \frac{1}{\sqrt{accum}}
    .. math::
            var = \frac{sign(\text{prox_v})}{1 + lr * l2} * \max(\left| \text{prox_v} \right| - lr * l1, 0)

    Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules
    to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Args:
        use_locking (bool): If true, the var and accumulation tensors will be protected from being updated.
            Default: False.

    Inputs:
        - **var** (Parameter) - Variable to be updated. The data type should be float16 or float32.
        - **accum** (Parameter) - Accumulation to be updated. Must has the same shape and dtype as `var`.
        - **lr** (Union[Number, Tensor]) - The learning rate value, should be scalar. The data type should be
          float16 or float32.
        - **l1** (Union[Number, Tensor]) - l1 regularization strength, should be scalar. The data type should be
          float16 or float32.
        - **l2** (Union[Number, Tensor]) - l2 regularization strength, should be scalar. The data type should be
          float16 or float32.
        - **grad** (Tensor) - Gradient with the same shape and dtype as `var`.

    Outputs:
        Tuple of 2 Tensor, the updated parameters.

        - **var** (Tensor) - The same shape and data type as `var`.
        - **accum** (Tensor) - The same shape and data type as `accum`.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.apply_proximal_adagrad = P.ApplyProximalAdagrad()
        >>>         self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        >>>         self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
        >>>         self.lr = 0.01
        >>>         self.l1 = 0.0
        >>>         self.l2 = 0.0
        >>>     def construct(self, grad):
        >>>         out = self.apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1, self.l2, grad)
        >>>         return out
        >>> net = Net()
        >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
        >>> output = net(grad)
    """
    # var, accum and grad share one dtype group; lr, l1 and l2 each get their own.
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T1),
        sig.make_sig('l1', dtype=sig.sig_dtype.T2),
        sig.make_sig('l2', dtype=sig.sig_dtype.T3),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self, use_locking=False):
        """Initialize ApplyProximalAdagrad: register IO names, validate `use_locking`."""
        self.init_prim_io_names(inputs=['var', 'accum', 'lr', 'l1', 'l2', 'grad'],
                                outputs=['var', 'accum'])
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)

    def infer_shape(self, var_shape, accum_shape, lr_shape, l1_shape, l2_shape, grad_shape):
        """`accum`/`grad` must equal `var`'s shape; lr, l1 and l2 must be scalar-like."""
        validator.check('accum shape', accum_shape, 'var shape', var_shape, Rel.EQ, self.name)
        validator.check('grad shape', grad_shape, 'var shape', var_shape, Rel.EQ, self.name)
        # Identical scalar-rank check for each hyper-parameter, in input order.
        for label, shape in (("lr", lr_shape), ("l1", l1_shape), ("l2", l2_shape)):
            rank = len(shape)
            validator.check_integer(f"{label}'s rank", rank, 1, Rel.LE, self.name)
            if rank == 1:
                validator.check_integer(f"{label}_shape[0]", shape[0], 1, Rel.EQ, self.name)
        return var_shape, accum_shape

    def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, l1_dtype, l2_dtype, grad_dtype):
        """All inputs must be float16/float32; returns the dtypes of (var, accum)."""
        permitted = [mstype.float16, mstype.float32]
        validator.check_tensor_type_same(
            {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}, permitted, self.name)
        for label, dtype in (("lr", lr_dtype), ("l1", l1_dtype), ("l2", l2_dtype)):
            validator.check_scalar_or_tensor_type_same({label: dtype}, permitted, self.name)
        return var_dtype, accum_dtype
class SparseApplyProximalAdagrad(PrimitiveWithCheck):
    r"""
    Update relevant entries according to the proximal adagrad algorithm. Compared with ApplyProximalAdagrad,
    an additional index tensor is input.

    .. math::
            accum += grad * grad
    .. math::
            \text{prox_v} = var - lr * grad * \frac{1}{\sqrt{accum}}
    .. math::
            var = \frac{sign(\text{prox_v})}{1 + lr * l2} * \max(\left| \text{prox_v} \right| - lr * l1, 0)

    Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules
    to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Args:
        use_locking (bool): If true, the var and accumulation tensors will be protected from being updated.
            Default: False.

    Inputs:
        - **var** (Parameter) - Variable tensor to be updated. The data type must be float16 or float32.
        - **accum** (Parameter) - Variable tensor to be updated, has the same dtype as `var`.
        - **lr** (Union[Number, Tensor]) - The learning rate value. Should be a float number or
          a scalar tensor with float16 or float32 data type.
        - **l1** (Union[Number, Tensor]) - l1 regularization strength. Should be a float number or
          a scalar tensor with float16 or float32 data type.
        - **l2** (Union[Number, Tensor]) - l2 regularization strength. Should be a float number or
          a scalar tensor with float16 or float32 data type.
        - **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.
        - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.

    Outputs:
        Tuple of 2 Tensor, the updated parameters.

        - **var** (Tensor) - The same shape and data type as `var`.
        - **accum** (Tensor) - The same shape and data type as `accum`.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.sparse_apply_proximal_adagrad = P.SparseApplyProximalAdagrad()
        >>>         self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        >>>         self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
        >>>         self.lr = 0.01
        >>>         self.l1 = 0.0
        >>>         self.l2 = 0.0
        >>>     def construct(self, grad, indices):
        >>>         out = self.sparse_apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1,
        >>>                                                  self.l2, grad, indices)
        >>>         return out
        >>> net = Net()
        >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
        >>> indices = Tensor(np.ones((3,), np.int32))
        >>> output = net(grad, indices)
    """
    # var, accum and grad share one dtype group; lr/l1/l2/indices are independent.
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T1),
        sig.make_sig('l1', dtype=sig.sig_dtype.T2),
        sig.make_sig('l2', dtype=sig.sig_dtype.T3),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T4),
    )
    @prim_attr_register
    def __init__(self, use_locking=False):
        """Initialize SparseApplyProximalAdagrad: register IO names, validate `use_locking`."""
        self.init_prim_io_names(inputs=['var', 'accum', 'lr', 'l1', 'l2', 'grad', 'indices'],
                                outputs=['var', 'accum'])
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
    def check_shape(self, var_shape, accum_shape, lr_shape, l1_shape, l2_shape, grad_shape, indices_shape):
        """Only `indices` is shape-checked here: it must be a rank-1 vector."""
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
    def check_dtype(self, var_dtype, accum_dtype, lr_dtype, l1_dtype, l2_dtype, grad_dtype, indices_dtype):
        """Float inputs must be float16/float32; indices may be any signed/unsigned 16/32/64-bit int."""
        args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, [mstype.float16, mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({"l1": l1_dtype}, [mstype.float16, mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({"l2": l2_dtype}, [mstype.float16, mstype.float32], self.name)
        valid_types = [mstype.int16, mstype.int32, mstype.int64,
                       mstype.uint16, mstype.uint32, mstype.uint64]
        validator.check_tensor_type_same({'indices': indices_dtype}, valid_types, self.name)
class ApplyAddSign(PrimitiveWithInfer):
    r"""
    Update relevant entries according to the AddSign algorithm.

    .. math::
        \begin{array}{ll} \\
            m_{t} = \beta * m_{t-1} + (1 - \beta) * g \\
            \text{update} = (\alpha + \text{sign_decay} * sign(g) * sign(m)) * g \\
            var = var - lr_{t} * \text{update}
        \end{array}

    :math:`t` represents updating step while :math:`m` represents the 1st moment vector, :math:`m_{t-1}`
    is the last moment of :math:`m_{t}`, :math:`lr` represents scaling factor `lr`, :math:`g` represents `grad`.

    Inputs of `var`, `m` and `grad` comply with the implicit type conversion rules
    to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Inputs:
        - **var** (Parameter) - Variable tensor to be updated. With float32 or float16 data type.
        - **m** (Parameter) - Variable tensor to be updated, has the same dtype as `var`.
        - **lr** (Union[Number, Tensor]) - The learning rate value, should be a scalar.
          With float32 or float16 data type.
        - **alpha** (Union[Number, Tensor]) - Should be a scalar. With float32 or float16 data type.
        - **sign_decay** (Union[Number, Tensor]) - Should be a scalar. With float32 or float16 data type.
        - **beta** (Union[Number, Tensor]) - The exponential decay rate, should be a scalar.
          With float32 or float16 data type.
        - **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.

    Outputs:
        Tuple of 2 Tensor, the updated parameters.

        - **var** (Tensor) - The same shape and data type as `var`.
        - **m** (Tensor) - The same shape and data type as `m`.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.apply_add_sign = P.ApplyAddSign()
        >>>         self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        >>>         self.m = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="m")
        >>>         self.lr = 0.001
        >>>         self.alpha = 1.0
        >>>         self.sign_decay = 0.99
        >>>         self.beta = 0.9
        >>>     def construct(self, grad):
        >>>         out = self.apply_add_sign(self.var, self.m, self.lr, self.alpha, self.sign_decay, self.beta, grad)
        >>>         return out
        >>> net = Net()
        >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
        >>> output = net(grad)
    """
    # var, m and grad share one dtype group; lr and alpha get independent groups.
    # NOTE(review): `beta` shares dtype group T3 with `sign_decay` rather than
    # having its own group — confirm this is intentional.
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T1),
        sig.make_sig('alpha', dtype=sig.sig_dtype.T2),
        sig.make_sig('sign_decay', dtype=sig.sig_dtype.T3),
        sig.make_sig('beta', dtype=sig.sig_dtype.T3),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )
    @prim_attr_register
    def __init__(self):
        """Initialize ApplyAddSign."""
    def infer_shape(self, var_shape, m_shape, lr_shape, alpha_shape, sign_decay_shape, beta_shape, grad_shape):
        """`m` and `grad` must match `var`'s shape; lr/alpha/sign_decay/beta must be scalar-like."""
        validator.check('m_shape', m_shape, 'var_shape', var_shape, Rel.EQ, self.name)
        validator.check('grad_shape', grad_shape, 'var_shape', var_shape, Rel.EQ, self.name)
        lr_shape_len = len(lr_shape)
        validator.check_integer("lr's rank", lr_shape_len, 1, Rel.LE, self.name)
        if lr_shape_len == 1:
            validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
        alpha_shape_len = len(alpha_shape)
        validator.check_integer("alpha's rank", alpha_shape_len, 1, Rel.LE, self.name)
        if alpha_shape_len == 1:
            validator.check_integer("alpha_shape[0]", alpha_shape[0], 1, Rel.EQ, self.name)
        sign_decay_shape_len = len(sign_decay_shape)
        validator.check_integer("sign_decay's rank", sign_decay_shape_len, 1, Rel.LE, self.name)
        if sign_decay_shape_len == 1:
            validator.check_integer("sign_decay_shape[0]", sign_decay_shape[0], 1, Rel.EQ, self.name)
        beta_shape_len = len(beta_shape)
        validator.check_integer("beta's rank", beta_shape_len, 1, Rel.LE, self.name)
        if beta_shape_len == 1:
            validator.check_integer("beta_shape[0]", beta_shape[0], 1, Rel.EQ, self.name)
        return var_shape, m_shape
    def infer_dtype(self, var_dtype, m_dtype, lr_dtype, alpha_dtype, sign_decay_dtype, beta_dtype, grad_dtype):
        """All inputs must be float16/float32; returns the dtypes of (var, m)."""
        valid_types = [mstype.float16, mstype.float32]
        args = {'var': var_dtype, 'm': m_dtype, 'grad': grad_dtype}
        validator.check_tensor_type_same(args, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"alpha": alpha_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"sign_decay": sign_decay_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"beta": beta_dtype}, valid_types, self.name)
        return var_dtype, m_dtype
class ApplyPowerSign(PrimitiveWithInfer):
    r"""
    Update relevant entries according to the PowerSign algorithm.

    .. math::
        \begin{array}{ll} \\
            m_{t} = \beta * m_{t-1} + (1 - \beta) * g \\
            \text{update} = \exp(\text{logbase} * \text{sign_decay} * sign(g) * sign(m)) * g \\
            var = var - lr_{t} * \text{update}
        \end{array}

    :math:`t` represents updating step while :math:`m` represents the 1st moment vector, :math:`m_{t-1}`
    is the last moment of :math:`m_{t}`, :math:`lr` represents scaling factor `lr`, :math:`g` represents `grad`.

    All of inputs comply with the implicit type conversion rules to make the data types consistent.
    If `lr`, `logbase`, `sign_decay` or `beta` is a number, the number is automatically converted to Tensor,
    and the data type is consistent with the Tensor data type involved in the operation.
    If inputs are tensors and have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Inputs:
        - **var** (Parameter) - Variable tensor to be updated. With float32 or float16 data type.
          If data type of `var` is float16, all inputs must have the same data type as `var`.
        - **m** (Parameter) - Variable tensor to be updated, has the same dtype as `var`.
        - **lr** (Union[Number, Tensor]) - The learning rate value, should be a scalar.
          With float32 or float16 data type.
        - **logbase** (Union[Number, Tensor]) - Should be a scalar. With float32 or float16 data type.
        - **sign_decay** (Union[Number, Tensor]) - Should be a scalar. With float32 or float16 data type.
        - **beta** (Union[Number, Tensor]) - The exponential decay rate, should be a scalar.
          With float32 or float16 data type.
        - **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.

    Outputs:
        Tuple of 2 Tensor, the updated parameters.

        - **var** (Tensor) - The same shape and data type as `var`.
        - **m** (Tensor) - The same shape and data type as `m`.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.apply_power_sign = P.ApplyPowerSign()
        >>>         self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        >>>         self.m = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="m")
        >>>         self.lr = 0.001
        >>>         self.logbase = np.e
        >>>         self.sign_decay = 0.99
        >>>         self.beta = 0.9
        >>>     def construct(self, grad):
        >>>         out = self.apply_power_sign(self.var, self.m, self.lr, self.logbase,
        >>>                                     self.sign_decay, self.beta, grad)
        >>>         return out
        >>> net = Net()
        >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
        >>> output = net(grad)
    """
    # Unlike ApplyAddSign, every input here shares the single dtype group T.
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T),
        sig.make_sig('logbase', dtype=sig.sig_dtype.T),
        sig.make_sig('sign_decay', dtype=sig.sig_dtype.T),
        sig.make_sig('beta', dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )
    @prim_attr_register
    def __init__(self):
        """Initialize ApplyPowerSign."""
    def infer_shape(self, var_shape, m_shape, lr_shape, logbase_shape, sign_decay_shape, beta_shape, grad_shape):
        """`m` and `grad` must match `var`'s shape; lr/logbase/sign_decay/beta must be scalar-like."""
        validator.check('m_shape', m_shape, 'var_shape', var_shape, Rel.EQ, self.name)
        validator.check('grad_shape', grad_shape, 'var_shape', var_shape, Rel.EQ, self.name)
        lr_shape_len = len(lr_shape)
        validator.check_integer("lr's rank", lr_shape_len, 1, Rel.LE, self.name)
        if lr_shape_len == 1:
            validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
        logbase_shape_len = len(logbase_shape)
        validator.check_integer("logbase's rank", logbase_shape_len, 1, Rel.LE, self.name)
        if logbase_shape_len == 1:
            validator.check_integer("logbase_shape[0]", logbase_shape[0], 1, Rel.EQ, self.name)
        sign_decay_shape_len = len(sign_decay_shape)
        validator.check_integer("sign_decay's rank", sign_decay_shape_len, 1, Rel.LE, self.name)
        if sign_decay_shape_len == 1:
            validator.check_integer("sign_decay_shape[0]", sign_decay_shape[0], 1, Rel.EQ, self.name)
        beta_shape_len = len(beta_shape)
        validator.check_integer("beta's rank", beta_shape_len, 1, Rel.LE, self.name)
        if beta_shape_len == 1:
            validator.check_integer("beta_shape[0]", beta_shape[0], 1, Rel.EQ, self.name)
        return var_shape, m_shape
    def infer_dtype(self, var_dtype, m_dtype, lr_dtype, logbase_dtype, sign_decay_dtype, beta_dtype, grad_dtype):
        """All inputs must be float16/float32; returns the dtypes of (var, m)."""
        valid_types = [mstype.float16, mstype.float32]
        args = {'var': var_dtype, 'm': m_dtype, 'grad': grad_dtype}
        validator.check_tensor_type_same(args, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"logbase": logbase_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"sign_decay": sign_decay_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"beta": beta_dtype}, valid_types, self.name)
        return var_dtype, m_dtype
class ApplyGradientDescent(PrimitiveWithInfer):
    r"""
    Update relevant entries according to the following formula.

    .. math::
        var = var - \alpha * \delta

    Inputs of `var` and `delta` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Inputs:
        - **var** (Parameter) - Variable tensor to be updated. With float32 or float16 data type.
        - **alpha** (Union[Number, Tensor]) - Scaling factor, should be a scalar. With float32 or float16 data type.
        - **delta** (Tensor) - A tensor for the change, has the same type as `var`.

    Outputs:
        Tensor, represents the updated `var`.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.apply_gradient_descent = P.ApplyGradientDescent()
        >>>         self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        >>>         self.alpha = 0.001
        >>>     def construct(self, delta):
        >>>         out = self.apply_gradient_descent(self.var, self.alpha, delta)
        >>>         return out
        >>> net = Net()
        >>> delta = Tensor(np.random.rand(3, 3).astype(np.float32))
        >>> output = net(delta)
    """
    # `var` is updated in place; `alpha` sits in its own dtype group (T1) so a
    # Python number does not force conversion of `var`/`delta`.
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('alpha', dtype=sig.sig_dtype.T1),
        sig.make_sig('delta', dtype=sig.sig_dtype.T),
    )
    @prim_attr_register
    def __init__(self):
        """Initialize ApplyGradientDescent."""
    def infer_shape(self, var_shape, alpha_shape, delta_shape):
        validator.check('delta shape', delta_shape, 'var shape', var_shape, Rel.EQ, self.name)
        # alpha must be a scalar: rank 0, or rank 1 with a single element.
        alpha_shape_len = len(alpha_shape)
        validator.check_integer("alpha's rank", alpha_shape_len, 1, Rel.LE, self.name)
        if alpha_shape_len == 1:
            validator.check_integer("alpha_shape[0]", alpha_shape[0], 1, Rel.EQ, self.name)
        return var_shape
    def infer_dtype(self, var_dtype, alpha_dtype, delta_dtype):
        valid_types = [mstype.float16, mstype.float32]
        args = {'var': var_dtype, 'delta': delta_dtype}
        validator.check_tensor_type_same(args, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"alpha": alpha_dtype}, valid_types, self.name)
        return var_dtype
class ApplyProximalGradientDescent(PrimitiveWithInfer):
    r"""
    Update relevant entries according to the FOBOS(Forward Backward Splitting) algorithm.

    .. math::
        \text{prox_v} = var - \alpha * \delta

    .. math::
        var = \frac{sign(\text{prox_v})}{1 + \alpha * l2} * \max(\left| \text{prox_v} \right| - alpha * l1, 0)

    Inputs of `var` and `delta` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Inputs:
        - **var** (Parameter) - Variable tensor to be updated. With float32 or float16 data type.
        - **alpha** (Union[Number, Tensor]) - Scaling factor, should be a scalar. With float32 or float16 data type.
        - **l1** (Union[Number, Tensor]) - l1 regularization strength, should be scalar.
          With float32 or float16 data type.
        - **l2** (Union[Number, Tensor]) - l2 regularization strength, should be scalar.
          With float32 or float16 data type.
        - **delta** (Tensor) - A tensor for the change, has the same type as `var`.

    Outputs:
        Tensor, represents the updated `var`.

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter
        >>> from mindspore.ops import operations as P
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.apply_proximal_gradient_descent = P.ApplyProximalGradientDescent()
        >>>         self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        >>>         self.alpha = 0.001
        >>>         self.l1 = 0.0
        >>>         self.l2 = 0.0
        >>>     def construct(self, delta):
        >>>         out = self.apply_proximal_gradient_descent(self.var, self.alpha, self.l1, self.l2, delta)
        >>>         return out
        >>> net = Net()
        >>> delta = Tensor(np.random.rand(3, 3).astype(np.float32))
        >>> output = net(delta)
    """
    # Each scalar hyper-parameter gets its own dtype group (T1..T3) so Python
    # numbers do not force conversion of the `var`/`delta` tensor group (T).
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('alpha', dtype=sig.sig_dtype.T1),
        sig.make_sig('l1', dtype=sig.sig_dtype.T2),
        sig.make_sig('l2', dtype=sig.sig_dtype.T3),
        sig.make_sig('delta', dtype=sig.sig_dtype.T),
    )
    @prim_attr_register
    def __init__(self):
        """Initialize ApplyProximalGradientDescent."""
    def infer_shape(self, var_shape, alpha_shape, l1_shape, l2_shape, delta_shape):
        validator.check('delta shape', delta_shape, 'var shape', var_shape, Rel.EQ, self.name)
        # alpha, l1 and l2 must each be a scalar: rank 0, or rank 1 with one element.
        alpha_shape_len = len(alpha_shape)
        validator.check_integer("alpha's rank", alpha_shape_len, 1, Rel.LE, self.name)
        if alpha_shape_len == 1:
            validator.check_integer("alpha_shape[0]", alpha_shape[0], 1, Rel.EQ, self.name)
        l1_shape_len = len(l1_shape)
        validator.check_integer("l1's rank", l1_shape_len, 1, Rel.LE, self.name)
        if l1_shape_len == 1:
            validator.check_integer("l1_shape[0]", l1_shape[0], 1, Rel.EQ, self.name)
        l2_shape_len = len(l2_shape)
        validator.check_integer("l2's rank", l2_shape_len, 1, Rel.LE, self.name)
        if l2_shape_len == 1:
            validator.check_integer("l2_shape[0]", l2_shape[0], 1, Rel.EQ, self.name)
        return var_shape
    def infer_dtype(self, var_dtype, alpha_dtype, l1_dtype, l2_dtype, delta_dtype):
        valid_types = [mstype.float16, mstype.float32]
        args = {'var': var_dtype, 'delta': delta_dtype}
        validator.check_tensor_type_same(args, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"alpha": alpha_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"l1": l1_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"l2": l2_dtype}, valid_types, self.name)
        return var_dtype
class LARSUpdate(PrimitiveWithInfer):
    """
    Conduct lars (layer-wise adaptive rate scaling) update on the square sum of gradient.

    Args:
        epsilon (float): Term added to the denominator to improve numerical stability. Default: 1e-05.
        hyperpara (float): Trust coefficient for calculating the local learning rate. Default: 0.001.
        use_clip (bool): Whether to use clip operation for calculating the local learning rate. Default: False.

    Inputs:
        - **weight** (Tensor) - The weight to be updated.
        - **gradient** (Tensor) - The gradient of weight, which has the same shape and dtype with weight.
        - **norm_weight** (Tensor) - A scalar tensor, representing the square sum of weight.
        - **norm_gradient** (Tensor) - A scalar tensor, representing the square sum of gradient.
        - **weight_decay** (Union[Number, Tensor]) - Weight decay. It should be a scalar tensor or number.
        - **learning_rate** (Union[Number, Tensor]) - Learning rate. It should be a scalar tensor or number.

    Outputs:
        Tensor, represents the new gradient.

    Examples:
        >>> from mindspore import Tensor
        >>> from mindspore.ops import operations as P
        >>> from mindspore.ops import functional as F
        >>> import mindspore.nn as nn
        >>> import numpy as np
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.lars = P.LARSUpdate()
        >>>         self.reduce = P.ReduceSum()
        >>>     def construct(self, weight, gradient):
        >>>         w_square_sum = self.reduce(F.square(weight))
        >>>         grad_square_sum = self.reduce(F.square(gradient))
        >>>         grad_t = self.lars(weight, gradient, w_square_sum, grad_square_sum, 0.0, 1.0)
        >>>         return grad_t
        >>> weight = np.random.random(size=(2, 3)).astype(np.float32)
        >>> gradient = np.random.random(size=(2, 3)).astype(np.float32)
        >>> net = Net()
        >>> ms_output = net(Tensor(weight), Tensor(gradient))
    """
    @prim_attr_register
    def __init__(self, epsilon=1e-05, hyperpara=0.001, use_clip=False):
        """Initialize LARSUpdate."""
        validator.check_value_type("epsilon", epsilon, [float], self.name)
        validator.check_value_type("hyperpara", hyperpara, [float], self.name)
        validator.check_value_type("use_clip", use_clip, [bool], self.name)
    def infer_shape(self, weight_shape, gradient_shape, norm_weight_shape, norm_gradient_shape, weight_decay_shape,
                    learning_rate_shape):
        validator.check("weight shape", weight_shape, "gradient shape", gradient_shape, Rel.EQ, self.name)
        validator.check("norm weight shape", norm_weight_shape, "norm gradient shape", norm_gradient_shape, Rel.EQ,
                        self.name)
        # weight_decay and learning_rate must be scalars: rank 0, or rank 1 of size 1.
        shp_len = len(weight_decay_shape)
        validator.check_integer("weight decay's rank", shp_len, 1, Rel.LE, self.name)
        if shp_len == 1:
            validator.check_integer("weight_decay_shape[0]", weight_decay_shape[0], 1, Rel.EQ, self.name)
        shp_len = len(learning_rate_shape)
        validator.check_integer("learning rate's rank", shp_len, 1, Rel.LE, self.name)
        if shp_len == 1:
            validator.check_integer("learning_rate_shape[0]", learning_rate_shape[0], 1, Rel.EQ, self.name)
        return weight_shape
    def infer_dtype(self, weight_dtype, gradient_dtype, norm_weight_dtype, norm_gradient_dtype,
                    weight_decay_dtype, learning_rate_dtype):
        # The four tensor inputs must share one dtype (float16/32 or int16/32);
        # the two scalar inputs may additionally be float64.
        args = {"Weight dtype": weight_dtype, "gradient dtype": gradient_dtype, "norm weight dtype": norm_weight_dtype,
                "norm gradient dtype": norm_gradient_dtype}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32, mstype.int16, mstype.int32], self.name)
        validator.check_scalar_or_tensor_type_same({"weight_decay": weight_decay_dtype},
                                                   [mstype.float16, mstype.float32, mstype.float64], self.name)
        validator.check_scalar_or_tensor_type_same({"learning_rate": learning_rate_dtype},
                                                   [mstype.float16, mstype.float32, mstype.float64], self.name)
        return weight_dtype
class ApplyFtrl(PrimitiveWithInfer):
    """
    Update relevant entries according to the FTRL scheme.

    Args:
        use_locking (bool): Use locks for updating operation if True. Default: False.

    Inputs:
        - **var** (Parameter) - The variable to be updated. The data type should be float16 or float32.
        - **accum** (Parameter) - The accumulation to be updated, must be same type and shape as `var`.
        - **linear** (Parameter) - the linear coefficient to be updated, must be same type and shape as `var`.
        - **grad** (Tensor) - Gradient. The data type should be float16 or float32.
        - **lr** (Union[Number, Tensor]) - The learning rate value, must be positive. Default: 0.001.
          It should be a float number or a scalar tensor with float16 or float32 data type.
        - **l1** (Union[Number, Tensor]) - l1 regularization strength, must be greater than or equal to zero.
          Default: 0.0. It should be a float number or a scalar tensor with float16 or float32 data type.
        - **l2** (Union[Number, Tensor]) - l2 regularization strength, must be greater than or equal to zero.
          Default: 0.0. It should be a float number or a scalar tensor with float16 or float32 data type.
        - **lr_power** (Union[Number, Tensor]) - Learning rate power controls how the learning rate decreases
          during training, must be less than or equal to zero. Use fixed learning rate if lr_power is zero.
          Default: -0.5. It should be a float number or a scalar tensor with float16 or float32 data type.

    Outputs:
        Tensor, represents the updated `var`.

    Examples:
        >>> import mindspore
        >>> import mindspore.nn as nn
        >>> import numpy as np
        >>> from mindspore import Parameter
        >>> from mindspore import Tensor
        >>> from mindspore.ops import operations as P
        >>> class ApplyFtrlNet(nn.Cell):
        >>>     def __init__(self):
        >>>         super(ApplyFtrlNet, self).__init__()
        >>>         self.apply_ftrl = P.ApplyFtrl()
        >>>         self.lr = 0.001
        >>>         self.l1 = 0.0
        >>>         self.l2 = 0.0
        >>>         self.lr_power = -0.5
        >>>         self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        >>>         self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
        >>>         self.linear = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="linear")
        >>>
        >>>     def construct(self, grad):
        >>>         out = self.apply_ftrl(self.var, self.accum, self.linear, grad, self.lr, self.l1, self.l2,
        >>>                               self.lr_power)
        >>>         return out
        >>>
        >>> net = ApplyFtrlNet()
        >>> input_x = Tensor(np.random.randint(-4, 4, (3, 3)), mindspore.float32)
        >>> result = net(input_x)
        [[0.67455846 0.14630564 0.160499  ]
         [0.16329421 0.00415689 0.05202988]
         [0.18672481 0.17418946 0.36420345]]
    """
    @prim_attr_register
    def __init__(self, use_locking=False):
        """Initialize ApplyFtrl."""
        self.init_prim_io_names(inputs=['var', 'accum', 'linear', 'grad', 'lr', 'l1', 'l2', 'lr_power'],
                                outputs=['output'])
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
        # The TBE kernel on Ascend exposes three outputs instead of one (see infer_*).
        self.is_tbe = context.get_context("device_target") == "Ascend"
    def infer_shape(self, var_shape, accum_shape, linear_shape, grad_shape, lr_shape, l1_shape, l2_shape,
                    lr_power_shape):
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)
        if self.is_tbe:
            # NOTE(review): all three outputs take var's shape here; presumably they are
            # the updated var/accum/linear (all checked equal above) -- confirm vs kernel.
            return var_shape, var_shape, var_shape
        return var_shape
    def infer_dtype(self, var_type, accum_type, linear_type, grad_type, lr_type, l1_type, l2_type, lr_power_type):
        valid_types = [mstype.float16, mstype.float32]
        args = {'var': var_type, 'accum': accum_type, 'linear': linear_type, 'grad': grad_type}
        validator.check_tensor_type_same(args, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"lr": lr_type}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"l1": l1_type}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"l2": l2_type}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"lr_power": lr_power_type}, valid_types, self.name)
        if self.is_tbe:
            return var_type, var_type, var_type
        return var_type
class SparseApplyFtrl(PrimitiveWithCheck):
    """
    Update relevant entries according to the FTRL-proximal scheme.

    All of inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Args:
        lr (float): The learning rate value, must be positive.
        l1 (float): l1 regularization strength, must be greater than or equal to zero.
        l2 (float): l2 regularization strength, must be greater than or equal to zero.
        lr_power (float): Learning rate power controls how the learning rate decreases during training,
            must be less than or equal to zero. Use fixed learning rate if `lr_power` is zero.
        use_locking (bool): Use locks for updating operation if True. Default: False.

    Inputs:
        - **var** (Parameter) - The variable to be updated. The data type must be float16 or float32.
        - **accum** (Parameter) - The accumulation to be updated, must be same type and shape as `var`.
        - **linear** (Parameter) - the linear coefficient to be updated, must be same type and shape as `var`.
        - **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.
        - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.
          The shape of `indices` must be the same as `grad` in first dimension. The type must be int32.

    Outputs:
        - **var** (Tensor) - Tensor, has the same shape and type as `var`.
        - **accum** (Tensor) - Tensor, has the same shape and type as `accum`.
        - **linear** (Tensor) - Tensor, has the same shape and type as `linear`.

    Examples:
        >>> import mindspore
        >>> import mindspore.nn as nn
        >>> import numpy as np
        >>> from mindspore import Parameter
        >>> from mindspore import Tensor
        >>> from mindspore.ops import operations as P
        >>> class SparseApplyFtrlNet(nn.Cell):
        >>>     def __init__(self):
        >>>         super(SparseApplyFtrlNet, self).__init__()
        >>>         self.sparse_apply_ftrl = P.SparseApplyFtrl(lr=0.01, l1=0.0, l2=0.0, lr_power=-0.5)
        >>>         self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        >>>         self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
        >>>         self.linear = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="linear")
        >>>
        >>>     def construct(self, grad, indices):
        >>>         out = self.sparse_apply_ftrl(self.var, self.accum, self.linear, grad, indices)
        >>>         return out
        >>>
        >>> net = SparseApplyFtrlNet()
        >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
        >>> indices = Tensor(np.ones([3]), mindspore.int32)
        >>> output = net(grad, indices)
    """
    # `indices` gets its own dtype group (T1): it stays int32 and never joins the
    # float implicit-conversion group of the other inputs.
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('linear', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )
    @prim_attr_register
    def __init__(self, lr, l1, l2, lr_power, use_locking=False):
        """Initialize SparseApplyFtrl."""
        validator.check_value_type("lr", lr, [float], self.name)
        validator.check_value_type("l1", l1, [float], self.name)
        validator.check_value_type("l2", l2, [float], self.name)
        validator.check_value_type("lr_power", lr_power, [float], self.name)
        # Range semantics: lr strictly positive, l1/l2 non-negative, lr_power non-positive.
        self.lr = validator.check_number_range("lr", lr, 0.0, float("inf"), Rel.INC_NEITHER, self.name)
        self.l1 = validator.check_number_range("l1", l1, 0.0, float("inf"), Rel.INC_LEFT, self.name)
        self.l2 = validator.check_number_range("l2", l2, 0.0, float("inf"), Rel.INC_LEFT, self.name)
        self.lr_power = validator.check_number("lr_power", lr_power, 0, Rel.LE, self.name)
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
    def check_shape(self, var_shape, accum_shape, linear_shape, grad_shape, indices_shape):
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)
        # Rows of grad are selected by indices, so only the trailing dims must match var.
        if len(var_shape) > 1:
            validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
    def check_dtype(self, var_dtype, accum_dtype, linear_dtype, grad_dtype, indices_dtype):
        args = {"var_dtype": var_dtype, "accum_dtype": accum_dtype,
                "linear_dtype": linear_dtype, "grad_dtype": grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
class SparseApplyFtrlV2(PrimitiveWithInfer):
    """
    Update relevant entries according to the FTRL-proximal scheme.

    All of inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Args:
        lr (float): The learning rate value, must be positive.
        l1 (float): l1 regularization strength, must be greater than or equal to zero.
        l2 (float): l2 regularization strength, must be greater than or equal to zero.
        l2_shrinkage (float): L2 shrinkage regularization.
        lr_power (float): Learning rate power controls how the learning rate decreases during training,
            must be less than or equal to zero. Use fixed learning rate if `lr_power` is zero.
        use_locking (bool): If `True`, the var and accumulation tensors will be protected from being updated.
            Default: False.

    Inputs:
        - **var** (Parameter) - The variable to be updated. The data type must be float16 or float32.
        - **accum** (Parameter) - The accumulation to be updated, must be same type and shape as `var`.
        - **linear** (Parameter) - the linear coefficient to be updated, must be same type and shape as `var`.
        - **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.
        - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.
          The shape of `indices` must be the same as `grad` in first dimension. The type must be int32.

    Outputs:
        Tuple of 3 Tensor, the updated parameters.

        - **var** (Tensor) - Tensor, has the same shape and type as `var`.
        - **accum** (Tensor) - Tensor, has the same shape and type as `accum`.
        - **linear** (Tensor) - Tensor, has the same shape and type as `linear`.

    Examples:
        >>> import mindspore
        >>> import mindspore.nn as nn
        >>> import numpy as np
        >>> from mindspore import Parameter
        >>> from mindspore import Tensor
        >>> from mindspore.ops import operations as P
        >>> class SparseApplyFtrlV2Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(SparseApplyFtrlV2Net, self).__init__()
        >>>         self.sparse_apply_ftrl_v2 = P.SparseApplyFtrlV2(lr=0.01, l1=0.0, l2=0.0,
        >>>                                                         l2_shrinkage=0.0, lr_power=-0.5)
        >>>         self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
        >>>         self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
        >>>         self.linear = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="linear")
        >>>
        >>>     def construct(self, grad, indices):
        >>>         out = self.sparse_apply_ftrl_v2(self.var, self.accum, self.linear, grad, indices)
        >>>         return out
        >>>
        >>> net = SparseApplyFtrlV2Net()
        >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
        >>> indices = Tensor(np.ones([3]), mindspore.int32)
        >>> output = net(grad, indices)
    """
    # `indices` stays in its own int32 dtype group (T1), outside the float group (T).
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('linear', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )
    @prim_attr_register
    def __init__(self, lr, l1, l2, l2_shrinkage, lr_power, use_locking=False):
        """Initialize SparseApplyFtrlV2."""
        validator.check_value_type("lr", lr, [float], self.name)
        validator.check_value_type("l1", l1, [float], self.name)
        validator.check_value_type("l2", l2, [float], self.name)
        validator.check_value_type("lr_power", lr_power, [float], self.name)
        # Range semantics: lr strictly positive, l1/l2 non-negative, lr_power non-positive.
        self.lr = validator.check_number_range("lr", lr, 0.0, float("inf"), Rel.INC_NEITHER, self.name)
        self.l1 = validator.check_number_range("l1", l1, 0.0, float("inf"), Rel.INC_LEFT, self.name)
        self.l2 = validator.check_number_range("l2", l2, 0.0, float("inf"), Rel.INC_LEFT, self.name)
        self.lr_power = validator.check_number("lr_power", lr_power, 0, Rel.LE, self.name)
        # l2_shrinkage is only type-checked; no range constraint is enforced here.
        self.l2_shrinkage = validator.check_value_type("l2_shrinkage", l2_shrinkage, [float], self.name)
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
    def infer_shape(self, var_shape, accum_shape, linear_shape, grad_shape, indices_shape):
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)
        # Rows of grad are selected by indices, so only the trailing dims must match var.
        if len(var_shape) > 1:
            validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
        return var_shape, accum_shape, linear_shape
    def infer_dtype(self, var_dtype, accum_dtype, linear_dtype, grad_dtype, indices_dtype):
        args = {"var_dtype": var_dtype, "accum_dtype": accum_dtype,
                "linear_dtype": linear_dtype, "grad_dtype": grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
        return var_dtype, accum_dtype, linear_dtype
class ConfusionMulGrad(PrimitiveWithInfer):
    """
    `output0` is the dot product result of input0 and input1.

    `output1` is the dot product result of input0 and input1, then apply the reducesum operation on it.

    Args:
        axis (Union[int, tuple[int], list[int]]): The dimensions to reduce.
            Default:(), reduce all dimensions. Only constant value is allowed.
        keep_dims (bool):
            - If true, keep these reduced dimensions and the length as 1.
            - If false, don't keep these dimensions. Default:False.

    Inputs:
        - **input_0** (Tensor) - The input Tensor.
        - **input_1** (Tensor) - The input Tensor.
        - **input_2** (Tensor) - The input Tensor.

    Outputs:
        - **output_0** (Tensor) - The same shape as `input0`.
        - **output_1** (Tensor)

          - If axis is (), and keep_dims is false, the output is a 0-D array representing
            the sum of all elements in the input array.
          - If axis is int, set as 2, and keep_dims is false,
            the shape of output is :math:`(x_1,x_3,...,x_R)`.
          - If axis is tuple(int), set as (2,3), and keep_dims is false,
            the shape of output is :math:`(x_1,x_4,...x_R)`.

    Examples:
        >>> confusion_mul_grad = P.ConfusionMulGrad()
        >>> input_0 = Tensor(np.random.randint(-2, 2, (2, 3)), mindspore.float32)
        >>> input_1 = Tensor(np.random.randint(0, 4, (2, 3)), mindspore.float32)
        >>> input_2 = Tensor(np.random.randint(-4, 0, (2, 3)), mindspore.float32)
        >>> output_0, output_1 = confusion_mul_grad(input_0, input_1, input_2)
        output_0:
            [[ 3.   1.   0.]
             [-6.   2.  -2.]]
        output_1:
            -3.0
    """
    @prim_attr_register
    def __init__(self, axis=(), keep_dims=False):
        """Initialize ConfusionMulGrad."""
        self.init_prim_io_names(inputs=["input0", "input1", "input2"], outputs=["output0", "output1"])
        self.axis_ = validator.check_value_type("axis", axis, [int, tuple, list], self.name)
        self.keep_dims_ = validator.check_value_type("keep_dims", keep_dims, [bool], self.name)
    def infer_shape(self, input0_shape, input1_shape, input2_shape):
        outshape0 = input0_shape
        # output1's shape is input1 reduced over self.axis_ (keeping dims if requested).
        outshape1 = _infer_shape_reduce(input1_shape, self.axis_, self.keep_dims_, self.name)
        return outshape0, outshape1
    def infer_dtype(self, input0_dtype, input1_dtype, input2_dtype):
        # All three inputs must be tensors, but only the dtypes of input0/input1
        # propagate to the two outputs.
        validator.check_subclass("input0_dtype", input0_dtype, mstype.tensor, self.name)
        validator.check_subclass("input1_dtype", input1_dtype, mstype.tensor, self.name)
        validator.check_subclass("input2_dtype", input2_dtype, mstype.tensor, self.name)
        return input0_dtype, input1_dtype
class Dropout(PrimitiveWithInfer):
    """
    During training, randomly zeroes some of the elements of the input tensor with probability.

    Args:
        keep_prob (float): The keep rate, between 0 and 1, e.g. keep_prob = 0.9,
            means dropping out 10% of input units. Must lie in (0, 1].

    Inputs:
        - **input_x** (Tensor) - The input tensor, of rank at least 1,
          with float16 or float32 data type.

    Outputs:
        - **output** (Tensor) - With the same shape and data type as `input_x`.
        - **mask** (Tensor) - The generated dropout mask, with the same shape as `input_x`.

    Examples:
        >>> dropout = P.Dropout(keep_prob=0.5)
        >>> input_x = Tensor(np.random.rand(20, 16, 50, 50).astype(np.float32))
        >>> output, mask = dropout(input_x)
    """
    @prim_attr_register
    def __init__(self, keep_prob=0.5):
        """Initialize Dropout."""
        # INC_RIGHT: keep_prob must lie in the half-open interval (0, 1].
        self.keep_prob = validator.check_number_range("keep_prob", keep_prob, 0, 1, Rel.INC_RIGHT, self.name)
    def infer_shape(self, x_shape):
        # Input must be at least rank 1; the mask shares the input's shape.
        validator.check_integer("x_shape", len(x_shape), 1, Rel.GE, self.name)
        mask_shape = x_shape
        return x_shape, mask_shape
    def infer_dtype(self, x_dtype):
        valid_types = (mstype.float16, mstype.float32)
        validator.check_subclass("x", x_dtype, mstype.tensor, self.name)
        validator.check_tensor_type_same({"x_dtype": x_dtype}, valid_types, self.name)
        return x_dtype, x_dtype
class DropoutGrad(PrimitiveWithInfer):
    """
    The gradient of Dropout. During training, randomly zeroes some of the elements
    of the input tensor with probability.

    Args:
        keep_prob (float): The keep rate, between 0 and 1, e.g. keep_prob = 0.9,
            means dropping out 10% of input units. Must lie in (0, 1].

    Inputs:
        - **dy** (Tensor) - The gradient flowing from the following layer,
          with float16 or float32 data type.
        - **mask** (Tensor) - The dropout mask produced by the forward pass.

    Outputs:
        Tensor, with the same shape and data type as `dy`.

    Examples:
        >>> dropout_grad = P.DropoutGrad(keep_prob=0.5)
        >>> dy = Tensor(np.random.rand(20, 16).astype(np.float32))
        >>> mask = Tensor(np.ones((20, 16)).astype(np.float32))
        >>> output = dropout_grad(dy, mask)
    """
    @prim_attr_register
    def __init__(self, keep_prob=0.5):
        """Initialize DropoutGrad."""
        # INC_RIGHT: keep_prob must lie in the half-open interval (0, 1].
        self.keep_prob = validator.check_number_range("keep_prob", keep_prob, 0, 1, Rel.INC_RIGHT, self.name)
    def infer_shape(self, dy_shape, mask_shape):
        # The output gradient mirrors dy; mask's shape is not constrained here.
        return dy_shape
    def infer_dtype(self, dy_dtype, mask_dtype):
        # mask only needs to be a tensor; its dtype is not checked against dy's.
        valid_types = (mstype.float16, mstype.float32)
        validator.check_subclass("dy", dy_dtype, mstype.tensor, self.name)
        validator.check_subclass("mask", mask_dtype, mstype.tensor, self.name)
        validator.check_tensor_type_same({"dy_dtype": dy_dtype}, valid_types, self.name)
        return dy_dtype
class CTCLoss(PrimitiveWithInfer):
"""
Calculates the CTC (Connectionist Temporal Classification) loss and the gradient.
Args:
preprocess_collapse_repeated (bool): If true, repeated labels will be collapsed prior to the CTC calculation.
Default: False.
ctc_merge_repeated (bool): If false, during CTC calculation, repeated non-blank labels will not be merged
                                   and these labels will be interpreted as individual ones. This is a simplified
version of CTC. Default: True.
ignore_longer_outputs_than_inputs (bool): If True, sequences with longer outputs than inputs will be ignored.
Default: False.
Inputs:
- **inputs** (Tensor) - The input Tensor should be a `3-D` tensor whose shape is
:math:`(max_time, batch_size, num_classes)`. `num_classes` should be `num_labels + 1` classes, `num_labels`
indicates the number of actual labels. Blank labels are reserved. Default blank label is `num_classes - 1`.
Data type must be float16, float32 or float64.
- **labels_indices** (Tensor) - The indices of labels. `labels_indices[i, :] == [b, t]` means `labels_values[i]`
stores the id for `(batch b, time t)`. The type must be int64 and rank must be 2.
- **labels_values** (Tensor) - A `1-D` input tensor. The values are associated with the given batch and time.
The type must be int32. `labels_values[i]` must in the range of `[0, num_classes)`.
- **sequence_length** (Tensor) - A tensor containing sequence lengths with the shape of :math:`(batch_size)`.
The type must be int32. Each value in the tensor should not be greater than `max_time`.
Outputs:
- **loss** (Tensor) - A tensor containing log-probabilities, the shape is :math:`(batch_size)`. The tensor has
the same type with `inputs`.
- **gradient** (Tensor) - The gradient of `loss`, has the same type and shape with `inputs`.
Examples:
>>> inputs = Tensor(np.random.random((2, 2, 3)), mindspore.float32)
>>> labels_indices = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int64)
>>> labels_values = Tensor(np.array([2, 2]), mindspore.int32)
>>> sequence_length = Tensor(np.array([2, 2]), mindspore.int32)
>>> ctc_loss = P.CTCLoss()
>>> output = ctc_loss(inputs, labels_indices, labels_values, sequence_length)
"""
@prim_attr_register
def __init__(self, preprocess_collapse_repeated=False, ctc_merge_repeated=True,
ignore_longer_outputs_than_inputs=False):
self.init_prim_io_names(inputs=["inputs", "labels_indices", "labels_values", "sequence_length"],
outputs=["loss", "gradient"])
validator.check_value_type("preprocess_collapse_repeated", preprocess_collapse_repeated, [bool], self.name)
self.preprocess_collapse_repeated_ = preprocess_collapse_repeated
self.ctc_merge_repeated_ = validator.check_value_type("ctc_merge_repeated", ctc_merge_repeated,
[bool], self.name)
validator.check_value_type("ignore_longer_outputs_than_inputs",
ignore_longer_outputs_than_inputs, [bool], self.name)
self.ignore_longer_outputs_than_inputs_ = ignore_longer_outputs_than_inputs
def infer_shape(self, inputs, labels_indices, labels_values, sequence_length):
validator.check_integer("inputs rank", len(inputs), 3, Rel.EQ, self.name)
validator.check_integer("labels_indices rank", len(labels_indices), 2, Rel.EQ, self.name)
validator.check_integer("labels_indices dim one", labels_indices[1], 2, Rel.EQ, self.name)
validator.check_integer("labels_values rank", len(labels_values), 1, Rel.EQ, self.name)
validator.check_integer("sequence_length rank", len(sequence_length), 1, Rel.EQ, self.name)
validator.check('labels_indices size', labels_indices[0], 'labels_values size',
labels_values[0], Rel.EQ, self.name)
validator.check('inputs batch_size', inputs[1], 'sequence_length batch_size',
sequence_length[0], Rel.EQ, self.name)
batch_size = []
batch_size.append(inputs[1])
return batch_size, inputs
def infer_dtype(self, inputs, labels_indices, labels_values, sequence_length):
valid_dtype = [mstype.float16, mstype.float32, mstype.double]
validator.check_tensor_type_same({"inputs_dtype": inputs}, valid_dtype, self.name)
validator.check_tensor_type_same({"labels_indices_dtype": labels_indices}, [mstype.int64], self.name)
validator.check_tensor_type_same({"labels_values_dtype": labels_values}, [mstype.int32], self.name)
validator.check_tensor_type_same({"sequence_length_dtype": sequence_length}, [mstype.int32], self.name)
return inputs, inputs
class CTCGreedyDecoder(PrimitiveWithInfer):
    """
    Performs greedy decoding on the logits given in inputs.

    Args:
        merge_repeated (bool): If True, merge repeated classes in output. Default: True.

    Inputs:
        - **inputs** (Tensor) - The input Tensor should be a `3-D` tensor whose shape is
          :math:`(max_time, batch_size, num_classes)`. `num_classes` should be `num_labels + 1` classes, `num_labels`
          indicates the number of actual labels. Blank labels are reserved. Default blank label is `num_classes - 1`.
          Data type must be float32 or float64.
        - **sequence_length** (Tensor) - A tensor containing sequence lengths with the shape of :math:`(batch_size)`.
          The type must be int32. Each value in the tensor should not be greater than `max_time`.

    Outputs:
        - **decoded_indices** (Tensor) - A tensor with shape of :math:`(total_decoded_outputs, 2)`.
          Data type is int64.
        - **decoded_values** (Tensor) - A tensor with shape of :math:`(total_decoded_outputs)`,
          it stores the decoded classes. Data type is int64.
        - **decoded_shape** (Tensor) - The value of tensor is :math:`[batch_size, max_decoded_length]`.
          Data type is int64.
        - **log_probability** (Tensor) - A tensor with shape of :math:`(batch_size, 1)`,
          containing sequence log-probability, has the same type as `inputs`.

    Examples:
        >>> class CTCGreedyDecoderNet(nn.Cell):
        >>>     def __init__(self):
        >>>         super(CTCGreedyDecoderNet, self).__init__()
        >>>         self.ctc_greedy_decoder = P.CTCGreedyDecoder()
        >>>         self.assert_op = P.Assert(300)
        >>>
        >>>     def construct(self, inputs, sequence_length):
        >>>         out = self.ctc_greedy_decoder(inputs,sequence_length)
        >>>         self.assert_op(True, (out[0], out[1], out[2], out[3]))
        >>>         return out[2]
        >>>
        >>> inputs = Tensor(np.random.random((2, 2, 3)), mindspore.float32)
        >>> sequence_length = Tensor(np.array([2, 2]), mindspore.int32)
        >>> net = CTCGreedyDecoderNet()
        >>> output = net(inputs, sequence_length)
    """

    @prim_attr_register
    def __init__(self, merge_repeated=True):
        # Whether consecutive identical classes are collapsed in the decoded output.
        self.merge_repeated = validator.check_value_type("merge_repeated", merge_repeated, [bool], self.name)

    def infer_shape(self, inputs_shape, sequence_length_shape):
        validator.check_integer("inputs rank", len(inputs_shape), 3, Rel.EQ, self.name)
        validator.check_integer("sequence_length rank", len(sequence_length_shape), 1, Rel.EQ, self.name)
        # inputs is (max_time, batch, num_classes); batch must match sequence_length.
        validator.check('inputs batch_size', inputs_shape[1], 'sequence_length batch_size',
                        sequence_length_shape[0], Rel.EQ, self.name)
        # The number of decoded entries depends on the data, so it is unknown at
        # compile time; -1 marks the dimension as dynamic.
        total_decoded_outputs = -1
        decoded_indices_shape = [total_decoded_outputs, 2]
        decoded_values = [total_decoded_outputs]
        decoded_shape = [2]
        log_probability_shape = [inputs_shape[1], 1]
        return decoded_indices_shape, decoded_values, decoded_shape, log_probability_shape

    def infer_dtype(self, inputs_dtype, sequence_length_dtype):
        validator.check_tensor_type_same({"inputs_dtype": inputs_dtype}, [mstype.float32, mstype.double], self.name)
        validator.check_tensor_type_same({"sequence_length_dtype": sequence_length_dtype}, [mstype.int32], self.name)
        # The three decoded outputs are always int64; log-probability keeps the input dtype.
        decoded_type = mstype.tensor_type(mstype.int64)
        return decoded_type, decoded_type, decoded_type, inputs_dtype
class BasicLSTMCell(PrimitiveWithInfer):
    r"""
    Applies the long short-term memory (LSTM) to the input.

    .. math::
        \begin{array}{ll} \\
            i_t = \sigma(W_{ix} x_t + b_{ix} + W_{ih} h_{(t-1)} + b_{ih}) \\
            f_t = \sigma(W_{fx} x_t + b_{fx} + W_{fh} h_{(t-1)} + b_{fh}) \\
            \tilde{c}_t = \tanh(W_{cx} x_t + b_{cx} + W_{ch} h_{(t-1)} + b_{ch}) \\
            o_t = \sigma(W_{ox} x_t + b_{ox} + W_{oh} h_{(t-1)} + b_{oh}) \\
            c_t = f_t * c_{(t-1)} + i_t * \tilde{c}_t \\
            h_t = o_t * \tanh(c_t) \\
        \end{array}

    Here :math:`\sigma` is the sigmoid function, and :math:`*` is the Hadamard product. :math:`W, b`
    are learnable weights between the output and the input in the formula. For instance,
    :math:`W_{ix}, b_{ix}` are the weight and bias used to transform from input :math:`x` to :math:`i`.
    Details can be found in paper `LONG SHORT-TERM MEMORY
    <https://www.bioinf.jku.at/publications/older/2604.pdf>`_ and
    `Long Short-Term Memory Recurrent Neural Network Architectures for Large Scale Acoustic Modeling
    <https://static.googleusercontent.com/media/research.google.com/zh-CN//pubs/archive/43905.pdf>`_.

    Args:
        keep_prob (float): If not 1.0, append `Dropout` layer on the outputs of each
            LSTM layer except the last layer. Default 1.0. The range of dropout is [0.0, 1.0].
        forget_bias (float): Add forget bias to forget gate biases in order to decrease former scale. Default: 1.0.
        state_is_tuple (bool): If true, the state is a tuple of 2 tensors, containing h and c; If false, the state is
            a tensor and it needs to be split first. Default: True.
        activation (str): Activation. Default: "tanh". Only "tanh" is currently supported.

    Inputs:
        - **x** (Tensor) - Current words. Tensor of shape (`batch_size`, `input_size`).
          The data type must be float16 or float32.
        - **h** (Tensor) - Hidden state last moment. Tensor of shape (`batch_size`, `hidden_size`).
          The data type must be float16 or float32.
        - **c** (Tensor) - Cell state last moment. Tensor of shape (`batch_size`, `hidden_size`).
          The data type must be float16 or float32.
        - **w** (Tensor) - Weight. Tensor of shape (`input_size + hidden_size`, `4 x hidden_size`).
          The data type must be float16 or float32.
        - **b** (Tensor) - Bias. Tensor of shape (`4 x hidden_size`).
          The data type must be the same as `c`.

    Outputs:
        - **ct** (Tensor) - Forward :math:`c_t` cache at moment `t`. Tensor of shape (`batch_size`, `hidden_size`).
          Has the same type with input `c`.
        - **ht** (Tensor) - Cell output. Tensor of shape (`batch_size`, `hidden_size`). With data type of float16.
        - **it** (Tensor) - Forward :math:`i_t` cache at moment `t`. Tensor of shape (`batch_size`, `hidden_size`).
          Has the same type with input `c`.
        - **jt** (Tensor) - Forward :math:`j_t` cache at moment `t`. Tensor of shape (`batch_size`, `hidden_size`).
          Has the same type with input `c`.
        - **ft** (Tensor) - Forward :math:`f_t` cache at moment `t`. Tensor of shape (`batch_size`, `hidden_size`).
          Has the same type with input `c`.
        - **ot** (Tensor) - Forward :math:`o_t` cache at moment `t`. Tensor of shape (`batch_size`, `hidden_size`).
          Has the same type with input `c`.
        - **tanhct** (Tensor) - Forward :math:`tanh c_t` cache at moment `t`.
          Tensor of shape (`batch_size`, `hidden_size`), has the same type with input `c`.

    Examples:
        >>> x = Tensor(np.random.rand(1, 32).astype(np.float16))
        >>> h = Tensor(np.random.rand(1, 64).astype(np.float16))
        >>> c = Tensor(np.random.rand(1, 64).astype(np.float16))
        >>> w = Tensor(np.random.rand(96, 256).astype(np.float16))
        >>> b = Tensor(np.random.rand(256, ).astype(np.float16))
        >>> lstm = P.BasicLSTMCell(keep_prob=1.0, forget_bias=1.0, state_is_tuple=True, activation='tanh')
        >>> lstm(x, h, c, w, b)
    """

    @prim_attr_register
    def __init__(self, keep_prob=1.0, forget_bias=1.0, state_is_tuple=True, activation='tanh'):
        """Initialize BasicLSTMCell: validate attributes and register the io_format attr."""
        # keep_prob is checked twice: first for type, then for the [0.0, 1.0] range.
        self.keep_prob = validator.check_value_type("keep_prob", keep_prob, [float], self.name)
        self.keep_prob = validator.check_number_range("keep_prob", keep_prob, 0.0, 1.0, Rel.INC_BOTH, self.name)
        self.forget_bias = validator.check_value_type("forget_bias", forget_bias, [float], self.name)
        self.state_is_tuple = validator.check_value_type("state_is_tuple", state_is_tuple, [bool], self.name)
        # Only 'tanh' is supported as the cell activation.
        self.activation = validator.check_string("activation", activation, ['tanh'], self.name)
        self.add_prim_attr("io_format", "ND")

    def infer_shape(self, x_shape, h_shape, c_shape, w_shape, b_shape):
        # Rank checks first, then cross-operand consistency checks.
        validator.check_integer("x rank", len(x_shape), 2, Rel.EQ, self.name)
        validator.check_integer("h rank", len(h_shape), 2, Rel.EQ, self.name)
        validator.check_integer("c rank", len(c_shape), 2, Rel.EQ, self.name)
        validator.check_integer("w rank", len(w_shape), 2, Rel.EQ, self.name)
        validator.check_integer("b rank", len(b_shape), 1, Rel.EQ, self.name)
        # Batch sizes of x, h and c must agree; hidden sizes of c and h must agree.
        validator.check("x_shape[0]", x_shape[0], "h_shape[0]", h_shape[0], Rel.EQ, self.name)
        validator.check("c_shape[0]", c_shape[0], "h_shape[0]", h_shape[0], Rel.EQ, self.name)
        validator.check("c_shape[1]", c_shape[1], "h_shape[1]", h_shape[1], Rel.EQ, self.name)
        # Weight is (input_size + hidden_size, 4 * hidden_size); bias is (4 * hidden_size,).
        validator.check("w_shape[1]", w_shape[1], "4*h_shape[1]", 4 * h_shape[1], Rel.EQ, self.name)
        validator.check("w_shape[0]", w_shape[0], "x_shape[1]+h_shape[1]", x_shape[1] + h_shape[1], Rel.EQ, self.name)
        validator.check("b_shape[0]", b_shape[0], "4*h_shape[1]", 4 * h_shape[1], Rel.EQ, self.name)
        # All seven outputs share the (batch_size, hidden_size) shape of `c`.
        ct_shape = c_shape
        ht_shape = c_shape
        it_shape = c_shape
        jt_shape = c_shape
        ft_shape = c_shape
        ot_shape = c_shape
        tanhct_shape = c_shape
        return (ct_shape, ht_shape, it_shape, jt_shape, ft_shape, ot_shape, tanhct_shape)

    def infer_dtype(self, x_dtype, h_dtype, c_dtype, w_dtype, b_dtype):
        validator.check_tensor_type_same({"x_dtype": x_dtype}, [mstype.float16, mstype.float32], self.name)
        validator.check_tensor_type_same({"h_dtype": h_dtype}, [mstype.float16, mstype.float32], self.name)
        validator.check_tensor_type_same({"w_dtype": w_dtype}, [mstype.float16, mstype.float32], self.name)
        # c and b must have the same dtype as each other.
        args = {"c_dtype": c_dtype, "b_dtype": b_dtype}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        # ht is always float16; the gate caches follow the dtype of `c`.
        return (c_dtype, mstype.float16, c_dtype, c_dtype, c_dtype, c_dtype, c_dtype)
class InTopK(PrimitiveWithInfer):
    r"""
    Checks whether each target label lies within the top `k` predictions.

    Args:
        k (int): Number of top predictions to consider when computing precision.

    Inputs:
        - **x1** (Tensor) - A 2D Tensor of predictions for a batch of samples, float16 or float32.
        - **x2** (Tensor) - A 1D Tensor of labels for the batch, int32.

    Outputs:
        A 1D bool Tensor with the same shape as `x2`. Entry `i` is True when the label
        `x2[i]` is among the top `k` predictions of row `i` of `x1`, otherwise False.

    Examples:
        >>> x1 = Tensor(np.array([[1, 8, 5, 2, 7], [4, 9, 1, 3, 5]]), mindspore.float32)
        >>> x2 = Tensor(np.array([1, 3]), mindspore.int32)
        >>> in_top_k = P.InTopK(3)
        >>> result = in_top_k(x1, x2)
        [True False]
    """

    @prim_attr_register
    def __init__(self, k):
        """Initialize InTopK: register I/O names and check that `k` is an int."""
        self.init_prim_io_names(inputs=['x1', 'x2', 'k'], outputs=['y'])
        validator.check_value_type("k", k, [int], self.name)

    def infer_shape(self, x1_shape, x2_shape):
        # Predictions are 2D, labels are 1D, and their batch sizes must agree.
        validator.check("x1", len(x1_shape), "", 2, Rel.EQ, self.name)
        validator.check("x2", len(x2_shape), "", 1, Rel.EQ, self.name)
        validator.check("size of x2", x2_shape[0], "x1's first dimension", x1_shape[0], Rel.EQ, self.name)
        return x2_shape

    def infer_dtype(self, x1_dtype, x2_dtype):
        validator.check_tensor_type_same({"x1": x1_dtype}, (mstype.float16, mstype.float32,), self.name)
        validator.check_tensor_type_same({"x2": x2_dtype}, (mstype.int32,), self.name)
        # The result is always a boolean tensor.
        return mstype.tensor_type(mstype.bool_)
class LRN(PrimitiveWithInfer):
    r"""
    Local Response Normalization.

    Args:
        depth_radius (int): Half-width of the 1-D normalization window. Shape of 0-D.
        bias (float): An offset (usually positive to avoid dividing by 0).
        alpha (float): A scale factor, usually positive.
        beta (float): An exponent.
        norm_region (str): Specify normalization region. Options: "ACROSS_CHANNELS". Default: "ACROSS_CHANNELS".

    Inputs:
        - **x** (Tensor) - A 4D Tensor with float16 or float32 data type.

    Outputs:
        Tensor, with shape and data type same as the input tensor.

    Examples:
        >>> x = Tensor(np.random.rand(1, 10, 4, 4), mindspore.float32)
        >>> lrn = P.LRN()
        >>> lrn(x)
    """
    # Fix: the docstring example previously read
    # `Tensor(np.random.rand(1, 10, 4, 4)), mindspore.float32)` — unbalanced
    # parentheses that would be a SyntaxError if pasted; corrected above.

    @prim_attr_register
    def __init__(self, depth_radius=5, bias=1.0, alpha=1.0, beta=0.5, norm_region="ACROSS_CHANNELS"):
        """Initialize LRN: register I/O names and validate all attributes."""
        self.init_prim_io_names(inputs=['x'], outputs=['y'])
        validator.check_value_type("depth_radius", depth_radius, [int], self.name)
        validator.check_value_type("bias", bias, [float], self.name)
        validator.check_value_type("alpha", alpha, [float], self.name)
        validator.check_value_type("beta", beta, [float], self.name)
        validator.check_value_type("norm_region", norm_region, [str], self.name)
        # Only cross-channel normalization is supported.
        validator.check_string('norm_region', norm_region, ['ACROSS_CHANNELS'], self.name)
        validator.check_integer("depth_radius", depth_radius, 0, Rel.GE, self.name)

    def infer_dtype(self, x_dtype):
        validator.check_tensor_type_same({"x": x_dtype}, (mstype.float16, mstype.float32,), self.name)
        return x_dtype

    def infer_shape(self, x_shape):
        # Element-wise across the window: the output shape equals the input shape.
        validator.check_integer("x_shape", len(x_shape), 4, Rel.EQ, self.name)
        return x_shape
class CTCLossV2(PrimitiveWithInfer):
    r"""
    Calculates the CTC (Connectionist Temporal Classification) loss and the gradient.

    Note:
        - Cudnn uses a label value of 0 for the `blank`.

    Inputs:
        - **inputs** (Tensor) - The input Tensor should be a `3-D` tensor whose shape is
          :math:`(max_time, batch_size, num_class)`. `num_class` should be `num_labels + 1` classes, `num_labels`
          indicates the number of actual labels. Blank labels are reserved.
        - **labels** (Tensor) - The labels Tensor should be a `1-D` tensor whose shape is
          :math:`(\sigma{label_lengths})`
          or `2-D` tensor whose shape is
          :math:`(max_time, max{label_lengths})`
          The type must be int32.
        - **input_lengths** (Tensor) - A `1-D` input tensor whose shape is
          :math:`(batch_size,)`. The values should be batch. The type must be int32.
        - **label_lengths** (Tensor) - A tensor containing sequence lengths with the shape of :math:`(batch_size)`.
          The type must be int32. Each value in the tensor should not be greater than `max_time`.

    Outputs:
        - **loss** (Tensor) - A tensor containing log-probabilities, the shape is :math:`(batch_size)`, has the same
          type with `inputs`.
        - **gradient** (Tensor) - The gradient of `loss`, has the same type and shape with `inputs`.

    Examples:
        >>> inputs = Tensor(np.random.random((2, 2, 3)), mindspore.float32)
        >>> labels = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)
        >>> input_lengths = Tensor(np.array([3, 3, 3]), mindspore.int32)
        >>> label_lengths = Tensor(np.array([3, 3, 3]), mindspore.int32)
        >>> ctc_loss = P.CTCLossV2()
        >>> output = ctc_loss(inputs, labels, input_lengths, label_lengths)
    """

    @prim_attr_register
    def __init__(self):
        pass

    def infer_dtype(self, input_dtype, labels_dtype, input_lengths_dtype, label_lengths_dtype):
        # Only float32 inputs are supported; all length/label operands are int32.
        validator.check_tensor_type_same({"input": input_dtype}, (mstype.float32,), self.name)
        validator.check_tensor_type_same({"labels": labels_dtype}, (mstype.int32,), self.name)
        validator.check_tensor_type_same({"input_lengths": input_lengths_dtype}, (mstype.int32,), self.name)
        validator.check_tensor_type_same({"target_lengths": label_lengths_dtype}, (mstype.int32,), self.name)
        return mstype.float32, mstype.float32

    def infer_shape(self, input_shape, labels_shape, input_lengths_shape, label_lengths_shape):
        validator.check_integer("input shape", len(input_shape), 3, Rel.EQ, self.name)
        # Labels may be flattened (1-D) or padded per batch item (2-D).
        validator.check_number_range("labels shape", len(labels_shape), 1, 2, Rel.INC_BOTH, self.name)
        validator.check_integer("input lengths shape", len(input_lengths_shape), 1, Rel.EQ, self.name)
        validator.check_integer("label lengths shape", len(label_lengths_shape), 1, Rel.EQ, self.name)
        # input_shape[1] is batch_size; both length vectors must have one entry per batch item.
        validator.check_integer("input[1]", input_shape[1], input_lengths_shape[0], Rel.EQ, self.name)
        validator.check_integer("input[1]", input_shape[1], label_lengths_shape[0], Rel.EQ, self.name)
        # loss is (batch_size,); gradient matches `inputs`.
        return (input_shape[1],), input_shape
| 48.807652 | 120 | 0.628525 |
import math
import operator
from functools import reduce
import numpy as np
from ... import context
from .. import signature as sig
from ..._checkparam import Validator as validator
from ..._checkparam import Rel
from ...common import dtype as mstype
from ..primitive import Primitive, PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register
from ..operations.math_ops import _infer_shape_reduce
def _check_positive_int_or_tuple(arg_name, arg_value, prim_name, allow_four=False, ret_four=False):
    """Validate that `arg_value` is a positive int or a tuple of 2 (or, when
    `allow_four`, 4) positive ints, and return it normalized to a 2- or 4-tuple
    according to `ret_four`.
    """
    def _raise_message():
        raise ValueError(f"For '{prim_name}' attr '{arg_name}' should be an positive int number or a tuple of two "
                         f"{'or four ' if allow_four else ''}positive int numbers, but got {arg_value}")

    validator.check_value_type(arg_name, arg_value, (int, tuple), prim_name)
    # Normalize the accepted spellings to the requested tuple width.
    if isinstance(arg_value, int):
        normalized = (1, 1, arg_value, arg_value) if ret_four else (arg_value, arg_value)
    elif len(arg_value) == 2:
        normalized = (1, 1, arg_value[0], arg_value[1]) if ret_four else arg_value
    elif len(arg_value) == 4 and allow_four:
        normalized = arg_value if ret_four else (arg_value[2], arg_value[3])
    else:
        _raise_message()
    # Every element of the result must be a positive int.
    if not all(isinstance(item, int) and item > 0 for item in normalized):
        _raise_message()
    return normalized
class Flatten(PrimitiveWithInfer):
    r"""
    Flattens a tensor without changing its batch dimension: dimension 0 is kept
    and all remaining dimensions are collapsed into one.
    """

    @prim_attr_register
    def __init__(self):
        pass

    def infer_shape(self, input_x):
        validator.check_integer('input_x rank', len(input_x), 1, Rel.GE, self.name)
        # The initializer 1 makes the reduce also correct for rank-1 inputs
        # (empty trailing slice -> product of no dims is 1).
        trailing = reduce(operator.mul, input_x[1:], 1)
        return input_x[0], trailing

    def infer_dtype(self, input_x):
        validator.check_subclass("input_x", input_x, mstype.tensor, self.name)
        return input_x
class Softmax(PrimitiveWithInfer):
    r"""
    Softmax operation applied along the given axis (or axes).

    Args:
        axis (Union[int, tuple]): The axis to perform the Softmax operation. Default: -1.

    Inputs:
        - **logits** (Tensor) - The input tensor, with float data type.

    Outputs:
        Tensor, with the same type and shape as `logits`.
    """

    @prim_attr_register
    def __init__(self, axis=-1):
        """Initialize Softmax: canonicalize `axis` to a tuple of ints."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])
        validator.check_value_type("axis", axis, [int, tuple], self.name)
        # A scalar axis is stored as a 1-tuple so the rest of the code can iterate it.
        if isinstance(axis, int):
            self.add_prim_attr('axis', (axis,))
        for element in self.axis:
            validator.check_value_type("item of axis", element, [int], self.name)

    def infer_shape(self, logits):
        validator.check_integer("length of axis", len(self.axis), 1, Rel.GE, self.name)
        rank = len(logits)
        # Each axis must index an existing dimension; negative indices are allowed.
        for one_axis in self.axis:
            validator.check_int_range("axis", one_axis, -rank, rank, Rel.INC_LEFT, self.name)
        return logits

    def infer_dtype(self, logits):
        validator.check_subclass("logits", logits, mstype.tensor, self.name)
        validator.check_tensor_type_same({"logits": logits}, mstype.float_type, self.name)
        return logits
class LogSoftmax(PrimitiveWithInfer):
    r"""
    Log-Softmax activation along a single axis.

    Args:
        axis (int): The axis to perform the operation. Default: -1.

    Inputs:
        - **logits** (Tensor) - The input tensor, with float data type.

    Outputs:
        Tensor, with the same type and shape as `logits`.
    """

    @prim_attr_register
    def __init__(self, axis=-1):
        validator.check_value_type("axis", axis, [int], self.name)

    def infer_shape(self, logits):
        # Shape is unchanged; only verify `axis` indexes a real dimension.
        validator.check_int_range('axis', self.axis, -len(logits), len(logits), Rel.INC_LEFT, self.name)
        return logits

    def infer_dtype(self, logits):
        validator.check_subclass("logits", logits, mstype.tensor, self.name)
        validator.check_tensor_type_same({"logits": logits}, mstype.float_type, self.name)
        return logits
class Softplus(PrimitiveWithInfer):
    r"""
    Softplus activation, a smooth approximation of ReLU: :math:`\log(1 + \exp(x))`.

    Inputs:
        - **input_x** (Tensor) - The input tensor, with float data type.

    Outputs:
        Tensor, with the same type and shape as the input.
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Softplus: register input/output names."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, input_x):
        # Element-wise op: output shape mirrors the input shape.
        return input_x

    def infer_dtype(self, input_x):
        validator.check_tensor_type_same({'input_x': input_x}, mstype.float_type, self.name)
        return input_x
class Softsign(PrimitiveWithInfer):
    r"""
    Softsign activation: :math:`x / (1 + |x|)`.

    Inputs:
        - **input_x** (Tensor) - The input tensor, float16 or float32.

    Outputs:
        Tensor, with the same type and shape as the input.
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Softsign: register input/output names."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, input_x):
        # Element-wise op: output shape mirrors the input shape.
        return input_x

    def infer_dtype(self, input_x):
        validator.check_tensor_type_same({'input_x': input_x}, [mstype.float16, mstype.float32], self.name)
        return input_x
class ReLU(PrimitiveWithInfer):
    r"""
    Rectified Linear Unit activation: :math:`\max(x, 0)`.

    Inputs:
        - **input_x** (Tensor) - The input tensor, with any numeric data type.

    Outputs:
        Tensor, with the same type and shape as the input.
    """

    @prim_attr_register
    def __init__(self):
        """Initialize ReLU: register input/output names."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, input_x):
        # Element-wise op: output shape mirrors the input shape.
        return input_x

    def infer_dtype(self, input_x):
        validator.check_tensor_type_same({'input_x': input_x}, mstype.number_type, self.name)
        return input_x
class ReLU6(PrimitiveWithInfer):
    r"""
    Rectified Linear Unit clipped at 6: :math:`\min(\max(x, 0), 6)`.

    Inputs:
        - **input_x** (Tensor) - The input tensor, float16 or float32.

    Outputs:
        Tensor, with the same type and shape as the input.
    """

    @prim_attr_register
    def __init__(self):
        """Initialize ReLU6: register input/output names."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, input_x):
        # Element-wise op: output shape mirrors the input shape.
        return input_x

    def infer_dtype(self, input_x):
        validator.check_tensor_type_same({'input_x': input_x}, (mstype.float16, mstype.float32), self.name)
        return input_x
class ReLUV2(PrimitiveWithInfer):
    r"""
    Computes ReLU (max(x, 0)) on a 4-D input and additionally returns a packed
    activation mask.

    Inputs:
        - **input_x** (Tensor) - A 4-D tensor with a numeric data type.

    Outputs:
        - **output** (Tensor) - Same shape and dtype as `input_x`.
        - **mask** (Tensor) - A uint8 mask tensor; the channel dimension is packed
          (32 elements per unit for 8-bit inputs, 16 otherwise) and a trailing
          lane dimension (4 for 8-bit inputs, 2 otherwise) is appended.
    """

    @prim_attr_register
    def __init__(self):
        """Initialize ReLUV2: register input/output names."""
        self.init_prim_io_names(inputs=['x'], outputs=['output', 'mask'])

    def __infer__(self, input_x):
        input_shape = list(input_x['shape'])
        input_dtype = input_x['dtype']
        mask_shape = []
        if len(input_shape) != 4:
            raise ValueError("The `input_x` should be a 4-D tensor, "
                             f"but got a {len(input_shape)}-D tensor whose shape is {input_shape}")
        # BUG FIX: the original tested `dtype == uint8 and dtype == int8`, which
        # is always False, so 8-bit inputs silently fell through to the 16-lane
        # mask layout. 8-bit types pack 32 channel elements per mask unit and
        # use 4 trailing lanes; wider types pack 16 and use 2.
        is_8bit = input_dtype in (mstype.uint8, mstype.int8)
        for index, dim in enumerate(input_shape):
            if index == 1:
                if is_8bit:
                    mask_shape.append((input_shape[1] + 31) // 32)
                else:
                    mask_shape.append((input_shape[1] + 15) // 16)
            else:
                mask_shape.append(dim)
        mask_shape.append(4 if is_8bit else 2)
        output_shape = (input_x['shape'], mask_shape)
        validator.check_subclass("input_x", input_dtype, mstype.tensor, self.name)
        validator.check_tensor_type_same({'input_x': input_dtype}, mstype.number_type, self.name)
        # The mask is always uint8 regardless of the input dtype.
        mask_dtype = mstype.uint8
        output_dtype = (input_dtype, mask_dtype)
        return {'shape': output_shape,
                'dtype': output_dtype,
                'value': None}
class Elu(PrimitiveWithInfer):
    r"""
    Exponential Linear Unit activation.

    Args:
        alpha (float): Coefficient for the negative branch. Only 1.0 is currently
            supported (enforced below). Default: 1.0.

    Inputs:
        - **input_x** (Tensor) - The input tensor, with float data type.

    Outputs:
        Tensor, with the same type and shape as the input.
    """

    @prim_attr_register
    def __init__(self, alpha=1.0):
        """Initialize Elu: `alpha` must be a float and must equal 1.0."""
        validator.check_value_type("alpha", alpha, [float], self.name)
        validator.check_number("alpha", alpha, 1.0, Rel.EQ, self.name)

    def infer_shape(self, input_x):
        # Element-wise op: output shape mirrors the input shape.
        return input_x

    def infer_dtype(self, input_x):
        validator.check_tensor_type_same({'input_x': input_x}, mstype.float_type, self.name)
        return input_x
class HSwish(PrimitiveWithInfer):
    r"""
    Hard Swish activation: :math:`x \cdot \mathrm{relu6}(x + 3) / 6`.

    Inputs:
        - **x** (Tensor) - The input tensor, float16 or float32.

    Outputs:
        Tensor, with the same type and shape as the input.
    """

    @prim_attr_register
    def __init__(self):
        """Initialize HSwish: register input/output names."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, xshape):
        # Element-wise op: output shape mirrors the input shape.
        return xshape

    def infer_dtype(self, x_dtype):
        validator.check_tensor_type_same({"x": x_dtype}, (mstype.float16, mstype.float32), self.name)
        return x_dtype
class Sigmoid(PrimitiveWithInfer):
    r"""
    Sigmoid activation: :math:`1 / (1 + \exp(-x))`.

    Inputs:
        - **input_x** (Tensor) - The input tensor, float16 or float32.

    Outputs:
        Tensor, with the same type and shape as the input.
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Sigmoid: register input/output names."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, input_x):
        # Element-wise op: output shape mirrors the input shape.
        return input_x

    def infer_dtype(self, input_x):
        validator.check_tensor_type_same({"input_x": input_x}, (mstype.float16, mstype.float32), self.name)
        return input_x
class HSigmoid(PrimitiveWithInfer):
    r"""
    Hard Sigmoid activation, a piecewise-linear approximation of the sigmoid.

    Inputs:
        - **x** (Tensor) - The input tensor, float16 or float32.

    Outputs:
        Tensor, with the same type and shape as the input.
    """

    @prim_attr_register
    def __init__(self):
        """Initialize HSigmoid: register input/output names."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, x_shape):
        # Element-wise op: output shape mirrors the input shape.
        return x_shape

    def infer_dtype(self, x_dtype):
        validator.check_tensor_type_same({"x": x_dtype}, (mstype.float16, mstype.float32), self.name)
        return x_dtype
class Tanh(PrimitiveWithInfer):
    r"""
    Hyperbolic tangent activation.

    Inputs:
        - **input_x** (Tensor) - The input tensor.

    Outputs:
        Tensor, with the same type and shape as the input.
    """

    @prim_attr_register
    def __init__(self):
        pass

    def infer_shape(self, input_x):
        # Element-wise op: output shape mirrors the input shape.
        return input_x

    def infer_dtype(self, input_x):
        # Only a tensor-subclass check here; no dtype restriction is enforced.
        validator.check_subclass("input_x", input_x, mstype.tensor, self.name)
        return input_x
class FusedBatchNorm(Primitive):
    """Fused batch normalization primitive (training form).

    NOTE(review): this is a bare Primitive (no infer methods here) — shape/dtype
    inference presumably happens in the backend; confirm against the op registry.
    """
    @prim_attr_register
    def __init__(self, mode=0, epsilon=1e-5, momentum=0.1):
        """Initialize FusedBatchNorm: register I/O names and validate attributes."""
        self.init_prim_io_names(inputs=['x', 'scale', 'b', 'mean', 'variance'],
                                outputs=['y', 'running_mean', 'running_variance', 'save_mean', 'save_inv_variance'])
        # `mode` must be a member of [0, 1] (Rel.IN is a membership check here).
        self.mode = validator.check_integer('mode', mode, [0, 1], Rel.IN, self.name)
        # epsilon in (0, 1]; momentum in [0, 1].
        self.epsilon = validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, self.name)
        self.momentum = validator.check_number_range('momentum', momentum, 0, 1, Rel.INC_BOTH, self.name)
        # Flags that this primitive updates its Parameter inputs (mean/variance) in place.
        self._update_parameter = True
class FusedBatchNormEx(PrimitiveWithInfer):
    """Extended fused batch normalization with an extra reserve output.

    The signature marks scale/bias/mean/variance as writable (RW_WRITE): the
    primitive updates these Parameters in place during training.
    """
    __mindspore_signature__ = (
        sig.make_sig('input_x', dtype=sig.sig_dtype.T2),
        sig.make_sig('scale', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('bias', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('mean', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('variance', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self, mode=0, epsilon=1e-5, momentum=0.1):
        """Initialize FusedBatchNormEx: register I/O names and validate attributes."""
        self.init_prim_io_names(inputs=['x', 'scale', 'b', 'mean', 'variance'],
                                outputs=['y', 'save_scale', 'save_bias', 'save_mean', 'save_inv_variance', 'reserve'])
        # `mode` must be a member of [0, 1]; epsilon in (0, 1]; momentum in [0, 1].
        self.mode = validator.check_integer('mode', mode, [0, 1], Rel.IN, self.name)
        self.epsilon = validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, self.name)
        self.momentum = validator.check_number_range('momentum', momentum, 0, 1, Rel.INC_BOTH, self.name)
        # Flags that this primitive updates its Parameter inputs in place.
        self._update_parameter = True
        self.add_prim_attr('data_format', "NCHW")

    def infer_shape(self, input_x, scale, bias, mean, variance):
        # scale/bias/mean/variance are all 1-D of length C == input_x shape[1] (NCHW).
        validator.check_integer("scale rank", len(scale), 1, Rel.EQ, self.name)
        validator.check("scale shape", scale, "bias shape", bias, Rel.EQ, self.name)
        validator.check("scale shape[0]", scale[0], "input_x shape[1]", input_x[1], Rel.EQ, self.name)
        validator.check_integer("mean rank", len(mean), 1, Rel.EQ, self.name)
        validator.check("mean shape", mean, "variance shape", variance, Rel.EQ, self.name)
        validator.check("mean shape", mean, "scale shape", scale, Rel.EQ, self.name)
        # y follows input_x; all saved statistics (and reserve) follow scale's shape.
        return (input_x, scale, scale, scale, scale, scale)

    def infer_dtype(self, input_x, scale, bias, mean, variance):
        validator.check_tensor_type_same({"input_x": input_x}, [mstype.float16, mstype.float32], self.name)
        # scale/bias must be float32; moving statistics must be float32 tensors too.
        args = {"scale": scale, "bias": bias}
        validator.check_tensor_type_same(args, [mstype.float32], self.name)
        args_moving = {"mean": mean, "variance": variance}
        valid_types = [mstype.tensor_type(mstype.float32)]
        validator.check_type_same(args_moving, valid_types, self.name)
        return (input_x, scale, scale, scale, scale, scale)
class BNTrainingReduce(PrimitiveWithInfer):
    """First step of fused batch-norm training: computes the per-channel sum and
    sum of squares of a 4-D NCHW input, reducing over N, H and W.
    """

    @prim_attr_register
    def __init__(self):
        """Initialize BNTrainingReduce: register input/output names."""
        self.init_prim_io_names(inputs=['x'], outputs=['sum', 'square_sum'])

    def infer_shape(self, x_shape):
        validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
        # Both outputs carry one value per channel (dimension 1 of NCHW).
        return ([x_shape[1]], [x_shape[1]])

    def infer_dtype(self, x_type):
        # Both outputs keep the input dtype.
        return (x_type, x_type)
class BNTrainingUpdate(PrimitiveWithInfer):
    """Second step of fused batch-norm training.

    NOTE(review): presumably consumes the `sum`/`square_sum` outputs of
    BNTrainingReduce — confirm against the backend op definition. The `isRef`
    argument is registered as a primitive attribute by @prim_attr_register but
    is not otherwise read here.
    """
    @prim_attr_register
    def __init__(self, isRef=True, epsilon=1e-5, factor=0.1):
        """Initialize BNTrainingUpdate: register I/O names and validate attributes."""
        self.init_prim_io_names(inputs=['x', 'sum', 'square_sum', 'scale', 'b', 'mean', 'variance'],
                                outputs=['y', 'running_mean', 'running_variance', 'save_mean', 'save_inv_variance'])
        # epsilon in (0, 1]; factor (moving-average weight) in [0, 1].
        self.epsilon = validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, 'BNTrainingUpdate')
        self.factor = validator.check_number_range('factor', factor, 0, 1, Rel.INC_BOTH, 'BNTrainingUpdate')

    # NOTE(review): `sum` and `b` shadow builtins, but renaming the parameters
    # would change the keyword-call interface, so they are left as-is.
    def infer_shape(self, x, sum, square_sum, scale, b, mean, variance):
        # y follows x; the four statistics outputs follow `variance`'s shape.
        return (x, variance, variance, variance, variance)

    def infer_dtype(self, x, sum, square_sum, scale, b, mean, variance):
        return (x, variance, variance, variance, variance)
class BatchNorm(PrimitiveWithInfer):
    """Batch normalization over a 4-D NCHW input.

    In training mode the batch statistics are computed from the input, so
    `mean`/`variance` are only loosely validated; in inference mode they must be
    well-formed per-channel tensors.
    """
    @prim_attr_register
    def __init__(self, is_training=False, epsilon=1e-5):
        """Initialize BatchNorm: validate attributes and register I/O names."""
        validator.check_value_type('is_training', is_training, (bool,), self.name)
        # epsilon in (0, 1].
        validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, self.name)
        self.add_prim_attr('data_format', "NCHW")
        self.init_prim_io_names(inputs=['x', 'scale', 'offset', 'mean', 'variance'],
                                outputs=['y', 'batch_mean', 'batch_variance', 'reserve_space_1', 'reserve_space_2'])

    def infer_shape(self, input_x, scale, bias, mean, variance):
        # scale/bias are 1-D of length C == input_x shape[1] (NCHW).
        validator.check_integer("scale rank", len(scale), 1, Rel.EQ, self.name)
        validator.check("scale shape", scale, "bias shape", bias, Rel.EQ, self.name)
        validator.check("scale shape[0]", scale[0], "input_x shape[1]", input_x[1], Rel.EQ, self.name)
        # Inference mode reads the supplied statistics, so they must be checked;
        # training mode computes them from the batch and skips these checks.
        if not self.is_training:
            validator.check_integer("mean rank", len(mean), 1, Rel.EQ, self.name)
            validator.check("mean shape", mean, "variance shape", variance, Rel.EQ, self.name)
            validator.check("mean shape", mean, "scale shape", scale, Rel.EQ, self.name)
        return (input_x, scale, scale, scale, scale)

    def infer_dtype(self, input_x, scale, bias, mean, variance):
        validator.check_tensor_type_same({"input_x": input_x}, [mstype.float16, mstype.float32], self.name)
        args = {"scale": scale, "bias": bias}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        args_moving = {"mean": mean, "variance": variance}
        if self.is_training:
            # In training mode the moving statistics may also be None (unset Parameters).
            valid_types = [mstype.tensor_type(mstype.float16), mstype.tensor_type(mstype.float32), None]
            validator.check_type_same(args_moving, valid_types, self.name)
        else:
            args_moving = {"mean": mean, "variance": variance}
            validator.check_tensor_type_same(args_moving, [mstype.float16, mstype.float32], self.name)
        return (input_x, scale, bias, input_x, input_x)
class Conv2D(PrimitiveWithInfer):
    @prim_attr_register
    def __init__(self,
                 out_channel,
                 kernel_size,
                 mode=1,
                 pad_mode="valid",
                 pad=0,
                 stride=1,
                 dilation=1,
                 group=1):
        """Initialize Conv2D: validate and canonicalize all convolution attributes."""
        self.init_prim_io_names(inputs=['x', 'w'], outputs=['output'])
        self.kernel_size = _check_positive_int_or_tuple('kernel_size', kernel_size, self.name)
        # stride and dilation are stored in 4-element NCHW form with N and C fixed to 1.
        self.stride = _check_positive_int_or_tuple('stride', stride, self.name, allow_four=True, ret_four=True)
        self.add_prim_attr('stride', self.stride)
        self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name, allow_four=True, ret_four=True)
        self.add_prim_attr('dilation', self.dilation)
        validator.check_value_type('pad', pad, (int, tuple), self.name)
        # A scalar pad is broadcast to (top, bottom, left, right).
        if isinstance(pad, int):
            pad = (pad,) * 4
        else:
            validator.check_integer('pad size', len(pad), 4, Rel.EQ, self.name)
        self.padding = pad
        self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad'], self.name)
        # Explicit padding is only meaningful in 'pad' mode.
        if pad_mode != 'pad' and pad != (0, 0, 0, 0):
            raise ValueError(f"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.")
        if self.pad_mode == 'pad':
            for item in pad:
                validator.check_integer('pad item', item, 0, Rel.GE, self.name)
        # Only mode 1 is supported by this primitive.
        self.mode = validator.check_integer('mode', mode, 1, Rel.EQ, self.name)
        self.add_prim_attr('data_format', "NCHW")
        self.out_channel = validator.check_integer('out_channel', out_channel, 0, Rel.GT, self.name)
        self.group = validator.check_integer('group', group, 0, Rel.GT, self.name)
        # NOTE(review): offset_a is registered as 0 here; presumably a quantization
        # offset consumed by the backend — confirm against the op registry.
        self.add_prim_attr('offset_a', 0)
def infer_shape(self, x_shape, w_shape, b_shape=None):
validator.check_integer("weight rank", len(w_shape), 4, Rel.EQ, self.name)
validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
validator.check(f"x_shape[1] / group", x_shape[1] // self.group, "w_shape[1]", w_shape[1], Rel.EQ, self.name)
validator.check('out_channel', self.out_channel, 'w_shape[0]', w_shape[0], Rel.EQ, self.name)
validator.check('kernel_size', self.kernel_size, 'w_shape[2:4]', tuple(w_shape[2:4]), Rel.EQ, self.name)
kernel_size_h = w_shape[2]
kernel_size_w = w_shape[3]
stride_h = self.stride[2]
stride_w = self.stride[3]
dilation_h = self.dilation[2]
dilation_w = self.dilation[3]
if self.pad_mode == "valid":
h_out = math.ceil((x_shape[2] - dilation_h * (kernel_size_h - 1)) / stride_h)
w_out = math.ceil((x_shape[3] - dilation_w * (kernel_size_w - 1)) / stride_w)
pad_top, pad_bottom, pad_left, pad_right = 0, 0, 0, 0
elif self.pad_mode == "same":
h_out = math.ceil(x_shape[2] / stride_h)
w_out = math.ceil(x_shape[3] / stride_w)
pad_needed_h = max(0, (h_out - 1) * stride_h + dilation_h * (kernel_size_h - 1) + 1 - x_shape[2])
pad_top = math.floor(pad_needed_h / 2)
pad_bottom = pad_needed_h - pad_top
pad_needed_w = max(0, (w_out - 1) * stride_w + dilation_w * (kernel_size_w - 1) + 1 - x_shape[3])
pad_left = math.floor(pad_needed_w / 2)
pad_right = pad_needed_w - pad_left
elif self.pad_mode == 'pad':
pad_top, pad_bottom, pad_left, pad_right = self.padding
h_out = 1 + (x_shape[2] + pad_top + pad_bottom - kernel_size_h - (kernel_size_h - 1) * (dilation_h - 1)) \
/ stride_h
w_out = 1 + (x_shape[3] + pad_left + pad_right - kernel_size_w - (kernel_size_w - 1) * (dilation_w - 1)) \
/ stride_w
h_out = math.floor(h_out)
w_out = math.floor(w_out)
self.pad_list = [pad_top, pad_bottom, pad_left, pad_right]
self.add_prim_attr('pad_list', (pad_top, pad_bottom, pad_left, pad_right))
out_channel = self.out_channel
out_shape = [x_shape[0], out_channel, h_out, w_out]
return out_shape
def infer_dtype(self, x_dtype, w_dtype, b_dtype=None):
args = {'x': x_dtype, 'w': w_dtype}
valid_types = [mstype.int8, mstype.int32, mstype.float16, mstype.float32]
validator.check_tensor_type_same(args, valid_types, self.name)
if x_dtype.element_type() == mstype.int8:
return mstype.tensor_type(mstype.int32)
return x_dtype
class DepthwiseConv2dNative(PrimitiveWithInfer):
    """Depthwise 2D convolution on an NCHW input.

    Each input channel is convolved with its own ``channel_multiplier``
    filters, so the output has ``channel_multiplier * in_channels`` channels.
    """

    @prim_attr_register
    def __init__(self,
                 channel_multiplier,
                 kernel_size,
                 mode=3,
                 pad_mode="valid",
                 pad=0,
                 stride=1,
                 dilation=1,
                 group=1):
        """Validate attributes; stride and dilation must be square (h == w)."""
        self.init_prim_io_names(inputs=['x', 'w'], outputs=['output'])
        self.kernel_size = _check_positive_int_or_tuple('kernel_size', kernel_size, self.name)
        self.stride = _check_positive_int_or_tuple('stride', stride, self.name)
        if self.stride[0] != self.stride[1]:
            raise ValueError("The height and width of stride should be equal,"
                             f"but got height:{self.stride[0]}, width:{self.stride[1]}")
        self.add_prim_attr('stride', (1, 1, self.stride[0], self.stride[1]))
        self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name)
        if self.dilation[0] != self.dilation[1]:
            raise ValueError("The height and width of dilation should be equal,"
                             f"but got height:{self.dilation[0]}, width:{self.dilation[1]}")
        self.add_prim_attr('dilation', (1, 1, self.dilation[0], self.dilation[1]))
        validator.check_value_type('pad', pad, (int, tuple), self.name)
        if isinstance(pad, int):
            # Scalar pad applies to all four sides.
            pad = (pad,) * 4
        else:
            validator.check_integer('pad size', len(pad), 4, Rel.EQ, self.name)
        self.padding = pad
        self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad'], self.name)
        if pad_mode != 'pad' and pad != (0, 0, 0, 0):
            raise ValueError(f"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.")
        if self.pad_mode == 'pad':
            for item in pad:
                validator.check_integer('pad item', item, 0, Rel.GE, self.name)
        self.mode = validator.check_integer("mode", mode, 3, Rel.EQ, self.name)
        self.add_prim_attr('data_format', "NCHW")
        self.channel_multiplier = validator.check_integer("channel_multiplier", channel_multiplier, 0, Rel.GT,
                                                          self.name)
        self.group = validator.check_integer("group", group, 0, Rel.GT, self.name)
        self.add_prim_attr('offset_a', 0)

    def infer_shape(self, x_shape, w_shape, b_shape=None):
        """Compute the NCHW output shape and register the resolved pads."""
        validator.check_integer("weight rank", len(w_shape), 4, Rel.EQ, self.name)
        validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
        validator.check("x_shape[1]", x_shape[1], "w_shape[1]", w_shape[1], Rel.EQ, self.name)
        validator.check('kernel_size', self.kernel_size, 'w_shape[2:4]', tuple(w_shape[2:4]), Rel.EQ, self.name)
        kernel_size_n, _, kernel_size_h, kernel_size_w = w_shape
        _, _, stride_h, stride_w = self.stride
        _, _, dilation_h, dilation_w = self.dilation
        if kernel_size_n != 1:
            # Depthwise weight layout keeps its batch dimension fixed at 1.
            raise ValueError(f"The batch of input weight should be 1, but got {kernel_size_n}")
        if self.pad_mode == "valid":
            # No padding: only fully-covered windows contribute.
            h_out = math.ceil((x_shape[2] - dilation_h * (kernel_size_h - 1)) / stride_h)
            w_out = math.ceil((x_shape[3] - dilation_w * (kernel_size_w - 1)) / stride_w)
            pad_top, pad_bottom, pad_left, pad_right = 0, 0, 0, 0
        elif self.pad_mode == "same":
            # Pad so the output spatial size is ceil(input / stride).
            h_out = math.ceil(x_shape[2] / stride_h)
            w_out = math.ceil(x_shape[3] / stride_w)
            pad_needed_h = max(0, (h_out - 1) * stride_h + dilation_h * (kernel_size_h - 1) + 1 - x_shape[2])
            pad_top = math.floor(pad_needed_h / 2)
            pad_bottom = pad_needed_h - pad_top
            pad_needed_w = max(0, (w_out - 1) * stride_w + dilation_w * (kernel_size_w - 1) + 1 - x_shape[3])
            pad_left = math.floor(pad_needed_w / 2)
            pad_right = pad_needed_w - pad_left
        elif self.pad_mode == 'pad':
            # Explicit per-side padding supplied by the caller.
            pad_top, pad_bottom, pad_left, pad_right = self.padding
            h_out = 1 + (x_shape[2] + pad_top + pad_bottom - kernel_size_h - (kernel_size_h - 1) * (dilation_h - 1)) \
                / stride_h
            w_out = 1 + (x_shape[3] + pad_left + pad_right - kernel_size_w - (kernel_size_w - 1) * (dilation_w - 1)) \
                / stride_w
            h_out = math.floor(h_out)
            w_out = math.floor(w_out)
        self.pad_list = (pad_top, pad_bottom, pad_left, pad_right)
        self.add_prim_attr('pads', self.pad_list)
        out_channel = self.channel_multiplier * x_shape[1]
        out_shape = [x_shape[0], out_channel, h_out, w_out]
        return out_shape

    def infer_dtype(self, x_dtype, w_dtype, b_dtype=None):
        """x and w must share a numeric dtype; int8 inputs produce int32."""
        args = {'x': x_dtype, 'w': w_dtype}
        validator.check_tensor_type_same(args, mstype.number_type, self.name)
        if x_dtype.element_type() == mstype.int8:
            return mstype.tensor_type(mstype.int32)
        return x_dtype
class _Pool(PrimitiveWithInfer):
    """Shared base class for the 2-D pooling primitives.

    Normalizes ``ksize``/``strides`` to 4-tuples and computes the pooled
    output shape under 'VALID' or 'SAME' padding.
    """

    @prim_attr_register
    def __init__(self, ksize=1, strides=1, padding="valid"):
        """Validate and register the pooling window, stride and padding."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])
        validator.check_value_type('ksize', ksize, [int, tuple], self.name)
        validator.check_value_type('strides', strides, [int, tuple], self.name)
        self.padding = validator.check_string('padding', padding.upper(), ['VALID', 'SAME'], self.name)
        self.add_prim_attr("padding", self.padding)
        # MaxPoolWithArgmax stores ksize/strides in NHWC order, the others in NCHW.
        self.is_maxpoolwithargmax = (self.name == "MaxPoolWithArgmax")
        if not self.is_maxpoolwithargmax:
            self.add_prim_attr('data_format', "NCHW")
        self.ksize = _check_positive_int_or_tuple("ksize", ksize, self.name, allow_four=False, ret_four=True)
        if self.is_maxpoolwithargmax:
            self.ksize = (1, self.ksize[-2], self.ksize[-1], 1)
        self.add_prim_attr("ksize", self.ksize)
        self.strides = _check_positive_int_or_tuple("strides", strides, self.name, allow_four=False, ret_four=True)
        if self.is_maxpoolwithargmax:
            self.strides = (1, self.strides[-2], self.strides[-1], 1)
        self.add_prim_attr("strides", self.strides)

    def infer_shape(self, x_shape):
        """Compute the pooled NCHW output shape; raise if any dim is <= 0."""
        validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
        batch, channel, input_h, input_w = x_shape
        if self.is_maxpoolwithargmax:
            _, kernel_h, kernel_w, _ = self.ksize
            _, stride_h, stride_w, _ = self.strides
        else:
            _, _, kernel_h, kernel_w = self.ksize
            _, _, stride_h, stride_w = self.strides
        if self.padding == "VALID":
            out_h = math.ceil((input_h - (kernel_h - 1)) / stride_h)
            out_w = math.ceil((input_w - (kernel_w - 1)) / stride_w)
        elif self.padding == "SAME":
            out_h = math.ceil(input_h / stride_h)
            out_w = math.ceil(input_w / stride_w)
        out_shape = [batch, channel, out_h, out_w]
        # A non-positive dim means the kernel was larger than the padded input.
        for shape_value in out_shape:
            if shape_value <= 0:
                raise ValueError(f"For '{self.name}' The kernel size is not valid, "
                                 f"please check it if is larger than data's shape size.")
        return out_shape

    def infer_dtype(self, x_dtype):
        """Any tensor is accepted; the dtype passes through unchanged."""
        validator.check_subclass("input", x_dtype, mstype.tensor, self.name)
        return x_dtype
class MaxPool(_Pool):
    """Max pooling over 2-D spatial windows.

    All behaviour lives in _Pool; this subclass contributes only its name.
    """

    @prim_attr_register
    def __init__(self, ksize=1, strides=1, padding="valid"):
        """Forward every argument to the shared pooling base class."""
        super(MaxPool, self).__init__(ksize, strides, padding)
class MaxPoolWithArgmax(_Pool):
    """Max pooling that additionally returns an argmax tensor.

    NOTE(review): unlike the sibling primitives, ``__init__`` is not
    decorated with ``@prim_attr_register`` — confirm this is intentional.
    """

    def __init__(self, ksize=1, strides=1, padding="valid"):
        """Initialize via _Pool and record the backend for layout decisions."""
        super(MaxPoolWithArgmax, self).__init__(ksize, strides, padding)
        self.is_tbe = context.get_context("device_target") == "Ascend"
        self.is_gpu = context.get_context("device_target") == "GPU"

    def infer_shape(self, x_shape):
        """Return (pooled shape, argmax shape); the argmax layout is TBE-specific."""
        out_shape = _Pool.infer_shape(self, x_shape)
        _, _, out_h, out_w = out_shape
        _, kernel_h, kernel_w, _ = self.ksize
        argmax_shape = []
        if self.is_tbe:
            # Ascend/TBE argmax layout: dims 0/1 follow the input, dim 2 holds
            # one slot per kernel position, dim 3 packs the output positions.
            for i in range(4):
                if i == 2:
                    dim = kernel_h * kernel_w
                    argmax_shape.append(dim)
                elif i == 3:
                    dim = math.ceil(out_h * out_w / 16) + 1
                    argmax_shape.append(dim)
                else:
                    argmax_shape.append(x_shape[i])
        else:
            # Other backends use an argmax shaped like the pooled output.
            argmax_shape = out_shape
        return out_shape, argmax_shape

    def infer_dtype(self, x_dtype):
        """Input must be float16/float32; argmax is uint16 (int32 on GPU)."""
        out_dtype = x_dtype
        validator.check_tensor_type_same({"x": x_dtype}, (mstype.float16, mstype.float32), self.name)
        argmax_dtype = mstype.uint16
        if self.is_gpu:
            argmax_dtype = mstype.int32
        return out_dtype, argmax_dtype
class AvgPool(_Pool):
    """Average pooling over 2-D spatial windows.

    Records which backend the op will run on before delegating to _Pool.
    """

    @prim_attr_register
    def __init__(self, ksize=1, strides=1, padding="valid"):
        """Tag the execution target, then initialize the shared pooling base."""
        if context.get_context("device_target") == "GPU":
            self.target = "GPU"
        elif context.get_context("enable_ge"):
            self.target = "GE"
        else:
            self.target = "OTHER"
        super().__init__(ksize, strides, padding)
class Conv2DBackpropInput(PrimitiveWithInfer):
    """Gradient of Conv2D with respect to its input.

    Takes the output gradient, the filter and the original input size, and
    produces a tensor shaped like the forward input.
    """

    @prim_attr_register
    def __init__(self,
                 out_channel,
                 kernel_size,
                 pad_mode="valid",
                 pad=0,
                 pad_list=None,
                 mode=1,
                 stride=1,
                 dilation=1,
                 group=1):
        """Validate attributes; pad_mode is registered upper-cased."""
        self.init_prim_io_names(inputs=['out_backprop', 'filter', 'input_sizes'], outputs=['output'])
        self.out_channel = validator.check_integer('out_channel', out_channel, 0, Rel.GT, self.name)
        self.kernel_size = _check_positive_int_or_tuple('kernel_size', kernel_size, self.name)
        self.stride = _check_positive_int_or_tuple('stride', stride, self.name, allow_four=True, ret_four=False)
        self.add_prim_attr('stride', self.stride)
        self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name, allow_four=True, ret_four=True)
        self.add_prim_attr('dilation', self.dilation)
        validator.check_value_type('pad', pad, (int, tuple), self.name)
        if isinstance(pad, int):
            # Scalar pad applies to all four sides.
            pad = (pad,) * 4
        else:
            validator.check_integer('pad size', len(pad), 4, Rel.EQ, self.name)
        self.padding = pad
        self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad'], self.name)
        if pad_mode != 'pad' and pad != (0, 0, 0, 0):
            raise ValueError(f"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.")
        if self.pad_mode == 'pad':
            for item in pad:
                validator.check_integer('pad item', item, 0, Rel.GE, self.name)
        pad_mode = pad_mode.upper()
        self.add_prim_attr('pad_mode', pad_mode)
        self.mode = validator.check_integer('mode', mode, 1, Rel.EQ, self.name)
        self.group = validator.check_integer('group', group, 0, Rel.GT, self.name)
        self.add_prim_attr('data_format', "NCHW")
        # An explicit pad_list, when given, overrides pad_mode-derived padding.
        if pad_list:
            for x in pad_list:
                validator.check_integer('element of pad_list', x, 0, Rel.GE, self.name)
        self.pad_list = pad_list

    def __infer__(self, doutput, w, x_size):
        """Validate dtypes, derive the pad list, and return x_size as the shape."""
        x_size_v = x_size['value']
        validator.check_value_type('x_size', x_size_v, [tuple], self.name)
        for i, dim_len in enumerate(x_size_v):
            validator.check_value_type("x_size[%d]" % i, dim_len, [int], self.name)
        args = {'doutput': doutput['dtype'], 'w': w['dtype']}
        valid_types = [mstype.int8, mstype.int32, mstype.float16, mstype.float32]
        validator.check_tensor_type_same(args, valid_types, self.name)
        # infer shape
        dout_shape = doutput['shape']
        kernel_h = self.kernel_size[0]
        kernel_w = self.kernel_size[1]
        stride_h = self.stride[0]
        stride_w = self.stride[1]
        dilation_h = self.dilation[2]
        dilation_w = self.dilation[3]
        # default pad mode is valid
        pad_list = (0, 0, 0, 0)
        if self.pad_list:
            pad_list = tuple(self.pad_list)
        elif self.pad_mode == "SAME":
            # Mirror the forward 'same' padding so shapes round-trip exactly.
            pad_needed_h = max(0, (dout_shape[2] - 1) * stride_h + dilation_h * (kernel_h - 1) + 1 - x_size_v[2])
            pad_top = math.floor(pad_needed_h / 2)
            pad_bottom = pad_needed_h - pad_top
            pad_needed_w = max(0, (dout_shape[3] - 1) * stride_w + dilation_w * (kernel_w - 1) + 1 - x_size_v[3])
            pad_left = math.floor(pad_needed_w / 2)
            pad_right = pad_needed_w - pad_left
            pad_list = (pad_top, pad_bottom, pad_left, pad_right)
        elif self.pad_mode == 'PAD':
            pad_list = self.padding
        self.add_prim_attr('pad_list', pad_list)
        out = {
            'value': None,
            'shape': x_size_v,
            'dtype': doutput['dtype'],
        }
        return out
class BiasAdd(PrimitiveWithInfer):
    """Adds a 1-D bias along the channel (second) dimension of a tensor."""

    @prim_attr_register
    def __init__(self):
        """Register input/output names; layout is fixed to NCHW."""
        self.init_prim_io_names(inputs=['x', 'b'], outputs=['output'])
        self.add_prim_attr('data_format', 'NCHW')

    def infer_shape(self, x_shape, b_shape):
        """Bias length must match channel count; output shape equals input's."""
        validator.check_integer("x rank", len(x_shape), 2, Rel.GE, self.name)
        validator.check_integer("bias rank", len(b_shape), 1, Rel.EQ, self.name)
        validator.check("b_shape[0]", b_shape[0], "x_shape[1]", x_shape[1], Rel.EQ, self.name)
        return x_shape

    def infer_dtype(self, x_type, b_type):
        """Input and bias share a numeric tensor dtype; output keeps x's dtype."""
        validator.check_tensor_type_same({"input_x": x_type, "bias": b_type}, mstype.number_type, self.name)
        return x_type
class TopK(PrimitiveWithInfer):
    """Selects the k entries along the last dimension of the input."""

    @prim_attr_register
    def __init__(self, sorted=False):
        """`sorted` controls whether the selected values are ordered."""
        validator.check_value_type("sorted", sorted, [bool], self.name)
        self.init_prim_io_names(inputs=['input', 'k'],
                                outputs=['values', 'indices'])

    def __infer__(self, input_x, k):
        """Both outputs share x's shape with the last dimension replaced by k."""
        x_dtype = input_x['dtype']
        validator.check_tensor_type_same({'x': x_dtype},
                                         (mstype.int32, mstype.float16, mstype.float32),
                                         self.name)
        k_v = k['value']
        validator.check_value_type('k', k_v, (int,), self.name)
        out_shape = list(input_x['shape'])
        out_shape[-1] = k_v
        return {'shape': (out_shape, out_shape),
                'dtype': (x_dtype, mstype.int32),
                'value': None}
class SoftmaxCrossEntropyWithLogits(PrimitiveWithInfer):
    """Softmax cross-entropy between logits and same-shaped labels.

    Produces a per-sample loss and a gradient tensor shaped like the logits.
    """

    @prim_attr_register
    def __init__(self):
        """No constructor attributes."""

    def infer_shape(self, logits_shape, labels_shape):
        """Shapes must match; loss is 1-D over the batch, dlogits mirrors logits."""
        validator.check("logits_shape", logits_shape, "labels_shape", labels_shape, Rel.EQ, self.name)
        return ([logits_shape[0]], logits_shape)

    def infer_dtype(self, logits_type, labels_type):
        """Both inputs must be float16/float32 tensors of the same dtype."""
        validator.check_tensor_type_same({"logits": logits_type, "labels": labels_type},
                                         (mstype.float16, mstype.float32), self.name)
        return (logits_type, logits_type)
class SparseSoftmaxCrossEntropyWithLogits(PrimitiveWithInfer):
    """Softmax cross-entropy with integer class labels.

    With ``is_grad=True`` the output is the gradient w.r.t. the logits
    instead of the scalar loss.
    """

    @prim_attr_register
    def __init__(self, is_grad=False):
        """`is_grad` switches the output between loss and gradient."""
        self.init_prim_io_names(inputs=['features', 'labels'], outputs=['output'])
        self.is_grad = is_grad
        self.add_prim_attr('sens', 1.0)

    def infer_shape(self, logits_shape, labels_shape):
        """Batch sizes must agree; output is a scalar loss or a logits-shaped grad."""
        validator.check("logits_shape[0]", logits_shape[0], "labels_shape[0]", labels_shape[0], Rel.EQ, self.name)
        return logits_shape if self.is_grad else []

    def infer_dtype(self, logits_type, labels_type):
        """Logits float16/float32; labels int32/int64; output keeps logits' dtype."""
        validator.check_tensor_type_same({"logits": logits_type}, (mstype.float16, mstype.float32), self.name)
        validator.check_tensor_type_same({"labels": labels_type}, (mstype.int32, mstype.int64), self.name)
        return logits_type
class ApplyMomentum(PrimitiveWithInfer):
    """Momentum optimizer update applied in place to `variable`/`accumulation`.

    The actual update arithmetic is implemented by the backend kernel and is
    not visible here; this class only declares signatures and inference.
    """
    # variable/accumulation/gradient share dtype T; lr and momentum may differ.
    __mindspore_signature__ = (
        sig.make_sig('variable', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accumulation', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('learning_rate', dtype=sig.sig_dtype.T1),
        sig.make_sig('gradient', dtype=sig.sig_dtype.T),
        sig.make_sig('momentum', dtype=sig.sig_dtype.T2),
    )

    @prim_attr_register
    def __init__(self, use_nesterov=False, use_locking=False, gradient_scale=1.0):
        """Register I/O names and record the backend (affects output arity)."""
        self.init_prim_io_names(inputs=['variable', 'accumulation', 'learning_rate', 'gradient', 'momentum'],
                                outputs=['output'])
        self.is_tbe = context.get_context("device_target") == "Ascend"
        self.is_ge = context.get_context("enable_ge")

    def infer_shape(self, v_shape, a_shape, l_shape, g_shape, m_shape):
        """On Ascend without GE the kernel emits two outputs, otherwise one."""
        if not self.is_ge and self.is_tbe:
            return v_shape, v_shape
        return v_shape

    def infer_dtype(self, v_dtype, a_dtype, l_dtype, g_dtype, m_dtype):
        """All inputs must be float16/float32/float64; lr/grad/momentum may be scalars."""
        valid_types = [mstype.float16, mstype.float32, mstype.float64]
        # RefKey-typed var/accum skip the tensor dtype check.
        if v_dtype != mstype.type_refkey and a_dtype != mstype.type_refkey:
            validator.check_tensor_type_same({"v": v_dtype}, valid_types, self.name)
            validator.check_tensor_type_same({"a": a_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"l_dtype": l_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"g_dtype": g_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"m_dtype": m_dtype}, valid_types, self.name)
        if not self.is_ge and self.is_tbe:
            return g_dtype, g_dtype
        return g_dtype
class SmoothL1Loss(PrimitiveWithInfer):
    """Smooth L1 loss; `beta` is the quadratic-to-linear transition point."""

    @prim_attr_register
    def __init__(self, beta=1.0):
        """beta must be a strictly positive float."""
        validator.check_value_type('beta', beta, [float], self.name)
        validator.check('beta', beta, '', 0, Rel.GT, self.name)
        self.init_prim_io_names(inputs=['prediction', 'target'], outputs=['output'])

    def infer_shape(self, prediction, target):
        """Prediction and target shapes must be identical; output matches them."""
        validator.check('prediction shape', prediction, 'target shape', target, Rel.EQ, self.name)
        return prediction

    def infer_dtype(self, prediction, target):
        """Both inputs must be float16/float32 tensors of the same dtype."""
        validator.check_tensor_type_same({"prediction": prediction, "target": target},
                                         (mstype.float16, mstype.float32), self.name)
        return prediction
class L2Loss(PrimitiveWithInfer):
    """Scalar L2-style loss over a single float tensor.

    BUG FIX: ``__init__`` had an empty body (its original docstring appears
    to have been stripped), which is a SyntaxError in Python; a no-op body
    was restored. No other behavior changed.
    """

    @prim_attr_register
    def __init__(self):
        """No constructor arguments; nothing to initialize."""

    def infer_shape(self, input_x):
        """The loss is a scalar, represented by an empty shape."""
        loss_shape = []
        return loss_shape

    def infer_dtype(self, x_type):
        """Input must be a float16/float32 tensor; output keeps that dtype."""
        validator.check_subclass("x_type", x_type, mstype.tensor, self.name)
        valid_types = [mstype.float16, mstype.float32]
        validator.check_tensor_type_same({'x_type': x_type}, valid_types, self.name)
        return x_type
class DataFormatDimMap(PrimitiveWithInfer):
    """Maps dimension indices from a source data format to a destination one."""

    @prim_attr_register
    def __init__(self, src_format='NHWC', dst_format='NCHW'):
        """Both formats must be one of 'NHWC' / 'NCHW'."""
        supported = ['NHWC', 'NCHW']
        self.src_format = validator.check_string("src_format", src_format, supported, self.name)
        self.dst_format = validator.check_string("dst_format", dst_format, supported, self.name)
        self.init_prim_io_names(inputs=['input_x'], outputs=['output'])

    def infer_shape(self, x_shape):
        """Shape passes through unchanged."""
        return x_shape

    def infer_dtype(self, x_type):
        """Input must be an int32 tensor; dtype is preserved."""
        validator.check_subclass("x", x_type, mstype.tensor, self.name)
        validator.check_tensor_type_same({"x": x_type}, [mstype.int32], self.name)
        return x_type
class RNNTLoss(PrimitiveWithInfer):
    """RNN Transducer loss; outputs per-sample costs and gradients w.r.t. acts."""

    @prim_attr_register
    def __init__(self, blank_label=0):
        """blank_label is the integer index of the blank symbol."""
        validator.check_value_type('blank_label', blank_label, [int], self.name)
        self.init_prim_io_names(inputs=['acts', 'labels', 'input_length', 'label_length'],
                                outputs=['costs', 'grads'])

    def infer_shape(self, acts_shape, labels_shape, input_length_shape, label_length_shape):
        """Validate rank and size relations; costs is (batch,), grads matches acts.

        acts is 4-D; labels is 2-D with labels_shape[1] == acts_shape[2] - 1;
        both length vectors are 1-D with one entry per batch element.
        """
        validator.check_integer('acts_rank', len(acts_shape), 4, Rel.EQ, self.name)
        validator.check_integer('labels_rank', len(labels_shape), 2, Rel.EQ, self.name)
        validator.check_integer('input_length_rank', len(input_length_shape), 1, Rel.EQ, self.name)
        validator.check_integer('label_length_rank', len(label_length_shape), 1, Rel.EQ, self.name)
        validator.check('labels shape[0]', labels_shape[0], 'acts shape[0]', acts_shape[0], Rel.EQ, self.name)
        validator.check('labels shape[1]', labels_shape[1], 'acts shape[2]-1', acts_shape[2]-1, Rel.EQ, self.name)
        validator.check('input_length size', input_length_shape[0], 'acts shape[0]', acts_shape[0], Rel.EQ, self.name)
        validator.check('label_length size', label_length_shape[0], 'acts shape[0]', acts_shape[0], Rel.EQ, self.name)
        costs_shape = (acts_shape[0],)
        return (costs_shape, acts_shape)

    def infer_dtype(self, acts_type, labels_type, input_length_type, label_length_type):
        """acts is float16/float32; labels and both lengths are int32 tensors."""
        validator.check_subclass("acts_type", acts_type, mstype.tensor, self.name)
        validator.check_subclass("labels_type", labels_type, mstype.tensor, self.name)
        validator.check_subclass("input_length_type", input_length_type, mstype.tensor, self.name)
        validator.check_subclass("label_length_type", label_length_type, mstype.tensor, self.name)
        validator.check_tensor_type_same({"acts_type": acts_type}, [mstype.float32, mstype.float16], self.name)
        validator.check_tensor_type_same({"labels_type": labels_type}, [mstype.int32], self.name)
        validator.check_tensor_type_same({"input_length_type": input_length_type}, [mstype.int32], self.name)
        validator.check_tensor_type_same({"label_length_type": label_length_type}, [mstype.int32], self.name)
        return (acts_type, acts_type)
class SGD(PrimitiveWithInfer):
    """Stochastic gradient descent update with momentum/dampening support.

    FIX: several string literals (validator labels and the nesterov error)
    carried an ``f`` prefix with no placeholders (ruff F541); the prefixes
    were removed. String contents are byte-identical, so behavior is
    unchanged.
    """

    @prim_attr_register
    def __init__(self, dampening=0.0, weight_decay=0.0, nesterov=False):
        """Validate nesterov; it is incompatible with non-zero dampening."""
        validator.check_value_type("nesterov", nesterov, [bool], self.name)
        if nesterov and dampening != 0:
            raise ValueError("Nesterov need zero dampening!")
        self.init_prim_io_names(inputs=['parameters', 'gradient', 'learning_rate', 'accum', 'momentum', 'stat'],
                                outputs=['output'])

    def infer_shape(self, parameters_shape, gradient_shape, learning_rate_shape,
                    accum_shape, momentum_shape, stat_shape):
        """Parameters/accum must be non-scalar; gradient and stat shapes match."""
        validator.check_integer('parameters rank', len(parameters_shape), 0, Rel.GT, self.name)
        validator.check_integer('gradient rank', len(gradient_shape), 0, Rel.GE, self.name)
        validator.check_integer('learning rate rank', len(learning_rate_shape), 0, Rel.GE, self.name)
        validator.check_integer('accumulation rank', len(accum_shape), 0, Rel.GT, self.name)
        validator.check_integer('momentum rank', len(momentum_shape), 0, Rel.GE, self.name)
        validator.check_integer('stat rank', len(stat_shape), 0, Rel.GE, self.name)
        validator.check("gradient shape", gradient_shape, "stat shape", stat_shape, Rel.EQ, self.name)
        return parameters_shape

    def infer_dtype(self, parameters_dtype, gradient_dtype, learning_rate_dtype,
                    accum_dtype, momentum_dtype, stat_dtype):
        """All six inputs must be float16/float32 tensors; output keeps parameters' dtype."""
        valid_types = [mstype.float16, mstype.float32]
        validator.check_tensor_type_same({"parameters": parameters_dtype}, valid_types, self.name)
        validator.check_tensor_type_same({"gradient": gradient_dtype}, valid_types, self.name)
        validator.check_tensor_type_same({"learning_rate": learning_rate_dtype}, valid_types, self.name)
        validator.check_tensor_type_same({"accum": accum_dtype}, valid_types, self.name)
        validator.check_tensor_type_same({"momentum": momentum_dtype}, valid_types, self.name)
        validator.check_tensor_type_same({"stat": stat_dtype}, valid_types, self.name)
        return parameters_dtype
class ApplyRMSProp(PrimitiveWithInfer):
    """RMSProp optimizer update (arithmetic is done by the backend kernel).

    decay/momentum/epsilon must be compile-time constants — see infer_value.
    """

    @prim_attr_register
    def __init__(self, use_locking=False):
        """Register I/O names and record the backend (affects output arity)."""
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
        self.init_prim_io_names(inputs=['var', 'mean_square', 'moment', 'learning_rate', 'grad',
                                        'rho', 'momentum', 'epsilon'], outputs=['output'])
        self.is_ge = context.get_context("enable_ge")
        self.is_d = context.get_context("device_target") == "Ascend"

    def infer_shape(self, var_shape, mean_square_shape, moment_shape, learning_rate_shape, grad_shape, decay_shape,
                    momentum_shape, epsilon_shape):
        """var/mean_square/moment/grad share a shape; Ascend (no GE) returns three."""
        validator.check("var_shape", var_shape, "mean_square_shape", mean_square_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "moment_shape", moment_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "grad_shape", grad_shape, Rel.EQ, self.name)
        if not self.is_ge and self.is_d:
            return var_shape, var_shape, var_shape
        return var_shape

    def infer_dtype(self, var_dtype, mean_square_dtype, moment_dtype, learning_rate_dtype, grad_dtype, decay_dtype,
                    momentum_dtype, epsilon_dtype):
        """Tensor args share a numeric dtype; scalar hyper-params are float16/float32."""
        args = {"var": var_dtype, "mean_square": mean_square_dtype, "moment": moment_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, mstype.number_type, self.name)
        valid_types = [mstype.float16, mstype.float32]
        args_decay = {"decay": decay_dtype, 'momentum': momentum_dtype, "epsilon": epsilon_dtype}
        validator.check_type_same(args_decay, valid_types, self.name)
        args_lr = {"learning_rate": learning_rate_dtype, "decay": decay_dtype}
        validator.check_scalar_or_tensor_type_same(args_lr, valid_types, self.name, allow_mix=True)
        if not self.is_ge and self.is_d:
            return var_dtype, var_dtype, var_dtype
        return var_dtype

    def infer_value(self, var, mean_square, moment, learning_rate, grad, decay, momentum, epsilon):
        """Reject non-constant decay/momentum/epsilon; never computes a value."""
        if decay is None or momentum is None or epsilon is None:
            raise ValueError(f"For {self.name}, decay, momentum, epsilon must be const.")
class ApplyCenteredRMSProp(PrimitiveWithInfer):
    """Centered RMSProp optimizer update (arithmetic done by the backend kernel)."""

    @prim_attr_register
    def __init__(self, use_locking=False):
        """Record locking flag and whether we run on Ascend (affects output arity)."""
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
        self.is_ascend = context.get_context("device_target") == "Ascend"

    def infer_shape(self, var_shape, mean_gradient_shape, mean_square_shape, moment_shape, grad_shape,
                    learning_rate_shape, decay_shape, momentum_shape, epsilon_shape):
        """All state tensors must match var's shape; Ascend returns four shapes."""
        validator.check("var_shape", var_shape, "mean_gradient_shape", mean_gradient_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "mean_square_shape", mean_square_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "moment_shape", moment_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "grad_shape", grad_shape, Rel.EQ, self.name)
        if self.is_ascend:
            return var_shape, mean_gradient_shape, mean_square_shape, moment_shape
        return var_shape

    def infer_dtype(self, var_dtype, mean_gradient_dtype, mean_square_dtype, moment_dtype, grad_dtype,
                    learning_rate_dtype, rho_dtype, momentum_dtype, epsilon_dtype):
        """Tensor args share a numeric dtype; rho/momentum/epsilon are float16/float32."""
        args = {"var": var_dtype, "mean_gradient": mean_gradient_dtype,
                "mean_square": mean_square_dtype, "moment": moment_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, mstype.number_type, self.name)
        valid_types = [mstype.float16, mstype.float32]
        args_rho = {"rho": rho_dtype, 'momentum': momentum_dtype, "epsilon": epsilon_dtype}
        validator.check_type_same(args_rho, valid_types, self.name)
        args_lr = {"learning_rate": learning_rate_dtype, "rho": rho_dtype}
        validator.check_scalar_or_tensor_type_same(args_lr, valid_types, self.name, allow_mix=True)
        if self.is_ascend:
            return var_dtype, mean_gradient_dtype, mean_square_dtype, moment_dtype
        return var_dtype
class LayerNorm(Primitive):
    """Layer normalization primitive.

    begin_norm_axis / begin_params_axis select where normalization and the
    learned parameters start; epsilon stabilizes the division.
    """

    @prim_attr_register
    def __init__(self, begin_norm_axis=1, begin_params_axis=1, epsilon=1e-7):
        """Type-check the three constructor arguments."""
        for axis_name, axis_value in (('begin_norm_axis', begin_norm_axis),
                                      ('begin_params_axis', begin_params_axis)):
            validator.check_value_type(axis_name, axis_value, [int], self.name)
        validator.check_value_type('epsilon', epsilon, [float], self.name)
class L2Normalize(PrimitiveWithInfer):
    """L2-normalizes the input tensor along `axis`."""

    @prim_attr_register
    def __init__(self, axis=0, epsilon=1e-4):
        """axis must be an int; epsilon an int or float."""
        validator.check_value_type('axis', axis, [int], self.name)
        validator.check_value_type('epsilon', epsilon, [int, float], self.name)

    def infer_shape(self, input_x):
        """axis must lie within [-rank, rank); shape passes through unchanged."""
        rank = len(input_x)
        validator.check_int_range('axis value', self.axis, -rank, rank, Rel.INC_LEFT, self.name)
        return input_x

    def infer_dtype(self, input_x):
        """Input must be a float16/float32 tensor; dtype is preserved."""
        validator.check_subclass("x", input_x, mstype.tensor, self.name)
        validator.check_tensor_type_same({"input_x": input_x}, [mstype.float16, mstype.float32], self.name)
        return input_x
class DropoutGenMask(Primitive):
    """Generates the random byte mask consumed by DropoutDoMask."""

    @prim_attr_register
    def __init__(self, Seed0=0, Seed1=0):
        """Both seeds must be ints; the op is flagged as randomly effectful."""
        self.init_prim_io_names(inputs=['shape', 'keep_prob'], outputs=['output'])
        for seed_name, seed_value in (("Seed0", Seed0), ("Seed1", Seed1)):
            validator.check_value_type(seed_name, seed_value, [int], self.name)
        self.add_prim_attr("_random_effect", True)
class DropoutDoMask(PrimitiveWithInfer):
    """Applies a DropoutGenMask byte mask to a tensor.

    Inputs are the tensor, a 1-D uint8 mask (8 element bits per byte, so the
    mask must cover at least ceil(size/8) bytes) and a scalar keep_prob.

    BUG FIX: the size-mismatch error message concatenated a plain string
    after an f-string, so ``{input_x_shape}``/``{mask_shape}`` were printed
    verbatim; the second literal now carries the f-prefix. The typo
    "do not math" was also corrected to "do not match".
    """

    @prim_attr_register
    def __init__(self):
        pass

    def __infer__(self, input_x, mask, keep_prob):
        """Validate shapes/dtypes; output has input_x's shape and dtype."""
        input_x_shape = input_x['shape']
        mask_shape = mask['shape']
        keep_prob_shape = keep_prob['shape']
        validator.check("keep_prob's dim", len(keep_prob_shape), '0(scalar)', 0, Rel.EQ, self.name)
        size_x = reduce(lambda x, y: x * y, input_x_shape)
        if len(mask_shape) != 1:
            raise ValueError("DropoutDoMask mask shape should be 1-dimension.")
        # Each mask byte holds 8 element bits, so the mask covers 8x its length.
        size_y = mask_shape[0] * 8
        if size_x > size_y:
            raise ValueError(f"DropoutDoMask y mask do not match input input_x shape:"
                             f"{input_x_shape}, mask shape: {mask_shape}.")
        validator.check_tensor_type_same({"input_x": input_x['dtype']}, [mstype.float32, mstype.float16, mstype.int32],
                                         self.name)
        validator.check_tensor_type_same({"input_mask": mask['dtype']}, [mstype.uint8], self.name)
        keep_prob_v = keep_prob['value']
        if keep_prob_v is not None:
            validator.check_number_range('keep_prob', keep_prob_v.asnumpy(), 0, 1, Rel.INC_BOTH, self.name)
        out = {'shape': input_x_shape,
               'dtype': input_x['dtype'],
               'value': None}
        return out
class ResizeBilinear(PrimitiveWithInfer):
    """Bilinearly resizes a 4-D NCHW tensor to the configured spatial `size`."""

    @prim_attr_register
    def __init__(self, size, align_corners=False):
        """Attributes are registered automatically by prim_attr_register."""

    def infer_shape(self, input_shape):
        """Keep batch and channel; H and W come from `self.size` (rank must be 4)."""
        batch, channel, _, _ = list(input_shape)
        out_shape = [batch, channel]
        for dim in self.size:
            out_shape.append(int(dim))
        return out_shape

    def infer_dtype(self, input_dtype):
        """Accepts float16/float32; always produces a float32 tensor."""
        validator.check_tensor_type_same({'input_dtype': input_dtype}, [mstype.float16, mstype.float32], self.name)
        return mstype.tensor_type(mstype.float32)
class OneHot(PrimitiveWithInfer):
    """Expands int32 indices into a one-hot tensor along `axis` (-1 = append)."""

    @prim_attr_register
    def __init__(self, axis=-1):
        """axis selects where the new `depth` dimension is inserted."""
        self.init_prim_io_names(inputs=['indices', 'depth', 'on_value', 'off_value'], outputs=['output'])
        validator.check_value_type("axis", axis, [int], self.name)

    def __infer__(self, indices, depth, on_value, off_value):
        """Insert `depth` into the indices shape at `axis`; dtype follows on_value."""
        validator.check_tensor_type_same({"indices": indices['dtype']}, (mstype.int32,), self.name)
        validator.check_type_name("depth", depth['dtype'], mstype.int_type, self.name)
        args = {"on_value": on_value['dtype'], "off_value": off_value['dtype']}
        validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)
        indices_shp = indices['shape']
        validator.check_int_range("axis", self.axis, -1, len(indices_shp), Rel.INC_BOTH, self.name)
        depth_val = depth['value']
        validator.check_integer("depth", depth_val, 0, Rel.GE, self.name)
        # axis == -1 appends the new dimension; otherwise it is inserted in place.
        _ = indices_shp.insert(self.axis, depth_val) if self.axis >= 0 else indices_shp.append(depth_val)
        return {'shape': indices_shp,
                'dtype': on_value['dtype'],
                'value': None}
class Gelu(PrimitiveWithInfer):
    """Gaussian Error Linear Unit activation; elementwise and shape preserving."""

    @prim_attr_register
    def __init__(self):
        """Register input/output names."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, input_x):
        """Elementwise op: shape passes through unchanged."""
        return input_x

    def infer_dtype(self, input_x):
        """Input must be a float16/float32 tensor; dtype is preserved."""
        validator.check_tensor_type_same({"input_x": input_x}, (mstype.float16, mstype.float32), self.name)
        return input_x
class GetNext(PrimitiveWithInfer):
    """Fetches the next element from the dataset queue named `shared_name`."""

    @prim_attr_register
    def __init__(self, types, shapes, output_num, shared_name):
        """types/shapes describe the outputs and must be equally long lists/tuples."""
        for arg_name, arg_value in (("types", types), ("shapes", shapes)):
            validator.check_value_type(arg_name, arg_value, [list, tuple], self.name)
        validator.check("types length", len(types), "shapes length", len(shapes), Rel.EQ, self.name)
        validator.check_value_type("output_num", output_num, [int], self.name)

    def infer_shape(self):
        """Shapes were captured as attributes at construction time."""
        return tuple(self.shapes)

    def infer_dtype(self):
        """Dtypes were captured as attributes at construction time."""
        return tuple(self.types)
class PReLU(PrimitiveWithInfer):
    """Parametric ReLU: negative slopes come from a 1-D weight vector."""

    @prim_attr_register
    def __init__(self):
        pass

    def infer_shape(self, input_x_shape, weight_shape):
        """Weight must be 1-D with one element or one per input channel."""
        if len(input_x_shape) == 1:
            raise ValueError(f'For \'{self.name}\' input_x rank 1 is not supported.')
        weight_dim = len(weight_shape)
        if weight_dim != 1:
            raise ValueError(f'For \'{self.name}\' weight_dim must be 1, while weight_dim is {weight_dim}.')
        # A single-element weight broadcasts over all channels.
        if weight_shape[0] not in (1, input_x_shape[1]):
            raise ValueError(f'For \'{self.name}\' channel of input_x and weight must be matched,'
                             f' while channel of input_x is {input_x_shape[1]},'
                             f' weight_shape[0] is {weight_shape[0]}.')
        return input_x_shape

    def infer_dtype(self, input_x_dtype, weight_dtype):
        """Both tensors must be float16/float32; output keeps the input dtype."""
        for arg_name, arg_dtype in (("input_x", input_x_dtype), ("weight", weight_dtype)):
            validator.check_tensor_type_same({arg_name: arg_dtype}, (mstype.float16, mstype.float32), self.name)
        return input_x_dtype
class LSTM(PrimitiveWithInfer):
    """
    Performs the Long Short-Term Memory (LSTM) layer computation.

    Args:
        input_size (int): Number of input features, must be > 0.
        hidden_size (int): Number of hidden-state features, must be > 0.
        num_layers (int): Number of stacked layers, must be > 0.
        has_bias (bool): Whether bias weights are used.
        bidirectional (bool): If True the layer runs in two directions.
        dropout (float): Dropout probability, in [0, 1].

    The inference outputs are (y, h, c, reserved, state), where `reserved`
    is a flat workspace buffer sized by the bookkeeping in `infer_shape`.
    """

    @prim_attr_register
    def __init__(self, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout):
        """Initialize LSTM."""
        self.input_size = validator.check_integer("input_size", input_size, 0, Rel.GT, self.name)
        self.hidden_size = validator.check_integer("hidden_size", hidden_size, 0, Rel.GT, self.name)
        self.num_layers = validator.check_integer("num_layers", num_layers, 0, Rel.GT, self.name)
        self.has_bias = validator.check_value_type("has_bias", has_bias, (bool,), self.name)
        self.bidirectional = validator.check_value_type("bidirectional", bidirectional, (bool,), self.name)
        self.dropout = validator.check_value_type("dropout", dropout, [float], self.name)
        self.dropout = validator.check_number_range('dropout', dropout, 0, 1, Rel.INC_BOTH, self.name)
        if bidirectional:
            self.num_directions = 2
        else:
            self.num_directions = 1

    def infer_shape(self, x_shape, h_shape, c_shape, w_shape):
        # x is (seq_len, batch, input_size).
        validator.check_integer("x rank", len(x_shape), 3, Rel.EQ, self.name)
        validator.check_integer("x[2]", x_shape[2], self.input_size, Rel.EQ, self.name)
        # h and c are (num_layers * num_directions, batch, hidden_size) and
        # must match each other exactly.
        validator.check_integer("h rank", len(h_shape), 3, Rel.EQ, self.name)
        validator.check("h_shape", h_shape, "c_shape", c_shape, Rel.EQ, self.name)
        validator.check_integer("h[0]", h_shape[0], self.num_layers * self.num_directions, Rel.EQ, self.name)
        validator.check_integer("h[1]", h_shape[1], x_shape[1], Rel.EQ, self.name)
        validator.check_integer("h[2]", h_shape[2], self.hidden_size, Rel.EQ, self.name)
        y_shape = (x_shape[0], x_shape[1], self.hidden_size * self.num_directions)
        # Workspace bookkeeping: gates / states / cell-states / diff-states
        # regions, each rounded up to a page boundary, are packed into one
        # flat `reserved` buffer. NOTE(review): the layout looks derived
        # from an oneDNN-style RNN workspace — confirm against the backing
        # kernel before changing any of these formulas.
        type_size = 4  # sizes are computed in bytes assuming 4-byte elements
        gates_ws_ld = self.get_good_ld(self.hidden_size * 4, type_size)
        states_ws_ld = self.get_good_ld(max(self.hidden_size, self.input_size), type_size)
        self.ws_gates_size = self.num_layers * self.num_directions * x_shape[0] * x_shape[1] * gates_ws_ld * type_size
        self.ws_states_size = (self.num_layers + 1) * self.num_directions * (x_shape[0] + 1) * x_shape[
            1] * states_ws_ld * type_size
        self.ws_c_states_size = (self.num_layers + 1) * self.num_directions * (x_shape[0] + 1) * x_shape[
            1] * states_ws_ld * type_size
        self.ws_diff_states_size = (self.num_layers + 1) * self.num_directions * (x_shape[0] + 1) * (2 + 1) * x_shape[
            1] * states_ws_ld * type_size
        self.ws_grid_comp_size = 0
        self.page_size = 4096
        # Accumulate region sizes, page-aligning after each one.
        current_offset = 0
        current_offset += self.ws_gates_size
        current_offset = self.rnd_up(current_offset, self.page_size)
        current_offset += self.ws_states_size
        current_offset = self.rnd_up(current_offset, self.page_size)
        current_offset += self.ws_c_states_size
        current_offset = self.rnd_up(current_offset, self.page_size)
        current_offset += self.ws_diff_states_size
        current_offset = self.rnd_up(current_offset, self.page_size)
        current_offset += self.ws_grid_comp_size
        reserved_shape = (current_offset, 1)
        state_shape = (1, 1)
        return (y_shape, h_shape, c_shape, reserved_shape, state_shape)

    def infer_dtype(self, x_dtype, h_dtype, c_dtype, w_dtype):
        # All four inputs must share one float16/float32 dtype.
        args = {'x': x_dtype, 'h': h_dtype, 'c': c_dtype, 'w': w_dtype}
        validator.check_tensor_type_same(args, (mstype.float32, mstype.float16), self.name)
        return (x_dtype, x_dtype, x_dtype, x_dtype, x_dtype)

    def rnd_up(self, current_offset, page_size):
        # Round `current_offset` up to the next multiple of `page_size`.
        return ((current_offset + page_size - 1) // page_size) * page_size

    def get_good_ld(self, dim, type_size):
        # Leading dimension rounded up to a 64-byte boundary.
        ld = self.rnd_up(dim, 64 // type_size)
        # NOTE(review): `ld * 256 == 0` can only be true when ld == 0; the
        # analogous oneDNN helper uses `ld % 256 == 0` to avoid 256-aligned
        # leading dimensions. Confirm which condition was intended before
        # relying on this branch.
        if ld * 256 == 0:
            return ld + 64 // type_size
        return ld
class SigmoidCrossEntropyWithLogits(PrimitiveWithInfer):
    """
    Sigmoid cross-entropy loss between `predict` logits and `target` labels.

    Both inputs must share the same shape; the loss keeps that shape and the
    dtype of `predict`.
    """

    @prim_attr_register
    def __init__(self):
        """Initialize SigmoidCrossEntropyWithLogits."""
        self.init_prim_io_names(inputs=['predict', 'target'], outputs=['loss'])

    def infer_shape(self, x_shape, y_shape):
        # Element-wise loss: logits and labels must match exactly.
        validator.check("x_shape", x_shape, "y_shape", y_shape, Rel.EQ, self.name)
        return x_shape

    def infer_dtype(self, x_dtype, y_dtype):
        validator.check_tensor_type_same({"x_dtype": x_dtype, "y_dtype": y_dtype},
                                         mstype.number_type, self.name)
        return x_dtype
class Pad(PrimitiveWithInfer):
    """
    Pads the input tensor with the amounts given by the `paddings` attribute.

    Args:
        paddings (tuple): One (pad_before, pad_after) pair per input
            dimension, i.e. an (n, 2)-shaped nested tuple of non-negative
            integers.
    """

    @prim_attr_register
    def __init__(self, paddings):
        """Initialize Pad."""
        self.init_prim_io_names(inputs=['x'], outputs=['y'])
        if not isinstance(paddings, tuple):
            raise TypeError('Paddings must be tuple type.')
        if any(len(pair) != 2 for pair in paddings):
            raise ValueError('The shape of paddings must be (n, 2).')
        self.paddings = paddings

    def infer_shape(self, x):
        pads = np.array(self.paddings)
        # Exactly one (before, after) pair per input dimension.
        validator.check_integer('paddings.shape', pads.size, len(x) * 2, Rel.EQ, self.name)
        if not np.all(pads >= 0):
            raise ValueError('All elements of paddings must be >= 0.')
        # Every dimension grows by the pad amounts on both sides.
        return tuple(x[i] + pads[i, 0] + pads[i, 1] for i in range(len(x)))

    def infer_dtype(self, x):
        validator.check_subclass("input_x", x, mstype.tensor, self.name)
        return x
class MirrorPad(PrimitiveWithInfer):
    """
    Pads the input tensor by mirroring it at the borders.

    Args:
        mode (str): Either 'REFLECT' (border element not repeated) or
            'SYMMETRIC' (border element repeated). Default: 'REFLECT'.
    """

    @prim_attr_register
    def __init__(self, mode='REFLECT'):
        """Initialize MirrorPad."""
        validator.check_string('mode', mode, ['REFLECT', 'SYMMETRIC'], self.name)
        self.mode = mode
        # `paddings` (input index 1) must be a compile-time constant.
        self.set_const_input_indexes([1])

    def __infer__(self, input_x, paddings):
        validator.check_subclass("input_x", input_x['dtype'], mstype.tensor, self.name)
        validator.check_subclass("paddings", paddings['dtype'], mstype.tensor, self.name)
        x_shape = list(input_x['shape'])
        pads = paddings['value'].asnumpy()
        validator.check_integer('paddings.shape', pads.size, len(x_shape) * 2, Rel.EQ, self.name)
        if not np.all(pads >= 0):
            raise ValueError('All elements of paddings must be >= 0.')
        # SYMMETRIC may pad up to the dimension size; REFLECT only size - 1.
        adjust = 1 if self.mode == 'SYMMETRIC' else 0
        ndim = int(pads.size / 2)
        for dim in range(ndim):
            if (pads[dim, 0] >= x_shape[dim] + adjust) or (pads[dim, 1] >= x_shape[dim] + adjust):
                raise ValueError('At least one dim has too high a padding value for this input and mode')
        out_shape = tuple(x_shape[dim] + pads[dim, 0] + pads[dim, 1] for dim in range(ndim))
        return {'shape': out_shape,
                'dtype': input_x['dtype'],
                'value': None}
class ROIAlign(PrimitiveWithInfer):
    """
    Region-of-Interest Align pooling.

    Args:
        pooled_height (int): Output height of each pooled region.
        pooled_width (int): Output width of each pooled region.
        spatial_scale (float): Scale mapping RoI coordinates to the feature map.
        sample_num (int): Sampling points per output bin. Default: 2.
        roi_end_mode (int): 0 or 1; controls RoI end-coordinate handling.
            Default: 1.
    """

    @prim_attr_register
    def __init__(self, pooled_height, pooled_width, spatial_scale, sample_num=2, roi_end_mode=1):
        """Initialize ROIAlign."""
        validator.check_value_type("pooled_height", pooled_height, [int], self.name)
        validator.check_value_type("pooled_width", pooled_width, [int], self.name)
        validator.check_value_type("spatial_scale", spatial_scale, [float], self.name)
        validator.check_value_type("sample_num", sample_num, [int], self.name)
        validator.check_value_type("roi_end_mode", roi_end_mode, [int], self.name)
        validator.check_int_range("roi_end_mode", roi_end_mode, 0, 1, Rel.INC_BOTH, self.name)
        self.pooled_height = pooled_height
        self.pooled_width = pooled_width
        self.spatial_scale = spatial_scale
        self.sample_num = sample_num
        self.roi_end_mode = roi_end_mode

    def infer_shape(self, inputs_shape, rois_shape):
        # One pooled (channels, pooled_h, pooled_w) block per RoI row.
        return [rois_shape[0], inputs_shape[1], self.pooled_height, self.pooled_width]

    def infer_dtype(self, inputs_type, rois_type):
        float_types = (mstype.float16, mstype.float32)
        validator.check_tensor_type_same({"inputs_type": inputs_type}, float_types, self.name)
        validator.check_tensor_type_same({"rois_type": rois_type}, float_types, self.name)
        return inputs_type
class Adam(PrimitiveWithInfer):
    """
    Updates `var`, `m` and `v` with the Adam optimizer rule.

    Args:
        use_locking (bool): Protect updates with a lock. Default: False.
        use_nesterov (bool): Apply the NAG variant of Adam. Default: False.
    """

    @prim_attr_register
    def __init__(self, use_locking=False, use_nesterov=False):
        """Initialize Adam."""
        validator.check_value_type("use_locking", use_locking, [bool], self.name)
        validator.check_value_type("use_nesterov", use_nesterov, [bool], self.name)

    def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, beta2_power_shape, lr_shape,
                    beta1_shape, beta2_shape, epsilon_shape, grad_shape):
        # Moment buffers and gradient must all match the parameter shape.
        for label, shape in (("m_shape", m_shape), ("v_shape", v_shape), ("grad_shape", grad_shape)):
            validator.check("var_shape", var_shape, label, shape, Rel.EQ, self.name)
        return var_shape, m_shape, v_shape

    def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, beta2_power_dtype, lr_dtype,
                    beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype):
        validator.check_tensor_type_same(
            {"var": var_dtype, "m": m_dtype, "v": v_dtype, "grad": grad_dtype},
            mstype.number_type, self.name)
        hyper_params = {"beta1_power": beta1_power_dtype, "beta2_power": beta2_power_dtype, 'lr': lr_dtype,
                        "beta1": beta1_dtype, "beta2": beta2_dtype, "epsilon": epsilon_dtype}
        validator.check_scalar_or_tensor_type_same(hyper_params, [mstype.float16, mstype.float32], self.name, True)
        return var_dtype, m_dtype, v_dtype
class FusedSparseAdam(PrimitiveWithInfer):
    """
    Merges a sparse row gather with the Adam update: only the rows of `grad`
    addressed by `indices` contribute to updating `var`, `m` and `v`.

    `var`, `m` and `v` are declared RW_WRITE in the signature, i.e. they are
    written in place; the declared outputs are shape-[1] placeholders.

    Args:
        use_locking (bool): Protect updates with a lock. Default: False.
        use_nesterov (bool): Apply the NAG variant of Adam. Default: False.
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('beta1_power', dtype=sig.sig_dtype.T),
        sig.make_sig('beta2_power', dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T),
        sig.make_sig('beta1', dtype=sig.sig_dtype.T),
        sig.make_sig('beta2', dtype=sig.sig_dtype.T),
        sig.make_sig('epsilon', dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )

    @prim_attr_register
    def __init__(self, use_locking=False, use_nesterov=False):
        """Initialize FusedSparseAdam."""
        validator.check_value_type("use_locking", use_locking, [bool], self.name)
        validator.check_value_type("use_nesterov", use_nesterov, [bool], self.name)
        self.init_prim_io_names(inputs=['var', 'm', 'v', 'beta1_power', 'beta2_power', 'lr', 'beta1', 'beta2',
                                        'epsilon', 'grad', 'indices'],
                                outputs=['var', 'm', 'v'])

    def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, beta2_power_shape, lr_shape,
                    beta1_shape, beta2_shape, epsilon_shape, grad_shape, indices_shape):
        validator.check("var_shape", var_shape, "m_shape", m_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "v_shape", v_shape, Rel.EQ, self.name)
        # `indices` is a vector selecting rows of `grad`; row counts must agree.
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
        if len(var_shape) > 1 and grad_shape != indices_shape + var_shape[1:]:
            raise ValueError(f"For '{self.name}', the shape of updates should be [] or "
                             f"grad_shape = indices_shape + var_shape[1:], but got var_shape: {var_shape}, "
                             f"indices_shape: {indices_shape}, grad_shape: {grad_shape}.")
        # In-place update: declared outputs are placeholders of shape [1].
        return [1], [1], [1]

    def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, beta2_power_dtype, lr_dtype,
                    beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype, indices_dtype):
        args = {"var": var_dtype, "m": m_dtype, "v": v_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, mstype.number_type, self.name)
        args = {"beta1_power": beta1_power_dtype, "beta2_power": beta2_power_dtype, 'lr': lr_dtype,
                "beta1": beta1_dtype, "beta2": beta2_dtype, "epsilon": epsilon_dtype}
        validator.check_scalar_or_tensor_type_same(args, [mstype.float16, mstype.float32], self.name, True)
        validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
        return var_dtype, m_dtype, v_dtype
class FusedSparseLazyAdam(PrimitiveWithInfer):
    """
    Merges a sparse row gather with the Adam update (lazy variant); the
    interface and inference logic are identical to FusedSparseAdam. The
    "lazy" semantics live in the backing kernel — presumably only the rows
    named by `indices` have their moments updated (confirm against kernel).

    Args:
        use_locking (bool): Protect updates with a lock. Default: False.
        use_nesterov (bool): Apply the NAG variant of Adam. Default: False.
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('beta1_power', dtype=sig.sig_dtype.T),
        sig.make_sig('beta2_power', dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T),
        sig.make_sig('beta1', dtype=sig.sig_dtype.T),
        sig.make_sig('beta2', dtype=sig.sig_dtype.T),
        sig.make_sig('epsilon', dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )

    @prim_attr_register
    def __init__(self, use_locking=False, use_nesterov=False):
        """Initialize FusedSparseLazyAdam."""
        validator.check_value_type("use_locking", use_locking, [bool], self.name)
        validator.check_value_type("use_nesterov", use_nesterov, [bool], self.name)
        self.init_prim_io_names(inputs=['var', 'm', 'v', 'beta1_power', 'beta2_power', 'lr', 'beta1', 'beta2',
                                        'epsilon', 'grad', 'indices'],
                                outputs=['var', 'm', 'v'])

    def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, beta2_power_shape, lr_shape,
                    beta1_shape, beta2_shape, epsilon_shape, grad_shape, indices_shape):
        validator.check("var_shape", var_shape, "m_shape", m_shape, Rel.EQ, self.name)
        validator.check("var_shape", var_shape, "v_shape", v_shape, Rel.EQ, self.name)
        # `indices` is a vector selecting rows of `grad`; row counts must agree.
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
        if len(var_shape) > 1 and grad_shape != indices_shape + var_shape[1:]:
            raise ValueError(f"For '{self.name}', the shape of updates should be [] or "
                             f"grad_shape = indices_shape + var_shape[1:], but got var_shape: {var_shape}, "
                             f"indices_shape: {indices_shape}, grad_shape: {grad_shape}.")
        # In-place update: declared outputs are placeholders of shape [1].
        return [1], [1], [1]

    def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, beta2_power_dtype, lr_dtype,
                    beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype, indices_dtype):
        args = {"var": var_dtype, "m": m_dtype, "v": v_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, mstype.number_type, self.name)
        args = {"beta1_power": beta1_power_dtype, "beta2_power": beta2_power_dtype, 'lr': lr_dtype,
                "beta1": beta1_dtype, "beta2": beta2_dtype, "epsilon": epsilon_dtype}
        validator.check_scalar_or_tensor_type_same(args, [mstype.float16, mstype.float32], self.name, True)
        validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
        return var_dtype, m_dtype, v_dtype
class FusedSparseFtrl(PrimitiveWithInfer):
    """
    Merges a sparse row gather with the FTRL-proximal update: only the rows
    of `grad` addressed by `indices` update `var`, `accum` and `linear`
    (all three declared RW_WRITE, i.e. written in place).

    Args:
        lr (float): Learning rate, must be > 0.
        l1 (float): L1 regularization strength, must be >= 0.
        l2 (float): L2 regularization strength, must be >= 0.
        lr_power (float): Learning-rate power, must be <= 0.
        use_locking (bool): Protect updates with a lock. Default: False.
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('linear', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )

    @prim_attr_register
    def __init__(self, lr, l1, l2, lr_power, use_locking=False):
        """Initialize FusedSparseFtrl."""
        self.init_prim_io_names(inputs=['var', 'accum', 'linear', 'grad', 'indices'],
                                outputs=['output'])
        validator.check_value_type("lr", lr, [float], self.name)
        validator.check_value_type("l1", l1, [float], self.name)
        validator.check_value_type("l2", l2, [float], self.name)
        validator.check_value_type("lr_power", lr_power, [float], self.name)
        # Range checks: lr strictly positive, l1/l2 non-negative, lr_power <= 0.
        self.lr = validator.check_number_range("lr", lr, 0.0, float("inf"), Rel.INC_NEITHER, self.name)
        self.l1 = validator.check_number_range("l1", l1, 0.0, float("inf"), Rel.INC_LEFT, self.name)
        self.l2 = validator.check_number_range("l2", l2, 0.0, float("inf"), Rel.INC_LEFT, self.name)
        self.lr_power = validator.check_number("lr_power", lr_power, 0, Rel.LE, self.name)
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)

    def infer_shape(self, var_shape, accum_shape, linear_shape, grad_shape, indices_shape):
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)
        if len(var_shape) > 1:
            validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
        # `indices` is a vector selecting rows of `grad`; row counts must agree.
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
        # In-place update: declared outputs are placeholders of shape [1].
        return [1], [1], [1]

    def infer_dtype(self, var_dtype, accum_dtype, linear_dtype, grad_dtype, indices_dtype):
        args = {"var_dtype": var_dtype, "accum_dtype": accum_dtype,
                "linear_dtype": linear_dtype, "grad_dtype": grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float32], self.name)
        validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
        return var_dtype, accum_dtype, linear_dtype
class FusedSparseProximalAdagrad(PrimitiveWithInfer):
    """
    Merges a sparse row gather with the proximal Adagrad update: only the
    rows of `grad` addressed by `indices` update `var` and `accum` (both
    declared RW_WRITE, i.e. written in place).

    Args:
        use_locking (bool): Protect updates with a lock. Default: False.
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T),
        sig.make_sig('l1', dtype=sig.sig_dtype.T),
        sig.make_sig('l2', dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )

    @prim_attr_register
    def __init__(self, use_locking=False):
        """Initialize FusedSparseProximalAdagrad."""
        self.init_prim_io_names(inputs=['var', 'accum', 'lr', 'l1', 'l2', 'grad', 'indices'],
                                outputs=['output'])
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)

    def infer_shape(self, var_shape, accum_shape, lr_shape, l1_shape, l2_shape, grad_shape, indices_shape):
        # Only the rank of `indices` is validated here.
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        # In-place update: declared outputs are placeholders of shape [1].
        return [1], [1]

    def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, l1_dtype, l2_dtype, grad_dtype, indices_dtype):
        args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, [mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({"l1": l1_dtype}, [mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({"l2": l2_dtype}, [mstype.float32], self.name)
        valid_types = [mstype.int16, mstype.int32, mstype.int64,
                       mstype.uint16, mstype.uint32, mstype.uint64]
        validator.check_tensor_type_same({'indices': indices_dtype}, valid_types, self.name)
        return var_dtype, accum_dtype
class KLDivLoss(PrimitiveWithInfer):
    """
    Kullback-Leibler divergence loss.

    Args:
        reduction (str): One of 'none', 'mean' or 'sum'. Default: 'mean'.
    """

    @prim_attr_register
    def __init__(self, reduction='mean'):
        """Initialize KLDivLoss."""
        self.reduction = validator.check_string('reduction', reduction, ['none', 'mean', 'sum'], self.name)

    def infer_shape(self, x_shape, y_shape):
        validator.check('x_shape', x_shape, 'y_shape', y_shape, Rel.EQ, self.name)
        # 'mean'/'sum' collapse to a scalar; 'none' keeps the input shape.
        return [] if self.reduction in ('mean', 'sum') else x_shape

    def infer_dtype(self, x_type, y_type):
        validator.check_tensor_type_same({'x': x_type, 'y': y_type},
                                         (mstype.float16, mstype.float32), self.name)
        return x_type
class BinaryCrossEntropy(PrimitiveWithInfer):
    """
    Binary cross-entropy loss with an optional element-wise `weight`.

    Args:
        reduction (str): One of 'none', 'mean' or 'sum'. Default: 'mean'.
    """

    @prim_attr_register
    def __init__(self, reduction='mean'):
        """Initialize BinaryCrossEntropy."""
        self.reduction = validator.check_string('reduction', reduction, ['none', 'mean', 'sum'], self.name)

    def infer_shape(self, x_shape, y_shape, weight_shape):
        validator.check('x_shape', x_shape, 'y_shape', y_shape, Rel.EQ, self.name)
        # `weight` is optional; when provided it must match the labels.
        if weight_shape:
            validator.check('y_shape', y_shape, 'weight_shape', weight_shape, Rel.EQ, self.name)
        # 'mean'/'sum' collapse to a scalar; 'none' keeps the input shape.
        return [] if self.reduction in ('mean', 'sum') else x_shape

    def infer_dtype(self, x_type, y_type, weight_type):
        float_types = (mstype.float16, mstype.float32)
        validator.check_tensor_type_same({'x': x_type, 'y': y_type}, float_types, self.name)
        if weight_type:
            validator.check_tensor_type_same({'x': x_type, 'weight': weight_type}, float_types, self.name)
        return x_type
class ApplyAdaMax(PrimitiveWithInfer):
    """
    Updates `var`, `m` and `v` according to the AdaMax algorithm.

    `m` and `v` must match the shape of `var`; each scalar hyper-parameter
    (beta1_power, lr, beta1, beta2, epsilon) may be a 0-D tensor or a 1-D
    tensor with a single element.
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('beta1_power', dtype=sig.sig_dtype.T1),
        sig.make_sig('lr', dtype=sig.sig_dtype.T2),
        sig.make_sig('beta1', dtype=sig.sig_dtype.T3),
        sig.make_sig('beta2', dtype=sig.sig_dtype.T4),
        sig.make_sig('epsilon', dtype=sig.sig_dtype.T5),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self):
        """Initialize ApplyAdaMax."""
        # BUG FIX: `__init__` previously had no body at all, which is a
        # SyntaxError; the docstring restores a loadable definition.

    def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, lr_shape,
                    beta1_shape, beta2_shape, epsilon_shape, grad_shape):
        validator.check("m_shape", m_shape, "var_shape", var_shape, Rel.EQ, self.name)
        validator.check("v_shape", v_shape, "var_shape", var_shape, Rel.EQ, self.name)
        validator.check("grad_shape", grad_shape, "var_shape", var_shape, Rel.EQ, self.name)
        # Each hyper-parameter must be scalar-like: rank 0, or rank 1 with a
        # single element. Checked in the original (documented) order.
        scalar_args = (("beta1 power's rank", "beta1_power_shape[0]", beta1_power_shape),
                       ("lr's rank", "lr_shape[0]", lr_shape),
                       ("beta1's rank", "beta1_shape[0]", beta1_shape),
                       ("beta2's rank", "beta2_shape[0]", beta2_shape),
                       ("epsilon's rank", "epsilon_shape[0]", epsilon_shape))
        for rank_label, elem_label, shape in scalar_args:
            validator.check_integer(rank_label, len(shape), 1, Rel.LE, self.name)
            if len(shape) == 1:
                validator.check_integer(elem_label, shape[0], 1, Rel.EQ, self.name)
        return var_shape, m_shape, v_shape

    def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, lr_dtype,
                    beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype):
        valid_types = [mstype.float16, mstype.float32]
        args = {"var": var_dtype, "m": m_dtype, "v": v_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"beta1_power": beta1_power_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"beta1": beta1_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"beta2": beta2_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"epsilon": epsilon_dtype}, valid_types, self.name)
        return var_dtype, m_dtype, v_dtype
class ApplyAdadelta(PrimitiveWithInfer):
    """
    Updates `var`, `accum` and `accum_update` according to the Adadelta
    algorithm.

    `accum` and `accum_update` must match the shape of `var`; `lr`, `rho`
    and `epsilon` may each be a 0-D tensor or a 1-D tensor with a single
    element.
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum_update', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T1),
        sig.make_sig('rho', dtype=sig.sig_dtype.T2),
        sig.make_sig('epsilon', dtype=sig.sig_dtype.T3),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self):
        """Initialize ApplyAdadelta."""
        # BUG FIX: `__init__` previously had no body at all, which is a
        # SyntaxError; the docstring restores a loadable definition.

    def infer_shape(self, var_shape, accum_shape, accum_update_shape, lr_shape, rho_shape,
                    epsilon_shape, grad_shape):
        validator.check("accum_shape", accum_shape, "var_shape", var_shape, Rel.EQ, self.name)
        validator.check("accum_update_shape", accum_update_shape, "var_shape", var_shape, Rel.EQ, self.name)
        validator.check("grad_shape", grad_shape, "var_shape", var_shape, Rel.EQ, self.name)
        # Each hyper-parameter must be scalar-like: rank 0, or rank 1 with a
        # single element.
        # BUG FIX: the epsilon rank check was mislabelled "lepsilon's rank"
        # in its error message; corrected to "epsilon's rank".
        scalar_args = (("lr's rank", "lr_shape[0]", lr_shape),
                       ("rho's rank", "rho_shape[0]", rho_shape),
                       ("epsilon's rank", "epsilon_shape[0]", epsilon_shape))
        for rank_label, elem_label, shape in scalar_args:
            validator.check_integer(rank_label, len(shape), 1, Rel.LE, self.name)
            if len(shape) == 1:
                validator.check_integer(elem_label, shape[0], 1, Rel.EQ, self.name)
        return var_shape, accum_shape, accum_update_shape

    def infer_dtype(self, var_dtype, accum_dtype, accum_update_dtype, lr_dtype, rho_dtype,
                    epsilon_dtype, grad_dtype):
        valid_types = [mstype.float16, mstype.float32]
        args = {"var": var_dtype, "accum": accum_dtype, "accum_update": accum_update_dtype, "grad": grad_dtype}
        validator.check_tensor_type_same(args, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"rho": rho_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"epsilon": epsilon_dtype}, valid_types, self.name)
        return var_dtype, accum_dtype, accum_update_dtype
class ApplyAdagrad(PrimitiveWithInfer):
    """
    Updates `var` and `accum` according to the Adagrad algorithm.

    Args:
        update_slots (bool): If True the `accum` slot is updated.
            Default: True.
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T1),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self, update_slots=True):
        """Initialize ApplyAdagrad."""
        validator.check_value_type("update_slots", update_slots, [bool], self.name)

    def infer_shape(self, var_shape, accum_shape, lr_shape, grad_shape):
        validator.check('accum shape', accum_shape, 'var shape', var_shape, Rel.EQ, self.name)
        validator.check('grad shape', grad_shape, 'var shape', var_shape, Rel.EQ, self.name)
        # `lr` must be scalar-like: rank 0, or rank 1 with a single element.
        lr_rank = len(lr_shape)
        validator.check_integer("lr's rank", lr_rank, 1, Rel.LE, self.name)
        if lr_rank == 1:
            validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
        return var_shape, accum_shape

    def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, grad_dtype):
        float_types = [mstype.float16, mstype.float32]
        validator.check_tensor_type_same({'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype},
                                         float_types, self.name)
        validator.check_scalar_or_tensor_type_same({'lr': lr_dtype}, float_types, self.name)
        return var_dtype, accum_dtype
class ApplyAdagradV2(PrimitiveWithInfer):
    """
    Updates `var` and `accum` according to the Adagrad-V2 algorithm.

    Args:
        epsilon (float): Small value added for numerical stability.
        update_slots (bool): If True the `accum` slot is updated.
            Default: True.
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T1),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self, epsilon, update_slots=True):
        """Initialize ApplyAdagradV2."""
        validator.check_value_type("epsilon", epsilon, [float], self.name)
        validator.check_value_type("update_slots", update_slots, [bool], self.name)

    def infer_shape(self, var_shape, accum_shape, lr_shape, grad_shape):
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('var shape', var_shape, 'grad shape', grad_shape, Rel.EQ, self.name)
        # `lr` must be scalar-like: rank 0, or rank 1 with a single element.
        lr_rank = len(lr_shape)
        validator.check_integer("lr's rank", lr_rank, 1, Rel.LE, self.name)
        if lr_rank == 1:
            validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
        return var_shape, accum_shape

    def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, grad_dtype):
        float_types = [mstype.float16, mstype.float32]
        validator.check_tensor_type_same({'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype},
                                         float_types, self.name)
        validator.check_scalar_or_tensor_type_same({'lr': lr_dtype}, float_types, self.name)
        return var_dtype, accum_dtype
class SparseApplyAdagrad(PrimitiveWithInfer):
    """
    Sparse Adagrad update: only the rows of `var` and `accum` selected by
    `indices` are updated with the matching rows of `grad`.

    Args:
        lr (float): Learning rate; must be a finite float.
        update_slots (bool): If True the `accum` slot is updated.
            Default: True.
        use_locking (bool): Protect updates with a lock. Default: False.
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )

    @prim_attr_register
    def __init__(self, lr, update_slots=True, use_locking=False):
        """Initialize SparseApplyAdagrad."""
        validator.check_value_type("lr", lr, [float], self.name)
        # Open interval on both sides excludes infinite learning rates.
        validator.check_number_range("lr", lr, float("-inf"), float("inf"), Rel.INC_NEITHER, self.name)
        validator.check_value_type("update_slots", update_slots, [bool], self.name)
        validator.check_value_type("use_locking", use_locking, [bool], self.name)

    def infer_shape(self, var_shape, accum_shape, grad_shape, indices_shape):
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('len of var shape', len(var_shape), 'len of grad shape', len(grad_shape), Rel.EQ, self.name)
        # Trailing dims of grad must match var; the leading dim is indexed.
        if len(var_shape) > 1:
            validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
        return var_shape, accum_shape

    def infer_dtype(self, var_type, accum_type, grad_type, indices_type):
        float_types = [mstype.float16, mstype.float32]
        validator.check_tensor_type_same({'var': var_type, 'accum': accum_type, 'grad': grad_type},
                                         float_types, self.name)
        validator.check_tensor_type_same({'indices': indices_type}, [mstype.int32], self.name)
        return var_type, accum_type
class SparseApplyAdagradV2(PrimitiveWithInfer):
    """
    Sparse Adagrad-V2 update: only the rows of `var` and `accum` selected
    by `indices` are updated with the matching rows of `grad`.

    Args:
        lr (float): Learning rate.
        epsilon (float): Small value added for numerical stability.
        use_locking (bool): Protect updates with a lock. Default: False.
        update_slots (bool): If True the `accum` slot is updated.
            Default: True.
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )

    @prim_attr_register
    def __init__(self, lr, epsilon, use_locking=False, update_slots=True):
        """Initialize SparseApplyAdagradV2."""
        self.lr = validator.check_value_type("lr", lr, [float], self.name)
        self.epsilon = validator.check_value_type("epsilon", epsilon, [float], self.name)
        # BUG FIX: the two flags were previously cross-assigned —
        # `self.use_locking` received the (mislabelled) `update_slots` value
        # and `self.update_slots` received `use_locking`. Each flag is now
        # validated under its own label and stored on its own attribute.
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
        self.update_slots = validator.check_value_type("update_slots", update_slots, [bool], self.name)

    def infer_shape(self, var_shape, accum_shape, grad_shape, indices_shape):
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('len of var shape', len(var_shape), 'len of grad shape', len(grad_shape), Rel.EQ, self.name)
        # Trailing dims of grad must match var; the leading dim is indexed.
        if len(var_shape) > 1:
            validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
        return var_shape, accum_shape

    def infer_dtype(self, var_type, accum_type, grad_type, indices_type):
        args = {'var': var_type, 'accum': accum_type, 'grad': grad_type}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        validator.check_tensor_type_same({'indices': indices_type}, [mstype.int32], self.name)
        return var_type, accum_type
class ApplyProximalAdagrad(PrimitiveWithInfer):
    """
    Updates `var` and `accum` with the proximal Adagrad algorithm.

    Args:
        use_locking (bool): Protect updates with a lock. Default: False.
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T1),
        sig.make_sig('l1', dtype=sig.sig_dtype.T2),
        sig.make_sig('l2', dtype=sig.sig_dtype.T3),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self, use_locking=False):
        """Initialize ApplyProximalAdagrad."""
        self.init_prim_io_names(inputs=['var', 'accum', 'lr', 'l1', 'l2', 'grad'],
                                outputs=['var', 'accum'])
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)

    def infer_shape(self, var_shape, accum_shape, lr_shape, l1_shape, l2_shape, grad_shape):
        validator.check('accum shape', accum_shape, 'var shape', var_shape, Rel.EQ, self.name)
        validator.check('grad shape', grad_shape, 'var shape', var_shape, Rel.EQ, self.name)
        # lr, l1 and l2 must each be scalar-like: rank 0, or rank 1 with a
        # single element.
        for label, shape in (('lr', lr_shape), ('l1', l1_shape), ('l2', l2_shape)):
            validator.check_integer(label + "'s rank", len(shape), 1, Rel.LE, self.name)
            if len(shape) == 1:
                validator.check_integer(label + "_shape[0]", shape[0], 1, Rel.EQ, self.name)
        return var_shape, accum_shape

    def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, l1_dtype, l2_dtype, grad_dtype):
        float_types = [mstype.float16, mstype.float32]
        validator.check_tensor_type_same({'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype},
                                         float_types, self.name)
        for label, dtype in (("lr", lr_dtype), ("l1", l1_dtype), ("l2", l2_dtype)):
            validator.check_scalar_or_tensor_type_same({label: dtype}, float_types, self.name)
        return var_dtype, accum_dtype
class SparseApplyProximalAdagrad(PrimitiveWithCheck):
    """Sparse proximal-adagrad update of `var` and `accum` at the positions
    selected by `indices` (shape/dtype contract enforced below)."""
    # var/accum are updated in place and share dtype group T with grad;
    # lr/l1/l2/indices each belong to an independent dtype group.
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T1),
        sig.make_sig('l1', dtype=sig.sig_dtype.T2),
        sig.make_sig('l2', dtype=sig.sig_dtype.T3),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T4),
    )
    @prim_attr_register
    def __init__(self, use_locking=False):
        """Register primitive IO names and validate the bool `use_locking` flag."""
        self.init_prim_io_names(inputs=['var', 'accum', 'lr', 'l1', 'l2', 'grad', 'indices'],
                                outputs=['var', 'accum'])
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
    def check_shape(self, var_shape, accum_shape, lr_shape, l1_shape, l2_shape, grad_shape, indices_shape):
        """Only constraint checked here: `indices` must be a rank-1 tensor."""
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
    def check_dtype(self, var_dtype, accum_dtype, lr_dtype, l1_dtype, l2_dtype, grad_dtype, indices_dtype):
        """var/accum/grad and lr/l1/l2 must be float16/float32; indices may be
        any signed or unsigned 16/32/64-bit integer type."""
        args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, [mstype.float16, mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({"l1": l1_dtype}, [mstype.float16, mstype.float32], self.name)
        validator.check_scalar_or_tensor_type_same({"l2": l2_dtype}, [mstype.float16, mstype.float32], self.name)
        valid_types = [mstype.int16, mstype.int32, mstype.int64,
                       mstype.uint16, mstype.uint32, mstype.uint64]
        validator.check_tensor_type_same({'indices': indices_dtype}, valid_types, self.name)
class ApplyAddSign(PrimitiveWithInfer):
    """AddSign optimizer update of `var` and `m` (shape/dtype contract below)."""
    # var and m are written in place and share dtype group T with grad;
    # lr/alpha have their own groups, sign_decay and beta share group T3.
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T1),
        sig.make_sig('alpha', dtype=sig.sig_dtype.T2),
        sig.make_sig('sign_decay', dtype=sig.sig_dtype.T3),
        sig.make_sig('beta', dtype=sig.sig_dtype.T3),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )
    @prim_attr_register
    def __init__(self):
        """No configurable attributes; the decorator performs registration."""
    def infer_shape(self, var_shape, m_shape, lr_shape, alpha_shape, sign_decay_shape, beta_shape, grad_shape):
        """m and grad must match var's shape; lr/alpha/sign_decay/beta must each be
        rank 0 or a 1-element rank-1 tensor. Returns (var_shape, m_shape)."""
        validator.check('m_shape', m_shape, 'var_shape', var_shape, Rel.EQ, self.name)
        validator.check('grad_shape', grad_shape, 'var_shape', var_shape, Rel.EQ, self.name)
        lr_shape_len = len(lr_shape)
        validator.check_integer("lr's rank", lr_shape_len, 1, Rel.LE, self.name)
        if lr_shape_len == 1:
            validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
        alpha_shape_len = len(alpha_shape)
        validator.check_integer("alpha's rank", alpha_shape_len, 1, Rel.LE, self.name)
        if alpha_shape_len == 1:
            validator.check_integer("alpha_shape[0]", alpha_shape[0], 1, Rel.EQ, self.name)
        sign_decay_shape_len = len(sign_decay_shape)
        validator.check_integer("sign_decay's rank", sign_decay_shape_len, 1, Rel.LE, self.name)
        if sign_decay_shape_len == 1:
            validator.check_integer("sign_decay_shape[0]", sign_decay_shape[0], 1, Rel.EQ, self.name)
        beta_shape_len = len(beta_shape)
        validator.check_integer("beta's rank", beta_shape_len, 1, Rel.LE, self.name)
        if beta_shape_len == 1:
            validator.check_integer("beta_shape[0]", beta_shape[0], 1, Rel.EQ, self.name)
        return var_shape, m_shape
    def infer_dtype(self, var_dtype, m_dtype, lr_dtype, alpha_dtype, sign_decay_dtype, beta_dtype, grad_dtype):
        """All inputs must be float16/float32; var/m/grad must share one dtype.
        Returns (var_dtype, m_dtype)."""
        valid_types = [mstype.float16, mstype.float32]
        args = {'var': var_dtype, 'm': m_dtype, 'grad': grad_dtype}
        validator.check_tensor_type_same(args, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"alpha": alpha_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"sign_decay": sign_decay_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"beta": beta_dtype}, valid_types, self.name)
        return var_dtype, m_dtype
class ApplyPowerSign(PrimitiveWithInfer):
    """PowerSign optimizer update of `var` and `m` (shape/dtype contract below)."""
    # Unlike ApplyAddSign, every input here shares a single dtype group T.
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('lr', dtype=sig.sig_dtype.T),
        sig.make_sig('logbase', dtype=sig.sig_dtype.T),
        sig.make_sig('sign_decay', dtype=sig.sig_dtype.T),
        sig.make_sig('beta', dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
    )
    @prim_attr_register
    def __init__(self):
        """No configurable attributes; the decorator performs registration."""
    def infer_shape(self, var_shape, m_shape, lr_shape, logbase_shape, sign_decay_shape, beta_shape, grad_shape):
        """m and grad must match var's shape; lr/logbase/sign_decay/beta must each
        be rank 0 or a 1-element rank-1 tensor. Returns (var_shape, m_shape)."""
        validator.check('m_shape', m_shape, 'var_shape', var_shape, Rel.EQ, self.name)
        validator.check('grad_shape', grad_shape, 'var_shape', var_shape, Rel.EQ, self.name)
        lr_shape_len = len(lr_shape)
        validator.check_integer("lr's rank", lr_shape_len, 1, Rel.LE, self.name)
        if lr_shape_len == 1:
            validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
        logbase_shape_len = len(logbase_shape)
        validator.check_integer("logbase's rank", logbase_shape_len, 1, Rel.LE, self.name)
        if logbase_shape_len == 1:
            validator.check_integer("logbase_shape[0]", logbase_shape[0], 1, Rel.EQ, self.name)
        sign_decay_shape_len = len(sign_decay_shape)
        validator.check_integer("sign_decay's rank", sign_decay_shape_len, 1, Rel.LE, self.name)
        if sign_decay_shape_len == 1:
            validator.check_integer("sign_decay_shape[0]", sign_decay_shape[0], 1, Rel.EQ, self.name)
        beta_shape_len = len(beta_shape)
        validator.check_integer("beta's rank", beta_shape_len, 1, Rel.LE, self.name)
        if beta_shape_len == 1:
            validator.check_integer("beta_shape[0]", beta_shape[0], 1, Rel.EQ, self.name)
        return var_shape, m_shape
    def infer_dtype(self, var_dtype, m_dtype, lr_dtype, logbase_dtype, sign_decay_dtype, beta_dtype, grad_dtype):
        """All inputs must be float16/float32; var/m/grad must share one dtype.
        Returns (var_dtype, m_dtype)."""
        valid_types = [mstype.float16, mstype.float32]
        args = {'var': var_dtype, 'm': m_dtype, 'grad': grad_dtype}
        validator.check_tensor_type_same(args, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"logbase": logbase_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"sign_decay": sign_decay_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"beta": beta_dtype}, valid_types, self.name)
        return var_dtype, m_dtype
class ApplyGradientDescent(PrimitiveWithInfer):
    """Plain gradient-descent update of `var` by `alpha * delta` (per op contract)."""
    # var is written in place and shares dtype group T with delta; alpha is T1.
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('alpha', dtype=sig.sig_dtype.T1),
        sig.make_sig('delta', dtype=sig.sig_dtype.T),
    )
    @prim_attr_register
    def __init__(self):
        """No configurable attributes; the decorator performs registration."""
    def infer_shape(self, var_shape, alpha_shape, delta_shape):
        """delta must match var's shape; alpha must be rank 0 or a 1-element
        rank-1 tensor. Returns var_shape."""
        validator.check('delta shape', delta_shape, 'var shape', var_shape, Rel.EQ, self.name)
        alpha_shape_len = len(alpha_shape)
        validator.check_integer("alpha's rank", alpha_shape_len, 1, Rel.LE, self.name)
        if alpha_shape_len == 1:
            validator.check_integer("alpha_shape[0]", alpha_shape[0], 1, Rel.EQ, self.name)
        return var_shape
    def infer_dtype(self, var_dtype, alpha_dtype, delta_dtype):
        """var/delta must be float16/float32 tensors of one dtype; alpha likewise.
        Returns var_dtype."""
        valid_types = [mstype.float16, mstype.float32]
        args = {'var': var_dtype, 'delta': delta_dtype}
        validator.check_tensor_type_same(args, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"alpha": alpha_dtype}, valid_types, self.name)
        return var_dtype
class ApplyProximalGradientDescent(PrimitiveWithInfer):
    """Proximal gradient-descent update of `var` with l1/l2 regularization terms."""
    # var is written in place and shares dtype group T with delta;
    # alpha/l1/l2 each carry an independent dtype group.
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('alpha', dtype=sig.sig_dtype.T1),
        sig.make_sig('l1', dtype=sig.sig_dtype.T2),
        sig.make_sig('l2', dtype=sig.sig_dtype.T3),
        sig.make_sig('delta', dtype=sig.sig_dtype.T),
    )
    @prim_attr_register
    def __init__(self):
        """No configurable attributes; the decorator performs registration."""
    def infer_shape(self, var_shape, alpha_shape, l1_shape, l2_shape, delta_shape):
        """delta must match var's shape; alpha/l1/l2 must each be rank 0 or a
        1-element rank-1 tensor. Returns var_shape."""
        validator.check('delta shape', delta_shape, 'var shape', var_shape, Rel.EQ, self.name)
        alpha_shape_len = len(alpha_shape)
        validator.check_integer("alpha's rank", alpha_shape_len, 1, Rel.LE, self.name)
        if alpha_shape_len == 1:
            validator.check_integer("alpha_shape[0]", alpha_shape[0], 1, Rel.EQ, self.name)
        l1_shape_len = len(l1_shape)
        validator.check_integer("l1's rank", l1_shape_len, 1, Rel.LE, self.name)
        if l1_shape_len == 1:
            validator.check_integer("l1_shape[0]", l1_shape[0], 1, Rel.EQ, self.name)
        l2_shape_len = len(l2_shape)
        validator.check_integer("l2's rank", l2_shape_len, 1, Rel.LE, self.name)
        if l2_shape_len == 1:
            validator.check_integer("l2_shape[0]", l2_shape[0], 1, Rel.EQ, self.name)
        return var_shape
    def infer_dtype(self, var_dtype, alpha_dtype, l1_dtype, l2_dtype, delta_dtype):
        """var/delta must be float16/float32 tensors of one dtype; alpha/l1/l2
        must match that family as scalars or tensors. Returns var_dtype."""
        valid_types = [mstype.float16, mstype.float32]
        args = {'var': var_dtype, 'delta': delta_dtype}
        validator.check_tensor_type_same(args, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"alpha": alpha_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"l1": l1_dtype}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"l2": l2_dtype}, valid_types, self.name)
        return var_dtype
class LARSUpdate(PrimitiveWithInfer):
    """LARS (layer-wise adaptive rate scaling) gradient update primitive."""
    @prim_attr_register
    def __init__(self, epsilon=1e-05, hyperpara=0.001, use_clip=False):
        """Validate attribute types: epsilon/hyperpara are floats, use_clip a bool."""
        validator.check_value_type("epsilon", epsilon, [float], self.name)
        validator.check_value_type("hyperpara", hyperpara, [float], self.name)
        validator.check_value_type("use_clip", use_clip, [bool], self.name)
    def infer_shape(self, weight_shape, gradient_shape, norm_weight_shape, norm_gradient_shape, weight_decay_shape,
                    learning_rate_shape):
        """weight/gradient and the two norm inputs must match pairwise;
        weight_decay and learning_rate must be scalars (rank 0 or 1-element rank 1).
        Returns weight_shape."""
        validator.check("weight shape", weight_shape, "gradient shape", gradient_shape, Rel.EQ, self.name)
        validator.check("norm weight shape", norm_weight_shape, "norm gradient shape", norm_gradient_shape, Rel.EQ,
                        self.name)
        shp_len = len(weight_decay_shape)
        validator.check_integer("weight decay's rank", shp_len, 1, Rel.LE, self.name)
        if shp_len == 1:
            validator.check_integer("weight_decay_shape[0]", weight_decay_shape[0], 1, Rel.EQ, self.name)
        shp_len = len(learning_rate_shape)
        validator.check_integer("learning rate's rank", shp_len, 1, Rel.LE, self.name)
        if shp_len == 1:
            validator.check_integer("learning_rate_shape[0]", learning_rate_shape[0], 1, Rel.EQ, self.name)
        return weight_shape
    def infer_dtype(self, weight_dtype, gradient_dtype, norm_weight_dtype, norm_gradient_dtype,
                    weight_decay_dtype, learning_rate_dtype):
        """Tensor inputs accept float16/float32/int16/int32 (one shared dtype);
        weight_decay and learning_rate additionally allow float64. Returns weight_dtype."""
        args = {"Weight dtype": weight_dtype, "gradient dtype": gradient_dtype, "norm weight dtype": norm_weight_dtype,
                "norm gradient dtype": norm_gradient_dtype}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32, mstype.int16, mstype.int32], self.name)
        validator.check_scalar_or_tensor_type_same({"weight_decay": weight_decay_dtype},
                                                   [mstype.float16, mstype.float32, mstype.float64], self.name)
        validator.check_scalar_or_tensor_type_same({"learning_rate": learning_rate_dtype},
                                                   [mstype.float16, mstype.float32, mstype.float64], self.name)
        return weight_dtype
class ApplyFtrl(PrimitiveWithInfer):
    """FTRL optimizer update of var/accum/linear (dense version)."""
    @prim_attr_register
    def __init__(self, use_locking=False):
        """Register IO names, validate use_locking, and record whether the target
        backend is Ascend (TBE kernels there return three outputs)."""
        self.init_prim_io_names(inputs=['var', 'accum', 'linear', 'grad', 'lr', 'l1', 'l2', 'lr_power'],
                                outputs=['output'])
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
        self.is_tbe = context.get_context("device_target") == "Ascend"
    def infer_shape(self, var_shape, accum_shape, linear_shape, grad_shape, lr_shape, l1_shape, l2_shape,
                    lr_power_shape):
        """accum and linear must match var's shape. On Ascend (TBE) the kernel
        emits (var, accum, linear) shapes; elsewhere just var's shape."""
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)
        if self.is_tbe:
            return var_shape, var_shape, var_shape
        return var_shape
    def infer_dtype(self, var_type, accum_type, linear_type, grad_type, lr_type, l1_type, l2_type, lr_power_type):
        """var/accum/linear/grad must be float16/float32 of one dtype; scalar
        hyper-parameters must match. Output dtype mirrors the shape rule above."""
        valid_types = [mstype.float16, mstype.float32]
        args = {'var': var_type, 'accum': accum_type, 'linear': linear_type, 'grad': grad_type}
        validator.check_tensor_type_same(args, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"lr": lr_type}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"l1": l1_type}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"l2": l2_type}, valid_types, self.name)
        validator.check_scalar_or_tensor_type_same({"lr_power": lr_power_type}, valid_types, self.name)
        if self.is_tbe:
            return var_type, var_type, var_type
        return var_type
class SparseApplyFtrl(PrimitiveWithCheck):
    """Sparse FTRL update of var/accum/linear at the rows selected by `indices`."""
    # var/accum/linear are written in place and share dtype group T with grad;
    # indices has its own group T1.
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('linear', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )
    @prim_attr_register
    def __init__(self, lr, l1, l2, lr_power, use_locking=False):
        """Validate hyper-parameters: lr > 0, l1 >= 0, l2 >= 0, lr_power <= 0."""
        validator.check_value_type("lr", lr, [float], self.name)
        validator.check_value_type("l1", l1, [float], self.name)
        validator.check_value_type("l2", l2, [float], self.name)
        validator.check_value_type("lr_power", lr_power, [float], self.name)
        self.lr = validator.check_number_range("lr", lr, 0.0, float("inf"), Rel.INC_NEITHER, self.name)
        self.l1 = validator.check_number_range("l1", l1, 0.0, float("inf"), Rel.INC_LEFT, self.name)
        self.l2 = validator.check_number_range("l2", l2, 0.0, float("inf"), Rel.INC_LEFT, self.name)
        self.lr_power = validator.check_number("lr_power", lr_power, 0, Rel.LE, self.name)
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
    def check_shape(self, var_shape, accum_shape, linear_shape, grad_shape, indices_shape):
        """accum/linear must match var; grad's trailing dims must match var's;
        indices is rank 1 and its length must equal grad's first dim."""
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)
        if len(var_shape) > 1:
            validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
    def check_dtype(self, var_dtype, accum_dtype, linear_dtype, grad_dtype, indices_dtype):
        """Tensor inputs must be float16/float32 of one dtype; indices must be int32."""
        args = {"var_dtype": var_dtype, "accum_dtype": accum_dtype,
                "linear_dtype": linear_dtype, "grad_dtype": grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
class SparseApplyFtrlV2(PrimitiveWithInfer):
    """Sparse FTRL-V2 update (FTRL with an extra l2_shrinkage term) of
    var/accum/linear at the rows selected by `indices`."""
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('linear', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
    )
    @prim_attr_register
    def __init__(self, lr, l1, l2, l2_shrinkage, lr_power, use_locking=False):
        """Validate hyper-parameters: lr > 0, l1 >= 0, l2 >= 0, lr_power <= 0;
        l2_shrinkage must be a float (no range restriction is applied here)."""
        validator.check_value_type("lr", lr, [float], self.name)
        validator.check_value_type("l1", l1, [float], self.name)
        validator.check_value_type("l2", l2, [float], self.name)
        validator.check_value_type("lr_power", lr_power, [float], self.name)
        self.lr = validator.check_number_range("lr", lr, 0.0, float("inf"), Rel.INC_NEITHER, self.name)
        self.l1 = validator.check_number_range("l1", l1, 0.0, float("inf"), Rel.INC_LEFT, self.name)
        self.l2 = validator.check_number_range("l2", l2, 0.0, float("inf"), Rel.INC_LEFT, self.name)
        self.lr_power = validator.check_number("lr_power", lr_power, 0, Rel.LE, self.name)
        self.l2_shrinkage = validator.check_value_type("l2_shrinkage", l2_shrinkage, [float], self.name)
        self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
    def infer_shape(self, var_shape, accum_shape, linear_shape, grad_shape, indices_shape):
        """Same shape contract as SparseApplyFtrl; returns all three state shapes."""
        validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
        validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)
        if len(var_shape) > 1:
            validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
        validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
        validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
        return var_shape, accum_shape, linear_shape
    def infer_dtype(self, var_dtype, accum_dtype, linear_dtype, grad_dtype, indices_dtype):
        """Tensor inputs must be float16/float32 of one dtype; indices must be int32.
        Returns the three state dtypes."""
        args = {"var_dtype": var_dtype, "accum_dtype": accum_dtype,
                "linear_dtype": linear_dtype, "grad_dtype": grad_dtype}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
        return var_dtype, accum_dtype, linear_dtype
class ConfusionMulGrad(PrimitiveWithInfer):
    """Fused multiply-gradient op: output0 keeps input0's shape, output1 is
    input1 reduced over `axis` (keep_dims controls whether reduced dims stay)."""
    @prim_attr_register
    def __init__(self, axis=(), keep_dims=False):
        """Register IO names and validate the reduction attributes."""
        self.init_prim_io_names(inputs=["input0", "input1", "input2"], outputs=["output0", "output1"])
        self.axis_ = validator.check_value_type("axis", axis, [int, tuple, list], self.name)
        self.keep_dims_ = validator.check_value_type("keep_dims", keep_dims, [bool], self.name)
    def infer_shape(self, input0_shape, input1_shape, input2_shape):
        """output0 mirrors input0; output1 is input1 reduced along self.axis_.
        NOTE(review): input2's shape is not constrained here — presumably checked
        by the kernel; confirm against the backend implementation."""
        outshape0 = input0_shape
        outshape1 = _infer_shape_reduce(input1_shape, self.axis_, self.keep_dims_, self.name)
        return outshape0, outshape1
    def infer_dtype(self, input0_dtype, input1_dtype, input2_dtype):
        """All three inputs must be tensors; outputs take input0/input1 dtypes."""
        validator.check_subclass("input0_dtype", input0_dtype, mstype.tensor, self.name)
        validator.check_subclass("input1_dtype", input1_dtype, mstype.tensor, self.name)
        validator.check_subclass("input2_dtype", input2_dtype, mstype.tensor, self.name)
        return input0_dtype, input1_dtype
class Dropout(PrimitiveWithInfer):
    """Dropout primitive: emits (output, mask), both shaped like the input."""
    @prim_attr_register
    def __init__(self, keep_prob=0.5):
        """Validate that keep_prob lies in (0, 1] and store it."""
        self.keep_prob = validator.check_number_range("keep_prob", keep_prob, 0, 1, Rel.INC_RIGHT, self.name)
    def infer_shape(self, x_shape):
        """Input must be at least rank 1; output and mask share its shape."""
        validator.check_integer("x_shape", len(x_shape), 1, Rel.GE, self.name)
        return x_shape, x_shape
    def infer_dtype(self, x_dtype):
        """Input must be a float16/float32 tensor; both outputs keep its dtype."""
        validator.check_subclass("x", x_dtype, mstype.tensor, self.name)
        validator.check_tensor_type_same({"x_dtype": x_dtype}, (mstype.float16, mstype.float32), self.name)
        return x_dtype, x_dtype
class DropoutGrad(PrimitiveWithInfer):
    """Gradient primitive for Dropout: applies the saved mask to dy."""
    @prim_attr_register
    def __init__(self, keep_prob=0.5):
        """Validate that keep_prob lies in (0, 1] and store it."""
        self.keep_prob = validator.check_number_range("keep_prob", keep_prob, 0, 1, Rel.INC_RIGHT, self.name)
    def infer_shape(self, dy_shape, mask_shape):
        """Gradient output mirrors dy's shape; mask shape is not constrained here."""
        return dy_shape
    def infer_dtype(self, dy_dtype, mask_dtype):
        """dy and mask must be tensors; dy must be float16/float32 and the
        output keeps its dtype."""
        validator.check_subclass("dy", dy_dtype, mstype.tensor, self.name)
        validator.check_subclass("mask", mask_dtype, mstype.tensor, self.name)
        validator.check_tensor_type_same({"dy_dtype": dy_dtype}, (mstype.float16, mstype.float32), self.name)
        return dy_dtype
class CTCLoss(PrimitiveWithInfer):
    """CTC (Connectionist Temporal Classification) loss over sparse labels.
    Outputs a per-batch loss vector and a gradient tensor shaped like `inputs`."""
    @prim_attr_register
    def __init__(self, preprocess_collapse_repeated=False, ctc_merge_repeated=True,
                 ignore_longer_outputs_than_inputs=False):
        """Register IO names and validate the three boolean behaviour flags."""
        self.init_prim_io_names(inputs=["inputs", "labels_indices", "labels_values", "sequence_length"],
                                outputs=["loss", "gradient"])
        validator.check_value_type("preprocess_collapse_repeated", preprocess_collapse_repeated, [bool], self.name)
        self.preprocess_collapse_repeated_ = preprocess_collapse_repeated
        self.ctc_merge_repeated_ = validator.check_value_type("ctc_merge_repeated", ctc_merge_repeated,
                                                              [bool], self.name)
        validator.check_value_type("ignore_longer_outputs_than_inputs",
                                   ignore_longer_outputs_than_inputs, [bool], self.name)
        self.ignore_longer_outputs_than_inputs_ = ignore_longer_outputs_than_inputs
    def infer_shape(self, inputs, labels_indices, labels_values, sequence_length):
        """inputs is rank 3 with inputs[1] as the batch dimension; labels are a
        sparse (indices, values) pair; sequence_length is per-batch. Returns
        ([batch], inputs) for (loss, gradient)."""
        validator.check_integer("inputs rank", len(inputs), 3, Rel.EQ, self.name)
        validator.check_integer("labels_indices rank", len(labels_indices), 2, Rel.EQ, self.name)
        validator.check_integer("labels_indices dim one", labels_indices[1], 2, Rel.EQ, self.name)
        validator.check_integer("labels_values rank", len(labels_values), 1, Rel.EQ, self.name)
        validator.check_integer("sequence_length rank", len(sequence_length), 1, Rel.EQ, self.name)
        validator.check('labels_indices size', labels_indices[0], 'labels_values size',
                        labels_values[0], Rel.EQ, self.name)
        validator.check('inputs batch_size', inputs[1], 'sequence_length batch_size',
                        sequence_length[0], Rel.EQ, self.name)
        batch_size = []
        batch_size.append(inputs[1])
        return batch_size, inputs
    def infer_dtype(self, inputs, labels_indices, labels_values, sequence_length):
        """inputs may be float16/float32/double; labels_indices int64, the other
        integer inputs int32. Loss and gradient keep the inputs dtype."""
        valid_dtype = [mstype.float16, mstype.float32, mstype.double]
        validator.check_tensor_type_same({"inputs_dtype": inputs}, valid_dtype, self.name)
        validator.check_tensor_type_same({"labels_indices_dtype": labels_indices}, [mstype.int64], self.name)
        validator.check_tensor_type_same({"labels_values_dtype": labels_values}, [mstype.int32], self.name)
        validator.check_tensor_type_same({"sequence_length_dtype": sequence_length}, [mstype.int32], self.name)
        return inputs, inputs
class CTCGreedyDecoder(PrimitiveWithInfer):
    """Greedy (best-path) decoding of CTC network output into a sparse result."""
    @prim_attr_register
    def __init__(self, merge_repeated=True):
        """Validate the bool flag controlling collapse of repeated classes."""
        self.merge_repeated = validator.check_value_type("merge_repeated", merge_repeated, [bool], self.name)
    def infer_shape(self, inputs_shape, sequence_length_shape):
        """inputs is rank 3 with inputs[1] as batch; sequence_length is per-batch.
        The number of decoded elements is unknown at compile time, so -1 marks
        a dynamic dimension in the sparse outputs."""
        validator.check_integer("inputs rank", len(inputs_shape), 3, Rel.EQ, self.name)
        validator.check_integer("sequence_length rank", len(sequence_length_shape), 1, Rel.EQ, self.name)
        validator.check('inputs batch_size', inputs_shape[1], 'sequence_length batch_size',
                        sequence_length_shape[0], Rel.EQ, self.name)
        total_decoded_outputs = -1
        decoded_indices_shape = [total_decoded_outputs, 2]
        decoded_values = [total_decoded_outputs]
        decoded_shape = [2]
        log_probability_shape = [inputs_shape[1], 1]
        return decoded_indices_shape, decoded_values, decoded_shape, log_probability_shape
    def infer_dtype(self, inputs_dtype, sequence_length_dtype):
        """inputs is float32/double, sequence_length int32; decoded sparse parts
        are int64 and log_probability keeps the inputs dtype."""
        validator.check_tensor_type_same({"inputs_dtype": inputs_dtype}, [mstype.float32, mstype.double], self.name)
        validator.check_tensor_type_same({"sequence_length_dtype": sequence_length_dtype}, [mstype.int32], self.name)
        decoded_type = mstype.tensor_type(mstype.int64)
        return decoded_type, decoded_type, decoded_type, inputs_dtype
class BasicLSTMCell(PrimitiveWithInfer):
    """Single LSTM cell step producing (ct, ht) plus the intermediate gates."""
    @prim_attr_register
    def __init__(self, keep_prob=1.0, forget_bias=1.0, state_is_tuple=True, activation='tanh'):
        """Validate attributes; only the 'tanh' activation is supported."""
        self.keep_prob = validator.check_value_type("keep_prob", keep_prob, [float], self.name)
        self.keep_prob = validator.check_number_range("keep_prob", keep_prob, 0.0, 1.0, Rel.INC_BOTH, self.name)
        self.forget_bias = validator.check_value_type("forget_bias", forget_bias, [float], self.name)
        self.state_is_tuple = validator.check_value_type("state_is_tuple", state_is_tuple, [bool], self.name)
        self.activation = validator.check_string("activation", activation, ['tanh'], self.name)
        self.add_prim_attr("io_format", "ND")
    def infer_shape(self, x_shape, h_shape, c_shape, w_shape, b_shape):
        """x/h/c/w are rank 2, b is rank 1; batch dims must agree and the weight
        and bias dims follow the 4-gate layout: w is (x_dim + h_dim, 4*h_dim),
        b is (4*h_dim,). All seven outputs share c's shape."""
        validator.check_integer("x rank", len(x_shape), 2, Rel.EQ, self.name)
        validator.check_integer("h rank", len(h_shape), 2, Rel.EQ, self.name)
        validator.check_integer("c rank", len(c_shape), 2, Rel.EQ, self.name)
        validator.check_integer("w rank", len(w_shape), 2, Rel.EQ, self.name)
        validator.check_integer("b rank", len(b_shape), 1, Rel.EQ, self.name)
        validator.check("x_shape[0]", x_shape[0], "h_shape[0]", h_shape[0], Rel.EQ, self.name)
        validator.check("c_shape[0]", c_shape[0], "h_shape[0]", h_shape[0], Rel.EQ, self.name)
        validator.check("c_shape[1]", c_shape[1], "h_shape[1]", h_shape[1], Rel.EQ, self.name)
        validator.check("w_shape[1]", w_shape[1], "4*h_shape[1]", 4 * h_shape[1], Rel.EQ, self.name)
        validator.check("w_shape[0]", w_shape[0], "x_shape[1]+h_shape[1]", x_shape[1] + h_shape[1], Rel.EQ, self.name)
        validator.check("b_shape[0]", b_shape[0], "4*h_shape[1]", 4 * h_shape[1], Rel.EQ, self.name)
        ct_shape = c_shape
        ht_shape = c_shape
        it_shape = c_shape
        jt_shape = c_shape
        ft_shape = c_shape
        ot_shape = c_shape
        tanhct_shape = c_shape
        return (ct_shape, ht_shape, it_shape, jt_shape, ft_shape, ot_shape, tanhct_shape)
    def infer_dtype(self, x_dtype, h_dtype, c_dtype, w_dtype, b_dtype):
        """All inputs must be float16/float32 (c and b must share one dtype).
        NOTE(review): the second output (ht) is hard-coded to float16 while the
        others follow c's dtype — presumably a backend kernel requirement; confirm."""
        validator.check_tensor_type_same({"x_dtype": x_dtype}, [mstype.float16, mstype.float32], self.name)
        validator.check_tensor_type_same({"h_dtype": h_dtype}, [mstype.float16, mstype.float32], self.name)
        validator.check_tensor_type_same({"w_dtype": w_dtype}, [mstype.float16, mstype.float32], self.name)
        args = {"c_dtype": c_dtype, "b_dtype": b_dtype}
        validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
        return (c_dtype, mstype.float16, c_dtype, c_dtype, c_dtype, c_dtype, c_dtype)
class InTopK(PrimitiveWithInfer):
    """For each row of x1, whether the class given by x2 is among its top k values;
    result is a bool vector shaped like x2."""
    @prim_attr_register
    def __init__(self, k):
        """Register IO names and validate that k is an int."""
        self.init_prim_io_names(inputs=['x1', 'x2', 'k'], outputs=['y'])
        validator.check_value_type("k", k, [int], self.name)
    def infer_dtype(self, x1_dtype, x2_dtype):
        """x1 must be float16/float32 and x2 int32; output is a bool tensor."""
        validator.check_tensor_type_same({"x1": x1_dtype}, (mstype.float16, mstype.float32,), self.name)
        validator.check_tensor_type_same({"x2": x2_dtype}, (mstype.int32,), self.name)
        return mstype.tensor_type(mstype.bool_)
    def infer_shape(self, x1_shape, x2_shape):
        """x1 is rank 2, x2 rank 1, and x2's length must equal x1's batch dim."""
        validator.check("x1", len(x1_shape), "", 2, Rel.EQ, self.name)
        validator.check("x2", len(x2_shape), "", 1, Rel.EQ, self.name)
        validator.check("size of x2", x2_shape[0], "x1's first dimension", x1_shape[0], Rel.EQ, self.name)
        return x2_shape
class LRN(PrimitiveWithInfer):
    """Local Response Normalization over a rank-4 input; shape and dtype pass through."""
    @prim_attr_register
    def __init__(self, depth_radius=5, bias=1.0, alpha=1.0, beta=0.5, norm_region="ACROSS_CHANNELS"):
        """Validate attribute types; only 'ACROSS_CHANNELS' normalization is
        supported and depth_radius must be non-negative."""
        self.init_prim_io_names(inputs=['x'], outputs=['y'])
        validator.check_value_type("depth_radius", depth_radius, [int], self.name)
        validator.check_value_type("bias", bias, [float], self.name)
        validator.check_value_type("alpha", alpha, [float], self.name)
        validator.check_value_type("beta", beta, [float], self.name)
        validator.check_value_type("norm_region", norm_region, [str], self.name)
        validator.check_string('norm_region', norm_region, ['ACROSS_CHANNELS'], self.name)
        validator.check_integer("depth_radius", depth_radius, 0, Rel.GE, self.name)
    def infer_dtype(self, x_dtype):
        """Input must be float16/float32; output keeps the dtype."""
        validator.check_tensor_type_same({"x": x_dtype}, (mstype.float16, mstype.float32,), self.name)
        return x_dtype
    def infer_shape(self, x_shape):
        """Input must be rank 4; output keeps the shape."""
        validator.check_integer("x_shape", len(x_shape), 4, Rel.EQ, self.name)
        return x_shape
class CTCLossV2(PrimitiveWithInfer):
    """CTC loss over dense labels. Returns (loss, log_alpha-style intermediate)
    as float32 — see infer_shape/infer_dtype for the exact contract."""
    @prim_attr_register
    def __init__(self):
        """No configurable attributes; the decorator performs registration."""
        pass
    def infer_dtype(self, input_dtype, labels_dtype, input_lengths_dtype, label_lengths_dtype):
        """input is float32; labels and both length vectors are int32.
        Both outputs are float32."""
        validator.check_tensor_type_same({"input": input_dtype}, (mstype.float32,), self.name)
        validator.check_tensor_type_same({"labels": labels_dtype}, (mstype.int32,), self.name)
        validator.check_tensor_type_same({"input_lengths": input_lengths_dtype}, (mstype.int32,), self.name)
        validator.check_tensor_type_same({"target_lengths": label_lengths_dtype}, (mstype.int32,), self.name)
        return mstype.float32, mstype.float32
    def infer_shape(self, input_shape, labels_shape, input_lengths_shape, label_lengths_shape):
        """input is rank 3 with input[1] as batch; labels may be rank 1 or 2;
        both length vectors are rank 1 with one entry per batch element.
        Returns ((batch,), input_shape)."""
        validator.check_integer("input shape", len(input_shape), 3, Rel.EQ, self.name)
        validator.check_number_range("labels shape", len(labels_shape), 1, 2, Rel.INC_BOTH, self.name)
        validator.check_integer("input lengths shape", len(input_lengths_shape), 1, Rel.EQ, self.name)
        validator.check_integer("label lengths shape", len(label_lengths_shape), 1, Rel.EQ, self.name)
        validator.check_integer("input[1]", input_shape[1], input_lengths_shape[0], Rel.EQ, self.name)
        validator.check_integer("input[1]", input_shape[1], label_lengths_shape[0], Rel.EQ, self.name)
        return (input_shape[1],), input_shape
| true | true |
f72a8af8ff8184287d32ace234a39d44ce65d605 | 25,935 | py | Python | lib/kb_das_tool/Utils/DASToolUtil.py | n1mus/kb_das_tool | e19f2c68aa24a93eec95a2dcbb6d662d7c088dcc | [
"MIT"
] | null | null | null | lib/kb_das_tool/Utils/DASToolUtil.py | n1mus/kb_das_tool | e19f2c68aa24a93eec95a2dcbb6d662d7c088dcc | [
"MIT"
] | null | null | null | lib/kb_das_tool/Utils/DASToolUtil.py | n1mus/kb_das_tool | e19f2c68aa24a93eec95a2dcbb6d662d7c088dcc | [
"MIT"
] | null | null | null | import errno
import json
import os
import subprocess
import sys
import time
import uuid
import zipfile
import shutil
from Bio import SeqIO
from installed_clients.AssemblyUtilClient import AssemblyUtil
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.MetagenomeUtilsClient import MetagenomeUtils
from installed_clients.ReadsUtilsClient import ReadsUtils
def log(message, prefix_newline=False):
    """Print *message* prefixed with a Unix timestamp; optionally emit a
    leading blank line first. Acts as a hook point to suppress or redirect
    log output later."""
    leading = '\n' if prefix_newline else ''
    stamp = '{0:.2f}'.format(time.time())
    print(leading + stamp + ': ' + str(message))
class DASToolUtil:
    """Wraps the DAS Tool bin-refinement workflow for a KBase SDK app:
    fetching the assembly, running the tool, and packaging report artifacts."""
    DASTOOL_THREADS=2  # thread count handed to the DAS Tool run
    BINNER_RESULT_DIRECTORY = 'das_tool_output_dir'  # top-level DAS Tool output dir
    BINNER_BIN_RESULT_DIR = 'das_tool_output_dir_DASTool_bins'  # subdir holding the refined bins
    def __init__(self, config):
        """Cache service endpoints from the SDK config and construct the
        KBase service clients used throughout the workflow."""
        self.callback_url = config['SDK_CALLBACK_URL']
        self.scratch = config['scratch']
        self.shock_url = config['shock-url']
        self.ws_url = config['workspace-url']
        self.dfu = DataFileUtil(self.callback_url)
        self.ru = ReadsUtils(self.callback_url)
        self.au = AssemblyUtil(self.callback_url)
        self.mgu = MetagenomeUtils(self.callback_url)
def validate_run_das_tool_params(self, params):
"""
validate_run_concoct_params:
validates params passed to run_concoct method
"""
log('Start validating run_kb_das_tool params')
# check for required parameters
for p in ['assembly_ref', 'input_binned_contig_names', 'output_binned_contig_name', 'workspace_name']:
if p not in params:
raise ValueError('"{}" parameter is required, but missing'.format(p))
def mkdir_p(self, path):
"""
mkdir_p: make directory for given path
"""
if not path:
return
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def run_command(self, command):
"""
run_command: run command and print result
"""
#os.chdir(self.scratch)
log('Start executing command:\n{}'.format(command))
log('Command is running from:\n{}'.format(self.scratch))
pipe = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
output,stderr = pipe.communicate()
exitCode = pipe.returncode
if (exitCode == 0):
log('Executed command:\n{}\n'.format(command) +
'Exit Code: {}\n'.format(exitCode))
else:
error_msg = 'Error running command:\n{}\n'.format(command)
error_msg += 'Exit Code: {}\nOutput:\n{}\nStderr:\n{}'.format(exitCode, output, stderr)
raise ValueError(error_msg)
sys.exit(1)
return (output,stderr)
    def get_contig_file(self, assembly_ref):
        """
        get_contig_file: download the contig FASTA for a GenomeAssembly object
        reference via AssemblyUtil, unpack it with DataFileUtil, and return
        the local file path.
        """
        contig_file = self.au.get_assembly_as_fasta({'ref': assembly_ref}).get('path')
        sys.stdout.flush()
        # unpack_file handles compressed downloads (e.g. .gz) transparently
        contig_file = self.dfu.unpack_file({'file_path': contig_file})['file_path']
        return contig_file
    def retrieve_and_clean_assembly(self, task_params):
        """Fetch the assembly FASTA (reusing a local scratch copy when present)
        and sanitize its headers with bbmap's reformat.sh, which replaces the
        whitespace that breaks bedtools. Returns the cleaned FASTA path."""
        if os.path.exists(task_params['contig_file_path']):
            assembly = task_params['contig_file_path']
            print("FOUND ASSEMBLY ON LOCAL SCRATCH")
        else:
            # we are on njsw so lets copy it over to scratch
            assembly = self.get_contig_file(task_params['assembly_ref'])
        # remove spaces from fasta headers because that breaks bedtools
        assembly_clean = os.path.abspath(assembly).split('.fa')[0] + "_clean.fa"
        command = '/bin/bash reformat.sh in={} out={} addunderscore'.format(assembly,assembly_clean)
        log('running reformat command: {}'.format(command))
        out,err = self.run_command(command)
        return assembly_clean
    def generate_output_file_list(self, result_directory):
        """
        generate_output_file_list: zip result files and generate file_links for report

        :param result_directory: directory tree produced by the DAS_Tool run
        :returns: single-element list with the KBaseReport file_link dict
            describing the zip archive
        """
        log('Start packing result files')
        output_files = list()
        # unique staging dir so repeated runs never collide on scratch
        output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
        self.mkdir_p(output_directory)
        result_file = os.path.join(output_directory, 'das_tool_result.zip')
        report_file = None
        with zipfile.ZipFile(result_file, 'w',
                             zipfile.ZIP_DEFLATED,
                             allowZip64=True) as zip_file:
            # grab all files we want to zip, skipping bulky intermediates
            # (alignments, indexes, per-bin summaries)
            for dirname, subdirs, files in os.walk(result_directory):
                for file in files:
                    if (file.endswith('.sam') or
                            file.endswith('.bam') or
                            file.endswith('.bai') or
                            file.endswith('.summary')):
                        continue
                    # files inside the bin directory are handled below so
                    # they keep their subdirectory prefix in the archive
                    if (dirname.endswith(self.BINNER_BIN_RESULT_DIR)):
                        continue
                    zip_file.write(os.path.join(dirname, file), file)
                if (dirname.endswith(self.BINNER_BIN_RESULT_DIR)):
                    # store bin FASTAs under their own folder in the zip
                    baseDir = os.path.basename(dirname)
                    for file in files:
                        full = os.path.join(dirname, file)
                        zip_file.write(full, os.path.join(baseDir, file))
        output_files.append({'path': result_file,
                             'name': os.path.basename(result_file),
                             'label': os.path.basename(result_file),
                             'description': 'Files generated by kb_das_tool App'})
        return output_files
    def generate_html_report(self, result_directory, assembly_ref, binned_contig_obj_ref):
        """
        generate_html_report: generate html summary report

        Fills report_template.html with overview counts and embeds the PDF
        plots DAS_Tool produced, then uploads the whole HTML directory to
        shock for KBaseReport.

        :returns: single-element list of shock info dicts for html_links
        """
        log('Start generating html report')
        #html_report = list()
        output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
        self.mkdir_p(output_directory)
        result_file_path = os.path.join(output_directory, 'report.html')
        # get summary data from existing assembly object and bins_objects
        Summary_Table_Content = ''
        Overview_Content = ''
        (binned_contig_count, input_contig_count,
         total_bins_count) = self.generate_overview_info(assembly_ref,
                                                         binned_contig_obj_ref,
                                                         result_directory)
        # get pdfs -- DAS_Tool is expected to emit exactly two plot PDFs
        pdf_filename_l = [f for f in os.listdir(self.BINNER_RESULT_DIRECTORY) if f.endswith('.pdf')]
        assert len(pdf_filename_l) == 2
        Overview_Content += '<p>Binned contigs: {}</p>'.format(binned_contig_count)
        Overview_Content += '<p>Input contigs: {}</p>'.format(input_contig_count)
        Overview_Content += '<p>Number of bins: {}</p>'.format(total_bins_count)
        for pdf_filename in pdf_filename_l:
            # NOTE(review): each PDF is embedded twice by these two identical
            # lines -- confirm whether the duplication is intentional.
            Overview_Content += '\n<embed src="{}" width="1000px" height="700px">'.format(pdf_filename)
            Overview_Content += '\n<embed src="{}" width="1000px" height="700px">'.format(pdf_filename)
        with open(result_file_path, 'w') as result_file:
            with open(os.path.join(os.path.dirname(__file__), 'report_template.html'),
                      'r') as report_template_file:
                report_template = report_template_file.read()
                report_template = report_template.replace('<p>Overview_Content</p>',
                                                          Overview_Content)
                report_template = report_template.replace('Summary_Table_Content',
                                                          Summary_Table_Content)
                result_file.write(report_template)
        # copy pdfs into html dir so the embeds resolve inside the report
        for pdf_filename in pdf_filename_l:
            shutil.copyfile(os.path.join(self.BINNER_RESULT_DIRECTORY, pdf_filename), os.path.join(output_directory, pdf_filename))
        # save html dir to shock
        def dir_to_shock(dir_path, name, description):
            '''
            For regular directories or html directories

            name - for regular directories: the name of the flat (zip) file returned to ui
                   for html directories: the name of the html file
            '''
            dfu_fileToShock_ret = self.dfu.file_to_shock({
                'file_path': dir_path,
                'make_handle': 0,
                'pack': 'zip',
                })
            dir_shockInfo = {
                'shock_id': dfu_fileToShock_ret['shock_id'],
                'name': name,
                'description': description
                }
            return dir_shockInfo
        html_shockInfo = dir_to_shock(output_directory, 'report.html', 'Report html for DAS tool')
        """
        html_report.append({'path': result_file_path,
                            'name': os.path.basename(result_file_path),
                            'label': os.path.basename(result_file_path),
                            'description': 'HTML summary report for kb_concoct App'})

        return html_report
        """
        return [html_shockInfo]
def generate_overview_info(self, assembly_ref, binned_contig_obj_ref, result_directory):
"""
_generate_overview_info: generate overview information from assembly and binnedcontig
"""
# get assembly and binned_contig objects that already have some data populated in them
assembly = self.dfu.get_objects({'object_refs': [assembly_ref]})['data'][0]
binned_contig = self.dfu.get_objects({'object_refs': [binned_contig_obj_ref]})['data'][0]
input_contig_count = assembly.get('data').get('num_contigs')
bins_directory = os.path.join(self.scratch, result_directory, self.BINNER_BIN_RESULT_DIR)
binned_contig_count = 0
total_bins_count = 0
total_bins = binned_contig.get('data').get('bins')
total_bins_count = len(total_bins)
for bin in total_bins:
binned_contig_count += len(bin.get('contigs'))
return (binned_contig_count, input_contig_count, total_bins_count)
def generate_report(self, binned_contig_obj_ref, params):
"""
generate_report: generate summary report
"""
log('Generating report')
params['result_directory'] = self.BINNER_RESULT_DIRECTORY
output_files = self.generate_output_file_list(params['result_directory'])
output_html_files = self.generate_html_report(params['result_directory'],
params['assembly_ref'],
binned_contig_obj_ref)
report_params = {
'message': '',
'workspace_name': params.get('workspace_name'),
'file_links': output_files,
'html_links': output_html_files,
'direct_html_link_index': 0,
'html_window_height': 266,
'report_object_name': 'kb_das_tool_report_' + str(uuid.uuid4())}
kbase_report_client = KBaseReport(self.callback_url)
output = kbase_report_client.create_extended_report(report_params)
report_output = {'report_name': output['name'], 'report_ref': output['ref']}
return report_output
def rename_and_standardize_bin_names(self):
"""
generate_command: generate renamed bins
"""
log("\n\nRunning rename_and_standardize_bin_names")
path_to_result_bins = os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY, "das_tool_output_dir_DASTool_bins")
for dirname, subdirs, files in os.walk(path_to_result_bins):
for file in files:
if file.endswith('.fa'):
os.rename(os.path.abspath(path_to_result_bins) + '/' +
file, os.path.abspath(path_to_result_bins) + '/bin.' +
file.split('.')[-2].zfill(3) + '.fasta') # need to change to 4 digits
def make_binned_contig_summary_file_for_binning_apps(self, task_params):
"""
generate_command: generate binned contig summary command
"""
log("\n\nRunning make_binned_contig_summary_file_for_binning_apps")
result_directory = task_params['result_directory']
path_to_result_bins = '{}/{}/'.format(result_directory, task_params['bin_result_directory'])
path_to_summary_file = path_to_result_bins + 'binned_contig.summary'
with open(path_to_summary_file, 'w+') as f:
f.write("Bin name\tCompleteness\tGenome size\tGC content\n")
for dirname, subdirs, files in os.walk(path_to_result_bins):
for file in files:
if file.endswith('.fasta'):
genome_bin_fna_file = os.path.join(path_to_result_bins, file)
bbstats_output_file = os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY,
genome_bin_fna_file).split('.fasta')[0] + ".bbstatsout"
bbstats_output = self.generate_stats_for_genome_bins(task_params,
genome_bin_fna_file,
bbstats_output_file)
f.write('{}\t0\t{}\t{}\n'.format(genome_bin_fna_file.split("/")[-1],
bbstats_output['contig_bp'],
bbstats_output['gc_avg']))
f.close()
log('Finished make_binned_contig_summary_file_for_binning_apps function')
#
# def make_binned_contig_summary_file_for_binning_apps(self, task_params):
# """
# generate_command: generate binned contig summary command
# """
# log("\n\nRunning make_binned_contig_summary_file_for_binning_apps")
# path_to_result = os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY, "das_tool_output_dir_DASTool_bins")
# path_to_summary_file = path_to_result + '/binned_contig.summary'
# with open(path_to_summary_file, 'w+') as f:
# f.write("Bin name\tCompleteness\tGenome size\tGC content\n")
# for dirname, subdirs, files in os.walk(path_to_result):
# for file in files:
# if file.endswith('.fasta'):
# genome_bin_fna_file = os.path.join(path_to_result, file)
# bbstats_output_file = os.path.join(path_to_result,
# genome_bin_fna_file).split('.fasta')[0] + ".bbstatsout"
# bbstats_output = self.generate_stats_for_genome_bins(task_params,
# genome_bin_fna_file,
# bbstats_output_file)
# f.write('{}\t0\t{}\t{}\n'.format(genome_bin_fna_file.split("/")[-1],
# bbstats_output['contig_bp'],
# bbstats_output['gc_avg']))
# f.close()
# log('Finished make_binned_contig_summary_file_for_binning_apps function')
#
def generate_stats_for_genome_bins(self, task_params, genome_bin_fna_file, bbstats_output_file):
"""
generate_command: bbtools stats.sh command
"""
log("running generate_stats_for_genome_bins on {}".format(genome_bin_fna_file))
genome_bin_fna_file = os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY, genome_bin_fna_file)
command = '/bin/bash stats.sh in={} format=3 > {}'.format(genome_bin_fna_file, bbstats_output_file)
self.run_command(command)
bbstats_output = open(bbstats_output_file, 'r').readlines()[1]
n_scaffolds = bbstats_output.split('\t')[0]
n_contigs = bbstats_output.split('\t')[1]
scaf_bp = bbstats_output.split('\t')[2]
contig_bp = bbstats_output.split('\t')[3]
gap_pct = bbstats_output.split('\t')[4]
scaf_N50 = bbstats_output.split('\t')[5]
scaf_L50 = bbstats_output.split('\t')[6]
ctg_N50 = bbstats_output.split('\t')[7]
ctg_L50 = bbstats_output.split('\t')[8]
scaf_N90 = bbstats_output.split('\t')[9]
scaf_L90 = bbstats_output.split('\t')[10]
ctg_N90 = bbstats_output.split('\t')[11]
ctg_L90 = bbstats_output.split('\t')[12]
scaf_max = bbstats_output.split('\t')[13]
ctg_max = bbstats_output.split('\t')[14]
scaf_n_gt50K = bbstats_output.split('\t')[15]
scaf_pct_gt50K = bbstats_output.split('\t')[16]
gc_avg = float(bbstats_output.split('\t')[17]) * 100 # need to figure out if correct
gc_std = float(bbstats_output.split('\t')[18]) * 100 # need to figure out if correct
log('Generated generate_stats_for_genome_bins command: {}'.format(command))
return {'n_scaffolds': n_scaffolds,
'n_contigs': n_contigs,
'scaf_bp': scaf_bp,
'contig_bp': contig_bp,
'gap_pct': gap_pct,
'scaf_N50': scaf_N50,
'scaf_L50': scaf_L50,
'ctg_N50': ctg_N50,
'ctg_L50': ctg_L50,
'scaf_N90': scaf_N90,
'scaf_L90': scaf_L90,
'ctg_N90': ctg_N90,
'ctg_L90': ctg_L90,
'scaf_max': scaf_max,
'ctg_max': ctg_max,
'scaf_n_gt50K': scaf_n_gt50K,
'scaf_pct_gt50K': scaf_pct_gt50K,
'gc_avg': gc_avg,
'gc_std': gc_std
}
def generate_das_tool_input_files_and_commands_from_binned_contigs(self, params):
#params['binned_contig_list_file'] = binned_contig_list_file
binned_contig_names = params['input_binned_contig_names']
trimmed_binned_contig_name_list = []
contig_to_bin_file_name_list = []
for input_ref in binned_contig_names:
# next line needed for testing
# binned_contig = self.dfu.get_objects({'object_refs': [input_ref['binned_contig_obj_ref']]})['data'][0]
# next line needed in production only
binned_contig = self.dfu.get_objects({'object_refs': [input_ref]})['data'][0]
binned_contig_name = binned_contig.get('info')[1]
binned_contig_data = binned_contig.get('data')
bins = binned_contig_data.get('bins')
trimmed_binned_contig_name = binned_contig_name.split(".BinnedContig")[0]
trimmed_binned_contig_name_list.append(trimmed_binned_contig_name)
contig_to_bin_file_name = "{}_contigs_to_bins.tsv".format(trimmed_binned_contig_name)
contig_to_bin_file_name_list.append(contig_to_bin_file_name)
f = open(contig_to_bin_file_name, "w+")
for bin in bins:
bin_id = bin.get('bid')
trimmed_bin_id = bin_id.split(".fasta")[0]
contigs = bin.get('contigs')
for contig_id, contig_value in contigs.items():
f.write("{}\t{}.{}\n".format(contig_id, trimmed_binned_contig_name, trimmed_bin_id))
f.close()
#contig_to_bin_file_name_list = self.BINNER_RESULT_DIRECTORY + contig_to_bin_file_name
# temp = str(self.BINNER_RESULT_DIRECTORY) + '/'
# contig_to_bin_file_name_list = [temp + s for s in contig_to_bin_file_name_list]
return (trimmed_binned_contig_name_list, contig_to_bin_file_name_list)
def generate_das_tool_command(self, params, trimmed_binned_contig_name_list, contig_to_bin_file_name_list):
"""
generate_command: generate concoct params
"""
print("\n\nRunning generate_das_tool_command")
command = 'DAS_Tool '
command += '-i {} '.format(contig_to_bin_file_name_list)
command += '-l {} '.format(trimmed_binned_contig_name_list)
command += '-c {} '.format(params.get('contig_file_path'))
command += '-o {} '.format(self.BINNER_RESULT_DIRECTORY)
command += '--search_engine {} '.format(params.get('search_engine'))
command += '--score_threshold {} '.format(params.get('score_threshold'))
command += '--duplicate_penalty {} '.format(params.get('duplicate_penalty'))
command += '--megabin_penalty {} '.format(params.get('megabin_penalty'))
command += '--write_bin_evals {} '.format(params.get('write_bin_evals'))
command += '--create_plots {} '.format(params.get('create_plots'))
command += '--write_bins 1 '
command += '--write_unbinned 0 '
command += '-t {}'.format(self.DASTOOL_THREADS)
log('Generated das_tool command: {}'.format(command))
return command
    def run_das_tool(self, params):
        """
        run_das_tool: DAS_Tool app

        Orchestrates the whole run: validates params, stages the assembly,
        writes the per-binned-contig TSV inputs, runs DAS_Tool, renames the
        output bins, writes the summary file, saves a new BinnedContig
        object and builds the report.

        required params:
            assembly_ref: Metagenome assembly object reference
            input_binned_contig_names: list of BinnedContig objects
            output_binned_contig_name: output BinnedContig object name
            workspace_name: the name of the workspace it gets saved to.

        optional params:
            search_engine; default diamond
            score_threshold; default 0.5
            duplicate_penalty; default 0.6
            megabin_penalty; default 0.5
            write_bin_evals; default 1
            create_plots; default 1
            write_bins; default 1
            write_unbinned; default 0

        ref: https://github.com/cmks/DAS_Tool
        """
        log('--->\nrunning DASToolUtil.run_das_tool\n' +
            'params:\n{}'.format(json.dumps(params, indent=1)))

        self.validate_run_das_tool_params(params)
        print("\n\nFinished running validate_run_das_tool_params")

        # stage the assembly FASTA onto scratch
        contig_file = self.get_contig_file(params.get('assembly_ref'))
        params['contig_file_path'] = contig_file

        result_directory = os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY)
        params['result_directory'] = result_directory
        self.mkdir_p(result_directory)
        cwd = os.getcwd()
        # the TSV input files are written relative to the cwd, so run from
        # the result directory
        log('Changing working dir to {}'.format(result_directory))
        os.chdir(result_directory)

        (trimmed_binned_contig_name_list, contig_to_bin_file_name_list) = self.generate_das_tool_input_files_and_commands_from_binned_contigs(params)

        # DAS_Tool expects comma-separated lists for its -i and -l arguments
        comma_symbol = ','
        trimmed_binned_contig_name_list = comma_symbol.join(trimmed_binned_contig_name_list)
        contig_to_bin_file_name_list = comma_symbol.join(contig_to_bin_file_name_list)

        log(os.listdir(result_directory))
        log("trimmed_binned_contig_name_list {}".format(trimmed_binned_contig_name_list))
        log("contig_to_bin_file_name_list {}".format(contig_to_bin_file_name_list))

        # run DAS_Tool itself
        command = self.generate_das_tool_command(params, trimmed_binned_contig_name_list, contig_to_bin_file_name_list)

        log('\nWorking dir is {}'.format(result_directory))
        log('\nWorking dir is {}'.format(os.getcwd()))

        log('Changing working dir to {}'.format(result_directory))
        os.chdir(result_directory)
        self.run_command(command)
        self.rename_and_standardize_bin_names()
        os.chdir(self.scratch)

        task_params = {}
        task_params['result_directory'] = os.path.join(self.scratch)
        task_params['bin_result_directory'] = os.path.join(self.BINNER_RESULT_DIRECTORY , "das_tool_output_dir_DASTool_bins")

        # check to make sure bins were generated, otherwise no need to run the rest
        if not os.path.exists(task_params['bin_result_directory']):
            raise AssertionError('No bins produced - skipping the creation of a new BinnedContig object')

        self.make_binned_contig_summary_file_for_binning_apps(task_params)

        # save the bin FASTAs + summary as a new BinnedContig object
        generate_binned_contig_param = {
            'file_directory': os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY , "das_tool_output_dir_DASTool_bins"),
            'assembly_ref': params.get('assembly_ref'),
            'binned_contig_name': params.get('output_binned_contig_name'),
            'workspace_name': params.get('workspace_name')
        }
        binned_contig_obj_ref = self.mgu.file_to_binned_contigs(
            generate_binned_contig_param).get('binned_contig_obj_ref')

        reportVal = self.generate_report(binned_contig_obj_ref, params)
        returnVal = {
            'result_directory': os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY),
            'binned_contig_obj_ref': binned_contig_obj_ref
        }
        returnVal.update(reportVal)
        return returnVal
| 44.409247 | 154 | 0.609794 | import errno
import json
import os
import subprocess
import sys
import time
import uuid
import zipfile
import shutil
from Bio import SeqIO
from installed_clients.AssemblyUtilClient import AssemblyUtil
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.MetagenomeUtilsClient import MetagenomeUtils
from installed_clients.ReadsUtilsClient import ReadsUtils
def log(message, prefix_newline=False):
print(('\n' if prefix_newline else '') + '{0:.2f}'.format(time.time()) + ': ' + str(message))
class DASToolUtil:
DASTOOL_THREADS=2
BINNER_RESULT_DIRECTORY = 'das_tool_output_dir'
BINNER_BIN_RESULT_DIR = 'das_tool_output_dir_DASTool_bins'
def __init__(self, config):
self.callback_url = config['SDK_CALLBACK_URL']
self.scratch = config['scratch']
self.shock_url = config['shock-url']
self.ws_url = config['workspace-url']
self.dfu = DataFileUtil(self.callback_url)
self.ru = ReadsUtils(self.callback_url)
self.au = AssemblyUtil(self.callback_url)
self.mgu = MetagenomeUtils(self.callback_url)
def validate_run_das_tool_params(self, params):
log('Start validating run_kb_das_tool params')
for p in ['assembly_ref', 'input_binned_contig_names', 'output_binned_contig_name', 'workspace_name']:
if p not in params:
raise ValueError('"{}" parameter is required, but missing'.format(p))
def mkdir_p(self, path):
if not path:
return
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def run_command(self, command):
log('Start executing command:\n{}'.format(command))
log('Command is running from:\n{}'.format(self.scratch))
pipe = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
output,stderr = pipe.communicate()
exitCode = pipe.returncode
if (exitCode == 0):
log('Executed command:\n{}\n'.format(command) +
'Exit Code: {}\n'.format(exitCode))
else:
error_msg = 'Error running command:\n{}\n'.format(command)
error_msg += 'Exit Code: {}\nOutput:\n{}\nStderr:\n{}'.format(exitCode, output, stderr)
raise ValueError(error_msg)
sys.exit(1)
return (output,stderr)
def get_contig_file(self, assembly_ref):
contig_file = self.au.get_assembly_as_fasta({'ref': assembly_ref}).get('path')
sys.stdout.flush()
contig_file = self.dfu.unpack_file({'file_path': contig_file})['file_path']
return contig_file
def retrieve_and_clean_assembly(self, task_params):
if os.path.exists(task_params['contig_file_path']):
assembly = task_params['contig_file_path']
print("FOUND ASSEMBLY ON LOCAL SCRATCH")
else:
assembly = self.get_contig_file(task_params['assembly_ref'])
assembly_clean = os.path.abspath(assembly).split('.fa')[0] + "_clean.fa"
command = '/bin/bash reformat.sh in={} out={} addunderscore'.format(assembly,assembly_clean)
log('running reformat command: {}'.format(command))
out,err = self.run_command(command)
return assembly_clean
def generate_output_file_list(self, result_directory):
log('Start packing result files')
output_files = list()
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
self.mkdir_p(output_directory)
result_file = os.path.join(output_directory, 'das_tool_result.zip')
report_file = None
with zipfile.ZipFile(result_file, 'w',
zipfile.ZIP_DEFLATED,
allowZip64=True) as zip_file:
for dirname, subdirs, files in os.walk(result_directory):
for file in files:
if (file.endswith('.sam') or
file.endswith('.bam') or
file.endswith('.bai') or
file.endswith('.summary')):
continue
if (dirname.endswith(self.BINNER_BIN_RESULT_DIR)):
continue
zip_file.write(os.path.join(dirname, file), file)
if (dirname.endswith(self.BINNER_BIN_RESULT_DIR)):
baseDir = os.path.basename(dirname)
for file in files:
full = os.path.join(dirname, file)
zip_file.write(full, os.path.join(baseDir, file))
output_files.append({'path': result_file,
'name': os.path.basename(result_file),
'label': os.path.basename(result_file),
'description': 'Files generated by kb_das_tool App'})
return output_files
def generate_html_report(self, result_directory, assembly_ref, binned_contig_obj_ref):
log('Start generating html report')
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
self.mkdir_p(output_directory)
result_file_path = os.path.join(output_directory, 'report.html')
Summary_Table_Content = ''
Overview_Content = ''
(binned_contig_count, input_contig_count,
total_bins_count) = self.generate_overview_info(assembly_ref,
binned_contig_obj_ref,
result_directory)
pdf_filename_l = [f for f in os.listdir(self.BINNER_RESULT_DIRECTORY) if f.endswith('.pdf')]
assert len(pdf_filename_l) == 2
Overview_Content += '<p>Binned contigs: {}</p>'.format(binned_contig_count)
Overview_Content += '<p>Input contigs: {}</p>'.format(input_contig_count)
Overview_Content += '<p>Number of bins: {}</p>'.format(total_bins_count)
for pdf_filename in pdf_filename_l:
Overview_Content += '\n<embed src="{}" width="1000px" height="700px">'.format(pdf_filename)
Overview_Content += '\n<embed src="{}" width="1000px" height="700px">'.format(pdf_filename)
with open(result_file_path, 'w') as result_file:
with open(os.path.join(os.path.dirname(__file__), 'report_template.html'),
'r') as report_template_file:
report_template = report_template_file.read()
report_template = report_template.replace('<p>Overview_Content</p>',
Overview_Content)
report_template = report_template.replace('Summary_Table_Content',
Summary_Table_Content)
result_file.write(report_template)
for pdf_filename in pdf_filename_l:
shutil.copyfile(os.path.join(self.BINNER_RESULT_DIRECTORY, pdf_filename), os.path.join(output_directory, pdf_filename))
def dir_to_shock(dir_path, name, description):
dfu_fileToShock_ret = self.dfu.file_to_shock({
'file_path': dir_path,
'make_handle': 0,
'pack': 'zip',
})
dir_shockInfo = {
'shock_id': dfu_fileToShock_ret['shock_id'],
'name': name,
'description': description
}
return dir_shockInfo
html_shockInfo = dir_to_shock(output_directory, 'report.html', 'Report html for DAS tool')
return [html_shockInfo]
def generate_overview_info(self, assembly_ref, binned_contig_obj_ref, result_directory):
assembly = self.dfu.get_objects({'object_refs': [assembly_ref]})['data'][0]
binned_contig = self.dfu.get_objects({'object_refs': [binned_contig_obj_ref]})['data'][0]
input_contig_count = assembly.get('data').get('num_contigs')
bins_directory = os.path.join(self.scratch, result_directory, self.BINNER_BIN_RESULT_DIR)
binned_contig_count = 0
total_bins_count = 0
total_bins = binned_contig.get('data').get('bins')
total_bins_count = len(total_bins)
for bin in total_bins:
binned_contig_count += len(bin.get('contigs'))
return (binned_contig_count, input_contig_count, total_bins_count)
def generate_report(self, binned_contig_obj_ref, params):
log('Generating report')
params['result_directory'] = self.BINNER_RESULT_DIRECTORY
output_files = self.generate_output_file_list(params['result_directory'])
output_html_files = self.generate_html_report(params['result_directory'],
params['assembly_ref'],
binned_contig_obj_ref)
report_params = {
'message': '',
'workspace_name': params.get('workspace_name'),
'file_links': output_files,
'html_links': output_html_files,
'direct_html_link_index': 0,
'html_window_height': 266,
'report_object_name': 'kb_das_tool_report_' + str(uuid.uuid4())}
kbase_report_client = KBaseReport(self.callback_url)
output = kbase_report_client.create_extended_report(report_params)
report_output = {'report_name': output['name'], 'report_ref': output['ref']}
return report_output
def rename_and_standardize_bin_names(self):
log("\n\nRunning rename_and_standardize_bin_names")
path_to_result_bins = os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY, "das_tool_output_dir_DASTool_bins")
for dirname, subdirs, files in os.walk(path_to_result_bins):
for file in files:
if file.endswith('.fa'):
os.rename(os.path.abspath(path_to_result_bins) + '/' +
file, os.path.abspath(path_to_result_bins) + '/bin.' +
file.split('.')[-2].zfill(3) + '.fasta')
def make_binned_contig_summary_file_for_binning_apps(self, task_params):
log("\n\nRunning make_binned_contig_summary_file_for_binning_apps")
result_directory = task_params['result_directory']
path_to_result_bins = '{}/{}/'.format(result_directory, task_params['bin_result_directory'])
path_to_summary_file = path_to_result_bins + 'binned_contig.summary'
with open(path_to_summary_file, 'w+') as f:
f.write("Bin name\tCompleteness\tGenome size\tGC content\n")
for dirname, subdirs, files in os.walk(path_to_result_bins):
for file in files:
if file.endswith('.fasta'):
genome_bin_fna_file = os.path.join(path_to_result_bins, file)
bbstats_output_file = os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY,
genome_bin_fna_file).split('.fasta')[0] + ".bbstatsout"
bbstats_output = self.generate_stats_for_genome_bins(task_params,
genome_bin_fna_file,
bbstats_output_file)
f.write('{}\t0\t{}\t{}\n'.format(genome_bin_fna_file.split("/")[-1],
bbstats_output['contig_bp'],
bbstats_output['gc_avg']))
f.close()
log('Finished make_binned_contig_summary_file_for_binning_apps function')
# generate_command: generate binned contig summary command
# """
def generate_stats_for_genome_bins(self, task_params, genome_bin_fna_file, bbstats_output_file):
log("running generate_stats_for_genome_bins on {}".format(genome_bin_fna_file))
genome_bin_fna_file = os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY, genome_bin_fna_file)
command = '/bin/bash stats.sh in={} format=3 > {}'.format(genome_bin_fna_file, bbstats_output_file)
self.run_command(command)
bbstats_output = open(bbstats_output_file, 'r').readlines()[1]
n_scaffolds = bbstats_output.split('\t')[0]
n_contigs = bbstats_output.split('\t')[1]
scaf_bp = bbstats_output.split('\t')[2]
contig_bp = bbstats_output.split('\t')[3]
gap_pct = bbstats_output.split('\t')[4]
scaf_N50 = bbstats_output.split('\t')[5]
scaf_L50 = bbstats_output.split('\t')[6]
ctg_N50 = bbstats_output.split('\t')[7]
ctg_L50 = bbstats_output.split('\t')[8]
scaf_N90 = bbstats_output.split('\t')[9]
scaf_L90 = bbstats_output.split('\t')[10]
ctg_N90 = bbstats_output.split('\t')[11]
ctg_L90 = bbstats_output.split('\t')[12]
scaf_max = bbstats_output.split('\t')[13]
ctg_max = bbstats_output.split('\t')[14]
scaf_n_gt50K = bbstats_output.split('\t')[15]
scaf_pct_gt50K = bbstats_output.split('\t')[16]
gc_avg = float(bbstats_output.split('\t')[17]) * 100
gc_std = float(bbstats_output.split('\t')[18]) * 100
log('Generated generate_stats_for_genome_bins command: {}'.format(command))
return {'n_scaffolds': n_scaffolds,
'n_contigs': n_contigs,
'scaf_bp': scaf_bp,
'contig_bp': contig_bp,
'gap_pct': gap_pct,
'scaf_N50': scaf_N50,
'scaf_L50': scaf_L50,
'ctg_N50': ctg_N50,
'ctg_L50': ctg_L50,
'scaf_N90': scaf_N90,
'scaf_L90': scaf_L90,
'ctg_N90': ctg_N90,
'ctg_L90': ctg_L90,
'scaf_max': scaf_max,
'ctg_max': ctg_max,
'scaf_n_gt50K': scaf_n_gt50K,
'scaf_pct_gt50K': scaf_pct_gt50K,
'gc_avg': gc_avg,
'gc_std': gc_std
}
def generate_das_tool_input_files_and_commands_from_binned_contigs(self, params):
binned_contig_names = params['input_binned_contig_names']
trimmed_binned_contig_name_list = []
contig_to_bin_file_name_list = []
for input_ref in binned_contig_names:
binned_contig = self.dfu.get_objects({'object_refs': [input_ref]})['data'][0]
binned_contig_name = binned_contig.get('info')[1]
binned_contig_data = binned_contig.get('data')
bins = binned_contig_data.get('bins')
trimmed_binned_contig_name = binned_contig_name.split(".BinnedContig")[0]
trimmed_binned_contig_name_list.append(trimmed_binned_contig_name)
contig_to_bin_file_name = "{}_contigs_to_bins.tsv".format(trimmed_binned_contig_name)
contig_to_bin_file_name_list.append(contig_to_bin_file_name)
f = open(contig_to_bin_file_name, "w+")
for bin in bins:
bin_id = bin.get('bid')
trimmed_bin_id = bin_id.split(".fasta")[0]
contigs = bin.get('contigs')
for contig_id, contig_value in contigs.items():
f.write("{}\t{}.{}\n".format(contig_id, trimmed_binned_contig_name, trimmed_bin_id))
f.close()
return (trimmed_binned_contig_name_list, contig_to_bin_file_name_list)
def generate_das_tool_command(self, params, trimmed_binned_contig_name_list, contig_to_bin_file_name_list):
print("\n\nRunning generate_das_tool_command")
command = 'DAS_Tool '
command += '-i {} '.format(contig_to_bin_file_name_list)
command += '-l {} '.format(trimmed_binned_contig_name_list)
command += '-c {} '.format(params.get('contig_file_path'))
command += '-o {} '.format(self.BINNER_RESULT_DIRECTORY)
command += '--search_engine {} '.format(params.get('search_engine'))
command += '--score_threshold {} '.format(params.get('score_threshold'))
command += '--duplicate_penalty {} '.format(params.get('duplicate_penalty'))
command += '--megabin_penalty {} '.format(params.get('megabin_penalty'))
command += '--write_bin_evals {} '.format(params.get('write_bin_evals'))
command += '--create_plots {} '.format(params.get('create_plots'))
command += '--write_bins 1 '
command += '--write_unbinned 0 '
command += '-t {}'.format(self.DASTOOL_THREADS)
log('Generated das_tool command: {}'.format(command))
return command
def run_das_tool(self, params):
log('--->\nrunning DASToolUtil.run_das_tool\n' +
'params:\n{}'.format(json.dumps(params, indent=1)))
self.validate_run_das_tool_params(params)
print("\n\nFinished running validate_run_das_tool_params")
contig_file = self.get_contig_file(params.get('assembly_ref'))
params['contig_file_path'] = contig_file
result_directory = os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY)
params['result_directory'] = result_directory
self.mkdir_p(result_directory)
cwd = os.getcwd()
log('Changing working dir to {}'.format(result_directory))
os.chdir(result_directory)
(trimmed_binned_contig_name_list, contig_to_bin_file_name_list) = self.generate_das_tool_input_files_and_commands_from_binned_contigs(params)
comma_symbol = ','
trimmed_binned_contig_name_list = comma_symbol.join(trimmed_binned_contig_name_list)
contig_to_bin_file_name_list = comma_symbol.join(contig_to_bin_file_name_list)
log(os.listdir(result_directory))
log("trimmed_binned_contig_name_list {}".format(trimmed_binned_contig_name_list))
log("contig_to_bin_file_name_list {}".format(contig_to_bin_file_name_list))
l_command(params, trimmed_binned_contig_name_list, contig_to_bin_file_name_list)
log('\nWorking dir is {}'.format(result_directory))
log('\nWorking dir is {}'.format(os.getcwd()))
log('Changing working dir to {}'.format(result_directory))
os.chdir(result_directory)
self.run_command(command)
self.rename_and_standardize_bin_names()
os.chdir(self.scratch)
task_params = {}
task_params['result_directory'] = os.path.join(self.scratch)
task_params['bin_result_directory'] = os.path.join(self.BINNER_RESULT_DIRECTORY , "das_tool_output_dir_DASTool_bins")
if not os.path.exists(task_params['bin_result_directory']):
raise AssertionError('No bins produced - skipping the creation of a new BinnedContig object')
self.make_binned_contig_summary_file_for_binning_apps(task_params)
generate_binned_contig_param = {
'file_directory': os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY , "das_tool_output_dir_DASTool_bins"),
'assembly_ref': params.get('assembly_ref'),
'binned_contig_name': params.get('output_binned_contig_name'),
'workspace_name': params.get('workspace_name')
}
binned_contig_obj_ref = self.mgu.file_to_binned_contigs(
generate_binned_contig_param).get('binned_contig_obj_ref')
reportVal = self.generate_report(binned_contig_obj_ref, params)
returnVal = {
'result_directory': os.path.join(self.scratch, self.BINNER_RESULT_DIRECTORY),
'binned_contig_obj_ref': binned_contig_obj_ref
}
returnVal.update(reportVal)
return returnVal
| true | true |
f72a8b1d41dcd8162bc15ea1ac9f0f974c941910 | 801 | py | Python | venv/Scripts/f2py.py | nfuster2017/AmazonWebCrawler | d45e2dec826b5cadd632ed8a94c2c4c127430000 | [
"MIT"
] | 1 | 2019-07-28T05:32:10.000Z | 2019-07-28T05:32:10.000Z | venv/Scripts/f2py.py | nfuster2017/AmazonWebCrawler | d45e2dec826b5cadd632ed8a94c2c4c127430000 | [
"MIT"
] | 4 | 2021-06-08T20:08:26.000Z | 2022-03-11T23:54:16.000Z | venv/Scripts/f2py.py | nfuster2017/AmazonWebCrawler | d45e2dec826b5cadd632ed8a94c2c4c127430000 | [
"MIT"
] | null | null | null | #!D:\School\UMD\INST326\Group Project\venv\Scripts\python.exe
# See http://cens.ioc.ee/projects/f2py2e/
from __future__ import division, print_function
import os
import sys
for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
try:
i = sys.argv.index("--" + mode)
del sys.argv[i]
break
except ValueError:
pass
os.environ["NO_SCIPY_IMPORT"] = "f2py"
if mode == "g3-numpy":
sys.stderr.write("G3 f2py support is not implemented, yet.\\n")
sys.exit(1)
elif mode == "2e-numeric":
from f2py2e import main
elif mode == "2e-numarray":
sys.argv.append("-DNUMARRAY")
from f2py2e import main
elif mode == "2e-numpy":
from numpy.f2py import main
else:
sys.stderr.write("Unknown mode: " + repr(mode) + "\\n")
sys.exit(1)
main()
| 27.62069 | 67 | 0.645443 |
# f2py command-line launcher: picks an f2py backend from an optional
# "--<mode>" flag, then hands control to that backend's main().
from __future__ import division, print_function
import os
import sys
# Scan argv for one of the mode flags and consume it if found.  If no flag
# is present the loop completes without breaking, so `mode` keeps its final
# loop value, "2e-numpy" -- the effective default backend.
for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
    try:
        i = sys.argv.index("--" + mode)
        del sys.argv[i]
        break
    except ValueError:
        pass
os.environ["NO_SCIPY_IMPORT"] = "f2py"
if mode == "g3-numpy":
    # NOTE(review): "\\n" writes a literal backslash-n, not a newline -- confirm intended.
    sys.stderr.write("G3 f2py support is not implemented, yet.\\n")
    sys.exit(1)
elif mode == "2e-numeric":
    from f2py2e import main
elif mode == "2e-numarray":
    # The numarray backend reuses f2py2e with an extra define flag.
    sys.argv.append("-DNUMARRAY")
    from f2py2e import main
elif mode == "2e-numpy":
    from numpy.f2py import main
else:
    # NOTE(review): same "\\n" literal-backslash issue as above.
    sys.stderr.write("Unknown mode: " + repr(mode) + "\\n")
    sys.exit(1)
main()
| true | true |
f72a8d3c2c03ab88dbb2873eded7eee4dbec4655 | 1,951 | py | Python | testing/exercise/vehicle/test/test_vehicle.py | PetkoAndreev/Python-OOP | 2cc3094940cdf078f0ee60be938e883f843766e4 | [
"MIT"
] | 1 | 2021-05-27T07:59:17.000Z | 2021-05-27T07:59:17.000Z | testing/exercise/vehicle/test/test_vehicle.py | PetkoAndreev/Python-OOP | 2cc3094940cdf078f0ee60be938e883f843766e4 | [
"MIT"
] | null | null | null | testing/exercise/vehicle/test/test_vehicle.py | PetkoAndreev/Python-OOP | 2cc3094940cdf078f0ee60be938e883f843766e4 | [
"MIT"
] | null | null | null | import unittest
from python_oop.testing.exercise.vehicle.project.vehicle import Vehicle
# from project.vehicle import Vehicle
class VehicleTest(unittest.TestCase):
    """Unit tests for Vehicle: construction, fuel capacity, driving, refuelling."""

    def setUp(self):
        # Fresh vehicle per test: 50.0 fuel (which also seeds capacity),
        # 300.0 horse power.
        self.vehicle = Vehicle(50.0, 300.0)

    def test_vehicle__init_method(self):
        # Capacity mirrors the initial fuel amount; consumption falls back
        # to the class default.
        self.assertEqual(50.0, self.vehicle.fuel)
        self.assertEqual(50.0, self.vehicle.capacity)
        self.assertEqual(300.0, self.vehicle.horse_power)
        self.assertEqual(
            self.vehicle.DEFAULT_FUEL_CONSUMPTION, self.vehicle.fuel_consumption
        )

    def test_vehicle__fuel_capacity_if_fuel_changed(self):
        # Mutating fuel must leave capacity untouched.
        self.assertEqual(50.0, self.vehicle.capacity)
        self.vehicle.fuel = 20.0
        self.assertEqual(50.0, self.vehicle.capacity)

    def test_vehicle__str_method(self):
        v = self.vehicle
        expected = (
            f"The vehicle has {v.horse_power} "
            f"horse power with {v.fuel} fuel left and {v.fuel_consumption} fuel consumption"
        )
        self.assertEqual(expected, str(v))

    def test_vehicle__drive_method_success(self):
        # Driving 5 units leaves 43.75 of the original 50.0 fuel.
        self.vehicle.drive(5)
        self.assertEqual(43.75, self.vehicle.fuel)

    def test_vehicle__drive_method__expect_exception(self):
        # Driving further than the fuel allows must raise.
        with self.assertRaises(Exception) as ctx:
            self.vehicle.drive(100)
        self.assertEqual("Not enough fuel", str(ctx.exception))

    def test_vehicle__refuel_method_success(self):
        # Burn 6.25 fuel first so refuelling by the same amount fills the tank.
        self.vehicle.drive(5)
        self.vehicle.refuel(6.25)
        self.assertEqual(50.0, self.vehicle.fuel)

    def test_vehicle__refuel_method__expect_exception(self):
        # Overfilling past capacity must raise.
        with self.assertRaises(Exception) as ctx:
            self.vehicle.refuel(100)
        self.assertEqual("Too much fuel", str(ctx.exception))
if __name__ == '__main__':
    # Allow running this test module directly: `python test_vehicle.py`.
    unittest.main()
| 36.811321 | 128 | 0.708355 | import unittest
from python_oop.testing.exercise.vehicle.project.vehicle import Vehicle
class VehicleTest(unittest.TestCase):
    """Unit tests for Vehicle: construction, fuel capacity, driving, refuelling."""
    def setUp(self):
        # Fresh vehicle per test: 50.0 fuel (also seeds capacity), 300.0 horse power.
        self.vehicle = Vehicle(50.0, 300.0)
    def test_vehicle__init_method(self):
        # Capacity mirrors the initial fuel; consumption falls back to the default.
        self.assertEqual(50.0, self.vehicle.fuel)
        self.assertEqual(50.0, self.vehicle.capacity)
        self.assertEqual(300.0, self.vehicle.horse_power)
        self.assertEqual(self.vehicle.DEFAULT_FUEL_CONSUMPTION, self.vehicle.fuel_consumption)
    def test_vehicle__fuel_capacity_if_fuel_changed(self):
        # Mutating fuel must leave capacity untouched.
        self.assertEqual(50.0, self.vehicle.capacity)
        self.vehicle.fuel = 20.0
        self.assertEqual(50.0, self.vehicle.capacity)
    def test_vehicle__str_method(self):
        # Expected text is rebuilt from the instance's own attributes.
        expected_result = f"The vehicle has {self.vehicle.horse_power} " \
                          f"horse power with {self.vehicle.fuel} fuel left and {self.vehicle.fuel_consumption} fuel consumption"
        actual_result = self.vehicle.__str__()
        self.assertEqual(expected_result, actual_result)
    def test_vehicle__drive_method_success(self):
        # Driving 5 units leaves 43.75 of the original 50.0 fuel.
        self.vehicle.drive(5)
        self.assertEqual(43.75, self.vehicle.fuel)
    def test_vehicle__drive_method__expect_exception(self):
        # Driving further than the fuel allows must raise.
        expected_result = "Not enough fuel"
        with self.assertRaises(Exception) as context:
            self.vehicle.drive(100)
        self.assertEqual(expected_result, str(context.exception))
    def test_vehicle__refuel_method_success(self):
        # Burn 6.25 fuel first so refuelling by the same amount fills the tank.
        self.vehicle.drive(5)
        self.vehicle.refuel(6.25)
        self.assertEqual(50.0, self.vehicle.fuel)
    def test_vehicle__refuel_method__expect_exception(self):
        # Overfilling past capacity must raise.
        expected_result = "Too much fuel"
        with self.assertRaises(Exception) as context:
            self.vehicle.refuel(100)
        self.assertEqual(expected_result, str(context.exception))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| true | true |
f72a8f6331ddd325a61a09e43d5a54e5309a6648 | 1,760 | py | Python | examples/ad_manager/v201911/user_service/get_all_users.py | MattCardoso/googleads-python-lib | 62f0db9fdb78a1bcdb1e61c82c609d9f47cb48d8 | [
"Apache-2.0"
] | null | null | null | examples/ad_manager/v201911/user_service/get_all_users.py | MattCardoso/googleads-python-lib | 62f0db9fdb78a1bcdb1e61c82c609d9f47cb48d8 | [
"Apache-2.0"
] | null | null | null | examples/ad_manager/v201911/user_service/get_all_users.py | MattCardoso/googleads-python-lib | 62f0db9fdb78a1bcdb1e61c82c609d9f47cb48d8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all users.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
    """Page through every user in the network, printing one line per user.

    Args:
      client: an initialized ad_manager.AdManagerClient.
    """
    # UserService provides read access to the network's users.
    user_service = client.GetService('UserService', version='v201911')
    # An empty statement selects all users; the builder supplies LIMIT/OFFSET
    # so results come back one page at a time.
    stmt = ad_manager.StatementBuilder(version='v201911')
    while True:
        page = user_service.getUsersByStatement(stmt.ToStatement())
        if 'results' in page and len(page['results']):
            for user in page['results']:
                summary = 'User with ID "%d" and name "%s" was found.\n' % (
                    user['id'], user['name'])
                print(summary)
            # Advance the offset to fetch the next page.
            stmt.offset += stmt.limit
        else:
            break
    # `page` still holds the final response, which carries the total count.
    print('\nNumber of results found: %s' % page['totalResultSetSize'])
if __name__ == '__main__':
    # Initialize client object.
    # NOTE(review): LoadFromStorage presumably reads credentials from the
    # default googleads.yaml location -- see the googleads library docs.
    ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
    main(ad_manager_client)
| 35.2 | 78 | 0.702841 |
from googleads import ad_manager
def main(client):
    """Page through every user in the network, printing one line per user."""
    user_service = client.GetService('UserService', version='v201911')
    # Empty statement selects all users; the builder supplies LIMIT/OFFSET.
    statement = ad_manager.StatementBuilder(version='v201911')
    while True:
        response = user_service.getUsersByStatement(statement.ToStatement())
        if 'results' in response and len(response['results']):
            for user in response['results']:
                print('User with ID "%d" and name "%s" was found.\n' % (user['id'],
                                                                        user['name']))
            # Advance the offset to fetch the next page.
            statement.offset += statement.limit
        else:
            break
    # `response` still holds the final page, which carries the total count.
    print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
    # Initialize the Ad Manager client and run.
    ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
    main(ad_manager_client)
| true | true |
f72a8ffa64b566ce6eec8c0c16ba2850ac0a95f6 | 2,245 | py | Python | examples/lm1b/main.py | mjsML/fast_flax | d982b59b715524884d08d6ed506ab325e8be1ece | [
"Apache-2.0"
] | null | null | null | examples/lm1b/main.py | mjsML/fast_flax | d982b59b715524884d08d6ed506ab325e8be1ece | [
"Apache-2.0"
] | 1 | 2021-08-16T09:16:55.000Z | 2021-08-16T09:16:55.000Z | examples/lm1b/main.py | mjsML/fast_flax | d982b59b715524884d08d6ed506ab325e8be1ece | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main file for running the Language Modelling example with LM1B.
This file is intentionally kept short. The majority for logic is in libraries
than can be easily tested and imported in Colab.
"""
from absl import app
from absl import flags
from absl import logging
from clu import platform
import train
import jax
from ml_collections import config_flags
import tensorflow as tf
FLAGS = flags.FLAGS

# Command-line flags; both are mandatory (see mark_flags_as_required below).
flags.DEFINE_string('workdir', None, 'Directory to store model data.')
config_flags.DEFINE_config_file(
    'config',
    'configs/default.py',
    'File path to the training hyperparameter configuration.',
    lock_config=True)
flags.mark_flags_as_required(['config', 'workdir'])
def main(argv):
  """Entry point: configures the environment and launches training."""
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')

  # Hide any GPUs from TensorFlow. Otherwise TF might reserve memory and make
  # it unavailable to JAX. Must run before anything touches the devices.
  tf.config.experimental.set_visible_devices([], 'GPU')

  logging.info('JAX process: %d / %d', jax.process_index(), jax.process_count())
  logging.info('JAX local devices: %r', jax.local_devices())

  # Add a note so that we can tell which task is which JAX host.
  # (Depending on the platform task 0 is not guaranteed to be host 0)
  platform.work_unit().set_task_status(f'process_index: {jax.process_index()}, '
                                       f'process_count: {jax.process_count()}')
  platform.work_unit().create_artifact(platform.ArtifactType.DIRECTORY,
                                       FLAGS.workdir, 'workdir')
  train.train_and_evaluate(FLAGS.config, FLAGS.workdir)
if __name__ == '__main__':
  # Let JAX register/parse its own flags alongside absl's before app.run.
  jax.config.parse_flags_with_absl()
  app.run(main)
| 34.015152 | 80 | 0.728285 |
from absl import app
from absl import flags
from absl import logging
from clu import platform
import train
import jax
from ml_collections import config_flags
import tensorflow as tf
FLAGS = flags.FLAGS
# Command-line flags; both are mandatory (see mark_flags_as_required below).
flags.DEFINE_string('workdir', None, 'Directory to store model data.')
config_flags.DEFINE_config_file(
    'config',
    'configs/default.py',
    'File path to the training hyperparameter configuration.',
    lock_config=True)
flags.mark_flags_as_required(['config', 'workdir'])
def main(argv):
  """Entry point: configures the environment and launches training."""
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')
  # Hide GPUs from TensorFlow so it cannot reserve memory needed by JAX;
  # must run before anything touches the devices.
  tf.config.experimental.set_visible_devices([], 'GPU')
  logging.info('JAX process: %d / %d', jax.process_index(), jax.process_count())
  logging.info('JAX local devices: %r', jax.local_devices())
  # Tag the work unit so each JAX host's task is identifiable.
  platform.work_unit().set_task_status(f'process_index: {jax.process_index()}, '
                                       f'process_count: {jax.process_count()}')
  platform.work_unit().create_artifact(platform.ArtifactType.DIRECTORY,
                                       FLAGS.workdir, 'workdir')
  train.train_and_evaluate(FLAGS.config, FLAGS.workdir)
if __name__ == '__main__':
  # Let JAX register/parse its own flags alongside absl's before app.run.
  jax.config.parse_flags_with_absl()
  app.run(main)
| true | true |
f72a91102600de8d03a0b64c5ee35b9767a86fd4 | 4,373 | py | Python | dash_core_components/RangeSlider.py | mako-npm/dash-core-components | 0cbc3d8093c678e59b5b4dfa3aa2637d071a5b33 | [
"MIT"
] | null | null | null | dash_core_components/RangeSlider.py | mako-npm/dash-core-components | 0cbc3d8093c678e59b5b4dfa3aa2637d071a5b33 | [
"MIT"
] | null | null | null | dash_core_components/RangeSlider.py | mako-npm/dash-core-components | 0cbc3d8093c678e59b5b4dfa3aa2637d071a5b33 | [
"MIT"
] | null | null | null | # AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class RangeSlider(Component):
    """A RangeSlider component.
A double slider with two handles.
Used for specifying a range of numerical values.

Keyword arguments:
- id (string; optional)
- marks (optional): Marks on the slider.
The key determines the position,
and the value determines what will show.
If you want to set the style of a specific mark point,
the value should be an object which
contains style and label properties.. marks has the following type: dict containing keys 'number'.
Those keys have the following types:
  - number (optional): . number has the following type: string | dict containing keys 'style', 'label'.
    Those keys have the following types:
  - style (dict; optional)
  - label (string; optional)
- value (list; optional): The value of the input
- allowCross (boolean; optional): allowCross could be set as true to allow those handles to cross.
- className (string; optional): Additional CSS class for the root DOM node
- count (number; optional): Determine how many ranges to render, and multiple handles
will be rendered (number + 1).
- disabled (boolean; optional): If true, the handles can't be moved.
- dots (boolean; optional): When the step value is greater than 1,
you can set the dots to true if you want to
render the slider with dots.
- included (boolean; optional): If the value is true, it means a continuous
value is included. Otherwise, it is an independent value.
- min (number; optional): Minimum allowed value of the slider
- max (number; optional): Maximum allowed value of the slider
- pushable (boolean | number; optional): pushable could be set as true to allow pushing of
surrounding handles when moving an handle.
When set to a number, the number will be the
minimum ensured distance between handles.
- step (number; optional): Value by which increments or decrements are made
- vertical (boolean; optional): If true, the slider will be vertical
- updatemode (a value equal to: 'mouseup', 'drag'; optional): Determines when the component should update
its value. If `mouseup`, then the slider
will only trigger its value when the user has
finished dragging the slider. If `drag`, then
the slider will update its value continuously
as it is being dragged.
Only use `drag` if your updates are fast.
- loading_state (optional): Object that holds the loading state object coming from dash-renderer. loading_state has the following type: dict containing keys 'is_loading', 'prop_name', 'component_name'.
Those keys have the following types:
  - is_loading (boolean; optional): Determines if the component is loading or not
  - prop_name (string; optional): Holds which property is loading
  - component_name (string; optional): Holds the name of the component that is loading"""
    @_explicitize_args
    def __init__(self, id=Component.UNDEFINED, marks=Component.UNDEFINED, value=Component.UNDEFINED, allowCross=Component.UNDEFINED, className=Component.UNDEFINED, count=Component.UNDEFINED, disabled=Component.UNDEFINED, dots=Component.UNDEFINED, included=Component.UNDEFINED, min=Component.UNDEFINED, max=Component.UNDEFINED, pushable=Component.UNDEFINED, step=Component.UNDEFINED, vertical=Component.UNDEFINED, updatemode=Component.UNDEFINED, loading_state=Component.UNDEFINED, **kwargs):
        # Names of all props this component accepts (mirrors the React side).
        self._prop_names = ['id', 'marks', 'value', 'allowCross', 'className', 'count', 'disabled', 'dots', 'included', 'min', 'max', 'pushable', 'step', 'vertical', 'updatemode', 'loading_state']
        self._type = 'RangeSlider'
        self._namespace = 'dash_core_components'
        self._valid_wildcard_attributes = []
        self.available_properties = ['id', 'marks', 'value', 'allowCross', 'className', 'count', 'disabled', 'dots', 'included', 'min', 'max', 'pushable', 'step', 'vertical', 'updatemode', 'loading_state']
        self.available_wildcard_properties = []
        # `_explicitize_args` records which kwargs the caller actually passed;
        # only those are forwarded, letting Dash distinguish "unset" from a
        # default. NOTE: this block reads `locals()`, so every parameter name
        # above is semantically load-bearing.
        _explicit_args = kwargs.pop('_explicit_args')
        _locals = locals()
        _locals.update(kwargs)  # For wildcard attrs
        args = {k: _locals[k] for k in _explicit_args if k != 'children'}
        # This component declares no required props, so this loop never fires;
        # it is kept to match the generated-component template.
        for k in []:
            if k not in args:
                raise TypeError(
                    'Required argument `' + k + '` was not specified.')
        super(RangeSlider, self).__init__(**args)
| 59.094595 | 490 | 0.732678 |
from dash.development.base_component import Component, _explicitize_args
class RangeSlider(Component):
    """Dash wrapper for the RangeSlider React component (auto-generated)."""
    @_explicitize_args
    def __init__(self, id=Component.UNDEFINED, marks=Component.UNDEFINED, value=Component.UNDEFINED, allowCross=Component.UNDEFINED, className=Component.UNDEFINED, count=Component.UNDEFINED, disabled=Component.UNDEFINED, dots=Component.UNDEFINED, included=Component.UNDEFINED, min=Component.UNDEFINED, max=Component.UNDEFINED, pushable=Component.UNDEFINED, step=Component.UNDEFINED, vertical=Component.UNDEFINED, updatemode=Component.UNDEFINED, loading_state=Component.UNDEFINED, **kwargs):
        # Names of all props this component accepts (mirrors the React side).
        self._prop_names = ['id', 'marks', 'value', 'allowCross', 'className', 'count', 'disabled', 'dots', 'included', 'min', 'max', 'pushable', 'step', 'vertical', 'updatemode', 'loading_state']
        self._type = 'RangeSlider'
        self._namespace = 'dash_core_components'
        self._valid_wildcard_attributes = []
        self.available_properties = ['id', 'marks', 'value', 'allowCross', 'className', 'count', 'disabled', 'dots', 'included', 'min', 'max', 'pushable', 'step', 'vertical', 'updatemode', 'loading_state']
        self.available_wildcard_properties = []
        # Only kwargs the caller explicitly passed are forwarded; this block
        # reads locals(), so the parameter names above are load-bearing.
        _explicit_args = kwargs.pop('_explicit_args')
        _locals = locals()
        _locals.update(kwargs)
        args = {k: _locals[k] for k in _explicit_args if k != 'children'}
        # No required props for this component, so this loop never fires.
        for k in []:
            if k not in args:
                raise TypeError(
                    'Required argument `' + k + '` was not specified.')
        super(RangeSlider, self).__init__(**args)
| true | true |
f72a9127d88cdefba5bdb5fb1446f97866851501 | 1,697 | py | Python | model/optimizer.py | keonlee9420/DiffSinger | 2bfcae4a78068c2061eae64ee675959a077aa54b | [
"MIT"
] | 95 | 2021-06-04T02:22:36.000Z | 2022-03-25T03:19:51.000Z | model/optimizer.py | keonlee9420/DiffSinger | 2bfcae4a78068c2061eae64ee675959a077aa54b | [
"MIT"
] | 3 | 2021-06-23T08:57:00.000Z | 2021-10-14T10:44:43.000Z | model/optimizer.py | keonlee9420/DiffSinger | 2bfcae4a78068c2061eae64ee675959a077aa54b | [
"MIT"
] | 15 | 2021-06-04T03:09:12.000Z | 2022-03-30T08:23:05.000Z | import torch
import numpy as np
class ScheduledOptim:
    """Wraps torch.optim.Adam and refreshes the learning rate on each step.

    ``_get_lr_scale`` computes a warmup/anneal scale, but note that
    ``_update_learning_rate`` currently writes the constant ``init_lr`` to
    every parameter group and does not consult that scale.
    """

    def __init__(self, model, train_config, model_config, current_step):
        opt_cfg = train_config["optimizer"]
        self._optimizer = torch.optim.Adam(
            model.parameters(),
            betas=opt_cfg["betas"],
            eps=opt_cfg["eps"],
            weight_decay=opt_cfg["weight_decay"],
        )
        # Schedule parameters. `model_config` is accepted for API
        # compatibility but is not read here.
        self.n_warmup_steps = opt_cfg["warm_up_step"]
        self.anneal_steps = opt_cfg["anneal_steps"]
        self.anneal_rate = opt_cfg["anneal_rate"]
        self.current_step = current_step
        self.init_lr = opt_cfg["init_lr"]

    def step_and_update_lr(self):
        """Refresh the learning rate, then take one optimizer step."""
        self._update_learning_rate()
        self._optimizer.step()

    def zero_grad(self):
        """Clear accumulated gradients on the wrapped optimizer."""
        self._optimizer.zero_grad()

    def load_state_dict(self, path):
        """Restore the wrapped optimizer's state from a state dict."""
        self._optimizer.load_state_dict(path)

    def _get_lr_scale(self):
        # Warmup schedule: min(step^-0.5, warmup^-1.5 * step), then decayed by
        # anneal_rate once for each anneal threshold already passed.
        decay_term = np.power(self.current_step, -0.5)
        warmup_term = np.power(self.n_warmup_steps, -1.5) * self.current_step
        scale = np.min([decay_term, warmup_term])
        for threshold in self.anneal_steps:
            if self.current_step > threshold:
                scale = scale * self.anneal_rate
        return scale

    def _update_learning_rate(self):
        """Advance the step counter and write the (constant) lr to all groups."""
        self.current_step += 1
        lr = self.init_lr
        for param_group in self._optimizer.param_groups:
            param_group["lr"] = lr
| 32.634615 | 73 | 0.592222 | import torch
import numpy as np
class ScheduledOptim:
    """Wraps torch.optim.Adam and refreshes the learning rate on each step.

    NOTE: _get_lr_scale computes a warmup/anneal scale but is never called by
    _update_learning_rate, which applies the constant init_lr instead.
    """
    def __init__(self, model, train_config, model_config, current_step):
        # `model_config` is accepted for API compatibility but not read here.
        self._optimizer = torch.optim.Adam(
            model.parameters(),
            betas=train_config["optimizer"]["betas"],
            eps=train_config["optimizer"]["eps"],
            weight_decay=train_config["optimizer"]["weight_decay"],
        )
        self.n_warmup_steps = train_config["optimizer"]["warm_up_step"]
        self.anneal_steps = train_config["optimizer"]["anneal_steps"]
        self.anneal_rate = train_config["optimizer"]["anneal_rate"]
        self.current_step = current_step
        self.init_lr = train_config["optimizer"]["init_lr"]
    def step_and_update_lr(self):
        # Refresh the learning rate, then take one optimizer step.
        self._update_learning_rate()
        self._optimizer.step()
    def zero_grad(self):
        self._optimizer.zero_grad()
    def load_state_dict(self, path):
        self._optimizer.load_state_dict(path)
    def _get_lr_scale(self):
        # Warmup schedule: min(step^-0.5, warmup^-1.5 * step), then decayed
        # by anneal_rate once for each anneal threshold already passed.
        lr = np.min(
            [
                np.power(self.current_step, -0.5),
                np.power(self.n_warmup_steps, -1.5) * self.current_step,
            ]
        )
        for s in self.anneal_steps:
            if self.current_step > s:
                lr = lr * self.anneal_rate
        return lr
    def _update_learning_rate(self):
        # Advance the step counter and write the constant lr to all groups.
        self.current_step += 1
        lr = self.init_lr
        for param_group in self._optimizer.param_groups:
            param_group["lr"] = lr
f72a913c6611cb848d4d5714cbbb1562b72dda22 | 14,624 | py | Python | billforward/models/resume_subscription_amendment.py | billforward/bf-python | d2b812329ca3ed1fd94364d7f46f69ad74665596 | [
"Apache-2.0"
] | 2 | 2016-11-23T17:32:37.000Z | 2022-02-24T05:13:20.000Z | billforward/models/resume_subscription_amendment.py | billforward/bf-python | d2b812329ca3ed1fd94364d7f46f69ad74665596 | [
"Apache-2.0"
] | null | null | null | billforward/models/resume_subscription_amendment.py | billforward/bf-python | d2b812329ca3ed1fd94364d7f46f69ad74665596 | [
"Apache-2.0"
] | 1 | 2016-12-30T20:02:48.000Z | 2016-12-30T20:02:48.000Z | # coding: utf-8
"""
BillForward REST API
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class ResumeSubscriptionAmendment(object):
    """BillForward API model for a "resume subscription" amendment.

    Originally generated by swagger-codegen.  Hand edits: ``__eq__`` no longer
    assumes ``other`` has a ``__dict__`` (comparing against a non-model object
    now returns False instead of raising AttributeError), and ``to_dict``
    iterates the mapping directly instead of via Python-2-only
    ``six.iteritems`` -- same behaviour on Python 2 and 3.
    """

    def __init__(self, created=None, changed_by=None, updated=None, type=None,
                 id=None, organization_id=None, subscription_id=None,
                 amendment_type=None, actioning_time=None, actioned_time=None,
                 state=None, deleted=False):
        """Create the model.

        All arguments are optional.  Values are stored on the underscore
        attributes directly, deliberately bypassing the property setters so
        the ``None`` defaults do not trip the enum validation.
        """
        # Maps attribute name -> swagger type (consumed by to_dict()).
        self.swagger_types = {
            'created': 'datetime',
            'changed_by': 'str',
            'updated': 'datetime',
            'type': 'str',
            'id': 'str',
            'organization_id': 'str',
            'subscription_id': 'str',
            'amendment_type': 'str',
            'actioning_time': 'datetime',
            'actioned_time': 'datetime',
            'state': 'str',
            'deleted': 'bool'
        }
        # Maps attribute name -> JSON key in the API payload.
        self.attribute_map = {
            'created': 'created',
            'changed_by': 'changedBy',
            'updated': 'updated',
            'type': '@type',
            'id': 'id',
            'organization_id': 'organizationID',
            'subscription_id': 'subscriptionID',
            'amendment_type': 'amendmentType',
            'actioning_time': 'actioningTime',
            'actioned_time': 'actionedTime',
            'state': 'state',
            'deleted': 'deleted'
        }
        self._created = created
        self._changed_by = changed_by
        self._updated = updated
        self._type = type
        self._id = id
        self._organization_id = organization_id
        self._subscription_id = subscription_id
        self._amendment_type = amendment_type
        self._actioning_time = actioning_time
        self._actioned_time = actioned_time
        self._state = state
        self._deleted = deleted

    @property
    def created(self):
        """datetime: UTC DateTime when the object was created."""
        return self._created

    @created.setter
    def created(self, created):
        self._created = created

    @property
    def changed_by(self):
        """str: ID of the user who last updated the entity."""
        return self._changed_by

    @changed_by.setter
    def changed_by(self, changed_by):
        self._changed_by = changed_by

    @property
    def updated(self):
        """datetime: UTC DateTime when the object was last updated."""
        return self._updated

    @updated.setter
    def updated(self, updated):
        self._updated = updated

    @property
    def type(self):
        """str: amendment record type discriminator (``@type`` in JSON)."""
        return self._type

    @type.setter
    def type(self, type):
        """Set type; must be one of the known amendment record types.

        :raises ValueError: if ``type`` is not an allowed value.
        """
        allowed_values = ["InvoiceOutstandingChargesAmendment", "IssueInvoiceAmendment", "PricingComponentValueAmendment", "InvoiceRecalculationAmendment", "CancellationAmendment", "InvoiceNextExecutionAttemptAmendment", "FixedTermExpiryAmendment", "EndTrialAmendment", "ProductRatePlanMigrationAmendment", "AmendmentDiscardAmendment", "UpdateComponentValueAmendment", "ServiceEndAmendment", "ResumeSubscriptionAmendment", "CreateSubscriptionChargeAmendment", "TimerAmendment"]
        if type not in allowed_values:
            raise ValueError(
                "Invalid value for `type` ({0}), must be one of {1}"
                .format(type, allowed_values)
            )
        self._type = type

    @property
    def id(self):
        """str: entity ID."""
        return self._id

    @id.setter
    def id(self, id):
        self._id = id

    @property
    def organization_id(self):
        """str: owning organization's ID."""
        return self._organization_id

    @organization_id.setter
    def organization_id(self, organization_id):
        self._organization_id = organization_id

    @property
    def subscription_id(self):
        """str: ID of the subscription this amendment applies to."""
        return self._subscription_id

    @subscription_id.setter
    def subscription_id(self, subscription_id):
        self._subscription_id = subscription_id

    @property
    def amendment_type(self):
        """str: category of amendment."""
        return self._amendment_type

    @amendment_type.setter
    def amendment_type(self, amendment_type):
        """Set amendment_type; must be one of the known amendment categories.

        :raises ValueError: if ``amendment_type`` is not an allowed value.
        """
        allowed_values = ["InvoiceNextExecutionAttempt", "Cancellation", "PricingComponentValue", "AmendmentDiscard", "Compound", "FixedTermExpiry", "InvoiceRecalculation", "EndTrial", "InvoiceOutstandingCharges", "IssueInvoice", "ProductRatePlanMigration", "UpdateComponentValue", "ServiceEnd", "ResumeSubscription", "CreateSubscriptionCharge", "Timer"]
        if amendment_type not in allowed_values:
            raise ValueError(
                "Invalid value for `amendment_type` ({0}), must be one of {1}"
                .format(amendment_type, allowed_values)
            )
        self._amendment_type = amendment_type

    @property
    def actioning_time(self):
        """datetime: when the amendment will run."""
        return self._actioning_time

    @actioning_time.setter
    def actioning_time(self, actioning_time):
        self._actioning_time = actioning_time

    @property
    def actioned_time(self):
        """datetime: when the amendment completed."""
        return self._actioned_time

    @actioned_time.setter
    def actioned_time(self, actioned_time):
        self._actioned_time = actioned_time

    @property
    def state(self):
        """str: lifecycle state -- Pending, Succeeded, Failed or Discarded."""
        return self._state

    @state.setter
    def state(self, state):
        """Set state; must be Pending, Succeeded, Failed or Discarded.

        :raises ValueError: if ``state`` is not an allowed value.
        """
        allowed_values = ["Pending", "Succeeded", "Failed", "Discarded"]
        if state not in allowed_values:
            raise ValueError(
                "Invalid value for `state` ({0}), must be one of {1}"
                .format(state, allowed_values)
            )
        self._state = state

    @property
    def deleted(self):
        """bool: whether the amendment is deleted."""
        return self._deleted

    @deleted.setter
    def deleted(self, deleted):
        self._deleted = deleted

    def to_dict(self):
        """Return the model's properties as a plain dict.

        Nested models (anything exposing ``to_dict``) are serialized
        recursively, including inside lists and dict values.
        """
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when ``other`` is the same model type with equal attributes."""
        if not isinstance(other, ResumeSubscriptionAmendment):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| 34.088578 | 477 | 0.601409 |
from pprint import pformat
from six import iteritems
import re
class ResumeSubscriptionAmendment(object):
def __init__(self, created=None, changed_by=None, updated=None, type=None, id=None, organization_id=None, subscription_id=None, amendment_type=None, actioning_time=None, actioned_time=None, state=None, deleted=False):
self.swagger_types = {
'created': 'datetime',
'changed_by': 'str',
'updated': 'datetime',
'type': 'str',
'id': 'str',
'organization_id': 'str',
'subscription_id': 'str',
'amendment_type': 'str',
'actioning_time': 'datetime',
'actioned_time': 'datetime',
'state': 'str',
'deleted': 'bool'
}
self.attribute_map = {
'created': 'created',
'changed_by': 'changedBy',
'updated': 'updated',
'type': '@type',
'id': 'id',
'organization_id': 'organizationID',
'subscription_id': 'subscriptionID',
'amendment_type': 'amendmentType',
'actioning_time': 'actioningTime',
'actioned_time': 'actionedTime',
'state': 'state',
'deleted': 'deleted'
}
self._created = created
self._changed_by = changed_by
self._updated = updated
self._type = type
self._id = id
self._organization_id = organization_id
self._subscription_id = subscription_id
self._amendment_type = amendment_type
self._actioning_time = actioning_time
self._actioned_time = actioned_time
self._state = state
self._deleted = deleted
@property
def created(self):
return self._created
@created.setter
def created(self, created):
self._created = created
@property
def changed_by(self):
return self._changed_by
@changed_by.setter
def changed_by(self, changed_by):
self._changed_by = changed_by
@property
def updated(self):
return self._updated
@updated.setter
def updated(self, updated):
self._updated = updated
@property
def type(self):
return self._type
@type.setter
def type(self, type):
allowed_values = ["InvoiceOutstandingChargesAmendment", "IssueInvoiceAmendment", "PricingComponentValueAmendment", "InvoiceRecalculationAmendment", "CancellationAmendment", "InvoiceNextExecutionAttemptAmendment", "FixedTermExpiryAmendment", "EndTrialAmendment", "ProductRatePlanMigrationAmendment", "AmendmentDiscardAmendment", "UpdateComponentValueAmendment", "ServiceEndAmendment", "ResumeSubscriptionAmendment", "CreateSubscriptionChargeAmendment", "TimerAmendment"]
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}"
.format(type, allowed_values)
)
self._type = type
@property
def id(self):
return self._id
@id.setter
def id(self, id):
self._id = id
@property
def organization_id(self):
return self._organization_id
@organization_id.setter
def organization_id(self, organization_id):
self._organization_id = organization_id
@property
def subscription_id(self):
return self._subscription_id
@subscription_id.setter
def subscription_id(self, subscription_id):
self._subscription_id = subscription_id
@property
def amendment_type(self):
return self._amendment_type
@amendment_type.setter
def amendment_type(self, amendment_type):
allowed_values = ["InvoiceNextExecutionAttempt", "Cancellation", "PricingComponentValue", "AmendmentDiscard", "Compound", "FixedTermExpiry", "InvoiceRecalculation", "EndTrial", "InvoiceOutstandingCharges", "IssueInvoice", "ProductRatePlanMigration", "UpdateComponentValue", "ServiceEnd", "ResumeSubscription", "CreateSubscriptionCharge", "Timer"]
if amendment_type not in allowed_values:
raise ValueError(
"Invalid value for `amendment_type` ({0}), must be one of {1}"
.format(amendment_type, allowed_values)
)
self._amendment_type = amendment_type
@property
def actioning_time(self):
return self._actioning_time
@actioning_time.setter
def actioning_time(self, actioning_time):
self._actioning_time = actioning_time
@property
def actioned_time(self):
return self._actioned_time
@actioned_time.setter
def actioned_time(self, actioned_time):
self._actioned_time = actioned_time
@property
def state(self):
return self._state
@state.setter
def state(self, state):
allowed_values = ["Pending", "Succeeded", "Failed", "Discarded"]
if state not in allowed_values:
raise ValueError(
"Invalid value for `state` ({0}), must be one of {1}"
.format(state, allowed_values)
)
self._state = state
@property
def deleted(self):
return self._deleted
@deleted.setter
def deleted(self, deleted):
self._deleted = deleted
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f72a91979056173940df72257c9632371d082496 | 7,763 | py | Python | zun/objects/container_action.py | hualingson/zun | 4fc4e9e0e0f5478d749215c7ba0679a8502f7737 | [
"Apache-2.0"
] | null | null | null | zun/objects/container_action.py | hualingson/zun | 4fc4e9e0e0f5478d749215c7ba0679a8502f7737 | [
"Apache-2.0"
] | null | null | null | zun/objects/container_action.py | hualingson/zun | 4fc4e9e0e0f5478d749215c7ba0679a8502f7737 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import traceback
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_versionedobjects import fields
import six
from zun.db import api as dbapi
from zun.objects import base
LOG = logging.getLogger(__name__)
@base.ZunObjectRegistry.register
class ContainerAction(base.ZunPersistentObject, base.ZunObject):
# Version 1.0: Initial version
# Version 1.1: Add uuid column.
# Version 1.2: Remove uuid column.
VERSION = '1.2'
fields = {
'id': fields.IntegerField(),
'action': fields.StringField(nullable=True),
'container_uuid': fields.UUIDField(nullable=True),
'request_id': fields.StringField(nullable=True),
'user_id': fields.StringField(nullable=True),
'project_id': fields.StringField(nullable=True),
'start_time': fields.DateTimeField(tzinfo_aware=False, nullable=True),
'finish_time': fields.DateTimeField(tzinfo_aware=False, nullable=True),
'message': fields.StringField(nullable=True),
}
@staticmethod
def _from_db_object(context, action, db_action):
for field in action.fields:
setattr(action, field, getattr(db_action, field, None))
action.obj_reset_changes()
return action
@staticmethod
def _from_db_object_list(context, cls, db_objects):
"""Converts a list of database entities to a list of formal objects."""
return [ContainerAction._from_db_object(context, cls(context), obj)
for obj in db_objects]
@staticmethod
def pack_action_start(context, container_uuid, action_name):
values = {'request_id': context.request_id,
'container_uuid': container_uuid,
'user_id': context.user_id,
'project_id': context.project_id,
'action': action_name,
'start_time': context.timestamp}
return values
@staticmethod
def pack_action_finish(context, container_uuid, action_name,
exc_val=None, exc_tb=None):
values = {'request_id': context.request_id,
'container_uuid': container_uuid,
'action': action_name,
'finish_time': timeutils.utcnow()}
if exc_tb is not None:
values['message'] = 'Error'
return values
@base.remotable_classmethod
def get_by_request_id(cls, context, container_uuid, request_id):
db_action = dbapi.action_get_by_request_id(context, container_uuid,
request_id)
if db_action:
return cls._from_db_object(context, cls(context), db_action)
@base.remotable_classmethod
def action_start(cls, context, container_uuid, action_name,
want_result=True):
values = cls.pack_action_start(context, container_uuid, action_name)
db_action = dbapi.action_start(context, values)
if want_result:
return cls._from_db_object(context, cls(context), db_action)
@base.remotable_classmethod
def action_finish(cls, context, container_uuid, action_name, exc_val=None,
exc_tb=None, want_result=True):
values = cls.pack_action_finish(context, container_uuid, action_name,
exc_val=exc_val, exc_tb=exc_tb)
db_action = dbapi.action_finish(context, values)
if want_result:
return cls._from_db_object(context, cls(context), db_action)
@base.remotable_classmethod
def get_by_container_uuid(cls, context, container_uuid):
db_actions = dbapi.actions_get(context, container_uuid)
return ContainerAction._from_db_object_list(context, cls, db_actions)
@base.ZunObjectRegistry.register
class ContainerActionEvent(base.ZunPersistentObject, base.ZunObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'id': fields.IntegerField(),
'event': fields.StringField(nullable=True),
'action_id': fields.IntegerField(nullable=True),
'start_time': fields.DateTimeField(tzinfo_aware=False, nullable=True),
'finish_time': fields.DateTimeField(tzinfo_aware=False, nullable=True),
'result': fields.StringField(nullable=True),
'traceback': fields.StringField(nullable=True),
}
@staticmethod
def _from_db_object(context, event, db_event):
for field in event.fields:
setattr(event, field, db_event[field])
event.obj_reset_changes()
return event
@staticmethod
def _from_db_object_list(context, cls, db_objects):
"""Converts a list of database entities to a list of formal objects."""
return [ContainerActionEvent._from_db_object(context, cls(context),
obj)
for obj in db_objects]
@staticmethod
def pack_action_event_start(context, container_uuid, event_name):
values = {'event': event_name,
'container_uuid': container_uuid,
'request_id': context.request_id,
'start_time': timeutils.utcnow()}
return values
@staticmethod
def pack_action_event_finish(context, container_uuid, event_name,
exc_val=None, exc_tb=None):
values = {'event': event_name,
'container_uuid': container_uuid,
'request_id': context.request_id,
'finish_time': timeutils.utcnow()}
if exc_tb is None:
values['result'] = 'Success'
else:
values['result'] = 'Error'
values['message'] = exc_val
values['traceback'] = exc_tb
return values
@base.remotable_classmethod
def event_start(cls, context, container_uuid, event_name,
want_result=True):
values = cls.pack_action_event_start(context, container_uuid,
event_name)
db_event = dbapi.action_event_start(context, values)
if want_result:
return cls._from_db_object(context, cls(context), db_event)
@base.remotable_classmethod
def event_finish(cls, context, container_uuid, event_name, exc_val=None,
exc_tb=None, want_result=None):
if exc_val:
exc_val = six.text_type(exc_val)
if exc_tb and not isinstance(exc_tb, six.string_types):
exc_tb = ''.join(traceback.format_tb(exc_tb))
values = cls.pack_action_event_finish(context, container_uuid,
event_name, exc_val=exc_val,
exc_tb=exc_tb)
db_event = dbapi.action_event_finish(context, values)
if want_result:
return cls._from_db_object(context, cls(context), db_event)
@base.remotable_classmethod
def get_by_action(cls, context, action_id):
db_events = dbapi.action_events_get(context, action_id)
return ContainerActionEvent._from_db_object_list(context, cls,
db_events)
| 40.643979 | 79 | 0.637382 |
import traceback
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_versionedobjects import fields
import six
from zun.db import api as dbapi
from zun.objects import base
LOG = logging.getLogger(__name__)
@base.ZunObjectRegistry.register
class ContainerAction(base.ZunPersistentObject, base.ZunObject):
VERSION = '1.2'
fields = {
'id': fields.IntegerField(),
'action': fields.StringField(nullable=True),
'container_uuid': fields.UUIDField(nullable=True),
'request_id': fields.StringField(nullable=True),
'user_id': fields.StringField(nullable=True),
'project_id': fields.StringField(nullable=True),
'start_time': fields.DateTimeField(tzinfo_aware=False, nullable=True),
'finish_time': fields.DateTimeField(tzinfo_aware=False, nullable=True),
'message': fields.StringField(nullable=True),
}
@staticmethod
def _from_db_object(context, action, db_action):
for field in action.fields:
setattr(action, field, getattr(db_action, field, None))
action.obj_reset_changes()
return action
@staticmethod
def _from_db_object_list(context, cls, db_objects):
return [ContainerAction._from_db_object(context, cls(context), obj)
for obj in db_objects]
@staticmethod
def pack_action_start(context, container_uuid, action_name):
values = {'request_id': context.request_id,
'container_uuid': container_uuid,
'user_id': context.user_id,
'project_id': context.project_id,
'action': action_name,
'start_time': context.timestamp}
return values
@staticmethod
def pack_action_finish(context, container_uuid, action_name,
exc_val=None, exc_tb=None):
values = {'request_id': context.request_id,
'container_uuid': container_uuid,
'action': action_name,
'finish_time': timeutils.utcnow()}
if exc_tb is not None:
values['message'] = 'Error'
return values
@base.remotable_classmethod
def get_by_request_id(cls, context, container_uuid, request_id):
db_action = dbapi.action_get_by_request_id(context, container_uuid,
request_id)
if db_action:
return cls._from_db_object(context, cls(context), db_action)
@base.remotable_classmethod
def action_start(cls, context, container_uuid, action_name,
want_result=True):
values = cls.pack_action_start(context, container_uuid, action_name)
db_action = dbapi.action_start(context, values)
if want_result:
return cls._from_db_object(context, cls(context), db_action)
@base.remotable_classmethod
def action_finish(cls, context, container_uuid, action_name, exc_val=None,
exc_tb=None, want_result=True):
values = cls.pack_action_finish(context, container_uuid, action_name,
exc_val=exc_val, exc_tb=exc_tb)
db_action = dbapi.action_finish(context, values)
if want_result:
return cls._from_db_object(context, cls(context), db_action)
@base.remotable_classmethod
def get_by_container_uuid(cls, context, container_uuid):
db_actions = dbapi.actions_get(context, container_uuid)
return ContainerAction._from_db_object_list(context, cls, db_actions)
@base.ZunObjectRegistry.register
class ContainerActionEvent(base.ZunPersistentObject, base.ZunObject):
VERSION = '1.0'
fields = {
'id': fields.IntegerField(),
'event': fields.StringField(nullable=True),
'action_id': fields.IntegerField(nullable=True),
'start_time': fields.DateTimeField(tzinfo_aware=False, nullable=True),
'finish_time': fields.DateTimeField(tzinfo_aware=False, nullable=True),
'result': fields.StringField(nullable=True),
'traceback': fields.StringField(nullable=True),
}
@staticmethod
def _from_db_object(context, event, db_event):
for field in event.fields:
setattr(event, field, db_event[field])
event.obj_reset_changes()
return event
@staticmethod
def _from_db_object_list(context, cls, db_objects):
return [ContainerActionEvent._from_db_object(context, cls(context),
obj)
for obj in db_objects]
@staticmethod
def pack_action_event_start(context, container_uuid, event_name):
values = {'event': event_name,
'container_uuid': container_uuid,
'request_id': context.request_id,
'start_time': timeutils.utcnow()}
return values
@staticmethod
def pack_action_event_finish(context, container_uuid, event_name,
exc_val=None, exc_tb=None):
values = {'event': event_name,
'container_uuid': container_uuid,
'request_id': context.request_id,
'finish_time': timeutils.utcnow()}
if exc_tb is None:
values['result'] = 'Success'
else:
values['result'] = 'Error'
values['message'] = exc_val
values['traceback'] = exc_tb
return values
@base.remotable_classmethod
def event_start(cls, context, container_uuid, event_name,
want_result=True):
values = cls.pack_action_event_start(context, container_uuid,
event_name)
db_event = dbapi.action_event_start(context, values)
if want_result:
return cls._from_db_object(context, cls(context), db_event)
@base.remotable_classmethod
def event_finish(cls, context, container_uuid, event_name, exc_val=None,
exc_tb=None, want_result=None):
if exc_val:
exc_val = six.text_type(exc_val)
if exc_tb and not isinstance(exc_tb, six.string_types):
exc_tb = ''.join(traceback.format_tb(exc_tb))
values = cls.pack_action_event_finish(context, container_uuid,
event_name, exc_val=exc_val,
exc_tb=exc_tb)
db_event = dbapi.action_event_finish(context, values)
if want_result:
return cls._from_db_object(context, cls(context), db_event)
@base.remotable_classmethod
def get_by_action(cls, context, action_id):
db_events = dbapi.action_events_get(context, action_id)
return ContainerActionEvent._from_db_object_list(context, cls,
db_events)
| true | true |
f72a92773a71cfadb3c6851d87b7e65eff09358d | 6,096 | py | Python | BaseExtension.py | heyzec/Inkscape-Extentions | dedfc5e6d567218a397d48133c4cb5a62cd5b09b | [
"MIT"
] | 5 | 2021-07-04T10:28:49.000Z | 2022-02-22T16:48:04.000Z | BaseExtension.py | heyzec/Inkscape-Extentions | dedfc5e6d567218a397d48133c4cb5a62cd5b09b | [
"MIT"
] | null | null | null | BaseExtension.py | heyzec/Inkscape-Extentions | dedfc5e6d567218a397d48133c4cb5a62cd5b09b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# pylint: disable=too-many-ancestors
# standard library
import os
import sys
import re
import argparse
from shutil import copy2
# from subprocess import Popen, PIPE
# import time
# from lxml import etree
# local library
import inkex
from inkex.command import inkscape
from inkex.elements import _selected as selection
MIN_PYTHON_VERSION = (3, 6) # Mainly for f-strings
if (sys.version_info.major, sys.version_info.minor) < (3, 6):
inkex.Effect.msg(f"Python {MIN_PYTHON_VERSION[0]}.{MIN_PYTHON_VERSION[1]} or later required.")
sys.exit(1)
class BaseExtension(inkex.Effect):
"""Custom class that makes creation of extensions easier.
Users of this class need not worry about boilerplates, such as how to
call inkscape via shell, and the management of tempfiles. Useful functions
are also provided."""
def __init__(self, custom_effect, args_adder=None):
"""Init base class.
In a typical Inkscape extension that does not make use of BaseExtension,
the effect is determined by the "effect" method of the extension class.
This init function will take in a method, and run it in the "effect" method
together with the other boilerplate.
This init method takes in a function under the custom_effect argument.
This function will handle the user's effects, minus the boilerplate. It
has to return a list[str] object, with each str being a verb that inkscape
can execute."""
inkex.Effect.__init__(self)
self.custom_effect = custom_effect
self._msg = self.msg # The old msg function provided by inkex (only accepts strings)
def msg(*args, sep=' '):
"""Improved msg method, similar to Python's print"""
self._msg(sep.join([str(arg) for arg in args]))
self.msg = msg
if args_adder is not None:
args_adder(self.arg_parser)
self.args_adder = args_adder
def z_sort(self, alist):
"""Return new list sorted in document order (depth-first traversal)."""
return list(self.z_iter(alist))
def z_iter(self, alist):
"""Return iterator over ids in document order (depth-first traversal)."""
id_list = list(alist)
count = len(id_list)
for element in self.document.getroot().iter():
# element_id = element.get('id')
# if element_id is not None and element_id in id_list:
if element in alist:
id_list.remove(element)
yield element
count -= 1
if not count:
return
@staticmethod
def show(obj):
"""Returns a str representation of object"""
def rep(obj):
if hasattr(obj, 'get_id'):
return f"{type(obj).__name__}({obj.get_id()})"
return f"{type(obj).__name__}"
if type(obj).__name__ == 'ElementList':
return ('ElementList(' +
', '.join([rep(child) for child in obj.values()]) +
')')
if isinstance(obj, list):
return '[' + ', '.join(rep(child) for child in obj) + ']'
return rep(obj)
def find(self, obj: any, xpath='/*') -> list:
"""Returns a list of objects which satisfies XPath
Args:
obj (any): Parent object to recurse into. Examples include root, selected, or a group.
xpath (str, optional): Defaults to '/*'.
Returns:
list: [description]
"""
BASIC_TAGS = ('circle', 'ellipse', 'line', 'polygon', 'polyline', 'rect', 'path', 'image', 'g')
SPECIAL_TAGS = {
'l': "svg:g[@inkscape:groupmode='layer']",
'p': 'svg:path'
}
xpath = re.sub(r'((?<=/)(' + '|'.join(BASIC_TAGS) + r')\b)', r'svg:\1', xpath)
for k, v in SPECIAL_TAGS.items():
xpath = re.sub('(?<=/)' + k + r'\b', v, xpath)
xpath = re.sub(r'(?<=\[)(\d+):(\d+)(?=\])', r'position()>=\1 and position()<\2', xpath)
if type(obj).__name__ != 'ElementList':
obj = [obj]
output = []
for child in obj:
matches = child.xpath(xpath, namespaces={
'svg': 'http://www.w3.org/2000/svg',
'inkscape': 'http://www.inkscape.org/namespaces/inkscape'})
for match in matches:
if type(match).__name__ not in ('Defs', 'NamedView', 'Metadata'):
output.append(match)
return output
def effect(self):
"""Main entry point to process current document. Not to be called externally."""
actions_list = self.custom_effect(self)
if actions_list is None or actions_list == []:
self.msg("No actions received. Perhaps you are calling inkex object methods?")
elif isinstance(actions_list, list):
tempfile = self.options.input_file + "-BaseExtension.svg"
# prepare
copy2(self.options.input_file, tempfile)
actions_list.append("FileSave")
actions_list.append("FileQuit")
actions = ";".join(actions_list)
inkscape(tempfile, "--with-gui", actions=actions)
# finish up
# replace current document with content of temp copy file
self.document = inkex.load_svg(tempfile)
# update self.svg
self.svg = self.document.getroot()
# Clean up tempfile
try:
os.remove(tempfile)
except Exception: # pylint: disable=broad-except
pass
def call(self, child, ext_options):
"""Used to call an extension from another extension"""
old_options = self.options
parser = argparse.ArgumentParser()
child.args_adder(parser)
self.options = parser.parse_args([])
for k, v in ext_options.items():
setattr(self.options, k, v)
output = child.custom_effect(self)
self.options = old_options
return output
| 32.425532 | 103 | 0.58563 |
import os
import sys
import re
import argparse
from shutil import copy2
import inkex
from inkex.command import inkscape
from inkex.elements import _selected as selection
MIN_PYTHON_VERSION = (3, 6)
if (sys.version_info.major, sys.version_info.minor) < (3, 6):
inkex.Effect.msg(f"Python {MIN_PYTHON_VERSION[0]}.{MIN_PYTHON_VERSION[1]} or later required.")
sys.exit(1)
class BaseExtension(inkex.Effect):
def __init__(self, custom_effect, args_adder=None):
inkex.Effect.__init__(self)
self.custom_effect = custom_effect
self._msg = self.msg
def msg(*args, sep=' '):
self._msg(sep.join([str(arg) for arg in args]))
self.msg = msg
if args_adder is not None:
args_adder(self.arg_parser)
self.args_adder = args_adder
def z_sort(self, alist):
return list(self.z_iter(alist))
def z_iter(self, alist):
id_list = list(alist)
count = len(id_list)
for element in self.document.getroot().iter():
if element in alist:
id_list.remove(element)
yield element
count -= 1
if not count:
return
@staticmethod
def show(obj):
def rep(obj):
if hasattr(obj, 'get_id'):
return f"{type(obj).__name__}({obj.get_id()})"
return f"{type(obj).__name__}"
if type(obj).__name__ == 'ElementList':
return ('ElementList(' +
', '.join([rep(child) for child in obj.values()]) +
')')
if isinstance(obj, list):
return '[' + ', '.join(rep(child) for child in obj) + ']'
return rep(obj)
def find(self, obj: any, xpath='/*') -> list:
BASIC_TAGS = ('circle', 'ellipse', 'line', 'polygon', 'polyline', 'rect', 'path', 'image', 'g')
SPECIAL_TAGS = {
'l': "svg:g[@inkscape:groupmode='layer']",
'p': 'svg:path'
}
xpath = re.sub(r'((?<=/)(' + '|'.join(BASIC_TAGS) + r')\b)', r'svg:\1', xpath)
for k, v in SPECIAL_TAGS.items():
xpath = re.sub('(?<=/)' + k + r'\b', v, xpath)
xpath = re.sub(r'(?<=\[)(\d+):(\d+)(?=\])', r'position()>=\1 and position()<\2', xpath)
if type(obj).__name__ != 'ElementList':
obj = [obj]
output = []
for child in obj:
matches = child.xpath(xpath, namespaces={
'svg': 'http://www.w3.org/2000/svg',
'inkscape': 'http://www.inkscape.org/namespaces/inkscape'})
for match in matches:
if type(match).__name__ not in ('Defs', 'NamedView', 'Metadata'):
output.append(match)
return output
def effect(self):
actions_list = self.custom_effect(self)
if actions_list is None or actions_list == []:
self.msg("No actions received. Perhaps you are calling inkex object methods?")
elif isinstance(actions_list, list):
tempfile = self.options.input_file + "-BaseExtension.svg"
copy2(self.options.input_file, tempfile)
actions_list.append("FileSave")
actions_list.append("FileQuit")
actions = ";".join(actions_list)
inkscape(tempfile, "--with-gui", actions=actions)
self.document = inkex.load_svg(tempfile)
self.svg = self.document.getroot()
try:
os.remove(tempfile)
except Exception:
pass
def call(self, child, ext_options):
old_options = self.options
parser = argparse.ArgumentParser()
child.args_adder(parser)
self.options = parser.parse_args([])
for k, v in ext_options.items():
setattr(self.options, k, v)
output = child.custom_effect(self)
self.options = old_options
return output
| true | true |
f72a93a6ef5bdbef78fc92eeacc5548f6c09045a | 181 | py | Python | delphi_epidata/_constants.py | lee14257/delphi-epidata-py | ca84147fb75a50b073bab43e77dcb32b52b26f4b | [
"MIT"
] | null | null | null | delphi_epidata/_constants.py | lee14257/delphi-epidata-py | ca84147fb75a50b073bab43e77dcb32b52b26f4b | [
"MIT"
] | null | null | null | delphi_epidata/_constants.py | lee14257/delphi-epidata-py | ca84147fb75a50b073bab43e77dcb32b52b26f4b | [
"MIT"
] | 1 | 2021-12-22T23:56:58.000Z | 2021-12-22T23:56:58.000Z | from typing import Final
__version__: Final = "1.0.0"
HTTP_HEADERS: Final = {"User-Agent": f"delphi_epidata/{__version__}"}
BASE_URL: Final = "https://delphi.cmu.edu/epidata/"
| 18.1 | 69 | 0.712707 | from typing import Final
__version__: Final = "1.0.0"
HTTP_HEADERS: Final = {"User-Agent": f"delphi_epidata/{__version__}"}
BASE_URL: Final = "https://delphi.cmu.edu/epidata/"
| true | true |
f72a948c70be197e27db61e983500bbbb2328e4d | 1,015 | py | Python | plugins/readme/girder_readme/rest.py | JKitok/girder | 317962d155fc9811d25e5f33bd3e849c4ac96645 | [
"Apache-2.0"
] | 395 | 2015-01-12T19:20:13.000Z | 2022-03-30T05:40:40.000Z | plugins/readme/girder_readme/rest.py | JKitok/girder | 317962d155fc9811d25e5f33bd3e849c4ac96645 | [
"Apache-2.0"
] | 2,388 | 2015-01-01T20:09:19.000Z | 2022-03-29T16:49:14.000Z | plugins/readme/girder_readme/rest.py | JKitok/girder | 317962d155fc9811d25e5f33bd3e849c4ac96645 | [
"Apache-2.0"
] | 177 | 2015-01-04T14:47:00.000Z | 2022-03-25T09:01:51.000Z | # -*- coding: utf-8 -*-
import re
import cherrypy
from girder.api import access
from girder.api.describe import Description, autoDescribeRoute
from girder.constants import AccessType, TokenScope
from girder.models.file import File as FileModel
from girder.models.folder import Folder as FolderModel
from girder.models.item import Item as ItemModel
@access.public(scope=TokenScope.DATA_READ)
@autoDescribeRoute(
Description('Get the README for a folder, if it exists.')
.modelParam('id', model=FolderModel, level=AccessType.READ)
.errorResponse()
.errorResponse('Read access was denied on the folder.', 403)
)
def _getFolderReadme(folder):
query = {
'folderId': folder['_id'],
'name': {'$regex': re.compile(r'^README(\..+)?$')},
}
item = ItemModel().findOne(query)
if item:
files = list(ItemModel().childFiles(item=item, limit=1))
if len(files) >= 1:
return FileModel().download(files[0])
cherrypy.response.status = 204
return ''
| 32.741935 | 64 | 0.693596 |
import re
import cherrypy
from girder.api import access
from girder.api.describe import Description, autoDescribeRoute
from girder.constants import AccessType, TokenScope
from girder.models.file import File as FileModel
from girder.models.folder import Folder as FolderModel
from girder.models.item import Item as ItemModel
@access.public(scope=TokenScope.DATA_READ)
@autoDescribeRoute(
Description('Get the README for a folder, if it exists.')
.modelParam('id', model=FolderModel, level=AccessType.READ)
.errorResponse()
.errorResponse('Read access was denied on the folder.', 403)
)
def _getFolderReadme(folder):
query = {
'folderId': folder['_id'],
'name': {'$regex': re.compile(r'^README(\..+)?$')},
}
item = ItemModel().findOne(query)
if item:
files = list(ItemModel().childFiles(item=item, limit=1))
if len(files) >= 1:
return FileModel().download(files[0])
cherrypy.response.status = 204
return ''
| true | true |
f72a950bc8465538bf3a53bd013a276fafc97895 | 28,988 | py | Python | sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze.py | lynshi/azure-sdk-for-python | 40c530f2e9a6d93025b01cc8f6c94829c7fe95fc | [
"MIT"
] | null | null | null | sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze.py | lynshi/azure-sdk-for-python | 40c530f2e9a6d93025b01cc8f6c94829c7fe95fc | [
"MIT"
] | null | null | null | sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze.py | lynshi/azure-sdk-for-python | 40c530f2e9a6d93025b01cc8f6c94829c7fe95fc | [
"MIT"
] | null | null | null | # coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import os
import pytest
import platform
import functools
import itertools
import datetime
from azure.core.exceptions import HttpResponseError, ClientAuthenticationError
from azure.core.credentials import AzureKeyCredential
from testcase import TextAnalyticsTest, GlobalTextAnalyticsAccountPreparer
from testcase import TextAnalyticsClientPreparer as _TextAnalyticsClientPreparer
from azure.ai.textanalytics import (
TextAnalyticsClient,
RecognizeEntitiesAction,
RecognizeLinkedEntitiesAction,
RecognizePiiEntitiesAction,
ExtractKeyPhrasesAction,
AnalyzeSentimentAction,
TextDocumentInput,
VERSION,
TextAnalyticsApiVersion,
AnalyzeActionsType,
)
# Pre-bind the client class as the positional `client_cls` argument so test
# methods can apply @TextAnalyticsClientPreparer() without naming
# TextAnalyticsClient explicitly each time.
TextAnalyticsClientPreparer = functools.partial(_TextAnalyticsClientPreparer, TextAnalyticsClient)
class TestAnalyze(TextAnalyticsTest):
    """Integration tests for ``TextAnalyticsClient.begin_analyze_actions``.

    Every test method is wrapped by ``GlobalTextAnalyticsAccountPreparer`` /
    ``TextAnalyticsClientPreparer``, which provision (or replay) a Text
    Analytics account and inject the ready-made ``client`` argument.
    Expected entity/sentiment values are pinned to recorded service
    responses, so assertion contents mirror what the service returned when
    the sessions were recorded.
    """

    def _interval(self):
        # Poll the long-running operation every 5s against the live service;
        # skip sleeping entirely when replaying recorded sessions.
        return 5 if self.is_live else 0

    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_no_single_input(self, client):
        """A bare string (not a list of documents) must raise TypeError."""
        with self.assertRaises(TypeError):
            response = client.begin_analyze_actions("hello world", actions=[], polling_interval=self._interval())

    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_all_successful_passing_dict_key_phrase_task(self, client):
        """Key-phrase action over dict-shaped docs: one action result, one doc result per input."""
        docs = [{"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen"},
                {"id": "2", "language": "es", "text": "Microsoft fue fundado por Bill Gates y Paul Allen"}]
        response = client.begin_analyze_actions(
            docs,
            actions=[ExtractKeyPhrasesAction()],
            show_stats=True,
            polling_interval=self._interval(),
        ).result()
        action_results = list(response)
        assert len(action_results) == 1
        action_result = action_results[0]
        assert action_result.action_type == AnalyzeActionsType.EXTRACT_KEY_PHRASES
        assert len(action_result.document_results) == len(docs)
        # Both the English and Spanish documents should surface the same key phrases.
        for doc in action_result.document_results:
            self.assertIn("Paul Allen", doc.key_phrases)
            self.assertIn("Bill Gates", doc.key_phrases)
            self.assertIn("Microsoft", doc.key_phrases)
            self.assertIsNotNone(doc.id)

    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_all_successful_passing_dict_sentiment_task(self, client):
        """Sentiment action: verifies per-document sentiment labels and sentence splitting."""
        docs = [{"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen."},
                {"id": "2", "language": "en", "text": "I did not like the hotel we stayed at. It was too expensive."},
                {"id": "3", "language": "en", "text": "The restaurant had really good food. I recommend you try it."}]
        response = client.begin_analyze_actions(
            docs,
            actions=[AnalyzeSentimentAction()],
            show_stats=True,
            polling_interval=self._interval(),
        ).result()
        action_results = list(response)
        assert len(action_results) == 1
        action_result = action_results[0]
        assert action_result.action_type == AnalyzeActionsType.ANALYZE_SENTIMENT
        assert len(action_result.document_results) == len(docs)
        # Document-level sentiment as recorded from the service.
        self.assertEqual(action_result.document_results[0].sentiment, "neutral")
        self.assertEqual(action_result.document_results[1].sentiment, "negative")
        self.assertEqual(action_result.document_results[2].sentiment, "positive")
        for doc in action_result.document_results:
            self.assertIsNotNone(doc.id)
            self.assertIsNotNone(doc.statistics)  # present because show_stats=True
            self.validateConfidenceScores(doc.confidence_scores)
            self.assertIsNotNone(doc.sentences)
        # Sentence segmentation: doc 1 has one sentence, docs 2 and 3 have two each.
        self.assertEqual(len(action_result.document_results[0].sentences), 1)
        self.assertEqual(action_result.document_results[0].sentences[0].text, "Microsoft was founded by Bill Gates and Paul Allen.")
        self.assertEqual(len(action_result.document_results[1].sentences), 2)
        self.assertEqual(action_result.document_results[1].sentences[0].text, "I did not like the hotel we stayed at.")
        self.assertEqual(action_result.document_results[1].sentences[1].text, "It was too expensive.")
        self.assertEqual(len(action_result.document_results[2].sentences), 2)
        self.assertEqual(action_result.document_results[2].sentences[0].text, "The restaurant had really good food.")
        self.assertEqual(action_result.document_results[2].sentences[1].text, "I recommend you try it.")

    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_sentiment_analysis_task_with_opinion_mining(self, client):
        """Sentiment with ``show_opinion_mining=True``: checks mined targets and assessments."""
        documents = [
            "It has a sleek premium aluminum design that makes it beautiful to look at.",
            "The food and service is not good"
        ]
        response = client.begin_analyze_actions(
            documents,
            actions=[AnalyzeSentimentAction(show_opinion_mining=True)],
            show_stats=True,
            polling_interval=self._interval(),
        ).result()
        action_results = list(response)
        assert len(action_results) == 1
        action_result = action_results[0]
        assert action_result.action_type == AnalyzeActionsType.ANALYZE_SENTIMENT
        assert len(action_result.document_results) == len(documents)
        for idx, doc in enumerate(action_result.document_results):
            for sentence in doc.sentences:
                if idx == 0:
                    # Doc 0: single target "design" with two positive assessments
                    # ("sleek", "premium"); offsets are character positions in the text.
                    for mined_opinion in sentence.mined_opinions:
                        target = mined_opinion.target
                        self.assertEqual('design', target.text)
                        self.assertEqual('positive', target.sentiment)
                        self.assertEqual(0.0, target.confidence_scores.neutral)
                        self.validateConfidenceScores(target.confidence_scores)
                        self.assertEqual(32, target.offset)
                        sleek_opinion = mined_opinion.assessments[0]
                        self.assertEqual('sleek', sleek_opinion.text)
                        self.assertEqual('positive', sleek_opinion.sentiment)
                        self.assertEqual(0.0, sleek_opinion.confidence_scores.neutral)
                        self.validateConfidenceScores(sleek_opinion.confidence_scores)
                        self.assertEqual(9, sleek_opinion.offset)
                        self.assertFalse(sleek_opinion.is_negated)
                        premium_opinion = mined_opinion.assessments[1]
                        self.assertEqual('premium', premium_opinion.text)
                        self.assertEqual('positive', premium_opinion.sentiment)
                        self.assertEqual(0.0, premium_opinion.confidence_scores.neutral)
                        self.validateConfidenceScores(premium_opinion.confidence_scores)
                        self.assertEqual(15, premium_opinion.offset)
                        self.assertFalse(premium_opinion.is_negated)
                else:
                    # Doc 1: two negative targets, "food" and "service", sharing the
                    # negated assessment "good" ("not good").
                    food_target = sentence.mined_opinions[0].target
                    service_target = sentence.mined_opinions[1].target
                    self.validateConfidenceScores(food_target.confidence_scores)
                    self.assertEqual(4, food_target.offset)
                    self.assertEqual('service', service_target.text)
                    self.assertEqual('negative', service_target.sentiment)
                    self.assertEqual(0.0, service_target.confidence_scores.neutral)
                    self.validateConfidenceScores(service_target.confidence_scores)
                    self.assertEqual(13, service_target.offset)
                    food_opinion = sentence.mined_opinions[0].assessments[0]
                    service_opinion = sentence.mined_opinions[1].assessments[0]
                    # assertOpinionsEqual — helper presumably defined on TextAnalyticsTest
                    # in testcase.py; verify against that module.
                    self.assertOpinionsEqual(food_opinion, service_opinion)
                    self.assertEqual('good', food_opinion.text)
                    self.assertEqual('negative', food_opinion.sentiment)
                    self.assertEqual(0.0, food_opinion.confidence_scores.neutral)
                    self.validateConfidenceScores(food_opinion.confidence_scores)
                    self.assertEqual(28, food_opinion.offset)
                    self.assertTrue(food_opinion.is_negated)
                    # NOTE(review): service_target is re-assigned the same value here —
                    # redundant but harmless; the remaining asserts cover food_target.
                    service_target = sentence.mined_opinions[1].target
                    self.assertEqual('food', food_target.text)
                    self.assertEqual('negative', food_target.sentiment)
                    self.assertEqual(0.0, food_target.confidence_scores.neutral)

    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_all_successful_passing_text_document_input_entities_task(self, client):
        """Entity-recognition action fed TextDocumentInput objects in three languages."""
        docs = [
            TextDocumentInput(id="1", text="Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975", language="en"),
            TextDocumentInput(id="2", text="Microsoft fue fundado por Bill Gates y Paul Allen el 4 de abril de 1975.", language="es"),
            TextDocumentInput(id="3", text="Microsoft wurde am 4. April 1975 von Bill Gates und Paul Allen gegründet.", language="de"),
        ]
        response = client.begin_analyze_actions(
            docs,
            actions=[RecognizeEntitiesAction()],
            show_stats=True,
            polling_interval=self._interval(),
        ).result()
        action_results = list(response)
        assert len(action_results) == 1
        action_result = action_results[0]
        assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES
        assert len(action_result.document_results) == len(docs)
        for doc in action_result.document_results:
            # Each sentence mentions Microsoft, Bill Gates, Paul Allen and a date -> 4 entities.
            self.assertEqual(len(doc.entities), 4)
            self.assertIsNotNone(doc.id)
            for entity in doc.entities:
                self.assertIsNotNone(entity.text)
                self.assertIsNotNone(entity.category)
                self.assertIsNotNone(entity.offset)
                self.assertIsNotNone(entity.confidence_score)

    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_all_successful_passing_string_pii_entities_task(self, client):
        """PII-recognition action over plain strings (SSN, ABA number, Brazilian CPF)."""
        docs = ["My SSN is 859-98-0987.",
                "Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check.",
                "Is 998.214.865-68 your Brazilian CPF number?"
        ]
        response = client.begin_analyze_actions(
            docs,
            actions=[RecognizePiiEntitiesAction()],
            show_stats=True,
            polling_interval=self._interval(),
        ).result()
        action_results = list(response)
        assert len(action_results) == 1
        action_result = action_results[0]
        assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES
        assert len(action_result.document_results) == len(docs)
        self.assertEqual(action_result.document_results[0].entities[0].text, "859-98-0987")
        self.assertEqual(action_result.document_results[0].entities[0].category, "USSocialSecurityNumber")
        self.assertEqual(action_result.document_results[1].entities[0].text, "111000025")
        # self.assertEqual(results[1].entities[0].category, "ABA Routing Number")  # Service is currently returning PhoneNumber here
        # commenting out brazil cpf, currently service is not returning it
        # self.assertEqual(action_result.document_results[2].entities[0].text, "998.214.865-68")
        # self.assertEqual(action_result.document_results[2].entities[0].category, "Brazil CPF Number")
        for doc in action_result.document_results:
            self.assertIsNotNone(doc.id)
            for entity in doc.entities:
                self.assertIsNotNone(entity.text)
                self.assertIsNotNone(entity.category)
                self.assertIsNotNone(entity.offset)
                self.assertIsNotNone(entity.confidence_score)

    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_bad_request_on_empty_document(self, client):
        """A batch containing only an empty document is rejected by the service."""
        docs = [u""]
        with self.assertRaises(HttpResponseError):
            response = client.begin_analyze_actions(
                docs,
                actions=[ExtractKeyPhrasesAction()],
                polling_interval=self._interval(),
            )

    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer(client_kwargs={
        "text_analytics_account_key": "",
    })
    def test_empty_credential_class(self, client):
        """An empty account key must surface as ClientAuthenticationError."""
        with self.assertRaises(ClientAuthenticationError):
            response = client.begin_analyze_actions(
                ["This is written in English."],
                actions=[
                    RecognizeEntitiesAction(),
                    ExtractKeyPhrasesAction(),
                    RecognizePiiEntitiesAction(),
                    RecognizeLinkedEntitiesAction(),
                    AnalyzeSentimentAction()
                ],
                polling_interval=self._interval(),
            )

    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer(client_kwargs={
        "text_analytics_account_key": "xxxxxxxxxxxx",
    })
    def test_bad_credentials(self, client):
        """A syntactically valid but wrong key must surface as ClientAuthenticationError."""
        with self.assertRaises(ClientAuthenticationError):
            response = client.begin_analyze_actions(
                ["This is written in English."],
                actions=[
                    RecognizeEntitiesAction(),
                    ExtractKeyPhrasesAction(),
                    RecognizePiiEntitiesAction(),
                    RecognizeLinkedEntitiesAction(),
                    AnalyzeSentimentAction()
                ],
                polling_interval=self._interval(),
            )

    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_out_of_order_ids_multiple_tasks(self, client):
        """Document results must come back in submission order, not sorted by id."""
        docs = [{"id": "56", "text": ":)"},
                {"id": "0", "text": ":("},
                {"id": "19", "text": ":P"},
                {"id": "1", "text": ":D"}]
        response = client.begin_analyze_actions(
            docs,
            actions=[
                RecognizeEntitiesAction(),
                ExtractKeyPhrasesAction(),
                RecognizePiiEntitiesAction(),
                RecognizeLinkedEntitiesAction(),
                AnalyzeSentimentAction()
            ],
            polling_interval=self._interval(),
        ).result()
        action_results = list(response)
        assert len(action_results) == 5
        # Action results arrive in the order the actions were submitted.
        assert action_results[0].action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES
        assert action_results[1].action_type == AnalyzeActionsType.EXTRACT_KEY_PHRASES
        assert action_results[2].action_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES
        assert action_results[3].action_type == AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES
        assert action_results[4].action_type == AnalyzeActionsType.ANALYZE_SENTIMENT
        action_results = [r for r in action_results if not r.is_error]
        assert all([action_result for action_result in action_results if len(action_result.document_results) == len(docs)])
        in_order = ["56", "0", "19", "1"]
        for action_result in action_results:
            for idx, resp in enumerate(action_result.document_results):
                self.assertEqual(resp.id, in_order[idx])

    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_show_stats_and_model_version_multiple_tasks(self, client):
        """With show_stats=True every action and document carries statistics."""
        def callback(resp):
            # Hook exists only as a breakpoint anchor while debugging raw responses.
            if resp.raw_response:
                a = "b"
        docs = [{"id": "56", "text": ":)"},
                {"id": "0", "text": ":("},
                {"id": "19", "text": ":P"},
                {"id": "1", "text": ":D"}]
        poller = client.begin_analyze_actions(
            docs,
            actions=[
                RecognizeEntitiesAction(model_version="latest"),
                ExtractKeyPhrasesAction(model_version="latest"),
                RecognizePiiEntitiesAction(model_version="latest"),
                RecognizeLinkedEntitiesAction(model_version="latest"),
                AnalyzeSentimentAction(model_version="latest")
            ],
            show_stats=True,
            polling_interval=self._interval(),
            raw_response_hook=callback,
        )
        response = poller.result()
        action_results = list(response)
        assert len(action_results) == 5
        assert action_results[0].action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES
        assert action_results[1].action_type == AnalyzeActionsType.EXTRACT_KEY_PHRASES
        assert action_results[2].action_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES
        assert action_results[3].action_type == AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES
        assert action_results[4].action_type == AnalyzeActionsType.ANALYZE_SENTIMENT
        assert all([action_result for action_result in action_results if len(action_result.document_results) == len(docs)])
        for action_result in action_results:
            assert action_result.statistics
            for doc in action_result.document_results:
                assert doc.statistics

    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_poller_metadata(self, client):
        """The LRO poller exposes timestamps, counts and an operation id."""
        docs = [{"id": "56", "text": ":)"}]
        poller = client.begin_analyze_actions(
            docs,
            actions=[
                RecognizeEntitiesAction(model_version="latest")
            ],
            show_stats=True,
            polling_interval=self._interval(),
        )
        response = poller.result()
        assert isinstance(poller.created_on, datetime.datetime)
        # NOTE(review): bare attribute access on a private member — presumably a
        # smoke check that display_name is reachable; no value is asserted.
        poller._polling_method.display_name
        assert isinstance(poller.expires_on, datetime.datetime)
        assert poller.actions_failed_count == 0
        assert poller.actions_in_progress_count == 0
        assert poller.actions_succeeded_count == 1
        assert isinstance(poller.last_modified_on, datetime.datetime)
        assert poller.total_actions_count == 1
        assert poller.id

    ### TODO: Commenting out language tests. Right now analyze only supports language 'en', so no point to these tests yet

    # @GlobalTextAnalyticsAccountPreparer()
    # @TextAnalyticsClientPreparer()
    # def test_whole_batch_language_hint(self, client):
    #     def callback(resp):
    #         language_str = "\"language\": \"fr\""
    #         if resp.http_request.body:
    #             language = resp.http_request.body.count(language_str)
    #             self.assertEqual(language, 3)
    #     docs = [
    #         u"This was the best day of my life.",
    #         u"I did not like the hotel we stayed at. It was too expensive.",
    #         u"The restaurant was not as good as I hoped."
    #     ]
    #     response = list(client.begin_analyze_actions(
    #         docs,
    #         actions=[
    #             RecognizeEntitiesAction(),
    #             ExtractKeyPhrasesAction(),
    #             RecognizePiiEntitiesAction()
    #         ],
    #         language="fr",
    #         polling_interval=self._interval(),
    #         raw_response_hook=callback
    #     ).result())
    #     for action_result in response:
    #         for doc in action_result.document_results:
    #             self.assertFalse(doc.is_error)

    # @GlobalTextAnalyticsAccountPreparer()
    # @TextAnalyticsClientPreparer(client_kwargs={
    #     "default_language": "en"
    # })
    # def test_whole_batch_language_hint_and_obj_per_item_hints(self, client):
    #     def callback(resp):
    #         pass
    #         # if resp.http_request.body:
    #         #     language_str = "\"language\": \"es\""
    #         #     language = resp.http_request.body.count(language_str)
    #         #     self.assertEqual(language, 2)
    #         #     language_str = "\"language\": \"en\""
    #         #     language = resp.http_request.body.count(language_str)
    #         #     self.assertEqual(language, 1)
    #     docs = [
    #         TextDocumentInput(id="1", text="I should take my cat to the veterinarian.", language="es"),
    #         TextDocumentInput(id="2", text="Este es un document escrito en Español.", language="es"),
    #         TextDocumentInput(id="3", text="猫は幸せ"),
    #     ]
    #     response = list(client.begin_analyze_actions(
    #         docs,
    #         actions=[
    #             RecognizeEntitiesAction(),
    #             ExtractKeyPhrasesAction(),
    #             RecognizePiiEntitiesAction()
    #         ],
    #         polling_interval=self._interval(),
    #     ).result())
    #     for action_result in response:
    #         for doc in action_result.document_results:
    #             assert not doc.is_error

    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_invalid_language_hint_method(self, client):
        """An unknown language hint yields per-document errors, not a request failure."""
        response = list(client.begin_analyze_actions(
            ["This should fail because we're passing in an invalid language hint"],
            language="notalanguage",
            actions=[
                RecognizeEntitiesAction(),
                ExtractKeyPhrasesAction(),
                RecognizePiiEntitiesAction(),
                RecognizeLinkedEntitiesAction(),
                AnalyzeSentimentAction()
            ],
            polling_interval=self._interval(),
        ).result())
        for action_result in response:
            for doc in action_result.document_results:
                assert doc.is_error

    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_bad_model_version_error_multiple_tasks(self, client):
        """One valid action mixed with bad model versions still fails the whole LRO."""
        docs = [{"id": "1", "language": "english", "text": "I did not like the hotel we stayed at."}]
        with pytest.raises(HttpResponseError):
            response = client.begin_analyze_actions(
                docs,
                actions=[
                    RecognizeEntitiesAction(model_version="latest"),
                    ExtractKeyPhrasesAction(model_version="bad"),
                    RecognizePiiEntitiesAction(model_version="bad"),
                    RecognizeLinkedEntitiesAction(model_version="bad"),
                    AnalyzeSentimentAction(model_version="bad")
                ],
                polling_interval=self._interval(),
            ).result()

    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_bad_model_version_error_all_tasks(self, client):  # TODO: verify behavior of service
        """All actions with a bad model version must raise HttpResponseError."""
        docs = [{"id": "1", "language": "english", "text": "I did not like the hotel we stayed at."}]
        with self.assertRaises(HttpResponseError):
            response = client.begin_analyze_actions(
                docs,
                actions=[
                    RecognizeEntitiesAction(model_version="bad"),
                    ExtractKeyPhrasesAction(model_version="bad"),
                    RecognizePiiEntitiesAction(model_version="bad"),
                    RecognizeLinkedEntitiesAction(model_version="bad"),
                    AnalyzeSentimentAction(model_version="bad")
                ],
                polling_interval=self._interval(),
            ).result()

    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_missing_input_records_error(self, client):
        """An empty document list is rejected client-side with ValueError."""
        docs = []
        with pytest.raises(ValueError) as excinfo:
            client.begin_analyze_actions(
                docs,
                actions=[
                    RecognizeEntitiesAction(),
                    ExtractKeyPhrasesAction(),
                    RecognizePiiEntitiesAction(),
                    RecognizeLinkedEntitiesAction(),
                    AnalyzeSentimentAction()
                ],
                polling_interval=self._interval(),
            )
        assert "Input documents can not be empty or None" in str(excinfo.value)

    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_passing_none_docs(self, client):
        """``None`` documents are rejected client-side with the same ValueError."""
        with pytest.raises(ValueError) as excinfo:
            client.begin_analyze_actions(None, None)
        assert "Input documents can not be empty or None" in str(excinfo.value)

    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_pass_cls(self, client):
        """A custom ``cls`` deserialization callback replaces the returned result."""
        def callback(pipeline_response, deserialized, _):
            return "cls result"
        res = client.begin_analyze_actions(
            documents=["Test passing cls to endpoint"],
            actions=[
                RecognizeEntitiesAction(),
            ],
            cls=callback,
            polling_interval=self._interval(),
        ).result()
        assert res == "cls result"

    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_multiple_pages_of_results_returned_successfully(self, client):
        """25 docs x 5 actions paginates: expect two pages of five action results."""
        single_doc = "hello world"
        docs = [{"id": str(idx), "text": val} for (idx, val) in enumerate(list(itertools.repeat(single_doc, 25)))] # max number of documents is 25
        result = client.begin_analyze_actions(
            docs,
            actions=[
                RecognizeEntitiesAction(),
                ExtractKeyPhrasesAction(),
                RecognizePiiEntitiesAction(),
                RecognizeLinkedEntitiesAction(),
                AnalyzeSentimentAction()
            ],
            show_stats=True,
            polling_interval=self._interval(),
        ).result()
        recognize_entities_results = []
        extract_key_phrases_results = []
        recognize_pii_entities_results = []
        recognize_linked_entities_results = []
        analyze_sentiment_results = []
        action_results = list(result)
        # do 2 pages of 5 task results
        # The 5-action cycle repeats per page, so bucket by idx % 5.
        for idx, action_result in enumerate(action_results):
            if idx % 5 == 0:
                assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES
                recognize_entities_results.append(action_result)
            elif idx % 5 == 1:
                assert action_result.action_type == AnalyzeActionsType.EXTRACT_KEY_PHRASES
                extract_key_phrases_results.append(action_result)
            elif idx % 5 == 2:
                assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES
                recognize_pii_entities_results.append(action_result)
            elif idx % 5 == 3:
                assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES
                recognize_linked_entities_results.append(action_result)
            else:
                assert action_result.action_type == AnalyzeActionsType.ANALYZE_SENTIMENT
                analyze_sentiment_results.append(action_result)
            if idx < 5:  # first page of task results
                assert len(action_result.document_results) == 20
            else:
                assert len(action_result.document_results) == 5
        assert all([action_result for action_result in recognize_entities_results if len(action_result.document_results) == len(docs)])
        assert all([action_result for action_result in extract_key_phrases_results if len(action_result.document_results) == len(docs)])
        assert all([action_result for action_result in recognize_pii_entities_results if len(action_result.document_results) == len(docs)])
        assert all([action_result for action_result in recognize_linked_entities_results if len(action_result.document_results) == len(docs)])
        assert all([action_result for action_result in analyze_sentiment_results if len(action_result.document_results) == len(docs)])

    @GlobalTextAnalyticsAccountPreparer()
    @TextAnalyticsClientPreparer()
    def test_too_many_documents(self, client):
        """26 documents exceeds the per-request limit and returns HTTP 400."""
        docs = list(itertools.repeat("input document", 26)) # Maximum number of documents per request is 25
        with pytest.raises(HttpResponseError) as excinfo:
            client.begin_analyze_actions(
                docs,
                actions=[
                    RecognizeEntitiesAction(),
                    ExtractKeyPhrasesAction(),
                    RecognizePiiEntitiesAction(),
                    RecognizeLinkedEntitiesAction(),
                    AnalyzeSentimentAction()
                ],
                polling_interval=self._interval(),
            )
        assert excinfo.value.status_code == 400
| 44.054711 | 146 | 0.630537 |
import os
import pytest
import platform
import functools
import itertools
import datetime
from azure.core.exceptions import HttpResponseError, ClientAuthenticationError
from azure.core.credentials import AzureKeyCredential
from testcase import TextAnalyticsTest, GlobalTextAnalyticsAccountPreparer
from testcase import TextAnalyticsClientPreparer as _TextAnalyticsClientPreparer
from azure.ai.textanalytics import (
TextAnalyticsClient,
RecognizeEntitiesAction,
RecognizeLinkedEntitiesAction,
RecognizePiiEntitiesAction,
ExtractKeyPhrasesAction,
AnalyzeSentimentAction,
TextDocumentInput,
VERSION,
TextAnalyticsApiVersion,
AnalyzeActionsType,
)
TextAnalyticsClientPreparer = functools.partial(_TextAnalyticsClientPreparer, TextAnalyticsClient)
class TestAnalyze(TextAnalyticsTest):
def _interval(self):
return 5 if self.is_live else 0
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_no_single_input(self, client):
with self.assertRaises(TypeError):
response = client.begin_analyze_actions("hello world", actions=[], polling_interval=self._interval())
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_all_successful_passing_dict_key_phrase_task(self, client):
docs = [{"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen"},
{"id": "2", "language": "es", "text": "Microsoft fue fundado por Bill Gates y Paul Allen"}]
response = client.begin_analyze_actions(
docs,
actions=[ExtractKeyPhrasesAction()],
show_stats=True,
polling_interval=self._interval(),
).result()
action_results = list(response)
assert len(action_results) == 1
action_result = action_results[0]
assert action_result.action_type == AnalyzeActionsType.EXTRACT_KEY_PHRASES
assert len(action_result.document_results) == len(docs)
for doc in action_result.document_results:
self.assertIn("Paul Allen", doc.key_phrases)
self.assertIn("Bill Gates", doc.key_phrases)
self.assertIn("Microsoft", doc.key_phrases)
self.assertIsNotNone(doc.id)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_all_successful_passing_dict_sentiment_task(self, client):
docs = [{"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen."},
{"id": "2", "language": "en", "text": "I did not like the hotel we stayed at. It was too expensive."},
{"id": "3", "language": "en", "text": "The restaurant had really good food. I recommend you try it."}]
response = client.begin_analyze_actions(
docs,
actions=[AnalyzeSentimentAction()],
show_stats=True,
polling_interval=self._interval(),
).result()
action_results = list(response)
assert len(action_results) == 1
action_result = action_results[0]
assert action_result.action_type == AnalyzeActionsType.ANALYZE_SENTIMENT
assert len(action_result.document_results) == len(docs)
self.assertEqual(action_result.document_results[0].sentiment, "neutral")
self.assertEqual(action_result.document_results[1].sentiment, "negative")
self.assertEqual(action_result.document_results[2].sentiment, "positive")
for doc in action_result.document_results:
self.assertIsNotNone(doc.id)
self.assertIsNotNone(doc.statistics)
self.validateConfidenceScores(doc.confidence_scores)
self.assertIsNotNone(doc.sentences)
self.assertEqual(len(action_result.document_results[0].sentences), 1)
self.assertEqual(action_result.document_results[0].sentences[0].text, "Microsoft was founded by Bill Gates and Paul Allen.")
self.assertEqual(len(action_result.document_results[1].sentences), 2)
self.assertEqual(action_result.document_results[1].sentences[0].text, "I did not like the hotel we stayed at.")
self.assertEqual(action_result.document_results[1].sentences[1].text, "It was too expensive.")
self.assertEqual(len(action_result.document_results[2].sentences), 2)
self.assertEqual(action_result.document_results[2].sentences[0].text, "The restaurant had really good food.")
self.assertEqual(action_result.document_results[2].sentences[1].text, "I recommend you try it.")
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_sentiment_analysis_task_with_opinion_mining(self, client):
documents = [
"It has a sleek premium aluminum design that makes it beautiful to look at.",
"The food and service is not good"
]
response = client.begin_analyze_actions(
documents,
actions=[AnalyzeSentimentAction(show_opinion_mining=True)],
show_stats=True,
polling_interval=self._interval(),
).result()
action_results = list(response)
assert len(action_results) == 1
action_result = action_results[0]
assert action_result.action_type == AnalyzeActionsType.ANALYZE_SENTIMENT
assert len(action_result.document_results) == len(documents)
for idx, doc in enumerate(action_result.document_results):
for sentence in doc.sentences:
if idx == 0:
for mined_opinion in sentence.mined_opinions:
target = mined_opinion.target
self.assertEqual('design', target.text)
self.assertEqual('positive', target.sentiment)
self.assertEqual(0.0, target.confidence_scores.neutral)
self.validateConfidenceScores(target.confidence_scores)
self.assertEqual(32, target.offset)
sleek_opinion = mined_opinion.assessments[0]
self.assertEqual('sleek', sleek_opinion.text)
self.assertEqual('positive', sleek_opinion.sentiment)
self.assertEqual(0.0, sleek_opinion.confidence_scores.neutral)
self.validateConfidenceScores(sleek_opinion.confidence_scores)
self.assertEqual(9, sleek_opinion.offset)
self.assertFalse(sleek_opinion.is_negated)
premium_opinion = mined_opinion.assessments[1]
self.assertEqual('premium', premium_opinion.text)
self.assertEqual('positive', premium_opinion.sentiment)
self.assertEqual(0.0, premium_opinion.confidence_scores.neutral)
self.validateConfidenceScores(premium_opinion.confidence_scores)
self.assertEqual(15, premium_opinion.offset)
self.assertFalse(premium_opinion.is_negated)
else:
food_target = sentence.mined_opinions[0].target
service_target = sentence.mined_opinions[1].target
self.validateConfidenceScores(food_target.confidence_scores)
self.assertEqual(4, food_target.offset)
self.assertEqual('service', service_target.text)
self.assertEqual('negative', service_target.sentiment)
self.assertEqual(0.0, service_target.confidence_scores.neutral)
self.validateConfidenceScores(service_target.confidence_scores)
self.assertEqual(13, service_target.offset)
food_opinion = sentence.mined_opinions[0].assessments[0]
service_opinion = sentence.mined_opinions[1].assessments[0]
self.assertOpinionsEqual(food_opinion, service_opinion)
self.assertEqual('good', food_opinion.text)
self.assertEqual('negative', food_opinion.sentiment)
self.assertEqual(0.0, food_opinion.confidence_scores.neutral)
self.validateConfidenceScores(food_opinion.confidence_scores)
self.assertEqual(28, food_opinion.offset)
self.assertTrue(food_opinion.is_negated)
service_target = sentence.mined_opinions[1].target
self.assertEqual('food', food_target.text)
self.assertEqual('negative', food_target.sentiment)
self.assertEqual(0.0, food_target.confidence_scores.neutral)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_all_successful_passing_text_document_input_entities_task(self, client):
docs = [
TextDocumentInput(id="1", text="Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975", language="en"),
TextDocumentInput(id="2", text="Microsoft fue fundado por Bill Gates y Paul Allen el 4 de abril de 1975.", language="es"),
TextDocumentInput(id="3", text="Microsoft wurde am 4. April 1975 von Bill Gates und Paul Allen gegründet.", language="de"),
]
response = client.begin_analyze_actions(
docs,
actions=[RecognizeEntitiesAction()],
show_stats=True,
polling_interval=self._interval(),
).result()
action_results = list(response)
assert len(action_results) == 1
action_result = action_results[0]
assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES
assert len(action_result.document_results) == len(docs)
for doc in action_result.document_results:
self.assertEqual(len(doc.entities), 4)
self.assertIsNotNone(doc.id)
for entity in doc.entities:
self.assertIsNotNone(entity.text)
self.assertIsNotNone(entity.category)
self.assertIsNotNone(entity.offset)
self.assertIsNotNone(entity.confidence_score)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_all_successful_passing_string_pii_entities_task(self, client):
docs = ["My SSN is 859-98-0987.",
"Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check.",
"Is 998.214.865-68 your Brazilian CPF number?"
]
response = client.begin_analyze_actions(
docs,
actions=[RecognizePiiEntitiesAction()],
show_stats=True,
polling_interval=self._interval(),
).result()
action_results = list(response)
assert len(action_results) == 1
action_result = action_results[0]
assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES
assert len(action_result.document_results) == len(docs)
self.assertEqual(action_result.document_results[0].entities[0].text, "859-98-0987")
self.assertEqual(action_result.document_results[0].entities[0].category, "USSocialSecurityNumber")
self.assertEqual(action_result.document_results[1].entities[0].text, "111000025")
# self.assertEqual(results[1].entities[0].category, "ABA Routing Number") # Service is currently returning PhoneNumber here
# commenting out brazil cpf, currently service is not returning it
# self.assertEqual(action_result.document_results[2].entities[0].text, "998.214.865-68")
# self.assertEqual(action_result.document_results[2].entities[0].category, "Brazil CPF Number")
for doc in action_result.document_results:
self.assertIsNotNone(doc.id)
for entity in doc.entities:
self.assertIsNotNone(entity.text)
self.assertIsNotNone(entity.category)
self.assertIsNotNone(entity.offset)
self.assertIsNotNone(entity.confidence_score)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_bad_request_on_empty_document(self, client):
docs = [u""]
with self.assertRaises(HttpResponseError):
response = client.begin_analyze_actions(
docs,
actions=[ExtractKeyPhrasesAction()],
polling_interval=self._interval(),
)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer(client_kwargs={
"text_analytics_account_key": "",
})
def test_empty_credential_class(self, client):
with self.assertRaises(ClientAuthenticationError):
response = client.begin_analyze_actions(
["This is written in English."],
actions=[
RecognizeEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizePiiEntitiesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction()
],
polling_interval=self._interval(),
)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer(client_kwargs={
"text_analytics_account_key": "xxxxxxxxxxxx",
})
def test_bad_credentials(self, client):
with self.assertRaises(ClientAuthenticationError):
response = client.begin_analyze_actions(
["This is written in English."],
actions=[
RecognizeEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizePiiEntitiesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction()
],
polling_interval=self._interval(),
)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_out_of_order_ids_multiple_tasks(self, client):
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
response = client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizePiiEntitiesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction()
],
polling_interval=self._interval(),
).result()
action_results = list(response)
assert len(action_results) == 5
assert action_results[0].action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES
assert action_results[1].action_type == AnalyzeActionsType.EXTRACT_KEY_PHRASES
assert action_results[2].action_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES
assert action_results[3].action_type == AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES
assert action_results[4].action_type == AnalyzeActionsType.ANALYZE_SENTIMENT
action_results = [r for r in action_results if not r.is_error]
assert all([action_result for action_result in action_results if len(action_result.document_results) == len(docs)])
in_order = ["56", "0", "19", "1"]
for action_result in action_results:
for idx, resp in enumerate(action_result.document_results):
self.assertEqual(resp.id, in_order[idx])
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_show_stats_and_model_version_multiple_tasks(self, client):
def callback(resp):
if resp.raw_response:
a = "b"
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
poller = client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(model_version="latest"),
ExtractKeyPhrasesAction(model_version="latest"),
RecognizePiiEntitiesAction(model_version="latest"),
RecognizeLinkedEntitiesAction(model_version="latest"),
AnalyzeSentimentAction(model_version="latest")
],
show_stats=True,
polling_interval=self._interval(),
raw_response_hook=callback,
)
response = poller.result()
action_results = list(response)
assert len(action_results) == 5
assert action_results[0].action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES
assert action_results[1].action_type == AnalyzeActionsType.EXTRACT_KEY_PHRASES
assert action_results[2].action_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES
assert action_results[3].action_type == AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES
assert action_results[4].action_type == AnalyzeActionsType.ANALYZE_SENTIMENT
assert all([action_result for action_result in action_results if len(action_result.document_results) == len(docs)])
for action_result in action_results:
assert action_result.statistics
for doc in action_result.document_results:
assert doc.statistics
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_poller_metadata(self, client):
docs = [{"id": "56", "text": ":)"}]
poller = client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(model_version="latest")
],
show_stats=True,
polling_interval=self._interval(),
)
response = poller.result()
assert isinstance(poller.created_on, datetime.datetime)
poller._polling_method.display_name
assert isinstance(poller.expires_on, datetime.datetime)
assert poller.actions_failed_count == 0
assert poller.actions_in_progress_count == 0
assert poller.actions_succeeded_count == 1
assert isinstance(poller.last_modified_on, datetime.datetime)
assert poller.total_actions_count == 1
assert poller.id
### TODO: Commenting out language tests. Right now analyze only supports language 'en', so no point to these tests yet
# @GlobalTextAnalyticsAccountPreparer()
# @TextAnalyticsClientPreparer()
# def test_whole_batch_language_hint(self, client):
# def callback(resp):
# language_str = "\"language\": \"fr\""
# if resp.http_request.body:
# language = resp.http_request.body.count(language_str)
# self.assertEqual(language, 3)
# docs = [
# u"This was the best day of my life.",
# u"I did not like the hotel we stayed at. It was too expensive.",
# u"The restaurant was not as good as I hoped."
# ]
# response = list(client.begin_analyze_actions(
# docs,
# actions=[
# RecognizeEntitiesAction(),
# ExtractKeyPhrasesAction(),
# RecognizePiiEntitiesAction()
# ],
# language="fr",
# polling_interval=self._interval(),
# raw_response_hook=callback
# ).result())
# for action_result in response:
# for doc in action_result.document_results:
# self.assertFalse(doc.is_error)
# @GlobalTextAnalyticsAccountPreparer()
# @TextAnalyticsClientPreparer(client_kwargs={
# "default_language": "en"
# })
# def test_whole_batch_language_hint_and_obj_per_item_hints(self, client):
# def callback(resp):
# pass
# # if resp.http_request.body:
# # language_str = "\"language\": \"es\""
# # language = resp.http_request.body.count(language_str)
# # self.assertEqual(language, 2)
# # language_str = "\"language\": \"en\""
# # language = resp.http_request.body.count(language_str)
# # self.assertEqual(language, 1)
# docs = [
# TextDocumentInput(id="1", text="I should take my cat to the veterinarian.", language="es"),
# TextDocumentInput(id="2", text="Este es un document escrito en Español.", language="es"),
# TextDocumentInput(id="3", text="猫は幸せ"),
# ]
# response = list(client.begin_analyze_actions(
# docs,
# actions=[
# RecognizeEntitiesAction(),
# ExtractKeyPhrasesAction(),
# RecognizePiiEntitiesAction()
# ],
# polling_interval=self._interval(),
# ).result())
# for action_result in response:
# for doc in action_result.document_results:
# assert not doc.is_error
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_invalid_language_hint_method(self, client):
response = list(client.begin_analyze_actions(
["This should fail because we're passing in an invalid language hint"],
language="notalanguage",
actions=[
RecognizeEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizePiiEntitiesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction()
],
polling_interval=self._interval(),
).result())
for action_result in response:
for doc in action_result.document_results:
assert doc.is_error
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_bad_model_version_error_multiple_tasks(self, client):
docs = [{"id": "1", "language": "english", "text": "I did not like the hotel we stayed at."}]
with pytest.raises(HttpResponseError):
response = client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(model_version="latest"),
ExtractKeyPhrasesAction(model_version="bad"),
RecognizePiiEntitiesAction(model_version="bad"),
RecognizeLinkedEntitiesAction(model_version="bad"),
AnalyzeSentimentAction(model_version="bad")
],
polling_interval=self._interval(),
).result()
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_bad_model_version_error_all_tasks(self, client):
docs = [{"id": "1", "language": "english", "text": "I did not like the hotel we stayed at."}]
with self.assertRaises(HttpResponseError):
response = client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(model_version="bad"),
ExtractKeyPhrasesAction(model_version="bad"),
RecognizePiiEntitiesAction(model_version="bad"),
RecognizeLinkedEntitiesAction(model_version="bad"),
AnalyzeSentimentAction(model_version="bad")
],
polling_interval=self._interval(),
).result()
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_missing_input_records_error(self, client):
docs = []
with pytest.raises(ValueError) as excinfo:
client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizePiiEntitiesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction()
],
polling_interval=self._interval(),
)
assert "Input documents can not be empty or None" in str(excinfo.value)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_passing_none_docs(self, client):
with pytest.raises(ValueError) as excinfo:
client.begin_analyze_actions(None, None)
assert "Input documents can not be empty or None" in str(excinfo.value)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_pass_cls(self, client):
def callback(pipeline_response, deserialized, _):
return "cls result"
res = client.begin_analyze_actions(
documents=["Test passing cls to endpoint"],
actions=[
RecognizeEntitiesAction(),
],
cls=callback,
polling_interval=self._interval(),
).result()
assert res == "cls result"
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_multiple_pages_of_results_returned_successfully(self, client):
single_doc = "hello world"
docs = [{"id": str(idx), "text": val} for (idx, val) in enumerate(list(itertools.repeat(single_doc, 25)))]
result = client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizePiiEntitiesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction()
],
show_stats=True,
polling_interval=self._interval(),
).result()
recognize_entities_results = []
extract_key_phrases_results = []
recognize_pii_entities_results = []
recognize_linked_entities_results = []
analyze_sentiment_results = []
action_results = list(result)
for idx, action_result in enumerate(action_results):
if idx % 5 == 0:
assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES
recognize_entities_results.append(action_result)
elif idx % 5 == 1:
assert action_result.action_type == AnalyzeActionsType.EXTRACT_KEY_PHRASES
extract_key_phrases_results.append(action_result)
elif idx % 5 == 2:
assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES
recognize_pii_entities_results.append(action_result)
elif idx % 5 == 3:
assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES
recognize_linked_entities_results.append(action_result)
else:
assert action_result.action_type == AnalyzeActionsType.ANALYZE_SENTIMENT
analyze_sentiment_results.append(action_result)
if idx < 5:
assert len(action_result.document_results) == 20
else:
assert len(action_result.document_results) == 5
assert all([action_result for action_result in recognize_entities_results if len(action_result.document_results) == len(docs)])
assert all([action_result for action_result in extract_key_phrases_results if len(action_result.document_results) == len(docs)])
assert all([action_result for action_result in recognize_pii_entities_results if len(action_result.document_results) == len(docs)])
assert all([action_result for action_result in recognize_linked_entities_results if len(action_result.document_results) == len(docs)])
assert all([action_result for action_result in analyze_sentiment_results if len(action_result.document_results) == len(docs)])
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
def test_too_many_documents(self, client):
docs = list(itertools.repeat("input document", 26))
with pytest.raises(HttpResponseError) as excinfo:
client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizePiiEntitiesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction()
],
polling_interval=self._interval(),
)
assert excinfo.value.status_code == 400
| true | true |
f72a95a36cb73e6369f0ae2694d3a4c317a14ec5 | 1,549 | py | Python | package/spack-py-backcall/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | 1 | 2018-07-17T07:45:09.000Z | 2018-07-17T07:45:09.000Z | package/spack-py-backcall/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | package/spack-py-backcall/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyBackcall(PythonPackage):
"""Specifications for callback functions passed in to an API"""
homepage = "https://github.com/takluyver/backcall"
url = "https://pypi.io/packages/source/b/backcall/backcall-0.1.0.tar.gz"
version('0.1.0', '87ce0c7839808e6a3427d57df6a792e7')
| 44.257143 | 78 | 0.68173 | true | true | |
f72a95f93d1a01fdc3492157c0b5fc9e8d191481 | 524 | py | Python | alembic/versions/1c697a5bd34f_addeding_lessons_tau.py | codeforamerica/bizfriendly-api | b3f3b9f83652ec67752d629baaf0bc1d4ec67695 | [
"BSD-Source-Code"
] | 13 | 2015-04-27T14:26:19.000Z | 2021-11-21T16:11:17.000Z | alembic/versions/1c697a5bd34f_addeding_lessons_tau.py | codeforamerica/bizfriendly-api | b3f3b9f83652ec67752d629baaf0bc1d4ec67695 | [
"BSD-Source-Code"
] | 15 | 2015-04-25T22:29:50.000Z | 2016-09-01T16:59:21.000Z | alembic/versions/1c697a5bd34f_addeding_lessons_tau.py | codeforamerica/bizfriendly-api | b3f3b9f83652ec67752d629baaf0bc1d4ec67695 | [
"BSD-Source-Code"
] | 9 | 2015-06-19T19:48:40.000Z | 2021-04-16T10:27:29.000Z | """Addeding lessons taught to user
Revision ID: 1c697a5bd34f
Revises: 23aebf11a765
Create Date: 2014-01-04 13:13:39.599020
"""
# revision identifiers, used by Alembic.
revision = '1c697a5bd34f'
down_revision = '23aebf11a765'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
| 19.407407 | 63 | 0.694656 |
revision = '1c697a5bd34f'
down_revision = '23aebf11a765'
from alembic import op
import sqlalchemy as sa
def upgrade():
| true | true |
f72a96fc280c4b6ed142ea022c0e6662161e6044 | 378 | py | Python | tests/operator/function_test.py | MingboPeng/queenbee | a7968b0f88833cdfab928ca681057bf245f36ed2 | [
"MIT"
] | null | null | null | tests/operator/function_test.py | MingboPeng/queenbee | a7968b0f88833cdfab928ca681057bf245f36ed2 | [
"MIT"
] | null | null | null | tests/operator/function_test.py | MingboPeng/queenbee | a7968b0f88833cdfab928ca681057bf245f36ed2 | [
"MIT"
] | null | null | null | import yaml
from tests.base.io_test import BaseIOTest
from tests.base.value_error import BaseValueErrorTest
from queenbee.operator.function import Function
ASSET_FOLDER = 'tests/assets/functions'
class TestIO(BaseIOTest):
klass = Function
asset_folder = ASSET_FOLDER
class TestValueError(BaseValueErrorTest):
klass = Function
asset_folder = ASSET_FOLDER
| 18.9 | 53 | 0.793651 | import yaml
from tests.base.io_test import BaseIOTest
from tests.base.value_error import BaseValueErrorTest
from queenbee.operator.function import Function
ASSET_FOLDER = 'tests/assets/functions'
class TestIO(BaseIOTest):
klass = Function
asset_folder = ASSET_FOLDER
class TestValueError(BaseValueErrorTest):
klass = Function
asset_folder = ASSET_FOLDER
| true | true |
f72a99520193f77a04dcbe1808375927c8ee383b | 289 | py | Python | Feature/structure_tensor_eigenvalues.py | Joevaen/Scikit-image_On_CT | e3bf0eeadc50691041b4b7c44a19d07546a85001 | [
"Apache-2.0"
] | null | null | null | Feature/structure_tensor_eigenvalues.py | Joevaen/Scikit-image_On_CT | e3bf0eeadc50691041b4b7c44a19d07546a85001 | [
"Apache-2.0"
] | null | null | null | Feature/structure_tensor_eigenvalues.py | Joevaen/Scikit-image_On_CT | e3bf0eeadc50691041b4b7c44a19d07546a85001 | [
"Apache-2.0"
] | null | null | null | # 计算结构张量的特征值。
from skimage.feature import structure_tensor
from skimage.feature import structure_tensor_eigenvalues
import numpy as np
square = np.zeros((5, 5))
square[2, 2] = 1
A_elems = structure_tensor(square, sigma=0.1, order='rc')
print(structure_tensor_eigenvalues(A_elems)[0])
| 20.642857 | 57 | 0.778547 |
from skimage.feature import structure_tensor
from skimage.feature import structure_tensor_eigenvalues
import numpy as np
square = np.zeros((5, 5))
square[2, 2] = 1
A_elems = structure_tensor(square, sigma=0.1, order='rc')
print(structure_tensor_eigenvalues(A_elems)[0])
| true | true |
f72a99912cc462e12e16df173b954984f4d5d9a7 | 6,184 | py | Python | nomadgram/images/views.py | wayhome25/nomadgram | 54d578e5674a0b35786d6c889b06ba019b648575 | [
"MIT"
] | null | null | null | nomadgram/images/views.py | wayhome25/nomadgram | 54d578e5674a0b35786d6c889b06ba019b648575 | [
"MIT"
] | 11 | 2020-09-05T05:23:03.000Z | 2022-03-11T23:26:18.000Z | nomadgram/images/views.py | wayhome25/nomadgram | 54d578e5674a0b35786d6c889b06ba019b648575 | [
"MIT"
] | 4 | 2017-12-22T05:53:37.000Z | 2020-04-25T03:13:47.000Z | from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from django.db.models import Q
from django.shortcuts import get_object_or_404
from nomadgram.images.models import Comment
from nomadgram.images.models import Image
from nomadgram.images.models import Like
from nomadgram.images.serializers import CommentSerializer
from nomadgram.images.serializers import CountImageSerializer
from nomadgram.images.serializers import ImageSerializer
from nomadgram.images.serializers import InputImageSerializer
from nomadgram.notifications.models import Notification
from nomadgram.users.models import User
from nomadgram.users.serializer import ListUserSerializer
class Images(APIView):
def get(self, request):
user = request.user
following_users = user.following.all()
feed_images = Image.objects.filter(Q(creator__in=following_users) | Q(creator=user))[:3]
query = feed_images.select_related('creator').prefetch_related('comments__creator', 'tags', 'likes')
serializer = ImageSerializer(query, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def post(self, request):
user = request.user
serializer = InputImageSerializer(data=request.data)
if serializer.is_valid():
serializer.save(creator=user)
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ImageDetail(APIView):
def find_own_image(self, image_id, user):
try:
image = Image.objects.get(id=image_id, creator=user)
return image
except Image.DoesNotExist:
return None
def get(self, request, image_id):
image = get_object_or_404(Image, id=image_id)
serializer = ImageSerializer(image)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def put(self, request, image_id):
user = request.user
image = self.find_own_image(image_id, user)
if image:
serializer = InputImageSerializer(image, data=request.data, partial=True)
if serializer.is_valid():
serializer.save(creator=user)
return Response(data=serializer.data, status=status.HTTP_200_OK)
else:
return Response(data=serializer.erros, status=status.HTTP_400_BAD_REQUEST)
else:
return Response(status=status.HTTP_401_UNAUTHORIZED)
def delete(self, request, image_id):
user = request.user
image = self.find_own_image(image_id, user)
if image:
image.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
class LikeImage(APIView):
def get(self, request, image_id):
"""like 유저 리스트를 가져온다"""
likes = Like.objects.filter(image_id=image_id)
likes_creator_ids = likes.values('creator_id')
like_users = User.objects.filter(id__in=likes_creator_ids)
serializer = ListUserSerializer(like_users, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def post(self, request, image_id):
"""like를 추가한다"""
user = request.user
image = get_object_or_404(Image, id=image_id)
try:
Like.objects.get(creator=user, image=image)
return Response(status=status.HTTP_304_NOT_MODIFIED)
except Like.DoesNotExist:
Like.objects.create(creator=user, image=image) # NOTE(다른방법): image.likes.create(creator=user)
Notification.objects.create(creator=user, to=image.creator, image=image,
notificaiton_type=Notification.NotificationType.LIKE)
return Response(status=status.HTTP_201_CREATED)
class UnLikeImage(APIView):
def delete(self, request, image_id):
user = request.user
image = get_object_or_404(Image, id=image_id)
try:
preexisting_like = Like.objects.get(creator=user, image=image)
preexisting_like.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except Like.DoesNotExist:
return Response(status=status.HTTP_304_NOT_MODIFIED)
class CommentOnImage(APIView):
def post(self, request, image_id):
user = request.user
image = get_object_or_404(Image, id=image_id)
serializer = CommentSerializer(data=request.POST)
if serializer.is_valid():
comment = serializer.save(creator=user, image=image) # NOTE: serializer.save() 는 모델 인스턴스를 리턴
Notification.objects.create(creator=user, to=image.creator, image=image, comment=comment,
notificaiton_type=Notification.NotificationType.COMMENT)
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class CommentView(APIView):
def delete(self, request, comment_id):
user = request.user
comment = get_object_or_404(Comment, id=comment_id, creator=user)
comment.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class ModerateComments(APIView):
def delete(self, request, image_id, comment_id):
comment = get_object_or_404(Comment, id=comment_id, image_id=image_id, image__creatorgs=request.user)
comment.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class Search(APIView):
def get(self, request):
tags = request.query_params.get('tags', None) # NOTE: query_params 를 통해서 query string을 가져온다.
if tags:
tags = tags.split(',')
images = Image.objects.filter(tags__name__in=tags).distinct()
serializer = CountImageSerializer(images, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
else:
return Response(status=status.HTTP_204_NO_CONTENT)
| 37.02994 | 109 | 0.687581 | from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from django.db.models import Q
from django.shortcuts import get_object_or_404
from nomadgram.images.models import Comment
from nomadgram.images.models import Image
from nomadgram.images.models import Like
from nomadgram.images.serializers import CommentSerializer
from nomadgram.images.serializers import CountImageSerializer
from nomadgram.images.serializers import ImageSerializer
from nomadgram.images.serializers import InputImageSerializer
from nomadgram.notifications.models import Notification
from nomadgram.users.models import User
from nomadgram.users.serializer import ListUserSerializer
class Images(APIView):
def get(self, request):
user = request.user
following_users = user.following.all()
feed_images = Image.objects.filter(Q(creator__in=following_users) | Q(creator=user))[:3]
query = feed_images.select_related('creator').prefetch_related('comments__creator', 'tags', 'likes')
serializer = ImageSerializer(query, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def post(self, request):
user = request.user
serializer = InputImageSerializer(data=request.data)
if serializer.is_valid():
serializer.save(creator=user)
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ImageDetail(APIView):
def find_own_image(self, image_id, user):
try:
image = Image.objects.get(id=image_id, creator=user)
return image
except Image.DoesNotExist:
return None
def get(self, request, image_id):
image = get_object_or_404(Image, id=image_id)
serializer = ImageSerializer(image)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def put(self, request, image_id):
user = request.user
image = self.find_own_image(image_id, user)
if image:
serializer = InputImageSerializer(image, data=request.data, partial=True)
if serializer.is_valid():
serializer.save(creator=user)
return Response(data=serializer.data, status=status.HTTP_200_OK)
else:
return Response(data=serializer.erros, status=status.HTTP_400_BAD_REQUEST)
else:
return Response(status=status.HTTP_401_UNAUTHORIZED)
def delete(self, request, image_id):
user = request.user
image = self.find_own_image(image_id, user)
if image:
image.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
class LikeImage(APIView):
def get(self, request, image_id):
likes = Like.objects.filter(image_id=image_id)
likes_creator_ids = likes.values('creator_id')
like_users = User.objects.filter(id__in=likes_creator_ids)
serializer = ListUserSerializer(like_users, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def post(self, request, image_id):
user = request.user
image = get_object_or_404(Image, id=image_id)
try:
Like.objects.get(creator=user, image=image)
return Response(status=status.HTTP_304_NOT_MODIFIED)
except Like.DoesNotExist:
Like.objects.create(creator=user, image=image)
Notification.objects.create(creator=user, to=image.creator, image=image,
notificaiton_type=Notification.NotificationType.LIKE)
return Response(status=status.HTTP_201_CREATED)
class UnLikeImage(APIView):
def delete(self, request, image_id):
user = request.user
image = get_object_or_404(Image, id=image_id)
try:
preexisting_like = Like.objects.get(creator=user, image=image)
preexisting_like.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except Like.DoesNotExist:
return Response(status=status.HTTP_304_NOT_MODIFIED)
class CommentOnImage(APIView):
def post(self, request, image_id):
user = request.user
image = get_object_or_404(Image, id=image_id)
serializer = CommentSerializer(data=request.POST)
if serializer.is_valid():
comment = serializer.save(creator=user, image=image)
Notification.objects.create(creator=user, to=image.creator, image=image, comment=comment,
notificaiton_type=Notification.NotificationType.COMMENT)
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class CommentView(APIView):
def delete(self, request, comment_id):
user = request.user
comment = get_object_or_404(Comment, id=comment_id, creator=user)
comment.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class ModerateComments(APIView):
def delete(self, request, image_id, comment_id):
comment = get_object_or_404(Comment, id=comment_id, image_id=image_id, image__creatorgs=request.user)
comment.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class Search(APIView):
def get(self, request):
tags = request.query_params.get('tags', None)
if tags:
tags = tags.split(',')
images = Image.objects.filter(tags__name__in=tags).distinct()
serializer = CountImageSerializer(images, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
else:
return Response(status=status.HTTP_204_NO_CONTENT)
| true | true |
f72a99a11e52fd71703f1220515453c9acbbe085 | 552 | py | Python | socialnews/mptt/tests/settings.py | agiliq/django-socialnews | aa4a1a4a0e3279e6c7999071648ba37c71df9d15 | [
"BSD-3-Clause"
] | 30 | 2015-01-18T16:34:03.000Z | 2021-05-23T20:05:54.000Z | socialnews/mptt/tests/settings.py | agiliq/django-socialnews | aa4a1a4a0e3279e6c7999071648ba37c71df9d15 | [
"BSD-3-Clause"
] | null | null | null | socialnews/mptt/tests/settings.py | agiliq/django-socialnews | aa4a1a4a0e3279e6c7999071648ba37c71df9d15 | [
"BSD-3-Clause"
] | 11 | 2015-02-21T10:45:41.000Z | 2021-01-24T21:08:20.000Z | import os
DIRNAME = os.path.dirname(__file__)
DEBUG = True
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = os.path.join(DIRNAME, 'mptt.db')
#DATABASE_ENGINE = 'mysql'
#DATABASE_NAME = 'mptt_test'
#DATABASE_USER = 'root'
#DATABASE_PASSWORD = ''
#DATABASE_HOST = 'localhost'
#DATABASE_PORT = '3306'
#DATABASE_ENGINE = 'postgresql_psycopg2'
#DATABASE_NAME = 'mptt_test'
#DATABASE_USER = 'postgres'
#DATABASE_PASSWORD = ''
#DATABASE_HOST = 'localhost'
#DATABASE_PORT = '5432'
INSTALLED_APPS = (
'mptt',
'mptt.tests',
)
| 19.714286 | 49 | 0.684783 | import os
DIRNAME = os.path.dirname(__file__)
DEBUG = True
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = os.path.join(DIRNAME, 'mptt.db')
INSTALLED_APPS = (
'mptt',
'mptt.tests',
)
| true | true |
f72a9a36d3435f37d58e7109c367b25a53d50743 | 541 | py | Python | samples/fan_in_fan_out/HttpStart/__init__.py | sebastianburckhardt/azure-functions-durable-python | 634f70887e415f0ff9e7ee1e2fb3f58f90112772 | [
"MIT"
] | 78 | 2020-03-30T19:05:23.000Z | 2022-03-30T06:55:47.000Z | samples/fan_in_fan_out/HttpStart/__init__.py | sebastianburckhardt/azure-functions-durable-python | 634f70887e415f0ff9e7ee1e2fb3f58f90112772 | [
"MIT"
] | 180 | 2020-04-01T22:25:59.000Z | 2022-03-29T14:23:16.000Z | samples/fan_in_fan_out/HttpStart/__init__.py | sebastianburckhardt/azure-functions-durable-python | 634f70887e415f0ff9e7ee1e2fb3f58f90112772 | [
"MIT"
] | 40 | 2020-03-31T19:52:31.000Z | 2022-02-06T05:52:44.000Z | import logging
import json
import azure.functions as func
import azure.durable_functions as df
async def main(req: func.HttpRequest, starter: str) -> func.HttpResponse:
client = df.DurableOrchestrationClient(starter)
payload: str = json.loads(req.get_body().decode()) # Load JSON post request data
instance_id = await client.start_new(req.route_params["functionName"], client_input=payload)
logging.info(f"Started orchestration with ID = '{instance_id}'.")
return client.create_check_status_response(req, instance_id) | 38.642857 | 96 | 0.770795 | import logging
import json
import azure.functions as func
import azure.durable_functions as df
async def main(req: func.HttpRequest, starter: str) -> func.HttpResponse:
client = df.DurableOrchestrationClient(starter)
payload: str = json.loads(req.get_body().decode())
instance_id = await client.start_new(req.route_params["functionName"], client_input=payload)
logging.info(f"Started orchestration with ID = '{instance_id}'.")
return client.create_check_status_response(req, instance_id) | true | true |
f72a9a3bdb59d938db776e22dab6ecf91d768216 | 9,420 | py | Python | Ryven/packages/auto_generated/ctypes.test.test_pickling/nodes.py | tfroehlich82/Ryven | cb57c91d13949712844a4410a9302c4a90d28dcd | [
"MIT"
] | 2,872 | 2020-07-01T09:06:34.000Z | 2022-03-31T05:52:32.000Z | Ryven/packages/auto_generated/ctypes.test.test_pickling/nodes.py | dhf327/Ryven | a11e361528d982a9dd3c489dd536f8b05ffd56e1 | [
"MIT"
] | 59 | 2020-06-28T12:50:50.000Z | 2022-03-27T19:07:54.000Z | Ryven/packages/auto_generated/ctypes.test.test_pickling/nodes.py | dhf327/Ryven | a11e361528d982a9dd3c489dd536f8b05ffd56e1 | [
"MIT"
] | 339 | 2020-07-05T04:36:20.000Z | 2022-03-24T07:25:18.000Z |
from NENV import *
import ctypes.test.test_pickling
class NodeBase(Node):
pass
class Array_Node(NodeBase):
"""
"""
title = 'ARRAY'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='typ'),
NodeInputBP(label='len'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.ARRAY(self.input(0), self.input(1)))
class Cfunctype_Node(NodeBase):
"""
CFUNCTYPE(restype, *argtypes,
use_errno=False, use_last_error=False) -> function prototype.
restype: the result type
argtypes: a sequence specifying the argument types
The function prototype can be called in different ways to create a
callable object:
prototype(integer address) -> foreign function
prototype(callable) -> create and return a C callable function from callable
prototype(integer index, method name[, paramflags]) -> foreign function calling a COM method
prototype((ordinal number, dll object)[, paramflags]) -> foreign function exported by ordinal
prototype((function name, dll object)[, paramflags]) -> foreign function exported by name
"""
title = 'CFUNCTYPE'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='restype'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.CFUNCTYPE(self.input(0)))
class Dllcanunloadnow_Node(NodeBase):
"""
"""
title = 'DllCanUnloadNow'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.DllCanUnloadNow())
class Dllgetclassobject_Node(NodeBase):
"""
"""
title = 'DllGetClassObject'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='rclsid'),
NodeInputBP(label='riid'),
NodeInputBP(label='ppv'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.DllGetClassObject(self.input(0), self.input(1), self.input(2)))
class Pyfunctype_Node(NodeBase):
"""
"""
title = 'PYFUNCTYPE'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='restype'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.PYFUNCTYPE(self.input(0)))
class Setpointertype_Node(NodeBase):
"""
"""
title = 'SetPointerType'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='pointer'),
NodeInputBP(label='cls'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.SetPointerType(self.input(0), self.input(1)))
class Winfunctype_Node(NodeBase):
"""
"""
title = 'WINFUNCTYPE'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='restype'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.WINFUNCTYPE(self.input(0)))
class Winerror_Node(NodeBase):
"""
"""
title = 'WinError'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='code', dtype=dtypes.Data(default=None, size='s')),
NodeInputBP(label='descr', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.WinError(self.input(0), self.input(1)))
class _Calcsize_Node(NodeBase):
"""
Return size in bytes of the struct described by the format string."""
title = '_calcsize'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='format'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling._calcsize(self.input(0)))
class _Check_Size_Node(NodeBase):
"""
"""
title = '_check_size'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='typ'),
NodeInputBP(label='typecode', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling._check_size(self.input(0), self.input(1)))
class _Reset_Cache_Node(NodeBase):
"""
"""
title = '_reset_cache'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling._reset_cache())
class C_Buffer_Node(NodeBase):
"""
"""
title = 'c_buffer'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='init'),
NodeInputBP(label='size', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.c_buffer(self.input(0), self.input(1)))
class Cast_Node(NodeBase):
"""
"""
title = 'cast'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='obj'),
NodeInputBP(label='typ'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.cast(self.input(0), self.input(1)))
class Create_String_Buffer_Node(NodeBase):
"""
create_string_buffer(aBytes) -> character array
create_string_buffer(anInteger) -> character array
create_string_buffer(aBytes, anInteger) -> character array
"""
title = 'create_string_buffer'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='init'),
NodeInputBP(label='size', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.create_string_buffer(self.input(0), self.input(1)))
class Create_Unicode_Buffer_Node(NodeBase):
"""
create_unicode_buffer(aString) -> character array
create_unicode_buffer(anInteger) -> character array
create_unicode_buffer(aString, anInteger) -> character array
"""
title = 'create_unicode_buffer'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='init'),
NodeInputBP(label='size', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.create_unicode_buffer(self.input(0), self.input(1)))
class String_At_Node(NodeBase):
"""
string_at(addr[, size]) -> string
Return the string at addr."""
title = 'string_at'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='ptr'),
NodeInputBP(label='size', dtype=dtypes.Data(default=-1, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.string_at(self.input(0), self.input(1)))
class Wstring_At_Node(NodeBase):
"""
wstring_at(addr[, size]) -> string
Return the string at addr."""
title = 'wstring_at'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='ptr'),
NodeInputBP(label='size', dtype=dtypes.Data(default=-1, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.wstring_at(self.input(0), self.input(1)))
export_nodes(
Array_Node,
Cfunctype_Node,
Dllcanunloadnow_Node,
Dllgetclassobject_Node,
Pyfunctype_Node,
Setpointertype_Node,
Winfunctype_Node,
Winerror_Node,
_Calcsize_Node,
_Check_Size_Node,
_Reset_Cache_Node,
C_Buffer_Node,
Cast_Node,
Create_String_Buffer_Node,
Create_Unicode_Buffer_Node,
String_At_Node,
Wstring_At_Node,
)
| 25.254692 | 120 | 0.619639 |
from NENV import *
import ctypes.test.test_pickling
class NodeBase(Node):
pass
class Array_Node(NodeBase):
title = 'ARRAY'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='typ'),
NodeInputBP(label='len'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.ARRAY(self.input(0), self.input(1)))
class Cfunctype_Node(NodeBase):
title = 'CFUNCTYPE'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='restype'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.CFUNCTYPE(self.input(0)))
class Dllcanunloadnow_Node(NodeBase):
title = 'DllCanUnloadNow'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.DllCanUnloadNow())
class Dllgetclassobject_Node(NodeBase):
title = 'DllGetClassObject'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='rclsid'),
NodeInputBP(label='riid'),
NodeInputBP(label='ppv'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.DllGetClassObject(self.input(0), self.input(1), self.input(2)))
class Pyfunctype_Node(NodeBase):
title = 'PYFUNCTYPE'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='restype'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.PYFUNCTYPE(self.input(0)))
class Setpointertype_Node(NodeBase):
title = 'SetPointerType'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='pointer'),
NodeInputBP(label='cls'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.SetPointerType(self.input(0), self.input(1)))
class Winfunctype_Node(NodeBase):
title = 'WINFUNCTYPE'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='restype'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.WINFUNCTYPE(self.input(0)))
class Winerror_Node(NodeBase):
title = 'WinError'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='code', dtype=dtypes.Data(default=None, size='s')),
NodeInputBP(label='descr', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.WinError(self.input(0), self.input(1)))
class _Calcsize_Node(NodeBase):
title = '_calcsize'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='format'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling._calcsize(self.input(0)))
class _Check_Size_Node(NodeBase):
title = '_check_size'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='typ'),
NodeInputBP(label='typecode', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling._check_size(self.input(0), self.input(1)))
class _Reset_Cache_Node(NodeBase):
title = '_reset_cache'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling._reset_cache())
class C_Buffer_Node(NodeBase):
title = 'c_buffer'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='init'),
NodeInputBP(label='size', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.c_buffer(self.input(0), self.input(1)))
class Cast_Node(NodeBase):
title = 'cast'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='obj'),
NodeInputBP(label='typ'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.cast(self.input(0), self.input(1)))
class Create_String_Buffer_Node(NodeBase):
title = 'create_string_buffer'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='init'),
NodeInputBP(label='size', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.create_string_buffer(self.input(0), self.input(1)))
class Create_Unicode_Buffer_Node(NodeBase):
title = 'create_unicode_buffer'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='init'),
NodeInputBP(label='size', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.create_unicode_buffer(self.input(0), self.input(1)))
class String_At_Node(NodeBase):
title = 'string_at'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='ptr'),
NodeInputBP(label='size', dtype=dtypes.Data(default=-1, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.string_at(self.input(0), self.input(1)))
class Wstring_At_Node(NodeBase):
title = 'wstring_at'
type_ = 'ctypes.test.test_pickling'
init_inputs = [
NodeInputBP(label='ptr'),
NodeInputBP(label='size', dtype=dtypes.Data(default=-1, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, ctypes.test.test_pickling.wstring_at(self.input(0), self.input(1)))
export_nodes(
Array_Node,
Cfunctype_Node,
Dllcanunloadnow_Node,
Dllgetclassobject_Node,
Pyfunctype_Node,
Setpointertype_Node,
Winfunctype_Node,
Winerror_Node,
_Calcsize_Node,
_Check_Size_Node,
_Reset_Cache_Node,
C_Buffer_Node,
Cast_Node,
Create_String_Buffer_Node,
Create_Unicode_Buffer_Node,
String_At_Node,
Wstring_At_Node,
)
| true | true |
f72a9b400fc3d0e9b4c84e2cd50ded8e71059a28 | 6,080 | py | Python | corehq/apps/smsforms/app.py | dborowiecki/commcare-hq | f2f4fa67faec09040a98502f5657444075b63f2e | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/smsforms/app.py | dborowiecki/commcare-hq | f2f4fa67faec09040a98502f5657444075b63f2e | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/smsforms/app.py | dborowiecki/commcare-hq | f2f4fa67faec09040a98502f5657444075b63f2e | [
"BSD-3-Clause"
] | null | null | null | import re
import uuid
from xml.etree.cElementTree import XML, tostring
from django.conf import settings
from dimagi.utils.parsing import json_format_datetime
from corehq.apps.app_manager.util import get_cloudcare_session_data
from corehq.apps.cloudcare.touchforms_api import CaseSessionDataHelper
from corehq.apps.formplayer_api.smsforms import sms as tfsms
from corehq.apps.formplayer_api.smsforms.api import (
InvalidSessionIdException,
TouchformsError,
XFormsConfig,
get_raw_instance,
)
from corehq.apps.receiverwrapper.util import submit_form_locally
from corehq.apps.users.models import CouchUser
from corehq.form_processor.utils import is_commcarecase
from corehq.messaging.scheduling.util import utcnow
from .models import XFORMS_SESSION_SMS, SQLXFormsSession
COMMCONNECT_DEVICE_ID = "commconnect"
def start_session(session, domain, contact, app, module, form, case_id=None, yield_responses=False,
case_for_case_submission=False):
"""
Starts a session in touchforms and saves the record in the database.
Returns a tuple containing the session object and the (text-only)
list of generated questions/responses based on the form.
Special params:
yield_responses - If True, the list of xforms responses is returned, otherwise the text prompt for each is returned
session_type - XFORMS_SESSION_SMS or XFORMS_SESSION_IVR
case_for_case_submission - True if this is a submission that a case is making to alter another related case. For example, if a parent case is filling out
an SMS survey which will update its child case, this should be True.
"""
# NOTE: this call assumes that "contact" will expose three
# properties: .raw_username, .get_id, and .get_language_code
session_data = CaseSessionDataHelper(domain, contact, case_id, app, form).get_session_data(COMMCONNECT_DEVICE_ID)
# since the API user is a superuser, force touchforms to query only
# the contact's cases by specifying it as an additional filter
if is_commcarecase(contact) and form.requires_case():
session_data["additional_filters"] = {
"case_id": case_id,
"footprint": "true" if form.uses_parent_case() else "false",
}
elif isinstance(contact, CouchUser):
session_data["additional_filters"] = {
"user_id": contact.get_id,
"footprint": "true"
}
kwargs = {}
if is_commcarecase(contact):
kwargs['restore_as_case_id'] = contact.case_id
else:
kwargs['restore_as'] = contact.raw_username
if app and form:
session_data.update(get_cloudcare_session_data(domain, form, contact))
language = contact.get_language_code()
config = XFormsConfig(form_content=form.render_xform().decode('utf-8'),
language=language,
session_data=session_data,
domain=domain,
**kwargs)
session_start_info = tfsms.start_session(config)
session.session_id = session_start_info.session_id
session.save()
responses = session_start_info.first_responses
if len(responses) > 0 and responses[0].status == 'http-error':
session.mark_completed(False)
session.save()
raise TouchformsError('Cannot connect to touchforms.')
# Prevent future update conflicts by getting the session again from the db
# since the session could have been updated separately in the first_responses call
session = SQLXFormsSession.objects.get(pk=session.pk)
if yield_responses:
return (session, responses)
else:
return (session, _responses_to_text(responses))
def get_responses(domain, session_id, text):
"""
Try to process this message like a session-based submission against
an xform.
Returns a list of responses if there are any.
"""
return list(tfsms.next_responses(session_id, text, domain))
def _responses_to_text(responses):
return [r.text_prompt for r in responses if r.text_prompt]
def submit_unfinished_form(session):
"""
Gets the raw instance of the session's form and submits it. This is used with
sms and ivr surveys to save all questions answered so far in a session that
needs to close.
If session.include_case_updates_in_partial_submissions is False, no case
create / update / close actions will be performed, but the form will still be submitted.
The form is only submitted if the smsforms session has not yet completed.
"""
# Get and clean the raw xml
try:
response = get_raw_instance(session.session_id, session.domain)
xml = response['output']
except InvalidSessionIdException:
return
root = XML(xml)
case_tag_regex = re.compile(r"^(\{.*\}){0,1}case$") # Use regex in order to search regardless of namespace
meta_tag_regex = re.compile(r"^(\{.*\}){0,1}meta$")
timeEnd_tag_regex = re.compile(r"^(\{.*\}){0,1}timeEnd$")
current_timstamp = json_format_datetime(utcnow())
for child in root:
if case_tag_regex.match(child.tag) is not None:
# Found the case tag
case_element = child
case_element.set("date_modified", current_timstamp)
if not session.include_case_updates_in_partial_submissions:
# Remove case actions (create, update, close)
child_elements = [case_action for case_action in case_element]
for case_action in child_elements:
case_element.remove(case_action)
elif meta_tag_regex.match(child.tag) is not None:
# Found the meta tag, now set the value for timeEnd
for meta_child in child:
if timeEnd_tag_regex.match(meta_child.tag):
meta_child.text = current_timstamp
cleaned_xml = tostring(root)
# Submit the xml
result = submit_form_locally(cleaned_xml, session.domain, app_id=session.app_id, partial_submission=True)
session.submission_id = result.xform.form_id
| 40.533333 | 157 | 0.702138 | import re
import uuid
from xml.etree.cElementTree import XML, tostring
from django.conf import settings
from dimagi.utils.parsing import json_format_datetime
from corehq.apps.app_manager.util import get_cloudcare_session_data
from corehq.apps.cloudcare.touchforms_api import CaseSessionDataHelper
from corehq.apps.formplayer_api.smsforms import sms as tfsms
from corehq.apps.formplayer_api.smsforms.api import (
InvalidSessionIdException,
TouchformsError,
XFormsConfig,
get_raw_instance,
)
from corehq.apps.receiverwrapper.util import submit_form_locally
from corehq.apps.users.models import CouchUser
from corehq.form_processor.utils import is_commcarecase
from corehq.messaging.scheduling.util import utcnow
from .models import XFORMS_SESSION_SMS, SQLXFormsSession
COMMCONNECT_DEVICE_ID = "commconnect"
def start_session(session, domain, contact, app, module, form, case_id=None, yield_responses=False,
case_for_case_submission=False):
session_data = CaseSessionDataHelper(domain, contact, case_id, app, form).get_session_data(COMMCONNECT_DEVICE_ID)
if is_commcarecase(contact) and form.requires_case():
session_data["additional_filters"] = {
"case_id": case_id,
"footprint": "true" if form.uses_parent_case() else "false",
}
elif isinstance(contact, CouchUser):
session_data["additional_filters"] = {
"user_id": contact.get_id,
"footprint": "true"
}
kwargs = {}
if is_commcarecase(contact):
kwargs['restore_as_case_id'] = contact.case_id
else:
kwargs['restore_as'] = contact.raw_username
if app and form:
session_data.update(get_cloudcare_session_data(domain, form, contact))
language = contact.get_language_code()
config = XFormsConfig(form_content=form.render_xform().decode('utf-8'),
language=language,
session_data=session_data,
domain=domain,
**kwargs)
session_start_info = tfsms.start_session(config)
session.session_id = session_start_info.session_id
session.save()
responses = session_start_info.first_responses
if len(responses) > 0 and responses[0].status == 'http-error':
session.mark_completed(False)
session.save()
raise TouchformsError('Cannot connect to touchforms.')
# Prevent future update conflicts by getting the session again from the db
# since the session could have been updated separately in the first_responses call
session = SQLXFormsSession.objects.get(pk=session.pk)
if yield_responses:
return (session, responses)
else:
return (session, _responses_to_text(responses))
def get_responses(domain, session_id, text):
return list(tfsms.next_responses(session_id, text, domain))
def _responses_to_text(responses):
return [r.text_prompt for r in responses if r.text_prompt]
def submit_unfinished_form(session):
# Get and clean the raw xml
try:
response = get_raw_instance(session.session_id, session.domain)
xml = response['output']
except InvalidSessionIdException:
return
root = XML(xml)
case_tag_regex = re.compile(r"^(\{.*\}){0,1}case$") # Use regex in order to search regardless of namespace
meta_tag_regex = re.compile(r"^(\{.*\}){0,1}meta$")
timeEnd_tag_regex = re.compile(r"^(\{.*\}){0,1}timeEnd$")
current_timstamp = json_format_datetime(utcnow())
for child in root:
if case_tag_regex.match(child.tag) is not None:
# Found the case tag
case_element = child
case_element.set("date_modified", current_timstamp)
if not session.include_case_updates_in_partial_submissions:
# Remove case actions (create, update, close)
child_elements = [case_action for case_action in case_element]
for case_action in child_elements:
case_element.remove(case_action)
elif meta_tag_regex.match(child.tag) is not None:
# Found the meta tag, now set the value for timeEnd
for meta_child in child:
if timeEnd_tag_regex.match(meta_child.tag):
meta_child.text = current_timstamp
cleaned_xml = tostring(root)
# Submit the xml
result = submit_form_locally(cleaned_xml, session.domain, app_id=session.app_id, partial_submission=True)
session.submission_id = result.xform.form_id
| true | true |
f72a9b93aae1aefa7bf9852e1961a1a1a0e15237 | 667 | py | Python | src/actuariat_python/data/data_population/__init__.py | Pandinosaurus/actuariat_python | 77533a75fcc63a5a7ebca664a19a24c9439670ee | [
"MIT"
] | 5 | 2017-03-13T15:58:40.000Z | 2021-02-03T12:52:58.000Z | src/actuariat_python/data/data_population/__init__.py | Pandinosaurus/actuariat_python | 77533a75fcc63a5a7ebca664a19a24c9439670ee | [
"MIT"
] | 13 | 2015-06-14T22:01:37.000Z | 2021-01-05T13:57:00.000Z | src/actuariat_python/data/data_population/__init__.py | Pandinosaurus/actuariat_python | 77533a75fcc63a5a7ebca664a19a24c9439670ee | [
"MIT"
] | 9 | 2017-01-15T15:06:55.000Z | 2022-01-18T20:42:48.000Z | # -*- coding: utf-8 -*-
"""
@file
@brief Data from INSEE
**Source**
* ``irsocsd2014_G10.xlsx``: ?
* ``fm-fecondite-age-mere.csv``: `INSEE Bilan Démographique 2016 <https://www.insee.fr/fr/statistiques/1892259?sommaire=1912926>`_
* ``pop-totale-france.xlsx``: `INED Population totale
<https://www.ined.fr/fr/tout-savoir-population/chiffres/france/evolution-population/population-totale/>`_
* ``TF00-02_D.xls``: `spac-actuaires, tables de mortalité <http://www.spac-actuaires.fr/glossaire/Table_de_mortalit%C3%A9>`_
* ``TH00-02_D.xls``: `spac-actuaires, tables de mortalité <http://www.spac-actuaires.fr/glossaire/Table_de_mortalit%C3%A9>`_
""" # pragma: no cover
| 44.466667 | 130 | 0.724138 | true | true | |
f72a9be347a7824928143bcd46fdc086c947e678 | 18,930 | py | Python | orbital_utilities.py | desertfireballnetwork/DFN_darkflight | f41d2a2b82ce96f380f26acfe278c0afa536b9cd | [
"MIT"
] | 1 | 2020-10-19T15:13:09.000Z | 2020-10-19T15:13:09.000Z | orbital_utilities.py | desertfireballnetwork/DFN_darkflight | f41d2a2b82ce96f380f26acfe278c0afa536b9cd | [
"MIT"
] | null | null | null | orbital_utilities.py | desertfireballnetwork/DFN_darkflight | f41d2a2b82ce96f380f26acfe278c0afa536b9cd | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Functions and objects to deal with meteoroids orbits
"""
__author__ = "Hadrien A.R. Devillepoix, Trent Jansen-Sturgeon "
__copyright__ = "Copyright 2016-2017, Desert Fireball Network"
__license__ = "MIT"
__version__ = "1.0"
import numpy as np
from numpy.linalg import norm
import matplotlib.pyplot as plt
from astropy import units as u
from astropy.time import Time
from astropy.coordinates import HCRS, ITRS, GCRS
from astropy.utils.iers import IERS_A, IERS_A_URL, IERS
from astropy.utils.data import download_file
from trajectory_utilities import ECEF2LLH, \
EarthPosition, HCRS2HCI, HCI2ECI_pos, \
OrbitalElements2PosVel, ECI2ECEF_pos
# Try to refresh the IERS-A Earth-orientation tables (used by astropy for
# precise ITRS <-> GCRS transformations). This is best-effort: when the
# download fails, astropy falls back to its bundled (possibly stale) tables.
try:
    iers_a_file = download_file(IERS_A_URL, cache=True)
    iers_a = IERS_A.open(iers_a_file)
    IERS.iers_table = iers_a
except Exception:
    # Narrowed from a bare 'except:' so that KeyboardInterrupt / SystemExit
    # still propagate; the download itself is non-essential.
    print('IERS_A_URL is temporarily unavailable')

# Astronomical unit in metres (scalar conversion factor)
AU = 1*u.au.to(u.m)
# Semi-major axis of Jupiter's orbit, used by the Tisserand criterion
SMA_JUPITER = 5.20336301 * u.au
def tisserand_wrt_jupiter(a, e, i):
    """
    Tisserand parameter of an orbit with respect to Jupiter.

    Parameters
    ----------
    a : semi-major axis (astropy length Quantity)
    e : eccentricity (dimensionless)
    i : inclination (angle; anything np.cos accepts)

    Returns
    -------
    The (dimensionless) Tisserand parameter T_J.
    """
    # T_J = a_J/a + 2 cos(i) sqrt( (a/a_J) (1 - e^2) )
    return SMA_JUPITER / a + 2 * np.cos(i) * np.sqrt((a / SMA_JUPITER) * (1 - e**2))
# Conversion vector for a classical-element column [a, e, i, omega, Omega, theta]:
# scales (AU, -, deg, deg, deg, deg) to (m, -, rad, rad, rad, rad).
AU_Deg2m_Rad = np.vstack((AU, 1, np.pi / 180 * np.ones((4, 1))))

# Mean orbital elements of the planets, one (6,1) column each, presumably
# ordered as [a (AU), e, i (deg), argument of perihelion (deg),
# longitude of ascending node (deg), mean longitude (deg)] — values match
# standard J2000 mean elements; confirm the last two columns against the source.
Planets = {'Mercury': np.vstack((0.387099, 0.205636, 7.004979, 29.127030, 48.330766, 252.250324)),
           'Venus': np.vstack((0.723336, 0.006777, 3.394676, 54.922625, 76.679843, 181.979100)),
           'Earth': np.vstack((1.000003, 0.016711, -0.000015, 102.937682, 0.000000, 100.464572)),
           'Mars': np.vstack((1.523710, 0.093394, 1.849691, -73.503169, 49.559539, -4.553432)),
           'Jupiter': np.vstack((5.202887, 0.048386, 1.304397, -85.745429, 100.473909, 34.396441)),
           'Saturn': np.vstack((9.536676,0.053862,2.485992,-21.063546,113.662424,49.954244)),
           'Uranus': np.vstack((19.189165,0.047257,0.772638,96.937351,74.016925,313.238105)),
           'Neptune': np.vstack((30.069923,0.008590,1.770043,-86.819463,131.784226,-55.120030))}
class OrbitObject(object):
    """
    Osculating heliocentric orbit of a solar-system object.

    Stores the six classical orbital elements (normalised to AU / degrees),
    derived quantities (perihelion, aphelion, longitude of perihelion,
    Tisserand parameter w.r.t. Jupiter), and the corrected geocentric
    radiant and geocentric speed.
    """

    def __init__(self,
                 orbit_type,
                 a, e, i, omega, Omega, theta,
                 ra_corr=np.nan*u.rad, dec_corr=np.nan*u.rad,
                 v_g=np.nan*u.m/u.second):

        self.orbit_type = orbit_type

        # Principal elements, normalised to conventional units
        self.semi_major_axis = a.to(u.au)
        self.eccentricity = e
        self.inclination = i.to(u.deg)
        self.argument_periapsis = omega.to(u.deg)
        self.longitude_ascending_node = Omega.to(u.deg)
        self.true_anomaly = theta.to(u.deg)

        # Derived quantities
        self.longitude_perihelion = (self.longitude_ascending_node
                                     + self.argument_periapsis) % (360 * u.deg)
        self.perihelion = (1 - self.eccentricity) * self.semi_major_axis
        self.aphelion = (1 + self.eccentricity) * self.semi_major_axis

        # Corrected geocentric radiant (RA wrapped to [0, 360) deg)
        self.corr_radiant_ra = (ra_corr.to(u.deg)) % (360 * u.deg)
        self.corr_radiant_dec = dec_corr.to(u.deg)

        # Ecliptic latitude of the radiant: express the radiant direction in
        # the heliocentric inertial frame and take its out-of-plane angle.
        radiant_eq = HCRS(ra=self.corr_radiant_ra, dec=self.corr_radiant_dec,
                          distance=1.0*u.au)
        radiant_ecl = HCRS2HCI(np.vstack(radiant_eq.cartesian.xyz.value))
        self.ecliptic_latitude = np.rad2deg(
            np.arcsin(radiant_ecl[2] / norm(radiant_ecl))) * u.deg

        self.velocity_g = v_g.to(u.m / u.second)
        self.T_j = self.tisserand_criterion_wrt_jupiter()

    def tisserand_criterion_wrt_jupiter(self):
        """Tisserand parameter of this orbit with respect to Jupiter."""
        return tisserand_wrt_jupiter(self.semi_major_axis,
                                     self.eccentricity,
                                     self.inclination)

    def __str__(self):
        # Human-readable dump of the main elements and the corrected radiant.
        parts = ["Semi-major axis: " + str(self.semi_major_axis),
                 "Eccentricity: " + str(self.eccentricity),
                 "Inclination: " + str(self.inclination),
                 "Argument of Periapsis: " + str(self.argument_periapsis),
                 "Longitude of Ascending Node: " + str(self.longitude_ascending_node),
                 "True Anomaly: " + str(self.true_anomaly),
                 "",
                 "Ra_corrected: " + str(self.corr_radiant_ra),
                 "Dec_corrected: " + str(self.corr_radiant_dec),
                 "Vel_g: " + str(self.velocity_g)]
        return "\n".join(parts)
'''
Functions deliberately defined outside of the native StateVector class so they
can be used in multithreaded / multiprocess calls
'''
def random_compute_orbit_ceplecha(sv):
    """
    Perturb the state vector's velocity and compute its orbit with the
    'Ceplecha' method; returns the mutated state vector.
    """
    sv.randomize_velocity_vector()
    sv.computeOrbit(orbit_computation_method='Ceplecha')
    return sv
def random_compute_orbit_integration_EOE(sv):
    """
    Perturb the state vector's velocity and compute its orbit with the
    'integrate_EOE' (equinoctial-element integration) method; returns the
    mutated state vector.
    """
    sv.randomize_velocity_vector()
    sv.computeOrbit(orbit_computation_method='integrate_EOE')
    return sv
def random_compute_orbit_integration_posvel(sv):
    """
    Perturb the state vector's velocity and compute its orbit with the
    'integrate_posvel' (position/velocity integration) method; returns the
    mutated state vector.
    """
    sv.randomize_velocity_vector()
    sv.computeOrbit(orbit_computation_method='integrate_posvel')
    return sv
def PlotOrbitalElements(COE, t_jd, t_soi, Sol):
    """
    Plot the time history of the six classical orbital elements.

    Parameters
    ----------
    COE : array-like, shape (6, N)
        Classical orbital elements over time in SI units
        [a (m), e, i (rad), omega (rad), Omega (rad), theta (rad)]
        (the scaling below implies metres and radians).
    t_jd : array-like, shape (N,)
        Corresponding epochs (Julian days).
    t_soi : sequence
        Sphere-of-influence crossing epoch(s); the first entry is marked
        with a blue vertical line on every panel.
    Sol : OrbitObject or 'NoSol'
        Published solution overlaid as green horizontal lines when given.
    """
    # One (ylabel, scale-to-display-units) pair per element row of COE.
    panels = [("Semi-major Axis (AU)", 1.0 / AU),
              ("Eccentricity", 1.0),
              ("Inclination (deg)", 180 / np.pi),
              ("Argument of Periapsis (deg)", 180 / np.pi),
              ("Longitude of the Ascending Node (deg)", 180 / np.pi),
              ("True Anomaly (deg)", 180 / np.pi)]

    plt.figure()
    for row, (ylabel, scale) in enumerate(panels):
        plt.subplot(3, 2, row + 1)
        # Colour was previously selected via a hard-coded 'i = 2' indirection
        # (marked '#FIXME error'); plot directly in red (the same colour).
        plt.plot(t_jd, COE[row] * scale, 'r')
        plt.axvline(x=t_soi[0], color='b')
        plt.grid()
        plt.xlabel("Time (JD)")
        plt.ylabel(ylabel)

    if Sol != 'NoSol':
        # Overlay the published solution on each corresponding panel.
        published = (Sol.semi_major_axis.value,
                     Sol.eccentricity,
                     Sol.inclination.value,
                     Sol.argument_periapsis.value,
                     Sol.longitude_ascending_node.value,
                     Sol.true_anomaly.value)
        for row, value in enumerate(published):
            plt.subplot(3, 2, row + 1)
            plt.axhline(value, color='g')

    plt.show()
def PlotOrbit3D(OrbObjList, t0=2457535.0, Sol='NoSol'):
    '''3D plot (axes in AU) of the determined orbit(s), the planets of the
    solar system, and an optional published solution orbit.
    t0 is retained for interface compatibility (currently unused).'''
    from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection

    def _sampled_positions(elements, n_points):
        # Sweep the true anomaly through one full revolution and return the
        # heliocentric-inertial positions along the orbit.
        sweep = np.vstack((np.zeros((5, n_points)), np.linspace(0, 2 * np.pi, n_points)))
        return OrbitalElements2PosVel(elements + sweep, 'Sun', 'Classical')

    fig = plt.figure()
    axes3d = fig.add_subplot(111, projection='3d')

    # Determined orbit(s) in red.
    for orbit in OrbObjList:
        elements = np.vstack((orbit.semi_major_axis.value,
                              orbit.eccentricity,
                              orbit.inclination.value,
                              orbit.argument_periapsis.value,
                              orbit.longitude_ascending_node.value,
                              orbit.true_anomaly.value)) * AU_Deg2m_Rad
        pos, _ = _sampled_positions(elements, 100)
        axes3d.plot(pos[0] / AU, pos[1] / AU, pos[2] / AU, color='r', label='Determined Orbit')

    # Planetary orbits in blue for context.
    for planet_name in Planets:
        pos, _ = _sampled_positions(Planets[planet_name] * AU_Deg2m_Rad, 200)
        axes3d.plot(pos[0] / AU, pos[1] / AU, pos[2] / AU, color='b')

    # Published solution (if supplied) in green.
    if Sol != 'NoSol':
        sol_elements = np.vstack((Sol.semi_major_axis.value,
                                  Sol.eccentricity,
                                  Sol.inclination.value,
                                  Sol.argument_periapsis.value,
                                  Sol.longitude_ascending_node.value,
                                  Sol.true_anomaly.value)) * AU_Deg2m_Rad
        pos, _ = _sampled_positions(sol_elements, 100)
        axes3d.plot(pos[0] / AU, pos[1] / AU, pos[2] / AU, color='g', label='Published Orbit')

    plt.legend()
    axes3d.set_xlim([-5, 5])
    axes3d.set_ylim([-5, 5])
    axes3d.set_zlim([-5, 5])
    plt.show()
def PlotPerts(Pert):
    """Log-scale plot of each perturbation acceleration source versus time
    relative to the final epoch. Pert is a sequence of rows
    [t (JD), Earth, Moon, Sun, J2, Drag] accelerations in m/s^2."""
    stacked = np.vstack(Pert).T
    epochs = stacked[0]
    plt.figure(figsize=(16, 9))
    rel_time = epochs - np.max(epochs)  # days before the final epoch
    plt.plot(rel_time, stacked[1], '-b', linewidth=3.0, label='Earth')
    plt.plot(rel_time, stacked[2], '--k', linewidth=3.0, label='Moon')
    plt.plot(rel_time, stacked[3], '-.r', linewidth=3.0, label='Sun')
    # J2 and drag rows may contain NaNs where those models were inactive;
    # mask them out so the log plot stays continuous.
    for row, fmt, name in ((4, ':g', 'J2'), (5, '-.c', 'Drag')):
        valid = ~np.isnan(stacked[row])
        plt.plot(rel_time[valid], stacked[row][valid], fmt, linewidth=3.0, label=name)
    plt.yscale('log'); plt.grid(True); plt.legend(loc='best')
    plt.xlabel('Relative Time [days]'); plt.ylabel('Perturbation Acceleration [m/s^2]')
    plt.show()
def PlotIntStep(t):
    """Plot the integrator's absolute timestep (log scale) against time
    relative to the final epoch.

    t : array of epochs in Julian days; consecutive differences give the
        step size, converted here to seconds.
    """
    t = np.asarray(t)
    # np.diff replaces the previous manual index loop building a Python list.
    dt_sec = np.abs(np.diff(t)) * 24 * 60 * 60
    plt.figure(figsize=(16, 9))
    t_rel = t - np.max(t)  # days before the final epoch
    plt.plot(t_rel[1:], dt_sec)
    plt.yscale('log'); plt.grid(True)
    plt.xlabel('Relative Time [days]'); plt.ylabel('Timestep [sec]')
    plt.show()
def ThirdBodyPerturbation(Pos, rho, mu):
    """Perturbing acceleration on a body at Pos due to a third body at rho,
    including the indirect term, using Battin's q/f formulation (which avoids
    the cancellation of differencing two nearly-equal gravity terms).

    Pos : (3,1) position of the perturbed body [m]
    rho : (3,1) position of the third body [m]
    mu  : standard gravitational parameter of the third body [m3/s2]
    Returns the (3,1) perturbing acceleration [m/s2].
    """
    # Battin's scalar q and its series factor f.
    q = np.dot(Pos.T, (Pos - 2 * rho) / (np.dot(rho.T, rho)))
    f = (3 * q + 3 * q**2 + q**3) / (1 + (1 + q)**1.5)
    separation = Pos - rho  # vector from the third body to the perturbed body
    return -mu * (Pos + f * rho) / norm(separation)**3
def NRLMSISE_00(pos, time, pos_type='eci'):
    '''
    NRLMSISE-00 empirical atmosphere model lookup. Courtesy of Ellie Sansom.

    Inputs:
        pos      - position; interpretation set by pos_type:
                   'eci'/'ecef' as metres, 'llh' as [lat (rad), lon (rad), alt (m)]
        time     - epoch as a Julian date (UTC)
        pos_type - 'eci' (default), 'ecef' or 'llh'
    Returns (tuple):
        T        - temperature at altitude [K]
        atm_pres - atmospheric pressure [Pa] (ideal gas law)
        po       - total mass density [kg/m3]
        sos      - speed of sound [m/s]
        dyn_vis  - dynamic viscosity [Pa s] (Sutherland's formula)
    '''
    from nrlmsise_00_header import nrlmsise_input, nrlmsise_output, nrlmsise_flags
    from nrlmsise_00 import gtd7
    time = Time(time, format='jd', scale='utc')
    # Convert ECI to LLH coordinates
    if pos_type == 'eci':
        Pos_LLH = ECEF2LLH(ECI2ECEF_pos(pos, time))
    elif pos_type == 'ecef':
        Pos_LLH = ECEF2LLH(pos)
    elif pos_type == 'llh':
        Pos_LLH = pos
    else:
        print('NRLMSISE_00 error: Invalid pos_type')
        # NOTE(review): exit() kills the whole process; a raised ValueError
        # would let callers recover. Left unchanged to preserve behaviour.
        exit()
    g_lat = np.rad2deg(Pos_LLH[0][0])
    g_long = np.rad2deg(Pos_LLH[1][0])
    alt = Pos_LLH[2][0]
    # Break up time into year, day of year, and seconds of the day
    yDay = time.yday.split(':'); yr = float(yDay[0]); doy = float(yDay[1])
    sec = float(yDay[2]) * 60*60 + float(yDay[3]) * 60 + float(yDay[4])
    # Assign our variables into the nrmsise inputs (model expects altitude in km)
    Input = nrlmsise_input(yr, doy, sec, alt/1000, g_lat, g_long)
    Output = nrlmsise_output(); Flags = nrlmsise_flags()
    # Switches: enable all optional model components
    for i in range(1, 24):
        Flags.switches[i]=1
    # GTD7 atmospheric model subroutine
    gtd7(Input, Flags, Output)
    # Temperature at alt [deg K]
    T = Output.t[1]
    # Molecular number densities [m-3]
    He = Output.d[0] # He
    O = Output.d[1] # O
    N2 = Output.d[2] # N2
    O2 = Output.d[3] # O2
    Ar = Output.d[4] # Ar
    H = Output.d[6] # H
    N = Output.d[7] # N
    # ano_O = Output.d[8] # Anomalous oxygen
    sum_mass = He + O + N2 + O2 + Ar + H + N
    # Molar mass
    He_mass = 4.0026 # g/mol
    O_mass = 15.9994 # g/mol
    N2_mass = 28.013 # g/mol
    O2_mass = 31.998 # g/mol
    Ar_mass = 39.948 # g/mol
    H_mass = 1.0079 # g/mol
    N_mass = 14.0067 # g/mol
    # Molecular weight of air [kg/mol]: number-density-weighted mean, /1000 for g->kg
    mol_mass_air = (He_mass * He + O_mass * O + N2_mass * N2 + O2_mass * O2
                    + Ar_mass * Ar + H_mass * H + N_mass * N) / (1000 * sum_mass)
    # Total mass density [kg*m-3] (presumably g/cm3 from gtd7 -- verify against its docs)
    po = Output.d[5] * 1000
    Ru = 8.3144621 # Universal gas constant [J/(K*mol)]
    R = Ru / mol_mass_air # Individual gas constant [J/(kg*K)] #287.058
    # Ideal gas law
    atm_pres = po * T * R
    # Speed of sound in atm
    sos = 331.3 * np.sqrt(1 + T / 273.15)
    # Dynamic viscosity (http://en.wikipedia.org/wiki/Viscosity)
    C = 120 #Sutherland's constant for air [deg K]
    mu_ref = 18.27e-6 # Reference viscosity [[mu_Pa s] * e-6]
    T_ref = 291.15 # Reference temperature [deg K]
    dyn_vis = mu_ref * (T_ref + C) / (T + C) * (T / T_ref)**1.5
    return T, atm_pres, po, sos, dyn_vis
# def compute_infinity_radiant(stateVec):
# ''' This method computing the apparent radiant, it doesn't consider the zenith attraction '''
# Pos_geo = stateVec.position
# Vel_geo = stateVec.vel_xyz
# t0 = stateVec.epoch
# # Compute radiant (apparent ORIGIN of meteoroid)
# Vel_eci = ECEF2ECI(Pos_geo, Vel_geo, t0)[1]
# ra_eci = np.arctan2(-Vel_eci[1], -Vel_eci[0])
# dec_eci = np.arcsin(-Vel_eci[2] / norm(Vel_eci))
# # ^-- redundant information. Already have it in metadata
# return ra_eci, dec_eci
def compute_cartesian_velocities_from_radiant(stateVec):
    '''
    Convert the apparent ECEF radiant (RA/Dec in degrees) and entry speed
    into a (3,1) Cartesian ECEF velocity vector. The sign is flipped because
    the radiant is the direction the meteoroid comes FROM.
    '''
    ra = np.deg2rad(stateVec.ra_ecef_inf)
    dec = np.deg2rad(stateVec.dec_ecef_inf)
    # Unit vector towards the radiant.
    radiant_dir = np.vstack((np.cos(ra) * np.cos(dec),
                             np.sin(ra) * np.cos(dec),
                             np.sin(dec)))
    return -stateVec.velocity_inf * radiant_dir
def SimilarityCriterion(COE1, COE2, method='SH'):
    '''
    Orbital similarity (D-)criterion between two orbits; lower means more similar.

    COE1, COE2: either 6-row ndarrays of classical elements
        [a (m), e, i (rad), arg. perihelion (rad), node (rad), true anomaly (rad)]
        or objects exposing astropy-quantity element attributes (OrbitObject).
    method: 'SH' Southworth & Hawkins (1963);
            'D'  Drummond (1981);
            'H'  Jopek (1993) hybrid.
    Raises ValueError for an unrecognised method (previously this fell through
    to an UnboundLocalError on the return statement).
    '''
    if isinstance(COE1, np.ndarray):
        a1 = COE1[0]/AU; a2 = COE2[0]/AU # [AU]
        e1 = COE1[1]; e2 = COE2[1] # []
        i1 = COE1[2]; i2 = COE2[2] # [rad]
        w1 = COE1[3]; w2 = COE2[3] # [rad]
        W1 = COE1[4]; W2 = COE2[4] # [rad]
    else:
        a1 = COE1.semi_major_axis.value; a2 = COE2.semi_major_axis.value # [AU]
        e1 = COE1.eccentricity; e2 = COE2.eccentricity # []
        i1 = COE1.inclination.to(u.rad).value; i2 = COE2.inclination.to(u.rad).value # [rad]
        w1 = COE1.argument_periapsis.to(u.rad).value; w2 = COE2.argument_periapsis.to(u.rad).value # [rad]
        W1 = COE1.longitude_ascending_node.to(u.rad).value; W2 = COE2.longitude_ascending_node.to(u.rad).value # [rad]
    # Perihelion distances
    q1 = a1 * (1 - e1) # [AU]
    q2 = a2 * (1 - e2) # [AU]
    # Angle between the orbital planes (I21)
    var = (2 * np.sin((i2 - i1) / 2))**2 + np.sin(i1) * np.sin(i2) * (2 * np.sin((W2 - W1) / 2))**2
    I21 = 2 * np.arcsin(np.sqrt(var) / 2)
    if method == 'SH':
        # Difference between orbits longitude of perihelion (pi21)
        pi21 = w2 - w1 + 2 * np.arcsin(np.cos((i2 + i1) / 2) * np.sin((W2 - W1) / 2) / np.cos(I21 / 2))
        Similarity2 = (e2 - e1)**2 + (q2 - q1)**2 + var + (((e2 + e1) / 2) * (2 * np.sin(pi21 / 2)))**2
    elif method == 'D':
        # Angle between the orbital lines of apsides (theta21)
        l1 = W1 + np.arctan(np.cos(i1) * np.tan(w1)); b1 = np.arcsin(np.sin(i1) * np.sin(w1))
        l2 = W2 + np.arctan(np.cos(i2) * np.tan(w2)); b2 = np.arcsin(np.sin(i2) * np.sin(w2))
        theta21 = np.arccos(np.sin(b1) * np.sin(b2) + np.cos(b1) * np.cos(b2) * np.cos(l2 - l1))
        Similarity2 = ((e2 - e1) / (e2 + e1))**2 + ((q2 - q1) / (q2 + q1))**2 + \
            (I21 / np.pi)**2 + ((e2 + e1) / 2)**2 * (theta21 / np.pi)**2
    elif method == 'H':
        # Difference between orbits longitude of perihelion (pi21)
        pi21 = w2 - w1 + 2 * np.arcsin(np.cos((i2 + i1) / 2) * np.sin((W2 - W1) / 2) / np.cos(I21 / 2))
        Similarity2 = (e2 - e1)**2 + ((q2 - q1) / (q2 + q1))**2 + var + \
            (((e2 + e1) / 2) * (2 * np.sin(pi21 / 2)))**2
    else:
        raise ValueError(f"Unknown similarity method: {method!r} (expected 'SH', 'D' or 'H')")
    return np.sqrt(Similarity2)
def generate_ephemeris(pos_hci, t_jd):
    """Build an ephemeris dictionary for heliocentric positions at given epochs.

    pos_hci : 3xN heliocentric inertial positions [m].
    t_jd    : N epochs as Julian dates (UTC).
    Returns a dict with ISO datetimes, MJDs, ECI components [m], phase and
    elongation angles [deg], RA/Dec [deg], and Sun/Earth distances [km].
    """
    # Build the Time object once (it was previously constructed twice,
    # once for .isot and once for .mjd).
    t = Time(t_jd, format='jd', scale='utc')
    ephem_dict = {'datetime': t.isot}
    ephem_dict['MJD'] = t.mjd
    # distance to sun
    ephem_dict['distance_to_sun'] = norm(pos_hci, axis=0) / 1000 #km
    # Convert to eci coordinates
    pos_eci = HCI2ECI_pos(pos_hci, t_jd)
    ephem_dict['pos_eci_x'] = pos_eci[0]
    ephem_dict['pos_eci_y'] = pos_eci[1]
    ephem_dict['pos_eci_z'] = pos_eci[2]
    # NOTE(review): HCI2HCRS is not among this file's visible imports from
    # trajectory_utilities -- confirm it is imported, else this raises NameError.
    pos_hcrs = HCI2HCRS(pos_hci)
    # Phase angle: Sun-object-Earth angle from the two object-relative vectors.
    ephem_dict['phase_angle'] = np.rad2deg(np.arccos(np.sum(pos_hcrs * pos_eci, axis=0)
                                          / (norm(pos_hcrs, axis=0) * norm(pos_eci, axis=0))))
    # Elongation angle: Sun-Earth-object angle.
    pos_sun = pos_eci - pos_hcrs
    ephem_dict['elongation_angle'] = np.rad2deg(np.arccos(np.sum(pos_sun * pos_eci, axis=0)
                                               / (norm(pos_sun, axis=0) * norm(pos_eci, axis=0))))
    # Calculate ephemeris (geocentric distance reused for Dec and range).
    dist = norm(pos_eci, axis=0) #m
    ephem_dict['ra'] = np.rad2deg(np.arctan2(pos_eci[1], pos_eci[0])) % 360 #deg
    ephem_dict['dec'] = np.rad2deg(np.arcsin(pos_eci[2] / dist)) #deg
    ephem_dict['distance_to_earth'] = dist / 1000 #km
    return ephem_dict
| 37.485149 | 118 | 0.592974 |
__author__ = "Hadrien A.R. Devillepoix, Trent Jansen-Sturgeon "
__copyright__ = "Copyright 2016-2017, Desert Fireball Network"
__license__ = "MIT"
__version__ = "1.0"
import numpy as np
from numpy.linalg import norm
import matplotlib.pyplot as plt
from astropy import units as u
from astropy.time import Time
from astropy.coordinates import HCRS, ITRS, GCRS
from astropy.utils.iers import IERS_A, IERS_A_URL, IERS
from astropy.utils.data import download_file
from trajectory_utilities import ECEF2LLH, \
EarthPosition, HCRS2HCI, HCI2ECI_pos, \
OrbitalElements2PosVel, ECI2ECEF_pos
try:
iers_a_file = download_file(IERS_A_URL, cache=True)
iers_a = IERS_A.open(iers_a_file)
IERS.iers_table = iers_a
except:
print('IERS_A_URL is temporarily unavailable')
pass
AU = 1*u.au.to(u.m)
SMA_JUPITER = 5.20336301 * u.au
def tisserand_wrt_jupiter(a, e, i):
    """Return the Tisserand parameter of an orbit with respect to Jupiter.

    a : semi-major axis (astropy Quantity, commensurable with SMA_JUPITER)
    e : eccentricity (dimensionless)
    i : inclination (astropy angle Quantity -- np.cos handles the unit)
    """
    T_j = (SMA_JUPITER / a +
           2 * np.cos(i) *
           np.sqrt(a / SMA_JUPITER * (1 - e**2)))
    return T_j
AU_Deg2m_Rad = np.vstack((AU, 1, np.pi / 180 * np.ones((4, 1))))
Planets = {'Mercury': np.vstack((0.387099, 0.205636, 7.004979, 29.127030, 48.330766, 252.250324)),
'Venus': np.vstack((0.723336, 0.006777, 3.394676, 54.922625, 76.679843, 181.979100)),
'Earth': np.vstack((1.000003, 0.016711, -0.000015, 102.937682, 0.000000, 100.464572)),
'Mars': np.vstack((1.523710, 0.093394, 1.849691, -73.503169, 49.559539, -4.553432)),
'Jupiter': np.vstack((5.202887, 0.048386, 1.304397, -85.745429, 100.473909, 34.396441)),
'Saturn': np.vstack((9.536676,0.053862,2.485992,-21.063546,113.662424,49.954244)),
'Uranus': np.vstack((19.189165,0.047257,0.772638,96.937351,74.016925,313.238105)),
'Neptune': np.vstack((30.069923,0.008590,1.770043,-86.819463,131.784226,-55.120030))}
class OrbitObject(object):
    """Heliocentric orbit expressed in classical elements, together with the
    corrected radiant and geocentric speed it was derived from.

    All element attributes are astropy Quantities; derived quantities
    (perihelion/aphelion distance, longitude of perihelion, ecliptic latitude
    of the radiant, Tisserand parameter) are computed on construction.
    """
    def __init__(self,
                 orbit_type,
                 a, e, i, omega, Omega, theta,
                 ra_corr=np.nan*u.rad, dec_corr=np.nan*u.rad,
                 v_g=np.nan*u.m/u.second):
        """Store the elements and derive auxiliary values.

        orbit_type : label for the method that produced this orbit.
        a, e, i    : semi-major axis, eccentricity, inclination.
        omega      : argument of perihelion.
        Omega      : longitude of the ascending node.
        theta      : true anomaly.
        ra_corr, dec_corr : corrected (geocentric) radiant RA/Dec; NaN if unknown.
        v_g        : geocentric speed; NaN if unknown.
        """
        self.semi_major_axis = a.to(u.au)
        self.eccentricity = e
        self.inclination = i.to(u.deg)
        self.argument_periapsis = omega.to(u.deg)
        self.longitude_ascending_node = Omega.to(u.deg)
        # Longitude of perihelion = node + argument of perihelion, wrapped to [0, 360) deg.
        self.longitude_perihelion = (self.longitude_ascending_node + self.argument_periapsis) % (360 * u.deg)
        self.true_anomaly = theta.to(u.deg)
        self.orbit_type = orbit_type
        # Apsis distances follow directly from a and e.
        self.perihelion = (1 - self.eccentricity) * self.semi_major_axis
        self.aphelion = (1 + self.eccentricity) * self.semi_major_axis
        self.corr_radiant_ra = (ra_corr.to(u.deg)) % (360 * u.deg)
        self.corr_radiant_dec = dec_corr.to(u.deg)
        # Ecliptic latitude of the radiant: rotate a unit radiant vector into
        # the heliocentric-inertial (ecliptic) frame and take its elevation.
        radiant = HCRS(ra=self.corr_radiant_ra, dec=self.corr_radiant_dec, distance=1.0*u.au)
        ecpliptic_radiant = HCRS2HCI(np.vstack(radiant.cartesian.xyz.value))
        self.ecliptic_latitude = np.rad2deg(np.arcsin(ecpliptic_radiant[2] / norm(ecpliptic_radiant)))*u.deg
        self.velocity_g = v_g.to(u.m / u.second)
        self.T_j = self.tisserand_criterion_wrt_jupiter()
    def tisserand_criterion_wrt_jupiter(self):
        """Tisserand parameter of this orbit with respect to Jupiter."""
        return tisserand_wrt_jupiter(self.semi_major_axis, self.eccentricity, self.inclination)
    def __str__(self):
        """Multi-line human-readable summary of the elements and radiant."""
        return str("Semi-major axis:              " + str(self.semi_major_axis) + "\n" +
                   "Eccentricity:                 " + str(self.eccentricity) + "\n" +
                   "Inclination:                  " + str(self.inclination) + "\n" +
                   "Argument of Periapsis:        " + str(self.argument_periapsis) + "\n" +
                   "Longitude of Ascending Node:  " + str(self.longitude_ascending_node) + "\n" +
                   "True Anomaly:                 " + str(self.true_anomaly) + "\n\n" +
                   "Ra_corrected:  " + str(self.corr_radiant_ra) + "\n" +
                   "Dec_corrected: " + str(self.corr_radiant_dec) + "\n" +
                   "Vel_g:         " + str(self.velocity_g))
def random_compute_orbit_ceplecha(sv):
    """Draw a randomized velocity vector on the state vector (Monte Carlo
    sample) and compute its orbit with the 'Ceplecha' method; returns the
    mutated state vector (convenient for pool.map)."""
    sv.randomize_velocity_vector()
    sv.computeOrbit(orbit_computation_method='Ceplecha')
    return sv
def random_compute_orbit_integration_EOE(sv):
    """Draw a randomized velocity vector on the state vector (Monte Carlo
    sample) and compute its orbit by numerical integration of equinoctial
    orbital elements ('integrate_EOE'); returns the mutated state vector."""
    sv.randomize_velocity_vector()
    sv.computeOrbit(orbit_computation_method='integrate_EOE')
    return sv
def random_compute_orbit_integration_posvel(sv):
    """Draw a randomized velocity vector on the state vector (Monte Carlo
    sample) and compute its orbit by direct position/velocity integration
    ('integrate_posvel'); returns the mutated state vector."""
    sv.randomize_velocity_vector()
    sv.computeOrbit(orbit_computation_method='integrate_posvel')
    return sv
def PlotOrbitalElements(COE, t_jd, t_soi, Sol):
Colour = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
i = 2
plt.figure()
plt.subplot(321)
plt.plot(t_jd, COE[0] / AU, Colour[i])
plt.axvline(x=t_soi[0], color='b'); plt.grid()
plt.xlabel("Time (JD)"); plt.ylabel("Semi-major Axis (AU)")
plt.subplot(322)
plt.plot(t_jd, COE[1], Colour[i])
plt.axvline(x=t_soi[0], color='b'); plt.grid()
plt.xlabel("Time (JD)"); plt.ylabel("Eccentricity")
plt.subplot(323)
plt.plot(t_jd, COE[2] * 180 / np.pi, Colour[i])
plt.axvline(x=t_soi[0], color='b'); plt.grid()
plt.xlabel("Time (JD)"); plt.ylabel("Inclination (deg)")
plt.subplot(324)
plt.plot(t_jd, COE[3] * 180 / np.pi, Colour[i])
plt.axvline(x=t_soi[0], color='b'); plt.grid()
plt.xlabel("Time (JD)"); plt.ylabel("Argument of Periapsis (deg)")
plt.subplot(325)
plt.plot(t_jd, COE[4] * 180 / np.pi, Colour[i])
plt.axvline(x=t_soi[0], color='b'); plt.grid()
plt.xlabel("Time (JD)"); plt.ylabel("Longitude of the Ascending Node (deg)")
plt.subplot(326)
plt.plot(t_jd, COE[5] * 180 / np.pi, Colour[i])
plt.axvline(x=t_soi[0], color='b'); plt.grid()
plt.xlabel("Time (JD)"); plt.ylabel("True Anomaly (deg)")
if Sol != 'NoSol':
plt.subplot(321)
plt.axhline(Sol.semi_major_axis.value, color='g')
plt.subplot(322)
plt.axhline(Sol.eccentricity, color='g')
plt.subplot(323)
plt.axhline(Sol.inclination.value, color='g')
plt.subplot(324)
plt.axhline(Sol.argument_periapsis.value, color='g')
plt.subplot(325)
plt.axhline(Sol.longitude_ascending_node.value, color='g')
plt.subplot(326)
plt.axhline(Sol.true_anomaly.value, color='g')
plt.show()
def PlotOrbit3D(OrbObjList, t0=2457535.0, Sol='NoSol'):
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for OrbObj in OrbObjList:
COE = np.vstack((OrbObj.semi_major_axis.value,
OrbObj.eccentricity,
OrbObj.inclination.value,
OrbObj.argument_periapsis.value,
OrbObj.longitude_ascending_node.value,
OrbObj.true_anomaly.value)) * AU_Deg2m_Rad
COE = COE + np.vstack((np.zeros((5, 100)), np.linspace(0, 2 * np.pi, 100)))
[Pos_HCI, Vel_HCI] = OrbitalElements2PosVel(COE, 'Sun', 'Classical')
ax.plot(Pos_HCI[0]/AU, Pos_HCI[1]/AU, Pos_HCI[2]/AU, color='r', label='Determined Orbit')
for Planet in Planets:
COE = Planets[Planet] * AU_Deg2m_Rad
COEs = COE + np.vstack((np.zeros((5, 200)), np.linspace(0, 2 * np.pi, 200)))
[pos, vel] = OrbitalElements2PosVel(COEs, 'Sun', 'Classical')
ax.plot(pos[0]/AU, pos[1]/AU, pos[2]/AU, color='b')
if Sol != 'NoSol':
Sol_oe = np.vstack((Sol.semi_major_axis.value,
Sol.eccentricity,
Sol.inclination.value,
Sol.argument_periapsis.value,
Sol.longitude_ascending_node.value,
Sol.true_anomaly.value)) * AU_Deg2m_Rad
Sol_oe = Sol_oe + np.vstack((np.zeros((5, 100)), np.linspace(0, 2 * np.pi, 100)))
[pos, vel] = OrbitalElements2PosVel(Sol_oe, 'Sun', 'Classical')
ax.plot(pos[0]/AU, pos[1]/AU, pos[2]/AU, color='g', label='Published Orbit')
plt.legend()
ax.set_xlim([-5, 5])
ax.set_ylim([-5, 5])
ax.set_zlim([-5, 5])
plt.show()
def PlotPerts(Pert):
PPert = np.vstack(Pert).T; t = PPert[0]
plt.figure(figsize=(16,9))
t_rel = t - np.max(t)
plt.plot(t_rel, PPert[1], '-b', linewidth=3.0, label='Earth')
plt.plot(t_rel, PPert[2], '--k', linewidth=3.0, label='Moon')
plt.plot(t_rel, PPert[3], '-.r', linewidth=3.0, label='Sun')
PertJ2 = PPert[4][~np.isnan(PPert[4])]
plt.plot(t_rel[~np.isnan(PPert[4])], PertJ2, ':g', linewidth=3.0, label='J2')
PertDrag = PPert[5][~np.isnan(PPert[5])]
plt.plot(t_rel[~np.isnan(PPert[5])], PertDrag, '-.c', linewidth=3.0, label='Drag')
plt.yscale('log'); plt.grid(True); plt.legend(loc='best')
plt.xlabel('Relative Time [days]'); plt.ylabel('Perturbation Acceleration [m/s^2]')
plt.show()
def PlotIntStep(t):
dt=[]
for k in range(len(t)-1):
dt.append((t[k+1] - t[k]) * 24*60*60)
plt.figure(figsize=(16,9))
t_rel = t - np.max(t)
plt.plot(t_rel[1:], abs(np.array(dt)))
plt.yscale('log'); plt.grid(True)
plt.xlabel('Relative Time [days]'); plt.ylabel('Timestep [sec]')
plt.show()
def ThirdBodyPerturbation(Pos, rho, mu):
q = np.dot(Pos.T, (Pos - 2 * rho) / (np.dot(rho.T, rho)))
f = (3 * q + 3 * q**2 + q**3) / (1 + (1 + q)**1.5)
# Third body perturbation acceleration (with indirect term)
u = -mu * (Pos + f * rho) / ((norm(Pos - rho))**3)
return u
def NRLMSISE_00(pos, time, pos_type='eci'):
from nrlmsise_00_header import nrlmsise_input, nrlmsise_output, nrlmsise_flags
from nrlmsise_00 import gtd7
time = Time(time, format='jd', scale='utc')
# Convert ECI to LLH coordinates
if pos_type == 'eci':
Pos_LLH = ECEF2LLH(ECI2ECEF_pos(pos, time))
elif pos_type == 'ecef':
Pos_LLH = ECEF2LLH(pos)
elif pos_type == 'llh':
Pos_LLH = pos
else:
print('NRLMSISE_00 error: Invalid pos_type')
exit()
g_lat = np.rad2deg(Pos_LLH[0][0])
g_long = np.rad2deg(Pos_LLH[1][0])
alt = Pos_LLH[2][0]
# Break up time into year, day of year, and seconds of the day
yDay = time.yday.split(':'); yr = float(yDay[0]); doy = float(yDay[1])
sec = float(yDay[2]) * 60*60 + float(yDay[3]) * 60 + float(yDay[4])
# Assign our variables into the nrmsise inputs
Input = nrlmsise_input(yr, doy, sec, alt/1000, g_lat, g_long)
Output = nrlmsise_output(); Flags = nrlmsise_flags()
# Switches
for i in range(1, 24):
Flags.switches[i]=1
# GTD7 atmospheric model subroutine
gtd7(Input, Flags, Output)
# Temperature at alt [deg K]
T = Output.t[1]
# Molecular number densities [m-3]
He = Output.d[0] # He
O = Output.d[1] # O
N2 = Output.d[2] # N2
O2 = Output.d[3] # O2
Ar = Output.d[4] # Ar
H = Output.d[6] # H
N = Output.d[7] # N
# ano_O = Output.d[8] # Anomalous oxygen
sum_mass = He + O + N2 + O2 + Ar + H + N
# Molar mass
He_mass = 4.0026 # g/mol
O_mass = 15.9994 # g/mol
N2_mass = 28.013 # g/mol
O2_mass = 31.998 # g/mol
Ar_mass = 39.948 # g/mol
H_mass = 1.0079 # g/mol
N_mass = 14.0067 # g/mol
# Molecular weight of air [kg/mol]
mol_mass_air = (He_mass * He + O_mass * O + N2_mass * N2 + O2_mass * O2
+ Ar_mass * Ar + H_mass * H + N_mass * N) / (1000 * sum_mass)
# Total mass density [kg*m-3]
po = Output.d[5] * 1000
Ru = 8.3144621 # Universal gas constant [J/(K*mol)]
R = Ru / mol_mass_air # Individual gas constant [J/(kg*K)] #287.058
# Ideal gas law
atm_pres = po * T * R
# Speed of sound in atm
sos = 331.3 * np.sqrt(1 + T / 273.15)
# Dynamic viscosity (http://en.wikipedia.org/wiki/Viscosity)
C = 120 #Sutherland's constant for air [deg K]
mu_ref = 18.27e-6
T_ref = 291.15
dyn_vis = mu_ref * (T_ref + C) / (T + C) * (T / T_ref)**1.5
return T, atm_pres, po, sos, dyn_vis
# Pos_geo = stateVec.position
# Vel_geo = stateVec.vel_xyz
# t0 = stateVec.epoch
# # Compute radiant (apparent ORIGIN of meteoroid)
# Vel_eci = ECEF2ECI(Pos_geo, Vel_geo, t0)[1]
# ra_eci = np.arctan2(-Vel_eci[1], -Vel_eci[0])
# dec_eci = np.arcsin(-Vel_eci[2] / norm(Vel_eci))
# # ^-- redundant information. Already have it in metadata
# return ra_eci, dec_eci
def compute_cartesian_velocities_from_radiant(stateVec):
vel_geo = -(stateVec.velocity_inf *
np.vstack((np.cos(np.deg2rad(stateVec.ra_ecef_inf)) * np.cos(np.deg2rad(stateVec.dec_ecef_inf)),
np.sin(np.deg2rad(stateVec.ra_ecef_inf)) * np.cos(np.deg2rad(stateVec.dec_ecef_inf)),
np.sin(np.deg2rad(stateVec.dec_ecef_inf)))))
return vel_geo
def SimilarityCriterion(COE1, COE2, method='SH'):
if type(COE1) == np.ndarray:
a1 = COE1[0]/AU; a2 = COE2[0]/AU # [AU]
e1 = COE1[1]; e2 = COE2[1] # []
i1 = COE1[2]; i2 = COE2[2] # [rad]
w1 = COE1[3]; w2 = COE2[3] # [rad]
W1 = COE1[4]; W2 = COE2[4] # [rad]
else:
a1 = COE1.semi_major_axis.value; a2 = COE2.semi_major_axis.value # [AU]
e1 = COE1.eccentricity; e2 = COE2.eccentricity # []
i1 = COE1.inclination.to(u.rad).value; i2 = COE2.inclination.to(u.rad).value # [rad]
w1 = COE1.argument_periapsis.to(u.rad).value; w2 = COE2.argument_periapsis.to(u.rad).value # [rad]
W1 = COE1.longitude_ascending_node.to(u.rad).value; W2 = COE2.longitude_ascending_node.to(u.rad).value # [rad]
q1 = a1 * (1 - e1) # [AU]
q2 = a2 * (1 - e2) # [AU]
# Angle between the orbital planes (I21)
var = (2 * np.sin((i2 - i1) / 2))**2 + np.sin(i1) * np.sin(i2) * (2 * np.sin((W2 - W1) / 2))**2
I21 = 2 * np.arcsin(np.sqrt(var) / 2)
if method == 'SH':
# Difference between orbits longitude of perihelion (pi21)
pi21 = w2 - w1 + 2 * np.arcsin(np.cos((i2 + i1) / 2) * np.sin((W2 - W1) / 2) / np.cos(I21 / 2))
Similarity2 = (e2 - e1)**2 + (q2 - q1)**2 + var + (((e2 + e1) / 2) * (2 * np.sin(pi21 / 2)))**2
Similarity = np.sqrt(Similarity2)
elif method == 'D':
# Angle between the orbital lines of apsides (theta21)
# l1 = W1 + np.arcsin(np.cos(i1) * np.tan(w1)); b1 = np.arcsin(np.sin(i1) * np.sin(w1))
# l2 = W2 + np.arcsin(np.cos(i2) * np.tan(w2)); b2 = np.arcsin(np.sin(i2) * np.sin(w2))
l1 = W1 + np.arctan(np.cos(i1) * np.tan(w1)); b1 = np.arcsin(np.sin(i1) * np.sin(w1))
l2 = W2 + np.arctan(np.cos(i2) * np.tan(w2)); b2 = np.arcsin(np.sin(i2) * np.sin(w2))
theta21 = np.arccos(np.sin(b1) * np.sin(b2) + np.cos(b1) * np.cos(b2) * np.cos(l2 - l1))
Similarity2 = ((e2 - e1) / (e2 + e1))**2 + ((q2 - q1) / (q2 + q1))**2 + \
(I21 / np.pi)**2 + ((e2 + e1) / 2)**2 * (theta21 / np.pi)**2
Similarity = np.sqrt(Similarity2)
elif method == 'H':
# Difference between orbits longitude of perihelion (pi21)
pi21 = w2 - w1 + 2 * np.arcsin(np.cos((i2 + i1) / 2) * np.sin((W2 - W1) / 2) / np.cos(I21 / 2))
Similarity2 = (e2 - e1)**2 + ((q2 - q1) / (q2 + q1))**2 + var + \
(((e2 + e1) / 2) * (2 * np.sin(pi21 / 2)))**2
Similarity = np.sqrt(Similarity2)
return Similarity
def generate_ephemeris(pos_hci, t_jd):
# Save the datetime
ephem_dict = {'datetime': Time(t_jd, format='jd', scale='utc').isot}
ephem_dict['MJD'] = Time(t_jd, format='jd', scale='utc').mjd
# distance to sun
ephem_dict['distance_to_sun'] = norm(pos_hci, axis=0) / 1000 #km
# Convert to eci coordinates
pos_eci = HCI2ECI_pos(pos_hci, t_jd)
ephem_dict['pos_eci_x'] = pos_eci[0]
ephem_dict['pos_eci_y'] = pos_eci[1]
ephem_dict['pos_eci_z'] = pos_eci[2]
pos_hcrs = HCI2HCRS(pos_hci)
# Calculate phase angle
ephem_dict['phase_angle'] = np.rad2deg(np.arccos(np.sum(pos_hcrs * pos_eci, axis=0)
/ (norm(pos_hcrs, axis=0) * norm(pos_eci, axis=0))))
# Calculate elongation angle
pos_sun = pos_eci - pos_hcrs
ephem_dict['elongation_angle'] = np.rad2deg(np.arccos(np.sum(pos_sun * pos_eci, axis=0)
/ (norm(pos_sun, axis=0) * norm(pos_eci, axis=0))))
# Calculate ephemeris
dist = norm(pos_eci, axis=0) #m
ephem_dict['ra'] = np.rad2deg(np.arctan2(pos_eci[1], pos_eci[0]))%360 #deg
ephem_dict['dec'] = np.rad2deg(np.arcsin(pos_eci[2] / dist)) #deg
ephem_dict['distance_to_earth'] = norm(pos_eci, axis=0) / 1000 #km
return ephem_dict
| true | true |
f72a9c84ade667bf4db98b90f5ec6bc9cc38d9af | 5,205 | py | Python | tests/integration/modules/hosts.py | jeblair/salt | 24bdca62c1d43df198e07e54cbdd0e6397243f37 | [
"Apache-2.0"
] | 1 | 2020-09-06T16:03:14.000Z | 2020-09-06T16:03:14.000Z | tests/integration/modules/hosts.py | jeblair/salt | 24bdca62c1d43df198e07e54cbdd0e6397243f37 | [
"Apache-2.0"
] | null | null | null | tests/integration/modules/hosts.py | jeblair/salt | 24bdca62c1d43df198e07e54cbdd0e6397243f37 | [
"Apache-2.0"
] | null | null | null | '''
Test the hosts module
'''
# Import python libs
import os
import shutil
# Import Salt libs
import integration
HFN = os.path.join(integration.TMP, 'hosts')
class HostsModuleTest(integration.ModuleCase):
    '''
    Test the hosts module
    '''
    def __clean_hosts(self):
        '''
        Reset the tmp hosts file from the known-good fixture copy
        '''
        shutil.copyfile(os.path.join(integration.FILES, 'hosts'), HFN)

    def __clear_hosts(self):
        '''
        Delete the tmp hosts file
        '''
        if os.path.isfile(HFN):
            os.remove(HFN)

    def tearDown(self):
        '''
        Make sure the tmp hosts file is gone
        '''
        self.__clear_hosts()

    def test_list_hosts(self):
        '''
        hosts.list_hosts
        '''
        self.__clean_hosts()
        hosts = self.run_function('hosts.list_hosts')
        self.assertEqual(len(hosts), 6)
        self.assertEqual(hosts['::1'], ['ip6-localhost', 'ip6-loopback'])
        self.assertEqual(hosts['127.0.0.1'], ['localhost', 'myname'])

    def test_list_hosts_nofile(self):
        '''
        hosts.list_hosts
        without a hosts file
        '''
        # Reuse the helper rather than duplicating the isfile/remove dance.
        self.__clear_hosts()
        hosts = self.run_function('hosts.list_hosts')
        self.assertEqual(hosts, {})

    def test_get_ip(self):
        '''
        hosts.get_ip
        '''
        self.__clean_hosts()
        self.assertEqual(self.run_function('hosts.get_ip', ['myname']), '127.0.0.1')
        self.assertEqual(self.run_function('hosts.get_ip', ['othername']), '')
        # With no hosts file at all, lookups must still return empty.
        self.__clear_hosts()
        self.assertEqual(self.run_function('hosts.get_ip', ['othername']), '')

    def test_get_alias(self):
        '''
        hosts.get_alias
        '''
        self.__clean_hosts()
        self.assertEqual(self.run_function('hosts.get_alias', ['127.0.0.1']), ['localhost', 'myname'])
        self.assertEqual(self.run_function('hosts.get_alias', ['127.0.0.2']), [])
        # With no hosts file at all, lookups must still return empty.
        self.__clear_hosts()
        self.assertEqual(self.run_function('hosts.get_alias', ['127.0.0.1']), [])

    def test_has_pair(self):
        '''
        hosts.has_pair
        '''
        self.__clean_hosts()
        self.assertTrue(self.run_function('hosts.has_pair', ['127.0.0.1', 'myname']))
        self.assertFalse(self.run_function('hosts.has_pair', ['127.0.0.1', 'othername']))

    def test_set_host(self):
        '''
        hosts.set_host
        '''
        self.__clean_hosts()
        assert self.run_function('hosts.set_host', ['192.168.1.123', 'newip'])
        self.assertTrue(self.run_function('hosts.has_pair', ['192.168.1.123', 'newip']))
        self.assertEqual(len(self.run_function('hosts.list_hosts')), 7)
        # set_host replaces every alias bound to the IP, not just appends.
        assert self.run_function('hosts.set_host', ['127.0.0.1', 'localhost'])
        self.assertFalse(self.run_function('hosts.has_pair', ['127.0.0.1', 'myname']), 'should remove second entry')

    def test_add_host(self):
        '''
        hosts.add_host
        '''
        self.__clean_hosts()
        assert self.run_function('hosts.add_host', ['192.168.1.123', 'newip'])
        self.assertTrue(self.run_function('hosts.has_pair', ['192.168.1.123', 'newip']))
        self.assertEqual(len(self.run_function('hosts.list_hosts')), 7)
        # Adding an alias to an existing IP must not create a new entry.
        assert self.run_function('hosts.add_host', ['127.0.0.1', 'othernameip'])
        self.assertEqual(len(self.run_function('hosts.list_hosts')), 7)

    def test_rm_host(self):
        '''
        hosts.rm_host
        '''
        self.__clean_hosts()
        assert self.run_function('hosts.has_pair', ['127.0.0.1', 'myname'])
        assert self.run_function('hosts.rm_host', ['127.0.0.1', 'myname'])
        assert not self.run_function('hosts.has_pair', ['127.0.0.1', 'myname'])
        # Removing a non-existent pair is a no-op that still reports success.
        assert self.run_function('hosts.rm_host', ['127.0.0.1', 'unknown'])

    def test_add_host_formatting(self):
        '''
        Ensure that hosts.add_host isn't adding duplicates and that
        it's formatting the output correctly
        '''
        # Start from an empty hosts file so the exact serialized syntax of the
        # entries added by the hosts module is provable.
        self.__clear_hosts()
        with open(HFN, 'w'):
            pass  # create the empty file; 'with' guarantees the handle closes
        assert self.run_function('hosts.add_host', ['192.168.1.1', 'host1.fqdn.com'])
        assert self.run_function('hosts.add_host', ['192.168.1.1', 'host1'])
        assert self.run_function('hosts.add_host', ['192.168.1.2', 'host2.fqdn.com'])
        assert self.run_function('hosts.add_host', ['192.168.1.2', 'host2'])
        assert self.run_function('hosts.add_host', ['192.168.1.2', 'oldhost2'])
        assert self.run_function('hosts.add_host', ['192.168.1.3', 'host3.fqdn.com'])
        assert self.run_function('hosts.add_host', ['192.168.1.2', 'host2-reorder'])
        assert self.run_function('hosts.add_host', ['192.168.1.1', 'host1-reorder'])
        # Read the lines back and ensure they're formatted correctly
        # (previously the file object from open() was never closed).
        with open(HFN, 'r') as fp:
            lines = fp.readlines()
        self.assertEqual(lines, [
            "192.168.1.3\t\thost3.fqdn.com\n",
            "192.168.1.2\t\thost2.fqdn.com\thost2\toldhost2\thost2-reorder\n",
            "192.168.1.1\t\thost1.fqdn.com\thost1\thost1-reorder\n",
        ])
| 36.914894 | 116 | 0.599039 |
import os
import shutil
import integration
HFN = os.path.join(integration.TMP, 'hosts')
class HostsModuleTest(integration.ModuleCase):
def __clean_hosts(self):
shutil.copyfile(os.path.join(integration.FILES, 'hosts'), HFN)
def __clear_hosts(self):
if os.path.isfile(HFN):
os.remove(HFN)
def tearDown(self):
self.__clear_hosts()
def test_list_hosts(self):
self.__clean_hosts()
hosts = self.run_function('hosts.list_hosts')
self.assertEqual(len(hosts), 6)
self.assertEqual(hosts['::1'], ['ip6-localhost', 'ip6-loopback'])
self.assertEqual(hosts['127.0.0.1'], ['localhost', 'myname'])
def test_list_hosts_nofile(self):
if os.path.isfile(HFN):
os.remove(HFN)
hosts = self.run_function('hosts.list_hosts')
self.assertEqual(hosts, {})
def test_get_ip(self):
self.__clean_hosts()
self.assertEqual(self.run_function('hosts.get_ip', ['myname']), '127.0.0.1')
self.assertEqual(self.run_function('hosts.get_ip', ['othername']), '')
self.__clear_hosts()
self.assertEqual(self.run_function('hosts.get_ip', ['othername']), '')
def test_get_alias(self):
self.__clean_hosts()
self.assertEqual(self.run_function('hosts.get_alias', ['127.0.0.1']), ['localhost', 'myname'])
self.assertEqual(self.run_function('hosts.get_alias', ['127.0.0.2']), [])
self.__clear_hosts()
self.assertEqual(self.run_function('hosts.get_alias', ['127.0.0.1']), [])
def test_has_pair(self):
self.__clean_hosts()
self.assertTrue(self.run_function('hosts.has_pair', ['127.0.0.1', 'myname']))
self.assertFalse(self.run_function('hosts.has_pair', ['127.0.0.1', 'othername']))
def test_set_host(self):
self.__clean_hosts()
assert self.run_function('hosts.set_host', ['192.168.1.123', 'newip'])
self.assertTrue(self.run_function('hosts.has_pair', ['192.168.1.123', 'newip']))
self.assertEqual(len(self.run_function('hosts.list_hosts')), 7)
assert self.run_function('hosts.set_host', ['127.0.0.1', 'localhost'])
self.assertFalse(self.run_function('hosts.has_pair', ['127.0.0.1', 'myname']), 'should remove second entry')
def test_add_host(self):
self.__clean_hosts()
assert self.run_function('hosts.add_host', ['192.168.1.123', 'newip'])
self.assertTrue(self.run_function('hosts.has_pair', ['192.168.1.123', 'newip']))
self.assertEqual(len(self.run_function('hosts.list_hosts')), 7)
assert self.run_function('hosts.add_host', ['127.0.0.1', 'othernameip'])
self.assertEqual(len(self.run_function('hosts.list_hosts')), 7)
def test_rm_host(self):
self.__clean_hosts()
assert self.run_function('hosts.has_pair', ['127.0.0.1', 'myname'])
assert self.run_function('hosts.rm_host', ['127.0.0.1', 'myname'])
assert not self.run_function('hosts.has_pair', ['127.0.0.1', 'myname'])
assert self.run_function('hosts.rm_host', ['127.0.0.1', 'unknown'])
def test_add_host_formatting(self):
# use an empty one so we can prove the syntax of the entries
# being added by the hosts module
self.__clear_hosts()
f = open(HFN, 'w')
f.close()
assert self.run_function('hosts.add_host', ['192.168.1.1', 'host1.fqdn.com'])
assert self.run_function('hosts.add_host', ['192.168.1.1', 'host1'])
assert self.run_function('hosts.add_host', ['192.168.1.2', 'host2.fqdn.com'])
assert self.run_function('hosts.add_host', ['192.168.1.2', 'host2'])
assert self.run_function('hosts.add_host', ['192.168.1.2', 'oldhost2'])
assert self.run_function('hosts.add_host', ['192.168.1.3', 'host3.fqdn.com'])
assert self.run_function('hosts.add_host', ['192.168.1.2', 'host2-reorder'])
assert self.run_function('hosts.add_host', ['192.168.1.1', 'host1-reorder'])
# now read the lines and ensure they're formatted correctly
lines = open(HFN, 'r').readlines()
self.assertEqual(lines, [
"192.168.1.3\t\thost3.fqdn.com\n",
"192.168.1.2\t\thost2.fqdn.com\thost2\toldhost2\thost2-reorder\n",
"192.168.1.1\t\thost1.fqdn.com\thost1\thost1-reorder\n",
])
| true | true |
f72a9c92bcaf6e4caffd7e1804851dab456f389c | 5,440 | py | Python | vi_engine_s.py | idigitopia/Distributed-VI | 323be8c50862d8dff9cae68313c518080a9df72e | [
"MIT"
] | 6 | 2019-08-18T17:04:36.000Z | 2022-03-26T08:31:22.000Z | vi_engine_s.py | idigitopia/Distributed-VI | 323be8c50862d8dff9cae68313c518080a9df72e | [
"MIT"
] | null | null | null | vi_engine_s.py | idigitopia/Distributed-VI | 323be8c50862d8dff9cae68313c518080a9df72e | [
"MIT"
] | null | null | null | import numpy as np
import ray
# Reset any Ray session left over from a previous run, then start a fresh
# local one at import time.
ray.shutdown()
ray.init()
# A : Action Space
# S : State Space
@ray.remote
class VI_worker(object):
    """Ray actor that performs value-iteration backups for a chunk of states.

    The backup uses a "slippery" action model: with probability
    ``true_action_prob`` the chosen (greedy) action executes, otherwise the
    slipped probability mass is spread uniformly over all actions.
    """

    def __init__(self, list_of_actions, tran_dict, reward_dict, beta, backup_states, true_action_prob=0.8,
                 unknown_value=0):
        # States this worker is responsible for backing up.
        self.backup_states = backup_states
        self.list_of_actions = list_of_actions
        self.tran_dict = tran_dict      # tran_dict[s][a][ns] -> transition probability
        self.reward_dict = reward_dict  # reward_dict[s][a] -> immediate reward
        self.beta = beta                # discount factor
        self.unknown_value = unknown_value  # Default Value for any states that do not have transitions defined.
        self.true_action_prob = true_action_prob
        self.slip_prob = 1 - self.true_action_prob
        # Probability mass each action receives when the agent slips.
        self.slip_action_prob = self.slip_prob / len(self.list_of_actions)

    def compute(self, V_t, backup_states=None):
        """
        Run one Bellman backup over this worker's states.

        :param V_t: Value Vector at t (dict state -> value)
        :param backup_states: optional override of the states to back up
        :return: (V_tplus1, max_error) -- updated values for the backed-up
                 states and the largest |V_tplus1[s] - V_t[s]| observed
        """
        backup_states = backup_states or self.backup_states
        V_tplus1 = {s: 0 for s in backup_states}
        max_vals = {s: float("-inf") for s in backup_states}
        max_error = 0
        for s in backup_states:
            for a in self.tran_dict[s]:
                expected_ns_val = 0
                for ns in self.tran_dict[s][a]:
                    try:
                        expected_ns_val += self.tran_dict[s][a][ns] * V_t[ns]
                    except KeyError:
                        # ns has no entry in V_t; fall back to the default
                        # value.  (Was a bare `except:`, which also hid any
                        # unrelated error raised here.)
                        expected_ns_val += self.tran_dict[s][a][ns] * self.unknown_value
                expect_s_val = self.reward_dict[s][a] + self.beta * expected_ns_val
                max_vals[s] = max(max_vals[s], expect_s_val)
                V_tplus1[s] += self.slip_action_prob * expect_s_val
            # Remaining probability mass goes to the greedy action's value.
            V_tplus1[s] += (self.true_action_prob - self.slip_action_prob) * max_vals[s]
            max_error = max(max_error, abs(V_tplus1[s] - V_t[s]))
        return V_tplus1, max_error
def distributed_value_iteration(S, A, reward_dict, tran_dict, seed_value=None, unknown_value=0, true_action_prob=0.8,
                                beta=0.99, epsilon=0.01, workers_num=4, verbose=True):
    """Run value iteration in parallel across ``workers_num`` Ray actors.

    Each actor owns an even chunk of the state space ``S`` and repeatedly
    performs Bellman backups on it; iteration stops once every chunk's
    Bellman error drops to ``epsilon`` or below.

    Returns (V_t, pi): the converged value dict and the greedy policy.
    """
    # Split the state space evenly to be distributed to VI workers
    state_chunks = [a.tolist() for a in np.array_split(np.array(S), workers_num)]
    V_t = {s: 0 for s in S} if seed_value is None else seed_value
    # Make VI workers (one Ray actor per state chunk)
    workers_list = [VI_worker.remote(list_of_actions=A,
                                     tran_dict=tran_dict,
                                     reward_dict=reward_dict,
                                     beta=beta,
                                     backup_states=state_chunk,
                                     unknown_value=unknown_value,
                                     true_action_prob=true_action_prob)
                    for state_chunk in state_chunks]
    # Do VI computation
    error = float('inf')
    while error > epsilon:
        object_list = [workers_list[i].compute.remote(V_t) for i in range(workers_num)]
        error_list = []
        for i in range(workers_num):
            # Harvest results from whichever worker finishes first.
            finish_id = ray.wait(object_list, num_returns=1, timeout=None)[0][0]
            object_list.remove(finish_id)
            V_tplus1, error = ray.get(finish_id)
            V_t.update(V_tplus1)
            error_list.append(error)
            if (verbose):
                print("Error:", error)
        # Converge only when the worst chunk is below epsilon.
        error = max(error_list)
    pi = get_pi_from_value(V_t, A, tran_dict, reward_dict, beta)
    return V_t, pi
def simple_value_iteration(S, A, reward_dict, tran_dict, seed_value=None, unknown_value=0, true_action_prob=0.8,
                           beta=0.99, epsilon=0.01, workers_num=4, verbose=True):
    """Single-process value iteration with a slippery action model.

    With probability ``true_action_prob`` the greedy action executes; the
    remaining probability mass is spread uniformly over all actions in ``A``.
    ``workers_num`` is unused here and kept only for signature parity with
    ``distributed_value_iteration``.

    Returns (V_t, pi): the converged value dict and the greedy policy.
    """
    slip_prob = 1 - true_action_prob
    slip_action_prob = slip_prob / len(A)  # uniform slip mass per action
    V_t = {s: 0 for s in S} if seed_value is None else seed_value
    error = float("inf")
    while error > epsilon:
        V_tplus1 = {s: 0 for s in S}
        max_vals = {s: float("-inf") for s in S}
        max_error = 0
        for s in S:
            for a in tran_dict[s]:
                expected_ns_val = 0
                for ns in tran_dict[s][a]:
                    try:
                        expected_ns_val += tran_dict[s][a][ns] * V_t[ns]
                    except KeyError:
                        # Next state missing from V_t: use the default value.
                        # (Was a bare `except:`, which also hid real errors.)
                        expected_ns_val += tran_dict[s][a][ns] * unknown_value
                expect_s_val = reward_dict[s][a] + beta * expected_ns_val
                max_vals[s] = max(max_vals[s], expect_s_val)
                V_tplus1[s] += slip_action_prob * expect_s_val
            # Remaining probability mass goes to the greedy action's value.
            V_tplus1[s] += (true_action_prob - slip_action_prob) * max_vals[s]
            max_error = max(max_error, abs(V_tplus1[s] - V_t[s]))
        V_t.update(V_tplus1)
        error = max_error
        if (verbose):
            print("Error:", error)
    pi = get_pi_from_value(V_t, A, tran_dict, reward_dict, beta)
    return V_t, pi
def get_pi_from_value(V, list_of_actions, tran_dict, reward_dict, beta):
    """Extract the greedy policy from a value function.

    For each state ``s`` in ``V`` pick the action maximizing
    ``reward_dict[s][a] + beta * E[V(ns)]``; next states missing from ``V``
    contribute value 0.  Ties keep the first action encountered (strict
    ``>``).  ``list_of_actions`` is unused and kept for interface
    compatibility with the value-iteration callers.
    """
    v_max = {s: float('-inf') for s in V}
    pi = {}
    for s in V:
        for a in tran_dict[s]:
            expected_val = 0
            for ns in tran_dict[s][a]:
                try:
                    expected_val += tran_dict[s][a][ns] * V[ns]
                except KeyError:
                    # ns not in V: contributes nothing.  (Was a bare
                    # `except:` adding tran_dict[s][a][ns] * 0, a no-op that
                    # also swallowed unrelated errors.)
                    pass
            expect_s_val = reward_dict[s][a] + beta * expected_val
            if expect_s_val > v_max[s]:
                v_max[s] = expect_s_val
                pi[s] = a
    return pi
| 34.871795 | 117 | 0.564154 | import numpy as np
import ray
# ---------------------------------------------------------------------------
# NOTE(review): everything below is the comment-stripped duplicate of the
# VI_worker / value-iteration definitions earlier in this record (the
# dataset's `content_no_comment` column), reproduced verbatim.
# ---------------------------------------------------------------------------
ray.shutdown()
ray.init()
@ray.remote
class VI_worker(object):
    """Ray actor performing value-iteration backups for a chunk of states."""
    def __init__(self, list_of_actions, tran_dict, reward_dict, beta, backup_states, true_action_prob=0.8,
                 unknown_value=0):
        self.backup_states = backup_states
        self.list_of_actions = list_of_actions
        self.tran_dict = tran_dict
        self.reward_dict = reward_dict
        self.beta = beta
        self.unknown_value = unknown_value
        self.true_action_prob = true_action_prob
        self.slip_prob = 1 - self.true_action_prob
        self.slip_action_prob = self.slip_prob / len(self.list_of_actions)
    def compute(self, V_t, backup_states=None):
        """One Bellman backup over this worker's states; returns (V, max_error)."""
        backup_states = backup_states or self.backup_states
        V_tplus1 = {s: 0 for s in backup_states}
        max_vals = {s: float("-inf") for s in backup_states}
        max_error = 0
        for s in backup_states:
            for a in self.tran_dict[s]:
                expected_ns_val = 0
                for ns in self.tran_dict[s][a]:
                    try:
                        expected_ns_val += self.tran_dict[s][a][ns] * V_t[ns]
                    # NOTE(review): bare `except` -- `except KeyError` would
                    # be the safe narrowing.
                    except:
                        expected_ns_val += self.tran_dict[s][a][ns] * self.unknown_value
                expect_s_val = self.reward_dict[s][a] + self.beta * expected_ns_val
                max_vals[s] = max(max_vals[s], expect_s_val)
                V_tplus1[s] += self.slip_action_prob * expect_s_val
            V_tplus1[s] += (self.true_action_prob - self.slip_action_prob) * max_vals[s]
            max_error = max(max_error, abs(V_tplus1[s] - V_t[s]))
        return V_tplus1, max_error
def distributed_value_iteration(S, A, reward_dict, tran_dict, seed_value=None, unknown_value=0, true_action_prob=0.8,
                                beta=0.99, epsilon=0.01, workers_num=4, verbose=True):
    """Parallel (Ray) value iteration; returns (V, pi)."""
    state_chunks = [a.tolist() for a in np.array_split(np.array(S), workers_num)]
    V_t = {s: 0 for s in S} if seed_value is None else seed_value
    workers_list = [VI_worker.remote(list_of_actions=A,
                                     tran_dict=tran_dict,
                                     reward_dict=reward_dict,
                                     beta=beta,
                                     backup_states=state_chunk,
                                     unknown_value=unknown_value,
                                     true_action_prob=true_action_prob)
                    for state_chunk in state_chunks]
    error = float('inf')
    while error > epsilon:
        object_list = [workers_list[i].compute.remote(V_t) for i in range(workers_num)]
        error_list = []
        for i in range(workers_num):
            finish_id = ray.wait(object_list, num_returns=1, timeout=None)[0][0]
            object_list.remove(finish_id)
            V_tplus1, error = ray.get(finish_id)
            V_t.update(V_tplus1)
            error_list.append(error)
            if (verbose):
                print("Error:", error)
        error = max(error_list)
    pi = get_pi_from_value(V_t, A, tran_dict, reward_dict, beta)
    return V_t, pi
def simple_value_iteration(S, A, reward_dict, tran_dict, seed_value=None, unknown_value=0, true_action_prob=0.8,
                           beta=0.99, epsilon=0.01, workers_num=4, verbose=True):
    """Single-process value iteration; returns (V, pi)."""
    slip_prob = 1 - true_action_prob
    slip_action_prob = slip_prob / len(A)
    V_t = {s: 0 for s in S} if seed_value is None else seed_value
    error = float("inf")
    while error > epsilon:
        V_tplus1 = {s: 0 for s in S}
        max_vals = {s: float("-inf") for s in S}
        max_error = 0
        for s in S:
            for a in tran_dict[s]:
                expected_ns_val = 0
                for ns in tran_dict[s][a]:
                    try:
                        expected_ns_val += tran_dict[s][a][ns] * V_t[ns]
                    # NOTE(review): bare `except` -- `except KeyError` would
                    # be the safe narrowing.
                    except:
                        expected_ns_val += tran_dict[s][a][ns] * unknown_value
                expect_s_val = reward_dict[s][a] + beta * expected_ns_val
                max_vals[s] = max(max_vals[s], expect_s_val)
                V_tplus1[s] += slip_action_prob * expect_s_val
            V_tplus1[s] += (true_action_prob - slip_action_prob) * max_vals[s]
            max_error = max(max_error, abs(V_tplus1[s] - V_t[s]))
        V_t.update(V_tplus1)
        error = max_error
        if (verbose):
            print("Error:", error)
    pi = get_pi_from_value(V_t, A, tran_dict, reward_dict, beta)
    return V_t, pi
def get_pi_from_value(V, list_of_actions, tran_dict, reward_dict, beta):
    """Greedy policy extraction from the value dict V."""
    v_max = {s: float('-inf') for s in V}
    pi = {}
    for s in V:
        for a in tran_dict[s]:
            expected_val = 0
            for ns in tran_dict[s][a]:
                try:
                    expected_val += tran_dict[s][a][ns] * V[ns]
                # NOTE(review): bare `except` and a no-op `* 0` add.
                except:
                    expected_val += tran_dict[s][a][ns] * 0
            expect_s_val = reward_dict[s][a] + beta * expected_val
            if expect_s_val > v_max[s]:
                v_max[s] = expect_s_val
                pi[s] = a
    return pi
| true | true |
f72a9cb5225fd598744b0a2b231293e1f98ddf01 | 78 | py | Python | auto/utils/__init__.py | trisongz/autobot | d1c8eb419ec702a7b38877b4c299807d23692c3d | [
"MIT"
] | null | null | null | auto/utils/__init__.py | trisongz/autobot | d1c8eb419ec702a7b38877b4c299807d23692c3d | [
"MIT"
] | null | null | null | auto/utils/__init__.py | trisongz/autobot | d1c8eb419ec702a7b38877b4c299807d23692c3d | [
"MIT"
] | null | null | null | from .average_meter import AverageMeter
from .progress import TrainingProgress | 39 | 39 | 0.884615 | from .average_meter import AverageMeter
from .progress import TrainingProgress | true | true |
f72a9cc5d314217c9ae136b4edba4248c11cdb62 | 658 | py | Python | scripts/issue_terminal.py | gmatteo/awesome-panel | 7eb6965f4b3a7eca08c07561e631e5beb189ffd3 | [
"Apache-2.0"
] | 179 | 2019-12-04T14:54:53.000Z | 2022-03-30T09:08:38.000Z | scripts/issue_terminal.py | hbueno/awesome-panel | fb27bcaf265cef1278cfa0c78799fbbf6c9a6834 | [
"Apache-2.0"
] | 62 | 2019-12-14T16:51:28.000Z | 2022-03-19T18:47:12.000Z | scripts/issue_terminal.py | hbueno/awesome-panel | fb27bcaf265cef1278cfa0c78799fbbf6c9a6834 | [
"Apache-2.0"
] | 35 | 2019-12-08T13:19:53.000Z | 2022-03-25T10:33:02.000Z | import panel as pn
SCRIPT = """
<script src="https://www.unpkg.com/terminal@0.1.4/lib/terminal.js" type="text/javascript"></script>
"""
script_panel = pn.pane.HTML(SCRIPT, width=0, height=0, margin=0, sizing_mode="fixed")
HTML = """
<div id="terminal-1"></div>
<script>
var t1 = new Terminal()
t1.setHeight("100%")
t1.setWidth('100%')
el = document.getElementById("terminal-1")
el.appendChild(t1.html)
t1.print('Hello, world!')
t1.input('Whats your name?', function (input) {
t1.print('Welcome, ' + input)
})
</script>
"""
terminal = pn.pane.HTML(HTML, height=200, width=200)
pn.Column(terminal).servable()
| 22.689655 | 100 | 0.635258 | import panel as pn
SCRIPT = """
<script src="https://www.unpkg.com/terminal@0.1.4/lib/terminal.js" type="text/javascript"></script>
"""
script_panel = pn.pane.HTML(SCRIPT, width=0, height=0, margin=0, sizing_mode="fixed")
HTML = """
<div id="terminal-1"></div>
<script>
var t1 = new Terminal()
t1.setHeight("100%")
t1.setWidth('100%')
el = document.getElementById("terminal-1")
el.appendChild(t1.html)
t1.print('Hello, world!')
t1.input('Whats your name?', function (input) {
t1.print('Welcome, ' + input)
})
</script>
"""
terminal = pn.pane.HTML(HTML, height=200, width=200)
pn.Column(terminal).servable()
| true | true |
f72a9e641315711aa7910aa3b6ee493d2ef27967 | 822 | py | Python | server/article_topic/urls.py | cuongw/article-topic | 2022908590ada829c286d3f76a8450b4eb33f709 | [
"MIT"
] | 1 | 2020-10-21T18:16:27.000Z | 2020-10-21T18:16:27.000Z | server/article_topic/urls.py | 103cuong/article-topic | 2022908590ada829c286d3f76a8450b4eb33f709 | [
"MIT"
] | 2 | 2020-01-05T08:00:24.000Z | 2020-01-05T08:00:25.000Z | server/article_topic/urls.py | cuongw/article-topic | 2022908590ada829c286d3f76a8450b4eb33f709 | [
"MIT"
] | 1 | 2020-08-18T09:09:42.000Z | 2020-08-18T09:09:42.000Z | """article_topic URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from topic_detection import views
# URL routes: the Django admin plus the article topic-detection endpoint
# handled by topic_detection.views.index.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('article/', views.index)
]
| 35.73913 | 77 | 0.715328 | from django.contrib import admin
from django.urls import path
from topic_detection import views
# NOTE(review): comment-stripped duplicate of the urlpatterns above
# (dataset `content_no_comment` column).
urlpatterns = [
    path('admin/', admin.site.urls),
    path('article/', views.index)
]
| true | true |
f72a9ee3f1f72fad4a331be0f301eba5e57a8290 | 17,450 | py | Python | apps/render_mandelbulb_slim.py | yyuting/learning_from_program_trace | e0e4ac9bc2d4069eef64bdc2de64a87a735fa508 | [
"MIT"
] | null | null | null | apps/render_mandelbulb_slim.py | yyuting/learning_from_program_trace | e0e4ac9bc2d4069eef64bdc2de64a87a735fa508 | [
"MIT"
] | null | null | null | apps/render_mandelbulb_slim.py | yyuting/learning_from_program_trace | e0e4ac9bc2d4069eef64bdc2de64a87a735fa508 | [
"MIT"
] | null | null | null | from render_util import *
from render_single import *
import numpy
import skimage
import skimage.io
def mb(p, time):
    """Mandelbulb distance estimate at point ``p`` (symbolic DSL expression).

    Runs 4 iterations of the power-20 Mandelbulb map z -> z^power + p in
    spherical coordinates.  ``select`` masks freeze escaped points
    (|z| > 2) in place, since the expression DSL has no data-dependent
    break.  Returns ``[distance_estimate, orbit_trap]`` where the second
    element is the minimum |z| seen along the orbit (used for coloring).
    """
    z = [p[0], p[1], p[2]]
    dr = 1.0  # running derivative of |z|, used by the distance estimator
    t0 = 1.0  # orbit trap: minimum radius reached so far
    cond = True
    power = 20.0
    for i in range(4):
        r = sqrt(z[0] ** 2.0 + z[1] ** 2.0 + z[2] ** 2.0)
        # Points with r > 2 have escaped; their state is frozen below.
        cond = r <= 2.0
        theta = atan(z[1] / z[0]) * power
        phi = (asin(z[2] / r) + time * 0.1) * power  # time term animates the bulb
        # Escaped points get exponent 1.0 so r ** this_power is a no-op.
        this_power = select(cond, power, 1.0)
        new_dr = (r ** (this_power - 1.0)) * dr * power + 1.0
        dr = select(cond, new_dr, dr)
        r = select(cond, r ** this_power, r)
        cos_phi = cos(phi)
        z[0] = select(cond, r * cos(theta) * cos_phi + p[0], z[0])
        z[1] = select(cond, r * sin(theta) * cos_phi + p[1], z[1])
        z[2] = select(cond, r * sin(phi) + p[2], z[2])
        t0 = select(cond, min_nosmooth(t0, r), t0)
    # r/dr carry the values from the final loop iteration.
    return [0.5 * log(r) * r / dr, t0]
def f(p, time):
    """Scene distance function: the mandelbulb, rotated about y over time."""
    new_p = rotation_y(p, time * 0.2)
    return mb(new_p, time)
def intersect(ro, rd, time, orig_t):
    """Sphere-trace the ray ``ro + t*rd`` against the mandelbulb (48 steps).

    Uses over-relaxed sphere tracing (overshoot by ``os`` while safe, back
    off otherwise) and keeps the sample with the smallest relative error.
    Returns ``[res_t, res_c1]``: the hit distance (-1.0 for a miss) and the
    orbit-trap value at the best sample.
    """
    t = orig_t
    res_t = ConstExpr(0.0)
    res_c1 = ConstExpr(0.0)
    max_error = ConstExpr(1000.0)
    d = ConstExpr(1.0)
    pd = ConstExpr(100.0)  # previous distance sample
    # NOTE(review): `os` here shadows the `os` module inside this function.
    os = ConstExpr(0.0)    # over-relaxation offset
    step = ConstExpr(0.0)
    error = ConstExpr(1000.0)
    cond1 = True
    c = [ConstExpr(0.0), ConstExpr(0.0)]
    for i in loop_generator(48, is_raymarching=True):
        compiler.DEFAULT_FOR_LOOP_ITER = i
        # Keep marching while the relative error is positive and t <= 20.
        cond1 = (error >= 0.0) * (t <= 20.0)
        c = f(ro + rd * t, time)
        d = select(cond1, c[0], d)
        # Over-relaxation: if the new distance clears the overshoot, push
        # further; otherwise step back by the overshoot amount.
        cond2 = d > os
        os = select(cond2, 0.4 * d * d / pd, 0.0)
        step = select(cond2, d + os, -os)
        pd = select(cond2, d, 100.0)
        d = select(cond2, d, 1.0)
        error = select(cond1, d / t, error)
        # Remember the sample with the smallest relative error so far.
        cond3 = cond1 * (error < max_error)
        max_error = select(cond3, error, max_error)
        res_t = select(cond3, t, res_t)
        res_c1 = select(cond3, c[1], res_c1)
        t = select(cond1, t + step, t)
    ro_len = sqrt(ro[0] ** 2 + ro[1] ** 2 + ro[2] ** 2)
    # Rays that marched beyond the camera's distance are treated as misses.
    res_t = select(t > ro_len, -1.0, res_t)
    return [res_t, res_c1]
def mandelbulb_slim(ray_dir_p, ray_origin, time):
    """Shade one ray of the mandelbulb scene (tracing-compiler DSL).

    Sphere-traces the ray, shades hits with a sun/sky/back-light model whose
    base color cycles with the orbit-trap value, and returns black for
    misses via ``select``.
    """
    # Light rig: fixed sun direction plus sun and sky colors.
    sundir = numpy.array([0.1, 0.8, 0.6])
    sundir /= numpy.linalg.norm(sundir)
    sun = numpy.array([1.64, 1.27, 0.99])
    skycolor = numpy.array([0.6, 1.5, 1.0])
    ray_origin = numpy.array(ray_origin)
    ray_dir_p = numpy.array(ray_dir_p)
    # Initial march distance: a third of the camera's distance from origin.
    orig_t = (ray_origin[0] ** 2.0 + ray_origin[1] ** 2.0 + ray_origin[2] ** 2.0) ** 0.5 / 3.0
    res = intersect(ray_origin, ray_dir_p, time, orig_t)
    t_ray = Var(log_prefix + 't_ray', res[0])
    t_ray.log_intermediates_rank = 2
    cond = t_ray > 0.0  # negative hit distance means the ray missed
    p = ray_origin + res[0] * ray_dir_p
    # Finite-difference surface normal of the distance field at the hit.
    n = normal_functor(lambda x: f(x, time)[0], 0.001, 3)(p)
    # change log_intermediates_rank for input arguments
    old_log_intermediates_rank = compiler.log_intermediates_rank
    compiler.log_intermediates_rank = 1
    # (loop variable `list` shadows the builtin -- left as-is here)
    for list in [ray_dir_p, ray_origin, [time], [res[0]], n]:
        for item in list:
            item.log_intermediates_rank = compiler.log_intermediates_rank
    # Diffuse sun term, hemispherical sky term and a back light.
    dif = max_nosmooth(0.0, n[0] * sundir[0] + n[1] * sundir[1] + n[2] * sundir[2])
    sky = 0.6 + 0.4 * max_nosmooth(0.0, n[1])
    bac = max_nosmooth(0.0, 0.3 + 0.7 * (-n[0] * sundir[0] - n[1] - n[2] * sundir[2]))
    lin_coef_a = 4.5 * dif + 0.8 * bac
    lin_coef_b = 0.6 * sky
    lin0 = sun[0] * lin_coef_a + skycolor[0] * lin_coef_b
    lin1 = sun[1] * lin_coef_a + skycolor[1] * lin_coef_b
    lin2 = sun[2] * lin_coef_a + skycolor[2] * lin_coef_b
    # Base color driven by the orbit-trap value res[1], phase-shifted per channel.
    tc0_coef = 3.0 + 4.2 * (res[1] ** 0.55)
    col0 = lin0 * 0.9 * 0.2 * (0.5 + 0.5 * sin(tc0_coef))
    col1 = lin1 * 0.8 * 0.2 * (0.5 + 0.5 * sin(tc0_coef + 0.5))
    col2 = lin2 * 0.6 * 0.2 * (0.5 + 0.5 * sin(tc0_coef + 1.0))
    # Gamma-correct hits; misses stay black.
    col0 = select(cond, col0 ** 0.45, 0.0)
    col1 = select(cond, col1 ** 0.45, 0.0)
    col2 = select(cond, col2 ** 0.45, 0.0)
    col = numpy.array([col0, col1, col2])
    # Smoothstep-style contrast curve, then a slight desaturation.
    col = col * 0.6 + 0.4 * col * col * (3.0 - 2.0 * col)
    col = col * 1.5 - 0.5 * 0.33 * (col[0] + col[1] + col[2])
    compiler.log_intermediates_rank = old_log_intermediates_rank
    for expr in col.tolist() + n.tolist() + [t_ray]:
        expr.log_intermediates_subset_rank = 1
    return output_color(col)
shaders = [mandelbulb_slim]  # shader entry points exported to the renderer
is_color = True              # this shader emits RGB output
# use a different rotation parameterization so can easily compute direction to world coord origin
fov = 'small_seperable'
# Look-at target used when sampling cameras (the world origin), plus the
# per-axis magnitude of the random jitter applied to it in pos_solver().
x_center = 0.0
y_center = 0.0
z_center = 0.0
offset = np.array([0.4, 0.4, 0.4])
def pos_solver(x0, x1, x2):
    """
    Given (x0, x1, x2) as the camera position, solve camera angles
    (ang1, ang2, ang3) such that:
    the image center points at (x_center, y_center, z_center) plus per-axis
    uniform noise in [-offset, offset],
    the horizontal axis in the image is perpendicular to the upward (y axis)
    in the world, and
    the vertical image axis points along the world's upward y axis.

    NOTE(review): a previous docstring described the target as
    (0.0, 0.4, 0.0) with noise (0.2, 0.2, 0.07); the code actually uses the
    module globals x_center/y_center/z_center and offset defined above.
    """
    random_offset = (np.random.rand(3) * 2.0 - 1.0) * offset
    # (a, b, c): unnormalized direction from the camera to the jittered target.
    a = x_center - x0 + random_offset[0]
    b = y_center - x1 + random_offset[1]
    c = z_center - x2 + random_offset[2]
    norm = (a ** 2 + b ** 2 + c ** 2) ** 0.5
    d = a / norm
    e = b / norm
    f = c / norm
    ang1 = np.random.rand() * 2 * np.pi  # first angle is unconstrained (random)
    de_norm = (d ** 2 + e ** 2) ** 0.5
    if de_norm > 0:
        # assume cos2 > 0
        ang3 = math.atan2(e / de_norm, d / de_norm)
        cos3 = np.cos(ang3)
        if cos3 != 0:
            ang2 = math.atan2(-f, d / cos3)
        else:
            # cos3 == 0: recover ang2 from the y component instead.
            sin3 = np.sin(ang3)
            ang2 = math.atan2(-f, e / sin3)
    else:
        # Degenerate case: the view direction has no x/y component (f = +-1).
        if f > 0:
            ang2 = - np.pi / 2
        else:
            ang2 = np.pi / 2
        ang3 = np.random.rand() * 2 * np.pi
    return ang1, ang2, ang3
def main():
    """Dataset-generation entry point for the mandelbulb shader.

    Dispatches on ``sys.argv[1]`` (base_mode) with ``sys.argv[2]`` as the
    base directory:
      collect_raw               render low-res training tiles with traces
      generate_dataset          render 1000-sample ground-truth images
      sample_camera_pos         sample camera poses/times and render noisy previews
      generate_temporal_dataset ground truth for the temporal variant
      generate_blur_additional  trace logging for the motion-blur variant
    """
    if len(sys.argv) < 3:
        print('Usage: python render_[shader].py base_mode base_dir')
        # BUGFIX: was a bare `raise` with no active exception, which raises
        # "RuntimeError: No active exception to re-raise"; exit cleanly.
        sys.exit(1)
    base_mode = sys.argv[1]
    base_dir = sys.argv[2]
    camera_dir = os.path.join(base_dir, 'datasets/datas_mandelbulb_with_bg')
    preprocess_dir = os.path.join(base_dir, 'preprocess/mandelbulb')
    if not os.path.exists(camera_dir):
        os.makedirs(camera_dir, exist_ok=True)
    if not os.path.exists(preprocess_dir):
        os.makedirs(preprocess_dir, exist_ok=True)
    if base_mode == 'collect_raw':
        # Low-resolution training tiles with full program traces logged.
        camera_pos = numpy.load(os.path.join(camera_dir, 'train.npy'))
        render_t = numpy.load(os.path.join(camera_dir, 'train_time.npy'))
        nframes = render_t.shape[0]
        train_start = numpy.load(os.path.join(camera_dir, 'train_start.npy'))
        render_single(os.path.join(preprocess_dir, 'train'), 'render_mandelbulb_slim', 'none', 'none', sys.argv[1:], nframes=nframes, log_intermediates=True, render_size = (80, 80), render_kw={'render_t': render_t, 'compute_f': False, 'ground_truth_samples': 1, 'random_camera': True, 'camera_pos': camera_pos, 'zero_samples': False, 'gname': 'train_small', 'tile_only': True, 'tile_start': train_start, 'collect_loop_and_features': True, 'log_only_return_def_raymarching': True})
    elif base_mode == 'generate_dataset':
        # 1000-sample ground-truth renders; training/validation use tiles,
        # test sets render full frames.
        for mode in ['train', 'test_close', 'test_far', 'test_middle', 'validate']:
            camera_pos = numpy.load(os.path.join(camera_dir, mode + '.npy'))
            nframes = camera_pos.shape[0]
            if mode in ['train', 'validate']:
                tile_start = numpy.load(os.path.join(camera_dir, mode + '_start.npy'))[:nframes]
                render_size = (320, 320)
                tile_only = True
                render_t = numpy.load(os.path.join(camera_dir, mode + '_time.npy'))
            else:
                tile_start = None
                render_size = (640, 960)
                tile_only = False
                # Shared pool of test times: 5 close, 5 far, rest middle.
                render_t_pool = numpy.load(os.path.join(camera_dir, 'test_time.npy'))
                if mode == 'test_close':
                    render_t = render_t_pool[:5]
                elif mode == 'test_far':
                    render_t = render_t_pool[5:10]
                else:
                    render_t = render_t_pool[10:]
            render_t = render_t[:nframes]
            outdir = get_shader_dirname(os.path.join(preprocess_dir, mode), shaders[0], 'none', 'none')
            render_single(os.path.join(preprocess_dir, mode), 'render_mandelbulb_slim', 'none', 'none', sys.argv[1:], nframes=nframes, log_intermediates=False, render_size = render_size, render_kw={'render_t': render_t, 'compute_f': False, 'ground_truth_samples': 1000, 'random_camera': True, 'camera_pos': camera_pos, 'zero_samples': False, 'gname': '%s_ground' % mode, 'tile_only': tile_only, 'tile_start': tile_start, 'collect_loop_and_features': True, 'log_only_return_def_raymarching': True})
            # Move the rendered PNGs next to the camera data.
            if mode in ['train', 'validate']:
                target_dir = os.path.join(camera_dir, mode + '_img')
            else:
                target_dir = os.path.join(camera_dir, 'test_img')
            if not os.path.exists(target_dir):
                os.mkdir(target_dir)
            for file in os.listdir(outdir):
                if file.startswith('%s_ground' % mode) and file.endswith('.png'):
                    os.rename(os.path.join(outdir, file),
                              os.path.join(target_dir, file))
    elif base_mode == 'sample_camera_pos':
        test_render_t = None
        t_range = 31.5  # shader time is drawn uniformly from [0, t_range)
        for mode in ['train', 'test_close', 'test_far', 'test_middle', 'validate']:
            # Per-mode sampling box; test sets probe held-out regions.
            x_min = -4
            x_max = 4
            y_min = -4
            y_max = 4
            z_min = -4
            z_max = 4
            if mode == 'train':
                nframes = 800
                x_max = 3.5
                y_max = 3.5
            elif mode == 'validate':
                nframes = 80
                x_max = 3.5
                y_max = 3.5
            elif mode == 'test_close':
                nframes = 5
                x_min = 3.5
            elif mode == 'test_far':
                nframes = 5
                y_min = 3.5
            elif mode == 'test_middle':
                nframes = 20
                x_max = 3.5
                y_max = 3.5
            camera_pos = numpy.empty([nframes, 6])
            for i in range(nframes):
                # Rejection-sample positions outside a radius-1.8 sphere so
                # the camera never starts inside the fractal.
                while True:
                    x = numpy.random.rand() * (x_max - x_min) + x_min
                    y = numpy.random.rand() * (y_max - y_min) + y_min
                    z = numpy.random.rand() * (z_max - z_min) + z_min
                    if (x ** 2 + y ** 2 + z ** 2) > 1.8 ** 2:
                        break
                ang1, ang2, ang3 = pos_solver(x, y, z)
                camera_pos[i] = np.array([x, y, z, ang1, ang2, ang3])
            numpy.save(os.path.join(preprocess_dir, '%s.npy' % mode), camera_pos)
            if mode in ['train', 'validate']:
                expand_boundary = 160
                render_t = np.random.rand(nframes) * t_range
                numpy.save(os.path.join(preprocess_dir, mode + '_time.npy'), render_t)
            else:
                expand_boundary = 0
                if test_render_t is None:
                    test_render_t = np.random.rand(30) * t_range
                    # BUGFIX: previously saved `render_t` (leftover from the
                    # preceding train/validate iteration) instead of the
                    # freshly drawn test times.
                    np.save(os.path.join(preprocess_dir, 'test_time.npy'), test_render_t)
                if mode == 'test_close':
                    render_t = test_render_t[:5]
                elif mode == 'test_far':
                    render_t = test_render_t[5:10]
                else:
                    render_t = test_render_t[10:]
            render_single(os.path.join(preprocess_dir, mode), 'render_mandelbulb_slim', 'none', 'none', sys.argv[1:], nframes=nframes, log_intermediates=False, render_size = (640, 960), render_kw={'render_t': render_t, 'compute_f': False, 'ground_truth_samples': 1, 'random_camera': True, 'camera_pos': camera_pos, 'zero_samples': False, 'gname': '%s_noisy' % mode, 'collect_loop_and_features': True, 'log_only_return_def_raymarching': True, 'expand_boundary': expand_boundary})
    elif base_mode == 'generate_temporal_dataset':
        # Temporal variant renders each pose at several sub-frame times.
        camera_dir = os.path.join(base_dir, 'datasets/datas_mandelbulb_temporal_with_bg')
        preprocess_dir = os.path.join(base_dir, 'preprocess/mandelbulb_temporal')
        if not os.path.exists(camera_dir):
            os.makedirs(camera_dir, exist_ok=True)
        if not os.path.exists(preprocess_dir):
            os.makedirs(preprocess_dir, exist_ok=True)
        for mode in ['train', 'test', 'validate']:
            if mode in ['train', 'validate']:
                tile_start = numpy.load(os.path.join(camera_dir, mode + '_start.npy'))
                render_size = (320, 320)
                tile_only = True
                render_t_base = numpy.load(os.path.join(camera_dir, mode + '_time.npy'))
                camera_pos = numpy.load(os.path.join(camera_dir, mode + '.npy'))
                t_schedule = np.arange(8)
            else:
                tile_start = None
                render_size = (640, 960)
                tile_only = False
                render_t_base = numpy.load(os.path.join(camera_dir, 'test_time.npy'))
                camera_pos = np.concatenate((np.load(os.path.join(camera_dir, 'test_close.npy')),
                                             np.load(os.path.join(camera_dir, 'test_far.npy')),
                                             np.load(os.path.join(camera_dir, 'test_middle.npy'))), axis=0)
                t_schedule = [0, 1, 29]
            nframes = camera_pos.shape[0]
            outdir = get_shader_dirname(os.path.join(preprocess_dir, mode), shaders[0], 'none', 'none')
            for t_val in t_schedule:
                # Offset each base time by t_val frames at 30 fps.
                render_t = render_t_base + t_val / 30
                render_single(os.path.join(preprocess_dir, mode), 'render_mandelbulb_slim', 'none', 'none', sys.argv[1:], nframes=nframes, log_intermediates=False, render_size = render_size, render_kw={'render_t': render_t, 'compute_f': False, 'ground_truth_samples': 1000, 'random_camera': True, 'camera_pos': camera_pos, 'zero_samples': False, 'gname': '%s_ground_%d' % (mode, t_val), 'tile_only': tile_only, 'tile_start': tile_start, 'collect_loop_and_features': True, 'log_only_return_def_raymarching': True})
            target_dir = os.path.join(camera_dir, '%s_img' % mode)
            if not os.path.exists(target_dir):
                os.mkdir(target_dir)
            for file in os.listdir(outdir):
                if file.startswith('%s_ground' % mode) and file.endswith('.png'):
                    os.rename(os.path.join(outdir, file),
                              os.path.join(target_dir, file))
    elif base_mode == 'generate_blur_additional':
        # Extra pass that logs t_ray and level-2 intermediates for the
        # motion-blur experiments.
        preprocess_dir = os.path.join(base_dir, 'preprocess/mandelbulb_blur')
        for mode in ['train', 'test_close', 'test_far', 'test_middle', 'validate']:
            camera_pos = numpy.load(os.path.join(camera_dir, mode + '.npy'))
            nframes = camera_pos.shape[0]
            if mode in ['train', 'validate']:
                tile_start = numpy.load(os.path.join(camera_dir, mode + '_start.npy'))[:nframes]
                render_size = (320, 320)
                tile_only = True
                render_t = numpy.load(os.path.join(camera_dir, mode + '_time.npy'))
            else:
                tile_start = None
                render_size = (640, 960)
                tile_only = False
                render_t_pool = numpy.load(os.path.join(camera_dir, 'test_time.npy'))
                if mode == 'test_close':
                    render_t = render_t_pool[:5]
                elif mode == 'test_far':
                    render_t = render_t_pool[5:10]
                else:
                    render_t = render_t_pool[10:]
            render_t = render_t[:nframes]
            render_single(os.path.join(preprocess_dir, mode), 'render_mandelbulb_slim', 'none', 'none', sys.argv[1:], nframes=nframes, log_intermediates=True, render_size = render_size, render_kw={'render_t': render_t, 'compute_f': False, 'ground_truth_samples': 1, 'random_camera': True, 'camera_pos': camera_pos, 'zero_samples': False, 'gname': '%s_noisy' % mode, 'tile_only': tile_only, 'tile_start': tile_start, 'collect_loop_and_features': True, 'log_only_return_def_raymarching': True, 'log_t_ray': True, 'log_intermediates_level': 2})
    return
# Script entry point.
if __name__ == '__main__':
    main()
| 40.962441 | 541 | 0.544413 | from render_util import *
from render_single import *
import numpy
import skimage
import skimage.io
# ---------------------------------------------------------------------------
# NOTE(review): everything below is the comment-stripped duplicate of the
# mandelbulb shader definitions earlier in this record (the dataset's
# `content_no_comment` column), reproduced verbatim; the duplicate of main()
# continues past the end of this chunk.
# ---------------------------------------------------------------------------
def mb(p, time):
    """Mandelbulb distance estimate; returns [distance, orbit_trap]."""
    z = [p[0], p[1], p[2]]
    dr = 1.0
    t0 = 1.0
    cond = True
    power = 20.0
    for i in range(4):
        r = sqrt(z[0] ** 2.0 + z[1] ** 2.0 + z[2] ** 2.0)
        cond = r <= 2.0
        theta = atan(z[1] / z[0]) * power
        phi = (asin(z[2] / r) + time * 0.1) * power
        this_power = select(cond, power, 1.0)
        new_dr = (r ** (this_power - 1.0)) * dr * power + 1.0
        dr = select(cond, new_dr, dr)
        r = select(cond, r ** this_power, r)
        cos_phi = cos(phi)
        z[0] = select(cond, r * cos(theta) * cos_phi + p[0], z[0])
        z[1] = select(cond, r * sin(theta) * cos_phi + p[1], z[1])
        z[2] = select(cond, r * sin(phi) + p[2], z[2])
        t0 = select(cond, min_nosmooth(t0, r), t0)
    return [0.5 * log(r) * r / dr, t0]
def f(p, time):
    """Scene distance function: time-rotated mandelbulb."""
    new_p = rotation_y(p, time * 0.2)
    return mb(new_p, time)
def intersect(ro, rd, time, orig_t):
    """Over-relaxed sphere tracing; returns [hit_t or -1.0, orbit_trap]."""
    t = orig_t
    res_t = ConstExpr(0.0)
    res_c1 = ConstExpr(0.0)
    max_error = ConstExpr(1000.0)
    d = ConstExpr(1.0)
    pd = ConstExpr(100.0)
    os = ConstExpr(0.0)
    step = ConstExpr(0.0)
    error = ConstExpr(1000.0)
    cond1 = True
    c = [ConstExpr(0.0), ConstExpr(0.0)]
    for i in loop_generator(48, is_raymarching=True):
        compiler.DEFAULT_FOR_LOOP_ITER = i
        cond1 = (error >= 0.0) * (t <= 20.0)
        c = f(ro + rd * t, time)
        d = select(cond1, c[0], d)
        cond2 = d > os
        os = select(cond2, 0.4 * d * d / pd, 0.0)
        step = select(cond2, d + os, -os)
        pd = select(cond2, d, 100.0)
        d = select(cond2, d, 1.0)
        error = select(cond1, d / t, error)
        cond3 = cond1 * (error < max_error)
        max_error = select(cond3, error, max_error)
        res_t = select(cond3, t, res_t)
        res_c1 = select(cond3, c[1], res_c1)
        t = select(cond1, t + step, t)
    ro_len = sqrt(ro[0] ** 2 + ro[1] ** 2 + ro[2] ** 2)
    res_t = select(t > ro_len, -1.0, res_t)
    return [res_t, res_c1]
def mandelbulb_slim(ray_dir_p, ray_origin, time):
    """Shade one ray of the mandelbulb scene (DSL expression graph)."""
    sundir = numpy.array([0.1, 0.8, 0.6])
    sundir /= numpy.linalg.norm(sundir)
    sun = numpy.array([1.64, 1.27, 0.99])
    skycolor = numpy.array([0.6, 1.5, 1.0])
    ray_origin = numpy.array(ray_origin)
    ray_dir_p = numpy.array(ray_dir_p)
    orig_t = (ray_origin[0] ** 2.0 + ray_origin[1] ** 2.0 + ray_origin[2] ** 2.0) ** 0.5 / 3.0
    res = intersect(ray_origin, ray_dir_p, time, orig_t)
    t_ray = Var(log_prefix + 't_ray', res[0])
    t_ray.log_intermediates_rank = 2
    cond = t_ray > 0.0
    p = ray_origin + res[0] * ray_dir_p
    n = normal_functor(lambda x: f(x, time)[0], 0.001, 3)(p)
    old_log_intermediates_rank = compiler.log_intermediates_rank
    compiler.log_intermediates_rank = 1
    for list in [ray_dir_p, ray_origin, [time], [res[0]], n]:
        for item in list:
            item.log_intermediates_rank = compiler.log_intermediates_rank
    dif = max_nosmooth(0.0, n[0] * sundir[0] + n[1] * sundir[1] + n[2] * sundir[2])
    sky = 0.6 + 0.4 * max_nosmooth(0.0, n[1])
    bac = max_nosmooth(0.0, 0.3 + 0.7 * (-n[0] * sundir[0] - n[1] - n[2] * sundir[2]))
    lin_coef_a = 4.5 * dif + 0.8 * bac
    lin_coef_b = 0.6 * sky
    lin0 = sun[0] * lin_coef_a + skycolor[0] * lin_coef_b
    lin1 = sun[1] * lin_coef_a + skycolor[1] * lin_coef_b
    lin2 = sun[2] * lin_coef_a + skycolor[2] * lin_coef_b
    tc0_coef = 3.0 + 4.2 * (res[1] ** 0.55)
    col0 = lin0 * 0.9 * 0.2 * (0.5 + 0.5 * sin(tc0_coef))
    col1 = lin1 * 0.8 * 0.2 * (0.5 + 0.5 * sin(tc0_coef + 0.5))
    col2 = lin2 * 0.6 * 0.2 * (0.5 + 0.5 * sin(tc0_coef + 1.0))
    col0 = select(cond, col0 ** 0.45, 0.0)
    col1 = select(cond, col1 ** 0.45, 0.0)
    col2 = select(cond, col2 ** 0.45, 0.0)
    col = numpy.array([col0, col1, col2])
    col = col * 0.6 + 0.4 * col * col * (3.0 - 2.0 * col)
    col = col * 1.5 - 0.5 * 0.33 * (col[0] + col[1] + col[2])
    compiler.log_intermediates_rank = old_log_intermediates_rank
    for expr in col.tolist() + n.tolist() + [t_ray]:
        expr.log_intermediates_subset_rank = 1
    return output_color(col)
# Module constants (duplicate of the commented block earlier in this record).
shaders = [mandelbulb_slim]
is_color = True
fov = 'small_seperable'
x_center = 0.0
y_center = 0.0
z_center = 0.0
offset = np.array([0.4, 0.4, 0.4])
def pos_solver(x0, x1, x2):
    """Solve camera angles so the image center looks at the jittered origin."""
    random_offset = (np.random.rand(3) * 2.0 - 1.0) * offset
    a = x_center - x0 + random_offset[0]
    b = y_center - x1 + random_offset[1]
    c = z_center - x2 + random_offset[2]
    norm = (a ** 2 + b ** 2 + c ** 2) ** 0.5
    d = a / norm
    e = b / norm
    f = c / norm
    ang1 = np.random.rand() * 2 * np.pi
    de_norm = (d ** 2 + e ** 2) ** 0.5
    if de_norm > 0:
        ang3 = math.atan2(e / de_norm, d / de_norm)
        cos3 = np.cos(ang3)
        if cos3 != 0:
            ang2 = math.atan2(-f, d / cos3)
        else:
            sin3 = np.sin(ang3)
            ang2 = math.atan2(-f, e / sin3)
    else:
        if f > 0:
            ang2 = - np.pi / 2
        else:
            ang2 = np.pi / 2
        ang3 = np.random.rand() * 2 * np.pi
    return ang1, ang2, ang3
def main():
if len(sys.argv) < 3:
print('Usage: python render_[shader].py base_mode base_dir')
raise
base_mode = sys.argv[1]
base_dir = sys.argv[2]
camera_dir = os.path.join(base_dir, 'datasets/datas_mandelbulb_with_bg')
preprocess_dir = os.path.join(base_dir, 'preprocess/mandelbulb')
if not os.path.exists(camera_dir):
os.makedirs(camera_dir, exist_ok=True)
if not os.path.exists(preprocess_dir):
os.makedirs(preprocess_dir, exist_ok=True)
if base_mode == 'collect_raw':
camera_pos = numpy.load(os.path.join(camera_dir, 'train.npy'))
render_t = numpy.load(os.path.join(camera_dir, 'train_time.npy'))
nframes = render_t.shape[0]
train_start = numpy.load(os.path.join(camera_dir, 'train_start.npy'))
render_single(os.path.join(preprocess_dir, 'train'), 'render_mandelbulb_slim', 'none', 'none', sys.argv[1:], nframes=nframes, log_intermediates=True, render_size = (80, 80), render_kw={'render_t': render_t, 'compute_f': False, 'ground_truth_samples': 1, 'random_camera': True, 'camera_pos': camera_pos, 'zero_samples': False, 'gname': 'train_small', 'tile_only': True, 'tile_start': train_start, 'collect_loop_and_features': True, 'log_only_return_def_raymarching': True})
elif base_mode == 'generate_dataset':
for mode in ['train', 'test_close', 'test_far', 'test_middle', 'validate']:
camera_pos = numpy.load(os.path.join(camera_dir, mode + '.npy'))
nframes = camera_pos.shape[0]
if mode in ['train', 'validate']:
tile_start = numpy.load(os.path.join(camera_dir, mode + '_start.npy'))[:nframes]
render_size = (320, 320)
tile_only = True
render_t = numpy.load(os.path.join(camera_dir, mode + '_time.npy'))
else:
tile_start = None
render_size = (640, 960)
tile_only = False
render_t_pool = numpy.load(os.path.join(camera_dir, 'test_time.npy'))
if mode == 'test_close':
render_t = render_t_pool[:5]
elif mode == 'test_far':
render_t = render_t_pool[5:10]
else:
render_t = render_t_pool[10:]
render_t = render_t[:nframes]
outdir = get_shader_dirname(os.path.join(preprocess_dir, mode), shaders[0], 'none', 'none')
render_single(os.path.join(preprocess_dir, mode), 'render_mandelbulb_slim', 'none', 'none', sys.argv[1:], nframes=nframes, log_intermediates=False, render_size = render_size, render_kw={'render_t': render_t, 'compute_f': False, 'ground_truth_samples': 1000, 'random_camera': True, 'camera_pos': camera_pos, 'zero_samples': False, 'gname': '%s_ground' % mode, 'tile_only': tile_only, 'tile_start': tile_start, 'collect_loop_and_features': True, 'log_only_return_def_raymarching': True})
if mode in ['train', 'validate']:
target_dir = os.path.join(camera_dir, mode + '_img')
else:
target_dir = os.path.join(camera_dir, 'test_img')
if not os.path.exists(target_dir):
os.mkdir(target_dir)
for file in os.listdir(outdir):
if file.startswith('%s_ground' % mode) and file.endswith('.png'):
os.rename(os.path.join(outdir, file),
os.path.join(target_dir, file))
elif base_mode == 'sample_camera_pos':
test_render_t = None
t_range = 31.5
for mode in ['train', 'test_close', 'test_far', 'test_middle', 'validate']:
x_min = -4
x_max = 4
y_min = -4
y_max = 4
z_min = -4
z_max = 4
if mode == 'train':
nframes = 800
x_max = 3.5
y_max = 3.5
elif mode == 'validate':
nframes = 80
x_max = 3.5
y_max = 3.5
elif mode == 'test_close':
nframes = 5
x_min = 3.5
elif mode == 'test_far':
nframes = 5
y_min = 3.5
elif mode == 'test_middle':
nframes = 20
x_max = 3.5
y_max = 3.5
camera_pos = numpy.empty([nframes, 6])
for i in range(nframes):
while True:
x = numpy.random.rand() * (x_max - x_min) + x_min
y = numpy.random.rand() * (y_max - y_min) + y_min
z = numpy.random.rand() * (z_max - z_min) + z_min
if (x ** 2 + y ** 2 + z ** 2) > 1.8 ** 2:
break
ang1, ang2, ang3 = pos_solver(x, y, z)
camera_pos[i] = np.array([x, y, z, ang1, ang2, ang3])
numpy.save(os.path.join(preprocess_dir, '%s.npy' % mode), camera_pos)
if mode in ['train', 'validate']:
expand_boundary = 160
render_t = np.random.rand(nframes) * t_range
numpy.save(os.path.join(preprocess_dir, mode + '_time.npy'), render_t)
else:
expand_boundary = 0
if test_render_t is None:
test_render_t = np.random.rand(30) * t_range
np.save(os.path.join(preprocess_dir, 'test_time.npy'), render_t)
if mode == 'test_close':
render_t = test_render_t[:5]
elif mode == 'test_far':
render_t = test_render_t[5:10]
else:
render_t = test_render_t[10:]
render_single(os.path.join(preprocess_dir, mode), 'render_mandelbulb_slim', 'none', 'none', sys.argv[1:], nframes=nframes, log_intermediates=False, render_size = (640, 960), render_kw={'render_t': render_t, 'compute_f': False, 'ground_truth_samples': 1, 'random_camera': True, 'camera_pos': camera_pos, 'zero_samples': False, 'gname': '%s_noisy' % mode, 'collect_loop_and_features': True, 'log_only_return_def_raymarching': True, 'expand_boundary': expand_boundary})
elif base_mode == 'generate_temporal_dataset':
camera_dir = os.path.join(base_dir, 'datasets/datas_mandelbulb_temporal_with_bg')
preprocess_dir = os.path.join(base_dir, 'preprocess/mandelbulb_temporal')
if not os.path.exists(camera_dir):
os.makedirs(camera_dir, exist_ok=True)
if not os.path.exists(preprocess_dir):
os.makedirs(preprocess_dir, exist_ok=True)
for mode in ['train', 'test', 'validate']:
if mode in ['train', 'validate']:
tile_start = numpy.load(os.path.join(camera_dir, mode + '_start.npy'))
render_size = (320, 320)
tile_only = True
render_t_base = numpy.load(os.path.join(camera_dir, mode + '_time.npy'))
camera_pos = numpy.load(os.path.join(camera_dir, mode + '.npy'))
t_schedule = np.arange(8)
else:
tile_start = None
render_size = (640, 960)
tile_only = False
render_t_base = numpy.load(os.path.join(camera_dir, 'test_time.npy'))
camera_pos = np.concatenate((np.load(os.path.join(camera_dir, 'test_close.npy')),
np.load(os.path.join(camera_dir, 'test_far.npy')),
np.load(os.path.join(camera_dir, 'test_middle.npy'))), axis=0)
t_schedule = [0, 1, 29]
nframes = camera_pos.shape[0]
outdir = get_shader_dirname(os.path.join(preprocess_dir, mode), shaders[0], 'none', 'none')
for t_val in t_schedule:
render_t = render_t_base + t_val / 30
render_single(os.path.join(preprocess_dir, mode), 'render_mandelbulb_slim', 'none', 'none', sys.argv[1:], nframes=nframes, log_intermediates=False, render_size = render_size, render_kw={'render_t': render_t, 'compute_f': False, 'ground_truth_samples': 1000, 'random_camera': True, 'camera_pos': camera_pos, 'zero_samples': False, 'gname': '%s_ground_%d' % (mode, t_val), 'tile_only': tile_only, 'tile_start': tile_start, 'collect_loop_and_features': True, 'log_only_return_def_raymarching': True})
target_dir = os.path.join(camera_dir, '%s_img' % mode)
if not os.path.exists(target_dir):
os.mkdir(target_dir)
for file in os.listdir(outdir):
if file.startswith('%s_ground' % mode) and file.endswith('.png'):
os.rename(os.path.join(outdir, file),
os.path.join(target_dir, file))
elif base_mode == 'generate_blur_additional':
preprocess_dir = os.path.join(base_dir, 'preprocess/mandelbulb_blur')
for mode in ['train', 'test_close', 'test_far', 'test_middle', 'validate']:
camera_pos = numpy.load(os.path.join(camera_dir, mode + '.npy'))
nframes = camera_pos.shape[0]
if mode in ['train', 'validate']:
tile_start = numpy.load(os.path.join(camera_dir, mode + '_start.npy'))[:nframes]
render_size = (320, 320)
tile_only = True
render_t = numpy.load(os.path.join(camera_dir, mode + '_time.npy'))
else:
tile_start = None
render_size = (640, 960)
tile_only = False
render_t_pool = numpy.load(os.path.join(camera_dir, 'test_time.npy'))
if mode == 'test_close':
render_t = render_t_pool[:5]
elif mode == 'test_far':
render_t = render_t_pool[5:10]
else:
render_t = render_t_pool[10:]
render_t = render_t[:nframes]
render_single(os.path.join(preprocess_dir, mode), 'render_mandelbulb_slim', 'none', 'none', sys.argv[1:], nframes=nframes, log_intermediates=True, render_size = render_size, render_kw={'render_t': render_t, 'compute_f': False, 'ground_truth_samples': 1, 'random_camera': True, 'camera_pos': camera_pos, 'zero_samples': False, 'gname': '%s_noisy' % mode, 'tile_only': tile_only, 'tile_start': tile_start, 'collect_loop_and_features': True, 'log_only_return_def_raymarching': True, 'log_t_ray': True, 'log_intermediates_level': 2})
return
if __name__ == '__main__':
main()
| true | true |
f72a9f3994030f9517b36005ab3842f621032778 | 4,599 | py | Python | aliddns.py | k4nzdroid/ddns-client | d0177c17da145827a8b08800adc21f7f3e196b43 | [
"Apache-2.0"
] | null | null | null | aliddns.py | k4nzdroid/ddns-client | d0177c17da145827a8b08800adc21f7f3e196b43 | [
"Apache-2.0"
] | null | null | null | aliddns.py | k4nzdroid/ddns-client | d0177c17da145827a8b08800adc21f7f3e196b43 | [
"Apache-2.0"
] | null | null | null | from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.acs_exception.exceptions import ClientException
from aliyunsdkcore.acs_exception.exceptions import ServerException
from aliyunsdkalidns.request.v20150109.DescribeSubDomainRecordsRequest import DescribeSubDomainRecordsRequest
from aliyunsdkalidns.request.v20150109.DescribeDomainRecordsRequest import DescribeDomainRecordsRequest
import requests
from urllib.request import urlopen
import json
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--access-key-id')
parser.add_argument('--access-key-secret')
parser.add_argument('--domain-name')
parser.add_argument('--host')
args = parser.parse_args()
print(args)
accessKeyId = args.access_key_id
accessSecret = args.access_key_secret
domain = args.domain_name
ipv4_flag = 1
name_ipv4 = args.host
ipv6_flag = 0 # 是否开启ipv6 ddns解析,1为开启,0为关闭
name_ipv6 = "ipv6.test" # 要进行ipv6 ddns解析的子域名
client = AcsClient(accessKeyId, accessSecret, 'cn-hangzhou')
def update(RecordId, RR, Type, Value): # 修改域名解析记录
from aliyunsdkalidns.request.v20150109.UpdateDomainRecordRequest import UpdateDomainRecordRequest
request = UpdateDomainRecordRequest()
request.set_accept_format('json')
request.set_RecordId(RecordId)
request.set_RR(RR)
request.set_Type(Type)
request.set_Value(Value)
response = client.do_action_with_exception(request)
def add(DomainName, RR, Type, Value): # 添加新的域名解析记录
from aliyunsdkalidns.request.v20150109.AddDomainRecordRequest import AddDomainRecordRequest
request = AddDomainRecordRequest()
request.set_accept_format('json')
request.set_DomainName(DomainName)
request.set_RR(RR) # https://blog.zeruns.tech
request.set_Type(Type)
request.set_Value(Value)
response = client.do_action_with_exception(request)
if ipv4_flag == 1:
request = DescribeSubDomainRecordsRequest()
request.set_accept_format('json')
request.set_DomainName(domain)
request.set_SubDomain(name_ipv4 + '.' + domain)
response = client.do_action_with_exception(request) # 获取域名解析记录列表
domain_list = json.loads(response) # 将返回的JSON数据转化为Python能识别的
ip = urlopen('https://api-ipv4.ip.sb/ip').read() # 使用 IP.SB 的接口获取ipv4地址
ipv4 = str(ip, encoding='utf-8')
print("当前 IPv4 地址:%s" % ipv4)
if domain_list['TotalCount'] == 0:
add(domain, name_ipv4, "A", ipv4)
print("新建域名解析成功")
elif domain_list['TotalCount'] == 1:
if domain_list['DomainRecords']['Record'][0]['Value'].strip() != ipv4.strip():
update(domain_list['DomainRecords']['Record'][0]['RecordId'], name_ipv4, "A", ipv4)
print("修改域名解析成功")
else: # https://blog.zeruns.tech
print("IPv4地址没变")
elif domain_list['TotalCount'] > 1:
from aliyunsdkalidns.request.v20150109.DeleteSubDomainRecordsRequest import DeleteSubDomainRecordsRequest
request = DeleteSubDomainRecordsRequest()
request.set_accept_format('json')
request.set_DomainName(domain) # https://blog.zeruns.tech
request.set_RR(name_ipv4)
response = client.do_action_with_exception(request)
add(domain, name_ipv4, "A", ipv4)
print("修改域名解析成功")
if ipv6_flag == 1:
request = DescribeSubDomainRecordsRequest()
request.set_accept_format('json')
request.set_DomainName(domain)
request.set_SubDomain(name_ipv6 + '.' + domain)
response = client.do_action_with_exception(request) # 获取域名解析记录列表
domain_list = json.loads(response) # 将返回的JSON数据转化为Python能识别的
ip = urlopen('https://api-ipv6.ip.sb/ip').read() # 使用IP.SB的接口获取ipv6地址
ipv6 = str(ip, encoding='utf-8')
print("获取到IPv6地址:%s" % ipv6)
if domain_list['TotalCount'] == 0:
add(domain, name_ipv6, "AAAA", ipv6)
print("新建域名解析成功")
elif domain_list['TotalCount'] == 1:
if domain_list['DomainRecords']['Record'][0]['Value'].strip() != ipv6.strip():
update(domain_list['DomainRecords']['Record'][0]['RecordId'], name_ipv6, "AAAA", ipv6)
print("修改域名解析成功")
else: # https://blog.zeruns.tech
print("IPv6地址没变")
elif domain_list['TotalCount'] > 1:
from aliyunsdkalidns.request.v20150109.DeleteSubDomainRecordsRequest import DeleteSubDomainRecordsRequest
request = DeleteSubDomainRecordsRequest()
request.set_accept_format('json')
request.set_DomainName(domain)
request.set_RR(name_ipv6) # https://blog.zeruns.tech
response = client.do_action_with_exception(request)
add(domain, name_ipv6, "AAAA", ipv6)
print("修改域名解析成功")
| 38.974576 | 113 | 0.719069 | from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.acs_exception.exceptions import ClientException
from aliyunsdkcore.acs_exception.exceptions import ServerException
from aliyunsdkalidns.request.v20150109.DescribeSubDomainRecordsRequest import DescribeSubDomainRecordsRequest
from aliyunsdkalidns.request.v20150109.DescribeDomainRecordsRequest import DescribeDomainRecordsRequest
import requests
from urllib.request import urlopen
import json
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--access-key-id')
parser.add_argument('--access-key-secret')
parser.add_argument('--domain-name')
parser.add_argument('--host')
args = parser.parse_args()
print(args)
accessKeyId = args.access_key_id
accessSecret = args.access_key_secret
domain = args.domain_name
ipv4_flag = 1
name_ipv4 = args.host
ipv6_flag = 0
name_ipv6 = "ipv6.test"
client = AcsClient(accessKeyId, accessSecret, 'cn-hangzhou')
def update(RecordId, RR, Type, Value):
from aliyunsdkalidns.request.v20150109.UpdateDomainRecordRequest import UpdateDomainRecordRequest
request = UpdateDomainRecordRequest()
request.set_accept_format('json')
request.set_RecordId(RecordId)
request.set_RR(RR)
request.set_Type(Type)
request.set_Value(Value)
response = client.do_action_with_exception(request)
def add(DomainName, RR, Type, Value):
from aliyunsdkalidns.request.v20150109.AddDomainRecordRequest import AddDomainRecordRequest
request = AddDomainRecordRequest()
request.set_accept_format('json')
request.set_DomainName(DomainName)
request.set_RR(RR)
request.set_Type(Type)
request.set_Value(Value)
response = client.do_action_with_exception(request)
if ipv4_flag == 1:
request = DescribeSubDomainRecordsRequest()
request.set_accept_format('json')
request.set_DomainName(domain)
request.set_SubDomain(name_ipv4 + '.' + domain)
response = client.do_action_with_exception(request)
domain_list = json.loads(response)
ip = urlopen('https://api-ipv4.ip.sb/ip').read()
ipv4 = str(ip, encoding='utf-8')
print("当前 IPv4 地址:%s" % ipv4)
if domain_list['TotalCount'] == 0:
add(domain, name_ipv4, "A", ipv4)
print("新建域名解析成功")
elif domain_list['TotalCount'] == 1:
if domain_list['DomainRecords']['Record'][0]['Value'].strip() != ipv4.strip():
update(domain_list['DomainRecords']['Record'][0]['RecordId'], name_ipv4, "A", ipv4)
print("修改域名解析成功")
else:
print("IPv4地址没变")
elif domain_list['TotalCount'] > 1:
from aliyunsdkalidns.request.v20150109.DeleteSubDomainRecordsRequest import DeleteSubDomainRecordsRequest
request = DeleteSubDomainRecordsRequest()
request.set_accept_format('json')
request.set_DomainName(domain)
request.set_RR(name_ipv4)
response = client.do_action_with_exception(request)
add(domain, name_ipv4, "A", ipv4)
print("修改域名解析成功")
if ipv6_flag == 1:
request = DescribeSubDomainRecordsRequest()
request.set_accept_format('json')
request.set_DomainName(domain)
request.set_SubDomain(name_ipv6 + '.' + domain)
response = client.do_action_with_exception(request)
domain_list = json.loads(response)
ip = urlopen('https://api-ipv6.ip.sb/ip').read()
ipv6 = str(ip, encoding='utf-8')
print("获取到IPv6地址:%s" % ipv6)
if domain_list['TotalCount'] == 0:
add(domain, name_ipv6, "AAAA", ipv6)
print("新建域名解析成功")
elif domain_list['TotalCount'] == 1:
if domain_list['DomainRecords']['Record'][0]['Value'].strip() != ipv6.strip():
update(domain_list['DomainRecords']['Record'][0]['RecordId'], name_ipv6, "AAAA", ipv6)
print("修改域名解析成功")
else:
print("IPv6地址没变")
elif domain_list['TotalCount'] > 1:
from aliyunsdkalidns.request.v20150109.DeleteSubDomainRecordsRequest import DeleteSubDomainRecordsRequest
request = DeleteSubDomainRecordsRequest()
request.set_accept_format('json')
request.set_DomainName(domain)
request.set_RR(name_ipv6)
response = client.do_action_with_exception(request)
add(domain, name_ipv6, "AAAA", ipv6)
print("修改域名解析成功")
| true | true |
f72a9f4ec04b375aa26c8218249e75c1e2a2db4d | 3,486 | py | Python | bindings/python/ensmallen/datasets/string/halanaerobiumkushneri.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z | bindings/python/ensmallen/datasets/string/halanaerobiumkushneri.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/halanaerobiumkushneri.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph Halanaerobium kushneri.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def HalanaerobiumKushneri(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Halanaerobium kushneri graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Wether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
Wether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instace of Halanaerobium kushneri graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="HalanaerobiumKushneri",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 33.2 | 223 | 0.679002 | from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph
def HalanaerobiumKushneri(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
return AutomaticallyRetrievedGraph(
graph_name="HalanaerobiumKushneri",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| true | true |
f72aa007710dbb5ce8eb5bd9d8566a13f57787d4 | 36,004 | py | Python | tests/i18n/test_extraction.py | MikeAmy/django | 00cb9e13b4cf06ed2be27ee9e7fc18969ae69f7d | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2017-08-30T06:46:16.000Z | 2017-08-30T06:46:16.000Z | tests/i18n/test_extraction.py | MikeAmy/django | 00cb9e13b4cf06ed2be27ee9e7fc18969ae69f7d | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | tests/i18n/test_extraction.py | MikeAmy/django | 00cb9e13b4cf06ed2be27ee9e7fc18969ae69f7d | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2018-07-23T12:13:04.000Z | 2018-07-23T12:13:04.000Z | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import io
import os
import re
import shutil
import time
import warnings
from unittest import SkipTest, skipUnless
from django.conf import settings
from django.core import management
from django.core.management import execute_from_command_line
from django.core.management.base import CommandError
from django.core.management.commands.makemessages import \
Command as MakeMessagesCommand
from django.core.management.utils import find_command
from django.test import SimpleTestCase, mock, override_settings
from django.test.testcases import SerializeMixin
from django.test.utils import captured_stderr, captured_stdout
from django.utils import six
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.six import StringIO
from django.utils.translation import TranslatorCommentWarning
# Target locale used by every extraction test in this module.
LOCALE = 'de'

# Truthy when the xgettext binary is available on PATH; the extraction test
# classes below are skipped entirely without it (see the skipUnless decorators).
has_xgettext = find_command('xgettext')

# Absolute path of the directory containing this test module; the fixture
# project lives in its 'commands' subdirectory.
this_directory = os.path.dirname(upath(__file__))
@skipUnless(has_xgettext, 'xgettext is mandatory for extraction tests')
class ExtractorTests(SerializeMixin, SimpleTestCase):
    """
    Base test case providing helpers to run makemessages inside the fixture
    directory and to make assertions about the generated .po file contents.
    """

    # makemessages scans the current working directory and writes in the
    # locale subdirectory. There aren't any options to control this. As a
    # consequence tests can't run in parallel. Since i18n tests run in less
    # than 4 seconds, serializing them with SerializeMixin is acceptable.
    lockfile = __file__

    test_dir = os.path.abspath(os.path.join(this_directory, 'commands'))

    PO_FILE = 'locale/%s/LC_MESSAGES/django.po' % LOCALE

    def setUp(self):
        # Remember the original working directory so tearDown() can restore it.
        self._cwd = os.getcwd()

    def _rmrf(self, dname):
        """Recursively delete ``dname``, but only if it lives under test_dir."""
        if os.path.commonprefix([self.test_dir, os.path.abspath(dname)]) != self.test_dir:
            return
        shutil.rmtree(dname)

    def rmfile(self, filepath):
        """Delete ``filepath`` if it exists (no-op otherwise)."""
        if os.path.exists(filepath):
            os.remove(filepath)

    def tearDown(self):
        os.chdir(self.test_dir)
        try:
            self._rmrf('locale/%s' % LOCALE)
        except OSError:
            pass
        os.chdir(self._cwd)

    def _run_makemessages(self, **options):
        """
        Run makemessages for LOCALE inside test_dir and return a tuple
        (command output, contents of the generated .po file).
        """
        os.chdir(self.test_dir)
        out = StringIO()
        management.call_command('makemessages', locale=[LOCALE], verbosity=2,
                                stdout=out, **options)
        output = out.getvalue()
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = fp.read()
        return output, po_contents

    def _assertPoKeyword(self, keyword, expected_value, haystack, use_quotes=True):
        """
        Assert that a line of the form ``<keyword> "<expected_value>"`` (or
        unquoted, when use_quotes=False) exists in the .po contents ``haystack``.
        """
        q = '"'
        if use_quotes:
            expected_value = '"%s"' % expected_value
            q = "'"
        needle = '%s %s' % (keyword, expected_value)
        # Escape after building the human-readable needle for the error message.
        expected_value = re.escape(expected_value)
        return self.assertTrue(re.search('^%s %s' % (keyword, expected_value), haystack, re.MULTILINE),
                               'Could not find %(q)s%(n)s%(q)s in generated PO file' % {'n': needle, 'q': q})

    def assertMsgId(self, msgid, haystack, use_quotes=True):
        return self._assertPoKeyword('msgid', msgid, haystack, use_quotes=use_quotes)

    def assertMsgIdPlural(self, msgid, haystack, use_quotes=True):
        return self._assertPoKeyword('msgid_plural', msgid, haystack, use_quotes=use_quotes)

    def assertMsgStr(self, msgstr, haystack, use_quotes=True):
        return self._assertPoKeyword('msgstr', msgstr, haystack, use_quotes=use_quotes)

    def assertNotMsgId(self, msgid, s, use_quotes=True):
        if use_quotes:
            msgid = '"%s"' % msgid
        msgid = re.escape(msgid)
        return self.assertTrue(not re.search('^msgid %s' % msgid, s, re.MULTILINE))

    def _assertPoLocComment(self, assert_presence, po_filename, line_number, *comment_parts):
        """
        Shared implementation for assertLocationComment(Not)Present: build the
        expected gettext location comment and check its (non-)presence.
        """
        with open(po_filename, 'r') as fp:
            po_contents = force_text(fp.read())
        if os.name == 'nt':
            # #: .\path\to\file.html:123
            cwd_prefix = '%s%s' % (os.curdir, os.sep)
        else:
            # #: path/to/file.html:123
            cwd_prefix = ''

        path = os.path.join(cwd_prefix, *comment_parts)
        parts = ['#: ', path]

        if isinstance(line_number, six.string_types):
            # A string token means "find the line containing this token in the
            # source file and use that line number".
            line_number = self._get_token_line_number(path, line_number)
        if line_number is not None:
            parts.append(':%d' % line_number)

        needle = ''.join(parts)
        if assert_presence:
            return self.assertIn(needle, po_contents, '"%s" not found in final .po file.' % needle)
        else:
            return self.assertNotIn(needle, po_contents, '"%s" shouldn\'t be in final .po file.' % needle)

    def _get_token_line_number(self, path, token):
        """Return the 1-based number of the first line of ``path`` containing ``token``."""
        with open(path) as f:
            for line, content in enumerate(f, 1):
                if token in force_text(content):
                    return line
        self.fail("The token '%s' could not be found in %s, please check the test config" % (token, path))

    def assertLocationCommentPresent(self, po_filename, line_number, *comment_parts):
        r"""
        self.assertLocationCommentPresent('django.po', 42, 'dirA', 'dirB', 'foo.py')

        verifies that the django.po file has a gettext-style location comment of the form

        `#: dirA/dirB/foo.py:42`

        (or `#: .\dirA\dirB\foo.py:42` on Windows)

        None can be passed for the line_number argument to skip checking of
        the :42 suffix part.
        A string token can also be passed as line_number, in which case it
        will be searched in the template, and its line number will be used.
        A msgid is a suitable candidate.
        """
        # Raw docstring: the backslashes in the Windows example would otherwise
        # be invalid escape sequences.
        return self._assertPoLocComment(True, po_filename, line_number, *comment_parts)

    def assertLocationCommentNotPresent(self, po_filename, line_number, *comment_parts):
        """Check the opposite of assertLocationComment()"""
        return self._assertPoLocComment(False, po_filename, line_number, *comment_parts)

    def assertRecentlyModified(self, path):
        """
        Assert that file was recently modified (modification time was less than 10 seconds ago).
        """
        delta = time.time() - os.stat(path).st_mtime
        # The msg argument is only displayed on failure, i.e. when the file
        # was *not* recently modified, so it must describe that situation.
        self.assertLess(delta, 10, "%s wasn't recently modified" % path)

    def assertNotRecentlyModified(self, path):
        """
        Assert that file was not recently modified (modification time was more than 10 seconds ago).
        """
        delta = time.time() - os.stat(path).st_mtime
        # Displayed on failure, i.e. when the file *was* recently modified.
        self.assertGreater(delta, 10, "%s was recently modified" % path)
class BasicExtractorTests(ExtractorTests):
    def test_comments_extractor(self):
        """
        Translator-targeted comments (``Translators: ...``) are extracted into
        ``#.`` comment lines of the .po file; other comments are ignored.
        """
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE))
        # The file is read as UTF-8 because some expected comments contain
        # non-ASCII characters.
        with io.open(self.PO_FILE, 'r', encoding='utf-8') as fp:
            po_contents = fp.read()
            self.assertNotIn('This comment should not be extracted', po_contents)

            # Comments in templates
            self.assertIn('#. Translators: This comment should be extracted', po_contents)
            self.assertIn(
                "#. Translators: Django comment block for translators\n#. "
                "string's meaning unveiled",
                po_contents
            )
            self.assertIn('#. Translators: One-line translator comment #1', po_contents)
            self.assertIn('#. Translators: Two-line translator comment #1\n#. continued here.', po_contents)
            self.assertIn('#. Translators: One-line translator comment #2', po_contents)
            self.assertIn('#. Translators: Two-line translator comment #2\n#. continued here.', po_contents)
            self.assertIn('#. Translators: One-line translator comment #3', po_contents)
            self.assertIn('#. Translators: Two-line translator comment #3\n#. continued here.', po_contents)
            self.assertIn('#. Translators: One-line translator comment #4', po_contents)
            self.assertIn('#. Translators: Two-line translator comment #4\n#. continued here.', po_contents)
            self.assertIn(
                '#. Translators: One-line translator comment #5 -- with '
                'non ASCII characters: áéíóúö',
                po_contents
            )
            self.assertIn(
                '#. Translators: Two-line translator comment #5 -- with '
                'non ASCII characters: áéíóúö\n#. continued here.',
                po_contents
            )
def test_blocktrans_trimmed(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
# should not be trimmed
self.assertNotMsgId('Text with a few line breaks.', po_contents)
# should be trimmed
self.assertMsgId("Again some text with a few line breaks, this time should be trimmed.", po_contents)
# #21406 -- Should adjust for eaten line numbers
self.assertMsgId("Get my line number", po_contents)
self.assertLocationCommentPresent(self.PO_FILE, 'Get my line number', 'templates', 'test.html')
def test_force_en_us_locale(self):
"""Value of locale-munging option used by the command is the right one"""
self.assertTrue(MakeMessagesCommand.leave_locale_alone)
def test_extraction_error(self):
os.chdir(self.test_dir)
msg = (
'Translation blocks must not include other block tags: blocktrans '
'(file %s, line 3)' % os.path.join('templates', 'template_with_error.tpl')
)
with self.assertRaisesMessage(SyntaxError, msg):
management.call_command('makemessages', locale=[LOCALE], extensions=['tpl'], verbosity=0)
# Check that the temporary file was cleaned up
self.assertFalse(os.path.exists('./templates/template_with_error.tpl.py'))
def test_unicode_decode_error(self):
os.chdir(self.test_dir)
shutil.copyfile('./not_utf8.sample', './not_utf8.txt')
self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'not_utf8.txt'))
out = StringIO()
management.call_command('makemessages', locale=[LOCALE], stdout=out)
self.assertIn("UnicodeDecodeError: skipped file not_utf8.txt in .",
force_text(out.getvalue()))
def test_extraction_warning(self):
"""test xgettext warning about multiple bare interpolation placeholders"""
os.chdir(self.test_dir)
shutil.copyfile('./code.sample', './code_sample.py')
self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'code_sample.py'))
out = StringIO()
management.call_command('makemessages', locale=[LOCALE], stdout=out)
self.assertIn("code_sample.py:4", force_text(out.getvalue()))
    def test_template_message_context_extractor(self):
        """
        Ensure that message contexts are correctly extracted for the
        {% trans %} and {% blocktrans %} template tags.
        Refs #14806.
        """
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = force_text(fp.read())
            # Each extracted context appears as a msgctxt line in the .po file,
            # alongside the corresponding msgid.
            # {% trans %}
            self.assertIn('msgctxt "Special trans context #1"', po_contents)
            self.assertMsgId("Translatable literal #7a", po_contents)
            self.assertIn('msgctxt "Special trans context #2"', po_contents)
            self.assertMsgId("Translatable literal #7b", po_contents)
            self.assertIn('msgctxt "Special trans context #3"', po_contents)
            self.assertMsgId("Translatable literal #7c", po_contents)

            # {% trans %} with a filter
            for minor_part in 'abcdefgh':  # Iterate from #7.1a to #7.1h template markers
                self.assertIn('msgctxt "context #7.1{}"'.format(minor_part), po_contents)
                self.assertMsgId('Translatable literal #7.1{}'.format(minor_part), po_contents)

            # {% blocktrans %}
            self.assertIn('msgctxt "Special blocktrans context #1"', po_contents)
            self.assertMsgId("Translatable literal #8a", po_contents)
            self.assertIn('msgctxt "Special blocktrans context #2"', po_contents)
            self.assertMsgId("Translatable literal #8b-singular", po_contents)
            self.assertIn("Translatable literal #8b-plural", po_contents)
            self.assertIn('msgctxt "Special blocktrans context #3"', po_contents)
            self.assertMsgId("Translatable literal #8c-singular", po_contents)
            self.assertIn("Translatable literal #8c-plural", po_contents)
            self.assertIn('msgctxt "Special blocktrans context #4"', po_contents)
            self.assertMsgId("Translatable literal #8d %(a)s", po_contents)
def test_context_in_single_quotes(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
# {% trans %}
self.assertIn('msgctxt "Context wrapped in double quotes"', po_contents)
self.assertIn('msgctxt "Context wrapped in single quotes"', po_contents)
# {% blocktrans %}
self.assertIn('msgctxt "Special blocktrans context wrapped in double quotes"', po_contents)
self.assertIn('msgctxt "Special blocktrans context wrapped in single quotes"', po_contents)
def test_template_comments(self):
"""Template comment tags on the same line of other constructs (#19552)"""
os.chdir(self.test_dir)
# Test detection/end user reporting of old, incorrect templates
# translator comments syntax
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter('always')
management.call_command('makemessages', locale=[LOCALE], extensions=['thtml'], verbosity=0)
self.assertEqual(len(ws), 3)
for w in ws:
self.assertTrue(issubclass(w.category, TranslatorCommentWarning))
six.assertRegex(
self, str(ws[0].message),
r"The translator-targeted comment 'Translators: ignored i18n "
r"comment #1' \(file templates[/\\]comments.thtml, line 4\) "
r"was ignored, because it wasn't the last item on the line\."
)
six.assertRegex(
self, str(ws[1].message),
r"The translator-targeted comment 'Translators: ignored i18n "
r"comment #3' \(file templates[/\\]comments.thtml, line 6\) "
r"was ignored, because it wasn't the last item on the line\."
)
six.assertRegex(
self, str(ws[2].message),
r"The translator-targeted comment 'Translators: ignored i18n "
r"comment #4' \(file templates[/\\]comments.thtml, line 8\) "
"was ignored, because it wasn't the last item on the line\."
)
# Now test .po file contents
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('Translatable literal #9a', po_contents)
self.assertNotIn('ignored comment #1', po_contents)
self.assertNotIn('Translators: ignored i18n comment #1', po_contents)
self.assertMsgId("Translatable literal #9b", po_contents)
self.assertNotIn('ignored i18n comment #2', po_contents)
self.assertNotIn('ignored comment #2', po_contents)
self.assertMsgId('Translatable literal #9c', po_contents)
self.assertNotIn('ignored comment #3', po_contents)
self.assertNotIn('ignored i18n comment #3', po_contents)
self.assertMsgId('Translatable literal #9d', po_contents)
self.assertNotIn('ignored comment #4', po_contents)
self.assertMsgId('Translatable literal #9e', po_contents)
self.assertNotIn('ignored comment #5', po_contents)
self.assertNotIn('ignored i18n comment #4', po_contents)
self.assertMsgId('Translatable literal #9f', po_contents)
self.assertIn('#. Translators: valid i18n comment #5', po_contents)
self.assertMsgId('Translatable literal #9g', po_contents)
self.assertIn('#. Translators: valid i18n comment #6', po_contents)
self.assertMsgId('Translatable literal #9h', po_contents)
self.assertIn('#. Translators: valid i18n comment #7', po_contents)
self.assertMsgId('Translatable literal #9i', po_contents)
six.assertRegex(self, po_contents, r'#\..+Translators: valid i18n comment #8')
six.assertRegex(self, po_contents, r'#\..+Translators: valid i18n comment #9')
self.assertMsgId("Translatable literal #9j", po_contents)
def test_makemessages_find_files(self):
"""
Test that find_files only discover files having the proper extensions.
"""
cmd = MakeMessagesCommand()
cmd.ignore_patterns = ['CVS', '.*', '*~', '*.pyc']
cmd.symlinks = False
cmd.domain = 'django'
cmd.extensions = ['html', 'txt', 'py']
cmd.verbosity = 0
cmd.locale_paths = []
cmd.default_locale_path = os.path.join(self.test_dir, 'locale')
found_files = cmd.find_files(self.test_dir)
found_exts = set([os.path.splitext(tfile.file)[1] for tfile in found_files])
self.assertEqual(found_exts.difference({'.py', '.html', '.txt'}), set())
cmd.extensions = ['js']
cmd.domain = 'djangojs'
found_files = cmd.find_files(self.test_dir)
found_exts = set([os.path.splitext(tfile.file)[1] for tfile in found_files])
self.assertEqual(found_exts.difference({'.js'}), set())
    @mock.patch('django.core.management.commands.makemessages.popen_wrapper')
    def test_makemessages_gettext_version(self, mocked_popen_wrapper):
        """gettext_version parses `xgettext --version` output into a tuple."""
        # "Normal" output:
        mocked_popen_wrapper.return_value = (
            "xgettext (GNU gettext-tools) 0.18.1\n"
            "Copyright (C) 1995-1998, 2000-2010 Free Software Foundation, Inc.\n"
            "License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>\n"
            "This is free software: you are free to change and redistribute it.\n"
            "There is NO WARRANTY, to the extent permitted by law.\n"
            "Written by Ulrich Drepper.\n", '', 0)
        cmd = MakeMessagesCommand()
        self.assertEqual(cmd.gettext_version, (0, 18, 1))
        # Version number with only 2 parts (#23788)
        mocked_popen_wrapper.return_value = (
            "xgettext (GNU gettext-tools) 0.17\n", '', 0)
        # gettext_version is evaluated per instance, so a fresh command is
        # needed after each change to the mocked output.
        cmd = MakeMessagesCommand()
        self.assertEqual(cmd.gettext_version, (0, 17))
        # Bad version output
        mocked_popen_wrapper.return_value = (
            "any other return value\n", '', 0)
        cmd = MakeMessagesCommand()
        # Unparseable output must raise CommandError rather than return junk.
        with six.assertRaisesRegex(self, CommandError, "Unable to get gettext version. Is it installed?"):
            cmd.gettext_version
    def test_po_file_encoding_when_updating(self):
        """Update of PO file doesn't corrupt it with non-UTF-8 encoding on Python3+Windows (#23271)"""
        BR_PO_BASE = 'locale/pt_BR/LC_MESSAGES/django'
        os.chdir(self.test_dir)
        # Start from a pristine .po copy that contains a non-ASCII msgstr.
        shutil.copyfile(BR_PO_BASE + '.pristine', BR_PO_BASE + '.po')
        self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'locale', 'pt_BR', 'LC_MESSAGES', 'django.po'))
        management.call_command('makemessages', locale=['pt_BR'], verbosity=0)
        self.assertTrue(os.path.exists(BR_PO_BASE + '.po'))
        # The updated file must still decode as UTF-8 and keep the umlaut.
        with io.open(BR_PO_BASE + '.po', 'r', encoding='utf-8') as fp:
            po_contents = force_text(fp.read())
            self.assertMsgStr("Größe", po_contents)
class JavascriptExtractorTests(ExtractorTests):
    """Extraction from JavaScript sources via the 'djangojs' domain."""

    # JavaScript catalogs live in djangojs.po rather than django.po.
    PO_FILE = 'locale/%s/LC_MESSAGES/djangojs.po' % LOCALE

    def test_javascript_literals(self):
        """All gettext-style literals in the JS fixtures are extracted."""
        os.chdir(self.test_dir)
        _, po_contents = self._run_makemessages(domain='djangojs')
        self.assertMsgId('This literal should be included.', po_contents)
        self.assertMsgId('gettext_noop should, too.', po_contents)
        self.assertMsgId('This one as well.', po_contents)
        self.assertMsgId(r'He said, \"hello\".', po_contents)
        self.assertMsgId("okkkk", po_contents)
        self.assertMsgId("TEXT", po_contents)
        self.assertMsgId("It's at http://example.com", po_contents)
        self.assertMsgId("String", po_contents)
        # A known limitation: comment-like text inside a string literal is
        # still extracted as part of the literal.
        self.assertMsgId("/* but this one will be too */ 'cause there is no way of telling...", po_contents)
        self.assertMsgId("foo", po_contents)
        self.assertMsgId("bar", po_contents)
        self.assertMsgId("baz", po_contents)
        self.assertMsgId("quz", po_contents)
        self.assertMsgId("foobar", po_contents)

    @override_settings(
        STATIC_ROOT=os.path.join(this_directory, 'commands', 'static/'),
        MEDIA_ROOT=os.path.join(this_directory, 'commands', 'media_root/'))
    def test_media_static_dirs_ignored(self):
        """
        Regression test for #23583.
        """
        # STATIC_ROOT/MEDIA_ROOT contents must be excluded from extraction.
        _, po_contents = self._run_makemessages(domain='djangojs')
        self.assertMsgId("Static content inside app should be included.", po_contents)
        self.assertNotMsgId("Content from STATIC_ROOT should not be included", po_contents)

    @override_settings(STATIC_ROOT=None, MEDIA_ROOT='')
    def test_default_root_settings(self):
        """
        Regression test for #23717.
        """
        # Unset/empty root settings must not break extraction.
        _, po_contents = self._run_makemessages(domain='djangojs')
        self.assertMsgId("Static content inside app should be included.", po_contents)
class IgnoredExtractorTests(ExtractorTests):
    """--ignore patterns exclude matching files and directories from extraction."""

    def test_ignore_directory(self):
        """A whole directory can be skipped with 'dir/*'."""
        stdout, contents = self._run_makemessages(ignore_patterns=[
            os.path.join('ignore_dir', '*'),
        ])
        self.assertIn("ignoring directory ignore_dir", stdout)
        self.assertMsgId('This literal should be included.', contents)
        self.assertNotMsgId('This should be ignored.', contents)

    def test_ignore_subdirectory(self):
        """Patterns match at any depth, not just the top level."""
        stdout, contents = self._run_makemessages(ignore_patterns=[
            'templates/*/ignore.html',
            'templates/subdir/*',
        ])
        self.assertIn("ignoring directory subdir", stdout)
        self.assertNotMsgId('This subdir should be ignored too.', contents)

    def test_ignore_file_patterns(self):
        """Glob patterns can target individual files."""
        stdout, contents = self._run_makemessages(ignore_patterns=[
            'xxx_*',
        ])
        self.assertIn("ignoring file xxx_ignored.html", stdout)
        self.assertNotMsgId('This should be ignored too.', contents)

    @override_settings(
        STATIC_ROOT=os.path.join(this_directory, 'commands', 'static/'),
        MEDIA_ROOT=os.path.join(this_directory, 'commands', 'media_root/'))
    def test_media_static_dirs_ignored(self):
        """STATIC_ROOT and MEDIA_ROOT are ignored implicitly."""
        stdout, _ = self._run_makemessages()
        self.assertIn("ignoring directory static", stdout)
        self.assertIn("ignoring directory media_root", stdout)
class SymlinkExtractorTests(ExtractorTests):
    """Extraction follows symlinked directories when --symlinks is given."""

    def setUp(self):
        super(SymlinkExtractorTests, self).setUp()
        # Path of the symlink the test creates; removed again in tearDown().
        self.symlinked_dir = os.path.join(self.test_dir, 'templates_symlinked')

    def tearDown(self):
        super(SymlinkExtractorTests, self).tearDown()
        os.chdir(self.test_dir)
        try:
            os.remove(self.symlinked_dir)
        except OSError:
            # Best-effort cleanup: the link may never have been created.
            pass
        os.chdir(self._cwd)

    def test_symlink(self):
        """Literals inside a symlinked templates dir are extracted with their
        symlinked path recorded in the .po location comments."""
        # On Python < 3.2 os.symlink() exists only on Unix
        if hasattr(os, 'symlink'):
            if os.path.exists(self.symlinked_dir):
                # A leftover from a previous run must at least be a link.
                self.assertTrue(os.path.islink(self.symlinked_dir))
            else:
                # On Python >= 3.2) os.symlink() exists always but then can
                # fail at runtime when user hasn't the needed permissions on
                # Windows versions that support symbolink links (>= 6/Vista).
                # See Python issue 9333 (http://bugs.python.org/issue9333).
                # Skip the test in that case
                try:
                    os.symlink(os.path.join(self.test_dir, 'templates'), self.symlinked_dir)
                except (OSError, NotImplementedError):
                    raise SkipTest("os.symlink() is available on this OS but can't be used by this user.")
            os.chdir(self.test_dir)
            management.call_command('makemessages', locale=[LOCALE], verbosity=0, symlinks=True)
            self.assertTrue(os.path.exists(self.PO_FILE))
            with open(self.PO_FILE, 'r') as fp:
                po_contents = force_text(fp.read())
                self.assertMsgId('This literal should be included.', po_contents)
                # Location comment must use the symlinked path.
                self.assertIn('templates_symlinked/test.html', po_contents)
class CopyPluralFormsExtractorTests(ExtractorTests):
    """Handling of the Plural-Forms header when generating/updating .po files."""

    PO_FILE_ES = 'locale/es/LC_MESSAGES/django.po'

    def tearDown(self):
        super(CopyPluralFormsExtractorTests, self).tearDown()
        os.chdir(self.test_dir)
        try:
            # The base class only removes the default LOCALE; this class also
            # creates an 'es' catalog that must be cleaned up.
            self._rmrf('locale/es')
        except OSError:
            pass
        os.chdir(self._cwd)

    def test_copy_plural_forms(self):
        """The Plural-Forms header is copied from Django's own catalog."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = force_text(fp.read())
            self.assertIn('Plural-Forms: nplurals=2; plural=(n != 1)', po_contents)

    def test_override_plural_forms(self):
        """Ticket #20311."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=['es'], extensions=['djtpl'], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE_ES))
        with io.open(self.PO_FILE_ES, 'r', encoding='utf-8') as fp:
            po_contents = fp.read()
            # Exactly one Plural-Forms header must survive the merge.
            found = re.findall(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', po_contents, re.MULTILINE | re.DOTALL)
            self.assertEqual(1, len(found))

    def test_trans_and_plural_blocktrans_collision(self):
        """
        Ensures a correct workaround for the gettext bug when handling a literal
        found inside a {% trans %} tag and also in another file inside a
        {% blocktrans %} with a plural (#17375).
        """
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], extensions=['html', 'djtpl'], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = force_text(fp.read())
            # msgcat's conflict marker must not leak into the merged catalog.
            self.assertNotIn("#-#-#-#-# django.pot (PACKAGE VERSION) #-#-#-#-#\\n", po_contents)
            self.assertMsgId('First `trans`, then `blocktrans` with a plural', po_contents)
            self.assertMsgIdPlural('Plural for a `trans` and `blocktrans` collision case', po_contents)
class NoWrapExtractorTests(ExtractorTests):
    """Effect of the --no-wrap option on long msgid lines in the .po output."""

    def _extract(self, no_wrap):
        """Run makemessages with the given --no-wrap value and return the
        generated .po file's contents."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_wrap=no_wrap)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            return force_text(fp.read())

    def test_no_wrap_enabled(self):
        """With --no-wrap, a long literal stays on a single msgid line."""
        po_contents = self._extract(True)
        self.assertMsgId(
            'This literal should also be included wrapped or not wrapped '
            'depending on the use of the --no-wrap option.',
            po_contents
        )

    def test_no_wrap_disabled(self):
        """Without --no-wrap, the same literal is wrapped gettext-style."""
        po_contents = self._extract(False)
        self.assertMsgId(
            '""\n"This literal should also be included wrapped or not '
            'wrapped depending on the "\n"use of the --no-wrap option."',
            po_contents,
            use_quotes=False
        )
class LocationCommentsTests(ExtractorTests):
    """Presence/absence of '#: path:line' location comments in the output."""

    def test_no_location_enabled(self):
        """Behavior is correct if --no-location switch is specified. See #16903."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_location=True)
        self.assertTrue(os.path.exists(self.PO_FILE))
        # No location comments at all should be emitted.
        self.assertLocationCommentNotPresent(self.PO_FILE, 55, 'templates', 'test.html.py')

    def test_no_location_disabled(self):
        """Behavior is correct if --no-location switch isn't specified."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_location=False)
        self.assertTrue(os.path.exists(self.PO_FILE))
        # #16903 -- Standard comment with source file relative path should be present
        self.assertLocationCommentPresent(self.PO_FILE, 'Translatable literal #6b', 'templates', 'test.html')
        # #21208 -- Leaky paths in comments on Windows e.g. #: path\to\file.html.py:123
        self.assertLocationCommentNotPresent(self.PO_FILE, None, 'templates', 'test.html.py')
class KeepPotFileExtractorTests(ExtractorTests):
    """The intermediate .pot file is deleted unless --keep-pot is passed."""

    POT_FILE = 'locale/django.pot'

    def tearDown(self):
        super(KeepPotFileExtractorTests, self).tearDown()
        os.chdir(self.test_dir)
        try:
            os.unlink(self.POT_FILE)
        except OSError:
            # The .pot file only exists after a --keep-pot run.
            pass
        os.chdir(self._cwd)

    def _extract(self, **options):
        """Run makemessages for the test locale with extra options."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0,
                                **options)

    def test_keep_pot_disabled_by_default(self):
        self._extract()
        self.assertFalse(os.path.exists(self.POT_FILE))

    def test_keep_pot_explicitly_disabled(self):
        self._extract(keep_pot=False)
        self.assertFalse(os.path.exists(self.POT_FILE))

    def test_keep_pot_enabled(self):
        self._extract(keep_pot=True)
        self.assertTrue(os.path.exists(self.POT_FILE))
class MultipleLocaleExtractionTests(ExtractorTests):
    """Passing several --locale values creates one catalog per locale."""

    PO_FILE_PT = 'locale/pt/LC_MESSAGES/django.po'
    PO_FILE_DE = 'locale/de/LC_MESSAGES/django.po'
    LOCALES = ['pt', 'de', 'ch']

    def tearDown(self):
        super(MultipleLocaleExtractionTests, self).tearDown()
        os.chdir(self.test_dir)
        # Remove every locale dir this class may have created.
        for locale_code in self.LOCALES:
            try:
                self._rmrf('locale/%s' % locale_code)
            except OSError:
                pass
        os.chdir(self._cwd)

    def test_multiple_locales(self):
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=['pt', 'de'], verbosity=0)
        for po_path in (self.PO_FILE_PT, self.PO_FILE_DE):
            self.assertTrue(os.path.exists(po_path))
class ExcludedLocaleExtractionTests(ExtractorTests):
    """The --exclude option skips the given locales, alone or with --locale."""

    LOCALES = ['en', 'fr', 'it']
    PO_FILE = 'locale/%s/LC_MESSAGES/django.po'

    test_dir = os.path.abspath(os.path.join(this_directory, 'exclude'))

    def _set_times_for_all_po_files(self):
        """
        Set access and modification times to the Unix epoch time for all the .po files.
        """
        # Makes assert(Not)RecentlyModified meaningful: anything makemessages
        # touches afterwards gets a fresh mtime.
        for locale in self.LOCALES:
            os.utime(self.PO_FILE % locale, (0, 0))

    def setUp(self):
        super(ExcludedLocaleExtractionTests, self).setUp()
        os.chdir(self.test_dir)  # ExtractorTests.tearDown() takes care of restoring.
        shutil.copytree('canned_locale', 'locale')
        self._set_times_for_all_po_files()
        self.addCleanup(self._rmrf, os.path.join(self.test_dir, 'locale'))

    def test_command_help(self):
        with captured_stdout(), captured_stderr():
            # `call_command` bypasses the parser; by calling
            # `execute_from_command_line` with the help subcommand we
            # ensure that there are no issues with the parser itself.
            execute_from_command_line(['django-admin', 'help', 'makemessages'])

    def test_one_locale_excluded(self):
        management.call_command('makemessages', exclude=['it'], stdout=StringIO())
        self.assertRecentlyModified(self.PO_FILE % 'en')
        self.assertRecentlyModified(self.PO_FILE % 'fr')
        self.assertNotRecentlyModified(self.PO_FILE % 'it')

    def test_multiple_locales_excluded(self):
        management.call_command('makemessages', exclude=['it', 'fr'], stdout=StringIO())
        self.assertRecentlyModified(self.PO_FILE % 'en')
        self.assertNotRecentlyModified(self.PO_FILE % 'fr')
        self.assertNotRecentlyModified(self.PO_FILE % 'it')

    def test_one_locale_excluded_with_locale(self):
        # --exclude wins over an explicit --locale listing the same code.
        management.call_command('makemessages', locale=['en', 'fr'], exclude=['fr'], stdout=StringIO())
        self.assertRecentlyModified(self.PO_FILE % 'en')
        self.assertNotRecentlyModified(self.PO_FILE % 'fr')
        self.assertNotRecentlyModified(self.PO_FILE % 'it')

    def test_multiple_locales_excluded_with_locale(self):
        management.call_command('makemessages', locale=['en', 'fr', 'it'], exclude=['fr', 'it'],
                                stdout=StringIO())
        self.assertRecentlyModified(self.PO_FILE % 'en')
        self.assertNotRecentlyModified(self.PO_FILE % 'fr')
        self.assertNotRecentlyModified(self.PO_FILE % 'it')
class CustomLayoutExtractionTests(ExtractorTests):
    """Extraction in a project layout with app-level and project-level locale dirs."""

    def setUp(self):
        super(CustomLayoutExtractionTests, self).setUp()
        self.test_dir = os.path.join(this_directory, 'project_dir')

    def test_no_locale_raises(self):
        """Without any locale dir or LOCALE_PATHS, makemessages must error out."""
        os.chdir(self.test_dir)
        with six.assertRaisesRegex(self, management.CommandError,
                "Unable to find a locale path to store translations for file"):
            management.call_command('makemessages', locale=LOCALE, verbosity=0)

    @override_settings(
        LOCALE_PATHS=[os.path.join(this_directory, 'project_dir', 'project_locale')],
    )
    def test_project_locale_paths(self):
        """
        Test that:
        * translations for an app containing a locale folder are stored in that folder
        * translations outside of that app are in LOCALE_PATHS[0]
        """
        os.chdir(self.test_dir)
        self.addCleanup(shutil.rmtree,
            os.path.join(settings.LOCALE_PATHS[0], LOCALE), True)
        self.addCleanup(shutil.rmtree,
            os.path.join(self.test_dir, 'app_with_locale', 'locale', LOCALE), True)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0)
        project_de_locale = os.path.join(
            self.test_dir, 'project_locale', 'de', 'LC_MESSAGES', 'django.po')
        app_de_locale = os.path.join(
            self.test_dir, 'app_with_locale', 'locale', 'de', 'LC_MESSAGES', 'django.po')
        self.assertTrue(os.path.exists(project_de_locale))
        self.assertTrue(os.path.exists(app_de_locale))
        # Strings from outside the app end up in the project-level catalog...
        with open(project_de_locale, 'r') as fp:
            po_contents = force_text(fp.read())
            self.assertMsgId('This app has no locale directory', po_contents)
            self.assertMsgId('This is a project-level string', po_contents)
        # ...while the app's own strings go to its local catalog.
        with open(app_de_locale, 'r') as fp:
            po_contents = force_text(fp.read())
            self.assertMsgId('This app has a locale directory', po_contents)
| 45.517067 | 113 | 0.644067 |
from __future__ import unicode_literals
import io
import os
import re
import shutil
import time
import warnings
from unittest import SkipTest, skipUnless
from django.conf import settings
from django.core import management
from django.core.management import execute_from_command_line
from django.core.management.base import CommandError
from django.core.management.commands.makemessages import \
Command as MakeMessagesCommand
from django.core.management.utils import find_command
from django.test import SimpleTestCase, mock, override_settings
from django.test.testcases import SerializeMixin
from django.test.utils import captured_stderr, captured_stdout
from django.utils import six
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.six import StringIO
from django.utils.translation import TranslatorCommentWarning
# Locale used by most extraction tests below.
LOCALE = 'de'
# Path to the xgettext binary, or None when not installed; used by
# @skipUnless to skip the whole extraction suite.
has_xgettext = find_command('xgettext')
# Directory containing this test module; fixture paths are resolved from it.
this_directory = os.path.dirname(upath(__file__))
@skipUnless(has_xgettext, 'xgettext is mandatory for extraction tests')
class ExtractorTests(SerializeMixin, SimpleTestCase):
    """
    Base class for makemessages extraction tests: runs the command inside the
    fixture dir and offers .po-content assertion helpers.

    makemessages scans and writes in the current working directory, and as a
    consequence tests can't run in parallel. Since i18n tests run in less
    than a minute, serialize them via SerializeMixin's lockfile.
    """
    lockfile = __file__
    test_dir = os.path.abspath(os.path.join(this_directory, 'commands'))
    PO_FILE = 'locale/%s/LC_MESSAGES/django.po' % LOCALE

    def setUp(self):
        # Remember the original cwd so tearDown() can restore it.
        self._cwd = os.getcwd()

    def _rmrf(self, dname):
        """Recursively delete dname, but only if it lies inside test_dir."""
        if os.path.commonprefix([self.test_dir, os.path.abspath(dname)]) != self.test_dir:
            return
        shutil.rmtree(dname)

    def rmfile(self, filepath):
        """Delete filepath if it exists (cleanup helper)."""
        if os.path.exists(filepath):
            os.remove(filepath)

    def tearDown(self):
        os.chdir(self.test_dir)
        try:
            self._rmrf('locale/%s' % LOCALE)
        except OSError:
            pass
        os.chdir(self._cwd)

    def _run_makemessages(self, **options):
        """Run makemessages for LOCALE; return (command output, .po contents)."""
        os.chdir(self.test_dir)
        out = StringIO()
        management.call_command('makemessages', locale=[LOCALE], verbosity=2,
                                stdout=out, **options)
        output = out.getvalue()
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = fp.read()
        return output, po_contents

    def _assertPoKeyword(self, keyword, expected_value, haystack, use_quotes=True):
        """Assert that '<keyword> <expected_value>' starts a line of haystack."""
        q = '"'
        if use_quotes:
            expected_value = '"%s"' % expected_value
            q = "'"
        needle = '%s %s' % (keyword, expected_value)
        expected_value = re.escape(expected_value)
        return self.assertTrue(re.search('^%s %s' % (keyword, expected_value), haystack, re.MULTILINE),
            'Could not find %(q)s%(n)s%(q)s in generated PO file' % {'n': needle, 'q': q})

    def assertMsgId(self, msgid, haystack, use_quotes=True):
        return self._assertPoKeyword('msgid', msgid, haystack, use_quotes=use_quotes)

    def assertMsgIdPlural(self, msgid, haystack, use_quotes=True):
        return self._assertPoKeyword('msgid_plural', msgid, haystack, use_quotes=use_quotes)

    def assertMsgStr(self, msgstr, haystack, use_quotes=True):
        return self._assertPoKeyword('msgstr', msgstr, haystack, use_quotes=use_quotes)

    def assertNotMsgId(self, msgid, s, use_quotes=True):
        if use_quotes:
            msgid = '"%s"' % msgid
        msgid = re.escape(msgid)
        return self.assertTrue(not re.search('^msgid %s' % msgid, s, re.MULTILINE))

    def _assertPoLocComment(self, assert_presence, po_filename, line_number, *comment_parts):
        """Assert presence/absence of a '#: path[:line]' location comment."""
        with open(po_filename, 'r') as fp:
            po_contents = force_text(fp.read())
        if os.name == 'nt':
            # #: .\path\to\file.html:123
            cwd_prefix = '%s%s' % (os.curdir, os.sep)
        else:
            # #: path/to/file.html:123
            cwd_prefix = ''
        parts = ['#: ']
        path = os.path.join(cwd_prefix, *comment_parts)
        parts.append(path)
        if isinstance(line_number, six.string_types):
            # A string line_number is a token to look up in the source file.
            line_number = self._get_token_line_number(path, line_number)
        if line_number is not None:
            parts.append(':%d' % line_number)
        needle = ''.join(parts)
        if assert_presence:
            return self.assertIn(needle, po_contents, '"%s" not found in final .po file.' % needle)
        else:
            return self.assertNotIn(needle, po_contents, '"%s" shouldn\'t be in final .po file.' % needle)

    def _get_token_line_number(self, path, token):
        """Return the 1-based line number where token first appears in path."""
        with open(path) as f:
            for line, content in enumerate(f, 1):
                if token in force_text(content):
                    return line
        self.fail("The token '%s' could not be found in %s, please check the test config" % (token, path))

    def assertLocationCommentPresent(self, po_filename, line_number, *comment_parts):
        return self._assertPoLocComment(True, po_filename, line_number, *comment_parts)

    def assertLocationCommentNotPresent(self, po_filename, line_number, *comment_parts):
        return self._assertPoLocComment(False, po_filename, line_number, *comment_parts)

    def assertRecentlyModified(self, path):
        delta = time.time() - os.stat(path).st_mtime
        # BUG FIX: the failure messages of this pair were inverted — each one
        # described the condition that *held* rather than the one that failed.
        self.assertLess(delta, 10, "%s wasn't recently modified" % path)

    def assertNotRecentlyModified(self, path):
        delta = time.time() - os.stat(path).st_mtime
        self.assertGreater(delta, 10, "%s was recently modified" % path)
class BasicExtractorTests(ExtractorTests):
    def test_comments_extractor(self):
        """Translator-prefixed comments are extracted; plain comments are not."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with io.open(self.PO_FILE, 'r', encoding='utf-8') as fp:
            po_contents = fp.read()
            # Comments without the 'Translators:' prefix must be dropped.
            self.assertNotIn('This comment should not be extracted', po_contents)
            # Comments in templates
            self.assertIn('#. Translators: This comment should be extracted', po_contents)
            self.assertIn(
                "#. Translators: Django comment block for translators\n#. "
                "string's meaning unveiled",
                po_contents
            )
            self.assertIn('#. Translators: One-line translator comment #1', po_contents)
            self.assertIn('#. Translators: Two-line translator comment #1\n#. continued here.', po_contents)
            self.assertIn('#. Translators: One-line translator comment #2', po_contents)
            self.assertIn('#. Translators: Two-line translator comment #2\n#. continued here.', po_contents)
            self.assertIn('#. Translators: One-line translator comment #3', po_contents)
            self.assertIn('#. Translators: Two-line translator comment #3\n#. continued here.', po_contents)
            self.assertIn('#. Translators: One-line translator comment #4', po_contents)
            self.assertIn('#. Translators: Two-line translator comment #4\n#. continued here.', po_contents)
            # Non-ASCII characters in translator comments must round-trip.
            self.assertIn(
                '#. Translators: One-line translator comment #5 -- with '
                'non ASCII characters: áéíóúö',
                po_contents
            )
            self.assertIn(
                '#. Translators: Two-line translator comment #5 -- with '
                'non ASCII characters: áéíóúö\n#. continued here.',
                po_contents
            )
    def test_blocktrans_trimmed(self):
        """{% blocktrans trimmed %} collapses whitespace; plain blocktrans doesn't."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = force_text(fp.read())
            # should not be trimmed
            self.assertNotMsgId('Text with a few line breaks.', po_contents)
            # should be trimmed
            self.assertMsgId("Again some text with a few line breaks, this time should be trimmed.", po_contents)
        # #21406 -- Should adjust for eaten line numbers
        self.assertMsgId("Get my line number", po_contents)
        self.assertLocationCommentPresent(self.PO_FILE, 'Get my line number', 'templates', 'test.html')
    def test_force_en_us_locale(self):
        """makemessages must not switch locales while running (leave_locale_alone)."""
        self.assertTrue(MakeMessagesCommand.leave_locale_alone)
    def test_extraction_error(self):
        """A nested block tag inside {% blocktrans %} raises with file/line info."""
        os.chdir(self.test_dir)
        msg = (
            'Translation blocks must not include other block tags: blocktrans '
            '(file %s, line 3)' % os.path.join('templates', 'template_with_error.tpl')
        )
        with self.assertRaisesMessage(SyntaxError, msg):
            management.call_command('makemessages', locale=[LOCALE], extensions=['tpl'], verbosity=0)
        # Check that the temporary file was cleaned up
        self.assertFalse(os.path.exists('./templates/template_with_error.tpl.py'))
def test_unicode_decode_error(self):
os.chdir(self.test_dir)
shutil.copyfile('./not_utf8.sample', './not_utf8.txt')
self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'not_utf8.txt'))
out = StringIO()
management.call_command('makemessages', locale=[LOCALE], stdout=out)
self.assertIn("UnicodeDecodeError: skipped file not_utf8.txt in .",
force_text(out.getvalue()))
    def test_extraction_warning(self):
        """xgettext warnings for a source file are forwarded with its name/line."""
        os.chdir(self.test_dir)
        shutil.copyfile('./code.sample', './code_sample.py')
        self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'code_sample.py'))
        out = StringIO()
        management.call_command('makemessages', locale=[LOCALE], stdout=out)
        self.assertIn("code_sample.py:4", force_text(out.getvalue()))
def test_template_message_context_extractor(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
# {% trans %}
self.assertIn('msgctxt "Special trans context #1"', po_contents)
self.assertMsgId("Translatable literal
self.assertIn('msgctxt "Special trans context #2"', po_contents)
self.assertMsgId("Translatable literal
self.assertIn('msgctxt "Special trans context #3"', po_contents)
self.assertMsgId("Translatable literal
# {% trans %} with a filter
for minor_part in 'abcdefgh': # Iterate from #7.1a to #7.1h template markers
self.assertIn('msgctxt "context #7.1{}"'.format(minor_part), po_contents)
self.assertMsgId('Translatable literal #7.1{}'.format(minor_part), po_contents)
# {% blocktrans %}
self.assertIn('msgctxt "Special blocktrans context #1"', po_contents)
self.assertMsgId("Translatable literal
self.assertIn('msgctxt "Special blocktrans context #2"', po_contents)
self.assertMsgId("Translatable literal
self.assertIn("Translatable literal
self.assertIn('msgctxt "Special blocktrans context #3"', po_contents)
self.assertMsgId("Translatable literal
self.assertIn("Translatable literal
self.assertIn('msgctxt "Special blocktrans context #4"', po_contents)
self.assertMsgId("Translatable literal
    def test_context_in_single_quotes(self):
        """Contexts given in single quotes are extracted like double-quoted ones."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = force_text(fp.read())
            # {% trans %}
            self.assertIn('msgctxt "Context wrapped in double quotes"', po_contents)
            self.assertIn('msgctxt "Context wrapped in single quotes"', po_contents)
            # {% blocktrans %}
            self.assertIn('msgctxt "Special blocktrans context wrapped in double quotes"', po_contents)
            self.assertIn('msgctxt "Special blocktrans context wrapped in single quotes"', po_contents)
def test_template_comments(self):
os.chdir(self.test_dir)
# Test detection/end user reporting of old, incorrect templates
# translator comments syntax
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter('always')
management.call_command('makemessages', locale=[LOCALE], extensions=['thtml'], verbosity=0)
self.assertEqual(len(ws), 3)
for w in ws:
self.assertTrue(issubclass(w.category, TranslatorCommentWarning))
six.assertRegex(
self, str(ws[0].message),
r"The translator-targeted comment 'Translators: ignored i18n "
r"comment #1' \(file templates[/\\]comments.thtml, line 4\) "
r"was ignored, because it wasn't the last item on the line\."
)
six.assertRegex(
self, str(ws[1].message),
r"The translator-targeted comment 'Translators: ignored i18n "
r"comment
r"was ignored, because it wasn't the last item on the line\."
)
six.assertRegex(
self, str(ws[2].message),
r"The translator-targeted comment 'Translators: ignored i18n "
r"comment #4' \(file templates[/\\]comments.thtml, line 8\) "
"was ignored, because it wasn't the last item on the line\."
)
# Now test .po file contents
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('Translatable literal #9a', po_contents)
self.assertNotIn('ignored comment #1', po_contents)
self.assertNotIn('Translators: ignored i18n comment #1', po_contents)
self.assertMsgId("Translatable literal #9b", po_contents)
self.assertNotIn('ignored i18n comment #2', po_contents)
self.assertNotIn('ignored comment #2', po_contents)
self.assertMsgId('Translatable literal #9c', po_contents)
self.assertNotIn('ignored comment #3', po_contents)
self.assertNotIn('ignored i18n comment #3', po_contents)
self.assertMsgId('Translatable literal #9d', po_contents)
self.assertNotIn('ignored comment #4', po_contents)
self.assertMsgId('Translatable literal #9e', po_contents)
self.assertNotIn('ignored comment #5', po_contents)
self.assertNotIn('ignored i18n comment #4', po_contents)
self.assertMsgId('Translatable literal #9f', po_contents)
self.assertIn('#. Translators: valid i18n comment #5', po_contents)
self.assertMsgId('Translatable literal #9g', po_contents)
self.assertIn('#. Translators: valid i18n comment #6', po_contents)
self.assertMsgId('Translatable literal #9h', po_contents)
self.assertIn('#. Translators: valid i18n comment #7', po_contents)
self.assertMsgId('Translatable literal #9i', po_contents)
six.assertRegex(self, po_contents, r'#\..+Translators: valid i18n comment #8')
six.assertRegex(self, po_contents, r'#\..+Translators: valid i18n comment #9')
self.assertMsgId("Translatable literal #9j", po_contents)
def test_makemessages_find_files(self):
cmd = MakeMessagesCommand()
cmd.ignore_patterns = ['CVS', '.*', '*~', '*.pyc']
cmd.symlinks = False
cmd.domain = 'django'
cmd.extensions = ['html', 'txt', 'py']
cmd.verbosity = 0
cmd.locale_paths = []
cmd.default_locale_path = os.path.join(self.test_dir, 'locale')
found_files = cmd.find_files(self.test_dir)
found_exts = set([os.path.splitext(tfile.file)[1] for tfile in found_files])
self.assertEqual(found_exts.difference({'.py', '.html', '.txt'}), set())
cmd.extensions = ['js']
cmd.domain = 'djangojs'
found_files = cmd.find_files(self.test_dir)
found_exts = set([os.path.splitext(tfile.file)[1] for tfile in found_files])
self.assertEqual(found_exts.difference({'.js'}), set())
    @mock.patch('django.core.management.commands.makemessages.popen_wrapper')
    def test_makemessages_gettext_version(self, mocked_popen_wrapper):
        """gettext_version parses `xgettext --version` output into an int tuple.

        popen_wrapper is mocked, so no real xgettext binary is required.
        """
        # "Normal" output:
        mocked_popen_wrapper.return_value = (
            "xgettext (GNU gettext-tools) 0.18.1\n"
            "Copyright (C) 1995-1998, 2000-2010 Free Software Foundation, Inc.\n"
            "License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>\n"
            "This is free software: you are free to change and redistribute it.\n"
            "There is NO WARRANTY, to the extent permitted by law.\n"
            "Written by Ulrich Drepper.\n", '', 0)
        cmd = MakeMessagesCommand()
        self.assertEqual(cmd.gettext_version, (0, 18, 1))
        # Version number with only 2 parts (#23788)
        mocked_popen_wrapper.return_value = (
            "xgettext (GNU gettext-tools) 0.17\n", '', 0)
        cmd = MakeMessagesCommand()
        self.assertEqual(cmd.gettext_version, (0, 17))
        # Bad version output
        mocked_popen_wrapper.return_value = (
            "any other return value\n", '', 0)
        cmd = MakeMessagesCommand()
        # Unparseable output must raise, not silently return a bogus version.
        with six.assertRaisesRegex(self, CommandError, "Unable to get gettext version. Is it installed?"):
            cmd.gettext_version
    def test_po_file_encoding_when_updating(self):
        """Update of a .po file that contains non-ASCII msgstrs keeps UTF-8 intact.

        Starts from a pristine pt_BR catalog, reruns makemessages over it and
        verifies the German umlaut in a msgstr survived the round trip.
        """
        BR_PO_BASE = 'locale/pt_BR/LC_MESSAGES/django'
        os.chdir(self.test_dir)
        # Work on a copy so the pristine fixture is never modified.
        shutil.copyfile(BR_PO_BASE + '.pristine', BR_PO_BASE + '.po')
        self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'locale', 'pt_BR', 'LC_MESSAGES', 'django.po'))
        management.call_command('makemessages', locale=['pt_BR'], verbosity=0)
        self.assertTrue(os.path.exists(BR_PO_BASE + '.po'))
        with io.open(BR_PO_BASE + '.po', 'r', encoding='utf-8') as fp:
            po_contents = force_text(fp.read())
            self.assertMsgStr("Größe", po_contents)
class JavascriptExtractorTests(ExtractorTests):
    """makemessages extraction tests for the 'djangojs' domain."""
    PO_FILE = 'locale/%s/LC_MESSAGES/djangojs.po' % LOCALE
    def test_javascript_literals(self):
        """Translatable literals in JS sources appear in the .po file."""
        os.chdir(self.test_dir)
        _, po_contents = self._run_makemessages(domain='djangojs')
        self.assertMsgId('This literal should be included.', po_contents)
        self.assertMsgId('gettext_noop should, too.', po_contents)
        self.assertMsgId('This one as well.', po_contents)
        self.assertMsgId(r'He said, \"hello\".', po_contents)
        self.assertMsgId("okkkk", po_contents)
        self.assertMsgId("TEXT", po_contents)
        self.assertMsgId("It's at http://example.com", po_contents)
        self.assertMsgId("String", po_contents)
        self.assertMsgId("/* but this one will be too */ 'cause there is no way of telling...", po_contents)
        self.assertMsgId("foo", po_contents)
        self.assertMsgId("bar", po_contents)
        self.assertMsgId("baz", po_contents)
        self.assertMsgId("quz", po_contents)
        self.assertMsgId("foobar", po_contents)
    @override_settings(
        STATIC_ROOT=os.path.join(this_directory, 'commands', 'static/'),
        MEDIA_ROOT=os.path.join(this_directory, 'commands', 'media_root/'))
    def test_media_static_dirs_ignored(self):
        """STATIC_ROOT / MEDIA_ROOT contents are skipped during extraction."""
        _, po_contents = self._run_makemessages(domain='djangojs')
        self.assertMsgId("Static content inside app should be included.", po_contents)
        self.assertNotMsgId("Content from STATIC_ROOT should not be included", po_contents)
    @override_settings(STATIC_ROOT=None, MEDIA_ROOT='')
    def test_default_root_settings(self):
        """Extraction works when STATIC_ROOT/MEDIA_ROOT are unset/empty."""
        _, po_contents = self._run_makemessages(domain='djangojs')
        self.assertMsgId("Static content inside app should be included.", po_contents)
class IgnoredExtractorTests(ExtractorTests):
    """Tests for the --ignore patterns of makemessages."""
    def test_ignore_directory(self):
        """A directory matching an ignore pattern is skipped entirely."""
        out, po_contents = self._run_makemessages(ignore_patterns=[
            os.path.join('ignore_dir', '*'),
        ])
        self.assertIn("ignoring directory ignore_dir", out)
        self.assertMsgId('This literal should be included.', po_contents)
        self.assertNotMsgId('This should be ignored.', po_contents)
    def test_ignore_subdirectory(self):
        """Ignore patterns also match nested subdirectories."""
        out, po_contents = self._run_makemessages(ignore_patterns=[
            'templates/*/ignore.html',
            'templates/subdir/*',
        ])
        self.assertIn("ignoring directory subdir", out)
        self.assertNotMsgId('This subdir should be ignored too.', po_contents)
    def test_ignore_file_patterns(self):
        """File-level glob patterns exclude single files from extraction."""
        out, po_contents = self._run_makemessages(ignore_patterns=[
            'xxx_*',
        ])
        self.assertIn("ignoring file xxx_ignored.html", out)
        self.assertNotMsgId('This should be ignored too.', po_contents)
    @override_settings(
        STATIC_ROOT=os.path.join(this_directory, 'commands', 'static/'),
        MEDIA_ROOT=os.path.join(this_directory, 'commands', 'media_root/'))
    def test_media_static_dirs_ignored(self):
        """STATIC_ROOT and MEDIA_ROOT directories are ignored by default."""
        out, _ = self._run_makemessages()
        self.assertIn("ignoring directory static", out)
        self.assertIn("ignoring directory media_root", out)
class SymlinkExtractorTests(ExtractorTests):
    """Tests that makemessages follows symlinked dirs when symlinks=True."""
    def setUp(self):
        super(SymlinkExtractorTests, self).setUp()
        self.symlinked_dir = os.path.join(self.test_dir, 'templates_symlinked')
    def tearDown(self):
        super(SymlinkExtractorTests, self).tearDown()
        os.chdir(self.test_dir)
        try:
            # Remove the symlink created by test_symlink; it may not exist
            # if the test was skipped, hence the OSError guard.
            os.remove(self.symlinked_dir)
        except OSError:
            pass
        os.chdir(self._cwd)
    def test_symlink(self):
        """Literals reached only through a symlinked directory are extracted
        and the location comment references the symlinked path."""
        # On Python < 3.2 os.symlink() exists only on Unix
        if hasattr(os, 'symlink'):
            if os.path.exists(self.symlinked_dir):
                self.assertTrue(os.path.islink(self.symlinked_dir))
            else:
                # On Python >= 3.2) os.symlink() exists always but then can
                # fail at runtime when user hasn't the needed permissions on
                # Windows versions that support symbolink links (>= 6/Vista).
                # See Python issue 9333 (http://bugs.python.org/issue9333).
                # Skip the test in that case
                try:
                    os.symlink(os.path.join(self.test_dir, 'templates'), self.symlinked_dir)
                except (OSError, NotImplementedError):
                    raise SkipTest("os.symlink() is available on this OS but can't be used by this user.")
            os.chdir(self.test_dir)
            management.call_command('makemessages', locale=[LOCALE], verbosity=0, symlinks=True)
            self.assertTrue(os.path.exists(self.PO_FILE))
            with open(self.PO_FILE, 'r') as fp:
                po_contents = force_text(fp.read())
                self.assertMsgId('This literal should be included.', po_contents)
                self.assertIn('templates_symlinked/test.html', po_contents)
class CopyPluralFormsExtractorTests(ExtractorTests):
    """Tests for the Plural-Forms header handling of makemessages."""
    PO_FILE_ES = 'locale/es/LC_MESSAGES/django.po'
    def tearDown(self):
        super(CopyPluralFormsExtractorTests, self).tearDown()
        os.chdir(self.test_dir)
        try:
            # The 'es' catalog is created by test_override_plural_forms only,
            # so removal may fail; ignore that.
            self._rmrf('locale/es')
        except OSError:
            pass
        os.chdir(self._cwd)
    def test_copy_plural_forms(self):
        """Django's own Plural-Forms value is copied into new catalogs."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = force_text(fp.read())
            self.assertIn('Plural-Forms: nplurals=2; plural=(n != 1)', po_contents)
    def test_override_plural_forms(self):
        """Only a single Plural-Forms header ends up in the .po file."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=['es'], extensions=['djtpl'], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE_ES))
        with io.open(self.PO_FILE_ES, 'r', encoding='utf-8') as fp:
            po_contents = fp.read()
            found = re.findall(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', po_contents, re.MULTILINE | re.DOTALL)
            self.assertEqual(1, len(found))
    def test_trans_and_plural_blocktrans_collision(self):
        """A {% trans %} and a plural {% blocktrans %} with the same msgid
        must not produce msgcat conflict markers in the merged catalog."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], extensions=['html', 'djtpl'], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = force_text(fp.read())
            self.assertNotIn("#-#-#-#-#  django.pot (PACKAGE VERSION)  #-#-#-#-#\\n", po_contents)
            self.assertMsgId('First `trans`, then `blocktrans` with a plural', po_contents)
            self.assertMsgIdPlural('Plural for a `trans` and `blocktrans` collision case', po_contents)
class NoWrapExtractorTests(ExtractorTests):
    """Tests for the --no-wrap option (controls msgid line wrapping)."""
    def test_no_wrap_enabled(self):
        """With --no-wrap, long msgids stay on a single line."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_wrap=True)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = force_text(fp.read())
            self.assertMsgId(
                'This literal should also be included wrapped or not wrapped '
                'depending on the use of the --no-wrap option.',
                po_contents
            )
    def test_no_wrap_disabled(self):
        """Without --no-wrap, long msgids are wrapped across lines."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_wrap=False)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = force_text(fp.read())
            # The expected value reproduces gettext's wrapping verbatim, so
            # quoting is disabled for this assertion.
            self.assertMsgId(
                '""\n"This literal should also be included wrapped or not '
                'wrapped depending on the "\n"use of the --no-wrap option."',
                po_contents,
                use_quotes=False
            )
class LocationCommentsTests(ExtractorTests):
    """Tests for the --no-location option (controls '#: file:line' comments)."""
    def test_no_location_enabled(self):
        """With --no-location, no source location comments are written."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_location=True)
        self.assertTrue(os.path.exists(self.PO_FILE))
        self.assertLocationCommentNotPresent(self.PO_FILE, 55, 'templates', 'test.html.py')
    def test_no_location_disabled(self):
        """Without --no-location, location comments use relative paths and
        never carry the bogus '.py' suffix added for template preprocessing."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_location=False)
        self.assertTrue(os.path.exists(self.PO_FILE))
        # #16903 -- Standard comment with source file relative path should be present
        self.assertLocationCommentPresent(self.PO_FILE, 'Translatable literal #6b', 'templates', 'test.html')
        # #21208 -- Leaky paths in comments on Windows e.g. #: path\to\file.html.py:123
        self.assertLocationCommentNotPresent(self.PO_FILE, None, 'templates', 'test.html.py')
class KeepPotFileExtractorTests(ExtractorTests):
    """Tests for the --keep-pot option (retains the intermediate .pot file)."""
    POT_FILE = 'locale/django.pot'
    def tearDown(self):
        super(KeepPotFileExtractorTests, self).tearDown()
        os.chdir(self.test_dir)
        try:
            # Only present after test_keep_pot_enabled; ignore if missing.
            os.unlink(self.POT_FILE)
        except OSError:
            pass
        os.chdir(self._cwd)
    def test_keep_pot_disabled_by_default(self):
        """By default the .pot file is deleted after the run."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0)
        self.assertFalse(os.path.exists(self.POT_FILE))
    def test_keep_pot_explicitly_disabled(self):
        """keep_pot=False behaves the same as the default."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0,
                                keep_pot=False)
        self.assertFalse(os.path.exists(self.POT_FILE))
    def test_keep_pot_enabled(self):
        """keep_pot=True leaves the .pot file in place."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0,
                                keep_pot=True)
        self.assertTrue(os.path.exists(self.POT_FILE))
class MultipleLocaleExtractionTests(ExtractorTests):
    """Tests passing several locales to makemessages in one invocation."""
    PO_FILE_PT = 'locale/pt/LC_MESSAGES/django.po'
    PO_FILE_DE = 'locale/de/LC_MESSAGES/django.po'
    # All locales that any test here may create; cleaned up in tearDown.
    LOCALES = ['pt', 'de', 'ch']
    def tearDown(self):
        super(MultipleLocaleExtractionTests, self).tearDown()
        os.chdir(self.test_dir)
        for locale in self.LOCALES:
            try:
                self._rmrf('locale/%s' % locale)
            except OSError:
                pass
        os.chdir(self._cwd)
    def test_multiple_locales(self):
        """One run with locale=['pt', 'de'] creates both catalogs."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=['pt', 'de'], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE_PT))
        self.assertTrue(os.path.exists(self.PO_FILE_DE))
class ExcludedLocaleExtractionTests(ExtractorTests):
    """Tests for the --exclude option of makemessages.

    Uses file modification times to detect whether a locale's catalog was
    touched by the command: every .po file's mtime is reset to the epoch in
    setUp, so only regenerated catalogs look "recently modified".
    """
    LOCALES = ['en', 'fr', 'it']
    PO_FILE = 'locale/%s/LC_MESSAGES/django.po'
    test_dir = os.path.abspath(os.path.join(this_directory, 'exclude'))
    def _set_times_for_all_po_files(self):
        # Reset access/modification times so assertRecentlyModified works.
        for locale in self.LOCALES:
            os.utime(self.PO_FILE % locale, (0, 0))
    def setUp(self):
        super(ExcludedLocaleExtractionTests, self).setUp()
        os.chdir(self.test_dir)  # ExtractorTests.tearDown() takes care of restoring.
        shutil.copytree('canned_locale', 'locale')
        self._set_times_for_all_po_files()
        self.addCleanup(self._rmrf, os.path.join(self.test_dir, 'locale'))
    def test_command_help(self):
        with captured_stdout(), captured_stderr():
            # `call_command` bypasses the parser; by calling
            # `execute_from_command_line` with the help subcommand we
            # ensure that there are no issues with the parser itself.
            execute_from_command_line(['django-admin', 'help', 'makemessages'])
    def test_one_locale_excluded(self):
        management.call_command('makemessages', exclude=['it'], stdout=StringIO())
        self.assertRecentlyModified(self.PO_FILE % 'en')
        self.assertRecentlyModified(self.PO_FILE % 'fr')
        self.assertNotRecentlyModified(self.PO_FILE % 'it')
    def test_multiple_locales_excluded(self):
        management.call_command('makemessages', exclude=['it', 'fr'], stdout=StringIO())
        self.assertRecentlyModified(self.PO_FILE % 'en')
        self.assertNotRecentlyModified(self.PO_FILE % 'fr')
        self.assertNotRecentlyModified(self.PO_FILE % 'it')
    def test_one_locale_excluded_with_locale(self):
        # --exclude wins over an explicit --locale mention of the same locale.
        management.call_command('makemessages', locale=['en', 'fr'], exclude=['fr'], stdout=StringIO())
        self.assertRecentlyModified(self.PO_FILE % 'en')
        self.assertNotRecentlyModified(self.PO_FILE % 'fr')
        self.assertNotRecentlyModified(self.PO_FILE % 'it')
    def test_multiple_locales_excluded_with_locale(self):
        management.call_command('makemessages', locale=['en', 'fr', 'it'], exclude=['fr', 'it'],
                                stdout=StringIO())
        self.assertRecentlyModified(self.PO_FILE % 'en')
        self.assertNotRecentlyModified(self.PO_FILE % 'fr')
        self.assertNotRecentlyModified(self.PO_FILE % 'it')
class CustomLayoutExtractionTests(ExtractorTests):
    """Tests extraction in a project layout with app- and project-level
    locale directories (the 'project_dir' fixture)."""
    def setUp(self):
        super(CustomLayoutExtractionTests, self).setUp()
        self.test_dir = os.path.join(this_directory, 'project_dir')
    def test_no_locale_raises(self):
        """Without LOCALE_PATHS and without an app locale dir, makemessages
        has nowhere to store translations and must raise CommandError."""
        os.chdir(self.test_dir)
        with six.assertRaisesRegex(self, management.CommandError,
                "Unable to find a locale path to store translations for file"):
            management.call_command('makemessages', locale=LOCALE, verbosity=0)
    @override_settings(
        LOCALE_PATHS=[os.path.join(this_directory, 'project_dir', 'project_locale')],
    )
    def test_project_locale_paths(self):
        """Strings from an app with its own locale dir land in that app's
        catalog; everything else lands in the LOCALE_PATHS catalog."""
        os.chdir(self.test_dir)
        self.addCleanup(shutil.rmtree,
            os.path.join(settings.LOCALE_PATHS[0], LOCALE), True)
        self.addCleanup(shutil.rmtree,
            os.path.join(self.test_dir, 'app_with_locale', 'locale', LOCALE), True)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0)
        project_de_locale = os.path.join(
            self.test_dir, 'project_locale', 'de', 'LC_MESSAGES', 'django.po')
        app_de_locale = os.path.join(
            self.test_dir, 'app_with_locale', 'locale', 'de', 'LC_MESSAGES', 'django.po')
        self.assertTrue(os.path.exists(project_de_locale))
        self.assertTrue(os.path.exists(app_de_locale))
        with open(project_de_locale, 'r') as fp:
            po_contents = force_text(fp.read())
            self.assertMsgId('This app has no locale directory', po_contents)
            self.assertMsgId('This is a project-level string', po_contents)
        with open(app_de_locale, 'r') as fp:
            po_contents = force_text(fp.read())
            self.assertMsgId('This app has a locale directory', po_contents)
| true | true |
f72aa05957ea518052b423c49d9e13775118a954 | 8,920 | py | Python | downstream/UNITER/adapter/src/transformers/adapters/models/gpt2.py | yeonseok-jeong-cm/multimodal_research | bb1140f13f76d4cda6175a072806a0ee0908bd0d | [
"MIT"
] | null | null | null | downstream/UNITER/adapter/src/transformers/adapters/models/gpt2.py | yeonseok-jeong-cm/multimodal_research | bb1140f13f76d4cda6175a072806a0ee0908bd0d | [
"MIT"
] | null | null | null | downstream/UNITER/adapter/src/transformers/adapters/models/gpt2.py | yeonseok-jeong-cm/multimodal_research | bb1140f13f76d4cda6175a072806a0ee0908bd0d | [
"MIT"
] | null | null | null | from typing import Union
import torch
from torch import nn
from ..composition import AdapterCompositionBlock, parse_composition
from ..heads import CausalLMHead, ClassificationHead, MultiLabelClassificationHead
from ..model_mixin import InvertibleAdaptersMixin, ModelAdaptersMixin
from .bert import (
BertEncoderAdaptersMixin,
BertOutputAdaptersMixin,
BertSelfOutputAdaptersMixin,
ModelWithFlexibleHeadsAdaptersMixin,
)
class GPT2AttentionAdaptersModule(BertSelfOutputAdaptersMixin, nn.Module):
    """Adds attention adapters to a GPT-2 transformer block.

    (The original docstring said "DistilBert" — a copy-paste leftover; this
    module is attached to GPT-2 blocks, see GPT2DecoderBlockAdaptersMixin.)
    """
    def __init__(self, parent):
        super().__init__()
        # keep a reference to the parent module without registering as a submodule
        object.__setattr__(self, "parent", parent)
        self.config = parent.config
    @property
    def transformer_layer_norm(self):
        # No dedicated layer norm is exposed for the adapter here.
        return None
class GPT2OutputAdaptersModule(BertOutputAdaptersMixin, nn.Module):
    """Adds output adapters to a GPT-2 transformer block.

    (The original docstring said "DistilBert" — a copy-paste leftover; this
    module is attached to GPT-2 blocks, see GPT2DecoderBlockAdaptersMixin.)
    """
    def __init__(self, parent):
        super().__init__()
        # keep a reference to the parent module without registering as a submodule
        object.__setattr__(self, "parent", parent)
        self.config = parent.config
    @property
    def transformer_layer_norm(self):
        # No dedicated layer norm is exposed for the adapter here.
        return None
class GPT2DecoderBlockAdaptersMixin(BertEncoderAdaptersMixin):
    """Adds adapters to a GPT-2 decoder block.

    Every operation is delegated to the two adapter locations of the block:
    the attention output and the feed-forward output.
    (The original docstring said "DistilBert" — a copy-paste leftover.)
    """
    def _init_adapter_modules(self):
        self.attention_adapters = GPT2AttentionAdaptersModule(self)
        self.output_adapters = GPT2OutputAdaptersModule(self)
        self.attention_adapters._init_adapter_modules()
        self.output_adapters._init_adapter_modules()
    def add_fusion_layer(self, adapter_names):
        self.attention_adapters.add_fusion_layer(adapter_names)
        self.output_adapters.add_fusion_layer(adapter_names)
    def add_adapter(self, adapter_name: str, layer_idx: int):
        self.attention_adapters.add_adapter(adapter_name, layer_idx)
        self.output_adapters.add_adapter(adapter_name, layer_idx)
    def delete_adapter(self, adapter_name):
        self.attention_adapters.delete_adapter(adapter_name)
        self.output_adapters.delete_adapter(adapter_name)
    def delete_fusion_layer(self, adapter_names):
        self.attention_adapters.delete_fusion_layer(adapter_names)
        self.output_adapters.delete_fusion_layer(adapter_names)
    def enable_adapters(self, adapter_names: list, unfreeze_adapters: bool, unfreeze_attention: bool):
        self.attention_adapters.enable_adapters(adapter_names, unfreeze_adapters, unfreeze_attention)
        self.output_adapters.enable_adapters(adapter_names, unfreeze_adapters, unfreeze_attention)
class GPT2ModelAdapterMixin(InvertibleAdaptersMixin, ModelAdaptersMixin):
    """Adds adapter support to the GPT-2 base model.

    Adapter operations iterate over ``self.base_model.h``, the list of
    GPT-2 transformer blocks.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def _init_adapter_modules(self):
        """Instantiate adapter/fusion modules declared in the model config."""
        super()._init_adapter_modules()
        # add adapters specified in config; invertible adapter will only be added if required
        for adapter_name in self.config.adapters.adapters:
            self._add_adapter(adapter_name)
        # fusion
        if hasattr(self.config, "fusion_models"):
            for fusion_adapter_names in self.config.fusion_models:
                self.add_fusion_layer(fusion_adapter_names)
    def _add_adapter(self, adapter_name: str):
        """Add one adapter to every block not listed in its "leave_out" config."""
        adapter_config = self.config.adapters.get(adapter_name)
        leave_out = adapter_config.get("leave_out", [])
        for i, layer in enumerate(self.base_model.h):
            if i not in leave_out:
                layer.add_adapter(adapter_name, i)
        self.add_invertible_adapter(adapter_name)
    def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock]):
        """Freeze the model and unfreeze only the given adapters for training."""
        self.train()
        self.freeze_model(True)
        adapter_setup = parse_composition(adapter_setup)
        self.enable_adapters(adapter_setup, True, False)
        self.enable_invertible_adapters(adapter_setup.flatten())
        # use the adapters to be trained by default in every forward pass
        self.set_active_adapters(adapter_setup)
    def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):
        """Freeze the model and unfreeze fusion layers (and optionally adapters)."""
        self.train()
        self.freeze_model(True)
        adapter_setup = parse_composition(adapter_setup)
        self.enable_adapters(adapter_setup, unfreeze_adapters, True)
        # use the adapters to be trained by default in every forward pass
        self.set_active_adapters(adapter_setup)
    def enable_adapters(
        self, adapter_setup: AdapterCompositionBlock, unfreeze_adapters: bool, unfreeze_attention: bool
    ):
        # Delegate to every transformer block.
        for layer in self.base_model.h:
            layer.enable_adapters(adapter_setup, unfreeze_adapters, unfreeze_attention)
    def adjust_attention_mask_for_parallel(self, hidden_states, attention_mask):
        """Repeat the attention mask along the batch dim when Parallel
        composition has multiplied the batch size of ``hidden_states``."""
        if attention_mask is not None and hidden_states.shape[0] != attention_mask.shape[0]:
            repeats = [1] * len(attention_mask.shape)
            repeats[0] = hidden_states.shape[0] // attention_mask.shape[0]
            attention_mask = attention_mask.repeat(*repeats)
        return attention_mask
    def _add_fusion_layer(self, adapter_names):
        for layer in self.base_model.h:
            layer.add_fusion_layer(adapter_names)
    def _delete_adapter(self, adapter_name: str):
        for layer in self.base_model.h:
            layer.delete_adapter(adapter_name)
        self.delete_invertible_adapter(adapter_name)
    def _delete_fusion_layer(self, adapter_names):
        for layer in self.base_model.h:
            layer.delete_fusion_layer(adapter_names)
    def get_fusion_regularization_loss(self):
        """L2 regularization pulling each fusion value matrix towards identity."""
        reg_loss = 0.0
        target = torch.zeros((self.config.hidden_size, self.config.hidden_size)).fill_diagonal_(1.0).to(self.device)
        for _, v in self.base_model.h._modules.items():
            for _, layer_fusion in v.output_adapters.adapter_fusion_layer.items():
                if hasattr(layer_fusion, "value"):
                    reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()
            for _, layer_fusion in v.attention_adapters.adapter_fusion_layer.items():
                if hasattr(layer_fusion, "value"):
                    reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()
        return reg_loss
    def get_adapter(self, name):
        """Return {layer_idx: {location: adapter_module}} for the named adapter."""
        return_adapters = {}
        for idx, layer in enumerate(self.h):
            adapters = {
                "attention": layer.attention_adapters.adapters,
                "output": layer.output_adapters.adapters,
            }
            for key, adapt in adapters.items():
                if hasattr(adapt, name):
                    if idx not in return_adapters:
                        return_adapters[idx] = {}
                    return_adapters[idx][key] = getattr(adapt, name)
        return return_adapters
class GPT2ModelHeadsMixin(ModelWithFlexibleHeadsAdaptersMixin):
    """Adds flexible heads to a GPT-2 model."""
    # Maps head-type identifiers (as used in head configs) to head classes.
    head_types = {
        "classification": ClassificationHead,
        "multilabel_classification": MultiLabelClassificationHead,
        "causal_lm": CausalLMHead,
    }
    def add_classification_head(
        self,
        head_name,
        num_labels=2,
        layers=2,
        activation_function="tanh",
        overwrite_ok=False,
        multilabel=False,
        id2label=None,
    ):
        """
        Adds a sequence classification head on top of the model.
        Args:
            head_name (str): The name of the head.
            num_labels (int, optional): Number of classification labels. Defaults to 2.
            layers (int, optional): Number of layers. Defaults to 2.
            activation_function (str, optional): Activation function. Defaults to 'tanh'.
            overwrite_ok (bool, optional): Force overwrite if a head with the same name exists. Defaults to False.
            multilabel (bool, optional): Enable multilabel classification setup. Defaults to False.
            id2label (dict, optional): Mapping from label ids to label strings. Defaults to None.
        """
        if multilabel:
            head = MultiLabelClassificationHead(self, head_name, num_labels, layers, activation_function, id2label)
        else:
            head = ClassificationHead(self, head_name, num_labels, layers, activation_function, id2label)
        self.add_prediction_head(head, overwrite_ok)
    def add_causal_lm_head(self, head_name, overwrite_ok=False):
        """
        Adds a causal language modeling head on top of the model.
        Args:
            head_name (str): The name of the head.
            overwrite_ok (bool, optional): Force overwrite if a head with the same name exists. Defaults to False.
        """
        head = CausalLMHead(self, head_name)
        self.add_prediction_head(head, overwrite_ok=overwrite_ok)
| 40.545455 | 116 | 0.69361 | from typing import Union
import torch
from torch import nn
from ..composition import AdapterCompositionBlock, parse_composition
from ..heads import CausalLMHead, ClassificationHead, MultiLabelClassificationHead
from ..model_mixin import InvertibleAdaptersMixin, ModelAdaptersMixin
from .bert import (
BertEncoderAdaptersMixin,
BertOutputAdaptersMixin,
BertSelfOutputAdaptersMixin,
ModelWithFlexibleHeadsAdaptersMixin,
)
class GPT2AttentionAdaptersModule(BertSelfOutputAdaptersMixin, nn.Module):
    """Adds attention adapters to a GPT-2 transformer block."""
    def __init__(self, parent):
        super().__init__()
        # Keep a reference to the parent module without registering it as a submodule.
        object.__setattr__(self, "parent", parent)
        self.config = parent.config
    @property
    def transformer_layer_norm(self):
        # No dedicated layer norm is exposed for the adapter here.
        return None
class GPT2OutputAdaptersModule(BertOutputAdaptersMixin, nn.Module):
    """Adds output adapters to a GPT-2 transformer block."""
    def __init__(self, parent):
        super().__init__()
        # Keep a reference to the parent module without registering it as a submodule.
        object.__setattr__(self, "parent", parent)
        self.config = parent.config
    @property
    def transformer_layer_norm(self):
        # No dedicated layer norm is exposed for the adapter here.
        return None
class GPT2DecoderBlockAdaptersMixin(BertEncoderAdaptersMixin):
    """Adds adapters to a GPT-2 decoder block, delegating every operation to
    the block's two adapter locations (attention output and FF output)."""
    def _init_adapter_modules(self):
        self.attention_adapters = GPT2AttentionAdaptersModule(self)
        self.output_adapters = GPT2OutputAdaptersModule(self)
        self.attention_adapters._init_adapter_modules()
        self.output_adapters._init_adapter_modules()
    def add_fusion_layer(self, adapter_names):
        self.attention_adapters.add_fusion_layer(adapter_names)
        self.output_adapters.add_fusion_layer(adapter_names)
    def add_adapter(self, adapter_name: str, layer_idx: int):
        self.attention_adapters.add_adapter(adapter_name, layer_idx)
        self.output_adapters.add_adapter(adapter_name, layer_idx)
    def delete_adapter(self, adapter_name):
        self.attention_adapters.delete_adapter(adapter_name)
        self.output_adapters.delete_adapter(adapter_name)
    def delete_fusion_layer(self, adapter_names):
        self.attention_adapters.delete_fusion_layer(adapter_names)
        self.output_adapters.delete_fusion_layer(adapter_names)
    def enable_adapters(self, adapter_names: list, unfreeze_adapters: bool, unfreeze_attention: bool):
        self.attention_adapters.enable_adapters(adapter_names, unfreeze_adapters, unfreeze_attention)
        self.output_adapters.enable_adapters(adapter_names, unfreeze_adapters, unfreeze_attention)
class GPT2ModelAdapterMixin(InvertibleAdaptersMixin, ModelAdaptersMixin):
    """Adds adapter support to the GPT-2 base model; operations iterate over
    ``self.base_model.h``, the list of transformer blocks."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def _init_adapter_modules(self):
        """Instantiate adapter/fusion modules declared in the model config."""
        super()._init_adapter_modules()
        # Adapters specified in the config; the invertible adapter is only
        # added if the adapter's config requires it.
        for adapter_name in self.config.adapters.adapters:
            self._add_adapter(adapter_name)
        # Fusion layers declared in the config, if any.
        if hasattr(self.config, "fusion_models"):
            for fusion_adapter_names in self.config.fusion_models:
                self.add_fusion_layer(fusion_adapter_names)
    def _add_adapter(self, adapter_name: str):
        """Add one adapter to every block not listed in its "leave_out" config."""
        adapter_config = self.config.adapters.get(adapter_name)
        leave_out = adapter_config.get("leave_out", [])
        for i, layer in enumerate(self.base_model.h):
            if i not in leave_out:
                layer.add_adapter(adapter_name, i)
        self.add_invertible_adapter(adapter_name)
    def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock]):
        """Freeze the model and unfreeze only the given adapters for training."""
        self.train()
        self.freeze_model(True)
        adapter_setup = parse_composition(adapter_setup)
        self.enable_adapters(adapter_setup, True, False)
        self.enable_invertible_adapters(adapter_setup.flatten())
        # Use the adapters to be trained by default in every forward pass.
        self.set_active_adapters(adapter_setup)
    def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):
        """Freeze the model and unfreeze fusion layers (and optionally adapters)."""
        self.train()
        self.freeze_model(True)
        adapter_setup = parse_composition(adapter_setup)
        self.enable_adapters(adapter_setup, unfreeze_adapters, True)
        # Use the adapters to be trained by default in every forward pass.
        self.set_active_adapters(adapter_setup)
    def enable_adapters(
        self, adapter_setup: AdapterCompositionBlock, unfreeze_adapters: bool, unfreeze_attention: bool
    ):
        # Delegate to every transformer block.
        for layer in self.base_model.h:
            layer.enable_adapters(adapter_setup, unfreeze_adapters, unfreeze_attention)
    def adjust_attention_mask_for_parallel(self, hidden_states, attention_mask):
        """Repeat the attention mask along the batch dim when Parallel
        composition has multiplied the batch size of ``hidden_states``."""
        if attention_mask is not None and hidden_states.shape[0] != attention_mask.shape[0]:
            repeats = [1] * len(attention_mask.shape)
            repeats[0] = hidden_states.shape[0] // attention_mask.shape[0]
            attention_mask = attention_mask.repeat(*repeats)
        return attention_mask
    def _add_fusion_layer(self, adapter_names):
        for layer in self.base_model.h:
            layer.add_fusion_layer(adapter_names)
    def _delete_adapter(self, adapter_name: str):
        for layer in self.base_model.h:
            layer.delete_adapter(adapter_name)
        self.delete_invertible_adapter(adapter_name)
    def _delete_fusion_layer(self, adapter_names):
        for layer in self.base_model.h:
            layer.delete_fusion_layer(adapter_names)
    def get_fusion_regularization_loss(self):
        """L2 regularization pulling each fusion value matrix towards identity."""
        reg_loss = 0.0
        target = torch.zeros((self.config.hidden_size, self.config.hidden_size)).fill_diagonal_(1.0).to(self.device)
        for _, v in self.base_model.h._modules.items():
            for _, layer_fusion in v.output_adapters.adapter_fusion_layer.items():
                if hasattr(layer_fusion, "value"):
                    reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()
            for _, layer_fusion in v.attention_adapters.adapter_fusion_layer.items():
                if hasattr(layer_fusion, "value"):
                    reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()
        return reg_loss
    def get_adapter(self, name):
        """Return {layer_idx: {location: adapter_module}} for the named adapter."""
        return_adapters = {}
        for idx, layer in enumerate(self.h):
            adapters = {
                "attention": layer.attention_adapters.adapters,
                "output": layer.output_adapters.adapters,
            }
            for key, adapt in adapters.items():
                if hasattr(adapt, name):
                    if idx not in return_adapters:
                        return_adapters[idx] = {}
                    return_adapters[idx][key] = getattr(adapt, name)
        return return_adapters
class GPT2ModelHeadsMixin(ModelWithFlexibleHeadsAdaptersMixin):
    """Adds flexible prediction heads to a GPT-2 model."""
    # Maps head-type identifiers (as used in head configs) to head classes.
    head_types = {
        "classification": ClassificationHead,
        "multilabel_classification": MultiLabelClassificationHead,
        "causal_lm": CausalLMHead,
    }
    def add_classification_head(
        self,
        head_name,
        num_labels=2,
        layers=2,
        activation_function="tanh",
        overwrite_ok=False,
        multilabel=False,
        id2label=None,
    ):
        """Add a sequence classification head on top of the model.

        Args:
            head_name (str): The name of the head.
            num_labels (int, optional): Number of classification labels. Defaults to 2.
            layers (int, optional): Number of layers. Defaults to 2.
            activation_function (str, optional): Activation function. Defaults to 'tanh'.
            overwrite_ok (bool, optional): Force overwrite if a head with the same name exists. Defaults to False.
            multilabel (bool, optional): Enable multilabel classification setup. Defaults to False.
            id2label (dict, optional): Mapping from label ids to label strings. Defaults to None.
        """
        if multilabel:
            head = MultiLabelClassificationHead(self, head_name, num_labels, layers, activation_function, id2label)
        else:
            head = ClassificationHead(self, head_name, num_labels, layers, activation_function, id2label)
        self.add_prediction_head(head, overwrite_ok)
    def add_causal_lm_head(self, head_name, overwrite_ok=False):
        """Add a causal language modeling head on top of the model.

        Args:
            head_name (str): The name of the head.
            overwrite_ok (bool, optional): Force overwrite if a head with the same name exists. Defaults to False.
        """
        head = CausalLMHead(self, head_name)
        self.add_prediction_head(head, overwrite_ok=overwrite_ok)
| true | true |
f72aa0e8cc0326318ce5bd49e2a78712241bce3f | 1,562 | py | Python | tests/test_elastic_service.py | occidere/blind-review-parser | 72dd3a3c897d87d79f9303597016801e5fb1c648 | [
"Apache-2.0"
] | 7 | 2021-02-15T16:43:20.000Z | 2021-03-23T17:10:47.000Z | tests/test_elastic_service.py | occidere/blind-review-parser | 72dd3a3c897d87d79f9303597016801e5fb1c648 | [
"Apache-2.0"
] | 8 | 2021-02-16T13:38:40.000Z | 2021-02-16T13:51:35.000Z | tests/test_elastic_service.py | occidere/blind-review-parser | 72dd3a3c897d87d79f9303597016801e5fb1c648 | [
"Apache-2.0"
] | null | null | null | import unittest
from blindreviewparser.parser.blind_review_parser import *
class TestElasticService(unittest.TestCase):
    """Integration tests for ElasticService.

    Requires a live Elasticsearch instance at localhost:9200; documents are
    written to (and cleaned from) the 'blind-review-210217' index.
    """
    def setUp(self) -> None:
        self.es_endpoint = 'http://localhost:9200'
        self.elastic_service = ElasticService(self.es_endpoint)
        # Sample review document; its url_hash is used as the ES doc id.
        self.sample = Review(
            company='occidere',
            title='"테스트 리뷰"',
            url='/kr/company/occidere/review/af9-0df3j',
            score=5.0,
            auth='현직원 · i*********", · IT 엔지니어 - 2021.02.17'
        )
    def tearDown(self) -> None:
        # Remove the sample document so tests stay independent.
        self.__delete_sample()
    def test_exist_any(self):
        """exist_any() returns True when at least one review is indexed."""
        # BUILD
        self.__index_sample()
        # OPERATE
        exist = self.elastic_service.exist_any([self.sample])
        # CHECK
        self.assertTrue(exist)
    def test_bulk_upsert(self):
        """bulk_upsert() indexes a review that was not present before."""
        # BUILD
        self.__delete_sample()
        # OPERATE
        self.elastic_service.bulk_upsert([self.sample])
        # CHECK
        resp = requests.get(f'{self.es_endpoint}/blind-review-210217/_doc/{self.sample.url_hash}')
        self.assertEqual(resp.status_code, 200)
    def __index_sample(self) -> None:
        # Index the sample document directly via the ES REST API.
        requests.post(
            url=f'{self.es_endpoint}/blind-review-210217/_doc/{self.sample.url_hash}',
            headers={'Content-Type': 'application/json'},
            data=self.sample.to_json_str().encode('utf-8')
        )
    def __delete_sample(self) -> None:
        # Best-effort delete; a 404 response is harmless here.
        requests.delete(f'{self.es_endpoint}/blind-review-210217/_doc/{self.sample.url_hash}')
| 30.038462 | 99 | 0.589629 | import unittest
from blindreviewparser.parser.blind_review_parser import *
class TestElasticService(unittest.TestCase):
    """Integration tests for ElasticService.

    Requires a live Elasticsearch instance at localhost:9200; documents are
    written to (and cleaned from) the 'blind-review-210217' index.
    """
    def setUp(self) -> None:
        self.es_endpoint = 'http://localhost:9200'
        self.elastic_service = ElasticService(self.es_endpoint)
        # Sample review document; its url_hash is used as the ES doc id.
        self.sample = Review(
            company='occidere',
            title='"테스트 리뷰"',
            url='/kr/company/occidere/review/af9-0df3j',
            score=5.0,
            auth='현직원 · i*********", · IT 엔지니어 - 2021.02.17'
        )
    def tearDown(self) -> None:
        # Remove the sample document so tests stay independent.
        self.__delete_sample()
    def test_exist_any(self):
        """exist_any() returns True when at least one review is indexed."""
        # BUILD
        self.__index_sample()
        # OPERATE
        exist = self.elastic_service.exist_any([self.sample])
        # CHECK
        self.assertTrue(exist)
    def test_bulk_upsert(self):
        """bulk_upsert() indexes a review that was not present before."""
        # BUILD
        self.__delete_sample()
        # OPERATE
        self.elastic_service.bulk_upsert([self.sample])
        # CHECK
        resp = requests.get(f'{self.es_endpoint}/blind-review-210217/_doc/{self.sample.url_hash}')
        self.assertEqual(resp.status_code, 200)
    def __index_sample(self) -> None:
        # Index the sample document directly via the ES REST API.
        requests.post(
            url=f'{self.es_endpoint}/blind-review-210217/_doc/{self.sample.url_hash}',
            headers={'Content-Type': 'application/json'},
            data=self.sample.to_json_str().encode('utf-8')
        )
    def __delete_sample(self) -> None:
        # Best-effort delete; a 404 response is harmless here.
        requests.delete(f'{self.es_endpoint}/blind-review-210217/_doc/{self.sample.url_hash}')
| true | true |
f72aa166383d481c6103f4761816a7c123b14e5f | 2,262 | py | Python | adb/systrace/catapult/devil/devil/android/tools/system_app_test.py | mohanedmoh/TBS | 6aebf52643911fe0dce7d02825eb0f046da1b3b1 | [
"Apache-2.0"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | adb/systrace/catapult/devil/devil/android/tools/system_app_test.py | mohanedmoh/TBS | 6aebf52643911fe0dce7d02825eb0f046da1b3b1 | [
"Apache-2.0"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | adb/systrace/catapult/devil/devil/android/tools/system_app_test.py | mohanedmoh/TBS | 6aebf52643911fe0dce7d02825eb0f046da1b3b1 | [
"Apache-2.0"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | #!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
if __name__ == '__main__':
sys.path.append(os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..')))
from devil import devil_env
from devil.android import device_utils
from devil.android.sdk import adb_wrapper
from devil.android.sdk import version_codes
from devil.android.tools import system_app
with devil_env.SysPath(devil_env.PYMOCK_PATH):
import mock
class SystemAppTest(unittest.TestCase):

    def testDoubleEnableModification(self):
        """Ensures that system app modification logic isn't repeated.

        If EnableSystemAppModification uses are nested, inner calls should
        not need to perform any of the expensive modification logic.
        """
        # pylint: disable=no-self-use,protected-access
        # Fully mocked device reporting SDK level LOLLIPOP.
        mock_device = mock.Mock(spec=device_utils.DeviceUtils)
        mock_device.adb = mock.Mock(spec=adb_wrapper.AdbWrapper)
        type(mock_device).build_version_sdk = mock.PropertyMock(
            return_value=version_codes.LOLLIPOP)

        # Back the mocked Get/SetProp with a plain dict so the enable flag
        # set by the outer context is visible to the nested one.
        system_props = {}

        def dict_setprop(prop_name, value):
            system_props[prop_name] = value

        def dict_getprop(prop_name):
            return system_props.get(prop_name, '')

        mock_device.SetProp.side_effect = dict_setprop
        mock_device.GetProp.side_effect = dict_getprop

        with system_app.EnableSystemAppModification(mock_device):
            # Outer use performs the full work: enable root, check the
            # modification prop, and set it to '1'.
            mock_device.EnableRoot.assert_called_once()
            mock_device.GetProp.assert_called_once_with(
                system_app._ENABLE_MODIFICATION_PROP)
            mock_device.SetProp.assert_called_once_with(
                system_app._ENABLE_MODIFICATION_PROP, '1')
            mock_device.reset_mock()
            with system_app.EnableSystemAppModification(mock_device):
                # Nested use only reads the prop; no re-enable work is done.
                mock_device.EnableRoot.assert_not_called()
                mock_device.GetProp.assert_called_once_with(
                    system_app._ENABLE_MODIFICATION_PROP)
                mock_device.SetProp.assert_not_called()
            mock_device.reset_mock()
        # Leaving the outermost context resets the modification prop to '0'.
        mock_device.SetProp.assert_called_once_with(
            system_app._ENABLE_MODIFICATION_PROP, '0')
if __name__ == '__main__':
unittest.main()
| 32.314286 | 72 | 0.751105 |
import os
import sys
import unittest
if __name__ == '__main__':
sys.path.append(os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..')))
from devil import devil_env
from devil.android import device_utils
from devil.android.sdk import adb_wrapper
from devil.android.sdk import version_codes
from devil.android.tools import system_app
with devil_env.SysPath(devil_env.PYMOCK_PATH):
import mock
class SystemAppTest(unittest.TestCase):
def testDoubleEnableModification(self):
mock_device = mock.Mock(spec=device_utils.DeviceUtils)
mock_device.adb = mock.Mock(spec=adb_wrapper.AdbWrapper)
type(mock_device).build_version_sdk = mock.PropertyMock(
return_value=version_codes.LOLLIPOP)
system_props = {}
def dict_setprop(prop_name, value):
system_props[prop_name] = value
def dict_getprop(prop_name):
return system_props.get(prop_name, '')
mock_device.SetProp.side_effect = dict_setprop
mock_device.GetProp.side_effect = dict_getprop
with system_app.EnableSystemAppModification(mock_device):
mock_device.EnableRoot.assert_called_once()
mock_device.GetProp.assert_called_once_with(
system_app._ENABLE_MODIFICATION_PROP)
mock_device.SetProp.assert_called_once_with(
system_app._ENABLE_MODIFICATION_PROP, '1')
mock_device.reset_mock()
with system_app.EnableSystemAppModification(mock_device):
mock_device.EnableRoot.assert_not_called()
mock_device.GetProp.assert_called_once_with(
system_app._ENABLE_MODIFICATION_PROP)
mock_device.SetProp.assert_not_called()
mock_device.reset_mock()
mock_device.SetProp.assert_called_once_with(
system_app._ENABLE_MODIFICATION_PROP, '0')
if __name__ == '__main__':
unittest.main()
| true | true |
f72aa18c0f5b6396366947977df0c5e4b9da7877 | 264 | py | Python | tests/hamiltonian/test_exact.py | ymtz03/freqerica | d79e76181a037da5c11b47f8a4e1bf4387a0468f | [
"BSD-2-Clause"
] | 1 | 2020-05-08T15:28:04.000Z | 2020-05-08T15:28:04.000Z | tests/hamiltonian/test_exact.py | ymtz03/freqerica | d79e76181a037da5c11b47f8a4e1bf4387a0468f | [
"BSD-2-Clause"
] | null | null | null | tests/hamiltonian/test_exact.py | ymtz03/freqerica | d79e76181a037da5c11b47f8a4e1bf4387a0468f | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
import freqerica.hamiltonian.exact
class BasicTestSuite(unittest.TestCase):
    """Smoke-test suite: verifies the test runner itself is wired up."""

    def test_absolute_truth_and_meaning(self):
        # Trivially-true check; a failure here means the test setup is broken.
        self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
| 16.5 | 46 | 0.689394 |
import unittest
import freqerica.hamiltonian.exact
class BasicTestSuite(unittest.TestCase):
def test_absolute_truth_and_meaning(self):
assert True
if __name__ == '__main__':
unittest.main()
| true | true |
f72aa18da6b39da5e2c7c96f9b56b0327cc9cf0a | 1,301 | py | Python | authapp/forms.py | EvgenDEP1/exam | 0c6faf8986e890bc03f8a407fb3d72b7ccecc1e0 | [
"Apache-2.0"
] | null | null | null | authapp/forms.py | EvgenDEP1/exam | 0c6faf8986e890bc03f8a407fb3d72b7ccecc1e0 | [
"Apache-2.0"
] | null | null | null | authapp/forms.py | EvgenDEP1/exam | 0c6faf8986e890bc03f8a407fb3d72b7ccecc1e0 | [
"Apache-2.0"
] | null | null | null | from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.contrib.auth.models import User
class LoginForm(AuthenticationForm):
    """Authentication form styled for the site.

    Labels are blanked in favour of placeholder text, and every field
    receives the shared ``input-line full-width`` CSS class.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['username'].label = ""
        self.fields['password'].label = ""
        # Placeholders replace the (blanked) labels; the f-prefixes on these
        # constant strings were pointless and have been dropped.
        self.fields['username'].widget.attrs['placeholder'] = 'Логин'
        self.fields['password'].widget.attrs['placeholder'] = 'Пароль'
        # Apply the common styling class to every field.
        for field in self.fields.values():
            field.widget.attrs['class'] = 'input-line full-width'
class RegisterForm(UserCreationForm):
    """Registration form for new users, styled to match the site's inputs.

    Labels are blanked and replaced by placeholders, Django's default help
    texts are cleared, and every field gets the shared CSS class.
    """

    class Meta:
        model = User
        fields = ('username', 'password1', 'password2')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['username'].label = ""
        self.fields['password1'].label = ""
        self.fields['password2'].label = ""
        # Constant placeholder strings; useless f-prefixes removed.
        self.fields['username'].widget.attrs['placeholder'] = 'Логин'
        self.fields['password1'].widget.attrs['placeholder'] = 'Пароль'
        # NOTE(review): trailing space kept from the original — confirm intentional.
        self.fields['password2'].widget.attrs['placeholder'] = 'Повтор пароля '
        for field in self.fields.values():
            field.widget.attrs['class'] = 'input-line full-width'
            # Suppress Django's default help text (password rules, etc.).
            field.help_text = ''
from django.contrib.auth.models import User
class LoginForm(AuthenticationForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['username'].label = ""
self.fields['password'].label = ""
self.fields['username'].widget.attrs['placeholder'] = f'Логин'
self.fields['password'].widget.attrs['placeholder'] = f'Пароль'
for name, item in self.fields.items():
item.widget.attrs['class'] = f'input-line full-width'
class RegisterForm(UserCreationForm):
class Meta:
model = User
fields = ('username', 'password1', 'password2')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['username'].label = ""
self.fields['password1'].label = ""
self.fields['password2'].label = ""
self.fields['username'].widget.attrs['placeholder'] = f'Логин'
self.fields['password1'].widget.attrs['placeholder'] = f'Пароль'
self.fields['password2'].widget.attrs['placeholder'] = f'Повтор пароля '
for name, item in self.fields.items():
item.widget.attrs['class'] = f'input-line full-width'
item.help_text = '' | true | true |
f72aa1cad8297f5101397a1f8105d26c2f65379d | 415 | py | Python | online_quiz/online_quiz/asgi.py | abhinavkavuri/django-trivia | e451ffd85a06ec9c1e1d690c67fdc51601fa6a5c | [
"Apache-2.0"
] | null | null | null | online_quiz/online_quiz/asgi.py | abhinavkavuri/django-trivia | e451ffd85a06ec9c1e1d690c67fdc51601fa6a5c | [
"Apache-2.0"
] | 6 | 2020-06-05T20:37:41.000Z | 2021-09-22T18:27:23.000Z | online_quiz/online_quiz/asgi.py | abhinavkavuri/django-trivia | e451ffd85a06ec9c1e1d690c67fdc51601fa6a5c | [
"Apache-2.0"
] | null | null | null | """
ASGI config for online_quiz project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os

from django.core.asgi import get_asgi_application

# The settings module must be known before the application is constructed.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'online_quiz.settings')

# Module-level ASGI callable that the ASGI server looks up.
application = get_asgi_application()
| 24.411765 | 79 | 0.759036 |
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'online_quiz.settings')
application = get_asgi_application()
| true | true |
f72aa1e7883558de22bc21371e82becfb2c32478 | 1,010 | py | Python | server/routes/prometheus.py | sadiejay/Open-Sentencing-Model | fc83af2f37c9d77035349d1d39cf1cc309837045 | [
"Apache-2.0"
] | 52 | 2019-12-27T03:52:00.000Z | 2022-03-26T18:16:30.000Z | server/routes/prometheus.py | sadiejay/Open-Sentencing-Model | fc83af2f37c9d77035349d1d39cf1cc309837045 | [
"Apache-2.0"
] | 114 | 2019-12-03T04:13:38.000Z | 2020-10-03T18:02:03.000Z | server/routes/prometheus.py | sadiejay/Open-Sentencing-Model | fc83af2f37c9d77035349d1d39cf1cc309837045 | [
"Apache-2.0"
] | 45 | 2019-12-22T08:17:08.000Z | 2022-03-13T09:57:09.000Z |
from server import app
from flask import Response, request
from prometheus_client import generate_latest, Counter
from functools import wraps
# route to display configured Prometheus metrics
# note that you will need to set up custom metric observers for your app
@app.route('/metrics')
def prometheus_metrics():
    """Expose the registered Prometheus metrics in the text exposition format."""
    # Content type mandated by the Prometheus text format, version 0.0.4.
    return Response(
        generate_latest(),
        mimetype='text/plain; version=0.0.4; charset=utf-8',
    )
# creates a Prometheus Counter to track requests for specified routes
# usage:
# @app.route('/example')
# @prometheus.track_requests
# def example():
# pass
# Counts requests for routes that opt in via @track_requests, labelled by
# HTTP method and endpoint path.
# (Fixed typo in the user-visible help text: "specififed" -> "specified".)
route_counter = Counter('requests_for_routes',
                        'Number of requests for specified routes',
                        ['method', 'endpoint'])


def track_requests(route):
    """Decorator that increments ``route_counter`` for every call to *route*.

    Usage::

        @app.route('/example')
        @track_requests
        def example():
            ...
    """
    @wraps(route)
    def wrapper(*args, **kwargs):
        route_labels = {
            "method": request.method,
            "endpoint": str(request.path)
        }
        route_counter.labels(**route_labels).inc()
        return route(*args, **kwargs)
    return wrapper
| 31.5625 | 114 | 0.708911 |
from server import app
from flask import Response, request
from prometheus_client import generate_latest, Counter
from functools import wraps
@app.route('/metrics')
def prometheus_metrics():
MIMETYPE = 'text/plain; version=0.0.4; charset=utf-8'
return Response(generate_latest(), mimetype=MIMETYPE)
route_counter = Counter('requests_for_routes', 'Number of requests for specififed routes', ['method', 'endpoint'])
def track_requests(route):
@wraps(route)
def wrapper(*args, **kwargs):
route_labels = {
"method": request.method,
"endpoint": str(request.path)
}
route_counter.labels(**route_labels).inc()
return route(*args, **kwargs)
return wrapper
| true | true |
f72aa29a2c3502ea40faf7f916df3b0ead8ebf1a | 1,577 | py | Python | xl2code/parsers/direct_parser.py | youlanhai/ExcelToCode | d160c75b9b7a305f4b3367d85ee0550572869d3e | [
"MIT"
] | 47 | 2017-06-23T07:47:50.000Z | 2022-03-07T22:36:19.000Z | xl2code/parsers/direct_parser.py | twjitm/ExcelToCode | d160c75b9b7a305f4b3367d85ee0550572869d3e | [
"MIT"
] | 1 | 2019-03-12T06:12:50.000Z | 2019-04-03T00:50:01.000Z | xl2code/parsers/direct_parser.py | twjitm/ExcelToCode | d160c75b9b7a305f4b3367d85ee0550572869d3e | [
"MIT"
] | 23 | 2017-05-12T07:46:07.000Z | 2022-01-22T03:19:50.000Z | # -*- coding: utf-8 -*-
import traceback
import xlsconfig
import util
from tps import tp0, convention
from base_parser import ConverterInfo, BaseParser
# 利用Excel表头描述,进行导表,不需要转换器
class DirectParser(BaseParser):
    """Imports a sheet driven purely by the Excel header description.

    Unlike converter-based parsers, no external converter module is needed:
    column field names and type declarations are read straight from the
    sheet's own header rows.
    """

    def __init__(self, filename, module, sheet_index=0):
        super(DirectParser, self).__init__(filename, module, sheet_index)
        # Row indices (within the sheet) of the field-name row and the
        # type-declaration row, taken from the global sheet layout config.
        self.field_row_index = xlsconfig.SHEET_ROW_INDEX["field"]
        self.type_row_index = xlsconfig.SHEET_ROW_INDEX["type"]

    def parse_header(self, rows):
        """Build per-column converters from the header/field/type rows.

        Stops at the first empty field-name cell; logs and skips columns
        with duplicate field names or unknown type declarations.
        """
        header_row = [self.extract_cell_value(cell) for cell in rows[self.header_row_index]]
        field_row = [self.extract_cell_value(cell) for cell in rows[self.field_row_index]]
        type_row = [self.extract_cell_value(cell) for cell in rows[self.type_row_index]]
        for col, field in enumerate(field_row):
            # An empty field name marks the end of the used columns.
            if field == "": break
            self.converters[col] = None
            if field in self.field_2_col:
                # Duplicate column name (runtime message intentionally left in Chinese).
                util.log_error("列名'%s'重复,列:%s", field, util.int_to_base26(col))
                continue
            self.field_2_col[field] = col
            # Fall back to the field name / "String" when the header/type cell is blank.
            header = header_row[col] or field
            type = type_row[col] or "String"  # NOTE(review): shadows builtin `type`
            method = None
            try:
                method = convention.type2function(type)
            except:  # NOTE(review): bare except also swallows KeyboardInterrupt — consider narrowing
                # Invalid type declaration (runtime message intentionally left in Chinese).
                util.log_error("无效的类型'%s',列:%s", type, util.int_to_base26(col))
                continue
            self.converters[col] = ConverterInfo((header, field, method, True))
            self.sheet_types[field] = (col, field, header, type)
        # The first column is treated as the sheet's key.
        self.key_name = self.converters[0].field
        return

    def parse_arguments(self, rows):
        """Parse sheet-level arguments, then record the multi-key flag."""
        super(DirectParser, self).parse_arguments(rows)
        self.is_multi_key = self.arguments.get("multiKey", False)
| 30.326923 | 87 | 0.733037 |
import traceback
import xlsconfig
import util
from tps import tp0, convention
from base_parser import ConverterInfo, BaseParser
class DirectParser(BaseParser):
def __init__(self, filename, module, sheet_index=0):
super(DirectParser, self).__init__(filename, module, sheet_index)
self.field_row_index = xlsconfig.SHEET_ROW_INDEX["field"]
self.type_row_index = xlsconfig.SHEET_ROW_INDEX["type"]
def parse_header(self, rows):
header_row = [self.extract_cell_value(cell) for cell in rows[self.header_row_index]]
field_row = [self.extract_cell_value(cell) for cell in rows[self.field_row_index]]
type_row = [self.extract_cell_value(cell) for cell in rows[self.type_row_index]]
for col, field in enumerate(field_row):
if field == "": break
self.converters[col] = None
if field in self.field_2_col:
util.log_error("列名'%s'重复,列:%s", field, util.int_to_base26(col))
continue
self.field_2_col[field] = col
header = header_row[col] or field
type = type_row[col] or "String"
method = None
try:
method = convention.type2function(type)
except:
util.log_error("无效的类型'%s',列:%s", type, util.int_to_base26(col))
continue
self.converters[col] = ConverterInfo((header, field, method, True))
self.sheet_types[field] = (col, field, header, type)
self.key_name = self.converters[0].field
return
def parse_arguments(self, rows):
super(DirectParser, self).parse_arguments(rows)
self.is_multi_key = self.arguments.get("multiKey", False)
| true | true |
f72aa4256943cc8676fb4c07209c50e85fa11d40 | 4,050 | py | Python | examples/structural/beam.py | SzymonSzyszko/AeroPy | b061c690e5926fdd834b7c50837c25108e908156 | [
"MIT"
] | 1 | 2020-07-23T00:15:00.000Z | 2020-07-23T00:15:00.000Z | examples/structural/beam.py | SzymonSzyszko/AeroPy | b061c690e5926fdd834b7c50837c25108e908156 | [
"MIT"
] | null | null | null | examples/structural/beam.py | SzymonSzyszko/AeroPy | b061c690e5926fdd834b7c50837c25108e908156 | [
"MIT"
] | null | null | null | from aeropy.geometry.parametric import poly
from aeropy.structural.stable_solution import (structure, mesh_1D, properties,
boundary_conditions)
from aeropy.xfoil_module import output_reader
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import pickle
# Load the Abaqus reference solution: primary (displacement) results from a
# pickled dict, secondary (strain/stress) results from a text dump.
# Fixed: the pickle file was opened without ever being closed.
with open("save.p", "rb") as pickle_file:
    abaqus_primary = pickle.load(pickle_file, encoding='latin1')
abaqus_secondary = output_reader('secondary_variables.txt')
# Sort the Abaqus nodal data by x so it can be plotted as a curve.
abaqus_data = np.array(sorted(zip(abaqus_primary['C_U']['x'],
                                  abaqus_primary['C_U']['y'],
                                  abaqus_primary['U'][:, 0],
                                  abaqus_primary['U'][:, 1],)))
abq_x, abq_y, abq_u1, abq_u2 = abaqus_data.T
# Flip the y axis and apply the 0.005 offset — presumably a half-thickness
# shift to the reference surface; TODO confirm against the Abaqus model.
abq_y = -abq_y + .005
abq_u2 = -abq_u2
# Convert Abaqus logarithmic (true) strains into engineering strains:
# eps_eng = exp(eps_log) - 1.
abaqus_secondary['LE11'] = np.exp(np.array(abaqus_secondary['LE11'])) - 1
abaqus_secondary['LE12'] = np.exp(np.array(abaqus_secondary['LE12'])) - 1
abaqus_secondary['LE22'] = np.exp(np.array(abaqus_secondary['LE22'])) - 1
coefficients = np.array([0, 0, 0, 0])  # NOTE(review): unused below — candidate for removal
bp = properties()
# Single concentrated load of magnitude 1 in the -y direction.
bc = boundary_conditions(load=np.array([[0, -1]]))
# Polynomial coefficients of the deflected shape, scaled by P/(6EI) —
# presumably the analytical Euler–Bernoulli tip-load solution; TODO confirm
# the coefficient ordering against poly()'s convention.
analytical_solution = bc.concentrated_load[0][1]/(6*bp.young*bp.inertia) * \
    np.array([-1, 3, 0, 0])
mesh = mesh_1D(mesh_n=10)
# Parent: undeformed straight beam; child: analytically deflected beam.
curve_parent = poly(a=[0, 0, 0, 0])
curve_child = poly(a=analytical_solution)
beam = structure(curve_parent, curve_child, mesh, bp, bc)
beam.calculate_position()
strain = beam.strain()
stress = beam.stress(loading_condition='plane_stress')
# --- Figure 1: deflected shape (parent vs. child vs. Abaqus reference) ---
plt.figure()
u = beam.u()
u1 = beam.u(diff='x1')
u2 = beam.u(diff='x2')
plt.plot(beam.r_p[0], beam.r_p[1], label='parent')
plt.scatter(beam.r_p[0], beam.r_p[1], label='parent')
plt.plot(beam.r_c[0], beam.r_c[1], label='child')
plt.scatter(beam.r_c[0], beam.r_c[1], label='child')
plt.plot(abq_x, abq_y, label='Abaqus')
plt.title('Position')
plt.grid()
plt.legend()

# --- Figure 2: position gradients along x1 and x2 ---
plt.figure()
r1_p, r1_c = beam.calculate_position(diff='x1')
r2_p, r2_c = beam.calculate_position(diff='x2')
# plt.plot(beam.r_p[0], r1_p[0], label='$r_{1,1}^p$')
plt.plot(beam.r_p[0], r1_p[1], label='$r_{2,1}^p$')
# plt.plot(beam.r_p[0], r2_p[0], label='$r_{1,2}^p$')
plt.plot(beam.r_p[0], r2_p[1], label='$r_{2,2}^p$')
# plt.plot(beam.r_p[0], r1_c[0], label='$r_{1,1}^c$')
plt.plot(beam.r_p[0], r1_c[1], label='$r_{2,1}^c$')
# plt.plot(beam.r_p[0], r2_c[0], label='$r_{1,2}^c$')
plt.plot(beam.r_p[0], r2_c[1], label='$r_{2,2}^c$')
plt.title('Position gradients')
plt.grid()
plt.legend()

# --- Figure 3: displacement components along the beam ---
plt.figure()
u = beam.u()
u1 = beam.u(diff='x1')  # NOTE(review): u1/u2 recomputed but never plotted
u2 = beam.u(diff='x2')
plt.scatter(beam.mesh.x_p, u[0], label=r'$u_1$')
plt.scatter(beam.mesh.x_p, u[1], label=r'$u_2$')
plt.plot(beam.mesh.x_p, u[0], label=r'$u_1$')
plt.plot(beam.mesh.x_p, u[1], label=r'$u_2$')
# plt.plot(abq_x, abq_u1, label=r'Abaqus $u_1$')
# plt.plot(abq_x, abq_u2, label=r'Abaqus $u_2$')
plt.title('Displacement diff')
plt.legend()

# --- Figure 4: strain components vs. Abaqus ---
plt.figure()
plt.plot(beam.mesh.x_p, strain[0][0], label=r'$\epsilon_{11}$')
plt.plot(beam.mesh.x_p, strain[0][1], label=r'$\epsilon_{12}$')
plt.plot(beam.mesh.x_p, strain[1][1], label=r'$\epsilon_{22}$')
plt.plot(abaqus_secondary['X'], abaqus_secondary['LE11'],
         label=r'Abaqus $\epsilon_{11}$')
plt.plot(abaqus_secondary['X'], abaqus_secondary['LE12'],
         label=r'Abaqus $\epsilon_{12}$')
plt.plot(abaqus_secondary['X'], abaqus_secondary['LE22'],
         label=r'Abaqus $\epsilon_{22}$')
plt.title('Strain')
plt.legend()

# --- Figure 5: stress components vs. Abaqus ---
plt.figure()
plt.plot(beam.mesh.x_p, stress[0][0], label=r'$\sigma_{11}$')
plt.plot(beam.mesh.x_p, stress[0][1], label=r'$\sigma_{12}$')
plt.plot(beam.mesh.x_p, stress[1][1], label=r'$\sigma_{22}$')
plt.plot(abaqus_secondary['X'], abaqus_secondary['S11'],
         label=r'Abaqus $\sigma_{11}$')
plt.plot(abaqus_secondary['X'], abaqus_secondary['S12'],
         label=r'Abaqus $\sigma_{12}$')
plt.plot(abaqus_secondary['X'], abaqus_secondary['S22'],
         label=r'Abaqus $\sigma_{22}$')
plt.legend()
plt.title('Stress')
plt.show()
| 36.486486 | 78 | 0.655062 | from aeropy.geometry.parametric import poly
from aeropy.structural.stable_solution import (structure, mesh_1D, properties,
boundary_conditions)
from aeropy.xfoil_module import output_reader
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import pickle
abaqus_primary = pickle.load(open("save.p", "rb"), encoding='latin1')
abaqus_secondary = output_reader('secondary_variables.txt')
abaqus_data = np.array(sorted(zip(abaqus_primary['C_U']['x'],
abaqus_primary['C_U']['y'],
abaqus_primary['U'][:, 0],
abaqus_primary['U'][:, 1],)))
abq_x, abq_y, abq_u1, abq_u2 = abaqus_data.T
abq_y = -abq_y + .005
abq_u2 = -abq_u2
abaqus_secondary['LE11'] = np.exp(np.array(abaqus_secondary['LE11'])) - 1
abaqus_secondary['LE12'] = np.exp(np.array(abaqus_secondary['LE12'])) - 1
abaqus_secondary['LE22'] = np.exp(np.array(abaqus_secondary['LE22'])) - 1
coefficients = np.array([0, 0, 0, 0])
bp = properties()
bc = boundary_conditions(load=np.array([[0, -1]]))
analytical_solution = bc.concentrated_load[0][1]/(6*bp.young*bp.inertia) * \
np.array([-1, 3, 0, 0])
mesh = mesh_1D(mesh_n=10)
curve_parent = poly(a=[0, 0, 0, 0])
curve_child = poly(a=analytical_solution)
beam = structure(curve_parent, curve_child, mesh, bp, bc)
beam.calculate_position()
strain = beam.strain()
stress = beam.stress(loading_condition='plane_stress')
plt.figure()
u = beam.u()
u1 = beam.u(diff='x1')
u2 = beam.u(diff='x2')
plt.plot(beam.r_p[0], beam.r_p[1], label='parent')
plt.scatter(beam.r_p[0], beam.r_p[1], label='parent')
plt.plot(beam.r_c[0], beam.r_c[1], label='child')
plt.scatter(beam.r_c[0], beam.r_c[1], label='child')
plt.plot(abq_x, abq_y, label='Abaqus')
plt.title('Position')
plt.grid()
plt.legend()
plt.figure()
r1_p, r1_c = beam.calculate_position(diff='x1')
r2_p, r2_c = beam.calculate_position(diff='x2')
plt.plot(beam.r_p[0], r1_p[1], label='$r_{2,1}^p$')
plt.plot(beam.r_p[0], r2_p[1], label='$r_{2,2}^p$')
plt.plot(beam.r_p[0], r1_c[1], label='$r_{2,1}^c$')
plt.plot(beam.r_p[0], r2_c[1], label='$r_{2,2}^c$')
plt.title('Position gradients')
plt.grid()
plt.legend()
plt.figure()
u = beam.u()
u1 = beam.u(diff='x1')
u2 = beam.u(diff='x2')
plt.scatter(beam.mesh.x_p, u[0], label=r'$u_1$')
plt.scatter(beam.mesh.x_p, u[1], label=r'$u_2$')
plt.plot(beam.mesh.x_p, u[0], label=r'$u_1$')
plt.plot(beam.mesh.x_p, u[1], label=r'$u_2$')
plt.title('Displacement diff')
plt.legend()
plt.figure()
plt.plot(beam.mesh.x_p, strain[0][0], label=r'$\epsilon_{11}$')
plt.plot(beam.mesh.x_p, strain[0][1], label=r'$\epsilon_{12}$')
plt.plot(beam.mesh.x_p, strain[1][1], label=r'$\epsilon_{22}$')
plt.plot(abaqus_secondary['X'], abaqus_secondary['LE11'],
label=r'Abaqus $\epsilon_{11}$')
plt.plot(abaqus_secondary['X'], abaqus_secondary['LE12'],
label=r'Abaqus $\epsilon_{12}$')
plt.plot(abaqus_secondary['X'], abaqus_secondary['LE22'],
label=r'Abaqus $\epsilon_{22}$')
plt.title('Strain')
plt.legend()
plt.figure()
plt.plot(beam.mesh.x_p, stress[0][0], label=r'$\sigma_{11}$')
plt.plot(beam.mesh.x_p, stress[0][1], label=r'$\sigma_{12}$')
plt.plot(beam.mesh.x_p, stress[1][1], label=r'$\sigma_{22}$')
plt.plot(abaqus_secondary['X'], abaqus_secondary['S11'],
label=r'Abaqus $\sigma_{11}$')
plt.plot(abaqus_secondary['X'], abaqus_secondary['S12'],
label=r'Abaqus $\sigma_{12}$')
plt.plot(abaqus_secondary['X'], abaqus_secondary['S22'],
label=r'Abaqus $\sigma_{22}$')
plt.legend()
plt.title('Stress')
plt.show()
| true | true |
f72aa529d44c10ff374c8d1442847033225d002f | 449 | py | Python | examples/subgraphs/create_knn_subgraph.py | gugarosa/opfython | 19b467a92d85c7c26d231efec770645096827b4e | [
"Apache-2.0"
] | 26 | 2018-04-24T20:16:18.000Z | 2022-03-09T14:03:28.000Z | examples/subgraphs/create_knn_subgraph.py | gugarosa/opfython | 19b467a92d85c7c26d231efec770645096827b4e | [
"Apache-2.0"
] | 4 | 2020-12-26T14:57:18.000Z | 2022-03-30T02:34:18.000Z | examples/subgraphs/create_knn_subgraph.py | gugarosa/opfython | 19b467a92d85c7c26d231efec770645096827b4e | [
"Apache-2.0"
] | 16 | 2019-05-20T15:41:56.000Z | 2022-03-23T17:59:53.000Z | import opfython.stream.loader as l
import opfython.stream.parser as p
from opfython.subgraphs import KNNSubgraph
# Path to the sample dataset shipped with the examples.
input_file = 'data/boat.txt'

# Load the .txt file into memory.
txt = l.load_txt(input_file)

# Parse the pre-loaded data into features (X) and labels (Y).
X, Y = p.parse_loader(txt)

# Create a knn-subgraph structure from the parsed arrays.
g = KNNSubgraph(X, Y)

# Alternatively, a KNNSubgraph can be created directly from a file.
g = KNNSubgraph(from_file=input_file)
| 23.631579 | 54 | 0.772829 | import opfython.stream.loader as l
import opfython.stream.parser as p
from opfython.subgraphs import KNNSubgraph
input_file = 'data/boat.txt'
txt = l.load_txt(input_file)
X, Y = p.parse_loader(txt)
g = KNNSubgraph(X, Y)
g = KNNSubgraph(from_file=input_file)
| true | true |
f72aa5fc954c18cf553f3deb747a7fd96e64bef0 | 2,664 | py | Python | ishuhui/controllers/admin.py | Nayak-cyber/flask_ishuhui | 34352da462d4999bc7788c87773001312a213b20 | [
"MIT"
] | 192 | 2017-08-27T13:56:37.000Z | 2022-03-09T00:59:14.000Z | ishuhui/controllers/admin.py | Soumi7/flask_ishuhui | a3444b3679c45d5ba94c5c9a66551207eff1a646 | [
"MIT"
] | null | null | null | ishuhui/controllers/admin.py | Soumi7/flask_ishuhui | a3444b3679c45d5ba94c5c9a66551207eff1a646 | [
"MIT"
] | 54 | 2017-08-28T01:04:04.000Z | 2021-07-07T17:27:50.000Z | from flask import Blueprint, render_template, current_app, session
from flask import abort, jsonify
from flask_login import current_user
import ishuhui.tasks.task as task
from ..models.chapter import Chapter
from ..models.comic import Comic
from ..tasks.celery_task import refresh_chapters_task
bp_admin = Blueprint('admin', __name__, url_prefix='/admin')
@bp_admin.before_request
def login():
    """Reject any admin-blueprint request from an unauthenticated user."""
    if current_user.is_authenticated:
        return
    abort(403)
@bp_admin.route('/mange', methods=['GET'])
def mange():
    """Render the admin management page with comic/chapter statistics."""
    task_id = session.get('task_id')
    context = {
        'chapter_count': Chapter.query.count(),
        'comic_count': Comic.query.count(),
        'comics': Comic.query.all(),
        'task_id': task_id,
        'enable_celery': current_app.config['ENABLE_CELERY'],
        # A stored task id means a background refresh is (or was) running.
        'running': task_id is not None,
    }
    return render_template('mange.html', **context)
@bp_admin.route('/refresh_comics')
def refresh_comics():
    """Synchronously refresh the comic list and return the result as JSON."""
    result = task.refresh_comics()
    return jsonify(result)
@bp_admin.route('/refresh_chapters')
def refresh_chapters():
    """Refresh chapters — via Celery when enabled, otherwise synchronously.

    With Celery enabled, returns the new task id, or 400 if a previous
    refresh task is still in flight.
    """
    if not current_app.config['ENABLE_CELERY']:
        return jsonify(task.refresh_chapters())

    task_id = session.get('task_id')
    if task_id is not None:
        previous = refresh_chapters_task.AsyncResult(task_id)
        if previous.state != 'SUCCESS' and previous.state != 'FAILURE':
            # A refresh is still running; refuse to start another.
            return 'Already running', 400
    new_task = refresh_chapters_task.apply_async()
    session['task_id'] = new_task.id
    return session['task_id']
@bp_admin.route('/tasks/status/<task_id>')
def task_status(task_id):
    """Report the progress of a background chapter-refresh task as JSON.

    The response always carries ``state`` and ``progress``; on success it
    may add ``result``, on failure it adds ``status`` (the exception text).
    """
    result = refresh_chapters_task.AsyncResult(task_id)
    if result.state == 'PENDING':
        # Unknown or still-queued task: no progress information yet.
        response = {
            'state': result.state,
            'progress': 0,
        }
    elif result.state != 'FAILURE':
        response = {
            'state': result.state,
            'progress': result.info.get('progress', 0),
        }
        if result.state == 'SUCCESS':
            # Forget the finished task. Fixed: the default prevents a
            # KeyError when the session no longer holds 'task_id'
            # (e.g. the status endpoint is polled again after completion).
            session.pop('task_id', None)
            if 'result' in result.info:
                response['result'] = result.info['result']
    else:
        # Something went wrong in the background job.
        session.pop('task_id', None)
        response = {
            'state': result.state,
            'progress': 0,
            'status': str(result.info),  # this is the exception raised
        }
    return jsonify(response)
@bp_admin.route('/refresh_comic_images')
def refresh_comic_images():
    """Synchronously refresh comic cover images and return the result as JSON."""
    result = task.refresh_comic_images()
    return jsonify(result)
| 32.487805 | 77 | 0.614489 | from flask import Blueprint, render_template, current_app, session
from flask import abort, jsonify
from flask_login import current_user
import ishuhui.tasks.task as task
from ..models.chapter import Chapter
from ..models.comic import Comic
from ..tasks.celery_task import refresh_chapters_task
bp_admin = Blueprint('admin', __name__, url_prefix='/admin')
@bp_admin.before_request
def login():
if not current_user.is_authenticated:
abort(403)
@bp_admin.route('/mange', methods=['GET'])
def mange():
return render_template('mange.html', chapter_count=Chapter.query.count(),
comic_count=Comic.query.count(),
comics=Comic.query.all(),
task_id=session.get('task_id'),
enable_celery=current_app.config['ENABLE_CELERY'],
running=session.get('task_id') is not None)
@bp_admin.route('/refresh_comics')
def refresh_comics():
return jsonify(task.refresh_comics())
@bp_admin.route('/refresh_chapters')
def refresh_chapters():
if current_app.config['ENABLE_CELERY']:
if session.get('task_id') is None:
t = refresh_chapters_task.apply_async()
session['task_id'] = t.id
return session['task_id']
else:
result = refresh_chapters_task.AsyncResult(session['task_id'])
if result.state == 'SUCCESS' or result.state == 'FAILURE':
t = refresh_chapters_task.apply_async()
session['task_id'] = t.id
return session['task_id']
return 'Already running', 400
return jsonify(task.refresh_chapters())
@bp_admin.route('/tasks/status/<task_id>')
def task_status(task_id):
result = refresh_chapters_task.AsyncResult(task_id)
if result.state == 'PENDING':
response = {
'state': result.state,
'progress': 0,
}
elif result.state != 'FAILURE':
response = {
'state': result.state,
'progress': result.info.get('progress', 0),
}
if result.state == 'SUCCESS':
session.pop('task_id')
if 'result' in result.info:
response['result'] = result.info['result']
else:
session.pop('task_id')
response = {
'state': result.state,
'progress': 0,
'status': str(result.info),
}
return jsonify(response)
@bp_admin.route('/refresh_comic_images')
def refresh_comic_images():
return jsonify(task.refresh_comic_images())
| true | true |
f72aa61a7732c6e419535219feafdd67e09bdfe5 | 5,711 | py | Python | doors/gatekeeper_app.py | manens/nadine | 4938afa2d2c69ae5ac54f4360b081d10521a0a2f | [
"Apache-2.0"
] | null | null | null | doors/gatekeeper_app.py | manens/nadine | 4938afa2d2c69ae5ac54f4360b081d10521a0a2f | [
"Apache-2.0"
] | null | null | null | doors/gatekeeper_app.py | manens/nadine | 4938afa2d2c69ae5ac54f4360b081d10521a0a2f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import sys
import json
import time
import logging
import traceback
from core import Messages, EncryptedConnection, Gatekeeper
from threads import Heartbeat, EventWatcher
class GatekeeperApp(object):
    """Main loop: configures the doors, then supervises the Heartbeat and
    EventWatcher worker threads, restarting them when they die."""

    def run(self, config):
        try:
            logging.info("Starting up Gatekeeper...")
            gatekeeper = Gatekeeper(config)
            connection = gatekeeper.get_connection()
            # Sync our system clocks
            gatekeeper.set_system_clock()
            # Test the connection encryption
            if gatekeeper.test_keymaster_connection():
                logging.info("Keymaster encrypted connection successfull!")
            # Pull the configuration
            gatekeeper.configure_doors()
            if len(gatekeeper.doors) == 0:
                logging.error("No doors to program. Exiting")
                return
            logging.info("Configured %d doors" % len(gatekeeper.doors))
            # Set the time on each door
            if config['syncClocks']:
                gatekeeper.sync_clocks()
            # Clear out all the door codes if requested
            if config['clearCodes']:
                gatekeeper.clear_all_codes()
                # NOTE(review): this local assignment has no effect — probably
                # meant config['initialSync'] = True so codes get re-pulled.
                initialSync = True
            # Pull new data if requested
            if config['initialSync']:
                gatekeeper.pull_door_codes()
            try:
                # Start with a clean bowl
                sys.stdout.flush()
                heartbeat = None
                event_watcher = None
                hb_conn_err = False
                while True:
                    # Keep our heartbeat alive: (re)start it whenever it is
                    # missing or has died.
                    if not heartbeat or not heartbeat.is_alive():
                        hb_conn_err = False
                        if heartbeat and heartbeat.error:
                            try:
                                # Heartbeat errors can come from a poor connection to the Keymaster
                                # In cases like these we need to keep retrying to send the log up
                                gatekeeper.send_gatekeper_log("Heartbeat: " + str(heartbeat.error))
                            except Exception as e:
                                # Reporting failed too — skip the restart this
                                # pass so we retry reporting first.
                                hb_conn_err = True
                                logging.warning("Unable to report hearbeat error!: %s" % str(e))
                            # Back off before attempting a restart.
                            time.sleep(5)
                        if not hb_conn_err:
                            logging.info("Starting Heartbeat...")
                            poll_delay = config.get('KEYMASTER_POLL_DELAY_SEC', 5)
                            heartbeat = Heartbeat(connection, poll_delay)
                            heartbeat.setDaemon(True)
                            heartbeat.start()
                    # Keep our event watcher alive, same restart pattern.
                    if not event_watcher or not event_watcher.is_alive():
                        if event_watcher and event_watcher.error:
                            gatekeeper.send_gatekeper_log("EventWatcher: " + str(event_watcher.error))
                            time.sleep(5)
                        logging.info("Starting Event Watcher...")
                        poll_delay = config.get('EVENT_POLL_DELAY_SEC', 10)
                        event_watcher = EventWatcher(gatekeeper, poll_delay)
                        event_watcher.setDaemon(True)
                        event_watcher.start()
                    # React to flags raised by the worker threads.
                    if heartbeat.new_data:
                        gatekeeper.pull_door_codes()
                        heartbeat.all_clear()
                    if event_watcher.new_data:
                        event_logs = gatekeeper.pull_event_logs()
                        gatekeeper.push_event_logs(event_logs)
                        event_watcher.all_clear()
                    time.sleep(.1)
            except KeyboardInterrupt:
                # Graceful shutdown on Ctrl-C: stop both worker threads.
                logging.info(" Keyboard Interupt!")
                logging.info("Shutting down Heartbeat...")
                if heartbeat and heartbeat.is_alive():
                    heartbeat.stop()
                    #heartbeat.join()
                logging.info("Shutting down Event Watcher...")
                if event_watcher and event_watcher.is_alive():
                    event_watcher.stop()
                    #event_watcher.join()
        except Exception as e:
            # Top-level boundary: log the full traceback and exit.
            traceback.print_exc()
            logging.error("Error: %s" % str(e))
if __name__ == "__main__":
# Pull the config
with open('gw_config.json', 'r') as f:
config = json.load(f)
# Pull the command line args
config['initialSync'] = "--sync" in sys.argv
config['syncClocks'] = "--set-time" in sys.argv
config['clearCodes'] = "--clear-all" in sys.argv
if "--debug" in sys.argv:
config['DEBUG'] = True
# Configure logging
log_level = logging.DEBUG if config.get('DEBUG', False) else logging.INFO
logging.basicConfig(format='%(asctime)s [%(levelname)s]: %(message)s', level=log_level)
logging.getLogger("requests").setLevel(logging.WARNING)
# Start the application
app = GatekeeperApp()
app.run(config)
# Copyright 2019 Office Nomads LLC (http://www.officenomads.com/) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| 42.93985 | 583 | 0.555244 |
import sys
import json
import time
import logging
import traceback
from core import Messages, EncryptedConnection, Gatekeeper
from threads import Heartbeat, EventWatcher
class GatekeeperApp(object):
def run(self, config):
try:
logging.info("Starting up Gatekeeper...")
gatekeeper = Gatekeeper(config)
connection = gatekeeper.get_connection()
gatekeeper.set_system_clock()
if gatekeeper.test_keymaster_connection():
logging.info("Keymaster encrypted connection successfull!")
gatekeeper.configure_doors()
if len(gatekeeper.doors) == 0:
logging.error("No doors to program. Exiting")
return
logging.info("Configured %d doors" % len(gatekeeper.doors))
if config['syncClocks']:
gatekeeper.sync_clocks()
if config['clearCodes']:
gatekeeper.clear_all_codes()
initialSync = True
if config['initialSync']:
gatekeeper.pull_door_codes()
try:
sys.stdout.flush()
heartbeat = None
event_watcher = None
hb_conn_err = False
while True:
if not heartbeat or not heartbeat.is_alive():
hb_conn_err = False
if heartbeat and heartbeat.error:
try:
gatekeeper.send_gatekeper_log("Heartbeat: " + str(heartbeat.error))
except Exception as e:
hb_conn_err = True
logging.warning("Unable to report hearbeat error!: %s" % str(e))
time.sleep(5)
if not hb_conn_err:
logging.info("Starting Heartbeat...")
poll_delay = config.get('KEYMASTER_POLL_DELAY_SEC', 5)
heartbeat = Heartbeat(connection, poll_delay)
heartbeat.setDaemon(True)
heartbeat.start()
if not event_watcher or not event_watcher.is_alive():
if event_watcher and event_watcher.error:
gatekeeper.send_gatekeper_log("EventWatcher: " + str(event_watcher.error))
time.sleep(5)
logging.info("Starting Event Watcher...")
poll_delay = config.get('EVENT_POLL_DELAY_SEC', 10)
event_watcher = EventWatcher(gatekeeper, poll_delay)
event_watcher.setDaemon(True)
event_watcher.start()
if heartbeat.new_data:
gatekeeper.pull_door_codes()
heartbeat.all_clear()
if event_watcher.new_data:
event_logs = gatekeeper.pull_event_logs()
gatekeeper.push_event_logs(event_logs)
event_watcher.all_clear()
time.sleep(.1)
except KeyboardInterrupt:
logging.info(" Keyboard Interupt!")
logging.info("Shutting down Heartbeat...")
if heartbeat and heartbeat.is_alive():
heartbeat.stop()
logging.info("Shutting down Event Watcher...")
if event_watcher and event_watcher.is_alive():
event_watcher.stop()
except Exception as e:
traceback.print_exc()
logging.error("Error: %s" % str(e))
if __name__ == "__main__":
with open('gw_config.json', 'r') as f:
config = json.load(f)
config['initialSync'] = "--sync" in sys.argv
config['syncClocks'] = "--set-time" in sys.argv
config['clearCodes'] = "--clear-all" in sys.argv
if "--debug" in sys.argv:
config['DEBUG'] = True
log_level = logging.DEBUG if config.get('DEBUG', False) else logging.INFO
logging.basicConfig(format='%(asctime)s [%(levelname)s]: %(message)s', level=log_level)
logging.getLogger("requests").setLevel(logging.WARNING)
app = GatekeeperApp()
app.run(config)
| true | true |
f72aa63243daf6a11e454d0ec669bad1d564b255 | 4,178 | py | Python | src/dataAnalyze.py | bankrollhunter/DreamTrader | c8f2f9043b0ff11a67146007b6f952fca05a629d | [
"MIT"
] | 32 | 2020-10-16T17:48:04.000Z | 2021-06-16T06:14:31.000Z | src/dataAnalyze.py | bankrollhunter/DreamTrader | c8f2f9043b0ff11a67146007b6f952fca05a629d | [
"MIT"
] | null | null | null | src/dataAnalyze.py | bankrollhunter/DreamTrader | c8f2f9043b0ff11a67146007b6f952fca05a629d | [
"MIT"
] | 19 | 2020-10-16T17:13:27.000Z | 2021-05-26T02:44:56.000Z | from .dataSource import DataSource
from .trend import TrendAnalyze
from jqdatasdk import *
from jqdatasdk.api import get_fundamentals, get_industry_stocks, get_security_info
from jqdatasdk.utils import query
import talib
from datetime import datetime, timedelta
import json
import logging
from sqlalchemy.orm.query import Query
from .store import MongoDB
analyzePeriod = 5
longArrangeLimit = 5
ema20NegativeThreshold = -0.05
nearMovingAverageThreshold = 0.003
class MarketBreadth:
def __init__(self):
super().__init__()
def report_daily_first_level_market_breadth(self):
"""
1. 300 index
2. HY001 energy
3. HY002 meterial
4. HY003 industry
5. HY005 daily consume
6. HY006 medical
7. HY007 financial
8. HY008 information&technology
9. HY009 telecommunications
10. HY010 public utilities
11. HY011 real estate
"""
logging.info('report first level market breadth')
logging.info('HS300 index')
stocks = DataSource.query_index_stocks('000300.XSHG')
self.get_market_breadth(stocks)
codes = {
'HY001': '能源',
'HY002': '材料',
'HY003': '工业',
'HY004': '可选消费',
'HY005': '日常消费',
'HY006': '医疗保健',
'HY007': '金融',
'HY008': '信息技术',
'HY009': '电信服务',
'HY010': '公共事业',
'HY011': '房地产'
}
for k, v in codes.items():
self.report_market_breadth(k, v)
# 图片,write to report.
def report_daily_second_level_market_breadth(self):
"""
JQ行业: https://www.joinquant.com/help/api/help?name=plateData#%E8%81%9A%E5%AE%BD%E8%A1%8C%E4%B8%9A
TODO: 清洁能源板块,光伏,电动车
"""
codes = {
'HY477': '啤酒',
'HY478': '白酒',
'HY479': '软饮料',
'HY481': '食品加工和肉类',
'HY504': '人寿与健康保险',
'HY523': '半导体设备',
'HY524': '半导体产品',
'HY446': '消费电子',
'HY572': '中药',
'HY491': '生物科技',
'HY492': '西药',
'HY485': '医疗保健设备',
'HY486': '医疗保健用品',
'HY487': '保健护理产品经销商',
'HY488': '保健护理服务',
'HY435': '航空',
'HY439': '机场服务',
'HY449': '家用电器',
'HY454': '鞋类',
'HY493': '多元化银行',
'HY494': '区域性银行',
'HY496': '多元化金融',
'HY501': '投资银行业与经纪业',
'HY505': '多元化保险',
'HY444': '汽车制造',
'HY445': '摩托车制造',
'HY576': '汽车零售',
'HY426': '建筑机械与重型卡车',
'HY466': '互联网零售',
'HY601': '新能源发电业'
}
logging.info('report second level market breadth')
for k, v in codes.items():
self.report_market_breadth(k, v, enableDetail=False)
def report_market_breadth(self, code, description, enableDetail=False):
logging.info('report {} {}'.format(code, description))
stocks = DataSource.query_industry_stocks(code)
for it in stocks:
if(enableDetail):
logging.info(DataSource.query_security_info(it)
['display_name'])
self.get_market_breadth(stocks)
def get_market_breadth(self, stocks=[], period=analyzePeriod):
res = None
for it in stocks:
price = DataSource.query_price_data(it)
aboveEMA20 = self.AboveEMA20(price.close)
if(res is None):
res = aboveEMA20
else:
res = res.add(aboveEMA20)
for idx, item in res[-period:].items():
logging.info("{} : {:.2%}".format(idx, item/len(stocks)))
def AboveEMA20(self, close):
ema20 = talib.EMA(close, timeperiod=20)
res = close.copy()
for idx, item in close.items():
if(item > ema20[idx]):
res[idx] = 1
else:
res[idx] = 0
return res
# https://discourse.julialang.org/t/plotting-while-working-with-vs-code-remote-ssh/34309/7
# https://github.com/microsoft/vscode-remote-release/issues/452
| 30.057554 | 105 | 0.533748 | from .dataSource import DataSource
from .trend import TrendAnalyze
from jqdatasdk import *
from jqdatasdk.api import get_fundamentals, get_industry_stocks, get_security_info
from jqdatasdk.utils import query
import talib
from datetime import datetime, timedelta
import json
import logging
from sqlalchemy.orm.query import Query
from .store import MongoDB
analyzePeriod = 5
longArrangeLimit = 5
ema20NegativeThreshold = -0.05
nearMovingAverageThreshold = 0.003
class MarketBreadth:
def __init__(self):
super().__init__()
def report_daily_first_level_market_breadth(self):
logging.info('report first level market breadth')
logging.info('HS300 index')
stocks = DataSource.query_index_stocks('000300.XSHG')
self.get_market_breadth(stocks)
codes = {
'HY001': '能源',
'HY002': '材料',
'HY003': '工业',
'HY004': '可选消费',
'HY005': '日常消费',
'HY006': '医疗保健',
'HY007': '金融',
'HY008': '信息技术',
'HY009': '电信服务',
'HY010': '公共事业',
'HY011': '房地产'
}
for k, v in codes.items():
self.report_market_breadth(k, v)
def report_daily_second_level_market_breadth(self):
codes = {
'HY477': '啤酒',
'HY478': '白酒',
'HY479': '软饮料',
'HY481': '食品加工和肉类',
'HY504': '人寿与健康保险',
'HY523': '半导体设备',
'HY524': '半导体产品',
'HY446': '消费电子',
'HY572': '中药',
'HY491': '生物科技',
'HY492': '西药',
'HY485': '医疗保健设备',
'HY486': '医疗保健用品',
'HY487': '保健护理产品经销商',
'HY488': '保健护理服务',
'HY435': '航空',
'HY439': '机场服务',
'HY449': '家用电器',
'HY454': '鞋类',
'HY493': '多元化银行',
'HY494': '区域性银行',
'HY496': '多元化金融',
'HY501': '投资银行业与经纪业',
'HY505': '多元化保险',
'HY444': '汽车制造',
'HY445': '摩托车制造',
'HY576': '汽车零售',
'HY426': '建筑机械与重型卡车',
'HY466': '互联网零售',
'HY601': '新能源发电业'
}
logging.info('report second level market breadth')
for k, v in codes.items():
self.report_market_breadth(k, v, enableDetail=False)
def report_market_breadth(self, code, description, enableDetail=False):
logging.info('report {} {}'.format(code, description))
stocks = DataSource.query_industry_stocks(code)
for it in stocks:
if(enableDetail):
logging.info(DataSource.query_security_info(it)
['display_name'])
self.get_market_breadth(stocks)
def get_market_breadth(self, stocks=[], period=analyzePeriod):
res = None
for it in stocks:
price = DataSource.query_price_data(it)
aboveEMA20 = self.AboveEMA20(price.close)
if(res is None):
res = aboveEMA20
else:
res = res.add(aboveEMA20)
for idx, item in res[-period:].items():
logging.info("{} : {:.2%}".format(idx, item/len(stocks)))
def AboveEMA20(self, close):
ema20 = talib.EMA(close, timeperiod=20)
res = close.copy()
for idx, item in close.items():
if(item > ema20[idx]):
res[idx] = 1
else:
res[idx] = 0
return res
| true | true |
f72aa66497de214ed535e744d03638f19d86133b | 2,551 | py | Python | test/old/Old2/modules/SCF.py | pulsar-chem/BPModule | f8e64e04fdb01947708f098e833600c459c2ff0e | [
"BSD-3-Clause"
] | null | null | null | test/old/Old2/modules/SCF.py | pulsar-chem/BPModule | f8e64e04fdb01947708f098e833600c459c2ff0e | [
"BSD-3-Clause"
] | null | null | null | test/old/Old2/modules/SCF.py | pulsar-chem/BPModule | f8e64e04fdb01947708f098e833600c459c2ff0e | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import os
import sys
thispath = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(os.path.dirname(thispath),"helper"))
from MiscFxns import *
from StandardModules import *
def CompareEgy(EgyIn):
return EgyIn+224.912529687124<0.00001
def CompareGrad(GradIn):
CorrectGrad=[
0.00631057813355, 0.00571458363554, 0.05476152065996,
0.02287072160272, -0.0002840915734, -0.03359062789176,
-0.02457654725095, -0.00435313214139, -0.02443656592336,
-0.02033326759132, -0.04939904659428, -0.00601012407546,
0.01536321804528, 0.02452313009004, -0.01889869345071,
0.0056070168479, 0.02707750704665, 0.03157680066598,
0.01965867456494, 0.03636269982351, -0.03762798149958,
-0.03166475907529, -0.02714461080685, 0.00193798500615,
0.00676436472219, -0.01249703947853, 0.03228768650336]
AllGood=True
for i in range(0,27):
AllGood=AllGood and CorrectGrad[i]-GradIn[i]<0.00001
return AllGood
def Run(mm):
try:
tester = psr.testing.Tester("Testing SCF")
tester.print_header()
LoadDefaultModules(mm)
mm.change_option("PSR_SCF","BASIS_SET","sto-3g")
MyMod=mm.get_module("PSR_SCF",0)
mol=psr.system.MakeSystem("""
0 1
O 1.2361419 1.0137761 -0.0612424
H 0.5104418 0.8944555 0.5514190
H 1.9926927 1.1973129 0.4956931
O -0.9957202 0.0160415 1.2422556
H -1.4542703 -0.5669741 1.8472817
H -0.9377950 -0.4817912 0.4267562
O -0.2432343 -1.0198566 -1.1953808
H 0.4367536 -0.3759433 -0.9973297
H -0.5031835 -0.8251492 -2.0957959
""")
mol = ApplyBasis(mol,"sto-3g","sto-3g")
wfn=psr.datastore.Wavefunction()
wfn.system=mol
NewWfn,Egy=MyMod.Deriv(0,wfn)
tester.test("Testing Energy via Deriv(0)", True, CompareEgy, Egy[0])
NewWfn,Egy=MyModenergy(wfn)
tester.test("Testing Energy via Energy()", True, CompareEgy, Egy)
NewWfn,Grad=MyMod.Deriv(1,wfn)
tester.test("Testing Gradient via Deriv(1)", True, CompareGrad, Grad)
NewWfn,Grad=MyMod.Gradient(wfn)
tester.test("Testing Gradient via Gradient()", True, CompareGrad, Grad)
tester.print_results()
except Exception as e:
psr.output.Output("Caught exception in main handler\n")
traceback.print_exc()
with psr.ModuleAdministrator() as mm:
Run(mm)
psr.finalize()
| 33.565789 | 79 | 0.642101 |
import os
import sys
thispath = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(os.path.dirname(thispath),"helper"))
from MiscFxns import *
from StandardModules import *
def CompareEgy(EgyIn):
return EgyIn+224.912529687124<0.00001
def CompareGrad(GradIn):
CorrectGrad=[
0.00631057813355, 0.00571458363554, 0.05476152065996,
0.02287072160272, -0.0002840915734, -0.03359062789176,
-0.02457654725095, -0.00435313214139, -0.02443656592336,
-0.02033326759132, -0.04939904659428, -0.00601012407546,
0.01536321804528, 0.02452313009004, -0.01889869345071,
0.0056070168479, 0.02707750704665, 0.03157680066598,
0.01965867456494, 0.03636269982351, -0.03762798149958,
-0.03166475907529, -0.02714461080685, 0.00193798500615,
0.00676436472219, -0.01249703947853, 0.03228768650336]
AllGood=True
for i in range(0,27):
AllGood=AllGood and CorrectGrad[i]-GradIn[i]<0.00001
return AllGood
def Run(mm):
try:
tester = psr.testing.Tester("Testing SCF")
tester.print_header()
LoadDefaultModules(mm)
mm.change_option("PSR_SCF","BASIS_SET","sto-3g")
MyMod=mm.get_module("PSR_SCF",0)
mol=psr.system.MakeSystem("""
0 1
O 1.2361419 1.0137761 -0.0612424
H 0.5104418 0.8944555 0.5514190
H 1.9926927 1.1973129 0.4956931
O -0.9957202 0.0160415 1.2422556
H -1.4542703 -0.5669741 1.8472817
H -0.9377950 -0.4817912 0.4267562
O -0.2432343 -1.0198566 -1.1953808
H 0.4367536 -0.3759433 -0.9973297
H -0.5031835 -0.8251492 -2.0957959
""")
mol = ApplyBasis(mol,"sto-3g","sto-3g")
wfn=psr.datastore.Wavefunction()
wfn.system=mol
NewWfn,Egy=MyMod.Deriv(0,wfn)
tester.test("Testing Energy via Deriv(0)", True, CompareEgy, Egy[0])
NewWfn,Egy=MyModenergy(wfn)
tester.test("Testing Energy via Energy()", True, CompareEgy, Egy)
NewWfn,Grad=MyMod.Deriv(1,wfn)
tester.test("Testing Gradient via Deriv(1)", True, CompareGrad, Grad)
NewWfn,Grad=MyMod.Gradient(wfn)
tester.test("Testing Gradient via Gradient()", True, CompareGrad, Grad)
tester.print_results()
except Exception as e:
psr.output.Output("Caught exception in main handler\n")
traceback.print_exc()
with psr.ModuleAdministrator() as mm:
Run(mm)
psr.finalize()
| true | true |
f72aa69c7ec0962f39e0a42210cdf6e5308bb185 | 801 | py | Python | scripts/utils.py | onchere/whack | 0702e46f13855d4efd8dd0cb67af2fddfb84b00c | [
"Apache-2.0"
] | 54 | 2018-10-28T07:18:31.000Z | 2022-03-08T20:30:40.000Z | scripts/utils.py | onchere/whack | 0702e46f13855d4efd8dd0cb67af2fddfb84b00c | [
"Apache-2.0"
] | null | null | null | scripts/utils.py | onchere/whack | 0702e46f13855d4efd8dd0cb67af2fddfb84b00c | [
"Apache-2.0"
] | 5 | 2018-10-28T14:43:53.000Z | 2020-04-26T19:52:58.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018 Onchere Bironga
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def read(filename):
with open(filename, "r") as f:
return f.read()
def write(filename, contents):
with open(filename, "w+") as f:
f.write(contents)
| 30.807692 | 74 | 0.714107 |
def read(filename):
with open(filename, "r") as f:
return f.read()
def write(filename, contents):
with open(filename, "w+") as f:
f.write(contents)
| true | true |
f72aa75a0f0acb3039551b59174f7f22257880d1 | 31,650 | py | Python | credit_default/app/views.py | sandymule/Credit-Card-Default | c9d67feffa65fb7aad514bd9c1991766e8e2777b | [
"MIT"
] | 1 | 2017-05-20T06:08:05.000Z | 2017-05-20T06:08:05.000Z | credit_default/app/views.py | sandymule/credit-card-default | c9d67feffa65fb7aad514bd9c1991766e8e2777b | [
"MIT"
] | null | null | null | credit_default/app/views.py | sandymule/credit-card-default | c9d67feffa65fb7aad514bd9c1991766e8e2777b | [
"MIT"
] | 2 | 2017-05-20T06:08:25.000Z | 2019-05-18T19:59:31.000Z | import logging
import json
import pandas as pd
from flask import render_template
from flask_wtf import Form
from wtforms import fields
from wtforms.validators import Required
from . import app, estimator, target_names
logger = logging.getLogger('app')
class PredictForm(Form):
"""Fields for Predict"""
# sepal_length = fields.DecimalField('Sepal Length:', places=2, validators=[Required()])
# sepal_width = fields.DecimalField('Sepal Width:', places=2, validators=[Required()])
# petal_length = fields.DecimalField('Petal Length:', places=2, validators=[Required()])
# petal_width = fields.DecimalField('Petal Width:', places=2, validators=[Required()])
Limit_bal = fields.DecimalField('Limit Balance:', places=2, validators=[Required()])
Gender_list = [(1, "Male"), (2, "Female")]
Gender = fields.SelectField("Gender", choices=Gender_list, coerce=int)
Education_list = [(1, "Graduate school"), (2, "College"), (3, "High school"), (4, "Less than high school")]
Education = fields.SelectField("Education", choices=Education_list, coerce=int)
Marriage_list = [(1, "Married"), (2, "Single"), (3, "Separated, Divorced, or Widowed")]
Marriage = fields.SelectField("Marriage", choices=Marriage_list, coerce=int)
Age= fields.DecimalField('Age:', places=2, validators=[Required()])
Percent_1_monthago = fields.DecimalField('Percent Paid 1 Month Ago:', places=2, validators=[Required()])
Percent_2_monthago = fields.DecimalField('Percent Paid 2 Months Ago:', places=2, validators=[Required()])
Percent_3_monthago = fields.DecimalField('Percent Paid 3 Months Ago:', places=2, validators=[Required()])
Percent_4_monthago = fields.DecimalField('Percent Paid 4 Months Ago:', places=2, validators=[Required()])
Percent_5_monthago = fields.DecimalField('Percent Paid 5 Months Ago:', places=2, validators=[Required()])
Percent_6_monthago = fields.DecimalField('Percent Paid 6 Months Ago:', places=2, validators=[Required()])
submit = fields.SubmitField('Submit')
@app.route('/', methods=('GET', 'POST'))
def predict():
    """Render the application landing page for both GET and POST requests."""
    homepage = 'homepage.html'
    return render_template(homepage)
def _payment_taxonomy_records():
    """Build the 64 payment-history taxonomy rows used by the visualization.

    Each row enumerates one combination of "Over"/"Under" outcomes for
    payments 1-5 (thresholds 0%-4%) plus "Over 5%"/"Less Than 5%" for
    payment 6; ``name`` repeats the final (payment 6) label.  Rows are
    emitted in binary-counting order with "Over" first, reproducing the
    previous hand-written table exactly.

    Returns:
        list[dict]: records with ``name`` (str) and ``taxonomy``
        (list of six str labels) keys, in fixed order.
    """
    records = []
    for mask in range(64):
        taxonomy = []
        for month in range(1, 7):
            # Bit for payment `month` (MSB = payment 1); 0 -> "Over".
            under = (mask >> (6 - month)) & 1
            if month < 6:
                word = "Under" if under else "Over"
                taxonomy.append("Payment %d %s %d%%" % (month, word, month - 1))
            else:
                # Payment 6 uses "Less Than" instead of "Under" in the labels.
                taxonomy.append("Payment 6 Less Than 5%" if under else "Payment 6 Over 5%")
        records.append({"name": taxonomy[-1], "taxonomy": taxonomy})
    return records


@app.route('/visualize', methods=('GET', 'POST'))
def visualize():
    """Render the visualization page.

    On a valid POST, reads the eleven model features from the form,
    predicts the default class with ``estimator``, and serializes the
    payment-taxonomy table to JSON records for the client-side chart.
    On GET or invalid input, renders the page with no prediction.
    """
    form = PredictForm()
    result = None    # predicted class label (None until a valid submit)
    datastuff = []   # JSON records consumed by the visualization

    if form.validate_on_submit():
        data = form.data
        # Assemble the feature vector in the exact order the estimator expects.
        default_instance = [
            float(data['Limit_bal']),
            float(data['Gender']),
            float(data['Education']),
            float(data['Marriage']),
            float(data['Age']),
            float(data['Percent_1_monthago']),
            float(data['Percent_2_monthago']),
            float(data['Percent_3_monthago']),
            float(data['Percent_4_monthago']),
            float(data['Percent_5_monthago']),
            float(data['Percent_6_monthago']),
        ]
        result = estimator.predict(default_instance)[0]  # target predicted
        datastuff = pd.DataFrame(_payment_taxonomy_records()).to_json(orient="records")
    else:
        print(form.data)

    return render_template('visualize.html',
                           form=form,
                           prediction=result, data=datastuff)
@app.route('/predict', methods=('GET', 'POST'))
def index():
    """Prediction page.

    On a valid POST, build the 11-feature vector from the submitted form,
    run the shared ``estimator`` to predict the credit-default outcome, and
    render ``predict.html`` with the prediction plus the full Over/Under
    payment-path taxonomy serialized as JSON records.  On GET or invalid
    input, render the empty form (prediction is ``None``, data is ``[]``).
    """
    import itertools  # local import: the module import block is outside this edit

    datastuff = []
    form = PredictForm()
    result = None
    if form.validate_on_submit():
        submitted_data = form.data
        # Feature vector in the exact order the estimator was trained with.
        default_instance = [
            float(submitted_data['Limit_bal']),
            float(submitted_data['Gender']),
            float(submitted_data['Education']),
            float(submitted_data['Marriage']),
            float(submitted_data['Age']),
            float(submitted_data['Percent_1_monthago']),
            float(submitted_data['Percent_2_monthago']),
            float(submitted_data['Percent_3_monthago']),
            float(submitted_data['Percent_4_monthago']),
            float(submitted_data['Percent_5_monthago']),
            float(submitted_data['Percent_6_monthago']),
        ]
        # NOTE(review): scikit-learn's predict() expects a 2-D array; a flat
        # list is deprecated/rejected in newer versions -- confirm the pickled
        # estimator accepts it, or wrap as [default_instance].
        result = estimator.predict(default_instance)[0]  # predicted target
        # Enumerate all 2**6 = 64 Over/Under payment paths instead of
        # hard-coding them.  Payments 1-5 use "Over"/"Under" thresholds
        # (0%..4%); payment 6 uses "Over 5%"/"Less Than 5%".
        # itertools.product varies the last payment fastest, which reproduces
        # the original hard-coded record order exactly.
        choices = [
            ["Payment {0} Over {1}%".format(i, i - 1),
             "Payment {0} Under {1}%".format(i, i - 1)]
            for i in range(1, 6)
        ]
        choices.append(["Payment 6 Over 5%", "Payment 6 Less Than 5%"])
        df = pd.DataFrame([
            {"name": path[-1], "taxonomy": list(path)}
            for path in itertools.product(*choices)
        ])
        datastuff = df.to_json(orient="records")
    else:
        print(form.data)
    return render_template('predict.html',
                           form=form,
                           prediction=result, data=datastuff)
| 58.938547 | 156 | 0.576461 | import logging
import json
import pandas as pd
from flask import render_template
from flask_wtf import Form
from wtforms import fields
from wtforms.validators import Required
from . import app, estimator, target_names
# Module-level logger named 'app'; handlers/level are configured elsewhere.
logger = logging.getLogger('app')
class PredictForm(Form):
    """Credit-card default prediction form.

    Collects the eleven inputs that the route handlers turn into the
    feature vector passed to ``estimator.predict``: credit limit,
    demographics, age, and the percentage of the bill paid in each of the
    last six months.  Field declaration order matches the feature order
    used by the handlers.
    """
    # Account credit limit (two decimal places).
    Limit_bal = fields.DecimalField('Limit Balance:', places=2, validators=[Required()])
    # (value, label) choice lists; submitted values are coerced to int here
    # and later cast to float by the route handlers.
    Gender_list = [(1, "Male"), (2, "Female")]
    Gender = fields.SelectField("Gender", choices=Gender_list, coerce=int)
    Education_list = [(1, "Graduate school"), (2, "College"), (3, "High school"), (4, "Less than high school")]
    Education = fields.SelectField("Education", choices=Education_list, coerce=int)
    Marriage_list = [(1, "Married"), (2, "Single"), (3, "Separated, Divorced, or Widowed")]
    Marriage = fields.SelectField("Marriage", choices=Marriage_list, coerce=int)
    Age= fields.DecimalField('Age:', places=2, validators=[Required()])
    # Percentage of the bill paid N months ago (N = 1..6).
    Percent_1_monthago = fields.DecimalField('Percent Paid 1 Month Ago:', places=2, validators=[Required()])
    Percent_2_monthago = fields.DecimalField('Percent Paid 2 Months Ago:', places=2, validators=[Required()])
    Percent_3_monthago = fields.DecimalField('Percent Paid 3 Months Ago:', places=2, validators=[Required()])
    Percent_4_monthago = fields.DecimalField('Percent Paid 4 Months Ago:', places=2, validators=[Required()])
    Percent_5_monthago = fields.DecimalField('Percent Paid 5 Months Ago:', places=2, validators=[Required()])
    Percent_6_monthago = fields.DecimalField('Percent Paid 6 Months Ago:', places=2, validators=[Required()])
    submit = fields.SubmitField('Submit')
@app.route('/',methods=('GET','POST'))
def predict():
    """Render the static home page."""
    return render_template('homepage.html')
@app.route('/visualize',methods=('GET','POST'))
def visualize():
    """Visualization page.

    On a valid POST, build the 11-feature vector from the submitted form,
    run the shared ``estimator`` to predict the credit-default outcome, and
    render ``visualize.html`` with the prediction plus the full Over/Under
    payment-path taxonomy serialized as JSON records.  On GET or invalid
    input, render the empty form (prediction is ``None``, data is ``[]``).
    """
    import itertools  # local import: the module import block is outside this edit

    datastuff = []
    form = PredictForm()
    result = None
    if form.validate_on_submit():
        submitted_data = form.data
        # Feature vector in the exact order the estimator was trained with.
        default_instance = [
            float(submitted_data['Limit_bal']),
            float(submitted_data['Gender']),
            float(submitted_data['Education']),
            float(submitted_data['Marriage']),
            float(submitted_data['Age']),
            float(submitted_data['Percent_1_monthago']),
            float(submitted_data['Percent_2_monthago']),
            float(submitted_data['Percent_3_monthago']),
            float(submitted_data['Percent_4_monthago']),
            float(submitted_data['Percent_5_monthago']),
            float(submitted_data['Percent_6_monthago']),
        ]
        # NOTE(review): scikit-learn's predict() expects a 2-D array; a flat
        # list is deprecated/rejected in newer versions -- confirm the pickled
        # estimator accepts it, or wrap as [default_instance].
        result = estimator.predict(default_instance)[0]  # predicted target
        # Enumerate all 2**6 = 64 Over/Under payment paths instead of
        # hard-coding them.  Payments 1-5 use "Over"/"Under" thresholds
        # (0%..4%); payment 6 uses "Over 5%"/"Less Than 5%".
        # itertools.product varies the last payment fastest, which reproduces
        # the original hard-coded record order exactly.
        choices = [
            ["Payment {0} Over {1}%".format(i, i - 1),
             "Payment {0} Under {1}%".format(i, i - 1)]
            for i in range(1, 6)
        ]
        choices.append(["Payment 6 Over 5%", "Payment 6 Less Than 5%"])
        df = pd.DataFrame([
            {"name": path[-1], "taxonomy": list(path)}
            for path in itertools.product(*choices)
        ])
        datastuff = df.to_json(orient="records")
    else:
        print(form.data)
    return render_template('visualize.html',
                           form=form,
                           prediction=result, data=datastuff)
def _payment_taxonomy_records():
    """Build the 64 payment-history taxonomy records for the chart data.

    Payments 1-5 are each classified "Over" or "Under" their threshold
    (payment i's threshold is (i - 1) percent); payment 6 is either
    "Over 5%" or "Less Than 5%".  Combinations are emitted in
    lexicographic order with "Over" before "Under", and every
    payment-1..5 combination yields two records, one per payment-6
    outcome, whose "name" repeats the payment-6 label.  This reproduces,
    record for record and in the same order, the table that used to be a
    ~190-line hard-coded literal.
    """
    import itertools

    branch_labels = [
        ("Payment %d Over %d%%" % (i, i - 1),
         "Payment %d Under %d%%" % (i, i - 1))
        for i in range(1, 6)
    ]
    leaf_labels = ("Payment 6 Over 5%", "Payment 6 Less Than 5%")
    return [
        {"name": leaf, "taxonomy": list(branch) + [leaf]}
        for branch in itertools.product(*branch_labels)
        for leaf in leaf_labels
    ]


@app.route('/predict', methods=('GET', 'POST'))
def index():
    """Prediction page.

    On a valid POST, convert the submitted form fields to a feature
    vector, run the default estimator, and render 'predict.html' with
    the form, the prediction result (None until a valid submission) and
    the chart data (JSON records; empty list before a valid submission).
    """
    datastuff = []
    form = PredictForm()
    result = None
    if form.validate_on_submit():
        submitted_data = form.data
        # Feature order must match the order the estimator was trained
        # with -- do not reorder.
        feature_names = (
            'Limit_bal', 'Gender', 'Education', 'Marriage', 'Age',
            'Percent_1_monthago', 'Percent_2_monthago', 'Percent_3_monthago',
            'Percent_4_monthago', 'Percent_5_monthago', 'Percent_6_monthago',
        )
        default_instance = [float(submitted_data[name]) for name in feature_names]
        # NOTE(review): passing a 1-D instance only works on older
        # scikit-learn; newer versions expect [default_instance].
        # Confirm the pinned estimator version before changing.
        result = estimator.predict(default_instance)[0]
        df = pd.DataFrame(_payment_taxonomy_records())
        datastuff = df.to_json(orient="records")
    else:
        # Surface validation failures in the server log for debugging.
        print (form.data)
    return render_template('predict.html',
                           form=form,
                           prediction=result, data=datastuff)
| true | true |
f72aa7ac988a87f3f873350a9bce3b67813e99a1 | 188 | py | Python | lessons/ObjectOrientedProgramming/IdeFiles/3a_python_package/setup.py | cnegrelli/DSND_Term2 | c69a654a7d492ce895c9b835b6c05e89eef84a1b | [
"MIT"
] | null | null | null | lessons/ObjectOrientedProgramming/IdeFiles/3a_python_package/setup.py | cnegrelli/DSND_Term2 | c69a654a7d492ce895c9b835b6c05e89eef84a1b | [
"MIT"
] | null | null | null | lessons/ObjectOrientedProgramming/IdeFiles/3a_python_package/setup.py | cnegrelli/DSND_Term2 | c69a654a7d492ce895c9b835b6c05e89eef84a1b | [
"MIT"
] | null | null | null | from setuptools import setup
setup(name = 'distributions',
version = '0.2',
description = 'Gaussian distributions',
packages = ['distributions'],
zip_safe = False) | 26.857143 | 45 | 0.648936 | from setuptools import setup
setup(name = 'distributions',
version = '0.2',
description = 'Gaussian distributions',
packages = ['distributions'],
zip_safe = False) | true | true |
f72aa85d1d51fabcb355ee1ffa6f5c5410b545f6 | 27,086 | py | Python | nuitka/tree/ReformulationFunctionStatements.py | augustand/Nuitka | b7b9dd50b60505a309f430ce17cad36fb7d75048 | [
"Apache-2.0"
] | null | null | null | nuitka/tree/ReformulationFunctionStatements.py | augustand/Nuitka | b7b9dd50b60505a309f430ce17cad36fb7d75048 | [
"Apache-2.0"
] | null | null | null | nuitka/tree/ReformulationFunctionStatements.py | augustand/Nuitka | b7b9dd50b60505a309f430ce17cad36fb7d75048 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Reformulation of function statements.
Consult the developer manual for information. TODO: Add ability to sync
source code comments with developer manual sections.
"""
from nuitka.nodes.AssignNodes import (
ExpressionTargetTempVariableRef,
ExpressionTargetVariableRef,
StatementAssignmentVariable,
StatementReleaseVariable
)
from nuitka.nodes.BuiltinIteratorNodes import (
ExpressionBuiltinIter1,
StatementSpecialUnpackCheck
)
from nuitka.nodes.BuiltinNextNodes import ExpressionSpecialUnpack
from nuitka.nodes.BuiltinRefNodes import ExpressionBuiltinRef
from nuitka.nodes.CallNodes import ExpressionCallNoKeywords
from nuitka.nodes.CodeObjectSpecs import CodeObjectSpec
from nuitka.nodes.ConstantRefNodes import (
ExpressionConstantNoneRef,
makeConstantRefNode
)
from nuitka.nodes.ContainerMakingNodes import ExpressionMakeTuple
from nuitka.nodes.CoroutineNodes import (
ExpressionCoroutineObjectBody,
ExpressionMakeCoroutineObject
)
from nuitka.nodes.FunctionNodes import (
ExpressionFunctionBody,
ExpressionFunctionCall,
ExpressionFunctionCreation,
ExpressionFunctionRef
)
from nuitka.nodes.GeneratorNodes import (
ExpressionGeneratorObjectBody,
ExpressionMakeGeneratorObject,
StatementGeneratorReturn
)
from nuitka.nodes.ParameterSpecs import ParameterSpec
from nuitka.nodes.ReturnNodes import StatementReturn
from nuitka.nodes.VariableRefNodes import (
ExpressionTempVariableRef,
ExpressionVariableRef
)
from nuitka.PythonVersions import python_version
from nuitka.tree import SyntaxErrors
from .Helpers import (
buildFrameNode,
buildNode,
buildNodeList,
detectFunctionBodyKind,
extractDocFromBody,
getKind,
makeDictCreationOrConstant,
makeStatementsSequenceFromStatement,
mangleName
)
from .ReformulationTryFinallyStatements import makeTryFinallyStatement
def _insertFinalReturnStatement(function_statements_body, return_class,
                                source_ref):
    """Make sure a function body ends in an implicit "return None".

    The return statement is built from the given "return_class".  A
    missing body becomes a fresh sequence holding only that return
    statement; a body that already aborts is returned unchanged;
    otherwise the return statement is appended in place.
    """
    none_return = return_class(
        expression = ExpressionConstantNoneRef(
            source_ref = source_ref
        ),
        source_ref = source_ref
    )
    if function_statements_body is None:
        return makeStatementsSequenceFromStatement(
            statement = none_return,
        )
    if not function_statements_body.isStatementAborting():
        function_statements_body.setStatements(
            function_statements_body.getStatements() + (none_return,)
        )
    return function_statements_body
def buildFunctionNode(provider, node, source_ref):
    """Build the node tree for a "def" statement.

    Re-formulates the function definition into an assignment of a
    function creation expression (wrapped by its decorator calls) to the
    function's mangled name.  Generator functions get an extra
    indirection: the built function body merely returns a made generator
    object, while the real statements live in a separate generator
    object body.

    Returns the StatementAssignmentVariable performing the assignment.
    """
    # Functions have way too many details, pylint: disable=R0912,R0914
    assert getKind(node) == "FunctionDef"
    function_statement_nodes, function_doc = extractDocFromBody(node)
    function_kind, flags, _written_variables, _non_local_declarations, _global_declarations = \
        detectFunctionBodyKind(
            nodes = function_statement_nodes
        )
    outer_body, function_body, code_object = buildFunctionWithParsing(
        provider      = provider,
        function_kind = function_kind,
        name          = node.name,
        function_doc  = function_doc,
        flags         = flags,
        node          = node,
        source_ref    = source_ref
    )
    if function_kind == "Function":
        code_body = function_body
    elif function_kind == "Generator":
        # Generators get a dedicated object body; mirror the function
        # body's variables there by reference so both see the same set.
        code_body = ExpressionGeneratorObjectBody(
            provider   = function_body,
            name       = node.name,
            flags      = flags,
            source_ref = source_ref
        )
        for variable in function_body.getVariables():
            code_body.getVariableForReference(variable.getName())
    else:
        assert False, function_kind
    if function_kind == "Generator":
        # The function itself only creates and returns the generator
        # object; the actual statements go into "code_body" below.
        function_body.setBody(
            makeStatementsSequenceFromStatement(
                statement = StatementReturn(
                    expression = ExpressionMakeGeneratorObject(
                        generator_ref = ExpressionFunctionRef(
                            function_body = code_body,
                            source_ref    = source_ref
                        ),
                        code_object   = code_object,
                        source_ref    = source_ref
                    ),
                    source_ref = source_ref
                )
            )
        )
    # Reversed, so that the decorator nearest to the function is applied
    # first when wrapping below, matching Python semantics.
    decorators = buildNodeList(
        provider   = provider,
        nodes      = reversed(node.decorator_list),
        source_ref = source_ref
    )
    defaults = buildNodeList(
        provider   = provider,
        nodes      = node.args.defaults,
        source_ref = source_ref
    )
    kw_defaults = buildParameterKwDefaults(
        provider      = provider,
        node          = node,
        function_body = function_body,
        source_ref    = source_ref
    )
    function_statements_body = buildFrameNode(
        provider    = code_body,
        nodes       = function_statement_nodes,
        code_object = code_object,
        source_ref  = source_ref
    )
    if function_kind == "Function":
        # TODO: Generators might have to raise GeneratorExit instead.
        function_statements_body = _insertFinalReturnStatement(
            function_statements_body = function_statements_body,
            return_class             = StatementReturn,
            source_ref               = source_ref
        )
    if function_statements_body.isStatementsFrame():
        function_statements_body = makeStatementsSequenceFromStatement(
            statement = function_statements_body
        )
    code_body.setBody(
        function_statements_body
    )
    annotations = buildParameterAnnotations(provider, node, source_ref)
    function_creation = ExpressionFunctionCreation(
        function_ref = ExpressionFunctionRef(
            function_body = outer_body,
            source_ref    = source_ref
        ),
        code_object  = code_object,
        defaults     = defaults,
        kw_defaults  = kw_defaults,
        annotations  = annotations,
        source_ref   = source_ref
    )
    # Add the "staticmethod" decorator to __new__ methods if not provided.
    # CPython made these optional, but secretly applies them when it does
    # "class __new__". We add them earlier, so our optimization will see it.
    if node.name == "__new__" and \
       provider.isExpressionClassBody():
        for decorator in decorators:
            if decorator.isExpressionVariableRef() and \
               decorator.getVariableName() == "staticmethod":
                break
        else:
            decorators.append(
                ExpressionBuiltinRef(
                    builtin_name = "staticmethod",
                    source_ref   = source_ref
                )
            )
    # Same implicit decoration for "__init_subclass__" on 3.6+, which
    # CPython treats as a classmethod inside class bodies.
    if python_version >= 360 and \
       node.name == "__init_subclass__" and \
       provider.isExpressionClassBody():
        for decorator in decorators:
            if decorator.isExpressionVariableRef() and \
               decorator.getVariableName() == "classmethod":
                break
        else:
            decorators.append(
                ExpressionBuiltinRef(
                    builtin_name = "classmethod",
                    source_ref   = source_ref
                )
            )
    # Wrap the creation with each decorator call, innermost first.
    decorated_function = function_creation
    for decorator in decorators:
        decorated_function = ExpressionCallNoKeywords(
            called     = decorator,
            args       = ExpressionMakeTuple(
                elements   = (decorated_function,),
                source_ref = source_ref
            ),
            source_ref = decorator.getSourceReference()
        )
    result = StatementAssignmentVariable(
        variable_ref = ExpressionTargetVariableRef(
            variable_name = mangleName(node.name, provider),
            source_ref    = source_ref
        ),
        source       = decorated_function,
        source_ref   = source_ref
    )
    if python_version >= 340:
        # The assignment target is also used to set up "__qualname__".
        function_body.qualname_setup = result.getTargetVariableRef()
    return result
def buildAsyncFunctionNode(provider, node, source_ref):
    """Build the node tree for an "async def" statement.

    The created function body ("creator_function_body") only returns a
    made coroutine object; the async function's actual statements go
    into a separate ExpressionCoroutineObjectBody.  The decorated
    function creation is finally assigned to the function's mangled
    name, and that statement is returned.
    """
    # We are creating a function here that creates coroutine objects, with
    # many details each, pylint: disable=R0914
    assert getKind(node) == "AsyncFunctionDef"
    function_statement_nodes, function_doc = extractDocFromBody(node)
    _function_kind, flags, _written_variables, _non_local_declarations, _global_declarations = \
        detectFunctionBodyKind(
            nodes = function_statement_nodes
        )
    creator_function_body, _, code_object = buildFunctionWithParsing(
        provider      = provider,
        function_kind = "Coroutine",
        name          = node.name,
        flags         = (),
        function_doc  = function_doc,
        node          = node,
        source_ref    = source_ref
    )
    function_body = ExpressionCoroutineObjectBody(
        provider   = creator_function_body,
        name       = node.name,
        flags      = flags,
        source_ref = source_ref
    )
    # Reversed, so that the decorator nearest to the function is applied
    # first when wrapping below, matching Python semantics.
    decorators = buildNodeList(
        provider   = provider,
        nodes      = reversed(node.decorator_list),
        source_ref = source_ref
    )
    defaults = buildNodeList(
        provider   = provider,
        nodes      = node.args.defaults,
        source_ref = source_ref
    )
    function_statements_body = buildFrameNode(
        provider    = function_body,
        nodes       = function_statement_nodes,
        code_object = code_object,
        source_ref  = source_ref
    )
    # Coroutine bodies end with a generator-style return of None.
    function_statements_body = _insertFinalReturnStatement(
        function_statements_body = function_statements_body,
        return_class             = StatementGeneratorReturn,
        source_ref               = source_ref
    )
    if function_statements_body.isStatementsFrame():
        function_statements_body = makeStatementsSequenceFromStatement(
            statement = function_statements_body
        )
    function_body.setBody(
        function_statements_body
    )
    annotations = buildParameterAnnotations(provider, node, source_ref)
    kw_defaults = buildParameterKwDefaults(
        provider      = provider,
        node          = node,
        function_body = creator_function_body,
        source_ref    = source_ref
    )
    # The creator function does nothing but return the coroutine object.
    creator_function_body.setBody(
        makeStatementsSequenceFromStatement(
            statement = StatementReturn(
                expression = ExpressionMakeCoroutineObject(
                    coroutine_ref = ExpressionFunctionRef(
                        function_body = function_body,
                        source_ref    = source_ref
                    ),
                    code_object   = code_object,
                    source_ref    = source_ref
                ),
                source_ref = source_ref
            )
        )
    )
    function_creation = ExpressionFunctionCreation(
        function_ref = ExpressionFunctionRef(
            function_body = creator_function_body,
            source_ref    = source_ref
        ),
        code_object  = code_object,
        defaults     = defaults,
        kw_defaults  = kw_defaults,
        annotations  = annotations,
        source_ref   = source_ref
    )
    # Wrap the creation with each decorator call, innermost first.
    decorated_function = function_creation
    for decorator in decorators:
        decorated_function = ExpressionCallNoKeywords(
            called     = decorator,
            args       = ExpressionMakeTuple(
                elements   = (decorated_function,),
                source_ref = source_ref
            ),
            source_ref = decorator.getSourceReference()
        )
    result = StatementAssignmentVariable(
        variable_ref = ExpressionTargetVariableRef(
            variable_name = mangleName(node.name, provider),
            source_ref    = source_ref
        ),
        source       = decorated_function,
        source_ref   = source_ref
    )
    # "async def" exists on 3.5+ only, so no version guard is needed for
    # the "__qualname__" setup here.
    function_body.qualname_setup = result.getTargetVariableRef()
    return result
def buildParameterKwDefaults(provider, node, function_body, source_ref):
    """Build the keyword-only argument default values as a dict node.

    Keyword-only arguments are a Python3-only feature; this hides that
    by returning None on Python2 and when the function has no
    keyword-only parameters at all.
    """
    if python_version < 300:
        return None
    kw_only_names = function_body.getParameters().getKwOnlyParameterNames()
    if not kw_only_names:
        return None
    keys = []
    values = []
    for param_name, default_node in zip(kw_only_names, node.args.kw_defaults):
        # Keyword-only parameters without a default appear as None in
        # the AST and contribute nothing to the defaults dict.
        if default_node is None:
            continue
        keys.append(
            makeConstantRefNode(
                constant = param_name,
                source_ref = source_ref
            )
        )
        values.append(
            buildNode(provider, default_node, source_ref)
        )
    return makeDictCreationOrConstant(
        keys = keys,
        values = values,
        source_ref = source_ref
    )
def buildParameterAnnotations(provider, node, source_ref):
    """Build the "__annotations__" dict node for a function definition.

    Annotations are a Python3-only feature, so this returns None on
    Python2, and also when the function carries no annotations at all.
    Otherwise a dict creation node mapping parameter names (plus
    "return") to their annotation expressions is returned.
    """
    # Too many branches, because there is too many cases, pylint: disable=R0912
    # Build annotations. We are hiding here, that it is a Python3 only feature.
    if python_version < 300:
        return None
    # Starting with Python 3.4, the names of parameters are mangled in
    # annotations as well.
    if python_version < 340:
        mangle = lambda variable_name: variable_name
    else:
        mangle = lambda variable_name: mangleName(variable_name, provider)
    keys = []
    values = []
    def addAnnotation(key, value):
        # Record one name/annotation pair for the resulting dict.
        keys.append(
            makeConstantRefNode(
                constant      = mangle(key),
                source_ref    = source_ref,
                user_provided = True
            )
        )
        values.append(value)
    def extractArg(arg):
        # Pull the annotation (if any) out of one argument AST node;
        # tuples recurse into their elements.
        if getKind(arg) == "Name":
            assert arg.annotation is None
        elif getKind(arg) == "arg":
            if arg.annotation is not None:
                addAnnotation(
                    key   = arg.arg,
                    value = buildNode(provider, arg.annotation, source_ref)
                )
        elif getKind(arg) == "Tuple":
            for arg in arg.elts:
                extractArg(arg)
        else:
            assert False, getKind(arg)
    for arg in node.args.args:
        extractArg(arg)
    for arg in node.args.kwonlyargs:
        extractArg(arg)
    # Before 3.4 the AST keeps "*args"/"**kwargs" annotations in
    # dedicated fields; from 3.4 on they are ordinary "arg" nodes.
    if python_version < 340:
        if node.args.varargannotation is not None:
            addAnnotation(
                key   = node.args.vararg,
                value = buildNode(
                    provider, node.args.varargannotation, source_ref
                )
            )
        if node.args.kwargannotation is not None:
            addAnnotation(
                key   = node.args.kwarg,
                value = buildNode(
                    provider, node.args.kwargannotation, source_ref
                )
            )
    else:
        if node.args.vararg is not None:
            extractArg(node.args.vararg)
        if node.args.kwarg is not None:
            extractArg(node.args.kwarg)
    # Return value annotation (not there for lambdas)
    if hasattr(node, "returns") and node.returns is not None:
        addAnnotation(
            key   = "return",
            value = buildNode(
                provider, node.returns, source_ref
            )
        )
    if keys:
        return makeDictCreationOrConstant(
            keys       = keys,
            values     = values,
            source_ref = source_ref
        )
    else:
        return None
def buildFunctionWithParsing(provider, function_kind, name, function_doc, flags,
                             node, source_ref):
    """Build a function body with its parameter parsing.

    Creates the ParameterSpec (with mangled names), the CodeObjectSpec,
    and the ExpressionFunctionBody for the given def/lambda node.
    Nested tuple parameters like "def f((a, b))" (a Python2 feature)
    cannot be expressed directly: they are turned into special ".1",
    ".2", ... parameters on an outer wrapper function, whose body
    unpacks them into temporary variables and then calls an inner
    function carrying the flat parameter list.

    Returns a tuple (outer_body, function_body, code_object); without
    tuple parameters, outer_body and function_body are the same node.
    Invalid parameter lists raise a SyntaxError via SyntaxErrors.
    """
    # This contains a complex re-formulation for nested parameter functions.
    # pylint: disable=R0914
    kind = getKind(node)
    assert kind in ("FunctionDef", "Lambda", "AsyncFunctionDef"), "unsupported for kind " + kind
    def extractArg(arg):
        # Map one argument AST node (or plain string) to its mangled
        # parameter name; tuple arguments are handled by the caller.
        if arg is None:
            return None
        elif type(arg) is str:
            return mangleName(arg, provider)
        elif getKind(arg) == "Name":
            return mangleName(arg.id, provider)
        elif getKind(arg) == "arg":
            return mangleName(arg.arg, provider)
        elif getKind(arg) == "Tuple":
            # These are to be re-formulated on the outside.
            assert False
        else:
            assert False, getKind(arg)
    special_args = {}
    def extractNormalArgs(args):
        # Collect normal parameter names, replacing each tuple parameter
        # with a generated ".n" name and remembering its elements in
        # "special_args" for the unpacking code below.
        normal_args = []
        for arg in args:
            if type(arg) is not str and getKind(arg) == "Tuple":
                special_arg_name = ".%d" % (len(special_args) + 1)
                special_args[special_arg_name] = arg.elts
                normal_args.append(special_arg_name)
            else:
                normal_args.append(extractArg(arg))
        return normal_args
    normal_args = extractNormalArgs(node.args.args)
    parameters = ParameterSpec(
        ps_name          = name,
        ps_normal_args   = normal_args,
        ps_kw_only_args  = [
            extractArg(arg)
            for arg in
            node.args.kwonlyargs
            ]
              if python_version >= 300 else
            [],
        ps_list_star_arg = extractArg(node.args.vararg),
        ps_dict_star_arg = extractArg(node.args.kwarg),
        ps_default_count = len(node.args.defaults)
    )
    message = parameters.checkValid()
    if message is not None:
        SyntaxErrors.raiseSyntaxError(
            message,
            source_ref
        )
    code_object = CodeObjectSpec(
        co_name           = name,
        co_kind           = function_kind,
        co_varnames       = parameters.getParameterNames(),
        co_argcount       = parameters.getArgumentCount(),
        co_kwonlyargcount = parameters.getKwOnlyParameterCount(),
        co_has_starlist   = parameters.getStarListArgumentName() is not None,
        co_has_stardict   = parameters.getStarDictArgumentName() is not None
    )
    outer_body = ExpressionFunctionBody(
        provider   = provider,
        name       = name,
        flags      = flags,
        doc        = function_doc,
        parameters = parameters,
        source_ref = source_ref
    )
    if special_args:
        # Tuple parameters present: build the inner function and the
        # unpacking statements of the outer wrapper.
        inner_name = name.strip("<>") + "$inner"
        inner_arg_names = []
        iter_vars = []
        values = []
        statements = []
        def unpackFrom(source, arg_names):
            # Emit statements that iterate "source" and unpack it into
            # one temp variable per element (recursing for nested
            # tuples), returning the accesses to those temp variables.
            accesses = []
            sub_special_index = 0
            iter_var = outer_body.allocateTempVariable(None, "arg_iter_%d" % len(iter_vars))
            iter_vars.append(iter_var)
            statements.append(
                StatementAssignmentVariable(
                    variable_ref = ExpressionTargetTempVariableRef(
                        variable   = iter_var,
                        source_ref = source_ref
                    ),
                    source       = ExpressionBuiltinIter1(
                        value      = source,
                        source_ref = source_ref
                    ),
                    source_ref   = source_ref
                )
            )
            for element_index, arg_name in enumerate(arg_names):
                if getKind(arg_name) == "Name":
                    inner_arg_names.append(arg_name.id)
                    arg_var = outer_body.allocateTempVariable(None, "tmp_" + arg_name.id)
                    statements.append(
                        StatementAssignmentVariable(
                            variable_ref = ExpressionTargetTempVariableRef(
                                variable   = arg_var,
                                source_ref = source_ref
                            ),
                            source       = ExpressionSpecialUnpack(
                                value      = ExpressionTempVariableRef(
                                    variable   = iter_var,
                                    source_ref = source_ref
                                ),
                                count      = element_index + 1,
                                expected   = len(arg_names),
                                source_ref = source_ref
                            ),
                            source_ref   = source_ref
                        )
                    )
                    accesses.append(
                        ExpressionTempVariableRef(
                            variable   = arg_var,
                            source_ref = source_ref
                        )
                    )
                elif getKind(arg_name) == "Tuple":
                    accesses.extend(
                        unpackFrom(
                            source    = ExpressionSpecialUnpack(
                                value      = ExpressionTempVariableRef(
                                    variable   = iter_var,
                                    source_ref = source_ref
                                ),
                                count      = element_index + 1,
                                expected   = len(arg_names),
                                source_ref = source_ref
                            ),
                            arg_names = arg_name.elts
                        )
                    )
                    sub_special_index += 1
                else:
                    assert False, arg_name
            # Verify the iterator is exhausted, i.e. the tuple had
            # exactly the expected element count.
            statements.append(
                StatementSpecialUnpackCheck(
                    iterator   = ExpressionTempVariableRef(
                        variable   = iter_var,
                        source_ref = source_ref
                    ),
                    count      = len(arg_names),
                    source_ref = source_ref
                )
            )
            return accesses
        # Every ".n" parameter is unpacked, all others are forwarded.
        for arg_name in parameters.getParameterNames():
            if arg_name.startswith('.'):
                source = ExpressionVariableRef(
                    variable_name = arg_name,
                    source_ref    = source_ref
                )
                values.extend(
                    unpackFrom(source, special_args[arg_name])
                )
            else:
                values.append(
                    ExpressionVariableRef(
                        variable_name = arg_name,
                        source_ref    = source_ref
                    )
                )
                inner_arg_names.append(arg_name)
        inner_parameters = ParameterSpec(
            ps_name          = inner_name,
            ps_normal_args   = inner_arg_names,
            ps_kw_only_args  = (),
            ps_list_star_arg = None,
            ps_dict_star_arg = None,
            ps_default_count = None
        )
        function_body = ExpressionFunctionBody(
            provider   = outer_body,
            name       = inner_name,
            flags      = flags,
            doc        = function_doc,
            parameters = inner_parameters,
            source_ref = source_ref
        )
        # The wrapper ends by calling the inner function with the
        # unpacked and forwarded values.
        statements.append(
            StatementReturn(
                ExpressionFunctionCall(
                    function   = ExpressionFunctionCreation(
                        function_ref = ExpressionFunctionRef(
                            function_body = function_body,
                            source_ref    = source_ref
                        ),
                        code_object  = code_object,
                        defaults     = (),
                        kw_defaults  = None,
                        annotations  = None,
                        source_ref   = source_ref
                    ),
                    values     = values,
                    source_ref = source_ref
                ),
                source_ref = source_ref
            )
        )
        # Release the unpacking temp variables in a try/finally.
        outer_body.setBody(
            makeStatementsSequenceFromStatement(
                statement = makeTryFinallyStatement(
                    provider,
                    tried      = statements,
                    final      = [
                        StatementReleaseVariable(
                            variable   = variable,
                            source_ref = source_ref
                        )
                        for variable in
                        outer_body.getTempVariables()
                    ],
                    source_ref = source_ref,
                    public_exc = False
                )
            )
        )
    else:
        function_body = outer_body
    return outer_body, function_body, code_object
def addFunctionVariableReleases(function):
    """Wrap the body of *function* in a try/finally releasing its locals.

    Only variables owned by the function itself are released here; shared
    (closure) variables are freed by function object attachment instead.
    """
    assert function.isExpressionFunctionBody() or \
           function.isExpressionClassBody() or \
           function.isExpressionGeneratorObjectBody() or \
           function.isExpressionCoroutineObjectBody()

    # We attach everything to the function definition source location.
    source_ref = function.getSourceReference()

    release_statements = [
        StatementReleaseVariable(
            variable   = variable,
            source_ref = source_ref
        )
        for variable in function.getLocalVariables()
        # Shared variables are freed by function object attachment.
        if variable.getOwner() is function
    ]

    # Nothing owned locally, nothing to wrap.
    if not release_statements:
        return

    wrapped_body = function.getBody()

    if wrapped_body.isStatementsFrame():
        wrapped_body = makeStatementsSequenceFromStatement(
            statement = wrapped_body
        )

    function.setBody(
        makeStatementsSequenceFromStatement(
            statement = makeTryFinallyStatement(
                provider   = function,
                tried      = wrapped_body,
                final      = release_statements,
                source_ref = source_ref
            )
        )
    )
| 32.283671 | 96 | 0.558554 |
from nuitka.nodes.AssignNodes import (
ExpressionTargetTempVariableRef,
ExpressionTargetVariableRef,
StatementAssignmentVariable,
StatementReleaseVariable
)
from nuitka.nodes.BuiltinIteratorNodes import (
ExpressionBuiltinIter1,
StatementSpecialUnpackCheck
)
from nuitka.nodes.BuiltinNextNodes import ExpressionSpecialUnpack
from nuitka.nodes.BuiltinRefNodes import ExpressionBuiltinRef
from nuitka.nodes.CallNodes import ExpressionCallNoKeywords
from nuitka.nodes.CodeObjectSpecs import CodeObjectSpec
from nuitka.nodes.ConstantRefNodes import (
ExpressionConstantNoneRef,
makeConstantRefNode
)
from nuitka.nodes.ContainerMakingNodes import ExpressionMakeTuple
from nuitka.nodes.CoroutineNodes import (
ExpressionCoroutineObjectBody,
ExpressionMakeCoroutineObject
)
from nuitka.nodes.FunctionNodes import (
ExpressionFunctionBody,
ExpressionFunctionCall,
ExpressionFunctionCreation,
ExpressionFunctionRef
)
from nuitka.nodes.GeneratorNodes import (
ExpressionGeneratorObjectBody,
ExpressionMakeGeneratorObject,
StatementGeneratorReturn
)
from nuitka.nodes.ParameterSpecs import ParameterSpec
from nuitka.nodes.ReturnNodes import StatementReturn
from nuitka.nodes.VariableRefNodes import (
ExpressionTempVariableRef,
ExpressionVariableRef
)
from nuitka.PythonVersions import python_version
from nuitka.tree import SyntaxErrors
from .Helpers import (
buildFrameNode,
buildNode,
buildNodeList,
detectFunctionBodyKind,
extractDocFromBody,
getKind,
makeDictCreationOrConstant,
makeStatementsSequenceFromStatement,
mangleName
)
from .ReformulationTryFinallyStatements import makeTryFinallyStatement
def _insertFinalReturnStatement(function_statements_body, return_class,
                                source_ref):
    """Ensure the statements sequence ends with an implicit ``return None``.

    *return_class* is the statement node class to instantiate (plain return
    vs. generator return). An empty body becomes just the return; a body
    whose last statement already aborts is left untouched.
    """
    implicit_return = return_class(
        expression = ExpressionConstantNoneRef(
            source_ref = source_ref
        ),
        source_ref = source_ref
    )

    if function_statements_body is None:
        # Entirely empty body: the implicit return is all there is.
        return makeStatementsSequenceFromStatement(
            statement = implicit_return,
        )

    if not function_statements_body.isStatementAborting():
        # Control can fall off the end, so append the implicit return.
        function_statements_body.setStatements(
            function_statements_body.getStatements() + (implicit_return,)
        )

    return function_statements_body
def buildFunctionNode(provider, node, source_ref):
    """Reformulate a ``def`` statement.

    Builds the function body (with a dedicated object body for generators),
    applies decorators, and returns a ``StatementAssignmentVariable`` that
    binds the created function object to its mangled name in *provider*.
    """
    assert getKind(node) == "FunctionDef"
    # Split off a leading docstring from the real body statements.
    function_statement_nodes, function_doc = extractDocFromBody(node)
    # One scan over the body decides whether this is a plain function or a
    # generator and which flags apply; the remaining results are unused here.
    function_kind, flags, _written_variables, _non_local_declarations, _global_declarations = \
      detectFunctionBodyKind(
        nodes = function_statement_nodes
    )
    outer_body, function_body, code_object = buildFunctionWithParsing(
        provider      = provider,
        function_kind = function_kind,
        name          = node.name,
        function_doc  = function_doc,
        flags         = flags,
        node          = node,
        source_ref    = source_ref
    )
    if function_kind == "Function":
        code_body = function_body
    elif function_kind == "Generator":
        # Generators compile their statements into a separate object body;
        # "function_body" then merely creates the generator object.
        code_body = ExpressionGeneratorObjectBody(
            provider   = function_body,
            name       = node.name,
            flags      = flags,
            source_ref = source_ref
        )
        for variable in function_body.getVariables():
            code_body.getVariableForReference(variable.getName())
    else:
        assert False, function_kind
    if function_kind == "Generator":
        # Calling a generator function only returns a fresh generator object.
        function_body.setBody(
            makeStatementsSequenceFromStatement(
                statement = StatementReturn(
                    expression = ExpressionMakeGeneratorObject(
                        generator_ref = ExpressionFunctionRef(
                            function_body = code_body,
                            source_ref    = source_ref
                        ),
                        code_object   = code_object,
                        source_ref    = source_ref
                    ),
                    source_ref = source_ref
                )
            )
        )
    # Decorators apply innermost first, therefore they are built reversed.
    decorators = buildNodeList(
        provider   = provider,
        nodes      = reversed(node.decorator_list),
        source_ref = source_ref
    )
    defaults = buildNodeList(
        provider   = provider,
        nodes      = node.args.defaults,
        source_ref = source_ref
    )
    kw_defaults = buildParameterKwDefaults(
        provider      = provider,
        node          = node,
        function_body = function_body,
        source_ref    = source_ref
    )
    function_statements_body = buildFrameNode(
        provider    = code_body,
        nodes       = function_statement_nodes,
        code_object = code_object,
        source_ref  = source_ref
    )
    if function_kind == "Function":
        # Plain functions fall through to an implicit "return None".
        function_statements_body = _insertFinalReturnStatement(
            function_statements_body = function_statements_body,
            return_class             = StatementReturn,
            source_ref               = source_ref
        )
    if function_statements_body.isStatementsFrame():
        function_statements_body = makeStatementsSequenceFromStatement(
            statement = function_statements_body
        )
    code_body.setBody(
        function_statements_body
    )
    annotations = buildParameterAnnotations(provider, node, source_ref)
    function_creation = ExpressionFunctionCreation(
        function_ref = ExpressionFunctionRef(
            function_body = outer_body,
            source_ref    = source_ref
        ),
        code_object  = code_object,
        defaults     = defaults,
        kw_defaults  = kw_defaults,
        annotations  = annotations,
        source_ref   = source_ref
    )
    # "__new__" of a class is implicitly a static method; add the decorator
    # unless the source already names it explicitly.
    if node.name == "__new__" and \
       provider.isExpressionClassBody():
        for decorator in decorators:
            if decorator.isExpressionVariableRef() and \
               decorator.getVariableName() == "staticmethod":
                break
        else:
            decorators.append(
                ExpressionBuiltinRef(
                    builtin_name = "staticmethod",
                    source_ref   = source_ref
                )
            )
    # Python3.6+: "__init_subclass__" is implicitly a class method.
    if python_version >= 360 and \
       node.name == "__init_subclass__" and \
       provider.isExpressionClassBody():
        for decorator in decorators:
            if decorator.isExpressionVariableRef() and \
               decorator.getVariableName() == "classmethod":
                break
        else:
            decorators.append(
                ExpressionBuiltinRef(
                    builtin_name = "classmethod",
                    source_ref   = source_ref
                )
            )
    decorated_function = function_creation
    for decorator in decorators:
        decorated_function = ExpressionCallNoKeywords(
            called     = decorator,
            args       = ExpressionMakeTuple(
                elements   = (decorated_function,),
                source_ref = source_ref
            ),
            source_ref = decorator.getSourceReference()
        )
    result = StatementAssignmentVariable(
        variable_ref = ExpressionTargetVariableRef(
            variable_name = mangleName(node.name, provider),
            source_ref    = source_ref
        ),
        source       = decorated_function,
        source_ref   = source_ref
    )
    if python_version >= 340:
        # NOTE(review): presumably used later for "__qualname__" computation
        # of the function — confirm against the node class.
        function_body.qualname_setup = result.getTargetVariableRef()
    return result
def buildAsyncFunctionNode(provider, node, source_ref):
    """Reformulate an ``async def`` statement (Python3.5+).

    Creates a coroutine object body holding the real statements, plus a
    creator function whose only job is to return a fresh coroutine object,
    then applies decorators and assigns the result to the function's name.
    """
    assert getKind(node) == "AsyncFunctionDef"
    function_statement_nodes, function_doc = extractDocFromBody(node)
    # Scan the body for flags; the detected kind itself is ignored because
    # the node kind already forces "Coroutine" handling below.
    _function_kind, flags, _written_variables, _non_local_declarations, _global_declarations = \
      detectFunctionBodyKind(
        nodes = function_statement_nodes
    )
    creator_function_body, _, code_object = buildFunctionWithParsing(
        provider      = provider,
        function_kind = "Coroutine",
        name          = node.name,
        flags         = (),
        function_doc  = function_doc,
        node          = node,
        source_ref    = source_ref
    )
    # The coroutine object body carries the actual statements and flags.
    function_body = ExpressionCoroutineObjectBody(
        provider   = creator_function_body,
        name       = node.name,
        flags      = flags,
        source_ref = source_ref
    )
    # Decorators apply innermost first, therefore they are built reversed.
    decorators = buildNodeList(
        provider   = provider,
        nodes      = reversed(node.decorator_list),
        source_ref = source_ref
    )
    defaults = buildNodeList(
        provider   = provider,
        nodes      = node.args.defaults,
        source_ref = source_ref
    )
    function_statements_body = buildFrameNode(
        provider    = function_body,
        nodes       = function_statement_nodes,
        code_object = code_object,
        source_ref  = source_ref
    )
    # Coroutines end in a generator-style implicit "return None".
    function_statements_body = _insertFinalReturnStatement(
        function_statements_body = function_statements_body,
        return_class             = StatementGeneratorReturn,
        source_ref               = source_ref
    )
    if function_statements_body.isStatementsFrame():
        function_statements_body = makeStatementsSequenceFromStatement(
            statement = function_statements_body
        )
    function_body.setBody(
        function_statements_body
    )
    annotations = buildParameterAnnotations(provider, node, source_ref)
    kw_defaults = buildParameterKwDefaults(
        provider      = provider,
        node          = node,
        function_body = creator_function_body,
        source_ref    = source_ref
    )
    # Calling the async function only returns a fresh coroutine object.
    creator_function_body.setBody(
        makeStatementsSequenceFromStatement(
            statement = StatementReturn(
                expression = ExpressionMakeCoroutineObject(
                    coroutine_ref = ExpressionFunctionRef(
                        function_body = function_body,
                        source_ref    = source_ref
                    ),
                    code_object   = code_object,
                    source_ref    = source_ref
                ),
                source_ref = source_ref
            )
        )
    )
    function_creation = ExpressionFunctionCreation(
        function_ref = ExpressionFunctionRef(
            function_body = creator_function_body,
            source_ref    = source_ref
        ),
        code_object  = code_object,
        defaults     = defaults,
        kw_defaults  = kw_defaults,
        annotations  = annotations,
        source_ref   = source_ref
    )
    decorated_function = function_creation
    for decorator in decorators:
        decorated_function = ExpressionCallNoKeywords(
            called     = decorator,
            args       = ExpressionMakeTuple(
                elements   = (decorated_function,),
                source_ref = source_ref
            ),
            source_ref = decorator.getSourceReference()
        )
    result = StatementAssignmentVariable(
        variable_ref = ExpressionTargetVariableRef(
            variable_name = mangleName(node.name, provider),
            source_ref    = source_ref
        ),
        source       = decorated_function,
        source_ref   = source_ref
    )
    # NOTE(review): presumably used later for "__qualname__" computation —
    # confirm against the coroutine body node class.
    function_body.qualname_setup = result.getTargetVariableRef()
    return result
def buildParameterKwDefaults(provider, node, function_body, source_ref):
    """Build the keyword-only defaults dict node for a function definition.

    Returns ``None`` when there is nothing to build: before Python3 (no
    keyword-only arguments exist) or when the function declares none.
    """
    # Keyword-only arguments do not exist before Python3.
    if python_version < 300:
        return None

    kw_only_names = function_body.getParameters().getKwOnlyParameterNames()

    if not kw_only_names:
        return None

    keys = []
    values = []

    for kw_only_name, kw_default in \
      zip(kw_only_names, node.args.kw_defaults):
        # Only arguments that actually carry a default contribute an entry.
        if kw_default is None:
            continue

        keys.append(
            makeConstantRefNode(
                constant   = kw_only_name,
                source_ref = source_ref
            )
        )
        values.append(
            buildNode(provider, kw_default, source_ref)
        )

    return makeDictCreationOrConstant(
        keys       = keys,
        values     = values,
        source_ref = source_ref
    )
def buildParameterAnnotations(provider, node, source_ref):
    """Build the ``__annotations__`` dict node for a function definition.

    Returns ``None`` before Python3 or when no annotation is present.
    Handles the Python3.0-3.3 vs. 3.4+ differences in how "*args"/"**kwargs"
    annotations are represented in the AST.
    """
    # Too weird for Python2.
    if python_version < 300:
        return None
    # Starting with Python 3.4, the names of parameters are mangled in
    # annotations as well.
    if python_version < 340:
        mangle = lambda variable_name: variable_name
    else:
        mangle = lambda variable_name: mangleName(variable_name, provider)
    keys = []
    values = []
    def addAnnotation(key, value):
        # Collect one key/value pair for the annotations dict.
        keys.append(
            makeConstantRefNode(
                constant      = mangle(key),
                source_ref    = source_ref,
                user_provided = True
            )
        )
        values.append(value)
    def extractArg(arg):
        # Recursively collect the annotation of one argument node, if any.
        if getKind(arg) == "Name":
            assert arg.annotation is None
        elif getKind(arg) == "arg":
            if arg.annotation is not None:
                addAnnotation(
                    key   = arg.arg,
                    value = buildNode(provider, arg.annotation, source_ref)
                )
        elif getKind(arg) == "Tuple":
            for arg in arg.elts:
                extractArg(arg)
        else:
            assert False, getKind(arg)
    for arg in node.args.args:
        extractArg(arg)
    for arg in node.args.kwonlyargs:
        extractArg(arg)
    if python_version < 340:
        # Before 3.4 the star argument annotations live in dedicated fields.
        if node.args.varargannotation is not None:
            addAnnotation(
                key   = node.args.vararg,
                value = buildNode(
                    provider, node.args.varargannotation, source_ref
                )
            )
        if node.args.kwargannotation is not None:
            addAnnotation(
                key   = node.args.kwarg,
                value = buildNode(
                    provider, node.args.kwargannotation, source_ref
                )
            )
    else:
        # From 3.4 on, "vararg"/"kwarg" are normal "arg" nodes.
        if node.args.vararg is not None:
            extractArg(node.args.vararg)
        if node.args.kwarg is not None:
            extractArg(node.args.kwarg)
    # Return annotation only exists for nodes that carry "returns" (lambdas
    # do not).
    if hasattr(node, "returns") and node.returns is not None:
        addAnnotation(
            key   = "return",
            value = buildNode(
                provider, node.returns, source_ref
            )
        )
    if keys:
        return makeDictCreationOrConstant(
            keys       = keys,
            values     = values,
            source_ref = source_ref
        )
    else:
        return None
def buildFunctionWithParsing(provider, function_kind, name, function_doc, flags,
                             node, source_ref):
    """Create the function body node(s) and code object for a definition.

    Returns ``(outer_body, function_body, code_object)``. Normally both
    bodies are the same node; only for Python2 tuple parameters ("def f(a,
    (b, c))") an outer wrapper is built that unpacks the tuple arguments and
    calls an inner function receiving them as plain parameters.
    """
    kind = getKind(node)
    assert kind in ("FunctionDef", "Lambda", "AsyncFunctionDef"), "unsupported for kind " + kind
    def extractArg(arg):
        # Map one AST argument to its mangled plain name, or None.
        if arg is None:
            return None
        elif type(arg) is str:
            return mangleName(arg, provider)
        elif getKind(arg) == "Name":
            return mangleName(arg.id, provider)
        elif getKind(arg) == "arg":
            return mangleName(arg.arg, provider)
        elif getKind(arg) == "Tuple":
            # Tuple arguments are handled via "special_args" and never reach
            # this point.
            assert False
        else:
            assert False, getKind(arg)
    # Python2 tuple parameters, keyed by their generated ".1", ".2" names.
    special_args = {}
    def extractNormalArgs(args):
        normal_args = []
        for arg in args:
            if type(arg) is not str and getKind(arg) == "Tuple":
                # Replace a tuple parameter by a synthetic positional name,
                # matching CPython's ".%d" convention.
                special_arg_name = ".%d" % (len(special_args) + 1)
                special_args[special_arg_name] = arg.elts
                normal_args.append(special_arg_name)
            else:
                normal_args.append(extractArg(arg))
        return normal_args
    normal_args = extractNormalArgs(node.args.args)
    parameters = ParameterSpec(
        ps_name          = name,
        ps_normal_args   = normal_args,
        ps_kw_only_args  = [
            extractArg(arg)
            for arg in
            node.args.kwonlyargs
        ]
           if python_version >= 300 else
        [],
        ps_list_star_arg = extractArg(node.args.vararg),
        ps_dict_star_arg = extractArg(node.args.kwarg),
        ps_default_count = len(node.args.defaults)
    )
    # E.g. duplicate argument names become a syntax error here.
    message = parameters.checkValid()
    if message is not None:
        SyntaxErrors.raiseSyntaxError(
            message,
            source_ref
        )
    code_object = CodeObjectSpec(
        co_name           = name,
        co_kind           = function_kind,
        co_varnames       = parameters.getParameterNames(),
        co_argcount       = parameters.getArgumentCount(),
        co_kwonlyargcount = parameters.getKwOnlyParameterCount(),
        co_has_starlist   = parameters.getStarListArgumentName() is not None,
        co_has_stardict   = parameters.getStarDictArgumentName() is not None
    )
    outer_body = ExpressionFunctionBody(
        provider   = provider,
        name       = name,
        flags      = flags,
        doc        = function_doc,
        parameters = parameters,
        source_ref = source_ref
    )
    if special_args:
        # Python2 tuple parameters: build an inner function that takes the
        # unpacked elements, and make the outer body unpack and delegate.
        inner_name = name.strip("<>") + "$inner"
        inner_arg_names = []
        iter_vars = []
        values = []
        statements = []
        def unpackFrom(source, arg_names):
            # Emit statements that iterate "source" and assign each element
            # to a temp variable; returns the accesses in parameter order.
            accesses = []
            sub_special_index = 0
            iter_var = outer_body.allocateTempVariable(None, "arg_iter_%d" % len(iter_vars))
            iter_vars.append(iter_var)
            statements.append(
                StatementAssignmentVariable(
                    variable_ref = ExpressionTargetTempVariableRef(
                        variable   = iter_var,
                        source_ref = source_ref
                    ),
                    source       = ExpressionBuiltinIter1(
                        value      = source,
                        source_ref = source_ref
                    ),
                    source_ref   = source_ref
                )
            )
            for element_index, arg_name in enumerate(arg_names):
                if getKind(arg_name) == "Name":
                    inner_arg_names.append(arg_name.id)
                    arg_var = outer_body.allocateTempVariable(None, "tmp_" + arg_name.id)
                    statements.append(
                        StatementAssignmentVariable(
                            variable_ref = ExpressionTargetTempVariableRef(
                                variable   = arg_var,
                                source_ref = source_ref
                            ),
                            source       = ExpressionSpecialUnpack(
                                value      = ExpressionTempVariableRef(
                                    variable   = iter_var,
                                    source_ref = source_ref
                                ),
                                count      = element_index + 1,
                                expected   = len(arg_names),
                                source_ref = source_ref
                            ),
                            source_ref   = source_ref
                        )
                    )
                    accesses.append(
                        ExpressionTempVariableRef(
                            variable   = arg_var,
                            source_ref = source_ref
                        )
                    )
                elif getKind(arg_name) == "Tuple":
                    # Nested tuple parameter: recurse on the unpacked value.
                    accesses.extend(
                        unpackFrom(
                            source    = ExpressionSpecialUnpack(
                                value      = ExpressionTempVariableRef(
                                    variable   = iter_var,
                                    source_ref = source_ref
                                ),
                                count      = element_index + 1,
                                expected   = len(arg_names),
                                source_ref = source_ref
                            ),
                            arg_names = arg_name.elts
                        )
                    )
                    sub_special_index += 1
                else:
                    assert False, arg_name
            # Check that the iterator is now exhausted, i.e. had exactly
            # the expected number of elements.
            statements.append(
                StatementSpecialUnpackCheck(
                    iterator   = ExpressionTempVariableRef(
                        variable   = iter_var,
                        source_ref = source_ref
                    ),
                    count      = len(arg_names),
                    source_ref = source_ref
                )
            )
            return accesses
        for arg_name in parameters.getParameterNames():
            if arg_name.startswith('.'):
                # Synthetic ".%d" name: unpack the received tuple argument.
                source = ExpressionVariableRef(
                    variable_name = arg_name,
                    source_ref    = source_ref
                )
                values.extend(
                    unpackFrom(source, special_args[arg_name])
                )
            else:
                values.append(
                    ExpressionVariableRef(
                        variable_name = arg_name,
                        source_ref    = source_ref
                    )
                )
                inner_arg_names.append(arg_name)
        inner_parameters = ParameterSpec(
            ps_name          = inner_name,
            ps_normal_args   = inner_arg_names,
            ps_kw_only_args  = (),
            ps_list_star_arg = None,
            ps_dict_star_arg = None,
            ps_default_count = None
        )
        function_body = ExpressionFunctionBody(
            provider   = outer_body,
            name       = inner_name,
            flags      = flags,
            doc        = function_doc,
            parameters = inner_parameters,
            source_ref = source_ref
        )
        # The outer body just calls the inner function with the unpacked
        # values and returns its result.
        statements.append(
            StatementReturn(
                ExpressionFunctionCall(
                    function   = ExpressionFunctionCreation(
                        function_ref = ExpressionFunctionRef(
                            function_body = function_body,
                            source_ref    = source_ref
                        ),
                        code_object  = code_object,
                        defaults     = (),
                        kw_defaults  = None,
                        annotations  = None,
                        source_ref   = source_ref
                    ),
                    values     = values,
                    source_ref = source_ref
                ),
                source_ref = source_ref
            )
        )
        # Release all temporary variables of the unpacking, even when an
        # exception occurs during it.
        outer_body.setBody(
            makeStatementsSequenceFromStatement(
                statement = makeTryFinallyStatement(
                    provider,
                    tried      = statements,
                    final      = [
                        StatementReleaseVariable(
                            variable   = variable,
                            source_ref = source_ref
                        )
                        for variable in
                        outer_body.getTempVariables()
                    ] ,
                    source_ref = source_ref,
                    public_exc = False
                )
            )
        )
    else:
        function_body = outer_body
    return outer_body, function_body, code_object
def addFunctionVariableReleases(function):
    """Wrap the body of *function* in a try/finally releasing its locals.

    Only variables owned by the function itself are released here; shared
    (closure) variables are freed by function object attachment instead.
    """
    assert function.isExpressionFunctionBody() or \
           function.isExpressionClassBody() or \
           function.isExpressionGeneratorObjectBody() or \
           function.isExpressionCoroutineObjectBody()
    releases = []
    # We attach everything to the function definition source location.
    source_ref = function.getSourceReference()
    for variable in function.getLocalVariables():
        # Shared variables are freed by function object attachment.
        if variable.getOwner() is not function:
            continue
        releases.append(
            StatementReleaseVariable(
                variable   = variable,
                source_ref = source_ref
            )
        )
    if releases:
        body = function.getBody()
        if body.isStatementsFrame():
            body = makeStatementsSequenceFromStatement(
                statement = body
            )
        body = makeTryFinallyStatement(
            provider   = function,
            tried      = body,
            final      = releases,
            source_ref = source_ref
        )
        function.setBody(
            makeStatementsSequenceFromStatement(
                statement = body
            )
        )
| true | true |
f72aaada40c2662f3b0cfa6fbf29805cd48bed68 | 3,807 | py | Python | argo/workflows/client/models/v1alpha1_metrics.py | jyotishp/argo-client-python | 7dfe27c8bc542a9142efcb0a8f55bb85c915448c | [
"Apache-2.0"
] | 1 | 2021-03-10T23:09:42.000Z | 2021-03-10T23:09:42.000Z | argo/workflows/client/models/v1alpha1_metrics.py | jyotishp/argo-client-python | 7dfe27c8bc542a9142efcb0a8f55bb85c915448c | [
"Apache-2.0"
] | null | null | null | argo/workflows/client/models/v1alpha1_metrics.py | jyotishp/argo-client-python | 7dfe27c8bc542a9142efcb0a8f55bb85c915448c | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Argo Server API
You can get examples of requests and responses by using the CLI with `--gloglevel=9`, e.g. `argo list --gloglevel=9` # noqa: E501
The version of the OpenAPI document: v2.11.8
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from argo.workflows.client.configuration import Configuration
class V1alpha1Metrics(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> declared OpenAPI type string.
    openapi_types = {
        'prometheus': 'list[V1alpha1Prometheus]'
    }
    # Maps python attribute name -> JSON key in the API definition.
    attribute_map = {
        'prometheus': 'prometheus'
    }
    def __init__(self, prometheus=None, local_vars_configuration=None):  # noqa: E501
        """V1alpha1Metrics - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._prometheus = None
        self.discriminator = None
        # Assignment goes through the property setter, which validates.
        self.prometheus = prometheus
    @property
    def prometheus(self):
        """Gets the prometheus of this V1alpha1Metrics.  # noqa: E501
        Prometheus is a list of prometheus metrics to be emitted  # noqa: E501
        :return: The prometheus of this V1alpha1Metrics.  # noqa: E501
        :rtype: list[V1alpha1Prometheus]
        """
        return self._prometheus
    @prometheus.setter
    def prometheus(self, prometheus):
        """Sets the prometheus of this V1alpha1Metrics.
        Prometheus is a list of prometheus metrics to be emitted  # noqa: E501
        :param prometheus: The prometheus of this V1alpha1Metrics.  # noqa: E501
        :type: list[V1alpha1Prometheus]
        """
        # "prometheus" is required by the schema, hence the None check.
        if self.local_vars_configuration.client_side_validation and prometheus is None:  # noqa: E501
            raise ValueError("Invalid value for `prometheus`, must not be `None`")  # noqa: E501
        self._prometheus = prometheus
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recurse into list elements that are themselves models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recurse into dict values that are themselves models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1alpha1Metrics):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1alpha1Metrics):
            return True
        return self.to_dict() != other.to_dict()
| 30.701613 | 134 | 0.599947 |
import pprint
import re
import six
from argo.workflows.client.configuration import Configuration
class V1alpha1Metrics(object):
    """Auto-generated OpenAPI model for workflow metrics emission.

    NOTE: generated by OpenAPI Generator (https://openapi-generator.tech);
    do not edit the class manually.
    """
    # Maps python attribute name -> declared OpenAPI type string.
    openapi_types = {
        'prometheus': 'list[V1alpha1Prometheus]'
    }
    # Maps python attribute name -> JSON key in the API definition.
    attribute_map = {
        'prometheus': 'prometheus'
    }
    def __init__(self, prometheus=None, local_vars_configuration=None):
        """V1alpha1Metrics - a model defined in OpenAPI"""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._prometheus = None
        self.discriminator = None
        # Assignment goes through the property setter, which validates.
        self.prometheus = prometheus
    @property
    def prometheus(self):
        """Gets the prometheus metrics list of this V1alpha1Metrics.

        :rtype: list[V1alpha1Prometheus]
        """
        return self._prometheus
    @prometheus.setter
    def prometheus(self, prometheus):
        """Sets the prometheus metrics list of this V1alpha1Metrics.

        :param prometheus: list of prometheus metrics to be emitted
        :type: list[V1alpha1Prometheus]
        """
        # "prometheus" is required by the schema, hence the None check.
        if self.local_vars_configuration.client_side_validation and prometheus is None:
            raise ValueError("Invalid value for `prometheus`, must not be `None`")
        self._prometheus = prometheus
    def to_dict(self):
        """Returns the model properties as a dict."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recurse into list elements that are themselves models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recurse into dict values that are themselves models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal."""
        if not isinstance(other, V1alpha1Metrics):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        if not isinstance(other, V1alpha1Metrics):
            return True
        return self.to_dict() != other.to_dict()
| true | true |
f72aabfa532e7c390422910aefe539d8af6b71e5 | 4,802 | py | Python | cnn_training.py | xiangzhemeng/epfl-ml2017-project2 | 16345b3e453989dfeba70667773b76362897a782 | [
"MIT"
] | 11 | 2018-12-11T05:59:50.000Z | 2020-09-30T03:01:02.000Z | cnn_training.py | xiangzhemeng/epfl-ml2017-project2 | 16345b3e453989dfeba70667773b76362897a782 | [
"MIT"
] | 1 | 2019-02-28T15:51:26.000Z | 2019-02-28T15:51:26.000Z | cnn_training.py | xiangzhemeng/epfl-ml2017-project2 | 16345b3e453989dfeba70667773b76362897a782 | [
"MIT"
] | 3 | 2018-03-06T07:34:39.000Z | 2018-05-28T03:13:32.000Z | import pandas as pd
import numpy as np
import pickle
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers.embeddings import Embedding
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers import LSTM
from keras.layers import Flatten
from keras.layers import Dense
from keras.callbacks import EarlyStopping
# Main function of cnn training
def run_neural_network():
    """Train three CNN/LSTM sentiment models and dump their predictions.

    Loads the preprocessed tweet pickles, tokenizes and pads them to length
    30, trains three keras models (CNN, LSTM, CNN+LSTM) with early stopping,
    and pickles each model's train/test predictions for later XGBoost
    stacking under data/xgboost/.
    """
    print(" == Enter into CNN training step ==")
    np.random.seed(0)
    x_train = pd.read_pickle("data/pickles/train_after_preprocess.pkl")
    x_train = np.array(x_train['tweet'])
    x_test = pd.read_pickle("data/pickles/test_after_preprocess.pkl")
    x_test = np.array(x_test['tweet'])
    # Labels: first half of the 2.5M tweets is class 0, second half class 1.
    y = np.array(int(2500000 / 2) * [0] + int(2500000 / 2) * [1])
    print("Data loading finish!")
    # Tokenization (no character filtering, text is already preprocessed)
    tokenizer = Tokenizer(filters='')
    tokenizer.fit_on_texts(x_train)
    # Turn x_train into sequence form
    sequence_train = tokenizer.texts_to_sequences(x_train)
    # Turn x_test into sequence form
    sequence_test = tokenizer.texts_to_sequences(x_test)
    # Transform sequence_train into a 2D Numpy array
    sequence_train = sequence.pad_sequences(sequence_train, maxlen = 30)
    # Transform sequence_test into a 2D Numpy array
    sequence_test = sequence.pad_sequences(sequence_test, maxlen = 30)
    # Embedding input dimension: vocabulary size (+1 for the padding index)
    input_dim = len(tokenizer.word_index) + 1
    input_length = sequence_train.shape[1]
    print("Tokenization finish!")
    # Shuffle training dataset (labels permuted with the same index)
    new_index = np.arange(sequence_train.shape[0])
    np.random.shuffle(new_index)
    sequence_train = sequence_train[new_index]
    y = y[new_index]
    print("Data shuffling finish!")
    earlyStopping = EarlyStopping(monitor = 'val_loss', patience = 2)
    ### Model 1: Conv1D + MaxPooling + Dense ###
    print("Build model1!")
    np.random.seed(1)
    model = Sequential()
    model.add(Embedding(input_dim, 50, input_length = input_length))
    model.add(Conv1D(padding = "same", kernel_size = 3, filters = 32, activation = "relu"))
    model.add(MaxPooling1D(pool_size = 2))
    model.add(Flatten())
    model.add(Dense(250, activation = 'relu'))
    model.add(Dense(1, activation = 'sigmoid'))
    model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
    print("Fit model1!")
    model.fit(sequence_train, y, validation_split = 0.1, epochs = 10, batch_size = 128, verbose = 1, shuffle = True, callbacks = [earlyStopping])
    print("Generate prediction!")
    train_model1 = model.predict(sequence_train, batch_size = 128)
    pickle.dump(train_model1, open('data/xgboost/train_model1.txt', 'wb'))
    test_model1 = model.predict(sequence_test)
    pickle.dump(test_model1, open('data/xgboost/test_model1.txt', 'wb'))
    print("Model1 finished!")
    ### Model 2: LSTM with dropout ###
    print("Build model2!")
    np.random.seed(2)
    model = Sequential()
    model.add(Embedding(input_dim, 50, input_length = input_length))
    model.add(LSTM(100, recurrent_dropout = 0.2, dropout = 0.2))
    model.add(Dense(1, activation = 'sigmoid'))
    model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
    print("Fit model2!")
    model.fit(sequence_train, y, validation_split = 0.1, epochs = 10, batch_size = 128, verbose = 1, shuffle = True, callbacks = [earlyStopping])
    print("Generate prediction!")
    train_model2 = model.predict(sequence_train, batch_size = 128)
    pickle.dump(train_model2, open('data/xgboost/train_model2.txt', 'wb'))
    test_model2 = model.predict(sequence_test)
    pickle.dump(test_model2, open('data/xgboost/test_model2.txt', 'wb'))
    print("Model2 finished!")
    ### Model 3: Conv1D + MaxPooling + LSTM ###
    # Fixed: this message previously said "Build model1!" by copy/paste.
    print("Build model3!")
    np.random.seed(3)
    model = Sequential()
    model.add(Embedding(input_dim, 50, input_length = input_length))
    model.add(Conv1D(padding = "same", kernel_size = 3, filters = 32, activation = "relu"))
    model.add(MaxPooling1D(pool_size = 2))
    model.add(LSTM(100))
    model.add(Dense(1, activation = 'sigmoid'))
    model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
    print("Fit model3!")
    model.fit(sequence_train, y, validation_split = 0.1, epochs = 10, batch_size = 128, verbose = 1, shuffle = True, callbacks = [earlyStopping])
    print("Generate prediction!")
    train_model3= model.predict(sequence_train, batch_size = 128)
    pickle.dump(train_model3, open('data/xgboost/train_model3.txt', 'wb'))
    test_model3 = model.predict(sequence_test)
    pickle.dump(test_model3, open('data/xgboost/test_model3.txt', 'wb'))
    print("Model3 finished!")
if __name__ == "__main__":
    run_neural_network()
| 37.515625 | 145 | 0.704082 | import pandas as pd
import numpy as np
import pickle
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers.embeddings import Embedding
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers import LSTM
from keras.layers import Flatten
from keras.layers import Dense
from keras.callbacks import EarlyStopping
def run_neural_network():
    """Train three CNN/LSTM sentiment models and dump their predictions.

    Loads the preprocessed tweet pickles, tokenizes and pads them to length
    30, trains three keras models (CNN, LSTM, CNN+LSTM) with early stopping,
    and pickles each model's train/test predictions for later XGBoost
    stacking under data/xgboost/.
    """
    print(" == Enter into CNN training step ==")
    np.random.seed(0)
    x_train = pd.read_pickle("data/pickles/train_after_preprocess.pkl")
    x_train = np.array(x_train['tweet'])
    x_test = pd.read_pickle("data/pickles/test_after_preprocess.pkl")
    x_test = np.array(x_test['tweet'])
    # Labels: first half of the 2.5M tweets is class 0, second half class 1.
    y = np.array(int(2500000 / 2) * [0] + int(2500000 / 2) * [1])
    print("Data loading finish!")
    # Tokenize (no character filtering) and pad all sequences to length 30.
    tokenizer = Tokenizer(filters='')
    tokenizer.fit_on_texts(x_train)
    sequence_train = tokenizer.texts_to_sequences(x_train)
    sequence_test = tokenizer.texts_to_sequences(x_test)
    sequence_train = sequence.pad_sequences(sequence_train, maxlen = 30)
    sequence_test = sequence.pad_sequences(sequence_test, maxlen = 30)
    # Embedding input dimension: vocabulary size (+1 for the padding index).
    input_dim = len(tokenizer.word_index) + 1
    input_length = sequence_train.shape[1]
    print("Tokenization finish!")
    # Shuffle training data (labels permuted with the same index).
    new_index = np.arange(sequence_train.shape[0])
    np.random.shuffle(new_index)
    sequence_train = sequence_train[new_index]
    y = y[new_index]
    print("Data shuffling finish!")
    earlyStopping = EarlyStopping(monitor = 'val_loss', patience = 2)
    # Model 1: Conv1D + MaxPooling + Dense.
    # Fixed: "p.random.seed(...)" was a corruption of "np.random.seed(...)"
    # and raised NameError (three occurrences in this function).
    np.random.seed(1)
    model = Sequential()
    model.add(Embedding(input_dim, 50, input_length = input_length))
    model.add(Conv1D(padding = "same", kernel_size = 3, filters = 32, activation = "relu"))
    model.add(MaxPooling1D(pool_size = 2))
    model.add(Flatten())
    model.add(Dense(250, activation = 'relu'))
    model.add(Dense(1, activation = 'sigmoid'))
    model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
    print("Fit model1!")
    model.fit(sequence_train, y, validation_split = 0.1, epochs = 10, batch_size = 128, verbose = 1, shuffle = True, callbacks = [earlyStopping])
    print("Generate prediction!")
    train_model1 = model.predict(sequence_train, batch_size = 128)
    pickle.dump(train_model1, open('data/xgboost/train_model1.txt', 'wb'))
    test_model1 = model.predict(sequence_test)
    pickle.dump(test_model1, open('data/xgboost/test_model1.txt', 'wb'))
    print("Model1 finished!")
    # Model 2: LSTM with dropout.
    np.random.seed(2)
    model = Sequential()
    model.add(Embedding(input_dim, 50, input_length = input_length))
    model.add(LSTM(100, recurrent_dropout = 0.2, dropout = 0.2))
    model.add(Dense(1, activation = 'sigmoid'))
    model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
    print("Fit model2!")
    model.fit(sequence_train, y, validation_split = 0.1, epochs = 10, batch_size = 128, verbose = 1, shuffle = True, callbacks = [earlyStopping])
    print("Generate prediction!")
    train_model2 = model.predict(sequence_train, batch_size = 128)
    pickle.dump(train_model2, open('data/xgboost/train_model2.txt', 'wb'))
    test_model2 = model.predict(sequence_test)
    pickle.dump(test_model2, open('data/xgboost/test_model2.txt', 'wb'))
    print("Model2 finished!")
    # Model 3: Conv1D + MaxPooling + LSTM.
    np.random.seed(3)
    model = Sequential()
    model.add(Embedding(input_dim, 50, input_length = input_length))
    model.add(Conv1D(padding = "same", kernel_size = 3, filters = 32, activation = "relu"))
    model.add(MaxPooling1D(pool_size = 2))
    model.add(LSTM(100))
    model.add(Dense(1, activation = 'sigmoid'))
    model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
    print("Fit model3!")
    model.fit(sequence_train, y, validation_split = 0.1, epochs = 10, batch_size = 128, verbose = 1, shuffle = True, callbacks = [earlyStopping])
    print("Generate prediction!")
    train_model3= model.predict(sequence_train, batch_size = 128)
    pickle.dump(train_model3, open('data/xgboost/train_model3.txt', 'wb'))
    test_model3 = model.predict(sequence_test)
    pickle.dump(test_model3, open('data/xgboost/test_model3.txt', 'wb'))
    print("Model3 finished!")
if __name__ == "__main__":
    run_neural_network()
| true | true |
f72aacfc0d05c9205783f92c37e379035bd0665e | 5,592 | py | Python | Doc/conf.py | whtsky/python | 715a6e5035bb21ac49382772076ec4c630d6e960 | [
"PSF-2.0"
] | 2 | 2018-12-22T08:20:13.000Z | 2020-06-24T02:48:52.000Z | Doc/conf.py | whtsky/python | 715a6e5035bb21ac49382772076ec4c630d6e960 | [
"PSF-2.0"
] | null | null | null | Doc/conf.py | whtsky/python | 715a6e5035bb21ac49382772076ec4c630d6e960 | [
"PSF-2.0"
] | 3 | 2018-03-06T05:12:17.000Z | 2021-04-22T10:01:01.000Z | # -*- coding: utf-8 -*-
#
# Python documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
import sys, os, time
sys.path.append(os.path.abspath('tools/sphinxext'))
# General configuration
# ---------------------
extensions = ['sphinx.ext.refcounting', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'pyspecific']
templates_path = ['tools/sphinxext']
# General substitutions.
project = 'Python'
copyright = '1990-%s, Python Software Foundation' % time.strftime('%Y')
# The default replacements for |version| and |release|.
#
# The short X.Y version.
# version = '2.6'
# The full version, including alpha/beta/rc tags.
# release = '2.6a0'
# We look for the Include/patchlevel.h file in the current Python source tree
# and replace the values accordingly.
import patchlevel
version, release = patchlevel.get_version_info()
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of files that shouldn't be included in the build.
unused_docs = [
'maclib/scrap',
'library/xmllib',
'library/xml.etree',
]
# Relative filename of the reference count data file.
refcount_file = 'data/refcounts.dat'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# Options for HTML output
# -----------------------
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, filenames relative to this file.
html_sidebars = {
'index': 'indexsidebar.html',
}
# Additional templates that should be rendered to pages.
html_additional_pages = {
'download': 'download.html',
'index': 'indexcontent.html',
}
# Output an OpenSearch description file.
html_use_opensearch = 'http://docs.python.org/dev'
# Additional static files.
html_static_path = ['tools/sphinxext/static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'python' + release.replace('.', '')
# Split the index
html_split_index = True
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = r'Guido van Rossum\\Fred L. Drake, Jr., editor'
latex_documents = [
('c-api/index', 'c-api.tex',
'The Python/C API', _stdauthor, 'manual'),
('distutils/index', 'distutils.tex',
'Distributing Python Modules', _stdauthor, 'manual'),
('documenting/index', 'documenting.tex',
'Documenting Python', 'Georg Brandl', 'manual'),
('extending/index', 'extending.tex',
'Extending and Embedding Python', _stdauthor, 'manual'),
('install/index', 'install.tex',
'Installing Python Modules', _stdauthor, 'manual'),
('library/index', 'library.tex',
'The Python Library Reference', _stdauthor, 'manual'),
('reference/index', 'reference.tex',
'The Python Language Reference', _stdauthor, 'manual'),
('tutorial/index', 'tutorial.tex',
'Python Tutorial', _stdauthor, 'manual'),
('using/index', 'using.tex',
'Using Python', _stdauthor, 'manual'),
('whatsnew/' + version, 'whatsnew.tex',
'What\'s New in Python', 'A. M. Kuchling', 'howto'),
]
# Collect all HOWTOs individually
latex_documents.extend(('howto/' + fn[:-4], 'howto-' + fn[:-4] + '.tex',
'', _stdauthor, 'howto')
for fn in os.listdir('howto')
if fn.endswith('.rst') and fn != 'index.rst')
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\authoraddress{
\strong{Python Software Foundation}\\
Email: \email{docs@python.org}
}
\let\Verbatim=\OriginalVerbatim
\let\endVerbatim=\endOriginalVerbatim
'''
# Documents to append as an appendix to all manuals.
latex_appendices = ['glossary', 'about', 'license', 'copyright']
# Options for the coverage checker
# --------------------------------
# The coverage checker will ignore all modules/functions/classes whose names
# match any of the following regexes (using re.match).
coverage_ignore_modules = [
    # Fixed: the original '[T|t][k|K]' placed a literal '|' inside each
    # character class, so the pattern also matched names containing '|';
    # '[Tt][kK]' (Tk/tk modules) is what was meant.
    r'[Tt][kK]',
    r'Tix',
    r'distutils.*',
]
coverage_ignore_functions = [
'test($|_)',
]
coverage_ignore_classes = [
]
# Glob patterns for C source files for C API coverage, relative to this directory.
coverage_c_path = [
'../Include/*.h',
]
# Regexes to find C items in the source files.
coverage_c_regexes = {
'cfunction': (r'^PyAPI_FUNC\(.*\)\s+([^_][\w_]+)'),
'data': (r'^PyAPI_DATA\(.*\)\s+([^_][\w_]+)'),
'macro': (r'^#define ([^_][\w_]+)\(.*\)[\s|\\]'),
}
# The coverage checker will ignore all C items whose names match these regexes
# (using re.match) -- the keys must be the same as in coverage_c_regexes.
coverage_ignore_c_items = {
# 'cfunction': [...]
}
| 30.557377 | 82 | 0.669886 |
# that aren't pickleable (module imports are okay, they're removed automatically).
import sys, os, time
sys.path.append(os.path.abspath('tools/sphinxext'))
# General configuration
# ---------------------
extensions = ['sphinx.ext.refcounting', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'pyspecific']
templates_path = ['tools/sphinxext']
# General substitutions.
project = 'Python'
copyright = '1990-%s, Python Software Foundation' % time.strftime('%Y')
# The default replacements for |version| and |release|.
#
# The short X.Y version.
# version = '2.6'
# The full version, including alpha/beta/rc tags.
# release = '2.6a0'
# We look for the Include/patchlevel.h file in the current Python source tree
# and replace the values accordingly.
import patchlevel
version, release = patchlevel.get_version_info()
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of files that shouldn't be included in the build.
unused_docs = [
'maclib/scrap',
'library/xmllib',
'library/xml.etree',
]
refcount_file = 'data/refcounts.dat'
add_function_parentheses = True
add_module_names = True
html_last_updated_fmt = '%b %d, %Y'
html_use_smartypants = True
html_sidebars = {
'index': 'indexsidebar.html',
}
html_additional_pages = {
'download': 'download.html',
'index': 'indexcontent.html',
}
html_use_opensearch = 'http://docs.python.org/dev'
html_static_path = ['tools/sphinxext/static']
htmlhelp_basename = 'python' + release.replace('.', '')
html_split_index = True
latex_paper_size = 'a4'
latex_font_size = '10pt'
_stdauthor = r'Guido van Rossum\\Fred L. Drake, Jr., editor'
latex_documents = [
('c-api/index', 'c-api.tex',
'The Python/C API', _stdauthor, 'manual'),
('distutils/index', 'distutils.tex',
'Distributing Python Modules', _stdauthor, 'manual'),
('documenting/index', 'documenting.tex',
'Documenting Python', 'Georg Brandl', 'manual'),
('extending/index', 'extending.tex',
'Extending and Embedding Python', _stdauthor, 'manual'),
('install/index', 'install.tex',
'Installing Python Modules', _stdauthor, 'manual'),
('library/index', 'library.tex',
'The Python Library Reference', _stdauthor, 'manual'),
('reference/index', 'reference.tex',
'The Python Language Reference', _stdauthor, 'manual'),
('tutorial/index', 'tutorial.tex',
'Python Tutorial', _stdauthor, 'manual'),
('using/index', 'using.tex',
'Using Python', _stdauthor, 'manual'),
('whatsnew/' + version, 'whatsnew.tex',
'What\'s New in Python', 'A. M. Kuchling', 'howto'),
]
# Collect all HOWTOs individually
latex_documents.extend(('howto/' + fn[:-4], 'howto-' + fn[:-4] + '.tex',
'', _stdauthor, 'howto')
for fn in os.listdir('howto')
if fn.endswith('.rst') and fn != 'index.rst')
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\authoraddress{
\strong{Python Software Foundation}\\
Email: \email{docs@python.org}
}
\let\Verbatim=\OriginalVerbatim
\let\endVerbatim=\endOriginalVerbatim
'''
# Documents to append as an appendix to all manuals.
latex_appendices = ['glossary', 'about', 'license', 'copyright']
# Options for the coverage checker
# --------------------------------
# The coverage checker will ignore all modules/functions/classes whose names
# match any of the following regexes (using re.match).
coverage_ignore_modules = [
r'[T|t][k|K]',
r'Tix',
r'distutils.*',
]
coverage_ignore_functions = [
'test($|_)',
]
coverage_ignore_classes = [
]
# Glob patterns for C source files for C API coverage, relative to this directory.
coverage_c_path = [
'../Include/*.h',
]
# Regexes to find C items in the source files.
coverage_c_regexes = {
'cfunction': (r'^PyAPI_FUNC\(.*\)\s+([^_][\w_]+)'),
'data': (r'^PyAPI_DATA\(.*\)\s+([^_][\w_]+)'),
'macro': (r'^
}
# The coverage checker will ignore all C items whose names match these regexes
# (using re.match) -- the keys must be the same as in coverage_c_regexes.
coverage_ignore_c_items = {
# 'cfunction': [...]
}
| true | true |
f72aad60b115d3fd47a1a6dd7076ff1a07ca0230 | 140 | py | Python | tests/data/config1.py | seismopy/figcon | 7e5d6ac30ea49bce8a566f9afb7e9e5af081164c | [
"BSD-3-Clause"
] | null | null | null | tests/data/config1.py | seismopy/figcon | 7e5d6ac30ea49bce8a566f9afb7e9e5af081164c | [
"BSD-3-Clause"
] | null | null | null | tests/data/config1.py | seismopy/figcon | 7e5d6ac30ea49bce8a566f9afb7e9e5af081164c | [
"BSD-3-Clause"
] | null | null | null | """
The first config example
"""
from types import SimpleNamespace
agency = 'NSA'
snuffler = SimpleNamespace(phase_map={1: 'P', 2: 'S'})
| 14 | 54 | 0.685714 | from types import SimpleNamespace
agency = 'NSA'
snuffler = SimpleNamespace(phase_map={1: 'P', 2: 'S'})
| true | true |
f72aadc6e09185ea3c69fa953e810a4ae3a1ee00 | 1,277 | py | Python | cc_backend_lib/models/scales.py | prio-data/cc_backend_lib | 7daa3c38d96e9063074367ea0873e39d7544e2b7 | [
"MIT"
] | null | null | null | cc_backend_lib/models/scales.py | prio-data/cc_backend_lib | 7daa3c38d96e9063074367ea0873e39d7544e2b7 | [
"MIT"
] | null | null | null | cc_backend_lib/models/scales.py | prio-data/cc_backend_lib | 7daa3c38d96e9063074367ea0873e39d7544e2b7 | [
"MIT"
] | null | null | null | """
The intensity measurement scale has changed, and might change again
Therefore, I need this module to translate between numeric intensity scores
and casualty numbers
"""
from typing import Optional
from datetime import date
import pydantic
class CasualtyRange(pydantic.BaseModel):
    """A casualty-count bracket on an intensity scale.

    `upper is None` denotes no upper bound (used for the top band); `text`
    is an optional human-readable label such as "Low"/"Medium"/"High".
    """
    lower: int
    upper: Optional[int]
    text: Optional[str]
    @property
    def zero(self) -> bool:
        # True only for the exact 0..0 range; an unbounded band (upper=None)
        # is not "zero".
        return self.upper == 0
# Maps the date each scale came into force to that scale's
# {intensity score -> CasualtyRange} table; `scaled` below picks the scale
# with the latest start date <= the queried date.
SCALES = {
    # The old scale (date(1,1,1) acts as "since forever").
    date(1,1,1):{
        0: CasualtyRange(lower=0,upper=1),
        1: CasualtyRange(lower=2,upper=25),
        2: CasualtyRange(lower=26,upper=99),
        3: CasualtyRange(lower=100,upper=999),
        4: CasualtyRange(lower=1000,upper=None),
    },
    # The current scale (in force since 2021-01-01, with text labels).
    date(2021,1,1):{
        0: CasualtyRange(lower=1,upper=25,text="Low"),
        1: CasualtyRange(lower=26,upper=99,text="Medium"),
        2: CasualtyRange(lower=100,upper=None,text="High"),
    }
}
def scaled(date: date, intensity_value: int) -> CasualtyRange:
    """Translate a numeric intensity score into the casualty range that was
    in force on the given date.

    Negative scores map to the exact-zero range.
    """
    if intensity_value < 0:
        return CasualtyRange(lower=0, upper=0)
    # The scale in force is the one with the latest start date <= `date`.
    effective_start = max(start for start in SCALES if start <= date)
    return SCALES[effective_start][intensity_value]
| 30.404762 | 75 | 0.649961 | from typing import Optional
from datetime import date
import pydantic
class CasualtyRange(pydantic.BaseModel):
lower: int
upper: Optional[int]
text: Optional[str]
@property
def zero(self):
return self.upper == 0
SCALES = {
date(1,1,1):{
0: CasualtyRange(lower=0,upper=1),
1: CasualtyRange(lower=2,upper=25),
2: CasualtyRange(lower=26,upper=99),
3: CasualtyRange(lower=100,upper=999),
4: CasualtyRange(lower=1000,upper=None),
},
date(2021,1,1):{
0: CasualtyRange(lower=1,upper=25,text="Low"),
1: CasualtyRange(lower=26,upper=99,text="Medium"),
2: CasualtyRange(lower=100,upper=None,text="High"),
}
}
def scaled(date:date,intensity_value:int)->CasualtyRange:
if intensity_value < 0:
return CasualtyRange(lower=0,upper=0)
valid_scales = {k:v for k,v in SCALES.items() if k <= date}
scale_for_date = SCALES[max((d for d,_ in valid_scales.items()))]
return scale_for_date[intensity_value]
| true | true |
f72aadee17de447d48becb2d1e2d660cbd57c250 | 3,508 | py | Python | third_party/blink/renderer/build/scripts/core/css/properties/make_css_property_instances.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668 | 2015-01-01T01:57:10.000Z | 2022-03-31T23:33:32.000Z | third_party/blink/renderer/build/scripts/core/css/properties/make_css_property_instances.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113 | 2015-05-04T09:58:14.000Z | 2022-01-31T19:35:03.000Z | third_party/blink/renderer/build/scripts/core/css/properties/make_css_property_instances.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941 | 2015-01-02T11:32:21.000Z | 2022-03-31T16:35:46.000Z | #!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json5_generator
import template_expander
from collections import namedtuple
from core.css import css_properties
class PropertyClassData(
        namedtuple('PropertyClassData', [
            'enum_key',
            'enum_value',
            'property_id',
            'classname',
            'namespace_group',
            'filename',
        ])):
    """Immutable record describing one generated CSS property class."""
class CSSPropertyInstancesWriter(json5_generator.Writer):
    """Generates css_property_instances.{h,cc} from the CSS property JSON5
    files: one PropertyClassData per property/alias, sorted by enum value."""
    def __init__(self, json5_file_paths, output_dir):
        # json5_file_paths: input JSON5 files describing the CSS properties.
        # output_dir: directory the generated .h/.cc files are written to.
        super(CSSPropertyInstancesWriter, self).__init__([], output_dir)
        self._input_files = json5_file_paths
        # Maps output file name -> generator method (consumed by the base Writer).
        self._outputs = {
            'css_property_instances.h':
            self.generate_property_instances_header,
            'css_property_instances.cc':
            self.generate_property_instances_implementation
        }
        # These files are no longer generated. If the files are present from
        # a previous build, we remove them. This avoids accidentally #including
        # a stale generated header.
        self._cleanup = set([
            'css_property.cc', 'css_property.h', 'css_unresolved_property.cc',
            'css_unresolved_property.h'
        ])
        self._css_properties = css_properties.CSSProperties(json5_file_paths)
        properties = self._css_properties.longhands + self._css_properties.shorthands
        aliases = self._css_properties.aliases
        # Lists of PropertyClassData.
        self._property_classes_by_id = list(map(self.get_class, properties))
        self._alias_classes_by_id = list(map(self.get_class, aliases))
        # Sort by enum value.
        self._property_classes_by_id.sort(key=lambda t: t.enum_value)
        self._alias_classes_by_id.sort(key=lambda t: t.enum_value)
    def get_class(self, property_):
        """Builds the PropertyClassData record for a property.
        Args:
            property_: A single property from CSSProperties.properties()
        Returns:
            A PropertyClassData describing the generated class (the class name
            is derived automatically from the property name).
        """
        # Properties with longhands are shorthands; everything else is a longhand.
        namespace_group = 'Shorthand' if property_['longhands'] else 'Longhand'
        return PropertyClassData(
            enum_key=property_['enum_key'],
            enum_value=property_['enum_value'],
            property_id=property_['property_id'],
            classname=property_['name'].to_upper_camel_case(),
            namespace_group=namespace_group,
            filename=property_['name'].to_snake_case())
    @property
    def css_properties(self):
        # Read-only access to the parsed CSSProperties model.
        return self._css_properties
    @template_expander.use_jinja(
        'core/css/properties/templates/css_property_instances.h.tmpl')
    def generate_property_instances_header(self):
        """Template context for css_property_instances.h."""
        return {
            'input_files': self._input_files,
            'property_classes_by_property_id': self._property_classes_by_id,
            'alias_classes_by_property_id': self._alias_classes_by_id,
        }
    @template_expander.use_jinja(
        'core/css/properties/templates/css_property_instances.cc.tmpl')
    def generate_property_instances_implementation(self):
        """Template context for css_property_instances.cc."""
        return {
            'input_files': self._input_files,
            'property_classes_by_property_id': self._property_classes_by_id,
            'alias_classes_by_property_id': self._alias_classes_by_id,
        }
if __name__ == '__main__':
    # CLI entry point: json5_generator.Maker drives the writer.
    json5_generator.Maker(CSSPropertyInstancesWriter).main()
| 37.319149 | 85 | 0.686431 |
import json5_generator
import template_expander
from collections import namedtuple
from core.css import css_properties
class PropertyClassData(
namedtuple(
'PropertyClassData',
'enum_key,enum_value,property_id,classname,namespace_group,filename'
)):
pass
class CSSPropertyInstancesWriter(json5_generator.Writer):
def __init__(self, json5_file_paths, output_dir):
super(CSSPropertyInstancesWriter, self).__init__([], output_dir)
self._input_files = json5_file_paths
self._outputs = {
'css_property_instances.h':
self.generate_property_instances_header,
'css_property_instances.cc':
self.generate_property_instances_implementation
}
self._cleanup = set([
'css_property.cc', 'css_property.h', 'css_unresolved_property.cc',
'css_unresolved_property.h'
])
self._css_properties = css_properties.CSSProperties(json5_file_paths)
properties = self._css_properties.longhands + self._css_properties.shorthands
aliases = self._css_properties.aliases
self._property_classes_by_id = list(map(self.get_class, properties))
self._alias_classes_by_id = list(map(self.get_class, aliases))
self._property_classes_by_id.sort(key=lambda t: t.enum_value)
self._alias_classes_by_id.sort(key=lambda t: t.enum_value)
def get_class(self, property_):
namespace_group = 'Shorthand' if property_['longhands'] else 'Longhand'
return PropertyClassData(
enum_key=property_['enum_key'],
enum_value=property_['enum_value'],
property_id=property_['property_id'],
classname=property_['name'].to_upper_camel_case(),
namespace_group=namespace_group,
filename=property_['name'].to_snake_case())
@property
def css_properties(self):
return self._css_properties
@template_expander.use_jinja(
'core/css/properties/templates/css_property_instances.h.tmpl')
def generate_property_instances_header(self):
return {
'input_files': self._input_files,
'property_classes_by_property_id': self._property_classes_by_id,
'alias_classes_by_property_id': self._alias_classes_by_id,
}
@template_expander.use_jinja(
'core/css/properties/templates/css_property_instances.cc.tmpl')
def generate_property_instances_implementation(self):
return {
'input_files': self._input_files,
'property_classes_by_property_id': self._property_classes_by_id,
'alias_classes_by_property_id': self._alias_classes_by_id,
}
if __name__ == '__main__':
json5_generator.Maker(CSSPropertyInstancesWriter).main()
| true | true |
f72aae9d51b7f0153c0b16c546eb1ceaa7d6f438 | 777 | py | Python | web/data/migrations/0003_auto_20210108_1000.py | liwan1698/CatchingFire | 74535cba4b6da178eed2857d5db9900604c0c5f7 | [
"MIT"
] | 11 | 2021-02-14T15:56:22.000Z | 2022-03-21T08:26:58.000Z | web/data/migrations/0003_auto_20210108_1000.py | liwan1698/CatchingFire | 74535cba4b6da178eed2857d5db9900604c0c5f7 | [
"MIT"
] | null | null | null | web/data/migrations/0003_auto_20210108_1000.py | liwan1698/CatchingFire | 74535cba4b6da178eed2857d5db9900604c0c5f7 | [
"MIT"
] | 6 | 2021-03-16T14:30:12.000Z | 2022-03-10T14:20:24.000Z | # Generated by Django 3.1.5 on 2021-01-08 10:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1.5) schema migration: adds the ClassifyTag
    model and a completion flag to ClassifyData."""
    # Must run after 0002, which removed ClassifyData.pending_tag.
    dependencies = [
        ('data', '0002_remove_classifydata_pending_tag'),
    ]
    operations = [
        # New table of label categories ("标签种类" = "label category").
        migrations.CreateModel(
            name='ClassifyTag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tag', models.CharField(default='', help_text='标签种类', max_length=30, verbose_name='标签种类')),
            ],
        ),
        # Completion flag ("是否完成" = "whether finished") on existing ClassifyData.
        migrations.AddField(
            model_name='classifydata',
            name='status',
            field=models.BooleanField(default=False, help_text='是否完成', verbose_name='是否完成'),
        ),
    ]
| 29.884615 | 114 | 0.593308 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data', '0002_remove_classifydata_pending_tag'),
]
operations = [
migrations.CreateModel(
name='ClassifyTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tag', models.CharField(default='', help_text='标签种类', max_length=30, verbose_name='标签种类')),
],
),
migrations.AddField(
model_name='classifydata',
name='status',
field=models.BooleanField(default=False, help_text='是否完成', verbose_name='是否完成'),
),
]
| true | true |
f72aaeb6bcf064edb9fbce86a27a4b372ec8bac8 | 9,321 | py | Python | kernel/protobuf/generated/pearson_model_param_pb2.py | rinceyuan/WeFe | 8482cb737cb7ba37b2856d184cd42c1bd35a6318 | [
"Apache-2.0"
] | 39 | 2021-10-12T01:43:27.000Z | 2022-03-28T04:46:35.000Z | kernel/protobuf/generated/pearson_model_param_pb2.py | rinceyuan/WeFe | 8482cb737cb7ba37b2856d184cd42c1bd35a6318 | [
"Apache-2.0"
] | 6 | 2021-10-14T02:11:47.000Z | 2022-03-23T02:41:50.000Z | kernel/protobuf/generated/pearson_model_param_pb2.py | rinceyuan/WeFe | 8482cb737cb7ba37b2856d184cd42c1bd35a6318 | [
"Apache-2.0"
] | 10 | 2021-10-14T09:36:03.000Z | 2022-02-10T11:05:12.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pearson-model-param.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pearson-model-param.proto',
package='com.welab.wefe.core.mlmodel.buffer',
syntax='proto3',
serialized_options=b'B\026PearsonModelParamProto',
serialized_pb=b'\n\x19pearson-model-param.proto\x12\"com.welab.wefe.core.mlmodel.buffer\"\x16\n\x05Names\x12\r\n\x05names\x18\x01 \x03(\t\"/\n\x0c\x41nonymousMap\x12\x11\n\tanonymous\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"\x8a\x02\n\x11PearsonModelParam\x12\r\n\x05party\x18\x01 \x01(\t\x12\x0f\n\x07parties\x18\x02 \x03(\t\x12\r\n\x05shape\x18\x03 \x01(\x05\x12\x0e\n\x06shapes\x18\x04 \x03(\x05\x12\r\n\x05names\x18\x05 \x03(\t\x12G\n\ranonymous_map\x18\t \x03(\x0b\x32\x30.com.welab.wefe.core.mlmodel.buffer.AnonymousMap\x12\x0c\n\x04\x63orr\x18\x06 \x03(\x01\x12\x12\n\nlocal_corr\x18\x07 \x03(\x01\x12<\n\tall_names\x18\x08 \x03(\x0b\x32).com.welab.wefe.core.mlmodel.buffer.NamesB\x18\x42\x16PearsonModelParamProtob\x06proto3'
)
_NAMES = _descriptor.Descriptor(
name='Names',
full_name='com.welab.wefe.core.mlmodel.buffer.Names',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='names', full_name='com.welab.wefe.core.mlmodel.buffer.Names.names', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=65,
serialized_end=87,
)
_ANONYMOUSMAP = _descriptor.Descriptor(
name='AnonymousMap',
full_name='com.welab.wefe.core.mlmodel.buffer.AnonymousMap',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='anonymous', full_name='com.welab.wefe.core.mlmodel.buffer.AnonymousMap.anonymous', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='com.welab.wefe.core.mlmodel.buffer.AnonymousMap.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=89,
serialized_end=136,
)
_PEARSONMODELPARAM = _descriptor.Descriptor(
name='PearsonModelParam',
full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='party', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.party', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parties', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.parties', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shape', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.shape', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shapes', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.shapes', index=3,
number=4, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='names', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.names', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='anonymous_map', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.anonymous_map',
index=5,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='corr', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.corr', index=6,
number=6, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='local_corr', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.local_corr', index=7,
number=7, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='all_names', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.all_names', index=8,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=139,
serialized_end=405,
)
_PEARSONMODELPARAM.fields_by_name['anonymous_map'].message_type = _ANONYMOUSMAP
_PEARSONMODELPARAM.fields_by_name['all_names'].message_type = _NAMES
DESCRIPTOR.message_types_by_name['Names'] = _NAMES
DESCRIPTOR.message_types_by_name['AnonymousMap'] = _ANONYMOUSMAP
DESCRIPTOR.message_types_by_name['PearsonModelParam'] = _PEARSONMODELPARAM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Names = _reflection.GeneratedProtocolMessageType('Names', (_message.Message,), {
'DESCRIPTOR': _NAMES,
'__module__': 'pearson_model_param_pb2'
# @@protoc_insertion_point(class_scope:com.welab.wefe.core.mlmodel.buffer.Names)
})
_sym_db.RegisterMessage(Names)
AnonymousMap = _reflection.GeneratedProtocolMessageType('AnonymousMap', (_message.Message,), {
'DESCRIPTOR': _ANONYMOUSMAP,
'__module__': 'pearson_model_param_pb2'
# @@protoc_insertion_point(class_scope:com.welab.wefe.core.mlmodel.buffer.AnonymousMap)
})
_sym_db.RegisterMessage(AnonymousMap)
PearsonModelParam = _reflection.GeneratedProtocolMessageType('PearsonModelParam', (_message.Message,), {
'DESCRIPTOR': _PEARSONMODELPARAM,
'__module__': 'pearson_model_param_pb2'
# @@protoc_insertion_point(class_scope:com.welab.wefe.core.mlmodel.buffer.PearsonModelParam)
})
_sym_db.RegisterMessage(PearsonModelParam)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 45.247573 | 747 | 0.695848 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pearson-model-param.proto',
package='com.welab.wefe.core.mlmodel.buffer',
syntax='proto3',
serialized_options=b'B\026PearsonModelParamProto',
serialized_pb=b'\n\x19pearson-model-param.proto\x12\"com.welab.wefe.core.mlmodel.buffer\"\x16\n\x05Names\x12\r\n\x05names\x18\x01 \x03(\t\"/\n\x0c\x41nonymousMap\x12\x11\n\tanonymous\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"\x8a\x02\n\x11PearsonModelParam\x12\r\n\x05party\x18\x01 \x01(\t\x12\x0f\n\x07parties\x18\x02 \x03(\t\x12\r\n\x05shape\x18\x03 \x01(\x05\x12\x0e\n\x06shapes\x18\x04 \x03(\x05\x12\r\n\x05names\x18\x05 \x03(\t\x12G\n\ranonymous_map\x18\t \x03(\x0b\x32\x30.com.welab.wefe.core.mlmodel.buffer.AnonymousMap\x12\x0c\n\x04\x63orr\x18\x06 \x03(\x01\x12\x12\n\nlocal_corr\x18\x07 \x03(\x01\x12<\n\tall_names\x18\x08 \x03(\x0b\x32).com.welab.wefe.core.mlmodel.buffer.NamesB\x18\x42\x16PearsonModelParamProtob\x06proto3'
)
_NAMES = _descriptor.Descriptor(
name='Names',
full_name='com.welab.wefe.core.mlmodel.buffer.Names',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='names', full_name='com.welab.wefe.core.mlmodel.buffer.Names.names', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=65,
serialized_end=87,
)
_ANONYMOUSMAP = _descriptor.Descriptor(
name='AnonymousMap',
full_name='com.welab.wefe.core.mlmodel.buffer.AnonymousMap',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='anonymous', full_name='com.welab.wefe.core.mlmodel.buffer.AnonymousMap.anonymous', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='com.welab.wefe.core.mlmodel.buffer.AnonymousMap.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=89,
serialized_end=136,
)
_PEARSONMODELPARAM = _descriptor.Descriptor(
name='PearsonModelParam',
full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='party', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.party', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parties', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.parties', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shape', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.shape', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shapes', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.shapes', index=3,
number=4, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='names', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.names', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='anonymous_map', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.anonymous_map',
index=5,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='corr', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.corr', index=6,
number=6, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='local_corr', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.local_corr', index=7,
number=7, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='all_names', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.all_names', index=8,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=139,
serialized_end=405,
)
_PEARSONMODELPARAM.fields_by_name['anonymous_map'].message_type = _ANONYMOUSMAP
_PEARSONMODELPARAM.fields_by_name['all_names'].message_type = _NAMES
DESCRIPTOR.message_types_by_name['Names'] = _NAMES
DESCRIPTOR.message_types_by_name['AnonymousMap'] = _ANONYMOUSMAP
DESCRIPTOR.message_types_by_name['PearsonModelParam'] = _PEARSONMODELPARAM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Names = _reflection.GeneratedProtocolMessageType('Names', (_message.Message,), {
'DESCRIPTOR': _NAMES,
'__module__': 'pearson_model_param_pb2'
})
_sym_db.RegisterMessage(Names)
AnonymousMap = _reflection.GeneratedProtocolMessageType('AnonymousMap', (_message.Message,), {
'DESCRIPTOR': _ANONYMOUSMAP,
'__module__': 'pearson_model_param_pb2'
})
_sym_db.RegisterMessage(AnonymousMap)
PearsonModelParam = _reflection.GeneratedProtocolMessageType('PearsonModelParam', (_message.Message,), {
'DESCRIPTOR': _PEARSONMODELPARAM,
'__module__': 'pearson_model_param_pb2'
})
_sym_db.RegisterMessage(PearsonModelParam)
DESCRIPTOR._options = None
| true | true |
f72aaed00855b0147da7146ad4481dc9c1de0fae | 7,616 | py | Python | sig.py | IlyaKodua/colorization_with_averaging_ab_channels_test | 425a9f3e8b875b21c76424e892cbf489a9e408cb | [
"MIT"
] | null | null | null | sig.py | IlyaKodua/colorization_with_averaging_ab_channels_test | 425a9f3e8b875b21c76424e892cbf489a9e408cb | [
"MIT"
] | null | null | null | sig.py | IlyaKodua/colorization_with_averaging_ab_channels_test | 425a9f3e8b875b21c76424e892cbf489a9e408cb | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
class SIGGRAPHGenerator(nn.Module):
def __init__(self, norm_layer=nn.BatchNorm2d, classes=529):
super(SIGGRAPHGenerator, self).__init__()
# Conv1
model1=[nn.Conv2d(4, 64, kernel_size=3, stride=1, padding=1, bias=True),]
model1+=[nn.ReLU(True),]
model1+=[nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=True),]
model1+=[nn.ReLU(True),]
model1+=[norm_layer(64),]
# add a subsampling operation
# Conv2
model2=[nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=True),]
model2+=[nn.ReLU(True),]
model2+=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=True),]
model2+=[nn.ReLU(True),]
model2+=[norm_layer(128),]
# add a subsampling layer operation
# Conv3
model3=[nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=True),]
model3+=[nn.ReLU(True),]
model3+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]
model3+=[nn.ReLU(True),]
model3+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]
model3+=[nn.ReLU(True),]
model3+=[norm_layer(256),]
# add a subsampling layer operation
# Conv4
model4=[nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=True),]
model4+=[nn.ReLU(True),]
model4+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]
model4+=[nn.ReLU(True),]
model4+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]
model4+=[nn.ReLU(True),]
model4+=[norm_layer(512),]
# Conv5
model5=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
model5+=[nn.ReLU(True),]
model5+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
model5+=[nn.ReLU(True),]
model5+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
model5+=[nn.ReLU(True),]
model5+=[norm_layer(512),]
# Conv6
model6=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
model6+=[nn.ReLU(True),]
model6+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
model6+=[nn.ReLU(True),]
model6+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
model6+=[nn.ReLU(True),]
model6+=[norm_layer(512),]
# Conv7
model7=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]
model7+=[nn.ReLU(True),]
model7+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]
model7+=[nn.ReLU(True),]
model7+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]
model7+=[nn.ReLU(True),]
model7+=[norm_layer(512),]
# Conv7
model8up=[nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1, bias=True)]
model3short8=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]
model8=[nn.ReLU(True),]
model8+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]
model8+=[nn.ReLU(True),]
model8+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]
model8+=[nn.ReLU(True),]
model8+=[norm_layer(256),]
# Conv9
model9up=[nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1, bias=True),]
model2short9=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=True),]
# add the two feature maps above
model9=[nn.ReLU(True),]
model9+=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=True),]
model9+=[nn.ReLU(True),]
model9+=[norm_layer(128),]
# Conv10
model10up=[nn.ConvTranspose2d(128, 128, kernel_size=4, stride=2, padding=1, bias=True),]
model1short10=[nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=True),]
# add the two feature maps above
model10=[nn.ReLU(True),]
model10+=[nn.Conv2d(128, 128, kernel_size=3, dilation=1, stride=1, padding=1, bias=True),]
model10+=[nn.LeakyReLU(negative_slope=.2),]
# classification output
model_class=[nn.Conv2d(256, classes, kernel_size=1, padding=0, dilation=1, stride=1, bias=True),]
# regression output
model_out=[nn.Conv2d(128, 2, kernel_size=1, padding=0, dilation=1, stride=1, bias=True),]
model_out+=[nn.Tanh()]
self.model1 = nn.Sequential(*model1)
self.model2 = nn.Sequential(*model2)
self.model3 = nn.Sequential(*model3)
self.model4 = nn.Sequential(*model4)
self.model5 = nn.Sequential(*model5)
self.model6 = nn.Sequential(*model6)
self.model7 = nn.Sequential(*model7)
self.model8up = nn.Sequential(*model8up)
self.model8 = nn.Sequential(*model8)
self.model9up = nn.Sequential(*model9up)
self.model9 = nn.Sequential(*model9)
self.model10up = nn.Sequential(*model10up)
self.model10 = nn.Sequential(*model10)
self.model3short8 = nn.Sequential(*model3short8)
self.model2short9 = nn.Sequential(*model2short9)
self.model1short10 = nn.Sequential(*model1short10)
self.model_class = nn.Sequential(*model_class)
self.model_out = nn.Sequential(*model_out)
self.upsample4 = nn.Sequential(*[nn.Upsample(scale_factor=4, mode='bilinear'),])
self.softmax = nn.Sequential(*[nn.Softmax(dim=1),])
def forward(self, input_A, input_B=None, mask_B=None):
if(input_B is None):
input_B = torch.cat((input_A*0, input_A*0), dim=1)
if(mask_B is None):
mask_B = input_A*0
conv1_2 = self.model1(torch.cat((input_A,input_B,mask_B),dim=1))
conv2_2 = self.model2(conv1_2[:,:,::2,::2])
conv3_3 = self.model3(conv2_2[:,:,::2,::2])
conv4_3 = self.model4(conv3_3[:,:,::2,::2])
conv5_3 = self.model5(conv4_3)
conv6_3 = self.model6(conv5_3)
conv7_3 = self.model7(conv6_3)
conv8_up = self.re_pad_sum(self.model8up(conv7_3), self.model3short8(conv3_3))
conv8_3 = self.model8(conv8_up)
conv9_up = self.re_pad_sum(self.model9up(conv8_3),self.model2short9(conv2_2))
conv9_3 = self.model9(conv9_up)
conv10_up = self.re_pad_sum(self.model10up(conv9_3),self.model1short10(conv1_2))
conv10_2 = self.model10(conv10_up)
out_reg = self.model_out(conv10_2)
conv9_up = self.re_pad_sum(self.model9up(conv8_3), self.model2short9(conv2_2))
conv9_3 = self.model9(conv9_up)
conv10_up = self.re_pad_sum(self.model10up(conv9_3), self.model1short10(conv1_2))
conv10_2 = self.model10(conv10_up)
out_reg = self.model_out(conv10_2)
return out_reg
def re_pad_sum(self, x, y):
diffY = y.size()[2] - x.size()[2]
diffX = y.size()[3] - x.size()[3]
x = F.pad(x, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2])
return x + y
def siggraph17(pretrained=True):
model = SIGGRAPHGenerator()
if(pretrained):
import torch.utils.model_zoo as model_zoo
model.load_state_dict(model_zoo.load_url('https://colorizers.s3.us-east-2.amazonaws.com/siggraph17-df00044c.pth',map_location='cpu',check_hash=True))
return model | 43.028249 | 157 | 0.616334 | import torch
import torch.nn as nn
import torch.nn.functional as F
class SIGGRAPHGenerator(nn.Module):
def __init__(self, norm_layer=nn.BatchNorm2d, classes=529):
super(SIGGRAPHGenerator, self).__init__()
model1=[nn.Conv2d(4, 64, kernel_size=3, stride=1, padding=1, bias=True),]
model1+=[nn.ReLU(True),]
model1+=[nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=True),]
model1+=[nn.ReLU(True),]
model1+=[norm_layer(64),]
model2=[nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=True),]
model2+=[nn.ReLU(True),]
model2+=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=True),]
model2+=[nn.ReLU(True),]
model2+=[norm_layer(128),]
model3=[nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=True),]
model3+=[nn.ReLU(True),]
model3+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]
model3+=[nn.ReLU(True),]
model3+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]
model3+=[nn.ReLU(True),]
model3+=[norm_layer(256),]
model4=[nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=True),]
model4+=[nn.ReLU(True),]
model4+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]
model4+=[nn.ReLU(True),]
model4+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]
model4+=[nn.ReLU(True),]
model4+=[norm_layer(512),]
model5=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
model5+=[nn.ReLU(True),]
model5+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
model5+=[nn.ReLU(True),]
model5+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
model5+=[nn.ReLU(True),]
model5+=[norm_layer(512),]
model6=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
model6+=[nn.ReLU(True),]
model6+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
model6+=[nn.ReLU(True),]
model6+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
model6+=[nn.ReLU(True),]
model6+=[norm_layer(512),]
model7=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]
model7+=[nn.ReLU(True),]
model7+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]
model7+=[nn.ReLU(True),]
model7+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]
model7+=[nn.ReLU(True),]
model7+=[norm_layer(512),]
model8up=[nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1, bias=True)]
model3short8=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]
model8=[nn.ReLU(True),]
model8+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]
model8+=[nn.ReLU(True),]
model8+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]
model8+=[nn.ReLU(True),]
model8+=[norm_layer(256),]
model9up=[nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1, bias=True),]
model2short9=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=True),]
model9=[nn.ReLU(True),]
model9+=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=True),]
model9+=[nn.ReLU(True),]
model9+=[norm_layer(128),]
model10up=[nn.ConvTranspose2d(128, 128, kernel_size=4, stride=2, padding=1, bias=True),]
model1short10=[nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=True),]
model10=[nn.ReLU(True),]
model10+=[nn.Conv2d(128, 128, kernel_size=3, dilation=1, stride=1, padding=1, bias=True),]
model10+=[nn.LeakyReLU(negative_slope=.2),]
model_class=[nn.Conv2d(256, classes, kernel_size=1, padding=0, dilation=1, stride=1, bias=True),]
model_out=[nn.Conv2d(128, 2, kernel_size=1, padding=0, dilation=1, stride=1, bias=True),]
model_out+=[nn.Tanh()]
self.model1 = nn.Sequential(*model1)
self.model2 = nn.Sequential(*model2)
self.model3 = nn.Sequential(*model3)
self.model4 = nn.Sequential(*model4)
self.model5 = nn.Sequential(*model5)
self.model6 = nn.Sequential(*model6)
self.model7 = nn.Sequential(*model7)
self.model8up = nn.Sequential(*model8up)
self.model8 = nn.Sequential(*model8)
self.model9up = nn.Sequential(*model9up)
self.model9 = nn.Sequential(*model9)
self.model10up = nn.Sequential(*model10up)
self.model10 = nn.Sequential(*model10)
self.model3short8 = nn.Sequential(*model3short8)
self.model2short9 = nn.Sequential(*model2short9)
self.model1short10 = nn.Sequential(*model1short10)
self.model_class = nn.Sequential(*model_class)
self.model_out = nn.Sequential(*model_out)
self.upsample4 = nn.Sequential(*[nn.Upsample(scale_factor=4, mode='bilinear'),])
self.softmax = nn.Sequential(*[nn.Softmax(dim=1),])
def forward(self, input_A, input_B=None, mask_B=None):
if(input_B is None):
input_B = torch.cat((input_A*0, input_A*0), dim=1)
if(mask_B is None):
mask_B = input_A*0
conv1_2 = self.model1(torch.cat((input_A,input_B,mask_B),dim=1))
conv2_2 = self.model2(conv1_2[:,:,::2,::2])
conv3_3 = self.model3(conv2_2[:,:,::2,::2])
conv4_3 = self.model4(conv3_3[:,:,::2,::2])
conv5_3 = self.model5(conv4_3)
conv6_3 = self.model6(conv5_3)
conv7_3 = self.model7(conv6_3)
conv8_up = self.re_pad_sum(self.model8up(conv7_3), self.model3short8(conv3_3))
conv8_3 = self.model8(conv8_up)
conv9_up = self.re_pad_sum(self.model9up(conv8_3),self.model2short9(conv2_2))
conv9_3 = self.model9(conv9_up)
conv10_up = self.re_pad_sum(self.model10up(conv9_3),self.model1short10(conv1_2))
conv10_2 = self.model10(conv10_up)
out_reg = self.model_out(conv10_2)
conv9_up = self.re_pad_sum(self.model9up(conv8_3), self.model2short9(conv2_2))
conv9_3 = self.model9(conv9_up)
conv10_up = self.re_pad_sum(self.model10up(conv9_3), self.model1short10(conv1_2))
conv10_2 = self.model10(conv10_up)
out_reg = self.model_out(conv10_2)
return out_reg
def re_pad_sum(self, x, y):
diffY = y.size()[2] - x.size()[2]
diffX = y.size()[3] - x.size()[3]
x = F.pad(x, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2])
return x + y
def siggraph17(pretrained=True):
model = SIGGRAPHGenerator()
if(pretrained):
import torch.utils.model_zoo as model_zoo
model.load_state_dict(model_zoo.load_url('https://colorizers.s3.us-east-2.amazonaws.com/siggraph17-df00044c.pth',map_location='cpu',check_hash=True))
return model | true | true |
f72aaf0d1aeed3ebfc53a15203a1be8e842a5f86 | 1,624 | py | Python | landmark_recognition/urls.py | MilanSusa/Landmark-Recognition-Inference-API | e770fd8dce1b7dc39e52950c71e6406352a67123 | [
"MIT"
] | null | null | null | landmark_recognition/urls.py | MilanSusa/Landmark-Recognition-Inference-API | e770fd8dce1b7dc39e52950c71e6406352a67123 | [
"MIT"
] | 11 | 2020-11-13T18:40:49.000Z | 2022-03-12T00:20:31.000Z | landmark_recognition/urls.py | MilanSusa/Landmark-Recognition-Inference-API | e770fd8dce1b7dc39e52950c71e6406352a67123 | [
"MIT"
] | null | null | null | """landmark_recognition URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
schema_view = get_schema_view(
openapi.Info(
title="Landmark Recognition Inference API",
default_version='v1',
),
public=True,
permission_classes=(permissions.AllowAny,),
)
urlpatterns = [
url(r'^swagger(?P<format>\.json|\.yaml)$', schema_view.without_ui(cache_timeout=0), name='schema-json'),
url(r'^$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
url(r'^redoc/$', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
path('admin/', admin.site.urls),
path('inference/', include('inference_api.urls')),
] + static(prefix=settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
| 38.666667 | 108 | 0.721675 | from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
schema_view = get_schema_view(
openapi.Info(
title="Landmark Recognition Inference API",
default_version='v1',
),
public=True,
permission_classes=(permissions.AllowAny,),
)
urlpatterns = [
url(r'^swagger(?P<format>\.json|\.yaml)$', schema_view.without_ui(cache_timeout=0), name='schema-json'),
url(r'^$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
url(r'^redoc/$', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
path('admin/', admin.site.urls),
path('inference/', include('inference_api.urls')),
] + static(prefix=settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
| true | true |
f72aaf58fab32e715e1c98ff0e845edadd9fd68c | 3,567 | py | Python | examples/ariadne_uvicorn/movies_v4.py | jyoost/neo4j-graphql-py | 14dbd8f133727f89ec8ea79e5475e4a940d4e55f | [
"Apache-2.0"
] | null | null | null | examples/ariadne_uvicorn/movies_v4.py | jyoost/neo4j-graphql-py | 14dbd8f133727f89ec8ea79e5475e4a940d4e55f | [
"Apache-2.0"
] | null | null | null | examples/ariadne_uvicorn/movies_v4.py | jyoost/neo4j-graphql-py | 14dbd8f133727f89ec8ea79e5475e4a940d4e55f | [
"Apache-2.0"
] | null | null | null | import uvicorn
from neo4j import GraphDatabase
from ariadne.asgi import GraphQL
from neo4j_graphql_py import neo4j_graphql
from ariadne import QueryType, make_executable_schema, MutationType, gql
typeDefs = gql('''
directive @cypher(statement: String!) on FIELD_DEFINITION
directive @relation(name:String!, direction:String!) on FIELD_DEFINITION
type Movie {
_id: ID
movieId: ID!
title: String
tagline: String
year: Int
plot: String
poster: String
imdbRating: Float
genres: [Genre] @relation(name: "IN_GENRE", direction: "OUT")
similar(first: Int = 3, offset: Int = 0, limit: Int = 5): [Movie] @cypher(statement: "WITH {this} AS this MATCH (this)--(:Genre)--(o:Movie) RETURN o LIMIT {limit}")
mostSimilar: Movie @cypher(statement: "WITH {this} AS this RETURN this")
degree: Int @cypher(statement: "WITH {this} AS this RETURN SIZE((this)--())")
actors(first: Int = 3, offset: Int = 0): [Actor] @relation(name: "ACTED_IN", direction:"IN")
avgStars: Float
filmedIn: State @relation(name: "FILMED_IN", direction: "OUT")
scaleRating(scale: Int = 3): Float @cypher(statement: "WITH $this AS this RETURN $scale * this.imdbRating")
scaleRatingFloat(scale: Float = 1.5): Float @cypher(statement: "WITH $this AS this RETURN $scale * this.imdbRating")
}
type Genre {
_id: ID!
name: String
movies(first: Int = 3, offset: Int = 0): [Movie] @relation(name: "IN_GENRE", direction: "IN")
highestRatedMovie: Movie @cypher(statement: "MATCH (m:Movie)-[:IN_GENRE]->(this) RETURN m ORDER BY m.imdbRating DESC LIMIT 1")
}
type State {
name: String
}
interface Person {
id: ID!
name: String
}
type Actor {
id: ID!
name: String
movies: [Movie] @relation(name: "ACTED_IN", direction: "OUT")
}
type User implements Person {
id: ID!
name: String
}
enum BookGenre {
Mystery,
Science,
Math
}
type Book {
title: String!
genre: BookGenre
}
type Query {
Movie(id: ID, title: String, year: Int, plot: String, poster: String, imdbRating: Float, first: Int, offset: Int): [Movie]
MoviesByYear(year: Int): [Movie]
AllMovies: [Movie]
MovieById(movieId: ID!): Movie
GenresBySubstring(substring: String): [Genre] @cypher(statement: "MATCH (g:Genre) WHERE toLower(g.name) CONTAINS toLower($substring) RETURN g")
Books: [Book]
Actors: [Actor]
}
type Mutation {
CreateGenre(name: String): Genre @cypher(statement: "CREATE (g:Genre) SET g.name = $name RETURN g")
CreateMovie(movieId: ID!, title: String, year: Int, plot: String, poster: String, imdbRating: Float): Movie
CreateBook(title: String!,genre: BookGenre): Book @cypher(statement: "CREATE (b:Book) SET b.title = $title, b.genre = $genre RETURN b")
}
'''
)
query = QueryType()
mutation = MutationType()
# @mutation.field('AddMovieGenre')
@query.field('Actors')
@query.field('Movie')
@query.field('MoviesByYear')
@query.field('AllMovies')
@query.field('MovieById')
@query.field('GenresBySubstring')
@query.field('Books')
@mutation.field('CreateGenre')
@mutation.field('CreateMovie')
@mutation.field('CreateBook')
async def resolve(obj, info, **kwargs):
return await neo4j_graphql(obj, info.context, info, True, **kwargs)
schema = make_executable_schema(typeDefs, query, mutation)
driver = None
def context(request):
global driver
if driver is None:
driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "123456"))
return {'driver': driver, 'request': request}
root_value = {}
app = GraphQL(schema=schema, root_value=root_value, context_value=context, debug=True)
uvicorn.run(app)
driver.close()
| 31.566372 | 166 | 0.702832 | import uvicorn
from neo4j import GraphDatabase
from ariadne.asgi import GraphQL
from neo4j_graphql_py import neo4j_graphql
from ariadne import QueryType, make_executable_schema, MutationType, gql
typeDefs = gql('''
directive @cypher(statement: String!) on FIELD_DEFINITION
directive @relation(name:String!, direction:String!) on FIELD_DEFINITION
type Movie {
_id: ID
movieId: ID!
title: String
tagline: String
year: Int
plot: String
poster: String
imdbRating: Float
genres: [Genre] @relation(name: "IN_GENRE", direction: "OUT")
similar(first: Int = 3, offset: Int = 0, limit: Int = 5): [Movie] @cypher(statement: "WITH {this} AS this MATCH (this)--(:Genre)--(o:Movie) RETURN o LIMIT {limit}")
mostSimilar: Movie @cypher(statement: "WITH {this} AS this RETURN this")
degree: Int @cypher(statement: "WITH {this} AS this RETURN SIZE((this)--())")
actors(first: Int = 3, offset: Int = 0): [Actor] @relation(name: "ACTED_IN", direction:"IN")
avgStars: Float
filmedIn: State @relation(name: "FILMED_IN", direction: "OUT")
scaleRating(scale: Int = 3): Float @cypher(statement: "WITH $this AS this RETURN $scale * this.imdbRating")
scaleRatingFloat(scale: Float = 1.5): Float @cypher(statement: "WITH $this AS this RETURN $scale * this.imdbRating")
}
type Genre {
_id: ID!
name: String
movies(first: Int = 3, offset: Int = 0): [Movie] @relation(name: "IN_GENRE", direction: "IN")
highestRatedMovie: Movie @cypher(statement: "MATCH (m:Movie)-[:IN_GENRE]->(this) RETURN m ORDER BY m.imdbRating DESC LIMIT 1")
}
type State {
name: String
}
interface Person {
id: ID!
name: String
}
type Actor {
id: ID!
name: String
movies: [Movie] @relation(name: "ACTED_IN", direction: "OUT")
}
type User implements Person {
id: ID!
name: String
}
enum BookGenre {
Mystery,
Science,
Math
}
type Book {
title: String!
genre: BookGenre
}
type Query {
Movie(id: ID, title: String, year: Int, plot: String, poster: String, imdbRating: Float, first: Int, offset: Int): [Movie]
MoviesByYear(year: Int): [Movie]
AllMovies: [Movie]
MovieById(movieId: ID!): Movie
GenresBySubstring(substring: String): [Genre] @cypher(statement: "MATCH (g:Genre) WHERE toLower(g.name) CONTAINS toLower($substring) RETURN g")
Books: [Book]
Actors: [Actor]
}
type Mutation {
CreateGenre(name: String): Genre @cypher(statement: "CREATE (g:Genre) SET g.name = $name RETURN g")
CreateMovie(movieId: ID!, title: String, year: Int, plot: String, poster: String, imdbRating: Float): Movie
CreateBook(title: String!,genre: BookGenre): Book @cypher(statement: "CREATE (b:Book) SET b.title = $title, b.genre = $genre RETURN b")
}
'''
)
query = QueryType()
mutation = MutationType()
@query.field('Actors')
@query.field('Movie')
@query.field('MoviesByYear')
@query.field('AllMovies')
@query.field('MovieById')
@query.field('GenresBySubstring')
@query.field('Books')
@mutation.field('CreateGenre')
@mutation.field('CreateMovie')
@mutation.field('CreateBook')
async def resolve(obj, info, **kwargs):
return await neo4j_graphql(obj, info.context, info, True, **kwargs)
schema = make_executable_schema(typeDefs, query, mutation)
driver = None
def context(request):
global driver
if driver is None:
driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "123456"))
return {'driver': driver, 'request': request}
root_value = {}
app = GraphQL(schema=schema, root_value=root_value, context_value=context, debug=True)
uvicorn.run(app)
driver.close()
| true | true |
f72ab07a46237929635bd213f93c69e456b0ae1e | 2,868 | py | Python | modules/storage/PrefabS3Scality.py | threefoldtech/jumpscale_prefab9 | 75cb6267618d9087d4a9a7eaad121a14e497f07d | [
"Apache-2.0"
] | null | null | null | modules/storage/PrefabS3Scality.py | threefoldtech/jumpscale_prefab9 | 75cb6267618d9087d4a9a7eaad121a14e497f07d | [
"Apache-2.0"
] | 31 | 2018-07-31T15:40:07.000Z | 2019-02-20T11:07:15.000Z | modules/storage/PrefabS3Scality.py | threefoldtech/jumpscale_prefab | 75cb6267618d9087d4a9a7eaad121a14e497f07d | [
"Apache-2.0"
] | null | null | null | from jumpscale import j
from time import sleep
app = j.tools.prefab._getBaseAppClass()
class PrefabS3Scality(app):
    """Prefab module that installs and runs Scality S3 (an S3-compatible
    object store) on a target machine through the prefab shell abstraction."""
    NAME = 's3scality'

    def install(self, start=False, storageLocation="/data/", metaLocation="/meta/"):
        """Install Scality S3 from source.

        Clones https://github.com/scality/S3, installs build prerequisites
        and node.js, runs ``npm install``, moves the checkout to
        ``$JSAPPSDIR/S3`` and records a ``start_location`` npm script that
        points data/metadata at *storageLocation* / *metaLocation*.

        :param start: when True, start the service after installing
        :param storageLocation: directory for object data (backing store)
        :param metaLocation: directory for metadata
        """
        self.prefab.system.package.mdupdate()
        self.prefab.system.package.install('build-essential')
        self.prefab.system.package.install('python2.7')
        self.prefab.core.dir_ensure(storageLocation)
        self.prefab.core.dir_ensure(metaLocation)
        self.prefab.core.dir_ensure('/opt/code/github/scality')
        path = self.prefab.tools.git.pullRepo('https://github.com/scality/S3.git', ssh=False)
        profile = self.prefab.bash.profileDefault
        profile.addPath(self.prefab.core.dir_paths['BINDIR'])
        profile.save()
        self.prefab.runtimes.nodejs.install()
        # --python points npm's native-module builds at python2.7
        self.prefab.core.run('cd {} && npm install --python=python2.7'.format(path), profile=True)
        self.prefab.core.dir_remove('$JSAPPSDIR/S3', recursive=True)
        self.prefab.core.dir_ensure('$JSAPPSDIR')
        self.prefab.core.run('mv {} $JSAPPSDIR/'.format(path))
        cmd = 'S3DATAPATH={data} S3METADATAPATH={meta} npm start'.format(
            data=storageLocation,
            meta=metaLocation,
        )
        # Inject a custom npm script into package.json so start() can launch
        # the service with the chosen data/metadata locations later.
        content = self.prefab.core.file_read('$JSAPPSDIR/S3/package.json')
        pkg = j.data.serializer.json.loads(content)
        pkg['scripts']['start_location'] = cmd
        content = j.data.serializer.json.dumps(pkg, indent=True)
        self.prefab.core.file_write('$JSAPPSDIR/S3/package.json', content)
        if start:
            self.start()

    def start(self, name=NAME):
        """Start the Scality S3 service via ``npm run start_location``,
        configuring NODE_PATH/PATH in the default bash profile first."""
        nodePath = '$BASEDIR/node/lib/node_modules'
        # Temporary. Should be removed after updating the building process
        self.prefab.core.dir_ensure('/data/data')
        self.prefab.core.dir_ensure('/data/meta')
        # Temporary. npm install should be added to install() function after updating the building process
        if not self.prefab.core.dir_exists('%s/npm-run-all' % nodePath):
            self.prefab.core.run('npm install npm-run-all')
        nodePath = self.prefab.core.replace('$BASEDIR/node/lib/node_modules/s3/node_modules:%s' % nodePath)
        # Only rewrite the profile when NODE_PATH actually changed.
        if self.prefab.bash.profileDefault.envGet('NODE_PATH') != nodePath:
            self.prefab.bash.profileDefault.envSet("NODE_PATH", nodePath)
            self.prefab.bash.profileDefault.addPath(self.prefab.core.replace("$BASEDIR/node/bin/"))
            self.prefab.bash.profileDefault.save()
        path = j.sal.fs.joinPaths(j.dirs.JSAPPSDIR, 'S3')
        self.prefab.core.run('cd {} && npm run start_location'.format(path), profile=True)

    def test(self):
        """Placeholder: should PUT/GET a file through the S3 API."""
        # put/get file over S3 interface using a python S3 lib
        raise NotImplementedError
| 43.454545 | 107 | 0.658996 | from jumpscale import j
from time import sleep
app = j.tools.prefab._getBaseAppClass()
class PrefabS3Scality(app):
NAME = 's3scality'
def install(self, start=False, storageLocation="/data/", metaLocation="/meta/"):
self.prefab.system.package.mdupdate()
self.prefab.system.package.install('build-essential')
self.prefab.system.package.install('python2.7')
self.prefab.core.dir_ensure(storageLocation)
self.prefab.core.dir_ensure(metaLocation)
self.prefab.core.dir_ensure('/opt/code/github/scality')
path = self.prefab.tools.git.pullRepo('https://github.com/scality/S3.git', ssh=False)
profile = self.prefab.bash.profileDefault
profile.addPath(self.prefab.core.dir_paths['BINDIR'])
profile.save()
self.prefab.runtimes.nodejs.install()
self.prefab.core.run('cd {} && npm install --python=python2.7'.format(path), profile=True)
self.prefab.core.dir_remove('$JSAPPSDIR/S3', recursive=True)
self.prefab.core.dir_ensure('$JSAPPSDIR')
self.prefab.core.run('mv {} $JSAPPSDIR/'.format(path))
cmd = 'S3DATAPATH={data} S3METADATAPATH={meta} npm start'.format(
data=storageLocation,
meta=metaLocation,
)
content = self.prefab.core.file_read('$JSAPPSDIR/S3/package.json')
pkg = j.data.serializer.json.loads(content)
pkg['scripts']['start_location'] = cmd
content = j.data.serializer.json.dumps(pkg, indent=True)
self.prefab.core.file_write('$JSAPPSDIR/S3/package.json', content)
if start:
self.start()
def start(self, name=NAME):
nodePath = '$BASEDIR/node/lib/node_modules'
self.prefab.core.dir_ensure('/data/data')
self.prefab.core.dir_ensure('/data/meta')
if not self.prefab.core.dir_exists('%s/npm-run-all' % nodePath):
self.prefab.core.run('npm install npm-run-all')
nodePath = self.prefab.core.replace('$BASEDIR/node/lib/node_modules/s3/node_modules:%s' % nodePath)
if self.prefab.bash.profileDefault.envGet('NODE_PATH') != nodePath:
self.prefab.bash.profileDefault.envSet("NODE_PATH", nodePath)
self.prefab.bash.profileDefault.addPath(self.prefab.core.replace("$BASEDIR/node/bin/"))
self.prefab.bash.profileDefault.save()
path = j.sal.fs.joinPaths(j.dirs.JSAPPSDIR, 'S3')
self.prefab.core.run('cd {} && npm run start_location'.format(path), profile=True)
def test(self):
raise NotImplementedError
| true | true |
f72ab0a07e44dd8dfddbb6dd81911777030d7752 | 3,238 | py | Python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/privatelinks/v2020_05_01/_configuration.py | NateLehman/azure-sdk-for-python | 82fcc5a5e9e01c3b7f6ab24fccbafad19149e400 | [
"MIT"
] | null | null | null | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/privatelinks/v2020_05_01/_configuration.py | NateLehman/azure-sdk-for-python | 82fcc5a5e9e01c3b7f6ab24fccbafad19149e400 | [
"MIT"
] | null | null | null | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/privatelinks/v2020_05_01/_configuration.py | NateLehman/azure-sdk-for-python | 82fcc5a5e9e01c3b7f6ab24fccbafad19149e400 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class ResourcePrivateLinkClientConfiguration(Configuration):
    """Configuration for ResourcePrivateLinkClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: The ID of the target subscription.
    :type subscription_id: str
    """

    def __init__(
        self,
        credential: "TokenCredential",
        subscription_id: str,
        **kwargs: Any
    ) -> None:
        super(ResourcePrivateLinkClientConfiguration, self).__init__(**kwargs)
        # Both arguments are mandatory; fail fast with the standard message.
        for param_name, param_value in (("credential", credential), ("subscription_id", subscription_id)):
            if param_value is None:
                raise ValueError("Parameter '{}' must not be None.".format(param_name))

        self.credential = credential
        self.subscription_id = subscription_id
        self.api_version = "2020-05-01"
        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
        kwargs.setdefault('sdk_moniker', 'mgmt-resource/{}'.format(VERSION))
        self._configure(**kwargs)

    def _configure(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Each pipeline policy may be supplied by the caller via kwargs;
        # otherwise build a default instance from the remaining kwargs.
        policy_defaults = (
            ('user_agent_policy', policies.UserAgentPolicy),
            ('headers_policy', policies.HeadersPolicy),
            ('proxy_policy', policies.ProxyPolicy),
            ('logging_policy', policies.NetworkTraceLoggingPolicy),
            ('http_logging_policy', ARMHttpLoggingPolicy),
            ('retry_policy', policies.RetryPolicy),
            ('custom_hook_policy', policies.CustomHookPolicy),
            ('redirect_policy', policies.RedirectPolicy),
        )
        for attr_name, policy_factory in policy_defaults:
            setattr(self, attr_name, kwargs.get(attr_name) or policy_factory(**kwargs))
        # Authentication has no local default: fall back to ARM challenge
        # authentication only when a credential is available.
        self.authentication_policy = kwargs.get('authentication_policy')
        if self.credential and not self.authentication_policy:
            self.authentication_policy = ARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
| 46.927536 | 125 | 0.693638 |
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
from azure.core.credentials import TokenCredential
class ResourcePrivateLinkClientConfiguration(Configuration):
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
super(ResourcePrivateLinkClientConfiguration, self).__init__(**kwargs)
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2020-05-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-resource/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs
):
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = ARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
| true | true |
f72ab179dcc3caf0aecde8a069b2cd8ed3626836 | 2,754 | py | Python | src/main/resources/classes/assassin/multihit.py | WynnLab/WynnLab | 9950bc1485fa187394c1b1326fa0b5c6b6a1ac96 | [
"MIT"
] | 2 | 2021-03-17T19:28:36.000Z | 2021-03-26T09:31:22.000Z | src/main/resources/classes/assassin/multihit.py | FauxKiwi/Wynnlab | 9950bc1485fa187394c1b1326fa0b5c6b6a1ac96 | [
"MIT"
] | 5 | 2021-06-08T12:13:40.000Z | 2021-08-09T15:04:23.000Z | src/main/resources/classes/assassin/multihit.py | FauxKiwi/Wynnlab | 9950bc1485fa187394c1b1326fa0b5c6b6a1ac96 | [
"MIT"
] | 4 | 2021-08-09T15:17:23.000Z | 2022-03-05T14:08:26.000Z | from org.bukkit import Particle, Sound
from org.bukkit.potion import PotionEffectType
from com.wynnlab.spells import PySpell
from com.wynnlab.util import BukkitUtils
class Spell(PySpell):
    """Assassin 'Multihit' spell (Bukkit/Jython): sweeps up nearby mobs,
    drags them with rapid small hits, then finishes with a knock-up strike."""

    def __init__(self):
        self.l = None          # anchor location in front of the caster
        self.entities = None   # mobs caught by the initial sweep
        self.shift = False     # sneaking at cast time -> weaker pull, no final launch

    def init(self):
        # Capture sneak state once at cast time.
        self.shift = self.player.isSneaking()

    def tick(self):
        # Effect runs every other tick only.
        if self.t % 2 != 0:
            return
        if self.t == 0:
            # Cast start: breaking invisibility triggers spell 5 ('vanish'
            # break? -- TODO confirm), play cast sounds, spawn sweep
            # particles and collect mobs in a 3-block box ahead.
            if self.player.hasPotionEffect(PotionEffectType.INVISIBILITY):
                self.castSpell('ASSASSIN', 5)
            self.sound(Sound.ENTITY_PLAYER_ATTACK_STRONG, .5, 1)
            self.sound(Sound.ENTITY_IRON_GOLEM_HURT, 1, 1.5)
            if self.clone:
                self.sound(Sound.ENTITY_BLAZE_AMBIENT, .3, 1.5)
            v = BukkitUtils.normalizeOnXZ(self.player.getEyeLocation().getDirection())
            self.l = self.player.getLocation().clone().add(v).add(0, .5, 0)
            self.particle(self.l, Particle.SWEEP_ATTACK, 5, .5, .5, .5, .1)
            self.entities = self.nearbyMobs(self.l, 3, 3, 3)
            self.particle(self.l.add(v), Particle.SWEEP_ATTACK, 5, .5, .5, .5, .1)
            self.particle(self.l.add(v), Particle.SWEEP_ATTACK, 5, .5, .5, .5, .1)
        elif self.t <= 20:
            # Drag phase: repeatedly pull caught mobs along the view
            # direction while dealing many small hits (27% damage each).
            for e in self.entities:
                # NOTE(review): the early return above guarantees
                # self.t % 2 == 0 here, so the else branch of this
                # conditional is dead code.
                e.setVelocity(self.player.getEyeLocation().getDirection().multiply(.05 if self.shift else .3).setY(.2).rotateAroundY((.1 * self.t) if self.t % 2 == 0 else (-.1 * self.t)))
                self.particle(e.getLocation(), Particle.SWEEP_ATTACK, 5, .5, .5, .5, .5)
                if self.clone:
                    self.particle(e.getLocation(), Particle.SPELL_WITCH, 7, .5, .5, .5, .2)
                    self.particle(e.getLocation(), Particle.SQUID_INK, 6, .5, .5, .5, .1)
                self.particle(e.getLocation(), Particle.CRIT, 7 if self.clone else 10, .5, .5, .5, .1)
                self.sound(e.getLocation(), Sound.ENTITY_PLAYER_ATTACK_SWEEP, 1, 1.3)
                self.sound(e.getLocation(), Sound.ENTITY_PLAYER_ATTACK_CRIT, .8, 1.6)
                self.damage(e, False, .27)
        else:
            # Final hit: launch targets (unless sneaking) and deal the big
            # 120% strike with elemental conversion ratios.
            for e in self.entities:
                if not self.shift:
                    e.setVelocity(self.player.getEyeLocation().getDirection().setY(.5))
                self.sound(e.getLocation(), Sound.ENTITY_PLAYER_ATTACK_KNOCKBACK, 1, 1.3)
                self.damage(e, False, 1.2, .2, 0, .3, .5, 0, 0)
                if self.clone:
                    self.sound(e.getLocation(), Sound.ENTITY_BLAZE_DEATH, 1, 1.2)
                    self.sound(e.getLocation(), Sound.ENTITY_BLAZE_AMBIENT, 1, 1.6)
                    self.sound(e.getLocation(), Sound.ENTITY_FIREWORK_ROCKET_BLAST_FAR, .5, 1)
| 41.727273 | 187 | 0.576253 | from org.bukkit import Particle, Sound
from org.bukkit.potion import PotionEffectType
from com.wynnlab.spells import PySpell
from com.wynnlab.util import BukkitUtils
class Spell(PySpell):
def __init__(self):
self.l = None
self.entities = None
self.shift = False
def init(self):
self.shift = self.player.isSneaking()
def tick(self):
if self.t % 2 != 0:
return
if self.t == 0:
if self.player.hasPotionEffect(PotionEffectType.INVISIBILITY):
self.castSpell('ASSASSIN', 5)
self.sound(Sound.ENTITY_PLAYER_ATTACK_STRONG, .5, 1)
self.sound(Sound.ENTITY_IRON_GOLEM_HURT, 1, 1.5)
if self.clone:
self.sound(Sound.ENTITY_BLAZE_AMBIENT, .3, 1.5)
v = BukkitUtils.normalizeOnXZ(self.player.getEyeLocation().getDirection())
self.l = self.player.getLocation().clone().add(v).add(0, .5, 0)
self.particle(self.l, Particle.SWEEP_ATTACK, 5, .5, .5, .5, .1)
self.entities = self.nearbyMobs(self.l, 3, 3, 3)
self.particle(self.l.add(v), Particle.SWEEP_ATTACK, 5, .5, .5, .5, .1)
self.particle(self.l.add(v), Particle.SWEEP_ATTACK, 5, .5, .5, .5, .1)
elif self.t <= 20:
for e in self.entities:
e.setVelocity(self.player.getEyeLocation().getDirection().multiply(.05 if self.shift else .3).setY(.2).rotateAroundY((.1 * self.t) if self.t % 2 == 0 else (-.1 * self.t)))
self.particle(e.getLocation(), Particle.SWEEP_ATTACK, 5, .5, .5, .5, .5)
if self.clone:
self.particle(e.getLocation(), Particle.SPELL_WITCH, 7, .5, .5, .5, .2)
self.particle(e.getLocation(), Particle.SQUID_INK, 6, .5, .5, .5, .1)
self.particle(e.getLocation(), Particle.CRIT, 7 if self.clone else 10, .5, .5, .5, .1)
self.sound(e.getLocation(), Sound.ENTITY_PLAYER_ATTACK_SWEEP, 1, 1.3)
self.sound(e.getLocation(), Sound.ENTITY_PLAYER_ATTACK_CRIT, .8, 1.6)
self.damage(e, False, .27)
else:
for e in self.entities:
if not self.shift:
e.setVelocity(self.player.getEyeLocation().getDirection().setY(.5))
self.sound(e.getLocation(), Sound.ENTITY_PLAYER_ATTACK_KNOCKBACK, 1, 1.3)
self.damage(e, False, 1.2, .2, 0, .3, .5, 0, 0)
if self.clone:
self.sound(e.getLocation(), Sound.ENTITY_BLAZE_DEATH, 1, 1.2)
self.sound(e.getLocation(), Sound.ENTITY_BLAZE_AMBIENT, 1, 1.6)
self.sound(e.getLocation(), Sound.ENTITY_FIREWORK_ROCKET_BLAST_FAR, .5, 1)
| true | true |
f72ab1a21ac26d83b6dfe2d7a8390897f1a6f645 | 5,138 | py | Python | DSPdu.py | Francisobiagwu/SecureDocumentSharing | d8fe27f3ca4d1b470a8cbe6d3e475226bdb796c1 | [
"MIT"
] | 2 | 2018-06-21T18:06:15.000Z | 2021-08-19T15:27:55.000Z | DSPdu.py | Francisobiagwu/DocumentSharing | d8fe27f3ca4d1b470a8cbe6d3e475226bdb796c1 | [
"MIT"
] | null | null | null | DSPdu.py | Francisobiagwu/DocumentSharing | d8fe27f3ca4d1b470a8cbe6d3e475226bdb796c1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
@author: Francis Obiagwu
@software: SecureDocumentSharing
@file: DSPdu.py
@time: 6/6/18 7:16 PM
"""
import binascii
import struct
from datetime import datetime
from DSCodes import DSCode
class DSPdu:
    """Generic protocol data unit (PDU) builder for the document sharing
    protocol.

    Field layout (name -> struct format, in wire order):

        MESSAGE_TYPE    12s     TIMESTAMP       32s
        ERROR_CODES     i       FLAG            6s
        CHANGED_SECTION q       SECTION-ID      q
        RESERVED-1/2/3  32s x3  DATA            100s
        DATA_SIZE       q       CHECKSUM        q

    The raw field sizes sum to 282 bytes; the packed size is larger because
    native struct alignment inserts padding, so always use
    ``get_buffer_size()`` rather than summing the field sizes.
    (The original header comment claimed 658 bytes, which matched neither.)
    """

    def __init__(self):
        """Build the struct format string and the per-field size table."""
        layout = [('MESSAGE_TYPE', '12s'), ('TIMESTAMP', '32s'), ('ERROR_CODES', 'i'), ('FLAG', '6s'),
                  ('CHANGED_SECTION', 'q'), ('SECTION-ID', 'q'), ('RESERVED-1', '32s'),
                  ('RESERVED-2', '32s'), ('RESERVED-3', '32s'), ('DATA', '100s'),
                  ('DATA_SIZE', 'q'), ('CHECKSUM', 'q')]
        self.pdu_dic = {}          # field name -> packed size of that field in bytes
        self.format = ''           # accumulated struct format string
        self.null_bytes = b'\x00'  # padding byte used for reserved fields
        self.parts_index = []      # positional index of every field
        for index, (name, fmt) in enumerate(layout):
            self.parts_index.append(index)
            self.format += ' ' + fmt
            self.pdu_dic[name] = struct.Struct(fmt).size
        self.s = struct.Struct(self.format)
        self.size = self.s.size
        self.data_size = self.pdu_dic['DATA']

    def get_pdu_parts_index(self):
        """Return the positional indices of the PDU fields."""
        return self.parts_index

    def get_data_size(self):
        """Return the size in bytes of the DATA field."""
        return self.data_size

    def get_other_pdu_parts(self, request, data):
        """Build the standard client-side PDU components for *data*.

        :param bytes request: message type / request identifier
        :param bytes data: payload bytes
        :return: list ``[request, checksum, timestamp, DSCode.OK, data]``
            (clients are only allowed to send DSCode.OK)
        """
        timestamp = self.get_time()
        checksum = self.get_checksum(timestamp, data)
        return [request, checksum, timestamp, DSCode.OK, data]

    @staticmethod
    def get_time():
        """Return the current local time as UTF-8 encoded bytes."""
        return str(datetime.now()).encode('utf-8')

    @staticmethod
    def get_checksum(timestamp, data):
        """Return the CRC32 checksum of ``timestamp + data``.

        Fixed: the previous version printed a message and implicitly
        returned ``None`` on TypeError, which silently produced a corrupt
        PDU later; it now re-raises after reporting.
        """
        try:
            return binascii.crc32(timestamp + data)
        except TypeError:
            print('This value {} is not a byte'.format(data))
            raise

    def get_reserved_1(self):
        """Return the padding byte for the RESERVED-1 field."""
        return self.null_bytes

    def get_reserved_2(self):
        """Return the padding byte for the RESERVED-2 field."""
        return self.null_bytes

    def get_reserved_3(self):
        """Return the padding byte for the RESERVED-3 field."""
        return self.null_bytes

    def get_flag(self):
        """Placeholder: flag handling is not implemented yet."""
        pass

    def pack(self, array):
        """Pack *array* (one value per PDU field, in layout order) into bytes.

        :param array: sequence of 12 values matching the field formats
        :return: packed ``bytes`` of length ``get_buffer_size()``
        """
        self.s = struct.Struct(self.format)
        self.size = self.s.size
        return self.s.pack(*array)

    def unpack(self, packed_pdu):
        """Unpack *packed_pdu* bytes into a tuple of PDU field values.

        :param bytes packed_pdu: buffer of exactly ``get_buffer_size()`` bytes
        :return: tuple of field values (byte fields keep their NUL padding;
            use :meth:`remove_padding` to strip it)
        """
        self.s = struct.Struct(self.format)
        return self.s.unpack(packed_pdu)

    def get_pdu_part_names(self):
        """Return the PDU field names in layout order.

        Fixed: previously returned the never-defined ``self.pdu_part_list``,
        which raised AttributeError on every call.
        """
        return list(self.pdu_dic)

    @staticmethod
    def remove_padding(unpacked_pdu):
        """Strip NUL padding from the byte fields of an unpacked PDU.

        Bytes items are decoded as UTF-8 and truncated at the first NUL;
        non-bytes items (e.g. the int fields) pass through unchanged.
        Fixed: the old ``find('\\x00') > 0`` test kept the padding whenever
        the NUL sat at index 0 (i.e. an empty/all-padding field).

        :param unpacked_pdu: iterable of unpacked field values
        :return: list of cleaned values
        """
        cleaned = []
        for item in unpacked_pdu:
            if isinstance(item, bytes):
                cleaned.append(item.decode('utf-8').split('\x00', 1)[0])
            else:
                cleaned.append(item)
        return cleaned

    def get_buffer_size(self):
        """Return the total packed PDU size in bytes (alignment included)."""
        return self.size
| 30.583333 | 127 | 0.527053 |
import binascii
import struct
from datetime import datetime
from DSCodes import DSCode
class DSPdu:
def __init__( self ):
elf ):
pass
def pack( self, array ):
self.s = struct.Struct(self.format)
self.size = self.s.size
return self.s.pack(*array)
def unpack( self, packed_pdu ):
self.s = struct.Struct(self.format)
return self.s.unpack(packed_pdu)
def get_pdu_part_names( self ):
return self.pdu_part_list
@staticmethod
def remove_padding( unpacked_pdu ):
array = []
for item in unpacked_pdu:
if type(item) is bytes:
item = item.decode('utf-8')
padding_index = item.find('\x00')
if padding_index > 0:
array.append(item[:padding_index])
else:
array.append(item)
else:
array.append(item)
return array
def get_buffer_size( self ):
return self.size
| true | true |
f72ab2180c0e9b438ab38e4580406e4f2106a777 | 731 | py | Python | main.py | beetrandahiya/project-Zurich | e46584c1e036ec95a9f612d04a3855349568e082 | [
"MIT"
] | null | null | null | main.py | beetrandahiya/project-Zurich | e46584c1e036ec95a9f612d04a3855349568e082 | [
"MIT"
] | null | null | null | main.py | beetrandahiya/project-Zurich | e46584c1e036ec95a9f612d04a3855349568e082 | [
"MIT"
] | null | null | null | import numpy as np
#test inputs
inputs = [1, 2, 3, 2.5]
weights = [[0.2, 0.8, -0.5, 1],
[0.5, -0.91, 0.26, -0.5],
[-0.26, -0.27, 0.17, 0.87]]
biases = [2, 3, 0.5]
def neuron_output(inputs, weights, bias):
    """Return the weighted sum of *inputs* (dot product with *weights*)
    plus *bias* -- a single neuron with no activation function."""
    total = 0
    for position, value in enumerate(inputs):
        total += value * weights[position]
    return total + bias
#this can also be done with numpy because its just the dot product of weights and inputs
#np.dot(weights,inputs) + bias
def neuron_layer_output(inputs, weights, biases):
    """Return one output per neuron in a fully-connected layer: each neuron
    gets the same *inputs* but its own weight row and bias."""
    return [neuron_output(inputs, weights[row], biases[row])
            for row in range(len(biases))]
print(neuron_layer_output(inputs, weights, biases))
# for input in batches
# we will have to use matrix operations to calculate outputs
| 22.151515 | 88 | 0.674419 | import numpy as np
inputs = [1, 2, 3, 2.5]
weights = [[0.2, 0.8, -0.5, 1],
[0.5, -0.91, 0.26, -0.5],
[-0.26, -0.27, 0.17, 0.87]]
biases = [2, 3, 0.5]
def neuron_output(inputs, weights,bias):
return sum(inputs[i] * weights[i] for i in range(len(inputs)))+ bias
def neuron_layer_output(inputs, weights, biases):
outputs=[]
for i in range(len(biases)):
outputs.append(neuron_output(inputs,weights[i],biases[i]))
return outputs
print(neuron_layer_output(inputs, weights, biases))
| true | true |
f72ab2e85de44330df2bccc1d1ebf94901b9c48b | 387 | py | Python | students/K33401/Goncharov_Vladimir/Lr3/hotel/hotel/asgi.py | ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021 | bb91c91a56d21cec2b12ae4cc722eaa652a88420 | [
"MIT"
] | 4 | 2020-09-03T15:41:42.000Z | 2021-12-24T15:28:20.000Z | students/K33401/Goncharov_Vladimir/Lr3/hotel/hotel/asgi.py | ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021 | bb91c91a56d21cec2b12ae4cc722eaa652a88420 | [
"MIT"
] | 48 | 2020-09-13T20:22:42.000Z | 2021-04-30T11:13:30.000Z | students/K33401/Goncharov_Vladimir/Lr3/hotel/hotel/asgi.py | ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021 | bb91c91a56d21cec2b12ae4cc722eaa652a88420 | [
"MIT"
] | 69 | 2020-09-06T10:32:37.000Z | 2021-11-28T18:13:17.000Z | """
ASGI config for hotel project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# The settings module must be in the environment before the ASGI app is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hotel.settings')
application = get_asgi_application()
| 22.764706 | 78 | 0.782946 |
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hotel.settings')
application = get_asgi_application()
| true | true |
f72ab3141e4951a0fbf2744f08280c033d6a9acf | 13,023 | py | Python | imgcls/modeling/backbone/mobilenet.py | TuranSKT/detectron2_class | c90e68abbd39afa8c34d83ac760cabf3b5d02868 | [
"MIT"
] | 22 | 2020-06-09T11:06:15.000Z | 2022-03-29T16:24:23.000Z | imgcls/modeling/backbone/mobilenet.py | TuranSKT/detectron2_class | c90e68abbd39afa8c34d83ac760cabf3b5d02868 | [
"MIT"
] | 4 | 2020-07-09T16:39:48.000Z | 2020-11-25T13:34:52.000Z | imgcls/modeling/backbone/mobilenet.py | TuranSKT/detectron2_class | c90e68abbd39afa8c34d83ac760cabf3b5d02868 | [
"MIT"
] | 9 | 2020-06-10T09:55:09.000Z | 2021-08-20T12:55:26.000Z | '''
@Copyright (c) tkianai All Rights Reserved.
@Author : tkianai
@Github : https://github.com/tkianai
@Date : 2020-04-26 14:14:18
@FilePath : /ImageCls.detectron2/imgcls/modeling/backbone/mobilenet.py
@Description :
'''
import torch
import torch.nn as nn
from detectron2.layers import Conv2d, ShapeSpec
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.backbone.fpn import FPN, LastLevelMaxPool, LastLevelP6P7
__all__ = [
'build_mnetv1_backbone',
'build_mnetv2_backbone',
]
def conv_bn_leaky(inp, oup, stride=1, leaky=0):
    """3x3 conv (padding 1, no bias) -> BatchNorm -> LeakyReLU block.

    With the default ``leaky=0`` the activation behaves like plain ReLU.
    """
    layers = [
        Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.LeakyReLU(negative_slope=leaky, inplace=True),
    ]
    return nn.Sequential(*layers)
def conv_dw_leaky(inp, oup, stride, leaky=0.1):
return nn.Sequential(
Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
nn.BatchNorm2d(inp),
nn.LeakyReLU(negative_slope=leaky, inplace=True),
Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.LeakyReLU(negative_slope=leaky, inplace=True),
)
class MobileNetV1(Backbone):
    """MobileNetV1 backbone for detectron2.

    Exposes intermediate feature maps named ``stem`` and ``mob2``..``mob5``
    (strides 2, 4, 8, 16, 32) and, optionally, a classification head output
    named ``linear``.
    """

    def __init__(self, cfg, data_channel, width_mult=1.0, out_features=None, num_classes=None):
        """
        :param cfg: detectron2 config node (unused directly here)
        :param data_channel: number of input image channels
        :param width_mult: channel width multiplier applied to every stage
        :param out_features: names of feature maps to return from forward()
        :param num_classes: when set, append avgpool + linear classifier
        """
        super().__init__()
        self.num_classes = num_classes
        input_channel = 32
        # scale input channel
        input_channel = int(input_channel * width_mult)
        # stem
        current_stride = 2
        name = "stem"
        self.stem = conv_bn_leaky(
            data_channel, input_channel, current_stride, leaky=0.1)
        self._out_feature_strides = {name: current_stride}
        self._out_feature_channels = {name: input_channel}
        # body
        dw_setting = [
            # c, n, s
            [64, 1, 1],
            [128, 2, 2],
            [256, 2, 2],
            [512, 6, 2],
            [1024, 2, 2],
        ]
        # 1-based indices (into self.features) of the blocks whose outputs
        # become mob2..mob5.
        self.return_features_indices = [3, 5, 11, 13]
        self.features = nn.ModuleList([])
        # building depthwise conv block
        for c, n, s in dw_setting:
            output_channel = int(c * width_mult)
            for i in range(n):
                # the first one applying stride
                if i == 0:
                    self.features.append(conv_dw_leaky(
                        input_channel, output_channel, s))
                else:
                    self.features.append(conv_dw_leaky(
                        input_channel, output_channel, 1))
                # update input channel for next block
                input_channel = output_channel
                # check output this feature map?
                if len(self.features) in self.return_features_indices:
                    name = "mob{}".format(
                        self.return_features_indices.index(len(self.features)) + 2)
                    self._out_feature_channels.update({
                        name: output_channel
                    })
                    current_stride *= 2
                    self._out_feature_strides.update({
                        name: current_stride
                    })
        if num_classes is not None:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            self.linear = nn.Linear(input_channel, num_classes)
            nn.init.normal_(self.linear.weight, std=0.01)
            name = "linear"
        # Default output: the last feature produced above ("mob5" or "linear").
        if out_features is None:
            out_features = [name]
        self._out_features = out_features
        assert len(self._out_features)
        self._initialize_weights()

    def _initialize_weights(self):
        """He-style init for convs, constant init for BN, normal for linear."""
        for m in self.modules():
            if isinstance(m, Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, (2. / n) ** 0.5)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                # n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()

    def freeze(self, freeze_at):
        """Freeze the stem (freeze_at >= 1) and the first stages of the body
        (freeze_at >= 2); returns self for chaining."""
        if freeze_at > 0:
            # freeze stem
            for p in self.stem.parameters():
                p.requires_grad = False
        if freeze_at > 1:
            # freeze features
            freeze_at = freeze_at - 2
            freeze_layers = self.return_features_indices[freeze_at] if freeze_at < len(
                self.return_features_indices) else self.return_features_indices[-1]
            for layer_index in range(freeze_layers):
                for p in self.features[layer_index].parameters():
                    p.requires_grad = False
        return self

    def forward(self, x):
        """Return a dict mapping each requested feature name to its tensor."""
        outputs = {}
        x = self.stem(x)
        if "stem" in self._out_features:
            outputs["stem"] = x
        for i, m in enumerate(self.features, 1):
            x = m(x)
            if i in self.return_features_indices:
                name = "mob{}".format(
                    self.return_features_indices.index(i) + 2)
                if name in self._out_features:
                    outputs[name] = x
        if self.num_classes is not None:
            x = self.avgpool(x)
            x = torch.flatten(x, 1)
            x = self.linear(x)
            if "linear" in self._out_features:
                outputs["linear"] = x
        return outputs
def conv_bn(inp, oup, stride):
    """3x3 conv (padding 1, no bias) -> BatchNorm -> ReLU6 block
    (MobileNetV2 stem)."""
    modules = [
        Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True),
    ]
    return nn.Sequential(*modules)
def conv_1x1_bn(inp, oup):
    """1x1 pointwise conv (no bias) -> BatchNorm -> ReLU6 block."""
    modules = [
        Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True),
    ]
    return nn.Sequential(*modules)
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block: optional 1x1 expansion, 3x3
    depthwise conv, 1x1 linear projection, with a skip connection when the
    spatial size and channel count are unchanged."""

    def __init__(self, inp, oup, stride, expand_ratio):
        super().__init__()
        assert stride in [1, 2]
        self.stride = stride
        hidden_dim = int(round(inp * expand_ratio))
        # Residual shortcut only when shape is preserved.
        self.use_res_connect = self.stride == 1 and inp == oup

        layers = []
        if expand_ratio != 1:
            # pointwise expansion
            layers += [
                Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
            ]
        layers += [
            # depthwise
            Conv2d(hidden_dim, hidden_dim, 3, stride,
                   1, groups=hidden_dim, bias=False),
            nn.BatchNorm2d(hidden_dim),
            nn.ReLU6(inplace=True),
            # pointwise-linear projection (no activation)
            Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv(x)
        return x + out if self.use_res_connect else out
class MobileNetV2(Backbone):
    """MobileNetV2 backbone for detectron2.

    Exposes intermediate feature maps named ``stem`` and ``mob2``..``mob5``
    (downsample strides 2, 4, 8, 16, 32) and, optionally, a classification
    head output named ``linear``.
    """

    def __init__(self, cfg, data_channel, width_mult=1.0, out_features=None, num_classes=None):
        """
        :param cfg: detectron2 config node (unused directly here)
        :param data_channel: number of input image channels
        :param width_mult: channel width multiplier applied to every stage
        :param out_features: names of feature maps to return from forward()
        :param num_classes: when set, append avgpool + linear classifier
        """
        super().__init__()
        self.num_classes = num_classes
        input_channel = 32
        # scale input channel
        input_channel = int(input_channel * width_mult)
        # stem
        current_stride = 2
        name = "stem"
        self.stem = conv_bn(data_channel, input_channel, current_stride)
        self._out_feature_strides = {name: current_stride}
        self._out_feature_channels = {name: input_channel}
        # body
        block = InvertedResidual
        inverted_residual_setting = [
            # t, c, n, s
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]
        # 1-based indices (into self.features) of the blocks whose outputs
        # become mob2..mob5.
        self.return_features_indices = [3, 6, 13, 17]
        self.features = nn.ModuleList([])
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = int(c * width_mult)
            for i in range(n):
                # the first one applying stride
                if i == 0:
                    self.features.append(
                        block(input_channel, output_channel, s, expand_ratio=t))
                else:
                    self.features.append(
                        block(input_channel, output_channel, 1, expand_ratio=t))
                # update input channel for next block
                input_channel = output_channel
                # check output this feature map?
                if len(self.features) in self.return_features_indices:
                    name = "mob{}".format(
                        self.return_features_indices.index(len(self.features)) + 2)
                    self._out_feature_channels.update({
                        name: output_channel
                    })
                    current_stride *= 2
                    self._out_feature_strides.update({
                        name: current_stride
                    })
        if num_classes is not None:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            self.linear = nn.Linear(input_channel, num_classes)
            nn.init.normal_(self.linear.weight, std=0.01)
            name = "linear"
        # Default output: the last feature produced above ("mob5" or "linear").
        if out_features is None:
            out_features = [name]
        self._out_features = out_features
        assert len(self._out_features)
        self._initialize_weights()

    def _initialize_weights(self):
        """He-style init for convs, constant init for BN, normal for linear."""
        for m in self.modules():
            if isinstance(m, Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, (2. / n) ** 0.5)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                # n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()

    def freeze(self, freeze_at):
        """Freeze the stem (freeze_at >= 1) and the first stages of the body
        (freeze_at >= 2); returns self for chaining."""
        if freeze_at > 0:
            # freeze stem
            for p in self.stem.parameters():
                p.requires_grad = False
        if freeze_at > 1:
            # freeze features
            freeze_at = freeze_at - 2
            freeze_layers = self.return_features_indices[freeze_at] if freeze_at < len(
                self.return_features_indices) else self.return_features_indices[-1]
            for layer_index in range(freeze_layers):
                for p in self.features[layer_index].parameters():
                    p.requires_grad = False
        return self

    def forward(self, x):
        """Return a dict mapping each requested feature name to its tensor."""
        outputs = {}
        x = self.stem(x)
        if "stem" in self._out_features:
            outputs["stem"] = x
        # res2 -> stride 2**2
        # res3 -> stride 2**3
        # output downsample stride: [4, 8, 16, 32]
        for i, m in enumerate(self.features, 1):
            x = m(x)
            if i in self.return_features_indices:
                name = "mob{}".format(
                    self.return_features_indices.index(i) + 2)
                if name in self._out_features:
                    outputs[name] = x
        if self.num_classes is not None:
            x = self.avgpool(x)
            x = torch.flatten(x, 1)
            x = self.linear(x)
            if "linear" in self._out_features:
                outputs["linear"] = x
        return outputs
@BACKBONE_REGISTRY.register()
def build_mnetv1_backbone(cfg, input_shape: ShapeSpec):
freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT
out_features = cfg.MODEL.MNET.OUT_FEATURES
width_mult = cfg.MODEL.MNET.WIDTH_MULT
num_classes = cfg.MODEL.CLSNET.NUM_CLASSES if cfg.MODEL.CLSNET.ENABLE else None
model = MobileNetV1(cfg, input_shape.channels, width_mult=width_mult,
out_features=out_features, num_classes=num_classes).freeze(freeze_at)
return model
@BACKBONE_REGISTRY.register()
def build_mnetv2_backbone(cfg, input_shape: ShapeSpec):
freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT
out_features = cfg.MODEL.MNET.OUT_FEATURES
width_mult = cfg.MODEL.MNET.WIDTH_MULT
num_classes = cfg.MODEL.CLSNET.NUM_CLASSES if cfg.MODEL.CLSNET.ENABLE else None
model = MobileNetV2(cfg, input_shape.channels, width_mult=width_mult,
out_features=out_features, num_classes=num_classes).freeze(freeze_at)
return model
| 35.581967 | 95 | 0.543807 |
import torch
import torch.nn as nn
from detectron2.layers import Conv2d, ShapeSpec
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.backbone.fpn import FPN, LastLevelMaxPool, LastLevelP6P7
__all__ = [
'build_mnetv1_backbone',
'build_mnetv2_backbone',
]
def conv_bn_leaky(inp, oup, stride=1, leaky=0):
return nn.Sequential(
Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.LeakyReLU(negative_slope=leaky, inplace=True)
)
def conv_dw_leaky(inp, oup, stride, leaky=0.1):
return nn.Sequential(
Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
nn.BatchNorm2d(inp),
nn.LeakyReLU(negative_slope=leaky, inplace=True),
Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.LeakyReLU(negative_slope=leaky, inplace=True),
)
class MobileNetV1(Backbone):
def __init__(self, cfg, data_channel, width_mult=1.0, out_features=None, num_classes=None):
super().__init__()
self.num_classes = num_classes
input_channel = 32
input_channel = int(input_channel * width_mult)
current_stride = 2
name = "stem"
self.stem = conv_bn_leaky(
data_channel, input_channel, current_stride, leaky=0.1)
self._out_feature_strides = {name: current_stride}
self._out_feature_channels = {name: input_channel}
dw_setting = [
[64, 1, 1],
[128, 2, 2],
[256, 2, 2],
[512, 6, 2],
[1024, 2, 2],
]
self.return_features_indices = [3, 5, 11, 13]
self.features = nn.ModuleList([])
for c, n, s in dw_setting:
output_channel = int(c * width_mult)
for i in range(n):
if i == 0:
self.features.append(conv_dw_leaky(
input_channel, output_channel, s))
else:
self.features.append(conv_dw_leaky(
input_channel, output_channel, 1))
input_channel = output_channel
if len(self.features) in self.return_features_indices:
name = "mob{}".format(
self.return_features_indices.index(len(self.features)) + 2)
self._out_feature_channels.update({
name: output_channel
})
current_stride *= 2
self._out_feature_strides.update({
name: current_stride
})
if num_classes is not None:
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.linear = nn.Linear(input_channel, num_classes)
nn.init.normal_(self.linear.weight, std=0.01)
name = "linear"
if out_features is None:
out_features = [name]
self._out_features = out_features
assert len(self._out_features)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, (2. / n) ** 0.5)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def freeze(self, freeze_at):
if freeze_at > 0:
for p in self.stem.parameters():
p.requires_grad = False
if freeze_at > 1:
freeze_at = freeze_at - 2
freeze_layers = self.return_features_indices[freeze_at] if freeze_at < len(
self.return_features_indices) else self.return_features_indices[-1]
for layer_index in range(freeze_layers):
for p in self.features[layer_index].parameters():
p.requires_grad = False
return self
def forward(self, x):
outputs = {}
x = self.stem(x)
if "stem" in self._out_features:
outputs["stem"] = x
for i, m in enumerate(self.features, 1):
x = m(x)
if i in self.return_features_indices:
name = "mob{}".format(
self.return_features_indices.index(i) + 2)
if name in self._out_features:
outputs[name] = x
if self.num_classes is not None:
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.linear(x)
if "linear" in self._out_features:
outputs["linear"] = x
return outputs
def conv_bn(inp, oup, stride):
return nn.Sequential(
Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
def conv_1x1_bn(inp, oup):
return nn.Sequential(
Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super().__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(round(inp * expand_ratio))
self.use_res_connect = self.stride == 1 and inp == oup
if expand_ratio == 1:
self.conv = nn.Sequential(
Conv2d(inp, hidden_dim, 3, stride, 1,
groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
else:
self.conv = nn.Sequential(
Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
Conv2d(hidden_dim, hidden_dim, 3, stride,
1, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class MobileNetV2(Backbone):
def __init__(self, cfg, data_channel, width_mult=1.0, out_features=None, num_classes=None):
super().__init__()
self.num_classes = num_classes
input_channel = 32
input_channel = int(input_channel * width_mult)
current_stride = 2
name = "stem"
self.stem = conv_bn(data_channel, input_channel, current_stride)
self._out_feature_strides = {name: current_stride}
self._out_feature_channels = {name: input_channel}
block = InvertedResidual
inverted_residual_setting = [
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
self.return_features_indices = [3, 6, 13, 17]
self.features = nn.ModuleList([])
for t, c, n, s in inverted_residual_setting:
output_channel = int(c * width_mult)
for i in range(n):
if i == 0:
self.features.append(
block(input_channel, output_channel, s, expand_ratio=t))
else:
self.features.append(
block(input_channel, output_channel, 1, expand_ratio=t))
input_channel = output_channel
if len(self.features) in self.return_features_indices:
name = "mob{}".format(
self.return_features_indices.index(len(self.features)) + 2)
self._out_feature_channels.update({
name: output_channel
})
current_stride *= 2
self._out_feature_strides.update({
name: current_stride
})
if num_classes is not None:
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.linear = nn.Linear(input_channel, num_classes)
nn.init.normal_(self.linear.weight, std=0.01)
name = "linear"
if out_features is None:
out_features = [name]
self._out_features = out_features
assert len(self._out_features)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, (2. / n) ** 0.5)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def freeze(self, freeze_at):
if freeze_at > 0:
for p in self.stem.parameters():
p.requires_grad = False
if freeze_at > 1:
freeze_at = freeze_at - 2
freeze_layers = self.return_features_indices[freeze_at] if freeze_at < len(
self.return_features_indices) else self.return_features_indices[-1]
for layer_index in range(freeze_layers):
for p in self.features[layer_index].parameters():
p.requires_grad = False
return self
def forward(self, x):
outputs = {}
x = self.stem(x)
if "stem" in self._out_features:
outputs["stem"] = x
for i, m in enumerate(self.features, 1):
x = m(x)
if i in self.return_features_indices:
name = "mob{}".format(
self.return_features_indices.index(i) + 2)
if name in self._out_features:
outputs[name] = x
if self.num_classes is not None:
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.linear(x)
if "linear" in self._out_features:
outputs["linear"] = x
return outputs
@BACKBONE_REGISTRY.register()
def build_mnetv1_backbone(cfg, input_shape: ShapeSpec):
freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT
out_features = cfg.MODEL.MNET.OUT_FEATURES
width_mult = cfg.MODEL.MNET.WIDTH_MULT
num_classes = cfg.MODEL.CLSNET.NUM_CLASSES if cfg.MODEL.CLSNET.ENABLE else None
model = MobileNetV1(cfg, input_shape.channels, width_mult=width_mult,
out_features=out_features, num_classes=num_classes).freeze(freeze_at)
return model
@BACKBONE_REGISTRY.register()
def build_mnetv2_backbone(cfg, input_shape: ShapeSpec):
freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT
out_features = cfg.MODEL.MNET.OUT_FEATURES
width_mult = cfg.MODEL.MNET.WIDTH_MULT
num_classes = cfg.MODEL.CLSNET.NUM_CLASSES if cfg.MODEL.CLSNET.ENABLE else None
model = MobileNetV2(cfg, input_shape.channels, width_mult=width_mult,
out_features=out_features, num_classes=num_classes).freeze(freeze_at)
return model
| true | true |
f72ab477380e68f511e89a12fe5e0154052fb2b7 | 854 | py | Python | sleap/io/format/text.py | jens-k/sleap | 4e99ed037f1f7f41d9f15e2efaac638fc7e12b09 | [
"BSD-3-Clause-Clear"
] | null | null | null | sleap/io/format/text.py | jens-k/sleap | 4e99ed037f1f7f41d9f15e2efaac638fc7e12b09 | [
"BSD-3-Clause-Clear"
] | null | null | null | sleap/io/format/text.py | jens-k/sleap | 4e99ed037f1f7f41d9f15e2efaac638fc7e12b09 | [
"BSD-3-Clause-Clear"
] | null | null | null | from .adaptor import Adaptor, SleapObjectType
from .filehandle import FileHandle
class TextAdaptor(Adaptor):
@property
def handles(self):
return SleapObjectType.misc
@property
def default_ext(self):
return "txt"
@property
def all_exts(self):
return ["txt", "log"]
@property
def name(self):
return "Text file"
def can_read_file(self, file: FileHandle):
return True # FIXME
def can_write_filename(self, filename: str) -> bool:
return True
def does_read(self) -> bool:
return True
def does_write(self) -> bool:
return True
def read(self, file: FileHandle, *args, **kwargs):
return file.text
def write(self, filename: str, source_object: str):
with open(filename, "w") as f:
f.write(source_object)
| 21.35 | 56 | 0.619438 | from .adaptor import Adaptor, SleapObjectType
from .filehandle import FileHandle
class TextAdaptor(Adaptor):
@property
def handles(self):
return SleapObjectType.misc
@property
def default_ext(self):
return "txt"
@property
def all_exts(self):
return ["txt", "log"]
@property
def name(self):
return "Text file"
def can_read_file(self, file: FileHandle):
return True
def can_write_filename(self, filename: str) -> bool:
return True
def does_read(self) -> bool:
return True
def does_write(self) -> bool:
return True
def read(self, file: FileHandle, *args, **kwargs):
return file.text
def write(self, filename: str, source_object: str):
with open(filename, "w") as f:
f.write(source_object)
| true | true |
f72ab504565970994d8e7ad4fc8bc28fa7d14daa | 14,614 | py | Python | tests/unit/anchore_engine/services/policy_engine/policy/test_parameters.py | dspalmer99/anchore-engine | 8c61318be6fec5d767426fa4ccd98472cc85b5cd | [
"Apache-2.0"
] | 1 | 2019-06-27T08:47:48.000Z | 2019-06-27T08:47:48.000Z | tests/unit/anchore_engine/services/policy_engine/policy/test_parameters.py | dspalmer99/anchore-engine | 8c61318be6fec5d767426fa4ccd98472cc85b5cd | [
"Apache-2.0"
] | 4 | 2020-11-07T00:16:02.000Z | 2020-11-08T20:52:06.000Z | tests/unit/anchore_engine/services/policy_engine/policy/test_parameters.py | dspalmer99/anchore-engine | 8c61318be6fec5d767426fa4ccd98472cc85b5cd | [
"Apache-2.0"
] | 1 | 2019-11-23T03:39:28.000Z | 2019-11-23T03:39:28.000Z | import unittest
from anchore_engine.services.policy_engine.engine.policy.params import JsonSchemaValidator, BooleanStringValidator, TypeValidator, CommaDelimitedNumberListValidator, EnumValidator, \
DelimitedEnumStringValidator, IntegerValidator, NameVersionListValidator, PipeDelimitedStringListValidator, CommaDelimitedStringListValidator, RegexParamValidator, nested_item_delim_parser, \
delim_parser, LinkedValidator
from anchore_engine.services.policy_engine.engine.policy import params
from anchore_engine.services.policy_engine.engine.policy import gate
from anchore_engine.services.policy_engine.engine.policy.exceptions import ParameterValueInvalidError, ValidationError, RequiredParameterNotSetError
class ValidatorTestMixin(object):
"""
Mixin for helpers for parameter validation tests
"""
def run_matrix_test(self, value_matrix, validator):
for input, expected in value_matrix:
print(('Testing value: {} with expected output: {}'.format(input, expected)))
if expected:
self.assertTrue(validator.validate(input), msg='Expected true for input: {}'.format(input))
else:
with self.assertRaises(ValidationError, msg='Expected exception for input: {}'.format(input)) as e:
validator.validate(input)
class TestParamParsers(unittest.TestCase):
def _run_test_table(self, table, fn):
for t in table:
self.assertEqual(t['result'], fn(t['test']))
def testDelimParser(self):
test_table = [
{'test': 'a,b', 'result': ['a', 'b']},
{'test': ' a , b ', 'result': ['a', 'b']},
{'test': 'a,b,', 'result': ['a', 'b', '']}
]
self._run_test_table(test_table, delim_parser)
test_table = [
{'test': 'a|b', 'result': ['a', 'b']},
{'test': ' a | b ', 'result': ['a', 'b']},
{'test': 'a|b|', 'result': ['a', 'b', '']}
]
self._run_test_table(test_table, lambda x: delim_parser(param_value=x, item_delimiter='|'))
def testBarsplitCommaDelimParser(self):
test_table = [
{'test': 'a|b,c|d', 'result': {'a': 'b', 'c': 'd'}},
{'test': ' a|b , c|d ', 'result': {'a': 'b', 'c': 'd'}},
{'test': ' a|b,c|d ', 'result': {'a': 'b', 'c': 'd'}},
{'test': ' a-b.c-09-e|b,c|d ', 'result': {'a-b.c-09-e': 'b', 'c': 'd'}},
]
self._run_test_table(test_table, nested_item_delim_parser)
class TestTypeValidator(unittest.TestCase, ValidatorTestMixin):
def test_boolean(self):
matrix = [
(True, True),
(False, True),
('true', False),
('True', False),
('false', False),
('False', False),
('abc', False),
(1, False),
(['a'], False),
({'a': 'b'}, False)
]
self.run_matrix_test(value_matrix=matrix, validator=TypeValidator("boolean"))
def test_object(self):
matrix = [
('blah', False),
(1, False),
(['a'], False),
({}, True),
({'a': 'b'}, True)
]
self.run_matrix_test(value_matrix=matrix, validator=TypeValidator('object'))
def test_string(self):
matrix = [
('blah', True),
('', True),
(1, False),
(['a'], False),
({}, False),
({'a': 'b'}, False)
]
self.run_matrix_test(value_matrix=matrix, validator=TypeValidator('string'))
def test_array(self):
matrix = [
('blah', False),
(1, False),
(['a'], True),
([], True),
({'a': 'b'}, False),
('null', False)
]
self.run_matrix_test(value_matrix=matrix, validator=TypeValidator('array'))
def test_integer(self):
matrix = [
('blah', False),
(1, True),
(1.0, False),
(['a'], False),
({}, False),
({'a': 'b'}, False)
]
self.run_matrix_test(value_matrix=matrix, validator=TypeValidator('integer'))
def test_number(self):
matrix = [
('blah', False),
(1, True),
(1.0, True),
(['a'], False),
({}, False),
({'a': 'b'}, False)
]
self.run_matrix_test(value_matrix=matrix, validator=TypeValidator('number'))
class TestBooleanStringValidator(unittest.TestCase, ValidatorTestMixin):
def test_boolean_strings(self):
matrix = [
('True', True),
('False', True),
('true', True),
('TRUE', True),
('FALSE', True),
('false', True),
('blah', False),
(1, False),
('1.0', False),
('1', False),
({'a': 'b'}, False),
(['a'], False)
]
self.run_matrix_test(matrix, BooleanStringValidator())
class TestJsonSchemaValidator(unittest.TestCase, ValidatorTestMixin):
class CustomValidator(JsonSchemaValidator):
__validation_schema__ = {
'type': 'object',
'required': ['id', 'name'],
'properties': {
'id': {
'type': 'string'
},
'name': {
'type': 'string'
},
'count': {
'type': 'integer'
}
}
}
def test_json(self):
matrix = [
({'id': 'abc', 'name': 'testname', 'count': 123}, True),
({'id': 'abc', 'name': 'test'}, True),
('a', False),
(1.0, False),
('1.1', False),
(['a', 1, 1], False),
({'name': 'testname', 'count': 123}, False), # Missing a required key
({'id': 'v1', 'name': 'v2', 'count': 123, 'blah': 'hello'}, True)
]
v = TestJsonSchemaValidator.CustomValidator()
self.run_matrix_test(matrix, v)
class TestRegexValidator(unittest.TestCase, ValidatorTestMixin):
def test_regex(self):
v = RegexParamValidator('.*')
matrix = [
('abadfasd.asdfonweo;ianvoaisealnefq;olq23--=23512=5=-w=215', True),
(1, False),
('', True)
]
self.run_matrix_test(matrix, v)
v = RegexParamValidator('[0-9]+')
matrix = [
('1231231', True),
('abc', False),
('', False),
(' ', False)
]
self.run_matrix_test(matrix, v)
class TestRegexRelatedValidators(unittest.TestCase, ValidatorTestMixin):
def test_commadelim_numberlist_validator(self):
v = CommaDelimitedNumberListValidator()
matrix = [
('1,2,3', True),
(' 1, 2, 3 ', True),
('1', True),
('a', False),
('1,2,c', False),
('1,,2', False)
]
self.run_matrix_test(matrix, v)
def test_nameversion_list_validator(self):
v = NameVersionListValidator()
matrix = [
('a|1.0,b|2.0', True),
('a|b,c|defefes|', False),
('a|b', True),
('a|b,c|d', True),
('a,b', False),
('|a', False),
('a,', False),
('a||', False),
('a|,c|d', False),
('a', False),
('a,b', False),
('pkg1|0.1.1.1 pkg2|1.2.', False)
]
self.run_matrix_test(matrix, v)
def test_commadelim_stringlist_validator(self):
v = CommaDelimitedStringListValidator()
matrix = [
('a,b,c', True),
('aa,,bb', False),
(',a', False),
('a,', False)
]
self.run_matrix_test(matrix, v)
def test_pipe_delim_validator(self):
v = PipeDelimitedStringListValidator()
matrix = [
('ab', True),
('abc|c', True),
('ab|c|d', True),
('|a', False),
('a|', False)
]
self.run_matrix_test(matrix, v)
def test_integer_validator(self):
v = IntegerValidator()
matrix = [
('1', True),
('1,2,3', False),
('a,b,c', False),
('a', False),
('1,2,c', False)
]
self.run_matrix_test(matrix, v)
def test_enum_validator(self):
v = EnumValidator(['value1', 'value2'])
matrix = [
('value1', True),
('value2', True),
('3', False),
('value1,value2', False)
]
self.run_matrix_test(matrix, v)
def test_enum_list_validator(self):
v = DelimitedEnumStringValidator(['value1', 'value2'])
matrix = [
('value1', True),
('value2', True),
('value1,value2', True),
('value3', False),
('value1,value3', False)
]
self.run_matrix_test(matrix, v)
class FakeTrigger(gate.BaseTrigger):
__trigger_name__ = 'TestingTrigger'
__description__ = 'Not real'
__trigger_id__ = 'Blah123'
param1 = params.TriggerParameter(name='param_test', example_str='somevalue', description='Test parameter', validator=TypeValidator("string"), is_required=False)
def test1(self):
print((type(self.param1)))
class FakeGate(gate.Gate):
__gate_name__ = 'Somegate'
__triggers__ = [FakeTrigger]
class TestTriggerParams(unittest.TestCase):
def test_param_basics(self):
p = params.TriggerParameter('TestParam1', description='Param for testing basic strings', validator=TypeValidator("string"), related_to='ThisOtherParam')
print('Trying string that should pass validation')
# Should pass validation
print((p.set_value('somestring')))
print(('Got value: {}'.format(p.value())))
print('Trying an int that should fail validation')
# Should fail validation
with self.assertRaises(ValidationError) as ex:
print((p.set_value(10)))
print(('Correctly got exception {}'.format(ex.exception)))
def test_param_integration(self):
t = FakeTrigger(parent_gate_cls=FakeGate, param_test='blah')
# print('Inst value: {}'.format(t.eval_params.get(t.param1.name)))
print(('Inst value: {}'.format(t.param1.value())))
print(('Class value: {}'.format(t.__class__.param1.value())))
t.test1()
class ValidatedParameterTestMixin(object):
"""
Mixin for helpers for parameter validation tests
"""
def run_matrix_test(self, value_matrix, parameter):
for input, expected in value_matrix:
print(('Testing value: {} with expected output: {}'.format(input, expected)))
if expected:
parameter.set_value(input)
output = parameter.value()
self.assertEqual(output, expected)
else:
with self.assertRaises(ValidationError) as e:
parameter.set_value(input)
class TestParameters(unittest.TestCase, ValidatedParameterTestMixin):
def test_nameversion_stringlist_parameter(self):
p = params.NameVersionStringListParameter(name='test1', description='test_description', is_required=False)
test_matrix = [
('a|b,c|d', {'a': 'b', 'c': 'd'}),
('pkg1|0.1.1-abc,pkg2|1.3.5-asdf0', {'pkg1': '0.1.1-abc', 'pkg2': '1.3.5-asdf0'}),
(' a|b , c|d', {'a': 'b', 'c': 'd'}),
('a,b', False),
('a b c', False),
('a|b,c,d', False),
('a|b|c|d', False),
('pkg1|0.1.1.1 pkg2|1.2.', False)
]
self.run_matrix_test(test_matrix, p)
def test_enum_string_parameter(self):
p = params.EnumStringParameter(name='test1', description='test1_description', is_required=False, enum_values=['value1', 'value2'])
test_matrix = [
('value1', 'value1'),
('value2', 'value2'),
('value3', False),
('value1,value2', False),
(' ', False),
('', False)
]
self.run_matrix_test(test_matrix, p)
def test_enumcomma_stringlist_parameter(self):
p = params.EnumCommaDelimStringListParameter(name='test1', description='test1_description', is_required=False, enum_values=['value1', 'value2'])
test_matrix = [
('value1', ['value1']),
('value1,value2', ['value1', 'value2']),
('value1 , value2', ['value1', 'value2']),
('value1, value2', ['value1', 'value2']),
('value1, value2, value1', ['value1', 'value2', 'value1']),
('value3', False),
(' ', False),
('', False)
]
self.run_matrix_test(test_matrix, p)
class TestLinkedValidator(unittest.TestCase, ValidatedParameterTestMixin):
def test_linked(self):
p1 = params.EnumStringParameter(name='attribute', description='Testing123', enum_values=['a', 'b'], is_required=True)
p2 = params.SimpleStringParameter(name='downstream', validator=LinkedValidator(discriminator_parameter='attribute', default_validator=TypeValidator('string'), value_map={'a': BooleanStringValidator(), 'b': IntegerValidator()}), description='test123')
print(p2.validator.validation_criteria())
#p1.set_value('a')
p2.validator.inject_discriminator(None)
test_matrix = [
('true', 'true'),
('blah', 'blah') # p1 not set, so uses default
]
self.run_matrix_test(test_matrix, p2)
p1._param_value = None
p2._param_value = None
p2.validator.inject_discriminator('a')
p1.set_value('a')
test_matrix = [
('true', 'true'),
('blah', False) # should fail now that p1 has a value
]
self.run_matrix_test(test_matrix, p2)
p1._param_value = None
p2._param_value = None
p1.set_value('b')
p2.validator.inject_discriminator('b')
test_matrix = [
('true', False),
('blah', False),
('123', '123')
]
self.run_matrix_test(test_matrix, p2)
def test_multiple(self):
trig1 = FakeTrigger(parent_gate_cls=FakeGate, param_test="somevalue")
trig2 = FakeTrigger(parent_gate_cls=FakeGate, param_test="someothervalue")
print('{} {}'.format(trig1.json(), trig2.json()))
if __name__ == '__main__':
unittest.main()
| 32.189427 | 258 | 0.531066 | import unittest
from anchore_engine.services.policy_engine.engine.policy.params import JsonSchemaValidator, BooleanStringValidator, TypeValidator, CommaDelimitedNumberListValidator, EnumValidator, \
DelimitedEnumStringValidator, IntegerValidator, NameVersionListValidator, PipeDelimitedStringListValidator, CommaDelimitedStringListValidator, RegexParamValidator, nested_item_delim_parser, \
delim_parser, LinkedValidator
from anchore_engine.services.policy_engine.engine.policy import params
from anchore_engine.services.policy_engine.engine.policy import gate
from anchore_engine.services.policy_engine.engine.policy.exceptions import ParameterValueInvalidError, ValidationError, RequiredParameterNotSetError
class ValidatorTestMixin(object):
def run_matrix_test(self, value_matrix, validator):
for input, expected in value_matrix:
print(('Testing value: {} with expected output: {}'.format(input, expected)))
if expected:
self.assertTrue(validator.validate(input), msg='Expected true for input: {}'.format(input))
else:
with self.assertRaises(ValidationError, msg='Expected exception for input: {}'.format(input)) as e:
validator.validate(input)
class TestParamParsers(unittest.TestCase):
def _run_test_table(self, table, fn):
for t in table:
self.assertEqual(t['result'], fn(t['test']))
def testDelimParser(self):
test_table = [
{'test': 'a,b', 'result': ['a', 'b']},
{'test': ' a , b ', 'result': ['a', 'b']},
{'test': 'a,b,', 'result': ['a', 'b', '']}
]
self._run_test_table(test_table, delim_parser)
test_table = [
{'test': 'a|b', 'result': ['a', 'b']},
{'test': ' a | b ', 'result': ['a', 'b']},
{'test': 'a|b|', 'result': ['a', 'b', '']}
]
self._run_test_table(test_table, lambda x: delim_parser(param_value=x, item_delimiter='|'))
def testBarsplitCommaDelimParser(self):
test_table = [
{'test': 'a|b,c|d', 'result': {'a': 'b', 'c': 'd'}},
{'test': ' a|b , c|d ', 'result': {'a': 'b', 'c': 'd'}},
{'test': ' a|b,c|d ', 'result': {'a': 'b', 'c': 'd'}},
{'test': ' a-b.c-09-e|b,c|d ', 'result': {'a-b.c-09-e': 'b', 'c': 'd'}},
]
self._run_test_table(test_table, nested_item_delim_parser)
class TestTypeValidator(unittest.TestCase, ValidatorTestMixin):
def test_boolean(self):
matrix = [
(True, True),
(False, True),
('true', False),
('True', False),
('false', False),
('False', False),
('abc', False),
(1, False),
(['a'], False),
({'a': 'b'}, False)
]
self.run_matrix_test(value_matrix=matrix, validator=TypeValidator("boolean"))
def test_object(self):
matrix = [
('blah', False),
(1, False),
(['a'], False),
({}, True),
({'a': 'b'}, True)
]
self.run_matrix_test(value_matrix=matrix, validator=TypeValidator('object'))
def test_string(self):
matrix = [
('blah', True),
('', True),
(1, False),
(['a'], False),
({}, False),
({'a': 'b'}, False)
]
self.run_matrix_test(value_matrix=matrix, validator=TypeValidator('string'))
def test_array(self):
matrix = [
('blah', False),
(1, False),
(['a'], True),
([], True),
({'a': 'b'}, False),
('null', False)
]
self.run_matrix_test(value_matrix=matrix, validator=TypeValidator('array'))
def test_integer(self):
matrix = [
('blah', False),
(1, True),
(1.0, False),
(['a'], False),
({}, False),
({'a': 'b'}, False)
]
self.run_matrix_test(value_matrix=matrix, validator=TypeValidator('integer'))
def test_number(self):
matrix = [
('blah', False),
(1, True),
(1.0, True),
(['a'], False),
({}, False),
({'a': 'b'}, False)
]
self.run_matrix_test(value_matrix=matrix, validator=TypeValidator('number'))
class TestBooleanStringValidator(unittest.TestCase, ValidatorTestMixin):
def test_boolean_strings(self):
matrix = [
('True', True),
('False', True),
('true', True),
('TRUE', True),
('FALSE', True),
('false', True),
('blah', False),
(1, False),
('1.0', False),
('1', False),
({'a': 'b'}, False),
(['a'], False)
]
self.run_matrix_test(matrix, BooleanStringValidator())
class TestJsonSchemaValidator(unittest.TestCase, ValidatorTestMixin):
class CustomValidator(JsonSchemaValidator):
__validation_schema__ = {
'type': 'object',
'required': ['id', 'name'],
'properties': {
'id': {
'type': 'string'
},
'name': {
'type': 'string'
},
'count': {
'type': 'integer'
}
}
}
def test_json(self):
matrix = [
({'id': 'abc', 'name': 'testname', 'count': 123}, True),
({'id': 'abc', 'name': 'test'}, True),
('a', False),
(1.0, False),
('1.1', False),
(['a', 1, 1], False),
({'name': 'testname', 'count': 123}, False),
({'id': 'v1', 'name': 'v2', 'count': 123, 'blah': 'hello'}, True)
]
v = TestJsonSchemaValidator.CustomValidator()
self.run_matrix_test(matrix, v)
class TestRegexValidator(unittest.TestCase, ValidatorTestMixin):
def test_regex(self):
v = RegexParamValidator('.*')
matrix = [
('abadfasd.asdfonweo;ianvoaisealnefq;olq23--=23512=5=-w=215', True),
(1, False),
('', True)
]
self.run_matrix_test(matrix, v)
v = RegexParamValidator('[0-9]+')
matrix = [
('1231231', True),
('abc', False),
('', False),
(' ', False)
]
self.run_matrix_test(matrix, v)
class TestRegexRelatedValidators(unittest.TestCase, ValidatorTestMixin):
def test_commadelim_numberlist_validator(self):
v = CommaDelimitedNumberListValidator()
matrix = [
('1,2,3', True),
(' 1, 2, 3 ', True),
('1', True),
('a', False),
('1,2,c', False),
('1,,2', False)
]
self.run_matrix_test(matrix, v)
def test_nameversion_list_validator(self):
v = NameVersionListValidator()
matrix = [
('a|1.0,b|2.0', True),
('a|b,c|defefes|', False),
('a|b', True),
('a|b,c|d', True),
('a,b', False),
('|a', False),
('a,', False),
('a||', False),
('a|,c|d', False),
('a', False),
('a,b', False),
('pkg1|0.1.1.1 pkg2|1.2.', False)
]
self.run_matrix_test(matrix, v)
def test_commadelim_stringlist_validator(self):
v = CommaDelimitedStringListValidator()
matrix = [
('a,b,c', True),
('aa,,bb', False),
(',a', False),
('a,', False)
]
self.run_matrix_test(matrix, v)
def test_pipe_delim_validator(self):
v = PipeDelimitedStringListValidator()
matrix = [
('ab', True),
('abc|c', True),
('ab|c|d', True),
('|a', False),
('a|', False)
]
self.run_matrix_test(matrix, v)
def test_integer_validator(self):
v = IntegerValidator()
matrix = [
('1', True),
('1,2,3', False),
('a,b,c', False),
('a', False),
('1,2,c', False)
]
self.run_matrix_test(matrix, v)
def test_enum_validator(self):
v = EnumValidator(['value1', 'value2'])
matrix = [
('value1', True),
('value2', True),
('3', False),
('value1,value2', False)
]
self.run_matrix_test(matrix, v)
def test_enum_list_validator(self):
v = DelimitedEnumStringValidator(['value1', 'value2'])
matrix = [
('value1', True),
('value2', True),
('value1,value2', True),
('value3', False),
('value1,value3', False)
]
self.run_matrix_test(matrix, v)
class FakeTrigger(gate.BaseTrigger):
__trigger_name__ = 'TestingTrigger'
__description__ = 'Not real'
__trigger_id__ = 'Blah123'
param1 = params.TriggerParameter(name='param_test', example_str='somevalue', description='Test parameter', validator=TypeValidator("string"), is_required=False)
def test1(self):
print((type(self.param1)))
class FakeGate(gate.Gate):
__gate_name__ = 'Somegate'
__triggers__ = [FakeTrigger]
class TestTriggerParams(unittest.TestCase):
def test_param_basics(self):
p = params.TriggerParameter('TestParam1', description='Param for testing basic strings', validator=TypeValidator("string"), related_to='ThisOtherParam')
print('Trying string that should pass validation')
print((p.set_value('somestring')))
print(('Got value: {}'.format(p.value())))
print('Trying an int that should fail validation')
with self.assertRaises(ValidationError) as ex:
print((p.set_value(10)))
print(('Correctly got exception {}'.format(ex.exception)))
def test_param_integration(self):
t = FakeTrigger(parent_gate_cls=FakeGate, param_test='blah')
print(('Inst value: {}'.format(t.param1.value())))
print(('Class value: {}'.format(t.__class__.param1.value())))
t.test1()
class ValidatedParameterTestMixin(object):
def run_matrix_test(self, value_matrix, parameter):
for input, expected in value_matrix:
print(('Testing value: {} with expected output: {}'.format(input, expected)))
if expected:
parameter.set_value(input)
output = parameter.value()
self.assertEqual(output, expected)
else:
with self.assertRaises(ValidationError) as e:
parameter.set_value(input)
class TestParameters(unittest.TestCase, ValidatedParameterTestMixin):
def test_nameversion_stringlist_parameter(self):
p = params.NameVersionStringListParameter(name='test1', description='test_description', is_required=False)
test_matrix = [
('a|b,c|d', {'a': 'b', 'c': 'd'}),
('pkg1|0.1.1-abc,pkg2|1.3.5-asdf0', {'pkg1': '0.1.1-abc', 'pkg2': '1.3.5-asdf0'}),
(' a|b , c|d', {'a': 'b', 'c': 'd'}),
('a,b', False),
('a b c', False),
('a|b,c,d', False),
('a|b|c|d', False),
('pkg1|0.1.1.1 pkg2|1.2.', False)
]
self.run_matrix_test(test_matrix, p)
def test_enum_string_parameter(self):
p = params.EnumStringParameter(name='test1', description='test1_description', is_required=False, enum_values=['value1', 'value2'])
test_matrix = [
('value1', 'value1'),
('value2', 'value2'),
('value3', False),
('value1,value2', False),
(' ', False),
('', False)
]
self.run_matrix_test(test_matrix, p)
def test_enumcomma_stringlist_parameter(self):
p = params.EnumCommaDelimStringListParameter(name='test1', description='test1_description', is_required=False, enum_values=['value1', 'value2'])
test_matrix = [
('value1', ['value1']),
('value1,value2', ['value1', 'value2']),
('value1 , value2', ['value1', 'value2']),
('value1, value2', ['value1', 'value2']),
('value1, value2, value1', ['value1', 'value2', 'value1']),
('value3', False),
(' ', False),
('', False)
]
self.run_matrix_test(test_matrix, p)
class TestLinkedValidator(unittest.TestCase, ValidatedParameterTestMixin):
def test_linked(self):
p1 = params.EnumStringParameter(name='attribute', description='Testing123', enum_values=['a', 'b'], is_required=True)
p2 = params.SimpleStringParameter(name='downstream', validator=LinkedValidator(discriminator_parameter='attribute', default_validator=TypeValidator('string'), value_map={'a': BooleanStringValidator(), 'b': IntegerValidator()}), description='test123')
print(p2.validator.validation_criteria())
p2.validator.inject_discriminator(None)
test_matrix = [
('true', 'true'),
('blah', 'blah')
]
self.run_matrix_test(test_matrix, p2)
p1._param_value = None
p2._param_value = None
p2.validator.inject_discriminator('a')
p1.set_value('a')
test_matrix = [
('true', 'true'),
('blah', False)
]
self.run_matrix_test(test_matrix, p2)
p1._param_value = None
p2._param_value = None
p1.set_value('b')
p2.validator.inject_discriminator('b')
test_matrix = [
('true', False),
('blah', False),
('123', '123')
]
self.run_matrix_test(test_matrix, p2)
def test_multiple(self):
trig1 = FakeTrigger(parent_gate_cls=FakeGate, param_test="somevalue")
trig2 = FakeTrigger(parent_gate_cls=FakeGate, param_test="someothervalue")
print('{} {}'.format(trig1.json(), trig2.json()))
if __name__ == '__main__':
unittest.main()
| true | true |
f72ab6805d5b4e650b8e6b745b9ad9b0ed680de0 | 329 | py | Python | clingine/clock.py | avancayetano/clingine | 55e8bd6366aad3ae8e7ac9537fa3ae85efab9ddc | [
"MIT"
] | 12 | 2020-04-10T09:10:29.000Z | 2022-03-12T03:45:08.000Z | clingine/clock.py | avancayetano/clingine | 55e8bd6366aad3ae8e7ac9537fa3ae85efab9ddc | [
"MIT"
] | 6 | 2020-04-11T10:47:01.000Z | 2020-10-19T14:15:55.000Z | clingine/clock.py | avancayetano/clingine | 55e8bd6366aad3ae8e7ac9537fa3ae85efab9ddc | [
"MIT"
] | 1 | 2021-09-04T00:40:34.000Z | 2021-09-04T00:40:34.000Z | import time
class Clock:
def __init__(self):
self.start_time = time.time()
self.current_time = time.time()
def get_time(self):
return time.time() - self.start_time
def get_dt(self):
return time.time() - self.current_time
def update(self):
self.current_time = time.time()
def delay(self, sec):
time.sleep(sec) | 19.352941 | 40 | 0.702128 | import time
class Clock:
def __init__(self):
self.start_time = time.time()
self.current_time = time.time()
def get_time(self):
return time.time() - self.start_time
def get_dt(self):
return time.time() - self.current_time
def update(self):
self.current_time = time.time()
def delay(self, sec):
time.sleep(sec) | true | true |
f72ab6e6434d9b5f426cef3c89cc2fec38e25ed5 | 1,703 | py | Python | scripts/maf_covered_regions.py | tweirick/bx-python | f16a57e9f0a133ab4d62aed6fec087b8ce4ec848 | [
"MIT"
] | null | null | null | scripts/maf_covered_regions.py | tweirick/bx-python | f16a57e9f0a133ab4d62aed6fec087b8ce4ec848 | [
"MIT"
] | null | null | null | scripts/maf_covered_regions.py | tweirick/bx-python | f16a57e9f0a133ab4d62aed6fec087b8ce4ec848 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Read a maf file and print the regions covered to a set of bed files (one for
each sequence source referenced in the maf). Only blocks with a positive
percent identity are written out.
TODO: Can this be generalized to be made more useful?
usage: %prog bed_outfile_prefix < maf
"""
from __future__ import division, print_function
import sys
import bx.align.maf
import psyco_full
def block_pid( comp1, comp2 ):
match = 0
total = 0
t1 = comp1.text.lower()
t2 = comp2.text.lower()
for i in range( 0, len(t1) ):
a, b = t1[i], t2[i]
if a == '-' or b == '-':
continue
elif a == b:
match += 1
total += 1
if total == 0: return None
return ( match / total )
def main():
out_prefix = sys.argv[1]
print(out_prefix)
out_files = dict()
for block in bx.align.maf.Reader( sys.stdin ):
ref_comp = block.components[0]
ref_chrom = ref_comp.src.split('.')[1]
for comp in block.components[1:]:
comp_species, comp_chrom = comp.src.split('.')[:2]
if comp_species not in out_files:
f = open( "%s%s.bed" % ( out_prefix, comp_species ), "w" )
out_files[comp_species] = f
pid = block_pid( ref_comp, comp )
if pid:
out_files[comp_species].write( "%s\t%d\t%d\t%s:%d-%d,%s\t%f\n" %
( ref_chrom, ref_comp.forward_strand_start, ref_comp.forward_strand_end, \
comp_chrom, comp.start, comp.end, comp.strand, pid ) )
for f in out_files.values():
f.close()
if __name__ == "__main__":
main()
| 28.864407 | 107 | 0.570757 |
from __future__ import division, print_function
import sys
import bx.align.maf
import psyco_full
def block_pid( comp1, comp2 ):
match = 0
total = 0
t1 = comp1.text.lower()
t2 = comp2.text.lower()
for i in range( 0, len(t1) ):
a, b = t1[i], t2[i]
if a == '-' or b == '-':
continue
elif a == b:
match += 1
total += 1
if total == 0: return None
return ( match / total )
def main():
out_prefix = sys.argv[1]
print(out_prefix)
out_files = dict()
for block in bx.align.maf.Reader( sys.stdin ):
ref_comp = block.components[0]
ref_chrom = ref_comp.src.split('.')[1]
for comp in block.components[1:]:
comp_species, comp_chrom = comp.src.split('.')[:2]
if comp_species not in out_files:
f = open( "%s%s.bed" % ( out_prefix, comp_species ), "w" )
out_files[comp_species] = f
pid = block_pid( ref_comp, comp )
if pid:
out_files[comp_species].write( "%s\t%d\t%d\t%s:%d-%d,%s\t%f\n" %
( ref_chrom, ref_comp.forward_strand_start, ref_comp.forward_strand_end, \
comp_chrom, comp.start, comp.end, comp.strand, pid ) )
for f in out_files.values():
f.close()
if __name__ == "__main__":
main()
| true | true |
f72ab7e4fe69751d46adb928a0232848fd36398f | 4,962 | py | Python | apps/log_extract/handlers/thread.py | yiqiwang-17/bk-log | 7b356fced63b667baea300cfd194ad70a842c3ee | [
"MIT"
] | null | null | null | apps/log_extract/handlers/thread.py | yiqiwang-17/bk-log | 7b356fced63b667baea300cfd194ad70a842c3ee | [
"MIT"
] | null | null | null | apps/log_extract/handlers/thread.py | yiqiwang-17/bk-log | 7b356fced63b667baea300cfd194ad70a842c3ee | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging # noqa
from functools import partial # noqa
from multiprocessing.pool import ThreadPool as _ThreadPool # noqa
from django import db # noqa
from django.utils import timezone, translation # noqa
from apps.utils.local import activate_request, get_request # noqa
from .local import local # noqa
logger = logging.getLogger(__name__)
def run_func_with_local(items, tz, lang, request, func, *args, **kwargs):
"""
线程执行函数
:param request: added by jairwu API request
:param func: 待执行函数
:param items: Thread Local Items
:param tz: 时区
:param lang: 语言
:param args: 位置参数
:param kwargs: 关键字参数
:return: 函数返回值
"""
# 同步local数据
for item in items:
setattr(local, item[0], item[1])
# 设置时区及语言
timezone.activate(tz)
translation.activate(lang)
activate_request(request)
try:
data = func(*args, **kwargs)
except Exception as e:
raise e
finally:
# 关闭db连接
db.connections.close_all()
# 清理local数据
for item in local:
delattr(local, item[0])
return data
class ThreadPool(_ThreadPool):
"""
线程池
"""
@staticmethod
def get_func_with_local(func):
tz = timezone.get_current_timezone().zone
lang = translation.get_language()
items = [item for item in local]
request = get_request()
return partial(run_func_with_local, items, tz, lang, request, func)
def map_ignore_exception(self, func, iterable, return_exception=False):
"""
忽略错误版的map
"""
futures = []
for params in iterable:
if not isinstance(params, (tuple, list)):
params = (params,)
futures.append(self.apply_async(func, args=params))
results = []
for future in futures:
try:
results.append(future.get())
except Exception as e:
if return_exception:
results.append(e)
logger.exception(e)
return results
def map_async(self, func, iterable, chunksize=None, callback=None):
return super(ThreadPool, self).map_async(
self.get_func_with_local(func), iterable, chunksize=chunksize, callback=callback
)
def apply_async(self, func, args=(), kwds={}, callback=None):
return super(ThreadPool, self).apply_async(
self.get_func_with_local(func), args=args, kwds=kwds, callback=callback
)
def imap(self, func, iterable, chunksize=1):
return super(ThreadPool, self).imap(self.get_func_with_local(func), iterable, chunksize)
def imap_unordered(self, func, iterable, chunksize=1):
func = partial(run_func_with_local, func, local)
return super(ThreadPool, self).imap_unordered(self.get_func_with_local(func), iterable, chunksize=chunksize)
| 38.169231 | 116 | 0.689238 |
import logging
from functools import partial
from multiprocessing.pool import ThreadPool as _ThreadPool
from django import db
from django.utils import timezone, translation
from apps.utils.local import activate_request, get_request
from .local import local
logger = logging.getLogger(__name__)
def run_func_with_local(items, tz, lang, request, func, *args, **kwargs):
for item in items:
setattr(local, item[0], item[1])
timezone.activate(tz)
translation.activate(lang)
activate_request(request)
try:
data = func(*args, **kwargs)
except Exception as e:
raise e
finally:
db.connections.close_all()
for item in local:
delattr(local, item[0])
return data
class ThreadPool(_ThreadPool):
@staticmethod
def get_func_with_local(func):
tz = timezone.get_current_timezone().zone
lang = translation.get_language()
items = [item for item in local]
request = get_request()
return partial(run_func_with_local, items, tz, lang, request, func)
def map_ignore_exception(self, func, iterable, return_exception=False):
futures = []
for params in iterable:
if not isinstance(params, (tuple, list)):
params = (params,)
futures.append(self.apply_async(func, args=params))
results = []
for future in futures:
try:
results.append(future.get())
except Exception as e:
if return_exception:
results.append(e)
logger.exception(e)
return results
def map_async(self, func, iterable, chunksize=None, callback=None):
return super(ThreadPool, self).map_async(
self.get_func_with_local(func), iterable, chunksize=chunksize, callback=callback
)
def apply_async(self, func, args=(), kwds={}, callback=None):
return super(ThreadPool, self).apply_async(
self.get_func_with_local(func), args=args, kwds=kwds, callback=callback
)
def imap(self, func, iterable, chunksize=1):
return super(ThreadPool, self).imap(self.get_func_with_local(func), iterable, chunksize)
def imap_unordered(self, func, iterable, chunksize=1):
func = partial(run_func_with_local, func, local)
return super(ThreadPool, self).imap_unordered(self.get_func_with_local(func), iterable, chunksize=chunksize)
| true | true |
f72ab8481e4f48f3a7a7d665752d25ae94efa665 | 3,571 | py | Python | basic/string1.py | hmln/google-python-exercises | c9b55063708ea22a99914a3ad14fd2aae54336f2 | [
"Apache-2.0"
] | null | null | null | basic/string1.py | hmln/google-python-exercises | c9b55063708ea22a99914a3ad14fd2aae54336f2 | [
"Apache-2.0"
] | null | null | null | basic/string1.py | hmln/google-python-exercises | c9b55063708ea22a99914a3ad14fd2aae54336f2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic string exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in string2.py.
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
return 'Number of donuts: {}'.format(count if count < 10 else 'many')
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
if len(s) < 2:
return ''
return s[0:2] + s[-2:]
# C. fix_start
# Given a string s, return a string
# where all occurences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
return s[0] + s[1:].replace(s[0], '*')
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
# 'mix', pod' -> 'pox mid'
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
return '{} {}'.format(b[:2] + a[2:], a[:2] + b[2:])
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print('%s got: %s expected: %s' % (prefix, repr(got), repr(expected)))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
print('donuts')
# Each line calls donuts, compares its result to the expected for that call.
test(donuts(4), 'Number of donuts: 4')
test(donuts(9), 'Number of donuts: 9')
test(donuts(10), 'Number of donuts: many')
test(donuts(99), 'Number of donuts: many')
print()
print('both_ends')
test(both_ends('spring'), 'spng')
test(both_ends('Hello'), 'Helo')
test(both_ends('a'), '')
test(both_ends('xyz'), 'xyyz')
print()
print('fix_start')
test(fix_start('babble'), 'ba**le')
test(fix_start('aardvark'), 'a*rdv*rk')
test(fix_start('google'), 'goo*le')
test(fix_start('donut'), 'donut')
print()
print('mix_up')
test(mix_up('mix', 'pod'), 'pox mid')
test(mix_up('dog', 'dinner'), 'dig donner')
test(mix_up('gnash', 'sport'), 'spash gnort')
test(mix_up('pezzy', 'firm'), 'fizzy perm')
# Standard boilerplate to call the main() function.
if __name__ == '__main__':
main()
| 32.463636 | 80 | 0.659199 |
# http://code.google.com/edu/languages/google-python-class/
# Basic string exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
def donuts(count):
return 'Number of donuts: {}'.format(count if count < 10 else 'many')
def both_ends(s):
if len(s) < 2:
return ''
return s[0:2] + s[-2:]
def fix_start(s):
return s[0] + s[1:].replace(s[0], '*')
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
return '{} {}'.format(b[:2] + a[2:], a[:2] + b[2:])
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print('%s got: %s expected: %s' % (prefix, repr(got), repr(expected)))
def main():
print('donuts')
test(donuts(4), 'Number of donuts: 4')
test(donuts(9), 'Number of donuts: 9')
test(donuts(10), 'Number of donuts: many')
test(donuts(99), 'Number of donuts: many')
print()
print('both_ends')
test(both_ends('spring'), 'spng')
test(both_ends('Hello'), 'Helo')
test(both_ends('a'), '')
test(both_ends('xyz'), 'xyyz')
print()
print('fix_start')
test(fix_start('babble'), 'ba**le')
test(fix_start('aardvark'), 'a*rdv*rk')
test(fix_start('google'), 'goo*le')
test(fix_start('donut'), 'donut')
print()
print('mix_up')
test(mix_up('mix', 'pod'), 'pox mid')
test(mix_up('dog', 'dinner'), 'dig donner')
test(mix_up('gnash', 'sport'), 'spash gnort')
test(mix_up('pezzy', 'firm'), 'fizzy perm')
if __name__ == '__main__':
main()
| true | true |
f72ab89546778e858c6dc70b6873b930fa6fde29 | 518 | py | Python | tests/test_config.py | kraeki/openair-jac | 760b1b1be7efebde1146b31cf0a9326a7362a82c | [
"BSD-3-Clause"
] | null | null | null | tests/test_config.py | kraeki/openair-jac | 760b1b1be7efebde1146b31cf0a9326a7362a82c | [
"BSD-3-Clause"
] | null | null | null | tests/test_config.py | kraeki/openair-jac | 760b1b1be7efebde1146b31cf0a9326a7362a82c | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Test configs."""
from openair.app import create_app
from openair.settings import DevConfig, ProdConfig
def test_production_config():
"""Production config."""
app = create_app(ProdConfig)
assert app.config['ENV'] == 'prod'
assert app.config['DEBUG'] is False
assert app.config['DEBUG_TB_ENABLED'] is False
def test_dev_config():
"""Development config."""
app = create_app(DevConfig)
assert app.config['ENV'] == 'dev'
assert app.config['DEBUG'] is True
| 25.9 | 50 | 0.675676 |
from openair.app import create_app
from openair.settings import DevConfig, ProdConfig
def test_production_config():
app = create_app(ProdConfig)
assert app.config['ENV'] == 'prod'
assert app.config['DEBUG'] is False
assert app.config['DEBUG_TB_ENABLED'] is False
def test_dev_config():
app = create_app(DevConfig)
assert app.config['ENV'] == 'dev'
assert app.config['DEBUG'] is True
| true | true |
f72ab8a2448743b933326291b648e8d737b17a76 | 142 | py | Python | config/prd.py | by46/camel | b1ac2609bc5d1cd22933c07c9fce7b935f2d9394 | [
"MIT"
] | null | null | null | config/prd.py | by46/camel | b1ac2609bc5d1cd22933c07c9fce7b935f2d9394 | [
"MIT"
] | null | null | null | config/prd.py | by46/camel | b1ac2609bc5d1cd22933c07c9fce7b935f2d9394 | [
"MIT"
] | null | null | null | # PRD environment setting
# Flask-NegLog Settings
LOG_LEVEL = 'debug'
LOG_FILENAME = "/var/camel/error.log"
LOG_ENABLE_CONSOLE = False
| 20.285714 | 38 | 0.739437 |
LOG_LEVEL = 'debug'
LOG_FILENAME = "/var/camel/error.log"
LOG_ENABLE_CONSOLE = False
| true | true |
f72ab9e2e5e78bac6263ebb24b7540ab94fc5895 | 1,304 | py | Python | clean_prediction.py | richardanarfi/Recsys-Challenge-2018-TeamFL | 81e00a2417d530ea1033dcb22fbe29b7ceb12bb2 | [
"Apache-2.0"
] | null | null | null | clean_prediction.py | richardanarfi/Recsys-Challenge-2018-TeamFL | 81e00a2417d530ea1033dcb22fbe29b7ceb12bb2 | [
"Apache-2.0"
] | null | null | null | clean_prediction.py | richardanarfi/Recsys-Challenge-2018-TeamFL | 81e00a2417d530ea1033dcb22fbe29b7ceb12bb2 | [
"Apache-2.0"
] | null | null | null | from gensim.models import Word2Vec
from sklearn.decomposition import PCA
from matplotlib import pyplot
import string
import fnmatch
# define training data
#sentences = open('new_file_sentence.txt', 'r', encoding='utf-8')
path = 'predictions_v11_1500_clean.txt'
output_file = open("predictions_v11_500.txt", "w")
input_texts = ()
with open(path) as f:
lines = f.read().split('\n')
for line in lines[: min(1000000, len(lines) - 1)]:
line = line.replace(' ','').split(',')
str = ''
#print(line)
#x = 'spotify*'
for i in range(2000):
if 'spotify:track:' in line[i]:
str += line[i]
str += ','
print(line[i])
output_file.write(str)
output_file.write('\n')
#y = not (fnmatch.filter(line, x))
# print(y)
#print(line[i])
#print(line)
#print(x for x in line if 'spotify' in x)
#if "spotify" not in line:
# print(line)
# line=line[i].replace(line[i], '')
#print(line)
#input_texts.append(line)
#output_file.write(input_texts)
#output_file.write('\n')
#import fnmatch
#l = ['RT07010534.txt', 'RT07010533.txt', 'RT02010534.txt']
#pattern = 'RT0701*.txt'
#matching = fnmatch.filter(l, pattern)
#print(matching)
#print(sample1)
| 26.612245 | 66 | 0.595859 | from gensim.models import Word2Vec
from sklearn.decomposition import PCA
from matplotlib import pyplot
import string
import fnmatch
path = 'predictions_v11_1500_clean.txt'
output_file = open("predictions_v11_500.txt", "w")
input_texts = ()
with open(path) as f:
lines = f.read().split('\n')
for line in lines[: min(1000000, len(lines) - 1)]:
line = line.replace(' ','').split(',')
str = ''
for i in range(2000):
if 'spotify:track:' in line[i]:
str += line[i]
str += ','
print(line[i])
output_file.write(str)
output_file.write('\n')
| true | true |
f72aba22fc109af958a6de438269df6a2c4a6b07 | 1,733 | py | Python | tests/test_data/test_structured.py | el/elizabeth | dc82cd9d2bb230acdb2f1a49bc16b1c3d12077ff | [
"MIT"
] | null | null | null | tests/test_data/test_structured.py | el/elizabeth | dc82cd9d2bb230acdb2f1a49bc16b1c3d12077ff | [
"MIT"
] | null | null | null | tests/test_data/test_structured.py | el/elizabeth | dc82cd9d2bb230acdb2f1a49bc16b1c3d12077ff | [
"MIT"
] | 1 | 2019-12-27T19:34:17.000Z | 2019-12-27T19:34:17.000Z | # -*- coding: utf-8 -*-
import re
import csv
from elizabeth.core.providers import Structured
from unittest import TestCase
from elizabeth.core import interdata as common
from ._patterns import STR_REGEX
class StructuredBaseTest(TestCase):
def setUp(self):
self.structured = Structured('en')
def tearDown(self):
del self.structured
def test_str(self):
self.assertTrue(re.match(STR_REGEX, self.structured.__str__()))
def test_css(self):
result = self.structured.css()
self.assertIsInstance(result, str) # returns string
self.assertIn(":", result) # contains property assignments
self.assertEqual(result[-1], "}") # closed at end
self.assertEqual(result.split(" ")[1][0], "{") # opened after selector
def test_css_property(self):
result = self.structured.css_property()
self.assertEqual(len(result.split(" ")), 2) # contains one property assignment
self.assertIn(":", result) # contains any property assignments
def test_html_attribute_value(self):
result = self.structured.html_attribute_value("a", "href")
self.assertEqual(result[0:4], "http")
with self.assertRaises(NotImplementedError):
self.structured.html_attribute_value("a", "bogus")
with self.assertRaises(NotImplementedError):
common.HTML_CONTAINER_TAGS['div']['class'] = "bogus"
from elizabeth.core.providers import Structured
Structured('en').html_attribute_value("div", "class")
def test_html(self):
result = self.structured.html()
self.assertEqual(result[0], "<") # tag is enclosed
self.assertEqual(result[-1], ">") # tag is enclosed
| 36.104167 | 87 | 0.663589 |
import re
import csv
from elizabeth.core.providers import Structured
from unittest import TestCase
from elizabeth.core import interdata as common
from ._patterns import STR_REGEX
class StructuredBaseTest(TestCase):
def setUp(self):
self.structured = Structured('en')
def tearDown(self):
del self.structured
def test_str(self):
self.assertTrue(re.match(STR_REGEX, self.structured.__str__()))
def test_css(self):
result = self.structured.css()
self.assertIsInstance(result, str)
self.assertIn(":", result)
self.assertEqual(result[-1], "}")
self.assertEqual(result.split(" ")[1][0], "{")
def test_css_property(self):
result = self.structured.css_property()
self.assertEqual(len(result.split(" ")), 2)
self.assertIn(":", result)
def test_html_attribute_value(self):
result = self.structured.html_attribute_value("a", "href")
self.assertEqual(result[0:4], "http")
with self.assertRaises(NotImplementedError):
self.structured.html_attribute_value("a", "bogus")
with self.assertRaises(NotImplementedError):
common.HTML_CONTAINER_TAGS['div']['class'] = "bogus"
from elizabeth.core.providers import Structured
Structured('en').html_attribute_value("div", "class")
def test_html(self):
result = self.structured.html()
self.assertEqual(result[0], "<")
self.assertEqual(result[-1], ">")
| true | true |
f72aba59680a1148f9878e622e1a32e4cbb7706a | 212 | py | Python | mayan/apps/document_states/managers.py | eshbeata/open-paperless | 6b9ed1f21908116ad2795b3785b2dbd66713d66e | [
"Apache-2.0"
] | 2,743 | 2017-12-18T07:12:30.000Z | 2022-03-27T17:21:25.000Z | mayan/apps/document_states/managers.py | kyper999/mayan-edms | ca7b8301a1f68548e8e718d42a728a500d67286e | [
"Apache-2.0"
] | 15 | 2020-06-06T00:00:48.000Z | 2022-03-12T00:03:54.000Z | mayan/apps/document_states/managers.py | kyper999/mayan-edms | ca7b8301a1f68548e8e718d42a728a500d67286e | [
"Apache-2.0"
] | 257 | 2017-12-18T03:12:58.000Z | 2022-03-25T08:59:10.000Z | from django.db import models
class WorkflowManager(models.Manager):
def launch_for(self, document):
for workflow in document.document_type.workflows.all():
workflow.launch_for(document)
| 26.5 | 63 | 0.726415 | from django.db import models
class WorkflowManager(models.Manager):
def launch_for(self, document):
for workflow in document.document_type.workflows.all():
workflow.launch_for(document)
| true | true |
f72aba799455f6cc85c2295c96a774ff725ab946 | 18,200 | py | Python | tests/onnx/test_onnx_model_export.py | kokoff/mlflow | 062722b172f403e613c41f9bb024b3e1673dfe31 | [
"Apache-2.0"
] | 1 | 2020-08-17T21:50:32.000Z | 2020-08-17T21:50:32.000Z | tests/onnx/test_onnx_model_export.py | kokoff/mlflow | 062722b172f403e613c41f9bb024b3e1673dfe31 | [
"Apache-2.0"
] | null | null | null | tests/onnx/test_onnx_model_export.py | kokoff/mlflow | 062722b172f403e613c41f9bb024b3e1673dfe31 | [
"Apache-2.0"
] | null | null | null | import sys
import os
import pytest
import mock
from keras.models import Sequential
from keras.layers import Dense
import sklearn.datasets as datasets
import pandas as pd
import numpy as np
import yaml
import tensorflow as tf
import mlflow.pyfunc.scoring_server as pyfunc_scoring_server
from mlflow import pyfunc
from mlflow.models import infer_signature, Model
from mlflow.models.utils import _read_example
from mlflow.utils.file_utils import TempDir
from tests.helper_functions import pyfunc_serve_and_score_model
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.model_utils import _get_flavor_configuration
pytestmark = pytest.mark.skipif(
(sys.version_info < (3, 6)), reason="Tests require Python 3 to run!"
)
@pytest.fixture(scope="module")
def data():
iris = datasets.load_iris()
data = pd.DataFrame(
data=np.c_[iris["data"], iris["target"]], columns=iris["feature_names"] + ["target"]
)
y = data["target"]
x = data.drop("target", axis=1)
return x, y
@pytest.fixture(scope="module")
def model(data):
x, y = data
model = Sequential()
model.add(Dense(3, input_dim=4))
model.add(Dense(1))
model.compile(loss="mean_squared_error", optimizer="SGD")
model.fit(x, y)
return model
@pytest.fixture(scope="module")
def onnx_model(model):
import onnxmltools
return onnxmltools.convert_keras(model)
@pytest.fixture(scope="module")
def sklearn_model(data):
from sklearn.linear_model import LogisticRegression
x, y = data
model = LogisticRegression()
model.fit(x, y)
return model
@pytest.fixture(scope="module")
def onnx_sklearn_model(sklearn_model):
import onnxmltools
from skl2onnx.common.data_types import FloatTensorType
initial_type = [("float_input", FloatTensorType([None, 4]))]
onx = onnxmltools.convert_sklearn(sklearn_model, initial_types=initial_type)
return onx
@pytest.fixture(scope="module")
def predicted(model, data):
return model.predict(data[0])
@pytest.fixture(scope="module")
def tf_model_multiple_inputs_float64():
graph = tf.Graph()
with graph.as_default():
t_in1 = tf.placeholder(tf.float64, 10, name="first_input")
t_in2 = tf.placeholder(tf.float64, 10, name="second_input")
t_out = tf.multiply(t_in1, t_in2)
tf.identity(t_out, name="output")
return graph
@pytest.fixture(scope="module")
def tf_model_multiple_inputs_float32():
graph = tf.Graph()
with graph.as_default():
t_in1 = tf.placeholder(tf.float32, 10, name="first_input")
t_in2 = tf.placeholder(tf.float32, 10, name="second_input")
t_out = tf.multiply(t_in1, t_in2)
tf.identity(t_out, name="output")
return graph
@pytest.fixture(scope="module")
def onnx_model_multiple_inputs_float64(tf_model_multiple_inputs_float64):
import tf2onnx
sess = tf.Session(graph=tf_model_multiple_inputs_float64)
onnx_graph = tf2onnx.tfonnx.process_tf_graph(
sess.graph, input_names=["first_input:0", "second_input:0"], output_names=["output:0"]
)
model_proto = onnx_graph.make_model("test")
return model_proto
@pytest.fixture(scope="module")
def onnx_model_multiple_inputs_float32(tf_model_multiple_inputs_float32):
import tf2onnx
sess = tf.Session(graph=tf_model_multiple_inputs_float32)
onnx_graph = tf2onnx.tfonnx.process_tf_graph(
sess.graph, input_names=["first_input:0", "second_input:0"], output_names=["output:0"]
)
model_proto = onnx_graph.make_model("test")
return model_proto
@pytest.fixture(scope="module")
def data_multiple_inputs():
return pd.DataFrame(
{"first_input:0": np.random.random(10), "second_input:0": np.random.random(10)}
)
@pytest.fixture(scope="module")
def predicted_multiple_inputs(data_multiple_inputs):
    """Expected model output: elementwise product of the two input columns."""
    product = data_multiple_inputs["first_input:0"] * data_multiple_inputs["second_input:0"]
    return pd.DataFrame(product)
@pytest.fixture
def model_path(tmpdir):
    """Per-test filesystem location at which models are saved."""
    return os.path.join(str(tmpdir), "model")
@pytest.fixture
def onnx_custom_env(tmpdir):
    """Write a conda env YAML carrying the deps these tests need; return its path."""
    env_path = os.path.join(str(tmpdir), "conda_env.yml")
    _mlflow_conda_env(
        env_path,
        additional_conda_deps=["pytest", "keras"],
        additional_pip_deps=["onnx", "onnxmltools"],
    )
    return env_path
@pytest.mark.large
def test_cast_float64_to_float32():
    """_cast_float64_to_float32 downcasts float64 columns to float32 while
    leaving non-float columns (here: bool) untouched.

    Fix: the ``np.bool`` alias was deprecated in NumPy 1.20 and removed in
    1.24, so this test now uses the builtin ``bool`` for the cast and
    ``np.bool_`` (NumPy's actual boolean scalar type) for the dtype check;
    both are what ``np.bool`` resolved to before its removal.
    """
    import mlflow.onnx

    df = pd.DataFrame([[1.0, 2.1], [True, False]], columns=["col1", "col2"])
    df["col1"] = df["col1"].astype(np.float64)
    df["col2"] = df["col2"].astype(bool)
    df2 = mlflow.onnx._OnnxModelWrapper._cast_float64_to_float32(df, df.columns)
    assert df2["col1"].dtype == np.float32 and df2["col2"].dtype == np.bool_
# TODO: Use the default conda environment once MLflow's Travis build supports the onnxruntime
# library
@pytest.mark.large
def test_model_save_load(onnx_model, model_path, onnx_custom_env):
    """An ONNX model saved with a custom conda env can be reloaded."""
    import onnx
    import mlflow.onnx
    mlflow.onnx.save_model(onnx_model, model_path, conda_env=onnx_custom_env)
    # Loading ONNX model
    # Stub out the checker; we only assert that load_model invokes it.
    onnx.checker.check_model = mock.Mock()
    mlflow.onnx.load_model(model_path)
    assert onnx.checker.check_model.called
@pytest.mark.large
def test_signature_and_examples_are_saved_correctly(onnx_model, data, onnx_custom_env):
    """Every (signature, input_example) combination round-trips through save_model."""
    import mlflow.onnx
    model = onnx_model
    signature_ = infer_signature(*data)
    example_ = data[0].head(3)
    # Exercise all four combinations of providing / omitting each field.
    for signature in (None, signature_):
        for example in (None, example_):
            with TempDir() as tmp:
                path = tmp.path("model")
                mlflow.onnx.save_model(
                    model,
                    path=path,
                    conda_env=onnx_custom_env,
                    signature=signature,
                    input_example=example,
                )
                mlflow_model = Model.load(path)
                assert signature == mlflow_model.signature
                if example is None:
                    assert mlflow_model.saved_input_example_info is None
                else:
                    # The persisted example must round-trip element-wise.
                    assert all((_read_example(mlflow_model, path) == example).all())
# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library
@pytest.mark.release
def test_model_save_load_evaluate_pyfunc_format(onnx_model, model_path, data, predicted):
    """Pyfunc predictions of a saved ONNX model match the native Keras
    predictions, both in-process and through the scoring server."""
    import mlflow.onnx
    x = data[0]
    mlflow.onnx.save_model(onnx_model, model_path)
    # Loading pyfunc model
    pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_path)
    assert np.allclose(pyfunc_loaded.predict(x).values, predicted, rtol=1e-05, atol=1e-05)
    # pyfunc serve
    scoring_response = pyfunc_serve_and_score_model(
        model_uri=os.path.abspath(model_path),
        data=x,
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
    )
    # JSON transport may widen floats; cast back to float32 before comparing.
    assert np.allclose(
        pd.read_json(scoring_response.content, orient="records").values.astype(np.float32),
        predicted,
        rtol=1e-05,
        atol=1e-05,
    )
# TODO: Use the default conda environment once MLflow's Travis build supports the onnxruntime
# library
@pytest.mark.large
def test_model_save_load_multiple_inputs(
    onnx_model_multiple_inputs_float64, model_path, onnx_custom_env
):
    """A multi-input ONNX model can be saved and reloaded."""
    import onnx
    import mlflow.onnx
    mlflow.onnx.save_model(
        onnx_model_multiple_inputs_float64, model_path, conda_env=onnx_custom_env
    )
    # Loading ONNX model
    # Stub out the checker; we only assert that load_model invokes it.
    onnx.checker.check_model = mock.Mock()
    mlflow.onnx.load_model(model_path)
    assert onnx.checker.check_model.called
# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library
@pytest.mark.release
def test_model_save_load_evaluate_pyfunc_format_multiple_inputs(
    onnx_model_multiple_inputs_float64, data_multiple_inputs, predicted_multiple_inputs, model_path
):
    """Multi-input pyfunc predictions match the expected elementwise product,
    both in-process and via the scoring server."""
    import mlflow.onnx
    mlflow.onnx.save_model(onnx_model_multiple_inputs_float64, model_path)
    # Loading pyfunc model
    pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_path)
    assert np.allclose(
        pyfunc_loaded.predict(data_multiple_inputs).values,
        predicted_multiple_inputs.values,
        rtol=1e-05,
        atol=1e-05,
    )
    # pyfunc serve
    scoring_response = pyfunc_serve_and_score_model(
        model_uri=os.path.abspath(model_path),
        data=data_multiple_inputs,
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
    )
    assert np.allclose(
        pd.read_json(scoring_response.content, orient="records").values,
        predicted_multiple_inputs.values,
        rtol=1e-05,
        atol=1e-05,
    )
# TODO: Remove test, along with explicit casting, when https://github.com/mlflow/mlflow/issues/1286
# is fixed.
# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library
@pytest.mark.release
def test_pyfunc_representation_of_float32_model_casts_and_evalutes_float64_inputs(
    onnx_model_multiple_inputs_float32, model_path, data_multiple_inputs, predicted_multiple_inputs
):
    """
    The ``python_function`` representation of an MLflow model with the ONNX flavor
    casts 64-bit floats to 32-bit floats automatically before evaluating, as opposed
    to throwing an unexpected type exception. This behavior is implemented due
    to the issue described in https://github.com/mlflow/mlflow/issues/1286 where
    the JSON representation of a Pandas DataFrame does not always preserve float
    precision (e.g., 32-bit floats may be converted to 64-bit floats when persisting a
    DataFrame as JSON).
    """
    import mlflow.onnx
    mlflow.onnx.save_model(onnx_model_multiple_inputs_float32, model_path)
    # Loading pyfunc model
    pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_path)
    # float64 inputs are transparently downcast to float32 before evaluation.
    assert np.allclose(
        pyfunc_loaded.predict(data_multiple_inputs.astype("float64")).values,
        predicted_multiple_inputs.astype("float32").values,
        rtol=1e-05,
        atol=1e-05,
    )
    # Non-float dtypes are NOT coerced; they must still raise.
    with pytest.raises(RuntimeError):
        pyfunc_loaded.predict(data_multiple_inputs.astype("int32"))
# TODO: Use the default conda environment once MLflow's Travis build supports the onnxruntime
# library
@pytest.mark.large
def test_model_log(onnx_model, onnx_custom_env):
    """log_model works both with an explicit run and with an implicit one."""
    # pylint: disable=unused-argument
    import onnx
    import mlflow.onnx
    # should_start_run tests whether or not calling log_model() automatically starts a run.
    for should_start_run in [False, True]:
        try:
            if should_start_run:
                mlflow.start_run()
            artifact_path = "onnx_model"
            mlflow.onnx.log_model(
                onnx_model=onnx_model, artifact_path=artifact_path, conda_env=onnx_custom_env
            )
            model_uri = "runs:/{run_id}/{artifact_path}".format(
                run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
            )
            # Load model
            onnx.checker.check_model = mock.Mock()
            mlflow.onnx.load_model(model_uri)
            assert onnx.checker.check_model.called
        finally:
            # Always close the (explicit or implicit) run before the next iteration.
            mlflow.end_run()
def test_log_model_calls_register_model(onnx_model, onnx_custom_env):
    """Passing registered_model_name makes log_model call mlflow.register_model
    exactly once with the run-relative model URI."""
    import mlflow.onnx
    artifact_path = "model"
    register_model_patch = mock.patch("mlflow.register_model")
    with mlflow.start_run(), register_model_patch:
        mlflow.onnx.log_model(
            onnx_model=onnx_model,
            artifact_path=artifact_path,
            conda_env=onnx_custom_env,
            registered_model_name="AdsModel1",
        )
        model_uri = "runs:/{run_id}/{artifact_path}".format(
            run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
        )
        mlflow.register_model.assert_called_once_with(model_uri, "AdsModel1")
def test_log_model_no_registered_model_name(onnx_model, onnx_custom_env):
    """Without registered_model_name, log_model must not register anything."""
    import mlflow.onnx
    artifact_path = "model"
    register_model_patch = mock.patch("mlflow.register_model")
    with mlflow.start_run(), register_model_patch:
        mlflow.onnx.log_model(
            onnx_model=onnx_model, artifact_path=artifact_path, conda_env=onnx_custom_env
        )
        mlflow.register_model.assert_not_called()
# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library
@pytest.mark.release
def test_model_log_evaluate_pyfunc_format(onnx_model, data, predicted):
    """Models logged to a run can be loaded as pyfunc and reproduce predictions."""
    import mlflow.onnx
    x = data[0]
    # should_start_run tests whether or not calling log_model() automatically starts a run.
    for should_start_run in [False, True]:
        try:
            if should_start_run:
                mlflow.start_run()
            artifact_path = "onnx_model"
            mlflow.onnx.log_model(onnx_model=onnx_model, artifact_path=artifact_path)
            model_uri = "runs:/{run_id}/{artifact_path}".format(
                run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
            )
            # Loading pyfunc model
            pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_uri=model_uri)
            assert np.allclose(pyfunc_loaded.predict(x).values, predicted, rtol=1e-05, atol=1e-05)
        finally:
            # Always close the (explicit or implicit) run before the next iteration.
            mlflow.end_run()
@pytest.mark.large
def test_model_save_persists_specified_conda_env_in_mlflow_model_directory(
    onnx_model, model_path, onnx_custom_env
):
    """save_model copies the user-supplied conda env YAML into the model dir."""
    import mlflow.onnx
    mlflow.onnx.save_model(onnx_model=onnx_model, path=model_path, conda_env=onnx_custom_env)
    pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
    saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
    assert os.path.exists(saved_conda_env_path)
    # The env must be copied into the model directory, not referenced in place.
    assert saved_conda_env_path != onnx_custom_env
    with open(onnx_custom_env, "r") as f:
        onnx_custom_env_parsed = yaml.safe_load(f)
    with open(saved_conda_env_path, "r") as f:
        saved_conda_env_parsed = yaml.safe_load(f)
    assert saved_conda_env_parsed == onnx_custom_env_parsed
# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library
@pytest.mark.release
def test_model_save_accepts_conda_env_as_dict(onnx_model, model_path):
    """conda_env may be supplied as an in-memory dict and is persisted verbatim."""
    import mlflow.onnx
    conda_env = dict(mlflow.onnx.get_default_conda_env())
    conda_env["dependencies"].append("pytest")
    mlflow.onnx.save_model(onnx_model=onnx_model, path=model_path, conda_env=conda_env)
    pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
    saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
    assert os.path.exists(saved_conda_env_path)
    with open(saved_conda_env_path, "r") as f:
        saved_conda_env_parsed = yaml.safe_load(f)
    assert saved_conda_env_parsed == conda_env
@pytest.mark.large
def test_model_log_persists_specified_conda_env_in_mlflow_model_directory(
    onnx_model, onnx_custom_env
):
    """log_model copies the user-supplied conda env into the run's artifacts."""
    import mlflow.onnx
    artifact_path = "model"
    with mlflow.start_run():
        mlflow.onnx.log_model(
            onnx_model=onnx_model, artifact_path=artifact_path, conda_env=onnx_custom_env
        )
        model_path = _download_artifact_from_uri(
            "runs:/{run_id}/{artifact_path}".format(
                run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
            )
        )
    pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
    saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
    assert os.path.exists(saved_conda_env_path)
    # The env must be copied into the artifact store, not referenced in place.
    assert saved_conda_env_path != onnx_custom_env
    with open(onnx_custom_env, "r") as f:
        onnx_custom_env_parsed = yaml.safe_load(f)
    with open(saved_conda_env_path, "r") as f:
        saved_conda_env_parsed = yaml.safe_load(f)
    assert saved_conda_env_parsed == onnx_custom_env_parsed
# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library
@pytest.mark.release
def test_model_save_without_specified_conda_env_uses_default_env_with_expected_dependencies(
    onnx_model, model_path
):
    """With conda_env=None, save_model falls back to the flavor's default env."""
    import mlflow.onnx
    mlflow.onnx.save_model(onnx_model=onnx_model, path=model_path, conda_env=None)
    pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
    conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
    with open(conda_env_path, "r") as f:
        conda_env = yaml.safe_load(f)
    assert conda_env == mlflow.onnx.get_default_conda_env()
# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library
@pytest.mark.release
def test_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies(
    onnx_model,
):
    """With conda_env=None, log_model falls back to the flavor's default env."""
    import mlflow.onnx
    artifact_path = "model"
    with mlflow.start_run():
        mlflow.onnx.log_model(onnx_model=onnx_model, artifact_path=artifact_path, conda_env=None)
        model_path = _download_artifact_from_uri(
            "runs:/{run_id}/{artifact_path}".format(
                run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
            )
        )
    pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
    conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
    with open(conda_env_path, "r") as f:
        conda_env = yaml.safe_load(f)
    assert conda_env == mlflow.onnx.get_default_conda_env()
# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library
@pytest.mark.release
def test_pyfunc_predict_supports_models_with_list_outputs(onnx_sklearn_model, model_path, data):
    """
    https://github.com/mlflow/mlflow/issues/2499
    User encountered issue where an sklearn model, converted to onnx, would return a list response.
    The issue resulted in an error because MLflow assumed it would be a numpy array. Therefore,
    this test validates that the service does not receive that error when using such a model.
    """
    import mlflow.onnx
    x = data[0]
    mlflow.onnx.save_model(onnx_sklearn_model, model_path)
    wrapper = mlflow.pyfunc.load_model(model_path)
    # Must not raise even though the ONNX runtime returns a Python list.
    wrapper.predict(pd.DataFrame(x))
| 34.469697 | 99 | 0.720879 | import sys
import os
import pytest
import mock
from keras.models import Sequential
from keras.layers import Dense
import sklearn.datasets as datasets
import pandas as pd
import numpy as np
import yaml
import tensorflow as tf
import mlflow.pyfunc.scoring_server as pyfunc_scoring_server
from mlflow import pyfunc
from mlflow.models import infer_signature, Model
from mlflow.models.utils import _read_example
from mlflow.utils.file_utils import TempDir
from tests.helper_functions import pyfunc_serve_and_score_model
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.model_utils import _get_flavor_configuration
pytestmark = pytest.mark.skipif(
(sys.version_info < (3, 6)), reason="Tests require Python 3 to run!"
)
@pytest.fixture(scope="module")
def data():
iris = datasets.load_iris()
data = pd.DataFrame(
data=np.c_[iris["data"], iris["target"]], columns=iris["feature_names"] + ["target"]
)
y = data["target"]
x = data.drop("target", axis=1)
return x, y
@pytest.fixture(scope="module")
def model(data):
x, y = data
model = Sequential()
model.add(Dense(3, input_dim=4))
model.add(Dense(1))
model.compile(loss="mean_squared_error", optimizer="SGD")
model.fit(x, y)
return model
@pytest.fixture(scope="module")
def onnx_model(model):
import onnxmltools
return onnxmltools.convert_keras(model)
@pytest.fixture(scope="module")
def sklearn_model(data):
from sklearn.linear_model import LogisticRegression
x, y = data
model = LogisticRegression()
model.fit(x, y)
return model
@pytest.fixture(scope="module")
def onnx_sklearn_model(sklearn_model):
import onnxmltools
from skl2onnx.common.data_types import FloatTensorType
initial_type = [("float_input", FloatTensorType([None, 4]))]
onx = onnxmltools.convert_sklearn(sklearn_model, initial_types=initial_type)
return onx
@pytest.fixture(scope="module")
def predicted(model, data):
return model.predict(data[0])
@pytest.fixture(scope="module")
def tf_model_multiple_inputs_float64():
graph = tf.Graph()
with graph.as_default():
t_in1 = tf.placeholder(tf.float64, 10, name="first_input")
t_in2 = tf.placeholder(tf.float64, 10, name="second_input")
t_out = tf.multiply(t_in1, t_in2)
tf.identity(t_out, name="output")
return graph
@pytest.fixture(scope="module")
def tf_model_multiple_inputs_float32():
graph = tf.Graph()
with graph.as_default():
t_in1 = tf.placeholder(tf.float32, 10, name="first_input")
t_in2 = tf.placeholder(tf.float32, 10, name="second_input")
t_out = tf.multiply(t_in1, t_in2)
tf.identity(t_out, name="output")
return graph
@pytest.fixture(scope="module")
def onnx_model_multiple_inputs_float64(tf_model_multiple_inputs_float64):
import tf2onnx
sess = tf.Session(graph=tf_model_multiple_inputs_float64)
onnx_graph = tf2onnx.tfonnx.process_tf_graph(
sess.graph, input_names=["first_input:0", "second_input:0"], output_names=["output:0"]
)
model_proto = onnx_graph.make_model("test")
return model_proto
@pytest.fixture(scope="module")
def onnx_model_multiple_inputs_float32(tf_model_multiple_inputs_float32):
import tf2onnx
sess = tf.Session(graph=tf_model_multiple_inputs_float32)
onnx_graph = tf2onnx.tfonnx.process_tf_graph(
sess.graph, input_names=["first_input:0", "second_input:0"], output_names=["output:0"]
)
model_proto = onnx_graph.make_model("test")
return model_proto
@pytest.fixture(scope="module")
def data_multiple_inputs():
return pd.DataFrame(
{"first_input:0": np.random.random(10), "second_input:0": np.random.random(10)}
)
@pytest.fixture(scope="module")
def predicted_multiple_inputs(data_multiple_inputs):
return pd.DataFrame(
data_multiple_inputs["first_input:0"] * data_multiple_inputs["second_input:0"]
)
@pytest.fixture
def model_path(tmpdir):
return os.path.join(tmpdir.strpath, "model")
@pytest.fixture
def onnx_custom_env(tmpdir):
conda_env = os.path.join(str(tmpdir), "conda_env.yml")
_mlflow_conda_env(
conda_env,
additional_conda_deps=["pytest", "keras"],
additional_pip_deps=["onnx", "onnxmltools"],
)
return conda_env
@pytest.mark.large
def test_cast_float64_to_float32():
import mlflow.onnx
df = pd.DataFrame([[1.0, 2.1], [True, False]], columns=["col1", "col2"])
df["col1"] = df["col1"].astype(np.float64)
df["col2"] = df["col2"].astype(np.bool)
df2 = mlflow.onnx._OnnxModelWrapper._cast_float64_to_float32(df, df.columns)
assert df2["col1"].dtype == np.float32 and df2["col2"].dtype == np.bool
# library
@pytest.mark.large
def test_model_save_load(onnx_model, model_path, onnx_custom_env):
import onnx
import mlflow.onnx
mlflow.onnx.save_model(onnx_model, model_path, conda_env=onnx_custom_env)
# Loading ONNX model
onnx.checker.check_model = mock.Mock()
mlflow.onnx.load_model(model_path)
assert onnx.checker.check_model.called
@pytest.mark.large
def test_signature_and_examples_are_saved_correctly(onnx_model, data, onnx_custom_env):
import mlflow.onnx
model = onnx_model
signature_ = infer_signature(*data)
example_ = data[0].head(3)
for signature in (None, signature_):
for example in (None, example_):
with TempDir() as tmp:
path = tmp.path("model")
mlflow.onnx.save_model(
model,
path=path,
conda_env=onnx_custom_env,
signature=signature,
input_example=example,
)
mlflow_model = Model.load(path)
assert signature == mlflow_model.signature
if example is None:
assert mlflow_model.saved_input_example_info is None
else:
assert all((_read_example(mlflow_model, path) == example).all())
# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library
@pytest.mark.release
def test_model_save_load_evaluate_pyfunc_format(onnx_model, model_path, data, predicted):
import mlflow.onnx
x = data[0]
mlflow.onnx.save_model(onnx_model, model_path)
pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_path)
assert np.allclose(pyfunc_loaded.predict(x).values, predicted, rtol=1e-05, atol=1e-05)
scoring_response = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=x,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
)
assert np.allclose(
pd.read_json(scoring_response.content, orient="records").values.astype(np.float32),
predicted,
rtol=1e-05,
atol=1e-05,
)
# library
@pytest.mark.large
def test_model_save_load_multiple_inputs(
onnx_model_multiple_inputs_float64, model_path, onnx_custom_env
):
import onnx
import mlflow.onnx
mlflow.onnx.save_model(
onnx_model_multiple_inputs_float64, model_path, conda_env=onnx_custom_env
)
# Loading ONNX model
onnx.checker.check_model = mock.Mock()
mlflow.onnx.load_model(model_path)
assert onnx.checker.check_model.called
# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library
@pytest.mark.release
def test_model_save_load_evaluate_pyfunc_format_multiple_inputs(
onnx_model_multiple_inputs_float64, data_multiple_inputs, predicted_multiple_inputs, model_path
):
import mlflow.onnx
mlflow.onnx.save_model(onnx_model_multiple_inputs_float64, model_path)
pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_path)
assert np.allclose(
pyfunc_loaded.predict(data_multiple_inputs).values,
predicted_multiple_inputs.values,
rtol=1e-05,
atol=1e-05,
)
scoring_response = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=data_multiple_inputs,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
)
assert np.allclose(
pd.read_json(scoring_response.content, orient="records").values,
predicted_multiple_inputs.values,
rtol=1e-05,
atol=1e-05,
)
@pytest.mark.release
def test_pyfunc_representation_of_float32_model_casts_and_evalutes_float64_inputs(
onnx_model_multiple_inputs_float32, model_path, data_multiple_inputs, predicted_multiple_inputs
):
import mlflow.onnx
mlflow.onnx.save_model(onnx_model_multiple_inputs_float32, model_path)
# Loading pyfunc model
pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_path)
assert np.allclose(
pyfunc_loaded.predict(data_multiple_inputs.astype("float64")).values,
predicted_multiple_inputs.astype("float32").values,
rtol=1e-05,
atol=1e-05,
)
with pytest.raises(RuntimeError):
pyfunc_loaded.predict(data_multiple_inputs.astype("int32"))
# TODO: Use the default conda environment once MLflow's Travis build supports the onnxruntime
@pytest.mark.large
def test_model_log(onnx_model, onnx_custom_env):
import onnx
import mlflow.onnx
for should_start_run in [False, True]:
try:
if should_start_run:
mlflow.start_run()
artifact_path = "onnx_model"
mlflow.onnx.log_model(
onnx_model=onnx_model, artifact_path=artifact_path, conda_env=onnx_custom_env
)
model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
onnx.checker.check_model = mock.Mock()
mlflow.onnx.load_model(model_uri)
assert onnx.checker.check_model.called
finally:
mlflow.end_run()
def test_log_model_calls_register_model(onnx_model, onnx_custom_env):
import mlflow.onnx
artifact_path = "model"
register_model_patch = mock.patch("mlflow.register_model")
with mlflow.start_run(), register_model_patch:
mlflow.onnx.log_model(
onnx_model=onnx_model,
artifact_path=artifact_path,
conda_env=onnx_custom_env,
registered_model_name="AdsModel1",
)
model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
mlflow.register_model.assert_called_once_with(model_uri, "AdsModel1")
def test_log_model_no_registered_model_name(onnx_model, onnx_custom_env):
import mlflow.onnx
artifact_path = "model"
register_model_patch = mock.patch("mlflow.register_model")
with mlflow.start_run(), register_model_patch:
mlflow.onnx.log_model(
onnx_model=onnx_model, artifact_path=artifact_path, conda_env=onnx_custom_env
)
mlflow.register_model.assert_not_called()
@pytest.mark.release
def test_model_log_evaluate_pyfunc_format(onnx_model, data, predicted):
import mlflow.onnx
x = data[0]
# should_start_run tests whether or not calling log_model() automatically starts a run.
for should_start_run in [False, True]:
try:
if should_start_run:
mlflow.start_run()
artifact_path = "onnx_model"
mlflow.onnx.log_model(onnx_model=onnx_model, artifact_path=artifact_path)
model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
# Loading pyfunc model
pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_uri=model_uri)
assert np.allclose(pyfunc_loaded.predict(x).values, predicted, rtol=1e-05, atol=1e-05)
finally:
mlflow.end_run()
@pytest.mark.large
def test_model_save_persists_specified_conda_env_in_mlflow_model_directory(
onnx_model, model_path, onnx_custom_env
):
import mlflow.onnx
mlflow.onnx.save_model(onnx_model=onnx_model, path=model_path, conda_env=onnx_custom_env)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
assert os.path.exists(saved_conda_env_path)
assert saved_conda_env_path != onnx_custom_env
with open(onnx_custom_env, "r") as f:
onnx_custom_env_parsed = yaml.safe_load(f)
with open(saved_conda_env_path, "r") as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == onnx_custom_env_parsed
# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library
@pytest.mark.release
def test_model_save_accepts_conda_env_as_dict(onnx_model, model_path):
import mlflow.onnx
conda_env = dict(mlflow.onnx.get_default_conda_env())
conda_env["dependencies"].append("pytest")
mlflow.onnx.save_model(onnx_model=onnx_model, path=model_path, conda_env=conda_env)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
assert os.path.exists(saved_conda_env_path)
with open(saved_conda_env_path, "r") as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == conda_env
@pytest.mark.large
def test_model_log_persists_specified_conda_env_in_mlflow_model_directory(
onnx_model, onnx_custom_env
):
import mlflow.onnx
artifact_path = "model"
with mlflow.start_run():
mlflow.onnx.log_model(
onnx_model=onnx_model, artifact_path=artifact_path, conda_env=onnx_custom_env
)
model_path = _download_artifact_from_uri(
"runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
assert os.path.exists(saved_conda_env_path)
assert saved_conda_env_path != onnx_custom_env
with open(onnx_custom_env, "r") as f:
onnx_custom_env_parsed = yaml.safe_load(f)
with open(saved_conda_env_path, "r") as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == onnx_custom_env_parsed
@pytest.mark.release
def test_model_save_without_specified_conda_env_uses_default_env_with_expected_dependencies(
onnx_model, model_path
):
import mlflow.onnx
mlflow.onnx.save_model(onnx_model=onnx_model, path=model_path, conda_env=None)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
with open(conda_env_path, "r") as f:
conda_env = yaml.safe_load(f)
assert conda_env == mlflow.onnx.get_default_conda_env()
# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library
@pytest.mark.release
def test_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies(
onnx_model,
):
import mlflow.onnx
artifact_path = "model"
with mlflow.start_run():
mlflow.onnx.log_model(onnx_model=onnx_model, artifact_path=artifact_path, conda_env=None)
model_path = _download_artifact_from_uri(
"runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
with open(conda_env_path, "r") as f:
conda_env = yaml.safe_load(f)
assert conda_env == mlflow.onnx.get_default_conda_env()
@pytest.mark.release
def test_pyfunc_predict_supports_models_with_list_outputs(onnx_sklearn_model, model_path, data):
import mlflow.onnx
x = data[0]
mlflow.onnx.save_model(onnx_sklearn_model, model_path)
wrapper = mlflow.pyfunc.load_model(model_path)
wrapper.predict(pd.DataFrame(x))
| true | true |
f72ababf067d4c75b7546894366ccba2992a76c1 | 8,329 | py | Python | test/functional/feature_proxy.py | KingricharVD/DSW | 7281f6ed5c102687805d2bca707e675cbce7dd4d | [
"MIT"
] | 3 | 2020-10-02T13:11:53.000Z | 2021-11-06T18:02:32.000Z | test/functional/feature_proxy.py | KingricharVD/DSW | 7281f6ed5c102687805d2bca707e675cbce7dd4d | [
"MIT"
] | 3 | 2020-08-06T17:35:37.000Z | 2021-07-22T01:37:56.000Z | test/functional/feature_proxy.py | KingricharVD/DSW | 7281f6ed5c102687805d2bca707e675cbce7dd4d | [
"MIT"
] | 6 | 2020-10-09T16:42:49.000Z | 2021-07-05T20:57:23.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoind with different proxy configuration.
Test plan:
- Start nesteggd's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on nesteggd side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create nesteggds that connect to them
- Manipulate the nesteggds using addnode (onetry) and observe the effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import PivxTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
# Base of the proxy port range; offsets derived from the pid below keep
# parallel test runs from colliding on ports.
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE  # Start after p2p and rpc ports
class ProxyTest(PivxTestFramework):
    """Verify that nodes route outbound connections through SOCKS5 proxies
    according to -proxy / -onion / -proxyrandomize, for IPv4, IPv6, onion
    and DNS-name targets."""
    def set_test_params(self):
        # Four nodes, one per proxy configuration under test (see setup_nodes).
        self.num_nodes = 4
    def setup_nodes(self):
        """Start the SOCKS5 proxy servers and the nodes that point at them."""
        self.have_ipv6 = test_ipv6_local()
        # Create two proxies on different ports
        # ... one unauthenticated
        self.conf1 = Socks5Configuration()
        self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
        self.conf1.unauth = True
        self.conf1.auth = False
        # ... one supporting authenticated and unauthenticated (Tor)
        self.conf2 = Socks5Configuration()
        self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
        self.conf2.unauth = True
        self.conf2.auth = True
        if self.have_ipv6:
            # ... one on IPv6 with similar configuration
            self.conf3 = Socks5Configuration()
            self.conf3.af = socket.AF_INET6
            self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
            self.conf3.unauth = True
            self.conf3.auth = True
        else:
            self.log.warning("Testing without local IPv6 support")
        self.serv1 = Socks5Server(self.conf1)
        self.serv1.start()
        self.serv2 = Socks5Server(self.conf2)
        self.serv2.start()
        if self.have_ipv6:
            self.serv3 = Socks5Server(self.conf3)
            self.serv3.start()
        # Note: proxies are not used to connect to local nodes
        # this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
        args = [
            ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
            ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
            ['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
            []
        ]
        if self.have_ipv6:
            args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
        self.add_nodes(self.num_nodes, extra_args=args)
        self.start_nodes()
    def node_test(self, node, proxies, auth, test_onion=True):
        """Drive outbound connections from ``node`` and assert each SOCKS5
        CONNECT arrives at the expected proxy in ``proxies`` (one proxy per
        target class: IPv4, IPv6, onion, DNS). When ``auth`` is False the
        proxy must receive no credentials. Returns the observed commands."""
        rv = []
        # Test: outgoing IPv4 connection through node
        node.addnode("15.61.23.23:1234", "onetry")
        cmd = proxies[0].queue.get()
        assert(isinstance(cmd, Socks5Command))
        # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
        assert_equal(cmd.addr, b"15.61.23.23")
        assert_equal(cmd.port, 1234)
        if not auth:
            assert_equal(cmd.username, None)
            assert_equal(cmd.password, None)
        rv.append(cmd)
        if self.have_ipv6:
            # Test: outgoing IPv6 connection through node
            node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
            cmd = proxies[1].queue.get()
            assert(isinstance(cmd, Socks5Command))
            # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
            assert_equal(cmd.atyp, AddressType.DOMAINNAME)
            assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
            assert_equal(cmd.port, 5443)
            if not auth:
                assert_equal(cmd.username, None)
                assert_equal(cmd.password, None)
            rv.append(cmd)
        if test_onion:
            # Test: outgoing onion connection through node
            node.addnode("bitcoinostk4e4re.onion:8333", "onetry")
            cmd = proxies[2].queue.get()
            assert(isinstance(cmd, Socks5Command))
            assert_equal(cmd.atyp, AddressType.DOMAINNAME)
            assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
            assert_equal(cmd.port, 8333)
            if not auth:
                assert_equal(cmd.username, None)
                assert_equal(cmd.password, None)
            rv.append(cmd)
        # Test: outgoing DNS name connection through node
        node.addnode("node.noumenon:8333", "onetry")
        cmd = proxies[3].queue.get()
        assert(isinstance(cmd, Socks5Command))
        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
        assert_equal(cmd.addr, b"node.noumenon")
        assert_equal(cmd.port, 8333)
        if not auth:
            assert_equal(cmd.username, None)
            assert_equal(cmd.password, None)
        rv.append(cmd)
        return rv
    def run_test(self):
        """Exercise each node's proxy configuration, then cross-check the
        configuration reported by the getnetworkinfo RPC."""
        # basic -proxy
        self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
        # -proxy plus -onion
        self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
        # -proxy plus -onion, -proxyrandomize
        rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
        # Check that credentials as used for -proxyrandomize connections are unique
        credentials = set((x.username,x.password) for x in rv)
        assert_equal(len(credentials), len(rv))
        if self.have_ipv6:
            # proxy on IPv6 localhost
            self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
        def networks_dict(d):
            # Re-key getnetworkinfo's 'networks' list by network name.
            r = {}
            for x in d['networks']:
                r[x['name']] = x
            return r
        # test RPC getnetworkinfo
        n0 = networks_dict(self.nodes[0].getnetworkinfo())
        for net in ['ipv4','ipv6','onion']:
            assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
            assert_equal(n0[net]['proxy_randomize_credentials'], True)
        assert_equal(n0['onion']['reachable'], True)
        n1 = networks_dict(self.nodes[1].getnetworkinfo())
        for net in ['ipv4','ipv6']:
            assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
            assert_equal(n1[net]['proxy_randomize_credentials'], False)
        assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
        assert_equal(n1['onion']['proxy_randomize_credentials'], False)
        assert_equal(n1['onion']['reachable'], True)
        n2 = networks_dict(self.nodes[2].getnetworkinfo())
        for net in ['ipv4','ipv6','onion']:
            assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
            assert_equal(n2[net]['proxy_randomize_credentials'], True)
        assert_equal(n2['onion']['reachable'], True)
        if self.have_ipv6:
            n3 = networks_dict(self.nodes[3].getnetworkinfo())
            for net in ['ipv4','ipv6']:
                assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
                assert_equal(n3[net]['proxy_randomize_credentials'], False)
            assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
    # Run the proxy functional test when executed as a script.
    ProxyTest().main()
| 41.232673 | 121 | 0.625405 |
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import PivxTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE
class ProxyTest(PivxTestFramework):
    """Test -proxy, -onion and -proxyrandomize node options via local SOCKS5 servers.

    Four nodes are started against three local SOCKS5 proxy configurations:
      node0: -proxy, proxyrandomize=1
      node1: -proxy plus -onion, proxyrandomize=0
      node2: -proxy (authenticated), proxyrandomize=1
      node3: -proxy on IPv6 localhost with -noonion (only when local IPv6 works)
    """
    def set_test_params(self):
        self.num_nodes = 4
    def setup_nodes(self):
        """Start the SOCKS5 servers and the nodes pointed at them."""
        self.have_ipv6 = test_ipv6_local()
        # One port per proxy; offset by pid so parallel test runs don't collide.
        self.conf1 = Socks5Configuration()
        self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
        self.conf1.unauth = True
        self.conf1.auth = False
        self.conf2 = Socks5Configuration()
        self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
        self.conf2.unauth = True
        self.conf2.auth = True
        if self.have_ipv6:
            # IPv6 localhost proxy, authenticated.
            self.conf3 = Socks5Configuration()
            self.conf3.af = socket.AF_INET6
            self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
            self.conf3.unauth = True
            self.conf3.auth = True
        else:
            self.log.warning("Testing without local IPv6 support")
        self.serv1 = Socks5Server(self.conf1)
        self.serv1.start()
        self.serv2 = Socks5Server(self.conf2)
        self.serv2.start()
        if self.have_ipv6:
            self.serv3 = Socks5Server(self.conf3)
            self.serv3.start()
        # Note: proxies are not used to connect to local nodes; this is just a
        # test of the command-line/network-info plumbing.
        args = [
            ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
            ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
            ['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
            []
        ]
        if self.have_ipv6:
            args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
        self.add_nodes(self.num_nodes, extra_args=args)
        self.start_nodes()
    def node_test(self, node, proxies, auth, test_onion=True):
        """Drive IPv4/IPv6/onion/DNS connections through `node` and assert
        each SOCKS5 proxy in `proxies` observed the expected command.

        Returns the list of observed Socks5Command objects."""
        rv = []
        # Test: outgoing IPv4 connection through node
        node.addnode("15.61.23.23:1234", "onetry")
        cmd = proxies[0].queue.get()
        assert(isinstance(cmd, Socks5Command))
        # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME,
        # even if connecting directly to IPv4/IPv6.
        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
        assert_equal(cmd.addr, b"15.61.23.23")
        assert_equal(cmd.port, 1234)
        if not auth:
            assert_equal(cmd.username, None)
            assert_equal(cmd.password, None)
        rv.append(cmd)
        if self.have_ipv6:
            # Test: outgoing IPv6 connection through node
            node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
            cmd = proxies[1].queue.get()
            assert(isinstance(cmd, Socks5Command))
            # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
            assert_equal(cmd.atyp, AddressType.DOMAINNAME)
            assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
            assert_equal(cmd.port, 5443)
            if not auth:
                assert_equal(cmd.username, None)
                assert_equal(cmd.password, None)
            rv.append(cmd)
        if test_onion:
            # Test: outgoing onion connection through node
            node.addnode("bitcoinostk4e4re.onion:8333", "onetry")
            cmd = proxies[2].queue.get()
            assert(isinstance(cmd, Socks5Command))
            assert_equal(cmd.atyp, AddressType.DOMAINNAME)
            assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
            assert_equal(cmd.port, 8333)
            if not auth:
                assert_equal(cmd.username, None)
                assert_equal(cmd.password, None)
            rv.append(cmd)
        # Test: outgoing DNS name connection through node
        node.addnode("node.noumenon:8333", "onetry")
        cmd = proxies[3].queue.get()
        assert(isinstance(cmd, Socks5Command))
        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
        assert_equal(cmd.addr, b"node.noumenon")
        assert_equal(cmd.port, 8333)
        if not auth:
            assert_equal(cmd.username, None)
            assert_equal(cmd.password, None)
        rv.append(cmd)
        return rv
    def run_test(self):
        """Check proxied connections and getnetworkinfo output for every node."""
        # basic -proxy
        self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
        # -proxy plus -onion
        self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
        # -proxy plus -onion, -proxyrandomize
        rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
        # Check that credentials used for -proxyrandomize connections are unique
        credentials = set((x.username,x.password) for x in rv)
        assert_equal(len(credentials), len(rv))
        if self.have_ipv6:
            # proxy on IPv6 localhost
            self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
        def networks_dict(d):
            # Index getnetworkinfo's 'networks' list by network name.
            r = {}
            for x in d['networks']:
                r[x['name']] = x
            return r
        # test RPC getnetworkinfo
        n0 = networks_dict(self.nodes[0].getnetworkinfo())
        for net in ['ipv4','ipv6','onion']:
            assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
            assert_equal(n0[net]['proxy_randomize_credentials'], True)
        assert_equal(n0['onion']['reachable'], True)
        n1 = networks_dict(self.nodes[1].getnetworkinfo())
        for net in ['ipv4','ipv6']:
            assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
            assert_equal(n1[net]['proxy_randomize_credentials'], False)
        assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
        assert_equal(n1['onion']['proxy_randomize_credentials'], False)
        assert_equal(n1['onion']['reachable'], True)
        n2 = networks_dict(self.nodes[2].getnetworkinfo())
        for net in ['ipv4','ipv6','onion']:
            assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
            assert_equal(n2[net]['proxy_randomize_credentials'], True)
        assert_equal(n2['onion']['reachable'], True)
        if self.have_ipv6:
            n3 = networks_dict(self.nodes[3].getnetworkinfo())
            for net in ['ipv4','ipv6']:
                assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
                assert_equal(n3[net]['proxy_randomize_credentials'], False)
            assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
    # Run the proxy functional test when executed as a script.
    ProxyTest().main()
| true | true |
f72abb4a157ff48785fee482319d874695a9722b | 10,815 | py | Python | tensorflow/python/training/tracking/resource.py | EricRemmerswaal/tensorflow | 141ff27877579c81a213fa113bd1b474c1749aca | [
"Apache-2.0"
] | 7 | 2022-03-04T21:14:47.000Z | 2022-03-22T23:07:39.000Z | tensorflow/python/training/tracking/resource.py | EricRemmerswaal/tensorflow | 141ff27877579c81a213fa113bd1b474c1749aca | [
"Apache-2.0"
] | 1 | 2022-03-08T18:28:46.000Z | 2022-03-08T18:37:20.000Z | tensorflow/python/training/tracking/resource.py | EricRemmerswaal/tensorflow | 141ff27877579c81a213fa113bd1b474c1749aca | [
"Apache-2.0"
] | 1 | 2022-03-22T00:45:15.000Z | 2022-03-22T00:45:15.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Definitions for resource-type trackable object classes."""
import contextlib
import copy
import weakref
import six
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import ops
from tensorflow.python.training.tracking import base
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# Stack of ResourceTrackers currently activated via `resource_tracker_scope`.
_RESOURCE_TRACKER_STACK = []
class ResourceTracker(object):
  """Collects trackable resources created while this tracker is active.

  Resources register themselves via `add_resource` (see
  `resource_tracker_scope`) and are kept in registration order.
  """

  __slots__ = ["_tracked"]

  def __init__(self):
    self._tracked = []

  @property
  def resources(self):
    """The resources recorded so far, in registration order."""
    return self._tracked

  def add_resource(self, resource):
    """Record `resource` with this tracker."""
    self._tracked.append(resource)
@tf_contextlib.contextmanager
def resource_tracker_scope(resource_tracker):
  """A context to manage resource trackers.

  Use this in order to collect up all resources created within a block of code.

  Example usage:

  ```python
  resource_tracker = ResourceTracker()
  with resource_tracker_scope(resource_tracker):
    resource = TrackableResource()

  assert resource_tracker.resources == [resource]
  ```

  Args:
    resource_tracker: The passed in ResourceTracker object

  Yields:
    A scope in which the resource_tracker is active.
  """
  global _RESOURCE_TRACKER_STACK
  old = list(_RESOURCE_TRACKER_STACK)
  _RESOURCE_TRACKER_STACK.append(resource_tracker)
  try:
    yield
  finally:
    # Restore the previous tracker stack even if the body raised.
    _RESOURCE_TRACKER_STACK = old
def _make_getter(captured_getter, captured_previous):
"""To avoid capturing loop variables."""
def getter(*args, **kwargs):
return captured_getter(captured_previous, *args, **kwargs)
return getter
class _ResourceMetaclass(type):
  """Metaclass for CapturableResource.

  Overrides instantiation so that construction is routed through the default
  graph's resource-creator stack, letting registered creator scopes intercept
  or customize how the resource object is built.
  """
  def __call__(cls, *args, **kwargs):
    def default_resource_creator(next_creator, *a, **kw):
      # Innermost creator: plain construction; nothing sits below it.
      assert next_creator is None
      obj = cls.__new__(cls, *a, **kw)
      obj.__init__(*a, **kw)
      return obj
    # Wrap each creator registered for this resource type around the default,
    # so the most recently registered creator runs outermost.
    previous_getter = lambda *a, **kw: default_resource_creator(None, *a, **kw)
    resource_creator_stack = ops.get_default_graph()._resource_creator_stack
    for getter in resource_creator_stack[cls._resource_type()]:
      previous_getter = _make_getter(getter, previous_getter)
    return previous_getter(*args, **kwargs)
class CapturableResource(six.with_metaclass(_ResourceMetaclass,
                                            base.Trackable)):
  """Holds a Tensor which a tf.function can capture.

  `CapturableResource`s are discovered by traversing the graph of object
  attributes, e.g. during `tf.saved_model.save`. They are excluded from the
  scope-based tracking of `TrackableResource`; generally things that require
  initialization should inherit from `TrackableResource` instead of
  `CapturableResource` directly.
  """

  def __init__(self, device=""):
    """Initialize the `CapturableResource`.

    Args:
      device: A string indicating a required placement for this resource,
        e.g. "CPU" if this resource must be created on a CPU device. A blank
        device allows the user to place resource creation, so generally this
        should be blank unless the resource only makes sense on one device.
    """
    # Handle is created lazily by the `resource_handle` property.
    self._resource_handle_value = None
    self._resource_device = device
    # Context entered during __del__ so resource destruction happens in the
    # same mode (eager vs. the graph that created the resource).
    self._self_destruction_context = (
        context.eager_mode if context.executing_eagerly()
        else ops.get_default_graph().as_default)

  @classmethod
  def _resource_type(cls):
    # Key used to look up creators in the graph's resource-creator stack.
    return cls.__name__

  @property
  def _destruction_context(self):
    return getattr(self, "_self_destruction_context",
                   # no-op context
                   contextlib.suppress)

  @_destruction_context.setter
  def _destruction_context(self, destruction_context):
    self._self_destruction_context = destruction_context

  def _create_resource(self):
    """A function that creates a resource handle."""
    raise NotImplementedError("TrackableResource._create_resource not "
                              "implemented.")

  @property
  def _resource_handle(self):
    return self._resource_handle_value

  @_resource_handle.setter
  def _resource_handle(self, value):
    if isinstance(value, (ops.Tensor, ops.EagerTensor)):
      # Let the handle tensor point back at its owning trackable.
      value._parent_trackable = weakref.ref(self)  # pylint: disable=protected-access
    self._resource_handle_value = value

  def _initialize(self):
    """A function that initializes the resource. Optional."""
    pass

  def _destroy_resource(self):
    """A function that destroys the resource. Optional."""
    pass

  @property
  def resource_handle(self):
    """Returns the resource handle associated with this Resource."""
    if self._resource_handle is None:
      with ops.device(self._resource_device):
        self._resource_handle = self._create_resource()
    return self._resource_handle

  def _map_resources(self, _):
    """For implementing `Trackable`."""
    # Shallow-copy self and give the copy a freshly created handle; used when
    # exporting so the saved object owns its own resource.
    new_obj = copy.copy(self)
    # pylint: disable=protected-access
    with ops.device(self._resource_device):
      new_resource = new_obj._create_resource()
    new_obj._resource_handle = new_resource
    # pylint: enable=protected-access
    obj_map = {self: new_obj}
    resource_map = {self.resource_handle: new_resource}
    return obj_map, resource_map

  def _trackable_children(self, save_type, **kwargs):
    """Expose create/initialize/destroy as tf.functions for SavedModel."""
    children = super()._trackable_children(save_type, **kwargs)
    if save_type == "savedmodel":
      @def_function.function(input_signature=[], autograph=False)
      def _creator():
        resource = self._create_resource()
        return resource
      @def_function.function(input_signature=[], autograph=False)
      def _initializer():
        self._initialize()
        return 1  # Dummy return
      @def_function.function(input_signature=[], autograph=False)
      def _destroyer():
        self._destroy_resource()
        return 1  # Dummy return
      children.update({
          "_create_resource": _creator,
          "_initialize": _initializer,
          "_destroy_resource": _destroyer,
      })
    return children

  def __del__(self):
    try:
      # Outer race condition: on program exit, the destruction context may be
      # deleted before this __del__ is called. At this point we can safely
      # exit without calling _destroy_resource() and let Python handle things.
      with self._destruction_context():
        # Inner race condition: possible between this and `ScopedTFFunction`
        # whereby if an entire garbage collection chain containing both
        # objects is moved to unreachable during the same garbage collection
        # cycle, the __del__ for `ScopedTFFunction` can be collected before
        # this method is called. In that case, we can't do much but
        # continue.
        self._destroy_resource()
    except Exception:  # pylint: disable=broad-except
      # Silence all error logs that occur when attempting to destroy this
      # resource.
      pass
@tf_export("saved_model.experimental.TrackableResource")
class TrackableResource(CapturableResource):
  """Holds a Tensor which a tf.function can capture.

  A TrackableResource is most useful for stateful Tensors that require
  initialization, such as `tf.lookup.StaticHashTable`. `TrackableResource`s
  are discovered by traversing the graph of object attributes, e.g. during
  `tf.saved_model.save`.

  A TrackableResource has three methods to override:

  * `_create_resource` should create the resource tensor handle.
  * `_initialize` should initialize the resource held at `self.resource_handle`.
  * `_destroy_resource` is called upon a `TrackableResource`'s destruction
    and should decrement the resource's ref count. For most resources, this
    should be done with a call to `tf.raw_ops.DestroyResourceOp`.

  Example usage:

  >>> class DemoResource(tf.saved_model.experimental.TrackableResource):
  ...   def __init__(self):
  ...     super().__init__()
  ...     self._initialize()
  ...   def _create_resource(self):
  ...     return tf.raw_ops.VarHandleOp(dtype=tf.float32, shape=[2])
  ...   def _initialize(self):
  ...     tf.raw_ops.AssignVariableOp(
  ...         resource=self.resource_handle, value=tf.ones([2]))
  ...   def _destroy_resource(self):
  ...     tf.raw_ops.DestroyResourceOp(resource=self.resource_handle)
  >>> class DemoModule(tf.Module):
  ...   def __init__(self):
  ...     self.resource = DemoResource()
  ...   def increment(self, tensor):
  ...     return tensor + tf.raw_ops.ReadVariableOp(
  ...         resource=self.resource.resource_handle, dtype=tf.float32)
  >>> demo = DemoModule()
  >>> demo.increment([5, 1])
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([6., 2.], dtype=float32)>
  """

  def __init__(self, device=""):
    """Initialize the `TrackableResource`.

    Args:
      device: A string indicating a required placement for this resource,
        e.g. "CPU" if this resource must be created on a CPU device. A blank
        device allows the user to place resource creation, so generally this
        should be blank unless the resource only makes sense on one device.
    """
    global _RESOURCE_TRACKER_STACK
    # Register with every active tracker so that scope-based collection
    # (`resource_tracker_scope`) sees this resource.
    for resource_tracker in _RESOURCE_TRACKER_STACK:
      resource_tracker.add_resource(self)
    super(TrackableResource, self).__init__(device=device)
# TODO(b/124205571,b/124092991): Solve destruction of resources.
class RestoredResource(TrackableResource):
  """Restored SavedResource."""

  def __init__(self, device=""):
    super(RestoredResource, self).__init__(device=device)

  @classmethod
  def _deserialize_from_proto(cls, object_proto, dependencies, **unused_kwargs):
    """Recreate a resource object from its SavedObject proto.

    The restored `_create_resource` tf.function (if present among the
    dependencies) replaces the instance's creator.
    """
    obj = cls(device=object_proto.resource.device)
    resource_creator = dependencies.get("_create_resource")
    if resource_creator is not None:
      obj._create_resource = resource_creator  # pylint: disable=protected-access
    return obj

  def _add_trackable_child(self, name, value):
    setattr(self, name, value)
    # Track restored children, except tf.functions which are handled by the
    # function deserialization machinery.
    if (isinstance(value, base.Trackable) and
        not isinstance(value, def_function.Function)):
      self._track_trackable(value, name)
| 34.887097 | 85 | 0.713269 |
import contextlib
import copy
import weakref
import six
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import ops
from tensorflow.python.training.tracking import base
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# Stack of ResourceTrackers currently activated via `resource_tracker_scope`.
_RESOURCE_TRACKER_STACK = []
class ResourceTracker(object):
  """An object that tracks a list of resources."""
  __slots__ = ["_resources"]
  def __init__(self):
    self._resources = []
  @property
  def resources(self):
    """Resources added so far, in registration order."""
    return self._resources
  def add_resource(self, resource):
    """Record `resource` with this tracker."""
    self._resources.append(resource)
@tf_contextlib.contextmanager
def resource_tracker_scope(resource_tracker):
  """Context in which `resource_tracker` collects created resources.

  While the scope is active, every `TrackableResource` constructed registers
  itself with `resource_tracker` (and with any outer trackers on the stack).

  Args:
    resource_tracker: The ResourceTracker to activate.

  Yields:
    A scope in which the resource_tracker is active.
  """
  global _RESOURCE_TRACKER_STACK
  old = list(_RESOURCE_TRACKER_STACK)
  _RESOURCE_TRACKER_STACK.append(resource_tracker)
  try:
    yield
  finally:
    # Restore the previous tracker stack even if the body raised.
    _RESOURCE_TRACKER_STACK = old
def _make_getter(captured_getter, captured_previous):
  """Bind `captured_previous` as the first arg of `captured_getter`.

  Used to build a chain of getters without capturing loop variables.
  """
  def getter(*args, **kwargs):
    return captured_getter(captured_previous, *args, **kwargs)
  return getter
class _ResourceMetaclass(type):
  """Metaclass for CapturableResource: routes construction through the
  default graph's resource-creator stack."""
  def __call__(cls, *args, **kwargs):
    def default_resource_creator(next_creator, *a, **kw):
      # Innermost creator: plain construction; nothing sits below it.
      assert next_creator is None
      obj = cls.__new__(cls, *a, **kw)
      obj.__init__(*a, **kw)
      return obj
    # Wrap each creator registered for this resource type around the default,
    # so the most recently registered creator runs outermost.
    previous_getter = lambda *a, **kw: default_resource_creator(None, *a, **kw)
    resource_creator_stack = ops.get_default_graph()._resource_creator_stack
    for getter in resource_creator_stack[cls._resource_type()]:
      previous_getter = _make_getter(getter, previous_getter)
    return previous_getter(*args, **kwargs)
class CapturableResource(six.with_metaclass(_ResourceMetaclass,
                                            base.Trackable)):
  """Holds a Tensor which a tf.function can capture.

  Discovered by traversing the object-attribute graph (e.g. during
  `tf.saved_model.save`). Things requiring initialization should subclass
  `TrackableResource` instead of this class directly.
  """
  def __init__(self, device=""):
    """Initialize the resource.

    Args:
      device: required placement for the resource ("" lets the user place it).
    """
    # Handle is created lazily by the `resource_handle` property.
    self._resource_handle_value = None
    self._resource_device = device
    # Context entered during __del__ so destruction happens in the same mode
    # (eager vs. the graph that created the resource).
    self._self_destruction_context = (
        context.eager_mode if context.executing_eagerly()
        else ops.get_default_graph().as_default)
  @classmethod
  def _resource_type(cls):
    # Key used to look up creators in the graph's resource-creator stack.
    return cls.__name__
  @property
  def _destruction_context(self):
    return getattr(self, "_self_destruction_context",
                   # no-op context when the attribute is already gone
                   contextlib.suppress)
  @_destruction_context.setter
  def _destruction_context(self, destruction_context):
    self._self_destruction_context = destruction_context
  def _create_resource(self):
    """A function that creates a resource handle."""
    raise NotImplementedError("TrackableResource._create_resource not "
                              "implemented.")
  @property
  def _resource_handle(self):
    return self._resource_handle_value
  @_resource_handle.setter
  def _resource_handle(self, value):
    if isinstance(value, (ops.Tensor, ops.EagerTensor)):
      # Let the handle tensor point back at its owning trackable.
      value._parent_trackable = weakref.ref(self)
    self._resource_handle_value = value
  def _initialize(self):
    """A function that initializes the resource. Optional."""
    pass
  def _destroy_resource(self):
    """A function that destroys the resource. Optional."""
    pass
  @property
  def resource_handle(self):
    """Returns the resource handle associated with this Resource."""
    if self._resource_handle is None:
      with ops.device(self._resource_device):
        self._resource_handle = self._create_resource()
    return self._resource_handle
  def _map_resources(self, _):
    """For implementing `Trackable`: copy self with a fresh handle for export."""
    new_obj = copy.copy(self)
    with ops.device(self._resource_device):
      new_resource = new_obj._create_resource()
    new_obj._resource_handle = new_resource
    obj_map = {self: new_obj}
    resource_map = {self.resource_handle: new_resource}
    return obj_map, resource_map
  def _trackable_children(self, save_type, **kwargs):
    """Expose create/initialize/destroy as tf.functions for SavedModel."""
    children = super()._trackable_children(save_type, **kwargs)
    if save_type == "savedmodel":
      @def_function.function(input_signature=[], autograph=False)
      def _creator():
        resource = self._create_resource()
        return resource
      @def_function.function(input_signature=[], autograph=False)
      def _initializer():
        self._initialize()
        return 1  # dummy return value
      @def_function.function(input_signature=[], autograph=False)
      def _destroyer():
        self._destroy_resource()
        return 1  # dummy return value
      children.update({
          "_create_resource": _creator,
          "_initialize": _initializer,
          "_destroy_resource": _destroyer,
      })
    return children
  def __del__(self):
    try:
      # The destruction context may already be gone on interpreter shutdown;
      # in that case skipping _destroy_resource() is safe.
      with self._destruction_context():
        # continue.
        self._destroy_resource()
    except Exception:  # pylint: disable=broad-except
      # Silence all error logs that occur when attempting to destroy this
      # resource.
      pass
@tf_export("saved_model.experimental.TrackableResource")
class TrackableResource(CapturableResource):
  """Holds a Tensor which a tf.function can capture.

  Like `CapturableResource`, but additionally registers itself with any
  active `ResourceTracker` scopes so initialization-requiring resources
  (e.g. lookup tables) can be collected.
  """
  def __init__(self, device=""):
    """Initialize the resource.

    Args:
      device: required placement for the resource ("" lets the user place it).
    """
    global _RESOURCE_TRACKER_STACK
    # Register with every active tracker (see `resource_tracker_scope`).
    for resource_tracker in _RESOURCE_TRACKER_STACK:
      resource_tracker.add_resource(self)
    super(TrackableResource, self).__init__(device=device)
# TODO(b/124205571,b/124092991): Solve destruction of resources.
class RestoredResource(TrackableResource):
  """Restored SavedResource."""
  def __init__(self, device=""):
    super(RestoredResource, self).__init__(device=device)
  @classmethod
  def _deserialize_from_proto(cls, object_proto, dependencies, **unused_kwargs):
    """Recreate a resource from its SavedObject proto; the restored
    `_create_resource` tf.function (if present) replaces the creator."""
    obj = cls(device=object_proto.resource.device)
    resource_creator = dependencies.get("_create_resource")
    if resource_creator is not None:
      obj._create_resource = resource_creator  # pylint: disable=protected-access
    return obj
  def _add_trackable_child(self, name, value):
    setattr(self, name, value)
    # Track restored children, except tf.functions which are handled by the
    # function deserialization machinery.
    if (isinstance(value, base.Trackable) and
        not isinstance(value, def_function.Function)):
      self._track_trackable(value, name)
| true | true |
f72abbd3ee1fb7ddc9a51049416b8d1194ab3660 | 9,235 | py | Python | remove_code/sotas/SSAH-adversarial-attack-main/utils/fid_score.py | JohnZhang000/adaptive-jpeg-compression | f54e4798c01169812958f4d5539a03927dbdc313 | [
"MIT"
] | 9 | 2022-03-15T02:59:32.000Z | 2022-03-26T09:16:44.000Z | remove_code/sotas/SSAH-adversarial-attack-main/utils/fid_score.py | JohnZhang000/adaptive-jpeg-compression | f54e4798c01169812958f4d5539a03927dbdc313 | [
"MIT"
] | 1 | 2022-03-30T02:59:55.000Z | 2022-03-30T02:59:55.000Z | remove_code/sotas/SSAH-adversarial-attack-main/utils/fid_score.py | JohnZhang000/adaptive-jpeg-compression | f54e4798c01169812958f4d5539a03927dbdc313 | [
"MIT"
] | 1 | 2022-03-20T12:19:26.000Z | 2022-03-20T12:19:26.000Z | """Calculates the Frechet Inception Distance (FID) to evalulate GANs
The FID metric calculates the distance between two distributions of images.
Typically, we have summary statistics (mean & covariance matrix) of one
of these distributions, while the 2nd distribution is given by a GAN.
When run as a stand-alone program, it compares the distribution of
images that are stored as PNG/JPEG at a specified location with a
distribution given by summary statistics (in pickle format).
The FID is calculated by assuming that X_1 and X_2 are the activations of
the pool_3 layer of the inception net for generated samples and real world
samples respectively.
See --help to see further details.
Code apapted from https://github.com/bioinf-jku/TTUR to use PyTorch instead
of Tensorflow
Copyright 2018 Institute of Bioinformatics, JKU Linz
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import pathlib
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
from multiprocessing import cpu_count
import numpy as np
import torch
import torchvision.transforms as TF
from PIL import Image
from scipy import linalg
from torch.nn.functional import adaptive_avg_pool2d
try:
    from tqdm import tqdm
except ImportError:
    # If tqdm is not available, provide a mock version of it
    def tqdm(x):
        # No-op fallback: return the iterable unchanged (no progress bar).
        return x
from utils.inception import InceptionV3
# NOTE(review): leftover debug print of the supported Inception feature
# dimensions; consider removing for library use.
print(InceptionV3.BLOCK_INDEX_BY_DIM)
# Image file extensions accepted when scanning a directory for FID inputs.
IMAGE_EXTENSIONS = {'bmp', 'jpg', 'jpeg', 'pgm', 'png', 'ppm',
                    'tif', 'tiff', 'webp'}
class ImagePathDataset(torch.utils.data.Dataset):
    """Dataset yielding RGB PIL images (optionally transformed) from file paths."""

    def __init__(self, files, transforms=None):
        self.files = files
        self.transforms = transforms

    def __len__(self):
        return len(self.files)

    def __getitem__(self, i):
        # Decode lazily, forcing 3-channel RGB for the Inception network.
        image = Image.open(self.files[i]).convert('RGB')
        return image if self.transforms is None else self.transforms(image)
def get_activations(files, model, batch_size=50, dims=2048, device='cuda'):
    """Calculates the activations of the pool_3 layer for all images.

    Params:
    -- files       : List of image files paths
    -- model       : Instance of inception model
    -- batch_size  : Batch size of images for the model to process at once.
                     Make sure that the number of samples is a multiple of
                     the batch size, otherwise some samples are ignored. This
                     behavior is retained to match the original FID score
                     implementation.
    -- dims        : Dimensionality of features returned by Inception
    -- device      : Device to run calculations

    Returns:
    -- A numpy array of dimension (num images, dims) that contains the
       activations of the given tensor when feeding inception with the
       query tensor.
    """
    model.eval()

    # (Fixed: removed leftover debug `print(len(files), batch_size)`.)
    if batch_size > len(files):
        print(('Warning: batch size is bigger than the data size. '
               'Setting batch size to data size'))
        batch_size = len(files)

    dataset = ImagePathDataset(files, transforms=TF.ToTensor())
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             drop_last=False,
                                             num_workers=cpu_count())

    pred_arr = np.empty((len(files), dims))
    start_idx = 0
    for batch in tqdm(dataloader):
        batch = batch.to(device)

        with torch.no_grad():
            pred = model(batch)[0]

        # If model output is not scalar, apply global spatial average pooling.
        # This happens if you choose a dimensionality not equal 2048.
        if pred.size(2) != 1 or pred.size(3) != 1:
            pred = adaptive_avg_pool2d(pred, output_size=(1, 1))

        pred = pred.squeeze(3).squeeze(2).cpu().numpy()
        pred_arr[start_idx:start_idx + pred.shape[0]] = pred
        start_idx = start_idx + pred.shape[0]

    return pred_arr
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet Distance.

    The Frechet distance between two multivariate Gaussians
    X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) is
            d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
    Stable version by Dougal J. Sutherland.

    Params:
    -- mu1   : activation mean for generated samples.
    -- mu2   : sample mean precalculated on a representative data set.
    -- sigma1: covariance of activations for generated samples.
    -- sigma2: covariance precalculated on a representative data set.
    -- eps   : diagonal jitter added when the covariance product is singular.

    Returns:
    -- : The Frechet Distance.
    """
    mu1, mu2 = np.atleast_1d(mu1), np.atleast_1d(mu2)
    sigma1, sigma2 = np.atleast_2d(sigma1), np.atleast_2d(sigma2)

    assert mu1.shape == mu2.shape, \
        'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, \
        'Training and test covariances have different dimensions'

    mean_diff = mu1 - mu2

    # Matrix square root of the covariance product; may be (near-)singular.
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        print(('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps)
        jitter = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + jitter).dot(sigma2 + jitter))

    # Numerical error can introduce a small imaginary component; discard it,
    # but fail loudly if it is not negligible.
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            raise ValueError(
                'Imaginary component {}'.format(np.max(np.abs(covmean.imag))))
        covmean = covmean.real

    return (mean_diff.dot(mean_diff) + np.trace(sigma1) + np.trace(sigma2)
            - 2 * np.trace(covmean))
def calculate_activation_statistics(files, model, batch_size=50, dims=2048,
                                    device='cuda'):
    """Calculation of the statistics used by the FID.

    Params:
    -- files       : List of image files paths
    -- model       : Instance of inception model
    -- batch_size  : Batch size used to split the images for the model;
                     a reasonable value depends on the hardware.
    -- dims        : Dimensionality of features returned by Inception
    -- device      : Device to run calculations

    Returns:
    -- mu    : mean over samples of the pool_3 activations.
    -- sigma : covariance matrix of the pool_3 activations.
    """
    activations = get_activations(files, model, batch_size, dims, device)
    return np.mean(activations, axis=0), np.cov(activations, rowvar=False)
def compute_statistics_of_path(path, model, batch_size, dims, device):
    """Load (.npz archive) or compute (image folder) FID statistics for `path`."""
    if path.endswith('.npz'):
        # Precomputed statistics archive holding 'mu' and 'sigma' arrays.
        with np.load(path) as f:
            return f['mu'][:], f['sigma'][:]
    folder = pathlib.Path(path)
    image_files = sorted(file for ext in IMAGE_EXTENSIONS
                         for file in folder.glob('*.{}'.format(ext)))
    return calculate_activation_statistics(image_files, model, batch_size,
                                           dims, device)
def calculate_fid_given_paths(paths, batch_size, device, dims):
    """Calculates the FID of two paths (image folders or .npz stat files)."""
    print('paths is :', paths)
    for p in paths:
        if not os.path.exists(p):
            raise RuntimeError('Invalid path: %s' % p)

    # Inception block whose output has the requested feature dimensionality.
    model = InceptionV3([InceptionV3.BLOCK_INDEX_BY_DIM[dims]]).to(device)

    stats = [compute_statistics_of_path(p, model, batch_size, dims, device)
             for p in paths[:2]]
    (m1, s1), (m2, s2) = stats
    return calculate_frechet_distance(m1, s1, m2, s2)
def return_fid(path1, path2):
    """Convenience wrapper: FID between two paths with default settings
    (batch size 50, 2048-dim pool_3 features, CUDA when available)."""
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    return calculate_fid_given_paths(paths=[path1, path2],
                                     batch_size=50,
                                     device=device,
                                     dims=2048)
| 35.794574 | 78 | 0.636492 | import os
import pathlib
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
from multiprocessing import cpu_count
import numpy as np
import torch
import torchvision.transforms as TF
from PIL import Image
from scipy import linalg
from torch.nn.functional import adaptive_avg_pool2d
try:
from tqdm import tqdm
except ImportError:
def tqdm(x):
return x
from utils.inception import InceptionV3
# NOTE(review): leftover debug print of the supported Inception feature
# dimensions; consider removing for library use.
print(InceptionV3.BLOCK_INDEX_BY_DIM)
# Image file extensions accepted when scanning a directory for FID inputs.
IMAGE_EXTENSIONS = {'bmp', 'jpg', 'jpeg', 'pgm', 'png', 'ppm',
                    'tif', 'tiff', 'webp'}
class ImagePathDataset(torch.utils.data.Dataset):
    """Dataset yielding RGB PIL images (optionally transformed) from file paths."""
    def __init__(self, files, transforms=None):
        self.files = files
        self.transforms = transforms
    def __len__(self):
        return len(self.files)
    def __getitem__(self, i):
        # Decode lazily, forcing 3-channel RGB for the Inception network.
        path = self.files[i]
        img = Image.open(path).convert('RGB')
        if self.transforms is not None:
            img = self.transforms(img)
        return img
def get_activations(files, model, batch_size=50, dims=2048, device='cuda'):
    """Compute *model* activations for every image in *files*.

    Returns a numpy array of shape ``(len(files), dims)`` with one
    activation row per image, in the order of *files*.
    """
    model.eval()
    print(len(files), batch_size)
    if batch_size > len(files):
        print(('Warning: batch size is bigger than the data size. '
               'Setting batch size to data size'))
        batch_size = len(files)
    # shuffle=False / drop_last=False keep rows aligned with `files`.
    dataset = ImagePathDataset(files, transforms=TF.ToTensor())
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             drop_last=False,
                                             num_workers=cpu_count())
    pred_arr = np.empty((len(files), dims))
    start_idx = 0
    for batch in tqdm(dataloader):
        batch = batch.to(device)
        with torch.no_grad():
            pred = model(batch)[0]
        # If the feature map is not already 1x1, collapse it with global
        # average pooling so every image yields a single `dims` vector.
        if pred.size(2) != 1 or pred.size(3) != 1:
            pred = adaptive_avg_pool2d(pred, output_size=(1, 1))
        pred = pred.squeeze(3).squeeze(2).cpu().numpy()
        pred_arr[start_idx:start_idx + pred.shape[0]] = pred
        start_idx = start_idx + pred.shape[0]
    return pred_arr
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Frechet distance between two multivariate Gaussians.

    Computes ``||mu1 - mu2||^2 + Tr(C1 + C2 - 2*sqrt(C1*C2))`` for the
    Gaussians N(mu1, C1) and N(mu2, C2). *eps* is added to the covariance
    diagonals when the matrix square root is numerically singular.
    """
    mean1, mean2 = np.atleast_1d(mu1), np.atleast_1d(mu2)
    cov1, cov2 = np.atleast_2d(sigma1), np.atleast_2d(sigma2)
    assert mean1.shape == mean2.shape, \
        'Training and test mean vectors have different lengths'
    assert cov1.shape == cov2.shape, \
        'Training and test covariances have different dimensions'
    mean_diff = mean1 - mean2
    covmean, _ = linalg.sqrtm(cov1.dot(cov2), disp=False)
    if not np.isfinite(covmean).all():
        # Product was singular; regularize the diagonals and retry.
        print(('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps)
        offset = np.eye(cov1.shape[0]) * eps
        covmean = linalg.sqrtm((cov1 + offset).dot(cov2 + offset))
    if np.iscomplexobj(covmean):
        # Tiny imaginary parts are numerical noise; large ones are an error.
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            raise ValueError(
                'Imaginary component {}'.format(np.max(np.abs(covmean.imag))))
        covmean = covmean.real
    return (mean_diff.dot(mean_diff) + np.trace(cov1) + np.trace(cov2)
            - 2 * np.trace(covmean))
def calculate_activation_statistics(files, model, batch_size=50, dims=2048,
                                    device='cuda'):
    """Return the mean and covariance of the model activations for *files*."""
    activations = get_activations(files, model, batch_size, dims, device)
    # Statistics are taken over the sample axis (one row per image).
    return np.mean(activations, axis=0), np.cov(activations, rowvar=False)
def compute_statistics_of_path(path, model, batch_size, dims, device):
if path.endswith('.npz'):
with np.load(path) as f:
m, s = f['mu'][:], f['sigma'][:]
else:
path = pathlib.Path(path)
files = sorted([file for ext in IMAGE_EXTENSIONS
for file in path.glob('*.{}'.format(ext))])
m, s = calculate_activation_statistics(files, model, batch_size,
dims, device)
return m, s
def calculate_fid_given_paths(paths, batch_size, device, dims):
print('paths is :', paths)
for p in paths:
if not os.path.exists(p):
raise RuntimeError('Invalid path: %s' % p)
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
model = InceptionV3([block_idx]).to(device)
m1, s1 = compute_statistics_of_path(paths[0], model, batch_size,
dims, device)
m2, s2 = compute_statistics_of_path(paths[1], model, batch_size,
dims, device)
fid_value = calculate_frechet_distance(m1, s1, m2, s2)
return fid_value
def return_fid(path1, path2):
device = torch.device('cuda' if (torch.cuda.is_available()) else 'cpu')
fid_value = calculate_fid_given_paths(paths=[path1, path2],
batch_size=50,
device=device,
dims=2048)
return fid_value
| true | true |
f72abcf519c3d777dae73575160a3505946609c2 | 421 | py | Python | test/command_line/test_plot_Fo_vs_Fc.py | TiankunZhou/dials | bd5c95b73c442cceb1c61b1690fd4562acf4e337 | [
"BSD-3-Clause"
] | 58 | 2015-10-15T09:28:20.000Z | 2022-03-28T20:09:38.000Z | test/command_line/test_plot_Fo_vs_Fc.py | TiankunZhou/dials | bd5c95b73c442cceb1c61b1690fd4562acf4e337 | [
"BSD-3-Clause"
] | 1,741 | 2015-11-24T08:17:02.000Z | 2022-03-31T15:46:42.000Z | test/command_line/test_plot_Fo_vs_Fc.py | TiankunZhou/dials | bd5c95b73c442cceb1c61b1690fd4562acf4e337 | [
"BSD-3-Clause"
] | 45 | 2015-10-14T13:44:16.000Z | 2022-03-22T14:45:56.000Z | import procrunner
def test(dials_data, tmp_path):
    """Run dials.plot_Fo_vs_Fc on a refmac MTZ and check its outputs."""
    mtz_file = dials_data("lysozyme_electron_diffraction").join("refmac_final.mtz")
    result = procrunner.run(
        ["dials.plot_Fo_vs_Fc", "hklin=" + mtz_file.strpath], working_directory=tmp_path
    )
    # The command must exit cleanly with nothing on stderr.
    assert not result.returncode and not result.stderr
    # It should have written the plot into the working directory...
    assert tmp_path.joinpath("Fo_vs_Fc.pdf").is_file()
    # ...and reported the fitted scale factor on stdout.
    assert "|Fe| = 42.0" in result.stdout.decode()
| 35.083333 | 88 | 0.719715 | import procrunner
def test(dials_data, tmp_path):
mtz_file = dials_data("lysozyme_electron_diffraction").join("refmac_final.mtz")
result = procrunner.run(
["dials.plot_Fo_vs_Fc", "hklin=" + mtz_file.strpath], working_directory=tmp_path
)
assert not result.returncode and not result.stderr
assert tmp_path.joinpath("Fo_vs_Fc.pdf").is_file()
assert "|Fe| = 42.0" in result.stdout.decode()
| true | true |
f72ac027d54393cfbc8f4c4a085d814d8add6b01 | 99 | py | Python | algo228/shooter_game.py | voidwalker-so2/vasya228 | cf766ee40341aa46799a461a246fa1f8f24df0ec | [
"BSD-2-Clause"
] | null | null | null | algo228/shooter_game.py | voidwalker-so2/vasya228 | cf766ee40341aa46799a461a246fa1f8f24df0ec | [
"BSD-2-Clause"
] | null | null | null | algo228/shooter_game.py | voidwalker-so2/vasya228 | cf766ee40341aa46799a461a246fa1f8f24df0ec | [
"BSD-2-Clause"
] | null | null | null | #Создай собственный Шутер!
from pygame import *
dfgshfhsdljfvhs
ssdkgvkshdv
sdhvljsdhv
sljgvksjdg
| 12.375 | 26 | 0.848485 |
from pygame import *
dfgshfhsdljfvhs
ssdkgvkshdv
sdhvljsdhv
sljgvksjdg
| true | true |
f72ac04ea85e822cd8063706b8bc88973fb8d216 | 7,842 | py | Python | src/python/pants/backend/jvm/tasks/classpath_util.py | AllClearID/pants | c4fdf00a3bdf9f26f876e85c46909d0729f7132c | [
"Apache-2.0"
] | 1 | 2021-11-11T14:04:24.000Z | 2021-11-11T14:04:24.000Z | src/python/pants/backend/jvm/tasks/classpath_util.py | AllClearID/pants | c4fdf00a3bdf9f26f876e85c46909d0729f7132c | [
"Apache-2.0"
] | 2 | 2016-10-13T21:37:42.000Z | 2018-07-20T20:14:33.000Z | src/python/pants/backend/jvm/tasks/classpath_util.py | AllClearID/pants | c4fdf00a3bdf9f26f876e85c46909d0729f7132c | [
"Apache-2.0"
] | 1 | 2018-03-08T22:21:44.000Z | 2018-03-08T22:21:44.000Z | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import itertools
import os
from collections import OrderedDict
from twitter.common.collections import OrderedSet
from pants.util.contextutil import open_zip
from pants.util.dirutil import fast_relpath, safe_walk
from pants.util.strutil import ensure_text
class ClasspathUtil(object):
  """Utility methods for assembling and inspecting JVM classpaths."""
  @classmethod
  def compute_classpath(cls, targets, classpath_products, extra_classpath_tuples, confs):
    """Return the list of classpath entries for a classpath covering the passed targets.
    Filters and adds paths from extra_classpath_tuples to the end of the resulting list.
    :param targets: The targets to generate a classpath for.
    :param ClasspathProducts classpath_products: Product containing classpath elements.
    :param extra_classpath_tuples: Additional classpath entries.
    :param confs: The list of confs for use by this classpath.
    :returns: The classpath as a list of path elements.
    :rtype: list of string
    """
    classpath_iter = cls._classpath_iter(targets, classpath_products, confs=confs)
    # OrderedSet de-duplicates entries while preserving first-seen order.
    total_classpath = OrderedSet(classpath_iter)
    filtered_extra_classpath_iter = cls._filtered_classpath_by_confs_iter(extra_classpath_tuples,
                                                                          confs)
    extra_classpath_iter = cls._entries_iter(filtered_extra_classpath_iter)
    total_classpath.update(extra_classpath_iter)
    return list(total_classpath)
  @classmethod
  def classpath(cls, targets, classpath_products, confs=('default',)):
    """Return the classpath as a list of paths covering all the passed targets.
    :param targets: Targets to build an aggregated classpath for.
    :param ClasspathProducts classpath_products: Product containing classpath elements.
    :param confs: The list of confs for use by this classpath.
    :returns: The classpath as a list of path elements.
    :rtype: list of string
    """
    classpath_iter = cls._classpath_iter(targets, classpath_products, confs=confs)
    return list(classpath_iter)
  @classmethod
  def _classpath_iter(cls, targets, classpath_products, confs=('default',)):
    # Yield the conf-filtered classpath entries for `targets`.
    classpath_tuples = classpath_products.get_for_targets(targets)
    filtered_tuples_iter = cls._filtered_classpath_by_confs_iter(classpath_tuples, confs)
    return cls._entries_iter(filtered_tuples_iter)
  @classmethod
  def internal_classpath(cls, targets, classpath_products, confs=('default',)):
    """Return the list of internal classpath entries for a classpath covering all `targets`.
    Any classpath entries contributed by external dependencies will be omitted.
    :param targets: Targets to build an aggregated classpath for.
    :param ClasspathProducts classpath_products: Product containing classpath elements.
    :param confs: The list of confs for use by this classpath.
    :returns: The classpath as a list of path elements.
    :rtype: list of string
    """
    classpath_tuples = classpath_products.get_internal_classpath_entries_for_targets(targets)
    filtered_tuples_iter = cls._filtered_classpath_by_confs_iter(classpath_tuples, confs)
    return [entry.path for entry in cls._entries_iter(filtered_tuples_iter)]
  @classmethod
  def classpath_by_targets(cls, targets, classpath_products, confs=('default',)):
    """Return classpath entries grouped by their targets for the given `targets`.
    :param targets: The targets to lookup classpath products for.
    :param ClasspathProducts classpath_products: Product containing classpath elements.
    :param confs: The list of confs for use by this classpath.
    :returns: The ordered (target, classpath) mappings.
    :rtype: OrderedDict
    """
    classpath_target_tuples = classpath_products.get_product_target_mappings_for_targets(targets)
    # NOTE: itertools.ifilter exists only on Python 2, which this module targets.
    filtered_items_iter = itertools.ifilter(cls._accept_conf_filter(confs, lambda x: x[0][0]),
                                            classpath_target_tuples)
    # group (classpath_entry, target) tuples by targets
    target_to_classpath = OrderedDict()
    for classpath_entry, target in filtered_items_iter:
      _, entry = classpath_entry
      if not target in target_to_classpath:
        target_to_classpath[target] = []
      target_to_classpath[target].append(entry)
    return target_to_classpath
  @classmethod
  def _accept_conf_filter(cls, confs, unpack_func=None):
    # Build a predicate accepting items whose conf (extracted via
    # `unpack_func`) is in `confs`; `confs` of None accepts everything.
    def accept_conf_in_item(item):
      conf = unpack_func(item)
      return confs is None or conf in confs
    # Assigned after the closure definition; works because the closure
    # resolves `unpack_func` lazily at call time.
    unpack_func = unpack_func or (lambda x: x)
    return accept_conf_in_item
  @classmethod
  def _filtered_classpath_by_confs_iter(cls, classpath_tuples, confs):
    # Lazily filter (conf, entry) tuples down to the requested confs.
    filter_func = cls._accept_conf_filter(confs, unpack_func=lambda x: x[0])
    return itertools.ifilter(filter_func, classpath_tuples)
  @classmethod
  def _entries_iter(cls, classpath):
    # Strip the conf component from (conf, entry) tuples.
    for conf, entry in classpath:
      yield entry
  @classmethod
  def classpath_contents(cls, targets, classpath_products, confs=('default',)):
    """Provide a generator over the contents (classes/resources) of a classpath.
    :param targets: Targets to iterate the contents classpath for.
    :param ClasspathProducts classpath_products: Product containing classpath elements.
    :param confs: The list of confs for use by this classpath.
    :returns: An iterator over all classpath contents, one directory, class or resource relative
              path per iteration step.
    :rtype: :class:`collections.Iterator` of string
    """
    classpath_iter = cls._classpath_iter(targets, classpath_products, confs=confs)
    for f in cls.classpath_entries_contents(classpath_iter):
      yield f
  @classmethod
  def classpath_entries_contents(cls, classpath_entries):
    """Provide a generator over the contents (classes/resources) of a classpath.
    Subdirectories are included and differentiated via a trailing forward slash (for symmetry
    across ZipFile.namelist and directory walks).
    :param classpath_entries: A sequence of classpath_entries. Non-jars/dirs are ignored.
    :returns: An iterator over all classpath contents, one directory, class or resource relative
              path per iteration step.
    :rtype: :class:`collections.Iterator` of string
    """
    for entry in classpath_entries:
      if cls.is_jar(entry):
        # Walk the jar namelist.
        with open_zip(entry, mode='r') as jar:
          for name in jar.namelist():
            yield ensure_text(name)
      elif os.path.isdir(entry):
        # Walk the directory, including subdirs.
        def rel_walk_name(abs_sub_dir, name):
          return fast_relpath(os.path.join(abs_sub_dir, name), entry)
        for abs_sub_dir, dirnames, filenames in safe_walk(entry):
          for name in dirnames:
            yield '{}/'.format(rel_walk_name(abs_sub_dir, name))
          for name in filenames:
            yield rel_walk_name(abs_sub_dir, name)
      else:
        # non-jar and non-directory classpath entries should be ignored
        pass
  @classmethod
  def classname_for_rel_classfile(cls, class_file_name):
    """Return the class name for the given relative-to-a-classpath-entry file, or None."""
    if not class_file_name.endswith('.class'):
      return None
    return class_file_name[:-len('.class')].replace('/', '.')
  @classmethod
  def is_jar(cls, path):
    """True if the given path represents an existing jar or zip file."""
    return path.endswith(('.jar', '.zip')) and os.path.isfile(path)
  @classmethod
  def is_dir(cls, path):
    """True if the given path represents an existing directory."""
    return os.path.isdir(path)
| 43.087912 | 97 | 0.733614 |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import itertools
import os
from collections import OrderedDict
from twitter.common.collections import OrderedSet
from pants.util.contextutil import open_zip
from pants.util.dirutil import fast_relpath, safe_walk
from pants.util.strutil import ensure_text
class ClasspathUtil(object):
@classmethod
def compute_classpath(cls, targets, classpath_products, extra_classpath_tuples, confs):
classpath_iter = cls._classpath_iter(targets, classpath_products, confs=confs)
total_classpath = OrderedSet(classpath_iter)
filtered_extra_classpath_iter = cls._filtered_classpath_by_confs_iter(extra_classpath_tuples,
confs)
extra_classpath_iter = cls._entries_iter(filtered_extra_classpath_iter)
total_classpath.update(extra_classpath_iter)
return list(total_classpath)
@classmethod
def classpath(cls, targets, classpath_products, confs=('default',)):
classpath_iter = cls._classpath_iter(targets, classpath_products, confs=confs)
return list(classpath_iter)
@classmethod
def _classpath_iter(cls, targets, classpath_products, confs=('default',)):
classpath_tuples = classpath_products.get_for_targets(targets)
filtered_tuples_iter = cls._filtered_classpath_by_confs_iter(classpath_tuples, confs)
return cls._entries_iter(filtered_tuples_iter)
@classmethod
def internal_classpath(cls, targets, classpath_products, confs=('default',)):
classpath_tuples = classpath_products.get_internal_classpath_entries_for_targets(targets)
filtered_tuples_iter = cls._filtered_classpath_by_confs_iter(classpath_tuples, confs)
return [entry.path for entry in cls._entries_iter(filtered_tuples_iter)]
@classmethod
def classpath_by_targets(cls, targets, classpath_products, confs=('default',)):
classpath_target_tuples = classpath_products.get_product_target_mappings_for_targets(targets)
filtered_items_iter = itertools.ifilter(cls._accept_conf_filter(confs, lambda x: x[0][0]),
classpath_target_tuples)
target_to_classpath = OrderedDict()
for classpath_entry, target in filtered_items_iter:
_, entry = classpath_entry
if not target in target_to_classpath:
target_to_classpath[target] = []
target_to_classpath[target].append(entry)
return target_to_classpath
@classmethod
def _accept_conf_filter(cls, confs, unpack_func=None):
def accept_conf_in_item(item):
conf = unpack_func(item)
return confs is None or conf in confs
unpack_func = unpack_func or (lambda x: x)
return accept_conf_in_item
@classmethod
def _filtered_classpath_by_confs_iter(cls, classpath_tuples, confs):
filter_func = cls._accept_conf_filter(confs, unpack_func=lambda x: x[0])
return itertools.ifilter(filter_func, classpath_tuples)
@classmethod
def _entries_iter(cls, classpath):
for conf, entry in classpath:
yield entry
@classmethod
def classpath_contents(cls, targets, classpath_products, confs=('default',)):
classpath_iter = cls._classpath_iter(targets, classpath_products, confs=confs)
for f in cls.classpath_entries_contents(classpath_iter):
yield f
@classmethod
def classpath_entries_contents(cls, classpath_entries):
for entry in classpath_entries:
if cls.is_jar(entry):
with open_zip(entry, mode='r') as jar:
for name in jar.namelist():
yield ensure_text(name)
elif os.path.isdir(entry):
def rel_walk_name(abs_sub_dir, name):
return fast_relpath(os.path.join(abs_sub_dir, name), entry)
for abs_sub_dir, dirnames, filenames in safe_walk(entry):
for name in dirnames:
yield '{}/'.format(rel_walk_name(abs_sub_dir, name))
for name in filenames:
yield rel_walk_name(abs_sub_dir, name)
else:
pass
@classmethod
def classname_for_rel_classfile(cls, class_file_name):
if not class_file_name.endswith('.class'):
return None
return class_file_name[:-len('.class')].replace('/', '.')
@classmethod
def is_jar(cls, path):
return path.endswith(('.jar', '.zip')) and os.path.isfile(path)
@classmethod
def is_dir(cls, path):
return os.path.isdir(path)
| true | true |
f72ac0bed67e590b7732695c441a21acb5828469 | 2,176 | py | Python | Slider_Trinkey/Hue_Brightness_Python_Code/Hue_Brightness_Python_code.py | albinger/Adafruit_Learning_System_Guides | 4fe2da261fe5d1ca282b86bd3b93ee1466346fa7 | [
"MIT"
] | null | null | null | Slider_Trinkey/Hue_Brightness_Python_Code/Hue_Brightness_Python_code.py | albinger/Adafruit_Learning_System_Guides | 4fe2da261fe5d1ca282b86bd3b93ee1466346fa7 | [
"MIT"
] | null | null | null | Slider_Trinkey/Hue_Brightness_Python_Code/Hue_Brightness_Python_code.py | albinger/Adafruit_Learning_System_Guides | 4fe2da261fe5d1ca282b86bd3b93ee1466346fa7 | [
"MIT"
] | null | null | null | # SPDX-FileCopyrightText: 2021 Kattni Rembor for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
Slider Trinkey Hue Brightness Python Example
(Requires Hue and Monitor Brightness CircuitPython example to be running on the Slider Trinkey)
"""
import sys
from phue import Bridge
import serial
from serial.tools import list_ports

# Update this to the room, zone or individual lamp you want to control.
LAMP_OR_GROUP_NAME = "Office"
# Update this to the IP address of your Hue Bridge.
b = Bridge("0.0.0.0")

# Scan the serial ports for the Slider Trinkey (USB product id 0x8102).
slider_trinkey_port = None
trinkey = None
for p in list_ports.comports(include_links=False):
    if p.pid is None:
        # Ports without a USB PID cannot be the Trinkey; keep scanning.
        continue
    print("Port:", p.device, "-", hex(p.pid), end="\t")
    if p.pid == 0x8102:
        slider_trinkey_port = p
        print("Found Slider Trinkey!")
        trinkey = serial.Serial(p.device)
        break

# Bug fix: the original aborted inside the loop as soon as it met any port
# without a PID, even if the Trinkey was attached on a later port. Only give
# up once the whole scan has finished without a match.
if trinkey is None:
    print("Did not find Slider Trinkey port :(")
    sys.exit()

# If the app is not registered and the button on the Hue Bridge is not
# pressed, press the button and call connect() (only needs to run once).
b.connect()
b.get_api()

is_group = False
light = None
# First, check if it's a group name.
for group_data in b.get_group().values():
    if group_data["name"] == LAMP_OR_GROUP_NAME:
        print("Found group with name", LAMP_OR_GROUP_NAME)
        is_group = True
# If it's not a group, find the lamp by name.
if not is_group:
    light_names = b.get_light_objects("name")
    light = light_names[LAMP_OR_GROUP_NAME]
    print("Found light with name", LAMP_OR_GROUP_NAME)

current_brightness = None
while True:
    reading = trinkey.readline().decode("utf-8")
    if not reading.startswith("Slider: "):
        continue
    # Convert the Slider Trinkey output value of 0-100 to Hue's 0-254.
    brightness_value = int((float(reading.split(": ")[1]) / 100) * 254)
    # Only send an update when the slider actually moved.
    if brightness_value != current_brightness:
        print("Setting brightness to:", brightness_value)
        if is_group:
            b.set_group(LAMP_OR_GROUP_NAME, {"bri": brightness_value})
        else:
            light.brightness = brightness_value
        current_brightness = brightness_value
| 31.085714 | 96 | 0.688879 |
import sys
from phue import Bridge
import serial
from serial.tools import list_ports
LAMP_OR_GROUP_NAME = "Office"
b = Bridge("0.0.0.0")
slider_trinkey_port = None
ports = list_ports.comports(include_links=False)
for p in ports:
if p.pid is not None:
print("Port:", p.device, "-", hex(p.pid), end="\t")
if p.pid == 0x8102:
slider_trinkey_port = p
print("Found Slider Trinkey!")
trinkey = serial.Serial(p.device)
break
else:
print("Did not find Slider Trinkey port :(")
sys.exit()
b.connect()
b.get_api()
is_group = False
light = None
for group_data in b.get_group().values():
if group_data["name"] == LAMP_OR_GROUP_NAME:
print("Found group with name", LAMP_OR_GROUP_NAME)
is_group = True
# If it's not a group, find the lamp by name.
if not is_group:
light_names = b.get_light_objects("name")
light = light_names[LAMP_OR_GROUP_NAME]
print("Found light with name", LAMP_OR_GROUP_NAME)
current_brightness = None
while True:
x = trinkey.readline().decode("utf-8")
if not x.startswith("Slider: "):
continue
brightness_value = int((float(x.split(": ")[1]) / 100) * 254)
if current_brightness is None or brightness_value != current_brightness:
print("Setting brightness to:", brightness_value)
if is_group:
b.set_group(LAMP_OR_GROUP_NAME, {"bri": brightness_value})
else:
light.brightness = brightness_value
current_brightness = brightness_value
| true | true |
f72ac1123188353e94ecd664682ea810ce628d26 | 2,748 | py | Python | mira/auth.py | Bl4ck4/mira-1 | 2b907c1a4c09585f0c68223e0435cc7414eab3c5 | [
"MIT"
] | null | null | null | mira/auth.py | Bl4ck4/mira-1 | 2b907c1a4c09585f0c68223e0435cc7414eab3c5 | [
"MIT"
] | null | null | null | mira/auth.py | Bl4ck4/mira-1 | 2b907c1a4c09585f0c68223e0435cc7414eab3c5 | [
"MIT"
] | 1 | 2021-10-02T10:36:21.000Z | 2021-10-02T10:36:21.000Z | """Mira 2020."""
import functools
import requests
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for
)
from werkzeug.security import check_password_hash, generate_password_hash
BLUEPRINT = Blueprint('auth', __name__, url_prefix='/auth')
@BLUEPRINT.route('/login', methods = ['GET', 'POST'])
def login():
    """Log a user in via the backend API and store the session tokens.

    On GET, renders the login form. On POST, validates the form, calls the
    backend /login endpoint and, on success, stores the returned access and
    refresh tokens in the session before redirecting to the index page.
    """
    error = ""
    if request.method == 'POST':
        email = request.form['email']
        password = request.form['password']
        if not email:
            error = 'Email is required.'
        elif not password:
            error = 'Password is required.'
        # Bug fix: the original used `error is ""` (identity, not equality),
        # called the API even when validation had already failed, and then
        # overwrote the validation message with the API response message.
        if not error:
            data = {"email": email, "password": password}
            response = requests.post("http://localhost:5000/login", json=data)
            payload = response.json()
            if payload.get('status') == "success":
                tokens = payload.get('data')
                session.clear()
                session['access_token'] = tokens.get('access_token')
                session['refresh_token'] = tokens.get('refresh_token')
                return redirect(url_for('index'))
            error = payload.get('message')
    return render_template('auth/login.html', error=error)
@BLUEPRINT.route('/register', methods = ['GET', 'POST'])
def register():
    """Register a new user via the backend API.

    On GET, renders the registration form. On POST, validates the form and
    forwards it to the backend /register endpoint; on success the user is
    redirected to the login page, otherwise the API error is shown.
    """
    error = ""
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        email = request.form['email']
        if not username:
            error = 'Username is required.'
        elif not email:
            error = 'Email is required.'
        elif not password:
            error = 'Password is required.'
        # Bug fix: was `error is ""` — an identity comparison against a string
        # literal, which is implementation-dependent; use truthiness instead.
        if not error:
            data = {"username": username, "email": email, "password": password}
            response = requests.post("http://localhost:5000/register", json=data)
            payload = response.json()
            if payload.get("status") == "success":
                return redirect(url_for('auth.login'))
            error = payload.get("message")
    return render_template('auth/register.html', error=error)
@BLUEPRINT.route('/forgot_password', methods = ['GET', 'POST'])
def forgot_password():
    """Render the password-recovery page.

    NOTE(review): no recovery logic is implemented yet — POST is accepted by
    the route but the submitted form data is never read.
    """
    return render_template('auth/forgot_password.html')
@BLUEPRINT.route('/logout')
def logout():
    """Destroy and clear session of logged in user."""
    # Dropping the session discards the stored access/refresh tokens.
    session.clear()
    return redirect(url_for('auth.login'))
def login_required(view):
    """Decorator for views that require a logged-in user.

    Anonymous visitors (``g.user`` is None) are redirected to the login
    page; otherwise the wrapped view runs normally.
    """
    # Bug fix: was `@funtools.wraps(view)` — a NameError (typo for the
    # `functools` module imported at the top of this file).
    @functools.wraps(view)
    def wrapped_view(**kwargs):
        if g.user is None:
            return redirect(url_for('auth.login'))
        return view(**kwargs)
    return wrapped_view
| 31.953488 | 81 | 0.612445 | import functools
import requests
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for
)
from werkzeug.security import check_password_hash, generate_password_hash
BLUEPRINT = Blueprint('auth', __name__, url_prefix='/auth')
@BLUEPRINT.route('/login', methods = ['GET', 'POST'])
def login():
error = ""
if request.method == 'POST':
email = request.form['email']
password = request.form['password']
if not email:
error = 'Email is required.'
elif not password:
error = 'Password is required.'
data = {"email": email, "password": password}
response = requests.post("http://localhost:5000/login", json=data)
if error is "" and response.json().get('status') == "success":
data = response.json().get('data')
session.clear()
session['access_token'] = data.get('access_token')
session['refresh_token'] = data.get('refresh_token')
return redirect(url_for('index'))
error = response.json().get('message')
return render_template('auth/login.html', error=error)
@BLUEPRINT.route('/register', methods = ['GET', 'POST'])
def register():
error = ""
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
email = request.form['email']
if not username:
error = 'Username is required.'
elif not email:
error = 'Email is required.'
elif not password:
error = 'Password is required.'
if error is "":
data = {"username": username, "email": email, "password": password}
response = requests.post("http://localhost:5000/register", json=data)
if response.json().get("status") == "success":
return redirect(url_for('auth.login'))
error = response.json().get("message")
return render_template('auth/register.html', error=error)
@BLUEPRINT.route('/forgot_password', methods = ['GET', 'POST'])
def forgot_password():
return render_template('auth/forgot_password.html')
@BLUEPRINT.route('/logout')
def logout():
session.clear()
return redirect(url_for('auth.login'))
def login_required(view):
@funtools.wraps(view)
def wrapped_view(**kwargs):
if g.user is None:
return redirect(url_for('auth.login'))
return view(**kwargs)
return wrapped_view
| true | true |
f72ac2161ec154a6fbc2d4c0db4116346291b457 | 9,690 | py | Python | homeassistant/components/zha/core/discovery.py | twrecked/core | d3ae8a938cdea9b6e0d443c91c37ac3dbbd459ab | [
"Apache-2.0"
] | 2 | 2021-09-13T21:44:02.000Z | 2021-12-17T21:20:51.000Z | homeassistant/components/zha/core/discovery.py | twrecked/core | d3ae8a938cdea9b6e0d443c91c37ac3dbbd459ab | [
"Apache-2.0"
] | 5 | 2021-02-08T20:55:25.000Z | 2022-03-12T00:51:18.000Z | homeassistant/components/zha/core/discovery.py | twrecked/core | d3ae8a938cdea9b6e0d443c91c37ac3dbbd459ab | [
"Apache-2.0"
] | 2 | 2020-11-04T07:40:01.000Z | 2021-09-13T21:44:03.000Z | """Device discovery functions for Zigbee Home Automation."""
from collections import Counter
import logging
from typing import Callable, List, Tuple
from homeassistant import const as ha_const
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity_registry import async_entries_for_device
from homeassistant.helpers.typing import HomeAssistantType
from . import const as zha_const, registries as zha_regs, typing as zha_typing
from .. import ( # noqa: F401 pylint: disable=unused-import,
binary_sensor,
cover,
device_tracker,
fan,
light,
lock,
sensor,
switch,
)
from .channels import base
_LOGGER = logging.getLogger(__name__)
@callback
async def async_add_entities(
    _async_add_entities: Callable,
    entities: List[
        Tuple[
            zha_typing.ZhaEntityType,
            Tuple[str, zha_typing.ZhaDeviceType, List[zha_typing.ChannelType]],
        ]
    ],
) -> None:
    """Instantiate queued entity classes and hand them to Home Assistant.

    Consumes *entities*: the queue is emptied once the entities have been
    added. Does nothing when the queue is empty.
    """
    if entities:
        instantiated = [entity_cls(*ctor_args) for entity_cls, ctor_args in entities]
        _async_add_entities(instantiated, update_before_add=True)
        entities.clear()
class ProbeEndpoint:
    """All discovered channels and entities of an endpoint."""
    def __init__(self):
        """Initialize instance."""
        # Per-device overrides from configuration, keyed by unique id;
        # populated by initialize().
        self._device_configs = {}
    @callback
    def discover_entities(self, channel_pool: zha_typing.ChannelPoolType) -> None:
        """Process an endpoint on a zigpy device."""
        # Device-type discovery runs first so it can claim its channels
        # before the per-cluster fallback inspects what is left over.
        self.discover_by_device_type(channel_pool)
        self.discover_by_cluster_id(channel_pool)
    @callback
    def discover_by_device_type(self, channel_pool: zha_typing.ChannelPoolType) -> None:
        """Create an entity based on the endpoint's ZHA profile/device type."""
        unique_id = channel_pool.unique_id
        # A per-device configuration override may force the component type.
        component = self._device_configs.get(unique_id, {}).get(ha_const.CONF_TYPE)
        if component is None:
            ep_profile_id = channel_pool.endpoint.profile_id
            ep_device_type = channel_pool.endpoint.device_type
            component = zha_regs.DEVICE_CLASS[ep_profile_id].get(ep_device_type)
        if component and component in zha_const.COMPONENTS:
            channels = channel_pool.unclaimed_channels()
            entity_class, claimed = zha_regs.ZHA_ENTITIES.get_entity(
                component, channel_pool.manufacturer, channel_pool.model, channels
            )
            if entity_class is None:
                return
            channel_pool.claim_channels(claimed)
            channel_pool.async_new_entity(component, entity_class, unique_id, claimed)
    @callback
    def discover_by_cluster_id(self, channel_pool: zha_typing.ChannelPoolType) -> None:
        """Create entities for channels left unclaimed, matched by cluster id."""
        items = zha_regs.SINGLE_INPUT_CLUSTER_DEVICE_CLASS.items()
        # Registry keys may be cluster ids (ints) or cluster classes; keep
        # only the class-keyed entries for the isinstance fallback below.
        single_input_clusters = {
            cluster_class: match
            for cluster_class, match in items
            if not isinstance(cluster_class, int)
        }
        remaining_channels = channel_pool.unclaimed_channels()
        for channel in remaining_channels:
            if channel.cluster.cluster_id in zha_regs.CHANNEL_ONLY_CLUSTERS:
                # Channel-only clusters never produce entities; just claim them.
                channel_pool.claim_channels([channel])
                continue
            component = zha_regs.SINGLE_INPUT_CLUSTER_DEVICE_CLASS.get(
                channel.cluster.cluster_id
            )
            if component is None:
                # Fall back to matching on the cluster's class hierarchy.
                for cluster_class, match in single_input_clusters.items():
                    if isinstance(channel.cluster, cluster_class):
                        component = match
                        break
            self.probe_single_cluster(component, channel, channel_pool)
        # until we can get rid off registries
        self.handle_on_off_output_cluster_exception(channel_pool)
    @staticmethod
    def probe_single_cluster(
        component: str,
        channel: zha_typing.ChannelType,
        ep_channels: zha_typing.ChannelPoolType,
    ) -> None:
        """Probe specified cluster for specific component."""
        if component is None or component not in zha_const.COMPONENTS:
            return
        channel_list = [channel]
        unique_id = f"{ep_channels.unique_id}-{channel.cluster.cluster_id}"
        entity_class, claimed = zha_regs.ZHA_ENTITIES.get_entity(
            component, ep_channels.manufacturer, ep_channels.model, channel_list
        )
        if entity_class is None:
            return
        ep_channels.claim_channels(claimed)
        ep_channels.async_new_entity(component, entity_class, unique_id, claimed)
    def handle_on_off_output_cluster_exception(
        self, ep_channels: zha_typing.ChannelPoolType
    ) -> None:
        """Process output clusters of the endpoint."""
        profile_id = ep_channels.endpoint.profile_id
        device_type = ep_channels.endpoint.device_type
        # Remote (controller) device types are handled elsewhere; skip them.
        if device_type in zha_regs.REMOTE_DEVICE_TYPES.get(profile_id, []):
            return
        for cluster_id, cluster in ep_channels.endpoint.out_clusters.items():
            component = zha_regs.SINGLE_OUTPUT_CLUSTER_DEVICE_CLASS.get(
                cluster.cluster_id
            )
            if component is None:
                continue
            # Fall back to the generic channel when no specific one is registered.
            channel_class = zha_regs.ZIGBEE_CHANNEL_REGISTRY.get(
                cluster_id, base.ZigbeeChannel
            )
            channel = channel_class(cluster, ep_channels)
            self.probe_single_cluster(component, channel, ep_channels)
    def initialize(self, hass: HomeAssistantType) -> None:
        """Update device overrides config."""
        zha_config = hass.data[zha_const.DATA_ZHA].get(zha_const.DATA_ZHA_CONFIG, {})
        overrides = zha_config.get(zha_const.CONF_DEVICE_CONFIG)
        if overrides:
            self._device_configs.update(overrides)
class GroupProbe:
"""Determine the appropriate component for a group."""
    def __init__(self):
        """Initialize instance."""
        # _hass is set by initialize(); _unsubs collects the dispatcher
        # unsubscribe callbacks so cleanup() can disconnect them.
        self._hass = None
        self._unsubs = []
    def initialize(self, hass: HomeAssistantType) -> None:
        """Initialize the group probe."""
        self._hass = hass
        # Re-probe a group whenever one of its group entities is removed.
        self._unsubs.append(
            async_dispatcher_connect(
                hass, zha_const.SIGNAL_GROUP_ENTITY_REMOVED, self._reprobe_group
            )
        )
    def cleanup(self):
        """Clean up when zha shuts down: disconnect all dispatcher listeners."""
        # Iterate over a copy because the list is mutated while disconnecting.
        for unsub in self._unsubs[:]:
            unsub()
            self._unsubs.remove(unsub)
def _reprobe_group(self, group_id: int) -> None:
"""Reprobe a group for entities after its members change."""
zha_gateway = self._hass.data[zha_const.DATA_ZHA][zha_const.DATA_ZHA_GATEWAY]
zha_group = zha_gateway.groups.get(group_id)
if zha_group is None:
return
self.discover_group_entities(zha_group)
@callback
def discover_group_entities(self, group: zha_typing.ZhaGroupType) -> None:
"""Process a group and create any entities that are needed."""
# only create a group entity if there are 2 or more members in a group
if len(group.members) < 2:
_LOGGER.debug(
"Group: %s:0x%04x has less than 2 members - skipping entity discovery",
group.name,
group.group_id,
)
return
entity_domains = GroupProbe.determine_entity_domains(self._hass, group)
if not entity_domains:
return
zha_gateway = self._hass.data[zha_const.DATA_ZHA][zha_const.DATA_ZHA_GATEWAY]
for domain in entity_domains:
entity_class = zha_regs.ZHA_ENTITIES.get_group_entity(domain)
if entity_class is None:
continue
self._hass.data[zha_const.DATA_ZHA][domain].append(
(
entity_class,
(
group.get_domain_entity_ids(domain),
f"{domain}_zha_group_0x{group.group_id:04x}",
group.group_id,
zha_gateway.coordinator_zha_device,
),
)
)
async_dispatcher_send(self._hass, zha_const.SIGNAL_ADD_ENTITIES)
@staticmethod
def determine_entity_domains(
hass: HomeAssistantType, group: zha_typing.ZhaGroupType
) -> List[str]:
"""Determine the entity domains for this group."""
entity_domains: List[str] = []
zha_gateway = hass.data[zha_const.DATA_ZHA][zha_const.DATA_ZHA_GATEWAY]
all_domain_occurrences = []
for member in group.members:
if member.device.is_coordinator:
continue
entities = async_entries_for_device(
zha_gateway.ha_entity_registry, member.device.device_id
)
all_domain_occurrences.extend(
[
entity.domain
for entity in entities
if entity.domain in zha_regs.GROUP_ENTITY_DOMAINS
]
)
if not all_domain_occurrences:
return entity_domains
# get all domains we care about if there are more than 2 entities of this domain
counts = Counter(all_domain_occurrences)
entity_domains = [domain[0] for domain in counts.items() if domain[1] >= 2]
_LOGGER.debug(
"The entity domains are: %s for group: %s:0x%04x",
entity_domains,
group.name,
group.group_id,
)
return entity_domains
# Module-level singletons used by the ZHA gateway for discovery.
PROBE = ProbeEndpoint()
GROUP_PROBE = GroupProbe()
| 36.022305 | 88 | 0.637771 |
from collections import Counter
import logging
from typing import Callable, List, Tuple
from homeassistant import const as ha_const
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity_registry import async_entries_for_device
from homeassistant.helpers.typing import HomeAssistantType
from . import const as zha_const, registries as zha_regs, typing as zha_typing
from .. import (
binary_sensor,
cover,
device_tracker,
fan,
light,
lock,
sensor,
switch,
)
from .channels import base
_LOGGER = logging.getLogger(__name__)
@callback
async def async_add_entities(
    _async_add_entities: Callable,
    entities: List[
        Tuple[
            zha_typing.ZhaEntityType,
            Tuple[str, zha_typing.ZhaDeviceType, List[zha_typing.ChannelType]],
        ]
    ],
) -> None:
    """Instantiate queued (entity class, args) pairs and add them to HA."""
    if not entities:
        return
    to_add = [ent_cls(*args) for ent_cls, args in entities]
    # update_before_add=True asks HA to refresh state before exposing entities.
    _async_add_entities(to_add, update_before_add=True)
    # Drain the caller's list so the same entries are not added twice.
    entities.clear()
class ProbeEndpoint:
    """Determine the appropriate entities for an endpoint's channels."""
    def __init__(self):
        """Initialize instance."""
        # Per-unique-id user overrides loaded from the ZHA config entry.
        self._device_configs = {}
    @callback
    def discover_entities(self, channel_pool: zha_typing.ChannelPoolType) -> None:
        """Run device-type discovery, then cluster-id discovery, on the pool."""
        self.discover_by_device_type(channel_pool)
        self.discover_by_cluster_id(channel_pool)
    @callback
    def discover_by_device_type(self, channel_pool: zha_typing.ChannelPoolType) -> None:
        """Create an entity based on the endpoint's device type (or a user override)."""
        unique_id = channel_pool.unique_id
        # A user-configured component override takes precedence.
        component = self._device_configs.get(unique_id, {}).get(ha_const.CONF_TYPE)
        if component is None:
            ep_profile_id = channel_pool.endpoint.profile_id
            ep_device_type = channel_pool.endpoint.device_type
            component = zha_regs.DEVICE_CLASS[ep_profile_id].get(ep_device_type)
        if component and component in zha_const.COMPONENTS:
            channels = channel_pool.unclaimed_channels()
            entity_class, claimed = zha_regs.ZHA_ENTITIES.get_entity(
                component, channel_pool.manufacturer, channel_pool.model, channels
            )
            if entity_class is None:
                return
            channel_pool.claim_channels(claimed)
            channel_pool.async_new_entity(component, entity_class, unique_id, claimed)
    @callback
    def discover_by_cluster_id(self, channel_pool: zha_typing.ChannelPoolType) -> None:
        """Create entities for unclaimed channels keyed by cluster id or class."""
        # Non-int keys in this registry are cluster classes matched by isinstance.
        items = zha_regs.SINGLE_INPUT_CLUSTER_DEVICE_CLASS.items()
        single_input_clusters = {
            cluster_class: match
            for cluster_class, match in items
            if not isinstance(cluster_class, int)
        }
        remaining_channels = channel_pool.unclaimed_channels()
        for channel in remaining_channels:
            if channel.cluster.cluster_id in zha_regs.CHANNEL_ONLY_CLUSTERS:
                # Channel-only clusters never get an entity; just claim them.
                channel_pool.claim_channels([channel])
                continue
            component = zha_regs.SINGLE_INPUT_CLUSTER_DEVICE_CLASS.get(
                channel.cluster.cluster_id
            )
            if component is None:
                for cluster_class, match in single_input_clusters.items():
                    if isinstance(channel.cluster, cluster_class):
                        component = match
                        break
            self.probe_single_cluster(component, channel, channel_pool)
        self.handle_on_off_output_cluster_exception(channel_pool)
    @staticmethod
    def probe_single_cluster(
        component: str,
        channel: zha_typing.ChannelType,
        ep_channels: zha_typing.ChannelPoolType,
    ) -> None:
        """Probe specified cluster for specific component."""
        if component is None or component not in zha_const.COMPONENTS:
            return
        channel_list = [channel]
        unique_id = f"{ep_channels.unique_id}-{channel.cluster.cluster_id}"
        entity_class, claimed = zha_regs.ZHA_ENTITIES.get_entity(
            component, ep_channels.manufacturer, ep_channels.model, channel_list
        )
        if entity_class is None:
            return
        ep_channels.claim_channels(claimed)
        ep_channels.async_new_entity(component, entity_class, unique_id, claimed)
    def handle_on_off_output_cluster_exception(
        self, ep_channels: zha_typing.ChannelPoolType
    ) -> None:
        """Process output clusters of the endpoint."""
        profile_id = ep_channels.endpoint.profile_id
        device_type = ep_channels.endpoint.device_type
        # Remote device types are skipped here.
        if device_type in zha_regs.REMOTE_DEVICE_TYPES.get(profile_id, []):
            return
        for cluster_id, cluster in ep_channels.endpoint.out_clusters.items():
            component = zha_regs.SINGLE_OUTPUT_CLUSTER_DEVICE_CLASS.get(
                cluster.cluster_id
            )
            if component is None:
                continue
            channel_class = zha_regs.ZIGBEE_CHANNEL_REGISTRY.get(
                cluster_id, base.ZigbeeChannel
            )
            channel = channel_class(cluster, ep_channels)
            self.probe_single_cluster(component, channel, ep_channels)
    def initialize(self, hass: HomeAssistantType) -> None:
        """Update device overrides config."""
        zha_config = hass.data[zha_const.DATA_ZHA].get(zha_const.DATA_ZHA_CONFIG, {})
        overrides = zha_config.get(zha_const.CONF_DEVICE_CONFIG)
        if overrides:
            self._device_configs.update(overrides)
class GroupProbe:
    """Determine the appropriate component for a group."""
    def __init__(self):
        """Initialize instance."""
        self._hass = None
        self._unsubs = []
    def initialize(self, hass: HomeAssistantType) -> None:
        """Initialize the group probe."""
        self._hass = hass
        self._unsubs.append(
            async_dispatcher_connect(
                hass, zha_const.SIGNAL_GROUP_ENTITY_REMOVED, self._reprobe_group
            )
        )
    def cleanup(self):
        """Clean up on when zha shuts down."""
        for unsub in self._unsubs[:]:
            unsub()
            self._unsubs.remove(unsub)
    def _reprobe_group(self, group_id: int) -> None:
        """Reprobe a group for entities after its members change."""
        zha_gateway = self._hass.data[zha_const.DATA_ZHA][zha_const.DATA_ZHA_GATEWAY]
        zha_group = zha_gateway.groups.get(group_id)
        if zha_group is None:
            return
        self.discover_group_entities(zha_group)
    @callback
    def discover_group_entities(self, group: zha_typing.ZhaGroupType) -> None:
        """Process a group and create any entities that are needed."""
        # Only create a group entity if there are 2 or more members in a group.
        if len(group.members) < 2:
            _LOGGER.debug(
                "Group: %s:0x%04x has less than 2 members - skipping entity discovery",
                group.name,
                group.group_id,
            )
            return
        entity_domains = GroupProbe.determine_entity_domains(self._hass, group)
        if not entity_domains:
            return
        zha_gateway = self._hass.data[zha_const.DATA_ZHA][zha_const.DATA_ZHA_GATEWAY]
        for domain in entity_domains:
            entity_class = zha_regs.ZHA_ENTITIES.get_group_entity(domain)
            if entity_class is None:
                continue
            self._hass.data[zha_const.DATA_ZHA][domain].append(
                (
                    entity_class,
                    (
                        group.get_domain_entity_ids(domain),
                        f"{domain}_zha_group_0x{group.group_id:04x}",
                        group.group_id,
                        zha_gateway.coordinator_zha_device,
                    ),
                )
            )
        async_dispatcher_send(self._hass, zha_const.SIGNAL_ADD_ENTITIES)
    @staticmethod
    def determine_entity_domains(
        hass: HomeAssistantType, group: zha_typing.ZhaGroupType
    ) -> List[str]:
        """Determine the entity domains for this group."""
        entity_domains: List[str] = []
        zha_gateway = hass.data[zha_const.DATA_ZHA][zha_const.DATA_ZHA_GATEWAY]
        all_domain_occurrences = []
        for member in group.members:
            if member.device.is_coordinator:
                continue
            entities = async_entries_for_device(
                zha_gateway.ha_entity_registry, member.device.device_id
            )
            all_domain_occurrences.extend(
                [
                    entity.domain
                    for entity in entities
                    if entity.domain in zha_regs.GROUP_ENTITY_DOMAINS
                ]
            )
        if not all_domain_occurrences:
            return entity_domains
        # Keep only the domains represented by at least two member entities.
        counts = Counter(all_domain_occurrences)
        entity_domains = [domain[0] for domain in counts.items() if domain[1] >= 2]
        _LOGGER.debug(
            "The entity domains are: %s for group: %s:0x%04x",
            entity_domains,
            group.name,
            group.group_id,
        )
        return entity_domains
# Module-level singletons used by the ZHA gateway for discovery.
PROBE = ProbeEndpoint()
GROUP_PROBE = GroupProbe()
| true | true |
f72ac2c60476f898867047bfebd012f5f4feae2c | 3,209 | py | Python | autocalibration/lib/python2.7/site-packages/matplotlib/tests/test_units.py | prcalopa/reactable-autocalibration | eb67a5b5ee0e50f1effa773f6f3f934b5fda6fcf | [
"MIT"
] | 5 | 2017-11-15T10:33:42.000Z | 2021-11-16T02:21:31.000Z | matplotlib/tests/test_units.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 2 | 2017-10-28T03:30:26.000Z | 2017-10-28T03:31:00.000Z | matplotlib/tests/test_units.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 6 | 2017-11-30T00:34:20.000Z | 2021-05-20T02:58:02.000Z | from matplotlib.cbook import iterable
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison
import matplotlib.units as munits
import numpy as np
try:
# mock in python 3.3+
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
# Basic class that wraps numpy array and has units
class Quantity(object):
    """Minimal pint-like wrapper: a magnitude plus a unit label."""

    def __init__(self, data, units):
        self.magnitude = data
        self.units = units

    def to(self, new_units):
        """Return an equivalent Quantity expressed in *new_units*."""
        if self.units == new_units:
            return Quantity(self.magnitude, self.units)
        # Multiplicative conversion factors keyed by (from, to) unit pairs;
        # an unknown pair raises KeyError, same as the original lookup.
        conversions = {
            ('hours', 'seconds'): 3600,
            ('minutes', 'hours'): 1 / 60,
            ('minutes', 'seconds'): 60,
            ('feet', 'miles'): 1 / 5280.,
            ('feet', 'inches'): 12,
            ('miles', 'inches'): 12 * 5280,
        }
        scale = conversions[self.units, new_units]
        return Quantity(scale * self.magnitude, new_units)

    def __getattr__(self, attr):
        # Delegate unknown attributes (shape, dtype, ...) to the magnitude.
        return getattr(self.magnitude, attr)

    def __getitem__(self, item):
        return Quantity(self.magnitude[item], self.units)

    def __array__(self):
        return np.asarray(self.magnitude)
# Tests that the conversion machinery works properly for classes that
# work as a facade over numpy arrays (like pint)
@image_comparison(baseline_images=['plot_pint'],
                  extensions=['png'], remove_text=False, style='mpl20')
def test_numpy_facade():
    """Exercise matplotlib's unit registry with a numpy-facade Quantity.

    Registers a mocked ConversionInterface for Quantity, plots mixed-unit
    data, retargets the axes' units, and asserts the interface hooks ran.
    """
    # Create an instance of the conversion interface and
    # mock so we can check methods called
    qc = munits.ConversionInterface()
    def convert(value, unit, axis):
        # Accepts a Quantity, an iterable of Quantities (or raw numbers in
        # the axis' current units), or a bare scalar; returns magnitudes.
        if hasattr(value, 'units'):
            return value.to(unit).magnitude
        elif iterable(value):
            try:
                return [v.to(unit).magnitude for v in value]
            except AttributeError:
                return [Quantity(v, axis.get_units()).to(unit).magnitude
                        for v in value]
        else:
            return Quantity(value, axis.get_units()).to(unit).magnitude
    qc.convert = MagicMock(side_effect=convert)
    qc.axisinfo = MagicMock(side_effect=lambda u, a: munits.AxisInfo(label=u))
    qc.default_units = MagicMock(side_effect=lambda x, a: x.units)
    # Register the class
    munits.registry[Quantity] = qc
    # Simple test
    y = Quantity(np.linspace(0, 30), 'miles')
    x = Quantity(np.linspace(0, 5), 'hours')
    fig, ax = plt.subplots()
    fig.subplots_adjust(left=0.15)  # Make space for label
    ax.plot(x, y, 'tab:blue')
    ax.axhline(Quantity(26400, 'feet'), color='tab:red')
    ax.axvline(Quantity(120, 'minutes'), color='tab:green')
    ax.yaxis.set_units('inches')
    ax.xaxis.set_units('seconds')
    assert qc.convert.called
    assert qc.axisinfo.called
    assert qc.default_units.called
# Tests gh-8908
@image_comparison(baseline_images=['plot_masked_units'],
                  extensions=['png'], remove_text=True, style='mpl20')
def test_plot_masked_units():
    """Plotting a masked, unit-carrying array must not crash (gh-8908)."""
    values = np.linspace(-5, 5)
    mask = (values > -2) & (values < 2)
    masked_values = np.ma.array(values, mask=mask)
    fig, ax = plt.subplots()
    ax.plot(Quantity(masked_values, 'meters'))
| 33.778947 | 78 | 0.644126 | from matplotlib.cbook import iterable
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison
import matplotlib.units as munits
import numpy as np
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
class Quantity(object):
    """Basic class that wraps a numpy array and carries units."""
    def __init__(self, data, units):
        self.magnitude = data
        self.units = units
    def to(self, new_units):
        """Convert to `new_units` via a hard-coded (from, to) factor table."""
        factors = {('hours', 'seconds'): 3600, ('minutes', 'hours'): 1 / 60,
                   ('minutes', 'seconds'): 60, ('feet', 'miles'): 1 / 5280.,
                   ('feet', 'inches'): 12, ('miles', 'inches'): 12 * 5280}
        if self.units != new_units:
            mult = factors[self.units, new_units]
            return Quantity(mult * self.magnitude, new_units)
        else:
            return Quantity(self.magnitude, self.units)
    def __getattr__(self, attr):
        # Unknown attributes fall through to the wrapped magnitude.
        return getattr(self.magnitude, attr)
    def __getitem__(self, item):
        return Quantity(self.magnitude[item], self.units)
    def __array__(self):
        return np.asarray(self.magnitude)
@image_comparison(baseline_images=['plot_pint'],
                  extensions=['png'], remove_text=False, style='mpl20')
def test_numpy_facade():
    """Test the conversion machinery with a numpy-facade Quantity class."""
    # Conversion interface whose methods are mocked so calls can be asserted.
    qc = munits.ConversionInterface()
    def convert(value, unit, axis):
        # Accepts a Quantity, an iterable, or a bare scalar; returns magnitudes.
        if hasattr(value, 'units'):
            return value.to(unit).magnitude
        elif iterable(value):
            try:
                return [v.to(unit).magnitude for v in value]
            except AttributeError:
                return [Quantity(v, axis.get_units()).to(unit).magnitude
                        for v in value]
        else:
            return Quantity(value, axis.get_units()).to(unit).magnitude
    qc.convert = MagicMock(side_effect=convert)
    qc.axisinfo = MagicMock(side_effect=lambda u, a: munits.AxisInfo(label=u))
    qc.default_units = MagicMock(side_effect=lambda x, a: x.units)
    # Register the class with matplotlib's unit registry.
    munits.registry[Quantity] = qc
    y = Quantity(np.linspace(0, 30), 'miles')
    x = Quantity(np.linspace(0, 5), 'hours')
    fig, ax = plt.subplots()
    fig.subplots_adjust(left=0.15)  # Make space for label
    ax.plot(x, y, 'tab:blue')
    ax.axhline(Quantity(26400, 'feet'), color='tab:red')
    ax.axvline(Quantity(120, 'minutes'), color='tab:green')
    ax.yaxis.set_units('inches')
    ax.xaxis.set_units('seconds')
    assert qc.convert.called
    assert qc.axisinfo.called
    assert qc.default_units.called
@image_comparison(baseline_images=['plot_masked_units'],
                  extensions=['png'], remove_text=True, style='mpl20')
def test_plot_masked_units():
    """Regression test: plotting a masked Quantity must work (gh-8908)."""
    data = np.linspace(-5, 5)
    data_masked = np.ma.array(data, mask=(data > -2) & (data < 2))
    data_masked_units = Quantity(data_masked, 'meters')
    fig, ax = plt.subplots()
    ax.plot(data_masked_units)
| true | true |
f72ac3357d035fb96b484046450f998989af2f98 | 36,873 | py | Python | src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py | jolinlaw/turicreate | 6b2057dc29533da225d18138e93cc15680eea85d | [
"BSD-3-Clause"
] | null | null | null | src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py | jolinlaw/turicreate | 6b2057dc29533da225d18138e93cc15680eea85d | [
"BSD-3-Clause"
] | null | null | null | src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py | jolinlaw/turicreate | 6b2057dc29533da225d18138e93cc15680eea85d | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright © 2019 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import turicreate as _tc
import numpy as _np
import time as _time
from turicreate.toolkits._model import CustomModel as _CustomModel
from turicreate.toolkits._model import PythonProxy as _PythonProxy
from turicreate.toolkits import evaluation as _evaluation
import turicreate.toolkits._internal_utils as _tkutl
from turicreate.toolkits._main import ToolkitError as _ToolkitError
from turicreate import extensions as _extensions
from .. import _pre_trained_models
# Fixed raster size of the bitmaps the model consumes (see input_image_shape).
BITMAP_WIDTH = 28
BITMAP_HEIGHT = 28
# Fraction of data kept for training when an 'auto' validation split is made.
TRAIN_VALIDATION_SPLIT = .95
def _raise_error_if_not_drawing_classifier_input_sframe(
    dataset, feature, target):
    """
    Validate the SFrame passed to `turicreate.drawing_classifier.create`,
    raising a ToolkitError when a required column is absent or has the
    wrong type, or when the dataset is empty.
    """
    from turicreate.toolkits._internal_utils import _raise_error_if_not_sframe
    _raise_error_if_not_sframe(dataset)
    columns = dataset.column_names()
    if feature not in columns:
        raise _ToolkitError("Feature column '%s' does not exist" % feature)
    if target not in columns:
        raise _ToolkitError("Target column '%s' does not exist" % target)
    feature_type = dataset[feature].dtype
    if feature_type != _tc.Image and feature_type != list:
        raise _ToolkitError(
            "Feature column must contain images"
            " or stroke-based drawings encoded as lists of strokes"
            " where each stroke is a list of points and"
            " each point is stored as a dictionary")
    target_type = dataset[target].dtype
    if target_type != int and target_type != str:
        raise _ToolkitError(
            "Target column contains " + str(target_type)
            + " but it must contain strings or integers to represent"
              " labels for drawings.")
    if not len(dataset):
        raise _ToolkitError("Input Dataset is empty!")
def create(input_dataset, target, feature=None, validation_set='auto',
           warm_start='auto', batch_size=256,
           max_iterations=100, verbose=True):
    """
    Create a :class:`DrawingClassifier` model.
    Parameters
    ----------
    dataset : SFrame
        Input data. The columns named by the ``feature`` and ``target``
        parameters will be extracted for training the drawing classifier.
    target : string
        Name of the column containing the target variable. The values in this
        column must be of string or integer type.
    feature : string optional
        Name of the column containing the input drawings. 'None' (the default)
        indicates the column in `dataset` named "drawing" should be used as the
        feature.
        The feature column can contain both bitmap-based drawings as well as
        stroke-based drawings. Bitmap-based drawing input can be a grayscale
        tc.Image of any size.
        Stroke-based drawing input must be in the following format:
        Every drawing must be represented by a list of strokes, where each
        stroke must be a list of points in the order in which they were drawn
        on the canvas.
        Each point must be a dictionary with two keys, "x" and "y", and their
        respective values must be numerical, i.e. either integer or float.
    validation_set : SFrame optional
        A dataset for monitoring the model's generalization performance.
        The format of this SFrame must be the same as the training set.
        By default this argument is set to 'auto' and a validation set is
        automatically sampled and used for progress printing. If
        validation_set is set to None, then no additional metrics
        are computed. The default value is 'auto'.
    warm_start : string optional
        A string to denote which pretrained model to use. Set to "auto"
        by default which uses a model trained on 245 of the 345 classes in the
        Quick, Draw! dataset. To disable warm start, pass in None to this
        argument. Here is a list of all the pretrained models that
        can be passed in as this argument:
        "auto": Uses quickdraw_245_v0
        "quickdraw_245_v0": Uses a model trained on 245 of the 345 classes in the
        Quick, Draw! dataset.
        None: No Warm Start
    batch_size: int optional
        The number of drawings per training step. If not set, a default
        value of 256 will be used. If you are getting memory errors,
        try decreasing this value. If you have a powerful computer, increasing
        this value may improve performance.
    max_iterations : int optional
        The maximum number of allowed passes through the data. More passes over
        the data can result in a more accurately trained model.
    verbose : bool optional
        If True, print progress updates and model details.
    Returns
    -------
    out : DrawingClassifier
        A trained :class:`DrawingClassifier` model.
    See Also
    --------
    DrawingClassifier
    Examples
    --------
    .. sourcecode:: python
        # Train a drawing classifier model
        >>> model = turicreate.drawing_classifier.create(data)
        # Make predictions on the training set and as column to the SFrame
        >>> data['predictions'] = model.predict(data)
    """
    import mxnet as _mx
    from mxnet import autograd as _autograd
    from ._model_architecture import Model as _Model
    from ._sframe_loader import SFrameClassifierIter as _SFrameClassifierIter
    from .._mxnet import _mxnet_utils
    start_time = _time.time()
    accepted_values_for_warm_start = ["auto", "quickdraw_245_v0", None]
    # @TODO: Should be able to automatically choose number of iterations
    # based on data size: Tracked in Github Issue #1576
    # automatically infer feature column
    if feature is None:
        feature = _tkutl._find_only_drawing_column(input_dataset)
    _raise_error_if_not_drawing_classifier_input_sframe(
        input_dataset, feature, target)
    if batch_size is not None and not isinstance(batch_size, int):
        raise TypeError("'batch_size' must be an integer >= 1")
    if batch_size is not None and batch_size < 1:
        raise ValueError("'batch_size' must be >= 1")
    if max_iterations is not None and not isinstance(max_iterations, int):
        raise TypeError("'max_iterations' must be an integer >= 1")
    if max_iterations is not None and max_iterations < 1:
        raise ValueError("'max_iterations' must be >= 1")
    is_stroke_input = (input_dataset[feature].dtype != _tc.Image)
    # Stroke-based drawings are converted by the C++ extension into the
    # bitmap form the model consumes (presumably 28x28 — see
    # input_image_shape below; confirm in the extension).
    dataset = _extensions._drawing_classifier_prepare_data(
        input_dataset, feature) if is_stroke_input else input_dataset
    iteration = 0
    classes = dataset[target].unique()
    # Sort so the label -> index mapping is deterministic across runs.
    classes = sorted(classes)
    class_to_index = {name: index for index, name in enumerate(classes)}
    validation_set_corrective_string = ("'validation_set' parameter must be "
        + "an SFrame, or None, or must be set to 'auto' for the toolkit to "
        + "automatically create a validation set.")
    if isinstance(validation_set, _tc.SFrame):
        _raise_error_if_not_drawing_classifier_input_sframe(
            validation_set, feature, target)
        is_validation_stroke_input = (validation_set[feature].dtype != _tc.Image)
        validation_dataset = _extensions._drawing_classifier_prepare_data(
            validation_set, feature) if is_validation_stroke_input else validation_set
    elif isinstance(validation_set, str):
        if validation_set == 'auto':
            # Auto-split only when there is enough data for a meaningful
            # validation sample; otherwise silently disable validation.
            if dataset.num_rows() >= 100:
                if verbose:
                    print ( "PROGRESS: Creating a validation set from 5 percent of training data. This may take a while.\n"
                            " You can set ``validation_set=None`` to disable validation tracking.\n")
                dataset, validation_dataset = dataset.random_split(TRAIN_VALIDATION_SPLIT, exact=True)
            else:
                validation_set = None
                validation_dataset = _tc.SFrame()
        else:
            raise _ToolkitError("Unrecognized value for 'validation_set'. "
                + validation_set_corrective_string)
    elif validation_set is None:
        validation_dataset = _tc.SFrame()
    else:
        raise TypeError("Unrecognized type for 'validation_set'."
            + validation_set_corrective_string)
    # Main loader iterates max_iterations epochs; the other two loaders are
    # single-pass and are used only to compute accuracies per iteration.
    train_loader = _SFrameClassifierIter(dataset, batch_size,
                                         feature_column=feature,
                                         target_column=target,
                                         class_to_index=class_to_index,
                                         load_labels=True,
                                         shuffle=True,
                                         iterations=max_iterations)
    train_loader_to_compute_accuracy = _SFrameClassifierIter(dataset, batch_size,
                                         feature_column=feature,
                                         target_column=target,
                                         class_to_index=class_to_index,
                                         load_labels=True,
                                         shuffle=True,
                                         iterations=1)
    validation_loader = _SFrameClassifierIter(validation_dataset, batch_size,
                                              feature_column=feature,
                                              target_column=target,
                                              class_to_index=class_to_index,
                                              load_labels=True,
                                              shuffle=True,
                                              iterations=1)
    if verbose and iteration == 0:
        column_names = ['iteration', 'train_loss', 'train_accuracy', 'time']
        column_titles = ['Iteration', 'Training Loss', 'Training Accuracy', 'Elapsed Time (seconds)']
        if validation_set is not None:
            column_names.insert(3, 'validation_accuracy')
            column_titles.insert(3, 'Validation Accuracy')
        table_printer = _tc.util._ProgressTablePrinter(
            column_names, column_titles)
    ctx = _mxnet_utils.get_mxnet_context(max_devices=batch_size)
    model = _Model(num_classes = len(classes), prefix="drawing_")
    model_params = model.collect_params()
    model_params.initialize(_mx.init.Xavier(), ctx=ctx)
    if warm_start is not None:
        if type(warm_start) is not str:
            raise TypeError("'warm_start' must be a string or None. "
                + "'warm_start' can take in the following values: "
                + str(accepted_values_for_warm_start))
        if warm_start not in accepted_values_for_warm_start:
            raise _ToolkitError("Unrecognized value for 'warm_start': "
                + warm_start + ". 'warm_start' can take in the following "
                + "values: " + str(accepted_values_for_warm_start))
        pretrained_model = _pre_trained_models.DrawingClassifierPreTrainedModel(
            warm_start)
        pretrained_model_params_path = pretrained_model.get_model_path()
        # allow_missing=True tolerates params absent from the checkpoint
        # (presumably the dataset-specific output layer — TODO confirm).
        model.load_params(pretrained_model_params_path,
                          ctx=ctx,
                          allow_missing=True)
    softmax_cross_entropy = _mx.gluon.loss.SoftmaxCrossEntropyLoss()
    model.hybridize()
    trainer = _mx.gluon.Trainer(model.collect_params(), 'adam')
    train_accuracy = _mx.metric.Accuracy()
    validation_accuracy = _mx.metric.Accuracy()
    def get_data_and_label_from_batch(batch):
        # batch.pad marks filler rows appended to complete the final batch;
        # slice them off before distributing across devices.
        if batch.pad is not None:
            size = batch_size - batch.pad
            sliced_data = _mx.nd.slice_axis(batch.data[0], axis=0, begin=0, end=size)
            sliced_label = _mx.nd.slice_axis(batch.label[0], axis=0, begin=0, end=size)
            num_devices = min(sliced_data.shape[0], len(ctx))
            batch_data = _mx.gluon.utils.split_and_load(sliced_data, ctx_list=ctx[:num_devices], even_split=False)
            batch_label = _mx.gluon.utils.split_and_load(sliced_label, ctx_list=ctx[:num_devices], even_split=False)
        else:
            batch_data = _mx.gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
            batch_label = _mx.gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
        return batch_data, batch_label
    def compute_accuracy(accuracy_metric, batch_loader):
        # One full pass over the loader, accumulating into the given metric.
        batch_loader.reset()
        accuracy_metric.reset()
        for batch in batch_loader:
            batch_data, batch_label = get_data_and_label_from_batch(batch)
            outputs = []
            for x, y in zip(batch_data, batch_label):
                if x is None or y is None: continue
                z = model(x)
                outputs.append(z)
            accuracy_metric.update(batch_label, outputs)
    for train_batch in train_loader:
        train_batch_data, train_batch_label = get_data_and_label_from_batch(train_batch)
        with _autograd.record():
            # Inside training scope
            for x, y in zip(train_batch_data, train_batch_label):
                z = model(x)
                # Computes softmax cross entropy loss.
                loss = softmax_cross_entropy(z, y)
                # Backpropagate the error for one iteration.
                loss.backward()
        # Make one step of parameter update. Trainer needs to know the
        # batch size of data to normalize the gradient by 1/batch_size.
        trainer.step(train_batch.data[0].shape[0])
        # calculate training metrics
        train_loss = loss.mean().asscalar()
        train_time = _time.time() - start_time
        if train_batch.iteration > iteration:
            # Compute training accuracy
            compute_accuracy(train_accuracy, train_loader_to_compute_accuracy)
            # Compute validation accuracy
            if validation_set is not None:
                compute_accuracy(validation_accuracy, validation_loader)
            iteration = train_batch.iteration
            if verbose:
                kwargs = { "iteration": iteration,
                           "train_loss": float(train_loss),
                           "train_accuracy": train_accuracy.get()[1],
                           "time": train_time}
                if validation_set is not None:
                    kwargs["validation_accuracy"] = validation_accuracy.get()[1]
                table_printer.print_row(**kwargs)
    state = {
        '_model': model,
        '_class_to_index': class_to_index,
        'num_classes': len(classes),
        'classes': classes,
        'input_image_shape': (1, BITMAP_WIDTH, BITMAP_HEIGHT),
        'batch_size': batch_size,
        'training_loss': train_loss,
        'training_accuracy': train_accuracy.get()[1],
        'training_time': train_time,
        'validation_accuracy': validation_accuracy.get()[1],
        # nan if validation_set=None
        'max_iterations': max_iterations,
        'target': target,
        'feature': feature,
        'num_examples': len(input_dataset)
    }
    return DrawingClassifier(state)
class DrawingClassifier(_CustomModel):
"""
A trained model that is ready to use for classification, and to be
exported to Core ML.
This model should not be constructed directly.
"""
_PYTHON_DRAWING_CLASSIFIER_VERSION = 1
    def __init__(self, state):
        # `state` is the dict produced by `create`; the proxy exposes its
        # entries as attributes (e.g. self.classes, self.feature).
        self.__proxy__ = _PythonProxy(state)
    @classmethod
    def _native_name(cls):
        # Identifier used when persisting/loading this model type.
        return "drawing_classifier"
    def _get_native_state(self):
        """Return a serializable copy of the model state."""
        from .._mxnet import _mxnet_utils
        state = self.__proxy__.get_state()
        mxnet_params = state['_model'].collect_params()
        # Replace the live gluon network with a picklable parameter state.
        state['_model'] = _mxnet_utils.get_gluon_net_params_state(mxnet_params)
        return state
    def _get_version(self):
        # Serialization format version, checked on load.
        return self._PYTHON_DRAWING_CLASSIFIER_VERSION
    @classmethod
    def _load_version(cls, state, version):
        """Reconstruct a DrawingClassifier from serialized `state`."""
        _tkutl._model_version_check(version,
                                    cls._PYTHON_DRAWING_CLASSIFIER_VERSION)
        from ._model_architecture import Model as _Model
        from .._mxnet import _mxnet_utils
        net = _Model(num_classes = len(state['classes']), prefix = 'drawing_')
        ctx = _mxnet_utils.get_mxnet_context(max_devices=state['batch_size'])
        net_params = net.collect_params()
        # Load the stored parameter values into the freshly built network.
        _mxnet_utils.load_net_params_from_state(
            net_params, state['_model'], ctx=ctx
        )
        state['_model'] = net
        # For a model trained on integer classes, when saved and loaded back,
        # the classes are loaded as floats. The following if statement casts
        # the loaded "float" classes back to int.
        if len(state['classes']) > 0 and isinstance(state['classes'][0], float):
            state['classes'] = list(map(int, state['classes']))
        return DrawingClassifier(state)
def __str__(self):
"""
Return a string description of the model to the ``print`` method.
Returns
-------
out : string
A description of the DrawingClassifier.
"""
return self.__repr__()
def __repr__(self):
"""
Returns a string description of the model when the model name is
entered in the terminal.
"""
width = 40
sections, section_titles = self._get_summary_struct()
out = _tkutl._toolkit_repr_print(self, sections, section_titles,
width=width)
return out
def _get_summary_struct(self):
"""
Returns a structured description of the model, including (where
relevant) the schema of the training data, description of the training
data, training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section titles.
The order matches that of the 'sections' object.
"""
model_fields = [
('Number of classes', 'num_classes'),
('Feature column', 'feature'),
('Target column', 'target')
]
training_fields = [
('Training Iterations', 'max_iterations'),
('Training Accuracy', 'training_accuracy'),
('Validation Accuracy', 'validation_accuracy'),
('Training Time', 'training_time'),
('Number of Examples', 'num_examples'),
('Batch Size', 'batch_size'),
('Final Loss (specific to model)', 'training_loss')
]
section_titles = ['Schema', 'Training summary']
return([model_fields, training_fields], section_titles)
    def export_coreml(self, filename, verbose=False):
        """
        Save the model in Core ML format. The Core ML model takes a grayscale
        drawing of fixed size as input and produces two outputs:
        `classLabel` and `labelProbabilities`.
        The first one, `classLabel` is an integer or string (depending on the
        classes the model was trained on) to store the label of the top
        prediction by the model.
        The second one, `labelProbabilities`, is a dictionary with all the
        class labels in the dataset as the keys, and their respective
        probabilities as the values.
        See Also
        --------
        save
        Parameters
        ----------
        filename : string
            The path of the file where we want to save the Core ML model.
        verbose : bool optional
            If True, prints export progress.
        Examples
        --------
        >>> model.export_coreml('drawing_classifier.mlmodel')
        """
        import mxnet as _mx
        from .._mxnet._mxnet_to_coreml import _mxnet_converter
        import coremltools as _coremltools
        batch_size = 1
        image_shape = (batch_size,) + (1, BITMAP_WIDTH, BITMAP_HEIGHT)
        s_image = _mx.sym.Variable(self.feature,
                                   shape=image_shape, dtype=_np.float32)
        # Rebuild the gluon network as a symbolic MXNet Module so the
        # coreml converter can consume it.
        from copy import copy as _copy
        net = _copy(self._model)
        s_ymap = net(s_image)
        mod = _mx.mod.Module(symbol=s_ymap, label_names=None, data_names=[self.feature])
        mod.bind(for_training=False, data_shapes=[(self.feature, image_shape)])
        mod.init_params()
        # Copy the trained gluon parameter values into the Module's
        # arg/aux parameter dicts, overwriting the random init above.
        arg_params, aux_params = mod.get_params()
        net_params = net.collect_params()
        new_arg_params = {}
        for k, param in arg_params.items():
            new_arg_params[k] = net_params[k].data(net_params[k].list_ctx()[0])
        new_aux_params = {}
        for k, param in aux_params.items():
            new_aux_params[k] = net_params[k].data(net_params[k].list_ctx()[0])
        mod.set_params(new_arg_params, new_aux_params)
        coreml_model = _mxnet_converter.convert(mod, mode='classifier',
                                class_labels=self.classes,
                                input_shape=[(self.feature, image_shape)],
                                builder=None, verbose=verbose,
                                preprocessor_args={
                                    'image_input_names': [self.feature],
                                    'image_scale': 1.0/255
                                })
        # Rename the classifier's probability output to '<target>Probabilities'
        # throughout the generated spec.
        DESIRED_OUTPUT_NAME = self.target + "Probabilities"
        spec = coreml_model._spec
        class_label_output_index = 0 if spec.description.output[0].name == "classLabel" else 1
        probabilities_output_index = 1-class_label_output_index
        spec.neuralNetworkClassifier.labelProbabilityLayerName = DESIRED_OUTPUT_NAME
        spec.neuralNetworkClassifier.layers[-1].name = DESIRED_OUTPUT_NAME
        spec.neuralNetworkClassifier.layers[-1].output[0] = DESIRED_OUTPUT_NAME
        spec.description.predictedProbabilitiesName = DESIRED_OUTPUT_NAME
        spec.description.output[probabilities_output_index].name = DESIRED_OUTPUT_NAME
        from turicreate.toolkits import _coreml_utils
        model_type = "drawing classifier"
        spec.description.metadata.shortDescription = _coreml_utils._mlmodel_short_description(model_type)
        spec.description.input[0].shortDescription = self.feature
        spec.description.output[probabilities_output_index].shortDescription = 'Prediction probabilities'
        spec.description.output[class_label_output_index].shortDescription = 'Class Label of Top Prediction'
        from coremltools.models.utils import save_spec as _save_spec
        _save_spec(spec, filename)
def _predict_with_probabilities(self, input_dataset, batch_size=None,
                                verbose=True):
    """
    Predict with probabilities. The core prediction part that both
    `evaluate` and `predict` share.

    Returns an SFrame with two columns: self.target and "probability".
    The column named after self.target contains the label predicted by the
    model for each row; the "probability" column contains, per row, the
    probability vector over all classes (ordered like `self.classes`).
    """
    from .._mxnet import _mxnet_utils
    import mxnet as _mx
    from ._sframe_loader import SFrameClassifierIter as _SFrameClassifierIter
    # Stroke-based input (lists of strokes) is rasterized to bitmaps first;
    # image input is used as-is.
    is_stroke_input = (input_dataset[self.feature].dtype != _tc.Image)
    dataset = _extensions._drawing_classifier_prepare_data(
        input_dataset, self.feature) if is_stroke_input else input_dataset
    batch_size = self.batch_size if batch_size is None else batch_size
    loader = _SFrameClassifierIter(dataset, batch_size,
                                   class_to_index=self._class_to_index,
                                   feature_column=self.feature,
                                   target_column=self.target,
                                   load_labels=False,
                                   shuffle=False,
                                   iterations=1)
    dataset_size = len(dataset)
    ctx = _mxnet_utils.get_mxnet_context()
    index = 0
    last_time = 0
    done = False
    from turicreate import SArrayBuilder
    from array import array
    classes = self.classes
    all_predicted_builder = SArrayBuilder(dtype=type(classes[0]))
    all_probabilities_builder = SArrayBuilder(dtype=array)
    for batch in loader:
        if batch.pad is not None:
            # Drop the padding rows the loader appended to fill the batch.
            size = batch_size - batch.pad
            batch_data = _mx.nd.slice_axis(batch.data[0],
                                           axis=0, begin=0, end=size)
        else:
            batch_data = batch.data[0]
            size = batch_size
        # Spread the batch across the available devices.
        num_devices = min(batch_data.shape[0], len(ctx))
        split_data = _mx.gluon.utils.split_and_load(batch_data, ctx_list=ctx[:num_devices], even_split=False)
        for data in split_data:
            z = self._model(data).asnumpy()
            predicted = list(map(lambda x: classes[x], z.argmax(axis=1)))
            split_length = z.shape[0]
            all_predicted_builder.append_multiple(predicted)
            all_probabilities_builder.append_multiple(z.tolist())
            index += split_length
            # NOTE(review): after the final split `index` equals
            # `dataset_size`, not `dataset_size - 1`, so `done` seems to
            # stay False in most runs -- confirm whether this off-by-one
            # is intended.
            if index == dataset_size - 1:
                done = True
            cur_time = _time.time()
            # Do not print progress if only a few samples are predicted;
            # otherwise throttle printing to at most once every 10 seconds.
            if verbose and (dataset_size >= 5
                            and cur_time > last_time + 10 or done):
                print('Predicting {cur_n:{width}d}/{max_n:{width}d}'.format(
                    cur_n = index + 1,
                    max_n = dataset_size,
                    width = len(str(dataset_size))))
                last_time = cur_time
    return (_tc.SFrame({self.target: all_predicted_builder.close(),
                        'probability': all_probabilities_builder.close()}))
def evaluate(self, dataset, metric='auto', batch_size=None, verbose=True):
    """
    Evaluate the model by predicting on ``dataset`` and comparing against
    the ground-truth target column.

    Parameters
    ----------
    dataset : SFrame
        New observations; must contain the feature and target columns used
        for training. Extra columns are ignored.
    metric : str, optional
        'auto' (all metrics) or one of: 'accuracy', 'auc', 'precision',
        'recall', 'f1_score', 'confusion_matrix', 'roc_curve'.
    batch_size : int, optional
        Prediction batch size.
    verbose : bool, optional
        If True, prints prediction progress.

    Returns
    -------
    out : dict
        Mapping of metric name to its computed value.
    """
    if self.target not in dataset.column_names():
        raise _ToolkitError("Must provide ground truth column, '"
            + self.target + "' in the evaluation dataset.")
    predicted = self._predict_with_probabilities(dataset, batch_size, verbose)
    avail_metrics = ['accuracy', 'auc', 'precision', 'recall',
                     'f1_score', 'confusion_matrix', 'roc_curve']
    _tkutl._check_categorical_option_type(
        'metric', metric, avail_metrics + ['auto'])
    requested = avail_metrics if metric == 'auto' else [metric]
    ground_truth = dataset[self.target]
    # Metrics computed from predicted class labels.
    label_based = {
        'accuracy': _evaluation.accuracy,
        'precision': _evaluation.precision,
        'recall': _evaluation.recall,
        'f1_score': _evaluation.f1_score,
        'confusion_matrix': _evaluation.confusion_matrix,
    }
    # Metrics computed from the full probability vectors.
    prob_based = {
        'auc': _evaluation.auc,
        'roc_curve': _evaluation.roc_curve,
    }
    ret = {}
    for name in requested:
        if name in label_based:
            ret[name] = label_based[name](ground_truth, predicted[self.target])
        else:
            ret[name] = prob_based[name](ground_truth,
                                         predicted['probability'],
                                         index_map=self._class_to_index)
    return ret
def predict_topk(self, dataset, output_type="probability", k=3,
                 batch_size=None):
    """
    Return the top-k predictions for ``dataset`` as an SFrame with columns
    `id`, `class`, and either `probability` or `rank` depending on
    ``output_type``.

    Parameters
    ----------
    dataset : SFrame | SArray | turicreate.Image | list
        Drawings to be classified.
    output_type : {'probability', 'rank'}, optional
        Whether each returned row carries the class probability or its rank.
    k : int, optional
        Number of classes returned per input example.
    batch_size : int, optional
        Prediction batch size.

    Returns
    -------
    out : SFrame
        One row per (example, top-k class) pair.
    """
    _tkutl._check_categorical_option_type("output_type", output_type,
                                          ["probability", "rank"])
    # Validate k and batch_size up front with precise error types.
    if not isinstance(k, int):
        raise TypeError("'k' must be an integer >= 1")
    if k <= 0:
        raise ValueError("'k' must be >= 1")
    if batch_size is not None and not isinstance(batch_size, int):
        raise TypeError("'batch_size' must be an integer >= 1")
    if batch_size is not None and batch_size < 1:
        raise ValueError("'batch_size' must be >= 1")
    prob_vector = self.predict(
        dataset, output_type='probability_vector', batch_size=batch_size)
    classes = self.classes
    # Expand each probability vector into a list of its k best entries,
    # highest probability first.
    if output_type == 'probability':
        expanded = prob_vector.apply(
            lambda p: [{'class': classes[i], 'probability': p[i]}
                       for i in reversed(_np.argsort(p)[-k:])])
    else:
        assert(output_type == 'rank')
        expanded = prob_vector.apply(
            lambda p: [{'class': classes[i], 'rank': r}
                       for r, i in enumerate(reversed(_np.argsort(p)[-k:]))])
    # Flatten the per-example lists into one row per (id, class) pair.
    flattened = _tc.SFrame({'X': expanded})
    flattened = flattened.add_row_number()
    flattened = flattened.stack('X', new_column_name='X')
    return flattened.unpack('X', column_name_prefix='')
def predict(self, data, output_type='class', batch_size=None, verbose=True):
    """
    Predict on an SFrame or SArray of drawings, or on a single drawing
    (a tc.Image bitmap or a stroke list).

    Parameters
    ----------
    data : SFrame | SArray | tc.Image | list
        The drawing(s) to classify. An SFrame must contain the feature
        column used during training; extra columns are ignored.
    output_type : {'probability', 'class', 'probability_vector'}, optional
        'class' returns the most probable label, 'probability' the
        probability of the true class, 'probability_vector' the full
        per-class probability vector (ordered like ``self.classes``).
    batch_size : int, optional
        Prediction batch size.
    verbose : bool, optional
        If True, prints prediction progress.

    Returns
    -------
    out : SArray
        One prediction per input drawing.
    """
    _tkutl._check_categorical_option_type("output_type", output_type,
                                          ["probability", "class", "probability_vector"])
    # Normalize every accepted input form into an SFrame with the feature
    # column, then run the shared prediction core once.
    if isinstance(data, _tc.SFrame):
        frame = data
    elif isinstance(data, _tc.SArray):
        frame = _tc.SFrame({self.feature: data})
    else:
        # Single drawing: wrap it in a one-row column.
        frame = _tc.SFrame({self.feature: [data]})
    predicted = self._predict_with_probabilities(frame, batch_size, verbose)
    if output_type == "class":
        return predicted[self.target]
    if output_type == "probability":
        _class_to_index = self._class_to_index
        target = self.target
        return predicted.apply(
            lambda row: row["probability"][_class_to_index[row[target]]])
    assert (output_type == "probability_vector")
    return predicted["probability"]
| 41.901136 | 123 | 0.609389 |
import turicreate as _tc
import numpy as _np
import time as _time
from turicreate.toolkits._model import CustomModel as _CustomModel
from turicreate.toolkits._model import PythonProxy as _PythonProxy
from turicreate.toolkits import evaluation as _evaluation
import turicreate.toolkits._internal_utils as _tkutl
from turicreate.toolkits._main import ToolkitError as _ToolkitError
from turicreate import extensions as _extensions
from .. import _pre_trained_models
# Input drawings are rasterized to fixed 28x28 grayscale bitmaps.
BITMAP_WIDTH = 28
BITMAP_HEIGHT = 28
# Fraction of rows kept for training when `create` auto-builds a validation
# set from the training data (the remaining 5% become the validation set).
TRAIN_VALIDATION_SPLIT = .95
def _raise_error_if_not_drawing_classifier_input_sframe(
        dataset, feature, target):
    """
    Validate that ``dataset`` is a non-empty SFrame with a usable feature
    column (images or stroke lists) and target column (int or str labels).

    Raises a ToolkitError describing the first violation found.
    """
    from turicreate.toolkits._internal_utils import _raise_error_if_not_sframe
    _raise_error_if_not_sframe(dataset)
    columns = dataset.column_names()
    if feature not in columns:
        raise _ToolkitError("Feature column '%s' does not exist" % feature)
    if target not in columns:
        raise _ToolkitError("Target column '%s' does not exist" % target)
    # Drawings are either rendered images or stroke-based vector input.
    if dataset[feature].dtype not in (_tc.Image, list):
        raise _ToolkitError("Feature column must contain images"
            + " or stroke-based drawings encoded as lists of strokes"
            + " where each stroke is a list of points and"
            + " each point is stored as a dictionary")
    if dataset[target].dtype not in (int, str):
        raise _ToolkitError("Target column contains " + str(dataset[target].dtype)
            + " but it must contain strings or integers to represent"
            + " labels for drawings.")
    if len(dataset) == 0:
        raise _ToolkitError("Input Dataset is empty!")
def create(input_dataset, target, feature=None, validation_set='auto',
           warm_start='auto', batch_size=256,
           max_iterations=100, verbose=True):
    """
    Train a DrawingClassifier on ``input_dataset``.

    Parameters
    ----------
    input_dataset : SFrame
        Training data with a feature column (images or stroke lists) and a
        target column of int/str labels.
    target : str
        Name of the label column.
    feature : str, optional
        Name of the drawing column; auto-detected when None.
    validation_set : SFrame | 'auto' | None, optional
        Validation data; 'auto' splits off 5% of the training data when it
        has at least 100 rows.
    warm_start : str | None, optional
        One of 'auto', 'quickdraw_245_v0', or None (train from scratch).
    batch_size : int, optional
    max_iterations : int, optional
    verbose : bool, optional

    Returns
    -------
    DrawingClassifier
    """
    import mxnet as _mx
    from mxnet import autograd as _autograd
    from ._model_architecture import Model as _Model
    from ._sframe_loader import SFrameClassifierIter as _SFrameClassifierIter
    from .._mxnet import _mxnet_utils
    start_time = _time.time()
    accepted_values_for_warm_start = ["auto", "quickdraw_245_v0", None]
    # ----- input validation -----
    if feature is None:
        feature = _tkutl._find_only_drawing_column(input_dataset)
    _raise_error_if_not_drawing_classifier_input_sframe(
        input_dataset, feature, target)
    if batch_size is not None and not isinstance(batch_size, int):
        raise TypeError("'batch_size' must be an integer >= 1")
    if batch_size is not None and batch_size < 1:
        raise ValueError("'batch_size' must be >= 1")
    if max_iterations is not None and not isinstance(max_iterations, int):
        raise TypeError("'max_iterations' must be an integer >= 1")
    if max_iterations is not None and max_iterations < 1:
        raise ValueError("'max_iterations' must be >= 1")
    # Stroke-based input is rasterized to bitmaps before training.
    is_stroke_input = (input_dataset[feature].dtype != _tc.Image)
    dataset = _extensions._drawing_classifier_prepare_data(
        input_dataset, feature) if is_stroke_input else input_dataset
    iteration = 0
    # Stable, sorted label -> index mapping used by the data loaders.
    classes = dataset[target].unique()
    classes = sorted(classes)
    class_to_index = {name: index for index, name in enumerate(classes)}
    # ----- validation set handling -----
    validation_set_corrective_string = ("'validation_set' parameter must be "
        + "an SFrame, or None, or must be set to 'auto' for the toolkit to "
        + "automatically create a validation set.")
    if isinstance(validation_set, _tc.SFrame):
        _raise_error_if_not_drawing_classifier_input_sframe(
            validation_set, feature, target)
        is_validation_stroke_input = (validation_set[feature].dtype != _tc.Image)
        validation_dataset = _extensions._drawing_classifier_prepare_data(
            validation_set, feature) if is_validation_stroke_input else validation_set
    elif isinstance(validation_set, str):
        if validation_set == 'auto':
            if dataset.num_rows() >= 100:
                if verbose:
                    print ( "PROGRESS: Creating a validation set from 5 percent of training data. This may take a while.\n"
                            " You can set ``validation_set=None`` to disable validation tracking.\n")
                dataset, validation_dataset = dataset.random_split(TRAIN_VALIDATION_SPLIT, exact=True)
            else:
                # Too little data to hold out a meaningful validation set.
                validation_set = None
                validation_dataset = _tc.SFrame()
        else:
            raise _ToolkitError("Unrecognized value for 'validation_set'. "
                + validation_set_corrective_string)
    elif validation_set is None:
        validation_dataset = _tc.SFrame()
    else:
        raise TypeError("Unrecognized type for 'validation_set'."
            + validation_set_corrective_string)
    # ----- data loaders -----
    # Main loader iterates `max_iterations` epochs; the two single-pass
    # loaders below are used only to measure accuracy.
    train_loader = _SFrameClassifierIter(dataset, batch_size,
                                         feature_column=feature,
                                         target_column=target,
                                         class_to_index=class_to_index,
                                         load_labels=True,
                                         shuffle=True,
                                         iterations=max_iterations)
    train_loader_to_compute_accuracy = _SFrameClassifierIter(dataset, batch_size,
                                         feature_column=feature,
                                         target_column=target,
                                         class_to_index=class_to_index,
                                         load_labels=True,
                                         shuffle=True,
                                         iterations=1)
    validation_loader = _SFrameClassifierIter(validation_dataset, batch_size,
                                              feature_column=feature,
                                              target_column=target,
                                              class_to_index=class_to_index,
                                              load_labels=True,
                                              shuffle=True,
                                              iterations=1)
    if verbose and iteration == 0:
        column_names = ['iteration', 'train_loss', 'train_accuracy', 'time']
        column_titles = ['Iteration', 'Training Loss', 'Training Accuracy', 'Elapsed Time (seconds)']
        if validation_set is not None:
            column_names.insert(3, 'validation_accuracy')
            column_titles.insert(3, 'Validation Accuracy')
        table_printer = _tc.util._ProgressTablePrinter(
            column_names, column_titles)
    # ----- model setup -----
    ctx = _mxnet_utils.get_mxnet_context(max_devices=batch_size)
    model = _Model(num_classes = len(classes), prefix="drawing_")
    model_params = model.collect_params()
    model_params.initialize(_mx.init.Xavier(), ctx=ctx)
    if warm_start is not None:
        if type(warm_start) is not str:
            raise TypeError("'warm_start' must be a string or None. "
                + "'warm_start' can take in the following values: "
                + str(accepted_values_for_warm_start))
        if warm_start not in accepted_values_for_warm_start:
            raise _ToolkitError("Unrecognized value for 'warm_start': "
                + warm_start + ". 'warm_start' can take in the following "
                + "values: " + str(accepted_values_for_warm_start))
        # Load pretrained weights; allow_missing covers the final layer,
        # which depends on the number of classes in this dataset.
        pretrained_model = _pre_trained_models.DrawingClassifierPreTrainedModel(
            warm_start)
        pretrained_model_params_path = pretrained_model.get_model_path()
        model.load_params(pretrained_model_params_path,
                          ctx=ctx,
                          allow_missing=True)
    softmax_cross_entropy = _mx.gluon.loss.SoftmaxCrossEntropyLoss()
    model.hybridize()
    trainer = _mx.gluon.Trainer(model.collect_params(), 'adam')
    train_accuracy = _mx.metric.Accuracy()
    validation_accuracy = _mx.metric.Accuracy()

    def get_data_and_label_from_batch(batch):
        # Slice off loader padding (if any), then shard across devices.
        if batch.pad is not None:
            size = batch_size - batch.pad
            sliced_data = _mx.nd.slice_axis(batch.data[0], axis=0, begin=0, end=size)
            sliced_label = _mx.nd.slice_axis(batch.label[0], axis=0, begin=0, end=size)
            num_devices = min(sliced_data.shape[0], len(ctx))
            batch_data = _mx.gluon.utils.split_and_load(sliced_data, ctx_list=ctx[:num_devices], even_split=False)
            batch_label = _mx.gluon.utils.split_and_load(sliced_label, ctx_list=ctx[:num_devices], even_split=False)
        else:
            batch_data = _mx.gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
            batch_label = _mx.gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
        return batch_data, batch_label

    def compute_accuracy(accuracy_metric, batch_loader):
        # One full pass over `batch_loader`, accumulating into the metric.
        batch_loader.reset()
        accuracy_metric.reset()
        for batch in batch_loader:
            batch_data, batch_label = get_data_and_label_from_batch(batch)
            outputs = []
            for x, y in zip(batch_data, batch_label):
                if x is None or y is None: continue
                z = model(x)
                outputs.append(z)
            accuracy_metric.update(batch_label, outputs)

    # ----- training loop -----
    for train_batch in train_loader:
        train_batch_data, train_batch_label = get_data_and_label_from_batch(train_batch)
        with _autograd.record():
            for x, y in zip(train_batch_data, train_batch_label):
                z = model(x)
                loss = softmax_cross_entropy(z, y)
                loss.backward()
        trainer.step(train_batch.data[0].shape[0])
        train_loss = loss.mean().asscalar()
        train_time = _time.time() - start_time
        # Epoch boundary: refresh accuracy metrics and print a table row.
        if train_batch.iteration > iteration:
            compute_accuracy(train_accuracy, train_loader_to_compute_accuracy)
            if validation_set is not None:
                compute_accuracy(validation_accuracy, validation_loader)
            iteration = train_batch.iteration
            if verbose:
                kwargs = { "iteration": iteration,
                           "train_loss": float(train_loss),
                           "train_accuracy": train_accuracy.get()[1],
                           "time": train_time}
                if validation_set is not None:
                    kwargs["validation_accuracy"] = validation_accuracy.get()[1]
                table_printer.print_row(**kwargs)
    # ----- package the trained model -----
    state = {
        '_model': model,
        '_class_to_index': class_to_index,
        'num_classes': len(classes),
        'classes': classes,
        'input_image_shape': (1, BITMAP_WIDTH, BITMAP_HEIGHT),
        'batch_size': batch_size,
        'training_loss': train_loss,
        'training_accuracy': train_accuracy.get()[1],
        'training_time': train_time,
        'validation_accuracy': validation_accuracy.get()[1],
        'max_iterations': max_iterations,
        'target': target,
        'feature': feature,
        'num_examples': len(input_dataset)
    }
    return DrawingClassifier(state)
class DrawingClassifier(_CustomModel):
    """
    A trained drawing classifier.

    Instances are produced by the module-level ``create`` function and wrap
    an mxnet gluon network plus training metadata inside a ``_PythonProxy``
    state object.
    """

    # Serialization schema version for save/load compatibility checks.
    _PYTHON_DRAWING_CLASSIFIER_VERSION = 1

    def __init__(self, state):
        self.__proxy__ = _PythonProxy(state)

    @classmethod
    def _native_name(cls):
        # Identifier used by the turicreate save/load machinery.
        return "drawing_classifier"

    def _get_native_state(self):
        """Return a serializable copy of the state (gluon net -> raw params)."""
        from .._mxnet import _mxnet_utils
        state = self.__proxy__.get_state()
        mxnet_params = state['_model'].collect_params()
        state['_model'] = _mxnet_utils.get_gluon_net_params_state(mxnet_params)
        return state

    def _get_version(self):
        return self._PYTHON_DRAWING_CLASSIFIER_VERSION

    @classmethod
    def _load_version(cls, state, version):
        """Rebuild a DrawingClassifier from serialized ``state``."""
        _tkutl._model_version_check(version,
            cls._PYTHON_DRAWING_CLASSIFIER_VERSION)
        from ._model_architecture import Model as _Model
        from .._mxnet import _mxnet_utils
        net = _Model(num_classes = len(state['classes']), prefix = 'drawing_')
        ctx = _mxnet_utils.get_mxnet_context(max_devices=state['batch_size'])
        net_params = net.collect_params()
        _mxnet_utils.load_net_params_from_state(
            net_params, state['_model'], ctx=ctx
        )
        state['_model'] = net
        # Normalize integer labels that older exports stored as floats.
        if len(state['classes']) > 0 and isinstance(state['classes'][0], float):
            state['classes'] = list(map(int, state['classes']))
        return DrawingClassifier(state)

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        """Pretty-printed summary of schema and training statistics."""
        width = 40
        sections, section_titles = self._get_summary_struct()
        out = _tkutl._toolkit_repr_print(self, sections, section_titles,
                                         width=width)
        return out

    def _get_summary_struct(self):
        """Return ([schema_fields, training_fields], titles) for __repr__."""
        model_fields = [
            ('Number of classes', 'num_classes'),
            ('Feature column', 'feature'),
            ('Target column', 'target')
        ]
        training_fields = [
            ('Training Iterations', 'max_iterations'),
            ('Training Accuracy', 'training_accuracy'),
            ('Validation Accuracy', 'validation_accuracy'),
            ('Training Time', 'training_time'),
            ('Number of Examples', 'num_examples'),
            ('Batch Size', 'batch_size'),
            ('Final Loss (specific to model)', 'training_loss')
        ]
        section_titles = ['Schema', 'Training summary']
        return([model_fields, training_fields], section_titles)

    def export_coreml(self, filename, verbose=False):
        """Save the model in Core ML format at ``filename``."""
        import mxnet as _mx
        from .._mxnet._mxnet_to_coreml import _mxnet_converter
        import coremltools as _coremltools
        # Export always uses a fixed 1x1x28x28 grayscale input.
        batch_size = 1
        image_shape = (batch_size,) + (1, BITMAP_WIDTH, BITMAP_HEIGHT)
        s_image = _mx.sym.Variable(self.feature,
                                   shape=image_shape, dtype=_np.float32)
        # Trace a copy of the gluon net symbolically for the converter.
        from copy import copy as _copy
        net = _copy(self._model)
        s_ymap = net(s_image)
        mod = _mx.mod.Module(symbol=s_ymap, label_names=None, data_names=[self.feature])
        mod.bind(for_training=False, data_shapes=[(self.feature, image_shape)])
        mod.init_params()
        # Copy trained gluon parameters into the symbolic Module.
        arg_params, aux_params = mod.get_params()
        net_params = net.collect_params()
        new_arg_params = {}
        for k, param in arg_params.items():
            new_arg_params[k] = net_params[k].data(net_params[k].list_ctx()[0])
        new_aux_params = {}
        for k, param in aux_params.items():
            new_aux_params[k] = net_params[k].data(net_params[k].list_ctx()[0])
        mod.set_params(new_arg_params, new_aux_params)
        coreml_model = _mxnet_converter.convert(mod, mode='classifier',
                                                class_labels=self.classes,
                                                input_shape=[(self.feature, image_shape)],
                                                builder=None, verbose=verbose,
                                                preprocessor_args={
                                                    'image_input_names': [self.feature],
                                                    'image_scale': 1.0/255
                                                })
        # Rename the probability output after the target column.
        DESIRED_OUTPUT_NAME = self.target + "Probabilities"
        spec = coreml_model._spec
        class_label_output_index = 0 if spec.description.output[0].name == "classLabel" else 1
        probabilities_output_index = 1-class_label_output_index
        spec.neuralNetworkClassifier.labelProbabilityLayerName = DESIRED_OUTPUT_NAME
        spec.neuralNetworkClassifier.layers[-1].name = DESIRED_OUTPUT_NAME
        spec.neuralNetworkClassifier.layers[-1].output[0] = DESIRED_OUTPUT_NAME
        spec.description.predictedProbabilitiesName = DESIRED_OUTPUT_NAME
        spec.description.output[probabilities_output_index].name = DESIRED_OUTPUT_NAME
        from turicreate.toolkits import _coreml_utils
        model_type = "drawing classifier"
        spec.description.metadata.shortDescription = _coreml_utils._mlmodel_short_description(model_type)
        spec.description.input[0].shortDescription = self.feature
        spec.description.output[probabilities_output_index].shortDescription = 'Prediction probabilities'
        spec.description.output[class_label_output_index].shortDescription = 'Class Label of Top Prediction'
        from coremltools.models.utils import save_spec as _save_spec
        _save_spec(spec, filename)

    def _predict_with_probabilities(self, input_dataset, batch_size=None,
                                    verbose=True):
        """Shared core of ``predict``/``evaluate``: returns an SFrame with a
        predicted-label column (named ``self.target``) and a "probability"
        column holding the per-class probability vector for each row."""
        from .._mxnet import _mxnet_utils
        import mxnet as _mx
        from ._sframe_loader import SFrameClassifierIter as _SFrameClassifierIter
        # Rasterize stroke-based input; images pass through unchanged.
        is_stroke_input = (input_dataset[self.feature].dtype != _tc.Image)
        dataset = _extensions._drawing_classifier_prepare_data(
            input_dataset, self.feature) if is_stroke_input else input_dataset
        batch_size = self.batch_size if batch_size is None else batch_size
        loader = _SFrameClassifierIter(dataset, batch_size,
                                       class_to_index=self._class_to_index,
                                       feature_column=self.feature,
                                       target_column=self.target,
                                       load_labels=False,
                                       shuffle=False,
                                       iterations=1)
        dataset_size = len(dataset)
        ctx = _mxnet_utils.get_mxnet_context()
        index = 0
        last_time = 0
        done = False
        from turicreate import SArrayBuilder
        from array import array
        classes = self.classes
        all_predicted_builder = SArrayBuilder(dtype=type(classes[0]))
        all_probabilities_builder = SArrayBuilder(dtype=array)
        for batch in loader:
            if batch.pad is not None:
                # Drop padding rows appended by the loader.
                size = batch_size - batch.pad
                batch_data = _mx.nd.slice_axis(batch.data[0],
                                               axis=0, begin=0, end=size)
            else:
                batch_data = batch.data[0]
                size = batch_size
            num_devices = min(batch_data.shape[0], len(ctx))
            split_data = _mx.gluon.utils.split_and_load(batch_data, ctx_list=ctx[:num_devices], even_split=False)
            for data in split_data:
                z = self._model(data).asnumpy()
                predicted = list(map(lambda x: classes[x], z.argmax(axis=1)))
                split_length = z.shape[0]
                all_predicted_builder.append_multiple(predicted)
                all_probabilities_builder.append_multiple(z.tolist())
                index += split_length
                if index == dataset_size - 1:
                    done = True
                cur_time = _time.time()
                # Throttled progress printing (small datasets stay silent).
                if verbose and (dataset_size >= 5
                                and cur_time > last_time + 10 or done):
                    print('Predicting {cur_n:{width}d}/{max_n:{width}d}'.format(
                        cur_n = index + 1,
                        max_n = dataset_size,
                        width = len(str(dataset_size))))
                    last_time = cur_time
        return (_tc.SFrame({self.target: all_predicted_builder.close(),
                            'probability': all_probabilities_builder.close()}))

    def evaluate(self, dataset, metric='auto', batch_size=None, verbose=True):
        """Compute the requested evaluation metric(s) ('auto' = all) by
        predicting on ``dataset`` and comparing to its target column."""
        if self.target not in dataset.column_names():
            raise _ToolkitError("Must provide ground truth column, '"
                + self.target + "' in the evaluation dataset.")
        predicted = self._predict_with_probabilities(dataset, batch_size, verbose)
        avail_metrics = ['accuracy', 'auc', 'precision', 'recall',
                         'f1_score', 'confusion_matrix', 'roc_curve']
        _tkutl._check_categorical_option_type(
            'metric', metric, avail_metrics + ['auto'])
        metrics = avail_metrics if metric == 'auto' else [metric]
        ret = {}
        if 'accuracy' in metrics:
            ret['accuracy'] = _evaluation.accuracy(
                dataset[self.target], predicted[self.target])
        if 'auc' in metrics:
            ret['auc'] = _evaluation.auc(
                dataset[self.target], predicted['probability'],
                index_map=self._class_to_index)
        if 'precision' in metrics:
            ret['precision'] = _evaluation.precision(
                dataset[self.target], predicted[self.target])
        if 'recall' in metrics:
            ret['recall'] = _evaluation.recall(
                dataset[self.target], predicted[self.target])
        if 'f1_score' in metrics:
            ret['f1_score'] = _evaluation.f1_score(
                dataset[self.target], predicted[self.target])
        if 'confusion_matrix' in metrics:
            ret['confusion_matrix'] = _evaluation.confusion_matrix(
                dataset[self.target], predicted[self.target])
        if 'roc_curve' in metrics:
            ret['roc_curve'] = _evaluation.roc_curve(
                dataset[self.target], predicted['probability'],
                index_map=self._class_to_index)
        return ret

    def predict_topk(self, dataset, output_type="probability", k=3,
                     batch_size=None):
        """Return an SFrame of the top-``k`` predictions per example with
        columns `id`, `class`, and `probability` or `rank`."""
        _tkutl._check_categorical_option_type("output_type", output_type,
                                              ["probability", "rank"])
        if not isinstance(k, int):
            raise TypeError("'k' must be an integer >= 1")
        if k <= 0:
            raise ValueError("'k' must be >= 1")
        if batch_size is not None and not isinstance(batch_size, int):
            raise TypeError("'batch_size' must be an integer >= 1")
        if batch_size is not None and batch_size < 1:
            raise ValueError("'batch_size' must be >= 1")
        prob_vector = self.predict(
            dataset, output_type='probability_vector', batch_size=batch_size)
        classes = self.classes
        # Expand each probability vector into its k best entries,
        # highest probability first.
        if output_type == 'probability':
            results = prob_vector.apply(lambda p: [
                {'class': classes[i], 'probability': p[i]}
                for i in reversed(_np.argsort(p)[-k:])]
            )
        else:
            assert(output_type == 'rank')
            results = prob_vector.apply(lambda p: [
                {'class': classes[index], 'rank': rank}
                for rank, index in enumerate(reversed(_np.argsort(p)[-k:]))]
            )
        # Flatten the per-example lists into one row per (id, class) pair.
        results = _tc.SFrame({'X': results})
        results = results.add_row_number()
        results = results.stack('X', new_column_name='X')
        results = results.unpack('X', column_name_prefix='')
        return results

    def predict(self, data, output_type='class', batch_size=None, verbose=True):
        """Predict labels ('class'), true-class probabilities
        ('probability'), or full probability vectors ('probability_vector')
        for an SFrame, SArray, or a single drawing."""
        _tkutl._check_categorical_option_type("output_type", output_type,
                                              ["probability", "class", "probability_vector"])
        if isinstance(data, _tc.SArray):
            predicted = self._predict_with_probabilities(
                _tc.SFrame({
                    self.feature: data
                }),
                batch_size,
                verbose
            )
        elif isinstance(data, _tc.SFrame):
            predicted = self._predict_with_probabilities(data, batch_size, verbose)
        else:
            # Single drawing: a tc.Image bitmap or a stroke list.
            predicted = self._predict_with_probabilities(
                _tc.SFrame({
                    self.feature: [data]
                }),
                batch_size,
                verbose
            )
        if output_type == "class":
            return predicted[self.target]
        elif output_type == "probability":
            _class_to_index = self._class_to_index
            target = self.target
            return predicted.apply(
                lambda row: row["probability"][_class_to_index[row[target]]])
        else:
            assert (output_type == "probability_vector")
            return predicted["probability"]
| true | true |
f72ac42fd9ceac1af5051c46c0355962da805968 | 15,671 | py | Python | restio/model.py | eduardostarling/restio | 66bdb0f86105bf090d7f109da2dd37cbd0096da7 | [
"MIT"
] | 3 | 2019-11-11T14:18:26.000Z | 2020-09-04T20:50:11.000Z | restio/model.py | eduardostarling/restio | 66bdb0f86105bf090d7f109da2dd37cbd0096da7 | [
"MIT"
] | 16 | 2019-11-19T14:39:30.000Z | 2021-06-26T15:08:21.000Z | restio/model.py | eduardostarling/restio | 66bdb0f86105bf090d7f109da2dd37cbd0096da7 | [
"MIT"
] | null | null | null | from __future__ import annotations
from collections.abc import Iterable
from reprlib import Repr
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set, Tuple, Type
from uuid import UUID, uuid4
from restio.event import EventListener
from restio.fields.base import Field, T_co
from restio.shared import (
CURRENT_SESSION,
MODEL_INSTANTIATED_EVENT,
MODEL_PRE_UPDATE_EVENT,
MODEL_TYPE_REGISTRY,
MODEL_UPDATE_EVENT,
)
from restio.state import ModelState
if TYPE_CHECKING:
from restio.session import Session
def _check_model_type(obj: Optional[BaseModel]):
    """Raise TypeError unless ``obj`` is a BaseModel instance."""
    if isinstance(obj, BaseModel):
        return
    raise TypeError("The provided object is not of type BaseModel.")
class ModelMeta:
    """
    Per-model-type metadata collected by BaseModelMeta: constructor/repr
    flags, the Field registry, the primary-key registry and the alias.
    """

    __slots__ = ("init", "init_ignore_extra", "repr", "fields", "primary_keys", "alias")

    init: bool
    init_ignore_extra: bool
    repr: bool
    fields: Dict[str, Field]
    primary_keys: Dict[str, Field]
    alias: Optional[str]

    def __init__(self):
        # Keyword-argument constructor and auto-repr are enabled by default.
        self.init = True
        self.init_ignore_extra = True
        self.repr = True
        # Registries start empty; BaseModelMeta fills them during class creation.
        self.fields = {}
        self.primary_keys = {}
        # None means "use the class name" (resolved in BaseModelMeta.__new__).
        self.alias = None
# Meta attributes that don't get inherited from parent classes
# (excluded in BaseModelMeta._update_meta when copying a base's meta).
__MODEL_META_NOT_INHERITED__ = ("alias",)
# Read-only meta attributes, can't be modified by model class
# (computed by BaseModelMeta from the Field descriptors instead).
__MODEL_META_READONLY__ = ("fields", "primary_keys")
class BaseModelMeta(type):
    __slots__ = ()
    """
    BaseModel metaclass. Responsible to internally cache the data schema in a BaseModel
    subclass by identifying fields and primary keys.
    """
    def __new__(cls, name: str, bases: Tuple[Type, ...], dct: Dict[str, Any]):
        """
        Build the model class: collect inherited and locally-declared Field
        descriptors into a fresh ModelMeta, apply the class's inner `Meta`
        options, and register the class under its (unique) alias.
        """
        # internal fields not initialized in BaseModel
        dct["_internal_id"] = None
        dct["_hash"] = None
        dct["_listener"] = None
        dct["_persistent_values"] = None
        # prepares metadata for the model type
        meta = ModelMeta()
        dct["_meta"] = meta
        def _update_meta(
            _meta: Optional[ModelMeta],
            extend: bool,
            not_inherited: Tuple[str, ...] = tuple(),
        ):
            """
            Copy non-read-only attributes from `_meta` onto the new class's
            meta; when `extend` is True also merge its field/pk registries.
            """
            if not _meta:
                return
            propagate_meta = (
                set(meta.__slots__) - set(__MODEL_META_READONLY__) - set(not_inherited)
            )
            for meta_attribute in propagate_meta:
                if not hasattr(_meta, meta_attribute):
                    continue
                setattr(meta, meta_attribute, getattr(_meta, meta_attribute))
            # excluded meta, needs to be propagated manually
            if extend:
                meta.fields.update(_meta.fields)
                meta.primary_keys.update(_meta.primary_keys)
        base: Type[BaseModel]
        # inherit meta from every base that already has one (i.e. other models)
        for base in bases:
            if not hasattr(base, "_meta"):
                continue
            _update_meta(base._meta, True, __MODEL_META_NOT_INHERITED__)
        # the class's own inner `Meta` overrides inherited options (no extend)
        _update_meta(dct.get("Meta", None), False)
        # process class fields
        for field_name, field_value in dct.items():
            if not isinstance(field_value, Field):
                continue
            meta.fields[field_name] = field_value
            if field_value.pk:
                meta.primary_keys[field_name] = field_value
        # set alias name to class name when None
        name_alias = meta.alias or name
        # validate if the alias is not duplicate
        # the caveat here is that two classes with the same name in two
        # different files will have a name collision and fail initializing
        if name_alias in MODEL_TYPE_REGISTRY:
            raise ValueError(
                f"Model alias `{name_alias}` is already used by another class."
            )
        cls_object = super().__new__(cls, name, bases, dct)
        # set the model alias to the model type
        if name_alias != "BaseModel":
            MODEL_TYPE_REGISTRY[name_alias] = cls_object
        return cls_object
    def __call__(self, *args, **kwargs):
        """
        Instantiate the model, then finish per-instance setup: store field
        defaults, assign the identity UUID/hash, and notify the current
        Session (if any) that a model was created.
        """
        instance: BaseModel = super().__call__(*args, **kwargs)
        # stores the default after the constructor, if nothing has been set yet
        # this is implemented here so that this is always called, regardless of the
        # models with custom constructors calling or not super().__init__()
        for field in instance._meta.fields.values():
            field._store_default(instance, force=False)
        instance._internal_id = uuid4()
        instance._hash = hash((instance.__class__, str(instance._internal_id)))
        instance._persistent_values = {}
        instance._listener = EventListener()
        instance._initialized = True
        session = CURRENT_SESSION.get()
        if session:
            session._listener.dispatch(MODEL_INSTANTIATED_EVENT, instance)
        return instance
# Shared Repr instance used by BaseModel.__repr__ to truncate long values.
_repr_obj: Repr = Repr()
_repr_obj.maxother = 200  # cap the repr of arbitrary objects at 200 chars
class BaseModel(metaclass=BaseModelMeta):
    """
    A representation of a remote object model.

    BaseModel is an abstract class that should be extended to represent models incoming
    from or outgoing to a remote REST API.

    Models can exist independently from Sessions but contain an internal state that
    indicates the status of the model within the current context. The Sessions are
    responsible to control this state. Also, each model contains a set of control
    attributes that indicate which fields are watched by restio internals. By default,
    all Field descriptors in the model will become field attributes. Fields declared
    with pk=True will be used by restio to optimize the caching of the models in a
    Session.

    Models that change over time will contain an internal dictionary with the latest
    known persistent value of each field. This is done to guarantee fast rollback of the
    values when the Session is invalid, and to also indicate which values might have
    changed within the session scope. If a field is modified directly, the model will
    intercept the change and save the older value into the persistent dictionary until
    `_persist` is called. During a `_rollback` call, however, the stored values are
    re-assigned to their original attributes. Each attribute change will also dispatch
    an update event so that the session is aware of changes and manages the model's
    internal state accordingly. The persistent dictionary (through the helper method
    `is_field_modified`) can also be used by DAO's to verify which values were updated
    prior to sending a request through the REST API, thus allowing for proper
    optimization and minimizing chances of conflicting changes on the remote object.

    All models automatically generate a random internal UUID when created. This UUID is
    used internally for comparison purposes, and externally as an identity. Although
    this attribute is not explicitly set as private, it should never be modified.
    """

    # these are all initialized by the metaclass
    _meta: ModelMeta
    __state: ModelState = ModelState.UNBOUND
    __primary_keys: Optional[Dict[str, Any]] = None
    _initialized: bool = False
    _internal_id: UUID
    _hash: int
    _persistent_values: Dict[str, Any]
    _listener: EventListener

    def __init__(self, **kwargs: T_co):
        """
        Instantiates the model by matching `kwargs` parameters to field names.
        Behavior is disabled when init=False in the model Meta class.

        :param kwargs: The dictionary of keyword arguments matching the field names of
                       the model class.
        :raises ValueError: When invalid arguments are provided.
        """
        meta = self._meta
        if not meta.init:
            return
        for arg_name, value in kwargs.items():
            field_object = meta.fields.get(arg_name, None)
            if not field_object:
                if not meta.init_ignore_extra:
                    raise ValueError(
                        "Invalid argument provided to constructor of"
                        f" `{self.__class__.__name__}`: {arg_name}"
                    )
                continue  # pragma: no cover
            if not field_object.init:
                if not meta.init_ignore_extra:
                    raise ValueError(f"Attribute `{arg_name}` cannot be initialized.")
                continue  # pragma: no cover
            field_object.__set__(self, value)

    @property
    def _state(self) -> ModelState:
        """
        Returns the state of the current model.

        :return: The ModelState representation.
        """
        return self.__state

    @_state.setter
    def _state(self, state: ModelState):
        self.__state = state

    @property
    def primary_keys(self) -> Dict[str, T_co]:
        """
        Returns a dictionary containing all primary keys. The keys will be
        ordered in the same order as they are declared in the model type,
        also following the order in which they appear in class inheritance.

        This property is optimized to minimize the number of iterations done
        in the model instance by internalizing a cache with the latest retrieved
        primary keys. This cache is reset for every modification of a primary
        key and recovered during the next call to the property.

        :return: The ordered dictionary of primary key names to values.
        """
        if self.__primary_keys is None:
            self.__primary_keys = self._load_primary_keys()
        return self.__primary_keys

    def _load_primary_keys(self) -> Dict[str, T_co]:
        """
        Returns a dictionary containing the primary key fields (keys) and their
        current values in the model (values). This operation will inspect the
        instance and collect all current values on-spot.

        :return: Dictionary of primary keys values.
        """
        return {key: getattr(self, key) for key in self._meta.primary_keys}

    def _reset_primary_keys(self):
        """
        Resets the internal cache of primary keys for the instance.
        """
        self.__primary_keys = None

    def get_children(
        self,
        recursive: bool = False,
        children: Optional[Set[BaseModel]] = None,
        top_level: Optional[BaseModel] = None,
    ) -> Set[BaseModel]:
        """
        Returns the list of all children of the current model. This algorithm checks in
        runtime for all objects refered by the instance and that are part of fields
        marked with depends_on=True. When `recursive` is True, then the algorithm will
        recursively search through all children.

        `children` and `top_level` are control variables that indicate which models
        have already been inspected by this function, in order to avoid infinite
        recursion if any circular dependency exists. In most cases, they should be left
        empty.

        :param recursive: If True, recursively searches for children. Returns only
                          first degree relationships otherwise. Defaults to False.
        :param children: List of existing models already inspected.
        :param top_level: The top-level model from where inspection started.
        :return: The list of children.
        """
        if children is None:
            children = set()
        if top_level:
            if self == top_level:
                return children
            children.add(self)
        else:
            top_level = self

        # hoisted out of the loop below: the closure only captures
        # `recursive`, `children` and `top_level`, none of which change
        def check(child: Optional[BaseModel]):
            # this can happen when the field allows none
            if not child or child in children:  # type: ignore
                return
            if recursive:
                child.get_children(recursive, children, top_level)
            else:
                children.add(child)

        for value in self.dependency_fields.values():
            # iterables are only supported if the values are not iterables - there is
            # no recursiveness
            if isinstance(value, Iterable):
                value: Iterable[Any]
                for item in value:
                    check(item)
            else:
                check(value)
        return children

    @property
    def fields(self) -> Dict[str, Any]:
        """
        Returns the values of each field in the model instance.

        :return: A dict with keys containing the string names of the fields,
                 and values containing the value of the corresponding field.
        """
        return {k: getattr(self, k) for k in self._filter_fields(lambda v: True)}

    @property
    def dependency_fields(self) -> Dict[str, Any]:
        """
        Returns the values of each field that have relationship with other models.

        :return: The dictionary of fields and their values
        """
        return {
            k: getattr(self, k) for k in self._filter_fields(lambda v: v.depends_on)
        }

    def is_field_modified(self, field_name: str) -> bool:
        """
        Indicates whether the field with name `field_name` has been modified.

        :param field_name: The name of the field.
        :raises ValueError: When the field name does not exist.
        :return: True if field is modified, False otherwise.
        """
        if field_name not in self._meta.fields:
            # BUG FIX: the second string fragment was missing its `f` prefix,
            # so the literal text "{self.__class__.__name__}" was emitted.
            raise ValueError(
                f"Field `{field_name}` does not exist in model"
                f" `{self.__class__.__name__}`."
            )
        return field_name in self._persistent_values

    def _filter_fields(self, filt: Callable[[Field], bool]):
        # subset of the field registry whose Field descriptor passes `filt`
        return {k: v for k, v in self._meta.fields.items() if filt(v)}

    def _rollback(self):
        """
        Restore the persistent values in the model to their original attributes.
        """
        for attr, value in list(self._persistent_values.items()):
            setattr(self, attr, value)
        self._persist()

    def _persist(self):
        """
        Persists the current attribute values by emptying the internal persistent
        dictionary. Once this is called, it is not possible to rollback to the old
        values anymore. It is recommended that this method should only be called by the
        party that persisted the values on the remote server.
        """
        self._persistent_values = {}

    def _pre_update(self, field: Field[T_co], value: T_co):
        # notify listeners (e.g. the Session) before the field value changes
        self._listener.dispatch(MODEL_PRE_UPDATE_EVENT, self, field, value)

    def _update(self, field: Field[T_co], value: T_co):
        # a primary-key change invalidates the cached primary_keys mapping
        if field.pk:
            self._reset_primary_keys()
        self._listener.dispatch(MODEL_UPDATE_EVENT, self, field, value)

    def _update_persistent_values(self, field: Field[T_co], value: T_co):
        """
        Track the pre-change value of `field` in `_persistent_values`; if the
        field is set back to its persistent value, the entry is dropped.
        """
        name: str = field.name
        if name in self._persistent_values:
            if value == self._persistent_values[name]:
                del self._persistent_values[name]
        else:
            mutable_fields = self.fields
            if value != mutable_fields[name]:
                self._persistent_values[name] = mutable_fields[name]

    def __eq__(self, other: BaseModel) -> bool:
        # identity-based equality: the hash is derived from the instance UUID
        return isinstance(other, self.__class__) and self._hash == other._hash

    def __repr__(self) -> str:
        if not self._meta.repr:
            return super().__repr__()

        def get_field_repr(field: str):
            value = getattr(self, field)
            return f"{field}={_repr_obj.repr(value)}"

        repr_args: List[str] = [
            get_field_repr(n) for n in self._filter_fields(lambda x: x.repr)
        ]
        return f"{self.__class__.__name__}({', '.join(repr_args)})"

    def __hash__(self) -> int:
        return self._hash
| 35.942661 | 88 | 0.639972 | from __future__ import annotations
from collections.abc import Iterable
from reprlib import Repr
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set, Tuple, Type
from uuid import UUID, uuid4
from restio.event import EventListener
from restio.fields.base import Field, T_co
from restio.shared import (
CURRENT_SESSION,
MODEL_INSTANTIATED_EVENT,
MODEL_PRE_UPDATE_EVENT,
MODEL_TYPE_REGISTRY,
MODEL_UPDATE_EVENT,
)
from restio.state import ModelState
if TYPE_CHECKING:
from restio.session import Session
def _check_model_type(obj: Optional[BaseModel]):
    """Raise TypeError if *obj* is not a BaseModel instance."""
    if not isinstance(obj, BaseModel):
        raise TypeError("The provided object is not of type BaseModel.")
class ModelMeta:
    """
    Per-model-type metadata collected by BaseModelMeta: constructor/repr
    flags, the Field registry, the primary-key registry and the alias.
    """
    __slots__ = ("init", "init_ignore_extra", "repr", "fields", "primary_keys", "alias")
    init: bool
    init_ignore_extra: bool
    repr: bool
    fields: Dict[str, Field]
    primary_keys: Dict[str, Field]
    alias: Optional[str]
    def __init__(self):
        # keyword-argument constructor and auto-repr enabled by default;
        # registries start empty and are filled during class creation;
        # alias None means "fall back to the class name" (see BaseModelMeta)
        self.init = True
        self.init_ignore_extra = True
        self.repr = True
        self.fields = dict()
        self.primary_keys = dict()
        self.alias = None
# Meta attributes that are not inherited from parent model classes
__MODEL_META_NOT_INHERITED__ = ("alias",)
# Read-only meta attributes, can't be modified by model class
__MODEL_META_READONLY__ = ("fields", "primary_keys")
class BaseModelMeta(type):
    """
    BaseModel metaclass: caches the data schema of each BaseModel subclass
    by collecting its Field descriptors and primary keys into `_meta`.
    """
    __slots__ = ()
    def __new__(cls, name: str, bases: Tuple[Type, ...], dct: Dict[str, Any]):
        """
        Build the model class: inherit meta from base models, apply the
        class's inner `Meta` options, collect Field descriptors, and
        register the class under a unique alias.
        """
        # per-instance internals, initialized later in __call__
        dct["_internal_id"] = None
        dct["_hash"] = None
        dct["_listener"] = None
        dct["_persistent_values"] = None
        # fresh metadata object for this model type
        meta = ModelMeta()
        dct["_meta"] = meta
        def _update_meta(
            _meta: Optional[ModelMeta],
            extend: bool,
            not_inherited: Tuple[str, ...] = tuple(),
        ):
            """Copy non-read-only meta attributes; optionally merge registries."""
            if not _meta:
                return
            propagate_meta = (
                set(meta.__slots__) - set(__MODEL_META_READONLY__) - set(not_inherited)
            )
            for meta_attribute in propagate_meta:
                if not hasattr(_meta, meta_attribute):
                    continue
                setattr(meta, meta_attribute, getattr(_meta, meta_attribute))
            # fields/primary_keys are excluded above; merge them explicitly
            if extend:
                meta.fields.update(_meta.fields)
                meta.primary_keys.update(_meta.primary_keys)
        base: Type[BaseModel]
        # inherit meta from every base class that already carries one
        for base in bases:
            if not hasattr(base, "_meta"):
                continue
            _update_meta(base._meta, True, __MODEL_META_NOT_INHERITED__)
        # the class's own inner `Meta` overrides inherited options
        _update_meta(dct.get("Meta", None), False)
        # register every Field descriptor declared on the class body
        for field_name, field_value in dct.items():
            if not isinstance(field_value, Field):
                continue
            meta.fields[field_name] = field_value
            if field_value.pk:
                meta.primary_keys[field_name] = field_value
        # alias defaults to the class name; it must be globally unique
        name_alias = meta.alias or name
        if name_alias in MODEL_TYPE_REGISTRY:
            raise ValueError(
                f"Model alias `{name_alias}` is already used by another class."
            )
        cls_object = super().__new__(cls, name, bases, dct)
        # the abstract root class itself is not registered
        if name_alias != "BaseModel":
            MODEL_TYPE_REGISTRY[name_alias] = cls_object
        return cls_object
    def __call__(self, *args, **kwargs):
        """
        Instantiate the model, then store field defaults, assign the
        identity UUID/hash and notify the current Session (if any).
        """
        instance: BaseModel = super().__call__(*args, **kwargs)
        # store defaults here so it happens even when a custom constructor
        # does not call super().__init__()
        for field in instance._meta.fields.values():
            field._store_default(instance, force=False)
        instance._internal_id = uuid4()
        instance._hash = hash((instance.__class__, str(instance._internal_id)))
        instance._persistent_values = {}
        instance._listener = EventListener()
        instance._initialized = True
        session = CURRENT_SESSION.get()
        if session:
            session._listener.dispatch(MODEL_INSTANTIATED_EVENT, instance)
        return instance
# Shared Repr instance used by BaseModel.__repr__ to truncate long values.
_repr_obj: Repr = Repr()
_repr_obj.maxother = 200  # cap the repr of arbitrary objects at 200 chars
class BaseModel(metaclass=BaseModelMeta):
    """
    A representation of a remote object model.

    Subclasses declare Field descriptors, which BaseModelMeta registers in
    `_meta`. Changed field values are kept in `_persistent_values` until
    `_persist` (accept) or `_rollback` (restore) is called, and every change
    is dispatched through `_listener` so a Session can track model state.
    Instances are identified by a random UUID assigned on creation.
    """

    # these are initialized by the metaclass
    _meta: ModelMeta
    __state: ModelState = ModelState.UNBOUND
    __primary_keys: Optional[Dict[str, Any]] = None  # lazy primary-key cache
    _initialized: bool = False
    _internal_id: UUID
    _hash: int
    _persistent_values: Dict[str, Any]
    _listener: EventListener

    def __init__(self, **kwargs: T_co):
        """
        Initialize fields from keyword arguments matching field names.
        Disabled entirely when Meta.init is False.

        :raises ValueError: for unknown or non-init fields when
            Meta.init_ignore_extra is False.
        """
        meta = self._meta
        if not meta.init:
            return
        for arg_name, value in kwargs.items():
            field_object = meta.fields.get(arg_name, None)
            if not field_object:
                if not meta.init_ignore_extra:
                    raise ValueError(
                        "Invalid argument provided to constructor of"
                        f" `{self.__class__.__name__}`: {arg_name}"
                    )
                continue
            if not field_object.init:
                if not meta.init_ignore_extra:
                    raise ValueError(f"Attribute `{arg_name}` cannot be initialized.")
                continue
            field_object.__set__(self, value)

    @property
    def _state(self) -> ModelState:
        """Current ModelState of the instance."""
        return self.__state

    @_state.setter
    def _state(self, state: ModelState):
        self.__state = state

    @property
    def primary_keys(self) -> Dict[str, T_co]:
        """Cached mapping of primary-key field names to current values."""
        if self.__primary_keys is None:
            self.__primary_keys = self._load_primary_keys()
        return self.__primary_keys

    def _load_primary_keys(self) -> Dict[str, T_co]:
        """Collect primary-key values directly from the instance."""
        return {key: getattr(self, key) for key in self._meta.primary_keys}

    def _reset_primary_keys(self):
        """Invalidate the primary-key cache (reloaded on next access)."""
        self.__primary_keys = None

    def get_children(
        self,
        recursive: bool = False,
        children: Optional[Set[BaseModel]] = None,
        top_level: Optional[BaseModel] = None,
    ) -> Set[BaseModel]:
        """
        Return models referenced through fields declared with depends_on=True.

        :param recursive: when True, include indirect children as well.
        :param children: accumulator used during recursion; leave empty.
        :param top_level: root of the traversal; guards against cycles.
        """
        if children is None:
            children = set()
        if top_level:
            if self == top_level:
                return children
            children.add(self)
        else:
            top_level = self

        def check(child: Optional[BaseModel]):
            # child may be None when the field allows it
            if not child or child in children:
                return
            if recursive:
                child.get_children(recursive, children, top_level)
            else:
                children.add(child)

        for value in self.dependency_fields.values():
            # one level of iterables is supported (no nested iterables)
            if isinstance(value, Iterable):
                value: Iterable[Any]
                for item in value:
                    check(item)
            else:
                check(value)
        return children

    @property
    def fields(self) -> Dict[str, Any]:
        """Mapping of every field name to its current value."""
        return {k: getattr(self, k) for k in self._filter_fields(lambda v: True)}

    @property
    def dependency_fields(self) -> Dict[str, Any]:
        """Mapping of fields declared with depends_on=True to their values."""
        return {
            k: getattr(self, k) for k in self._filter_fields(lambda v: v.depends_on)
        }

    def is_field_modified(self, field_name: str) -> bool:
        """
        Tell whether `field_name` changed since the last `_persist`.

        :raises ValueError: when the field does not exist on this model.
        """
        if field_name not in self._meta.fields:
            # BUG FIX: the second string fragment was missing its `f` prefix,
            # so the literal text "{self.__class__.__name__}" was emitted.
            raise ValueError(
                f"Field `{field_name}` does not exist in model"
                f" `{self.__class__.__name__}`."
            )
        return field_name in self._persistent_values

    def _filter_fields(self, filt: Callable[[Field], bool]):
        # subset of the field registry whose Field descriptor passes `filt`
        return {k: v for k, v in self._meta.fields.items() if filt(v)}

    def _rollback(self):
        """Restore all fields to their stored persistent values."""
        for attr, value in list(self._persistent_values.items()):
            setattr(self, attr, value)
        self._persist()

    def _persist(self):
        """Accept current values as persistent (clears rollback data)."""
        self._persistent_values = {}

    def _pre_update(self, field: Field[T_co], value: T_co):
        # notify listeners (e.g. the Session) before the field value changes
        self._listener.dispatch(MODEL_PRE_UPDATE_EVENT, self, field, value)

    def _update(self, field: Field[T_co], value: T_co):
        # a primary-key change invalidates the cached primary_keys mapping
        if field.pk:
            self._reset_primary_keys()
        self._listener.dispatch(MODEL_UPDATE_EVENT, self, field, value)

    def _update_persistent_values(self, field: Field[T_co], value: T_co):
        """
        Track the pre-change value of `field`; if the field is set back to
        its persistent value, the tracking entry is dropped.
        """
        name: str = field.name
        if name in self._persistent_values:
            if value == self._persistent_values[name]:
                del self._persistent_values[name]
        else:
            mutable_fields = self.fields
            if value != mutable_fields[name]:
                self._persistent_values[name] = mutable_fields[name]

    def __eq__(self, other: BaseModel) -> bool:
        # identity-based equality: the hash is derived from the instance UUID
        return isinstance(other, self.__class__) and self._hash == other._hash

    def __repr__(self) -> str:
        if not self._meta.repr:
            return super().__repr__()

        def get_field_repr(field: str):
            value = getattr(self, field)
            return f"{field}={_repr_obj.repr(value)}"

        repr_args: List[str] = [
            get_field_repr(n) for n in self._filter_fields(lambda x: x.repr)
        ]
        return f"{self.__class__.__name__}({', '.join(repr_args)})"

    def __hash__(self) -> int:
        return self._hash
| true | true |
f72ac444a8eab9e84fe6a3ecf0f61835271a6e97 | 4,638 | py | Python | opencv3_align_images.py | jaydenmedia/OpenCV3-Python | e0bfed6582447c567f100c507f5a8c59b621dfe1 | [
"MIT"
] | null | null | null | opencv3_align_images.py | jaydenmedia/OpenCV3-Python | e0bfed6582447c567f100c507f5a8c59b621dfe1 | [
"MIT"
] | null | null | null | opencv3_align_images.py | jaydenmedia/OpenCV3-Python | e0bfed6582447c567f100c507f5a8c59b621dfe1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will
# list the files in the input directory:
#   from subprocess import check_output
#   print(check_output(["ls", "../input"]).decode("utf8"))
# ORB is basically a fusion of the FAST keypoint detector and the BRIEF
# descriptor, with many modifications to enhance performance. First it uses
# FAST to find keypoints, then applies the Harris corner measure to keep the
# top N points among them.
#For any feature set of n binary tests at location (x_i, y_i),
# define a 2 \times n matrix, S which contains the coordinates of these pixels.
# Then using the orientation of patch, \theta, its rotation matrix is found
# and rotates the S to get steered(rotated) version S_\theta.
#ORB runs a greedy search among all possible binary tests to find the ones that
# have both high variance and means close to 0.5, as well as being uncorrelated.
# Any results write to the current directory are saved as output.
import numpy as np # linear algebra
import cv2
import os
import csv
import sys
from time import sleep
def im_align_orb(imp1, imp2, nf=10000):
    """
    :param imp1: image1 file path (alignment reference)
    :param imp2: image2 file path
    :param nf: max number of ORB key points
    :return: image2 warped so that it aligns with image1
    """
    img1 = cv2.imread(imp1, 0)  # grayscale for feature detection
    img2 = cv2.imread(imp2, 0)
    orb = cv2.ORB_create(nfeatures=nf, WTA_K=2)
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    # Brute-force Hamming matcher; keep matches passing Lowe's ratio test.
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
    matches = bf.knnMatch(des1, des2, 2)
    good = [
        (m[0].trainIdx, m[0].queryIdx)
        for m in matches
        if len(m) == 2 and m[0].distance < m[1].distance * 0.75
    ]
    kp1_ = np.float32([kp1[q].pt for (t, q) in good]).reshape(-1, 1, 2)
    kp2_ = np.float32([kp2[t].pt for (t, q) in good]).reshape(-1, 1, 2)
    # Homography mapping image2 points onto image1 (RANSAC, 1px threshold).
    H, _mask = cv2.findHomography(kp2_, kp1_, cv2.RANSAC, 1.0)
    h1, w1 = img1.shape[:2]
    # Warp the full-color image2 into image1's frame.
    return cv2.warpPerspective(cv2.imread(imp2), H, (w1, h1))
def align_set_by_id(setid, setvalue, isTrain=True, nFeatures=20000):
    """
    Align every image of one set against the set's first image.

    :param setid: image set id (files are named set<setid>_<id>.jpg)
    :param setvalue: list of image ids in the set; the first id is the
        alignment reference
    :param isTrain: read from the train (True) or test (False) path
    :param nFeatures: max number of ORB key points for im_align_orb
    :return: list of RGB images: the reference first, then each aligned image
    """
    train_path = '../output/train_sm/'
    test_path = '../output/test_sm/'
    # BUG FIX: this function previously read the module globals `key`/`elem`/
    # `om_len` instead of its own parameters, and the test branch built fn1
    # from the train path. Both are corrected below.
    image_path = train_path if isTrain else test_path
    outputpath = "./train_output/" if isTrain else "./test_output/"
    fn1 = image_path + "set" + str(setid) + "_" + str(setvalue[0]) + ".jpg"
    result = [cv2.cvtColor(cv2.imread(fn1), cv2.COLOR_BGR2RGB)]
    total = len(setvalue)
    for counter, img_id in enumerate(setvalue, start=1):
        fn2 = image_path + "set" + str(setid) + "_" + str(img_id) + ".jpg"
        print("fn1=%s, fn2=%s" % (os.path.basename(fn1), os.path.basename(fn2)))
        im = im_align_orb(fn1, fn2, nFeatures)
        cv2.imwrite(outputpath + os.path.basename(fn2), im)
        result.append(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))
        # Progress proportional to work done. The old loop redrew 0-100% for
        # every image and slept 0.25s x 21 ticks (5+ seconds per image).
        ticks = int(20 * counter / total)
        sys.stdout.write('\r[%-20s] %d%% %d/%d ' %
                         ('=' * ticks, 5 * ticks, counter, total))
        sys.stdout.flush()
    return result
def align_all_set(path, isTrain=True):
    """
    Align every image set found under *path*.

    :param path: directory containing set<NNN>_<id>.jpg images
    :param isTrain: forwarded to align_set_by_id
    """
    allfiles = os.listdir(path)
    allfiles = [
        os.path.basename(file) for file in allfiles if file.startswith('set')]
    # Group image ids per set. BUG FIX: the old code called align_set_by_id()
    # without its required `setvalue` argument (TypeError) and hard-coded
    # isTrain=True, ignoring this function's own parameter.
    sets = {}
    for fname in allfiles:
        stem = os.path.splitext(fname)[0]           # e.g. "set12_3"
        set_part, _, img_id = stem.partition("_")
        sets.setdefault(set_part.replace("set", ""), []).append(img_id)
    for set_id in sorted(sets):
        align_set_by_id(set_id, sets[set_id], isTrain=isTrain, nFeatures=20000)
#align_all_set(path='../output/train_sm')
def csv_lists(path):
    """
    Read a two-column CSV and group the second column by the first.

    Rows are expected grouped by set id with five entries per set; a set is
    emitted once its accumulator holds six elements (the id plus five values).

    :param path: CSV file path with rows like ["set12", "3.14"]
    :return: dict mapping each set id to the list of its five values
    """
    row = []
    matrix = {}
    with open(path) as f:
        for val in csv.reader(f):
            if not row:
                row.append(val[0])
            if row[0] == val[0]:
                row.append(val[1])
            else:
                # new set id: restart the accumulator
                # (the original `elif row != val[0]` compared a list with a
                # string and was therefore always true, i.e. a plain else)
                row = [val[0], val[1]]
            if len(row) == 6:  # BUG FIX: was `is 6` (identity, not equality)
                matrix[row[0]] = row[1:]
    return matrix
# Build {set_id: [five image ids]} from the training feature CSV, then
# align each set; aligned JPGs are written to disk by align_set_by_id.
outputmatrix = csv_lists('../output/features_means_train.csv')
om_len = len(outputmatrix)  # total set count, read by align_set_by_id's progress bar
for key, elem in list(outputmatrix.items()):
align_set_by_id(key, elem, isTrain=True, nFeatures=15000) | 32.661972 | 80 | 0.625916 |
import numpy as np
import cv2
import os
import csv
import sys
from time import sleep
def im_align_orb(imp1, imp2, nf=10000):
    """
    :param imp1: image1 file path (alignment reference)
    :param imp2: image2 file path
    :param nf: max number of ORB key points
    :return: image2 warped so that it aligns with image1
    """
    img1 = cv2.imread(imp1, 0)  # grayscale for feature detection
    img2 = cv2.imread(imp2, 0)
    orb = cv2.ORB_create(nfeatures=nf, WTA_K=2)
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    # Brute-force Hamming matcher; keep matches passing Lowe's ratio test.
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
    matches = bf.knnMatch(des1, des2, 2)
    good = [
        (m[0].trainIdx, m[0].queryIdx)
        for m in matches
        if len(m) == 2 and m[0].distance < m[1].distance * 0.75
    ]
    kp1_ = np.float32([kp1[q].pt for (t, q) in good]).reshape(-1, 1, 2)
    kp2_ = np.float32([kp2[t].pt for (t, q) in good]).reshape(-1, 1, 2)
    # Homography mapping image2 points onto image1 (RANSAC, 1px threshold).
    H, _mask = cv2.findHomography(kp2_, kp1_, cv2.RANSAC, 1.0)
    h1, w1 = img1.shape[:2]
    # Warp the full-color image2 into image1's frame.
    return cv2.warpPerspective(cv2.imread(imp2), H, (w1, h1))
def align_set_by_id(setid, setvalue, isTrain=True, nFeatures=20000):
    """
    Align every image of one set against the set's first image.

    :param setid: image set id (files are named set<setid>_<id>.jpg)
    :param setvalue: list of image ids in the set; the first id is the
        alignment reference
    :param isTrain: read from the train (True) or test (False) path
    :param nFeatures: max number of ORB key points for im_align_orb
    :return: list of RGB images: the reference first, then each aligned image
    """
    train_path = '../output/train_sm/'
    test_path = '../output/test_sm/'
    # BUG FIX: this function previously read the module globals `key`/`elem`/
    # `om_len` instead of its own parameters, and the test branch built fn1
    # from the train path. Both are corrected below.
    image_path = train_path if isTrain else test_path
    outputpath = "./train_output/" if isTrain else "./test_output/"
    fn1 = image_path + "set" + str(setid) + "_" + str(setvalue[0]) + ".jpg"
    result = [cv2.cvtColor(cv2.imread(fn1), cv2.COLOR_BGR2RGB)]
    total = len(setvalue)
    for counter, img_id in enumerate(setvalue, start=1):
        fn2 = image_path + "set" + str(setid) + "_" + str(img_id) + ".jpg"
        print("fn1=%s, fn2=%s" % (os.path.basename(fn1), os.path.basename(fn2)))
        im = im_align_orb(fn1, fn2, nFeatures)
        cv2.imwrite(outputpath + os.path.basename(fn2), im)
        result.append(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))
        # Progress proportional to work done. The old loop redrew 0-100% for
        # every image and slept 0.25s x 21 ticks (5+ seconds per image).
        ticks = int(20 * counter / total)
        sys.stdout.write('\r[%-20s] %d%% %d/%d ' %
                         ('=' * ticks, 5 * ticks, counter, total))
        sys.stdout.flush()
    return result
def align_all_set(path, isTrain=True):
    """
    Align every image set found under *path*.

    :param path: directory containing set<NNN>_<id>.jpg images
    :param isTrain: forwarded to align_set_by_id
    """
    allfiles = os.listdir(path)
    allfiles = [
        os.path.basename(file) for file in allfiles if file.startswith('set')]
    # Group image ids per set. BUG FIX: the old code called align_set_by_id()
    # without its required `setvalue` argument (TypeError) and hard-coded
    # isTrain=True, ignoring this function's own parameter.
    sets = {}
    for fname in allfiles:
        stem = os.path.splitext(fname)[0]           # e.g. "set12_3"
        set_part, _, img_id = stem.partition("_")
        sets.setdefault(set_part.replace("set", ""), []).append(img_id)
    for set_id in sorted(sets):
        align_set_by_id(set_id, sets[set_id], isTrain=isTrain, nFeatures=20000)
def csv_lists(path):
    """
    Read a two-column CSV and group the second column by the first.

    Rows are expected grouped by set id with five entries per set; a set is
    emitted once its accumulator holds six elements (the id plus five values).

    :param path: CSV file path with rows like ["set12", "3.14"]
    :return: dict mapping each set id to the list of its five values
    """
    row = []
    matrix = {}
    with open(path) as f:
        for val in csv.reader(f):
            if not row:
                row.append(val[0])
            if row[0] == val[0]:
                row.append(val[1])
            else:
                # new set id: restart the accumulator
                # (the original `elif row != val[0]` compared a list with a
                # string and was therefore always true, i.e. a plain else)
                row = [val[0], val[1]]
            if len(row) == 6:  # BUG FIX: was `is 6` (identity, not equality)
                matrix[row[0]] = row[1:]
    return matrix
# Build {set_id: [five image ids]} from the training feature CSV, then
# align each set; aligned JPGs are written to disk by align_set_by_id.
outputmatrix = csv_lists('../output/features_means_train.csv')
om_len = len(outputmatrix)  # total set count, read by align_set_by_id's progress bar
for key, elem in list(outputmatrix.items()):
align_set_by_id(key, elem, isTrain=True, nFeatures=15000) | true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.