hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f71e2f8d7692afc5941b35ae1ab1c5e9f76147ef | 1,438 | py | Python | setup.py | YuraHavrylko/revenuecat_python | a25b234933b6e80e1ff09b6a82d73a0e3df91caa | [
"MIT"
] | 1 | 2020-12-11T09:31:02.000Z | 2020-12-11T09:31:02.000Z | setup.py | YuraHavrylko/revenuecat_python | a25b234933b6e80e1ff09b6a82d73a0e3df91caa | [
"MIT"
] | null | null | null | setup.py | YuraHavrylko/revenuecat_python | a25b234933b6e80e1ff09b6a82d73a0e3df91caa | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
# Long-description sources: README gives the overview, HISTORY the changelog.
with open('README.md') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    history = history_file.read()
# Runtime, setup-time, and test-time dependency lists.
requirements = ['httpx', ]
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest>=3', ]
setup(
    author="Yurii Havrylko",
    author_email='yurii.havrylko@gmail.com',
    python_requires='>=3.5',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    description="Python RevenueCat lib",
    install_requires=requirements,
    license="MIT license",
    # PyPI long description: README followed by the release history.
    long_description=readme + '\n\n' + history,
    include_package_data=True,
    keywords='revenuecat_python',
    name='revenuecat_python',
    packages=find_packages(include=['revenuecat_python', 'revenuecat_python.*']),
    setup_requires=setup_requirements,
    test_suite='tests',
    tests_require=test_requirements,
    url='https://github.com/YuraHavrylko/revenuecat_python',
    version='0.2.0',
    zip_safe=False,
)
| 29.346939 | 81 | 0.659249 |
from setuptools import setup, find_packages
with open('README.md') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = ['httpx', ]
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest>=3', ]
setup(
author="Yurii Havrylko",
author_email='yurii.havrylko@gmail.com',
python_requires='>=3.5',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
description="Python RevenueCat lib",
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='revenuecat_python',
name='revenuecat_python',
packages=find_packages(include=['revenuecat_python', 'revenuecat_python.*']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/YuraHavrylko/revenuecat_python',
version='0.2.0',
zip_safe=False,
)
| true | true |
f71e32462865ab179efbd73d661ab16a1fde5483 | 12,404 | py | Python | cdk/app.py | liangfb/aws-dynamodb-cross-region-replication | 24a354158cb19c87785bdb59400c5522d706a9a1 | [
"MIT-0"
] | 12 | 2020-06-05T02:46:42.000Z | 2022-02-24T11:19:55.000Z | cdk/app.py | liangfb/aws-dynamodb-cross-region-replication | 24a354158cb19c87785bdb59400c5522d706a9a1 | [
"MIT-0"
] | null | null | null | cdk/app.py | liangfb/aws-dynamodb-cross-region-replication | 24a354158cb19c87785bdb59400c5522d706a9a1 | [
"MIT-0"
] | 4 | 2020-06-07T15:29:53.000Z | 2021-06-10T18:59:38.000Z | from aws_cdk import (
aws_lambda as lambda_,
aws_sqs as sqs,
aws_dynamodb as ddb,
aws_ec2 as ec2,
aws_kinesis as kinesis,
aws_ssm as ssm,
core
)
from aws_cdk.aws_dynamodb import StreamViewType
from aws_cdk.aws_ec2 import SubnetSelection, SubnetType
from aws_cdk.aws_iam import PolicyStatement, Effect
from aws_cdk.aws_lambda_event_sources import DynamoEventSource, SqsDlq, KinesisEventSource
from aws_cdk.core import RemovalPolicy
'''
0. Specify the regions in REGION_A and REGION_B for replication
'''
REGION_A = 'ap-southeast-1'
REGION_B = 'cn-north-1'
'''
1. Credential setting in SSM Parameter Store for the target region.
CDK is not allowed to deploy secure string for AKSK, you will need to set up parameter store in SSM manually
and provide the parameter prefix here, e.g.
for access_key, "/DDBReplication/TableCN/Access_Key" (StringType)
for secret_key, "/DDBReplication/TableCN/Secret_Key" (SecureStringType)
'''
PARAMETER_STORE_PREFIX = {
REGION_A:'/DDBReplication/TableCN/', # IMPORTANT! This is path to the AKSK to access REGION_B
REGION_B:'/DDBReplication/TableSG/' # IMPORTANT! This is path to the AKSK to access REGION_A
}
'''
2. Specify the existing key name here for SSH.
'''
KEY_NAME = {
REGION_A:'<key_pair_name_A>', # Key pair for loader EC2 in REGION_A
REGION_B:'<key_pair_name_B>' # Key pair for loader EC2 in REGION_B
}
'''
3. (Optional) Specify proxy server here if used.
'''
PROXY_SERVER = {
REGION_A:"<proxy_server_ip>:<port>",
REGION_B:"<proxy_server_ip>:<port>"
}
TABLE_NAME_PREFIX = 'user_cdk'
STREAM_NAME = 'ddb_replication_stream'
LOADER_STATS_TABLE_NAME = 'loader_stats'
REPLICATOR_STATS_TABLE_NAME = 'replicator_stats'
region_list = [REGION_A, REGION_B]
TABLE_NAME = { region:''.join([TABLE_NAME_PREFIX,'-',region]) for region in region_list }
class SourceDynamoStack(core.Stack):
    """Per-region source side of the replication pipeline.

    Creates the source DynamoDB table (with a stream), a loader-stats table,
    a load-generator EC2 instance, and the Lambda that forwards the table's
    stream records to the staging Kinesis stream in the *other* region.
    """
    def __init__(self, scope: core.Construct, _id: str,
                 key_name,
                 table_name,
                 parameter_store_prefix,
                 target_region,
                 proxy_server, **kwargs) -> None:
        # key_name: EC2 key pair name for SSH access to the loader instance.
        # table_name: name of the source DynamoDB table in this region.
        # parameter_store_prefix: SSM path prefix holding AKSK for target_region.
        # target_region: peer region whose Kinesis stream receives the records.
        # proxy_server: optional "<ip>:<port>" proxy for cross-region calls.
        super().__init__(scope, _id, **kwargs)

        # Create VPC, NAT Gateway
        vpc = ec2.Vpc(self, "VPC",
                      max_azs=2,
                      cidr="10.10.0.0/16",
                      # configuration will create 2 public subnets and 2 private subnets
                      subnet_configuration=[ec2.SubnetConfiguration(
                          subnet_type=SubnetType.PUBLIC,
                          name="Public",
                          cidr_mask=24
                      ),
                          ec2.SubnetConfiguration(
                              subnet_type=SubnetType.PRIVATE,
                              name="Private",
                              cidr_mask=24
                          )
                      ],
                      nat_gateways=1,
                      )
        # Create source table; the stream (NEW_AND_OLD_IMAGES) feeds the
        # send_to_kinesis Lambda below.
        source_ddb_table = ddb.Table(self, table_name,
                                     table_name=table_name,
                                     partition_key=ddb.Attribute(name="PK", type=ddb.AttributeType.STRING),
                                     billing_mode=ddb.BillingMode.PAY_PER_REQUEST,
                                     stream=StreamViewType.NEW_AND_OLD_IMAGES,
                                     removal_policy=RemovalPolicy.DESTROY)
        # Create loader_stats table for statistics of source table
        source_loader_stats_table = ddb.Table(self, LOADER_STATS_TABLE_NAME,
                                              table_name=LOADER_STATS_TABLE_NAME,
                                              partition_key=ddb.Attribute(name="PK", type=ddb.AttributeType.STRING),
                                              billing_mode=ddb.BillingMode.PAY_PER_REQUEST,
                                              removal_policy=RemovalPolicy.DESTROY
                                              )
        # Create EC2 instance for load testing
        public_subnets = vpc.public_subnets
        amzn_linux = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE
        )
        loader_instance = ec2.Instance(self, 'loader-dynamodb',
                                       instance_type=ec2.InstanceType("c5.large"),
                                       machine_image=amzn_linux,
                                       vpc=vpc,
                                       vpc_subnets=SubnetSelection(subnets=public_subnets),
                                       key_name=key_name
                                       )
        # NOTE(review): SSH is open to the world here; fine for a demo, tighten for prod.
        loader_instance.connections.allow_from_any_ipv4(ec2.Port.tcp(22), "Allow from SSH")
        # The loader EC2 will write to source table and loader statistics table
        source_ddb_table.grant_read_write_data(loader_instance)
        source_loader_stats_table.grant_read_write_data(loader_instance)
        # The loader EC2 also needs to put metrics in Cloudwatch
        put_metrics_policy = PolicyStatement(
            actions=['cloudwatch:PutMetricData'],
            effect=Effect.ALLOW,
            resources=['*']
        )
        loader_instance.add_to_role_policy(put_metrics_policy)
        # Create Lambda function send_to_kinesis in private subnet of VPC and set up environment variables
        private_subnets = vpc.private_subnets
        dest_sqs = sqs.Queue(self, 'send_to_kinesis_dest_Q')
        send_to_kinesis_lambda = lambda_.Function(self, 'ddb_send_to_kinesis',
                                                  code=lambda_.Code.asset("../lambda_send_to_kinesis"),
                                                  runtime=lambda_.Runtime.PYTHON_3_7,
                                                  handler='send_to_kinesis.lambda_handler',
                                                  timeout=core.Duration.seconds(60),
                                                  vpc=vpc,
                                                  vpc_subnets=SubnetSelection(subnets=private_subnets),
                                                  environment={
                                                      'PARAMETER_STORE_PATH_PREFIX':parameter_store_prefix,
                                                      'TARGET_REGION':target_region,
                                                      'TARGET_STREAM':STREAM_NAME,
                                                      'USE_PROXY':"FALSE",
                                                      'PROXY_SERVER':proxy_server}
                                                  )
        # Add event source of DynamoDB source table to lambda function;
        # failed batches are bisected and eventually land in dest_sqs.
        send_to_kinesis_lambda.add_event_source(DynamoEventSource(table=source_ddb_table,
                                                                  starting_position=lambda_.StartingPosition.LATEST,
                                                                  batch_size=500,
                                                                  retry_attempts=300,
                                                                  parallelization_factor=10,
                                                                  on_failure=SqsDlq(dest_sqs),
                                                                  bisect_batch_on_error=True
                                                                  ))
        # allow lambda to access the AKSK in SSM parameter store
        # (parameter names are prefix + 'AccessKey' / 'SecretKey')
        access_key_parameter_name = ''.join([parameter_store_prefix,'AccessKey'])
        secret_key_parameter_name = ''.join([parameter_store_prefix,'SecretKey'])
        ak = ssm.StringParameter.from_string_parameter_attributes(self, access_key_parameter_name,
                                                                  parameter_name=access_key_parameter_name,
                                                                  version=1)
        ak.grant_read(send_to_kinesis_lambda)
        sk = ssm.StringParameter.from_secure_string_parameter_attributes(self, secret_key_parameter_name,
                                                                         parameter_name=secret_key_parameter_name,
                                                                         version=1)
        sk.grant_read(send_to_kinesis_lambda)
        # Output public DNS name of the loader
        core.CfnOutput(self, "loader_instance",
                       value=loader_instance.instance_public_dns_name)
        core.CfnOutput(self, "source_table",
                       value=source_ddb_table.table_name)
        core.CfnOutput(self, "source_stats_table",
                       value=source_loader_stats_table.table_name)
class ReplicatorStack(core.Stack):
    """Per-region sink side of the replication pipeline.

    Creates the staging Kinesis stream plus the Lambda that consumes it and
    writes the replicated records into this region's DynamoDB table.
    """
    def __init__(self, scope: core.Construct, _id: str, **kwargs) -> None:
        # Only 'env' is a real Stack kwarg; 'target_table_name' is consumed here.
        new_kwargs = {'env':kwargs['env']}
        super().__init__(scope, _id, **new_kwargs)
        # Create staging Kinesis Data Stream, set to 1 shard in this sample code
        kinesis_stream = kinesis.Stream(self, STREAM_NAME,
                                        stream_name=STREAM_NAME,
                                        shard_count=1)
        # Create replicator lambda function that consumes the Kinesis stream and writes to target DDB table
        target_table_name = kwargs['target_table_name']
        dlq_sqs = sqs.Queue(self, 'replicator_failure_Q')
        replicator_lambda = lambda_.Function(self, 'replicator_kinesis',
                                             code=lambda_.Code.asset("../lambda_replicator"),
                                             runtime=lambda_.Runtime.PYTHON_3_7,
                                             handler='replicator_kinesis.lambda_handler',
                                             timeout=core.Duration.seconds(60),
                                             environment={
                                                 'TARGET_TABLE':target_table_name
                                             }
                                             )
        kinesis_stream.grant_read(replicator_lambda)
        # Batches that still fail after the retries go to the DLQ.
        replicator_lambda.add_event_source(KinesisEventSource(
            stream=kinesis_stream,
            starting_position=lambda_.StartingPosition.LATEST,
            batch_size=500,
            retry_attempts=100,
            parallelization_factor=10,
            on_failure=SqsDlq(dlq_sqs)
        ))
        # The target table is created elsewhere (this region's SourceDynamoStack);
        # import it by name rather than creating it here.
        target_table = ddb.Table.from_table_name(self,target_table_name,target_table_name)
        target_table.grant_read_write_data(replicator_lambda)
        # The replicator lambda will put metrics to Cloudwatch
        put_metrics_policy = PolicyStatement(
            actions=['cloudwatch:PutMetricData'],
            effect=Effect.ALLOW,
            resources=['*']
        )
        replicator_lambda.add_to_role_policy(put_metrics_policy)
        # Create replicator-stats table for statistics of replicator
        replicator_stats_table = ddb.Table(self, REPLICATOR_STATS_TABLE_NAME,
                                           table_name=REPLICATOR_STATS_TABLE_NAME,
                                           partition_key=ddb.Attribute(name="PK", type=ddb.AttributeType.STRING),
                                           billing_mode=ddb.BillingMode.PAY_PER_REQUEST,
                                           removal_policy=RemovalPolicy.DESTROY
                                           )
        replicator_stats_table.grant_read_write_data(replicator_lambda)
        core.CfnOutput(self, "replicator_stats_table",
                       value=replicator_stats_table.table_name)
app = core.App()
# Deploy a source stack and a replicator stack into each region; each source
# stack forwards its table's stream to the *other* region's Kinesis stream.
for region in region_list:
    SourceDynamoStack(app, "source-dynamo-"+region,
                      key_name = KEY_NAME[region],
                      table_name=TABLE_NAME[region],
                      # The peer region is whichever of the two this is not.
                      target_region=(set(region_list) - set([region])).pop(),
                      parameter_store_prefix=PARAMETER_STORE_PREFIX[region],
                      proxy_server=PROXY_SERVER[region],
                      env = {'region':region}
                      )
    ReplicatorStack(app, "replicator-"+region, env={'region':region},
                    target_table_name = TABLE_NAME[region])
app.synth()
| 49.815261 | 116 | 0.545066 | from aws_cdk import (
aws_lambda as lambda_,
aws_sqs as sqs,
aws_dynamodb as ddb,
aws_ec2 as ec2,
aws_kinesis as kinesis,
aws_ssm as ssm,
core
)
from aws_cdk.aws_dynamodb import StreamViewType
from aws_cdk.aws_ec2 import SubnetSelection, SubnetType
from aws_cdk.aws_iam import PolicyStatement, Effect
from aws_cdk.aws_lambda_event_sources import DynamoEventSource, SqsDlq, KinesisEventSource
from aws_cdk.core import RemovalPolicy
REGION_A = 'ap-southeast-1'
REGION_B = 'cn-north-1'
PARAMETER_STORE_PREFIX = {
REGION_A:'/DDBReplication/TableCN/',
REGION_B:'/DDBReplication/TableSG/'
}
KEY_NAME = {
REGION_A:'<key_pair_name_A>',
REGION_B:'<key_pair_name_B>'
}
PROXY_SERVER = {
REGION_A:"<proxy_server_ip>:<port>",
REGION_B:"<proxy_server_ip>:<port>"
}
TABLE_NAME_PREFIX = 'user_cdk'
STREAM_NAME = 'ddb_replication_stream'
LOADER_STATS_TABLE_NAME = 'loader_stats'
REPLICATOR_STATS_TABLE_NAME = 'replicator_stats'
region_list = [REGION_A, REGION_B]
TABLE_NAME = { region:''.join([TABLE_NAME_PREFIX,'-',region]) for region in region_list }
class SourceDynamoStack(core.Stack):
def __init__(self, scope: core.Construct, _id: str,
key_name,
table_name,
parameter_store_prefix,
target_region,
proxy_server, **kwargs) -> None:
super().__init__(scope, _id, **kwargs)
vpc = ec2.Vpc(self, "VPC",
max_azs=2,
cidr="10.10.0.0/16",
subnet_configuration=[ec2.SubnetConfiguration(
subnet_type=SubnetType.PUBLIC,
name="Public",
cidr_mask=24
),
ec2.SubnetConfiguration(
subnet_type=SubnetType.PRIVATE,
name="Private",
cidr_mask=24
)
],
nat_gateways=1,
)
source_ddb_table = ddb.Table(self, table_name,
table_name=table_name,
partition_key=ddb.Attribute(name="PK", type=ddb.AttributeType.STRING),
billing_mode=ddb.BillingMode.PAY_PER_REQUEST,
stream=StreamViewType.NEW_AND_OLD_IMAGES,
removal_policy=RemovalPolicy.DESTROY)
source_loader_stats_table = ddb.Table(self, LOADER_STATS_TABLE_NAME,
table_name=LOADER_STATS_TABLE_NAME,
partition_key=ddb.Attribute(name="PK", type=ddb.AttributeType.STRING),
billing_mode=ddb.BillingMode.PAY_PER_REQUEST,
removal_policy=RemovalPolicy.DESTROY
)
public_subnets = vpc.public_subnets
amzn_linux = ec2.MachineImage.latest_amazon_linux(
generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
edition=ec2.AmazonLinuxEdition.STANDARD,
virtualization=ec2.AmazonLinuxVirt.HVM,
storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE
)
loader_instance = ec2.Instance(self, 'loader-dynamodb',
instance_type=ec2.InstanceType("c5.large"),
machine_image=amzn_linux,
vpc=vpc,
vpc_subnets=SubnetSelection(subnets=public_subnets),
key_name=key_name
)
loader_instance.connections.allow_from_any_ipv4(ec2.Port.tcp(22), "Allow from SSH")
source_ddb_table.grant_read_write_data(loader_instance)
source_loader_stats_table.grant_read_write_data(loader_instance)
put_metrics_policy = PolicyStatement(
actions=['cloudwatch:PutMetricData'],
effect=Effect.ALLOW,
resources=['*']
)
loader_instance.add_to_role_policy(put_metrics_policy)
private_subnets = vpc.private_subnets
dest_sqs = sqs.Queue(self, 'send_to_kinesis_dest_Q')
send_to_kinesis_lambda = lambda_.Function(self, 'ddb_send_to_kinesis',
code=lambda_.Code.asset("../lambda_send_to_kinesis"),
runtime=lambda_.Runtime.PYTHON_3_7,
handler='send_to_kinesis.lambda_handler',
timeout=core.Duration.seconds(60),
vpc=vpc,
vpc_subnets=SubnetSelection(subnets=private_subnets),
environment={
'PARAMETER_STORE_PATH_PREFIX':parameter_store_prefix,
'TARGET_REGION':target_region,
'TARGET_STREAM':STREAM_NAME,
'USE_PROXY':"FALSE",
'PROXY_SERVER':proxy_server}
)
send_to_kinesis_lambda.add_event_source(DynamoEventSource(table=source_ddb_table,
starting_position=lambda_.StartingPosition.LATEST,
batch_size=500,
retry_attempts=300,
parallelization_factor=10,
on_failure=SqsDlq(dest_sqs),
bisect_batch_on_error=True
))
access_key_parameter_name = ''.join([parameter_store_prefix,'AccessKey'])
secret_key_parameter_name = ''.join([parameter_store_prefix,'SecretKey'])
ak = ssm.StringParameter.from_string_parameter_attributes(self, access_key_parameter_name,
parameter_name=access_key_parameter_name,
version=1)
ak.grant_read(send_to_kinesis_lambda)
sk = ssm.StringParameter.from_secure_string_parameter_attributes(self, secret_key_parameter_name,
parameter_name=secret_key_parameter_name,
version=1)
sk.grant_read(send_to_kinesis_lambda)
core.CfnOutput(self, "loader_instance",
value=loader_instance.instance_public_dns_name)
core.CfnOutput(self, "source_table",
value=source_ddb_table.table_name)
core.CfnOutput(self, "source_stats_table",
value=source_loader_stats_table.table_name)
class ReplicatorStack(core.Stack):
def __init__(self, scope: core.Construct, _id: str, **kwargs) -> None:
new_kwargs = {'env':kwargs['env']}
super().__init__(scope, _id, **new_kwargs)
kinesis_stream = kinesis.Stream(self, STREAM_NAME,
stream_name=STREAM_NAME,
shard_count=1)
target_table_name = kwargs['target_table_name']
dlq_sqs = sqs.Queue(self, 'replicator_failure_Q')
replicator_lambda = lambda_.Function(self, 'replicator_kinesis',
code=lambda_.Code.asset("../lambda_replicator"),
runtime=lambda_.Runtime.PYTHON_3_7,
handler='replicator_kinesis.lambda_handler',
timeout=core.Duration.seconds(60),
environment={
'TARGET_TABLE':target_table_name
}
)
kinesis_stream.grant_read(replicator_lambda)
replicator_lambda.add_event_source(KinesisEventSource(
stream=kinesis_stream,
starting_position=lambda_.StartingPosition.LATEST,
batch_size=500,
retry_attempts=100,
parallelization_factor=10,
on_failure=SqsDlq(dlq_sqs)
))
target_table = ddb.Table.from_table_name(self,target_table_name,target_table_name)
target_table.grant_read_write_data(replicator_lambda)
put_metrics_policy = PolicyStatement(
actions=['cloudwatch:PutMetricData'],
effect=Effect.ALLOW,
resources=['*']
)
replicator_lambda.add_to_role_policy(put_metrics_policy)
replicator_stats_table = ddb.Table(self, REPLICATOR_STATS_TABLE_NAME,
table_name=REPLICATOR_STATS_TABLE_NAME,
partition_key=ddb.Attribute(name="PK", type=ddb.AttributeType.STRING),
billing_mode=ddb.BillingMode.PAY_PER_REQUEST,
removal_policy=RemovalPolicy.DESTROY
)
replicator_stats_table.grant_read_write_data(replicator_lambda)
core.CfnOutput(self, "replicator_stats_table",
value=replicator_stats_table.table_name)
app = core.App()
for region in region_list:
SourceDynamoStack(app, "source-dynamo-"+region,
key_name = KEY_NAME[region],
table_name=TABLE_NAME[region],
target_region=(set(region_list) - set([region])).pop(),
parameter_store_prefix=PARAMETER_STORE_PREFIX[region],
proxy_server=PROXY_SERVER[region],
env = {'region':region}
)
ReplicatorStack(app, "replicator-"+region, env={'region':region},
target_table_name = TABLE_NAME[region])
app.synth()
| true | true |
f71e32526c89d04a91ff2d8fb4b429508b64ebc4 | 316 | py | Python | 30 Days of Code/Day 5 Loops/Solution.py | iamnambiar/HackerRank-Solutions | 6fdcab79b18e66a6d7278b979a8be087f8f6c696 | [
"MIT"
] | 2 | 2020-04-06T10:32:08.000Z | 2021-04-23T04:32:45.000Z | 30 Days of Code/Day 5 Loops/Solution.py | iamnambiar/HackerRank-Solutions | 6fdcab79b18e66a6d7278b979a8be087f8f6c696 | [
"MIT"
] | null | null | null | 30 Days of Code/Day 5 Loops/Solution.py | iamnambiar/HackerRank-Solutions | 6fdcab79b18e66a6d7278b979a8be087f8f6c696 | [
"MIT"
] | null | null | null | # https://www.hackerrank.com/challenges/30-loops/problem
#!/bin/python3
import math
import os
import random
import re
import sys
def printMultiples(number):
    """Print the 1-through-10 multiplication table for *number*."""
    for factor in range(1, 11):
        print("{0} x {1} = {2}".format(number, factor, number * factor))
if __name__ == '__main__':
n = int(input())
printMultiples(n) | 18.588235 | 59 | 0.664557 |
import math
import os
import random
import re
import sys
def printMultiples(number):
    """Print the multiplication table of *number* for factors 1..10."""
    for i in range(1, 11):
        table_row = " ".join([str(number), "x", str(i), "=", str(number * i)])
        print(table_row)
if __name__ == '__main__':
n = int(input())
printMultiples(n) | true | true |
f71e331cef3b1394d9e7e053e583141c9058ad7e | 2,466 | py | Python | tests/test_vault.py | zx2c4-forks/angr | e15619d99a093af77943b28b5b368e6607a2f336 | [
"BSD-2-Clause"
] | 2 | 2020-04-29T02:39:42.000Z | 2020-04-29T08:07:44.000Z | tests/test_vault.py | hhuihuang/helios-angr | 1978fa5392d65901633191b61c01017627ab7755 | [
"BSD-2-Clause"
] | null | null | null | tests/test_vault.py | hhuihuang/helios-angr | 1978fa5392d65901633191b61c01017627ab7755 | [
"BSD-2-Clause"
] | null | null | null | import claripy
import angr
class A:
    # Minimal picklable fixture; `n` distinguishes instances across
    # vault store/load round-trips.
    n = 0
def do_vault_identity(v_factory):
    """Exercise a vault with identity-preserving (uuid-dedup) storage.

    Loads must hand back the *same* Python objects that were stored, and a
    mutation made after storing must not survive once the cached instance
    has been garbage collected.
    """
    v = v_factory()
    v.uuid_dedup.add(A)
    assert len(v.keys()) == 0
    a = A()
    b = A()
    b.n = 1
    c = A()
    c.n = 2
    # Each store mints exactly one new key.
    aid = v.store(a)
    assert len(v.keys()) == 1
    bid = v.store(b)
    assert len(v.keys()) == 2
    cid = v.store(c)
    assert len(v.keys()) == 3
    aa = v.load(aid)
    bb = v.load(bid)
    cc = v.load(cid)
    # Identity, not just equality: dedup must return the cached objects.
    assert aa is a
    assert bb is b
    assert cc is c
    # Mutate after storing, then drop all live references so the dedup
    # cache entry can be collected.
    bb.n = 1337
    del bb
    del b
    import gc
    gc.collect()
    # A fresh load must reflect the originally stored state (n == 1),
    # not the post-store mutation (1337).
    bbb = v.load(bid)
    assert bbb.n == 1
def do_vault_noidentity(v_factory):
    """Exercise a vault without identity preservation.

    Loads return equal-but-distinct copies, and re-storing a loaded copy
    creates a new key rather than reusing the original entry.
    """
    v = v_factory()
    assert len(v.keys()) == 0
    a = A()
    b = A()
    b.n = 1
    c = A()
    c.n = 2
    # Each store mints exactly one new key.
    aid = v.store(a)
    assert len(v.keys()) == 1
    bid = v.store(b)
    assert len(v.keys()) == 2
    cid = v.store(c)
    assert len(v.keys()) == 3
    aa = v.load(aid)
    bb = v.load(bid)
    cc = v.load(cid)
    # Without dedup, loads are copies, never the original objects.
    assert aa is not a
    assert bb is not b
    assert cc is not c
    # Storing the copies mints three additional keys.
    v.store(aa)
    assert len(v.keys()) == 4
    v.store(bb)
    assert len(v.keys()) == 5
    v.store(cc)
    assert len(v.keys()) == 6
def do_ast_vault(v_factory):
    """Check that claripy ASTs round-trip through the vault with identity."""
    v = v_factory()
    x = claripy.BVS("x", 32)
    y = claripy.BVS("y", 32)
    z = x + y
    v.store(x)
    assert len(v.keys()) == 1
    zid = v.store(z)
    # Storing z also stores its remaining children, hence 3 keys total
    # (x was already present).
    assert len(v.keys()) == 3
    # Both load() and loads() must return the identical AST object.
    zz = v.load(zid)
    assert z is zz
    zs = v.dumps(z)
    zzz = v.loads(zs)
    assert zzz is z
def test_vault():
    """Yield (check, vault_factory) pairs covering every vault backend."""
    backends = (angr.vaults.VaultDir, angr.vaults.VaultShelf, angr.vaults.VaultDict)
    for check in (do_vault_noidentity, do_vault_identity):
        for backend in backends:
            yield check, backend
def test_ast_vault():
    """Yield (check, vault_factory) pairs for the AST round-trip check."""
    for backend in (angr.vaults.VaultDir, angr.vaults.VaultShelf, angr.vaults.VaultDict):
        yield do_ast_vault, backend
def test_project():
    """Round-trip an angr Project through a VaultDir.

    Verifies that a Project is deduplicated (load/loads return the same
    object, and only one 'Project*' key ever exists) and that it can be
    reloaded after all live references are collected.
    """
    v = angr.vaults.VaultDir()
    p = angr.Project("/bin/false")
    ps = v.store(p)
    pp = v.load(ps)
    assert p is pp
    # Repeated store/dumps/loads must not create duplicate Project keys.
    assert sum(1 for k in v.keys() if k.startswith('Project')) == 1
    pstring = v.dumps(p)
    assert sum(1 for k in v.keys() if k.startswith('Project')) == 1
    pp2 = v.loads(pstring)
    assert sum(1 for k in v.keys() if k.startswith('Project')) == 1
    assert p is pp
    # Mutate, then drop every live reference so the cached instance dies.
    p._asdf = 'fdsa'
    del pp2
    del pp
    del p
    import gc
    gc.collect()
    # Reload from disk after collection; still exactly one Project key.
    p = v.load(ps)
    #assert not hasattr(p, '_asdf')
    assert sum(1 for k in v.keys() if k.startswith('Project')) == 1
if __name__ == '__main__':
    # Drive the generator-style tests by hand when run as a script.
    for _check, _factory in test_vault():
        _check(_factory)
    for _check, _factory in test_ast_vault():
        _check(_factory)
    test_project()
| 18.132353 | 64 | 0.643958 | import claripy
import angr
class A:
n = 0
def do_vault_identity(v_factory):
v = v_factory()
v.uuid_dedup.add(A)
assert len(v.keys()) == 0
a = A()
b = A()
b.n = 1
c = A()
c.n = 2
aid = v.store(a)
assert len(v.keys()) == 1
bid = v.store(b)
assert len(v.keys()) == 2
cid = v.store(c)
assert len(v.keys()) == 3
aa = v.load(aid)
bb = v.load(bid)
cc = v.load(cid)
assert aa is a
assert bb is b
assert cc is c
bb.n = 1337
del bb
del b
import gc
gc.collect()
bbb = v.load(bid)
assert bbb.n == 1
def do_vault_noidentity(v_factory):
v = v_factory()
assert len(v.keys()) == 0
a = A()
b = A()
b.n = 1
c = A()
c.n = 2
aid = v.store(a)
assert len(v.keys()) == 1
bid = v.store(b)
assert len(v.keys()) == 2
cid = v.store(c)
assert len(v.keys()) == 3
aa = v.load(aid)
bb = v.load(bid)
cc = v.load(cid)
assert aa is not a
assert bb is not b
assert cc is not c
v.store(aa)
assert len(v.keys()) == 4
v.store(bb)
assert len(v.keys()) == 5
v.store(cc)
assert len(v.keys()) == 6
def do_ast_vault(v_factory):
v = v_factory()
x = claripy.BVS("x", 32)
y = claripy.BVS("y", 32)
z = x + y
v.store(x)
assert len(v.keys()) == 1
zid = v.store(z)
assert len(v.keys()) == 3
zz = v.load(zid)
assert z is zz
zs = v.dumps(z)
zzz = v.loads(zs)
assert zzz is z
def test_vault():
yield do_vault_noidentity, angr.vaults.VaultDir
yield do_vault_noidentity, angr.vaults.VaultShelf
yield do_vault_noidentity, angr.vaults.VaultDict
yield do_vault_identity, angr.vaults.VaultDir
yield do_vault_identity, angr.vaults.VaultShelf
yield do_vault_identity, angr.vaults.VaultDict
def test_ast_vault():
yield do_ast_vault, angr.vaults.VaultDir
yield do_ast_vault, angr.vaults.VaultShelf
yield do_ast_vault, angr.vaults.VaultDict
def test_project():
v = angr.vaults.VaultDir()
p = angr.Project("/bin/false")
ps = v.store(p)
pp = v.load(ps)
assert p is pp
assert sum(1 for k in v.keys() if k.startswith('Project')) == 1
pstring = v.dumps(p)
assert sum(1 for k in v.keys() if k.startswith('Project')) == 1
pp2 = v.loads(pstring)
assert sum(1 for k in v.keys() if k.startswith('Project')) == 1
assert p is pp
p._asdf = 'fdsa'
del pp2
del pp
del p
import gc
gc.collect()
p = v.load(ps)
assert sum(1 for k in v.keys() if k.startswith('Project')) == 1
if __name__ == '__main__':
for _a,_b in test_vault():
_a(_b)
for _a,_b in test_ast_vault():
_a(_b)
test_project()
| true | true |
f71e33bd084310d0aeba71a25aac58cd5fcf8bbb | 1,082 | py | Python | line.py | kavach-feature/Advanced_lane_finding | 12e4e330e338734fdb35655c7581b98ba1eb490b | [
"MIT"
] | null | null | null | line.py | kavach-feature/Advanced_lane_finding | 12e4e330e338734fdb35655c7581b98ba1eb490b | [
"MIT"
] | null | null | null | line.py | kavach-feature/Advanced_lane_finding | 12e4e330e338734fdb35655c7581b98ba1eb490b | [
"MIT"
] | null | null | null | import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
class Line():
    """Smooths 2nd-order lane-line polynomial fits over the last *n* frames."""

    def __init__(self, n):
        # Size of the smoothing window (number of frames averaged).
        self.n = n
        self.detected = False
        # Coefficient histories for A*y**2 + B*y + C, newest entry last.
        self.A = []
        self.B = []
        self.C = []
        # Running averages over the current window.
        self.A_avg = 0.
        self.B_avg = 0.
        self.C_avg = 0.

    def obtain_fit(self):
        """Return the smoothed (A, B, C) coefficient triple."""
        return (self.A_avg, self.B_avg, self.C_avg)

    def update_fit(self, fit_coeffs):
        """Fold one frame's (A, B, C) fit coefficients into the window.

        Appends the newest triple, evicts the oldest once the window holds
        *n* fits, and returns the refreshed averages.
        """
        window_full = len(self.A) >= self.n
        self.A.append(fit_coeffs[0])
        self.B.append(fit_coeffs[1])
        self.C.append(fit_coeffs[2])
        if window_full:
            del self.A[0]
            del self.B[0]
            del self.C[0]
        self.A_avg = np.mean(self.A)
        self.B_avg = np.mean(self.B)
        self.C_avg = np.mean(self.C)
        return (self.A_avg, self.B_avg, self.C_avg)
| 18.655172 | 106 | 0.660813 | import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
class Line():
def __init__(self,n):
self.n=n
self.detected =False
self.A=[]
self.B=[]
self.C=[]
self.A_avg=0.
self.B_avg=0.
self.C_avg=0.
def obtain_fit(self):
return (self.A_avg,self.B_avg,self.C_avg)
def update_fit(self,fit_coeffs):
full_Q= len(self.A) >= self.n
self.A.append(fit_coeffs[0])
self.B.append(fit_coeffs[1])
self.C.append(fit_coeffs[2])
if full_Q:
_=self.A.pop(0)
_=self.B.pop(0)
_=self.C.pop(0)
self.A_avg = np.mean(self.A)
self.B_avg = np.mean(self.B)
self.C_avg = np.mean(self.C)
return (self.A_avg,self.B_avg,self.C_avg)
| true | true |
f71e34484d2699805f3e8b41dc2369ce69733744 | 5,651 | py | Python | devstack/files/grafana/grafana-init.py | zhangjianweibj/monasca-api | 26133aefe413546f91aaa13c981fe93a69dfc2eb | [
"Apache-2.0"
] | null | null | null | devstack/files/grafana/grafana-init.py | zhangjianweibj/monasca-api | 26133aefe413546f91aaa13c981fe93a69dfc2eb | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | devstack/files/grafana/grafana-init.py | zhangjianweibj/monasca-api | 26133aefe413546f91aaa13c981fe93a69dfc2eb | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | #!/usr/bin/env python
# coding=utf-8
# (C) Copyright 2017 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import json
import logging
import os
import sys
import time
from requests import RequestException
from requests import Session
LOG_LEVEL = logging.getLevelName(os.environ.get('LOG_LEVEL', 'INFO'))
logging.basicConfig(level=LOG_LEVEL)
logger = logging.getLogger(__name__)
GRAFANA_URL = os.environ.get('GRAFANA_URL', 'http://localhost:3000')
GRAFANA_USERNAME = os.environ.get('GRAFANA_USERNAME', 'mini-mon')
GRAFANA_PASSWORD = os.environ.get('GRAFANA_PASSWORD', 'password')
GRAFANA_USERS = [{'user': GRAFANA_USERNAME, 'password': GRAFANA_PASSWORD, 'email': ''}]
DATASOURCE_NAME = os.environ.get('DATASOURCE_NAME', 'monasca')
DATASOURCE_URL = os.environ.get('DATASOURCE_URL', 'http://localhost/metrics')
DATASOURCE_ACCESS_MODE = os.environ.get('DATASOURCE_ACCESS_MODE', 'proxy')
DATASOURCE_AUTH = os.environ.get('DATASOURCE_AUTH', 'Keystone').capitalize()
DATASOURCE_AUTH_TOKEN = os.environ.get('DATASOURCE_AUTH_TOKEN', '')
DASHBOARDS_DIR = os.environ.get('DASHBOARDS_DIR', '/dashboards.d')
def retry(retries=5, delay=2.0, exc_types=(RequestException,)):
    """Return a decorator that retries the wrapped call on failure.

    The call is attempted up to *retries* times, sleeping *delay* seconds
    between attempts; only exceptions in *exc_types* trigger a retry.
    The final failure is logged and re-raised.
    """
    def decorator(func):
        def f_retry(*args, **kwargs):
            for i in range(retries):
                try:
                    return func(*args, **kwargs)
                except exc_types as exc:
                    if i < retries - 1:
                        logger.debug('Caught exception, retrying...',
                                     exc_info=True)
                        time.sleep(delay)
                    else:
                        logger.exception('Failed after %d attempts', retries)
                        # A RequestException raised before any response
                        # arrived (ConnectionError, Timeout) has
                        # response=None; guard so logging doesn't mask the
                        # real error with an AttributeError.
                        if isinstance(exc, RequestException) and exc.response is not None:
                            logger.debug('Response was: %r', exc.response.text)
                        raise
        return f_retry
    return decorator
def create_login_payload():
    """Return the list of Grafana login dicts ({'user','password','email'}).

    Reads a JSON list from the GRAFANA_USERS environment variable when set;
    otherwise falls back to the module-level GRAFANA_USERS default.
    Raises ValueError when the environment variable is not valid JSON.
    """
    raw = os.environ.get('GRAFANA_USERS')
    if raw:
        try:
            # Parse once; the original validated and then re-parsed the
            # same string a second time.
            return json.loads(raw)
        except ValueError:
            print("Invalid type GRAFANA_USERS")
            raise
    return GRAFANA_USERS
@retry(retries=24, delay=5.0)
def login(session, user):
    """Log *session* in to Grafana as *user*.

    Retried for up to ~2 minutes so the script can start before Grafana
    finishes booting.
    """
    r = session.post('{url}/login'.format(url=GRAFANA_URL),
                     json=user,
                     timeout=5)
    r.raise_for_status()
@retry(retries=12, delay=5.0)
def check_initialized(session):
    """Return True when a datasource named DATASOURCE_NAME already exists."""
    resp = session.get('{url}/api/datasources'.format(url=GRAFANA_URL), timeout=5)
    resp.raise_for_status()
    existing = resp.json()
    logging.debug('existing datasources = %r', existing)
    return any(source['name'] == DATASOURCE_NAME for source in existing)
def create_datasource_payload():
payload = {
'name': DATASOURCE_NAME,
'url': DATASOURCE_URL,
'access': DATASOURCE_ACCESS_MODE,
'isDefault': True,
}
if DATASOURCE_AUTH not in ['Keystone', 'Horizon', 'Token']:
logger.error('Unknown Keystone authentication option: %s',
DATASOURCE_AUTH)
sys.exit(1)
keystone_auth = False
if DATASOURCE_AUTH in ['Keystone']:
keystone_auth = True
payload.update({
'monasca': {
'type': 'monasca-datasource',
'jsonData': {
'authMode': DATASOURCE_AUTH,
'keystoneAuth': keystone_auth,
'token': DATASOURCE_AUTH_TOKEN,
}
}
}.get(DATASOURCE_NAME, {}))
logging.debug('payload = %r', payload)
return payload
def create_dashboard_payload(json_path):
with open(json_path, 'r') as f:
dashboard = json.load(f)
dashboard['id'] = None
return {
'dashboard': dashboard,
'overwrite': False
}
def main():
for user in create_login_payload():
logging.info('Opening a Grafana session...')
session = Session()
login(session, user)
if check_initialized(session):
logging.info('Grafana has already been initialized, skipping!')
return
logging.info('Attempting to add configured datasource...')
r = session.post('{url}/api/datasources'.format(url=GRAFANA_URL),
json=create_datasource_payload())
logging.debug('Response: %r', r.json())
r.raise_for_status()
for path in sorted(glob.glob('{dir}/*.json'.format(dir=DASHBOARDS_DIR))):
logging.info('Creating dashboard from file: {path}'.format(path=path))
r = session.post('{url}/api/dashboards/db'.format(url=GRAFANA_URL),
json=create_dashboard_payload(path))
logging.debug('Response: %r', r.json())
r.raise_for_status()
logging.info('Ending %r session...', user.get('user'))
session.get('{url}/logout'.format(url=GRAFANA_URL))
logging.info('Finished successfully.')
if __name__ == '__main__':
main()
| 31.747191 | 87 | 0.623076 |
import glob
import json
import logging
import os
import sys
import time
from requests import RequestException
from requests import Session
LOG_LEVEL = logging.getLevelName(os.environ.get('LOG_LEVEL', 'INFO'))
logging.basicConfig(level=LOG_LEVEL)
logger = logging.getLogger(__name__)
GRAFANA_URL = os.environ.get('GRAFANA_URL', 'http://localhost:3000')
GRAFANA_USERNAME = os.environ.get('GRAFANA_USERNAME', 'mini-mon')
GRAFANA_PASSWORD = os.environ.get('GRAFANA_PASSWORD', 'password')
GRAFANA_USERS = [{'user': GRAFANA_USERNAME, 'password': GRAFANA_PASSWORD, 'email': ''}]
DATASOURCE_NAME = os.environ.get('DATASOURCE_NAME', 'monasca')
DATASOURCE_URL = os.environ.get('DATASOURCE_URL', 'http://localhost/metrics')
DATASOURCE_ACCESS_MODE = os.environ.get('DATASOURCE_ACCESS_MODE', 'proxy')
DATASOURCE_AUTH = os.environ.get('DATASOURCE_AUTH', 'Keystone').capitalize()
DATASOURCE_AUTH_TOKEN = os.environ.get('DATASOURCE_AUTH_TOKEN', '')
DASHBOARDS_DIR = os.environ.get('DASHBOARDS_DIR', '/dashboards.d')
def retry(retries=5, delay=2.0, exc_types=(RequestException,)):
def decorator(func):
def f_retry(*args, **kwargs):
for i in range(retries):
try:
return func(*args, **kwargs)
except exc_types as exc:
if i < retries - 1:
logger.debug('Caught exception, retrying...',
exc_info=True)
time.sleep(delay)
else:
logger.exception('Failed after %d attempts', retries)
if isinstance(exc, RequestException):
logger.debug('Response was: %r', exc.response.text)
raise
return f_retry
return decorator
def create_login_payload():
if os.environ.get('GRAFANA_USERS'):
try:
json.loads(os.environ.get('GRAFANA_USERS'))
except ValueError:
print("Invalid type GRAFANA_USERS")
raise
grafana_users = json.loads(os.environ.get('GRAFANA_USERS'))
else:
grafana_users = GRAFANA_USERS
return grafana_users
@retry(retries=24, delay=5.0)
def login(session, user):
r = session.post('{url}/login'.format(url=GRAFANA_URL),
json=user,
timeout=5)
r.raise_for_status()
@retry(retries=12, delay=5.0)
def check_initialized(session):
r = session.get('{url}/api/datasources'.format(url=GRAFANA_URL), timeout=5)
r.raise_for_status()
logging.debug('existing datasources = %r', r.json())
for datasource in r.json():
if datasource['name'] == DATASOURCE_NAME:
return True
return False
def create_datasource_payload():
payload = {
'name': DATASOURCE_NAME,
'url': DATASOURCE_URL,
'access': DATASOURCE_ACCESS_MODE,
'isDefault': True,
}
if DATASOURCE_AUTH not in ['Keystone', 'Horizon', 'Token']:
logger.error('Unknown Keystone authentication option: %s',
DATASOURCE_AUTH)
sys.exit(1)
keystone_auth = False
if DATASOURCE_AUTH in ['Keystone']:
keystone_auth = True
payload.update({
'monasca': {
'type': 'monasca-datasource',
'jsonData': {
'authMode': DATASOURCE_AUTH,
'keystoneAuth': keystone_auth,
'token': DATASOURCE_AUTH_TOKEN,
}
}
}.get(DATASOURCE_NAME, {}))
logging.debug('payload = %r', payload)
return payload
def create_dashboard_payload(json_path):
with open(json_path, 'r') as f:
dashboard = json.load(f)
dashboard['id'] = None
return {
'dashboard': dashboard,
'overwrite': False
}
def main():
for user in create_login_payload():
logging.info('Opening a Grafana session...')
session = Session()
login(session, user)
if check_initialized(session):
logging.info('Grafana has already been initialized, skipping!')
return
logging.info('Attempting to add configured datasource...')
r = session.post('{url}/api/datasources'.format(url=GRAFANA_URL),
json=create_datasource_payload())
logging.debug('Response: %r', r.json())
r.raise_for_status()
for path in sorted(glob.glob('{dir}/*.json'.format(dir=DASHBOARDS_DIR))):
logging.info('Creating dashboard from file: {path}'.format(path=path))
r = session.post('{url}/api/dashboards/db'.format(url=GRAFANA_URL),
json=create_dashboard_payload(path))
logging.debug('Response: %r', r.json())
r.raise_for_status()
logging.info('Ending %r session...', user.get('user'))
session.get('{url}/logout'.format(url=GRAFANA_URL))
logging.info('Finished successfully.')
if __name__ == '__main__':
main()
| true | true |
f71e344d83b88a33fee568792a59d599f2f0eac4 | 699 | py | Python | scripts/fhr/count_facets.py | bcolloran/jydoop | 0267cbf8f467a77fae97c1604e2cbb70f4aba7d4 | [
"Apache-2.0"
] | 1 | 2017-09-28T08:35:26.000Z | 2017-09-28T08:35:26.000Z | scripts/fhr/count_facets.py | bcolloran/jydoop | 0267cbf8f467a77fae97c1604e2cbb70f4aba7d4 | [
"Apache-2.0"
] | null | null | null | scripts/fhr/count_facets.py | bcolloran/jydoop | 0267cbf8f467a77fae97c1604e2cbb70f4aba7d4 | [
"Apache-2.0"
] | null | null | null | import json
import jydoop
import healthreportutils
setupjob = healthreportutils.setupjob
combine = jydoop.sumreducer
def map(key, value, context):
try:
payload = json.loads(value)
except:
context.write("Bogus\tBogus\tBogus\tBogus", 1)
return
output = []
try:
info = payload['geckoAppInfo']
if type(info) == dict:
output.append(info['name'])
output.append(info['os'])
output.append(info['updateChannel'])
output.append(info['version'])
except KeyError:
pass
if len(output) == 4:
outkey = "\t".join(output)
context.write(outkey, 1)
reduce = jydoop.sumreducer
| 21.84375 | 54 | 0.597997 | import json
import jydoop
import healthreportutils
setupjob = healthreportutils.setupjob
combine = jydoop.sumreducer
def map(key, value, context):
try:
payload = json.loads(value)
except:
context.write("Bogus\tBogus\tBogus\tBogus", 1)
return
output = []
try:
info = payload['geckoAppInfo']
if type(info) == dict:
output.append(info['name'])
output.append(info['os'])
output.append(info['updateChannel'])
output.append(info['version'])
except KeyError:
pass
if len(output) == 4:
outkey = "\t".join(output)
context.write(outkey, 1)
reduce = jydoop.sumreducer
| true | true |
f71e348b15e2cb995ca29be30b12308864a64f38 | 5,124 | py | Python | kanmail/window.py | frznvm0/Kanmail | 98699a14fa32aa1fd6d7384328ca30da6aae7a01 | [
"OpenSSL"
] | null | null | null | kanmail/window.py | frznvm0/Kanmail | 98699a14fa32aa1fd6d7384328ca30da6aae7a01 | [
"OpenSSL"
] | null | null | null | kanmail/window.py | frznvm0/Kanmail | 98699a14fa32aa1fd6d7384328ca30da6aae7a01 | [
"OpenSSL"
] | null | null | null | from typing import Dict, Optional, Union
from uuid import uuid4
import webview
from kanmail.log import logger
from kanmail.server.app import server
from kanmail.settings.constants import DEBUG, FRAMELESS, IS_APP, SERVER_HOST, SESSION_TOKEN
ID_TO_WINDOW = {} # internal ID -> window object
UNIQUE_NAME_TO_ID = {} # name -> internal ID for unique windows
def create_window(
endpoint: str = '/',
unique_key: Optional[str] = None,
**kwargs,
) -> Union[str, bool]:
if not IS_APP:
logger.warning('Cannot open window in server mode!')
return False
internal_id = str(uuid4())
link = (
f'http://{SERVER_HOST}:{server.get_port()}{endpoint}'
f'?window_id={internal_id}'
f'&Kanmail-Session-Token={SESSION_TOKEN}'
)
logger.debug(
f'Opening window (#{internal_id}) '
f'url={endpoint} kwargs={kwargs}',
)
# Nuke any existing unique window
if unique_key and unique_key in UNIQUE_NAME_TO_ID:
old_window_id = UNIQUE_NAME_TO_ID.get(unique_key)
if old_window_id:
destroy_window(old_window_id)
window = webview.create_window(
'Kanmail', link,
frameless=FRAMELESS,
easy_drag=False,
text_select=True,
**kwargs,
)
ID_TO_WINDOW[internal_id] = window
if unique_key:
UNIQUE_NAME_TO_ID[unique_key] = internal_id
return internal_id
def destroy_window(internal_id: str) -> None:
window = ID_TO_WINDOW.pop(internal_id, None)
if window:
try:
window.destroy()
except KeyError: # happens if the window has already been destroyed (user close)
pass
else:
return
logger.warning(f'Tried to destroy non-existant window: {internal_id}')
def resize_window(internal_id: str, width: int, height: int) -> None:
window = ID_TO_WINDOW[internal_id]
if window:
window.resize(width, height)
logger.warning(f'Tried to resize non-existant window: {internal_id}')
def reload_main_window() -> None:
if IS_APP:
window = get_main_window()
window.evaluate_js('window.location.reload()')
def get_main_window() -> webview.Window:
return ID_TO_WINDOW[UNIQUE_NAME_TO_ID['main']]
def destroy_main_window() -> None:
destroy_window(UNIQUE_NAME_TO_ID['main'])
def get_main_window_size_position() -> Dict[str, int]:
window = get_main_window()
return {
'left': window.x,
'top': window.y,
'width': window.width,
'height': window.height,
}
def show_traffic_light_buttons(window):
import AppKit
buttons = [
window.standardWindowButton_(AppKit.NSWindowCloseButton),
window.standardWindowButton_(AppKit.NSWindowZoomButton),
window.standardWindowButton_(AppKit.NSWindowMiniaturizeButton),
]
for button in buttons:
button.setHidden_(False)
def reposition_traffic_light_buttons(window):
import AppKit
button = window.standardWindowButton_(AppKit.NSWindowCloseButton)
titlebar_container_view = button.superview().superview()
titlebar_container_rect = titlebar_container_view.frame()
titlebar_container_rect.size.height += 22
titlebar_container_rect.origin.y -= 13
titlebar_container_rect.size.width += 22
titlebar_container_rect.origin.x += 13
titlebar_container_view._.frame = AppKit.NSValue.valueWithRect_(titlebar_container_rect)
def init_window_hacks() -> None:
# Although this is supported in the pywebview API - we're overriding the defaults
# for all windows with context of one (settings) window. Currently this is the
# only place we wish to confirm exit, so it's not an issue.
# TODO Issue to track per-window customization: https://github.com/r0x0r/pywebview/issues/697
webview.localization.localization['global.quit'] = 'Close without saving'
webview.localization.localization['global.cancel'] = 'Return to settings'
webview.localization.localization['global.quitConfirmation'] = (
'Any changes will be lost, do you still want to close the window?'
)
try:
from webview.platforms import cocoa
except ImportError:
pass
else:
# This cocoa specific hack shows the traffic light buttons (pywebview hides these
# in frameless mode by default) and also moves them so they look better placed
# in the sidebar header.
# Normally set by webview.start but importing cocoa before breaks that
cocoa._debug = {'mode': DEBUG}
class CustomBrowserView(cocoa.BrowserView):
def first_show(self, *args, **kwargs):
show_traffic_light_buttons(self.window)
reposition_traffic_light_buttons(self.window)
super().first_show(*args, **kwargs)
class CustomWindowDelegate(cocoa.BrowserView.WindowDelegate):
def windowDidResize_(self, notification):
reposition_traffic_light_buttons(notification.object())
cocoa.BrowserView = CustomBrowserView
cocoa.BrowserView.WindowDelegate = CustomWindowDelegate
| 31.054545 | 97 | 0.685402 | from typing import Dict, Optional, Union
from uuid import uuid4
import webview
from kanmail.log import logger
from kanmail.server.app import server
from kanmail.settings.constants import DEBUG, FRAMELESS, IS_APP, SERVER_HOST, SESSION_TOKEN
ID_TO_WINDOW = {}
UNIQUE_NAME_TO_ID = {}
def create_window(
endpoint: str = '/',
unique_key: Optional[str] = None,
**kwargs,
) -> Union[str, bool]:
if not IS_APP:
logger.warning('Cannot open window in server mode!')
return False
internal_id = str(uuid4())
link = (
f'http://{SERVER_HOST}:{server.get_port()}{endpoint}'
f'?window_id={internal_id}'
f'&Kanmail-Session-Token={SESSION_TOKEN}'
)
logger.debug(
f'Opening window (#{internal_id}) '
f'url={endpoint} kwargs={kwargs}',
)
if unique_key and unique_key in UNIQUE_NAME_TO_ID:
old_window_id = UNIQUE_NAME_TO_ID.get(unique_key)
if old_window_id:
destroy_window(old_window_id)
window = webview.create_window(
'Kanmail', link,
frameless=FRAMELESS,
easy_drag=False,
text_select=True,
**kwargs,
)
ID_TO_WINDOW[internal_id] = window
if unique_key:
UNIQUE_NAME_TO_ID[unique_key] = internal_id
return internal_id
def destroy_window(internal_id: str) -> None:
window = ID_TO_WINDOW.pop(internal_id, None)
if window:
try:
window.destroy()
except KeyError:
pass
else:
return
logger.warning(f'Tried to destroy non-existant window: {internal_id}')
def resize_window(internal_id: str, width: int, height: int) -> None:
window = ID_TO_WINDOW[internal_id]
if window:
window.resize(width, height)
logger.warning(f'Tried to resize non-existant window: {internal_id}')
def reload_main_window() -> None:
if IS_APP:
window = get_main_window()
window.evaluate_js('window.location.reload()')
def get_main_window() -> webview.Window:
return ID_TO_WINDOW[UNIQUE_NAME_TO_ID['main']]
def destroy_main_window() -> None:
destroy_window(UNIQUE_NAME_TO_ID['main'])
def get_main_window_size_position() -> Dict[str, int]:
window = get_main_window()
return {
'left': window.x,
'top': window.y,
'width': window.width,
'height': window.height,
}
def show_traffic_light_buttons(window):
import AppKit
buttons = [
window.standardWindowButton_(AppKit.NSWindowCloseButton),
window.standardWindowButton_(AppKit.NSWindowZoomButton),
window.standardWindowButton_(AppKit.NSWindowMiniaturizeButton),
]
for button in buttons:
button.setHidden_(False)
def reposition_traffic_light_buttons(window):
import AppKit
button = window.standardWindowButton_(AppKit.NSWindowCloseButton)
titlebar_container_view = button.superview().superview()
titlebar_container_rect = titlebar_container_view.frame()
titlebar_container_rect.size.height += 22
titlebar_container_rect.origin.y -= 13
titlebar_container_rect.size.width += 22
titlebar_container_rect.origin.x += 13
titlebar_container_view._.frame = AppKit.NSValue.valueWithRect_(titlebar_container_rect)
def init_window_hacks() -> None:
# for all windows with context of one (settings) window. Currently this is the
# only place we wish to confirm exit, so it's not an issue.
webview.localization.localization['global.quit'] = 'Close without saving'
webview.localization.localization['global.cancel'] = 'Return to settings'
webview.localization.localization['global.quitConfirmation'] = (
'Any changes will be lost, do you still want to close the window?'
)
try:
from webview.platforms import cocoa
except ImportError:
pass
else:
cocoa._debug = {'mode': DEBUG}
class CustomBrowserView(cocoa.BrowserView):
def first_show(self, *args, **kwargs):
show_traffic_light_buttons(self.window)
reposition_traffic_light_buttons(self.window)
super().first_show(*args, **kwargs)
class CustomWindowDelegate(cocoa.BrowserView.WindowDelegate):
def windowDidResize_(self, notification):
reposition_traffic_light_buttons(notification.object())
cocoa.BrowserView = CustomBrowserView
cocoa.BrowserView.WindowDelegate = CustomWindowDelegate
| true | true |
f71e34be826358ffa5149b16e2c58f61431da420 | 6,693 | py | Python | torchlib/deep_rl/algorithm/ppo/utils.py | vermouth1992/torchlib | 63b2bedb40f670b2d9fbfc0daeab4a8d44623095 | [
"MIT"
] | 3 | 2019-07-23T21:32:36.000Z | 2022-02-04T23:13:30.000Z | torchlib/deep_rl/algorithm/ppo/utils.py | vermouth1992/torchlib | 63b2bedb40f670b2d9fbfc0daeab4a8d44623095 | [
"MIT"
] | null | null | null | torchlib/deep_rl/algorithm/ppo/utils.py | vermouth1992/torchlib | 63b2bedb40f670b2d9fbfc0daeab4a8d44623095 | [
"MIT"
] | 1 | 2019-07-23T21:32:23.000Z | 2019-07-23T21:32:23.000Z | """
Common utilities to implement policy gradient algorithms
"""
from collections import namedtuple, deque
import numpy as np
from scipy import signal
from torchlib.dataset.utils import create_data_loader
from torchlib.deep_rl.utils.replay.replay import ReplayBuffer
from torchlib.deep_rl.utils.replay.sampler import Sampler
from torchlib.utils.math import unnormalize, normalize
Trajectory = namedtuple('Trajectory', ('state', 'action', 'reward_to_go', 'advantage', 'old_log_prob'))
class PPOReplayBuffer(ReplayBuffer):
def __init__(self, gamma, lam, policy, alpha=0.9):
"""
Args:
gamma: discount factor
lam: generalized advantage estimation
policy: PPO policy
alpha: value moving average ratio
"""
super(PPOReplayBuffer, self).__init__(None, None, None, None, None)
self.gamma = gamma
self.lam = lam
self.alpha = alpha
self.policy = policy
def _initialize(self):
self.memory = deque()
self.running_value_mean = 0.
self.running_value_std = 0.
def clear(self):
self._size = 0
self.memory.clear()
def _finish_trajectory(self, states, actions, rewards, last_value):
"""Compute path accessory information including (reward_to_go, old_log_prob, advantage)
Returns:
"""
predicted_state_values = self.policy.predict_state_value_batch(states)
predicted_state_values = unnormalize(predicted_state_values, self.running_value_mean, self.running_value_std)
rewards_last_state = np.append(rewards, last_value)
predicted_state_values = np.append(predicted_state_values, last_value)
# Used for fit value function
reward_to_go = discount(rewards_last_state, self.gamma).astype(np.float32)[:-1]
temporal_difference = rewards + predicted_state_values[1:] * self.gamma - predicted_state_values[:-1]
# calculate reward-to-go
gae = discount(temporal_difference, self.gamma * self.lam).astype(np.float32)
old_log_prob = self.policy.predict_log_prob_batch(states, actions)
return reward_to_go, gae, old_log_prob
def add_trajectory(self, states, actions, rewards, last_value):
"""If last_state is not None, this trajectory is truncated.
Args:
states: (T, ob_dim)
actions: (T, ac_dim)
rewards: (T,)
last_state: (ob_dim)
Returns:
"""
reward_to_go, gae, old_log_prob = self._finish_trajectory(states, actions, rewards, last_value)
self.memory.append(Trajectory(
state=states,
action=actions,
reward_to_go=reward_to_go,
advantage=gae,
old_log_prob=old_log_prob
))
self._size += actions.shape[0]
def random_iterator(self, batch_size):
"""Create an iterator of all the dataset and update value mean and std
Args:
batch_size:
Returns:
"""
states = np.concatenate([trajectory.state for trajectory in self.memory], axis=0)
actions = np.concatenate([trajectory.action for trajectory in self.memory], axis=0)
reward_to_go = np.concatenate([trajectory.reward_to_go for trajectory in self.memory], axis=0)
gaes = np.concatenate([trajectory.advantage for trajectory in self.memory], axis=0)
old_log_prob = np.concatenate([trajectory.old_log_prob for trajectory in self.memory], axis=0)
value_mean, value_std = np.mean(reward_to_go), np.std(reward_to_go)
reward_to_go = normalize(reward_to_go, value_mean, value_std)
self.running_value_mean = self.running_value_mean * self.alpha + value_mean * (1 - self.alpha)
self.running_value_std = self.running_value_std * self.alpha + value_std * (1 - self.alpha)
gaes = normalize(gaes, np.mean(gaes), np.std(gaes))
batch_size = min(batch_size, states.shape[0])
data_loader = create_data_loader((states, actions, reward_to_go, gaes, old_log_prob),
batch_size=batch_size, shuffle=True, drop_last=True)
return data_loader
class PPOSampler(Sampler):
def __init__(self, min_steps_per_batch, logger=None):
super(PPOSampler, self).__init__()
self.min_steps_per_batch = min_steps_per_batch
self.logger = logger
def sample_trajectories(self, policy=None):
obs_lst = []
action_lst = []
reward_lst = []
done_lst = []
policy = self.policy if policy is None else policy
obs = self.env.reset()
for _ in range(self.min_steps_per_batch // obs.shape[0]):
action = policy.predict_batch(obs)
obs_lst.append(obs)
action_lst.append(action)
obs, rewards, dones, infos = self.env.step(action)
reward_lst.append(rewards)
done_lst.append(dones)
# compute last state value for the last trajectory in each environment
last_state_lst = obs
last_value_lst = self.policy.predict_state_value_batch(last_state_lst)
last_value_lst = unnormalize(last_value_lst, self.pool.running_value_mean, self.pool.running_value_std)
obs_lst = np.stack(obs_lst, axis=1)
action_lst = np.stack(action_lst, axis=1)
reward_lst = np.stack(reward_lst, axis=1)
done_lst = np.stack(done_lst, axis=1)
# separate trajectories and add to pool
for i in range(self.env.num_envs):
done_index = np.where(done_lst[i])[0] + 1
if done_lst[i][-1] == True:
done_index = done_index[:-1] # ignore the last one
last_value = 0.
else:
last_value = last_value_lst[i]
sub_obs_lst = np.split(obs_lst[i], done_index)
sub_action_lst = np.split(action_lst[i], done_index)
sub_reward_lst = np.split(reward_lst[i], done_index)
sub_last_value_lst = [0.] * (len(sub_obs_lst) - 1) + [last_value]
for j in range(len(sub_obs_lst)):
self.pool.add_trajectory(states=sub_obs_lst[j],
actions=sub_action_lst[j],
rewards=sub_reward_lst[j],
last_value=sub_last_value_lst[j])
if self.logger:
self.logger.store(EpReward=np.sum(sub_reward_lst[j]) + sub_last_value_lst[j])
self.logger.store(EpLength=sub_obs_lst[j].shape[0])
def discount(x, gamma):
return signal.lfilter([1.0], [1.0, -gamma], x[::-1])[::-1]
| 37.183333 | 117 | 0.635739 |
from collections import namedtuple, deque
import numpy as np
from scipy import signal
from torchlib.dataset.utils import create_data_loader
from torchlib.deep_rl.utils.replay.replay import ReplayBuffer
from torchlib.deep_rl.utils.replay.sampler import Sampler
from torchlib.utils.math import unnormalize, normalize
Trajectory = namedtuple('Trajectory', ('state', 'action', 'reward_to_go', 'advantage', 'old_log_prob'))
class PPOReplayBuffer(ReplayBuffer):
def __init__(self, gamma, lam, policy, alpha=0.9):
super(PPOReplayBuffer, self).__init__(None, None, None, None, None)
self.gamma = gamma
self.lam = lam
self.alpha = alpha
self.policy = policy
def _initialize(self):
self.memory = deque()
self.running_value_mean = 0.
self.running_value_std = 0.
def clear(self):
self._size = 0
self.memory.clear()
def _finish_trajectory(self, states, actions, rewards, last_value):
predicted_state_values = self.policy.predict_state_value_batch(states)
predicted_state_values = unnormalize(predicted_state_values, self.running_value_mean, self.running_value_std)
rewards_last_state = np.append(rewards, last_value)
predicted_state_values = np.append(predicted_state_values, last_value)
reward_to_go = discount(rewards_last_state, self.gamma).astype(np.float32)[:-1]
temporal_difference = rewards + predicted_state_values[1:] * self.gamma - predicted_state_values[:-1]
gae = discount(temporal_difference, self.gamma * self.lam).astype(np.float32)
old_log_prob = self.policy.predict_log_prob_batch(states, actions)
return reward_to_go, gae, old_log_prob
def add_trajectory(self, states, actions, rewards, last_value):
reward_to_go, gae, old_log_prob = self._finish_trajectory(states, actions, rewards, last_value)
self.memory.append(Trajectory(
state=states,
action=actions,
reward_to_go=reward_to_go,
advantage=gae,
old_log_prob=old_log_prob
))
self._size += actions.shape[0]
def random_iterator(self, batch_size):
states = np.concatenate([trajectory.state for trajectory in self.memory], axis=0)
actions = np.concatenate([trajectory.action for trajectory in self.memory], axis=0)
reward_to_go = np.concatenate([trajectory.reward_to_go for trajectory in self.memory], axis=0)
gaes = np.concatenate([trajectory.advantage for trajectory in self.memory], axis=0)
old_log_prob = np.concatenate([trajectory.old_log_prob for trajectory in self.memory], axis=0)
value_mean, value_std = np.mean(reward_to_go), np.std(reward_to_go)
reward_to_go = normalize(reward_to_go, value_mean, value_std)
self.running_value_mean = self.running_value_mean * self.alpha + value_mean * (1 - self.alpha)
self.running_value_std = self.running_value_std * self.alpha + value_std * (1 - self.alpha)
gaes = normalize(gaes, np.mean(gaes), np.std(gaes))
batch_size = min(batch_size, states.shape[0])
data_loader = create_data_loader((states, actions, reward_to_go, gaes, old_log_prob),
batch_size=batch_size, shuffle=True, drop_last=True)
return data_loader
class PPOSampler(Sampler):
def __init__(self, min_steps_per_batch, logger=None):
super(PPOSampler, self).__init__()
self.min_steps_per_batch = min_steps_per_batch
self.logger = logger
def sample_trajectories(self, policy=None):
obs_lst = []
action_lst = []
reward_lst = []
done_lst = []
policy = self.policy if policy is None else policy
obs = self.env.reset()
for _ in range(self.min_steps_per_batch // obs.shape[0]):
action = policy.predict_batch(obs)
obs_lst.append(obs)
action_lst.append(action)
obs, rewards, dones, infos = self.env.step(action)
reward_lst.append(rewards)
done_lst.append(dones)
last_state_lst = obs
last_value_lst = self.policy.predict_state_value_batch(last_state_lst)
last_value_lst = unnormalize(last_value_lst, self.pool.running_value_mean, self.pool.running_value_std)
obs_lst = np.stack(obs_lst, axis=1)
action_lst = np.stack(action_lst, axis=1)
reward_lst = np.stack(reward_lst, axis=1)
done_lst = np.stack(done_lst, axis=1)
for i in range(self.env.num_envs):
done_index = np.where(done_lst[i])[0] + 1
if done_lst[i][-1] == True:
done_index = done_index[:-1]
last_value = 0.
else:
last_value = last_value_lst[i]
sub_obs_lst = np.split(obs_lst[i], done_index)
sub_action_lst = np.split(action_lst[i], done_index)
sub_reward_lst = np.split(reward_lst[i], done_index)
sub_last_value_lst = [0.] * (len(sub_obs_lst) - 1) + [last_value]
for j in range(len(sub_obs_lst)):
self.pool.add_trajectory(states=sub_obs_lst[j],
actions=sub_action_lst[j],
rewards=sub_reward_lst[j],
last_value=sub_last_value_lst[j])
if self.logger:
self.logger.store(EpReward=np.sum(sub_reward_lst[j]) + sub_last_value_lst[j])
self.logger.store(EpLength=sub_obs_lst[j].shape[0])
def discount(x, gamma):
return signal.lfilter([1.0], [1.0, -gamma], x[::-1])[::-1]
| true | true |
f71e35702cffecb31e78b3aa2858556b28764eb9 | 3,500 | py | Python | kubernetes/test/test_policy_v1beta1_api.py | fooka03/python | 073cf4d89e532f92b57e8955b4efc3d5d5eb80cf | [
"Apache-2.0"
] | 2 | 2020-07-02T05:47:41.000Z | 2020-07-02T05:50:34.000Z | kubernetes/test/test_policy_v1beta1_api.py | fooka03/python | 073cf4d89e532f92b57e8955b4efc3d5d5eb80cf | [
"Apache-2.0"
] | 1 | 2021-03-25T23:44:49.000Z | 2021-03-25T23:44:49.000Z | k8sdeployment/k8sstat/python/kubernetes/test/test_policy_v1beta1_api.py | JeffYFHuang/gpuaccounting | afa934350ebbd0634beb60b9df4a147426ea0006 | [
"MIT"
] | 1 | 2021-10-13T17:45:37.000Z | 2021-10-13T17:45:37.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.6
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.api.policy_v1beta1_api import PolicyV1beta1Api # noqa: E501
from kubernetes.client.rest import ApiException
class TestPolicyV1beta1Api(unittest.TestCase):
"""PolicyV1beta1Api unit test stubs"""
def setUp(self):
self.api = kubernetes.client.api.policy_v1beta1_api.PolicyV1beta1Api() # noqa: E501
def tearDown(self):
pass
def test_create_namespaced_pod_disruption_budget(self):
"""Test case for create_namespaced_pod_disruption_budget
"""
pass
def test_create_pod_security_policy(self):
"""Test case for create_pod_security_policy
"""
pass
def test_delete_collection_namespaced_pod_disruption_budget(self):
"""Test case for delete_collection_namespaced_pod_disruption_budget
"""
pass
def test_delete_collection_pod_security_policy(self):
"""Test case for delete_collection_pod_security_policy
"""
pass
def test_delete_namespaced_pod_disruption_budget(self):
"""Test case for delete_namespaced_pod_disruption_budget
"""
pass
def test_delete_pod_security_policy(self):
"""Test case for delete_pod_security_policy
"""
pass
def test_get_api_resources(self):
"""Test case for get_api_resources
"""
pass
def test_list_namespaced_pod_disruption_budget(self):
"""Test case for list_namespaced_pod_disruption_budget
"""
pass
def test_list_pod_disruption_budget_for_all_namespaces(self):
"""Test case for list_pod_disruption_budget_for_all_namespaces
"""
pass
def test_list_pod_security_policy(self):
"""Test case for list_pod_security_policy
"""
pass
def test_patch_namespaced_pod_disruption_budget(self):
"""Test case for patch_namespaced_pod_disruption_budget
"""
pass
def test_patch_namespaced_pod_disruption_budget_status(self):
"""Test case for patch_namespaced_pod_disruption_budget_status
"""
pass
def test_patch_pod_security_policy(self):
"""Test case for patch_pod_security_policy
"""
pass
def test_read_namespaced_pod_disruption_budget(self):
"""Test case for read_namespaced_pod_disruption_budget
"""
pass
def test_read_namespaced_pod_disruption_budget_status(self):
"""Test case for read_namespaced_pod_disruption_budget_status
"""
pass
def test_read_pod_security_policy(self):
"""Test case for read_pod_security_policy
"""
pass
def test_replace_namespaced_pod_disruption_budget(self):
"""Test case for replace_namespaced_pod_disruption_budget
"""
pass
def test_replace_namespaced_pod_disruption_budget_status(self):
"""Test case for replace_namespaced_pod_disruption_budget_status
"""
pass
def test_replace_pod_security_policy(self):
"""Test case for replace_pod_security_policy
"""
pass
if __name__ == '__main__':
unittest.main()
| 23.648649 | 124 | 0.687714 |
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.api.policy_v1beta1_api import PolicyV1beta1Api
from kubernetes.client.rest import ApiException
class TestPolicyV1beta1Api(unittest.TestCase):
def setUp(self):
self.api = kubernetes.client.api.policy_v1beta1_api.PolicyV1beta1Api()
def tearDown(self):
pass
def test_create_namespaced_pod_disruption_budget(self):
pass
def test_create_pod_security_policy(self):
pass
def test_delete_collection_namespaced_pod_disruption_budget(self):
pass
def test_delete_collection_pod_security_policy(self):
pass
def test_delete_namespaced_pod_disruption_budget(self):
pass
def test_delete_pod_security_policy(self):
pass
def test_get_api_resources(self):
pass
def test_list_namespaced_pod_disruption_budget(self):
pass
def test_list_pod_disruption_budget_for_all_namespaces(self):
pass
def test_list_pod_security_policy(self):
pass
def test_patch_namespaced_pod_disruption_budget(self):
pass
def test_patch_namespaced_pod_disruption_budget_status(self):
pass
def test_patch_pod_security_policy(self):
pass
def test_read_namespaced_pod_disruption_budget(self):
pass
def test_read_namespaced_pod_disruption_budget_status(self):
pass
def test_read_pod_security_policy(self):
pass
def test_replace_namespaced_pod_disruption_budget(self):
pass
def test_replace_namespaced_pod_disruption_budget_status(self):
pass
def test_replace_pod_security_policy(self):
pass
if __name__ == '__main__':
unittest.main()
| true | true |
f71e3841de6840322ca7bb77900844dca913abf4 | 29 | py | Python | BatalhaNaval.py | rafaelclemes81/Python | 0e685b4e528a29bb23ecf11c9ccdbae8730b3ac3 | [
"MIT"
] | null | null | null | BatalhaNaval.py | rafaelclemes81/Python | 0e685b4e528a29bb23ecf11c9ccdbae8730b3ac3 | [
"MIT"
] | null | null | null | BatalhaNaval.py | rafaelclemes81/Python | 0e685b4e528a29bb23ecf11c9ccdbae8730b3ac3 | [
"MIT"
] | null | null | null | ''' Jogo Batalha Naval '''
| 7.25 | 26 | 0.551724 | true | true | |
f71e38c13ec8321f0a922bfca3d6d3b50576bfd0 | 2,147 | py | Python | import_export/results.py | michelekihiu/superintendence_tracking | 325513059eeec25fdffd5ccb36befc0a9e8235ac | [
"BSD-2-Clause"
] | 1 | 2017-10-31T02:37:37.000Z | 2017-10-31T02:37:37.000Z | import_export/results.py | michelekihiu/superintendence_tracking | 325513059eeec25fdffd5ccb36befc0a9e8235ac | [
"BSD-2-Clause"
] | 2 | 2021-06-01T22:03:20.000Z | 2022-01-13T00:43:38.000Z | import_export/results.py | michelekihiu/superintendence_tracking | 325513059eeec25fdffd5ccb36befc0a9e8235ac | [
"BSD-2-Clause"
] | 1 | 2020-11-04T08:39:52.000Z | 2020-11-04T08:39:52.000Z | from __future__ import unicode_literals
try:
from collections import OrderedDict
except ImportError:
from django.utils.datastructures import SortedDict as OrderedDict
from tablib import Dataset
class Error(object):
def __init__(self, error, traceback=None, row=None):
self.error = error
self.traceback = traceback
self.row = row
class RowResult(object):
IMPORT_TYPE_UPDATE = 'update'
IMPORT_TYPE_NEW = 'new'
IMPORT_TYPE_DELETE = 'delete'
IMPORT_TYPE_SKIP = 'skip'
IMPORT_TYPE_ERROR = 'error'
def __init__(self):
self.errors = []
self.diff = None
self.import_type = None
class Result(object):
def __init__(self, *args, **kwargs):
super(Result, self).__init__()
self.base_errors = []
self.diff_headers = []
self.rows = [] # RowResults
self.failed_dataset = Dataset()
self.totals = OrderedDict([(RowResult.IMPORT_TYPE_NEW, 0),
(RowResult.IMPORT_TYPE_UPDATE, 0),
(RowResult.IMPORT_TYPE_DELETE, 0),
(RowResult.IMPORT_TYPE_SKIP, 0),
(RowResult.IMPORT_TYPE_ERROR, 0)])
self.total_rows = 0
def append_row_result(self, row_result):
self.rows.append(row_result)
def append_base_error(self, error):
self.base_errors.append(error)
def add_dataset_headers(self, headers):
self.failed_dataset.headers = headers + ["Error"]
def append_failed_row(self, row, error):
row_values = [v for (k, v) in row.items()]
row_values.append(error.error.message)
self.failed_dataset.append(row_values)
def increment_row_result_total(self, row_result):
if row_result.import_type:
self.totals[row_result.import_type] += 1
def row_errors(self):
return [(i + 1, row.errors)
for i, row in enumerate(self.rows) if row.errors]
def has_errors(self):
return bool(self.base_errors or self.row_errors())
def __iter__(self):
return iter(self.rows)
| 29.819444 | 69 | 0.620401 | from __future__ import unicode_literals
try:
from collections import OrderedDict
except ImportError:
from django.utils.datastructures import SortedDict as OrderedDict
from tablib import Dataset
class Error(object):
def __init__(self, error, traceback=None, row=None):
self.error = error
self.traceback = traceback
self.row = row
class RowResult(object):
IMPORT_TYPE_UPDATE = 'update'
IMPORT_TYPE_NEW = 'new'
IMPORT_TYPE_DELETE = 'delete'
IMPORT_TYPE_SKIP = 'skip'
IMPORT_TYPE_ERROR = 'error'
def __init__(self):
self.errors = []
self.diff = None
self.import_type = None
class Result(object):
def __init__(self, *args, **kwargs):
super(Result, self).__init__()
self.base_errors = []
self.diff_headers = []
self.rows = []
self.failed_dataset = Dataset()
self.totals = OrderedDict([(RowResult.IMPORT_TYPE_NEW, 0),
(RowResult.IMPORT_TYPE_UPDATE, 0),
(RowResult.IMPORT_TYPE_DELETE, 0),
(RowResult.IMPORT_TYPE_SKIP, 0),
(RowResult.IMPORT_TYPE_ERROR, 0)])
self.total_rows = 0
def append_row_result(self, row_result):
self.rows.append(row_result)
def append_base_error(self, error):
self.base_errors.append(error)
def add_dataset_headers(self, headers):
self.failed_dataset.headers = headers + ["Error"]
def append_failed_row(self, row, error):
row_values = [v for (k, v) in row.items()]
row_values.append(error.error.message)
self.failed_dataset.append(row_values)
def increment_row_result_total(self, row_result):
if row_result.import_type:
self.totals[row_result.import_type] += 1
def row_errors(self):
return [(i + 1, row.errors)
for i, row in enumerate(self.rows) if row.errors]
def has_errors(self):
return bool(self.base_errors or self.row_errors())
def __iter__(self):
return iter(self.rows)
| true | true |
f71e391a2a15cf39c959d5d3a8f6cd170aa714bd | 909 | py | Python | badgify/tests/test_models.py | BrendanBerkley/django-badgify | 61203e92cb76982f778caf168d371a72a401db10 | [
"MIT"
] | 78 | 2015-03-04T13:41:31.000Z | 2021-12-21T14:30:27.000Z | badgify/tests/test_models.py | BrendanBerkley/django-badgify | 61203e92cb76982f778caf168d371a72a401db10 | [
"MIT"
] | 11 | 2015-06-18T18:38:53.000Z | 2019-12-27T14:08:29.000Z | badgify/tests/test_models.py | BrendanBerkley/django-badgify | 61203e92cb76982f778caf168d371a72a401db10 | [
"MIT"
] | 12 | 2015-06-18T16:05:38.000Z | 2018-07-13T12:50:34.000Z | from django.db import IntegrityError
from django.test import TestCase
from ..models import Badge, Award
from .mixins import UserFixturesMixin
class BadgeTestCase(TestCase):
"""
Badge model test case.
"""
def test_autocreate_slug(self):
badge = Badge.objects.create(name='Super Chouette')
self.assertEqual(badge.slug, 'super-chouette')
class AwardTestCase(TestCase, UserFixturesMixin):
"""
Award model test case.
"""
def setUp(self):
self.create_users()
def test_create(self):
badge = Badge.objects.create(name='Super Chouette')
Award.objects.create(user=self.user1, badge=badge)
Award.objects.create(user=self.user2, badge=badge)
self.assertEqual(badge.users.count(), 2)
self.assertRaises(IntegrityError, Award.objects.create, **{
'user': self.user1,
'badge': badge
})
| 25.971429 | 67 | 0.657866 | from django.db import IntegrityError
from django.test import TestCase
from ..models import Badge, Award
from .mixins import UserFixturesMixin
class BadgeTestCase(TestCase):
def test_autocreate_slug(self):
badge = Badge.objects.create(name='Super Chouette')
self.assertEqual(badge.slug, 'super-chouette')
class AwardTestCase(TestCase, UserFixturesMixin):
def setUp(self):
self.create_users()
def test_create(self):
badge = Badge.objects.create(name='Super Chouette')
Award.objects.create(user=self.user1, badge=badge)
Award.objects.create(user=self.user2, badge=badge)
self.assertEqual(badge.users.count(), 2)
self.assertRaises(IntegrityError, Award.objects.create, **{
'user': self.user1,
'badge': badge
})
| true | true |
f71e3bf6b5a4e1db0bd1e025dcf8b861aeb828af | 922 | py | Python | test/test_steps.py | jladdjr/ansible-builder | 7520396f8921b98a033a8f25248dbadb9cd83901 | [
"Apache-2.0"
] | null | null | null | test/test_steps.py | jladdjr/ansible-builder | 7520396f8921b98a033a8f25248dbadb9cd83901 | [
"Apache-2.0"
] | null | null | null | test/test_steps.py | jladdjr/ansible-builder | 7520396f8921b98a033a8f25248dbadb9cd83901 | [
"Apache-2.0"
] | null | null | null | import pytest
import textwrap
from ansible_builder.steps import AdditionalBuildSteps, PipSteps, BindepSteps
def test_steps_for_collection_dependencies():
assert list(PipSteps('requirements.txt')) == [
'ADD requirements.txt /build/',
'RUN pip3 install --upgrade -r /build/requirements.txt'
]
@pytest.mark.parametrize('verb', ['prepend', 'append'])
def test_additional_build_steps(verb):
additional_build_steps = {
'prepend': ["RUN echo This is the prepend test", "RUN whoami"],
'append': textwrap.dedent("""
RUN echo This is the append test
RUN whoami
""")
}
steps = AdditionalBuildSteps(additional_build_steps[verb])
assert len(list(steps)) == 2
def test_system_steps():
assert list(BindepSteps('bindep_output.txt')) == [
'ADD bindep_output.txt /build/',
'RUN dnf -y install $(cat /build/bindep_output.txt)'
]
| 27.939394 | 77 | 0.667028 | import pytest
import textwrap
from ansible_builder.steps import AdditionalBuildSteps, PipSteps, BindepSteps
def test_steps_for_collection_dependencies():
assert list(PipSteps('requirements.txt')) == [
'ADD requirements.txt /build/',
'RUN pip3 install --upgrade -r /build/requirements.txt'
]
@pytest.mark.parametrize('verb', ['prepend', 'append'])
def test_additional_build_steps(verb):
additional_build_steps = {
'prepend': ["RUN echo This is the prepend test", "RUN whoami"],
'append': textwrap.dedent("""
RUN echo This is the append test
RUN whoami
""")
}
steps = AdditionalBuildSteps(additional_build_steps[verb])
assert len(list(steps)) == 2
def test_system_steps():
assert list(BindepSteps('bindep_output.txt')) == [
'ADD bindep_output.txt /build/',
'RUN dnf -y install $(cat /build/bindep_output.txt)'
]
| true | true |
f71e3ce1316873d9471482dae170853222e1b3db | 1,417 | py | Python | download_heroicons.py | zerolab/heroicons | 1a95b8e7316b315377a8773569476aa7bcb8f9e1 | [
"MIT"
] | null | null | null | download_heroicons.py | zerolab/heroicons | 1a95b8e7316b315377a8773569476aa7bcb8f9e1 | [
"MIT"
] | null | null | null | download_heroicons.py | zerolab/heroicons | 1a95b8e7316b315377a8773569476aa7bcb8f9e1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Download the latest heroicons zip file and select only the optimized icons.
"""
import argparse
import os
import sys
from io import BytesIO
from zipfile import ZIP_DEFLATED, ZipFile
import requests
def main(args=None):
parser = argparse.ArgumentParser()
parser.add_argument("version", help="e.g. 1.0.1")
args = parser.parse_args(args)
version = args.version
zip_url = (
f"https://github.com/tailwindlabs/heroicons/archive/refs/tags/v{version}.zip"
)
response = requests.get(zip_url)
if response.status_code != 200:
print(f"Got status code {response.status_code} for {zip_url}", file=sys.stderr)
return 1
input_zip = ZipFile(BytesIO(response.content))
input_prefix = f"heroicons-{version}/optimized/"
output_path = "src/heroicons/heroicons.zip"
try:
os.remove(output_path)
except FileNotFoundError:
pass
with ZipFile(
output_path, "w", compression=ZIP_DEFLATED, compresslevel=9
) as output_zip:
for name in input_zip.namelist():
if name.startswith(input_prefix) and name.endswith(".svg"):
data = input_zip.read(name)
new_name = name[len(input_prefix) :]
output_zip.writestr(new_name, data)
print(new_name)
print("\n✅ Written!")
return 0
if __name__ == "__main__":
exit(main())
| 26.240741 | 87 | 0.652788 |
import argparse
import os
import sys
from io import BytesIO
from zipfile import ZIP_DEFLATED, ZipFile
import requests
def main(args=None):
parser = argparse.ArgumentParser()
parser.add_argument("version", help="e.g. 1.0.1")
args = parser.parse_args(args)
version = args.version
zip_url = (
f"https://github.com/tailwindlabs/heroicons/archive/refs/tags/v{version}.zip"
)
response = requests.get(zip_url)
if response.status_code != 200:
print(f"Got status code {response.status_code} for {zip_url}", file=sys.stderr)
return 1
input_zip = ZipFile(BytesIO(response.content))
input_prefix = f"heroicons-{version}/optimized/"
output_path = "src/heroicons/heroicons.zip"
try:
os.remove(output_path)
except FileNotFoundError:
pass
with ZipFile(
output_path, "w", compression=ZIP_DEFLATED, compresslevel=9
) as output_zip:
for name in input_zip.namelist():
if name.startswith(input_prefix) and name.endswith(".svg"):
data = input_zip.read(name)
new_name = name[len(input_prefix) :]
output_zip.writestr(new_name, data)
print(new_name)
print("\n✅ Written!")
return 0
if __name__ == "__main__":
exit(main())
| true | true |
f71e3e92f9777ec601bc7079991566c4790da4b1 | 48,021 | py | Python | python/ray/_private/utils.py | jianoaix/ray | 1701b923bc83905f8961c06a6a173e3eba46a936 | [
"Apache-2.0"
] | null | null | null | python/ray/_private/utils.py | jianoaix/ray | 1701b923bc83905f8961c06a6a173e3eba46a936 | [
"Apache-2.0"
] | 41 | 2021-09-21T01:13:48.000Z | 2022-03-19T07:12:22.000Z | python/ray/_private/utils.py | jianoaix/ray | 1701b923bc83905f8961c06a6a173e3eba46a936 | [
"Apache-2.0"
] | 1 | 2019-09-24T16:24:49.000Z | 2019-09-24T16:24:49.000Z | import binascii
import errno
import functools
import hashlib
import importlib
import logging
import multiprocessing
import os
import signal
import subprocess
import sys
import tempfile
import threading
import time
from typing import Optional, Sequence, Tuple, Any, Union, Dict
import uuid
import grpc
import warnings
try:
from grpc import aio as aiogrpc
except ImportError:
from grpc.experimental import aio as aiogrpc
import inspect
from inspect import signature
from pathlib import Path
import numpy as np
import ray
from ray.core.generated.gcs_pb2 import ErrorTableData
import ray.ray_constants as ray_constants
from ray._private.tls_utils import load_certs_from_env
# Import psutil after ray so the packaged version is used.
import psutil
# ``pwd`` is a POSIX-only module; keep a None placeholder on Windows so code
# elsewhere can test for its availability instead of import-failing.
pwd = None
if sys.platform != "win32":
    import pwd
# Module-level logger for this file.
logger = logging.getLogger(__name__)
# Linux can bind child processes' lifetimes to that of their parents via prctl.
# prctl support is detected dynamically once, and assumed thereafter.
linux_prctl = None
# Windows can bind processes' lifetimes to that of kernel-level "job objects".
# We keep a global job object to tie its lifetime to that of our own process.
win32_job = None
win32_AssignProcessToJobObject = None
def get_user_temp_dir():
    """Return the directory under which Ray should create temporary files.

    Resolution order: the ``RAY_TMPDIR`` env var, then ``TMPDIR`` (Linux
    only), then ``/tmp`` on Linux/macOS, and finally the platform default
    reported by :mod:`tempfile`.
    """
    override = os.environ.get("RAY_TMPDIR")
    if override is not None:
        return override
    if sys.platform.startswith("linux") and "TMPDIR" in os.environ:
        return os.environ["TMPDIR"]
    if sys.platform.startswith(("darwin", "linux")):
        # Ideally we wouldn't need this fallback, but keep it for now for
        # for compatibility
        return os.path.join(os.sep, "tmp")
    return tempfile.gettempdir()
def get_ray_temp_dir():
    """Return the root directory ("<user temp dir>/ray") for Ray's own
    temporary files."""
    return os.path.join(get_user_temp_dir(), "ray")
def _random_string():
    """Return ``ray_constants.ID_SIZE`` random bytes derived from a UUID.

    A fresh UUID4 is hashed with SHAKE-128 (a variable-length digest), so the
    result has exactly the length Ray uses for its IDs.
    """
    digest = hashlib.shake_128(uuid.uuid4().bytes).digest(ray_constants.ID_SIZE)
    assert len(digest) == ray_constants.ID_SIZE
    return digest
def format_error_message(exception_message: str, task_exception: bool = False):
    """Improve the formatting of an exception thrown by a remote function.

    This method takes a traceback from an exception and makes it nicer by
    removing a few uninformative lines and adding some space to indent the
    remaining lines nicely.

    Args:
        exception_message: A message generated by traceback.format_exc().
        task_exception: If True, the message comes from inside a task and
            the second and third lines (which only describe Ray's worker
            machinery and are always the same) are stripped.

    Returns:
        A string of the formatted exception message.
    """
    lines = exception_message.split("\n")
    if task_exception:
        # For errors that occur inside of tasks, remove lines 1 and 2 which are
        # always the same, they just contain information about the worker code.
        lines = lines[0:1] + lines[3:]
    # NOTE: the stray `pass` the original had here was a no-op and was removed.
    return "\n".join(lines)
def push_error_to_driver(
    worker, error_type: str, message: str, job_id: Optional[str] = None
):
    """Push an error message to the driver to be printed in the background.

    Args:
        worker: The worker to use.
        error_type: The type of the error.
        message: The message that will be printed in the background
            on the driver.
        job_id: The ID of the driver to push the error message to. If this
            is None, then the message will be pushed to all drivers.
    """
    # A nil job ID means "broadcast to all drivers".
    target_job = ray.JobID.nil() if job_id is None else job_id
    assert isinstance(target_job, ray.JobID)
    worker.core_worker.push_error(target_job, error_type, message, time.time())
def construct_error_message(job_id, error_type, message, timestamp):
    """Construct an ErrorTableData object.

    Args:
        job_id: The ID of the job that the error should go to. If this is
            nil, then the error will go to all drivers.
        error_type: The type of the error.
        message: The error message.
        timestamp: The time of the error.

    Returns:
        The populated ErrorTableData protobuf object.
    """
    error_info = ErrorTableData()
    error_info.job_id = job_id.binary()
    error_info.type = error_type
    error_info.error_message = message
    error_info.timestamp = timestamp
    return error_info
def publish_error_to_driver(
    error_type: str,
    message: str,
    gcs_publisher,
    job_id=None,
):
    """Push an error message to the driver through the GCS publisher.

    Normally the push_error_to_driver function should be used. However, in
    some instances the raylet client is not available — e.g. because the
    error happens in Python before the driver or worker has connected to the
    backend processes — and this GCS-based path is used instead.

    Args:
        error_type: The type of the error.
        message: The message that will be printed in the background
            on the driver.
        gcs_publisher: The GCS publisher to use.
        job_id: The ID of the driver to push the error message to. If this
            is None, then the message will be pushed to all drivers.
    """
    target_job = ray.JobID.nil() if job_id is None else job_id
    assert isinstance(target_job, ray.JobID)
    error_data = construct_error_message(
        target_job, error_type, message, time.time()
    )
    try:
        gcs_publisher.publish_error(target_job.hex().encode(), error_data)
    except Exception:
        # Publishing is best-effort; log the failure rather than raising.
        logger.exception(f"Failed to publish error {error_data}")
def random_string():
    """Generate a random string to use as an ID.

    Note that users may seed numpy, which could cause this function to
    generate duplicate IDs. Therefore, we need to seed numpy ourselves, but
    we can't interfere with the state of the user's random number generator,
    so we extract the state of the random number generator and reset it after
    we are done.

    TODO(rkn): If we want to later guarantee that these are generated in a
    deterministic manner, then we will need to make some changes here.

    Returns:
        A random byte string of length ray_constants.ID_SIZE.
    """
    # Snapshot the user's numpy RNG state so we can restore it afterwards.
    saved_state = np.random.get_state()
    # Reseed from true randomness so a user-seeded RNG can't produce
    # duplicate IDs, then draw the random bytes.
    np.random.seed(None)
    random_id = np.random.bytes(ray_constants.ID_SIZE)
    # Put the user's RNG state back exactly as we found it.
    np.random.set_state(saved_state)
    return random_id
def decode(byte_str: str, allow_none: bool = False, encode_type: str = "utf-8"):
    """Decode a bytes object to ``str``.

    Args:
        byte_str: The byte string to decode.
        allow_none: If true, then we will allow byte_str to be None in which
            case we will return an empty string. TODO(rkn): Remove this flag.
            This is only here to simplify upgrading to flatbuffers 1.10.0.
        encode_type: The text encoding used to decode ``byte_str``.

    Returns:
        The decoded string.

    Raises:
        ValueError: If ``byte_str`` is not ``bytes`` (and is not an
            allowed ``None``).
    """
    if byte_str is None and allow_none:
        return ""

    if not isinstance(byte_str, bytes):
        raise ValueError(f"The argument {byte_str} must be a bytes object.")
    # Ray only supports Python 3, so always decode; the old
    # `sys.version_info >= (3, 0)` guard was dead code.
    return byte_str.decode(encode_type)
def ensure_str(s, encoding="utf-8", errors="strict"):
    """Coerce *s* to `str`.

    - `str` -> `str`
    - `bytes` -> decoded to `str`
    """
    if isinstance(s, bytes):
        return s.decode(encoding, errors)
    # Anything other than str/bytes is a programming error.
    assert isinstance(s, str)
    return s
def binary_to_object_ref(binary_object_ref):
    """Wrap raw object-ref bytes in a ``ray.ObjectRef``."""
    return ray.ObjectRef(binary_object_ref)
def binary_to_task_id(binary_task_id):
    """Wrap raw task-ID bytes in a ``ray.TaskID``."""
    return ray.TaskID(binary_task_id)
def binary_to_hex(identifier):
    """Convert raw ID bytes to their lowercase hex string form.

    Args:
        identifier: The raw ``bytes`` of an ID.

    Returns:
        The hex encoding of ``identifier`` as a ``str``.
    """
    # binascii.hexlify returns bytes; Ray only runs on Python 3, so decode
    # unconditionally (the old `sys.version_info` guard was dead code).
    return binascii.hexlify(identifier).decode()
def hex_to_binary(hex_identifier):
    """Inverse of ``binary_to_hex``: decode a hex string back to bytes."""
    return binascii.unhexlify(hex_identifier)
# TODO(qwang): Remove these helper functions
# once we separate `WorkerID` from `UniqueID`.
def compute_job_id_from_driver(driver_id):
    """Extract the JobID embedded in a driver's WorkerID.

    The job ID occupies the first ``ray.JobID.size()`` bytes of the
    worker ID.
    """
    assert isinstance(driver_id, ray.WorkerID)
    return ray.JobID(driver_id.binary()[0 : ray.JobID.size()])
def compute_driver_id_from_job(job_id):
    """Build a driver WorkerID from a JobID.

    The worker ID is the job ID padded with ``0xff`` bytes up to
    ``ray_constants.ID_SIZE`` (the inverse of compute_job_id_from_driver).
    """
    assert isinstance(job_id, ray.JobID)
    rest_length = ray_constants.ID_SIZE - job_id.size()
    driver_id_str = job_id.binary() + (rest_length * b"\xff")
    return ray.WorkerID(driver_id_str)
def get_cuda_visible_devices():
    """Get the device IDs in the CUDA_VISIBLE_DEVICES environment variable.

    Returns:
        devices (List[str]): If CUDA_VISIBLE_DEVICES is set, returns a
            list of strings representing the IDs of the visible GPUs.
            If it is not set or is set to NoDevFiles, returns empty list.
    """
    gpu_ids_str = os.environ.get("CUDA_VISIBLE_DEVICES")
    if gpu_ids_str is None:
        # Unset: the caller cannot distinguish "no GPUs" from "no limit",
        # so return None rather than an empty list.
        return None
    if gpu_ids_str in ("", "NoDevFiles"):
        return []
    # GPU identifiers are given as strings representing integers or UUIDs.
    return gpu_ids_str.split(",")
# Cache of the GPU id list most recently written by set_cuda_visible_devices,
# used to skip redundant environment updates.
last_set_gpu_ids = None
def set_cuda_visible_devices(gpu_ids):
    """Set the CUDA_VISIBLE_DEVICES environment variable.

    No-op when the user asked Ray not to manage the variable (via the
    NOSET env var) or when the ids match the last value we set.

    Args:
        gpu_ids (List[str]): List of strings representing GPU IDs.
    """
    if os.environ.get(ray_constants.NOSET_CUDA_VISIBLE_DEVICES_ENV_VAR):
        return
    global last_set_gpu_ids
    if last_set_gpu_ids == gpu_ids:
        return  # optimization: already set
    os.environ["CUDA_VISIBLE_DEVICES"] = ",".join([str(i) for i in gpu_ids])
    last_set_gpu_ids = gpu_ids
def resources_from_ray_options(options_dict: Dict[str, Any]) -> Dict[str, Any]:
    """Determine a task's resource requirements.

    Args:
        options_dict: The dictionary that contains resources requirements.

    Returns:
        A dictionary of the resource requirements for the task.

    Raises:
        ValueError: If the custom ``resources`` dict contains any of the
            reserved keys ('CPU', 'GPU', 'memory', 'object_store_memory').
    """
    # Copy so we never mutate the caller's dict.
    resources = dict(options_dict.get("resources") or {})
    # The predefined resources must be given via their dedicated options,
    # never through the free-form `resources` dict.
    if "CPU" in resources or "GPU" in resources:
        raise ValueError(
            "The resources dictionary must not contain the key 'CPU' or 'GPU'"
        )
    elif "memory" in resources or "object_store_memory" in resources:
        raise ValueError(
            "The resources dictionary must not "
            "contain the key 'memory' or 'object_store_memory'"
        )

    num_cpus = options_dict.get("num_cpus")
    if num_cpus is not None:
        resources["CPU"] = num_cpus

    num_gpus = options_dict.get("num_gpus")
    if num_gpus is not None:
        resources["GPU"] = num_gpus

    memory = options_dict.get("memory")
    if memory is not None:
        resources["memory"] = ray_constants.to_memory_units(memory, round_up=True)

    object_store_memory = options_dict.get("object_store_memory")
    if object_store_memory is not None:
        resources["object_store_memory"] = ray_constants.to_memory_units(
            object_store_memory, round_up=True
        )

    accelerator_type = options_dict.get("accelerator_type")
    if accelerator_type is not None:
        # Accelerator constraints are encoded as tiny custom resources.
        resources[
            f"{ray_constants.RESOURCE_CONSTRAINT_PREFIX}{accelerator_type}"
        ] = 0.001

    return resources
class Unbuffered(object):
    """Text-stream wrapper that flushes after every write.

    There's no "built-in" solution to programatically disabling buffering of
    text files. Ray expects stdout/err to be text files, so creating an
    unbuffered binary file is unacceptable.
    See
    https://mail.python.org/pipermail/tutor/2003-November/026645.html.
    https://docs.python.org/3/library/functions.html#open
    """
    def __init__(self, stream):
        # The underlying (buffered) text stream being wrapped.
        self.stream = stream
    def write(self, data):
        # Flush immediately so the data is visible to readers right away.
        self.stream.write(data)
        self.stream.flush()
    def writelines(self, datas):
        self.stream.writelines(datas)
        self.stream.flush()
    def __getattr__(self, attr):
        # Delegate every other attribute/method to the wrapped stream.
        return getattr(self.stream, attr)
def open_log(path, unbuffered=False, **kwargs):
    """
    Opens the log file at `path`, with the provided kwargs being given to
    `open`.
    """
    # Disable buffering, see test_advanced_3.py::test_logging_to_driver
    defaults = {"buffering": 1, "mode": "a", "encoding": "utf-8"}
    for key, value in defaults.items():
        kwargs.setdefault(key, value)
    stream = open(path, **kwargs)
    return Unbuffered(stream) if unbuffered else stream
def get_system_memory(
    # For cgroups v1:
    memory_limit_filename="/sys/fs/cgroup/memory/memory.limit_in_bytes",
    # For cgroups v2:
    memory_limit_filename_v2="/sys/fs/cgroup/memory.max",
):
    """Return the total amount of system memory in bytes.

    Returns:
        The total amount of system memory in bytes.
    """
    # If we are inside a container, read the cgroup memory limit. Note that
    # this file also exists outside containers, where its value is often much
    # larger than the actual amount of memory.
    docker_limit = None
    if os.path.exists(memory_limit_filename):
        with open(memory_limit_filename, "r") as f:
            docker_limit = int(f.read())
    elif os.path.exists(memory_limit_filename_v2):
        with open(memory_limit_filename_v2, "r") as f:
            max_file = f.read()
        if max_file.isnumeric():
            docker_limit = int(max_file)
        # Otherwise max_file is "max", i.e. the limit is unset; keep None.

    psutil_memory_in_bytes = psutil.virtual_memory().total
    if docker_limit is None:
        return psutil_memory_in_bytes
    # The cgroup limit is very large when we aren't actually in Docker,
    # so take the smaller of the two values.
    return min(docker_limit, psutil_memory_in_bytes)
def _get_docker_cpus(
    cpu_quota_file_name="/sys/fs/cgroup/cpu/cpu.cfs_quota_us",
    cpu_period_file_name="/sys/fs/cgroup/cpu/cpu.cfs_period_us",
    cpuset_file_name="/sys/fs/cgroup/cpuset/cpuset.cpus",
    cpu_max_file_name="/sys/fs/cgroup/cpu.max",
) -> Optional[float]:
    """Return the container's CPU limit derived from cgroup files, if any.

    Considers both the CFS quota (cgroups v1 ``cpu.cfs_quota_us`` /
    ``cpu.cfs_period_us``, or cgroups v2 ``cpu.max``) and the cpuset size,
    returning the smaller of the two when both are configured. Returns None
    when no limit can be determined (e.g. not in a container).
    """
    # TODO (Alex): Don't implement this logic ourselves.
    # Docker has 2 underlying ways of implementing CPU limits:
    # https://docs.docker.com/config/containers/resource_constraints/#configure-the-default-cfs-scheduler
    # 1. --cpuset-cpus 2. --cpus or --cpu-quota/--cpu-period (--cpu-shares is a
    # soft limit so we don't worry about it). For Ray's purposes, if we use
    # docker, the number of vCPUs on a machine is whichever is set (ties broken
    # by smaller value).

    cpu_quota = None
    # See: https://bugs.openjdk.java.net/browse/JDK-8146115
    if os.path.exists(cpu_quota_file_name) and os.path.exists(cpu_period_file_name):
        try:
            with open(cpu_quota_file_name, "r") as quota_file, open(
                cpu_period_file_name, "r"
            ) as period_file:
                # quota is -1 when unset; the negative value is discarded
                # below.
                cpu_quota = float(quota_file.read()) / float(period_file.read())
        except Exception:
            logger.exception("Unexpected error calculating docker cpu quota.")
    # Look at cpu.max for cgroups v2
    elif os.path.exists(cpu_max_file_name):
        try:
            max_file = open(cpu_max_file_name).read()
            quota_str, period_str = max_file.split()
            if quota_str.isnumeric() and period_str.isnumeric():
                cpu_quota = float(quota_str) / float(period_str)
            else:
                # quota_str is "max" meaning the cpu quota is unset
                cpu_quota = None
        except Exception:
            logger.exception("Unexpected error calculating docker cpu quota.")
    if (cpu_quota is not None) and (cpu_quota < 0):
        cpu_quota = None
    elif cpu_quota == 0:
        # Round up in case the cpu limit is less than 1.
        cpu_quota = 1

    cpuset_num = None
    if os.path.exists(cpuset_file_name):
        try:
            with open(cpuset_file_name) as cpuset_file:
                ranges_as_string = cpuset_file.read()
                # cpuset.cpus looks like e.g. "0-2,4,6-7": comma-separated
                # single ids and inclusive ranges.
                ranges = ranges_as_string.split(",")
                cpu_ids = []
                for num_or_range in ranges:
                    if "-" in num_or_range:
                        start, end = num_or_range.split("-")
                        cpu_ids.extend(list(range(int(start), int(end) + 1)))
                    else:
                        cpu_ids.append(int(num_or_range))
                cpuset_num = len(cpu_ids)
        except Exception:
            logger.exception("Unexpected error calculating docker cpuset ids.")
    # Possible to-do: Parse cgroups v2's cpuset.cpus.effective for the number
    # of accessible CPUs.

    if cpu_quota and cpuset_num:
        return min(cpu_quota, cpuset_num)
    return cpu_quota or cpuset_num
def get_num_cpus() -> int:
    """Return the number of CPUs this process should consider available.

    Starts from ``multiprocessing.cpu_count()`` and, unless the
    ``RAY_USE_MULTIPROCESSING_CPU_COUNT`` env var is set, overrides it with
    the docker/cgroup CPU limit (truncated to an int) when one is detected.
    """
    cpu_count = multiprocessing.cpu_count()
    if os.environ.get("RAY_USE_MULTIPROCESSING_CPU_COUNT"):
        logger.info(
            "Detected RAY_USE_MULTIPROCESSING_CPU_COUNT=1: Using "
            "multiprocessing.cpu_count() to detect the number of CPUs. "
            "This may be inconsistent when used inside docker. "
            "To correctly detect CPUs, unset the env var: "
            "`RAY_USE_MULTIPROCESSING_CPU_COUNT`."
        )
        return cpu_count
    try:
        # Not easy to get cpu count in docker, see:
        # https://bugs.python.org/issue36054
        docker_count = _get_docker_cpus()
        if docker_count is not None and docker_count != cpu_count:
            # Don't log this warning if we're on K8s or if the warning is
            # explicitly disabled.
            if (
                "RAY_DISABLE_DOCKER_CPU_WARNING" not in os.environ
                and "KUBERNETES_SERVICE_HOST" not in os.environ
            ):
                logger.warning(
                    "Detecting docker specified CPUs. In "
                    "previous versions of Ray, CPU detection in containers "
                    "was incorrect. Please ensure that Ray has enough CPUs "
                    "allocated. As a temporary workaround to revert to the "
                    "prior behavior, set "
                    "`RAY_USE_MULTIPROCESSING_CPU_COUNT=1` as an env var "
                    "before starting Ray. Set the env var: "
                    "`RAY_DISABLE_DOCKER_CPU_WARNING=1` to mute this warning."
                )
            # TODO (Alex): We should probably add support for fractional cpus.
            if int(docker_count) != float(docker_count):
                logger.warning(
                    f"Ray currently does not support initializing Ray"
                    f"with fractional cpus. Your num_cpus will be "
                    f"truncated from {docker_count} to "
                    f"{int(docker_count)}."
                )
            docker_count = int(docker_count)
            cpu_count = docker_count
    except Exception:
        # `nproc` and cgroup are linux-only. If docker only works on linux
        # (will run in a linux VM on other platforms), so this is fine.
        pass
    return cpu_count
def get_used_memory(
    # For cgroups v1:
    memory_usage_filename="/sys/fs/cgroup/memory/memory.usage_in_bytes",
    # For cgroups v2:
    memory_usage_filename_v2="/sys/fs/cgroup/memory.current",
):
    """Return the currently used system memory in bytes.

    The cgroup paths are parameterized (with the standard defaults) for
    testability, mirroring the signature style of ``get_system_memory``.

    Args:
        memory_usage_filename: cgroups v1 file reporting the container's
            current memory usage.
        memory_usage_filename_v2: cgroups v2 equivalent of the above.

    Returns:
        The total amount of used memory.
    """
    # Try to accurately figure out the memory usage if we are in a docker
    # container.
    docker_usage = None
    if os.path.exists(memory_usage_filename):
        with open(memory_usage_filename, "r") as f:
            docker_usage = int(f.read())
    elif os.path.exists(memory_usage_filename_v2):
        with open(memory_usage_filename_v2, "r") as f:
            docker_usage = int(f.read())

    # Use psutil if it is available.
    psutil_memory_in_bytes = psutil.virtual_memory().used
    if docker_usage is not None:
        # We take the min because the cgroup limit is very large if we aren't
        # in Docker.
        return min(docker_usage, psutil_memory_in_bytes)
    return psutil_memory_in_bytes
def estimate_available_memory():
    """Return the currently available amount of system memory in bytes.

    Returns:
        The total amount of available memory in bytes, computed as total
        system memory minus currently used memory.
    """
    return get_system_memory() - get_used_memory()
def get_shared_memory_bytes():
    """Get the size of the shared memory file system.

    Only valid on Linux: it stats the /dev/shm tmpfs mount.

    Returns:
        The size of the shared memory file system in bytes.
    """
    # Make sure this is only called on Linux.
    assert sys.platform == "linux" or sys.platform == "linux2"

    shm_fd = os.open("/dev/shm", os.O_RDONLY)
    try:
        shm_fs_stats = os.fstatvfs(shm_fd)
        # The value shm_fs_stats.f_bsize is the block size and the
        # value shm_fs_stats.f_bavail is the number of available
        # blocks.
        shm_avail = shm_fs_stats.f_bsize * shm_fs_stats.f_bavail
    finally:
        # Always release the descriptor, even if fstatvfs raises.
        os.close(shm_fd)

    return shm_avail
def check_oversized_function(
    pickled: bytes, name: str, obj_type: str, worker: "ray.Worker"
) -> None:
    """Send a warning message if the pickled function is too large.

    Args:
        pickled: the pickled function.
        name: name of the pickled object.
        obj_type: type of the pickled object, can be 'function',
            'remote function', or 'actor'.
        worker: the worker used to send warning message. message will be logged
            locally if None.

    Raises:
        ValueError: if the pickled payload exceeds the hard error threshold.
    """
    num_bytes = len(pickled)
    if num_bytes <= ray_constants.FUNCTION_SIZE_WARN_THRESHOLD:
        # Small enough: nothing to report.
        return
    num_mib = num_bytes // (1024 * 1024)
    if num_bytes < ray_constants.FUNCTION_SIZE_ERROR_THRESHOLD:
        # Between the warn and error thresholds: emit a warning only.
        warning_message = (
            "The {} {} is very large ({} MiB). "
            "Check that its definition is not implicitly capturing a large "
            "array or other object in scope. Tip: use ray.put() to put large "
            "objects in the Ray object store."
        ).format(obj_type, name, num_mib)
        if worker:
            push_error_to_driver(
                worker,
                ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR,
                "Warning: " + warning_message,
                job_id=worker.current_job_id,
            )
        return
    # At or above the hard limit: refuse outright.
    error = (
        "The {} {} is too large ({} MiB > FUNCTION_SIZE_ERROR_THRESHOLD={}"
        " MiB). Check that its definition is not implicitly capturing a "
        "large array or other object in scope. Tip: use ray.put() to "
        "put large objects in the Ray object store."
    ).format(
        obj_type,
        name,
        num_mib,
        ray_constants.FUNCTION_SIZE_ERROR_THRESHOLD // (1024 * 1024),
    )
    raise ValueError(error)
def is_main_thread() -> bool:
    """Return True if called from the process's main thread.

    Uses ``threading.Thread.name`` instead of the ``getName()`` accessor,
    which is deprecated since Python 3.10. The main thread is named
    "MainThread" by default, matching the original comparison.
    """
    return threading.current_thread().name == "MainThread"
def detect_fate_sharing_support_win32():
    """Detect (and lazily initialize) Windows job-object fate sharing.

    On the first call on Windows this creates a kernel "job object"
    configured to kill all assigned processes when the job handle is closed,
    and caches it in the module-level ``win32_job``. Subsequent calls reuse
    the cached result. Returns True iff the job object is available; on
    non-Windows platforms it returns False.
    """
    global win32_job, win32_AssignProcessToJobObject
    if win32_job is None and sys.platform == "win32":
        import ctypes

        try:
            from ctypes.wintypes import BOOL, DWORD, HANDLE, LPVOID, LPCWSTR

            # Declare argument/return types for the kernel32 entry points we
            # need, so ctypes marshals handles correctly on 64-bit Windows.
            kernel32 = ctypes.WinDLL("kernel32")
            kernel32.CreateJobObjectW.argtypes = (LPVOID, LPCWSTR)
            kernel32.CreateJobObjectW.restype = HANDLE
            sijo_argtypes = (HANDLE, ctypes.c_int, LPVOID, DWORD)
            kernel32.SetInformationJobObject.argtypes = sijo_argtypes
            kernel32.SetInformationJobObject.restype = BOOL
            kernel32.AssignProcessToJobObject.argtypes = (HANDLE, HANDLE)
            kernel32.AssignProcessToJobObject.restype = BOOL
            kernel32.IsDebuggerPresent.argtypes = ()
            kernel32.IsDebuggerPresent.restype = BOOL
        except (AttributeError, TypeError, ImportError):
            kernel32 = None
        job = kernel32.CreateJobObjectW(None, None) if kernel32 else None
        # Wrap the raw handle so it is closed automatically on GC.
        job = subprocess.Handle(job) if job else job
        if job:
            from ctypes.wintypes import DWORD, LARGE_INTEGER, ULARGE_INTEGER

            # ctypes mirrors of the Win32 structures passed to
            # SetInformationJobObject (layouts from <WinNT.h>).
            class JOBOBJECT_BASIC_LIMIT_INFORMATION(ctypes.Structure):
                _fields_ = [
                    ("PerProcessUserTimeLimit", LARGE_INTEGER),
                    ("PerJobUserTimeLimit", LARGE_INTEGER),
                    ("LimitFlags", DWORD),
                    ("MinimumWorkingSetSize", ctypes.c_size_t),
                    ("MaximumWorkingSetSize", ctypes.c_size_t),
                    ("ActiveProcessLimit", DWORD),
                    ("Affinity", ctypes.c_size_t),
                    ("PriorityClass", DWORD),
                    ("SchedulingClass", DWORD),
                ]

            class IO_COUNTERS(ctypes.Structure):
                _fields_ = [
                    ("ReadOperationCount", ULARGE_INTEGER),
                    ("WriteOperationCount", ULARGE_INTEGER),
                    ("OtherOperationCount", ULARGE_INTEGER),
                    ("ReadTransferCount", ULARGE_INTEGER),
                    ("WriteTransferCount", ULARGE_INTEGER),
                    ("OtherTransferCount", ULARGE_INTEGER),
                ]

            class JOBOBJECT_EXTENDED_LIMIT_INFORMATION(ctypes.Structure):
                _fields_ = [
                    ("BasicLimitInformation", JOBOBJECT_BASIC_LIMIT_INFORMATION),
                    ("IoInfo", IO_COUNTERS),
                    ("ProcessMemoryLimit", ctypes.c_size_t),
                    ("JobMemoryLimit", ctypes.c_size_t),
                    ("PeakProcessMemoryUsed", ctypes.c_size_t),
                    ("PeakJobMemoryUsed", ctypes.c_size_t),
                ]

            debug = kernel32.IsDebuggerPresent()

            # Defined in <WinNT.h>; also available here:
            # https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/nf-jobapi2-setinformationjobobject
            JobObjectExtendedLimitInformation = 9
            JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x00000800
            JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION = 0x00000400
            JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x00002000
            buf = JOBOBJECT_EXTENDED_LIMIT_INFORMATION()
            # Kill-on-close is disabled under a debugger, so that breaking
            # into the debugger does not tear down the whole job.
            buf.BasicLimitInformation.LimitFlags = (
                (0 if debug else JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE)
                | JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION
                | JOB_OBJECT_LIMIT_BREAKAWAY_OK
            )
            infoclass = JobObjectExtendedLimitInformation
            if not kernel32.SetInformationJobObject(
                job, infoclass, ctypes.byref(buf), ctypes.sizeof(buf)
            ):
                job = None
        win32_AssignProcessToJobObject = (
            kernel32.AssignProcessToJobObject if kernel32 is not None else False
        )
        win32_job = job if job else False
    return bool(win32_job)
def detect_fate_sharing_support_linux():
    """Probe once for prctl(2) via ctypes and cache it in ``linux_prctl``.

    Returns:
        True if prctl-based fate-sharing is available, else False.
    """
    global linux_prctl
    if linux_prctl is not None or not sys.platform.startswith("linux"):
        return bool(linux_prctl)
    prctl = None
    try:
        from ctypes import c_int, c_ulong, CDLL

        prctl = CDLL(None).prctl
        prctl.restype = c_int
        prctl.argtypes = [c_int, c_ulong, c_ulong, c_ulong, c_ulong]
    except (AttributeError, TypeError):
        prctl = None
    # Cache False (rather than None) so the probe runs only once.
    linux_prctl = prctl if prctl else False
    return bool(linux_prctl)
def detect_fate_sharing_support():
    """Dispatch to the platform-specific fate-sharing probe.

    Returns:
        The platform probe's boolean result, or None on platforms with no
        supported fate-sharing mechanism.
    """
    if sys.platform == "win32":
        return detect_fate_sharing_support_win32()
    if sys.platform.startswith("linux"):
        return detect_fate_sharing_support_linux()
    return None
def set_kill_on_parent_death_linux():
    """Ensures this process dies if its parent dies (fate-sharing).

    Linux-only. Must be called in preexec_fn (i.e. by the child).

    Raises:
        OSError: If the prctl call fails.
        AssertionError: If prctl support was not detected beforehand.
    """
    if detect_fate_sharing_support_linux():
        import signal

        PR_SET_PDEATHSIG = 1  # from <linux/prctl.h>
        # Ask the kernel to deliver SIGKILL to us when our parent exits.
        if linux_prctl(PR_SET_PDEATHSIG, signal.SIGKILL, 0, 0, 0) != 0:
            import ctypes

            raise OSError(ctypes.get_errno(), "prctl(PR_SET_PDEATHSIG) failed")
    else:
        assert False, "PR_SET_PDEATHSIG used despite being unavailable"
def set_kill_child_on_death_win32(child_proc):
    """Ensures the child process dies if this process dies (fate-sharing).

    Windows-only. Must be called by the parent, after spawning the child.

    Args:
        child_proc: The subprocess.Popen or subprocess.Handle object.

    Raises:
        OSError: If assigning the child to the job object fails.
        AssertionError: If job-object support was not detected beforehand.
    """
    if isinstance(child_proc, subprocess.Popen):
        child_proc = child_proc._handle
    assert isinstance(child_proc, subprocess.Handle)
    if detect_fate_sharing_support_win32():
        # Attach the child to our kill-on-close job object.
        if not win32_AssignProcessToJobObject(win32_job, int(child_proc)):
            import ctypes

            raise OSError(ctypes.get_last_error(), "AssignProcessToJobObject() failed")
    else:
        assert False, "AssignProcessToJobObject used despite being unavailable"
def set_sigterm_handler(sigterm_handler):
    """Registers a handler for SIGTERM in a platform-compatible manner."""
    if sys.platform != "win32":
        signal.signal(signal.SIGTERM, sigterm_handler)
    else:
        # Windows console apps do not get SIGTERM; SIGBREAK (Ctrl+Break) is
        # the closest analogue. These handlers only work for console apps.
        # TODO(mehrdadn): implement graceful process termination mechanism
        signal.signal(signal.SIGBREAK, sigterm_handler)
def try_make_directory_shared(directory_path):
    """Best-effort chmod of ``directory_path`` to mode 0o777.

    Permission errors are silently ignored: the caller may not own the
    directory (it could have been created by another user on the same
    machine; see ray-project/ray#3591). Any other OSError is re-raised.
    """
    try:
        os.chmod(directory_path, 0o0777)
    except OSError as err:
        if err.errno not in (errno.EACCES, errno.EPERM):
            raise
def try_to_create_directory(directory_path):
    """Attempt to create a directory that is globally readable/writable.

    Args:
        directory_path: The path of the directory to create.
    """
    expanded = os.path.expanduser(directory_path)
    os.makedirs(expanded, exist_ok=True)
    # Multiple users may share one machine, so open up the permissions
    # (best-effort).
    try_make_directory_shared(expanded)
def try_to_symlink(symlink_path, target_path):
    """Attempt to create a symlink at ``symlink_path`` to ``target_path``.

    Best-effort: an existing symlink is replaced if possible; an existing
    regular file is never overwritten; any OSError (e.g. missing symlink
    privileges on Windows) is swallowed.
    """
    link = os.path.expanduser(symlink_path)
    target = os.path.expanduser(target_path)
    if os.path.exists(link):
        if not os.path.islink(link):
            # Never clobber a real file occupying the path.
            return
        try:
            os.remove(link)
        except OSError:
            return
    try:
        os.symlink(target, link)
    except OSError:
        pass
def get_user():
    """Return the current OS user name, or "" when it cannot be determined.

    ``pwd`` is the module-level import, set to None on Windows where the
    pwd module does not exist.
    """
    if pwd is None:
        return ""
    try:
        return pwd.getpwuid(os.getuid()).pw_name
    except Exception:
        # Best-effort: e.g. the uid may have no passwd entry.
        return ""
def get_function_args(callable):
    """Return the parameter names of ``callable`` in declaration order.

    The previous implementation round-tripped through ``frozenset``, which
    discarded the ordering guaranteed by ``Signature.parameters`` and made
    the result order nondeterministic across runs.
    """
    return list(signature(callable).parameters)
def get_conda_bin_executable(executable_name):
    """
    Return path to the specified executable, assumed to be discoverable within
    the 'bin' subdirectory of a conda installation. Adapted from
    https://github.com/mlflow/mlflow.
    """
    # CONDA_EXE points at the conda binary itself; see conda/conda#7126.
    conda_exe = os.environ.get("CONDA_EXE")
    if conda_exe is not None:
        return os.path.join(os.path.dirname(conda_exe), executable_name)
    # Fall back to resolution via PATH.
    return executable_name
def get_conda_env_dir(env_name):
    """Find and validate the conda directory for a given conda environment.

    For example, given the environment name `tf1`, this function checks
    the existence of the corresponding conda directory, e.g.
    `/Users/scaly/anaconda3/envs/tf1`, and returns it.

    Raises:
        ValueError: If conda cannot be detected or the env dir is missing.
    """
    prefix = os.environ.get("CONDA_PREFIX")
    if prefix is None:
        # Not inside any conda env (rare, since new terminals usually start
        # in (base)). Derive the install root from CONDA_EXE, e.g.
        # $HOME/anaconda3/bin/conda -> $HOME/anaconda3.
        conda_exe = os.environ.get("CONDA_EXE")
        if conda_exe is None:
            raise ValueError(
                "Cannot find environment variables set by conda. "
                "Please verify conda is installed."
            )
        prefix = str(Path(conda_exe).parent.parent)
    if os.environ.get("CONDA_DEFAULT_ENV") == "base":
        # Running from (base): CONDA_PREFIX is the install root itself.
        # Not recommended by conda, but supported.
        if env_name == "base":
            env_dir = prefix
        else:
            env_dir = os.path.join(prefix, "envs", env_name)
    else:
        # Running from a user env: CONDA_PREFIX is .../envs/<current>.
        # Swap the final path component for the requested env name.
        env_dir = os.path.join(os.path.split(prefix)[0], env_name)
    if not os.path.isdir(env_dir):
        raise ValueError(
            "conda env "
            + env_name
            + " not found in conda envs directory. Run `conda env list` to "
            + "verify the name is correct."
        )
    return env_dir
def get_call_location(back: int = 1):
    """Return "<filename>:<lineno>" of the caller ``back`` frames up the stack.

    Args:
        back: The number of frames to go up the stack, not including this
            function.

    Returns:
        The location string, or "UNKNOWN" when the stack is too shallow.
    """
    frames = inspect.stack()
    try:
        info = frames[back + 1]  # +1 skips this function's own frame
    except IndexError:
        return "UNKNOWN"
    return f"{info.filename}:{info.lineno}"
# Set of function objects whose deprecation warning has already been emitted;
# consulted by @deprecated(warn_once=True) to avoid spamming the caller.
_PRINTED_WARNING = set()


# The following is inspired by
# https://github.com/tensorflow/tensorflow/blob/dec8e0b11f4f87693b67e125e67dfbc68d26c205/tensorflow/python/util/deprecation.py#L274-L329
def deprecated(
    instructions: Optional[str] = None,
    removal_release: Optional[str] = None,
    removal_date: Optional[str] = None,
    warn_once: bool = True,
):
    """
    Creates a decorator for marking functions as deprecated. The decorator
    will log a deprecation warning on the first (or all, see `warn_once` arg)
    invocations, and will otherwise leave the wrapped function unchanged.

    Args:
        instructions: Instructions for the caller to update their code.
        removal_release: The release in which this deprecated function
            will be removed. Only one of removal_release and removal_date
            should be specified. If neither is specified, we'll warn that
            the function will be removed "in a future release".
        removal_date: The date on which this deprecated function will be
            removed. Only one of removal_release and removal_date should be
            specified. If neither is specified, we'll warn that
            the function will be removed "in a future release".
        warn_once: If true, the deprecation warning will only be logged
            on the first invocation. Otherwise, the deprecation warning will
            be logged on every invocation. Defaults to True.

    Returns:
        A decorator to be used for wrapping deprecated functions.
    """
    if removal_release is not None and removal_date is not None:
        raise ValueError(
            "Only one of removal_release and removal_date should be specified."
        )

    def deprecated_wrapper(func):
        @functools.wraps(func)
        def new_func(*args, **kwargs):
            global _PRINTED_WARNING
            # Only add to the suppression set when warn_once is requested;
            # otherwise the warning fires on every call.
            if func not in _PRINTED_WARNING:
                if warn_once:
                    _PRINTED_WARNING.add(func)
                msg = (
                    "From {}: {} (from {}) is deprecated and will ".format(
                        get_call_location(), func.__name__, func.__module__
                    )
                    + "be removed "
                    + (
                        f"in version {removal_release}."
                        if removal_release is not None
                        else f"after {removal_date}"
                        if removal_date is not None
                        else "in a future version"
                    )
                    + (f" {instructions}" if instructions is not None else "")
                )
                warnings.warn(msg)
            return func(*args, **kwargs)

        return new_func

    return deprecated_wrapper
def import_attr(full_path: str):
    """Given a full import path to a module attr, return the imported attr.

    For example, the following are equivalent:
        MyClass = import_attr("module.submodule:MyClass")
        MyClass = import_attr("module.submodule.MyClass")
        from module.submodule import MyClass

    Returns:
        Imported attr
    """
    if full_path is None:
        raise TypeError("import path cannot be None")

    if ":" in full_path:
        if full_path.count(":") > 1:
            raise ValueError(
                f'Got invalid import path "{full_path}". An '
                "import path may have at most one colon."
            )
        module_name, attr_name = full_path.split(":")
    else:
        # No colon: split on the last dot.
        dot = full_path.rfind(".")
        module_name, attr_name = full_path[:dot], full_path[dot + 1 :]

    return getattr(importlib.import_module(module_name), attr_name)
def get_wheel_filename(
    sys_platform: str = sys.platform,
    ray_version: str = ray.__version__,
    py_version: str = f"{sys.version_info.major}{sys.version_info.minor}",
) -> str:
    """Returns the filename used for the nightly Ray wheel.

    Args:
        sys_platform: The platform as returned by sys.platform. Examples:
            "darwin", "linux", "win32"
        ray_version: The Ray version as returned by ray.__version__ or
            `ray --version`. Examples: "3.0.0.dev0"
        py_version: The major and minor Python versions concatenated.
            Examples: "36", "37", "38", "39"

    Returns:
        The wheel file name, e.g.
        ray-3.0.0.dev0-cp38-cp38-manylinux2014_x86_64.whl
    """
    assert py_version in ["36", "37", "38", "39"], py_version
    platform_tags = {
        "darwin": (
            "macosx_10_15_x86_64"
            if py_version in ["38", "39"]
            else "macosx_10_15_intel"
        ),
        "linux": "manylinux2014_x86_64",
        "win32": "win_amd64",
    }
    assert sys_platform in platform_tags, sys_platform
    # Python 3.6/3.7 ABI tags carry a trailing "m" (pymalloc).
    abi_suffix = "m" if py_version in ["36", "37"] else ""
    return (
        f"ray-{ray_version}-cp{py_version}-"
        f"cp{py_version}{abi_suffix}"
        f"-{platform_tags[sys_platform]}.whl"
    )
def get_master_wheel_url(
    ray_commit: str = ray.__commit__,
    sys_platform: str = sys.platform,
    ray_version: str = ray.__version__,
    py_version: str = f"{sys.version_info.major}{sys.version_info.minor}",
) -> str:
    """Return the URL for the nightly ("master") wheel from a specific commit.

    Bug fix: the URL previously ended with the literal text "(unknown)" —
    the computed ``filename`` was built but never interpolated, yielding a
    URL that could never resolve to a wheel.
    """
    filename = get_wheel_filename(
        sys_platform=sys_platform, ray_version=ray_version, py_version=py_version
    )
    return (
        f"https://s3-us-west-2.amazonaws.com/ray-wheels/master/"
        f"{ray_commit}/{filename}"
    )
def get_release_wheel_url(
    ray_commit: str = ray.__commit__,
    sys_platform: str = sys.platform,
    ray_version: str = ray.__version__,
    py_version: str = f"{sys.version_info.major}{sys.version_info.minor}",
) -> str:
    """Return the URL for the wheel for a specific release.

    Bug fix: the URL previously ended with the literal text "(unknown)" —
    the computed ``filename`` was built but never interpolated.
    """
    filename = get_wheel_filename(
        sys_platform=sys_platform, ray_version=ray_version, py_version=py_version
    )
    return (
        f"https://ray-wheels.s3-us-west-2.amazonaws.com/releases/"
        f"{ray_version}/{ray_commit}/{filename}"
    )
    # e.g. https://ray-wheels.s3-us-west-2.amazonaws.com/releases/1.4.0rc1/e7c7
    # f6371a69eb727fa469e4cd6f4fbefd143b4c/ray-1.4.0rc1-cp36-cp36m-manylinux201
    # 4_x86_64.whl
def validate_namespace(namespace: str):
    """Validate a Ray namespace value.

    Raises:
        TypeError: If ``namespace`` is not a string.
        ValueError: If ``namespace`` is the empty string.
    """
    if not isinstance(namespace, str):
        raise TypeError("namespace must be None or a string.")
    if namespace == "":
        raise ValueError(
            '"" is not a valid namespace. ' "Pass None to not specify a namespace."
        )
def init_grpc_channel(
    address: str,
    options: Optional[Sequence[Tuple[str, Any]]] = None,
    asynchronous: bool = False,
):
    """Create a gRPC channel to ``address``.

    Uses TLS credentials loaded from the environment when RAY_USE_TLS is
    set to "1"/"true"; otherwise creates an insecure channel. Returns an
    aio channel when ``asynchronous`` is True.
    """
    grpc_module = aiogrpc if asynchronous else grpc
    use_tls = os.environ.get("RAY_USE_TLS", "0").lower() in ("1", "true")
    if not use_tls:
        return grpc_module.insecure_channel(address, options=options)
    server_cert_chain, private_key, ca_cert = load_certs_from_env()
    credentials = grpc.ssl_channel_credentials(
        certificate_chain=server_cert_chain,
        private_key=private_key,
        root_certificates=ca_cert,
    )
    return grpc_module.secure_channel(address, credentials, options=options)
def check_dashboard_dependencies_installed() -> bool:
    """Returns True if Ray Dashboard dependencies are installed.

    Distinguishes a plain `ray` install from `ray[default]` by blindly
    importing the dashboard's optional dependencies — there is no cleaner
    detection mechanism.
    """
    try:
        import ray.dashboard.optional_deps  # noqa: F401
    except ImportError:
        return False
    return True
def internal_kv_list_with_retry(gcs_client, prefix, namespace, num_retries=20):
    """List keys matching ``prefix`` from the GCS internal KV, with retries.

    Retries up to ``num_retries`` times (sleeping 2s between attempts) and
    raises RuntimeError when no attempt succeeds.
    """
    if isinstance(prefix, str):
        prefix = prefix.encode()
    if isinstance(namespace, str):
        namespace = namespace.encode()
    result = None
    for _ in range(num_retries):
        try:
            result = gcs_client.internal_kv_keys(prefix, namespace)
        except Exception as e:
            result = None
            # Connection-level failures get a friendlier hint.
            if isinstance(e, grpc.RpcError) and e.code() in (
                grpc.StatusCode.UNAVAILABLE,
                grpc.StatusCode.UNKNOWN,
            ):
                logger.warning(
                    f"Unable to connect to GCS at {gcs_client.address}. "
                    "Check that (1) Ray GCS with matching version started "
                    "successfully at the specified address, and (2) there is "
                    "no firewall setting preventing access."
                )
            else:
                logger.exception("Internal KV List failed")
        if result is not None:
            return result
        logger.debug(f"Fetched {prefix}=None from KV. Retrying.")
        time.sleep(2)
    raise RuntimeError(
        f"Could not list '{prefix}' from GCS. Did GCS start successfully?"
    )
def internal_kv_get_with_retry(gcs_client, key, namespace, num_retries=20):
    """Fetch ``key`` from the GCS internal KV, with retries.

    Retries up to ``num_retries`` times (sleeping 2s between attempts).
    Raises RuntimeError when every attempt fails or the value is empty.
    """
    if isinstance(key, str):
        key = key.encode()
    result = None
    for _ in range(num_retries):
        try:
            result = gcs_client.internal_kv_get(key, namespace)
        except Exception as e:
            result = None
            # Connection-level failures get a friendlier hint.
            if isinstance(e, grpc.RpcError) and e.code() in (
                grpc.StatusCode.UNAVAILABLE,
                grpc.StatusCode.UNKNOWN,
            ):
                logger.warning(
                    f"Unable to connect to GCS at {gcs_client.address}. "
                    "Check that (1) Ray GCS with matching version started "
                    "successfully at the specified address, and (2) there is "
                    "no firewall setting preventing access."
                )
            else:
                logger.exception("Internal KV Get failed")
        if result is not None:
            break
        logger.debug(f"Fetched {key}=None from KV. Retrying.")
        time.sleep(2)
    # NOTE: falsy results (including b"") are treated as failure, matching
    # the original behavior.
    if not result:
        raise RuntimeError(
            f"Could not read '{key.decode()}' from GCS. Did GCS start successfully?"
        )
    return result
def internal_kv_put_with_retry(gcs_client, key, value, namespace, num_retries=20):
    """Write ``key=value`` to the GCS internal KV, retrying RpcErrors.

    Retries up to ``num_retries`` times (sleeping 2s between attempts) and
    re-raises the last grpc.RpcError when all attempts fail. Non-RpcError
    exceptions propagate immediately.
    """
    if isinstance(key, str):
        key = key.encode()
    if isinstance(value, str):
        value = value.encode()
    if isinstance(namespace, str):
        namespace = namespace.encode()
    error = None
    for _ in range(num_retries):
        try:
            return gcs_client.internal_kv_put(
                key, value, overwrite=True, namespace=namespace
            )
        except grpc.RpcError as e:
            error = e
            # Connection-level failures get a friendlier hint.
            if e.code() in (
                grpc.StatusCode.UNAVAILABLE,
                grpc.StatusCode.UNKNOWN,
            ):
                logger.warning(
                    f"Unable to connect to GCS at {gcs_client.address}. "
                    "Check that (1) Ray GCS with matching version started "
                    "successfully at the specified address, and (2) there is "
                    "no firewall setting preventing access."
                )
            else:
                logger.exception("Internal KV Put failed")
            time.sleep(2)
    # Reraise the last grpc.RpcError.
    raise error
def compute_version_info():
    """Compute the versions of Python, and Ray.

    Returns:
        A tuple (ray_version, python_version).
    """
    python_version = ".".join(str(part) for part in sys.version_info[:3])
    return ray.__version__, python_version
def get_directory_size_bytes(path: Union[str, Path] = ".") -> int:
    """Get the total size of a directory in bytes, including subdirectories.

    Symbolic links and .pyc files are excluded from the total.
    """
    total = 0
    for dirpath, _dirnames, filenames in os.walk(path):
        for name in filenames:
            full_path = os.path.join(dirpath, name)
            if name.endswith(".pyc") or os.path.islink(full_path):
                continue
            total += os.path.getsize(full_path)
    return total
def check_version_info(cluster_metadata):
    """Check if the Python and Ray versions stored in GCS matches this process.

    Args:
        cluster_metadata: Ray cluster metadata from GCS.

    Raises:
        RuntimeError: If there is a version mismatch.
    """
    cluster_version_info = (
        cluster_metadata["ray_version"],
        cluster_metadata["python_version"],
    )
    version_info = compute_version_info()
    if version_info == cluster_version_info:
        return
    node_ip_address = ray._private.services.get_node_ip_address()
    raise RuntimeError(
        "Version mismatch: The cluster was started with:\n"
        "    Ray: " + cluster_version_info[0] + "\n"
        "    Python: " + cluster_version_info[1] + "\n"
        "This process on node " + node_ip_address + " was started with:" + "\n"
        "    Ray: " + version_info[0] + "\n"
        "    Python: " + version_info[1] + "\n"
    )
| 35.518491 | 136 | 0.640616 | import binascii
import errno
import functools
import hashlib
import importlib
import logging
import multiprocessing
import os
import signal
import subprocess
import sys
import tempfile
import threading
import time
from typing import Optional, Sequence, Tuple, Any, Union, Dict
import uuid
import grpc
import warnings
try:
from grpc import aio as aiogrpc
except ImportError:
from grpc.experimental import aio as aiogrpc
import inspect
from inspect import signature
from pathlib import Path
import numpy as np
import ray
from ray.core.generated.gcs_pb2 import ErrorTableData
import ray.ray_constants as ray_constants
from ray._private.tls_utils import load_certs_from_env
import psutil
pwd = None
if sys.platform != "win32":
import pwd
logger = logging.getLogger(__name__)
# prctl support is detected dynamically once, and assumed thereafter.
linux_prctl = None
# Windows can bind processes' lifetimes to that of kernel-level "job objects".
win32_job = None
win32_AssignProcessToJobObject = None
def get_user_temp_dir():
    """Return the temp directory Ray should use.

    Precedence: RAY_TMPDIR env var, then TMPDIR (Linux only), then a fixed
    /tmp on macOS/Linux (for compatibility), then the platform default.
    """
    env = os.environ
    if "RAY_TMPDIR" in env:
        return env["RAY_TMPDIR"]
    if sys.platform.startswith("linux") and "TMPDIR" in env:
        return env["TMPDIR"]
    if sys.platform.startswith("darwin") or sys.platform.startswith("linux"):
        return os.path.join(os.sep, "tmp")
    return tempfile.gettempdir()
def get_ray_temp_dir():
    """Return the "ray" subdirectory of the user temp dir (Ray's root tmp)."""
    return os.path.join(get_user_temp_dir(), "ray")
def _random_string():
    """Return ray_constants.ID_SIZE random bytes derived from a fresh UUID
    via SHAKE-128."""
    digest = hashlib.shake_128(uuid.uuid4().bytes).digest(ray_constants.ID_SIZE)
    assert len(digest) == ray_constants.ID_SIZE
    return digest
def format_error_message(exception_message: str, task_exception: bool = False):
    """Improve a raw exception/traceback message for display.

    Args:
        exception_message: The multi-line message to filter.
        task_exception: If True, drop lines 1 and 2 (0-indexed), which for
            errors raised inside tasks always contain the same
            worker-internal boilerplate.

    Returns:
        The filtered message.
    """
    lines = exception_message.split("\n")
    if task_exception:
        # Keep line 0 and everything from line 3 onward.
        lines = lines[0:1] + lines[3:]
        # (A dead `pass` statement was removed here.)
    return "\n".join(lines)
def push_error_to_driver(
    worker, error_type: str, message: str, job_id: Optional[str] = None
):
    """Push an error message to the driver through the worker's core worker.

    ``job_id`` defaults to the nil JobID, which targets all drivers.
    """
    job_id = ray.JobID.nil() if job_id is None else job_id
    assert isinstance(job_id, ray.JobID)
    worker.core_worker.push_error(job_id, error_type, message, time.time())
def construct_error_message(job_id, error_type, message, timestamp):
    """Build a populated ErrorTableData protobuf.

    Args:
        job_id: The job the error belongs to (a ray.JobID).
        error_type: Error type identifier string.
        message: Human-readable error message.
        timestamp: Timestamp (float) of the error.

    Returns:
        The ErrorTableData protobuf message.
    """
    data = ErrorTableData()
    data.job_id = job_id.binary()
    data.type = error_type
    data.error_message = message
    data.timestamp = timestamp
    return data
def publish_error_to_driver(
    error_type: str,
    message: str,
    gcs_publisher,
    job_id=None,
):
    """Publish an error to the driver via the GCS publisher (best-effort).

    ``job_id`` defaults to the nil JobID; publish failures are logged, not
    raised.
    """
    if job_id is None:
        job_id = ray.JobID.nil()
    assert isinstance(job_id, ray.JobID)
    payload = construct_error_message(job_id, error_type, message, time.time())
    try:
        gcs_publisher.publish_error(job_id.hex().encode(), payload)
    except Exception:
        logger.exception(f"Failed to publish error {payload}")
def random_string():
    """Generate ray_constants.ID_SIZE random bytes from true randomness,
    preserving numpy's global RNG state across the call."""
    saved_state = np.random.get_state()
    # Seed from true randomness so IDs are unpredictable.
    np.random.seed(None)
    try:
        return np.random.bytes(ray_constants.ID_SIZE)
    finally:
        # Restore the caller-visible RNG state.
        np.random.set_state(saved_state)
def decode(byte_str: str, allow_none: bool = False, encode_type: str = "utf-8"):
    """Decode a bytes object to str.

    Args:
        byte_str: The bytes to decode.
        allow_none: If True, a None input is mapped to "" instead of
            raising.
        encode_type: The codec name to decode with.

    Raises:
        ValueError: If ``byte_str`` is not bytes (and not an allowed None).
    """
    if byte_str is None and allow_none:
        return ""
    if not isinstance(byte_str, bytes):
        raise ValueError(f"The argument {byte_str} must be a bytes object.")
    # The former `sys.version_info >= (3, 0)` guard was dead code: Ray
    # requires Python 3, so we always decode.
    return byte_str.decode(encode_type)
def ensure_str(s, encoding="utf-8", errors="strict"):
    """Coerce ``s`` to str: bytes are decoded, str passes through unchanged."""
    if isinstance(s, bytes):
        return s.decode(encoding, errors)
    assert isinstance(s, str)
    return s
def binary_to_object_ref(binary_object_ref):
    """Wrap raw object-ref bytes in a ray.ObjectRef handle."""
    return ray.ObjectRef(binary_object_ref)
def binary_to_task_id(binary_task_id):
    """Wrap raw task-id bytes in a ray.TaskID handle."""
    return ray.TaskID(binary_task_id)
def binary_to_hex(identifier):
    """Return the lowercase hex string representation of ``identifier`` bytes.

    The former Python-2 guard around the ``.decode()`` call was dead code;
    ``binascii.hexlify`` returns bytes, which we always decode to str.
    """
    return binascii.hexlify(identifier).decode()
def hex_to_binary(hex_identifier):
    """Inverse of binary_to_hex: convert a hex string back to bytes."""
    return binascii.unhexlify(hex_identifier)
# TODO(qwang): Remove these hepler functions
# once we separate `WorkerID` from `UniqueID`.
def compute_job_id_from_driver(driver_id):
    """Extract the JobID embedded in a driver's WorkerID.

    The job id occupies the first JobID.size() bytes of the worker id.
    """
    assert isinstance(driver_id, ray.WorkerID)
    return ray.JobID(driver_id.binary()[0 : ray.JobID.size()])
def compute_driver_id_from_job(job_id):
    """Derive the driver WorkerID for a JobID by padding it with 0xff bytes
    up to ray_constants.ID_SIZE."""
    assert isinstance(job_id, ray.JobID)
    rest_length = ray_constants.ID_SIZE - job_id.size()
    driver_id_str = job_id.binary() + (rest_length * b"\xff")
    return ray.WorkerID(driver_id_str)
def get_cuda_visible_devices():
    """Parse CUDA_VISIBLE_DEVICES into a list of device ID strings.

    Returns:
        None when the variable is unset; [] when it is empty or
        "NoDevFiles"; otherwise the comma-separated IDs (integers or UUIDs)
        as strings.
    """
    raw = os.environ.get("CUDA_VISIBLE_DEVICES")
    if raw is None:
        return None
    if raw in ("", "NoDevFiles"):
        return []
    return raw.split(",")
# Most recent value passed to set_cuda_visible_devices, cached so redundant
# environment updates can be skipped.
last_set_gpu_ids = None
def set_cuda_visible_devices(gpu_ids):
    """Export CUDA_VISIBLE_DEVICES from ``gpu_ids``.

    No-op when the ray_constants "no-set" env var is present, or when the
    same ids were already exported by a previous call.
    """
    global last_set_gpu_ids
    if os.environ.get(ray_constants.NOSET_CUDA_VISIBLE_DEVICES_ENV_VAR):
        return
    if gpu_ids == last_set_gpu_ids:
        # Optimization: already exported; skip the redundant update.
        return
    os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(str(gpu) for gpu in gpu_ids)
    last_set_gpu_ids = gpu_ids
def resources_from_ray_options(options_dict: Dict[str, Any]) -> Dict[str, Any]:
    """Translate Ray task/actor options into a resource-demand dict.

    Raises:
        ValueError: If the custom resources dict uses the reserved keys
            'CPU'/'GPU' or 'memory'/'object_store_memory'.
    """
    resources = dict(options_dict.get("resources") or {})
    if "CPU" in resources or "GPU" in resources:
        raise ValueError(
            "The resources dictionary must not contain the key 'CPU' or 'GPU'"
        )
    elif "memory" in resources or "object_store_memory" in resources:
        raise ValueError(
            "The resources dictionary must not "
            "contain the key 'memory' or 'object_store_memory'"
        )

    num_cpus = options_dict.get("num_cpus")
    if num_cpus is not None:
        resources["CPU"] = num_cpus

    num_gpus = options_dict.get("num_gpus")
    if num_gpus is not None:
        resources["GPU"] = num_gpus

    memory = options_dict.get("memory")
    if memory is not None:
        resources["memory"] = ray_constants.to_memory_units(memory, round_up=True)

    object_store_memory = options_dict.get("object_store_memory")
    if object_store_memory is not None:
        resources["object_store_memory"] = ray_constants.to_memory_units(
            object_store_memory, round_up=True
        )

    accelerator_type = options_dict.get("accelerator_type")
    if accelerator_type is not None:
        resources[
            f"{ray_constants.RESOURCE_CONSTRAINT_PREFIX}{accelerator_type}"
        ] = 0.001
    return resources
class Unbuffered(object):
    """File-object wrapper that flushes the underlying stream after every
    write, so output appears immediately."""

    def __init__(self, stream):
        self.stream = stream

    def write(self, data):
        self.stream.write(data)
        self.stream.flush()

    def writelines(self, datas):
        self.stream.writelines(datas)
        self.stream.flush()

    def __getattr__(self, attr):
        # Delegate every other attribute (close, fileno, ...) to the stream.
        return getattr(self.stream, attr)
def open_log(path, unbuffered=False, **kwargs):
    """Open a log file, defaulting to line-buffered UTF-8 append mode.

    Caller-supplied kwargs override the defaults. When ``unbuffered`` is
    True the stream is wrapped in Unbuffered so every write is flushed
    (see test_advanced_3.py::test_logging_to_driver).
    """
    defaults = {"buffering": 1, "mode": "a", "encoding": "utf-8"}
    for key, value in defaults.items():
        kwargs.setdefault(key, value)
    stream = open(path, **kwargs)
    return Unbuffered(stream) if unbuffered else stream
def get_system_memory(
    # For cgroups v1:
    memory_limit_filename="/sys/fs/cgroup/memory/memory.limit_in_bytes",
    # For cgroups v2:
    memory_limit_filename_v2="/sys/fs/cgroup/memory.max",
):
    """Return the usable memory in bytes, respecting container limits.

    Reads the cgroup (v1 then v2) memory limit when present; the result is
    the smaller of that limit and psutil's reported total, since the cgroup
    file holds a very large value when no limit is configured.
    """
    docker_limit = None
    if os.path.exists(memory_limit_filename):
        with open(memory_limit_filename, "r") as f:
            docker_limit = int(f.read())
    elif os.path.exists(memory_limit_filename_v2):
        with open(memory_limit_filename_v2, "r") as f:
            raw = f.read()
        # cgroups v2 reports the literal string "max" when unset.
        docker_limit = int(raw) if raw.isnumeric() else None

    host_total = psutil.virtual_memory().total
    if docker_limit is None:
        return host_total
    return min(docker_limit, host_total)
def _get_docker_cpus(
cpu_quota_file_name="/sys/fs/cgroup/cpu/cpu.cfs_quota_us",
cpu_period_file_name="/sys/fs/cgroup/cpu/cpu.cfs_period_us",
cpuset_file_name="/sys/fs/cgroup/cpuset/cpuset.cpus",
cpu_max_file_name="/sys/fs/cgroup/cpu.max",
) -> Optional[float]:
# Docker has 2 underyling ways of implementing CPU limits:
# https://docs.docker.com/config/containers/resource_constraints/#configure-the-default-cfs-scheduler
# 1. --cpuset-cpus 2. --cpus or --cpu-quota/--cpu-period (--cpu-shares is a
# soft limit so we don't worry about it). For Ray's purposes, if we use
# docker, the number of vCPUs on a machine is whichever is set (ties broken
# by smaller value).
cpu_quota = None
# See: https://bugs.openjdk.java.net/browse/JDK-8146115
if os.path.exists(cpu_quota_file_name) and os.path.exists(cpu_period_file_name):
try:
with open(cpu_quota_file_name, "r") as quota_file, open(
cpu_period_file_name, "r"
) as period_file:
cpu_quota = float(quota_file.read()) / float(period_file.read())
except Exception:
logger.exception("Unexpected error calculating docker cpu quota.")
# Look at cpu.max for cgroups v2
elif os.path.exists(cpu_max_file_name):
try:
max_file = open(cpu_max_file_name).read()
quota_str, period_str = max_file.split()
if quota_str.isnumeric() and period_str.isnumeric():
cpu_quota = float(quota_str) / float(period_str)
else:
# quota_str is "max" meaning the cpu quota is unset
cpu_quota = None
except Exception:
logger.exception("Unexpected error calculating docker cpu quota.")
if (cpu_quota is not None) and (cpu_quota < 0):
cpu_quota = None
elif cpu_quota == 0:
# Round up in case the cpu limit is less than 1.
cpu_quota = 1
cpuset_num = None
if os.path.exists(cpuset_file_name):
try:
with open(cpuset_file_name) as cpuset_file:
ranges_as_string = cpuset_file.read()
ranges = ranges_as_string.split(",")
cpu_ids = []
for num_or_range in ranges:
if "-" in num_or_range:
start, end = num_or_range.split("-")
cpu_ids.extend(list(range(int(start), int(end) + 1)))
else:
cpu_ids.append(int(num_or_range))
cpuset_num = len(cpu_ids)
except Exception:
logger.exception("Unexpected error calculating docker cpuset ids.")
# Possible to-do: Parse cgroups v2's cpuset.cpus.effective for the number
if cpu_quota and cpuset_num:
return min(cpu_quota, cpuset_num)
return cpu_quota or cpuset_num
def get_num_cpus() -> int:
    """Return the number of CPUs Ray should use on this machine.

    Prefers the container (docker/cgroup) CPU limit when one is detected
    and differs from multiprocessing.cpu_count(); set
    RAY_USE_MULTIPROCESSING_CPU_COUNT=1 to force the plain count.
    """
    cpu_count = multiprocessing.cpu_count()
    if os.environ.get("RAY_USE_MULTIPROCESSING_CPU_COUNT"):
        logger.info(
            "Detected RAY_USE_MULTIPROCESSING_CPU_COUNT=1: Using "
            "multiprocessing.cpu_count() to detect the number of CPUs. "
            "This may be inconsistent when used inside docker. "
            "To correctly detect CPUs, unset the env var: "
            "`RAY_USE_MULTIPROCESSING_CPU_COUNT`."
        )
        return cpu_count
    try:
        # Prefer the cgroup CPU limit when it differs from the host count.
        docker_count = _get_docker_cpus()
        if docker_count is not None and docker_count != cpu_count:
            # Suppress the warning on K8s or when explicitly disabled.
            if (
                "RAY_DISABLE_DOCKER_CPU_WARNING" not in os.environ
                and "KUBERNETES_SERVICE_HOST" not in os.environ
            ):
                logger.warning(
                    "Detecting docker specified CPUs. In "
                    "previous versions of Ray, CPU detection in containers "
                    "was incorrect. Please ensure that Ray has enough CPUs "
                    "allocated. As a temporary workaround to revert to the "
                    "prior behavior, set "
                    "`RAY_USE_MULTIPROCESSING_CPU_COUNT=1` as an env var "
                    "before starting Ray. Set the env var: "
                    "`RAY_DISABLE_DOCKER_CPU_WARNING=1` to mute this warning."
                )
            # Fractional CPU limits are truncated to an integer.
            if int(docker_count) != float(docker_count):
                logger.warning(
                    f"Ray currently does not support initializing Ray"
                    f"with fractional cpus. Your num_cpus will be "
                    f"truncated from {docker_count} to "
                    f"{int(docker_count)}."
                )
                docker_count = int(docker_count)
            cpu_count = docker_count
    except Exception:
        # Best-effort: fall back to the multiprocessing count on any error.
        pass
    return cpu_count
def get_used_memory():
    """Return used memory in bytes, preferring cgroup accounting inside a
    container (v1 checked before v2)."""
    docker_usage = None
    for usage_file in (
        "/sys/fs/cgroup/memory/memory.usage_in_bytes",  # cgroups v1
        "/sys/fs/cgroup/memory.current",  # cgroups v2
    ):
        if os.path.exists(usage_file):
            with open(usage_file, "r") as f:
                docker_usage = int(f.read())
            break

    psutil_memory_in_bytes = psutil.virtual_memory().used
    if docker_usage is not None:
        # Inside Docker, trust the smaller of the two readings.
        return min(docker_usage, psutil_memory_in_bytes)
    return psutil_memory_in_bytes
def estimate_available_memory():
    """Estimate free memory in bytes as total minus used."""
    total = get_system_memory()
    used = get_used_memory()
    return total - used
def get_shared_memory_bytes():
    """Return the number of bytes available in /dev/shm (Linux only)."""
    # Make sure this is only called on Linux.
    assert sys.platform == "linux" or sys.platform == "linux2"

    shm_fd = os.open("/dev/shm", os.O_RDONLY)
    try:
        stats = os.fstatvfs(shm_fd)
        # Available bytes = filesystem block size * available block count.
        return stats.f_bsize * stats.f_bavail
    finally:
        os.close(shm_fd)
def check_oversized_function(
    pickled: bytes, name: str, obj_type: str, worker: "ray.Worker"
) -> None:
    """Warn or fail when a pickled function/actor payload is too large.

    Sizes up to FUNCTION_SIZE_WARN_THRESHOLD pass silently; sizes between
    the warn and error thresholds push a warning to the driver; anything
    larger raises ValueError.
    """
    size = len(pickled)
    if size <= ray_constants.FUNCTION_SIZE_WARN_THRESHOLD:
        return

    size_mib = size // (1024 * 1024)
    if size < ray_constants.FUNCTION_SIZE_ERROR_THRESHOLD:
        warning_message = (
            "The {} {} is very large ({} MiB). "
            "Check that its definition is not implicitly capturing a large "
            "array or other object in scope. Tip: use ray.put() to put large "
            "objects in the Ray object store."
        ).format(obj_type, name, size_mib)
        if worker:
            push_error_to_driver(
                worker,
                ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR,
                "Warning: " + warning_message,
                job_id=worker.current_job_id,
            )
        return

    raise ValueError(
        (
            "The {} {} is too large ({} MiB > FUNCTION_SIZE_ERROR_THRESHOLD={}"
            " MiB). Check that its definition is not implicitly capturing a "
            "large array or other object in scope. Tip: use ray.put() to "
            "put large objects in the Ray object store."
        ).format(
            obj_type,
            name,
            size_mib,
            ray_constants.FUNCTION_SIZE_ERROR_THRESHOLD // (1024 * 1024),
        )
    )
def is_main_thread():
    """Return True iff called from the process's main thread.

    Uses an identity check against ``threading.main_thread()`` instead of
    comparing the thread name via the deprecated ``Thread.getName()``:
    a name comparison breaks if a thread is renamed or if a worker thread
    happens to be named "MainThread".
    """
    return threading.current_thread() is threading.main_thread()
def detect_fate_sharing_support_win32():
    """Detect (and lazily initialize) fate-sharing support on Windows.

    On first call, caches results in two module-level globals:
      * ``win32_job`` -- a Job Object configured so its processes die when
        the last job handle is closed (i.e. when this process exits), or
        ``False`` when unsupported.
      * ``win32_AssignProcessToJobObject`` -- the kernel32 function used to
        attach children to that job, or ``False`` when unavailable.

    Returns:
        True iff child processes can be made to die with this process.
    """
    global win32_job, win32_AssignProcessToJobObject
    if win32_job is None and sys.platform == "win32":
        import ctypes

        try:
            from ctypes.wintypes import BOOL, DWORD, HANDLE, LPVOID, LPCWSTR

            # Declare prototypes so ctypes marshals 64-bit HANDLEs correctly.
            kernel32 = ctypes.WinDLL("kernel32")
            kernel32.CreateJobObjectW.argtypes = (LPVOID, LPCWSTR)
            kernel32.CreateJobObjectW.restype = HANDLE
            sijo_argtypes = (HANDLE, ctypes.c_int, LPVOID, DWORD)
            kernel32.SetInformationJobObject.argtypes = sijo_argtypes
            kernel32.SetInformationJobObject.restype = BOOL
            kernel32.AssignProcessToJobObject.argtypes = (HANDLE, HANDLE)
            kernel32.AssignProcessToJobObject.restype = BOOL
            kernel32.IsDebuggerPresent.argtypes = ()
            kernel32.IsDebuggerPresent.restype = BOOL
        except (AttributeError, TypeError, ImportError):
            kernel32 = None
        job = kernel32.CreateJobObjectW(None, None) if kernel32 else None
        # Wrap the raw handle so it is closed automatically when released.
        job = subprocess.Handle(job) if job else job
        if job:
            from ctypes.wintypes import DWORD, LARGE_INTEGER, ULARGE_INTEGER

            # ctypes mirrors of the Win32 structures accepted by
            # SetInformationJobObject with JobObjectExtendedLimitInformation.
            class JOBOBJECT_BASIC_LIMIT_INFORMATION(ctypes.Structure):
                _fields_ = [
                    ("PerProcessUserTimeLimit", LARGE_INTEGER),
                    ("PerJobUserTimeLimit", LARGE_INTEGER),
                    ("LimitFlags", DWORD),
                    ("MinimumWorkingSetSize", ctypes.c_size_t),
                    ("MaximumWorkingSetSize", ctypes.c_size_t),
                    ("ActiveProcessLimit", DWORD),
                    ("Affinity", ctypes.c_size_t),
                    ("PriorityClass", DWORD),
                    ("SchedulingClass", DWORD),
                ]

            class IO_COUNTERS(ctypes.Structure):
                _fields_ = [
                    ("ReadOperationCount", ULARGE_INTEGER),
                    ("WriteOperationCount", ULARGE_INTEGER),
                    ("OtherOperationCount", ULARGE_INTEGER),
                    ("ReadTransferCount", ULARGE_INTEGER),
                    ("WriteTransferCount", ULARGE_INTEGER),
                    ("OtherTransferCount", ULARGE_INTEGER),
                ]

            class JOBOBJECT_EXTENDED_LIMIT_INFORMATION(ctypes.Structure):
                _fields_ = [
                    ("BasicLimitInformation", JOBOBJECT_BASIC_LIMIT_INFORMATION),
                    ("IoInfo", IO_COUNTERS),
                    ("ProcessMemoryLimit", ctypes.c_size_t),
                    ("JobMemoryLimit", ctypes.c_size_t),
                    ("PeakProcessMemoryUsed", ctypes.c_size_t),
                    ("PeakJobMemoryUsed", ctypes.c_size_t),
                ]

            debug = kernel32.IsDebuggerPresent()

            # Defined in <WinNT.h>; also available here:
            # https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/nf-jobapi2-setinformationjobobject
            JobObjectExtendedLimitInformation = 9
            JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x00000800
            JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION = 0x00000400
            JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x00002000
            buf = JOBOBJECT_EXTENDED_LIMIT_INFORMATION()
            # Kill-on-close is skipped when a debugger is attached --
            # presumably so a debug session does not tear down all children;
            # TODO(review): confirm intent.
            buf.BasicLimitInformation.LimitFlags = (
                (0 if debug else JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE)
                | JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION
                | JOB_OBJECT_LIMIT_BREAKAWAY_OK
            )
            infoclass = JobObjectExtendedLimitInformation
            if not kernel32.SetInformationJobObject(
                job, infoclass, ctypes.byref(buf), ctypes.sizeof(buf)
            ):
                # Configuration failed; treat fate sharing as unsupported.
                job = None
        win32_AssignProcessToJobObject = (
            kernel32.AssignProcessToJobObject if kernel32 is not None else False
        )
        win32_job = job if job else False
    return bool(win32_job)
def detect_fate_sharing_support_linux():
    """Detect (and cache) whether prctl(PR_SET_PDEATHSIG) is usable.

    Caches the ctypes handle to ``prctl`` in the module-level
    ``linux_prctl`` global (``False`` when unavailable).
    """
    global linux_prctl
    if linux_prctl is None and sys.platform.startswith("linux"):
        prctl_fn = None
        try:
            from ctypes import c_int, c_ulong, CDLL

            prctl_fn = CDLL(None).prctl
            prctl_fn.restype = c_int
            prctl_fn.argtypes = [c_int, c_ulong, c_ulong, c_ulong, c_ulong]
        except (AttributeError, TypeError):
            prctl_fn = None
        linux_prctl = prctl_fn if prctl_fn else False
    return bool(linux_prctl)
def detect_fate_sharing_support():
    """Return True/False for fate-sharing support, or None on other OSes."""
    if sys.platform == "win32":
        return detect_fate_sharing_support_win32()
    if sys.platform.startswith("linux"):
        return detect_fate_sharing_support_linux()
    return None
def set_kill_on_parent_death_linux():
    """Arrange for this (child) process to be SIGKILLed when its parent dies.

    Linux only; requires prctl(PR_SET_PDEATHSIG) support to have been
    detected first.
    """
    if not detect_fate_sharing_support_linux():
        assert False, "PR_SET_PDEATHSIG used despite being unavailable"
    import signal

    PR_SET_PDEATHSIG = 1
    rc = linux_prctl(PR_SET_PDEATHSIG, signal.SIGKILL, 0, 0, 0)
    if rc != 0:
        import ctypes

        raise OSError(ctypes.get_errno(), "prctl(PR_SET_PDEATHSIG) failed")
def set_kill_child_on_death_win32(child_proc):
    """Attach *child_proc* to the job object so it dies with this process.

    Accepts either a subprocess.Popen or a subprocess.Handle (Windows only).
    """
    if isinstance(child_proc, subprocess.Popen):
        child_proc = child_proc._handle
    assert isinstance(child_proc, subprocess.Handle)

    if not detect_fate_sharing_support_win32():
        assert False, "AssignProcessToJobObject used despite being unavailable"
    if not win32_AssignProcessToJobObject(win32_job, int(child_proc)):
        import ctypes

        raise OSError(ctypes.get_last_error(), "AssignProcessToJobObject() failed")
def set_sigterm_handler(sigterm_handler):
    """Register *sigterm_handler* for the platform's termination signal.

    Windows console apps have no SIGTERM, so SIGBREAK (Ctrl+Break) is used
    there instead; these handlers only work for console applications.
    TODO(mehrdadn): implement graceful process termination mechanism
    """
    # SIGBREAK exists only on Windows; the ternary is evaluated lazily.
    target = signal.SIGBREAK if sys.platform == "win32" else signal.SIGTERM
    signal.signal(target, sigterm_handler)
def try_make_directory_shared(directory_path):
    """Best-effort chmod of *directory_path* to 0o777 so anyone can use it.

    Permission errors are deliberately swallowed: the caller may not own
    the directory, and the chmod is attempted whether the directory is new
    or not to avoid race conditions (ray-project/ray/#3591).
    """
    try:
        os.chmod(directory_path, 0o0777)
    except OSError as exc:
        if exc.errno not in (errno.EACCES, errno.EPERM):
            raise
def try_to_create_directory(directory_path):
    """Create *directory_path* (and parents) if needed, then open its mode.

    The permission change matters when multiple people share one machine
    and write logs under the same root.
    """
    expanded = os.path.expanduser(directory_path)
    os.makedirs(expanded, exist_ok=True)
    try_make_directory_shared(expanded)
def try_to_symlink(symlink_path, target_path):
    """Best-effort creation of a symlink *symlink_path* -> *target_path*.

    An existing symlink is replaced; an existing regular file is left
    untouched.  OS errors are silently ignored.
    """
    link = os.path.expanduser(symlink_path)
    target = os.path.expanduser(target_path)

    if os.path.exists(link):
        if not os.path.islink(link):
            # Never clobber a real file that happens to be in the way.
            return
        try:
            os.remove(link)
        except OSError:
            return

    try:
        os.symlink(target, link)
    except OSError:
        pass
def get_user():
    """Return the current OS user name, or "" when it cannot be determined."""
    try:
        # pwd is None on platforms without the module (e.g. Windows).
        return "" if pwd is None else pwd.getpwuid(os.getuid()).pw_name
    except Exception:
        return ""
def get_function_args(callable):
    """Return the parameter names of *callable* as a list (order unspecified)."""
    return list(frozenset(signature(callable).parameters))
def get_conda_bin_executable(executable_name):
    """Return the path to *executable_name* inside conda's bin directory.

    Falls back to the bare name (resolved via $PATH) when conda cannot be
    located.  Uses CONDA_EXE as per https://github.com/conda/conda/issues/7126
    """
    conda_exe = os.environ.get("CONDA_EXE")
    if conda_exe is None:
        return executable_name
    return os.path.join(os.path.dirname(conda_exe), executable_name)
def get_conda_env_dir(env_name):
    """Locate the directory of the conda environment *env_name*.

    Works whether the caller is in (base), in a user-created env, or
    (rarely) outside any env with only CONDA_EXE set.  Raises ValueError
    when conda cannot be detected or the env directory does not exist.
    """
    conda_prefix = os.environ.get("CONDA_PREFIX")
    if conda_prefix is None:
        # The caller is neither in a conda env nor in (base).  This is rare
        # because by default new terminals start in (base), but we can still
        # support it via CONDA_EXE.
        conda_exe = os.environ.get("CONDA_EXE")
        if conda_exe is None:
            raise ValueError(
                "Cannot find environment variables set by conda. "
                "Please verify conda is installed."
            )
        # Example: CONDA_EXE=$HOME/anaconda3/bin/python
        # Strip out /bin/python by going up two parent directories.
        conda_prefix = str(Path(conda_exe).parent.parent)

    if os.environ.get("CONDA_DEFAULT_ENV") == "base":
        # In (base): CONDA_PREFIX is the anaconda root itself.
        env_dir = (
            conda_prefix
            if env_name == "base"
            else os.path.join(conda_prefix, "envs", env_name)
        )
    else:
        # In a user env: CONDA_PREFIX=.../envs/<current>; the envs dir is
        # its parent.
        env_dir = os.path.join(os.path.split(conda_prefix)[0], env_name)
    if not os.path.isdir(env_dir):
        raise ValueError(
            "conda env "
            + env_name
            + " not found in conda envs directory. Run `conda env list` to "
            + "verify the name is correct."
        )
    return env_dir
def get_call_location(back: int = 1):
    """Return "<file>:<line>" of the caller *back* frames up, or "UNKNOWN"."""
    frames = inspect.stack()
    try:
        caller = frames[back + 1]
    except IndexError:
        return "UNKNOWN"
    return f"{caller.filename}:{caller.lineno}"
_PRINTED_WARNING = set()


# The following is inspired by
# https://github.com/tensorflow/tensorflow/blob/dec8e0b11f4f87693b67e125e67dfbc68d26c205/tensorflow/python/util/deprecation.py#L274-L329
def deprecated(
    instructions: Optional[str] = None,
    removal_release: Optional[str] = None,
    removal_date: Optional[str] = None,
    warn_once: bool = True,
):
    """Decorator factory that warns when the wrapped function is called.

    At most one of *removal_release* / *removal_date* may be given.  With
    *warn_once* the warning is emitted only on the first call per function.
    """
    if removal_release is not None and removal_date is not None:
        raise ValueError(
            "Only one of removal_release and removal_date should be specified."
        )

    def deprecated_wrapper(func):
        @functools.wraps(func)
        def new_func(*args, **kwargs):
            global _PRINTED_WARNING
            if func not in _PRINTED_WARNING:
                if warn_once:
                    _PRINTED_WARNING.add(func)
                if removal_release is not None:
                    removal_text = f"in version {removal_release}."
                elif removal_date is not None:
                    removal_text = f"after {removal_date}"
                else:
                    removal_text = "in a future version"
                suffix = f" {instructions}" if instructions is not None else ""
                msg = (
                    "From {}: {} (from {}) is deprecated and will ".format(
                        get_call_location(), func.__name__, func.__module__
                    )
                    + "be removed "
                    + removal_text
                    + suffix
                )
                warnings.warn(msg)
            return func(*args, **kwargs)

        return new_func

    return deprecated_wrapper
def import_attr(full_path: str):
    """Import and return an attribute from "module.attr" or "module:attr".

    The colon form disambiguates nested modules from attributes, e.g.
    "module.submodule:attr".  Raises TypeError for None and ValueError for
    paths that cannot be split into a module part and an attribute part
    (previously a separator-free path like "os" produced a confusing
    ImportError for a nonsense module name).
    """
    if full_path is None:
        raise TypeError("import path cannot be None")

    if ":" in full_path:
        if full_path.count(":") > 1:
            raise ValueError(
                f'Got invalid import path "{full_path}". An '
                "import path may have at most one colon."
            )
        module_name, attr_name = full_path.split(":")
    else:
        last_period_idx = full_path.rfind(".")
        if last_period_idx == -1:
            # No separator at all: there is no attribute part to import.
            raise ValueError(
                f'Got invalid import path "{full_path}". An import path '
                "must contain a module and an attribute, separated by a "
                "colon or a period."
            )
        module_name = full_path[:last_period_idx]
        attr_name = full_path[last_period_idx + 1 :]
    module = importlib.import_module(module_name)
    return getattr(module, attr_name)
def get_wheel_filename(
    sys_platform: str = sys.platform,
    ray_version: str = ray.__version__,
    py_version: str = f"{sys.version_info.major}{sys.version_info.minor}",
) -> str:
    """Build the expected Ray wheel file name for a platform/version combo.

    Only CPython 3.6-3.9 on darwin/linux/win32 is supported.
    """
    assert py_version in ["36", "37", "38", "39"], py_version
    assert sys_platform in ("darwin", "linux", "win32"), sys_platform

    if sys_platform == "darwin":
        platform_tag = (
            "macosx_10_15_x86_64"
            if py_version in ["38", "39"]
            else "macosx_10_15_intel"
        )
    elif sys_platform == "linux":
        platform_tag = "manylinux2014_x86_64"
    else:
        platform_tag = "win_amd64"

    # CPython 3.6/3.7 ABI tags carry a trailing "m".
    abi_suffix = "m" if py_version in ["36", "37"] else ""
    return (
        f"ray-{ray_version}-cp{py_version}-"
        f"cp{py_version}{abi_suffix}"
        f"-{platform_tag}.whl"
    )
def get_master_wheel_url(
    ray_commit: str = ray.__commit__,
    sys_platform: str = sys.platform,
    ray_version: str = ray.__version__,
    py_version: str = f"{sys.version_info.major}{sys.version_info.minor}",
) -> str:
    """Return the S3 URL of the nightly ("master") wheel for a commit.

    Fix: the computed wheel *filename* was never interpolated into the URL
    (the string literally ended in "(unknown)"), producing a dead link.
    """
    filename = get_wheel_filename(
        sys_platform=sys_platform, ray_version=ray_version, py_version=py_version
    )
    return (
        f"https://s3-us-west-2.amazonaws.com/ray-wheels/master/"
        f"{ray_commit}/{filename}"
    )
def get_release_wheel_url(
    ray_commit: str = ray.__commit__,
    sys_platform: str = sys.platform,
    ray_version: str = ray.__version__,
    py_version: str = f"{sys.version_info.major}{sys.version_info.minor}",
) -> str:
    """Return the S3 URL of the release wheel for a version/commit.

    Fix: the computed wheel *filename* was never interpolated into the URL
    (the string literally ended in "(unknown)"); appending it makes the
    URL match the documented format shown below.
    """
    filename = get_wheel_filename(
        sys_platform=sys_platform, ray_version=ray_version, py_version=py_version
    )
    return (
        f"https://ray-wheels.s3-us-west-2.amazonaws.com/releases/"
        f"{ray_version}/{ray_commit}/{filename}"
    )


# e.g. https://ray-wheels.s3-us-west-2.amazonaws.com/releases/1.4.0rc1/e7c7
# f6371a69eb727fa469e4cd6f4fbefd143b4c/ray-1.4.0rc1-cp36-cp36m-manylinux201
# 4_x86_64.whl
def validate_namespace(namespace: str):
    """Raise TypeError/ValueError unless *namespace* is a non-empty string."""
    if not isinstance(namespace, str):
        raise TypeError("namespace must be None or a string.")
    if namespace == "":
        raise ValueError(
            '"" is not a valid namespace. ' "Pass None to not specify a namespace."
        )
def init_grpc_channel(
    address: str,
    options: Optional[Sequence[Tuple[str, Any]]] = None,
    asynchronous: bool = False,
):
    """Open a (possibly TLS-secured, possibly asyncio) gRPC channel.

    TLS is toggled via the RAY_USE_TLS environment variable; certificates
    are provided by load_certs_from_env().
    """
    grpc_module = aiogrpc if asynchronous else grpc
    use_tls = os.environ.get("RAY_USE_TLS", "0").lower() in ("1", "true")
    if not use_tls:
        return grpc_module.insecure_channel(address, options=options)

    server_cert_chain, private_key, ca_cert = load_certs_from_env()
    credentials = grpc.ssl_channel_credentials(
        certificate_chain=server_cert_chain,
        private_key=private_key,
        root_certificates=ca_cert,
    )
    return grpc_module.secure_channel(address, credentials, options=options)
def check_dashboard_dependencies_installed() -> bool:
    """Return True when the optional dashboard dependencies are importable.

    Importing ray.dashboard.optional_deps itself pulls in every extra
    package the dashboard needs, so the import succeeding is the check.
    """
    try:
        import ray.dashboard.optional_deps  # noqa: F401
    except ImportError:
        return False
    return True
def internal_kv_list_with_retry(gcs_client, prefix, namespace, num_retries=20):
    """List internal-KV keys under *prefix*, retrying transient GCS errors.

    str arguments are encoded to bytes.  Retries up to *num_retries* times
    with a 2s pause between attempts, then raises RuntimeError.
    """
    if isinstance(prefix, str):
        prefix = prefix.encode()
    if isinstance(namespace, str):
        namespace = namespace.encode()

    result = None
    for _ in range(num_retries):
        try:
            result = gcs_client.internal_kv_keys(prefix, namespace)
        except Exception as e:
            if isinstance(e, grpc.RpcError) and e.code() in (
                grpc.StatusCode.UNAVAILABLE,
                grpc.StatusCode.UNKNOWN,
            ):
                logger.warning(
                    f"Unable to connect to GCS at {gcs_client.address}. "
                    "Check that (1) Ray GCS with matching version started "
                    "successfully at the specified address, and (2) there is "
                    "no firewall setting preventing access."
                )
            else:
                logger.exception("Internal KV List failed")
            result = None

        if result is not None:
            return result
        logger.debug(f"Fetched {prefix}=None from KV. Retrying.")
        time.sleep(2)

    raise RuntimeError(
        f"Could not list '{prefix}' from GCS. Did GCS start successfully?"
    )
def internal_kv_get_with_retry(gcs_client, key, namespace, num_retries=20):
    """Read one internal-KV entry, retrying transient GCS errors.

    str key/namespace are encoded to bytes (the namespace encoding was
    missing here, inconsistent with internal_kv_list_with_retry and
    internal_kv_put_with_retry).  Retries up to *num_retries* times with a
    2s pause; raises RuntimeError when no non-empty value was fetched.
    """
    result = None
    if isinstance(key, str):
        key = key.encode()
    # Encode the namespace too, for consistency with the list/put helpers.
    if isinstance(namespace, str):
        namespace = namespace.encode()
    for _ in range(num_retries):
        try:
            result = gcs_client.internal_kv_get(key, namespace)
        except Exception as e:
            if isinstance(e, grpc.RpcError) and e.code() in (
                grpc.StatusCode.UNAVAILABLE,
                grpc.StatusCode.UNKNOWN,
            ):
                logger.warning(
                    f"Unable to connect to GCS at {gcs_client.address}. "
                    "Check that (1) Ray GCS with matching version started "
                    "successfully at the specified address, and (2) there is "
                    "no firewall setting preventing access."
                )
            else:
                logger.exception("Internal KV Get failed")
            result = None

        if result is not None:
            break
        else:
            logger.debug(f"Fetched {key}=None from KV. Retrying.")
            time.sleep(2)
    # NOTE: an empty payload counts as failure here (``not result``), unlike
    # the list helper; callers rely on a non-empty value.
    if not result:
        raise RuntimeError(
            f"Could not read '{key.decode()}' from GCS. Did GCS start successfully?"
        )
    return result
def internal_kv_put_with_retry(gcs_client, key, value, namespace, num_retries=20):
    """Write one internal-KV entry (overwriting), retrying transient errors.

    str arguments are encoded to bytes.  Returns whatever the client's
    internal_kv_put returns.  Raises the last grpc.RpcError when all
    retries fail, or RuntimeError when num_retries <= 0 (previously that
    path executed ``raise None``, a TypeError).
    """
    if isinstance(key, str):
        key = key.encode()
    if isinstance(value, str):
        value = value.encode()
    if isinstance(namespace, str):
        namespace = namespace.encode()
    error = None
    for _ in range(num_retries):
        try:
            return gcs_client.internal_kv_put(
                key, value, overwrite=True, namespace=namespace
            )
        except grpc.RpcError as e:
            if e.code() in (
                grpc.StatusCode.UNAVAILABLE,
                grpc.StatusCode.UNKNOWN,
            ):
                logger.warning(
                    f"Unable to connect to GCS at {gcs_client.address}. "
                    "Check that (1) Ray GCS with matching version started "
                    "successfully at the specified address, and (2) there is "
                    "no firewall setting preventing access."
                )
            else:
                logger.exception("Internal KV Put failed")
            time.sleep(2)
            error = e
    if error is None:
        # num_retries <= 0: no attempt was made, so there is no RpcError to
        # re-raise.
        raise RuntimeError("internal_kv_put_with_retry was given no retries.")
    # Reraise the last grpc.RpcError.
    raise error
def compute_version_info():
    """Return (ray version, "major.minor.micro" Python version) strings."""
    python_version = ".".join(str(part) for part in sys.version_info[:3])
    return ray.__version__, python_version
def get_directory_size_bytes(path: Union[str, Path] = ".") -> int:
    """Return the total size in bytes of the tree rooted at *path*.

    Symbolic links and *.pyc files are excluded from the total.
    """
    total = 0
    for dirpath, _, filenames in os.walk(path):
        for filename in filenames:
            full_path = os.path.join(dirpath, filename)
            # skip if it is a symbolic link or a .pyc file
            if filename.endswith(".pyc") or os.path.islink(full_path):
                continue
            total += os.path.getsize(full_path)
    return total
def check_version_info(cluster_metadata):
    """Raise RuntimeError when this process's Ray/Python versions differ
    from the versions the cluster was started with (*cluster_metadata*
    must carry "ray_version" and "python_version" keys)."""
    cluster_version_info = (
        cluster_metadata["ray_version"],
        cluster_metadata["python_version"],
    )
    version_info = compute_version_info()
    if version_info == cluster_version_info:
        return
    node_ip_address = ray._private.services.get_node_ip_address()
    raise RuntimeError(
        "Version mismatch: The cluster was started with:\n"
        "    Ray: " + cluster_version_info[0] + "\n"
        "    Python: " + cluster_version_info[1] + "\n"
        "This process on node " + node_ip_address + " was started with:" + "\n"
        "    Ray: " + version_info[0] + "\n"
        "    Python: " + version_info[1] + "\n"
    )
| true | true |
f71e3ee179bc25f0ee46fadb17b0041bb26da0fb | 10,260 | py | Python | tests/components/homematicip_cloud/test_switch.py | pcaston/core | e74d946cef7a9d4e232ae9e0ba150d18018cfe33 | [
"Apache-2.0"
] | 1 | 2021-07-08T20:09:55.000Z | 2021-07-08T20:09:55.000Z | tests/components/homematicip_cloud/test_switch.py | pcaston/core | e74d946cef7a9d4e232ae9e0ba150d18018cfe33 | [
"Apache-2.0"
] | 47 | 2021-02-21T23:43:07.000Z | 2022-03-31T06:07:10.000Z | tests/components/homematicip_cloud/test_switch.py | OpenPeerPower/core | f673dfac9f2d0c48fa30af37b0a99df9dd6640ee | [
"Apache-2.0"
] | null | null | null | """Tests for HomematicIP Cloud switch."""
from openpeerpower.components.homematicip_cloud import DOMAIN as HMIPC_DOMAIN
from openpeerpower.components.homematicip_cloud.generic_entity import (
ATTR_GROUP_MEMBER_UNREACHABLE,
)
from openpeerpower.components.switch import (
ATTR_CURRENT_POWER_W,
ATTR_TODAY_ENERGY_KWH,
DOMAIN as SWITCH_DOMAIN,
)
from openpeerpower.const import STATE_OFF, STATE_ON
from openpeerpower.setup import async_setup_component
from .helper import async_manipulate_test_data, get_and_check_entity_basics
async def test_manually_configured_platform(opp):
    """A manually configured switch platform must not create an access point."""
    config = {SWITCH_DOMAIN: {"platform": HMIPC_DOMAIN}}
    assert await async_setup_component(opp, SWITCH_DOMAIN, config)
    assert not opp.data.get(HMIPC_DOMAIN)
async def test_hmip_switch(opp, default_mock_hap_factory):
    """Round-trip turn_off/turn_on for a HomematicipSwitch (HMIP-PS)."""
    entity_id = "switch.schrank"
    entity_name = "Schrank"
    device_model = "HMIP-PS"
    mock_hap = await default_mock_hap_factory.async_get_mock_hap(
        test_devices=[entity_name]
    )

    state, device = get_and_check_entity_basics(
        opp, mock_hap, entity_id, entity_name, device_model
    )
    assert state.state == STATE_ON

    calls_before = len(device.mock_calls)
    await opp.services.async_call(
        "switch", "turn_off", {"entity_id": entity_id}, blocking=True
    )
    assert len(device.mock_calls) == calls_before + 1
    assert device.mock_calls[-1][0] == "turn_off"
    assert device.mock_calls[-1][1] == (1,)
    await async_manipulate_test_data(opp, device, "on", False)
    assert opp.states.get(entity_id).state == STATE_OFF

    await opp.services.async_call(
        "switch", "turn_on", {"entity_id": entity_id}, blocking=True
    )
    assert len(device.mock_calls) == calls_before + 3
    assert device.mock_calls[-1][0] == "turn_on"
    assert device.mock_calls[-1][1] == (1,)
    await async_manipulate_test_data(opp, device, "on", True)
    assert opp.states.get(entity_id).state == STATE_ON
async def test_hmip_switch_input(opp, default_mock_hap_factory):
    """Round-trip turn_off/turn_on for a switch input device (HmIP-FSI16)."""
    entity_id = "switch.wohnzimmer_beleuchtung"
    entity_name = "Wohnzimmer Beleuchtung"
    device_model = "HmIP-FSI16"
    mock_hap = await default_mock_hap_factory.async_get_mock_hap(
        test_devices=[entity_name]
    )

    state, device = get_and_check_entity_basics(
        opp, mock_hap, entity_id, entity_name, device_model
    )
    assert state.state == STATE_ON

    calls_before = len(device.mock_calls)
    await opp.services.async_call(
        "switch", "turn_off", {"entity_id": entity_id}, blocking=True
    )
    assert len(device.mock_calls) == calls_before + 1
    assert device.mock_calls[-1][0] == "turn_off"
    assert device.mock_calls[-1][1] == (1,)
    await async_manipulate_test_data(opp, device, "on", False)
    assert opp.states.get(entity_id).state == STATE_OFF

    await opp.services.async_call(
        "switch", "turn_on", {"entity_id": entity_id}, blocking=True
    )
    assert len(device.mock_calls) == calls_before + 3
    assert device.mock_calls[-1][0] == "turn_on"
    assert device.mock_calls[-1][1] == (1,)
    await async_manipulate_test_data(opp, device, "on", True)
    assert opp.states.get(entity_id).state == STATE_ON
async def test_hmip_switch_measuring(opp, default_mock_hap_factory):
    """Switch round trip plus power/energy attributes (HMIP-PSM)."""
    entity_id = "switch.pc"
    entity_name = "Pc"
    device_model = "HMIP-PSM"
    mock_hap = await default_mock_hap_factory.async_get_mock_hap(
        test_devices=[entity_name]
    )

    state, device = get_and_check_entity_basics(
        opp, mock_hap, entity_id, entity_name, device_model
    )
    assert state.state == STATE_ON

    calls_before = len(device.mock_calls)
    await opp.services.async_call(
        "switch", "turn_off", {"entity_id": entity_id}, blocking=True
    )
    assert len(device.mock_calls) == calls_before + 1
    assert device.mock_calls[-1][0] == "turn_off"
    assert device.mock_calls[-1][1] == (1,)
    await async_manipulate_test_data(opp, device, "on", False)
    assert opp.states.get(entity_id).state == STATE_OFF

    await opp.services.async_call(
        "switch", "turn_on", {"entity_id": entity_id}, blocking=True
    )
    assert len(device.mock_calls) == calls_before + 3
    assert device.mock_calls[-1][0] == "turn_on"
    assert device.mock_calls[-1][1] == (1,)
    await async_manipulate_test_data(opp, device, "on", True)
    await async_manipulate_test_data(opp, device, "currentPowerConsumption", 50)
    state = opp.states.get(entity_id)
    assert state.state == STATE_ON
    assert state.attributes[ATTR_CURRENT_POWER_W] == 50
    assert state.attributes[ATTR_TODAY_ENERGY_KWH] == 36

    # Without an energy counter the kWh attribute must disappear.
    await async_manipulate_test_data(opp, device, "energyCounter", None)
    assert not opp.states.get(entity_id).attributes.get(ATTR_TODAY_ENERGY_KWH)
async def test_hmip_group_switch(opp, default_mock_hap_factory):
    """Group switch round trip plus the member-unreachable attribute."""
    entity_id = "switch.strom_group"
    entity_name = "Strom Group"
    device_model = None
    mock_hap = await default_mock_hap_factory.async_get_mock_hap(test_groups=["Strom"])

    state, device = get_and_check_entity_basics(
        opp, mock_hap, entity_id, entity_name, device_model
    )
    assert state.state == STATE_ON

    calls_before = len(device.mock_calls)
    await opp.services.async_call(
        "switch", "turn_off", {"entity_id": entity_id}, blocking=True
    )
    assert len(device.mock_calls) == calls_before + 1
    assert device.mock_calls[-1][0] == "turn_off"
    # Group services take no channel argument.
    assert device.mock_calls[-1][1] == ()
    await async_manipulate_test_data(opp, device, "on", False)
    assert opp.states.get(entity_id).state == STATE_OFF

    await opp.services.async_call(
        "switch", "turn_on", {"entity_id": entity_id}, blocking=True
    )
    assert len(device.mock_calls) == calls_before + 3
    assert device.mock_calls[-1][0] == "turn_on"
    assert device.mock_calls[-1][1] == ()
    await async_manipulate_test_data(opp, device, "on", True)
    state = opp.states.get(entity_id)
    assert state.state == STATE_ON

    assert not state.attributes.get(ATTR_GROUP_MEMBER_UNREACHABLE)
    await async_manipulate_test_data(opp, device, "unreach", True)
    assert opp.states.get(entity_id).attributes[ATTR_GROUP_MEMBER_UNREACHABLE]
async def test_hmip_multi_switch(opp, default_mock_hap_factory):
    """Multi-channel switch round trip, plus a second channel entity check."""
    entity_id = "switch.jalousien_1_kizi_2_schlazi_channel1"
    entity_name = "Jalousien - 1 KiZi, 2 SchlaZi Channel1"
    device_model = "HmIP-PCBS2"
    mock_hap = await default_mock_hap_factory.async_get_mock_hap(
        test_devices=[
            "Jalousien - 1 KiZi, 2 SchlaZi",
            "Multi IO Box",
            "Heizungsaktor",
            "ioBroker",
            "Schaltaktor Verteiler",
        ]
    )

    state, device = get_and_check_entity_basics(
        opp, mock_hap, entity_id, entity_name, device_model
    )
    assert state.state == STATE_OFF

    calls_before = len(device.mock_calls)
    await opp.services.async_call(
        "switch", "turn_on", {"entity_id": entity_id}, blocking=True
    )
    assert len(device.mock_calls) == calls_before + 1
    assert device.mock_calls[-1][0] == "turn_on"
    assert device.mock_calls[-1][1] == (1,)
    await async_manipulate_test_data(opp, device, "on", True)
    assert opp.states.get(entity_id).state == STATE_ON

    await opp.services.async_call(
        "switch", "turn_off", {"entity_id": entity_id}, blocking=True
    )
    assert len(device.mock_calls) == calls_before + 3
    assert device.mock_calls[-1][0] == "turn_off"
    assert device.mock_calls[-1][1] == (1,)
    await async_manipulate_test_data(opp, device, "on", False)
    assert opp.states.get(entity_id).state == STATE_OFF

    # A channel of another multi-switch device starts off as well.
    state, device = get_and_check_entity_basics(
        opp,
        mock_hap,
        "switch.schaltaktor_verteiler_channel3",
        "Schaltaktor Verteiler Channel3",
        "HmIP-DRSI4",
    )
    assert state.state == STATE_OFF
async def test_hmip_wired_multi_switch(opp, default_mock_hap_factory):
    """Round-trip turn_off/turn_on for a wired multi switch (HmIPW-DRS8)."""
    entity_id = "switch.fernseher_wohnzimmer"
    entity_name = "Fernseher (Wohnzimmer)"
    device_model = "HmIPW-DRS8"
    mock_hap = await default_mock_hap_factory.async_get_mock_hap(
        test_devices=[
            "Wired Schaltaktor – 8-fach",
        ]
    )

    state, device = get_and_check_entity_basics(
        opp, mock_hap, entity_id, entity_name, device_model
    )
    assert state.state == STATE_ON

    calls_before = len(device.mock_calls)
    await opp.services.async_call(
        "switch", "turn_off", {"entity_id": entity_id}, blocking=True
    )
    assert len(device.mock_calls) == calls_before + 1
    assert device.mock_calls[-1][0] == "turn_off"
    assert device.mock_calls[-1][1] == (1,)
    await async_manipulate_test_data(opp, device, "on", False)
    assert opp.states.get(entity_id).state == STATE_OFF

    await opp.services.async_call(
        "switch", "turn_on", {"entity_id": entity_id}, blocking=True
    )
    assert len(device.mock_calls) == calls_before + 3
    assert device.mock_calls[-1][0] == "turn_on"
    assert device.mock_calls[-1][1] == (1,)
    await async_manipulate_test_data(opp, device, "on", True)
    assert opp.states.get(entity_id).state == STATE_ON
| 37.582418 | 87 | 0.710721 | from openpeerpower.components.homematicip_cloud import DOMAIN as HMIPC_DOMAIN
from openpeerpower.components.homematicip_cloud.generic_entity import (
ATTR_GROUP_MEMBER_UNREACHABLE,
)
from openpeerpower.components.switch import (
ATTR_CURRENT_POWER_W,
ATTR_TODAY_ENERGY_KWH,
DOMAIN as SWITCH_DOMAIN,
)
from openpeerpower.const import STATE_OFF, STATE_ON
from openpeerpower.setup import async_setup_component
from .helper import async_manipulate_test_data, get_and_check_entity_basics
async def test_manually_configured_platform(opp):
assert await async_setup_component(
opp, SWITCH_DOMAIN, {SWITCH_DOMAIN: {"platform": HMIPC_DOMAIN}}
)
assert not opp.data.get(HMIPC_DOMAIN)
async def test_hmip_switch(opp, default_mock_hap_factory):
entity_id = "switch.schrank"
entity_name = "Schrank"
device_model = "HMIP-PS"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
opp, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
service_call_counter = len(hmip_device.mock_calls)
await opp.services.async_call(
"switch", "turn_off", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 1
assert hmip_device.mock_calls[-1][0] == "turn_off"
assert hmip_device.mock_calls[-1][1] == (1,)
await async_manipulate_test_data(opp, hmip_device, "on", False)
ha_state = opp.states.get(entity_id)
assert ha_state.state == STATE_OFF
await opp.services.async_call(
"switch", "turn_on", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 3
assert hmip_device.mock_calls[-1][0] == "turn_on"
assert hmip_device.mock_calls[-1][1] == (1,)
await async_manipulate_test_data(opp, hmip_device, "on", True)
ha_state = opp.states.get(entity_id)
assert ha_state.state == STATE_ON
async def test_hmip_switch_input(opp, default_mock_hap_factory):
entity_id = "switch.wohnzimmer_beleuchtung"
entity_name = "Wohnzimmer Beleuchtung"
device_model = "HmIP-FSI16"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
opp, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
service_call_counter = len(hmip_device.mock_calls)
await opp.services.async_call(
"switch", "turn_off", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 1
assert hmip_device.mock_calls[-1][0] == "turn_off"
assert hmip_device.mock_calls[-1][1] == (1,)
await async_manipulate_test_data(opp, hmip_device, "on", False)
ha_state = opp.states.get(entity_id)
assert ha_state.state == STATE_OFF
await opp.services.async_call(
"switch", "turn_on", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 3
assert hmip_device.mock_calls[-1][0] == "turn_on"
assert hmip_device.mock_calls[-1][1] == (1,)
await async_manipulate_test_data(opp, hmip_device, "on", True)
ha_state = opp.states.get(entity_id)
assert ha_state.state == STATE_ON
async def test_hmip_switch_measuring(opp, default_mock_hap_factory):
entity_id = "switch.pc"
entity_name = "Pc"
device_model = "HMIP-PSM"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
opp, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
service_call_counter = len(hmip_device.mock_calls)
await opp.services.async_call(
"switch", "turn_off", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 1
assert hmip_device.mock_calls[-1][0] == "turn_off"
assert hmip_device.mock_calls[-1][1] == (1,)
await async_manipulate_test_data(opp, hmip_device, "on", False)
ha_state = opp.states.get(entity_id)
assert ha_state.state == STATE_OFF
await opp.services.async_call(
"switch", "turn_on", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 3
assert hmip_device.mock_calls[-1][0] == "turn_on"
assert hmip_device.mock_calls[-1][1] == (1,)
await async_manipulate_test_data(opp, hmip_device, "on", True)
await async_manipulate_test_data(opp, hmip_device, "currentPowerConsumption", 50)
ha_state = opp.states.get(entity_id)
assert ha_state.state == STATE_ON
assert ha_state.attributes[ATTR_CURRENT_POWER_W] == 50
assert ha_state.attributes[ATTR_TODAY_ENERGY_KWH] == 36
await async_manipulate_test_data(opp, hmip_device, "energyCounter", None)
ha_state = opp.states.get(entity_id)
assert not ha_state.attributes.get(ATTR_TODAY_ENERGY_KWH)
async def test_hmip_group_switch(opp, default_mock_hap_factory):
    """Test HomematicipGroupSwitch: group-level on/off and the member-unreachable attribute."""
    entity_id = "switch.strom_group"
    entity_name = "Strom Group"
    device_model = None
    mock_hap = await default_mock_hap_factory.async_get_mock_hap(test_groups=["Strom"])
    ha_state, hmip_device = get_and_check_entity_basics(
        opp, mock_hap, entity_id, entity_name, device_model
    )
    assert ha_state.state == STATE_ON
    service_call_counter = len(hmip_device.mock_calls)
    await opp.services.async_call(
        "switch", "turn_off", {"entity_id": entity_id}, blocking=True
    )
    assert len(hmip_device.mock_calls) == service_call_counter + 1
    assert hmip_device.mock_calls[-1][0] == "turn_off"
    # Group switches are not channel based, hence no positional channel argument.
    assert hmip_device.mock_calls[-1][1] == ()
    await async_manipulate_test_data(opp, hmip_device, "on", False)
    ha_state = opp.states.get(entity_id)
    assert ha_state.state == STATE_OFF
    await opp.services.async_call(
        "switch", "turn_on", {"entity_id": entity_id}, blocking=True
    )
    # +3: turn_off, the simulated data push above, then turn_on.
    assert len(hmip_device.mock_calls) == service_call_counter + 3
    assert hmip_device.mock_calls[-1][0] == "turn_on"
    assert hmip_device.mock_calls[-1][1] == ()
    await async_manipulate_test_data(opp, hmip_device, "on", True)
    ha_state = opp.states.get(entity_id)
    assert ha_state.state == STATE_ON
    # The unreachable flag only shows up as an attribute once a member is unreachable.
    assert not ha_state.attributes.get(ATTR_GROUP_MEMBER_UNREACHABLE)
    await async_manipulate_test_data(opp, hmip_device, "unreach", True)
    ha_state = opp.states.get(entity_id)
    assert ha_state.attributes[ATTR_GROUP_MEMBER_UNREACHABLE]
async def test_hmip_multi_switch(opp, default_mock_hap_factory):
    """Test HomematicipMultiSwitch: channel-addressed on/off for multi-channel actuators."""
    entity_id = "switch.jalousien_1_kizi_2_schlazi_channel1"
    entity_name = "Jalousien - 1 KiZi, 2 SchlaZi Channel1"
    device_model = "HmIP-PCBS2"
    mock_hap = await default_mock_hap_factory.async_get_mock_hap(
        test_devices=[
            "Jalousien - 1 KiZi, 2 SchlaZi",
            "Multi IO Box",
            "Heizungsaktor",
            "ioBroker",
            "Schaltaktor Verteiler",
        ]
    )
    ha_state, hmip_device = get_and_check_entity_basics(
        opp, mock_hap, entity_id, entity_name, device_model
    )
    assert ha_state.state == STATE_OFF
    service_call_counter = len(hmip_device.mock_calls)
    await opp.services.async_call(
        "switch", "turn_on", {"entity_id": entity_id}, blocking=True
    )
    assert len(hmip_device.mock_calls) == service_call_counter + 1
    assert hmip_device.mock_calls[-1][0] == "turn_on"
    # Channel 1 is addressed explicitly on multi-channel devices.
    assert hmip_device.mock_calls[-1][1] == (1,)
    await async_manipulate_test_data(opp, hmip_device, "on", True)
    ha_state = opp.states.get(entity_id)
    assert ha_state.state == STATE_ON
    await opp.services.async_call(
        "switch", "turn_off", {"entity_id": entity_id}, blocking=True
    )
    # +3: turn_on, the simulated data push above, then turn_off.
    assert len(hmip_device.mock_calls) == service_call_counter + 3
    assert hmip_device.mock_calls[-1][0] == "turn_off"
    assert hmip_device.mock_calls[-1][1] == (1,)
    await async_manipulate_test_data(opp, hmip_device, "on", False)
    ha_state = opp.states.get(entity_id)
    assert ha_state.state == STATE_OFF
    # A channel of another multi-switch device is created and starts OFF as well.
    ha_state, hmip_device = get_and_check_entity_basics(
        opp,
        mock_hap,
        "switch.schaltaktor_verteiler_channel3",
        "Schaltaktor Verteiler Channel3",
        "HmIP-DRSI4",
    )
    assert ha_state.state == STATE_OFF
async def test_hmip_wired_multi_switch(opp, default_mock_hap_factory):
    """Test HomematicipMultiSwitch for a wired (HmIPW) multi-channel switch actuator."""
    entity_id = "switch.fernseher_wohnzimmer"
    entity_name = "Fernseher (Wohnzimmer)"
    device_model = "HmIPW-DRS8"
    mock_hap = await default_mock_hap_factory.async_get_mock_hap(
        test_devices=[
            "Wired Schaltaktor – 8-fach",
        ]
    )
    ha_state, hmip_device = get_and_check_entity_basics(
        opp, mock_hap, entity_id, entity_name, device_model
    )
    assert ha_state.state == STATE_ON
    service_call_counter = len(hmip_device.mock_calls)
    await opp.services.async_call(
        "switch", "turn_off", {"entity_id": entity_id}, blocking=True
    )
    assert len(hmip_device.mock_calls) == service_call_counter + 1
    assert hmip_device.mock_calls[-1][0] == "turn_off"
    # Wired actuators are channel based too; channel 1 is addressed explicitly.
    assert hmip_device.mock_calls[-1][1] == (1,)
    await async_manipulate_test_data(opp, hmip_device, "on", False)
    ha_state = opp.states.get(entity_id)
    assert ha_state.state == STATE_OFF
    await opp.services.async_call(
        "switch", "turn_on", {"entity_id": entity_id}, blocking=True
    )
    # +3: turn_off, the simulated data push above, then turn_on.
    assert len(hmip_device.mock_calls) == service_call_counter + 3
    assert hmip_device.mock_calls[-1][0] == "turn_on"
    assert hmip_device.mock_calls[-1][1] == (1,)
    await async_manipulate_test_data(opp, hmip_device, "on", True)
    ha_state = opp.states.get(entity_id)
    assert ha_state.state == STATE_ON
| true | true |
f71e401b78d23a22bfd8c5eeb5995f8f22fe4239 | 299 | py | Python | backend/mp/apps/base/migrations/0012_delete_questionnaire.py | shidashui/mymp | 75d81906908395ece1c8d12249d6afc4bd2d0704 | [
"MIT"
] | 1 | 2020-03-14T12:33:24.000Z | 2020-03-14T12:33:24.000Z | backend/mp/apps/base/migrations/0012_delete_questionnaire.py | shidashui/mymp | 75d81906908395ece1c8d12249d6afc4bd2d0704 | [
"MIT"
] | 8 | 2021-03-19T00:59:11.000Z | 2022-03-12T00:19:38.000Z | backend/mp/apps/base/migrations/0012_delete_questionnaire.py | shidashui/mymp | 75d81906908395ece1c8d12249d6afc4bd2d0704 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.4 on 2020-03-26 11:08
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration that drops the obsolete ``Questionnaire`` model."""

    # Must run after 0011, the previous migration of the "base" app.
    dependencies = [
        ('base', '0011_auto_20200326_1107'),
    ]

    operations = [
        # Removes the model and its backing database table.
        migrations.DeleteModel(
            name='Questionnaire',
        ),
    ]
| 17.588235 | 47 | 0.608696 |
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration that drops the obsolete ``Questionnaire`` model."""

    # Must run after 0011, the previous migration of the "base" app.
    dependencies = [
        ('base', '0011_auto_20200326_1107'),
    ]

    operations = [
        # Removes the model and its backing database table.
        migrations.DeleteModel(
            name='Questionnaire',
        ),
    ]
| true | true |
f71e40329e3d37616481697f5bfc504ea2916179 | 367 | py | Python | app/classes/run_options.py | robjporter/PYTHON-APIServer-1 | 57df8e8189834504b3f473993ae12586ec32d5c9 | [
"MIT"
] | null | null | null | app/classes/run_options.py | robjporter/PYTHON-APIServer-1 | 57df8e8189834504b3f473993ae12586ec32d5c9 | [
"MIT"
] | null | null | null | app/classes/run_options.py | robjporter/PYTHON-APIServer-1 | 57df8e8189834504b3f473993ae12586ec32d5c9 | [
"MIT"
] | null | null | null | # Run options
# gunicorn -w 4 -b 127.0.0.1:5000 main:app
# waitress-serve --listen=127.0.0.1:5000 main:app
# python3 main.py runserver
# uwsgi --http 0.0.0.0:8000 --home env --wsgi-file main.py --callable app --master --enable-threads --thunder-lock
# virtualenv -p /usr/local/bin/python3 env
# source env/bin/activate
# pip3 install --upgrade pip
# pip3 install -r requirements.txt
| 36.7 | 115 | 0.692098 | true | true | |
f71e41255f5f877f4e6d025f4be916412433703c | 7,454 | py | Python | kubernetes/client/models/v1_pod_affinity_term.py | henrywu2019/python | fb7214144395c05349e70a58ea129576f6b11fc4 | [
"Apache-2.0"
] | 4,417 | 2018-01-13T04:30:48.000Z | 2022-03-31T15:33:59.000Z | kubernetes/client/models/v1_pod_affinity_term.py | henrywu2019/python | fb7214144395c05349e70a58ea129576f6b11fc4 | [
"Apache-2.0"
] | 1,414 | 2018-01-12T19:31:56.000Z | 2022-03-31T22:01:02.000Z | kubernetes/client/models/v1_pod_affinity_term.py | henrywu2019/python | fb7214144395c05349e70a58ea129576f6b11fc4 | [
"Apache-2.0"
] | 2,854 | 2018-01-14T08:57:33.000Z | 2022-03-31T01:41:56.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.21
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1PodAffinityTerm(object):
    """Auto-generated OpenAPI model (https://openapi-generator.tech).

    A pod (anti-)affinity term: selects a set of pods, via a label selector
    over one or more namespaces, that this pod should be co-located with
    (affinity) or kept apart from (anti-affinity), relative to the node
    label named by ``topology_key``.
    """

    # python attribute name -> declared OpenAPI type; drives (de)serialization.
    openapi_types = {
        'label_selector': 'V1LabelSelector',
        'namespace_selector': 'V1LabelSelector',
        'namespaces': 'list[str]',
        'topology_key': 'str'
    }

    # python attribute name -> JSON field name.
    attribute_map = {
        'label_selector': 'labelSelector',
        'namespace_selector': 'namespaceSelector',
        'namespaces': 'namespaces',
        'topology_key': 'topologyKey'
    }

    def __init__(self, label_selector=None, namespace_selector=None, namespaces=None, topology_key=None, local_vars_configuration=None):  # noqa: E501
        """Create a V1PodAffinityTerm; only ``topology_key`` is required."""
        self.local_vars_configuration = (
            Configuration()
            if local_vars_configuration is None
            else local_vars_configuration
        )

        self._label_selector = None
        self._namespace_selector = None
        self._namespaces = None
        self._topology_key = None
        self.discriminator = None

        if label_selector is not None:
            self.label_selector = label_selector
        if namespace_selector is not None:
            self.namespace_selector = namespace_selector
        if namespaces is not None:
            self.namespaces = namespaces
        # Assigned last; the setter performs the required-field check.
        self.topology_key = topology_key

    @property
    def label_selector(self):
        """Label query over the set of candidate pods.

        :rtype: V1LabelSelector
        """
        return self._label_selector

    @label_selector.setter
    def label_selector(self, label_selector):
        """Set the pod label selector of this term."""
        self._label_selector = label_selector

    @property
    def namespace_selector(self):
        """Label query over the namespaces the term applies to.

        :rtype: V1LabelSelector
        """
        return self._namespace_selector

    @namespace_selector.setter
    def namespace_selector(self, namespace_selector):
        """Set the namespace selector of this term."""
        self._namespace_selector = namespace_selector

    @property
    def namespaces(self):
        """Static list of namespace names the term applies to.

        :rtype: list[str]
        """
        return self._namespaces

    @namespaces.setter
    def namespaces(self, namespaces):
        """Set the static namespace list of this term."""
        self._namespaces = namespaces

    @property
    def topology_key(self):
        """Node label key that defines co-location (required).

        :rtype: str
        """
        return self._topology_key

    @topology_key.setter
    def topology_key(self, topology_key):
        """Set the node label key; ``None`` is rejected under client-side validation."""
        if self.local_vars_configuration.client_side_validation and topology_key is None:  # noqa: E501
            raise ValueError("Invalid value for `topology_key`, must not be `None`")  # noqa: E501
        self._topology_key = topology_key

    def to_dict(self):
        """Return the model properties as a dict, serializing nested models."""

        def serialize(value):
            # One level of model-aware conversion, mirroring the generated
            # OpenAPI serialization rules.
            if isinstance(value, list):
                return [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            return value

        return {
            attr: serialize(getattr(self, attr)) for attr in self.openapi_types
        }

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two terms are equal when their serialized forms match."""
        if not isinstance(other, V1PodAffinityTerm):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        return not self == other
| 36.539216 | 366 | 0.652267 |
import pprint
import re
import six
from kubernetes.client.configuration import Configuration
class V1PodAffinityTerm(object):
    """Auto-generated OpenAPI model for a Kubernetes pod (anti-)affinity term.

    Selects a set of pods (via label/namespace selectors) that this pod
    should be co-located with, or kept apart from, relative to the node
    label named by ``topology_key``.
    """

    # Maps python attribute name -> declared OpenAPI type (drives serialization).
    openapi_types = {
        'label_selector': 'V1LabelSelector',
        'namespace_selector': 'V1LabelSelector',
        'namespaces': 'list[str]',
        'topology_key': 'str'
    }

    # Maps python attribute name -> JSON field name.
    attribute_map = {
        'label_selector': 'labelSelector',
        'namespace_selector': 'namespaceSelector',
        'namespaces': 'namespaces',
        'topology_key': 'topologyKey'
    }

    def __init__(self, label_selector=None, namespace_selector=None, namespaces=None, topology_key=None, local_vars_configuration=None):
        """Initialize the term; ``topology_key`` is the only required field."""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._label_selector = None
        self._namespace_selector = None
        self._namespaces = None
        self._topology_key = None
        self.discriminator = None
        if label_selector is not None:
            self.label_selector = label_selector
        if namespace_selector is not None:
            self.namespace_selector = namespace_selector
        if namespaces is not None:
            self.namespaces = namespaces
        # Assigned last; the setter validates that it is not None.
        self.topology_key = topology_key

    @property
    def label_selector(self):
        """Label query over the candidate pods (V1LabelSelector or None)."""
        return self._label_selector

    @label_selector.setter
    def label_selector(self, label_selector):
        """Set the pod label selector."""
        self._label_selector = label_selector

    @property
    def namespace_selector(self):
        """Label query over namespaces (V1LabelSelector or None)."""
        return self._namespace_selector

    @namespace_selector.setter
    def namespace_selector(self, namespace_selector):
        """Set the namespace selector."""
        self._namespace_selector = namespace_selector

    @property
    def namespaces(self):
        """Static list of namespace names the term applies to (list[str] or None)."""
        return self._namespaces

    @namespaces.setter
    def namespaces(self, namespaces):
        """Set the static namespace list."""
        self._namespaces = namespaces

    @property
    def topology_key(self):
        """Node label key used to define co-location (str, required)."""
        return self._topology_key

    @topology_key.setter
    def topology_key(self, topology_key):
        """Set the node label key; rejects None when client-side validation is on."""
        if self.local_vars_configuration.client_side_validation and topology_key is None:
            raise ValueError("Invalid value for `topology_key`, must not be `None`")
        self._topology_key = topology_key

    def to_dict(self):
        """Return the model properties as a dict, serializing nested models."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialize each list element that is itself a model.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize model-typed dict values, leaving plain items as-is.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when both objects serialize to the same dict."""
        if not isinstance(other, V1PodAffinityTerm):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of __eq__."""
        if not isinstance(other, V1PodAffinityTerm):
            return True
        return self.to_dict() != other.to_dict()
f71e43441726991507a806be6346c189869dd316 | 7,835 | py | Python | great_expectations/expectations/core/expect_column_values_to_be_null.py | victorcouste/great_expectations | 9ee46d83feb87e13c769e2ae35b899b3f18d73a4 | [
"Apache-2.0"
] | 2 | 2022-01-28T15:51:32.000Z | 2022-02-02T05:07:58.000Z | great_expectations/expectations/core/expect_column_values_to_be_null.py | victorcouste/great_expectations | 9ee46d83feb87e13c769e2ae35b899b3f18d73a4 | [
"Apache-2.0"
] | null | null | null | great_expectations/expectations/core/expect_column_values_to_be_null.py | victorcouste/great_expectations | 9ee46d83feb87e13c769e2ae35b899b3f18d73a4 | [
"Apache-2.0"
] | 1 | 2021-10-08T01:24:50.000Z | 2021-10-08T01:24:50.000Z | from typing import Dict, Optional
from great_expectations.core import ExpectationConfiguration
from great_expectations.core.expectation_configuration import parse_result_format
from great_expectations.execution_engine import ExecutionEngine
from great_expectations.expectations.expectation import (
ColumnMapExpectation,
_format_map_output,
)
from great_expectations.expectations.util import render_evaluation_parameter_string
from great_expectations.render.renderer.renderer import renderer
from great_expectations.render.types import RenderedStringTemplateContent
from great_expectations.render.util import (
num_to_str,
parse_row_condition_string_pandas_engine,
substitute_none_for_missing,
)
from great_expectations.validator.validation_graph import MetricConfiguration
class ExpectColumnValuesToBeNull(ColumnMapExpectation):
    """Expect column values to be null.

    expect_column_values_to_be_null is a \
    :func:`column_map_expectation <great_expectations.execution_engine.execution_engine.MetaExecutionEngine
    .column_map_expectation>`.

    Args:
        column (str): \
            The column name.

    Keyword Args:
        mostly (None or a float between 0 and 1): \
            Return `"success": True` if at least mostly fraction of values match the expectation. \
            For more detail, see :ref:`mostly`.

    Other Parameters:
        result_format (str or None): \
            Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): \
            If True, then include the expectation config as part of the result object. \
            For more detail, see :ref:`include_config`.
        catch_exceptions (boolean or None): \
            If True, then catch exceptions and include them as part of the result object. \
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
            modification. For more detail, see :ref:`meta`.

    Returns:
        An ExpectationSuiteValidationResult

        Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
        :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.

    See Also:
        :func:`expect_column_values_to_not_be_null \
        <great_expectations.execution_engine.execution_engine.ExecutionEngine.expect_column_values_to_not_be_null>`
    """

    # This dictionary contains metadata for display in the public gallery
    library_metadata = {
        "maturity": "production",
        "package": "great_expectations",
        "tags": ["core expectation", "column map expectation"],
        "contributors": ["@great_expectations"],
        "requirements": [],
    }

    # Row-level metric asserted by this expectation.
    map_metric = "column_values.null"

    @classmethod
    @renderer(renderer_type="renderer.prescriptive")
    @render_evaluation_parameter_string
    def _prescriptive_renderer(
        cls,
        configuration=None,
        result=None,
        language=None,
        runtime_configuration=None,
        **kwargs
    ):
        """Render the expectation as a human-readable sentence for Data Docs."""
        runtime_configuration = runtime_configuration or {}
        include_column_name = runtime_configuration.get("include_column_name", True)
        include_column_name = (
            include_column_name if include_column_name is not None else True
        )
        styling = runtime_configuration.get("styling")
        params = substitute_none_for_missing(
            configuration.kwargs,
            ["column", "mostly", "row_condition", "condition_parser"],
        )
        if params["mostly"] is not None:
            # Express `mostly` as a percentage, avoiding scientific notation.
            params["mostly_pct"] = num_to_str(
                params["mostly"] * 100, precision=15, no_scientific=True
            )
            template_str = "values must be null, at least $mostly_pct % of the time."
        else:
            template_str = "values must be null."
        if include_column_name:
            template_str = "$column " + template_str
        if params["row_condition"] is not None:
            # Prefix the sentence with the rendered row condition.
            (
                conditional_template_str,
                conditional_params,
            ) = parse_row_condition_string_pandas_engine(params["row_condition"])
            template_str = conditional_template_str + ", then " + template_str
            params.update(conditional_params)
        return [
            RenderedStringTemplateContent(
                **{
                    "content_block_type": "string_template",
                    "string_template": {
                        "template": template_str,
                        "params": params,
                        "styling": styling,
                    },
                }
            )
        ]

    @classmethod
    @renderer(renderer_type="renderer.diagnostic.observed_value")
    def _diagnostic_observed_value_renderer(
        cls,
        configuration=None,
        result=None,
        language=None,
        runtime_configuration=None,
        **kwargs
    ):
        """Render the observed null percentage for Data Docs."""
        result_dict = result.result
        try:
            # "unexpected" values are the non-null ones, so null% = 100 - unexpected%.
            notnull_percent = result_dict["unexpected_percent"]
            return (
                num_to_str(100 - notnull_percent, precision=5, use_locale=True)
                + "% null"
            )
        except KeyError:
            # unexpected_percent missing from the result payload.
            return "unknown % null"
        except TypeError:
            # unexpected_percent is not numeric (e.g. None).
            return "NaN% null"

    def get_validation_dependencies(
        self,
        configuration: Optional[ExpectationConfiguration] = None,
        execution_engine: Optional[ExecutionEngine] = None,
        runtime_configuration: Optional[dict] = None,
    ):
        """Build the metric dependency graph, dropping the nonnull count the
        base class requests but this expectation never uses."""
        dependencies = super().get_validation_dependencies(
            configuration, execution_engine, runtime_configuration
        )
        # We do not need this metric for a null metric
        del dependencies["metrics"]["column_values.nonnull.unexpected_count"]
        return dependencies

    def _validate(
        self,
        configuration: ExpectationConfiguration,
        metrics: Dict,
        runtime_configuration: dict = None,
        execution_engine: ExecutionEngine = None,
    ):
        """Evaluate success against the `mostly` threshold and format the result."""
        # result_format from runtime config wins over the expectation config,
        # which wins over the class default.
        if runtime_configuration:
            result_format = runtime_configuration.get(
                "result_format",
                configuration.kwargs.get(
                    "result_format", self.default_kwarg_values.get("result_format")
                ),
            )
        else:
            result_format = configuration.kwargs.get(
                "result_format", self.default_kwarg_values.get("result_format")
            )
        mostly = self.get_success_kwargs().get(
            "mostly", self.default_kwarg_values.get("mostly")
        )
        total_count = metrics.get("table.row_count")
        unexpected_count = metrics.get(self.map_metric + ".unexpected_count")
        if total_count is None or total_count == 0:
            # Vacuously true
            success = True
        else:
            success_ratio = (total_count - unexpected_count) / total_count
            success = success_ratio >= mostly
        # Not meaningful for a null expectation; omitted from the output.
        nonnull_count = None
        return _format_map_output(
            result_format=parse_result_format(result_format),
            success=success,
            element_count=metrics.get("table.row_count"),
            nonnull_count=nonnull_count,
            unexpected_count=metrics.get(self.map_metric + ".unexpected_count"),
            unexpected_list=metrics.get(self.map_metric + ".unexpected_values"),
            unexpected_index_list=metrics.get(
                self.map_metric + ".unexpected_index_list"
            ),
        )
| 37.309524 | 115 | 0.636631 | from typing import Dict, Optional
from great_expectations.core import ExpectationConfiguration
from great_expectations.core.expectation_configuration import parse_result_format
from great_expectations.execution_engine import ExecutionEngine
from great_expectations.expectations.expectation import (
ColumnMapExpectation,
_format_map_output,
)
from great_expectations.expectations.util import render_evaluation_parameter_string
from great_expectations.render.renderer.renderer import renderer
from great_expectations.render.types import RenderedStringTemplateContent
from great_expectations.render.util import (
num_to_str,
parse_row_condition_string_pandas_engine,
substitute_none_for_missing,
)
from great_expectations.validator.validation_graph import MetricConfiguration
class ExpectColumnValuesToBeNull(ColumnMapExpectation):
    """Expect the values in a column to be null; succeeds when at least the
    ``mostly`` fraction of rows (default: all) are null."""

    # Metadata used by the public expectation gallery.
    library_metadata = {
        "maturity": "production",
        "package": "great_expectations",
        "tags": ["core expectation", "column map expectation"],
        "contributors": ["@great_expectations"],
        "requirements": [],
    }

    # Row-level metric asserted by this expectation.
    map_metric = "column_values.null"

    @classmethod
    @renderer(renderer_type="renderer.prescriptive")
    @render_evaluation_parameter_string
    def _prescriptive_renderer(
        cls,
        configuration=None,
        result=None,
        language=None,
        runtime_configuration=None,
        **kwargs
    ):
        """Render the expectation as a human-readable sentence for Data Docs."""
        runtime_configuration = runtime_configuration or {}
        include_column_name = runtime_configuration.get("include_column_name", True)
        include_column_name = (
            include_column_name if include_column_name is not None else True
        )
        styling = runtime_configuration.get("styling")
        params = substitute_none_for_missing(
            configuration.kwargs,
            ["column", "mostly", "row_condition", "condition_parser"],
        )
        if params["mostly"] is not None:
            # Express `mostly` as a percentage, avoiding scientific notation.
            params["mostly_pct"] = num_to_str(
                params["mostly"] * 100, precision=15, no_scientific=True
            )
            template_str = "values must be null, at least $mostly_pct % of the time."
        else:
            template_str = "values must be null."
        if include_column_name:
            template_str = "$column " + template_str
        if params["row_condition"] is not None:
            # Prefix the sentence with the rendered row condition.
            (
                conditional_template_str,
                conditional_params,
            ) = parse_row_condition_string_pandas_engine(params["row_condition"])
            template_str = conditional_template_str + ", then " + template_str
            params.update(conditional_params)
        return [
            RenderedStringTemplateContent(
                **{
                    "content_block_type": "string_template",
                    "string_template": {
                        "template": template_str,
                        "params": params,
                        "styling": styling,
                    },
                }
            )
        ]

    @classmethod
    @renderer(renderer_type="renderer.diagnostic.observed_value")
    def _diagnostic_observed_value_renderer(
        cls,
        configuration=None,
        result=None,
        language=None,
        runtime_configuration=None,
        **kwargs
    ):
        """Render the observed null percentage for Data Docs."""
        result_dict = result.result
        try:
            # "unexpected" values are the non-null ones, so null% = 100 - unexpected%.
            notnull_percent = result_dict["unexpected_percent"]
            return (
                num_to_str(100 - notnull_percent, precision=5, use_locale=True)
                + "% null"
            )
        except KeyError:
            # unexpected_percent missing from the result payload.
            return "unknown % null"
        except TypeError:
            # unexpected_percent is not numeric (e.g. None).
            return "NaN% null"

    def get_validation_dependencies(
        self,
        configuration: Optional[ExpectationConfiguration] = None,
        execution_engine: Optional[ExecutionEngine] = None,
        runtime_configuration: Optional[dict] = None,
    ):
        """Build the metric dependency graph, dropping the nonnull count the
        base class requests but this expectation never uses."""
        dependencies = super().get_validation_dependencies(
            configuration, execution_engine, runtime_configuration
        )
        # The nonnull unexpected count is irrelevant when asserting nullness.
        del dependencies["metrics"]["column_values.nonnull.unexpected_count"]
        return dependencies

    def _validate(
        self,
        configuration: ExpectationConfiguration,
        metrics: Dict,
        runtime_configuration: dict = None,
        execution_engine: ExecutionEngine = None,
    ):
        """Evaluate success against the `mostly` threshold and format the result."""
        # result_format from runtime config wins over the expectation config,
        # which wins over the class default.
        if runtime_configuration:
            result_format = runtime_configuration.get(
                "result_format",
                configuration.kwargs.get(
                    "result_format", self.default_kwarg_values.get("result_format")
                ),
            )
        else:
            result_format = configuration.kwargs.get(
                "result_format", self.default_kwarg_values.get("result_format")
            )
        mostly = self.get_success_kwargs().get(
            "mostly", self.default_kwarg_values.get("mostly")
        )
        total_count = metrics.get("table.row_count")
        unexpected_count = metrics.get(self.map_metric + ".unexpected_count")
        if total_count is None or total_count == 0:
            # An empty (or unknown-size) table satisfies the expectation vacuously.
            success = True
        else:
            success_ratio = (total_count - unexpected_count) / total_count
            success = success_ratio >= mostly
        # Not meaningful for a null expectation; omitted from the output.
        nonnull_count = None
        return _format_map_output(
            result_format=parse_result_format(result_format),
            success=success,
            element_count=metrics.get("table.row_count"),
            nonnull_count=nonnull_count,
            unexpected_count=metrics.get(self.map_metric + ".unexpected_count"),
            unexpected_list=metrics.get(self.map_metric + ".unexpected_values"),
            unexpected_index_list=metrics.get(
                self.map_metric + ".unexpected_index_list"
            ),
        )
| true | true |
f71e44aad927f5ef6129ca72ac718a82de543b96 | 842 | py | Python | dojo/unittests/tools/test_acunetix_parser.py | brunoduruzup/django-DefectDojo | cd598b44f1c44ca2a05fdf95f99c0d526509f656 | [
"BSD-3-Clause"
] | 2 | 2021-09-19T23:19:12.000Z | 2022-03-26T10:20:49.000Z | dojo/unittests/tools/test_acunetix_parser.py | brunoduruzup/django-DefectDojo | cd598b44f1c44ca2a05fdf95f99c0d526509f656 | [
"BSD-3-Clause"
] | 167 | 2021-03-15T13:49:54.000Z | 2022-03-31T09:10:30.000Z | dojo/unittests/tools/test_acunetix_parser.py | Hijerboa/django-DefectDojo | 3aea3bc3406f860c0842b0bf8800efe2c86bf81b | [
"BSD-3-Clause"
] | 4 | 2016-09-19T17:33:39.000Z | 2018-12-10T07:55:45.000Z | from django.test import TestCase
from dojo.tools.acunetix.parser import AcunetixParser
from dojo.models import Test
class TestAcunetixParser(TestCase):
    """Unit tests for the Acunetix XML report parser."""

    def test_parse_without_file(self):
        """A missing report yields no findings."""
        parser = AcunetixParser()
        findings = parser.get_findings(None, Test())
        self.assertEqual(0, len(findings))

    def test_parse_file_with_one_finding(self):
        """A report with a single vulnerability yields exactly one finding."""
        # Context manager closes the fixture file even if the parser raises,
        # avoiding ResourceWarnings from leaked file handles.
        with open("dojo/unittests/scans/acunetix/one_finding.xml") as testfile:
            parser = AcunetixParser()
            findings = parser.get_findings(testfile, Test())
        self.assertEqual(1, len(findings))

    def test_parse_file_with_multiple_finding(self):
        """A report with several vulnerabilities yields one finding per entry."""
        with open("dojo/unittests/scans/acunetix/many_findings.xml") as testfile:
            parser = AcunetixParser()
            findings = parser.get_findings(testfile, Test())
        self.assertEqual(4, len(findings))
| 36.608696 | 74 | 0.712589 | from django.test import TestCase
from dojo.tools.acunetix.parser import AcunetixParser
from dojo.models import Test
class TestAcunetixParser(TestCase):
    """Unit tests for the Acunetix XML report parser."""

    def test_parse_without_file(self):
        """A missing report yields no findings."""
        parser = AcunetixParser()
        findings = parser.get_findings(None, Test())
        self.assertEqual(0, len(findings))

    def test_parse_file_with_one_finding(self):
        """A report with a single vulnerability yields exactly one finding."""
        # Context manager closes the fixture file even if the parser raises,
        # avoiding ResourceWarnings from leaked file handles.
        with open("dojo/unittests/scans/acunetix/one_finding.xml") as testfile:
            parser = AcunetixParser()
            findings = parser.get_findings(testfile, Test())
        self.assertEqual(1, len(findings))

    def test_parse_file_with_multiple_finding(self):
        """A report with several vulnerabilities yields one finding per entry."""
        with open("dojo/unittests/scans/acunetix/many_findings.xml") as testfile:
            parser = AcunetixParser()
            findings = parser.get_findings(testfile, Test())
        self.assertEqual(4, len(findings))
| true | true |
f71e44f724dfe3403874adb309cf00523e7e6a93 | 29 | py | Python | src/all_models/models/__init__.py | sinnamone/PcaprofilerDjango | 72c704c7011dc979b9a0638f7e948c0328bd20ea | [
"MIT"
] | null | null | null | src/all_models/models/__init__.py | sinnamone/PcaprofilerDjango | 72c704c7011dc979b9a0638f7e948c0328bd20ea | [
"MIT"
] | null | null | null | src/all_models/models/__init__.py | sinnamone/PcaprofilerDjango | 72c704c7011dc979b9a0638f7e948c0328bd20ea | [
"MIT"
] | null | null | null | from .custom_user import User | 29 | 29 | 0.862069 | from .custom_user import User | true | true |
f71e45004f351efc76d9ab4441cdedbda91aec6a | 2,668 | py | Python | opps/images/templatetags/images_tags.py | jeanmask/opps | 031c6136c38d43aa6d1ccb25a94f7bcd65ccbf87 | [
"MIT"
] | 159 | 2015-01-03T16:36:35.000Z | 2022-03-29T20:50:13.000Z | opps/images/templatetags/images_tags.py | jeanmask/opps | 031c6136c38d43aa6d1ccb25a94f7bcd65ccbf87 | [
"MIT"
] | 81 | 2015-01-02T21:26:16.000Z | 2021-05-29T12:24:52.000Z | opps/images/templatetags/images_tags.py | jeanmask/opps | 031c6136c38d43aa6d1ccb25a94f7bcd65ccbf87 | [
"MIT"
] | 75 | 2015-01-23T13:41:03.000Z | 2021-09-24T03:45:23.000Z | # -*- coding: utf-8 -*-
from django import template
from django.template import Node, TemplateSyntaxError, Variable
from django.conf import settings
from django.utils.translation import ugettext as _
from ..generate import image_url as url
register = template.Library()
class AllImagesCheckPermissionForObjectsNode(Node):
    """Template node that stores ``obj.all_images(check_published)`` in the context.

    Staff and superusers also see unpublished images; everyone else only
    sees published ones.
    """

    def __init__(self, obj, name):
        self.obj = Variable(obj)  # template variable holding the object
        self.name = name          # context variable name to populate

    def render(self, context):
        check_published = True
        try:
            user = context['request'].user
            if user.is_staff or user.is_superuser:
                check_published = False
        except (KeyError, AttributeError):
            # No request in the context, or it lacks a usable user:
            # keep the published-only default instead of failing the render.
            pass
        obj = self.obj.resolve(context)
        context[self.name] = obj.all_images(check_published)
        return ''
@register.tag(name='all_images_check_permission')
def all_images_check_permission(parser, token):
    """
    {% all_images_check_permission object as images %}

    Stores ``object``'s images in the context variable named after ``as``,
    honoring the current user's permission to see unpublished images.
    """
    try:
        parans = token.split_contents()
    except ValueError:
        # split_contents fails on malformed token contents; the tag expects
        # three arguments (object, 'as', variable name), not two.
        raise TemplateSyntaxError(
            _('tag requires exactly three arguments'))
    if len(parans) != 4:
        # The tag name itself counts as the first token.
        raise TemplateSyntaxError(
            _('tag requires exactly three arguments'))
    if parans[2] != 'as':
        raise TemplateSyntaxError(
            _("second argument to tag must be 'as'"))
    return AllImagesCheckPermissionForObjectsNode(parans[1], parans[3])
@register.simple_tag
def image_url(image_url, **kwargs):
    """Return the processed (e.g. thumbor) URL for a raw image URL."""
    return url(image_url=image_url, **kwargs)
@register.simple_tag
def image_obj(image, **kwargs):
    """Return the processed URL for an opps ``Image`` instance.

    When thumbor is enabled, the image's stored edit options (flip/flop,
    alignment, fit_in, smart, filters and crop box) are merged into
    ``kwargs`` before generating the URL; explicitly passed ``kwargs``
    win over the stored options. Returns ``""`` for a missing image, and
    uses ``archive_link`` instead of the local file when one is set.
    """
    HALIGN_VALUES = ("left", "center", "right")
    VALIGN_VALUES = ("top", "middle", "bottom")
    if not image:
        # Covers both None and "" (the former `image == "" or not image`).
        return ""
    if settings.THUMBOR_ENABLED:
        new = {
            'flip': image.flip,
            'flop': image.flop,
            'fit_in': image.fit_in,
            'smart': image.smart,
        }
        if image.halign and image.halign in HALIGN_VALUES:
            new['halign'] = image.halign
        if image.valign and image.valign in VALIGN_VALUES:
            new['valign'] = image.valign
        if 'filters' in kwargs:
            # The URL generator expects a list of filters.
            new['filters'] = [kwargs.pop('filters')]
        if image.crop_x1 > 0 or image.crop_x2 > 0 or image.crop_y1 > 0 or \
                image.crop_y2 > 0:
            new['crop'] = ((image.crop_x1, image.crop_y1),
                           (image.crop_x2, image.crop_y2))
        # Explicit kwargs take precedence over the stored edit options.
        kwargs = dict(new, **kwargs)
    if image.archive_link:
        # Truthiness already excludes ""; the extra != "" check was redundant.
        return url(image_url=image.archive_link, **kwargs)
    return image.image_url(**kwargs)
| 28.688172 | 75 | 0.616942 |
from django import template
from django.template import Node, TemplateSyntaxError, Variable
from django.conf import settings
from django.utils.translation import ugettext as _
from ..generate import image_url as url
register = template.Library()
class AllImagesCheckPermissionForObjectsNode(Node):
def __init__(self, obj, name):
self.obj = Variable(obj)
self.name = name
def render(self, context):
check_published = True
try:
user = context['request'].user
if user.is_staff or user.is_superuser:
check_published = False
except:
pass
obj = self.obj.resolve(context)
context[self.name] = obj.all_images(check_published)
return ''
@register.tag(name='all_images_check_permission')
def all_images_check_permission(parser, token):
try:
parans = token.split_contents()
except ValueError:
raise TemplateSyntaxError(
_('tag requires exactly two arguments'))
if len(parans) != 4:
raise TemplateSyntaxError(
_('tag requires exactly three arguments'))
if parans[2] != 'as':
raise TemplateSyntaxError(
_("second argument to tag must be 'as'"))
return AllImagesCheckPermissionForObjectsNode(parans[1], parans[3])
@register.simple_tag
def image_url(image_url, **kwargs):
return url(image_url=image_url, **kwargs)
@register.simple_tag
def image_obj(image, **kwargs):
HALIGN_VALUES = ("left", "center", "right")
VALIGN_VALUES = ("top", "middle", "bottom")
if image == "" or not image:
return ""
if settings.THUMBOR_ENABLED:
new = {}
new['flip'] = image.flip
new['flop'] = image.flop
if image.halign and image.halign in HALIGN_VALUES:
new['halign'] = image.halign
if image.valign and image.valign in VALIGN_VALUES:
new['valign'] = image.valign
new['fit_in'] = image.fit_in
new['smart'] = image.smart
if 'filters' in kwargs:
kw = [kwargs['filters']]
new['filters'] = kw
del kwargs['filters']
if image.crop_x1 > 0 or image.crop_x2 > 0 or image.crop_y1 > 0 or \
image.crop_y2 > 0:
new['crop'] = ((image.crop_x1, image.crop_y1),
(image.crop_x2, image.crop_y2))
kwargs = dict(new, **kwargs)
if image.archive_link and image.archive_link != "":
return url(image_url=image.archive_link, **kwargs)
return image.image_url(**kwargs)
| true | true |
f71e451920124e5a0a23bd63a7f8d86f5311f631 | 3,319 | py | Python | subset/dot1x/authenticator/eap_socket.py | sicada/faucetsdn-daq | 04967711fc58c3101c2449d09dbc9dcef95df21e | [
"Apache-2.0"
] | 41 | 2018-06-11T06:34:37.000Z | 2022-01-14T18:07:49.000Z | subset/dot1x/authenticator/eap_socket.py | sicada/faucetsdn-daq | 04967711fc58c3101c2449d09dbc9dcef95df21e | [
"Apache-2.0"
] | 843 | 2018-09-03T05:28:16.000Z | 2022-03-22T20:11:20.000Z | subset/dot1x/authenticator/eap_socket.py | grafnu/daq-staging | 47c1015dbfd1a5bbcaa373f6b6d5cf2e53402b4f | [
"Apache-2.0"
] | 33 | 2018-05-22T15:33:57.000Z | 2022-02-15T21:55:56.000Z | """Handle the EAP socket"""
from __future__ import absolute_import
import struct
from abc import ABC, abstractmethod
from fcntl import ioctl
import errno
import socket
from mac_address import MacAddress
from utils import get_logger, get_interface_mac
class PromiscuousSocket(ABC):
"""Abstract Raw Socket in Promiscuous Mode"""
SIOCGIFINDEX = 0x8933
PACKET_MR_PROMISC = 1
SOL_PACKET = 263
PACKET_ADD_MEMBERSHIP = 1
@abstractmethod
def send(self, data): # pylint: disable=missing-docstring
pass
@abstractmethod
def receive(self): # pylint: disable=missing-docstring
pass
@abstractmethod
def setup(self): # pylint: disable=missing-docstring
pass
def __init__(self, interface_name, log_prefix):
self.socket = None
self.interface_index = None
self.interface_name = interface_name
self.logger = get_logger(log_prefix)
self.eap_address = MacAddress.from_string(get_interface_mac(interface_name))
def _setup(self, socket_filter):
"""Set up the socket"""
self.logger.debug("Setting up socket on interface: %s", self.interface_name)
try:
self.open(socket_filter)
self.get_interface_index()
self.set_interface_promiscuous()
except socket.error as err:
self.logger.error("Unable to setup socket: %s", str(err))
raise err
def open(self, socket_filter):
"""Setup EAP socket"""
self.socket = socket.socket(socket.PF_PACKET, socket.SOCK_RAW, socket_filter)
self.socket.bind((self.interface_name, 0))
def get_interface_index(self):
"""Get the interface index of the EAP Socket"""
# http://man7.org/linux/man-pages/man7/netdevice.7.html
request = struct.pack('16sI', self.interface_name.encode("utf-8"), 0)
response = ioctl(self.socket, self.SIOCGIFINDEX, request)
_ifname, self.interface_index = struct.unpack('16sI', response)
def set_interface_promiscuous(self):
"""Sets the EAP interface to be able to receive EAP messages"""
request = struct.pack("IHH8s", self.interface_index, self.PACKET_MR_PROMISC,
len(self.eap_address.address), self.eap_address.address)
self.socket.setsockopt(self.SOL_PACKET, self.PACKET_ADD_MEMBERSHIP, request)
def shutdown(self):
"""Shutdown socket"""
self.socket.close()
class EapSocket(PromiscuousSocket):
"""Handle the EAP socket"""
def setup(self):
"""Set up the socket"""
self._setup(socket.htons(0x888e))
self.socket.settimeout(2.0)
def send(self, data):
"""send on eap socket.
data (bytes): data to send"""
self.socket.send(data)
def receive(self):
"""receive from eap socket"""
# While socket hasn't been closed
while self.socket.fileno() != -1:
try:
return self.socket.recv(4096)
except socket.timeout:
# Socket timed out. Expected. Move on.
continue
except OSError as exception:
# socket closed
if exception.errno == errno.EBADFD:
break
except Exception:
raise
| 32.539216 | 86 | 0.632419 |
from __future__ import absolute_import
import struct
from abc import ABC, abstractmethod
from fcntl import ioctl
import errno
import socket
from mac_address import MacAddress
from utils import get_logger, get_interface_mac
class PromiscuousSocket(ABC):
SIOCGIFINDEX = 0x8933
PACKET_MR_PROMISC = 1
SOL_PACKET = 263
PACKET_ADD_MEMBERSHIP = 1
@abstractmethod
def send(self, data):
pass
@abstractmethod
def receive(self):
pass
@abstractmethod
def setup(self):
pass
def __init__(self, interface_name, log_prefix):
self.socket = None
self.interface_index = None
self.interface_name = interface_name
self.logger = get_logger(log_prefix)
self.eap_address = MacAddress.from_string(get_interface_mac(interface_name))
def _setup(self, socket_filter):
self.logger.debug("Setting up socket on interface: %s", self.interface_name)
try:
self.open(socket_filter)
self.get_interface_index()
self.set_interface_promiscuous()
except socket.error as err:
self.logger.error("Unable to setup socket: %s", str(err))
raise err
def open(self, socket_filter):
self.socket = socket.socket(socket.PF_PACKET, socket.SOCK_RAW, socket_filter)
self.socket.bind((self.interface_name, 0))
def get_interface_index(self):
request = struct.pack('16sI', self.interface_name.encode("utf-8"), 0)
response = ioctl(self.socket, self.SIOCGIFINDEX, request)
_ifname, self.interface_index = struct.unpack('16sI', response)
def set_interface_promiscuous(self):
request = struct.pack("IHH8s", self.interface_index, self.PACKET_MR_PROMISC,
len(self.eap_address.address), self.eap_address.address)
self.socket.setsockopt(self.SOL_PACKET, self.PACKET_ADD_MEMBERSHIP, request)
def shutdown(self):
self.socket.close()
class EapSocket(PromiscuousSocket):
def setup(self):
self._setup(socket.htons(0x888e))
self.socket.settimeout(2.0)
def send(self, data):
self.socket.send(data)
def receive(self):
while self.socket.fileno() != -1:
try:
return self.socket.recv(4096)
except socket.timeout:
# Socket timed out. Expected. Move on.
continue
except OSError as exception:
# socket closed
if exception.errno == errno.EBADFD:
break
except Exception:
raise
| true | true |
f71e45861229ef6c6f9868dcd6df59532c6cf2a6 | 9,410 | py | Python | liftoff/common/options_parser.py | tudor-berariu/liftoff | 4a0734006fb15c6da5013d437263161d3facf3d8 | [
"MIT"
] | 9 | 2018-04-09T16:41:38.000Z | 2021-09-15T13:53:23.000Z | liftoff/common/options_parser.py | tudor-berariu/liftoff | 4a0734006fb15c6da5013d437263161d3facf3d8 | [
"MIT"
] | 14 | 2018-09-26T11:44:30.000Z | 2021-09-26T07:46:10.000Z | liftoff/common/options_parser.py | tudor-berariu/liftoff | 4a0734006fb15c6da5013d437263161d3facf3d8 | [
"MIT"
] | 2 | 2018-04-23T10:26:00.000Z | 2021-02-24T19:32:07.000Z | """ In order to reuse and have a consistent set of arguments we use the
functions in this file to build argument parsers for all scripts.
TODO: change to class methods to common methods if there is no need to call
those functions outside an instance of OptionParser.
"""
from argparse import ArgumentParser, Namespace
from typing import List
import uuid
from .liftoff_config import LiftoffConfig
class OptionParser:
""" This class facilitates combining command line arguments and liftoff
settings.
"""
def __init__(self, name, arguments: List[str]) -> None:
self.liftoff_config = LiftoffConfig()
self.arg_parser = ArgumentParser(name)
self.arguments = [str(arg) for arg in arguments]
for arg in self.arguments:
getattr(self, f"_add_{arg:s}")()
def parse_args(self, args: List[str] = None, strict: bool = True) -> Namespace:
""" Parses command-line arguments and completes options with values
from liftoff configuration files.
"""
if strict:
opts = self.arg_parser.parse_args(args=args)
else:
opts = self.arg_parser.parse_known_args(args=args)
for arg in self.arguments:
if not hasattr(opts, arg) or getattr(opts, arg) is None:
setattr(opts, arg, self.liftoff_config.get(arg))
if hasattr(opts, "verbose") and isinstance(opts.verbose, list):
opts.verbose = len(opts.verbose)
opts.session_id = str(uuid.uuid4())
return opts
def _add_all(self) -> None:
self.arg_parser.add_argument(
"-a",
"--all",
action="store_true",
dest="all",
help="Target all experiments not just the latest.",
)
def _add_append_to(self) -> None:
self.arg_parser.add_argument(
"--append-to",
dest="append_to",
required=False,
type=str,
help="Append files to some existing experiment.",
)
def _add_args(self) -> None:
self.arg_parser.add_argument(
"--args",
dest="args",
type=str,
nargs="*",
help="Use these values to overwrite the config file.",
)
def _add_copy_to_clipboard(self) -> None:
self.arg_parser.add_argument(
"--cc",
action="store_true",
dest="copy_to_clipboard",
help="Copy experiment path to clipboard",
)
def _add_config_path(self) -> None:
self.arg_parser.add_argument(
"config_path", type=str, help="Give a specific name to the experiment."
)
def _add_do(self) -> None:
self.arg_parser.add_argument(
"--do",
action="store_true",
dest="do",
help="Apply the actions (do not only simulate).",
)
def _add_crashed_only(self) -> None:
self.arg_parser.add_argument(
"--crashed-only",
action="store_true",
dest="crashed_only",
help="Apply the actions only to crashed subexperiments.",
)
def _add_experiment(self) -> None:
self.arg_parser.add_argument(
"experiment",
nargs="?",
type=str,
help="Give a specific name to the experiment.",
)
def _add_filters(self) -> None:
self.arg_parser.add_argument(
"--filters",
dest="filters",
type=str,
nargs="*",
help="Use these values to filter experiments to be run.",
)
def _add_gpus(self) -> None:
self.arg_parser.add_argument(
"--gpus", dest="gpus", nargs="*", default=[], help="Available GPUs"
)
def _add_name(self) -> None:
self.arg_parser.add_argument(
"--name",
dest="name",
required=False,
type=str,
help="Give a specific name to the experiment.",
)
def _add_optimize(self) -> None:
self.arg_parser.add_argument(
"-O",
action="store_true",
dest="optimize",
help="Send -OO to python process.",
)
def _add_overwrite(self) -> None:
self.arg_parser.add_argument(
"--overwrite",
action="store_true",
dest="overwrite",
help="Overwrite files if you find them (not if .__end is there",
)
def _add_no_detach(self) -> None:
self.arg_parser.add_argument(
"--no-detach",
action="store_true",
dest="no_detach",
help="Do not detach the process with nohup.",
)
def _add_per_gpu(self) -> None:
self.arg_parser.add_argument(
"--per-gpu",
dest="per_gpu",
nargs="*",
type=int,
default=[],
help="Maximum procs to load on each GPU.",
)
def _add_pid(self) -> None:
self.arg_parser.add_argument("pid", type=int, help="PID of liftoff to kill.")
def _add_procs_no(self) -> None:
default_value = self.liftoff_config.get("procs_no")
if default_value is None:
default_value = 1
default_value = int(default_value)
self.arg_parser.add_argument(
"--procs-no",
dest="procs_no",
required=False,
type=int,
default=default_value,
help="Number of runs for each sub-experiment",
)
def _add_results_path(self) -> None:
default_value = self.liftoff_config.get("results_path")
if default_value is None:
default_value = "./results"
default_value = str(default_value)
self.arg_parser.add_argument(
"--results-path",
dest="results_path",
required=False,
type=str,
default=default_value,
help="Specify the results folder.",
)
def _add_runs_no(self) -> None:
default_value = self.liftoff_config.get("runs_no")
if default_value is None:
default_value = 1
default_value = int(default_value)
self.arg_parser.add_argument(
"--runs-no",
dest="runs_no",
required=False,
type=int,
default=default_value,
help="Number of runs for each sub-experiment",
)
def _add_runs(self) -> None:
self.arg_parser.add_argument(
"--runs",
dest="runs",
required=True,
type=int,
nargs="+",
help="Runs we refer to here.",
)
def _add_safe(self) -> None:
self.arg_parser.add_argument(
"--safe",
action="store_true",
dest="safe",
help="Do not clean ended, but locked ones.",
)
def _add_script(self) -> None:
self.arg_parser.add_argument(
"script", type=str, help="Script to be executed with all those configs."
)
def _add_clean_all(self) -> None:
self.arg_parser.add_argument(
"--clean-all",
action="store_true",
dest="clean_all",
help="Clean *all* the files an experiment run produced.",
)
def _add_timestamp_fmt(self) -> None:
default_value = self.liftoff_config.get("timestamp_fmt")
if default_value is None:
default_value = "%Y%b%d-%H%M%S"
default_value = str(default_value)
self.arg_parser.add_argument(
"--timestamp-fmt",
type=str,
dest="timestamp_fmt",
default=default_value,
help="Timestamp format to be used.",
)
def _add_verbose(self) -> None:
self.arg_parser.add_argument(
"-v",
const=1,
dest="verbose",
action="append_const",
help="Verbose level (default: 0) e.g. -v / -vv / -vvv",
)
def _add_session_id(self) -> None:
self.arg_parser.add_argument(
"--session-id",
type=str,
dest="session_id",
required=True,
help="Seesion id (needed to identify process by command).",
)
def _add_time_limit(self) -> None:
self.arg_parser.add_argument(
"--time-limit",
type=int,
dest="time_limit",
default=0,
help="Stop if this time limit (in miuntes) is exceeded.",
)
def _add_start_by(self) -> None:
self.arg_parser.add_argument(
"--start-by",
type=int,
dest="start_by",
default=0,
help="Do not launch processes if this time (in seconds) has been exceeded.",
)
def _add_end_by(self) -> None:
self.arg_parser.add_argument(
"--end-by",
type=int,
dest="end_by",
default=0,
help="Pass this to the processes using ENDBY variable.",
)
def _add_max_runs(self) -> None:
self.arg_parser.add_argument(
"--max-runs",
type=int,
dest="max_runs",
default=0,
help="Stop if max runs have been exceeded. (default 0 - run all).",
)
| 30.063898 | 88 | 0.541339 |
from argparse import ArgumentParser, Namespace
from typing import List
import uuid
from .liftoff_config import LiftoffConfig
class OptionParser:
def __init__(self, name, arguments: List[str]) -> None:
self.liftoff_config = LiftoffConfig()
self.arg_parser = ArgumentParser(name)
self.arguments = [str(arg) for arg in arguments]
for arg in self.arguments:
getattr(self, f"_add_{arg:s}")()
def parse_args(self, args: List[str] = None, strict: bool = True) -> Namespace:
if strict:
opts = self.arg_parser.parse_args(args=args)
else:
opts = self.arg_parser.parse_known_args(args=args)
for arg in self.arguments:
if not hasattr(opts, arg) or getattr(opts, arg) is None:
setattr(opts, arg, self.liftoff_config.get(arg))
if hasattr(opts, "verbose") and isinstance(opts.verbose, list):
opts.verbose = len(opts.verbose)
opts.session_id = str(uuid.uuid4())
return opts
def _add_all(self) -> None:
self.arg_parser.add_argument(
"-a",
"--all",
action="store_true",
dest="all",
help="Target all experiments not just the latest.",
)
def _add_append_to(self) -> None:
self.arg_parser.add_argument(
"--append-to",
dest="append_to",
required=False,
type=str,
help="Append files to some existing experiment.",
)
def _add_args(self) -> None:
self.arg_parser.add_argument(
"--args",
dest="args",
type=str,
nargs="*",
help="Use these values to overwrite the config file.",
)
def _add_copy_to_clipboard(self) -> None:
self.arg_parser.add_argument(
"--cc",
action="store_true",
dest="copy_to_clipboard",
help="Copy experiment path to clipboard",
)
def _add_config_path(self) -> None:
self.arg_parser.add_argument(
"config_path", type=str, help="Give a specific name to the experiment."
)
def _add_do(self) -> None:
self.arg_parser.add_argument(
"--do",
action="store_true",
dest="do",
help="Apply the actions (do not only simulate).",
)
def _add_crashed_only(self) -> None:
self.arg_parser.add_argument(
"--crashed-only",
action="store_true",
dest="crashed_only",
help="Apply the actions only to crashed subexperiments.",
)
def _add_experiment(self) -> None:
self.arg_parser.add_argument(
"experiment",
nargs="?",
type=str,
help="Give a specific name to the experiment.",
)
def _add_filters(self) -> None:
self.arg_parser.add_argument(
"--filters",
dest="filters",
type=str,
nargs="*",
help="Use these values to filter experiments to be run.",
)
def _add_gpus(self) -> None:
self.arg_parser.add_argument(
"--gpus", dest="gpus", nargs="*", default=[], help="Available GPUs"
)
def _add_name(self) -> None:
self.arg_parser.add_argument(
"--name",
dest="name",
required=False,
type=str,
help="Give a specific name to the experiment.",
)
def _add_optimize(self) -> None:
self.arg_parser.add_argument(
"-O",
action="store_true",
dest="optimize",
help="Send -OO to python process.",
)
def _add_overwrite(self) -> None:
self.arg_parser.add_argument(
"--overwrite",
action="store_true",
dest="overwrite",
help="Overwrite files if you find them (not if .__end is there",
)
def _add_no_detach(self) -> None:
self.arg_parser.add_argument(
"--no-detach",
action="store_true",
dest="no_detach",
help="Do not detach the process with nohup.",
)
def _add_per_gpu(self) -> None:
self.arg_parser.add_argument(
"--per-gpu",
dest="per_gpu",
nargs="*",
type=int,
default=[],
help="Maximum procs to load on each GPU.",
)
def _add_pid(self) -> None:
self.arg_parser.add_argument("pid", type=int, help="PID of liftoff to kill.")
def _add_procs_no(self) -> None:
default_value = self.liftoff_config.get("procs_no")
if default_value is None:
default_value = 1
default_value = int(default_value)
self.arg_parser.add_argument(
"--procs-no",
dest="procs_no",
required=False,
type=int,
default=default_value,
help="Number of runs for each sub-experiment",
)
def _add_results_path(self) -> None:
default_value = self.liftoff_config.get("results_path")
if default_value is None:
default_value = "./results"
default_value = str(default_value)
self.arg_parser.add_argument(
"--results-path",
dest="results_path",
required=False,
type=str,
default=default_value,
help="Specify the results folder.",
)
def _add_runs_no(self) -> None:
default_value = self.liftoff_config.get("runs_no")
if default_value is None:
default_value = 1
default_value = int(default_value)
self.arg_parser.add_argument(
"--runs-no",
dest="runs_no",
required=False,
type=int,
default=default_value,
help="Number of runs for each sub-experiment",
)
def _add_runs(self) -> None:
self.arg_parser.add_argument(
"--runs",
dest="runs",
required=True,
type=int,
nargs="+",
help="Runs we refer to here.",
)
def _add_safe(self) -> None:
self.arg_parser.add_argument(
"--safe",
action="store_true",
dest="safe",
help="Do not clean ended, but locked ones.",
)
def _add_script(self) -> None:
self.arg_parser.add_argument(
"script", type=str, help="Script to be executed with all those configs."
)
def _add_clean_all(self) -> None:
self.arg_parser.add_argument(
"--clean-all",
action="store_true",
dest="clean_all",
help="Clean *all* the files an experiment run produced.",
)
def _add_timestamp_fmt(self) -> None:
default_value = self.liftoff_config.get("timestamp_fmt")
if default_value is None:
default_value = "%Y%b%d-%H%M%S"
default_value = str(default_value)
self.arg_parser.add_argument(
"--timestamp-fmt",
type=str,
dest="timestamp_fmt",
default=default_value,
help="Timestamp format to be used.",
)
def _add_verbose(self) -> None:
self.arg_parser.add_argument(
"-v",
const=1,
dest="verbose",
action="append_const",
help="Verbose level (default: 0) e.g. -v / -vv / -vvv",
)
def _add_session_id(self) -> None:
self.arg_parser.add_argument(
"--session-id",
type=str,
dest="session_id",
required=True,
help="Seesion id (needed to identify process by command).",
)
def _add_time_limit(self) -> None:
self.arg_parser.add_argument(
"--time-limit",
type=int,
dest="time_limit",
default=0,
help="Stop if this time limit (in miuntes) is exceeded.",
)
def _add_start_by(self) -> None:
self.arg_parser.add_argument(
"--start-by",
type=int,
dest="start_by",
default=0,
help="Do not launch processes if this time (in seconds) has been exceeded.",
)
def _add_end_by(self) -> None:
self.arg_parser.add_argument(
"--end-by",
type=int,
dest="end_by",
default=0,
help="Pass this to the processes using ENDBY variable.",
)
def _add_max_runs(self) -> None:
self.arg_parser.add_argument(
"--max-runs",
type=int,
dest="max_runs",
default=0,
help="Stop if max runs have been exceeded. (default 0 - run all).",
)
| true | true |
f71e45e2769e046b231ac437204558ee5ddd0b98 | 2,984 | py | Python | vaas-app/src/vaas/configuration/tests/test_loader.py | allegro/vaas | 3d2d1f1a9dae6ac69a13563a37f9bfdf4f986ae2 | [
"Apache-2.0"
] | 251 | 2015-09-02T10:50:51.000Z | 2022-03-16T08:00:35.000Z | vaas-app/src/vaas/configuration/tests/test_loader.py | allegro/vaas | 3d2d1f1a9dae6ac69a13563a37f9bfdf4f986ae2 | [
"Apache-2.0"
] | 154 | 2015-09-02T14:54:08.000Z | 2022-03-16T08:34:17.000Z | vaas-app/src/vaas/configuration/tests/test_loader.py | allegro/vaas | 3d2d1f1a9dae6ac69a13563a37f9bfdf4f986ae2 | [
"Apache-2.0"
] | 31 | 2015-09-03T07:51:05.000Z | 2020-09-24T09:02:40.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from unittest.mock import patch, mock_open
from nose.tools import assert_equals
from django.test import TestCase
from vaas.configuration.loader import YamlConfigLoader
USER_HOME_PATH = '/user/path/.vaas'
VAAS_APP_RESOURCES_PATH = '/vaas/app/resources'
class YamlConfigLoaderTest(TestCase):
def file_exists_side_effect(self, arg):
return arg in self.file_existence and self.file_existence[arg]
def setUp(self):
self.file_existence = {}
exists_patcher = patch('os.path.exists')
file_exists_mock = exists_patcher.start()
file_exists_mock.side_effect = self.file_exists_side_effect
expand_patcher = patch('os.path.expanduser')
expanduser_mock = expand_patcher.start()
expanduser_mock.return_value = USER_HOME_PATH
abspath_patcher = patch('os.path.abspath')
abspath_mock = abspath_patcher.start()
abspath_mock.return_value = VAAS_APP_RESOURCES_PATH
self.addCleanup(exists_patcher.stop)
self.addCleanup(expand_patcher.stop)
self.addCleanup(abspath_patcher.stop)
def test_should_init_search_paths_with_user_and_resources_paths_if_user_path_exists(self):
self.file_existence = {
USER_HOME_PATH: True
}
directories = YamlConfigLoader().config_directories
assert_equals(2, len(directories))
assert_equals([USER_HOME_PATH, VAAS_APP_RESOURCES_PATH], directories)
def test_should_determine_file_from_users_location_if_exists(self):
expected_path = "{}/{}".format(USER_HOME_PATH, 'test.yaml')
self.file_existence = {
USER_HOME_PATH: True,
expected_path: True
}
assert_equals(expected_path, YamlConfigLoader().determine_config_file('test.yaml'))
def test_should_determine_file_from_resource_location_if_exists(self):
expected_path = "{}/{}".format(VAAS_APP_RESOURCES_PATH, 'test.yaml')
self.file_existence = {
USER_HOME_PATH: False,
expected_path: True
}
assert_equals(expected_path, YamlConfigLoader().determine_config_file('test.yaml'))
def test_should_not_determine_file_if_not_exists_in_any_location(self):
resource_path = "{}/{}".format(VAAS_APP_RESOURCES_PATH, 'test.yaml')
self.file_existence = {
USER_HOME_PATH: False,
resource_path: False
}
assert_equals(None, YamlConfigLoader().determine_config_file('test.yaml'))
@patch('builtins.open', mock_open(read_data="key1: value1\nkey2: value2"))
def test_should_return_config_tree(self):
expected_tree = {'key1': 'value1', 'key2': 'value2'}
self.file_existence = {
USER_HOME_PATH: False,
"{}/{}".format(VAAS_APP_RESOURCES_PATH, 'test.yaml'): True
}
assert_equals(expected_tree, YamlConfigLoader().get_config_tree('test.yaml'))
| 37.772152 | 94 | 0.699397 |
from __future__ import unicode_literals, absolute_import
from unittest.mock import patch, mock_open
from nose.tools import assert_equals
from django.test import TestCase
from vaas.configuration.loader import YamlConfigLoader
USER_HOME_PATH = '/user/path/.vaas'
VAAS_APP_RESOURCES_PATH = '/vaas/app/resources'
class YamlConfigLoaderTest(TestCase):
def file_exists_side_effect(self, arg):
return arg in self.file_existence and self.file_existence[arg]
def setUp(self):
self.file_existence = {}
exists_patcher = patch('os.path.exists')
file_exists_mock = exists_patcher.start()
file_exists_mock.side_effect = self.file_exists_side_effect
expand_patcher = patch('os.path.expanduser')
expanduser_mock = expand_patcher.start()
expanduser_mock.return_value = USER_HOME_PATH
abspath_patcher = patch('os.path.abspath')
abspath_mock = abspath_patcher.start()
abspath_mock.return_value = VAAS_APP_RESOURCES_PATH
self.addCleanup(exists_patcher.stop)
self.addCleanup(expand_patcher.stop)
self.addCleanup(abspath_patcher.stop)
def test_should_init_search_paths_with_user_and_resources_paths_if_user_path_exists(self):
self.file_existence = {
USER_HOME_PATH: True
}
directories = YamlConfigLoader().config_directories
assert_equals(2, len(directories))
assert_equals([USER_HOME_PATH, VAAS_APP_RESOURCES_PATH], directories)
def test_should_determine_file_from_users_location_if_exists(self):
expected_path = "{}/{}".format(USER_HOME_PATH, 'test.yaml')
self.file_existence = {
USER_HOME_PATH: True,
expected_path: True
}
assert_equals(expected_path, YamlConfigLoader().determine_config_file('test.yaml'))
def test_should_determine_file_from_resource_location_if_exists(self):
expected_path = "{}/{}".format(VAAS_APP_RESOURCES_PATH, 'test.yaml')
self.file_existence = {
USER_HOME_PATH: False,
expected_path: True
}
assert_equals(expected_path, YamlConfigLoader().determine_config_file('test.yaml'))
def test_should_not_determine_file_if_not_exists_in_any_location(self):
resource_path = "{}/{}".format(VAAS_APP_RESOURCES_PATH, 'test.yaml')
self.file_existence = {
USER_HOME_PATH: False,
resource_path: False
}
assert_equals(None, YamlConfigLoader().determine_config_file('test.yaml'))
@patch('builtins.open', mock_open(read_data="key1: value1\nkey2: value2"))
def test_should_return_config_tree(self):
expected_tree = {'key1': 'value1', 'key2': 'value2'}
self.file_existence = {
USER_HOME_PATH: False,
"{}/{}".format(VAAS_APP_RESOURCES_PATH, 'test.yaml'): True
}
assert_equals(expected_tree, YamlConfigLoader().get_config_tree('test.yaml'))
| true | true |
f71e4676c5acc3e5917505e1ba0ea632737ca9e1 | 1,666 | py | Python | config/wsgi.py | Maharshi-Pathak/gumroad-clone | 97ab1bd71585ee7a4279ad0189980e1b69c31948 | [
"MIT"
] | 11 | 2021-04-22T06:26:42.000Z | 2022-03-27T21:19:57.000Z | config/wsgi.py | Maharshi-Pathak/gumroad-clone | 97ab1bd71585ee7a4279ad0189980e1b69c31948 | [
"MIT"
] | null | null | null | config/wsgi.py | Maharshi-Pathak/gumroad-clone | 97ab1bd71585ee7a4279ad0189980e1b69c31948 | [
"MIT"
] | 6 | 2021-02-10T18:12:27.000Z | 2022-03-14T02:17:38.000Z | """
WSGI config for djgumroad project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from pathlib import Path
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# djgumroad directory.
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent
sys.path.append(str(ROOT_DIR / "djgumroad"))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 42.717949 | 79 | 0.803121 | import os
import sys
from pathlib import Path
from django.core.wsgi import get_wsgi_application
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent
sys.path.append(str(ROOT_DIR / "djgumroad"))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| true | true |
f71e473268e1bb2e5e8d4a270d302baf99209cad | 196 | py | Python | Python Projects/Find the area of a circle.py | perfect104/python-codes | b8994b1f92f88068c2a36a82e32784b093a8d0df | [
"MIT"
] | 4 | 2019-10-24T03:56:53.000Z | 2020-01-07T08:16:42.000Z | Python Projects/Find the area of a circle.py | perfect104/python-codes | b8994b1f92f88068c2a36a82e32784b093a8d0df | [
"MIT"
] | null | null | null | Python Projects/Find the area of a circle.py | perfect104/python-codes | b8994b1f92f88068c2a36a82e32784b093a8d0df | [
"MIT"
] | 4 | 2019-12-30T08:10:27.000Z | 2020-03-02T05:06:23.000Z | '''
Formula for area of circle
Area = pi * r^2
where pi is constant and r is the radius of the circle
'''
def findarea(r):
PI = 3.142
return PI * (r*r);
print("Area is %.6f" % findarea(5));
| 15.076923 | 55 | 0.627551 | def findarea(r):
PI = 3.142
return PI * (r*r);
print("Area is %.6f" % findarea(5));
| true | true |
f71e48b11e64356393129e207a4d9e71cf25e746 | 45 | py | Python | barrelseq/analysis/__init__.py | BeckResearchLab/barrelseq | 044b9f69f10b4b0413231d821ea80af1c7c31544 | [
"MIT"
] | 1 | 2021-11-27T08:35:15.000Z | 2021-11-27T08:35:15.000Z | barrelseq/analysis/__init__.py | BeckResearchLab/barrelseq | 044b9f69f10b4b0413231d821ea80af1c7c31544 | [
"MIT"
] | 5 | 2018-09-19T21:50:01.000Z | 2019-07-16T22:14:52.000Z | barrelseq/analysis/__init__.py | BeckResearchLab/barrelseq | 044b9f69f10b4b0413231d821ea80af1c7c31544 | [
"MIT"
] | null | null | null | from .deseq2 import *
from .example import *
| 15 | 22 | 0.733333 | from .deseq2 import *
from .example import *
| true | true |
f71e48d15d84ca1874a3d3991f474a16272b83ef | 5,583 | py | Python | GuoxiSuspensionBridge01/GuoxiSuspensionBridge01/StructureSketch.py | zjkl19/AbaqusPython | d9c72d15f8928f1938cee46a4b39e2c44b03b62f | [
"MIT"
] | 16 | 2018-05-06T06:09:52.000Z | 2022-03-29T08:50:00.000Z | GuoxiSuspensionBridge01/GuoxiSuspensionBridge01/StructureSketch.py | zjkl19/AbaqusPython | d9c72d15f8928f1938cee46a4b39e2c44b03b62f | [
"MIT"
] | null | null | null | GuoxiSuspensionBridge01/GuoxiSuspensionBridge01/StructureSketch.py | zjkl19/AbaqusPython | d9c72d15f8928f1938cee46a4b39e2c44b03b62f | [
"MIT"
] | 12 | 2018-04-04T08:32:56.000Z | 2021-08-24T11:34:36.000Z | class StructureSketch(object):
"""Create 'Sketch' of the structure"""
def __init__(self,structureModel,structureGeometry):
"""init
Required argument:
Optional arguments:
None.
Return value:
Exceptions:
None.
"""
self.structureGeometry=structureGeometry
self.structureModel=structureModel
def CreateSketch(self):
"""Create Sketch
function summary
Args:
structureGeometry: structureGeometry instance
Returns:
Raises:
"""
#design pattern: builder
self.__CreateTowerSketch()
self.__CreateStiffeningGirderSketch()
self.__CreateGirderRigidarmSketch()
self.__CreateCableSketch()
self.__CreateSuspenderSketch();
def __CreateTowerSketch(self):
"""CreateTowerSketch
function summary
Args:
Returns:
Raises:
"""
#Tower:
dTB=self.structureGeometry.downTowerBottomCoordinate
rUD=self.structureGeometry.rUpDownTowerCoordinate
uTT=self.structureGeometry.upTowerTopCoordinate
self.towerSketch=[]
for i in range(0,2):
mySketch = self.structureModel.ConstrainedSketch(name='towerSketch'+str(i+1),sheetSize=10.0)
#dTB[0][0][0]: 1# tower, 1# tower column, x coordinate
mySketch.Line(point1=(dTB[i][0][0],dTB[i][0][1]), point2=(rUD[i][0][0],rUD[i][0][1]))
mySketch.Line(point1=(rUD[i][0][0],rUD[i][0][1]), point2=(uTT[i][0][0],uTT[i][0][1]))
mySketch.Line(point1=(uTT[i][0][0],uTT[i][0][1]), point2=(uTT[i][1][0]+(uTT[i][1][2]-uTT[i][0][2]),uTT[i][1][1]))
mySketch.Line(point1=(uTT[i][1][0]+(uTT[i][1][2]-uTT[i][0][2]),uTT[i][1][1]), point2=(rUD[i][1][0]+(rUD[i][1][2]-rUD[i][0][2]),rUD[i][1][1]))
mySketch.Line(point1=(rUD[i][1][0]+(rUD[i][1][2]-rUD[i][0][2]),rUD[i][1][1]), point2=(dTB[i][1][0]+(dTB[i][1][2]-dTB[i][0][2]),dTB[i][1][1]))
self.towerSketch.append(mySketch)
self.towerSketch=tuple(self.towerSketch)
def __CreateStiffeningGirderSketch(self):
"""Create Stiffening Girder Sketch
function summary
Args:
Returns:
Raises:
"""
eP=self.structureGeometry.EndPointCoordinate
rGR=self.structureGeometry.rGirderRigidarmCoordinate
rRS=self.structureGeometry.rRigidarmSuspenderCoordinate
#stiffeningGirderCoordinate=(eP[0],rGRC[0],eP[1])
lst=[]
lst.append(eP[0])
for i in range(len(rGR)):
lst.append(rGR[i])
lst.append(eP[1])
stiffeningGirderCoordinate=tuple(lst)
sG=stiffeningGirderCoordinate
mySketch = self.structureModel.ConstrainedSketch(name='stiffeningGirderSketch',sheetSize=10.0)
for i in range(len(sG)-1):
mySketch.Line(point1=(sG[i][0],sG[i][1]), point2=(sG[i+1][0],sG[i+1][1]))
self.stiffeningGirderSketch=mySketch
def __CreateGirderRigidarmSketch(self):
"""Create Girder Rigidarm Sketch
function summary
Args:
Returns:
Raises:
"""
rGR=self.structureGeometry.rGirderRigidarmCoordinate
rRS=self.structureGeometry.rRigidarmSuspenderCoordinate
#create GirderRigidarm Sketch
girderRigidarmSketch=[]
for i in range(len(rGR)):
mySketch = self.structureModel.ConstrainedSketch(name='girderRigidarmSketch'+str(i+1),sheetSize=10.0)
mySketch.Line(point1=(rRS[0][i][0]+rRS[0][i][2],rRS[0][i][1]), point2=(rGR[i][0],rGR[i][1]))
mySketch.Line(point1=(rGR[i][0],rGR[i][1]), point2=(rRS[1][i][0]+rRS[1][i][2],rRS[1][i][1]))
girderRigidarmSketch.append(mySketch) #rRS[0][i][2] is negative
self.girderRigidarmSketch=tuple(girderRigidarmSketch)
def __CreateCableSketch(self):
"""Create Cable Sketch
function summary
Args:
Returns:
Raises:
"""
#cable
cableCoordinate=self.structureGeometry.cableCoordinate
self.cableSketch=[]
cableSketch=[]
for i in range(len(cableCoordinate)):
mySketch = self.structureModel.ConstrainedSketch(name='cableSketch'+str(i+1),sheetSize=10.0)
for j in range(len(cableCoordinate[i])-1):
mySketch.Line(point1=(cableCoordinate[i][j][0],cableCoordinate[i][j][1]), point2=(cableCoordinate[i][j+1][0],cableCoordinate[i][j+1][1]))
cableSketch.append(mySketch)
self.cableSketch=tuple(cableSketch)
def __CreateSuspenderSketch(self):
"""Create Suspender Sketch
function summary
Args:
Returns:
Raises:
"""
hP=self.structureGeometry.hangingPointCoordinate
rRS=self.structureGeometry.rRigidarmSuspenderCoordinate
self.suspenderSketch=[]
suspenderSketch=[]
for i in range(len(rRS)):
for j in range(len(rRS[0])):
mySketch = self.structureModel.ConstrainedSketch(name='girderRigidarmSketch'+str(i+1)+'-'+str(j+1),sheetSize=10.0)
mySketch.Line(point1=(hP[i][j][0],hP[i][j][1]), point2=(rRS[i][j][0],rRS[i][j][1]))
suspenderSketch.append(mySketch)
self.suspenderSketch.append(tuple(suspenderSketch))
self.suspenderSketch=tuple(self.suspenderSketch)
| 31.016667 | 153 | 0.587319 | class StructureSketch(object):
def __init__(self,structureModel,structureGeometry):
self.structureGeometry=structureGeometry
self.structureModel=structureModel
def CreateSketch(self):
self.__CreateTowerSketch()
self.__CreateStiffeningGirderSketch()
self.__CreateGirderRigidarmSketch()
self.__CreateCableSketch()
self.__CreateSuspenderSketch();
def __CreateTowerSketch(self):
dTB=self.structureGeometry.downTowerBottomCoordinate
rUD=self.structureGeometry.rUpDownTowerCoordinate
uTT=self.structureGeometry.upTowerTopCoordinate
self.towerSketch=[]
for i in range(0,2):
mySketch = self.structureModel.ConstrainedSketch(name='towerSketch'+str(i+1),sheetSize=10.0)
nt2=(rUD[i][0][0],rUD[i][0][1]))
mySketch.Line(point1=(rUD[i][0][0],rUD[i][0][1]), point2=(uTT[i][0][0],uTT[i][0][1]))
mySketch.Line(point1=(uTT[i][0][0],uTT[i][0][1]), point2=(uTT[i][1][0]+(uTT[i][1][2]-uTT[i][0][2]),uTT[i][1][1]))
mySketch.Line(point1=(uTT[i][1][0]+(uTT[i][1][2]-uTT[i][0][2]),uTT[i][1][1]), point2=(rUD[i][1][0]+(rUD[i][1][2]-rUD[i][0][2]),rUD[i][1][1]))
mySketch.Line(point1=(rUD[i][1][0]+(rUD[i][1][2]-rUD[i][0][2]),rUD[i][1][1]), point2=(dTB[i][1][0]+(dTB[i][1][2]-dTB[i][0][2]),dTB[i][1][1]))
self.towerSketch.append(mySketch)
self.towerSketch=tuple(self.towerSketch)
def __CreateStiffeningGirderSketch(self):
eP=self.structureGeometry.EndPointCoordinate
rGR=self.structureGeometry.rGirderRigidarmCoordinate
rRS=self.structureGeometry.rRigidarmSuspenderCoordinate
lst=[]
lst.append(eP[0])
for i in range(len(rGR)):
lst.append(rGR[i])
lst.append(eP[1])
stiffeningGirderCoordinate=tuple(lst)
sG=stiffeningGirderCoordinate
mySketch = self.structureModel.ConstrainedSketch(name='stiffeningGirderSketch',sheetSize=10.0)
for i in range(len(sG)-1):
mySketch.Line(point1=(sG[i][0],sG[i][1]), point2=(sG[i+1][0],sG[i+1][1]))
self.stiffeningGirderSketch=mySketch
def __CreateGirderRigidarmSketch(self):
rGR=self.structureGeometry.rGirderRigidarmCoordinate
rRS=self.structureGeometry.rRigidarmSuspenderCoordinate
girderRigidarmSketch=[]
for i in range(len(rGR)):
mySketch = self.structureModel.ConstrainedSketch(name='girderRigidarmSketch'+str(i+1),sheetSize=10.0)
mySketch.Line(point1=(rRS[0][i][0]+rRS[0][i][2],rRS[0][i][1]), point2=(rGR[i][0],rGR[i][1]))
mySketch.Line(point1=(rGR[i][0],rGR[i][1]), point2=(rRS[1][i][0]+rRS[1][i][2],rRS[1][i][1]))
girderRigidarmSketch.append(mySketch)
self.girderRigidarmSketch=tuple(girderRigidarmSketch)
def __CreateCableSketch(self):
cableCoordinate=self.structureGeometry.cableCoordinate
self.cableSketch=[]
cableSketch=[]
for i in range(len(cableCoordinate)):
mySketch = self.structureModel.ConstrainedSketch(name='cableSketch'+str(i+1),sheetSize=10.0)
for j in range(len(cableCoordinate[i])-1):
mySketch.Line(point1=(cableCoordinate[i][j][0],cableCoordinate[i][j][1]), point2=(cableCoordinate[i][j+1][0],cableCoordinate[i][j+1][1]))
cableSketch.append(mySketch)
self.cableSketch=tuple(cableSketch)
def __CreateSuspenderSketch(self):
hP=self.structureGeometry.hangingPointCoordinate
rRS=self.structureGeometry.rRigidarmSuspenderCoordinate
self.suspenderSketch=[]
suspenderSketch=[]
for i in range(len(rRS)):
for j in range(len(rRS[0])):
mySketch = self.structureModel.ConstrainedSketch(name='girderRigidarmSketch'+str(i+1)+'-'+str(j+1),sheetSize=10.0)
mySketch.Line(point1=(hP[i][j][0],hP[i][j][1]), point2=(rRS[i][j][0],rRS[i][j][1]))
suspenderSketch.append(mySketch)
self.suspenderSketch.append(tuple(suspenderSketch))
self.suspenderSketch=tuple(self.suspenderSketch)
| true | true |
f71e48d95b34d49d935b2dc902be37364d27fbf9 | 3,413 | py | Python | marmot/features/source_lm_feature_extractor.py | qe-team/marmot | 38e09ff1d0a3025a6b7edeaaf6086ed047ec45ff | [
"0BSD"
] | 19 | 2015-08-21T13:06:37.000Z | 2021-07-26T09:56:29.000Z | marmot/features/source_lm_feature_extractor.py | qe-team/marmot | 38e09ff1d0a3025a6b7edeaaf6086ed047ec45ff | [
"0BSD"
] | 36 | 2015-01-13T13:01:07.000Z | 2016-06-22T06:59:59.000Z | marmot/features/source_lm_feature_extractor.py | qe-team/marmot | 38e09ff1d0a3025a6b7edeaaf6086ed047ec45ff | [
"0BSD"
] | 8 | 2015-12-11T16:41:47.000Z | 2019-04-08T16:28:40.000Z | import codecs
from subprocess import call
import os
from collections import defaultdict
from marmot.features.feature_extractor import FeatureExtractor
from marmot.util.ngram_window_extractor import left_context, right_context
from marmot.experiment.import_utils import mk_tmp_dir
from marmot.exceptions.no_data_error import NoDataError
# Class that extracts various LM features for source
class SourceLMFeatureExtractor(FeatureExtractor):
def __init__(self, ngram_file=None, corpus_file=None, srilm=None, tmp_dir=None, order=5):
# generate ngram counts
if ngram_file is None:
if srilm is None:
if 'SRILM' in os.environ:
srilm = os.environ['SRILM']
else:
print("No SRILM found")
return
if corpus_file is None:
print ("No corpus for LM generation")
return
srilm_ngram_count = os.path.join(srilm, 'ngram-count')
tmp_dir = mk_tmp_dir(tmp_dir)
lm_file = os.path.join(tmp_dir, 'lm_file')
ngram_file = os.path.join(tmp_dir, 'ngram_count_file')
call([srilm_ngram_count, '-text', corpus_file, '-lm', lm_file, '-order', str(order), '-write', ngram_file])
self.lm = defaultdict(int)
for line in codecs.open(ngram_file, encoding='utf-8'):
chunks = line[:-1].split('\t')
if len(chunks) == 2:
new_tuple = tuple(chunks[0].split())
new_number = int(chunks[1])
self.lm[new_tuple] = new_number
else:
print("Wrong ngram-counts file format at line '", line[:-1], "'")
self.order = order
def check_lm(self, ngram, side='left'):
for i in range(self.order, 0, -1):
if side == 'left':
cur_ngram = ngram[len(ngram)-i:]
elif side == 'right':
cur_ngram = ngram[:i]
else:
print("Unknown parameter 'side'", side)
return 0
if tuple(cur_ngram) in self.lm:
return i
return 0
# returns a set of features related to LM
# currently extracting: highest order ngram including the word and its LEFT context,
# highest order ngram including the word and its RIGHT context
def get_features(self, context_obj):
if 'source' not in context_obj:
raise NoDataError('source', context_obj, 'SourceLMFeatureExtractor')
if 'alignments' not in context_obj:
raise NoDataError('alignments', context_obj, 'SourceLMFeatureExtractor')
align_idx = context_obj['alignments'][context_obj['index']]
# unaligned
if align_idx is None:
return [0, 0]
align_token = context_obj['source'][align_idx]
left_ngram = left_context(context_obj['source'], align_token, context_size=2, idx=align_idx) + [align_token]
right_ngram = [align_token] + right_context(context_obj['source'], align_token, context_size=2, idx=align_idx)
left_ngram_order = self.check_lm(left_ngram, side='left')
right_ngram_order = self.check_lm(right_ngram, side='right')
return [left_ngram_order, right_ngram_order]
def get_feature_names(self):
return ['source_highest_order_ngram_left', 'source_highest_order_ngram_right']
| 41.621951 | 119 | 0.619982 | import codecs
from subprocess import call
import os
from collections import defaultdict
from marmot.features.feature_extractor import FeatureExtractor
from marmot.util.ngram_window_extractor import left_context, right_context
from marmot.experiment.import_utils import mk_tmp_dir
from marmot.exceptions.no_data_error import NoDataError
class SourceLMFeatureExtractor(FeatureExtractor):
def __init__(self, ngram_file=None, corpus_file=None, srilm=None, tmp_dir=None, order=5):
if ngram_file is None:
if srilm is None:
if 'SRILM' in os.environ:
srilm = os.environ['SRILM']
else:
print("No SRILM found")
return
if corpus_file is None:
print ("No corpus for LM generation")
return
srilm_ngram_count = os.path.join(srilm, 'ngram-count')
tmp_dir = mk_tmp_dir(tmp_dir)
lm_file = os.path.join(tmp_dir, 'lm_file')
ngram_file = os.path.join(tmp_dir, 'ngram_count_file')
call([srilm_ngram_count, '-text', corpus_file, '-lm', lm_file, '-order', str(order), '-write', ngram_file])
self.lm = defaultdict(int)
for line in codecs.open(ngram_file, encoding='utf-8'):
chunks = line[:-1].split('\t')
if len(chunks) == 2:
new_tuple = tuple(chunks[0].split())
new_number = int(chunks[1])
self.lm[new_tuple] = new_number
else:
print("Wrong ngram-counts file format at line '", line[:-1], "'")
self.order = order
def check_lm(self, ngram, side='left'):
for i in range(self.order, 0, -1):
if side == 'left':
cur_ngram = ngram[len(ngram)-i:]
elif side == 'right':
cur_ngram = ngram[:i]
else:
print("Unknown parameter 'side'", side)
return 0
if tuple(cur_ngram) in self.lm:
return i
return 0
def get_features(self, context_obj):
if 'source' not in context_obj:
raise NoDataError('source', context_obj, 'SourceLMFeatureExtractor')
if 'alignments' not in context_obj:
raise NoDataError('alignments', context_obj, 'SourceLMFeatureExtractor')
align_idx = context_obj['alignments'][context_obj['index']]
if align_idx is None:
return [0, 0]
align_token = context_obj['source'][align_idx]
left_ngram = left_context(context_obj['source'], align_token, context_size=2, idx=align_idx) + [align_token]
right_ngram = [align_token] + right_context(context_obj['source'], align_token, context_size=2, idx=align_idx)
left_ngram_order = self.check_lm(left_ngram, side='left')
right_ngram_order = self.check_lm(right_ngram, side='right')
return [left_ngram_order, right_ngram_order]
def get_feature_names(self):
return ['source_highest_order_ngram_left', 'source_highest_order_ngram_right']
| true | true |
f71e495c79f4bb1a1505cad9bdde64d7e37c7ba1 | 1,293 | py | Python | proxy/core/tls/certificate.py | fisabiliyusri/proxy | 29934503251b704813ef3e7ed8c2a5ae69448c8a | [
"BSD-3-Clause"
] | null | null | null | proxy/core/tls/certificate.py | fisabiliyusri/proxy | 29934503251b704813ef3e7ed8c2a5ae69448c8a | [
"BSD-3-Clause"
] | 8 | 2022-01-23T10:51:59.000Z | 2022-03-29T22:11:57.000Z | proxy/core/tls/certificate.py | fisabiliyusri/proxy | 29934503251b704813ef3e7ed8c2a5ae69448c8a | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
from typing import Tuple, Optional
class TlsCertificate:
"""TLS Certificate"""
def __init__(self) -> None:
self.data: Optional[bytes] = None
def parse(self, raw: bytes) -> Tuple[bool, bytes]:
self.data = raw
return True, raw
def build(self) -> bytes:
assert self.data
return self.data
class TlsCertificateRequest:
"""TLS Certificate Request"""
def __init__(self) -> None:
self.data: Optional[bytes] = None
def parse(self, raw: bytes) -> Tuple[bool, bytes]:
return False, raw
def build(self) -> bytes:
assert self.data
return self.data
class TlsCertificateVerify:
"""TLS Certificate Verify"""
def __init__(self) -> None:
self.data: Optional[bytes] = None
def parse(self, raw: bytes) -> Tuple[bool, bytes]:
return False, raw
def build(self) -> bytes:
assert self.data
return self.data
| 23.509091 | 86 | 0.622583 |
from typing import Tuple, Optional
class TlsCertificate:
def __init__(self) -> None:
self.data: Optional[bytes] = None
def parse(self, raw: bytes) -> Tuple[bool, bytes]:
self.data = raw
return True, raw
def build(self) -> bytes:
assert self.data
return self.data
class TlsCertificateRequest:
def __init__(self) -> None:
self.data: Optional[bytes] = None
def parse(self, raw: bytes) -> Tuple[bool, bytes]:
return False, raw
def build(self) -> bytes:
assert self.data
return self.data
class TlsCertificateVerify:
def __init__(self) -> None:
self.data: Optional[bytes] = None
def parse(self, raw: bytes) -> Tuple[bool, bytes]:
return False, raw
def build(self) -> bytes:
assert self.data
return self.data
| true | true |
f71e4a16ec710380ca862000c177dc158aa1e97f | 3,879 | py | Python | template_registrator.py | Nepmia/N4-Framework | 84d98f3fe05ca02f938332e5970bca5482ef8ce7 | [
"MIT"
] | null | null | null | template_registrator.py | Nepmia/N4-Framework | 84d98f3fe05ca02f938332e5970bca5482ef8ce7 | [
"MIT"
] | null | null | null | template_registrator.py | Nepmia/N4-Framework | 84d98f3fe05ca02f938332e5970bca5482ef8ce7 | [
"MIT"
] | null | null | null | import os
import app
from termcolor import colored
from write import write
import re
from pathlib import Path
from template_handler import templates_lister
def template_registrator():
"""Get templateRegistrator module path and call template module and exporter functions
"""
module_path = f"{app.MODULE_FOLDER}/TemplateRegistrator/templates.js"
print(
colored("[N4] ", "blue"),
colored("Begining TemplateRegistrator module...", "cyan")
)
print(
colored("[N4] ", "blue"),
colored("Module folder is", "cyan"),
colored(module_path, "red")
)
templates_list = templates_lister()
templates_module(module_path)
templates_exporter(templates_list, module_path)
def templates_module(module_path):
"""Tests if template.js exists and clear it then rewrites a blank const template
Args:
module_path (string): path to templateRegistrator module
"""
base_template = "const Templates = {\n" # Basic JS const list template
print(
colored("[N4] ", "blue")
,colored("Testing", "cyan")
,colored("template.js", "red")
)
if os.path.exists(module_path): # Test if template.js already exists, if it does then it clears it.
print(
colored("[N4] ", "blue"),
colored("test positive, clearing content to rewrite updated templates list...", "cyan")
)
os.remove(module_path)
write(module_path, base_template, "w")
else:
print(
colored("[N4] ", "blue"),
colored("test negaticve, creating and writing templates list", "cyan")
)
write(module_path, base_template, "w")
def templates_exporter(templates_list, module_path):
"""Writes template.js by extracting titles of templates in template_list. It writes them in a js list like "home" : Tiltle, "page": Title and then close the list.
Args:
templates_list (list): List containing templates file names
module_path (string): path to templateRegistrator module
"""
for item in templates_list:
title = title_extractor(f"{app.PAGES_FOLDER}/{item}") # Get template title using title_extractor()
parsed_item = item.replace(".html", "")
if title == None:
print(
colored("[N4] ", "blue"),
colored("Given value is incorect, skipping.", "cyan")
)
pass
else:
write(module_path, f" {parsed_item} : \"{title}\",\n", "a")
write(module_path, "},", "a")
print(
colored("[N4] ", "blue"),
colored("Finished to build template list for InstantNav, building pages..", "cyan")
)
def title_extractor(file):
"""Extracts titles from template using regex
Args:
file (string): path to the file the function need to work on, usually a template
Returns:
[string]: return a correctly parsed title so it can be writen in template.js
"""
print(
colored("[N4] ", "blue"),
colored("Extracting title of", "cyan"),
colored(file,"red")
)
try:
raw_read = Path(file).read_text()
var_match = re.search(app.PAGE_TITLE_REGEX, raw_read)
var_content = var_match[0].replace("pageTitle=", "").replace("_", " ").replace("-", " ").replace("\"","")
print(
colored("[N4] ", "blue"),
colored(var_content, "red"),
colored("extracted.", "cyan")
)
return var_content
except TypeError:
print(
colored("[N4] ", "blue"),
colored("TypeError!", "red"),
colored("Couldn't extract the title, make sure it respect the syntax as following:", "cyan"),
colored("pageTitle=\"YourTitle\"", "green")
) | 34.945946 | 169 | 0.593194 | import os
import app
from termcolor import colored
from write import write
import re
from pathlib import Path
from template_handler import templates_lister
def template_registrator():
module_path = f"{app.MODULE_FOLDER}/TemplateRegistrator/templates.js"
print(
colored("[N4] ", "blue"),
colored("Begining TemplateRegistrator module...", "cyan")
)
print(
colored("[N4] ", "blue"),
colored("Module folder is", "cyan"),
colored(module_path, "red")
)
templates_list = templates_lister()
templates_module(module_path)
templates_exporter(templates_list, module_path)
def templates_module(module_path):
base_template = "const Templates = {\n"
print(
colored("[N4] ", "blue")
,colored("Testing", "cyan")
,colored("template.js", "red")
)
if os.path.exists(module_path):
print(
colored("[N4] ", "blue"),
colored("test positive, clearing content to rewrite updated templates list...", "cyan")
)
os.remove(module_path)
write(module_path, base_template, "w")
else:
print(
colored("[N4] ", "blue"),
colored("test negaticve, creating and writing templates list", "cyan")
)
write(module_path, base_template, "w")
def templates_exporter(templates_list, module_path):
for item in templates_list:
title = title_extractor(f"{app.PAGES_FOLDER}/{item}")
parsed_item = item.replace(".html", "")
if title == None:
print(
colored("[N4] ", "blue"),
colored("Given value is incorect, skipping.", "cyan")
)
pass
else:
write(module_path, f" {parsed_item} : \"{title}\",\n", "a")
write(module_path, "},", "a")
print(
colored("[N4] ", "blue"),
colored("Finished to build template list for InstantNav, building pages..", "cyan")
)
def title_extractor(file):
print(
colored("[N4] ", "blue"),
colored("Extracting title of", "cyan"),
colored(file,"red")
)
try:
raw_read = Path(file).read_text()
var_match = re.search(app.PAGE_TITLE_REGEX, raw_read)
var_content = var_match[0].replace("pageTitle=", "").replace("_", " ").replace("-", " ").replace("\"","")
print(
colored("[N4] ", "blue"),
colored(var_content, "red"),
colored("extracted.", "cyan")
)
return var_content
except TypeError:
print(
colored("[N4] ", "blue"),
colored("TypeError!", "red"),
colored("Couldn't extract the title, make sure it respect the syntax as following:", "cyan"),
colored("pageTitle=\"YourTitle\"", "green")
) | true | true |
f71e4ba69c62e1f2c427e125b8d3019de0eb2970 | 28 | py | Python | try.py | zf-nobody/pyaudio_portaudio | 8f703866e6b3d9aad30792fbd07fa63d504505f2 | [
"MIT"
] | null | null | null | try.py | zf-nobody/pyaudio_portaudio | 8f703866e6b3d9aad30792fbd07fa63d504505f2 | [
"MIT"
] | null | null | null | try.py | zf-nobody/pyaudio_portaudio | 8f703866e6b3d9aad30792fbd07fa63d504505f2 | [
"MIT"
] | null | null | null | print("I am having a try.")
| 14 | 27 | 0.642857 | print("I am having a try.")
| true | true |
f71e4c36153dcf173b1175cf8407c1244cc5dba5 | 19,855 | py | Python | isapi_wsgi.py | vbolshakov/isapi-wsgi | 0b3f7c9dbc72b4aa10203abbfa15fc51c2f11386 | [
"MIT"
] | 14 | 2015-03-16T13:39:05.000Z | 2022-03-28T15:26:46.000Z | isapi_wsgi.py | vbolshakov/isapi-wsgi | 0b3f7c9dbc72b4aa10203abbfa15fc51c2f11386 | [
"MIT"
] | 4 | 2015-03-14T03:58:08.000Z | 2019-07-05T12:28:06.000Z | isapi_wsgi.py | vbolshakov/isapi-wsgi | 0b3f7c9dbc72b4aa10203abbfa15fc51c2f11386 | [
"MIT"
] | 6 | 2016-02-16T15:57:53.000Z | 2020-10-05T11:36:38.000Z | """
$Id$
This is a ISAPI extension for a wsgi with 2 handlers classes.
- ISAPISimpleHandler which creates a new IsapiWsgiHandler object for
each request.
- ISAPIThreadPoolHandler where the wsgi requests are run on worker threads
from the thread pool.
Dependencies:
- python 2.2+
- win32 extensions
- wsgiref library from http://cvs.eby-sarna.com/wsgiref/
Based on isapi/test/extension_simple.py, PEP 333 etc
"""
__author__ = "Mark Rees <mark.john.rees@gmail.com>"
__release__ = "0.4"
__version__ = "$Rev$ $LastChangedDate$"
__url__ = "http://isapi-wsgi.googlecode.com"
__description__ = "ISAPI WSGI Handler"
__license__ = "MIT"
#this is first so that we can see import errors
import sys
if hasattr(sys, "isapidllhandle"):
import win32traceutil
try:
import isapi
except ImportError:
raise ImportError("Could not find module isapi. isapi_wsgi requires pywin32")
from isapi import isapicon, ExtensionError
from isapi.simple import SimpleExtension
from isapi.threaded_extension import ThreadPoolExtension
from wsgiref.handlers import BaseHandler
from wsgiref.util import shift_path_info
import sys
import os
import stat
import string
import re
try: from cStringIO import StringIO
except ImportError: from StringIO import StringIO
traceon = 0
def trace(*msgs):
"""Write trace message(s) so win32traceutil can display them"""
if not traceon: return
for msg in msgs:
print(msg)
class FoldedCaseString(str):
"""
From jaraco.util.string.FoldedCase:
A case insensitive string class; behaves just like str
except compares equal when the only variation is case.
>>> s = FoldedCaseString('hello world')
>>> s == 'Hello World'
True
>>> 'Hello World' == s
True
>>> s.index('O')
4
>>> s.split('O')
['hell', ' w', 'rld']
>>> sorted(map(FoldedCaseString, ['GAMMA', 'alpha', 'Beta']))
['alpha', 'Beta', 'GAMMA']
"""
def __lt__(self, other):
return self.lower() < other.lower()
def __gt__(self, other):
return self.lower() > other.lower()
def __eq__(self, other):
return self.lower() == other.lower()
def __hash__(self):
return hash(self.lower())
# cache lower since it's likely to be called frequently.
def lower(self):
self._lower = super(FoldedCaseString, self).lower()
self.lower = lambda: self._lower
return self._lower
def index(self, sub):
return self.lower().index(sub.lower())
def split(self, splitter=' ', maxsplit=0):
pattern = re.compile(re.escape(splitter), re.I)
return pattern.split(self, maxsplit)
class ECBDictAdapter(object):
"""
Adapt ECB to a read-only dictionary interface
>>> from fakeecb import FakeECB
>>> ecb = FakeECB()
>>> ecb_dict = ECBDictAdapter(ecb)
>>> ecb_dict['SCRIPT_NAME']
'/'
>>> ecb_dict['PATH_INFO']
'/'
"""
def __init__(self, ecb):
self.ecb = ecb
if sys.version_info > (3,0):
if ecb.Version >= 0x00060000:
# we can handle UNICODE_* variables.
self._get_variable = self._get_variable_py3k
else:
self._get_variable = self._get_variable_py3k_iis5
else:
self._get_variable = self._get_variable_py2k
def __getitem__(self, key):
try:
return self._get_variable(key)
except ExtensionError:
raise KeyError, key
# a few helpers specific to the IIS and python version.
def _get_variable(self, key):
raise RuntimeError("not reached: replaced at runtime in the ctor")
def _get_variable_py3k_iis5(self, key):
# IIS5 doesn't support UNICODE_* variable names...
return self.ecb.GetServerVariable(key).decode('latin-1')
def _get_variable_py3k(self, key):
# IIS6 and later on py3k - ask IIS for the unicode version.
return self.ecb.GetServerVariable('UNICODE_' + key)
def _get_variable_py2k(self, key):
# py2k - just use normal string objects.
return self.ecb.GetServerVariable(key)
def path_references_application(path, apps):
"""
Return true if the first element in the path matches any string
in the apps list.
>>> path_references_application('/foo/bar', ['foo','baz'])
True
"""
# assume separator is /
nodes = filter(None, path.split('/'))
return nodes and nodes[0] in apps
def interpretPathInfo(ecb_server_vars, app_names=[]):
"""
Based on the a dictionary of ECB server variables and list of valid
subapplication names, determine the correct PATH_INFO, SCRIPT_NAME,
and IIS_EXTENSION_PATH.
By valid, I mean SCRIPT_NAME + PATH_INFO is always the request path and
SCRIPT_NAME is the path to the WSGi application and PATH_INFO is the path
that the WSGI application expects to handle.
In IIS, the path to the extension sometimes varies from the script name,
particularly when the script map extenison is not '*'. IIS_EXTENSION_PATH
is set to the path that leads to the extension.
Return these values as a dict.
For the following doctests, I use a convention:
vappname : the IIS application
appname : the wsgi application (may be )
subappX : a wsgi sub application (must always follow appname)
proc : a method within the WSGI app (something that should appear in PATH_INFO)
--------------------------
First some common examples
Following is an example case where the extension is installed at the root
of the site, the requested
URL is /proc
>>> ecb_vars = dict(SCRIPT_NAME='/proc', PATH_INFO='/proc', APPL_MD_PATH='/LM/W3SVC/1/ROOT')
>>> interpretPathInfo(ecb_vars) == dict(SCRIPT_NAME='', PATH_INFO='/proc', IIS_EXTENSION_PATH='')
True
An example where the extension is installed to a virtual directory below
the root.
URL is /vappname/proc
>>> ecb_vars = dict(SCRIPT_NAME='/vappname/proc', PATH_INFO='/vappname/proc', APPL_MD_PATH='/LM/W3SVC/1/ROOT/vappname')
>>> interpretPathInfo(ecb_vars) == dict(SCRIPT_NAME='/vappname', PATH_INFO='/proc', IIS_EXTENSION_PATH='/vappname')
True
An example where the extension is installed to a virtual directory below
the root, and some subapps are present
>>> subapps = ('subapp1', 'subapp2')
URL is /vappname/proc
>>> ecb_vars = dict(SCRIPT_NAME='/vappname/proc', PATH_INFO='/vappname/proc', APPL_MD_PATH='/LM/W3SVC/1/ROOT/vappname')
>>> interpretPathInfo(ecb_vars, subapps) == dict(SCRIPT_NAME='/vappname', PATH_INFO='/proc', IIS_EXTENSION_PATH='/vappname')
True
URL is /vappname/subapp1/proc
>>> ecb_vars = dict(SCRIPT_NAME='/vappname/subapp1/proc', PATH_INFO='/vappname/subapp1/proc', APPL_MD_PATH='/LM/W3SVC/1/ROOT/vappname')
>>> interpretPathInfo(ecb_vars, subapps) == dict(SCRIPT_NAME='/vappname/subapp1', PATH_INFO='/proc', IIS_EXTENSION_PATH='/vappname', WSGI_SUBAPP='subapp1')
True
------------------------------
Now some less common scenarios
An example where the extension is installed only to the .wsgi extension to
a virtual directory below the root.
URL is /vappname/any.wsgi/proc
>>> ecb_vars = dict(SCRIPT_NAME='/vappname/any.wsgi', PATH_INFO='/vappname/any.wsgi/proc', APPL_MD_PATH='/LM/W3SVC/1/ROOT/vappname')
>>> interpretPathInfo(ecb_vars) == dict(SCRIPT_NAME='/vappname/any.wsgi', PATH_INFO='/proc', IIS_EXTENSION_PATH='/vappname')
True
An example where the extension is installed only to the .wsgi extension at
the root.
URL is /any_path/any.wsgi/proc
>>> ecb_vars = dict(SCRIPT_NAME='/any_path/any.wsgi', PATH_INFO='/any_path/any.wsgi/proc', APPL_MD_PATH='/LM/W3SVC/1/ROOT')
>>> interpretPathInfo(ecb_vars) == dict(SCRIPT_NAME='/any_path/any.wsgi', PATH_INFO='/proc', IIS_EXTENSION_PATH='')
True
How about an extension installed at the root to the .wsgi extension with
subapps
URL is /any_path/any.wsgi/subapp1/proc/foo
>>> ecb_vars = dict(SCRIPT_NAME='/any_path/any.wsgi', PATH_INFO='/any_path/any.wsgi/subapp1/proc/foo', APPL_MD_PATH='/LM/W3SVC/1/ROOT')
>>> interpretPathInfo(ecb_vars, subapps) == dict(SCRIPT_NAME='/any_path/any.wsgi/subapp1', PATH_INFO='/proc/foo', IIS_EXTENSION_PATH='', WSGI_SUBAPP='subapp1')
True
How about an extension installed at the root to the .wsgi extension with
subapps... this time default to the root app.
URL is /any_path/any.wsgi/proc/foo
>>> ecb_vars = dict(SCRIPT_NAME='/any_path/any.wsgi', PATH_INFO='/any_path/any.wsgi/proc/foo', APPL_MD_PATH='/LM/W3SVC/1/ROOT')
>>> interpretPathInfo(ecb_vars, subapps) == dict(SCRIPT_NAME='/any_path/any.wsgi', PATH_INFO='/proc/foo', IIS_EXTENSION_PATH='')
True
"""
PATH_INFO = ecb_server_vars['PATH_INFO']
SCRIPT_NAME = ecb_server_vars['SCRIPT_NAME']
IIS_EXTENSION_PATH = getISAPIExtensionPath(ecb_server_vars)
if SCRIPT_NAME == PATH_INFO:
# since they're the same, we're in a * mapped extension; use
# the application path
SCRIPT_NAME = IIS_EXTENSION_PATH
# remove the script name from the path info
if SCRIPT_NAME and PATH_INFO.startswith(SCRIPT_NAME):
_, PATH_INFO = PATH_INFO.split(SCRIPT_NAME, 1)
result = dict(
SCRIPT_NAME=SCRIPT_NAME,
PATH_INFO=PATH_INFO,
IIS_EXTENSION_PATH=IIS_EXTENSION_PATH,
)
# finally, adjust the result if the path info begins with a subapp
if path_references_application(PATH_INFO, app_names):
result.update(WSGI_SUBAPP = shift_path_info(result))
return result
def getISAPIExtensionPath(ecb_server_vars):
    """Returns the path to our extension DLL.
    This will be blank ('') if installed at the root, or something like
    '/foo' or '/bar/foo' if 'foo' is the name of the virtual directory
    where this extension is installed.
    >>> getISAPIExtensionPath(dict(APPL_MD_PATH='/LM/W3SVC/1/ROOT/test'))
    '/test'
    >>> getISAPIExtensionPath(dict(APPL_MD_PATH='/LM/W3SVC/1/ROOT'))
    ''
    This test exercises the less common mixed-case metadata path
    >>> getISAPIExtensionPath(dict(APPL_MD_PATH='/LM/W3SVC/1/Root'))
    ''
    """
    # The only way to locate ourselves is via the metabase location of the
    # ISAPI extension; everything after '/ROOT' (any case) is the virtual
    # directory path, e.g. '/LM/W3SVC/1/ROOT/test' -> '/test'.
    metabase_path = ecb_server_vars["APPL_MD_PATH"]
    root_marker = re.compile(re.escape("/ROOT"), re.IGNORECASE)
    # Exactly one '/ROOT' is expected; an unexpected metabase layout will
    # raise ValueError here, just as the previous unpacking did.
    site, extension_path = root_marker.split(metabase_path, 1)
    return extension_path
class ISAPIInputWrapper:
    # Based on ModPythonInputWrapper in mp_wsgi_handler.py
    """File-like object exposing the ISAPI request body as ``wsgi.input``.

    The entire request body is read from the ECB up-front into an in-memory
    StringIO buffer; all file-like methods then delegate to that buffer.
    """
    def __init__(self, ecb):
        self._in = StringIO()
        self._ecb = ecb
        if self._ecb.AvailableBytes > 0:
            data = self._ecb.AvailableData
            # Check if more data from client than what is in ecb.AvailableData
            excess = self._ecb.TotalBytes - self._ecb.AvailableBytes
            if excess > 0:
                extra = self._ecb.ReadClient(excess)
                data = data + extra
            self._in.write(data)
        # rewind to start
        self._in.seek(0)
    def next(self):
        # Python 2 iterator protocol; StringIO raises StopIteration at EOF.
        return self._in.next()
    def read(self, size=-1):
        return self._in.read(size)
    def readline(self, size=-1):
        return self._in.readline(size)
    def readlines(self, hint=-1):
        # NOTE(review): the hint argument is accepted but ignored -- confirm
        # that no caller relies on it limiting how much is read.
        return self._in.readlines()
    def reset(self):
        # Delegates to the (c)StringIO-specific reset(), equivalent to seek(0).
        self._in.reset()
    def seek(self, *args, **kwargs):
        self._in.seek(*args, **kwargs)
    def tell(self):
        return self._in.tell()
    def __iter__(self):
        # Materialises the remaining lines rather than iterating lazily.
        return iter(self._in.readlines())
class ISAPIOutputWrapper:
    """File-like object that forwards every write straight to the client
    via the ECB; used as the handler's stdout."""
    def __init__(self, ecb):
        self.ecb = ecb
    def write(self, msg):
        self.ecb.WriteClient(msg)
    def flush(self):
        # WriteClient sends immediately; nothing is buffered here.
        pass
class ISAPIErrorWrapper:
    """File-like object for ``wsgi.errors``; messages go to the trace log."""
    def write(self, msg):
        trace(msg)
    def flush(self):
        # trace() writes immediately; nothing is buffered here.
        pass
class IsapiWsgiHandler(BaseHandler):
    """wsgiref.handlers.BaseHandler specialised for an ISAPI ECB request.

    The ECB (extension control block) supplies both the request data and
    the response channel.  ``path_info`` is the dict produced by
    ``interpretPathInfo`` and is merged into the WSGI environ in
    ``add_cgi_vars`` so SCRIPT_NAME/PATH_INFO reflect any sub-application
    routing.
    """
    def __init__(self, ecb, path_info):
        self.ecb = ecb
        self.path_info = path_info
        self.stdin = ISAPIInputWrapper(self.ecb)
        self.stdout = ISAPIOutputWrapper(self.ecb)
        self.stderr = sys.stderr  # this will go to the win32traceutil
        self.headers = None
        self.headers_sent = False
        # One handler instance services exactly one request.
        self.wsgi_multithread = False
        self.wsgi_multiprocess = False
        self.base_env = []
    def send_preamble(self):
        """Since ISAPI sends the HTTP preamble itself, do nothing."""
        trace("send_preamble")
    def send_headers(self):
        """Transmit headers to the client, via the ECB"""
        trace("send_headers", str(self.headers))
        self.cleanup_headers()
        self.headers_sent = True
        if not self.origin_server or self.client_is_modern():
            trace("SendResponseHeaders")
            self.ecb.SendResponseHeaders(self.status, str(self.headers), False)
    def _write(self, data):
        trace("_write", data)
        self.ecb.WriteClient(data)
    def _flush(self):
        # WriteClient is unbuffered, so there is nothing to flush.
        trace("_flush")
    def get_stdin(self):
        trace("get_stdin")
        return self.stdin
    def get_stderr(self):
        trace("get_stderr")
        return self.stderr
    def add_cgi_vars(self):
        """Populate the WSGI environ from os.environ and the ECB variables.

        Raises AssertionError when a required CGI variable is missing.
        """
        trace("add_cgi_vars")
        # get standard windows os environment
        environ = dict(os.environ.items())
        # set standard CGI variables
        required_cgienv_vars = ['REQUEST_METHOD', 'SCRIPT_NAME',
                                'PATH_INFO', 'QUERY_STRING',
                                'CONTENT_TYPE', 'CONTENT_LENGTH',
                                'SERVER_NAME', 'SERVER_PORT',
                                'SERVER_PROTOCOL', 'REMOTE_ADDR'
                                ]
        ecb_dict = ECBDictAdapter(self.ecb)
        for cgivar in required_cgienv_vars:
            try:
                environ[cgivar] = ecb_dict[cgivar]
            except KeyError:
                raise AssertionError("missing CGI environment variable %s" % cgivar)
        # overlay SCRIPT_NAME/PATH_INFO (and friends) with interpreted values
        environ.update(self.path_info)
        # ALL_HTTP is a newline-separated list of "HTTP_NAME:value" entries
        http_cgienv_vars = ecb_dict['ALL_HTTP'].split("\n")
        for cgivar in http_cgienv_vars:
            pair = cgivar.split(":", 1)
            # Skip entries without a colon (e.g. the trailing empty string).
            # Previously a bare except: silently hid *any* error here.
            if len(pair) == 2:
                environ[pair[0]] = pair[1]
        # Other useful CGI variables, only present for some requests
        optional_cgienv_vars = ['REMOTE_USER', 'HTTPS',]
        for cgivar in optional_cgienv_vars:
            try:
                environ[cgivar] = ecb_dict[cgivar]
            except KeyError:
                pass
        # and a custom one: give the app direct access to the ECB.
        environ['isapi.ecb'] = self.ecb
        self.environ.update(environ)
def _run_app(rootapp, apps, ecb):
    """Resolve which WSGI app handles this request and run it.

    ``rootapp`` serves requests that do not address a named sub-application;
    ``apps`` maps sub-application names to WSGI apps.  When no app matches,
    ``isapi_error`` produces a 404 response.
    """
    ecb_dict = ECBDictAdapter(ecb)
    path_info = interpretPathInfo(ecb_dict, apps.keys())
    loc = path_info.get('WSGI_SUBAPP')
    application = apps.get(loc, rootapp)
    # we have to pass path_info because otherwise the handler can't determine
    # what the correct path is (because it doesn't know whether it's a
    # subapp or not)
    handler = IsapiWsgiHandler(ecb, path_info)
    trace("Handler")
    try:
        if application is not None:
            handler.run(application)
        else:
            handler.run(isapi_error)
    except ExtensionError:
        # error normally happens when client disconnects before
        # extension i/o completed
        pass
    except Exception:
        # ToDo: other exceptions should generate a nice error page.
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt still
        # propagate; record what failed instead of discarding it.
        trace("Caught App Exception", str(sys.exc_info()[1]))
# The ISAPI extension - handles requests in our virtual dir, and sends the
# response to the client.
class ISAPISimpleHandler(SimpleExtension):
    '''Python Simple WSGI ISAPI Extension.

    Runs each request synchronously on the IIS I/O thread, creating a
    fresh handler per request via _run_app().
    '''
    def __init__(self, rootapp=None, **apps):
        trace("ISAPISimpleHandler.__init__")
        # rootapp serves '/'; keyword args map sub-path names to WSGI apps.
        self.rootapp = rootapp
        self.apps = apps
        SimpleExtension.__init__(self)
    def HttpExtensionProc(self, ecb):
        trace("Enter HttpExtensionProc")
        _run_app(self.rootapp, self.apps, ecb)
        ecb.close()
        trace("Exit HttpExtensionProc")
        # tell IIS the request completed successfully
        return isapicon.HSE_STATUS_SUCCESS
    def TerminateExtension(self, status):
        trace("TerminateExtension")
class ISAPIThreadPoolHandler(ThreadPoolExtension):
    '''Python Thread Pool WSGI ISAPI Extension.

    Requests are dispatched to worker threads from the isapi thread pool
    instead of running on the IIS I/O thread.
    '''
    def __init__(self, rootapp=None, **apps):
        trace("ISAPIThreadPoolHandler.__init__")
        # rootapp serves '/'; keyword args map sub-path names to WSGI apps.
        self.rootapp = rootapp
        self.apps = apps
        ThreadPoolExtension.__init__(self)
    def Dispatch(self, ecb):
        trace("Enter Dispatch")
        _run_app(self.rootapp, self.apps, ecb)
        # signal the framework that async processing of this ecb is done
        ecb.DoneWithSession()
        trace("Exit Dispatch")
def isapi_error(environ, start_response):
    '''Fallback WSGI app: send a plain 404 page to the client.

    Fix: the status line previously read '404 OK'; the reason phrase now
    matches the status code per RFC 7231.
    '''
    status = '404 Not Found'
    start_response(status, [('Content-type', 'text/plain')])
    return ['Page not found']
#-----------------------------------------------------------------------------
def test(environ, start_response):
    '''Minimal WSGI application (PEP 333) used as the default demo app.'''
    headers = [('Content-type', 'text/plain')]
    start_response('200 OK', headers)
    return ['Hello world from isapi!']
# The entry points for the ISAPI extension.
def __ExtensionFactory__():
    # Called by the isapi framework when the DLL loads; serve the demo
    # `test` app at the root with the synchronous handler.
    return ISAPISimpleHandler(test)
# Our special command line customization.
# Pre-install hook for our virtual directory.
def PreInstallDirectory(params, options):
    """Pre-install hook for the virtual directory: when the user supplied
    our custom '--description' option, it overrides the default."""
    custom_description = options.description
    if not custom_description:
        return
    params.Description = custom_description
# Post install hook for our entire script
def PostInstall(params, options):
    """Post-install hook for the whole script: confirm success to the user.

    Fix: use the function-call form of print, which is valid under both
    Python 2 (single-argument) and Python 3.
    """
    print("Extension installed")
# Handler for our custom 'status' argument.
def status_handler(options, log, arg):
    "Query the status of something"
    # Docstring kept verbatim: isapi's HandleCommandLine may surface it as
    # help text for the custom 'status' argument.
    # Fix: print() call form works under both Python 2 and Python 3.
    print("Everything seems to be fine!")
# Map of extra command-line arguments understood by HandleCommandLine.
custom_arg_handlers = {"status": status_handler}
if __name__=='__main__':
    # If run from the command-line, install ourselves.
    from isapi.install import *
    params = ISAPIParameters(PostInstall = PostInstall)
    # Setup the virtual directories - this is a list of directories our
    # extension uses - in this case only 1.
    # Each extension has a "script map" - this is the mapping of ISAPI
    # extensions.
    sm = [
        ScriptMapParams(Extension="*", Flags=0)
    ]
    vd = VirtualDirParameters(Name="isapi-wsgi-test",
                              Description = "ISAPI-WSGI Test",
                              ScriptMaps = sm,
                              ScriptMapUpdate = "replace",
                              # specify the pre-install hook.
                              PreInstall = PreInstallDirectory
                              )
    params.VirtualDirs = [vd]
    # Setup our custom option parser.
    from optparse import OptionParser
    parser = OptionParser('') # blank usage, so isapi sets it.
    parser.add_option("", "--description",
                      action="store",
                      help="custom description to use for the virtual directory")
    HandleCommandLine(params, opt_parser=parser,
                      custom_arg_handlers = custom_arg_handlers)
| 35.203901 | 164 | 0.627802 | """
$Id$
This is a ISAPI extension for a wsgi with 2 handlers classes.
- ISAPISimpleHandler which creates a new IsapiWsgiHandler object for
each request.
- ISAPIThreadPoolHandler where the wsgi requests are run on worker threads
from the thread pool.
Dependecies:
- python 2.2+
- win32 extensions
- wsgiref library from http://cvs.eby-sarna.com/wsgiref/
Based on isapi/test/extension_simple.py, PEP 333 etc
"""
__author__ = "Mark Rees <mark.john.rees@gmail.com>"
__release__ = "0.4"
__version__ = "$Rev$ $LastChangedDate$"
__url__ = "http://isapi-wsgi.googlecode.com"
__description__ = "ISAPI WSGI Handler"
__license__ = "MIT"
import sys
if hasattr(sys, "isapidllhandle"):
import win32traceutil
try:
import isapi
except ImportError:
raise ImportError("Could not find module isapi. isapi_wsgi requires pywin32")
from isapi import isapicon, ExtensionError
from isapi.simple import SimpleExtension
from isapi.threaded_extension import ThreadPoolExtension
from wsgiref.handlers import BaseHandler
from wsgiref.util import shift_path_info
import sys
import os
import stat
import string
import re
try: from cStringIO import StringIO
except ImportError: from StringIO import StringIO
traceon = 0
def trace(*msgs):
"""Write trace message(s) so win32traceutil can display them"""
if not traceon: return
for msg in msgs:
print(msg)
class FoldedCaseString(str):
"""
From jaraco.util.string.FoldedCase:
A case insensitive string class; behaves just like str
except compares equal when the only variation is case.
>>> s = FoldedCaseString('hello world')
>>> s == 'Hello World'
True
>>> 'Hello World' == s
True
>>> s.index('O')
4
>>> s.split('O')
['hell', ' w', 'rld']
>>> sorted(map(FoldedCaseString, ['GAMMA', 'alpha', 'Beta']))
['alpha', 'Beta', 'GAMMA']
"""
def __lt__(self, other):
return self.lower() < other.lower()
def __gt__(self, other):
return self.lower() > other.lower()
def __eq__(self, other):
return self.lower() == other.lower()
def __hash__(self):
return hash(self.lower())
def lower(self):
self._lower = super(FoldedCaseString, self).lower()
self.lower = lambda: self._lower
return self._lower
def index(self, sub):
return self.lower().index(sub.lower())
def split(self, splitter=' ', maxsplit=0):
pattern = re.compile(re.escape(splitter), re.I)
return pattern.split(self, maxsplit)
class ECBDictAdapter(object):
"""
Adapt ECB to a read-only dictionary interface
>>> from fakeecb import FakeECB
>>> ecb = FakeECB()
>>> ecb_dict = ECBDictAdapter(ecb)
>>> ecb_dict['SCRIPT_NAME']
'/'
>>> ecb_dict['PATH_INFO']
'/'
"""
def __init__(self, ecb):
self.ecb = ecb
if sys.version_info > (3,0):
if ecb.Version >= 0x00060000:
# we can handle UNICODE_* variables.
self._get_variable = self._get_variable_py3k
else:
self._get_variable = self._get_variable_py3k_iis5
else:
self._get_variable = self._get_variable_py2k
def __getitem__(self, key):
try:
return self._get_variable(key)
except ExtensionError:
raise KeyError, key
# a few helpers specific to the IIS and python version.
def _get_variable(self, key):
raise RuntimeError("not reached: replaced at runtime in the ctor")
def _get_variable_py3k_iis5(self, key):
# IIS5 doesn't support UNICODE_* variable names...
return self.ecb.GetServerVariable(key).decode('latin-1')
def _get_variable_py3k(self, key):
return self.ecb.GetServerVariable('UNICODE_' + key)
def _get_variable_py2k(self, key):
return self.ecb.GetServerVariable(key)
def path_references_application(path, apps):
    """
    Return True if the first element in the path matches any string
    in the apps list.

    Fix: the previous filter()-based version indexed and truth-tested the
    filter result, which breaks on Python 3 (filter returns an iterator)
    and returned [] instead of False for an empty path.

    >>> path_references_application('/foo/bar', ['foo','baz'])
    True
    """
    nodes = [node for node in path.split('/') if node]
    return bool(nodes) and nodes[0] in apps
def interpretPathInfo(ecb_server_vars, app_names=[]):
"""
Based on the a dictionary of ECB server variables and list of valid
subapplication names, determine the correct PATH_INFO, SCRIPT_NAME,
and IIS_EXTENSION_PATH.
By valid, I mean SCRIPT_NAME + PATH_INFO is always the request path and
SCRIPT_NAME is the path to the WSGi application and PATH_INFO is the path
that the WSGI application expects to handle.
In IIS, the path to the extension sometimes varies from the script name,
particularly when the script map extenison is not '*'. IIS_EXTENSION_PATH
is set to the path that leads to the extension.
Return these values as a dict.
For the following doctests, I use a convention:
vappname : the IIS application
appname : the wsgi application (may be )
subappX : a wsgi sub application (must always follow appname)
proc : a method within the WSGI app (something that should appear in PATH_INFO)
--------------------------
First some common examples
Following is an example case where the extension is installed at the root
of the site, the requested
URL is /proc
>>> ecb_vars = dict(SCRIPT_NAME='/proc', PATH_INFO='/proc', APPL_MD_PATH='/LM/W3SVC/1/ROOT')
>>> interpretPathInfo(ecb_vars) == dict(SCRIPT_NAME='', PATH_INFO='/proc', IIS_EXTENSION_PATH='')
True
An example where the extension is installed to a virtual directory below
the root.
URL is /vappname/proc
>>> ecb_vars = dict(SCRIPT_NAME='/vappname/proc', PATH_INFO='/vappname/proc', APPL_MD_PATH='/LM/W3SVC/1/ROOT/vappname')
>>> interpretPathInfo(ecb_vars) == dict(SCRIPT_NAME='/vappname', PATH_INFO='/proc', IIS_EXTENSION_PATH='/vappname')
True
An example where the extension is installed to a virtual directory below
the root, and some subapps are present
>>> subapps = ('subapp1', 'subapp2')
URL is /vappname/proc
>>> ecb_vars = dict(SCRIPT_NAME='/vappname/proc', PATH_INFO='/vappname/proc', APPL_MD_PATH='/LM/W3SVC/1/ROOT/vappname')
>>> interpretPathInfo(ecb_vars, subapps) == dict(SCRIPT_NAME='/vappname', PATH_INFO='/proc', IIS_EXTENSION_PATH='/vappname')
True
URL is /vappname/subapp1/proc
>>> ecb_vars = dict(SCRIPT_NAME='/vappname/subapp1/proc', PATH_INFO='/vappname/subapp1/proc', APPL_MD_PATH='/LM/W3SVC/1/ROOT/vappname')
>>> interpretPathInfo(ecb_vars, subapps) == dict(SCRIPT_NAME='/vappname/subapp1', PATH_INFO='/proc', IIS_EXTENSION_PATH='/vappname', WSGI_SUBAPP='subapp1')
True
------------------------------
Now some less common scenarios
An example where the extension is installed only to the .wsgi extension to
a virtual directory below the root.
URL is /vappname/any.wsgi/proc
>>> ecb_vars = dict(SCRIPT_NAME='/vappname/any.wsgi', PATH_INFO='/vappname/any.wsgi/proc', APPL_MD_PATH='/LM/W3SVC/1/ROOT/vappname')
>>> interpretPathInfo(ecb_vars) == dict(SCRIPT_NAME='/vappname/any.wsgi', PATH_INFO='/proc', IIS_EXTENSION_PATH='/vappname')
True
An example where the extension is installed only to the .wsgi extension at
the root.
URL is /any_path/any.wsgi/proc
>>> ecb_vars = dict(SCRIPT_NAME='/any_path/any.wsgi', PATH_INFO='/any_path/any.wsgi/proc', APPL_MD_PATH='/LM/W3SVC/1/ROOT')
>>> interpretPathInfo(ecb_vars) == dict(SCRIPT_NAME='/any_path/any.wsgi', PATH_INFO='/proc', IIS_EXTENSION_PATH='')
True
How about an extension installed at the root to the .wsgi extension with
subapps
URL is /any_path/any.wsgi/subapp1/proc/foo
>>> ecb_vars = dict(SCRIPT_NAME='/any_path/any.wsgi', PATH_INFO='/any_path/any.wsgi/subapp1/proc/foo', APPL_MD_PATH='/LM/W3SVC/1/ROOT')
>>> interpretPathInfo(ecb_vars, subapps) == dict(SCRIPT_NAME='/any_path/any.wsgi/subapp1', PATH_INFO='/proc/foo', IIS_EXTENSION_PATH='', WSGI_SUBAPP='subapp1')
True
How about an extension installed at the root to the .wsgi extension with
subapps... this time default to the root app.
URL is /any_path/any.wsgi/proc/foo
>>> ecb_vars = dict(SCRIPT_NAME='/any_path/any.wsgi', PATH_INFO='/any_path/any.wsgi/proc/foo', APPL_MD_PATH='/LM/W3SVC/1/ROOT')
>>> interpretPathInfo(ecb_vars, subapps) == dict(SCRIPT_NAME='/any_path/any.wsgi', PATH_INFO='/proc/foo', IIS_EXTENSION_PATH='')
True
"""
PATH_INFO = ecb_server_vars['PATH_INFO']
SCRIPT_NAME = ecb_server_vars['SCRIPT_NAME']
IIS_EXTENSION_PATH = getISAPIExtensionPath(ecb_server_vars)
if SCRIPT_NAME == PATH_INFO:
SCRIPT_NAME = IIS_EXTENSION_PATH
if SCRIPT_NAME and PATH_INFO.startswith(SCRIPT_NAME):
_, PATH_INFO = PATH_INFO.split(SCRIPT_NAME, 1)
result = dict(
SCRIPT_NAME=SCRIPT_NAME,
PATH_INFO=PATH_INFO,
IIS_EXTENSION_PATH=IIS_EXTENSION_PATH,
)
if path_references_application(PATH_INFO, app_names):
result.update(WSGI_SUBAPP = shift_path_info(result))
return result
def getISAPIExtensionPath(ecb_server_vars):
"""Returns the path to our extension DLL.
This will be blank ('') if installed at the root, or something like
'/foo' or '/bar/foo' if 'foo' is the name of the virtual directory
where this extension is installed.
>>> getISAPIExtensionPath(dict(APPL_MD_PATH='/LM/W3SVC/1/ROOT/test'))
'/test'
>>> getISAPIExtensionPath(dict(APPL_MD_PATH='/LM/W3SVC/1/ROOT'))
''
This test exercises the less common mixed-case metadata path
>>> getISAPIExtensionPath(dict(APPL_MD_PATH='/LM/W3SVC/1/Root'))
''
"""
appl_md_path = ecb_server_vars["APPL_MD_PATH"]
appl_md_path = FoldedCaseString(appl_md_path)
site, pos = appl_md_path.split("/ROOT", 1)
return pos
class ISAPIInputWrapper:
def __init__(self, ecb):
self._in = StringIO()
self._ecb = ecb
if self._ecb.AvailableBytes > 0:
data = self._ecb.AvailableData
excess = self._ecb.TotalBytes - self._ecb.AvailableBytes
if excess > 0:
extra = self._ecb.ReadClient(excess)
data = data + extra
self._in.write(data)
self._in.seek(0)
def next(self):
return self._in.next()
def read(self, size=-1):
return self._in.read(size)
def readline(self, size=-1):
return self._in.readline(size)
def readlines(self, hint=-1):
return self._in.readlines()
def reset(self):
self._in.reset()
def seek(self, *args, **kwargs):
self._in.seek(*args, **kwargs)
def tell(self):
return self._in.tell()
def __iter__(self):
return iter(self._in.readlines())
class ISAPIOutputWrapper:
def __init__(self, ecb):
self.ecb = ecb
def write(self, msg):
self.ecb.WriteClient(msg)
def flush(self):
pass
class ISAPIErrorWrapper:
def write(self, msg):
trace(msg)
def flush(self):
pass
class IsapiWsgiHandler(BaseHandler):
def __init__(self, ecb, path_info):
self.ecb = ecb
self.path_info = path_info
self.stdin = ISAPIInputWrapper(self.ecb)
self.stdout = ISAPIOutputWrapper(self.ecb)
self.stderr = sys.stderr
self.headers = None
self.headers_sent = False
self.wsgi_multithread = False
self.wsgi_multiprocess = False
self.base_env = []
def send_preamble(self):
"""Since ISAPI sends preamble itself, do nothing"""
trace("send_preamble")
def send_headers(self):
"""Transmit headers to the client, via self._write()"""
trace("send_headers", str(self.headers))
self.cleanup_headers()
self.headers_sent = True
if not self.origin_server or self.client_is_modern():
trace("SendResponseHeaders")
self.ecb.SendResponseHeaders(self.status, str(self.headers), False)
def _write(self, data):
trace("_write", data)
self.ecb.WriteClient(data)
def _flush(self):
trace("_flush")
def get_stdin(self):
trace("get_stdin")
return self.stdin
def get_stderr(self):
trace("get_stderr")
return self.stderr
def add_cgi_vars(self):
trace("add_cgi_vars")
environ = dict(os.environ.items())
required_cgienv_vars = ['REQUEST_METHOD', 'SCRIPT_NAME',
'PATH_INFO', 'QUERY_STRING',
'CONTENT_TYPE', 'CONTENT_LENGTH',
'SERVER_NAME', 'SERVER_PORT',
'SERVER_PROTOCOL', 'REMOTE_ADDR'
]
ecb_dict = ECBDictAdapter(self.ecb)
for cgivar in required_cgienv_vars:
try:
environ[cgivar] = ecb_dict[cgivar]
except KeyError:
raise AssertionError("missing CGI environment variable %s" % cgivar)
environ.update(self.path_info)
http_cgienv_vars = ecb_dict['ALL_HTTP'].split("\n")
for cgivar in http_cgienv_vars:
pair = cgivar.split(":",1)
try:
environ[pair[0]] = pair[1]
except:
pass
optional_cgienv_vars = ['REMOTE_USER', 'HTTPS',]
for cgivar in optional_cgienv_vars:
try:
environ[cgivar] = ecb_dict[cgivar]
except KeyError:
pass
environ['isapi.ecb'] = self.ecb
self.environ.update(environ)
def _run_app(rootapp, apps, ecb):
ecb_dict = ECBDictAdapter(ecb)
path_info = interpretPathInfo(ecb_dict, apps.keys())
loc = path_info.get('WSGI_SUBAPP')
application = apps.get(loc, rootapp)
# what the correct path is (because it doesn't know whether it's a
# subapp or not)
handler = IsapiWsgiHandler(ecb, path_info)
trace("Handler")
try:
if application is not None:
handler.run(application)
else:
handler.run(isapi_error)
except ExtensionError:
# error normally happens when client disconnects before
# extension i/o completed
pass
except:
# ToDo:Other exceptions should generate a nice page
trace("Caught App Exception")
pass
# The ISAPI extension - handles requests in our virtual dir, and sends the
# response to the client.
class ISAPISimpleHandler(SimpleExtension):
'''Python Simple WSGI ISAPI Extension'''
def __init__(self, rootapp=None, **apps):
trace("ISAPISimpleHandler.__init__")
self.rootapp = rootapp
self.apps = apps
SimpleExtension.__init__(self)
def HttpExtensionProc(self, ecb):
trace("Enter HttpExtensionProc")
_run_app(self.rootapp, self.apps, ecb)
ecb.close()
trace("Exit HttpExtensionProc")
return isapicon.HSE_STATUS_SUCCESS
def TerminateExtension(self, status):
trace("TerminateExtension")
class ISAPIThreadPoolHandler(ThreadPoolExtension):
'''Python Thread Pool WSGI ISAPI Extension'''
def __init__(self, rootapp=None, **apps):
trace("ISAPIThreadPoolHandler.__init__")
self.rootapp = rootapp
self.apps = apps
ThreadPoolExtension.__init__(self)
def Dispatch(self, ecb):
trace("Enter Dispatch")
_run_app(self.rootapp, self.apps, ecb)
ecb.DoneWithSession()
trace("Exit Dispatch")
def isapi_error(environ, start_response):
'''Send a nice error page to the client'''
status = '404 OK'
start_response(status, [('Content-type', 'text/plain')])
return ['Page not found']
#-----------------------------------------------------------------------------
def test(environ, start_response):
'''Simple app as per PEP 333'''
status = '200 OK'
start_response(status, [('Content-type', 'text/plain')])
return ['Hello world from isapi!']
# The entry points for the ISAPI extension.
def __ExtensionFactory__():
return ISAPISimpleHandler(test)
# Our special command line customization.
# Pre-install hook for our virtual directory.
def PreInstallDirectory(params, options):
# If the user used our special '--description' option,
# then we override our default.
if options.description:
params.Description = options.description
# Post install hook for our entire script
def PostInstall(params, options):
print "Extension installed"
# Handler for our custom 'status' argument.
def status_handler(options, log, arg):
"Query the status of something"
print "Everything seems to be fine!"
custom_arg_handlers = {"status": status_handler}
if __name__=='__main__':
# If run from the command-line, install ourselves.
from isapi.install import *
params = ISAPIParameters(PostInstall = PostInstall)
# Setup the virtual directories - this is a list of directories our
# extension uses - in this case only 1.
# Each extension has a "script map" - this is the mapping of ISAPI
# extensions.
sm = [
ScriptMapParams(Extension="*", Flags=0)
]
vd = VirtualDirParameters(Name="isapi-wsgi-test",
Description = "ISAPI-WSGI Test",
ScriptMaps = sm,
ScriptMapUpdate = "replace",
# specify the pre-install hook.
PreInstall = PreInstallDirectory
)
params.VirtualDirs = [vd]
# Setup our custom option parser.
from optparse import OptionParser
parser = OptionParser('') # black usage, so isapi sets it.
parser.add_option("", "--description",
action="store",
help="custom description to use for the virtual directory")
HandleCommandLine(params, opt_parser=parser,
custom_arg_handlers = custom_arg_handlers)
| false | true |
f71e4ce28aaa6929ef3046b41295678e3e7ad09e | 2,291 | py | Python | src/backend/apps/chat/views.py | Vixx-X/ati-project | 0ef80772a6fc3807e401cf58b9e15f3628373383 | [
"MIT"
] | null | null | null | src/backend/apps/chat/views.py | Vixx-X/ati-project | 0ef80772a6fc3807e401cf58b9e15f3628373383 | [
"MIT"
] | 61 | 2021-06-10T03:27:06.000Z | 2022-03-12T01:01:34.000Z | src/backend/apps/chat/views.py | Vixx-X/ati-project | 0ef80772a6fc3807e401cf58b9e15f3628373383 | [
"MIT"
] | null | null | null | """
Views for the chat module.
"""
import functools
import json
from flask import session
from flask_socketio import disconnect, emit, join_room, leave_room
from flask_user import current_user
from flask_user.decorators import login_required
from backend import socketio
from backend.apps.chat.models import Message
from backend.utils.views import DetailView, TemplateView
from .models import Chat
def authenticated_only(f):
    """
    Decorator for socket.io handlers: run the handler only for
    authenticated users, dropping the connection otherwise.
    """
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        if current_user.is_authenticated:
            return f(*args, **kwargs)
        disconnect()
    return wrapped
@socketio.on("connect")
@authenticated_only
def handle_connect():
"""
Connecting user to pull of online users
"""
room = session.get("room")
join_room(room)
if room == "UNIVERSE":
emit("user_connected", {"username": current_user.username}, room=room)
@socketio.on("message")
@authenticated_only
def handle_messages(message):
"""
Get messages send by client and sent it to all in room
"""
room = session.get("room")
data = json.loads(message)
# Save message to db
msg = Message(content=data["message"], author=current_user)
chat = Chat.objects.get_or_404(pk=room)
chat.add_message(msg)
chat.save()
emit(
"message",
{
"content": msg.content,
"time": str(msg.time),
"author": str(current_user.pk),
},
room=room,
)
@socketio.on("disconnect")
@authenticated_only
def handle_disconnect():
"""
Left room
"""
room = session.get("room")
leave_room(room)
if room == "UNIVERSE":
emit("user_disconnected", {"username": current_user.username}, room=room)
class ChatView(DetailView):
    """
    Detail view for a single chat (login required).
    Stores the chat's pk in the session as the socket.io room so the
    connect/message handlers above know which room to join.
    """
    model = Chat
    decorators = [login_required]
    template_name = "chat/chat.html"
    pk_or_slug_url = "pk"
    def get_context_data(self, **kwargs):
        # Remember the room before rendering so the websocket connect
        # handler can pick it up from the session.
        session["room"] = str(self.object.pk)
        return super().get_context_data(**kwargs)
class ChatListView(TemplateView):
    """
    Chat list page (login required).
    """
    decorators = [login_required]
    template_name = "chat/chat.html"
| 20.096491 | 81 | 0.643824 |
import functools
import json
from flask import session
from flask_socketio import disconnect, emit, join_room, leave_room
from flask_user import current_user
from flask_user.decorators import login_required
from backend import socketio
from backend.apps.chat.models import Message
from backend.utils.views import DetailView, TemplateView
from .models import Chat
def authenticated_only(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not current_user.is_authenticated:
disconnect()
else:
return f(*args, **kwargs)
return wrapped
@socketio.on("connect")
@authenticated_only
def handle_connect():
room = session.get("room")
join_room(room)
if room == "UNIVERSE":
emit("user_connected", {"username": current_user.username}, room=room)
@socketio.on("message")
@authenticated_only
def handle_messages(message):
room = session.get("room")
data = json.loads(message)
msg = Message(content=data["message"], author=current_user)
chat = Chat.objects.get_or_404(pk=room)
chat.add_message(msg)
chat.save()
emit(
"message",
{
"content": msg.content,
"time": str(msg.time),
"author": str(current_user.pk),
},
room=room,
)
@socketio.on("disconnect")
@authenticated_only
def handle_disconnect():
room = session.get("room")
leave_room(room)
if room == "UNIVERSE":
emit("user_disconnected", {"username": current_user.username}, room=room)
class ChatView(DetailView):
model = Chat
decorators = [login_required]
template_name = "chat/chat.html"
pk_or_slug_url = "pk"
def get_context_data(self, **kwargs):
session["room"] = str(self.object.pk)
return super().get_context_data(**kwargs)
class ChatListView(TemplateView):
decorators = [login_required]
template_name = "chat/chat.html"
| true | true |
f71e4d0c6f7a17183fe5fe1f40071fec474e71ae | 2,532 | py | Python | mlmodels/model_tf/misc/tf_nlp/speech-to-text/1.tacotron/train.py | gitter-badger/mlmodels | f08cc9b6ec202d4ad25ecdda2f44487da387569d | [
"MIT"
] | 1 | 2022-03-11T07:57:48.000Z | 2022-03-11T07:57:48.000Z | mlmodels/model_tf/misc/tf_nlp/speech-to-text/1.tacotron/train.py | whitetiger1002/mlmodels | f70f1da7434e8855eed50adc67b49cc169f2ea24 | [
"MIT"
] | null | null | null | mlmodels/model_tf/misc/tf_nlp/speech-to-text/1.tacotron/train.py | whitetiger1002/mlmodels | f70f1da7434e8855eed50adc67b49cc169f2ea24 | [
"MIT"
] | null | null | null | # coding: utf-8
# In[1]:
import os
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from model import Model
from setting import batch_size, get_cached, idx2char, n_mels, reduction_factor, text2idx
# In[2]:
paths, lengths, texts = [], [], []
text_files = [f for f in os.listdir("spectrogram") if f.endswith(".npy")]
for fpath in text_files:
    # Each spectrogram <name>.npy has a matching transcript ../data/<name>.txt.
    transcript_file = "../data/" + fpath.replace("npy", "txt")
    with open(transcript_file) as fopen:
        text, converted = text2idx(fopen.read())
        texts.append(converted)
        lengths.append(len(text))
        paths.append(fpath.replace(".npy", ""))
# In[3]:
def dynamic_batching(paths):
    """Load the spectrogram for every name in *paths* and return the list
    together with the largest frame count (0 when *paths* is empty)."""
    spectrograms = [np.load("spectrogram/" + name + ".npy") for name in paths]
    max_x = max((spec.shape[0] for spec in spectrograms), default=0)
    return spectrograms, max_x
# In[4]:
# Build a fresh TF1 graph, construct the Tacotron model, and initialise
# all variables (training starts from scratch; no checkpoint is restored).
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Model()
sess.run(tf.global_variables_initializer())
# In[5]:
# Train for 30 epochs over all spectrogram/transcript pairs.
for e in range(30):
    pbar = tqdm(range(0, len(text_files), batch_size), desc="minibatch loop")
    total_cost, total_acc = 0, 0
    for k in pbar:
        # clamp the slice end so the final (possibly short) batch is used
        index = min(k + batch_size, len(text_files))
        files, max_x = dynamic_batching(paths[k:index])
        max_y = max(lengths[k:index])
        batch_x = np.zeros((len(files), max_x, n_mels * reduction_factor))
        batch_y = np.zeros((len(files), max_y))
        for n in range(len(files)):
            # left-pad spectrograms in time, right-pad text ids with zeros
            batch_x[n] = np.pad(files[n], ((max_x - files[n].shape[0], 0), (0, 0)), mode="constant")
            batch_y[n] = np.pad(texts[k + n], ((0, max_y - len(texts[k + n]))), mode="constant")
        _, acc, cost = sess.run(
            [model.optimizer, model.accuracy, model.cost],
            feed_dict={model.X: batch_x, model.Y: batch_y, model.Y_seq_len: lengths[k:index]},
        )
        total_cost += cost
        total_acc += acc
        pbar.set_postfix(cost=cost, accuracy=acc)
    # NOTE(review): divides by the *fractional* batch count, so the average
    # is slightly biased when len(text_files) % batch_size != 0 -- confirm.
    total_cost /= len(text_files) / batch_size
    total_acc /= len(text_files) / batch_size
    print("epoch %d, avg loss %f, avg acc %f" % (e + 1, total_cost, total_acc))
# Decode the first sample of the last batch as a qualitative sanity check,
# feeding an all-zero Y so the model's own predictions are inspected.
empty_y = np.zeros((1, len(batch_y[0])))
predicted = "".join(
    [
        idx2char[c]
        for c in sess.run(model.preds, feed_dict={model.X: batch_x[:1], model.Y: empty_y})[0]
        if idx2char[c] not in ["S", "E"]
    ]
)
# "S"/"E" are presumably the start/end sentinel symbols from setting.py --
# they are stripped from both strings before printing. TODO confirm.
ground_truth = "".join([idx2char[c] for c in batch_y[0] if idx2char[c] not in ["S", "E"]])
print("predicted: %s, ground truth: %s" % (predicted, ground_truth))
| 29.103448 | 100 | 0.617299 |
import os
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from model import Model
from setting import batch_size, get_cached, idx2char, n_mels, reduction_factor, text2idx
paths, lengths, texts = [], [], []
text_files = [f for f in os.listdir("spectrogram") if f.endswith(".npy")]
for fpath in text_files:
with open("../data/" + fpath.replace("npy", "txt")) as fopen:
text, converted = text2idx(fopen.read())
texts.append(converted)
lengths.append(len(text))
paths.append(fpath.replace(".npy", ""))
def dynamic_batching(paths):
spectrograms, max_x = [], 0
for path in paths:
spectrograms.append(np.load("spectrogram/" + path + ".npy"))
if spectrograms[-1].shape[0] > max_x:
max_x = spectrograms[-1].shape[0]
return spectrograms, max_x
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Model()
sess.run(tf.global_variables_initializer())
for e in range(30):
pbar = tqdm(range(0, len(text_files), batch_size), desc="minibatch loop")
total_cost, total_acc = 0, 0
for k in pbar:
index = min(k + batch_size, len(text_files))
files, max_x = dynamic_batching(paths[k:index])
max_y = max(lengths[k:index])
batch_x = np.zeros((len(files), max_x, n_mels * reduction_factor))
batch_y = np.zeros((len(files), max_y))
for n in range(len(files)):
batch_x[n] = np.pad(files[n], ((max_x - files[n].shape[0], 0), (0, 0)), mode="constant")
batch_y[n] = np.pad(texts[k + n], ((0, max_y - len(texts[k + n]))), mode="constant")
_, acc, cost = sess.run(
[model.optimizer, model.accuracy, model.cost],
feed_dict={model.X: batch_x, model.Y: batch_y, model.Y_seq_len: lengths[k:index]},
)
total_cost += cost
total_acc += acc
pbar.set_postfix(cost=cost, accuracy=acc)
total_cost /= len(text_files) / batch_size
total_acc /= len(text_files) / batch_size
print("epoch %d, avg loss %f, avg acc %f" % (e + 1, total_cost, total_acc))
empty_y = np.zeros((1, len(batch_y[0])))
predicted = "".join(
[
idx2char[c]
for c in sess.run(model.preds, feed_dict={model.X: batch_x[:1], model.Y: empty_y})[0]
if idx2char[c] not in ["S", "E"]
]
)
ground_truth = "".join([idx2char[c] for c in batch_y[0] if idx2char[c] not in ["S", "E"]])
print("predicted: %s, ground truth: %s" % (predicted, ground_truth))
| true | true |
f71e4d6dbc4f9dd0c1e78dea1d539666e3d83bf0 | 846 | py | Python | sumo/tools/projects/TaxiFCD_Krieg/src/fcdToRoutes/GenerateTaxiRoutesMain.py | iltempe/osmosi | c0f54ecdbb7c7b5602d587768617d0dc50f1d75d | [
"MIT"
] | null | null | null | sumo/tools/projects/TaxiFCD_Krieg/src/fcdToRoutes/GenerateTaxiRoutesMain.py | iltempe/osmosi | c0f54ecdbb7c7b5602d587768617d0dc50f1d75d | [
"MIT"
] | null | null | null | sumo/tools/projects/TaxiFCD_Krieg/src/fcdToRoutes/GenerateTaxiRoutesMain.py | iltempe/osmosi | c0f54ecdbb7c7b5602d587768617d0dc50f1d75d | [
"MIT"
] | 2 | 2017-12-14T16:41:59.000Z | 2020-10-16T17:51:27.000Z | #!/usr/bin/env python
# -*- coding: Latin-1 -*-
"""
@file GenerateTaxiRoutesMain.py
@author Sascha Krieg
@author Daniel Krajzewicz
@author Michael Behrisch
@date 2008-04-17
@version $Id$
Main of GenerateTaxiRoutes.
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2008-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import absolute_import
from __future__ import print_function
from .GenerateTaxiRoutes import *
def main():
    """Read the FCD input and write the generated taxi routes.

    ``readFCD`` and ``writeRoutes`` come from the star import of
    ``GenerateTaxiRoutes`` at the top of this file.
    """
    print("start program")
    readFCD()
    writeRoutes()
    print("end")


# Run only when executed as a script (e.g. ``python -m ...``); the original
# unconditional call also fired on import, which is an unwanted side effect.
if __name__ == "__main__":
    main()
| 23.5 | 68 | 0.737589 |
from __future__ import absolute_import
from __future__ import print_function
from .GenerateTaxiRoutes import *
def main():
    """Read the FCD input and write the generated taxi routes."""
    # readFCD/writeRoutes come from the star import elsewhere in this file.
    print("start program")
    readFCD()
    writeRoutes()
    print("end")
# NOTE(review): unconditional call -- also runs on import; consider a
# ``if __name__ == "__main__":`` guard.
main()
| true | true |
f71e4e1ab1212fcc12550d50d95595bbcbba1151 | 22,970 | py | Python | saleor/plugins/avatax/plugin.py | batout/saleor | 29830d2a3195c2d83d0a2b0dfdc48ebc18d26dbc | [
"CC-BY-4.0"
] | 1 | 2022-02-19T13:27:40.000Z | 2022-02-19T13:27:40.000Z | saleor/plugins/avatax/plugin.py | autobotasia/saleor | e03e9f6ab1bddac308a6609d6b576a87e90ae655 | [
"CC-BY-4.0"
] | null | null | null | saleor/plugins/avatax/plugin.py | autobotasia/saleor | e03e9f6ab1bddac308a6609d6b576a87e90ae655 | [
"CC-BY-4.0"
] | 2 | 2021-12-03T16:59:37.000Z | 2022-02-19T13:05:42.000Z | import logging
from dataclasses import asdict
from decimal import Decimal
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Union
from urllib.parse import urljoin
import opentracing
import opentracing.tags
from django.core.exceptions import ValidationError
from prices import Money, TaxedMoney, TaxedMoneyRange
from ...checkout import base_calculations
from ...checkout.fetch import fetch_checkout_lines
from ...core.taxes import TaxError, TaxType, charge_taxes_on_shipping, zero_taxed_money
from ...discount import DiscountInfo
from ...product.models import ProductType
from ..base_plugin import BasePlugin, ConfigurationTypeField
from ..error_codes import PluginErrorCode
from . import (
DEFAULT_TAX_CODE,
DEFAULT_TAX_DESCRIPTION,
META_CODE_KEY,
META_DESCRIPTION_KEY,
AvataxConfiguration,
CustomerErrors,
TransactionType,
_validate_checkout,
_validate_order,
api_get_request,
api_post_request,
generate_request_data_from_checkout,
get_api_url,
get_cached_tax_codes_or_fetch,
get_checkout_tax_data,
get_order_request_data,
get_order_tax_data,
)
from .tasks import api_post_request_task
if TYPE_CHECKING:
# flake8: noqa
from ...account.models import Address
from ...channel.models import Channel
from ...checkout.fetch import CheckoutInfo, CheckoutLineInfo
from ...checkout.models import Checkout, CheckoutLine
from ...order.models import Order, OrderLine
from ...product.models import Product, ProductVariant
from ..models import PluginConfiguration
logger = logging.getLogger(__name__)
class AvataxPlugin(BasePlugin):
    """Saleor tax plugin backed by the Avalara AvaTax API.

    Each ``calculate_*`` hook forwards checkout/order data to AvaTax and maps
    the response back onto ``TaxedMoney`` values.  Whenever the plugin is
    inactive, misconfigured, or the API call fails, the hook returns the
    ``previous_value`` computed earlier in the plugin chain.
    """

    PLUGIN_NAME = "Avalara"
    PLUGIN_ID = "mirumee.taxes.avalara"

    # Defaults presented before the merchant configures the plugin.
    DEFAULT_CONFIGURATION = [
        {"name": "Username or account", "value": None},
        {"name": "Password or license", "value": None},
        {"name": "Use sandbox", "value": True},
        {"name": "Company name", "value": "DEFAULT"},
        {"name": "Autocommit", "value": False},
    ]
    # Field metadata (type/help/label) used to render the configuration form.
    CONFIG_STRUCTURE = {
        "Username or account": {
            "type": ConfigurationTypeField.STRING,
            "help_text": "Provide user or account details",
            "label": "Username or account",
        },
        "Password or license": {
            "type": ConfigurationTypeField.PASSWORD,
            "help_text": "Provide password or license details",
            "label": "Password or license",
        },
        "Use sandbox": {
            "type": ConfigurationTypeField.BOOLEAN,
            "help_text": "Determines if Saleor should use Avatax sandbox API.",
            "label": "Use sandbox",
        },
        "Company name": {
            "type": ConfigurationTypeField.STRING,
            "help_text": "Avalara needs to receive company code. Some more "
            "complicated systems can use more than one company "
            "code, in that case, this variable should be changed "
            "based on data from Avalara's admin panel",
            "label": "Company name",
        },
        "Autocommit": {
            "type": ConfigurationTypeField.BOOLEAN,
            "help_text": "Determines, if all transactions sent to Avalara "
            "should be committed by default.",
            "label": "Autocommit",
        },
    }

    def __init__(self, *args, **kwargs):
        """Build an :class:`AvataxConfiguration` from the stored settings."""
        super().__init__(*args, **kwargs)
        # Convert to dict to easier take config elements
        configuration = {item["name"]: item["value"] for item in self.configuration}
        self.config = AvataxConfiguration(
            username_or_account=configuration["Username or account"],
            password_or_license=configuration["Password or license"],
            use_sandbox=configuration["Use sandbox"],
            company_name=configuration["Company name"],
            autocommit=configuration["Autocommit"],
        )

    def _skip_plugin(
        self, previous_value: Union[TaxedMoney, TaxedMoneyRange, Decimal]
    ) -> bool:
        """Return True when this plugin must not run (missing credentials,
        inactive, or taxes were already applied by an earlier plugin)."""
        if not (self.config.username_or_account and self.config.password_or_license):
            return True
        if not self.active:
            return True
        # The previous plugin already calculated taxes so we can skip our logic
        if isinstance(previous_value, TaxedMoneyRange):
            start = previous_value.start
            stop = previous_value.stop
            return start.net != start.gross and stop.net != stop.gross
        if isinstance(previous_value, TaxedMoney):
            return previous_value.net != previous_value.gross
        return False

    def _append_prices_of_not_taxed_lines(
        self,
        price: TaxedMoney,
        lines: Iterable["CheckoutLineInfo"],
        channel: "Channel",
        discounts: Iterable[DiscountInfo],
    ):
        """Add the base prices of lines whose product has ``charge_taxes``
        disabled to ``price`` (mutated in place) and return it."""
        for line_info in lines:
            if line_info.variant.product.charge_taxes:
                continue
            line_price = base_calculations.base_checkout_line_total(
                line_info,
                channel,
                discounts,
            )
            price.gross.amount += line_price.gross.amount
            price.net.amount += line_price.net.amount
        return price

    def calculate_checkout_total(
        self,
        checkout_info: "CheckoutInfo",
        lines: Iterable["CheckoutLineInfo"],
        address: Optional["Address"],
        discounts: Iterable[DiscountInfo],
        previous_value: TaxedMoney,
    ) -> TaxedMoney:
        """Return the checkout total with AvaTax taxes applied.

        Falls back to ``previous_value`` when the plugin is skipped, the
        checkout fails validation, or the API response contains an error.
        """
        if self._skip_plugin(previous_value):
            return previous_value

        checkout_total = previous_value

        if not _validate_checkout(checkout_info, lines):
            return checkout_total

        response = get_checkout_tax_data(checkout_info, lines, discounts, self.config)
        if not response or "error" in response:
            return checkout_total

        currency = response.get("currencyCode")
        tax = Decimal(response.get("totalTax", 0.0))
        total_net = Decimal(response.get("totalAmount", 0.0))
        total_gross = Money(amount=total_net + tax, currency=currency)
        total_net = Money(amount=total_net, currency=currency)
        taxed_total = TaxedMoney(net=total_net, gross=total_gross)
        # AvaTax only priced the taxable lines; add untaxed lines back in.
        total = self._append_prices_of_not_taxed_lines(
            taxed_total, lines, checkout_info.channel, discounts
        )
        voucher_value = checkout_info.checkout.discount
        if voucher_value:
            total -= voucher_value
        # Never return a negative total.
        return max(total, zero_taxed_money(total.currency))

    def _calculate_checkout_shipping(
        self, currency: str, lines: List[Dict], shipping_price: TaxedMoney
    ) -> TaxedMoney:
        """Extract the taxed shipping price from AvaTax response ``lines``;
        defaults to the pre-tax ``shipping_price`` net when no "Shipping"
        line is present."""
        shipping_tax = Decimal(0.0)
        shipping_net = shipping_price.net.amount
        for line in lines:
            if line["itemCode"] == "Shipping":
                shipping_net = Decimal(line["lineAmount"])
                shipping_tax = Decimal(line["tax"])
                break

        shipping_gross = Money(amount=shipping_net + shipping_tax, currency=currency)
        shipping_net = Money(amount=shipping_net, currency=currency)
        return TaxedMoney(net=shipping_net, gross=shipping_gross)

    def calculate_checkout_shipping(
        self,
        checkout_info: "CheckoutInfo",
        lines: Iterable["CheckoutLineInfo"],
        address: Optional["Address"],
        discounts: Iterable[DiscountInfo],
        previous_value: TaxedMoney,
    ) -> TaxedMoney:
        """Return the checkout shipping price with AvaTax taxes applied."""
        base_shipping_price = previous_value

        if not charge_taxes_on_shipping():
            return base_shipping_price

        if self._skip_plugin(previous_value):
            return base_shipping_price

        if not _validate_checkout(checkout_info, lines):
            return base_shipping_price

        response = get_checkout_tax_data(checkout_info, lines, discounts, self.config)
        if not response or "error" in response:
            return base_shipping_price

        currency = str(response.get("currencyCode"))
        return self._calculate_checkout_shipping(
            currency, response.get("lines", []), base_shipping_price
        )

    def preprocess_order_creation(
        self,
        checkout_info: "CheckoutInfo",
        discounts: Iterable[DiscountInfo],
        lines: Optional[Iterable["CheckoutLineInfo"]],
        previous_value: Any,
    ):
        """Ensure all the data is correct and we can proceed with creation of order.

        Raise an error when can't receive taxes.
        """
        if lines is None:
            lines = fetch_checkout_lines(checkout_info.checkout)
        if self._skip_plugin(previous_value):
            return previous_value

        data = generate_request_data_from_checkout(
            checkout_info,
            lines,
            self.config,
            transaction_token=str(checkout_info.checkout.token),
            transaction_type=TransactionType.ORDER,
            discounts=discounts,
        )
        if not data.get("createTransactionModel", {}).get("lines"):
            return previous_value
        transaction_url = urljoin(
            get_api_url(self.config.use_sandbox), "transactions/createoradjust"
        )
        # Trace the synchronous API call; span name "crateoradjust" is a
        # pre-existing typo kept for continuity of recorded traces.
        with opentracing.global_tracer().start_active_span(
            "avatax.transactions.crateoradjust"
        ) as scope:
            span = scope.span
            span.set_tag(opentracing.tags.COMPONENT, "tax")
            span.set_tag("service.name", "avatax")
            response = api_post_request(transaction_url, data, self.config)
        if not response or "error" in response:
            msg = response.get("error", {}).get("message", "")
            error_code = response.get("error", {}).get("code", "")
            logger.warning(
                "Unable to calculate taxes for checkout %s, error_code: %s, "
                "error_msg: %s",
                checkout_info.checkout.token,
                error_code,
                msg,
            )
            customer_msg = CustomerErrors.get_error_msg(response.get("error", {}))
            raise TaxError(customer_msg)
        return previous_value

    def order_created(self, order: "Order", previous_value: Any) -> Any:
        """Send the created order to AvaTax asynchronously (Celery task)."""
        if not self.active:
            return previous_value
        request_data = get_order_request_data(order, self.config)

        transaction_url = urljoin(
            get_api_url(self.config.use_sandbox), "transactions/createoradjust"
        )
        api_post_request_task.delay(
            transaction_url, request_data, asdict(self.config), order.id
        )

        return previous_value

    def calculate_checkout_line_total(
        self,
        checkout_info: "CheckoutInfo",
        lines: Iterable["CheckoutLineInfo"],
        checkout_line_info: "CheckoutLineInfo",
        address: Optional["Address"],
        discounts: Iterable["DiscountInfo"],
        previous_value: TaxedMoney,
    ) -> TaxedMoney:
        """Return the taxed total for a single checkout line, matched in the
        AvaTax response by the variant SKU."""
        if self._skip_plugin(previous_value):
            return previous_value

        base_total = previous_value
        if not checkout_line_info.product.charge_taxes:
            return base_total

        if not _validate_checkout(checkout_info, lines):
            return base_total

        taxes_data = get_checkout_tax_data(checkout_info, lines, discounts, self.config)
        if not taxes_data or "error" in taxes_data:
            return base_total

        currency = taxes_data.get("currencyCode")
        for line in taxes_data.get("lines", []):
            if line.get("itemCode") == checkout_line_info.variant.sku:
                tax = Decimal(line.get("tax", 0.0))
                line_net = Decimal(line["lineAmount"])
                line_gross = Money(amount=line_net + tax, currency=currency)
                line_net = Money(amount=line_net, currency=currency)
                return TaxedMoney(net=line_net, gross=line_gross)

        return base_total

    def calculate_checkout_line_unit_price(
        self,
        checkout_info: "CheckoutInfo",
        lines: Iterable["CheckoutLineInfo"],
        checkout_line_info: "CheckoutLineInfo",
        address: Optional["Address"],
        discounts: Iterable["DiscountInfo"],
        previous_value: TaxedMoney,
    ):
        """Return the taxed unit price for a checkout line."""
        if not checkout_line_info.product.charge_taxes:
            return previous_value
        return self._calculate_unit_price(
            checkout_info,
            checkout_line_info.line,
            lines,
            checkout_line_info.variant,
            previous_value,
            discounts,
            is_order=False,
        )

    def calculate_order_line_unit(
        self,
        order: "Order",
        order_line: "OrderLine",
        variant: "ProductVariant",
        product: "Product",
        previous_value: TaxedMoney,
    ) -> TaxedMoney:
        """Return the taxed unit price for an order line."""
        if not variant or (variant and not product.charge_taxes):
            return previous_value
        return self._calculate_unit_price(
            order, order_line, [], variant, previous_value, is_order=True
        )

    def _calculate_unit_price(
        self,
        instance: Union["CheckoutInfo", "Order"],
        line: Union["CheckoutLine", "OrderLine"],
        lines_info: Iterable["CheckoutLineInfo"],
        variant: "ProductVariant",
        base_value: TaxedMoney,
        discounts: Optional[Iterable[DiscountInfo]] = [],
        *,
        is_order: bool,
    ):
        """Derive the per-unit taxed price from AvaTax line data by dividing
        the line amounts by the line quantity.

        NOTE(review): ``discounts=[]`` is a mutable default argument; it is
        never mutated here, but ``None`` would be the safer convention.
        """
        taxes_data = self._get_tax_data(
            instance, base_value, is_order, discounts, lines_info
        )
        if taxes_data is None:
            return base_value
        currency = taxes_data.get("currencyCode")
        for line_data in taxes_data.get("lines", []):
            if line_data.get("itemCode") == variant.sku:
                tax = Decimal(line_data.get("tax", 0.0)) / line.quantity
                net = Decimal(line_data.get("lineAmount", 0.0)) / line.quantity
                gross = Money(amount=net + tax, currency=currency)
                net = Money(amount=net, currency=currency)
                return TaxedMoney(net=net, gross=gross)
        return base_value

    def calculate_order_shipping(
        self, order: "Order", previous_value: TaxedMoney
    ) -> TaxedMoney:
        """Return the order shipping price with AvaTax taxes applied."""
        if self._skip_plugin(previous_value):
            return previous_value

        if not charge_taxes_on_shipping():
            return previous_value

        if not _validate_order(order):
            return zero_taxed_money(order.total.currency)
        taxes_data = get_order_tax_data(order, self.config, False)
        currency = taxes_data.get("currencyCode")
        for line in taxes_data.get("lines", []):
            if line["itemCode"] == "Shipping":
                tax = Decimal(line.get("tax", 0.0))
                net = Decimal(line.get("lineAmount", 0.0))
                gross = Money(amount=net + tax, currency=currency)
                net = Money(amount=net, currency=currency)
                return TaxedMoney(net=net, gross=gross)
        return TaxedMoney(
            # Ignore typing checks because it is checked in _validate_order
            net=order.shipping_method.price,  # type: ignore
            gross=order.shipping_method.price,  # type: ignore
        )

    def get_tax_rate_type_choices(self, previous_value: Any) -> List[TaxType]:
        """Return the AvaTax tax codes as selectable ``TaxType`` choices."""
        if not self.active:
            return previous_value
        return [
            TaxType(code=tax_code, description=desc)
            for tax_code, desc in get_cached_tax_codes_or_fetch(self.config).items()
        ]

    def get_checkout_line_tax_rate(
        self,
        checkout_info: "CheckoutInfo",
        lines: Iterable["CheckoutLineInfo"],
        checkout_line_info: "CheckoutLineInfo",
        address: Optional["Address"],
        discounts: Iterable[DiscountInfo],
        previous_value: Decimal,
    ) -> Decimal:
        """Return the tax rate for a checkout line."""
        return self._get_unit_tax_rate(
            checkout_info, previous_value, False, discounts, lines
        )

    def get_order_line_tax_rate(
        self,
        order: "Order",
        product: "Product",
        address: Optional["Address"],
        previous_value: Decimal,
    ) -> Decimal:
        """Return the tax rate for an order line."""
        return self._get_unit_tax_rate(order, previous_value, True)

    def get_checkout_shipping_tax_rate(
        self,
        checkout_info: "CheckoutInfo",
        lines: Iterable["CheckoutLineInfo"],
        address: Optional["Address"],
        discounts: Iterable[DiscountInfo],
        previous_value: Decimal,
    ):
        """Return the shipping tax rate for a checkout."""
        return self._get_shipping_tax_rate(
            checkout_info,
            previous_value,
            False,
            discounts,
            lines,
        )

    def get_order_shipping_tax_rate(self, order: "Order", previous_value: Decimal):
        """Return the shipping tax rate for an order."""
        return self._get_shipping_tax_rate(order, previous_value, True)

    def _get_unit_tax_rate(
        self,
        instance: Union["Order", "CheckoutInfo"],
        base_rate: Decimal,
        is_order: bool,
        discounts: Optional[Iterable[DiscountInfo]] = None,
        lines_info: Iterable["CheckoutLineInfo"] = [],
    ):
        """Read the unit tax rate from the AvaTax response summary; fall back
        to ``base_rate`` when unavailable."""
        response = self._get_tax_data(
            instance, base_rate, is_order, discounts, lines_info
        )
        if response is None:
            return base_rate
        rate = None
        response_summary = response.get("summary")
        if response_summary:
            rate = Decimal(response_summary[0].get("rate", 0.0))
        return rate or base_rate

    def _get_shipping_tax_rate(
        self,
        instance: Union["Order", "CheckoutInfo"],
        base_rate: Decimal,
        is_order: bool,
        discounts: Optional[Iterable[DiscountInfo]] = None,
        lines_info: Iterable["CheckoutLineInfo"] = [],
    ):
        """Read the shipping tax rate from the AvaTax "Shipping" line details;
        fall back to ``base_rate`` when unavailable."""
        response = self._get_tax_data(
            instance, base_rate, is_order, discounts, lines_info
        )
        if response is None:
            return base_rate
        lines_data = response.get("lines", [])
        for line in lines_data:
            if line["itemCode"] == "Shipping":
                line_details = line.get("details")
                if not line_details:
                    # NOTE(review): bare ``return`` yields None instead of a
                    # Decimal -- callers expecting a rate should be checked.
                    return
                return Decimal(line_details[0].get("rate", 0.0))
        return base_rate

    def _get_tax_data(
        self,
        instance: Union["Order", "CheckoutInfo"],
        base_value: Decimal,
        is_order: bool,
        discounts: Optional[Iterable[DiscountInfo]] = None,
        lines_info: Iterable["CheckoutLineInfo"] = [],
    ):
        """Fetch the raw AvaTax response for an order or checkout, or None
        when the plugin is skipped, validation fails, or the API errored."""
        if self._skip_plugin(base_value):
            return None

        valid = (
            _validate_order(instance)  # type: ignore
            if is_order
            else _validate_checkout(instance, lines_info)  # type: ignore
        )
        if not valid:
            return None

        response = (
            get_order_tax_data(instance, self.config, False)  # type: ignore
            if is_order
            else get_checkout_tax_data(instance, lines_info, discounts, self.config)  # type: ignore
        )
        if not response or "error" in response:
            return None

        return response

    def assign_tax_code_to_object_meta(
        self,
        obj: Union["Product", "ProductType"],
        tax_code: Optional[str],
        previous_value: Any,
    ):
        """Store (or, for ``tax_code=None``, remove) the AvaTax tax code and
        its description in the object's metadata."""
        if not self.active:
            return previous_value

        if tax_code is None and obj.pk:
            obj.delete_value_from_metadata(META_CODE_KEY)
            obj.delete_value_from_metadata(META_DESCRIPTION_KEY)
            return previous_value

        codes = get_cached_tax_codes_or_fetch(self.config)
        if tax_code not in codes:
            return previous_value

        tax_description = codes.get(tax_code)
        tax_item = {META_CODE_KEY: tax_code, META_DESCRIPTION_KEY: tax_description}
        obj.store_value_in_metadata(items=tax_item)
        return previous_value

    def get_tax_code_from_object_meta(
        self, obj: Union["Product", "ProductType"], previous_value: Any
    ) -> TaxType:
        """Read the AvaTax tax code/description stored in object metadata."""
        if not self.active:
            return previous_value

        # Product has None as it determines if we overwrite taxes for the product
        default_tax_code = None
        default_tax_description = None
        if isinstance(obj, ProductType):
            default_tax_code = DEFAULT_TAX_CODE
            default_tax_description = DEFAULT_TAX_DESCRIPTION

        tax_code = obj.get_value_from_metadata(META_CODE_KEY, default_tax_code)
        tax_description = obj.get_value_from_metadata(
            META_DESCRIPTION_KEY, default_tax_description
        )
        return TaxType(
            code=tax_code,
            description=tax_description,
        )

    def show_taxes_on_storefront(self, previous_value: bool) -> bool:
        """Taxes are fetched dynamically from AvaTax, so the storefront must
        not display precomputed tax rates."""
        if not self.active:
            return previous_value
        return False

    def fetch_taxes_data(self, previous_value):
        """Warm the tax-code cache from AvaTax; return True on completion."""
        if not self.active:
            return previous_value
        get_cached_tax_codes_or_fetch(self.config)
        return True

    @classmethod
    def validate_authentication(cls, plugin_configuration: "PluginConfiguration"):
        """Ping the AvaTax API with the configured credentials.

        Raises ValidationError when authentication fails.
        """
        conf = {
            data["name"]: data["value"] for data in plugin_configuration.configuration
        }
        url = urljoin(get_api_url(conf["Use sandbox"]), "utilities/ping")
        with opentracing.global_tracer().start_active_span(
            "avatax.utilities.ping"
        ) as scope:
            span = scope.span
            span.set_tag(opentracing.tags.COMPONENT, "tax")
            span.set_tag("service.name", "avatax")
            response = api_get_request(
                url,
                username_or_account=conf["Username or account"],
                password_or_license=conf["Password or license"],
            )

        if not response.get("authenticated"):
            raise ValidationError(
                "Authentication failed. Please check provided data.",
                code=PluginErrorCode.PLUGIN_MISCONFIGURED.value,
            )

    @classmethod
    def validate_plugin_configuration(cls, plugin_configuration: "PluginConfiguration"):
        """Validate if provided configuration is correct."""
        missing_fields = []
        configuration = plugin_configuration.configuration
        configuration = {item["name"]: item["value"] for item in configuration}
        if not configuration["Username or account"]:
            missing_fields.append("Username or account")
        if not configuration["Password or license"]:
            missing_fields.append("Password or license")

        if plugin_configuration.active:
            if missing_fields:
                error_msg = (
                    "To enable a plugin, you need to provide values for the "
                    "following fields: "
                )
                raise ValidationError(
                    error_msg + ", ".join(missing_fields),
                    code=PluginErrorCode.PLUGIN_MISCONFIGURED.value,
                )

            cls.validate_authentication(plugin_configuration)
| 36.402536 | 100 | 0.623291 | import logging
from dataclasses import asdict
from decimal import Decimal
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Union
from urllib.parse import urljoin
import opentracing
import opentracing.tags
from django.core.exceptions import ValidationError
from prices import Money, TaxedMoney, TaxedMoneyRange
from ...checkout import base_calculations
from ...checkout.fetch import fetch_checkout_lines
from ...core.taxes import TaxError, TaxType, charge_taxes_on_shipping, zero_taxed_money
from ...discount import DiscountInfo
from ...product.models import ProductType
from ..base_plugin import BasePlugin, ConfigurationTypeField
from ..error_codes import PluginErrorCode
from . import (
DEFAULT_TAX_CODE,
DEFAULT_TAX_DESCRIPTION,
META_CODE_KEY,
META_DESCRIPTION_KEY,
AvataxConfiguration,
CustomerErrors,
TransactionType,
_validate_checkout,
_validate_order,
api_get_request,
api_post_request,
generate_request_data_from_checkout,
get_api_url,
get_cached_tax_codes_or_fetch,
get_checkout_tax_data,
get_order_request_data,
get_order_tax_data,
)
from .tasks import api_post_request_task
if TYPE_CHECKING:
from ...account.models import Address
from ...channel.models import Channel
from ...checkout.fetch import CheckoutInfo, CheckoutLineInfo
from ...checkout.models import Checkout, CheckoutLine
from ...order.models import Order, OrderLine
from ...product.models import Product, ProductVariant
from ..models import PluginConfiguration
logger = logging.getLogger(__name__)
class AvataxPlugin(BasePlugin):
PLUGIN_NAME = "Avalara"
PLUGIN_ID = "mirumee.taxes.avalara"
DEFAULT_CONFIGURATION = [
{"name": "Username or account", "value": None},
{"name": "Password or license", "value": None},
{"name": "Use sandbox", "value": True},
{"name": "Company name", "value": "DEFAULT"},
{"name": "Autocommit", "value": False},
]
CONFIG_STRUCTURE = {
"Username or account": {
"type": ConfigurationTypeField.STRING,
"help_text": "Provide user or account details",
"label": "Username or account",
},
"Password or license": {
"type": ConfigurationTypeField.PASSWORD,
"help_text": "Provide password or license details",
"label": "Password or license",
},
"Use sandbox": {
"type": ConfigurationTypeField.BOOLEAN,
"help_text": "Determines if Saleor should use Avatax sandbox API.",
"label": "Use sandbox",
},
"Company name": {
"type": ConfigurationTypeField.STRING,
"help_text": "Avalara needs to receive company code. Some more "
"complicated systems can use more than one company "
"code, in that case, this variable should be changed "
"based on data from Avalara's admin panel",
"label": "Company name",
},
"Autocommit": {
"type": ConfigurationTypeField.BOOLEAN,
"help_text": "Determines, if all transactions sent to Avalara "
"should be committed by default.",
"label": "Autocommit",
},
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Convert to dict to easier take config elements
configuration = {item["name"]: item["value"] for item in self.configuration}
self.config = AvataxConfiguration(
username_or_account=configuration["Username or account"],
password_or_license=configuration["Password or license"],
use_sandbox=configuration["Use sandbox"],
company_name=configuration["Company name"],
autocommit=configuration["Autocommit"],
)
def _skip_plugin(
self, previous_value: Union[TaxedMoney, TaxedMoneyRange, Decimal]
) -> bool:
if not (self.config.username_or_account and self.config.password_or_license):
return True
if not self.active:
return True
# The previous plugin already calculated taxes so we can skip our logic
if isinstance(previous_value, TaxedMoneyRange):
start = previous_value.start
stop = previous_value.stop
return start.net != start.gross and stop.net != stop.gross
if isinstance(previous_value, TaxedMoney):
return previous_value.net != previous_value.gross
return False
def _append_prices_of_not_taxed_lines(
self,
price: TaxedMoney,
lines: Iterable["CheckoutLineInfo"],
channel: "Channel",
discounts: Iterable[DiscountInfo],
):
for line_info in lines:
if line_info.variant.product.charge_taxes:
continue
line_price = base_calculations.base_checkout_line_total(
line_info,
channel,
discounts,
)
price.gross.amount += line_price.gross.amount
price.net.amount += line_price.net.amount
return price
def calculate_checkout_total(
self,
checkout_info: "CheckoutInfo",
lines: Iterable["CheckoutLineInfo"],
address: Optional["Address"],
discounts: Iterable[DiscountInfo],
previous_value: TaxedMoney,
) -> TaxedMoney:
if self._skip_plugin(previous_value):
return previous_value
checkout_total = previous_value
if not _validate_checkout(checkout_info, lines):
return checkout_total
response = get_checkout_tax_data(checkout_info, lines, discounts, self.config)
if not response or "error" in response:
return checkout_total
currency = response.get("currencyCode")
tax = Decimal(response.get("totalTax", 0.0))
total_net = Decimal(response.get("totalAmount", 0.0))
total_gross = Money(amount=total_net + tax, currency=currency)
total_net = Money(amount=total_net, currency=currency)
taxed_total = TaxedMoney(net=total_net, gross=total_gross)
total = self._append_prices_of_not_taxed_lines(
taxed_total, lines, checkout_info.channel, discounts
)
voucher_value = checkout_info.checkout.discount
if voucher_value:
total -= voucher_value
return max(total, zero_taxed_money(total.currency))
def _calculate_checkout_shipping(
self, currency: str, lines: List[Dict], shipping_price: TaxedMoney
) -> TaxedMoney:
shipping_tax = Decimal(0.0)
shipping_net = shipping_price.net.amount
for line in lines:
if line["itemCode"] == "Shipping":
shipping_net = Decimal(line["lineAmount"])
shipping_tax = Decimal(line["tax"])
break
shipping_gross = Money(amount=shipping_net + shipping_tax, currency=currency)
shipping_net = Money(amount=shipping_net, currency=currency)
return TaxedMoney(net=shipping_net, gross=shipping_gross)
def calculate_checkout_shipping(
self,
checkout_info: "CheckoutInfo",
lines: Iterable["CheckoutLineInfo"],
address: Optional["Address"],
discounts: Iterable[DiscountInfo],
previous_value: TaxedMoney,
) -> TaxedMoney:
base_shipping_price = previous_value
if not charge_taxes_on_shipping():
return base_shipping_price
if self._skip_plugin(previous_value):
return base_shipping_price
if not _validate_checkout(checkout_info, lines):
return base_shipping_price
response = get_checkout_tax_data(checkout_info, lines, discounts, self.config)
if not response or "error" in response:
return base_shipping_price
currency = str(response.get("currencyCode"))
return self._calculate_checkout_shipping(
currency, response.get("lines", []), base_shipping_price
)
def preprocess_order_creation(
self,
checkout_info: "CheckoutInfo",
discounts: Iterable[DiscountInfo],
lines: Optional[Iterable["CheckoutLineInfo"]],
previous_value: Any,
):
if lines is None:
lines = fetch_checkout_lines(checkout_info.checkout)
if self._skip_plugin(previous_value):
return previous_value
data = generate_request_data_from_checkout(
checkout_info,
lines,
self.config,
transaction_token=str(checkout_info.checkout.token),
transaction_type=TransactionType.ORDER,
discounts=discounts,
)
if not data.get("createTransactionModel", {}).get("lines"):
return previous_value
transaction_url = urljoin(
get_api_url(self.config.use_sandbox), "transactions/createoradjust"
)
with opentracing.global_tracer().start_active_span(
"avatax.transactions.crateoradjust"
) as scope:
span = scope.span
span.set_tag(opentracing.tags.COMPONENT, "tax")
span.set_tag("service.name", "avatax")
response = api_post_request(transaction_url, data, self.config)
if not response or "error" in response:
msg = response.get("error", {}).get("message", "")
error_code = response.get("error", {}).get("code", "")
logger.warning(
"Unable to calculate taxes for checkout %s, error_code: %s, "
"error_msg: %s",
checkout_info.checkout.token,
error_code,
msg,
)
customer_msg = CustomerErrors.get_error_msg(response.get("error", {}))
raise TaxError(customer_msg)
return previous_value
def order_created(self, order: "Order", previous_value: Any) -> Any:
if not self.active:
return previous_value
request_data = get_order_request_data(order, self.config)
transaction_url = urljoin(
get_api_url(self.config.use_sandbox), "transactions/createoradjust"
)
api_post_request_task.delay(
transaction_url, request_data, asdict(self.config), order.id
)
return previous_value
def calculate_checkout_line_total(
self,
checkout_info: "CheckoutInfo",
lines: Iterable["CheckoutLineInfo"],
checkout_line_info: "CheckoutLineInfo",
address: Optional["Address"],
discounts: Iterable["DiscountInfo"],
previous_value: TaxedMoney,
) -> TaxedMoney:
if self._skip_plugin(previous_value):
return previous_value
base_total = previous_value
if not checkout_line_info.product.charge_taxes:
return base_total
if not _validate_checkout(checkout_info, lines):
return base_total
taxes_data = get_checkout_tax_data(checkout_info, lines, discounts, self.config)
if not taxes_data or "error" in taxes_data:
return base_total
currency = taxes_data.get("currencyCode")
for line in taxes_data.get("lines", []):
if line.get("itemCode") == checkout_line_info.variant.sku:
tax = Decimal(line.get("tax", 0.0))
line_net = Decimal(line["lineAmount"])
line_gross = Money(amount=line_net + tax, currency=currency)
line_net = Money(amount=line_net, currency=currency)
return TaxedMoney(net=line_net, gross=line_gross)
return base_total
def calculate_checkout_line_unit_price(
self,
checkout_info: "CheckoutInfo",
lines: Iterable["CheckoutLineInfo"],
checkout_line_info: "CheckoutLineInfo",
address: Optional["Address"],
discounts: Iterable["DiscountInfo"],
previous_value: TaxedMoney,
):
if not checkout_line_info.product.charge_taxes:
return previous_value
return self._calculate_unit_price(
checkout_info,
checkout_line_info.line,
lines,
checkout_line_info.variant,
previous_value,
discounts,
is_order=False,
)
def calculate_order_line_unit(
self,
order: "Order",
order_line: "OrderLine",
variant: "ProductVariant",
product: "Product",
previous_value: TaxedMoney,
) -> TaxedMoney:
if not variant or (variant and not product.charge_taxes):
return previous_value
return self._calculate_unit_price(
order, order_line, [], variant, previous_value, is_order=True
)
def _calculate_unit_price(
self,
instance: Union["CheckoutInfo", "Order"],
line: Union["CheckoutLine", "OrderLine"],
lines_info: Iterable["CheckoutLineInfo"],
variant: "ProductVariant",
base_value: TaxedMoney,
discounts: Optional[Iterable[DiscountInfo]] = [],
*,
is_order: bool,
):
taxes_data = self._get_tax_data(
instance, base_value, is_order, discounts, lines_info
)
if taxes_data is None:
return base_value
currency = taxes_data.get("currencyCode")
for line_data in taxes_data.get("lines", []):
if line_data.get("itemCode") == variant.sku:
tax = Decimal(line_data.get("tax", 0.0)) / line.quantity
net = Decimal(line_data.get("lineAmount", 0.0)) / line.quantity
gross = Money(amount=net + tax, currency=currency)
net = Money(amount=net, currency=currency)
return TaxedMoney(net=net, gross=gross)
return base_value
def calculate_order_shipping(
self, order: "Order", previous_value: TaxedMoney
) -> TaxedMoney:
if self._skip_plugin(previous_value):
return previous_value
if not charge_taxes_on_shipping():
return previous_value
if not _validate_order(order):
return zero_taxed_money(order.total.currency)
taxes_data = get_order_tax_data(order, self.config, False)
currency = taxes_data.get("currencyCode")
for line in taxes_data.get("lines", []):
if line["itemCode"] == "Shipping":
tax = Decimal(line.get("tax", 0.0))
net = Decimal(line.get("lineAmount", 0.0))
gross = Money(amount=net + tax, currency=currency)
net = Money(amount=net, currency=currency)
return TaxedMoney(net=net, gross=gross)
return TaxedMoney(
# Ignore typing checks because it is checked in _validate_order
net=order.shipping_method.price, # type: ignore
gross=order.shipping_method.price, # type: ignore
)
def get_tax_rate_type_choices(self, previous_value: Any) -> List[TaxType]:
if not self.active:
return previous_value
return [
TaxType(code=tax_code, description=desc)
for tax_code, desc in get_cached_tax_codes_or_fetch(self.config).items()
]
def get_checkout_line_tax_rate(
self,
checkout_info: "CheckoutInfo",
lines: Iterable["CheckoutLineInfo"],
checkout_line_info: "CheckoutLineInfo",
address: Optional["Address"],
discounts: Iterable[DiscountInfo],
previous_value: Decimal,
) -> Decimal:
return self._get_unit_tax_rate(
checkout_info, previous_value, False, discounts, lines
)
    def get_order_line_tax_rate(
        self,
        order: "Order",
        product: "Product",
        address: Optional["Address"],
        previous_value: Decimal,
    ) -> Decimal:
        """Return the unit tax rate for an order line (delegates with is_order=True)."""
        return self._get_unit_tax_rate(order, previous_value, True)
    def get_checkout_shipping_tax_rate(
        self,
        checkout_info: "CheckoutInfo",
        lines: Iterable["CheckoutLineInfo"],
        address: Optional["Address"],
        discounts: Iterable[DiscountInfo],
        previous_value: Decimal,
    ):
        """Return the shipping tax rate for a checkout (delegates with is_order=False)."""
        return self._get_shipping_tax_rate(
            checkout_info,
            previous_value,
            False,
            discounts,
            lines,
        )
    def get_order_shipping_tax_rate(self, order: "Order", previous_value: Decimal):
        """Return the shipping tax rate for an order (delegates with is_order=True)."""
        return self._get_shipping_tax_rate(order, previous_value, True)
def _get_unit_tax_rate(
self,
instance: Union["Order", "CheckoutInfo"],
base_rate: Decimal,
is_order: bool,
discounts: Optional[Iterable[DiscountInfo]] = None,
lines_info: Iterable["CheckoutLineInfo"] = [],
):
response = self._get_tax_data(
instance, base_rate, is_order, discounts, lines_info
)
if response is None:
return base_rate
rate = None
response_summary = response.get("summary")
if response_summary:
rate = Decimal(response_summary[0].get("rate", 0.0))
return rate or base_rate
def _get_shipping_tax_rate(
self,
instance: Union["Order", "CheckoutInfo"],
base_rate: Decimal,
is_order: bool,
discounts: Optional[Iterable[DiscountInfo]] = None,
lines_info: Iterable["CheckoutLineInfo"] = [],
):
response = self._get_tax_data(
instance, base_rate, is_order, discounts, lines_info
)
if response is None:
return base_rate
lines_data = response.get("lines", [])
for line in lines_data:
if line["itemCode"] == "Shipping":
line_details = line.get("details")
if not line_details:
return
return Decimal(line_details[0].get("rate", 0.0))
return base_rate
def _get_tax_data(
self,
instance: Union["Order", "CheckoutInfo"],
base_value: Decimal,
is_order: bool,
discounts: Optional[Iterable[DiscountInfo]] = None,
lines_info: Iterable["CheckoutLineInfo"] = [],
):
if self._skip_plugin(base_value):
return None
valid = (
_validate_order(instance) # type: ignore
if is_order
else _validate_checkout(instance, lines_info) # type: ignore
)
if not valid:
return None
response = (
get_order_tax_data(instance, self.config, False) # type: ignore
if is_order
else get_checkout_tax_data(instance, lines_info, discounts, self.config) # type: ignore
)
if not response or "error" in response:
return None
return response
    def assign_tax_code_to_object_meta(
        self,
        obj: Union["Product", "ProductType"],
        tax_code: Optional[str],
        previous_value: Any,
    ):
        """Store (or clear) the Avatax tax code and description in *obj*'s metadata."""
        if not self.active:
            return previous_value
        # A null tax code on an already-saved object clears any stored code.
        if tax_code is None and obj.pk:
            obj.delete_value_from_metadata(META_CODE_KEY)
            obj.delete_value_from_metadata(META_DESCRIPTION_KEY)
            return previous_value
        codes = get_cached_tax_codes_or_fetch(self.config)
        # Only persist codes that Avatax actually recognizes.
        if tax_code not in codes:
            return previous_value
        tax_description = codes.get(tax_code)
        tax_item = {META_CODE_KEY: tax_code, META_DESCRIPTION_KEY: tax_description}
        obj.store_value_in_metadata(items=tax_item)
        return previous_value
    def get_tax_code_from_object_meta(
        self, obj: Union["Product", "ProductType"], previous_value: Any
    ) -> TaxType:
        """Read the stored Avatax tax code and description from *obj*'s metadata."""
        if not self.active:
            return previous_value
        # Product has None as it determines if we overwrite taxes for the product
        default_tax_code = None
        default_tax_description = None
        if isinstance(obj, ProductType):
            default_tax_code = DEFAULT_TAX_CODE
            default_tax_description = DEFAULT_TAX_DESCRIPTION
        tax_code = obj.get_value_from_metadata(META_CODE_KEY, default_tax_code)
        tax_description = obj.get_value_from_metadata(
            META_DESCRIPTION_KEY, default_tax_description
        )
        return TaxType(
            code=tax_code,
            description=tax_description,
        )
def show_taxes_on_storefront(self, previous_value: bool) -> bool:
if not self.active:
return previous_value
return False
    def fetch_taxes_data(self, previous_value):
        """Warm the tax-code cache; returns True when the plugin is active."""
        if not self.active:
            return previous_value
        get_cached_tax_codes_or_fetch(self.config)
        return True
    @classmethod
    def validate_authentication(cls, plugin_configuration: "PluginConfiguration"):
        """Ping the Avatax API with the configured credentials.

        Raises:
            ValidationError (PLUGIN_MISCONFIGURED): when the service does not
                report an authenticated session.
        """
        conf = {
            data["name"]: data["value"] for data in plugin_configuration.configuration
        }
        url = urljoin(get_api_url(conf["Use sandbox"]), "utilities/ping")
        # Trace the ping call so auth checks show up in monitoring.
        with opentracing.global_tracer().start_active_span(
            "avatax.utilities.ping"
        ) as scope:
            span = scope.span
            span.set_tag(opentracing.tags.COMPONENT, "tax")
            span.set_tag("service.name", "avatax")
            response = api_get_request(
                url,
                username_or_account=conf["Username or account"],
                password_or_license=conf["Password or license"],
            )
            if not response.get("authenticated"):
                raise ValidationError(
                    "Authentication failed. Please check provided data.",
                    code=PluginErrorCode.PLUGIN_MISCONFIGURED.value,
                )
    @classmethod
    def validate_plugin_configuration(cls, plugin_configuration: "PluginConfiguration"):
        """Ensure required credentials are present (and valid) before activation."""
        missing_fields = []
        configuration = plugin_configuration.configuration
        configuration = {item["name"]: item["value"] for item in configuration}
        if not configuration["Username or account"]:
            missing_fields.append("Username or account")
        if not configuration["Password or license"]:
            missing_fields.append("Password or license")
        # Only an *active* plugin must be fully configured and authenticated.
        if plugin_configuration.active:
            if missing_fields:
                error_msg = (
                    "To enable a plugin, you need to provide values for the "
                    "following fields: "
                )
                raise ValidationError(
                    error_msg + ", ".join(missing_fields),
                    code=PluginErrorCode.PLUGIN_MISCONFIGURED.value,
                )
            cls.validate_authentication(plugin_configuration)
| true | true |
f71e4eb8f2d77ec51105db33c6cf3421c481bf57 | 2,194 | py | Python | utils.py | uhh-lt/semeval2019-hhmm | b746b0fb8ab3b957d399276cb354e950f0ef30ed | [
"Apache-2.0"
] | null | null | null | utils.py | uhh-lt/semeval2019-hhmm | b746b0fb8ab3b957d399276cb354e950f0ef30ed | [
"Apache-2.0"
] | null | null | null | utils.py | uhh-lt/semeval2019-hhmm | b746b0fb8ab3b957d399276cb354e950f0ef30ed | [
"Apache-2.0"
] | null | null | null | import pandas as pd
from pathlib import Path
def df_to_csv(df, path):
    """Write *df* to *path* as tab-separated UTF-8 without the index column."""
    df.to_csv(path, index=False, sep='\t', encoding='utf-8')
def csv_to_df(path):
    """Read a tab-separated UTF-8 file into a DataFrame with all-string columns."""
    return pd.read_csv(path, sep='\t', encoding='utf-8', dtype=str)
def max_arguments(task):
    """Return the maximum number of frame arguments on any line of *task*.

    Lines are split on single spaces; the first three tokens (context id,
    verb position, verb-frame) are not arguments, hence the ``- 3``.
    The original leaked the file handle; ``with`` closes it deterministically.
    """
    max_tokens = 0
    with open(task, 'r') as fp:
        for line in fp:
            max_tokens = max(max_tokens, len(line.split(' ')))
    return max_tokens - 3  # context_id, verb pos, verb-frame
def max_frameArguments(dataset):
    """Return the max frame-argument count for the task-2.1 split *dataset*.

    *dataset* must be one of 'dev', 'auto' or 'test'; any other value leaves
    ``task21`` unbound and raises NameError at the final call.
    """
    dir = "./semeval_data"
    task21_auto = dir + "/dev/auto/task-2.1.auto.txt"
    task21_dev = dir + "/dev/task-2.1.txt"
    task21_test =dir+"/test/task-2.1.txt"
    if dataset == 'dev':
        task21 = task21_dev
    elif dataset == 'auto':
        task21 = task21_auto
    elif dataset == 'test':
        task21 = task21_test
    return max_arguments(task21)
# ------------------------------------------------------------- df input from txt
import ud2csv
dir = "./semeval_data"
ud_gold = dir+"/dep-stx/pos-gold-dep-auto.conll.txt"
# -----------------------------------
def task_to_df(task, dataset):
    """Load task 1 or 22 as a DataFrame, preferring a cached CSV under ./input.

    Any task id other than 1 or 22 falls through and returns None.
    """
    if Path('./input/train_task{}_{}.csv'.format(task, dataset)).exists():
        return csv_to_df('./input/train_task{}_{}.csv'.format(task, dataset))
    else:
        if task==1:
            return ud2csv.task1_to_df(dir+'/{}/task-1.txt'.format(dataset), ud_gold)
        if task ==22:
            return ud2csv.task22_to_df(dir + '/{}/task-2.2.txt'.format(dataset), ud_gold)
def task1_to_df_gd(dataset):
    """Load the gold-dependency variant of task 1 for *dataset*.

    NOTE(review): the existence check looks at ``train_task1_*`` but the
    cached file actually read is ``gd_task1_*`` — possible copy/paste slip;
    confirm against the files produced in ./input.
    """
    if Path('./input/train_task{}_{}.csv'.format(1, dataset)).exists():
        return csv_to_df('./input/gd_task{}_{}.csv'.format(1, dataset))
    else:
        return ud2csv.task1_to_df_gd(dir+'/{}/task-1.txt'.format(dataset), ud_gold)
def task22_baselines(dataset, gr='in'):
    """Return the grammatical baseline for task 2.2, using a cached CSV when present."""
    if Path('./input/all_grammaticalLabels_{}.csv'.format(dataset)).exists():
        df_task22 = csv_to_df('./input/all_grammaticalLabels_{}.csv'.format(dataset))
    else:
        df_task22 = ud2csv.task22_to_df_withFrameArgsDependencies(dir+'/{}/task-2.2.txt'.format(dataset), ud_gold)
    return ud2csv.getGrammaticalBaseline(df_task22, gr)
| 29.648649 | 114 | 0.611668 | import pandas as pd
from pathlib import Path
def df_to_csv(df, path):
df.to_csv(path, sep='\t', index=False, encoding='utf-8')
def csv_to_df(path):
df = pd.read_csv(path, sep='\t', dtype=str, encoding='utf-8')
return df
def max_arguments(task):
fp = open(task, 'r')
lines_args = fp.readlines()
maxT = 0
for line in lines_args:
tokens = len(line.split(' '))
if tokens > maxT:
maxT = tokens
return maxT - 3
def max_frameArguments(dataset):
dir = "./semeval_data"
task21_auto = dir + "/dev/auto/task-2.1.auto.txt"
task21_dev = dir + "/dev/task-2.1.txt"
task21_test =dir+"/test/task-2.1.txt"
if dataset == 'dev':
task21 = task21_dev
elif dataset == 'auto':
task21 = task21_auto
elif dataset == 'test':
task21 = task21_test
return max_arguments(task21)
import ud2csv
dir = "./semeval_data"
ud_gold = dir+"/dep-stx/pos-gold-dep-auto.conll.txt"
def task_to_df(task, dataset):
if Path('./input/train_task{}_{}.csv'.format(task, dataset)).exists():
return csv_to_df('./input/train_task{}_{}.csv'.format(task, dataset))
else:
if task==1:
return ud2csv.task1_to_df(dir+'/{}/task-1.txt'.format(dataset), ud_gold)
if task ==22:
return ud2csv.task22_to_df(dir + '/{}/task-2.2.txt'.format(dataset), ud_gold)
def task1_to_df_gd(dataset):
if Path('./input/train_task{}_{}.csv'.format(1, dataset)).exists():
return csv_to_df('./input/gd_task{}_{}.csv'.format(1, dataset))
else:
return ud2csv.task1_to_df_gd(dir+'/{}/task-1.txt'.format(dataset), ud_gold)
def task22_baselines(dataset, gr='in'):
if Path('./input/all_grammaticalLabels_{}.csv'.format(dataset)).exists():
df_task22 = csv_to_df('./input/all_grammaticalLabels_{}.csv'.format(dataset))
else:
df_task22 = ud2csv.task22_to_df_withFrameArgsDependencies(dir+'/{}/task-2.2.txt'.format(dataset), ud_gold)
return ud2csv.getGrammaticalBaseline(df_task22, gr)
| true | true |
f71e4ebc41a1f3a94f4f8f73cf4f5d5320ba4866 | 7,375 | py | Python | activemri/experimental/cvpr19_models/data/masking_utils.py | qinliuliuqin/active-mri-acquisition | b561f838667f4bc7753b1f89dfbdd545d0f00ada | [
"MIT"
] | 32 | 2020-10-05T19:46:56.000Z | 2022-02-15T10:37:13.000Z | activemri/experimental/cvpr19_models/data/masking_utils.py | qinliuliuqin/active-mri-acquisition | b561f838667f4bc7753b1f89dfbdd545d0f00ada | [
"MIT"
] | 2 | 2020-12-28T21:05:57.000Z | 2022-01-22T17:56:50.000Z | activemri/experimental/cvpr19_models/data/masking_utils.py | qinliuliuqin/active-mri-acquisition | b561f838667f4bc7753b1f89dfbdd545d0f00ada | [
"MIT"
] | 9 | 2020-11-22T18:15:04.000Z | 2022-02-11T06:07:38.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
import torch
def get_mask_func(mask_type, which_dataset, rnl_params=None):
    """Return the k-space mask-function object named by *mask_type*.

    Matching is by substring, so more specific names must be tested before
    generic ones ("symmetric_basic" before "basic", "symmetric_grid" before
    "grid"). A ``_rnl`` suffix requests a random number of lines.
    """
    # Whether the number of lines is random or not
    random_num_lines = mask_type[-4:] == "_rnl"
    if "symmetric_basic" in mask_type:
        logging.info(
            f"Mask is symmetric uniform choice with random_num_lines={random_num_lines}."
        )
        return SymmetricUniformChoiceMaskFunc(
            [0.125],
            [4],
            which_dataset,
            random_num_lines=random_num_lines,
            rnl_params=rnl_params,
        )
    if "basic" in mask_type:
        # First two parameters are ignored if `random_num_lines` is True
        logging.info(
            f"Mask is fixed acceleration mask with random_num_lines={random_num_lines}."
        )
        return BasicMaskFunc(
            [0.125],
            [4],
            which_dataset,
            random_num_lines=random_num_lines,
            rnl_params=rnl_params,
        )
    if "low_to_high" in mask_type:
        logging.info(
            f"Mask is symmetric low to high with random_num_lines={random_num_lines}."
        )
        return SymmetricLowToHighMaskFunc(
            [0.125],
            [4],
            which_dataset,
            random_num_lines=random_num_lines,
            rnl_params=rnl_params,
        )
    if "symmetric_grid" in mask_type:
        logging.info("Mask is symmetric grid.")
        return SymmetricUniformGridMaskFunc(
            [], [], which_dataset, random_num_lines=True, rnl_params=rnl_params
        )
    if "grid" in mask_type:
        logging.info("Mask is grid (not symmetric).")
        return UniformGridMaskFunc(
            [], [], which_dataset, random_num_lines=True, rnl_params=rnl_params
        )
    raise ValueError(f"Invalid mask type: {mask_type}.")
class MaskFunc:
    """Base k-space column mask generator.

    Chooses how many low- and high-frequency columns to sample (either a
    fixed acceleration drawn from the configured lists, or a random budget
    when ``random_num_lines`` is set) and delegates column placement to
    ``create_lf_focused_mask``, which subclasses override.
    """
    def __init__(
        self,
        center_fractions,
        accelerations,
        which_dataset,
        random_num_lines=False,
        rnl_params=None,
    ):
        if len(center_fractions) != len(accelerations):
            raise ValueError(
                "Number of center fractions should match number of accelerations"
            )
        self.center_fractions = center_fractions
        self.accelerations = accelerations
        self.random_num_lines = random_num_lines
        if rnl_params is None:
            # The lines below give approx. 4x acceleration on average.
            self.min_lowf_lines = 10 if which_dataset != "KNEE_RAW" else 30
            self.max_lowf_lines = 12 if which_dataset != "KNEE_RAW" else 32
            self.highf_beta_alpha = 1
            self.highf_beta_beta = 5
        else:
            # rnl_params encodes "min_lowf,max_lowf,beta_alpha,beta_beta".
            params = [int(x) for x in rnl_params.split(",")]
            assert len(params) == 4
            self.min_lowf_lines = params[0]
            self.max_lowf_lines = params[1]
            self.highf_beta_alpha = params[2]
            self.highf_beta_beta = params[3]
        self.rng = np.random.RandomState()
    def __call__(self, shape, seed=None):
        """Generate a float32 torch mask broadcastable against k-space of *shape*."""
        if len(shape) < 3:
            raise ValueError("Shape should have 3 or more dimensions")
        self.rng.seed(seed)
        num_cols = shape[-2]
        # Determine number of low and high frequency lines to scan
        if self.random_num_lines:
            # These are guaranteed to be an even number (useful for symmetric masks)
            num_low_freqs = self.rng.choice(
                range(self.min_lowf_lines, self.max_lowf_lines, 2)
            )
            num_high_freqs = (
                int(
                    self.rng.beta(self.highf_beta_alpha, self.highf_beta_beta)
                    * (num_cols - num_low_freqs)
                    // 2
                )
                * 2
            )
        else:
            choice = self.rng.randint(0, len(self.accelerations))
            center_fraction = self.center_fractions[choice]
            acceleration = self.accelerations[choice]
            num_low_freqs = int(round(num_cols * center_fraction))
            num_high_freqs = int(num_cols // acceleration - num_low_freqs)
        # Create the mask
        mask = self.create_lf_focused_mask(num_cols, num_high_freqs, num_low_freqs)
        # Reshape the mask
        mask_shape = [1 for _ in shape]
        mask_shape[-1] = num_cols
        mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))
        return mask
    def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):
        """Default policy: Bernoulli high-freq columns plus a centered low-freq band."""
        p = num_high_freqs / (num_cols - num_low_freqs)
        mask = self.rng.uniform(size=num_cols) < p
        pad = (num_cols - num_low_freqs + 1) // 2
        mask[pad : pad + num_low_freqs] = True
        return mask
class BasicMaskFunc(MaskFunc):
    """Random high-frequency columns plus a centered low-frequency band,
    returned in FFT (ifftshift) order."""

    def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):
        band_start = (num_cols - num_low_freqs + 1) // 2
        mask = np.zeros([num_cols])
        # Sample distinct high-frequency columns from the space outside the
        # low-frequency band, then shift the upper half past the band.
        chosen = self.rng.choice(
            np.arange(num_cols - num_low_freqs), num_high_freqs, replace=False
        )
        chosen[chosen >= band_start] += num_low_freqs
        mask[chosen] = True
        mask[band_start : band_start + num_low_freqs] = True
        return np.fft.ifftshift(mask, axes=0)
class SymmetricUniformChoiceMaskFunc(MaskFunc):
    """Uniform random choice over half the spectrum, mirrored to be symmetric."""
    def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):
        mask = np.zeros([num_cols])
        # Work on one half of the spectrum, then mirror it below.
        num_cols //= 2
        num_low_freqs //= 2
        num_high_freqs //= 2
        hf_cols = self.rng.choice(
            np.arange(num_cols - num_low_freqs), num_high_freqs, replace=False
        )
        mask[hf_cols] = True
        pad = num_cols - num_low_freqs
        mask[pad:num_cols] = True
        # Mirror the first half onto the second half (reversed).
        mask[: -(num_cols + 1) : -1] = mask[:num_cols]
        mask = np.fft.ifftshift(mask, axes=0)
        return mask
class UniformGridMaskFunc(MaskFunc):
    """Evenly spaced columns (spacing 4, 8 or 16, chosen at random) plus a
    low-frequency band split across both ends (mask is in FFT order)."""
    def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):
        mask = np.zeros([num_cols])
        acceleration = self.rng.choice([4, 8, 16])
        hf_cols = np.arange(acceleration, num_cols, acceleration)
        mask[hf_cols] = True
        mask[: num_low_freqs // 2] = mask[-(num_low_freqs // 2) :] = True
        return mask
class SymmetricLowToHighMaskFunc(MaskFunc):
    """Contiguous low-frequency band (low plus high budget combined),
    mirrored to be symmetric and returned in FFT order."""
    def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):
        mask = np.zeros([num_cols])
        # Work on one half of the spectrum, then mirror it below.
        num_cols //= 2
        num_low_freqs //= 2
        num_high_freqs //= 2
        # The whole budget is spent on a contiguous band next to the center.
        num_low_freqs += num_high_freqs
        pad = num_cols - num_low_freqs
        mask[pad:num_cols] = True
        mask[: -(num_cols + 1) : -1] = mask[:num_cols]
        mask = np.fft.ifftshift(mask, axes=0)
        return mask
class SymmetricUniformGridMaskFunc(MaskFunc):
    """Evenly spaced grid on half the spectrum, mirrored to be symmetric."""
    def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):
        mask = np.zeros([num_cols])
        acceleration = self.rng.choice([4, 8, 16])
        # Work on one half of the spectrum, then mirror it below.
        num_cols //= 2
        num_low_freqs //= 2
        hf_cols = np.arange(acceleration, num_cols, acceleration)
        mask[hf_cols] = True
        mask[:num_low_freqs] = True
        mask[: -(num_cols + 1) : -1] = mask[:num_cols]
        return mask
| 35.456731 | 89 | 0.608949 |
import logging
import numpy as np
import torch
def get_mask_func(mask_type, which_dataset, rnl_params=None):
random_num_lines = mask_type[-4:] == "_rnl"
if "symmetric_basic" in mask_type:
logging.info(
f"Mask is symmetric uniform choice with random_num_lines={random_num_lines}."
)
return SymmetricUniformChoiceMaskFunc(
[0.125],
[4],
which_dataset,
random_num_lines=random_num_lines,
rnl_params=rnl_params,
)
if "basic" in mask_type:
logging.info(
f"Mask is fixed acceleration mask with random_num_lines={random_num_lines}."
)
return BasicMaskFunc(
[0.125],
[4],
which_dataset,
random_num_lines=random_num_lines,
rnl_params=rnl_params,
)
if "low_to_high" in mask_type:
logging.info(
f"Mask is symmetric low to high with random_num_lines={random_num_lines}."
)
return SymmetricLowToHighMaskFunc(
[0.125],
[4],
which_dataset,
random_num_lines=random_num_lines,
rnl_params=rnl_params,
)
if "symmetric_grid" in mask_type:
logging.info("Mask is symmetric grid.")
return SymmetricUniformGridMaskFunc(
[], [], which_dataset, random_num_lines=True, rnl_params=rnl_params
)
if "grid" in mask_type:
logging.info("Mask is grid (not symmetric).")
return UniformGridMaskFunc(
[], [], which_dataset, random_num_lines=True, rnl_params=rnl_params
)
raise ValueError(f"Invalid mask type: {mask_type}.")
class MaskFunc:
def __init__(
self,
center_fractions,
accelerations,
which_dataset,
random_num_lines=False,
rnl_params=None,
):
if len(center_fractions) != len(accelerations):
raise ValueError(
"Number of center fractions should match number of accelerations"
)
self.center_fractions = center_fractions
self.accelerations = accelerations
self.random_num_lines = random_num_lines
if rnl_params is None:
self.min_lowf_lines = 10 if which_dataset != "KNEE_RAW" else 30
self.max_lowf_lines = 12 if which_dataset != "KNEE_RAW" else 32
self.highf_beta_alpha = 1
self.highf_beta_beta = 5
else:
params = [int(x) for x in rnl_params.split(",")]
assert len(params) == 4
self.min_lowf_lines = params[0]
self.max_lowf_lines = params[1]
self.highf_beta_alpha = params[2]
self.highf_beta_beta = params[3]
self.rng = np.random.RandomState()
def __call__(self, shape, seed=None):
if len(shape) < 3:
raise ValueError("Shape should have 3 or more dimensions")
self.rng.seed(seed)
num_cols = shape[-2]
if self.random_num_lines:
num_low_freqs = self.rng.choice(
range(self.min_lowf_lines, self.max_lowf_lines, 2)
)
num_high_freqs = (
int(
self.rng.beta(self.highf_beta_alpha, self.highf_beta_beta)
* (num_cols - num_low_freqs)
// 2
)
* 2
)
else:
choice = self.rng.randint(0, len(self.accelerations))
center_fraction = self.center_fractions[choice]
acceleration = self.accelerations[choice]
num_low_freqs = int(round(num_cols * center_fraction))
num_high_freqs = int(num_cols // acceleration - num_low_freqs)
mask = self.create_lf_focused_mask(num_cols, num_high_freqs, num_low_freqs)
mask_shape = [1 for _ in shape]
mask_shape[-1] = num_cols
mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))
return mask
def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):
p = num_high_freqs / (num_cols - num_low_freqs)
mask = self.rng.uniform(size=num_cols) < p
pad = (num_cols - num_low_freqs + 1) // 2
mask[pad : pad + num_low_freqs] = True
return mask
class BasicMaskFunc(MaskFunc):
def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):
mask = np.zeros([num_cols])
hf_cols = self.rng.choice(
np.arange(num_cols - num_low_freqs), num_high_freqs, replace=False
)
hf_cols[hf_cols >= (num_cols - num_low_freqs + 1) // 2] += num_low_freqs
mask[hf_cols] = True
pad = (num_cols - num_low_freqs + 1) // 2
mask[pad : pad + num_low_freqs] = True
mask = np.fft.ifftshift(mask, axes=0)
return mask
class SymmetricUniformChoiceMaskFunc(MaskFunc):
def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):
mask = np.zeros([num_cols])
num_cols //= 2
num_low_freqs //= 2
num_high_freqs //= 2
hf_cols = self.rng.choice(
np.arange(num_cols - num_low_freqs), num_high_freqs, replace=False
)
mask[hf_cols] = True
pad = num_cols - num_low_freqs
mask[pad:num_cols] = True
mask[: -(num_cols + 1) : -1] = mask[:num_cols]
mask = np.fft.ifftshift(mask, axes=0)
return mask
class UniformGridMaskFunc(MaskFunc):
def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):
mask = np.zeros([num_cols])
acceleration = self.rng.choice([4, 8, 16])
hf_cols = np.arange(acceleration, num_cols, acceleration)
mask[hf_cols] = True
mask[: num_low_freqs // 2] = mask[-(num_low_freqs // 2) :] = True
return mask
class SymmetricLowToHighMaskFunc(MaskFunc):
def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):
mask = np.zeros([num_cols])
num_cols //= 2
num_low_freqs //= 2
num_high_freqs //= 2
num_low_freqs += num_high_freqs
pad = num_cols - num_low_freqs
mask[pad:num_cols] = True
mask[: -(num_cols + 1) : -1] = mask[:num_cols]
mask = np.fft.ifftshift(mask, axes=0)
return mask
class SymmetricUniformGridMaskFunc(MaskFunc):
def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):
mask = np.zeros([num_cols])
acceleration = self.rng.choice([4, 8, 16])
num_cols //= 2
num_low_freqs //= 2
hf_cols = np.arange(acceleration, num_cols, acceleration)
mask[hf_cols] = True
mask[:num_low_freqs] = True
mask[: -(num_cols + 1) : -1] = mask[:num_cols]
return mask
| true | true |
f71e53cc65fda9fdbec30eb8bdaa1bf278ded2d8 | 602 | py | Python | Company_Based_Questions/Extras/Sum_of_bit_differences.py | Satyam-Bhalla/Competitive-Coding | 5814f5f60572f1e76495efe751b94bf4d2845198 | [
"MIT"
] | 1 | 2021-12-09T10:36:48.000Z | 2021-12-09T10:36:48.000Z | Company_Based_Questions/Extras/Sum_of_bit_differences.py | Satyam-Bhalla/Competitive-Coding | 5814f5f60572f1e76495efe751b94bf4d2845198 | [
"MIT"
] | null | null | null | Company_Based_Questions/Extras/Sum_of_bit_differences.py | Satyam-Bhalla/Competitive-Coding | 5814f5f60572f1e76495efe751b94bf4d2845198 | [
"MIT"
] | null | null | null | t = int(input())
# Python program to compute sum of pairwise bit differences
def sumBitDifferences(arr, n, bits=32):
    """Return the sum of bit differences over all ordered pairs of *arr*.

    For each bit position, ``count`` elements have the bit set and
    ``n - count`` do not; each set/unset pairing differs in that bit in
    both orders, contributing ``count * (n - count) * 2``. This runs in
    O(bits * n) instead of the naive O(n^2) pair comparison. ``bits``
    generalizes the original hard-coded 32-bit width (default unchanged).
    """
    ans = 0
    for i in range(bits):
        # Count elements with the i'th bit set.
        count = sum(1 for value in arr if value & (1 << i))
        ans += count * (n - count) * 2
    return ans
# For each test case: read n, then the n space-separated values, and print the result.
for _ in range(t):
    n = int(input())
    l = list(map(int,input().split()))
    print(sumBitDifferences(l,n))
print(sumBitDifferences(l,n)) | 26.173913 | 59 | 0.528239 | t = int(input())
def sumBitDifferences(arr,n):
ans = 0
for i in range(0, 32):
count = 0
for j in range(0,n):
if ( (arr[j] & (1 << i)) ):
count+=1
# Add "count * (n - count) * 2" to the answer
ans += (count * (n - count) * 2);
return ans
for _ in range(t):
n = int(input())
l = list(map(int,input().split()))
print(sumBitDifferences(l,n)) | true | true |
f71e53e205f5ac04b5c06e079190727d551ec4a4 | 11,777 | py | Python | src/engine/SCons/SConsignTests.py | andrewyoung1991/scons | 7517c277e23bc04e3809a9bf0793cdfe00097a58 | [
"MIT"
] | null | null | null | src/engine/SCons/SConsignTests.py | andrewyoung1991/scons | 7517c277e23bc04e3809a9bf0793cdfe00097a58 | [
"MIT"
] | null | null | null | src/engine/SCons/SConsignTests.py | andrewyoung1991/scons | 7517c277e23bc04e3809a9bf0793cdfe00097a58 | [
"MIT"
] | null | null | null | #
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import sys
import unittest
import TestCmd
import TestUnit
import SCons.dblite
import SCons.SConsign
class BuildInfo(object):
    """Minimal stand-in for an SCons build-info record; merging is a no-op."""

    def merge(self, object):
        """Accept and discard another build-info object."""
        return None
class DummySConsignEntry(object):
    """Fake .sconsign entry that records which conversion hooks were invoked."""
    def __init__(self, name):
        self.name = name
        self.binfo = BuildInfo()
    def convert_to_sconsign(self):
        # Flag that the to-sconsign conversion ran (checked by writeTestCase).
        self.c_to_s = 1
    def convert_from_sconsign(self, dir, name):
        # Flag that the from-sconsign conversion ran.
        self.c_from_s = 1
class FS(object):
    """Minimal stand-in for SCons.Node.FS: records the top node and gives it
    an empty repository list."""

    def __init__(self, top):
        top.repositories = []
        self.Top = top
class DummyNode(object):
    """Fake SCons node with a path, its own FS, and canned build info."""
    def __init__(self, path='not_a_valid_path', binfo=None):
        self.path = path
        self.tpath = path
        self.fs = FS(self)
        self.binfo = binfo
    def get_stored_info(self):
        return self.binfo
    def get_binfo(self):
        return self.binfo
class SConsignTestCase(unittest.TestCase):
    """Base fixture: run each test in a scratch directory and reset SConsign state."""
    def setUp(self):
        self.save_cwd = os.getcwd()
        self.test = TestCmd.TestCmd(workdir = '')
        os.chdir(self.test.workpath(''))
    def tearDown(self):
        self.test.cleanup()
        SCons.SConsign.Reset()
        os.chdir(self.save_cwd)
class BaseTestCase(SConsignTestCase):
    """Tests for SCons.SConsign.Base entry storage and merge semantics."""
    def test_Base(self):
        # set_entry makes entries visible immediately, without a merge.
        aaa = DummySConsignEntry('aaa')
        bbb = DummySConsignEntry('bbb')
        bbb.arg1 = 'bbb arg1'
        ccc = DummySConsignEntry('ccc')
        ccc.arg2 = 'ccc arg2'
        f = SCons.SConsign.Base()
        f.set_entry('aaa', aaa)
        f.set_entry('bbb', bbb)
        #f.merge()
        e = f.get_entry('aaa')
        assert e == aaa, e
        assert e.name == 'aaa', e.name
        e = f.get_entry('bbb')
        assert e == bbb, e
        assert e.name == 'bbb', e.name
        assert e.arg1 == 'bbb arg1', e.arg1
        assert not hasattr(e, 'arg2'), e
        # Re-setting a key replaces the entry wholesale.
        f.set_entry('bbb', ccc)
        e = f.get_entry('bbb')
        assert e.name == 'ccc', e.name
        assert not hasattr(e, 'arg1'), e
        assert e.arg2 == 'ccc arg2', e.arg1
        ddd = DummySConsignEntry('ddd')
        eee = DummySConsignEntry('eee')
        fff = DummySConsignEntry('fff')
        fff.arg = 'fff arg'
        f = SCons.SConsign.Base()
        f.set_entry('ddd', ddd)
        f.set_entry('eee', eee)
        e = f.get_entry('ddd')
        assert e == ddd, e
        assert e.name == 'ddd', e.name
        e = f.get_entry('eee')
        assert e == eee, e
        assert e.name == 'eee', e.name
        assert not hasattr(e, 'arg'), e
        f.set_entry('eee', fff)
        e = f.get_entry('eee')
        assert e.name == 'fff', e.name
        assert e.arg == 'fff arg', e.arg
    def test_store_info(self):
        # store_info defers visibility: entries appear only after merge().
        aaa = DummySConsignEntry('aaa')
        bbb = DummySConsignEntry('bbb')
        bbb.arg1 = 'bbb arg1'
        ccc = DummySConsignEntry('ccc')
        ccc.arg2 = 'ccc arg2'
        f = SCons.SConsign.Base()
        f.store_info('aaa', DummyNode('aaa', aaa))
        f.store_info('bbb', DummyNode('bbb', bbb))
        try:
            e = f.get_entry('aaa')
        except KeyError:
            pass
        else:
            raise Exception("unexpected entry %s" % e)
        try:
            e = f.get_entry('bbb')
        except KeyError:
            pass
        else:
            raise Exception("unexpected entry %s" % e)
        f.merge()
        e = f.get_entry('aaa')
        assert e == aaa, "aaa = %s, e = %s" % (aaa, e)
        assert e.name == 'aaa', e.name
        e = f.get_entry('bbb')
        assert e == bbb, "bbb = %s, e = %s" % (bbb, e)
        assert e.name == 'bbb', e.name
        assert e.arg1 == 'bbb arg1', e.arg1
        assert not hasattr(e, 'arg2'), e
        # A new store_info is likewise invisible until the next merge().
        f.store_info('bbb', DummyNode('bbb', ccc))
        e = f.get_entry('bbb')
        assert e == bbb, e
        assert e.name == 'bbb', e.name
        assert e.arg1 == 'bbb arg1', e.arg1
        assert not hasattr(e, 'arg2'), e
        f.merge()
        e = f.get_entry('bbb')
        assert e.name == 'ccc', e.name
        assert not hasattr(e, 'arg1'), e
        assert e.arg2 == 'ccc arg2', e.arg1
        ddd = DummySConsignEntry('ddd')
        eee = DummySConsignEntry('eee')
        fff = DummySConsignEntry('fff')
        fff.arg = 'fff arg'
        f = SCons.SConsign.Base()
        f.store_info('ddd', DummyNode('ddd', ddd))
        f.store_info('eee', DummyNode('eee', eee))
        f.merge()
        e = f.get_entry('ddd')
        assert e == ddd, e
        assert e.name == 'ddd', e.name
        e = f.get_entry('eee')
        assert e == eee, e
        assert e.name == 'eee', e.name
        assert not hasattr(e, 'arg'), e
        f.store_info('eee', DummyNode('eee', fff))
        e = f.get_entry('eee')
        assert e == eee, e
        assert e.name == 'eee', e.name
        assert not hasattr(e, 'arg'), e
        f.merge()
        e = f.get_entry('eee')
        assert e.name == 'fff', e.name
        assert e.arg == 'fff arg', e.arg
class SConsignDBTestCase(SConsignTestCase):
    """Tests that SCons.SConsign.DB keeps per-directory entries separate."""
    def test_SConsignDB(self):
        # Save and restore the module-global DataBase around the test.
        save_DataBase = SCons.SConsign.DataBase
        SCons.SConsign.DataBase = {}
        try:
            d1 = SCons.SConsign.DB(DummyNode('dir1'))
            d1.set_entry('aaa', DummySConsignEntry('aaa name'))
            d1.set_entry('bbb', DummySConsignEntry('bbb name'))
            aaa = d1.get_entry('aaa')
            assert aaa.name == 'aaa name'
            bbb = d1.get_entry('bbb')
            assert bbb.name == 'bbb name'
            d2 = SCons.SConsign.DB(DummyNode('dir2'))
            d2.set_entry('ccc', DummySConsignEntry('ccc name'))
            d2.set_entry('ddd', DummySConsignEntry('ddd name'))
            ccc = d2.get_entry('ccc')
            assert ccc.name == 'ccc name'
            ddd = d2.get_entry('ddd')
            assert ddd.name == 'ddd name'
            # Subdirectories with '/' and with the native separator both work.
            d31 = SCons.SConsign.DB(DummyNode('dir3/sub1'))
            d31.set_entry('eee', DummySConsignEntry('eee name'))
            d31.set_entry('fff', DummySConsignEntry('fff name'))
            eee = d31.get_entry('eee')
            assert eee.name == 'eee name'
            fff = d31.get_entry('fff')
            assert fff.name == 'fff name'
            d32 = SCons.SConsign.DB(DummyNode('dir3%ssub2' % os.sep))
            d32.set_entry('ggg', DummySConsignEntry('ggg name'))
            d32.set_entry('hhh', DummySConsignEntry('hhh name'))
            ggg = d32.get_entry('ggg')
            assert ggg.name == 'ggg name'
            hhh = d32.get_entry('hhh')
            assert hhh.name == 'hhh name'
        finally:
            SCons.SConsign.DataBase = save_DataBase
class SConsignDirFileTestCase(SConsignTestCase):
    """Tests for SCons.SConsign.DirFile entry get/set behavior."""
    def test_SConsignDirFile(self):
        bi_foo = DummySConsignEntry('foo')
        bi_bar = DummySConsignEntry('bar')
        f = SCons.SConsign.DirFile(DummyNode())
        f.set_entry('foo', bi_foo)
        f.set_entry('bar', bi_bar)
        e = f.get_entry('foo')
        assert e == bi_foo, e
        assert e.name == 'foo', e.name
        e = f.get_entry('bar')
        assert e == bi_bar, e
        assert e.name == 'bar', e.name
        assert not hasattr(e, 'arg'), e
        # Overwriting a key replaces the stored entry.
        bbb = DummySConsignEntry('bbb')
        bbb.arg = 'bbb arg'
        f.set_entry('bar', bbb)
        e = f.get_entry('bar')
        assert e.name == 'bbb', e.name
        assert e.arg == 'bbb arg', e.arg
class SConsignFileTestCase(SConsignTestCase):
    """Tests for SCons.SConsign.File() module configuration side effects."""
    def test_SConsignFile(self):
        test = self.test
        file = test.workpath('sconsign_file')
        # Defaults before any File() call.
        assert SCons.SConsign.DataBase == {}, SCons.SConsign.DataBase
        assert SCons.SConsign.DB_Name == ".sconsign", SCons.SConsign.DB_Name
        assert SCons.SConsign.DB_Module is SCons.dblite, SCons.SConsign.DB_Module
        SCons.SConsign.File(file)
        assert SCons.SConsign.DataBase == {}, SCons.SConsign.DataBase
        assert SCons.SConsign.DB_Name is file, SCons.SConsign.DB_Name
        assert SCons.SConsign.DB_Module is SCons.dblite, SCons.SConsign.DB_Module
        # File(None) disables the DB module but keeps the name.
        SCons.SConsign.File(None)
        assert SCons.SConsign.DataBase == {}, SCons.SConsign.DataBase
        assert SCons.SConsign.DB_Name is file, SCons.SConsign.DB_Name
        assert SCons.SConsign.DB_Module is None, SCons.SConsign.DB_Module
        class Fake_DBM(object):
            def open(self, name, mode):
                self.name = name
                self.mode = mode
                return self
            def __getitem__(self, key):
                pass
            def __setitem__(self, key, value):
                pass
        fake_dbm = Fake_DBM()
        SCons.SConsign.File(file, fake_dbm)
        assert SCons.SConsign.DataBase == {}, SCons.SConsign.DataBase
        assert SCons.SConsign.DB_Name is file, SCons.SConsign.DB_Name
        assert SCons.SConsign.DB_Module is fake_dbm, SCons.SConsign.DB_Module
        # The DBM must not be opened until a directory actually needs it.
        assert not hasattr(fake_dbm, 'name'), fake_dbm
        assert not hasattr(fake_dbm, 'mode'), fake_dbm
        SCons.SConsign.ForDirectory(DummyNode(test.workpath('dir')))
        assert not SCons.SConsign.DataBase is None, SCons.SConsign.DataBase
        assert fake_dbm.name == file, fake_dbm.name
        assert fake_dbm.mode == "c", fake_dbm.mode
class writeTestCase(SConsignTestCase):
    """Tests that SCons.SConsign.write() converts entries and syncs the DB once."""
    def test_write(self):
        test = self.test
        file = test.workpath('sconsign_file')
        class Fake_DBM(object):
            def __getitem__(self, key):
                return None
            def __setitem__(self, key, value):
                pass
            def open(self, name, mode):
                self.sync_count = 0
                return self
            def sync(self):
                self.sync_count = self.sync_count + 1
        fake_dbm = Fake_DBM()
        SCons.SConsign.DataBase = {}
        SCons.SConsign.File(file, fake_dbm)
        f = SCons.SConsign.DB(DummyNode())
        bi_foo = DummySConsignEntry('foo')
        bi_bar = DummySConsignEntry('bar')
        f.set_entry('foo', bi_foo)
        f.set_entry('bar', bi_bar)
        SCons.SConsign.write()
        # write() must call convert_to_sconsign() on each entry...
        assert bi_foo.c_to_s, bi_foo.c_to_s
        assert bi_bar.c_to_s, bi_bar.c_to_s
        # ...and sync the backing DBM exactly once.
        assert fake_dbm.sync_count == 1, fake_dbm.sync_count
if __name__ == "__main__":
    # Collect every test_* method from each test case class into one suite.
    suite = unittest.TestSuite()
    test_classes = (
        BaseTestCase,
        SConsignDBTestCase,
        SConsignDirFileTestCase,
        SConsignFileTestCase,
        writeTestCase,
    )
    for test_class in test_classes:
        for test_name in unittest.getTestCaseNames(test_class, 'test_'):
            suite.addTest(test_class(test_name))
    TestUnit.run(suite)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 29.516291 | 81 | 0.585888 |
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import sys
import unittest
import TestCmd
import TestUnit
import SCons.dblite
import SCons.SConsign
class BuildInfo(object):
def merge(self, object):
pass
class DummySConsignEntry(object):
def __init__(self, name):
self.name = name
self.binfo = BuildInfo()
def convert_to_sconsign(self):
self.c_to_s = 1
def convert_from_sconsign(self, dir, name):
self.c_from_s = 1
class FS(object):
def __init__(self, top):
self.Top = top
self.Top.repositories = []
class DummyNode(object):
def __init__(self, path='not_a_valid_path', binfo=None):
self.path = path
self.tpath = path
self.fs = FS(self)
self.binfo = binfo
def get_stored_info(self):
return self.binfo
def get_binfo(self):
return self.binfo
class SConsignTestCase(unittest.TestCase):
def setUp(self):
self.save_cwd = os.getcwd()
self.test = TestCmd.TestCmd(workdir = '')
os.chdir(self.test.workpath(''))
def tearDown(self):
self.test.cleanup()
SCons.SConsign.Reset()
os.chdir(self.save_cwd)
class BaseTestCase(SConsignTestCase):
def test_Base(self):
aaa = DummySConsignEntry('aaa')
bbb = DummySConsignEntry('bbb')
bbb.arg1 = 'bbb arg1'
ccc = DummySConsignEntry('ccc')
ccc.arg2 = 'ccc arg2'
f = SCons.SConsign.Base()
f.set_entry('aaa', aaa)
f.set_entry('bbb', bbb)
e = f.get_entry('aaa')
assert e == aaa, e
assert e.name == 'aaa', e.name
e = f.get_entry('bbb')
assert e == bbb, e
assert e.name == 'bbb', e.name
assert e.arg1 == 'bbb arg1', e.arg1
assert not hasattr(e, 'arg2'), e
f.set_entry('bbb', ccc)
e = f.get_entry('bbb')
assert e.name == 'ccc', e.name
assert not hasattr(e, 'arg1'), e
assert e.arg2 == 'ccc arg2', e.arg1
ddd = DummySConsignEntry('ddd')
eee = DummySConsignEntry('eee')
fff = DummySConsignEntry('fff')
fff.arg = 'fff arg'
f = SCons.SConsign.Base()
f.set_entry('ddd', ddd)
f.set_entry('eee', eee)
e = f.get_entry('ddd')
assert e == ddd, e
assert e.name == 'ddd', e.name
e = f.get_entry('eee')
assert e == eee, e
assert e.name == 'eee', e.name
assert not hasattr(e, 'arg'), e
f.set_entry('eee', fff)
e = f.get_entry('eee')
assert e.name == 'fff', e.name
assert e.arg == 'fff arg', e.arg
def test_store_info(self):
aaa = DummySConsignEntry('aaa')
bbb = DummySConsignEntry('bbb')
bbb.arg1 = 'bbb arg1'
ccc = DummySConsignEntry('ccc')
ccc.arg2 = 'ccc arg2'
f = SCons.SConsign.Base()
f.store_info('aaa', DummyNode('aaa', aaa))
f.store_info('bbb', DummyNode('bbb', bbb))
try:
e = f.get_entry('aaa')
except KeyError:
pass
else:
raise Exception("unexpected entry %s" % e)
try:
e = f.get_entry('bbb')
except KeyError:
pass
else:
raise Exception("unexpected entry %s" % e)
f.merge()
e = f.get_entry('aaa')
assert e == aaa, "aaa = %s, e = %s" % (aaa, e)
assert e.name == 'aaa', e.name
e = f.get_entry('bbb')
assert e == bbb, "bbb = %s, e = %s" % (bbb, e)
assert e.name == 'bbb', e.name
assert e.arg1 == 'bbb arg1', e.arg1
assert not hasattr(e, 'arg2'), e
f.store_info('bbb', DummyNode('bbb', ccc))
e = f.get_entry('bbb')
assert e == bbb, e
assert e.name == 'bbb', e.name
assert e.arg1 == 'bbb arg1', e.arg1
assert not hasattr(e, 'arg2'), e
f.merge()
e = f.get_entry('bbb')
assert e.name == 'ccc', e.name
assert not hasattr(e, 'arg1'), e
assert e.arg2 == 'ccc arg2', e.arg1
ddd = DummySConsignEntry('ddd')
eee = DummySConsignEntry('eee')
fff = DummySConsignEntry('fff')
fff.arg = 'fff arg'
f = SCons.SConsign.Base()
f.store_info('ddd', DummyNode('ddd', ddd))
f.store_info('eee', DummyNode('eee', eee))
f.merge()
e = f.get_entry('ddd')
assert e == ddd, e
assert e.name == 'ddd', e.name
e = f.get_entry('eee')
assert e == eee, e
assert e.name == 'eee', e.name
assert not hasattr(e, 'arg'), e
f.store_info('eee', DummyNode('eee', fff))
e = f.get_entry('eee')
assert e == eee, e
assert e.name == 'eee', e.name
assert not hasattr(e, 'arg'), e
f.merge()
e = f.get_entry('eee')
assert e.name == 'fff', e.name
assert e.arg == 'fff arg', e.arg
class SConsignDBTestCase(SConsignTestCase):
def test_SConsignDB(self):
save_DataBase = SCons.SConsign.DataBase
SCons.SConsign.DataBase = {}
try:
d1 = SCons.SConsign.DB(DummyNode('dir1'))
d1.set_entry('aaa', DummySConsignEntry('aaa name'))
d1.set_entry('bbb', DummySConsignEntry('bbb name'))
aaa = d1.get_entry('aaa')
assert aaa.name == 'aaa name'
bbb = d1.get_entry('bbb')
assert bbb.name == 'bbb name'
d2 = SCons.SConsign.DB(DummyNode('dir2'))
d2.set_entry('ccc', DummySConsignEntry('ccc name'))
d2.set_entry('ddd', DummySConsignEntry('ddd name'))
ccc = d2.get_entry('ccc')
assert ccc.name == 'ccc name'
ddd = d2.get_entry('ddd')
assert ddd.name == 'ddd name'
d31 = SCons.SConsign.DB(DummyNode('dir3/sub1'))
d31.set_entry('eee', DummySConsignEntry('eee name'))
d31.set_entry('fff', DummySConsignEntry('fff name'))
eee = d31.get_entry('eee')
assert eee.name == 'eee name'
fff = d31.get_entry('fff')
assert fff.name == 'fff name'
d32 = SCons.SConsign.DB(DummyNode('dir3%ssub2' % os.sep))
d32.set_entry('ggg', DummySConsignEntry('ggg name'))
d32.set_entry('hhh', DummySConsignEntry('hhh name'))
ggg = d32.get_entry('ggg')
assert ggg.name == 'ggg name'
hhh = d32.get_entry('hhh')
assert hhh.name == 'hhh name'
finally:
SCons.SConsign.DataBase = save_DataBase
class SConsignDirFileTestCase(SConsignTestCase):
def test_SConsignDirFile(self):
bi_foo = DummySConsignEntry('foo')
bi_bar = DummySConsignEntry('bar')
f = SCons.SConsign.DirFile(DummyNode())
f.set_entry('foo', bi_foo)
f.set_entry('bar', bi_bar)
e = f.get_entry('foo')
assert e == bi_foo, e
assert e.name == 'foo', e.name
e = f.get_entry('bar')
assert e == bi_bar, e
assert e.name == 'bar', e.name
assert not hasattr(e, 'arg'), e
bbb = DummySConsignEntry('bbb')
bbb.arg = 'bbb arg'
f.set_entry('bar', bbb)
e = f.get_entry('bar')
assert e.name == 'bbb', e.name
assert e.arg == 'bbb arg', e.arg
class SConsignFileTestCase(SConsignTestCase):
def test_SConsignFile(self):
test = self.test
file = test.workpath('sconsign_file')
assert SCons.SConsign.DataBase == {}, SCons.SConsign.DataBase
assert SCons.SConsign.DB_Name == ".sconsign", SCons.SConsign.DB_Name
assert SCons.SConsign.DB_Module is SCons.dblite, SCons.SConsign.DB_Module
SCons.SConsign.File(file)
assert SCons.SConsign.DataBase == {}, SCons.SConsign.DataBase
assert SCons.SConsign.DB_Name is file, SCons.SConsign.DB_Name
assert SCons.SConsign.DB_Module is SCons.dblite, SCons.SConsign.DB_Module
SCons.SConsign.File(None)
assert SCons.SConsign.DataBase == {}, SCons.SConsign.DataBase
assert SCons.SConsign.DB_Name is file, SCons.SConsign.DB_Name
assert SCons.SConsign.DB_Module is None, SCons.SConsign.DB_Module
class Fake_DBM(object):
def open(self, name, mode):
self.name = name
self.mode = mode
return self
def __getitem__(self, key):
pass
def __setitem__(self, key, value):
pass
fake_dbm = Fake_DBM()
SCons.SConsign.File(file, fake_dbm)
assert SCons.SConsign.DataBase == {}, SCons.SConsign.DataBase
assert SCons.SConsign.DB_Name is file, SCons.SConsign.DB_Name
assert SCons.SConsign.DB_Module is fake_dbm, SCons.SConsign.DB_Module
assert not hasattr(fake_dbm, 'name'), fake_dbm
assert not hasattr(fake_dbm, 'mode'), fake_dbm
SCons.SConsign.ForDirectory(DummyNode(test.workpath('dir')))
assert not SCons.SConsign.DataBase is None, SCons.SConsign.DataBase
assert fake_dbm.name == file, fake_dbm.name
assert fake_dbm.mode == "c", fake_dbm.mode
class writeTestCase(SConsignTestCase):
def test_write(self):
test = self.test
file = test.workpath('sconsign_file')
class Fake_DBM(object):
def __getitem__(self, key):
return None
def __setitem__(self, key, value):
pass
def open(self, name, mode):
self.sync_count = 0
return self
def sync(self):
self.sync_count = self.sync_count + 1
fake_dbm = Fake_DBM()
SCons.SConsign.DataBase = {}
SCons.SConsign.File(file, fake_dbm)
f = SCons.SConsign.DB(DummyNode())
bi_foo = DummySConsignEntry('foo')
bi_bar = DummySConsignEntry('bar')
f.set_entry('foo', bi_foo)
f.set_entry('bar', bi_bar)
SCons.SConsign.write()
assert bi_foo.c_to_s, bi_foo.c_to_s
assert bi_bar.c_to_s, bi_bar.c_to_s
assert fake_dbm.sync_count == 1, fake_dbm.sync_count
if __name__ == "__main__":
suite = unittest.TestSuite()
tclasses = [
BaseTestCase,
SConsignDBTestCase,
SConsignDirFileTestCase,
SConsignFileTestCase,
writeTestCase,
]
for tclass in tclasses:
names = unittest.getTestCaseNames(tclass, 'test_')
suite.addTests(list(map(tclass, names)))
TestUnit.run(suite)
| true | true |
f71e5460155bd9f16fa19e166afd300b1c1a08f8 | 1,469 | py | Python | setup.py | inducer/pyfft | 3780b30aebcd2c056f8c6bdd2ad7572e04d6b886 | [
"AML"
] | 1 | 2016-08-26T07:12:54.000Z | 2016-08-26T07:12:54.000Z | setup.py | inducer/pyfft | 3780b30aebcd2c056f8c6bdd2ad7572e04d6b886 | [
"AML"
] | null | null | null | setup.py | inducer/pyfft | 3780b30aebcd2c056f8c6bdd2ad7572e04d6b886 | [
"AML"
] | null | null | null | import sys
major, minor, micro, releaselevel, serial = sys.version_info
if not (major == 2 and minor >= 5):
print("Python >=2.5 is required to use this module.")
sys.exit(1)
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import os.path
import logging
setup_dir = os.path.split(os.path.abspath(__file__))[0]
DOCUMENTATION = open(os.path.join(setup_dir, 'README.rst')).read()
pyfft_path = os.path.join(setup_dir, 'pyfft', '__init__.py')
globals_dict = {}
execfile(pyfft_path, globals_dict)
VERSION = '.'.join([str(x) for x in globals_dict['VERSION']])
dependencies = ['mako', 'numpy']
logging.warning("*" * 80 + "\n\n" +
"PyFFT is deprecated and will not be updated any more.\n" +
"Its functionality is being moved to Reikna (http://reikna.publicfields.net).\n\n" +
"*" * 80)
setup(
name='pyfft',
packages=['pyfft'],
provides=['pyfft'],
requires=dependencies,
install_requires=dependencies,
package_data={'pyfft': ['*.mako']},
version=VERSION,
author='Bogdan Opanchuk',
author_email='mantihor@gmail.com',
url='http://github.com/Manticore/pyfft',
description='FFT library for PyCuda and PyOpenCL',
long_description=DOCUMENTATION,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Topic :: Scientific/Engineering :: Mathematics'
]
)
| 27.716981 | 85 | 0.704561 | import sys
major, minor, micro, releaselevel, serial = sys.version_info
if not (major == 2 and minor >= 5):
print("Python >=2.5 is required to use this module.")
sys.exit(1)
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import os.path
import logging
setup_dir = os.path.split(os.path.abspath(__file__))[0]
DOCUMENTATION = open(os.path.join(setup_dir, 'README.rst')).read()
pyfft_path = os.path.join(setup_dir, 'pyfft', '__init__.py')
globals_dict = {}
execfile(pyfft_path, globals_dict)
VERSION = '.'.join([str(x) for x in globals_dict['VERSION']])
dependencies = ['mako', 'numpy']
logging.warning("*" * 80 + "\n\n" +
"PyFFT is deprecated and will not be updated any more.\n" +
"Its functionality is being moved to Reikna (http://reikna.publicfields.net).\n\n" +
"*" * 80)
setup(
name='pyfft',
packages=['pyfft'],
provides=['pyfft'],
requires=dependencies,
install_requires=dependencies,
package_data={'pyfft': ['*.mako']},
version=VERSION,
author='Bogdan Opanchuk',
author_email='mantihor@gmail.com',
url='http://github.com/Manticore/pyfft',
description='FFT library for PyCuda and PyOpenCL',
long_description=DOCUMENTATION,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Topic :: Scientific/Engineering :: Mathematics'
]
)
| true | true |
f71e5477d9830202d03e82ec5b932891a0558a54 | 22,305 | py | Python | test/functional/test_framework/util.py | Guinea1/AnotherScryptCoin | 58bae163e23da7468a9854073231a512a4d037ee | [
"MIT"
] | null | null | null | test/functional/test_framework/util.py | Guinea1/AnotherScryptCoin | 58bae163e23da7468a9854073231a512a4d037ee | [
"MIT"
] | 2 | 2021-01-14T16:29:02.000Z | 2022-03-03T22:55:41.000Z | test/functional/test_framework/util.py | Guinea1/AnotherScryptCoin | 58bae163e23da7468a9854073231a512a4d037ee | [
"MIT"
] | 1 | 2022-01-09T08:44:40.000Z | 2022-01-09T08:44:40.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import inspect
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_fee_amount(fee, tx_size, fee_per_kB):
    """Assert that *fee* is consistent with a transaction of *tx_size*
    bytes paying *fee_per_kB*, within the wallet's estimation slack."""
    target_fee = round(tx_size * fee_per_kB / 1000, 8)
    if fee < target_fee:
        raise AssertionError("Fee of %s ASC too low! (Should be %s ASC)" % (str(fee), str(target_fee)))
    # allow the wallet's estimation to be at most 2 bytes off
    max_fee = (tx_size + 2) * fee_per_kB / 1000
    if fee > max_fee:
        raise AssertionError("Fee of %s ASC too high! (Should be %s ASC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
    """Assert that every argument compares equal to the first one."""
    everything = (thing1, thing2) + args
    if any(thing1 != other for other in everything[1:]):
        raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in everything))
def assert_greater_than(thing1, thing2):
    """Assert that *thing1* is strictly greater than *thing2*."""
    if thing1 <= thing2:
        message = "%s <= %s" % (str(thing1), str(thing2))
        raise AssertionError(message)
def assert_greater_than_or_equal(thing1, thing2):
    """Assert that *thing1* is greater than or equal to *thing2*."""
    if thing1 < thing2:
        message = "%s < %s" % (str(thing1), str(thing2))
        raise AssertionError(message)
def assert_raises(exc, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises *exc*, ignoring the message."""
    assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises exception type *exc*.

    If *message* is not None it must additionally be a substring of the
    raised exception's error['message'].  JSONRPCException is rejected
    outright: RPC failures must use assert_raises_rpc_error() instead.
    """
    try:
        fun(*args, **kwds)
    except JSONRPCException:
        raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
    except exc as e:
        # NOTE(review): the message check reads e.error['message'], so *exc*
        # is assumed to carry an .error dict — confirm for new callers.
        if message is not None and message not in e.error['message']:
            raise AssertionError("Expected substring not found:" + e.error['message'])
    except Exception as e:
        # A different exception type than expected is a failure too.
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
    """Execute a process and asserts the process return code and output.
    Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
    and verifies that the return code and output are as expected. Throws AssertionError if
    no CalledProcessError was raised or if the return code and output are not as expected.
    Args:
        returncode (int): the process return code.
        output (string): [a substring of] the process output.
        fun (function): the function to call. This should execute a process.
        args*: positional arguments for the function.
        kwds**: named arguments for the function.
    """
    # Only CalledProcessError is handled; any other exception propagates.
    try:
        fun(*args, **kwds)
    except CalledProcessError as e:
        if returncode != e.returncode:
            raise AssertionError("Unexpected returncode %i" % e.returncode)
        if output not in e.output:
            raise AssertionError("Expected substring not found:" + e.output)
    else:
        raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
    """Run an RPC and verify that a specific JSONRPC exception code and message is raised.
    Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
    and verifies that the error code and message are as expected. Throws AssertionError if
    no JSONRPCException was raised or if the error code/message are not as expected.
    Args:
        code (int), optional: the error code returned by the RPC call (defined
            in src/rpc/protocol.h). Set to None if checking the error code is not required.
        message (string), optional: [a substring of] the error string returned by the
            RPC call. Set to None if checking the error string is not required.
        fun (function): the function to call. This should be the name of an RPC.
        args*: positional arguments for the function.
        kwds**: named arguments for the function.
    """
    # try_rpc() returns False when no exception fired, which is a failure here.
    assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
def try_rpc(code, message, fun, *args, **kwds):
    """Tries to run an rpc command.
    Test against error code and message if the rpc fails.
    Returns whether a JSONRPCException was raised."""
    try:
        fun(*args, **kwds)
    except JSONRPCException as e:
        # JSONRPCException was thrown as expected. Check the code and message values are correct.
        if (code is not None) and (code != e.error["code"]):
            raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
        if (message is not None) and (message not in e.error['message']):
            raise AssertionError("Expected substring not found:" + e.error['message'])
        return True
    except Exception as e:
        # Any non-RPC exception is a hard failure, not a False result.
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        return False
def assert_is_hex_string(string):
    """Assert that *string* parses as a hexadecimal number."""
    try:
        int(string, 16)
    except Exception as err:
        message = "Couldn't interpret %r as hexadecimal; raised: %s" % (string, err)
        raise AssertionError(message)
def assert_is_hash_string(string, length=64):
    """Assert that *string* looks like a lowercase hex hash of *length* chars.

    Pass a falsy *length* to skip the length check.
    """
    if not isinstance(string, str):
        raise AssertionError("Expected a string, got type %r" % type(string))
    if length and len(string) != length:
        raise AssertionError(
            "String of length %d expected; got %d" % (length, len(string)))
    if not re.match('[abcdef0-9]+$', string):
        raise AssertionError(
            "String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.
    If the should_not_find flag is true, to_match should not be found
    in object_array
    """
    if should_not_find:
        # Finding a match is itself the failure mode, so nothing can be expected.
        assert_equal(expected, {})
    num_matched = 0
    for item in object_array:
        # Does this item satisfy every key/value pair of to_match?
        all_match = True
        for key, value in to_match.items():
            if item[key] != value:
                all_match = False
        if not all_match:
            continue
        elif should_not_find:
            # Count the match; any match triggers the failure check below.
            num_matched = num_matched + 1
        # Verify the expected key/value pairs on the matching item.
        # (expected is {} in the should_not_find case, so this is a no-op then.)
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
            num_matched = num_matched + 1
    if num_matched == 0 and not should_not_find:
        raise AssertionError("No objects matched %s" % (str(to_match)))
    if num_matched > 0 and should_not_find:
        raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    amount = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(amount)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
    """Return the number of bytes encoded by *hex_string*."""
    return len(bytes.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
    """Return the lowercase hex encoding of a bytes-like object as a str."""
    hex_bytes = hexlify(byte_str)
    return hex_bytes.decode('ascii')
def hash256(byte_str):
    """Return the double-SHA256 of *byte_str*, byte-reversed."""
    first_round = hashlib.sha256(byte_str).digest()
    second_round = hashlib.sha256(first_round).digest()
    return second_round[::-1]
def hex_str_to_bytes(hex_str):
    """Decode an ASCII hex string into the bytes it represents."""
    ascii_bytes = hex_str.encode('ascii')
    return unhexlify(ascii_bytes)
def str_to_b64str(string):
    """UTF-8 encode *string*, base64 it, and return the result as str."""
    utf8_bytes = string.encode('utf-8')
    return b64encode(utf8_bytes).decode('ascii')
def satoshi_round(amount):
    """Round *amount* down to eight decimal places (one satoshi)."""
    one_satoshi = Decimal('0.00000001')
    return Decimal(amount).quantize(one_satoshi, rounding=ROUND_DOWN)
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
    """Poll *predicate* every 50ms until it returns true.

    Gives up after *attempts* tries or *timeout* seconds, whichever comes
    first; if neither is given, a default timeout of 60 seconds applies.
    If *lock* is supplied it is held while evaluating the predicate.
    Raises AssertionError (including the predicate's source) on failure.
    """
    if attempts == float('inf') and timeout == float('inf'):
        timeout = 60
    attempt = 0
    time_end = time.time() + timeout
    while attempt < attempts and time.time() < time_end:
        if lock:
            with lock:
                if predicate():
                    return
        else:
            if predicate():
                return
        attempt += 1
        time.sleep(0.05)
    # Print the cause of the timeout
    predicate_source = "''''\n" + inspect.getsource(predicate) + "'''"
    logger.error("wait_until() failed. Predicate: {}".format(predicate_source))
    if attempt >= attempts:
        raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts))
    elif time.time() >= time_end:
        raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout))
    raise RuntimeError('Unreachable')
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this (avoids well-known ports)
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
    """Per-process seed used by p2p_port()/rpc_port() to avoid collisions
    between concurrently running test processes."""
    # Must be initialized with a unique integer for each process
    n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
    """
    Args:
        url (str): URL of the RPC server to call
        node_number (int): the node number (or id) that this calls to
    Kwargs:
        timeout (int): HTTP timeout in seconds
        coveragedir (str): directory for RPC coverage logs; when set, calls
            made through the returned proxy are recorded there
    Returns:
        AuthServiceProxy. convenience object for making RPC calls.
    """
    proxy_kwargs = {}
    if timeout is not None:
        proxy_kwargs['timeout'] = timeout
    proxy = AuthServiceProxy(url, **proxy_kwargs)
    proxy.url = url  # store URL on proxy for info
    coverage_logfile = coverage.get_filename(
        coveragedir, node_number) if coveragedir else None
    return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
    """Return the p2p listening port for node *n*, offset by PortSeed."""
    assert n <= MAX_NODES
    seed_offset = (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
    return PORT_MIN + n + seed_offset
def rpc_port(n):
    """Return the RPC port for node *n*, in the range above the p2p ports."""
    seed_offset = (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
    return PORT_MIN + PORT_RANGE + n + seed_offset
def rpc_url(datadir, i, rpchost=None):
    """Build the authenticated RPC URL for node *i* from its datadir
    credentials, optionally overriding host (and port) via *rpchost*."""
    user, password = get_auth_cookie(datadir)
    host = '127.0.0.1'
    port = rpc_port(i)
    if rpchost:
        parts = rpchost.split(':')
        if len(parts) == 2:
            host, port = parts[0], parts[1]
        else:
            host = rpchost
    return "http://%s:%s@%s:%d" % (user, password, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
    """Create node *n*'s datadir under *dirname*, write a fresh regtest
    anotherscryptcoin.conf, and create stderr/stdout capture directories.
    Returns the datadir path."""
    datadir = get_datadir_path(dirname, n)
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    with open(os.path.join(datadir, "anotherscryptcoin.conf"), 'w', encoding='utf8') as f:
        f.write("regtest=1\n")
        # Options below the [regtest] header apply to the regtest network.
        f.write("[regtest]\n")
        f.write("port=" + str(p2p_port(n)) + "\n")
        f.write("rpcport=" + str(rpc_port(n)) + "\n")
        f.write("server=1\n")
        f.write("keypool=1\n")
        f.write("discover=0\n")
        f.write("listenonion=0\n")
        f.write("printtoconsole=0\n")
        f.write("upnp=0\n")
    os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True)
    os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
    return datadir
def get_datadir_path(dirname, n):
    """Return the path of node *n*'s data directory under *dirname*."""
    node_dir = "node" + str(n)
    return os.path.join(dirname, node_dir)
def append_config(datadir, options):
    """Append each line in *options* to the node's anotherscryptcoin.conf."""
    conf_path = os.path.join(datadir, "anotherscryptcoin.conf")
    with open(conf_path, 'a', encoding='utf8') as f:
        for option in options:
            f.write(option + "\n")
def get_auth_cookie(datadir):
    """Return (user, password) RPC credentials for the node in *datadir*.

    Credentials come from rpcuser/rpcpassword lines in anotherscryptcoin.conf
    when present; a readable regtest/.cookie file overrides both.  Raises
    ValueError when neither source yields a complete pair.
    """
    user = None
    password = None
    if os.path.isfile(os.path.join(datadir, "anotherscryptcoin.conf")):
        with open(os.path.join(datadir, "anotherscryptcoin.conf"), 'r', encoding='utf8') as f:
            for line in f:
                if line.startswith("rpcuser="):
                    assert user is None  # Ensure that there is only one rpcuser line
                    user = line.split("=")[1].strip("\n")
                if line.startswith("rpcpassword="):
                    assert password is None  # Ensure that there is only one rpcpassword line
                    password = line.split("=")[1].strip("\n")
    if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")) and os.access(os.path.join(datadir, "regtest", ".cookie"), os.R_OK):
        # Cookie file format is "user:password" on a single line.
        with open(os.path.join(datadir, "regtest", ".cookie"), 'r', encoding="ascii") as f:
            userpass = f.read()
            split_userpass = userpass.split(':')
            user = split_userpass[0]
            password = split_userpass[1]
    if user is None or password is None:
        raise ValueError("No RPC credentials")
    return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir):
    """Delete the regtest .cookie file under *datadir*, if one exists."""
    cookie_path = os.path.join(datadir, "regtest", ".cookie")
    if os.path.isfile(cookie_path):
        logger.debug("Deleting leftover cookie file")
        os.remove(cookie_path)
def get_bip9_status(node, key):
    """Return the bip9_softforks entry *key* from node's getblockchaininfo."""
    chain_info = node.getblockchaininfo()
    return chain_info['bip9_softforks'][key]
def set_node_times(nodes, t):
    """Set mocktime *t* on every node in *nodes*."""
    for one_node in nodes:
        one_node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
    """Disconnect every peer of *from_connection* whose subversion marks it
    as test node *node_num*, and wait for the disconnects to complete."""
    for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
        try:
            from_connection.disconnectnode(nodeid=peer_id)
        except JSONRPCException as e:
            # If this node is disconnected between calculating the peer id
            # and issuing the disconnect, don't worry about it.
            # This avoids a race condition if we're mass-disconnecting peers.
            if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
                raise
    # wait to disconnect
    wait_until(lambda: [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5)
def connect_nodes(from_connection, node_num):
    """Open a one-way connection from *from_connection* to node *node_num*
    and wait for the version handshake to finish on all peers."""
    from_connection.addnode("127.0.0.1:" + str(p2p_port(node_num)), "onetry")
    # Poll until the version handshake is complete to avoid race conditions
    # with transaction relaying.
    wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
def connect_nodes_bi(nodes, a, b):
    """Connect nodes[a] and nodes[b] to each other, in both directions."""
    for src, dst in ((a, b), (b, a)):
        connect_nodes(nodes[src], dst)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
    """
    Wait until everybody has the same tip.
    sync_blocks needs to be called with an rpc_connections set that has least
    one node already synced to the latest, stable tip, otherwise there's a
    chance it might return before all nodes are stably synced.
    """
    stop_time = time.time() + timeout
    while time.time() <= stop_time:
        best_hash = [x.getbestblockhash() for x in rpc_connections]
        # Done when every node reports the same best block hash.
        if best_hash.count(best_hash[0]) == len(rpc_connections):
            return
        time.sleep(wait)
    raise AssertionError("Block sync timed out:{}".format("".join("\n {!r}".format(b) for b in best_hash)))
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
    """
    Wait until everybody has the same transactions in their memory
    pools
    """
    stop_time = time.time() + timeout
    while time.time() <= stop_time:
        pool = [set(r.getrawmempool()) for r in rpc_connections]
        # Done when every node reports the same set of txids.
        if pool.count(pool[0]) == len(rpc_connections):
            if flush_scheduler:
                # Drain each node's validation interface queue before returning.
                for r in rpc_connections:
                    r.syncwithvalidationinterfacequeue()
            return
        time.sleep(wait)
    raise AssertionError("Mempool sync timed out:{}".format("".join("\n {!r}".format(m) for m in pool)))
# Transaction/Block functions
#############################
def find_output(node, txid, amount, *, blockhash=None):
    """
    Return index to output of txid with value amount
    Raises exception if there is none.
    """
    txdata = node.getrawtransaction(txid, 1, blockhash)
    for index, txout in enumerate(txdata["vout"]):
        if txout["value"] == amount:
            return index
    raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return a random set of unspent txouts that are enough to pay amount_needed

    Returns (total_in, inputs) where inputs is a list of
    {"txid", "vout", "address"} dicts.  Raises RuntimeError when the wallet
    cannot cover amount_needed.
    """
    assert(confirmations_required >= 0)
    utxo = from_node.listunspent(confirmations_required)
    # Shuffle so repeated calls pick different coins.
    random.shuffle(utxo)
    inputs = []
    total_in = Decimal("0.00000000")
    while total_in < amount_needed and len(utxo) > 0:
        t = utxo.pop()
        total_in += t["amount"]
        inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
    return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s), return them
    """
    outputs = {}
    spent = amount_out + fee
    change = amount_in - spent
    if change > spent * 2:
        # Create an extra change output to break up big inputs
        change_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        change = amount_in - spent - outputs[change_address]
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    # Randomize the fee within [min_fee, min_fee + fee_variants * fee_increment].
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    (total_in, inputs) = gather_inputs(from_node, amount + fee)
    outputs = make_change(from_node, total_in, amount, fee)
    outputs[to_node.getnewaddress()] = float(amount)
    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransactionwithwallet(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
    """Ensure *node* owns at least *count* confirmed utxos and return them.
    *fee* must be sufficient for relay and mining the splitting transactions.
    """
    # Mine int(count/2)+101 blocks so enough coinbases mature
    # (presumably a 100-block maturity rule -- confirm for this chain).
    to_generate = int(0.5 * count) + 101
    while to_generate > 0:
        node.generate(min(25, to_generate))
        to_generate -= 25
    utxos = node.listunspent()
    iterations = count - len(utxos)
    addr1 = node.getnewaddress()
    addr2 = node.getnewaddress()
    if iterations <= 0:
        return utxos
    # Split one utxo into two per iteration until we have enough.
    for i in range(iterations):
        t = utxos.pop()
        inputs = []
        inputs.append({"txid": t["txid"], "vout": t["vout"]})
        outputs = {}
        send_value = t['amount'] - fee
        outputs[addr1] = satoshi_round(send_value / 2)
        outputs[addr2] = satoshi_round(send_value / 2)
        raw_tx = node.createrawtransaction(inputs, outputs)
        signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"]
        node.sendrawtransaction(signed_tx)
    # Confirm everything still pending, then re-read the wallet's utxos.
    while (node.getmempoolinfo()['size'] > 0):
        node.generate(1)
    utxos = node.listunspent()
    assert(len(utxos) >= count)
    return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
    """Build 128 large OP_RETURN txouts (hex) to splice into transactions so
    they become big, plus a leading output-count byte (0x81 = 129 outputs)."""
    # One script_pubkey: OP_RETURN OP_PUSH2 512 bytes, then 512 x 0x01.
    script_pubkey = "6a4d0200" + "01" * 512
    # Each txout: 8-byte zero value, script length prefix, then the script.
    one_txout = "0000000000000000" + "fd0402" + script_pubkey
    return "81" + one_txout * 128
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
    """Build and send *num* large transactions, each spending one utxo from
    *utxos* (popped in place) and carrying the pre-built *txouts* hex.
    Returns the list of txids."""
    addr = node.getnewaddress()
    txids = []
    for _ in range(num):
        t = utxos.pop()
        inputs = [{"txid": t["txid"], "vout": t["vout"]}]
        outputs = {}
        change = t['amount'] - fee
        outputs[addr] = satoshi_round(change)
        rawtx = node.createrawtransaction(inputs, outputs)
        # Splice the extra txouts into the raw hex.  The fixed offsets 92/94
        # assume the layout of a one-input raw transaction -- TODO confirm.
        newtx = rawtx[0:92]
        newtx = newtx + txouts
        newtx = newtx + rawtx[94:]
        # Signs with sighash type "NONE" (third argument) -- presumably so
        # the spliced outputs don't invalidate the signature; verify.
        signresult = node.signrawtransactionwithwallet(newtx, None, "NONE")
        txid = node.sendrawtransaction(signresult["hex"], True)
        txids.append(txid)
    return txids
def mine_large_block(node, utxos=None):
    """Mine one block stuffed with 14 large transactions.
    When *utxos* is provided it is consumed (and refilled) in place;
    otherwise the node's own unspent outputs are used."""
    # generate a 66k transaction,
    # and 14 of them is close to the 1MB block limit
    num = 14
    txouts = gen_return_txouts()
    utxos = utxos if utxos is not None else []
    if len(utxos) < num:
        # Not enough provided utxos; refill from the wallet.
        utxos.clear()
        utxos.extend(node.listunspent())
    fee = 100 * node.getnetworkinfo()["relayfee"]
    create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
    node.generate(1)
def find_vout_for_address(node, txid, addr):
    """
    Locate the vout index of the given transaction sending to the
    given address. Raises runtime error exception if not found.
    """
    tx = node.getrawtransaction(txid, True)
    for index, vout in enumerate(tx["vout"]):
        if addr in vout["scriptPubKey"]["addresses"]:
            return index
    raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))
| 38.926702 | 140 | 0.652589 |
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import inspect
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
nError("Fee of %s ASC too low! (Should be %s ASC)" % (str(fee), str(target_fee)))
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError("Fee of %s ASC too high! (Should be %s ASC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
    """Fail unless every argument compares equal to the first one."""
    values = (thing1, thing2) + args
    if any(value != thing1 for value in values):
        raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in values))
def assert_greater_than(thing1, thing2):
    """Fail unless thing1 is strictly greater than thing2."""
    if thing1 <= thing2:
        msg = "%s <= %s" % (str(thing1), str(thing2))
        raise AssertionError(msg)
def assert_greater_than_or_equal(thing1, thing2):
    """Fail unless thing1 >= thing2."""
    if thing1 < thing2:
        msg = "%s < %s" % (str(thing1), str(thing2))
        raise AssertionError(msg)
def assert_raises(exc, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises an exception of type exc
    (message is not checked)."""
    assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises exc; when message is not None
    it must occur inside the exception's .error['message'] text.

    NOTE(review): the message check reads e.error['message'], so a non-None
    message only works for exception types carrying an .error dict -- confirm
    at call sites.
    """
    try:
        fun(*args, **kwds)
    except JSONRPCException:
        # RPC failures have a dedicated helper that also checks the code.
        raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
    except exc as e:
        if message is not None and message not in e.error['message']:
            raise AssertionError("Expected substring not found:" + e.error['message'])
    except Exception as e:
        # Wrong exception type: surface it as a test failure.
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
    """Execute fun and require it to raise a CalledProcessError whose
    returncode equals `returncode` and whose output contains `output`."""
    try:
        fun(*args, **kwds)
    except CalledProcessError as e:
        if e.returncode != returncode:
            raise AssertionError("Unexpected returncode %i" % e.returncode)
        if output not in e.output:
            raise AssertionError("Expected substring not found:" + e.output)
    else:
        raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
    """Assert fun raises a JSONRPCException matching code/message.

    code: required RPC error code, or None to skip the check.
    message: required substring of the error message, or None to skip.
    """
    assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
def try_rpc(code, message, fun, *args, **kwds):
    """Run fun(*args, **kwds). Return True if it raised a JSONRPCException
    matching code/message, False if it raised nothing; any mismatch or other
    exception becomes an AssertionError."""
    try:
        fun(*args, **kwds)
    except JSONRPCException as e:
        # JSONRPCException was thrown as expected. Check the code and message values are correct.
        if (code is not None) and (code != e.error["code"]):
            raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
        if (message is not None) and (message not in e.error['message']):
            raise AssertionError("Expected substring not found:" + e.error['message'])
        return True
    except Exception as e:
        # Wrong exception type: surface it as a test failure.
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        return False
def assert_is_hex_string(string):
    """Fail unless `string` parses as a base-16 number."""
    try:
        int(string, 16)
    except Exception as err:
        msg = "Couldn't interpret %r as hexadecimal; raised: %s" % (string, err)
        raise AssertionError(msg)
def assert_is_hash_string(string, length=64):
    """Fail unless `string` looks like a lowercase-hex hash of `length`
    characters (length=0/None skips the length check)."""
    if not isinstance(string, str):
        raise AssertionError("Expected a string, got type %r" % type(string))
    if length and len(string) != length:
        raise AssertionError(
            "String of length %d expected; got %d" % (length, len(string)))
    if not re.match('[abcdef0-9]+$', string):
        raise AssertionError(
            "String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
    """Pass if some dict in object_array has every key/value pair of
    to_match and, for each such match, every key/value pair of expected.

    With should_not_find=True (expected must then be {}), pass only when no
    dict matches to_match at all.
    """
    if should_not_find:
        # Nothing may be found, so there is nothing to expect.
        assert_equal(expected, {})
    num_matched = 0
    for item in object_array:
        all_match = True
        for key, value in to_match.items():
            if item[key] != value:
                all_match = False
        if not all_match:
            continue
        elif should_not_find:
            # A forbidden match: count it so the final check below fails.
            # (The expected loop that follows is empty in this mode.)
            num_matched = num_matched + 1
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
            num_matched = num_matched + 1
    if num_matched == 0 and not should_not_find:
        raise AssertionError("No objects matched %s" % (str(to_match)))
    if num_matched > 0 and should_not_find:
        raise AssertionError("Objects were found %s" % (str(to_match)))
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
    """Number of bytes encoded by a hex string."""
    return len(bytes.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
    """Encode a bytes-like object as a lowercase ASCII hex string."""
    return bytes(byte_str).hex()
def hash256(byte_str):
    """Double-SHA256 of `byte_str`, with the digest byte order reversed."""
    inner = hashlib.sha256(byte_str).digest()
    return hashlib.sha256(inner).digest()[::-1]
def hex_str_to_bytes(hex_str):
    """Decode an ASCII hex string to bytes (strict: no whitespace allowed)."""
    ascii_bytes = hex_str.encode('ascii')
    return unhexlify(ascii_bytes)
def str_to_b64str(string):
    """UTF-8 encode `string` and return its base64 text form."""
    encoded = b64encode(string.encode('utf-8'))
    return encoded.decode('ascii')
def satoshi_round(amount):
    """Round `amount` down to 8 decimal places (whole satoshis)."""
    quantum = Decimal('0.00000001')
    return Decimal(amount).quantize(quantum, rounding=ROUND_DOWN)
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
    """Poll `predicate` every 50 ms until it is true, optionally holding
    `lock` while evaluating it.

    Fails (AssertionError) after `attempts` tries or `timeout` seconds;
    when neither bound is given, a 60 second timeout is applied.
    """
    if attempts == float('inf') and timeout == float('inf'):
        timeout = 60
    deadline = time.time() + timeout
    tries = 0
    while tries < attempts and time.time() < deadline:
        if lock:
            with lock:
                satisfied = predicate()
        else:
            satisfied = predicate()
        if satisfied:
            return
        tries += 1
        time.sleep(0.05)
    # Include the predicate's source in the failure report.
    predicate_source = "''''\n" + inspect.getsource(predicate) + "'''"
    logger.error("wait_until() failed. Predicate: {}".format(predicate_source))
    if tries >= attempts:
        raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts))
    elif time.time() >= deadline:
        raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout))
    raise RuntimeError('Unreachable')
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# Width of the port band used for p2p (and, offset by one band, rpc) ports
PORT_RANGE = 5000
class PortSeed:
    # Per-process offset mixed into every p2p/rpc port (see p2p_port /
    # rpc_port) so concurrently running tests don't collide.
    # NOTE(review): presumably assigned a unique integer by the framework
    # before any port helper runs -- it is only ever None here.
    n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
    """Build an AuthServiceProxy for `url`, wrapped for RPC-coverage logging.

    timeout: optional HTTP timeout in seconds.
    coveragedir: when set, RPC calls are logged to a per-node file there.
    """
    proxy_kwargs = {}
    if timeout is not None:
        proxy_kwargs['timeout'] = timeout
    proxy = AuthServiceProxy(url, **proxy_kwargs)
    # remember the url on the proxy object for later inspection
    proxy.url = url
    coverage_logfile = coverage.get_filename(
        coveragedir, node_number) if coveragedir else None
    return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
    """P2P listen port for node n, offset per-process via PortSeed.n."""
    assert(n <= MAX_NODES)
    return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
    """RPC port for node n: one PORT_RANGE band above the p2p ports."""
    return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_url(datadir, i, rpchost=None):
    """Credentialed RPC URL for node i (http://user:pass@host:port).

    rpchost may be "host" or "host:port" and overrides the defaults;
    credentials come from get_auth_cookie(datadir).
    """
    rpc_u, rpc_p = get_auth_cookie(datadir)
    host = '127.0.0.1'
    port = rpc_port(i)
    if rpchost:
        parts = rpchost.split(':')
        if len(parts) == 2:
            host, port = parts
        else:
            host = rpchost
    return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
os.makedirs(datadir)
with open(os.path.join(datadir, "anotherscryptcoin.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("[regtest]\n")
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("server=1\n")
f.write("keypool=1\n")
f.write("discover=0\n")
f.write("listenonion=0\n")
f.write("printtoconsole=0\n")
f.write("upnp=0\n")
os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True)
os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
return datadir
def get_datadir_path(dirname, n):
    """Path of node n's data directory under `dirname` (".../node<n>")."""
    return os.path.join(dirname, "node{}".format(n))
def append_config(datadir, options):
    """Append each option line to <datadir>/anotherscryptcoin.conf."""
    conf_path = os.path.join(datadir, "anotherscryptcoin.conf")
    with open(conf_path, 'a', encoding='utf8') as conf:
        conf.write("".join(option + "\n" for option in options))
def get_auth_cookie(datadir):
    """Return (user, password) for RPC authentication.

    Reads rpcuser/rpcpassword from anotherscryptcoin.conf when present; a
    readable regtest .cookie file overrides both. Raises ValueError when
    neither source yields credentials.
    """
    user = None
    password = None
    if os.path.isfile(os.path.join(datadir, "anotherscryptcoin.conf")):
        with open(os.path.join(datadir, "anotherscryptcoin.conf"), 'r', encoding='utf8') as f:
            for line in f:
                if line.startswith("rpcuser="):
                    # Ensure that there is only one rpcuser line in the conf
                    assert user is None
                    user = line.split("=")[1].strip("\n")
                if line.startswith("rpcpassword="):
                    # Ensure that there is only one rpcpassword line in the conf
                    assert password is None
                    password = line.split("=")[1].strip("\n")
    if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")) and os.access(os.path.join(datadir, "regtest", ".cookie"), os.R_OK):
        with open(os.path.join(datadir, "regtest", ".cookie"), 'r', encoding="ascii") as f:
            # cookie format is "user:password" on a single line
            userpass = f.read()
            split_userpass = userpass.split(':')
            user = split_userpass[0]
            password = split_userpass[1]
    if user is None or password is None:
        raise ValueError("No RPC credentials")
    return user, password
def delete_cookie_file(datadir):
    """Remove a leftover regtest .cookie file, if one exists."""
    cookie = os.path.join(datadir, "regtest", ".cookie")
    if os.path.isfile(cookie):
        logger.debug("Deleting leftover cookie file")
        os.remove(cookie)
def get_bip9_status(node, key):
    """Return the bip9_softforks entry for `key` from getblockchaininfo."""
    info = node.getblockchaininfo()
    return info['bip9_softforks'][key]
def set_node_times(nodes, t):
    """Set the same mock time `t` on every node."""
    for node in nodes:
        node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
    """Disconnect every peer of from_connection whose subversion marks it as
    test node `node_num`, then wait (up to 5s) until they are gone."""
    for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
        try:
            from_connection.disconnectnode(nodeid=peer_id)
        except JSONRPCException as e:
            # This avoids a race condition if we're mass-disconnecting peers.
            # NOTE(review): -29 is presumably "node not connected" -- confirm
            # against the RPC error codes.
            if e.error['code'] != -29:
                raise
    # wait until the peer list no longer contains the target node
    wait_until(lambda: [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5)
def connect_nodes(from_connection, node_num):
    """One-shot connect from_connection to node `node_num` and wait until
    every peer has completed the version handshake (version != 0)."""
    ip_port = "127.0.0.1:" + str(p2p_port(node_num))
    from_connection.addnode(ip_port, "onetry")
    wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
def connect_nodes_bi(nodes, a, b):
    """Connect nodes[a] and nodes[b] in both directions."""
    connect_nodes(nodes[a], b)
    connect_nodes(nodes[b], a)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
    """Wait until every node reports the same best block hash, polling every
    `wait` seconds; raise AssertionError after `timeout` seconds."""
    stop_time = time.time() + timeout
    while time.time() <= stop_time:
        best_hash = [x.getbestblockhash() for x in rpc_connections]
        # all nodes agree when every entry equals the first one
        if best_hash.count(best_hash[0]) == len(rpc_connections):
            return
        time.sleep(wait)
    raise AssertionError("Block sync timed out:{}".format("".join("\n  {!r}".format(b) for b in best_hash)))
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
    """Wait until every node has the same set of mempool transactions,
    polling every `wait` seconds; raise AssertionError after `timeout`.

    flush_scheduler: drain each node's validation-interface queue once the
    mempools agree (so dependent state has caught up).
    """
    stop_time = time.time() + timeout
    while time.time() <= stop_time:
        pool = [set(r.getrawmempool()) for r in rpc_connections]
        if pool.count(pool[0]) == len(rpc_connections):
            if flush_scheduler:
                for r in rpc_connections:
                    r.syncwithvalidationinterfacequeue()
            return
        time.sleep(wait)
    raise AssertionError("Mempool sync timed out:{}".format("".join("\n  {!r}".format(m) for m in pool)))
equired >= 0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
    """Build change output(s) worth amount_in - amount_out - fee.

    When the change exceeds twice the spend, it is split across two fresh
    addresses (rounded down to satoshis) so the wallet keeps smaller utxos.
    """
    outputs = {}
    amount = amount_out + fee
    change = amount_in - amount
    if change > amount * 2:
        # Split large change in two, rounding the first half down to satoshis
        change_address = from_node.getnewaddress()
        outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        change = amount_in - amount - outputs[change_address]
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """Send `amount` between two randomly chosen nodes with a randomized fee
    (min_fee + fee_increment * randint(0, fee_variants)).

    Returns (txid, signed tx hex, fee). NOTE(review): from_node and to_node
    can be the same node -- presumably acceptable for this fuzzing helper.
    """
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    (total_in, inputs) = gather_inputs(from_node, amount + fee)
    outputs = make_change(from_node, total_in, amount, fee)
    outputs[to_node.getnewaddress()] = float(amount)
    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransactionwithwallet(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"], fee)
def create_confirmed_utxos(fee, node, count):
    """Grow the wallet to at least `count` confirmed utxos and return them.

    Mines enough blocks for mature coinbases, then repeatedly splits one
    utxo into two equal halves (paying `fee` per split), and finally mines
    until the mempool is empty.
    """
    # 101 extra blocks so the newest coinbases mature
    to_generate = int(0.5 * count) + 101
    while to_generate > 0:
        node.generate(min(25, to_generate))
        to_generate -= 25
    utxos = node.listunspent()
    iterations = count - len(utxos)
    addr1 = node.getnewaddress()
    addr2 = node.getnewaddress()
    if iterations <= 0:
        return utxos
    for i in range(iterations):
        t = utxos.pop()
        inputs = []
        inputs.append({"txid": t["txid"], "vout": t["vout"]})
        outputs = {}
        send_value = t['amount'] - fee
        # split the input value into two equal outputs
        outputs[addr1] = satoshi_round(send_value / 2)
        outputs[addr2] = satoshi_round(send_value / 2)
        raw_tx = node.createrawtransaction(inputs, outputs)
        signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"]
        node.sendrawtransaction(signed_tx)
    # confirm all the splits before returning
    while (node.getmempoolinfo()['size'] > 0):
        node.generate(1)
    utxos = node.listunspent()
    assert(len(utxos) >= count)
    return utxos
def gen_return_txouts():
    """Hex blob of 128 large zero-value OP_RETURN outputs, used to bloat a
    transaction close to the block size limit."""
    # OP_RETURN OP_PUSHDATA2 0x0200 (=512) bytes, then 512 bytes of 0x01
    script_pubkey = "6a4d0200" + "01" * 512
    # value (8 zero bytes) + script length (compact-size 516) + script
    one_txout = "0000000000000000" + "fd0402" + script_pubkey
    # "81" leads the blob; presumably the compact-size output count
    # (0x81 = 129 including the change output) -- verify against the caller.
    return "81" + one_txout * 128
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
    """Spend `num` utxos (popped from `utxos`), splicing the `txouts` hex
    blob into each raw transaction to make it large; return the txids."""
    addr = node.getnewaddress()
    txids = []
    for _ in range(num):
        t = utxos.pop()
        inputs = [{"txid": t["txid"], "vout": t["vout"]}]
        outputs = {}
        change = t['amount'] - fee
        outputs[addr] = satoshi_round(change)
        rawtx = node.createrawtransaction(inputs, outputs)
        # splice the pre-built outputs in after the fixed-size tx prefix
        newtx = rawtx[0:92]
        newtx = newtx + txouts
        newtx = newtx + rawtx[94:]
        # sighash type NONE: outputs are not covered by the signature
        signresult = node.signrawtransactionwithwallet(newtx, None, "NONE")
        txid = node.sendrawtransaction(signresult["hex"], True)
        txids.append(txid)
    return txids
def mine_large_block(node, utxos=None):
    """Fill one block with 14 oversized (~66k) transactions -- close to the
    1MB block limit -- and mine it."""
    num = 14
    txouts = gen_return_txouts()
    utxos = utxos if utxos is not None else []
    if len(utxos) < num:
        # refill the caller's list in place so it stays useful across calls
        utxos.clear()
        utxos.extend(node.listunspent())
    fee = 100 * node.getnetworkinfo()["relayfee"]
    create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
    node.generate(1)
def find_vout_for_address(node, txid, addr):
    """
    Locate the vout index of the given transaction sending to the
    given address. Raises runtime error exception if not found.
    """
    tx = node.getrawtransaction(txid, True)
    for i in range(len(tx["vout"])):
        if any([addr == a for a in tx["vout"][i]["scriptPubKey"]["addresses"]]):
            return i
    raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))
| true | true |
f71e55cfd055438d6c39116f56d41f4f94768b6f | 75,527 | py | Python | models/networks.py | izhorvath/MetGAN | aca85fb3306d2515a65c8d525cd78e1147ba7e1b | [
"BSD-3-Clause"
] | null | null | null | models/networks.py | izhorvath/MetGAN | aca85fb3306d2515a65c8d525cd78e1147ba7e1b | [
"BSD-3-Clause"
] | null | null | null | models/networks.py | izhorvath/MetGAN | aca85fb3306d2515a65c8d525cd78e1147ba7e1b | [
"BSD-3-Clause"
] | null | null | null | import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
from math import floor, log2
from functools import partial
from linear_attention_transformer import ImageLinearAttention
###
from random import random
import numpy as np
import torch.nn.functional as F
###
from models.networks_SPADE.base_network import BaseNetwork
from models.networks_SPADE.architecture import ResnetBlock as ResnetBlock
from models.networks_SPADE.architecture import SPADEResnetBlock as SPADEResnetBlock
###############################################################################
# Helper Functions
###############################################################################
class Identity(nn.Module):
    """No-op module that returns its input unchanged (used when the
    normalization type is 'none' -- see get_norm_layer)."""

    def forward(self, x):
        # Pure pass-through: keeps module graphs intact without altering x.
        return x
def get_norm_layer(norm_type='instance'):
    """Return a normalization layer constructor.

    Parameters:
        norm_type (str) -- 'batch' | 'instance' | 'none'

    'batch' uses learnable affine parameters and tracks running statistics;
    'instance' uses neither; 'none' yields a constructor producing Identity.
    """
    if norm_type == 'batch':
        return functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
    if norm_type == 'instance':
        return functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
    if norm_type == 'none':
        # the argument (num_features) is accepted and ignored
        return lambda x: Identity()
    raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler

    Parameters:
        optimizer          -- the optimizer of the network
        opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
                              opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine

    For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
    and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
    For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
    See https://pytorch.org/docs/stable/optim.html for more details.

    Raises:
        NotImplementedError -- for an unrecognized opt.lr_policy.
    """
    if opt.lr_policy == 'linear':
        def lambda_rule(epoch):
            # multiplicative factor: 1.0 during the first n_epochs, then a
            # linear ramp down to 0 over n_epochs_decay epochs
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
    else:
        # BUGFIX: previously the exception instance was *returned* (and the
        # message used a comma instead of %-formatting), so callers silently
        # received a NotImplementedError object instead of a scheduler.
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
def define_SPADE(opt,gpu_ids):
    """Build a SPADE generator variant selected by substring match on
    opt.netG ('spade8' | 'spade6' | default SPADE), move it to the first
    GPU if any, and run its own weight initialization.

    NOTE(review): norm_layer='abc' looks like a placeholder value -- the
    SPADE generators presumably ignore it; confirm in their constructors.
    """
    if('spade8' in opt.netG):
        net = SPADE8Generator(input_nc=1, output_nc=1, num_downs = 8, ngf=1, norm_layer='abc', use_dropout=False, opt=opt)
    elif('spade6' in opt.netG):
        net = SPADE6Generator(input_nc=1, output_nc=1, num_downs = 8, ngf=1, norm_layer='abc', use_dropout=False, opt=opt)
    else:
        net = SPADEGenerator(input_nc=1, output_nc=1, num_downs = 8, ngf=1, norm_layer='abc', use_dropout=False, opt=opt)
    if len(gpu_ids) > 0:
        assert(torch.cuda.is_available())
        net.to(gpu_ids[0])
        #net = torch.nn.DataParallel(net, gpu_ids)
    # SPADE nets initialize themselves (unlike define_G, which uses init_net)
    net.init_weights()
    return net
def init_weights(net, init_type='normal', init_gain=0.02):
    """Initialize network weights in-place.

    Parameters:
        net (network)     -- network to be initialized
        init_type (str)   -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal.

    'normal' is what the original pix2pix and CycleGAN papers used; xavier
    and kaiming may work better for some applications.
    """
    def init_func(m):
        # applied to every submodule via net.apply() below
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and ('Conv' in classname or 'Linear' in classname):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if getattr(m, 'bias', None) is not None:
                init.constant_(m.bias.data, 0.0)
        elif 'BatchNorm2d' in classname:
            # BatchNorm weight is a vector, not a matrix: only a normal
            # distribution (around 1) applies.
            init.normal_(m.weight.data, 1.0, init_gain)
            init.constant_(m.bias.data, 0.0)

    print('initialize network with %s' % init_type)
    net.apply(init_func)
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
    Parameters:
        net (network)      -- the network to be initialized
        init_type (str)    -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        gain (float)       -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2

    Return an initialized network.

    NOTE(review): the mutable default gpu_ids=[] is shared across calls;
    harmless here because it is never mutated in this function.
    """
    if len(gpu_ids) > 0:
        assert(torch.cuda.is_available())
        net.to(gpu_ids[0])
        # multi-GPU wrapping is deliberately disabled:
        #net = torch.nn.DataParallel(net, gpu_ids)  # multi-GPUs
    init_weights(net, init_type, init_gain=init_gain)
    return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Create a generator

    Parameters:
        input_nc (int)     -- the number of channels in input images
        output_nc (int)    -- the number of channels in output images
        ngf (int)          -- the number of filters in the last conv layer
        netG (str)         -- the architecture's name: resnet_9blocks | resnet_9blocksup | resnet_6blocks |
                              unet_128 | unet_256 | unet_768 | unet_768_sigm | unet_spade | unet_spade8sm
        norm (str)         -- the name of normalization layers used in the network: batch | instance | none
        use_dropout (bool) -- if use dropout layers.
        init_type (str)    -- the name of our initialization method.
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2

    Returns a generator

    U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
        The original U-Net paper: https://arxiv.org/abs/1505.04597
    Resnet-based generator: several Resnet blocks between a few downsampling/upsampling operations.
    The unet_768* and unet_spade* entries are project-specific variants
    (SPADE-conditioned / sigmoid-output U-Nets) defined later in this file.

    The generator has been initialized by <init_net>. It uses RELU for non-linearity.
    """
    net = None
    norm_layer = get_norm_layer(norm_type=norm)

    if netG == 'resnet_9blocks':
        net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
    elif netG == 'resnet_9blocksup':
        net = ResnetGeneratorUp(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
    elif netG == 'resnet_6blocks':
        net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
    elif netG == 'unet_128':
        net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
    elif netG == 'unet_256':
        net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
    elif netG == 'unet_768':
        net = UNet768(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
    elif netG == 'unet_768_sigm':
        # sigmoid-output variant of UNet768
        net = UNet768Sigm(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
    elif netG == 'unet_spade':
        net = UNet768PIXSPADE(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
    elif netG == 'unet_spade8sm':
        net = UNet768PIXSPADE8SM(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
    else:
        raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
    return init_net(net, init_type, init_gain, gpu_ids)
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Create a discriminator

    Parameters:
        input_nc (int)     -- the number of channels in input images
        ndf (int)          -- the number of filters in the first conv layer
        netD (str)         -- the architecture's name: basic | n_layers | pixel | conditional | unet
        n_layers_D (int)   -- the number of conv layers in the discriminator; effective when netD=='n_layers'
        norm (str)         -- the type of normalization layers used in the network.
        init_type (str)    -- the name of the initialization method.
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2

    Returns a discriminator

    [basic]: 'PatchGAN' classifier described in the original pix2pix paper;
    classifies whether 70x70 overlapping patches are real or fake, fully
    convolutionally.
    [n_layers]: like [basic] but with a configurable number of conv layers
    (<n_layers_D>; default=3 equals [basic]).
    [pixel]: 1x1 PixelGAN that classifies each pixel independently.
    [conditional]: conditional PatchGAN -- structurally identical to [basic];
    the caller supplies the concatenated conditional input.
    [unet]: project-specific U-Net discriminator (see UnetDiscriminator).

    The discriminator has been initialized by <init_net>. It uses Leaky RELU for non-linearity.
    """
    net = None
    norm_layer = get_norm_layer(norm_type=norm)

    if netD == 'basic':  # default PatchGAN classifier
        net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
    elif netD == 'n_layers':  # more options
        net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
    elif netD == 'pixel':     # classify if each pixel is real or fake
        net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
    elif netD == 'conditional': #conditional patchGAN
        net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
    elif netD == 'unet':
        net = UnetDiscriminator()
    else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
    return init_net(net, init_type, init_gain, gpu_ids)
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
    """GAN objective wrapper supporting vanilla | lsgan | wgangp.

    Builds target label tensors of the same size as the discriminator
    output, so callers never construct them by hand.
    """

    def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
        """Store the objective type and register the two label values.

        Parameters:
            gan_mode (str)           -- the type of GAN objective: vanilla | lsgan | wgangp
            target_real_label (bool) -- label for a real image
            target_fake_label (bool) -- label of a fake image

        Note: do not use sigmoid as the last layer of the discriminator;
        lsgan needs no sigmoid and vanilla uses BCEWithLogitsLoss.
        """
        super(GANLoss, self).__init__()
        # buffers so the labels follow the module across devices
        self.register_buffer('real_label', torch.tensor(target_real_label))
        self.register_buffer('fake_label', torch.tensor(target_fake_label))
        self.gan_mode = gan_mode
        if gan_mode == 'lsgan':
            self.loss = nn.MSELoss()
        elif gan_mode == 'vanilla':
            self.loss = nn.BCEWithLogitsLoss()
        elif gan_mode in ['wgangp']:
            self.loss = None  # wgangp uses the raw critic scores directly
        else:
            raise NotImplementedError('gan mode %s not implemented' % gan_mode)

    def get_target_tensor(self, prediction, target_is_real):
        """Return a label tensor shaped like `prediction`.

        prediction: discriminator output; target_is_real selects which of
        the two registered labels is broadcast to its shape.
        """
        label = self.real_label if target_is_real else self.fake_label
        return label.expand_as(prediction)

    def __call__(self, prediction, target_is_real):
        """Compute the loss for a discriminator output against labels.

        For wgangp the mean score is returned (negated for real targets);
        otherwise the configured criterion compares against broadcast labels.
        """
        if self.gan_mode == 'wgangp':
            return -prediction.mean() if target_is_real else prediction.mean()
        target = self.get_target_tensor(prediction, target_is_real)
        return self.loss(prediction, target)
class UnetGANLoss(nn.Module):
    """Two-headed GAN objective for a U-Net discriminator: combines a
    scalar (encoder) prediction with a per-pixel (decoder) prediction map,
    each scored with BCEWithLogitsLoss.

    NOTE(review): the per-pixel labels are hard-coded to (1, 256, 256) and
    registered from np.ones/np.zeros, which are float64 -- if predictions
    are float32 this may raise a dtype mismatch in BCEWithLogitsLoss;
    confirm against the discriminator's output dtype.
    """

    def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
        """Register scalar and per-pixel label buffers and the two criteria.

        Parameters:
            gan_mode (str)           -- kept for interface parity; the losses
                                        below are always BCEWithLogitsLoss
            target_real_label (bool) -- label for a real image (scalar head)
            target_fake_label (bool) -- label of a fake image (scalar head)
        """
        super(UnetGANLoss, self).__init__()
        self.register_buffer('real_label_1', torch.tensor(target_real_label))
        self.register_buffer('real_label_2', torch.tensor(np.ones((1,256,256))))
        self.register_buffer('fake_label_1', torch.tensor(target_fake_label))
        self.register_buffer('fake_label_2', torch.tensor(np.zeros((1,256,256))))
        self.loss_1 = nn.BCEWithLogitsLoss()
        self.loss_2 = nn.BCEWithLogitsLoss()

    def get_target_tensor(self, prediction_1, prediction_2, target_is_real):
        """Return (scalar labels, per-pixel labels) expanded to the shapes
        of the two prediction heads."""
        if target_is_real:
            target_tensor_1 = self.real_label_1
            target_tensor_2 = self.real_label_2
        else:
            target_tensor_1 = self.fake_label_1
            target_tensor_2 = self.fake_label_2
        return target_tensor_1.expand_as(prediction_1), target_tensor_2.expand_as(prediction_2)

    def __call__(self, prediction_1, prediction_2, target_is_real):
        """Sum of the mean BCE losses of both discriminator heads against
        the broadcast ground-truth labels."""
        target_tensor_1, target_tensor_2 = self.get_target_tensor(prediction_1, prediction_2, target_is_real)
        loss_1 = self.loss_1(prediction_1, target_tensor_1)
        loss_2 = self.loss_2(prediction_2, target_tensor_2)
        loss = loss_1.mean()+loss_2.mean()
        return loss
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
    """Gradient penalty from the WGAN-GP paper (https://arxiv.org/abs/1704.00028).

    Arguments:
        netD (network)      -- discriminator network
        real_data (tensor)  -- real images
        fake_data (tensor)  -- generated images from the generator
        device (str)        -- torch device for the interpolation weights
        type (str)          -- where to evaluate the gradient: real | fake | mixed
        constant (float)    -- target gradient norm (the c in (||g||_2 - c)^2)
        lambda_gp (float)   -- penalty weight; <= 0 disables the term

    Returns (penalty, gradients flattened per sample), or (0.0, None) when
    lambda_gp <= 0.
    """
    if lambda_gp <= 0.0:
        return 0.0, None
    if type == 'real':
        sample = real_data
    elif type == 'fake':
        sample = fake_data
    elif type == 'mixed':
        # per-example random interpolation between real and fake
        alpha = torch.rand(real_data.shape[0], 1, device=device)
        alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
        sample = alpha * real_data + ((1 - alpha) * fake_data)
    else:
        raise NotImplementedError('{} not implemented'.format(type))
    sample.requires_grad_(True)
    scores = netD(sample)
    grads = torch.autograd.grad(outputs=scores, inputs=sample,
                                grad_outputs=torch.ones(scores.size()).to(device),
                                create_graph=True, retain_graph=True, only_inputs=True)
    flat = grads[0].view(real_data.size(0), -1)
    # the small eps keeps the norm's own gradient finite at zero
    penalty = (((flat + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp
    return penalty, flat
class ResnetGenerator(nn.Module):
    """ResNet generator: stride-2 downsampling convs, a chain of residual
    blocks, then transposed-conv upsampling (Johnson et al., fast-neural-style).
    """

    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
        """Construct the generator.

        Parameters:
            input_nc (int)     -- channels in input images
            output_nc (int)    -- channels in output images
            ngf (int)          -- filters in the last conv layer
            norm_layer         -- normalization layer (class or functools.partial)
            use_dropout (bool) -- whether ResNet blocks use dropout
            n_blocks (int)     -- number of ResNet blocks
            padding_type (str) -- padding in conv layers: reflect | replicate | zero
        """
        assert n_blocks >= 0
        super(ResnetGenerator, self).__init__()
        # convs carry their own bias only when the norm layer is InstanceNorm
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        layers = [nn.ReflectionPad2d(3),
                  nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
                  norm_layer(ngf),
                  nn.ReLU(True)]
        n_downsampling = 2
        for i in range(n_downsampling):  # stride-2 downsampling stages
            ch = ngf * (2 ** i)
            layers += [nn.Conv2d(ch, ch * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
                       norm_layer(ch * 2),
                       nn.ReLU(True)]
        ch = ngf * (2 ** n_downsampling)
        # residual trunk at the lowest resolution
        layers += [ResnetBlock(ch, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)
                   for _ in range(n_blocks)]
        for i in range(n_downsampling):  # transposed-conv upsampling stages
            ch = ngf * (2 ** (n_downsampling - i))
            layers += [nn.ConvTranspose2d(ch, int(ch / 2),
                                          kernel_size=3, stride=2,
                                          padding=1, output_padding=1,
                                          bias=use_bias),
                       norm_layer(int(ch / 2)),
                       nn.ReLU(True)]
        layers += [nn.ReflectionPad2d(3),
                   nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0),
                   nn.Tanh()]
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        """Standard forward."""
        return self.model(input)
class ResnetGeneratorUp(nn.Module):
    """ResNet generator variant that upsamples with nearest-neighbour Upsample
    plus a reflection-padded conv instead of transposed convolutions.
    """

    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
        """Construct the generator.

        Parameters:
            input_nc (int)     -- channels in input images
            output_nc (int)    -- channels in output images
            ngf (int)          -- filters in the last conv layer
            norm_layer         -- normalization layer (class or functools.partial)
            use_dropout (bool) -- whether ResNet blocks use dropout
            n_blocks (int)     -- number of ResNet blocks
            padding_type (str) -- padding in conv layers: reflect | replicate | zero
        """
        assert n_blocks >= 0
        super(ResnetGeneratorUp, self).__init__()
        # convs carry their own bias only when the norm layer is InstanceNorm
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        layers = [nn.ReflectionPad2d(3),
                  nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
                  norm_layer(ngf),
                  nn.ReLU(True)]
        n_downsampling = 2
        for i in range(n_downsampling):  # stride-2 downsampling stages
            ch = ngf * (2 ** i)
            layers += [nn.Conv2d(ch, ch * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
                       norm_layer(ch * 2),
                       nn.ReLU(True)]
        ch = ngf * (2 ** n_downsampling)
        # residual trunk at the lowest resolution
        layers += [ResnetBlock(ch, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)
                   for _ in range(n_blocks)]
        for i in range(n_downsampling):  # upsample-then-conv stages (no transposed conv)
            ch = ngf * (2 ** (n_downsampling - i))
            layers += [nn.Upsample(scale_factor=2, mode='nearest'),
                       nn.ReflectionPad2d(1),
                       nn.Conv2d(ch, int(ch / 2), kernel_size=3, stride=1, padding=0)]
        layers += [nn.ReflectionPad2d(3),
                   nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0),
                   nn.Tanh()]
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        """Standard forward."""
        return self.model(input)
class ResnetBlock(nn.Module):
    """A residual block: forward(x) = x + conv_block(x) (He et al., 1512.03385)."""

    def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        """Build the internal conv stack; the skip connection lives in forward()."""
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)

    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        """Return a Sequential of [pad, conv, norm, ReLU, (dropout), pad, conv, norm].

        Parameters:
            dim (int)          -- channels of the conv layers
            padding_type (str) -- reflect | replicate | zero
            norm_layer         -- normalization layer
            use_dropout (bool) -- insert Dropout(0.5) between the two convs
            use_bias (bool)    -- whether conv layers use bias
        """
        def pad_for(kind):
            # translate the padding keyword into (extra pad layers, conv padding)
            if kind == 'reflect':
                return [nn.ReflectionPad2d(1)], 0
            if kind == 'replicate':
                return [nn.ReplicationPad2d(1)], 0
            if kind == 'zero':
                return [], 1
            raise NotImplementedError('padding [%s] is not implemented' % kind)

        layers, p = pad_for(padding_type)
        layers = layers + [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                           norm_layer(dim),
                           nn.ReLU(True)]
        if use_dropout:
            layers.append(nn.Dropout(0.5))
        extra, p = pad_for(padding_type)
        layers = layers + extra + [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                                   norm_layer(dim)]
        return nn.Sequential(*layers)

    def forward(self, x):
        """Forward with the skip connection added."""
        return x + self.conv_block(x)
class UnetGenerator(nn.Module):
    """U-Net generator built recursively from UnetSkipConnectionBlock modules,
    from the innermost bottleneck outwards."""

    def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Construct the U-Net.

        Parameters:
            input_nc (int)  -- channels in input images
            output_nc (int) -- channels in output images
            num_downs (int) -- number of downsamplings; with 7, a 128x128 image
                               becomes 1x1 at the bottleneck
            ngf (int)       -- filters in the last conv layer
            norm_layer      -- normalization layer
        """
        super(UnetGenerator, self).__init__()
        # innermost bottleneck block
        block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
        # intermediate stages at ngf*8 filters
        for _ in range(num_downs - 5):
            block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=block, norm_layer=norm_layer, use_dropout=use_dropout)
        # widen back out: ngf*8 -> ngf*4 -> ngf*2 -> ngf
        for outer, inner in ((ngf * 4, ngf * 8), (ngf * 2, ngf * 4), (ngf, ngf * 2)):
            block = UnetSkipConnectionBlock(outer, inner, input_nc=None, submodule=block, norm_layer=norm_layer)
        # outermost layer maps input_nc -> output_nc
        self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=block, outermost=True, norm_layer=norm_layer)

    def forward(self, input):
        """Standard forward."""
        return self.model(input)
class UnetSkipConnectionBlock(nn.Module):
    """One U-Net stage with a skip connection:
        X -------------------identity----------------------
        |-- downsampling -- |submodule| -- upsampling --|
    """

    def __init__(self, outer_nc, inner_nc, input_nc=None,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Construct one skip-connection stage.

        Parameters:
            outer_nc (int)  -- filters in the outer conv layer
            inner_nc (int)  -- filters in the inner conv layer
            input_nc (int)  -- channels of the incoming features (defaults to outer_nc)
            submodule       -- previously built inner stage, wrapped by this one
            outermost (bool)-- whether this is the outermost stage (no skip concat)
            innermost (bool)-- whether this is the bottleneck stage
            norm_layer      -- normalization layer
            use_dropout (bool) -- add Dropout(0.5) on intermediate stages
        """
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        # convs carry their own bias only when the norm layer is InstanceNorm
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc
        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
                             stride=2, padding=1, bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc)
        if outermost:
            # full image out: Tanh, no norm, upconv sees concatenated channels
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2, padding=1)
            layers = [downconv, submodule, uprelu, upconv, nn.Tanh()]
        elif innermost:
            # bottleneck: no submodule, upconv sees inner_nc channels only
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                        kernel_size=4, stride=2, padding=1, bias=use_bias)
            layers = [downrelu, downconv, uprelu, upconv, upnorm]
        else:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2, padding=1, bias=use_bias)
            layers = [downrelu, downconv, downnorm, submodule, uprelu, upconv, upnorm]
            if use_dropout:
                layers.append(nn.Dropout(0.5))
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        # the outermost stage returns raw output; inner stages concat the input (skip)
        if self.outermost:
            return self.model(x)
        return torch.cat([x, self.model(x)], 1)
#%%% Unet from DeepMact
class ConvBnRelu2d(torch.nn.Module):
    """Conv (or transposed conv when is_decoder) optionally followed by
    BatchNorm and ReLU; exactly one of self.conv / self.transpConv is set."""

    def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, output_padding=1, dilation=1, stride=1, groups=1, is_bn=True, is_relu=True, is_decoder=False):
        super(ConvBnRelu2d, self).__init__()
        if is_decoder:
            # decoder stages upsample with a transposed conv
            self.transpConv = torch.nn.ConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding, output_padding=output_padding, stride=stride, dilation=dilation, groups=groups, bias=False)
            self.conv = None
        else:
            self.transpConv = None
            self.conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding, stride=stride, dilation=dilation, groups=groups, bias=False)
        self.bn = torch.nn.BatchNorm2d(out_channels, eps=1e-4)
        self.relu = torch.nn.ReLU(inplace=True)
        # flags disable the corresponding stage entirely
        if not is_bn:
            self.bn = None
        if not is_relu:
            self.relu = None

    def forward(self, x):
        # run whichever conv variant was built, then the optional norm/activation
        if self.conv is None:
            x = self.transpConv(x)
        elif self.transpConv is None:
            x = self.conv(x)
        if self.bn is not None:
            x = self.bn(x)
        if self.relu is not None:
            x = self.relu(x)
        return x
class StackEncoder(torch.nn.Module):
    """Encoder stage: two ConvBnRelu2d layers then 2x2 max-pooling.
    Returns both the pre-pool features (for the skip connection) and the
    pooled, half-resolution features."""

    def __init__(self, x_channels, y_channels, kernel_size=3, stride=1):
        super(StackEncoder, self).__init__()
        same_pad = (kernel_size - 1) // 2  # 'same' padding for odd kernels
        self.encode = torch.nn.Sequential(
            ConvBnRelu2d(x_channels, y_channels, kernel_size=kernel_size, padding=same_pad, dilation=1, stride=stride, groups=1),
            ConvBnRelu2d(y_channels, y_channels, kernel_size=kernel_size, padding=same_pad, dilation=1, stride=stride, groups=1),
        )

    def forward(self, x):
        full_res = self.encode(x)
        pooled = torch.nn.functional.max_pool2d(full_res, kernel_size=2, stride=2)
        return full_res, pooled
class StackDecoder(torch.nn.Module):
    """Decoder stage: bilinearly upsample x to the skip tensor's spatial size,
    concat along channels, then run three ConvBnRelu2d layers."""

    def __init__(self, x_big_channels, x_channels, y_channels, kernel_size=3, stride=1):
        super(StackDecoder, self).__init__()
        padding = (kernel_size - 1) // 2  # 'same' padding for odd kernels
        self.decode = torch.nn.Sequential(
            ConvBnRelu2d(x_big_channels + x_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),
            ConvBnRelu2d(y_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),
            ConvBnRelu2d(y_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),
        )

    def forward(self, x_big, x):
        """Upsample *x* to x_big's (H, W), concat with x_big, and decode."""
        N, C, H, W = x_big.size()
        # F.upsample is deprecated; F.interpolate is the documented drop-in
        # replacement with identical arguments and behavior
        y = torch.nn.functional.interpolate(x, size=(H, W), mode='bilinear', align_corners=True)
        y = torch.cat([y, x_big], 1)
        y = self.decode(y)
        return y
# 768
class UNet768(torch.nn.Module):
    """Six-level encoder/decoder U-Net (DeepMACT flavour) with a Tanh output.

    `num_downs`, `ngf`, `norm_layer` and `use_dropout` are accepted for
    interface compatibility with the other generators but are not used here.
    """

    def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(UNet768, self).__init__()
        self.output_nc = output_nc
        # encoder: channels widen input_nc -> 24 -> 64 -> 128 -> 256 -> 512 -> 768,
        # each stage halving the spatial size via max-pooling
        self.down1 = StackEncoder(input_nc, 24, kernel_size=3)
        self.down2 = StackEncoder(24, 64, kernel_size=3)
        self.down3 = StackEncoder(64, 128, kernel_size=3)
        self.down4 = StackEncoder(128, 256, kernel_size=3)
        self.down5 = StackEncoder(256, 512, kernel_size=3)
        self.down6 = StackEncoder(512, 768, kernel_size=3)
        self.center = torch.nn.Sequential(
            ConvBnRelu2d(768, 768, kernel_size=3, padding=1, stride=1),
        )
        # decoder mirrors the encoder: (skip channels, input channels, out channels)
        self.up6 = StackDecoder(768, 768, 512, kernel_size=3)
        self.up5 = StackDecoder(512, 512, 256, kernel_size=3)
        self.up4 = StackDecoder(256, 256, 128, kernel_size=3)
        self.up3 = StackDecoder(128, 128, 64, kernel_size=3)
        self.up2 = StackDecoder(64, 64, 24, kernel_size=3)
        self.up1 = StackDecoder(24, 24, 24, kernel_size=3)
        self.classify = torch.nn.Conv2d(24, output_nc, kernel_size=1, padding=0, stride=1, bias=True)
        self.final_out = torch.nn.Tanh()

    def _crop_concat(self, upsampled, bypass):
        """Center-crop *bypass* to *upsampled*'s spatial size and concat on channels."""
        c = (bypass.size()[2] - upsampled.size()[2]) // 2
        bypass = torch.nn.functional.pad(bypass, (-c, -c, -c, -c))  # negative pad == crop
        return torch.cat((upsampled, bypass), 1)

    def forward(self, x):
        # encode, keeping each stage's pre-pool features as a skip connection
        skip1, out = self.down1(x)
        skip2, out = self.down2(out)
        skip3, out = self.down3(out)
        skip4, out = self.down4(out)
        skip5, out = self.down5(out)
        skip6, out = self.down6(out)
        out = self.center(out)
        # decode, consuming the skips deepest-first
        out = self.up6(skip6, out)
        out = self.up5(skip5, out)
        out = self.up4(skip4, out)
        out = self.up3(skip3, out)
        out = self.up2(skip2, out)
        out = self.up1(skip1, out)
        out = self.final_out(self.classify(out))
        return torch.reshape(out, (-1, self.output_nc, x.shape[2], x.shape[3]))
#%%Unet_spade_768_300
#%%sigm
class UNet768Sigm(torch.nn.Module):
    """Six-level encoder/decoder U-Net identical to UNet768 except for the
    final Sigmoid activation.

    `num_downs`, `ngf`, `norm_layer` and `use_dropout` are accepted for
    interface compatibility but are not used here.
    """

    def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(UNet768Sigm, self).__init__()
        self.output_nc = output_nc
        # encoder: channels widen input_nc -> 24 -> ... -> 768, halving the image each stage
        self.down1 = StackEncoder(input_nc, 24, kernel_size=3)
        self.down2 = StackEncoder(24, 64, kernel_size=3)
        self.down3 = StackEncoder(64, 128, kernel_size=3)
        self.down4 = StackEncoder(128, 256, kernel_size=3)
        self.down5 = StackEncoder(256, 512, kernel_size=3)
        self.down6 = StackEncoder(512, 768, kernel_size=3)
        self.center = torch.nn.Sequential(
            ConvBnRelu2d(768, 768, kernel_size=3, padding=1, stride=1),
        )
        # decoder mirrors the encoder: (skip channels, input channels, out channels)
        self.up6 = StackDecoder(768, 768, 512, kernel_size=3)
        self.up5 = StackDecoder(512, 512, 256, kernel_size=3)
        self.up4 = StackDecoder(256, 256, 128, kernel_size=3)
        self.up3 = StackDecoder(128, 128, 64, kernel_size=3)
        self.up2 = StackDecoder(64, 64, 24, kernel_size=3)
        self.up1 = StackDecoder(24, 24, 24, kernel_size=3)
        self.classify = torch.nn.Conv2d(24, output_nc, kernel_size=1, padding=0, stride=1, bias=True)
        self.final_out = torch.nn.Sigmoid()

    def _crop_concat(self, upsampled, bypass):
        """Center-crop *bypass* to *upsampled*'s spatial size and concat on channels."""
        c = (bypass.size()[2] - upsampled.size()[2]) // 2
        bypass = torch.nn.functional.pad(bypass, (-c, -c, -c, -c))  # negative pad == crop
        return torch.cat((upsampled, bypass), 1)

    def forward(self, x):
        down1, out = self.down1(x)
        down2, out = self.down2(out)
        down3, out = self.down3(out)
        down4, out = self.down4(out)
        down5, out = self.down5(out)
        down6, out = self.down6(out)
        out = self.center(out)
        out = self.up6(down6, out)
        out = self.up5(down5, out)
        out = self.up4(down4, out)
        out = self.up3(down3, out)
        out = self.up2(down2, out)
        out = self.up1(down1, out)
        out = self.final_out(self.classify(out))
        # BUGFIX: previously reshaped to a hard-coded (1, output_nc, 256, 256),
        # which broke for any other batch size or input resolution; follow
        # UNet768 and derive the shape from the input (identical result for the
        # original batch-1, 256x256 case).
        out = torch.reshape(out, (-1, self.output_nc, x.shape[2], x.shape[3]))
        return out
class NLayerDiscriminator(nn.Module):
    """PatchGAN discriminator: stacked stride-2 convs that emit a 1-channel
    patch-wise real/fake score map."""

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
        """Construct the discriminator.

        Parameters:
            input_nc (int) -- channels in input images
            ndf (int)      -- filters in the first conv layer
            n_layers (int) -- number of downsampling conv stages
            norm_layer     -- normalization layer
        """
        super(NLayerDiscriminator, self).__init__()
        # BatchNorm has affine parameters, so convs skip bias in that case
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        kw, padw = 4, 1
        layers = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
                  nn.LeakyReLU(0.2, True)]
        prev_mult = 1
        for stage in range(1, n_layers):  # widen filters, capped at ndf*8
            cur_mult = min(2 ** stage, 8)
            layers += [
                nn.Conv2d(ndf * prev_mult, ndf * cur_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
                norm_layer(ndf * cur_mult),
                nn.LeakyReLU(0.2, True)
            ]
            prev_mult = cur_mult
        cur_mult = min(2 ** n_layers, 8)
        # final widening stage keeps full resolution (stride 1)
        layers += [
            nn.Conv2d(ndf * prev_mult, ndf * cur_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
            norm_layer(ndf * cur_mult),
            nn.LeakyReLU(0.2, True)
        ]
        # 1-channel prediction map
        layers += [nn.Conv2d(ndf * cur_mult, 1, kernel_size=kw, stride=1, padding=padw)]
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        """Standard forward."""
        return self.model(input)
class PixelDiscriminator(nn.Module):
    """1x1 PatchGAN (pixelGAN): scores every pixel independently via three
    1x1 convolutions."""

    def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
        """Construct the discriminator.

        Parameters:
            input_nc (int) -- channels in input images
            ndf (int)      -- filters in the first conv layer
            norm_layer     -- normalization layer
        """
        super(PixelDiscriminator, self).__init__()
        # BatchNorm has affine parameters, so convs skip bias in that case
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        self.net = nn.Sequential(
            nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
            norm_layer(ndf * 2),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias),
        )

    def forward(self, input):
        """Standard forward."""
        return self.net(input)
#%% Unet as Disdef random_hflip(tensor, prob):
def DiffAugment(x, types=[]):
    """Apply every augmentation registered in AUGMENT_FNS for each key in
    *types*, in order, then return a contiguous tensor.

    NOTE(review): the mutable default `types=[]` is kept for interface
    compatibility; it is never mutated here.
    """
    for key in types:
        for aug in AUGMENT_FNS[key]:
            x = aug(x)
    return x.contiguous(memory_format = torch.contiguous_format)
def rand_brightness(x):
    """Shift each sample's brightness by a uniform offset in [-0.5, 0.5)."""
    offset = torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) - 0.5
    return x + offset
def rand_saturation(x):
    """Scale each sample's deviation from its channel mean by a factor in [0, 2)."""
    channel_mean = x.mean(dim=1, keepdim=True)
    factor = torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) * 2
    return (x - channel_mean) * factor + channel_mean
def rand_contrast(x):
    """Scale each sample's deviation from its overall mean by a factor in [0.5, 1.5)."""
    sample_mean = x.mean(dim=[1, 2, 3], keepdim=True)
    factor = torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) + 0.5
    return (x - sample_mean) * factor + sample_mean
def rand_translation(x, ratio=0.125):
    """Randomly shift each sample by up to `ratio` * (H, W) pixels, filling
    the exposed border with zeros (via a 1-pixel zero pad plus index clamping)."""
    shift_x, shift_y = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
    # independent integer offsets per sample, drawn from [-shift, +shift]
    translation_x = torch.randint(-shift_x, shift_x + 1, size=[x.size(0), 1, 1], device=x.device)
    translation_y = torch.randint(-shift_y, shift_y + 1, size=[x.size(0), 1, 1], device=x.device)
    # full (batch, H, W) index grids
    grid_batch, grid_x, grid_y = torch.meshgrid(
        torch.arange(x.size(0), dtype=torch.long, device=x.device),
        torch.arange(x.size(2), dtype=torch.long, device=x.device),
        torch.arange(x.size(3), dtype=torch.long, device=x.device),
    )
    # shift the sampling grid; the +1 accounts for the zero border added by
    # F.pad below, and clamping makes out-of-range reads hit that zero border
    grid_x = torch.clamp(grid_x + translation_x + 1, 0, x.size(2) + 1)
    grid_y = torch.clamp(grid_y + translation_y + 1, 0, x.size(3) + 1)
    x_pad = F.pad(x, [1, 1, 1, 1, 0, 0, 0, 0])
    # gather shifted pixels: channels are moved last for advanced indexing and back
    x = x_pad.permute(0, 2, 3, 1).contiguous()[grid_batch, grid_x, grid_y].permute(0, 3, 1, 2).contiguous(memory_format = torch.contiguous_format)
    return x
def rand_cutout(x, ratio=0.5):
    """Zero out one random rectangle of roughly `ratio` * (H, W) per sample."""
    cutout_size = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
    # random window offsets per sample; (1 - size % 2) keeps the range symmetric
    offset_x = torch.randint(0, x.size(2) + (1 - cutout_size[0] % 2), size=[x.size(0), 1, 1], device=x.device)
    offset_y = torch.randint(0, x.size(3) + (1 - cutout_size[1] % 2), size=[x.size(0), 1, 1], device=x.device)
    # index grids spanning the cutout window for every sample
    grid_batch, grid_x, grid_y = torch.meshgrid(
        torch.arange(x.size(0), dtype=torch.long, device=x.device),
        torch.arange(cutout_size[0], dtype=torch.long, device=x.device),
        torch.arange(cutout_size[1], dtype=torch.long, device=x.device),
    )
    # clamp so the window stays inside the image
    grid_x = torch.clamp(grid_x + offset_x - cutout_size[0] // 2, min=0, max=x.size(2) - 1)
    grid_y = torch.clamp(grid_y + offset_y - cutout_size[1] // 2, min=0, max=x.size(3) - 1)
    mask = torch.ones(x.size(0), x.size(2), x.size(3), dtype=x.dtype, device=x.device)
    mask[grid_batch, grid_x, grid_y] = 0
    x = x * mask.unsqueeze(1)  # broadcast the (N, H, W) mask over channels
    return x
# Registry mapping DiffAugment `types` keywords to the augmentation functions
# applied (in list order) for that keyword.
AUGMENT_FNS = {
    'color': [rand_brightness, rand_saturation, rand_contrast],
    'translation': [rand_translation],
    'cutout': [rand_cutout],
}
def random_float(lo, hi):
    """Return a uniform random float in [lo, hi)."""
    span = hi - lo
    return lo + span * random()
def random_crop_and_resize(tensor, scale):
    """Take a random square crop of `scale` * H per side and resize it back to
    the original (H, H) with bilinear interpolation."""
    b, c, h, _ = tensor.shape
    crop_side = int(h * scale)
    slack = h - crop_side
    top = int(random() * slack)
    left = int(random() * slack)
    window = tensor[:, :, top:(top + crop_side), left:(left + crop_side)].clone()
    return F.interpolate(window, size=(h, h), mode='bilinear')
def random_hflip(tensor, prob):
    """Randomly flip the tensor along its last (width) dimension.

    NOTE(review): the guard returns the tensor unchanged when prob > random(),
    so a *larger* prob makes the flip less likely — preserved for compatibility.
    """
    if random() < prob:
        return tensor
    return torch.flip(tensor, dims=(3,))
class AugWrapper(nn.Module):
    """Wraps a discriminator so its input batch can be randomly augmented
    (hflip + DiffAugment) before scoring."""

    def __init__(self, D, image_size, types):
        super().__init__()
        self.D = D            # wrapped discriminator
        self.types = types    # DiffAugment type keywords

    def forward(self, images, prob = 0., detach = False):
        """With probability `prob`, augment the batch; optionally detach it.
        Returns (D(images), images-as-fed-to-D)."""
        augment = random() < prob
        if augment:
            images = random_hflip(images, prob=0.5)
            images = DiffAugment(images, types=self.types)
        if detach:
            images.detach_()
        return self.D(images), images
def leaky_relu(p=0.2):
    """Return a LeakyReLU module with negative slope *p* (default 0.2)."""
    return nn.LeakyReLU(p)
class Residual(nn.Module):
    """Adds a skip connection around *fn*: forward(x) = fn(x) + x."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x):
        out = self.fn(x)
        return out + x
class Flatten(nn.Module):
    """Flattens all dimensions of the input from `index` onward."""

    def __init__(self, index):
        super().__init__()
        self.index = index  # first dimension to flatten

    def forward(self, x):
        return torch.flatten(x, self.index)
class Rezero(nn.Module):
    """Scales fn's output by a learnable gate initialised to zero, so the
    branch starts as an identity-contribution of 0 (ReZero)."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn
        self.g = nn.Parameter(torch.zeros(1))  # learnable gate, starts closed

    def forward(self, x):
        out = self.fn(x)
        return out * self.g
def double_conv(chan_in, chan_out):
    # two 3x3 same-padding convolutions, each followed by a LeakyReLU(0.2)
    return nn.Sequential(
        nn.Conv2d(chan_in, chan_out, 3, padding=1),
        leaky_relu(),
        nn.Conv2d(chan_out, chan_out, 3, padding=1),
        leaky_relu()
    )
class DownBlock(nn.Module):
    """U-Net discriminator encoder stage: double conv plus a 1x1-conv residual,
    optionally downsampling by 2. Returns (output, pre-downsample features)."""

    def __init__(self, input_channels, filters, downsample=True):
        super().__init__()
        res_stride = 2 if downsample else 1
        self.conv_res = nn.Conv2d(input_channels, filters, 1, stride = res_stride)
        self.net = double_conv(input_channels, filters)
        self.down = nn.Conv2d(filters, filters, 3, padding = 1, stride = 2) if downsample else None

    def forward(self, x):
        shortcut = self.conv_res(x)
        feats = self.net(x)
        out = feats  # keep pre-downsample features for the decoder skip
        if self.down is not None:
            out = self.down(out)
        out = out + shortcut
        return out, feats
# one layer of self-attention and feedforward, for images:
# Residual + ReZero around linear attention, then a residual 1x1-conv
# feed-forward (expand to 2*chan, LeakyReLU, project back)
attn_and_ff = lambda chan: nn.Sequential(*[
    Residual(Rezero(ImageLinearAttention(chan, norm_queries = True))),
    Residual(Rezero(nn.Sequential(nn.Conv2d(chan, chan * 2, 1), leaky_relu(), nn.Conv2d(chan * 2, chan, 1))))
])
class UpBlock(nn.Module):
    """U-Net discriminator decoder stage: 2x bilinear upsample, concat skip
    features, double conv, plus a transposed-conv residual path."""

    def __init__(self, input_channels, filters):
        super().__init__()
        # residual path upsamples the (pre-concat) half of the channels
        self.conv_res = nn.ConvTranspose2d(input_channels // 2, filters, 1, stride = 2)
        self.net = double_conv(input_channels, filters)
        self.up = nn.Upsample(scale_factor = 2, mode='bilinear', align_corners=False)
        self.input_channels = input_channels
        self.filters = filters

    def forward(self, x, res):
        *_, h, w = x.shape
        shortcut = self.conv_res(x, output_size = (h * 2, w * 2))
        y = self.up(x)
        y = torch.cat((y, res), dim=1)
        y = self.net(y)
        return y + shortcut
class UnetDiscriminator(nn.Module):
    """U-Net-shaped discriminator: the encoder path produces a single global
    real/fake logit, and the decoder path produces a per-pixel score map.
    Returns (squeezed encoder logit, decoder map) from forward()."""
    def __init__(self, image_size=256, network_capacity = 16, transparent = False, fmap_max = 256):
        super().__init__()
        num_layers = int(log2(image_size) - 3)
        num_init_filters = 2# if not transparent else 4
        blocks = []
        # filter widths per stage, capped at fmap_max; last stage repeats the
        # previous width
        filters = [num_init_filters] + [(network_capacity) * (2 ** i) for i in range(num_layers + 1)]
        set_fmap_max = partial(min, fmap_max)
        filters = list(map(set_fmap_max, filters))
        filters[-1] = filters[-2]
        chan_in_out = list(zip(filters[:-1], filters[1:]))
        chan_in_out = list(map(list, chan_in_out))
        print('Channels',chan_in_out)  # NOTE(review): debug print left in; fires on every construction
        down_blocks = []
        attn_blocks = []
        for ind, (in_chan, out_chan) in enumerate(chan_in_out):
            num_layer = ind + 1
            is_not_last = ind != (len(chan_in_out) - 1)
            # every stage downsamples except the last
            block = DownBlock(in_chan, out_chan, downsample = is_not_last)
            down_blocks.append(block)
            attn_fn = attn_and_ff(out_chan)
            attn_blocks.append(attn_fn)
        self.down_blocks = nn.ModuleList(down_blocks)
        self.attn_blocks = nn.ModuleList(attn_blocks)
        last_chan = filters[-1]
        # encoder head: pool to 1x1 and project to a single logit
        self.to_logit = nn.Sequential(
            leaky_relu(),
            nn.AvgPool2d(image_size // (2 ** num_layers)),
            Flatten(1),
            nn.Linear(last_chan, 1)
        )
        self.conv = double_conv(last_chan, last_chan)
        # decoder consumes the encoder stages in reverse (excluding the last)
        dec_chan_in_out = chan_in_out[:-1][::-1]
        self.up_blocks = nn.ModuleList(list(map(lambda c: UpBlock(c[1] * 2, c[0]), dec_chan_in_out)))
        self.conv_out = nn.Conv2d(2, 1, 1)
    def forward(self, x):
        #print('Input shape:', x.shape)
        b, *_ = x.shape
        residuals = []  # pre-downsample features, one per encoder stage
        i=0
        for (down_block, attn_block) in zip(self.down_blocks, self.attn_blocks):
            #print('Step', i, x.shape)
            i=i+1
            x, unet_res = down_block(x)
            residuals.append(unet_res)
            if attn_block is not None:
                x = attn_block(x)
        # bottleneck: residual double conv, then the global logit
        x = self.conv(x) + x
        enc_out = self.to_logit(x)
        # decode with the skip features, deepest first (innermost skip unused)
        for (up_block, res) in zip(self.up_blocks, residuals[:-1][::-1]):
            #print('in up blocK', x.shape)
            x = up_block(x, res)
        dec_out = self.conv_out(x)
        return enc_out.squeeze(), dec_out
#%% SPADE RESNET
class SPADEGenerator(BaseNetwork):
    """SPADE generator (ngf=64 variant).

    NOTE(review): unlike a standard SPADE generator, forward() returns a tuple
    of intermediate feature maps (after selected up-stages and the final conv)
    rather than a single tanh image — the tanh is commented out below.
    """
    def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False,opt=None):
        super(SPADEGenerator, self).__init__()
        self.opt = opt
        # NOTE(review): the caller's opt object is mutated with fixed settings
        # below; any code sharing this opt instance will observe the changes.
        self.opt.num_upsampling_layers = 'normal'
        self.opt.norm_G = 'spectralspadesyncbatch3x3'
        self.opt.ngf = 64
        self.opt.semantic_nc = 2
        self.opt.use_vae = False
        self.opt.crop_size = 256
        self.opt.normG = 'spectralinstance'
        self.opt.aspect_ratio = 1.0
        nf = self.opt.ngf
        opt = self.opt
        # spatial size of the initial latent feature map
        self.sw, self.sh = self.compute_latent_vector_size(opt)
        self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1)
        self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
        self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
        self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
        # progressively narrowing up-path: 16nf -> 8nf -> 4nf -> 2nf -> nf
        self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt)
        self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)
        self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)
        self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)
        final_nc = nf
        if opt.num_upsampling_layers == 'most':
            self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt)
            final_nc = nf // 2
        self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)
        self.up = nn.Upsample(scale_factor=2)
    def compute_latent_vector_size(self, opt):
        """Return (sw, sh): the starting feature-map size such that
        `num_up_layers` doublings reach opt.crop_size."""
        if opt.num_upsampling_layers == 'normal':
            num_up_layers = 5
        elif opt.num_upsampling_layers == 'more':
            num_up_layers = 6
        elif opt.num_upsampling_layers == 'most':
            num_up_layers = 7
        else:
            raise ValueError('opt.num_upsampling_layers [%s] not recognized' %
                             opt.num_upsampling_layers)
        sw = self.opt.crop_size // (2**num_up_layers)
        sh = round(sw / opt.aspect_ratio)
        return sw, sh
    def forward(self, input, z=None):
        """Run the generator on a segmentation map `input`; returns the tuple
        (output_5, output_6, output_9, output_11, output_15) of intermediate
        feature maps (names reflect the numbered print statements below)."""
        seg = input
        if self.opt.use_vae:
            # we sample z from unit normal and reshape the tensor
            if z is None:
                z = torch.randn(input.size(0), self.opt.z_dim,
                                dtype=torch.float32, device=input.get_device())
            x = self.fc(z)
            x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw)
        else:
            # we downsample segmap and run convolution
            x = F.interpolate(seg, size=(self.sh, self.sw))
            x = self.fc(x)
        #print('0,', x.shape)
        x = self.head_0(x, seg)
        #print('1,', x.shape)
        x = self.up(x)
        #print('2', x.shape)
        x = self.G_middle_0(x, seg)
        #print('3,', x.shape)
        if self.opt.num_upsampling_layers == 'more' or \
           self.opt.num_upsampling_layers == 'most':
            x = self.up(x)
        #print('4,', x.shape)
        #x = self.G_middle_1(x, seg)
        output_5 = x
        #print('5,', x.shape)
        x = self.up(x)
        output_6 = x
        #print('6,', x.shape)
        x = self.up_0(x, seg)
        #print('7,', x.shape)
        x = self.up(x)
        #print('8,', x.shape)
        x = self.up_1(x, seg)
        output_9 = x
        #print('9,', x.shape)
        x = self.up(x)
        #print('10,', x.shape)
        x = self.up_2(x, seg)
        #print('11,', x.shape)
        output_11 = x
        x = self.up(x)
        # print('12,', x.shape)
        x = self.up_3(x, seg)
        #print('13,', x.shape)
        if self.opt.num_upsampling_layers == 'most':
            x = self.up(x)
            x = self.up_4(x, seg)
        #print('14,', x.shape)
        x = self.conv_img(F.leaky_relu(x, 2e-1))
        # print('15,', x.shape)
        output_15 = x
        #x = F.tanh(x)
        #print('16,', x.shape)
        return output_5,output_6,output_9,output_11,output_15
#%% spade8
class SPADE8Generator(BaseNetwork):
    """SPADE generator variant (ngf=8, crop_size=256) that returns five
    intermediate decoder activations instead of a final image.

    Fixes over the original: removed two no-op triple-quoted string
    statements ('''this can be removed''' / '''til here''') that were
    evaluated and discarded on every forward pass, and dead commented-out
    debug prints.

    NOTE(review): __init__ overwrites fields on the shared ``opt`` object,
    mutating the caller's options -- confirm this is intended.
    """

    def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False, opt=None):
        super(SPADE8Generator, self).__init__()
        self.opt = opt
        # Force this variant's configuration regardless of caller settings.
        self.opt.num_upsampling_layers = 'normal'
        self.opt.norm_G = 'spectralspadesyncbatch3x3'
        self.opt.ngf = 8
        self.opt.semantic_nc = 2
        self.opt.use_vae = False
        self.opt.crop_size = 256
        self.opt.normG = 'spectralinstance'
        self.opt.aspect_ratio = 1.0
        nf = self.opt.ngf
        opt = self.opt
        self.sw, self.sh = self.compute_latent_vector_size(opt)
        # Project the 2-channel segmentation map to the widest feature depth.
        self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1)
        self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
        self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
        self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
        # Decoder: each block halves depth; nn.Upsample doubles resolution.
        self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt)
        self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)
        self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)
        self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)
        final_nc = nf
        if opt.num_upsampling_layers == 'most':
            self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt)
            final_nc = nf // 2
        self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)
        self.up = nn.Upsample(scale_factor=2)

    def compute_latent_vector_size(self, opt):
        """Return (sw, sh), the spatial size of the initial latent feature map."""
        if opt.num_upsampling_layers == 'normal':
            num_up_layers = 5
        elif opt.num_upsampling_layers == 'more':
            num_up_layers = 6
        elif opt.num_upsampling_layers == 'most':
            num_up_layers = 7
        else:
            raise ValueError('opt.num_upsampling_layers [%s] not recognized' %
                             opt.num_upsampling_layers)
        sw = self.opt.crop_size // (2**num_up_layers)
        sh = round(sw / opt.aspect_ratio)
        return sw, sh

    def forward(self, input, z=None):
        """Run the generator.

        Returns (output_5, output_6, output_9, output_11, output_15): taps at
        successive decoder resolutions; output_15 is the raw 3-channel
        conv_img result (no tanh is applied).
        """
        seg = input
        if self.opt.use_vae:
            # Sample z from a unit normal and project to the first feature map.
            if z is None:
                z = torch.randn(input.size(0), self.opt.z_dim,
                                dtype=torch.float32, device=input.get_device())
            x = self.fc(z)
            x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw)
        else:
            # Downsample the segmentation map and project it instead.
            x = F.interpolate(seg, size=(self.sh, self.sw))
            x = self.fc(x)
        x = self.head_0(x, seg)
        x = self.up(x)
        x = self.G_middle_0(x, seg)
        if self.opt.num_upsampling_layers == 'more' or \
           self.opt.num_upsampling_layers == 'most':
            x = self.up(x)
        x = self.G_middle_1(x, seg)
        output_5 = x
        x = self.up(x)
        output_6 = x
        x = self.up_0(x, seg)
        x = self.up(x)
        x = self.up_1(x, seg)
        output_9 = x
        x = self.up(x)
        x = self.up_2(x, seg)
        output_11 = x
        x = self.up(x)
        x = self.up_3(x, seg)
        if self.opt.num_upsampling_layers == 'most':
            x = self.up(x)
            x = self.up_4(x, seg)
        x = self.conv_img(F.leaky_relu(x, 2e-1))
        output_15 = x
        return output_5, output_6, output_9, output_11, output_15
#%%
class SPADE6Generator(BaseNetwork):
    """SPADE generator variant (ngf=6, crop_size=300) that returns five
    intermediate decoder activations instead of a final image.

    Fix over the original: the forward pass printed a debug line at every
    stage of every call; those prints have been removed.

    NOTE(review): __init__ overwrites fields on the shared ``opt`` object,
    mutating the caller's options -- confirm this is intended.
    """

    def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False, opt=None):
        super(SPADE6Generator, self).__init__()
        self.opt = opt
        # Force this variant's configuration regardless of caller settings.
        self.opt.num_upsampling_layers = 'normal'
        self.opt.norm_G = 'spectralspadesyncbatch3x3'
        self.opt.ngf = 6
        self.opt.semantic_nc = 2
        self.opt.use_vae = False
        self.opt.crop_size = 300
        self.opt.normG = 'spectralinstance'
        self.opt.aspect_ratio = 1.0
        nf = self.opt.ngf
        opt = self.opt
        self.sw, self.sh = self.compute_latent_vector_size(opt)
        # Project the 2-channel segmentation map to the widest feature depth.
        self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1)
        self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
        self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
        self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
        # Decoder: each block halves depth; nn.Upsample doubles resolution.
        self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt)
        self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)
        self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)
        self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)
        final_nc = nf
        if opt.num_upsampling_layers == 'most':
            self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt)
            final_nc = nf // 2
        self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)
        self.up = nn.Upsample(scale_factor=2)

    def compute_latent_vector_size(self, opt):
        """Return (sw, sh), the spatial size of the initial latent feature map."""
        if opt.num_upsampling_layers == 'normal':
            num_up_layers = 5
        elif opt.num_upsampling_layers == 'more':
            num_up_layers = 6
        elif opt.num_upsampling_layers == 'most':
            num_up_layers = 7
        else:
            raise ValueError('opt.num_upsampling_layers [%s] not recognized' %
                             opt.num_upsampling_layers)
        # NOTE(review): sw is hard-coded to 10; the computed value for
        # crop_size=300 with 5 up-layers would be 9. Kept as-is because any
        # trained checkpoint's layer shapes depend on it -- confirm.
        sw = 10  # self.opt.crop_size // (2**num_up_layers)
        sh = round(sw / opt.aspect_ratio)
        return sw, sh

    def forward(self, input, z=None):
        """Run the generator (debug prints from the original removed).

        Returns (output_5, output_6, output_9, output_11, output_15): taps at
        successive decoder resolutions; output_15 is the raw 3-channel
        conv_img result (no tanh is applied).
        """
        seg = input
        if self.opt.use_vae:
            # Sample z from a unit normal and project to the first feature map.
            if z is None:
                z = torch.randn(input.size(0), self.opt.z_dim,
                                dtype=torch.float32, device=input.get_device())
            x = self.fc(z)
            x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw)
        else:
            # Downsample the segmentation map and project it instead.
            x = F.interpolate(seg, size=(self.sh, self.sw))
            x = self.fc(x)
        x = self.head_0(x, seg)
        x = self.up(x)
        x = self.G_middle_0(x, seg)
        if self.opt.num_upsampling_layers == 'more' or \
           self.opt.num_upsampling_layers == 'most':
            x = self.up(x)
        x = self.G_middle_1(x, seg)
        output_5 = x
        x = self.up(x)
        output_6 = x
        x = self.up_0(x, seg)
        x = self.up(x)
        x = self.up_1(x, seg)
        output_9 = x
        x = self.up(x)
        x = self.up_2(x, seg)
        output_11 = x
        x = self.up(x)
        x = self.up_3(x, seg)
        if self.opt.num_upsampling_layers == 'most':
            x = self.up(x)
            x = self.up_4(x, seg)
        x = self.conv_img(F.leaky_relu(x, 2e-1))
        output_15 = x
        return output_5, output_6, output_9, output_11, output_15
#%% For the PIX2SPADE
class UNet768PIXSPADE(torch.nn.Module):
    """U-Net (768-channel bottleneck) whose decoder is conditioned on SPADE
    generator taps: after each of up5..up1 the matching tap from
    ``input_to_net`` is concatenated on the channel axis.

    NOTE(review): the extra channel widths baked into up4/up3/up2/up1 and
    classify (+1024, +1024, +256, +128, +3) assume a specific SPADE
    generator configuration for the taps -- confirm against the paired
    generator.
    """
    def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(UNet768PIXSPADE, self).__init__()
        # def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
        # C, H, W = in_shape
        # assert(C==3)
        print('UNET 768 SPADE')
        self.output_nc = output_nc
        # 1024
        # Encoder: note the first stage is hard-wired to 1 input channel.
        self.down1 = StackEncoder(1, 24, kernel_size=3)  # Channels: 1 in, 24 out;  Image size: 300 in, 150 out
        self.down2 = StackEncoder(24, 64, kernel_size=3)  # Channels: 24 in, 64 out;  Image size: 150 in, 75 out
        self.down3 = StackEncoder(64, 128, kernel_size=3)  # Channels: 64 in, 128 out;  Image size: 75 in, 38 out
        self.down4 = StackEncoder(128, 256, kernel_size=3)  # Channels: 128 in, 256 out;  Image size: 38 in, 19 out
        self.down5 = StackEncoder(256, 512, kernel_size=3)  # Channels: 256 in, 512 out;  Image size: 19 in, 10 out
        self.down6 = StackEncoder(512, 768, kernel_size=3)  # Channels: 512 in, 768 out;  Image size: 10 in, 5 out
        self.center = torch.nn.Sequential(
            ConvBnRelu2d(768, 768, kernel_size=3, padding=1, stride=1),  # Channels: 768 in, 768 out;  Image size: 5 in, 5 out
        )
        # x_big_channels, x_channels, y_channels
        # Decoder: input widths include the SPADE tap concatenated upstream.
        self.up6 = StackDecoder(768, 768, 512, kernel_size=3)  # Channels: 768+768 = 1536 in, 512 out;  Image size: 5 in, 10 out
        self.up5 = StackDecoder(512, 512, 256, kernel_size=3)  # Channels: 512+512 = 1024 in, 256 out;  Image size: 10 in, 19 out
        self.up4 = StackDecoder(256+1024, 256, 128, kernel_size=3)  # Channels: 256+256 = 512 in, 128 out;  Image size: 19 in, 38 out
        self.up3 = StackDecoder(128+1024, 128, 64, kernel_size=3)  # Channels: 128+128 = 256 in, 64 out;  Image size: 38 in, 75 out
        self.up2 = StackDecoder(64+256, 64, 24, kernel_size=3)  # Channels: 64+64 = 128 in, 24 out;  Image size: 75 in, 150 out
        self.up1 = StackDecoder(24+128, 24, 24, kernel_size=3)  # Channels: 24+24 = 48 in, 24 out;  Image size: 150 in, 300 out
        self.classify = torch.nn.Conv2d(24+3, output_nc, kernel_size=1, padding=0, stride=1, bias=True)  # Channels: 24 in, 1 out;  Image size: 300 in, 300 out
        self.final_out = torch.nn.Tanh()
    def _crop_concat(self, upsampled, bypass):
        """
        Crop y to the (h, w) of x and concat them.
        Used for the expansive path.
        Returns:
            The concatenated tensor
        """
        c = (bypass.size()[2] - upsampled.size()[2]) // 2
        bypass = torch.nn.functional.pad(bypass, (-c, -c, -c, -c))
        return torch.cat((upsampled, bypass), 1)
    def forward(self,x, input_to_net):
        """Run the U-Net conditioned on the five SPADE taps in input_to_net.

        Args:
            x: single-channel input image (down1 expects 1 channel).
            input_to_net: tuple (output_5, output_6, output_9, output_11,
                output_15) produced by a SPADE*Generator.forward.

        Returns:
            Tanh output reshaped to (-1, output_nc, 256, 256).
        """
        #print(input_to_net.shape)
        output_5,output_6,output_9,output_11,output_15 = input_to_net
        #print(x.shape)
        out = x  # ;print('x ',x.size())
        #
        down1, out = self.down1(out)  ##;
        #print('down1',down1.shape)  #256
        down2, out = self.down2(out)  # ;
        #print('down2',down2.shape)  #128
        down3, out = self.down3(out)  # ;
        #print('down3',down3.shape)  #64
        down4, out = self.down4(out)  # ;
        #print('down4',down4.shape)  #32
        down5, out = self.down5(out)  # ;
        #print('down5',down5.shape)  #16
        down6, out = self.down6(out)  # ;
        #print('down6',down6.shape)  #8
        pass  # ;
        #print('out  ',out.shape)
        out = self.center(out)
        #print('0',out.shape)
        out = self.up6(down6, out)
        #print('1',out.shape)
        out = self.up5(down5, out)
        # Inject the SPADE taps, coarsest to finest, before each next stage.
        out = torch.cat((out,output_5 ),1 )
        #print('2',out.shape)
        out = self.up4(down4, out)
        out = torch.cat((out,output_6 ),1 )
        #print('3',out.shape)
        out = self.up3(down3, out)
        out = torch.cat((out,output_9 ),1 )
        #print('4',out.shape)
        out = self.up2(down2, out)
        out = torch.cat((out,output_11 ),1 )
        #print('5',out.shape)
        out = self.up1(down1, out)
        # 1024
        out = torch.cat((out,output_15 ),1 )
        #print('6',out.shape)
        out = self.final_out(self.classify(out))
        # NOTE(review): output spatial size is hard-coded to 256x256 here.
        out = torch.reshape(out,(-1, self.output_nc, 256,256))#, dim=1)
        return out
#%%Unet for spade8
class UNet768PIXSPADE8SM(torch.nn.Module):
    """U-Net conditioned on SPADE8Generator taps (small variant): the taps
    output_5, output_6, output_9, output_11 are concatenated into the
    decoder; output_15 is unpacked but unused (its concat is commented out).

    NOTE(review): decoder widths (+128, +128, +32, +16) assume the ngf=8
    SPADE generator's tap channel counts -- confirm against
    SPADE8Generator.forward.
    """
    def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(UNet768PIXSPADE8SM, self).__init__()
        # def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
        # C, H, W = in_shape
        # assert(C==3)
        print('UNET 768 SPADE')
        self.output_nc = output_nc
        # 1024
        # Encoder: note the first stage is hard-wired to 1 input channel.
        self.down1 = StackEncoder(1, 24, kernel_size=3)  # Channels: 1 in, 24 out;  Image size: 300 in, 150 out
        self.down2 = StackEncoder(24, 64, kernel_size=3)  # Channels: 24 in, 64 out;  Image size: 150 in, 75 out
        self.down3 = StackEncoder(64, 128, kernel_size=3)  # Channels: 64 in, 128 out;  Image size: 75 in, 38 out
        self.down4 = StackEncoder(128, 256, kernel_size=3)  # Channels: 128 in, 256 out;  Image size: 38 in, 19 out
        self.down5 = StackEncoder(256, 512, kernel_size=3)  # Channels: 256 in, 512 out;  Image size: 19 in, 10 out
        self.down6 = StackEncoder(512, 768, kernel_size=3)  # Channels: 512 in, 768 out;  Image size: 10 in, 5 out
        self.center = torch.nn.Sequential(
            ConvBnRelu2d(768, 768, kernel_size=3, padding=1, stride=1),  # Channels: 768 in, 768 out;  Image size: 5 in, 5 out
        )
        # x_big_channels, x_channels, y_channels
        # Decoder: input widths include the SPADE tap concatenated upstream.
        self.up6 = StackDecoder(768, 768, 512, kernel_size=3)  # Channels: 768+768 = 1536 in, 512 out;  Image size: 5 in, 10 out
        self.up5 = StackDecoder(512, 512, 256, kernel_size=3)  # Channels: 512+512 = 1024 in, 256 out;  Image size: 10 in, 19 out
        self.up4 = StackDecoder(256+128, 256, 128, kernel_size=3)  # Channels: 256+256 = 512 in, 128 out;  Image size: 19 in, 38 out
        self.up3 = StackDecoder(128+128, 128, 64, kernel_size=3)  # Channels: 128+128 = 256 in, 64 out;  Image size: 38 in, 75 out
        self.up2 = StackDecoder(64+32, 64, 24, kernel_size=3)  # Channels: 64+64 = 128 in, 24 out;  Image size: 75 in, 150 out
        self.up1 = StackDecoder(24+16, 24, 24, kernel_size=3)  # Channels: 24+24 = 48 in, 24 out;  Image size: 150 in, 300 out
        self.classify = torch.nn.Conv2d(24, output_nc, kernel_size=1, padding=0, stride=1, bias=True)  # Channels: 24 in, 1 out;  Image size: 300 in, 300 out
        self.final_out = torch.nn.Tanh()
    def _crop_concat(self, upsampled, bypass):
        """
        Crop y to the (h, w) of x and concat them.
        Used for the expansive path.
        Returns:
            The concatenated tensor
        """
        c = (bypass.size()[2] - upsampled.size()[2]) // 2
        bypass = torch.nn.functional.pad(bypass, (-c, -c, -c, -c))
        return torch.cat((upsampled, bypass), 1)
    def forward(self,x, input_to_net):
        """Run the U-Net conditioned on four of the five SPADE taps.

        Args:
            x: single-channel input image (down1 expects 1 channel).
            input_to_net: tuple (output_5, output_6, output_9, output_11,
                output_15) from SPADE8Generator.forward; output_15 is unused.

        Returns:
            Tanh output reshaped to (-1, output_nc, 256, 256).
        """
        #print(input_to_net.shape)
        output_5,output_6,output_9,output_11,output_15 = input_to_net
        #print(x.shape)
        out = x  # ;print('x ',x.size())
        #
        down1, out = self.down1(out)  ##;
        #print('down1',down1.shape)  #256
        down2, out = self.down2(out)  # ;
        #print('down2',down2.shape)  #128
        down3, out = self.down3(out)  # ;
        #print('down3',down3.shape)  #64
        down4, out = self.down4(out)  # ;
        #print('down4',down4.shape)  #32
        down5, out = self.down5(out)  # ;
        #print('down5',down5.shape)  #16
        down6, out = self.down6(out)  # ;
        #print('down6',down6.shape)  #8
        pass  # ;
        #print('out  ',out.shape)
        out = self.center(out)
        #print('0',out.shape)
        out = self.up6(down6, out)
        #print('1',out.shape)
        out = self.up5(down5, out)
        # Inject the SPADE taps, coarsest to finest, before each next stage.
        out = torch.cat((out,output_5 ),1 )
        #print('2',out.shape)
        out = self.up4(down4, out)
        out = torch.cat((out,output_6 ),1 )
        #print('3',out.shape)
        out = self.up3(down3, out)
        out = torch.cat((out,output_9 ),1 )
        #print('4',out.shape)
        out = self.up2(down2, out)
        out = torch.cat((out,output_11 ),1 )
        #print('5',out.shape)
        out = self.up1(down1, out)
        # 1024
        #out = torch.cat((out,output_15 ),1 )
        #print('6',out.shape)
        out = self.final_out(self.classify(out))
        # NOTE(review): output spatial size is hard-coded to 256x256 here.
        out = torch.reshape(out,(-1, self.output_nc, 256,256))#, dim=1)
        return out
# NOTE: restored from a corrupted copy -- stray dataset-column residue
# removed and two imports whose leading 'f' was stripped ('rom random...',
# 'rom models...') repaired; no import was added or removed.
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
from math import floor, log2
from functools import partial
from linear_attention_transformer import ImageLinearAttention
from random import random
import numpy as np
import torch.nn.functional as F
from models.networks_SPADE.base_network import BaseNetwork
from models.networks_SPADE.architecture import ResnetBlock as ResnetBlock
from models.networks_SPADE.architecture import SPADEResnetBlock as SPADEResnetBlock
################################################
class GANLoss(nn.Module):
    """Objective wrapper abstracting over GAN flavours.

    Supported modes: 'lsgan' (MSE), 'vanilla' (BCE-with-logits) and
    'wgangp' (signed mean of the critic output, no target tensor).
    """

    def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
        super(GANLoss, self).__init__()
        # Buffers so the target labels follow .to()/.cuda() without training.
        self.register_buffer('real_label', torch.tensor(target_real_label))
        self.register_buffer('fake_label', torch.tensor(target_fake_label))
        self.gan_mode = gan_mode
        criterion_by_mode = {'lsgan': nn.MSELoss, 'vanilla': nn.BCEWithLogitsLoss}
        if gan_mode in criterion_by_mode:
            self.loss = criterion_by_mode[gan_mode]()
        elif gan_mode == 'wgangp':
            self.loss = None  # wgangp needs no pointwise criterion
        else:
            raise NotImplementedError('gan mode %s not implemented' % gan_mode)

    def get_target_tensor(self, prediction, target_is_real):
        """Return a label tensor broadcast to the prediction's shape."""
        label = self.real_label if target_is_real else self.fake_label
        return label.expand_as(prediction)

    def __call__(self, prediction, target_is_real):
        """Score the discriminator prediction against real/fake targets."""
        if self.gan_mode == 'wgangp':
            return -prediction.mean() if target_is_real else prediction.mean()
        target_tensor = self.get_target_tensor(prediction, target_is_real)
        return self.loss(prediction, target_tensor)
class UnetGANLoss(nn.Module):
    """GAN loss for a U-Net discriminator that emits both a global
    prediction and a per-pixel (1, 256, 256) prediction map; both heads are
    scored with BCE-with-logits and the two means are summed.

    Fix over the original: the pixel-map target buffers were built from
    numpy arrays (float64), which made BCEWithLogitsLoss fail against
    float32 predictions; they are now created as float32 torch tensors.
    """

    def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
        super(UnetGANLoss, self).__init__()
        # Buffers so the targets follow the module across devices/dtypes.
        self.register_buffer('real_label_1', torch.tensor(target_real_label))
        self.register_buffer('real_label_2', torch.ones((1, 256, 256)))
        self.register_buffer('fake_label_1', torch.tensor(target_fake_label))
        self.register_buffer('fake_label_2', torch.zeros((1, 256, 256)))
        self.loss_1 = nn.BCEWithLogitsLoss()
        self.loss_2 = nn.BCEWithLogitsLoss()

    def get_target_tensor(self, prediction_1, prediction_2, target_is_real):
        """Return (global_target, pixel_target) expanded to the prediction shapes."""
        if target_is_real:
            target_tensor_1 = self.real_label_1
            target_tensor_2 = self.real_label_2
        else:
            target_tensor_1 = self.fake_label_1
            target_tensor_2 = self.fake_label_2
        return target_tensor_1.expand_as(prediction_1), target_tensor_2.expand_as(prediction_2)

    def __call__(self, prediction_1, prediction_2, target_is_real):
        """Sum of the mean BCE losses of the two discriminator heads."""
        target_tensor_1, target_tensor_2 = self.get_target_tensor(prediction_1, prediction_2, target_is_real)
        loss_1 = self.loss_1(prediction_1, target_tensor_1)
        loss_2 = self.loss_2(prediction_2, target_tensor_2)
        return loss_1.mean() + loss_2.mean()
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
    """WGAN-GP gradient penalty.

    Args:
        netD: discriminator/critic network.
        real_data, fake_data: batches of real and generated samples.
        device: torch device for the interpolation coefficients.
        type: 'real' | 'fake' | 'mixed' -- which points to evaluate at.
        constant: target gradient norm.
        lambda_gp: penalty weight; <= 0 disables the penalty.

    Returns:
        (penalty, flattened_gradients), or (0.0, None) when disabled.
    """
    if not lambda_gp > 0.0:
        return 0.0, None
    if type == 'real':
        interpolatesv = real_data
    elif type == 'fake':
        interpolatesv = fake_data
    elif type == 'mixed':
        # Per-sample random mix of real and fake, broadcast over all elements.
        alpha = torch.rand(real_data.shape[0], 1, device=device)
        alpha = alpha.expand(
            real_data.shape[0],
            real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
        interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
    else:
        raise NotImplementedError('{} not implemented'.format(type))
    interpolatesv.requires_grad_(True)
    disc_interpolates = netD(interpolatesv)
    grads = torch.autograd.grad(
        outputs=disc_interpolates, inputs=interpolatesv,
        grad_outputs=torch.ones(disc_interpolates.size()).to(device),
        create_graph=True, retain_graph=True, only_inputs=True)
    flat_grads = grads[0].view(real_data.size(0), -1)
    # eps keeps the norm differentiable at exactly zero gradient.
    penalty = (((flat_grads + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp
    return penalty, flat_grads
class ResnetGenerator(nn.Module):
    """Resnet generator: 7x7 stem, two stride-2 downsampling convs,
    n_blocks residual blocks, two transposed-conv upsampling stages, and a
    Tanh-bounded 7x7 output conv."""
    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
        """Build the network.

        Parameters:
            input_nc / output_nc -- channels of input / output images
            ngf -- base filter count
            norm_layer -- normalization layer class (or functools.partial)
            use_dropout -- enable dropout inside the resnet blocks
            n_blocks -- number of resnet blocks (>= 0)
            padding_type -- 'reflect' | 'replicate' | 'zero'
        """
        assert(n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        # InstanceNorm has no affine shift by default, so convs need a bias.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        model = [nn.ReflectionPad2d(3),
                 nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
                 norm_layer(ngf),
                 nn.ReLU(True)]
        n_downsampling = 2
        for i in range(n_downsampling):  # add downsampling layers
            mult = 2 ** i
            model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
                      norm_layer(ngf * mult * 2),
                      nn.ReLU(True)]
        mult = 2 ** n_downsampling
        for i in range(n_blocks):       # add ResNet blocks
            # NOTE(review): resolves to the ResnetBlock defined later in this
            # file, which shadows the one imported from architecture.
            model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
        for i in range(n_downsampling):  # add upsampling layers
            mult = 2 ** (n_downsampling - i)
            model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
                                         kernel_size=3, stride=2,
                                         padding=1, output_padding=1,
                                         bias=use_bias),
                      norm_layer(int(ngf * mult / 2)),
                      nn.ReLU(True)]
        model += [nn.ReflectionPad2d(3)]
        model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        model += [nn.Tanh()]
        self.model = nn.Sequential(*model)
    def forward(self, input):
        """Standard forward pass."""
        return self.model(input)
class ResnetGeneratorUp(nn.Module):
    """Variant of ResnetGenerator whose upsampling path uses
    nearest-neighbour Upsample + ReflectionPad + Conv instead of
    transposed convolutions; otherwise the architecture is identical."""
    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
        """Build the network; parameters match ResnetGenerator."""
        assert(n_blocks >= 0)
        super(ResnetGeneratorUp, self).__init__()
        # InstanceNorm has no affine shift by default, so convs need a bias.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        model = [nn.ReflectionPad2d(3),
                 nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
                 norm_layer(ngf),
                 nn.ReLU(True)]
        n_downsampling = 2
        for i in range(n_downsampling):  # add downsampling layers
            mult = 2 ** i
            model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
                      norm_layer(ngf * mult * 2),
                      nn.ReLU(True)]
        mult = 2 ** n_downsampling
        for i in range(n_blocks):       # add ResNet blocks
            model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
        for i in range(n_downsampling):  # add upsampling layers
            # NOTE(review): unlike ResnetGenerator, no norm/ReLU follows these
            # upsampling convs -- confirm that is intentional.
            mult = 2 ** (n_downsampling - i)
            model += [nn.Upsample(scale_factor = 2, mode='nearest'),
                      nn.ReflectionPad2d(1),
                      nn.Conv2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=1, padding=0),]
        model += [nn.ReflectionPad2d(3)]
        model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        model += [nn.Tanh()]
        self.model = nn.Sequential(*model)
    def forward(self, input):
        """Standard forward pass."""
        return self.model(input)
class ResnetBlock(nn.Module):
    """Residual block: x + conv_block(x), where conv_block is two 3x3
    convolutions (each preceded by the requested padding and followed by a
    norm layer) with ReLU and optional dropout between them."""

    def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)

    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        """Assemble the two-conv body as an nn.Sequential.

        padding_type 'zero' uses conv padding=1; 'reflect'/'replicate'
        prepend an explicit padding layer instead.
        """
        pad_layers = {'reflect': nn.ReflectionPad2d, 'replicate': nn.ReplicationPad2d}

        def conv_padding(layers):
            # Returns the conv's own padding, appending an explicit padding
            # module when the scheme is not plain zero padding.
            if padding_type == 'zero':
                return 1
            if padding_type in pad_layers:
                layers.append(pad_layers[padding_type](1))
                return 0
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)

        layers = []
        p = conv_padding(layers)
        layers += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
        if use_dropout:
            layers.append(nn.Dropout(0.5))
        p = conv_padding(layers)
        layers += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
        return nn.Sequential(*layers)

    def forward(self, x):
        """Apply the block with a skip connection."""
        return x + self.conv_block(x)
class UnetGenerator(nn.Module):
    """U-Net generator assembled recursively from UnetSkipConnectionBlock,
    built from the innermost bottleneck outwards."""

    def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Build a U-Net with num_downs downsampling levels; images of size
        2**num_downs are reduced to 1x1 at the bottleneck."""
        super(UnetGenerator, self).__init__()
        # Innermost bottleneck block at ngf*8 width.
        block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None,
                                        submodule=None, norm_layer=norm_layer, innermost=True)
        # Intermediate ngf*8 levels (these carry the optional dropout).
        for _ in range(num_downs - 5):
            block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=block,
                                            norm_layer=norm_layer, use_dropout=use_dropout)
        # Widening stages: ngf*4, ngf*2, ngf.
        for mult in (4, 2, 1):
            block = UnetSkipConnectionBlock(ngf * mult, ngf * mult * 2, input_nc=None,
                                            submodule=block, norm_layer=norm_layer)
        # Outermost level maps input_nc -> output_nc.
        self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc,
                                             submodule=block, outermost=True, norm_layer=norm_layer)

    def forward(self, input):
        """Standard forward pass through the assembled recursive net."""
        return self.model(input)
class UnetSkipConnectionBlock(nn.Module):
    """One U-Net level: |down -- submodule -- up|, with a channel-wise skip
    connection (input concatenated with output) at every level except the
    outermost one."""
    def __init__(self, outer_nc, inner_nc, input_nc=None,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Build the level.

        Parameters:
            outer_nc / inner_nc -- channels outside / inside this level
            input_nc -- input channels (defaults to outer_nc)
            submodule -- the previously built inner levels, or None
            outermost / innermost -- position flags controlling the head/tail
            norm_layer -- normalization layer class (or functools.partial)
            use_dropout -- append Dropout(0.5) on intermediate levels
        """
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        # InstanceNorm has no affine shift by default, so convs need a bias.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc
        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
                             stride=2, padding=1, bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc)
        if outermost:
            # Outermost: no norm on the way down, Tanh on the way up;
            # upconv sees inner_nc*2 channels because of the inner skip.
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            # Innermost: no submodule, upconv sees only inner_nc channels.
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1, bias=use_bias)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1, bias=use_bias)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]
            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up
        self.model = nn.Sequential(*model)
    def forward(self, x):
        """Apply the level; non-outermost levels concatenate the skip."""
        if self.outermost:
            return self.model(x)
        else:   # add skip connections
            return torch.cat([x, self.model(x)], 1)
#%%% Unet from DeepMact
class ConvBnRelu2d(torch.nn.Module):
    """Conv2d (or ConvTranspose2d when is_decoder) -> optional BatchNorm ->
    optional in-place ReLU; the conv never carries a bias."""

    def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, output_padding=1, dilation=1, stride=1, groups=1, is_bn=True, is_relu=True, is_decoder=False):
        super(ConvBnRelu2d, self).__init__()
        if is_decoder:
            # Decoder variant: learned upsampling via transposed convolution.
            self.transpConv = torch.nn.ConvTranspose2d(
                in_channels, out_channels, kernel_size=kernel_size, padding=padding,
                output_padding=output_padding, stride=stride, dilation=dilation,
                groups=groups, bias=False)
            self.conv = None
        else:
            self.transpConv = None
            self.conv = torch.nn.Conv2d(
                in_channels, out_channels, kernel_size=kernel_size, padding=padding,
                stride=stride, dilation=dilation, groups=groups, bias=False)
        # Identity check kept from the original: only a literal False disables.
        self.bn = torch.nn.BatchNorm2d(out_channels, eps=1e-4) if is_bn is not False else None
        self.relu = torch.nn.ReLU(inplace=True) if is_relu is not False else None

    def forward(self, x):
        """Apply the (transposed) conv, then whichever of bn/relu exist."""
        x = self.transpConv(x) if self.conv is None else self.conv(x)
        if self.bn is not None:
            x = self.bn(x)
        if self.relu is not None:
            x = self.relu(x)
        return x
class StackEncoder(torch.nn.Module):
    """Encoder stage: two ConvBnRelu2d layers, then 2x2 max-pooling.

    forward returns both the pre-pool features (for the decoder skip) and
    the pooled, half-resolution features.
    """

    def __init__(self, x_channels, y_channels, kernel_size=3, stride=1):
        super(StackEncoder, self).__init__()
        same_pad = (kernel_size - 1) // 2  # keeps spatial size for odd kernels
        self.encode = torch.nn.Sequential(
            ConvBnRelu2d(x_channels, y_channels, kernel_size=kernel_size, padding=same_pad, dilation=1, stride=stride, groups=1),
            ConvBnRelu2d(y_channels, y_channels, kernel_size=kernel_size, padding=same_pad, dilation=1, stride=stride, groups=1),
        )

    def forward(self, x):
        """Return (features, pooled_features)."""
        features = self.encode(x)
        pooled = torch.nn.functional.max_pool2d(features, kernel_size=2, stride=2)
        return features, pooled
class StackDecoder(torch.nn.Module):
    """Decoder stage: bilinearly upsample x to the skip tensor's spatial
    size, concatenate them on channels, then refine with three
    ConvBnRelu2d layers.

    Fix over the original: uses torch.nn.functional.interpolate instead of
    the deprecated torch.nn.functional.upsample (identical semantics).
    """

    def __init__(self, x_big_channels, x_channels, y_channels, kernel_size=3, stride=1):
        super(StackDecoder, self).__init__()
        padding = (kernel_size - 1) // 2  # keeps spatial size for odd kernels
        self.decode = torch.nn.Sequential(
            ConvBnRelu2d(x_big_channels + x_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),
            ConvBnRelu2d(y_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),
            ConvBnRelu2d(y_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),
        )

    def forward(self, x_big, x):
        """Args:
            x_big: skip-connection features; their size sets the target size.
            x: coarser features to upsample.

        Returns refined features with y_channels channels at x_big's size.
        """
        N, C, H, W = x_big.size()
        y = torch.nn.functional.interpolate(x, size=(H, W), mode='bilinear', align_corners=True)
        y = torch.cat([y, x_big], 1)
        y = self.decode(y)
        return y
# 768
class UNet768(torch.nn.Module):
    """Plain U-Net with a 768-channel bottleneck (channels 24..768, six
    encoder/decoder stages), Tanh output head."""
    def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False):
        # num_downs / ngf / norm_layer / use_dropout are accepted for
        # interface parity with the other generators but are unused here.
        super(UNet768, self).__init__()
        # def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
        # C, H, W = in_shape
        # assert(C==3)
        self.output_nc = output_nc
        # 1024
        self.down1 = StackEncoder(input_nc, 24, kernel_size=3)  # Channels: 1 in, 24 out;  Image size: 300 in, 150 out
        self.down2 = StackEncoder(24, 64, kernel_size=3)  # Channels: 24 in, 64 out;  Image size: 150 in, 75 out
        self.down3 = StackEncoder(64, 128, kernel_size=3)  # Channels: 64 in, 128 out;  Image size: 75 in, 38 out
        self.down4 = StackEncoder(128, 256, kernel_size=3)  # Channels: 128 in, 256 out;  Image size: 38 in, 19 out
        self.down5 = StackEncoder(256, 512, kernel_size=3)  # Channels: 256 in, 512 out;  Image size: 19 in, 10 out
        self.down6 = StackEncoder(512, 768, kernel_size=3)  # Channels: 512 in, 768 out;  Image size: 10 in, 5 out
        self.center = torch.nn.Sequential(
            ConvBnRelu2d(768, 768, kernel_size=3, padding=1, stride=1),  # Channels: 768 in, 768 out;  Image size: 5 in, 5 out
        )
        # x_big_channels, x_channels, y_channels
        self.up6 = StackDecoder(768, 768, 512, kernel_size=3)  # Channels: 768+768 = 1536 in, 512 out;  Image size: 5 in, 10 out
        self.up5 = StackDecoder(512, 512, 256, kernel_size=3)  # Channels: 512+512 = 1024 in, 256 out;  Image size: 10 in, 19 out
        self.up4 = StackDecoder(256, 256, 128, kernel_size=3)  # Channels: 256+256 = 512 in, 128 out;  Image size: 19 in, 38 out
        self.up3 = StackDecoder(128, 128, 64, kernel_size=3)  # Channels: 128+128 = 256 in, 64 out;  Image size: 38 in, 75 out
        self.up2 = StackDecoder(64, 64, 24, kernel_size=3)  # Channels: 64+64 = 128 in, 24 out;  Image size: 75 in, 150 out
        self.up1 = StackDecoder(24, 24, 24, kernel_size=3)  # Channels: 24+24 = 48 in, 24 out;  Image size: 150 in, 300 out
        self.classify = torch.nn.Conv2d(24, output_nc, kernel_size=1, padding=0, stride=1, bias=True)  # Channels: 24 in, 1 out;  Image size: 300 in, 300 out
        self.final_out = torch.nn.Tanh()
    def _crop_concat(self, upsampled, bypass):
        """Crop `bypass` to `upsampled`'s spatial size and concat on channels."""
        c = (bypass.size()[2] - upsampled.size()[2]) // 2
        bypass = torch.nn.functional.pad(bypass, (-c, -c, -c, -c))
        return torch.cat((upsampled, bypass), 1)
    def forward(self, x):
        """Encode, bottleneck, then decode with skip connections; output is
        Tanh-bounded and reshaped to (-1, output_nc, H, W) of the input."""
        out = x  # ;print('x    ',x.size())
        #
        down1, out = self.down1(out)  ##;
        #print('down1',down1.shape)  #256
        down2, out = self.down2(out)  # ;
        #print('down2',down2.shape)  #128
        down3, out = self.down3(out)  # ;
        #print('down3',down3.shape)  #64
        down4, out = self.down4(out)  # ;
        #print('down4',down4.shape)  #32
        down5, out = self.down5(out)  # ;
        #print('down5',down5.shape)  #16
        down6, out = self.down6(out)  # ;
        #print('down6',down6.shape)  #8
        pass  # ;
        #print('out  ',out.shape)
        out = self.center(out)
        #print('0',out.shape)
        out = self.up6(down6, out)
        #print('1',out.shape)
        out = self.up5(down5, out)
        #print('2',out.shape)
        out = self.up4(down4, out)
        #print('3',out.shape)
        out = self.up3(down3, out)
        #print('4',out.shape)
        out = self.up2(down2, out)
        #print('5',out.shape)
        out = self.up1(down1, out)
        # 1024
        #print('6',out.shape)
        out = self.final_out(self.classify(out))
        out = torch.reshape(out,(-1, self.output_nc, x.shape[2],x.shape[3]))#, dim=1)
        return out
#%%Unet_spade_768_300
#%%sigm
class UNet768Sigm(torch.nn.Module):
    """UNet768 variant with a Sigmoid output head instead of Tanh.

    Fix over the original: the final reshape hard-coded batch size 1 and a
    256x256 output, crashing for batch sizes > 1; it now uses -1 for the
    batch dimension and the actual input size, matching UNet768.
    """

    def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False):
        # num_downs / ngf / norm_layer / use_dropout are accepted for
        # interface parity with the other generators but are unused here.
        super(UNet768Sigm, self).__init__()
        self.output_nc = output_nc
        # Encoder: channel growth 24 -> 768 while halving resolution per stage.
        self.down1 = StackEncoder(input_nc, 24, kernel_size=3)
        self.down2 = StackEncoder(24, 64, kernel_size=3)
        self.down3 = StackEncoder(64, 128, kernel_size=3)
        self.down4 = StackEncoder(128, 256, kernel_size=3)
        self.down5 = StackEncoder(256, 512, kernel_size=3)
        self.down6 = StackEncoder(512, 768, kernel_size=3)
        # Bottleneck.
        self.center = torch.nn.Sequential(
            ConvBnRelu2d(768, 768, kernel_size=3, padding=1, stride=1),
        )
        # Decoder: (skip_channels, x_channels, out_channels) per stage.
        self.up6 = StackDecoder(768, 768, 512, kernel_size=3)
        self.up5 = StackDecoder(512, 512, 256, kernel_size=3)
        self.up4 = StackDecoder(256, 256, 128, kernel_size=3)
        self.up3 = StackDecoder(128, 128, 64, kernel_size=3)
        self.up2 = StackDecoder(64, 64, 24, kernel_size=3)
        self.up1 = StackDecoder(24, 24, 24, kernel_size=3)
        self.classify = torch.nn.Conv2d(24, output_nc, kernel_size=1, padding=0, stride=1, bias=True)
        self.final_out = torch.nn.Sigmoid()

    def _crop_concat(self, upsampled, bypass):
        """Crop `bypass` to `upsampled`'s spatial size and concat on channels."""
        c = (bypass.size()[2] - upsampled.size()[2]) // 2
        bypass = torch.nn.functional.pad(bypass, (-c, -c, -c, -c))
        return torch.cat((upsampled, bypass), 1)

    def forward(self, x):
        """Encode, bottleneck, decode with skips; return sigmoid output in
        (0, 1) shaped (-1, output_nc, H, W) of the input."""
        out = x
        down1, out = self.down1(out)
        down2, out = self.down2(out)
        down3, out = self.down3(out)
        down4, out = self.down4(out)
        down5, out = self.down5(out)
        down6, out = self.down6(out)
        out = self.center(out)
        out = self.up6(down6, out)
        out = self.up5(down5, out)
        out = self.up4(down4, out)
        out = self.up3(down3, out)
        out = self.up2(down2, out)
        out = self.up1(down1, out)
        out = self.final_out(self.classify(out))
        # BUG FIX: was torch.reshape(out, (1, self.output_nc, 256, 256)),
        # which failed for batch > 1 and non-256 inputs.
        out = torch.reshape(out, (-1, self.output_nc, x.shape[2], x.shape[3]))
        return out
class NLayerDiscriminator(nn.Module):
    """PatchGAN discriminator with a configurable number of conv layers."""

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
        """Build the discriminator.

        Args:
            input_nc: number of channels in the input images.
            ndf: number of filters in the first conv layer.
            n_layers: number of downsampling conv blocks.
            norm_layer: normalisation layer class (possibly a functools.partial).
        """
        super(NLayerDiscriminator, self).__init__()
        # BatchNorm2d has affine parameters, so a conv bias would be
        # redundant; only InstanceNorm2d needs one.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        kernel, pad = 4, 1
        layers = [
            nn.Conv2d(input_nc, ndf, kernel_size=kernel, stride=2, padding=pad),
            nn.LeakyReLU(0.2, True),
        ]
        mult = 1
        for n in range(1, n_layers):  # progressively widen the feature maps
            prev_mult, mult = mult, min(2 ** n, 8)
            layers += [
                nn.Conv2d(ndf * prev_mult, ndf * mult, kernel_size=kernel,
                          stride=2, padding=pad, bias=use_bias),
                norm_layer(ndf * mult),
                nn.LeakyReLU(0.2, True),
            ]
        # One extra stride-1 block before the prediction head.
        prev_mult, mult = mult, min(2 ** n_layers, 8)
        layers += [
            nn.Conv2d(ndf * prev_mult, ndf * mult, kernel_size=kernel,
                      stride=1, padding=pad, bias=use_bias),
            norm_layer(ndf * mult),
            nn.LeakyReLU(0.2, True),
        ]
        # Final 1-channel patch-wise prediction map.
        layers += [nn.Conv2d(ndf * mult, 1, kernel_size=kernel, stride=1, padding=pad)]
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        """Return the patch-wise realness map for ``input``."""
        return self.model(input)
class PixelDiscriminator(nn.Module):
    """1x1-conv PatchGAN discriminator: classifies every pixel independently."""

    def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
        super(PixelDiscriminator, self).__init__()
        # BatchNorm2d carries its own affine bias, so the conv bias is only
        # needed when instance normalisation is used.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        stages = [
            nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
            norm_layer(ndf * 2),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias),
        ]
        self.net = nn.Sequential(*stages)

    def forward(self, input):
        """Return the per-pixel realness map for ``input``."""
        return self.net(input)
#%% Unet as Discriminator -- DiffAugment helpers
def DiffAugment(x, types=()):
    """Apply the differentiable augmentations named in ``types`` to ``x``.

    Args:
        x: image batch tensor of shape (B, C, H, W).
        types: iterable of keys into ``AUGMENT_FNS`` ('color', 'translation',
            'cutout'); each key's functions are applied in registry order.

    Returns:
        The augmented tensor, made contiguous.
    """
    # An immutable default tuple replaces the original mutable default list
    # (classic Python pitfall, even though the list was never mutated here).
    for p in types:
        for f in AUGMENT_FNS[p]:
            x = f(x)
    return x.contiguous(memory_format=torch.contiguous_format)
def rand_brightness(x):
    """Shift each image by a random brightness offset drawn from [-0.5, 0.5)."""
    offset = torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) - 0.5
    return x + offset
def rand_saturation(x):
    """Randomly scale per-image saturation: deviation from the per-pixel
    channel mean is multiplied by a factor drawn uniformly from [0, 2)."""
    mean = x.mean(dim=1, keepdim=True)
    factor = torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) * 2
    return (x - mean) * factor + mean
def rand_contrast(x):
    """Randomly scale per-image contrast: deviation from the whole-image mean
    is multiplied by a factor drawn uniformly from [0.5, 1.5)."""
    mean = x.mean(dim=[1, 2, 3], keepdim=True)
    factor = torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) + 0.5
    return (x - mean) * factor + mean
def rand_translation(x, ratio=0.125):
    """Randomly translate each image by up to ``ratio`` of its size.

    Shifts are sampled per image, independently for the two spatial axes;
    pixels shifted in from outside are zeros (from a one-pixel zero pad).
    """
    shift_x, shift_y = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
    translation_x = torch.randint(-shift_x, shift_x + 1, size=[x.size(0), 1, 1], device=x.device)
    translation_y = torch.randint(-shift_y, shift_y + 1, size=[x.size(0), 1, 1], device=x.device)
    # Index grids over (batch, height, width).
    grid_batch, grid_x, grid_y = torch.meshgrid(
        torch.arange(x.size(0), dtype=torch.long, device=x.device),
        torch.arange(x.size(2), dtype=torch.long, device=x.device),
        torch.arange(x.size(3), dtype=torch.long, device=x.device),
    )
    # +1 accounts for the one-pixel padding added below; indices are clamped
    # into the padded range [0, size + 1].
    grid_x = torch.clamp(grid_x + translation_x + 1, 0, x.size(2) + 1)
    grid_y = torch.clamp(grid_y + translation_y + 1, 0, x.size(3) + 1)
    x_pad = F.pad(x, [1, 1, 1, 1, 0, 0, 0, 0])
    # Gather the shifted pixels via advanced indexing on the padded tensor.
    x = x_pad.permute(0, 2, 3, 1).contiguous()[grid_batch, grid_x, grid_y].permute(0, 3, 1, 2).contiguous(memory_format = torch.contiguous_format)
    return x
def rand_cutout(x, ratio=0.5):
    """Zero out one random rectangular patch (side = ``ratio`` * image size)
    per image, shared across channels."""
    cutout_size = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
    offset_x = torch.randint(0, x.size(2) + (1 - cutout_size[0] % 2), size=[x.size(0), 1, 1], device=x.device)
    offset_y = torch.randint(0, x.size(3) + (1 - cutout_size[1] % 2), size=[x.size(0), 1, 1], device=x.device)
    # Coordinates of every cell inside the cutout window, per image.
    grid_batch, grid_x, grid_y = torch.meshgrid(
        torch.arange(x.size(0), dtype=torch.long, device=x.device),
        torch.arange(cutout_size[0], dtype=torch.long, device=x.device),
        torch.arange(cutout_size[1], dtype=torch.long, device=x.device),
    )
    # Centre the window on the sampled offset and clamp it inside the image.
    grid_x = torch.clamp(grid_x + offset_x - cutout_size[0] // 2, min=0, max=x.size(2) - 1)
    grid_y = torch.clamp(grid_y + offset_y - cutout_size[1] // 2, min=0, max=x.size(3) - 1)
    mask = torch.ones(x.size(0), x.size(2), x.size(3), dtype=x.dtype, device=x.device)
    mask[grid_batch, grid_x, grid_y] = 0
    x = x * mask.unsqueeze(1)
    return x
# Registry mapping DiffAugment type names to the ordered list of
# augmentation functions applied for that type.
AUGMENT_FNS = {
    'color': [rand_brightness, rand_saturation, rand_contrast],
    'translation': [rand_translation],
    'cutout': [rand_cutout],
}
def random_float(lo, hi):
    """Return a float drawn uniformly from [lo, hi)."""
    span = hi - lo
    return lo + span * random()
def random_crop_and_resize(tensor, scale):
    """Crop a random square of side ``scale * h`` and resize it back to (h, h).

    NOTE(review): the crop window size is derived from the height only and
    the output is forced to (h, h), so this assumes square inputs -- confirm
    callers always pass square tensors.
    """
    b, c, h, _ = tensor.shape
    new_width = int(h * scale)
    delta = h - new_width
    # Independent random offsets for the two spatial axes.
    h_delta = int(random() * delta)
    w_delta = int(random() * delta)
    cropped = tensor[:, :, h_delta:(h_delta + new_width), w_delta:(w_delta + new_width)].clone()
    return F.interpolate(cropped, size=(h, h), mode='bilinear')
def random_hflip(tensor, prob):
    """Randomly flip ``tensor`` horizontally (along dim 3).

    NOTE(review): the tensor is returned *unflipped* with probability
    ``prob``, i.e. the flip is applied with probability ``1 - prob``. The
    name suggests the opposite; the only visible caller (AugWrapper) passes
    prob=0.5, where both readings coincide -- confirm intent before changing.
    """
    if prob > random():
        return tensor
    return torch.flip(tensor, dims=(3,))
class AugWrapper(nn.Module):
    """Wrap a discriminator ``D`` so its inputs are stochastically augmented
    (random horizontal flip + DiffAugment) before every forward pass."""

    def __init__(self, D, image_size, types):
        # ``image_size`` is accepted but never used here; kept for interface
        # compatibility with callers.
        super().__init__()
        self.D = D
        self.types = types

    def forward(self, images, prob = 0., detach = False):
        """Return ``(D(images), images)``; with probability ``prob`` the
        images are augmented first."""
        if random() < prob:
            images = random_hflip(images, prob=0.5)
            images = DiffAugment(images, types=self.types)
        if detach:
            # In-place detach: this also detaches the caller's tensor.
            images.detach_()
        return self.D(images), images
def leaky_relu(p=0.2):
    """Return a LeakyReLU module with negative slope ``p``."""
    return nn.LeakyReLU(negative_slope=p)
class Residual(nn.Module):
    """Wrap ``fn`` with an identity skip connection: out = fn(x) + x."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x):
        shortcut = x
        return self.fn(x) + shortcut
class Flatten(nn.Module):
    """Flatten the input starting at dimension ``index``."""

    def __init__(self, index):
        super().__init__()
        self.index = index

    def forward(self, x):
        return torch.flatten(x, start_dim=self.index)
class Rezero(nn.Module):
    """Scale the wrapped function's output by a learned gate initialised to
    zero, so the branch starts out contributing nothing."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn
        self.g = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        return self.g * self.fn(x)
def double_conv(chan_in, chan_out):
    """Two 3x3 same-padding convolutions, each followed by a LeakyReLU."""
    layers = []
    for c_in in (chan_in, chan_out):
        layers.append(nn.Conv2d(c_in, chan_out, 3, padding=1))
        layers.append(leaky_relu())
    return nn.Sequential(*layers)
class DownBlock(nn.Module):
    """Encoder block: double conv plus a strided 1x1 residual projection,
    optionally followed by a strided 3x3 downsampling conv.

    ``forward`` returns ``(x, unet_res)`` where ``unet_res`` is the
    pre-downsampling activation kept as the U-Net skip connection.
    """

    def __init__(self, input_channels, filters, downsample=True):
        super().__init__()
        stride = 2 if downsample else 1
        self.conv_res = nn.Conv2d(input_channels, filters, 1, stride=stride)
        self.net = double_conv(input_channels, filters)
        if downsample:
            self.down = nn.Conv2d(filters, filters, 3, padding=1, stride=2)
        else:
            self.down = None

    def forward(self, x):
        shortcut = self.conv_res(x)
        x = self.net(x)
        skip = x
        if self.down is not None:
            x = self.down(x)
        return x + shortcut, skip
# one layer of self-attention and feedforward, for images
# Both sub-layers are wrapped in Residual(Rezero(...)) so they start out
# as identity transforms (the Rezero gate is initialised to zero).
attn_and_ff = lambda chan: nn.Sequential(*[
    Residual(Rezero(ImageLinearAttention(chan, norm_queries = True))),
    Residual(Rezero(nn.Sequential(nn.Conv2d(chan, chan * 2, 1), leaky_relu(), nn.Conv2d(chan * 2, chan, 1))))
])
class UpBlock(nn.Module):
    """Decoder block: bilinear upsample, concatenate the skip connection,
    double conv, plus a transposed-conv residual projection."""

    def __init__(self, input_channels, filters):
        # ``input_channels`` counts the concatenated (upsampled + skip)
        # channels; the residual path sees only the pre-concat half.
        super().__init__()
        self.conv_res = nn.ConvTranspose2d(input_channels // 2, filters, 1, stride = 2)
        self.net = double_conv(input_channels, filters)
        self.up = nn.Upsample(scale_factor = 2, mode='bilinear', align_corners=False)
        self.input_channels = input_channels
        self.filters = filters

    def forward(self, x, res):
        *_, h, w = x.shape
        # output_size pins the transposed conv to exactly double the input.
        conv_res = self.conv_res(x, output_size = (h * 2, w * 2))
        x = self.up(x)
        x = torch.cat((x, res), dim=1)
        x = self.net(x)
        x = x + conv_res
        return x
class UnetDiscriminator(nn.Module):
    """U-Net discriminator: returns both a global (encoder) logit and a
    per-pixel (decoder) prediction map."""

    def __init__(self, image_size=256, network_capacity = 16, transparent = False, fmap_max = 256):
        super().__init__()
        num_layers = int(log2(image_size) - 3)
        num_init_filters = 2# if not transparent else 4
        blocks = []  # NOTE(review): unused local, kept as-is
        # Filter progression, capped at fmap_max; the last stage keeps the
        # same width as the one before it.
        filters = [num_init_filters] + [(network_capacity) * (2 ** i) for i in range(num_layers + 1)]
        set_fmap_max = partial(min, fmap_max)
        filters = list(map(set_fmap_max, filters))
        filters[-1] = filters[-2]
        chan_in_out = list(zip(filters[:-1], filters[1:]))
        chan_in_out = list(map(list, chan_in_out))
        print('Channels',chan_in_out)
        down_blocks = []
        attn_blocks = []
        for ind, (in_chan, out_chan) in enumerate(chan_in_out):
            num_layer = ind + 1  # NOTE(review): unused local, kept as-is
            is_not_last = ind != (len(chan_in_out) - 1)
            block = DownBlock(in_chan, out_chan, downsample = is_not_last)
            down_blocks.append(block)
            attn_fn = attn_and_ff(out_chan)
            attn_blocks.append(attn_fn)
        self.down_blocks = nn.ModuleList(down_blocks)
        self.attn_blocks = nn.ModuleList(attn_blocks)
        last_chan = filters[-1]
        # Global realness logit from the bottleneck features.
        self.to_logit = nn.Sequential(
            leaky_relu(),
            nn.AvgPool2d(image_size // (2 ** num_layers)),
            Flatten(1),
            nn.Linear(last_chan, 1)
        )
        self.conv = double_conv(last_chan, last_chan)
        # Decoder mirrors the encoder (minus the last, non-downsampling stage).
        dec_chan_in_out = chan_in_out[:-1][::-1]
        self.up_blocks = nn.ModuleList(list(map(lambda c: UpBlock(c[1] * 2, c[0]), dec_chan_in_out)))
        self.conv_out = nn.Conv2d(2, 1, 1)

    def forward(self, x):
        """Return ``(enc_out, dec_out)``: squeezed global logit and the
        per-pixel decoder map."""
        b, *_ = x.shape
        residuals = []
        i=0
        for (down_block, attn_block) in zip(self.down_blocks, self.attn_blocks):
            i=i+1
            x, unet_res = down_block(x)
            residuals.append(unet_res)
            if attn_block is not None:
                x = attn_block(x)
        # Extra residual double-conv at the bottleneck.
        x = self.conv(x) + x
        enc_out = self.to_logit(x)
        # Decode against the stored skips (deepest skip is not reused).
        for (up_block, res) in zip(self.up_blocks, residuals[:-1][::-1]):
            x = up_block(x, res)
        dec_out = self.conv_out(x)
        return enc_out.squeeze(), dec_out
#%% SPADE RESNET
class SPADEGenerator(BaseNetwork):
    """SPADE-based generator (ngf=64, crop 256) that returns a pyramid of
    intermediate activations instead of a single image.

    NOTE(review): unlike SPADE8Generator, this variant leaves G_middle_1
    commented out in forward() -- confirm that is intentional.
    """

    def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False,opt=None):
        super(SPADEGenerator, self).__init__()
        # The shared ``opt`` namespace is overridden in place; every other
        # consumer of the same object sees these settings.
        self.opt = opt
        self.opt.num_upsampling_layers = 'normal'
        self.opt.norm_G = 'spectralspadesyncbatch3x3'
        self.opt.ngf = 64
        self.opt.semantic_nc = 2
        self.opt.use_vae = False
        self.opt.crop_size = 256
        self.opt.normG = 'spectralinstance'
        self.opt.aspect_ratio = 1.0
        nf = self.opt.ngf
        opt = self.opt
        self.sw, self.sh = self.compute_latent_vector_size(opt)
        # Project the (downsampled) segmentation map into the deepest space.
        self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1)
        self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
        self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
        self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
        self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt)
        self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)
        self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)
        self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)
        final_nc = nf
        if opt.num_upsampling_layers == 'most':
            self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt)
            final_nc = nf // 2
        self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)
        self.up = nn.Upsample(scale_factor=2)

    def compute_latent_vector_size(self, opt):
        """Return (width, height) of the initial latent map so that
        ``num_up_layers`` doublings reach ``crop_size``."""
        if opt.num_upsampling_layers == 'normal':
            num_up_layers = 5
        elif opt.num_upsampling_layers == 'more':
            num_up_layers = 6
        elif opt.num_upsampling_layers == 'most':
            num_up_layers = 7
        else:
            raise ValueError('opt.num_upsampling_layers [%s] not recognized' %
                             opt.num_upsampling_layers)
        sw = self.opt.crop_size // (2**num_up_layers)
        sh = round(sw / opt.aspect_ratio)
        return sw, sh

    def forward(self, input, z=None):
        """Generate from segmentation map ``input``; returns the
        intermediate activations (output_5, output_6, output_9, output_11,
        output_15) used by the PIX-SPADE U-Nets."""
        seg = input
        if self.opt.use_vae:
            # we sample z from unit normal and reshape the tensor
            if z is None:
                z = torch.randn(input.size(0), self.opt.z_dim,
                                dtype=torch.float32, device=input.get_device())
            x = self.fc(z)
            x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw)
        else:
            # we downsample segmap and run convolution
            x = F.interpolate(seg, size=(self.sh, self.sw))
            x = self.fc(x)
        x = self.head_0(x, seg)
        x = self.up(x)
        x = self.G_middle_0(x, seg)
        if self.opt.num_upsampling_layers == 'more' or \
                self.opt.num_upsampling_layers == 'most':
            x = self.up(x)
        #x = self.G_middle_1(x, seg)
        output_5 = x
        x = self.up(x)
        output_6 = x
        x = self.up_0(x, seg)
        x = self.up(x)
        x = self.up_1(x, seg)
        output_9 = x
        x = self.up(x)
        x = self.up_2(x, seg)
        output_11 = x
        x = self.up(x)
        x = self.up_3(x, seg)
        if self.opt.num_upsampling_layers == 'most':
            x = self.up(x)
            x = self.up_4(x, seg)
        x = self.conv_img(F.leaky_relu(x, 2e-1))
        output_15 = x
        # Final tanh deliberately disabled.
        #x = F.tanh(x)
        return output_5,output_6,output_9,output_11,output_15
#%% spade8
class SPADE8Generator(BaseNetwork):
    """SPADE generator variant with ngf=8 (slim feature maps, crop 256).

    Structurally identical to SPADEGenerator except for the width and that
    G_middle_1 IS applied in forward().
    """

    def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False,opt=None):
        super(SPADE8Generator, self).__init__()
        # The shared ``opt`` namespace is overridden in place.
        self.opt = opt
        self.opt.num_upsampling_layers = 'normal'
        self.opt.norm_G = 'spectralspadesyncbatch3x3'
        self.opt.ngf = 8
        self.opt.semantic_nc = 2
        self.opt.use_vae = False
        self.opt.crop_size = 256
        self.opt.normG = 'spectralinstance'
        self.opt.aspect_ratio = 1.0
        nf = self.opt.ngf
        opt = self.opt
        self.sw, self.sh = self.compute_latent_vector_size(opt)
        self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1)
        self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
        self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
        self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
        self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt)
        self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)
        self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)
        self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)
        final_nc = nf
        if opt.num_upsampling_layers == 'most':
            self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt)
            final_nc = nf // 2
        self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)
        self.up = nn.Upsample(scale_factor=2)

    def compute_latent_vector_size(self, opt):
        """Return (width, height) of the initial latent map so that
        ``num_up_layers`` doublings reach ``crop_size``."""
        if opt.num_upsampling_layers == 'normal':
            num_up_layers = 5
        elif opt.num_upsampling_layers == 'more':
            num_up_layers = 6
        elif opt.num_upsampling_layers == 'most':
            num_up_layers = 7
        else:
            raise ValueError('opt.num_upsampling_layers [%s] not recognized' %
                             opt.num_upsampling_layers)
        sw = self.opt.crop_size // (2**num_up_layers)
        sh = round(sw / opt.aspect_ratio)
        return sw, sh

    def forward(self, input, z=None):
        """Generate from segmentation map ``input``; returns intermediate
        activations (output_5, output_6, output_9, output_11, output_15)."""
        seg = input
        if self.opt.use_vae:
            # we sample z from unit normal and reshape the tensor
            if z is None:
                z = torch.randn(input.size(0), self.opt.z_dim,
                                dtype=torch.float32, device=input.get_device())
            x = self.fc(z)
            x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw)
        else:
            # we downsample segmap and run convolution
            x = F.interpolate(seg, size=(self.sh, self.sw))
            x = self.fc(x)
        x = self.head_0(x, seg)
        x = self.up(x)
        x = self.G_middle_0(x, seg)
        if self.opt.num_upsampling_layers == 'more' or \
                self.opt.num_upsampling_layers == 'most':
            x = self.up(x)
        x = self.G_middle_1(x, seg)
        output_5 = x
        x = self.up(x)
        output_6 = x
        x = self.up_0(x, seg)
        x = self.up(x)
        x = self.up_1(x, seg)
        output_9 = x
        x = self.up(x)
        x = self.up_2(x, seg)
        output_11 = x
        x = self.up(x)
        x = self.up_3(x, seg)
        if self.opt.num_upsampling_layers == 'most':
            x = self.up(x)
            x = self.up_4(x, seg)
        x = self.conv_img(F.leaky_relu(x, 2e-1))
        output_15 = x
        # Final tanh deliberately disabled.
        #x = F.tanh(x)
        return output_5,output_6,output_9,output_11,output_15
#%%
class SPADE6Generator(BaseNetwork):
    """SPADE generator variant with ngf=6 and crop size 300.

    Structurally identical to SPADEGenerator/SPADE8Generator. The debug
    ``print`` calls that fired on every forward pass (the sibling classes
    keep them commented out) have been silenced here for consistency and to
    stop stdout spam during training.
    """

    def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False,opt=None):
        super(SPADE6Generator, self).__init__()
        # The shared ``opt`` namespace is overridden in place.
        self.opt = opt
        self.opt.num_upsampling_layers = 'normal'
        self.opt.norm_G = 'spectralspadesyncbatch3x3'
        self.opt.ngf = 6
        self.opt.semantic_nc = 2
        self.opt.use_vae = False
        self.opt.crop_size = 300
        self.opt.normG = 'spectralinstance'
        self.opt.aspect_ratio = 1.0
        nf = self.opt.ngf
        opt = self.opt
        self.sw, self.sh = self.compute_latent_vector_size(opt)
        self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1)
        self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
        self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
        self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
        self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt)
        self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)
        self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)
        self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)
        final_nc = nf
        if opt.num_upsampling_layers == 'most':
            self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt)
            final_nc = nf // 2
        self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)
        self.up = nn.Upsample(scale_factor=2)

    def compute_latent_vector_size(self, opt):
        """Return (width, height) of the initial latent feature map."""
        if opt.num_upsampling_layers == 'normal':
            num_up_layers = 5
        elif opt.num_upsampling_layers == 'more':
            num_up_layers = 6
        elif opt.num_upsampling_layers == 'most':
            num_up_layers = 7
        else:
            raise ValueError('opt.num_upsampling_layers [%s] not recognized' %
                             opt.num_upsampling_layers)
        # Hard-coded override of the generic formula
        # (crop_size // 2**num_up_layers would give 9 for crop_size=300).
        # NOTE(review): confirm 10 is the intended latent width.
        sw = 10  # self.opt.crop_size // (2**num_up_layers)
        sh = round(sw / opt.aspect_ratio)
        return sw, sh

    def forward(self, input, z=None):
        """Generate from segmentation map ``input``; returns intermediate
        activations (output_5, output_6, output_9, output_11, output_15)."""
        seg = input
        if self.opt.use_vae:
            # we sample z from unit normal and reshape the tensor
            if z is None:
                z = torch.randn(input.size(0), self.opt.z_dim,
                                dtype=torch.float32, device=input.get_device())
            x = self.fc(z)
            x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw)
        else:
            # we downsample segmap and run convolution
            x = F.interpolate(seg, size=(self.sh, self.sw))
            x = self.fc(x)
        x = self.head_0(x, seg)
        x = self.up(x)
        x = self.G_middle_0(x, seg)
        if self.opt.num_upsampling_layers == 'more' or \
                self.opt.num_upsampling_layers == 'most':
            x = self.up(x)
        x = self.G_middle_1(x, seg)
        output_5 = x
        x = self.up(x)
        output_6 = x
        x = self.up_0(x, seg)
        x = self.up(x)
        x = self.up_1(x, seg)
        output_9 = x
        x = self.up(x)
        x = self.up_2(x, seg)
        output_11 = x
        x = self.up(x)
        x = self.up_3(x, seg)
        if self.opt.num_upsampling_layers == 'most':
            x = self.up(x)
            x = self.up_4(x, seg)
        x = self.conv_img(F.leaky_relu(x, 2e-1))
        output_15 = x
        # Final tanh deliberately disabled, as in the sibling generators.
        return output_5, output_6, output_9, output_11, output_15
#%% For the PIX2SPADE
class UNet768PIXSPADE(torch.nn.Module):
    """U-Net (768-wide bottleneck) whose decoder is conditioned on the
    intermediate activations of a SPADEGenerator (ngf=64): each SPADE
    feature map is concatenated into the matching decoder stage."""

    def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(UNet768PIXSPADE, self).__init__()
        print('UNET 768 SPADE')
        self.output_nc = output_nc
        # Encoder: fixed 1-channel input.
        self.down1 = StackEncoder(1, 24, kernel_size=3)    # 1 -> 24 ch
        self.down2 = StackEncoder(24, 64, kernel_size=3)   # 24 -> 64 ch
        self.down3 = StackEncoder(64, 128, kernel_size=3)  # 64 -> 128 ch
        self.down4 = StackEncoder(128, 256, kernel_size=3) # 128 -> 256 ch
        self.down5 = StackEncoder(256, 512, kernel_size=3) # 256 -> 512 ch
        self.down6 = StackEncoder(512, 768, kernel_size=3) # 512 -> 768 ch
        self.center = torch.nn.Sequential(
            ConvBnRelu2d(768, 768, kernel_size=3, padding=1, stride=1),
        )
        # Decoder; the widened in-channels account for the concatenated
        # SPADE activations (ngf=64 generator: 1024/1024/256/128/3 ch).
        self.up6 = StackDecoder(768, 768, 512, kernel_size=3)
        self.up5 = StackDecoder(512, 512, 256, kernel_size=3)
        self.up4 = StackDecoder(256+1024, 256, 128, kernel_size=3)  # + output_5 (1024 ch)
        self.up3 = StackDecoder(128+1024, 128, 64, kernel_size=3)   # + output_6 (1024 ch)
        self.up2 = StackDecoder(64+256, 64, 24, kernel_size=3)      # + output_9 (256 ch)
        self.up1 = StackDecoder(24+128, 24, 24, kernel_size=3)      # + output_11 (128 ch)
        self.classify = torch.nn.Conv2d(24+3, output_nc, kernel_size=1, padding=0, stride=1, bias=True)  # + output_15 (3 ch)
        self.final_out = torch.nn.Tanh()

    def _crop_concat(self, upsampled, bypass):
        """Center-crop ``bypass`` to ``upsampled`` and concat on channels."""
        c = (bypass.size()[2] - upsampled.size()[2]) // 2
        bypass = torch.nn.functional.pad(bypass, (-c, -c, -c, -c))
        return torch.cat((upsampled, bypass), 1)

    def forward(self,x, input_to_net):
        """Args:
            x: 1-channel input batch.
            input_to_net: tuple of SPADE activations
                (output_5, output_6, output_9, output_11, output_15).

        Returns:
            Tanh-activated map of shape (batch, output_nc, 256, 256).
        """
        output_5,output_6,output_9,output_11,output_15 = input_to_net
        out = x
        down1, out = self.down1(out)
        down2, out = self.down2(out)
        down3, out = self.down3(out)
        down4, out = self.down4(out)
        down5, out = self.down5(out)
        down6, out = self.down6(out)
        pass
        out = self.center(out)
        out = self.up6(down6, out)
        out = self.up5(down5, out)
        # Inject the SPADE activations at progressively finer scales.
        out = torch.cat((out,output_5 ),1 )
        out = self.up4(down4, out)
        out = torch.cat((out,output_6 ),1 )
        out = self.up3(down3, out)
        out = torch.cat((out,output_9 ),1 )
        out = self.up2(down2, out)
        out = torch.cat((out,output_11 ),1 )
        out = self.up1(down1, out)
        out = torch.cat((out,output_15 ),1 )
        out = self.final_out(self.classify(out))
        out = torch.reshape(out,(-1, self.output_nc, 256,256))
        return out
#%%Unet for spade8
class UNet768PIXSPADE8SM(torch.nn.Module):
    """U-Net decoder conditioned on a slim SPADE8Generator (ngf=8): its
    intermediate activations are concatenated into the decoder stages.
    Unlike UNet768PIXSPADE, the final output_15 concat is disabled."""

    def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(UNet768PIXSPADE8SM, self).__init__()
        print('UNET 768 SPADE')
        self.output_nc = output_nc
        # Encoder: fixed 1-channel input.
        self.down1 = StackEncoder(1, 24, kernel_size=3)    # 1 -> 24 ch
        self.down2 = StackEncoder(24, 64, kernel_size=3)   # 24 -> 64 ch
        self.down3 = StackEncoder(64, 128, kernel_size=3)  # 64 -> 128 ch
        self.down4 = StackEncoder(128, 256, kernel_size=3) # 128 -> 256 ch
        self.down5 = StackEncoder(256, 512, kernel_size=3) # 256 -> 512 ch
        self.down6 = StackEncoder(512, 768, kernel_size=3) # 512 -> 768 ch
        self.center = torch.nn.Sequential(
            ConvBnRelu2d(768, 768, kernel_size=3, padding=1, stride=1),
        )
        # Decoder; widened in-channels match the ngf=8 SPADE activations
        # (128/128/32/16 channels).
        self.up6 = StackDecoder(768, 768, 512, kernel_size=3)
        self.up5 = StackDecoder(512, 512, 256, kernel_size=3)
        self.up4 = StackDecoder(256+128, 256, 128, kernel_size=3)  # + output_5 (128 ch)
        self.up3 = StackDecoder(128+128, 128, 64, kernel_size=3)   # + output_6 (128 ch)
        self.up2 = StackDecoder(64+32, 64, 24, kernel_size=3)      # + output_9 (32 ch)
        self.up1 = StackDecoder(24+16, 24, 24, kernel_size=3)      # + output_11 (16 ch)
        self.classify = torch.nn.Conv2d(24, output_nc, kernel_size=1, padding=0, stride=1, bias=True)
        self.final_out = torch.nn.Tanh()

    def _crop_concat(self, upsampled, bypass):
        """Center-crop ``bypass`` to ``upsampled`` and concat on channels."""
        c = (bypass.size()[2] - upsampled.size()[2]) // 2
        bypass = torch.nn.functional.pad(bypass, (-c, -c, -c, -c))
        return torch.cat((upsampled, bypass), 1)

    def forward(self,x, input_to_net):
        """Args:
            x: 1-channel input batch.
            input_to_net: tuple of SPADE activations
                (output_5, output_6, output_9, output_11, output_15);
                output_15 is unpacked but intentionally unused here.

        Returns:
            Tanh-activated map of shape (batch, output_nc, 256, 256).
        """
        output_5,output_6,output_9,output_11,output_15 = input_to_net
        out = x
        down1, out = self.down1(out)
        down2, out = self.down2(out)
        down3, out = self.down3(out)
        down4, out = self.down4(out)
        down5, out = self.down5(out)
        down6, out = self.down6(out)
        pass
        out = self.center(out)
        out = self.up6(down6, out)
        out = self.up5(down5, out)
        # Inject the SPADE activations at progressively finer scales.
        out = torch.cat((out,output_5 ),1 )
        out = self.up4(down4, out)
        out = torch.cat((out,output_6 ),1 )
        out = self.up3(down3, out)
        out = torch.cat((out,output_9 ),1 )
        out = self.up2(down2, out)
        out = torch.cat((out,output_11 ),1 )
        out = self.up1(down1, out)
        # output_15 concat deliberately disabled in this variant:
        #out = torch.cat((out,output_15 ),1 )
        out = self.final_out(self.classify(out))
        out = torch.reshape(out,(-1, self.output_nc, 256,256))
        return out
| true | true |
f71e55d3cf3aa4932ba68a34b333db3adb118d95 | 3,235 | py | Python | tests/unittests/test_extension.py | kontur/psautohint | 69bab0df4eac8c4a88d9ac4dce94c2d6c61aba99 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/unittests/test_extension.py | kontur/psautohint | 69bab0df4eac8c4a88d9ac4dce94c2d6c61aba99 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/unittests/test_extension.py | kontur/psautohint | 69bab0df4eac8c4a88d9ac4dce94c2d6c61aba99 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import pytest
from psautohint import _psautohint
INFO = b"FontName Foo"
NAME = b"Foo"
GLYPH = b"""% square
0 500 rb
60 500 ry
sc
560 500 mt
560 0 dt
60 0 dt
60 500 dt
cp
ed
"""
def test_autohint_good_args():
    """autohint accepts (info bytes, glyph bytes) without raising."""
    _psautohint.autohint(INFO, GLYPH)
def test_autohintmm_good_args():
    """autohintmm accepts matching tuples of glyphs and names."""
    _psautohint.autohintmm((GLYPH, GLYPH), (NAME, NAME))
@pytest.mark.parametrize("args", [
    [], # no arguments
    [INFO], # 1 argument
    [INFO.decode('ascii'), GLYPH], # 1st is string not bytes
    [INFO, GLYPH.decode('ascii')], # 2nd is string not bytes
    [[INFO], GLYPH.decode('ascii')], # 1st is a list
    [INFO, [GLYPH.decode('ascii')]], # 2nd is a list
])
def test_autohint_bad_args(args):
    """Wrong arity or non-bytes arguments raise TypeError."""
    with pytest.raises(TypeError):
        _psautohint.autohint(*args)
@pytest.mark.parametrize("args", [
    [], # no arguments
    [(GLYPH, GLYPH)], # 1 argument
    [GLYPH, (NAME, NAME)], # 1st is not a tuple
    [(GLYPH, GLYPH), NAME], # 2nd is not a tuple
    [(GLYPH, GLYPH), tuple()], # 2nd is an empty tuple
    [(GLYPH, GLYPH), (NAME,)], # 2nd is shorter than 1st
    [(GLYPH,), (NAME,)], # 1st is one glyph
])
def test_autohintmm_bad_args(args):
    """Malformed tuple arguments raise TypeError."""
    with pytest.raises(TypeError):
        _psautohint.autohintmm(*args)
@pytest.mark.parametrize("args", [
    [(GLYPH.decode('ascii'), GLYPH), (NAME, NAME)], # 1st should be bytes
    [(GLYPH, GLYPH), (NAME.decode('ascii'), NAME)], # 2nd should be bytes
])
def test_autohintmm_unicode(args):
    """str elements inside the tuples (instead of bytes) raise TypeError."""
    with pytest.raises(TypeError):
        _psautohint.autohintmm(*args)
@pytest.mark.parametrize("glyph", [
    b"% foo\ned", # ending comment with newline
    b"% foo\red", # ending comment with linefeed
    b"\t% foo\nsc\ted", # separating tokens with tab
    b"% foo\nsc ed", # separating tokens with space
    b"% foo\nsc\ned", # separating tokens with newline
    b"% foo\nsc\red", # separating tokens with linefeed
    b"% foo", # glyph name only
    b"% foo bar", # extra data after glyph name
])
def test_autohint_good_glyph(glyph):
    """Valid glyph variants all normalise to the same hinted output."""
    result = _psautohint.autohint(INFO, glyph)
    assert result == b"% foo\nsc\ned\n"
@pytest.mark.parametrize("glyph", [
    b"% foo\ncf", # unknown operator
    b"% foo\n" + 80 * b"f", # too long unknown operator
    b"% " + 65 * b"A", # too long glyph name
    b"% foo\n10 ", # number left on stack at end of glyph
    b"% foo\n0a 0 rm\ned", # bad number terminator
    b"% foo\nry", # stack underflow
    b"% foo\n$", # unexpected character
])
def test_autohint_bad_glyph(glyph):
    """Malformed glyph data raises the extension's error type."""
    with pytest.raises(_psautohint.error):
        _psautohint.autohint(INFO, glyph)
@pytest.mark.parametrize("glyphs", [
    (b"cf", b"cf"),
])
def test_autohintmm_bad_glyphs(glyphs):
    """Malformed glyphs in the MM API raise the extension's error type."""
    with pytest.raises(_psautohint.error):
        _psautohint.autohintmm(glyphs, (NAME, NAME))
@pytest.mark.parametrize("info", [
    b"HCounterChars [" + b" ".join(b"A" * i for i in range(16)) + b"]",
    b"VCounterChars [" + b" ".join(b"A" * i for i in range(16)) + b"]",
])
def test_autohint_too_many_counter_glyphs(info):
    """Oversized counter-glyph lists are tolerated (no exception)."""
    _psautohint.autohint(info, GLYPH)
| 29.953704 | 74 | 0.605255 | import pytest
from psautohint import _psautohint
INFO = b"FontName Foo"
NAME = b"Foo"
GLYPH = b"""% square
0 500 rb
60 500 ry
sc
560 500 mt
560 0 dt
60 0 dt
60 500 dt
cp
ed
"""
def test_autohint_good_args():
_psautohint.autohint(INFO, GLYPH)
def test_autohintmm_good_args():
_psautohint.autohintmm((GLYPH, GLYPH), (NAME, NAME))
@pytest.mark.parametrize("args", [
[],
[INFO],
[INFO.decode('ascii'), GLYPH],
[INFO, GLYPH.decode('ascii')],
[[INFO], GLYPH.decode('ascii')],
[INFO, [GLYPH.decode('ascii')]],
])
def test_autohint_bad_args(args):
with pytest.raises(TypeError):
_psautohint.autohint(*args)
@pytest.mark.parametrize("args", [
[],
[(GLYPH, GLYPH)],
[GLYPH, (NAME, NAME)],
[(GLYPH, GLYPH), NAME],
[(GLYPH, GLYPH), tuple()],
[(GLYPH, GLYPH), (NAME,)],
[(GLYPH,), (NAME,)],
])
def test_autohintmm_bad_args(args):
with pytest.raises(TypeError):
_psautohint.autohintmm(*args)
@pytest.mark.parametrize("args", [
[(GLYPH.decode('ascii'), GLYPH), (NAME, NAME)],
[(GLYPH, GLYPH), (NAME.decode('ascii'), NAME)],
])
def test_autohintmm_unicode(args):
with pytest.raises(TypeError):
_psautohint.autohintmm(*args)
@pytest.mark.parametrize("glyph", [
b"% foo\ned",
b"% foo\red",
b"\t% foo\nsc\ted",
b"% foo\nsc ed",
b"% foo\nsc\ned",
b"% foo\nsc\red",
b"% foo",
b"% foo bar",
])
def test_autohint_good_glyph(glyph):
result = _psautohint.autohint(INFO, glyph)
assert result == b"% foo\nsc\ned\n"
@pytest.mark.parametrize("glyph", [
b"% foo\ncf",
b"% foo\n" + 80 * b"f",
b"% " + 65 * b"A",
b"% foo\n10 ",
b"% foo\n0a 0 rm\ned",
b"% foo\nry",
b"% foo\n$",
])
def test_autohint_bad_glyph(glyph):
with pytest.raises(_psautohint.error):
_psautohint.autohint(INFO, glyph)
@pytest.mark.parametrize("glyphs", [
(b"cf", b"cf"),
])
def test_autohintmm_bad_glyphs(glyphs):
with pytest.raises(_psautohint.error):
_psautohint.autohintmm(glyphs, (NAME, NAME))
@pytest.mark.parametrize("info", [
b"HCounterChars [" + b" ".join(b"A" * i for i in range(16)) + b"]",
b"VCounterChars [" + b" ".join(b"A" * i for i in range(16)) + b"]",
])
def test_autohint_too_many_counter_glyphs(info):
_psautohint.autohint(info, GLYPH)
| true | true |
f71e55d60e509a916cc25b65b45816137179e94a | 1,085 | py | Python | MNIST_convert.py | actionLUO/mnist_handwriting | fa8958417960978fa6230085950dc75e36697fdc | [
"Apache-2.0"
] | 1 | 2019-01-03T06:50:49.000Z | 2019-01-03T06:50:49.000Z | MNIST_convert.py | actionLUO/mnist_handwriting | fa8958417960978fa6230085950dc75e36697fdc | [
"Apache-2.0"
] | null | null | null | MNIST_convert.py | actionLUO/mnist_handwriting | fa8958417960978fa6230085950dc75e36697fdc | [
"Apache-2.0"
] | null | null | null | """
Created by Jacky LUO
Using python3.5
Reference: https://pjreddie.com/projects/mnist-in-csv
"""
def convert_mnist_csv(img_file, label_file, output_file, n):
    """Convert an MNIST IDX image/label file pair into a single CSV file.

    Each output row is ``label,pixel_0,...,pixel_783`` for one 28x28 image,
    in the same order as the source files.

    Args:
        img_file: path to the IDX3 image file (16-byte header, then one
            unsigned byte per pixel).
        label_file: path to the IDX1 label file (8-byte header, then one
            unsigned byte per label).
        output_file: path of the CSV file to create (overwritten).
        n: number of images/labels to convert.

    Raises:
        ValueError: if the image file ends before n full images were read.
    """
    pixels_per_image = 28 * 28
    # Context managers guarantee all three files are closed even if a
    # read/write fails partway through the conversion.
    with open(img_file, 'rb') as imgf, \
            open(label_file, 'rb') as labelf, \
            open(output_file, 'w') as outputf:
        # Skip the IDX magic/dimension headers (16 bytes for images,
        # 8 bytes for labels).
        imgf.read(16)
        labelf.read(8)
        for i in range(n):
            # One row: the label first, then the 784 pixel intensities.
            # Reading the pixel block in one call avoids 784 per-byte reads;
            # iterating bytes yields ints, matching the old ord() loop.
            row = [ord(labelf.read(1))]
            pixels = imgf.read(pixels_per_image)
            if len(pixels) != pixels_per_image:
                raise ValueError(
                    'Image file truncated at image %d' % i)
            row.extend(pixels)
            # Stream each row instead of buffering all n images in memory.
            outputf.write(",".join(str(pixel) for pixel in row) + "\n")
if __name__ == '__main__':
    # Convert the standard MNIST train (60k) and test (10k) splits.
    # Paths are relative to the working directory; the raw IDX files must
    # already be downloaded into ./mnist/.
    convert_mnist_csv("mnist/train-images-idx3-ubyte",
                      "mnist/train-labels-idx1-ubyte",
                      "mnist/mnist_train.csv",
                      60000)
    convert_mnist_csv("mnist/t10k-images-idx3-ubyte",
                      "mnist/t10k-labels-idx1-ubyte",
                      "mnist/mnist_test.csv",
                      10000)
    print('Finished data convert!')
| 21.7 | 69 | 0.560369 |
def convert_mnist_csv(img_file, label_file, output_file, n):
    """Convert an MNIST IDX image/label file pair into one CSV file.

    Each output row is ``label,pixel_0,...,pixel_783`` for one 28x28 image.
    """
    imgf = open(img_file, 'rb')
    labelf = open(label_file, 'rb')
    outputf = open(output_file, 'w')
    # Skip the IDX headers: 16 bytes (images), 8 bytes (labels).
    imgf.read(16)
    labelf.read(8)
    images = []
    for i in range(n):
        # Row layout: label first, then the 784 pixel bytes.
        image = [ord(labelf.read(1))]
        for j in range(28 * 28):
            image.append(ord(imgf.read(1)))
        images.append(image)
    for image in images:
        outputf.write(",".join(str(pixel) for pixel in image) + "\n")
    imgf.close()
    labelf.close()
    outputf.close()
if __name__ == '__main__':
    # Convert the standard MNIST train (60k) and test (10k) splits; the raw
    # IDX files must already exist under ./mnist/.
    convert_mnist_csv("mnist/train-images-idx3-ubyte",
                      "mnist/train-labels-idx1-ubyte",
                      "mnist/mnist_train.csv",
                      60000)
    convert_mnist_csv("mnist/t10k-images-idx3-ubyte",
                      "mnist/t10k-labels-idx1-ubyte",
                      "mnist/mnist_test.csv",
                      10000)
    print('Finished data convert!')
| true | true |
f71e566e04bc0d00ac8b0de8f6db0abb216cd3ab | 71,850 | py | Python | sdks/python/apache_beam/runners/dataflow/dataflow_runner.py | stephenoken/beam | 4797f310b6671de6fd703502520f4b012b655c82 | [
"Apache-2.0"
] | 1 | 2020-05-29T12:37:18.000Z | 2020-05-29T12:37:18.000Z | sdks/python/apache_beam/runners/dataflow/dataflow_runner.py | stephenoken/beam | 4797f310b6671de6fd703502520f4b012b655c82 | [
"Apache-2.0"
] | null | null | null | sdks/python/apache_beam/runners/dataflow/dataflow_runner.py | stephenoken/beam | 4797f310b6671de6fd703502520f4b012b655c82 | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A runner implementation that submits a job for remote execution.
The runner will create a JSON description of the job graph and then submit it
to the Dataflow Service for remote execution by a worker.
"""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
import base64
import json
import logging
import os
import subprocess
import sys
import threading
import time
import traceback
import urllib
from builtins import hex
from collections import defaultdict
from typing import TYPE_CHECKING
from typing import List
from future.utils import iteritems
import apache_beam as beam
from apache_beam import coders
from apache_beam import error
from apache_beam import pvalue
from apache_beam.internal import pickler
from apache_beam.internal.gcp import json_value
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import TestOptions
from apache_beam.options.pipeline_options import WorkerOptions
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.pvalue import AsSideInput
from apache_beam.runners.common import DoFnSignature
from apache_beam.runners.dataflow.internal import names
from apache_beam.runners.dataflow.internal.clients import dataflow as dataflow_api
from apache_beam.runners.dataflow.internal.names import PropertyNames
from apache_beam.runners.dataflow.internal.names import TransformNames
from apache_beam.runners.runner import PipelineResult
from apache_beam.runners.runner import PipelineRunner
from apache_beam.runners.runner import PipelineState
from apache_beam.runners.runner import PValueCache
from apache_beam.transforms import window
from apache_beam.transforms.core import RunnerAPIPTransformHolder
from apache_beam.transforms.display import DisplayData
from apache_beam.transforms.sideinputs import SIDE_INPUT_PREFIX
from apache_beam.typehints import typehints
from apache_beam.utils import processes
from apache_beam.utils import proto_utils
from apache_beam.utils.interactive_utils import is_in_notebook
from apache_beam.utils.plugin import BeamPlugin
if TYPE_CHECKING:
  from apache_beam.pipeline import PTransformOverride

# Python 2/3 compatibility: the urllib quoting helpers moved into
# urllib.parse on Python 3.
if sys.version_info[0] > 2:
  unquote_to_bytes = urllib.parse.unquote_to_bytes
  quote = urllib.parse.quote
else:
  unquote_to_bytes = urllib.unquote  # pylint: disable=deprecated-urllib-function
  quote = urllib.quote  # pylint: disable=deprecated-urllib-function

__all__ = ['DataflowRunner']

_LOGGER = logging.getLogger(__name__)

# Error message surfaced when Read(BigQuerySource(...)) is combined with
# newer-stack features that do not support the native BigQuery source.
BQ_SOURCE_UW_ERROR = (
    'The Read(BigQuerySource(...)) transform is not supported with newer stack '
    'features (Fn API, Dataflow Runner V2, etc). Please use the transform '
    'apache_beam.io.gcp.bigquery.ReadFromBigQuery instead.')
class DataflowRunner(PipelineRunner):
  """A runner that creates job graphs and submits them for remote execution.

  Every execution of the run() method will submit an independent job for
  remote execution that consists of the nodes reachable from the passed in
  node argument or entire graph if node is None. The run() method returns
  after the service created the job and will not wait for the job to finish
  if blocking is set to False.
  """

  # A list of PTransformOverride objects to be applied before running a pipeline
  # using DataflowRunner.
  # Currently this only works for overrides where the input and output types do
  # not change.
  # For internal SDK use only. This should not be updated by Beam pipeline
  # authors.

  # Imported here to avoid circular dependencies.
  # TODO: Remove the apache_beam.pipeline dependency in CreatePTransformOverride
  from apache_beam.runners.dataflow.ptransform_overrides import CombineValuesPTransformOverride
  from apache_beam.runners.dataflow.ptransform_overrides import CreatePTransformOverride
  from apache_beam.runners.dataflow.ptransform_overrides import ReadPTransformOverride
  from apache_beam.runners.dataflow.ptransform_overrides import JrhReadPTransformOverride

  # These overrides should be applied before the proto representation of the
  # graph is created.
  _PTRANSFORM_OVERRIDES = [
      CombineValuesPTransformOverride()
  ]  # type: List[PTransformOverride]

  # Overrides applied only when the Java Runner Harness (JRH) is in use.
  _JRH_PTRANSFORM_OVERRIDES = [
      JrhReadPTransformOverride(),
  ]  # type: List[PTransformOverride]

  # These overrides should be applied after the proto representation of the
  # graph is created.
  _NON_PORTABLE_PTRANSFORM_OVERRIDES = [
      CreatePTransformOverride(),
      ReadPTransformOverride(),
  ]  # type: List[PTransformOverride]
  def __init__(self, cache=None):
    """Initializes the runner.

    Args:
      cache: optional PValueCache mapping pipeline nodes to generated job
        steps; a fresh cache is created when omitted.
    """
    # Cache of CloudWorkflowStep protos generated while the runner
    # "executes" a pipeline.
    self._cache = cache if cache is not None else PValueCache()
    self._unique_step_id = 0  # monotonically increasing step-name counter
  def is_fnapi_compatible(self):
    """Whether to use the portable Fn API machinery directly (it does not)."""
    return False
  def apply(self, transform, input, options):
    # Ensure experiments implied by the unified worker are present before
    # delegating to the default PipelineRunner.apply machinery.
    self._maybe_add_unified_worker_missing_options(options)
    return super(DataflowRunner, self).apply(transform, input, options)
  def _get_unique_step_name(self):
    """Returns a job-unique step name of the form 's<N>'."""
    self._unique_step_id += 1
    return 's%s' % self._unique_step_id
  @staticmethod
  def poll_for_job_completion(runner, result, duration):
    """Polls for the specified job to finish running (successfully or not).

    Updates the result with the new job information before returning.

    Args:
      runner: DataflowRunner instance to use for polling job state.
      result: DataflowPipelineResult instance used for job information.
      duration (int): The time to wait (in milliseconds) for job to finish.
        If it is set to :data:`None`, it will wait indefinitely until the job
        is finished.
    """
    last_message_time = None
    # Messages already logged at last_message_time, to de-duplicate across
    # successive list_messages() queries that restart at that timestamp.
    current_seen_messages = set()

    last_error_rank = float('-inf')
    last_error_msg = None
    last_job_state = None
    # How long to wait after pipeline failure for the error
    # message to show up giving the reason for the failure.
    # It typically takes about 30 seconds.
    final_countdown_timer_secs = 50.0
    sleep_secs = 5.0

    # Try to prioritize the user-level traceback, if any.
    def rank_error(msg):
      if 'work item was attempted' in msg:
        return -1
      elif 'Traceback' in msg:
        return 1
      return 0

    if duration:
      start_secs = time.time()
      duration_secs = duration // 1000

    job_id = result.job_id()
    while True:
      response = runner.dataflow_client.get_job(job_id)
      # If get() is called very soon after Create() the response may not contain
      # an initialized 'currentState' field.
      if response.currentState is not None:
        if response.currentState != last_job_state:
          _LOGGER.info('Job %s is in state %s', job_id, response.currentState)
          last_job_state = response.currentState
        if str(response.currentState) != 'JOB_STATE_RUNNING':
          # Stop checking for new messages on timeout, explanatory
          # message received, success, or a terminal job state caused
          # by the user that therefore doesn't require explanation.
          if (final_countdown_timer_secs <= 0.0 or last_error_msg is not None or
              str(response.currentState) == 'JOB_STATE_DONE' or
              str(response.currentState) == 'JOB_STATE_CANCELLED' or
              str(response.currentState) == 'JOB_STATE_UPDATED' or
              str(response.currentState) == 'JOB_STATE_DRAINED'):
            break

          # Check that job is in a post-preparation state before starting the
          # final countdown.
          if (str(response.currentState) not in ('JOB_STATE_PENDING',
                                                 'JOB_STATE_QUEUED')):
            # The job has failed; ensure we see any final error messages.
            sleep_secs = 1.0  # poll faster during the final countdown
            final_countdown_timer_secs -= sleep_secs

      time.sleep(sleep_secs)

      # Get all messages since beginning of the job run or since last message.
      page_token = None
      while True:
        messages, page_token = runner.dataflow_client.list_messages(
            job_id, page_token=page_token, start_time=last_message_time)
        for m in messages:
          message = '%s: %s: %s' % (m.time, m.messageImportance, m.messageText)

          if not last_message_time or m.time > last_message_time:
            last_message_time = m.time
            current_seen_messages = set()

          if message in current_seen_messages:
            # Skip the message if it has already been seen at the current
            # time. This could be the case since the list_messages API is
            # queried starting at last_message_time.
            continue
          else:
            current_seen_messages.add(message)
          # Skip empty messages.
          if m.messageImportance is None:
            continue
          _LOGGER.info(message)
          if str(m.messageImportance) == 'JOB_MESSAGE_ERROR':
            # Keep the highest-ranked (most user-relevant) error message.
            if rank_error(m.messageText) >= last_error_rank:
              last_error_rank = rank_error(m.messageText)
              last_error_msg = m.messageText
        if not page_token:
          break

      if duration:
        passed_secs = time.time() - start_secs
        if passed_secs > duration_secs:
          _LOGGER.warning(
              'Timing out on waiting for job %s after %d seconds',
              job_id,
              passed_secs)
          break

    result._job = response
    runner.last_error_msg = last_error_msg
@staticmethod
def _only_element(iterable):
# type: (Iterable[T]) -> T
element, = iterable
return element
  @staticmethod
  def group_by_key_input_visitor():
    """Returns a visitor that enforces KV input types for (_)GroupByKey(Only).

    Dataflow requires GroupByKey inputs to be key-value typed; the visitor
    coerces `Any`-typed inputs to KV and fixes the output type accordingly.
    """
    # Imported here to avoid circular dependencies.
    from apache_beam.pipeline import PipelineVisitor

    class GroupByKeyInputVisitor(PipelineVisitor):
      """A visitor that replaces `Any` element type for input `PCollection` of
      a `GroupByKey` or `_GroupByKeyOnly` with a `KV` type.

      TODO(BEAM-115): Once Python SDK is compatible with the new Runner API,
      we could directly replace the coder instead of mutating the element type.
      """
      def enter_composite_transform(self, transform_node):
        self.visit_transform(transform_node)

      def visit_transform(self, transform_node):
        # Imported here to avoid circular dependencies.
        # pylint: disable=wrong-import-order, wrong-import-position
        from apache_beam.transforms.core import GroupByKey, _GroupByKeyOnly
        if isinstance(transform_node.transform, (GroupByKey, _GroupByKeyOnly)):
          pcoll = transform_node.inputs[0]
          pcoll.element_type = typehints.coerce_to_kv_type(
              pcoll.element_type, transform_node.full_label)
          key_type, value_type = pcoll.element_type.tuple_types
          if transform_node.outputs:
            # The GBK output is KV[key, Iterable[value]].
            key = DataflowRunner._only_element(transform_node.outputs.keys())
            transform_node.outputs[key].element_type = typehints.KV[
                key_type, typehints.Iterable[value_type]]

    return GroupByKeyInputVisitor()
  @staticmethod
  def _set_pdone_visitor(pipeline):
    """Returns a visitor that gives leaf transforms a synthetic PDone output.

    Pipelines reconstructed from a runner API proto do not have PDone set
    for leaf nodes, which the rest of the translation code expects.
    """
    # Imported here to avoid circular dependencies.
    from apache_beam.pipeline import PipelineVisitor

    class SetPDoneVisitor(PipelineVisitor):
      def __init__(self, pipeline):
        self._pipeline = pipeline

      @staticmethod
      def _maybe_fix_output(transform_node, pipeline):
        # Only leaf transforms (no outputs at all) need fixing.
        if not transform_node.outputs:
          pval = pvalue.PDone(pipeline)
          pval.producer = transform_node
          transform_node.outputs = {None: pval}

      def enter_composite_transform(self, transform_node):
        SetPDoneVisitor._maybe_fix_output(transform_node, self._pipeline)

      def visit_transform(self, transform_node):
        SetPDoneVisitor._maybe_fix_output(transform_node, self._pipeline)

    return SetPDoneVisitor(pipeline)
  @staticmethod
  def side_input_visitor(use_unified_worker=False):
    """Returns a visitor that rewrites ParDo side inputs for Dataflow.

    Iterable side inputs are wrapped (and, for the JRH, rekeyed via an
    injected MapToVoidKey transform); multimap side inputs are coerced to a
    KV element type.

    Args:
      use_unified_worker: whether the unified worker (Runner V2) is in use;
        it accepts iterable side inputs directly.
    """
    # Imported here to avoid circular dependencies.
    # pylint: disable=wrong-import-order, wrong-import-position
    from apache_beam.pipeline import PipelineVisitor
    from apache_beam.transforms.core import ParDo

    class SideInputVisitor(PipelineVisitor):
      """Ensures input `PCollection` used as a side inputs has a `KV` type.

      TODO(BEAM-115): Once Python SDK is compatible with the new Runner API,
      we could directly replace the coder instead of mutating the element type.
      """
      def visit_transform(self, transform_node):
        if isinstance(transform_node.transform, ParDo):
          new_side_inputs = []
          for ix, side_input in enumerate(transform_node.side_inputs):
            access_pattern = side_input._side_input_data().access_pattern
            if access_pattern == common_urns.side_inputs.ITERABLE.urn:
              if use_unified_worker:
                # TODO(BEAM-9173): Stop patching up the access pattern to
                # appease Dataflow when using the UW and hardcode the output
                # type to be Any since the Dataflow JSON and pipeline proto
                # can differ in coders which leads to encoding/decoding issues
                # within the runner.
                side_input.pvalue.element_type = typehints.Any
                new_side_input = _DataflowIterableSideInput(side_input)
              else:
                # Add a map to ('', value) as Dataflow currently only handles
                # keyed side inputs when using the JRH.
                pipeline = side_input.pvalue.pipeline
                new_side_input = _DataflowIterableAsMultimapSideInput(
                    side_input)
                new_side_input.pvalue = beam.pvalue.PCollection(
                    pipeline,
                    element_type=typehints.KV[bytes,
                                              side_input.pvalue.element_type],
                    is_bounded=side_input.pvalue.is_bounded)
                parent = transform_node.parent or pipeline._root_transform()
                map_to_void_key = beam.pipeline.AppliedPTransform(
                    pipeline,
                    beam.Map(lambda x: (b'', x)),
                    transform_node.full_label + '/MapToVoidKey%s' % ix,
                    (side_input.pvalue, ))
                new_side_input.pvalue.producer = map_to_void_key
                map_to_void_key.add_output(new_side_input.pvalue, None)
                parent.add_part(map_to_void_key)
            elif access_pattern == common_urns.side_inputs.MULTIMAP.urn:
              # Ensure the input coder is a KV coder and patch up the
              # access pattern to appease Dataflow.
              side_input.pvalue.element_type = typehints.coerce_to_kv_type(
                  side_input.pvalue.element_type, transform_node.full_label)
              new_side_input = _DataflowMultimapSideInput(side_input)
            else:
              raise ValueError(
                  'Unsupported access pattern for %r: %r' %
                  (transform_node.full_label, access_pattern))
            new_side_inputs.append(new_side_input)
          # Replace on both the applied node and the transform so later
          # translation sees the rewritten side inputs.
          transform_node.side_inputs = new_side_inputs
          transform_node.transform.side_inputs = new_side_inputs

    return SideInputVisitor()
  @staticmethod
  def flatten_input_visitor():
    """Returns a visitor that aligns Flatten input types with its output.

    Dataflow requires the output element type of a Flatten to match its
    inputs, so each input PCollection's element type is overwritten with
    the output's.
    """
    # Imported here to avoid circular dependencies.
    from apache_beam.pipeline import PipelineVisitor

    class FlattenInputVisitor(PipelineVisitor):
      """A visitor that replaces the element type for input ``PCollections``s of
      a ``Flatten`` transform with that of the output ``PCollection``.
      """
      def visit_transform(self, transform_node):
        # Imported here to avoid circular dependencies.
        # pylint: disable=wrong-import-order, wrong-import-position
        from apache_beam import Flatten
        if isinstance(transform_node.transform, Flatten):
          output_pcoll = DataflowRunner._only_element(
              transform_node.outputs.values())
          for input_pcoll in transform_node.inputs:
            input_pcoll.element_type = output_pcoll.element_type

    return FlattenInputVisitor()
  def _check_for_unsupported_fnapi_features(self, pipeline_proto):
    """Raises RuntimeError if the pipeline uses features the Fn API path
    cannot execute.

    Rejected: custom merging window fns (only session windows are allowed to
    merge) and, for any strategy that passes that check, non-standard window
    coders (only global and interval window coders are supported).
    """
    components = pipeline_proto.components
    for windowing_strategy in components.windowing_strategies.values():
      if (windowing_strategy.merge_status ==
          beam_runner_api_pb2.MergeStatus.NEEDS_MERGE and
          windowing_strategy.window_fn.urn not in (
              common_urns.session_windows.urn, )):
        raise RuntimeError(
            'Unsupported merging windowing strategy: %s' %
            windowing_strategy.window_fn.urn)
      elif components.coders[
          windowing_strategy.window_coder_id].spec.urn not in (
              common_urns.coders.GLOBAL_WINDOW.urn,
              common_urns.coders.INTERVAL_WINDOW.urn):
        raise RuntimeError(
            'Unsupported window coder %s for window fn %s' % (
                components.coders[windowing_strategy.window_coder_id].spec.urn,
                windowing_strategy.window_fn.urn))
  def run_pipeline(self, pipeline, options):
    """Remotely executes entire pipeline or parts reachable from node.

    Args:
      pipeline: the Pipeline to translate and submit.
      options: PipelineOptions controlling translation and job submission.

    Returns:
      A DataflowPipelineResult for the submitted job, or a DONE
      PipelineResult when TestOptions.dry_run is set.
    """
    # Label goog-dataflow-notebook if job is started from notebook.
    if is_in_notebook():
      notebook_version = (
          'goog-dataflow-notebook=' +
          beam.version.__version__.replace('.', '_'))
      if options.view_as(GoogleCloudOptions).labels:
        options.view_as(GoogleCloudOptions).labels.append(notebook_version)
      else:
        options.view_as(GoogleCloudOptions).labels = [notebook_version]

    # Import here to avoid adding the dependency for local running scenarios.
    try:
      # pylint: disable=wrong-import-order, wrong-import-position
      from apache_beam.runners.dataflow.internal import apiclient
    except ImportError:
      raise ImportError(
          'Google Cloud Dataflow runner not available, '
          'please install apache_beam[gcp]')

    self._maybe_add_unified_worker_missing_options(options)

    # Convert all side inputs into a form acceptable to Dataflow.
    if apiclient._use_fnapi(options):
      pipeline.visit(
          self.side_input_visitor(apiclient._use_unified_worker(options)))

    # Performing configured PTransform overrides. Note that this is currently
    # done before Runner API serialization, since the new proto needs to contain
    # any added PTransforms.
    pipeline.replace_all(DataflowRunner._PTRANSFORM_OVERRIDES)

    # JRH-only overrides apply when using FnAPI but not the unified worker.
    if (apiclient._use_fnapi(options) and
        not apiclient._use_unified_worker(options)):
      pipeline.replace_all(DataflowRunner._JRH_PTRANSFORM_OVERRIDES)

    use_fnapi = apiclient._use_fnapi(options)
    from apache_beam.transforms import environments
    default_environment = environments.DockerEnvironment.from_container_image(
        apiclient.get_container_image_from_options(options))

    # Snapshot the pipeline in a portable proto.
    self.proto_pipeline, self.proto_context = pipeline.to_runner_api(
        return_context=True, default_environment=default_environment)

    if use_fnapi:
      self._check_for_unsupported_fnapi_features(self.proto_pipeline)

      # Cross language transform require using a pipeline object constructed
      # from the full pipeline proto to make sure that expanded version of
      # external transforms are reflected in the Pipeline job graph.
      from apache_beam import Pipeline
      pipeline = Pipeline.from_runner_api(
          self.proto_pipeline,
          pipeline.runner,
          options,
          allow_proto_holders=True)
      # Pipelines generated from proto do not have output set to PDone set for
      # leaf elements.
      pipeline.visit(self._set_pdone_visitor(pipeline))
      # We need to generate a new context that maps to the new pipeline object.
      self.proto_pipeline, self.proto_context = pipeline.to_runner_api(
          return_context=True, default_environment=default_environment)
    else:
      # Performing configured PTransform overrides which should not be reflected
      # in the proto representation of the graph.
      pipeline.replace_all(DataflowRunner._NON_PORTABLE_PTRANSFORM_OVERRIDES)

    # Add setup_options for all the BeamPlugin imports
    setup_options = options.view_as(SetupOptions)
    plugins = BeamPlugin.get_all_plugin_paths()
    if setup_options.beam_plugins is not None:
      plugins = list(set(plugins + setup_options.beam_plugins))
    setup_options.beam_plugins = plugins

    # Elevate "min_cpu_platform" to pipeline option, but using the existing
    # experiment.
    debug_options = options.view_as(DebugOptions)
    worker_options = options.view_as(WorkerOptions)
    if worker_options.min_cpu_platform:
      debug_options.add_experiment(
          'min_cpu_platform=' + worker_options.min_cpu_platform)

    # Elevate "enable_streaming_engine" to pipeline option, but using the
    # existing experiment.
    google_cloud_options = options.view_as(GoogleCloudOptions)
    if google_cloud_options.enable_streaming_engine:
      debug_options.add_experiment("enable_windmill_service")
      debug_options.add_experiment("enable_streaming_engine")
    else:
      # Reject the inconsistent state where the experiments are set but the
      # corresponding flag is not.
      if (debug_options.lookup_experiment("enable_windmill_service") or
          debug_options.lookup_experiment("enable_streaming_engine")):
        raise ValueError(
            """Streaming engine both disabled and enabled:
        enable_streaming_engine flag is not set, but enable_windmill_service
        and/or enable_streaming_engine experiments are present.
        It is recommended you only set the enable_streaming_engine flag.""")

    dataflow_worker_jar = getattr(worker_options, 'dataflow_worker_jar', None)
    if dataflow_worker_jar is not None:
      if not apiclient._use_fnapi(options):
        _LOGGER.warning(
            'Typical end users should not use this worker jar feature. '
            'It can only be used when FnAPI is enabled.')
      else:
        debug_options.add_experiment('use_staged_dataflow_worker_jar')

    # Make Dataflow workers use FastAvro on Python 3 unless use_avro experiment
    # is set. Note that use_avro is only interpreted by the Dataflow runner
    # at job submission and is not interpreted by Dataflow service or workers,
    # which by default use avro library unless use_fastavro experiment is set.
    if sys.version_info[0] > 2 and (
        not debug_options.lookup_experiment('use_avro')):
      debug_options.add_experiment('use_fastavro')

    self.job = apiclient.Job(options, self.proto_pipeline)

    # Dataflow runner requires a KV type for GBK inputs, hence we enforce that
    # here.
    pipeline.visit(self.group_by_key_input_visitor())

    # Dataflow runner requires output type of the Flatten to be the same as the
    # inputs, hence we enforce that here.
    pipeline.visit(self.flatten_input_visitor())

    # Trigger a traversal of all reachable nodes.
    self.visit_transforms(pipeline, options)

    test_options = options.view_as(TestOptions)
    # If it is a dry run, return without submitting the job.
    if test_options.dry_run:
      result = PipelineResult(PipelineState.DONE)
      result.wait_until_finish = lambda duration=None: None
      return result

    # Get a Dataflow API client and set its options
    self.dataflow_client = apiclient.DataflowApplicationClient(options)

    # Create the job description and send a request to the service. The result
    # can be None if there is no need to send a request to the service (e.g.
    # template creation). If a request was sent and failed then the call will
    # raise an exception.
    result = DataflowPipelineResult(
        self.dataflow_client.create_job(self.job), self)

    # TODO(BEAM-4274): Circular import runners-metrics. Requires refactoring.
    from apache_beam.runners.dataflow.dataflow_metrics import DataflowMetrics
    self._metrics = DataflowMetrics(self.dataflow_client, result, self.job)
    result.metric_results = self._metrics
    return result
def _maybe_add_unified_worker_missing_options(self, options):
# set default beam_fn_api and use_beam_bq_sink experiment if use unified
# worker experiment flag exists, no-op otherwise.
debug_options = options.view_as(DebugOptions)
from apache_beam.runners.dataflow.internal import apiclient
if apiclient._use_unified_worker(options):
if not debug_options.lookup_experiment('beam_fn_api'):
debug_options.add_experiment('beam_fn_api')
if not debug_options.lookup_experiment('use_beam_bq_sink'):
debug_options.add_experiment('use_beam_bq_sink')
def _get_typehint_based_encoding(self, typehint, window_coder):
"""Returns an encoding based on a typehint object."""
return self._get_cloud_encoding(
self._get_coder(typehint, window_coder=window_coder))
@staticmethod
def _get_coder(typehint, window_coder):
"""Returns a coder based on a typehint object."""
if window_coder:
return coders.WindowedValueCoder(
coders.registry.get_coder(typehint), window_coder=window_coder)
return coders.registry.get_coder(typehint)
  def _get_cloud_encoding(self, coder, unused=None):
    """Returns an encoding based on a coder object.

    NOTE(review): the `unused` parameter is never read — it appears to be
    kept only for call-site compatibility; confirm before removing.
    """
    if not isinstance(coder, coders.Coder):
      raise TypeError(
          'Coder object must inherit from coders.Coder: %s.' % str(coder))
    return coder.as_cloud_object(self.proto_context.coders)
def _get_side_input_encoding(self, input_encoding):
"""Returns an encoding for the output of a view transform.
Args:
input_encoding: encoding of current transform's input. Side inputs need
this because the service will check that input and output types match.
Returns:
An encoding that matches the output and input encoding. This is essential
for the View transforms introduced to produce side inputs to a ParDo.
"""
return {
'@type': 'kind:stream',
'component_encodings': [input_encoding],
'is_stream_like': {
'value': True
},
}
  def _get_encoded_output_coder(
      self, transform_node, window_value=True, output_tag=None):
    """Returns the cloud encoding of the coder for the output of a transform.

    Args:
      transform_node: the AppliedPTransform whose output coder is wanted.
      window_value: whether to wrap the coder in a windowed-value coder.
      output_tag: which output to encode; required for external transforms,
        otherwise inferred when the transform has a single output.
    """
    is_external_transform = isinstance(
        transform_node.transform, RunnerAPIPTransformHolder)

    if output_tag in transform_node.outputs:
      element_type = transform_node.outputs[output_tag].element_type
    elif len(transform_node.outputs) == 1:
      output_tag = DataflowRunner._only_element(transform_node.outputs.keys())
      # TODO(robertwb): Handle type hints for multi-output transforms.
      element_type = transform_node.outputs[output_tag].element_type
    elif is_external_transform:
      raise ValueError(
          'For external transforms, output_tag must be specified '
          'since we cannot fallback to a Python only coder.')
    else:
      # TODO(silviuc): Remove this branch (and assert) when typehints are
      # propagated everywhere. Returning an 'Any' as type hint will trigger
      # usage of the fallback coder (i.e., cPickler).
      element_type = typehints.Any
    if window_value:
      # All outputs have the same windowing. So getting the coder from an
      # arbitrary window is fine.
      output_tag = next(iter(transform_node.outputs.keys()))
      window_coder = (
          transform_node.outputs[output_tag].windowing.windowfn.
          get_window_coder())
    else:
      window_coder = None
    return self._get_typehint_based_encoding(element_type, window_coder)
  def _add_step(self, step_kind, step_label, transform_node, side_tags=()):
    """Creates a Step object and adds it to the cache.

    Args:
      step_kind: a TransformNames kind for the new step.
      step_label: user-visible name of the step.
      transform_node: the AppliedPTransform the step is generated for.
      side_tags: tags of additional outputs for multi-output transforms.

    Returns:
      The new apiclient.Step, already appended to the job proto.
    """
    # Import here to avoid adding the dependency for local running scenarios.
    # pylint: disable=wrong-import-order, wrong-import-position
    from apache_beam.runners.dataflow.internal import apiclient
    step = apiclient.Step(step_kind, self._get_unique_step_name())
    self.job.proto.steps.append(step.proto)
    step.add_property(PropertyNames.USER_NAME, step_label)
    # Cache the node/step association for the main output of the transform node.
    # Main output key of external transforms can be ambiguous, so we only tag if
    # there's only one tag instead of None.
    output_tag = (
        DataflowRunner._only_element(transform_node.outputs.keys()) if len(
            transform_node.outputs.keys()) == 1 else None)
    self._cache.cache_output(transform_node, output_tag, step)
    # If side_tags is not () then this is a multi-output transform node and we
    # need to cache the (node, tag, step) for each of the tags used to access
    # the outputs. This is essential because the keys used to search in the
    # cache always contain the tag.
    for tag in side_tags:
      self._cache.cache_output(transform_node, tag, step)

    # Finally, we add the display data items to the pipeline step.
    # If the transform contains no display data then an empty list is added.
    step.add_property(
        PropertyNames.DISPLAY_DATA,
        [
            item.get_dict()
            for item in DisplayData.create_from(transform_node.transform).items
        ])
    return step
  def _add_singleton_step(
      self,
      label,
      full_label,
      tag,
      input_step,
      windowing_strategy,
      access_pattern):
    """Creates a CollectionToSingleton step used to handle ParDo side inputs.

    Args:
      label: step name.
      full_label: user-visible step name.
      tag: output tag of the input step feeding this singleton.
      input_step: the upstream step producing the side-input PCollection.
      windowing_strategy: windowing of the side-input PCollection.
      access_pattern: side-input access pattern urn (iterable or multimap).

    Returns:
      The new apiclient.Step, already appended to the job proto.
    """
    # Import here to avoid adding the dependency for local running scenarios.
    from apache_beam.runners.dataflow.internal import apiclient
    step = apiclient.Step(TransformNames.COLLECTION_TO_SINGLETON, label)
    self.job.proto.steps.append(step.proto)
    step.add_property(PropertyNames.USER_NAME, full_label)
    step.add_property(
        PropertyNames.PARALLEL_INPUT,
        {
            '@type': 'OutputReference',
            PropertyNames.STEP_NAME: input_step.proto.name,
            PropertyNames.OUTPUT_NAME: input_step.get_output(tag)
        })
    step.encoding = self._get_side_input_encoding(input_step.encoding)

    output_info = {
        PropertyNames.USER_NAME: '%s.%s' % (full_label, PropertyNames.OUTPUT),
        PropertyNames.ENCODING: step.encoding,
        PropertyNames.OUTPUT_NAME: PropertyNames.OUT
    }
    # Multimap side inputs are materialized in an indexed (keyed) format.
    if common_urns.side_inputs.MULTIMAP.urn == access_pattern:
      output_info[PropertyNames.USE_INDEXED_FORMAT] = True
    step.add_property(PropertyNames.OUTPUT_INFO, [output_info])

    step.add_property(
        PropertyNames.WINDOWING_STRATEGY,
        self.serialize_windowing_strategy(windowing_strategy))
    return step
  def run_Impulse(self, transform_node, options):
    """Translates an Impulse transform into a Dataflow Read step.

    Legacy (non-streaming-engine FnAPI) streaming jobs are seeded from a
    synthetic Pub/Sub subscription; all other jobs use a native 'impulse'
    source carrying one encoded empty-bytes windowed value.
    """
    standard_options = options.view_as(StandardOptions)
    debug_options = options.view_as(DebugOptions)
    use_fn_api = (
        debug_options.experiments and
        'beam_fn_api' in debug_options.experiments)
    use_streaming_engine = (
        debug_options.experiments and
        'enable_streaming_engine' in debug_options.experiments and
        'enable_windmill_service' in debug_options.experiments)

    step = self._add_step(
        TransformNames.READ, transform_node.full_label, transform_node)
    if (standard_options.streaming and
        (not use_fn_api or not use_streaming_engine)):
      step.add_property(PropertyNames.FORMAT, 'pubsub')
      step.add_property(PropertyNames.PUBSUB_SUBSCRIPTION, '_starting_signal/')
    else:
      step.add_property(PropertyNames.FORMAT, 'impulse')
      # The single impulse element: b'' in the global window, nested-encoded.
      encoded_impulse_element = coders.WindowedValueCoder(
          coders.BytesCoder(),
          coders.coders.GlobalWindowCoder()).get_impl().encode_nested(
              window.GlobalWindows.windowed_value(b''))

      if use_fn_api:
        encoded_impulse_as_str = self.byte_array_to_json_string(
            encoded_impulse_element)
      else:
        encoded_impulse_as_str = base64.b64encode(
            encoded_impulse_element).decode('ascii')
      step.add_property(PropertyNames.IMPULSE_ELEMENT, encoded_impulse_as_str)

    step.encoding = self._get_encoded_output_coder(transform_node)
    step.add_property(
        PropertyNames.OUTPUT_INFO,
        [{
            PropertyNames.USER_NAME: (
                '%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
            PropertyNames.ENCODING: step.encoding,
            PropertyNames.OUTPUT_NAME: PropertyNames.OUT
        }])
  def run_Flatten(self, transform_node, options):
    """Translates a Flatten transform into a Dataflow Flatten step,
    referencing each cached input step's output."""
    step = self._add_step(
        TransformNames.FLATTEN, transform_node.full_label, transform_node)
    inputs = []
    for one_input in transform_node.inputs:
      input_step = self._cache.get_pvalue(one_input)
      inputs.append({
          '@type': 'OutputReference',
          PropertyNames.STEP_NAME: input_step.proto.name,
          PropertyNames.OUTPUT_NAME: input_step.get_output(one_input.tag)
      })
    step.add_property(PropertyNames.INPUTS, inputs)
    step.encoding = self._get_encoded_output_coder(transform_node)
    step.add_property(
        PropertyNames.OUTPUT_INFO,
        [{
            PropertyNames.USER_NAME: (
                '%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
            PropertyNames.ENCODING: step.encoding,
            PropertyNames.OUTPUT_NAME: PropertyNames.OUT
        }])
  def apply_WriteToBigQuery(self, transform, pcoll, options):
    """Routes WriteToBigQuery either to the default expansion or, for batch
    pipelines without the new-sink experiment, to the native BigQuerySink.

    Raises:
      RuntimeError: if schema auto-detection or streaming WRITE_TRUNCATE is
        requested on the native sink path.
    """
    # Make sure this is the WriteToBigQuery class that we expected, and that
    # users did not specifically request the new BQ sink by passing experiment
    # flag.
    # TODO(BEAM-6928): Remove this function for release 2.14.0.
    experiments = options.view_as(DebugOptions).experiments or []
    # Imported here to avoid a hard dependency for local-running scenarios.
    from apache_beam.runners.dataflow.internal import apiclient
    use_fnapi = apiclient._use_fnapi(options)
    if (not isinstance(transform, beam.io.WriteToBigQuery) or use_fnapi or
        'use_beam_bq_sink' in experiments):
      # Use the standard composite expansion (the "new" BQ sink).
      return self.apply_PTransform(transform, pcoll, options)
    if transform.schema == beam.io.gcp.bigquery.SCHEMA_AUTODETECT:
      raise RuntimeError(
          'Schema auto-detection is not supported on the native sink')
    standard_options = options.view_as(StandardOptions)
    if standard_options.streaming:
      if (transform.write_disposition ==
          beam.io.BigQueryDisposition.WRITE_TRUNCATE):
        raise RuntimeError('Can not use write truncation mode in streaming')
      return self.apply_PTransform(transform, pcoll, options)
    else:
      # Batch path: translate to the legacy native BigQuerySink.
      from apache_beam.io.gcp.bigquery_tools import parse_table_schema_from_json
      schema = None
      if transform.schema:
        schema = parse_table_schema_from_json(json.dumps(transform.schema))
      return pcoll | 'WriteToBigQuery' >> beam.io.Write(
          beam.io.BigQuerySink(
              transform.table_reference.tableId,
              transform.table_reference.datasetId,
              transform.table_reference.projectId,
              schema,
              transform.create_disposition,
              transform.write_disposition,
              kms_key=transform.kms_key))
  def apply_GroupByKey(self, transform, pcoll, options):
    """Validates the input coder of a GroupByKey and returns its output.

    Raises:
      ValueError: if the inferred coder is not a key-value coder.
    """
    # Infer coder of parent.
    #
    # TODO(ccy): make Coder inference and checking less specialized and more
    # comprehensive.
    parent = pcoll.producer
    if parent:
      coder = parent.transform._infer_output_coder()  # pylint: disable=protected-access
    # NOTE(review): if `parent` were falsy, `coder` would be unbound here and
    # the next line would raise NameError — presumably every PCollection that
    # reaches this point has a producer; confirm before relying on that.
    if not coder:
      coder = self._get_coder(pcoll.element_type or typehints.Any, None)
    if not coder.is_kv_coder():
      raise ValueError((
          'Coder for the GroupByKey operation "%s" is not a '
          'key-value coder: %s.') % (transform.label, coder))
    # TODO(robertwb): Update the coder itself if it changed.
    coders.registry.verify_deterministic(
        coder.key_coder(), 'GroupByKey operation "%s"' % transform.label)
    return pvalue.PCollection.from_(pcoll)
  def run_GroupByKey(self, transform_node, options):
    """Adds a Dataflow GroupByKey step for the given transform node."""
    input_tag = transform_node.inputs[0].tag
    input_step = self._cache.get_pvalue(transform_node.inputs[0])
    step = self._add_step(
        TransformNames.GROUP, transform_node.full_label, transform_node)
    step.add_property(
        PropertyNames.PARALLEL_INPUT,
        {
            '@type': 'OutputReference',
            PropertyNames.STEP_NAME: input_step.proto.name,
            PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)
        })
    step.encoding = self._get_encoded_output_coder(transform_node)
    step.add_property(
        PropertyNames.OUTPUT_INFO,
        [{
            PropertyNames.USER_NAME: (
                '%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
            PropertyNames.ENCODING: step.encoding,
            PropertyNames.OUTPUT_NAME: PropertyNames.OUT
        }])
    # The serialized windowing strategy tells the service how elements are
    # assigned to windows before grouping.
    windowing = transform_node.transform.get_windowing(transform_node.inputs)
    step.add_property(
        PropertyNames.SERIALIZED_FN,
        self.serialize_windowing_strategy(windowing))
def run_RunnerAPIPTransformHolder(self, transform_node, options):
"""Adding Dataflow runner job description for transform holder objects.
These holder transform objects are generated for some of the transforms that
become available after a cross-language transform expansion, usually if the
corresponding transform object cannot be generated in Python SDK (for
example, a python `ParDo` transform cannot be generated without a serialized
Python `DoFn` object).
"""
urn = transform_node.transform.proto().urn
assert urn
# TODO(chamikara): support other transforms that requires holder objects in
# Python SDk.
if common_urns.primitives.PAR_DO.urn == urn:
self.run_ParDo(transform_node, options)
else:
raise NotImplementedError(
'%s uses unsupported URN: %s' % (transform_node.full_label, urn))
def run_ParDo(self, transform_node, options):
transform = transform_node.transform
input_tag = transform_node.inputs[0].tag
input_step = self._cache.get_pvalue(transform_node.inputs[0])
is_external_transform = isinstance(transform, RunnerAPIPTransformHolder)
# Attach side inputs.
si_dict = {}
all_input_labels = transform_node.input_tags_to_preserve
si_labels = {}
full_label_counts = defaultdict(int)
lookup_label = lambda side_pval: si_labels[side_pval]
named_inputs = transform_node.named_inputs()
label_renames = {}
for ix, side_pval in enumerate(transform_node.side_inputs):
assert isinstance(side_pval, AsSideInput)
step_name = 'SideInput-' + self._get_unique_step_name()
si_label = ((SIDE_INPUT_PREFIX + '%d-%s') %
(ix, transform_node.full_label)
if side_pval.pvalue not in all_input_labels else
all_input_labels[side_pval.pvalue])
old_label = (SIDE_INPUT_PREFIX + '%d') % ix
if not is_external_transform:
label_renames[old_label] = si_label
assert old_label in named_inputs
pcollection_label = '%s.%s' % (
side_pval.pvalue.producer.full_label.split('/')[-1],
side_pval.pvalue.tag if side_pval.pvalue.tag else 'out')
si_full_label = '%s/%s(%s.%s)' % (
transform_node.full_label,
side_pval.__class__.__name__,
pcollection_label,
full_label_counts[pcollection_label])
# Count the number of times the same PCollection is a side input
# to the same ParDo.
full_label_counts[pcollection_label] += 1
self._add_singleton_step(
step_name,
si_full_label,
side_pval.pvalue.tag,
self._cache.get_pvalue(side_pval.pvalue),
side_pval.pvalue.windowing,
side_pval._side_input_data().access_pattern)
si_dict[si_label] = {
'@type': 'OutputReference',
PropertyNames.STEP_NAME: step_name,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT
}
si_labels[side_pval] = si_label
# Now create the step for the ParDo transform being handled.
transform_name = transform_node.full_label.rsplit('/', 1)[-1]
step = self._add_step(
TransformNames.DO,
transform_node.full_label +
('/{}'.format(transform_name) if transform_node.side_inputs else ''),
transform_node,
transform_node.transform.output_tags)
# Import here to avoid adding the dependency for local running scenarios.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.runners.dataflow.internal import apiclient
transform_proto = self.proto_context.transforms.get_proto(transform_node)
transform_id = self.proto_context.transforms.get_id(transform_node)
use_fnapi = apiclient._use_fnapi(options)
use_unified_worker = apiclient._use_unified_worker(options)
# The data transmitted in SERIALIZED_FN is different depending on whether
# this is a fnapi pipeline or not.
if (use_fnapi and
(transform_proto.spec.urn == common_urns.primitives.PAR_DO.urn or
use_unified_worker)):
# Patch side input ids to be unique across a given pipeline.
if (label_renames and
transform_proto.spec.urn == common_urns.primitives.PAR_DO.urn):
# Patch PTransform proto.
for old, new in iteritems(label_renames):
transform_proto.inputs[new] = transform_proto.inputs[old]
del transform_proto.inputs[old]
# Patch ParDo proto.
proto_type, _ = beam.PTransform._known_urns[transform_proto.spec.urn]
proto = proto_utils.parse_Bytes(
transform_proto.spec.payload, proto_type)
for old, new in iteritems(label_renames):
proto.side_inputs[new].CopyFrom(proto.side_inputs[old])
del proto.side_inputs[old]
transform_proto.spec.payload = proto.SerializeToString()
# We need to update the pipeline proto.
del self.proto_pipeline.components.transforms[transform_id]
(
self.proto_pipeline.components.transforms[transform_id].CopyFrom(
transform_proto))
serialized_data = transform_id
else:
serialized_data = pickler.dumps(
self._pardo_fn_data(transform_node, lookup_label))
step.add_property(PropertyNames.SERIALIZED_FN, serialized_data)
# TODO(BEAM-8882): Enable once dataflow service doesn't reject this.
# step.add_property(PropertyNames.PIPELINE_PROTO_TRANSFORM_ID, transform_id)
step.add_property(
PropertyNames.PARALLEL_INPUT,
{
'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)
})
# Add side inputs if any.
step.add_property(PropertyNames.NON_PARALLEL_INPUTS, si_dict)
# Generate description for the outputs. The output names
# will be 'None' for main output and '<tag>' for a tagged output.
outputs = []
all_output_tags = transform_proto.outputs.keys()
# Some external transforms require output tags to not be modified.
# So we randomly select one of the output tags as the main output and
# leave others as side outputs. Transform execution should not change
# dependending on which output tag we choose as the main output here.
# Also, some SDKs do not work correctly if output tags are modified. So for
# external transforms, we leave tags unmodified.
#
# Python SDK uses 'None' as the tag of the main output.
main_output_tag = (all_output_tags[0] if is_external_transform else 'None')
step.encoding = self._get_encoded_output_coder(
transform_node, output_tag=main_output_tag)
side_output_tags = set(all_output_tags).difference({main_output_tag})
# Add the main output to the description.
outputs.append({
PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: main_output_tag
})
for side_tag in side_output_tags:
# The assumption here is that all outputs will have the same typehint
# and coder as the main output. This is certainly the case right now
# but conceivably it could change in the future.
encoding = self._get_encoded_output_coder(
transform_node, output_tag=side_tag)
outputs.append({
PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, side_tag)),
PropertyNames.ENCODING: encoding,
PropertyNames.OUTPUT_NAME: side_tag
})
step.add_property(PropertyNames.OUTPUT_INFO, outputs)
# Add the restriction encoding if we are a splittable DoFn
# and are using the Fn API on the unified worker.
restriction_coder = transform.get_restriction_coder()
if restriction_coder:
step.add_property(
PropertyNames.RESTRICTION_ENCODING,
self._get_cloud_encoding(restriction_coder))
if options.view_as(StandardOptions).streaming:
is_stateful_dofn = (
transform.is_pardo_with_stateful_dofn if is_external_transform else
DoFnSignature(transform.dofn).is_stateful_dofn())
if is_stateful_dofn:
step.add_property(PropertyNames.USES_KEYED_STATE, 'true')
@staticmethod
def _pardo_fn_data(transform_node, get_label):
transform = transform_node.transform
si_tags_and_types = [ # pylint: disable=protected-access
(get_label(side_pval), side_pval.__class__, side_pval._view_options())
for side_pval in transform_node.side_inputs]
return (
transform.fn,
transform.args,
transform.kwargs,
si_tags_and_types,
transform_node.inputs[0].windowing)
  def run_CombineValuesReplacement(self, transform_node, options):
    """Adds a Dataflow Combine step for a replaced CombineValues transform."""
    # The runner wrapped the real CombineValues; the actual transform is one
    # level down on the replacement node.
    transform = transform_node.transform.transform
    input_tag = transform_node.inputs[0].tag
    input_step = self._cache.get_pvalue(transform_node.inputs[0])
    step = self._add_step(
        TransformNames.COMBINE, transform_node.full_label, transform_node)
    transform_id = self.proto_context.transforms.get_id(transform_node.parent)
    # The data transmitted in SERIALIZED_FN is different depending on whether
    # this is a fnapi pipeline or not.
    from apache_beam.runners.dataflow.internal import apiclient
    use_fnapi = apiclient._use_fnapi(options)
    if use_fnapi:
      # Fnapi pipelines send the transform ID of the CombineValues transform's
      # parent composite because Dataflow expects the ID of a CombinePerKey
      # transform.
      serialized_data = transform_id
    else:
      # Combiner functions do not take deferred side-inputs (i.e. PValues) and
      # therefore the code to handle extra args/kwargs is simpler than for the
      # DoFn's of the ParDo transform. In the last, empty argument is where
      # side inputs information would go.
      serialized_data = pickler.dumps(
          (transform.fn, transform.args, transform.kwargs, ()))
    step.add_property(PropertyNames.SERIALIZED_FN, serialized_data)
    # TODO(BEAM-8882): Enable once dataflow service doesn't reject this.
    # step.add_property(PropertyNames.PIPELINE_PROTO_TRANSFORM_ID, transform_id)
    step.add_property(
        PropertyNames.PARALLEL_INPUT,
        {
            '@type': 'OutputReference',
            PropertyNames.STEP_NAME: input_step.proto.name,
            PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)
        })
    # Note that the accumulator must not have a WindowedValue encoding, while
    # the output of this step does in fact have a WindowedValue encoding.
    accumulator_encoding = self._get_cloud_encoding(
        transform.fn.get_accumulator_coder())
    output_encoding = self._get_encoded_output_coder(transform_node)
    step.encoding = output_encoding
    step.add_property(PropertyNames.ENCODING, accumulator_encoding)
    # Generate description for main output 'out.'
    outputs = []
    # Add the main output to the description.
    outputs.append({
        PropertyNames.USER_NAME: (
            '%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
        PropertyNames.ENCODING: step.encoding,
        PropertyNames.OUTPUT_NAME: PropertyNames.OUT
    })
    step.add_property(PropertyNames.OUTPUT_INFO, outputs)
def apply_Read(self, transform, pbegin, options):
if hasattr(transform.source, 'format'):
# Consider native Read to be a primitive for dataflow.
return beam.pvalue.PCollection.from_(pbegin)
else:
return self.apply_PTransform(transform, pbegin, options)
  def run_Read(self, transform_node, options):
    """Adds a Dataflow Read step for the source wrapped by transform_node.

    Dispatches on the source's ``format`` attribute: custom sources (no
    format), 'text', 'bigquery' and 'pubsub' each get their own set of step
    properties; any other format raises ValueError.
    """
    transform = transform_node.transform
    step = self._add_step(
        TransformNames.READ, transform_node.full_label, transform_node)
    # TODO(mairbek): refactor if-else tree to use registerable functions.
    # Initialize the source specific properties.
    standard_options = options.view_as(StandardOptions)
    if not hasattr(transform.source, 'format'):
      # If a format is not set, we assume the source to be a custom source.
      source_dict = {}
      source_dict['spec'] = {
          '@type': names.SOURCE_TYPE,
          names.SERIALIZED_SOURCE_KEY: pickler.dumps(transform.source)
      }
      try:
        source_dict['metadata'] = {
            'estimated_size_bytes': json_value.get_typed_value_descriptor(
                transform.source.estimate_size())
        }
      except error.RuntimeValueProviderError:
        # Size estimation is best effort, and this error is by value provider.
        _LOGGER.info(
            'Could not estimate size of source %r due to ' + \
            'RuntimeValueProviderError', transform.source)
      except Exception:  # pylint: disable=broad-except
        # Size estimation is best effort. So we log the error and continue.
        _LOGGER.info(
            'Could not estimate size of source %r due to an exception: %s',
            transform.source,
            traceback.format_exc())
      step.add_property(PropertyNames.SOURCE_STEP_INPUT, source_dict)
    elif transform.source.format == 'text':
      step.add_property(PropertyNames.FILE_PATTERN, transform.source.path)
    elif transform.source.format == 'bigquery':
      if standard_options.streaming:
        raise ValueError(
            'BigQuery source is not currently available for use '
            'in streaming pipelines.')
      debug_options = options.view_as(DebugOptions)
      use_fn_api = (
          debug_options.experiments and
          'beam_fn_api' in debug_options.experiments)
      if use_fn_api:
        raise ValueError(BQ_SOURCE_UW_ERROR)
      step.add_property(PropertyNames.BIGQUERY_EXPORT_FORMAT, 'FORMAT_AVRO')
      # TODO(silviuc): Add table validation if transform.source.validate.
      if transform.source.table_reference is not None:
        step.add_property(
            PropertyNames.BIGQUERY_DATASET,
            transform.source.table_reference.datasetId)
        step.add_property(
            PropertyNames.BIGQUERY_TABLE,
            transform.source.table_reference.tableId)
        # If project owning the table was not specified then the project owning
        # the workflow (current project) will be used.
        if transform.source.table_reference.projectId is not None:
          step.add_property(
              PropertyNames.BIGQUERY_PROJECT,
              transform.source.table_reference.projectId)
      elif transform.source.query is not None:
        step.add_property(PropertyNames.BIGQUERY_QUERY, transform.source.query)
        step.add_property(
            PropertyNames.BIGQUERY_USE_LEGACY_SQL,
            transform.source.use_legacy_sql)
        step.add_property(
            PropertyNames.BIGQUERY_FLATTEN_RESULTS,
            transform.source.flatten_results)
      else:
        raise ValueError(
            'BigQuery source %r must specify either a table or'
            ' a query' % transform.source)
      if transform.source.kms_key is not None:
        step.add_property(
            PropertyNames.BIGQUERY_KMS_KEY, transform.source.kms_key)
    elif transform.source.format == 'pubsub':
      if not standard_options.streaming:
        raise ValueError(
            'Cloud Pub/Sub is currently available for use '
            'only in streaming pipelines.')
      # Only one of topic or subscription should be set.
      if transform.source.full_subscription:
        step.add_property(
            PropertyNames.PUBSUB_SUBSCRIPTION,
            transform.source.full_subscription)
      elif transform.source.full_topic:
        step.add_property(
            PropertyNames.PUBSUB_TOPIC, transform.source.full_topic)
      if transform.source.id_label:
        step.add_property(
            PropertyNames.PUBSUB_ID_LABEL, transform.source.id_label)
      if transform.source.with_attributes:
        # Setting this property signals Dataflow runner to return full
        # PubsubMessages instead of just the data part of the payload.
        step.add_property(PropertyNames.PUBSUB_SERIALIZED_ATTRIBUTES_FN, '')
      if transform.source.timestamp_attribute is not None:
        step.add_property(
            PropertyNames.PUBSUB_TIMESTAMP_ATTRIBUTE,
            transform.source.timestamp_attribute)
    else:
      raise ValueError(
          'Source %r has unexpected format %s.' %
          (transform.source, transform.source.format))
    if not hasattr(transform.source, 'format'):
      step.add_property(PropertyNames.FORMAT, names.SOURCE_FORMAT)
    else:
      step.add_property(PropertyNames.FORMAT, transform.source.format)
    # Wrap coder in WindowedValueCoder: this is necessary as the encoding of a
    # step should be the type of value outputted by each step. Read steps
    # automatically wrap output values in a WindowedValue wrapper, if necessary.
    # This is also necessary for proper encoding for size estimation.
    # Using a GlobalWindowCoder as a place holder instead of the default
    # PickleCoder because GlobalWindowCoder is known coder.
    # TODO(robertwb): Query the collection for the windowfn to extract the
    # correct coder.
    coder = coders.WindowedValueCoder(
        coders.registry.get_coder(transform_node.outputs[None].element_type),
        coders.coders.GlobalWindowCoder())
    step.encoding = self._get_cloud_encoding(coder)
    step.add_property(
        PropertyNames.OUTPUT_INFO,
        [{
            PropertyNames.USER_NAME: (
                '%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
            PropertyNames.ENCODING: step.encoding,
            PropertyNames.OUTPUT_NAME: PropertyNames.OUT
        }])
  def run__NativeWrite(self, transform_node, options):
    """Adds a Dataflow Write step for the native sink in transform_node.

    Supports 'text', 'bigquery' and 'pubsub' sink formats; anything else
    raises ValueError.
    """
    transform = transform_node.transform
    input_tag = transform_node.inputs[0].tag
    input_step = self._cache.get_pvalue(transform_node.inputs[0])
    step = self._add_step(
        TransformNames.WRITE, transform_node.full_label, transform_node)
    # TODO(mairbek): refactor if-else tree to use registerable functions.
    # Initialize the sink specific properties.
    if transform.sink.format == 'text':
      # Note that it is important to use typed properties (@type/value dicts)
      # for non-string properties and also for empty strings. For example,
      # in the code below the num_shards must have type and also
      # file_name_suffix and shard_name_template (could be empty strings).
      step.add_property(
          PropertyNames.FILE_NAME_PREFIX,
          transform.sink.file_name_prefix,
          with_type=True)
      step.add_property(
          PropertyNames.FILE_NAME_SUFFIX,
          transform.sink.file_name_suffix,
          with_type=True)
      step.add_property(
          PropertyNames.SHARD_NAME_TEMPLATE,
          transform.sink.shard_name_template,
          with_type=True)
      if transform.sink.num_shards > 0:
        step.add_property(
            PropertyNames.NUM_SHARDS, transform.sink.num_shards, with_type=True)
      # TODO(silviuc): Implement sink validation.
      step.add_property(PropertyNames.VALIDATE_SINK, False, with_type=True)
    elif transform.sink.format == 'bigquery':
      # TODO(silviuc): Add table validation if transform.sink.validate.
      step.add_property(
          PropertyNames.BIGQUERY_DATASET,
          transform.sink.table_reference.datasetId)
      step.add_property(
          PropertyNames.BIGQUERY_TABLE, transform.sink.table_reference.tableId)
      # If project owning the table was not specified then the project owning
      # the workflow (current project) will be used.
      if transform.sink.table_reference.projectId is not None:
        step.add_property(
            PropertyNames.BIGQUERY_PROJECT,
            transform.sink.table_reference.projectId)
      step.add_property(
          PropertyNames.BIGQUERY_CREATE_DISPOSITION,
          transform.sink.create_disposition)
      step.add_property(
          PropertyNames.BIGQUERY_WRITE_DISPOSITION,
          transform.sink.write_disposition)
      if transform.sink.table_schema is not None:
        step.add_property(
            PropertyNames.BIGQUERY_SCHEMA, transform.sink.schema_as_json())
      if transform.sink.kms_key is not None:
        step.add_property(
            PropertyNames.BIGQUERY_KMS_KEY, transform.sink.kms_key)
    elif transform.sink.format == 'pubsub':
      standard_options = options.view_as(StandardOptions)
      if not standard_options.streaming:
        raise ValueError(
            'Cloud Pub/Sub is currently available for use '
            'only in streaming pipelines.')
      step.add_property(PropertyNames.PUBSUB_TOPIC, transform.sink.full_topic)
      if transform.sink.id_label:
        step.add_property(
            PropertyNames.PUBSUB_ID_LABEL, transform.sink.id_label)
      if transform.sink.with_attributes:
        # Setting this property signals Dataflow runner that the PCollection
        # contains PubsubMessage objects instead of just raw data.
        step.add_property(PropertyNames.PUBSUB_SERIALIZED_ATTRIBUTES_FN, '')
      if transform.sink.timestamp_attribute is not None:
        step.add_property(
            PropertyNames.PUBSUB_TIMESTAMP_ATTRIBUTE,
            transform.sink.timestamp_attribute)
    else:
      raise ValueError(
          'Sink %r has unexpected format %s.' %
          (transform.sink, transform.sink.format))
    step.add_property(PropertyNames.FORMAT, transform.sink.format)
    # Wrap coder in WindowedValueCoder: this is necessary for proper encoding
    # for size estimation. Using a GlobalWindowCoder as a place holder instead
    # of the default PickleCoder because GlobalWindowCoder is known coder.
    # TODO(robertwb): Query the collection for the windowfn to extract the
    # correct coder.
    coder = coders.WindowedValueCoder(
        transform.sink.coder, coders.coders.GlobalWindowCoder())
    step.encoding = self._get_cloud_encoding(coder)
    step.add_property(PropertyNames.ENCODING, step.encoding)
    step.add_property(
        PropertyNames.PARALLEL_INPUT,
        {
            '@type': 'OutputReference',
            PropertyNames.STEP_NAME: input_step.proto.name,
            PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)
        })
  def run_TestStream(self, transform_node, options):
    """Adds a 'test_stream'-format Read step encoding the TestStream events.

    Each TestStream event (element batches, processing-time advances,
    watermark advances) is serialized into a TestStreamPayload proto that is
    attached to the step. Streaming mode is required.
    """
    from apache_beam.testing.test_stream import ElementEvent
    from apache_beam.testing.test_stream import ProcessingTimeEvent
    from apache_beam.testing.test_stream import WatermarkEvent
    standard_options = options.view_as(StandardOptions)
    if not standard_options.streaming:
      raise ValueError(
          'TestStream is currently available for use '
          'only in streaming pipelines.')
    transform = transform_node.transform
    step = self._add_step(
        TransformNames.READ, transform_node.full_label, transform_node)
    step.add_property(PropertyNames.FORMAT, 'test_stream')
    test_stream_payload = beam_runner_api_pb2.TestStreamPayload()
    # TestStream source doesn't do any decoding of elements,
    # so we won't set test_stream_payload.coder_id.
    output_coder = transform._infer_output_coder()  # pylint: disable=protected-access
    for event in transform._events:
      new_event = test_stream_payload.events.add()
      if isinstance(event, ElementEvent):
        for tv in event.timestamped_values:
          element = new_event.element_event.elements.add()
          element.encoded_element = output_coder.encode(tv.value)
          element.timestamp = tv.timestamp.micros
      elif isinstance(event, ProcessingTimeEvent):
        new_event.processing_time_event.advance_duration = (
            event.advance_by.micros)
      elif isinstance(event, WatermarkEvent):
        new_event.watermark_event.new_watermark = event.new_watermark.micros
    serialized_payload = self.byte_array_to_json_string(
        test_stream_payload.SerializeToString())
    step.add_property(PropertyNames.SERIALIZED_TEST_STREAM, serialized_payload)
    step.encoding = self._get_encoded_output_coder(transform_node)
    step.add_property(
        PropertyNames.OUTPUT_INFO,
        [{
            PropertyNames.USER_NAME: (
                '%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
            PropertyNames.ENCODING: step.encoding,
            PropertyNames.OUTPUT_NAME: PropertyNames.OUT
        }])
  # We must mark this method as not a test or else its name is a matcher for
  # nosetest tests.
  run_TestStream.__test__ = False  # type: ignore[attr-defined]
@classmethod
def serialize_windowing_strategy(cls, windowing):
from apache_beam.runners import pipeline_context
context = pipeline_context.PipelineContext()
windowing_proto = windowing.to_runner_api(context)
return cls.byte_array_to_json_string(
beam_runner_api_pb2.MessageWithComponents(
components=context.to_runner_api(),
windowing_strategy=windowing_proto).SerializeToString())
  @classmethod
  def deserialize_windowing_strategy(cls, serialized_data):
    """Inverse of serialize_windowing_strategy; returns a Windowing object."""
    # Imported here to avoid circular dependencies.
    # pylint: disable=wrong-import-order, wrong-import-position
    from apache_beam.runners import pipeline_context
    from apache_beam.transforms.core import Windowing
    proto = beam_runner_api_pb2.MessageWithComponents()
    proto.ParseFromString(cls.json_string_to_byte_array(serialized_data))
    return Windowing.from_runner_api(
        proto.windowing_strategy,
        pipeline_context.PipelineContext(proto.components))
@staticmethod
def byte_array_to_json_string(raw_bytes):
"""Implements org.apache.beam.sdk.util.StringUtils.byteArrayToJsonString."""
return quote(raw_bytes)
@staticmethod
def json_string_to_byte_array(encoded_string):
"""Implements org.apache.beam.sdk.util.StringUtils.jsonStringToByteArray."""
return unquote_to_bytes(encoded_string)
def get_default_gcp_region(self):
"""Get a default value for Google Cloud region according to
https://cloud.google.com/compute/docs/gcloud-compute/#default-properties.
If no default can be found, returns None.
"""
environment_region = os.environ.get('CLOUDSDK_COMPUTE_REGION')
if environment_region:
_LOGGER.info(
'Using default GCP region %s from $CLOUDSDK_COMPUTE_REGION',
environment_region)
return environment_region
try:
cmd = ['gcloud', 'config', 'get-value', 'compute/region']
# Use subprocess.DEVNULL in Python 3.3+.
if hasattr(subprocess, 'DEVNULL'):
DEVNULL = subprocess.DEVNULL
else:
DEVNULL = open(os.devnull, 'ab')
raw_output = processes.check_output(cmd, stderr=DEVNULL)
formatted_output = raw_output.decode('utf-8').strip()
if formatted_output:
_LOGGER.info(
'Using default GCP region %s from `%s`',
formatted_output,
' '.join(cmd))
return formatted_output
except RuntimeError:
pass
return None
class _DataflowSideInput(beam.pvalue.AsSideInput):
  """Wraps a side input as a dataflow-compatible side input."""
  def _view_options(self):
    # Dataflow carries the full SideInputData under the 'data' key.
    return {
        'data': self._data,
    }
  def _side_input_data(self):
    # self._data is populated by the concrete subclass' __init__.
    return self._data
class _DataflowIterableAsMultimapSideInput(_DataflowSideInput):
  """Wraps an iterable side input as dataflow-compatible side input.

  The ITERABLE access pattern is re-expressed as a MULTIMAP lookup under the
  empty key, with the original view_fn applied to the resulting iterable.
  """
  def __init__(self, side_input):
    # pylint: disable=protected-access
    # Keep a reference to the wrapped PCollection, consistent with the other
    # _Dataflow*SideInput wrappers (previously omitted here).
    self.pvalue = side_input.pvalue
    side_input_data = side_input._side_input_data()
    assert (
        side_input_data.access_pattern == common_urns.side_inputs.ITERABLE.urn)
    iterable_view_fn = side_input_data.view_fn
    self._data = beam.pvalue.SideInputData(
        common_urns.side_inputs.MULTIMAP.urn,
        side_input_data.window_mapping_fn,
        lambda multimap: iterable_view_fn(multimap[b'']))
class _DataflowIterableSideInput(_DataflowSideInput):
  """Wraps an iterable side input as dataflow-compatible side input."""
  def __init__(self, side_input):
    # pylint: disable=protected-access
    self.pvalue = side_input.pvalue
    side_input_data = side_input._side_input_data()
    # Only ITERABLE-access side inputs may be wrapped by this class.
    assert (
        side_input_data.access_pattern == common_urns.side_inputs.ITERABLE.urn)
    # Access pattern and functions are passed through unchanged.
    self._data = beam.pvalue.SideInputData(
        common_urns.side_inputs.ITERABLE.urn,
        side_input_data.window_mapping_fn,
        side_input_data.view_fn)
class _DataflowMultimapSideInput(_DataflowSideInput):
  """Wraps a multimap side input as dataflow-compatible side input."""
  def __init__(self, side_input):
    # pylint: disable=protected-access
    self.pvalue = side_input.pvalue
    side_input_data = side_input._side_input_data()
    # Only MULTIMAP-access side inputs may be wrapped by this class.
    assert (
        side_input_data.access_pattern == common_urns.side_inputs.MULTIMAP.urn)
    # Access pattern and functions are passed through unchanged.
    self._data = beam.pvalue.SideInputData(
        common_urns.side_inputs.MULTIMAP.urn,
        side_input_data.window_mapping_fn,
        side_input_data.view_fn)
class DataflowPipelineResult(PipelineResult):
  """Represents the state of a pipeline run on the Dataflow service."""
  def __init__(self, job, runner):
    """Initialize a new DataflowPipelineResult instance.
    Args:
      job: Job message from the Dataflow API. Could be :data:`None` if a job
        request was not sent to Dataflow service (e.g. template jobs).
      runner: DataflowRunner instance.
    """
    self._job = job
    self._runner = runner
    # Populated externally once metrics have been fetched for this job.
    self.metric_results = None
  def _update_job(self):
    # We need the job id to be able to update job information. There is no need
    # to update the job if we are in a known terminal state.
    if self.has_job and not self.is_in_terminal_state():
      self._job = self._runner.dataflow_client.get_job(self.job_id())
  def job_id(self):
    """Returns the job id assigned by the Dataflow service."""
    return self._job.id
  def metrics(self):
    """Returns the cached metric results (may be None if not collected)."""
    return self.metric_results
  @property
  def has_job(self):
    """True if a Job message was received from the Dataflow service."""
    return self._job is not None
  def _get_job_state(self):
    """Maps the Dataflow API job state enum to a PipelineState value."""
    values_enum = dataflow_api.Job.CurrentStateValueValuesEnum
    # Ordered by the enum values. Values that may be introduced in
    # future versions of Dataflow API are considered UNRECOGNIZED by the SDK.
    api_jobstate_map = defaultdict(
        lambda: PipelineState.UNRECOGNIZED,
        {
            values_enum.JOB_STATE_UNKNOWN: PipelineState.UNKNOWN,
            values_enum.JOB_STATE_STOPPED: PipelineState.STOPPED,
            values_enum.JOB_STATE_RUNNING: PipelineState.RUNNING,
            values_enum.JOB_STATE_DONE: PipelineState.DONE,
            values_enum.JOB_STATE_FAILED: PipelineState.FAILED,
            values_enum.JOB_STATE_CANCELLED: PipelineState.CANCELLED,
            values_enum.JOB_STATE_UPDATED: PipelineState.UPDATED,
            values_enum.JOB_STATE_DRAINING: PipelineState.DRAINING,
            values_enum.JOB_STATE_DRAINED: PipelineState.DRAINED,
            values_enum.JOB_STATE_PENDING: PipelineState.PENDING,
            values_enum.JOB_STATE_CANCELLING: PipelineState.CANCELLING,
        })
    return (
        api_jobstate_map[self._job.currentState]
        if self._job.currentState else PipelineState.UNKNOWN)
  @property
  def state(self):
    """Return the current state of the remote job.
    Returns:
      A PipelineState object.
    """
    if not self.has_job:
      return PipelineState.UNKNOWN
    # Refresh from the service before reporting (no-op in terminal states).
    self._update_job()
    return self._get_job_state()
  def is_in_terminal_state(self):
    """Whether the job has reached a terminal state (no job counts as done)."""
    if not self.has_job:
      return True
    return PipelineState.is_terminal(self._get_job_state())
  def wait_until_finish(self, duration=None):
    """Polls the service until the job terminates (or `duration` ms elapse).
    Raises:
      IOError: if there is no job to wait on.
      DataflowRuntimeException: if the job terminated in a non-DONE state.
    """
    if not self.is_in_terminal_state():
      if not self.has_job:
        raise IOError('Failed to get the Dataflow job id.')
      thread = threading.Thread(
          target=DataflowRunner.poll_for_job_completion,
          args=(self._runner, self, duration))
      # Mark the thread as a daemon thread so a keyboard interrupt on the main
      # thread will terminate everything. This is also the reason we will not
      # use thread.join() to wait for the polling thread.
      thread.daemon = True
      thread.start()
      while thread.is_alive():
        time.sleep(5.0)
      # TODO: Merge the termination code in poll_for_job_completion and
      # is_in_terminal_state.
      terminated = self.is_in_terminal_state()
      assert duration or terminated, (
          'Job did not reach to a terminal state after waiting indefinitely.')
      if terminated and self.state != PipelineState.DONE:
        # TODO(BEAM-1290): Consider converting this to an error log based on
        # the resolution of the issue.
        raise DataflowRuntimeException(
            'Dataflow pipeline failed. State: %s, Error:\n%s' %
            (self.state, getattr(self._runner, 'last_error_msg', None)),
            self)
    return self.state
  def cancel(self):
    """Requests cancellation of the job; returns the resulting state.
    Raises:
      IOError: if there is no job to cancel.
      DataflowRuntimeException: if the cancel request was rejected.
    """
    if not self.has_job:
      raise IOError('Failed to get the Dataflow job id.')
    self._update_job()
    if self.is_in_terminal_state():
      _LOGGER.warning(
          'Cancel failed because job %s is already terminated in state %s.',
          self.job_id(),
          self.state)
    else:
      if not self._runner.dataflow_client.modify_job_state(
          self.job_id(), 'JOB_STATE_CANCELLED'):
        cancel_failed_message = (
            'Failed to cancel job %s, please go to the Developers Console to '
            'cancel it manually.') % self.job_id()
        _LOGGER.error(cancel_failed_message)
        raise DataflowRuntimeException(cancel_failed_message, self)
    return self.state
  def __str__(self):
    return '<%s %s %s>' % (self.__class__.__name__, self.job_id(), self.state)
  def __repr__(self):
    return '<%s %s at %s>' % (self.__class__.__name__, self._job, hex(id(self)))
class DataflowRuntimeException(Exception):
  """Indicates an error has occurred in running this pipeline.

  Attributes:
    result: The pipeline result associated with the failed run, kept so
      callers can inspect the job that raised the error.
  """
  def __init__(self, msg, result):
    Exception.__init__(self, msg)
    self.result = result
| 42.691622 | 95 | 0.708281 |
from __future__ import absolute_import
from __future__ import division
import base64
import json
import logging
import os
import subprocess
import sys
import threading
import time
import traceback
import urllib
from builtins import hex
from collections import defaultdict
from typing import TYPE_CHECKING
from typing import List
from future.utils import iteritems
import apache_beam as beam
from apache_beam import coders
from apache_beam import error
from apache_beam import pvalue
from apache_beam.internal import pickler
from apache_beam.internal.gcp import json_value
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import TestOptions
from apache_beam.options.pipeline_options import WorkerOptions
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.pvalue import AsSideInput
from apache_beam.runners.common import DoFnSignature
from apache_beam.runners.dataflow.internal import names
from apache_beam.runners.dataflow.internal.clients import dataflow as dataflow_api
from apache_beam.runners.dataflow.internal.names import PropertyNames
from apache_beam.runners.dataflow.internal.names import TransformNames
from apache_beam.runners.runner import PipelineResult
from apache_beam.runners.runner import PipelineRunner
from apache_beam.runners.runner import PipelineState
from apache_beam.runners.runner import PValueCache
from apache_beam.transforms import window
from apache_beam.transforms.core import RunnerAPIPTransformHolder
from apache_beam.transforms.display import DisplayData
from apache_beam.transforms.sideinputs import SIDE_INPUT_PREFIX
from apache_beam.typehints import typehints
from apache_beam.utils import processes
from apache_beam.utils import proto_utils
from apache_beam.utils.interactive_utils import is_in_notebook
from apache_beam.utils.plugin import BeamPlugin
if TYPE_CHECKING:
  from apache_beam.pipeline import PTransformOverride

# Python 2/3 compatibility aliases for the urllib quoting helpers used below.
if sys.version_info[0] > 2:
  unquote_to_bytes = urllib.parse.unquote_to_bytes
  quote = urllib.parse.quote
else:
  unquote_to_bytes = urllib.unquote
  quote = urllib.quote

__all__ = ['DataflowRunner']

_LOGGER = logging.getLogger(__name__)

# Error message shown when the deprecated native BigQuery source is used with
# newer stack features that do not support it.
BQ_SOURCE_UW_ERROR = (
    'The Read(BigQuerySource(...)) transform is not supported with newer stack '
    'features (Fn API, Dataflow Runner V2, etc). Please use the transform '
    'apache_beam.io.gcp.bigquery.ReadFromBigQuery instead.')
class DataflowRunner(PipelineRunner):
  """A runner that creates job graphs and submits them for remote execution.

  Every execution of the run() method will submit an independent job for
  remote execution that consists of the nodes reachable from the passed-in
  node argument or entire graph if the node is None. The run() method returns
  after the service creates the job, and the job status is reported as RUNNING.
  """

  # Imported here to avoid circular dependencies.
  from apache_beam.runners.dataflow.ptransform_overrides import CombineValuesPTransformOverride
  from apache_beam.runners.dataflow.ptransform_overrides import CreatePTransformOverride
  from apache_beam.runners.dataflow.ptransform_overrides import ReadPTransformOverride
  from apache_beam.runners.dataflow.ptransform_overrides import JrhReadPTransformOverride

  # PTransform overrides applied before running the pipeline (see
  # run_pipeline); these are reflected in the runner-API proto.
  _PTRANSFORM_OVERRIDES = [
      CombineValuesPTransformOverride()
  ]

  # Overrides applied only on the FnAPI/non-unified-worker (JRH) path.
  _JRH_PTRANSFORM_OVERRIDES = [
      JrhReadPTransformOverride(),
  ]

  # Overrides that must NOT be reflected in the proto representation of the
  # graph; applied only on the non-portable path.
  _NON_PORTABLE_PTRANSFORM_OVERRIDES = [
      CreatePTransformOverride(),
      ReadPTransformOverride(),
  ]
def __init__(self, cache=None):
self._cache = cache if cache is not None else PValueCache()
self._unique_step_id = 0
  def is_fnapi_compatible(self):
    """Whether to use the SDK harness as a FnApiRunner would; always False."""
    return False
  def apply(self, transform, input, options):
    """Applies a transform, first backfilling experiments implied by the
    unified worker (see _maybe_add_unified_worker_missing_options)."""
    self._maybe_add_unified_worker_missing_options(options)
    return super(DataflowRunner, self).apply(transform, input, options)
def _get_unique_step_name(self):
self._unique_step_id += 1
return 's%s' % self._unique_step_id
  @staticmethod
  def poll_for_job_completion(runner, result, duration):
    """Polls for the specified job to finish running (successfully or not).

    Updates ``result._job`` with the latest job response and stores the
    highest-ranked error message on ``runner.last_error_msg`` before
    returning.

    Args:
      runner: DataflowRunner instance whose dataflow_client is used for
        polling.
      result: DataflowPipelineResult instance used for job information.
      duration: Time to wait in milliseconds; ``None`` waits until the job
        reaches a terminal state.
    """
    last_message_time = None
    current_seen_messages = set()
    last_error_rank = float('-inf')
    last_error_msg = None
    last_job_state = None
    # Once the job leaves JOB_STATE_RUNNING without an explanatory message,
    # keep polling (faster) for up to this many seconds so any final error
    # message has a chance to show up.
    final_countdown_timer_secs = 50.0
    sleep_secs = 5.0

    # Rank error messages so a user-level Traceback beats generic work-item
    # failures when deciding what to surface as last_error_msg.
    def rank_error(msg):
      if 'work item was attempted' in msg:
        return -1
      elif 'Traceback' in msg:
        return 1
      return 0

    if duration:
      start_secs = time.time()
      duration_secs = duration // 1000
    job_id = result.job_id()
    while True:
      response = runner.dataflow_client.get_job(job_id)
      # currentState may be unset if get() is called very soon after job
      # creation.
      if response.currentState is not None:
        if response.currentState != last_job_state:
          _LOGGER.info('Job %s is in state %s', job_id, response.currentState)
          last_job_state = response.currentState
        if str(response.currentState) != 'JOB_STATE_RUNNING':
          # Stop polling on timeout, on an explanatory error message, or on a
          # terminal state that needs no explanation.
          if (final_countdown_timer_secs <= 0.0 or last_error_msg is not None or
              str(response.currentState) == 'JOB_STATE_DONE' or
              str(response.currentState) == 'JOB_STATE_CANCELLED' or
              str(response.currentState) == 'JOB_STATE_UPDATED' or
              str(response.currentState) == 'JOB_STATE_DRAINED'):
            break

          # Check that job is in a post-preparation state before starting the
          # final countdown.
          if (str(response.currentState) not in ('JOB_STATE_PENDING',
                                                 'JOB_STATE_QUEUED')):
            # The job has failed; ensure we see any final error messages.
            sleep_secs = 1.0  # poll faster during the final countdown
            final_countdown_timer_secs -= sleep_secs

      time.sleep(sleep_secs)

      # Get all messages since beginning of the job run or since last message.
      page_token = None
      while True:
        messages, page_token = runner.dataflow_client.list_messages(
            job_id, page_token=page_token, start_time=last_message_time)
        for m in messages:
          message = '%s: %s: %s' % (m.time, m.messageImportance, m.messageText)

          if not last_message_time or m.time > last_message_time:
            last_message_time = m.time
            current_seen_messages = set()

          if message in current_seen_messages:
            # Skip the message if it has already been seen at the current
            # time. This could be the case since the list_messages API is
            # queried starting at last_message_time.
            continue
          else:
            current_seen_messages.add(message)
          # Skip empty messages.
          if m.messageImportance is None:
            continue
          _LOGGER.info(message)
          if str(m.messageImportance) == 'JOB_MESSAGE_ERROR':
            if rank_error(m.messageText) >= last_error_rank:
              last_error_rank = rank_error(m.messageText)
              last_error_msg = m.messageText
        if not page_token:
          break

      if duration:
        passed_secs = time.time() - start_secs
        if passed_secs > duration_secs:
          _LOGGER.warning(
              'Timing out on waiting for job %s after %d seconds',
              job_id,
              passed_secs)
          break

    result._job = response
    runner.last_error_msg = last_error_msg
@staticmethod
def _only_element(iterable):
# type: (Iterable[T]) -> T
element, = iterable
return element
  @staticmethod
  def group_by_key_input_visitor():
    """Returns a visitor coercing GroupByKey inputs to a KV type.

    Dataflow requires the input of a GroupByKey to be typed as a key-value
    pair; the visitor rewrites the input PCollection's element type and,
    when the node has an output, sets the output element type to
    KV[key, Iterable[value]].
    """
    # Imported here to avoid circular dependencies.
    from apache_beam.pipeline import PipelineVisitor

    class GroupByKeyInputVisitor(PipelineVisitor):
      def enter_composite_transform(self, transform_node):
        self.visit_transform(transform_node)

      def visit_transform(self, transform_node):
        # Imported here to avoid circular dependencies.
        # pylint: disable=wrong-import-order, wrong-import-position
        from apache_beam.transforms.core import GroupByKey, _GroupByKeyOnly
        if isinstance(transform_node.transform, (GroupByKey, _GroupByKeyOnly)):
          pcoll = transform_node.inputs[0]
          # coerce_to_kv_type raises with a message naming the transform if
          # the element type cannot be treated as a key-value pair.
          pcoll.element_type = typehints.coerce_to_kv_type(
              pcoll.element_type, transform_node.full_label)
          key_type, value_type = pcoll.element_type.tuple_types
          if transform_node.outputs:
            key = DataflowRunner._only_element(transform_node.outputs.keys())
            transform_node.outputs[key].element_type = typehints.KV[
                key_type, typehints.Iterable[value_type]]

    return GroupByKeyInputVisitor()
@staticmethod
def _set_pdone_visitor(pipeline):
# Imported here to avoid circular dependencies.
from apache_beam.pipeline import PipelineVisitor
class SetPDoneVisitor(PipelineVisitor):
def __init__(self, pipeline):
self._pipeline = pipeline
@staticmethod
def _maybe_fix_output(transform_node, pipeline):
if not transform_node.outputs:
pval = pvalue.PDone(pipeline)
pval.producer = transform_node
transform_node.outputs = {None: pval}
def enter_composite_transform(self, transform_node):
SetPDoneVisitor._maybe_fix_output(transform_node, self._pipeline)
def visit_transform(self, transform_node):
SetPDoneVisitor._maybe_fix_output(transform_node, self._pipeline)
return SetPDoneVisitor(pipeline)
  @staticmethod
  def side_input_visitor(use_unified_worker=False):
    """Returns a visitor rewriting ParDo side inputs into Dataflow's form.

    Iterable side inputs are either wrapped for the unified worker or, on
    the JRH path, re-keyed via an injected MapToVoidKey step; multimap side
    inputs are coerced to a KV element type.

    Args:
      use_unified_worker: Whether the unified worker will execute the job.
    """
    # Imported here to avoid circular dependencies.
    # pylint: disable=wrong-import-order, wrong-import-position
    from apache_beam.pipeline import PipelineVisitor
    from apache_beam.transforms.core import ParDo

    class SideInputVisitor(PipelineVisitor):
      def visit_transform(self, transform_node):
        if isinstance(transform_node.transform, ParDo):
          new_side_inputs = []
          for ix, side_input in enumerate(transform_node.side_inputs):
            access_pattern = side_input._side_input_data().access_pattern
            if access_pattern == common_urns.side_inputs.ITERABLE.urn:
              if use_unified_worker:
                # TODO(BEAM-9173): Stop patching up the access pattern to
                # appease Dataflow when using the UW and hardcode the output
                # type to be Any since the Dataflow JSON and pipeline proto
                # can differ in coders which leads to encoding/decoding issues
                # within the runner.
                side_input.pvalue.element_type = typehints.Any
                new_side_input = _DataflowIterableSideInput(side_input)
              else:
                # Add a map to ('', value) as Dataflow currently only handles
                # keyed side inputs when using the JRH.
                pipeline = side_input.pvalue.pipeline
                new_side_input = _DataflowIterableAsMultimapSideInput(
                    side_input)
                new_side_input.pvalue = beam.pvalue.PCollection(
                    pipeline,
                    element_type=typehints.KV[bytes,
                                              side_input.pvalue.element_type],
                    is_bounded=side_input.pvalue.is_bounded)
                parent = transform_node.parent or pipeline._root_transform()
                map_to_void_key = beam.pipeline.AppliedPTransform(
                    pipeline,
                    beam.Map(lambda x: (b'', x)),
                    transform_node.full_label + '/MapToVoidKey%s' % ix,
                    (side_input.pvalue, ))
                new_side_input.pvalue.producer = map_to_void_key
                map_to_void_key.add_output(new_side_input.pvalue, None)
                parent.add_part(map_to_void_key)
            elif access_pattern == common_urns.side_inputs.MULTIMAP.urn:
              # Ensure the input coder is a KV coder and patch up the
              # access pattern to appease Dataflow.
              side_input.pvalue.element_type = typehints.coerce_to_kv_type(
                  side_input.pvalue.element_type, transform_node.full_label)
              new_side_input = _DataflowMultimapSideInput(side_input)
            else:
              raise ValueError(
                  'Unsupported access pattern for %r: %r' %
                  (transform_node.full_label, access_pattern))
            new_side_inputs.append(new_side_input)
          transform_node.side_inputs = new_side_inputs
          transform_node.transform.side_inputs = new_side_inputs

    return SideInputVisitor()
@staticmethod
def flatten_input_visitor():
# Imported here to avoid circular dependencies.
from apache_beam.pipeline import PipelineVisitor
class FlattenInputVisitor(PipelineVisitor):
def visit_transform(self, transform_node):
# Imported here to avoid circular dependencies.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam import Flatten
if isinstance(transform_node.transform, Flatten):
output_pcoll = DataflowRunner._only_element(
transform_node.outputs.values())
for input_pcoll in transform_node.inputs:
input_pcoll.element_type = output_pcoll.element_type
return FlattenInputVisitor()
  def _check_for_unsupported_fnapi_features(self, pipeline_proto):
    """Raises RuntimeError for windowing the FnAPI path cannot execute.

    Merging windowing strategies other than session windows are rejected,
    as are window coders other than the global and interval window coders.
    """
    components = pipeline_proto.components
    for windowing_strategy in components.windowing_strategies.values():
      if (windowing_strategy.merge_status ==
          beam_runner_api_pb2.MergeStatus.NEEDS_MERGE and
          windowing_strategy.window_fn.urn not in (
              common_urns.session_windows.urn, )):
        raise RuntimeError(
            'Unsupported merging windowing strategy: %s' %
            windowing_strategy.window_fn.urn)
      elif components.coders[
          windowing_strategy.window_coder_id].spec.urn not in (
              common_urns.coders.GLOBAL_WINDOW.urn,
              common_urns.coders.INTERVAL_WINDOW.urn):
        raise RuntimeError(
            'Unsupported window coder %s for window fn %s' % (
                components.coders[windowing_strategy.window_coder_id].spec.urn,
                windowing_strategy.window_fn.urn))
  def run_pipeline(self, pipeline, options):
    """Remotely executes the entire pipeline on the Dataflow service.

    Applies the configured PTransform overrides, snapshots the pipeline as
    a portable proto, builds the Dataflow job description and submits it.

    Args:
      pipeline: The Pipeline to run.
      options: The PipelineOptions to run it with.

    Returns:
      A DataflowPipelineResult (or a dummy DONE PipelineResult on dry runs).

    Raises:
      ImportError: If apache_beam[gcp] is not installed.
      ValueError: If the streaming-engine options are inconsistent.
    """
    # Label goog-dataflow-notebook if job is started from notebook.
    if is_in_notebook():
      notebook_version = (
          'goog-dataflow-notebook=' +
          beam.version.__version__.replace('.', '_'))
      if options.view_as(GoogleCloudOptions).labels:
        options.view_as(GoogleCloudOptions).labels.append(notebook_version)
      else:
        options.view_as(GoogleCloudOptions).labels = [notebook_version]

    # Import here to avoid adding the dependency for local running scenarios.
    try:
      # pylint: disable=wrong-import-order, wrong-import-position
      from apache_beam.runners.dataflow.internal import apiclient
    except ImportError:
      raise ImportError(
          'Google Cloud Dataflow runner not available, '
          'please install apache_beam[gcp]')

    self._maybe_add_unified_worker_missing_options(options)

    # Convert all side inputs into a form acceptable to Dataflow.
    if apiclient._use_fnapi(options):
      pipeline.visit(
          self.side_input_visitor(apiclient._use_unified_worker(options)))

    # Performing configured PTransform overrides. Note that this is currently
    # done before Runner API serialization, since the new proto needs to contain
    # any added PTransforms.
    pipeline.replace_all(DataflowRunner._PTRANSFORM_OVERRIDES)

    if (apiclient._use_fnapi(options) and
        not apiclient._use_unified_worker(options)):
      pipeline.replace_all(DataflowRunner._JRH_PTRANSFORM_OVERRIDES)

    use_fnapi = apiclient._use_fnapi(options)
    from apache_beam.transforms import environments
    default_environment = environments.DockerEnvironment.from_container_image(
        apiclient.get_container_image_from_options(options))

    # Snapshot the pipeline in a portable proto.
    self.proto_pipeline, self.proto_context = pipeline.to_runner_api(
        return_context=True, default_environment=default_environment)

    if use_fnapi:
      self._check_for_unsupported_fnapi_features(self.proto_pipeline)

      # Cross language transform require using a pipeline object constructed
      # from the full pipeline proto to make sure that expanded version of
      # external transforms are reflected in the Pipeline job graph.
      from apache_beam import Pipeline
      pipeline = Pipeline.from_runner_api(
          self.proto_pipeline,
          pipeline.runner,
          options,
          allow_proto_holders=True)
      # Pipelines generated from proto do not have output set to PDone set for
      # leaf elements.
      pipeline.visit(self._set_pdone_visitor(pipeline))
      # We need to generate a new context that maps to the new pipeline object.
      self.proto_pipeline, self.proto_context = pipeline.to_runner_api(
          return_context=True, default_environment=default_environment)
    else:
      # Performing configured PTransform overrides which should not be reflected
      # in the proto representation of the graph.
      pipeline.replace_all(DataflowRunner._NON_PORTABLE_PTRANSFORM_OVERRIDES)

    # Add setup_options for all the BeamPlugin imports
    setup_options = options.view_as(SetupOptions)
    plugins = BeamPlugin.get_all_plugin_paths()
    if setup_options.beam_plugins is not None:
      plugins = list(set(plugins + setup_options.beam_plugins))
    setup_options.beam_plugins = plugins

    # Elevate "min_cpu_platform" to pipeline option, but using the existing
    # experiment.
    debug_options = options.view_as(DebugOptions)
    worker_options = options.view_as(WorkerOptions)
    if worker_options.min_cpu_platform:
      debug_options.add_experiment(
          'min_cpu_platform=' + worker_options.min_cpu_platform)

    # Elevate "enable_streaming_engine" to pipeline option, but using the
    # existing experiment.
    google_cloud_options = options.view_as(GoogleCloudOptions)
    if google_cloud_options.enable_streaming_engine:
      debug_options.add_experiment("enable_windmill_service")
      debug_options.add_experiment("enable_streaming_engine")
    else:
      if (debug_options.lookup_experiment("enable_windmill_service") or
          debug_options.lookup_experiment("enable_streaming_engine")):
        raise ValueError(
            """Streaming engine both disabled and enabled:
        enable_streaming_engine flag is not set, but enable_windmill_service
        and/or enable_streaming_engine experiments are present.
        It is recommended you only set the enable_streaming_engine flag.""")

    dataflow_worker_jar = getattr(worker_options, 'dataflow_worker_jar', None)
    if dataflow_worker_jar is not None:
      if not apiclient._use_fnapi(options):
        _LOGGER.warning(
            'Typical end users should not use this worker jar feature. '
            'It can only be used when FnAPI is enabled.')
      else:
        debug_options.add_experiment('use_staged_dataflow_worker_jar')

    # Make Dataflow workers use FastAvro on Python 3 unless use_avro experiment
    # is set. Note that use_avro is only interpreted by the Dataflow runner
    # at job submission and is not interpreted by Dataflow service or workers,
    # which by default use avro library unless use_fastavro experiment is set.
    if sys.version_info[0] > 2 and (
        not debug_options.lookup_experiment('use_avro')):
      debug_options.add_experiment('use_fastavro')

    self.job = apiclient.Job(options, self.proto_pipeline)

    # Dataflow runner requires a KV type for GBK inputs, hence we enforce that
    # here.
    pipeline.visit(self.group_by_key_input_visitor())

    # Dataflow runner requires output type of the Flatten to be the same as the
    # inputs, hence we enforce that here.
    pipeline.visit(self.flatten_input_visitor())

    # Trigger a traversal of all reachable nodes.
    self.visit_transforms(pipeline, options)

    test_options = options.view_as(TestOptions)
    # If it is a dry run, return without submitting the job.
    if test_options.dry_run:
      result = PipelineResult(PipelineState.DONE)
      result.wait_until_finish = lambda duration=None: None
      return result

    # Get a Dataflow API client and set its options
    self.dataflow_client = apiclient.DataflowApplicationClient(options)

    # Create the job description and send a request to the service. The result
    # can be None if there is no need to send a request to the service (e.g.
    # template creation). If a request was sent and failed then the call will
    # raise an exception.
    result = DataflowPipelineResult(
        self.dataflow_client.create_job(self.job), self)

    # TODO(BEAM-4274): Circular import runners-metrics. Requires refactoring.
    from apache_beam.runners.dataflow.dataflow_metrics import DataflowMetrics
    self._metrics = DataflowMetrics(self.dataflow_client, result, self.job)
    result.metric_results = self._metrics
    return result
def _maybe_add_unified_worker_missing_options(self, options):
# set default beam_fn_api and use_beam_bq_sink experiment if use unified
# worker experiment flag exists, no-op otherwise.
debug_options = options.view_as(DebugOptions)
from apache_beam.runners.dataflow.internal import apiclient
if apiclient._use_unified_worker(options):
if not debug_options.lookup_experiment('beam_fn_api'):
debug_options.add_experiment('beam_fn_api')
if not debug_options.lookup_experiment('use_beam_bq_sink'):
debug_options.add_experiment('use_beam_bq_sink')
def _get_typehint_based_encoding(self, typehint, window_coder):
return self._get_cloud_encoding(
self._get_coder(typehint, window_coder=window_coder))
@staticmethod
def _get_coder(typehint, window_coder):
if window_coder:
return coders.WindowedValueCoder(
coders.registry.get_coder(typehint), window_coder=window_coder)
return coders.registry.get_coder(typehint)
def _get_cloud_encoding(self, coder, unused=None):
if not isinstance(coder, coders.Coder):
raise TypeError(
'Coder object must inherit from coders.Coder: %s.' % str(coder))
return coder.as_cloud_object(self.proto_context.coders)
def _get_side_input_encoding(self, input_encoding):
return {
'@type': 'kind:stream',
'component_encodings': [input_encoding],
'is_stream_like': {
'value': True
},
}
  def _get_encoded_output_coder(
      self, transform_node, window_value=True, output_tag=None):
    """Returns the cloud encoding of the coder for the output of a transform.

    Args:
      transform_node: The transform whose output coder is looked up.
      window_value: When True, wrap the element coder using the output's
        windowing strategy.
      output_tag: Selects one of several outputs; required for external
        transforms with more than one output.

    Raises:
      ValueError: For an external transform with multiple outputs when
        output_tag is not given.
    """
    is_external_transform = isinstance(
        transform_node.transform, RunnerAPIPTransformHolder)

    if output_tag in transform_node.outputs:
      element_type = transform_node.outputs[output_tag].element_type
    elif len(transform_node.outputs) == 1:
      output_tag = DataflowRunner._only_element(transform_node.outputs.keys())
      # TODO(robertwb): Handle type hints for multi-output transforms.
      element_type = transform_node.outputs[output_tag].element_type
    elif is_external_transform:
      raise ValueError(
          'For external transforms, output_tag must be specified '
          'since we cannot fallback to a Python only coder.')
    else:
      # TODO(silviuc): Remove this branch (and assert) when typehints are
      # propagated everywhere. Returning an 'Any' as type hint will trigger
      # usage of the fallback coder (i.e., cPickler).
      element_type = typehints.Any
    if window_value:
      # All outputs have the same windowing. So getting the coder from an
      # arbitrary window is fine.
      output_tag = next(iter(transform_node.outputs.keys()))
      window_coder = (
          transform_node.outputs[output_tag].windowing.windowfn.
          get_window_coder())
    else:
      window_coder = None
    return self._get_typehint_based_encoding(element_type, window_coder)
  def _add_step(self, step_kind, step_label, transform_node, side_tags=()):
    """Creates a Step object, adds it to the job proto and caches it.

    Args:
      step_kind: A TransformNames kind (e.g. DO, GROUP, READ, FLATTEN).
      step_label: User-visible name for the step.
      transform_node: The AppliedPTransform being translated.
      side_tags: Extra output tags to associate with this step so later
        steps can reference the tagged outputs as inputs.

    Returns:
      The apiclient.Step appended to the job proto.
    """
    # Import here to avoid adding the dependency for local running scenarios.
    # pylint: disable=wrong-import-order, wrong-import-position
    from apache_beam.runners.dataflow.internal import apiclient
    step = apiclient.Step(step_kind, self._get_unique_step_name())
    self.job.proto.steps.append(step.proto)
    step.add_property(PropertyNames.USER_NAME, step_label)
    # Cache the node/step association for the main output of the transform node.
    # Main output key of external transforms can be ambiguous, so we only tag if
    # there's only one tag instead of None.
    output_tag = (
        DataflowRunner._only_element(transform_node.outputs.keys()) if len(
            transform_node.outputs.keys()) == 1 else None)
    self._cache.cache_output(transform_node, output_tag, step)
    # Also cache the association for every extra (tagged) output.
    for tag in side_tags:
      self._cache.cache_output(transform_node, tag, step)

    # Attach the transform's display data to the step (an empty list when the
    # transform declares none).
    step.add_property(
        PropertyNames.DISPLAY_DATA,
        [
            item.get_dict()
            for item in DisplayData.create_from(transform_node.transform).items
        ])
    return step
  def _add_singleton_step(
      self,
      label,
      full_label,
      tag,
      input_step,
      windowing_strategy,
      access_pattern):
    """Adds a CollectionToSingleton step that materializes a side input.

    Args:
      label: Step name.
      full_label: User-visible name for the step.
      tag: Output tag of the input step feeding this side input.
      input_step: The step producing the side-input PCollection.
      windowing_strategy: Windowing of the side-input PCollection.
      access_pattern: Side-input access pattern URN; MULTIMAP side inputs
        are marked to use the indexed format.

    Returns:
      The apiclient.Step appended to the job proto.
    """
    # Import here to avoid adding the dependency for local running scenarios.
    from apache_beam.runners.dataflow.internal import apiclient
    step = apiclient.Step(TransformNames.COLLECTION_TO_SINGLETON, label)
    self.job.proto.steps.append(step.proto)
    step.add_property(PropertyNames.USER_NAME, full_label)
    step.add_property(
        PropertyNames.PARALLEL_INPUT,
        {
            '@type': 'OutputReference',
            PropertyNames.STEP_NAME: input_step.proto.name,
            PropertyNames.OUTPUT_NAME: input_step.get_output(tag)
        })
    step.encoding = self._get_side_input_encoding(input_step.encoding)

    output_info = {
        PropertyNames.USER_NAME: '%s.%s' % (full_label, PropertyNames.OUTPUT),
        PropertyNames.ENCODING: step.encoding,
        PropertyNames.OUTPUT_NAME: PropertyNames.OUT
    }
    if common_urns.side_inputs.MULTIMAP.urn == access_pattern:
      output_info[PropertyNames.USE_INDEXED_FORMAT] = True
    step.add_property(PropertyNames.OUTPUT_INFO, [output_info])

    step.add_property(
        PropertyNames.WINDOWING_STRATEGY,
        self.serialize_windowing_strategy(windowing_strategy))
    return step
  def run_Impulse(self, transform_node, options):
    """Translates an Impulse primitive into a Dataflow Read step.

    On the legacy streaming path the impulse is expressed as a pubsub read
    of a starting signal; otherwise an encoded impulse element is embedded
    directly in the step.
    """
    standard_options = options.view_as(StandardOptions)
    debug_options = options.view_as(DebugOptions)
    use_fn_api = (
        debug_options.experiments and
        'beam_fn_api' in debug_options.experiments)
    use_streaming_engine = (
        debug_options.experiments and
        'enable_streaming_engine' in debug_options.experiments and
        'enable_windmill_service' in debug_options.experiments)

    step = self._add_step(
        TransformNames.READ, transform_node.full_label, transform_node)
    if (standard_options.streaming and
        (not use_fn_api or not use_streaming_engine)):
      step.add_property(PropertyNames.FORMAT, 'pubsub')
      step.add_property(PropertyNames.PUBSUB_SUBSCRIPTION, '_starting_signal/')
    else:
      step.add_property(PropertyNames.FORMAT, 'impulse')
      # A single empty bytes element in the global window, nested-encoded.
      encoded_impulse_element = coders.WindowedValueCoder(
          coders.BytesCoder(),
          coders.coders.GlobalWindowCoder()).get_impl().encode_nested(
              window.GlobalWindows.windowed_value(b''))

      if use_fn_api:
        encoded_impulse_as_str = self.byte_array_to_json_string(
            encoded_impulse_element)
      else:
        encoded_impulse_as_str = base64.b64encode(
            encoded_impulse_element).decode('ascii')
      step.add_property(PropertyNames.IMPULSE_ELEMENT, encoded_impulse_as_str)

    step.encoding = self._get_encoded_output_coder(transform_node)
    step.add_property(
        PropertyNames.OUTPUT_INFO,
        [{
            PropertyNames.USER_NAME: (
                '%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
            PropertyNames.ENCODING: step.encoding,
            PropertyNames.OUTPUT_NAME: PropertyNames.OUT
        }])
def run_Flatten(self, transform_node, options):
step = self._add_step(
TransformNames.FLATTEN, transform_node.full_label, transform_node)
inputs = []
for one_input in transform_node.inputs:
input_step = self._cache.get_pvalue(one_input)
inputs.append({
'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(one_input.tag)
})
step.add_property(PropertyNames.INPUTS, inputs)
step.encoding = self._get_encoded_output_coder(transform_node)
step.add_property(
PropertyNames.OUTPUT_INFO,
[{
PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT
}])
  def apply_WriteToBigQuery(self, transform, pcoll, options):
    """Routes WriteToBigQuery through either the generic PTransform path
    (FnAPI or 'use_beam_bq_sink') or the legacy native BigQuery sink.

    Raises:
      RuntimeError: If schema auto-detect or streaming WRITE_TRUNCATE is
        requested on the native-sink path.
    """
    # Make sure this is the WriteToBigQuery class that we expected, and that
    # users did not specifically request the new BQ sink by passing experiment
    # flag.
    experiments = options.view_as(DebugOptions).experiments or []

    # TODO(BEAM-6928): Remove this function for release 2.14.0.
    from apache_beam.runners.dataflow.internal import apiclient
    use_fnapi = apiclient._use_fnapi(options)
    if (not isinstance(transform, beam.io.WriteToBigQuery) or use_fnapi or
        'use_beam_bq_sink' in experiments):
      return self.apply_PTransform(transform, pcoll, options)
    if transform.schema == beam.io.gcp.bigquery.SCHEMA_AUTODETECT:
      raise RuntimeError(
          'Schema auto-detection is not supported on the native sink')
    standard_options = options.view_as(StandardOptions)
    if standard_options.streaming:
      if (transform.write_disposition ==
          beam.io.BigQueryDisposition.WRITE_TRUNCATE):
        raise RuntimeError('Can not use write truncation mode in streaming')
      return self.apply_PTransform(transform, pcoll, options)
    else:
      # Batch native-sink path: translate to a Write on a BigQuerySink.
      from apache_beam.io.gcp.bigquery_tools import parse_table_schema_from_json
      schema = None
      if transform.schema:
        schema = parse_table_schema_from_json(json.dumps(transform.schema))
      return pcoll | 'WriteToBigQuery' >> beam.io.Write(
          beam.io.BigQuerySink(
              transform.table_reference.tableId,
              transform.table_reference.datasetId,
              transform.table_reference.projectId,
              schema,
              transform.create_disposition,
              transform.write_disposition,
              kms_key=transform.kms_key))
def apply_GroupByKey(self, transform, pcoll, options):
parent = pcoll.producer
if parent:
coder = parent.transform._infer_output_coder()
if not coder:
coder = self._get_coder(pcoll.element_type or typehints.Any, None)
if not coder.is_kv_coder():
raise ValueError((
'Coder for the GroupByKey operation "%s" is not a '
'key-value coder: %s.') % (transform.label, coder))
coders.registry.verify_deterministic(
coder.key_coder(), 'GroupByKey operation "%s"' % transform.label)
return pvalue.PCollection.from_(pcoll)
  def run_GroupByKey(self, transform_node, options):
    """Translates a GroupByKey into a Dataflow Group step, attaching the
    serialized windowing strategy the grouping runs under."""
    input_tag = transform_node.inputs[0].tag
    input_step = self._cache.get_pvalue(transform_node.inputs[0])
    step = self._add_step(
        TransformNames.GROUP, transform_node.full_label, transform_node)
    step.add_property(
        PropertyNames.PARALLEL_INPUT,
        {
            '@type': 'OutputReference',
            PropertyNames.STEP_NAME: input_step.proto.name,
            PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)
        })
    step.encoding = self._get_encoded_output_coder(transform_node)
    step.add_property(
        PropertyNames.OUTPUT_INFO,
        [{
            PropertyNames.USER_NAME: (
                '%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
            PropertyNames.ENCODING: step.encoding,
            PropertyNames.OUTPUT_NAME: PropertyNames.OUT
        }])
    windowing = transform_node.transform.get_windowing(transform_node.inputs)
    step.add_property(
        PropertyNames.SERIALIZED_FN,
        self.serialize_windowing_strategy(windowing))
def run_RunnerAPIPTransformHolder(self, transform_node, options):
urn = transform_node.transform.proto().urn
assert urn
if common_urns.primitives.PAR_DO.urn == urn:
self.run_ParDo(transform_node, options)
else:
raise NotImplementedError(
'%s uses unsupported URN: %s' % (transform_node.full_label, urn))
  def run_ParDo(self, transform_node, options):
    """Translates a ParDo (or external ParDo holder) into a Dataflow Do step.

    Each side input is materialized through a CollectionToSingleton step;
    the Do step carries either pickled fn data or, under FnAPI, the
    transform id, plus descriptions of the main and tagged outputs.
    """
    transform = transform_node.transform
    input_tag = transform_node.inputs[0].tag
    input_step = self._cache.get_pvalue(transform_node.inputs[0])
    is_external_transform = isinstance(transform, RunnerAPIPTransformHolder)

    # Attach side inputs, materializing each via _add_singleton_step.
    si_dict = {}
    all_input_labels = transform_node.input_tags_to_preserve
    si_labels = {}
    full_label_counts = defaultdict(int)
    lookup_label = lambda side_pval: si_labels[side_pval]
    named_inputs = transform_node.named_inputs()
    label_renames = {}
    for ix, side_pval in enumerate(transform_node.side_inputs):
      assert isinstance(side_pval, AsSideInput)
      step_name = 'SideInput-' + self._get_unique_step_name()
      si_label = ((SIDE_INPUT_PREFIX + '%d-%s') %
                  (ix, transform_node.full_label)
                  if side_pval.pvalue not in all_input_labels else
                  all_input_labels[side_pval.pvalue])
      old_label = (SIDE_INPUT_PREFIX + '%d') % ix

      if not is_external_transform:
        label_renames[old_label] = si_label

      assert old_label in named_inputs
      pcollection_label = '%s.%s' % (
          side_pval.pvalue.producer.full_label.split('/')[-1],
          side_pval.pvalue.tag if side_pval.pvalue.tag else 'out')
      si_full_label = '%s/%s(%s.%s)' % (
          transform_node.full_label,
          side_pval.__class__.__name__,
          pcollection_label,
          full_label_counts[pcollection_label])

      # Count how often the same PCollection feeds this ParDo as a side
      # input, to disambiguate repeated uses in the label.
      full_label_counts[pcollection_label] += 1

      self._add_singleton_step(
          step_name,
          si_full_label,
          side_pval.pvalue.tag,
          self._cache.get_pvalue(side_pval.pvalue),
          side_pval.pvalue.windowing,
          side_pval._side_input_data().access_pattern)
      si_dict[si_label] = {
          '@type': 'OutputReference',
          PropertyNames.STEP_NAME: step_name,
          PropertyNames.OUTPUT_NAME: PropertyNames.OUT
      }
      si_labels[side_pval] = si_label

    # Now create the step for the ParDo transform being handled.
    transform_name = transform_node.full_label.rsplit('/', 1)[-1]
    step = self._add_step(
        TransformNames.DO,
        transform_node.full_label +
        ('/{}'.format(transform_name) if transform_node.side_inputs else ''),
        transform_node,
        transform_node.transform.output_tags)
    # Import here to avoid adding the dependency for local running scenarios.
    # pylint: disable=wrong-import-order, wrong-import-position
    from apache_beam.runners.dataflow.internal import apiclient
    transform_proto = self.proto_context.transforms.get_proto(transform_node)
    transform_id = self.proto_context.transforms.get_id(transform_node)
    use_fnapi = apiclient._use_fnapi(options)
    use_unified_worker = apiclient._use_unified_worker(options)
    if (use_fnapi and
        (transform_proto.spec.urn == common_urns.primitives.PAR_DO.urn or
         use_unified_worker)):
      # Patch side input ids to be unique across a given pipeline.
      if (label_renames and
          transform_proto.spec.urn == common_urns.primitives.PAR_DO.urn):
        # Patch the PTransform proto's input map.
        for old, new in iteritems(label_renames):
          transform_proto.inputs[new] = transform_proto.inputs[old]
          del transform_proto.inputs[old]

        # Patch the ParDo payload's side_inputs map to match.
        proto_type, _ = beam.PTransform._known_urns[transform_proto.spec.urn]
        proto = proto_utils.parse_Bytes(
            transform_proto.spec.payload, proto_type)
        for old, new in iteritems(label_renames):
          proto.side_inputs[new].CopyFrom(proto.side_inputs[old])
          del proto.side_inputs[old]
        transform_proto.spec.payload = proto.SerializeToString()
        # Write the patched transform back into the pipeline proto.
        del self.proto_pipeline.components.transforms[transform_id]
        (
            self.proto_pipeline.components.transforms[transform_id].CopyFrom(
                transform_proto))
      serialized_data = transform_id
    else:
      serialized_data = pickler.dumps(
          self._pardo_fn_data(transform_node, lookup_label))
    step.add_property(PropertyNames.SERIALIZED_FN, serialized_data)
    # step.add_property(PropertyNames.PIPELINE_PROTO_TRANSFORM_ID, transform_id)
    step.add_property(
        PropertyNames.PARALLEL_INPUT,
        {
            '@type': 'OutputReference',
            PropertyNames.STEP_NAME: input_step.proto.name,
            PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)
        })
    # Add side inputs if any.
    step.add_property(PropertyNames.NON_PARALLEL_INPUTS, si_dict)

    # Generate description for the outputs. The output names
    # will be 'None' for main output and '<tag>' for a tagged output.
    outputs = []

    all_output_tags = transform_proto.outputs.keys()
    # NOTE(review): in Python 3, outputs.keys() is a non-indexable view, so
    # `all_output_tags[0]` below raises TypeError for external transforms.
    # Wrapping in list(...) would fix it — confirm before changing.

    # Some external transforms require output tags to not be modified.
    # So we randomly select one of the output tags as the main output and
    # leave others as side outputs. Transform execution should not change
    # dependending on which output tag we choose as the main output here.
    # Also, some SDKs do not work correctly if output tags are modified. So for
    # external transforms, we leave tags unmodified.
    #
    # Python SDK uses 'None' as the tag of the main output.
    main_output_tag = (all_output_tags[0] if is_external_transform else 'None')

    step.encoding = self._get_encoded_output_coder(
        transform_node, output_tag=main_output_tag)

    side_output_tags = set(all_output_tags).difference({main_output_tag})

    # Add the main output to the description.
    outputs.append({
        PropertyNames.USER_NAME: (
            '%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
        PropertyNames.ENCODING: step.encoding,
        PropertyNames.OUTPUT_NAME: main_output_tag
    })
    for side_tag in side_output_tags:
      # The assumption here is that all outputs will have the same typehint
      # and coder as the main output. This is certainly the case right now
      # but conceivably it could change in the future.
      encoding = self._get_encoded_output_coder(
          transform_node, output_tag=side_tag)
      outputs.append({
          PropertyNames.USER_NAME: (
              '%s.%s' % (transform_node.full_label, side_tag)),
          PropertyNames.ENCODING: encoding,
          PropertyNames.OUTPUT_NAME: side_tag
      })

    step.add_property(PropertyNames.OUTPUT_INFO, outputs)

    # Add the restriction encoding if we are a splittable DoFn
    # and are using the Fn API on the unified worker.
    restriction_coder = transform.get_restriction_coder()
    if restriction_coder:
      step.add_property(
          PropertyNames.RESTRICTION_ENCODING,
          self._get_cloud_encoding(restriction_coder))

    if options.view_as(StandardOptions).streaming:
      is_stateful_dofn = (
          transform.is_pardo_with_stateful_dofn if is_external_transform else
          DoFnSignature(transform.dofn).is_stateful_dofn())
      if is_stateful_dofn:
        step.add_property(PropertyNames.USES_KEYED_STATE, 'true')
@staticmethod
def _pardo_fn_data(transform_node, get_label):
transform = transform_node.transform
si_tags_and_types = [ # pylint: disable=protected-access
(get_label(side_pval), side_pval.__class__, side_pval._view_options())
for side_pval in transform_node.side_inputs]
return (
transform.fn,
transform.args,
transform.kwargs,
si_tags_and_types,
transform_node.inputs[0].windowing)
def run_CombineValuesReplacement(self, transform_node, options):
  """Translates a CombineValues composite into a Dataflow Combine step.

  Args:
    transform_node: AppliedPTransform for the replacement; its inner
      ``.transform.transform`` is the original CombineValues transform.
    options: PipelineOptions controlling how the combine fn is serialized.
  """
  transform = transform_node.transform.transform
  input_tag = transform_node.inputs[0].tag
  input_step = self._cache.get_pvalue(transform_node.inputs[0])
  step = self._add_step(
      TransformNames.COMBINE, transform_node.full_label, transform_node)
  transform_id = self.proto_context.transforms.get_id(transform_node.parent)
  # The data transmitted in SERIALIZED_FN is different depending on whether
  # this is a fnapi pipeline or not.
  from apache_beam.runners.dataflow.internal import apiclient
  use_fnapi = apiclient._use_fnapi(options)
  if use_fnapi:
    # Fnapi pipelines send only the transform ID of the CombineValues
    # transform's parent composite instead of pickled bytes.
    serialized_data = transform_id
  else:
    # Non-fnapi pipelines pickle (fn, args, kwargs, ()); the trailing empty
    # tuple is where side inputs information would go (combiner fns take
    # no deferred side inputs here).
    serialized_data = pickler.dumps(
        (transform.fn, transform.args, transform.kwargs, ()))
  step.add_property(PropertyNames.SERIALIZED_FN, serialized_data)
  # TODO(BEAM-8882): Enable once dataflow service doesn't reject this.
  step.add_property(
      PropertyNames.PARALLEL_INPUT,
      {
          '@type': 'OutputReference',
          PropertyNames.STEP_NAME: input_step.proto.name,
          PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)
      })
  # Note the asymmetry below: the step's ENCODING property carries the
  # accumulator coder, while step.encoding (used for OUTPUT_INFO) is the
  # encoded output coder of the node.
  accumulator_encoding = self._get_cloud_encoding(
      transform.fn.get_accumulator_coder())
  output_encoding = self._get_encoded_output_coder(transform_node)
  step.encoding = output_encoding
  step.add_property(PropertyNames.ENCODING, accumulator_encoding)
  # Declare the single (main) output of the combine step.
  outputs = []
  outputs.append({
      PropertyNames.USER_NAME: (
          '%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
      PropertyNames.ENCODING: step.encoding,
      PropertyNames.OUTPUT_NAME: PropertyNames.OUT
  })
  step.add_property(PropertyNames.OUTPUT_INFO, outputs)
def apply_Read(self, transform, pbegin, options):
  """Applies a Read transform.

  Sources without a ``format`` attribute (custom sources) are applied as a
  regular PTransform; native sources (those exposing ``format``) simply
  produce a fresh PCollection rooted at ``pbegin``.
  """
  if not hasattr(transform.source, 'format'):
    # Custom Read: fall back to the generic PTransform application.
    return self.apply_PTransform(transform, pbegin, options)
  # Native source.
  return beam.pvalue.PCollection.from_(pbegin)
def run_Read(self, transform_node, options):
  """Translates a Read transform into a Dataflow 'read' step.

  Custom (pickleable) sources are serialized into the step directly;
  native sources are dispatched on ``transform.source.format``, which may
  be 'text', 'bigquery' or 'pubsub'.

  Raises:
    ValueError: if the source format is unsupported, or is used in an
      incompatible pipeline mode (BigQuery in streaming, Pub/Sub in batch).
  """
  transform = transform_node.transform
  step = self._add_step(
      TransformNames.READ, transform_node.full_label, transform_node)
  standard_options = options.view_as(StandardOptions)
  if not hasattr(transform.source, 'format'):
    # Custom source: ship the pickled source object itself as the spec.
    source_dict = {}
    source_dict['spec'] = {
        '@type': names.SOURCE_TYPE,
        names.SERIALIZED_SOURCE_KEY: pickler.dumps(transform.source)
    }
    try:
      source_dict['metadata'] = {
          'estimated_size_bytes': json_value.get_typed_value_descriptor(
              transform.source.estimate_size())
      }
    except error.RuntimeValueProviderError:
      # Size estimation is best effort: it may rely on runtime value
      # providers that are not yet resolved at submission time.
      _LOGGER.info(
          'Could not estimate size of source %r due to ' + \
          'RuntimeValueProviderError', transform.source)
    except Exception:  # pylint: disable=broad-except
      # Size estimation is best effort, so log and continue on any failure.
      _LOGGER.info(
          'Could not estimate size of source %r due to an exception: %s',
          transform.source,
          traceback.format_exc())
    step.add_property(PropertyNames.SOURCE_STEP_INPUT, source_dict)
  elif transform.source.format == 'text':
    step.add_property(PropertyNames.FILE_PATTERN, transform.source.path)
  elif transform.source.format == 'bigquery':
    if standard_options.streaming:
      raise ValueError(
          'BigQuery source is not currently available for use '
          'in streaming pipelines.')
    debug_options = options.view_as(DebugOptions)
    use_fn_api = (
        debug_options.experiments and
        'beam_fn_api' in debug_options.experiments)
    if use_fn_api:
      raise ValueError(BQ_SOURCE_UW_ERROR)
    step.add_property(PropertyNames.BIGQUERY_EXPORT_FORMAT, 'FORMAT_AVRO')
    # A table reference or a query must be supplied (table wins if both).
    if transform.source.table_reference is not None:
      step.add_property(
          PropertyNames.BIGQUERY_DATASET,
          transform.source.table_reference.datasetId)
      step.add_property(
          PropertyNames.BIGQUERY_TABLE,
          transform.source.table_reference.tableId)
      # projectId is optional; the service default applies when absent.
      if transform.source.table_reference.projectId is not None:
        step.add_property(
            PropertyNames.BIGQUERY_PROJECT,
            transform.source.table_reference.projectId)
    elif transform.source.query is not None:
      step.add_property(PropertyNames.BIGQUERY_QUERY, transform.source.query)
      step.add_property(
          PropertyNames.BIGQUERY_USE_LEGACY_SQL,
          transform.source.use_legacy_sql)
      step.add_property(
          PropertyNames.BIGQUERY_FLATTEN_RESULTS,
          transform.source.flatten_results)
    else:
      raise ValueError(
          'BigQuery source %r must specify either a table or'
          ' a query' % transform.source)
    if transform.source.kms_key is not None:
      step.add_property(
          PropertyNames.BIGQUERY_KMS_KEY, transform.source.kms_key)
  elif transform.source.format == 'pubsub':
    if not standard_options.streaming:
      raise ValueError(
          'Cloud Pub/Sub is currently available for use '
          'only in streaming pipelines.')
    # Either a subscription or a topic may be set; subscription wins.
    if transform.source.full_subscription:
      step.add_property(
          PropertyNames.PUBSUB_SUBSCRIPTION,
          transform.source.full_subscription)
    elif transform.source.full_topic:
      step.add_property(
          PropertyNames.PUBSUB_TOPIC, transform.source.full_topic)
    if transform.source.id_label:
      step.add_property(
          PropertyNames.PUBSUB_ID_LABEL, transform.source.id_label)
    if transform.source.with_attributes:
      # NOTE(review): an empty value appears to act as a marker that
      # message attributes should be preserved — confirm against the
      # Dataflow service contract.
      step.add_property(PropertyNames.PUBSUB_SERIALIZED_ATTRIBUTES_FN, '')
    if transform.source.timestamp_attribute is not None:
      step.add_property(
          PropertyNames.PUBSUB_TIMESTAMP_ATTRIBUTE,
          transform.source.timestamp_attribute)
  else:
    raise ValueError(
        'Source %r has unexpected format %s.' %
        (transform.source, transform.source.format))
  # Custom sources use the generic SOURCE_FORMAT; native sources report
  # their own format string.
  if not hasattr(transform.source, 'format'):
    step.add_property(PropertyNames.FORMAT, names.SOURCE_FORMAT)
  else:
    step.add_property(PropertyNames.FORMAT, transform.source.format)
  # The output coder is the element coder wrapped as a globally-windowed
  # WindowedValue.
  coder = coders.WindowedValueCoder(
      coders.registry.get_coder(transform_node.outputs[None].element_type),
      coders.coders.GlobalWindowCoder())
  step.encoding = self._get_cloud_encoding(coder)
  step.add_property(
      PropertyNames.OUTPUT_INFO,
      [{
          PropertyNames.USER_NAME: (
              '%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
          PropertyNames.ENCODING: step.encoding,
          PropertyNames.OUTPUT_NAME: PropertyNames.OUT
      }])
def run__NativeWrite(self, transform_node, options):
  """Translates a native Write transform into a Dataflow 'write' step.

  Dispatches on ``transform.sink.format``, which may be 'text',
  'bigquery' or 'pubsub'.

  Raises:
    ValueError: for an unsupported sink format, or a Pub/Sub sink used
      outside a streaming pipeline.
  """
  transform = transform_node.transform
  input_tag = transform_node.inputs[0].tag
  input_step = self._cache.get_pvalue(transform_node.inputs[0])
  step = self._add_step(
      TransformNames.WRITE, transform_node.full_label, transform_node)
  if transform.sink.format == 'text':
    # with_type=True emits typed property values; suffix and
    # shard_name_template may be empty strings and num_shards is an int.
    step.add_property(
        PropertyNames.FILE_NAME_PREFIX,
        transform.sink.file_name_prefix,
        with_type=True)
    step.add_property(
        PropertyNames.FILE_NAME_SUFFIX,
        transform.sink.file_name_suffix,
        with_type=True)
    step.add_property(
        PropertyNames.SHARD_NAME_TEMPLATE,
        transform.sink.shard_name_template,
        with_type=True)
    if transform.sink.num_shards > 0:
      step.add_property(
          PropertyNames.NUM_SHARDS, transform.sink.num_shards, with_type=True)
    # TODO(silviuc): Implement sink validation.
    step.add_property(PropertyNames.VALIDATE_SINK, False, with_type=True)
  elif transform.sink.format == 'bigquery':
    step.add_property(
        PropertyNames.BIGQUERY_DATASET,
        transform.sink.table_reference.datasetId)
    step.add_property(
        PropertyNames.BIGQUERY_TABLE, transform.sink.table_reference.tableId)
    # projectId is optional; the service default applies when absent.
    if transform.sink.table_reference.projectId is not None:
      step.add_property(
          PropertyNames.BIGQUERY_PROJECT,
          transform.sink.table_reference.projectId)
    step.add_property(
        PropertyNames.BIGQUERY_CREATE_DISPOSITION,
        transform.sink.create_disposition)
    step.add_property(
        PropertyNames.BIGQUERY_WRITE_DISPOSITION,
        transform.sink.write_disposition)
    if transform.sink.table_schema is not None:
      step.add_property(
          PropertyNames.BIGQUERY_SCHEMA, transform.sink.schema_as_json())
    if transform.sink.kms_key is not None:
      step.add_property(
          PropertyNames.BIGQUERY_KMS_KEY, transform.sink.kms_key)
  elif transform.sink.format == 'pubsub':
    standard_options = options.view_as(StandardOptions)
    if not standard_options.streaming:
      raise ValueError(
          'Cloud Pub/Sub is currently available for use '
          'only in streaming pipelines.')
    step.add_property(PropertyNames.PUBSUB_TOPIC, transform.sink.full_topic)
    if transform.sink.id_label:
      step.add_property(
          PropertyNames.PUBSUB_ID_LABEL, transform.sink.id_label)
    if transform.sink.with_attributes:
      # NOTE(review): an empty value appears to act as a marker that
      # message attributes should be preserved — confirm against the
      # Dataflow service contract.
      step.add_property(PropertyNames.PUBSUB_SERIALIZED_ATTRIBUTES_FN, '')
    if transform.sink.timestamp_attribute is not None:
      step.add_property(
          PropertyNames.PUBSUB_TIMESTAMP_ATTRIBUTE,
          transform.sink.timestamp_attribute)
  else:
    raise ValueError(
        'Sink %r has unexpected format %s.' %
        (transform.sink, transform.sink.format))
  step.add_property(PropertyNames.FORMAT, transform.sink.format)
  # The sink's element coder, wrapped as a globally-windowed WindowedValue.
  coder = coders.WindowedValueCoder(
      transform.sink.coder, coders.coders.GlobalWindowCoder())
  step.encoding = self._get_cloud_encoding(coder)
  step.add_property(PropertyNames.ENCODING, step.encoding)
  step.add_property(
      PropertyNames.PARALLEL_INPUT,
      {
          '@type': 'OutputReference',
          PropertyNames.STEP_NAME: input_step.proto.name,
          PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)
      })
def run_TestStream(self, transform_node, options):
  """Translates a TestStream into a Dataflow 'read' step (format 'test_stream').

  Element, processing-time and watermark events are serialized into a
  TestStreamPayload proto attached to the step.

  Raises:
    ValueError: if the pipeline is not in streaming mode.
  """
  from apache_beam.testing.test_stream import ElementEvent
  from apache_beam.testing.test_stream import ProcessingTimeEvent
  from apache_beam.testing.test_stream import WatermarkEvent
  standard_options = options.view_as(StandardOptions)
  if not standard_options.streaming:
    raise ValueError(
        'TestStream is currently available for use '
        'only in streaming pipelines.')
  transform = transform_node.transform
  step = self._add_step(
      TransformNames.READ, transform_node.full_label, transform_node)
  step.add_property(PropertyNames.FORMAT, 'test_stream')
  test_stream_payload = beam_runner_api_pb2.TestStreamPayload()
  # Element events are encoded with the inferred coder directly into the
  # payload, so we won't set test_stream_payload.coder_id.
  output_coder = transform._infer_output_coder()  # pylint: disable=protected-access
  for event in transform._events:
    new_event = test_stream_payload.events.add()
    if isinstance(event, ElementEvent):
      for tv in event.timestamped_values:
        element = new_event.element_event.elements.add()
        element.encoded_element = output_coder.encode(tv.value)
        # Timestamps and durations are carried as microseconds.
        element.timestamp = tv.timestamp.micros
    elif isinstance(event, ProcessingTimeEvent):
      new_event.processing_time_event.advance_duration = (
          event.advance_by.micros)
    elif isinstance(event, WatermarkEvent):
      new_event.watermark_event.new_watermark = event.new_watermark.micros
  serialized_payload = self.byte_array_to_json_string(
      test_stream_payload.SerializeToString())
  step.add_property(PropertyNames.SERIALIZED_TEST_STREAM, serialized_payload)
  step.encoding = self._get_encoded_output_coder(transform_node)
  step.add_property(
      PropertyNames.OUTPUT_INFO,
      [{
          PropertyNames.USER_NAME: (
              '%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
          PropertyNames.ENCODING: step.encoding,
          PropertyNames.OUTPUT_NAME: PropertyNames.OUT
      }])

# Prevent test frameworks (nose/pytest) from collecting run_TestStream as a
# test case because of its name.
run_TestStream.__test__ = False
@classmethod
def serialize_windowing_strategy(cls, windowing):
  """Encodes *windowing* as a percent-quoted runner-API proto string."""
  from apache_beam.runners import pipeline_context
  context = pipeline_context.PipelineContext()
  strategy_proto = windowing.to_runner_api(context)
  message = beam_runner_api_pb2.MessageWithComponents(
      components=context.to_runner_api(),
      windowing_strategy=strategy_proto)
  return cls.byte_array_to_json_string(message.SerializeToString())
@classmethod
def deserialize_windowing_strategy(cls, serialized_data):
  """Inverse of serialize_windowing_strategy: rebuilds a Windowing object."""
  from apache_beam.runners import pipeline_context
  from apache_beam.transforms.core import Windowing
  message = beam_runner_api_pb2.MessageWithComponents()
  message.ParseFromString(cls.json_string_to_byte_array(serialized_data))
  components = pipeline_context.PipelineContext(message.components)
  return Windowing.from_runner_api(message.windowing_strategy, components)
@staticmethod
def byte_array_to_json_string(raw_bytes):
  """Percent-encodes *raw_bytes* (urllib quote) into a JSON-safe string."""
  return quote(raw_bytes)
@staticmethod
def json_string_to_byte_array(encoded_string):
  """Inverse of byte_array_to_json_string: percent-decodes back to bytes."""
  return unquote_to_bytes(encoded_string)
def get_default_gcp_region(self):
  """Returns a default GCP region for the job, or None.

  Checks $CLOUDSDK_COMPUTE_REGION first, then falls back to asking the
  locally installed ``gcloud`` tool for its configured compute/region.

  Returns:
    The region string, or None when no default could be determined.
  """
  environment_region = os.environ.get('CLOUDSDK_COMPUTE_REGION')
  if environment_region:
    _LOGGER.info(
        'Using default GCP region %s from $CLOUDSDK_COMPUTE_REGION',
        environment_region)
    return environment_region
  try:
    cmd = ['gcloud', 'config', 'get-value', 'compute/region']
    # Discard gcloud's stderr chatter. Use subprocess.DEVNULL where
    # available (Python 3); otherwise open os.devnull ourselves and make
    # sure the handle is closed again (the previous version leaked it).
    if hasattr(subprocess, 'DEVNULL'):
      raw_output = processes.check_output(cmd, stderr=subprocess.DEVNULL)
    else:
      with open(os.devnull, 'ab') as dev_null:
        raw_output = processes.check_output(cmd, stderr=dev_null)
    formatted_output = raw_output.decode('utf-8').strip()
    if formatted_output:
      _LOGGER.info(
          'Using default GCP region %s from `%s`',
          formatted_output,
          ' '.join(cmd))
      return formatted_output
  except RuntimeError:
    # gcloud is missing or failed; fall through and return None.
    pass
  return None
class _DataflowSideInput(beam.pvalue.AsSideInput):
  """Base for Dataflow-compatible side-input wrappers.

  Subclasses populate ``self._data`` with a beam.pvalue.SideInputData.
  """

  def _view_options(self):
    return {'data': self._data}

  def _side_input_data(self):
    return self._data
class _DataflowIterableAsMultimapSideInput(_DataflowSideInput):
  """Serves an ITERABLE side input through the MULTIMAP access pattern."""

  def __init__(self, side_input):
    # pylint: disable=protected-access
    data = side_input._side_input_data()
    assert data.access_pattern == common_urns.side_inputs.ITERABLE.urn
    view_iterable = data.view_fn

    def view_as_multimap(multimap):
      # The whole iterable is stored under the single empty byte-string key.
      return view_iterable(multimap[b''])

    self._data = beam.pvalue.SideInputData(
        common_urns.side_inputs.MULTIMAP.urn,
        data.window_mapping_fn,
        view_as_multimap)
class _DataflowIterableSideInput(_DataflowSideInput):
  """Wraps an iterable side input as a Dataflow-compatible side input."""

  def __init__(self, side_input):
    self.pvalue = side_input.pvalue
    data = side_input._side_input_data()  # pylint: disable=protected-access
    assert data.access_pattern == common_urns.side_inputs.ITERABLE.urn
    self._data = beam.pvalue.SideInputData(
        common_urns.side_inputs.ITERABLE.urn,
        data.window_mapping_fn,
        data.view_fn)
class _DataflowMultimapSideInput(_DataflowSideInput):
  """Wraps a multimap side input as a Dataflow-compatible side input."""

  def __init__(self, side_input):
    self.pvalue = side_input.pvalue
    data = side_input._side_input_data()  # pylint: disable=protected-access
    assert data.access_pattern == common_urns.side_inputs.MULTIMAP.urn
    self._data = beam.pvalue.SideInputData(
        common_urns.side_inputs.MULTIMAP.urn,
        data.window_mapping_fn,
        data.view_fn)
class DataflowPipelineResult(PipelineResult):
  """Result object for a pipeline submitted to the Dataflow service."""

  def __init__(self, job, runner):
    """Initializes the result.

    Args:
      job: Job message returned by the Dataflow API; may be None when the
        job was not submitted successfully.
      runner: the DataflowRunner instance that submitted the job.
    """
    self._job = job
    self._runner = runner
    # Set externally after the job completes; returned by metrics().
    # NOTE(review): never assigned within this class — confirm the writer.
    self.metric_results = None

  def _update_job(self):
    # Refresh the cached job message from the service; skipped once the
    # job is already in a terminal state (no further changes expected).
    if self.has_job and not self.is_in_terminal_state():
      self._job = self._runner.dataflow_client.get_job(self.job_id())

  def job_id(self):
    return self._job.id

  def metrics(self):
    return self.metric_results

  @property
  def has_job(self):
    # True when the service accepted the job submission.
    return self._job is not None

  def _get_job_state(self):
    """Maps the API job state enum onto PipelineState values."""
    values_enum = dataflow_api.Job.CurrentStateValueValuesEnum
    # States added to the API after this mapping was written fall back to
    # UNRECOGNIZED via the defaultdict factory.
    api_jobstate_map = defaultdict(
        lambda: PipelineState.UNRECOGNIZED,
        {
            values_enum.JOB_STATE_UNKNOWN: PipelineState.UNKNOWN,
            values_enum.JOB_STATE_STOPPED: PipelineState.STOPPED,
            values_enum.JOB_STATE_RUNNING: PipelineState.RUNNING,
            values_enum.JOB_STATE_DONE: PipelineState.DONE,
            values_enum.JOB_STATE_FAILED: PipelineState.FAILED,
            values_enum.JOB_STATE_CANCELLED: PipelineState.CANCELLED,
            values_enum.JOB_STATE_UPDATED: PipelineState.UPDATED,
            values_enum.JOB_STATE_DRAINING: PipelineState.DRAINING,
            values_enum.JOB_STATE_DRAINED: PipelineState.DRAINED,
            values_enum.JOB_STATE_PENDING: PipelineState.PENDING,
            values_enum.JOB_STATE_CANCELLING: PipelineState.CANCELLING,
        })
    return (
        api_jobstate_map[self._job.currentState]
        if self._job.currentState else PipelineState.UNKNOWN)

  @property
  def state(self):
    """Returns the current (refreshed) state of the remote job."""
    if not self.has_job:
      return PipelineState.UNKNOWN
    self._update_job()
    return self._get_job_state()

  def is_in_terminal_state(self):
    # A job that never got an id is treated as terminal: there is nothing
    # left to wait for.
    if not self.has_job:
      return True
    return PipelineState.is_terminal(self._get_job_state())

  def wait_until_finish(self, duration=None):
    """Blocks until the job reaches a terminal state (or duration elapses).

    Args:
      duration: forwarded to DataflowRunner.poll_for_job_completion; None
        means wait indefinitely.  NOTE(review): units are not visible from
        this block — confirm in poll_for_job_completion.

    Returns:
      The final PipelineState.

    Raises:
      IOError: if there is no job id to wait on.
      DataflowRuntimeException: if the job terminated in a non-DONE state.
    """
    if not self.is_in_terminal_state():
      if not self.has_job:
        raise IOError('Failed to get the Dataflow job id.')
      # Poll in a daemon thread and busy-wait on it here so the main
      # thread stays interruptible (e.g. by KeyboardInterrupt).
      thread = threading.Thread(
          target=DataflowRunner.poll_for_job_completion,
          args=(self._runner, self, duration))
      thread.daemon = True
      thread.start()
      while thread.is_alive():
        time.sleep(5.0)
      terminated = self.is_in_terminal_state()
      assert duration or terminated, (
          'Job did not reach to a terminal state after waiting indefinitely.')
      if terminated and self.state != PipelineState.DONE:
        raise DataflowRuntimeException(
            'Dataflow pipeline failed. State: %s, Error:\n%s' %
            (self.state, getattr(self._runner, 'last_error_msg', None)),
            self)
    return self.state

  def cancel(self):
    """Requests cancellation of the job on the Dataflow service.

    Returns:
      The job state after the cancel attempt.

    Raises:
      IOError: if there is no job id to cancel.
      DataflowRuntimeException: if the cancel request was rejected.
    """
    if not self.has_job:
      raise IOError('Failed to get the Dataflow job id.')
    self._update_job()
    if self.is_in_terminal_state():
      # Nothing to cancel; warn instead of failing.
      _LOGGER.warning(
          'Cancel failed because job %s is already terminated in state %s.',
          self.job_id(),
          self.state)
    else:
      if not self._runner.dataflow_client.modify_job_state(
          self.job_id(), 'JOB_STATE_CANCELLED'):
        cancel_failed_message = (
            'Failed to cancel job %s, please go to the Developers Console to '
            'cancel it manually.') % self.job_id()
        _LOGGER.error(cancel_failed_message)
        raise DataflowRuntimeException(cancel_failed_message, self)
    return self.state

  def __str__(self):
    return '<%s %s %s>' % (self.__class__.__name__, self.job_id(), self.state)

  def __repr__(self):
    return '<%s %s at %s>' % (self.__class__.__name__, self._job, hex(id(self)))
class DataflowRuntimeException(Exception):
  """Indicates an error occurred while running a Dataflow pipeline.

  Attributes:
    result: the DataflowPipelineResult associated with the failure.
  """

  def __init__(self, msg, result):
    self.result = result
    super(DataflowRuntimeException, self).__init__(msg)
| true | true |
f71e568dd0e667d488a561c5ed72e8b96933814c | 507 | py | Python | setup.py | thejohnfreeman/picard | 09b39d6655e1df071ea9f3f63eaf35ddbd2f0e0f | [
"MIT"
] | 3 | 2018-12-28T03:14:49.000Z | 2019-05-24T21:05:13.000Z | setup.py | thejohnfreeman/picard | 09b39d6655e1df071ea9f3f63eaf35ddbd2f0e0f | [
"MIT"
] | null | null | null | setup.py | thejohnfreeman/picard | 09b39d6655e1df071ea9f3f63eaf35ddbd2f0e0f | [
"MIT"
] | null | null | null | # WARNING: DO NOT USE THIS.
# This setup.py exists only to satisfy Read the Docs until they can support
# pyproject.toml (PEP 517):
# https://github.com/rtfd/readthedocs.org/issues/4912#issuecomment-444198329
# https://github.com/pypa/pip/pull/5743
from setuptools import setup

# Runtime dependencies (presumably mirroring pyproject.toml — verify when
# bumping versions there).
REQUIREMENTS = [
    'boto3>=1.9,<1.10',
    'tabulate>=0.8,<0.9',
    'toml>=0.10,<0.11',
    'typing_extensions>=3.6,<3.7',
]

setup(
    name='picard',
    version='0.1.3',
    packages=['picard'],
    install_requires=REQUIREMENTS,
)
| 28.166667 | 76 | 0.637081 |
t setup
setup(
name='picard',
version='0.1.3',
packages=['picard'],
install_requires=[
'boto3>=1.9,<1.10',
'tabulate>=0.8,<0.9',
'toml>=0.10,<0.11',
'typing_extensions>=3.6,<3.7',
],
)
| true | true |
f71e58148d9d1a173dc56feacc564d5b197c059c | 2,488 | py | Python | httpstream/numbers.py | technige/httpstream | 8be13f52fc4f031dfed281963f4313c3c9b4729a | [
"Apache-2.0"
] | 2 | 2017-09-21T10:41:17.000Z | 2019-06-27T13:23:11.000Z | httpstream/numbers.py | technige/httpstream | 8be13f52fc4f031dfed281963f4313c3c9b4729a | [
"Apache-2.0"
] | null | null | null | httpstream/numbers.py | technige/httpstream | 8be13f52fc4f031dfed281963f4313c3c9b4729a | [
"Apache-2.0"
] | 1 | 2019-02-18T13:57:22.000Z | 2019-02-18T13:57:22.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2012-2015, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# HTTP/HTTPS Port Numbers
HTTP_PORT = 80
HTTPS_PORT = 443

# HTTP Status Codes <http://www.iana.org/assignments/http-status-codes>
# Constant names follow the registry's reason phrases, grouped by class.

# 1xx: Informational
# Request received, continuing process
CONTINUE = 100
SWITCHING_PROTOCOLS = 101
PROCESSING = 102

# 2xx: Success
# The action was successfully received, understood, and accepted
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
ALREADY_REPORTED = 208
IM_USED = 226

# 3xx: Redirection
# Further action must be taken in order to complete the request
MULTIPLE_CHOICES = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307
PERMANENT_REDIRECT = 308

# 4xx: Client Error
# The request contains bad syntax or cannot be fulfilled
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTHENTICATION_REQUIRED = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIABLE = 416
EXPECTATION_FAILED = 417
IM_A_TEAPOT = 418
UNPROCESSABLE_ENTITY = 422
LOCKED = 423
FAILED_DEPENDENCY = 424
UPGRADE_REQUIRED = 426
PRECONDITION_REQUIRED = 428
TOO_MANY_REQUESTS = 429
REQUEST_HEADER_FIELDS_TOO_LARGE = 431
UNAVAILABLE_FOR_LEGAL_REASONS = 451

# 5xx: Server Error
# The server failed to fulfill an apparently valid request
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
VARIANT_ALSO_NEGOTIATES = 506
INSUFFICIENT_STORAGE = 507
LOOP_DETECTED = 508
NOT_EXTENDED = 510
NETWORK_AUTHENTICATION_REQUIRED = 511
| 24.88 | 74 | 0.791801 |
HTTP_PORT = 80
HTTPS_PORT = 443
CONTINUE = 100
SWITCHING_PROTOCOLS = 101
PROCESSING = 102
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
ALREADY_REPORTED = 208
IM_USED = 226
MULTIPLE_CHOICES = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307
PERMANENT_REDIRECT = 308
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTHENTICATION_REQUIRED = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIABLE = 416
EXPECTATION_FAILED = 417
IM_A_TEAPOT = 418
UNPROCESSABLE_ENTITY = 422
LOCKED = 423
FAILED_DEPENDENCY = 424
UPGRADE_REQUIRED = 426
PRECONDITION_REQUIRED = 428
TOO_MANY_REQUESTS = 429
REQUEST_HEADER_FIELDS_TOO_LARGE = 431
UNAVAILABLE_FOR_LEGAL_REASONS = 451
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
VARIANT_ALSO_NEGOTIATES = 506
INSUFFICIENT_STORAGE = 507
LOOP_DETECTED = 508
NOT_EXTENDED = 510
NETWORK_AUTHENTICATION_REQUIRED = 511
| true | true |
f71e5834283c296de8eb47d6ad42b9957168b32a | 2,078 | py | Python | carga_contadores.py | robzoros/tienda_online | 86fa8533f068559e845f867330ad2ba24a2d3601 | [
"MIT"
] | null | null | null | carga_contadores.py | robzoros/tienda_online | 86fa8533f068559e845f867330ad2ba24a2d3601 | [
"MIT"
] | null | null | null | carga_contadores.py | robzoros/tienda_online | 86fa8533f068559e845f867330ad2ba24a2d3601 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from cassandra.cluster import Cluster

# Connect to the cluster and open a session on the 'tienda_online' keyspace.
cluster = Cluster()
session = cluster.connect('tienda_online')

#******************************************************
# COUNTERS (denormalized best-seller tables)
#******************************************************

# Rebuild the two derived tables from scratch.
session.execute("TRUNCATE productos_mas_vendidos")
session.execute("TRUNCATE productos_vendidos_juntos")

# Load all sales counters and keep the 8 best-selling products
# (sorted client-side, descending by numero_ventas).
result = session.execute('SELECT * FROM contador_productos_vendidos')
productos_vendidos = sorted(result, key=lambda p: -1 * p.numero_ventas)[0:8]
for producto in productos_vendidos:
    prod = session.execute('SELECT * FROM productos WHERE codigo_referencia = ' + str(producto.codigo_referencia))[0]
    # Duplicate each top-seller row under 10 seed partitions (semilla 0-9).
    # NOTE(review): presumably so readers can pick a random partition —
    # confirm against the read path.
    for contador in range(10):
        # NOTE(review): the price is formatted into the CQL text while the
        # other columns are bound parameters, and the statement is
        # re-prepared on every iteration.
        prepared = session.prepare("INSERT INTO productos_mas_vendidos (semilla, codigo_referencia, nombre_producto, precio_producto, url_imagen, numero_ventas)" +
                                   " VALUES (?, ?, ?, " + "{:0.2f}".format(prod.precio_producto) + ", ?, ?)")
        session.execute(prepared, (contador, prod.codigo_referencia, prod.nombre_producto, prod.url_imagen, producto.numero_ventas))

# For every product, record the related products most often sold together
# with it, ordered by sale count.
productos = session.execute('SELECT * FROM productos')
for producto in productos:
    result = session.execute('SELECT * FROM contador_prod_vendidos_juntos WHERE producto = ' + str(producto.codigo_referencia))
    productos_rel = sorted(result, key=lambda p: -1 * p.numero_ventas)
    for prod in productos_rel:
        prod_rel = session.execute('SELECT * FROM productos WHERE codigo_referencia = ' + str(prod.producto_rel))[0]
        prepared = session.prepare("INSERT INTO productos_vendidos_juntos (producto, producto_rel, nombre_producto, precio_producto, url_imagen, numero_ventas)" +
                                   " VALUES (?, ?, ?, " + "{:0.2f}".format(producto.precio_producto) + ", ?, ?)")
        session.execute(prepared, (producto.codigo_referencia, prod_rel.codigo_referencia, prod_rel.nombre_producto, prod_rel.url_imagen, prod.numero_ventas))
| 53.282051 | 163 | 0.674687 |
from cassandra.cluster import Cluster
cluster = Cluster()
session = cluster.connect('tienda_online')
session.execute("TRUNCATE productos_mas_vendidos")
session.execute("TRUNCATE productos_vendidos_juntos")
result = session.execute('SELECT * FROM contador_productos_vendidos')
productos_vendidos = sorted(result, key=lambda p: -1 * p.numero_ventas)[0:8]
for producto in productos_vendidos:
prod = session.execute('SELECT * FROM productos WHERE codigo_referencia = ' + str(producto.codigo_referencia))[0]
for contador in range(10):
prepared = session.prepare("INSERT INTO productos_mas_vendidos (semilla, codigo_referencia, nombre_producto, precio_producto, url_imagen, numero_ventas)" +
" VALUES (?, ?, ?, " + "{:0.2f}".format(prod.precio_producto) + ", ?, ?)")
session.execute(prepared, (contador, prod.codigo_referencia, prod.nombre_producto, prod.url_imagen, producto.numero_ventas))
productos = session.execute('SELECT * FROM productos')
for producto in productos:
result = session.execute('SELECT * FROM contador_prod_vendidos_juntos WHERE producto = ' + str(producto.codigo_referencia))
productos_rel = sorted(result, key=lambda p: -1 * p.numero_ventas)
for prod in productos_rel:
prod_rel = session.execute('SELECT * FROM productos WHERE codigo_referencia = ' + str(prod.producto_rel))[0]
prepared = session.prepare("INSERT INTO productos_vendidos_juntos (producto, producto_rel, nombre_producto, precio_producto, url_imagen, numero_ventas)" +
" VALUES (?, ?, ?, " + "{:0.2f}".format(producto.precio_producto) + ", ?, ?)")
session.execute(prepared, (producto.codigo_referencia, prod_rel.codigo_referencia, prod_rel.nombre_producto, prod_rel.url_imagen, prod.numero_ventas))
| true | true |
f71e58d321e14ad0e343e03cdeed51884bcfe901 | 5,207 | py | Python | official/core/base_trainer_test.py | xxia-kathy/models | 157faae1af5d89c53a5699b601dc68fee274ef09 | [
"Apache-2.0"
] | 1 | 2021-02-22T13:05:55.000Z | 2021-02-22T13:05:55.000Z | official/core/base_trainer_test.py | xxia-kathy/models | 157faae1af5d89c53a5699b601dc68fee274ef09 | [
"Apache-2.0"
] | null | null | null | official/core/base_trainer_test.py | xxia-kathy/models | 157faae1af5d89c53a5699b601dc68fee274ef09 | [
"Apache-2.0"
] | 3 | 2021-02-22T13:24:07.000Z | 2021-02-26T02:06:24.000Z | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_models.core.trainers.trainer."""
# pylint: disable=g-direct-tensorflow-import
import os
from absl.testing import parameterized
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.core import base_trainer as trainer_lib
from official.core import train_lib
from official.modeling.hyperparams import config_definitions as cfg
from official.utils.testing import mock_task
def all_strategy_combinations():
  """Returns eager-mode test combinations over the distribution strategies."""
  strategies = [
      strategy_combinations.default_strategy,
      strategy_combinations.tpu_strategy,
      strategy_combinations.one_device_strategy_gpu,
  ]
  return combinations.combine(distribution=strategies, mode='eager')
class TrainerTest(tf.test.TestCase, parameterized.TestCase):
  """Tests base_trainer.Trainer across tf.distribute strategies."""

  def setUp(self):
    super().setUp()
    # Minimal experiment config: plain SGD with a constant learning rate.
    self._config = cfg.ExperimentConfig(
        trainer=cfg.TrainerConfig(
            optimizer_config=cfg.OptimizationConfig({
                'optimizer': {
                    'type': 'sgd'
                },
                'learning_rate': {
                    'type': 'constant'
                }
            })))

  def create_test_trainer(self, config, model_dir=None):
    """Builds a Trainer around a MockTask for the given experiment config."""
    task = mock_task.MockTask(config.task, logging_dir=model_dir)
    ckpt_exporter = train_lib.maybe_create_best_ckpt_exporter(config, model_dir)
    trainer = trainer_lib.Trainer(
        config,
        task,
        model=task.build_model(),
        optimizer=trainer_lib.create_optimizer(config.trainer, config.runtime),
        checkpoint_exporter=ckpt_exporter)
    return trainer

  @combinations.generate(all_strategy_combinations())
  def test_trainer_train(self, distribution):
    """train() should report training loss and learning rate in its logs."""
    with distribution.scope():
      trainer = self.create_test_trainer(self._config)
      logs = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))
      self.assertIn('training_loss', logs)
      self.assertIn('learning_rate', logs)

  @combinations.generate(all_strategy_combinations())
  def test_trainer_validate(self, distribution):
    """evaluate() should report validation loss and a replica-summed metric."""
    with distribution.scope():
      trainer = self.create_test_trainer(self._config)
      logs = trainer.evaluate(tf.convert_to_tensor(5, dtype=tf.int32))
      self.assertIn('validation_loss', logs)
      # 5 eval steps contribute per replica.
      # NOTE(review): the exact 'acc' semantics live in MockTask — confirm.
      self.assertEqual(logs['acc'], 5. * distribution.num_replicas_in_sync)

  @combinations.generate(
      combinations.combine(
          mixed_precision_dtype=['float32', 'bfloat16', 'float16'],
          loss_scale=[None, 'dynamic', 128, 256],
      ))
  def test_configure_optimizer(self, mixed_precision_dtype, loss_scale):
    """Only float16 with a loss scale should wrap SGD in LossScaleOptimizer."""
    config = cfg.ExperimentConfig(
        runtime=cfg.RuntimeConfig(
            mixed_precision_dtype=mixed_precision_dtype, loss_scale=loss_scale),
        trainer=cfg.TrainerConfig(
            optimizer_config=cfg.OptimizationConfig({
                'optimizer': {
                    'type': 'sgd'
                },
                'learning_rate': {
                    'type': 'constant'
                }
            })))
    trainer = self.create_test_trainer(config)
    if mixed_precision_dtype != 'float16':
      self.assertIsInstance(trainer.optimizer, tf.keras.optimizers.SGD)
    elif mixed_precision_dtype == 'float16' and loss_scale is None:
      self.assertIsInstance(trainer.optimizer, tf.keras.optimizers.SGD)
    else:
      self.assertIsInstance(
          trainer.optimizer,
          tf.keras.mixed_precision.experimental.LossScaleOptimizer)
    # Training must still run regardless of the optimizer wrapping.
    metrics = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))
    self.assertIn('training_loss', metrics)

  @combinations.generate(all_strategy_combinations())
  def test_export_best_ckpt(self, distribution):
    """A best-checkpoint exporter should write info.json under model_dir."""
    config = cfg.ExperimentConfig(
        trainer=cfg.TrainerConfig(
            best_checkpoint_export_subdir='best_ckpt',
            best_checkpoint_eval_metric='acc',
            optimizer_config=cfg.OptimizationConfig({
                'optimizer': {
                    'type': 'sgd'
                },
                'learning_rate': {
                    'type': 'constant'
                }
            })))
    model_dir = self.get_temp_dir()
    trainer = self.create_test_trainer(config, model_dir=model_dir)
    trainer.train(tf.convert_to_tensor(1, dtype=tf.int32))
    trainer.evaluate(tf.convert_to_tensor(1, dtype=tf.int32))
    self.assertTrue(
        tf.io.gfile.exists(os.path.join(model_dir, 'best_ckpt', 'info.json')))
# Run the test suite when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| 37.460432 | 80 | 0.668907 |
import os
from absl.testing import parameterized
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.core import base_trainer as trainer_lib
from official.core import train_lib
from official.modeling.hyperparams import config_definitions as cfg
from official.utils.testing import mock_task
def all_strategy_combinations():
return combinations.combine(
distribution=[
strategy_combinations.default_strategy,
strategy_combinations.tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],
mode='eager',
)
class TrainerTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for the base Trainer built on top of a mock task."""
  def setUp(self):
    super().setUp()
    # Minimal experiment config: SGD optimizer with a constant learning rate.
    self._config = cfg.ExperimentConfig(
        trainer=cfg.TrainerConfig(
            optimizer_config=cfg.OptimizationConfig({
                'optimizer': {
                    'type': 'sgd'
                },
                'learning_rate': {
                    'type': 'constant'
                }
            })))
  def create_test_trainer(self, config, model_dir=None):
    """Builds a Trainer over MockTask, wiring in a best-ckpt exporter if configured."""
    task = mock_task.MockTask(config.task, logging_dir=model_dir)
    ckpt_exporter = train_lib.maybe_create_best_ckpt_exporter(config, model_dir)
    trainer = trainer_lib.Trainer(
        config,
        task,
        model=task.build_model(),
        optimizer=trainer_lib.create_optimizer(config.trainer, config.runtime),
        checkpoint_exporter=ckpt_exporter)
    return trainer
  @combinations.generate(all_strategy_combinations())
  def test_trainer_train(self, distribution):
    """A train call reports training loss and learning rate in its logs."""
    with distribution.scope():
      trainer = self.create_test_trainer(self._config)
      logs = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))
      self.assertIn('training_loss', logs)
      self.assertIn('learning_rate', logs)
  @combinations.generate(all_strategy_combinations())
  def test_trainer_validate(self, distribution):
    """An evaluate call reports validation loss and the replica-scaled 'acc'."""
    with distribution.scope():
      trainer = self.create_test_trainer(self._config)
      logs = trainer.evaluate(tf.convert_to_tensor(5, dtype=tf.int32))
      self.assertIn('validation_loss', logs)
      # Expected value mirrors the mock metric: 5 eval steps, summed per replica.
      self.assertEqual(logs['acc'], 5. * distribution.num_replicas_in_sync)
  @combinations.generate(
      combinations.combine(
          mixed_precision_dtype=['float32', 'bfloat16', 'float16'],
          loss_scale=[None, 'dynamic', 128, 256],
      ))
  def test_configure_optimizer(self, mixed_precision_dtype, loss_scale):
    """Only float16 + a loss scale should wrap SGD in a LossScaleOptimizer."""
    config = cfg.ExperimentConfig(
        runtime=cfg.RuntimeConfig(
            mixed_precision_dtype=mixed_precision_dtype, loss_scale=loss_scale),
        trainer=cfg.TrainerConfig(
            optimizer_config=cfg.OptimizationConfig({
                'optimizer': {
                    'type': 'sgd'
                },
                'learning_rate': {
                    'type': 'constant'
                }
            })))
    trainer = self.create_test_trainer(config)
    if mixed_precision_dtype != 'float16':
      self.assertIsInstance(trainer.optimizer, tf.keras.optimizers.SGD)
    elif mixed_precision_dtype == 'float16' and loss_scale is None:
      self.assertIsInstance(trainer.optimizer, tf.keras.optimizers.SGD)
    else:
      self.assertIsInstance(
          trainer.optimizer,
          tf.keras.mixed_precision.experimental.LossScaleOptimizer)
    # Training should still work regardless of the optimizer wrapping.
    metrics = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))
    self.assertIn('training_loss', metrics)
  @combinations.generate(all_strategy_combinations())
  def test_export_best_ckpt(self, distribution):
    """Best-checkpoint exporter writes its metadata file after train + evaluate."""
    config = cfg.ExperimentConfig(
        trainer=cfg.TrainerConfig(
            best_checkpoint_export_subdir='best_ckpt',
            best_checkpoint_eval_metric='acc',
            optimizer_config=cfg.OptimizationConfig({
                'optimizer': {
                    'type': 'sgd'
                },
                'learning_rate': {
                    'type': 'constant'
                }
            })))
    model_dir = self.get_temp_dir()
    trainer = self.create_test_trainer(config, model_dir=model_dir)
    trainer.train(tf.convert_to_tensor(1, dtype=tf.int32))
    trainer.evaluate(tf.convert_to_tensor(1, dtype=tf.int32))
    self.assertTrue(
        tf.io.gfile.exists(os.path.join(model_dir, 'best_ckpt', 'info.json')))
# Allow running this test file directly (python <file>.py).
if __name__ == '__main__':
  tf.test.main()
| true | true |
f71e591c2ff6a50a381fad44e154010bbb850217 | 7,849 | py | Python | tools/mo/openvino/tools/mo/middle/passes/convert_data_type.py | pazamelin/openvino | b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48 | [
"Apache-2.0"
] | 1 | 2019-09-22T01:05:07.000Z | 2019-09-22T01:05:07.000Z | tools/mo/openvino/tools/mo/middle/passes/convert_data_type.py | pazamelin/openvino | b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48 | [
"Apache-2.0"
] | 58 | 2020-11-06T12:13:45.000Z | 2022-03-28T13:20:11.000Z | tools/mo/openvino/tools/mo/middle/passes/convert_data_type.py | pazamelin/openvino | b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48 | [
"Apache-2.0"
] | 2 | 2021-07-14T07:40:50.000Z | 2021-07-27T01:40:03.000Z | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import logging as log
import numpy as np
from openvino.tools.mo.front.extractor import get_new_placeholder_name
from openvino.tools.mo.graph.graph import Node, Graph
from openvino.tools.mo.utils.error import Error
from openvino.tools.mo.utils.utils import refer_to_faq_msg
"""
Packed data of custom types are stored in numpy uint8 data type.
To distinguish true uint8 and custom data we introduce this class not to store,
but to have unique data type in SUPPORTED_DATA_TYPES map
"""
class packed_U1(np.generic):
    """Marker type for 1-bit unsigned data packed into uint8 storage."""
    pass


class packed_U4(np.generic):
    """Marker type for 4-bit unsigned data packed into uint8 storage."""
    pass


class packed_I4(np.generic):
    """Marker type for 4-bit signed data packed into uint8 storage."""
    pass


# Maps a framework type name to (numpy type, IR precision, destination type).
SUPPORTED_DATA_TYPES = {
    'float': (np.float32, 'FP32', 'f32'),
    'half': (np.float16, 'FP16', 'f16'),
    'FP32': (np.float32, 'FP32', 'f32'),
    'FP64': (np.float64, 'FP64', 'f64'),
    'FP16': (np.float16, 'FP16', 'f16'),
    'I32': (np.int32, 'I32', 'i32'),
    'I64': (np.int64, 'I64', 'i64'),
    'int8': (np.int8, 'I8', 'i8'),
    'int32': (np.int32, 'I32', 'i32'),
    'int64': (np.int64, 'I64', 'i64'),
    # 'np.bool' was merely a deprecated alias of the builtin 'bool' and was
    # removed in NumPy 1.24; use the builtin directly (identical behavior).
    'bool': (bool, 'BOOL', 'boolean'),
    'uint8': (np.uint8, 'U8', 'u8'),
    'uint32': (np.uint32, 'U32', 'u32'),
    'uint64': (np.uint64, 'U64', 'u64'),

    # custom types
    'U1': (packed_U1, 'U1', 'u1'),
    'int4': (packed_I4, 'I4', 'i4'),
    'uint4': (packed_U4, 'U4', 'u4'),
    'I4': (packed_I4, 'I4', 'i4'),
    'U4': (packed_U4, 'U4', 'u4'),
}
def data_type_str_to_np(data_type_str: str):
    """Return the numpy type for the given type name, or None if unknown."""
    entry = SUPPORTED_DATA_TYPES.get(data_type_str)
    return None if entry is None else entry[0]


def data_type_str_to_precision(data_type_str: str):
    """Return the IR precision string for the given type name, or None if unknown."""
    entry = SUPPORTED_DATA_TYPES.get(data_type_str)
    return None if entry is None else entry[1]


def data_type_str_to_destination_type(data_type_str: str):
    """Return the destination type string for the given type name, or None if unknown."""
    entry = SUPPORTED_DATA_TYPES.get(data_type_str)
    return None if entry is None else entry[2]
def np_data_type_to_precision(np_data_type):
    """Return the IR precision string for a numpy type; raise Error if unsupported."""
    found = next((precision for numpy_type, precision, _ in SUPPORTED_DATA_TYPES.values()
                  if numpy_type == np_data_type), None)
    if found is None:
        raise Error('Data type "{}" is not supported'.format(np_data_type))
    return found
def np_data_type_to_destination_type(np_data_type):
    """Return the destination type string for a numpy type; raise Error if unsupported."""
    found = next((dst for numpy_type, _, dst in SUPPORTED_DATA_TYPES.values()
                  if numpy_type == np_data_type), None)
    if found is None:
        raise Error('Data type "{}" is not supported'.format(np_data_type))
    return found
def destination_type_to_np_data_type(dst_type):
    """Return the numpy type for a destination type string; raise Error if unsupported."""
    found = next((numpy_type for numpy_type, _, destination in SUPPORTED_DATA_TYPES.values()
                  if destination == dst_type), None)
    if found is None:
        raise Error('Destination type "{}" is not supported'.format(dst_type))
    return found
def precision_to_destination_type(data_type_str):
    """Return the destination type string for an IR precision; raise Error if unsupported."""
    found = next((destination for _, precision, destination in SUPPORTED_DATA_TYPES.values()
                  if precision == data_type_str), None)
    if found is None:
        raise Error('Data type "{}" is not supported'.format(data_type_str))
    return found
def convert_blob(blob: np.ndarray, dst_type: type):
    """Cast `blob` to `dst_type`.

    Returns (converted_blob, finite_mismatch_count, zero_mismatch_count); the
    counters are None when no conversion was necessary. Raises Error when a
    cast to an integer type would round values.
    """
    if blob.dtype == dst_type:
        # Already in the requested type - nothing to convert.
        return blob, None, None
    converted = blob.astype(dtype=dst_type, casting="unsafe")
    integer_targets = (np.int32, np.int64, np.uint8, np.int8)
    if dst_type in integer_targets and not np.array_equal(blob, converted):
        raise Error('The conversion of blob with value "{}" to dst_type "{}" results in rounding'.format(
            blob, dst_type))
    # Count elements whose finiteness or zero-ness changed during the cast.
    finite_mismatches = np.count_nonzero(np.isfinite(blob) != np.isfinite(converted))
    zero_mismatches = np.count_nonzero((blob == 0) != (converted == 0))
    return converted, finite_mismatches, zero_mismatches
def convert_node_blobs(graph: Graph, node: Node, data_type: type):
    """Convert a data node's value to `data_type` when it is dumped as binary weights.

    Logs how many elements were clipped to infinity/zero, and propagates the
    converted value into a single downstream 'Const' node.
    """
    out_edges = graph.out_edges(node.node, data=True)
    # if the data.value is used as binary weights
    if any('bin' in d for _, __, d in out_edges):
        blob = node.value
        if blob.dtype != data_type:
            new_blob, finite_match_count, zero_match_count = convert_blob(blob, data_type)
            consumers = [x.name if x.has_valid('name') else '<NO NAME>' for x in node.out_nodes()]
            log.debug(
                'Blob was converted to {} while dumping to the bin file. This blob is an input for {} nodes.'.format(
                    data_type, consumers))
            if finite_match_count:
                log.error(
                    ("{} elements of {} were clipped to infinity while converting a blob for node [{}] to {}. " +
                     refer_to_faq_msg(76)).format(finite_match_count, blob.size, consumers, data_type))
            if zero_match_count:
                log.warning(
                    ("{} elements of {} were clipped to zero while converting a blob for node [{}] to {}. " +
                     refer_to_faq_msg(77)).format(zero_match_count, blob.size, consumers, data_type))
            node.value = new_blob
            # for the constant node need to propagate the converted value to the node output because there is a fake
            # input data for the 'Const' nodes being generated in the CreateConstNodesReplacement
            if len(node.out_nodes()) == 1 and node.out_node(0).op == 'Const':
                const_node = node.out_node(0)
                const_node.value = new_blob
                const_node.infer(const_node)
                const_node.type_infer(const_node)
def convert_parameters_data_type(graph: Graph, data_type_str: str):
    """Set the data type of all Parameter nodes.

    A type the user explicitly specified for a placeholder wins; otherwise
    Parameters that are untyped or float32 are switched to `data_type_str`.
    """
    inputs = graph.get_op_nodes(op='Parameter')
    data_type = data_type_str_to_np(data_type_str)
    user_defined_data_types = graph.graph['user_shapes'] if 'user_shapes' in graph.graph else None
    for input in inputs:
        user_defined_type = None
        name = input.soft_get('initial_node_name', input.id)
        # override data type for Parameter specified by the user. This is a workaround for the issue in the
        # extensions.middle.ChangePlaceholderTypes transformation which has an incorrect condition and always overrides
        # Parameter data type to np.float32. When the transformation is fixed the code below must be updated
        if user_defined_data_types is not None and name in user_defined_data_types:
            for desc in user_defined_data_types[name]:
                if 'port' in desc and desc['port'] is None:  # neither input nor output port specified
                    user_defined_type = desc.get('data_type', None)
                else:  # need to check the particular port the Parameter was created for
                    p_name = get_new_placeholder_name(name, 'out' in desc, desc['out'] if 'out' in desc else desc['in'])
                    if p_name == input.soft_get('name'):
                        user_defined_type = desc.get('data_type', None)
        if user_defined_type is not None:
            log.info('Overriding Parameter node {} data type to {}'.format(name, user_defined_type))
            input['data_type'] = user_defined_type
            input.out_port(0).set_data_type(user_defined_type, True)
        elif not input.has_valid('data_type') or input.data_type == np.float32:
            input['data_type'] = data_type
            input.out_port(0).set_data_type(data_type, True)
        else:
            log.info('Do not change data type for node {}'.format(input.soft_get('name')))
def convert_blobs(graph: Graph, data_type_str: str):
    """Convert all float constant blobs in the graph to the requested data type.

    Data nodes marked with 'correct_data_type' keep their original type.

    :param graph: graph whose data nodes are converted in place
    :param data_type_str: target type name (see SUPPORTED_DATA_TYPES), e.g. 'FP16'
    """
    for node in graph.get_data_nodes():
        if node.value is not None:
            try:
                if node.value.dtype in [np.float32, np.float64, np.float16] and not node.has_and_set('correct_data_type'):
                    convert_node_blobs(graph, node, data_type_str_to_np(data_type_str))
            except Exception as e:
                # Fixed typo in the user-facing message ("Coudn't" -> "Couldn't").
                raise Error('Couldn\'t convert blob {}, details: {}', node.soft_get('name'), e) from e
| 42.89071 | 122 | 0.659829 |
import logging as log
import numpy as np
from openvino.tools.mo.front.extractor import get_new_placeholder_name
from openvino.tools.mo.graph.graph import Node, Graph
from openvino.tools.mo.utils.error import Error
from openvino.tools.mo.utils.utils import refer_to_faq_msg
class packed_U1(np.generic):
    """Marker type for 1-bit unsigned data packed into uint8 storage."""
    pass


class packed_U4(np.generic):
    """Marker type for 4-bit unsigned data packed into uint8 storage."""
    pass


class packed_I4(np.generic):
    """Marker type for 4-bit signed data packed into uint8 storage."""
    pass


# Maps a framework type name to (numpy type, IR precision, destination type).
SUPPORTED_DATA_TYPES = {
    'float': (np.float32, 'FP32', 'f32'),
    'half': (np.float16, 'FP16', 'f16'),
    'FP32': (np.float32, 'FP32', 'f32'),
    'FP64': (np.float64, 'FP64', 'f64'),
    'FP16': (np.float16, 'FP16', 'f16'),
    'I32': (np.int32, 'I32', 'i32'),
    'I64': (np.int64, 'I64', 'i64'),
    'int8': (np.int8, 'I8', 'i8'),
    'int32': (np.int32, 'I32', 'i32'),
    'int64': (np.int64, 'I64', 'i64'),
    # 'np.bool' was merely a deprecated alias of the builtin 'bool' and was
    # removed in NumPy 1.24; use the builtin directly (identical behavior).
    'bool': (bool, 'BOOL', 'boolean'),
    'uint8': (np.uint8, 'U8', 'u8'),
    'uint32': (np.uint32, 'U32', 'u32'),
    'uint64': (np.uint64, 'U64', 'u64'),
    'U1': (packed_U1, 'U1', 'u1'),
    'int4': (packed_I4, 'I4', 'i4'),
    'uint4': (packed_U4, 'U4', 'u4'),
    'I4': (packed_I4, 'I4', 'i4'),
    'U4': (packed_U4, 'U4', 'u4'),
}
def data_type_str_to_np(data_type_str: str):
    """Return the numpy type for the given type name, or None if unknown."""
    entry = SUPPORTED_DATA_TYPES.get(data_type_str)
    return None if entry is None else entry[0]


def data_type_str_to_precision(data_type_str: str):
    """Return the IR precision string for the given type name, or None if unknown."""
    entry = SUPPORTED_DATA_TYPES.get(data_type_str)
    return None if entry is None else entry[1]


def data_type_str_to_destination_type(data_type_str: str):
    """Return the destination type string for the given type name, or None if unknown."""
    entry = SUPPORTED_DATA_TYPES.get(data_type_str)
    return None if entry is None else entry[2]
def np_data_type_to_precision(np_data_type):
    """Return the IR precision string for a numpy type; raise Error if unsupported."""
    found = next((precision for numpy_type, precision, _ in SUPPORTED_DATA_TYPES.values()
                  if numpy_type == np_data_type), None)
    if found is None:
        raise Error('Data type "{}" is not supported'.format(np_data_type))
    return found
def np_data_type_to_destination_type(np_data_type):
    """Return the destination type string for a numpy type; raise Error if unsupported."""
    found = next((dst for numpy_type, _, dst in SUPPORTED_DATA_TYPES.values()
                  if numpy_type == np_data_type), None)
    if found is None:
        raise Error('Data type "{}" is not supported'.format(np_data_type))
    return found
def destination_type_to_np_data_type(dst_type):
    """Return the numpy type for a destination type string; raise Error if unsupported."""
    found = next((numpy_type for numpy_type, _, destination in SUPPORTED_DATA_TYPES.values()
                  if destination == dst_type), None)
    if found is None:
        raise Error('Destination type "{}" is not supported'.format(dst_type))
    return found
def precision_to_destination_type(data_type_str):
    """Return the destination type string for an IR precision; raise Error if unsupported."""
    found = next((destination for _, precision, destination in SUPPORTED_DATA_TYPES.values()
                  if precision == data_type_str), None)
    if found is None:
        raise Error('Data type "{}" is not supported'.format(data_type_str))
    return found
def convert_blob(blob: np.ndarray, dst_type: type):
    """Cast `blob` to `dst_type`.

    Returns (converted_blob, finite_mismatch_count, zero_mismatch_count); the
    counters are None when no conversion was necessary. Raises Error when a
    cast to an integer type would round values.
    """
    if blob.dtype == dst_type:
        # Already in the requested type - nothing to convert.
        return blob, None, None
    converted = blob.astype(dtype=dst_type, casting="unsafe")
    integer_targets = (np.int32, np.int64, np.uint8, np.int8)
    if dst_type in integer_targets and not np.array_equal(blob, converted):
        raise Error('The conversion of blob with value "{}" to dst_type "{}" results in rounding'.format(
            blob, dst_type))
    # Count elements whose finiteness or zero-ness changed during the cast.
    finite_mismatches = np.count_nonzero(np.isfinite(blob) != np.isfinite(converted))
    zero_mismatches = np.count_nonzero((blob == 0) != (converted == 0))
    return converted, finite_mismatches, zero_mismatches
def convert_node_blobs(graph: Graph, node: Node, data_type: type):
    """Convert a data node's value to `data_type` when it is dumped as binary weights.

    Logs how many elements were clipped to infinity/zero, and propagates the
    converted value into a single downstream 'Const' node.
    """
    out_edges = graph.out_edges(node.node, data=True)
    # Only convert when the value is consumed as binary weights ('bin' edge attribute).
    if any('bin' in d for _, __, d in out_edges):
        blob = node.value
        if blob.dtype != data_type:
            new_blob, finite_match_count, zero_match_count = convert_blob(blob, data_type)
            consumers = [x.name if x.has_valid('name') else '<NO NAME>' for x in node.out_nodes()]
            log.debug(
                'Blob was converted to {} while dumping to the bin file. This blob is an input for {} nodes.'.format(
                    data_type, consumers))
            if finite_match_count:
                log.error(
                    ("{} elements of {} were clipped to infinity while converting a blob for node [{}] to {}. " +
                     refer_to_faq_msg(76)).format(finite_match_count, blob.size, consumers, data_type))
            if zero_match_count:
                log.warning(
                    ("{} elements of {} were clipped to zero while converting a blob for node [{}] to {}. " +
                     refer_to_faq_msg(77)).format(zero_match_count, blob.size, consumers, data_type))
            node.value = new_blob
            # Propagate the converted value into a downstream 'Const' node and re-run
            # its inference so the node's output matches the new value.
            if len(node.out_nodes()) == 1 and node.out_node(0).op == 'Const':
                const_node = node.out_node(0)
                const_node.value = new_blob
                const_node.infer(const_node)
                const_node.type_infer(const_node)
def convert_parameters_data_type(graph: Graph, data_type_str: str):
    """Set the data type of all Parameter nodes.

    A type the user explicitly specified for a placeholder wins; otherwise
    Parameters that are untyped or float32 are switched to `data_type_str`.
    """
    inputs = graph.get_op_nodes(op='Parameter')
    data_type = data_type_str_to_np(data_type_str)
    user_defined_data_types = graph.graph['user_shapes'] if 'user_shapes' in graph.graph else None
    for input in inputs:
        user_defined_type = None
        name = input.soft_get('initial_node_name', input.id)
        if user_defined_data_types is not None and name in user_defined_data_types:
            for desc in user_defined_data_types[name]:
                if 'port' in desc and desc['port'] is None:
                    # Neither input nor output port specified - applies to the node itself.
                    user_defined_type = desc.get('data_type', None)
                else:
                    # Check the particular port the Parameter was created for.
                    p_name = get_new_placeholder_name(name, 'out' in desc, desc['out'] if 'out' in desc else desc['in'])
                    if p_name == input.soft_get('name'):
                        user_defined_type = desc.get('data_type', None)
        if user_defined_type is not None:
            log.info('Overriding Parameter node {} data type to {}'.format(name, user_defined_type))
            input['data_type'] = user_defined_type
            input.out_port(0).set_data_type(user_defined_type, True)
        elif not input.has_valid('data_type') or input.data_type == np.float32:
            input['data_type'] = data_type
            input.out_port(0).set_data_type(data_type, True)
        else:
            log.info('Do not change data type for node {}'.format(input.soft_get('name')))
def convert_blobs(graph: Graph, data_type_str: str):
    """Convert all float constant blobs in the graph to the requested data type.

    Data nodes marked with 'correct_data_type' keep their original type.

    :param graph: graph whose data nodes are converted in place
    :param data_type_str: target type name (see SUPPORTED_DATA_TYPES), e.g. 'FP16'
    """
    for node in graph.get_data_nodes():
        if node.value is not None:
            try:
                if node.value.dtype in [np.float32, np.float64, np.float16] and not node.has_and_set('correct_data_type'):
                    convert_node_blobs(graph, node, data_type_str_to_np(data_type_str))
            except Exception as e:
                # Fixed typo in the user-facing message ("Coudn't" -> "Couldn't").
                raise Error('Couldn\'t convert blob {}, details: {}', node.soft_get('name'), e) from e
| true | true |
f71e59893a5ea3504751b1c52828d7f9e9fbe96e | 3,718 | py | Python | python/lib/packet/path_mgmt/rev_info.py | stschwar/scion | f15933081b4947ed46a032d34e9b9c563f2989c4 | [
"Apache-2.0"
] | null | null | null | python/lib/packet/path_mgmt/rev_info.py | stschwar/scion | f15933081b4947ed46a032d34e9b9c563f2989c4 | [
"Apache-2.0"
] | null | null | null | python/lib/packet/path_mgmt/rev_info.py | stschwar/scion | f15933081b4947ed46a032d34e9b9c563f2989c4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`rev_info` --- Revocation info payload
============================================
"""
# Stdlib
import logging
# External
import capnp # noqa
# SCION
import proto.rev_info_capnp as P
from lib.defines import HASHTREE_EPOCH_TIME
from lib.errors import SCIONBaseError
from lib.packet.packet_base import Cerealizable
from lib.packet.scion_addr import ISD_AS
class RevInfoValidationError(SCIONBaseError):
    """Raised when validation of a RevocationInfo payload fails."""
class RevocationInfo(Cerealizable):
    """
    Class containing revocation information, i.e., the revocation token.
    """
    NAME = "RevocationInfo"
    P_CLS = P.RevInfo
    @classmethod
    def from_values(cls, isd_as, if_id, epoch, nonce, siblings, prev_root,
                    next_root, hash_type, tree_ttl):
        """
        Returns a RevocationInfo object with the specified values.
        :param ISD_AS isd_as: The ISD_AS of the issuer of the revocation.
        :param int if_id: ID of the interface to be revoked
        :param int epoch: Time epoch for which interface is to be revoked
        :param bytes nonce: Nonce for the (if_id, epoch) leaf in the hashtree
        :param list[(bool, bytes)] siblings: Positions and hashes of siblings
        :param bytes prev_root: Hash of the tree root at time T-1
        :param bytes next_root: Hash of the tree root at time T+1
        :param int hash_type: The hash function needed to verify the revocation.
        :param int tree_ttl: The validity period of the revocation tree.
        """
        # Put the isd_as, if_id, epoch and nonce of the leaf into the proof.
        p = cls.P_CLS.new_message(isdas=int(isd_as), ifID=if_id, epoch=epoch,
                                  nonce=nonce, hashType=hash_type, treeTTL=tree_ttl)
        # Put the list of sibling hashes (along with l/r) into the proof.
        sibs = p.init('siblings', len(siblings))
        for i, sibling in enumerate(siblings):
            sibs[i].isLeft, sibs[i].hash = sibling
        # Put the roots of the hash trees at T-1 and T+1.
        p.prevRoot = prev_root
        p.nextRoot = next_root
        return cls(p)
    def isd_as(self):
        """Return the issuer ISD-AS of this revocation."""
        return ISD_AS(self.p.isdas)
    def validate(self):
        """Sanity-check the payload; raises RevInfoValidationError on bad fields."""
        if self.p.ifID == 0:
            raise RevInfoValidationError("Invalid ifID: %d" % self.p.ifID)
        if self.p.treeTTL == 0 or (self.p.treeTTL % HASHTREE_EPOCH_TIME != 0):
            raise RevInfoValidationError("Invalid TreeTTL: %d" % self.p.treeTTL)
        # Parsing the ISD-AS doubles as validation - presumably raises on
        # malformed values (NOTE(review): confirm ISD_AS() validates).
        self.isd_as()
    def cmp_str(self):
        """Canonical byte string identifying this revocation (isdas, ifID, epoch, nonce)."""
        b = []
        b.append(self.p.isdas.to_bytes(8, 'big'))
        b.append(self.p.ifID.to_bytes(8, 'big'))
        b.append(self.p.epoch.to_bytes(8, 'big'))
        b.append(self.p.nonce)
        return b"".join(b)
    def __eq__(self, other):
        # NOTE(review): assumes `other` is a RevocationInfo or None; other types
        # would raise AttributeError - confirm callers never compare across types.
        if other is None:
            logging.error("Other RevInfo object is None.")
            return False
        return self.cmp_str() == other.cmp_str()
    def __hash__(self):
        # Hash must be consistent with __eq__, hence based on the same cmp_str().
        return hash(self.cmp_str())
    def short_desc(self):
        """One-line human-readable summary of the revocation."""
        return "RevInfo: %s IF: %d EPOCH: %d TreeTTL: %d" % (
            self.isd_as(), self.p.ifID, self.p.epoch, self.p.treeTTL)
| 37.18 | 84 | 0.649274 |
import logging
import capnp
import proto.rev_info_capnp as P
from lib.defines import HASHTREE_EPOCH_TIME
from lib.errors import SCIONBaseError
from lib.packet.packet_base import Cerealizable
from lib.packet.scion_addr import ISD_AS
class RevInfoValidationError(SCIONBaseError):
    """Raised when validation of a RevocationInfo payload fails."""
class RevocationInfo(Cerealizable):
    """Revocation information, i.e. the revocation token, backed by a capnp message."""
    NAME = "RevocationInfo"
    P_CLS = P.RevInfo
    @classmethod
    def from_values(cls, isd_as, if_id, epoch, nonce, siblings, prev_root,
                    next_root, hash_type, tree_ttl):
        """Build a RevocationInfo from the leaf data, sibling hashes and tree roots."""
        p = cls.P_CLS.new_message(isdas=int(isd_as), ifID=if_id, epoch=epoch,
                                  nonce=nonce, hashType=hash_type, treeTTL=tree_ttl)
        # Copy the (isLeft, hash) sibling pairs into the capnp list.
        sibs = p.init('siblings', len(siblings))
        for i, sibling in enumerate(siblings):
            sibs[i].isLeft, sibs[i].hash = sibling
        p.prevRoot = prev_root
        p.nextRoot = next_root
        return cls(p)
    def isd_as(self):
        """Return the issuer ISD-AS of this revocation."""
        return ISD_AS(self.p.isdas)
    def validate(self):
        """Sanity-check the payload; raises RevInfoValidationError on bad fields."""
        if self.p.ifID == 0:
            raise RevInfoValidationError("Invalid ifID: %d" % self.p.ifID)
        if self.p.treeTTL == 0 or (self.p.treeTTL % HASHTREE_EPOCH_TIME != 0):
            raise RevInfoValidationError("Invalid TreeTTL: %d" % self.p.treeTTL)
        self.isd_as()
    def cmp_str(self):
        """Canonical byte string identifying this revocation (isdas, ifID, epoch, nonce)."""
        b = []
        b.append(self.p.isdas.to_bytes(8, 'big'))
        b.append(self.p.ifID.to_bytes(8, 'big'))
        b.append(self.p.epoch.to_bytes(8, 'big'))
        b.append(self.p.nonce)
        return b"".join(b)
    def __eq__(self, other):
        # NOTE(review): assumes `other` is a RevocationInfo or None; other types
        # would raise AttributeError - confirm callers never compare across types.
        if other is None:
            logging.error("Other RevInfo object is None.")
            return False
        return self.cmp_str() == other.cmp_str()
    def __hash__(self):
        # Hash must be consistent with __eq__, hence based on the same cmp_str().
        return hash(self.cmp_str())
    def short_desc(self):
        """One-line human-readable summary of the revocation."""
        return "RevInfo: %s IF: %d EPOCH: %d TreeTTL: %d" % (
            self.isd_as(), self.p.ifID, self.p.epoch, self.p.treeTTL)
| true | true |
f71e5c0ef766848ef073a7722f2396d351009ea4 | 11,159 | py | Python | lib/models.py | prkriley/neurips2019_intrus | 3e36930246347e6b80a583d2ab378054ea3b9f7a | [
"MIT"
] | 18 | 2019-11-04T06:43:47.000Z | 2021-10-04T15:02:52.000Z | lib/models.py | prkriley/neurips2019_intrus | 3e36930246347e6b80a583d2ab378054ea3b9f7a | [
"MIT"
] | 9 | 2019-11-25T06:35:20.000Z | 2022-02-09T23:32:20.000Z | lib/models.py | prkriley/neurips2019_intrus | 3e36930246347e6b80a583d2ab378054ea3b9f7a | [
"MIT"
] | 5 | 2020-01-24T09:12:58.000Z | 2021-10-04T20:14:11.000Z | """
Transformer encoder / decoder layer chain
"""
import numpy as np
import tensorflow as tf
import lib.layers
from . import layers, ops
from .data import linelen
class Transformer:
    """Insertion-based Transformer: scores insertions of tokens into a partial output.

    See compute_action_logprobs for the policy over (position, token) insertions
    and the terminate action.
    """
    def __init__(
        self, name, inp_voc, out_voc,
        logits_bias=False, share_emb=False, dst_rand_offset=False,
        rescale_emb=True, inp_emb_bias=False, emb_inp_device='', emb_out_device='',
        **kwargs
    ):
        """
        Transformer-based model that predicts logp(insert(i, token) | x, y)
        :type inp_voc: lib.voc.Voc
        :type out_voc: lib.voc.Voc
        :param logits_bias: if True, final logits layer has bias term.
        :param share_emb: if True, input and output embeddings will use the same matrix.
            Useful in case of shared vocabularies.
        :param dst_rand_offset: if True, adds a random offset to output embeddings, same for all positions
        :param kwargs: other hyperparameters - see TransformerChain and TransformerEmbedding
        """
        self.name = name
        self.inp_voc, self.out_voc = inp_voc, out_voc
        self.dst_rand_offset = dst_rand_offset
        self.hp = kwargs
        emb_size = kwargs.get('emb_size', kwargs.get('hid_size', 512))
        # With shared embeddings both vocabularies must fit in one matrix.
        max_voc_size = max(len(inp_voc), len(out_voc))
        with tf.variable_scope(self.name) as self.scope:
            # Embeddings
            self.emb_inp = layers.TransformerEmbedding(
                'emb_inp', max_voc_size if share_emb else len(inp_voc), emb_size,
                bias=inp_emb_bias, rescale=rescale_emb, device=emb_inp_device)
            self.emb_out = layers.TransformerEmbedding(
                'emb_out', max_voc_size if share_emb else len(out_voc), emb_size,
                matrix=self.emb_inp.emb.mat if share_emb else None,
                rescale=rescale_emb, device=emb_out_device)
            # Model body
            self.encoder = layers.TransformerChain('enc', **kwargs)
            self.decoder = layers.TransformerChain('dec', attn_inputs=['enc'], **kwargs)
            # logits: token insertions plus one extra logit to predict position where to insert
            self.logits = layers.Dense(
                'logits', kwargs['hid_size'], len(out_voc) + 1,
                matrix=tf.transpose(self.emb_out.emb.mat) if kwargs.get('dwwt', False) else None,
                bias=None if logits_bias else 0
            )
    def _get_batch_sample(self):
        """ A minimal example of model input data """
        return [("i saw a cat", "i write the code")]
    def make_encoder_batch_ph(self):
        """ Placeholders for a source-side batch: token ids and lengths """
        return {
            'inp': tf.placeholder('int32', [None, None]),
            'inp_len': tf.placeholder('int32', [None])
        }
    def make_feed_dict(self, batch, **kwargs):
        """ Take input data strings, return a dict { key: np.array(value) } """
        inp_lines, out_lines = zip(*batch)
        inp_len = [linelen(line) for line in inp_lines]
        out_len = [linelen(line) for line in out_lines]
        return {
            'inp': self.inp_voc.to_matrix(inp_lines),
            'inp_len': np.array(inp_len, 'int32'),
            'out': self.out_voc.to_matrix(out_lines),
            'out_len': np.array(out_len, 'int32')
        }
    def encode(self, batch, is_train):
        """ Take placeholders for data batch, return encoder state """
        with tf.name_scope(self.name), ops.dropout_scope(is_train):
            inp = batch['inp']  # [batch_size * ninp]
            inp_len = batch.get('inp_len', ops.infer_length(inp, self.inp_voc.eos))  # [batch]
            attn_mask = ops.make_attn_mask(inp, inp_len)  # [batch_size, 1, 1, ninp]
            out, _ = self.encoder(self.emb_inp(inp), self_attn_mask=attn_mask)
            # ^-- [batch_size, ninp, hid_size]
            return dict(out=out, attn_mask=attn_mask)
    def compute_action_logprobs(self, batch, is_train, enc=None, temperature=None):
        """
        Compute log-probabilities for all possible actions (aka agent policy)
        :param batch: a dict with
            - token matrix 'out'[batch_size, output_length]
            - optional length vector out_len[batch_size]
        :param is_train: whether or not to use training behavior (e.g. dropout)
        :returns: {'insert':logp(insert(i, c) | x, y), 'finish':logp(terminate| x, y)}
        """
        enc = self.encode(batch, is_train) if enc is None else enc
        with tf.name_scope(self.name), ops.dropout_scope(is_train):
            out = batch['out']  # partial translation, shape: [batch_size * nout]
            out_len = batch.get('out_len', ops.infer_length(out, self.out_voc.eos))  # [batch]
            # embedding. Note: at this point, a special "zero" vector is added
            # to the first position hence length is increased by 1
            out_padded = tf.concat([tf.zeros_like(out[:, :1]), out], axis=1)  # [batch_size, nout+1]
            dec_emb = self.emb_out(out_padded, offset='random' if self.dst_rand_offset else 0)
            # ^-- shape: [batch_size, nout + 1]
            # run decoder
            attn_mask = ops.make_attn_mask(out_padded, out_len + 1)  # [batch_size, 1, 1, nout + 1]
            dec_out, _ = self.decoder(dec_emb, self_attn_mask=attn_mask,
                                      enc_out=enc['out'], enc_attn_mask=enc['attn_mask'])
            # ^-- [batch_size, nout + 1, hid_size]
            logits = self.logits(dec_out)  # [batch_size, nout + 1, voc_size + 1]
            if temperature is not None:
                logits /= temperature
            # compute log-probabilities for actions
            # position log-probabilities, logP(insert(pos, *) | ...)
            # used to predict position of next insert and termination condition (EOS)
            position_logits = logits[:, :, -1]  # [batch_size, nout + 1]
            position_mask = tf.cast(attn_mask, tf.bool)[:, 0, 0, :]  # [batch_size, nout + 1]
            position_logits = tf.where(position_mask, position_logits,
                                       tf.fill(tf.shape(position_logits), -1e9))
            position_logp = tf.nn.log_softmax(position_logits, axis=-1)  # [batch_size, n_out]
            # two actions: insert - at any non-EOS position - or finish - defined as inserting at EOS
            finish_logp = tf.gather_nd(position_logp,
                                       tf.stack([tf.range(tf.shape(out_len)[0]), out_len], axis=1))
            # ^-- [batch_size]
            insert_position_logp = tf.where(position_mask[:, 1:], position_logp[:, :-1],
                                            tf.fill(tf.shape(position_logp[:, :-1]), -1e9))
            # ^-- [batch_size, nout]
            # insertion log-probabilities:
            # logP(insert(pos, tok) | ...) = logP(insert(pos, *) | ...) + logP(insert(pos, tok) | insert(pos, *), ...)
            token_logits = logits[:, :-1, :len(self.out_voc)]  # [batch_size, n_out, voc_size]
            token_logp_given_position = tf.nn.log_softmax(token_logits, axis=-1)
            # note: we do not need mask on token_logp_given_position cuz mask is already applied to insert_position_logp
            insert_logp = insert_position_logp[:, :, None] + token_logp_given_position
            return {
                # group 1 (exps sum to 1)
                'insert': insert_logp,  # [batch_size, nout, voc_size]
                'finish': finish_logp,  # [batch_size]
            }
class ImgToSeqTransformer(Transformer):
    """Transformer decoder over an image encoder: predicts token insertions for a caption."""
    def __init__(
        self, name, out_voc, inp_w, inp_h, inp_channels=3, make_encoder=lib.layers.ImageEncoder,
        logits_bias=False, share_emb=False, dst_rand_offset=False,
        rescale_emb=True, emb_out_device='',
        **kwargs
    ):
        """
        Transformer-based model that predicts logp(insert(i, token) | x, y)
        :type out_voc: lib.voc.Voc
        :param logits_bias: if True, final logits layer has bias term.
        :param share_emb: unsupported here (there is no input-token embedding to share with).
        :param dst_rand_offset: if True, adds a random offset to output embeddings, same for all positions
        :param kwargs: other hyperparameters - see TransformerChain and TransformerEmbedding
        """
        if share_emb:
            # BUGFIX: the previous code referenced self.emb_inp, which this class
            # never creates, and crashed with AttributeError. Fail fast instead.
            raise ValueError('share_emb is not supported by ImgToSeqTransformer: '
                             'there is no input embedding to share with')
        self.name = name
        self.inp_voc, self.out_voc = out_voc, out_voc  # inp voc is a stub, the same as out_voc
        self.dst_rand_offset = dst_rand_offset
        self.hp = kwargs
        self.w = inp_w
        self.h = inp_h
        self.inp_channels = inp_channels
        emb_size = kwargs.get('emb_size', kwargs.get('hid_size', 512))
        with tf.variable_scope(self.name) as self.scope:
            # Output-token embeddings
            self.emb_out = layers.TransformerEmbedding(
                'emb_out', len(out_voc), emb_size,
                matrix=None, rescale=rescale_emb, device=emb_out_device)
            # Image encoder. NOTE(review): inp_h/inp_w are passed swapped
            # (inp_h=inp_w, inp_w=inp_h); preserved as-is, but verify against
            # make_encoder's parameter semantics.
            self.encoder = make_encoder('enc', inp_h=inp_w, inp_w=inp_h, inp_channels=inp_channels, **kwargs)
            # Probe the encoder output shape to size the decoder's attention input.
            enc_out_shape = self.encode(self.make_encoder_batch_ph(), True)['out'].shape
            assert enc_out_shape.ndims == 3 and enc_out_shape[-1].value is not None, \
                "encoder output shape must be a 3d tensor with fixed num units, " \
                "got shape {}".format(enc_out_shape)
            self.decoder = layers.TransformerChain('dec', attn_inputs=['enc'],
                                                   attn_input_sizes={'enc': enc_out_shape[-1].value},
                                                   **kwargs)
            # logits: token insertions plus one extra logit to predict position where to insert
            self.logits = layers.Dense(
                'logits', kwargs['hid_size'], len(out_voc) + 1,
                bias=None if logits_bias else 0
            )
    def _get_batch_sample(self):
        """ A minimal example of model input data """
        return [(np.zeros((self.h, self.w, self.inp_channels)), 'A cat sat')]
    def make_feed_dict(self, batch, **kwargs):
        """ Take (image, string) pairs, return a dict { key: np.array(value) } """
        inp_imgs, out_lines = zip(*batch)
        out_len = [linelen(line) for line in out_lines]
        return {
            'inp': np.array(inp_imgs, 'float32'),
            'out': self.out_voc.to_matrix(out_lines),
            'out_len': np.array(out_len, 'int32')
        }
    def make_encoder_batch_ph(self):
        """ Placeholder matching make_feed_dict's image input """
        return {
            'inp': tf.placeholder('float32', [None, self.h, self.w, self.inp_channels]),
        }
    def encode(self, batch, is_train):
        """ Take placeholders for data batch, return encoder state """
        with tf.name_scope(self.name), ops.dropout_scope(is_train):
            inp = batch['inp']  # [batch_size, h, w, channels]
            out = self.encoder(inp)
            assert out.shape[-1] is not None
            out_shape = tf.shape(out)
            # Flatten the spatial grid into a sequence of feature vectors for attention.
            out = tf.reshape(out, [out_shape[0], -1, out.shape[-1]])
            # All encoder positions are attendable (no padding for images).
            attn_mask = tf.ones((out_shape[0], 1, 1, out_shape[1] * out_shape[2]))  # [batch_size, 1, 1, h'*w']
            return dict(out=out, attn_mask=attn_mask)
| 45.546939 | 120 | 0.595663 | import numpy as np
import tensorflow as tf
import lib.layers
from . import layers, ops
from .data import linelen
class Transformer:
    def __init__(
        self, name, inp_voc, out_voc,
        logits_bias=False, share_emb=False, dst_rand_offset=False,
        rescale_emb=True, inp_emb_bias=False, emb_inp_device='', emb_out_device='',
        **kwargs
    ):
        """Build embeddings, encoder/decoder chains and the insertion logits layer.

        :param logits_bias: if True, final logits layer has a bias term.
        :param share_emb: if True, input and output embeddings share one matrix.
        :param dst_rand_offset: if True, adds a random offset to output embeddings.
        :param kwargs: other hyperparameters - see TransformerChain and TransformerEmbedding.
        """
        self.name = name
        self.inp_voc, self.out_voc = inp_voc, out_voc
        self.dst_rand_offset = dst_rand_offset
        self.hp = kwargs
        emb_size = kwargs.get('emb_size', kwargs.get('hid_size', 512))
        # With shared embeddings both vocabularies must fit in one matrix.
        max_voc_size = max(len(inp_voc), len(out_voc))
        with tf.variable_scope(self.name) as self.scope:
            self.emb_inp = layers.TransformerEmbedding(
                'emb_inp', max_voc_size if share_emb else len(inp_voc), emb_size,
                bias=inp_emb_bias, rescale=rescale_emb, device=emb_inp_device)
            self.emb_out = layers.TransformerEmbedding(
                'emb_out', max_voc_size if share_emb else len(out_voc), emb_size,
                matrix=self.emb_inp.emb.mat if share_emb else None,
                rescale=rescale_emb, device=emb_out_device)
            self.encoder = layers.TransformerChain('enc', **kwargs)
            self.decoder = layers.TransformerChain('dec', attn_inputs=['enc'], **kwargs)
            # Token insertion logits plus one extra logit for the insert position.
            self.logits = layers.Dense(
                'logits', kwargs['hid_size'], len(out_voc) + 1,
                matrix=tf.transpose(self.emb_out.emb.mat) if kwargs.get('dwwt', False) else None,
                bias=None if logits_bias else 0
            )
def _get_batch_sample(self):
return [("i saw a cat", "i write the code")]
def make_encoder_batch_ph(self):
return {
'inp': tf.placeholder('int32', [None, None]),
'inp_len': tf.placeholder('int32', [None])
}
def make_feed_dict(self, batch, **kwargs):
inp_lines, out_lines = zip(*batch)
inp_len = [linelen(line) for line in inp_lines]
out_len = [linelen(line) for line in out_lines]
return {
'inp': self.inp_voc.to_matrix(inp_lines),
'inp_len': np.array(inp_len, 'int32'),
'out': self.out_voc.to_matrix(out_lines),
'out_len': np.array(out_len, 'int32')
}
def encode(self, batch, is_train):
with tf.name_scope(self.name), ops.dropout_scope(is_train):
inp = batch['inp']
inp_len = batch.get('inp_len', ops.infer_length(inp, self.inp_voc.eos))
attn_mask = ops.make_attn_mask(inp, inp_len)
out, _ = self.encoder(self.emb_inp(inp), self_attn_mask=attn_mask)
return dict(out=out, attn_mask=attn_mask)
def compute_action_logprobs(self, batch, is_train, enc=None, temperature=None):
enc = self.encode(batch, is_train) if enc is None else enc
with tf.name_scope(self.name), ops.dropout_scope(is_train):
out = batch['out']
out_len = batch.get('out_len', ops.infer_length(out, self.out_voc.eos))
out_padded = tf.concat([tf.zeros_like(out[:, :1]), out], axis=1)
dec_emb = self.emb_out(out_padded, offset='random' if self.dst_rand_offset else 0)
attn_mask = ops.make_attn_mask(out_padded, out_len + 1)
dec_out, _ = self.decoder(dec_emb, self_attn_mask=attn_mask,
enc_out=enc['out'], enc_attn_mask=enc['attn_mask'])
logits = self.logits(dec_out)
if temperature is not None:
logits /= temperature
position_logits = logits[:, :, -1]
position_mask = tf.cast(attn_mask, tf.bool)[:, 0, 0, :]
position_logits = tf.where(position_mask, position_logits,
tf.fill(tf.shape(position_logits), -1e9))
position_logp = tf.nn.log_softmax(position_logits, axis=-1)
finish_logp = tf.gather_nd(position_logp,
tf.stack([tf.range(tf.shape(out_len)[0]), out_len], axis=1))
insert_position_logp = tf.where(position_mask[:, 1:], position_logp[:, :-1],
tf.fill(tf.shape(position_logp[:, :-1]), -1e9))
token_logits = logits[:, :-1, :len(self.out_voc)]
token_logp_given_position = tf.nn.log_softmax(token_logits, axis=-1)
insert_logp = insert_position_logp[:, :, None] + token_logp_given_position
return {
'insert': insert_logp,
'finish': finish_logp,
}
class ImgToSeqTransformer(Transformer):
def __init__(
self, name, out_voc, inp_w, inp_h, inp_channels=3, make_encoder=lib.layers.ImageEncoder,
logits_bias=False, share_emb=False, dst_rand_offset=False,
rescale_emb=True, emb_out_device='',
**kwargs
):
self.name = name
self.inp_voc, self.out_voc = out_voc, out_voc
self.dst_rand_offset = dst_rand_offset
self.hp = kwargs
self.w = inp_w
self.h = inp_h
self.inp_channels = inp_channels
emb_size = kwargs.get('emb_size', kwargs.get('hid_size', 512))
max_voc_size = len(out_voc)
with tf.variable_scope(self.name) as self.scope:
self.emb_out = layers.TransformerEmbedding(
'emb_out', max_voc_size if share_emb else len(out_voc), emb_size,
matrix=self.emb_inp.emb.mat if share_emb else None,
rescale=rescale_emb, device=emb_out_device)
self.encoder = make_encoder('enc', inp_h=inp_w, inp_w=inp_h, inp_channels=inp_channels, **kwargs)
enc_out_shape = self.encode(self.make_encoder_batch_ph(), True)['out'].shape
assert enc_out_shape.ndims == 3 and enc_out_shape[-1].value is not None, \
"encoder output shape must be a 3d tensor with fixed num units, " \
"got shape {}".format(enc_out_shape)
self.decoder = layers.TransformerChain('dec', attn_inputs=['enc'],
attn_input_sizes={'enc': enc_out_shape[-1].value},
**kwargs)
self.logits = layers.Dense(
'logits', kwargs['hid_size'], len(out_voc) + 1,
bias=None if logits_bias else 0
)
def _get_batch_sample(self):
return [(np.zeros((self.h, self.w, self.inp_channels)), 'A cat sat')]
def make_feed_dict(self, batch, **kwargs):
inp_imgs, out_lines = zip(*batch)
out_len = [linelen(line) for line in out_lines]
return {
'inp': np.array(inp_imgs, 'float32'),
'out': self.out_voc.to_matrix(out_lines),
'out_len': np.array(out_len, 'int32')
}
def make_encoder_batch_ph(self):
return {
'inp': tf.placeholder('float32', [None, self.h, self.w, self.inp_channels]),
}
def encode(self, batch, is_train):
with tf.name_scope(self.name), ops.dropout_scope(is_train):
inp = batch['inp']
out = self.encoder(inp)
assert out.shape[-1] is not None
out_shape = tf.shape(out)
out = tf.reshape(out, [out_shape[0], -1, out.shape[-1]])
attn_mask = tf.ones((out_shape[0], 1, 1, out_shape[1] * out_shape[2]))
return dict(out=out, attn_mask=attn_mask)
| true | true |
f71e5cd99b7d4e33658c397072d56eb1f5d3bd35 | 11,969 | py | Python | desktop/core/ext-py/Django/django/db/backends/mysql/base.py | civascu/hue | 82f2de44789ff5a981ed725175bae7944832d1e9 | [
"Apache-2.0"
] | 2 | 2021-04-27T03:57:00.000Z | 2021-06-18T09:39:58.000Z | django/db/backends/mysql/base.py | joetyson/django | c3699190186561d5c216b2a77ecbfc487d42a734 | [
"BSD-3-Clause"
] | null | null | null | django/db/backends/mysql/base.py | joetyson/django | c3699190186561d5c216b2a77ecbfc487d42a734 | [
"BSD-3-Clause"
] | 2 | 2021-09-06T18:44:45.000Z | 2022-02-24T04:10:10.000Z | """
MySQL database backend for Django.
Requires MySQLdb: http://sourceforge.net/projects/mysql-python
"""
import re
try:
import MySQLdb as Database
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading MySQLdb module: %s" % e)
# We want version (1, 2, 1, 'final', 2) or later. We can't just use
# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
# inadvertently passes the version test.
version = Database.version_info
if (version < (1,2,1) or (version[:3] == (1, 2, 1) and
(len(version) < 5 or version[3] != 'final' or version[4] < 2))):
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__)
from MySQLdb.converters import conversions
from MySQLdb.constants import FIELD_TYPE, FLAG, CLIENT
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.mysql.client import DatabaseClient
from django.db.backends.mysql.creation import DatabaseCreation
from django.db.backends.mysql.introspection import DatabaseIntrospection
from django.db.backends.mysql.validation import DatabaseValidation
from django.utils.safestring import SafeString, SafeUnicode
# Raise exceptions for database warnings if DEBUG is on
from django.conf import settings
if settings.DEBUG:
from warnings import filterwarnings
filterwarnings("error", category=Database.Warning)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# MySQLdb-1.2.1 returns TIME columns as timedelta -- they are more like
# timedelta in terms of actual behavior as they are signed and include days --
# and Django expects time, so we still need to override that. We also need to
# add special handling for SafeUnicode and SafeString as MySQLdb's type
# checking is too tight to catch those (see Django ticket #6052).
django_conversions = conversions.copy()
django_conversions.update({
FIELD_TYPE.TIME: util.typecast_time,
FIELD_TYPE.DECIMAL: util.typecast_decimal,
FIELD_TYPE.NEWDECIMAL: util.typecast_decimal,
})
# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same). Based on the list of version
# at http://dev.mysql.com/doc/refman/4.1/en/news.html and
# http://dev.mysql.com/doc/refman/5.0/en/news.html .
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
# MySQLdb-1.2.1 and newer automatically makes use of SHOW WARNINGS on
# MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the
# point is to raise Warnings as exceptions, this can be done with the Python
# warning module, and this is setup when the connection is created, and the
# standard util.CursorDebugWrapper can be used. Also, using sql_mode
# TRADITIONAL will automatically cause most warnings to be treated as errors.
class CursorWrapper(object):
    """
    A thin wrapper around MySQLdb's normal cursor class so that we can catch
    particular exception instances and reraise them with the right types.
    Implemented as a wrapper, rather than a subclass, so that we aren't stuck
    to the particular underlying representation returned by Connection.cursor().

    NOTE: this module is Python 2-era code (``except X, e`` syntax); keep any
    edits Python 2 compatible.
    """
    # MySQL error 1048 ("Column cannot be null", per the MySQL error
    # reference) is reported as OperationalError but is logically an
    # integrity violation.
    codes_for_integrityerror = (1048,)
    def __init__(self, cursor):
        self.cursor = cursor
    def execute(self, query, args=None):
        try:
            return self.cursor.execute(query, args)
        except Database.OperationalError, e:
            # Map some error codes to IntegrityError, since they seem to be
            # misclassified and Django would prefer the more logical place.
            if e[0] in self.codes_for_integrityerror:
                raise Database.IntegrityError(tuple(e))
            raise
    def executemany(self, query, args):
        try:
            return self.cursor.executemany(query, args)
        except Database.OperationalError, e:
            # Map some error codes to IntegrityError, since they seem to be
            # misclassified and Django would prefer the more logical place.
            if e[0] in self.codes_for_integrityerror:
                raise Database.IntegrityError(tuple(e))
            raise
    def __getattr__(self, attr):
        # Delegate any other attribute access to the wrapped cursor.
        if attr in self.__dict__:
            return self.__dict__[attr]
        else:
            return getattr(self.cursor, attr)
    def __iter__(self):
        return iter(self.cursor)
class DatabaseFeatures(BaseDatabaseFeatures):
    # Value MySQLdb returns for an exhausted fetchmany(): a tuple, not a list.
    empty_fetchmany_value = ()
    # MySQL cannot reference the table being updated in the UPDATE's own
    # subquery (presumably why this is disabled -- see MySQL docs).
    update_can_self_select = False
    allows_group_by_pk = True
    related_fields_match_type = True
class DatabaseOperations(BaseDatabaseOperations):
    # MySQL-specific SQL generation helpers.  Python 2-era code: note the
    # long literal ``L`` suffix and ``unicode`` built-in below.
    def date_extract_sql(self, lookup_type, field_name):
        # http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
        if lookup_type == 'week_day':
            # DAYOFWEEK() returns an integer, 1-7, Sunday=1.
            # Note: WEEKDAY() returns 0-6, Monday=0.
            return "DAYOFWEEK(%s)" % field_name
        else:
            return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
    def date_trunc_sql(self, lookup_type, field_name):
        # Truncate by reformatting: keep the leading date/time components up
        # to ``lookup_type`` and pad the remainder with zero defaults.
        fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
        format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
        format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
        try:
            i = fields.index(lookup_type) + 1
        except ValueError:
            # Unknown lookup type: return the column untruncated.
            sql = field_name
        else:
            format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
            sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
        return sql
    def drop_foreignkey_sql(self):
        return "DROP FOREIGN KEY"
    def force_no_ordering(self):
        """
        "ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
        columns. If no ordering would otherwise be applied, we don't want any
        implicit sorting going on.
        """
        return ["NULL"]
    def fulltext_search_sql(self, field_name):
        return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name
    def no_limit_value(self):
        # 2**64 - 1, as recommended by the MySQL documentation
        return 18446744073709551615L
    def quote_name(self, name):
        if name.startswith("`") and name.endswith("`"):
            return name # Quoting once is enough.
        return "`%s`" % name
    def random_function_sql(self):
        return 'RAND()'
    def sql_flush(self, style, tables, sequences):
        # NB: The generated SQL below is specific to MySQL
        # 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
        # to clear all tables of all data
        if tables:
            sql = ['SET FOREIGN_KEY_CHECKS = 0;']
            for table in tables:
                sql.append('%s %s;' % (style.SQL_KEYWORD('TRUNCATE'), style.SQL_FIELD(self.quote_name(table))))
            sql.append('SET FOREIGN_KEY_CHECKS = 1;')
            # 'ALTER TABLE table AUTO_INCREMENT = 1;'... style SQL statements
            # to reset sequence indices
            sql.extend(["%s %s %s %s %s;" % \
                (style.SQL_KEYWORD('ALTER'),
                 style.SQL_KEYWORD('TABLE'),
                 style.SQL_TABLE(self.quote_name(sequence['table'])),
                 style.SQL_KEYWORD('AUTO_INCREMENT'),
                 style.SQL_FIELD('= 1'),
                ) for sequence in sequences])
            return sql
        else:
            return []
    def value_to_db_datetime(self, value):
        if value is None:
            return None
        # MySQL doesn't support tz-aware datetimes
        if value.tzinfo is not None:
            raise ValueError("MySQL backend does not support timezone-aware datetimes.")
        # MySQL doesn't support microseconds
        return unicode(value.replace(microsecond=0))
    def value_to_db_time(self, value):
        if value is None:
            return None
        # MySQL doesn't support tz-aware datetimes
        if value.tzinfo is not None:
            raise ValueError("MySQL backend does not support timezone-aware datetimes.")
        # MySQL doesn't support microseconds
        return unicode(value.replace(microsecond=0))
    def year_lookup_bounds(self, value):
        # Again, no microseconds
        first = '%s-01-01 00:00:00'
        second = '%s-12-31 23:59:59.99'
        return [first % value, second % value]
class DatabaseWrapper(BaseDatabaseWrapper):
    # Mapping from Django field-lookup names to MySQL operator fragments;
    # the BINARY variants force case-sensitive matching.
    operators = {
        'exact': '= %s',
        'iexact': 'LIKE %s',
        'contains': 'LIKE BINARY %s',
        'icontains': 'LIKE %s',
        'regex': 'REGEXP BINARY %s',
        'iregex': 'REGEXP %s',
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': 'LIKE BINARY %s',
        'endswith': 'LIKE BINARY %s',
        'istartswith': 'LIKE %s',
        'iendswith': 'LIKE %s',
    }
    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        self.server_version = None
        self.features = DatabaseFeatures()
        self.ops = DatabaseOperations()
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = DatabaseValidation()
    def _valid_connection(self):
        # Ping the server to detect connections that have gone away; drop the
        # stale handle so a fresh one is opened on the next cursor request.
        if self.connection is not None:
            try:
                self.connection.ping()
                return True
            except DatabaseError:
                self.connection.close()
                self.connection = None
        return False
    def _cursor(self):
        # Lazily (re)connect, then hand back a CursorWrapper around a new
        # MySQLdb cursor.
        if not self._valid_connection():
            kwargs = {
                'conv': django_conversions,
                'charset': 'utf8',
                'use_unicode': True,
            }
            settings_dict = self.settings_dict
            if settings_dict['DATABASE_USER']:
                kwargs['user'] = settings_dict['DATABASE_USER']
            if settings_dict['DATABASE_NAME']:
                kwargs['db'] = settings_dict['DATABASE_NAME']
            if settings_dict['DATABASE_PASSWORD']:
                kwargs['passwd'] = settings_dict['DATABASE_PASSWORD']
            if settings_dict['DATABASE_HOST'].startswith('/'):
                # A leading slash means a local UNIX socket path, not a hostname.
                kwargs['unix_socket'] = settings_dict['DATABASE_HOST']
            elif settings_dict['DATABASE_HOST']:
                kwargs['host'] = settings_dict['DATABASE_HOST']
            if settings_dict['DATABASE_PORT']:
                kwargs['port'] = int(settings_dict['DATABASE_PORT'])
            # We need the number of potentially affected rows after an
            # "UPDATE", not the number of changed rows.
            kwargs['client_flag'] = CLIENT.FOUND_ROWS
            kwargs.update(settings_dict['DATABASE_OPTIONS'])
            self.connection = Database.connect(**kwargs)
            self.connection.encoders[SafeUnicode] = self.connection.encoders[unicode]
            self.connection.encoders[SafeString] = self.connection.encoders[str]
            connection_created.send(sender=self.__class__)
        cursor = CursorWrapper(self.connection.cursor())
        return cursor
    def _rollback(self):
        # MyISAM tables don't support transactions; swallow the resulting
        # NotSupportedError so rollback is a no-op there.
        try:
            BaseDatabaseWrapper._rollback(self)
        except Database.NotSupportedError:
            pass
    def get_server_version(self):
        # Lazily parse the server version string into a cached tuple of ints.
        if not self.server_version:
            if not self._valid_connection():
                self.cursor()
            m = server_version_re.match(self.connection.get_server_info())
            if not m:
                raise Exception('Unable to determine MySQL version from version string %r' % self.connection.get_server_info())
            self.server_version = tuple([int(x) for x in m.groups()])
        return self.server_version
| 39.50165 | 127 | 0.636728 | """
MySQL database backend for Django.
Requires MySQLdb: http://sourceforge.net/projects/mysql-python
"""
import re
try:
import MySQLdb as Database
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading MySQLdb module: %s" % e)
# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
# inadvertently passes the version test.
version = Database.version_info
if (version < (1,2,1) or (version[:3] == (1, 2, 1) and
(len(version) < 5 or version[3] != 'final' or version[4] < 2))):
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__)
from MySQLdb.converters import conversions
from MySQLdb.constants import FIELD_TYPE, FLAG, CLIENT
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.mysql.client import DatabaseClient
from django.db.backends.mysql.creation import DatabaseCreation
from django.db.backends.mysql.introspection import DatabaseIntrospection
from django.db.backends.mysql.validation import DatabaseValidation
from django.utils.safestring import SafeString, SafeUnicode
# Raise exceptions for database warnings if DEBUG is on
from django.conf import settings
if settings.DEBUG:
from warnings import filterwarnings
filterwarnings("error", category=Database.Warning)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# MySQLdb-1.2.1 returns TIME columns as timedelta -- they are more like
# timedelta in terms of actual behavior as they are signed and include days --
# and Django expects time, so we still need to override that. We also need to
# add special handling for SafeUnicode and SafeString as MySQLdb's type
# Fix: this copy of the converter map was assigned to ``_conversions`` while
# the very next statement (and DatabaseWrapper._cursor below) reference
# ``django_conversions`` -- the mismatched name would raise NameError at
# import time.  Restore the original name.
django_conversions = conversions.copy()
django_conversions.update({
    FIELD_TYPE.TIME: util.typecast_time,
    FIELD_TYPE.DECIMAL: util.typecast_decimal,
    FIELD_TYPE.NEWDECIMAL: util.typecast_decimal,
})
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
class CursorWrapper(object):
    """
    A thin wrapper around MySQLdb's normal cursor class so that we can catch
    particular exception instances and reraise them with the right types.
    Implemented as a wrapper, rather than a subclass, so that we aren't stuck
    to the particular underlying representation returned by Connection.cursor().

    NOTE: this module is Python 2-era code (``except X, e`` syntax); keep any
    edits Python 2 compatible.
    """
    # MySQL error 1048 ("Column cannot be null", per the MySQL error
    # reference) is reported as OperationalError but is logically an
    # integrity violation.
    codes_for_integrityerror = (1048,)
    def __init__(self, cursor):
        self.cursor = cursor
    def execute(self, query, args=None):
        try:
            return self.cursor.execute(query, args)
        except Database.OperationalError, e:
            # Map some error codes to IntegrityError, since they seem to be
            # misclassified and Django would prefer the more logical place.
            if e[0] in self.codes_for_integrityerror:
                raise Database.IntegrityError(tuple(e))
            raise
    def executemany(self, query, args):
        try:
            return self.cursor.executemany(query, args)
        except Database.OperationalError, e:
            # Map some error codes to IntegrityError, since they seem to be
            # misclassified and Django would prefer the more logical place.
            if e[0] in self.codes_for_integrityerror:
                raise Database.IntegrityError(tuple(e))
            raise
    def __getattr__(self, attr):
        # Delegate any other attribute access to the wrapped cursor.
        if attr in self.__dict__:
            return self.__dict__[attr]
        else:
            return getattr(self.cursor, attr)
    def __iter__(self):
        return iter(self.cursor)
class DatabaseFeatures(BaseDatabaseFeatures):
    # Value MySQLdb returns for an exhausted fetchmany(): a tuple, not a list.
    empty_fetchmany_value = ()
    # MySQL cannot reference the table being updated in the UPDATE's own
    # subquery (presumably why this is disabled -- see MySQL docs).
    update_can_self_select = False
    allows_group_by_pk = True
    related_fields_match_type = True
class DatabaseOperations(BaseDatabaseOperations):
    # MySQL-specific SQL generation helpers.  Python 2-era code: note the
    # long literal ``L`` suffix and ``unicode`` built-in below.
    def date_extract_sql(self, lookup_type, field_name):
        # http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
        if lookup_type == 'week_day':
            # DAYOFWEEK() returns an integer, 1-7, Sunday=1.
            # Note: WEEKDAY() returns 0-6, Monday=0.
            return "DAYOFWEEK(%s)" % field_name
        else:
            return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
    def date_trunc_sql(self, lookup_type, field_name):
        # Truncate by reformatting: keep the leading date/time components up
        # to ``lookup_type`` and pad the remainder with zero defaults.
        fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
        format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
        format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
        try:
            i = fields.index(lookup_type) + 1
        except ValueError:
            # Unknown lookup type: return the column untruncated.
            sql = field_name
        else:
            format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
            sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
        return sql
    def drop_foreignkey_sql(self):
        return "DROP FOREIGN KEY"
    def force_no_ordering(self):
        """
        "ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
        columns. If no ordering would otherwise be applied, we don't want any
        implicit sorting going on.
        """
        return ["NULL"]
    def fulltext_search_sql(self, field_name):
        return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name
    def no_limit_value(self):
        # 2**64 - 1, as recommended by the MySQL documentation
        return 18446744073709551615L
    def quote_name(self, name):
        if name.startswith("`") and name.endswith("`"):
            return name # Quoting once is enough.
        return "`%s`" % name
    def random_function_sql(self):
        return 'RAND()'
    def sql_flush(self, style, tables, sequences):
        # NB: The generated SQL below is specific to MySQL
        # 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
        # to clear all tables of all data
        if tables:
            sql = ['SET FOREIGN_KEY_CHECKS = 0;']
            for table in tables:
                sql.append('%s %s;' % (style.SQL_KEYWORD('TRUNCATE'), style.SQL_FIELD(self.quote_name(table))))
            sql.append('SET FOREIGN_KEY_CHECKS = 1;')
            # 'ALTER TABLE table AUTO_INCREMENT = 1;'... style SQL statements
            # to reset sequence indices
            sql.extend(["%s %s %s %s %s;" % \
                (style.SQL_KEYWORD('ALTER'),
                 style.SQL_KEYWORD('TABLE'),
                 style.SQL_TABLE(self.quote_name(sequence['table'])),
                 style.SQL_KEYWORD('AUTO_INCREMENT'),
                 style.SQL_FIELD('= 1'),
                ) for sequence in sequences])
            return sql
        else:
            return []
    def value_to_db_datetime(self, value):
        if value is None:
            return None
        # MySQL doesn't support tz-aware datetimes
        if value.tzinfo is not None:
            raise ValueError("MySQL backend does not support timezone-aware datetimes.")
        # MySQL doesn't support microseconds
        return unicode(value.replace(microsecond=0))
    def value_to_db_time(self, value):
        if value is None:
            return None
        # MySQL doesn't support tz-aware datetimes
        if value.tzinfo is not None:
            raise ValueError("MySQL backend does not support timezone-aware datetimes.")
        # MySQL doesn't support microseconds
        return unicode(value.replace(microsecond=0))
    def year_lookup_bounds(self, value):
        # Again, no microseconds
        first = '%s-01-01 00:00:00'
        second = '%s-12-31 23:59:59.99'
        return [first % value, second % value]
class DatabaseWrapper(BaseDatabaseWrapper):
    # Mapping from Django field-lookup names to MySQL operator fragments;
    # the BINARY variants force case-sensitive matching.
    operators = {
        'exact': '= %s',
        'iexact': 'LIKE %s',
        'contains': 'LIKE BINARY %s',
        'icontains': 'LIKE %s',
        'regex': 'REGEXP BINARY %s',
        'iregex': 'REGEXP %s',
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': 'LIKE BINARY %s',
        'endswith': 'LIKE BINARY %s',
        'istartswith': 'LIKE %s',
        'iendswith': 'LIKE %s',
    }
    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        self.server_version = None
        self.features = DatabaseFeatures()
        self.ops = DatabaseOperations()
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = DatabaseValidation()
    def _valid_connection(self):
        # Ping the server to detect connections that have gone away; drop the
        # stale handle so a fresh one is opened on the next cursor request.
        if self.connection is not None:
            try:
                self.connection.ping()
                return True
            except DatabaseError:
                self.connection.close()
                self.connection = None
        return False
    def _cursor(self):
        # Lazily (re)connect, then hand back a CursorWrapper around a new
        # MySQLdb cursor.
        if not self._valid_connection():
            kwargs = {
                'conv': django_conversions,
                'charset': 'utf8',
                'use_unicode': True,
            }
            settings_dict = self.settings_dict
            if settings_dict['DATABASE_USER']:
                kwargs['user'] = settings_dict['DATABASE_USER']
            if settings_dict['DATABASE_NAME']:
                kwargs['db'] = settings_dict['DATABASE_NAME']
            if settings_dict['DATABASE_PASSWORD']:
                kwargs['passwd'] = settings_dict['DATABASE_PASSWORD']
            if settings_dict['DATABASE_HOST'].startswith('/'):
                # A leading slash means a local UNIX socket path, not a hostname.
                kwargs['unix_socket'] = settings_dict['DATABASE_HOST']
            elif settings_dict['DATABASE_HOST']:
                kwargs['host'] = settings_dict['DATABASE_HOST']
            if settings_dict['DATABASE_PORT']:
                kwargs['port'] = int(settings_dict['DATABASE_PORT'])
            # We need the number of potentially affected rows after an
            # "UPDATE", not the number of changed rows.
            kwargs['client_flag'] = CLIENT.FOUND_ROWS
            kwargs.update(settings_dict['DATABASE_OPTIONS'])
            self.connection = Database.connect(**kwargs)
            self.connection.encoders[SafeUnicode] = self.connection.encoders[unicode]
            self.connection.encoders[SafeString] = self.connection.encoders[str]
            connection_created.send(sender=self.__class__)
        cursor = CursorWrapper(self.connection.cursor())
        return cursor
    def _rollback(self):
        # MyISAM tables don't support transactions; swallow the resulting
        # NotSupportedError so rollback is a no-op there.
        try:
            BaseDatabaseWrapper._rollback(self)
        except Database.NotSupportedError:
            pass
    def get_server_version(self):
        # Lazily parse the server version string into a cached tuple of ints.
        if not self.server_version:
            if not self._valid_connection():
                self.cursor()
            m = server_version_re.match(self.connection.get_server_info())
            if not m:
                raise Exception('Unable to determine MySQL version from version string %r' % self.connection.get_server_info())
            self.server_version = tuple([int(x) for x in m.groups()])
        return self.server_version
| false | true |
f71e5cfe3706fc69791aed22bc1afc781220530b | 14,652 | py | Python | test/torch/pointers/test_pointer_tensor.py | Prtfw/PySyft | 82eede90acf09f26de389237524897e2b142f9ff | [
"Apache-2.0"
] | 1 | 2020-04-11T06:46:49.000Z | 2020-04-11T06:46:49.000Z | test/torch/pointers/test_pointer_tensor.py | haofanwang/PySyft | 653d31001dbc0c611d486c6c5543fce0abf9b27d | [
"Apache-2.0"
] | 1 | 2020-04-07T13:36:44.000Z | 2020-04-07T13:36:44.000Z | test/torch/pointers/test_pointer_tensor.py | JMBehnken/PySyft | 35012f5bf55628bb19761d5f40d03181fbbb1766 | [
"Apache-2.0"
] | 1 | 2020-03-11T09:52:47.000Z | 2020-03-11T09:52:47.000Z | import torch
import torch as th
import syft
from syft.frameworks.torch.tensors.interpreters.additive_shared import AdditiveSharingTensor
from syft.frameworks.torch.tensors.interpreters.precision import FixedPrecisionTensor
from syft.generic.pointers.pointer_tensor import PointerTensor
import pytest
def test_init(workers):
    """A PointerTensor can be constructed directly and rendered as a string."""
    alice = workers["alice"]
    me = workers["me"]
    ptr = PointerTensor(id=1000, location=alice, owner=me)
    # smoke test: string rendering must not raise
    ptr.__str__()
def test_create_pointer():
    """create_pointer() on a local tensor should yield a PointerTensor."""
    x = torch.Tensor([1, 2])
    ptr = x.create_pointer()
    # The original test discarded the result and asserted nothing, so it only
    # checked "does not raise".  Checking the returned type makes the test
    # fail if pointer creation starts returning something else.
    assert isinstance(ptr, PointerTensor)
def test_send_default_garbage_collector_true(workers):
    """
    Remote tensor should be garbage collected by default on
    deletion of the Pointer tensor pointing to remote tensor
    """
    alice = workers["alice"]

    local_tensor = torch.Tensor([-1, 2])
    pointer = local_tensor.send(alice)
    # the flag defaults to on: remote data dies with the pointer
    assert pointer.child.garbage_collect_data
def test_send_garbage_collect_data_false(workers):
    """
    Remote tensor should be not garbage collected on
    deletion of the Pointer tensor pointing to remote tensor
    """
    alice = workers["alice"]

    x = torch.Tensor([-1, 2])
    x_ptr = x.send(alice)
    x_ptr.garbage_collection = False
    # `is False` instead of `== False`: the flag is a plain bool and
    # comparing to a literal with `==` is the PEP 8 E712 antipattern.
    assert x_ptr.child.garbage_collect_data is False
def test_send_gc_false(workers):
    """
    Remote tensor should be not garbage collected on
    deletion of the Pointer tensor pointing to remote tensor
    """
    alice = workers["alice"]
    x = torch.Tensor([-1, 2])
    x_ptr = x.send(alice)
    x_ptr.gc = False
    # PEP 8 (E712): compare to the False literal with `is`, not `==`.
    assert x_ptr.child.garbage_collect_data is False
    assert x_ptr.gc is False, "property GC is not in sync"
    assert x_ptr.garbage_collection is False, "property garbage_collection is not in sync"
def test_send_gc_true(workers):
    """
    Remote tensor by default is garbage collected on
    deletion of Pointer Tensor
    """
    alice = workers["alice"]

    x = torch.Tensor([-1, 2])
    x_ptr = x.send(alice)
    # truthiness assert instead of `== True` (PEP 8 E712)
    assert x_ptr.gc
def test_send_disable_gc(workers):
    """Pointer tensor should be not garbage collected."""
    alice = workers["alice"]

    x = torch.Tensor([-1, 2])
    x_ptr = x.send(alice).disable_gc
    # PEP 8 (E712): compare to the False literal with `is`, not `==`.
    assert x_ptr.child.garbage_collect_data is False
    assert x_ptr.gc is False, "property GC is not in sync"
    assert x_ptr.garbage_collection is False, "property garbage_collection is not in sync"
def test_send_get(workers):
    """Exercise the send/get round trip in several chaining variants."""
    bob = workers["bob"]
    alice = workers["alice"]

    # plain round trip
    original = torch.Tensor([1, 2])
    remote = original.send(bob)
    assert (remote.get() == original).all()

    # round trip that overwrites the local variable
    tensor = torch.Tensor([1, 2])
    tensor = tensor.send(bob)
    assert (tensor.get() == torch.Tensor([1, 2])).all()

    # pointer-to-pointer: send the pointer itself to a second worker
    original = torch.Tensor([1, 2])
    ptr = original.send(bob)
    ptr_to_ptr = ptr.send(alice)
    assert (ptr_to_ptr.get().get() == original).all()

    # same double hop, overwriting the variable at every step
    tensor = torch.Tensor([1, 2])
    tensor = tensor.send(bob)
    tensor = tensor.send(alice)
    tensor = tensor.get()
    assert (tensor.get() == torch.Tensor([1, 2])).all()

    # chained double send
    tensor = torch.Tensor([1, 2]).send(bob).send(alice)
    assert (tensor.get().get() == torch.Tensor([1, 2])).all()
def test_inplace_send_get(workers):
    """In-place send_/get_ must preserve the id and identity of the tensor."""
    bob = workers["bob"]

    tensor = torch.tensor([1.0, -1.0, 3.0, 4.0])
    tensor_ptr = tensor.send_(bob)

    # send_ mutates `tensor` in place: same python object, same syft id
    assert tensor_ptr.id == tensor.id
    assert id(tensor_ptr) == id(tensor)

    tensor_back = tensor_ptr.get_()

    assert tensor_back.id == tensor_ptr.id
    assert tensor_back.id == tensor.id
    # (the original repeated this identity assertion twice; once is enough)
    assert id(tensor_back) == id(tensor)
    assert (tensor_back == tensor).all()
def test_repeated_send(workers):
    """Tests that repeated calls to .send(bob) works gracefully.
    Previously garbage collection deleted the remote object
    when .send() was called twice. This test ensures the fix still
    works."""
    bob = workers["bob"]

    x = torch.Tensor([1, 2])

    # send once, then send again; rebinding drops the first pointer and
    # previously triggered garbage collection of the remote object
    ptr = x.send(bob)
    ptr = x.send(bob)

    # the remote copy must still be registered on bob
    assert x.id in bob._objects
def test_remote_autograd(workers):
    """Tests the ability to backpropagate gradients on a remote
    worker."""
    bob = workers["bob"]

    # Part 1: gradients accumulate on the remote copy
    x = torch.tensor([1, 2, 3, 4.0], requires_grad=True).send(bob)
    (x + x).sum().backward()

    remote_grad = bob._objects[x.id_at_location].grad
    expected = torch.ones(4).float() + 1
    assert (remote_grad == expected).all()

    # Part 2: the gradient survives serialization when fetching the tensor
    x = torch.tensor([1, 2, 3, 4.0], requires_grad=True).send(bob)
    x.sum().backward()

    remote_grad = bob._objects[x.id_at_location].grad
    fetched = x.get()
    assert (fetched.grad == remote_grad).all()
def test_gradient_send_recv(workers):
    """Tests that gradients are properly sent and received along
    with their tensors."""
    bob = workers["bob"]

    # build a tensor with an accumulated gradient
    x = torch.tensor([1, 2, 3, 4.0], requires_grad=True)
    x.sum().backward(th.tensor(1.0))
    original_grad = x.grad

    # the gradient must survive the send/get round trip
    round_tripped = x.send(bob).get()
    assert (round_tripped.grad == original_grad).all()
def test_method_on_attribute(workers):
    """Calling a method through a pointer that targets a child attribute of
    the remote object should dispatch to that attribute."""
    bob = workers["bob"]

    # create remote object with children
    x = torch.Tensor([1, 2, 3])
    x = syft.LoggingTensor().on(x).send(bob)

    # call method on data tensor directly
    x.child.point_to_attr = "child.child"
    y = x.add(x)
    assert isinstance(y.get(), torch.Tensor)

    # call method on loggingtensor directly
    x.child.point_to_attr = "child"
    y = x.add(x)
    y = y.get()
    assert isinstance(y.child, syft.LoggingTensor)

    # .get() on a pointer to an attribute must be refused.  The original
    # wrapped this in try/except and did `assert True` in the handler, so the
    # test also passed silently when NO exception was raised; pytest.raises
    # makes the expectation explicit and removes the unused binding.
    x.child.point_to_attr = "child"
    with pytest.raises(syft.exceptions.CannotRequestObjectAttribute):
        x.get()
def test_grad_pointer(workers):
    """Tests the automatic creation of a .grad pointer when
    calling .send() on a tensor with requires_grad==True"""
    bob = workers["bob"]

    x = torch.tensor([1, 2, 3.0], requires_grad=True).send(bob)
    (x + x).sum().backward()

    # d/dx of sum(x + x) is 2 everywhere, accumulated on bob's copy
    remote_grad = bob._objects[x.id_at_location].grad
    assert (remote_grad == torch.tensor([2, 2, 2.0])).all()
def test_move(workers):
    """Moving a pointer's target with .move() registers the data on the
    destination worker; the first scenarios also assert the source worker
    keeps a registry entry, as the implementation currently does."""
    alice, bob, james, me = workers["alice"], workers["bob"], workers["james"], workers["me"]
    x = torch.tensor([1, 2, 3, 4, 5]).send(bob)
    assert x.id_at_location in bob._objects
    assert x.id_at_location not in alice._objects
    x.move(alice)
    # after the move the object is registered on alice; bob still holds an entry
    assert x.id_at_location in bob._objects
    assert x.id_at_location in alice._objects
    # same scenario with a tensor that requires grad
    x = torch.tensor([1.0, 2, 3, 4, 5], requires_grad=True).send(bob)
    assert x.id_at_location in bob._objects
    assert x.id_at_location not in alice._objects
    x.move(alice)
    assert x.id_at_location in bob._objects
    assert x.id_at_location in alice._objects
    # with fresh registries, a send + move leaves exactly one object on alice
    alice.clear_objects()
    bob.clear_objects()
    x = torch.tensor([1.0, 2, 3, 4, 5]).send(bob)
    x.move(alice)
    assert len(alice._objects) == 1
    # Test .move on remote objects (a pointer held by james, moved bob -> alice)
    james.clear_objects()
    x = th.tensor([1.0]).send(james)
    remote_x = james._objects[x.id_at_location]
    remote_ptr = remote_x.send(bob)
    assert remote_ptr.id in james._objects.keys()
    remote_ptr2 = remote_ptr.move(alice)
    assert remote_ptr2.id in james._objects.keys()
    # Test .move back to myself: the data round-trips unchanged
    alice.clear_objects()
    bob.clear_objects()
    x = torch.tensor([1.0, 2, 3, 4, 5]).send(bob)
    y = x.move(alice)
    z = y.move(me)
    assert (z == x).all()
def test_combine_pointers(workers):
    """
    Ensure that sy.combine_pointers works as expected, both when summing
    results across workers and when returning one result per worker.
    """
    bob = workers["bob"]
    alice = workers["alice"]

    x = th.tensor([1, 2, 3, 4, 5]).send(bob)
    y = th.tensor([1, 2, 3, 4, 5]).send(alice)

    a = x.combine(y)
    b = a + a

    # summed across the two workers: each contributes [2, 4, 6, 8, 10]
    c = b.get(sum_results=True)
    assert (c == th.tensor([4, 8, 12, 16, 20])).all()

    # one result per worker
    b = a + a
    c = b.get(sum_results=False)
    assert len(c) == 2
    # Bug fix: the original used `.all` without parentheses. A bound method
    # object is always truthy, so that assertion could never fail.
    assert (c[0] == th.tensor([2, 4, 6, 8, 10])).all()
    assert (c[1] == th.tensor([2, 4, 6, 8, 10])).all()
def test_remote_to_cpu_device(workers):
    """Ensure remote .to cpu works (must not raise)."""
    cpu = torch.device("cpu")
    bob = workers["bob"]
    pointer = th.tensor([1, 2, 3, 4, 5]).send(bob)
    pointer.to(cpu)


def test_get_remote_shape(workers):
    """Test pointer.shape functionality."""
    bob = workers["bob"]
    # shape of a directly sent tensor is recorded at sending time
    pointer = th.tensor([1, 2, 3, 4, 5]).send(bob)
    assert pointer.shape == torch.Size([5])
    # shape of a remote computation result requires a call to the worker
    result = pointer + pointer
    assert result.shape == torch.Size([5])
def test_remote_function_with_multi_ouput(workers):
    """Functions like .split return several tensors; registration and
    response handling must cope with multi-output results."""
    bob = workers["bob"]

    remote = torch.tensor([1, 2, 3, 4.0]).send(bob)
    halves = torch.split(remote, 2)
    assert (halves[0].get() == torch.tensor([1, 2.0])).all()

    remote = torch.tensor([1, 2, 3, 4.0]).send(bob)
    top_value, top_index = torch.max(remote, 0)
    assert top_value.get().item() == 4.0
    assert top_index.get().item() == 3


def test_raising_error_when_item_func_called(workers):
    """.item() on a bare pointer has no local value and must raise."""
    ptr = PointerTensor(id=1000, location=workers["alice"], owner=workers["me"])
    with pytest.raises(RuntimeError):
        ptr.item()
def test_fix_prec_on_pointer_tensor(workers):
    """fix_precision() through a pointer wraps the remote tensor and is
    not performed in place."""
    bob = workers["bob"]

    plain = torch.tensor([1, 2, 3, 4.0])
    ptr = plain.send(bob)
    fp_ptr = ptr.fix_precision()

    remote_plain = bob._objects[ptr.id_at_location]
    remote_fp = bob._objects[fp_ptr.id_at_location]

    # not in place: the original remote tensor is untouched
    assert (remote_plain == plain).all()
    assert isinstance(ptr.child, PointerTensor)
    assert isinstance(remote_fp.child, FixedPrecisionTensor)


def test_fix_prec_on_pointer_of_pointer(workers):
    """fix_precision() must work along a chain of pointers."""
    bob = workers["bob"]
    alice = workers["alice"]

    ptr = torch.tensor([1, 2, 3, 4.0]).send(bob).send(alice).fix_precision()

    on_alice = alice._objects[ptr.id_at_location]
    on_bob = bob._objects[on_alice.id_at_location]
    assert isinstance(ptr.child, PointerTensor)
    assert isinstance(on_bob.child, FixedPrecisionTensor)


def test_float_prec_on_pointer_tensor(workers):
    """float_precision() undoes fix_precision() through a pointer."""
    bob = workers["bob"]

    ptr = torch.tensor([1, 2, 3, 4.0]).send(bob).fix_precision().float_precision()

    remote = bob._objects[ptr.id_at_location]
    assert isinstance(ptr.child, PointerTensor)
    assert isinstance(remote, torch.Tensor)


def test_float_prec_on_pointer_of_pointer(workers):
    """float_precision() must work along a chain of pointers."""
    bob = workers["bob"]
    alice = workers["alice"]

    ptr = torch.tensor([1, 2, 3, 4.0]).send(bob).send(alice)
    ptr = ptr.fix_precision().float_precision()

    on_alice = alice._objects[ptr.id_at_location]
    on_bob = bob._objects[on_alice.id_at_location]
    assert isinstance(ptr.child, PointerTensor)
    assert isinstance(on_bob, torch.Tensor)
def test_share_get(workers):
    """.share() on a pointer should secret-share the remote tensor."""
    bob = workers["bob"]

    ptr = torch.tensor([1, 2, 3]).send(bob).share()

    shared = bob._objects[ptr.id_at_location]
    assert isinstance(ptr.child, PointerTensor)
    assert isinstance(shared.child, AdditiveSharingTensor)


def test_registration_of_action_on_pointer_of_pointer(workers):
    """Actions along a chain of pointers must register on every hop."""
    bob = workers["bob"]
    alice = workers["alice"]

    chained = torch.tensor([1, 2, 3, 4.0]).send(bob).send(alice)
    _ = chained + chained
    assert len(alice._objects) == 2
    assert len(bob._objects) == 2
def test_setting_back_grad_to_origin_after_send(workers):
    """backward() on a tensor sent via `.send(..., requires_grad=True)`
    must propagate the gradient back to the origin tensor."""
    me = workers["me"]
    alice = workers["alice"]

    with me.registration_enabled():
        origin = th.tensor([1.0, 2.0, 3, 4, 5], requires_grad=True)
        doubled = origin + origin
        # registration on the local worker is sometimes buggy
        me.register_obj(doubled)
        remote = doubled.send(alice, requires_grad=True)
        loss = (remote * 2).sum()
        loss.backward()

        assert (origin.grad == th.tensor([4.0, 4.0, 4.0, 4.0, 4.0])).all()


def test_setting_back_grad_to_origin_after_move(workers):
    """backward() on a tensor moved via `.move(..., requires_grad=True)`
    must still propagate the gradient back to the origin tensor."""
    me = workers["me"]
    bob = workers["bob"]
    alice = workers["alice"]

    with me.registration_enabled():
        origin = th.tensor([1.0, 2.0, 3, 4, 5], requires_grad=True)
        doubled = origin + origin
        # registration on the local worker is sometimes buggy
        me.register_obj(doubled)
        remote = doubled.send(alice, requires_grad=True)
        product = remote * 2
        moved = product.move(bob, requires_grad=True)
        loss = moved.sum()
        loss.backward()

        assert (origin.grad == th.tensor([4.0, 4.0, 4.0, 4.0, 4.0])).all()
| 26.305206 | 93 | 0.648034 | import torch
import torch as th
import syft
from syft.frameworks.torch.tensors.interpreters.additive_shared import AdditiveSharingTensor
from syft.frameworks.torch.tensors.interpreters.precision import FixedPrecisionTensor
from syft.generic.pointers.pointer_tensor import PointerTensor
import pytest
def test_init(workers):
alice, me = workers["alice"], workers["me"]
pointer = PointerTensor(id=1000, location=alice, owner=me)
pointer.__str__()
def test_create_pointer():
x = torch.Tensor([1, 2])
x.create_pointer()
def test_send_default_garbage_collector_true(workers):
alice = workers["alice"]
x = torch.Tensor([-1, 2])
x_ptr = x.send(alice)
assert x_ptr.child.garbage_collect_data
def test_send_garbage_collect_data_false(workers):
alice = workers["alice"]
x = torch.Tensor([-1, 2])
x_ptr = x.send(alice)
x_ptr.garbage_collection = False
assert x_ptr.child.garbage_collect_data == False
def test_send_gc_false(workers):
alice = workers["alice"]
x = torch.Tensor([-1, 2])
x_ptr = x.send(alice)
x_ptr.gc = False
assert x_ptr.child.garbage_collect_data == False
assert x_ptr.gc == False, "property GC is not in sync"
assert x_ptr.garbage_collection == False, "property garbage_collection is not in sync"
def test_send_gc_true(workers):
alice = workers["alice"]
x = torch.Tensor([-1, 2])
x_ptr = x.send(alice)
assert x_ptr.gc == True
def test_send_disable_gc(workers):
alice = workers["alice"]
x = torch.Tensor([-1, 2])
x_ptr = x.send(alice).disable_gc
assert x_ptr.child.garbage_collect_data == False
assert x_ptr.gc == False, "property GC is not in sync"
assert x_ptr.garbage_collection == False, "property garbage_collection is not in sync"
def test_send_get(workers):
bob = workers["bob"]
alice = workers["alice"]
x = torch.Tensor([1, 2])
x_ptr = x.send(bob)
x_back = x_ptr.get()
assert (x == x_back).all()
x = torch.Tensor([1, 2])
x = x.send(bob)
x_back = x.get()
assert (torch.Tensor([1, 2]) == x_back).all()
x = torch.Tensor([1, 2])
x_ptr = x.send(bob)
x_ptr_ptr = x_ptr.send(alice)
x_ptr_back = x_ptr_ptr.get()
x_back_back = x_ptr_back.get()
assert (x == x_back_back).all()
x = torch.Tensor([1, 2])
x = x.send(bob)
x = x.send(alice)
x = x.get()
x_back = x.get()
assert (torch.Tensor([1, 2]) == x_back).all()
x = torch.Tensor([1, 2])
x = x.send(bob).send(alice)
x_back = x.get().get()
assert (torch.Tensor([1, 2]) == x_back).all()
def test_inplace_send_get(workers):
bob = workers["bob"]
tensor = torch.tensor([1.0, -1.0, 3.0, 4.0])
tensor_ptr = tensor.send_(bob)
assert tensor_ptr.id == tensor.id
assert id(tensor_ptr) == id(tensor)
tensor_back = tensor_ptr.get_()
assert tensor_back.id == tensor_ptr.id
assert tensor_back.id == tensor.id
assert id(tensor_back) == id(tensor)
assert id(tensor_back) == id(tensor)
assert (tensor_back == tensor).all()
def test_repeated_send(workers):
bob = workers["bob"]
x = torch.Tensor([1, 2])
x_ptr = x.send(bob)
x_ptr = x.send(bob)
assert x.id in bob._objects
def test_remote_autograd(workers):
bob = workers["bob"]
x = torch.tensor([1, 2, 3, 4.0], requires_grad=True)
x = x.send(bob)
y = (x + x).sum()
y.backward()
x_grad = bob._objects[x.id_at_location].grad
x_grad_target = torch.ones(4).float() + 1
assert (x_grad == x_grad_target).all()
x = torch.tensor([1, 2, 3, 4.0], requires_grad=True).send(bob)
y = x.sum()
y.backward()
x_grad = bob._objects[x.id_at_location].grad
x = x.get()
assert (x.grad == x_grad).all()
def test_gradient_send_recv(workers):
bob = workers["bob"]
x = torch.tensor([1, 2, 3, 4.0], requires_grad=True)
x.sum().backward(th.tensor(1.0))
orig_grad = x.grad
t = x.send(bob).get()
assert (t.grad == orig_grad).all()
def test_method_on_attribute(workers):
bob = workers["bob"]
x = torch.Tensor([1, 2, 3])
x = syft.LoggingTensor().on(x).send(bob)
x.child.point_to_attr = "child.child"
y = x.add(x)
assert isinstance(y.get(), torch.Tensor)
x.child.point_to_attr = "child"
y = x.add(x)
y = y.get()
assert isinstance(y.child, syft.LoggingTensor)
x.child.point_to_attr = "child"
try:
x.get()
except syft.exceptions.CannotRequestObjectAttribute as e:
assert True
def test_grad_pointer(workers):
bob = workers["bob"]
x = torch.tensor([1, 2, 3.0], requires_grad=True).send(bob)
y = (x + x).sum()
y.backward()
assert (bob._objects[x.id_at_location].grad == torch.tensor([2, 2, 2.0])).all()
def test_move(workers):
alice, bob, james, me = workers["alice"], workers["bob"], workers["james"], workers["me"]
x = torch.tensor([1, 2, 3, 4, 5]).send(bob)
assert x.id_at_location in bob._objects
assert x.id_at_location not in alice._objects
x.move(alice)
assert x.id_at_location in bob._objects
assert x.id_at_location in alice._objects
x = torch.tensor([1.0, 2, 3, 4, 5], requires_grad=True).send(bob)
assert x.id_at_location in bob._objects
assert x.id_at_location not in alice._objects
x.move(alice)
assert x.id_at_location in bob._objects
assert x.id_at_location in alice._objects
alice.clear_objects()
bob.clear_objects()
x = torch.tensor([1.0, 2, 3, 4, 5]).send(bob)
x.move(alice)
assert len(alice._objects) == 1
james.clear_objects()
x = th.tensor([1.0]).send(james)
remote_x = james._objects[x.id_at_location]
remote_ptr = remote_x.send(bob)
assert remote_ptr.id in james._objects.keys()
remote_ptr2 = remote_ptr.move(alice)
assert remote_ptr2.id in james._objects.keys()
alice.clear_objects()
bob.clear_objects()
x = torch.tensor([1.0, 2, 3, 4, 5]).send(bob)
y = x.move(alice)
z = y.move(me)
assert (z == x).all()
def test_combine_pointers(workers):
bob = workers["bob"]
alice = workers["alice"]
x = th.tensor([1, 2, 3, 4, 5]).send(bob)
y = th.tensor([1, 2, 3, 4, 5]).send(alice)
a = x.combine(y)
b = a + a
c = b.get(sum_results=True)
assert (c == th.tensor([4, 8, 12, 16, 20])).all()
b = a + a
c = b.get(sum_results=False)
assert len(c) == 2
assert (c[0] == th.tensor([2, 4, 6, 8, 10])).all
def test_remote_to_cpu_device(workers):
device = torch.device("cpu")
bob = workers["bob"]
x = th.tensor([1, 2, 3, 4, 5]).send(bob)
x.to(device)
def test_get_remote_shape(workers):
bob = workers["bob"]
x = th.tensor([1, 2, 3, 4, 5]).send(bob)
assert x.shape == torch.Size([5])
y = x + x
assert y.shape == torch.Size([5])
def test_remote_function_with_multi_ouput(workers):
bob = workers["bob"]
tensor = torch.tensor([1, 2, 3, 4.0])
ptr = tensor.send(bob)
r_ptr = torch.split(ptr, 2)
assert (r_ptr[0].get() == torch.tensor([1, 2.0])).all()
tensor = torch.tensor([1, 2, 3, 4.0])
ptr = tensor.send(bob)
max_value, argmax_idx = torch.max(ptr, 0)
assert max_value.get().item() == 4.0
assert argmax_idx.get().item() == 3
def test_raising_error_when_item_func_called(workers):
pointer = PointerTensor(id=1000, location=workers["alice"], owner=workers["me"])
with pytest.raises(RuntimeError):
pointer.item()
def test_fix_prec_on_pointer_tensor(workers):
bob = workers["bob"]
tensor = torch.tensor([1, 2, 3, 4.0])
ptr = tensor.send(bob)
ptr_fp = ptr.fix_precision()
remote_tensor = bob._objects[ptr.id_at_location]
remote_fp_tensor = bob._objects[ptr_fp.id_at_location]
assert (remote_tensor == tensor).all()
assert isinstance(ptr.child, PointerTensor)
assert isinstance(remote_fp_tensor.child, FixedPrecisionTensor)
def test_fix_prec_on_pointer_of_pointer(workers):
bob = workers["bob"]
alice = workers["alice"]
tensor = torch.tensor([1, 2, 3, 4.0])
ptr = tensor.send(bob)
ptr = ptr.send(alice)
ptr = ptr.fix_precision()
alice_tensor = alice._objects[ptr.id_at_location]
remote_tensor = bob._objects[alice_tensor.id_at_location]
assert isinstance(ptr.child, PointerTensor)
assert isinstance(remote_tensor.child, FixedPrecisionTensor)
def test_float_prec_on_pointer_tensor(workers):
bob = workers["bob"]
tensor = torch.tensor([1, 2, 3, 4.0])
ptr = tensor.send(bob)
ptr = ptr.fix_precision()
ptr = ptr.float_precision()
remote_tensor = bob._objects[ptr.id_at_location]
assert isinstance(ptr.child, PointerTensor)
assert isinstance(remote_tensor, torch.Tensor)
def test_float_prec_on_pointer_of_pointer(workers):
bob = workers["bob"]
alice = workers["alice"]
tensor = torch.tensor([1, 2, 3, 4.0])
ptr = tensor.send(bob)
ptr = ptr.send(alice)
ptr = ptr.fix_precision()
ptr = ptr.float_precision()
alice_tensor = alice._objects[ptr.id_at_location]
remote_tensor = bob._objects[alice_tensor.id_at_location]
assert isinstance(ptr.child, PointerTensor)
assert isinstance(remote_tensor, torch.Tensor)
def test_share_get(workers):
bob = workers["bob"]
tensor = torch.tensor([1, 2, 3])
ptr = tensor.send(bob)
ptr = ptr.share()
remote_tensor = bob._objects[ptr.id_at_location]
assert isinstance(ptr.child, PointerTensor)
assert isinstance(remote_tensor.child, AdditiveSharingTensor)
def test_registration_of_action_on_pointer_of_pointer(workers):
bob = workers["bob"]
alice = workers["alice"]
tensor = torch.tensor([1, 2, 3, 4.0])
ptr = tensor.send(bob)
ptr = ptr.send(alice)
ptr_action = ptr + ptr
assert len(alice._objects) == 2
assert len(bob._objects) == 2
def test_setting_back_grad_to_origin_after_send(workers):
me = workers["me"]
alice = workers["alice"]
with me.registration_enabled():
x = th.tensor([1.0, 2.0, 3, 4, 5], requires_grad=True)
y = x + x
me.register_obj(y)
y_ptr = y.send(alice, requires_grad=True)
z_ptr = y_ptr * 2
z = z_ptr.sum()
z.backward()
assert (x.grad == th.tensor([4.0, 4.0, 4.0, 4.0, 4.0])).all()
def test_setting_back_grad_to_origin_after_move(workers):
me = workers["me"]
bob = workers["bob"]
alice = workers["alice"]
with me.registration_enabled():
x = th.tensor([1.0, 2.0, 3, 4, 5], requires_grad=True)
y = x + x
me.register_obj(y)
y_ptr = y.send(alice, requires_grad=True)
z_ptr = y_ptr * 2
z_ptr2 = z_ptr.move(bob, requires_grad=True)
z = z_ptr2.sum()
z.backward()
assert (x.grad == th.tensor([4.0, 4.0, 4.0, 4.0, 4.0])).all()
| true | true |
f71e5df787aa25c35b48b6c65a4730286ccce326 | 3,762 | py | Python | python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py | MissPenguin/Paddle | 70a9b6522bdbd8ce36be763d8c2866fc8095f1b9 | [
"Apache-2.0"
] | 1 | 2021-09-06T15:52:29.000Z | 2021-09-06T15:52:29.000Z | python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py | glisca/Paddle | 266fcbe0aed3e566c167ea8de5114f62c428c013 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py | glisca/Paddle | 266fcbe0aed3e566c167ea8de5114f62c428c013 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import abc
import os
import enum
import logging
import paddle
import paddle.fluid as fluid
from paddle.fluid.initializer import NumpyArrayInitializer
import paddle.fluid.core as core
from paddle import compat as cpt
import paddle.inference as paddle_infer
from typing import Optional, List, Callable, Dict, Any, Set
from program_config import TensorConfig, OpConfig, ProgramConfig, create_fake_model, create_quant_model
logging.basicConfig(level=logging.INFO, format="%(message)s")
class SkipReasons(enum.Enum):
    """Why a generated inference test case is skipped."""

    # Paddle does not support it yet, but TRT does; feature still to be added.
    TRT_NOT_IMPLEMENTED = 0
    # TensorRT itself does not support the case.
    TRT_NOT_SUPPORT = 1
class AutoScanTest(unittest.TestCase):
    """Base class for auto-generated Paddle inference tests.

    Subclasses provide program/predictor configurations via the abstract
    methods; this class offers helpers to run one configuration and to
    compare output tensors between runs.
    """

    def __init__(self, methodName='runTest'):
        # Fixed seed so generated configurations are reproducible.
        np.random.seed(1024)
        paddle.enable_static()
        super(AutoScanTest, self).__init__(methodName)
        # Registered (teller, reason, note) triples; see add_skip_case.
        self.skip_cases = []

    @abc.abstractmethod
    def sample_program_configs(self) -> List[ProgramConfig]:
        '''
        Generate all config with the combination of different Input tensor shape and
        different Attr values.
        '''
        raise NotImplementedError

    @abc.abstractmethod
    def sample_predictor_configs(self) -> List[paddle_infer.Config]:
        # Subclasses yield the inference configs each program is run with.
        raise NotImplementedError

    @abc.abstractmethod
    def add_skip_case(
            self,
            teller: [Callable[[ProgramConfig, paddle_infer.Config], bool]],
            reason: SkipReasons,
            note: str):
        # ``teller`` decides at run time whether a (program, predictor)
        # pair should be skipped; ``note`` is a human-readable explanation.
        self.skip_cases.append((teller, reason, note))

    @abc.abstractmethod
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        raise NotImplementedError

    def run_test_config(self, model, params, prog_config, pred_config,
                        feed_data) -> Dict[str, np.ndarray]:
        '''
        Test a single case.
        '''
        pred_config.set_model_buffer(model, len(model), params, len(params))
        predictor = paddle_infer.create_predictor(pred_config)

        # Feed every declared input, including LoD information when present.
        for name, _ in prog_config.inputs.items():
            input_tensor = predictor.get_input_handle(name)
            input_tensor.copy_from_cpu(feed_data[name]['data'])
            if feed_data[name]['lod'] is not None:
                input_tensor.set_lod(feed_data[name]['lod'])
        predictor.run()
        result = {}
        # Map predictor output names back to the program's output names
        # (zip pairs them positionally).
        for out_name, o_name in zip(prog_config.outputs,
                                    predictor.get_output_names()):
            result[out_name] = predictor.get_output_handle(o_name).copy_to_cpu()
        return result

    def assert_tensors_near(self,
                            threshold: float,
                            tensors: List[Dict[str, np.array]]):
        # Compare every later result dict elementwise against the first one.
        assert len(tensors) > 1
        first = tensors[0]
        for group in tensors[1:]:
            for key, arr in group.items():
                self.assertTrue(
                    np.allclose(
                        first[key], arr, atol=threshold),
                    "Output has diff between GPU and TensorRT. ")

    @abc.abstractmethod
    def run_test(self, quant=False):
        raise NotImplementedError
| 35.490566 | 103 | 0.661882 |
import numpy as np
import unittest
import abc
import os
import enum
import logging
import paddle
import paddle.fluid as fluid
from paddle.fluid.initializer import NumpyArrayInitializer
import paddle.fluid.core as core
from paddle import compat as cpt
import paddle.inference as paddle_infer
from typing import Optional, List, Callable, Dict, Any, Set
from program_config import TensorConfig, OpConfig, ProgramConfig, create_fake_model, create_quant_model
logging.basicConfig(level=logging.INFO, format="%(message)s")
class SkipReasons(enum.Enum):
TRT_NOT_IMPLEMENTED = 0
TRT_NOT_SUPPORT = 1
class AutoScanTest(unittest.TestCase):
def __init__(self, methodName='runTest'):
np.random.seed(1024)
paddle.enable_static()
super(AutoScanTest, self).__init__(methodName)
self.skip_cases = []
@abc.abstractmethod
def sample_program_configs(self) -> List[ProgramConfig]:
raise NotImplementedError
@abc.abstractmethod
def sample_predictor_configs(self) -> List[paddle_infer.Config]:
raise NotImplementedError
@abc.abstractmethod
def add_skip_case(
self,
teller: [Callable[[ProgramConfig, paddle_infer.Config], bool]],
reason: SkipReasons,
note: str):
self.skip_cases.append((teller, reason, note))
@abc.abstractmethod
def is_program_valid(self, program_config: ProgramConfig) -> bool:
raise NotImplementedError
def run_test_config(self, model, params, prog_config, pred_config,
feed_data) -> Dict[str, np.ndarray]:
pred_config.set_model_buffer(model, len(model), params, len(params))
predictor = paddle_infer.create_predictor(pred_config)
for name, _ in prog_config.inputs.items():
input_tensor = predictor.get_input_handle(name)
input_tensor.copy_from_cpu(feed_data[name]['data'])
if feed_data[name]['lod'] is not None:
input_tensor.set_lod(feed_data[name]['lod'])
predictor.run()
result = {}
for out_name, o_name in zip(prog_config.outputs,
predictor.get_output_names()):
result[out_name] = predictor.get_output_handle(o_name).copy_to_cpu()
return result
def assert_tensors_near(self,
threshold: float,
tensors: List[Dict[str, np.array]]):
assert len(tensors) > 1
first = tensors[0]
for group in tensors[1:]:
for key, arr in group.items():
self.assertTrue(
np.allclose(
first[key], arr, atol=threshold),
"Output has diff between GPU and TensorRT. ")
@abc.abstractmethod
def run_test(self, quant=False):
raise NotImplementedError
| true | true |
f71e5df90847f1507f8858e74f83696283613d19 | 52,912 | py | Python | snakemake/io.py | nikostr/snakemake | 99d2517b3456771574c26a40da0cff0e611ad99a | [
"MIT"
] | null | null | null | snakemake/io.py | nikostr/snakemake | 99d2517b3456771574c26a40da0cff0e611ad99a | [
"MIT"
] | null | null | null | snakemake/io.py | nikostr/snakemake | 99d2517b3456771574c26a40da0cff0e611ad99a | [
"MIT"
] | null | null | null | __author__ = "Johannes Köster"
__copyright__ = "Copyright 2021, Johannes Köster"
__email__ = "johannes.koester@uni-due.de"
__license__ = "MIT"
import collections
import os
import shutil
from pathlib import Path
import re
import stat
import time
import datetime
import json
import copy
import functools
import subprocess as sp
from itertools import product, chain
from contextlib import contextmanager
import string
import collections
import asyncio
from snakemake.exceptions import (
MissingOutputException,
WorkflowError,
WildcardError,
RemoteFileException,
)
from snakemake.logging import logger
from inspect import isfunction, ismethod
from snakemake.common import DYNAMIC_FILL, ON_WINDOWS, async_run
class Mtime:
    """Bundle of up to three modification times for one file: the local
    path itself, the target of a local symlink, and the remote copy."""

    __slots__ = ["_local", "_local_target", "_remote"]

    def __init__(self, local=None, local_target=None, remote=None):
        self._local = local
        self._local_target = local_target
        self._remote = remote

    def local_or_remote(self, follow_symlinks=False):
        """Prefer the remote mtime when known; otherwise behave like local()."""
        if self._remote is not None:
            return self._remote
        return self.local(follow_symlinks=follow_symlinks)

    def remote(self):
        """Return the remote mtime (None for purely local files)."""
        return self._remote

    def local(self, follow_symlinks=False):
        """Return the symlink-target mtime when requested and known,
        else the plain local mtime."""
        if follow_symlinks and self._local_target is not None:
            return self._local_target
        return self._local
def lutime(f, times):
    """Set access/modification times on f without ever touching a symlink's target.

    ``times`` is an ``(atime, mtime)`` pair, or a falsy value meaning "now".
    Returns None.
    """
    # In some cases, we have a platform where os.supports_follow_symlink includes stat()
    # but not utime(). This leads to an anomaly. In any case we never want to touch the
    # target of a link.
    if os.utime in os.supports_follow_symlinks:
        # ...utime is well behaved
        os.utime(f, times, follow_symlinks=False)
    elif not os.path.islink(f):
        # ...symlinks not an issue here
        os.utime(f, times)
    else:
        try:
            # Fall back to the system `touch -h` command, which never
            # follows symlinks.
            if times:
                # Bug fix: the module imports the *package* (`import datetime`),
                # so fromtimestamp lives on datetime.datetime — the previous
                # `datetime.fromtimestamp(...)` raised AttributeError here.
                fmt_time = lambda sec: datetime.datetime.fromtimestamp(sec).strftime(
                    "%Y%m%d%H%M.%S"
                )
                atime, mtime = times
                sp.check_call(["touch", "-h", f, "-a", "-t", fmt_time(atime)])
                sp.check_call(["touch", "-h", f, "-m", "-t", fmt_time(mtime)])
            else:
                sp.check_call(["touch", "-h", f])
        except sp.CalledProcessError:
            # ...problem system. Do nothing but warn; we must not touch the
            # link target, so there is no safe fallback left.
            logger.warning(
                "Unable to set utime on symlink {}. Your Python build does not support it.".format(
                    f
                )
            )
    return None
def lchmod(f, mode):
    """chmod ``f`` itself, avoiding the symlink target where the platform
    allows it; otherwise fall back to a plain chmod."""
    if os.chmod in os.supports_follow_symlinks:
        os.chmod(f, mode, follow_symlinks=False)
    else:
        os.chmod(f, mode)
class ExistsDict(dict):
    """Existence cache mapping paths to a cached "file exists" flag.

    ``has_inventory`` records parents that have been fully scanned, so for
    paths under them, absence from the dict means "known not to exist".
    """

    def __init__(self, cache):
        super().__init__()
        self.cache = cache
        self.has_inventory = set()

    def __getitem__(self, path):
        # A missing entry defaults to False. This is only reached after
        # __contains__ returned True, i.e. either the path is stored, or its
        # parent was inventoried and the path was not found — so it does not
        # exist.
        return self.get(path, False)

    def __contains__(self, path):
        # Paths whose parent was inventoried are always answerable.
        return path.get_inventory_parent() in self.has_inventory or super().__contains__(
            path
        )
class IOCache:
    """Per-workflow cache for filesystem metadata (existence, mtime, size).

    Avoids repeated stat calls while building the DAG; it can be
    deactivated once the cached information may become stale.
    """

    def __init__(self, max_wait_time):
        self.mtime = dict()
        self.exists_local = ExistsDict(self)
        self.exists_remote = ExistsDict(self)
        self.size = dict()
        # While True, cached lookups are used/updated by consumers.
        self.active = True
        # Time budget (seconds) that inventory scans may spend in total.
        self.remaining_wait_time = max_wait_time
        self.max_wait_time = max_wait_time

    def mtime_inventory(self, jobs):
        # Synchronous entry point around the async mtime collection.
        async_run(self._mtime_inventory(jobs))

    async def _mtime_inventory(self, jobs, n_workers=8):
        """Collect mtimes of all existing input/output/benchmark files of the
        given jobs into self.mtime, using n_workers concurrent consumers."""
        queue = asyncio.Queue()
        # Sentinel compared by identity; one is queued per worker to stop it.
        stop_item = object()

        async def worker(queue):
            while True:
                item = await queue.get()
                if item is stop_item:
                    queue.task_done()
                    return
                try:
                    self.mtime[item] = await self.collect_mtime(item)
                except Exception as e:
                    # Mark the item done before re-raising so a pending
                    # queue.join() cannot dead-lock on this item.
                    queue.task_done()
                    raise e
                queue.task_done()

        tasks = [
            asyncio.get_event_loop().create_task(worker(queue))
            for _ in range(n_workers)
        ]

        for job in jobs:
            for f in chain(job.input, job.expanded_output):
                if f.exists:
                    queue.put_nowait(f)
            if job.benchmark and job.benchmark.exists:
                queue.put_nowait(job.benchmark)

        # Send a stop item to each worker.
        for _ in range(n_workers):
            queue.put_nowait(stop_item)

        await asyncio.gather(*tasks)

    async def collect_mtime(self, path):
        # Bypass the cache on purpose: this fills the cache.
        return path.mtime_uncached

    def clear(self):
        # Drop all cached data and reset the inventory time budget.
        self.mtime.clear()
        self.size.clear()
        self.exists_local.clear()
        self.exists_remote.clear()
        self.remaining_wait_time = self.max_wait_time

    def deactivate(self):
        # Drop all cached data and stop caching from now on.
        self.clear()
        self.active = False
def IOFile(file, rule=None):
    """Wrap ``file`` in an _IOFile bound to ``rule`` (which is mandatory)."""
    assert rule is not None
    iofile = _IOFile(file)
    iofile.rule = rule
    return iofile
class _IOFile(str):
"""
A file that is either input or output of a rule.
"""
__slots__ = [
"_is_function",
"_file",
"rule",
"_regex",
]
    def __new__(cls, file):
        """Create the str-subclass instance, stripping trailing slashes and
        detecting callable (input function) files."""
        is_annotated = isinstance(file, AnnotatedString)
        # A "file" may actually be an input function, or an annotated string
        # explicitly flagged as callable.
        is_callable = (
            isfunction(file) or ismethod(file) or (is_annotated and bool(file.callable))
        )
        if not is_callable and file.endswith("/"):
            # remove trailing slashes
            stripped = file.rstrip("/")
            if is_annotated:
                # Carry the original string's flags over to the stripped copy.
                stripped = AnnotatedString(stripped)
                stripped.flags = file.flags
            file = stripped
        obj = str.__new__(cls, file)
        obj._is_function = is_callable
        obj._file = file
        obj.rule = None
        obj._regex = None

        if obj.is_remote:
            # Link the remote object back to this IOFile.
            obj.remote_object._iofile = obj

        return obj

    def iocache(func):
        """Decorator for the properties below: memoize the result in the
        workflow's IOCache (keyed by this file) while the cache is active."""

        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            if self.rule.workflow.iocache.active:
                # The cache attribute carries the same name as the wrapped
                # property (e.g. "mtime", "exists_local").
                cache = getattr(self.rule.workflow.iocache, func.__name__)
                if self in cache:
                    return cache[self]
                v = func(self, *args, **kwargs)
                cache[self] = v
                return v
            else:
                return func(self, *args, **kwargs)

        return wrapper

    def _refer_to_remote(func):
        """
        A decorator so that if the file is remote and has a version
        of the same file-related function, call that version instead.
        """

        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            if self.is_remote:
                if hasattr(self.remote_object, func.__name__):
                    return getattr(self.remote_object, func.__name__)(*args, **kwargs)
            return func(self, *args, **kwargs)

        return wrapper
    def inventory(self):
        # Synchronous entry point around _inventory.
        async_run(self._inventory())

    async def _inventory(self):
        """Starting from the given file, try to cache as much existence and
        modification date information of this and other files as possible.
        """
        cache = self.rule.workflow.iocache
        if cache.active:
            tasks = []
            if self.is_remote and self not in cache.exists_remote:
                # info not yet in inventory, let's discover as much as we can
                tasks.append(self.remote_object.inventory(cache))
            if not ON_WINDOWS and self not in cache.exists_local:
                # we don't want to mess with different path representations on windows
                tasks.append(self._local_inventory(cache))
            await asyncio.gather(*tasks)

    async def _local_inventory(self, cache):
        """Scan ancestor directories of this path and record existence of
        every entry found, within the cache's remaining time budget."""
        # for local files, perform BFS via os.scandir to determine existence of files
        if cache.remaining_wait_time <= 0:
            # No more time to create inventory.
            return

        start_time = time.time()

        folders = self.split("/")[:-1]
        if not folders:
            return

        if os.path.isabs(self):
            # For absolute paths, only use scan the immediate parent
            ancestors = [os.path.dirname(self)]
        else:
            # Relative path: scan each ancestor from the top down.
            ancestors = ["/".join(folders[:i]) for i in range(1, len(folders) + 1)]

        for (i, path) in enumerate(ancestors):
            if path in cache.exists_local.has_inventory:
                # This path was already scanned before, hence we can stop.
                break
            try:
                with os.scandir(path) as scan:
                    for entry in scan:
                        cache.exists_local[entry.path] = True
                cache.exists_local[path] = True
                cache.exists_local.has_inventory.add(path)
            except FileNotFoundError:
                # Not found, hence, all subfolders cannot be present as well
                for path in ancestors[i:]:
                    cache.exists_local[path] = False
                    cache.exists_local.has_inventory.add(path)
                break
            except PermissionError:
                raise WorkflowError(
                    "Insufficient permissions to access {}. "
                    "Please make sure that all accessed files and directories "
                    "are readable and writable for you.".format(self)
                )

        # Charge the scan time against the shared inventory budget.
        cache.remaining_wait_time -= time.time() - start_time

    @_refer_to_remote
    def get_inventory_parent(self):
        """If eligible for inventory, get the parent of a given path.

        This code does not work on local Windows paths,
        but inventory is disabled on Windows.
        """
        parent = os.path.dirname(self)
        if parent and parent != "..":
            return parent
@contextmanager
def open(self, mode="r", buffering=-1, encoding=None, errors=None, newline=None):
"""Open this file. If necessary, download it from remote first.
This can (and should) be used in a `with`-statement.
"""
if not self.exists:
raise WorkflowError(
"File {} cannot be opened, since it does not exist.".format(self)
)
if not self.exists_local and self.is_remote:
self.download_from_remote()
f = open(self)
try:
yield f
finally:
f.close()
    def contains_wildcard(self):
        # True if the raw file string still contains {wildcard} patterns.
        return contains_wildcard(self.file)

    @property
    def is_remote(self):
        # The underlying string was flagged with a remote provider object.
        return is_flagged(self._file, "remote_object")

    @property
    def is_ancient(self):
        return is_flagged(self._file, "ancient")

    @property
    def is_directory(self):
        return is_flagged(self._file, "directory")

    @property
    def is_temp(self):
        return is_flagged(self._file, "temp")

    @property
    def is_multiext(self):
        return is_flagged(self._file, "multiext")

    @property
    def multiext_prefix(self):
        return get_flag_value(self._file, "multiext")

    def update_remote_filepath(self):
        # if the file string is different in the iofile, update the remote object
        # (as in the case of wildcard expansion)
        remote_object = self.remote_object
        if remote_object._file != self._file:
            remote_object._iofile = self

    @property
    def should_keep_local(self):
        return self.remote_object.keep_local

    @property
    def should_stay_on_remote(self):
        return self.remote_object.stay_on_remote

    @property
    def remote_object(self):
        return get_flag_value(self._file, "remote_object")

    @property
    @_refer_to_remote
    def file(self):
        # Raw path string; raises for function-valued "files", which must be
        # evaluated with wildcards first.
        if not self._is_function:
            return self._file
        else:
            raise ValueError(
                "This IOFile is specified as a function and "
                "may not be used directly."
            )

    def check(self):
        """Warn about suspicious path strings: redundant './', leading or
        trailing whitespace, embedded line breaks, and doubled path
        separators. Emits warnings only; never raises."""
        hint = (
            "It can also lead to inconsistent results of the file-matching "
            "approach used by Snakemake."
        )
        if self._file.startswith("./"):
            logger.warning(
                "Relative file path '{}' starts with './'. This is redundant "
                "and strongly discouraged. {} You can simply omit the './' "
                "for relative file paths.".format(self._file, hint)
            )
        if self._file.startswith(" "):
            logger.warning(
                "File path '{}' starts with whitespace. "
                "This is likely unintended. {}".format(self._file, hint)
            )
        if self._file.endswith(" "):
            logger.warning(
                "File path '{}' ends with whitespace. "
                "This is likely unintended. {}".format(self._file, hint)
            )
        if "\n" in self._file:
            logger.warning(
                "File path '{}' contains line break. "
                "This is likely unintended. {}".format(self._file, hint)
            )
        if _double_slash_regex.search(self._file) is not None and not self.is_remote:
            logger.warning(
                "File path {} contains double '{}'. "
                "This is likely unintended. {}".format(self._file, os.path.sep, hint)
            )
@property
def exists(self):
if self.is_remote:
return self.exists_remote
else:
return self.exists_local
def parents(self, omit=0):
"""Yield all parent paths, omitting the given number of ancestors."""
for p in list(Path(self.file).parents)[::-1][omit:]:
p = IOFile(str(p), rule=self.rule)
p.clone_flags(self)
yield p
    @property
    @iocache
    def exists_local(self):
        """Cached os.path.exists() on the local path."""
        return os.path.exists(self.file)
    @property
    @iocache
    def exists_remote(self):
        """Cached remote existence check; always False for non-remote files."""
        if not self.is_remote:
            return False
        return self.remote_object.exists()
    @property
    def protected(self):
        """Returns True if the file is protected. Always False for symlinks."""
        # symlinks are never regarded as protected
        return (
            self.exists_local
            and not os.access(self.file, os.W_OK)
            and not os.path.islink(self.file)
        )
    @property
    @iocache
    def mtime(self):
        """Cached Mtime record (see mtime_uncached for the stat logic)."""
        return self.mtime_uncached
    @property
    def mtime_uncached(self):
        """Obtain mtime.

        Usually, this will be one stat call only. For symlinks and directories
        it will be two, for symlinked directories it will be three,
        for remote files it will additionally query the remote
        location.
        """
        mtime_remote = self.remote_object.mtime() if self.is_remote else None
        # We first do a normal stat.
        try:
            _stat = os.stat(self.file, follow_symlinks=False)

            is_symlink = stat.S_ISLNK(_stat.st_mode)
            is_dir = stat.S_ISDIR(_stat.st_mode)
            mtime = _stat.st_mtime

            def get_dir_mtime():
                # Try whether we have a timestamp file for it.
                # Directories track their mtime on a hidden marker file,
                # because content changes do not reliably bump the dir mtime.
                return os.stat(
                    os.path.join(self.file, ".snakemake_timestamp"),
                    follow_symlinks=True,
                ).st_mtime

            if not is_symlink:
                if is_dir:
                    try:
                        mtime = get_dir_mtime()
                    except FileNotFoundError:
                        # No timestamp, hence go on as if it is a file.
                        pass
                # In the usual case, not a dir, not a symlink.
                # We need just a single stat call.
                return Mtime(local=mtime, remote=mtime_remote)

            else:
                # In case of a symlink, we need the stats for the target file/dir.
                target_stat = os.stat(self.file, follow_symlinks=True)
                # Further, we need to check again if this is a directory.
                is_dir = stat.S_ISDIR(target_stat.st_mode)
                mtime_target = target_stat.st_mtime

                if is_dir:
                    try:
                        mtime_target = get_dir_mtime()
                    except FileNotFoundError:
                        # No timestamp, hence go on as if it is a file.
                        pass

                return Mtime(
                    local=mtime, local_target=mtime_target, remote=mtime_remote
                )
        except FileNotFoundError:
            if self.is_remote:
                return Mtime(remote=mtime_remote)
            raise WorkflowError(
                "Unable to obtain modification time of file {} although it existed before. "
                "It could be that a concurrent process has deleted it while Snakemake "
                "was running.".format(self.file)
            )
        except PermissionError:
            raise WorkflowError(
                "Unable to obtain modification time of file {} because of missing "
                "read permissions.".format(self.file)
            )
    @property
    def flags(self):
        """The flag dict of the underlying AnnotatedString (empty if plain str)."""
        return getattr(self._file, "flags", {})
    @property
    @iocache
    @_refer_to_remote
    def size(self):
        """Cached file size in bytes (remote size if the file is remote)."""
        return self.size_local
    @property
    def size_local(self):
        # follow symlinks but throw error if invalid
        return os.path.getsize(self.file) if False else self._size_local_impl()

    def _size_local_impl(self):
        # follow symlinks but throw error if invalid
        self.check_broken_symlink()
        return os.path.getsize(self.file)
    def check_broken_symlink(self):
        """ Raise WorkflowError if file is a broken symlink. """
        # NOTE(review): if the path does not exist at all, os.lstat raises
        # FileNotFoundError here rather than WorkflowError — callers appear
        # to invoke this only after an existence check; confirm.
        if not self.exists_local and os.lstat(self.file):
            raise WorkflowError(
                "File {} seems to be a broken symlink.".format(self.file)
            )
    @_refer_to_remote
    def is_newer(self, time):
        """Returns true of the file (which is an input file) is newer than time, or if it is
        a symlink that points to a file newer than time."""
        # ancient() files never trigger re-runs, regardless of timestamps
        if self.is_ancient:
            return False

        return self.mtime.local_or_remote(follow_symlinks=True) > time
def download_from_remote(self):
if self.is_remote and self.remote_object.exists():
if not self.should_stay_on_remote:
logger.info("Downloading from remote: {}".format(self.file))
self.remote_object.download()
logger.info("Finished download.")
else:
raise RemoteFileException(
"The file to be downloaded does not seem to exist remotely."
)
    def upload_to_remote(self):
        """Push the local file to its remote provider (no-op if not remote)."""
        if self.is_remote:
            logger.info("Uploading to remote: {}".format(self.file))
            self.remote_object.upload()
            logger.info("Finished upload.")
def prepare(self):
path_until_wildcard = re.split(DYNAMIC_FILL, self.file)[0]
dir = os.path.dirname(path_until_wildcard)
if len(dir) > 0:
try:
os.makedirs(dir, exist_ok=True)
except OSError as e:
# ignore Errno 17 "File exists" (reason: multiprocessing)
if e.errno != 17:
raise e
if is_flagged(self._file, "pipe"):
os.mkfifo(self._file)
def protect(self):
mode = (
os.lstat(self.file).st_mode & ~stat.S_IWUSR & ~stat.S_IWGRP & ~stat.S_IWOTH
)
if os.path.isdir(self.file):
for root, dirs, files in os.walk(self.file):
for d in dirs:
lchmod(os.path.join(self.file, d), mode)
for f in files:
lchmod(os.path.join(self.file, f), mode)
lchmod(self.file, mode)
def remove(self, remove_non_empty_dir=False):
if self.is_directory:
remove(self, remove_non_empty_dir=True)
else:
remove(self, remove_non_empty_dir=remove_non_empty_dir)
    def touch(self, times=None):
        """ times must be 2-tuple: (atime, mtime) """
        try:
            if self.is_directory:
                # directories carry their timestamp on a hidden marker file
                file = os.path.join(self.file, ".snakemake_timestamp")
                # Create the flag file if it doesn't exist
                if not os.path.exists(file):
                    with open(file, "w"):
                        pass
                lutime(file, times)
            else:
                lutime(self.file, times)
        except OSError as e:
            # errno 2 == ENOENT: the output to touch is missing entirely
            if e.errno == 2:
                raise MissingOutputException(
                    "Output file {} of rule {} shall be touched but "
                    "does not exist.".format(self.file, self.rule.name),
                    lineno=self.rule.lineno,
                    snakefile=self.rule.snakefile,
                )
            else:
                raise e
def touch_or_create(self):
try:
self.touch()
except MissingOutputException:
# first create directory if it does not yet exist
dir = self.file if self.is_directory else os.path.dirname(self.file)
if dir:
os.makedirs(dir, exist_ok=True)
# create empty file
file = (
os.path.join(self.file, ".snakemake_timestamp")
if self.is_directory
else self.file
)
with open(file, "w") as f:
pass
    def apply_wildcards(self, wildcards, fill_missing=False, fail_dynamic=False):
        """Return a new IOFile with the given wildcard values substituted.

        If this IOFile wraps an input function, the function is evaluated
        with the wildcards first; flags are carried over to the result.
        """
        f = self._file
        if self._is_function:
            f = self._file(Namedlist(fromdict=wildcards))

        # this bit ensures flags are transferred over to files after
        # wildcards are applied
        file_with_wildcards_applied = IOFile(
            apply_wildcards(
                f,
                wildcards,
                fill_missing=fill_missing,
                fail_dynamic=fail_dynamic,
                dynamic_fill=DYNAMIC_FILL,
            ),
            rule=self.rule,
        )
        file_with_wildcards_applied.clone_flags(self)
        return file_with_wildcards_applied
    def get_wildcard_names(self):
        """Return the set of wildcard names used in this file pattern."""
        return get_wildcard_names(self.file)
    def regex(self):
        """Lazily compile and cache the regex matching this file pattern."""
        if self._regex is None:
            # compile a regular expression
            self._regex = re.compile(regex(self.file))
        return self._regex
def constant_prefix(self):
first_wildcard = _wildcard_regex.search(self.file)
if first_wildcard:
return self.file[: first_wildcard.start()]
return self.file
def constant_suffix(self):
m = None
for m in _wildcard_regex.finditer(self.file):
pass
last_wildcard = m
if last_wildcard:
return self.file[last_wildcard.end() :]
return self.file
    def match(self, target):
        """Match *target* against this pattern's regex (None if no match)."""
        # `or None` is a no-op normalization; re.match already yields None
        return self.regex().match(target) or None
    def format_dynamic(self):
        """Render the dynamic placeholder as '{*}' for display purposes."""
        return self.replace(DYNAMIC_FILL, "{*}")
    def clone_flags(self, other):
        """Copy all flags from *other* onto this file."""
        if isinstance(self._file, str):
            self._file = AnnotatedString(self._file)
        if isinstance(other._file, AnnotatedString):
            self._file.flags = getattr(other._file, "flags", {}).copy()
            if "remote_object" in self._file.flags:
                # remote objects are stateful: take a private copy so that
                # later wildcard expansion does not mutate a shared instance
                self._file.flags["remote_object"] = copy.copy(
                    self._file.flags["remote_object"]
                )
                self.update_remote_filepath()
def clone_remote_object(self, other):
if (
isinstance(other._file, AnnotatedString)
and "remote_object" in other._file.flags
):
self._file.flags["remote_object"] = copy.copy(
other._file.flags["remote_object"]
)
self.update_remote_filepath()
    def set_flags(self, flags):
        """Replace the flag dict, wrapping the path in a fresh AnnotatedString."""
        if isinstance(self._file, str):
            self._file = AnnotatedString(self._file)
        self._file.flags = flags
def __eq__(self, other):
f = other._file if isinstance(other, _IOFile) else other
return self._file == f
    def __hash__(self):
        # hash like the underlying path string so dict/set lookups by
        # plain str keys keep working
        return self._file.__hash__()
# Detects accidental doubled path separators: "//" not preceded by ':' (so
# URL schemes like "s3://" stay unflagged) on POSIX, "\\" on Windows.
_double_slash_regex = (
    re.compile(r"([^:]//|^//)") if os.path.sep == "/" else re.compile(r"\\\\")
)
_wildcard_regex = re.compile(
r"""
\{
(?=( # This lookahead assertion emulates an 'atomic group'
# which is required for performance
\s*(?P<name>\w+) # wildcard name
(\s*,\s*
(?P<constraint> # an optional constraint
([^{}]+ | \{\d+(,\d+)?\})* # allow curly braces to nest one level
) # ... as in '{w,a{3,5}}'
)?\s*
))\1
\}
""",
re.VERBOSE,
)
def wait_for_files(
    files, latency_wait=3, force_stay_on_remote=False, ignore_pipe=False
):
    """Wait for given files to be present in filesystem."""
    files = list(files)

    def is_present(f):
        # Remote files meant to stay remote are checked at the remote end.
        if (
            isinstance(f, _IOFile)
            and f.is_remote
            and (force_stay_on_remote or f.should_stay_on_remote)
        ):
            return f.exists_remote
        # Ignored pipes count as always present.
        if is_flagged(f, "pipe") and ignore_pipe:
            return True
        return os.path.exists(f)

    def get_missing():
        return [f for f in files if not is_present(f)]

    missing = get_missing()
    if missing:
        logger.info(
            "Waiting at most {} seconds for missing files.".format(latency_wait)
        )
        # poll once per second until the latency budget is exhausted
        for _ in range(latency_wait):
            if not get_missing():
                return
            time.sleep(1)
        raise IOError(
            "Missing files after {} seconds:\n{}".format(
                latency_wait, "\n".join(get_missing())
            )
        )
def get_wildcard_names(pattern):
    """Return the set of wildcard names occurring in *pattern*."""
    return {match.group("name") for match in _wildcard_regex.finditer(pattern)}
def contains_wildcard(path):
    """True if *path* contains at least one {wildcard} declaration."""
    return _wildcard_regex.search(path) is not None
def contains_wildcard_constraints(pattern):
    """True if any wildcard in *pattern* declares an inline constraint."""
    return any(match.group("constraint") for match in _wildcard_regex.finditer(pattern))
def remove(file, remove_non_empty_dir=False):
    """Remove a file or directory, dispatching to its remote provider.

    Arguments:
        file -- an _IOFile (or object exposing the same attributes)
        remove_non_empty_dir -- recursively delete non-empty directories
    """
    import errno

    if file.is_remote and file.should_stay_on_remote:
        if file.exists_remote:
            file.remote_object.remove()
    elif os.path.isdir(file) and not os.path.islink(file):
        if remove_non_empty_dir:
            shutil.rmtree(file)
        else:
            try:
                os.removedirs(file)
            except OSError as e:
                # skip non empty directories; use errno.ENOTEMPTY instead of
                # the Linux-specific magic number 39 for portability
                if e.errno == errno.ENOTEMPTY:
                    logger.info(
                        "Skipped removing non-empty directory {}".format(e.filename)
                    )
                else:
                    logger.warning(str(e))
    # Remember that dangling symlinks fail the os.path.exists() test, but
    # we definitely still want to zap them. try/except is the safest way.
    # Also, we don't want to remove the null device if it is an output.
    elif os.devnull != str(file):
        try:
            os.remove(file)
        except FileNotFoundError:
            pass
def regex(filepattern):
    """Translate a wildcard filepattern into an anchored regular expression.

    The first occurrence of each wildcard becomes a named group (using its
    constraint or '.+'); later occurrences become backreferences.
    """
    f = []
    last = 0
    wildcards = set()
    for match in _wildcard_regex.finditer(filepattern):
        f.append(re.escape(filepattern[last : match.start()]))
        wildcard = match.group("name")
        if wildcard in wildcards:
            if match.group("constraint"):
                raise ValueError(
                    "Constraint regex must be defined only in the first "
                    "occurence of the wildcard in a string."
                )
            # repeated wildcard: backreference to the first occurrence
            f.append("(?P={})".format(wildcard))
        else:
            wildcards.add(wildcard)
            f.append(
                "(?P<{}>{})".format(
                    wildcard,
                    match.group("constraint") if match.group("constraint") else ".+",
                )
            )
        last = match.end()
    f.append(re.escape(filepattern[last:]))
    f.append("$")  # ensure that the match spans the whole file
    return "".join(f)
def apply_wildcards(
    pattern,
    wildcards,
    fill_missing=False,
    fail_dynamic=False,
    dynamic_fill=None,
    keep_dynamic=False,
):
    """Substitute wildcard values into *pattern*.

    Arguments:
        wildcards -- mapping of wildcard name to value
        fill_missing -- substitute *dynamic_fill* for unknown wildcards
        fail_dynamic -- raise WildcardError when a value equals *dynamic_fill*
        keep_dynamic -- leave unknown wildcards as '{name}' placeholders
    """

    def format_match(match):
        name = match.group("name")
        try:
            value = wildcards[name]
            if fail_dynamic and value == dynamic_fill:
                raise WildcardError(name)
            return str(value)  # convert anything into a str
        except KeyError as ex:
            if keep_dynamic:
                return "{{{}}}".format(name)
            elif fill_missing:
                return dynamic_fill
            else:
                raise WildcardError(str(ex))

    return _wildcard_regex.sub(format_match, pattern)
def not_iterable(value):
    """True for values that flagging/expansion treat as atomic:
    strings, dicts, and anything that is not iterable."""
    if isinstance(value, (str, dict)):
        return True
    return not isinstance(value, collections.abc.Iterable)
def is_callable(value):
    """True if *value* is callable or wraps a callable (IOFile/AnnotatedString)."""
    return (
        callable(value)
        or (isinstance(value, _IOFile) and value._is_function)
        or (isinstance(value, AnnotatedString) and value.callable is not None)
    )
class AnnotatedString(str):
    """A str subclass carrying a flag dict and (optionally) a callable.

    Note: because str is immutable, the string value is fixed by __new__;
    __init__ only attaches the annotation attributes.
    """

    def __init__(self, value):
        self.flags = dict()
        self.callable = value if is_callable(value) else None
def flag(value, flag_type, flag_value=True):
    """Attach *flag_type* -> *flag_value* to a value (or to each item of an
    iterable), promoting plain strings to AnnotatedString as needed."""
    if isinstance(value, AnnotatedString):
        value.flags[flag_type] = flag_value
        return value
    if not_iterable(value):
        value = AnnotatedString(value)
        value.flags[flag_type] = flag_value
        return value
    # iterable of values: flag each element recursively
    return [flag(v, flag_type, flag_value=flag_value) for v in value]
def is_flagged(value, flag):
    """Return the truthiness of *flag* on *value* (False for plain values)."""
    # both AnnotatedString and _IOFile expose a `.flags` mapping
    if isinstance(value, (AnnotatedString, _IOFile)):
        return flag in value.flags and value.flags[flag]
    return False
def get_flag_value(value, flag_type):
    """Return the value stored for *flag_type* on *value*, or None."""
    if isinstance(value, (AnnotatedString, _IOFile)):
        return value.flags.get(flag_type)
    return None
def ancient(value):
    """
    A flag for an input file that shall be considered ancient; i.e. its timestamp shall have no effect on which jobs to run.
    """
    return flag(value, "ancient")
def directory(value):
    """
    A flag to specify that an output is a directory, rather than a file or named pipe.
    """
    conflicts = {
        "pipe": "Pipe and directory flags are mutually exclusive.",
        "remote": "Remote and directory flags are mutually exclusive.",
        "dynamic": "Dynamic and directory flags are mutually exclusive.",
    }
    # dicts preserve insertion order, so checks run in the original order
    for other, message in conflicts.items():
        if is_flagged(value, other):
            raise SyntaxError(message)
    return flag(value, "directory")
def temp(value):
    """
    A flag for an input or output file that shall be removed after usage.
    """
    for other, message in (
        ("protected", "Protected and temporary flags are mutually exclusive."),
        ("remote", "Remote and temporary flags are mutually exclusive."),
    ):
        if is_flagged(value, other):
            raise SyntaxError(message)
    return flag(value, "temp")
def pipe(value):
    """Flag an output as a named pipe (unsupported on Windows)."""
    for other, message in (
        ("protected", "Pipes may not be protected."),
        ("remote", "Pipes may not be remote files."),
    ):
        if is_flagged(value, other):
            raise SyntaxError(message)
    if ON_WINDOWS:
        logger.warning("Pipes is not yet supported on Windows.")
    # on Windows the flag value is False, effectively disabling the pipe
    return flag(value, "pipe", not ON_WINDOWS)
def temporary(value):
    """ An alias for temp. """
    return temp(value)
def protected(value):
    """ A flag for a file that shall be write protected after creation. """
    for other, message in (
        ("temp", "Protected and temporary flags are mutually exclusive."),
        ("remote", "Remote and protected flags are mutually exclusive."),
    ):
        if is_flagged(value, other):
            raise SyntaxError(message)
    return flag(value, "protected")
def dynamic(value):
    """
    A flag for a file that shall be dynamic, i.e. the multiplicity
    (and wildcard values) will be expanded after a certain
    rule has been run"""
    annotated = flag(value, "dynamic", True)
    tocheck = [annotated] if not_iterable(annotated) else annotated
    for file in tocheck:
        # constraints cannot be honored once wildcards become dynamic
        for match in _wildcard_regex.finditer(file):
            if match.group("constraint"):
                raise SyntaxError(
                    "The wildcards in dynamic files cannot be constrained."
                )
    return annotated
def touch(value):
    """Flag an output so that it is merely touched (created empty) on success."""
    return flag(value, "touch")
def unpack(value):
    """Flag an input function so its returned dict/list is unpacked into
    named/positional items."""
    return flag(value, "unpack")
def repeat(value, n_repeat):
    """Flag benchmark records with the number of repeats."""
    return flag(value, "repeat", n_repeat)
def checkpoint_target(value):
    """Flag a file as being produced by a checkpoint rule."""
    return flag(value, "checkpoint_target")
# Value stored under the "report" flag; see report() below for field meaning.
ReportObject = collections.namedtuple(
    "ReportObject", ["caption", "category", "subcategory", "patterns", "htmlindex"]
)
def report(
    value, caption=None, category=None, subcategory=None, patterns=[], htmlindex=None
):
    """Flag output file or directory as to be included into reports.

    In case of directory, files to include can be specified via a glob pattern (default: *).

    Arguments
    value -- File or directory.
    caption -- Path to a .rst file with a textual description of the result.
    category -- Name of the category in which the result should be displayed in the report.
    subcategory -- Name of the subcategory below *category*.
    patterns -- Wildcard patterns for selecting files if a directory is given (this is used as
               input for snakemake.io.glob_wildcards). Patterns shall not include the path to the
               directory itself.
    htmlindex -- Entry point HTML file if a directory of HTML files is reported.
    """
    return flag(
        value,
        "report",
        ReportObject(caption, category, subcategory, patterns, htmlindex),
    )
def local(value):
    """Mark a file as local file. This disables application of a default remote
    provider.
    """
    if is_flagged(value, "remote"):
        raise SyntaxError("Remote and local flags are mutually exclusive.")
    return flag(value, "local")
def expand(*args, **wildcards):
    """
    Expand wildcards in given filepatterns.

    Arguments
    *args -- first arg: filepatterns as list or one single filepattern,
        second arg (optional): a function to combine wildcard values
        (itertools.product per default)
    **wildcards -- the wildcards as keyword arguments
        with their values as lists. If allow_missing=True is included
        wildcards in filepattern without values will stay unformatted.
    """
    filepatterns = args[0]
    if len(args) == 1:
        combinator = product
    elif len(args) == 2:
        combinator = args[1]
    else:
        # previously, extra positional args left `combinator` unbound and
        # surfaced later as a confusing UnboundLocalError
        raise WorkflowError(
            "expand takes at most two positional arguments "
            "(filepatterns and an optional combinator function)."
        )
    if isinstance(filepatterns, str) or isinstance(filepatterns, Path):
        filepatterns = [filepatterns]

    def path_to_str(f):
        if isinstance(f, Path):
            return str(f)
        return f

    filepatterns = list(map(path_to_str, filepatterns))

    if any(map(lambda f: getattr(f, "flags", {}), filepatterns)):
        raise WorkflowError(
            "Flags in file patterns given to expand() are invalid. "
            "Flags (e.g. temp(), directory()) have to be applied outside "
            "of expand (e.g. 'temp(expand(\"plots/{sample}.pdf\", sample=SAMPLES))')."
        )

    # check if remove missing is provided
    format_dict = dict
    if "allow_missing" in wildcards and wildcards["allow_missing"] is True:

        class FormatDict(dict):
            # leave unknown wildcards as literal "{name}" placeholders
            def __missing__(self, key):
                return "{" + key + "}"

        format_dict = FormatDict
        # check that remove missing is not a wildcard in the filepatterns
        for filepattern in filepatterns:
            if "allow_missing" in re.findall(r"{([^}\.[!:]+)", filepattern):
                format_dict = dict
                break

    # remove unused wildcards to avoid duplicate filepatterns
    wildcards = {
        filepattern: {
            k: v
            for k, v in wildcards.items()
            if k in re.findall(r"{([^}\.[!:]+)", filepattern)
        }
        for filepattern in filepatterns
    }

    def flatten(wildcards):
        for wildcard, values in wildcards.items():
            if isinstance(values, str) or not isinstance(
                values, collections.abc.Iterable
            ):
                values = [values]
            yield [(wildcard, value) for value in values]

    formatter = string.Formatter()
    try:
        return [
            formatter.vformat(filepattern, (), comb)
            for filepattern in filepatterns
            for comb in map(format_dict, combinator(*flatten(wildcards[filepattern])))
        ]
    except KeyError as e:
        raise WildcardError("No values given for wildcard {}.".format(e))
def multiext(prefix, *extensions):
    """Expand a given prefix with multiple extensions (e.g. .txt, .csv, _peaks.bed, ...).

    Raises:
        WorkflowError: if any extension contains a path separator.
    """
    # check for a single forward OR single backward slash; the previous
    # r"\\" literal denoted TWO backslash characters, so a lone backslash
    # slipped through the validation
    if any(("/" in ext or "\\" in ext) for ext in extensions):
        raise WorkflowError(
            r"Extensions for multiext may not contain path delimiters " r"(/,\)."
        )
    return [flag(prefix + ext, "multiext", flag_value=prefix) for ext in extensions]
def limit(pattern, **wildcards):
    """
    Limit wildcards to the given values.

    Arguments:
    **wildcards -- the wildcards as keyword arguments
                   with their values as lists
    """
    constraints = {
        name: "{{{},{}}}".format(name, "|".join(values))
        for name, values in wildcards.items()
    }
    return pattern.format(**constraints)
def glob_wildcards(pattern, files=None, followlinks=False):
    """
    Glob the values of the wildcards by matching the given pattern to the filesystem.
    Returns a named tuple with a list of values for each wildcard.
    """
    pattern = os.path.normpath(pattern)
    # only walk the constant directory part before the first wildcard
    first_wildcard = re.search("{[^{]", pattern)
    dirname = (
        os.path.dirname(pattern[: first_wildcard.start()])
        if first_wildcard
        else os.path.dirname(pattern)
    )
    if not dirname:
        dirname = "."

    names = [match.group("name") for match in _wildcard_regex.finditer(pattern)]
    Wildcards = collections.namedtuple("Wildcards", names)
    wildcards = Wildcards(*[list() for name in names])

    pattern = re.compile(regex(pattern))

    if files is None:
        # lazily walk the filesystem, considering both files and directories
        files = (
            os.path.normpath(os.path.join(dirpath, f))
            for dirpath, dirnames, filenames in os.walk(
                dirname, followlinks=followlinks
            )
            for f in chain(filenames, dirnames)
        )

    for f in files:
        match = re.match(pattern, f)
        if match:
            for name, value in match.groupdict().items():
                getattr(wildcards, name).append(value)
    return wildcards
def update_wildcard_constraints(
    pattern, wildcard_constraints, global_wildcard_constraints
):
    """Update wildcard constraints

    Args:
      pattern (str): pattern on which to update constraints
      wildcard_constraints (dict): dictionary of wildcard:constraint key-value pairs
      global_wildcard_constraints (dict): dictionary of wildcard:constraint key-value pairs
    """

    def replace_constraint(match):
        name = match.group("name")
        constraint = match.group("constraint")
        # rule-level constraints take precedence over global ones
        newconstraint = wildcard_constraints.get(
            name, global_wildcard_constraints.get(name)
        )
        if name in examined_names:
            return match.group(0)
        examined_names.add(name)
        # Don't override if constraint already set
        if constraint is not None:
            return match.group(0)
        # Only update if a new constraint has actually been set
        elif newconstraint is not None:
            return "{{{},{}}}".format(name, newconstraint)
        else:
            return match.group(0)

    examined_names = set()
    updated = _wildcard_regex.sub(replace_constraint, pattern)

    # inherit flags
    if isinstance(pattern, AnnotatedString):
        updated = AnnotatedString(updated)
        updated.flags = dict(pattern.flags)
    return updated
def split_git_path(path):
    """Split a git+file URL into (repo_root, relative_file_path, version).

    Expected format: git+file:///path/to/repo/path/to/file@version
    """
    file_sub = re.sub(r"^git\+file:/+", "/", path)
    # split on the LAST '@' so that '@' inside the file path does not break
    # tuple unpacking (the version always comes after the final '@')
    (file_path, version) = file_sub.rsplit("@", 1)
    file_path = os.path.realpath(file_path)
    root_path = get_git_root(file_path)
    if file_path.startswith(root_path):
        file_path = file_path[len(root_path) :].lstrip("/")
    return (root_path, file_path, version)
def get_git_root(path):
    """
    Args:
        path: (str) Path a to a directory/file that is located inside the repo

    Returns:
        path to root folder for git repo
    """
    # GitPython is an optional dependency, hence the local import
    import git

    try:
        git_repo = git.Repo(path, search_parent_directories=True)
        return git_repo.git.rev_parse("--show-toplevel")
    except git.exc.NoSuchPathError:
        # path may live on a branch/tag that is not checked out; climb up
        tail, head = os.path.split(path)
        return get_git_root_parent_directory(tail, path)
def get_git_root_parent_directory(path, input_path):
    """
    This function will recursively go through parent directories until a git
    repository is found or until no parent directories are left, in which case
    a error will be raised. This is needed when providing a path to a
    file/folder that is located on a branch/tag no currently checked out.

    Args:
        path: (str) Path a to a directory that is located inside the repo
        input_path: (str) origin path, used when raising WorkflowError

    Returns:
        path to root folder for git repo
    """
    import git

    try:
        git_repo = git.Repo(path, search_parent_directories=True)
        return git_repo.git.rev_parse("--show-toplevel")
    except git.exc.NoSuchPathError:
        tail, head = os.path.split(path)
        # os.path.split never yields None: when no parents remain, tail is
        # the empty string. The previous `tail is None` check could never be
        # true and led to unbounded recursion instead of a clear error.
        if not tail:
            raise WorkflowError(
                "Neither provided git path ({}) ".format(input_path)
                + "or parent directories contain a valid git repo."
            )
        else:
            return get_git_root_parent_directory(tail, input_path)
def git_content(git_file):
    """
    This function will extract a file from a git repository, one located on
    the filesystem.
    Expected format is git+file:///path/to/your/repo/path_to_file@@version

    Args:
      env_file (str): consist of path to repo, @, version and file information
                      Ex: git+file:////home/smeds/snakemake-wrappers/bio/fastqc/wrapper.py@0.19.3

    Returns:
        file content or None if the expected format isn't meet
    """
    import git

    if git_file.startswith("git+file:"):
        (root_path, file_path, version) = split_git_path(git_file)
        # `git show version:path` reads the file content at that revision
        return git.Repo(root_path).git.show("{}:{}".format(version, file_path))
    else:
        raise WorkflowError(
            "Provided git path ({}) doesn't meet the "
            "expected format:".format(git_file) + ", expected format is "
            "git+file://PATH_TO_REPO/PATH_TO_FILE_INSIDE_REPO@VERSION"
        )
def strip_wildcard_constraints(pattern):
    """Return a string that does not contain any wildcard constraints."""

    def keep_name_only(match):
        return "{{{}}}".format(match.group("name"))

    return _wildcard_regex.sub(keep_name_only, pattern)
class Namedlist(list):
    """
    A list that additionally provides functions to name items. Further,
    it is hashable, however the hash does not consider the item names.
    """

    def __init__(
        self,
        toclone=None,
        fromdict=None,
        plainstr=False,
        strip_constraints=False,
        custom_map=None,
    ):
        """
        Create the object.

        Arguments
        toclone  -- another Namedlist that shall be cloned
        fromdict -- a dict that shall be converted to a
            Namedlist (keys become names)
        plainstr -- clone items as plain strings
        strip_constraints -- clone items with wildcard constraints removed
        custom_map -- clone items through an arbitrary mapping function
        """
        list.__init__(self)

        # name -> (start_index, end_index_or_None); None end means single item
        self._names = dict()

        # white-list of attribute names that can be overridden in _set_name
        # default to throwing exception if called to prevent use as functions
        self._allowed_overrides = ["index", "sort"]
        for name in self._allowed_overrides:
            setattr(self, name, functools.partial(self._used_attribute, _name=name))

        if toclone:
            if custom_map is not None:
                self.extend(map(custom_map, toclone))
            elif plainstr:
                self.extend(map(str, toclone))
            elif strip_constraints:
                self.extend(map(strip_wildcard_constraints, toclone))
            else:
                self.extend(toclone)
            if isinstance(toclone, Namedlist):
                self._take_names(toclone._get_names())
        if fromdict:
            for key, item in fromdict.items():
                self.append(item)
                self._add_name(key)

    @staticmethod
    def _used_attribute(*args, _name, **kwargs):
        """
        Generic function that throws an `AttributeError`.

        Used as replacement for functions such as `index()` and `sort()`,
        which may be overridden by workflows, to signal to a user that
        these functions should not be used.
        """
        raise AttributeError(
            "{_name}() cannot be used; attribute name reserved"
            " for use in some existing workflows".format(_name=_name)
        )

    def _add_name(self, name):
        """
        Add a name to the last item.

        Arguments
        name -- a name
        """
        self._set_name(name, len(self) - 1)

    def _set_name(self, name, index, end=None):
        """
        Set the name of an item.

        Arguments
        name  -- a name
        index -- the item index
        end   -- exclusive end index when the name refers to a slice
        """
        if name not in self._allowed_overrides and hasattr(self.__class__, name):
            raise AttributeError(
                "invalid name for input, output, wildcard, "
                "params or log: {name} is reserved for internal use".format(name=name)
            )

        self._names[name] = (index, end)
        if end is None:
            setattr(self, name, self[index])
        else:
            # a named slice becomes its own Namedlist
            setattr(self, name, Namedlist(toclone=self[index:end]))

    def _get_names(self):
        """
        Get the defined names as (name, index) pairs.
        """
        for name, index in self._names.items():
            yield name, index

    def _take_names(self, names):
        """
        Take over the given names.

        Arguments
        names -- the given names as (name, index) pairs
        """
        for name, (i, j) in names:
            self._set_name(name, i, end=j)

    def items(self):
        # yields only NAMED items, unlike _allitems below
        for name in self._names:
            yield name, getattr(self, name)

    def _allitems(self):
        # yield every item in order, pairing named items/slices with their
        # name and unnamed items with None
        next = 0
        for name, index in sorted(
            self._names.items(),
            key=lambda item: (
                item[1][0],
                item[1][0] + 1 if item[1][1] is None else item[1][1],
            ),
        ):
            start, end = index
            if end is None:
                end = start + 1
            if start > next:
                for item in self[next:start]:
                    yield None, item
            yield name, getattr(self, name)
            next = end
        for item in self[next:]:
            yield None, item

    def _insert_items(self, index, items):
        # replace the single item at `index` by `items`, shifting all name
        # indices that come after it accordingly
        self[index : index + 1] = items
        add = len(items) - 1
        for name, (i, j) in self._names.items():
            if i > index:
                self._names[name] = (i + add, None if j is None else j + add)
            elif i == index:
                self._set_name(name, i, end=i + len(items))

    def keys(self):
        return self._names.keys()

    def _plainstrings(self):
        return self.__class__.__call__(toclone=self, plainstr=True)

    def _stripped_constraints(self):
        return self.__class__.__call__(toclone=self, strip_constraints=True)

    def _clone(self):
        return self.__class__.__call__(toclone=self)

    def get(self, key, default_value=None):
        # names are stored as instance attributes, hence the __dict__ lookup
        return self.__dict__.get(key, default_value)

    def __getitem__(self, key):
        try:
            return super().__getitem__(key)
        except TypeError:
            pass

        # key is not an index, so it must be the name of an item
        return getattr(self, key)

    def __hash__(self):
        # hash over the values only; item names are not considered
        return hash(tuple(self))

    def __str__(self):
        return " ".join(map(str, self))
class InputFiles(Namedlist):
    """Namedlist of input files with aggregate size helpers."""

    @property
    def size(self):
        # total size in bytes across all input files
        return sum(f.size for f in self)

    @property
    def size_mb(self):
        # total size in mebibytes
        return self.size / 1024 / 1024
class OutputFiles(Namedlist):
    """Namedlist of output files."""

    pass
class Wildcards(Namedlist):
    """Namedlist of wildcard values."""

    pass
class Params(Namedlist):
    """Namedlist of rule parameters."""

    pass
class Resources(Namedlist):
    """Namedlist of rule resources."""

    pass
class Log(Namedlist):
    """Namedlist of log files."""

    pass
def _load_configfile(configpath_or_obj, filetype="Config"):
    "Tries to load a configfile first as JSON, then as YAML, into a dict."
    import yaml

    if isinstance(configpath_or_obj, str) or isinstance(configpath_or_obj, Path):
        obj = open(configpath_or_obj)
    else:
        obj = configpath_or_obj

    try:
        with obj as f:
            try:
                return json.load(f, object_pairs_hook=collections.OrderedDict)
            except ValueError:
                f.seek(0)  # try again
            try:
                # From https://stackoverflow.com/a/21912744/84349
                class OrderedLoader(yaml.Loader):
                    pass

                def construct_mapping(loader, node):
                    loader.flatten_mapping(node)
                    return collections.OrderedDict(loader.construct_pairs(node))

                OrderedLoader.add_constructor(
                    yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, construct_mapping
                )
                return yaml.load(f, Loader=OrderedLoader)
            except yaml.YAMLError:
                raise WorkflowError(
                    # placeholder added: the message previously called
                    # .format(filetype) without one, dropping the argument
                    "{} file is not valid JSON or YAML. "
                    "In case of YAML, make sure to not mix "
                    "whitespace and tab indentation.".format(filetype)
                )
    except FileNotFoundError:
        # previously referenced the undefined name `configpath` here, which
        # masked the real error with a NameError
        raise WorkflowError(
            "{} file {} not found.".format(filetype, configpath_or_obj)
        )
def load_configfile(configpath):
    "Loads a JSON or YAML configfile as a dict, then checks that it's a dict."
    config = _load_configfile(configpath)
    if not isinstance(config, dict):
        raise WorkflowError(
            "Config file must be given as JSON or YAML " "with keys at top level."
        )
    return config
##### Wildcard pumping detection #####
class PeriodicityDetector:
    """Detect periodic suffixes in strings (used to catch endless wildcard
    'pumping' during inference)."""

    def __init__(self, min_repeat=20, max_repeat=100):
        """
        Args:
            max_repeat (int): The maximum length of the periodic substring.
            min_repeat (int): The minimum length of the periodic substring.
        """
        self.min_repeat = min_repeat
        # one occurrence captured as 'value' plus min_repeat-1..max_repeat-1
        # backreferenced copies, anchored to the end of the string
        self.regex = re.compile(
            "((?P<value>.+)(?P=value){{{min_repeat},{max_repeat}}})$".format(
                min_repeat=min_repeat - 1, max_repeat=max_repeat - 1
            )
        )

    def is_periodic(self, value):
        """Returns the periodic substring or None if not periodic."""
        # cheap pre-checks first: the regex below is expensive
        if len(value) < self.min_repeat:
            return None
        tail_char = value[-1]
        char_counts = collections.Counter(value)
        if char_counts[tail_char] < self.min_repeat:
            return None
        # scan backwards until the tail character reappears; every character
        # inside the repeat unit must itself occur often enough
        offset = 2
        while value[-offset] != tail_char:
            if (
                len(value) < (offset * self.min_repeat)
                or char_counts[value[-offset]] < self.min_repeat
            ):
                return None
            offset += 1
        # only now run the costly regex search for a periodic suffix
        match = self.regex.search(value)
        return match.group("value") if match is not None else None
| 31.951691 | 124 | 0.584933 | __author__ = "Johannes Köster"
__copyright__ = "Copyright 2021, Johannes Köster"
__email__ = "johannes.koester@uni-due.de"
__license__ = "MIT"
import collections
import os
import shutil
from pathlib import Path
import re
import stat
import time
import datetime
import json
import copy
import functools
import subprocess as sp
from itertools import product, chain
from contextlib import contextmanager
import string
import collections
import asyncio
from snakemake.exceptions import (
MissingOutputException,
WorkflowError,
WildcardError,
RemoteFileException,
)
from snakemake.logging import logger
from inspect import isfunction, ismethod
from snakemake.common import DYNAMIC_FILL, ON_WINDOWS, async_run
class Mtime:
    """Bundle of up to three modification times: the local path itself, the
    target of a local symlink, and the remote copy."""

    __slots__ = ["_local", "_local_target", "_remote"]

    def __init__(self, local=None, local_target=None, remote=None):
        self._local = local
        self._local_target = local_target
        self._remote = remote

    def local(self, follow_symlinks=False):
        """Local mtime; with follow_symlinks, prefer the symlink target's."""
        if follow_symlinks and self._local_target is not None:
            return self._local_target
        return self._local

    def remote(self):
        """Remote mtime, or None if the file has no remote copy."""
        return self._remote

    def local_or_remote(self, follow_symlinks=False):
        """Remote mtime if available, otherwise the local one."""
        if self._remote is not None:
            return self._remote
        return self.local(follow_symlinks=follow_symlinks)
def lutime(f, times):
    """Set (atime, mtime) on *f* without following symlinks.

    On platforms where os.utime cannot operate on a symlink itself, fall
    back to the system `touch -h` command.
    """
    if os.utime in os.supports_follow_symlinks:
        os.utime(f, times, follow_symlinks=False)
    elif not os.path.islink(f):
        os.utime(f, times)
    else:
        try:
            if times:
                # `touch -t` expects CCYYMMDDhhmm.SS; the module is imported
                # as `import datetime`, so the class must be qualified
                # (plain `datetime.fromtimestamp` raised AttributeError)
                fmt_time = lambda sec: datetime.datetime.fromtimestamp(sec).strftime(
                    "%Y%m%d%H%M.%S"
                )
                atime, mtime = times
                sp.check_call(["touch", "-h", f, "-a", "-t", fmt_time(atime)])
                sp.check_call(["touch", "-h", f, "-m", "-t", fmt_time(mtime)])
            else:
                sp.check_call(["touch", "-h", f])
        except sp.CalledProcessError:
            # only warn when the fallback actually failed (previously the
            # warning was emitted even after a successful touch)
            logger.warning(
                "Unable to set utime on symlink {}. Your Python build does not support it.".format(
                    f
                )
            )
    return None
# Define lchmod once at import time, depending on platform capabilities.
if os.chmod in os.supports_follow_symlinks:

    def lchmod(f, mode):
        # chmod the link itself, not its target
        os.chmod(f, mode, follow_symlinks=False)

else:

    def lchmod(f, mode):
        # platform cannot chmod symlinks; fall back to a plain chmod
        os.chmod(f, mode)
class ExistsDict(dict):
    """Existence cache for paths; unknown paths are reported as non-existing."""

    def __init__(self, cache):
        super().__init__()
        # Back-reference to the owning IOCache.
        self.cache = cache
        # Directories whose entire contents have been inventorized.
        self.has_inventory = set()

    def __getitem__(self, path):
        # Missing keys default to False instead of raising KeyError.
        return self.get(path, False)

    def __contains__(self, path):
        # A path is considered "known" when its parent directory has been
        # fully inventorized, even if the path itself was never stored.
        if path.get_inventory_parent() in self.has_inventory:
            return True
        return super().__contains__(path)
class IOCache:
    """Cache for file metadata (mtime, size, existence) used during DAG evaluation.

    remaining_wait_time budgets how long local inventories may spend on
    filesystem scans before further inventory work is skipped.
    """
    def __init__(self, max_wait_time):
        self.mtime = dict()
        self.exists_local = ExistsDict(self)
        self.exists_remote = ExistsDict(self)
        self.size = dict()
        self.active = True
        self.remaining_wait_time = max_wait_time
        self.max_wait_time = max_wait_time
    def mtime_inventory(self, jobs):
        """Synchronously collect mtimes for all existing input/output files of jobs."""
        async_run(self._mtime_inventory(jobs))
    async def _mtime_inventory(self, jobs, n_workers=8):
        # Producer/consumer: a queue of files, n_workers async consumers.
        queue = asyncio.Queue()
        # Sentinel used to tell each worker to shut down.
        stop_item = object()
        async def worker(queue):
            while True:
                item = await queue.get()
                if item is stop_item:
                    queue.task_done()
                    return
                try:
                    self.mtime[item] = await self.collect_mtime(item)
                except Exception as e:
                    # Mark the item done before propagating, so queue.join
                    # style accounting stays consistent.
                    queue.task_done()
                    raise e
                queue.task_done()
        tasks = [
            asyncio.get_event_loop().create_task(worker(queue))
            for _ in range(n_workers)
        ]
        for job in jobs:
            for f in chain(job.input, job.expanded_output):
                if f.exists:
                    queue.put_nowait(f)
            if job.benchmark and job.benchmark.exists:
                queue.put_nowait(job.benchmark)
        # One sentinel per worker terminates the pool.
        for _ in range(n_workers):
            queue.put_nowait(stop_item)
        await asyncio.gather(*tasks)
    async def collect_mtime(self, path):
        # mtime_uncached performs the actual (blocking) stat calls.
        return path.mtime_uncached
    def clear(self):
        """Drop all cached metadata and reset the wait-time budget."""
        self.mtime.clear()
        self.size.clear()
        self.exists_local.clear()
        self.exists_remote.clear()
        self.remaining_wait_time = self.max_wait_time
    def deactivate(self):
        """Clear the cache and stop caching; lookups then hit the filesystem."""
        self.clear()
        self.active = False
def IOFile(file, rule=None):
    """Factory for _IOFile instances bound to a rule (rule is mandatory)."""
    assert rule is not None
    f = _IOFile(file)
    f.rule = rule
    return f
class _IOFile(str):
    """A file that is either an input or output of a rule.

    Subclasses str to behave like a plain path while carrying flags
    (via an AnnotatedString in self._file), a back-reference to its rule,
    and cached metadata. Construct via the IOFile() factory.
    """
    __slots__ = [
        "_is_function",
        "_file",
        "rule",
        "_regex",
    ]
    def __new__(cls, file):
        is_annotated = isinstance(file, AnnotatedString)
        is_callable = (
            isfunction(file) or ismethod(file) or (is_annotated and bool(file.callable))
        )
        # Normalize away trailing slashes (directory outputs), preserving flags.
        if not is_callable and file.endswith("/"):
            stripped = file.rstrip("/")
            if is_annotated:
                stripped = AnnotatedString(stripped)
                stripped.flags = file.flags
            file = stripped
        obj = str.__new__(cls, file)
        obj._is_function = is_callable
        obj._file = file
        obj.rule = None
        obj._regex = None
        # Let the remote object know which IOFile it belongs to.
        if obj.is_remote:
            obj.remote_object._iofile = obj
        return obj
    def iocache(func):
        # Decorator: memoize the property value in the workflow-level IOCache
        # (keyed by self) while the cache is active.
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            if self.rule.workflow.iocache.active:
                cache = getattr(self.rule.workflow.iocache, func.__name__)
                if self in cache:
                    return cache[self]
                v = func(self, *args, **kwargs)
                cache[self] = v
                return v
            else:
                return func(self, *args, **kwargs)
        return wrapper
    def _refer_to_remote(func):
        # Decorator: for remote files, delegate to the same-named method on
        # the remote object when it exists; otherwise fall back to the local
        # implementation.
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            if self.is_remote:
                if hasattr(self.remote_object, func.__name__):
                    return getattr(self.remote_object, func.__name__)(*args, **kwargs)
            return func(self, *args, **kwargs)
        return wrapper
    def inventory(self):
        """Starting from the given file, try to cache as much existence and
        modification date information of this and other files as possible.
        """
        async_run(self._inventory())
    async def _inventory(self):
        cache = self.rule.workflow.iocache
        if cache.active:
            tasks = []
            if self.is_remote and self not in cache.exists_remote:
                tasks.append(self.remote_object.inventory(cache))
            if not ON_WINDOWS and self not in cache.exists_local:
                # we don't want to mess with different path representations on windows
                tasks.append(self._local_inventory(cache))
            await asyncio.gather(*tasks)
    async def _local_inventory(self, cache):
        # Scan ancestor directories bottom-up, caching the existence of every
        # directory entry seen along the way.
        if cache.remaining_wait_time <= 0:
            # No more time budget to create inventory.
            return
        start_time = time.time()
        folders = self.split("/")[:-1]
        if not folders:
            return
        if os.path.isabs(self):
            # For absolute paths, only scan the immediate parent.
            ancestors = [os.path.dirname(self)]
        else:
            ancestors = ["/".join(folders[:i]) for i in range(1, len(folders) + 1)]
        for (i, path) in enumerate(ancestors):
            if path in cache.exists_local.has_inventory:
                # This path was already scanned before; deeper ones are covered.
                break
            try:
                with os.scandir(path) as scan:
                    for entry in scan:
                        cache.exists_local[entry.path] = True
                cache.exists_local[path] = True
                cache.exists_local.has_inventory.add(path)
            except FileNotFoundError:
                # Once a directory is missing, all deeper ones are too.
                for path in ancestors[i:]:
                    cache.exists_local[path] = False
                    cache.exists_local.has_inventory.add(path)
                break
            except PermissionError:
                raise WorkflowError(
                    "Insufficient permissions to access {}. "
                    "Please make sure that all accessed files and directories "
                    "are readable and writable for you.".format(self)
                )
        cache.remaining_wait_time -= time.time() - start_time
    @_refer_to_remote
    def get_inventory_parent(self):
        """Return the parent directory used for inventory bookkeeping, or None."""
        parent = os.path.dirname(self)
        if parent and parent != "..":
            return parent
    @contextmanager
    def open(self, mode="r", buffering=-1, encoding=None, errors=None, newline=None):
        """Context manager yielding an open file handle, downloading remote
        files first if needed.

        NOTE(review): mode/buffering/encoding/errors/newline are accepted but
        not forwarded to the builtin open() call below — confirm intended.
        """
        if not self.exists:
            raise WorkflowError(
                "File {} cannot be opened, since it does not exist.".format(self)
            )
        if not self.exists_local and self.is_remote:
            self.download_from_remote()
        # resolves to the builtin open(), not this method
        f = open(self)
        try:
            yield f
        finally:
            f.close()
    def contains_wildcard(self):
        return contains_wildcard(self.file)
    @property
    def is_remote(self):
        return is_flagged(self._file, "remote_object")
    @property
    def is_ancient(self):
        return is_flagged(self._file, "ancient")
    @property
    def is_directory(self):
        return is_flagged(self._file, "directory")
    @property
    def is_temp(self):
        return is_flagged(self._file, "temp")
    @property
    def is_multiext(self):
        return is_flagged(self._file, "multiext")
    @property
    def multiext_prefix(self):
        return get_flag_value(self._file, "multiext")
    def update_remote_filepath(self):
        # if the file string is different in the iofile, update the remote object
        # (as in the case of wildcard expansion)
        remote_object = self.remote_object
        if remote_object._file != self._file:
            remote_object._iofile = self
    @property
    def should_keep_local(self):
        return self.remote_object.keep_local
    @property
    def should_stay_on_remote(self):
        return self.remote_object.stay_on_remote
    @property
    def remote_object(self):
        return get_flag_value(self._file, "remote_object")
    @property
    @_refer_to_remote
    def file(self):
        """The underlying path string; raises for input-function files."""
        if not self._is_function:
            return self._file
        else:
            raise ValueError(
                "This IOFile is specified as a function and "
                "may not be used directly."
            )
    def check(self):
        """Warn about suspicious path spellings (./ prefix, whitespace, //)."""
        hint = (
            "It can also lead to inconsistent results of the file-matching "
            "approach used by Snakemake."
        )
        if self._file.startswith("./"):
            logger.warning(
                "Relative file path '{}' starts with './'. This is redundant "
                "and strongly discouraged. {} You can simply omit the './' "
                "for relative file paths.".format(self._file, hint)
            )
        if self._file.startswith(" "):
            logger.warning(
                "File path '{}' starts with whitespace. "
                "This is likely unintended. {}".format(self._file, hint)
            )
        if self._file.endswith(" "):
            logger.warning(
                "File path '{}' ends with whitespace. "
                "This is likely unintended. {}".format(self._file, hint)
            )
        if "\n" in self._file:
            logger.warning(
                "File path '{}' contains line break. "
                "This is likely unintended. {}".format(self._file, hint)
            )
        if _double_slash_regex.search(self._file) is not None and not self.is_remote:
            logger.warning(
                "File path {} contains double '{}'. "
                "This is likely unintended. {}".format(self._file, os.path.sep, hint)
            )
    @property
    def exists(self):
        if self.is_remote:
            return self.exists_remote
        else:
            return self.exists_local
    def parents(self, omit=0):
        """Yield all parent paths (outermost first), cloning this file's flags."""
        for p in list(Path(self.file).parents)[::-1][omit:]:
            p = IOFile(str(p), rule=self.rule)
            p.clone_flags(self)
            yield p
    @property
    @iocache
    def exists_local(self):
        return os.path.exists(self.file)
    @property
    @iocache
    def exists_remote(self):
        if not self.is_remote:
            return False
        return self.remote_object.exists()
    @property
    def protected(self):
        """True if the local file exists, is not writable, and is not a symlink."""
        return (
            self.exists_local
            and not os.access(self.file, os.W_OK)
            and not os.path.islink(self.file)
        )
    @property
    @iocache
    def mtime(self):
        return self.mtime_uncached
    @property
    def mtime_uncached(self):
        """Obtain the modification times of this file as an Mtime, without caching.

        For directory outputs, the mtime of the hidden .snakemake_timestamp
        marker inside the directory is used instead of the directory itself.
        """
        mtime_remote = self.remote_object.mtime() if self.is_remote else None
        try:
            _stat = os.stat(self.file, follow_symlinks=False)
            is_symlink = stat.S_ISLNK(_stat.st_mode)
            is_dir = stat.S_ISDIR(_stat.st_mode)
            mtime = _stat.st_mtime
            def get_dir_mtime():
                return os.stat(
                    os.path.join(self.file, ".snakemake_timestamp"),
                    follow_symlinks=True,
                ).st_mtime
            if not is_symlink:
                if is_dir:
                    try:
                        mtime = get_dir_mtime()
                    except FileNotFoundError:
                        pass
                return Mtime(local=mtime, remote=mtime_remote)
            else:
                # Symlink: additionally record the target's mtime.
                target_stat = os.stat(self.file, follow_symlinks=True)
                is_dir = stat.S_ISDIR(target_stat.st_mode)
                mtime_target = target_stat.st_mtime
                if is_dir:
                    try:
                        mtime_target = get_dir_mtime()
                    except FileNotFoundError:
                        pass
                return Mtime(
                    local=mtime, local_target=mtime_target, remote=mtime_remote
                )
        except FileNotFoundError:
            if self.is_remote:
                return Mtime(remote=mtime_remote)
            raise WorkflowError(
                "Unable to obtain modification time of file {} although it existed before. "
                "It could be that a concurrent process has deleted it while Snakemake "
                "was running.".format(self.file)
            )
        except PermissionError:
            raise WorkflowError(
                "Unable to obtain modification time of file {} because of missing "
                "read permissions.".format(self.file)
            )
    @property
    def flags(self):
        return getattr(self._file, "flags", {})
    @property
    @iocache
    @_refer_to_remote
    def size(self):
        return self.size_local
    @property
    def size_local(self):
        # follow symlink (and by that, also check existence)
        self.check_broken_symlink()
        return os.path.getsize(self.file)
    def check_broken_symlink(self):
        """Raise WorkflowError if file is a broken symlink."""
        if not self.exists_local and os.lstat(self.file):
            raise WorkflowError(
                "File {} seems to be a broken symlink.".format(self.file)
            )
    @_refer_to_remote
    def is_newer(self, time):
        """True if this file is newer than the given timestamp (ancient files never are)."""
        if self.is_ancient:
            return False
        return self.mtime.local_or_remote(follow_symlinks=True) > time
    def download_from_remote(self):
        if self.is_remote and self.remote_object.exists():
            if not self.should_stay_on_remote:
                logger.info("Downloading from remote: {}".format(self.file))
                self.remote_object.download()
                logger.info("Finished download.")
        else:
            raise RemoteFileException(
                "The file to be downloaded does not seem to exist remotely."
            )
    def upload_to_remote(self):
        if self.is_remote:
            logger.info("Uploading to remote: {}".format(self.file))
            self.remote_object.upload()
            logger.info("Finished upload.")
    def prepare(self):
        """Create parent directories (up to the first dynamic wildcard) and fifos."""
        path_until_wildcard = re.split(DYNAMIC_FILL, self.file)[0]
        dir = os.path.dirname(path_until_wildcard)
        if len(dir) > 0:
            try:
                os.makedirs(dir, exist_ok=True)
            except OSError as e:
                # 17 == EEXIST; tolerate races where the dir appears concurrently.
                if e.errno != 17:
                    raise e
        if is_flagged(self._file, "pipe"):
            os.mkfifo(self._file)
    def protect(self):
        """Remove write permissions (recursively for directory outputs)."""
        mode = (
            os.lstat(self.file).st_mode & ~stat.S_IWUSR & ~stat.S_IWGRP & ~stat.S_IWOTH
        )
        if os.path.isdir(self.file):
            for root, dirs, files in os.walk(self.file):
                for d in dirs:
                    lchmod(os.path.join(self.file, d), mode)
                for f in files:
                    lchmod(os.path.join(self.file, f), mode)
        lchmod(self.file, mode)
    def remove(self, remove_non_empty_dir=False):
        # Directory outputs may always be removed recursively.
        if self.is_directory:
            remove(self, remove_non_empty_dir=True)
        else:
            remove(self, remove_non_empty_dir=remove_non_empty_dir)
    def touch(self, times=None):
        """Update the mtime (for directory outputs, of the timestamp marker file)."""
        try:
            if self.is_directory:
                file = os.path.join(self.file, ".snakemake_timestamp")
                # Create the flag file if it doesn't exist
                if not os.path.exists(file):
                    with open(file, "w"):
                        pass
                lutime(file, times)
            else:
                lutime(self.file, times)
        except OSError as e:
            # 2 == ENOENT: the output to touch does not exist.
            if e.errno == 2:
                raise MissingOutputException(
                    "Output file {} of rule {} shall be touched but "
                    "does not exist.".format(self.file, self.rule.name),
                    lineno=self.rule.lineno,
                    snakefile=self.rule.snakefile,
                )
            else:
                raise e
    def touch_or_create(self):
        try:
            self.touch()
        except MissingOutputException:
            # first create directory if it does not yet exist
            dir = self.file if self.is_directory else os.path.dirname(self.file)
            if dir:
                os.makedirs(dir, exist_ok=True)
            # create empty file
            file = (
                os.path.join(self.file, ".snakemake_timestamp")
                if self.is_directory
                else self.file
            )
            with open(file, "w") as f:
                pass
    def apply_wildcards(self, wildcards, fill_missing=False, fail_dynamic=False):
        """Return a new IOFile with wildcards substituted (input functions are called)."""
        f = self._file
        if self._is_function:
            f = self._file(Namedlist(fromdict=wildcards))
        # this bit ensures flags are transferred over to files after
        # wildcards are applied
        file_with_wildcards_applied = IOFile(
            apply_wildcards(
                f,
                wildcards,
                fill_missing=fill_missing,
                fail_dynamic=fail_dynamic,
                dynamic_fill=DYNAMIC_FILL,
            ),
            rule=self.rule,
        )
        file_with_wildcards_applied.clone_flags(self)
        return file_with_wildcards_applied
    def get_wildcard_names(self):
        return get_wildcard_names(self.file)
    def regex(self):
        if self._regex is None:
            # compile a regular expression
            self._regex = re.compile(regex(self.file))
        return self._regex
    def constant_prefix(self):
        """The part of the path before the first wildcard."""
        first_wildcard = _wildcard_regex.search(self.file)
        if first_wildcard:
            return self.file[: first_wildcard.start()]
        return self.file
    def constant_suffix(self):
        """The part of the path after the last wildcard."""
        m = None
        for m in _wildcard_regex.finditer(self.file):
            pass
        last_wildcard = m
        if last_wildcard:
            return self.file[last_wildcard.end() :]
        return self.file
    def match(self, target):
        return self.regex().match(target) or None
    def format_dynamic(self):
        return self.replace(DYNAMIC_FILL, "{*}")
    def clone_flags(self, other):
        """Copy flags from another IOFile/AnnotatedString onto this file."""
        if isinstance(self._file, str):
            self._file = AnnotatedString(self._file)
        if isinstance(other._file, AnnotatedString):
            self._file.flags = getattr(other._file, "flags", {}).copy()
            if "remote_object" in self._file.flags:
                # Remote objects are stateful; each file needs its own copy.
                self._file.flags["remote_object"] = copy.copy(
                    self._file.flags["remote_object"]
                )
                self.update_remote_filepath()
    def clone_remote_object(self, other):
        if (
            isinstance(other._file, AnnotatedString)
            and "remote_object" in other._file.flags
        ):
            self._file.flags["remote_object"] = copy.copy(
                other._file.flags["remote_object"]
            )
            self.update_remote_filepath()
    def set_flags(self, flags):
        if isinstance(self._file, str):
            self._file = AnnotatedString(self._file)
        self._file.flags = flags
    def __eq__(self, other):
        # Compare on the underlying path string so IOFiles equal plain strings.
        f = other._file if isinstance(other, _IOFile) else other
        return self._file == f
    def __hash__(self):
        return self._file.__hash__()
# Detects a doubled path separator ("//" not preceded by ":" to spare URLs,
# or "\\" on Windows); used by _IOFile.check() to warn about suspicious paths.
_double_slash_regex = (
    re.compile(r"([^:]//|^//)") if os.path.sep == "/" else re.compile(r"\\\\")
)
_wildcard_regex = re.compile(
r"""
\{
(?=( # This lookahead assertion emulates an 'atomic group'
# which is required for performance
\s*(?P<name>\w+) # wildcard name
(\s*,\s*
(?P<constraint> # an optional constraint
([^{}]+ | \{\d+(,\d+)?\})* # allow curly braces to nest one level
) # ... as in '{w,a{3,5}}'
)?\s*
))\1
\}
""",
re.VERBOSE,
)
def wait_for_files(
    files, latency_wait=3, force_stay_on_remote=False, ignore_pipe=False
):
    """Wait for given files to be present in the filesystem.

    Polls once per second for up to latency_wait seconds and raises IOError
    listing the files that are still missing afterwards. Remote files that
    stay on remote are checked remotely; pipe outputs can be ignored.
    """
    files = list(files)
    def get_missing():
        return [
            f
            for f in files
            # The nested conditional expression reads as:
            #   remote-and-stays-remote  -> check exists_remote
            #   pipe (when ignored)      -> treat as present (True)
            #   otherwise                -> check the local filesystem
            if not (
                f.exists_remote
                if (
                    isinstance(f, _IOFile)
                    and f.is_remote
                    and (force_stay_on_remote or f.should_stay_on_remote)
                )
                else os.path.exists(f)
                if not (is_flagged(f, "pipe") and ignore_pipe)
                else True
            )
        ]
    missing = get_missing()
    if missing:
        logger.info(
            "Waiting at most {} seconds for missing files.".format(latency_wait)
        )
        for _ in range(latency_wait):
            if not get_missing():
                return
            time.sleep(1)
        raise IOError(
            "Missing files after {} seconds:\n{}".format(
                latency_wait, "\n".join(get_missing())
            )
        )
def get_wildcard_names(pattern):
    """Return the set of wildcard names appearing in the given pattern."""
    return {m.group("name") for m in _wildcard_regex.finditer(pattern)}
def contains_wildcard(path):
    """Return True if the given path contains at least one wildcard."""
    return bool(_wildcard_regex.search(path))
def contains_wildcard_constraints(pattern):
    """Return True if any wildcard in the pattern carries an inline constraint."""
    for m in _wildcard_regex.finditer(pattern):
        if m.group("constraint"):
            return True
    return False
def remove(file, remove_non_empty_dir=False):
    """Remove an output file, directory, or remote file, tolerating absence.

    Remote files that stay on remote are removed remotely; directories are
    removed recursively only when remove_non_empty_dir is set; dangling
    symlinks and the null device are handled specially.
    """
    if file.is_remote and file.should_stay_on_remote:
        if file.exists_remote:
            file.remote_object.remove()
    elif os.path.isdir(file) and not os.path.islink(file):
        if remove_non_empty_dir:
            shutil.rmtree(file)
        else:
            try:
                os.removedirs(file)
            except OSError as e:
                # skip non empty directories
                # BUG FIX: the literal 39 is ENOTEMPTY only on Linux (it is 66
                # on BSD/macOS); use the portable symbolic constant instead.
                if e.errno == errno.ENOTEMPTY:
                    logger.info(
                        "Skipped removing non-empty directory {}".format(e.filename)
                    )
                else:
                    logger.warning(str(e))
    # Remember that dangling symlinks fail the os.path.exists() test, but
    # we definitely still want to zap them. try/except is the safest way.
    # Also, we don't want to remove the null device if it is an output.
    elif os.devnull != str(file):
        try:
            os.remove(file)
        except FileNotFoundError:
            pass
def regex(filepattern):
    """Translate a wildcard file pattern into an anchored regex string.

    The first occurrence of each wildcard becomes a named group using its
    constraint (or '.+' by default); later occurrences become backreferences,
    forcing identical values. Literal parts are escaped and '$' is appended.
    """
    f = []
    last = 0
    wildcards = set()
    for match in _wildcard_regex.finditer(filepattern):
        f.append(re.escape(filepattern[last : match.start()]))
        wildcard = match.group("name")
        if wildcard in wildcards:
            if match.group("constraint"):
                # BUG FIX: corrected the typo "occurence" in this message.
                raise ValueError(
                    "Constraint regex must be defined only in the first "
                    "occurrence of the wildcard in a string."
                )
            # repeated wildcard -> backreference to the first occurrence
            f.append("(?P={})".format(wildcard))
        else:
            wildcards.add(wildcard)
            f.append(
                "(?P<{}>{})".format(
                    wildcard,
                    match.group("constraint") if match.group("constraint") else ".+",
                )
            )
        last = match.end()
    f.append(re.escape(filepattern[last:]))
    f.append("$")  # ensure that the match spans the whole file
    return "".join(f)
def apply_wildcards(
    pattern,
    wildcards,
    fill_missing=False,
    fail_dynamic=False,
    dynamic_fill=None,
    keep_dynamic=False,
):
    """Substitute wildcard values into a pattern.

    Missing wildcards either raise WildcardError, are kept verbatim
    (keep_dynamic), or are replaced by dynamic_fill (fill_missing).
    With fail_dynamic, encountering a dynamic_fill value raises.
    """
    def format_match(match):
        name = match.group("name")
        try:
            value = wildcards[name]
            if fail_dynamic and value == dynamic_fill:
                raise WildcardError(name)
            return str(value)  # convert anything into a str
        except KeyError as ex:
            if keep_dynamic:
                return "{{{}}}".format(name)
            elif fill_missing:
                return dynamic_fill
            else:
                raise WildcardError(str(ex))
    return _wildcard_regex.sub(format_match, pattern)
def not_iterable(value):
    """Return True if value should be treated as atomic (strings and dicts count)."""
    if isinstance(value, (str, dict)):
        return True
    return not isinstance(value, collections.abc.Iterable)
def is_callable(value):
    """True for plain callables, function-valued IOFiles, and callable AnnotatedStrings."""
    if callable(value):
        return True
    if isinstance(value, _IOFile) and value._is_function:
        return True
    return isinstance(value, AnnotatedString) and value.callable is not None
class AnnotatedString(str):
    """String subclass that carries a dict of flags and an optional callable."""
    def __init__(self, value):
        # flags: mapping of flag name -> flag value, e.g. {"temp": True}
        self.flags = dict()
        # keep a reference when the wrapped value is callable (input functions)
        self.callable = value if is_callable(value) else None
def flag(value, flag_type, flag_value=True):
    """Attach flag_type=flag_value to a value, wrapping it in an AnnotatedString.

    Iterables (other than strings/dicts) are flagged element-wise, returning
    a list of flagged values.
    """
    if isinstance(value, AnnotatedString):
        value.flags[flag_type] = flag_value
        return value
    if not_iterable(value):
        value = AnnotatedString(value)
        value.flags[flag_type] = flag_value
        return value
    return [flag(v, flag_type, flag_value=flag_value) for v in value]
def is_flagged(value, flag):
    """Return the truthiness of the given flag on value (False if absent/unflaggable)."""
    # AnnotatedString stores flags directly; _IOFile exposes them via a property.
    if isinstance(value, (AnnotatedString, _IOFile)):
        # dict.get with a False default is equivalent to the original
        # "flag in flags and flags[flag]" expression.
        return value.flags.get(flag, False)
    return False
def get_flag_value(value, flag_type):
    """Return the value stored for flag_type on value, or None if not flagged."""
    if isinstance(value, (AnnotatedString, _IOFile)):
        return value.flags.get(flag_type)
    # plain strings and other values carry no flags
    return None
def ancient(value):
    """Flag a file as ancient: its mtime is ignored when deciding reruns."""
    return flag(value, "ancient")
def directory(value):
    """Flag an output as a directory output (incompatible with pipe/remote/dynamic)."""
    if is_flagged(value, "pipe"):
        raise SyntaxError("Pipe and directory flags are mutually exclusive.")
    if is_flagged(value, "remote"):
        raise SyntaxError("Remote and directory flags are mutually exclusive.")
    if is_flagged(value, "dynamic"):
        raise SyntaxError("Dynamic and directory flags are mutually exclusive.")
    return flag(value, "directory")
def temp(value):
    """Flag a file as temporary: deleted once no job needs it anymore."""
    if is_flagged(value, "protected"):
        raise SyntaxError("Protected and temporary flags are mutually exclusive.")
    if is_flagged(value, "remote"):
        raise SyntaxError("Remote and temporary flags are mutually exclusive.")
    return flag(value, "temp")
def pipe(value):
    """Flag a file as a named pipe (fifo); disabled (flag value False) on Windows."""
    if is_flagged(value, "protected"):
        raise SyntaxError("Pipes may not be protected.")
    if is_flagged(value, "remote"):
        raise SyntaxError("Pipes may not be remote files.")
    if ON_WINDOWS:
        logger.warning("Pipes is not yet supported on Windows.")
    return flag(value, "pipe", not ON_WINDOWS)
def temporary(value):
    """An alias for temp."""
    return temp(value)
def protected(value):
    """Flag a file as protected: write permissions are removed after creation."""
    if is_flagged(value, "temp"):
        raise SyntaxError("Protected and temporary flags are mutually exclusive.")
    if is_flagged(value, "remote"):
        raise SyntaxError("Remote and protected flags are mutually exclusive.")
    return flag(value, "protected")
def dynamic(value):
    """Flag a file as dynamic: the number of matching files is unknown before runtime.

    Wildcards in dynamic files may not carry inline constraints.
    """
    annotated = flag(value, "dynamic", True)
    tocheck = [annotated] if not_iterable(annotated) else annotated
    for file in tocheck:
        matches = list(_wildcard_regex.finditer(file))
        for match in matches:
            if match.group("constraint"):
                raise SyntaxError(
                    "The wildcards in dynamic files cannot be constrained."
                )
    return annotated
def touch(value):
    """Flag an output to be created by touching (empty file / mtime update)."""
    return flag(value, "touch")
def unpack(value):
    """Flag an input function so its returned dict/list is unpacked into the rule."""
    return flag(value, "unpack")
def repeat(value, n_repeat):
    """Flag benchmark records with the number of repeats."""
    return flag(value, "repeat", n_repeat)
def checkpoint_target(value):
    """Flag a file as the output of a checkpoint (re-evaluated after completion)."""
    return flag(value, "checkpoint_target")
# Metadata bundle stored as the flag value for files marked with report().
ReportObject = collections.namedtuple(
    "ReportObject", ["caption", "category", "subcategory", "patterns", "htmlindex"]
)
def report(
    value, caption=None, category=None, subcategory=None, patterns=[], htmlindex=None
):
    """Flag output files for inclusion in the Snakemake report.

    NOTE: the mutable default for `patterns` is safe here since it is only
    stored, never mutated.
    """
    return flag(
        value,
        "report",
        ReportObject(caption, category, subcategory, patterns, htmlindex),
    )
def local(value):
    """Flag a file as local: do not upload it to a default remote provider."""
    if is_flagged(value, "remote"):
        raise SyntaxError("Remote and local flags are mutually exclusive.")
    return flag(value, "local")
def expand(*args, **wildcards):
    """Expand wildcards in the given filepatterns with all given value combinations.

    args[0] is a pattern or list of patterns; an optional args[1] is the
    combinator (default itertools.product, e.g. zip for pairwise expansion).
    With allow_missing=True, wildcards without values are kept verbatim.

    NOTE(review): `combinator` is only assigned for len(args) in (1, 2); more
    positional args would hit a NameError below — confirm callers never do this.
    """
    filepatterns = args[0]
    if len(args) == 1:
        combinator = product
    elif len(args) == 2:
        combinator = args[1]
    if isinstance(filepatterns, str) or isinstance(filepatterns, Path):
        filepatterns = [filepatterns]
    def path_to_str(f):
        if isinstance(f, Path):
            return str(f)
        return f
    filepatterns = list(map(path_to_str, filepatterns))
    # Flags must be applied outside of expand; flagged patterns are rejected.
    if any(map(lambda f: getattr(f, "flags", {}), filepatterns)):
        raise WorkflowError(
            "Flags in file patterns given to expand() are invalid. "
            "Flags (e.g. temp(), directory()) have to be applied outside "
            "of expand (e.g. 'temp(expand(\"plots/{sample}.pdf\", sample=SAMPLES))')."
        )
    # check if remove missing is provided
    format_dict = dict
    if "allow_missing" in wildcards and wildcards["allow_missing"] is True:
        # FormatDict leaves unknown wildcards in place instead of raising.
        class FormatDict(dict):
            def __missing__(self, key):
                return "{" + key + "}"
        format_dict = FormatDict
        # check that remove missing is not a wildcard in the filepatterns
        for filepattern in filepatterns:
            if "allow_missing" in re.findall(r"{([^}\.[!:]+)", filepattern):
                format_dict = dict
                break
    # remove unused wildcards to avoid duplicate filepatterns
    wildcards = {
        filepattern: {
            k: v
            for k, v in wildcards.items()
            if k in re.findall(r"{([^}\.[!:]+)", filepattern)
        }
        for filepattern in filepatterns
    }
    def flatten(wildcards):
        for wildcard, values in wildcards.items():
            if isinstance(values, str) or not isinstance(
                values, collections.abc.Iterable
            ):
                values = [values]
            yield [(wildcard, value) for value in values]
    formatter = string.Formatter()
    try:
        return [
            formatter.vformat(filepattern, (), comb)
            for filepattern in filepatterns
            for comb in map(format_dict, combinator(*flatten(wildcards[filepattern])))
        ]
    except KeyError as e:
        raise WildcardError("No values given for wildcard {}.".format(e))
def multiext(prefix, *extensions):
    """Expand a given prefix with multiple extensions (e.g. .txt, .csv, _peaks.bed, ...).

    Each returned file is flagged with "multiext" carrying the shared prefix.
    """
    # BUG FIX: the original tested `r"\\" in ext`, which is TWO backslash
    # characters, so a single backslash delimiter slipped through. Test for a
    # single backslash instead.
    if any(("/" in ext or "\\" in ext) for ext in extensions):
        raise WorkflowError(
            r"Extensions for multiext may not contain path delimiters " r"(/,\)."
        )
    return [flag(prefix + ext, "multiext", flag_value=prefix) for ext in extensions]
def limit(pattern, **wildcards):
    """Limit wildcards to the given values, e.g. limit("{a}", a=["x","y"]) -> "{a,x|y}"."""
    constraints = {}
    for name, values in wildcards.items():
        # Rewrite each wildcard into its constrained form {name,v1|v2|...}.
        constraints[name] = "{{{},{}}}".format(name, "|".join(values))
    return pattern.format(**constraints)
def glob_wildcards(pattern, files=None, followlinks=False):
    """Glob the values of wildcards by matching a pattern against the filesystem.

    Returns a named tuple with one list of matched values per wildcard.
    If files is given, match against that iterable instead of walking disk.
    """
    pattern = os.path.normpath(pattern)
    # search the directory prefix before the first wildcard
    first_wildcard = re.search("{[^{]", pattern)
    dirname = (
        os.path.dirname(pattern[: first_wildcard.start()])
        if first_wildcard
        else os.path.dirname(pattern)
    )
    if not dirname:
        dirname = "."
    names = [match.group("name") for match in _wildcard_regex.finditer(pattern)]
    Wildcards = collections.namedtuple("Wildcards", names)
    wildcards = Wildcards(*[list() for name in names])
    pattern = re.compile(regex(pattern))
    if files is None:
        # lazily walk the tree; both files and directories can match
        files = (
            os.path.normpath(os.path.join(dirpath, f))
            for dirpath, dirnames, filenames in os.walk(
                dirname, followlinks=followlinks
            )
            for f in chain(filenames, dirnames)
        )
    for f in files:
        match = re.match(pattern, f)
        if match:
            for name, value in match.groupdict().items():
                getattr(wildcards, name).append(value)
    return wildcards
def update_wildcard_constraints(
    pattern, wildcard_constraints, global_wildcard_constraints
):
    """Update wildcard constraints in a pattern.

    Rule-level constraints take precedence over global ones; inline
    constraints already present in the pattern are never overridden.
    Flags of AnnotatedString patterns are preserved.
    """
    def replace_constraint(match):
        name = match.group("name")
        constraint = match.group("constraint")
        newconstraint = wildcard_constraints.get(
            name, global_wildcard_constraints.get(name)
        )
        # only replace the first occurrence of each wildcard
        if name in examined_names:
            return match.group(0)
        examined_names.add(name)
        # Don't override if constraint already set
        if constraint is not None:
            return match.group(0)
        # Only update if a new constraint has actually been set
        elif newconstraint is not None:
            return "{{{},{}}}".format(name, newconstraint)
        else:
            return match.group(0)
    examined_names = set()
    updated = _wildcard_regex.sub(replace_constraint, pattern)
    # inherit flags
    if isinstance(pattern, AnnotatedString):
        updated = AnnotatedString(updated)
        updated.flags = dict(pattern.flags)
    return updated
def split_git_path(path):
    """Split a "git+file:///repo/path@version" URL into (root, relative path, version)."""
    file_sub = re.sub(r"^git\+file:/+", "/", path)
    (file_path, version) = file_sub.split("@")
    file_path = os.path.realpath(file_path)
    root_path = get_git_root(file_path)
    if file_path.startswith(root_path):
        # strip the repository root to obtain an in-repo relative path
        file_path = file_path[len(root_path) :].lstrip("/")
    return (root_path, file_path, version)
def get_git_root(path):
    """Return the toplevel directory of the git repository containing path.

    If the path itself does not exist, search upward from its parent.
    Requires GitPython (imported lazily).
    """
    import git
    try:
        git_repo = git.Repo(path, search_parent_directories=True)
        return git_repo.git.rev_parse("--show-toplevel")
    except git.exc.NoSuchPathError:
        # path does not exist: retry from its parent directory
        tail, head = os.path.split(path)
        return get_git_root_parent_directory(tail, path)
def get_git_root_parent_directory(path, input_path):
    """Recursively search parent directories of path for a git repository root.

    input_path is the originally requested path, used only in the error message.
    """
    import git
    try:
        git_repo = git.Repo(path, search_parent_directories=True)
        return git_repo.git.rev_parse("--show-toplevel")
    except git.exc.NoSuchPathError:
        tail, head = os.path.split(path)
        # NOTE(review): os.path.split never returns None; at the filesystem
        # root tail stays "/" so this check can never fire and the recursion
        # would not terminate there — confirm whether `if not head:` was meant.
        if tail is None:
            raise WorkflowError(
                "Neither provided git path ({}) ".format(input_path)
                + "or parent directories contain a valid git repo."
            )
        else:
            return get_git_root_parent_directory(tail, input_path)
def git_content(git_file):
    """Return the content of a file addressed as git+file://PATH@VERSION.

    Uses `git show VERSION:PATH` via GitPython; raises WorkflowError for
    paths that do not start with the git+file: scheme.
    """
    import git
    if git_file.startswith("git+file:"):
        (root_path, file_path, version) = split_git_path(git_file)
        return git.Repo(root_path).git.show("{}:{}".format(version, file_path))
    else:
        raise WorkflowError(
            "Provided git path ({}) doesn't meet the "
            "expected format:".format(git_file) + ", expected format is "
            "git+file://PATH_TO_REPO/PATH_TO_FILE_INSIDE_REPO@VERSION"
        )
def strip_wildcard_constraints(pattern):
    """Return the pattern with all inline wildcard constraints removed."""
    def strip_constraint(match):
        # keep only the wildcard name, dropping ",constraint"
        return "{" + match.group("name") + "}"
    return _wildcard_regex.sub(strip_constraint, pattern)
class Namedlist(list):
    """A list that additionally provides attribute access to its items.

    Names map to either a single index or an (index, end) slice. Used for
    input/output/params/wildcards/log collections of a rule.
    """
    def __init__(
        self,
        toclone=None,
        fromdict=None,
        plainstr=False,
        strip_constraints=False,
        custom_map=None,
    ):
        """Create the list from another Namedlist/iterable (toclone) or a dict.

        plainstr converts items to plain str; strip_constraints removes
        wildcard constraints; custom_map applies an arbitrary mapping.
        """
        list.__init__(self)
        self._names = dict()
        # default attributes, either shadowed by name assignments or
        # raising an informative error when used as methods
        self._allowed_overrides = ["index", "sort"]
        for name in self._allowed_overrides:
            setattr(self, name, functools.partial(self._used_attribute, _name=name))
        if toclone:
            if custom_map is not None:
                self.extend(map(custom_map, toclone))
            elif plainstr:
                self.extend(map(str, toclone))
            elif strip_constraints:
                self.extend(map(strip_wildcard_constraints, toclone))
            else:
                self.extend(toclone)
            if isinstance(toclone, Namedlist):
                self._take_names(toclone._get_names())
        if fromdict:
            for key, item in fromdict.items():
                self.append(item)
                self._add_name(key)
    @staticmethod
    def _used_attribute(*args, _name, **kwargs):
        """Placeholder for reserved attribute names (index, sort) used as methods."""
        raise AttributeError(
            "{_name}() cannot be used; attribute name reserved"
            " for use in some existing workflows".format(_name=_name)
        )
    def _add_name(self, name):
        """Name the last item added to the list."""
        self._set_name(name, len(self) - 1)
    def _set_name(self, name, index, end=None):
        """Bind name to self[index] (end=None) or the slice self[index:end]."""
        if name not in self._allowed_overrides and hasattr(self.__class__, name):
            raise AttributeError(
                "invalid name for input, output, wildcard, "
                "params or log: {name} is reserved for internal use".format(name=name)
            )
        self._names[name] = (index, end)
        if end is None:
            setattr(self, name, self[index])
        else:
            setattr(self, name, Namedlist(toclone=self[index:end]))
    def _get_names(self):
        """Yield (name, (index, end)) pairs."""
        for name, index in self._names.items():
            yield name, index
    def _take_names(self, names):
        """Adopt the given (name, (index, end)) assignments."""
        for name, (i, j) in names:
            self._set_name(name, i, end=j)
    def items(self):
        for name in self._names:
            yield name, getattr(self, name)
    def _allitems(self):
        # Yield every item in list order as (name_or_None, item), collapsing
        # named slices into a single yielded Namedlist.
        next = 0
        for name, index in sorted(
            self._names.items(),
            key=lambda item: (
                item[1][0],
                item[1][0] + 1 if item[1][1] is None else item[1][1],
            ),
        ):
            start, end = index
            if end is None:
                end = start + 1
            if start > next:
                # unnamed items between named entries
                for item in self[next:start]:
                    yield None, item
            yield name, getattr(self, name)
            next = end
        for item in self[next:]:
            yield None, item
    def _insert_items(self, index, items):
        # Replace the single item at index by the given items, shifting the
        # stored name indices accordingly.
        self[index : index + 1] = items
        add = len(items) - 1
        for name, (i, j) in self._names.items():
            if i > index:
                self._names[name] = (i + add, None if j is None else j + add)
            elif i == index:
                self._set_name(name, i, end=i + len(items))
    def keys(self):
        return self._names.keys()
    def _plainstrings(self):
        """Clone with all items converted to plain strings."""
        return self.__class__.__call__(toclone=self, plainstr=True)
    def _stripped_constraints(self):
        """Clone with wildcard constraints stripped from all items."""
        return self.__class__.__call__(toclone=self, strip_constraints=True)
    def _clone(self):
        return self.__class__.__call__(toclone=self)
    def get(self, key, default_value=None):
        return self.__dict__.get(key, default_value)
    def __getitem__(self, key):
        # integer/slice access first; fall back to name lookup
        try:
            return super().__getitem__(key)
        except TypeError:
            pass
        return getattr(self, key)
    def __hash__(self):
        return hash(tuple(self))
    def __str__(self):
        return " ".join(map(str, self))
class InputFiles(Namedlist):
    """Namedlist of input files with aggregate size helpers."""
    @property
    def size(self):
        # total size of all input files in bytes
        return sum(f.size for f in self)
    @property
    def size_mb(self):
        # total size in mebibytes
        return self.size / 1024 / 1024
class OutputFiles(Namedlist):
    """Namedlist of output files of a rule."""
    pass
class Wildcards(Namedlist):
    """Namedlist of wildcard values of a job."""
    pass
class Params(Namedlist):
    """Namedlist of parameters of a rule."""
    pass
class Resources(Namedlist):
    """Namedlist of resources of a rule."""
    pass
class Log(Namedlist):
    """Namedlist of log files of a rule."""
    pass
def _load_configfile(configpath_or_obj, filetype="Config"):
    """Load a config file (path, Path, or open file object) as JSON or YAML.

    JSON is tried first; on failure the stream is rewound and parsed as YAML.
    Mappings are returned as OrderedDicts preserving key order. Raises
    WorkflowError when the file is missing or neither valid JSON nor YAML.
    """
    import yaml

    if isinstance(configpath_or_obj, (str, Path)):
        obj = open(configpath_or_obj)
    else:
        obj = configpath_or_obj
    try:
        with obj as f:
            try:
                return json.load(f, object_pairs_hook=collections.OrderedDict)
            except ValueError:
                # Not JSON: rewind and retry as YAML.
                f.seek(0)
                try:
                    # Custom loader keeping mapping order as OrderedDict.
                    class OrderedLoader(yaml.Loader):
                        pass

                    def construct_mapping(loader, node):
                        loader.flatten_mapping(node)
                        return collections.OrderedDict(loader.construct_pairs(node))

                    OrderedLoader.add_constructor(
                        yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
                        construct_mapping,
                    )
                    return yaml.load(f, Loader=OrderedLoader)
                except yaml.YAMLError:
                    # BUG FIX: the original called .format(filetype) on a string
                    # with no placeholder; include the filetype in the message.
                    raise WorkflowError(
                        "{} file is not valid JSON or YAML. "
                        "In case of YAML, make sure to not mix "
                        "whitespace and tab indentation.".format(filetype)
                    )
    except FileNotFoundError:
        # BUG FIX: the original referenced the undefined name `configpath`,
        # raising NameError instead of the intended WorkflowError.
        raise WorkflowError("{} file {} not found.".format(filetype, configpath_or_obj))
def load_configfile(configpath):
    """Load a JSON or YAML configfile, requiring a mapping at top level."""
    config = _load_configfile(configpath)
    if not isinstance(config, dict):
        raise WorkflowError(
            "Config file must be given as JSON or YAML " "with keys at top level."
        )
    return config
"((?P<value>.+)(?P=value){{{min_repeat},{max_repeat}}})$".format(
min_repeat=min_repeat - 1, max_repeat=max_repeat - 1
)
)
    def is_periodic(self, value):
        """Return the periodic repeat unit of value's suffix, or None if not periodic.

        Cheap pre-checks (length and per-character counts against min_repeat)
        prune obvious non-periodic strings before running the backtracking
        regex; pos scans backwards for the second occurrence of the last char.
        """
        if len(value) < self.min_repeat:
            return None
        # self.regex/min_repeat/max_repeat are presumably set up in __init__
        # (outside this view) — verify against the enclosing class.
        last_letter = value[-1]
        counter = collections.Counter(value)
        if counter[last_letter] < self.min_repeat:
            return None
        # search for a second occurrence of the last letter from the end
        pos = 2
        while (
            value[-pos] != last_letter
        ):
            if (
                # each candidate period char must itself repeat often enough
                len(value) < (pos * self.min_repeat)
                or counter[value[-pos]] < self.min_repeat
            ):
                return None
            pos += 1
        # fall back to the (expensive) regex to confirm periodicity
        m = self.regex.search(value)  # search for a periodic suffix.
        if m is not None:
            return m.group("value")
| true | true |
f71e5ea1ec662b1ee9b1d78de3fc6bfa709dc4fd | 2,100 | py | Python | src/compas_plotters/artists/pointartist.py | funkchaser/compas | b58de8771484aa0c6068d43df78b1679503215de | [
"MIT"
] | 235 | 2017-11-07T07:33:22.000Z | 2022-03-25T16:20:00.000Z | src/compas_plotters/artists/pointartist.py | funkchaser/compas | b58de8771484aa0c6068d43df78b1679503215de | [
"MIT"
] | 770 | 2017-09-22T13:42:06.000Z | 2022-03-31T21:26:45.000Z | src/compas_plotters/artists/pointartist.py | funkchaser/compas | b58de8771484aa0c6068d43df78b1679503215de | [
"MIT"
] | 99 | 2017-11-06T23:15:28.000Z | 2022-03-25T16:05:36.000Z | from typing import Tuple
from typing import List
from typing import Any
from matplotlib.patches import Circle
from matplotlib.transforms import ScaledTranslation
from compas.geometry import Point
from compas.artists import PrimitiveArtist
from .artist import PlotterArtist
Color = Tuple[float, float, float]
class PointArtist(PlotterArtist, PrimitiveArtist):
"""Artist for COMPAS points."""
def __init__(self,
point: Point,
size: int = 5,
facecolor: Color = (1.0, 1.0, 1.0),
edgecolor: Color = (0, 0, 0),
zorder: int = 9000,
**kwargs: Any):
super().__init__(primitive=point, **kwargs)
self._mpl_circle = None
self._size = None
self.size = size
self.facecolor = facecolor
self.edgecolor = edgecolor
self.zorder = zorder
@property
def point(self):
return self.primitive
@point.setter
def point(self, point):
self.primitive = point
@property
def _T(self):
F = self.plotter.figure.dpi_scale_trans
S = ScaledTranslation(self.point[0], self.point[1], self.plotter.axes.transData)
T = F + S
return T
@property
def size(self) -> float:
return self._size / self.plotter.dpi
@size.setter
def size(self, size: int):
self._size = size
@property
def data(self) -> List[List[float]]:
return [self.point[:2]]
def draw(self) -> None:
circle = Circle(
[0, 0],
radius=self.size,
facecolor=self.facecolor,
edgecolor=self.edgecolor,
transform=self._T,
zorder=self.zorder
)
self._mpl_circle = self.plotter.axes.add_artist(circle)
self.update_data()
def redraw(self) -> None:
self._mpl_circle.set_radius(self.size)
self._mpl_circle.set_edgecolor(self.edgecolor)
self._mpl_circle.set_facecolor(self.facecolor)
self._mpl_circle.set_transform(self._T)
self.update_data()
| 26.25 | 88 | 0.601905 | from typing import Tuple
from typing import List
from typing import Any
from matplotlib.patches import Circle
from matplotlib.transforms import ScaledTranslation
from compas.geometry import Point
from compas.artists import PrimitiveArtist
from .artist import PlotterArtist
Color = Tuple[float, float, float]
class PointArtist(PlotterArtist, PrimitiveArtist):
def __init__(self,
point: Point,
size: int = 5,
facecolor: Color = (1.0, 1.0, 1.0),
edgecolor: Color = (0, 0, 0),
zorder: int = 9000,
**kwargs: Any):
super().__init__(primitive=point, **kwargs)
self._mpl_circle = None
self._size = None
self.size = size
self.facecolor = facecolor
self.edgecolor = edgecolor
self.zorder = zorder
@property
def point(self):
return self.primitive
@point.setter
def point(self, point):
self.primitive = point
@property
def _T(self):
F = self.plotter.figure.dpi_scale_trans
S = ScaledTranslation(self.point[0], self.point[1], self.plotter.axes.transData)
T = F + S
return T
@property
def size(self) -> float:
return self._size / self.plotter.dpi
@size.setter
def size(self, size: int):
self._size = size
@property
def data(self) -> List[List[float]]:
return [self.point[:2]]
def draw(self) -> None:
circle = Circle(
[0, 0],
radius=self.size,
facecolor=self.facecolor,
edgecolor=self.edgecolor,
transform=self._T,
zorder=self.zorder
)
self._mpl_circle = self.plotter.axes.add_artist(circle)
self.update_data()
def redraw(self) -> None:
self._mpl_circle.set_radius(self.size)
self._mpl_circle.set_edgecolor(self.edgecolor)
self._mpl_circle.set_facecolor(self.facecolor)
self._mpl_circle.set_transform(self._T)
self.update_data()
| true | true |
f71e5fdeb867d750cbfc67468c01f5b8fc81fd15 | 9,510 | py | Python | modules/s3/s3fields.py | apocsantos/eden | 8b71af6b4dc72e2c4d656dbe1bb3943426705422 | [
"MIT"
] | 1 | 2017-03-15T23:29:41.000Z | 2017-03-15T23:29:41.000Z | modules/s3/s3fields.py | apocsantos/eden | 8b71af6b4dc72e2c4d656dbe1bb3943426705422 | [
"MIT"
] | null | null | null | modules/s3/s3fields.py | apocsantos/eden | 8b71af6b4dc72e2c4d656dbe1bb3943426705422 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
S3 Extensions for gluon.dal.Field, reusable fields
@requires: U{B{I{gluon}} <http://web2py.com>}
@author: Dominic König <dominic[at]aidiq.com>
@copyright: 2009-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["QueryS3",
"FieldS3",
"S3ReusableField",
"s3uuid",
"s3_meta_uuid",
"s3_meta_mci",
"s3_uid",
"s3_meta_deletion_status",
"s3_meta_deletion_fk",
"s3_deletion_status",
"s3_meta_created_on",
"s3_meta_modified_on",
"s3_timestamp"]
import datetime
from gluon import current
from gluon.dal import Query, Field, SQLCustomType
from gluon.storage import Storage
from gluon.html import *
from gluon.validators import *
# =============================================================================
class QueryS3(Query):
    """
    S3 extension of the gluon.sql.Query class.

    If Server Side Pagination is on, the proper CAST is needed to match
    the string-typed id to the lookup table id.

    @author: sunneach
    """

    def __init__(self, left, op=None, right=None):
        if op != "join_via":
            # Ordinary comparison: delegate to the standard Query.
            Query.__init__(self, left, op, right)
        else:
            # "join_via": strip the surrounding "|" delimiters from the
            # stored reference value and CAST the result to INTEGER so it
            # can be matched against the lookup table's integer id.
            # NOTE: the previous literal ("CAST(TRIM(%s,"|") ...") had
            # broken quoting -- the inner double quote terminated the
            # Python string, so this branch raised a TypeError instead of
            # producing SQL. Single quotes fix it.
            self.sql = "CAST(TRIM(%s,'|') AS INTEGER)=%s" % (left, right)
# =============================================================================
class FieldS3(Field):
    """
    S3 extension of the gluon.sql.Field class.

    Adds a ``sortby`` attribute and a ``join_via`` query helper. If
    Server Side Pagination is on, the proper CAST is needed to match
    the lookup table id.

    @author: sunneach
    """

    def __init__(self, fieldname,
                 type="string",
                 length=None,
                 default=None,
                 required=False,
                 requires="<default>",
                 ondelete="CASCADE",
                 notnull=False,
                 unique=False,
                 uploadfield=True,
                 widget=None,
                 label=None,
                 comment=None,
                 writable=True,
                 readable=True,
                 update=None,
                 authorize=None,
                 autodelete=False,
                 represent=None,
                 uploadfolder=None,
                 compute=None,
                 sortby=None):
        # Extra attribute not supported by gluon's Field: names the
        # field to sort by when this field is represented in lists.
        self.sortby = sortby
        # Everything else is forwarded POSITIONALLY -- the argument
        # order below must match gluon's Field.__init__ signature
        # exactly; do not reorder.
        Field.__init__(self,
                       fieldname,
                       type,
                       length,
                       default,
                       required,
                       requires,
                       ondelete,
                       notnull,
                       unique,
                       uploadfield,
                       widget,
                       label,
                       comment,
                       writable,
                       readable,
                       update,
                       authorize,
                       autodelete,
                       represent,
                       uploadfolder,
                       compute)

    def join_via(self, value):
        """
        Build a join query against this field.

        Reference fields can be compared directly; other field types go
        through QueryS3's special CAST/TRIM "join_via" handling.
        """
        if self.type.find("reference") == 0:
            return Query(self, "=", value)
        else:
            return QueryS3(self, "join_via", value)
# =============================================================================
class S3ReusableField(object):
    """
    DRY Helper for reusable fields:

    This creates neither a Table nor a Field, but just
    an argument store. The field is created with the __call__
    method, which is faster than copying an existing field.

    @author: Dominic König
    """

    def __init__(self, name, type="string", **attr):
        # Default field name, field type and constructor keyword
        # arguments; all of these can be overridden per call.
        self.name = name
        self.__type = type
        self.attr = Storage(attr)

    def __call__(self, name=None, **attr):
        """
        Instantiate the field.

        @param name: override the default field name
        @param attr: per-call keyword overrides; the special keyword
            ``empty=False`` strips a wrapping IS_EMPTY_OR validator so
            the field becomes mandatory, and ``script`` (if present) is
            appended to the field comment.
        """
        if not name:
            name = self.name
        # Copy the stored defaults so per-call overrides don't leak
        # back into self.attr.
        ia = Storage(self.attr)
        if attr:
            if not attr.get("empty", True):
                # empty=False: unwrap IS_EMPTY_OR to make the field
                # required (only the first validator is inspected).
                requires = ia.requires
                if requires:
                    if not isinstance(requires, (list, tuple)):
                        requires = [requires]
                    if requires:
                        r = requires[0]
                        if isinstance(r, IS_EMPTY_OR):
                            requires = r.other
                        ia.update(requires=requires)
            if "empty" in attr:
                # "empty" is a pseudo-keyword; never pass it to Field.
                del attr["empty"]
            ia.update(**attr)
        if "script" in ia:
            if ia.script:
                # Merge the script into the comment so it is rendered
                # alongside the field.
                if ia.comment:
                    ia.comment = TAG[""](ia.comment, ia.script)
                else:
                    ia.comment = ia.script
            del ia["script"]
        # NB Storage returns None for missing keys, so ia.sortby is
        # None unless explicitly configured; only then do we need the
        # FieldS3 subclass.
        if ia.sortby is not None:
            return FieldS3(name, self.__type, **ia)
        else:
            return Field(name, self.__type, **ia)
# =============================================================================
# Use URNs according to http://tools.ietf.org/html/rfc4122
def s3_uuid():
    """Return a freshly generated random (version 4) UUID object."""
    from uuid import uuid4
    return uuid4()
s3uuid = SQLCustomType(type = "string",
native = "VARCHAR(128)",
encoder = lambda x: "%s" % (s3_uuid().urn
if x == ""
else unicode(x.encode("utf-8"))),
decoder = lambda x: x)
try:
db = current.db
except:
# Running from 000_1st_run
pass
else:
if current.db._adapter.represent("X", s3uuid) != "'X'":
# Old web2py DAL, must add quotes in encoder
s3uuid = SQLCustomType(type = "string",
native = "VARCHAR(128)",
encoder = (lambda x: "'%s'" % (s3_uuid().urn
if x == ""
else str(x.encode("utf-8")).replace("'", "''"))),
decoder = (lambda x: x))
# =============================================================================
# Record identity meta-fields
# Universally unique identifier for a record
s3_meta_uuid = S3ReusableField("uuid",
type=s3uuid,
length=128,
notnull=True,
unique=True,
readable=False,
writable=False,
default="")
# Master-Copy-Index (for Sync)
s3_meta_mci = S3ReusableField("mci", "integer",
default=0,
readable=False,
writable=False)
def s3_uid():
return (s3_meta_uuid(),
s3_meta_mci())
# =============================================================================
# Record "soft"-deletion meta-fields
# "Deleted"-flag
s3_meta_deletion_status = S3ReusableField("deleted", "boolean",
readable=False,
writable=False,
default=False)
# Parked foreign keys of a deleted record
# => to be restored upon "un"-delete
s3_meta_deletion_fk = S3ReusableField("deleted_fk", #"text",
readable=False,
writable=False)
def s3_deletion_status():
return (s3_meta_deletion_status(),
s3_meta_deletion_fk())
# -----------------------------------------------------------------------------
# Record timestamp meta-fields
s3_meta_created_on = S3ReusableField("created_on", "datetime",
readable=False,
writable=False,
default=lambda: datetime.datetime.utcnow())
s3_meta_modified_on = S3ReusableField("modified_on", "datetime",
readable=False,
writable=False,
default=lambda: datetime.datetime.utcnow(),
update=lambda: datetime.datetime.utcnow())
def s3_timestamp():
return (s3_meta_created_on(),
s3_meta_modified_on())
# =============================================================================
| 33.020833 | 93 | 0.465089 |
"""
S3 Extensions for gluon.dal.Field, reusable fields
@requires: U{B{I{gluon}} <http://web2py.com>}
@author: Dominic König <dominic[at]aidiq.com>
@copyright: 2009-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["QueryS3",
"FieldS3",
"S3ReusableField",
"s3uuid",
"s3_meta_uuid",
"s3_meta_mci",
"s3_uid",
"s3_meta_deletion_status",
"s3_meta_deletion_fk",
"s3_deletion_status",
"s3_meta_created_on",
"s3_meta_modified_on",
"s3_timestamp"]
import datetime
from gluon import current
from gluon.dal import Query, Field, SQLCustomType
from gluon.storage import Storage
from gluon.html import *
from gluon.validators import *
class QueryS3(Query):
"""
S3 extensions of the gluon.sql.Query class
If Server Side Pagination is on, the proper CAST is needed to match
the string-typed id to lookup table id
@author: sunneach
"""
def __init__(self, left, op=None, right=None):
if op <> "join_via":
Query.__init__(self, left, op, right)
else:
self.sql = "CAST(TRIM(%s,"|") AS INTEGER)=%s" % (left, right)
class FieldS3(Field):
"""
S3 extensions of the gluon.sql.Field clas
If Server Side Pagination is on, the proper CAST is needed to
match the lookup table id
@author: sunneach
"""
def __init__(self, fieldname,
type="string",
length=None,
default=None,
required=False,
requires="<default>",
ondelete="CASCADE",
notnull=False,
unique=False,
uploadfield=True,
widget=None,
label=None,
comment=None,
writable=True,
readable=True,
update=None,
authorize=None,
autodelete=False,
represent=None,
uploadfolder=None,
compute=None,
sortby=None):
self.sortby = sortby
Field.__init__(self,
fieldname,
type,
length,
default,
required,
requires,
ondelete,
notnull,
unique,
uploadfield,
widget,
label,
comment,
writable,
readable,
update,
authorize,
autodelete,
represent,
uploadfolder,
compute)
def join_via(self, value):
if self.type.find("reference") == 0:
return Query(self, "=", value)
else:
return QueryS3(self, "join_via", value)
class S3ReusableField(object):
"""
DRY Helper for reusable fields:
This creates neither a Table nor a Field, but just
an argument store. The field is created with the __call__
method, which is faster than copying an existing field.
@author: Dominic König
"""
def __init__(self, name, type="string", **attr):
self.name = name
self.__type = type
self.attr = Storage(attr)
def __call__(self, name=None, **attr):
if not name:
name = self.name
ia = Storage(self.attr)
if attr:
if not attr.get("empty", True):
requires = ia.requires
if requires:
if not isinstance(requires, (list, tuple)):
requires = [requires]
if requires:
r = requires[0]
if isinstance(r, IS_EMPTY_OR):
requires = r.other
ia.update(requires=requires)
if "empty" in attr:
del attr["empty"]
ia.update(**attr)
if "script" in ia:
if ia.script:
if ia.comment:
ia.comment = TAG[""](ia.comment, ia.script)
else:
ia.comment = ia.script
del ia["script"]
if ia.sortby is not None:
return FieldS3(name, self.__type, **ia)
else:
return Field(name, self.__type, **ia)
def s3_uuid():
import uuid
return uuid.uuid4()
s3uuid = SQLCustomType(type = "string",
native = "VARCHAR(128)",
encoder = lambda x: "%s" % (s3_uuid().urn
if x == ""
else unicode(x.encode("utf-8"))),
decoder = lambda x: x)
try:
db = current.db
except:
pass
else:
if current.db._adapter.represent("X", s3uuid) != "'X'":
s3uuid = SQLCustomType(type = "string",
native = "VARCHAR(128)",
encoder = (lambda x: "'%s'" % (s3_uuid().urn
if x == ""
else str(x.encode("utf-8")).replace("'", "''"))),
decoder = (lambda x: x))
# =============================================================================
# Record identity meta-fields
# Universally unique identifier for a record
s3_meta_uuid = S3ReusableField("uuid",
type=s3uuid,
length=128,
notnull=True,
unique=True,
readable=False,
writable=False,
default="")
# Master-Copy-Index (for Sync)
s3_meta_mci = S3ReusableField("mci", "integer",
default=0,
readable=False,
writable=False)
def s3_uid():
return (s3_meta_uuid(),
s3_meta_mci())
# =============================================================================
# Record "soft"-deletion meta-fields
# "Deleted"-flag
s3_meta_deletion_status = S3ReusableField("deleted", "boolean",
readable=False,
writable=False,
default=False)
# Parked foreign keys of a deleted record
# => to be restored upon "un"-delete
s3_meta_deletion_fk = S3ReusableField("deleted_fk", #"text",
readable=False,
writable=False)
def s3_deletion_status():
return (s3_meta_deletion_status(),
s3_meta_deletion_fk())
# -----------------------------------------------------------------------------
# Record timestamp meta-fields
s3_meta_created_on = S3ReusableField("created_on", "datetime",
readable=False,
writable=False,
default=lambda: datetime.datetime.utcnow())
s3_meta_modified_on = S3ReusableField("modified_on", "datetime",
readable=False,
writable=False,
default=lambda: datetime.datetime.utcnow(),
update=lambda: datetime.datetime.utcnow())
def s3_timestamp():
return (s3_meta_created_on(),
s3_meta_modified_on())
# =============================================================================
| false | true |
f71e5fe49a6e3ab2639977a6bb855247cd56baa2 | 5,565 | py | Python | train/exp_configs/rl/singleagent/test-stl-nw-profit-mid.py | mepear/flow | 4fc6ceaf64ca522b5a5c4104a3098b20cf207dd4 | [
"MIT"
] | 1 | 2021-03-05T07:39:51.000Z | 2021-03-05T07:39:51.000Z | train/exp_configs/rl/singleagent/test-stl-nw-profit-mid.py | mepear/flow | 4fc6ceaf64ca522b5a5c4104a3098b20cf207dd4 | [
"MIT"
] | 1 | 2021-09-13T02:16:02.000Z | 2021-09-13T02:16:02.000Z | train/exp_configs/rl/singleagent/test-stl-nw-profit-mid.py | mepear/flow | 4fc6ceaf64ca522b5a5c4104a3098b20cf207dd4 | [
"MIT"
] | 1 | 2021-08-21T13:58:30.000Z | 2021-08-21T13:58:30.000Z | """Grid example."""
from flow.controllers import GridRouter, IDMController, RLController
from flow.controllers.routing_controllers import MinicityRouter
from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams
from flow.core.params import VehicleParams, PersonParams
from flow.core.params import TrafficLightParams
from flow.core.params import SumoCarFollowingParams, SumoLaneChangeParams
from flow.core.params import InFlows
# from flow.envs.ring.accel import AccelEnv, ADDITIONAL_ENV_PARAMS
from flow.envs.dispatch_and_reposition import DispatchAndRepositionEnv, ADDITIONAL_ENV_PARAMS
from flow.networks import GridnxmNetwork
v_enter = 10
inner_length = 50
n_rows = 4
n_columns = 4
grid_array = {
"inner_length": inner_length,
"row_num": n_rows,
"col_num": n_columns,
"sub_edge_num": 1
}
def get_non_flow_params(enter_speed, add_net_params):
    """Define the network and initial params in the absence of inflows.

    Note that when a vehicle leaves a network in this case, it immediately
    returns to the start of the row/column it was traversing, and in the
    same direction as before.

    Parameters
    ----------
    enter_speed : float
        initial speed of vehicles as they enter the network.
    add_net_params : dict
        additional network-specific parameters (unique to the grid)

    Returns
    -------
    flow.core.params.InitialConfig
        parameters specifying the initial configuration of vehicles in
        the network
    flow.core.params.NetParams
        network-specific parameters used to generate the network
    """
    net = NetParams(additional_params=add_net_params)
    init_extras = {'enter_speed': enter_speed}
    # min_gap must be large enough so uniformly spaced vehicles do not
    # spawn on top of each other.
    initial = InitialConfig(
        x0=2.5,
        spacing='uniform',
        min_gap=10,
        additional_params=init_extras,
    )
    return initial, net
persons = PersonParams()
vehicles = VehicleParams()
vehicles.add(
veh_id="idm",
acceleration_controller=(IDMController, {}),
routing_controller=(MinicityRouter, {}),
car_following_params=SumoCarFollowingParams(
speed_mode='all_checks',
min_gap=5,
decel=10.0, # avoid collisions at emergency stops
max_speed=10,
),
lane_change_params=SumoLaneChangeParams(
lane_change_mode="no_lc_safe",
),
initial_speed=0,
num_vehicles=25)
vehicles.add(
veh_id="taxi",
initial_speed=0,
acceleration_controller=(RLController, {}),
# routing_controller=(MinicityRouter, {}),
car_following_params=SumoCarFollowingParams(
speed_mode='all_checks',
min_gap=5,
decel=10.0, # avoid collisions at emergency stops
max_speed=10,
),
lane_change_params=SumoLaneChangeParams(
lane_change_mode="sumo_default",
),
num_vehicles=15,
is_taxi=False)
tl_logic = TrafficLightParams(baseline=False)
phases = [{
"duration": "5",
"minDur": "5",
"maxDur": "5",
"state": "GGggrrrrGGggrrrr"
}, {
"duration": "1",
"minDur": "1",
"maxDur": "1",
"state": "yyyyrrrryyyyrrrr"
}, {
"duration": "5",
"minDur": "5",
"maxDur": "5",
"state": "rrrrGGggrrrrGGgg"
}, {
"duration": "1",
"minDur": "1",
"maxDur": "1",
"state": "rrrryyyyrrrryyyy"
}]
tl_logic.add("center9", phases=phases)
tl_logic.add("center10", phases=phases)
tl_logic.add("center5", phases=phases)
tl_logic.add("center6", phases=phases)
additional_net_params = {
"grid_array": grid_array,
"speed_limit": 35,
"horizontal_lanes": 1,
"vertical_lanes": 1,
"print_warnings": False, # warnings in building net
}
initial_config, net_params = get_non_flow_params(
enter_speed=v_enter,
add_net_params=additional_net_params)
additional_params = ADDITIONAL_ENV_PARAMS.copy()
additional_params["time_price"] = 0.01
additional_params["distance_price"] = 0.01
additional_params["pickup_price"] = 1
additional_params["wait_penalty"] = 0.000
additional_params["tle_penalty"] = 0.01
additional_params["person_prob"] = 0.06
additional_params["max_waiting_time"] = 30
additional_params["free_pickup_time"] = 0.0
additional_params["distribution"] = 'mode-11'
additional_params["n_mid_edge"] = 1
flow_params = dict(
# name of the experiment
exp_tag='grid-intersection',
# name of the flow environment the experiment is running on
env_name=DispatchAndRepositionEnv,
# name of the network class the experiment is running on
network=GridnxmNetwork,
# simulator that is used by the experiment
simulator='traci',
# sumo-related parameters (see flow.core.params.SumoParams)
sim=SumoParams(
sim_step=1,
render=False,
print_warnings=False,
restart_instance=True
# taxi_dispatch_alg="greedy"
),
# environment related parameters (see flow.core.params.EnvParams)
env=EnvParams(
horizon=500,
additional_params=additional_params,
),
# network-related parameters (see flow.core.params.NetParams and the
# network's documentation or ADDITIONAL_NET_PARAMS component)
net=net_params,
# vehicles to be placed in the network at the start of a rollout (see
# flow.core.params.VehicleParams)
veh=vehicles,
per=persons,
# parameters specifying the positioning of vehicles upon initialization/
# reset (see flow.core.params.InitialConfig)
initial=initial_config,
# traffic lights to be introduced to specific nodes (see
# flow.core.params.TrafficLightParams)
tls=tl_logic,
)
| 29.919355 | 119 | 0.709793 | from flow.controllers import GridRouter, IDMController, RLController
from flow.controllers.routing_controllers import MinicityRouter
from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams
from flow.core.params import VehicleParams, PersonParams
from flow.core.params import TrafficLightParams
from flow.core.params import SumoCarFollowingParams, SumoLaneChangeParams
from flow.core.params import InFlows
from flow.envs.dispatch_and_reposition import DispatchAndRepositionEnv, ADDITIONAL_ENV_PARAMS
from flow.networks import GridnxmNetwork
v_enter = 10
inner_length = 50
n_rows = 4
n_columns = 4
grid_array = {
"inner_length": inner_length,
"row_num": n_rows,
"col_num": n_columns,
"sub_edge_num": 1
}
def get_non_flow_params(enter_speed, add_net_params):
additional_init_params = {'enter_speed': enter_speed}
initial = InitialConfig(
x0=2.5, spacing='uniform', min_gap=10, additional_params=additional_init_params)
net = NetParams(additional_params=add_net_params)
return initial, net
persons = PersonParams()
vehicles = VehicleParams()
vehicles.add(
veh_id="idm",
acceleration_controller=(IDMController, {}),
routing_controller=(MinicityRouter, {}),
car_following_params=SumoCarFollowingParams(
speed_mode='all_checks',
min_gap=5,
decel=10.0,
max_speed=10,
),
lane_change_params=SumoLaneChangeParams(
lane_change_mode="no_lc_safe",
),
initial_speed=0,
num_vehicles=25)
vehicles.add(
veh_id="taxi",
initial_speed=0,
acceleration_controller=(RLController, {}),
car_following_params=SumoCarFollowingParams(
speed_mode='all_checks',
min_gap=5,
decel=10.0,
max_speed=10,
),
lane_change_params=SumoLaneChangeParams(
lane_change_mode="sumo_default",
),
num_vehicles=15,
is_taxi=False)
tl_logic = TrafficLightParams(baseline=False)
phases = [{
"duration": "5",
"minDur": "5",
"maxDur": "5",
"state": "GGggrrrrGGggrrrr"
}, {
"duration": "1",
"minDur": "1",
"maxDur": "1",
"state": "yyyyrrrryyyyrrrr"
}, {
"duration": "5",
"minDur": "5",
"maxDur": "5",
"state": "rrrrGGggrrrrGGgg"
}, {
"duration": "1",
"minDur": "1",
"maxDur": "1",
"state": "rrrryyyyrrrryyyy"
}]
tl_logic.add("center9", phases=phases)
tl_logic.add("center10", phases=phases)
tl_logic.add("center5", phases=phases)
tl_logic.add("center6", phases=phases)
additional_net_params = {
"grid_array": grid_array,
"speed_limit": 35,
"horizontal_lanes": 1,
"vertical_lanes": 1,
"print_warnings": False,
}
initial_config, net_params = get_non_flow_params(
enter_speed=v_enter,
add_net_params=additional_net_params)
additional_params = ADDITIONAL_ENV_PARAMS.copy()
additional_params["time_price"] = 0.01
additional_params["distance_price"] = 0.01
additional_params["pickup_price"] = 1
additional_params["wait_penalty"] = 0.000
additional_params["tle_penalty"] = 0.01
additional_params["person_prob"] = 0.06
additional_params["max_waiting_time"] = 30
additional_params["free_pickup_time"] = 0.0
additional_params["distribution"] = 'mode-11'
additional_params["n_mid_edge"] = 1
flow_params = dict(
exp_tag='grid-intersection',
env_name=DispatchAndRepositionEnv,
network=GridnxmNetwork,
simulator='traci',
sim=SumoParams(
sim_step=1,
render=False,
print_warnings=False,
restart_instance=True
),
env=EnvParams(
horizon=500,
additional_params=additional_params,
),
net=net_params,
# vehicles to be placed in the network at the start of a rollout (see
# flow.core.params.VehicleParams)
veh=vehicles,
per=persons,
# parameters specifying the positioning of vehicles upon initialization/
# reset (see flow.core.params.InitialConfig)
initial=initial_config,
# traffic lights to be introduced to specific nodes (see
# flow.core.params.TrafficLightParams)
tls=tl_logic,
)
| true | true |
f71e607e4f8c3f3efd904c8c5129e23baa35a350 | 9,240 | py | Python | source/setup.py | asaranprasad/nvda | e9609694acbfb06398eb6552067a0dcd532d67af | [
"bzip2-1.0.6"
] | 2 | 2020-08-24T17:41:45.000Z | 2020-08-25T16:48:52.000Z | source/setup.py | asaranprasad/nvda | e9609694acbfb06398eb6552067a0dcd532d67af | [
"bzip2-1.0.6"
] | 3 | 2017-09-29T17:14:18.000Z | 2019-05-20T16:13:39.000Z | source/setup.py | asaranprasad/nvda | e9609694acbfb06398eb6552067a0dcd532d67af | [
"bzip2-1.0.6"
] | 1 | 2017-09-29T08:53:52.000Z | 2017-09-29T08:53:52.000Z | # -*- coding: UTF-8 -*-
#setup.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2018 NV Access Limited, Peter Vágner, Joseph Lee
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import os
import copy
import gettext
gettext.install("nvda", unicode=True)
from distutils.core import setup
import py2exe as py2exeModule
from glob import glob
import fnmatch
from versionInfo import *
from py2exe import build_exe
import wx
import imp
MAIN_MANIFEST_EXTRA = r"""
<file name="brailleDisplayDrivers\handyTech\HtBrailleDriverServer.dll">
<comClass
description="HtBrailleDriver Class"
clsid="{209445BA-92ED-4AB2-83EC-F24ACEE77EE0}"
threadingModel="Apartment"
progid="HtBrailleDriverServer.HtBrailleDriver"
tlbid="{33257EFB-336F-4680-B94E-F5013BA6B9B3}" />
</file>
<file name="brailleDisplayDrivers\handyTech\HtBrailleDriverServer.tlb">
<typelib tlbid="{33257EFB-336F-4680-B94E-F5013BA6B9B3}"
version="1.0"
helpdir="" />
</file>
<comInterfaceExternalProxyStub
name="IHtBrailleDriverSink"
iid="{EF551F82-1C7E-421F-963D-D9D03548785A}"
proxyStubClsid32="{00020420-0000-0000-C000-000000000046}"
baseInterface="{00000000-0000-0000-C000-000000000046}"
tlbid="{33257EFB-336F-4680-B94E-F5013BA6B9B3}" />
<comInterfaceExternalProxyStub
name="IHtBrailleDriver"
iid="{43A71F9B-58EE-42D4-B58E-0F9FBA28D995}"
proxyStubClsid32="{00020424-0000-0000-C000-000000000046}"
baseInterface="{00000000-0000-0000-C000-000000000046}"
tlbid="{33257EFB-336F-4680-B94E-F5013BA6B9B3}" />
<compatibility xmlns="urn:schemas-microsoft-com:compatibility.v1">
<application>
<!-- Windows Vista -->
<supportedOS Id="{e2011457-1546-43c5-a5fe-008deee3d3f0}"/>
<!-- Windows 7 -->
<supportedOS Id="{35138b9a-5d96-4fbd-8e2d-a2440225f93a}"/>
<!-- Windows 8 -->
<supportedOS Id="{4a2f28e3-53b9-4441-ba9c-d69d4a4a6e38}"/>
<!-- Windows 8.1 -->
<supportedOS Id="{1f676c76-80e1-4239-95bb-83d0f6d0da78}"/>
<!-- Windows 10 -->
<supportedOS Id="{8e0f7a12-bfb3-4fe8-b9a5-48fd50a15a9a}"/>
</application>
</compatibility>
"""
def getModuleExtention(thisModType):
	"""Return the file name extension (e.g. ".py") that imp registers
	for the given module type, raising ValueError if the type is unknown.
	"""
	matching = (ext for ext, mode, modType in imp.get_suffixes() if modType == thisModType)
	for ext in matching:
		return ext
	raise ValueError("unknown mod type %s"%thisModType)
# py2exe's idea of whether a dll is a system dll appears to be wrong sometimes, so monkey patch it.
origIsSystemDLL = build_exe.isSystemDLL
def isSystemDLL(pathname):
	"""Replacement for py2exe's isSystemDLL check.

	Returns 0 (not a system dll, so bundle it) for runtime dlls that are
	missing on many systems, 1 (system dll, so exclude it) for dlls that
	every Windows version provides, and otherwise defers to py2exe's
	original implementation.
	"""
	name = os.path.basename(pathname).lower()
	# These dlls don't exist on many systems, so make sure they're included.
	if name in ("msvcp71.dll", "msvcp90.dll", "gdiplus.dll", "mfc71.dll", "mfc90.dll"):
		return 0
	# Definitely system dlls available on all systems; must be excluded,
	# since shipping them can break a binary build run on a different
	# version of Windows.
	if name.startswith("api-ms-win-") or name in ("powrprof.dll", "mpr.dll", "crypt32.dll"):
		return 1
	return origIsSystemDLL(pathname)
build_exe.isSystemDLL = isSystemDLL
class py2exe(build_exe.py2exe):
	"""Overridden py2exe command to:
		* Add a command line option --enable-uiAccess to enable uiAccess for the main executable
		* Add extra info to the manifest
		* Don't copy w9xpopen, as NVDA will never run on Win9x
	"""
	# Extend the distutils option table so "py2exe -u" /
	# "py2exe --enable-uiAccess" is accepted on the command line.
	user_options = build_exe.py2exe.user_options + [
		("enable-uiAccess", "u", "enable uiAccess for the main executable"),
	]

	def initialize_options(self):
		# Standard distutils hook: set defaults for our extra option.
		build_exe.py2exe.initialize_options(self)
		self.enable_uiAccess = False

	def copy_w9xpopen(self, modules, dlls):
		# Deliberately a no-op: NVDA never runs on Win9x, so the
		# w9xpopen helper that upstream py2exe copies is not needed.
		pass

	def run(self):
		dist = self.distribution
		if self.enable_uiAccess:
			# Add a target for nvda_uiAccess, using nvda_noUIAccess
			# (dist.windows[0]) as a base. uac_info is a
			# (executionLevel, uiAccess) pair; only flip uiAccess on.
			target = copy.deepcopy(dist.windows[0])
			target["dest_base"] = "nvda_uiAccess"
			target["uac_info"] = (target["uac_info"][0], True)
			dist.windows.insert(1, target)
			# nvda_eoaProxy (index 3 after the insert above) should
			# also have uiAccess.
			target = dist.windows[3]
			target["uac_info"] = (target["uac_info"][0], True)
		build_exe.py2exe.run(self)

	def build_manifest(self, target, template):
		# Let py2exe build the base manifest, then splice our extra
		# COM/compatibility declarations into it for the main
		# application executables (the .pyw scripts).
		mfest, rid = build_exe.py2exe.build_manifest(self, target, template)
		if getattr(target, "script", "").endswith(".pyw"):
			# This is one of the main application executables:
			# re-open the manifest and append MAIN_MANIFEST_EXTRA
			# before the closing </assembly> tag.
			mfest = mfest[:mfest.rindex("</assembly>")]
			mfest += MAIN_MANIFEST_EXTRA + "</assembly>"
		return mfest, rid
def getLocaleDataFiles():
	"""Collect per-locale data files for py2exe's data_files option.

	Returns a list of (destinationDir, (sourceFile, ...)) rules covering
	NVDA's compiled translation catalogs (nvda.mo), the matching wxstd.mo
	catalogs shipped with wx (when present), character description
	dictionaries (*.dic) and locale-specific gesture maps (gestures.ini).
	"""
	wxDir=wx.__path__[0]
	localeMoFiles=set()
	for f in glob("locale/*/LC_MESSAGES"):
		localeMoFiles.add((f, (os.path.join(f,"nvda.mo"),)))
		# Bundle wx's own catalog for this locale if wx ships one.
		wxMoFile=os.path.join(wxDir,f,"wxstd.mo")
		if os.path.isfile(wxMoFile):
			localeMoFiles.add((f,(wxMoFile,)))
		lang=os.path.split(os.path.split(f)[0])[1]
		if '_' in lang:
			# For region-specific locales (e.g. pt_BR), also try wx's
			# base-language catalog (e.g. pt) as a fallback.
			lang=lang.split('_')[0]
			f=os.path.join('locale',lang,'lc_messages')
			wxMoFile=os.path.join(wxDir,f,"wxstd.mo")
			if os.path.isfile(wxMoFile):
				localeMoFiles.add((f,(wxMoFile,)))
	localeDicFiles=[(os.path.dirname(f), (f,)) for f in glob("locale/*/*.dic")]
	NVDALocaleGestureMaps=[(os.path.dirname(f), (f,)) for f in glob("locale/*/gestures.ini")]
	return list(localeMoFiles)+localeDicFiles+NVDALocaleGestureMaps
def getRecursiveDataFiles(dest,source,excludes=()):
	"""Recursively build py2exe data_files rules copying *source* to *dest*.

	Returns a list of (destinationDir, [sourceFile, ...]) pairs: one pair
	for *source* itself and one for each (non-hidden) subdirectory,
	mirroring the directory tree under *dest*.

	@param dest: destination directory (relative to the distribution root)
	@param source: source directory to walk
	@param excludes: fnmatch-style patterns; matching files are skipped
	"""
	rulesList=[(dest,
		[f for f in glob("%s/*"%source) if not any(fnmatch.fnmatch(f,exclude) for exclude in excludes) and os.path.isfile(f)])]
	# Recurse into subdirectories, skipping hidden ones (e.g. .svn/.git).
	# (Previously this used a list comprehension purely for its side
	# effects; a plain loop states the intent.)
	for dirName in os.listdir(source):
		if os.path.isdir(os.path.join(source,dirName)) and not dirName.startswith('.'):
			rulesList.extend(getRecursiveDataFiles(os.path.join(dest,dirName),os.path.join(source,dirName),excludes=excludes))
	return rulesList
compiledModExtention = getModuleExtention(imp.PY_COMPILED)
sourceModExtention = getModuleExtention(imp.PY_SOURCE)
setup(
name = name,
version=version,
description=description,
url=url,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Win32 (MS Windows)',
'Topic :: Adaptive Technologies'
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Programming Language :: Python',
'Operating System :: Microsoft :: Windows',
],
cmdclass={"py2exe": py2exe},
windows=[
{
"script":"nvda.pyw",
"dest_base":"nvda_noUIAccess",
"uac_info": ("asInvoker", False),
"icon_resources":[(1,"images/nvda.ico")],
"version":"%s.%s.%s.%s"%(version_year,version_major,version_minor,version_build),
"description":"NVDA application",
"product_version":version,
"copyright":copyright,
"company_name":publisher,
},
# The nvda_uiAccess target will be added at runtime if required.
{
"script": "nvda_slave.pyw",
"icon_resources": [(1,"images/nvda.ico")],
"version":"%s.%s.%s.%s"%(version_year,version_major,version_minor,version_build),
"description": name,
"product_version": version,
"copyright": copyright,
"company_name": publisher,
},
{
"script": "nvda_eoaProxy.pyw",
# uiAccess will be enabled at runtime if appropriate.
"uac_info": ("asInvoker", False),
"icon_resources": [(1,"images/nvda.ico")],
"version":"%s.%s.%s.%s"%(version_year,version_major,version_minor,version_build),
"description": "NVDA Ease of Access proxy",
"product_version": version,
"copyright": copyright,
"company_name": publisher,
},
],
options = {"py2exe": {
"bundle_files": 3,
"excludes": ["Tkinter",
"serial.loopback_connection", "serial.rfc2217", "serial.serialcli", "serial.serialjava", "serial.serialposix", "serial.socket_connection"],
"packages": ["NVDAObjects","virtualBuffers","appModules","comInterfaces","brailleDisplayDrivers","synthDrivers"],
"includes": [
"nvdaBuiltin",
# #3368: bisect was implicitly included with Python 2.7.3, but isn't with 2.7.5.
"bisect",
# Also, the previous service executable used win32api, which some add-ons use for various purposes.
"win32api",
# #8628: include an import module for validate, which older add-ons import directly.
# Since configobj 5.1.0, validate is a part of the configobj package
# and should be imported as configobj.validate instead
"validate",
],
}},
data_files=[
(".",glob("*.dll")+glob("*.manifest")+["builtin.dic"]),
("documentation", ['../copying.txt', '../contributors.txt']),
("lib/%s"%version, glob("lib/*.dll")),
("lib64/%s"%version, glob("lib64/*.dll") + glob("lib64/*.exe")),
("waves", glob("waves/*.wav")),
("images", glob("images/*.ico")),
("louis/tables",glob("louis/tables/*")),
("COMRegistrationFixes", glob("COMRegistrationFixes/*.reg")),
(".", ['message.html' ])
] + (
getLocaleDataFiles()
+ getRecursiveDataFiles("synthDrivers", "synthDrivers",
excludes=("*%s" % sourceModExtention, "*%s" % compiledModExtention, "*.exp", "*.lib", "*.pdb"))
+ getRecursiveDataFiles("brailleDisplayDrivers", "brailleDisplayDrivers", excludes=("*%s"%sourceModExtention,"*%s"%compiledModExtention))
+ getRecursiveDataFiles('documentation', '../user_docs', excludes=('*.t2t', '*.t2tconf', '*/developerGuide.*'))
),
)
| 38.987342 | 231 | 0.695238 |
import os
import copy
import gettext
gettext.install("nvda", unicode=True)
from distutils.core import setup
import py2exe as py2exeModule
from glob import glob
import fnmatch
from versionInfo import *
from py2exe import build_exe
import wx
import imp
MAIN_MANIFEST_EXTRA = r"""
<file name="brailleDisplayDrivers\handyTech\HtBrailleDriverServer.dll">
<comClass
description="HtBrailleDriver Class"
clsid="{209445BA-92ED-4AB2-83EC-F24ACEE77EE0}"
threadingModel="Apartment"
progid="HtBrailleDriverServer.HtBrailleDriver"
tlbid="{33257EFB-336F-4680-B94E-F5013BA6B9B3}" />
</file>
<file name="brailleDisplayDrivers\handyTech\HtBrailleDriverServer.tlb">
<typelib tlbid="{33257EFB-336F-4680-B94E-F5013BA6B9B3}"
version="1.0"
helpdir="" />
</file>
<comInterfaceExternalProxyStub
name="IHtBrailleDriverSink"
iid="{EF551F82-1C7E-421F-963D-D9D03548785A}"
proxyStubClsid32="{00020420-0000-0000-C000-000000000046}"
baseInterface="{00000000-0000-0000-C000-000000000046}"
tlbid="{33257EFB-336F-4680-B94E-F5013BA6B9B3}" />
<comInterfaceExternalProxyStub
name="IHtBrailleDriver"
iid="{43A71F9B-58EE-42D4-B58E-0F9FBA28D995}"
proxyStubClsid32="{00020424-0000-0000-C000-000000000046}"
baseInterface="{00000000-0000-0000-C000-000000000046}"
tlbid="{33257EFB-336F-4680-B94E-F5013BA6B9B3}" />
<compatibility xmlns="urn:schemas-microsoft-com:compatibility.v1">
<application>
<!-- Windows Vista -->
<supportedOS Id="{e2011457-1546-43c5-a5fe-008deee3d3f0}"/>
<!-- Windows 7 -->
<supportedOS Id="{35138b9a-5d96-4fbd-8e2d-a2440225f93a}"/>
<!-- Windows 8 -->
<supportedOS Id="{4a2f28e3-53b9-4441-ba9c-d69d4a4a6e38}"/>
<!-- Windows 8.1 -->
<supportedOS Id="{1f676c76-80e1-4239-95bb-83d0f6d0da78}"/>
<!-- Windows 10 -->
<supportedOS Id="{8e0f7a12-bfb3-4fe8-b9a5-48fd50a15a9a}"/>
</application>
</compatibility>
"""
def getModuleExtention(thisModType):
for ext,mode,modType in imp.get_suffixes():
if modType==thisModType:
return ext
raise ValueError("unknown mod type %s"%thisModType)
origIsSystemDLL = build_exe.isSystemDLL
def isSystemDLL(pathname):
dll = os.path.basename(pathname).lower()
if dll in ("msvcp71.dll", "msvcp90.dll", "gdiplus.dll","mfc71.dll", "mfc90.dll"):
# These dlls don't exist on many systems, so make sure they're included.
return 0
elif dll.startswith("api-ms-win-") or dll in ("powrprof.dll", "mpr.dll", "crypt32.dll"):
# These are definitely system dlls available on all systems and must be excluded.
# Including them can cause serious problems when a binary build is run on a different version of Windows.
return 1
return origIsSystemDLL(pathname)
build_exe.isSystemDLL = isSystemDLL
class py2exe(build_exe.py2exe):
user_options = build_exe.py2exe.user_options + [
("enable-uiAccess", "u", "enable uiAccess for the main executable"),
]
def initialize_options(self):
build_exe.py2exe.initialize_options(self)
self.enable_uiAccess = False
def copy_w9xpopen(self, modules, dlls):
pass
def run(self):
dist = self.distribution
if self.enable_uiAccess:
# Add a target for nvda_uiAccess, using nvda_noUIAccess as a base.
target = copy.deepcopy(dist.windows[0])
target["dest_base"] = "nvda_uiAccess"
target["uac_info"] = (target["uac_info"][0], True)
dist.windows.insert(1, target)
# nvda_eoaProxy should have uiAccess.
target = dist.windows[3]
target["uac_info"] = (target["uac_info"][0], True)
build_exe.py2exe.run(self)
def build_manifest(self, target, template):
mfest, rid = build_exe.py2exe.build_manifest(self, target, template)
if getattr(target, "script", "").endswith(".pyw"):
# This is one of the main application executables.
mfest = mfest[:mfest.rindex("</assembly>")]
mfest += MAIN_MANIFEST_EXTRA + "</assembly>"
return mfest, rid
def getLocaleDataFiles():
wxDir=wx.__path__[0]
localeMoFiles=set()
for f in glob("locale/*/LC_MESSAGES"):
localeMoFiles.add((f, (os.path.join(f,"nvda.mo"),)))
wxMoFile=os.path.join(wxDir,f,"wxstd.mo")
if os.path.isfile(wxMoFile):
localeMoFiles.add((f,(wxMoFile,)))
lang=os.path.split(os.path.split(f)[0])[1]
if '_' in lang:
lang=lang.split('_')[0]
f=os.path.join('locale',lang,'lc_messages')
wxMoFile=os.path.join(wxDir,f,"wxstd.mo")
if os.path.isfile(wxMoFile):
localeMoFiles.add((f,(wxMoFile,)))
localeDicFiles=[(os.path.dirname(f), (f,)) for f in glob("locale/*/*.dic")]
NVDALocaleGestureMaps=[(os.path.dirname(f), (f,)) for f in glob("locale/*/gestures.ini")]
return list(localeMoFiles)+localeDicFiles+NVDALocaleGestureMaps
def getRecursiveDataFiles(dest, source, excludes=()):
    """Build a py2exe-style ``data_files`` list for *source* and its subtree.

    Args:
        dest: destination directory prefix used in the generated rules.
        source: directory to scan for files.
        excludes: fnmatch-style patterns; paths matching any pattern are
            skipped.

    Returns:
        A list of ``(dest_dir, [file, ...])`` tuples, one per directory,
        recursing into every non-hidden subdirectory of *source*.
    """
    rulesList = [(dest,
                  [f for f in glob("%s/*" % source)
                   if not any(fnmatch.fnmatch(f, exclude) for exclude in excludes)
                   and os.path.isfile(f)])]
    # Recurse with an explicit loop; the original used a list comprehension
    # purely for its side effects, which hides the control flow.
    for dirName in os.listdir(source):
        subdir = os.path.join(source, dirName)
        if os.path.isdir(subdir) and not dirName.startswith('.'):
            rulesList.extend(getRecursiveDataFiles(os.path.join(dest, dirName),
                                                   subdir, excludes=excludes))
    return rulesList
compiledModExtention = getModuleExtention(imp.PY_COMPILED)
sourceModExtention = getModuleExtention(imp.PY_SOURCE)
setup(
name = name,
version=version,
description=description,
url=url,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Win32 (MS Windows)',
'Topic :: Adaptive Technologies'
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Programming Language :: Python',
'Operating System :: Microsoft :: Windows',
],
cmdclass={"py2exe": py2exe},
windows=[
{
"script":"nvda.pyw",
"dest_base":"nvda_noUIAccess",
"uac_info": ("asInvoker", False),
"icon_resources":[(1,"images/nvda.ico")],
"version":"%s.%s.%s.%s"%(version_year,version_major,version_minor,version_build),
"description":"NVDA application",
"product_version":version,
"copyright":copyright,
"company_name":publisher,
},
# The nvda_uiAccess target will be added at runtime if required.
{
"script": "nvda_slave.pyw",
"icon_resources": [(1,"images/nvda.ico")],
"version":"%s.%s.%s.%s"%(version_year,version_major,version_minor,version_build),
"description": name,
"product_version": version,
"copyright": copyright,
"company_name": publisher,
},
{
"script": "nvda_eoaProxy.pyw",
# uiAccess will be enabled at runtime if appropriate.
"uac_info": ("asInvoker", False),
"icon_resources": [(1,"images/nvda.ico")],
"version":"%s.%s.%s.%s"%(version_year,version_major,version_minor,version_build),
"description": "NVDA Ease of Access proxy",
"product_version": version,
"copyright": copyright,
"company_name": publisher,
},
],
options = {"py2exe": {
"bundle_files": 3,
"excludes": ["Tkinter",
"serial.loopback_connection", "serial.rfc2217", "serial.serialcli", "serial.serialjava", "serial.serialposix", "serial.socket_connection"],
"packages": ["NVDAObjects","virtualBuffers","appModules","comInterfaces","brailleDisplayDrivers","synthDrivers"],
"includes": [
"nvdaBuiltin",
# #3368: bisect was implicitly included with Python 2.7.3, but isn't with 2.7.5.
"bisect",
"win32api",
nifest")+["builtin.dic"]),
("documentation", ['../copying.txt', '../contributors.txt']),
("lib/%s"%version, glob("lib/*.dll")),
("lib64/%s"%version, glob("lib64/*.dll") + glob("lib64/*.exe")),
("waves", glob("waves/*.wav")),
("images", glob("images/*.ico")),
("louis/tables",glob("louis/tables/*")),
("COMRegistrationFixes", glob("COMRegistrationFixes/*.reg")),
(".", ['message.html' ])
] + (
getLocaleDataFiles()
+ getRecursiveDataFiles("synthDrivers", "synthDrivers",
excludes=("*%s" % sourceModExtention, "*%s" % compiledModExtention, "*.exp", "*.lib", "*.pdb"))
+ getRecursiveDataFiles("brailleDisplayDrivers", "brailleDisplayDrivers", excludes=("*%s"%sourceModExtention,"*%s"%compiledModExtention))
+ getRecursiveDataFiles('documentation', '../user_docs', excludes=('*.t2t', '*.t2tconf', '*/developerGuide.*'))
),
)
| true | true |
f71e607e7707662acad32327b1147a6236003db1 | 14,288 | py | Python | api/resources/v2/containers.py | mythwm/yardstick | ea13581f450c9c44f6f73d383e6a192697a95cc1 | [
"Apache-2.0"
] | 28 | 2017-02-07T07:46:42.000Z | 2021-06-30T08:11:06.000Z | api/resources/v2/containers.py | mythwm/yardstick | ea13581f450c9c44f6f73d383e6a192697a95cc1 | [
"Apache-2.0"
] | 6 | 2018-01-18T08:00:54.000Z | 2019-04-11T04:51:41.000Z | api/resources/v2/containers.py | mythwm/yardstick | ea13581f450c9c44f6f73d383e6a192697a95cc1 | [
"Apache-2.0"
] | 46 | 2016-12-13T10:05:47.000Z | 2021-02-18T07:33:06.000Z | ##############################################################################
# Copyright (c) 2017 Huawei Technologies Co.,Ltd.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from __future__ import absolute_import
import logging
import threading
import time
import uuid
import os
import glob
from six.moves import configparser
from oslo_serialization import jsonutils
from docker import Client
from api import ApiResource
from api.utils import influx
from api.database.v2.handlers import V2ContainerHandler
from api.database.v2.handlers import V2EnvironmentHandler
from yardstick.common import constants as consts
from yardstick.common import utils
from yardstick.common.utils import result_handler
from yardstick.common.utils import get_free_port
from yardstick.common.httpClient import HttpClient
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
environment_handler = V2EnvironmentHandler()
container_handler = V2ContainerHandler()
class V2Containers(ApiResource):
    """REST collection resource for service containers tied to an environment.

    POST requests are dispatched (via ``_dispatch_post``) to an action
    handler named in the request body, i.e. ``create_influxdb`` or
    ``create_grafana``.  Both actions start the actual container work on a
    background thread and immediately return the new container record's
    uuid; progress is tracked through the record's ``status`` field
    (0 = in progress, 1 = done, 2 = failed).
    """

    def post(self):
        # Route to the handler method named in the request payload.
        return self._dispatch_post()

    def create_influxdb(self, args):
        """Create an InfluxDB container for an environment.

        Args:
            args: dict that must contain the key 'environment_id'.

        Returns:
            API result dict carrying the uuid of the new container record,
            or an error result on validation failure.
        """
        try:
            environment_id = args['environment_id']
        except KeyError:
            return result_handler(consts.API_ERROR, 'environment_id must be provided')

        try:
            uuid.UUID(environment_id)
        except ValueError:
            return result_handler(consts.API_ERROR, 'invalid environment id')

        try:
            environment = environment_handler.get_by_uuid(environment_id)
        except ValueError:
            return result_handler(consts.API_ERROR, 'no such environment id')

        # container_id is persisted as a JSON mapping of service name -> uuid.
        container_info = environment.container_id
        container_info = jsonutils.loads(container_info) if container_info else {}
        if container_info.get('influxdb'):
            return result_handler(consts.API_ERROR, 'influxdb container already exist')

        name = 'influxdb-{}'.format(environment_id[:8])
        port = get_free_port(consts.SERVER_IP)
        container_id = str(uuid.uuid4())
        LOG.info('%s will launch on : %s', name, port)

        # Container creation is slow (image pull etc.), so do it in the
        # background and report progress via the record's status field.
        LOG.info('launch influxdb background')
        args = (name, port, container_id)
        thread = threading.Thread(target=self._create_influxdb, args=args)
        thread.start()

        LOG.info('record container in database')
        container_init_data = {
            'uuid': container_id,
            'environment_id': environment_id,
            'name': name,
            'port': port,
            'status': 0
        }
        container_handler.insert(container_init_data)

        LOG.info('update container in environment')
        container_info['influxdb'] = container_id
        environment_info = {'container_id': jsonutils.dumps(container_info)}
        environment_handler.update_attr(environment_id, environment_info)

        return result_handler(consts.API_SUCCESS, {'uuid': container_id})

    def _check_image_exist(self, client, t):
        # True if any locally available image carries the tag t ("image:tag").
        return any(t in a['RepoTags'][0]
                   for a in client.images() if a['RepoTags'])

    def _create_influxdb(self, name, port, container_id):
        """Background worker: pull/run InfluxDB and point yardstick at it."""
        client = Client(base_url=consts.DOCKER_URL)
        try:
            LOG.info('Checking if influxdb image exist')
            if not self._check_image_exist(client, '%s:%s' %
                                           (consts.INFLUXDB_IMAGE,
                                            consts.INFLUXDB_TAG)):
                LOG.info('Influxdb image not exist, start pulling')
                client.pull(consts.INFLUXDB_IMAGE, tag=consts.INFLUXDB_TAG)

            LOG.info('Createing influxdb container')
            container = self._create_influxdb_container(client, name, port)
            LOG.info('Influxdb container is created')
            # Give the daemon a moment to attach the container to the
            # bridge network before inspecting it for its IP address.
            time.sleep(5)
            container = client.inspect_container(container['Id'])
            ip = container['NetworkSettings']['Networks']['bridge']['IPAddress']
            LOG.debug('container ip is: %s', ip)

            LOG.info('Changing output to influxdb')
            self._change_output_to_influxdb(ip)

            LOG.info('Config influxdb')
            self._config_influxdb()

            container_handler.update_attr(container_id, {'status': 1})
            LOG.info('Finished')
        except Exception:
            # status 2 marks the record as failed; details go to the log.
            container_handler.update_attr(container_id, {'status': 2})
            LOG.exception('Creating influxdb failed')

    def _create_influxdb_container(self, client, name, port):
        """Create and start the InfluxDB container, mapping 8086 -> port."""
        ports = [port]
        port_bindings = {8086: port}
        restart_policy = {"MaximumRetryCount": 0, "Name": "always"}
        host_config = client.create_host_config(port_bindings=port_bindings,
                                                restart_policy=restart_policy)

        LOG.info('Creating container')
        container = client.create_container(image='%s:%s' %
                                            (consts.INFLUXDB_IMAGE,
                                             consts.INFLUXDB_TAG),
                                            ports=ports,
                                            name=name,
                                            detach=True,
                                            tty=True,
                                            host_config=host_config)
        LOG.info('Starting container')
        client.start(container)
        return container

    def _config_influxdb(self):
        # Best effort: a failure is logged but does not abort the worker.
        try:
            client = influx.get_data_db_client()
            client.create_user(consts.INFLUXDB_USER,
                               consts.INFLUXDB_PASS,
                               consts.INFLUXDB_DB_NAME)
            client.create_database(consts.INFLUXDB_DB_NAME)
            LOG.info('Success to config influxDB')
        except Exception:
            LOG.exception('Config influxdb failed')

    def _change_output_to_influxdb(self, ip):
        """Rewrite the yardstick conf so dispatcher output goes to InfluxDB at *ip*."""
        utils.makedirs(consts.CONF_DIR)

        parser = configparser.ConfigParser()
        LOG.info('Reading output sample configuration')
        parser.read(consts.CONF_SAMPLE_FILE)

        LOG.info('Set dispatcher to influxdb')
        parser.set('DEFAULT', 'dispatcher', 'influxdb')
        # 8086 is InfluxDB's in-container port; see _create_influxdb_container.
        parser.set('dispatcher_influxdb', 'target',
                   'http://{}:{}'.format(ip, 8086))

        LOG.info('Writing to %s', consts.CONF_FILE)
        with open(consts.CONF_FILE, 'w') as f:
            parser.write(f)

    def create_grafana(self, args):
        """Create a Grafana container for an environment.

        Requires that an influxdb container was already created for the
        same environment (its conf target is reused as the data source).
        """
        try:
            environment_id = args['environment_id']
        except KeyError:
            return result_handler(consts.API_ERROR, 'environment_id must be provided')

        try:
            uuid.UUID(environment_id)
        except ValueError:
            return result_handler(consts.API_ERROR, 'invalid environment id')

        try:
            environment = environment_handler.get_by_uuid(environment_id)
        except ValueError:
            return result_handler(consts.API_ERROR, 'no such environment id')

        container_info = environment.container_id
        container_info = jsonutils.loads(container_info) if container_info else {}
        if not container_info.get('influxdb'):
            return result_handler(consts.API_ERROR, 'influxdb not set')
        if container_info.get('grafana'):
            return result_handler(consts.API_ERROR, 'grafana container already exists')

        name = 'grafana-{}'.format(environment_id[:8])
        port = get_free_port(consts.SERVER_IP)
        container_id = str(uuid.uuid4())

        # As with influxdb, the heavy lifting happens on a worker thread.
        args = (name, port, container_id)
        thread = threading.Thread(target=self._create_grafana, args=args)
        thread.start()

        container_init_data = {
            'uuid': container_id,
            'environment_id': environment_id,
            'name': name,
            'port': port,
            'status': 0
        }
        container_handler.insert(container_init_data)

        container_info['grafana'] = container_id
        environment_info = {'container_id': jsonutils.dumps(container_info)}
        environment_handler.update_attr(environment_id, environment_info)

        return result_handler(consts.API_SUCCESS, {'uuid': container_id})

    def _create_grafana(self, name, port, container_id):
        """Background worker: pull/run Grafana and wire it to InfluxDB."""
        client = Client(base_url=consts.DOCKER_URL)

        try:
            LOG.info('Checking if grafana image exist')
            image = '{}:{}'.format(consts.GRAFANA_IMAGE, consts.GRAFANA_TAG)
            if not self._check_image_exist(client, image):
                LOG.info('Grafana image not exist, start pulling')
                client.pull(consts.GRAFANA_IMAGE, consts.GRAFANA_TAG)

            LOG.info('Createing grafana container')
            container = self._create_grafana_container(client, name, port)
            LOG.info('Grafana container is created')
            # Wait for the bridge network before reading the IP address.
            time.sleep(5)
            container = client.inspect_container(container['Id'])
            ip = container['NetworkSettings']['Networks']['bridge']['IPAddress']
            LOG.debug('container ip is: %s', ip)

            LOG.info('Creating data source for grafana')
            self._create_data_source(ip)

            LOG.info('Creating dashboard for grafana')
            self._create_dashboard(ip)

            container_handler.update_attr(container_id, {'status': 1})
            LOG.info('Finished')
        except Exception:
            container_handler.update_attr(container_id, {'status': 2})
            LOG.exception('Create grafana failed')

    def _create_dashboard(self, ip):
        """Import every bundled opnfv_yardstick_tc* dashboard into Grafana."""
        url = 'http://admin:admin@{}:{}/api/dashboards/db'.format(ip, 3000)
        path = os.path.join(consts.REPOS_DIR, 'dashboard', 'opnfv_yardstick_tc*.json')

        for i in sorted(glob.iglob(path)):
            with open(i) as f:
                data = jsonutils.load(f)
            try:
                HttpClient().post(url, {'dashboard': data})
            except Exception:
                # Unlike _config_influxdb, a dashboard failure is fatal for
                # the worker: log it and re-raise so status becomes 2.
                LOG.exception('Create dashboard %s failed', i)
                raise

    def _create_data_source(self, ip):
        """Register the yardstick InfluxDB database as a Grafana data source."""
        url = 'http://admin:admin@{}:{}/api/datasources'.format(ip, 3000)

        # Reuse the target/credentials previously written by
        # _change_output_to_influxdb.
        influx_conf = utils.parse_ini_file(consts.CONF_FILE).get('dispatcher_influxdb', {})

        data = {
            "name": "yardstick",
            "type": "influxdb",
            "access": "proxy",
            "url": influx_conf.get('target', ''),
            "password": influx_conf.get('password', ''),
            "user": influx_conf.get('username', ''),
            "database": "yardstick",
            "basicAuth": True,
            "basicAuthUser": "admin",
            "basicAuthPassword": "admin",
            "isDefault": False,
        }
        try:
            HttpClient().post(url, data)
        except Exception:
            LOG.exception('Create datasources failed')
            raise

    def _create_grafana_container(self, client, name, port):
        """Create and start the Grafana container, mapping 3000 -> port."""
        ports = [3000]
        port_bindings = {3000: port}
        restart_policy = {"MaximumRetryCount": 0, "Name": "always"}
        host_config = client.create_host_config(port_bindings=port_bindings,
                                                restart_policy=restart_policy)

        LOG.info('Creating container')
        container = client.create_container(image='%s:%s' %
                                            (consts.GRAFANA_IMAGE,
                                             consts.GRAFANA_TAG),
                                            name=name,
                                            ports=ports,
                                            detach=True,
                                            tty=True,
                                            host_config=host_config)
        LOG.info('Starting container')
        client.start(container)
        return container
class V2Container(ApiResource):
    """REST resource for a single container record (GET status / DELETE)."""

    def get(self, container_id):
        """Return the container's name, docker status, creation time and port."""
        try:
            uuid.UUID(container_id)
        except ValueError:
            return result_handler(consts.API_ERROR, 'invalid container id')

        try:
            container = container_handler.get_by_uuid(container_id)
        except ValueError:
            return result_handler(consts.API_ERROR, 'no such container id')

        name = container.name
        client = Client(base_url=consts.DOCKER_URL)
        info = client.inspect_container(name)

        data = {
            'name': name,
            'status': info.get('State', {}).get('Status', 'error'),
            'time': info.get('Created'),
            'port': container.port
        }

        return result_handler(consts.API_SUCCESS, {'container': data})

    def delete(self, container_id):
        """Remove the docker container and every database reference to it."""
        try:
            uuid.UUID(container_id)
        except ValueError:
            return result_handler(consts.API_ERROR, 'invalid container id')

        try:
            container = container_handler.get_by_uuid(container_id)
        except ValueError:
            return result_handler(consts.API_ERROR, 'no such container id')

        environment_id = container.environment_id

        client = Client(base_url=consts.DOCKER_URL)
        LOG.info('delete container: %s', container.name)
        try:
            client.remove_container(container.name, force=True)
        except Exception:
            LOG.exception('delete container failed')
            return result_handler(consts.API_ERROR, 'delete container failed')

        LOG.info('delete container in database')
        container_handler.delete_by_uuid(container_id)

        LOG.info('update container in environment')
        environment = environment_handler.get_by_uuid(environment_id)
        container_info = jsonutils.loads(environment.container_id)
        # Drop the service-name key whose value is this container's uuid
        # from the environment's service -> uuid mapping.
        key = next((k for k, v in container_info.items() if v == container_id))
        container_info.pop(key)
        environment_delete_data = {
            'container_id': jsonutils.dumps(container_info)
        }
        environment_handler.update_attr(environment_id, environment_delete_data)

        return result_handler(consts.API_SUCCESS, {'container': container_id})
| 37.798942 | 91 | 0.597424 | arser.set('dispatcher_influxdb', 'target',
'http://{}:{}'.format(ip, 8086))
LOG.info('Writing to %s', consts.CONF_FILE)
with open(consts.CONF_FILE, 'w') as f:
parser.write(f)
def create_grafana(self, args):
try:
environment_id = args['environment_id']
except KeyError:
return result_handler(consts.API_ERROR, 'environment_id must be provided')
try:
uuid.UUID(environment_id)
except ValueError:
return result_handler(consts.API_ERROR, 'invalid environment id')
try:
environment = environment_handler.get_by_uuid(environment_id)
except ValueError:
return result_handler(consts.API_ERROR, 'no such environment id')
container_info = environment.container_id
container_info = jsonutils.loads(container_info) if container_info else {}
if not container_info.get('influxdb'):
return result_handler(consts.API_ERROR, 'influxdb not set')
if container_info.get('grafana'):
return result_handler(consts.API_ERROR, 'grafana container already exists')
name = 'grafana-{}'.format(environment_id[:8])
port = get_free_port(consts.SERVER_IP)
container_id = str(uuid.uuid4())
args = (name, port, container_id)
thread = threading.Thread(target=self._create_grafana, args=args)
thread.start()
container_init_data = {
'uuid': container_id,
'environment_id': environment_id,
'name': name,
'port': port,
'status': 0
}
container_handler.insert(container_init_data)
container_info['grafana'] = container_id
environment_info = {'container_id': jsonutils.dumps(container_info)}
environment_handler.update_attr(environment_id, environment_info)
return result_handler(consts.API_SUCCESS, {'uuid': container_id})
def _create_grafana(self, name, port, container_id):
client = Client(base_url=consts.DOCKER_URL)
try:
LOG.info('Checking if grafana image exist')
image = '{}:{}'.format(consts.GRAFANA_IMAGE, consts.GRAFANA_TAG)
if not self._check_image_exist(client, image):
LOG.info('Grafana image not exist, start pulling')
client.pull(consts.GRAFANA_IMAGE, consts.GRAFANA_TAG)
LOG.info('Createing grafana container')
container = self._create_grafana_container(client, name, port)
LOG.info('Grafana container is created')
time.sleep(5)
container = client.inspect_container(container['Id'])
ip = container['NetworkSettings']['Networks']['bridge']['IPAddress']
LOG.debug('container ip is: %s', ip)
LOG.info('Creating data source for grafana')
self._create_data_source(ip)
LOG.info('Creating dashboard for grafana')
self._create_dashboard(ip)
container_handler.update_attr(container_id, {'status': 1})
LOG.info('Finished')
except Exception:
container_handler.update_attr(container_id, {'status': 2})
LOG.exception('Create grafana failed')
def _create_dashboard(self, ip):
url = 'http://admin:admin@{}:{}/api/dashboards/db'.format(ip, 3000)
path = os.path.join(consts.REPOS_DIR, 'dashboard', 'opnfv_yardstick_tc*.json')
for i in sorted(glob.iglob(path)):
with open(i) as f:
data = jsonutils.load(f)
try:
HttpClient().post(url, {'dashboard': data})
except Exception:
LOG.exception('Create dashboard %s failed', i)
raise
def _create_data_source(self, ip):
url = 'http://admin:admin@{}:{}/api/datasources'.format(ip, 3000)
influx_conf = utils.parse_ini_file(consts.CONF_FILE).get('dispatcher_influxdb', {})
data = {
"name": "yardstick",
"type": "influxdb",
"access": "proxy",
"url": influx_conf.get('target', ''),
"password": influx_conf.get('password', ''),
"user": influx_conf.get('username', ''),
"database": "yardstick",
"basicAuth": True,
"basicAuthUser": "admin",
"basicAuthPassword": "admin",
"isDefault": False,
}
try:
HttpClient().post(url, data)
except Exception:
LOG.exception('Create datasources failed')
raise
def _create_grafana_container(self, client, name, port):
ports = [3000]
port_bindings = {3000: port}
restart_policy = {"MaximumRetryCount": 0, "Name": "always"}
host_config = client.create_host_config(port_bindings=port_bindings,
restart_policy=restart_policy)
LOG.info('Creating container')
container = client.create_container(image='%s:%s' %
(consts.GRAFANA_IMAGE,
consts.GRAFANA_TAG),
name=name,
ports=ports,
detach=True,
tty=True,
host_config=host_config)
LOG.info('Starting container')
client.start(container)
return container
class V2Container(ApiResource):
def get(self, container_id):
try:
uuid.UUID(container_id)
except ValueError:
return result_handler(consts.API_ERROR, 'invalid container id')
try:
container = container_handler.get_by_uuid(container_id)
except ValueError:
return result_handler(consts.API_ERROR, 'no such container id')
name = container.name
client = Client(base_url=consts.DOCKER_URL)
info = client.inspect_container(name)
data = {
'name': name,
'status': info.get('State', {}).get('Status', 'error'),
'time': info.get('Created'),
'port': container.port
}
return result_handler(consts.API_SUCCESS, {'container': data})
def delete(self, container_id):
try:
uuid.UUID(container_id)
except ValueError:
return result_handler(consts.API_ERROR, 'invalid container id')
try:
container = container_handler.get_by_uuid(container_id)
except ValueError:
return result_handler(consts.API_ERROR, 'no such container id')
environment_id = container.environment_id
client = Client(base_url=consts.DOCKER_URL)
LOG.info('delete container: %s', container.name)
try:
client.remove_container(container.name, force=True)
except Exception:
LOG.exception('delete container failed')
return result_handler(consts.API_ERROR, 'delete container failed')
LOG.info('delete container in database')
container_handler.delete_by_uuid(container_id)
LOG.info('update container in environment')
environment = environment_handler.get_by_uuid(environment_id)
container_info = jsonutils.loads(environment.container_id)
key = next((k for k, v in container_info.items() if v == container_id))
container_info.pop(key)
environment_delete_data = {
'container_id': jsonutils.dumps(container_info)
}
environment_handler.update_attr(environment_id, environment_delete_data)
return result_handler(consts.API_SUCCESS, {'container': container_id})
| true | true |
f71e60c12f87497872af1c06e0fc7e53274e7bbd | 27,815 | py | Python | pymatgen/core/units.py | adozier/pymatgen | f1cc4d8db24ec11063be2fd84b4ea911f006eeb7 | [
"MIT"
] | null | null | null | pymatgen/core/units.py | adozier/pymatgen | f1cc4d8db24ec11063be2fd84b4ea911f006eeb7 | [
"MIT"
] | null | null | null | pymatgen/core/units.py | adozier/pymatgen | f1cc4d8db24ec11063be2fd84b4ea911f006eeb7 | [
"MIT"
] | null | null | null | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module implements a FloatWithUnit, which is a subclass of float. It
also defines supported units for some commonly used units for energy, length,
temperature, time and charge. FloatWithUnit also support conversion to one
another, and additions and subtractions perform automatic conversion if
units are detected. An ArrayWithUnit is also implemented, which is a subclass
of numpy's ndarray with similar unit features.
"""
from six.moves import filter, zip
__author__ = "Shyue Ping Ong, Matteo Giantomassi"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong, Matteo Giantomassi"
__status__ = "Production"
__date__ = "Aug 30, 2013"
import numpy as np
import six
import collections
from numbers import Number
import numbers
from functools import partial
import re
import scipy.constants as const
"""
Some conversion factors
"""
Ha_to_eV = 1/const.physical_constants["electron volt-hartree relationship"][0]
eV_to_Ha = 1 / Ha_to_eV
Ry_to_eV = Ha_to_eV / 2
amu_to_kg = const.physical_constants["atomic mass unit-kilogram relationship"][0]
mile_to_meters = const.mile
bohr_to_angstrom = const.physical_constants["Bohr radius"][0] * 1e10
bohr_to_ang = bohr_to_angstrom
"""
Definitions of supported units. Values below are essentially scaling and
conversion factors. What matters is the relative values, not the absolute.
The SI units must have factor 1.
"""
BASE_UNITS = {
"length": {
"m": 1,
"km": 1000,
"mile": mile_to_meters,
"ang": 1e-10,
"cm": 1e-2,
"pm": 1e-12,
"bohr": bohr_to_angstrom * 1e-10,
},
"mass": {
"kg": 1,
"g": 1e-3,
"amu": amu_to_kg,
},
"time": {
"s": 1,
"min": 60,
"h": 3600,
},
"current": {
"A": 1
},
"temperature": {
"K": 1,
},
"amount": {
"mol": 1,
"atom": 1 / const.N_A
},
"intensity": {
"cd": 1
},
"memory": {
"byte": 1,
"Kb": 1024,
"Mb": 1024**2,
"Gb": 1024**3,
"Tb": 1024**4,
},
}
# Accept kb, mb, gb ... as well.
BASE_UNITS["memory"].update({k.lower(): v
for k, v in BASE_UNITS["memory"].items()})
# This current list are supported derived units defined in terms of powers of
# SI base units and constants.
DERIVED_UNITS = {
"energy": {
"eV": {"kg": 1, "m": 2, "s": -2, const.e: 1},
"meV": {"kg": 1, "m": 2, "s": -2, const.e * 1e-3: 1},
"Ha": {"kg": 1, "m": 2, "s": -2, const.e * Ha_to_eV: 1},
"Ry": {"kg": 1, "m": 2, "s": -2, const.e * Ry_to_eV: 1},
"J": {"kg": 1, "m": 2, "s": -2},
"kJ": {"kg": 1, "m": 2, "s": -2, 1000: 1}
},
"charge": {
"C": {"A": 1, "s": 1},
"e": {"A": 1, "s": 1, const.e: 1},
},
"force": {
"N": {"kg": 1, "m": 1, "s": -2},
"KN": {"kg": 1, "m": 1, "s": -2, 1000: 1},
"MN": {"kg": 1, "m": 1, "s": -2, 1e6: 1},
"GN": {"kg": 1, "m": 1, "s": -2, 1e9: 1},
},
"pressure": {
"Pa": {"kg": 1, "m": -1, "s": -2},
"KPa": {"kg": 1, "m": -1, "s": -2, 1000: 1},
"MPa": {"kg": 1, "m": -1, "s": -2, 1e6: 1},
"GPa": {"kg": 1, "m": -1, "s": -2, 1e9: 1}
},
"power": {
"W": {"m": 2, "kg": 1, "s": -3},
"KW": {"m": 2, "kg": 1, "s": -3, 1000: 1},
"MW": {"m": 2, "kg": 1, "s": -3, 1e6: 1},
"GW": {"m": 2, "kg": 1, "s": -3, 1e9: 1}
},
"emf": {
"V": {"m": 2, "kg": 1, "s": -3, "A": -1}
},
"capacitance": {
"F": {"m": -2, "kg": -1, "s": 4, "A": 2}
},
"resistance": {
"ohm": {"m": 2, "kg": 1, "s": -3, "A": -2}
},
"conductance": {
"S": {"m": -2, "kg": -1, "s": 3, "A": 2}
},
"magnetic_flux": {
"Wb": {"m": 2, "kg": 1, "s": -2, "A": -1}
}
}
ALL_UNITS = dict(list(BASE_UNITS.items()) + list(DERIVED_UNITS.items()))
SUPPORTED_UNIT_NAMES = tuple([i for d in ALL_UNITS.values() for i in d.keys()])
# Mapping unit name --> unit type (unit names must be unique).
_UNAME2UTYPE = {}
for utype, d in ALL_UNITS.items():
assert not set(d.keys()).intersection(_UNAME2UTYPE.keys())
_UNAME2UTYPE.update({uname: utype for uname in d})
del utype, d
def _get_si_unit(unit):
    """Return the SI base unit of the same type as *unit* and its factor.

    Args:
        unit: Name of a base unit, e.g., "km".

    Returns:
        Tuple of (name of the SI unit for that unit type, the scaling
        factor that converts *unit* into that SI unit).
    """
    utype_units = BASE_UNITS[_UNAME2UTYPE[unit]]
    # By construction the SI unit is the entry whose scaling factor is 1.
    si_name = next(name for name, factor in utype_units.items() if factor == 1)
    return si_name, utype_units[unit]
class UnitError(BaseException):
    """
    Exception class for unit errors, e.g., raised when attempting a
    conversion between incompatible unit types.
    """
    # NOTE(review): this subclasses BaseException rather than Exception, so a
    # plain ``except Exception`` will NOT catch unit errors -- confirm this
    # is intentional before changing the base class, as callers may rely on
    # catching UnitError directly.
def check_mappings(u):
    """Collapse a power mapping into a named derived unit when it matches one.

    E.g., {"kg": 1, "m": 2, "s": -2} collapses to {"J": 1}. The mapping *u*
    is returned unchanged when no entry of DERIVED_UNITS matches it exactly.
    """
    def _equivalent(lhs, rhs):
        # Two mappings are equivalent when every power agrees, a missing
        # key counting as a power of zero.
        return (all(rhs.get(k, 0) == p for k, p in lhs.items()) and
                all(lhs.get(k, 0) == p for k, p in rhs.items()))

    for derived in DERIVED_UNITS.values():
        for uname, composition in derived.items():
            if _equivalent(u, composition):
                return {uname: 1}
    return u
class Unit(collections.Mapping):
    """
    Represents a unit, e.g., "m" for meters, etc. Supports compound units.
    Only integer powers are supported for units.

    A Unit behaves as an immutable mapping from unit name to integer power;
    e.g., Unit("kg m^2 s^-2") maps to {"kg": 1, "m": 2, "s": -2}.
    """
    # NOTE(review): collections.Mapping is the pre-3.3 alias of
    # collections.abc.Mapping and was removed in Python 3.10 -- verify the
    # supported interpreter range before relying on it.

    # Alias so callers can catch unit errors as Unit.Error.
    Error = UnitError

    def __init__(self, unit_def):
        """
        Constructs a unit.

        Args:
            unit_def: A definition for the unit. Either a mapping of unit to
                powers, e.g., {"m": 2, "s": -1} represents "m^2 s^-1",
                or simply as a string "kg m^2 s^-1". Note that the supported
                format uses "^" as the power operator and all units must be
                space-separated.
        """
        if isinstance(unit_def, six.string_types):
            unit = collections.defaultdict(int)
            # Tokens look like "kg", "m^2" or "s^-1"; an absent power means 1.
            for m in re.finditer("([A-Za-z]+)\s*\^*\s*([\-0-9]*)", unit_def):
                p = m.group(2)
                p = 1 if not p else int(p)
                k = m.group(1)
                unit[k] += p
        else:
            # Drop zero powers so equivalent definitions compare equal.
            unit = {k: v for k, v in dict(unit_def).items() if v != 0}
        # Collapse to a single known derived unit (e.g. kg m^2 s^-2 -> J)
        # when the mapping matches one exactly.
        self._unit = check_mappings(unit)

    def __mul__(self, other):
        # Product of units adds the powers of each component.
        new_units = collections.defaultdict(int)
        for k, v in self.items():
            new_units[k] += v
        for k, v in other.items():
            new_units[k] += v
        return Unit(new_units)

    def __rmul__(self, other):
        return self.__mul__(other)

    def __div__(self, other):
        # Quotient of units subtracts the powers of the divisor.
        new_units = collections.defaultdict(int)
        for k, v in self.items():
            new_units[k] += v
        for k, v in other.items():
            new_units[k] -= v
        return Unit(new_units)

    def __truediv__(self, other):
        return self.__div__(other)

    def __pow__(self, i):
        # Only integer powers are supported; see class docstring.
        return Unit({k: v * i for k, v in self.items()})

    def __iter__(self):
        return self._unit.__iter__()

    def __getitem__(self, i):
        return self._unit[i]

    def __len__(self):
        return len(self._unit)

    def __repr__(self):
        # Render e.g. "kg m^2 s^-2": positive powers first, then by name.
        # A power of 1 prints as the bare unit; zero powers are hidden.
        sorted_keys = sorted(self._unit.keys(),
                             key=lambda k: (-self._unit[k], k))
        return " ".join(["{}^{}".format(k, self._unit[k])
                         if self._unit[k] != 1 else k
                         for k in sorted_keys if self._unit[k] != 0])

    def __str__(self):
        return self.__repr__()

    @property
    def as_base_units(self):
        """
        Converts all units to base SI units, including derived units.

        Returns:
            (base_units_dict, scaling factor). base_units_dict will not
            contain any constants, which are gathered in the scaling factor.
        """
        b = collections.defaultdict(int)
        factor = 1
        for k, v in self.items():
            derived = False
            for d in DERIVED_UNITS.values():
                if k in d:
                    # Numeric keys in a derived-unit definition are physical
                    # constants (e.g. the electron charge for eV); fold them
                    # into the scaling factor rather than the unit dict.
                    for k2, v2 in d[k].items():
                        if isinstance(k2, Number):
                            factor *= k2 ** (v2 * v)
                        else:
                            b[k2] += v2 * v
                    derived = True
                    break
            if not derived:
                si, f = _get_si_unit(k)
                b[si] += v
                factor *= f ** v
        return {k: v for k, v in b.items() if v != 0}, factor

    def get_conversion_factor(self, new_unit):
        """
        Returns a conversion factor between this unit and a new unit.
        Compound units are supported, but must have the same powers in each
        unit type.

        Args:
            new_unit: The new unit.

        Raises:
            UnitError: If the two units do not have matching powers for
                each unit type.
        """
        uo_base, ofactor = self.as_base_units
        un_base, nfactor = Unit(new_unit).as_base_units
        # Sort both sides by unit type so corresponding dimensions line up
        # in the pairwise comparison below.
        units_new = sorted(un_base.items(),
                           key=lambda d: _UNAME2UTYPE[d[0]])
        units_old = sorted(uo_base.items(),
                           key=lambda d: _UNAME2UTYPE[d[0]])
        factor = ofactor / nfactor
        for uo, un in zip(units_old, units_new):
            if uo[1] != un[1]:
                raise UnitError("Units %s and %s are not compatible!" % (uo, un))
            c = ALL_UNITS[_UNAME2UTYPE[uo[0]]]
            factor *= (c[uo[0]] / c[un[0]]) ** uo[1]
        return factor
class FloatWithUnit(float):
    """
    Subclasses float to attach a unit type. Typically, you should use the
    pre-defined unit type subclasses such as Energy, Length, etc. instead of
    using FloatWithUnit directly.
    Supports conversion, addition and subtraction of the same unit type. E.g.,
    1 m + 20 cm will be automatically converted to 1.2 m (units follow the
    leftmost quantity). Note that FloatWithUnit does not override the eq
    method for float, i.e., units are not checked when testing for equality.
    The reason is to allow this class to be used transparently wherever floats
    are expected.
    >>> e = Energy(1.1, "Ha")
    >>> a = Energy(1.1, "Ha")
    >>> b = Energy(3, "eV")
    >>> c = a + b
    >>> print(c)
    1.2102479761938871 Ha
    >>> c.to("eV")
    32.932522246000005 eV
    """
    # Exception type raised for invalid or incompatible units.
    Error = UnitError
    @classmethod
    def from_string(cls, s):
        """
        Initialize a FloatWithUnit from a string. Example Memory.from_string("1. Mb")
        """
        # Extract num and unit string.
        s = s.strip()
        for i, char in enumerate(s):
            if char.isalpha() or char.isspace():
                break
        else:
            # for/else: no alphabetic/space char found => purely numeric.
            raise Exception("Unit is missing in string %s" % s)
        num, unit = float(s[:i]), s[i:]
        # Find unit type (set it to None if it cannot be detected)
        for unit_type, d in BASE_UNITS.items():
            if unit in d:
                break
        else:
            unit_type = None
        return cls(num, unit, unit_type=unit_type)
    def __new__(cls, val, unit, unit_type=None):
        # float is immutable, so the value must be fixed in __new__;
        # the unit metadata is attached here as well.
        new = float.__new__(cls, val)
        new._unit = Unit(unit)
        new._unit_type = unit_type
        return new
    def __init__(self, val, unit, unit_type=None):
        """
        Initializes a float with unit.
        Args:
            val (float): Value
            unit (Unit): A unit. E.g., "C".
            unit_type (str): A type of unit. E.g., "charge"
        Raises:
            UnitError: if unit is not valid for the declared unit_type.
        """
        # Validation lives here (not in __new__); the attribute
        # assignments repeat what __new__ already did.
        if unit_type is not None and str(unit) not in ALL_UNITS[unit_type]:
            raise UnitError(
                "{} is not a supported unit for {}".format(unit, unit_type))
        self._unit = Unit(unit)
        self._unit_type = unit_type
    def __repr__(self):
        # Plain float repr, deliberately without the unit.
        return super(FloatWithUnit, self).__repr__()
    def __str__(self):
        # str includes the unit, e.g. "1.1 Ha".
        s = super(FloatWithUnit, self).__str__()
        return "{} {}".format(s, self._unit)
    def __add__(self, other):
        # Bare numbers fall through to plain float addition.
        if not hasattr(other, "unit_type"):
            return super(FloatWithUnit, self).__add__(other)
        if other.unit_type != self._unit_type:
            raise UnitError("Adding different types of units is not allowed")
        val = other
        if other.unit != self._unit:
            # Units follow the leftmost quantity.
            val = other.to(self._unit)
        return FloatWithUnit(float(self) + val, unit_type=self._unit_type,
                             unit=self._unit)
    def __sub__(self, other):
        if not hasattr(other, "unit_type"):
            return super(FloatWithUnit, self).__sub__(other)
        if other.unit_type != self._unit_type:
            raise UnitError("Subtracting different units is not allowed")
        val = other
        if other.unit != self._unit:
            val = other.to(self._unit)
        return FloatWithUnit(float(self) - val, unit_type=self._unit_type,
                             unit=self._unit)
    def __mul__(self, other):
        # Multiplying by a bare number keeps unit and unit_type; the
        # product of two united quantities multiplies the units but drops
        # unit_type, since the result may be an arbitrary derived quantity.
        if not isinstance(other, FloatWithUnit):
            return FloatWithUnit(float(self) * other,
                                 unit_type=self._unit_type,
                                 unit=self._unit)
        return FloatWithUnit(float(self) * other, unit_type=None,
                             unit=self._unit * other._unit)
    def __rmul__(self, other):
        if not isinstance(other, FloatWithUnit):
            return FloatWithUnit(float(self) * other,
                                 unit_type=self._unit_type,
                                 unit=self._unit)
        return FloatWithUnit(float(self) * other, unit_type=None,
                             unit=self._unit * other._unit)
    def __pow__(self, i):
        # The unit is raised to the same power as the value.
        return FloatWithUnit(float(self) ** i, unit_type=None,
                             unit=self._unit ** i)
    def __div__(self, other):
        # Python 2 classic division counterpart of __truediv__.
        val = super(FloatWithUnit, self).__div__(other)
        if not isinstance(other, FloatWithUnit):
            return FloatWithUnit(val, unit_type=self._unit_type,
                                 unit=self._unit)
        return FloatWithUnit(val, unit_type=None,
                             unit=self._unit / other._unit)
    def __truediv__(self, other):
        val = super(FloatWithUnit, self).__truediv__(other)
        if not isinstance(other, FloatWithUnit):
            return FloatWithUnit(val, unit_type=self._unit_type,
                                 unit=self._unit)
        return FloatWithUnit(val, unit_type=None,
                             unit=self._unit / other._unit)
    def __neg__(self):
        return FloatWithUnit(super(FloatWithUnit, self).__neg__(),
                             unit_type=self._unit_type,
                             unit=self._unit)
    def __getnewargs__(self):
        """Function used by pickle to recreate object."""
        #print(self.__dict__)
        # FIXME
        # There's a problem with _unit_type if we try to unpickle objects from file.
        # since self._unit_type might not be defined. I think this is due to
        # the use of decorators (property and unitized). In particular I have problems with "amu"
        # likely due to weight in core.composition
        if hasattr(self, "_unit_type"):
            args = float(self), self._unit, self._unit_type
        else:
            args = float(self), self._unit, None
        return args
    def __getstate__(self):
        # Snapshot the instance dict plus the float value for pickling.
        state = self.__dict__.copy()
        state["val"] = float(self)
        #print("in getstate %s" % state)
        return state
    def __setstate__(self, state):
        #print("in setstate %s" % state)
        # NOTE(review): only _unit is restored here; _unit_type comes from
        # __getnewargs__ during unpickling -- confirm before relying on it.
        self._unit = state["_unit"]
    @property
    def unit_type(self):
        """The unit type string (e.g. "energy"), or None if undeclared."""
        return self._unit_type
    @property
    def unit(self):
        """The :class:`Unit` attached to this value."""
        return self._unit
    def to(self, new_unit):
        """
        Conversion to a new_unit. Right now, only supports 1 to 1 mapping of
        units of each type.
        Args:
            new_unit: New unit type.
        Returns:
            A FloatWithUnit object in the new units.
        Example usage:
        >>> e = Energy(1.1, "eV")
        >>> e = Energy(1.1, "Ha")
        >>> e.to("eV")
        29.932522246 eV
        """
        return FloatWithUnit(
            self * self.unit.get_conversion_factor(new_unit),
            unit_type=self._unit_type,
            unit=new_unit)
    @property
    def as_base_units(self):
        """
        Returns this FloatWithUnit in base SI units, including derived units.
        Returns:
            A FloatWithUnit object in base SI units
        """
        return self.to(self.unit.as_base_units[0])
    @property
    def supported_units(self):
        """
        Supported units for specific unit type.
        """
        return tuple(ALL_UNITS[self._unit_type].keys())
class ArrayWithUnit(np.ndarray):
    """
    Subclasses `numpy.ndarray` to attach a unit type. Typically, you should
    use the pre-defined unit type subclasses such as EnergyArray,
    LengthArray, etc. instead of using ArrayWithFloatWithUnit directly.
    Supports conversion, addition and subtraction of the same unit type. E.g.,
    1 m + 20 cm will be automatically converted to 1.2 m (units follow the
    leftmost quantity).
    >>> a = EnergyArray([1, 2], "Ha")
    >>> b = EnergyArray([1, 2], "eV")
    >>> c = a + b
    >>> print(c)
    [ 1.03674933 2.07349865] Ha
    >>> c.to("eV")
    array([ 28.21138386, 56.42276772]) eV
    """
    # Exception type raised for invalid or incompatible units.
    Error = UnitError
    def __new__(cls, input_array, unit, unit_type=None):
        # Input array is an already formed ndarray instance
        # We first cast to be our class type
        obj = np.asarray(input_array).view(cls)
        # add the new attributes to the created instance
        obj._unit = Unit(unit)
        obj._unit_type = unit_type
        return obj
    def __array_finalize__(self, obj):
        """
        See http://docs.scipy.org/doc/numpy/user/basics.subclassing.html for
        comments.
        """
        if obj is None:
            return
        # Propagate unit metadata onto views/slices; default to None when
        # the source object carries no unit information.
        self._unit = getattr(obj, "_unit", None)
        self._unit_type = getattr(obj, "_unit_type", None)
    #TODO abstract base class property?
    @property
    def unit_type(self):
        """The unit type string (e.g. "energy"), or None if undeclared."""
        return self._unit_type
    #TODO abstract base class property?
    @property
    def unit(self):
        """The :class:`Unit` attached to this array."""
        return self._unit
    def __reduce__(self):
        """Support pickling by bundling the unit with the ndarray state."""
        #print("in reduce")
        reduce = list(super(ArrayWithUnit, self).__reduce__())
        #print("unit",self._unit)
        #print(reduce[2])
        reduce[2] = {"np_state": reduce[2], "_unit": self._unit}
        return tuple(reduce)
    def __setstate__(self, state):
        #print("in setstate %s" % str(state))
        # NOTE(review): _unit_type is not restored on unpickling -- confirm.
        super(ArrayWithUnit, self).__setstate__(state["np_state"])
        self._unit = state["_unit"]
    def __repr__(self):
        return "{} {}".format(np.array(self).__repr__(), self.unit)
    def __str__(self):
        return "{} {}".format(np.array(self).__str__(), self.unit)
    def __add__(self, other):
        if hasattr(other, "unit_type"):
            if other.unit_type != self.unit_type:
                raise UnitError("Adding different types of units is"
                                " not allowed")
            if other.unit != self.unit:
                # Units follow the leftmost quantity.
                other = other.to(self.unit)
        return self.__class__(np.array(self) + np.array(other),
                              unit_type=self.unit_type, unit=self.unit)
    def __sub__(self, other):
        if hasattr(other, "unit_type"):
            if other.unit_type != self.unit_type:
                raise UnitError("Subtracting different units is not allowed")
            if other.unit != self.unit:
                other = other.to(self.unit)
        return self.__class__(np.array(self) - np.array(other),
                              unit_type=self.unit_type, unit=self.unit)
    def __mul__(self, other):
        # FIXME
        # Here we have the most important difference between FloatWithUnit and
        # ArrayWithFloatWithUnit:
        # If other does not have units, I return an object with the same units
        # as self.
        # if other *has* units, I return an object *without* units since
        # taking into account all the possible derived quantities would be
        # too difficult.
        # Moreover Energy(1.0) * Time(1.0, "s") returns 1.0 Ha that is a
        # bit misleading.
        # Same protocol for __div__
        if not hasattr(other, "unit_type"):
            return self.__class__(np.array(self).__mul__(np.array(other)),
                                  unit_type=self._unit_type, unit=self._unit)
        else:
            # Cannot use super since it returns an instance of self.__class__
            # while here we want a bare numpy array.
            return self.__class__(
                np.array(self).__mul__(np.array(other)),
                unit=self.unit * other.unit)
    def __rmul__(self, other):
        if not hasattr(other, "unit_type"):
            return self.__class__(np.array(self).__rmul__(np.array(other)),
                                  unit_type=self._unit_type, unit=self._unit)
        else:
            return self.__class__(
                np.array(self).__rmul__(np.array(other)),
                unit=self.unit * other.unit)
    def __div__(self, other):
        # Python 2 classic division counterpart of __truediv__.
        if not hasattr(other, "unit_type"):
            return self.__class__(np.array(self).__div__(np.array(other)),
                                  unit_type=self._unit_type, unit=self._unit)
        else:
            return self.__class__(
                np.array(self).__div__(np.array(other)),
                unit=self.unit/other.unit)
    def __truediv__(self, other):
        if not hasattr(other, "unit_type"):
            return self.__class__(np.array(self).__truediv__(np.array(other)),
                                  unit_type=self._unit_type, unit=self._unit)
        else:
            return self.__class__(
                np.array(self).__truediv__(np.array(other)),
                unit=self.unit / other.unit)
    def __neg__(self):
        return self.__class__(np.array(self).__neg__(),
                              unit_type=self.unit_type, unit=self.unit)
    def to(self, new_unit):
        """
        Conversion to a new_unit.
        Args:
            new_unit:
                New unit type.
        Returns:
            A ArrayWithFloatWithUnit object in the new units.
        Example usage:
        >>> e = EnergyArray([1, 1.1], "Ha")
        >>> e.to("eV")
        array([ 27.21138386, 29.93252225]) eV
        """
        return self.__class__(
            np.array(self) * self.unit.get_conversion_factor(new_unit),
            unit_type=self.unit_type, unit=new_unit)
    @property
    def as_base_units(self):
        """
        Returns this ArrayWithUnit in base SI units, including derived units.
        Returns:
            An ArrayWithUnit object in base SI units
        """
        return self.to(self.unit.as_base_units[0])
    #TODO abstract base class property?
    @property
    def supported_units(self):
        """
        Supported units for specific unit type.
        """
        return ALL_UNITS[self.unit_type]
    #TODO abstract base class method?
    def conversions(self):
        """
        Returns a string showing the available conversions.
        Useful tool in interactive mode.
        """
        return "\n".join(str(self.to(unit)) for unit in self.supported_units)
def _my_partial(func, *args, **kwargs):
    """
    Create a ``functools.partial`` that also exposes
    ``FloatWithUnit.from_string``.

    Partial objects do not inherit class methods defined on the wrapped
    class, so the attribute is patched onto the new object before it is
    returned.
    """
    wrapped = partial(func, *args, **kwargs)
    # monkey patch the classmethod the bare partial would otherwise lack
    wrapped.from_string = FloatWithUnit.from_string
    return wrapped
Energy = partial(FloatWithUnit, unit_type="energy")
"""
A float with an energy unit.
Args:
val (float): Value
unit (Unit): E.g., eV, kJ, etc. Must be valid unit or UnitError is raised.
"""
EnergyArray = partial(ArrayWithUnit, unit_type="energy")
Length = partial(FloatWithUnit, unit_type="length")
"""
A float with a length unit.
Args:
val (float): Value
unit (Unit): E.g., m, ang, bohr, etc. Must be valid unit or UnitError is
raised.
"""
LengthArray = partial(ArrayWithUnit, unit_type="length")
Mass = partial(FloatWithUnit, unit_type="mass")
"""
A float with a mass unit.
Args:
val (float): Value
unit (Unit): E.g., amu, kg, etc. Must be valid unit or UnitError is
raised.
"""
MassArray = partial(ArrayWithUnit, unit_type="mass")
Temp = partial(FloatWithUnit, unit_type="temperature")
"""
A float with a temperature unit.
Args:
val (float): Value
unit (Unit): E.g., K. Only K (kelvin) is supported.
"""
TempArray = partial(ArrayWithUnit, unit_type="temperature")
Time = partial(FloatWithUnit, unit_type="time")
"""
A float with a time unit.
Args:
val (float): Value
unit (Unit): E.g., s, min, h. Must be valid unit or UnitError is
raised.
"""
TimeArray = partial(ArrayWithUnit, unit_type="time")
Charge = partial(FloatWithUnit, unit_type="charge")
"""
A float with a charge unit.
Args:
val (float): Value
unit (Unit): E.g., C, e (electron charge). Must be valid unit or UnitError
is raised.
"""
ChargeArray = partial(ArrayWithUnit, unit_type="charge")
Memory = _my_partial(FloatWithUnit, unit_type="memory")
"""
A float with a memory unit.
Args:
val (float): Value
unit (Unit): E.g., Kb, Mb, Gb, Tb. Must be valid unit or UnitError
is raised.
"""
def obj_with_unit(obj, unit):
    """
    Attach ``unit`` to an object.

    Returns a `FloatWithUnit` instance if obj is scalar, a dictionary of
    objects with units if obj is a dict, else an instance of
    `ArrayWithUnit`.

    Args:
        obj: Scalar, mapping or array-like object.
        unit: Specific units (eV, Ha, m, ang, etc.).
    """
    utype = _UNAME2UTYPE[unit]
    if isinstance(obj, numbers.Number):
        return FloatWithUnit(obj, unit=unit, unit_type=utype)
    if isinstance(obj, collections.Mapping):
        # Recurse so every value in the mapping gets the unit attached.
        return {key: obj_with_unit(value, unit)
                for key, value in obj.items()}
    return ArrayWithUnit(obj, unit=unit, unit_type=utype)
def unitized(unit):
    """
    Useful decorator to assign units to the output of a function. You can also
    use it to standardize the output units of a function that already returns
    a FloatWithUnit or ArrayWithUnit. For sequences, all values in the sequences
    are assigned the same unit. It works with Python sequences only. The creation
    of numpy arrays loses all unit information. For mapping types, the values
    are assigned units.

    Args:
        unit: Specific unit (eV, Ha, m, ang, etc.).

    Example usage::
        @unitized(unit="kg")
        def get_mass():
            return 123.45
    """
    def decorator(func):
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            utype = _UNAME2UTYPE[unit]
            if isinstance(result, (FloatWithUnit, ArrayWithUnit)):
                # Already united: just standardize to the requested unit.
                return result.to(unit)
            if isinstance(result, collections.Sequence):
                # TODO: why don't we return a ArrayWithUnit?
                # Rebuild through the original container class so the
                # sequence type (list or tuple) is preserved.
                return result.__class__(
                    [FloatWithUnit(item, unit_type=utype, unit=unit)
                     for item in result])
            if isinstance(result, collections.Mapping):
                # Assign units to the values in place.
                for key, value in result.items():
                    result[key] = FloatWithUnit(value, unit_type=utype,
                                                unit=unit)
            elif isinstance(result, numbers.Number):
                return FloatWithUnit(result, unit_type=utype, unit=unit)
            elif result is not None:
                raise TypeError("Don't know how to assign units to %s"
                                % str(result))
            # Mapping (mutated above) and None fall through unchanged.
            return result
        return wrapper
    return decorator
if __name__ == "__main__":
    import doctest
    # Run the doctests embedded in the docstrings of this module.
    doctest.testmod()
| 31.971264 | 97 | 0.565846 |
from __future__ import division, unicode_literals
from six.moves import filter, zip
__author__ = "Shyue Ping Ong, Matteo Giantomassi"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong, Matteo Giantomassi"
__status__ = "Production"
__date__ = "Aug 30, 2013"
import numpy as np
import six
import collections
from numbers import Number
import numbers
from functools import partial
import re
import scipy.constants as const
# Conversion constants derived from scipy.constants (CODATA values).
Ha_to_eV = 1/const.physical_constants["electron volt-hartree relationship"][0]
eV_to_Ha = 1 / Ha_to_eV
# 1 Rydberg is half a Hartree.
Ry_to_eV = Ha_to_eV / 2
amu_to_kg = const.physical_constants["atomic mass unit-kilogram relationship"][0]
mile_to_meters = const.mile
# Bohr radius in angstroms (the CODATA value is in meters; 1 m = 1e10 ang).
bohr_to_angstrom = const.physical_constants["Bohr radius"][0] * 1e10
bohr_to_ang = bohr_to_angstrom  # backwards-compatible alias
# Mapping of unit type -> {unit symbol: multiplicative factor relative to
# the SI base unit of that type}; the SI unit itself carries factor 1.
BASE_UNITS = {
    "length": {
        "m": 1,
        "km": 1000,
        "mile": mile_to_meters,
        "ang": 1e-10,
        "cm": 1e-2,
        "pm": 1e-12,
        "bohr": bohr_to_angstrom * 1e-10,
    },
    "mass": {
        "kg": 1,
        "g": 1e-3,
        "amu": amu_to_kg,
    },
    "time": {
        "s": 1,
        "min": 60,
        "h": 3600,
    },
    "current": {
        "A": 1
    },
    "temperature": {
        "K": 1,
    },
    "amount": {
        "mol": 1,
        "atom": 1 / const.N_A
    },
    "intensity": {
        "cd": 1
    },
    "memory": {
        "byte": 1,
        "Kb": 1024,
        "Mb": 1024**2,
        "Gb": 1024**3,
        "Tb": 1024**4,
    },
}
# Memory units are accepted case-insensitively ("Kb" and "kb", etc.).
BASE_UNITS["memory"].update({k.lower(): v
                             for k, v in BASE_UNITS["memory"].items()})
# Derived units expressed as powers of base units. Numeric keys are
# multiplicative constants (e.g. the elementary charge for eV) rather
# than unit symbols; Unit.as_base_units folds them into the scale factor.
DERIVED_UNITS = {
    "energy": {
        "eV": {"kg": 1, "m": 2, "s": -2, const.e: 1},
        "meV": {"kg": 1, "m": 2, "s": -2, const.e * 1e-3: 1},
        "Ha": {"kg": 1, "m": 2, "s": -2, const.e * Ha_to_eV: 1},
        "Ry": {"kg": 1, "m": 2, "s": -2, const.e * Ry_to_eV: 1},
        "J": {"kg": 1, "m": 2, "s": -2},
        "kJ": {"kg": 1, "m": 2, "s": -2, 1000: 1}
    },
    "charge": {
        "C": {"A": 1, "s": 1},
        "e": {"A": 1, "s": 1, const.e: 1},
    },
    "force": {
        "N": {"kg": 1, "m": 1, "s": -2},
        "KN": {"kg": 1, "m": 1, "s": -2, 1000: 1},
        "MN": {"kg": 1, "m": 1, "s": -2, 1e6: 1},
        "GN": {"kg": 1, "m": 1, "s": -2, 1e9: 1},
    },
    "pressure": {
        "Pa": {"kg": 1, "m": -1, "s": -2},
        "KPa": {"kg": 1, "m": -1, "s": -2, 1000: 1},
        "MPa": {"kg": 1, "m": -1, "s": -2, 1e6: 1},
        "GPa": {"kg": 1, "m": -1, "s": -2, 1e9: 1}
    },
    "power": {
        "W": {"m": 2, "kg": 1, "s": -3},
        "KW": {"m": 2, "kg": 1, "s": -3, 1000: 1},
        "MW": {"m": 2, "kg": 1, "s": -3, 1e6: 1},
        "GW": {"m": 2, "kg": 1, "s": -3, 1e9: 1}
    },
    "emf": {
        "V": {"m": 2, "kg": 1, "s": -3, "A": -1}
    },
    "capacitance": {
        "F": {"m": -2, "kg": -1, "s": 4, "A": 2}
    },
    "resistance": {
        "ohm": {"m": 2, "kg": 1, "s": -3, "A": -2}
    },
    "conductance": {
        "S": {"m": -2, "kg": -1, "s": 3, "A": 2}
    },
    "magnetic_flux": {
        "Wb": {"m": 2, "kg": 1, "s": -2, "A": -1}
    }
}
# Combined table of every unit type (base + derived).
ALL_UNITS = dict(list(BASE_UNITS.items()) + list(DERIVED_UNITS.items()))
SUPPORTED_UNIT_NAMES = tuple([i for d in ALL_UNITS.values() for i in d.keys()])
# Mapping unit name -> unit type (e.g. "eV" -> "energy"). Unit names must
# therefore be unique across all unit types, which the assert enforces.
_UNAME2UTYPE = {}
for utype, d in ALL_UNITS.items():
    assert not set(d.keys()).intersection(_UNAME2UTYPE.keys())
    _UNAME2UTYPE.update({uname: utype for uname in d})
del utype, d
def _get_si_unit(unit):
    """
    Return the SI base unit of ``unit``'s type together with the factor
    that converts ``unit`` into that SI unit.
    """
    utype = _UNAME2UTYPE[unit]
    conversions = BASE_UNITS[utype]
    # The SI unit of a type is the (single) entry whose factor is 1.
    si_name = [name for name, factor in conversions.items() if factor == 1][0]
    return si_name, conversions[unit]
class UnitError(BaseException):
def check_mappings(u):
for v in DERIVED_UNITS.values():
for k2, v2 in v.items():
if all([v2.get(ku, 0) == vu for ku, vu in u.items()]) and \
all([u.get(kv2, 0) == vv2 for kv2, vv2 in v2.items()]):
return {k2: 1}
return u
class Unit(collections.Mapping):
Error = UnitError
def __init__(self, unit_def):
if isinstance(unit_def, six.string_types):
unit = collections.defaultdict(int)
for m in re.finditer("([A-Za-z]+)\s*\^*\s*([\-0-9]*)", unit_def):
p = m.group(2)
p = 1 if not p else int(p)
k = m.group(1)
unit[k] += p
else:
unit = {k: v for k, v in dict(unit_def).items() if v != 0}
self._unit = check_mappings(unit)
def __mul__(self, other):
new_units = collections.defaultdict(int)
for k, v in self.items():
new_units[k] += v
for k, v in other.items():
new_units[k] += v
return Unit(new_units)
def __rmul__(self, other):
return self.__mul__(other)
def __div__(self, other):
new_units = collections.defaultdict(int)
for k, v in self.items():
new_units[k] += v
for k, v in other.items():
new_units[k] -= v
return Unit(new_units)
def __truediv__(self, other):
return self.__div__(other)
def __pow__(self, i):
return Unit({k: v * i for k, v in self.items()})
def __iter__(self):
return self._unit.__iter__()
def __getitem__(self, i):
return self._unit[i]
def __len__(self):
return len(self._unit)
def __repr__(self):
sorted_keys = sorted(self._unit.keys(),
key=lambda k: (-self._unit[k], k))
return " ".join(["{}^{}".format(k, self._unit[k])
if self._unit[k] != 1 else k
for k in sorted_keys if self._unit[k] != 0])
def __str__(self):
return self.__repr__()
@property
def as_base_units(self):
b = collections.defaultdict(int)
factor = 1
for k, v in self.items():
derived = False
for d in DERIVED_UNITS.values():
if k in d:
for k2, v2 in d[k].items():
if isinstance(k2, Number):
factor *= k2 ** (v2 * v)
else:
b[k2] += v2 * v
derived = True
break
if not derived:
si, f = _get_si_unit(k)
b[si] += v
factor *= f ** v
return {k: v for k, v in b.items() if v != 0}, factor
def get_conversion_factor(self, new_unit):
uo_base, ofactor = self.as_base_units
un_base, nfactor = Unit(new_unit).as_base_units
units_new = sorted(un_base.items(),
key=lambda d: _UNAME2UTYPE[d[0]])
units_old = sorted(uo_base.items(),
key=lambda d: _UNAME2UTYPE[d[0]])
factor = ofactor / nfactor
for uo, un in zip(units_old, units_new):
if uo[1] != un[1]:
raise UnitError("Units %s and %s are not compatible!" % (uo, un))
c = ALL_UNITS[_UNAME2UTYPE[uo[0]]]
factor *= (c[uo[0]] / c[un[0]]) ** uo[1]
return factor
class FloatWithUnit(float):
Error = UnitError
@classmethod
def from_string(cls, s):
s = s.strip()
for i, char in enumerate(s):
if char.isalpha() or char.isspace():
break
else:
raise Exception("Unit is missing in string %s" % s)
num, unit = float(s[:i]), s[i:]
for unit_type, d in BASE_UNITS.items():
if unit in d:
break
else:
unit_type = None
return cls(num, unit, unit_type=unit_type)
def __new__(cls, val, unit, unit_type=None):
new = float.__new__(cls, val)
new._unit = Unit(unit)
new._unit_type = unit_type
return new
def __init__(self, val, unit, unit_type=None):
if unit_type is not None and str(unit) not in ALL_UNITS[unit_type]:
raise UnitError(
"{} is not a supported unit for {}".format(unit, unit_type))
self._unit = Unit(unit)
self._unit_type = unit_type
def __repr__(self):
return super(FloatWithUnit, self).__repr__()
def __str__(self):
s = super(FloatWithUnit, self).__str__()
return "{} {}".format(s, self._unit)
def __add__(self, other):
if not hasattr(other, "unit_type"):
return super(FloatWithUnit, self).__add__(other)
if other.unit_type != self._unit_type:
raise UnitError("Adding different types of units is not allowed")
val = other
if other.unit != self._unit:
val = other.to(self._unit)
return FloatWithUnit(float(self) + val, unit_type=self._unit_type,
unit=self._unit)
def __sub__(self, other):
if not hasattr(other, "unit_type"):
return super(FloatWithUnit, self).__sub__(other)
if other.unit_type != self._unit_type:
raise UnitError("Subtracting different units is not allowed")
val = other
if other.unit != self._unit:
val = other.to(self._unit)
return FloatWithUnit(float(self) - val, unit_type=self._unit_type,
unit=self._unit)
def __mul__(self, other):
if not isinstance(other, FloatWithUnit):
return FloatWithUnit(float(self) * other,
unit_type=self._unit_type,
unit=self._unit)
return FloatWithUnit(float(self) * other, unit_type=None,
unit=self._unit * other._unit)
def __rmul__(self, other):
if not isinstance(other, FloatWithUnit):
return FloatWithUnit(float(self) * other,
unit_type=self._unit_type,
unit=self._unit)
return FloatWithUnit(float(self) * other, unit_type=None,
unit=self._unit * other._unit)
def __pow__(self, i):
return FloatWithUnit(float(self) ** i, unit_type=None,
unit=self._unit ** i)
def __div__(self, other):
val = super(FloatWithUnit, self).__div__(other)
if not isinstance(other, FloatWithUnit):
return FloatWithUnit(val, unit_type=self._unit_type,
unit=self._unit)
return FloatWithUnit(val, unit_type=None,
unit=self._unit / other._unit)
def __truediv__(self, other):
val = super(FloatWithUnit, self).__truediv__(other)
if not isinstance(other, FloatWithUnit):
return FloatWithUnit(val, unit_type=self._unit_type,
unit=self._unit)
return FloatWithUnit(val, unit_type=None,
unit=self._unit / other._unit)
def __neg__(self):
return FloatWithUnit(super(FloatWithUnit, self).__neg__(),
unit_type=self._unit_type,
unit=self._unit)
def __getnewargs__(self):
# since self._unit_type might not be defined. I think this is due to
# the use of decorators (property and unitized). In particular I have problems with "amu"
# likely due to weight in core.composition
if hasattr(self, "_unit_type"):
args = float(self), self._unit, self._unit_type
else:
args = float(self), self._unit, None
return args
def __getstate__(self):
state = self.__dict__.copy()
state["val"] = float(self)
#print("in getstate %s" % state)
return state
def __setstate__(self, state):
#print("in setstate %s" % state)
self._unit = state["_unit"]
@property
def unit_type(self):
return self._unit_type
@property
def unit(self):
return self._unit
def to(self, new_unit):
return FloatWithUnit(
self * self.unit.get_conversion_factor(new_unit),
unit_type=self._unit_type,
unit=new_unit)
@property
def as_base_units(self):
return self.to(self.unit.as_base_units[0])
@property
def supported_units(self):
return tuple(ALL_UNITS[self._unit_type].keys())
class ArrayWithUnit(np.ndarray):
Error = UnitError
def __new__(cls, input_array, unit, unit_type=None):
# Input array is an already formed ndarray instance
# We first cast to be our class type
obj = np.asarray(input_array).view(cls)
# add the new attributes to the created instance
obj._unit = Unit(unit)
obj._unit_type = unit_type
return obj
def __array_finalize__(self, obj):
if obj is None:
return
self._unit = getattr(obj, "_unit", None)
self._unit_type = getattr(obj, "_unit_type", None)
#TODO abstract base class property?
@property
def unit_type(self):
return self._unit_type
#TODO abstract base class property?
@property
def unit(self):
return self._unit
def __reduce__(self):
#print("in reduce")
reduce = list(super(ArrayWithUnit, self).__reduce__())
#print("unit",self._unit)
#print(reduce[2])
reduce[2] = {"np_state": reduce[2], "_unit": self._unit}
return tuple(reduce)
def __setstate__(self, state):
#print("in setstate %s" % str(state))
super(ArrayWithUnit, self).__setstate__(state["np_state"])
self._unit = state["_unit"]
def __repr__(self):
return "{} {}".format(np.array(self).__repr__(), self.unit)
def __str__(self):
return "{} {}".format(np.array(self).__str__(), self.unit)
def __add__(self, other):
if hasattr(other, "unit_type"):
if other.unit_type != self.unit_type:
raise UnitError("Adding different types of units is"
" not allowed")
if other.unit != self.unit:
other = other.to(self.unit)
return self.__class__(np.array(self) + np.array(other),
unit_type=self.unit_type, unit=self.unit)
def __sub__(self, other):
if hasattr(other, "unit_type"):
if other.unit_type != self.unit_type:
raise UnitError("Subtracting different units is not allowed")
if other.unit != self.unit:
other = other.to(self.unit)
return self.__class__(np.array(self) - np.array(other),
unit_type=self.unit_type, unit=self.unit)
def __mul__(self, other):
# FIXME
# Here we have the most important difference between FloatWithUnit and
# ArrayWithFloatWithUnit:
# If other does not have units, I return an object with the same units
# as self.
# if other *has* units, I return an object *without* units since
# taking into account all the possible derived quantities would be
# too difficult.
# Moreover Energy(1.0) * Time(1.0, "s") returns 1.0 Ha that is a
# bit misleading.
# Same protocol for __div__
if not hasattr(other, "unit_type"):
return self.__class__(np.array(self).__mul__(np.array(other)),
unit_type=self._unit_type, unit=self._unit)
else:
# Cannot use super since it returns an instance of self.__class__
# while here we want a bare numpy array.
return self.__class__(
np.array(self).__mul__(np.array(other)),
unit=self.unit * other.unit)
def __rmul__(self, other):
if not hasattr(other, "unit_type"):
return self.__class__(np.array(self).__rmul__(np.array(other)),
unit_type=self._unit_type, unit=self._unit)
else:
return self.__class__(
np.array(self).__rmul__(np.array(other)),
unit=self.unit * other.unit)
def __div__(self, other):
if not hasattr(other, "unit_type"):
return self.__class__(np.array(self).__div__(np.array(other)),
unit_type=self._unit_type, unit=self._unit)
else:
return self.__class__(
np.array(self).__div__(np.array(other)),
unit=self.unit/other.unit)
def __truediv__(self, other):
if not hasattr(other, "unit_type"):
return self.__class__(np.array(self).__truediv__(np.array(other)),
unit_type=self._unit_type, unit=self._unit)
else:
return self.__class__(
np.array(self).__truediv__(np.array(other)),
unit=self.unit / other.unit)
def __neg__(self):
return self.__class__(np.array(self).__neg__(),
unit_type=self.unit_type, unit=self.unit)
def to(self, new_unit):
return self.__class__(
np.array(self) * self.unit.get_conversion_factor(new_unit),
unit_type=self.unit_type, unit=new_unit)
@property
def as_base_units(self):
return self.to(self.unit.as_base_units[0])
#TODO abstract base class property?
@property
def supported_units(self):
return ALL_UNITS[self.unit_type]
#TODO abstract base class method?
def conversions(self):
return "\n".join(str(self.to(unit)) for unit in self.supported_units)
def _my_partial(func, *args, **kwargs):
newobj = partial(func, *args, **kwargs)
# monkey patch
newobj.from_string = FloatWithUnit.from_string
return newobj
Energy = partial(FloatWithUnit, unit_type="energy")
EnergyArray = partial(ArrayWithUnit, unit_type="energy")
Length = partial(FloatWithUnit, unit_type="length")
LengthArray = partial(ArrayWithUnit, unit_type="length")
Mass = partial(FloatWithUnit, unit_type="mass")
MassArray = partial(ArrayWithUnit, unit_type="mass")
Temp = partial(FloatWithUnit, unit_type="temperature")
TempArray = partial(ArrayWithUnit, unit_type="temperature")
Time = partial(FloatWithUnit, unit_type="time")
TimeArray = partial(ArrayWithUnit, unit_type="time")
Charge = partial(FloatWithUnit, unit_type="charge")
ChargeArray = partial(ArrayWithUnit, unit_type="charge")
Memory = _my_partial(FloatWithUnit, unit_type="memory")
def obj_with_unit(obj, unit):
unit_type = _UNAME2UTYPE[unit]
if isinstance(obj, numbers.Number):
return FloatWithUnit(obj, unit=unit, unit_type=unit_type)
elif isinstance(obj, collections.Mapping):
return {k: obj_with_unit(v, unit) for k,v in obj.items()}
else:
return ArrayWithUnit(obj, unit=unit, unit_type=unit_type)
def unitized(unit):
def wrap(f):
def wrapped_f(*args, **kwargs):
val = f(*args, **kwargs)
unit_type = _UNAME2UTYPE[unit]
if isinstance(val, FloatWithUnit) or isinstance(val, ArrayWithUnit):
return val.to(unit)
elif isinstance(val, collections.Sequence):
# TODO: why don't we return a ArrayWithUnit?
return val.__class__([FloatWithUnit(i, unit_type=unit_type,
unit=unit) for i in val])
elif isinstance(val, collections.Mapping):
for k, v in val.items():
val[k] = FloatWithUnit(v, unit_type=unit_type, unit=unit)
elif isinstance(val, numbers.Number):
return FloatWithUnit(val, unit_type=unit_type, unit=unit)
elif val is None:
pass
else:
raise TypeError("Don't know how to assign units to %s" % str(val))
return val
return wrapped_f
return wrap
if __name__ == "__main__":
import doctest
doctest.testmod()
| true | true |
f71e616278962d75c4364a4c48d685060d730488 | 32,900 | py | Python | panel/param.py | marcelflygare/panel | 75ece664ebc55d482d74abc9b56028cea5218a40 | [
"BSD-3-Clause"
] | null | null | null | panel/param.py | marcelflygare/panel | 75ece664ebc55d482d74abc9b56028cea5218a40 | [
"BSD-3-Clause"
] | null | null | null | panel/param.py | marcelflygare/panel | 75ece664ebc55d482d74abc9b56028cea5218a40 | [
"BSD-3-Clause"
] | null | null | null | """
Defines the Param pane which converts Parameterized classes into a
set of widgets.
"""
from __future__ import absolute_import, division, unicode_literals
import os
import sys
import json
import types
import inspect
import itertools
from collections import OrderedDict, defaultdict, namedtuple
from six import string_types
import param
from bokeh.io import curdoc as _curdoc
from param.parameterized import classlist
from .io import state
from .layout import Row, Panel, Tabs, Column
from .pane.base import PaneBase, ReplacementPane
from .util import (
abbreviated_repr, full_groupby, get_method_owner, is_parameterized,
param_name, recursive_parameterized
)
from .viewable import Layoutable
from .widgets import (
Button, Checkbox, ColorPicker, DataFrame, DatePicker, DatetimeInput,
DateRangeSlider, FileSelector, FloatSlider, IntSlider, LiteralInput,
MultiSelect, RangeSlider, Select, Spinner, StaticText, TextInput,
Toggle, Widget
)
from .widgets.button import _ButtonBase
def SingleFileSelector(pobj):
    """
    Determines whether to use a TextInput or Select widget for FileSelector
    """
    # A FileSelector with a search path has enumerable options -> Select;
    # without one the user must type the path -> TextInput.
    return Select if pobj.path else TextInput
def LiteralInputTyped(pobj):
    """
    Return a LiteralInput subclass whose ``type`` attribute is constrained
    to match the kind of Parameter supplied, falling back to the generic
    LiteralInput for any other Parameter type.
    """
    # Checked in order; the first matching Parameter type wins, mirroring
    # the original if/elif chain.
    specializations = (
        (param.Tuple, 'TupleInput', tuple),
        (param.Number, 'NumberInput', (int, float)),
        (param.Dict, 'DictInput', dict),
        (param.List, 'ListInput', list),
    )
    for ptype, clsname, value_type in specializations:
        if isinstance(pobj, ptype):
            return type(str(clsname), (LiteralInput,), {'type': value_type})
    return LiteralInput
class Param(PaneBase):
    """
    Param panes render a Parameterized class to a set of widgets which
    are linked to the parameter values on the class.
    """

    display_threshold = param.Number(default=0, precedence=-10, doc="""
        Parameters with precedence below this value are not displayed.""")

    default_layout = param.ClassSelector(default=Column, class_=Panel,
                                         is_instance=False)

    default_precedence = param.Number(default=1e-8, precedence=-10, doc="""
        Precedence value to use for parameters with no declared
        precedence. By default, zero predecence is available for
        forcing some parameters to the top of the list, and other
        values above the default_precedence values can be used to sort
        or group parameters arbitrarily.""")

    expand = param.Boolean(default=False, doc="""
        Whether parameterized subobjects are expanded or collapsed on
        instantiation.""")

    expand_button = param.Boolean(default=None, doc="""
        Whether to add buttons to expand and collapse sub-objects.""")

    expand_layout = param.Parameter(default=Column, doc="""
        Layout to expand sub-objects into.""")

    height = param.Integer(default=None, bounds=(0, None), doc="""
        Height of widgetbox the parameter widgets are displayed in.""")

    initializer = param.Callable(default=None, doc="""
        User-supplied function that will be called on initialization,
        usually to update the default Parameter values of the
        underlying parameterized object.""")

    name = param.String(default='', doc="""
        Title of the pane.""")

    parameters = param.List(default=[], allow_None=True, doc="""
        If set this serves as a whitelist of parameters to display on
        the supplied Parameterized object.""")

    show_labels = param.Boolean(default=True, doc="""
        Whether to show labels for each widget""")

    show_name = param.Boolean(default=True, doc="""
        Whether to show the parameterized object's name""")

    width = param.Integer(default=300, allow_None=True, bounds=(0, None), doc="""
        Width of widgetbox the parameter widgets are displayed in.""")

    widgets = param.Dict(doc="""
        Dictionary of widget overrides, mapping from parameter name
        to widget class.""")

    priority = 0.1

    _unpack = True

    # Maps Parameter types to the widget used to edit them; values may also
    # be factory functions (resolved in widget_type) returning a widget class.
    _mapping = {
        param.Action:            Button,
        param.Boolean:           Checkbox,
        param.CalendarDate:      DatePicker,
        param.Color:             ColorPicker,
        param.Date:              DatetimeInput,
        param.DateRange:         DateRangeSlider,
        param.CalendarDateRange: DateRangeSlider,
        param.DataFrame:         DataFrame,
        param.Dict:              LiteralInputTyped,
        param.FileSelector:      SingleFileSelector,
        param.Filename:          TextInput,
        param.Foldername:        TextInput,
        param.Integer:           IntSlider,
        param.List:              LiteralInputTyped,
        param.MultiFileSelector: FileSelector,
        param.ListSelector:      MultiSelect,
        param.Number:            FloatSlider,
        param.ObjectSelector:    Select,
        param.Parameter:         LiteralInputTyped,
        param.Range:             RangeSlider,
        param.Selector:          Select,
        param.String:            TextInput,
    }

    _rerender_params = []

    def __init__(self, object=None, **params):
        if isinstance(object, param.Parameter):
            # A single Parameter was passed in: display just that parameter
            # of its owner, without a title.
            if not 'show_name' in params:
                params['show_name'] = False
            params['parameters'] = [object.name]
            object = object.owner
        if isinstance(object, param.parameterized.Parameters):
            # .param namespace object: unwrap to the class or instance.
            object = object.cls if object.self is None else object.self
        if 'parameters' not in params and object is not None:
            params['parameters'] = [p for p in object.param if p != 'name']
            self._explicit_parameters = False
        else:
            self._explicit_parameters = object is not None
        if object and 'name' not in params:
            params['name'] = param_name(object.name)
        super(Param, self).__init__(object, **params)
        self._updating = []

        # Construct Layout
        kwargs = {p: v for p, v in self.param.get_param_values()
                  if p in Layoutable.param and v is not None}
        self._widget_box = self.default_layout(**kwargs)

        layout = self.expand_layout
        if isinstance(layout, Panel):
            self._expand_layout = layout
            self.layout = self._widget_box
        elif isinstance(self._widget_box, layout):
            self.layout = self._expand_layout = self._widget_box
        elif isinstance(layout, type) and issubclass(layout, Panel):
            self.layout = self._expand_layout = layout(self._widget_box, **kwargs)
        else:
            raise ValueError('expand_layout expected to be a panel.layout.Panel'
                             'type or instance, found %s type.' %
                             type(layout).__name__)
        self.param.watch(self._update_widgets, [
            'object', 'parameters', 'name', 'display_threshold', 'expand_button',
            'expand', 'expand_layout', 'widgets', 'show_labels', 'show_name'])
        self._update_widgets()

    def __repr__(self, depth=0):
        cls = type(self).__name__
        obj_cls = type(self.object).__name__
        params = [] if self.object is None else list(self.object.param)
        parameters = [k for k in params if k != 'name']
        params = []
        for p, v in sorted(self.param.get_param_values()):
            # Only show values that differ from the defaults / derived values.
            if v is self.param[p].default: continue
            elif v is None: continue
            elif isinstance(v, string_types) and v == '': continue
            elif p == 'object' or (p == 'name' and (v.startswith(obj_cls) or v.startswith(cls))): continue
            elif p == 'parameters' and v == parameters: continue
            try:
                params.append('%s=%s' % (p, abbreviated_repr(v)))
            except RuntimeError:
                params.append('%s=%s' % (p, '...'))
        obj = 'None' if self.object is None else '%s' % type(self.object).__name__
        template = '{cls}({obj}, {params})' if params else '{cls}({obj})'
        return template.format(cls=cls, params=', '.join(params), obj=obj)

    #----------------------------------------------------------------
    # Callback API
    #----------------------------------------------------------------

    def _synced_params(self):
        ignored_params = ['default_layout']
        return [p for p in Layoutable.param if p not in ignored_params]

    def _update_widgets(self, *events):
        """Rebuild the widget box in response to object/parameters changes."""
        parameters = []
        for event in sorted(events, key=lambda x: x.name):
            if event.name == 'object':
                if isinstance(event.new, param.parameterized.Parameters):
                    # Setting object will trigger this method a second time
                    self.object = event.new.cls if event.new.self is None else event.new.self
                    return
                if self._explicit_parameters:
                    parameters = self.parameters
                elif event.new is None:
                    parameters = []
                else:
                    parameters = [p for p in event.new.param if p != 'name']
                    self.name = param_name(event.new.name)
            if event.name == 'parameters':
                if event.new is None:
                    self._explicit_parameters = False
                    if self.object is not None:
                        parameters = [p for p in self.object.param if p != 'name']
                else:
                    self._explicit_parameters = True
                    parameters = [] if event.new == [] else event.new

        if parameters != [] and parameters != self.parameters:
            # Setting parameters will trigger this method a second time
            self.parameters = parameters
            return

        # Unhook watchers attached to the widgets we are about to discard.
        for cb in list(self._callbacks):
            if cb.inst in self._widget_box.objects:
                cb.inst.param.unwatch(cb)
                self._callbacks.remove(cb)

        # Construct widgets
        if self.object is None:
            self._widgets = {}
        else:
            self._widgets = self._get_widgets()

        alias = {'_title': 'name'}
        widgets = [widget for p, widget in self._widgets.items()
                   if (self.object.param[alias.get(p, p)].precedence is None)
                   or (self.object.param[alias.get(p, p)].precedence >= self.display_threshold)]
        self._widget_box.objects = widgets
        if not (self.expand_button == False and not self.expand):
            self._link_subobjects()

    def _link_subobjects(self):
        """Wire up expand toggles for widgets whose value is parameterized."""
        for pname, widget in self._widgets.items():
            widgets = [widget] if isinstance(widget, Widget) else widget
            if not any(is_parameterized(getattr(w, 'value', None)) or
                       any(is_parameterized(o) for o in getattr(w, 'options', []))
                       for w in widgets):
                continue
            if (isinstance(widgets, Row) and isinstance(widgets[1], Toggle)):
                selector, toggle = (widgets[0], widgets[1])
            else:
                selector, toggle = (widget, None)

            def toggle_pane(change, parameter=pname):
                "Adds or removes subpanel from layout"
                parameterized = getattr(self.object, parameter)
                existing = [p for p in self._expand_layout.objects
                            if isinstance(p, Param) and
                            p.object in recursive_parameterized(parameterized)]
                if not change.new:
                    self._expand_layout[:] = [
                        e for e in self._expand_layout.objects
                        if e not in existing
                    ]
                elif change.new:
                    kwargs = {k: v for k, v in self.param.get_param_values()
                              if k not in ['name', 'object', 'parameters']}
                    pane = Param(parameterized, name=parameterized.name,
                                 **kwargs)
                    if isinstance(self._expand_layout, Tabs):
                        title = self.object.param[pname].label
                        pane = (title, pane)
                    self._expand_layout.append(pane)

            def update_pane(change, parameter=pname):
                "Adds or removes subpanel from layout"
                layout = self._expand_layout
                existing = [p for p in layout.objects if isinstance(p, Param)
                            and p.object is change.old]
                if toggle:
                    toggle.disabled = not is_parameterized(change.new)
                if not existing:
                    return
                elif is_parameterized(change.new):
                    parameterized = change.new
                    kwargs = {k: v for k, v in self.param.get_param_values()
                              if k not in ['name', 'object', 'parameters']}
                    pane = Param(parameterized, name=parameterized.name,
                                 **kwargs)
                    layout[layout.objects.index(existing[0])] = pane
                else:
                    layout.pop(existing[0])

            watchers = [selector.param.watch(update_pane, 'value')]
            if toggle:
                watchers.append(toggle.param.watch(toggle_pane, 'value'))
            self._callbacks += watchers

            if self.expand:
                if self.expand_button:
                    toggle.value = True
                else:
                    toggle_pane(namedtuple('Change', 'new')(True))

    def widget(self, p_name):
        """Get widget for param_name"""
        p_obj = self.object.param[p_name]
        kw_widget = {}

        widget_class_overridden = True
        if self.widgets is None or p_name not in self.widgets:
            widget_class_overridden = False
            widget_class = self.widget_type(p_obj)
        elif isinstance(self.widgets[p_name], dict):
            # BUG FIX: copy the user-supplied spec before popping 'type';
            # mutating self.widgets[p_name] in place meant the declared
            # widget type was lost on any subsequent re-render.
            kw_widget = dict(self.widgets[p_name])
            if 'type' in kw_widget:
                widget_class = kw_widget.pop('type')
            else:
                widget_class_overridden = False
                widget_class = self.widget_type(p_obj)
        else:
            widget_class = self.widgets[p_name]

        if not self.show_labels and not issubclass(widget_class, _ButtonBase):
            label = ''
        else:
            label = p_obj.label
        kw = dict(disabled=p_obj.constant, name=label)

        value = getattr(self.object, p_name)
        if value is not None:
            kw['value'] = value

        # Update kwargs
        kw.update(kw_widget)

        if hasattr(p_obj, 'get_range'):
            options = p_obj.get_range()
            if not options and value is not None:
                options = [value]
            kw['options'] = options
        if hasattr(p_obj, 'get_soft_bounds'):
            bounds = p_obj.get_soft_bounds()
            if bounds[0] is not None:
                kw['start'] = bounds[0]
            if bounds[1] is not None:
                kw['end'] = bounds[1]
            if ('start' not in kw or 'end' not in kw):
                # Do not change widget class if _mapping was overridden
                if not widget_class_overridden:
                    if (isinstance(p_obj, param.Number) and
                        not isinstance(p_obj, (param.Date, param.CalendarDate))):
                        widget_class = Spinner
                        if isinstance(p_obj, param.Integer):
                            kw['step'] = 1
                    elif not issubclass(widget_class, LiteralInput):
                        widget_class = LiteralInput
            if hasattr(widget_class, 'step') and getattr(p_obj, 'step', None):
                kw['step'] = p_obj.step

        kwargs = {k: v for k, v in kw.items() if k in widget_class.param}

        if isinstance(widget_class, Widget):
            widget = widget_class
        else:
            widget = widget_class(**kwargs)
        widget._param_pane = self

        watchers = self._callbacks

        def link_widget(change):
            if p_name in self._updating:
                return
            try:
                self._updating.append(p_name)
                self.object.param.set_param(**{p_name: change.new})
            finally:
                self._updating.remove(p_name)

        if isinstance(p_obj, param.Action):
            def action(change):
                value(self.object)
            watcher = widget.param.watch(action, 'clicks')
        else:
            watcher = widget.param.watch(link_widget, 'value')
        watchers.append(watcher)

        def link(change, watchers=[watcher]):
            updates = {}
            if change.what == 'constant':
                updates['disabled'] = change.new
            elif change.what == 'precedence':
                if (change.new < self.display_threshold and
                    widget in self._widget_box.objects):
                    self._widget_box.pop(widget)
                elif change.new >= self.display_threshold:
                    precedence = lambda k: self.object.param['name' if k == '_title' else k].precedence
                    params = self._ordered_params
                    if self.show_name:
                        params.insert(0, '_title')
                    widgets = []
                    for k in params:
                        if precedence(k) is None or precedence(k) >= self.display_threshold:
                            widgets.append(self._widgets[k])
                    self._widget_box.objects = widgets
                return
            elif change.what == 'objects':
                updates['options'] = p_obj.get_range()
            elif change.what == 'bounds':
                start, end = p_obj.get_soft_bounds()
                updates['start'] = start
                updates['end'] = end
            elif change.what == 'step':
                updates['step'] = p_obj.step
            elif change.what == 'label':
                updates['name'] = p_obj.label
            elif p_name in self._updating:
                return
            elif isinstance(p_obj, param.Action):
                prev_watcher = watchers[0]
                widget.param.unwatch(prev_watcher)
                def action(event):
                    change.new(self.object)
                watchers[0] = widget.param.watch(action, 'clicks')
                idx = self._callbacks.index(prev_watcher)
                self._callbacks[idx] = watchers[0]
                return
            else:
                updates['value'] = change.new

            try:
                self._updating.append(p_name)
                widget.param.set_param(**updates)
            finally:
                self._updating.remove(p_name)

        # Set up links to parameterized object
        watchers.append(self.object.param.watch(link, p_name, 'constant'))
        watchers.append(self.object.param.watch(link, p_name, 'precedence'))
        watchers.append(self.object.param.watch(link, p_name, 'label'))
        if hasattr(p_obj, 'get_range'):
            watchers.append(self.object.param.watch(link, p_name, 'objects'))
        if hasattr(p_obj, 'get_soft_bounds'):
            watchers.append(self.object.param.watch(link, p_name, 'bounds'))
        if 'step' in kw:
            watchers.append(self.object.param.watch(link, p_name, 'step'))
        watchers.append(self.object.param.watch(link, p_name))

        options = kwargs.get('options', [])
        if isinstance(options, dict):
            options = options.values()
        if ((is_parameterized(value) or any(is_parameterized(o) for o in options))
            and (self.expand_button or (self.expand_button is None and not self.expand))):
            # Pair the widget with a vertical-ellipsis expand toggle.
            widget.margin = (5, 0, 5, 10)
            toggle = Toggle(name='\u22EE', button_type='primary',
                            disabled=not is_parameterized(value), max_height=30,
                            max_width=20, height_policy='fit', align='end',
                            margin=(0, 0, 5, 10))
            widget.width = self._widget_box.width-60
            return Row(widget, toggle, width_policy='max', margin=0)
        else:
            return widget

    @property
    def _ordered_params(self):
        """Displayed parameter names ordered by precedence, then definition."""
        params = [(p, pobj) for p, pobj in self.object.param.objects('existing').items()
                  if p in self.parameters or p == 'name']
        key_fn = lambda x: x[1].precedence if x[1].precedence is not None else self.default_precedence
        sorted_precedence = sorted(params, key=key_fn)
        filtered = [(k, p) for k, p in sorted_precedence]
        groups = itertools.groupby(filtered, key=key_fn)
        # Params preserve definition order in Python 3.6+
        dict_ordered_py3 = (sys.version_info.major == 3 and sys.version_info.minor >= 6)
        dict_ordered = dict_ordered_py3 or (sys.version_info.major > 3)
        ordered_groups = [list(grp) if dict_ordered else sorted(grp) for (_, grp) in groups]
        ordered_params = [el[0] for group in ordered_groups for el in group
                          if (el[0] != 'name' or el[0] in self.parameters)]
        return ordered_params

    #----------------------------------------------------------------
    # Model API
    #----------------------------------------------------------------

    def _get_widgets(self):
        """Return name,widget boxes for all parameters (i.e., a property sheet)"""
        # Format name specially
        if self.expand_layout is Tabs:
            widgets = []
        elif self.show_name:
            widgets = [('_title', StaticText(value='<b>{0}</b>'.format(self.name)))]
        else:
            widgets = []
        widgets += [(pname, self.widget(pname)) for pname in self._ordered_params]
        return OrderedDict(widgets)

    def _get_model(self, doc, root=None, parent=None, comm=None):
        model = self.layout._get_model(doc, root, parent, comm)
        self._models[root.ref['id']] = (model, parent)
        return model

    def _cleanup(self, root):
        self.layout._cleanup(root)
        super(Param, self)._cleanup(root)

    #----------------------------------------------------------------
    # Public API
    #----------------------------------------------------------------

    @classmethod
    def applies(cls, obj):
        return (is_parameterized(obj) or
                isinstance(obj, param.parameterized.Parameters) or
                (isinstance(obj, param.Parameter) and obj.owner is not None))

    @classmethod
    def widget_type(cls, pobj):
        """Resolve the widget class for a Parameter via the _mapping table."""
        ptype = type(pobj)
        for t in classlist(ptype)[::-1]:
            if t in cls._mapping:
                if isinstance(cls._mapping[t], types.FunctionType):
                    return cls._mapping[t](pobj)
                return cls._mapping[t]

    def select(self, selector=None):
        """
        Iterates over the Viewable and any potential children in the
        applying the Selector.

        Arguments
        ---------
        selector: type or callable or None
            The selector allows selecting a subset of Viewables by
            declaring a type or callable function to filter by.

        Returns
        -------
        viewables: list(Viewable)
        """
        return super().select(selector) + self.layout.select(selector)

    def get_root(self, doc=None, comm=None):
        """
        Returns the root model and applies pre-processing hooks

        Arguments
        ---------
        doc: bokeh.Document
            Bokeh document the bokeh model will be attached to.
        comm: pyviz_comms.Comm
            Optional pyviz_comms when working in notebook

        Returns
        -------
        Returns the bokeh model corresponding to this panel object
        """
        doc = doc or _curdoc()
        root = self.layout.get_root(doc, comm)
        ref = root.ref['id']
        self._models[ref] = (root, None)
        state._views[ref] = (self, root, doc, comm)
        return root
class ParamMethod(ReplacementPane):
    """
    ParamMethod panes wrap methods on parameterized classes and
    rerenders the plot when any of the method's parameters change. By
    default ParamMethod will watch all parameters on the class owning
    the method or can be restricted to certain parameters by annotating
    the method using the param.depends decorator. The method may
    return any object which itself can be rendered as a Pane.
    """

    def __init__(self, object=None, **params):
        super(ParamMethod, self).__init__(object, **params)
        self._link_object_params()
        if object is not None:
            self._validate_object()
            self._update_inner(self.eval(object))

    @param.depends('object', watch=True)
    def _validate_object(self):
        # Warn when the wrapped callable also self-watches its dependencies:
        # Panel already re-evaluates it on change, so watch=True doubles calls.
        dependencies = getattr(self.object, '_dinfo', None)
        if not dependencies or not dependencies.get('watch'):
            return
        fn_type = 'method' if type(self) is ParamMethod else 'function'
        self.param.warning(f"The {fn_type} supplied for Panel to display "
                           "was declared with `watch=True`, which will "
                           f"cause the {fn_type} to be called twice for "
                           "any change in a dependent Parameter. "
                           "`watch` should be False when Panel is "
                           "responsible for displaying the result "
                           f"of the {fn_type} call, while `watch=True` "
                           f"should be reserved for {fn_type}s that work "
                           "via side-effects, e.g. by modifying internal "
                           "state of a class or global state in an "
                           "application's namespace.")

    #----------------------------------------------------------------
    # Callback API
    #----------------------------------------------------------------

    @classmethod
    def eval(cls, function):
        """Evaluate *function*, resolving positional/keyword param.depends args."""
        args, kwargs = (), {}
        if hasattr(function, '_dinfo'):
            arg_deps = function._dinfo['dependencies']
            kw_deps = function._dinfo.get('kw', {})
            if kw_deps or any(isinstance(d, param.Parameter) for d in arg_deps):
                args = (getattr(dep.owner, dep.name) for dep in arg_deps)
                kwargs = {n: getattr(dep.owner, dep.name) for n, dep in kw_deps.items()}
        return function(*args, **kwargs)

    def _update_pane(self, *events):
        # Drop watchers registered on the previous object while retaining
        # any watchers registered on this pane itself.
        callbacks = []
        for watcher in self._callbacks:
            # BUG FIX: the branches were inverted
            # (`watcher.inst if watcher.inst is None else watcher.cls`), which
            # unwatched at the wrong level and discarded self's own watchers;
            # this now matches the inst-or-cls idiom used elsewhere in this file.
            obj = watcher.cls if watcher.inst is None else watcher.inst
            if obj is self:
                callbacks.append(watcher)
                continue
            obj.param.unwatch(watcher)
        self._callbacks = callbacks
        self._link_object_params()
        # BUG FIX: previously tested the builtin `object` (always truthy)
        # instead of the wrapped callable.
        if self.object is not None:
            self._update_inner(self.eval(self.object))

    def _link_object_params(self):
        parameterized = get_method_owner(self.object)
        params = parameterized.param.params_depended_on(self.object.__name__)
        deps = params

        def update_pane(*events):
            # Update nested dependencies if parameterized object events
            if any(is_parameterized(event.new) for event in events):
                new_deps = parameterized.param.params_depended_on(self.object.__name__)
                for p in list(deps):
                    if p in new_deps: continue
                    watchers = self._callbacks
                    for w in list(watchers):
                        if (w.inst is p.inst and w.cls is p.cls and
                            p.name in w.parameter_names):
                            obj = p.cls if p.inst is None else p.inst
                            obj.param.unwatch(w)
                            watchers.pop(watchers.index(w))
                    deps.pop(deps.index(p))

                new_deps = [dep for dep in new_deps if dep not in deps]
                for _, params in full_groupby(new_deps, lambda x: (x.inst or x.cls, x.what)):
                    p = params[0]
                    pobj = p.cls if p.inst is None else p.inst
                    ps = [_p.name for _p in params]
                    watcher = pobj.param.watch(update_pane, ps, p.what)
                    self._callbacks.append(watcher)
                    for p in params:
                        deps.append(p)
            new_object = self.eval(self.object)
            self._update_inner(new_object)

        for _, params in full_groupby(params, lambda x: (x.inst or x.cls, x.what)):
            p = params[0]
            pobj = (p.inst or p.cls)
            ps = [_p.name for _p in params]
            watcher = pobj.param.watch(update_pane, ps, p.what)
            self._callbacks.append(watcher)

    #----------------------------------------------------------------
    # Public API
    #----------------------------------------------------------------

    @classmethod
    def applies(cls, obj):
        return inspect.ismethod(obj) and isinstance(get_method_owner(obj), param.Parameterized)
class ParamFunction(ParamMethod):
    """
    ParamFunction panes wrap functions decorated with the param.depends
    decorator and rerenders the output when any of the function's
    dependencies change. This allows building reactive components into
    a Panel which depend on other parameters, e.g. tying the value of
    a widget to some other output.
    """

    priority = 0.6

    def _replace_pane(self, *args):
        # Re-evaluate the wrapped function and swap in its new output.
        self._update_inner(self.eval(self.object))

    def _link_object_params(self):
        # Collect every positional and keyword dependency declared via
        # param.depends, group them by owning Parameterized, and register
        # one watcher per owner covering all of its dependent parameters.
        dinfo = self.object._dinfo
        all_deps = list(dinfo['dependencies']) + list(dinfo.get('kw', {}).values())
        by_owner = defaultdict(list)
        for dep in all_deps:
            by_owner[id(dep.owner)].append(dep)
        for dep_group in by_owner.values():
            owner = dep_group[0].owner
            names = [d.name for d in dep_group]
            self._callbacks.append(owner.param.watch(self._replace_pane, names))

    #----------------------------------------------------------------
    # Public API
    #----------------------------------------------------------------

    @classmethod
    def applies(cls, obj):
        return isinstance(obj, types.FunctionType) and hasattr(obj, '_dinfo')
class JSONInit(param.Parameterized):
    """
    Callable that can be passed to Widgets.initializer to set Parameter
    values using JSON. There are three approaches that may be used:
    1. If the json_file argument is specified, this takes precedence.
    2. The JSON file path can be specified via an environment variable.
    3. The JSON can be read directly from an environment variable.
    Here is an easy example of setting such an environment variable on
    the commandline:
    PARAM_JSON_INIT='{"p1":5}' jupyter notebook
    This addresses any JSONInit instances that are inspecting the
    default environment variable called PARAM_JSON_INIT, instructing it to set
    the 'p1' parameter to 5.
    """

    varname = param.String(default='PARAM_JSON_INIT', doc="""
        The name of the environment variable containing the JSON
        specification.""")

    target = param.String(default=None, doc="""
        Optional key in the JSON specification dictionary containing the
        desired parameter values.""")

    json_file = param.String(default=None, doc="""
        Optional path to a JSON file containing the parameter settings.""")

    def __call__(self, parameterized):
        """Apply the JSON specification to *parameterized* (class or instance)."""
        warnobj = param.main if isinstance(parameterized, type) else parameterized
        param_class = (parameterized if isinstance(parameterized, type)
                       else parameterized.__class__)

        target = self.target if self.target is not None else param_class.__name__

        env_var = os.environ.get(self.varname, None)
        if env_var is None and self.json_file is None:
            return

        if self.json_file or env_var.endswith('.json'):
            fname = self.json_file if self.json_file else env_var
            try:
                # BUG FIX: use a context manager so the file handle is closed
                # (the original `json.load(open(...))` leaked it).
                with open(os.path.abspath(fname), 'r') as f:
                    spec = json.load(f)
            except Exception:
                # BUG FIX: the original warning referenced `spec`, which is
                # unbound when loading fails (NameError), and then fell
                # through to use `spec` below; warn about the file and bail.
                warnobj.warning('Could not load JSON file %r' % fname)
                return
        else:
            spec = json.loads(env_var)

        if not isinstance(spec, dict):
            warnobj.warning('JSON parameter specification must be a dictionary.')
            return

        if target in spec:
            params = spec[target]
        else:
            params = spec

        for name, value in params.items():
            try:
                parameterized.param.set_param(**{name: value})
            except ValueError as e:
                warnobj.warning(str(e))
| 41.331658 | 106 | 0.565015 | from __future__ import absolute_import, division, unicode_literals
import os
import sys
import json
import types
import inspect
import itertools
from collections import OrderedDict, defaultdict, namedtuple
from six import string_types
import param
from bokeh.io import curdoc as _curdoc
from param.parameterized import classlist
from .io import state
from .layout import Row, Panel, Tabs, Column
from .pane.base import PaneBase, ReplacementPane
from .util import (
abbreviated_repr, full_groupby, get_method_owner, is_parameterized,
param_name, recursive_parameterized
)
from .viewable import Layoutable
from .widgets import (
Button, Checkbox, ColorPicker, DataFrame, DatePicker, DatetimeInput,
DateRangeSlider, FileSelector, FloatSlider, IntSlider, LiteralInput,
MultiSelect, RangeSlider, Select, Spinner, StaticText, TextInput,
Toggle, Widget
)
from .widgets.button import _ButtonBase
def SingleFileSelector(pobj):
    """
    Choose the widget class for a param.FileSelector: a Select when the
    parameter has a search ``path`` (so its options can be enumerated),
    otherwise a free-form TextInput.
    """
    if pobj.path:
        return Select
    else:
        return TextInput
def LiteralInputTyped(pobj):
    """
    Return a LiteralInput subclass whose ``type`` attribute is constrained
    to match the kind of Parameter supplied (tuple, number, dict or list),
    falling back to the generic LiteralInput for any other Parameter type.
    """
    if isinstance(pobj, param.Tuple):
        return type(str('TupleInput'), (LiteralInput,), {'type': tuple})
    elif isinstance(pobj, param.Number):
        return type(str('NumberInput'), (LiteralInput,), {'type': (int, float)})
    elif isinstance(pobj, param.Dict):
        return type(str('DictInput'), (LiteralInput,), {'type': dict})
    elif isinstance(pobj, param.List):
        return type(str('ListInput'), (LiteralInput,), {'type': list})
    return LiteralInput
class Param(PaneBase):
display_threshold = param.Number(default=0, precedence=-10, doc="""
Parameters with precedence below this value are not displayed.""")
default_layout = param.ClassSelector(default=Column, class_=Panel,
is_instance=False)
default_precedence = param.Number(default=1e-8, precedence=-10, doc="""
Precedence value to use for parameters with no declared
precedence. By default, zero predecence is available for
forcing some parameters to the top of the list, and other
values above the default_precedence values can be used to sort
or group parameters arbitrarily.""")
expand = param.Boolean(default=False, doc="""
Whether parameterized subobjects are expanded or collapsed on
instantiation.""")
expand_button = param.Boolean(default=None, doc="""
Whether to add buttons to expand and collapse sub-objects.""")
expand_layout = param.Parameter(default=Column, doc="""
Layout to expand sub-objects into.""")
height = param.Integer(default=None, bounds=(0, None), doc="""
Height of widgetbox the parameter widgets are displayed in.""")
initializer = param.Callable(default=None, doc="""
User-supplied function that will be called on initialization,
usually to update the default Parameter values of the
underlying parameterized object.""")
name = param.String(default='', doc="""
Title of the pane.""")
parameters = param.List(default=[], allow_None=True, doc="""
If set this serves as a whitelist of parameters to display on
the supplied Parameterized object.""")
show_labels = param.Boolean(default=True, doc="""
Whether to show labels for each widget""")
show_name = param.Boolean(default=True, doc="""
Whether to show the parameterized object's name""")
width = param.Integer(default=300, allow_None=True, bounds=(0, None), doc="""
Width of widgetbox the parameter widgets are displayed in.""")
widgets = param.Dict(doc="""
Dictionary of widget overrides, mapping from parameter name
to widget class.""")
priority = 0.1
_unpack = True
_mapping = {
param.Action: Button,
param.Boolean: Checkbox,
param.CalendarDate: DatePicker,
param.Color: ColorPicker,
param.Date: DatetimeInput,
param.DateRange: DateRangeSlider,
param.CalendarDateRange: DateRangeSlider,
param.DataFrame: DataFrame,
param.Dict: LiteralInputTyped,
param.FileSelector: SingleFileSelector,
param.Filename: TextInput,
param.Foldername: TextInput,
param.Integer: IntSlider,
param.List: LiteralInputTyped,
param.MultiFileSelector: FileSelector,
param.ListSelector: MultiSelect,
param.Number: FloatSlider,
param.ObjectSelector: Select,
param.Parameter: LiteralInputTyped,
param.Range: RangeSlider,
param.Selector: Select,
param.String: TextInput,
}
_rerender_params = []
def __init__(self, object=None, **params):
if isinstance(object, param.Parameter):
if not 'show_name' in params:
params['show_name'] = False
params['parameters'] = [object.name]
object = object.owner
if isinstance(object, param.parameterized.Parameters):
object = object.cls if object.self is None else object.self
if 'parameters' not in params and object is not None:
params['parameters'] = [p for p in object.param if p != 'name']
self._explicit_parameters = False
else:
self._explicit_parameters = object is not None
if object and 'name' not in params:
params['name'] = param_name(object.name)
super(Param, self).__init__(object, **params)
self._updating = []
# Construct Layout
kwargs = {p: v for p, v in self.param.get_param_values()
if p in Layoutable.param and v is not None}
self._widget_box = self.default_layout(**kwargs)
layout = self.expand_layout
if isinstance(layout, Panel):
self._expand_layout = layout
self.layout = self._widget_box
elif isinstance(self._widget_box, layout):
self.layout = self._expand_layout = self._widget_box
elif isinstance(layout, type) and issubclass(layout, Panel):
self.layout = self._expand_layout = layout(self._widget_box, **kwargs)
else:
raise ValueError('expand_layout expected to be a panel.layout.Panel'
'type or instance, found %s type.' %
type(layout).__name__)
self.param.watch(self._update_widgets, [
'object', 'parameters', 'name', 'display_threshold', 'expand_button',
'expand', 'expand_layout', 'widgets', 'show_labels', 'show_name'])
self._update_widgets()
def __repr__(self, depth=0):
cls = type(self).__name__
obj_cls = type(self.object).__name__
params = [] if self.object is None else list(self.object.param)
parameters = [k for k in params if k != 'name']
params = []
for p, v in sorted(self.param.get_param_values()):
if v is self.param[p].default: continue
elif v is None: continue
elif isinstance(v, string_types) and v == '': continue
elif p == 'object' or (p == 'name' and (v.startswith(obj_cls) or v.startswith(cls))): continue
elif p == 'parameters' and v == parameters: continue
try:
params.append('%s=%s' % (p, abbreviated_repr(v)))
except RuntimeError:
params.append('%s=%s' % (p, '...'))
obj = 'None' if self.object is None else '%s' % type(self.object).__name__
template = '{cls}({obj}, {params})' if params else '{cls}({obj})'
return template.format(cls=cls, params=', '.join(params), obj=obj)
#----------------------------------------------------------------
# Callback API
#----------------------------------------------------------------
def _synced_params(self):
ignored_params = ['default_layout']
return [p for p in Layoutable.param if p not in ignored_params]
    def _update_widgets(self, *events):
        """Rebuild the widgets in response to object/parameter events.

        May re-trigger itself by assigning ``self.object`` or
        ``self.parameters`` (both watched), in which case it returns
        early and lets the second invocation do the actual rebuild.
        """
        parameters = []
        for event in sorted(events, key=lambda x: x.name):
            if event.name == 'object':
                if isinstance(event.new, param.parameterized.Parameters):
                    # Unwrap a .param namespace to the underlying
                    # class/instance.
                    # Setting object will trigger this method a second time
                    self.object = event.new.cls if event.new.self is None else event.new.self
                    return
                if self._explicit_parameters:
                    # User pinned an explicit parameter list; keep it.
                    parameters = self.parameters
                elif event.new is None:
                    parameters = []
                else:
                    parameters = [p for p in event.new.param if p != 'name']
                    self.name = param_name(event.new.name)
            if event.name == 'parameters':
                if event.new is None:
                    # Reset to showing all of the object's parameters.
                    self._explicit_parameters = False
                    if self.object is not None:
                        parameters = [p for p in self.object.param if p != 'name']
                else:
                    self._explicit_parameters = True
                    parameters = [] if event.new == [] else event.new
        if parameters != [] and parameters != self.parameters:
            # Setting parameters will trigger this method a second time
            self.parameters = parameters
            return
        # Drop watchers registered on widgets that are being replaced.
        for cb in list(self._callbacks):
            if cb.inst in self._widget_box.objects:
                cb.inst.param.unwatch(cb)
                self._callbacks.remove(cb)
        # Construct widgets
        if self.object is None:
            self._widgets = {}
        else:
            self._widgets = self._get_widgets()
        # '_title' is the synthetic heading widget; it maps to the
        # object's 'name' parameter for precedence lookups.
        alias = {'_title': 'name'}
        widgets = [widget for p, widget in self._widgets.items()
                   if (self.object.param[alias.get(p, p)].precedence is None)
                   or (self.object.param[alias.get(p, p)].precedence >= self.display_threshold)]
        self._widget_box.objects = widgets
        # Only wire up sub-object expansion when it can actually be shown
        # (expand_button may be None meaning "auto").
        if not (self.expand_button == False and not self.expand):
            self._link_subobjects()
    def _link_subobjects(self):
        """Wire up expand/collapse behavior for Parameterized sub-objects.

        For every widget whose value (or selectable options) is itself a
        Parameterized, register watchers that insert or remove a nested
        Param pane in the expand layout.
        """
        for pname, widget in self._widgets.items():
            widgets = [widget] if isinstance(widget, Widget) else widget
            # Skip widgets that never reference a Parameterized object.
            if not any(is_parameterized(getattr(w, 'value', None)) or
                       any(is_parameterized(o) for o in getattr(w, 'options', []))
                       for w in widgets):
                continue
            # A (selector, toggle) Row was built by widget(); otherwise
            # there is no expand toggle for this widget.
            if (isinstance(widgets, Row) and isinstance(widgets[1], Toggle)):
                selector, toggle = (widgets[0], widgets[1])
            else:
                selector, toggle = (widget, None)
            def toggle_pane(change, parameter=pname):
                # Add or remove the nested pane when the toggle flips.
                parameterized = getattr(self.object, parameter)
                existing = [p for p in self._expand_layout.objects
                            if isinstance(p, Param) and
                            p.object in recursive_parameterized(parameterized)]
                if not change.new:
                    # Collapse: strip the existing nested panes.
                    self._expand_layout[:] = [
                        e for e in self._expand_layout.objects
                        if e not in existing
                    ]
                elif change.new:
                    # Expand: build a nested Param pane mirroring this
                    # pane's display options.
                    kwargs = {k: v for k, v in self.param.get_param_values()
                              if k not in ['name', 'object', 'parameters']}
                    pane = Param(parameterized, name=parameterized.name,
                                 **kwargs)
                    if isinstance(self._expand_layout, Tabs):
                        title = self.object.param[pname].label
                        pane = (title, pane)
                    self._expand_layout.append(pane)
            def update_pane(change, parameter=pname):
                # The selected sub-object changed: swap or remove the
                # corresponding nested pane.
                layout = self._expand_layout
                existing = [p for p in layout.objects if isinstance(p, Param)
                            and p.object is change.old]
                if toggle:
                    toggle.disabled = not is_parameterized(change.new)
                if not existing:
                    return
                elif is_parameterized(change.new):
                    parameterized = change.new
                    kwargs = {k: v for k, v in self.param.get_param_values()
                              if k not in ['name', 'object', 'parameters']}
                    pane = Param(parameterized, name=parameterized.name,
                                 **kwargs)
                    layout[layout.objects.index(existing[0])] = pane
                else:
                    layout.pop(existing[0])
            watchers = [selector.param.watch(update_pane, 'value')]
            if toggle:
                watchers.append(toggle.param.watch(toggle_pane, 'value'))
            self._callbacks += watchers
            if self.expand:
                if self.expand_button:
                    # Flipping the toggle fires toggle_pane via its watcher.
                    toggle.value = True
                else:
                    # No button: expand immediately with a synthetic event.
                    toggle_pane(namedtuple('Change', 'new')(True))
    def widget(self, p_name):
        """Construct and link the widget editing parameter ``p_name``.

        Resolves the widget class (user override via ``self.widgets`` or
        the type mapping), builds keyword arguments from the Parameter's
        metadata, then installs two-way watchers between the widget and
        the parameterized object. Returns either the widget or a Row of
        (widget, expand-toggle) when the value is itself Parameterized.
        """
        p_obj = self.object.param[p_name]
        kw_widget = {}
        # Tracks whether the user explicitly chose a widget class, in
        # which case automatic fallbacks below are suppressed.
        widget_class_overridden = True
        if self.widgets is None or p_name not in self.widgets:
            widget_class_overridden = False
            widget_class = self.widget_type(p_obj)
        elif isinstance(self.widgets[p_name], dict):
            if 'type' in self.widgets[p_name]:
                # NOTE(review): pop mutates the user-supplied dict.
                widget_class = self.widgets[p_name].pop('type')
            else:
                widget_class_overridden = False
                widget_class = self.widget_type(p_obj)
            kw_widget = self.widgets[p_name]
        else:
            widget_class = self.widgets[p_name]
        # Buttons always display their label even when labels are hidden.
        if not self.show_labels and not issubclass(widget_class, _ButtonBase):
            label = ''
        else:
            label = p_obj.label
        kw = dict(disabled=p_obj.constant, name=label)
        value = getattr(self.object, p_name)
        if value is not None:
            kw['value'] = value
        # Update kwargs
        kw.update(kw_widget)
        # Selector-like parameters: derive options, falling back to the
        # current value if the range is empty.
        if hasattr(p_obj, 'get_range'):
            options = p_obj.get_range()
            if not options and value is not None:
                options = [value]
            kw['options'] = options
        # Numeric parameters: derive slider bounds from soft bounds.
        if hasattr(p_obj, 'get_soft_bounds'):
            bounds = p_obj.get_soft_bounds()
            if bounds[0] is not None:
                kw['start'] = bounds[0]
            if bounds[1] is not None:
                kw['end'] = bounds[1]
            if ('start' not in kw or 'end' not in kw):
                # Unbounded on at least one side: sliders cannot be used,
                # so fall back to a Spinner or LiteralInput.
                # Do not change widget class if _mapping was overridden
                if not widget_class_overridden:
                    if (isinstance(p_obj, param.Number) and
                        not isinstance(p_obj, (param.Date, param.CalendarDate))):
                        widget_class = Spinner
                        if isinstance(p_obj, param.Integer):
                            kw['step'] = 1
                    elif not issubclass(widget_class, LiteralInput):
                        widget_class = LiteralInput
            if hasattr(widget_class, 'step') and getattr(p_obj, 'step', None):
                kw['step'] = p_obj.step
        # Only pass through kwargs the widget class actually declares.
        kwargs = {k: v for k, v in kw.items() if k in widget_class.param}
        # A widget instance may be supplied directly instead of a class.
        if isinstance(widget_class, Widget):
            widget = widget_class
        else:
            widget = widget_class(**kwargs)
        widget._param_pane = self
        watchers = self._callbacks
        def link_widget(change):
            # Widget -> object sync; _updating guards against echo loops.
            if p_name in self._updating:
                return
            try:
                self._updating.append(p_name)
                self.object.param.set_param(**{p_name: change.new})
            finally:
                self._updating.remove(p_name)
        if isinstance(p_obj, param.Action):
            # Action parameters trigger the callable on button clicks.
            def action(change):
                value(self.object)
            watcher = widget.param.watch(action, 'clicks')
        else:
            watcher = widget.param.watch(link_widget, 'value')
        watchers.append(watcher)
        def link(change, watchers=[watcher]):
            # Object -> widget sync, dispatched on what changed on the
            # Parameter (constant/precedence/objects/bounds/step/label)
            # or on its value.
            updates = {}
            if change.what == 'constant':
                updates['disabled'] = change.new
            elif change.what == 'precedence':
                if (change.new < self.display_threshold and
                    widget in self._widget_box.objects):
                    self._widget_box.pop(widget)
                elif change.new >= self.display_threshold:
                    # Rebuild the widget box preserving display order.
                    precedence = lambda k: self.object.param['name' if k == '_title' else k].precedence
                    params = self._ordered_params
                    if self.show_name:
                        params.insert(0, '_title')
                    widgets = []
                    for k in params:
                        if precedence(k) is None or precedence(k) >= self.display_threshold:
                            widgets.append(self._widgets[k])
                    self._widget_box.objects = widgets
                return
            elif change.what == 'objects':
                updates['options'] = p_obj.get_range()
            elif change.what == 'bounds':
                start, end = p_obj.get_soft_bounds()
                updates['start'] = start
                updates['end'] = end
            elif change.what == 'step':
                updates['step'] = p_obj.step
            elif change.what == 'label':
                updates['name'] = p_obj.label
            elif p_name in self._updating:
                # Value change that originated from the widget; ignore.
                return
            elif isinstance(p_obj, param.Action):
                # The Action callable itself was replaced: rebind the
                # click watcher to invoke the new callable.
                prev_watcher = watchers[0]
                widget.param.unwatch(prev_watcher)
                def action(event):
                    change.new(self.object)
                watchers[0] = widget.param.watch(action, 'clicks')
                idx = self._callbacks.index(prev_watcher)
                self._callbacks[idx] = watchers[0]
                return
            else:
                updates['value'] = change.new
            try:
                self._updating.append(p_name)
                widget.param.set_param(**updates)
            finally:
                self._updating.remove(p_name)
        # Set up links to parameterized object
        watchers.append(self.object.param.watch(link, p_name, 'constant'))
        watchers.append(self.object.param.watch(link, p_name, 'precedence'))
        watchers.append(self.object.param.watch(link, p_name, 'label'))
        if hasattr(p_obj, 'get_range'):
            watchers.append(self.object.param.watch(link, p_name, 'objects'))
        if hasattr(p_obj, 'get_soft_bounds'):
            watchers.append(self.object.param.watch(link, p_name, 'bounds'))
        if 'step' in kw:
            watchers.append(self.object.param.watch(link, p_name, 'step'))
        watchers.append(self.object.param.watch(link, p_name))
        options = kwargs.get('options', [])
        if isinstance(options, dict):
            options = options.values()
        # Parameterized values get an expand toggle appended in a Row.
        if ((is_parameterized(value) or any(is_parameterized(o) for o in options))
            and (self.expand_button or (self.expand_button is None and not self.expand))):
            widget.margin = (5, 0, 5, 10)
            toggle = Toggle(name='\u22EE', button_type='primary',
                            disabled=not is_parameterized(value), max_height=30,
                            max_width=20, height_policy='fit', align='end',
                            margin=(0, 0, 5, 10))
            widget.width = self._widget_box.width-60
            return Row(widget, toggle, width_policy='max', margin=0)
        else:
            return widget
@property
def _ordered_params(self):
params = [(p, pobj) for p, pobj in self.object.param.objects('existing').items()
if p in self.parameters or p == 'name']
key_fn = lambda x: x[1].precedence if x[1].precedence is not None else self.default_precedence
sorted_precedence = sorted(params, key=key_fn)
filtered = [(k, p) for k, p in sorted_precedence]
groups = itertools.groupby(filtered, key=key_fn)
# Params preserve definition order in Python 3.6+
dict_ordered_py3 = (sys.version_info.major == 3 and sys.version_info.minor >= 6)
dict_ordered = dict_ordered_py3 or (sys.version_info.major > 3)
ordered_groups = [list(grp) if dict_ordered else sorted(grp) for (_, grp) in groups]
ordered_params = [el[0] for group in ordered_groups for el in group
if (el[0] != 'name' or el[0] in self.parameters)]
return ordered_params
#----------------------------------------------------------------
# Model API
#----------------------------------------------------------------
def _get_widgets(self):
# Format name specially
if self.expand_layout is Tabs:
widgets = []
elif self.show_name:
widgets = [('_title', StaticText(value='<b>{0}</b>'.format(self.name)))]
else:
widgets = []
widgets += [(pname, self.widget(pname)) for pname in self._ordered_params]
return OrderedDict(widgets)
    def _get_model(self, doc, root=None, parent=None, comm=None):
        """Build the bokeh model by delegating to the internal layout and
        register it under the root reference for later lookup/cleanup."""
        model = self.layout._get_model(doc, root, parent, comm)
        self._models[root.ref['id']] = (model, parent)
        return model
    def _cleanup(self, root):
        """Clean up the layout's models first, then this pane's own state."""
        self.layout._cleanup(root)
        super(Param, self)._cleanup(root)
#----------------------------------------------------------------
# Public API
#----------------------------------------------------------------
@classmethod
def applies(cls, obj):
return (is_parameterized(obj) or
isinstance(obj, param.parameterized.Parameters) or
(isinstance(obj, param.Parameter) and obj.owner is not None))
@classmethod
def widget_type(cls, pobj):
ptype = type(pobj)
for t in classlist(ptype)[::-1]:
if t in cls._mapping:
if isinstance(cls._mapping[t], types.FunctionType):
return cls._mapping[t](pobj)
return cls._mapping[t]
def select(self, selector=None):
return super().select(selector) + self.layout.select(selector)
    def get_root(self, doc=None, comm=None):
        """Return the root bokeh model for this pane, registering it in
        the model and view caches so it can be updated and cleaned up.

        Parameters
        ----------
        doc : bokeh Document, optional
            Document to attach to; falls back to the current document.
        comm : Comm, optional
            Jupyter comm used to sync updates, if any.
        """
        doc = doc or _curdoc()
        root = self.layout.get_root(doc, comm)
        ref = root.ref['id']
        self._models[ref] = (root, None)
        state._views[ref] = (self, root, doc, comm)
        return root
class ParamMethod(ReplacementPane):
    """
    ParamMethod wraps a method on a Parameterized class, re-evaluating
    it and re-rendering its output whenever one of the method's declared
    parameter dependencies changes.
    """

    def __init__(self, object=None, **params):
        super(ParamMethod, self).__init__(object, **params)
        self._link_object_params()
        if object is not None:
            self._validate_object()
            self._update_inner(self.eval(object))

    @param.depends('object', watch=True)
    def _validate_object(self):
        # Warn when the supplied callable also watches its dependencies,
        # since Panel will then invoke it twice per parameter change.
        dependencies = getattr(self.object, '_dinfo', None)
        if not dependencies or not dependencies.get('watch'):
            return
        fn_type = 'method' if type(self) is ParamMethod else 'function'
        self.param.warning(f"The {fn_type} supplied for Panel to display "
                           "was declared with `watch=True`, which will "
                           f"cause the {fn_type} to be called twice for "
                           "any change in a dependent Parameter. "
                           "`watch` should be False when Panel is "
                           "responsible for displaying the result "
                           f"of the {fn_type} call, while `watch=True` "
                           f"should be reserved for {fn_type}s that work "
                           "via side-effects, e.g. by modifying internal "
                           "state of a class or global state in an "
                           "application's namespace.")

    @classmethod
    def eval(cls, function):
        # BUG FIX: first argument of this classmethod was named `self`.
        """Evaluate ``function``, resolving any dependencies declared
        with ``param.depends`` into positional and keyword arguments."""
        args, kwargs = (), {}
        if hasattr(function, '_dinfo'):
            arg_deps = function._dinfo['dependencies']
            kw_deps = function._dinfo.get('kw', {})
            if kw_deps or any(isinstance(d, param.Parameter) for d in arg_deps):
                args = (getattr(dep.owner, dep.name) for dep in arg_deps)
                kwargs = {n: getattr(dep.owner, dep.name) for n, dep in kw_deps.items()}
        return function(*args, **kwargs)

    def _update_pane(self, *events):
        # Drop watchers registered on other objects, keep our own.
        callbacks = []
        for watcher in self._callbacks:
            # BUG FIX: inst/cls were swapped, yielding None (crash on
            # unwatch) for class watchers and the class for instance
            # watchers; we want the instance when present, else the class.
            obj = watcher.cls if watcher.inst is None else watcher.inst
            if obj is self:
                callbacks.append(watcher)
                continue
            obj.param.unwatch(watcher)
        self._callbacks = callbacks
        self._link_object_params()
        # BUG FIX: the original tested the builtin `object` (always
        # truthy) instead of self.object, calling eval(None) when the
        # wrapped object had been cleared.
        if self.object is not None:
            self._update_inner(self.eval(self.object))

    def _link_object_params(self):
        """Watch all of the wrapped method's parameter dependencies and
        re-render on change, rewiring watchers if the dependency set
        itself changes (e.g. when a sub-object is swapped)."""
        parameterized = get_method_owner(self.object)
        params = parameterized.param.params_depended_on(self.object.__name__)
        deps = params
        def update_pane(*events):
            # If any dependency value is itself Parameterized, the set
            # of transitive dependencies may have changed: resync.
            if any(is_parameterized(event.new) for event in events):
                new_deps = parameterized.param.params_depended_on(self.object.__name__)
                # Unwatch dependencies that were dropped.
                for p in list(deps):
                    if p in new_deps: continue
                    watchers = self._callbacks
                    for w in list(watchers):
                        if (w.inst is p.inst and w.cls is p.cls and
                            p.name in w.parameter_names):
                            obj = p.cls if p.inst is None else p.inst
                            obj.param.unwatch(w)
                            watchers.pop(watchers.index(w))
                    deps.pop(deps.index(p))
                # Watch newly added dependencies, one watcher per
                # (owner, what) group.
                new_deps = [dep for dep in new_deps if dep not in deps]
                for _, params in full_groupby(new_deps, lambda x: (x.inst or x.cls, x.what)):
                    p = params[0]
                    pobj = p.cls if p.inst is None else p.inst
                    ps = [_p.name for _p in params]
                    watcher = pobj.param.watch(update_pane, ps, p.what)
                    self._callbacks.append(watcher)
                    for p in params:
                        deps.append(p)
            new_object = self.eval(self.object)
            self._update_inner(new_object)
        for _, params in full_groupby(params, lambda x: (x.inst or x.cls, x.what)):
            p = params[0]
            pobj = (p.inst or p.cls)
            ps = [_p.name for _p in params]
            watcher = pobj.param.watch(update_pane, ps, p.what)
            self._callbacks.append(watcher)

    @classmethod
    def applies(cls, obj):
        """Applies to bound methods of Parameterized instances."""
        return inspect.ismethod(obj) and isinstance(get_method_owner(obj), param.Parameterized)
class ParamFunction(ParamMethod):
    """
    ParamFunction wraps a plain function decorated with ``param.depends``
    and re-renders its output whenever a dependency changes.
    """
    # Higher priority than ParamMethod so decorated functions resolve here.
    priority = 0.6
    def _replace_pane(self, *args):
        # Re-evaluate the wrapped function and swap in the new output.
        new_object = self.eval(self.object)
        self._update_inner(new_object)
    def _link_object_params(self):
        # Group dependencies by owner so a single watcher is registered
        # per parameterized instance.
        deps = self.object._dinfo
        dep_params = list(deps['dependencies']) + list(deps.get('kw', {}).values())
        grouped = defaultdict(list)
        for dep in dep_params:
            grouped[id(dep.owner)].append(dep)
        for group in grouped.values():
            watcher = group[0].owner.param.watch(self._replace_pane, [dep.name for dep in group])
            self._callbacks.append(watcher)
    @classmethod
    def applies(cls, obj):
        # Only plain functions carrying param.depends metadata apply.
        return isinstance(obj, types.FunctionType) and hasattr(obj, '_dinfo')
class JSONInit(param.Parameterized):
    """
    Callable that initializes parameter values on a Parameterized class
    or instance from a JSON specification, read from a file or from an
    environment variable. Problems are reported as warnings.
    """

    varname = param.String(default='PARAM_JSON_INIT', doc="""
        The name of the environment variable containing the JSON
        specification.""")

    target = param.String(default=None, doc="""
        Optional key in the JSON specification dictionary containing the
        desired parameter values.""")

    json_file = param.String(default=None, doc="""
        Optional path to a JSON file containing the parameter settings.""")

    def __call__(self, parameterized):
        """Apply the JSON specification to ``parameterized``.

        Missing/invalid files, malformed specifications and rejected
        parameter values produce warnings instead of raising.
        """
        warnobj = param.main if isinstance(parameterized, type) else parameterized
        param_class = (parameterized if isinstance(parameterized, type)
                       else parameterized.__class__)
        target = self.target if self.target is not None else param_class.__name__
        env_var = os.environ.get(self.varname, None)
        if env_var is None and self.json_file is None: return
        if self.json_file or env_var.endswith('.json'):
            fname = self.json_file if self.json_file else env_var
            try:
                # BUG FIX: use a context manager so the file handle is
                # not leaked on success or failure.
                with open(os.path.abspath(fname), 'r') as f:
                    spec = json.load(f)
            except Exception:
                # BUG FIX: the original referenced the unbound name
                # `spec` here (NameError) and then fell through to use
                # it; report the file name and bail out instead.
                warnobj.warning('Could not load JSON file %r' % fname)
                return
        else:
            spec = json.loads(env_var)
        if not isinstance(spec, dict):
            warnobj.warning('JSON parameter specification must be a dictionary.')
            return
        if target in spec:
            params = spec[target]
        else:
            params = spec
        for name, value in params.items():
            try:
                parameterized.param.set_param(**{name:value})
            except ValueError as e:
                warnobj.warning(str(e))
| true | true |
f71e6172fb408174fe4da5ba6b742421a696d4dc | 1,235 | py | Python | python/dir/move_file.py | tagwan/scripts | f88846f13b1e3d05c093aff9124d927d6873280c | [
"MIT"
] | null | null | null | python/dir/move_file.py | tagwan/scripts | f88846f13b1e3d05c093aff9124d927d6873280c | [
"MIT"
] | null | null | null | python/dir/move_file.py | tagwan/scripts | f88846f13b1e3d05c093aff9124d927d6873280c | [
"MIT"
] | null | null | null | import shutil
import sys
import time
import os
import argparse
"""
将源目录240天以上的所有文件移动到目标目录
"""
usage = 'python move_files_over_x_days.py -src [SRC] -dst [DST] -days [DAYS]'
description = 'Move files from src to dst if they are older than a certain number of days. Default is 240 days'
args_parser = argparse.ArgumentParser(usage=usage, description=description)
args_parser.add_argument('-src', '--src', type=str, nargs='?', default='.', help='(OPTIONAL) Directory where files will be moved from. Defaults to current directory')
args_parser.add_argument('-dst', '--dst', type=str, nargs='?', required=True, help='(REQUIRED) Directory where files will be moved to.')
args_parser.add_argument('-days', '--days', type=int, nargs='?', default=240, help='(OPTIONAL) Days value specifies the minimum age of files to be moved. Default is 240.')
args = args_parser.parse_args()
if args.days < 0:
args.days = 0
src = args.src # 设置源目录
dst = args.dst # 设置目标目录
days = args.days # 设置天数
now = time.time() # 获得当前时间
if not os.path.exists(dst):
os.mkdir(dst)
for f in os.listdir(src): # 遍历源目录所有文件
if os.stat(f).st_mtime < now - days * 86400: # 判断是否超过240天
if os.path.isfile(f): # 检查是否是文件
shutil.move(f, dst) # 移动文件 | 36.323529 | 171 | 0.702024 | import shutil
import sys
import time
import os
import shutil
import argparse

# Move all files in the source directory older than a given number of
# days (default 240) into the destination directory.

usage = 'python move_files_over_x_days.py -src [SRC] -dst [DST] -days [DAYS]'
description = 'Move files from src to dst if they are older than a certain number of days. Default is 240 days'

args_parser = argparse.ArgumentParser(usage=usage, description=description)
args_parser.add_argument('-src', '--src', type=str, nargs='?', default='.', help='(OPTIONAL) Directory where files will be moved from. Defaults to current directory')
args_parser.add_argument('-dst', '--dst', type=str, nargs='?', required=True, help='(REQUIRED) Directory where files will be moved to.')
args_parser.add_argument('-days', '--days', type=int, nargs='?', default=240, help='(OPTIONAL) Days value specifies the minimum age of files to be moved. Default is 240.')
args = args_parser.parse_args()

if args.days < 0:
    args.days = 0

src = args.src
dst = args.dst
days = args.days
now = time.time()

if not os.path.exists(dst):
    os.mkdir(dst)

for f in os.listdir(src):
    # BUG FIX: join the entry name with src so the age test and the move
    # work for any source directory, not just the current one.
    path = os.path.join(src, f)
    if os.stat(path).st_mtime < now - days * 86400:
        if os.path.isfile(path):
            shutil.move(path, dst)
shutil.move(f, dst) | true | true |
f71e61cc8a120b27b799496cc36516d8bdc3d5e3 | 582 | py | Python | advanced/image_processing/examples/plot_numpy_array.py | rossbar/scipy-lecture-notes | 7f74e6925721c43bd81bf0bee34b4805ac4a3b57 | [
"CC-BY-4.0"
] | 2,538 | 2015-01-01T04:58:41.000Z | 2022-03-31T21:06:05.000Z | advanced/image_processing/examples/plot_numpy_array.py | rossbar/scipy-lecture-notes | 7f74e6925721c43bd81bf0bee34b4805ac4a3b57 | [
"CC-BY-4.0"
] | 362 | 2015-01-18T14:16:23.000Z | 2021-11-18T16:24:34.000Z | advanced/image_processing/examples/plot_numpy_array.py | rossbar/scipy-lecture-notes | 7f74e6925721c43bd81bf0bee34b4805ac4a3b57 | [
"CC-BY-4.0"
] | 1,127 | 2015-01-05T14:39:29.000Z | 2022-03-25T08:38:39.000Z | """
Image manipulation and numpy arrays
====================================
This example shows how to do image manipulation using common numpy arrays
tricks.
"""
import numpy as np
import scipy
import scipy.misc
import matplotlib.pyplot as plt
face = scipy.misc.face(gray=True)
face[10:13, 20:23]
face[100:120] = 255
lx, ly = face.shape
X, Y = np.ogrid[0:lx, 0:ly]
mask = (X - lx/2)**2 + (Y - ly/2)**2 > lx*ly/4
face[mask] = 0
face[range(400), range(400)] = 255
plt.figure(figsize=(3, 3))
plt.axes([0, 0, 1, 1])
plt.imshow(face, cmap=plt.cm.gray)
plt.axis('off')
plt.show()
| 18.774194 | 73 | 0.632302 |
import numpy as np
import scipy
import scipy.misc
import matplotlib.pyplot as plt
# NOTE(review): scipy.misc.face was removed in recent SciPy releases
# (moved to scipy.datasets.face) -- confirm the SciPy version used here.
face = scipy.misc.face(gray=True)
# Basic slicing reads a small patch of pixel values.
face[10:13, 20:23]
# Assigning to a row slice paints a white horizontal band.
face[100:120] = 255
lx, ly = face.shape
# Open grids broadcast against each other to index the full image cheaply.
X, Y = np.ogrid[0:lx, 0:ly]
# Boolean mask selecting everything outside a centered ellipse.
mask = (X - lx/2)**2 + (Y - ly/2)**2 > lx*ly/4
face[mask] = 0
# Fancy indexing: whiten the diagonal of the top-left 400x400 block.
face[range(400), range(400)] = 255
plt.figure(figsize=(3, 3))
plt.axes([0, 0, 1, 1])
plt.imshow(face, cmap=plt.cm.gray)
plt.axis('off')
plt.show()
| true | true |
f71e63d7fd418ca42a509c3b118e17c51808dd04 | 7,066 | py | Python | python/mxnet/_ctypes/ndarray.py | litaotju/incubator-mxnet | 83b28911cbc4a49521c69d8c4330a7234436b2fa | [
"Apache-2.0"
] | 3 | 2018-07-31T02:44:39.000Z | 2019-03-10T01:36:47.000Z | python/mxnet/_ctypes/ndarray.py | litaotju/incubator-mxnet | 83b28911cbc4a49521c69d8c4330a7234436b2fa | [
"Apache-2.0"
] | 3 | 2019-01-31T07:48:00.000Z | 2019-02-14T02:02:18.000Z | python/mxnet/_ctypes/ndarray.py | litaotju/incubator-mxnet | 83b28911cbc4a49521c69d8c4330a7234436b2fa | [
"Apache-2.0"
] | 1 | 2019-12-02T04:16:13.000Z | 2019-12-02T04:16:13.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=invalid-name, protected-access, too-many-arguments
# pylint: disable=global-statement, unused-import
"""NDArray configuration API."""
from __future__ import absolute_import as _abs
import ctypes
from ..base import _LIB
from ..base import c_str_array, c_handle_array
from ..base import NDArrayHandle, CachedOpHandle
from ..base import check_call
def _monitor_callback_wrapper(callback):
"""A wrapper for the user-defined handle."""
def callback_handle(name, opr_name, array, _):
""" ctypes function """
callback(name, opr_name, array)
return callback_handle
class NDArrayBase(object):
    """Base data structure for ndarray; wraps a C API NDArrayHandle."""
    __slots__ = ["handle", "writable"]
    # pylint: disable= no-member
    def __init__(self, handle, writable=True):
        """initialize a new NDArray

        Parameters
        ----------
        handle : NDArrayHandle
            NDArray handle of C API; may be None for a placeholder
            (e.g. during unpickling).
        """
        if handle is not None:
            assert isinstance(handle, NDArrayHandle)
        self.handle = handle
        # writable: whether in-place mutation of this array is allowed.
        self.writable = writable
    def __del__(self):
        # Release the underlying C allocation when the wrapper dies.
        check_call(_LIB.MXNDArrayFree(self.handle))
    def __reduce__(self):
        # Pickle support: rebuild via _ndarray_cls using state from
        # __getstate__ (provided by subclasses -- confirm).
        return (_ndarray_cls, (None,), self.__getstate__())
# Frontend array classes, registered lazily via the setters below to
# avoid circular imports between this module and the ndarray frontends.
_ndarray_cls = None
_np_ndarray_cls = None
def _set_ndarray_class(cls):
    """Set the class used to wrap returned legacy NDArray handles."""
    global _ndarray_cls
    _ndarray_cls = cls
def _set_np_ndarray_class(cls):
    """Set the class used to wrap returned numpy-style ndarray handles."""
    global _np_ndarray_cls
    _np_ndarray_cls = cls
def _imperative_invoke(handle, ndargs, keys, vals, out, is_np_op, output_is_list):
    """ctypes implementation of imperative invoke wrapper.

    Calls the operator identified by ``handle`` on the input arrays
    ``ndargs`` with string attributes ``keys``/``vals``, optionally
    writing into preallocated ``out`` arrays, and wraps the resulting
    handles in the registered frontend array class.
    """
    if out is not None:
        original_output = out
        if isinstance(out, NDArrayBase):
            out = (out,)
        num_output = ctypes.c_int(len(out))
        output_vars = c_handle_array(out)
        output_vars = ctypes.cast(output_vars, ctypes.POINTER(NDArrayHandle))
    else:
        # Let the C API allocate the outputs.
        original_output = None
        output_vars = ctypes.POINTER(NDArrayHandle)()
        num_output = ctypes.c_int(0)
    # return output stypes to avoid the c_api call for checking
    # a handle's stype in _ndarray_cls
    out_stypes = ctypes.POINTER(ctypes.c_int)()
    check_call(_LIB.MXImperativeInvokeEx(
        ctypes.c_void_p(handle),
        ctypes.c_int(len(ndargs)),
        c_handle_array(ndargs),
        ctypes.byref(num_output),
        ctypes.byref(output_vars),
        ctypes.c_int(len(keys)),
        c_str_array(keys),
        c_str_array([str(s) for s in vals]),
        ctypes.byref(out_stypes)))
    # Choose the numpy-compatible or legacy frontend wrapper class.
    create_ndarray_fn = _np_ndarray_cls if is_np_op else _ndarray_cls
    if original_output is not None:
        return original_output
    if num_output.value == 1 and not output_is_list:
        return create_ndarray_fn(ctypes.cast(output_vars[0], NDArrayHandle),
                                 stype=out_stypes[0])
    else:
        return [create_ndarray_fn(ctypes.cast(output_vars[i], NDArrayHandle),
                                  stype=out_stypes[i]) for i in range(num_output.value)]
class CachedOp(object):
    """Cached operator handle wrapping a symbol graph for fast repeated
    imperative invocation via the C API."""
    __slots__ = ["handle", "is_np_sym", "_monitor_callback"]
    def __init__(self, sym, flags=()):
        self.handle = CachedOpHandle()
        # Kept alive here so the ctypes callback is not garbage collected
        # while registered with the C runtime.
        self._monitor_callback = None
        # Local import to avoid a circular dependency at module load.
        from ..symbol.numpy._symbol import _Symbol
        self.is_np_sym = bool(isinstance(sym, _Symbol))
        check_call(_LIB.MXCreateCachedOpEx(
            sym.handle,
            len(flags),
            c_str_array([key for key, _ in flags]),
            c_str_array([str(val) for _, val in flags]),
            ctypes.byref(self.handle)))
    def __del__(self):
        check_call(_LIB.MXFreeCachedOp(self.handle))
    def __call__(self, *args, **kwargs):
        """ctypes implementation of imperative invoke wrapper.

        Positional args are the input arrays; the only supported keyword
        is ``out`` (a preallocated output array or tuple of them).
        """
        out = kwargs.pop('out', None)
        if out is not None:
            original_output = out
            if isinstance(out, NDArrayBase):
                out = (out,)
            num_output = ctypes.c_int(len(out))
            output_vars = c_handle_array(out)
            output_vars = ctypes.cast(output_vars, ctypes.POINTER(NDArrayHandle))
        else:
            # Let the C API allocate the outputs.
            original_output = None
            output_vars = ctypes.POINTER(NDArrayHandle)()
            num_output = ctypes.c_int(0)
        if kwargs:
            raise TypeError(
                "CachedOp.__call__ got unexpected keyword argument(s): " + \
                ', '.join(kwargs.keys()))
        # return output stypes to avoid the c_api call for checking
        # a handle's stype in _ndarray_cls
        out_stypes = ctypes.POINTER(ctypes.c_int)()
        check_call(_LIB.MXInvokeCachedOpEx(
            self.handle,
            ctypes.c_int(len(args)),
            c_handle_array(args),
            ctypes.byref(num_output),
            ctypes.byref(output_vars),
            ctypes.byref(out_stypes)))
        if original_output is not None:
            return original_output
        # Wrap the returned handles in the appropriate frontend class.
        create_ndarray_fn = _np_ndarray_cls if self.is_np_sym else _ndarray_cls
        if num_output.value == 1:
            return create_ndarray_fn(ctypes.cast(output_vars[0], NDArrayHandle),
                                     stype=out_stypes[0])
        else:
            return [create_ndarray_fn(ctypes.cast(output_vars[i], NDArrayHandle),
                                      stype=out_stypes[i]) for i in range(num_output.value)]
    def _register_op_hook(self, callback, monitor_all=False):
        """Install callback for monitor.

        Parameters
        ----------
        callback : function
            Takes a string for node_name, string for op_name and a NDArrayHandle.
        monitor_all : bool, default False
            If true, monitor both input _imperative_invoked output, otherwise monitor output only.
        """
        cb_type = ctypes.CFUNCTYPE(None, ctypes.c_char_p, ctypes.c_char_p, NDArrayHandle, ctypes.c_void_p)
        if callback:
            # Stored on self so the ctypes function object outlives this call.
            self._monitor_callback = cb_type(_monitor_callback_wrapper(callback))
        check_call(_LIB.MXCachedOpRegisterOpHook(
            self.handle,
            self._monitor_callback,
            ctypes.c_int(monitor_all)))
| 35.507538 | 106 | 0.647891 |
from __future__ import absolute_import as _abs
import ctypes
from ..base import _LIB
from ..base import c_str_array, c_handle_array
from ..base import NDArrayHandle, CachedOpHandle
from ..base import check_call
def _monitor_callback_wrapper(callback):
def callback_handle(name, opr_name, array, _):
callback(name, opr_name, array)
return callback_handle
class NDArrayBase(object):
    """Base data structure for ndarray; wraps a C API NDArrayHandle."""
    __slots__ = ["handle", "writable"]
    def __init__(self, handle, writable=True):
        """Initialize from an existing C handle.

        handle may be None for a placeholder (e.g. during unpickling).
        """
        if handle is not None:
            assert isinstance(handle, NDArrayHandle)
        self.handle = handle
        # writable: whether in-place mutation of this array is allowed.
        self.writable = writable
    def __del__(self):
        # Release the underlying C allocation when the wrapper dies.
        check_call(_LIB.MXNDArrayFree(self.handle))
    def __reduce__(self):
        # Pickle support: rebuild via _ndarray_cls using state from
        # __getstate__ (provided by subclasses -- confirm).
        return (_ndarray_cls, (None,), self.__getstate__())
# Frontend array classes, registered lazily via the setters below to
# avoid circular imports between this module and the ndarray frontends.
_ndarray_cls = None
_np_ndarray_cls = None
def _set_ndarray_class(cls):
    """Set the class used to wrap returned legacy NDArray handles."""
    global _ndarray_cls
    _ndarray_cls = cls
def _set_np_ndarray_class(cls):
    """Set the class used to wrap returned numpy-style ndarray handles."""
    global _np_ndarray_cls
    _np_ndarray_cls = cls
def _imperative_invoke(handle, ndargs, keys, vals, out, is_np_op, output_is_list):
    """Invoke the operator ``handle`` on ``ndargs`` with attributes
    ``keys``/``vals``, optionally writing into preallocated ``out``
    arrays, and wrap the resulting handles in the registered frontend
    array class."""
    if out is not None:
        original_output = out
        if isinstance(out, NDArrayBase):
            out = (out,)
        num_output = ctypes.c_int(len(out))
        output_vars = c_handle_array(out)
        output_vars = ctypes.cast(output_vars, ctypes.POINTER(NDArrayHandle))
    else:
        # Let the C API allocate the outputs.
        original_output = None
        output_vars = ctypes.POINTER(NDArrayHandle)()
        num_output = ctypes.c_int(0)
    # Output storage types are returned directly to avoid an extra
    # C API call per output handle.
    out_stypes = ctypes.POINTER(ctypes.c_int)()
    check_call(_LIB.MXImperativeInvokeEx(
        ctypes.c_void_p(handle),
        ctypes.c_int(len(ndargs)),
        c_handle_array(ndargs),
        ctypes.byref(num_output),
        ctypes.byref(output_vars),
        ctypes.c_int(len(keys)),
        c_str_array(keys),
        c_str_array([str(s) for s in vals]),
        ctypes.byref(out_stypes)))
    # Choose the numpy-compatible or legacy frontend wrapper class.
    create_ndarray_fn = _np_ndarray_cls if is_np_op else _ndarray_cls
    if original_output is not None:
        return original_output
    if num_output.value == 1 and not output_is_list:
        return create_ndarray_fn(ctypes.cast(output_vars[0], NDArrayHandle),
                                 stype=out_stypes[0])
    else:
        return [create_ndarray_fn(ctypes.cast(output_vars[i], NDArrayHandle),
                                  stype=out_stypes[i]) for i in range(num_output.value)]
class CachedOp(object):
    """Cached operator handle wrapping a symbol graph for fast repeated
    imperative invocation via the C API."""
    __slots__ = ["handle", "is_np_sym", "_monitor_callback"]
    def __init__(self, sym, flags=()):
        self.handle = CachedOpHandle()
        # Kept alive here so the ctypes callback is not garbage collected
        # while registered with the C runtime.
        self._monitor_callback = None
        # Local import to avoid a circular dependency at module load.
        from ..symbol.numpy._symbol import _Symbol
        self.is_np_sym = bool(isinstance(sym, _Symbol))
        check_call(_LIB.MXCreateCachedOpEx(
            sym.handle,
            len(flags),
            c_str_array([key for key, _ in flags]),
            c_str_array([str(val) for _, val in flags]),
            ctypes.byref(self.handle)))
    def __del__(self):
        check_call(_LIB.MXFreeCachedOp(self.handle))
    def __call__(self, *args, **kwargs):
        """Invoke the cached op on the input arrays ``args``; the only
        supported keyword is ``out`` (preallocated output array(s))."""
        out = kwargs.pop('out', None)
        if out is not None:
            original_output = out
            if isinstance(out, NDArrayBase):
                out = (out,)
            num_output = ctypes.c_int(len(out))
            output_vars = c_handle_array(out)
            output_vars = ctypes.cast(output_vars, ctypes.POINTER(NDArrayHandle))
        else:
            # Let the C API allocate the outputs.
            original_output = None
            output_vars = ctypes.POINTER(NDArrayHandle)()
            num_output = ctypes.c_int(0)
        if kwargs:
            raise TypeError(
                "CachedOp.__call__ got unexpected keyword argument(s): " + \
                ', '.join(kwargs.keys()))
        # return output stypes to avoid the c_api call for checking
        # a handle's stype in _ndarray_cls
        out_stypes = ctypes.POINTER(ctypes.c_int)()
        check_call(_LIB.MXInvokeCachedOpEx(
            self.handle,
            ctypes.c_int(len(args)),
            c_handle_array(args),
            ctypes.byref(num_output),
            ctypes.byref(output_vars),
            ctypes.byref(out_stypes)))
        if original_output is not None:
            return original_output
        # Wrap the returned handles in the appropriate frontend class.
        create_ndarray_fn = _np_ndarray_cls if self.is_np_sym else _ndarray_cls
        if num_output.value == 1:
            return create_ndarray_fn(ctypes.cast(output_vars[0], NDArrayHandle),
                                     stype=out_stypes[0])
        else:
            return [create_ndarray_fn(ctypes.cast(output_vars[i], NDArrayHandle),
                                      stype=out_stypes[i]) for i in range(num_output.value)]
    def _register_op_hook(self, callback, monitor_all=False):
        """Install a monitor callback invoked with (node_name, op_name,
        array handle); monitor_all=True monitors inputs as well as
        outputs."""
        cb_type = ctypes.CFUNCTYPE(None, ctypes.c_char_p, ctypes.c_char_p, NDArrayHandle, ctypes.c_void_p)
        if callback:
            # Stored on self so the ctypes function object outlives this call.
            self._monitor_callback = cb_type(_monitor_callback_wrapper(callback))
        check_call(_LIB.MXCachedOpRegisterOpHook(
            self.handle,
            self._monitor_callback,
            ctypes.c_int(monitor_all)))
| true | true |
f71e655c0d5a42907e4e8c54df2f89045dda2ece | 988 | py | Python | leadmanager/accounts/serializers.py | Nerrdii/lead-manager | 391a698ee4f2f389e57527e460f9f20b5fd73060 | [
"MIT"
] | null | null | null | leadmanager/accounts/serializers.py | Nerrdii/lead-manager | 391a698ee4f2f389e57527e460f9f20b5fd73060 | [
"MIT"
] | 3 | 2021-04-08T19:11:07.000Z | 2021-06-11T15:13:02.000Z | leadmanager/accounts/serializers.py | Nerrdii/lead-manager | 391a698ee4f2f389e57527e460f9f20b5fd73060 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
class UserSerializer(serializers.ModelSerializer):
    """Read-only representation of a Django auth User (password excluded)."""
    class Meta:
        model = User
        fields = ('id', 'username', 'email')
class RegisterSerializer(serializers.ModelSerializer):
    """Creates new users; the password is accepted on input but never echoed back."""
    class Meta:
        model = User
        fields = ('id', 'username', 'email', 'password')
        # Keep the password out of serialized responses.
        extra_kwargs = {'password': {'write_only': True}}
    def create(self, validated_data):
        # create_user hashes the password (unlike a plain create()).
        user = User.objects.create_user(
            validated_data['username'], validated_data['email'], validated_data['password'])
        return user
class LoginSerializer(serializers.Serializer):
    """Validates a username/password pair against Django's auth backends."""
    username = serializers.CharField()
    password = serializers.CharField()
    def validate(self, data):
        """Return the authenticated, active user or raise ValidationError."""
        user = authenticate(**data)
        # authenticate returns None on bad credentials.
        if not user or not user.is_active:
            raise serializers.ValidationError("Incorrect credentials")
        return user
| 28.228571 | 92 | 0.678138 | from rest_framework import serializers
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'email')
class RegisterSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'email', 'password')
extra_kwargs = {'password': {'write_only': True}}
def create(self, validated_data):
user = User.objects.create_user(
validated_data['username'], validated_data['email'], validated_data['password'])
return user
class LoginSerializer(serializers.Serializer):
username = serializers.CharField()
password = serializers.CharField()
def validate(self, data):
user = authenticate(**data)
if user and user.is_active:
return user
raise serializers.ValidationError("Incorrect credentials")
| true | true |
f71e6662c54d213d3019b65fbc96d3cbbbbdac09 | 6,350 | py | Python | google/cloud/vision_v1p2beta1/services/image_annotator/transports/base.py | dylancaponi/python-vision | f94fb5b03bf8932e75967249292d23fed2ae2213 | [
"Apache-2.0"
] | null | null | null | google/cloud/vision_v1p2beta1/services/image_annotator/transports/base.py | dylancaponi/python-vision | f94fb5b03bf8932e75967249292d23fed2ae2213 | [
"Apache-2.0"
] | 1 | 2021-02-23T12:41:14.000Z | 2021-02-23T12:41:14.000Z | google/cloud/vision_v1p2beta1/services/image_annotator/transports/base.py | dylancaponi/python-vision | f94fb5b03bf8932e75967249292d23fed2ae2213 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials # type: ignore
from google.cloud.vision_v1p2beta1.types import image_annotator
from google.longrunning import operations_pb2 as operations # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-vision",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class ImageAnnotatorTransport(abc.ABC):
    """Abstract transport class for ImageAnnotator.

    Concrete transports (gRPC, gRPC-asyncio) subclass this and implement the
    ``batch_annotate_images`` / ``async_batch_annotate_files`` properties and
    the ``operations_client`` accessor.
    """

    AUTH_SCOPES = (
        "https://www.googleapis.com/auth/cloud-platform",
        "https://www.googleapis.com/auth/cloud-vision",
    )

    def __init__(
        self,
        *,
        host: str = "vision.googleapis.com",
        # Fixed annotation: the parameter defaults to None, so it must be
        # Optional (PEP 484); the original annotated it as a bare
        # credentials.Credentials with a None default.
        credentials: typing.Optional[credentials.Credentials] = None,
        credentials_file: typing.Optional[str] = None,
        scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
        quota_project_id: typing.Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for
                billing and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        if credentials_file is not None:
            credentials, _ = auth.load_credentials_from_file(
                credentials_file, scopes=scopes, quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = auth.default(
                scopes=scopes, quota_project_id=quota_project_id
            )

        # Save the credentials.
        self._credentials = credentials

        # Lifted into its own function so it can be stubbed out during tests.
        self._prep_wrapped_messages(client_info)

    def _prep_wrapped_messages(self, client_info):
        """Precompute the retry/timeout-wrapped RPC methods."""
        self._wrapped_methods = {
            self.batch_annotate_images: gapic_v1.method.wrap_method(
                self.batch_annotate_images,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
                    ),
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.async_batch_annotate_files: gapic_v1.method.wrap_method(
                self.async_batch_annotate_files,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
                    ),
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
        }

    @property
    def operations_client(self) -> operations_v1.OperationsClient:
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()

    @property
    def batch_annotate_images(
        self,
    ) -> typing.Callable[
        [image_annotator.BatchAnnotateImagesRequest],
        typing.Union[
            image_annotator.BatchAnnotateImagesResponse,
            typing.Awaitable[image_annotator.BatchAnnotateImagesResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def async_batch_annotate_files(
        self,
    ) -> typing.Callable[
        [image_annotator.AsyncBatchAnnotateFilesRequest],
        typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
    ]:
        raise NotImplementedError()
| 37.797619 | 85 | 0.639685 |
import abc
import typing
import pkg_resources
from google import auth
from google.api_core import exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials
from google.cloud.vision_v1p2beta1.types import image_annotator
from google.longrunning import operations_pb2 as operations
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-vision",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class ImageAnnotatorTransport(abc.ABC):
AUTH_SCOPES = (
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision",
)
def __init__(
self,
*,
host: str = "vision.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: typing.Optional[str] = None,
scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
quota_project_id: typing.Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
**kwargs,
) -> None:
if ":" not in host:
host += ":443"
self._host = host
if credentials and credentials_file:
raise exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = auth.load_credentials_from_file(
credentials_file, scopes=scopes, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = auth.default(
scopes=scopes, quota_project_id=quota_project_id
)
self._credentials = credentials
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
self._wrapped_methods = {
self.batch_annotate_images: gapic_v1.method.wrap_method(
self.batch_annotate_images,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
),
),
default_timeout=600.0,
client_info=client_info,
),
self.async_batch_annotate_files: gapic_v1.method.wrap_method(
self.async_batch_annotate_files,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
),
),
default_timeout=600.0,
client_info=client_info,
),
}
@property
def operations_client(self) -> operations_v1.OperationsClient:
raise NotImplementedError()
@property
def batch_annotate_images(
self,
) -> typing.Callable[
[image_annotator.BatchAnnotateImagesRequest],
typing.Union[
image_annotator.BatchAnnotateImagesResponse,
typing.Awaitable[image_annotator.BatchAnnotateImagesResponse],
],
]:
raise NotImplementedError()
@property
def async_batch_annotate_files(
self,
) -> typing.Callable[
[image_annotator.AsyncBatchAnnotateFilesRequest],
typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
]:
raise NotImplementedError()
__all__ = ("ImageAnnotatorTransport",)
| true | true |
f71e6798c5685f6274a337abf79482e269315bdc | 85,692 | py | Python | uhd_restpy/testplatform/sessions/ixnetwork/topology/pppoxclient_57c51b5ca094121e33c3a9ba5033980f.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | uhd_restpy/testplatform/sessions/ixnetwork/topology/pppoxclient_57c51b5ca094121e33c3a9ba5033980f.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | uhd_restpy/testplatform/sessions/ixnetwork/topology/pppoxclient_57c51b5ca094121e33c3a9ba5033980f.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class Pppoxclient(Base):
"""PPPoX Client
The Pppoxclient class encapsulates a list of pppoxclient resources that are managed by the user.
A list of resources can be retrieved from the server using the Pppoxclient.find() method.
The list can be managed by using the Pppoxclient.add() and Pppoxclient.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'pppoxclient'
_SDM_ATT_MAP = {
'AcMatchMac': 'acMatchMac',
'AcMatchName': 'acMatchName',
'AcOptions': 'acOptions',
'ActualRateDownstream': 'actualRateDownstream',
'ActualRateUpstream': 'actualRateUpstream',
'AgentAccessAggregationCircuitId': 'agentAccessAggregationCircuitId',
'AgentCircuitId': 'agentCircuitId',
'AgentRemoteId': 'agentRemoteId',
'AuthRetries': 'authRetries',
'AuthTimeout': 'authTimeout',
'AuthType': 'authType',
'ChapName': 'chapName',
'ChapSecret': 'chapSecret',
'ClientDnsOptions': 'clientDnsOptions',
'ClientLocalIp': 'clientLocalIp',
'ClientLocalIpv6Iid': 'clientLocalIpv6Iid',
'ClientNcpOptions': 'clientNcpOptions',
'ClientNetmask': 'clientNetmask',
'ClientNetmaskOptions': 'clientNetmaskOptions',
'ClientPrimaryDnsAddress': 'clientPrimaryDnsAddress',
'ClientSecondaryDnsAddress': 'clientSecondaryDnsAddress',
'ClientSignalIWF': 'clientSignalIWF',
'ClientSignalLoopChar': 'clientSignalLoopChar',
'ClientSignalLoopEncapsulation': 'clientSignalLoopEncapsulation',
'ClientSignalLoopId': 'clientSignalLoopId',
'ClientV6NcpOptions': 'clientV6NcpOptions',
'ClientWinsOptions': 'clientWinsOptions',
'ClientWinsPrimaryAddress': 'clientWinsPrimaryAddress',
'ClientWinsSecondaryAddress': 'clientWinsSecondaryAddress',
'ConnectSpeedUpdateEnable': 'connectSpeedUpdateEnable',
'ConnectedVia': 'connectedVia',
'Count': 'count',
'DataLink': 'dataLink',
'DescriptiveName': 'descriptiveName',
'DiscoveredIpv4Addresses': 'discoveredIpv4Addresses',
'DiscoveredIpv6Addresses': 'discoveredIpv6Addresses',
'DiscoveredMacs': 'discoveredMacs',
'DiscoveredRemoteSessionIds': 'discoveredRemoteSessionIds',
'DiscoveredRemoteTunnelIds': 'discoveredRemoteTunnelIds',
'DiscoveredSessionIds': 'discoveredSessionIds',
'DiscoveredTunnelIPs': 'discoveredTunnelIPs',
'DiscoveredTunnelIds': 'discoveredTunnelIds',
'DomainList': 'domainList',
'DslTypeTlv': 'dslTypeTlv',
'EchoReqInterval': 'echoReqInterval',
'EnableDomainGroups': 'enableDomainGroups',
'EnableEchoReq': 'enableEchoReq',
'EnableEchoRsp': 'enableEchoRsp',
'EnableHostUniq': 'enableHostUniq',
'EnableMaxPayload': 'enableMaxPayload',
'EnableRedial': 'enableRedial',
'Encaps1': 'encaps1',
'Encaps2': 'encaps2',
'EndpointDiscNegotiation': 'endpointDiscNegotiation',
'EndpointDiscriminatorClass': 'endpointDiscriminatorClass',
'Errors': 'errors',
'HostUniq': 'hostUniq',
'HostUniqLength': 'hostUniqLength',
'LcpAccm': 'lcpAccm',
'LcpEnableAccm': 'lcpEnableAccm',
'LcpMaxFailure': 'lcpMaxFailure',
'LcpRetries': 'lcpRetries',
'LcpStartDelay': 'lcpStartDelay',
'LcpTermRetries': 'lcpTermRetries',
'LcpTimeout': 'lcpTimeout',
'MaxPayload': 'maxPayload',
'MlpppIPAddress': 'mlpppIPAddress',
'MlpppMACAddress': 'mlpppMACAddress',
'Mrru': 'mrru',
'MrruNegotiation': 'mrruNegotiation',
'MruNegotiation': 'mruNegotiation',
'Mtu': 'mtu',
'Multiplier': 'multiplier',
'Name': 'name',
'NcpRetries': 'ncpRetries',
'NcpTimeout': 'ncpTimeout',
'NcpType': 'ncpType',
'PadiRetries': 'padiRetries',
'PadiTimeout': 'padiTimeout',
'PadrRetries': 'padrRetries',
'PadrTimeout': 'padrTimeout',
'PapPassword': 'papPassword',
'PapUser': 'papUser',
'PonTypeTlv': 'ponTypeTlv',
'RedialMax': 'redialMax',
'RedialTimeout': 'redialTimeout',
'RxConnectSpeed': 'rxConnectSpeed',
'ServiceName': 'serviceName',
'ServiceOptions': 'serviceOptions',
'SessionInfo': 'sessionInfo',
'SessionStatus': 'sessionStatus',
'StackedLayers': 'stackedLayers',
'StateCounts': 'stateCounts',
'Status': 'status',
'TxConnectSpeed': 'txConnectSpeed',
'UnlimitedRedialAttempts': 'unlimitedRedialAttempts',
'UserDefinedDslType': 'userDefinedDslType',
'UserDefinedPonType': 'userDefinedPonType',
}
    def __init__(self, parent):
        """Attach this pppoxclient node under *parent* in the REST hierarchy."""
        # All node bookkeeping is handled by the Base REST-object constructor.
        super(Pppoxclient, self).__init__(parent)
@property
def Bfdv4Interface(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bfdv4interface_91b557a3f744baf442dbe21ac75e8f2e.Bfdv4Interface): An instance of the Bfdv4Interface class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bfdv4interface_91b557a3f744baf442dbe21ac75e8f2e import Bfdv4Interface
return Bfdv4Interface(self)
@property
def Bfdv6Interface(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bfdv6interface_b9a91920db1b70c8c6410d2de0b438d3.Bfdv6Interface): An instance of the Bfdv6Interface class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bfdv6interface_b9a91920db1b70c8c6410d2de0b438d3 import Bfdv6Interface
return Bfdv6Interface(self)
@property
def BgpIpv4Peer(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpipv4peer_9dd9eddcf2bd784d82d8a016e392f035.BgpIpv4Peer): An instance of the BgpIpv4Peer class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpipv4peer_9dd9eddcf2bd784d82d8a016e392f035 import BgpIpv4Peer
return BgpIpv4Peer(self)
@property
def BgpIpv6Peer(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6peer_d4ac277d9da759fd5a152b8e6eb0ab20.BgpIpv6Peer): An instance of the BgpIpv6Peer class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6peer_d4ac277d9da759fd5a152b8e6eb0ab20 import BgpIpv6Peer
return BgpIpv6Peer(self)
@property
def Connector(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.connector_d0d942810e4010add7642d3914a1f29b.Connector): An instance of the Connector class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.connector_d0d942810e4010add7642d3914a1f29b import Connector
return Connector(self)
@property
def Dhcpv6client(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.dhcpv6client_355391ba11ab3c1555c827e2e4ac3c4c.Dhcpv6client): An instance of the Dhcpv6client class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.dhcpv6client_355391ba11ab3c1555c827e2e4ac3c4c import Dhcpv6client
return Dhcpv6client(self)
@property
def ECpriRe(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.ecprire_51f1030cbafd2e567d3b517032a1b011.ECpriRe): An instance of the ECpriRe class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.ecprire_51f1030cbafd2e567d3b517032a1b011 import ECpriRe
return ECpriRe(self)
@property
def ECpriRec(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.ecprirec_129f1d43f285a4f806ade4e0df814255.ECpriRec): An instance of the ECpriRec class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.ecprirec_129f1d43f285a4f806ade4e0df814255 import ECpriRec
return ECpriRec(self)
@property
def Geneve(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.geneve_14ab6f140956b4fc77d1d0f03c5e7514.Geneve): An instance of the Geneve class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.geneve_14ab6f140956b4fc77d1d0f03c5e7514 import Geneve
return Geneve(self)
@property
def IgmpHost(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.igmphost_8940887674c0387469423e8df3a33854.IgmpHost): An instance of the IgmpHost class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.igmphost_8940887674c0387469423e8df3a33854 import IgmpHost
return IgmpHost(self)
@property
def IgmpQuerier(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.igmpquerier_38c883b0cec7ffb5405af90bf1b8cda5.IgmpQuerier): An instance of the IgmpQuerier class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.igmpquerier_38c883b0cec7ffb5405af90bf1b8cda5 import IgmpQuerier
return IgmpQuerier(self)
@property
def MldHost(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.mldhost_824a1bed927138d4bb32f7d2631197a5.MldHost): An instance of the MldHost class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.mldhost_824a1bed927138d4bb32f7d2631197a5 import MldHost
return MldHost(self)
@property
def MldQuerier(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.mldquerier_e20671d730d138d65036e88d7cad63ac.MldQuerier): An instance of the MldQuerier class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.mldquerier_e20671d730d138d65036e88d7cad63ac import MldQuerier
return MldQuerier(self)
@property
def MplsOam(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.mplsoam_e01bb6affe899a4731aa60619f4aeadc.MplsOam): An instance of the MplsOam class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.mplsoam_e01bb6affe899a4731aa60619f4aeadc import MplsOam
return MplsOam(self)
@property
def NetconfClient(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.netconfclient_1eaa2ab0efacd988796bdc1f5fe4291c.NetconfClient): An instance of the NetconfClient class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.netconfclient_1eaa2ab0efacd988796bdc1f5fe4291c import NetconfClient
return NetconfClient(self)
@property
def NetconfServer(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.netconfserver_ad256f8ca38068f1eaff839ed40b1e30.NetconfServer): An instance of the NetconfServer class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.netconfserver_ad256f8ca38068f1eaff839ed40b1e30 import NetconfServer
return NetconfServer(self)
@property
def Ospfv2(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.ospfv2_27b7a27a991a50e01e629b9de482a2f0.Ospfv2): An instance of the Ospfv2 class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.ospfv2_27b7a27a991a50e01e629b9de482a2f0 import Ospfv2
return Ospfv2(self)
@property
def Ospfv3(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.ospfv3_c029fd7cd4a9e9897b7b4e4547458751.Ospfv3): An instance of the Ospfv3 class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.ospfv3_c029fd7cd4a9e9897b7b4e4547458751 import Ospfv3
return Ospfv3(self)
@property
def Pcc(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.pcc_9346785b55d17399fecd6fe36c418219.Pcc): An instance of the Pcc class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.pcc_9346785b55d17399fecd6fe36c418219 import Pcc
return Pcc(self)
@property
def Pce(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.pce_bd5f6a11078a4f0deb5d56bef8e9674f.Pce): An instance of the Pce class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.pce_bd5f6a11078a4f0deb5d56bef8e9674f import Pce
return Pce(self)
@property
def PimV4Interface(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.pimv4interface_92603cbceaf153039f7575ed9bc4aa67.PimV4Interface): An instance of the PimV4Interface class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.pimv4interface_92603cbceaf153039f7575ed9bc4aa67 import PimV4Interface
return PimV4Interface(self)
@property
def PimV6Interface(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.pimv6interface_74a3aa08a315ca50732e853e3e8cdc43.PimV6Interface): An instance of the PimV6Interface class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.pimv6interface_74a3aa08a315ca50732e853e3e8cdc43 import PimV6Interface
return PimV6Interface(self)
@property
def Tag(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d.Tag): An instance of the Tag class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d import Tag
return Tag(self)
@property
def TlvProfile(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.tlvprofile_69db000d3ef3b060f5edc387b878736c.TlvProfile): An instance of the TlvProfile class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.tlvprofile_69db000d3ef3b060f5edc387b878736c import TlvProfile
return TlvProfile(self)
@property
def Vxlan(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.vxlan_ed3df6fe7146492fc5fe0f77f53f9473.Vxlan): An instance of the Vxlan class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.vxlan_ed3df6fe7146492fc5fe0f77f53f9473 import Vxlan
return Vxlan(self)
@property
def Vxlanv6(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.vxlanv6_c18187deccae3db44b9e9de30ad538ec.Vxlanv6): An instance of the Vxlanv6 class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.vxlanv6_c18187deccae3db44b9e9de30ad538ec import Vxlanv6
return Vxlanv6(self)
    @property
    def AcMatchMac(self):
        """Multivalue pattern backing the ``acMatchMac`` attribute.

        Presumably the Access Concentrator MAC address to match during PPPoE
        discovery -- TODO confirm against the IxNetwork PPPoX documentation
        (the generated docstring was "?").

        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): pattern wrapper for 'acMatchMac'
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AcMatchMac']))
@property
def AcMatchName(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): ?
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AcMatchName']))
@property
def AcOptions(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Indicates PPPoE AC retrieval mode
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AcOptions']))
@property
def ActualRateDownstream(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This parameter specifies the value to be included in the vendor specific PPPoE tag. It is the actual downstream data rate (sub-option 0x81), in kbps.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ActualRateDownstream']))
@property
def ActualRateUpstream(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This parameter specifies the value to be included in the vendor specific PPPoE tag. It is the actual upstream data rate (sub-option 0x82), in kbps.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ActualRateUpstream']))
@property
def AgentAccessAggregationCircuitId(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The value to be inserted into the Agent Access-Aggregation-Circuit-ID-ASCII-Value field of the PPPoX tag.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AgentAccessAggregationCircuitId']))
@property
def AgentCircuitId(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The value to be inserted into the Agent Circuit ID field of the PPPoX tag.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AgentCircuitId']))
@property
def AgentRemoteId(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The value to be inserted into the Agent Remote ID field of the PPPoX tag.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AgentRemoteId']))
@property
def AuthRetries(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Number of PPP authentication retries
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AuthRetries']))
@property
def AuthTimeout(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Timeout for PPP authentication, in seconds.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AuthTimeout']))
@property
def AuthType(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The authentication type to use during link setup.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AuthType']))
@property
def ChapName(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): User name when CHAP Authentication is being used
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ChapName']))
@property
def ChapSecret(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Secret when CHAP Authentication is being used
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ChapSecret']))
@property
def ClientDnsOptions(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The client DNS options.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientDnsOptions']))
@property
def ClientLocalIp(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The requested IPv4 address.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientLocalIp']))
@property
def ClientLocalIpv6Iid(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The requested IPv6 Interface Identifier (IID).
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientLocalIpv6Iid']))
@property
def ClientNcpOptions(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The NCP configuration mode for IPv4 addressing.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientNcpOptions']))
@property
def ClientNetmask(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The netmask that the client will use with the assigned IP address.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientNetmask']))
@property
def ClientNetmaskOptions(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The client netmask option.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientNetmaskOptions']))
@property
def ClientPrimaryDnsAddress(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This is the primary DNS server address that the client requests from the server when the value of the Client DNS Options field is set to 'Request Primary only' or 'Request Primary and Secondary'.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientPrimaryDnsAddress']))
@property
def ClientSecondaryDnsAddress(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This is the secondary DNS server address that the client requests from the server when the value of the Client DNS Options field is set to 'Request Primary and Secondary'.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientSecondaryDnsAddress']))
@property
def ClientSignalIWF(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This parameter enables or disables the insertion of sub-option 0xFE (signaling of interworked sessions) into the DSL tag in PADI and PADR packets.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientSignalIWF']))
@property
def ClientSignalLoopChar(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This parameter enables or disables the insertion of sub-options 0x81 and 0x82 into the DSL tag in PADI and PADR packets.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientSignalLoopChar']))
@property
def ClientSignalLoopEncapsulation(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This parameter enables or disables the insertion of sub-option 0x90 into the DSL tag in PADI and PADR packets.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientSignalLoopEncapsulation']))
@property
def ClientSignalLoopId(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This parameter enables or disables the insertion of sub-options 0x01 , 0x02, 0x03 (Remote ID,Circuit ID and Access Aggregation Circuit ID) into the DSL tag in PADI and PADR packets.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientSignalLoopId']))
@property
def ClientV6NcpOptions(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The NCP configuration mode for IPv6 addressing.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientV6NcpOptions']))
@property
def ClientWinsOptions(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Specifies the mode in which WINS host addresses are configured.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientWinsOptions']))
@property
def ClientWinsPrimaryAddress(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Specifies the primary WINS address.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientWinsPrimaryAddress']))
@property
def ClientWinsSecondaryAddress(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Specifies the secondary WINS address.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientWinsSecondaryAddress']))
@property
def ConnectSpeedUpdateEnable(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): If checked, LAC will send Connect Speed Update Enable AVP in ICRQ control message
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ConnectSpeedUpdateEnable']))
@property
def ConnectedVia(self):
"""DEPRECATED
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of layers this layer is used to connect with to the wire.
"""
return self._get_attribute(self._SDM_ATT_MAP['ConnectedVia'])
@ConnectedVia.setter
def ConnectedVia(self, value):
self._set_attribute(self._SDM_ATT_MAP['ConnectedVia'], value)
@property
def Count(self):
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DataLink(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): A one-byte field included with sub-option 0x90.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DataLink']))
@property
def DescriptiveName(self):
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def DiscoveredIpv4Addresses(self):
"""
Returns
-------
- list(str): The discovered IPv4 addresses.
"""
return self._get_attribute(self._SDM_ATT_MAP['DiscoveredIpv4Addresses'])
@property
def DiscoveredIpv6Addresses(self):
"""
Returns
-------
- list(str): The discovered IPv6 addresses.
"""
return self._get_attribute(self._SDM_ATT_MAP['DiscoveredIpv6Addresses'])
@property
def DiscoveredMacs(self):
"""
Returns
-------
- list(str): The discovered remote MAC address.
"""
return self._get_attribute(self._SDM_ATT_MAP['DiscoveredMacs'])
@property
def DiscoveredRemoteSessionIds(self):
"""
Returns
-------
- list(number): Remote session ID.
"""
return self._get_attribute(self._SDM_ATT_MAP['DiscoveredRemoteSessionIds'])
@property
def DiscoveredRemoteTunnelIds(self):
"""
Returns
-------
- list(number): Remote tunnel ID.
"""
return self._get_attribute(self._SDM_ATT_MAP['DiscoveredRemoteTunnelIds'])
@property
def DiscoveredSessionIds(self):
"""
Returns
-------
- list(number): The negotiated session ID.
"""
return self._get_attribute(self._SDM_ATT_MAP['DiscoveredSessionIds'])
@property
def DiscoveredTunnelIPs(self):
"""
Returns
-------
- list(str): The discovered remote tunnel IP.
"""
return self._get_attribute(self._SDM_ATT_MAP['DiscoveredTunnelIPs'])
@property
def DiscoveredTunnelIds(self):
"""
Returns
-------
- list(number): The negotiated tunnel ID.
"""
return self._get_attribute(self._SDM_ATT_MAP['DiscoveredTunnelIds'])
@property
def DomainList(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Configure domain group settings
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DomainList']))
@property
def DslTypeTlv(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): DSL Type to be advertised in PPPoE VSA Tag. For undefined DSL type user has to select User-defined DSL Type.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DslTypeTlv']))
@property
def EchoReqInterval(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Keep alive interval, in seconds
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EchoReqInterval']))
@property
def EnableDomainGroups(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable domain groups
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableDomainGroups']))
    @property
    def EnableEchoReq(self):
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Upstream model documents this attribute only as '?'. NOTE(review): presumably enables sending of LCP Echo-Request packets (cf. EchoReqInterval above) — confirm against the IxNetwork data model.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableEchoReq']))
    @property
    def EnableEchoRsp(self):
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Upstream model documents this attribute only as '?'. NOTE(review): presumably enables replying to LCP Echo-Requests with Echo-Replies — confirm against the IxNetwork data model.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableEchoRsp']))
@property
def EnableHostUniq(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enables PPPoE Host-Uniq tag
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableHostUniq']))
@property
def EnableMaxPayload(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enables PPPoE Max Payload tag
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableMaxPayload']))
@property
def EnableRedial(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): If checked, PPPoE redial is enabled
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableRedial']))
@property
def Encaps1(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): A one-byte field included with sub-option 0x90.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Encaps1']))
@property
def Encaps2(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): A one-byte field included with sub-option 0x90.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Encaps2']))
@property
def EndpointDiscNegotiation(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable Endpoint Discriminator Negotiation
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EndpointDiscNegotiation']))
@property
def EndpointDiscriminatorClass(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Endpoint Discriminator for PPP
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EndpointDiscriminatorClass']))
@property
def Errors(self):
"""
Returns
-------
- list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str])): A list of errors that have occurred
"""
return self._get_attribute(self._SDM_ATT_MAP['Errors'])
@property
def HostUniq(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Indicates Host-Uniq Tag
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HostUniq']))
@property
def HostUniqLength(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Host-Uniq Length, in bytes
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HostUniqLength']))
@property
def LcpAccm(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Async-Control-Character-Map
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LcpAccm']))
@property
def LcpEnableAccm(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable Async-Control-Character-Map
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LcpEnableAccm']))
@property
def LcpMaxFailure(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Number of Configure-Nak packets sent without sending a Configure-Ack before assuming that configuration is not converging. Any further Configure-Nak packets for peer requested options are converted to Configure-Reject packets
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LcpMaxFailure']))
@property
def LcpRetries(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Number of LCP retries
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LcpRetries']))
@property
def LcpStartDelay(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Delay time in milliseconds to wait before sending LCP Config Request packet
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LcpStartDelay']))
@property
def LcpTermRetries(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Number of LCP Termination Retries
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LcpTermRetries']))
@property
def LcpTimeout(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Timeout for LCP phase, in seconds
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LcpTimeout']))
@property
def MaxPayload(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Max Payload
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MaxPayload']))
@property
def MlpppIPAddress(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The IP address used in the ML-PPP endpoint discriminator option of the LCP configure request sent by PPP clients
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MlpppIPAddress']))
@property
def MlpppMACAddress(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The MAC addresses are automatically derived from the local MAC address. An address in this class contains an IEEE 802.1 MAC address is canonical (802.3) format
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MlpppMACAddress']))
@property
def Mrru(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Max Receive Reconstructed Unit for PPP
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Mrru']))
@property
def MrruNegotiation(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable MRRU Negotiation
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MrruNegotiation']))
@property
def MruNegotiation(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable MRU Negotiation
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MruNegotiation']))
@property
def Mtu(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Max Transmit Unit for PPP
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Mtu']))
@property
def Multiplier(self):
"""
Returns
-------
- number: Number of layer instances per parent instance (multiplier)
"""
return self._get_attribute(self._SDM_ATT_MAP['Multiplier'])
@Multiplier.setter
def Multiplier(self, value):
self._set_attribute(self._SDM_ATT_MAP['Multiplier'], value)
@property
def Name(self):
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def NcpRetries(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Number of NCP retries
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NcpRetries']))
@property
def NcpTimeout(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Timeout for NCP phase, in seconds
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NcpTimeout']))
@property
def NcpType(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IP address type (IPv4 or IPv6) for Network Control Protocol
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NcpType']))
@property
def PadiRetries(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Number of PADI Retries
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PadiRetries']))
@property
def PadiTimeout(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Timeout for PADI no response, in seconds
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PadiTimeout']))
@property
def PadrRetries(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Number of PADR Retries
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PadrRetries']))
@property
def PadrTimeout(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Timeout for PADR no response, in seconds
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PadrTimeout']))
@property
def PapPassword(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Password when PAP Authentication is being used
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PapPassword']))
@property
def PapUser(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): User name when PAP Authentication is being used
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PapUser']))
@property
def PonTypeTlv(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): PON Type to be advertised in PPPoE VSA Tag. For undefined PON type user has to select User-defined PON Type.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PonTypeTlv']))
@property
def RedialMax(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Maximum number of PPPoE redials
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RedialMax']))
@property
def RedialTimeout(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): PPPoE redial timeout, in seconds
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RedialTimeout']))
@property
def RxConnectSpeed(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Rx Connection Speed
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RxConnectSpeed']))
@property
def ServiceName(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Access Concentrator Service Name - this option is only available for PPP servers.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ServiceName']))
@property
def ServiceOptions(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Indicates PPPoE service retrieval mode
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ServiceOptions']))
@property
def SessionInfo(self):
"""
Returns
-------
- list(str[cLS_CFG_REJ_AUTH | cLS_CHAP_PEER_DET_FAIL | cLS_CHAP_PEER_RESP_BAD | cLS_CODE_REJ_IPCP | cLS_CODE_REJ_IPV6CP | cLS_CODE_REJ_LCP | cLS_ERR_PPP_NO_BUF | cLS_ERR_PPP_SEND_PKT | cLS_LINK_DISABLE | cLS_LOC_IPADDR_BROADCAST | cLS_LOC_IPADDR_CLASS_E | cLS_LOC_IPADDR_INVAL_ACKS_0 | cLS_LOC_IPADDR_INVAL_ACKS_DIFF | cLS_LOC_IPADDR_LOOPBACK | cLS_LOC_IPADDR_PEER_MATCH_LOC | cLS_LOC_IPADDR_PEER_NO_GIVE | cLS_LOC_IPADDR_PEER_NO_HELP | cLS_LOC_IPADDR_PEER_NO_TAKE | cLS_LOC_IPADDR_PEER_REJ | cLS_LOOPBACK_DETECT | cLS_NO_NCP | cLS_NONE | cLS_PAP_BAD_PASSWD | cLS_PEER_DISCONNECTED | cLS_PEER_DISCONNECTED_NEGO | cLS_PEER_IPADDR_MATCH_LOC | cLS_PEER_IPADDR_PEER_NO_SET | cLS_PPOE_AC_SYSTEM_ERROR | cLS_PPOE_GENERIC_ERROR | cLS_PPP_DISABLE | cLS_PPPOE_NO_HOST_UNIQ | cLS_PPPOE_PADI_TIMEOUT | cLS_PPPOE_PADO_TIMEOUT | cLS_PPPOE_PADR_TIMEOUT | cLS_PROTO_REJ_IPCP | cLS_PROTO_REJ_IPv6CP | cLS_TIMEOUT_CHAP_CHAL | cLS_TIMEOUT_CHAP_RESP | cLS_TIMEOUT_IPCP_CFG_REQ | cLS_TIMEOUT_IPV6CP_CFG_REQ | cLS_TIMEOUT_IPV6CP_RA | cLS_TIMEOUT_LCP_CFG_REQ | cLS_TIMEOUT_LCP_ECHO_REQ | cLS_TIMEOUT_PAP_AUTH_REQ | cLS_TUN_AUTH_FAILED | cLS_TUN_NO_RESOURCES | cLS_TUN_TIMEOUT_ICRQ | cLS_TUN_TIMEOUT_SCCRQ | cLS_TUN_VENDOR_SPECIFIC_ERR]): Logs additional information about the session state
"""
return self._get_attribute(self._SDM_ATT_MAP['SessionInfo'])
@property
def SessionStatus(self):
"""
Returns
-------
- list(str[down | notStarted | up]): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation is didn't successfully complete (yet). Up - session came up successfully.
"""
return self._get_attribute(self._SDM_ATT_MAP['SessionStatus'])
@property
def StackedLayers(self):
"""
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of secondary (many to one) child layer protocols
"""
return self._get_attribute(self._SDM_ATT_MAP['StackedLayers'])
@StackedLayers.setter
def StackedLayers(self, value):
self._set_attribute(self._SDM_ATT_MAP['StackedLayers'], value)
@property
def StateCounts(self):
"""
Returns
-------
- dict(total:number,notStarted:number,down:number,up:number): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
"""
return self._get_attribute(self._SDM_ATT_MAP['StateCounts'])
@property
def Status(self):
"""
Returns
-------
- str(configured | error | mixed | notStarted | started | starting | stopping): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
"""
return self._get_attribute(self._SDM_ATT_MAP['Status'])
@property
def TxConnectSpeed(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Tx Connection Speed
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TxConnectSpeed']))
@property
def UnlimitedRedialAttempts(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): If checked, PPPoE unlimited redial attempts is enabled
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UnlimitedRedialAttempts']))
@property
def UserDefinedDslType(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): User Defined DSL-Type Value.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UserDefinedDslType']))
@property
def UserDefinedPonType(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): User Defined PON-Type Value.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UserDefinedPonType']))
def update(self, ConnectedVia=None, Multiplier=None, Name=None, StackedLayers=None):
"""Updates pppoxclient resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, ConnectedVia=None, Multiplier=None, Name=None, StackedLayers=None):
"""Adds a new pppoxclient resource on the server and adds it to the container.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
Returns
-------
- self: This instance with all currently retrieved pppoxclient resources using find and the newly added pppoxclient resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
    def remove(self):
        """Deletes all the contained pppoxclient resources in this instance from the server.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        self._delete()
def find(self, ConnectedVia=None, Count=None, DescriptiveName=None, DiscoveredIpv4Addresses=None, DiscoveredIpv6Addresses=None, DiscoveredMacs=None, DiscoveredRemoteSessionIds=None, DiscoveredRemoteTunnelIds=None, DiscoveredSessionIds=None, DiscoveredTunnelIPs=None, DiscoveredTunnelIds=None, Errors=None, Multiplier=None, Name=None, SessionInfo=None, SessionStatus=None, StackedLayers=None, StateCounts=None, Status=None):
"""Finds and retrieves pppoxclient resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve pppoxclient resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all pppoxclient resources from the server.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- DiscoveredIpv4Addresses (list(str)): The discovered IPv4 addresses.
- DiscoveredIpv6Addresses (list(str)): The discovered IPv6 addresses.
- DiscoveredMacs (list(str)): The discovered remote MAC address.
- DiscoveredRemoteSessionIds (list(number)): Remote session ID.
- DiscoveredRemoteTunnelIds (list(number)): Remote tunnel ID.
- DiscoveredSessionIds (list(number)): The negotiated session ID.
- DiscoveredTunnelIPs (list(str)): The discovered remote tunnel IP.
- DiscoveredTunnelIds (list(number)): The negotiated tunnel ID.
- Errors (list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str]))): A list of errors that have occurred
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- SessionInfo (list(str[cLS_CFG_REJ_AUTH | cLS_CHAP_PEER_DET_FAIL | cLS_CHAP_PEER_RESP_BAD | cLS_CODE_REJ_IPCP | cLS_CODE_REJ_IPV6CP | cLS_CODE_REJ_LCP | cLS_ERR_PPP_NO_BUF | cLS_ERR_PPP_SEND_PKT | cLS_LINK_DISABLE | cLS_LOC_IPADDR_BROADCAST | cLS_LOC_IPADDR_CLASS_E | cLS_LOC_IPADDR_INVAL_ACKS_0 | cLS_LOC_IPADDR_INVAL_ACKS_DIFF | cLS_LOC_IPADDR_LOOPBACK | cLS_LOC_IPADDR_PEER_MATCH_LOC | cLS_LOC_IPADDR_PEER_NO_GIVE | cLS_LOC_IPADDR_PEER_NO_HELP | cLS_LOC_IPADDR_PEER_NO_TAKE | cLS_LOC_IPADDR_PEER_REJ | cLS_LOOPBACK_DETECT | cLS_NO_NCP | cLS_NONE | cLS_PAP_BAD_PASSWD | cLS_PEER_DISCONNECTED | cLS_PEER_DISCONNECTED_NEGO | cLS_PEER_IPADDR_MATCH_LOC | cLS_PEER_IPADDR_PEER_NO_SET | cLS_PPOE_AC_SYSTEM_ERROR | cLS_PPOE_GENERIC_ERROR | cLS_PPP_DISABLE | cLS_PPPOE_NO_HOST_UNIQ | cLS_PPPOE_PADI_TIMEOUT | cLS_PPPOE_PADO_TIMEOUT | cLS_PPPOE_PADR_TIMEOUT | cLS_PROTO_REJ_IPCP | cLS_PROTO_REJ_IPv6CP | cLS_TIMEOUT_CHAP_CHAL | cLS_TIMEOUT_CHAP_RESP | cLS_TIMEOUT_IPCP_CFG_REQ | cLS_TIMEOUT_IPV6CP_CFG_REQ | cLS_TIMEOUT_IPV6CP_RA | cLS_TIMEOUT_LCP_CFG_REQ | cLS_TIMEOUT_LCP_ECHO_REQ | cLS_TIMEOUT_PAP_AUTH_REQ | cLS_TUN_AUTH_FAILED | cLS_TUN_NO_RESOURCES | cLS_TUN_TIMEOUT_ICRQ | cLS_TUN_TIMEOUT_SCCRQ | cLS_TUN_VENDOR_SPECIFIC_ERR])): Logs additional information about the session state
- SessionStatus (list(str[down | notStarted | up])): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation is didn't successfully complete (yet). Up - session came up successfully.
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
- StateCounts (dict(total:number,notStarted:number,down:number,up:number)): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
- Status (str(configured | error | mixed | notStarted | started | starting | stopping)): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
Returns
-------
- self: This instance with matching pppoxclient resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of pppoxclient data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the pppoxclient resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(self, PortNames=None, AcMatchMac=None, AcMatchName=None, AcOptions=None, ActualRateDownstream=None, ActualRateUpstream=None, AgentAccessAggregationCircuitId=None, AgentCircuitId=None, AgentRemoteId=None, AuthRetries=None, AuthTimeout=None, AuthType=None, ChapName=None, ChapSecret=None, ClientDnsOptions=None, ClientLocalIp=None, ClientLocalIpv6Iid=None, ClientNcpOptions=None, ClientNetmask=None, ClientNetmaskOptions=None, ClientPrimaryDnsAddress=None, ClientSecondaryDnsAddress=None, ClientSignalIWF=None, ClientSignalLoopChar=None, ClientSignalLoopEncapsulation=None, ClientSignalLoopId=None, ClientV6NcpOptions=None, ClientWinsOptions=None, ClientWinsPrimaryAddress=None, ClientWinsSecondaryAddress=None, ConnectSpeedUpdateEnable=None, DataLink=None, DomainList=None, DslTypeTlv=None, EchoReqInterval=None, EnableDomainGroups=None, EnableEchoReq=None, EnableEchoRsp=None, EnableHostUniq=None, EnableMaxPayload=None, EnableRedial=None, Encaps1=None, Encaps2=None, EndpointDiscNegotiation=None, EndpointDiscriminatorClass=None, HostUniq=None, HostUniqLength=None, LcpAccm=None, LcpEnableAccm=None, LcpMaxFailure=None, LcpRetries=None, LcpStartDelay=None, LcpTermRetries=None, LcpTimeout=None, MaxPayload=None, MlpppIPAddress=None, MlpppMACAddress=None, Mrru=None, MrruNegotiation=None, MruNegotiation=None, Mtu=None, NcpRetries=None, NcpTimeout=None, NcpType=None, PadiRetries=None, PadiTimeout=None, PadrRetries=None, PadrTimeout=None, PapPassword=None, PapUser=None, PonTypeTlv=None, RedialMax=None, RedialTimeout=None, RxConnectSpeed=None, ServiceName=None, ServiceOptions=None, TxConnectSpeed=None, UnlimitedRedialAttempts=None, UserDefinedDslType=None, UserDefinedPonType=None):
    """Base class infrastructure that gets a list of pppoxclient device ids encapsulated by this object.

    Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.

    Args
    ----
    - PortNames (str): optional regex of port names
    - AcMatchMac (str): optional regex of acMatchMac
    - AcMatchName (str): optional regex of acMatchName
    - AcOptions (str): optional regex of acOptions
    - ActualRateDownstream (str): optional regex of actualRateDownstream
    - ActualRateUpstream (str): optional regex of actualRateUpstream
    - AgentAccessAggregationCircuitId (str): optional regex of agentAccessAggregationCircuitId
    - AgentCircuitId (str): optional regex of agentCircuitId
    - AgentRemoteId (str): optional regex of agentRemoteId
    - AuthRetries (str): optional regex of authRetries
    - AuthTimeout (str): optional regex of authTimeout
    - AuthType (str): optional regex of authType
    - ChapName (str): optional regex of chapName
    - ChapSecret (str): optional regex of chapSecret
    - ClientDnsOptions (str): optional regex of clientDnsOptions
    - ClientLocalIp (str): optional regex of clientLocalIp
    - ClientLocalIpv6Iid (str): optional regex of clientLocalIpv6Iid
    - ClientNcpOptions (str): optional regex of clientNcpOptions
    - ClientNetmask (str): optional regex of clientNetmask
    - ClientNetmaskOptions (str): optional regex of clientNetmaskOptions
    - ClientPrimaryDnsAddress (str): optional regex of clientPrimaryDnsAddress
    - ClientSecondaryDnsAddress (str): optional regex of clientSecondaryDnsAddress
    - ClientSignalIWF (str): optional regex of clientSignalIWF
    - ClientSignalLoopChar (str): optional regex of clientSignalLoopChar
    - ClientSignalLoopEncapsulation (str): optional regex of clientSignalLoopEncapsulation
    - ClientSignalLoopId (str): optional regex of clientSignalLoopId
    - ClientV6NcpOptions (str): optional regex of clientV6NcpOptions
    - ClientWinsOptions (str): optional regex of clientWinsOptions
    - ClientWinsPrimaryAddress (str): optional regex of clientWinsPrimaryAddress
    - ClientWinsSecondaryAddress (str): optional regex of clientWinsSecondaryAddress
    - ConnectSpeedUpdateEnable (str): optional regex of connectSpeedUpdateEnable
    - DataLink (str): optional regex of dataLink
    - DomainList (str): optional regex of domainList
    - DslTypeTlv (str): optional regex of dslTypeTlv
    - EchoReqInterval (str): optional regex of echoReqInterval
    - EnableDomainGroups (str): optional regex of enableDomainGroups
    - EnableEchoReq (str): optional regex of enableEchoReq
    - EnableEchoRsp (str): optional regex of enableEchoRsp
    - EnableHostUniq (str): optional regex of enableHostUniq
    - EnableMaxPayload (str): optional regex of enableMaxPayload
    - EnableRedial (str): optional regex of enableRedial
    - Encaps1 (str): optional regex of encaps1
    - Encaps2 (str): optional regex of encaps2
    - EndpointDiscNegotiation (str): optional regex of endpointDiscNegotiation
    - EndpointDiscriminatorClass (str): optional regex of endpointDiscriminatorClass
    - HostUniq (str): optional regex of hostUniq
    - HostUniqLength (str): optional regex of hostUniqLength
    - LcpAccm (str): optional regex of lcpAccm
    - LcpEnableAccm (str): optional regex of lcpEnableAccm
    - LcpMaxFailure (str): optional regex of lcpMaxFailure
    - LcpRetries (str): optional regex of lcpRetries
    - LcpStartDelay (str): optional regex of lcpStartDelay
    - LcpTermRetries (str): optional regex of lcpTermRetries
    - LcpTimeout (str): optional regex of lcpTimeout
    - MaxPayload (str): optional regex of maxPayload
    - MlpppIPAddress (str): optional regex of mlpppIPAddress
    - MlpppMACAddress (str): optional regex of mlpppMACAddress
    - Mrru (str): optional regex of mrru
    - MrruNegotiation (str): optional regex of mrruNegotiation
    - MruNegotiation (str): optional regex of mruNegotiation
    - Mtu (str): optional regex of mtu
    - NcpRetries (str): optional regex of ncpRetries
    - NcpTimeout (str): optional regex of ncpTimeout
    - NcpType (str): optional regex of ncpType
    - PadiRetries (str): optional regex of padiRetries
    - PadiTimeout (str): optional regex of padiTimeout
    - PadrRetries (str): optional regex of padrRetries
    - PadrTimeout (str): optional regex of padrTimeout
    - PapPassword (str): optional regex of papPassword
    - PapUser (str): optional regex of papUser
    - PonTypeTlv (str): optional regex of ponTypeTlv
    - RedialMax (str): optional regex of redialMax
    - RedialTimeout (str): optional regex of redialTimeout
    - RxConnectSpeed (str): optional regex of rxConnectSpeed
    - ServiceName (str): optional regex of serviceName
    - ServiceOptions (str): optional regex of serviceOptions
    - TxConnectSpeed (str): optional regex of txConnectSpeed
    - UnlimitedRedialAttempts (str): optional regex of unlimitedRedialAttempts
    - UserDefinedDslType (str): optional regex of userDefinedDslType
    - UserDefinedPonType (str): optional regex of userDefinedPonType

    Returns
    -------
    - list(int): A list of device ids that meets the regex criteria provided in the method parameters

    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    # locals() bundles every regex filter (plus self) into one dict for the
    # generic NGPF device-id lookup implemented on the Base class; do not add
    # local variables above this line or they would leak into the filter set.
    return self._get_ngpf_device_ids(locals())
def Abort(self, *args, **kwargs):
    """Execute the abort operation on the server.

    Aborts the CPF control plane (equivalent to demoting to the kUnconfigured
    state). IxNetwork accepts several signatures for this exec while Python
    permits only one:

    abort(SessionIndices=list)
    - SessionIndices (list(number)): an array of session numbers, e.g. 1 2 3

    abort(SessionIndices=string)
    - SessionIndices (str): a string of session numbers, e.g. 1-4;6;7-12

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self}
    # Positional arguments occupy Arg2, Arg3, ... in the exec payload.
    for position, value in enumerate(args, start=2):
        payload['Arg%s' % position] = value
    # Keyword arguments pass through to the payload unchanged.
    payload.update(kwargs)
    return self._execute('abort', payload=payload, response_object=None)
def CloseIpcp(self, *args, **kwargs):
    """Execute the closeIpcp operation on the server.

    Closes IPCP for the selected PPPoX items. IxNetwork accepts several
    signatures for this exec while Python permits only one:

    closeIpcp(SessionIndices=list)list
    - SessionIndices (list(number)): an array of session numbers, e.g. 1 2 3

    closeIpcp(SessionIndices=string)list
    - SessionIndices (str): a string of session numbers, e.g. 1-4;6;7-12

    Both forms return list(dict(port:str[None | /api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)):
    one structure per /vport holding the object reference, the success of the
    operation and the returned data. This exec is not asynchronous.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self.href}
    # Positional arguments occupy Arg2, Arg3, ... in the exec payload.
    for position, value in enumerate(args, start=2):
        payload['Arg%s' % position] = value
    # Keyword arguments pass through to the payload unchanged.
    payload.update(kwargs)
    return self._execute('closeIpcp', payload=payload, response_object=None)
def CloseIpv6cp(self, *args, **kwargs):
    """Execute the closeIpv6cp operation on the server.

    Closes IPv6CP for the selected PPPoX items. IxNetwork accepts several
    signatures for this exec while Python permits only one:

    closeIpv6cp(SessionIndices=list)list
    - SessionIndices (list(number)): an array of session numbers, e.g. 1 2 3

    closeIpv6cp(SessionIndices=string)list
    - SessionIndices (str): a string of session numbers, e.g. 1-4;6;7-12

    Both forms return list(dict(port:str[None | /api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)):
    one structure per /vport holding the object reference, the success of the
    operation and the returned data. This exec is not asynchronous.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self.href}
    # Positional arguments occupy Arg2, Arg3, ... in the exec payload.
    for position, value in enumerate(args, start=2):
        payload['Arg%s' % position] = value
    # Keyword arguments pass through to the payload unchanged.
    payload.update(kwargs)
    return self._execute('closeIpv6cp', payload=payload, response_object=None)
def OpenIpcp(self, *args, **kwargs):
    """Execute the openIpcp operation on the server.

    Opens IPCP for the selected PPPoX items. IxNetwork accepts several
    signatures for this exec while Python permits only one:

    openIpcp(SessionIndices=list)list
    - SessionIndices (list(number)): an array of session numbers, e.g. 1 2 3

    openIpcp(SessionIndices=string)list
    - SessionIndices (str): a string of session numbers, e.g. 1-4;6;7-12

    Both forms return list(dict(port:str[None | /api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)):
    one structure per /vport holding the object reference, the success of the
    operation and the returned data. This exec is not asynchronous.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self.href}
    # Positional arguments occupy Arg2, Arg3, ... in the exec payload.
    for position, value in enumerate(args, start=2):
        payload['Arg%s' % position] = value
    # Keyword arguments pass through to the payload unchanged.
    payload.update(kwargs)
    return self._execute('openIpcp', payload=payload, response_object=None)
def OpenIpv6cp(self, *args, **kwargs):
    """Execute the openIpv6cp operation on the server.

    Opens IPv6CP for the selected PPPoX items. IxNetwork accepts several
    signatures for this exec while Python permits only one:

    openIpv6cp(SessionIndices=list)list
    - SessionIndices (list(number)): an array of session numbers, e.g. 1 2 3

    openIpv6cp(SessionIndices=string)list
    - SessionIndices (str): a string of session numbers, e.g. 1-4;6;7-12

    Both forms return list(dict(port:str[None | /api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)):
    one structure per /vport holding the object reference, the success of the
    operation and the returned data. This exec is not asynchronous.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self.href}
    # Positional arguments occupy Arg2, Arg3, ... in the exec payload.
    for position, value in enumerate(args, start=2):
        payload['Arg%s' % position] = value
    # Keyword arguments pass through to the payload unchanged.
    payload.update(kwargs)
    return self._execute('openIpv6cp', payload=payload, response_object=None)
def RestartDown(self, *args, **kwargs):
    """Execute the restartDown operation on the server.

    Stops and starts interfaces and sessions that are in the Down state.
    IxNetwork accepts several signatures for this exec while Python permits
    only one:

    restartDown(SessionIndices=list)
    - SessionIndices (list(number)): an array of session numbers, e.g. 1 2 3

    restartDown(SessionIndices=string)
    - SessionIndices (str): a string of session numbers, e.g. 1-4;6;7-12

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self}
    # Positional arguments occupy Arg2, Arg3, ... in the exec payload.
    for position, value in enumerate(args, start=2):
        payload['Arg%s' % position] = value
    # Keyword arguments pass through to the payload unchanged.
    payload.update(kwargs)
    return self._execute('restartDown', payload=payload, response_object=None)
def SendPing(self, *args, **kwargs):
    """Execute the sendPing operation on the server.

    Sends an IPv4 Ping for the selected PPPoX items. IxNetwork accepts several
    signatures for this exec while Python permits only one:

    sendPing(DestIp=string)list
    - DestIp (str): This parameter requires a destIp of type kString

    sendPing(DestIp=string, SessionIndices=list)list
    - DestIp (str): This parameter requires a destIp of type kString
    - SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3

    sendPing(SessionIndices=string, DestIp=string)list
    - SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
    - DestIp (str): This parameter requires a destIp of type kString

    Every form returns list(dict(port:str[None | /api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)):
    one structure per /vport holding the object reference, the success of the
    operation and the returned data. This exec is not asynchronous.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self.href}
    # Positional arguments occupy Arg2, Arg3, ... in the exec payload.
    for position, value in enumerate(args, start=2):
        payload['Arg%s' % position] = value
    # Keyword arguments pass through to the payload unchanged.
    payload.update(kwargs)
    return self._execute('sendPing', payload=payload, response_object=None)
def SendPing6(self, *args, **kwargs):
    """Execute the sendPing6 operation on the server.

    Sends an IPv6 Ping for the selected PPPoX items. IxNetwork accepts several
    signatures for this exec while Python permits only one:

    sendPing6(DestIp=string)list
    - DestIp (str): This parameter requires a destIp of type kString

    sendPing6(DestIp=string, SessionIndices=list)list
    - DestIp (str): This parameter requires a destIp of type kString
    - SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3

    sendPing6(SessionIndices=string, DestIp=string)list
    - SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
    - DestIp (str): This parameter requires a destIp of type kString

    Every form returns list(dict(port:str[None | /api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)):
    one structure per /vport holding the object reference, the success of the
    operation and the returned data. This exec is not asynchronous.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self.href}
    # Positional arguments occupy Arg2, Arg3, ... in the exec payload.
    for position, value in enumerate(args, start=2):
        payload['Arg%s' % position] = value
    # Keyword arguments pass through to the payload unchanged.
    payload.update(kwargs)
    return self._execute('sendPing6', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
    """Execute the start operation on the server.

    Starts the CPF control plane (equivalent to promoting to the negotiated
    state). IxNetwork accepts several signatures for this exec while Python
    permits only one:

    start(SessionIndices=list)
    - SessionIndices (list(number)): an array of session numbers, e.g. 1 2 3

    start(SessionIndices=string)
    - SessionIndices (str): a string of session numbers, e.g. 1-4;6;7-12

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self}
    # Positional arguments occupy Arg2, Arg3, ... in the exec payload.
    for position, value in enumerate(args, start=2):
        payload['Arg%s' % position] = value
    # Keyword arguments pass through to the payload unchanged.
    payload.update(kwargs)
    return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
    """Execute the stop operation on the server.

    Stops the CPF control plane (equivalent to demoting to the
    PreValidated-DoDDone state). IxNetwork accepts several signatures for this
    exec while Python permits only one:

    stop(SessionIndices=list)
    - SessionIndices (list(number)): an array of session numbers, e.g. 1 2 3

    stop(SessionIndices=string)
    - SessionIndices (str): a string of session numbers, e.g. 1-4;6;7-12

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self}
    # Positional arguments occupy Arg2, Arg3, ... in the exec payload.
    for position, value in enumerate(args, start=2):
        payload['Arg%s' % position] = value
    # Keyword arguments pass through to the payload unchanged.
    payload.update(kwargs)
    return self._execute('stop', payload=payload, response_object=None)
| 43.832225 | 1,709 | 0.667437 |
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class Pppoxclient(Base):
    """PPPoX client protocol emulation node.

    NOTE(review): this class appears to be auto-generated from the IxNetwork
    'pppoxclient' SDM model; edit with care, regeneration may overwrite it.
    """
    __slots__ = ()
    # REST object type name of this node on the server.
    _SDM_NAME = 'pppoxclient'
    # Maps Python-side attribute names to server-side (camelCase) names.
    _SDM_ATT_MAP = {
        'AcMatchMac': 'acMatchMac',
        'AcMatchName': 'acMatchName',
        'AcOptions': 'acOptions',
        'ActualRateDownstream': 'actualRateDownstream',
        'ActualRateUpstream': 'actualRateUpstream',
        'AgentAccessAggregationCircuitId': 'agentAccessAggregationCircuitId',
        'AgentCircuitId': 'agentCircuitId',
        'AgentRemoteId': 'agentRemoteId',
        'AuthRetries': 'authRetries',
        'AuthTimeout': 'authTimeout',
        'AuthType': 'authType',
        'ChapName': 'chapName',
        'ChapSecret': 'chapSecret',
        'ClientDnsOptions': 'clientDnsOptions',
        'ClientLocalIp': 'clientLocalIp',
        'ClientLocalIpv6Iid': 'clientLocalIpv6Iid',
        'ClientNcpOptions': 'clientNcpOptions',
        'ClientNetmask': 'clientNetmask',
        'ClientNetmaskOptions': 'clientNetmaskOptions',
        'ClientPrimaryDnsAddress': 'clientPrimaryDnsAddress',
        'ClientSecondaryDnsAddress': 'clientSecondaryDnsAddress',
        'ClientSignalIWF': 'clientSignalIWF',
        'ClientSignalLoopChar': 'clientSignalLoopChar',
        'ClientSignalLoopEncapsulation': 'clientSignalLoopEncapsulation',
        'ClientSignalLoopId': 'clientSignalLoopId',
        'ClientV6NcpOptions': 'clientV6NcpOptions',
        'ClientWinsOptions': 'clientWinsOptions',
        'ClientWinsPrimaryAddress': 'clientWinsPrimaryAddress',
        'ClientWinsSecondaryAddress': 'clientWinsSecondaryAddress',
        'ConnectSpeedUpdateEnable': 'connectSpeedUpdateEnable',
        'ConnectedVia': 'connectedVia',
        'Count': 'count',
        'DataLink': 'dataLink',
        'DescriptiveName': 'descriptiveName',
        'DiscoveredIpv4Addresses': 'discoveredIpv4Addresses',
        'DiscoveredIpv6Addresses': 'discoveredIpv6Addresses',
        'DiscoveredMacs': 'discoveredMacs',
        'DiscoveredRemoteSessionIds': 'discoveredRemoteSessionIds',
        'DiscoveredRemoteTunnelIds': 'discoveredRemoteTunnelIds',
        'DiscoveredSessionIds': 'discoveredSessionIds',
        'DiscoveredTunnelIPs': 'discoveredTunnelIPs',
        'DiscoveredTunnelIds': 'discoveredTunnelIds',
        'DomainList': 'domainList',
        'DslTypeTlv': 'dslTypeTlv',
        'EchoReqInterval': 'echoReqInterval',
        'EnableDomainGroups': 'enableDomainGroups',
        'EnableEchoReq': 'enableEchoReq',
        'EnableEchoRsp': 'enableEchoRsp',
        'EnableHostUniq': 'enableHostUniq',
        'EnableMaxPayload': 'enableMaxPayload',
        'EnableRedial': 'enableRedial',
        'Encaps1': 'encaps1',
        'Encaps2': 'encaps2',
        'EndpointDiscNegotiation': 'endpointDiscNegotiation',
        'EndpointDiscriminatorClass': 'endpointDiscriminatorClass',
        'Errors': 'errors',
        'HostUniq': 'hostUniq',
        'HostUniqLength': 'hostUniqLength',
        'LcpAccm': 'lcpAccm',
        'LcpEnableAccm': 'lcpEnableAccm',
        'LcpMaxFailure': 'lcpMaxFailure',
        'LcpRetries': 'lcpRetries',
        'LcpStartDelay': 'lcpStartDelay',
        'LcpTermRetries': 'lcpTermRetries',
        'LcpTimeout': 'lcpTimeout',
        'MaxPayload': 'maxPayload',
        'MlpppIPAddress': 'mlpppIPAddress',
        'MlpppMACAddress': 'mlpppMACAddress',
        'Mrru': 'mrru',
        'MrruNegotiation': 'mrruNegotiation',
        'MruNegotiation': 'mruNegotiation',
        'Mtu': 'mtu',
        'Multiplier': 'multiplier',
        'Name': 'name',
        'NcpRetries': 'ncpRetries',
        'NcpTimeout': 'ncpTimeout',
        'NcpType': 'ncpType',
        'PadiRetries': 'padiRetries',
        'PadiTimeout': 'padiTimeout',
        'PadrRetries': 'padrRetries',
        'PadrTimeout': 'padrTimeout',
        'PapPassword': 'papPassword',
        'PapUser': 'papUser',
        'PonTypeTlv': 'ponTypeTlv',
        'RedialMax': 'redialMax',
        'RedialTimeout': 'redialTimeout',
        'RxConnectSpeed': 'rxConnectSpeed',
        'ServiceName': 'serviceName',
        'ServiceOptions': 'serviceOptions',
        'SessionInfo': 'sessionInfo',
        'SessionStatus': 'sessionStatus',
        'StackedLayers': 'stackedLayers',
        'StateCounts': 'stateCounts',
        'Status': 'status',
        'TxConnectSpeed': 'txConnectSpeed',
        'UnlimitedRedialAttempts': 'unlimitedRedialAttempts',
        'UserDefinedDslType': 'userDefinedDslType',
        'UserDefinedPonType': 'userDefinedPonType',
    }
def __init__(self, parent):
    """Attach this pppoxclient node under *parent* in the resource tree."""
    super(Pppoxclient, self).__init__(parent)
# ---- Child resource accessors ----
# Each property lazily imports its generated wrapper class at access time
# (NOTE(review): presumably to keep module import cost down and avoid
# import cycles between generated modules — confirm against the generator).
@property
def Bfdv4Interface(self):
    """Access the Bfdv4Interface child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.bfdv4interface_91b557a3f744baf442dbe21ac75e8f2e import Bfdv4Interface
    return Bfdv4Interface(self)
@property
def Bfdv6Interface(self):
    """Access the Bfdv6Interface child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.bfdv6interface_b9a91920db1b70c8c6410d2de0b438d3 import Bfdv6Interface
    return Bfdv6Interface(self)
@property
def BgpIpv4Peer(self):
    """Access the BgpIpv4Peer child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpipv4peer_9dd9eddcf2bd784d82d8a016e392f035 import BgpIpv4Peer
    return BgpIpv4Peer(self)
@property
def BgpIpv6Peer(self):
    """Access the BgpIpv6Peer child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6peer_d4ac277d9da759fd5a152b8e6eb0ab20 import BgpIpv6Peer
    return BgpIpv6Peer(self)
@property
def Connector(self):
    """Access the Connector child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.connector_d0d942810e4010add7642d3914a1f29b import Connector
    return Connector(self)
@property
def Dhcpv6client(self):
    """Access the Dhcpv6client child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.dhcpv6client_355391ba11ab3c1555c827e2e4ac3c4c import Dhcpv6client
    return Dhcpv6client(self)
@property
def ECpriRe(self):
    """Access the ECpriRe child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.ecprire_51f1030cbafd2e567d3b517032a1b011 import ECpriRe
    return ECpriRe(self)
@property
def ECpriRec(self):
    """Access the ECpriRec child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.ecprirec_129f1d43f285a4f806ade4e0df814255 import ECpriRec
    return ECpriRec(self)
@property
def Geneve(self):
    """Access the Geneve child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.geneve_14ab6f140956b4fc77d1d0f03c5e7514 import Geneve
    return Geneve(self)
@property
def IgmpHost(self):
    """Access the IgmpHost child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.igmphost_8940887674c0387469423e8df3a33854 import IgmpHost
    return IgmpHost(self)
@property
def IgmpQuerier(self):
    """Access the IgmpQuerier child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.igmpquerier_38c883b0cec7ffb5405af90bf1b8cda5 import IgmpQuerier
    return IgmpQuerier(self)
@property
def MldHost(self):
    """Access the MldHost child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.mldhost_824a1bed927138d4bb32f7d2631197a5 import MldHost
    return MldHost(self)
@property
def MldQuerier(self):
    """Access the MldQuerier child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.mldquerier_e20671d730d138d65036e88d7cad63ac import MldQuerier
    return MldQuerier(self)
@property
def MplsOam(self):
    """Access the MplsOam child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.mplsoam_e01bb6affe899a4731aa60619f4aeadc import MplsOam
    return MplsOam(self)
@property
def NetconfClient(self):
    """Access the NetconfClient child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.netconfclient_1eaa2ab0efacd988796bdc1f5fe4291c import NetconfClient
    return NetconfClient(self)
@property
def NetconfServer(self):
    """Access the NetconfServer child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.netconfserver_ad256f8ca38068f1eaff839ed40b1e30 import NetconfServer
    return NetconfServer(self)
@property
def Ospfv2(self):
    """Access the Ospfv2 child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.ospfv2_27b7a27a991a50e01e629b9de482a2f0 import Ospfv2
    return Ospfv2(self)
@property
def Ospfv3(self):
    """Access the Ospfv3 child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.ospfv3_c029fd7cd4a9e9897b7b4e4547458751 import Ospfv3
    return Ospfv3(self)
@property
def Pcc(self):
    """Access the Pcc child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.pcc_9346785b55d17399fecd6fe36c418219 import Pcc
    return Pcc(self)
@property
def Pce(self):
    """Access the Pce child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.pce_bd5f6a11078a4f0deb5d56bef8e9674f import Pce
    return Pce(self)
@property
def PimV4Interface(self):
    """Access the PimV4Interface child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.pimv4interface_92603cbceaf153039f7575ed9bc4aa67 import PimV4Interface
    return PimV4Interface(self)
@property
def PimV6Interface(self):
    """Access the PimV6Interface child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.pimv6interface_74a3aa08a315ca50732e853e3e8cdc43 import PimV6Interface
    return PimV6Interface(self)
@property
def Tag(self):
    """Access the Tag child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d import Tag
    return Tag(self)
@property
def TlvProfile(self):
    """Access the TlvProfile child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.tlvprofile_69db000d3ef3b060f5edc387b878736c import TlvProfile
    return TlvProfile(self)
@property
def Vxlan(self):
    """Access the Vxlan child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.vxlan_ed3df6fe7146492fc5fe0f77f53f9473 import Vxlan
    return Vxlan(self)
@property
def Vxlanv6(self):
    """Access the Vxlanv6 child resource(s)."""
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.vxlanv6_c18187deccae3db44b9e9de30ad538ec import Vxlanv6
    return Vxlanv6(self)
@property
def AcMatchMac(self):
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AcMatchMac']))
@property
def AcMatchName(self):
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AcMatchName']))
@property
def AcOptions(self):
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AcOptions']))
@property
def ActualRateDownstream(self):
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ActualRateDownstream']))
    # ------------------------------------------------------------------
    # Auto-generated read-only accessors. Each property wraps the raw
    # attribute value (looked up via the class-level _SDM_ATT_MAP) in a
    # Multivalue, which represents per-device value patterns on the
    # IxNetwork server. The attribute names (CHAP/PAP credentials, PADI
    # retries, encapsulations, ...) suggest this node configures a
    # PPP/PPPoE client endpoint — NOTE(review): confirm against the
    # generator's source schema.
    # ------------------------------------------------------------------
    @property
    def ActualRateUpstream(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ActualRateUpstream']))
    @property
    def AgentAccessAggregationCircuitId(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AgentAccessAggregationCircuitId']))
    @property
    def AgentCircuitId(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AgentCircuitId']))
    @property
    def AgentRemoteId(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AgentRemoteId']))
    @property
    def AuthRetries(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AuthRetries']))
    @property
    def AuthTimeout(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AuthTimeout']))
    @property
    def AuthType(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AuthType']))
    @property
    def ChapName(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ChapName']))
    @property
    def ChapSecret(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ChapSecret']))
    @property
    def ClientDnsOptions(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientDnsOptions']))
    @property
    def ClientLocalIp(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientLocalIp']))
    @property
    def ClientLocalIpv6Iid(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientLocalIpv6Iid']))
    @property
    def ClientNcpOptions(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientNcpOptions']))
    @property
    def ClientNetmask(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientNetmask']))
    @property
    def ClientNetmaskOptions(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientNetmaskOptions']))
    @property
    def ClientPrimaryDnsAddress(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientPrimaryDnsAddress']))
    @property
    def ClientSecondaryDnsAddress(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientSecondaryDnsAddress']))
    @property
    def ClientSignalIWF(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientSignalIWF']))
    @property
    def ClientSignalLoopChar(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientSignalLoopChar']))
    @property
    def ClientSignalLoopEncapsulation(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientSignalLoopEncapsulation']))
    @property
    def ClientSignalLoopId(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientSignalLoopId']))
    @property
    def ClientV6NcpOptions(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientV6NcpOptions']))
    @property
    def ClientWinsOptions(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientWinsOptions']))
    @property
    def ClientWinsPrimaryAddress(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientWinsPrimaryAddress']))
    @property
    def ClientWinsSecondaryAddress(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClientWinsSecondaryAddress']))
    @property
    def ConnectSpeedUpdateEnable(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ConnectSpeedUpdateEnable']))
    # ------------------------------------------------------------------
    # Mixed accessors: Multivalue-wrapped config attributes plus plain
    # read-only state attributes (Count, Discovered*, Errors) and three
    # writable attributes (ConnectedVia has a setter here; Multiplier,
    # Name, StackedLayers have setters further below).
    # ------------------------------------------------------------------
    @property
    def ConnectedVia(self):
        # Plain (non-Multivalue) attribute: list of layers this protocol
        # runs on top of; writable via the setter below.
        return self._get_attribute(self._SDM_ATT_MAP['ConnectedVia'])
    @ConnectedVia.setter
    def ConnectedVia(self, value):
        self._set_attribute(self._SDM_ATT_MAP['ConnectedVia'], value)
    @property
    def Count(self):
        return self._get_attribute(self._SDM_ATT_MAP['Count'])
    @property
    def DataLink(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DataLink']))
    @property
    def DescriptiveName(self):
        return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
    # The Discovered* attributes below are runtime state learned from the
    # peer (read-only; no setters are generated for them).
    @property
    def DiscoveredIpv4Addresses(self):
        return self._get_attribute(self._SDM_ATT_MAP['DiscoveredIpv4Addresses'])
    @property
    def DiscoveredIpv6Addresses(self):
        return self._get_attribute(self._SDM_ATT_MAP['DiscoveredIpv6Addresses'])
    @property
    def DiscoveredMacs(self):
        return self._get_attribute(self._SDM_ATT_MAP['DiscoveredMacs'])
    @property
    def DiscoveredRemoteSessionIds(self):
        return self._get_attribute(self._SDM_ATT_MAP['DiscoveredRemoteSessionIds'])
    @property
    def DiscoveredRemoteTunnelIds(self):
        return self._get_attribute(self._SDM_ATT_MAP['DiscoveredRemoteTunnelIds'])
    @property
    def DiscoveredSessionIds(self):
        return self._get_attribute(self._SDM_ATT_MAP['DiscoveredSessionIds'])
    @property
    def DiscoveredTunnelIPs(self):
        return self._get_attribute(self._SDM_ATT_MAP['DiscoveredTunnelIPs'])
    @property
    def DiscoveredTunnelIds(self):
        return self._get_attribute(self._SDM_ATT_MAP['DiscoveredTunnelIds'])
    @property
    def DomainList(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DomainList']))
    @property
    def DslTypeTlv(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DslTypeTlv']))
    @property
    def EchoReqInterval(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EchoReqInterval']))
    @property
    def EnableDomainGroups(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableDomainGroups']))
    @property
    def EnableEchoReq(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableEchoReq']))
    @property
    def EnableEchoRsp(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableEchoRsp']))
    @property
    def EnableHostUniq(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableHostUniq']))
    @property
    def EnableMaxPayload(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableMaxPayload']))
    @property
    def EnableRedial(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableRedial']))
    @property
    def Encaps1(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Encaps1']))
    @property
    def Encaps2(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Encaps2']))
    @property
    def EndpointDiscNegotiation(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EndpointDiscNegotiation']))
    @property
    def EndpointDiscriminatorClass(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EndpointDiscriminatorClass']))
    @property
    def Errors(self):
        return self._get_attribute(self._SDM_ATT_MAP['Errors'])
    @property
    def HostUniq(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HostUniq']))
    @property
    def HostUniqLength(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HostUniqLength']))
    @property
    def LcpAccm(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LcpAccm']))
    @property
    def LcpEnableAccm(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LcpEnableAccm']))
    @property
    def LcpMaxFailure(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LcpMaxFailure']))
    @property
    def LcpRetries(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LcpRetries']))
    @property
    def LcpStartDelay(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LcpStartDelay']))
    @property
    def LcpTermRetries(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LcpTermRetries']))
    @property
    def LcpTimeout(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LcpTimeout']))
    # ------------------------------------------------------------------
    # Remaining auto-generated accessors, same pattern as above: most are
    # Multivalue-wrapped config values; Multiplier, Name and StackedLayers
    # are plain writable attributes; SessionInfo/SessionStatus/StateCounts/
    # Status are plain read-only state.
    # ------------------------------------------------------------------
    @property
    def MaxPayload(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MaxPayload']))
    @property
    def MlpppIPAddress(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MlpppIPAddress']))
    @property
    def MlpppMACAddress(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MlpppMACAddress']))
    @property
    def Mrru(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Mrru']))
    @property
    def MrruNegotiation(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MrruNegotiation']))
    @property
    def MruNegotiation(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MruNegotiation']))
    @property
    def Mtu(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Mtu']))
    @property
    def Multiplier(self):
        # Number of device instances per parent topology entry (writable).
        return self._get_attribute(self._SDM_ATT_MAP['Multiplier'])
    @Multiplier.setter
    def Multiplier(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Multiplier'], value)
    @property
    def Name(self):
        return self._get_attribute(self._SDM_ATT_MAP['Name'])
    @Name.setter
    def Name(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Name'], value)
    @property
    def NcpRetries(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NcpRetries']))
    @property
    def NcpTimeout(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NcpTimeout']))
    @property
    def NcpType(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NcpType']))
    @property
    def PadiRetries(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PadiRetries']))
    @property
    def PadiTimeout(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PadiTimeout']))
    @property
    def PadrRetries(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PadrRetries']))
    @property
    def PadrTimeout(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PadrTimeout']))
    @property
    def PapPassword(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PapPassword']))
    @property
    def PapUser(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PapUser']))
    @property
    def PonTypeTlv(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PonTypeTlv']))
    @property
    def RedialMax(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RedialMax']))
    @property
    def RedialTimeout(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RedialTimeout']))
    @property
    def RxConnectSpeed(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RxConnectSpeed']))
    @property
    def ServiceName(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ServiceName']))
    @property
    def ServiceOptions(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ServiceOptions']))
    @property
    def SessionInfo(self):
        return self._get_attribute(self._SDM_ATT_MAP['SessionInfo'])
    @property
    def SessionStatus(self):
        return self._get_attribute(self._SDM_ATT_MAP['SessionStatus'])
    @property
    def StackedLayers(self):
        return self._get_attribute(self._SDM_ATT_MAP['StackedLayers'])
    @StackedLayers.setter
    def StackedLayers(self, value):
        self._set_attribute(self._SDM_ATT_MAP['StackedLayers'], value)
    @property
    def StateCounts(self):
        return self._get_attribute(self._SDM_ATT_MAP['StateCounts'])
    @property
    def Status(self):
        return self._get_attribute(self._SDM_ATT_MAP['Status'])
    @property
    def TxConnectSpeed(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TxConnectSpeed']))
    @property
    def UnlimitedRedialAttempts(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UnlimitedRedialAttempts']))
    @property
    def UserDefinedDslType(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UserDefinedDslType']))
    @property
    def UserDefinedPonType(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UserDefinedPonType']))
    # ------------------------------------------------------------------
    # Generated CRUD helpers. _map_locals filters the captured locals()
    # down to the attributes declared in _SDM_ATT_MAP, so only explicitly
    # passed (non-None) keyword arguments are sent to the server.
    # ------------------------------------------------------------------
    def update(self, ConnectedVia=None, Multiplier=None, Name=None, StackedLayers=None):
        # PATCH the writable attributes of this node on the server.
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
    def add(self, ConnectedVia=None, Multiplier=None, Name=None, StackedLayers=None):
        # POST a new child instance of this node on the server.
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
    def remove(self):
        # DELETE this instance on the server; no return value.
        self._delete()
    def find(self, ConnectedVia=None, Count=None, DescriptiveName=None, DiscoveredIpv4Addresses=None, DiscoveredIpv6Addresses=None, DiscoveredMacs=None, DiscoveredRemoteSessionIds=None, DiscoveredRemoteTunnelIds=None, DiscoveredSessionIds=None, DiscoveredTunnelIPs=None, DiscoveredTunnelIds=None, Errors=None, Multiplier=None, Name=None, SessionInfo=None, SessionStatus=None, StackedLayers=None, StateCounts=None, Status=None):
        # Select matching instances from the server into this container.
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
    def read(self, href):
        # Fetch a single instance identified by its href.
        return self._read(href)
    def get_device_ids(self, PortNames=None, AcMatchMac=None, AcMatchName=None, AcOptions=None, ActualRateDownstream=None, ActualRateUpstream=None, AgentAccessAggregationCircuitId=None, AgentCircuitId=None, AgentRemoteId=None, AuthRetries=None, AuthTimeout=None, AuthType=None, ChapName=None, ChapSecret=None, ClientDnsOptions=None, ClientLocalIp=None, ClientLocalIpv6Iid=None, ClientNcpOptions=None, ClientNetmask=None, ClientNetmaskOptions=None, ClientPrimaryDnsAddress=None, ClientSecondaryDnsAddress=None, ClientSignalIWF=None, ClientSignalLoopChar=None, ClientSignalLoopEncapsulation=None, ClientSignalLoopId=None, ClientV6NcpOptions=None, ClientWinsOptions=None, ClientWinsPrimaryAddress=None, ClientWinsSecondaryAddress=None, ConnectSpeedUpdateEnable=None, DataLink=None, DomainList=None, DslTypeTlv=None, EchoReqInterval=None, EnableDomainGroups=None, EnableEchoReq=None, EnableEchoRsp=None, EnableHostUniq=None, EnableMaxPayload=None, EnableRedial=None, Encaps1=None, Encaps2=None, EndpointDiscNegotiation=None, EndpointDiscriminatorClass=None, HostUniq=None, HostUniqLength=None, LcpAccm=None, LcpEnableAccm=None, LcpMaxFailure=None, LcpRetries=None, LcpStartDelay=None, LcpTermRetries=None, LcpTimeout=None, MaxPayload=None, MlpppIPAddress=None, MlpppMACAddress=None, Mrru=None, MrruNegotiation=None, MruNegotiation=None, Mtu=None, NcpRetries=None, NcpTimeout=None, NcpType=None, PadiRetries=None, PadiTimeout=None, PadrRetries=None, PadrTimeout=None, PapPassword=None, PapUser=None, PonTypeTlv=None, RedialMax=None, RedialTimeout=None, RxConnectSpeed=None, ServiceName=None, ServiceOptions=None, TxConnectSpeed=None, UnlimitedRedialAttempts=None, UserDefinedDslType=None, UserDefinedPonType=None):
        # Resolve device ids matching the given multivalue criteria.
        return self._get_ngpf_device_ids(locals())
    # ------------------------------------------------------------------
    # Generated server-side operations. Each builds a payload where Arg1
    # is either the node object itself (operations accepting a node-list
    # argument: Abort/RestartDown/Start/Stop) or its href (per-object
    # operations), positional args map to Arg2..ArgN, and keyword args
    # pass through verbatim.
    # ------------------------------------------------------------------
    def Abort(self, *args, **kwargs):
        payload = { "Arg1": self }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('abort', payload=payload, response_object=None)
    def CloseIpcp(self, *args, **kwargs):
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('closeIpcp', payload=payload, response_object=None)
    def CloseIpv6cp(self, *args, **kwargs):
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('closeIpv6cp', payload=payload, response_object=None)
    def OpenIpcp(self, *args, **kwargs):
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('openIpcp', payload=payload, response_object=None)
    def OpenIpv6cp(self, *args, **kwargs):
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('openIpv6cp', payload=payload, response_object=None)
    def RestartDown(self, *args, **kwargs):
        payload = { "Arg1": self }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('restartDown', payload=payload, response_object=None)
    def SendPing(self, *args, **kwargs):
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('sendPing', payload=payload, response_object=None)
    def SendPing6(self, *args, **kwargs):
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('sendPing6', payload=payload, response_object=None)
    def Start(self, *args, **kwargs):
        payload = { "Arg1": self }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('start', payload=payload, response_object=None)
    def Stop(self, *args, **kwargs):
        payload = { "Arg1": self }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('stop', payload=payload, response_object=None)
| true | true |
f71e695a37aca1817dcabfee1b7c4e42d4724bff | 35,646 | py | Python | panel/reactive.py | Jacob-Barhak/panel | 04cad38ea703e4e69fb76f063a27f4ffe40688e8 | [
"BSD-3-Clause"
] | 1 | 2021-03-09T04:46:05.000Z | 2021-03-09T04:46:05.000Z | panel/reactive.py | Jacob-Barhak/panel | 04cad38ea703e4e69fb76f063a27f4ffe40688e8 | [
"BSD-3-Clause"
] | null | null | null | panel/reactive.py | Jacob-Barhak/panel | 04cad38ea703e4e69fb76f063a27f4ffe40688e8 | [
"BSD-3-Clause"
] | null | null | null | """
Declares Syncable and Reactive classes which provides baseclasses
for Panel components which sync their state with one or more bokeh
models rendered on the frontend.
"""
import difflib
import sys
import threading
from collections import namedtuple
from functools import partial
import numpy as np
import param
from bokeh.models import LayoutDOM
from tornado import gen
from .config import config
from .io.callbacks import PeriodicCallback
from .io.model import hold
from .io.notebook import push, push_on_root
from .io.server import unlocked
from .io.state import state
from .util import edit_readonly, updating
from .viewable import Renderable, Viewable
LinkWatcher = namedtuple("Watcher","inst cls fn mode onlychanged parameter_names what queued target links transformed bidirectional_watcher")
class Syncable(Renderable):
    """
    Syncable is an extension of the Renderable object which can not
    only render to a bokeh model but also sync the parameters on the
    object with the properties on the model.

    In order to bi-directionally link parameters with bokeh model
    instances the _link_params and _link_props methods define
    callbacks triggered when either the parameter or bokeh property
    values change. Since there may not be a 1-to-1 mapping between
    parameter and the model property the _process_property_change and
    _process_param_change may be overridden to apply any necessary
    transformations.
    """

    # Timeout if a notebook comm message is swallowed
    _timeout = 20000

    # Timeout before the first event is processed
    _debounce = 50

    # Any parameters that require manual updates handling for the models
    # e.g. parameters which affect some sub-model
    _manual_params = []

    # Mapping from parameter name to bokeh model property name
    _rename = {}

    # Allows defining a mapping from model property name to a JS code
    # snippet that transforms the object before serialization
    _js_transforms = {}

    # Transforms from input value to bokeh property value
    _source_transforms = {}
    _target_transforms = {}

    __abstract = True

    def __init__(self, **params):
        super().__init__(**params)

        # Useful when updating model properties which trigger potentially
        # recursive events
        self._updating = False

        # A dictionary of current property change events
        self._events = {}

        # Any watchers associated with links between two objects
        self._links = []
        self._link_params()

        # A dictionary of bokeh property changes being processed
        self._changing = {}

        # Sets up watchers to process manual updates to models
        if self._manual_params:
            self.param.watch(self._update_manual, self._manual_params)

    #----------------------------------------------------------------
    # Model API
    #----------------------------------------------------------------

    def _process_property_change(self, msg):
        """
        Transform bokeh model property changes into parameter updates.
        Should be overridden to provide appropriate mapping between
        parameter value and bokeh model change. By default uses the
        _rename class level attribute to map between parameter and
        property names.
        """
        # Invert the parameter->property mapping to go property->parameter.
        inverted = {v: k for k, v in self._rename.items()}
        return {inverted.get(k, k): v for k, v in msg.items()}

    def _process_param_change(self, msg):
        """
        Transform parameter changes into bokeh model property updates.
        Should be overridden to provide appropriate mapping between
        parameter value and bokeh model change. By default uses the
        _rename class level attribute to map between parameter and
        property names.
        """
        # A _rename value of None marks a parameter with no model property;
        # such entries are dropped from the message entirely.
        properties = {self._rename.get(k, k): v for k, v in msg.items()
                      if self._rename.get(k, False) is not None}
        # Mirror fixed width/height into min_width/min_height when no
        # sizing_mode is set so bokeh layouts do not collapse the model.
        if 'width' in properties and self.sizing_mode is None:
            properties['min_width'] = properties['width']
        if 'height' in properties and self.sizing_mode is None:
            properties['min_height'] = properties['height']
        return properties

    @property
    def _linkable_params(self):
        """
        Parameters that can be linked in JavaScript via source
        transforms.
        """
        # None in _rename or _source_transforms marks a parameter as not
        # JS-linkable; 'loading' is always considered linkable.
        return [p for p in self._synced_params if self._rename.get(p, False) is not None
                and self._source_transforms.get(p, False) is not None] + ['loading']

    @property
    def _synced_params(self):
        """
        Parameters which are synced with properties using transforms
        applied in the _process_param_change method.
        """
        ignored = ['default_layout', 'loading']
        return [p for p in self.param if p not in self._manual_params+ignored]

    def _init_params(self):
        # Initial property values for model construction; None values are
        # omitted so bokeh defaults apply.
        return {k: v for k, v in self.param.get_param_values()
                if k in self._synced_params and v is not None}

    def _link_params(self):
        # Watch all synced parameters with a single watcher so batched
        # param updates arrive as one _param_change invocation.
        params = self._synced_params
        if params:
            watcher = self.param.watch(self._param_change, params)
            self._callbacks.append(watcher)

    def _link_props(self, model, properties, doc, root, comm=None):
        # Subscribe to bokeh property changes on the model; routed through
        # the comm in notebooks or directly when running on a server.
        # Skipped entirely in embed mode where no live connection exists.
        ref = root.ref['id']
        if config.embed:
            return
        for p in properties:
            if isinstance(p, tuple):
                # (client_property, model_property) tuples link on the
                # model property only.
                _, p = p
            if comm:
                model.on_change(p, partial(self._comm_change, doc, ref, comm))
            else:
                model.on_change(p, partial(self._server_change, doc, ref))

    def _manual_update(self, events, model, doc, root, parent, comm):
        """
        Method for handling any manual update events, i.e. events triggered
        by changes in the manual params.
        """
        # Intentionally a no-op; subclasses declaring _manual_params
        # override this to patch their sub-models.

    def _update_manual(self, *events):
        # Dispatch manual-param events to every rendered copy of this
        # object, scheduling on the document when it is locked.
        for ref, (model, parent) in self._models.items():
            if ref not in state._views or ref in state._fake_roots:
                continue
            viewable, root, doc, comm = state._views[ref]
            if comm or state._unblocked(doc):
                with unlocked():
                    self._manual_update(events, model, doc, root, parent, comm)
                if comm and 'embedded' not in root.tags:
                    push(doc, comm)
            else:
                cb = partial(self._manual_update, events, model, doc, root, parent, comm)
                if doc.session_context:
                    doc.add_next_tick_callback(cb)
                else:
                    cb()

    def _update_model(self, events, msg, root, model, doc, comm):
        # Record which properties will actually change so the resulting
        # bokeh change notifications can be recognized as self-inflicted
        # and ignored by _comm_change/_server_change.
        self._changing[root.ref['id']] = [
            attr for attr, value in msg.items()
            if not model.lookup(attr).property.matches(getattr(model, attr), value)
        ]
        try:
            model.update(**msg)
        finally:
            del self._changing[root.ref['id']]

    def _cleanup(self, root):
        # Drop the model reference for this root and close any comms;
        # close failures are deliberately swallowed (best effort teardown).
        super()._cleanup(root)
        ref = root.ref['id']
        self._models.pop(ref, None)
        comm, client_comm = self._comms.pop(ref, (None, None))
        if comm:
            try:
                comm.close()
            except Exception:
                pass
        if client_comm:
            try:
                client_comm.close()
            except Exception:
                pass

    def _param_change(self, *events):
        # Translate batched parameter events into one merged property
        # message and apply it to every rendered model of this object.
        msgs = []
        for event in events:
            msg = self._process_param_change({event.name: event.new})
            if msg:
                msgs.append(msg)
        events = {event.name: event for event in events}
        msg = {k: v for msg in msgs for k, v in msg.items()}
        if not msg:
            return
        for ref, (model, parent) in self._models.items():
            if ref not in state._views or ref in state._fake_roots:
                continue
            viewable, root, doc, comm = state._views[ref]
            if comm or not doc.session_context or state._unblocked(doc):
                with unlocked():
                    self._update_model(events, msg, root, model, doc, comm)
                if comm and 'embedded' not in root.tags:
                    push(doc, comm)
            else:
                # Document is locked by another thread/session; schedule
                # the update on the next tick instead.
                cb = partial(self._update_model, events, msg, root, model, doc, comm)
                doc.add_next_tick_callback(cb)

    def _process_events(self, events):
        # Apply frontend property changes to the parameters, flagging the
        # global busy state for the duration so indicators can react.
        with edit_readonly(state):
            state.busy = True
        try:
            with edit_readonly(self):
                self.param.set_param(**self._process_property_change(events))
        finally:
            with edit_readonly(state):
                state.busy = False

    @gen.coroutine
    def _change_coroutine(self, doc=None):
        # Tornado coroutine wrapper so the event flush can be scheduled
        # via doc.add_timeout_callback.
        self._change_event(doc)

    def _change_event(self, doc=None):
        # Flush accumulated property events, exposing the triggering
        # document and thread via global state for the duration.
        try:
            state.curdoc = doc
            thread = threading.current_thread()
            thread_id = thread.ident if thread else None
            state._thread_id = thread_id
            events = self._events
            self._events = {}
            self._process_events(events)
        finally:
            state.curdoc = None
            state._thread_id = None

    def _comm_change(self, doc, ref, comm, attr, old, new):
        # Ignore echoes of changes this object just pushed to the model.
        if attr in self._changing.get(ref, []):
            self._changing[ref].remove(attr)
            return
        with hold(doc, comm=comm):
            self._process_events({attr: new})

    def _server_change(self, doc, ref, attr, old, new):
        # Ignore echoes of changes this object just pushed to the model.
        if attr in self._changing.get(ref, []):
            self._changing[ref].remove(attr)
            return
        state._locks.clear()
        # Accumulate events and debounce: only the first event in a burst
        # schedules the flush callback.
        processing = bool(self._events)
        self._events.update({attr: new})
        if not processing:
            if doc.session_context:
                doc.add_timeout_callback(partial(self._change_coroutine, doc), self._debounce)
            else:
                self._change_event(doc)
class Reactive(Syncable, Viewable):
"""
Reactive is a Viewable object that also supports syncing between
the objects parameters and the underlying bokeh model either via
the defined pyviz_comms.Comm type or using bokeh server.
In addition it defines various methods which make it easy to link
the parameters to other objects.
"""
#----------------------------------------------------------------
# Public API
#----------------------------------------------------------------
def add_periodic_callback(self, callback, period=500, count=None,
timeout=None, start=True):
"""
Schedules a periodic callback to be run at an interval set by
the period. Returns a PeriodicCallback object with the option
to stop and start the callback.
Arguments
---------
callback: callable
Callable function to be executed at periodic interval.
period: int
Interval in milliseconds at which callback will be executed.
count: int
Maximum number of times callback will be invoked.
timeout: int
Timeout in seconds when the callback should be stopped.
start: boolean (default=True)
Whether to start callback immediately.
Returns
-------
Return a PeriodicCallback object with start and stop methods.
"""
self.param.warning(
"Calling add_periodic_callback on a Panel component is "
"deprecated and will be removed in the next minor release. "
"Use the pn.state.add_periodic_callback API instead."
)
cb = PeriodicCallback(callback=callback, period=period,
count=count, timeout=timeout)
if start:
cb.start()
return cb
    def link(self, target, callbacks=None, bidirectional=False, **links):
        """
        Links the parameters on this object to attributes on another
        object in Python. Supports two modes, either specify a mapping
        between the source and target object parameters as keywords or
        provide a dictionary of callbacks which maps from the source
        parameter to a callback which is triggered when the parameter
        changes.

        Arguments
        ---------
        target: object
            The target object of the link.
        callbacks: dict
            Maps from a parameter in the source object to a callback.
        bidirectional: boolean
            Whether to link source and target bi-directionally
        **links: dict
            Maps between parameters on this object to the parameters
            on the supplied object.

        Returns
        -------
        The param Watcher registered on this object (also recorded in
        self._links as a LinkWatcher for later unlinking).
        """
        # Exactly one of `links` / `callbacks` must be supplied, and
        # bidirectional mode only makes sense for declarative links.
        if links and callbacks:
            raise ValueError('Either supply a set of parameters to '
                             'link as keywords or a set of callbacks, '
                             'not both.')
        elif not links and not callbacks:
            raise ValueError('Declare parameters to link or a set of '
                             'callbacks, neither was defined.')
        elif callbacks and bidirectional:
            raise ValueError('Bidirectional linking not supported for '
                             'explicit callbacks. You must define '
                             'separate callbacks for each direction.')

        # Guard list preventing re-entrant updates: while a parameter is
        # being propagated, echoes of the same parameter are ignored,
        # which breaks infinite ping-pong between the two objects.
        _updating = []
        def link(*events):
            for event in events:
                if event.name in _updating: continue
                _updating.append(event.name)
                try:
                    if callbacks:
                        callbacks[event.name](target, event)
                    else:
                        setattr(target, links[event.name], event.new)
                finally:
                    _updating.pop(_updating.index(event.name))
        params = list(callbacks) if callbacks else list(links)
        cb = self.param.watch(link, params)

        bidirectional_watcher = None
        if bidirectional:
            # Mirror of the forward guard for target->source propagation.
            _reverse_updating = []
            reverse_links = {v: k for k, v in links.items()}
            def reverse_link(*events):
                for event in events:
                    if event.name in _reverse_updating: continue
                    _reverse_updating.append(event.name)
                    try:
                        setattr(self, reverse_links[event.name], event.new)
                    finally:
                        _reverse_updating.remove(event.name)
            bidirectional_watcher = target.param.watch(reverse_link, list(reverse_links))

        # Record link metadata (target, mapping, reverse watcher) so the
        # link can be inspected and torn down later.
        link = LinkWatcher(*tuple(cb)+(target, links, callbacks is not None, bidirectional_watcher))
        self._links.append(link)
        return cb
def controls(self, parameters=[], jslink=True):
"""
Creates a set of widgets which allow manipulating the parameters
on this instance. By default all parameters which support
linking are exposed, but an explicit list of parameters can
be provided.
Arguments
---------
parameters: list(str)
An explicit list of parameters to return controls for.
jslink: bool
Whether to use jslinks instead of Python based links.
This does not allow using all types of parameters.
Returns
-------
A layout of the controls
"""
from .param import Param
from .layout import Tabs, WidgetBox
from .widgets import LiteralInput
if parameters:
linkable = parameters
elif jslink:
linkable = self._linkable_params + ['loading']
else:
linkable = list(self.param)
params = [p for p in linkable if p not in Viewable.param]
controls = Param(self.param, parameters=params, default_layout=WidgetBox,
name='Controls')
layout_params = [p for p in linkable if p in Viewable.param]
if 'name' not in layout_params and self._rename.get('name', False) is not None and not parameters:
layout_params.insert(0, 'name')
style = Param(self.param, parameters=layout_params, default_layout=WidgetBox,
name='Layout')
if jslink:
for p in params:
widget = controls._widgets[p]
widget.jslink(self, value=p, bidirectional=True)
if isinstance(widget, LiteralInput):
widget.serializer = 'json'
for p in layout_params:
widget = style._widgets[p]
widget.jslink(self, value=p, bidirectional=p != 'loading')
if isinstance(widget, LiteralInput):
widget.serializer = 'json'
if params and layout_params:
return Tabs(controls.layout[0], style.layout[0])
elif params:
return controls.layout[0]
return style.layout[0]
def jscallback(self, args={}, **callbacks):
"""
Allows defining a JS callback to be triggered when a property
changes on the source object. The keyword arguments define the
properties that trigger a callback and the JS code that gets
executed.
Arguments
----------
args: dict
A mapping of objects to make available to the JS callback
**callbacks: dict
A mapping between properties on the source model and the code
to execute when that property changes
Returns
-------
callback: Callback
The Callback which can be used to disable the callback.
"""
from .links import Callback
for k, v in list(callbacks.items()):
callbacks[k] = self._rename.get(v, v)
return Callback(self, code=callbacks, args=args)
def jslink(self, target, code=None, args=None, bidirectional=False, **links):
"""
Links properties on the source object to those on the target
object in JS code. Supports two modes, either specify a
mapping between the source and target model properties as
keywords or provide a dictionary of JS code snippets which
maps from the source parameter to a JS code snippet which is
executed when the property changes.
Arguments
----------
target: HoloViews object or bokeh Model or panel Viewable
The target to link the value to.
code: dict
Custom code which will be executed when the widget value
changes.
bidirectional: boolean
Whether to link source and target bi-directionally
**links: dict
A mapping between properties on the source model and the
target model property to link it to.
Returns
-------
link: GenericLink
The GenericLink which can be used unlink the widget and
the target model.
"""
if links and code:
raise ValueError('Either supply a set of properties to '
'link as keywords or a set of JS code '
'callbacks, not both.')
elif not links and not code:
raise ValueError('Declare parameters to link or a set of '
'callbacks, neither was defined.')
if args is None:
args = {}
mapping = code or links
for k in mapping:
if k.startswith('event:'):
continue
elif hasattr(self, 'object') and isinstance(self.object, LayoutDOM):
current = self.object
for attr in k.split('.'):
if not hasattr(current, attr):
raise ValueError(f"Could not resolve {k} on "
f"{self.object} model. Ensure "
"you jslink an attribute that "
"exists on the bokeh model.")
current = getattr(current, attr)
elif (k not in self.param and k not in list(self._rename.values())):
matches = difflib.get_close_matches(k, list(self.param))
if matches:
matches = ' Similar parameters include: %r' % matches
else:
matches = ''
raise ValueError("Could not jslink %r parameter (or property) "
"on %s object because it was not found.%s"
% (k, type(self).__name__, matches))
elif (self._source_transforms.get(k, False) is None or
self._rename.get(k, False) is None):
raise ValueError("Cannot jslink %r parameter on %s object, "
"the parameter requires a live Python kernel "
"to have an effect." % (k, type(self).__name__))
if isinstance(target, Syncable) and code is None:
for k, p in mapping.items():
if k.startswith('event:'):
continue
elif p not in target.param and p not in list(target._rename.values()):
matches = difflib.get_close_matches(p, list(target.param))
if matches:
matches = ' Similar parameters include: %r' % matches
else:
matches = ''
raise ValueError("Could not jslink %r parameter (or property) "
"on %s object because it was not found.%s"
% (p, type(self).__name__, matches))
elif (target._source_transforms.get(p, False) is None or
target._rename.get(p, False) is None):
raise ValueError("Cannot jslink %r parameter on %s object "
"to %r parameter on %s object. It requires "
"a live Python kernel to have an effect."
% (k, type(self).__name__, p, type(target).__name__))
from .links import Link
return Link(self, target, properties=links, code=code, args=args,
bidirectional=bidirectional)
class SyncableData(Reactive):
    """
    A baseclass for components which sync one or more data parameters
    with the frontend via a ColumnDataSource.
    """
    selection = param.List(default=[], doc="""
        The currently selected rows in the data.""")
    # Parameters which when changed require an update of the data
    _data_params = []
    # 'selection' is synced manually via _update_selected rather than as
    # a regular bokeh property, hence it is excluded from renaming.
    _rename = {'selection': None}
    __abstract = True
    def __init__(self, **params):
        super().__init__(**params)
        # _data caches the CDS-compatible dict and _processed the
        # pre-processed raw data; both are filled by _update_cds below.
        self._data = None
        self._processed = None
        self.param.watch(self._validate, self._data_params)
        if self._data_params:
            self.param.watch(self._update_cds, self._data_params)
        self.param.watch(self._update_selected, 'selection')
        self._validate(None)
        self._update_cds()
    def _validate(self, event):
        """
        Allows implementing validation for the data parameters.
        """
    def _get_data(self):
        """
        Implemented by subclasses converting data parameter(s) into
        a ColumnDataSource compatible data dictionary.
        Returns
        -------
        processed: object
          Raw data after pre-processing (e.g. after filtering)
        data: dict
          Dictionary of columns used to instantiate and update the
          ColumnDataSource
        """
    def _update_column(self, column, array):
        """
        Implemented by subclasses converting changes in columns to
        changes in the data parameter.
        Parameters
        ----------
        column: str
          The name of the column to update.
        array: numpy.ndarray
          The array data to update the column with.
        """
        data = getattr(self, self._data_params[0])
        data[column] = array
    def _update_data(self, data):
        # Assign new data to the primary (first) data parameter.
        self.param.set_param(**{self._data_params[0]: data})
    def _manual_update(self, events, model, doc, root, parent, comm):
        # Dispatch each event to a matching _update_<name> handler,
        # skipping re-triggers that originate from our own updates.
        for event in events:
            if event.type == 'triggered' and self._updating:
                continue
            elif hasattr(self, '_update_' + event.name):
                getattr(self, '_update_' + event.name)(model)
    def _update_cds(self, *events):
        # Recompute the data and push it to every rendered model.
        if self._updating:
            return
        self._processed, self._data = self._get_data()
        for ref, (m, _) in self._models.items():
            m.source.data = self._data
            push_on_root(ref)
    def _update_selected(self, *events, indices=None):
        # Sync the selection parameter to the CDS selection indices.
        if self._updating:
            return
        indices = self.selection if indices is None else indices
        for ref, (m, _) in self._models.items():
            m.source.selected.indices = indices
            push_on_root(ref)
    @updating
    def _stream(self, stream, rollover=None):
        # Append the streamed values to every rendered CDS.
        for ref, (m, _) in self._models.items():
            m.source.stream(stream, rollover)
            push_on_root(ref)
    @updating
    def _patch(self, patch):
        # Apply the patch to every rendered CDS.
        for ref, (m, _) in self._models.items():
            m.source.patch(patch)
            push_on_root(ref)
    def stream(self, stream_value, rollover=None, reset_index=True):
        """
        Streams (appends) the `stream_value` provided to the existing
        value in an efficient manner.
        Arguments
        ---------
        stream_value: (Union[pd.DataFrame, pd.Series, Dict])
          The new value(s) to append to the existing value.
        rollover: int
           A maximum column size, above which data from the start of
           the column begins to be discarded. If None, then columns
           will continue to grow unbounded.
        reset_index (bool, default=True):
          If True and the stream_value is a DataFrame, then its index
          is reset. Helps to keep the index unique and named `index`.
        Raises
        ------
        ValueError: Raised if the stream_value is not a supported type.
        Examples
        --------
        Stream a Series to a DataFrame
        >>> value = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
        >>> obj = DataComponent(value)
        >>> stream_value = pd.Series({"x": 4, "y": "d"})
        >>> obj.stream(stream_value)
        >>> obj.value.to_dict("list")
        {'x': [1, 2, 4], 'y': ['a', 'b', 'd']}
        Stream a Dataframe to a Dataframe
        >>> value = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
        >>> obj = DataComponent(value)
        >>> stream_value = pd.DataFrame({"x": [3, 4], "y": ["c", "d"]})
        >>> obj.stream(stream_value)
        >>> obj.value.to_dict("list")
        {'x': [1, 2, 3, 4], 'y': ['a', 'b', 'c', 'd']}
        Stream a Dictionary row to a DataFrame
        >>> value = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
        >>> obj = DataComponent(value)
        >>> stream_value = {"x": 4, "y": "d"}
        >>> obj.stream(stream_value)
        >>> obj.value.to_dict("list")
        {'x': [1, 2, 4], 'y': ['a', 'b', 'd']}
        Stream a Dictionary of Columns to a Dataframe
        >>> value = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
        >>> obj = DataComponent(value)
        >>> stream_value = {"x": [3, 4], "y": ["c", "d"]}
        >>> obj.stream(stream_value)
        >>> obj.value.to_dict("list")
        {'x': [1, 2, 3, 4], 'y': ['a', 'b', 'c', 'd']}
        """
        # Only import pandas if it is already loaded; otherwise fall
        # through to dict handling without forcing the dependency.
        if 'pandas' in sys.modules:
            import pandas as pd
        else:
            pd = None
        if pd and isinstance(stream_value, pd.DataFrame):
            if isinstance(self._processed, dict):
                self.stream(stream_value.to_dict(), rollover)
                return
            if reset_index:
                value_index_start = self._processed.index.max() + 1
                stream_value = stream_value.reset_index(drop=True)
                stream_value.index += value_index_start
            combined = pd.concat([self._processed, stream_value])
            if rollover is not None:
                combined = combined.iloc[-rollover:]
            # Update the data parameter without firing watchers, then
            # trigger it manually while _updating suppresses a full
            # CDS resend; only the appended rows are streamed below.
            with param.discard_events(self):
                self._update_data(combined)
            try:
                self._updating = True
                self.param.trigger(self._data_params[0])
            finally:
                self._updating = False
            try:
                self._updating = True
                self._stream(stream_value, rollover)
            finally:
                self._updating = False
        elif pd and isinstance(stream_value, pd.Series):
            if isinstance(self._processed, dict):
                self.stream({k: [v] for k, v in stream_value.to_dict().items()}, rollover)
                return
            value_index_start = self._processed.index.max() + 1
            self._processed.loc[value_index_start] = stream_value
            with param.discard_events(self):
                self._update_data(self._processed)
            self._updating = True
            try:
                self._stream(self._processed.iloc[-1:], rollover)
            finally:
                self._updating = False
        elif isinstance(stream_value, dict):
            if isinstance(self._processed, dict):
                # Dict-of-columns data: every column must be extended.
                if not all(col in stream_value for col in self._data):
                    raise ValueError("Stream update must append to all columns.")
                for col, array in stream_value.items():
                    combined = np.concatenate([self._data[col], array])
                    if rollover is not None:
                        combined = combined[-rollover:]
                    self._update_column(col, combined)
                self._updating = True
                try:
                    self._stream(stream_value, rollover)
                finally:
                    self._updating = False
            else:
                # Coerce the dict to pandas and re-enter the method.
                try:
                    stream_value = pd.DataFrame(stream_value)
                except ValueError:
                    stream_value = pd.Series(stream_value)
                self.stream(stream_value)
        else:
            raise ValueError("The stream value provided is not a DataFrame, Series or Dict!")
    def patch(self, patch_value):
        """
        Efficiently patches (updates) the existing value with the `patch_value`.
        Arguments
        ---------
        patch_value: (Union[pd.DataFrame, pd.Series, Dict])
          The value(s) to patch the existing value with.
        Raises
        ------
        ValueError: Raised if the patch_value is not a supported type.
        Examples
        --------
        Patch a DataFrame with a Dictionary row.
        >>> value = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
        >>> obj = DataComponent(value)
        >>> patch_value = {"x": [(0, 3)]}
        >>> obj.patch(patch_value)
        >>> obj.value.to_dict("list")
        {'x': [3, 2], 'y': ['a', 'b']}
        Patch a Dataframe with a Dictionary of Columns.
        >>> value = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
        >>> obj = DataComponent(value)
        >>> patch_value = {"x": [(slice(2), (3,4))], "y": [(1,'d')]}
        >>> obj.patch(patch_value)
        >>> obj.value.to_dict("list")
        {'x': [3, 4], 'y': ['a', 'd']}
        Patch a DataFrame with a Series. Please note the index is used in the update.
        >>> value = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
        >>> obj = DataComponent(value)
        >>> patch_value = pd.Series({"index": 1, "x": 4, "y": "d"})
        >>> obj.patch(patch_value)
        >>> obj.value.to_dict("list")
        {'x': [1, 4], 'y': ['a', 'd']}
        Patch a Dataframe with a Dataframe. Please note the index is used in the update.
        >>> value = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
        >>> obj = DataComponent(value)
        >>> patch_value = pd.DataFrame({"x": [3, 4], "y": ["c", "d"]})
        >>> obj.patch(patch_value)
        >>> obj.value.to_dict("list")
        {'x': [3, 4], 'y': ['c', 'd']}
        """
        # Dict-backed data (or no data yet) is patched directly on the CDS.
        if self._processed is None or isinstance(patch_value, dict):
            self._patch(patch_value)
            return
        if 'pandas' in sys.modules:
            import pandas as pd
        else:
            pd = None
        data = getattr(self, self._data_params[0])
        if pd and isinstance(patch_value, pd.DataFrame):
            # Convert the frame to {column: [(index, value), ...]} form
            # and re-enter the method with the dict representation.
            patch_value_dict = {}
            for column in patch_value.columns:
                patch_value_dict[column] = []
                for index in patch_value.index:
                    patch_value_dict[column].append((index, patch_value.loc[index, column]))
            self.patch(patch_value_dict)
        elif pd and isinstance(patch_value, pd.Series):
            if "index" in patch_value:  # Series orient is row
                patch_value_dict = {
                    k: [(patch_value["index"], v)] for k, v in patch_value.items()
                }
                patch_value_dict.pop("index")
            else:  # Series orient is column
                patch_value_dict = {
                    patch_value.name: [(index, value) for index, value in patch_value.items()]
                }
            self.patch(patch_value_dict)
        elif isinstance(patch_value, dict):
            # Apply each (index, value) patch to the data parameter,
            # then mirror the patch on the frontend CDS.
            for k, v in patch_value.items():
                for index, patch in v:
                    if pd and isinstance(self._processed, pd.DataFrame):
                        data.loc[index, k] = patch
                    else:
                        data[k][index] = patch
                self._updating = True
                try:
                    self._patch(patch_value)
                finally:
                    self._updating = False
        else:
            raise ValueError(
                f"Patching with a patch_value of type {type(patch_value).__name__} "
                "is not supported. Please provide a DataFrame, Series or Dict."
            )
class ReactiveData(SyncableData):
    """
    An extension of SyncableData which bi-directionally syncs a data
    parameter between frontend and backend using a ColumnDataSource.
    """
    def _update_selection(self, indices):
        self.selection = indices
    def _process_events(self, events):
        # Handle frontend edits to the data before delegating other
        # property changes to the base implementation.
        if 'data' in events:
            data = events.pop('data')
            if self._updating:
                # Change originated from our own update; nothing to apply.
                data = {}
            _, old_data = self._get_data()
            updated = False
            for k, v in data.items():
                if k in self.indexes:
                    continue
                k = self._renamed_cols.get(k, k)
                if isinstance(v, dict):
                    # Columns may arrive as {row_index: value} dicts;
                    # restore positional order by the integer keys.
                    v = [v for _, v in sorted(v.items(), key=lambda it: int(it[0]))]
                try:
                    isequal = (old_data[k] == np.asarray(v)).all()
                except Exception:
                    isequal = False
                if not isequal:
                    self._update_column(k, v)
                    updated = True
            if updated:
                # Notify watchers while suppressing a frontend round-trip.
                self._updating = True
                try:
                    self.param.trigger('value')
                finally:
                    self._updating = False
        if 'indices' in events:
            self._updating = True
            try:
                self._update_selection(events.pop('indices'))
            finally:
                self._updating = False
        super(ReactiveData, self)._process_events(events)
| 38.329032 | 141 | 0.558464 |
import difflib
import sys
import threading
from collections import namedtuple
from functools import partial
import numpy as np
import param
from bokeh.models import LayoutDOM
from tornado import gen
from .config import config
from .io.callbacks import PeriodicCallback
from .io.model import hold
from .io.notebook import push, push_on_root
from .io.server import unlocked
from .io.state import state
from .util import edit_readonly, updating
from .viewable import Renderable, Viewable
# Extends param's Watcher tuple with link metadata (target object, the
# property mapping and an optional watcher for the reverse direction).
LinkWatcher = namedtuple("Watcher","inst cls fn mode onlychanged parameter_names what queued target links transformed bidirectional_watcher")
class Syncable(Renderable):
    """
    Baseclass which syncs parameters with bokeh model properties,
    propagating changes in both directions between Python and the
    frontend.
    """
    # Timeout and debounce applied to scheduled callbacks (milliseconds)
    _timeout = 20000
    _debounce = 50
    # Parameters updated manually via _update_manual rather than synced
    _manual_params = []
    # Mapping from parameter name to bokeh property name (None disables
    # syncing for that parameter)
    _rename = {}
    # Per-parameter JS/source/target transforms applied when linking
    _js_transforms = {}
    _source_transforms = {}
    _target_transforms = {}
    __abstract = True
    def __init__(self, **params):
        super().__init__(**params)
        self._updating = False
        self._events = {}
        self._links = []
        self._link_params()
        self._changing = {}
        if self._manual_params:
            self.param.watch(self._update_manual, self._manual_params)
    def _process_property_change(self, msg):
        # Map bokeh property names back to parameter names.
        inverted = {v: k for k, v in self._rename.items()}
        return {inverted.get(k, k): v for k, v in msg.items()}
    def _process_param_change(self, msg):
        # Map parameter names to bokeh property names, dropping entries
        # renamed to None; mirror width/height onto min_width/min_height
        # when no sizing_mode is set.
        properties = {self._rename.get(k, k): v for k, v in msg.items()
                      if self._rename.get(k, False) is not None}
        if 'width' in properties and self.sizing_mode is None:
            properties['min_width'] = properties['width']
        if 'height' in properties and self.sizing_mode is None:
            properties['min_height'] = properties['height']
        return properties
    @property
    def _linkable_params(self):
        # Parameters which can be linked in JS (not renamed away and
        # not requiring a Python-only source transform).
        return [p for p in self._synced_params if self._rename.get(p, False) is not None
                and self._source_transforms.get(p, False) is not None] + ['loading']
    @property
    def _synced_params(self):
        # All parameters synced with the bokeh model.
        ignored = ['default_layout', 'loading']
        return [p for p in self.param if p not in self._manual_params+ignored]
    def _init_params(self):
        # Initial non-None values used to construct the bokeh model.
        return {k: v for k, v in self.param.get_param_values()
                if k in self._synced_params and v is not None}
    def _link_params(self):
        params = self._synced_params
        if params:
            watcher = self.param.watch(self._param_change, params)
            self._callbacks.append(watcher)
    def _link_props(self, model, properties, doc, root, comm=None):
        # Subscribe to bokeh property changes, routing them through the
        # comm (notebook) or the server handler.
        ref = root.ref['id']
        if config.embed:
            return
        for p in properties:
            if isinstance(p, tuple):
                _, p = p
            if comm:
                model.on_change(p, partial(self._comm_change, doc, ref, comm))
            else:
                model.on_change(p, partial(self._server_change, doc, ref))
    def _manual_update(self, events, model, doc, root, parent, comm):
        """
        Hook for subclasses to update the model for manual parameters;
        the base implementation does nothing.
        """
    def _update_manual(self, *events):
        # Apply manual updates to every rendered model, scheduling on
        # the document when the session is locked.
        for ref, (model, parent) in self._models.items():
            if ref not in state._views or ref in state._fake_roots:
                continue
            viewable, root, doc, comm = state._views[ref]
            if comm or state._unblocked(doc):
                with unlocked():
                    self._manual_update(events, model, doc, root, parent, comm)
                if comm and 'embedded' not in root.tags:
                    push(doc, comm)
            else:
                cb = partial(self._manual_update, events, model, doc, root, parent, comm)
                if doc.session_context:
                    doc.add_next_tick_callback(cb)
                else:
                    cb()
    def _update_model(self, events, msg, root, model, doc, comm):
        # Record which attributes actually change so that the resulting
        # frontend events can be ignored when they echo back.
        self._changing[root.ref['id']] = [
            attr for attr, value in msg.items()
            if not model.lookup(attr).property.matches(getattr(model, attr), value)
        ]
        try:
            model.update(**msg)
        finally:
            del self._changing[root.ref['id']]
    def _cleanup(self, root):
        # Drop model references and close any comms for this root.
        super()._cleanup(root)
        ref = root.ref['id']
        self._models.pop(ref, None)
        comm, client_comm = self._comms.pop(ref, (None, None))
        if comm:
            try:
                comm.close()
            except Exception:
                pass
        if client_comm:
            try:
                client_comm.close()
            except Exception:
                pass
    def _param_change(self, *events):
        # Convert parameter events into a combined property message and
        # apply it to every rendered model.
        msgs = []
        for event in events:
            msg = self._process_param_change({event.name: event.new})
            if msg:
                msgs.append(msg)
        events = {event.name: event for event in events}
        msg = {k: v for msg in msgs for k, v in msg.items()}
        if not msg:
            return
        for ref, (model, parent) in self._models.items():
            if ref not in state._views or ref in state._fake_roots:
                continue
            viewable, root, doc, comm = state._views[ref]
            if comm or not doc.session_context or state._unblocked(doc):
                with unlocked():
                    self._update_model(events, msg, root, model, doc, comm)
                if comm and 'embedded' not in root.tags:
                    push(doc, comm)
            else:
                cb = partial(self._update_model, events, msg, root, model, doc, comm)
                doc.add_next_tick_callback(cb)
    def _process_events(self, events):
        # Apply frontend property changes to the parameters while
        # flagging the global state as busy.
        with edit_readonly(state):
            state.busy = True
        try:
            with edit_readonly(self):
                self.param.set_param(**self._process_property_change(events))
        finally:
            with edit_readonly(state):
                state.busy = False
    @gen.coroutine
    def _change_coroutine(self, doc=None):
        self._change_event(doc)
    def _change_event(self, doc=None):
        # Flush accumulated events, tracking the active document and
        # thread for the duration of processing.
        try:
            state.curdoc = doc
            thread = threading.current_thread()
            thread_id = thread.ident if thread else None
            state._thread_id = thread_id
            events = self._events
            self._events = {}
            self._process_events(events)
        finally:
            state.curdoc = None
            state._thread_id = None
    def _comm_change(self, doc, ref, comm, attr, old, new):
        # Ignore changes we initiated ourselves (echoes), otherwise
        # process immediately over the comm.
        if attr in self._changing.get(ref, []):
            self._changing[ref].remove(attr)
            return
        with hold(doc, comm=comm):
            self._process_events({attr: new})
    def _server_change(self, doc, ref, attr, old, new):
        # Accumulate server-side changes and debounce their processing.
        if attr in self._changing.get(ref, []):
            self._changing[ref].remove(attr)
            return
        state._locks.clear()
        processing = bool(self._events)
        self._events.update({attr: new})
        if not processing:
            if doc.session_context:
                doc.add_timeout_callback(partial(self._change_coroutine, doc), self._debounce)
            else:
                self._change_event(doc)
class Reactive(Syncable, Viewable):
    """
    Reactive is a Viewable which syncs its parameters with bokeh model
    properties and offers APIs to link parameters and define JS
    callbacks between objects.
    """
    def add_periodic_callback(self, callback, period=500, count=None,
                              timeout=None, start=True):
        """
        Deprecated: schedules a periodic callback; use
        pn.state.add_periodic_callback instead (see warning below).
        """
        self.param.warning(
            "Calling add_periodic_callback on a Panel component is "
            "deprecated and will be removed in the next minor release. "
            "Use the pn.state.add_periodic_callback API instead."
        )
        cb = PeriodicCallback(callback=callback, period=period,
                              count=count, timeout=timeout)
        if start:
            cb.start()
        return cb
    def link(self, target, callbacks=None, bidirectional=False, **links):
        """
        Links the parameters on this object to attributes on the
        target object in Python, either via a parameter mapping
        (**links) or explicit callbacks; returns the watcher.
        """
        if links and callbacks:
            raise ValueError('Either supply a set of parameters to '
                             'link as keywords or a set of callbacks, '
                             'not both.')
        elif not links and not callbacks:
            raise ValueError('Declare parameters to link or a set of '
                             'callbacks, neither was defined.')
        elif callbacks and bidirectional:
            raise ValueError('Bidirectional linking not supported for '
                             'explicit callbacks. You must define '
                             'separate callbacks for each direction.')
        _updating = []
        def link(*events):
            # Guard against re-entrant updates via the _updating list.
            for event in events:
                if event.name in _updating: continue
                _updating.append(event.name)
                try:
                    if callbacks:
                        callbacks[event.name](target, event)
                    else:
                        setattr(target, links[event.name], event.new)
                finally:
                    _updating.pop(_updating.index(event.name))
        params = list(callbacks) if callbacks else list(links)
        cb = self.param.watch(link, params)
        bidirectional_watcher = None
        if bidirectional:
            # Mirror updates from target back onto this object.
            _reverse_updating = []
            reverse_links = {v: k for k, v in links.items()}
            def reverse_link(*events):
                for event in events:
                    if event.name in _reverse_updating: continue
                    _reverse_updating.append(event.name)
                    try:
                        setattr(self, reverse_links[event.name], event.new)
                    finally:
                        _reverse_updating.remove(event.name)
            bidirectional_watcher = target.param.watch(reverse_link, list(reverse_links))
        link = LinkWatcher(*tuple(cb)+(target, links, callbacks is not None, bidirectional_watcher))
        self._links.append(link)
        return cb
    def controls(self, parameters=[], jslink=True):
        """
        Creates a layout of widgets to control this object's
        parameters; jslink determines whether JS or Python links
        are used, parameters can restrict the exposed set.
        """
        from .param import Param
        from .layout import Tabs, WidgetBox
        from .widgets import LiteralInput
        if parameters:
            linkable = parameters
        elif jslink:
            linkable = self._linkable_params + ['loading']
        else:
            linkable = list(self.param)
        params = [p for p in linkable if p not in Viewable.param]
        controls = Param(self.param, parameters=params, default_layout=WidgetBox,
                         name='Controls')
        layout_params = [p for p in linkable if p in Viewable.param]
        if 'name' not in layout_params and self._rename.get('name', False) is not None and not parameters:
            layout_params.insert(0, 'name')
        style = Param(self.param, parameters=layout_params, default_layout=WidgetBox,
                      name='Layout')
        if jslink:
            for p in params:
                widget = controls._widgets[p]
                widget.jslink(self, value=p, bidirectional=True)
                if isinstance(widget, LiteralInput):
                    widget.serializer = 'json'
            for p in layout_params:
                widget = style._widgets[p]
                widget.jslink(self, value=p, bidirectional=p != 'loading')
                if isinstance(widget, LiteralInput):
                    widget.serializer = 'json'
        if params and layout_params:
            return Tabs(controls.layout[0], style.layout[0])
        elif params:
            return controls.layout[0]
        return style.layout[0]
    def jscallback(self, args={}, **callbacks):
        """
        Registers JS code snippets executed when the given properties
        change; returns the Callback handle.
        """
        from .links import Callback
        for k, v in list(callbacks.items()):
            callbacks[k] = self._rename.get(v, v)
        return Callback(self, code=callbacks, args=args)
    def jslink(self, target, code=None, args=None, bidirectional=False, **links):
        """
        Links properties on this object to properties on the target in
        JS, either via a property mapping (**links) or JS code
        snippets; returns the Link object.
        """
        if links and code:
            raise ValueError('Either supply a set of properties to '
                             'link as keywords or a set of JS code '
                             'callbacks, not both.')
        elif not links and not code:
            raise ValueError('Declare parameters to link or a set of '
                             'callbacks, neither was defined.')
        if args is None:
            args = {}
        mapping = code or links
        for k in mapping:
            if k.startswith('event:'):
                continue
            elif hasattr(self, 'object') and isinstance(self.object, LayoutDOM):
                current = self.object
                for attr in k.split('.'):
                    if not hasattr(current, attr):
                        raise ValueError(f"Could not resolve {k} on "
                                         f"{self.object} model. Ensure "
                                         "you jslink an attribute that "
                                         "exists on the bokeh model.")
                    current = getattr(current, attr)
            elif (k not in self.param and k not in list(self._rename.values())):
                matches = difflib.get_close_matches(k, list(self.param))
                if matches:
                    matches = ' Similar parameters include: %r' % matches
                else:
                    matches = ''
                raise ValueError("Could not jslink %r parameter (or property) "
                                 "on %s object because it was not found.%s"
                                 % (k, type(self).__name__, matches))
            elif (self._source_transforms.get(k, False) is None or
                  self._rename.get(k, False) is None):
                raise ValueError("Cannot jslink %r parameter on %s object, "
                                 "the parameter requires a live Python kernel "
                                 "to have an effect." % (k, type(self).__name__))
        if isinstance(target, Syncable) and code is None:
            for k, p in mapping.items():
                if k.startswith('event:'):
                    continue
                elif p not in target.param and p not in list(target._rename.values()):
                    matches = difflib.get_close_matches(p, list(target.param))
                    if matches:
                        matches = ' Similar parameters include: %r' % matches
                    else:
                        matches = ''
                    # NOTE(review): this message formats the *source* type
                    # even though the lookup failed on the target object;
                    # arguably should be type(target).__name__ — confirm.
                    raise ValueError("Could not jslink %r parameter (or property) "
                                     "on %s object because it was not found.%s"
                                     % (p, type(self).__name__, matches))
                elif (target._source_transforms.get(p, False) is None or
                      target._rename.get(p, False) is None):
                    raise ValueError("Cannot jslink %r parameter on %s object "
                                     "to %r parameter on %s object. It requires "
                                     "a live Python kernel to have an effect."
                                     % (k, type(self).__name__, p, type(target).__name__))
        from .links import Link
        return Link(self, target, properties=links, code=code, args=args,
                    bidirectional=bidirectional)
class SyncableData(Reactive):
    """
    A baseclass for components which sync one or more data parameters
    with the frontend via a ColumnDataSource.
    """
    selection = param.List(default=[], doc="""
        The currently selected rows in the data.""")
    # Parameters which when changed require an update of the data
    _data_params = []
    _rename = {'selection': None}
    __abstract = True
    def __init__(self, **params):
        super().__init__(**params)
        # Caches: CDS-compatible data dict and pre-processed raw data.
        self._data = None
        self._processed = None
        self.param.watch(self._validate, self._data_params)
        if self._data_params:
            self.param.watch(self._update_cds, self._data_params)
        self.param.watch(self._update_selected, 'selection')
        self._validate(None)
        self._update_cds()
    def _validate(self, event):
        """
        Allows implementing validation for the data parameters.
        """
    def _get_data(self):
        """
        Implemented by subclasses; returns the pre-processed data and a
        ColumnDataSource compatible dictionary of columns.
        """
    def _update_column(self, column, array):
        """
        Updates a single column on the data parameter.
        """
        data = getattr(self, self._data_params[0])
        data[column] = array
    def _update_data(self, data):
        # Assign new data to the primary (first) data parameter.
        self.param.set_param(**{self._data_params[0]: data})
    def _manual_update(self, events, model, doc, root, parent, comm):
        # Dispatch events to _update_<name> handlers, skipping echoes.
        for event in events:
            if event.type == 'triggered' and self._updating:
                continue
            elif hasattr(self, '_update_' + event.name):
                getattr(self, '_update_' + event.name)(model)
    def _update_cds(self, *events):
        # Recompute the data and push it to every rendered model.
        if self._updating:
            return
        self._processed, self._data = self._get_data()
        for ref, (m, _) in self._models.items():
            m.source.data = self._data
            push_on_root(ref)
    def _update_selected(self, *events, indices=None):
        # Sync the selection parameter to the CDS selection indices.
        if self._updating:
            return
        indices = self.selection if indices is None else indices
        for ref, (m, _) in self._models.items():
            m.source.selected.indices = indices
            push_on_root(ref)
    @updating
    def _stream(self, stream, rollover=None):
        # Append the streamed values to every rendered CDS.
        for ref, (m, _) in self._models.items():
            m.source.stream(stream, rollover)
            push_on_root(ref)
    @updating
    def _patch(self, patch):
        # Apply the patch to every rendered CDS.
        for ref, (m, _) in self._models.items():
            m.source.patch(patch)
            push_on_root(ref)
    def stream(self, stream_value, rollover=None, reset_index=True):
        """
        Streams (appends) the `stream_value` (DataFrame, Series or
        dict) to the existing value; rollover bounds the column size
        and reset_index re-indexes streamed DataFrames.
        """
        if 'pandas' in sys.modules:
            import pandas as pd
        else:
            pd = None
        if pd and isinstance(stream_value, pd.DataFrame):
            if isinstance(self._processed, dict):
                self.stream(stream_value.to_dict(), rollover)
                return
            if reset_index:
                value_index_start = self._processed.index.max() + 1
                stream_value = stream_value.reset_index(drop=True)
                stream_value.index += value_index_start
            combined = pd.concat([self._processed, stream_value])
            if rollover is not None:
                combined = combined.iloc[-rollover:]
            # Update silently, then trigger with _updating set so only
            # the appended rows are streamed to the frontend.
            with param.discard_events(self):
                self._update_data(combined)
            try:
                self._updating = True
                self.param.trigger(self._data_params[0])
            finally:
                self._updating = False
            try:
                self._updating = True
                self._stream(stream_value, rollover)
            finally:
                self._updating = False
        elif pd and isinstance(stream_value, pd.Series):
            if isinstance(self._processed, dict):
                self.stream({k: [v] for k, v in stream_value.to_dict().items()}, rollover)
                return
            value_index_start = self._processed.index.max() + 1
            self._processed.loc[value_index_start] = stream_value
            with param.discard_events(self):
                self._update_data(self._processed)
            self._updating = True
            try:
                self._stream(self._processed.iloc[-1:], rollover)
            finally:
                self._updating = False
        elif isinstance(stream_value, dict):
            if isinstance(self._processed, dict):
                # Dict-of-columns data: every column must be extended.
                if not all(col in stream_value for col in self._data):
                    raise ValueError("Stream update must append to all columns.")
                for col, array in stream_value.items():
                    combined = np.concatenate([self._data[col], array])
                    if rollover is not None:
                        combined = combined[-rollover:]
                    self._update_column(col, combined)
                self._updating = True
                try:
                    self._stream(stream_value, rollover)
                finally:
                    self._updating = False
            else:
                # Coerce the dict to pandas and re-enter the method.
                try:
                    stream_value = pd.DataFrame(stream_value)
                except ValueError:
                    stream_value = pd.Series(stream_value)
                self.stream(stream_value)
        else:
            raise ValueError("The stream value provided is not a DataFrame, Series or Dict!")
    def patch(self, patch_value):
        """
        Efficiently patches (updates) the existing value with the
        `patch_value` (DataFrame, Series or dict of
        {column: [(index, value), ...]}).
        """
        if self._processed is None or isinstance(patch_value, dict):
            self._patch(patch_value)
            return
        if 'pandas' in sys.modules:
            import pandas as pd
        else:
            pd = None
        data = getattr(self, self._data_params[0])
        if pd and isinstance(patch_value, pd.DataFrame):
            # Convert to {column: [(index, value), ...]} and re-enter.
            patch_value_dict = {}
            for column in patch_value.columns:
                patch_value_dict[column] = []
                for index in patch_value.index:
                    patch_value_dict[column].append((index, patch_value.loc[index, column]))
            self.patch(patch_value_dict)
        elif pd and isinstance(patch_value, pd.Series):
            if "index" in patch_value:  # Series orient is row
                patch_value_dict = {
                    k: [(patch_value["index"], v)] for k, v in patch_value.items()
                }
                patch_value_dict.pop("index")
            else:  # Series orient is column
                patch_value_dict = {
                    patch_value.name: [(index, value) for index, value in patch_value.items()]
                }
            self.patch(patch_value_dict)
        elif isinstance(patch_value, dict):
            # Apply each (index, value) patch locally, then mirror it
            # on the frontend CDS.
            for k, v in patch_value.items():
                for index, patch in v:
                    if pd and isinstance(self._processed, pd.DataFrame):
                        data.loc[index, k] = patch
                    else:
                        data[k][index] = patch
                self._updating = True
                try:
                    self._patch(patch_value)
                finally:
                    self._updating = False
        else:
            raise ValueError(
                f"Patching with a patch_value of type {type(patch_value).__name__} "
                "is not supported. Please provide a DataFrame, Series or Dict."
            )
class ReactiveData(SyncableData):
    """
    An extension of SyncableData which bi-directionally syncs the data
    parameter between frontend and backend using a ColumnDataSource.
    """
    def _update_selection(self, indices):
        self.selection = indices
    def _process_events(self, events):
        # Apply frontend data edits before delegating the remaining
        # property changes to the base implementation.
        if 'data' in events:
            data = events.pop('data')
            if self._updating:
                # Change originated from our own update; ignore it.
                data = {}
            _, old_data = self._get_data()
            updated = False
            for k, v in data.items():
                if k in self.indexes:
                    continue
                k = self._renamed_cols.get(k, k)
                if isinstance(v, dict):
                    # Columns may arrive as {row_index: value} dicts;
                    # restore positional order by the integer keys.
                    v = [v for _, v in sorted(v.items(), key=lambda it: int(it[0]))]
                try:
                    isequal = (old_data[k] == np.asarray(v)).all()
                except Exception:
                    isequal = False
                if not isequal:
                    self._update_column(k, v)
                    updated = True
            if updated:
                # Notify watchers while suppressing a frontend round-trip.
                self._updating = True
                try:
                    self.param.trigger('value')
                finally:
                    self._updating = False
        if 'indices' in events:
            self._updating = True
            try:
                self._update_selection(events.pop('indices'))
            finally:
                self._updating = False
        super(ReactiveData, self)._process_events(events)
| true | true |
f71e697a978e3d9c4101aee1b43bf18c1e22305d | 8,966 | py | Python | docs/examples/batzle_wang_1992.py | trhallam/digirock | 05b1199d741a384345a4930605be97369c9ec270 | [
"MIT"
] | null | null | null | docs/examples/batzle_wang_1992.py | trhallam/digirock | 05b1199d741a384345a4930605be97369c9ec270 | [
"MIT"
] | 2 | 2022-02-28T08:51:53.000Z | 2022-02-28T13:24:33.000Z | docs/examples/batzle_wang_1992.py | trhallam/digirock | 05b1199d741a384345a4930605be97369c9ec270 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.6
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %% [markdown]
# __Recreate the work by Batzle and Wang 1992 to check `digirock.fluids.bw92` functionality.__
#
# Tony Hallam 2022
# %% [markdown]
# This notebook contains working code to test the functionality of `bw92.py` in the `fluids` module of `digirock`, ensuring that the functions honor the work by B&W 1992.
#
# _Batzle, M., and Wang, Z. [1992]. Seismic properties of pore fluids. Geophysics, 57(11), 1396–1408._
# [Available from the SEG](https://library.seg.org/doi/10.1190/1.1443207).
# %%
import numpy as np
from digirock.fluids import bw92
# %matplotlib inline
import matplotlib.pyplot as plt
from matplotlib import rc
rc("font", size=14)
figsize = (15, 5)
# %%
# Input parameters as defined by B&W 1992, used to reproduce the paper's figures.
temp_ar = np.arange(10, 350, 5)  # degC
pres_ar = np.arange(1, 100, 0.1)  # Mpa
sal_ar = np.arange(0, 0.3, 0.01)  # salinity as weight fraction
pres = np.array([0.1, 10, 25, 50])  # Mpa
temps = np.array([10, 100, 200, 350])  # degC
gsg = [0.6, 1.2]  # gas Gravity
or0 = [1.0, 0.88, 0.78]  # oil density re 15.6degC
# %% [markdown]
# ## GAS
#
# Hydrocarbon density as a function of temperature and pressure using `bw92.gas_oga_density`, BW92 Eq 10a.
# %%
# Left panel: gas density vs temperature at the spot pressures;
# right panel: gas density vs pressure at the spot temperatures.
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize)
for G in gsg:
    for p in pres:
        ax[0].plot(temp_ar, bw92.gas_oga_density(temp_ar, p, G), label=f'G={G}, P={p}')
    for t in temps:
        ax[1].plot(pres_ar, bw92.gas_oga_density(t, pres_ar, G), label=f'G={G}, T={t}')
ax[0].set_xlim(0, 350)
ax[0].set_ylim(0, 0.6)
ax[0].set_xlabel('Temp (C)')
ax[0].set_ylabel('Density (g/cc)')
ax[0].legend()
_ = ax[0].set_title('B&W 1992, Figure 2')
ax[1].set_xlim(0, 50)
ax[1].set_ylim(0, 0.6)
ax[1].set_xlabel('Pressure (MPa)')
ax[1].set_ylabel('Density (g/cc)')
_ = ax[1].legend()
# %% [markdown]
# Gas adibatic bulk modulus using `bw92.gas_adiabatic_bulkmod`.
# %%
# Gas adiabatic bulk modulus (B&W 1992 Figure 3).
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize, sharey=True)
for G in gsg:
    for p in pres:
        # *1000: scale the modulus so the y-axis reads in MPa
        # (assumes bw92.gas_adiabatic_bulkmod returns GPa) — TODO confirm.
        ax[0].plot(temp_ar, bw92.gas_adiabatic_bulkmod(temp_ar, p, G)*1000, label=f'G={G}, P={p}')
    for t in temps:
        ax[1].plot(pres_ar, bw92.gas_adiabatic_bulkmod(t, pres_ar, G)*1000, label=f'G={G}, T={t}')
ax[0].set_xlim(0, 350)
ax[0].set_ylim(0, 650)
ax[0].set_xlabel('Temp (C)')
ax[0].set_ylabel('Bulk Modulus (MPa)')
ax[0].legend()
ax[0].set_title('B&W 1992 - Figure 3')
ax[1].set_xlim(0, 50)
ax[1].set_xlabel('Pressure (MPa)')
_ = ax[1].legend()
# %% [markdown]
# Gas viscosity using `bw92.gas_adiabatic_viscosity` using equations 12 and 13.
# %%
# Gas viscosity (B&W 1992 Figure 4, eqs 12-13).
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize, sharey=True)
for G in gsg:
    for p in pres:
        ax[0].plot(temp_ar, bw92.gas_adiabatic_viscosity(temp_ar, p, G), label=f'G={G}, P={p}')
    for t in temps:
        ax[1].plot(pres_ar, bw92.gas_adiabatic_viscosity(t, pres_ar, G), label=f'G={G}, T={t}')
ax[0].set_xlabel('Temp (C)')
ax[0].set_ylabel('Viscosity (centipoise)')
ax[0].set_xlim(0, 350)
ax[0].set_ylim(0, 0.09)
ax[0].set_title('B&W 1992 - Figure 4')
ax[1].set_xlabel('Pressure (MPa)')
ax[1].set_xlim(0, 50)
_ = ax[1].legend()
# %% [markdown]
# ## OIL
#
# Dead oil density using `bw92.oil_density`, BW92 eq19.
# %%
# Dead oil density (B&W 1992 Figure 5, eq 19).
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize, sharey=True)
for p in pres:
    for r0 in or0:
        ax[0].plot(temp_ar, bw92.oil_density(r0, p, temp_ar), label=f'r0={r0}, P={p}')
    # NOTE(review): this inner loop never uses `p` and reuses the leftover
    # r0 (=0.78, the last element of or0) from the loop above, so the right
    # panel redraws identical curves once per pressure; it looks like it was
    # meant to be nested under `for r0 in or0` — confirm against Figure 5.
    for t in temps:
        ax[1].plot(pres_ar, bw92.oil_density(r0, pres_ar, t), label=f'r0={r0}, T={t}')
ax[0].set_xlabel('Temp (C)')
ax[0].set_ylabel('Oil Density (g/cc)')
ax[0].set_xlim(0, 350)
ax[0].set_ylim(0.55, 1.05)
ax[0].set_title('B&W 1992 - Figure 5')
ax[0].legend()
ax[1].set_xlabel('Pressure (MPa)')
ax[1].set_xlim(0, 50)
_ = ax[1].legend(loc=[1.1, 0])
# %% [markdown]
# Oil acoustic velocity using `bw92.oil_velocity`, BW92 eq 20a.
# %%
# Oil acoustic velocity vs API gravity (B&W 1992 Figure 6, eq 20a).
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(7.5,5))
api_ar = np.arange(0,70)  # oil api
# API gravity -> relative density: rho0 = 141.5 / (API + 131.5).
# Fix: the original used 141, a typo for the standard 141.5 constant.
rho0_ar = 141.5 / (api_ar + 131.5)
ax.plot(api_ar, bw92.oil_velocity(rho0_ar, 15.6, 1E-4, 0.6, 50))
ax.set_xlim(0, 70)
ax.set_ylim(1100, 1800)
ax.set_xlabel('Oil API')
ax.set_ylabel('Oil Velocity (m/s)')
ax.set_title('B&W 1992 - Figure 6')
# %% [markdown]
# Oil bulk modulus using `bw92.bulkmod`.
# %%
# Oil bulk modulus K = rho * Vp^2 (B&W 1992 Figure 7).
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize, sharey=True)
ax[0].set_xlim(0, 350)
ax[0].set_ylim(0, 30)
for r0 in or0:
    for p in pres:
        oil_rho = bw92.oil_density(r0, p, temp_ar)
        oil_vp = bw92.oil_velocity(r0, p, temp_ar, 0.6, 50)
        # NOTE(review): the *10 density scaling is presumably a unit
        # conversion expected by bw92.bulkmod — confirm against its docs.
        ax[0].plot(temp_ar, bw92.bulkmod(oil_rho*10, oil_vp),label=f"{r0} {p}MPa")
    for t in temps:
        oil_rho = bw92.oil_density(r0, pres_ar, t)
        oil_vp = bw92.oil_velocity(r0, pres_ar, t, 0.6, 50)
        ax[1].plot(pres_ar, bw92.bulkmod(oil_rho*10, oil_vp),label=f"{r0} {t}degC")
ax[0].set_xlabel('Temp (C)')
# Fix: axis-label typo "Modlus" -> "Modulus".
ax[0].set_ylabel('Oil Bulk Modulus (MPa)')
ax[0].set_title('B&W 1992 - Figure 7')
ax[0].legend()#cols=2)
ax[1].set_xlabel('Pressure (MPa)')
ax[1].set_xlim(0, 50)
_ = ax[1].legend()
# %% [markdown]
# ## WATER
#
# Set up some parameters for plotting water.
# %%
# NOTE(review): presv is re-assigned to [50, 100, 130] in the cells below,
# shadowing this value before it is ever used for plotting.
presv = [50, 100, 110]  # pressure MPa for velocity plots
presrho = [9.81, 49, 98.1]  # pressure MPa for density plots
presk = [0.1, 50, 100]  # pressure MPa for modulus plots
sal = np.array([20000, 150000, 240000])/1000000  # ppm to weigh fraction
salk = np.array([0, 150000, 300000])/1000000  # ppm to weigh fraction
# %% [markdown]
# Pure water sonic velocity using `bw92.wat_velocity_pure` and pure water density using `bw92.wat_density_pure`. The parameters Batzle and Wang use from Wilson for pure water velocity were only calibrated to 100degC and 100MPa. So the behaviour above that is a bit odd, even though the plot in the 1992 paper looks good.
# %%
# Pure water velocity and density (B&W 1992 Figure 12).
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize, sharex=True)
# Overrides the presv defined in the parameter cell above ([50, 100, 110]).
presv = [50, 100, 130]  # pressure MPa
tvp_mesh, pvvt_mesh = np.meshgrid(temp_ar, presv)
wvp_mesh = bw92.wat_velocity_pure(tvp_mesh, pvvt_mesh)
wdp_mesh = bw92.wat_density_pure(tvp_mesh, pvvt_mesh)
# Row i of each mesh corresponds to pressure presv[i].
for i, p in enumerate(presv):
    ax[0].plot(temp_ar, wvp_mesh[i, :], label=f"{p}MPa")
    ax[1].plot(temp_ar, wdp_mesh[i, :], label=f"{p}MPa")
ax[0].set_xlabel('Temp (C)')
ax[0].set_ylabel('Velocity (m/s)')
ax[0].set_title('B&W 1992 - Figure 12')
ax[0].legend()#cols=2)
ax[0].set_xlim(0, 350)
ax[0].set_ylim(500, 2000)
ax[1].set_xlabel('Temp (C)')
ax[1].set_ylabel('Density (g/cc)')
_ = ax[1].legend()
# %% [markdown]
# Brine sonic velocity using `bw92.wat_velocity_brine` and `bw92.wat_density_brine`. Again, odd behaviour due to the influence of the pure water function on the brine velocity.
# %%
# Brine velocity and density (B&W 1992 Figure 13).
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize, sharex=True)
presv = [50, 100, 130]  # pressure MPa
# Density mesh rows correspond to the presrho pressures; velocity mesh rows
# correspond to the presv pressures (np.meshgrid puts the second argument
# along axis 0 with the default 'xy' indexing).
db1, db2, db3 = np.meshgrid(temp_ar, presrho, sal)
wdb_mesh = bw92.wat_density_brine(db1, db2, db3)
vb1, vb2, vb3 = np.meshgrid(temp_ar, presv, sal)
wvb_mesh = bw92.wat_velocity_brine(vb1, vb2, vb3)
for i, p in enumerate(presv):
    ax[0].plot(temp_ar, wvb_mesh[i, :], label=f"{p}MPa")
# Fix: the original labeled the density curves with the presv pressures even
# though the density mesh was built from presrho — use the actual pressures.
for i, p in enumerate(presrho):
    ax[1].plot(temp_ar, wdb_mesh[i, :], label=f"{p}MPa")
ax[0].set_xlabel('Temp (C)')
ax[0].set_ylabel('Velocity (m/s)')
ax[0].set_title('B&W 1992 - Figure 13')
ax[0].legend()#cols=2)
ax[0].set_xlim(0, 350)
ax[0].set_ylim(1000, 2500)
ax[1].set_xlabel('Temp (C)')
ax[1].set_ylabel('Density (g/cc)')
_ = ax[1].legend()
# %% [markdown]
# Brine bulk modulus using `bw92.wat_bulkmod`. This relies on calculating the velocity and density first.
# %% tags=[]
# Brine bulk modulus from density and velocity (B&W 1992 Figure 14).
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize)
kb1, kb2, kb3 = np.meshgrid(temp_ar, presk, salk)
kr = bw92.wat_density_brine(kb1, kb2, kb3)
kv = bw92.wat_velocity_brine(kb1, kb2, kb3)
wkb_mesh = bw92.wat_bulkmod(kr, kv)
# Fix: the mesh rows correspond to presk, but the original enumerated presv
# for the labels; enumerate presk so labels match the data.
for i, p in enumerate(presk):
    ax[0].plot(temp_ar, wkb_mesh[i, :], label=f"{p}MPa")
kb1, kb2, kb3 = np.meshgrid(pres_ar, temps, salk)
kr = bw92.wat_density_brine(kb2, kb1, kb3)
kv = bw92.wat_velocity_brine(kb2, kb1, kb3)
wkb_mesh = bw92.wat_bulkmod(kr, kv)
for i, t in enumerate(temps):
    ax[1].plot(pres_ar, wkb_mesh[i, :], label=f"{t}degC")
ax[0].set_xlabel('Temp (C)')
ax[0].set_ylabel('Bulk Modulus (GPa)')
ax[0].set_ylim(0.5, 5.5)
ax[0].set_title('B&W 1992 - Figure 14')
ax[0].legend()#cols=2)
ax[1].set_xlabel('Pressure (MPa)')
ax[1].set_ylabel('Bulk Modulus (GPa)')
_ = ax[1].legend()
# %% [markdown]
# ## Other Methods
#
# For a full list of the BW92 equations available with `digirock` see the [`digirock.fluids.bw92` api](../api/fluid_methods.html#batzle-and-wang-92).
| 29.11039 | 320 | 0.65135 |
import numpy as np
from digirock.fluids import bw92
import matplotlib.pyplot as plt
from matplotlib import rc
rc("font", size=14)
figsize = (15, 5)
temp_ar = np.arange(10, 350, 5)
pres_ar = np.arange(1, 100, 0.1)
sal_ar = np.arange(0, 0.3, 0.01)
pres = np.array([0.1, 10, 25, 50])
temps = np.array([10, 100, 200, 350])
gsg = [0.6, 1.2]
or0 = [1.0, 0.88, 0.78]
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize)
for G in gsg:
for p in pres:
ax[0].plot(temp_ar, bw92.gas_oga_density(temp_ar, p, G), label=f'G={G}, P={p}')
for t in temps:
ax[1].plot(pres_ar, bw92.gas_oga_density(t, pres_ar, G), label=f'G={G}, T={t}')
ax[0].set_xlim(0, 350)
ax[0].set_ylim(0, 0.6)
ax[0].set_xlabel('Temp (C)')
ax[0].set_ylabel('Density (g/cc)')
ax[0].legend()
_ = ax[0].set_title('B&W 1992, Figure 2')
ax[1].set_xlim(0, 50)
ax[1].set_ylim(0, 0.6)
ax[1].set_xlabel('Pressure (MPa)')
ax[1].set_ylabel('Density (g/cc)')
_ = ax[1].legend()
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize, sharey=True)
for G in gsg:
for p in pres:
ax[0].plot(temp_ar, bw92.gas_adiabatic_bulkmod(temp_ar, p, G)*1000, label=f'G={G}, P={p}')
for t in temps:
ax[1].plot(pres_ar, bw92.gas_adiabatic_bulkmod(t, pres_ar, G)*1000, label=f'G={G}, T={t}')
ax[0].set_xlim(0, 350)
ax[0].set_ylim(0, 650)
ax[0].set_xlabel('Temp (C)')
ax[0].set_ylabel('Bulk Modulus (MPa)')
ax[0].legend()
ax[0].set_title('B&W 1992 - Figure 3')
ax[1].set_xlim(0, 50)
ax[1].set_xlabel('Pressure (MPa)')
_ = ax[1].legend()
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize, sharey=True)
for G in gsg:
for p in pres:
ax[0].plot(temp_ar, bw92.gas_adiabatic_viscosity(temp_ar, p, G), label=f'G={G}, P={p}')
for t in temps:
ax[1].plot(pres_ar, bw92.gas_adiabatic_viscosity(t, pres_ar, G), label=f'G={G}, T={t}')
ax[0].set_xlabel('Temp (C)')
ax[0].set_ylabel('Viscosity (centipoise)')
ax[0].set_xlim(0, 350)
ax[0].set_ylim(0, 0.09)
ax[0].set_title('B&W 1992 - Figure 4')
ax[1].set_xlabel('Pressure (MPa)')
ax[1].set_xlim(0, 50)
_ = ax[1].legend()
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize, sharey=True)
for p in pres:
for r0 in or0:
ax[0].plot(temp_ar, bw92.oil_density(r0, p, temp_ar), label=f'r0={r0}, P={p}')
for t in temps:
ax[1].plot(pres_ar, bw92.oil_density(r0, pres_ar, t), label=f'r0={r0}, T={t}')
ax[0].set_xlabel('Temp (C)')
ax[0].set_ylabel('Oil Density (g/cc)')
ax[0].set_xlim(0, 350)
ax[0].set_ylim(0.55, 1.05)
ax[0].set_title('B&W 1992 - Figure 5')
ax[0].legend()
ax[1].set_xlabel('Pressure (MPa)')
ax[1].set_xlim(0, 50)
_ = ax[1].legend(loc=[1.1, 0])
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(7.5,5))
api_ar = np.arange(0,70)
rho0_ar = 141/ (api_ar + 131.5)
ax.plot(api_ar, bw92.oil_velocity(rho0_ar, 15.6, 1E-4, 0.6, 50))
ax.set_xlim(0, 70)
ax.set_ylim(1100, 1800)
ax.set_xlabel('Oil API')
ax.set_ylabel('Oil Velocity (m/s)')
ax.set_title('B&W 1992 - Figure 6')
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize, sharey=True)
ax[0].set_xlim(0, 350)
ax[0].set_ylim(0, 30)
for r0 in or0:
for p in pres:
oil_rho = bw92.oil_density(r0, p, temp_ar)
oil_vp = bw92.oil_velocity(r0, p, temp_ar, 0.6, 50)
ax[0].plot(temp_ar, bw92.bulkmod(oil_rho*10, oil_vp),label=f"{r0} {p}MPa")
for t in temps:
oil_rho = bw92.oil_density(r0, pres_ar, t)
oil_vp = bw92.oil_velocity(r0, pres_ar, t, 0.6, 50)
ax[1].plot(pres_ar, bw92.bulkmod(oil_rho*10, oil_vp),label=f"{r0} {t}degC")
ax[0].set_xlabel('Temp (C)')
ax[0].set_ylabel('Oil Bulk Modlus (MPa)')
ax[0].set_title('B&W 1992 - Figure 7')
ax[0].legend()
ax[1].set_xlabel('Pressure (MPa)')
ax[1].set_xlim(0, 50)
_ = ax[1].legend()
presv = [50, 100, 110]
presrho = [9.81, 49, 98.1]
presk = [0.1, 50, 100]
sal = np.array([20000, 150000, 240000])/1000000
salk = np.array([0, 150000, 300000])/1000000
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize, sharex=True)
presv = [50, 100, 130]
tvp_mesh, pvvt_mesh = np.meshgrid(temp_ar, presv)
wvp_mesh = bw92.wat_velocity_pure(tvp_mesh, pvvt_mesh)
wdp_mesh = bw92.wat_density_pure(tvp_mesh, pvvt_mesh)
for i, p in enumerate(presv):
ax[0].plot(temp_ar, wvp_mesh[i, :], label=f"{p}MPa")
ax[1].plot(temp_ar, wdp_mesh[i, :], label=f"{p}MPa")
ax[0].set_xlabel('Temp (C)')
ax[0].set_ylabel('Velocity (m/s)')
ax[0].set_title('B&W 1992 - Figure 12')
ax[0].legend()
ax[0].set_xlim(0, 350)
ax[0].set_ylim(500, 2000)
ax[1].set_xlabel('Temp (C)')
ax[1].set_ylabel('Density (g/cc)')
_ = ax[1].legend()
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize, sharex=True)
presv = [50, 100, 130]
db1, db2, db3 = np.meshgrid(temp_ar, presrho, sal)
wdb_mesh = bw92.wat_density_brine(db1, db2, db3)
vb1, vb2, vb3 = np.meshgrid(temp_ar, presv, sal)
wvb_mesh = bw92.wat_velocity_brine(vb1, vb2, vb3)
for i, p in enumerate(presv):
ax[0].plot(temp_ar, wvb_mesh[i, :], label=f"{p}MPa")
ax[1].plot(temp_ar, wdb_mesh[i, :], label=f"{p}MPa")
ax[0].set_xlabel('Temp (C)')
ax[0].set_ylabel('Velocity (m/s)')
ax[0].set_title('B&W 1992 - Figure 13')
ax[0].legend()
ax[0].set_xlim(0, 350)
ax[0].set_ylim(1000, 2500)
ax[1].set_xlabel('Temp (C)')
ax[1].set_ylabel('Density (g/cc)')
_ = ax[1].legend()
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize)
kb1, kb2, kb3 = np.meshgrid(temp_ar, presk, salk)
kr = bw92.wat_density_brine(kb1, kb2, kb3)
kv = bw92.wat_velocity_brine(kb1, kb2, kb3)
wkb_mesh = bw92.wat_bulkmod(kr, kv)
for i, p in enumerate(presv):
ax[0].plot(temp_ar, wkb_mesh[i, :], label=f"{p}MPa")
kb1, kb2, kb3 = np.meshgrid(pres_ar, temps, salk)
kr = bw92.wat_density_brine(kb2, kb1, kb3)
kv = bw92.wat_velocity_brine(kb2, kb1, kb3)
wkb_mesh = bw92.wat_bulkmod(kr, kv)
for i, t in enumerate(temps):
ax[1].plot(pres_ar, wkb_mesh[i, :], label=f"{t}degC")
ax[0].set_xlabel('Temp (C)')
ax[0].set_ylabel('Bulk Modulus (GPa)')
ax[0].set_ylim(0.5, 5.5)
ax[0].set_title('B&W 1992 - Figure 14')
ax[0].legend()
ax[1].set_xlabel('Pressure (MPa)')
ax[1].set_ylabel('Bulk Modulus (GPa)')
_ = ax[1].legend()
| true | true |
f71e69d9aaba3771528b04fae2b24551b321c43d | 25,996 | py | Python | homeassistant/components/simplisafe/__init__.py | stravinci/AIS-home-assistant | ead4dafd3f801ebeb32860bd34443ed24a4f4167 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/simplisafe/__init__.py | stravinci/AIS-home-assistant | ead4dafd3f801ebeb32860bd34443ed24a4f4167 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/simplisafe/__init__.py | stravinci/AIS-home-assistant | ead4dafd3f801ebeb32860bd34443ed24a4f4167 | [
"Apache-2.0"
] | null | null | null | """Support for SimpliSafe alarm systems."""
import asyncio
from uuid import UUID
from simplipy import API
from simplipy.entity import EntityTypes
from simplipy.errors import EndpointUnavailable, InvalidCredentialsError, SimplipyError
from simplipy.websocket import (
EVENT_CAMERA_MOTION_DETECTED,
EVENT_CONNECTION_LOST,
EVENT_CONNECTION_RESTORED,
EVENT_DOORBELL_DETECTED,
EVENT_ENTRY_DETECTED,
EVENT_LOCK_LOCKED,
EVENT_LOCK_UNLOCKED,
EVENT_MOTION_DETECTED,
)
import voluptuous as vol
from homeassistant.config_entries import SOURCE_REAUTH
from homeassistant.const import (
ATTR_CODE,
CONF_CODE,
CONF_TOKEN,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import CoreState, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import (
aiohttp_client,
config_validation as cv,
device_registry as dr,
)
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.service import (
async_register_admin_service,
verify_domain_control,
)
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import (
ATTR_ALARM_DURATION,
ATTR_ALARM_VOLUME,
ATTR_CHIME_VOLUME,
ATTR_ENTRY_DELAY_AWAY,
ATTR_ENTRY_DELAY_HOME,
ATTR_EXIT_DELAY_AWAY,
ATTR_EXIT_DELAY_HOME,
ATTR_LIGHT,
ATTR_VOICE_PROMPT_VOLUME,
DATA_CLIENT,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
LOGGER,
VOLUMES,
)
# Storage key (under hass.data[DOMAIN]) for per-entry stop-event listeners.
DATA_LISTENER = "listener"
# Dispatcher signal (formatted with a system ID) used to push websocket events.
TOPIC_UPDATE_WEBSOCKET = "simplisafe_update_websocket_{0}"
# Event types fired on the HASS event bus.
EVENT_SIMPLISAFE_EVENT = "SIMPLISAFE_EVENT"
EVENT_SIMPLISAFE_NOTIFICATION = "SIMPLISAFE_NOTIFICATION"
DEFAULT_SOCKET_MIN_RETRY = 15
# Platforms forwarded for each config entry.
SUPPORTED_PLATFORMS = (
    "alarm_control_panel",
    "binary_sensor",
    "lock",
    "sensor",
)
# Websocket events that are scoped to a particular sensor serial number.
WEBSOCKET_EVENTS_REQUIRING_SERIAL = [EVENT_LOCK_LOCKED, EVENT_LOCK_UNLOCKED]
# Websocket events that additionally fire EVENT_SIMPLISAFE_EVENT on the bus.
WEBSOCKET_EVENTS_TO_TRIGGER_HASS_EVENT = [
    EVENT_CAMERA_MOTION_DETECTED,
    EVENT_DOORBELL_DETECTED,
    EVENT_ENTRY_DETECTED,
    EVENT_MOTION_DETECTED,
]
# Attribute keys used in fired events and entity state attributes.
ATTR_CATEGORY = "category"
ATTR_LAST_EVENT_CHANGED_BY = "last_event_changed_by"
ATTR_LAST_EVENT_INFO = "last_event_info"
ATTR_LAST_EVENT_SENSOR_NAME = "last_event_sensor_name"
ATTR_LAST_EVENT_SENSOR_SERIAL = "last_event_sensor_serial"
ATTR_LAST_EVENT_SENSOR_TYPE = "last_event_sensor_type"
ATTR_LAST_EVENT_TIMESTAMP = "last_event_timestamp"
# Fix: this constant was accidentally assigned twice; keep one definition.
ATTR_LAST_EVENT_TYPE = "last_event_type"
ATTR_MESSAGE = "message"
ATTR_PIN_LABEL = "label"
ATTR_PIN_LABEL_OR_VALUE = "label_or_pin"
ATTR_PIN_VALUE = "pin"
ATTR_SYSTEM_ID = "system_id"
ATTR_TIMESTAMP = "timestamp"
# Every service call must name the system it targets.
SERVICE_BASE_SCHEMA = vol.Schema({vol.Required(ATTR_SYSTEM_ID): cv.positive_int})
SERVICE_REMOVE_PIN_SCHEMA = SERVICE_BASE_SCHEMA.extend(
    {vol.Required(ATTR_PIN_LABEL_OR_VALUE): cv.string}
)
SERVICE_SET_PIN_SCHEMA = SERVICE_BASE_SCHEMA.extend(
    {vol.Required(ATTR_PIN_LABEL): cv.string, vol.Required(ATTR_PIN_VALUE): cv.string}
)
# Settable system properties: time periods are converted to whole seconds and
# range-checked; volumes must be one of the VOLUMES presets.
SERVICE_SET_SYSTEM_PROPERTIES_SCHEMA = SERVICE_BASE_SCHEMA.extend(
    {
        vol.Optional(ATTR_ALARM_DURATION): vol.All(
            cv.time_period, lambda value: value.seconds, vol.Range(min=30, max=480)
        ),
        vol.Optional(ATTR_ALARM_VOLUME): vol.All(vol.Coerce(int), vol.In(VOLUMES)),
        vol.Optional(ATTR_CHIME_VOLUME): vol.All(vol.Coerce(int), vol.In(VOLUMES)),
        vol.Optional(ATTR_ENTRY_DELAY_AWAY): vol.All(
            cv.time_period, lambda value: value.seconds, vol.Range(min=30, max=255)
        ),
        vol.Optional(ATTR_ENTRY_DELAY_HOME): vol.All(
            cv.time_period, lambda value: value.seconds, vol.Range(max=255)
        ),
        vol.Optional(ATTR_EXIT_DELAY_AWAY): vol.All(
            cv.time_period, lambda value: value.seconds, vol.Range(min=45, max=255)
        ),
        vol.Optional(ATTR_EXIT_DELAY_HOME): vol.All(
            cv.time_period, lambda value: value.seconds, vol.Range(max=255)
        ),
        vol.Optional(ATTR_LIGHT): cv.boolean,
        vol.Optional(ATTR_VOICE_PROMPT_VOLUME): vol.All(
            vol.Coerce(int), vol.In(VOLUMES)
        ),
    }
)
# configuration.yaml setup is marked deprecated (invalidation targeted 0.119).
CONFIG_SCHEMA = cv.deprecated(DOMAIN, invalidation_version="0.119")
@callback
def _async_save_refresh_token(hass, config_entry, token):
    """Persist a new SimpliSafe refresh token onto the config entry."""
    updated_data = dict(config_entry.data)
    updated_data[CONF_TOKEN] = token
    hass.config_entries.async_update_entry(config_entry, data=updated_data)
async def async_get_client_id(hass):
    """Return a SimpliSafe API client ID derived from the HASS instance ID.

    SimpliSafe requires full, "dashed" UUIDs, so the instance ID is round-
    tripped through uuid.UUID to normalize its formatting.
    """
    instance_id = await hass.helpers.instance_id.async_get()
    return str(UUID(instance_id))
async def async_register_base_station(hass, system, config_entry_id):
    """Register the system's base station in the device registry."""
    registry = await dr.async_get_registry(hass)
    device_kwargs = {
        "config_entry_id": config_entry_id,
        "identifiers": {(DOMAIN, system.serial)},
        "manufacturer": "SimpliSafe",
        "model": system.version,
        "name": system.address,
    }
    registry.async_get_or_create(**device_kwargs)
async def async_setup(hass, config):
    """Set up the SimpliSafe component's shared storage."""
    domain_data = {DATA_CLIENT: {}, DATA_LISTENER: {}}
    hass.data[DOMAIN] = domain_data
    return True
async def async_setup_entry(hass, config_entry):
    """Set up SimpliSafe as config entry."""
    # Collect any config-entry migrations and apply them in one update call:
    entry_updates = {}
    if not config_entry.unique_id:
        # If the config entry doesn't already have a unique ID, set one:
        entry_updates["unique_id"] = config_entry.data[CONF_USERNAME]
    if CONF_CODE in config_entry.data:
        # If an alarm code was provided as part of configuration.yaml, pop it out of
        # the config entry's data and move it to options:
        data = {**config_entry.data}
        entry_updates["data"] = data
        entry_updates["options"] = {
            **config_entry.options,
            CONF_CODE: data.pop(CONF_CODE),
        }
    if entry_updates:
        hass.config_entries.async_update_entry(config_entry, **entry_updates)
    _verify_domain_control = verify_domain_control(hass, DOMAIN)
    client_id = await async_get_client_id(hass)
    websession = aiohttp_client.async_get_clientsession(hass)
    # Authenticate against the SimpliSafe cloud with the stored refresh token:
    try:
        api = await API.login_via_token(
            config_entry.data[CONF_TOKEN], client_id=client_id, session=websession
        )
    except InvalidCredentialsError:
        LOGGER.error("Invalid credentials provided")
        return False
    except SimplipyError as err:
        LOGGER.error("Config entry failed: %s", err)
        raise ConfigEntryNotReady from err
    # Logging in rotates the refresh token; persist the new one immediately:
    _async_save_refresh_token(hass, config_entry, api.refresh_token)
    simplisafe = hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id] = SimpliSafe(
        hass, api, config_entry
    )
    await simplisafe.async_init()
    for platform in SUPPORTED_PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(config_entry, platform)
        )
    @callback
    def verify_system_exists(coro):
        """Log an error if a service call uses an invalid system ID."""
        async def decorator(call):
            """Decorate."""
            system_id = int(call.data[ATTR_SYSTEM_ID])
            if system_id not in simplisafe.systems:
                LOGGER.error("Unknown system ID in service call: %s", system_id)
                return
            await coro(call)
        return decorator
    @callback
    def v3_only(coro):
        """Log an error if the decorated coroutine is called with a v2 system."""
        async def decorator(call):
            """Decorate."""
            system = simplisafe.systems[int(call.data[ATTR_SYSTEM_ID])]
            if system.version != 3:
                LOGGER.error("Service only available on V3 systems")
                return
            await coro(call)
        return decorator
    @verify_system_exists
    @_verify_domain_control
    async def clear_notifications(call):
        """Clear all active notifications."""
        system = simplisafe.systems[call.data[ATTR_SYSTEM_ID]]
        try:
            await system.clear_notifications()
        except SimplipyError as err:
            LOGGER.error("Error during service call: %s", err)
            return
    @verify_system_exists
    @_verify_domain_control
    async def remove_pin(call):
        """Remove a PIN."""
        system = simplisafe.systems[call.data[ATTR_SYSTEM_ID]]
        try:
            await system.remove_pin(call.data[ATTR_PIN_LABEL_OR_VALUE])
        except SimplipyError as err:
            LOGGER.error("Error during service call: %s", err)
            return
    @verify_system_exists
    @_verify_domain_control
    async def set_pin(call):
        """Set a PIN."""
        system = simplisafe.systems[call.data[ATTR_SYSTEM_ID]]
        try:
            await system.set_pin(call.data[ATTR_PIN_LABEL], call.data[ATTR_PIN_VALUE])
        except SimplipyError as err:
            LOGGER.error("Error during service call: %s", err)
            return
    @verify_system_exists
    @v3_only
    @_verify_domain_control
    async def set_system_properties(call):
        """Set one or more system parameters."""
        system = simplisafe.systems[call.data[ATTR_SYSTEM_ID]]
        try:
            # Pass through every field except the system ID selector itself:
            await system.set_properties(
                {
                    prop: value
                    for prop, value in call.data.items()
                    if prop != ATTR_SYSTEM_ID
                }
            )
        except SimplipyError as err:
            LOGGER.error("Error during service call: %s", err)
            return
    # Register all services as admin-only:
    for service, method, schema in [
        ("clear_notifications", clear_notifications, None),
        ("remove_pin", remove_pin, SERVICE_REMOVE_PIN_SCHEMA),
        ("set_pin", set_pin, SERVICE_SET_PIN_SCHEMA),
        (
            "set_system_properties",
            set_system_properties,
            SERVICE_SET_SYSTEM_PROPERTIES_SCHEMA,
        ),
    ]:
        async_register_admin_service(hass, DOMAIN, service, method, schema=schema)
    config_entry.add_update_listener(async_reload_entry)
    return True
async def async_unload_entry(hass, entry):
    """Unload a SimpliSafe config entry."""
    # Tear down every platform; unloading succeeds only if all of them do.
    unload_tasks = [
        hass.config_entries.async_forward_entry_unload(entry, platform)
        for platform in SUPPORTED_PLATFORMS
    ]
    unload_ok = all(await asyncio.gather(*unload_tasks))
    if unload_ok:
        # Drop the stored client object and detach the stop-event listener:
        hass.data[DOMAIN][DATA_CLIENT].pop(entry.entry_id)
        remove_listener = hass.data[DOMAIN][DATA_LISTENER].pop(entry.entry_id)
        remove_listener()
    return unload_ok
async def async_reload_entry(hass, config_entry):
    """Handle an options update."""
    # Registered as the entry's update listener; a full reload re-runs setup
    # so the new options take effect.
    await hass.config_entries.async_reload(config_entry.entry_id)
class SimpliSafeWebsocket:
    """Manage the simplipy websocket connection on behalf of Home Assistant."""

    def __init__(self, hass, websocket):
        """Initialize with the hass object and a simplipy websocket."""
        self._hass = hass
        self._websocket = websocket

    @staticmethod
    def _on_connect():
        """Log that the websocket has connected."""
        LOGGER.info("Connected to websocket")

    @staticmethod
    def _on_disconnect():
        """Log that the websocket has disconnected."""
        LOGGER.info("Disconnected from websocket")

    def _on_event(self, event):
        """Dispatch an incoming SimpliSafe event.

        Every event is forwarded to interested entities via the dispatcher;
        a subset of event types is additionally fired on the HASS event bus.
        """
        LOGGER.debug("New websocket event: %s", event)
        signal = TOPIC_UPDATE_WEBSOCKET.format(event.system_id)
        async_dispatcher_send(self._hass, signal, event)

        if event.event_type not in WEBSOCKET_EVENTS_TO_TRIGGER_HASS_EVENT:
            return

        sensor_type = event.sensor_type.name if event.sensor_type else None

        event_data = {
            ATTR_LAST_EVENT_CHANGED_BY: event.changed_by,
            ATTR_LAST_EVENT_TYPE: event.event_type,
            ATTR_LAST_EVENT_INFO: event.info,
            ATTR_LAST_EVENT_SENSOR_NAME: event.sensor_name,
            ATTR_LAST_EVENT_SENSOR_SERIAL: event.sensor_serial,
            ATTR_LAST_EVENT_SENSOR_TYPE: sensor_type,
            ATTR_SYSTEM_ID: event.system_id,
            ATTR_LAST_EVENT_TIMESTAMP: event.timestamp,
        }
        self._hass.bus.async_fire(EVENT_SIMPLISAFE_EVENT, event_data=event_data)

    async def async_connect(self):
        """Attach the handlers above and open the websocket connection."""
        self._websocket.on_connect(self._on_connect)
        self._websocket.on_disconnect(self._on_disconnect)
        self._websocket.on_event(self._on_event)
        await self._websocket.async_connect()

    async def async_disconnect(self):
        """Close the websocket connection."""
        await self._websocket.async_disconnect()
class SimpliSafe:
    """Define a SimpliSafe data object.

    Owns the API client, the websocket manager, the per-system notification
    bookkeeping, and the DataUpdateCoordinator used by all entities.
    """
    def __init__(self, hass, api, config_entry):
        """Initialize."""
        self._api = api
        # True while an update cycle is retrying with the stored refresh token:
        self._emergency_refresh_token_used = False
        self._hass = hass
        # Maps system_id -> set of notifications already surfaced to HASS:
        self._system_notifications = {}
        self.config_entry = config_entry
        self.coordinator = None
        # Maps system_id -> the most recent event fetched at startup:
        self.initial_event_to_use = {}
        self.systems = {}
        self.websocket = SimpliSafeWebsocket(hass, api.websocket)
    @callback
    def _async_process_new_notifications(self, system):
        """Act on any new system notifications."""
        if self._hass.state != CoreState.running:
            # If HASS isn't fully running yet, it may cause the SIMPLISAFE_NOTIFICATION
            # event to fire before dependent components (like automation) are fully
            # ready. If that's the case, skip:
            return
        latest_notifications = set(system.notifications)
        # Only fire events for notifications we haven't surfaced before:
        to_add = latest_notifications.difference(
            self._system_notifications[system.system_id]
        )
        if not to_add:
            return
        LOGGER.debug("New system notifications: %s", to_add)
        self._system_notifications[system.system_id].update(to_add)
        for notification in to_add:
            text = notification.text
            if notification.link:
                text = f"{text} For more information: {notification.link}"
            self._hass.bus.async_fire(
                EVENT_SIMPLISAFE_NOTIFICATION,
                event_data={
                    ATTR_CATEGORY: notification.category,
                    ATTR_CODE: notification.code,
                    ATTR_MESSAGE: text,
                    ATTR_TIMESTAMP: notification.timestamp,
                },
            )
    async def async_init(self):
        """Initialize the data class.

        Connects the websocket, fetches the systems, seeds each system's
        initial event, and builds the update coordinator.
        """
        asyncio.create_task(self.websocket.async_connect())
        async def async_websocket_disconnect(_):
            """Define an event handler to disconnect from the websocket."""
            await self.websocket.async_disconnect()
        # Remember the listener's unsubscribe callback so unload can call it:
        self._hass.data[DOMAIN][DATA_LISTENER][
            self.config_entry.entry_id
        ] = self._hass.bus.async_listen_once(
            EVENT_HOMEASSISTANT_STOP, async_websocket_disconnect
        )
        self.systems = await self._api.get_systems()
        for system in self.systems.values():
            self._system_notifications[system.system_id] = set()
            self._hass.async_create_task(
                async_register_base_station(
                    self._hass, system, self.config_entry.entry_id
                )
            )
            # Future events will come from the websocket, but since subscription to the
            # websocket doesn't provide the most recent event, we grab it from the REST
            # API to ensure event-related attributes aren't empty on startup:
            try:
                self.initial_event_to_use[
                    system.system_id
                ] = await system.get_latest_event()
            except SimplipyError as err:
                LOGGER.error("Error while fetching initial event: %s", err)
                self.initial_event_to_use[system.system_id] = {}
        self.coordinator = DataUpdateCoordinator(
            self._hass,
            LOGGER,
            name=self.config_entry.data[CONF_USERNAME],
            update_interval=DEFAULT_SCAN_INTERVAL,
            update_method=self.async_update,
        )
    async def async_update(self):
        """Get updated data from SimpliSafe."""
        async def async_update_system(system):
            """Update a system."""
            # cached=True for non-V3 systems; only V3 forces a fresh fetch:
            await system.update(cached=system.version != 3)
            self._async_process_new_notifications(system)
        tasks = [async_update_system(system) for system in self.systems.values()]
        results = await asyncio.gather(*tasks, return_exceptions=True)
        for result in results:
            if isinstance(result, InvalidCredentialsError):
                if self._emergency_refresh_token_used:
                    # The stored token also failed: start a reauth flow unless
                    # one is already in progress for this entry:
                    matching_flows = [
                        flow
                        for flow in self._hass.config_entries.flow.async_progress()
                        if flow["context"].get("source") == SOURCE_REAUTH
                        and flow["context"].get("unique_id")
                        == self.config_entry.unique_id
                    ]
                    if not matching_flows:
                        self._hass.async_create_task(
                            self._hass.config_entries.flow.async_init(
                                DOMAIN,
                                context={
                                    "source": SOURCE_REAUTH,
                                    "unique_id": self.config_entry.unique_id,
                                },
                                data=self.config_entry.data,
                            )
                        )
                    LOGGER.error("Update failed with stored refresh token")
                    raise UpdateFailed from result
                # First credential failure: retry once with the refresh token
                # saved on the config entry before giving up:
                LOGGER.warning("SimpliSafe cloud error; trying stored refresh token")
                self._emergency_refresh_token_used = True
                try:
                    await self._api.refresh_access_token(
                        self.config_entry.data[CONF_TOKEN]
                    )
                    return
                except SimplipyError as err:
                    LOGGER.error("Error while using stored refresh token: %s", err)
                    raise UpdateFailed from err
            if isinstance(result, EndpointUnavailable):
                # In case the user attempt an action not allowed in their current plan,
                # we merely log that message at INFO level (so the user is aware,
                # but not spammed with ERROR messages that they cannot change):
                LOGGER.info(result)
                raise UpdateFailed from result
            if isinstance(result, SimplipyError):
                LOGGER.error("SimpliSafe error while updating: %s", result)
                raise UpdateFailed from result
            if isinstance(result, Exception):
                LOGGER.error("Unknown error while updating: %s", result)
                raise UpdateFailed from result
        # The API rotates refresh tokens; persist the latest one:
        if self._api.refresh_token != self.config_entry.data[CONF_TOKEN]:
            _async_save_refresh_token(
                self._hass, self.config_entry, self._api.refresh_token
            )
        # If we've reached this point using an emergency refresh token, we're in the
        # clear and we can discard it:
        if self._emergency_refresh_token_used:
            self._emergency_refresh_token_used = False
class SimpliSafeEntity(CoordinatorEntity):
"""Define a base SimpliSafe entity."""
def __init__(self, simplisafe, system, name, *, serial=None):
"""Initialize."""
super().__init__(simplisafe.coordinator)
self._name = name
self._online = True
self._simplisafe = simplisafe
self._system = system
self.websocket_events_to_listen_for = [
EVENT_CONNECTION_LOST,
EVENT_CONNECTION_RESTORED,
]
if serial:
self._serial = serial
else:
self._serial = system.serial
try:
sensor_type = EntityTypes(
simplisafe.initial_event_to_use[system.system_id].get("sensorType")
)
except ValueError:
sensor_type = EntityTypes.unknown
self._attrs = {
ATTR_LAST_EVENT_INFO: simplisafe.initial_event_to_use[system.system_id].get(
"info"
),
ATTR_LAST_EVENT_SENSOR_NAME: simplisafe.initial_event_to_use[
system.system_id
].get("sensorName"),
ATTR_LAST_EVENT_SENSOR_TYPE: sensor_type.name,
ATTR_LAST_EVENT_TIMESTAMP: simplisafe.initial_event_to_use[
system.system_id
].get("eventTimestamp"),
ATTR_SYSTEM_ID: system.system_id,
}
self._device_info = {
"identifiers": {(DOMAIN, system.system_id)},
"manufacturer": "SimpliSafe",
"model": system.version,
"name": name,
"via_device": (DOMAIN, system.serial),
}
@property
def available(self):
"""Return whether the entity is available."""
# We can easily detect if the V3 system is offline, but no simple check exists
# for the V2 system. Therefore, assuming the coordinator hasn't failed, we mark
# the entity as available if:
# 1. We can verify that the system is online (assuming True if we can't)
# 2. We can verify that the entity is online
return not (self._system.version == 3 and self._system.offline) and self._online
    @property
    def device_info(self):
        """Return device registry information for this entity."""
        # Built once in __init__; ties the entity to its system's device.
        return self._device_info
    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        # Seeded from the initial REST event and updated by websocket events.
        return self._attrs
@property
def name(self):
"""Return the name of the entity."""
return f"{self._system.address} {self._name}"
    @property
    def unique_id(self):
        """Return the unique ID of the entity."""
        # The serial passed to __init__ (or the system serial as a fallback).
        return self._serial
@callback
def _async_internal_update_from_websocket_event(self, event):
"""Perform internal websocket handling prior to handing off."""
if event.event_type == EVENT_CONNECTION_LOST:
self._online = False
elif event.event_type == EVENT_CONNECTION_RESTORED:
self._online = True
# It's uncertain whether SimpliSafe events will still propagate down the
# websocket when the base station is offline. Just in case, we guard against
# further action until connection is restored:
if not self._online:
return
if event.sensor_type:
sensor_type = event.sensor_type.name
else:
sensor_type = None
self._attrs.update(
{
ATTR_LAST_EVENT_INFO: event.info,
ATTR_LAST_EVENT_SENSOR_NAME: event.sensor_name,
ATTR_LAST_EVENT_SENSOR_TYPE: sensor_type,
ATTR_LAST_EVENT_TIMESTAMP: event.timestamp,
}
)
self.async_update_from_websocket_event(event)
@callback
def _handle_coordinator_update(self):
"""Update the entity with new REST API data."""
self.async_update_from_rest_api()
self.async_write_ha_state()
@callback
def _handle_websocket_update(self, event):
"""Update the entity with new websocket data."""
# Ignore this event if it belongs to a system other than this one:
if event.system_id != self._system.system_id:
return
# Ignore this event if this entity hasn't expressed interest in its type:
if event.event_type not in self.websocket_events_to_listen_for:
return
# Ignore this event if it belongs to a entity with a different serial
# number from this one's:
if (
event.event_type in WEBSOCKET_EVENTS_REQUIRING_SERIAL
and event.sensor_serial != self._serial
):
return
self._async_internal_update_from_websocket_event(event)
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Register callbacks."""
await super().async_added_to_hass()
self.async_on_remove(
async_dispatcher_connect(
self.hass,
TOPIC_UPDATE_WEBSOCKET.format(self._system.system_id),
self._handle_websocket_update,
)
)
self.async_update_from_rest_api()
@callback
def async_update_from_rest_api(self):
"""Update the entity with the provided REST API data."""
raise NotImplementedError()
@callback
def async_update_from_websocket_event(self, event):
"""Update the entity with the provided websocket event."""
class SimpliSafeBaseSensor(SimpliSafeEntity):
"""Define a SimpliSafe base (binary) sensor."""
def __init__(self, simplisafe, system, sensor):
"""Initialize."""
super().__init__(simplisafe, system, sensor.name, serial=sensor.serial)
self._device_info["identifiers"] = {(DOMAIN, sensor.serial)}
self._device_info["model"] = sensor.type.name
self._device_info["name"] = sensor.name
self._sensor = sensor
self._sensor_type_human_name = " ".join(
[w.title() for w in self._sensor.type.name.split("_")]
)
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._system.address} {self._name} {self._sensor_type_human_name}"
| 34.52324 | 88 | 0.637021 | import asyncio
from uuid import UUID
from simplipy import API
from simplipy.entity import EntityTypes
from simplipy.errors import EndpointUnavailable, InvalidCredentialsError, SimplipyError
from simplipy.websocket import (
EVENT_CAMERA_MOTION_DETECTED,
EVENT_CONNECTION_LOST,
EVENT_CONNECTION_RESTORED,
EVENT_DOORBELL_DETECTED,
EVENT_ENTRY_DETECTED,
EVENT_LOCK_LOCKED,
EVENT_LOCK_UNLOCKED,
EVENT_MOTION_DETECTED,
)
import voluptuous as vol
from homeassistant.config_entries import SOURCE_REAUTH
from homeassistant.const import (
ATTR_CODE,
CONF_CODE,
CONF_TOKEN,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import CoreState, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import (
aiohttp_client,
config_validation as cv,
device_registry as dr,
)
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.service import (
async_register_admin_service,
verify_domain_control,
)
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import (
ATTR_ALARM_DURATION,
ATTR_ALARM_VOLUME,
ATTR_CHIME_VOLUME,
ATTR_ENTRY_DELAY_AWAY,
ATTR_ENTRY_DELAY_HOME,
ATTR_EXIT_DELAY_AWAY,
ATTR_EXIT_DELAY_HOME,
ATTR_LIGHT,
ATTR_VOICE_PROMPT_VOLUME,
DATA_CLIENT,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
LOGGER,
VOLUMES,
)
DATA_LISTENER = "listener"
TOPIC_UPDATE_WEBSOCKET = "simplisafe_update_websocket_{0}"
EVENT_SIMPLISAFE_EVENT = "SIMPLISAFE_EVENT"
EVENT_SIMPLISAFE_NOTIFICATION = "SIMPLISAFE_NOTIFICATION"
DEFAULT_SOCKET_MIN_RETRY = 15
SUPPORTED_PLATFORMS = (
"alarm_control_panel",
"binary_sensor",
"lock",
"sensor",
)
WEBSOCKET_EVENTS_REQUIRING_SERIAL = [EVENT_LOCK_LOCKED, EVENT_LOCK_UNLOCKED]
WEBSOCKET_EVENTS_TO_TRIGGER_HASS_EVENT = [
EVENT_CAMERA_MOTION_DETECTED,
EVENT_DOORBELL_DETECTED,
EVENT_ENTRY_DETECTED,
EVENT_MOTION_DETECTED,
]
ATTR_CATEGORY = "category"
ATTR_LAST_EVENT_CHANGED_BY = "last_event_changed_by"
ATTR_LAST_EVENT_INFO = "last_event_info"
ATTR_LAST_EVENT_SENSOR_NAME = "last_event_sensor_name"
ATTR_LAST_EVENT_SENSOR_SERIAL = "last_event_sensor_serial"
ATTR_LAST_EVENT_SENSOR_TYPE = "last_event_sensor_type"
ATTR_LAST_EVENT_TIMESTAMP = "last_event_timestamp"
ATTR_LAST_EVENT_TYPE = "last_event_type"
ATTR_LAST_EVENT_TYPE = "last_event_type"
ATTR_MESSAGE = "message"
ATTR_PIN_LABEL = "label"
ATTR_PIN_LABEL_OR_VALUE = "label_or_pin"
ATTR_PIN_VALUE = "pin"
ATTR_SYSTEM_ID = "system_id"
ATTR_TIMESTAMP = "timestamp"
SERVICE_BASE_SCHEMA = vol.Schema({vol.Required(ATTR_SYSTEM_ID): cv.positive_int})
SERVICE_REMOVE_PIN_SCHEMA = SERVICE_BASE_SCHEMA.extend(
{vol.Required(ATTR_PIN_LABEL_OR_VALUE): cv.string}
)
SERVICE_SET_PIN_SCHEMA = SERVICE_BASE_SCHEMA.extend(
{vol.Required(ATTR_PIN_LABEL): cv.string, vol.Required(ATTR_PIN_VALUE): cv.string}
)
SERVICE_SET_SYSTEM_PROPERTIES_SCHEMA = SERVICE_BASE_SCHEMA.extend(
{
vol.Optional(ATTR_ALARM_DURATION): vol.All(
cv.time_period, lambda value: value.seconds, vol.Range(min=30, max=480)
),
vol.Optional(ATTR_ALARM_VOLUME): vol.All(vol.Coerce(int), vol.In(VOLUMES)),
vol.Optional(ATTR_CHIME_VOLUME): vol.All(vol.Coerce(int), vol.In(VOLUMES)),
vol.Optional(ATTR_ENTRY_DELAY_AWAY): vol.All(
cv.time_period, lambda value: value.seconds, vol.Range(min=30, max=255)
),
vol.Optional(ATTR_ENTRY_DELAY_HOME): vol.All(
cv.time_period, lambda value: value.seconds, vol.Range(max=255)
),
vol.Optional(ATTR_EXIT_DELAY_AWAY): vol.All(
cv.time_period, lambda value: value.seconds, vol.Range(min=45, max=255)
),
vol.Optional(ATTR_EXIT_DELAY_HOME): vol.All(
cv.time_period, lambda value: value.seconds, vol.Range(max=255)
),
vol.Optional(ATTR_LIGHT): cv.boolean,
vol.Optional(ATTR_VOICE_PROMPT_VOLUME): vol.All(
vol.Coerce(int), vol.In(VOLUMES)
),
}
)
CONFIG_SCHEMA = cv.deprecated(DOMAIN, invalidation_version="0.119")
@callback
def _async_save_refresh_token(hass, config_entry, token):
hass.config_entries.async_update_entry(
config_entry, data={**config_entry.data, CONF_TOKEN: token}
)
async def async_get_client_id(hass):
hass_id = await hass.helpers.instance_id.async_get()
return str(UUID(hass_id))
async def async_register_base_station(hass, system, config_entry_id):
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=config_entry_id,
identifiers={(DOMAIN, system.serial)},
manufacturer="SimpliSafe",
model=system.version,
name=system.address,
)
async def async_setup(hass, config):
hass.data[DOMAIN] = {DATA_CLIENT: {}, DATA_LISTENER: {}}
return True
async def async_setup_entry(hass, config_entry):
entry_updates = {}
if not config_entry.unique_id:
entry_updates["unique_id"] = config_entry.data[CONF_USERNAME]
if CONF_CODE in config_entry.data:
# If an alarm code was provided as part of configuration.yaml, pop it out of
# the config entry's data and move it to options:
data = {**config_entry.data}
entry_updates["data"] = data
entry_updates["options"] = {
**config_entry.options,
CONF_CODE: data.pop(CONF_CODE),
}
if entry_updates:
hass.config_entries.async_update_entry(config_entry, **entry_updates)
_verify_domain_control = verify_domain_control(hass, DOMAIN)
client_id = await async_get_client_id(hass)
websession = aiohttp_client.async_get_clientsession(hass)
try:
api = await API.login_via_token(
config_entry.data[CONF_TOKEN], client_id=client_id, session=websession
)
except InvalidCredentialsError:
LOGGER.error("Invalid credentials provided")
return False
except SimplipyError as err:
LOGGER.error("Config entry failed: %s", err)
raise ConfigEntryNotReady from err
_async_save_refresh_token(hass, config_entry, api.refresh_token)
simplisafe = hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id] = SimpliSafe(
hass, api, config_entry
)
await simplisafe.async_init()
for platform in SUPPORTED_PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, platform)
)
@callback
def verify_system_exists(coro):
async def decorator(call):
system_id = int(call.data[ATTR_SYSTEM_ID])
if system_id not in simplisafe.systems:
LOGGER.error("Unknown system ID in service call: %s", system_id)
return
await coro(call)
return decorator
@callback
def v3_only(coro):
async def decorator(call):
system = simplisafe.systems[int(call.data[ATTR_SYSTEM_ID])]
if system.version != 3:
LOGGER.error("Service only available on V3 systems")
return
await coro(call)
return decorator
@verify_system_exists
@_verify_domain_control
async def clear_notifications(call):
system = simplisafe.systems[call.data[ATTR_SYSTEM_ID]]
try:
await system.clear_notifications()
except SimplipyError as err:
LOGGER.error("Error during service call: %s", err)
return
@verify_system_exists
@_verify_domain_control
async def remove_pin(call):
system = simplisafe.systems[call.data[ATTR_SYSTEM_ID]]
try:
await system.remove_pin(call.data[ATTR_PIN_LABEL_OR_VALUE])
except SimplipyError as err:
LOGGER.error("Error during service call: %s", err)
return
@verify_system_exists
@_verify_domain_control
async def set_pin(call):
system = simplisafe.systems[call.data[ATTR_SYSTEM_ID]]
try:
await system.set_pin(call.data[ATTR_PIN_LABEL], call.data[ATTR_PIN_VALUE])
except SimplipyError as err:
LOGGER.error("Error during service call: %s", err)
return
@verify_system_exists
@v3_only
@_verify_domain_control
async def set_system_properties(call):
system = simplisafe.systems[call.data[ATTR_SYSTEM_ID]]
try:
await system.set_properties(
{
prop: value
for prop, value in call.data.items()
if prop != ATTR_SYSTEM_ID
}
)
except SimplipyError as err:
LOGGER.error("Error during service call: %s", err)
return
for service, method, schema in [
("clear_notifications", clear_notifications, None),
("remove_pin", remove_pin, SERVICE_REMOVE_PIN_SCHEMA),
("set_pin", set_pin, SERVICE_SET_PIN_SCHEMA),
(
"set_system_properties",
set_system_properties,
SERVICE_SET_SYSTEM_PROPERTIES_SCHEMA,
),
]:
async_register_admin_service(hass, DOMAIN, service, method, schema=schema)
config_entry.add_update_listener(async_reload_entry)
return True
async def async_unload_entry(hass, entry):
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in SUPPORTED_PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN][DATA_CLIENT].pop(entry.entry_id)
remove_listener = hass.data[DOMAIN][DATA_LISTENER].pop(entry.entry_id)
remove_listener()
return unload_ok
async def async_reload_entry(hass, config_entry):
await hass.config_entries.async_reload(config_entry.entry_id)
class SimpliSafeWebsocket:
def __init__(self, hass, websocket):
self._hass = hass
self._websocket = websocket
@staticmethod
def _on_connect():
LOGGER.info("Connected to websocket")
@staticmethod
def _on_disconnect():
LOGGER.info("Disconnected from websocket")
def _on_event(self, event):
LOGGER.debug("New websocket event: %s", event)
async_dispatcher_send(
self._hass, TOPIC_UPDATE_WEBSOCKET.format(event.system_id), event
)
if event.event_type not in WEBSOCKET_EVENTS_TO_TRIGGER_HASS_EVENT:
return
if event.sensor_type:
sensor_type = event.sensor_type.name
else:
sensor_type = None
self._hass.bus.async_fire(
EVENT_SIMPLISAFE_EVENT,
event_data={
ATTR_LAST_EVENT_CHANGED_BY: event.changed_by,
ATTR_LAST_EVENT_TYPE: event.event_type,
ATTR_LAST_EVENT_INFO: event.info,
ATTR_LAST_EVENT_SENSOR_NAME: event.sensor_name,
ATTR_LAST_EVENT_SENSOR_SERIAL: event.sensor_serial,
ATTR_LAST_EVENT_SENSOR_TYPE: sensor_type,
ATTR_SYSTEM_ID: event.system_id,
ATTR_LAST_EVENT_TIMESTAMP: event.timestamp,
},
)
async def async_connect(self):
self._websocket.on_connect(self._on_connect)
self._websocket.on_disconnect(self._on_disconnect)
self._websocket.on_event(self._on_event)
await self._websocket.async_connect()
async def async_disconnect(self):
await self._websocket.async_disconnect()
class SimpliSafe:
def __init__(self, hass, api, config_entry):
self._api = api
self._emergency_refresh_token_used = False
self._hass = hass
self._system_notifications = {}
self.config_entry = config_entry
self.coordinator = None
self.initial_event_to_use = {}
self.systems = {}
self.websocket = SimpliSafeWebsocket(hass, api.websocket)
@callback
def _async_process_new_notifications(self, system):
if self._hass.state != CoreState.running:
# event to fire before dependent components (like automation) are fully
# ready. If that's the case, skip:
return
latest_notifications = set(system.notifications)
to_add = latest_notifications.difference(
self._system_notifications[system.system_id]
)
if not to_add:
return
LOGGER.debug("New system notifications: %s", to_add)
self._system_notifications[system.system_id].update(to_add)
for notification in to_add:
text = notification.text
if notification.link:
text = f"{text} For more information: {notification.link}"
self._hass.bus.async_fire(
EVENT_SIMPLISAFE_NOTIFICATION,
event_data={
ATTR_CATEGORY: notification.category,
ATTR_CODE: notification.code,
ATTR_MESSAGE: text,
ATTR_TIMESTAMP: notification.timestamp,
},
)
async def async_init(self):
asyncio.create_task(self.websocket.async_connect())
async def async_websocket_disconnect(_):
await self.websocket.async_disconnect()
self._hass.data[DOMAIN][DATA_LISTENER][
self.config_entry.entry_id
] = self._hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, async_websocket_disconnect
)
self.systems = await self._api.get_systems()
for system in self.systems.values():
self._system_notifications[system.system_id] = set()
self._hass.async_create_task(
async_register_base_station(
self._hass, system, self.config_entry.entry_id
)
)
# API to ensure event-related attributes aren't empty on startup:
try:
self.initial_event_to_use[
system.system_id
] = await system.get_latest_event()
except SimplipyError as err:
LOGGER.error("Error while fetching initial event: %s", err)
self.initial_event_to_use[system.system_id] = {}
self.coordinator = DataUpdateCoordinator(
self._hass,
LOGGER,
name=self.config_entry.data[CONF_USERNAME],
update_interval=DEFAULT_SCAN_INTERVAL,
update_method=self.async_update,
)
async def async_update(self):
async def async_update_system(system):
await system.update(cached=system.version != 3)
self._async_process_new_notifications(system)
tasks = [async_update_system(system) for system in self.systems.values()]
results = await asyncio.gather(*tasks, return_exceptions=True)
for result in results:
if isinstance(result, InvalidCredentialsError):
if self._emergency_refresh_token_used:
matching_flows = [
flow
for flow in self._hass.config_entries.flow.async_progress()
if flow["context"].get("source") == SOURCE_REAUTH
and flow["context"].get("unique_id")
== self.config_entry.unique_id
]
if not matching_flows:
self._hass.async_create_task(
self._hass.config_entries.flow.async_init(
DOMAIN,
context={
"source": SOURCE_REAUTH,
"unique_id": self.config_entry.unique_id,
},
data=self.config_entry.data,
)
)
LOGGER.error("Update failed with stored refresh token")
raise UpdateFailed from result
LOGGER.warning("SimpliSafe cloud error; trying stored refresh token")
self._emergency_refresh_token_used = True
try:
await self._api.refresh_access_token(
self.config_entry.data[CONF_TOKEN]
)
return
except SimplipyError as err:
LOGGER.error("Error while using stored refresh token: %s", err)
raise UpdateFailed from err
if isinstance(result, EndpointUnavailable):
LOGGER.info(result)
raise UpdateFailed from result
if isinstance(result, SimplipyError):
LOGGER.error("SimpliSafe error while updating: %s", result)
raise UpdateFailed from result
if isinstance(result, Exception):
LOGGER.error("Unknown error while updating: %s", result)
raise UpdateFailed from result
if self._api.refresh_token != self.config_entry.data[CONF_TOKEN]:
_async_save_refresh_token(
self._hass, self.config_entry, self._api.refresh_token
)
if self._emergency_refresh_token_used:
self._emergency_refresh_token_used = False
class SimpliSafeEntity(CoordinatorEntity):
def __init__(self, simplisafe, system, name, *, serial=None):
super().__init__(simplisafe.coordinator)
self._name = name
self._online = True
self._simplisafe = simplisafe
self._system = system
self.websocket_events_to_listen_for = [
EVENT_CONNECTION_LOST,
EVENT_CONNECTION_RESTORED,
]
if serial:
self._serial = serial
else:
self._serial = system.serial
try:
sensor_type = EntityTypes(
simplisafe.initial_event_to_use[system.system_id].get("sensorType")
)
except ValueError:
sensor_type = EntityTypes.unknown
self._attrs = {
ATTR_LAST_EVENT_INFO: simplisafe.initial_event_to_use[system.system_id].get(
"info"
),
ATTR_LAST_EVENT_SENSOR_NAME: simplisafe.initial_event_to_use[
system.system_id
].get("sensorName"),
ATTR_LAST_EVENT_SENSOR_TYPE: sensor_type.name,
ATTR_LAST_EVENT_TIMESTAMP: simplisafe.initial_event_to_use[
system.system_id
].get("eventTimestamp"),
ATTR_SYSTEM_ID: system.system_id,
}
self._device_info = {
"identifiers": {(DOMAIN, system.system_id)},
"manufacturer": "SimpliSafe",
"model": system.version,
"name": name,
"via_device": (DOMAIN, system.serial),
}
@property
def available(self):
# the entity as available if:
# 1. We can verify that the system is online (assuming True if we can't)
return not (self._system.version == 3 and self._system.offline) and self._online
@property
def device_info(self):
return self._device_info
@property
def device_state_attributes(self):
return self._attrs
@property
def name(self):
return f"{self._system.address} {self._name}"
@property
def unique_id(self):
return self._serial
@callback
def _async_internal_update_from_websocket_event(self, event):
if event.event_type == EVENT_CONNECTION_LOST:
self._online = False
elif event.event_type == EVENT_CONNECTION_RESTORED:
self._online = True
# websocket when the base station is offline. Just in case, we guard against
# further action until connection is restored:
if not self._online:
return
if event.sensor_type:
sensor_type = event.sensor_type.name
else:
sensor_type = None
self._attrs.update(
{
ATTR_LAST_EVENT_INFO: event.info,
ATTR_LAST_EVENT_SENSOR_NAME: event.sensor_name,
ATTR_LAST_EVENT_SENSOR_TYPE: sensor_type,
ATTR_LAST_EVENT_TIMESTAMP: event.timestamp,
}
)
self.async_update_from_websocket_event(event)
@callback
def _handle_coordinator_update(self):
self.async_update_from_rest_api()
self.async_write_ha_state()
@callback
def _handle_websocket_update(self, event):
# Ignore this event if it belongs to a system other than this one:
if event.system_id != self._system.system_id:
return
# Ignore this event if this entity hasn't expressed interest in its type:
if event.event_type not in self.websocket_events_to_listen_for:
return
if (
event.event_type in WEBSOCKET_EVENTS_REQUIRING_SERIAL
and event.sensor_serial != self._serial
):
return
self._async_internal_update_from_websocket_event(event)
self.async_write_ha_state()
async def async_added_to_hass(self):
await super().async_added_to_hass()
self.async_on_remove(
async_dispatcher_connect(
self.hass,
TOPIC_UPDATE_WEBSOCKET.format(self._system.system_id),
self._handle_websocket_update,
)
)
self.async_update_from_rest_api()
@callback
def async_update_from_rest_api(self):
raise NotImplementedError()
@callback
def async_update_from_websocket_event(self, event):
class SimpliSafeBaseSensor(SimpliSafeEntity):
def __init__(self, simplisafe, system, sensor):
super().__init__(simplisafe, system, sensor.name, serial=sensor.serial)
self._device_info["identifiers"] = {(DOMAIN, sensor.serial)}
self._device_info["model"] = sensor.type.name
self._device_info["name"] = sensor.name
self._sensor = sensor
self._sensor_type_human_name = " ".join(
[w.title() for w in self._sensor.type.name.split("_")]
)
@property
def name(self):
return f"{self._system.address} {self._name} {self._sensor_type_human_name}"
| true | true |
f71e69e86ee5de49d09df8256001723a4356642b | 4,280 | py | Python | parsing/tracking_logs/generate_course_tracking_logs.py | andyzsf/edx_data_research | 07a587edb1dc4797f116bfeb60ffecbf4ce5bd7a | [
"MIT"
] | null | null | null | parsing/tracking_logs/generate_course_tracking_logs.py | andyzsf/edx_data_research | 07a587edb1dc4797f116bfeb60ffecbf4ce5bd7a | [
"MIT"
] | null | null | null | parsing/tracking_logs/generate_course_tracking_logs.py | andyzsf/edx_data_research | 07a587edb1dc4797f116bfeb60ffecbf4ce5bd7a | [
"MIT"
] | null | null | null | '''
This module will extract tracking logs for a given course and date range
between when course enrollment start and when the course ended. For each log,
the parent_data and meta_data from the course_structure collection will be
appended to the log based on the event key in the log
'''
import pymongo
import sys
from datetime import datetime
import json
def connect_to_db_collection(db_name, collection_name):
'''
Return collection of a given database name and collection name
'''
connection = pymongo.Connection('localhost', 27017)
db = connection[db_name]
collection = db[collection_name]
return collection
def load_config(config_file):
'''
Return course ids and ranges of dates from which course specific tracking
logs will be extracted
'''
with open(config_file) as file_handler:
data = json.load(file_handler)
if not isinstance(data['course_ids'], list):
raise ValueError('Expecting list of course ids')
try:
start_date = datetime.strptime(data['date_of_course_enrollment'], '%Y-%m-%d')
end_date = datetime.strptime(data['date_of_course_completion'], '%Y-%m-%d')
except ValueError:
raise ValueError('Incorrect data format, should be YYYY-MM-DD')
return data['course_ids'], start_date.date(), end_date.date()
def append_course_structure_data(course_structure_collection, _id, document):
'''
Append parent_data and metadata (if exists) from course structure to
tracking log
'''
try:
data = course_structure_collection.find({"_id" : _id})[0]
if 'parent_data' in data:
document['parent_data'] = data['parent_data']
if 'metadata' in data:
document['metadata'] = data['metadata']
except:
pass
def extract_tracking_logs(source_collection, destination_collection, course_structure_collection, course_ids, start_date, end_date):
'''
Return all trackings logs that contain given ids and that contain dates
within the given range
'''
documents = source_collection.find({'course_id' : { '$in' : course_ids }})
for document in documents:
if start_date <= datetime.strptime(document['time'].split('T')[0], "%Y-%m-%d").date() <= end_date:
# Bind parent_data and metadata from course_structure to tracking document
bound = False
if document['event']:
if isinstance(document['event'], dict):
if 'id' in document['event']:
splitted = document['event']['id'].split('-')
if len(splitted) > 3:
document['event']['id'] = splitted[-1]
if not bound:
append_course_structure_data(course_structure_collection, document['event']['id'], document)
bound = True
if document['page']:
splitted = document['page'].split('/')
if len(splitted) > 2:
document['page'] = splitted[-2]
if not bound:
append_course_structure_data(course_structure_collection, document['page'], document)
# End of binding, now insert document into collection
destination_collection.insert(document)
def main():
if len(sys.argv) != 6:
usage_message = """usage: %s source_db destination_db course_config_file
Provide name of course database to insert tracking logs to and
config file to load configurations\n
"""
sys.stderr.write(usage_message % sys.argv[0])
sys.exit(1)
source_db = sys.argv[1]
destination_db = sys.argv[2]
source_collection = connect_to_db_collection(source_db, 'tracking')
destination_collection = connect_to_db_collection(destination_db, 'tracking')
course_structure_collection = connect_to_db_collection(destination_db, 'course_structure')
course_ids, start_date, end_date = load_config(sys.argv[3])
extract_tracking_logs(source_collection, destination_collection, course_structure_collection, course_ids, start_date, end_date)
if __name__ == '__main__':
main()
| 40 | 132 | 0.642991 |
import pymongo
import sys
from datetime import datetime
import json
def connect_to_db_collection(db_name, collection_name):
connection = pymongo.Connection('localhost', 27017)
db = connection[db_name]
collection = db[collection_name]
return collection
def load_config(config_file):
with open(config_file) as file_handler:
data = json.load(file_handler)
if not isinstance(data['course_ids'], list):
raise ValueError('Expecting list of course ids')
try:
start_date = datetime.strptime(data['date_of_course_enrollment'], '%Y-%m-%d')
end_date = datetime.strptime(data['date_of_course_completion'], '%Y-%m-%d')
except ValueError:
raise ValueError('Incorrect data format, should be YYYY-MM-DD')
return data['course_ids'], start_date.date(), end_date.date()
def append_course_structure_data(course_structure_collection, _id, document):
try:
data = course_structure_collection.find({"_id" : _id})[0]
if 'parent_data' in data:
document['parent_data'] = data['parent_data']
if 'metadata' in data:
document['metadata'] = data['metadata']
except:
pass
def extract_tracking_logs(source_collection, destination_collection, course_structure_collection, course_ids, start_date, end_date):
documents = source_collection.find({'course_id' : { '$in' : course_ids }})
for document in documents:
if start_date <= datetime.strptime(document['time'].split('T')[0], "%Y-%m-%d").date() <= end_date:
bound = False
if document['event']:
if isinstance(document['event'], dict):
if 'id' in document['event']:
splitted = document['event']['id'].split('-')
if len(splitted) > 3:
document['event']['id'] = splitted[-1]
if not bound:
append_course_structure_data(course_structure_collection, document['event']['id'], document)
bound = True
if document['page']:
splitted = document['page'].split('/')
if len(splitted) > 2:
document['page'] = splitted[-2]
if not bound:
append_course_structure_data(course_structure_collection, document['page'], document)
destination_collection.insert(document)
def main():
if len(sys.argv) != 6:
usage_message = """usage: %s source_db destination_db course_config_file
Provide name of course database to insert tracking logs to and
config file to load configurations\n
"""
sys.stderr.write(usage_message % sys.argv[0])
sys.exit(1)
source_db = sys.argv[1]
destination_db = sys.argv[2]
source_collection = connect_to_db_collection(source_db, 'tracking')
destination_collection = connect_to_db_collection(destination_db, 'tracking')
course_structure_collection = connect_to_db_collection(destination_db, 'course_structure')
course_ids, start_date, end_date = load_config(sys.argv[3])
extract_tracking_logs(source_collection, destination_collection, course_structure_collection, course_ids, start_date, end_date)
if __name__ == '__main__':
main()
| true | true |
f71e6aa4cc52a591b3bd382a510f0d28274d8670 | 2,419 | py | Python | tests/test_endians.py | bwoodsend/rockhopper | 7a4de06de69d68beed6f0f5d2961e8fb07fd62e6 | [
"MIT"
] | 1 | 2021-07-07T18:21:38.000Z | 2021-07-07T18:21:38.000Z | tests/test_endians.py | bwoodsend/rockhopper | 7a4de06de69d68beed6f0f5d2961e8fb07fd62e6 | [
"MIT"
] | null | null | null | tests/test_endians.py | bwoodsend/rockhopper | 7a4de06de69d68beed6f0f5d2961e8fb07fd62e6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
"""
import sys
import ctypes
import pytest
from rockhopper._ragged_array import slug
pytestmark = pytest.mark.order(1)
def log_range(start, stop, base):
while start < stop:
# These sequences give a quick way to test the full range of an
# integer type.
yield start
start *= base
def test_log_range():
assert list(log_range(1, 10, 3)) == [1, 3, 9]
@pytest.mark.parametrize("int_base", range(4))
def test_endian_swap(int_base):
"""Test the family of :c:`swap_endian_xx()` functions."""
bytes = (1 << int_base)
bits = bytes * 8
swap = getattr(slug.dll, f"swap_endian_{bits}")
for i in log_range(1, 1 << bytes, 3):
assert swap(i).to_bytes(bytes, "big") == i.to_bytes(bytes, "little")
def test_is_big_endian():
"""Test :c:`is_big_endian()` matched :attr:`sys.byteorder`."""
assert slug.dll.is_big_endian() == (sys.byteorder == "big")
def f_ptr(f):
"""Get the raw memory address of a :mod:`ctypes` function pointer."""
return ctypes.cast(f, ctypes.c_void_p).value
@pytest.mark.parametrize("int_base", range(4))
@pytest.mark.parametrize("byteorder", ["little", "big"])
def test_int_write(int_base, byteorder):
"""
Test the family of :c:`write_xx()` and :c:`write_swap_xx()` integer
writing functions and the selector :c:`choose_int_write()`.
"""
bytes = 1 << int_base
bits = 8 * bytes
native = sys.byteorder == byteorder
# The real return type of `choose_int_write()` is `IntWrite` which is a
# typedef (which cslug doesn't support) to a function pointer (which
# cslug also doesn't support). We only need to test which function it
# returns so raw a void pointer is sufficient.
slug.dll.choose_int_write.restype = ctypes.c_void_p
# Get the writer we expect to get.
name = f"write_{bits}" if native else f"write_swap_{bits}"
write = getattr(slug.dll, name)
# Check it matches the output of choose_int_write()`.
assert slug.dll.choose_int_write(int_base,
byteorder == "big") == f_ptr(write)
# Try writing an integer with it.
x = 0x1122334455667788 & ((1 << bits) - 1)
out = ctypes.create_string_buffer(bytes)
write(x, out)
assert list(out[:]) == list(x.to_bytes(bytes, byteorder))
read = getattr(slug.dll, name.replace("write", "read"))
assert read(out) == x
| 29.5 | 76 | 0.646135 |
import sys
import ctypes
import pytest
from rockhopper._ragged_array import slug
pytestmark = pytest.mark.order(1)
def log_range(start, stop, base):
while start < stop:
yield start
start *= base
def test_log_range():
assert list(log_range(1, 10, 3)) == [1, 3, 9]
@pytest.mark.parametrize("int_base", range(4))
def test_endian_swap(int_base):
bytes = (1 << int_base)
bits = bytes * 8
swap = getattr(slug.dll, f"swap_endian_{bits}")
for i in log_range(1, 1 << bytes, 3):
assert swap(i).to_bytes(bytes, "big") == i.to_bytes(bytes, "little")
def test_is_big_endian():
assert slug.dll.is_big_endian() == (sys.byteorder == "big")
def f_ptr(f):
return ctypes.cast(f, ctypes.c_void_p).value
@pytest.mark.parametrize("int_base", range(4))
@pytest.mark.parametrize("byteorder", ["little", "big"])
def test_int_write(int_base, byteorder):
bytes = 1 << int_base
bits = 8 * bytes
native = sys.byteorder == byteorder
# cslug also doesn't support). We only need to test which function it
slug.dll.choose_int_write.restype = ctypes.c_void_p
name = f"write_{bits}" if native else f"write_swap_{bits}"
write = getattr(slug.dll, name)
assert slug.dll.choose_int_write(int_base,
byteorder == "big") == f_ptr(write)
x = 0x1122334455667788 & ((1 << bits) - 1)
out = ctypes.create_string_buffer(bytes)
write(x, out)
assert list(out[:]) == list(x.to_bytes(bytes, byteorder))
read = getattr(slug.dll, name.replace("write", "read"))
assert read(out) == x
| true | true |
f71e6b23d7feba00b1926a7f43eb59a12d8f6a40 | 5,720 | py | Python | utils/catgirlmoe.py | Papyrus12/nekosu | d83315caf764376361cb9680b4297a0061d02305 | [
"MIT"
] | null | null | null | utils/catgirlmoe.py | Papyrus12/nekosu | d83315caf764376361cb9680b4297a0061d02305 | [
"MIT"
] | null | null | null | utils/catgirlmoe.py | Papyrus12/nekosu | d83315caf764376361cb9680b4297a0061d02305 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import re
from cmyui.discord import Webhook
from cmyui.discord import Embed
from objects import glob
from objects.score import Score
from objects.score import Grade
from objects.player import Player
from objects.beatmap import Beatmap
from objects.match import Match
from objects.match import Slot
from constants.gamemodes import GameMode
CHAT_HOOK = glob.config.webhooks['chat-bridge']
SCORE_HOOK = glob.config.webhooks['score-log']
GRADE_EMOTES = {
Grade.XH: "<:grade_xh:833673474836660265>",
Grade.SH: "<:grade_sh:833673474277900318>",
Grade.X: "<:grade_x:833673474270167060>",
Grade.S: "<:grade_s:833673474022572032>",
Grade.A: "<:grade_a:833673433941934091>",
Grade.B: "<:grade_b:833673434122289172>",
Grade.C: "<:grade_c:833673433656721418>",
Grade.D: "<:grade_d:833673433408733194>",
Grade.F: "",
Grade.N: ""
}
GRADE_COLORS = {
Grade.XH: 0xE0F7FA, #Silver SS
Grade.SH: 0xE0F7FA, #Silver S
Grade.X: 0xFFEB3B, #SS
Grade.S: 0xFFEB3B, #S
Grade.A: 0x8BC34A,
Grade.B: 0x2196F3,
Grade.C: 0x9C27B0,
Grade.D: 0xF44336,
Grade.F: 0x212121,
Grade.N: 0x212121
}
MOD_EMOTES = {
'NF': "<:nf:833699841955201114>",
'EZ': "<:ez:833699842118647819>",
'TD': "TD",
'HD': "<:hd:833699841741422642>",
'HR': "<:hr:833699841644691456>",
'SD': "<:sd:833699840999424041>",
'DT': "<:dt:833699841741422645>",
'RX': "<:rx:833699841267597343>",
'HT': "<:ht:833699842022178847>",
'NC': "<:nc:833699841489895488>",
'FL': "<:fl:833699841510211588>",
'AU': "<:au:833699842269642762>",
'SO': "<:so:833699841287782441>",
'AP': "<:ap:833699842177368125>",
'PF': "<:pf:833699841510211585>",
'FI': "FI",
'RN': "RN",
'CN': "<:cn:833699841955201115>",
'TP': "<:tp:833699841288699944>",
'V2': "V2",
'MR': "MR",
'1K': "1K",
'2K': "2K",
'3K': "3K",
'4K': "4K",
'5K': "5K",
'6K': "6K",
'7K': "7K",
'8K': "8K",
'9K': "9K",
'CO': "CO",
}
MODE_EMOTES = {
GameMode.vn_std: "std",
GameMode.vn_taiko: "taiko",
GameMode.vn_catch: "catch",
GameMode.vn_mania: "mania",
GameMode.rx_std: "std (Rx)",
GameMode.rx_taiko: "taiko (Rx)",
GameMode.rx_catch: "catch (Rx)",
GameMode.ap_std: "std (Ap)",
}
def sanitize(m: str):
    """Neutralise Discord mentions by wrapping every ``@`` in brackets."""
    return "[@]".join(m.split("@"))
async def sendSubmitScore(s: Score):
    """Post a newly submitted score as an embed on the score-log webhook."""
    wh = Webhook(url=SCORE_HOOK)
    diff=[f'{s.sr:.2f}★']
    if s.mods:
        # repr(s.mods) appears to be a concatenated two-char mod string
        # ("HDHR", ...): split it into pairs and map each to its emote.
        # "DTNC" is collapsed to "NC" first (NC already implies DT).
        diff.insert(1, f'({"".join(map(lambda mod: MOD_EMOTES[mod], re.findall("..",repr(s.mods).replace("DTNC","NC"))))})')
    e = Embed(title=s.bmap.full, url=f'https://osu.ppy.sh/b/{s.bmap.id}',color=GRADE_COLORS[s.grade])
    e.set_author(name=f'{s.player.name} achieved #{s.rank} in {MODE_EMOTES[s.mode]}', url=f'https://osu.catgirl.moe/u/{s.player.id}', icon_url=f'https://a.osu.catgirl.moe/{s.player.id}')
    e.add_field("Difficulty:", ' '.join(diff), True)
    e.add_field("Accuracy:", f'{s.acc:.2f}% {GRADE_EMOTES[s.grade]} ({s.pp:,.2f}pp)', True)
    e.add_field("Score:", f'{s.score:,} ({s.max_combo:,}/{s.bmap.max_combo:,}x)', True)
    e.set_image(url=f'https://assets.ppy.sh/beatmaps/{s.bmap.set_id}/covers/cover.jpg')
    wh.add_embed(e)
    await wh.post(glob.http)
async def sendLogin(p: Player):
    """Announce a player's login on the chat-bridge webhook."""
    text = f'📥 **{sanitize(p.name)}** has joined the game.'
    await Webhook(url=CHAT_HOOK, content=text).post(glob.http)
async def sendLogout(p: Player):
    """Announce a player's logout on the chat-bridge webhook."""
    text = f'📤 **{sanitize(p.name)}** has left the game.'
    await Webhook(url=CHAT_HOOK, content=text).post(glob.http)
async def sendRankMap(p: Player, b: Beatmap, s: str):
    """Announce a map status change; ``s`` is the verb supplied by the caller
    (interpolated as "{player} {s} a map")."""
    wh = Webhook(url=CHAT_HOOK)
    e = Embed(title=b.full, url=f'https://osu.ppy.sh/b/{b.id}', color=0xE91E63)
    e.set_author(name=f'{p.name} {s} a map', url=f'https://osu.catgirl.moe/u/{p.id}', icon_url=f'https://a.osu.catgirl.moe/{p.id}')
    e.set_image(url=f'https://assets.ppy.sh/beatmaps/{b.set_id}/covers/cover.jpg')
    wh.add_embed(e)
    await wh.post(glob.http)
async def sendSendMessage(p: Player, m: str):
    """Mirror an in-game chat message to Discord under the sender's identity."""
    hook = Webhook(
        url=CHAT_HOOK,
        username=p.name,
        avatar_url=f'https://a.osu.catgirl.moe/{p.id}',
        content=sanitize(m),
    )
    await hook.post(glob.http)
async def sendMatchCreate(p: Player, m: Match):
    """Announce the creation of a multiplayer lobby on the chat bridge."""
    text = f'⭐ **{sanitize(p.name)}** created lobby *"{sanitize(m.name)}"*.'
    await Webhook(url=CHAT_HOOK, content=text).post(glob.http)
async def sendMatchJoin(p: Player, m: Match):
    """Announce a player joining a multiplayer lobby on the chat bridge."""
    text = f'➡️ **{sanitize(p.name)}** joined lobby *"{sanitize(m.name)}"*.'
    await Webhook(url=CHAT_HOOK, content=text).post(glob.http)
async def sendMatchPart(p: Player, m: Match):
    """Announce a player leaving a multiplayer lobby on the chat bridge."""
    text = f'⬅️ **{sanitize(p.name)}** left lobby *"{sanitize(m.name)}"*.'
    await Webhook(url=CHAT_HOOK, content=text).post(glob.http)
async def sendMatchComplete(slots: list[Slot], m: Match):
    """Post a per-player scoreboard to Discord once a multiplayer map ends.

    Waits for every slot's score submission; if nobody submitted a score,
    nothing is posted.  (Leftover debug ``print`` calls removed.)
    """
    submitted, _not_submitted = await m.await_submissions(slots)
    if not submitted:
        return

    player_names = []
    player_accuracy = []
    player_scores = []
    wh = Webhook(url=CHAT_HOOK)
    bmap = next(iter(submitted)).recent_score.bmap
    e = Embed(title=bmap.full, url=f'https://osu.ppy.sh/b/{bmap.id}',color=0xF44336)
    # Rank players by their most recent score, highest first.
    ranked = sorted(submitted, key=lambda player: player.recent_score.score,
                    reverse=True)
    for p in ranked:
        s = p.recent_score
        player_names.append(p.name)
        player_accuracy.append(f'{s.acc:.2f}% {GRADE_EMOTES[s.grade]} ({s.pp:,.2f}pp)')
        player_scores.append(f'{s.score:,} ({s.max_combo:,}/{s.bmap.max_combo:,}x)')
    e.set_author(name=f'Lobby "{sanitize(m.name)}" finished a map')
    e.add_field("Players:", '\n'.join(player_names), True)
    e.add_field("Accuracy:", '\n'.join(player_accuracy), True)
    e.add_field("Score:", '\n'.join(player_scores), True)
    e.set_image(url=f'https://assets.ppy.sh/beatmaps/{bmap.set_id}/covers/cover.jpg')
    wh.add_embed(e)
    await wh.post(glob.http)
import re
from cmyui.discord import Webhook
from cmyui.discord import Embed
from objects import glob
from objects.score import Score
from objects.score import Grade
from objects.player import Player
from objects.beatmap import Beatmap
from objects.match import Match
from objects.match import Slot
from constants.gamemodes import GameMode
CHAT_HOOK = glob.config.webhooks['chat-bridge']
SCORE_HOOK = glob.config.webhooks['score-log']
GRADE_EMOTES = {
Grade.XH: "<:grade_xh:833673474836660265>",
Grade.SH: "<:grade_sh:833673474277900318>",
Grade.X: "<:grade_x:833673474270167060>",
Grade.S: "<:grade_s:833673474022572032>",
Grade.A: "<:grade_a:833673433941934091>",
Grade.B: "<:grade_b:833673434122289172>",
Grade.C: "<:grade_c:833673433656721418>",
Grade.D: "<:grade_d:833673433408733194>",
Grade.F: "",
Grade.N: ""
}
GRADE_COLORS = {
Grade.XH: 0xE0F7FA,
Grade.SH: 0xE0F7FA,
Grade.X: 0xFFEB3B,
Grade.S: 0xFFEB3B,
Grade.A: 0x8BC34A,
Grade.B: 0x2196F3,
Grade.C: 0x9C27B0,
Grade.D: 0xF44336,
Grade.F: 0x212121,
Grade.N: 0x212121
}
MOD_EMOTES = {
'NF': "<:nf:833699841955201114>",
'EZ': "<:ez:833699842118647819>",
'TD': "TD",
'HD': "<:hd:833699841741422642>",
'HR': "<:hr:833699841644691456>",
'SD': "<:sd:833699840999424041>",
'DT': "<:dt:833699841741422645>",
'RX': "<:rx:833699841267597343>",
'HT': "<:ht:833699842022178847>",
'NC': "<:nc:833699841489895488>",
'FL': "<:fl:833699841510211588>",
'AU': "<:au:833699842269642762>",
'SO': "<:so:833699841287782441>",
'AP': "<:ap:833699842177368125>",
'PF': "<:pf:833699841510211585>",
'FI': "FI",
'RN': "RN",
'CN': "<:cn:833699841955201115>",
'TP': "<:tp:833699841288699944>",
'V2': "V2",
'MR': "MR",
'1K': "1K",
'2K': "2K",
'3K': "3K",
'4K': "4K",
'5K': "5K",
'6K': "6K",
'7K': "7K",
'8K': "8K",
'9K': "9K",
'CO': "CO",
}
MODE_EMOTES = {
GameMode.vn_std: "std",
GameMode.vn_taiko: "taiko",
GameMode.vn_catch: "catch",
GameMode.vn_mania: "mania",
GameMode.rx_std: "std (Rx)",
GameMode.rx_taiko: "taiko (Rx)",
GameMode.rx_catch: "catch (Rx)",
GameMode.ap_std: "std (Ap)",
}
def sanitize(m: str):
return m.replace("@", "[@]")
async def sendSubmitScore(s: Score):
wh = Webhook(url=SCORE_HOOK)
diff=[f'{s.sr:.2f}★']
if s.mods:
diff.insert(1, f'({"".join(map(lambda mod: MOD_EMOTES[mod], re.findall("..",repr(s.mods).replace("DTNC","NC"))))})')
e = Embed(title=s.bmap.full, url=f'https://osu.ppy.sh/b/{s.bmap.id}',color=GRADE_COLORS[s.grade])
e.set_author(name=f'{s.player.name} achieved #{s.rank} in {MODE_EMOTES[s.mode]}', url=f'https://osu.catgirl.moe/u/{s.player.id}', icon_url=f'https://a.osu.catgirl.moe/{s.player.id}')
e.add_field("Difficulty:", ' '.join(diff), True)
e.add_field("Accuracy:", f'{s.acc:.2f}% {GRADE_EMOTES[s.grade]} ({s.pp:,.2f}pp)', True)
e.add_field("Score:", f'{s.score:,} ({s.max_combo:,}/{s.bmap.max_combo:,}x)', True)
e.set_image(url=f'https://assets.ppy.sh/beatmaps/{s.bmap.set_id}/covers/cover.jpg')
wh.add_embed(e)
await wh.post(glob.http)
async def sendLogin(p: Player):
wh = Webhook(url=CHAT_HOOK, content=f'📥 **{sanitize(p.name)}** has joined the game.')
await wh.post(glob.http)
async def sendLogout(p: Player):
wh = Webhook(url=CHAT_HOOK, content=f'📤 **{sanitize(p.name)}** has left the game.')
await wh.post(glob.http)
async def sendRankMap(p: Player, b: Beatmap, s: str):
wh = Webhook(url=CHAT_HOOK)
e = Embed(title=b.full, url=f'https://osu.ppy.sh/b/{b.id}', color=0xE91E63)
e.set_author(name=f'{p.name} {s} a map', url=f'https://osu.catgirl.moe/u/{p.id}', icon_url=f'https://a.osu.catgirl.moe/{p.id}')
e.set_image(url=f'https://assets.ppy.sh/beatmaps/{b.set_id}/covers/cover.jpg')
wh.add_embed(e)
await wh.post(glob.http)
async def sendSendMessage(p: Player, m: str):
wh = Webhook(url=CHAT_HOOK, username=p.name, avatar_url=f'https://a.osu.catgirl.moe/{p.id}', content=sanitize(m))
await wh.post(glob.http)
async def sendMatchCreate(p: Player, m: Match):
wh = Webhook(url=CHAT_HOOK, content=f'⭐ **{sanitize(p.name)}** created lobby *"{sanitize(m.name)}"*.')
await wh.post(glob.http)
async def sendMatchJoin(p: Player, m: Match):
wh = Webhook(url=CHAT_HOOK, content=f'➡️ **{sanitize(p.name)}** joined lobby *"{sanitize(m.name)}"*.')
await wh.post(glob.http)
async def sendMatchPart(p: Player, m: Match):
wh = Webhook(url=CHAT_HOOK, content=f'⬅️ **{sanitize(p.name)}** left lobby *"{sanitize(m.name)}"*.')
await wh.post(glob.http)
async def sendMatchComplete(slots: list[Slot], m: Match):
submitted, not_submitted = await m.await_submissions(slots)
print(submitted)
print(not_submitted)
if submitted:
player_names = []
player_accuracy = []
player_scores = []
wh = Webhook(url=CHAT_HOOK)
bmap = next(iter(submitted)).recent_score.bmap
e = Embed(title=bmap.full, url=f'https://osu.ppy.sh/b/{bmap.id}',color=0xF44336)
for p, z in sorted(submitted.items(), key=lambda item: item[0].recent_score.score, reverse=True):
s = p.recent_score
player_names.append(p.name)
player_accuracy.append(f'{s.acc:.2f}% {GRADE_EMOTES[s.grade]} ({s.pp:,.2f}pp)')
player_scores.append(f'{s.score:,} ({s.max_combo:,}/{s.bmap.max_combo:,}x)')
e.set_author(name=f'Lobby "{sanitize(m.name)}" finished a map')
e.add_field("Players:", '\n'.join(player_names), True)
e.add_field("Accuracy:", '\n'.join(player_accuracy), True)
e.add_field("Score:", '\n'.join(player_scores), True)
e.set_image(url=f'https://assets.ppy.sh/beatmaps/{bmap.set_id}/covers/cover.jpg')
wh.add_embed(e)
await wh.post(glob.http) | true | true |
f71e6b80047cea9a2670020af62cbeb40bc92652 | 1,412 | py | Python | pygears/lib/reduce2.py | bogdanvuk/pygears | a0b21d445e1d5c89ad66751447b8253536b835ee | [
"MIT"
] | 120 | 2018-04-23T08:29:04.000Z | 2022-03-30T14:41:52.000Z | pygears/lib/reduce2.py | FZP1607152286/pygears | a0b21d445e1d5c89ad66751447b8253536b835ee | [
"MIT"
] | 12 | 2019-07-09T17:12:58.000Z | 2022-03-18T09:05:10.000Z | pygears/lib/reduce2.py | FZP1607152286/pygears | a0b21d445e1d5c89ad66751447b8253536b835ee | [
"MIT"
] | 12 | 2019-05-10T19:42:08.000Z | 2022-03-28T18:26:44.000Z | from pygears import gear, Intf
from pygears.lib import czip
from pygears.typing import Tuple, Uint, Union, Queue
from pygears.lib import fmap, demux, decouple, fifo, union_collapse
from pygears.lib import priority_mux, replicate
TCfg = Tuple[{'reduce_size': Uint['w_reduce_size'], 'init': 't_acc'}]
@gear
def reduce2(din, cfg: TCfg, *, f, max_size):
    """Similar to the Python reduce function, applies a rolling computation to
    sequential pairs of values in a list. The ``din`` input is of type
    :class:`Queue` which holds the values to be used for computation while the
    ``cfg`` input is a :class:`Tuple` consisting of a ``reduce_size`` field and
    the ``init`` field holding the initial value.

    Args:
        f: Function to be performed
        max_size: Maximal length of the input `Queue` which is the depth of the
          FIFO used for storing intermediate values

    Returns:
        The result of the reduce operation
    """
    # Accumulator type comes from the ``init`` field; drop one Queue level
    # for the intermediate-result type.
    acctype = cfg.dtype['init']
    qtype = Queue[acctype, din.dtype.lvl - 1]
    temp_res = Intf(dtype=qtype)
    cfg_rep = cfg | replicate
    # Second operand of ``f``: the replicated init value for a fresh
    # reduction, otherwise the fed-back intermediate result (priority_mux
    # picks whichever is available).
    sec_opnd = (cfg_rep, temp_res) \
        | priority_mux \
        | fmap(f=union_collapse, fcat=czip, lvl=1)
    result = czip(din, sec_opnd) | decouple | fmap(f=f, fcat=czip, lvl=2)
    # Split the stream: intermediate accumulator values vs. the final result.
    acc, fin_res = result | Union[qtype, qtype] | demux
    # Intermediate values loop back through a bounded FIFO.
    acc | fifo(intfs=[temp_res], depth=max_size)
    return fin_res
return fin_res
| 33.619048 | 79 | 0.67847 | from pygears import gear, Intf
from pygears.lib import czip
from pygears.typing import Tuple, Uint, Union, Queue
from pygears.lib import fmap, demux, decouple, fifo, union_collapse
from pygears.lib import priority_mux, replicate
TCfg = Tuple[{'reduce_size': Uint['w_reduce_size'], 'init': 't_acc'}]
@gear
def reduce2(din, cfg: TCfg, *, f, max_size):
    """Rolling reduce over a :class:`Queue` of values.

    ``cfg`` carries the ``init`` accumulator value (plus a ``reduce_size``
    field); ``f`` is the combining operation and ``max_size`` bounds the
    FIFO holding intermediate accumulator values.
    """
    acctype = cfg.dtype['init']
    qtype = Queue[acctype, din.dtype.lvl - 1]
    temp_res = Intf(dtype=qtype)
    cfg_rep = cfg | replicate
    # Second operand: the init value for a fresh reduction, otherwise the
    # fed-back intermediate result.
    sec_opnd = (cfg_rep, temp_res) \
        | priority_mux \
        | fmap(f=union_collapse, fcat=czip, lvl=1)
    result = czip(din, sec_opnd) | decouple | fmap(f=f, fcat=czip, lvl=2)
    acc, fin_res = result | Union[qtype, qtype] | demux
    # Intermediate values loop back through a bounded FIFO.
    acc | fifo(intfs=[temp_res], depth=max_size)
    return fin_res
| true | true |
f71e6cf1c4c87d2f388c85fe5d06ee0e82f8e1e9 | 13,098 | py | Python | examples/char-lstm-classification.py | iamgroot42/opacus | 51708309e71c030aa2bf15d6dccc7bcbbe9ed570 | [
"Apache-2.0"
] | 195 | 2019-12-11T23:55:47.000Z | 2020-08-27T04:17:29.000Z | examples/char-lstm-classification.py | iamgroot42/opacus | 51708309e71c030aa2bf15d6dccc7bcbbe9ed570 | [
"Apache-2.0"
] | 35 | 2020-01-21T11:04:29.000Z | 2020-08-27T05:30:57.000Z | examples/char-lstm-classification.py | iamgroot42/opacus | 51708309e71c030aa2bf15d6dccc7bcbbe9ed570 | [
"Apache-2.0"
] | 39 | 2020-01-04T20:05:20.000Z | 2020-08-25T23:09:38.000Z | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from collections import Counter
from pathlib import Path
from statistics import mean
import torch
import torch.nn as nn
from opacus import PrivacyEngine
from opacus.layers import DPGRU, DPLSTM, DPRNN
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
parser = argparse.ArgumentParser(
description="PyTorch Name language classification DP Training",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--data-root",
required=True,
type=str,
help="Path to training set of names (ie. ~/data/names/)",
)
parser.add_argument(
"--device",
type=str,
default="cuda",
help="GPU ID for this process",
)
parser.add_argument(
"-b",
"--batch-size",
default=800,
type=int,
metavar="N",
help="mini-batch size",
)
parser.add_argument(
"--mode",
default="lstm",
choices=["lstm", "gru", "rnn"],
help="recursive network type",
)
parser.add_argument(
"--embedding-size", default=64, type=int, help="Character embedding dimension"
)
parser.add_argument(
"--hidden-size", default=128, type=int, help="hidden state dimensions"
)
parser.add_argument("--n-layers", default=1, type=int, help="How many layers to use")
parser.add_argument(
"--test-every",
default=0,
type=int,
help="Run evaluation on the test every these many epochs",
)
parser.add_argument(
"--bidirectional",
action="store_true",
default=False,
help="If turned on, makes the RNN bidirectional",
)
parser.add_argument(
"--learning-rate",
default=2.0,
type=float,
metavar="LR",
help="initial learning rate",
)
parser.add_argument("--epochs", type=int, default=10, help="Number of training epochs")
parser.add_argument(
"--train-split",
type=float,
default=0.8,
help="Fraction of data to utilize for training (rest for evaluation)",
)
parser.add_argument(
"--sigma",
type=float,
default=1.0,
metavar="S",
help="Noise multiplier",
)
parser.add_argument(
"-c",
"--max-per-sample-grad-norm",
type=float,
default=1.5,
metavar="C",
help="Clip per-sample gradients to this norm",
)
parser.add_argument(
"--disable-dp",
action="store_true",
default=False,
help="Disable privacy training and just train with vanilla SGD",
)
parser.add_argument(
"--secure-rng",
action="store_true",
default=False,
help="Enable Secure RNG to have trustworthy privacy guarantees. Comes at a performance cost",
)
parser.add_argument(
"--delta",
type=float,
default=8e-5,
metavar="D",
help="Target delta",
)
parser.add_argument(
"--print-every",
type=int,
default=5,
help="Print the evaluation accuracy every these many iterations",
)
class CharByteEncoder(nn.Module):
    """
    This encoder takes a UTF-8 string and encodes its bytes into a Tensor. It can also
    perform the opposite operation to check a result.

    Examples:
        >>> encoder = CharByteEncoder()
        >>> t = encoder('Ślusàrski')  # tensor([256, 197, 154, 108, 117, 115, 195, 160, 114, 115, 107, 105, 257])
        >>> encoder.decode(t)  # returns "<s>Ślusàrski</s>"
    """

    def __init__(self):
        super().__init__()
        self.start_token = "<s>"
        self.end_token = "</s>"
        self.pad_token = "<pad>"

        # Byte values occupy ids 0..255; the special tokens sit just above.
        self.start_idx = 256
        self.end_idx = 257
        self.pad_idx = 258

    def forward(self, s: str, pad_to=0) -> torch.LongTensor:
        """
        Encodes a string. It will append a start token <s> (id=self.start_idx) and an end token </s>
        (id=self.end_idx).

        Args:
            s: The string to encode.
            pad_to: If not zero, pad by appending self.pad_idx until string is of length `pad_to`.
                Defaults to 0.

        Returns:
            The encoded LongTensor of indices.
        """
        encoded = s.encode()
        # Pad relative to the raw byte length (start/end tokens not counted).
        n_pad = max(0, pad_to - len(encoded))
        return torch.LongTensor(
            [self.start_idx] + list(encoded) + [self.end_idx] + [self.pad_idx] * n_pad
        )

    def decode(self, char_ids_tensor: torch.LongTensor) -> str:
        """
        The inverse of `forward`. Keeps the start, end and pad indices.
        """
        char_ids = char_ids_tensor.cpu().detach().tolist()

        out = []
        buf = []  # run of raw byte values awaiting UTF-8 decoding
        for c in char_ids:
            if c < 256:
                buf.append(c)
            else:
                # Special token: flush any pending bytes first, then emit
                # the token's text form.
                if buf:
                    out.append(bytes(buf).decode())
                    buf = []
                if c == self.start_idx:
                    out.append(self.start_token)
                elif c == self.end_idx:
                    out.append(self.end_token)
                elif c == self.pad_idx:
                    out.append(self.pad_token)

        if buf:  # in case some bytes are left at the end
            out.append(bytes(buf).decode())
        return "".join(out)

    def __len__(self):
        """
        The length of our encoder space. This is fixed to 256 (one byte) + 3 special chars
        (start, end, pad).

        Returns:
            259
        """
        return 259
class NamesDataset(Dataset):
    """Surname samples loaded from one file per language under ``root``.

    Each sample is ``(encoded_name, label_id)``; the label is the language
    file's stem.
    """

    def __init__(self, root):
        self.root = Path(root)
        self.labels = list({langfile.stem for langfile in self.root.iterdir()})
        self.labels_dict = {label: i for i, label in enumerate(self.labels)}
        self.encoder = CharByteEncoder()
        self.samples = self.construct_samples()

    def __getitem__(self, i):
        return self.samples[i]

    def __len__(self):
        return len(self.samples)

    def construct_samples(self):
        """Read every language file and encode each name into a sample pair."""
        samples = []
        for langfile in self.root.iterdir():
            label_id = self.labels_dict[langfile.stem]
            with open(langfile, "r") as fin:
                samples.extend(
                    (self.encoder(line.strip()), torch.tensor(label_id).long())
                    for line in fin
                )
        return samples

    def label_count(self):
        """Return a Counter of samples per language label."""
        return Counter(self.labels[int(y)] for _x, y in self.samples)
VOCAB_SIZE = 256 + 3  # 256 byte values in one byte, plus 3 special characters.


class CharNNClassifier(nn.Module):
    """Character-level sequence classifier: embedding -> RNN -> linear head.

    The class prediction is read off the RNN output at the final timestep.
    """

    def __init__(
        self,
        rnn_type,
        embedding_size,
        hidden_size,
        output_size,
        num_layers=1,
        bidirectional=False,
        vocab_size=VOCAB_SIZE,
    ):
        super().__init__()
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.vocab_size = vocab_size

        self.embedding = nn.Embedding(vocab_size, embedding_size)
        self.rnn = rnn_type(
            embedding_size,
            hidden_size,
            num_layers=num_layers,
            bidirectional=bidirectional,
            batch_first=True,
        )
        # A bidirectional RNN concatenates forward/backward states, doubling
        # the feature size seen by the head.  The original sized the head for
        # the unidirectional case only, which crashed with bidirectional=True.
        rnn_out_size = hidden_size * 2 if bidirectional else hidden_size
        self.out_layer = nn.Linear(rnn_out_size, output_size)

    def forward(self, x, hidden=None):
        x = self.embedding(x)  # [B, T] -> [B, T, D]
        x, _ = self.rnn(x, hidden)  # -> [B, T, H] (or [B, T, 2H] if bidi)
        x = x[:, -1, :]  # keep only the last timestep -> [B, H]
        return self.out_layer(x)  # -> [B, C]
def padded_collate(batch, padding_idx=0):
    """Collate variable-length (sequence, label) pairs into padded tensors."""
    sequences = [sample[0] for sample in batch]
    labels = [sample[1] for sample in batch]
    x = pad_sequence(sequences, batch_first=True, padding_value=padding_idx)
    y = torch.stack(labels).long()
    return x, y
def train(
    model,
    criterion,
    optimizer,
    train_loader,
    epoch,
    privacy_engine,
    target_delta,
    device="cuda:0",
):
    """Run one training epoch and print mean accuracy/loss (plus the DP
    budget when a privacy engine with an accountant is attached)."""
    model.train()
    accs = []
    losses = []
    for x, y in tqdm(train_loader):
        x = x.to(device)
        y = y.to(device)

        logits = model(x)
        loss = criterion(logits, y)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        preds = logits.argmax(-1)
        n_correct = float(preds.eq(y).sum())
        batch_accuracy = n_correct / len(y)

        accs.append(batch_accuracy)
        losses.append(float(loss))

    printstr = (
        f"\t Epoch {epoch}. Accuracy: {mean(accs):.6f} | Loss: {mean(losses):.6f}"
    )
    try:
        # Only a DP-wrapped run exposes an accountant; a plain-SGD run
        # (privacy_engine is None) raises AttributeError and skips this.
        epsilon, best_alpha = privacy_engine.accountant.get_privacy_spent(
            delta=target_delta
        )
        printstr += f" | (ε = {epsilon:.2f}, δ = {target_delta}) for α = {best_alpha}"
    except AttributeError:
        pass
    print(printstr)
    return
def test(model, test_loader, privacy_engine, target_delta, device="cuda:0"):
    """Evaluate mean accuracy on the test set, print (with the DP budget
    when a privacy engine is attached) and return it."""
    model.eval()
    accs = []
    with torch.no_grad():
        for x, y in tqdm(test_loader):
            x = x.to(device)
            y = y.to(device)

            preds = model(x).argmax(-1)
            n_correct = float(preds.eq(y).sum())
            batch_accuracy = n_correct / len(y)

            accs.append(batch_accuracy)
    mean_acc = mean(accs)
    printstr = "\n----------------------------\n" f"Test Accuracy: {mean_acc:.6f}"
    if privacy_engine:
        epsilon, best_alpha = privacy_engine.accountant.get_privacy_spent(
            delta=target_delta
        )
        printstr += f" (ε = {epsilon:.2f}, δ = {target_delta}) for α = {best_alpha}"
    print(printstr + "\n----------------------------\n")
    return mean_acc
def main():
    """Entry point: parse args, build data and model, then train a character
    RNN name classifier, optionally with differential privacy (Opacus)."""
    args = parser.parse_args()
    device = torch.device(args.device)
    ds = NamesDataset(args.data_root)
    train_len = int(args.train_split * len(ds))
    test_len = len(ds) - train_len

    print(f"{train_len} samples for training, {test_len} for testing")

    # Secure RNG gives trustworthy DP guarantees at a performance cost.
    if args.secure_rng:
        try:
            import torchcsprng as prng
        except ImportError as e:
            msg = (
                "To use secure RNG, you must install the torchcsprng package! "
                "Check out the instructions here: https://github.com/pytorch/csprng#installation"
            )
            raise ImportError(msg) from e

        generator = prng.create_random_device_generator("/dev/urandom")
    else:
        generator = None

    # Single train/test split.  (The original called random_split twice and
    # silently discarded the first split.)
    train_ds, test_ds = torch.utils.data.random_split(
        ds, [train_len, test_len], generator=generator
    )

    rnn_types = {"rnn": DPRNN, "gru": DPGRU, "lstm": DPLSTM}
    try:
        rnn_type = rnn_types[args.mode]
    except KeyError:
        raise ValueError(f"Invalid network type: {args.mode}") from None

    model = CharNNClassifier(
        rnn_type,
        args.embedding_size,
        args.hidden_size,
        len(ds.labels),
        args.n_layers,
        args.bidirectional,
    )
    model = model.to(device)

    train_loader = DataLoader(
        train_ds,
        batch_size=args.batch_size,
        num_workers=1,
        pin_memory=True,
        collate_fn=padded_collate,
    )

    test_loader = DataLoader(
        test_ds,
        batch_size=2 * args.batch_size,
        shuffle=False,
        num_workers=1,
        pin_memory=True,
        collate_fn=padded_collate,
    )

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate)

    if not args.disable_dp:
        privacy_engine = PrivacyEngine(secure_mode=args.secure_rng)
        model, optimizer, train_loader = privacy_engine.make_private(
            module=model,
            optimizer=optimizer,
            data_loader=train_loader,
            noise_multiplier=args.sigma,
            max_grad_norm=args.max_per_sample_grad_norm,
        )
    else:
        privacy_engine = None

    print(f"Train stats ({args.mode}): \n")
    for epoch in tqdm(range(args.epochs)):
        train(
            model,
            criterion,
            optimizer,
            train_loader,
            epoch,
            privacy_engine,
            args.delta,
            device=device,
        )
        # Periodic evaluation; --test-every 0 disables it.
        if args.test_every and epoch % args.test_every == 0:
            test(model, test_loader, privacy_engine, args.delta, device=device)

    mean_acc = test(model, test_loader, privacy_engine, args.delta, device=device)
    torch.save(mean_acc, f"run_results_chr_{args.mode}_classification.pt")
if __name__ == "__main__":
main()
| 27.574737 | 117 | 0.595358 |
import argparse
from collections import Counter
from pathlib import Path
from statistics import mean
import torch
import torch.nn as nn
from opacus import PrivacyEngine
from opacus.layers import DPGRU, DPLSTM, DPRNN
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
parser = argparse.ArgumentParser(
description="PyTorch Name language classification DP Training",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--data-root",
required=True,
type=str,
help="Path to training set of names (ie. ~/data/names/)",
)
parser.add_argument(
"--device",
type=str,
default="cuda",
help="GPU ID for this process",
)
parser.add_argument(
"-b",
"--batch-size",
default=800,
type=int,
metavar="N",
help="mini-batch size",
)
parser.add_argument(
"--mode",
default="lstm",
choices=["lstm", "gru", "rnn"],
help="recursive network type",
)
parser.add_argument(
"--embedding-size", default=64, type=int, help="Character embedding dimension"
)
parser.add_argument(
"--hidden-size", default=128, type=int, help="hidden state dimensions"
)
parser.add_argument("--n-layers", default=1, type=int, help="How many layers to use")
parser.add_argument(
"--test-every",
default=0,
type=int,
help="Run evaluation on the test every these many epochs",
)
parser.add_argument(
"--bidirectional",
action="store_true",
default=False,
help="If turned on, makes the RNN bidirectional",
)
parser.add_argument(
"--learning-rate",
default=2.0,
type=float,
metavar="LR",
help="initial learning rate",
)
parser.add_argument("--epochs", type=int, default=10, help="Number of training epochs")
parser.add_argument(
"--train-split",
type=float,
default=0.8,
help="Fraction of data to utilize for training (rest for evaluation)",
)
parser.add_argument(
"--sigma",
type=float,
default=1.0,
metavar="S",
help="Noise multiplier",
)
parser.add_argument(
"-c",
"--max-per-sample-grad-norm",
type=float,
default=1.5,
metavar="C",
help="Clip per-sample gradients to this norm",
)
parser.add_argument(
"--disable-dp",
action="store_true",
default=False,
help="Disable privacy training and just train with vanilla SGD",
)
parser.add_argument(
"--secure-rng",
action="store_true",
default=False,
help="Enable Secure RNG to have trustworthy privacy guarantees. Comes at a performance cost",
)
parser.add_argument(
"--delta",
type=float,
default=8e-5,
metavar="D",
help="Target delta",
)
parser.add_argument(
"--print-every",
type=int,
default=5,
help="Print the evaluation accuracy every these many iterations",
)
class CharByteEncoder(nn.Module):
def __init__(self):
super().__init__()
self.start_token = "<s>"
self.end_token = "</s>"
self.pad_token = "<pad>"
self.start_idx = 256
self.end_idx = 257
self.pad_idx = 258
def forward(self, s: str, pad_to=0) -> torch.LongTensor:
encoded = s.encode()
n_pad = pad_to - len(encoded) if pad_to > len(encoded) else 0
return torch.LongTensor(
[self.start_idx]
+ [c for c in encoded]
+ [self.end_idx]
+ [self.pad_idx for _ in range(n_pad)]
)
def decode(self, char_ids_tensor: torch.LongTensor) -> str:
char_ids = char_ids_tensor.cpu().detach().tolist()
out = []
buf = []
for c in char_ids:
if c < 256:
buf.append(c)
else:
if buf:
out.append(bytes(buf).decode())
buf = []
if c == self.start_idx:
out.append(self.start_token)
elif c == self.end_idx:
out.append(self.end_token)
elif c == self.pad_idx:
out.append(self.pad_token)
if buf:
out.append(bytes(buf).decode())
return "".join(out)
def __len__(self):
return 259
class NamesDataset(Dataset):
def __init__(self, root):
self.root = Path(root)
self.labels = list({langfile.stem for langfile in self.root.iterdir()})
self.labels_dict = {label: i for i, label in enumerate(self.labels)}
self.encoder = CharByteEncoder()
self.samples = self.construct_samples()
def __getitem__(self, i):
return self.samples[i]
def __len__(self):
return len(self.samples)
def construct_samples(self):
samples = []
for langfile in self.root.iterdir():
label_name = langfile.stem
label_id = self.labels_dict[label_name]
with open(langfile, "r") as fin:
for row in fin:
samples.append(
(self.encoder(row.strip()), torch.tensor(label_id).long())
)
return samples
def label_count(self):
cnt = Counter()
for _x, y in self.samples:
label = self.labels[int(y)]
cnt[label] += 1
return cnt
VOCAB_SIZE = 256 + 3
class CharNNClassifier(nn.Module):
def __init__(
self,
rnn_type,
embedding_size,
hidden_size,
output_size,
num_layers=1,
bidirectional=False,
vocab_size=VOCAB_SIZE,
):
super().__init__()
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.output_size = output_size
self.vocab_size = vocab_size
self.embedding = nn.Embedding(vocab_size, embedding_size)
self.rnn = rnn_type(
embedding_size,
hidden_size,
num_layers=num_layers,
bidirectional=bidirectional,
batch_first=True,
)
self.out_layer = nn.Linear(hidden_size, output_size)
def forward(self, x, hidden=None):
x = self.embedding(x)
x, _ = self.rnn(x, hidden)
x = x[:, -1, :]
x = self.out_layer(x)
return x
def padded_collate(batch, padding_idx=0):
x = pad_sequence(
[elem[0] for elem in batch], batch_first=True, padding_value=padding_idx
)
y = torch.stack([elem[1] for elem in batch]).long()
return x, y
def train(
model,
criterion,
optimizer,
train_loader,
epoch,
privacy_engine,
target_delta,
device="cuda:0",
):
model.train()
accs = []
losses = []
for x, y in tqdm(train_loader):
x = x.to(device)
y = y.to(device)
logits = model(x)
loss = criterion(logits, y)
loss.backward()
optimizer.step()
optimizer.zero_grad()
preds = logits.argmax(-1)
n_correct = float(preds.eq(y).sum())
batch_accuracy = n_correct / len(y)
accs.append(batch_accuracy)
losses.append(float(loss))
printstr = (
f"\t Epoch {epoch}. Accuracy: {mean(accs):.6f} | Loss: {mean(losses):.6f}"
)
try:
epsilon, best_alpha = privacy_engine.accountant.get_privacy_spent(
delta=target_delta
)
printstr += f" | (ε = {epsilon:.2f}, δ = {target_delta}) for α = {best_alpha}"
except AttributeError:
pass
print(printstr)
return
def test(model, test_loader, privacy_engine, target_delta, device="cuda:0"):
model.eval()
accs = []
with torch.no_grad():
for x, y in tqdm(test_loader):
x = x.to(device)
y = y.to(device)
preds = model(x).argmax(-1)
n_correct = float(preds.eq(y).sum())
batch_accuracy = n_correct / len(y)
accs.append(batch_accuracy)
mean_acc = mean(accs)
printstr = "\n----------------------------\n" f"Test Accuracy: {mean_acc:.6f}"
if privacy_engine:
epsilon, best_alpha = privacy_engine.accountant.get_privacy_spent(
delta=target_delta
)
printstr += f" (ε = {epsilon:.2f}, δ = {target_delta}) for α = {best_alpha}"
print(printstr + "\n----------------------------\n")
return mean_acc
def main():
    """Entry point: train a character-level name classifier, optionally
    with differential privacy via Opacus.

    Parses CLI args, builds train/test splits, constructs the RNN model,
    trains for ``args.epochs`` and saves the final test accuracy.
    """
    args = parser.parse_args()
    device = torch.device(args.device)
    ds = NamesDataset(args.data_root)
    train_len = int(args.train_split * len(ds))
    test_len = len(ds) - train_len
    print(f"{train_len} samples for training, {test_len} for testing")
    if args.secure_rng:
        try:
            import torchcsprng as prng
        except ImportError as e:
            msg = (
                "To use secure RNG, you must install the torchcsprng package! "
                "Check out the instructions here: https://github.com/pytorch/csprng#installation"
            )
            raise ImportError(msg) from e
        generator = prng.create_random_device_generator("/dev/urandom")
    else:
        generator = None
    # Split exactly once.  The original code called random_split twice,
    # discarding the first split and advancing the RNG, so the split
    # actually used differed from the intended one.
    train_ds, test_ds = torch.utils.data.random_split(
        ds, [train_len, test_len], generator=generator
    )
    if args.mode == "rnn":
        rnn_type = DPRNN
    elif args.mode == "gru":
        rnn_type = DPGRU
    elif args.mode == "lstm":
        rnn_type = DPLSTM
    else:
        raise ValueError(f"Invalid network type: {args.mode}")
    model = CharNNClassifier(
        rnn_type,
        args.embedding_size,
        args.hidden_size,
        len(ds.labels),
        args.n_layers,
        args.bidirectional,
    )
    model = model.to(device)
    # NOTE(review): train_loader has no shuffle; under DP, make_private
    # replaces it with a Poisson-sampling loader, but in the non-DP branch
    # batches are served in dataset order — confirm this is intended.
    train_loader = DataLoader(
        train_ds,
        batch_size=args.batch_size,
        num_workers=1,
        pin_memory=True,
        collate_fn=padded_collate,
    )
    test_loader = DataLoader(
        test_ds,
        batch_size=2 * args.batch_size,
        shuffle=False,
        num_workers=1,
        pin_memory=True,
        collate_fn=padded_collate,
    )
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate)
    if not args.disable_dp:
        privacy_engine = PrivacyEngine(secure_mode=args.secure_rng)
        model, optimizer, train_loader = privacy_engine.make_private(
            module=model,
            optimizer=optimizer,
            data_loader=train_loader,
            noise_multiplier=args.sigma,
            max_grad_norm=args.max_per_sample_grad_norm,
        )
    else:
        privacy_engine = None
    print(f"Train stats ({args.mode}): \n")
    for epoch in tqdm(range(args.epochs)):
        train(
            model,
            criterion,
            optimizer,
            train_loader,
            epoch,
            privacy_engine,
            args.delta,
            device=device,
        )
        if args.test_every:
            if epoch % args.test_every == 0:
                test(model, test_loader, privacy_engine, args.delta, device=device)
    mean_acc = test(model, test_loader, privacy_engine, args.delta, device=device)
    torch.save(mean_acc, f"run_results_chr_{args.mode}_classification.pt")
if __name__ == "__main__":
    main()
| true | true |
f71e6d57516ab998da824cc8373fa30884176384 | 8,384 | py | Python | tensorflow_datasets/object_detection/voc.py | ChAnYaNG97/datasets | 0a45e2ea98716d325fc1c5e5494f2575f3bdb908 | [
"Apache-2.0"
] | 1 | 2020-10-11T19:15:49.000Z | 2020-10-11T19:15:49.000Z | tensorflow_datasets/object_detection/voc.py | ChAnYaNG97/datasets | 0a45e2ea98716d325fc1c5e5494f2575f3bdb908 | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/object_detection/voc.py | ChAnYaNG97/datasets | 0a45e2ea98716d325fc1c5e5494f2575f3bdb908 | [
"Apache-2.0"
] | 1 | 2020-08-03T20:19:12.000Z | 2020-08-03T20:19:12.000Z | # coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PASCAL VOC datasets."""
import os
import xml.etree.ElementTree
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
# BibTeX template; {year} is filled per BuilderConfig, doubled braces
# survive str.format().
_VOC_CITATION = """\
@misc{{pascal-voc-{year},
author = "Everingham, M. and Van~Gool, L. and Williams, C. K. I. and Winn, J. and Zisserman, A.",
title = "The {{PASCAL}} {{V}}isual {{O}}bject {{C}}lasses {{C}}hallenge {year} {{(VOC{year})}} {{R}}esults",
howpublished = "http://www.pascal-network.org/challenges/VOC/voc{year}/workshop/index.html"}}
"""
# Description template; {year}/{num_images}/{num_objects} filled per config.
_VOC_DESCRIPTION = """\
This dataset contains the data from the PASCAL Visual Object Classes Challenge
{year}, a.k.a. VOC{year}, corresponding to the Classification and Detection
competitions.
A total of {num_images} images are included in this dataset, where each image
contains a set of objects, out of 20 different classes, making a total of
{num_objects} annotated objects.
In the Classification competition, the goal is to predict the set of labels
contained in the image, while in the Detection competition the goal is to
predict the bounding box and label of each individual object.
WARNING: As per the official dataset, the test set of VOC2012 does not contain
annotations.
"""
_VOC_URL = "http://host.robots.ox.ac.uk/pascal/VOC/voc{year}/"
# Original site, it is down very often.
# _VOC_DATA_URL = "http://host.robots.ox.ac.uk/pascal/VOC/voc{year}/"
# Data mirror:
_VOC_DATA_URL = "http://pjreddie.com/media/files/"
# The 20 object classes, shared by VOC2007 and VOC2012.
_VOC_LABELS = (
    "aeroplane",
    "bicycle",
    "bird",
    "boat",
    "bottle",
    "bus",
    "car",
    "cat",
    "chair",
    "cow",
    "diningtable",
    "dog",
    "horse",
    "motorbike",
    "person",
    "pottedplant",
    "sheep",
    "sofa",
    "train",
    "tvmonitor",
)
# Valid values of the per-object <pose> annotation field.
_VOC_POSES = (
    "frontal",
    "rear",
    "left",
    "right",
    "unspecified",
)
def _get_example_objects(annon_filepath):
    """Yield one feature dict per <object> element in a VOC annotation XML."""
    with tf.io.gfile.GFile(annon_filepath, "r") as f:
        root = xml.etree.ElementTree.parse(f).getroot()
        # Disable pytype to avoid attribute-error due to find returning
        # Optional[Element]
        # pytype: disable=attribute-error
        size = root.find("size")
        width = float(size.find("width").text)
        height = float(size.find("height").text)
        for obj in root.findall("object"):
            # Class and pose names are normalized to lowercase.
            label = obj.find("name").text.lower()
            pose = obj.find("pose").text.lower()
            truncated = obj.find("truncated").text == "1"
            difficult = obj.find("difficult").text == "1"
            box = obj.find("bndbox")
            xmax = float(box.find("xmax").text)
            xmin = float(box.find("xmin").text)
            ymax = float(box.find("ymax").text)
            ymin = float(box.find("ymin").text)
            yield {
                "label": label,
                "pose": pose,
                "bbox": tfds.features.BBox(
                    ymin / height, xmin / width, ymax / height, xmax / width),
                "is_truncated": truncated,
                "is_difficult": difficult,
            }
        # pytype: enable=attribute-error
class VocConfig(tfds.core.BuilderConfig):
    """BuilderConfig describing a single PASCAL VOC challenge year."""

    def __init__(self, year=None, filenames=None, has_test_annotations=True,
                 **kwargs):
        self.year = year
        self.filenames = filenames
        self.has_test_annotations = has_test_annotations
        # Version history:
        # 4.0.0: Added BuildConfig and 2012 version support, deprecate Voc2007.
        # 3.0.0: S3 with new hashing function (different shuffle).
        # 2.0.0: S3 (new shuffling, sharding and slicing mechanism).
        super(VocConfig, self).__init__(
            name=year,
            version=tfds.core.Version("4.0.0"),
            **kwargs)
class Voc(tfds.core.GeneratorBasedBuilder):
  """Pascal VOC 2007 or 2012."""

  BUILDER_CONFIGS = [
      VocConfig(
          year="2007",
          description=_VOC_DESCRIPTION.format(
              year=2007, num_images=9963, num_objects=24640),
          filenames={
              "trainval": "VOCtrainval_06-Nov-2007.tar",
              "test": "VOCtest_06-Nov-2007.tar",
          },
          has_test_annotations=True,
      ),
      VocConfig(
          year="2012",
          description=_VOC_DESCRIPTION.format(
              year=2012, num_images=11540, num_objects=27450),
          filenames={
              "trainval": "VOCtrainval_11-May-2012.tar",
              "test": "VOC2012test.tar",
          },
          # VOC2012 ships its test set without annotation XMLs.
          has_test_annotations=False,
      ),
  ]

  def _info(self):
    """Declare dataset features: image, per-object boxes and image labels."""
    return tfds.core.DatasetInfo(
        builder=self,
        description=self.builder_config.description,
        features=tfds.features.FeaturesDict({
            "image": tfds.features.Image(),
            "image/filename": tfds.features.Text(),
            "objects": tfds.features.Sequence({
                "label": tfds.features.ClassLabel(names=_VOC_LABELS),
                "bbox": tfds.features.BBoxFeature(),
                "pose": tfds.features.ClassLabel(names=_VOC_POSES),
                "is_truncated": tf.bool,
                "is_difficult": tf.bool,
            }),
            # Deduplicated image-level labels (with and without "difficult"
            # objects) for the Classification competition.
            "labels": tfds.features.Sequence(
                tfds.features.ClassLabel(names=_VOC_LABELS)),
            "labels_no_difficult": tfds.features.Sequence(
                tfds.features.ClassLabel(names=_VOC_LABELS)),
        }),
        homepage=_VOC_URL.format(year=self.builder_config.year),
        citation=_VOC_CITATION.format(year=self.builder_config.year),
    )

  def _split_generators(self, dl_manager):
    """Download the year's tarballs and map them to test/train/val splits."""
    paths = dl_manager.download_and_extract({
        k: os.path.join(_VOC_DATA_URL, v)
        for k, v in self.builder_config.filenames.items()
    })
    # train and val both come from the "trainval" archive; the ImageSets
    # text files read in _generate_examples select the subset.
    return [
        tfds.core.SplitGenerator(
            name=tfds.Split.TEST,
            gen_kwargs=dict(data_path=paths["test"], set_name="test")),
        tfds.core.SplitGenerator(
            name=tfds.Split.TRAIN,
            gen_kwargs=dict(data_path=paths["trainval"], set_name="train")),
        tfds.core.SplitGenerator(
            name=tfds.Split.VALIDATION,
            gen_kwargs=dict(data_path=paths["trainval"], set_name="val")),
    ]

  def _generate_examples(self, data_path, set_name):
    """Yields examples."""
    set_filepath = os.path.join(
        data_path,
        os.path.normpath("VOCdevkit/VOC{}/ImageSets/Main/{}.txt".format(
            self.builder_config.year, set_name)))
    load_annotations = (
        self.builder_config.has_test_annotations or set_name != "test")
    with tf.io.gfile.GFile(set_filepath, "r") as f:
      for line in f:
        image_id = line.strip()
        example = self._generate_example(data_path, image_id, load_annotations)
        yield image_id, example

  def _generate_example(self, data_path, image_id, load_annotations):
    """Build the example dict for one image id (optionally with objects)."""
    image_filepath = os.path.join(
        data_path,
        os.path.normpath("VOCdevkit/VOC{}/JPEGImages/{}.jpg".format(
            self.builder_config.year, image_id)))
    annon_filepath = os.path.join(
        data_path,
        os.path.normpath("VOCdevkit/VOC{}/Annotations/{}.xml".format(
            self.builder_config.year, image_id)))
    if load_annotations:
      objects = list(_get_example_objects(annon_filepath))
      # Use set() to remove duplicates
      labels = sorted(set(obj["label"] for obj in objects))
      labels_no_difficult = sorted(set(
          obj["label"] for obj in objects if obj["is_difficult"] == 0
      ))
    else:  # The test set of VOC2012 does not contain annotations
      objects = []
      labels = []
      labels_no_difficult = []
    return {
        "image": image_filepath,
        "image/filename": image_id + ".jpg",
        "objects": objects,
        "labels": labels,
        "labels_no_difficult": labels_no_difficult,
    }
| 34.933333 | 109 | 0.637643 |
import os
import xml.etree.ElementTree
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_VOC_CITATION = """\
@misc{{pascal-voc-{year},
author = "Everingham, M. and Van~Gool, L. and Williams, C. K. I. and Winn, J. and Zisserman, A.",
title = "The {{PASCAL}} {{V}}isual {{O}}bject {{C}}lasses {{C}}hallenge {year} {{(VOC{year})}} {{R}}esults",
howpublished = "http://www.pascal-network.org/challenges/VOC/voc{year}/workshop/index.html"}}
"""
_VOC_DESCRIPTION = """\
This dataset contains the data from the PASCAL Visual Object Classes Challenge
{year}, a.k.a. VOC{year}, corresponding to the Classification and Detection
competitions.
A total of {num_images} images are included in this dataset, where each image
contains a set of objects, out of 20 different classes, making a total of
{num_objects} annotated objects.
In the Classification competition, the goal is to predict the set of labels
contained in the image, while in the Detection competition the goal is to
predict the bounding box and label of each individual object.
WARNING: As per the official dataset, the test set of VOC2012 does not contain
annotations.
"""
_VOC_URL = "http://host.robots.ox.ac.uk/pascal/VOC/voc{year}/"
_VOC_DATA_URL = "http://pjreddie.com/media/files/"
_VOC_LABELS = (
"aeroplane",
"bicycle",
"bird",
"boat",
"bottle",
"bus",
"car",
"cat",
"chair",
"cow",
"diningtable",
"dog",
"horse",
"motorbike",
"person",
"pottedplant",
"sheep",
"sofa",
"train",
"tvmonitor",
)
_VOC_POSES = (
"frontal",
"rear",
"left",
"right",
"unspecified",
)
def _get_example_objects(annon_filepath):
with tf.io.gfile.GFile(annon_filepath, "r") as f:
root = xml.etree.ElementTree.parse(f).getroot()
size = root.find("size")
width = float(size.find("width").text)
height = float(size.find("height").text)
for obj in root.findall("object"):
label = obj.find("name").text.lower()
# Get objects' pose name.
pose = obj.find("pose").text.lower()
is_truncated = (obj.find("truncated").text == "1")
is_difficult = (obj.find("difficult").text == "1")
bndbox = obj.find("bndbox")
xmax = float(bndbox.find("xmax").text)
xmin = float(bndbox.find("xmin").text)
ymax = float(bndbox.find("ymax").text)
ymin = float(bndbox.find("ymin").text)
yield {
"label": label,
"pose": pose,
"bbox": tfds.features.BBox(
ymin / height, xmin / width, ymax / height, xmax / width),
"is_truncated": is_truncated,
"is_difficult": is_difficult,
}
class VocConfig(tfds.core.BuilderConfig):
def __init__(
self, year=None, filenames=None, has_test_annotations=True, **kwargs):
self.year = year
self.filenames = filenames
self.has_test_annotations = has_test_annotations
super(VocConfig, self).__init__(
name=year,
version=tfds.core.Version("4.0.0"),
**kwargs)
class Voc(tfds.core.GeneratorBasedBuilder):
BUILDER_CONFIGS = [
VocConfig(
year="2007",
description=_VOC_DESCRIPTION.format(
year=2007, num_images=9963, num_objects=24640),
filenames={
"trainval": "VOCtrainval_06-Nov-2007.tar",
"test": "VOCtest_06-Nov-2007.tar",
},
has_test_annotations=True,
),
VocConfig(
year="2012",
description=_VOC_DESCRIPTION.format(
year=2012, num_images=11540, num_objects=27450),
filenames={
"trainval": "VOCtrainval_11-May-2012.tar",
"test": "VOC2012test.tar",
},
has_test_annotations=False,
),
]
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=self.builder_config.description,
features=tfds.features.FeaturesDict({
"image": tfds.features.Image(),
"image/filename": tfds.features.Text(),
"objects": tfds.features.Sequence({
"label": tfds.features.ClassLabel(names=_VOC_LABELS),
"bbox": tfds.features.BBoxFeature(),
"pose": tfds.features.ClassLabel(names=_VOC_POSES),
"is_truncated": tf.bool,
"is_difficult": tf.bool,
}),
"labels": tfds.features.Sequence(
tfds.features.ClassLabel(names=_VOC_LABELS)),
"labels_no_difficult": tfds.features.Sequence(
tfds.features.ClassLabel(names=_VOC_LABELS)),
}),
homepage=_VOC_URL.format(year=self.builder_config.year),
citation=_VOC_CITATION.format(year=self.builder_config.year),
)
def _split_generators(self, dl_manager):
paths = dl_manager.download_and_extract({
k: os.path.join(_VOC_DATA_URL, v)
for k, v in self.builder_config.filenames.items()
})
return [
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs=dict(data_path=paths["test"], set_name="test")),
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs=dict(data_path=paths["trainval"], set_name="train")),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs=dict(data_path=paths["trainval"], set_name="val")),
]
def _generate_examples(self, data_path, set_name):
set_filepath = os.path.join(
data_path,
os.path.normpath("VOCdevkit/VOC{}/ImageSets/Main/{}.txt".format(
self.builder_config.year, set_name)))
load_annotations = (
self.builder_config.has_test_annotations or set_name != "test")
with tf.io.gfile.GFile(set_filepath, "r") as f:
for line in f:
image_id = line.strip()
example = self._generate_example(data_path, image_id, load_annotations)
yield image_id, example
def _generate_example(self, data_path, image_id, load_annotations):
image_filepath = os.path.join(
data_path,
os.path.normpath("VOCdevkit/VOC{}/JPEGImages/{}.jpg".format(
self.builder_config.year, image_id)))
annon_filepath = os.path.join(
data_path,
os.path.normpath("VOCdevkit/VOC{}/Annotations/{}.xml".format(
self.builder_config.year, image_id)))
if load_annotations:
objects = list(_get_example_objects(annon_filepath))
labels = sorted(set(obj["label"] for obj in objects))
labels_no_difficult = sorted(set(
obj["label"] for obj in objects if obj["is_difficult"] == 0
))
else:
objects = []
labels = []
labels_no_difficult = []
return {
"image": image_filepath,
"image/filename": image_id + ".jpg",
"objects": objects,
"labels": labels,
"labels_no_difficult": labels_no_difficult,
}
| true | true |
f71e6de94e01412e6319d7bc86f671f1d05aba6e | 82,678 | py | Python | electrum_dash/dash_ps.py | STR1017/electrum-dash | f0e3f16c506239bb2f7cf6920464a24706760876 | [
"MIT"
] | 1 | 2021-04-05T15:50:33.000Z | 2021-04-05T15:50:33.000Z | electrum_dash/dash_ps.py | STR1017/electrum-dash | f0e3f16c506239bb2f7cf6920464a24706760876 | [
"MIT"
] | null | null | null | electrum_dash/dash_ps.py | STR1017/electrum-dash | f0e3f16c506239bb2f7cf6920464a24706760876 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import asyncio
import copy
import random
import time
import threading
from collections import deque
from uuid import uuid4
from . import util
from .dash_msg import PRIVATESEND_ENTRY_MAX_SIZE
from .dash_ps_net import PSMixSession, PRIVATESEND_SESSION_MSG_TIMEOUT
from .dash_ps_wallet import (PSDataMixin, PSKeystoreMixin, KeyPairsMixin,
KPStates, NotFoundInKeypairs, AddPSDataError,
SignWithKeypairsFailed)
from .dash_ps_util import (PSOptsMixin, PSUtilsMixin, PSGUILogHandler,
PSManLogAdapter, PSCoinRounds, PSStates,
PS_DENOMS_DICT, COLLATERAL_VAL, MIN_DENOM_VAL,
CREATE_COLLATERAL_VAL, CREATE_COLLATERAL_VALS,
PSTxWorkflow, PSDenominateWorkflow, calc_tx_fee)
from .dash_tx import PSTxTypes, SPEC_TX_NAMES, CTxIn
from .logging import Logger
from .transaction import Transaction, PartialTxOutput, PartialTransaction
from .util import (NoDynamicFeeEstimates, log_exceptions, SilentTaskGroup,
NotEnoughFunds, bfh, is_android)
from .i18n import _
# Reverse lookup: denom value -> denom name from PS_DENOMS_DICT
PS_DENOM_REVERSE_DICT = {int(v): k for k, v in PS_DENOMS_DICT.items()}


class TooManyUtxos(Exception):
    """Thrown when creating new denoms/collateral txs from coins"""


class TooLargeUtxoVal(Exception):
    """Thrown when creating new collateral txs from coins"""
class PSManager(Logger, PSKeystoreMixin, PSDataMixin, PSOptsMixin,
PSUtilsMixin, KeyPairsMixin):
'''Class representing wallet PrivateSend manager'''
LOGGING_SHORTCUT = 'A'
ADD_PS_DATA_ERR_MSG = _('Error on adding PrivateSend transaction data.')
SPEND_TO_PS_ADDRS_MSG = _('For privacy reasons blocked attempt to'
' transfer coins to PrivateSend address.')
WATCHING_ONLY_MSG = _('This is a watching-only wallet.'
' Mixing can not be run.')
ALL_MIXED_MSG = _('PrivateSend mixing is done')
CLEAR_PS_DATA_MSG = _('Are you sure to clear all wallet PrivateSend data?'
' This is not recommended if there is'
' no particular need.')
NO_NETWORK_MSG = _('Can not start mixing. Network is not available')
NO_DASH_NET_MSG = _('Can not start mixing. DashNet is not available')
LLMQ_DATA_NOT_READY = _('LLMQ quorums data is not fully loaded.')
MNS_DATA_NOT_READY = _('Masternodes data is not fully loaded.')
NOT_ENABLED_MSG = _('PrivateSend mixing is not enabled')
INITIALIZING_MSG = _('PrivateSend mixing is initializing.'
' Please try again soon')
MIXING_ALREADY_RUNNING_MSG = _('PrivateSend mixing is already running.')
MIXING_NOT_RUNNING_MSG = _('PrivateSend mixing is not running.')
FIND_UNTRACKED_RUN_MSG = _('PrivateSend mixing can not start. Process of'
' finding untracked PS transactions'
' is currently run')
ERRORED_MSG = _('PrivateSend mixing can not start.'
' Please check errors in PS Log tab')
UNKNOWN_STATE_MSG = _('PrivateSend mixing can not start.'
' Unknown state: {}')
WAIT_MIXING_STOP_MSG = _('Mixing is not stopped. If mixing sessions ends'
' prematurely additional pay collateral may be'
' paid. Do you really want to close wallet?')
NO_NETWORK_STOP_MSG = _('Network is not available')
OTHER_COINS_ARRIVED_MSG1 = _('Some unknown coins arrived on addresses'
' reserved for PrivateSend use, txid: {}.')
OTHER_COINS_ARRIVED_MSG2 = _('WARNING: it is not recommended to spend'
' these coins in regular transactions!')
OTHER_COINS_ARRIVED_MSG3 = _('You can use these coins in PrivateSend'
' mixing process by manually selecting UTXO'
' and creating new denoms or new collateral,'
' depending on UTXO value.')
OTHER_COINS_ARRIVED_Q = _('Do you want to use other coins now?')
if is_android():
NO_DYNAMIC_FEE_MSG = _('{}\n\nYou can switch fee estimation method'
' on send screen')
OTHER_COINS_ARRIVED_MSG4 = _('You can view and use these coins from'
' Coins popup from PrivateSend options.')
else:
NO_DYNAMIC_FEE_MSG = _('{}\n\nYou can switch to static fee estimation'
' on Fees Preferences tab')
OTHER_COINS_ARRIVED_MSG4 = _('You can view and use these coins from'
' Coins tab.')
def __init__(self, wallet):
    """Initialize mixin state, locks and supported wallet/keystore checks.

    Sets self.state to Initializing/Disabled for supported wallets (standard
    wallet type with bip32/hardware keystore), Unsupported otherwise.
    """
    Logger.__init__(self)
    PSDataMixin.__init__(self, wallet)
    PSKeystoreMixin.__init__(self, wallet)
    KeyPairsMixin.__init__(self, wallet)
    PSOptsMixin.__init__(self, wallet)
    PSUtilsMixin.__init__(self, wallet)
    self.log_handler = PSGUILogHandler(self)
    self.logger = PSManLogAdapter(self.logger, {'psman_id': id(self)})
    self.state_lock = threading.Lock()
    self.states = s = PSStates
    self.mixing_running_states = [s.StartMixing, s.Mixing, s.StopMixing]
    self.no_clean_history_states = [s.Initializing, s.Errored,
                                    s.StartMixing, s.Mixing, s.StopMixing,
                                    s.FindingUntracked]
    self.config = wallet.config
    self._state = PSStates.Unsupported
    self.wallet_types_supported = ['standard']
    self.keystore_types_supported = ['bip32', 'hardware']
    keystore = wallet.db.get('keystore')
    if keystore:
        self.w_ks_type = keystore.get('type', 'unknown')
    else:
        self.w_ks_type = 'unknown'
    self.w_type = wallet.wallet_type
    if (self.w_type in self.wallet_types_supported
            and self.w_ks_type in self.keystore_types_supported):
        if wallet.db.get_ps_data('ps_enabled', False):
            self.state = PSStates.Initializing
        else:
            self.state = PSStates.Disabled
    if self.unsupported:
        supported_w = ', '.join(self.wallet_types_supported)
        supported_ks = ', '.join(self.keystore_types_supported)
        this_type = self.w_type
        this_ks_type = self.w_ks_type
        # Fixed typo in user-facing message: "kestore" -> "keystore".
        self.unsupported_msg = _(f'PrivateSend is currently supported on'
                                 f' next wallet types: "{supported_w}"'
                                 f' and keystore types: "{supported_ks}".'
                                 f'\n\nThis wallet has type "{this_type}"'
                                 f' and keystore type "{this_ks_type}".')
    else:
        self.unsupported_msg = ''
    if self.is_hw_ks:
        self.enable_ps_keystore()
    self.network = None
    self.dash_net = None
    self.loop = None
    self._loop_thread = None
    self.main_taskgroup = None
    self.mix_sessions_lock = asyncio.Lock()
    self.mix_sessions = {}  # dict peer -> PSMixSession
    self.recent_mixes_mns = deque([], 10)  # added from mixing sessions
    self.denoms_lock = threading.Lock()
    self.collateral_lock = threading.Lock()
    self.others_lock = threading.Lock()
    self.new_denoms_wfl_lock = threading.Lock()
    self.new_collateral_wfl_lock = threading.Lock()
    self.pay_collateral_wfl_lock = threading.Lock()
    self.denominate_wfl_lock = threading.Lock()
    self._not_enough_funds = False
    # electrum network disconnect time
    self.disconnect_time = 0
@property
def unsupported(self):
    # True when this wallet type/keystore combination can not use PrivateSend
    return self.state == PSStates.Unsupported
@property
def enabled(self):
    # PrivateSend is considered enabled in any state except
    # Unsupported/Disabled
    return self.state not in [PSStates.Unsupported, PSStates.Disabled]
@property
def is_hw_ks(self):
    # True for hardware keystore wallets
    return self.w_ks_type == 'hardware'
def enable_ps(self):
    """Persist the ps_enabled flag and schedule async initialization.

    For hardware-keystore standard wallets a separate ps_keystore must
    already exist in the wallet db, otherwise enabling is refused.
    """
    if (self.w_type == 'standard' and self.is_hw_ks
            and 'ps_keystore' not in self.wallet.db.data):
        self.logger.info('ps_keystore for hw wallets must be created')
        return
    if not self.enabled:
        self.wallet.db.set_ps_data('ps_enabled', True)
        coro = self._enable_ps()
        asyncio.run_coroutine_threadsafe(coro, self.loop)
async def _enable_ps(self):
    """Async part of enabling: load PS data, find untracked txs, save db."""
    if self.enabled:
        return
    self.state = PSStates.Initializing
    util.trigger_callback('ps-state-changes', self.wallet, None, None)
    # load_and_cleanup is blocking, run it off the event loop
    _load_and_cleanup = self.load_and_cleanup
    await self.loop.run_in_executor(None, _load_and_cleanup)
    await self.find_untracked_ps_txs()
    self.wallet.save_db()
def can_find_untracked(self):
    """Return True when the wallet and network are synced enough to scan
    for untracked PS transactions."""
    wallet = self.wallet
    net = self.network
    if net is None:
        return False
    srv_height = net.get_server_height()
    if srv_height == 0:
        return False
    if net.get_local_height() < srv_height:
        return False
    with wallet.lock:
        # any unverified tx without an islock blocks the scan
        unverified_no_islock = any(
            txid not in wallet.db.islocks for txid in wallet.unverified_tx)
        if unverified_no_islock:
            return False
        if not wallet.is_up_to_date():
            return False
        if not wallet.synchronizer.is_up_to_date():
            return False
    return True
@property
def state(self):
    # Current PSStates value of the manager
    return self._state
@property
def is_waiting(self):
    """True when mixing is running but idle: keypairs are ready and no
    denominate/new-denoms/new-collateral workflow is active."""
    if self.state not in self.mixing_running_states:
        return False
    if self.keypairs_state in (KPStates.NeedCache, KPStates.Caching):
        return False
    active = len(self.denominate_wfl_list)
    active += 1 if self.new_denoms_wfl else 0
    active += 1 if self.new_collateral_wfl else 0
    return active == 0
@state.setter
def state(self, state):
    # Direct assignment; callers that need atomicity hold self.state_lock
    self._state = state
def on_network_start(self, network):
    """Wire up network/dash_net references, callbacks and background tasks."""
    self.network = network
    util.register_callback(self.on_wallet_updated, ['wallet_updated'])
    util.register_callback(self.on_network_status, ['status'])
    self.dash_net = network.dash_net
    self.loop = network.asyncio_loop
    self._loop_thread = network._loop_thread
    # long-running maintenance coroutines on the network event loop
    asyncio.ensure_future(self.clean_keypairs_on_timeout())
    asyncio.ensure_future(self.cleanup_staled_denominate_wfls())
    asyncio.ensure_future(self.trigger_postponed_notifications())
    asyncio.ensure_future(self.broadcast_new_denoms_new_collateral_wfls())
def on_stop_threads(self):
    """Stop mixing if running and unregister network callbacks."""
    if self.state == PSStates.Mixing:
        self.stop_mixing()
    util.unregister_callback(self.on_wallet_updated)
    util.unregister_callback(self.on_network_status)
def on_network_status(self, event, *args):
    """Track server connectivity; stop mixing after ~30s of disconnect."""
    if self.network.is_connected():
        self.disconnect_time = 0
        return
    now = time.time()
    if self.disconnect_time == 0:
        # remember when the outage started
        self.disconnect_time = now
    if now - self.disconnect_time > 30:  # disconnected for 30 seconds
        if self.state == PSStates.Mixing:
            self.stop_mixing(self.NO_NETWORK_STOP_MSG)
async def on_wallet_updated(self, event, *args):
    """On our wallet sync completion, reset funds flag and rescan PS txs."""
    if not self.enabled:
        return
    w = args[0]
    if w != self.wallet:
        return
    if w.is_up_to_date():
        self._not_enough_funds = False
        if self.state in [PSStates.Initializing, PSStates.Ready]:
            await self.find_untracked_ps_txs()
# Methods related to mixing process
def start_mixing(self, password, nowait=True):
    """Validate preconditions and launch async mixing with `password`.

    On failure, emits 'ps-state-changes' with an explanatory message.
    With nowait=False, waits up to 2 seconds for mixing startup.
    """
    w = self.wallet
    msg = None
    if w.is_watching_only():
        msg = self.WATCHING_ONLY_MSG, 'err'
    elif self.all_mixed:
        msg = self.ALL_MIXED_MSG, 'inf'
    elif not self.network or not self.network.is_connected():
        msg = self.NO_NETWORK_MSG, 'err'
    elif not self.dash_net.run_dash_net:
        msg = self.NO_DASH_NET_MSG, 'err'
    if msg:
        msg, inf = msg
        self.logger.info(f'Can not start PrivateSend Mixing: {msg}')
        util.trigger_callback('ps-state-changes', w, msg, inf)
        return
    coro = self.find_untracked_ps_txs()
    asyncio.run_coroutine_threadsafe(coro, self.loop).result()
    with self.state_lock:
        if self.state == PSStates.Ready:
            self.state = PSStates.StartMixing
        elif self.state in [PSStates.Unsupported, PSStates.Disabled]:
            msg = self.NOT_ENABLED_MSG
        elif self.state == PSStates.Initializing:
            msg = self.INITIALIZING_MSG
        elif self.state in self.mixing_running_states:
            msg = self.MIXING_ALREADY_RUNNING_MSG
        elif self.state == PSStates.FindingUntracked:
            msg = self.FIND_UNTRACKED_RUN_MSG
        elif self.state == PSStates.Errored:
            # Bug fix: this branch previously re-tested FindingUntracked,
            # so ERRORED_MSG was unreachable for the Errored state.
            msg = self.ERRORED_MSG
        else:
            msg = self.UNKNOWN_STATE_MSG.format(self.state)
    if msg:
        util.trigger_callback('ps-state-changes', w, msg, None)
        self.logger.info(f'Can not start PrivateSend Mixing: {msg}')
        return
    else:
        util.trigger_callback('ps-state-changes', w, None, None)
    fut = asyncio.run_coroutine_threadsafe(self._start_mixing(password),
                                           self.loop)
    if nowait:
        return
    try:
        fut.result(timeout=2)
    except (asyncio.TimeoutError, asyncio.CancelledError):
        pass
async def _start_mixing(self, password):
    """Spawn the mixing task group (keypairs cache + maintenance loops)."""
    if not self.enabled or not self.network:
        return
    assert not self.main_taskgroup
    self._not_enough_funds = False
    self.main_taskgroup = main_taskgroup = SilentTaskGroup()
    self.logger.info('Starting PrivateSend Mixing')

    async def main():
        try:
            async with main_taskgroup as group:
                # hw wallets first move funds to the PS keystore
                if (self.w_type == 'standard'
                        and self.is_hw_ks):
                    await group.spawn(self._prepare_funds_from_hw_wallet())
                await group.spawn(self._make_keypairs_cache(password))
                await group.spawn(self._check_not_enough_funds())
                await group.spawn(self._check_all_mixed())
                await group.spawn(self._maintain_pay_collateral_tx())
                await group.spawn(self._maintain_collateral_amount())
                await group.spawn(self._maintain_denoms())
                await group.spawn(self._mix_denoms())
        except Exception as e:
            self.logger.info(f'error starting mixing: {str(e)}')
            raise e
    asyncio.run_coroutine_threadsafe(main(), self.loop)
    with self.state_lock:
        self.state = PSStates.Mixing
        self.last_mix_start_time = time.time()
    self.logger.info('Started PrivateSend Mixing')
    w = self.wallet
    util.trigger_callback('ps-state-changes', w, None, None)
async def stop_mixing_from_async_thread(self, msg, msg_type=None):
    """Run blocking stop_mixing in an executor from the event loop."""
    await self.loop.run_in_executor(None, self.stop_mixing, msg, msg_type)
def stop_mixing(self, msg=None, msg_type=None, nowait=True):
    """Request mixing stop, notify GUI, and schedule async teardown.

    With nowait=False, waits for sessions to finish (session timeout + 5s).
    """
    w = self.wallet
    with self.state_lock:
        if self.state == PSStates.Mixing:
            self.state = PSStates.StopMixing
        elif self.state == PSStates.StopMixing:
            # stop already in progress
            return
        else:
            msg = self.MIXING_NOT_RUNNING_MSG
            util.trigger_callback('ps-state-changes', w, msg, 'inf')
            self.logger.info(f'Can not stop PrivateSend Mixing: {msg}')
            return
    if msg:
        self.logger.info(f'Stopping PrivateSend Mixing: {msg}')
        if not msg_type or not msg_type.startswith('inf'):
            stopped_prefix = _('PrivateSend mixing is stopping!')
            msg = f'{stopped_prefix}\n\n{msg}'
        util.trigger_callback('ps-state-changes', w, msg, msg_type)
    else:
        self.logger.info('Stopping PrivateSend Mixing')
        util.trigger_callback('ps-state-changes', w, None, None)
    self.last_mix_stop_time = time.time()  # write early if later time lost
    fut = asyncio.run_coroutine_threadsafe(self._stop_mixing(), self.loop)
    if nowait:
        return
    try:
        fut.result(timeout=PRIVATESEND_SESSION_MSG_TIMEOUT+5)
    except (asyncio.TimeoutError, asyncio.CancelledError):
        pass
@log_exceptions
async def _stop_mixing(self):
    """Async teardown: wait for caching/sessions, cancel tasks, reset state."""
    if self.keypairs_state == KPStates.Caching:
        self.logger.info('Waiting for keypairs caching to finish')
        while self.keypairs_state == KPStates.Caching:
            await asyncio.sleep(0.5)
    if self.main_taskgroup:
        # let active mixing sessions finish before cancelling tasks
        sess_cnt = len(self.mix_sessions)
        if sess_cnt > 0:
            self.logger.info(f'Waiting for {sess_cnt}'
                             f' mixing sessions to finish')
            while sess_cnt > 0:
                await asyncio.sleep(0.5)
                sess_cnt = len(self.mix_sessions)
        try:
            await asyncio.wait_for(self.main_taskgroup.cancel_remaining(),
                                   timeout=2)
        except (asyncio.TimeoutError, asyncio.CancelledError) as e:
            self.logger.debug(f'Exception during main_taskgroup'
                              f' cancellation: {repr(e)}')
        self.main_taskgroup = None
    with self.keypairs_state_lock:
        if self.keypairs_state == KPStates.Ready:
            self.logger.info('Mark keypairs as unused')
            self.keypairs_state = KPStates.Unused
    self.logger.info('Stopped PrivateSend Mixing')
    self.last_mix_stop_time = time.time()
    with self.state_lock:
        self.state = PSStates.Ready
    w = self.wallet
    util.trigger_callback('ps-state-changes', w, None, None)
async def _check_all_mixed(self):
    """Poll every 10s and stop mixing once the all_mixed goal is reached."""
    while not self.main_taskgroup.closed():
        await asyncio.sleep(10)
        if self.all_mixed:
            await self.stop_mixing_from_async_thread(self.ALL_MIXED_MSG,
                                                     'inf')
async def _check_not_enough_funds(self):
    """Clear the not-enough-funds flag 30s after it was set, to retry."""
    while not self.main_taskgroup.closed():
        if self._not_enough_funds:
            await asyncio.sleep(30)
            self._not_enough_funds = False
        await asyncio.sleep(5)
async def _maintain_pay_collateral_tx(self):
    """Keep a prepared pay-collateral workflow available while mixing."""
    # when a password is needed, wait until keypairs cache is Ready
    kp_wait_state = KPStates.Ready if self.need_password() else None
    while not self.main_taskgroup.closed():
        wfl = self.pay_collateral_wfl
        if wfl:
            if not wfl.completed or not wfl.tx_order:
                # drop incomplete/emptied workflow and start over
                await self.cleanup_pay_collateral_wfl()
        elif self.ps_collateral_cnt > 0:
            if kp_wait_state and self.keypairs_state != kp_wait_state:
                self.logger.info('Pay collateral workflow waiting'
                                 ' for keypairs generation')
                await asyncio.sleep(5)
                continue
            if not self.get_confirmed_ps_collateral_data():
                await asyncio.sleep(5)
                continue
            await self.prepare_pay_collateral_wfl()
        await asyncio.sleep(0.25)
async def broadcast_new_denoms_new_collateral_wfls(self):
    """Endless loop broadcasting pending txs of completed denoms/collateral
    workflows; runs independently of mixing state, gated on self.enabled."""
    w = self.wallet
    while True:
        if self.enabled:
            wfl = self.new_denoms_wfl
            if wfl and wfl.completed and wfl.next_to_send(w):
                await self.broadcast_new_denoms_wfl()
            await asyncio.sleep(0.25)
            wfl = self.new_collateral_wfl
            if wfl and wfl.completed and wfl.next_to_send(w):
                await self.broadcast_new_collateral_wfl()
            await asyncio.sleep(0.25)
        else:
            await asyncio.sleep(1)
    async def _maintain_collateral_amount(self):
        """Background task: create a new-collateral workflow when there
        is no ps_collateral utxo and no denoms are pending creation.

        Skips a round (5s) when coins, LLMQ data or keypairs are not
        ready yet.
        """
        kp_wait_state = KPStates.Ready if self.need_password() else None
        while not self.main_taskgroup.closed():
            wfl = self.new_collateral_wfl
            if wfl:
                # drop a partially built or empty workflow
                if not wfl.completed or not wfl.tx_order:
                    await self.cleanup_new_collateral_wfl()
            elif (not self._not_enough_funds
                  and not self.ps_collateral_cnt
                  and not self.calc_need_denoms_amounts(use_cache=True)):
                coins = await self.get_next_coins_for_mixing(for_denoms=False)
                if not coins:
                    await asyncio.sleep(5)
                    continue
                if not self.check_llmq_ready():
                    self.logger.info(_('New collateral workflow: {}')
                                     .format(self.LLMQ_DATA_NOT_READY))
                    await asyncio.sleep(5)
                    continue
                elif kp_wait_state and self.keypairs_state != kp_wait_state:
                    self.logger.info('New collateral workflow waiting'
                                     ' for keypairs generation')
                    await asyncio.sleep(5)
                    continue
                await self.create_new_collateral_wfl()
            await asyncio.sleep(0.25)
    async def _maintain_denoms(self):
        """Background task: create a new-denoms workflow whenever more
        denoms are needed (per calc_need_denoms_amounts) and coins, LLMQ
        data and keypairs are ready."""
        kp_wait_state = KPStates.Ready if self.need_password() else None
        while not self.main_taskgroup.closed():
            wfl = self.new_denoms_wfl
            if wfl:
                # drop a partially built or empty workflow
                if not wfl.completed or not wfl.tx_order:
                    await self.cleanup_new_denoms_wfl()
            elif (not self._not_enough_funds
                  and self.calc_need_denoms_amounts(use_cache=True)):
                coins = await self.get_next_coins_for_mixing()
                if not coins:
                    await asyncio.sleep(5)
                    continue
                if not self.check_llmq_ready():
                    self.logger.info(_('New denoms workflow: {}')
                                     .format(self.LLMQ_DATA_NOT_READY))
                    await asyncio.sleep(5)
                    continue
                elif kp_wait_state and self.keypairs_state != kp_wait_state:
                    self.logger.info('New denoms workflow waiting'
                                     ' for keypairs generation')
                    await asyncio.sleep(5)
                    continue
                await self.create_new_denoms_wfl()
            await asyncio.sleep(0.25)
    async def _mix_denoms(self):
        """Background task: spawn denominate workflows up to max_sessions
        while denoms to mix and a prepared pay-collateral tx exist.

        First clears uncompleted denominate workflows left from earlier
        runs, then spawns start_denominate_wfl tasks on the (captured)
        main taskgroup.
        """
        kp_wait_state = KPStates.Ready if self.need_password() else None
        def _cleanup():
            # drop uncompleted denominate workflows from previous runs
            for uuid in self.denominate_wfl_list:
                wfl = self.get_denominate_wfl(uuid)
                if wfl and not wfl.completed:
                    self._cleanup_denominate_wfl(wfl)
        await self.loop.run_in_executor(None, _cleanup)
        main_taskgroup = self.main_taskgroup
        while not main_taskgroup.closed():
            if (self._denoms_to_mix_cache
                    and self.pay_collateral_wfl
                    and self.active_denominate_wfl_cnt < self.max_sessions):
                if not self.check_llmq_ready():
                    self.logger.info(_('Denominate workflow: {}')
                                     .format(self.LLMQ_DATA_NOT_READY))
                    await asyncio.sleep(5)
                    continue
                elif not self.check_protx_info_completeness():
                    self.logger.info(_('Denominate workflow: {}')
                                     .format(self.MNS_DATA_NOT_READY))
                    await asyncio.sleep(5)
                    continue
                elif kp_wait_state and self.keypairs_state != kp_wait_state:
                    self.logger.info('Denominate workflow waiting'
                                     ' for keypairs generation')
                    await asyncio.sleep(5)
                    continue
                if self.state == PSStates.Mixing:
                    await main_taskgroup.spawn(self.start_denominate_wfl())
            await asyncio.sleep(0.25)
async def start_mix_session(self, denom_value, dsq, wfl_lid):
n_denom = PS_DENOMS_DICT[denom_value]
sess = PSMixSession(self, denom_value, n_denom, dsq, wfl_lid)
peer_str = sess.peer_str
async with self.mix_sessions_lock:
if peer_str in self.mix_sessions:
raise Exception(f'Session with {peer_str} already exists')
await sess.run_peer()
self.mix_sessions[peer_str] = sess
return sess
async def stop_mix_session(self, peer_str):
async with self.mix_sessions_lock:
sess = self.mix_sessions.pop(peer_str)
if not sess:
self.logger.debug(f'Peer {peer_str} not found in mix_session')
return
sess.close_peer()
return sess
# Workflow methods for pay collateral transaction
    def get_confirmed_ps_collateral_data(self):
        """Find a confirmed ps_collateral utxo suitable for spending.

        Iterates wallet ps_collaterals; for the first entry whose utxo is
        confirmed (islocks considered, hardware-keystore coins filtered
        out) returns (outpoint, value, inputs).  Returns None otherwise.
        """
        w = self.wallet
        for outpoint, ps_collateral in w.db.get_ps_collaterals().items():
            addr, value = ps_collateral
            utxos = w.get_utxos([addr], min_rounds=PSCoinRounds.COLLATERAL,
                                confirmed_only=True, consider_islocks=True)
            utxos = self.filter_out_hw_ks_coins(utxos)
            inputs = []
            for utxo in utxos:
                # keep only the utxo matching the recorded outpoint
                if utxo.prevout.to_str() != outpoint:
                    continue
                w.add_input_info(utxo)
                inputs.append(utxo)
            if inputs:
                return outpoint, value, inputs
            else:
                self.logger.wfl_err(f'ps_collateral outpoint {outpoint}'
                                    f' is not confirmed')
    async def prepare_pay_collateral_wfl(self):
        """Prepare the pay-collateral tx in an executor thread.

        On success persists the wallet DB.  On error cleans up the
        workflow (if one was registered) and, for known error types,
        stops mixing with a suitable user message.
        """
        try:
            _prepare = self._prepare_pay_collateral_tx
            res = await self.loop.run_in_executor(None, _prepare)
            if res:
                txid, wfl = res
                self.logger.wfl_ok(f'Completed pay collateral workflow with'
                                   f' tx: {txid}, workflow: {wfl.lid}')
                self.wallet.save_db()
        except Exception as e:
            wfl = self.pay_collateral_wfl
            if wfl:
                self.logger.wfl_err(f'Error creating pay collateral tx:'
                                    f' {str(e)}, workflow: {wfl.lid}')
                await self.cleanup_pay_collateral_wfl(force=True)
            else:
                self.logger.wfl_err(f'Error during creation of pay collateral'
                                    f' worfklow: {str(e)}')
            # map known exception types to a stop-mixing message
            type_e = type(e)
            msg = None
            if type_e == NoDynamicFeeEstimates:
                msg = self.NO_DYNAMIC_FEE_MSG.format(str(e))
            elif type_e == NotFoundInKeypairs:
                msg = self.NOT_FOUND_KEYS_MSG
            elif type_e == SignWithKeypairsFailed:
                msg = self.SIGN_WIHT_KP_FAILED_MSG
            if msg:
                await self.stop_mixing_from_async_thread(msg)
    def _prepare_pay_collateral_tx(self):
        """Build and sign the pay-collateral tx (runs in executor thread).

        Spends a confirmed ps_collateral utxo.  If enough value remains
        for another collateral, change goes to a reserved change address
        tied to the outpoint; otherwise a zero-value OP_RETURN output is
        used.  Returns (txid, wfl), or None when a workflow already
        exists.  Raises on missing collateral or keypair cache misses.
        """
        with self.pay_collateral_wfl_lock:
            if self.pay_collateral_wfl:
                return
            uuid = str(uuid4())
            wfl = PSTxWorkflow(uuid=uuid)
            self.set_pay_collateral_wfl(wfl)
            self.logger.info(f'Started up pay collateral workflow: {wfl.lid}')
        res = self.get_confirmed_ps_collateral_data()
        if not res:
            raise Exception('No confirmed ps_collateral found')
        outpoint, value, inputs = res
        # check input addresses are in keypairs if keypairs cache available
        if self._keypairs_cache:
            input_addrs = [utxo.address for utxo in inputs]
            not_found_addrs = self._find_addrs_not_in_keypairs(input_addrs)
            if not_found_addrs:
                not_found_addrs = ', '.join(list(not_found_addrs))
                raise NotFoundInKeypairs(f'Input addresses is not found'
                                         f' in the keypairs cache:'
                                         f' {not_found_addrs}')
        self.add_ps_spending_collateral(outpoint, wfl.uuid)
        if value >= COLLATERAL_VAL*2:
            # enough value left for another collateral: pay change to a
            # reserved change address associated with this outpoint
            ovalue = value - COLLATERAL_VAL
            output_addr = None
            for addr, data in self.wallet.db.get_ps_reserved().items():
                if data == outpoint:
                    output_addr = addr
                    break
            if not output_addr:
                reserved = self.reserve_addresses(1, for_change=True,
                                                  data=outpoint)
                output_addr = reserved[0]
            outputs = [PartialTxOutput.from_address_and_value(output_addr, ovalue)]
        else:
            # OP_RETURN as output script
            outputs = [PartialTxOutput(scriptpubkey=bfh('6a'), value=0)]
        tx = PartialTransaction.from_io(inputs[:], outputs[:], locktime=0)
        tx.inputs()[0].nsequence = 0xffffffff
        tx = self.sign_transaction(tx, None)
        txid = tx.txid()
        raw_tx = tx.serialize_to_network()
        tx_type = PSTxTypes.PAY_COLLATERAL
        wfl.add_tx(txid=txid, raw_tx=raw_tx, tx_type=tx_type)
        wfl.completed = True
        with self.pay_collateral_wfl_lock:
            # verify the registered workflow was not replaced meanwhile
            saved = self.pay_collateral_wfl
            if not saved:
                raise Exception('pay_collateral_wfl not found')
            if saved.uuid != wfl.uuid:
                raise Exception('pay_collateral_wfl differs from original')
            self.set_pay_collateral_wfl(wfl)
        return txid, wfl
async def cleanup_pay_collateral_wfl(self, force=False):
_cleanup = self._cleanup_pay_collateral_wfl
changed = await self.loop.run_in_executor(None, _cleanup, force)
if changed:
self.wallet.save_db()
    def _cleanup_pay_collateral_wfl(self, force=False):
        """Remove the pay-collateral workflow and its tx data.

        A completed workflow that still has txs queued is kept, unless
        force=True.  Returns True when something changed (caller is
        expected to save the wallet DB).
        """
        with self.pay_collateral_wfl_lock:
            wfl = self.pay_collateral_wfl
            # keep a completed workflow with pending txs unless forced
            if not wfl or wfl.completed and wfl.tx_order and not force:
                return
        w = self.wallet
        if wfl.tx_order:
            for txid in wfl.tx_order[::-1]:  # use reversed tx_order
                if w.db.get_transaction(txid):
                    w.remove_transaction(txid)
                else:
                    self._cleanup_pay_collateral_wfl_tx_data(txid)
        else:
            self._cleanup_pay_collateral_wfl_tx_data()
        return True
    def _cleanup_pay_collateral_wfl_tx_data(self, txid=None):
        """Drop tx data for txid (or finish cleanup when txid is None)
        from the pay-collateral workflow.

        When no txs remain: releases this workflow's spending-collateral
        marks and clears the workflow itself.
        """
        with self.pay_collateral_wfl_lock:
            wfl = self.pay_collateral_wfl
            if not wfl:
                return
            if txid:
                tx_data = wfl.pop_tx(txid)
                if tx_data:
                    self.set_pay_collateral_wfl(wfl)
                    self.logger.info(f'Cleaned up pay collateral tx:'
                                     f' {txid}, workflow: {wfl.lid}')
        if wfl.tx_order:
            return
        w = self.wallet
        # release spending-collateral marks owned by this workflow
        for outpoint, uuid in list(w.db.get_ps_spending_collaterals().items()):
            if uuid != wfl.uuid:
                continue
            with self.collateral_lock:
                self.pop_ps_spending_collateral(outpoint)
        with self.pay_collateral_wfl_lock:
            saved = self.pay_collateral_wfl
            if saved and saved.uuid == wfl.uuid:
                self.clear_pay_collateral_wfl()
        self.logger.info(f'Cleaned up pay collateral workflow: {wfl.lid}')
def _search_pay_collateral_wfl(self, txid, tx):
err = self._check_pay_collateral_tx_err(txid, tx, full_check=False)
if not err:
wfl = self.pay_collateral_wfl
if wfl and wfl.tx_order and txid in wfl.tx_order:
return wfl
def _check_on_pay_collateral_wfl(self, txid, tx):
wfl = self._search_pay_collateral_wfl(txid, tx)
err = self._check_pay_collateral_tx_err(txid, tx)
if not err:
return True
if wfl:
raise AddPSDataError(f'{err}')
else:
return False
    def _process_by_pay_collateral_wfl(self, txid, tx):
        """Account a seen tx against the pay-collateral workflow.

        Pops txid from the workflow; when the last tx is processed,
        releases this workflow's spending-collateral marks and clears
        the workflow.
        """
        wfl = self._search_pay_collateral_wfl(txid, tx)
        if not wfl:
            return
        with self.pay_collateral_wfl_lock:
            saved = self.pay_collateral_wfl
            if not saved or saved.uuid != wfl.uuid:
                return
            tx_data = wfl.pop_tx(txid)
            if tx_data:
                self.set_pay_collateral_wfl(wfl)
                self.logger.wfl_done(f'Processed tx: {txid} from pay'
                                     f' collateral workflow: {wfl.lid}')
        if wfl.tx_order:
            return
        w = self.wallet
        # last tx processed: release spending-collateral marks
        for outpoint, uuid in list(w.db.get_ps_spending_collaterals().items()):
            if uuid != wfl.uuid:
                continue
            with self.collateral_lock:
                self.pop_ps_spending_collateral(outpoint)
        with self.pay_collateral_wfl_lock:
            saved = self.pay_collateral_wfl
            if saved and saved.uuid == wfl.uuid:
                self.clear_pay_collateral_wfl()
        self.logger.wfl_done(f'Finished processing of pay collateral'
                             f' workflow: {wfl.lid}')
def get_pay_collateral_tx(self):
wfl = self.pay_collateral_wfl
if not wfl or not wfl.tx_order:
return
txid = wfl.tx_order[0]
tx_data = wfl.tx_data.get(txid)
if not tx_data:
return
return tx_data.raw_tx
# Workflow methods for new collateral transaction
def new_collateral_from_coins_info(self, coins):
if not coins or len(coins) > 1:
return
coins_val = sum([c.value_sats() for c in coins])
if (coins_val >= self.min_new_denoms_from_coins_val
or coins_val < self.min_new_collateral_from_coins_val):
return
fee_per_kb = self.config.fee_per_kb()
for collateral_val in CREATE_COLLATERAL_VALS[::-1]:
new_collateral_fee = calc_tx_fee(1, 1, fee_per_kb, max_size=True)
if coins_val - new_collateral_fee >= collateral_val:
tx_type = SPEC_TX_NAMES[PSTxTypes.NEW_COLLATERAL]
info = _('Transactions type: {}').format(tx_type)
info += '\n'
info += _('Count of transactions: {}').format(1)
info += '\n'
info += _('Total sent amount: {}').format(coins_val)
info += '\n'
info += _('Total output amount: {}').format(collateral_val)
info += '\n'
info += _('Total fee: {}').format(coins_val - collateral_val)
return info
    def create_new_collateral_wfl_from_gui(self, coins, password):
        """Create, sign and register a new-collateral tx from
        GUI-selected coins (must not run while mixing).

        Returns (wfl, None) on success or (None, error_message) on any
        failure; the workflow is cleaned up on failure.
        """
        if self.state in self.mixing_running_states:
            return None, ('Can not create new collateral as mixing'
                          ' process is currently run.')
        if len(coins) > 1:
            return None, ('Can not create new collateral amount,'
                          ' too many coins selected')
        wfl = self._start_new_collateral_wfl()
        if not wfl:
            return None, ('Can not create new collateral as other new'
                          ' collateral creation process is in progress')
        try:
            w = self.wallet
            txid, tx = self._make_new_collateral_tx(wfl, coins, password)
            if not w.add_transaction(tx):
                raise Exception(f'Transaction with txid: {txid}'
                                f' conflicts with current history')
            # mark the tx as PS new collateral if not already marked
            if not w.db.get_ps_tx(txid)[0] == PSTxTypes.NEW_COLLATERAL:
                self._add_ps_data(txid, tx, PSTxTypes.NEW_COLLATERAL)
            with self.new_collateral_wfl_lock:
                saved = self.new_collateral_wfl
                if not saved:
                    raise Exception('new_collateral_wfl not found')
                if saved.uuid != wfl.uuid:
                    raise Exception('new_collateral_wfl differs from original')
                wfl.completed = True
                self.set_new_collateral_wfl(wfl)
                self.logger.wfl_ok(f'Completed new collateral workflow'
                                   f' with tx: {txid},'
                                   f' workflow: {wfl.lid}')
            return wfl, None
        except Exception as e:
            err = str(e)
            self.logger.wfl_err(f'Error creating new collateral tx:'
                                f' {err}, workflow: {wfl.lid}')
            self._cleanup_new_collateral_wfl(force=True)
            self.logger.info(f'Cleaned up new collateral workflow:'
                             f' {wfl.lid}')
            return None, err
    async def create_new_collateral_wfl(self):
        """Async new-collateral creation used while mixing.

        Picks coins, builds/signs the tx in an executor thread, adds it
        to the wallet and marks the workflow completed.  On error cleans
        up and, for known error types, stops mixing with a message.
        """
        coins_data = await self.get_next_coins_for_mixing(for_denoms=False)
        coins = coins_data['coins']
        _start = self._start_new_collateral_wfl
        wfl = await self.loop.run_in_executor(None, _start)
        if not wfl:
            return
        try:
            _make_tx = self._make_new_collateral_tx
            txid, tx = await self.loop.run_in_executor(None, _make_tx,
                                                       wfl, coins)
            w = self.wallet
            # add_transaction needs to run in network thread
            if not w.add_transaction(tx):
                raise Exception(f'Transaction with txid: {txid}'
                                f' conflicts with current history')
            def _after_create_tx():
                with self.new_collateral_wfl_lock:
                    saved = self.new_collateral_wfl
                    if not saved:
                        raise Exception('new_collateral_wfl not found')
                    if saved.uuid != wfl.uuid:
                        raise Exception('new_collateral_wfl differs'
                                        ' from original')
                    wfl.completed = True
                    self.set_new_collateral_wfl(wfl)
                    self.logger.wfl_ok(f'Completed new collateral workflow'
                                       f' with tx: {txid},'
                                       f' workflow: {wfl.lid}')
            await self.loop.run_in_executor(None, _after_create_tx)
            w.save_db()
        except Exception as e:
            self.logger.wfl_err(f'Error creating new collateral tx:'
                                f' {str(e)}, workflow: {wfl.lid}')
            await self.cleanup_new_collateral_wfl(force=True)
            # map known exception types to a stop-mixing message
            type_e = type(e)
            msg = None
            if type_e == NoDynamicFeeEstimates:
                msg = self.NO_DYNAMIC_FEE_MSG.format(str(e))
            elif type_e == AddPSDataError:
                msg = self.ADD_PS_DATA_ERR_MSG
                type_name = SPEC_TX_NAMES[PSTxTypes.NEW_COLLATERAL]
                msg = f'{msg} {type_name} {txid}:\n{str(e)}'
            elif type_e == NotFoundInKeypairs:
                msg = self.NOT_FOUND_KEYS_MSG
            elif type_e == SignWithKeypairsFailed:
                msg = self.SIGN_WIHT_KP_FAILED_MSG
            elif type_e == NotEnoughFunds:
                # flag is cleared later by the _check_not_enough_funds task
                self._not_enough_funds = True
            if msg:
                await self.stop_mixing_from_async_thread(msg)
def _start_new_collateral_wfl(self):
with self.new_collateral_wfl_lock:
if self.new_collateral_wfl:
return
uuid = str(uuid4())
wfl = PSTxWorkflow(uuid=uuid)
self.set_new_collateral_wfl(wfl)
self.logger.info(f'Started up new collateral workflow: {wfl.lid}')
return self.new_collateral_wfl
    def _make_new_collateral_tx(self, wfl, coins=None, password=None):
        """Build and sign a new-collateral tx for workflow wfl.

        When no coins are given, a minimal denom utxo with the smallest
        rounds is selected.  A single PS coin (not MIX_ORIGIN) is spent
        without change; otherwise a regular tx with change is made.
        Returns (txid, tx); raises on inconsistent workflow state, lack
        of funds, too large utxo or keypair cache misses.
        """
        with self.new_collateral_wfl_lock:
            saved = self.new_collateral_wfl
            if not saved:
                raise Exception('new_collateral_wfl not found')
            if saved.uuid != wfl.uuid:
                raise Exception('new_collateral_wfl differs from original')
        w = self.wallet
        fee_per_kb = self.config.fee_per_kb()
        uuid = wfl.uuid
        oaddr = self.reserve_addresses(1, data=uuid)[0]
        if not coins:
            # try to select a minimal denom utxo with minimal rounds
            coins = w.get_utxos(None, mature_only=True, confirmed_only=True,
                                consider_islocks=True, min_rounds=0)
            coins = [c for c in coins if c.value_sats() == MIN_DENOM_VAL]
            coins = self.filter_out_hw_ks_coins(coins)
            if not coins:
                raise NotEnoughFunds()
            coins = sorted(coins, key=lambda x: x.ps_rounds)
            coins = coins[0:1]
        no_change = False
        outputs = None
        coins_val = sum([c.value_sats() for c in coins])
        if (len(coins) == 1  # Minimal denom or PS other selected, no change
                and coins[0].ps_rounds is not None
                and coins[0].ps_rounds != PSCoinRounds.MIX_ORIGIN):
            if coins_val >= self.min_new_denoms_from_coins_val:
                raise TooLargeUtxoVal('To large utxo selected')
            no_change = True
        if no_change:
            # pick the largest collateral value still payable after fees
            for val in CREATE_COLLATERAL_VALS[::-1]:
                new_collateral_fee = calc_tx_fee(1, 1, fee_per_kb,
                                                 max_size=True)
                if coins_val - new_collateral_fee < val:
                    continue
                outputs = [PartialTxOutput.from_address_and_value(oaddr, val)]
                break
            if outputs is None:
                raise NotEnoughFunds()
        else:
            val = CREATE_COLLATERAL_VAL
            outputs = [PartialTxOutput.from_address_and_value(oaddr, val)]
        tx = w.make_unsigned_transaction(coins=coins, outputs=outputs)
        inputs = tx.inputs()
        # check input addresses are in keypairs if keypairs cache available
        if self._keypairs_cache:
            input_addrs = [utxo.address for utxo in inputs]
            not_found_addrs = self._find_addrs_not_in_keypairs(input_addrs)
            if not_found_addrs:
                not_found_addrs = ', '.join(list(not_found_addrs))
                raise NotFoundInKeypairs(f'Input addresses is not found'
                                         f' in the keypairs cache:'
                                         f' {not_found_addrs}')
        if no_change:
            tx = PartialTransaction.from_io(inputs[:], outputs[:], locktime=0)
            for txin in tx.inputs():
                txin.nsequence = 0xffffffff
        else:  # use first input address as a change, use selected inputs
            change_addr = inputs[0].address
            tx = w.make_unsigned_transaction(coins=inputs, outputs=outputs,
                                             change_addr=change_addr)
        tx = self.sign_transaction(tx, password)
        # sanity check that the paid fee is not abnormally high
        estimated_fee = calc_tx_fee(len(tx.inputs()), len(tx.outputs()),
                                    fee_per_kb, max_size=True)
        overfee = tx.get_fee() - estimated_fee
        assert overfee < self.min_new_collateral_from_coins_val, 'too high fee'
        txid = tx.txid()
        raw_tx = tx.serialize_to_network()
        tx_type = PSTxTypes.NEW_COLLATERAL
        wfl.add_tx(txid=txid, raw_tx=raw_tx, tx_type=tx_type)
        with self.new_collateral_wfl_lock:
            saved = self.new_collateral_wfl
            if not saved:
                raise Exception('new_collateral_wfl not found')
            if saved.uuid != wfl.uuid:
                raise Exception('new_collateral_wfl differs from original')
            self.set_new_collateral_wfl(wfl)
        return txid, tx
async def cleanup_new_collateral_wfl(self, force=False):
_cleanup = self._cleanup_new_collateral_wfl
changed = await self.loop.run_in_executor(None, _cleanup, force)
if changed:
self.wallet.save_db()
    def _cleanup_new_collateral_wfl(self, force=False):
        """Remove the new-collateral workflow and its tx data.

        A completed workflow that still has txs queued is kept, unless
        force=True.  Returns True when something changed (caller is
        expected to save the wallet DB).
        """
        with self.new_collateral_wfl_lock:
            wfl = self.new_collateral_wfl
            # keep a completed workflow with pending txs unless forced
            if not wfl or wfl.completed and wfl.tx_order and not force:
                return
        w = self.wallet
        if wfl.tx_order:
            for txid in wfl.tx_order[::-1]:  # use reversed tx_order
                if w.db.get_transaction(txid):
                    w.remove_transaction(txid)
                else:
                    self._cleanup_new_collateral_wfl_tx_data(txid)
        else:
            self._cleanup_new_collateral_wfl_tx_data()
        return True
    def _cleanup_new_collateral_wfl_tx_data(self, txid=None):
        """Drop tx data for txid (or finish cleanup when txid is None)
        from the new-collateral workflow.

        When no txs remain: releases the workflow's reserved addresses
        and clears the workflow itself.
        """
        with self.new_collateral_wfl_lock:
            wfl = self.new_collateral_wfl
            if not wfl:
                return
            if txid:
                tx_data = wfl.pop_tx(txid)
                if tx_data:
                    self.set_new_collateral_wfl(wfl)
                    self.logger.info(f'Cleaned up new collateral tx:'
                                     f' {txid}, workflow: {wfl.lid}')
        if wfl.tx_order:
            return
        w = self.wallet
        # release addresses reserved for this workflow
        for addr in w.db.select_ps_reserved(data=wfl.uuid):
            self.pop_ps_reserved(addr)
        with self.new_collateral_wfl_lock:
            saved = self.new_collateral_wfl
            if saved and saved.uuid == wfl.uuid:
                self.clear_new_collateral_wfl()
        self.logger.info(f'Cleaned up new collateral workflow: {wfl.lid}')
    async def broadcast_new_collateral_wfl(self):
        """Broadcast the next queued tx of the completed new-collateral
        workflow.

        On failed send the workflow is re-saved and the error logged; on
        success the broadcasted tx is processed out of the workflow.
        """
        def _check_wfl():
            with self.new_collateral_wfl_lock:
                wfl = self.new_collateral_wfl
                if not wfl:
                    return
                if not wfl.completed:
                    return
            return wfl
        wfl = await self.loop.run_in_executor(None, _check_wfl)
        if not wfl:
            return
        w = self.wallet
        tx_data = wfl.next_to_send(w)
        if not tx_data:
            return
        txid = tx_data.txid
        sent, err = await tx_data.send(self)
        if err:
            def _on_fail():
                with self.new_collateral_wfl_lock:
                    saved = self.new_collateral_wfl
                    if not saved:
                        raise Exception('new_collateral_wfl not found')
                    if saved.uuid != wfl.uuid:
                        raise Exception('new_collateral_wfl differs'
                                        ' from original')
                    # re-save workflow state after the send attempt
                    self.set_new_collateral_wfl(wfl)
                self.logger.wfl_err(f'Failed broadcast of new collateral tx'
                                    f' {txid}: {err}, workflow {wfl.lid}')
            await self.loop.run_in_executor(None, _on_fail)
        if sent:
            def _on_success():
                with self.new_collateral_wfl_lock:
                    saved = self.new_collateral_wfl
                    if not saved:
                        raise Exception('new_collateral_wfl not found')
                    if saved.uuid != wfl.uuid:
                        raise Exception('new_collateral_wfl differs'
                                        ' from original')
                    self.set_new_collateral_wfl(wfl)
                self.logger.wfl_done(f'Broadcasted transaction {txid} from new'
                                     f' collateral workflow: {wfl.lid}')
                tx = Transaction(wfl.tx_data[txid].raw_tx)
                self._process_by_new_collateral_wfl(txid, tx)
                if not wfl.next_to_send(w):
                    self.logger.wfl_done(f'Broadcast completed for new'
                                         f' collateral workflow: {wfl.lid}')
            await self.loop.run_in_executor(None, _on_success)
def _search_new_collateral_wfl(self, txid, tx):
err = self._check_new_collateral_tx_err(txid, tx, full_check=False)
if not err:
wfl = self.new_collateral_wfl
if wfl and wfl.tx_order and txid in wfl.tx_order:
return wfl
def _check_on_new_collateral_wfl(self, txid, tx):
wfl = self._search_new_collateral_wfl(txid, tx)
err = self._check_new_collateral_tx_err(txid, tx)
if not err:
return True
if wfl:
raise AddPSDataError(f'{err}')
else:
return False
    def _process_by_new_collateral_wfl(self, txid, tx):
        """Account a seen tx against the new-collateral workflow.

        Pops txid from the workflow; when the last tx is processed,
        releases the workflow's reserved addresses and clears it.
        """
        wfl = self._search_new_collateral_wfl(txid, tx)
        if not wfl:
            return
        with self.new_collateral_wfl_lock:
            saved = self.new_collateral_wfl
            if not saved or saved.uuid != wfl.uuid:
                return
            tx_data = wfl.pop_tx(txid)
            if tx_data:
                self.set_new_collateral_wfl(wfl)
                self.logger.wfl_done(f'Processed tx: {txid} from new'
                                     f' collateral workflow: {wfl.lid}')
        if wfl.tx_order:
            return
        w = self.wallet
        # last tx processed: release reserved addresses
        for addr in w.db.select_ps_reserved(data=wfl.uuid):
            self.pop_ps_reserved(addr)
        with self.new_collateral_wfl_lock:
            saved = self.new_collateral_wfl
            if saved and saved.uuid == wfl.uuid:
                self.clear_new_collateral_wfl()
        self.logger.wfl_done(f'Finished processing of new collateral'
                             f' workflow: {wfl.lid}')
# Workflow methods for new denoms transaction
def new_denoms_from_coins_info(self, coins):
if not coins or len(coins) > 1:
return
coins_val = sum([c.value_sats() for c in coins])
if coins_val < self.min_new_denoms_from_coins_val:
return
fee_per_kb = self.config.fee_per_kb()
denoms_amounts = self._calc_denoms_amounts_from_coins(coins,
fee_per_kb)
if denoms_amounts:
tx_cnt = len(denoms_amounts)
outputs_val = sum([sum(amounts) for amounts in denoms_amounts])
tx_type = SPEC_TX_NAMES[PSTxTypes.NEW_DENOMS]
info = _('Transactions type: {}').format(tx_type)
info += '\n'
info += _('Count of transactions: {}').format(tx_cnt)
info += '\n'
info += _('Total sent amount: {}').format(coins_val)
info += '\n'
info += _('Total output amount: {}').format(outputs_val)
info += '\n'
info += _('Total fee: {}').format(coins_val - outputs_val)
return info
    def create_new_denoms_wfl_from_gui(self, coins, password):
        """Create, sign and register a chain of new-denoms txs from
        GUI-selected coins (must not run while mixing).

        Each subsequent tx spends the change output of the previous one.
        Returns (wfl, None) on success or (None, error_message) on
        failure; the workflow is cleaned up on failure.
        """
        if self.state in self.mixing_running_states:
            return None, ('Can not create new denoms as mixing process'
                          ' is currently run.')
        if len(coins) > 1:
            return None, ('Can not create new denoms,'
                          ' too many coins selected')
        wfl, outputs_amounts = self._start_new_denoms_wfl(coins,
                                                          use_all_coins=True)
        if not outputs_amounts:
            return None, ('Can not create new denoms,'
                          ' not enough coins selected')
        if not wfl:
            return None, ('Can not create new denoms as other new'
                          ' denoms creation process is in progress')
        last_tx_idx = len(outputs_amounts) - 1
        for i, tx_amounts in enumerate(outputs_amounts):
            try:
                w = self.wallet
                txid, tx = self._make_new_denoms_tx(wfl, tx_amounts,
                                                    last_tx_idx, i,
                                                    coins, password,
                                                    use_all_coins=True)
                if not w.add_transaction(tx):
                    raise Exception(f'Transaction with txid: {txid}'
                                    f' conflicts with current history')
                # mark the tx as PS new denoms if not already marked
                if not w.db.get_ps_tx(txid)[0] == PSTxTypes.NEW_DENOMS:
                    self._add_ps_data(txid, tx, PSTxTypes.NEW_DENOMS)
                self.logger.info(f'Created new denoms tx: {txid},'
                                 f' workflow: {wfl.lid}')
                if i == last_tx_idx:
                    with self.new_denoms_wfl_lock:
                        saved = self.new_denoms_wfl
                        if not saved:
                            raise Exception('new_denoms_wfl not found')
                        if saved.uuid != wfl.uuid:
                            raise Exception('new_denoms_wfl differs'
                                            ' from original')
                        wfl.completed = True
                        self.set_new_denoms_wfl(wfl)
                        self.logger.wfl_ok(f'Completed new denoms'
                                           f' workflow: {wfl.lid}')
                    return wfl, None
                else:
                    # select the change utxo paid back to the first input
                    # address as the coins for the next tx of the chain
                    txin0 = copy.deepcopy(tx.inputs()[0])
                    txin0_addr = w.get_txin_address(txin0)
                    utxos = w.get_utxos([txin0_addr],
                                        min_rounds=PSCoinRounds.OTHER)
                    change_outpoint = None
                    for change_idx, o in enumerate(tx.outputs()):
                        if o.address == txin0_addr:
                            change_outpoint = f'{txid}:{change_idx}'
                            break
                    coins = []
                    for utxo in utxos:
                        if utxo.prevout.to_str() != change_outpoint:
                            continue
                        coins.append(utxo)
            except Exception as e:
                err = str(e)
                self.logger.wfl_err(f'Error creating new denoms tx:'
                                    f' {err}, workflow: {wfl.lid}')
                self._cleanup_new_denoms_wfl(force=True)
                self.logger.info(f'Cleaned up new denoms workflow:'
                                 f' {wfl.lid}')
                return None, err
    async def create_new_denoms_wfl(self):
        """Async new-denoms creation used while mixing.

        Builds/signs each tx of the chain in an executor thread; coins
        for each subsequent tx are refreshed and extended with the change
        of the previous tx.  On error cleans up and, for known error
        types, stops mixing with a message.
        """
        coins_data = await self.get_next_coins_for_mixing()
        coins = coins_data['coins']
        if not coins:
            return
        _start = self._start_new_denoms_wfl
        wfl, outputs_amounts = await self.loop.run_in_executor(None, _start,
                                                               coins)
        if not wfl:
            return
        last_tx_idx = len(outputs_amounts) - 1
        for i, tx_amounts in enumerate(outputs_amounts):
            try:
                w = self.wallet
                _make_tx = self._make_new_denoms_tx
                txid, tx = await self.loop.run_in_executor(None, _make_tx,
                                                           wfl, tx_amounts,
                                                           last_tx_idx, i,
                                                           coins)
                # add_transaction needs to run in network thread
                if not w.add_transaction(tx):
                    raise Exception(f'Transaction with txid: {txid}'
                                    f' conflicts with current history')
                def _after_create_tx():
                    with self.new_denoms_wfl_lock:
                        self.logger.info(f'Created new denoms tx: {txid},'
                                         f' workflow: {wfl.lid}')
                        if i == last_tx_idx:
                            saved = self.new_denoms_wfl
                            if not saved:
                                raise Exception('new_denoms_wfl not found')
                            if saved.uuid != wfl.uuid:
                                raise Exception('new_denoms_wfl differs'
                                                ' from original')
                            wfl.completed = True
                            self.set_new_denoms_wfl(wfl)
                            self.logger.wfl_ok(f'Completed new denoms'
                                               f' workflow: {wfl.lid}')
                    # refresh coins for the next tx; include the change
                    # utxo paid back to the first input address
                    coins_data = self._get_next_coins_for_mixing()
                    coins = coins_data['coins']
                    txin0 = copy.deepcopy(tx.inputs()[0])
                    txin0_addr = w.get_txin_address(txin0)
                    if i != last_tx_idx:
                        utxos = w.get_utxos([txin0_addr])
                        change_outpoint = None
                        for change_idx, o in enumerate(tx.outputs()):
                            if o.address == txin0_addr:
                                change_outpoint = f'{txid}:{change_idx}'
                                break
                        for utxo in utxos:
                            if utxo.prevout.to_str() != change_outpoint:
                                continue
                            coins.append(utxo)
                    if self.group_origin_coins_by_addr:
                        coins = [c for c in coins if c.address == txin0_addr]
                    return coins
                coins = await self.loop.run_in_executor(None, _after_create_tx)
                w.save_db()
            except Exception as e:
                self.logger.wfl_err(f'Error creating new denoms tx:'
                                    f' {str(e)}, workflow: {wfl.lid}')
                await self.cleanup_new_denoms_wfl(force=True)
                # map known exception types to a stop-mixing message
                type_e = type(e)
                msg = None
                if type_e == NoDynamicFeeEstimates:
                    msg = self.NO_DYNAMIC_FEE_MSG.format(str(e))
                elif type_e == AddPSDataError:
                    msg = self.ADD_PS_DATA_ERR_MSG
                    type_name = SPEC_TX_NAMES[PSTxTypes.NEW_DENOMS]
                    msg = f'{msg} {type_name} {txid}:\n{str(e)}'
                elif type_e == NotFoundInKeypairs:
                    msg = self.NOT_FOUND_KEYS_MSG
                elif type_e == SignWithKeypairsFailed:
                    msg = self.SIGN_WIHT_KP_FAILED_MSG
                elif type_e == NotEnoughFunds:
                    # flag cleared later by _check_not_enough_funds task
                    self._not_enough_funds = True
                if msg:
                    await self.stop_mixing_from_async_thread(msg)
                break
    def _start_new_denoms_wfl(self, coins, use_all_coins=False):
        """Compute needed denom amounts and atomically register a
        new-denoms workflow.

        Returns (wfl, outputs_amounts), or (None, None) when no amounts
        are needed or another new-denoms workflow already exists.
        """
        outputs_amounts = \
            self.calc_need_denoms_amounts(coins=coins,
                                          use_all_coins=use_all_coins)
        if not outputs_amounts:
            return None, None
        # all three workflow locks are held while registering
        with self.new_denoms_wfl_lock, \
                self.pay_collateral_wfl_lock, \
                self.new_collateral_wfl_lock:
            if self.new_denoms_wfl:
                return None, None
            uuid = str(uuid4())
            wfl = PSTxWorkflow(uuid=uuid)
            self.set_new_denoms_wfl(wfl)
            self.logger.info(f'Started up new denoms workflow: {wfl.lid}')
        return wfl, outputs_amounts
    def _make_new_denoms_tx(self, wfl, tx_amounts, last_tx_idx, i,
                            coins, password=None, use_all_coins=False):
        """Build and sign the i-th new-denoms tx of workflow wfl.

        The last tx of the chain is made without change when
        use_all_coins is set; otherwise change goes to the first input
        address.  Returns (txid, tx); raises on keypair cache misses or
        inconsistent workflow state.
        """
        w = self.wallet
        # try to create new denoms tx with change output at first
        addrs_cnt = len(tx_amounts)
        oaddrs = self.reserve_addresses(addrs_cnt, data=wfl.uuid)
        outputs = [PartialTxOutput.from_address_and_value(addr, a)
                   for addr, a in zip(oaddrs, tx_amounts)]
        tx = w.make_unsigned_transaction(coins=coins, outputs=outputs)
        inputs = tx.inputs()
        # check input addresses are in keypairs if keypairs cache available
        if self._keypairs_cache:
            input_addrs = [utxo.address for utxo in inputs]
            not_found_addrs = self._find_addrs_not_in_keypairs(input_addrs)
            if not_found_addrs:
                not_found_addrs = ', '.join(list(not_found_addrs))
                raise NotFoundInKeypairs(f'Input addresses is not found'
                                         f' in the keypairs cache:'
                                         f' {not_found_addrs}')
        no_change = False
        fee_per_kb = self.config.fee_per_kb()
        if i == last_tx_idx:
            if use_all_coins:
                no_change = True
        if no_change:
            tx = PartialTransaction.from_io(inputs[:], outputs[:], locktime=0)
            for txin in tx.inputs():
                txin.nsequence = 0xffffffff
        else:
            # use first input address as a change, use selected inputs
            in0 = inputs[0].address
            tx = w.make_unsigned_transaction(coins=inputs, outputs=outputs,
                                             change_addr=in0)
        tx = self.sign_transaction(tx, password)
        # sanity check that the paid fee is not abnormally high
        estimated_fee = calc_tx_fee(len(tx.inputs()), len(tx.outputs()),
                                    fee_per_kb, max_size=True)
        overfee = tx.get_fee() - estimated_fee
        assert overfee < self.min_new_collateral_from_coins_val, 'too high fee'
        txid = tx.txid()
        raw_tx = tx.serialize_to_network()
        tx_type = PSTxTypes.NEW_DENOMS
        wfl.add_tx(txid=txid, raw_tx=raw_tx, tx_type=tx_type)
        with self.new_denoms_wfl_lock:
            saved = self.new_denoms_wfl
            if not saved:
                raise Exception('new_denoms_wfl not found')
            if saved.uuid != wfl.uuid:
                raise Exception('new_denoms_wfl differs from original')
            self.set_new_denoms_wfl(wfl)
        return txid, tx
async def cleanup_new_denoms_wfl(self, force=False):
_cleanup = self._cleanup_new_denoms_wfl
changed = await self.loop.run_in_executor(None, _cleanup, force)
if changed:
self.wallet.save_db()
    def _cleanup_new_denoms_wfl(self, force=False):
        """Remove the new-denoms workflow and its tx data.

        A completed workflow that still has txs queued is kept, unless
        force=True.  Returns True when something changed (caller is
        expected to save the wallet DB).
        """
        with self.new_denoms_wfl_lock:
            wfl = self.new_denoms_wfl
            # keep a completed workflow with pending txs unless forced
            if not wfl or wfl.completed and wfl.tx_order and not force:
                return
        w = self.wallet
        if wfl.tx_order:
            for txid in wfl.tx_order[::-1]:  # use reversed tx_order
                if w.db.get_transaction(txid):
                    w.remove_transaction(txid)
                else:
                    self._cleanup_new_denoms_wfl_tx_data(txid)
        else:
            self._cleanup_new_denoms_wfl_tx_data()
        return True
    def _cleanup_new_denoms_wfl_tx_data(self, txid=None):
        """Drop tx data for txid (or finish cleanup when txid is None)
        from the new-denoms workflow.

        When no txs remain: releases the workflow's reserved addresses
        and clears the workflow itself.
        """
        with self.new_denoms_wfl_lock:
            wfl = self.new_denoms_wfl
            if not wfl:
                return
            if txid:
                tx_data = wfl.pop_tx(txid)
                if tx_data:
                    self.set_new_denoms_wfl(wfl)
                    self.logger.info(f'Cleaned up new denoms tx:'
                                     f' {txid}, workflow: {wfl.lid}')
        if wfl.tx_order:
            return
        w = self.wallet
        # release addresses reserved for this workflow
        for addr in w.db.select_ps_reserved(data=wfl.uuid):
            self.pop_ps_reserved(addr)
        with self.new_denoms_wfl_lock:
            saved = self.new_denoms_wfl
            if saved and saved.uuid == wfl.uuid:
                self.clear_new_denoms_wfl()
        self.logger.info(f'Cleaned up new denoms workflow: {wfl.lid}')
    async def broadcast_new_denoms_wfl(self):
        """Broadcast the next queued tx of the completed new-denoms
        workflow.

        On failed send the workflow is re-saved and the error logged; on
        success last_denoms_tx_time is updated and the broadcasted tx is
        processed out of the workflow.
        """
        def _check_wfl():
            with self.new_denoms_wfl_lock:
                wfl = self.new_denoms_wfl
                if not wfl:
                    return
                if not wfl.completed:
                    return
            return wfl
        wfl = await self.loop.run_in_executor(None, _check_wfl)
        if not wfl:
            return
        w = self.wallet
        tx_data = wfl.next_to_send(w)
        if not tx_data:
            return
        txid = tx_data.txid
        sent, err = await tx_data.send(self)
        if err:
            def _on_fail():
                with self.new_denoms_wfl_lock:
                    saved = self.new_denoms_wfl
                    if not saved:
                        raise Exception('new_denoms_wfl not found')
                    if saved.uuid != wfl.uuid:
                        raise Exception('new_denoms_wfl differs from original')
                    # re-save workflow state after the send attempt
                    self.set_new_denoms_wfl(wfl)
                self.logger.wfl_err(f'Failed broadcast of new denoms tx'
                                    f' {txid}: {err}, workflow {wfl.lid}')
            await self.loop.run_in_executor(None, _on_fail)
        if sent:
            def _on_success():
                with self.new_denoms_wfl_lock:
                    saved = self.new_denoms_wfl
                    if not saved:
                        raise Exception('new_denoms_wfl not found')
                    if saved.uuid != wfl.uuid:
                        raise Exception('new_denoms_wfl differs from original')
                    self.set_new_denoms_wfl(wfl)
                self.logger.wfl_done(f'Broadcasted transaction {txid} from new'
                                     f' denoms workflow: {wfl.lid}')
                self.last_denoms_tx_time = time.time()
                tx = Transaction(wfl.tx_data[txid].raw_tx)
                self._process_by_new_denoms_wfl(txid, tx)
                if not wfl.next_to_send(w):
                    self.logger.wfl_done(f'Broadcast completed for new denoms'
                                         f' workflow: {wfl.lid}')
            await self.loop.run_in_executor(None, _on_success)
def _search_new_denoms_wfl(self, txid, tx):
err = self._check_new_denoms_tx_err(txid, tx, full_check=False)
if not err:
wfl = self.new_denoms_wfl
if wfl and wfl.tx_order and txid in wfl.tx_order:
return wfl
def _check_on_new_denoms_wfl(self, txid, tx):
wfl = self._search_new_denoms_wfl(txid, tx)
err = self._check_new_denoms_tx_err(txid, tx)
if not err:
return True
if wfl:
raise AddPSDataError(f'{err}')
else:
return False
    def _process_by_new_denoms_wfl(self, txid, tx):
        """Account a seen tx against the new-denoms workflow.

        Pops txid from the workflow; when the last tx is processed,
        releases the workflow's reserved addresses and clears it.
        """
        wfl = self._search_new_denoms_wfl(txid, tx)
        if not wfl:
            return
        with self.new_denoms_wfl_lock:
            saved = self.new_denoms_wfl
            if not saved or saved.uuid != wfl.uuid:
                return
            tx_data = wfl.pop_tx(txid)
            if tx_data:
                self.set_new_denoms_wfl(wfl)
                self.logger.wfl_done(f'Processed tx: {txid} from new denoms'
                                     f' workflow: {wfl.lid}')
        if wfl.tx_order:
            return
        w = self.wallet
        # last tx processed: release reserved addresses
        for addr in w.db.select_ps_reserved(data=wfl.uuid):
            self.pop_ps_reserved(addr)
        with self.new_denoms_wfl_lock:
            saved = self.new_denoms_wfl
            if saved and saved.uuid == wfl.uuid:
                self.clear_new_denoms_wfl()
        self.logger.wfl_done(f'Finished processing of new denoms'
                             f' workflow: {wfl.lid}')
# Workflow methods for denominate transaction
async def cleanup_staled_denominate_wfls(self):
def _cleanup_staled():
changed = False
for uuid in self.denominate_wfl_list:
wfl = self.get_denominate_wfl(uuid)
if not wfl or not wfl.completed:
continue
now = time.time()
if now - wfl.completed > self.wait_for_mn_txs_time:
self.logger.info(f'Cleaning staled denominate'
f' workflow: {wfl.lid}')
self._cleanup_denominate_wfl(wfl)
changed = True
return changed
while True:
if self.enabled:
done = await self.loop.run_in_executor(None, _cleanup_staled)
if done:
self.wallet.save_db()
await asyncio.sleep(self.wait_for_mn_txs_time/12)
    async def start_denominate_wfl(self):
        """Run one denominate (mixing) session end to end.

        Either joins a recently-seen masternode queue (dsq) or creates a
        new queue on a random masternode, then drives the PrivateSend
        message exchange: dsa -> dsq(ready) -> dsi -> dsf -> dss -> dsc.
        On any error the mix session is closed and the local workflow is
        cleaned up in the finally block.
        """
        wfl = None
        try:
            _start = self._start_denominate_wfl
            dsq = None
            session = None
            # ~2/3 of attempts try to reuse a recently observed dsq queue
            if random.random() > 0.33:
                self.logger.debug('try to get masternode from recent dsq')
                recent_mns = self.recent_mixes_mns
                while self.state == PSStates.Mixing:
                    dsq = self.dash_net.get_recent_dsq(recent_mns)
                    if dsq is not None:
                        self.logger.debug(f'get dsq from recent dsq queue'
                                          f' {dsq.masternodeOutPoint}')
                        # map the queue's denom bits to our denom value
                        dval = PS_DENOM_REVERSE_DICT[dsq.nDenom]
                        wfl = await self.loop.run_in_executor(None,
                                                              _start, dval)
                        break
                    await asyncio.sleep(0.5)
            else:
                self.logger.debug('try to create new queue'
                                  ' on random masternode')
                wfl = await self.loop.run_in_executor(None, _start)
            if not wfl:
                return
            if self.state != PSStates.Mixing:
                raise Exception('Mixing is finished')
            else:
                session = await self.start_mix_session(wfl.denom, dsq, wfl.lid)
            # step 1: announce participation with a pay-collateral tx (dsa)
            pay_collateral_tx = self.get_pay_collateral_tx()
            if not pay_collateral_tx:
                raise Exception('Absent suitable pay collateral tx')
            await session.send_dsa(pay_collateral_tx)
            while True:
                cmd, res = await session.read_next_msg(wfl)
                if cmd == 'dssu':
                    continue
                elif cmd == 'dsq' and session.fReady:
                    break
                else:
                    raise Exception(f'Unsolisited cmd: {cmd} after dsa sent')
            # step 2: submit our inputs/outputs (dsi), wait for final tx (dsf)
            pay_collateral_tx = self.get_pay_collateral_tx()
            if not pay_collateral_tx:
                raise Exception('Absent suitable pay collateral tx')
            final_tx = None
            await session.send_dsi(wfl.inputs, pay_collateral_tx, wfl.outputs)
            while True:
                cmd, res = await session.read_next_msg(wfl)
                if cmd == 'dssu':
                    continue
                elif cmd == 'dsf':
                    final_tx = PartialTransaction.from_tx(res)
                    break
                else:
                    raise Exception(f'Unsolisited cmd: {cmd} after dsi sent')
            # step 3: sign our inputs of the final tx (dss), wait for dsc
            signed_inputs = self._sign_inputs(final_tx, wfl.inputs)
            await session.send_dss(signed_inputs)
            while True:
                cmd, res = await session.read_next_msg(wfl)
                if cmd == 'dssu':
                    continue
                elif cmd == 'dsc':
                    def _on_dsc():
                        # mark workflow completed (runs in executor thread)
                        with self.denominate_wfl_lock:
                            saved = self.get_denominate_wfl(wfl.uuid)
                            if saved:
                                saved.completed = time.time()
                                self.set_denominate_wfl(saved)
                                return saved
                            else:  # already processed from _add_ps_data
                                self.logger.debug(f'denominate workflow:'
                                                  f' {wfl.lid} not found')
                    saved = await self.loop.run_in_executor(None, _on_dsc)
                    if saved:
                        wfl = saved
                        self.wallet.save_db()
                    break
                else:
                    raise Exception(f'Unsolisited cmd: {cmd} after dss sent')
            self.logger.wfl_ok(f'Completed denominate workflow: {wfl.lid}')
        except Exception as e:
            # NOTE(review): on Python >= 3.8 asyncio.CancelledError derives
            # from BaseException and never reaches this handler; the
            # type_e check below looks like a legacy guard — confirm.
            type_e = type(e)
            if type_e != asyncio.CancelledError:
                if wfl:
                    self.logger.wfl_err(f'Error in denominate worfklow:'
                                        f' {str(e)}, workflow: {wfl.lid}')
                else:
                    self.logger.wfl_err(f'Error during creation of denominate'
                                        f' worfklow: {str(e)}')
                msg = None
                if type_e == NoDynamicFeeEstimates:
                    msg = self.NO_DYNAMIC_FEE_MSG.format(str(e))
                elif type_e == NotFoundInKeypairs:
                    msg = self.NOT_FOUND_KEYS_MSG
                elif type_e == SignWithKeypairsFailed:
                    msg = self.SIGN_WIHT_KP_FAILED_MSG
                if msg:
                    await self.stop_mixing_from_async_thread(msg)
        finally:
            # always close the session and clean the unfinished workflow
            if session:
                await self.stop_mix_session(session.peer_str)
            if wfl:
                await self.cleanup_denominate_wfl(wfl)
    def _select_denoms_to_mix(self, denom_value=None):
        """Randomly pick spendable denom outpoints of one denom value.

        Returns (inputs, denom_value) where inputs is a list of outpoint
        strings, or (None, None) when nothing suitable is found.  If
        denom_value is None it is fixed by the first accepted denom.
        """
        if not self._denoms_to_mix_cache:
            self.logger.debug('No suitable denoms to mix,'
                              ' _denoms_to_mix_cache is empty')
            return None, None
        if denom_value is not None:
            denoms = self.denoms_to_mix(denom_value=denom_value)
        else:
            denoms = self.denoms_to_mix()
        outpoints = list(denoms.keys())
        w = self.wallet
        icnt = 0
        txids = []
        inputs = []
        # NOTE: the target count is re-rolled on every loop pass, biasing
        # selection toward smaller input counts
        while icnt < random.randint(1, PRIVATESEND_ENTRY_MAX_SIZE):
            if not outpoints:
                break
            # pick a random remaining outpoint
            outpoint = outpoints.pop(random.randint(0, len(outpoints)-1))
            if not w.db.get_ps_denom(outpoint):  # already spent
                continue
            if w.db.get_ps_spending_denom(outpoint):  # reserved to spend
                continue
            txid = outpoint.split(':')[0]
            if txid in txids:  # skip outputs from same tx
                continue
            height = w.get_tx_height(txid).height
            islock = w.db.get_islock(txid)
            if not islock and height <= 0:  # skip not islocked/confirmed
                continue
            # denom tuple: (address, value, rounds) — per indexing below
            denom = denoms.pop(outpoint)
            if denom[2] >= self.mix_rounds:
                continue
            if not self.is_ps_ks(denom[0]) and self.is_hw_ks:
                continue  # skip denoms on hw keystore
            if denom_value is None:
                denom_value = denom[1]
            elif denom[1] != denom_value:  # skip other denom values
                continue
            inputs.append(outpoint)
            txids.append(txid)
            icnt += 1
        if not inputs:
            self.logger.debug(f'No suitable denoms to mix:'
                              f' denom_value={denom_value}')
            return None, None
        else:
            return inputs, denom_value
    def _start_denominate_wfl(self, denom_value=None):
        """Create and register a PSDenominateWorkflow (runs in executor).

        Selects denoms to mix, marks them as spending under lock,
        reserves/collects output addresses and stores the workflow.
        Returns the workflow, or None when no session slot or no denoms.
        Raises NotFoundInKeypairs if input keys are missing from the
        keypairs cache.
        """
        if self.active_denominate_wfl_cnt >= self.max_sessions:
            return
        selected_inputs, denom_value = self._select_denoms_to_mix(denom_value)
        if not selected_inputs:
            return
        with self.denominate_wfl_lock, self.denoms_lock:
            # re-check session limit under lock (raced with other tasks)
            if self.active_denominate_wfl_cnt >= self.max_sessions:
                return
            icnt = 0
            inputs = []
            input_addrs = []
            w = self.wallet
            for outpoint in selected_inputs:
                denom = w.db.get_ps_denom(outpoint)
                if not denom:
                    continue  # already spent
                if w.db.get_ps_spending_denom(outpoint):
                    continue  # already used by other wfl
                if self.is_hw_ks and not self.is_ps_ks(denom[0]):
                    continue  # skip denoms from hardware keystore
                inputs.append(outpoint)
                input_addrs.append(denom[0])
                icnt += 1
            if icnt < 1:
                self.logger.debug(f'No suitable denoms to mix after'
                                  f' denoms_lock: denom_value={denom_value}')
                return
            uuid = str(uuid4())
            wfl = PSDenominateWorkflow(uuid=uuid)
            wfl.inputs = inputs
            wfl.denom = denom_value
            self.set_denominate_wfl(wfl)
            for outpoint in inputs:
                self.add_ps_spending_denom(outpoint, wfl.uuid)
        # check input addresses is in keypairs if keypairs cache available
        if self._keypairs_cache:
            not_found_addrs = self._find_addrs_not_in_keypairs(input_addrs)
            if not_found_addrs:
                not_found_addrs = ', '.join(list(not_found_addrs))
                raise NotFoundInKeypairs(f'Input addresses is not found'
                                         f' in the keypairs cache:'
                                         f' {not_found_addrs}')
        # reuse addresses already reserved for these outpoints, reserve
        # new ones for the rest
        output_addrs = []
        found_outpoints = []
        for addr, data in w.db.get_ps_reserved().items():
            if data in inputs:
                output_addrs.append(addr)
                found_outpoints.append(data)
        for outpoint in inputs:
            if outpoint not in found_outpoints:
                force_main_ks = False
                if self.is_hw_ks:
                    # last mixing round: land result on main keystore
                    denom = w.db.get_ps_denom(outpoint)
                    if denom[2] == self.mix_rounds - 1:
                        force_main_ks = True
                reserved = self.reserve_addresses(1, data=outpoint,
                                                  force_main_ks=force_main_ks)
                output_addrs.append(reserved[0])
        with self.denominate_wfl_lock:
            # attach outputs to the stored workflow (may have been updated)
            saved = self.get_denominate_wfl(wfl.uuid)
            if not saved:
                raise Exception(f'denominate_wfl {wfl.lid} not found')
            wfl = saved
            wfl.outputs = output_addrs
            self.set_denominate_wfl(saved)
        self.logger.info(f'Created denominate workflow: {wfl.lid}, with inputs'
                         f' value {wfl.denom}, count {len(wfl.inputs)}')
        return wfl
def _sign_inputs(self, tx, inputs):
signed_inputs = []
tx = self._sign_denominate_tx(tx)
for i in tx.inputs():
if i.prevout.to_str() not in inputs:
continue
signed_inputs.append(CTxIn(i.prevout.txid[::-1], i.prevout.out_idx,
i.script_sig, i.nsequence))
return signed_inputs
def _sign_denominate_tx(self, tx):
mine_txins_cnt = 0
for txin in tx.inputs():
self.wallet.add_input_info(txin)
if txin.address is None:
continue
mine_txins_cnt += 1
self.sign_transaction(tx, None, mine_txins_cnt)
return tx
async def cleanup_denominate_wfl(self, wfl):
_cleanup = self._cleanup_denominate_wfl
changed = await self.loop.run_in_executor(None, _cleanup, wfl)
if changed:
self.wallet.save_db()
    def _cleanup_denominate_wfl(self, wfl):
        """Remove a denominate workflow and release its spending denoms.

        Completed workflows are kept until wait_for_mn_txs_time has
        passed (the masternode may still broadcast the mixed tx).
        Returns True if anything was cleaned, None otherwise.
        """
        with self.denominate_wfl_lock:
            saved = self.get_denominate_wfl(wfl.uuid)
            if not saved:  # already processed from _add_ps_data
                return
            else:
                wfl = saved
            completed = wfl.completed
            if completed:
                # recently completed: give the mixed tx time to arrive
                now = time.time()
                if now - wfl.completed <= self.wait_for_mn_txs_time:
                    return
        w = self.wallet
        # release denoms this workflow had marked as spending
        for outpoint, uuid in list(w.db.get_ps_spending_denoms().items()):
            if uuid != wfl.uuid:
                continue
            with self.denoms_lock:
                self.pop_ps_spending_denom(outpoint)
        with self.denominate_wfl_lock:
            self.clear_denominate_wfl(wfl.uuid)
        self.logger.info(f'Cleaned up denominate workflow: {wfl.lid}')
        return True
def _search_denominate_wfl(self, txid, tx):
err = self._check_denominate_tx_err(txid, tx, full_check=False)
if not err:
for uuid in self.denominate_wfl_list:
wfl = self.get_denominate_wfl(uuid)
if not wfl or not wfl.completed:
continue
if self._check_denominate_tx_io_on_wfl(txid, tx, wfl):
return wfl
def _check_on_denominate_wfl(self, txid, tx):
wfl = self._search_denominate_wfl(txid, tx)
err = self._check_denominate_tx_err(txid, tx)
if not err:
return True
if wfl:
raise AddPSDataError(f'{err}')
else:
return False
def _process_by_denominate_wfl(self, txid, tx):
wfl = self._search_denominate_wfl(txid, tx)
if not wfl:
return
w = self.wallet
for outpoint, uuid in list(w.db.get_ps_spending_denoms().items()):
if uuid != wfl.uuid:
continue
with self.denoms_lock:
self.pop_ps_spending_denom(outpoint)
with self.denominate_wfl_lock:
self.clear_denominate_wfl(wfl.uuid)
self.logger.wfl_done(f'Finished processing of denominate'
f' workflow: {wfl.lid} with tx: {txid}')
def get_workflow_tx_info(self, wfl):
w = self.wallet
tx_cnt = len(wfl.tx_order)
tx_type = None if not tx_cnt else wfl.tx_data[wfl.tx_order[0]].tx_type
total = 0
total_fee = 0
for txid in wfl.tx_order:
tx = Transaction(wfl.tx_data[txid].raw_tx)
tx_info = w.get_tx_info(tx)
total += tx_info.amount
total_fee += tx_info.fee
return tx_type, tx_cnt, total, total_fee
| 43.151357 | 83 | 0.550037 |
import asyncio
import copy
import random
import time
import threading
from collections import deque
from uuid import uuid4
from . import util
from .dash_msg import PRIVATESEND_ENTRY_MAX_SIZE
from .dash_ps_net import PSMixSession, PRIVATESEND_SESSION_MSG_TIMEOUT
from .dash_ps_wallet import (PSDataMixin, PSKeystoreMixin, KeyPairsMixin,
KPStates, NotFoundInKeypairs, AddPSDataError,
SignWithKeypairsFailed)
from .dash_ps_util import (PSOptsMixin, PSUtilsMixin, PSGUILogHandler,
PSManLogAdapter, PSCoinRounds, PSStates,
PS_DENOMS_DICT, COLLATERAL_VAL, MIN_DENOM_VAL,
CREATE_COLLATERAL_VAL, CREATE_COLLATERAL_VALS,
PSTxWorkflow, PSDenominateWorkflow, calc_tx_fee)
from .dash_tx import PSTxTypes, SPEC_TX_NAMES, CTxIn
from .logging import Logger
from .transaction import Transaction, PartialTxOutput, PartialTransaction
from .util import (NoDynamicFeeEstimates, log_exceptions, SilentTaskGroup,
NotEnoughFunds, bfh, is_android)
from .i18n import _
PS_DENOM_REVERSE_DICT = {int(v): k for k, v in PS_DENOMS_DICT.items()}
class TooManyUtxos(Exception):
class TooLargeUtxoVal(Exception):
class PSManager(Logger, PSKeystoreMixin, PSDataMixin, PSOptsMixin,
PSUtilsMixin, KeyPairsMixin):
LOGGING_SHORTCUT = 'A'
ADD_PS_DATA_ERR_MSG = _('Error on adding PrivateSend transaction data.')
SPEND_TO_PS_ADDRS_MSG = _('For privacy reasons blocked attempt to'
' transfer coins to PrivateSend address.')
WATCHING_ONLY_MSG = _('This is a watching-only wallet.'
' Mixing can not be run.')
ALL_MIXED_MSG = _('PrivateSend mixing is done')
CLEAR_PS_DATA_MSG = _('Are you sure to clear all wallet PrivateSend data?'
' This is not recommended if there is'
' no particular need.')
NO_NETWORK_MSG = _('Can not start mixing. Network is not available')
NO_DASH_NET_MSG = _('Can not start mixing. DashNet is not available')
LLMQ_DATA_NOT_READY = _('LLMQ quorums data is not fully loaded.')
MNS_DATA_NOT_READY = _('Masternodes data is not fully loaded.')
NOT_ENABLED_MSG = _('PrivateSend mixing is not enabled')
INITIALIZING_MSG = _('PrivateSend mixing is initializing.'
' Please try again soon')
MIXING_ALREADY_RUNNING_MSG = _('PrivateSend mixing is already running.')
MIXING_NOT_RUNNING_MSG = _('PrivateSend mixing is not running.')
FIND_UNTRACKED_RUN_MSG = _('PrivateSend mixing can not start. Process of'
' finding untracked PS transactions'
' is currently run')
ERRORED_MSG = _('PrivateSend mixing can not start.'
' Please check errors in PS Log tab')
UNKNOWN_STATE_MSG = _('PrivateSend mixing can not start.'
' Unknown state: {}')
WAIT_MIXING_STOP_MSG = _('Mixing is not stopped. If mixing sessions ends'
' prematurely additional pay collateral may be'
' paid. Do you really want to close wallet?')
NO_NETWORK_STOP_MSG = _('Network is not available')
OTHER_COINS_ARRIVED_MSG1 = _('Some unknown coins arrived on addresses'
' reserved for PrivateSend use, txid: {}.')
OTHER_COINS_ARRIVED_MSG2 = _('WARNING: it is not recommended to spend'
' these coins in regular transactions!')
OTHER_COINS_ARRIVED_MSG3 = _('You can use these coins in PrivateSend'
' mixing process by manually selecting UTXO'
' and creating new denoms or new collateral,'
' depending on UTXO value.')
OTHER_COINS_ARRIVED_Q = _('Do you want to use other coins now?')
if is_android():
NO_DYNAMIC_FEE_MSG = _('{}\n\nYou can switch fee estimation method'
' on send screen')
OTHER_COINS_ARRIVED_MSG4 = _('You can view and use these coins from'
' Coins popup from PrivateSend options.')
else:
NO_DYNAMIC_FEE_MSG = _('{}\n\nYou can switch to static fee estimation'
' on Fees Preferences tab')
OTHER_COINS_ARRIVED_MSG4 = _('You can view and use these coins from'
' Coins tab.')
def __init__(self, wallet):
Logger.__init__(self)
PSDataMixin.__init__(self, wallet)
PSKeystoreMixin.__init__(self, wallet)
KeyPairsMixin.__init__(self, wallet)
PSOptsMixin.__init__(self, wallet)
PSUtilsMixin.__init__(self, wallet)
self.log_handler = PSGUILogHandler(self)
self.logger = PSManLogAdapter(self.logger, {'psman_id': id(self)})
self.state_lock = threading.Lock()
self.states = s = PSStates
self.mixing_running_states = [s.StartMixing, s.Mixing, s.StopMixing]
self.no_clean_history_states = [s.Initializing, s.Errored,
s.StartMixing, s.Mixing, s.StopMixing,
s.FindingUntracked]
self.config = wallet.config
self._state = PSStates.Unsupported
self.wallet_types_supported = ['standard']
self.keystore_types_supported = ['bip32', 'hardware']
keystore = wallet.db.get('keystore')
if keystore:
self.w_ks_type = keystore.get('type', 'unknown')
else:
self.w_ks_type = 'unknown'
self.w_type = wallet.wallet_type
if (self.w_type in self.wallet_types_supported
and self.w_ks_type in self.keystore_types_supported):
if wallet.db.get_ps_data('ps_enabled', False):
self.state = PSStates.Initializing
else:
self.state = PSStates.Disabled
if self.unsupported:
supported_w = ', '.join(self.wallet_types_supported)
supported_ks = ', '.join(self.keystore_types_supported)
this_type = self.w_type
this_ks_type = self.w_ks_type
self.unsupported_msg = _(f'PrivateSend is currently supported on'
f' next wallet types: "{supported_w}"'
f' and keystore types: "{supported_ks}".'
f'\n\nThis wallet has type "{this_type}"'
f' and kestore type "{this_ks_type}".')
else:
self.unsupported_msg = ''
if self.is_hw_ks:
self.enable_ps_keystore()
self.network = None
self.dash_net = None
self.loop = None
self._loop_thread = None
self.main_taskgroup = None
self.mix_sessions_lock = asyncio.Lock()
self.mix_sessions = {}
self.recent_mixes_mns = deque([], 10)
self.denoms_lock = threading.Lock()
self.collateral_lock = threading.Lock()
self.others_lock = threading.Lock()
self.new_denoms_wfl_lock = threading.Lock()
self.new_collateral_wfl_lock = threading.Lock()
self.pay_collateral_wfl_lock = threading.Lock()
self.denominate_wfl_lock = threading.Lock()
self._not_enough_funds = False
self.disconnect_time = 0
@property
def unsupported(self):
return self.state == PSStates.Unsupported
@property
def enabled(self):
return self.state not in [PSStates.Unsupported, PSStates.Disabled]
@property
def is_hw_ks(self):
return self.w_ks_type == 'hardware'
def enable_ps(self):
if (self.w_type == 'standard' and self.is_hw_ks
and 'ps_keystore' not in self.wallet.db.data):
self.logger.info('ps_keystore for hw wallets must be created')
return
if not self.enabled:
self.wallet.db.set_ps_data('ps_enabled', True)
coro = self._enable_ps()
asyncio.run_coroutine_threadsafe(coro, self.loop)
async def _enable_ps(self):
if self.enabled:
return
self.state = PSStates.Initializing
util.trigger_callback('ps-state-changes', self.wallet, None, None)
_load_and_cleanup = self.load_and_cleanup
await self.loop.run_in_executor(None, _load_and_cleanup)
await self.find_untracked_ps_txs()
self.wallet.save_db()
def can_find_untracked(self):
w = self.wallet
network = self.network
if network is None:
return False
server_height = network.get_server_height()
if server_height == 0:
return False
local_height = network.get_local_height()
if local_height < server_height:
return False
with w.lock:
unverified_no_islock = []
for txid in w.unverified_tx:
if txid not in w.db.islocks:
unverified_no_islock.append(txid)
if (unverified_no_islock
or not w.is_up_to_date()
or not w.synchronizer.is_up_to_date()):
return False
return True
@property
def state(self):
return self._state
@property
def is_waiting(self):
if self.state not in self.mixing_running_states:
return False
if self.keypairs_state in [KPStates.NeedCache, KPStates.Caching]:
return False
active_wfls_cnt = 0
active_wfls_cnt += len(self.denominate_wfl_list)
if self.new_denoms_wfl:
active_wfls_cnt += 1
if self.new_collateral_wfl:
active_wfls_cnt += 1
return (active_wfls_cnt == 0)
@state.setter
def state(self, state):
self._state = state
def on_network_start(self, network):
self.network = network
util.register_callback(self.on_wallet_updated, ['wallet_updated'])
util.register_callback(self.on_network_status, ['status'])
self.dash_net = network.dash_net
self.loop = network.asyncio_loop
self._loop_thread = network._loop_thread
asyncio.ensure_future(self.clean_keypairs_on_timeout())
asyncio.ensure_future(self.cleanup_staled_denominate_wfls())
asyncio.ensure_future(self.trigger_postponed_notifications())
asyncio.ensure_future(self.broadcast_new_denoms_new_collateral_wfls())
def on_stop_threads(self):
if self.state == PSStates.Mixing:
self.stop_mixing()
util.unregister_callback(self.on_wallet_updated)
util.unregister_callback(self.on_network_status)
def on_network_status(self, event, *args):
connected = self.network.is_connected()
if connected:
self.disconnect_time = 0
else:
now = time.time()
if self.disconnect_time == 0:
self.disconnect_time = now
if now - self.disconnect_time > 30:
if self.state == PSStates.Mixing:
self.stop_mixing(self.NO_NETWORK_STOP_MSG)
async def on_wallet_updated(self, event, *args):
if not self.enabled:
return
w = args[0]
if w != self.wallet:
return
if w.is_up_to_date():
self._not_enough_funds = False
if self.state in [PSStates.Initializing, PSStates.Ready]:
await self.find_untracked_ps_txs()
def start_mixing(self, password, nowait=True):
w = self.wallet
msg = None
if w.is_watching_only():
msg = self.WATCHING_ONLY_MSG, 'err'
elif self.all_mixed:
msg = self.ALL_MIXED_MSG, 'inf'
elif not self.network or not self.network.is_connected():
msg = self.NO_NETWORK_MSG, 'err'
elif not self.dash_net.run_dash_net:
msg = self.NO_DASH_NET_MSG, 'err'
if msg:
msg, inf = msg
self.logger.info(f'Can not start PrivateSend Mixing: {msg}')
util.trigger_callback('ps-state-changes', w, msg, inf)
return
coro = self.find_untracked_ps_txs()
asyncio.run_coroutine_threadsafe(coro, self.loop).result()
with self.state_lock:
if self.state == PSStates.Ready:
self.state = PSStates.StartMixing
elif self.state in [PSStates.Unsupported, PSStates.Disabled]:
msg = self.NOT_ENABLED_MSG
elif self.state == PSStates.Initializing:
msg = self.INITIALIZING_MSG
elif self.state in self.mixing_running_states:
msg = self.MIXING_ALREADY_RUNNING_MSG
elif self.state == PSStates.FindingUntracked:
msg = self.FIND_UNTRACKED_RUN_MSG
elif self.state == PSStates.FindingUntracked:
msg = self.ERRORED_MSG
else:
msg = self.UNKNOWN_STATE_MSG.format(self.state)
if msg:
util.trigger_callback('ps-state-changes', w, msg, None)
self.logger.info(f'Can not start PrivateSend Mixing: {msg}')
return
else:
util.trigger_callback('ps-state-changes', w, None, None)
fut = asyncio.run_coroutine_threadsafe(self._start_mixing(password),
self.loop)
if nowait:
return
try:
fut.result(timeout=2)
except (asyncio.TimeoutError, asyncio.CancelledError):
pass
async def _start_mixing(self, password):
if not self.enabled or not self.network:
return
assert not self.main_taskgroup
self._not_enough_funds = False
self.main_taskgroup = main_taskgroup = SilentTaskGroup()
self.logger.info('Starting PrivateSend Mixing')
async def main():
try:
async with main_taskgroup as group:
if (self.w_type == 'standard'
and self.is_hw_ks):
await group.spawn(self._prepare_funds_from_hw_wallet())
await group.spawn(self._make_keypairs_cache(password))
await group.spawn(self._check_not_enough_funds())
await group.spawn(self._check_all_mixed())
await group.spawn(self._maintain_pay_collateral_tx())
await group.spawn(self._maintain_collateral_amount())
await group.spawn(self._maintain_denoms())
await group.spawn(self._mix_denoms())
except Exception as e:
self.logger.info(f'error starting mixing: {str(e)}')
raise e
asyncio.run_coroutine_threadsafe(main(), self.loop)
with self.state_lock:
self.state = PSStates.Mixing
self.last_mix_start_time = time.time()
self.logger.info('Started PrivateSend Mixing')
w = self.wallet
util.trigger_callback('ps-state-changes', w, None, None)
async def stop_mixing_from_async_thread(self, msg, msg_type=None):
await self.loop.run_in_executor(None, self.stop_mixing, msg, msg_type)
def stop_mixing(self, msg=None, msg_type=None, nowait=True):
w = self.wallet
with self.state_lock:
if self.state == PSStates.Mixing:
self.state = PSStates.StopMixing
elif self.state == PSStates.StopMixing:
return
else:
msg = self.MIXING_NOT_RUNNING_MSG
util.trigger_callback('ps-state-changes', w, msg, 'inf')
self.logger.info(f'Can not stop PrivateSend Mixing: {msg}')
return
if msg:
self.logger.info(f'Stopping PrivateSend Mixing: {msg}')
if not msg_type or not msg_type.startswith('inf'):
stopped_prefix = _('PrivateSend mixing is stopping!')
msg = f'{stopped_prefix}\n\n{msg}'
util.trigger_callback('ps-state-changes', w, msg, msg_type)
else:
self.logger.info('Stopping PrivateSend Mixing')
util.trigger_callback('ps-state-changes', w, None, None)
self.last_mix_stop_time = time.time()
fut = asyncio.run_coroutine_threadsafe(self._stop_mixing(), self.loop)
if nowait:
return
try:
fut.result(timeout=PRIVATESEND_SESSION_MSG_TIMEOUT+5)
except (asyncio.TimeoutError, asyncio.CancelledError):
pass
@log_exceptions
async def _stop_mixing(self):
if self.keypairs_state == KPStates.Caching:
self.logger.info('Waiting for keypairs caching to finish')
while self.keypairs_state == KPStates.Caching:
await asyncio.sleep(0.5)
if self.main_taskgroup:
sess_cnt = len(self.mix_sessions)
if sess_cnt > 0:
self.logger.info(f'Waiting for {sess_cnt}'
f' mixing sessions to finish')
while sess_cnt > 0:
await asyncio.sleep(0.5)
sess_cnt = len(self.mix_sessions)
try:
await asyncio.wait_for(self.main_taskgroup.cancel_remaining(),
timeout=2)
except (asyncio.TimeoutError, asyncio.CancelledError) as e:
self.logger.debug(f'Exception during main_taskgroup'
f' cancellation: {repr(e)}')
self.main_taskgroup = None
with self.keypairs_state_lock:
if self.keypairs_state == KPStates.Ready:
self.logger.info('Mark keypairs as unused')
self.keypairs_state = KPStates.Unused
self.logger.info('Stopped PrivateSend Mixing')
self.last_mix_stop_time = time.time()
with self.state_lock:
self.state = PSStates.Ready
w = self.wallet
util.trigger_callback('ps-state-changes', w, None, None)
async def _check_all_mixed(self):
while not self.main_taskgroup.closed():
await asyncio.sleep(10)
if self.all_mixed:
await self.stop_mixing_from_async_thread(self.ALL_MIXED_MSG,
'inf')
async def _check_not_enough_funds(self):
while not self.main_taskgroup.closed():
if self._not_enough_funds:
await asyncio.sleep(30)
self._not_enough_funds = False
await asyncio.sleep(5)
async def _maintain_pay_collateral_tx(self):
kp_wait_state = KPStates.Ready if self.need_password() else None
while not self.main_taskgroup.closed():
wfl = self.pay_collateral_wfl
if wfl:
if not wfl.completed or not wfl.tx_order:
await self.cleanup_pay_collateral_wfl()
elif self.ps_collateral_cnt > 0:
if kp_wait_state and self.keypairs_state != kp_wait_state:
self.logger.info('Pay collateral workflow waiting'
' for keypairs generation')
await asyncio.sleep(5)
continue
if not self.get_confirmed_ps_collateral_data():
await asyncio.sleep(5)
continue
await self.prepare_pay_collateral_wfl()
await asyncio.sleep(0.25)
async def broadcast_new_denoms_new_collateral_wfls(self):
w = self.wallet
while True:
if self.enabled:
wfl = self.new_denoms_wfl
if wfl and wfl.completed and wfl.next_to_send(w):
await self.broadcast_new_denoms_wfl()
await asyncio.sleep(0.25)
wfl = self.new_collateral_wfl
if wfl and wfl.completed and wfl.next_to_send(w):
await self.broadcast_new_collateral_wfl()
await asyncio.sleep(0.25)
else:
await asyncio.sleep(1)
async def _maintain_collateral_amount(self):
kp_wait_state = KPStates.Ready if self.need_password() else None
while not self.main_taskgroup.closed():
wfl = self.new_collateral_wfl
if wfl:
if not wfl.completed or not wfl.tx_order:
await self.cleanup_new_collateral_wfl()
elif (not self._not_enough_funds
and not self.ps_collateral_cnt
and not self.calc_need_denoms_amounts(use_cache=True)):
coins = await self.get_next_coins_for_mixing(for_denoms=False)
if not coins:
await asyncio.sleep(5)
continue
if not self.check_llmq_ready():
self.logger.info(_('New collateral workflow: {}')
.format(self.LLMQ_DATA_NOT_READY))
await asyncio.sleep(5)
continue
elif kp_wait_state and self.keypairs_state != kp_wait_state:
self.logger.info('New collateral workflow waiting'
' for keypairs generation')
await asyncio.sleep(5)
continue
await self.create_new_collateral_wfl()
await asyncio.sleep(0.25)
async def _maintain_denoms(self):
kp_wait_state = KPStates.Ready if self.need_password() else None
while not self.main_taskgroup.closed():
wfl = self.new_denoms_wfl
if wfl:
if not wfl.completed or not wfl.tx_order:
await self.cleanup_new_denoms_wfl()
elif (not self._not_enough_funds
and self.calc_need_denoms_amounts(use_cache=True)):
coins = await self.get_next_coins_for_mixing()
if not coins:
await asyncio.sleep(5)
continue
if not self.check_llmq_ready():
self.logger.info(_('New denoms workflow: {}')
.format(self.LLMQ_DATA_NOT_READY))
await asyncio.sleep(5)
continue
elif kp_wait_state and self.keypairs_state != kp_wait_state:
self.logger.info('New denoms workflow waiting'
' for keypairs generation')
await asyncio.sleep(5)
continue
await self.create_new_denoms_wfl()
await asyncio.sleep(0.25)
async def _mix_denoms(self):
kp_wait_state = KPStates.Ready if self.need_password() else None
def _cleanup():
for uuid in self.denominate_wfl_list:
wfl = self.get_denominate_wfl(uuid)
if wfl and not wfl.completed:
self._cleanup_denominate_wfl(wfl)
await self.loop.run_in_executor(None, _cleanup)
main_taskgroup = self.main_taskgroup
while not main_taskgroup.closed():
if (self._denoms_to_mix_cache
and self.pay_collateral_wfl
and self.active_denominate_wfl_cnt < self.max_sessions):
if not self.check_llmq_ready():
self.logger.info(_('Denominate workflow: {}')
.format(self.LLMQ_DATA_NOT_READY))
await asyncio.sleep(5)
continue
elif not self.check_protx_info_completeness():
self.logger.info(_('Denominate workflow: {}')
.format(self.MNS_DATA_NOT_READY))
await asyncio.sleep(5)
continue
elif kp_wait_state and self.keypairs_state != kp_wait_state:
self.logger.info('Denominate workflow waiting'
' for keypairs generation')
await asyncio.sleep(5)
continue
if self.state == PSStates.Mixing:
await main_taskgroup.spawn(self.start_denominate_wfl())
await asyncio.sleep(0.25)
async def start_mix_session(self, denom_value, dsq, wfl_lid):
n_denom = PS_DENOMS_DICT[denom_value]
sess = PSMixSession(self, denom_value, n_denom, dsq, wfl_lid)
peer_str = sess.peer_str
async with self.mix_sessions_lock:
if peer_str in self.mix_sessions:
raise Exception(f'Session with {peer_str} already exists')
await sess.run_peer()
self.mix_sessions[peer_str] = sess
return sess
async def stop_mix_session(self, peer_str):
async with self.mix_sessions_lock:
sess = self.mix_sessions.pop(peer_str)
if not sess:
self.logger.debug(f'Peer {peer_str} not found in mix_session')
return
sess.close_peer()
return sess
def get_confirmed_ps_collateral_data(self):
w = self.wallet
for outpoint, ps_collateral in w.db.get_ps_collaterals().items():
addr, value = ps_collateral
utxos = w.get_utxos([addr], min_rounds=PSCoinRounds.COLLATERAL,
confirmed_only=True, consider_islocks=True)
utxos = self.filter_out_hw_ks_coins(utxos)
inputs = []
for utxo in utxos:
if utxo.prevout.to_str() != outpoint:
continue
w.add_input_info(utxo)
inputs.append(utxo)
if inputs:
return outpoint, value, inputs
else:
self.logger.wfl_err(f'ps_collateral outpoint {outpoint}'
f' is not confirmed')
async def prepare_pay_collateral_wfl(self):
try:
_prepare = self._prepare_pay_collateral_tx
res = await self.loop.run_in_executor(None, _prepare)
if res:
txid, wfl = res
self.logger.wfl_ok(f'Completed pay collateral workflow with'
f' tx: {txid}, workflow: {wfl.lid}')
self.wallet.save_db()
except Exception as e:
wfl = self.pay_collateral_wfl
if wfl:
self.logger.wfl_err(f'Error creating pay collateral tx:'
f' {str(e)}, workflow: {wfl.lid}')
await self.cleanup_pay_collateral_wfl(force=True)
else:
self.logger.wfl_err(f'Error during creation of pay collateral'
f' worfklow: {str(e)}')
type_e = type(e)
msg = None
if type_e == NoDynamicFeeEstimates:
msg = self.NO_DYNAMIC_FEE_MSG.format(str(e))
elif type_e == NotFoundInKeypairs:
msg = self.NOT_FOUND_KEYS_MSG
elif type_e == SignWithKeypairsFailed:
msg = self.SIGN_WIHT_KP_FAILED_MSG
if msg:
await self.stop_mixing_from_async_thread(msg)
def _prepare_pay_collateral_tx(self):
with self.pay_collateral_wfl_lock:
if self.pay_collateral_wfl:
return
uuid = str(uuid4())
wfl = PSTxWorkflow(uuid=uuid)
self.set_pay_collateral_wfl(wfl)
self.logger.info(f'Started up pay collateral workflow: {wfl.lid}')
res = self.get_confirmed_ps_collateral_data()
if not res:
raise Exception('No confirmed ps_collateral found')
outpoint, value, inputs = res
if self._keypairs_cache:
input_addrs = [utxo.address for utxo in inputs]
not_found_addrs = self._find_addrs_not_in_keypairs(input_addrs)
if not_found_addrs:
not_found_addrs = ', '.join(list(not_found_addrs))
raise NotFoundInKeypairs(f'Input addresses is not found'
f' in the keypairs cache:'
f' {not_found_addrs}')
self.add_ps_spending_collateral(outpoint, wfl.uuid)
if value >= COLLATERAL_VAL*2:
ovalue = value - COLLATERAL_VAL
output_addr = None
for addr, data in self.wallet.db.get_ps_reserved().items():
if data == outpoint:
output_addr = addr
break
if not output_addr:
reserved = self.reserve_addresses(1, for_change=True,
data=outpoint)
output_addr = reserved[0]
outputs = [PartialTxOutput.from_address_and_value(output_addr, ovalue)]
else:
outputs = [PartialTxOutput(scriptpubkey=bfh('6a'), value=0)]
tx = PartialTransaction.from_io(inputs[:], outputs[:], locktime=0)
tx.inputs()[0].nsequence = 0xffffffff
tx = self.sign_transaction(tx, None)
txid = tx.txid()
raw_tx = tx.serialize_to_network()
tx_type = PSTxTypes.PAY_COLLATERAL
wfl.add_tx(txid=txid, raw_tx=raw_tx, tx_type=tx_type)
wfl.completed = True
with self.pay_collateral_wfl_lock:
saved = self.pay_collateral_wfl
if not saved:
raise Exception('pay_collateral_wfl not found')
if saved.uuid != wfl.uuid:
raise Exception('pay_collateral_wfl differs from original')
self.set_pay_collateral_wfl(wfl)
return txid, wfl
async def cleanup_pay_collateral_wfl(self, force=False):
_cleanup = self._cleanup_pay_collateral_wfl
changed = await self.loop.run_in_executor(None, _cleanup, force)
if changed:
self.wallet.save_db()
def _cleanup_pay_collateral_wfl(self, force=False):
with self.pay_collateral_wfl_lock:
wfl = self.pay_collateral_wfl
if not wfl or wfl.completed and wfl.tx_order and not force:
return
w = self.wallet
if wfl.tx_order:
for txid in wfl.tx_order[::-1]:
if w.db.get_transaction(txid):
w.remove_transaction(txid)
else:
self._cleanup_pay_collateral_wfl_tx_data(txid)
else:
self._cleanup_pay_collateral_wfl_tx_data()
return True
    def _cleanup_pay_collateral_wfl_tx_data(self, txid=None):
        """Drop one tx (by txid) from the pay collateral workflow.

        When the workflow has no txs left, release its spending
        collaterals and clear the workflow itself.
        """
        with self.pay_collateral_wfl_lock:
            wfl = self.pay_collateral_wfl
            if not wfl:
                return
            if txid:
                tx_data = wfl.pop_tx(txid)
                if tx_data:
                    self.set_pay_collateral_wfl(wfl)
                    self.logger.info(f'Cleaned up pay collateral tx:'
                                     f' {txid}, workflow: {wfl.lid}')
        if wfl.tx_order:
            return  # more txs remain, keep the workflow
        w = self.wallet
        for outpoint, uuid in list(w.db.get_ps_spending_collaterals().items()):
            if uuid != wfl.uuid:
                continue
            with self.collateral_lock:
                self.pop_ps_spending_collateral(outpoint)
        with self.pay_collateral_wfl_lock:
            saved = self.pay_collateral_wfl
            if saved and saved.uuid == wfl.uuid:
                self.clear_pay_collateral_wfl()
        self.logger.info(f'Cleaned up pay collateral workflow: {wfl.lid}')
def _search_pay_collateral_wfl(self, txid, tx):
err = self._check_pay_collateral_tx_err(txid, tx, full_check=False)
if not err:
wfl = self.pay_collateral_wfl
if wfl and wfl.tx_order and txid in wfl.tx_order:
return wfl
def _check_on_pay_collateral_wfl(self, txid, tx):
wfl = self._search_pay_collateral_wfl(txid, tx)
err = self._check_pay_collateral_tx_err(txid, tx)
if not err:
return True
if wfl:
raise AddPSDataError(f'{err}')
else:
return False
    def _process_by_pay_collateral_wfl(self, txid, tx):
        """Consume a seen tx that belongs to the pay collateral workflow.

        Pops the tx from the workflow; when the last tx is processed,
        releases spending collaterals and clears the workflow.
        """
        wfl = self._search_pay_collateral_wfl(txid, tx)
        if not wfl:
            return
        with self.pay_collateral_wfl_lock:
            saved = self.pay_collateral_wfl
            if not saved or saved.uuid != wfl.uuid:
                return
            tx_data = wfl.pop_tx(txid)
            if tx_data:
                self.set_pay_collateral_wfl(wfl)
                self.logger.wfl_done(f'Processed tx: {txid} from pay'
                                     f' collateral workflow: {wfl.lid}')
        if wfl.tx_order:
            return  # more txs remain to be processed
        w = self.wallet
        for outpoint, uuid in list(w.db.get_ps_spending_collaterals().items()):
            if uuid != wfl.uuid:
                continue
            with self.collateral_lock:
                self.pop_ps_spending_collateral(outpoint)
        with self.pay_collateral_wfl_lock:
            saved = self.pay_collateral_wfl
            if saved and saved.uuid == wfl.uuid:
                self.clear_pay_collateral_wfl()
        self.logger.wfl_done(f'Finished processing of pay collateral'
                             f' workflow: {wfl.lid}')
def get_pay_collateral_tx(self):
wfl = self.pay_collateral_wfl
if not wfl or not wfl.tx_order:
return
txid = wfl.tx_order[0]
tx_data = wfl.tx_data.get(txid)
if not tx_data:
return
return tx_data.raw_tx
def new_collateral_from_coins_info(self, coins):
if not coins or len(coins) > 1:
return
coins_val = sum([c.value_sats() for c in coins])
if (coins_val >= self.min_new_denoms_from_coins_val
or coins_val < self.min_new_collateral_from_coins_val):
return
fee_per_kb = self.config.fee_per_kb()
for collateral_val in CREATE_COLLATERAL_VALS[::-1]:
new_collateral_fee = calc_tx_fee(1, 1, fee_per_kb, max_size=True)
if coins_val - new_collateral_fee >= collateral_val:
tx_type = SPEC_TX_NAMES[PSTxTypes.NEW_COLLATERAL]
info = _('Transactions type: {}').format(tx_type)
info += '\n'
info += _('Count of transactions: {}').format(1)
info += '\n'
info += _('Total sent amount: {}').format(coins_val)
info += '\n'
info += _('Total output amount: {}').format(collateral_val)
info += '\n'
info += _('Total fee: {}').format(coins_val - collateral_val)
return info
    def create_new_collateral_wfl_from_gui(self, coins, password):
        """Create, sign and register a new collateral tx from GUI coins.

        Returns (wfl, None) on success or (None, error_message) on
        failure; on failure the partially built workflow is cleaned up.
        """
        if self.state in self.mixing_running_states:
            return None, ('Can not create new collateral as mixing'
                          ' process is currently run.')
        if len(coins) > 1:
            return None, ('Can not create new collateral amount,'
                          ' too many coins selected')
        wfl = self._start_new_collateral_wfl()
        if not wfl:
            return None, ('Can not create new collateral as other new'
                          ' collateral creation process is in progress')
        try:
            w = self.wallet
            txid, tx = self._make_new_collateral_tx(wfl, coins, password)
            if not w.add_transaction(tx):
                raise Exception(f'Transaction with txid: {txid}'
                                f' conflicts with current history')
            # add PS data ourselves if tx was not detected as PS tx
            if not w.db.get_ps_tx(txid)[0] == PSTxTypes.NEW_COLLATERAL:
                self._add_ps_data(txid, tx, PSTxTypes.NEW_COLLATERAL)
            with self.new_collateral_wfl_lock:
                saved = self.new_collateral_wfl
                if not saved:
                    raise Exception('new_collateral_wfl not found')
                if saved.uuid != wfl.uuid:
                    raise Exception('new_collateral_wfl differs from original')
                wfl.completed = True
                self.set_new_collateral_wfl(wfl)
                self.logger.wfl_ok(f'Completed new collateral workflow'
                                   f' with tx: {txid},'
                                   f' workflow: {wfl.lid}')
            return wfl, None
        except Exception as e:
            err = str(e)
            self.logger.wfl_err(f'Error creating new collateral tx:'
                                f' {err}, workflow: {wfl.lid}')
            self._cleanup_new_collateral_wfl(force=True)
            self.logger.info(f'Cleaned up new collateral workflow:'
                             f' {wfl.lid}')
            return None, err
    async def create_new_collateral_wfl(self):
        """Create a new collateral tx as part of the mixing process.

        Builds and signs the tx in the executor, marks the workflow
        completed, and on error cleans up and possibly stops mixing with
        a user visible message.
        """
        coins_data = await self.get_next_coins_for_mixing(for_denoms=False)
        coins = coins_data['coins']
        _start = self._start_new_collateral_wfl
        wfl = await self.loop.run_in_executor(None, _start)
        if not wfl:
            return
        try:
            _make_tx = self._make_new_collateral_tx
            txid, tx = await self.loop.run_in_executor(None, _make_tx,
                                                       wfl, coins)
            w = self.wallet
            if not w.add_transaction(tx):
                raise Exception(f'Transaction with txid: {txid}'
                                f' conflicts with current history')

            def _after_create_tx():
                with self.new_collateral_wfl_lock:
                    saved = self.new_collateral_wfl
                    if not saved:
                        raise Exception('new_collateral_wfl not found')
                    if saved.uuid != wfl.uuid:
                        raise Exception('new_collateral_wfl differs'
                                        ' from original')
                    wfl.completed = True
                    self.set_new_collateral_wfl(wfl)
                    self.logger.wfl_ok(f'Completed new collateral workflow'
                                       f' with tx: {txid},'
                                       f' workflow: {wfl.lid}')
            await self.loop.run_in_executor(None, _after_create_tx)
            w.save_db()
        except Exception as e:
            self.logger.wfl_err(f'Error creating new collateral tx:'
                                f' {str(e)}, workflow: {wfl.lid}')
            await self.cleanup_new_collateral_wfl(force=True)
            type_e = type(e)
            msg = None
            if type_e == NoDynamicFeeEstimates:
                msg = self.NO_DYNAMIC_FEE_MSG.format(str(e))
            elif type_e == AddPSDataError:
                # NOTE(review): txid is referenced here; it looks like
                # AddPSDataError can only be raised after txid is bound
                # (during add_transaction processing) — confirm
                msg = self.ADD_PS_DATA_ERR_MSG
                type_name = SPEC_TX_NAMES[PSTxTypes.NEW_COLLATERAL]
                msg = f'{msg} {type_name} {txid}:\n{str(e)}'
            elif type_e == NotFoundInKeypairs:
                msg = self.NOT_FOUND_KEYS_MSG
            elif type_e == SignWithKeypairsFailed:
                msg = self.SIGN_WIHT_KP_FAILED_MSG
            elif type_e == NotEnoughFunds:
                self._not_enough_funds = True
            if msg:
                await self.stop_mixing_from_async_thread(msg)
def _start_new_collateral_wfl(self):
with self.new_collateral_wfl_lock:
if self.new_collateral_wfl:
return
uuid = str(uuid4())
wfl = PSTxWorkflow(uuid=uuid)
self.set_new_collateral_wfl(wfl)
self.logger.info(f'Started up new collateral workflow: {wfl.lid}')
return self.new_collateral_wfl
    def _make_new_collateral_tx(self, wfl, coins=None, password=None):
        """Build, sign and attach a new collateral tx to workflow wfl.

        When coins is None a single minimal-denom PS coin is selected.
        Returns (txid, tx). Raises NotEnoughFunds, TooLargeUtxoVal or
        NotFoundInKeypairs on unsuitable inputs.
        """
        with self.new_collateral_wfl_lock:
            saved = self.new_collateral_wfl
            if not saved:
                raise Exception('new_collateral_wfl not found')
            if saved.uuid != wfl.uuid:
                raise Exception('new_collateral_wfl differs from original')
        w = self.wallet
        fee_per_kb = self.config.fee_per_kb()
        uuid = wfl.uuid
        # reserve the collateral output address, tagged with wfl uuid
        oaddr = self.reserve_addresses(1, data=uuid)[0]
        if not coins:
            # select a single smallest-rounds minimal denom coin
            coins = w.get_utxos(None, mature_only=True, confirmed_only=True,
                                consider_islocks=True, min_rounds=0)
            coins = [c for c in coins if c.value_sats() == MIN_DENOM_VAL]
            coins = self.filter_out_hw_ks_coins(coins)
            if not coins:
                raise NotEnoughFunds()
            coins = sorted(coins, key=lambda x: x.ps_rounds)
            coins = coins[0:1]
        no_change = False
        outputs = None
        coins_val = sum([c.value_sats() for c in coins])
        if (len(coins) == 1
                and coins[0].ps_rounds is not None
                and coins[0].ps_rounds != PSCoinRounds.MIX_ORIGIN):
            if coins_val >= self.min_new_denoms_from_coins_val:
                raise TooLargeUtxoVal('To large utxo selected')
            # single PS coin is spent entirely, fee absorbs the rest
            no_change = True
        if no_change:
            for val in CREATE_COLLATERAL_VALS[::-1]:
                new_collateral_fee = calc_tx_fee(1, 1, fee_per_kb,
                                                 max_size=True)
                if coins_val - new_collateral_fee < val:
                    continue
                outputs = [PartialTxOutput.from_address_and_value(oaddr, val)]
                break
            if outputs is None:
                raise NotEnoughFunds()
        else:
            val = CREATE_COLLATERAL_VAL
            outputs = [PartialTxOutput.from_address_and_value(oaddr, val)]
        tx = w.make_unsigned_transaction(coins=coins, outputs=outputs)
        inputs = tx.inputs()
        if self._keypairs_cache:
            input_addrs = [utxo.address for utxo in inputs]
            not_found_addrs = self._find_addrs_not_in_keypairs(input_addrs)
            if not_found_addrs:
                not_found_addrs = ', '.join(list(not_found_addrs))
                raise NotFoundInKeypairs(f'Input addresses is not found'
                                         f' in the keypairs cache:'
                                         f' {not_found_addrs}')
        if no_change:
            tx = PartialTransaction.from_io(inputs[:], outputs[:], locktime=0)
            for txin in tx.inputs():
                txin.nsequence = 0xffffffff
        else:
            # send change back to the first input address
            change_addr = inputs[0].address
            tx = w.make_unsigned_transaction(coins=inputs, outputs=outputs,
                                             change_addr=change_addr)
        tx = self.sign_transaction(tx, password)
        # sanity check: fee must stay below the collateral-from-coins limit
        estimated_fee = calc_tx_fee(len(tx.inputs()), len(tx.outputs()),
                                    fee_per_kb, max_size=True)
        overfee = tx.get_fee() - estimated_fee
        assert overfee < self.min_new_collateral_from_coins_val, 'too high fee'
        txid = tx.txid()
        raw_tx = tx.serialize_to_network()
        tx_type = PSTxTypes.NEW_COLLATERAL
        wfl.add_tx(txid=txid, raw_tx=raw_tx, tx_type=tx_type)
        with self.new_collateral_wfl_lock:
            saved = self.new_collateral_wfl
            if not saved:
                raise Exception('new_collateral_wfl not found')
            if saved.uuid != wfl.uuid:
                raise Exception('new_collateral_wfl differs from original')
            self.set_new_collateral_wfl(wfl)
        return txid, tx
async def cleanup_new_collateral_wfl(self, force=False):
_cleanup = self._cleanup_new_collateral_wfl
changed = await self.loop.run_in_executor(None, _cleanup, force)
if changed:
self.wallet.save_db()
    def _cleanup_new_collateral_wfl(self, force=False):
        """Remove new collateral workflow txs and data; True if changed.

        A completed workflow that still has queued txs is kept unless
        force is set.
        """
        with self.new_collateral_wfl_lock:
            wfl = self.new_collateral_wfl
            # parses as: not wfl or (completed and has txs and not force)
            if not wfl or wfl.completed and wfl.tx_order and not force:
                return
        w = self.wallet
        if wfl.tx_order:
            for txid in wfl.tx_order[::-1]:  # remove in reverse order
                if w.db.get_transaction(txid):
                    w.remove_transaction(txid)
                else:
                    self._cleanup_new_collateral_wfl_tx_data(txid)
        else:
            self._cleanup_new_collateral_wfl_tx_data()
        return True
    def _cleanup_new_collateral_wfl_tx_data(self, txid=None):
        """Drop one tx (by txid) from the new collateral workflow.

        When the workflow has no txs left, release its reserved
        addresses and clear the workflow itself.
        """
        with self.new_collateral_wfl_lock:
            wfl = self.new_collateral_wfl
            if not wfl:
                return
            if txid:
                tx_data = wfl.pop_tx(txid)
                if tx_data:
                    self.set_new_collateral_wfl(wfl)
                    self.logger.info(f'Cleaned up new collateral tx:'
                                     f' {txid}, workflow: {wfl.lid}')
        if wfl.tx_order:
            return  # more txs remain, keep the workflow
        w = self.wallet
        for addr in w.db.select_ps_reserved(data=wfl.uuid):
            self.pop_ps_reserved(addr)
        with self.new_collateral_wfl_lock:
            saved = self.new_collateral_wfl
            if saved and saved.uuid == wfl.uuid:
                self.clear_new_collateral_wfl()
        self.logger.info(f'Cleaned up new collateral workflow: {wfl.lid}')
    async def broadcast_new_collateral_wfl(self):
        """Broadcast the next pending tx of a completed workflow.

        Workflow state is re-saved after both success and failure as
        tx_data.send updates per-tx sending state on the workflow.
        """
        def _check_wfl():
            with self.new_collateral_wfl_lock:
                wfl = self.new_collateral_wfl
                if not wfl:
                    return
                if not wfl.completed:
                    return
            return wfl
        wfl = await self.loop.run_in_executor(None, _check_wfl)
        if not wfl:
            return
        w = self.wallet
        tx_data = wfl.next_to_send(w)
        if not tx_data:
            return
        txid = tx_data.txid
        sent, err = await tx_data.send(self)
        if err:
            def _on_fail():
                with self.new_collateral_wfl_lock:
                    saved = self.new_collateral_wfl
                    if not saved:
                        raise Exception('new_collateral_wfl not found')
                    if saved.uuid != wfl.uuid:
                        raise Exception('new_collateral_wfl differs'
                                        ' from original')
                    self.set_new_collateral_wfl(wfl)
                self.logger.wfl_err(f'Failed broadcast of new collateral tx'
                                    f' {txid}: {err}, workflow {wfl.lid}')
            await self.loop.run_in_executor(None, _on_fail)
        if sent:
            def _on_success():
                with self.new_collateral_wfl_lock:
                    saved = self.new_collateral_wfl
                    if not saved:
                        raise Exception('new_collateral_wfl not found')
                    if saved.uuid != wfl.uuid:
                        raise Exception('new_collateral_wfl differs'
                                        ' from original')
                    self.set_new_collateral_wfl(wfl)
                self.logger.wfl_done(f'Broadcasted transaction {txid} from new'
                                     f' collateral workflow: {wfl.lid}')
                tx = Transaction(wfl.tx_data[txid].raw_tx)
                self._process_by_new_collateral_wfl(txid, tx)
                if not wfl.next_to_send(w):
                    self.logger.wfl_done(f'Broadcast completed for new'
                                         f' collateral workflow: {wfl.lid}')
            await self.loop.run_in_executor(None, _on_success)
def _search_new_collateral_wfl(self, txid, tx):
err = self._check_new_collateral_tx_err(txid, tx, full_check=False)
if not err:
wfl = self.new_collateral_wfl
if wfl and wfl.tx_order and txid in wfl.tx_order:
return wfl
def _check_on_new_collateral_wfl(self, txid, tx):
wfl = self._search_new_collateral_wfl(txid, tx)
err = self._check_new_collateral_tx_err(txid, tx)
if not err:
return True
if wfl:
raise AddPSDataError(f'{err}')
else:
return False
    def _process_by_new_collateral_wfl(self, txid, tx):
        """Consume a seen tx that belongs to the new collateral workflow.

        Pops the tx from the workflow; when the last tx is processed,
        releases reserved addresses and clears the workflow.
        """
        wfl = self._search_new_collateral_wfl(txid, tx)
        if not wfl:
            return
        with self.new_collateral_wfl_lock:
            saved = self.new_collateral_wfl
            if not saved or saved.uuid != wfl.uuid:
                return
            tx_data = wfl.pop_tx(txid)
            if tx_data:
                self.set_new_collateral_wfl(wfl)
                self.logger.wfl_done(f'Processed tx: {txid} from new'
                                     f' collateral workflow: {wfl.lid}')
        if wfl.tx_order:
            return  # more txs remain to be processed
        w = self.wallet
        for addr in w.db.select_ps_reserved(data=wfl.uuid):
            self.pop_ps_reserved(addr)
        with self.new_collateral_wfl_lock:
            saved = self.new_collateral_wfl
            if saved and saved.uuid == wfl.uuid:
                self.clear_new_collateral_wfl()
        self.logger.wfl_done(f'Finished processing of new collateral'
                             f' workflow: {wfl.lid}')
def new_denoms_from_coins_info(self, coins):
if not coins or len(coins) > 1:
return
coins_val = sum([c.value_sats() for c in coins])
if coins_val < self.min_new_denoms_from_coins_val:
return
fee_per_kb = self.config.fee_per_kb()
denoms_amounts = self._calc_denoms_amounts_from_coins(coins,
fee_per_kb)
if denoms_amounts:
tx_cnt = len(denoms_amounts)
outputs_val = sum([sum(amounts) for amounts in denoms_amounts])
tx_type = SPEC_TX_NAMES[PSTxTypes.NEW_DENOMS]
info = _('Transactions type: {}').format(tx_type)
info += '\n'
info += _('Count of transactions: {}').format(tx_cnt)
info += '\n'
info += _('Total sent amount: {}').format(coins_val)
info += '\n'
info += _('Total output amount: {}').format(outputs_val)
info += '\n'
info += _('Total fee: {}').format(coins_val - outputs_val)
return info
    def create_new_denoms_wfl_from_gui(self, coins, password):
        """Create a chain of new denoms txs from GUI-selected coins.

        Each tx spends the change of the previous one. Returns
        (wfl, None) on success or (None, error_message) on failure; on
        failure the partially built workflow is cleaned up.
        """
        if self.state in self.mixing_running_states:
            return None, ('Can not create new denoms as mixing process'
                          ' is currently run.')
        if len(coins) > 1:
            return None, ('Can not create new denoms,'
                          ' too many coins selected')
        wfl, outputs_amounts = self._start_new_denoms_wfl(coins,
                                                          use_all_coins=True)
        if not outputs_amounts:
            return None, ('Can not create new denoms,'
                          ' not enough coins selected')
        if not wfl:
            return None, ('Can not create new denoms as other new'
                          ' denoms creation process is in progress')
        last_tx_idx = len(outputs_amounts) - 1
        for i, tx_amounts in enumerate(outputs_amounts):
            try:
                w = self.wallet
                txid, tx = self._make_new_denoms_tx(wfl, tx_amounts,
                                                    last_tx_idx, i,
                                                    coins, password,
                                                    use_all_coins=True)
                if not w.add_transaction(tx):
                    raise Exception(f'Transaction with txid: {txid}'
                                    f' conflicts with current history')
                # add PS data ourselves if tx was not detected as PS tx
                if not w.db.get_ps_tx(txid)[0] == PSTxTypes.NEW_DENOMS:
                    self._add_ps_data(txid, tx, PSTxTypes.NEW_DENOMS)
                self.logger.info(f'Created new denoms tx: {txid},'
                                 f' workflow: {wfl.lid}')
                if i == last_tx_idx:
                    with self.new_denoms_wfl_lock:
                        saved = self.new_denoms_wfl
                        if not saved:
                            raise Exception('new_denoms_wfl not found')
                        if saved.uuid != wfl.uuid:
                            raise Exception('new_denoms_wfl differs'
                                            ' from original')
                        wfl.completed = True
                        self.set_new_denoms_wfl(wfl)
                        self.logger.wfl_ok(f'Completed new denoms'
                                           f' workflow: {wfl.lid}')
                    return wfl, None
                else:
                    # next tx spends the change output of this tx
                    txin0 = copy.deepcopy(tx.inputs()[0])
                    txin0_addr = w.get_txin_address(txin0)
                    utxos = w.get_utxos([txin0_addr],
                                        min_rounds=PSCoinRounds.OTHER)
                    change_outpoint = None
                    for change_idx, o in enumerate(tx.outputs()):
                        if o.address == txin0_addr:
                            change_outpoint = f'{txid}:{change_idx}'
                            break
                    coins = []
                    for utxo in utxos:
                        if utxo.prevout.to_str() != change_outpoint:
                            continue
                        coins.append(utxo)
            except Exception as e:
                err = str(e)
                self.logger.wfl_err(f'Error creating new denoms tx:'
                                    f' {err}, workflow: {wfl.lid}')
                self._cleanup_new_denoms_wfl(force=True)
                self.logger.info(f'Cleaned up new denoms workflow:'
                                 f' {wfl.lid}')
                return None, err
    async def create_new_denoms_wfl(self):
        """Create a chain of new denoms txs as part of mixing.

        Each tx spends change of the previous one; on any failure the
        workflow is cleaned up and mixing may be stopped with a user
        visible message.
        """
        coins_data = await self.get_next_coins_for_mixing()
        coins = coins_data['coins']
        if not coins:
            return
        _start = self._start_new_denoms_wfl
        wfl, outputs_amounts = await self.loop.run_in_executor(None, _start,
                                                               coins)
        if not wfl:
            return
        last_tx_idx = len(outputs_amounts) - 1
        for i, tx_amounts in enumerate(outputs_amounts):
            try:
                w = self.wallet
                _make_tx = self._make_new_denoms_tx
                txid, tx = await self.loop.run_in_executor(None, _make_tx,
                                                           wfl, tx_amounts,
                                                           last_tx_idx, i,
                                                           coins)
                if not w.add_transaction(tx):
                    raise Exception(f'Transaction with txid: {txid}'
                                    f' conflicts with current history')

                def _after_create_tx():
                    with self.new_denoms_wfl_lock:
                        self.logger.info(f'Created new denoms tx: {txid},'
                                         f' workflow: {wfl.lid}')
                        if i == last_tx_idx:
                            saved = self.new_denoms_wfl
                            if not saved:
                                raise Exception('new_denoms_wfl not found')
                            if saved.uuid != wfl.uuid:
                                raise Exception('new_denoms_wfl differs'
                                                ' from original')
                            wfl.completed = True
                            self.set_new_denoms_wfl(wfl)
                            self.logger.wfl_ok(f'Completed new denoms'
                                               f' workflow: {wfl.lid}')
                    # gather coins for the next tx in the chain
                    coins_data = self._get_next_coins_for_mixing()
                    coins = coins_data['coins']
                    txin0 = copy.deepcopy(tx.inputs()[0])
                    txin0_addr = w.get_txin_address(txin0)
                    if i != last_tx_idx:
                        # include this tx change output as a next coin
                        utxos = w.get_utxos([txin0_addr])
                        change_outpoint = None
                        for change_idx, o in enumerate(tx.outputs()):
                            if o.address == txin0_addr:
                                change_outpoint = f'{txid}:{change_idx}'
                                break
                        for utxo in utxos:
                            if utxo.prevout.to_str() != change_outpoint:
                                continue
                            coins.append(utxo)
                    if self.group_origin_coins_by_addr:
                        coins = [c for c in coins if c.address == txin0_addr]
                    return coins
                coins = await self.loop.run_in_executor(None, _after_create_tx)
                w.save_db()
            except Exception as e:
                self.logger.wfl_err(f'Error creating new denoms tx:'
                                    f' {str(e)}, workflow: {wfl.lid}')
                await self.cleanup_new_denoms_wfl(force=True)
                type_e = type(e)
                msg = None
                if type_e == NoDynamicFeeEstimates:
                    msg = self.NO_DYNAMIC_FEE_MSG.format(str(e))
                elif type_e == AddPSDataError:
                    msg = self.ADD_PS_DATA_ERR_MSG
                    type_name = SPEC_TX_NAMES[PSTxTypes.NEW_DENOMS]
                    msg = f'{msg} {type_name} {txid}:\n{str(e)}'
                elif type_e == NotFoundInKeypairs:
                    msg = self.NOT_FOUND_KEYS_MSG
                elif type_e == SignWithKeypairsFailed:
                    msg = self.SIGN_WIHT_KP_FAILED_MSG
                elif type_e == NotEnoughFunds:
                    self._not_enough_funds = True
                if msg:
                    await self.stop_mixing_from_async_thread(msg)
                break
def _start_new_denoms_wfl(self, coins, use_all_coins=False):
outputs_amounts = \
self.calc_need_denoms_amounts(coins=coins,
use_all_coins=use_all_coins)
if not outputs_amounts:
return None, None
with self.new_denoms_wfl_lock, \
self.pay_collateral_wfl_lock, \
self.new_collateral_wfl_lock:
if self.new_denoms_wfl:
return None, None
uuid = str(uuid4())
wfl = PSTxWorkflow(uuid=uuid)
self.set_new_denoms_wfl(wfl)
self.logger.info(f'Started up new denoms workflow: {wfl.lid}')
return wfl, outputs_amounts
    def _make_new_denoms_tx(self, wfl, tx_amounts, last_tx_idx, i,
                            coins, password=None, use_all_coins=False):
        """Build, sign and attach one new denoms tx to workflow wfl.

        tx_amounts holds denom output values for tx number i of
        last_tx_idx + 1. Returns (txid, tx). Raises NotFoundInKeypairs
        when input keys are missing from the keypairs cache.
        """
        w = self.wallet
        addrs_cnt = len(tx_amounts)
        # reserve one output address per denom, tagged with wfl uuid
        oaddrs = self.reserve_addresses(addrs_cnt, data=wfl.uuid)
        outputs = [PartialTxOutput.from_address_and_value(addr, a)
                   for addr, a in zip(oaddrs, tx_amounts)]
        tx = w.make_unsigned_transaction(coins=coins, outputs=outputs)
        inputs = tx.inputs()
        if self._keypairs_cache:
            input_addrs = [utxo.address for utxo in inputs]
            not_found_addrs = self._find_addrs_not_in_keypairs(input_addrs)
            if not_found_addrs:
                not_found_addrs = ', '.join(list(not_found_addrs))
                raise NotFoundInKeypairs(f'Input addresses is not found'
                                         f' in the keypairs cache:'
                                         f' {not_found_addrs}')
        no_change = False
        fee_per_kb = self.config.fee_per_kb()
        if i == last_tx_idx:
            if use_all_coins:
                # last tx of the chain spends everything, no change output
                no_change = True
        if no_change:
            tx = PartialTransaction.from_io(inputs[:], outputs[:], locktime=0)
            for txin in tx.inputs():
                txin.nsequence = 0xffffffff
        else:
            # send change back to the first input address
            in0 = inputs[0].address
            tx = w.make_unsigned_transaction(coins=inputs, outputs=outputs,
                                             change_addr=in0)
        tx = self.sign_transaction(tx, password)
        # sanity check on the resulting fee
        estimated_fee = calc_tx_fee(len(tx.inputs()), len(tx.outputs()),
                                    fee_per_kb, max_size=True)
        overfee = tx.get_fee() - estimated_fee
        assert overfee < self.min_new_collateral_from_coins_val, 'too high fee'
        txid = tx.txid()
        raw_tx = tx.serialize_to_network()
        tx_type = PSTxTypes.NEW_DENOMS
        wfl.add_tx(txid=txid, raw_tx=raw_tx, tx_type=tx_type)
        with self.new_denoms_wfl_lock:
            saved = self.new_denoms_wfl
            if not saved:
                raise Exception('new_denoms_wfl not found')
            if saved.uuid != wfl.uuid:
                raise Exception('new_denoms_wfl differs from original')
            self.set_new_denoms_wfl(wfl)
        return txid, tx
async def cleanup_new_denoms_wfl(self, force=False):
_cleanup = self._cleanup_new_denoms_wfl
changed = await self.loop.run_in_executor(None, _cleanup, force)
if changed:
self.wallet.save_db()
    def _cleanup_new_denoms_wfl(self, force=False):
        """Remove new denoms workflow txs and data; True if changed.

        A completed workflow that still has queued txs is kept unless
        force is set.
        """
        with self.new_denoms_wfl_lock:
            wfl = self.new_denoms_wfl
            # parses as: not wfl or (completed and has txs and not force)
            if not wfl or wfl.completed and wfl.tx_order and not force:
                return
        w = self.wallet
        if wfl.tx_order:
            for txid in wfl.tx_order[::-1]:  # remove in reverse order
                if w.db.get_transaction(txid):
                    w.remove_transaction(txid)
                else:
                    self._cleanup_new_denoms_wfl_tx_data(txid)
        else:
            self._cleanup_new_denoms_wfl_tx_data()
        return True
    def _cleanup_new_denoms_wfl_tx_data(self, txid=None):
        """Drop one tx (by txid) from the new denoms workflow.

        When the workflow has no txs left, release its reserved
        addresses and clear the workflow itself.
        """
        with self.new_denoms_wfl_lock:
            wfl = self.new_denoms_wfl
            if not wfl:
                return
            if txid:
                tx_data = wfl.pop_tx(txid)
                if tx_data:
                    self.set_new_denoms_wfl(wfl)
                    self.logger.info(f'Cleaned up new denoms tx:'
                                     f' {txid}, workflow: {wfl.lid}')
        if wfl.tx_order:
            return  # more txs remain, keep the workflow
        w = self.wallet
        for addr in w.db.select_ps_reserved(data=wfl.uuid):
            self.pop_ps_reserved(addr)
        with self.new_denoms_wfl_lock:
            saved = self.new_denoms_wfl
            if saved and saved.uuid == wfl.uuid:
                self.clear_new_denoms_wfl()
        self.logger.info(f'Cleaned up new denoms workflow: {wfl.lid}')
    async def broadcast_new_denoms_wfl(self):
        """Broadcast the next pending tx of a completed denoms workflow.

        Workflow state is re-saved after both success and failure as
        tx_data.send updates per-tx sending state on the workflow.
        """
        def _check_wfl():
            with self.new_denoms_wfl_lock:
                wfl = self.new_denoms_wfl
                if not wfl:
                    return
                if not wfl.completed:
                    return
            return wfl
        wfl = await self.loop.run_in_executor(None, _check_wfl)
        if not wfl:
            return
        w = self.wallet
        tx_data = wfl.next_to_send(w)
        if not tx_data:
            return
        txid = tx_data.txid
        sent, err = await tx_data.send(self)
        if err:
            def _on_fail():
                with self.new_denoms_wfl_lock:
                    saved = self.new_denoms_wfl
                    if not saved:
                        raise Exception('new_denoms_wfl not found')
                    if saved.uuid != wfl.uuid:
                        raise Exception('new_denoms_wfl differs from original')
                    self.set_new_denoms_wfl(wfl)
                self.logger.wfl_err(f'Failed broadcast of new denoms tx'
                                    f' {txid}: {err}, workflow {wfl.lid}')
            await self.loop.run_in_executor(None, _on_fail)
        if sent:
            def _on_success():
                with self.new_denoms_wfl_lock:
                    saved = self.new_denoms_wfl
                    if not saved:
                        raise Exception('new_denoms_wfl not found')
                    if saved.uuid != wfl.uuid:
                        raise Exception('new_denoms_wfl differs from original')
                    self.set_new_denoms_wfl(wfl)
                self.logger.wfl_done(f'Broadcasted transaction {txid} from new'
                                     f' denoms workflow: {wfl.lid}')
                self.last_denoms_tx_time = time.time()
                tx = Transaction(wfl.tx_data[txid].raw_tx)
                self._process_by_new_denoms_wfl(txid, tx)
                if not wfl.next_to_send(w):
                    self.logger.wfl_done(f'Broadcast completed for new denoms'
                                         f' workflow: {wfl.lid}')
            await self.loop.run_in_executor(None, _on_success)
def _search_new_denoms_wfl(self, txid, tx):
err = self._check_new_denoms_tx_err(txid, tx, full_check=False)
if not err:
wfl = self.new_denoms_wfl
if wfl and wfl.tx_order and txid in wfl.tx_order:
return wfl
def _check_on_new_denoms_wfl(self, txid, tx):
wfl = self._search_new_denoms_wfl(txid, tx)
err = self._check_new_denoms_tx_err(txid, tx)
if not err:
return True
if wfl:
raise AddPSDataError(f'{err}')
else:
return False
    def _process_by_new_denoms_wfl(self, txid, tx):
        """Consume a seen tx that belongs to the new denoms workflow.

        Pops the tx from the workflow; when the last tx is processed,
        releases reserved addresses and clears the workflow.
        """
        wfl = self._search_new_denoms_wfl(txid, tx)
        if not wfl:
            return
        with self.new_denoms_wfl_lock:
            saved = self.new_denoms_wfl
            if not saved or saved.uuid != wfl.uuid:
                return
            tx_data = wfl.pop_tx(txid)
            if tx_data:
                self.set_new_denoms_wfl(wfl)
                self.logger.wfl_done(f'Processed tx: {txid} from new denoms'
                                     f' workflow: {wfl.lid}')
        if wfl.tx_order:
            return  # more txs remain to be processed
        w = self.wallet
        for addr in w.db.select_ps_reserved(data=wfl.uuid):
            self.pop_ps_reserved(addr)
        with self.new_denoms_wfl_lock:
            saved = self.new_denoms_wfl
            if saved and saved.uuid == wfl.uuid:
                self.clear_new_denoms_wfl()
        self.logger.wfl_done(f'Finished processing of new denoms'
                             f' workflow: {wfl.lid}')
    async def cleanup_staled_denominate_wfls(self):
        """Periodic task: clean completed denominate workflows whose
        masternode txs were not seen within wait_for_mn_txs_time."""
        def _cleanup_staled():
            changed = False
            for uuid in self.denominate_wfl_list:
                wfl = self.get_denominate_wfl(uuid)
                if not wfl or not wfl.completed:
                    continue
                now = time.time()
                # wfl.completed holds completion timestamp
                if now - wfl.completed > self.wait_for_mn_txs_time:
                    self.logger.info(f'Cleaning staled denominate'
                                     f' workflow: {wfl.lid}')
                    self._cleanup_denominate_wfl(wfl)
                    changed = True
            return changed
        while True:
            if self.enabled:
                done = await self.loop.run_in_executor(None, _cleanup_staled)
                if done:
                    self.wallet.save_db()
            await asyncio.sleep(self.wait_for_mn_txs_time/12)
    async def start_denominate_wfl(self):
        """Run one denominate (mixing) session with a masternode.

        Protocol sequence: dsa (join queue) -> dsq ready -> dsi (submit
        inputs/outputs with pay collateral) -> dsf (final tx to sign) ->
        dss (send signatures) -> dsc (complete). 'dssu' status updates
        are skipped at every stage. The workflow is cleaned up in the
        finally block (completed workflows survive the grace period in
        _cleanup_denominate_wfl).
        """
        wfl = None
        try:
            _start = self._start_denominate_wfl
            dsq = None
            session = None
            # in ~67% of cases try to join a recently seen queue first
            if random.random() > 0.33:
                self.logger.debug('try to get masternode from recent dsq')
                recent_mns = self.recent_mixes_mns
                while self.state == PSStates.Mixing:
                    dsq = self.dash_net.get_recent_dsq(recent_mns)
                    if dsq is not None:
                        self.logger.debug(f'get dsq from recent dsq queue'
                                          f' {dsq.masternodeOutPoint}')
                        dval = PS_DENOM_REVERSE_DICT[dsq.nDenom]
                        wfl = await self.loop.run_in_executor(None,
                                                              _start, dval)
                        break
                    await asyncio.sleep(0.5)
            else:
                self.logger.debug('try to create new queue'
                                  ' on random masternode')
                wfl = await self.loop.run_in_executor(None, _start)
            if not wfl:
                return
            if self.state != PSStates.Mixing:
                raise Exception('Mixing is finished')
            else:
                session = await self.start_mix_session(wfl.denom, dsq, wfl.lid)

            pay_collateral_tx = self.get_pay_collateral_tx()
            if not pay_collateral_tx:
                raise Exception('Absent suitable pay collateral tx')
            await session.send_dsa(pay_collateral_tx)
            while True:
                cmd, res = await session.read_next_msg(wfl)
                if cmd == 'dssu':
                    continue
                elif cmd == 'dsq' and session.fReady:
                    break
                else:
                    raise Exception(f'Unsolisited cmd: {cmd} after dsa sent')
            # collateral may have been consumed meanwhile, fetch again
            pay_collateral_tx = self.get_pay_collateral_tx()
            if not pay_collateral_tx:
                raise Exception('Absent suitable pay collateral tx')
            final_tx = None
            await session.send_dsi(wfl.inputs, pay_collateral_tx, wfl.outputs)
            while True:
                cmd, res = await session.read_next_msg(wfl)
                if cmd == 'dssu':
                    continue
                elif cmd == 'dsf':
                    final_tx = PartialTransaction.from_tx(res)
                    break
                else:
                    raise Exception(f'Unsolisited cmd: {cmd} after dsi sent')
            signed_inputs = self._sign_inputs(final_tx, wfl.inputs)
            await session.send_dss(signed_inputs)
            while True:
                cmd, res = await session.read_next_msg(wfl)
                if cmd == 'dssu':
                    continue
                elif cmd == 'dsc':
                    def _on_dsc():
                        with self.denominate_wfl_lock:
                            saved = self.get_denominate_wfl(wfl.uuid)
                            if saved:
                                # mark completion time for staled cleanup
                                saved.completed = time.time()
                                self.set_denominate_wfl(saved)
                                return saved
                            else:
                                self.logger.debug(f'denominate workflow:'
                                                  f' {wfl.lid} not found')
                    saved = await self.loop.run_in_executor(None, _on_dsc)
                    if saved:
                        wfl = saved
                        self.wallet.save_db()
                    break
                else:
                    raise Exception(f'Unsolisited cmd: {cmd} after dss sent')
            self.logger.wfl_ok(f'Completed denominate workflow: {wfl.lid}')
        except Exception as e:
            type_e = type(e)
            if type_e != asyncio.CancelledError:
                if wfl:
                    self.logger.wfl_err(f'Error in denominate worfklow:'
                                        f' {str(e)}, workflow: {wfl.lid}')
                else:
                    self.logger.wfl_err(f'Error during creation of denominate'
                                        f' worfklow: {str(e)}')
                msg = None
                if type_e == NoDynamicFeeEstimates:
                    msg = self.NO_DYNAMIC_FEE_MSG.format(str(e))
                elif type_e == NotFoundInKeypairs:
                    msg = self.NOT_FOUND_KEYS_MSG
                elif type_e == SignWithKeypairsFailed:
                    msg = self.SIGN_WIHT_KP_FAILED_MSG
                if msg:
                    await self.stop_mixing_from_async_thread(msg)
        finally:
            if session:
                await self.stop_mix_session(session.peer_str)
            if wfl:
                await self.cleanup_denominate_wfl(wfl)
    def _select_denoms_to_mix(self, denom_value=None):
        """Randomly pick up to PRIVATESEND_ENTRY_MAX_SIZE denom outpoints
        of one denom value to mix.

        If denom_value is None it is locked to the first picked denom's
        value. Returns (outpoints, denom_value) or (None, None).
        """
        if not self._denoms_to_mix_cache:
            self.logger.debug('No suitable denoms to mix,'
                              ' _denoms_to_mix_cache is empty')
            return None, None
        if denom_value is not None:
            denoms = self.denoms_to_mix(denom_value=denom_value)
        else:
            denoms = self.denoms_to_mix()
        outpoints = list(denoms.keys())
        w = self.wallet
        icnt = 0
        txids = []
        inputs = []
        while icnt < random.randint(1, PRIVATESEND_ENTRY_MAX_SIZE):
            if not outpoints:
                break
            # pick candidates in random order
            outpoint = outpoints.pop(random.randint(0, len(outpoints)-1))
            if not w.db.get_ps_denom(outpoint):
                continue
            if w.db.get_ps_spending_denom(outpoint):
                continue  # already used by another workflow
            txid = outpoint.split(':')[0]
            if txid in txids:
                continue  # at most one input per funding tx
            height = w.get_tx_height(txid).height
            islock = w.db.get_islock(txid)
            if not islock and height <= 0:
                continue  # skip unconfirmed txs without islock
            # denom tuple: [0] address, [1] value, [2] rounds
            denom = denoms.pop(outpoint)
            if denom[2] >= self.mix_rounds:
                continue
            if not self.is_ps_ks(denom[0]) and self.is_hw_ks:
                continue
            if denom_value is None:
                denom_value = denom[1]  # lock value on first picked denom
            elif denom[1] != denom_value:
                continue
            inputs.append(outpoint)
            txids.append(txid)
            icnt += 1
        if not inputs:
            self.logger.debug(f'No suitable denoms to mix:'
                              f' denom_value={denom_value}')
            return None, None
        else:
            return inputs, denom_value
    def _start_denominate_wfl(self, denom_value=None):
        """Create a denominate workflow: lock inputs, reserve outputs.

        Returns the created PSDenominateWorkflow or None when the
        session limit is reached or no suitable denoms are found.
        Raises NotFoundInKeypairs if input keys are missing from the
        keypairs cache.
        """
        if self.active_denominate_wfl_cnt >= self.max_sessions:
            return
        selected_inputs, denom_value = self._select_denoms_to_mix(denom_value)
        if not selected_inputs:
            return
        with self.denominate_wfl_lock, self.denoms_lock:
            if self.active_denominate_wfl_cnt >= self.max_sessions:
                return  # re-check session limit under lock
            icnt = 0
            inputs = []
            input_addrs = []
            w = self.wallet
            # re-validate selection under the locks
            for outpoint in selected_inputs:
                denom = w.db.get_ps_denom(outpoint)
                if not denom:
                    continue
                if w.db.get_ps_spending_denom(outpoint):
                    continue
                if self.is_hw_ks and not self.is_ps_ks(denom[0]):
                    continue
                inputs.append(outpoint)
                input_addrs.append(denom[0])
                icnt += 1
            if icnt < 1:
                self.logger.debug(f'No suitable denoms to mix after'
                                  f' denoms_lock: denom_value={denom_value}')
                return
            uuid = str(uuid4())
            wfl = PSDenominateWorkflow(uuid=uuid)
            wfl.inputs = inputs
            wfl.denom = denom_value
            self.set_denominate_wfl(wfl)
            for outpoint in inputs:
                self.add_ps_spending_denom(outpoint, wfl.uuid)
        if self._keypairs_cache:
            not_found_addrs = self._find_addrs_not_in_keypairs(input_addrs)
            if not_found_addrs:
                not_found_addrs = ', '.join(list(not_found_addrs))
                raise NotFoundInKeypairs(f'Input addresses is not found'
                                         f' in the keypairs cache:'
                                         f' {not_found_addrs}')
        # reuse addresses already reserved for these inputs, reserve the rest
        output_addrs = []
        found_outpoints = []
        for addr, data in w.db.get_ps_reserved().items():
            if data in inputs:
                output_addrs.append(addr)
                found_outpoints.append(data)
        for outpoint in inputs:
            if outpoint not in found_outpoints:
                force_main_ks = False
                if self.is_hw_ks:
                    denom = w.db.get_ps_denom(outpoint)
                    # last mixing round output goes to main keystore
                    if denom[2] == self.mix_rounds - 1:
                        force_main_ks = True
                reserved = self.reserve_addresses(1, data=outpoint,
                                                  force_main_ks=force_main_ks)
                output_addrs.append(reserved[0])
        with self.denominate_wfl_lock:
            saved = self.get_denominate_wfl(wfl.uuid)
            if not saved:
                raise Exception(f'denominate_wfl {wfl.lid} not found')
            wfl = saved
            wfl.outputs = output_addrs
            self.set_denominate_wfl(saved)
        self.logger.info(f'Created denominate workflow: {wfl.lid}, with inputs'
                         f' value {wfl.denom}, count {len(wfl.inputs)}')
        return wfl
def _sign_inputs(self, tx, inputs):
signed_inputs = []
tx = self._sign_denominate_tx(tx)
for i in tx.inputs():
if i.prevout.to_str() not in inputs:
continue
signed_inputs.append(CTxIn(i.prevout.txid[::-1], i.prevout.out_idx,
i.script_sig, i.nsequence))
return signed_inputs
def _sign_denominate_tx(self, tx):
mine_txins_cnt = 0
for txin in tx.inputs():
self.wallet.add_input_info(txin)
if txin.address is None:
continue
mine_txins_cnt += 1
self.sign_transaction(tx, None, mine_txins_cnt)
return tx
async def cleanup_denominate_wfl(self, wfl):
_cleanup = self._cleanup_denominate_wfl
changed = await self.loop.run_in_executor(None, _cleanup, wfl)
if changed:
self.wallet.save_db()
    def _cleanup_denominate_wfl(self, wfl):
        """Release spending denoms and clear workflow; True if changed.

        A completed workflow is kept during the wait_for_mn_txs_time
        grace period so the masternode tx can still be matched.
        """
        with self.denominate_wfl_lock:
            saved = self.get_denominate_wfl(wfl.uuid)
            if not saved:
                return
            else:
                wfl = saved
        completed = wfl.completed
        if completed:
            now = time.time()
            # wfl.completed holds completion timestamp
            if now - wfl.completed <= self.wait_for_mn_txs_time:
                return
        w = self.wallet
        for outpoint, uuid in list(w.db.get_ps_spending_denoms().items()):
            if uuid != wfl.uuid:
                continue
            with self.denoms_lock:
                self.pop_ps_spending_denom(outpoint)
        with self.denominate_wfl_lock:
            self.clear_denominate_wfl(wfl.uuid)
        self.logger.info(f'Cleaned up denominate workflow: {wfl.lid}')
        return True
def _search_denominate_wfl(self, txid, tx):
err = self._check_denominate_tx_err(txid, tx, full_check=False)
if not err:
for uuid in self.denominate_wfl_list:
wfl = self.get_denominate_wfl(uuid)
if not wfl or not wfl.completed:
continue
if self._check_denominate_tx_io_on_wfl(txid, tx, wfl):
return wfl
def _check_on_denominate_wfl(self, txid, tx):
wfl = self._search_denominate_wfl(txid, tx)
err = self._check_denominate_tx_err(txid, tx)
if not err:
return True
if wfl:
raise AddPSDataError(f'{err}')
else:
return False
    def _process_by_denominate_wfl(self, txid, tx):
        # Called when a denominate tx arrives: release the denoms the
        # matching workflow had reserved for spending and drop the
        # workflow itself.
        wfl = self._search_denominate_wfl(txid, tx)
        if not wfl:
            return
        w = self.wallet
        for outpoint, uuid in list(w.db.get_ps_spending_denoms().items()):
            if uuid != wfl.uuid:
                continue
            with self.denoms_lock:
                self.pop_ps_spending_denom(outpoint)
        with self.denominate_wfl_lock:
            self.clear_denominate_wfl(wfl.uuid)
        self.logger.wfl_done(f'Finished processing of denominate'
                             f' workflow: {wfl.lid} with tx: {txid}')
    def get_workflow_tx_info(self, wfl):
        """Summarize the transactions of a workflow.

        Returns a tuple ``(tx_type, tx_cnt, total, total_fee)`` where
        ``tx_type`` is taken from the first tx in ``wfl.tx_order`` (None
        when the workflow has no txs) and ``total``/``total_fee`` are
        summed over all workflow transactions.
        """
        w = self.wallet
        tx_cnt = len(wfl.tx_order)
        tx_type = None if not tx_cnt else wfl.tx_data[wfl.tx_order[0]].tx_type
        total = 0
        total_fee = 0
        for txid in wfl.tx_order:
            tx = Transaction(wfl.tx_data[txid].raw_tx)
            tx_info = w.get_tx_info(tx)
            total += tx_info.amount
            total_fee += tx_info.fee
        return tx_type, tx_cnt, total, total_fee
| true | true |
f71e6e34a925999aadf7dbefd40bb6d81463830a | 6,170 | py | Python | src/gpgodzilla.py | yirzhou/gpgodzilla | b7d4e4d8db730f99d7ad6caab126ff1f14b2d39c | [
"Apache-2.0"
] | 1 | 2020-12-12T00:36:31.000Z | 2020-12-12T00:36:31.000Z | src/gpgodzilla.py | yirzhou/gpgodzilla | b7d4e4d8db730f99d7ad6caab126ff1f14b2d39c | [
"Apache-2.0"
] | null | null | null | src/gpgodzilla.py | yirzhou/gpgodzilla | b7d4e4d8db730f99d7ad6caab126ff1f14b2d39c | [
"Apache-2.0"
] | null | null | null | import logging
import subprocess
from multiprocessing import Process
logging.basicConfig(format='gpgeternal: %(asctime)s - %(message)s', level=logging.DEBUG)
def load_public_key(key_path):
    """Import a GPG public key file into the local gpg2 keyring.

    - key_path: path to the ASCII-armored or binary public key file.

    Raises ValueError if gpg2 fails to import the key.
    """
    try:
        # The captured output was previously bound and then discarded via a
        # no-op expression statement `(output)`; both removed as dead code.
        subprocess.check_output(['gpg2', '--import', key_path])
    except Exception as e:
        logging.error('GPG Error: importing public key at %s.' % key_path)
        raise ValueError(str(e))
def load_private_key(key_path):
    """Import a GPG private (secret) key file into the local gpg2 keyring.

    Raises ValueError if gpg2 fails to import the key.
    """
    try:
        subprocess.check_output(
            ['gpg2', '--allow-secret-key-import', '--import', key_path])
    except Exception as exc:
        logging.error('GPG Error: importing private key at %s.' % key_path)
        raise ValueError(str(exc))
def __encrypt_with_subprocess(encryptProcess, line):
    """Feed one chunk of bytes into the stdin of a running gpg2 process.

    Raises ValueError if the pipe write fails (e.g. gpg2 already exited).
    """
    try:
        pipe = encryptProcess.stdin
        pipe.write(line)
        pipe.flush()
    except Exception as exc:
        logging.error(str(exc))
        raise ValueError(str(exc))
def __get_subprocess_for_encrypt(recipient, output_file):
    """Spawn a gpg2 encrypt process whose ciphertext goes to *output_file*.

    Returns ``(process, output_stream)``; the caller owns both and must
    close ``output_stream``.  The output file is opened in binary append
    mode so several encrypted messages can be concatenated into one file
    (hence --allow-multiple-messages on the decrypt side).
    """
    try:
        output_stream = open(output_file, 'ab+')
        encryptProcess = subprocess.Popen(['gpg2', '--allow-multiple-messages', f'--recipient={recipient}', '--always-trust', '--encrypt'], stdin=subprocess.PIPE, stdout=output_stream)
        return encryptProcess, output_stream
    except Exception as e:
        logging.error(str(e))
        raise ValueError(str(e))
def __get_subprocess_for_decrypt(incoming_file, passphrase=None):
    """Spawn a gpg2 decrypt process reading *incoming_file*.

    When *passphrase* is given it is passed via --passphrase with
    loopback pinentry so no interactive prompt appears.  Returns the
    Popen object; decrypted plaintext is available on its stdout pipe.
    """
    try:
        input_file = open(incoming_file, 'r')
        decryptProcess = None
        if not passphrase:
            decryptProcess = subprocess.Popen(['gpg2', '--allow-multiple-messages', '--always-trust', '--decrypt'], stdin=input_file, stdout=subprocess.PIPE)
        else:
            decryptProcess = subprocess.Popen(['gpg2', f'--passphrase={passphrase}', '--pinentry-mode=loopback', '--allow-multiple-messages', '--always-trust', '--decrypt'], \
                stdin=input_file, stdout=subprocess.PIPE)
        return decryptProcess
    except Exception as e:
        logging.error(str(e))
        raise ValueError(str(e))
def __decrypt_and_manipulate_line(file_to_decrypt, output_file, manipulation_function, passphrase=None):
    """Decrypt a file line by line, transforming each line on the way out.

    One gpg2 subprocess decrypts the whole file; every decrypted line is
    decoded to text, passed through *manipulation_function* and appended
    to *output_file*.

    - file_to_decrypt: path of the encrypted input file.
    - output_file: path of the plaintext output file (append mode).
    - manipulation_function: callable applied to each decrypted line.
    - passphrase: passphrase of the secret key, if any.
    """
    process = __get_subprocess_for_decrypt(file_to_decrypt, passphrase)
    with open(output_file, 'a+') as out:
        for raw_line in iter(process.stdout.readline, b''):
            out.write(manipulation_function(raw_line.decode("utf-8")))
def __encrypt_and_manipulate_line(recipient, file_to_encrypt, output_file, manipulation_function):
    """Manipulate each line of a file and encrypt it with gpg2.

    One gpg2 subprocess encrypts for *recipient*; each input line is
    passed through *manipulation_function*, encoded as UTF-8 bytes and
    fed to the encrypting subprocess, whose ciphertext is appended to
    *output_file*.

    - recipient: the recipient of the encrypted message.
    - file_to_encrypt: file path of the file to manipulate and encrypt.
    - output_file: file path of the file to output.
    - manipulation_function: callable applied to each line before encryption.
    """
    encryptProcess, output_stream = __get_subprocess_for_encrypt(recipient, output_file)
    try:
        with open(file_to_encrypt, 'r') as input_file:
            for line in input_file:
                manipulated_line = bytes(manipulation_function(line), 'utf-8')
                __encrypt_with_subprocess(encryptProcess, manipulated_line)
    finally:
        # Always release the output handle, even if manipulation or the
        # gpg2 pipe fails mid-file (the original leaked it on error).
        output_stream.close()
def encrypt_large_file(recipient, file_to_encrypt, output_file, manipulation_function):
    """Encrypts large file after manipulating each line with custom function.

    The work runs in a child process so the caller is not blocked by the
    line-by-line gpg2 pipeline; failure of the child raises ValueError.

    - recipient: the recipient of the encrypted message.
    - file_to_encrypt: file path of the file to manipulate and encrypt.
    - output_file: file path of the file to output.
    - manipulation_function: custom function to manipulate each line before encryption.
    """
    logging.info('Start encrypting large file: %s', str(file_to_encrypt))
    worker = None
    try:
        worker = Process(target=__encrypt_and_manipulate_line,
                         args=(recipient, file_to_encrypt, output_file,
                               manipulation_function,))
        worker.start()
        worker.join()
    except Exception as exc:
        logging.fatal(str(exc))
        raise ValueError(str(exc))
    if worker.exitcode != 0:
        logging.error('Encryption of large file failed: %s', 'exitcode!=0')
        raise ValueError('Encryption of large file failed.')
    logging.info('Encryption status: %s' , 'Success')
def decrypt_large_file(file_to_decrypt, output_file, manipulation_function, passphrase=None):
    """Manipulates each line with custom function after decryption of each line.

    The work runs in a child process; failure of the child raises
    ValueError.

    - file_to_decrypt: file path of the file to manipulate and decrypt.
    - output_file: file path of the file to output.
    - manipulation_function: custom function to manipulate each line before encryption.
    - passphrase: passphrase of the secret key, if any.
    """
    logging.info('Start decrypting large file: %s', str(file_to_decrypt))
    worker = None
    try:
        worker = Process(target=__decrypt_and_manipulate_line,
                         args=(file_to_decrypt, output_file,
                               manipulation_function, passphrase,))
        worker.start()
        worker.join()
    except Exception as exc:
        logging.fatal(str(exc))
        raise ValueError(str(exc))
    if worker.exitcode != 0:
        logging.error('Decryption of large file failed.')
        raise ValueError('Decryption of large file failed.')
    logging.info('Decryption status: %s' , 'Success')
| 45.703704 | 184 | 0.702269 | import logging
import subprocess
from multiprocessing import Process
logging.basicConfig(format='gpgeternal: %(asctime)s - %(message)s', level=logging.DEBUG)
def load_public_key(key_path):
try:
output = subprocess.check_output(['gpg2', '--import', key_path])
(output)
except Exception as e:
logging.error('GPG Error: importing public key at %s.' % key_path)
raise ValueError(str(e))
def load_private_key(key_path):
try:
subprocess.check_output(['gpg2', '--allow-secret-key-import', '--import', key_path])
except Exception as e:
logging.error('GPG Error: importing private key at %s.' % key_path)
raise ValueError(str(e))
def __encrypt_with_subprocess(encryptProcess, line):
try:
encryptProcess.stdin.write(line)
encryptProcess.stdin.flush()
except Exception as e:
logging.error(str(e))
raise ValueError(str(e))
def __get_subprocess_for_encrypt(recipient, output_file):
try:
output_stream = open(output_file, 'ab+')
encryptProcess = subprocess.Popen(['gpg2', '--allow-multiple-messages', f'--recipient={recipient}', '--always-trust', '--encrypt'], stdin=subprocess.PIPE, stdout=output_stream)
return encryptProcess, output_stream
except Exception as e:
logging.error(str(e))
raise ValueError(str(e))
def __get_subprocess_for_decrypt(incoming_file, passphrase=None):
try:
input_file = open(incoming_file, 'r')
decryptProcess = None
if not passphrase:
decryptProcess = subprocess.Popen(['gpg2', '--allow-multiple-messages', '--always-trust', '--decrypt'], stdin=input_file, stdout=subprocess.PIPE)
else:
decryptProcess = subprocess.Popen(['gpg2', f'--passphrase={passphrase}', '--pinentry-mode=loopback', '--allow-multiple-messages', '--always-trust', '--decrypt'], \
stdin=input_file, stdout=subprocess.PIPE)
return decryptProcess
except Exception as e:
logging.error(str(e))
raise ValueError(str(e))
def __decrypt_and_manipulate_line(file_to_decrypt, output_file, manipulation_function, passphrase=None):
decryptProcess = __get_subprocess_for_decrypt(file_to_decrypt, passphrase)
with open(output_file, 'a+') as output_stream:
for line in iter(decryptProcess.stdout.readline, b''):
line = manipulation_function(line.decode("utf-8"))
output_stream.write(line)
def __encrypt_and_manipulate_line(recipient, file_to_encrypt, output_file, manipulation_function):
encryptProcess, output_stream = __get_subprocess_for_encrypt(recipient, output_file)
with open(file_to_encrypt, 'r') as input_file:
for line in input_file:
manipulated_line = manipulation_function(line)
manipulated_line = bytes(manipulated_line, 'utf-8')
__encrypt_with_subprocess(encryptProcess, manipulated_line)
output_stream.close()
def encrypt_large_file(recipient, file_to_encrypt, output_file, manipulation_function):
logging.info('Start encrypting large file: %s', str(file_to_encrypt))
encryption_task = None
try:
encryption_task = Process(target=__encrypt_and_manipulate_line, args=(recipient, file_to_encrypt, output_file, manipulation_function,))
encryption_task.start()
encryption_task.join()
except Exception as e:
logging.fatal(str(e))
raise ValueError(str(e))
if encryption_task.exitcode != 0:
logging.error('Encryption of large file failed: %s', 'exitcode!=0')
raise ValueError('Encryption of large file failed.')
logging.info('Encryption status: %s' , 'Success')
def decrypt_large_file(file_to_decrypt, output_file, manipulation_function, passphrase=None):
logging.info('Start decrypting large file: %s', str(file_to_decrypt))
decryption_task = None
try:
decryption_task = Process(target=__decrypt_and_manipulate_line, args=(file_to_decrypt, output_file, manipulation_function, passphrase,))
decryption_task.start()
decryption_task.join()
except Exception as e:
logging.fatal(str(e))
raise ValueError(str(e))
if decryption_task.exitcode != 0:
logging.error('Decryption of large file failed.')
raise ValueError('Decryption of large file failed.')
logging.info('Decryption status: %s' , 'Success')
| true | true |
f71e6eba44d1c7020f68f989ac6bd44651c6a4e7 | 14,293 | py | Python | windows_packages_gpu/torch/nn/grad.py | codeproject/DeepStack | d96368a3db1bc0266cb500ba3701d130834da0e6 | [
"Apache-2.0"
] | 353 | 2020-12-10T10:47:17.000Z | 2022-03-31T23:08:29.000Z | windows_packages_gpu/torch/nn/grad.py | codeproject/DeepStack | d96368a3db1bc0266cb500ba3701d130834da0e6 | [
"Apache-2.0"
] | 80 | 2020-12-10T09:54:22.000Z | 2022-03-30T22:08:45.000Z | windows_packages_gpu/torch/nn/grad.py | codeproject/DeepStack | d96368a3db1bc0266cb500ba3701d130834da0e6 | [
"Apache-2.0"
] | 63 | 2020-12-10T17:10:34.000Z | 2022-03-28T16:27:07.000Z | """Gradient interface"""
import torch
from .modules.utils import _single, _pair, _triple
import warnings
def _grad_input_padding(grad_output, input_size, stride, padding, kernel_size, dilation=None):
    """Compute the extra output padding that makes a transposed conv of
    ``grad_output`` reproduce exactly ``input_size``.

    Returns one entry per spatial dimension; raises ValueError when
    ``input_size`` is incompatible with the convolution geometry.
    """
    if dilation is None:
        # For backward compatibility
        warnings.warn("_grad_input_padding 'dilation' argument not provided. Default of 1 is used.")
        dilation = [1] * len(stride)

    input_size = list(input_size)
    k = grad_output.dim() - 2

    if len(input_size) == k + 2:
        input_size = input_size[-k:]
    if len(input_size) != k:
        raise ValueError("input_size must have {} elements (got {})"
                         .format(k + 2, len(input_size)))

    # Smallest input size each spatial dim can have for this geometry;
    # anything up to stride-1 larger is also reachable.
    min_sizes = [
        (grad_output.size(d + 2) - 1) * stride[d] - 2 * padding[d] + 1
        + dilation[d] * (kernel_size[d] - 1)
        for d in range(k)
    ]
    max_sizes = [m + stride[d] - 1 for d, m in enumerate(min_sizes)]

    for size, min_size, max_size in zip(input_size, min_sizes, max_sizes):
        if size < min_size or size > max_size:
            raise ValueError(
                ("requested an input grad size of {}, but valid sizes range "
                 "from {} to {} (for a grad_output of {})").format(
                     input_size, min_sizes, max_sizes,
                     grad_output.size()[2:]))

    return tuple(input_size[d] - min_sizes[d] for d in range(k))
def conv1d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):
    r"""
    Computes the gradient of conv1d with respect to the input of the convolution.
    This is same as the 1D transposed convolution operator under the hood but requires
    the shape of the gradient w.r.t. input to be specified explicitly.

    Args:
        input_size : Shape of the input gradient tensor
        weight: weight tensor (out_channels x in_channels/groups x kW)
        grad_output : output gradient tensor (minibatch x out_channels x oW)
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
    """
    stride = _single(stride)
    padding = _single(padding)
    dilation = _single(dilation)
    kernel_size = [weight.shape[2]]

    if input_size is None:
        raise ValueError("grad.conv1d_input requires specifying an input_size")

    # Extra output padding so the transposed conv hits input_size exactly.
    output_padding = _grad_input_padding(grad_output, input_size, stride,
                                         padding, kernel_size, dilation)

    return torch.conv_transpose1d(
        grad_output, weight, None, stride, padding, output_padding, groups,
        dilation)
def conv1d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
    r"""
    Computes the gradient of conv1d with respect to the weight of the convolution.

    Args:
        input: input tensor of shape (minibatch x in_channels x iW)
        weight_size : Shape of the weight gradient tensor
        grad_output : output gradient tensor (minibatch x out_channels x oW)
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1

    Examples::

        >>> input = torch.randn(1,1,3, requires_grad=True)
        >>> weight = torch.randn(1,1,1, requires_grad=True)
        >>> output = F.conv1d(input, weight)
        >>> grad_output = torch.randn(output.shape)
        >>> grad_weight = torch.autograd.grad(output, filter, grad_output)
        >>> F.grad.conv1d_weight(input, weight.shape, grad_output)
    """
    stride = _single(stride)
    padding = _single(padding)
    dilation = _single(dilation)
    in_channels = input.shape[1]
    out_channels = grad_output.shape[1]
    min_batch = input.shape[0]

    # Give every (output channel, input channel) pair its own channel, then
    # flatten batch*channels into dim 0 so each pair acts as an independent
    # single-channel filter.
    grad_output = grad_output.contiguous().repeat(1, in_channels // groups, 1)
    grad_output = grad_output.contiguous().view(
        grad_output.shape[0] * grad_output.shape[1], 1, grad_output.shape[2])

    # Fold the batch into the channel dim of a single "image" so one grouped
    # conv (groups = in_channels * min_batch) correlates each input channel
    # with each repeated grad_output slice.
    input = input.contiguous().view(1, input.shape[0] * input.shape[1],
                                    input.shape[2])

    # stride and dilation swap roles between the forward conv and this
    # weight-gradient correlation (note the argument order).
    grad_weight = torch.conv1d(input, grad_output, None, dilation, padding,
                               stride, in_channels * min_batch)

    # Un-flatten the batch dimension and sum per-sample contributions.
    grad_weight = grad_weight.contiguous().view(
        min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2])

    # Reshape to (out_channels, in_channels/groups, kW) and crop to the
    # requested kernel width.
    return grad_weight.sum(dim=0).view(
        in_channels // groups, out_channels, grad_weight.shape[2]).transpose(
            0, 1).narrow(2, 0, weight_size[2])
def conv2d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):
    r"""
    Computes the gradient of conv2d with respect to the input of the convolution.
    This is same as the 2D transposed convolution operator under the hood but requires
    the shape of the gradient w.r.t. input to be specified explicitly.

    Args:
        input_size : Shape of the input gradient tensor
        weight: weight tensor (out_channels x in_channels/groups x kH x kW)
        grad_output : output gradient tensor (minibatch x out_channels x oH x oW)
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
    """
    stride = _pair(stride)
    padding = _pair(padding)
    dilation = _pair(dilation)
    kernel_size = (weight.shape[2], weight.shape[3])

    if input_size is None:
        raise ValueError("grad.conv2d_input requires specifying an input_size")

    # Extra output padding so the transposed conv hits input_size exactly.
    output_padding = _grad_input_padding(grad_output, input_size, stride,
                                         padding, kernel_size, dilation)

    return torch.conv_transpose2d(
        grad_output, weight, None, stride, padding, output_padding, groups,
        dilation)
def conv2d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
    r"""
    Computes the gradient of conv2d with respect to the weight of the convolution.

    Args:
        input: input tensor of shape (minibatch x in_channels x iH x iW)
        weight_size : Shape of the weight gradient tensor
        grad_output : output gradient tensor (minibatch x out_channels x oH x oW)
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1

    Examples::

        >>> input = torch.randn(1,1,3,3, requires_grad=True)
        >>> weight = torch.randn(1,1,1,2, requires_grad=True)
        >>> output = F.conv2d(input, weight)
        >>> grad_output = torch.randn(output.shape)
        >>> grad_weight = torch.autograd.grad(output, filter, grad_output)
        >>> F.grad.conv2d_weight(input, weight.shape, grad_output)
    """
    stride = _pair(stride)
    padding = _pair(padding)
    dilation = _pair(dilation)
    in_channels = input.shape[1]
    out_channels = grad_output.shape[1]
    min_batch = input.shape[0]

    # Give every (output channel, input channel) pair its own channel, then
    # flatten batch*channels into dim 0 so each pair acts as an independent
    # single-channel filter.
    grad_output = grad_output.contiguous().repeat(1, in_channels // groups, 1,
                                                  1)
    grad_output = grad_output.contiguous().view(
        grad_output.shape[0] * grad_output.shape[1], 1, grad_output.shape[2],
        grad_output.shape[3])

    # Fold the batch into the channel dim of a single "image" so one grouped
    # conv (groups = in_channels * min_batch) correlates each input channel
    # with each repeated grad_output slice.
    input = input.contiguous().view(1, input.shape[0] * input.shape[1],
                                    input.shape[2], input.shape[3])

    # stride and dilation swap roles between the forward conv and this
    # weight-gradient correlation (note the argument order).
    grad_weight = torch.conv2d(input, grad_output, None, dilation, padding,
                               stride, in_channels * min_batch)

    # Un-flatten the batch dimension and sum per-sample contributions.
    grad_weight = grad_weight.contiguous().view(
        min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2],
        grad_weight.shape[3])

    # Reshape to (out_channels, in_channels/groups, kH, kW) and crop to the
    # requested kernel size.
    return grad_weight.sum(dim=0).view(
        in_channels // groups, out_channels,
        grad_weight.shape[2], grad_weight.shape[3]).transpose(0, 1).narrow(
            2, 0, weight_size[2]).narrow(3, 0, weight_size[3])
def conv3d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):
    r"""
    Computes the gradient of conv3d with respect to the input of the convolution.
    This is same as the 3D transposed convolution operator under the hood but requires
    the shape of the gradient w.r.t. input to be specified explicitly.

    Args:
        input_size : Shape of the input gradient tensor
        weight: weights tensor (out_channels x in_channels/groups x kT x kH x kW)
        grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
    """
    stride = _triple(stride)
    padding = _triple(padding)
    dilation = _triple(dilation)
    kernel_size = (weight.shape[2], weight.shape[3], weight.shape[4])

    if input_size is None:
        raise ValueError("grad.conv3d_input requires specifying an input_size")

    # Extra output padding so the transposed conv hits input_size exactly.
    output_padding = _grad_input_padding(grad_output, input_size, stride,
                                         padding, kernel_size, dilation)

    return torch.conv_transpose3d(
        grad_output, weight, None, stride, padding, output_padding, groups,
        dilation)
def conv3d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
    r"""
    Computes the gradient of conv3d with respect to the weight of the convolution.

    Args:
        input: input tensor of shape (minibatch x in_channels x iT x iH x iW)
        weight_size : Shape of the weight gradient tensor
        grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1

    Examples::

        >>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True)
        >>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True)
        >>> output = F.conv3d(input, weight)
        >>> grad_output = torch.randn(output.shape)
        >>> grad_weight = torch.autograd.grad(output, weight, grad_output)
        >>> F.grad.conv3d_weight(input, weight.shape, grad_output)
    """
    stride = _triple(stride)
    padding = _triple(padding)
    dilation = _triple(dilation)
    in_channels = input.shape[1]
    out_channels = grad_output.shape[1]
    min_batch = input.shape[0]

    # Give every (output channel, input channel) pair its own channel, then
    # flatten batch*channels into dim 0 so each pair acts as an independent
    # single-channel filter.
    grad_output = grad_output.repeat(1, in_channels // groups, 1, 1, 1)
    grad_output = grad_output.contiguous().view(
        grad_output.shape[0] * grad_output.shape[1], 1, grad_output.shape[2],
        grad_output.shape[3], grad_output.shape[4])

    # Fold the batch into the channel dim of a single "volume" so one grouped
    # conv (groups = in_channels * min_batch) correlates each input channel
    # with each repeated grad_output slice.
    input = input.contiguous().view(1, input.shape[0] * input.shape[1],
                                    input.shape[2], input.shape[3],
                                    input.shape[4])

    # stride and dilation swap roles between the forward conv and this
    # weight-gradient correlation (note the argument order).
    grad_weight = torch.conv3d(input, grad_output, None, dilation, padding,
                               stride, in_channels * min_batch)

    # Un-flatten the batch dimension and sum per-sample contributions.
    grad_weight = grad_weight.contiguous().view(
        min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2],
        grad_weight.shape[3], grad_weight.shape[4])

    # Reshape to (out_channels, in_channels/groups, kT, kH, kW) and crop to
    # the requested kernel size.
    return grad_weight.sum(dim=0).view(
        in_channels // groups, out_channels, grad_weight.shape[2],
        grad_weight.shape[3], grad_weight.shape[4]).transpose(0, 1).narrow(
            2, 0, weight_size[2]).narrow(3, 0, weight_size[3]).narrow(
                4, 0, weight_size[4])
| 45.519108 | 113 | 0.639894 |
import torch
from .modules.utils import _single, _pair, _triple
import warnings
def _grad_input_padding(grad_output, input_size, stride, padding, kernel_size, dilation=None):
if dilation is None:
warnings.warn("_grad_input_padding 'dilation' argument not provided. Default of 1 is used.")
dilation = [1] * len(stride)
input_size = list(input_size)
k = grad_output.dim() - 2
if len(input_size) == k + 2:
input_size = input_size[-k:]
if len(input_size) != k:
raise ValueError("input_size must have {} elements (got {})"
.format(k + 2, len(input_size)))
def dim_size(d):
return ((grad_output.size(d + 2) - 1) * stride[d] - 2 * padding[d] + 1
+ dilation[d] * (kernel_size[d] - 1))
min_sizes = [dim_size(d) for d in range(k)]
max_sizes = [min_sizes[d] + stride[d] - 1 for d in range(k)]
for size, min_size, max_size in zip(input_size, min_sizes, max_sizes):
if size < min_size or size > max_size:
raise ValueError(
("requested an input grad size of {}, but valid sizes range "
"from {} to {} (for a grad_output of {})").format(
input_size, min_sizes, max_sizes,
grad_output.size()[2:]))
return tuple(input_size[d] - min_sizes[d] for d in range(k))
def conv1d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):
stride = _single(stride)
padding = _single(padding)
dilation = _single(dilation)
kernel_size = [weight.shape[2]]
if input_size is None:
raise ValueError("grad.conv1d_input requires specifying an input_size")
grad_input_padding = _grad_input_padding(grad_output, input_size, stride,
padding, kernel_size, dilation)
return torch.conv_transpose1d(
grad_output, weight, None, stride, padding, grad_input_padding, groups,
dilation)
def conv1d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
stride = _single(stride)
padding = _single(padding)
dilation = _single(dilation)
in_channels = input.shape[1]
out_channels = grad_output.shape[1]
min_batch = input.shape[0]
grad_output = grad_output.contiguous().repeat(1, in_channels // groups, 1)
grad_output = grad_output.contiguous().view(
grad_output.shape[0] * grad_output.shape[1], 1, grad_output.shape[2])
input = input.contiguous().view(1, input.shape[0] * input.shape[1],
input.shape[2])
grad_weight = torch.conv1d(input, grad_output, None, dilation, padding,
stride, in_channels * min_batch)
grad_weight = grad_weight.contiguous().view(
min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2])
return grad_weight.sum(dim=0).view(
in_channels // groups, out_channels, grad_weight.shape[2]).transpose(
0, 1).narrow(2, 0, weight_size[2])
def conv2d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
kernel_size = (weight.shape[2], weight.shape[3])
if input_size is None:
raise ValueError("grad.conv2d_input requires specifying an input_size")
grad_input_padding = _grad_input_padding(grad_output, input_size, stride,
padding, kernel_size, dilation)
return torch.conv_transpose2d(
grad_output, weight, None, stride, padding, grad_input_padding, groups,
dilation)
def conv2d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
in_channels = input.shape[1]
out_channels = grad_output.shape[1]
min_batch = input.shape[0]
grad_output = grad_output.contiguous().repeat(1, in_channels // groups, 1,
1)
grad_output = grad_output.contiguous().view(
grad_output.shape[0] * grad_output.shape[1], 1, grad_output.shape[2],
grad_output.shape[3])
input = input.contiguous().view(1, input.shape[0] * input.shape[1],
input.shape[2], input.shape[3])
grad_weight = torch.conv2d(input, grad_output, None, dilation, padding,
stride, in_channels * min_batch)
grad_weight = grad_weight.contiguous().view(
min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2],
grad_weight.shape[3])
return grad_weight.sum(dim=0).view(
in_channels // groups, out_channels,
grad_weight.shape[2], grad_weight.shape[3]).transpose(0, 1).narrow(
2, 0, weight_size[2]).narrow(3, 0, weight_size[3])
def conv3d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):
stride = _triple(stride)
padding = _triple(padding)
dilation = _triple(dilation)
kernel_size = (weight.shape[2], weight.shape[3], weight.shape[4])
if input_size is None:
raise ValueError("grad.conv3d_input requires specifying an input_size")
grad_input_padding = _grad_input_padding(grad_output, input_size, stride,
padding, kernel_size, dilation)
return torch.conv_transpose3d(
grad_output, weight, None, stride, padding, grad_input_padding, groups,
dilation)
def conv3d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
stride = _triple(stride)
padding = _triple(padding)
dilation = _triple(dilation)
in_channels = input.shape[1]
out_channels = grad_output.shape[1]
min_batch = input.shape[0]
grad_output = grad_output.repeat(1, in_channels // groups, 1, 1, 1)
grad_output = grad_output.contiguous().view(
grad_output.shape[0] * grad_output.shape[1], 1, grad_output.shape[2],
grad_output.shape[3], grad_output.shape[4])
input = input.contiguous().view(1, input.shape[0] * input.shape[1],
input.shape[2], input.shape[3],
input.shape[4])
grad_weight = torch.conv3d(input, grad_output, None, dilation, padding,
stride, in_channels * min_batch)
grad_weight = grad_weight.contiguous().view(
min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2],
grad_weight.shape[3], grad_weight.shape[4])
return grad_weight.sum(dim=0).view(
in_channels // groups, out_channels, grad_weight.shape[2],
grad_weight.shape[3], grad_weight.shape[4]).transpose(0, 1).narrow(
2, 0, weight_size[2]).narrow(3, 0, weight_size[3]).narrow(
4, 0, weight_size[4])
| true | true |
f71e6fc0c4a210acb51488a7f006a032db64566a | 873 | py | Python | Introduction/Game - Card/UVa 10646 - What is the card.py | ServioTRC/Competitive-Programming | ddc4ed61d4a826932a65ca6d3d033ad29c36491d | [
"Unlicense"
] | null | null | null | Introduction/Game - Card/UVa 10646 - What is the card.py | ServioTRC/Competitive-Programming | ddc4ed61d4a826932a65ca6d3d033ad29c36491d | [
"Unlicense"
] | null | null | null | Introduction/Game - Card/UVa 10646 - What is the card.py | ServioTRC/Competitive-Programming | ddc4ed61d4a826932a65ca6d3d033ad29c36491d | [
"Unlicense"
] | null | null | null | def card_value(card):
if card[0] == "2":
return 2
elif card[0] == "3":
return 3
elif card[0] == "4":
return 4
elif card[0] == "5":
return 5
elif card[0] == "6":
return 6
elif card[0] == "7":
return 7
elif card[0] == "8":
return 8
elif card[0] == "9":
return 9
elif card[0] == "K" or card[0] == "Q" or card[0] == "J" or card[0] == "A" or card[0] == "T":
return 10
def main():
    # Number of test cases on the first input line.
    n = int(input())
    for i in range(1, n+1):
        cards = input().split(" ")
        # Cards are given left to right; reverse so pop() takes the last
        # listed card first (presumably the top of the pile -- verify
        # against the UVa 10646 statement).
        cards.reverse()
        y = 0
        for _ in range(3):
            x = card_value(cards.pop())
            y += x
            # Discard 10-x further cards by slicing them off the end.
            if (10-x) > 0:
                cards = cards[:(-1*(10-x))]
        cards.reverse()
        # y accumulates the three card values; the answer is the y-th
        # card (1-based) of the remaining pile.
        print("Case %d: %s" % (i, cards[y-1]))
if __name__ == "__main__":
main() | 23.594595 | 96 | 0.419244 | def card_value(card):
if card[0] == "2":
return 2
elif card[0] == "3":
return 3
elif card[0] == "4":
return 4
elif card[0] == "5":
return 5
elif card[0] == "6":
return 6
elif card[0] == "7":
return 7
elif card[0] == "8":
return 8
elif card[0] == "9":
return 9
elif card[0] == "K" or card[0] == "Q" or card[0] == "J" or card[0] == "A" or card[0] == "T":
return 10
def main():
n = int(input())
for i in range(1, n+1):
cards = input().split(" ")
cards.reverse()
y = 0
for _ in range(3):
x = card_value(cards.pop())
y += x
if (10-x) > 0:
cards = cards[:(-1*(10-x))]
cards.reverse()
print("Case %d: %s" % (i, cards[y-1]))
if __name__ == "__main__":
main() | true | true |
f71e7051a5a6fe161b39223bd50385377292a238 | 733 | py | Python | appengine-with-cron/appengine_config.py | DoryZi/gcp-optimized-jobs | e88abd4a7b42235e1135230cf3dbd29f3a73fb4f | [
"Apache-2.0"
] | null | null | null | appengine-with-cron/appengine_config.py | DoryZi/gcp-optimized-jobs | e88abd4a7b42235e1135230cf3dbd29f3a73fb4f | [
"Apache-2.0"
] | null | null | null | appengine-with-cron/appengine_config.py | DoryZi/gcp-optimized-jobs | e88abd4a7b42235e1135230cf3dbd29f3a73fb4f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.ext import vendor
# Add any libraries installed in the "lib" folder.
vendor.add('lib')
| 31.869565 | 74 | 0.755798 |
from google.appengine.ext import vendor
vendor.add('lib')
| true | true |
f71e70cfe09fcd5ce5b22d8790636643baa10130 | 9,847 | py | Python | ruqqus/client.py | richardARPANET/python-ruqqus | 4eadc5ae89217c63abc8566bbcbc87b39784b905 | [
"MIT"
] | 1 | 2021-01-02T09:49:28.000Z | 2021-01-02T09:49:28.000Z | ruqqus/client.py | richardARPANET/python-ruqqus | 4eadc5ae89217c63abc8566bbcbc87b39784b905 | [
"MIT"
] | null | null | null | ruqqus/client.py | richardARPANET/python-ruqqus | 4eadc5ae89217c63abc8566bbcbc87b39784b905 | [
"MIT"
] | 1 | 2021-03-21T02:52:38.000Z | 2021-03-21T02:52:38.000Z | #!/usr/bin/env python3
from time import time
import requests
class RuqqusClient:
def __init__(
self,
client_id,
client_secret,
code=None,
access_token=None,
refresh_token=None,
):
self.headers = {}
self.url = 'https://ruqqus.com'
self.client_id = client_id
self.client_secret = client_secret
self.access_token = access_token
self._refresh_token = refresh_token
self.code = code
self.user_agent = f'python-ruqqus-{self.client_id}'
self.token_expire_utc = 0
self.header = {}
self.refresh_headers()
if not self.client_id or not self.client_secret:
exit("You must provide both a 'client_id' and 'client_secret")
elif (
self.client_id
and self.client_secret
and not self.code
and not self.access_token
):
if refresh_token:
self.refresh_token()
else:
exit(
"You must provide either a 'code', 'access_token', "
"or a 'refresh_token'."
)
elif (
self.client_id
and self.client_secret
and self.code
and not self.access_token
):
if self._refresh_token:
self.refresh_token()
else:
self.get_access_token()
def admin(self):
raise NotImplementedError()
def mod(self):
raise NotImplementedError()
def identity(self):
self.refresh_headers()
return requests.get(
url=f'{self.url}/api/v1/identity', headers=self.headers
).json()
def user(self, username=None, type=None):
self.refresh_headers()
if not username:
return {'error': 'You must provide a username.'}
if type:
type = str(type).lower()
# Default to user
if not type or type == 'user':
return requests.get(
url=f'{self.url}/api/v1/user/{username}', headers=self.headers
).json()
elif type == 'is_available':
return requests.get(
url=f'{self.url}/api/v1/is_available/{username}',
headers=self.headers,
).json()
elif type == 'sub':
return requests.post(
url=f'{self.url}/api/follow/{username}', headers=self.headers
).json()
elif type == 'unsub':
return requests.post(
url=f'{self.url}/api/unfollow/{username}', headers=self.headers
).json()
else:
return {'error': 'Invalid Call'}
def guild(self, name=None, type=None):
self.refresh_headers()
if not name:
return {'error': 'You must provide a guildName.'}
if type:
type = str(type).lower()
if not type or type == 'guild':
return requests.get(
url=f'{self.url}/api/v1/guild/{name}',
headers=self.headers,
).json()
elif type == 'is_available':
# Default to is_available
return requests.get(
url=f'{self.url}/api/v1/board_available/{name}',
headers=self.headers,
).json()
elif type == 'sub':
return requests.post(
url=f'{self.url}/api/v1/subscribe/{name}',
headers=self.headers,
).json()
elif type == 'unsub':
return requests.post(
url=f'{self.url}/api/v1/unsubscribe/{name}',
headers=self.headers,
).json()
else:
return {'error': 'Invalid Call'}
def submit_post(self, *, guild, title, url):
self.refresh_headers()
return requests.post(
url=f'{self.url}/api/v1/submit',
headers=self.headers,
data={
'board': guild,
'title': title,
'url': url,
},
).json()
def get_guild_posts(self, *, name, page=1, sort='hot'):
self.refresh_headers()
url = f'{self.url}/api/v1/guild/{name.lstrip("+")}/listing'
response = requests.get(
url=url,
params={'page': page, 'sort': sort},
headers=self.headers,
)
response.raise_for_status()
return response.json()
def get(
self,
type=None,
sort=None,
time=None,
guild_name=None,
username=None,
post_id=None,
comment_id=None,
):
self.refresh_headers()
if not type:
return {'error': "You must specify which 'type' of get to use"}
else:
type = str(type).lower()
if time:
time = str(time).lower()
if time not in ['day', 'week', 'month', 'year']:
return {'error': 'Invalid time parameter.'}
if sort:
sort = str(sort).lower()
if sort not in ['top', 'hot', 'disputed', 'activity', 'new']:
return {'error': 'Invalid sort parameter.'}
if type == 'front':
if sort:
if time:
return requests.get(
url=(
f'{self.url}/api/v1/front/listing'
f'?sort={sort}&time={time}'
),
headers=self.headers,
).json()
return requests.get(
url=f'{self.url}/api/v1/front/listing?sort={sort}',
headers=self.headers,
).json()
return requests.get(
url=f'{self.url}/api/v1/front/listing', headers=self.headers
).json()
elif type == 'guild':
if not guild_name:
return {'error': 'You must provide a guildName'}
else:
guild_name = str(guild_name)
if sort:
if time:
return requests.get(
url=(
f'{self.url}/api/v1/guild/{guild_name}/listing'
f'?sort={sort}&time={time}'
),
headers=self.headers,
).json()
return requests.get(
url=(
f'{self.url}/api/v1/guild/{guild_name}/listing'
f'?sort={sort}'
),
headers=self.headers,
).json()
return requests.get(
url=f'{self.url}/api/v1/guild/{guild_name}/listing',
headers=self.headers,
).json()
elif type == 'user':
if not username:
return {'error': 'You must provide a userName.'}
else:
username = str(username)
return requests.get(
url=f'{self.url}/api/v1/user/{username}', headers=self.headers
).json()
elif type == 'post':
if not post_id:
return {'error': 'You must provide a postId.'}
else:
post_id = str(post_id)
return requests.get(
url=f'{self.url}/api/v1/post/{post_id}', headers=self.headers
).json()
elif type == 'comment':
if not comment_id:
return {'error': 'You must provide a commentId.'}
else:
comment_id = str(comment_id)
return requests.get(
url=f'{self.url}/api/v1/comment/{comment_id}',
headers=self.headers,
).json()
else:
return {'error': 'Invalid Call'}
def refresh_headers(self, user_agent=None, access_token=None):
if self.access_token:
self.headers = {'Authorization': 'Bearer ' + self.access_token}
elif access_token:
self.headers = {'Authorization': 'Bearer ' + access_token}
else:
return {'error': 'You must provide an accessToken.'}
if user_agent:
self.header['user-agent'] = user_agent
self.user_agent = user_agent
elif self.user_agent:
self.header['user-agent'] = self.user_agent
else:
return {'error': 'You must provide a user-agent.'}
# refresh token 30 seconds before expiration
if self._refresh_token and self.token_expire_utc >= int(time() - 30):
self.refresh_token()
def refresh_token(self, refresh_token=None):
data = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'refresh',
}
if not self._refresh_token:
if refresh_token:
data['refresh_token'] = refresh_token
else:
data['refresh_token'] = self._refresh_token
r = requests.post(
url=f'{self.url}/oauth/grant', headers=self.headers, data=data
).json()
self.access_token = r['access_token']
return r
def get_access_token(self):
self.refresh_headers()
data = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'code',
'code': self.code,
}
r = requests.post(
url=f'{self.url}/oauth/grant', headers=self.headers, data=data
).json()
self.access_token = r['access_token']
self._refresh_token = r['refresh_token']
self.token_expire_utc = r['expires_at']
return r
| 28.459538 | 79 | 0.487864 |
from time import time
import requests
class RuqqusClient:
def __init__(
self,
client_id,
client_secret,
code=None,
access_token=None,
refresh_token=None,
):
self.headers = {}
self.url = 'https://ruqqus.com'
self.client_id = client_id
self.client_secret = client_secret
self.access_token = access_token
self._refresh_token = refresh_token
self.code = code
self.user_agent = f'python-ruqqus-{self.client_id}'
self.token_expire_utc = 0
self.header = {}
self.refresh_headers()
if not self.client_id or not self.client_secret:
exit("You must provide both a 'client_id' and 'client_secret")
elif (
self.client_id
and self.client_secret
and not self.code
and not self.access_token
):
if refresh_token:
self.refresh_token()
else:
exit(
"You must provide either a 'code', 'access_token', "
"or a 'refresh_token'."
)
elif (
self.client_id
and self.client_secret
and self.code
and not self.access_token
):
if self._refresh_token:
self.refresh_token()
else:
self.get_access_token()
def admin(self):
raise NotImplementedError()
def mod(self):
raise NotImplementedError()
def identity(self):
self.refresh_headers()
return requests.get(
url=f'{self.url}/api/v1/identity', headers=self.headers
).json()
def user(self, username=None, type=None):
self.refresh_headers()
if not username:
return {'error': 'You must provide a username.'}
if type:
type = str(type).lower()
# Default to user
if not type or type == 'user':
return requests.get(
url=f'{self.url}/api/v1/user/{username}', headers=self.headers
).json()
elif type == 'is_available':
return requests.get(
url=f'{self.url}/api/v1/is_available/{username}',
headers=self.headers,
).json()
elif type == 'sub':
return requests.post(
url=f'{self.url}/api/follow/{username}', headers=self.headers
).json()
elif type == 'unsub':
return requests.post(
url=f'{self.url}/api/unfollow/{username}', headers=self.headers
).json()
else:
return {'error': 'Invalid Call'}
def guild(self, name=None, type=None):
self.refresh_headers()
if not name:
return {'error': 'You must provide a guildName.'}
if type:
type = str(type).lower()
if not type or type == 'guild':
return requests.get(
url=f'{self.url}/api/v1/guild/{name}',
headers=self.headers,
).json()
elif type == 'is_available':
# Default to is_available
return requests.get(
url=f'{self.url}/api/v1/board_available/{name}',
headers=self.headers,
).json()
elif type == 'sub':
return requests.post(
url=f'{self.url}/api/v1/subscribe/{name}',
headers=self.headers,
).json()
elif type == 'unsub':
return requests.post(
url=f'{self.url}/api/v1/unsubscribe/{name}',
headers=self.headers,
).json()
else:
return {'error': 'Invalid Call'}
def submit_post(self, *, guild, title, url):
self.refresh_headers()
return requests.post(
url=f'{self.url}/api/v1/submit',
headers=self.headers,
data={
'board': guild,
'title': title,
'url': url,
},
).json()
def get_guild_posts(self, *, name, page=1, sort='hot'):
self.refresh_headers()
url = f'{self.url}/api/v1/guild/{name.lstrip("+")}/listing'
response = requests.get(
url=url,
params={'page': page, 'sort': sort},
headers=self.headers,
)
response.raise_for_status()
return response.json()
def get(
self,
type=None,
sort=None,
time=None,
guild_name=None,
username=None,
post_id=None,
comment_id=None,
):
self.refresh_headers()
if not type:
return {'error': "You must specify which 'type' of get to use"}
else:
type = str(type).lower()
if time:
time = str(time).lower()
if time not in ['day', 'week', 'month', 'year']:
return {'error': 'Invalid time parameter.'}
if sort:
sort = str(sort).lower()
if sort not in ['top', 'hot', 'disputed', 'activity', 'new']:
return {'error': 'Invalid sort parameter.'}
if type == 'front':
if sort:
if time:
return requests.get(
url=(
f'{self.url}/api/v1/front/listing'
f'?sort={sort}&time={time}'
),
headers=self.headers,
).json()
return requests.get(
url=f'{self.url}/api/v1/front/listing?sort={sort}',
headers=self.headers,
).json()
return requests.get(
url=f'{self.url}/api/v1/front/listing', headers=self.headers
).json()
elif type == 'guild':
if not guild_name:
return {'error': 'You must provide a guildName'}
else:
guild_name = str(guild_name)
if sort:
if time:
return requests.get(
url=(
f'{self.url}/api/v1/guild/{guild_name}/listing'
f'?sort={sort}&time={time}'
),
headers=self.headers,
).json()
return requests.get(
url=(
f'{self.url}/api/v1/guild/{guild_name}/listing'
f'?sort={sort}'
),
headers=self.headers,
).json()
return requests.get(
url=f'{self.url}/api/v1/guild/{guild_name}/listing',
headers=self.headers,
).json()
elif type == 'user':
if not username:
return {'error': 'You must provide a userName.'}
else:
username = str(username)
return requests.get(
url=f'{self.url}/api/v1/user/{username}', headers=self.headers
).json()
elif type == 'post':
if not post_id:
return {'error': 'You must provide a postId.'}
else:
post_id = str(post_id)
return requests.get(
url=f'{self.url}/api/v1/post/{post_id}', headers=self.headers
).json()
elif type == 'comment':
if not comment_id:
return {'error': 'You must provide a commentId.'}
else:
comment_id = str(comment_id)
return requests.get(
url=f'{self.url}/api/v1/comment/{comment_id}',
headers=self.headers,
).json()
else:
return {'error': 'Invalid Call'}
def refresh_headers(self, user_agent=None, access_token=None):
if self.access_token:
self.headers = {'Authorization': 'Bearer ' + self.access_token}
elif access_token:
self.headers = {'Authorization': 'Bearer ' + access_token}
else:
return {'error': 'You must provide an accessToken.'}
if user_agent:
self.header['user-agent'] = user_agent
self.user_agent = user_agent
elif self.user_agent:
self.header['user-agent'] = self.user_agent
else:
return {'error': 'You must provide a user-agent.'}
# refresh token 30 seconds before expiration
if self._refresh_token and self.token_expire_utc >= int(time() - 30):
self.refresh_token()
def refresh_token(self, refresh_token=None):
data = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'refresh',
}
if not self._refresh_token:
if refresh_token:
data['refresh_token'] = refresh_token
else:
data['refresh_token'] = self._refresh_token
r = requests.post(
url=f'{self.url}/oauth/grant', headers=self.headers, data=data
).json()
self.access_token = r['access_token']
return r
def get_access_token(self):
self.refresh_headers()
data = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'code',
'code': self.code,
}
r = requests.post(
url=f'{self.url}/oauth/grant', headers=self.headers, data=data
).json()
self.access_token = r['access_token']
self._refresh_token = r['refresh_token']
self.token_expire_utc = r['expires_at']
return r
| true | true |
f71e70f75ef74e8cb49110de6c03e5a4a3b5570e | 663 | py | Python | test/test_container_image.py | hyperonecom/h1-client-python | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | [
"MIT"
] | null | null | null | test/test_container_image.py | hyperonecom/h1-client-python | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | [
"MIT"
] | null | null | null | test/test_container_image.py | hyperonecom/h1-client-python | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | [
"MIT"
] | null | null | null | """
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import h1
from h1.model.container_image import ContainerImage
class TestContainerImage(unittest.TestCase):
"""ContainerImage unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testContainerImage(self):
"""Test ContainerImage"""
# FIXME: construct object with mandatory attributes with example values
# model = ContainerImage() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 18.416667 | 79 | 0.660633 |
import sys
import unittest
import h1
from h1.model.container_image import ContainerImage
class TestContainerImage(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testContainerImage(self):
s
if __name__ == '__main__':
unittest.main()
| true | true |
f71e725e0b349de80543e5aa9d59479b592d89d0 | 180 | py | Python | krit/invitations/urls.py | huroncg/krit-teams | ce96a49de44496c2f86e37dd917c51952fbbdeed | [
"BSD-3-Clause"
] | null | null | null | krit/invitations/urls.py | huroncg/krit-teams | ce96a49de44496c2f86e37dd917c51952fbbdeed | [
"BSD-3-Clause"
] | null | null | null | krit/invitations/urls.py | huroncg/krit-teams | ce96a49de44496c2f86e37dd917c51952fbbdeed | [
"BSD-3-Clause"
] | 1 | 2021-02-26T01:38:35.000Z | 2021-02-26T01:38:35.000Z | from django.conf.urls import url
from .views import InvitationDetailView
urlpatterns = [
url(r'^(?P<pk>[0-9]+)/$', InvitationDetailView.as_view(), name='invitations-detail')
] | 30 | 88 | 0.722222 | from django.conf.urls import url
from .views import InvitationDetailView
urlpatterns = [
url(r'^(?P<pk>[0-9]+)/$', InvitationDetailView.as_view(), name='invitations-detail')
] | true | true |
f71e725fcb44ac801a9366c707c6df99559a91f7 | 10,027 | py | Python | homeassistant/components/stream/__init__.py | mikan-megane/core | 837220cce40890e296920d33a623adbc11bd15a6 | [
"Apache-2.0"
] | 2 | 2020-03-29T05:32:57.000Z | 2021-06-13T06:55:05.000Z | homeassistant/components/stream/__init__.py | mikan-megane/core | 837220cce40890e296920d33a623adbc11bd15a6 | [
"Apache-2.0"
] | 79 | 2020-07-23T07:13:37.000Z | 2022-03-22T06:02:37.000Z | homeassistant/components/stream/__init__.py | mikan-megane/core | 837220cce40890e296920d33a623adbc11bd15a6 | [
"Apache-2.0"
] | 1 | 2020-11-18T21:04:18.000Z | 2020-11-18T21:04:18.000Z | """Provide functionality to stream video source.
Components use create_stream with a stream source (e.g. an rtsp url) to create
a new Stream object. Stream manages:
- Background work to fetch and decode a stream
- Desired output formats
- Home Assistant URLs for viewing a stream
- Access tokens for URLs for viewing a stream
A Stream consists of a background worker, and one or more output formats each
with their own idle timeout managed by the stream component. When an output
format is no longer in use, the stream component will expire it. When there
are no active output formats, the background worker is shut down and access
tokens are expired. Alternatively, a Stream can be configured with keepalive
to always keep workers active.
"""
from __future__ import annotations
import logging
import re
import secrets
import threading
import time
from types import MappingProxyType
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from .const import (
ATTR_ENDPOINTS,
ATTR_STREAMS,
DOMAIN,
HLS_PROVIDER,
MAX_SEGMENTS,
OUTPUT_IDLE_TIMEOUT,
RECORDER_PROVIDER,
STREAM_RESTART_INCREMENT,
STREAM_RESTART_RESET_TIME,
)
from .core import PROVIDERS, IdleTimer, StreamOutput
from .hls import async_setup_hls
_LOGGER = logging.getLogger(__name__)
STREAM_SOURCE_RE = re.compile("//.*:.*@")
def redact_credentials(data):
"""Redact credentials from string data."""
return STREAM_SOURCE_RE.sub("//****:****@", data)
def create_stream(hass, stream_source, options=None):
"""Create a stream with the specified identfier based on the source url.
The stream_source is typically an rtsp url and options are passed into
pyav / ffmpeg as options.
"""
if DOMAIN not in hass.config.components:
raise HomeAssistantError("Stream integration is not set up.")
if options is None:
options = {}
# For RTSP streams, prefer TCP
if isinstance(stream_source, str) and stream_source[:7] == "rtsp://":
options = {
"rtsp_flags": "prefer_tcp",
"stimeout": "5000000",
**options,
}
stream = Stream(hass, stream_source, options=options)
hass.data[DOMAIN][ATTR_STREAMS].append(stream)
return stream
async def async_setup(hass, config):
"""Set up stream."""
# Set log level to error for libav
logging.getLogger("libav").setLevel(logging.ERROR)
logging.getLogger("libav.mp4").setLevel(logging.ERROR)
# Keep import here so that we can import stream integration without installing reqs
# pylint: disable=import-outside-toplevel
from .recorder import async_setup_recorder
hass.data[DOMAIN] = {}
hass.data[DOMAIN][ATTR_ENDPOINTS] = {}
hass.data[DOMAIN][ATTR_STREAMS] = []
# Setup HLS
hls_endpoint = async_setup_hls(hass)
hass.data[DOMAIN][ATTR_ENDPOINTS][HLS_PROVIDER] = hls_endpoint
# Setup Recorder
async_setup_recorder(hass)
@callback
def shutdown(event):
"""Stop all stream workers."""
for stream in hass.data[DOMAIN][ATTR_STREAMS]:
stream.keepalive = False
stream.stop()
_LOGGER.info("Stopped stream workers")
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, shutdown)
return True
class Stream:
"""Represents a single stream."""
def __init__(self, hass, source, options=None):
"""Initialize a stream."""
self.hass = hass
self.source = source
self.options = options
self.keepalive = False
self.access_token = None
self._thread = None
self._thread_quit = threading.Event()
self._outputs: dict[str, StreamOutput] = {}
self._fast_restart_once = False
if self.options is None:
self.options = {}
def endpoint_url(self, fmt: str) -> str:
"""Start the stream and returns a url for the output format."""
if fmt not in self._outputs:
raise ValueError(f"Stream is not configured for format '{fmt}'")
if not self.access_token:
self.access_token = secrets.token_hex()
return self.hass.data[DOMAIN][ATTR_ENDPOINTS][fmt].format(self.access_token)
def outputs(self):
"""Return a copy of the stream outputs."""
# A copy is returned so the caller can iterate through the outputs
# without concern about self._outputs being modified from another thread.
return MappingProxyType(self._outputs.copy())
def add_provider(self, fmt, timeout=OUTPUT_IDLE_TIMEOUT):
"""Add provider output stream."""
if not self._outputs.get(fmt):
@callback
def idle_callback():
if (
not self.keepalive or fmt == RECORDER_PROVIDER
) and fmt in self._outputs:
self.remove_provider(self._outputs[fmt])
self.check_idle()
provider = PROVIDERS[fmt](
self.hass, IdleTimer(self.hass, timeout, idle_callback)
)
self._outputs[fmt] = provider
return self._outputs[fmt]
def remove_provider(self, provider):
"""Remove provider output stream."""
if provider.name in self._outputs:
self._outputs[provider.name].cleanup()
del self._outputs[provider.name]
if not self._outputs:
self.stop()
def check_idle(self):
"""Reset access token if all providers are idle."""
if all(p.idle for p in self._outputs.values()):
self.access_token = None
def start(self):
"""Start a stream."""
if self._thread is None or not self._thread.is_alive():
if self._thread is not None:
# The thread must have crashed/exited. Join to clean up the
# previous thread.
self._thread.join(timeout=0)
self._thread_quit.clear()
self._thread = threading.Thread(
name="stream_worker",
target=self._run_worker,
)
self._thread.start()
_LOGGER.info("Started stream: %s", redact_credentials(str(self.source)))
def update_source(self, new_source):
"""Restart the stream with a new stream source."""
_LOGGER.debug("Updating stream source %s", new_source)
self.source = new_source
self._fast_restart_once = True
self._thread_quit.set()
def _run_worker(self):
"""Handle consuming streams and restart keepalive streams."""
# Keep import here so that we can import stream integration without installing reqs
# pylint: disable=import-outside-toplevel
from .worker import SegmentBuffer, stream_worker
segment_buffer = SegmentBuffer(self.outputs)
wait_timeout = 0
while not self._thread_quit.wait(timeout=wait_timeout):
start_time = time.time()
stream_worker(self.source, self.options, segment_buffer, self._thread_quit)
segment_buffer.discontinuity()
if not self.keepalive or self._thread_quit.is_set():
if self._fast_restart_once:
# The stream source is updated, restart without any delay.
self._fast_restart_once = False
self._thread_quit.clear()
continue
break
# To avoid excessive restarts, wait before restarting
# As the required recovery time may be different for different setups, start
# with trying a short wait_timeout and increase it on each reconnection attempt.
# Reset the wait_timeout after the worker has been up for several minutes
if time.time() - start_time > STREAM_RESTART_RESET_TIME:
wait_timeout = 0
wait_timeout += STREAM_RESTART_INCREMENT
_LOGGER.debug(
"Restarting stream worker in %d seconds: %s",
wait_timeout,
self.source,
)
self._worker_finished()
def _worker_finished(self):
"""Schedule cleanup of all outputs."""
@callback
def remove_outputs():
for provider in self.outputs().values():
self.remove_provider(provider)
self.hass.loop.call_soon_threadsafe(remove_outputs)
def stop(self):
"""Remove outputs and access token."""
self._outputs = {}
self.access_token = None
if not self.keepalive:
self._stop()
def _stop(self):
"""Stop worker thread."""
if self._thread is not None:
self._thread_quit.set()
self._thread.join()
self._thread = None
_LOGGER.info("Stopped stream: %s", redact_credentials(str(self.source)))
async def async_record(self, video_path, duration=30, lookback=5):
"""Make a .mp4 recording from a provided stream."""
# Check for file access
if not self.hass.config.is_allowed_path(video_path):
raise HomeAssistantError(f"Can't write {video_path}, no access to path!")
# Add recorder
recorder = self.outputs().get(RECORDER_PROVIDER)
if recorder:
raise HomeAssistantError(
f"Stream already recording to {recorder.video_path}!"
)
recorder = self.add_provider(RECORDER_PROVIDER, timeout=duration)
recorder.video_path = video_path
self.start()
_LOGGER.debug("Started a stream recording of %s seconds", duration)
# Take advantage of lookback
hls = self.outputs().get(HLS_PROVIDER)
if lookback > 0 and hls:
num_segments = min(int(lookback // hls.target_duration), MAX_SEGMENTS)
# Wait for latest segment, then add the lookback
await hls.recv()
recorder.prepend(list(hls.get_segments())[-num_segments:])
| 35.306338 | 92 | 0.64057 | from __future__ import annotations
import logging
import re
import secrets
import threading
import time
from types import MappingProxyType
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from .const import (
ATTR_ENDPOINTS,
ATTR_STREAMS,
DOMAIN,
HLS_PROVIDER,
MAX_SEGMENTS,
OUTPUT_IDLE_TIMEOUT,
RECORDER_PROVIDER,
STREAM_RESTART_INCREMENT,
STREAM_RESTART_RESET_TIME,
)
from .core import PROVIDERS, IdleTimer, StreamOutput
from .hls import async_setup_hls
_LOGGER = logging.getLogger(__name__)
STREAM_SOURCE_RE = re.compile("//.*:.*@")
def redact_credentials(data):
return STREAM_SOURCE_RE.sub("//****:****@", data)
def create_stream(hass, stream_source, options=None):
if DOMAIN not in hass.config.components:
raise HomeAssistantError("Stream integration is not set up.")
if options is None:
options = {}
if isinstance(stream_source, str) and stream_source[:7] == "rtsp://":
options = {
"rtsp_flags": "prefer_tcp",
"stimeout": "5000000",
**options,
}
stream = Stream(hass, stream_source, options=options)
hass.data[DOMAIN][ATTR_STREAMS].append(stream)
return stream
async def async_setup(hass, config):
logging.getLogger("libav").setLevel(logging.ERROR)
logging.getLogger("libav.mp4").setLevel(logging.ERROR)
from .recorder import async_setup_recorder
hass.data[DOMAIN] = {}
hass.data[DOMAIN][ATTR_ENDPOINTS] = {}
hass.data[DOMAIN][ATTR_STREAMS] = []
hls_endpoint = async_setup_hls(hass)
hass.data[DOMAIN][ATTR_ENDPOINTS][HLS_PROVIDER] = hls_endpoint
async_setup_recorder(hass)
@callback
def shutdown(event):
for stream in hass.data[DOMAIN][ATTR_STREAMS]:
stream.keepalive = False
stream.stop()
_LOGGER.info("Stopped stream workers")
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, shutdown)
return True
class Stream:
def __init__(self, hass, source, options=None):
self.hass = hass
self.source = source
self.options = options
self.keepalive = False
self.access_token = None
self._thread = None
self._thread_quit = threading.Event()
self._outputs: dict[str, StreamOutput] = {}
self._fast_restart_once = False
if self.options is None:
self.options = {}
def endpoint_url(self, fmt: str) -> str:
if fmt not in self._outputs:
raise ValueError(f"Stream is not configured for format '{fmt}'")
if not self.access_token:
self.access_token = secrets.token_hex()
return self.hass.data[DOMAIN][ATTR_ENDPOINTS][fmt].format(self.access_token)
def outputs(self):
return MappingProxyType(self._outputs.copy())
def add_provider(self, fmt, timeout=OUTPUT_IDLE_TIMEOUT):
if not self._outputs.get(fmt):
@callback
def idle_callback():
if (
not self.keepalive or fmt == RECORDER_PROVIDER
) and fmt in self._outputs:
self.remove_provider(self._outputs[fmt])
self.check_idle()
provider = PROVIDERS[fmt](
self.hass, IdleTimer(self.hass, timeout, idle_callback)
)
self._outputs[fmt] = provider
return self._outputs[fmt]
def remove_provider(self, provider):
if provider.name in self._outputs:
self._outputs[provider.name].cleanup()
del self._outputs[provider.name]
if not self._outputs:
self.stop()
def check_idle(self):
if all(p.idle for p in self._outputs.values()):
self.access_token = None
def start(self):
if self._thread is None or not self._thread.is_alive():
if self._thread is not None:
self._thread.join(timeout=0)
self._thread_quit.clear()
self._thread = threading.Thread(
name="stream_worker",
target=self._run_worker,
)
self._thread.start()
_LOGGER.info("Started stream: %s", redact_credentials(str(self.source)))
def update_source(self, new_source):
_LOGGER.debug("Updating stream source %s", new_source)
self.source = new_source
self._fast_restart_once = True
self._thread_quit.set()
def _run_worker(self):
from .worker import SegmentBuffer, stream_worker
segment_buffer = SegmentBuffer(self.outputs)
wait_timeout = 0
while not self._thread_quit.wait(timeout=wait_timeout):
start_time = time.time()
stream_worker(self.source, self.options, segment_buffer, self._thread_quit)
segment_buffer.discontinuity()
if not self.keepalive or self._thread_quit.is_set():
if self._fast_restart_once:
self._fast_restart_once = False
self._thread_quit.clear()
continue
break
if time.time() - start_time > STREAM_RESTART_RESET_TIME:
wait_timeout = 0
wait_timeout += STREAM_RESTART_INCREMENT
_LOGGER.debug(
"Restarting stream worker in %d seconds: %s",
wait_timeout,
self.source,
)
self._worker_finished()
def _worker_finished(self):
@callback
def remove_outputs():
for provider in self.outputs().values():
self.remove_provider(provider)
self.hass.loop.call_soon_threadsafe(remove_outputs)
def stop(self):
self._outputs = {}
self.access_token = None
if not self.keepalive:
self._stop()
def _stop(self):
if self._thread is not None:
self._thread_quit.set()
self._thread.join()
self._thread = None
_LOGGER.info("Stopped stream: %s", redact_credentials(str(self.source)))
async def async_record(self, video_path, duration=30, lookback=5):
if not self.hass.config.is_allowed_path(video_path):
raise HomeAssistantError(f"Can't write {video_path}, no access to path!")
# Add recorder
recorder = self.outputs().get(RECORDER_PROVIDER)
if recorder:
raise HomeAssistantError(
f"Stream already recording to {recorder.video_path}!"
)
recorder = self.add_provider(RECORDER_PROVIDER, timeout=duration)
recorder.video_path = video_path
self.start()
_LOGGER.debug("Started a stream recording of %s seconds", duration)
# Take advantage of lookback
hls = self.outputs().get(HLS_PROVIDER)
if lookback > 0 and hls:
num_segments = min(int(lookback // hls.target_duration), MAX_SEGMENTS)
# Wait for latest segment, then add the lookback
await hls.recv()
recorder.prepend(list(hls.get_segments())[-num_segments:])
| true | true |
f71e7345c9a8dac9495e2a62af51ecf868c01bbb | 253 | py | Python | tests/factories/test_acf.py | fedorpashin/statistical-modeling | 2630e6811afca29cf8a616bab130ae7f9547a043 | [
"MIT"
] | 2 | 2021-05-21T15:29:05.000Z | 2021-08-11T14:08:31.000Z | tests/factories/test_acf.py | fedorpashin/statistical-modeling | 2630e6811afca29cf8a616bab130ae7f9547a043 | [
"MIT"
] | 59 | 2021-08-11T14:08:36.000Z | 2021-09-30T11:15:33.000Z | tests/factories/test_acf.py | fedorpashin/statistical-modeling | 2630e6811afca29cf8a616bab130ae7f9547a043 | [
"MIT"
] | null | null | null | import statistical_modeling as sm
from typing import Final
import unittest
class TestACF(unittest.TestCase):
def test(self):
s: Final = sm.Sample([1, 2, 3])
f: Final = 3
self.assertEqual(sm.ACF(s, f), sm.SampleACF(s, f))
| 19.461538 | 58 | 0.644269 | import statistical_modeling as sm
from typing import Final
import unittest
class TestACF(unittest.TestCase):
    """Checks that the ACF factory dispatches to SampleACF."""
    def test(self):
        # ACF(s, f) should equal the explicitly constructed SampleACF(s, f).
        s: Final = sm.Sample([1, 2, 3])
        f: Final = 3
        self.assertEqual(sm.ACF(s, f), sm.SampleACF(s, f))
| true | true |
f71e73f8ed45ce097a4bf846642a7b1f9267abc3 | 17,332 | py | Python | tf_pose/slim/nets/mobilenet/mobilenet.py | gpspelle/pose-estimation | b817dcc120092002984d8a41431046f323bc02c8 | [
"Apache-2.0"
] | 3,442 | 2017-11-20T08:39:51.000Z | 2019-05-06T10:51:19.000Z | tf_pose/slim/nets/mobilenet/mobilenet.py | bvanelli/tf-pose-estimation | 1dec506ac8abf00616dc0fe76bf476ccdfd6b93e | [
"Apache-2.0"
] | 430 | 2017-11-29T04:21:48.000Z | 2019-05-06T05:37:37.000Z | tf_pose/slim/nets/mobilenet/mobilenet.py | bvanelli/tf-pose-estimation | 1dec506ac8abf00616dc0fe76bf476ccdfd6b93e | [
"Apache-2.0"
] | 683 | 2017-11-20T08:50:34.000Z | 2019-05-04T04:25:14.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mobilenet Base Class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import os
import tensorflow as tf
slim = tf.contrib.slim
@slim.add_arg_scope
def apply_activation(x, name=None, activation_fn=None):
  """Applies `activation_fn` to `x` (naming the op) or returns `x` unchanged."""
  return activation_fn(x, name=name) if activation_fn else x
def _fixed_padding(inputs, kernel_size, rate=1):
"""Pads the input along the spatial dimensions independently of input size.
Pads the input such that if it was used in a convolution with 'VALID' padding,
the output would have the same dimensions as if the unpadded input was used
in a convolution with 'SAME' padding.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
rate: An integer, rate for atrous convolution.
Returns:
output: A tensor of size [batch, height_out, width_out, channels] with the
input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
"""
kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),
kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)]
pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]],
[pad_beg[1], pad_end[1]], [0, 0]])
return padded_inputs
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
@contextlib.contextmanager
def _set_arg_scope_defaults(defaults):
"""Sets arg scope defaults for all items present in defaults.
Args:
defaults: dictionary/list of pairs, containing a mapping from
function to a dictionary of default args.
Yields:
context manager where all defaults are set.
"""
if hasattr(defaults, 'items'):
items = list(defaults.items())
else:
items = defaults
if not items:
yield
else:
func, default_arg = items[0]
with slim.arg_scope(func, **default_arg):
with _set_arg_scope_defaults(items[1:]):
yield
@slim.add_arg_scope
def depth_multiplier(output_params,
multiplier,
divisible_by=8,
min_depth=8,
**unused_kwargs):
if 'num_outputs' not in output_params:
return
d = output_params['num_outputs']
output_params['num_outputs'] = _make_divisible(d * multiplier, divisible_by,
min_depth)
_Op = collections.namedtuple('Op', ['op', 'params', 'multiplier_func'])
def op(opfunc, **params):
multiplier = params.pop('multiplier_transorm', depth_multiplier)
return _Op(opfunc, params=params, multiplier_func=multiplier)
class NoOpScope(object):
  """Context manager that does nothing.

  Entering yields ``None`` and exiting never suppresses exceptions, making
  it a drop-in stand-in when no ``arg_scope`` override is needed.
  """

  def __enter__(self):
    # Nothing to set up; callers receive None as the scope value.
    return None

  def __exit__(self, unused_type, unused_value, unused_traceback):
    # Returning False propagates any exception raised inside the block.
    return False
def safe_arg_scope(funcs, **kwargs):
  """Returns `slim.arg_scope(funcs, ...)` with all None-valued kwargs removed.

  If every keyword argument is None there is nothing to override, so a
  no-op context manager is returned instead. Useful when a None value
  should mean "do not overwrite this parameter value".
  """
  kept_args = {}
  for name, value in kwargs.items():
    if value is not None:
      kept_args[name] = value
  if not kept_args:
    return NoOpScope()
  return slim.arg_scope(funcs, **kept_args)
@slim.add_arg_scope
def mobilenet_base( # pylint: disable=invalid-name
inputs,
conv_defs,
multiplier=1.0,
final_endpoint=None,
output_stride=None,
use_explicit_padding=False,
scope=None,
is_training=False):
"""Mobilenet base network.
Constructs a network from inputs to the given final endpoint. By default
the network is constructed in inference mode. To create network
in training mode use:
with slim.arg_scope(mobilenet.training_scope()):
logits, endpoints = mobilenet_base(...)
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
conv_defs: A list of op(...) layers specifying the net architecture.
multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
final_endpoint: The name of last layer, for early termination for
for V1-based networks: last layer is "layer_14", for V2: "layer_20"
output_stride: An integer that specifies the requested ratio of input to
output spatial resolution. If not None, then we invoke atrous convolution
if necessary to prevent the network from reducing the spatial resolution
of the activation maps. Allowed values are 1 or any even number, excluding
zero. Typical values are 8 (accurate fully convolutional mode), 16
(fast fully convolutional mode), and 32 (classification mode).
NOTE- output_stride relies on all consequent operators to support dilated
operators via "rate" parameter. This might require wrapping non-conv
operators to operate properly.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
scope: optional variable scope.
is_training: How to setup batch_norm and other ops. Note: most of the time
this does not need be set directly. Use mobilenet.training_scope() to set
up training instead. This parameter is here for backward compatibility
only. It is safe to set it to the value matching
training_scope(is_training=...). It is also safe to explicitly set
it to False, even if there is outer training_scope set to to training.
(The network will be built in inference mode). If this is set to None,
no arg_scope is added for slim.batch_norm's is_training parameter.
Returns:
tensor_out: output tensor.
end_points: a set of activations for external use, for example summaries or
losses.
Raises:
ValueError: depth_multiplier <= 0, or the target output_stride is not
allowed.
"""
if multiplier <= 0:
raise ValueError('multiplier is not greater than zero.')
# Set conv defs defaults and overrides.
conv_defs_defaults = conv_defs.get('defaults', {})
conv_defs_overrides = conv_defs.get('overrides', {})
if use_explicit_padding:
conv_defs_overrides = copy.deepcopy(conv_defs_overrides)
conv_defs_overrides[
(slim.conv2d, slim.separable_conv2d)] = {'padding': 'VALID'}
if output_stride is not None:
if output_stride == 0 or (output_stride > 1 and output_stride % 2):
raise ValueError('Output stride must be None, 1 or a multiple of 2.')
# a) Set the tensorflow scope
# b) set padding to default: note we might consider removing this
# since it is also set by mobilenet_scope
# c) set all defaults
# d) set all extra overrides.
with _scope_all(scope, default_scope='Mobilenet'), \
safe_arg_scope([slim.batch_norm], is_training=is_training), \
_set_arg_scope_defaults(conv_defs_defaults), \
_set_arg_scope_defaults(conv_defs_overrides):
# The current_stride variable keeps track of the output stride of the
# activations, i.e., the running product of convolution strides up to the
# current network layer. This allows us to invoke atrous convolution
# whenever applying the next convolution would result in the activations
# having output stride larger than the target output_stride.
current_stride = 1
# The atrous convolution rate parameter.
rate = 1
net = inputs
# Insert default parameters before the base scope which includes
# any custom overrides set in mobilenet.
end_points = {}
scopes = {}
for i, opdef in enumerate(conv_defs['spec']):
params = dict(opdef.params)
opdef.multiplier_func(params, multiplier)
stride = params.get('stride', 1)
if output_stride is not None and current_stride == output_stride:
# If we have reached the target output_stride, then we need to employ
# atrous convolution with stride=1 and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
layer_stride = 1
layer_rate = rate
rate *= stride
else:
layer_stride = stride
layer_rate = 1
current_stride *= stride
# Update params.
params['stride'] = layer_stride
# Only insert rate to params if rate > 1.
if layer_rate > 1:
params['rate'] = layer_rate
# Set padding
if use_explicit_padding:
if 'kernel_size' in params:
net = _fixed_padding(net, params['kernel_size'], layer_rate)
else:
params['use_explicit_padding'] = True
end_point = 'layer_%d' % (i + 1)
try:
net = opdef.op(net, **params)
except Exception:
print('Failed to create op %i: %r params: %r' % (i, opdef, params))
raise
end_points[end_point] = net
scope = os.path.dirname(net.name)
scopes[scope] = end_point
if final_endpoint is not None and end_point == final_endpoint:
break
# Add all tensors that end with 'output' to
# endpoints
for t in net.graph.get_operations():
scope = os.path.dirname(t.name)
bn = os.path.basename(t.name)
if scope in scopes and t.name.endswith('output'):
end_points[scopes[scope] + '/' + bn] = t.outputs[0]
return net, end_points
@contextlib.contextmanager
def _scope_all(scope, default_scope=None):
  """Opens `scope` as a variable scope and re-enters its original name scope.

  Re-entering `s.original_name_scope` keeps op names consistent when the
  same variable scope is entered more than once.
  """
  with tf.variable_scope(scope, default_name=default_scope) as s,\
       tf.name_scope(s.original_name_scope):
    yield s
@slim.add_arg_scope
def mobilenet(inputs,
num_classes=1001,
prediction_fn=slim.softmax,
reuse=None,
scope='Mobilenet',
base_only=False,
**mobilenet_args):
"""Mobilenet model for classification, supports both V1 and V2.
Note: default mode is inference, use mobilenet.training_scope to create
training network.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer
is omitted and the input features to the logits layer (before dropout)
are returned instead.
prediction_fn: a function to get predictions out of logits
(default softmax).
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
base_only: if True will only create the base of the network (no pooling
and no logits).
**mobilenet_args: passed to mobilenet_base verbatim.
- conv_defs: list of conv defs
- multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
- output_stride: will ensure that the last layer has at most total stride.
If the architecture calls for more stride than that provided
(e.g. output_stride=16, but the architecture has 5 stride=2 operators),
it will replace output_stride with fractional convolutions using Atrous
Convolutions.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, num_classes]
end_points: a dictionary from components of the network to the corresponding
activation tensor.
Raises:
ValueError: Input rank is invalid.
"""
is_training = mobilenet_args.get('is_training', False)
input_shape = inputs.get_shape().as_list()
if len(input_shape) != 4:
raise ValueError('Expected rank 4 input, was: %d' % len(input_shape))
with tf.variable_scope(scope, 'Mobilenet', reuse=reuse) as scope:
inputs = tf.identity(inputs, 'input')
net, end_points = mobilenet_base(inputs, scope=scope, **mobilenet_args)
if base_only:
return net, end_points
net = tf.identity(net, name='embedding')
with tf.variable_scope('Logits'):
net = global_pool(net)
end_points['global_pool'] = net
if not num_classes:
return net, end_points
net = slim.dropout(net, scope='Dropout', is_training=is_training)
# 1 x 1 x num_classes
# Note: legacy scope name.
logits = slim.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=tf.zeros_initializer(),
scope='Conv2d_1c_1x1')
logits = tf.squeeze(logits, [1, 2])
logits = tf.identity(logits, name='output')
end_points['Logits'] = logits
if prediction_fn:
end_points['Predictions'] = prediction_fn(logits, 'Predictions')
return logits, end_points
def global_pool(input_tensor, pool_op=tf.nn.avg_pool):
  """Applies avg pool to produce 1x1 output.
  NOTE: This function is functionally equivalent to reduce_mean, but it has
  baked-in average pooling, which has better support across hardware.
  Args:
    input_tensor: input tensor, NHWC (pooled over dims 1 and 2).
    pool_op: pooling op (avg pool is default)
  Returns:
    a tensor batch_size x 1 x 1 x depth.
  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    # Height/width unknown at graph-build time: build the kernel dynamically.
    kernel_size = tf.convert_to_tensor(
        [1, tf.shape(input_tensor)[1],
         tf.shape(input_tensor)[2], 1])
  else:
    kernel_size = [1, shape[1], shape[2], 1]
  output = pool_op(
      input_tensor, ksize=kernel_size, strides=[1, 1, 1, 1], padding='VALID')
  # Recover output shape, for unknown shape.
  output.set_shape([None, 1, 1, None])
  return output
def training_scope(is_training=True,
weight_decay=0.00004,
stddev=0.09,
dropout_keep_prob=0.8,
bn_decay=0.997):
"""Defines Mobilenet training scope.
Usage:
with tf.contrib.slim.arg_scope(mobilenet.training_scope()):
logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
# the network created will be trainble with dropout/batch norm
# initialized appropriately.
Args:
is_training: if set to False this will ensure that all customizations are
set to non-training mode. This might be helpful for code that is reused
across both training/evaluation, but most of the time training_scope with
value False is not needed. If this is set to None, the parameters is not
added to the batch_norm arg_scope.
weight_decay: The weight decay to use for regularizing the model.
stddev: Standard deviation for initialization, if negative uses xavier.
dropout_keep_prob: dropout keep probability (not set if equals to None).
bn_decay: decay for the batch norm moving averages (not set if equals to
None).
Returns:
An argument scope to use via arg_scope.
"""
# Note: do not introduce parameters that would change the inference
# model here (for example whether to use bias), modify conv_def instead.
batch_norm_params = {
'decay': bn_decay,
'is_training': is_training
}
if stddev < 0:
weight_intitializer = slim.initializers.xavier_initializer()
else:
weight_intitializer = tf.truncated_normal_initializer(stddev=stddev)
# Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope(
[slim.conv2d, slim.fully_connected, slim.separable_conv2d],
weights_initializer=weight_intitializer,
normalizer_fn=slim.batch_norm), \
slim.arg_scope([mobilenet_base, mobilenet], is_training=is_training),\
safe_arg_scope([slim.batch_norm], **batch_norm_params), \
safe_arg_scope([slim.dropout], is_training=is_training,
keep_prob=dropout_keep_prob), \
slim.arg_scope([slim.conv2d], \
weights_regularizer=slim.l2_regularizer(weight_decay)), \
slim.arg_scope([slim.separable_conv2d], weights_regularizer=None) as s:
return s
| 37.034188 | 80 | 0.687111 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import os
import tensorflow as tf
slim = tf.contrib.slim
@slim.add_arg_scope
def apply_activation(x, name=None, activation_fn=None):
return activation_fn(x, name=name) if activation_fn else x
def _fixed_padding(inputs, kernel_size, rate=1):
kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),
kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)]
pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]],
[pad_beg[1], pad_end[1]], [0, 0]])
return padded_inputs
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
@contextlib.contextmanager
def _set_arg_scope_defaults(defaults):
if hasattr(defaults, 'items'):
items = list(defaults.items())
else:
items = defaults
if not items:
yield
else:
func, default_arg = items[0]
with slim.arg_scope(func, **default_arg):
with _set_arg_scope_defaults(items[1:]):
yield
@slim.add_arg_scope
def depth_multiplier(output_params,
multiplier,
divisible_by=8,
min_depth=8,
**unused_kwargs):
if 'num_outputs' not in output_params:
return
d = output_params['num_outputs']
output_params['num_outputs'] = _make_divisible(d * multiplier, divisible_by,
min_depth)
_Op = collections.namedtuple('Op', ['op', 'params', 'multiplier_func'])
def op(opfunc, **params):
multiplier = params.pop('multiplier_transorm', depth_multiplier)
return _Op(opfunc, params=params, multiplier_func=multiplier)
class NoOpScope(object):
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback):
return False
def safe_arg_scope(funcs, **kwargs):
filtered_args = {name: value for name, value in kwargs.items()
if value is not None}
if filtered_args:
return slim.arg_scope(funcs, **filtered_args)
else:
return NoOpScope()
@slim.add_arg_scope
def mobilenet_base(
inputs,
conv_defs,
multiplier=1.0,
final_endpoint=None,
output_stride=None,
use_explicit_padding=False,
scope=None,
is_training=False):
if multiplier <= 0:
raise ValueError('multiplier is not greater than zero.')
conv_defs_defaults = conv_defs.get('defaults', {})
conv_defs_overrides = conv_defs.get('overrides', {})
if use_explicit_padding:
conv_defs_overrides = copy.deepcopy(conv_defs_overrides)
conv_defs_overrides[
(slim.conv2d, slim.separable_conv2d)] = {'padding': 'VALID'}
if output_stride is not None:
if output_stride == 0 or (output_stride > 1 and output_stride % 2):
raise ValueError('Output stride must be None, 1 or a multiple of 2.')
with _scope_all(scope, default_scope='Mobilenet'), \
safe_arg_scope([slim.batch_norm], is_training=is_training), \
_set_arg_scope_defaults(conv_defs_defaults), \
_set_arg_scope_defaults(conv_defs_overrides):
current_stride = 1
rate = 1
net = inputs
end_points = {}
scopes = {}
for i, opdef in enumerate(conv_defs['spec']):
params = dict(opdef.params)
opdef.multiplier_func(params, multiplier)
stride = params.get('stride', 1)
if output_stride is not None and current_stride == output_stride:
layer_stride = 1
layer_rate = rate
rate *= stride
else:
layer_stride = stride
layer_rate = 1
current_stride *= stride
# Update params.
params['stride'] = layer_stride
# Only insert rate to params if rate > 1.
if layer_rate > 1:
params['rate'] = layer_rate
# Set padding
if use_explicit_padding:
if 'kernel_size' in params:
net = _fixed_padding(net, params['kernel_size'], layer_rate)
else:
params['use_explicit_padding'] = True
end_point = 'layer_%d' % (i + 1)
try:
net = opdef.op(net, **params)
except Exception:
print('Failed to create op %i: %r params: %r' % (i, opdef, params))
raise
end_points[end_point] = net
scope = os.path.dirname(net.name)
scopes[scope] = end_point
if final_endpoint is not None and end_point == final_endpoint:
break
# Add all tensors that end with 'output' to
# endpoints
for t in net.graph.get_operations():
scope = os.path.dirname(t.name)
bn = os.path.basename(t.name)
if scope in scopes and t.name.endswith('output'):
end_points[scopes[scope] + '/' + bn] = t.outputs[0]
return net, end_points
@contextlib.contextmanager
def _scope_all(scope, default_scope=None):
with tf.variable_scope(scope, default_name=default_scope) as s,\
tf.name_scope(s.original_name_scope):
yield s
@slim.add_arg_scope
def mobilenet(inputs,
num_classes=1001,
prediction_fn=slim.softmax,
reuse=None,
scope='Mobilenet',
base_only=False,
**mobilenet_args):
is_training = mobilenet_args.get('is_training', False)
input_shape = inputs.get_shape().as_list()
if len(input_shape) != 4:
raise ValueError('Expected rank 4 input, was: %d' % len(input_shape))
with tf.variable_scope(scope, 'Mobilenet', reuse=reuse) as scope:
inputs = tf.identity(inputs, 'input')
net, end_points = mobilenet_base(inputs, scope=scope, **mobilenet_args)
if base_only:
return net, end_points
net = tf.identity(net, name='embedding')
with tf.variable_scope('Logits'):
net = global_pool(net)
end_points['global_pool'] = net
if not num_classes:
return net, end_points
net = slim.dropout(net, scope='Dropout', is_training=is_training)
# 1 x 1 x num_classes
# Note: legacy scope name.
logits = slim.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=tf.zeros_initializer(),
scope='Conv2d_1c_1x1')
logits = tf.squeeze(logits, [1, 2])
logits = tf.identity(logits, name='output')
end_points['Logits'] = logits
if prediction_fn:
end_points['Predictions'] = prediction_fn(logits, 'Predictions')
return logits, end_points
def global_pool(input_tensor, pool_op=tf.nn.avg_pool):
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size = tf.convert_to_tensor(
[1, tf.shape(input_tensor)[1],
tf.shape(input_tensor)[2], 1])
else:
kernel_size = [1, shape[1], shape[2], 1]
output = pool_op(
input_tensor, ksize=kernel_size, strides=[1, 1, 1, 1], padding='VALID')
# Recover output shape, for unknown shape.
output.set_shape([None, 1, 1, None])
return output
def training_scope(is_training=True,
weight_decay=0.00004,
stddev=0.09,
dropout_keep_prob=0.8,
bn_decay=0.997):
# Note: do not introduce parameters that would change the inference
# model here (for example whether to use bias), modify conv_def instead.
batch_norm_params = {
'decay': bn_decay,
'is_training': is_training
}
if stddev < 0:
weight_intitializer = slim.initializers.xavier_initializer()
else:
weight_intitializer = tf.truncated_normal_initializer(stddev=stddev)
# Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope(
[slim.conv2d, slim.fully_connected, slim.separable_conv2d],
weights_initializer=weight_intitializer,
normalizer_fn=slim.batch_norm), \
slim.arg_scope([mobilenet_base, mobilenet], is_training=is_training),\
safe_arg_scope([slim.batch_norm], **batch_norm_params), \
safe_arg_scope([slim.dropout], is_training=is_training,
keep_prob=dropout_keep_prob), \
slim.arg_scope([slim.conv2d], \
weights_regularizer=slim.l2_regularizer(weight_decay)), \
slim.arg_scope([slim.separable_conv2d], weights_regularizer=None) as s:
return s
| true | true |
f71e74274134242cf53d0a9f6b3fddeae6b917de | 2,269 | py | Python | PythonSource/pythoncore/antlr4/Utils.py | dean2191/JavaLove | fb0be1e829b7400aff50334f49bc4e9db7d6cb1a | [
"MIT"
] | 1 | 2021-04-29T06:40:54.000Z | 2021-04-29T06:40:54.000Z | src/antlr4/Utils.py | arminnh/ba3-c-to-p-compiler | 2c649e1d3643471bac681c2656c1c7d6249be4d7 | [
"MIT"
] | 2 | 2021-06-23T21:23:04.000Z | 2021-06-23T21:23:11.000Z | src/antlr4/Utils.py | arminnh/ba3-c-to-p-compiler | 2c649e1d3643471bac681c2656c1c7d6249be4d7 | [
"MIT"
] | 1 | 2019-12-16T07:21:49.000Z | 2019-12-16T07:21:49.000Z | #[The "BSD license"]
# Copyright (c) 2012 Terence Parr
# Copyright (c) 2012 Sam Harwell
# Copyright (c) 2014 Eric Vergnaud
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from io import StringIO
def str_list(val):
    """Render an iterable as '[a, b, c]', applying str() to each element."""
    return "[" + ", ".join(str(element) for element in val) + "]"
def escapeWhitespace(s:str, escapeSpaces:bool):
    """Return `s` with tab/newline/CR shown as two-character escapes.

    When `escapeSpaces` is True, each space becomes a middle dot (U+00B7).
    All other characters pass through unchanged.
    """
    replacements = {'\t': "\\t", '\n': "\\n", '\r': "\\r"}
    if escapeSpaces:
        replacements[' '] = '\u00B7'
    return ''.join(replacements.get(ch, ch) for ch in s)
| 38.457627 | 75 | 0.662406 |
from io import StringIO
def str_list(val):
    """Render an iterable as '[a, b, c]', applying str() to each element."""
    with StringIO() as buf:
        buf.write('[')
        # `first` suppresses the separator before the first element.
        first = True
        for item in val:
            if not first:
                buf.write(', ')
            buf.write(str(item))
            first = False
        buf.write(']')
        return buf.getvalue()
def escapeWhitespace(s:str, escapeSpaces:bool):
    """Return `s` with tab/newline/CR shown as two-character escapes.

    When `escapeSpaces` is True, each space becomes a middle dot (U+00B7).
    """
    with StringIO() as buf:
        for c in s:
            if c==' ' and escapeSpaces:
                buf.write('\u00B7')
            elif c=='\t':
                buf.write("\\t")
            elif c=='\n':
                buf.write("\\n")
            elif c=='\r':
                buf.write("\\r")
            else:
                buf.write(c)
        return buf.getvalue()
| true | true |
f71e742c4e9898a942e1c505a387b4ebc7b12bb1 | 3,945 | py | Python | ml/rl/workflow/dqn_workflow.py | tao2020/Horizon | 0f9a1b16ddd6e5a8ac98e61acd227aae7c201b57 | [
"BSD-3-Clause"
] | 1 | 2020-09-27T08:48:14.000Z | 2020-09-27T08:48:14.000Z | ml/rl/workflow/dqn_workflow.py | tao2020/Horizon | 0f9a1b16ddd6e5a8ac98e61acd227aae7c201b57 | [
"BSD-3-Clause"
] | null | null | null | ml/rl/workflow/dqn_workflow.py | tao2020/Horizon | 0f9a1b16ddd6e5a8ac98e61acd227aae7c201b57 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import sys
from typing import Dict
import numpy as np
from ml.rl.evaluation.evaluator import Evaluator
from ml.rl.preprocessing.preprocessor import Preprocessor
from ml.rl.preprocessing.sparse_to_dense import PandasSparseToDenseProcessor
from ml.rl.readers.json_dataset_reader import JSONDatasetReader
from ml.rl.tensorboardX import summary_writer_context
from ml.rl.thrift.core.ttypes import (
DiscreteActionModelParameters,
NormalizationParameters,
RainbowDQNParameters,
RLParameters,
TrainingParameters,
)
from ml.rl.training.dqn_trainer import DQNTrainer
from ml.rl.workflow.base_workflow import BaseWorkflow
from ml.rl.workflow.helpers import (
export_trainer_and_predictor,
minibatch_size_multiplier,
parse_args,
update_model_for_warm_start,
)
from ml.rl.workflow.preprocess_handler import DqnPreprocessHandler, PreprocessHandler
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
class DqnWorkflow(BaseWorkflow):
    """End-to-end DQN training workflow.

    Builds a DQNTrainer (optionally warm-started from a saved model) and an
    Evaluator, then delegates the batch-training loop to BaseWorkflow.
    """

    def __init__(
        self,
        model_params: DiscreteActionModelParameters,
        preprocess_handler: PreprocessHandler,
        state_normalization: Dict[int, NormalizationParameters],
        use_gpu: bool,
        use_all_avail_gpus: bool,
    ):
        logger.info("Running DQN workflow with params:")
        logger.info(model_params)
        trainer = DQNTrainer(
            model_params,
            state_normalization,
            use_gpu=use_gpu,
            use_all_avail_gpus=use_all_avail_gpus,
        )
        # Warm start must not change the model type; an exact type match is
        # required here, so a subclass would also be rejected.
        trainer = update_model_for_warm_start(trainer)
        assert type(trainer) == DQNTrainer, "Warm started wrong model type: " + str(
            type(trainer)
        )
        evaluator = Evaluator(
            model_params.actions,
            model_params.rl.gamma,
            trainer,
            metrics_to_score=trainer.metrics_to_score,
        )
        super(DqnWorkflow, self).__init__(
            preprocess_handler, trainer, evaluator, model_params.training.minibatch_size
        )
def main(params):
# Set minibatch size based on # of devices being used to train
params["training"]["minibatch_size"] *= minibatch_size_multiplier(
params["use_gpu"], params["use_all_avail_gpus"]
)
rl_parameters = RLParameters(**params["rl"])
training_parameters = TrainingParameters(**params["training"])
rainbow_parameters = RainbowDQNParameters(**params["rainbow"])
model_params = DiscreteActionModelParameters(
actions=params["actions"],
rl=rl_parameters,
training=training_parameters,
rainbow=rainbow_parameters,
)
state_normalization = BaseWorkflow.read_norm_file(params["state_norm_data_path"])
writer = SummaryWriter(log_dir=params["model_output_path"])
logger.info("TensorBoard logging location is: {}".format(writer.log_dir))
preprocess_handler = DqnPreprocessHandler(
Preprocessor(state_normalization, False),
np.array(model_params.actions),
PandasSparseToDenseProcessor(),
)
workflow = DqnWorkflow(
model_params,
preprocess_handler,
state_normalization,
params["use_gpu"],
params["use_all_avail_gpus"],
)
train_dataset = JSONDatasetReader(
params["training_data_path"], batch_size=training_parameters.minibatch_size
)
eval_dataset = JSONDatasetReader(params["eval_data_path"], batch_size=16)
with summary_writer_context(writer):
workflow.train_network(train_dataset, eval_dataset, int(params["epochs"]))
return export_trainer_and_predictor(
workflow.trainer, params["model_output_path"]
) # noqa
if __name__ == "__main__":
    # Script entry: log to stdout and run the workflow with CLI-provided params.
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    params = parse_args(sys.argv)
    main(params)
| 31.814516 | 88 | 0.714068 |
import logging
import sys
from typing import Dict
import numpy as np
from ml.rl.evaluation.evaluator import Evaluator
from ml.rl.preprocessing.preprocessor import Preprocessor
from ml.rl.preprocessing.sparse_to_dense import PandasSparseToDenseProcessor
from ml.rl.readers.json_dataset_reader import JSONDatasetReader
from ml.rl.tensorboardX import summary_writer_context
from ml.rl.thrift.core.ttypes import (
DiscreteActionModelParameters,
NormalizationParameters,
RainbowDQNParameters,
RLParameters,
TrainingParameters,
)
from ml.rl.training.dqn_trainer import DQNTrainer
from ml.rl.workflow.base_workflow import BaseWorkflow
from ml.rl.workflow.helpers import (
export_trainer_and_predictor,
minibatch_size_multiplier,
parse_args,
update_model_for_warm_start,
)
from ml.rl.workflow.preprocess_handler import DqnPreprocessHandler, PreprocessHandler
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
class DqnWorkflow(BaseWorkflow):
def __init__(
self,
model_params: DiscreteActionModelParameters,
preprocess_handler: PreprocessHandler,
state_normalization: Dict[int, NormalizationParameters],
use_gpu: bool,
use_all_avail_gpus: bool,
):
logger.info("Running DQN workflow with params:")
logger.info(model_params)
model_params = model_params
trainer = DQNTrainer(
model_params,
state_normalization,
use_gpu=use_gpu,
use_all_avail_gpus=use_all_avail_gpus,
)
trainer = update_model_for_warm_start(trainer)
assert type(trainer) == DQNTrainer, "Warm started wrong model type: " + str(
type(trainer)
)
evaluator = Evaluator(
model_params.actions,
model_params.rl.gamma,
trainer,
metrics_to_score=trainer.metrics_to_score,
)
super(DqnWorkflow, self).__init__(
preprocess_handler, trainer, evaluator, model_params.training.minibatch_size
)
def main(params):
ch_size"] *= minibatch_size_multiplier(
params["use_gpu"], params["use_all_avail_gpus"]
)
rl_parameters = RLParameters(**params["rl"])
training_parameters = TrainingParameters(**params["training"])
rainbow_parameters = RainbowDQNParameters(**params["rainbow"])
model_params = DiscreteActionModelParameters(
actions=params["actions"],
rl=rl_parameters,
training=training_parameters,
rainbow=rainbow_parameters,
)
state_normalization = BaseWorkflow.read_norm_file(params["state_norm_data_path"])
writer = SummaryWriter(log_dir=params["model_output_path"])
logger.info("TensorBoard logging location is: {}".format(writer.log_dir))
preprocess_handler = DqnPreprocessHandler(
Preprocessor(state_normalization, False),
np.array(model_params.actions),
PandasSparseToDenseProcessor(),
)
workflow = DqnWorkflow(
model_params,
preprocess_handler,
state_normalization,
params["use_gpu"],
params["use_all_avail_gpus"],
)
train_dataset = JSONDatasetReader(
params["training_data_path"], batch_size=training_parameters.minibatch_size
)
eval_dataset = JSONDatasetReader(params["eval_data_path"], batch_size=16)
with summary_writer_context(writer):
workflow.train_network(train_dataset, eval_dataset, int(params["epochs"]))
return export_trainer_and_predictor(
workflow.trainer, params["model_output_path"]
)
if __name__ == "__main__":
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
params = parse_args(sys.argv)
main(params)
| true | true |
f71e7468d92a80a2d5be03a087cf20fb8d906f17 | 1,476 | py | Python | translate.py | kolk/qa_factoid2natural | ccdd0096217c8e88b148f353f0c89628b85f9c4d | [
"MIT"
] | 4 | 2019-11-28T17:49:19.000Z | 2022-02-23T17:07:08.000Z | translate.py | kolk/qa_factoid2natural | ccdd0096217c8e88b148f353f0c89628b85f9c4d | [
"MIT"
] | 5 | 2019-11-28T17:49:09.000Z | 2022-02-28T16:37:17.000Z | translate.py | kolk/qa_factoid2natural | ccdd0096217c8e88b148f353f0c89628b85f9c4d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import configargparse
from onmt.utils.logging import init_logger
from onmt.utils.misc import split_corpus
from onmt.translate.translator import build_translator
import onmt.opts as opts
def main(opt):
    """Translate the source corpus shard by shard, conditioning on answers.

    Args:
        opt: parsed command-line options (src/tgt/ans paths, shard_size,
            batch_size, etc.).
    """
    translator = build_translator(opt, report_score=True)
    # Split each input file into shards of at most opt.shard_size examples.
    src_shards = split_corpus(opt.src, opt.shard_size)
    # Answer shards accompany every source shard (answer-aware model).
    ans_shards = split_corpus(opt.ans, opt.shard_size)
    if opt.tgt is not None:
        tgt_shards = split_corpus(opt.tgt, opt.shard_size)
    else:
        # No gold targets available: pair each shard with a None placeholder.
        tgt_shards = [None] * opt.shard_size

    for idx, (src_chunk, tgt_chunk, ans_chunk) in enumerate(
            zip(src_shards, tgt_shards, ans_shards)):
        logger.info("Translating shard %d." % idx)
        translator.translate(
            src=src_chunk,
            tgt=tgt_chunk,
            ans=ans_chunk,
            src_dir=opt.src_dir,
            batch_size=opt.batch_size,
            attn_debug=opt.attn_debug
        )
if __name__ == "__main__":
    # Build the CLI/YAML argument parser and register OpenNMT's standard
    # option groups for translation.
    arg_parser = configargparse.ArgumentParser(
        description='translate.py',
        config_file_parser_class=configargparse.YAMLConfigFileParser,
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter)
    opts.config_opts(arg_parser)
    opts.add_md_help_argument(arg_parser)
    opts.translate_opts(arg_parser)
    opt = arg_parser.parse_args()
    # NOTE: main() reads `logger` as a module-level global, so it must be
    # bound here before main() is invoked.
    logger = init_logger(opt.log_file)
    main(opt)
| 32.086957 | 71 | 0.682927 |
from __future__ import unicode_literals
import configargparse
from onmt.utils.logging import init_logger
from onmt.utils.misc import split_corpus
from onmt.translate.translator import build_translator
import onmt.opts as opts
def main(opt):
translator = build_translator(opt, report_score=True)
src_shards = split_corpus(opt.src, opt.shard_size)
.translate_opts(parser)
opt = parser.parse_args()
logger = init_logger(opt.log_file)
main(opt)
| true | true |
f71e752ba12c161e76ce30e5f042ecabae71c44d | 1,251 | py | Python | evaluation.py | yphsieh/rPPG_blink | 31be5b818d34892eb9f2c1abd3b00f370413e3db | [
"Apache-2.0"
] | null | null | null | evaluation.py | yphsieh/rPPG_blink | 31be5b818d34892eb9f2c1abd3b00f370413e3db | [
"Apache-2.0"
] | null | null | null | evaluation.py | yphsieh/rPPG_blink | 31be5b818d34892eb9f2c1abd3b00f370413e3db | [
"Apache-2.0"
] | null | null | null | import os
import argparse
from keras.models import load_model
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
from data_preprocessing import *
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model_name', default='save/RDNN.h5', type=str)
parser.add_argument('--smooth', type=bool, default=False)
parser.add_argument('--scale', type=bool, default=False)
args = parser.parse_args()
print(args)
x_test = np.load('data/data_test_600.npy')
y_test = np.load('data/label_test_600.npy').reshape(-1, 1)
print('x_test: {}'.format(x_test.shape))
print('y_test: {}'.format(y_test.shape))
lie_ratio = np.sum(y_test)/y_test.shape[0]
print('Lie Ratio: {}'.format(lie_ratio))
x_test = TestPreprocess(x_test, args.smooth, args.scale)
print('='*20, 'Model Loading...', '='*20)
model = load_model(args.model_name)
print('='*20, 'Model Loaded', '='*20)
# os.system('clear')
predict = model.predict(x_test)
y_predict = (predict > 0.3).astype(np.int)
lie_ratio = np.sum(y_predict)/y_predict.shape[0]
print('Lie Ratio Predicted: {}'.format(lie_ratio))
score_f1 = f1_score(y_test, y_predict)
score_acc = accuracy_score(y_test, y_predict)
print('f1 score: {}'.format(score_f1))
print('accuracy score: {}'.format(score_acc))
| 28.431818 | 75 | 0.730616 | import os
import argparse
from keras.models import load_model
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
from data_preprocessing import *
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model_name', default='save/RDNN.h5', type=str)
parser.add_argument('--smooth', type=bool, default=False)
parser.add_argument('--scale', type=bool, default=False)
args = parser.parse_args()
print(args)
x_test = np.load('data/data_test_600.npy')
y_test = np.load('data/label_test_600.npy').reshape(-1, 1)
print('x_test: {}'.format(x_test.shape))
print('y_test: {}'.format(y_test.shape))
lie_ratio = np.sum(y_test)/y_test.shape[0]
print('Lie Ratio: {}'.format(lie_ratio))
x_test = TestPreprocess(x_test, args.smooth, args.scale)
print('='*20, 'Model Loading...', '='*20)
model = load_model(args.model_name)
print('='*20, 'Model Loaded', '='*20)
predict = model.predict(x_test)
y_predict = (predict > 0.3).astype(np.int)
lie_ratio = np.sum(y_predict)/y_predict.shape[0]
print('Lie Ratio Predicted: {}'.format(lie_ratio))
score_f1 = f1_score(y_test, y_predict)
score_acc = accuracy_score(y_test, y_predict)
print('f1 score: {}'.format(score_f1))
print('accuracy score: {}'.format(score_acc))
| true | true |
f71e757c93cf4638a9270e5f34353dc31a8e6ed9 | 3,064 | py | Python | tests/test_util/test_calibration.py | decarlof/algotom | f357ae5a71d6bc11471da7b3f80505d52918b610 | [
"Apache-2.0"
] | 6 | 2021-05-13T15:20:03.000Z | 2022-01-08T11:36:03.000Z | tests/test_util/test_calibration.py | decarlof/algotom | f357ae5a71d6bc11471da7b3f80505d52918b610 | [
"Apache-2.0"
] | 4 | 2021-05-17T09:15:14.000Z | 2021-07-08T19:38:34.000Z | tests/test_util/test_calibration.py | algotom/algotom | 3dce086bcc0c4df97700c60f8ec90e07ee95d040 | [
"Apache-2.0"
] | 5 | 2021-05-20T16:28:55.000Z | 2021-06-11T23:40:57.000Z | # ============================================================================
# ============================================================================
# Copyright (c) 2021 Nghia T. Vo. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Author: Nghia T. Vo
# E-mail: algotomography@gmail.com
# Description: Tests for the Algotom package.
# Contributors:
# ============================================================================
"""
Tests for methods in util/calibration.py
"""
import unittest
import numpy as np
import scipy.ndimage as ndi
import algotom.util.calibration as cali
class CalibrationMethods(unittest.TestCase):
def setUp(self):
self.eps = 10 ** (-6)
self.var = 0.05
sigma = 30
(self.hei, self.wid) = (64, 64)
(ycen, xcen) = (self.hei // 2, self.wid // 2)
y, x = np.ogrid[-ycen:self.hei - ycen, -xcen:self.wid - xcen]
num = 2.0 * sigma * sigma
self.bck = np.exp(-(x * x / num + y * y / num))
mat = np.zeros((self.hei, self.wid), dtype=np.float32)
self.num_dots = 1
mat[ycen - 3:ycen + 3, xcen - 3:xcen + 3] = 1
self.mat_dots = np.float32(ndi.binary_dilation(mat, iterations=2))
def test_normalize_background(self):
mat_nor = cali.normalize_background(self.bck, 3)
std_val = np.std(mat_nor)
self.assertTrue(std_val <= self.var)
def test_normalize_background_based_fft(self):
mat_nor = cali.normalize_background_based_fft(self.bck, sigma=5, pad=10)
std_val = np.std(mat_nor)
self.assertTrue(std_val <= self.var)
def test_binarize_image(self):
bck = 0.5 * np.random.rand(self.hei, self.wid)
mat_bin = cali.binarize_image(self.mat_dots + bck, bgr="dark",
denoise=False)
num_dots = ndi.label(mat_bin)[-1]
self.assertTrue(self.num_dots == num_dots)
def test_calculate_distance(self):
mat1 = np.zeros((self.hei, self.wid), dtype=np.float32)
mat2 = np.zeros_like(mat1)
bck = 0.5 * np.random.rand(self.hei, self.wid)
mat1[5, 10] = 1.0
mat1 = np.float32(ndi.binary_dilation(mat1, iterations=3))
mat2[5, 20] = 1.0
mat2 = np.float32(ndi.binary_dilation(mat2, iterations=3))
dis = cali.calculate_distance(mat1 + bck, mat2 + bck, bgr="dark",
denoise=False)
self.assertTrue(np.abs(dis - 10.0) <= self.eps)
| 40.315789 | 80 | 0.570496 |
import unittest
import numpy as np
import scipy.ndimage as ndi
import algotom.util.calibration as cali
class CalibrationMethods(unittest.TestCase):
def setUp(self):
self.eps = 10 ** (-6)
self.var = 0.05
sigma = 30
(self.hei, self.wid) = (64, 64)
(ycen, xcen) = (self.hei // 2, self.wid // 2)
y, x = np.ogrid[-ycen:self.hei - ycen, -xcen:self.wid - xcen]
num = 2.0 * sigma * sigma
self.bck = np.exp(-(x * x / num + y * y / num))
mat = np.zeros((self.hei, self.wid), dtype=np.float32)
self.num_dots = 1
mat[ycen - 3:ycen + 3, xcen - 3:xcen + 3] = 1
self.mat_dots = np.float32(ndi.binary_dilation(mat, iterations=2))
def test_normalize_background(self):
mat_nor = cali.normalize_background(self.bck, 3)
std_val = np.std(mat_nor)
self.assertTrue(std_val <= self.var)
def test_normalize_background_based_fft(self):
mat_nor = cali.normalize_background_based_fft(self.bck, sigma=5, pad=10)
std_val = np.std(mat_nor)
self.assertTrue(std_val <= self.var)
def test_binarize_image(self):
bck = 0.5 * np.random.rand(self.hei, self.wid)
mat_bin = cali.binarize_image(self.mat_dots + bck, bgr="dark",
denoise=False)
num_dots = ndi.label(mat_bin)[-1]
self.assertTrue(self.num_dots == num_dots)
def test_calculate_distance(self):
mat1 = np.zeros((self.hei, self.wid), dtype=np.float32)
mat2 = np.zeros_like(mat1)
bck = 0.5 * np.random.rand(self.hei, self.wid)
mat1[5, 10] = 1.0
mat1 = np.float32(ndi.binary_dilation(mat1, iterations=3))
mat2[5, 20] = 1.0
mat2 = np.float32(ndi.binary_dilation(mat2, iterations=3))
dis = cali.calculate_distance(mat1 + bck, mat2 + bck, bgr="dark",
denoise=False)
self.assertTrue(np.abs(dis - 10.0) <= self.eps)
| true | true |
f71e76a107f72063c37f240137d16371ad528ba9 | 2,353 | py | Python | third_party/chromite/cros_bisect/manual_evaluator.py | zipated/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | third_party/chromite/cros_bisect/manual_evaluator.py | cangulcan/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | third_party/chromite/cros_bisect/manual_evaluator.py | cangulcan/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | # -*- coding: utf-8 -*-
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Asks users if the commit is good or bad."""
from __future__ import print_function
import os
from chromite.cros_bisect import common
from chromite.cros_bisect import evaluator
from chromite.lib import cros_build_lib
from chromite.lib import osutils
class ManualEvaluator(evaluator.Evaluator):
  """Evaluator that asks a human whether a build is good or bad."""

  # Scores are binary (1.0 / 0.0), so 0.5 cleanly separates good from bad.
  THRESHOLD = 0.5

  def __init__(self, options):
    super(ManualEvaluator, self).__init__(options)

  def GetReportPath(self, build_label):
    """Returns the report file path for the given build label.

    Args:
      build_label: current build label to run the evaluation.

    Returns:
      Report file path.
    """
    filename = 'manual.%s.report' % build_label
    return os.path.join(self.report_base_dir, filename)

  def Evaluate(self, unused_remote, build_label, unused_repeat=1):
    """Prompts the user to judge the build and records the verdict.

    Args:
      unused_remote: Unused.
      build_label: Build label used for the report filename and the prompt.
      unused_repeat: Unused.

    Returns:
      Score([1.0]) for a good build; Score([0.0]) otherwise.
    """
    verdict = cros_build_lib.BooleanPrompt(
        prompt='Is %s a good build on the DUT?' % build_label)
    score = 1.0 if verdict else 0.0
    # Persist the verdict so CheckLastEvaluate can reuse it later.
    osutils.WriteFile(self.GetReportPath(build_label), '%d' % score)
    return common.Score([score])

  def CheckLastEvaluate(self, build_label, unused_repeat=1):
    """Reuses a previously recorded verdict for this build, if available.

    Args:
      build_label: Build label used for the report filename.
      unused_repeat: Unused.

    Returns:
      Score([1.0]) when the prior verdict was "yes", Score([0.0]) when it
      was "no"; an empty Score() when no prior result exists or reuse_eval
      is unset.
    """
    if self.reuse_eval:
      report_path = self.GetReportPath(build_label)
      if os.path.isfile(report_path):
        recorded = osutils.ReadFile(report_path)
        if recorded == '1':
          return common.Score([1.0])
        if recorded == '0':
          return common.Score([0.0])
    return common.Score()
| 29.78481 | 80 | 0.689333 |
from __future__ import print_function
import os
from chromite.cros_bisect import common
from chromite.cros_bisect import evaluator
from chromite.lib import cros_build_lib
from chromite.lib import osutils
class ManualEvaluator(evaluator.Evaluator):
THRESHOLD = 0.5
def __init__(self, options):
super(ManualEvaluator, self).__init__(options)
def GetReportPath(self, build_label):
return os.path.join(self.report_base_dir, 'manual.%s.report' % build_label)
def Evaluate(self, unused_remote, build_label, unused_repeat=1):
report_path = self.GetReportPath(build_label)
prompt = 'Is %s a good build on the DUT?' % build_label
is_good = cros_build_lib.BooleanPrompt(prompt=prompt)
score = 1.0 if is_good else 0.0
osutils.WriteFile(report_path, '%d' % score)
return common.Score([score])
def CheckLastEvaluate(self, build_label, unused_repeat=1):
if self.reuse_eval:
report_path = self.GetReportPath(build_label)
if os.path.isfile(report_path):
content = osutils.ReadFile(report_path)
if content == '1':
return common.Score([1.0])
elif content == '0':
return common.Score([0.0])
return common.Score()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.