<filename>tests/test_kubernetes_tools.py
import unittest
from typing import Sequence
import mock
import pytest
from kubernetes.client import V1AWSElasticBlockStoreVolumeSource
from kubernetes.client import V1Container
from kubernetes.client import V1ContainerPort
from kubernetes.client import V1Deployment
from kubernetes.client import V1DeploymentSpec
from kubernetes.client import V1DeploymentStrategy
from kubernetes.client import V1EnvVar
from kubernetes.client import V1ExecAction
from kubernetes.client import V1Handler
from kubernetes.client import V1HostPathVolumeSource
from kubernetes.client import V1HTTPGetAction
from kubernetes.client import V1LabelSelector
from kubernetes.client import V1Lifecycle
from kubernetes.client import V1ObjectMeta
from kubernetes.client import V1PersistentVolumeClaim
from kubernetes.client import V1PersistentVolumeClaimSpec
from kubernetes.client import V1PodSpec
from kubernetes.client import V1PodTemplateSpec
from kubernetes.client import V1Probe
from kubernetes.client import V1ResourceRequirements
from kubernetes.client import V1RollingUpdateDeployment
from kubernetes.client import V1StatefulSet
from kubernetes.client import V1StatefulSetSpec
from kubernetes.client import V1TCPSocketAction
from kubernetes.client import V1Volume
from kubernetes.client import V1VolumeMount
from kubernetes.client.rest import ApiException
from paasta_tools.kubernetes_tools import create_deployment
from paasta_tools.kubernetes_tools import create_stateful_set
from paasta_tools.kubernetes_tools import ensure_paasta_namespace
from paasta_tools.kubernetes_tools import get_active_shas_for_service
from paasta_tools.kubernetes_tools import get_kubernetes_app_by_name
from paasta_tools.kubernetes_tools import get_kubernetes_app_deploy_status
from paasta_tools.kubernetes_tools import get_kubernetes_services_running_here
from paasta_tools.kubernetes_tools import get_kubernetes_services_running_here_for_nerve
from paasta_tools.kubernetes_tools import InvalidKubernetesConfig
from paasta_tools.kubernetes_tools import KubeClient
from paasta_tools.kubernetes_tools import KubeDeployment
from paasta_tools.kubernetes_tools import KubernetesDeploymentConfig
from paasta_tools.kubernetes_tools import KubernetesDeploymentConfigDict
from paasta_tools.kubernetes_tools import KubernetesDeployStatus
from paasta_tools.kubernetes_tools import KubeService
from paasta_tools.kubernetes_tools import list_all_deployments
from paasta_tools.kubernetes_tools import load_kubernetes_service_config
from paasta_tools.kubernetes_tools import load_kubernetes_service_config_no_cache
from paasta_tools.kubernetes_tools import pods_for_service_instance
from paasta_tools.kubernetes_tools import read_all_registrations_for_service_instance
from paasta_tools.kubernetes_tools import update_deployment
from paasta_tools.kubernetes_tools import update_stateful_set
from paasta_tools.utils import AwsEbsVolume
from paasta_tools.utils import DockerVolume
from paasta_tools.utils import InvalidJobNameError
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import PaastaNotConfiguredError
def test_load_kubernetes_service_config_no_cache():
with mock.patch(
'service_configuration_lib.read_service_configuration', autospec=True,
) as mock_read_service_configuration, mock.patch(
'service_configuration_lib.read_extra_service_information', autospec=True,
) as mock_read_extra_service_information, mock.patch(
'paasta_tools.kubernetes_tools.load_v2_deployments_json', autospec=True,
) as mock_load_v2_deployments_json, mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig', autospec=True,
) as mock_kube_deploy_config:
with pytest.raises(NoConfigurationForServiceError):
load_kubernetes_service_config_no_cache(
service='kurupt',
instance='fm',
cluster='brentford',
load_deployments=False,
)
with pytest.raises(InvalidJobNameError):
load_kubernetes_service_config_no_cache(
service='kurupt',
instance='_fm',
cluster='brentford',
load_deployments=False,
)
mock_config = {'freq': '108.9'}
mock_read_extra_service_information.return_value = {'fm': mock_config}
mock_read_service_configuration.return_value = {}
ret = load_kubernetes_service_config_no_cache(
service='kurupt',
instance='fm',
cluster='brentford',
load_deployments=False,
soa_dir='/nail/blah',
)
mock_kube_deploy_config.assert_called_with(
service='kurupt',
instance='fm',
cluster='brentford',
config_dict={'freq': '108.9'},
branch_dict=None,
soa_dir='/nail/blah',
)
assert not mock_load_v2_deployments_json.called
assert ret == mock_kube_deploy_config.return_value
mock_kube_deploy_config.reset_mock()
ret = load_kubernetes_service_config_no_cache(
service='kurupt',
instance='fm',
cluster='brentford',
load_deployments=True,
soa_dir='/nail/blah',
)
mock_load_v2_deployments_json.assert_called_with(
service='kurupt',
soa_dir='/nail/blah',
)
mock_kube_deploy_config.assert_called_with(
service='kurupt',
instance='fm',
cluster='brentford',
config_dict={'freq': '108.9'},
branch_dict=mock_load_v2_deployments_json.return_value.get_branch_dict(),
soa_dir='/nail/blah',
)
assert ret == mock_kube_deploy_config.return_value
def test_load_kubernetes_service_config():
with mock.patch(
'paasta_tools.kubernetes_tools.load_kubernetes_service_config_no_cache', autospec=True,
) as mock_load_kubernetes_service_config_no_cache:
ret = load_kubernetes_service_config(
service='kurupt',
instance='fm',
cluster='brentford',
load_deployments=True,
soa_dir='/nail/blah',
)
assert ret == mock_load_kubernetes_service_config_no_cache.return_value
class TestKubernetesDeploymentConfig(unittest.TestCase):
def setUp(self):
mock_config_dict = KubernetesDeploymentConfigDict(
bounce_method='crossover',
instances=3,
)
self.deployment = KubernetesDeploymentConfig(
service='kurupt',
instance='fm',
cluster='brentford',
config_dict=mock_config_dict,
branch_dict=None,
soa_dir='/nail/blah',
)
def test_copy(self):
assert self.deployment.copy() == self.deployment
assert self.deployment.copy() is not self.deployment
def test_get_bounce_method(self):
with mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_aws_ebs_volumes', autospec=True,
) as mock_get_aws_ebs_volumes:
mock_get_aws_ebs_volumes.return_value = []
assert self.deployment.get_bounce_method() == 'RollingUpdate'
self.deployment.config_dict['bounce_method'] = 'downthenup'
assert self.deployment.get_bounce_method() == 'Recreate'
self.deployment.config_dict['bounce_method'] = 'crossover'
# If EBS volumes are attached we must bounce down-then-up for now, as we need to free up the EBS volume for the new instance
mock_get_aws_ebs_volumes.return_value = ['some-ebs']
with pytest.raises(Exception):
self.deployment.get_bounce_method()
def test_get_deployment_strategy(self):
with mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_bounce_method', autospec=True,
return_value='RollingUpdate',
) as mock_get_bounce_method:
assert self.deployment.get_deployment_strategy_config() == V1DeploymentStrategy(
type='RollingUpdate',
rolling_update=V1RollingUpdateDeployment(
max_surge='100%',
max_unavailable='0%',
),
)
mock_get_bounce_method.return_value = 'Recreate'
assert self.deployment.get_deployment_strategy_config() == V1DeploymentStrategy(
type='Recreate',
)
def test_get_sanitised_volume_name(self):
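# Expected behaviour checked below: "/" is replaced with "slash-" and a trailing slash is ignored, yielding a Kubernetes-safe volume name.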
assert self.deployment.get_sanitised_volume_name('/var/tmp') == 'slash-varslash-tmp'
assert self.deployment.get_sanitised_volume_name('/var/tmp/') == 'slash-varslash-tmp'
def test_get_sidecar_containers(self):
with mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_registrations', autospec=True,
return_value=['universal.credit'],
), mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_kubernetes_environment', autospec=True,
return_value={},
), mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sanitised_volume_name', autospec=True,
return_value='sane-name',
):
mock_system_config = mock.Mock(
get_enable_nerve_readiness_check=mock.Mock(return_value=False),
get_nerve_readiness_check_script=mock.Mock(return_value='/nail/blah.sh'),
get_hacheck_sidecar_image_url=mock.Mock(return_value='some-docker-image'),
)
ret = self.deployment.get_sidecar_containers(mock_system_config)
expected = [
V1Container(
env={},
image='some-docker-image',
lifecycle=V1Lifecycle(
pre_stop=V1Handler(
_exec=V1ExecAction(
command=[
'/bin/sh',
'-c',
'/usr/bin/hadown '
'universal.credit; sleep '
'31',
],
),
),
),
name='hacheck',
ports=[V1ContainerPort(container_port=6666)],
),
]
assert ret == expected
mock_system_config = mock.Mock(
get_enable_nerve_readiness_check=mock.Mock(return_value=True),
get_nerve_readiness_check_script=mock.Mock(return_value='/nail/blah.sh'),
get_hacheck_sidecar_image_url=mock.Mock(return_value='some-docker-image'),
)
ret = self.deployment.get_sidecar_containers(mock_system_config)
expected = [
V1Container(
env={},
image='some-docker-image',
lifecycle=V1Lifecycle(
pre_stop=V1Handler(
_exec=V1ExecAction(
command=[
'/bin/sh',
'-c',
'/usr/bin/hadown '
'universal.credit; sleep '
'31',
],
),
),
),
name='hacheck',
ports=[V1ContainerPort(container_port=6666)],
readiness_probe=V1Probe(
_exec=V1ExecAction(
command=['/nail/blah.sh', 'universal.credit'],
),
initial_delay_seconds=10,
period_seconds=10,
),
),
]
assert ret == expected
def test_get_container_env(self):
with mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_env', autospec=True,
return_value={
'mc': 'grindah',
'dj': 'beats',
},
), mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_kubernetes_environment', autospec=True,
return_value=[
V1EnvVar(
name='manager',
value='chabuddy',
),
],
):
expected = [
V1EnvVar(name='mc', value='grindah'),
V1EnvVar(name='dj', value='beats'),
V1EnvVar(name='manager', value='chabuddy'),
]
assert expected == self.deployment.get_container_env()
def test_get_kubernetes_environment(self):
ret = self.deployment.get_kubernetes_environment()
assert 'PAASTA_POD_IP' in [env.name for env in ret]
def test_get_resource_requirements(self):
with mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_cpus', autospec=True,
return_value=0.3,
), mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_cpu_burst_add', autospec=True,
return_value=1,
), mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_mem', autospec=True,
return_value=2048,
):
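# The CPU request mirrors get_cpus (0.3) while the limit adds the burst allowance (0.3 + 1 = 1.3); memory request and limit are both the configured 2048Mi.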
assert self.deployment.get_resource_requirements() == V1ResourceRequirements(
limits={
'cpu': 1.3,
'memory': '2048Mi',
},
requests={
'cpu': 0.3,
'memory': '2048Mi',
},
)
def test_get_kubernetes_containers(self):
with mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_docker_url', autospec=True,
) as mock_get_docker_url, mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_cmd', autospec=True,
) as mock_get_cmd, mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_args', autospec=True,
) as mock_get_args, mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_resource_requirements', autospec=True,
) as mock_get_resource_requirements, mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_container_env', autospec=True,
) as mock_get_container_env, mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sanitised_service_name', autospec=True,
return_value='kurupt',
), mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sanitised_instance_name', autospec=True,
return_value='fm',
), mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_volume_mounts', autospec=True,
) as mock_get_volume_mounts, mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sidecar_containers', autospec=True,
return_value=['mock_sidecar'],
):
mock_system_config = mock.Mock()
mock_docker_volumes: Sequence[DockerVolume] = []
mock_aws_ebs_volumes: Sequence[AwsEbsVolume] = []
expected = [
V1Container(
args=mock_get_args.return_value,
command=mock_get_cmd.return_value,
env=mock_get_container_env.return_value,
resources=mock_get_resource_requirements.return_value,
image=mock_get_docker_url.return_value,
lifecycle=V1Lifecycle(
pre_stop=V1Handler(
_exec=V1ExecAction(
command=[
'/bin/sh',
'-c',
'sleep 30',
],
),
),
),
liveness_probe=V1Probe(
failure_threshold=30,
http_get=V1HTTPGetAction(
path='/status',
port=8888,
scheme='HTTP',
),
initial_delay_seconds=60,
period_seconds=10,
timeout_seconds=10,
),
name='kurupt-fm',
ports=[V1ContainerPort(container_port=8888)],
volume_mounts=mock_get_volume_mounts.return_value,
), 'mock_sidecar',
]
service_namespace_config = mock.Mock()
service_namespace_config.get_mode.return_value = 'http'
service_namespace_config.get_healthcheck_uri.return_value = '/status'
assert self.deployment.get_kubernetes_containers(
docker_volumes=mock_docker_volumes,
system_paasta_config=mock_system_config,
aws_ebs_volumes=mock_aws_ebs_volumes,
service_namespace_config=service_namespace_config,
) == expected
def test_get_liveness_probe(self):
liveness_probe = V1Probe(
failure_threshold=30,
http_get=V1HTTPGetAction(
path='/status',
port=8888,
scheme='HTTP',
),
initial_delay_seconds=60,
period_seconds=10,
timeout_seconds=10,
)
service_namespace_config = mock.Mock()
service_namespace_config.get_mode.return_value = 'http'
service_namespace_config.get_healthcheck_uri.return_value = '/status'
assert self.deployment.get_liveness_probe(service_namespace_config) == liveness_probe
def test_get_liveness_probe_non_smartstack(self):
service_namespace_config = mock.Mock()
service_namespace_config.get_mode.return_value = None
assert self.deployment.get_liveness_probe(service_namespace_config) is None
def test_get_liveness_probe_numbers(self):
liveness_probe = V1Probe(
failure_threshold=1,
http_get=V1HTTPGetAction(
path='/status',
port=8888,
scheme='HTTP',
),
initial_delay_seconds=2,
period_seconds=3,
timeout_seconds=4,
)
service_namespace_config = mock.Mock()
service_namespace_config.get_mode.return_value = 'http'
service_namespace_config.get_healthcheck_uri.return_value = '/status'
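# Each healthcheck_* option set below maps onto one probe field asserted above: max_consecutive_failures -> failure_threshold, grace_period_seconds -> initial_delay_seconds, interval_seconds -> period_seconds, timeout_seconds -> timeout_seconds.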
self.deployment.config_dict['healthcheck_max_consecutive_failures'] = 1
self.deployment.config_dict['healthcheck_grace_period_seconds'] = 2
self.deployment.config_dict['healthcheck_interval_seconds'] = 3
self.deployment.config_dict['healthcheck_timeout_seconds'] = 4
assert self.deployment.get_liveness_probe(service_namespace_config) == liveness_probe
def test_get_liveness_probe_tcp_socket(self):
liveness_probe = V1Probe(
failure_threshold=30,
tcp_socket=V1TCPSocketAction(
port=8888,
),
initial_delay_seconds=60,
period_seconds=10,
timeout_seconds=10,
)
service_namespace_config = mock.Mock()
service_namespace_config.get_mode.return_value = 'tcp'
assert self.deployment.get_liveness_probe(service_namespace_config) == liveness_probe
def test_get_liveness_probe_cmd(self):
liveness_probe = V1Probe(
failure_threshold=30,
_exec=V1ExecAction(
command='/bin/true',
),
initial_delay_seconds=60,
period_seconds=10,
timeout_seconds=10,
)
service_namespace_config = mock.Mock()
service_namespace_config.get_mode.return_value = 'cmd'
self.deployment.config_dict['healthcheck_cmd'] = '/bin/true'
assert self.deployment.get_liveness_probe(service_namespace_config) == liveness_probe
def test_get_pod_volumes(self):
mock_docker_volumes = [
{'hostPath': '/nail/blah', 'containerPath': '/nail/foo'},
{'hostPath': '/nail/thing', 'containerPath': '/nail/bar'},
]
mock_aws_ebs_volumes = [
{'volume_id': 'vol-ZZZZZZZZZZZZZZZZZ', 'fs_type': 'ext4', 'container_path': '/nail/qux'},
]
expected_volumes = [
V1Volume(
host_path=V1HostPathVolumeSource(
path='/nail/blah',
),
name='host--slash-nailslash-blah',
),
V1Volume(
host_path=V1HostPathVolumeSource(
path='/nail/thing',
),
name='host--slash-nailslash-thing',
),
V1Volume(
aws_elastic_block_store=V1AWSElasticBlockStoreVolumeSource(
volume_id='vol-ZZZZZZZZZZZZZZZZZ',
fs_type='ext4',
read_only=False,
),
name='aws-ebs--vol-ZZZZZZZZZZZZZZZZZ',
),
]
assert self.deployment.get_pod_volumes(
docker_volumes=mock_docker_volumes,
aws_ebs_volumes=mock_aws_ebs_volumes,
) == expected_volumes
def test_get_volume_mounts(self):
with mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sanitised_volume_name', autospec=True,
return_value='some-volume',
):
mock_docker_volumes = [
{'hostPath': '/nail/blah', 'containerPath': '/nail/foo'},
{'hostPath': '/nail/thing', 'containerPath': '/nail/bar', 'mode': 'RW'},
]
mock_aws_ebs_volumes = [
{'volume_id': 'vol-ZZZZZZZZZZZZZZZZZ', 'fs_type': 'ext4', 'container_path': '/nail/qux'},
]
mock_persistent_volumes = [
{'container_path': '/blah', 'mode': 'RW'},
]
expected_volumes = [
V1VolumeMount(
mount_path='/nail/foo',
name='some-volume',
read_only=True,
),
V1VolumeMount(
mount_path='/nail/bar',
name='some-volume',
read_only=False,
),
V1VolumeMount(
mount_path='/nail/qux',
name='some-volume',
read_only=True,
),
V1VolumeMount(
mount_path='/blah',
name='some-volume',
read_only=False,
),
]
assert self.deployment.get_volume_mounts(
docker_volumes=mock_docker_volumes,
aws_ebs_volumes=mock_aws_ebs_volumes,
persistent_volumes=mock_persistent_volumes,
) == expected_volumes
def test_get_sanitised_service_name(self):
with mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_service', autospec=True,
return_value='my_service',
):
assert self.deployment.get_sanitised_service_name() == 'my--service'
def test_get_sanitised_instance_name(self):
with mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_instance', autospec=True,
return_value='my_instance',
):
assert self.deployment.get_sanitised_instance_name() == 'my--instance'
def test_get_desired_instances(self):
with mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_aws_ebs_volumes', autospec=True,
) as mock_get_aws_ebs_volumes:
mock_get_aws_ebs_volumes.return_value = []
assert self.deployment.get_desired_instances() == 3
mock_get_aws_ebs_volumes.return_value = ['some-ebs-vol']
with pytest.raises(Exception):
self.deployment.get_desired_instances()
def test_format_kubernetes_app_dict(self):
with mock.patch(
'paasta_tools.kubernetes_tools.load_system_paasta_config', autospec=True,
) as mock_load_system_config, mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_docker_url', autospec=True,
) as mock_get_docker_url, mock.patch(
'paasta_tools.kubernetes_tools.get_code_sha_from_dockerurl', autospec=True,
), mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sanitised_service_name', autospec=True,
return_value='kurupt',
), mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sanitised_instance_name', autospec=True,
return_value='fm',
), mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_service', autospec=True,
) as mock_get_service, mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_instance', autospec=True,
) as mock_get_instance, mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_desired_instances', autospec=True,
) as mock_get_instances, mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_deployment_strategy_config', autospec=True,
) as mock_get_deployment_strategy_config, mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sanitised_volume_name', autospec=True,
), mock.patch(
'paasta_tools.kubernetes_tools.get_config_hash', autospec=True,
) as mock_get_config_hash, mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_force_bounce', autospec=True,
) as mock_get_force_bounce, mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.sanitize_for_config_hash', autospec=True,
) as mock_sanitize_for_config_hash, mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_persistent_volumes', autospec=True,
) as mock_get_persistent_volumes, mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_volume_claim_templates', autospec=True,
) as mock_get_volumes_claim_templates, mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_pod_template_spec', autospec=True,
) as mock_get_pod_template_spec, mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_kubernetes_metadata', autospec=True,
) as mock_get_kubernetes_metadata:
mock_get_persistent_volumes.return_value = []
ret = self.deployment.format_kubernetes_app()
assert mock_load_system_config.called
assert mock_get_docker_url.called
mock_get_config_hash.assert_called_with(
mock_sanitize_for_config_hash.return_value,
force_bounce=mock_get_force_bounce.return_value,
)
expected = V1Deployment(
api_version='apps/v1',
kind='Deployment',
metadata=mock_get_kubernetes_metadata.return_value,
spec=V1DeploymentSpec(
replicas=mock_get_instances.return_value,
selector=V1LabelSelector(
match_labels={
'instance': mock_get_instance.return_value,
'service': mock_get_service.return_value,
},
),
strategy=mock_get_deployment_strategy_config.return_value,
template=mock_get_pod_template_spec.return_value,
),
)
assert ret == expected
ret.metadata.labels.__setitem__.assert_called_with('config_sha', mock_get_config_hash.return_value)
ret.spec.template.metadata.labels.__setitem__.assert_called_with(
'config_sha',
mock_get_config_hash.return_value,
)
mock_get_deployment_strategy_config.side_effect = Exception("Bad bounce method")
with pytest.raises(InvalidKubernetesConfig):
self.deployment.format_kubernetes_app()
mock_get_persistent_volumes.return_value = [mock.Mock()]
ret = self.deployment.format_kubernetes_app()
expected = V1StatefulSet(
api_version='apps/v1',
kind='StatefulSet',
metadata=mock_get_kubernetes_metadata.return_value,
spec=V1StatefulSetSpec(
service_name='kurupt-fm',
replicas=mock_get_instances.return_value,
selector=V1LabelSelector(
match_labels={
'instance': mock_get_instance.return_value,
'service': mock_get_service.return_value,
},
),
template=mock_get_pod_template_spec.return_value,
volume_claim_templates=mock_get_volumes_claim_templates.return_value,
),
)
assert ret == expected
ret.metadata.labels.__setitem__.assert_called_with('config_sha', mock_get_config_hash.return_value)
ret.spec.template.metadata.labels.__setitem__.assert_called_with(
'config_sha',
mock_get_config_hash.return_value,
)
def test_get_pod_template_spec(self):
with mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_volumes', autospec=True,
) as mock_get_volumes, mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_service', autospec=True,
) as mock_get_service, mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_instance', autospec=True,
) as mock_get_instance, mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_kubernetes_containers', autospec=True,
) as mock_get_kubernetes_containers, mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_pod_volumes', autospec=True,
return_value=[],
) as mock_get_pod_volumes:
ret = self.deployment.get_pod_template_spec(code_sha='aaaa123', system_paasta_config=mock.Mock())
assert mock_get_pod_volumes.called
assert mock_get_volumes.called
assert ret == V1PodTemplateSpec(
metadata=V1ObjectMeta(
labels={
'git_sha': 'aaaa123',
'instance': mock_get_instance.return_value,
'service': mock_get_service.return_value,
},
),
spec=V1PodSpec(
containers=mock_get_kubernetes_containers.return_value,
restart_policy='Always',
volumes=[],
),
)
def test_get_kubernetes_metadata(self):
with mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_service', autospec=True,
return_value='kurupt',
) as mock_get_service, mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_instance', autospec=True, return_value='fm',
) as mock_get_instance:
ret = self.deployment.get_kubernetes_metadata('aaa123')
assert ret == V1ObjectMeta(
labels={
'git_sha': 'aaa123',
'instance': mock_get_instance.return_value,
'service': mock_get_service.return_value,
},
name='kurupt-fm',
)
def test_sanitize_config_hash(self):
mock_config = V1Deployment(
metadata=V1ObjectMeta(
name='qwe',
labels={
'mc': 'grindah',
},
),
spec=V1DeploymentSpec(
replicas=2,
selector=V1LabelSelector(
match_labels={
'freq': '108.9',
},
),
template=V1PodTemplateSpec(),
),
)
ret = self.deployment.sanitize_for_config_hash(mock_config)
assert 'replicas' not in ret['spec'].keys()
def test_get_bounce_margin_factor(self):
assert isinstance(self.deployment.get_bounce_margin_factor(), float)
def test_get_volume_claim_templates(self):
with mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_persistent_volumes', autospec=True,
) as mock_get_persistent_volumes, mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_persistent_volume_name', autospec=True,
) as mock_get_persistent_volume_name, mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_storage_class_name', autospec=True,
) as mock_get_storage_class_name:
mock_get_persistent_volumes.return_value = [
{'size': 20},
{'size': 10},
]
expected = [
V1PersistentVolumeClaim(
metadata=V1ObjectMeta(
name=mock_get_persistent_volume_name.return_value,
),
spec=V1PersistentVolumeClaimSpec(
access_modes=["ReadWriteOnce"],
storage_class_name=mock_get_storage_class_name.return_value,
resources=V1ResourceRequirements(
requests={
'storage': '10Gi',
},
),
),
),
V1PersistentVolumeClaim(
metadata=V1ObjectMeta(
name=mock_get_persistent_volume_name.return_value,
),
spec=V1PersistentVolumeClaimSpec(
access_modes=["ReadWriteOnce"],
storage_class_name=mock_get_storage_class_name.return_value,
resources=V1ResourceRequirements(
requests={
'storage': '20Gi',
},
),
),
),
]
ret = self.deployment.get_volume_claim_templates()
assert expected[0] in ret
assert expected[1] in ret
assert len(ret) == 2
def test_get_storage_class_name(self):
assert isinstance(self.deployment.get_storage_class_name(), str)
def test_get_persistent_volume_name(self):
assert self.deployment.get_persistent_volume_name(
{'container_path': '/blah/what'},
) == 'pv--slash-blahslash-what'
def test_read_all_registrations_for_service_instance():
with mock.patch(
'paasta_tools.kubernetes_tools.load_system_paasta_config', autospec=True,
), mock.patch(
'paasta_tools.kubernetes_tools.load_kubernetes_service_config', autospec=True,
) as mock_load_kubernetes_service_config:
assert read_all_registrations_for_service_instance(
service='kurupt',
instance='fm',
cluster='brentford',
soa_dir='/nail/blah',
) == mock_load_kubernetes_service_config.return_value.get_registrations()
mock_load_kubernetes_service_config.assert_called_with(
service='kurupt',
instance='fm',
cluster='brentford',
load_deployments=False,
soa_dir='/nail/blah',
)
def test_get_kubernetes_services_running_here():
with mock.patch(
'paasta_tools.kubernetes_tools.requests.get', autospec=True,
) as mock_requests_get:
mock_requests_get.return_value.json.return_value = {'items': []}
assert get_kubernetes_services_running_here() == []
mock_requests_get.return_value.json.return_value = {'items': [
{
'status': {
'phase': 'Running',
'podIP': '10.1.1.1',
},
'metadata': {
'namespace': 'paasta',
'labels': {
'service': 'kurupt',
'instance': 'fm',
},
},
}, {
'status': {
'phase': 'Something',
'podIP': '10.1.1.1',
},
'metadata': {
'namespace': 'paasta',
'labels': {
'service': 'kurupt',
'instance': 'garage',
},
},
}, {
'status': {
'phase': 'Running',
},
'metadata': {
'namespace': 'paasta',
'labels': {
'service': 'kurupt',
'instance': 'grindah',
},
},
}, {
'status': {
'phase': 'Running',
'podIP': '10.1.1.1',
},
'metadata': {
'namespace': 'kube-system',
'labels': {
'service': 'kurupt',
'instance': 'beats',
},
},
},
]}
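# Only the first pod qualifies: it is Running, has a podIP and sits in the "paasta" namespace. The others are dropped for a non-Running phase, a missing podIP, or living in kube-system.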
assert get_kubernetes_services_running_here() == [
KubeService(
name='kurupt',
instance='fm',
port=8888,
pod_ip='10.1.1.1',
),
]
class MockNerveDict(dict):
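# Stand-in for a loaded namespace config: every registration except "garage" reports itself as being in SmartStack.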
def is_in_smartstack(self):
return self['name'] != 'garage'
def test_get_kubernetes_services_running_here_for_nerve():
with mock.patch(
'paasta_tools.kubernetes_tools.load_system_paasta_config', autospec=True,
) as mock_load_system_config, mock.patch(
'paasta_tools.kubernetes_tools.get_kubernetes_services_running_here', autospec=True,
) as mock_get_kubernetes_services_running_here, mock.patch(
'paasta_tools.kubernetes_tools.read_all_registrations_for_service_instance', autospec=True,
) as mock_read_all_registrations_for_service_instance, mock.patch(
'paasta_tools.kubernetes_tools.load_service_namespace_config', autospec=True,
) as mock_load_service_namespace:
mock_get_kubernetes_services_running_here.return_value = [
KubeService(
name='kurupt',
instance='fm',
port=8888,
pod_ip='10.1.1.1',
),
KubeService(
name='kurupt',
instance='garage',
port=8888,
pod_ip='10.1.1.1',
),
]
mock_read_all_registrations_for_service_instance.side_effect = lambda a, b, c, d: [f"{a}.{b}"]
mock_load_service_namespace.side_effect = lambda service, namespace, soa_dir: MockNerveDict(name=namespace)
mock_load_system_config.side_effect = PaastaNotConfiguredError
ret = get_kubernetes_services_running_here_for_nerve('brentford', '/nail/blah')
assert ret == []
mock_load_system_config.side_effect = None
mock_load_system_config.return_value = mock.Mock(
get_cluster=mock.Mock(return_value='brentford'),
get_register_k8s_pods=mock.Mock(return_value=False),
)
ret = get_kubernetes_services_running_here_for_nerve('brentford', '/nail/blah')
assert ret == []
mock_load_system_config.return_value = mock.Mock(
get_cluster=mock.Mock(return_value='brentford'),
get_register_k8s_pods=mock.Mock(return_value=True),
)
ret = get_kubernetes_services_running_here_for_nerve('brentford', '/nail/blah')
assert ret == [(
'kurupt.fm', {
'name': 'fm',
'hacheck_ip': '10.1.1.1',
'service_ip': '10.1.1.1',
'port': 8888,
},
)]
mock_read_all_registrations_for_service_instance.assert_has_calls([
mock.call(
'kurupt', 'fm', 'brentford', '/nail/blah',
),
mock.call(
'kurupt', 'garage', 'brentford', '/nail/blah',
),
])
mock_read_all_registrations_for_service_instance.side_effect = NoConfigurationForServiceError
ret = get_kubernetes_services_running_here_for_nerve('brentford', '/nail/blah')
assert ret == []
def test_KubeClient():
with mock.patch(
'paasta_tools.kubernetes_tools.kube_config.load_kube_config', autospec=True,
), mock.patch(
'paasta_tools.kubernetes_tools.kube_client', autospec=True,
) as mock_kube_client:
client = KubeClient()
assert client.deployments == mock_kube_client.AppsV1Api()
assert client.core == mock_kube_client.CoreV1Api()
def test_ensure_paasta_namespace():
mock_metadata = mock.Mock()
type(mock_metadata).name = 'paasta'
mock_namespaces = mock.Mock(items=[mock.Mock(metadata=mock_metadata)])
mock_client = mock.Mock(core=mock.Mock(list_namespace=mock.Mock(return_value=mock_namespaces)))
ensure_paasta_namespace(mock_client)
assert not mock_client.core.create_namespace.called
mock_metadata = mock.Mock()
type(mock_metadata).name = 'kube-system'
mock_namespaces = mock.Mock(items=[mock.Mock(metadata=mock_metadata)])
mock_client = mock.Mock(core=mock.Mock(list_namespace=mock.Mock(return_value=mock_namespaces)))
ensure_paasta_namespace(mock_client)
assert mock_client.core.create_namespace.called
mock_client.core.create_namespace.reset_mock()
mock_namespaces = mock.Mock(items=[])
mock_client = mock.Mock(core=mock.Mock(list_namespace=mock.Mock(return_value=mock_namespaces)))
ensure_paasta_namespace(mock_client)
assert mock_client.core.create_namespace.called
def test_list_all_deployments():
mock_deployments = mock.Mock(items=[])
mock_stateful_sets = mock.Mock(items=[])
mock_client = mock.Mock(
deployments=mock.Mock(
list_namespaced_deployment=mock.Mock(return_value=mock_deployments),
list_namespaced_stateful_set=mock.Mock(return_value=mock_stateful_sets),
),
)
assert list_all_deployments(mock_client) == []
mock_items = [
mock.Mock(
metadata=mock.Mock(
labels={
'service': 'kurupt',
'instance': 'fm',
'git_sha': 'a12345',
'config_sha': 'b12345',
},
),
),
mock.Mock(
metadata=mock.Mock(
labels={
'service': 'kurupt',
'instance': 'am',
'git_sha': 'a12345',
'config_sha': 'b12345',
},
),
),
]
type(mock_items[0]).spec = mock.Mock(replicas=3)
type(mock_items[1]).spec = mock.Mock(replicas=3)
mock_deployments = mock.Mock(items=[mock_items[0]])
mock_stateful_sets = mock.Mock(items=[mock_items[1]])
mock_client = mock.Mock(
deployments=mock.Mock(
list_namespaced_deployment=mock.Mock(return_value=mock_deployments),
list_namespaced_stateful_set=mock.Mock(return_value=mock_stateful_sets),
),
)
assert list_all_deployments(mock_client) == [
KubeDeployment(
service='kurupt',
instance='fm',
git_sha='a12345',
config_sha='b12345',
replicas=3,
), KubeDeployment(
service='kurupt',
instance='am',
git_sha='a12345',
config_sha='b12345',
replicas=3,
),
]
def test_create_deployment():
mock_client = mock.Mock()
create_deployment(mock_client, V1Deployment(api_version='some'))
mock_client.deployments.create_namespaced_deployment.assert_called_with(
namespace='paasta',
body=V1Deployment(api_version='some'),
)
def test_update_deployment():
mock_client = mock.Mock()
update_deployment(mock_client, V1Deployment(metadata=V1ObjectMeta(name='kurupt')))
mock_client.deployments.replace_namespaced_deployment.assert_called_with(
namespace='paasta',
name='kurupt',
body=V1Deployment(metadata=V1ObjectMeta(name='kurupt')),
)
mock_client = mock.Mock()
create_deployment(mock_client, V1Deployment(api_version='some'))
mock_client.deployments.create_namespaced_deployment.assert_called_with(
namespace='paasta',
body=V1Deployment(api_version='some'),
)
def test_create_stateful_set():
mock_client = mock.Mock()
create_stateful_set(mock_client, V1StatefulSet(api_version='some'))
mock_client.deployments.create_namespaced_stateful_set.assert_called_with(
namespace='paasta',
body=V1StatefulSet(api_version='some'),
)
def test_update_stateful_set():
mock_client = mock.Mock()
update_stateful_set(mock_client, V1StatefulSet(metadata=V1ObjectMeta(name='kurupt')))
mock_client.deployments.replace_namespaced_stateful_set.assert_called_with(
namespace='paasta',
name='kurupt',
body=V1StatefulSet(metadata=V1ObjectMeta(name='kurupt')),
)
mock_client = mock.Mock()
create_stateful_set(mock_client, V1StatefulSet(api_version='some'))
mock_client.deployments.create_namespaced_stateful_set.assert_called_with(
namespace='paasta',
body=V1StatefulSet(api_version='some'),
)
def test_get_kubernetes_app_deploy_status():
mock_status = mock.Mock(
replicas=1,
ready_replicas=1,
updated_replicas=1,
)
mock_app = mock.Mock(status=mock_status)
mock_client = mock.Mock()
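# Mapping exercised below: counts matching the desired instances -> Running, fewer available than desired -> Waiting, a bounce still converging (ready/updated out of step with the spec) -> Deploying, and everything at zero -> Stopped.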
assert get_kubernetes_app_deploy_status(
mock_client,
mock_app,
desired_instances=1,
) == KubernetesDeployStatus.Running
assert get_kubernetes_app_deploy_status(
mock_client,
mock_app,
desired_instances=2,
) == KubernetesDeployStatus.Waiting
mock_status = mock.Mock(
replicas=1,
ready_replicas=2,
updated_replicas=1,
)
mock_app = mock.Mock(status=mock_status)
assert get_kubernetes_app_deploy_status(
mock_client,
mock_app,
desired_instances=2,
) == KubernetesDeployStatus.Deploying
mock_status = mock.Mock(
replicas=0,
ready_replicas=0,
updated_replicas=0,
)
mock_app = mock.Mock(status=mock_status)
assert get_kubernetes_app_deploy_status(
mock_client,
mock_app,
desired_instances=0,
) == KubernetesDeployStatus.Stopped
mock_status = mock.Mock(
replicas=1,
ready_replicas=None,
updated_replicas=None,
)
mock_app = mock.Mock(status=mock_status)
assert get_kubernetes_app_deploy_status(
mock_client,
mock_app,
desired_instances=1,
) == KubernetesDeployStatus.Waiting
def test_get_kubernetes_app_by_name():
mock_client = mock.Mock()
mock_deployment = mock.Mock()
mock_client.deployments.read_namespaced_deployment_status.return_value = mock_deployment
assert get_kubernetes_app_by_name('someservice', mock_client) == mock_deployment
assert mock_client.deployments.read_namespaced_deployment_status.called
assert not mock_client.deployments.read_namespaced_stateful_set_status.called
mock_stateful_set = mock.Mock()
mock_client.deployments.read_namespaced_deployment_status.reset_mock()
mock_client.deployments.read_namespaced_deployment_status.side_effect = ApiException(404)
mock_client.deployments.read_namespaced_stateful_set_status.return_value = mock_stateful_set
assert get_kubernetes_app_by_name('someservice', mock_client) == mock_stateful_set
assert mock_client.deployments.read_namespaced_deployment_status.called
assert mock_client.deployments.read_namespaced_stateful_set_status.called
def test_pods_for_service_instance():
mock_client = mock.Mock()
assert pods_for_service_instance(
'kurupt',
'fm',
mock_client,
) == mock_client.core.list_namespaced_pod.return_value.items
def test_get_active_shas_for_service():
mock_pod_list = [
mock.Mock(metadata=mock.Mock(labels={
'config_sha': 'a123',
'git_sha': 'b456',
})),
mock.Mock(metadata=mock.Mock(labels={
'config_sha': 'a123!!!',
'git_sha': 'b456!!!',
})),
mock.Mock(metadata=mock.Mock(labels={
'config_sha': 'a123!!!',
'git_sha': 'b456!!!',
})),
]
assert get_active_shas_for_service(mock_pod_list) == {
'git_sha': {'b456', 'b456!!!'},
'config_sha': {'a123', 'a123!!!'},
}
<reponame>reven86/dava.engine
import sys
import glob, os
import re
import argparse
import subprocess
from contextlib import closing
def GetDavaVersion( pathToFramework ):
os.chdir(pathToFramework)
file = open("Sources/Internal/DAVAVersion.h");
p = re.compile('DAVAENGINE_VERSION "([\w\.]+)"');
davaVersion = p.findall(file.read());
file.close();
davaVersion = "".join(davaVersion)
davaVersion = '[' + davaVersion.replace( '.', '_') + ']'
return davaVersion
def GetGitVersion( pathToFramework ):
os.chdir(pathToFramework)
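# "%ci" is the committer date of the latest commit, e.g. "2016-05-01 12:34:56 +0300"; the lines below keep the date and time, join them with "_" and swap ":" for "-" so the stamp is filesystem-safe.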
gitVersion = str(os.popen( 'git log -1 --format=\"%ci\"' ).read())
gitVersion = gitVersion.rstrip(os.linesep)
gitVersionArray = gitVersion.split(' ')
gitVersion = '_'.join( [ gitVersionArray[0], gitVersionArray[1] ] )
gitVersion = re.sub('[:]','-', gitVersion ).rstrip()
return gitVersion
def ArchiveName( app_name, dava_path, build_number ):
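# Builds names like "MyApp_[1_2_3]_2016-05-01_12-34-56_42" (app name, DAVA version, git commit stamp, build number); parts that are not supplied are simply left out. The example values here are illustrative only.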
archiveName = []
if app_name :
archiveName = [ app_name ]
if dava_path :
if app_name :
versionDava = GetDavaVersion( dava_path )
archiveName += [ versionDava ]
versionGit = GetGitVersion( dava_path )
archiveName += [ versionGit ]
if build_number :
archiveName += [ build_number ]
return '_'.join( archiveName )
def main():
parser = argparse.ArgumentParser()
parser.add_argument( '--app_name', required = True )
parser.add_argument( '--app_path', required = True )
parser.add_argument( '--out_path', required = True )
parser.add_argument( '--dava_path' )
parser.add_argument( '--build_number' )
options = parser.parse_args()
archiveName = 'Desc_' + ArchiveName( options.app_name, options.dava_path, options.build_number )
outPath = os.path.join( options.out_path, archiveName ) + '.txt'
if not os.path.exists( options.out_path ):
os.makedirs( options.out_path )
open(outPath, "w" ).close()
print 'Pack', options.app_name, '->', outPath
if __name__ == '__main__':
main()
<filename>tests/endpoints/steps/get_all_type_games_endpoint_steps.py
from .util.util_neo4j import UtilNeo4j
CODE = "code"
NAME = "name"
LEVEL = "level"
TYPE_GAME_1_EXPECT = "type_game_1"
LEVEL_EXPECT = 50
class ShouldGetAllTypeGameSteps:
def given(self, client, user_id, type_games):
self.user_id = user_id
self.client = client
self.type_games = type_games
self.util_neo4j = UtilNeo4j()
self.util_neo4j.create_user(user_id, user_id)
self.util_neo4j.create_type_games(self.type_games)
self.util_neo4j.create_relationship_played_user_typegame(user_id, type_games)
def when(self):
self.response = self.client.get(f"/apis/typegames/1.0.0?userid={self.user_id}")
def then(self):
assert self.response is not None
type_games = self.response.get_json()
assert CODE not in type_games
count_type_game = 0
for tp in type_games:
if tp[NAME] == TYPE_GAME_1_EXPECT:
assert int(tp[LEVEL]) == LEVEL_EXPECT
count_type_game = count_type_game + 1
assert count_type_game == 1
assert len(type_games) > 0
def teardown(self):
self.util_neo4j.delete_type_games_played_data_test(self.user_id)
from api.models import AlbumComment
from .comment_serializer import CommentSerializer
from .generic_audited_model_serializer import GenericAuditedModelSerializer
class AlbumCommentSerializer(GenericAuditedModelSerializer):
comment = CommentSerializer()
class Meta:
model = AlbumComment
fields = [
'id',
'comment',
'created_at',
'updated_at',
]
# Copyright 2011-2016 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the replica_set_connection module."""
import contextlib
import copy
import random
import sys
import pickle
sys.path[0:0] = [""]
from bson.py3compat import MAXSIZE
from bson.son import SON
from pymongo.errors import ConfigurationError, OperationFailure
from pymongo.message import _maybe_add_read_preference
from pymongo.mongo_client import MongoClient
from pymongo.read_preferences import (ReadPreference, MovingAverage,
Primary, PrimaryPreferred,
Secondary, SecondaryPreferred,
Nearest)
from pymongo.server_description import ServerDescription
from pymongo.server_selectors import readable_server_selector, Selection
from pymongo.server_type import SERVER_TYPE
from pymongo.write_concern import WriteConcern
from test.test_replica_set_client import TestReplicaSetClientBase
from test import (SkipTest,
client_context,
unittest,
db_user,
db_pwd)
from test.utils import connected, single_client, one, wait_until, rs_client
from test.version import Version
class TestSelections(unittest.TestCase):
@client_context.require_connection
def test_bool(self):
client = single_client()
wait_until(lambda: client.address, "discover primary")
selection = Selection.from_topology_description(
client._topology.description)
self.assertTrue(selection)
self.assertFalse(selection.with_server_descriptions([]))
class TestReadPreferenceObjects(unittest.TestCase):
prefs = [Primary(),
Secondary(),
Nearest(tag_sets=[{'a': 1}, {'b': 2}]),
SecondaryPreferred(max_staleness=30)]
def test_pickle(self):
for pref in self.prefs:
self.assertEqual(pref, pickle.loads(pickle.dumps(pref)))
def test_copy(self):
for pref in self.prefs:
self.assertEqual(pref, copy.copy(pref))
class TestReadPreferencesBase(TestReplicaSetClientBase):
def setUp(self):
super(TestReadPreferencesBase, self).setUp()
# Insert some data so we can use cursors in read_from_which_host
self.client.pymongo_test.test.drop()
self.client.get_database(
"pymongo_test",
write_concern=WriteConcern(w=self.w)).test.insert_many(
[{'_id': i} for i in range(10)])
self.addCleanup(self.client.pymongo_test.test.drop)
def read_from_which_host(self, client):
"""Do a find() on the client and return which host was used
"""
cursor = client.pymongo_test.test.find()
next(cursor)
return cursor.address
def read_from_which_kind(self, client):
"""Do a find() on the client and return 'primary' or 'secondary'
depending on which the client used.
"""
address = self.read_from_which_host(client)
if address == client.primary:
return 'primary'
elif address in client.secondaries:
return 'secondary'
else:
self.fail(
'Cursor used address %s, expected either primary '
'%s or secondaries %s' % (
address, client.primary, client.secondaries))
def assertReadsFrom(self, expected, **kwargs):
c = rs_client(**kwargs)
wait_until(
lambda: len(c.nodes - c.arbiters) == self.w,
"discovered all nodes")
used = self.read_from_which_kind(c)
self.assertEqual(expected, used, 'Cursor used %s, expected %s' % (
used, expected))
class TestSingleSlaveOk(TestReadPreferencesBase):
def test_reads_from_secondary(self):
host, port = next(iter(self.client.secondaries))
# Direct connection to a secondary.
client = single_client(host, port)
self.assertFalse(client.is_primary)
# Regardless of read preference, we should be able to do
# "reads" with a direct connection to a secondary.
# See server-selection.rst#topology-type-single.
self.assertEqual(client.read_preference, ReadPreference.PRIMARY)
db = client.pymongo_test
coll = db.test
# Test find and find_one.
self.assertIsNotNone(coll.find_one())
self.assertEqual(10, len(list(coll.find())))
# Test some database helpers.
self.assertIsNotNone(db.collection_names())
self.assertIsNotNone(db.validate_collection("test"))
self.assertIsNotNone(db.command("count", "test"))
# Test some collection helpers.
self.assertEqual(10, coll.count())
self.assertEqual(10, len(coll.distinct("_id")))
self.assertIsNotNone(coll.aggregate([]))
self.assertIsNotNone(coll.index_information())
# Test some "magic" namespace helpers.
self.assertIsNotNone(db.current_op())
class TestReadPreferences(TestReadPreferencesBase):
def test_mode_validation(self):
for mode in (ReadPreference.PRIMARY,
ReadPreference.PRIMARY_PREFERRED,
ReadPreference.SECONDARY,
ReadPreference.SECONDARY_PREFERRED,
ReadPreference.NEAREST):
self.assertEqual(
mode,
rs_client(read_preference=mode).read_preference)
self.assertRaises(
TypeError,
rs_client, read_preference='foo')
def test_tag_sets_validation(self):
S = Secondary(tag_sets=[{}])
self.assertEqual(
[{}],
rs_client(read_preference=S).read_preference.tag_sets)
S = Secondary(tag_sets=[{'k': 'v'}])
self.assertEqual(
[{'k': 'v'}],
rs_client(read_preference=S).read_preference.tag_sets)
S = Secondary(tag_sets=[{'k': 'v'}, {}])
self.assertEqual(
[{'k': 'v'}, {}],
rs_client(read_preference=S).read_preference.tag_sets)
self.assertRaises(ValueError, Secondary, tag_sets=[])
# One dict not ok, must be a list of dicts
self.assertRaises(TypeError, Secondary, tag_sets={'k': 'v'})
self.assertRaises(TypeError, Secondary, tag_sets='foo')
self.assertRaises(TypeError, Secondary, tag_sets=['foo'])
def test_threshold_validation(self):
self.assertEqual(17, rs_client(
localThresholdMS=17
).local_threshold_ms)
self.assertEqual(42, rs_client(
localThresholdMS=42
).local_threshold_ms)
self.assertEqual(666, rs_client(
localthresholdms=666
).local_threshold_ms)
self.assertEqual(0, rs_client(
localthresholdms=0
).local_threshold_ms)
self.assertRaises(ValueError,
rs_client,
localthresholdms=-1)
def test_zero_latency(self):
ping_times = set()
# Generate unique ping times.
while len(ping_times) < len(self.client.nodes):
ping_times.add(random.random())
for ping_time, host in zip(ping_times, self.client.nodes):
ServerDescription._host_to_round_trip_time[host] = ping_time
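# Every node gets a distinct fake round-trip time; with localThresholdMS=0 the "nearest" selection window collapses to the single fastest member, so the repeated reads below must keep hitting the same host.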
try:
client = connected(
rs_client(readPreference='nearest', localThresholdMS=0))
wait_until(
lambda: client.nodes == self.client.nodes,
"discovered all nodes")
host = self.read_from_which_host(client)
for _ in range(5):
self.assertEqual(host, self.read_from_which_host(client))
finally:
ServerDescription._host_to_round_trip_time.clear()
def test_primary(self):
self.assertReadsFrom(
'primary', read_preference=ReadPreference.PRIMARY)
def test_primary_with_tags(self):
# Tags not allowed with PRIMARY
self.assertRaises(
ConfigurationError,
rs_client, tag_sets=[{'dc': 'ny'}])
def test_primary_preferred(self):
self.assertReadsFrom(
'primary', read_preference=ReadPreference.PRIMARY_PREFERRED)
def test_secondary(self):
self.assertReadsFrom(
'secondary', read_preference=ReadPreference.SECONDARY)
def test_secondary_preferred(self):
self.assertReadsFrom(
'secondary', read_preference=ReadPreference.SECONDARY_PREFERRED)
def test_nearest(self):
# With high localThresholdMS, expect to read from any
# member
c = rs_client(
read_preference=ReadPreference.NEAREST,
localThresholdMS=10000) # 10 seconds
data_members = set(self.hosts).difference(set(self.arbiters))
# This is a probabilistic test; track which members we've read from so
# far, and keep reading until we've used all the members or give up.
# Chance of using only 2 of 3 members 10k times if there's no bug =
# 3 * (2/3)**10000, very low.
used = set()
i = 0
while data_members.difference(used) and i < 10000:
address = self.read_from_which_host(c)
used.add(address)
i += 1
not_used = data_members.difference(used)
latencies = ', '.join(
'%s: %dms' % (server.description.address,
server.description.round_trip_time)
for server in c._get_topology().select_servers(
readable_server_selector))
self.assertFalse(
not_used,
"Expected to use primary and all secondaries for mode NEAREST,"
" but didn't use %s\nlatencies: %s" % (not_used, latencies))
class ReadPrefTester(MongoClient):
def __init__(self, *args, **kwargs):
self.has_read_from = set()
client_options = client_context.ssl_client_options.copy()
client_options.update(kwargs)
super(ReadPrefTester, self).__init__(*args, **client_options)
@contextlib.contextmanager
def _socket_for_reads(self, read_preference):
context = super(ReadPrefTester, self)._socket_for_reads(read_preference)
with context as (sock_info, slave_ok):
self.record_a_read(sock_info.address)
yield sock_info, slave_ok
def record_a_read(self, address):
server = self._get_topology().select_server_by_address(address, 0)
self.has_read_from.add(server)
_PREF_MAP = [
(Primary, SERVER_TYPE.RSPrimary),
(PrimaryPreferred, SERVER_TYPE.RSPrimary),
(Secondary, SERVER_TYPE.RSSecondary),
(SecondaryPreferred, SERVER_TYPE.RSSecondary),
(Nearest, 'any')
]
class TestCommandAndReadPreference(TestReplicaSetClientBase):
@classmethod
def setUpClass(cls):
super(TestCommandAndReadPreference, cls).setUpClass()
cls.c = ReadPrefTester(
client_context.pair,
replicaSet=cls.name,
# Ignore round trip times, to test ReadPreference modes only.
localThresholdMS=1000*1000)
if client_context.auth_enabled:
cls.c.admin.authenticate(db_user, db_pwd)
cls.client_version = Version.from_client(cls.c)
# mapReduce and group fail with no collection
coll = cls.c.pymongo_test.get_collection(
'test', write_concern=WriteConcern(w=cls.w))
coll.insert_one({})
@classmethod
def tearDownClass(cls):
cls.c.drop_database('pymongo_test')
def executed_on_which_server(self, client, fn, *args, **kwargs):
"""Execute fn(*args, **kwargs) and return the Server instance used."""
client.has_read_from.clear()
fn(*args, **kwargs)
self.assertEqual(1, len(client.has_read_from))
return one(client.has_read_from)
def assertExecutedOn(self, server_type, client, fn, *args, **kwargs):
server = self.executed_on_which_server(client, fn, *args, **kwargs)
self.assertEqual(SERVER_TYPE._fields[server_type],
SERVER_TYPE._fields[server.description.server_type])
def _test_fn(self, server_type, fn):
for _ in range(10):
if server_type == 'any':
used = set()
for _ in range(1000):
server = self.executed_on_which_server(self.c, fn)
used.add(server.description.address)
if len(used) == len(self.c.secondaries) + 1:
# Success
break
unused = self.c.secondaries.union(
set([self.c.primary])
).difference(used)
if unused:
self.fail(
"Some members not used for NEAREST: %s" % (
unused))
else:
self.assertExecutedOn(server_type, self.c, fn)
def _test_primary_helper(self, func):
# Helpers that ignore read preference.
self._test_fn(SERVER_TYPE.RSPrimary, func)
def _test_coll_helper(self, secondary_ok, coll, meth, *args, **kwargs):
for mode, server_type in _PREF_MAP:
new_coll = coll.with_options(read_preference=mode())
func = lambda: getattr(new_coll, meth)(*args, **kwargs)
if secondary_ok:
self._test_fn(server_type, func)
else:
self._test_fn(SERVER_TYPE.RSPrimary, func)
def test_command(self):
# Test that the generic command helper obeys the read preference
# passed to it.
for mode, server_type in _PREF_MAP:
func = lambda: self.c.pymongo_test.command('dbStats',
read_preference=mode())
self._test_fn(server_type, func)
def test_create_collection(self):
# Collections should be created on primary, obviously
self._test_primary_helper(
lambda: self.c.pymongo_test.create_collection(
'some_collection%s' % random.randint(0, MAXSIZE)))
def test_drop_collection(self):
self._test_primary_helper(
lambda: self.c.pymongo_test.drop_collection('some_collection'))
self._test_primary_helper(
lambda: self.c.pymongo_test.some_collection.drop())
def test_group(self):
self._test_coll_helper(True, self.c.pymongo_test.test, 'group',
{'a': 1}, {}, {}, 'function() { }')
def test_map_reduce(self):
self._test_coll_helper(False, self.c.pymongo_test.test, 'map_reduce',
'function() { }', 'function() { }',
{'inline': 1})
def test_inline_map_reduce(self):
self._test_coll_helper(True, self.c.pymongo_test.test,
'inline_map_reduce',
'function() { }', 'function() { }')
def test_count(self):
self._test_coll_helper(True, self.c.pymongo_test.test, 'count')
def test_distinct(self):
self._test_coll_helper(True, self.c.pymongo_test.test, 'distinct', 'a')
def test_aggregate(self):
if self.client_version.at_least(2, 1, 0):
self._test_coll_helper(True, self.c.pymongo_test.test,
'aggregate',
[{'$project': {'_id': 1}}])
class TestMovingAverage(unittest.TestCase):
def test_moving_average(self):
avg = MovingAverage()
self.assertIsNone(avg.get())
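# The expected values correspond to an exponentially weighted average where each new sample gets weight 0.2: 0.8*10 + 0.2*20 = 12, then 0.8*12 + 0.2*30 = 15.6.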
avg.add_sample(10)
self.assertAlmostEqual(10, avg.get())
avg.add_sample(20)
self.assertAlmostEqual(12, avg.get())
avg.add_sample(30)
self.assertAlmostEqual(15.6, avg.get())
class TestMongosAndReadPreference(unittest.TestCase):
def test_read_preference_document(self):
pref = Primary()
self.assertEqual(
pref.document,
{'mode': 'primary'})
pref = PrimaryPreferred()
self.assertEqual(
pref.document,
{'mode': 'primaryPreferred'})
pref = PrimaryPreferred(tag_sets=[{'dc': 'sf'}])
self.assertEqual(
pref.document,
{'mode': 'primaryPreferred', 'tags': [{'dc': 'sf'}]})
pref = PrimaryPreferred(
tag_sets=[{'dc': 'sf'}], max_staleness=30)
self.assertEqual(
pref.document,
{'mode': 'primaryPreferred',
'tags': [{'dc': 'sf'}],
'maxStalenessMS': 30000})
pref = Secondary()
self.assertEqual(
pref.document,
{'mode': 'secondary'})
pref = Secondary(tag_sets=[{'dc': 'sf'}])
self.assertEqual(
pref.document,
{'mode': 'secondary', 'tags': [{'dc': 'sf'}]})
pref = Secondary(
tag_sets=[{'dc': 'sf'}], max_staleness=30)
self.assertEqual(
pref.document,
{'mode': 'secondary',
'tags': [{'dc': 'sf'}],
'maxStalenessMS': 30000})
pref = SecondaryPreferred()
self.assertEqual(
pref.document,
{'mode': 'secondaryPreferred'})
pref = SecondaryPreferred(tag_sets=[{'dc': 'sf'}])
self.assertEqual(
pref.document,
{'mode': 'secondaryPreferred', 'tags': [{'dc': 'sf'}]})
pref = SecondaryPreferred(
tag_sets=[{'dc': 'sf'}], max_staleness=30)
self.assertEqual(
pref.document,
{'mode': 'secondaryPreferred',
'tags': [{'dc': 'sf'}],
'maxStalenessMS': 30000})
pref = Nearest()
self.assertEqual(
pref.document,
{'mode': 'nearest'})
pref = Nearest(tag_sets=[{'dc': 'sf'}])
self.assertEqual(
pref.document,
{'mode': 'nearest', 'tags': [{'dc': 'sf'}]})
pref = Nearest(
tag_sets=[{'dc': 'sf'}], max_staleness=30)
self.assertEqual(
pref.document,
{'mode': 'nearest',
'tags': [{'dc': 'sf'}],
'maxStalenessMS': 30000})
def test_maybe_add_read_preference(self):
# Primary doesn't add $readPreference
out = _maybe_add_read_preference({}, Primary())
self.assertEqual(out, {})
pref = PrimaryPreferred()
out = _maybe_add_read_preference({}, pref)
self.assertEqual(
out, SON([("$query", {}), ("$readPreference", pref.document)]))
pref = PrimaryPreferred(tag_sets=[{'dc': 'nyc'}])
out = _maybe_add_read_preference({}, pref)
self.assertEqual(
out, SON([("$query", {}), ("$readPreference", pref.document)]))
pref = Secondary()
out = _maybe_add_read_preference({}, pref)
self.assertEqual(
out, SON([("$query", {}), ("$readPreference", pref.document)]))
pref = Secondary(tag_sets=[{'dc': 'nyc'}])
out = _maybe_add_read_preference({}, pref)
self.assertEqual(
out, SON([("$query", {}), ("$readPreference", pref.document)]))
# SecondaryPreferred without tag_sets or max_staleness doesn't add
# $readPreference
pref = SecondaryPreferred()
out = _maybe_add_read_preference({}, pref)
self.assertEqual(out, {})
pref = SecondaryPreferred(tag_sets=[{'dc': 'nyc'}])
out = _maybe_add_read_preference({}, pref)
self.assertEqual(
out, SON([("$query", {}), ("$readPreference", pref.document)]))
pref = SecondaryPreferred(max_staleness=120)
out = _maybe_add_read_preference({}, pref)
self.assertEqual(
out, SON([("$query", {}), ("$readPreference", pref.document)]))
pref = Nearest()
out = _maybe_add_read_preference({}, pref)
self.assertEqual(
out, SON([("$query", {}), ("$readPreference", pref.document)]))
pref = Nearest(tag_sets=[{'dc': 'nyc'}])
out = _maybe_add_read_preference({}, pref)
self.assertEqual(
out, SON([("$query", {}), ("$readPreference", pref.document)]))
criteria = SON([("$query", {}), ("$orderby", SON([("_id", 1)]))])
pref = Nearest()
out = _maybe_add_read_preference(criteria, pref)
self.assertEqual(
out,
SON([("$query", {}),
("$orderby", SON([("_id", 1)])),
("$readPreference", pref.document)]))
pref = Nearest(tag_sets=[{'dc': 'nyc'}])
out = _maybe_add_read_preference(criteria, pref)
self.assertEqual(
out,
SON([("$query", {}),
("$orderby", SON([("_id", 1)])),
("$readPreference", pref.document)]))
@client_context.require_mongos
def test_mongos(self):
shard = client_context.client.config.shards.find_one()['host']
num_members = shard.count(',') + 1
if num_members == 1:
raise SkipTest("Need a replica set shard to test.")
coll = client_context.client.pymongo_test.get_collection(
"test",
write_concern=WriteConcern(w=num_members))
coll.drop()
res = coll.insert_many([{} for _ in range(5)])
first_id = res.inserted_ids[0]
last_id = res.inserted_ids[-1]
# Note - this isn't a perfect test since there's no way to
# tell what shard member a query ran on.
for pref in (Primary(),
PrimaryPreferred(),
Secondary(),
SecondaryPreferred(),
Nearest()):
qcoll = coll.with_options(read_preference=pref)
results = list(qcoll.find().sort([("_id", 1)]))
self.assertEqual(first_id, results[0]["_id"])
self.assertEqual(last_id, results[-1]["_id"])
results = list(qcoll.find().sort([("_id", -1)]))
self.assertEqual(first_id, results[-1]["_id"])
self.assertEqual(last_id, results[0]["_id"])
@client_context.require_mongos
@client_context.require_version_min(3, 3, 12)
def test_mongos_max_staleness(self):
# Sanity check that we're sending maxStalenessMS
coll = client_context.client.pymongo_test.get_collection(
"test", read_preference=SecondaryPreferred(max_staleness=120))
# No error
coll.find_one()
coll = client_context.client.pymongo_test.get_collection(
"test", read_preference=SecondaryPreferred(max_staleness=10))
try:
coll.find_one()
except OperationFailure as exc:
self.assertEqual(160, exc.code)
else:
self.fail("mongos accepted invalid staleness")
coll = single_client(
readPreference='secondaryPreferred',
maxStalenessMS=120000).pymongo_test.test
# No error
coll.find_one()
coll = single_client(
readPreference='secondaryPreferred',
maxStalenessMS=10000).pymongo_test.test
try:
coll.find_one()
except OperationFailure as exc:
self.assertEqual(160, exc.code)
else:
self.fail("mongos accepted invalid staleness")
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
3350872 | import os.path
import os
import pickle
import time
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal, assert_allclose
import scipy.stats as stats
# Before removing what appear to be unused imports, think twice.
# Some of the tests use eval, which requires the imports.
import refnx.reflect._reflect as _reflect
from refnx.analysis import (
Transform,
Objective,
CurveFitter,
Parameter,
Interval,
Parameters,
)
from refnx.reflect import (
SLD,
ReflectModel,
MixedReflectModel,
reflectivity,
Structure,
Slab,
FresnelTransform,
choose_dq_type,
use_reflect_backend,
)
import refnx.reflect.reflect_model as reflect_model
from refnx.dataset import ReflectDataset
from refnx._lib import MapWrapper
BACKENDS = reflect_model.available_backends()
class TestReflect(object):
def setup_method(self):
self.pth = os.path.dirname(os.path.abspath(__file__))
sio2 = SLD(3.47, name="SiO2")
air = SLD(0, name="air")
si = SLD(2.07, name="Si")
d2o = SLD(6.36, name="D2O")
polymer = SLD(1, name="polymer")
self.structure = air | sio2(100, 2) | si(0, 3)
theoretical = np.loadtxt(os.path.join(self.pth, "theoretical.txt"))
qvals, rvals = np.hsplit(theoretical, 2)
self.qvals = qvals.flatten()
self.rvals = rvals.flatten()
# e361 is an older dataset, but well characterised
self.structure361 = si | sio2(10, 4) | polymer(200, 3) | d2o(0, 3)
self.model361 = ReflectModel(self.structure361, bkg=2e-5)
self.model361.scale.vary = True
self.model361.bkg.vary = True
self.model361.scale.range(0.1, 2)
self.model361.bkg.range(0, 5e-5)
# d2o
self.structure361[-1].sld.real.vary = True
self.structure361[-1].sld.real.range(6, 6.36)
self.structure361[1].thick.vary = True
self.structure361[1].thick.range(5, 20)
self.structure361[2].thick.vary = True
self.structure361[2].thick.range(100, 220)
self.structure361[2].sld.real.vary = True
self.structure361[2].sld.real.range(0.2, 1.5)
e361 = ReflectDataset(os.path.join(self.pth, "e361r.txt"))
self.qvals361, self.rvals361, self.evals361 = (
e361.x,
e361.y,
e361.y_err,
)
def test_abeles(self):
slabs = self.structure.slabs()[..., :4]
for backend in BACKENDS:
# test reflectivity calculation with values generated from Motofit
with use_reflect_backend(backend) as abeles:
calc = abeles(self.qvals, slabs)
assert_almost_equal(calc, self.rvals)
def test_noncontig_abeles(self):
# test for non-contiguous Q values
tempq = self.qvals[0::5]
slabs = self.structure.slabs()[..., :4]
assert tempq.flags["C_CONTIGUOUS"] is False
for backend in BACKENDS:
with use_reflect_backend(backend) as abeles:
calc = abeles(tempq, slabs)
assert_almost_equal(calc, self.rvals[0::5])
def test_abeles_multithreaded(self):
slabs = self.structure.slabs()[..., :4]
for backend in BACKENDS:
# test reflectivity calculation with values generated from Motofit
with use_reflect_backend(backend) as abeles:
calc = abeles(self.qvals, slabs, threads=4)
assert_almost_equal(calc, self.rvals)
def test_available_backends(self):
assert "python" in BACKENDS
assert "c" in BACKENDS
import refnx.reflect._creflect as _creflect
import refnx.reflect._reflect as _reflect
assert _reflect.__file__ != _creflect.__file__
if "cython" in BACKENDS:
import refnx.reflect._cyreflect as _cyreflect
assert _creflect.__file__ != _cyreflect.__file__
def test_first_principles(self):
# Test a first principles reflectivity calculation, rather than
# relying on a previous calculation from Motofit code.
# Here we only examine Fresnel reflectivity from an infinitely
# sharp interface, we do not examine a rough surface. This is
# tested by profile slicing in test_structure.
def kn(q, sld_layer, sld_fronting):
# wave vector in a given layer
kvec = np.zeros_like(q, np.complex128)
sld = complex(sld_layer - sld_fronting) * 1.0e-6
kvec[:] = np.sqrt(q[:] ** 2.0 / 4.0 - 4.0 * np.pi * sld)
return kvec
q = np.linspace(0.001, 1.0, 1001)
# Is the fresnel reflectivity correct?
sld1 = 2.07
sld2 = 6.36
# first principles calcn
kf = kn(q, sld1, sld1)
kb = kn(q, sld2, sld1)
reflectance = (kf - kb) / (kf + kb)
reflectivity = reflectance * np.conj(reflectance)
# now from refnx code
struct = SLD(sld1)(0, 0) | SLD(sld2)(0, 0)
slabs = struct.slabs()[..., :4]
for backend in BACKENDS:
with use_reflect_backend(backend) as abeles:
assert_allclose(abeles(q, slabs), reflectivity, rtol=1e-14)
# reverse the direction
kf = kn(q, sld2, sld2)
kb = kn(q, sld1, sld2)
reflectance = (kf - kb) / (kf + kb)
reflectivity = reflectance * np.conj(reflectance)
# now from refnx code
struct = SLD(sld2)(0, 0) | SLD(sld1)(0, 0)
slabs = struct.slabs()[..., :4]
for backend in BACKENDS:
with use_reflect_backend(backend) as abeles:
assert_allclose(abeles(q, slabs), reflectivity, rtol=1e-14)
def test_scale_bkg_abeles(self):
s = self.structure.slabs()[..., :4]
calcs = []
for backend in BACKENDS:
with use_reflect_backend(backend) as abeles:
calc = abeles(self.qvals, s, scale=2.0)
calcs.append(calc)
for calc in calcs[1:]:
assert_allclose(calc, calcs[0])
calcs = []
for backend in BACKENDS:
with use_reflect_backend(backend) as abeles:
calc = abeles(self.qvals, s, scale=0.5, bkg=0.1)
calcs.append(calc)
for calc in calcs[1:]:
assert_allclose(calc, calcs[0])
calcs = []
for backend in BACKENDS:
with use_reflect_backend(backend) as abeles:
calc = abeles(self.qvals, s, scale=0.5, bkg=0.1, threads=2)
calcs.append(calc)
for calc in calcs[1:]:
assert_allclose(calc, calcs[0])
"""
@np.testing.decorators.knownfailure
def test_cabeles_parallelised(self):
# I suppose this could fail if someone doesn't have a multicore
# computer
if not TEST_C_REFLECT:
return
coefs = np.array([[0, 0, 0, 0],
[300, 3, 1e-3, 3],
[10, 3.47, 1e-3, 3],
[0, 6.36, 0, 3]])
x = np.linspace(0.01, 0.2, 1000000)
pstart = time.time()
_creflect.abeles(x, coefs, threads=0)
pfinish = time.time()
sstart = time.time()
_creflect.abeles(x, coefs, threads=1)
sfinish = time.time()
print(sfinish - sstart, pfinish - pstart)
assert_(0.7 * (sfinish - sstart) > (pfinish - pstart))
"""
def test_compare_abeles0(self):
# test one layer system against the python implementation
layer0 = np.array([[0, 2.07, 0.01, 3], [0, 6.36, 0.1, 3]])
with use_reflect_backend("python") as abeles:
calc1 = abeles(self.qvals, layer0, scale=0.99, bkg=1e-8)
for backend in BACKENDS:
with use_reflect_backend(backend) as abeles:
calc2 = abeles(self.qvals, layer0, scale=0.99, bkg=1e-8)
assert_almost_equal(calc1, calc2)
# test a negative background
with use_reflect_backend("python") as abeles:
calc1 = abeles(self.qvals, layer0, scale=0.99, bkg=-5e-7)
for backend in BACKENDS:
with use_reflect_backend(backend) as abeles:
calc2 = abeles(self.qvals, layer0, scale=0.99, bkg=-5e-7)
assert_almost_equal(calc1, calc2)
def test_compare_abeles2(self):
# test two layer system against the python implementation
layer2 = np.array(
[
[0, 2.07, 0.01, 3],
[10, 3.47, 0.01, 3],
[100, 1.0, 0.01, 4],
[0, 6.36, 0.1, 3],
]
)
with use_reflect_backend("python") as abeles:
calc1 = abeles(self.qvals, layer2, scale=0.99, bkg=1e-8)
for backend in BACKENDS:
with use_reflect_backend(backend) as abeles:
calc2 = abeles(self.qvals, layer2, scale=0.99, bkg=1e-8)
assert_almost_equal(calc1, calc2)
def test_abeles_absorption(self):
# https://github.com/andyfaff/refl1d_analysis/tree/master/notebooks
q = np.linspace(0.008, 0.05, 500)
depth = [0, 850, 0]
rho = [2.067, 4.3, 6.0]
irho_zero = [0.0, 0.1, 0.0]
refnx_sigma = [np.nan, 35, 5.0]
w_zero = np.c_[depth, rho, irho_zero, refnx_sigma]
with use_reflect_backend("python") as abeles:
calc1 = abeles(q, w_zero)
for backend in BACKENDS:
with use_reflect_backend(backend) as abeles:
calc2 = abeles(q, w_zero)
assert_almost_equal(calc1, calc2)
def test_abeles_absorption2(self):
# https://github.com/andyfaff/refl1d_analysis/tree/master/notebooks
# this has an appreciable notch just below the critical edge
refl1d = np.load(os.path.join(self.pth, "absorption.npy"))
q = np.geomspace(0.005, 0.3, 201)
depth = [0, 1200, 0]
rho = [2.07, 4.66, 6.36]
irho = [0, 0.016, 0]
refnx_sigma = [np.nan, 10, 3]
slabs = np.c_[depth, rho, irho, refnx_sigma]
for backend in BACKENDS:
with use_reflect_backend(backend) as abeles:
calc = abeles(q, slabs)
assert_almost_equal(calc, refl1d[1])
def test_compare_refl1d(self):
# refl1d calculated with:
# from refl1d import abeles
# x = np.linspace(0.005, 0.5, 1001)
# z = abeles.refl(x / 2,
# [0, 100, 200, 0],
# [2.07, 3.45, 5., 6.],
# irho=[0.0, 0.1, 0.01, 0],
# sigma=[3, 1, 5, 0])
# a = z.real ** 2 + z.imag ** 2
layers = np.array(
[
[0, 2.07, 0, 0],
[100, 3.45, 0.1, 3],
[200, 5.0, 0.01, 1],
[0, 6.0, 0, 5],
]
)
x = np.linspace(0.005, 0.5, 1001)
refl1d = np.load(os.path.join(self.pth, "refl1d.npy"))
for backend in BACKENDS:
with use_reflect_backend(backend) as abeles:
calc = abeles(x, layers)
assert_almost_equal(calc, refl1d)
def test_multilayer(self):
x = np.geomspace(0.005, 0.5, 101)
air = np.array([0, 0, 0, 0])
unit_cell = np.array([[30, -2.0, 0, 3], [70, 8.0, 0, 3]])
backing = np.array([0, 2.07, 0.0001, 3])
def get_w(repeats=1):
if repeats:
filling = np.vstack([unit_cell] * repeats)
return np.vstack([air, filling, backing])
else:
return np.vstack([air, backing])
backends = list(BACKENDS)
backends.remove("python")
f_python = reflect_model.get_reflect_backend("python")
for i in range(40):
w = get_w(i)
canonical_r = f_python(x, w)
for backend in backends:
with use_reflect_backend(backend) as abeles:
calc = abeles(x, w)
try:
assert_allclose(
calc, canonical_r, atol=5.0e-15, rtol=8.0e-15
)
except AssertionError as e:
print(backend, i)
raise e
def test_use_reflectivity_backend(self):
import refnx.reflect._creflect as _creflect
import refnx.reflect._reflect as _reflect
reflect_model.abeles = _reflect.abeles
with use_reflect_backend("c") as f:
assert f == _creflect.abeles
assert reflect_model.abeles == _creflect.abeles
assert reflect_model.abeles == _reflect.abeles
# this shouldn't error if pyopencl is not installed
# it should just fall back to 'c'
reflect_model.use_reflect_backend("pyopencl")
def test_reverse(self):
# check that the structure reversal works.
sio2 = SLD(3.47, name="SiO2")
air = SLD(0, name="air")
si = SLD(2.07, name="Si")
structure = si | sio2(100, 3) | air(0, 2)
structure.reverse_structure = True
assert_equal(structure.slabs(), self.structure.slabs())
calc = structure.reflectivity(self.qvals)
assert_almost_equal(calc, self.rvals)
def test_abeles_reshape(self):
# reflectivity should be able to deal with multidimensional input
s = self.structure.slabs()[..., :4]
reshaped_q = np.reshape(self.qvals, (2, 250))
reshaped_r = self.rvals.reshape(2, 250)
for backend in BACKENDS:
with use_reflect_backend(backend) as abeles:
calc = abeles(reshaped_q, s)
assert_equal(calc.shape, reshaped_r.shape)
assert_almost_equal(calc, reshaped_r, 15)
def test_reflectivity_model(self):
# test reflectivity calculation with values generated from Motofit
rff = ReflectModel(self.structure, dq=0)
# the default for number of threads should be -1
assert rff.threads == -1
model = rff.model(self.qvals)
assert_almost_equal(model, self.rvals)
def test_mixed_reflectivity_model(self):
# test that mixed area model works ok.
# should be same as data generated from Motofit
sio2 = SLD(3.47, name="SiO2")
air = SLD(0, name="air")
si = SLD(2.07, name="Si")
s1 = air | sio2(100, 2) | si(0, 3)
s2 = air | sio2(100, 2) | si(0, 3)
mixed_model = MixedReflectModel([s1, s2], [0.4, 0.3], dq=0)
assert_almost_equal(mixed_model(self.qvals), self.rvals * 0.7)
# now try out the mixed model compared to sum of individual models
# with smearing, but no background.
s1 = air | sio2(100, 2) | si(0, 2)
s2 = air | sio2(50, 3) | si(0, 1)
mixed_model = MixedReflectModel([s1, s2], [0.4, 0.3], dq=5, bkg=0)
indiv1 = ReflectModel(s1, bkg=0)
indiv2 = ReflectModel(s2, bkg=0)
assert_almost_equal(
mixed_model(self.qvals),
(0.4 * indiv1(self.qvals) + 0.3 * indiv2(self.qvals)),
)
# now try out the mixed model compared to sum of individual models
# with smearing, and background.
mixed_model.bkg.value = 1e-7
assert_almost_equal(
mixed_model(self.qvals),
(0.4 * indiv1(self.qvals) + 0.3 * indiv2(self.qvals) + 1e-7),
)
def test_parallel_calculator(self):
# test that parallel abeles work with a mapper
q = np.linspace(0.01, 0.5, 1000).reshape(20, 50)
p0 = np.array(
[
[0, 2.07, 0, 0],
[100, 3.47, 0, 3],
[500, -0.5, 1e-3, 3],
[0, 6.36, 0, 3],
]
)
for backend in BACKENDS:
if backend == "pyopencl":
# can't do pyopencl in a multiprocessing.Pool
continue
with use_reflect_backend(backend) as abeles:
wf = Wrapper_fn(abeles, p0)
y = map(wf, q)
with MapWrapper(2) as f:
z = f(wf, q)
assert_equal(z, np.array(list(y)))
def test_parallel_objective(self):
# check that a parallel objective works without issue
# (it could be possible that parallel evaluation fails at a higher
# level in e.g. emcee or in scipy.optimize.differential_evolution)
model = self.model361
model.threads = 2
objective = Objective(
model,
(self.qvals361, self.rvals361, self.evals361),
transform=Transform("logY"),
)
p0 = np.array(objective.varying_parameters())
cov = objective.covar()
walkers = np.random.multivariate_normal(
np.atleast_1d(p0), np.atleast_2d(cov), size=(100)
)
map_logl = np.array(list(map(objective.logl, walkers)))
map_chi2 = np.array(list(map(objective.chisqr, walkers)))
wf = Wrapper_fn2(model.model, p0)
map_mod = np.array(list(map(wf, walkers)))
with MapWrapper(2) as g:
mapw_mod = g(wf, walkers)
mapw_logl = g(objective.logl, walkers)
mapw_chi2 = g(objective.chisqr, walkers)
assert_allclose(mapw_logl, map_logl)
assert_allclose(mapw_chi2, map_chi2)
assert_allclose(mapw_mod, map_mod)
def test_reflectivity_fit(self):
# a smoke test to make sure the reflectivity fit proceeds
model = self.model361
objective = Objective(
model,
(self.qvals361, self.rvals361, self.evals361),
transform=Transform("logY"),
)
fitter = CurveFitter(objective)
with np.errstate(invalid="raise"):
fitter.fit("differential_evolution")
def test_model_pickle(self):
model = self.model361
model.dq = 5.0
model.dq_type = "constant"
pkl = pickle.dumps(model)
unpkl = pickle.loads(pkl)
assert isinstance(unpkl, ReflectModel)
for param in unpkl.parameters.flattened():
try:
assert isinstance(param, Parameter)
except AssertionError:
raise AssertionError(type(param))
assert unpkl.dq_type == "constant"
def test_reflectivity_emcee(self):
model = self.model361
model.dq = 5.0
objective = Objective(
model,
(self.qvals361, self.rvals361, self.evals361),
transform=Transform("logY"),
)
fitter = CurveFitter(objective, nwalkers=100)
assert len(objective.generative().shape) == 1
assert len(objective.residuals().shape) == 1
res = fitter.fit("least_squares")
res_mcmc = fitter.sample(
steps=5, nthin=10, random_state=1, verbose=False
)
mcmc_val = [mcmc_result.median for mcmc_result in res_mcmc]
assert_allclose(mcmc_val, res.x, rtol=0.05)
# mcmc_stderr = [mcmc_result.stderr for mcmc_result in res_mcmc]
# assert_allclose(mcmc_stderr[1:], res.stderr[1:], rtol=0.25)
def test_smearedabeles(self):
# test smeared reflectivity calculation with values generated from
        # Motofit (quadrature precision order = 13)
theoretical = np.loadtxt(
os.path.join(self.pth, "smeared_theoretical.txt")
)
qvals, rvals, dqvals = np.hsplit(theoretical, 3)
"""
the order of the quadrature precision used to create these smeared
values in Motofit was 13.
Do the same here
"""
rff = ReflectModel(self.structure, quad_order=13)
calc = rff.model(qvals.flatten(), x_err=dqvals.flatten())
assert_almost_equal(rvals.flatten(), calc)
def test_smearedabeles_reshape(self):
# test smeared reflectivity calculation with values generated from
        # Motofit (quadrature precision order = 13)
theoretical = np.loadtxt(
os.path.join(self.pth, "smeared_theoretical.txt")
)
qvals, rvals, dqvals = np.hsplit(theoretical, 3)
"""
the order of the quadrature precision used to create these smeared
values in Motofit was 13.
Do the same here
"""
reshaped_q = np.reshape(qvals, (2, 250))
reshaped_r = np.reshape(rvals, (2, 250))
reshaped_dq = np.reshape(dqvals, (2, 250))
rff = ReflectModel(self.structure, quad_order=13)
calc = rff.model(reshaped_q, x_err=reshaped_dq)
assert_almost_equal(calc, reshaped_r)
def test_constant_smearing(self):
# check that constant dq/q smearing is the same as point by point
dqvals = 0.05 * self.qvals
rff = ReflectModel(self.structure, quad_order="ultimate")
calc = rff.model(self.qvals, x_err=dqvals)
rff.dq = 5.0
calc2 = rff.model(self.qvals)
assert_allclose(calc, calc2, rtol=0.011)
def test_resolution_kernel(self):
# check that resolution kernel works, use constant dq/q of 5% as
# comparison
slabs = self.structure361.slabs()[:, :4]
npnts = 1000
q = np.linspace(0.005, 0.3, npnts)
# use constant dq/q for comparison
const_R = reflectivity(
q,
slabs,
scale=1.01,
bkg=1e-6,
dq=0.05 * q,
quad_order=101,
threads=-1,
)
# lets create a kernel.
kernel = np.zeros((npnts, 2, 501), float)
sd = 0.05 * q / (2 * np.sqrt(2 * np.log(2)))
for i in range(npnts):
kernel[i, 0, :] = np.linspace(
q[i] - 3.5 * sd[i], q[i] + 3.5 * sd[i], 501
)
kernel[i, 1, :] = stats.norm.pdf(
kernel[i, 0, :], loc=q[i], scale=sd[i]
)
kernel_R = reflectivity(q, slabs, scale=1.01, bkg=1e-6, dq=kernel)
assert_allclose(kernel_R, const_R, rtol=0.002)
def test_sld_profile(self):
# test SLD profile with SLD profile from Motofit.
np.seterr(invalid="raise")
profile = np.loadtxt(os.path.join(self.pth, "sld_theoretical_R.txt"))
z, rho = np.split(profile, 2)
rff = ReflectModel(self.structure)
z, myrho = rff.structure.sld_profile(z.flatten())
assert_almost_equal(myrho, rho.flatten())
def test_modelvals_degenerate_layers(self):
# try fitting dataset with a deposited layer split into two degenerate
# layers
fname = os.path.join(self.pth, "c_PLP0011859_q.txt")
dataset = ReflectDataset(fname)
sio2 = SLD(3.47, name="SiO2")
si = SLD(2.07, name="Si")
d2o = SLD(6.36, name="D2O")
polymer = SLD(2.0, name="polymer")
sio2_l = sio2(30, 3)
polymer_l = polymer(125, 3)
structure = si | sio2_l | polymer_l | polymer_l | d2o(0, 3)
polymer_l.thick.setp(value=125, vary=True, bounds=(0, 250))
polymer_l.rough.setp(value=4, vary=True, bounds=(0, 8))
structure[-1].rough.setp(vary=True, bounds=(0, 6))
sio2_l.rough.setp(value=3.16, vary=True, bounds=(0, 8))
model = ReflectModel(structure, bkg=2e-6)
objective = Objective(
model, dataset, use_weights=False, transform=Transform("logY")
)
model.scale.setp(vary=True, bounds=(0, 2))
model.bkg.setp(vary=True, bounds=(0, 8e-6))
slabs = structure.slabs()
assert_equal(slabs[2, 0:2], slabs[3, 0:2])
assert_equal(slabs[2, 3], slabs[3, 3])
assert_equal(slabs[1, 3], sio2_l.rough.value)
f = CurveFitter(objective)
f.fit(method="differential_evolution", seed=1, maxiter=3)
slabs = structure.slabs()
assert_equal(slabs[2, 0:2], slabs[3, 0:2])
assert_equal(slabs[2, 3], slabs[3, 3])
def test_resolution_speed_comparator(self):
fname = os.path.join(self.pth, "c_PLP0011859_q.txt")
dataset = ReflectDataset(fname)
sio2 = SLD(3.47, name="SiO2")
si = SLD(2.07, name="Si")
d2o = SLD(6.36, name="D2O")
polymer = SLD(2.0, name="polymer")
sio2_l = sio2(30, 3)
polymer_l = polymer(125, 3)
dx = dataset.x_err
structure = si | sio2_l | polymer_l | polymer_l | d2o(0, 3)
model = ReflectModel(structure, bkg=2e-6, dq_type="constant")
objective = Objective(
model, dataset, use_weights=False, transform=Transform("logY")
)
# check that choose_resolution_approach doesn't change state
# of model
fastest_method = choose_dq_type(objective)
assert model.dq_type == "constant"
assert_equal(dx, objective.data.x_err)
# check that the comparison worked
const_time = time.time()
for i in range(1000):
objective.generative()
const_time = time.time() - const_time
model.dq_type = "pointwise"
point_time = time.time()
for i in range(1000):
objective.generative()
point_time = time.time() - point_time
if fastest_method == "pointwise":
assert point_time < const_time
elif fastest_method == "constant":
assert const_time < point_time
# check that we could use the function to setup a reflectmodel
ReflectModel(structure, bkg=2e-6, dq_type=choose_dq_type(objective))
def test_mixed_model(self):
# test for MixedReflectModel
air = SLD(0, name="air")
sio2 = SLD(3.47, name="SiO2")
si = SLD(2.07, name="Si")
structure1 = air | sio2(100, 2) | si(0, 3)
structure2 = air | sio2(50, 3) | si(0, 5)
        # this is our theoretical calculation
mixed_model_y = 0.4 * structure1.reflectivity(self.qvals)
mixed_model_y += 0.6 * structure2.reflectivity(self.qvals)
mixed_model = MixedReflectModel(
[structure1, structure2], [0.4, 0.6], bkg=0, dq=0
)
assert_equal(mixed_model.scales, np.array([0.4, 0.6]))
assert mixed_model.dq.value == 0
assert_allclose(mixed_model_y, mixed_model(self.qvals), atol=1e-13)
# test repr of MixedReflectModel
q = repr(mixed_model)
r = eval(q)
assert_equal(r.scales, np.array([0.4, 0.6]))
assert r.dq.value == 0
assert_allclose(mixed_model_y, r(self.qvals), atol=1e-13)
def test_pnr(self):
# test pnr calculation
q = np.linspace(0.01, 0.3, 1001)
# use for spin channel PNR calculation
players = np.array(
[[0, 0, 0, 0, 0], [100, 3, 0, 1, 0], [0, 4, 0, 0, 0]]
)
# use for NSF calculation with abeles
pp_layers = np.array([[0, 0, 0, 0], [100, 4.0, 0, 0], [0, 4, 0, 0]])
mm_layers = np.array([[0, 0, 0, 0], [100, 2, 0, 0], [0, 4, 0, 0]])
r = _reflect.pnr(q, players)
pp = _reflect.abeles(q, pp_layers)
mm = _reflect.abeles(q, mm_layers)
assert_allclose(r[0], pp)
assert_allclose(r[1], mm)
def test_repr_reflect_model(self):
p = SLD(0.0)
q = SLD(2.07)
s = p(0, 0) | q(0, 3)
model = ReflectModel(s, scale=0.99, bkg=1e-8, q_offset=0.002)
r = eval(repr(model))
x = np.linspace(0.005, 0.3, 1000)
assert_equal(r(x), model(x))
def test_q_offset(self):
p = SLD(0.0)
q = SLD(2.07)
s = p(0, 0) | q(0, 3)
model = ReflectModel(s, scale=0.99, bkg=1e-8, q_offset=0.002)
model2 = ReflectModel(s, scale=0.99, bkg=1e-8, q_offset=0.0)
x = np.linspace(0.01, 0.2, 3)
assert_equal(model(x - 0.002), model2(x))
def test_FresnelTransform(self):
t = FresnelTransform(2.07, 6.36, dq=5)
slabs = np.array([[0, 2.07, 0, 0], [0, 6.36, 0, 0]])
q = np.linspace(0.01, 1.0, 1000)
r = reflectivity(q, slabs, dq=5)
rt, et = t(q, r)
assert_almost_equal(rt, 1.0)
# test errors are transformed
rt, et = t(q, r, y_err=1.0)
assert_almost_equal(et, 1.0 / r)
# check that you can use an SLD object
t = FresnelTransform(SLD(2.07), SLD(6.36), dq=5)
rt, et = t(q, r)
assert_almost_equal(rt, 1.0)
# reconstitute a repr'd transform and check it works
s = repr(t)
st = eval(s)
rt, et = st(q, r)
assert_almost_equal(rt, 1.0)
class Wrapper_fn(object):
def __init__(self, fn, w):
self.fn = fn
self.w = w
def __call__(self, x):
assert len(x.shape) == 1
return self.fn(x, self.w, threads=1)
class Wrapper_fn2(object):
def __init__(self, fn, w):
self.fn = fn
self.w = w
def __call__(self, x):
assert len(x.shape) == 1
return self.fn(x, self.w)
| StarcoderdataPython |
3211919 | import matplotlib.pyplot as plt
import numpy as np
from api.folders import IMAGES_FOLDER
from api.parameters import RegLambda
def plot_accuracy_comparison(accuracies, titles, ax=None):
x = [lmbd.value for lmbd in RegLambda]
xticks = np.linspace(0, 1, len(x))
colors = ['C{}'.format(i) for i in range(8)]
nrows = 2
ncols = 2
show_and_save = False
if ax is None:
show_and_save = True
_, ax = plt.subplots(2, 2, figsize=(12, 6))
for i in range(nrows):
for j in range(ncols):
y = list(accuracies[i + j * 2].values())
ax[i, j].set(xlim=(0 - 0.1, 1 + 0.1), ylim=(0 - 0.1, 1 + 0.1))
ax[i, j].set_xticks(xticks)
ax[i, j].set_xticklabels([('{:0.0e}' if i not in [0, 1] else '{}').format(i) for i in x])
ax[i, j].plot(xticks, y, 'go', alpha=0.9, color=colors[i + j * 2])
ax[i, j].plot(xticks, y, '--', alpha=0.9, color=colors[i + j * 2 + 1])
ax[i, j].set_title(titles[i + j * 2])
ax[i, j].set_xlabel(r"$\lambda$")
ax[i, j].set_ylabel("Accuracy")
for n, m in zip(xticks, y):
ax[i, j].text(n, m, "{0:.1f}%".format(round(float(m) * 100, 1)), va="bottom")
if show_and_save:
plt.tight_layout()
save_path = IMAGES_FOLDER.joinpath('accuracy_comparison.pdf')
plt.savefig(str(save_path), bbox_inches='tight', format='pdf', dpi=300)
plt.show()
| StarcoderdataPython |
9765386 | <reponame>identixone/fastapi_contrib
def test_settings_from_env_and_defaults():
from fastapi_contrib.conf import settings
assert settings.fastapi_app == "tests.conftest.app"
assert settings.now_function is None
assert settings.Config.secrets_dir == "/tmp/secrets"
assert settings.jaeger_sampler_rate == 0.1
| StarcoderdataPython |
6701043 | # <NAME>
import os
import platform
import numpy as np
from time import sleep
from PIL import ImageGrab
from game_control import *
from predict import predict
from keras.models import model_from_json
def main():
# Get Model:
model_file = open('data/model/model.json', 'r')
model = model_file.read()
model_file.close()
model = model_from_json(model)
model.load_weights('data/model/weights.h5')
print('AI training has begun')
while 1:
# Get screenshot:
screen = ImageGrab.grab()
# Image to numpy array:
screen = np.array(screen)
# 4 channel(PNG) to 3 channel(JPG)
y = predict(model, screen)
if y == [0, 0, 0, 0]:
# Not action
continue
elif y[0] == -1 and y[1] == -1:
# Only keyboard action.
key = get_key(y[3])
if y[2] == 1:
# Press:
press(key)
else:
# Release:
release(key)
elif y[2] == 0 and y[3] == 0:
# Only mouse action.
pass
# click(y[0], y[1])
else:
# Mouse and keyboard action.
# Mouse:
# click(y[0], y[1])
# Keyboard:
key = get_key(y[3])
if y[2] == 1:
# Press:
press(key)
else:
# Release:
release(key)
if __name__ == '__main__':
main()
| StarcoderdataPython |
23336 | from typing import Dict
import numpy as np
import torch
from torch.nn.functional import linear, log_softmax, embedding
from torch.nn import Dropout, LogSoftmax, NLLLoss
from allennlp.common import Params
from allennlp.models.model import Model
from allennlp.data.vocabulary import Vocabulary, DEFAULT_PADDING_TOKEN
from allennlp.modules import TextFieldEmbedder, TimeDistributed, Seq2SeqEncoder
from allennlp.modules.sampled_softmax_loss import SampledSoftmaxLoss
from allennlp.modules.input_variational_dropout import InputVariationalDropout
from allennlp.modules.token_embedders import Embedding, TokenEmbedder
from allennlp.modules.token_embedders.embedding import _read_pretrained_embeddings_file
from allennlp.nn.util import combine_initial_dims, uncombine_initial_dims
class SoftmaxLoss(torch.nn.Module):
def __init__(self,
num_words: int,
embedding_dim: int,
padding_index: int = 0) -> None:
super().__init__()
self.softmax_w = torch.nn.Parameter(torch.Tensor(num_words, embedding_dim))
self.softmax_b = torch.nn.Parameter(torch.Tensor(num_words))
self._softmax_func = LogSoftmax(dim=-1)
self._padding_index = padding_index
self._reset_parameters()
def _reset_parameters(self):
stdv = 1. / np.sqrt(self.softmax_w.size(1))
self.softmax_w.data.uniform_(-stdv, stdv)
self.softmax_b.data.uniform_(-stdv, stdv)
def forward(self, embeddings: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
logits = self._softmax_func(linear(embeddings, self.softmax_w, self.softmax_b))
criterion = NLLLoss(ignore_index=self._padding_index, reduction="sum")
return criterion(logits, targets.long())
@TokenEmbedder.register("embedding_with_dropout")
class EmbeddingWithDropout(Embedding):
def __init__(self,
num_embeddings: int,
embedding_dim: int,
dropout: float = None,
projection_dim: int = None,
weight: torch.FloatTensor = None,
padding_index: int = None,
trainable: bool = True,
max_norm: float = None,
norm_type: float = 2.,
scale_grad_by_freq: bool = False,
sparse: bool = False) -> None:
Embedding.__init__(self,
num_embeddings=num_embeddings,
embedding_dim=embedding_dim,
projection_dim=projection_dim,
weight=weight,
padding_index=padding_index,
trainable=trainable,
max_norm=max_norm,
norm_type=norm_type,
scale_grad_by_freq=scale_grad_by_freq,
sparse=sparse)
self.dropout = dropout
def forward(self, inputs):
original_size = inputs.size()
inputs = combine_initial_dims(inputs)
if self.dropout and self.training:
mask = self.weight.data.new().resize_((self.weight.size(0), 1)).bernoulli_(1 - self.dropout)\
.expand_as(self.weight) / (1 - self.dropout)
masked_embed_weight = mask * self.weight
else:
masked_embed_weight = self.weight
embedded = embedding(inputs, masked_embed_weight,
max_norm=self.max_norm,
norm_type=self.norm_type,
scale_grad_by_freq=self.scale_grad_by_freq,
sparse=self.sparse)
embedded = uncombine_initial_dims(embedded, original_size)
if self._projection:
projection = self._projection
for _ in range(embedded.dim() - 2):
projection = TimeDistributed(projection)
embedded = projection(embedded)
return embedded
@classmethod
def from_params(cls, vocab: Vocabulary, params: Params) -> 'Embedding':
num_embeddings = params.pop_int('num_embeddings', None)
vocab_namespace = params.pop("vocab_namespace", "tokens")
if num_embeddings is None:
num_embeddings = vocab.get_vocab_size(vocab_namespace)
embedding_dim = params.pop_int('embedding_dim')
pretrained_file = params.pop("pretrained_file", None)
projection_dim = params.pop_int("projection_dim", None)
trainable = params.pop_bool("trainable", True)
padding_index = params.pop_int('padding_index', None)
max_norm = params.pop_float('max_norm', None)
norm_type = params.pop_float('norm_type', 2.)
scale_grad_by_freq = params.pop_bool('scale_grad_by_freq', False)
sparse = params.pop_bool('sparse', False)
dropout = params.pop_float('dropout', None)
params.assert_empty(cls.__name__)
weight = _read_pretrained_embeddings_file(pretrained_file, embedding_dim,
vocab, vocab_namespace) if pretrained_file else None
return cls(num_embeddings=num_embeddings,
embedding_dim=embedding_dim,
projection_dim=projection_dim,
weight=weight,
padding_index=padding_index,
trainable=trainable,
max_norm=max_norm,
norm_type=norm_type,
scale_grad_by_freq=scale_grad_by_freq,
sparse=sparse,
dropout=dropout)
@Model.register("encoder_only")
class EncoderOnlyLanguageModel(Model):
def __init__(self,
vocab: Vocabulary,
embedder: TextFieldEmbedder,
contextualizer: Seq2SeqEncoder,
dropout: float = None,
tie_embeddings: bool = True,
num_samples: int = None,
use_variational_dropout: bool = False):
super().__init__(vocab)
self._embedder = embedder
self._contextualizer = contextualizer
self._context_dim = contextualizer.get_output_dim()
if use_variational_dropout:
self._dropout = InputVariationalDropout(dropout) if dropout else lambda x: x
else:
self._dropout = Dropout(dropout) if dropout else lambda x: x
vocab_size = self.vocab.get_vocab_size()
padding_index = self.vocab.get_token_index(DEFAULT_PADDING_TOKEN)
if num_samples:
self._softmax_loss = SampledSoftmaxLoss(vocab_size, self._context_dim, num_samples)
else:
self._softmax_loss = SoftmaxLoss(vocab_size, self._context_dim, padding_index)
self._tie_embeddings = tie_embeddings
if self._tie_embeddings:
embedder_children = dict(self._embedder.named_children())
word_embedder = embedder_children["token_embedder_tokens"]
assert self._softmax_loss.softmax_w.size() == word_embedder.weight.size()
self._softmax_loss.softmax_w = word_embedder.weight
def forward(self,
source_tokens: Dict[str, torch.Tensor],
target_tokens: Dict[str, torch.Tensor]=None,
**kwargs) -> Dict[str, torch.Tensor]:
# Shape: (batch_size, max_length)
source = source_tokens["tokens"]
mask = source > 0
# Shape: (batch_size, max_length, embedding_size)
embeddings = self._embedder(source_tokens)
embeddings = self._dropout(embeddings)
# Shape: (batch_size, max_length, context_dim)
contextual_embeddings = self._contextualizer(embeddings, mask)
contextual_embeddings = self._dropout(contextual_embeddings)
result = dict()
if target_tokens:
targets = target_tokens["tokens"]
targets = targets.view(-1)
mask = targets > 0
masked_targets = targets.masked_select(mask)
lined_embeddings = contextual_embeddings.view(-1, self._context_dim)
masked_embeddings = lined_embeddings.masked_select(mask.unsqueeze(-1))
masked_embeddings = masked_embeddings.view(-1, self._context_dim)
loss = self._softmax_loss(masked_embeddings, masked_targets)
num_targets = torch.sum(mask.long())
result["loss"] = loss / num_targets.float()
if not self.training:
result["logits"] = self._get_logits(contextual_embeddings)
return result
def _get_logits(self, embeddings):
linears = linear(embeddings, self._softmax_loss.softmax_w, self._softmax_loss.softmax_b)
return log_softmax(linears, dim=-1)
| StarcoderdataPython |
5155342 | <filename>tests/conftest.py<gh_stars>10-100
import pytest
from . import utils
@pytest.fixture(scope='session')
def url(request):
utils.cd_test_dir()
utils.start_server()
url = utils.get_url()
from twill import set_output
from twill.commands import go, find
set_output()
try:
go(url)
find("These are the twill tests")
except Exception:
raise RuntimeError("""
***
Hello! The twill test server is not running or cannot be reached;
please free port 8080 (or set TWILL_TEST_PORT to something else),
and clear your proxy settings too!
***
""")
def stop():
utils.stop_server()
utils.pop_test_dir()
request.addfinalizer(stop)
return url
| StarcoderdataPython |
8037390 | <reponame>zhj12138/ebook-manager
# This file stores the search methods
# Parse the query string
def parseString(sear_str):
    pass
# Search by book title
def searchByName(name):
    pass
# Search by author
def searchByAuthor(author_name):
    pass
# Search by publisher
def searchByPublisher(publisher_name):
    pass
# Search by tag
def searchByTag(tag):
    pass
# Search by book list
def searchByBookList(booklist_name):
    pass
# Given a book ID, return the corresponding Book object
def searchByBookID(ID):
pass
| StarcoderdataPython |
9726637 | <gh_stars>1-10
import sphinx_bootstrap_theme
html_css_files = [
"https://cdn.jsdelivr.net/gh/ickc/markdown-latex-css/css/_table.min.css",
"https://cdn.jsdelivr.net/gh/ickc/markdown-latex-css/fonts/fonts.min.css",
]
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.extlinks",
"sphinx.ext.ifconfig",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"nbsphinx",
"sphinxcontrib.apidoc",
]
source_suffix = ".rst"
master_doc = "index"
project = "pannb"
year = "2021"
author = "<NAME>"
copyright = f"{year}, {author}"
version = release = "0.1.1"
pygments_style = "solarized-light"
html_theme = "bootstrap"
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_theme_options = {
"navbar_links": [
(
"GitHub",
"https://github.com/ickc/pannb/",
True,
)
],
"source_link_position": None,
"bootswatch_theme": "readable",
"bootstrap_version": "3",
}
html_use_smartypants = True
html_last_updated_fmt = "%b %d, %Y"
html_split_index = False
html_short_title = f"{project}-{version}"
napoleon_use_ivar = True
napoleon_use_rtype = False
napoleon_use_param = False
# math_number_all = True
mathjax_path_lut = {
"jsdelivr": "https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-chtml-full.js",
"unpkg": "https://www.unpkg.com/mathjax@3/es5/tex-chtml-full.js",
"cloudflare": "https://cdnjs.cloudflare.com/ajax/libs/mathjax/3.2.0/es5/tex-chtml-full.js",
"githack": "https://rawcdn.githack.com/mathjax/MathJax/3.2.0/es5/tex-chtml-full.js",
"statically": "https://cdn.statically.io/gh/mathjax/MathJax/3.2.0/es5/tex-chtml-full.js",
}
mathjax_path = mathjax_path_lut["jsdelivr"]
mathjax3_config = {
"loader": {
"load": [
# "[tex]/physics",
"[tex]/mathtools",
"[tex]/empheq",
]
},
"tex": {
"packages": {
"[+]": [
# "physics",
"mathtools",
"empheq",
],
},
"tags": "ams",
},
}
# sphinxcontrib.apidoc
apidoc_module_dir = "../src/pannb"
apidoc_separate_modules = True
apidoc_module_first = True
| StarcoderdataPython |
11241694 | import keras
from keras.layers import Input, Embedding, LSTM, Bidirectional, Reshape, Lambda
from keras.layers import concatenate
import keras.backend as K
def get_shape(x):
return K.int_shape(x)
def create_vector_input(dim):
return Input(shape=(dim,), dtype='float32')
def create_sequence_input(sequence_length):
return Input(shape=(sequence_length,), dtype='int32')
def create_image_input(height, width, num_channels):
return Input(shape=(height, width, num_channels), dtype='float32')
def create_float_tensor_input(shape):
return Input(shape=shape, dtype='float32')
def create_int_tensor_input(shape):
return Input(shape=shape, dtype='int32')
def reshape_with_batch_dimension(x, output_shape):
return Lambda(lambda y: K.reshape(y, output_shape))(x)
def reshape_without_batch_dimension(x, output_shape):
return Reshape(output_shape)(x)
def flatten_all_but_last_dimension(x):
shape = get_shape(x)
return reshape_with_batch_dimension(x, (-1, shape[-1]))
def concatenate_along_last_axis(lst):
assert len(lst) > 0
    # Keras' concatenate requires at least two tensors; with a single
    # tensor there is nothing to concatenate, so return it unchanged.
    if len(lst) > 1:
        return concatenate(lst)
    else:
        return lst[0]
def create_basic_sequence_model(input,
sequence_length,
vocab_size,
embedding_dim,
output_dim,
return_sequences,
bidirectional=True,
embeddings=None,
initial_embeddings=None,
trainable_embeddings=True,
lstm=None):
assert embeddings is None or initial_embeddings is None
if embeddings is None:
if initial_embeddings is not None:
embeddings = Embedding(
vocab_size,
embedding_dim,
weights=[initial_embeddings],
trainable=trainable_embeddings)
else:
embeddings = Embedding(
vocab_size, embedding_dim, trainable=trainable_embeddings)
if lstm is None:
units = int(output_dim / 2) if bidirectional else output_dim
lstm = LSTM(units, return_sequences=return_sequences)
if bidirectional:
lstm = Bidirectional(lstm)
# creation of the computational graph.
emb_sequence = embeddings(input)
output = lstm(emb_sequence)
return {
"model": {
"embeddings": embeddings,
"lstm": lstm,
},
"nodes": {
"input": input,
"embedded_sequence": emb_sequence,
"output": output
}
}
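# A hedged usage sketch (not part of the original module) showing how the
# helpers above could be wired together with the Keras functional API. The
# sequence length, vocabulary size and layer dimensions below are
# illustrative assumptions only.
def _example_sequence_model():
    seq_input = create_sequence_input(sequence_length=50)
    parts = create_basic_sequence_model(
        seq_input,
        sequence_length=50,
        vocab_size=10000,
        embedding_dim=128,
        output_dim=64,
        return_sequences=False,
    )
    extra_input = create_vector_input(dim=8)
    # combine the LSTM summary vector with an extra feature vector
    merged = concatenate_along_last_axis([parts["nodes"]["output"], extra_input])
    return keras.models.Model(inputs=[seq_input, extra_input], outputs=merged)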
# class DataGenerator(keras.utils.Sequence):
# def __init__(self):
# pass
# # number of batches
# def __len__(self):
# pass
# # get one batch of data: (X, y).
# def __getitem__(self, index):
# return (batch_X, batch_y)
# # for reshuffling indices after the end of the epoch.
# def on_epoch_end(self):
# pass
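# A minimal sketch of how the commented skeleton above could be filled in.
# Illustrative only: the in-memory arrays, batch size and shuffling strategy
# are assumptions, not part of the original module.
import numpy as np  # used only by the illustrative generator below
class ArrayDataGenerator(keras.utils.Sequence):
    def __init__(self, features, labels, batch_size=32, shuffle=True):
        self.features = features
        self.labels = labels
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.indices = np.arange(len(self.features))
        self.on_epoch_end()
    # number of batches per epoch
    def __len__(self):
        return int(np.ceil(len(self.features) / self.batch_size))
    # get one batch of data: (X, y)
    def __getitem__(self, index):
        batch_idx = self.indices[index * self.batch_size:(index + 1) * self.batch_size]
        return self.features[batch_idx], self.labels[batch_idx]
    # reshuffle indices after the end of the epoch
    def on_epoch_end(self):
        if self.shuffle:
            np.random.shuffle(self.indices)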
### useful links
# - functional API: https://keras.io/getting-started/functional-api-guide/
# - multiple outputs and multiple losses: https://www.pyimagesearch.com/2018/06/04/keras-multiple-outputs-and-multiple-losses/ | StarcoderdataPython |
1842688 | from selenium.webdriver.common.keys import Keys
from random import choice, randint
from selenium import webdriver
from time import sleep
class Instagram_ComentBot:
def __init__(self, username, password):
self.username = username
self.password = password
self.driver = webdriver.Firefox()
def login(self):
driver = self.driver
driver.get("https://www.instagram.com/")
sleep(randint(5, 6))
caixa_usuário = driver.find_element_by_xpath('//input[@name="username"]')
        caixa_usuário.click() # Click the username field.
        caixa_usuário.clear() # Clear the field, in case there is anything in it.
        caixa_usuário.send_keys(self.username) # Type the given username.
caixa_senha = driver.find_element_by_xpath('//input[@name="password"]')
        caixa_senha.click() # Click the password field.
        caixa_senha.clear() # Clear the field, in case there is anything in it.
        caixa_senha.send_keys(self.password) # Type the given password.
sleep(randint(2, 4))
        caixa_senha.send_keys(Keys.RETURN) # Press the "Enter" key.
        sleep(randint(4, 6)) # Wait a few seconds before starting the next step.
self.comentário_na_postagem()
@staticmethod
    def simulação_de_digitação(frase, onde_digitar): # Types the phrase letter by letter.
for letra in frase:
onde_digitar.send_keys(letra)
sleep(randint(4, 9) / 30)
def comentário_na_postagem(self):
driver = self.driver
sleep(randint(3, 5))
        link_postagem = str(input('Enter the post link: '))
        driver.get(f'{link_postagem}') # Add the link of the post you want to comment on.
        comentários = ['1', '2', '4'] # ADD YOUR LIST OF COMMENTS HERE.
        num_comentários = int(input('Enter the number of comments you want to make: '))
        for c in range(num_comentários): # Limits the number of comments.
            driver.find_element_by_class_name('Ypffh').click() # Click the "Comment" field.
caixa_comentario = driver.find_element_by_class_name('Ypffh')
sleep(randint(3, 7))
            self.simulação_de_digitação(choice(comentários), caixa_comentario) # Type the comment into the field.
            sleep(randint(3, 6))
            driver.find_element_by_xpath("//button[contains(text(), 'Publicar')]").click() # Publish the post.
sleep(randint(30, 60))
print(f'Número de comentários postados: {c}')
Instagram_ComentBot = Instagram_ComentBot('username', 'password') # Add your username and password here.
Instagram_ComentBot.login()
| StarcoderdataPython |
11210920 | <filename>src/DataJoin/controller/sync_convert_data_block.py
# Copyright 2020 The 9nFL Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import tensorflow as tf
import os
from DataJoin.common import data_join_service_pb2
from google.protobuf import text_format
import logging
from DataJoin.utils.api import wrap_data_transfer_api
from DataJoin.utils.base import get_host_ip
EXAMPLE_ID = "example_id"
EVENT_TIME = "event_time"
LABEL = "label"
EXAMPLE_ID_NS = '%X'
DEFATLT_LABEL = "0 0"
import codecs
http_server_ip = get_host_ip()
data_path_vw_bas_dir = os.environ.get("data_path_vw_bas_dir", None)
def map_fn(proto):
example = tf.train.Example.FromString(proto)
f_dict = {}
feature_map = example.features.feature
for feat in feature_map:
if feat == EVENT_TIME:
continue
elif feat == EXAMPLE_ID:
f_dict[EXAMPLE_ID_NS] = feature_map[feat].bytes_list.value
else:
f_dict[feat] = feature_map[feat].bytes_list.value
if LABEL in f_dict:
label = str(f_dict.pop(LABEL)[0], encoding='utf-8')
else:
label = DEFATLT_LABEL
res = [label, ]
for ns in f_dict:
for val in f_dict[ns]:
try:
val = str(val, encoding='utf-8')
except Exception as e:
ns = ns.encode('utf-8')
res.append("%s %s" % (ns, val))
return "|".join(res)
def write_data(input_file, output_file, batch_size=16):
dataset = tf.data.TFRecordDataset(input_file) \
.batch(batch_size)
next_element = tf.compat.v1.data.make_one_shot_iterator(dataset) \
.get_next()
with tf.Session() as sess:
with codecs.open(output_file, "w", 'utf-8') as f:
while True:
try:
batch_data = sess.run(next_element)
except Exception as e:
logging.info("data convert done")
break
for item in batch_data:
vw_str = map_fn(item)
f.write("%s\n" % vw_str)
return True
class SyncConvertDataBlock(object):
def __init__(self, meta_path, data_path, time_stamp, **kwargs):
super(SyncConvertDataBlock).__init__()
self.meta_path = meta_path
self.data_path = data_path
self.data_source_name = (self.data_path.split("/")[-1]).split(".")[0]
self.time_stamp = time_stamp
self.tf_read = tf.io.tf_record_iterator
self.data_path_vw_bas_dir = data_path_vw_bas_dir
self.tmp_path = "/tmp"
def sync_convert_data_block(self):
meta_iter = self.tf_read(self.meta_path)
meta_info = text_format.Parse(next(meta_iter),
data_join_service_pb2.DataBlockMeta())
logging.info('meta info block_id: {0}'.format(meta_info.block_id))
logging.info('meta info partition_id: {0}'.format(meta_info.partition_id))
json_body = dict(start_time=meta_info.start_time,
end_time=meta_info.end_time,
leader_start_index=meta_info.leader_start_index,
leader_end_index=meta_info.leader_end_index,
follower_restart_index=meta_info.follower_restart_index,
data_block_index=meta_info.data_block_index,
dfs_data_block_dir=self.data_path,
create_status=2,
consumed_status=1,
data_source_name=self.data_source_name
)
wrap_data_transfer_api(
'POST',
'/v1/data/{0}/{1}/{2}/create/data/block'.format(
meta_info.block_id,
meta_info.partition_id,
meta_info.file_version),
json_body,
)
class StartSyncConvertDataBlock(object):
@staticmethod
def run_task():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--time_stamp', required=True, type=str, help="time_stamp")
parser.add_argument('-m', '--meta_path', required=True, type=str, help="meta_path")
parser.add_argument('-p', '--data_path', required=True, type=str, help="data_path")
args = parser.parse_args()
time_stamp = args.time_stamp
meta_path = args.meta_path
data_path = args.data_path
SyncConvertDataBlock(meta_path, data_path, time_stamp).sync_convert_data_block()
if __name__ == '__main__':
StartSyncConvertDataBlock().run_task()
| StarcoderdataPython |
11378599 | # -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
class Client:
"""
This is a number module
"""
@staticmethod
def parse_int(
raw: str,
) -> int:
return int(raw)
@staticmethod
def parse_long(
raw: str,
) -> int:
return int(raw)
@staticmethod
def parse_float(
raw: str,
) -> float:
return float(raw)
@staticmethod
def parse_double(
raw: str,
) -> float:
return float(raw)
@staticmethod
def itol(
val: int,
) -> int:
return val
@staticmethod
def ltoi(
val: int,
) -> int:
return val
@staticmethod
def add(
val1: int,
val2: int,
) -> int:
return val1 + val2
@staticmethod
def sub(
val1: int,
val2: int,
) -> int:
return val1 - val2
@staticmethod
def mul(
val1: int,
val2: int,
) -> int:
return val1 * val2
@staticmethod
def div(
val1: int,
val2: int,
) -> float:
return val1 / val2
@staticmethod
def gt(
val1: int,
val2: int,
) -> bool:
return val1 > val2
@staticmethod
def gte(
val1: int,
val2: int,
) -> bool:
return val1 >= val2
@staticmethod
def lt(
val1: int,
val2: int,
) -> bool:
return val1 < val2
@staticmethod
def lte(
val1: int,
val2: int,
) -> bool:
return val1 <= val2
| StarcoderdataPython |
3213667 | <filename>code/Q3pre.py
import numpy as np
import pandas as pd
pr_mat=pd.read_csv('data3/pr.csv',index_col=0)
index=[]
for i,r in pr_mat.iterrows():
if r.values[0]>0.05 and r.values[1]>0.05:
pr_mat=pr_mat.drop(i)
index.append(i)
print(index)
print(len(index))
pr_mat.to_csv('./dropData.csv')
| StarcoderdataPython |
1882685 | <reponame>phlong3105/onevision<filename>src/onevision/models/detection/scaled_yolov4/ensemble.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import annotations
import argparse
import csv
import itertools
import os
import test
from onevision.utils import pretrained_dir
if __name__ == "__main__":
weights = [
os.path.join("runs", "finetune", "yolov4-p7_a2i2haze_1536/weights/best.pt"),
os.path.join("runs", "finetune", "yolov4-p7_a2i2haze_1920/weights/best_strip.pt"),
os.path.join("runs", "finetune", "yolov4-p7_visdrone_1536_a2i2haze_1536/weights/best_strip.pt"),
os.path.join("runs", "finetune", "yolov4-p7_visdrone_1845_a2i2haze_1536/weights/best_strip.pt"),
os.path.join("runs", "finetune", "yolov4-p7_visdrone_1845_a2i2haze_1845/weights/best_strip.pt"),
os.path.join("runs", "finetune", "yolov4-p7_visdrone_1920_a2i2haze_1536/weights/best_strip.pt"),
os.path.join("runs", "finetune", "yolov4-p7_visdrone_2160_a2i2haze_1536/weights/best_strip.pt"),
os.path.join("runs", "finetune", "yolov4-p7_visdrone_2160_a2i2haze_1536_2/weights/best_strip.pt"),
os.path.join("runs", "finetune", "yolov4-p7_visdrone_2160_a2i2haze_1536_3/weights/best_strip.pt"),
os.path.join("runs", "finetune", "yolov4-p7_visdrone_2160_a2i2haze_1845/weights/best_strip.pt"),
os.path.join("runs", "finetune", "yolov4-p7_visdrone_2160_a2i2haze_multiscale/weights/best_strip.pt"),
os.path.join("runs", "finetune", "yolov4-p7_visdrone_uavdt_1536_a2i2haze_1536/weights/best_strip.pt"),
os.path.join("runs", "finetune", "yolov4-p7_visdrone_2560_a2i2haze_1536/weights/best_strip.pt"),
os.path.join("runs", "finetune", "yolov4-p7_visdrone_2560_a2i2haze_1845/weights/best_strip.pt"),
os.path.join("runs", "finetune", "yolov4-p7_visdrone_2560_a2i2haze_1920/weights/best_strip.pt"),
os.path.join("runs", "finetune", "yolov4-p7_visdrone_2560_a2i2haze_2160/weights/best.pt"),
os.path.join("runs", "finetune", "yolov4-p7_visdrone_2560_a2i2haze_2560/weights/best.pt"),
]
N = 4 # Number of models in ensemble
table = {}
for comb in itertools.combinations(weights, N):
scaled_yolov4_dir = os.path.join(pretrained_dir, "scaled_yolov4")
parser = argparse.ArgumentParser()
parser.add_argument(
"--weights", default=os.path.join(scaled_yolov4_dir, "yolov4_p7_coco.pt"), nargs="+", type=str, help="model.pt path(s)"
)
parser.add_argument("--data", default="data/coco128.yaml", type=str, help="*.data path")
parser.add_argument("--batch-size", default=32, type=int, help="size of each image batch")
parser.add_argument("--img-size", default=640, type=int, help="inference size (pixels)")
parser.add_argument("--conf-thres", default=0.001, type=float, help="object confidence threshold")
parser.add_argument("--iou-thres", default=0.65, type=float, help="IOU threshold for NMS")
parser.add_argument("--save-json", default=False, action="store_true", help="save a cocoapi-compatible JSON results file")
parser.add_argument("--task", default="val", help="'val', 'test', 'study'")
parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
parser.add_argument("--single-cls", default=False, action="store_true", help="treat as single-class dataset")
parser.add_argument("--augment", default=False, action="store_true", help="augmented inference")
parser.add_argument("--merge", default=False, action="store_true", help="use Merge NMS")
parser.add_argument("--verbose", default=True, action="store_true", help="report mAP by class")
parser.add_argument("--save-txt", default=True, action="store_true", help="save results to *.txt")
opt = parser.parse_args()
opt.save_json |= opt.data.endswith("coco.yaml")
opt.data = "data/a2i2haze.yaml"
opt.weights = list(comb)
opt.batch_size = 1
opt.img_size = 1536
opt.conf_thres = 0.001
opt.iou_thres = 0.5
# opt.device = "0"
opt.augment = True
opt.merge = True
opt.verbose = False
print(opt)
results = test.test(
opt.data,
opt.weights,
opt.batch_size,
opt.img_size,
opt.conf_thres,
opt.iou_thres,
opt.save_json,
opt.single_cls,
opt.augment,
opt.verbose,
opt2=opt
)
k = ""
for v in comb:
v = v.replace("/weights/best.pt", "")
v = v.replace("/weights/best_strip.pt", "")
v = v.replace("runs/finetune/", "")
k = f"{k} + {v}"
table[k] = results
# (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
header = ["ensemble", "mp", "mr", "map50", "map", "t"]
with open(f"{N}C{len(weights)}.csv", "w") as f:
writer = csv.writer(f)
writer.writerow(header)
for k, v in table.items():
v0 = v[0]
mp = v0[0]
mr = v0[1]
map50 = v0[2]
map = v0[3]
loss = v0[4]
maps = v[1]
t = v[2]
writer.writerow([f"{k}", f"{mp}", f"{mr}", f"{map50}", f"{map}", f"{t}"])
| StarcoderdataPython |
12836931 | # -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
"""
Unitary tests for bigfish.stack.filter module.
"""
import pytest
import numpy as np
import bigfish.stack as stack
from bigfish.stack.filter import _define_kernel
from numpy.testing import assert_array_equal
from numpy.testing import assert_allclose
# toy images
x = np.array(
[[3, 2, 0, 0, 0],
[2, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 2, 1, 5, 0],
[0, 0, 0, 0, 0]],
dtype=np.uint8)
y = np.array(
[[0, 0, 62, 164, 55],
[0, 0, 120, 235, 181],
[0, 0, 73, 205, 0],
[0, 131, 0, 0, 0],
[0, 0, 0, 0, 0]],
dtype=np.uint8)
@pytest.mark.parametrize("shape, size", [
("diamond", 3), ("disk", 3), ("rectangle", (2, 3)), ("square", 3),
("blabla", 3)])
@pytest.mark.parametrize("dtype", [
np.uint8, np.uint16, np.uint32, np.uint64,
np.int8, np.int16, np.int32, np.int64,
np.float16, np.float32, np.float64, bool])
def test_kernel(shape, size, dtype):
# non valid case
if shape not in ["diamond", "disk", "rectangle", "square"]:
with pytest.raises(ValueError):
_define_kernel(shape, size, dtype)
# valid cases
else:
kernel = _define_kernel(shape, size, dtype)
if shape == "diamond":
expected_kernel = np.array(
[[0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0]],
dtype=dtype)
elif shape == "disk":
expected_kernel = np.array(
[[0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 1, 0, 0, 0]],
dtype=dtype)
elif shape == "rectangle":
expected_kernel = np.array(
[[1, 1, 1],
[1, 1, 1]],
dtype=dtype)
else:
expected_kernel = np.array(
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
dtype=dtype)
assert_array_equal(kernel, expected_kernel)
assert kernel.dtype == dtype
def test_mean_filter():
# np.uint8
filtered_x = stack.mean_filter(x,
kernel_shape="square",
kernel_size=3)
expected_x = np.array(
[[2, 1, 0, 0, 0],
[1, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0]],
dtype=np.uint8)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint8
# np.uint16
filtered_x = stack.mean_filter(x.astype(np.uint16),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(np.uint16)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint16
# np.float32
filtered_x = stack.mean_filter(x.astype(np.float32),
kernel_shape="square",
kernel_size=3)
expected_x = np.array(
[[2.333, 1.444, 0.556, 0., 0.],
[1.556, 1., 0.444, 0., 0.],
[0.889, 0.778, 1.111, 0.667, 0.556],
[0.333, 0.444, 1., 0.667, 0.556],
[0.222, 0.333, 0.889, 0.667, 0.556]],
dtype=np.float32)
assert_allclose(filtered_x, expected_x, rtol=1e-02)
assert filtered_x.dtype == np.float32
# np.float64
filtered_x = stack.mean_filter(x.astype(np.float64),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(np.float64)
assert_allclose(filtered_x, expected_x, rtol=1e-02)
assert filtered_x.dtype == np.float64
def test_median_filter():
# np.uint8
filtered_x = stack.median_filter(x,
kernel_shape="square",
kernel_size=3)
expected_x = np.array(
[[2, 2, 0, 0, 0],
[2, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0]],
dtype=np.uint8)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint8
# np.uint16
filtered_x = stack.median_filter(x.astype(np.uint16),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(np.uint16)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint16
def test_maximum_filter():
# np.uint8
filtered_x = stack.maximum_filter(x,
kernel_shape="square",
kernel_size=3)
expected_x = np.array(
[[3, 3, 2, 0, 0],
[3, 3, 2, 0, 0],
[2, 2, 5, 5, 5],
[2, 2, 5, 5, 5],
[2, 2, 5, 5, 5]],
dtype=np.uint8)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint8
# np.uint16
filtered_x = stack.maximum_filter(x.astype(np.uint16),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(np.uint16)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint16
def test_minimum_filter():
# np.uint8
filtered_x = stack.minimum_filter(x,
kernel_shape="square",
kernel_size=3)
expected_x = np.array(
[[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
dtype=np.uint8)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint8
# np.uint16
filtered_x = stack.minimum_filter(x.astype(np.uint16),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(np.uint16)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint16
def test_log_filter():
# float64
y_float64 = stack.cast_img_float64(y)
filtered_y_float64 = stack.log_filter(y_float64, 2)
expected_y_float64 = np.array(
[[0., 0., 0.02995949, 0.06212277, 0.07584532],
[0., 0., 0.02581818, 0.05134284, 0.06123539],
[0., 0., 0.01196859, 0.0253716, 0.02853162],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
dtype=np.float64)
assert_allclose(filtered_y_float64, expected_y_float64, rtol=1e-6)
assert filtered_y_float64.dtype == np.float64
# float32
y_float32 = stack.cast_img_float32(y)
filtered_y = stack.log_filter(y_float32, 2)
expected_y = stack.cast_img_float32(expected_y_float64)
assert_allclose(filtered_y, expected_y, rtol=1e-6)
assert filtered_y.dtype == np.float32
# uint8
filtered_y = stack.log_filter(y, 2)
expected_y = stack.cast_img_uint8(expected_y_float64)
assert_array_equal(filtered_y, expected_y)
assert filtered_y.dtype == np.uint8
# uint16
y_uint16 = stack.cast_img_uint16(y)
filtered_y = stack.log_filter(y_uint16, 2)
expected_y = stack.cast_img_uint16(expected_y_float64)
assert_array_equal(filtered_y, expected_y)
assert filtered_y.dtype == np.uint16
def test_gaussian_filter():
# float64
y_float64 = stack.cast_img_float64(y)
filtered_y_float64 = stack.gaussian_filter(y_float64, 2)
expected_y_float64 = np.array(
[[0.08928096, 0.1573019 , 0.22897881, 0.28086597, 0.3001061 ],
[0.08668051, 0.14896399, 0.21282558, 0.25752308, 0.27253406],
[0.07634613, 0.12664142, 0.17574502, 0.20765944, 0.2155001 ],
[0.05890843, 0.09356377, 0.12493327, 0.1427122 , 0.14374558],
[0.03878372, 0.05873308, 0.07492625, 0.08201409, 0.07939603]],
dtype=np.float64)
assert_allclose(filtered_y_float64, expected_y_float64, rtol=1e-6)
assert filtered_y_float64.dtype == np.float64
# float32
y_float32 = stack.cast_img_float32(y)
filtered_y = stack.gaussian_filter(y_float32, 2)
expected_y = stack.cast_img_float32(expected_y_float64)
assert_allclose(filtered_y, expected_y, rtol=1e-6)
assert filtered_y.dtype == np.float32
# uint8
with pytest.raises(ValueError):
stack.gaussian_filter(y, 2, allow_negative=True)
filtered_y = stack.gaussian_filter(y, 2)
expected_y = stack.cast_img_uint8(expected_y_float64)
assert_array_equal(filtered_y, expected_y)
assert filtered_y.dtype == np.uint8
# uint16
y_uint16 = stack.cast_img_uint16(y)
with pytest.raises(ValueError):
stack.gaussian_filter(y_uint16, 2, allow_negative=True)
filtered_y = stack.gaussian_filter(y_uint16, 2)
expected_y = stack.cast_img_uint16(expected_y_float64)
assert_array_equal(filtered_y, expected_y)
assert filtered_y.dtype == np.uint16
def test_background_removal_mean():
# np.uint8
filtered_x = stack.remove_background_mean(x,
kernel_shape="square",
kernel_size=3)
expected_x = np.array(
[[1, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 2, 0, 5, 0],
[0, 0, 0, 0, 0]],
dtype=np.uint8)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint8
# np.uint16
filtered_x = stack.remove_background_mean(x.astype(np.uint16),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(np.uint16)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint16
def test_background_removal_gaussian():
# float64
y_float64 = stack.cast_img_float64(y)
filtered_y_float64 = stack.remove_background_gaussian(y_float64, 2)
expected_y_float64 = np.array(
[[0., 0., 0.01415845, 0.36227129, 0.],
[0., 0., 0.25776265, 0.66404555, 0.43726986],
[0., 0., 0.11052949, 0.59626213, 0.],
[0., 0.42016172, 0., 0., 0.],
[0., 0., 0., 0., 0.]],
dtype=np.float64)
assert_allclose(filtered_y_float64, expected_y_float64, rtol=1e-6)
assert filtered_y_float64.dtype == np.float64
# float32
y_float32 = stack.cast_img_float32(y)
filtered_y = stack.remove_background_gaussian(y_float32, 2)
expected_y = stack.cast_img_float32(expected_y_float64)
assert_allclose(filtered_y, expected_y, rtol=1e-6)
assert filtered_y.dtype == np.float32
# uint8
with pytest.raises(ValueError):
stack.gaussian_filter(y, 2, allow_negative=True)
filtered_y = stack.remove_background_gaussian(y, 2)
expected_y = stack.cast_img_uint8(expected_y_float64)
assert_array_equal(filtered_y, expected_y)
assert filtered_y.dtype == np.uint8
# uint16
y_uint16 = stack.cast_img_uint16(y)
with pytest.raises(ValueError):
stack.gaussian_filter(y_uint16, 2, allow_negative=True)
filtered_y = stack.remove_background_gaussian(y_uint16, 2)
expected_y = stack.cast_img_uint16(expected_y_float64)
assert_array_equal(filtered_y, expected_y)
assert filtered_y.dtype == np.uint16
def test_dilation_filter():
# np.uint8
filtered_x = stack.dilation_filter(x,
kernel_shape="square",
kernel_size=3)
expected_x = np.array(
[[3, 3, 2, 0, 0],
[3, 3, 2, 0, 0],
[2, 2, 5, 5, 5],
[2, 2, 5, 5, 5],
[2, 2, 5, 5, 5]],
dtype=np.uint8)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint8
# np.uint16
filtered_x = stack.dilation_filter(x.astype(np.uint16),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(np.uint16)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint16
# np.float32
filtered_x = stack.dilation_filter(x.astype(np.float32),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(np.float32)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.float32
# np.float64
filtered_x = stack.dilation_filter(x.astype(np.float64),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(np.float64)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.float64
# bool
filtered_x = stack.dilation_filter(x.astype(bool),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(bool)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == bool
def test_erosion_filter():
# np.uint8
filtered_x = stack.erosion_filter(x,
kernel_shape="square",
kernel_size=3)
expected_x = np.array(
[[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
dtype=np.uint8)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint8
# np.uint16
filtered_x = stack.erosion_filter(x.astype(np.uint16),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(np.uint16)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint16
# np.float32
filtered_x = stack.erosion_filter(x.astype(np.float32),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(np.float32)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.float32
# np.float64
filtered_x = stack.erosion_filter(x.astype(np.float64),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(np.float64)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.float64
# bool
filtered_x = stack.erosion_filter(x.astype(bool),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(bool)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == bool
| StarcoderdataPython |
6652281 | # MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import ROOT
from .labels import CMSLabel, LuminosityLabel
def draw_labels(lumi_text, cms_position='left', extra_text=''):
"""Draw the CMS Publication Committee figure labels on the active canvas.
Parameters
----------
lumi_text : string
The luminosity label text. Data taking periods must be separated by
the "+" symbol, e.g. "19.7 fb^{-1} (8 TeV) + 4.9 fb^{-1} (7 TeV)".
cms_position : string, optional
The CMS label position on the active canvas:
:left: The top left corner inside the frame (default)
:center: The top center inside the frame
:right: The top right corner inside the frame
:outside: The top left corner outside the frame
extra_text : string, optional
The sublabel text positioned below the CMS label inside of the frame
or to the right of the CMS label outside of the frame. Common examples
are "Preliminary", "Simulation", or "Unpublished". The default is an
empty string for no sublabel.
"""
cms_label = CMSLabel()
cms_label.position = cms_position
cms_label.sublabel.text = extra_text
cms_label.draw()
lumi_label = LuminosityLabel(lumi_text)
lumi_label.draw()
ROOT.gPad.Update()
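# Illustrative usage sketch (assumes a histogram is already drawn on the active
# canvas; the canvas/histogram names and the luminosity string are placeholders):
#
#     canvas = ROOT.TCanvas('c_example', 'c_example', 800, 600)
#     hist = ROOT.TH1F('h_example', 'Example;x;Events', 10, 0., 10.)
#     hist.Draw()
#     draw_labels('19.7 fb^{-1} (8 TeV)', cms_position='left',
#                 extra_text='Preliminary')
#     canvas.SaveAs('labeled_plot.png')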
| StarcoderdataPython |
8130247 | <filename>cifar10/dev/utils_data.py<gh_stars>0
import os
# import imageio
# import yaml
import torch
import torchvision
from torch.utils.data.dataset import Subset
from torchvision.transforms import (CenterCrop, Compose, RandomHorizontalFlip, Resize, ToTensor)
import pickle
import numpy as np
import pdb
st = pdb.set_trace
class MultiViewDataset(torch.utils.data.Dataset):
def __init__(self, data_path='data.pkl', view_paths=['view.pkl'], train=True, transform=None, n_views=2,
must_include_original=False):
# TODO: hardcoded
if train:
index = np.arange(50000)
else:
index = np.arange(50000, 60000)
if not os.path.exists(data_path):
raise FileNotFoundError(f'{data_path} does not exist')
with open(data_path, 'rb') as f:
data = pickle.load(f)
self.data_path = data_path
self.data = {
'images': data['images'][index],
'labels': torch.tensor(data['labels'])[index],
}
views = []
if isinstance(view_paths, str):
view_paths = [view_paths]
for view_path in view_paths:
if not os.path.exists(view_path):
raise FileNotFoundError(f'{view_path} does not exist')
with open(view_path, 'rb') as f:
view = pickle.load(f)
views.append(view['views'][:, index])
self.view_paths = view_paths
# NOTE: original data is view_0
self.views = torch.cat([self.data['images'].unsqueeze(0)] + views, dim=0)
# self.total_views = self.views.shape[0] - 1
self.total_views = self.views.shape[0]
self.n_views = n_views
self.transform = transform
self.must_include_original = must_include_original
def __getitem__(self, index):
if self.total_views == 0:
inds = [0] * self.n_views
else:
if self.must_include_original:
inds = [0] + list(np.random.choice(np.arange(1, self.total_views), self.n_views - 1, replace=False))
else:
inds = np.random.choice(np.arange(self.total_views), self.n_views, replace=False)
imgs = []
for i in inds:
img = self.views[i, index].clone()
img = (img + 1) / 2 # main assumes range is [0, 1]
if self.transform is not None:
img = self.transform(img)
imgs.append(img)
lbls = self.data['labels'][index]
return imgs, lbls
def __len__(self):
return len(self.data['labels'])
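# Minimal usage sketch (assumes the pickle files follow the layout read above,
# i.e. data.pkl holds {'images', 'labels'} and each view file holds {'views'};
# the paths and batch size are placeholders):
#
#     dataset = MultiViewDataset(data_path='data.pkl', view_paths=['view.pkl'],
#                                train=True, n_views=2)
#     loader = torch.utils.data.DataLoader(dataset, batch_size=64, shuffle=True)
#     views, labels = next(iter(loader))  # `views` is a list of n_views tensors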
class LatentDataset(torch.utils.data.Dataset):
def __init__(self, data_path='data.pkl', latent_path='latent.pkl', train=True, transform=None):
# TODO: hardcoded
if train:
index = np.arange(50000)
else:
index = np.arange(50000, 60000)
if not os.path.exists(data_path):
raise FileNotFoundError(f'{data_path} does not exist')
with open(data_path, 'rb') as f:
data = pickle.load(f)
self.data_path = data_path
self.data = {
'images': data['images'][index],
'labels': torch.tensor(data['labels'])[index],
}
with open(latent_path, 'rb') as f:
latents = pickle.load(f)
self.latent_path = latent_path
self.data['latents'] = latents['latents'][index]
self.transform = transform
def __getitem__(self, index):
x = self.data['images'][index].clone()
w = self.data['latents'][index].clone()
y = self.data['labels'][index]
if self.transform is not None:
x = self.transform((x + 1) / 2)
return x, w, y
def __len__(self):
return len(self.data['labels'])
class PosViewDataset(torch.utils.data.Dataset):
def __init__(self, data_path='data.pkl', view_paths=['view.pkl'], train=True, transform01=None, transform2=None, n_views=2,
must_include_original=False, transform0=None):
# TODO: hardcoded
if train:
index = np.arange(50000)
else:
index = np.arange(50000, 60000)
if not os.path.exists(data_path):
raise FileNotFoundError(f'{data_path} does not exist')
with open(data_path, 'rb') as f:
data = pickle.load(f)
self.data_path = data_path
self.data = {
'images': data['images'][index],
'labels': torch.tensor(data['labels'])[index],
}
views = []
if isinstance(view_paths, str):
view_paths = [view_paths]
for view_path in view_paths:
if not os.path.exists(view_path):
raise FileNotFoundError(f'{view_path} does not exist')
with open(view_path, 'rb') as f:
view = pickle.load(f)
views.append(view['views'][:, index])
self.view_paths = view_paths
# NOTE: original data is view_0
self.views = torch.cat([self.data['images'].unsqueeze(0)] + views, dim=0)
self.total_views = self.views.shape[0]
self.n_views = n_views
self.transform01 = transform01
self.transform2 = transform2
self.must_include_original = must_include_original
self.transform0 = transform0
def __getitem__(self, index):
if self.transform01 is not None:
if self.must_include_original:
imgs = [
self.transform0((self.data['images'][index].clone() + 1) / 2),
self.transform01((self.data['images'][index].clone() + 1) / 2)
]
else:
imgs = [
self.transform01((self.data['images'][index].clone() + 1) / 2),
self.transform01((self.data['images'][index].clone() + 1) / 2)
]
else:
raise ValueError('transform01 is None')
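        # NOTE: the reassignment below replaces the pair built above with two
        # untransformed copies of the original image (rescaled to [0, 1]), so
        # transform0/transform01 are effectively bypassed for these two entries.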
imgs = [(self.data['images'][index].clone() + 1) / 2, (self.data['images'][index].clone() + 1) / 2]
inds = list(np.random.choice(np.arange(1, self.total_views), self.n_views, replace=False))
for i in inds:
img = self.views[i, index].clone()
img = (img + 1) / 2 # main assumes range is [0, 1]
if self.transform2 is not None:
img = self.transform2(img)
imgs.append(img)
lbls = self.data['labels'][index]
return imgs, lbls
def __len__(self):
        return len(self.data['labels'])
 | StarcoderdataPython |
1791782 | from imdb import IMDB
# from pascal_voc import PascalVOC
# from cityscape import CityScape
# from coco import coco
from road_images import RoadImages
| StarcoderdataPython |
1854340 | <gh_stars>0
from pytest import mark, raises
from sls.parser.lexer import ErrorCodes, LexerException, Tokenizer
def tokenize(text):
"""Returns a list of tokens"""
return [*Tokenizer(text).tokenize()]
def simple_tokenize(text):
"""Return a list of only the token ids"""
toks = tokenize(text)
return [k.id() for k in toks]
def test_regex_token():
toks = tokenize("/foo/")
assert len(toks) == 1
assert toks[0].id() == "regex"
assert toks[0].text() == "/foo/"
def test_regex_token_ws():
toks = tokenize("/foo/ ")
assert len(toks) == 1
assert toks[0].id() == "regex"
assert toks[0].text() == "/foo/"
def test_regex_token_flag():
toks = tokenize("/foo/i")
assert len(toks) == 1
assert toks[0].id() == "regex"
assert toks[0].text() == "/foo/i"
def test_regex_token_div():
toks = tokenize("/fo")
assert len(toks) == 2
assert toks[0].id() == "div"
assert toks[0].text() == "/"
assert toks[1].id() == "name"
assert toks[1].text() == "fo"
def test_string_token():
toks = tokenize('"a"')
assert len(toks) == 1
assert toks[0].id() == "string"
assert toks[0].text() == '"a"'
def test_string_token_ws():
toks = tokenize('"a" ')
assert len(toks) == 1
assert toks[0].id() == "string"
assert toks[0].text() == '"a"'
def test_string_token_escaped():
toks = tokenize(r'"a\\b"')
assert len(toks) == 1
assert toks[0].id() == "string"
assert toks[0].text() == '"a\\b"'
def test_string_token_escaped_2():
toks = tokenize(r'"a\"b"')
assert len(toks) == 1
assert toks[0].id() == "string"
assert toks[0].text() == '"a"b"'
def test_int_token():
toks = tokenize("123")
assert len(toks) == 1
assert toks[0].id() == "int"
assert toks[0].text() == "123"
def test_int_token_ws():
toks = tokenize("123 ")
assert len(toks) == 1
assert toks[0].id() == "int"
assert toks[0].text() == "123"
def test_int_parents():
toks = tokenize("123)")
assert len(toks) == 2
assert toks[0].id() == "int"
assert toks[1].id() == "rparens"
def test_int_op():
toks = tokenize("1-2")
assert len(toks) == 3
assert toks[0].id() == "int"
assert toks[1].id() == "sub"
assert toks[0].id() == "int"
def test_float_token():
toks = tokenize("123.")
assert len(toks) == 1
assert toks[0].id() == "float"
assert toks[0].text() == "123."
def test_float_token_ws():
toks = tokenize("123. ")
assert len(toks) == 1
assert toks[0].id() == "float"
assert toks[0].text() == "123."
def test_name_token():
toks = tokenize("abc")
assert len(toks) == 1
assert toks[0].id() == "name"
assert toks[0].text() == "abc"
def test_name_token_op():
toks = tokenize("abc+2")
assert len(toks) == 3
assert toks[0].id() == "name"
assert toks[1].id() == "add"
assert toks[2].id() == "int"
def test_name_token_2():
toks = tokenize("my/serv123_ice")
assert len(toks) == 1
assert toks[0].id() == "name"
assert toks[0].text() == "my/serv123_ice"
def test_name_token_3():
toks = tokenize("omg-services/uuid")
assert len(toks) == 1
assert toks[0].id() == "name"
assert toks[0].text() == "omg-services/uuid"
def test_name_token_4():
toks = tokenize("name)")
assert len(toks) == 2
assert toks[0].id() == "name"
assert toks[1].id() == "rparens"
def test_name_token_5():
toks = tokenize("name(i")
assert len(toks) == 3
assert toks[0].id() == "name"
assert toks[1].id() == "lparens"
assert toks[2].id() == "name"
def test_name_token_6():
toks = tokenize("a-b")
assert len(toks) == 1
assert toks[0].id() == "name"
assert toks[0].text() == "a-b"
def test_name_token_7():
toks = tokenize("a-2")
assert len(toks) == 1
assert toks[0].id() == "name"
assert toks[0].text() == "a-2"
def test_name_token_ws():
toks = tokenize("abc ")
assert len(toks) == 1
assert toks[0].id() == "name"
assert toks[0].text() == "abc"
def test_newline_token():
toks = tokenize("\n")
assert len(toks) == 1
assert toks[0].id() == "nl"
assert toks[0].text() == "\n"
def test_newline_token_ws():
toks = tokenize("\n ")
assert len(toks) == 3
assert toks[0].id() == "nl"
assert toks[0].text() == "\n"
assert toks[1].id() == "indent"
assert toks[1].text() == " "
assert toks[2].id() == "dedent"
assert toks[2].text() == ""
def test_newline_token_indent():
toks = tokenize("\n ab")
assert len(toks) == 4
assert toks[0].id() == "nl"
assert toks[0].text() == "\n"
assert toks[1].id() == "indent"
assert toks[1].text() == " "
assert toks[2].id() == "name"
assert toks[2].text() == "ab"
assert toks[3].id() == "dedent"
assert toks[3].text() == ""
def test_comment():
toks = tokenize("#")
assert len(toks) == 1
assert toks[0].id() == "comment"
def test_comment_2():
toks = tokenize(" #")
assert len(toks) == 1
assert toks[0].id() == "comment"
def test_comment_3():
toks = tokenize(" # foo")
assert len(toks) == 1
assert toks[0].id() == "comment"
def test_comment_4():
toks = tokenize("# foo \n12")
assert len(toks) == 3
assert toks[0].id() == "comment"
assert toks[1].id() == "nl"
assert toks[2].id() == "int"
def test_fn_token():
toks = tokenize("foo(")
assert len(toks) == 2
assert toks[0].id() == "name"
assert toks[1].id() == "lparens"
@mark.parametrize(
"op,id",
[
("+", "add"),
("-", "sub"),
("*", "mul"),
("/", "div"),
("%", "mod"),
("^", "pow"),
(".", "dot"),
(":", "colon"),
("=", "assign"),
("(", "lparens"),
(")", "rparens"),
("[", "lbracket"),
("]", "rbracket"),
("{", "lcurly"),
("}", "rcurly"),
(",", "comma"),
("+=", "assign_add"),
("-=", "assign_sub"),
("*=", "assign_mul"),
("/=", "assign_div"),
("%=", "assign_mod"),
("==", "equal"),
("!=", "not_equal"),
("<=", "less_equal"),
(">=", "greater_equal"),
("<", "less"),
(">", "greater"),
("as", "as_operator"),
("to", "to_operator"),
("and", "and_operator"),
("or", "or_operator"),
("not", "not_operator"),
("return", "return"),
("returns", "returns"),
("when", "when"),
("foreach", "foreach"),
("while", "while"),
("function", "function"),
("int", "int_type"),
("float", "float_type"),
("boolean", "boolean_type"),
("string", "string_type"),
("time", "time_type"),
("regex", "regex_type"),
("object", "object_type"),
("any", "any_type"),
("Map", "map_type"),
("List", "list_type"),
],
)
def test_operator(op, id):
toks = tokenize(op)
assert len(toks) == 1
assert toks[0].id() == id
assert toks[0].text() == op
# op with whitespace
toks = tokenize(f"{op} ")
assert len(toks) == 1
assert toks[0].id() == id
assert toks[0].text() == op
@mark.parametrize(
"story,ids",
[
(
"""function foo returns object
return app
b = foo()
""",
[
"function",
"name",
"returns",
"object_type",
"nl",
"indent",
"return",
"name",
"nl",
"dedent",
"nl",
"name",
"assign",
"name",
"lparens",
"rparens",
"nl",
],
),
(
"if 1\n return 2\nelse\n return 3",
[
"if",
"int",
"nl",
"indent",
"return",
"int",
"nl",
"dedent",
"else",
"nl",
"indent",
"return",
"int",
"dedent",
],
),
(
"if 1\n a = 1\n return 2",
[
"if",
"int",
"nl",
"indent",
"name",
"assign",
"int",
"nl",
"return",
"int",
"dedent",
],
),
(
"if 1\n# foo\n a = 1\n # foo\n return 2",
[
"if",
"int",
"nl",
"comment",
"nl",
"indent",
"name",
"assign",
"int",
"nl",
"comment",
"nl",
"return",
"int",
"dedent",
],
),
(
"a = 2.5 + -3.5",
["name", "assign", "float", "add", "sub", "float",],
),
("true*false", ["true", "mul", "false",],),
(
"redis set name: (mongodb findOne)",
[
"name",
"name",
"name",
"colon",
"lparens",
"name",
"name",
"rparens",
],
),
(
"if true\n"
" if true\n"
" a = 1\n"
" else\n"
' a = "s"\n'
"else\n"
" a = 1.5\n",
[
"if",
"true",
"nl",
"indent",
"if",
"true",
"nl",
"indent",
"name",
"assign",
"int",
"nl",
"dedent",
"else",
"nl",
"indent",
"name",
"assign",
"string",
"nl",
"dedent",
"dedent",
"else",
"nl",
"indent",
"name",
"assign",
"float",
"nl",
"dedent",
],
),
(
"[1,2,3]",
["lbracket", "int", "comma", "int", "comma", "int", "rbracket"],
),
(
"[1,2,3].length()",
[
"lbracket",
"int",
"comma",
"int",
"comma",
"int",
"rbracket",
"dot",
"name",
"lparens",
"rparens",
],
),
],
)
def test_stories(story, ids):
toks = simple_tokenize(story)
assert toks == ids
@mark.parametrize(
"story,error",
[
('"a', ErrorCodes.string_no_end),
('"a\n 2', ErrorCodes.string_no_end),
("2a", ErrorCodes.number_only_digits),
("a$", ErrorCodes.name_only_alphanumeric),
("/rr/z", ErrorCodes.regex_invalid_flag),
("/rr\n", ErrorCodes.regex_no_end),
],
)
def test_lexer_exception(story, error):
with raises(LexerException) as e:
tokenize(story)
    assert e.value.code == error
| StarcoderdataPython |
3482105 | from datetime import datetime, timedelta
import numpy as np
import pytides
from pytides import constituent as cons
from pytides.tide import Tide as PyTide
from .constants import DATE_FORMAT, FOOT, HIGH, UTC, METERS
NOAA_CONSTITUENTS = [c for c in cons.noaa if c != cons._Z0] + [cons._Z0]
SIX_HOURS = timedelta(hours=6, minutes=15)
class Tide(object):
def __init__(self, constituents, mtl, mllw):
self.amplitudes = [cst["amplitude"] for cst in constituents]
self.phases = [cst["phase"] for cst in constituents]
offset = mtl - mllw
self.phases.append(0)
self.amplitudes.append(offset)
self.model = np.zeros(len(NOAA_CONSTITUENTS), dtype=PyTide.dtype)
self.model['constituent'] = NOAA_CONSTITUENTS
self.model['amplitude'] = self.amplitudes
self.model['phase'] = self.phases
self.tide = PyTide(model=self.model, radians=False)
def extrema(self, start, stop, timezone, unit):
extrema = self.tide.extrema(start - SIX_HOURS, stop + SIX_HOURS)
return [self.format_extremum(time, height, hilo, timezone, unit) for time, height, hilo in extrema]
@staticmethod
def format_extremum(time, height, hilo, timezone, unit):
return {"height": height if unit == METERS else height * FOOT,
"high": hilo == HIGH,
"time": time.astimezone(timezone).strftime(DATE_FORMAT)}
| StarcoderdataPython |
8025927 | <reponame>sprocter/lab-games
from clocks.clock import Clock
class Increment(Clock):
increment = 0 # Stores the amount to increment the player's clocks by
def __init__(self, player_count=0, player_names=None, increment_amount=30, starting_clock=5):
super().__init__(player_count=player_count, player_names=player_names)
self.clock = [int(starting_clock * 60) for _ in range(self.player_count+1)]
self.increment = increment_amount
        self.clock[self.current_player] += self.increment # One-time increase to the first player's clock
def next_player(self):
# Get turn length, and then subtract it from the clock
turn_length = self.get_turn_length()
self.clock[self.current_player] -= turn_length
# Do the standard next player things
super().next_player()
# Add the increment to this next player's clock
self.clock[self.current_player] += self.increment
def previous_player(self):
# Get turn length, and then subtract it from the clock
turn_length = self.get_turn_length()
self.clock[self.current_player] -= turn_length
# Do the standard previous player things
super().previous_player()
# Do not give an increment when going to previous player?
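# Usage sketch (assumes the Clock base class provides get_turn_length() and the
# turn bookkeeping used above; the parameter values are placeholders):
#
#     clock = Increment(player_count=4, increment_amount=30, starting_clock=5)
#     clock.next_player()  # charges the elapsed turn to the departing player
#                          # and grants the 30 s increment to the next one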
| StarcoderdataPython |
1757909 | <filename>apps/web/forms/issues.py
from django import forms
from django.core.exceptions import ValidationError
from apps.web.forms.bootstrap import BootStrapForm
from apps.web.models import Issues, ProjectUser, IssuesType, Module, IssuesReply, ProjectInvite
class IssuesModelForm(BootStrapForm, forms.ModelForm):
"""问题表单验证"""
class Meta:
model = Issues
exclude = ['project', 'create_user', 'create_datetime', 'modify_datetime']
widgets = {
'assign': forms.Select(attrs={"class": 'selectpicker', 'data-live-search=': 'true'}),
'attention': forms.SelectMultiple(attrs={"class": 'selectpicker',
'data-live-search=': 'true',
'data-actions-box=': 'true'}),
'parent': forms.Select(attrs={"class": 'selectpicker', 'data-live-search=': 'true'}),
}
def __init__(self, request, *args, **kwargs):
super(IssuesModelForm, self).__init__(*args, **kwargs)
self.request = request
        # Get all issue types of the current project
issues_type_list = IssuesType.objects.filter(project=self.request.tracer.project).values_list('id', 'title')
self.fields['issues_type'].choices = issues_type_list
        # Get all modules of the current project
        module_list = [('', '没有任何选择项'), ]  # used as a prompt placeholder
module_obj_list = Module.objects.filter(project=self.request.tracer.project).values_list('id', 'title')
module_list.extend(module_obj_list)
self.fields['module'].choices = module_list
        # Assignee and followers
total_user_list = [
            (self.request.tracer.project.create_user.id, self.request.tracer.project.create_user.username), ]  # used as a prompt placeholder
project_user_list = ProjectUser.objects.filter(project=self.request.tracer.project).values_list('user_id',
'user__username')
total_user_list.extend(project_user_list)
self.fields['assign'].choices = [('', '没有任何选择项')] + total_user_list
self.fields['attention'].choices = total_user_list
        # Issues created in the current project
parent_list = [('', '没有任何选择项')]
parent_obj_list = Issues.objects.filter(project=self.request.tracer.project).values_list('id', 'subject')
parent_list.extend(parent_obj_list)
self.fields['parent'].choices = parent_list
class IssuesReplyModelForm(BootStrapForm, forms.ModelForm):
"""评论表单验证"""
class Meta:
model = IssuesReply
fields = ['content', 'reply']
class InviteModelForm(BootStrapForm, forms.ModelForm):
"""邀请验证"""
class Meta:
model = ProjectInvite
fields = ['period', 'count']
| StarcoderdataPython |
4973014 | <reponame>luispedro/Coelho2021_GMGCv1_analysis
import numpy as np
from scipy import stats
from glob import glob
from itertools import islice
def build_ctable(g0, g1, total):
c = np.zeros((2,2))
c[1,1] = len(g0 & g1)
c[1,0] = len(g0 - g1)
c[0,1] = len(g1 - g0)
c[0,0] = total - c.sum()
return c
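# Worked example (gene IDs and total are illustrative only): with
# g0 = {'a', 'b', 'c'}, g1 = {'b', 'c', 'd'} and total = 10, build_ctable gives
#     [[6., 1.],
#      [1., 2.]]
# i.e. c[1][1] = |g0 & g1| = 2, c[1][0] = |g0 - g1| = 1, c[0][1] = |g1 - g0| = 1,
# and c[0][0] = 10 - 4 = 6; this is the table handed to fisher_exact below.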
TOTAL = 311197450.
def test_group(group, travelers, name, total=TOTAL):
print(f"Test {name}")
print("Fraction of genes that travel {:1%}".format(len(travelers)/total))
print("Fraction of genes that travel ({name} genes only) {:1%}".format(len(travelers&group)/len(group), name=name))
print("Fraction of {name} genes {:.1%}".format(len(group)/total, name=name))
print("Fraction of {name} genes within travelers {:.1%}".format(len(group&travelers)/len(travelers), name=name))
print("Association test")
print(stats.fisher_exact(build_ctable(group, travelers, total)))
ABRs = set(line.split()[0] for line in islice(open('cold/annotations/GMGC.card_resfam_updated.out.r'), 1, None) if not line.lstrip().startswith('SYNERGY'))
mobile = set(line.split()[0] for line in open('cold/annotations/e5_besthit_DDE_TRdb_filtered_cellular_duplicates.out'))
travelers = set(line.split()[0] for line in open('outputs/travelers.txt'))
test_group(mobile, travelers, 'mobile')
print("\n\n")
test_group(ABRs, travelers, 'ABR')
human_genes = set(line.strip() for line in open('outputs/GMGC.human_gut.95nr.headers.txt'))
print("\n\nHUMAN GUT ONLY")
test_group(mobile& human_genes, travelers&human_genes, 'mobile', total=len(human_genes))
test_group(ABRs& human_genes, travelers&human_genes, 'abrs', total=len(human_genes))
| StarcoderdataPython |
5085476 | <gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Arguments
Created on 2020/5/13
"""
__author__ = "<NAME>"
class Arguments:
"""
General settings of arguments
"""
# Device
device = 'cuda'
# Path
raw_data_dir = '../data/raw'
raw_data_train = raw_data_dir + '/train.csv'
raw_data_val = raw_data_dir + '/val.csv'
raw_data_test = raw_data_dir + '/test.csv'
data_dir = '../data'
dataset_path = data_dir + '/dataset.pkl'
lookup_path = data_dir + '/lookup.pkl'
padded_dataset_path = data_dir + '/padded_dataset.pkl'
result_dir = '../result'
event_dir = result_dir + '/event'
ckpt_dir = result_dir + '/ckpt'
embed_dir = result_dir + '/embed'
embed_word_path = embed_dir + '/word.npy' # 48
embed_char_path = embed_dir + '/char.npy' # 48
embed_pos_path = embed_dir + '/pos.npy' # 32
embed_xavier_path = embed_dir + '/xavier.npy' # 128
embedding_paths = ( # these embeddings will be concatenated [CHECK IT]
embed_word_path, embed_char_path, embed_pos_path
)
# embedding_paths = (embed_xavier_path,) # for base comparing
# Special tokens and corresponding _indexes
word_pad = ('<pad>', 0)
word_oov = ('<oov>', 1)
entity_pad = ('<p>', 0)
entity_bos = ('<bos>', 1)
entity_eos = ('<eos>', 2)
# Train
use_pretrained_embeddings = True # [CHECK IT]
finished_epoch = 0
num_epochs = 100
batch_size = 64
weight_decay = 0.001
lr = 1e-3
min_lr = 5e-5
lr_decay_factor = 0.95
# Model Common Part
num_vocabs = None # set automatically
num_entities = None # set automatically
embed_dim = 128 # embedding size [CHECK IT]
model_dim = 256
# Early Stop
min_delta = 0.
patience = 6
# Test
test_ckpt = ckpt_dir + '/gru_attn_crf_1block/ckpt_epoch_21.pt' # [CHECK IT]
test_batch_size = 200
write_to_csv = True
csv_dir = result_dir + '/csv'
class AttnCRFArguments(Arguments):
"""
Arguments for Attention-CRF model
GammaAttnCRF in model.py
"""
model_name = 'attn_crf'
model_dim = 128
attention_type = 'scaled_dot'
num_blocks = 1
num_heads = 4
ff_hidden_dim = 512
dropout_rate = 0.2
class GRUCRFArguments(Arguments):
"""
Arguments for BiGRU-CRF model
GammaGRUCRF in model.py
"""
model_name = 'gru_crf'
gru_hidden_dim = 100
class GRUAttnCRFArguments(Arguments):
"""
Arguments for BiGRU-Attention-CRF model
GammaGRUAttnCRF in model.py
"""
model_name = 'gru_attn_crf'
attention_type = 'scaled_dot' # {dot, scaled_dot, cosine, general} tested
num_blocks = 1 # {1, 2, 3} tested
num_heads = 4
ff_hidden_dim = 512
dropout_rate = 0.2
gru_hidden_dim = Arguments.model_dim // 2
class GRUArguments(Arguments):
"""
Arguments for BiGRU model
GammaGRU in model.py
"""
model_name = 'gru'
gru_hidden_dim = 120
class GRUAttnArguments(Arguments):
"""
Arguments for BiGRU-Attention model
GammaGRUAttn in model.py
"""
model_name = 'gru_attn'
attention_type = 'scaled_dot'
num_blocks = 1 # {1, 2, 3} tested
num_heads = 4
ff_hidden_dim = 512
dropout_rate = 0.2
gru_hidden_dim = Arguments.model_dim // 2
| StarcoderdataPython |
4991313 | <filename>qcfractal/interface/orm/torsiondrive_orm.py
"""
An ORM for TorsionDrive
"""
import json
class TorsionDriveORM:
"""
    An interface to the raw JSON data of a TorsionDrive torsion scan run.
"""
# Maps {internal_status : FractalServer status}
__json_mapper = {
"_id": "id",
"_success": "success",
# Options
"_optimization_history": "optimization_history",
"_initial_molecule_id": "initial_molecule",
"_torsiondrive_options": "torsiondrive_meta",
"_geometric_options": "geometric_meta",
"_qc_options": "qc_meta",
# Energies
"_final_energies": "final_energies",
}
def __init__(self, initial_molecule, **kwargs):
"""Initializes a TorsionDriveORM object, from local data.
This object may be able to submit jobs to the server in the future.
*Prototype object, may change in the future.
Parameters
----------
initial_molecule : TYPE
Description
kwargs:
See TorsionDriveORM.from_json
"""
self._initial_molecule = initial_molecule
# Set kwargs
for k in self.__json_mapper.keys():
setattr(self, k, kwargs.get(k[1:], None))
@classmethod
def from_json(cls, data):
"""
Creates a TorsionDriveORM object from FractalServer data.
Parameters
----------
data : dict
A JSON blob from FractalServer:
- "id": The service id of the blob
- "success": If the procedure was successful or not.
- "initial_molecule": The id of the submitted molecule
- "torsiondrive_meta": The option submitted to the TorsionDrive method
- "geometric_meta": The options submitted to the Geometric method called by TorsionDrive
- "qc_meta": The program, options, method, and basis to be run by Geometric.
- "final_energies": A dictionary of final energies if the TorsionDrive service is finished
Returns
-------
torsiondrive_obj : TorsionDriveORM
A TorsionDriveORM object from the specified JSON.
"""
kwargs = {}
for k, v in TorsionDriveORM.__json_mapper.items():
if v in data:
kwargs[k[1:]] = data[v]
if ("final_energies" in kwargs) and (kwargs["final_energies"] is not None):
kwargs["final_energies"] = {tuple(json.loads(k)): v for k, v in kwargs["final_energies"].items()}
return cls(None, **kwargs)
def __str__(self):
"""
Simplified torsiondrive string representation.
Returns
-------
ret : str
A representation of the current TorsionDrive status.
Examples
--------
>>> repr(torsiondrive_obj)
TorsionDrive(id='5b7f1fd57b87872d2c5d0a6d', success=True, molecule_id='5b7f1fd57b87872d2c5d0a6c', molecule_name='HOOH')
"""
ret = "TorsionDrive("
ret += "id='{}', ".format(self._id)
ret += "success='{}', ".format(self._success)
ret += "molecule_id='{}', ".format(self._initial_molecule_id)
name = None
if self._initial_molecule:
name = self._initial_molecule.name()
ret += "molecule_name='{}')".format(name)
return ret
def final_energies(self, key=None):
"""
Provides the final optimized energies at each grid point.
Parameters
----------
key : None, optional
Returns the final energy at a single grid point.
Returns
-------
energy : float, dict
Returns energies at each grid point in a dictionary or at a
single point if a key is specified.
Examples
--------
>>> torsiondrive_obj.final_energies()
{(-90,): -148.7641654446243, (180,): -148.76501336993732, (0,): -148.75056290106735, (90,): -148.7641654446148}
"""
if not self._success:
raise KeyError("{} has not completed or failed. Unable to show final energies.".format(self))
if key is None:
return self._final_energies.copy()
else:
if isinstance(key, (int, float)):
key = (int(key), )
return self._final_energies[key]
| StarcoderdataPython |
3362003 | # -*- coding: utf-8 -*-
# Simple Bot (SimpBot)
# Copyright 2016-2017, <NAME> (kwargs)
from simpbot.bottools import text
import time
class user:
def __init__(self, user, host, nick, realname=None, account=None):
self.user = user
self.host = host
self.nick = nick
self.realname = realname
self.ircoper = False
self.account = account
self.server = None
self.modes = None
self.since = None
self._idle = None
self.lastmsg = None
self.ssl = False
self.completed = False
self.lines = 0
self.channels = {}
self.dateinfo = time.time()
def __repr__(self):
return repr("<user '%s'>" % (self.nick if self.nick else '--'))
@property
def logged(self):
return self.account is not None and self.account != ''
@property
def nosuch(self):
return (self.user, self.host, self.realname) == (None, None, None)
@property
def tracked(self):
if self.nosuch:
return False
return len(self.channels) != 0
@property
def mask(self):
return '%s!%s@%s' % (self.nick, self.user, self.host)
@property
def idle(self):
return int(time.time()) - self._idle
def update(self):
self.dateinfo = int(time.time())
def uplastmsg(self):
self.lastmsg = int(time.time())
def reset(self):
self.user = None
self.host = None
self.realname = None
self.ircoper = False
self.account = None
self.server = None
self.modes = None
self.since = None
self._idle = None
self.lastmsg = None
self.ssl = False
self.completed = False
self.lines = 0
self.channels.clear()
self.update()
def set(self, attr, value):
setattr(self, attr, value)
#self.update()
def increase_lines(self):
self.lines += 1
self.uplastmsg()
#self.update()
@text.lower
def add_channel(self, channame, chanlist):
if channame in self.channels:
return
self.channels[channame] = {'channel': chanlist, 'status': ''}
@text.lower
def del_channel(self, channame):
if not channame in self.channels:
return
del self.channels[channame]
@text.lower
def get_status(self, channame):
if not channame in self.channels:
return
return self.channels[channame]['status']
def set_status(self, channame, act, status_mode):
if status_mode is None:
status_mode = ''
channame = channame.lower()
#status_mode = status_mode[0]
if not channame in self.channels:
return
status = self.channels[channame]['status']
if act == 'insert':
if not status_mode in status:
self.channels[channame]['status'] += status_mode
elif act == 'remove':
if status_mode in status:
self.channels[channame]['status'] = status.replace(status_mode, '')
elif act == 'reset':
self.channels[channame]['status'] = status_mode | StarcoderdataPython |
9638426 | import rethinkdb as r
def handler(db_conn, event):
servers = r.db('rethinkdb').table('server_status').run(db_conn)
server_count = len(list(servers))
return "I'm in a cluster with %d rethinkdb servers" % server_count
| StarcoderdataPython |
11207715 | from trello import TrelloClient
import pprint, requests,os
client = TrelloClient(
api_key=os.environ.get('Trello_API_KEY'),
token=os.environ.get('Trello_API_TOKEN'),)
attachments = []
def list_all_boards(client):
"""
get list of all boards to determine the ID
for further functions
"""
all_boards = client.list_boards()
for counter, board in enumerate(all_boards):
print(counter, board.name)
# list_all_boards(client)
def print_cards_from_board(board_id, client):
"""
Access board with ID board_id in the client instance
and print all non-archived lists with their non-archived cards
"""
all_boards = client.list_boards()
my_board = all_boards[board_id]
all_lists_on_board = my_board.list_lists()
for list in all_lists_on_board:
for card in list.list_cards():
print(str(card.board.name + ":" + card.description) + ":" +str(card.name))
for x in range(1, 30):
print_cards_from_board(x, client)
| StarcoderdataPython |
134054 | import glob
import os
import pickle
import shlex
import tarfile
import tempfile
import threading
from typing import Any, Dict, Iterable, List, NamedTuple, Optional, Set, Tuple, Union
import boto3
from redun.file import File
from redun.hashing import hash_stream
from redun.scheduler import Job
# Constants.
REDUN_PROG = "redun"
REDUN_REQUIRED_VERSION = ">=0.4.1"
DEFAULT_AWS_REGION = "us-west-2"
# S3 scratch filenames.
S3_SCRATCH_INPUT = "input"
S3_SCRATCH_OUTPUT = "output"
S3_SCRATCH_CODE = "code.tar.gz"
S3_SCRATCH_ERROR = "error"
S3_SCRATCH_HASHES = "eval_hashes"
S3_SCRATCH_STATUS = "status"
# Cache for AWS Clients.
_boto_clients: Dict[Tuple[int, str, str], boto3.Session] = {}
class JobStatus(NamedTuple):
all: List[str]
pending: List[str]
inflight: List[str]
success: List[str]
failure: List[str]
stopped: List[str]
timeout: List[str]
def get_aws_client(service: str, aws_region: str = DEFAULT_AWS_REGION) -> boto3.Session:
"""
Get an AWS Client with caching.
"""
cache_key = (threading.get_ident(), service, aws_region)
client = _boto_clients.get(cache_key)
if not client:
client = _boto_clients[cache_key] = boto3.client(service, region_name=aws_region)
return client
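# Example (illustrative): repeated calls from the same thread for the same
# service/region reuse the cached client.
#
#     s3 = get_aws_client('s3')                         # created and cached
#     assert get_aws_client('s3') is s3                 # cache hit
#     batch = get_aws_client('batch', aws_region='us-east-1')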
def find_code_files(
basedir: str = ".", includes: Optional[List[str]] = None, excludes: Optional[List[str]] = None
) -> Iterable[str]:
"""
    Find all the workflow code files consistent with the include/exclude patterns.
"""
if includes is None:
includes = ["**/*.py"]
if excludes is None:
excludes = []
files: Set[str] = set()
for pattern in includes:
files |= set(glob.glob(os.path.join(basedir, pattern), recursive=True))
for pattern in excludes:
files -= set(glob.glob(os.path.join(basedir, pattern), recursive=True))
return files
def create_tar(tar_path: str, file_paths: Iterable[str]) -> File:
"""
Create a tar file from local file paths.
"""
tar_file = File(tar_path)
with tar_file.open("wb") as out:
with tarfile.open(fileobj=out, mode="w|gz") as tar:
for file_path in file_paths:
tar.add(file_path)
return tar_file
def extract_tar(tar_file: File, dest_dir: str = ".") -> None:
"""
Extract a tar file to local paths.
"""
with tar_file.open("rb") as infile:
with tarfile.open(fileobj=infile, mode="r|gz") as tar:
tar.extractall(dest_dir)
def get_job_scratch_dir(s3_scratch_prefix: str, job: Job) -> str:
"""
Returns s3 scratch directory for a redun Job.
"""
assert job.eval_hash
return os.path.join(s3_scratch_prefix, "jobs", job.eval_hash)
def get_job_scratch_file(s3_scratch_prefix: str, job: Job, filename: str) -> str:
"""
Returns s3 scratch path for a file related to a redun Job.
"""
assert job.eval_hash
return os.path.join(s3_scratch_prefix, "jobs", job.eval_hash, filename)
def get_code_scratch_file(s3_scratch_prefix: str, tar_hash: str) -> str:
"""
Returns s3 scratch path for a code package tar file.
"""
return os.path.join(s3_scratch_prefix, "code", tar_hash + ".tar.gz")
def get_array_scratch_file(s3_scratch_prefix: str, job_array_id: str, filename: str) -> str:
"""
Returns an S3 scratch path for a file related to an AWS batch job
"""
return os.path.join(s3_scratch_prefix, "array_jobs", job_array_id, filename)
def copy_to_s3(file_path: str, s3_scratch_dir: str) -> str:
"""
Copies a file to the S3 scratch directory if it is not already on S3.
Returns the path to the file on S3.
"""
file = File(file_path)
_, filename = os.path.split(file.path)
s3_temp_file = File(f"{s3_scratch_dir.rstrip('/')}/{filename}")
file.copy_to(s3_temp_file)
return s3_temp_file.path
def get_default_region() -> str:
"""
Returns the default AWS region.
"""
return boto3.Session().region_name or DEFAULT_AWS_REGION
def get_aws_user(aws_region: str = DEFAULT_AWS_REGION) -> str:
"""
Returns the current AWS user.
"""
sts_client = get_aws_client("sts", aws_region=aws_region)
response = sts_client.get_caller_identity()
return response["Arn"]
def parse_code_package_config(config) -> Union[dict, bool]:
"""
    Parse the code package options from an AWSBatchExecutor config.
"""
if not config.getboolean("code_package", fallback=True):
return False
include_config = config.get("code_includes", "**/*.py")
exclude_config = config.get("code_excludes", "")
return {"includes": shlex.split(include_config), "excludes": shlex.split(exclude_config)}
def package_code(s3_scratch_prefix: str, code_package: dict = {}) -> File:
"""
Package code to S3.
"""
with tempfile.TemporaryDirectory() as tmpdir:
file_paths = find_code_files(
includes=code_package.get("includes"), excludes=code_package.get("excludes")
)
temp_file = File(os.path.join(tmpdir, "code.tar.gz"))
create_tar(temp_file.path, file_paths)
with temp_file.open("rb") as infile:
tar_hash = hash_stream(infile)
code_file = File(get_code_scratch_file(s3_scratch_prefix, tar_hash))
if not code_file.exists():
temp_file.copy_to(code_file)
return code_file
def parse_task_result(s3_scratch_prefix: str, job: Job) -> Any:
"""
Parse task result from s3 scratch path.
"""
output_path = get_job_scratch_file(s3_scratch_prefix, job, S3_SCRATCH_OUTPUT)
output_file = File(output_path)
assert job.task
if not job.task.script:
with output_file.open("rb") as infile:
result = pickle.load(infile)
else:
result = [0, output_file.read(mode="rb")] # TODO: Get real exitcode.
return result
| StarcoderdataPython |
12863964 | <reponame>seculayer/automl-mlps
# -*- coding: utf-8 -*-
# Author : <NAME>
# e-mail : <EMAIL>
# Powered by Seculayer © 2021 Service Model Team
from mlps.core.data.cnvrtr.ConvertAbstract import ConvertAbstract
class IPTransferDivide(ConvertAbstract):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.num_feat = 4
    # Tokenization happens here
def apply(self, data):
try:
row = data.split(".")
except Exception as e:
# self.LOGGER.error(e)
row = ["0", "0", "0", "0"]
return row
if __name__ == "__main__":
payload = "192.168.1.110"
tokenizer = IPTransferDivide(stat_dict=None, arg_list=None)
print(tokenizer.apply(payload))
| StarcoderdataPython |
3286682 | from django.urls import path
from cride.circles.views import list_circles, create_cricle
urlpatterns = [
path('circles/', list_circles),
path('circles/create/', create_cricle),
]
 | StarcoderdataPython |
90263 | <filename>volatility/volatility/plugins/linux/keyboard_notifiers.py<gh_stars>1-10
# Volatility
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License Version 2 as
# published by the Free Software Foundation. You may not use, modify or
# distribute this program under any other version of the GNU General
# Public License.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: <NAME>
@license: GNU General Public License 2.0
@contact: <EMAIL>
@organization: 504ENSICS Labs
"""
import volatility.obj as obj
import volatility.debug as debug
import volatility.plugins.linux.common as linux_common
class linux_keyboard_notifiers(linux_common.AbstractLinuxCommand):
"""Parses the keyboard notifier call chain"""
def calculate(self):
linux_common.set_plugin_members(self)
knl_addr = self.addr_space.profile.get_symbol("keyboard_notifier_list")
if not knl_addr:
debug.error("Symbol keyboard_notifier_list not found in kernel")
knl = obj.Object("atomic_notifier_head", offset = knl_addr, vm = self.addr_space)
symbol_cache = {}
for call_back in linux_common.walk_internal_list("notifier_block", "next", knl.head):
call_addr = call_back.notifier_call
if symbol_cache.has_key(call_addr):
sym_name = symbol_cache[call_addr]
hooked = 0
else:
sym_name = self.profile.get_symbol_by_address("kernel", call_addr)
if not sym_name:
sym_name = "HOOKED"
hooked = 1
symbol_cache[call_addr] = sym_name
yield call_addr, sym_name, hooked
def render_text(self, outfd, data):
self.table_header(outfd, [("Address", "[addrpad]"), ("Symbol", "<30")])
for call_addr, sym_name, _ in data:
self.table_row(outfd, call_addr, sym_name)
| StarcoderdataPython |
1995087 | ################################################################################################################################
# *** Copyright Notice ***
#
# "Price Based Local Power Distribution Management System (Local Power Distribution Manager) v1.0"
# Copyright (c) 2016, The Regents of the University of California, through Lawrence Berkeley National Laboratory
# (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved.
#
# If you have questions about your rights to use or distribute this software, please contact
# Berkeley Lab's Innovation & Partnerships Office at <EMAIL>.
################################################################################################################################
| StarcoderdataPython |
'''Initialization'''
from .qrcodegenerator import QRCodeGenerator
 | StarcoderdataPython |
11375519 | # ===============================================================================
# Copyright 2016 dgketchum
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os
from datetime import datetime
from pandas import concat, DataFrame
from recharge.time_series_manager import amf_obs_time_series, get_etrm_time_series # from recharge.time_series_manager import amf_obs_time_series, get_etrm_time_series
from recharge.etrm_processes import Processes # from recharge.etrm_processes import Processes
from app.paths import paths
from app.generic_runner import run_model
from app.config import Config
BASE_AMF_DICT = {# '1': {'Coords': '361716 3972654', 'Name': 'Valles_Coniferous'},
'2': {'Coords': '355774 3969864', 'Name': 'Valles_Ponderosa'},
# '3': {'Coords': '339552 3800667', 'Name': 'Sevilleta_Shrub'},
# '4': {'Coords': '343495 3803640', 'Name': 'Sevilleta_Grass'},
# '5': {'Coords': '386288 3811461', 'Name': 'Heritage_Pinyon_Juniper'},
# '6': {'Coords': '420840 3809672', 'Name': 'Tablelands_Juniper_Savanna'}
}
def run(input_root, simulation_period):
paths.build(input_root)
amf_dict = amf_obs_time_series(BASE_AMF_DICT,
complete_days_only=True,
return_low_err=True)
# get_etrm_time_series(paths.amf_extract, dict_=amf_dict)
for k, v in amf_dict.iteritems():
for kk, vv in v.iteritems():
if isinstance(vv, DataFrame):
p = os.path.join(paths.amf_output_root,'{}_{}.csv'.format(k, kk))
print 'writing to {}'.format(p)
vv.to_csv(p)
val = amf_dict.values()[0]
cfg = Config()
for runspec in cfg.runspecs:
paths.build(runspec.input_root, runspec.output_root)
etrm = Processes(runspec)
etrm.configure_run(runspec)
etrm.run()
save_run(etrm, val)
# for key, val in amf_dict.iteritems():
# etrm = Processes(cfg.runspecs[0])
# etrm.run(ro_reinf_frac=0.7, allen_ceff=0.8)
# save_run(etrm, val)
def save_run(etrm, val):
# tracker is deprecated, try commenting this out
# path = os.path.join(paths.amf_output_root, '{}.csv'.format(val['Name']))
# etrm.tracker.to_csv(path, na_rep='nan', index_label='Date')
amf_obs_etrm_combo = DataFrame(concat((val['AMF_Data'], etrm.point_tracker), axis=1, join='outer'))
obs_etrm_comb_out = os.path.join(paths.amf_combo_root, '{}_Ceff.csv'.format(val['Name']))
print 'this should be your combo csv: {}'.format(obs_etrm_comb_out)
amf_obs_etrm_combo.to_csv(obs_etrm_comb_out, index_label='Date')
if __name__ == '__main__':
# ir = os.path.join('/Volumes', 'Seagate Expansion Drive', 'ETRM_Inputs')
# ir = os.path.join('/Volumes', 'Seagate Expansion Drive')
ir = os.path.join('C:\Users\Mike\PyRANA\ETRM_inputs_Ameriflux')
sim = datetime(2007, 1, 1), datetime(2013, 12, 29)
run(ir, sim)
# ============= EOF =============================================
| StarcoderdataPython |
292782 | <gh_stars>1-10
import math
def isPrime(n):
if n == 1:
return False
i = 2
while i*i <= n:
if n % i == 0:
return False
i += 1
return True
t = int(input())
finalAns = []
for i in range(t):
n = int(input())
if(n<4):
readList = [i for i in range(1,n+1)]
readList.insert(0,1)
finalAns.append(str(1))
finalAns.append(readList)
continue
readList = [[1]]
for j in range(2, n+1):
count = 1
if(isPrime(j)):
readList[0].append(j)
elif(j%2==0):
readList.append([j])
else:
while True:
flag = 0
for m in readList[count]:
if(math.gcd(m, j)!=1):
count+=1
flag=1
break
if(flag==0):
readList[count].append(j)
break
finalAns.append([len(readList)])
for j in range(len(readList)):
readList[j].insert(0, len(readList[j]))
finalAns.append(readList[j])
for i in range(len(finalAns)):
print(" ".join(map(str, finalAns[i]))) | StarcoderdataPython |
1930366 | # coding: utf-8
import datetime
import os
AUTHOR = '<NAME>'
SITENAME = '<NAME>'
SITESUBTITLE = "Hello, I'm Martin, and this is my webpage."
SITEURL = ''
MENUITEMS = (
# Put this page first (it's "hidden" and must appear before categories in
# the menu)
("About me", "aboutme.html"),
)
PATH = 'content'
STATIC_PATHS = ("images",)
# We want "pages" to be in the root directory
PAGE_URL = "{slug}.html"
PAGE_SAVE_AS = "{slug}.html"
# Customize the scheme of urls for articles to match existing articles
INDEX_SAVE_AS = 'w/index.html'
ARTICLE_URL = 'w/{date:%Y}-{date:%m}-{date:%d}-{slug}.html'
ARTICLE_SAVE_AS = 'w/{date:%Y}-{date:%m}-{date:%d}-{slug}.html'
ARTICLE_LANG_URL = 'w/{date:%Y}-{date:%m}-{date:%d}-{slug}-{lang}.html'
ARTICLE_LANG_SAVE_AS = 'w/{date:%Y}-{date:%m}-{date:%d}-{slug}-{lang}.html'
DEFAULT_CATEGORY = "hidden"
CATEGORY_URL = "{slug}/"
CATEGORY_SAVE_AS = "{slug}/index.html"
# This allows us to write /(t|w)/index.html
TAG_URL = "{slug}/"
TAG_SAVE_AS = "{slug}/index.html"
# Because of talks
USE_FOLDER_AS_CATEGORY = False
TIMEZONE = 'Europe/Paris'
DEFAULT_LANG = 'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
('Python.org', 'http://python.org/'),
('Jinja2', 'http://jinja.pocoo.org/'),
('You can modify those links in your config file', '#'),)
# Social widget
SOCIAL = (('You can add links in your config file', '#'),
('Another social link', '#'),)
DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
THEME = "theme/default"
NOW = datetime.datetime.now()
DATE_FORMATS = {
'fr': '%d %b %Y',
'en': '%d %b %Y',
}
PLUGINS = ("homefront.pelican.spotify", )
| StarcoderdataPython |
6424565 | import numpy as np
import pandas as pd
from src.testing.rapid_test_reactions import rapid_test_reactions
def test_rapid_test_reactions():
states = pd.DataFrame()
states["quarantine_compliance"] = [0.0, 0.2, 0.4, 0.6, 0.8]
states["cd_received_rapid_test"] = 0
states["is_tested_positive_by_rapid_test"] = True
contacts = pd.DataFrame()
contacts["households"] = [True, True, True, True, True]
contacts["other_recurrent"] = [False, True, False, True, False]
contacts["other_non_recurrent"] = [5, 2, 2, 2, 2]
expected = pd.DataFrame()
expected["households"] = [True, True, True, True, 0]
expected["other_recurrent"] = [False, False, False, False, False]
expected["other_non_recurrent"] = [5, 0, 0, 0, 0]
params = pd.DataFrame(
data=[0.7, 0.15],
columns=["value"],
index=pd.MultiIndex.from_tuples(
[
("rapid_test_demand", "reaction", "hh_contacts_multiplier"),
("rapid_test_demand", "reaction", "not_hh_contacts_multiplier"),
]
),
)
res = rapid_test_reactions(states, contacts, params, None)
pd.testing.assert_frame_equal(res, expected, check_dtype=False)
def test_rapid_test_reactions_lln():
np.random.seed(38484)
states = pd.DataFrame()
states["quarantine_compliance"] = np.random.uniform(0, 1, size=10000)
states["cd_received_rapid_test"] = [0] * 9900 + [-3] * 90 + [-99] * 10
states["is_tested_positive_by_rapid_test"] = (
[True] * 9980 + [False] * 10 + [True] * 10
)
contacts = pd.DataFrame()
contacts["households"] = [True] * 10000
contacts["other"] = True
params = pd.DataFrame(
data=[0.7, 0.15],
columns=["value"],
index=pd.MultiIndex.from_tuples(
[
("rapid_test_demand", "reaction", "hh_contacts_multiplier"),
("rapid_test_demand", "reaction", "not_hh_contacts_multiplier"),
]
),
)
res = rapid_test_reactions(states, contacts, params, None)
quarantine_pool = res.loc[:9979]
share_meet_other = quarantine_pool["other"].mean()
share_meet_hh = quarantine_pool["households"].mean()
assert 0.145 < share_meet_other < 0.155
assert 0.695 < share_meet_hh < 0.705
assert (res.loc[9980:] == contacts.loc[9980:]).all().all()
| StarcoderdataPython |
12826576 | <reponame>Emiyalzn/EI328-hw1
import time
import pickle
import os
import numpy as np
import argparse
from utils import load_data, mse_loss, plot_boundaries, partition_data, plot_minmax_boundaries, result_dir, plot_datapoints
from model import MLP, MLQP
from copy import deepcopy
from multiprocessing import Pool
def parse_arguments():
parser = argparse.ArgumentParser("Training hyperparameters.")
parser.add_argument('--lr_1', type=float, default=1e-2, help="learning rate for v and b")
parser.add_argument('--lr_2', type=float, default=1e-2, help="learning rate for u")
parser.add_argument('--alpha_1', type=float, default=0., help="momentum rate for v and b")
parser.add_argument('--alpha_2', type=float, default=0., help="momentum rate for u")
parser.add_argument('--n_hid', type=int, default=32, help="size of hidden layer")
parser.add_argument('--n_epoch', type=int, default=30000, help="Max training epochs")
parser.add_argument('--type', choices=['vanilla', 'minmax'], default='vanilla', help="choose the training model type")
parser.add_argument('--model', choices=['mlp', 'mlqp'], default='mlp', help="use mlp or mlqp for training")
parser.add_argument('--partition_num', type=int, default=2, help="number of training set partitions")
parser.add_argument('--partition_mode', choices=['random', 'yaxis', 'yaxis+overlap'], default='random', help="data partition method")
parser.add_argument('--train_mode', choices=['sequential', 'parallel'], default='parallel', help="how to train the submodules")
parser.add_argument('--seed', type=int, default=None, help="fix the random seed")
args = parser.parse_args()
return args
def train_vanillanet(args):
xs, ys, labels = load_data('train')
x_s, y_s, label_s = load_data('test')
train_data = (xs, ys, labels)
test_data = (x_s, y_s, label_s)
train_accs, test_accs, losses, times = [], [], [], []
best_epoch, best_loss = 0, 1e6
# initialize models
if args.model == 'mlp':
model = MLP([2, args.n_hid, 1])
else:
model = MLQP([2, args.n_hid, 1])
# start training
start_time = time.time()
for epoch in range(args.n_epoch):
for i in range(len(xs)):
pred = model.forward(np.array([xs[i], ys[i]]))
model.backward(labels[i])
if args.model == 'mlp':
model.update(args.alpha_1, args.lr_1)
else:
model.update(args.alpha_1, args.alpha_2, args.lr_1, args.lr_2)
curr_time = time.time() - start_time
times.append(curr_time)
# predict on train and test data
train_acc, train_loss = predict(model, train_data)
test_acc, _ = predict(model, test_data)
losses.append(train_loss)
train_accs.append(train_acc)
test_accs.append(test_acc)
if epoch % 10 == 0:
print(f"Epoch {epoch}, Time {curr_time:.2f}, Loss {train_loss:.4f}, Train acc {train_acc:.4f}, Test acc {test_acc:.4f}")
# early stopping
if best_loss - train_loss > 0.0001:
best_loss = train_loss
best_epoch = epoch
elif epoch - best_epoch >= 200:
break
# save files
plot_boundaries(model, args.model, args.lr_1, args.lr_2, args.alpha_1)
save_file = open(os.path.join(result_dir, f"{args.model}_data.pkl"), 'wb')
pickle.dump(times, save_file)
pickle.dump(losses, save_file)
pickle.dump(train_accs, save_file)
pickle.dump(test_accs, save_file)
save_file.close()
def train_minmax_sequential(args):
xs, ys, labels = load_data('train')
x_s, y_s, label_s = load_data('test')
train_data = (xs, ys, labels)
test_data = (x_s, y_s, label_s)
white_subsets, black_subsets = partition_data(train_data, args.partition_mode, args.partition_num)
plot_datapoints(white_subsets, black_subsets)
train_accs, test_accs, losses, times = [], [], [], []
best_epoch, best_loss = 0, 1e6
# initialize models
models = []
for i in range(args.partition_num):
models.append([])
for j in range(args.partition_num):
if args.model == 'mlp':
models[i].append(MLP([2, args.n_hid, 1]))
else:
models[i].append(MLQP([2, args.n_hid, 1]))
# start training
start_time = time.time()
for epoch in range(args.n_epoch):
for i in range(args.partition_num):
for j in range(args.partition_num):
x_train = np.concatenate((white_subsets[i][0], black_subsets[j][0]), axis=0)
y_train = np.concatenate((white_subsets[i][1], black_subsets[j][1]), axis=0)
labels_train = np.concatenate((white_subsets[i][2], black_subsets[j][2]), axis=0)
for k in range(len(x_train)):
pred = models[i][j].forward(np.array([x_train[k], y_train[k]]))
models[i][j].backward(labels_train[k])
if args.model == 'mlp':
models[i][j].update(args.alpha_1, args.lr_1)
else:
models[i][j].update(args.alpha_1, args.alpha_2, args.lr_1, args.lr_2)
curr_time = time.time() - start_time
times.append(curr_time)
# predict on train and test data
train_acc, train_loss = predict_minmax(models, train_data)
test_acc, _ = predict_minmax(models, test_data)
losses.append(train_loss)
train_accs.append(train_acc)
test_accs.append(test_acc)
if epoch % 10 == 0:
print(f"Epoch {epoch}, Time {curr_time:.2f}, Loss {train_loss:.4f}, Train acc {train_acc:.4f}, Test acc {test_acc:.4f}")
# early stopping
if best_loss - train_loss > 0.0001:
best_loss = train_loss
best_epoch = epoch
elif epoch - best_epoch >= 200:
break
# Visualization and save files
plot_minmax_boundaries(models, args.model, args.partition_mode)
save_file = open(os.path.join(result_dir, f"minmax_{args.model}_data.pkl"), 'wb')
pickle.dump(times, save_file)
pickle.dump(losses, save_file)
pickle.dump(train_accs, save_file)
pickle.dump(test_accs, save_file)
save_file.close()
def train_one_model(model, data, args):
xs, ys, labels = data
best_epoch, best_loss = 0, 1e6
for epoch in range(args.n_epoch):
for k in range(len(xs)):
pred = model.forward(np.array([xs[k], ys[k]]))
model.backward(labels[k])
if args.model == 'mlp':
model.update(args.alpha_1, args.lr_1)
else:
model.update(args.alpha_1, args.alpha_2, args.lr_1, args.lr_2)
_, train_loss = predict(model, data)
# early stopping
if best_loss - train_loss > 0.0001:
best_loss = train_loss
best_epoch = epoch
elif epoch - best_epoch >= 200:
break
return model
def step_func_feeder(args, models, white_subsets, black_subsets):
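    # Generator yielding one (model, training data, args) task for every pair of
    # white/black subsets, so Pool.starmap_async can train the partition_num**2
    # submodules in parallel.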
partition_num = len(white_subsets)
for i in range(partition_num):
for j in range(partition_num):
x_train = np.concatenate((white_subsets[i][0], black_subsets[j][0]), axis=0)
y_train = np.concatenate((white_subsets[i][1], black_subsets[j][1]), axis=0)
labels_train = np.concatenate((white_subsets[i][2], black_subsets[j][2]), axis=0)
data_train = (x_train, y_train, labels_train)
yield models[i][j], data_train, args
def train_minmax_parallel(args):
num_workers = args.partition_num * args.partition_num
mp_pool = Pool(num_workers)
xs, ys, labels = load_data('train')
x_s, y_s, label_s = load_data('test')
train_data = (xs, ys, labels)
test_data = (x_s, y_s, label_s)
white_subsets, black_subsets = partition_data(train_data, args.partition_mode, args.partition_num)
plot_datapoints(white_subsets, black_subsets)
# initialize models
models = []
for i in range(args.partition_num):
models.append([])
for j in range(args.partition_num):
if args.model == 'mlp':
models[i].append(MLP([2, args.n_hid, 1]))
else:
models[i].append(MLQP([2, args.n_hid, 1]))
start_time = time.time()
pool_map = mp_pool.starmap_async(train_one_model, step_func_feeder(args, models, white_subsets, black_subsets))
results = pool_map.get()
mp_pool.close()
mp_pool.join()
for i in range(args.partition_num):
for j in range(args.partition_num):
models[i][j] = results[i*args.partition_num+j]
train_acc, train_loss = predict_minmax(models, train_data)
test_acc, _ = predict_minmax(models, test_data)
curr_time = time.time() - start_time
print(f"Time {curr_time:.2f}, Loss {train_loss:.4f}, Train acc {train_acc:.4f}, Test acc {test_acc:.4f}")
# Visualization
plot_minmax_boundaries(models, args.model, args.partition_mode)
def predict(model, data):
xs, ys, labels = data
preds = []
for i in range(len(xs)):
preds.append(model.forward(np.array([xs[i],ys[i]])))
preds = np.squeeze(np.array(preds))
loss = mse_loss(preds, labels)
acc = ((preds > 0.5) == labels).mean()
return acc, loss
def predict_minmax(models, data):
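    # Min-max modular combination: take the minimum over models trained with the
    # same white subset (min over j), then the maximum of those minima (max over i).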
num_partitions = len(models)
xs, ys, labels = data
preds = []
min_results = []
single_pred = np.zeros(len(xs))
for i in range(num_partitions):
preds.append([])
for j in range(num_partitions):
for k in range(len(xs)):
single_pred[k] = models[i][j].forward(np.array([xs[k],ys[k]]))
preds[i].append(deepcopy(single_pred))
for i in range(num_partitions):
min_result = np.min(preds[i],axis=0)
min_results.append(deepcopy(min_result))
max_result = np.max(min_results, axis=0)
loss = mse_loss(max_result, labels)
acc = ((max_result > 0.5) == labels).mean()
return acc, loss
if __name__ == '__main__':
args = parse_arguments()
    if args.seed is not None:
np.random.seed(args.seed)
if args.type == 'vanilla':
train_vanillanet(args)
elif args.type == 'minmax':
if args.train_mode == 'parallel':
train_minmax_parallel(args)
else:
train_minmax_sequential(args)
else:
raise ValueError("Unknown model type!")
| StarcoderdataPython |
11307927 | from math import exp, factorial
class Erlang(object):
def __init__(self, shrinkage:float=0.35, calls:int=200, aht:int=400, tat:int=20, ap:int=60):
super().__init__()
self.shrinkage = shrinkage
self.calls = calls
self.aht = aht
self.tat = tat
self.ap = ap
def __call__(self, shrinkage:float, calls:int, aht:int, tat:int, ap:int):
self.shrinkage = shrinkage
self.calls = calls
self.aht = aht
self.tat = tat
self.ap = ap
def __str__(self):
        return 'This class calculates Erlang modelling equations for scientifically staffing a call center. \nThe time unit of measure is one hour.'
def GammaFunction(self, x, y, iterations):
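        # Truncated series 1 + sum_{i=1..iterations} y**i / ((x+1)(x+2)...(x+i));
        # used by ErlangA below, where more iterations give a closer approximation.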
def GetProd(x_, k):
prod_ = 1
for j in range(1, k + 1):
prod_ *= x_ + j
return prod_
sum_ = 0
for i in range(1, iterations + 1):
sum_ += (y**i / GetProd(x, i))
return 1 + sum_
def ErlangA(self, x, y, ro):
return 1 / (ro * self.GammaFunction(x, y, 100)) + 1 - ( 1 / ro )
def ErlangB(self, E, N):
num = (E**N) / factorial(N)
        sum_ = 0
        for i in range(0, N + 1):
            sum_ += (E**i) / factorial(i)
        return num / sum_
def ErlangC(self, A, N):
L = (A**N / factorial(N)) * (N / (N - A))
sum_ = 0
for i in range(N):
sum_ += (A**i) / factorial(i)
return ( L / (sum_ + L) )
def GradeOfService(self, Erlang, n, t, AHT):
GoS = 1 - self.ErlangC(Erlang, n) * exp( -(n - Erlang) * (t / AHT) )
return GoS
@property
def get_shrinkage(self):
return self.shrinkage # This parameter has been set like the industry standard (i.e. 35%) . . .
@property
def get_calls(self):
        return self.calls # Forecast number of calls arriving in the planning interval . . .
@property
def get_aht(self):
return self.aht # Model parameter called average handling time that is answer to the call duration (i.e. AHT) . . .
@property
def get_tat(self):
return self.tat # Model parameter called targeting answering time (i.e. TAT) . . .
@property
def get_ap(self):
return self.ap # Model parameter called average patience used in abandoned rate calculation (i.e. AP) . . .
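# Illustrative usage sketch (the staffing level of 25 agents below is an
# assumption, not part of the class):
#   erl = Erlang()                                  # defaults: 200 calls, 400s AHT, 20s TAT
#   traffic = erl.get_calls * erl.get_aht / 3600.0  # offered load in Erlangs
#   gos = erl.GradeOfService(traffic, 25, erl.get_tat, erl.get_aht)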
| StarcoderdataPython |
1999147 | """Manage in-memory profile interaction."""
from collections import OrderedDict
from typing import Any, Mapping, Type
from ..config.injection_context import InjectionContext
from ..storage.base import BaseStorage
from ..utils.classloader import DeferLoad
from ..wallet.base import BaseWallet
from .profile import Profile, ProfileManager, ProfileSession
STORAGE_CLASS = DeferLoad("aries_cloudagent.storage.in_memory.InMemoryStorage")
WALLET_CLASS = DeferLoad("aries_cloudagent.wallet.in_memory.InMemoryWallet")
class InMemoryProfile(Profile):
"""
Provide access to in-memory profile management.
Used primarily for testing.
"""
BACKEND_NAME = "in_memory"
TEST_PROFILE_NAME = "test-profile"
def __init__(self, *, context: InjectionContext = None, name: str = None):
"""Create a new InMemoryProfile instance."""
global STORAGE_CLASS, WALLET_CLASS
super().__init__(context=context, name=name, created=True)
self.keys = {}
self.local_dids = {}
self.pair_dids = {}
self.records = OrderedDict()
def session(self, context: InjectionContext = None) -> "ProfileSession":
"""Start a new interactive session with no transaction support requested."""
return InMemoryProfileSession(self, context=context)
def transaction(self, context: InjectionContext = None) -> "ProfileSession":
"""
Start a new interactive session with commit and rollback support.
If the current backend does not support transactions, then commit
and rollback operations of the session will not have any effect.
"""
return InMemoryProfileSession(self, context=context)
@classmethod
def test_profile(cls) -> "InMemoryProfile":
"""Used in tests to create a standard InMemoryProfile."""
return InMemoryProfile(
context=InjectionContext(enforce_typing=False),
name=InMemoryProfile.TEST_PROFILE_NAME,
)
@classmethod
def test_session(
cls, settings: Mapping[str, Any] = None, bind: Mapping[Type, Any] = None
) -> "InMemoryProfileSession":
"""Used in tests to quickly create InMemoryProfileSession."""
session = InMemoryProfileSession(cls.test_profile(), settings=settings)
session._active = True
session._init_context()
if bind:
for k, v in bind.items():
if v:
session.context.injector.bind_instance(k, v)
else:
session.context.injector.clear_binding(k)
return session
class InMemoryProfileSession(ProfileSession):
"""An active connection to the profile management backend."""
def __init__(
self,
profile: Profile,
*,
context: InjectionContext = None,
settings: Mapping[str, Any] = None
):
"""Create a new InMemoryProfileSession instance."""
super().__init__(profile=profile, context=context, settings=settings)
async def _setup(self):
"""Create the session or transaction connection, if needed."""
await super()._setup()
self._init_context()
def _init_context(self):
"""Initialize the session context."""
self._context.injector.bind_instance(BaseStorage, STORAGE_CLASS(self.profile))
self._context.injector.bind_instance(BaseWallet, WALLET_CLASS(self.profile))
@property
def storage(self) -> BaseStorage:
"""Get the `BaseStorage` implementation (helper specific to in-memory profile)."""
return self._context.inject(BaseStorage)
@property
def wallet(self) -> BaseWallet:
"""Get the `BaseWallet` implementation (helper specific to in-memory profile)."""
return self._context.inject(BaseWallet)
class InMemoryProfileManager(ProfileManager):
"""Manager for producing in-memory wallet/storage implementation."""
async def provision(
self, context: InjectionContext, config: Mapping[str, Any] = None
) -> Profile:
"""Provision a new instance of a profile."""
return InMemoryProfile(context=context, name=(config or {}).get("name"))
async def open(
self, context: InjectionContext, config: Mapping[str, Any] = None
) -> Profile:
"""Open an instance of an existing profile."""
return await self.provision(context, config)
| StarcoderdataPython |
264187 | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
class Config(object):
def __init__(self, **kwargs):
self.defaults = kwargs
    def __getattr__(self, name):
        try:
            return getattr(settings, name)
        except AttributeError:
            if name not in self.defaults:
                raise ImproperlyConfigured('[Django-Github-OAuth] Missing setting {0}'.format(name))
            return self.defaults[name]
if __name__ != '__main__':
conf = Config(
CLIENT_ID=settings.GITHUB_CLIENT_ID,
CLIENT_SECRET=settings.GITHUB_CLIENT_SECRET,
REDIRECT_URI=settings.GITHUB_REDIRECT_URI,
ACCEPT_TYPE='application/json',
)
| StarcoderdataPython |
6415485 | <filename>src/fizzbuzz/fizzbuzz.py
def fizzbuzz(n: int) -> dict:
error = _invalid_n(n)
if error:
return dict(error=error)
fizzbuzz = [_do_fizzbuzz(i) for i in range(1, n + 1)]
return dict(data=fizzbuzz)
def _do_fizzbuzz(n: int) -> str:
parts = []
if n % 3 == 0:
parts.append("fizz")
if n % 5 == 0:
parts.append("buzz")
if parts:
return "".join(parts)
else:
return str(n)
def _invalid_n(n):
req = "Please provide a positive integer less than or equal to 1_000."
if not isinstance(n, int):
return f"Not an integer. {req}"
if n < 1:
return f"Zero and negative integers not supported. {req}"
if n > 1_000:
return f"Integer too large. {req}"
return None
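# Illustrative examples derived from the rules above:
#   fizzbuzz(5)  -> {'data': ['1', '2', 'fizz', '4', 'buzz']}
#   fizzbuzz(0)  -> {'error': 'Zero and negative integers not supported. ...'}
#   fizzbuzz(15) -> last entry of 'data' is 'fizzbuzz'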
| StarcoderdataPython |
6598440 | # Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Umpire RPC base class."""
def RPCCall(method):
"""Enables the method to be Umpire RPC function.
Args:
method: an unbound derived UmpireRPC class method.
Example:
class Foo(UmpireRPC):
def NonRPCFunction():
pass
@RPCCall
def RPCFunction(parameter, ...):
pass
"""
method.is_rpc_method = True
return method
class UmpireRPC:
"""RPC base class.
Properties:
daemon: UmpireDaemon object.
env: UmpireEnv object.
"""
def __init__(self, daemon):
self.daemon = daemon
self.env = daemon.env
@RPCCall
def __bool__(self):
"""Truth value testing.
It is used for handling request issued when client side performs truth
value testing on RPC server proxy. For example:
p = xmlrpclib.ServerProxy('http://127.0.0.1:9090')
      if p: # <- this invokes __bool__() RPC call.
p.DoSomething()
Returns:
True
"""
return True
| StarcoderdataPython |
4853567 | <filename>make_CSVs.py
#This file takes the survey response data from the original Excel file and transforms
#it into two CSV files suitable for NLP analysis.
import pandas as pd
import numpy as np
import pickle
#Read in the Excel file
data_dict = pd.read_excel('data_files/NPS_SignificanceUnderstanding.xlsx', sheetname=None)
#Delete the Excel sheet that summarizes the data
del data_dict['SUMMARY']
#Concatenate the lines of the remaining Excel sheets (one sheet per NPS unit)
data = pd.concat(data_dict.values())
#Drop irrelevant & incompletely filled in columns ('Updated Significance Code', 'Updated Significance Keywords')
data = data.iloc[:, 0:4]
#Drop rows that are missing the Visitor's significance comment
data = data[pd.notnull(data["Visitor's Significance Comment"])]
data = data.reset_index()
data = data.drop('index', axis=1)
#Rename columns
data.columns = ['ParkName', 'ParkAlphaCode', 'SignificanceComment', 'SignificanceCode']
print "Summary of Data by Individual Survey Response: "
print data.info()
print data.head()
#How many survey responses are there for each park unit?
counts = data.groupby(by='ParkAlphaCode').sum()
print counts.head()
max = np.max(counts)
print 'Maximum number of survey responses: ', max
min = np.min(counts)
print 'Minimum number of survey responses: ', min
#Which units saw the least / most survey responses?
sorted_counts = counts.sort_values(by='SignificanceCode')
print sorted_counts
#Save this as a CSV for use with any analyses where I want to examine the individual survey response
data.to_csv('data_files/npdata_long')
#Now create a second dataframe that sums all the comments for each unique park unit. Use this when main interest is the park units themselves.
def combine_texts(grp):
"""Function to concatenate all entries under 'SignificanceComment' together for each park unit"""
return grp.SignificanceComment.astype(str).str.cat(sep = " ")
#Apply combine_texts function to the groups defined by each ParkAlphaCode
combined_comments = data.groupby(['ParkAlphaCode']).apply(combine_texts)
#Create dataframe with only one entry per park unit & merge it with the combined_comments series
data_bypark = data[['ParkName', 'ParkAlphaCode']].drop_duplicates()
data_bypark = data_bypark.merge(pd.DataFrame(combined_comments), left_on='ParkAlphaCode', right_index=True)
data_bypark.columns = ['ParkName', 'ParkAlphaCode', 'SignificanceComments']
data_bypark = data_bypark.reset_index()
data_bypark = data_bypark.drop('index', axis=1)
print "Summary of Data by Park Unit"
print data_bypark.info()
#Export this to its own CSV
data_bypark.to_csv('data_files/data_bypark')
| StarcoderdataPython |
9723787 | import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Posts(Base):
__tablename__ = 'posts'
post_id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
post_hash = sqlalchemy.Column(sqlalchemy.String)
file_name = sqlalchemy.Column(sqlalchemy.String)
date = sqlalchemy.Column(sqlalchemy.String)
source_url = sqlalchemy.Column(sqlalchemy.String)
source_info = sqlalchemy.Column(sqlalchemy.String)
tags = sqlalchemy.Column(sqlalchemy.String)
def __repr__(self):
return "<Post(post_id='%s', file_name='%s')>" % (self.post_id, self.file_name)
| StarcoderdataPython |
8154027 | <reponame>jabra98/aoc
import sys; datafilepath = sys.argv[1]
lines = open(datafilepath).read().splitlines()
pairs = {'(':')','<':'>','[':']','{':'}',
')':'(','>':'<',']':'[','}':'{'}
end_seqs = list()
ans = 0
for i in lines:
s = list()
is_valid=True
for j in i:
if j in ['(','<','[','{']:
s.append(j)
else:
if len(s) == 0:
continue
top = s[len(s)-1]
s.pop()
if j != pairs[top]:
is_valid = False
break
if is_valid:
end_seqs.append(s)
points = {')':1, ']':2, '}':3, '>':4}
p = list()
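# Score each incomplete line: close the leftover brackets from the top of the
# stack outward, multiplying the running score by 5 and adding the closing
# character's points; the final answer is the median of the completion scores.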
for i in end_seqs:
if len(i) == 0:
continue
anw = 0
while len(i) > 0:
top = i[len(i)-1]
i.pop()
anw *= 5
anw += points[pairs[top]]
p.append(anw)
p = sorted(p)
print(p[int(len(p)/2)])
| StarcoderdataPython |
11357974 | # Generated by Django 3.2.9 on 2021-12-09 18:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('meals', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='meal',
name='prev_status',
field=models.CharField(choices=[('p', 'Pending'), ('c', 'Completed'), ('e', 'Expired'), ('a', 'Aborted')], default=None, max_length=1, null=True),
),
]
| StarcoderdataPython |
8041814 | # Generated by Django 3.1.1 on 2020-10-21 17:36
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('nutrihacker', '0010_auto_20201021_1333'),
]
operations = [
migrations.RenameField(
model_name='recipefood',
old_name='amount_unit',
new_name='portions_unit',
),
]
| StarcoderdataPython |
3263338 | <filename>events/migrations/0003_auto_20200421_0138.py
# Generated by Django 3.0.5 on 2020-04-21 05:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0002_auto_20200421_0134'),
]
operations = [
migrations.RemoveField(
model_name='event',
name='day',
),
migrations.RemoveField(
model_name='event',
name='notes',
),
migrations.AlterField(
model_name='event',
name='end_time',
field=models.DateTimeField(help_text='End time', verbose_name='End time'),
),
migrations.AlterField(
model_name='event',
name='start_time',
field=models.DateTimeField(help_text='Starting time', verbose_name='Starting time'),
),
]
| StarcoderdataPython |
8139240 | from pom_pages.recreations import RecreationsPage
recreation_name = "text=playwright-test"
def test_find_recreation_is_working(page):
recreation_page = RecreationsPage(page)
recreation_page.open()
recreation_page.select_recreation(recreation_name)
# verify hike name
assert page.inner_text(
"//*[@id='hikeTitle']") == "playwright-test"
| StarcoderdataPython |
9083 | import sqlite3
from bottle import route, run,debug,template,request,redirect
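# Minimal Bottle + SQLite to-do list app: list open tasks, add new ones, edit
# existing entries, and delete items, all persisted in the local todo.db file.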
@route('/todo')
def todo_list():
conn = sqlite3.connect('todo.db')
c = conn.cursor()
c.execute("SELECT id, task FROM todo WHERE status LIKE '1'")
result = c.fetchall()
c.close()
output = template('make_table', rows=result)
return output
@route('/new', method='GET')
def new_item():
if request.GET.save:
new = request.GET.task.strip()
conn = sqlite3.connect('todo.db')
c = conn.cursor()
c.execute("INSERT INTO todo (task,status) VALUES (?,?)", (new,1))
new_id = c.lastrowid
conn.commit()
c.close()
redirect('/todo')
#return '<p>The new task was inserted into the database, the ID is %s</p>' % new_id
else:
return template('new_task.tpl')
@route('/do_insert' , method='GET')
def get_id():
redirect('/new')
@route('/edit/<no:int>', method='GET')
def edit_item(no):
if request.GET.save:
edit = request.GET.task.strip()
status = request.GET.status.strip()
if status == 'open':
status = 1
else:
status = 0
conn = sqlite3.connect('todo.db')
c = conn.cursor()
c.execute("UPDATE todo SET task = ?, status = ? WHERE id LIKE ?", (edit, status, no))
conn.commit()
return '<p>The item number %s was successfully updated</p>' % no
else:
conn = sqlite3.connect('todo.db')
c = conn.cursor()
        c.execute("SELECT task FROM todo WHERE id LIKE ?", (str(no),))
cur_data = c.fetchone()
return template('edit_task', old=cur_data, no=no)
@route('/find_edit' , method='GET')
def get_id():
id_edit = request.GET.editdata.strip()
redirect('/edit/' + id_edit)
@route('/delete/<no:int>', method='GET')
def delete_item(no):
conn = sqlite3.connect('todo.db')
c = conn.cursor()
    c.execute("DELETE FROM todo WHERE id LIKE ?", (str(no),))
conn.commit()
redirect('/todo')
@route('/find_delete' , method='GET')
def get_id():
id_delete = request.GET.deletedata.strip()
redirect('/delete/' + id_delete)
debug(True)
run(reloader=True)
| StarcoderdataPython |
345140 | <reponame>banesullivan/PVGeophysics
__all__ = [
'TensorMeshReader',
'TensorMeshAppender',
'TopoMeshAppender',
]
__displayname__ = 'Tensor Mesh'
import os
import sys
import numpy as np
import pandas as pd
import vtk
from .. import _helpers, interface
from ..base import AlgorithmBase
from .two_file_base import ModelAppenderBase, ubcMeshReaderBase
if sys.version_info < (3,):
from StringIO import StringIO
else:
from io import StringIO
class TensorMeshReader(ubcMeshReaderBase):
"""UBC Mesh 2D/3D models are defined using a 2-file format. The "mesh" file
describes how the data is discretized. The "model" file lists the physical
property values for all cells in a mesh. A model file is meaningless without
an associated mesh file. The reader will automatically detect if the mesh is
2D or 3D and read the remainder of the data with that dimensionality
assumption. If the mesh file is 2D, then then model file must also be in the
2D format (same for 3D).
Note:
Model File is optional. Reader will still construct
``vtkRectilinearGrid`` safely.
"""
__displayname__ = 'UBC Tensor Mesh Reader'
__category__ = 'reader'
description = 'PVGeo: UBC Mesh 2D/3D Two-File Format'
def __init__(self, nOutputPorts=1, outputType='vtkRectilinearGrid', **kwargs):
ubcMeshReaderBase.__init__(
self, nOutputPorts=nOutputPorts, outputType=outputType, **kwargs
)
self.__mesh = vtk.vtkRectilinearGrid()
self.__models = []
@staticmethod
def place_model_on_mesh(mesh, model, data_name='Data'):
"""Places model data onto a mesh. This is for the UBC Grid data readers
to associate model data with the mesh grid.
Args:
mesh (vtkRectilinearGrid): The ``vtkRectilinearGrid`` that is the
mesh to place the model data upon.
model (np.array): A NumPy float array that holds all of the data to
place inside of the mesh's cells.
data_name (str) : The name of the model data array once placed on the
``vtkRectilinearGrid``.
Return:
vtkRectilinearGrid :
Returns the input ``vtkRectilinearGrid`` with model data appended.
"""
if isinstance(model, dict):
for key in model.keys():
TensorMeshReader.place_model_on_mesh(mesh, model[key], data_name=key)
return mesh
# model.GetNumberOfValues() if model is vtkDataArray
# Make sure this model file fits the dimensions of the mesh
ext = mesh.GetExtent()
n1, n2, n3 = ext[1], ext[3], ext[5]
if n1 * n2 * n3 < len(model):
raise _helpers.PVGeoError(
'Model `%s` has more data than the given mesh has cells to hold.'
% data_name
)
elif n1 * n2 * n3 > len(model):
raise _helpers.PVGeoError(
'Model `%s` does not have enough data to fill the given mesh\'s cells.'
% data_name
)
# Swap axes because VTK structures the coordinates a bit differently
# - This is absolutely crucial!
# - Do not play with unless you know what you are doing!
if model.ndim > 1 and model.ndim < 3:
ncomp = model.shape[1]
model = np.reshape(model, (n1, n2, n3, ncomp))
model = np.swapaxes(model, 0, 1)
model = np.swapaxes(model, 0, 2)
# Now reverse Z axis
model = model[::-1, :, :, :] # Note it is in Fortran ordering
model = np.reshape(model, (n1 * n2 * n3, ncomp))
else:
model = np.reshape(model, (n1, n2, n3))
model = np.swapaxes(model, 0, 1)
model = np.swapaxes(model, 0, 2)
# Now reverse Z axis
model = model[::-1, :, :] # Note it is in Fortran ordering
model = model.flatten()
# Convert data to VTK data structure and append to output
c = interface.convert_array(model, name=data_name, deep=True)
# THIS IS CELL DATA! Add the model data to CELL data:
mesh.GetCellData().AddArray(c)
return mesh
# ------------------------------------------------------------------#
# ---------------------- UBC MESH 2D ------------------------#
# ------------------------------------------------------------------#
@staticmethod
def ubc_mesh_2d(FileName, output):
"""This method reads a UBC 2D Mesh file and builds an empty
``vtkRectilinearGrid`` for data to be inserted into. `Format Specs`_.
.. _Format Specs: http://giftoolscookbook.readthedocs.io/en/latest/content/fileFormats/mesh2Dfile.html
Args:
FileName (str) : The mesh filename as an absolute path for the input
mesh file in UBC 3D Mesh Format.
output (vtkRectilinearGrid) : The output data object
Return:
vtkRectilinearGrid :
a ``vtkRectilinearGrid`` generated from the UBC 3D Mesh grid.
Mesh is defined by the input mesh file.
No data attributes here, simply an empty mesh. Use the
``place_model_on_mesh()`` method to associate with model data.
"""
# Read in data from file
xpts, xdisc, zpts, zdisc = ubcMeshReaderBase._ubc_mesh_2d_part(FileName)
nx = np.sum(np.array(xdisc, dtype=int)) + 1
nz = np.sum(np.array(zdisc, dtype=int)) + 1
# Now generate the vtkRectilinear Grid
def _genCoords(pts, disc, z=False):
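            # Expand breakpoint values and per-segment cell counts into node
            # coordinates; Z values are negated and reversed because the UBC
            # format treats down as the positive Z direction.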
c = [float(pts[0])]
for i in range(len(pts) - 1):
start = float(pts[i])
stop = float(pts[i + 1])
num = int(disc[i])
w = (stop - start) / num
for j in range(1, num):
c.append(start + (j) * w)
c.append(stop)
c = np.array(c, dtype=float)
if z:
c = -c[::-1]
return interface.convert_array(c, deep=True)
xcoords = _genCoords(xpts, xdisc)
zcoords = _genCoords(zpts, zdisc, z=True)
ycoords = interface.convert_array(np.zeros(1), deep=True)
output.SetDimensions(nx, 2, nz) # note this subtracts 1
output.SetXCoordinates(xcoords)
output.SetYCoordinates(ycoords)
output.SetZCoordinates(zcoords)
return output
@staticmethod
def ubc_model_2d(FileName):
"""Reads a 2D model file and returns a 1D NumPy float array. Use the
``place_model_on_mesh()`` method to associate with a grid.
Note:
Only supports single component data
Args:
FileName (str) : The model filename as an absolute path for the
input model file in UBCMesh Model Format. Also accepts a list of
string file names.
Return:
np.array :
a NumPy float array that holds the model data read from
the file. Use the ``place_model_on_mesh()`` method to associate
with a grid. If a list of file names is given then it will
return a dictionary of NumPy float array with keys as the
basenames of the files.
"""
if isinstance(FileName, (list, tuple)):
out = {}
for f in FileName:
out[os.path.basename(f)] = TensorMeshReader.ubc_model_2d(f)
return out
dim = np.genfromtxt(
FileName, dtype=int, delimiter=None, comments='!', max_rows=1
)
names = ['col%d' % i for i in range(dim[0])]
df = pd.read_csv(
FileName, names=names, delim_whitespace=True, skiprows=1, comment='!'
)
data = df.values
if np.shape(data)[0] != dim[1] and np.shape(data)[1] != dim[0]:
            raise _helpers.PVGeoError('Model file `%s` improperly formatted.' % FileName)
return data.flatten(order='F')
def __ubc_mesh_data_2d(self, filename_mesh, filename_models, output):
"""Helper method to read a 2D mesh"""
# Construct/read the mesh
if self.need_to_readMesh():
TensorMeshReader.ubc_mesh_2d(filename_mesh, self.__mesh)
self.need_to_readMesh(flag=False)
output.DeepCopy(self.__mesh)
if self.need_to_readModels() and self.this_has_models():
self.__models = []
for f in filename_models:
# Read the model data
self.__models.append(TensorMeshReader.ubc_model_2d(f))
self.need_to_readModels(flag=False)
return output
# ------------------------------------------------------------------#
# ---------------------- UBC MESH 3D ------------------------#
# ------------------------------------------------------------------#
@staticmethod
def ubc_mesh_3d(FileName, output):
"""This method reads a UBC 3D Mesh file and builds an empty
``vtkRectilinearGrid`` for data to be inserted into.
Args:
FileName (str) : The mesh filename as an absolute path for the input
mesh file in UBC 3D Mesh Format.
output (vtkRectilinearGrid) : The output data object
Return:
vtkRectilinearGrid :
a ``vtkRectilinearGrid`` generated from the UBC 3D Mesh grid.
Mesh is defined by the input mesh file.
No data attributes here, simply an empty mesh. Use the
``place_model_on_mesh()`` method to associate with model data.
"""
# --- Read in the mesh ---#
fileLines = np.genfromtxt(FileName, dtype=str, delimiter='\n', comments='!')
# Get mesh dimensions
dim = np.array(fileLines[0].split('!')[0].split(), dtype=int)
dim = (dim[0] + 1, dim[1] + 1, dim[2] + 1)
# The origin corner (Southwest-top)
# - Remember UBC format specifies down as the positive Z
# - Easting, Northing, Altitude
oo = np.array(fileLines[1].split('!')[0].split(), dtype=float)
ox, oy, oz = oo[0], oo[1], oo[2]
# Read cell sizes for each line in the UBC mesh files
def _readCellLine(line):
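            # A UBC mesh line may compress repeated cell widths as "N*width";
            # expand every segment into an explicit array of cell sizes.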
line_list = []
for seg in line.split():
if '*' in seg:
sp = seg.split('*')
seg_arr = np.ones((int(sp[0]),), dtype=float) * float(sp[1])
else:
seg_arr = np.array([float(seg)], dtype=float)
line_list.append(seg_arr)
return np.concatenate(line_list)
# Read the cell sizes
cx = _readCellLine(fileLines[2].split('!')[0])
cy = _readCellLine(fileLines[3].split('!')[0])
cz = _readCellLine(fileLines[4].split('!')[0])
# Invert the indexing of the vector to start from the bottom.
cz = cz[::-1]
# Adjust the reference point to the bottom south west corner
oz = oz - np.sum(cz)
# Now generate the coordinates for from cell width and origin
cox = ox + np.cumsum(cx)
cox = np.insert(cox, 0, ox)
coy = oy + np.cumsum(cy)
coy = np.insert(coy, 0, oy)
coz = oz + np.cumsum(cz)
coz = np.insert(coz, 0, oz)
# Set the dims and coordinates for the output
output.SetDimensions(dim[0], dim[1], dim[2])
# Convert to VTK array for setting coordinates
output.SetXCoordinates(interface.convert_array(cox, deep=True))
output.SetYCoordinates(interface.convert_array(coy, deep=True))
output.SetZCoordinates(interface.convert_array(coz, deep=True))
return output
def __ubc_mesh_data_3d(self, filename_mesh, filename_models, output):
"""Helper method to read a 3D mesh"""
# Construct/read the mesh
if self.need_to_readMesh():
TensorMeshReader.ubc_mesh_3d(filename_mesh, self.__mesh)
self.need_to_readMesh(flag=False)
output.DeepCopy(self.__mesh)
if self.need_to_readModels() and self.this_has_models():
self.__models = []
for f in filename_models:
# Read the model data
self.__models.append(TensorMeshReader.ubc_model_3d(f))
self.need_to_readModels(flag=False)
return output
def __ubc_tensor_mesh(self, filename_mesh, filename_models, output):
"""Wrapper to Read UBC GIF 2D and 3D meshes. UBC Mesh 2D/3D models are
defined using a 2-file format. The "mesh" file describes how the data is
        discretized. The "model" file lists the physical property values for all
cells in a mesh. A model file is meaningless without an associated mesh
        file. If the mesh file is 2D, then the model file must also be in the
2D format (same for 3D).
Args:
filename_mesh (str) : The mesh filename as an absolute path for the
input mesh file in UBC 2D/3D Mesh Format
filename_models (str or list(str)) : The model filename(s) as an
absolute path for the input model file in UBC 2D/3D Model Format.
output (vtkRectilinearGrid) : The output data object
Return:
vtkRectilinearGrid :
a ``vtkRectilinearGrid`` generated from the UBC 2D/3D Mesh grid.
Mesh is defined by the input mesh file.
Cell data is defined by the input model file.
"""
# Check if the mesh is a UBC 2D mesh
if self.is_2d():
self.__ubc_mesh_data_2d(filename_mesh, filename_models, output)
# Check if the mesh is a UBC 3D mesh
elif self.is_3d():
self.__ubc_mesh_data_3d(filename_mesh, filename_models, output)
else:
raise _helpers.PVGeoError('File format not recognized')
return output
def RequestData(self, request, inInfo, outInfo):
"""Handles data request by the pipeline."""
# Get output:
output = self.GetOutputData(outInfo, 0)
# Get requested time index
i = _helpers.get_requested_time(self, outInfo)
self.__ubc_tensor_mesh(
self.get_mesh_filename(), self.get_model_filenames(), output
)
# Place the model data for given timestep onto the mesh
if len(self.__models) > i:
TensorMeshReader.place_model_on_mesh(
output, self.__models[i], self.get_data_name()
)
return 1
def RequestInformation(self, request, inInfo, outInfo):
"""Handles info request by pipeline about timesteps and grid extents."""
# Call parent to handle time stuff
ubcMeshReaderBase.RequestInformation(self, request, inInfo, outInfo)
# Now set whole output extent
if self.need_to_readMesh():
ext = self._read_extent()
info = outInfo.GetInformationObject(0)
# Set WHOLE_EXTENT: This is absolutely necessary
info.Set(vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT(), ext, 6)
return 1
def clear_mesh(self):
"""Use to clean/rebuild the mesh"""
self.__mesh = vtk.vtkRectilinearGrid()
ubcMeshReaderBase.clear_models(self)
def clear_models(self):
"""Use to clean the models and reread"""
self.__models = []
ubcMeshReaderBase.clear_models(self)
###############################################################################
class TensorMeshAppender(ModelAppenderBase):
"""This filter reads a timeseries of models and appends it to an input
``vtkRectilinearGrid``
"""
__displayname__ = 'UBC Tensor Mesh Appender'
__category__ = 'filter'
def __init__(self, **kwargs):
ModelAppenderBase.__init__(
self,
inputType='vtkRectilinearGrid',
outputType='vtkRectilinearGrid',
**kwargs
)
    def _read_up_front(self):
        """Internal helper to read data at start"""
reader = ubcMeshReaderBase.ubc_model_3d
if not self._is_3D:
# Note how in UBC format, 2D grids are specified on an XZ plane (no Y component)
# This will only work prior to rotations to account for real spatial reference
reader = TensorMeshReader.ubc_model_2d
self._models = []
for f in self._model_filenames:
# Read the model data
self._models.append(reader(f))
self.need_to_read(flag=False)
return
    def _place_on_mesh(self, output, idx=0):
        """Internal helper to place a model on the mesh for a given index"""
TensorMeshReader.place_model_on_mesh(
output, self._models[idx], self.get_data_name()
)
return
###############################################################################
class TopoMeshAppender(AlgorithmBase):
"""This filter reads a single discrete topography file and appends it as a
boolean data array.
"""
__displayname__ = 'Append UBC Discrete Topography'
__category__ = 'filter'
def __init__(
self, inputType='vtkRectilinearGrid', outputType='vtkRectilinearGrid', **kwargs
):
AlgorithmBase.__init__(
self,
nInputPorts=1,
inputType=inputType,
nOutputPorts=1,
outputType=outputType,
)
self._topoFileName = kwargs.get('filename', None)
self.__indices = None
self.__need_to_read = True
self.__ne, self.__nn = None, None
def need_to_read(self, flag=None):
"""Ask self if the reader needs to read the files again
Args:
flag (bool): if the flag is set then this method will set the read
status
Return:
bool:
The status of the reader aspect of the filter.
"""
if flag is not None and isinstance(flag, (bool, int)):
self.__need_to_read = flag
return self.__need_to_read
def Modified(self, read_again=True):
"""Call modified if the files needs to be read again."""
if read_again:
self.__need_to_read = read_again
AlgorithmBase.Modified(self)
def modified(self, read_again=True):
"""Call modified if the files needs to be read again."""
return self.Modified(read_again=read_again)
    def _read_up_front(self):
        """Internal helper to read data at start"""
# Read the file
content = np.genfromtxt(
self._topoFileName, dtype=str, delimiter='\n', comments='!'
)
dim = content[0].split()
self.__ne, self.__nn = int(dim[0]), int(dim[1])
self.__indices = pd.read_csv(
StringIO("\n".join(content[1::])),
names=['i', 'j', 'k'],
delim_whitespace=True,
)
# NOTE: K indices are inverted
self.need_to_read(flag=False)
return
    def _place_on_mesh(self, output):
        """Internal helper to place an active cells model on the mesh"""
        # Check mesh extents to match the topography
nx, ny, nz = output.GetDimensions()
nx, ny, nz = nx - 1, ny - 1, nz - 1 # because GetDimensions counts the nodes
topz = np.max(self.__indices['k']) + 1
if nx != self.__nn or ny != self.__ne or topz > nz:
raise _helpers.PVGeoError(
'Dimension mismatch between input grid and topo file.'
)
# # Adjust the k indices to be in the cartesian system
# self.__indices['k'] = nz - self.__indices['k']
# Fill out the topo and add it as model as it will be in UBC format
# Create a 3D array of 1s and zeros (1 means beneath topo or active)
topo = np.empty((ny, nx, nz), dtype=float)
topo[:] = np.nan
for row in self.__indices.values:
i, j, k = row
topo[i, j, k + 1 :] = 0
topo[i, j, : k + 1] = 1
# Add as model... ``place_model_on_mesh`` handles the rest
TensorMeshReader.place_model_on_mesh(
output, topo.flatten(), 'Active Topography'
)
return
def RequestData(self, request, inInfo, outInfo):
"""Used by pipeline to generate output"""
# Get input/output of Proxy
pdi = self.GetInputData(inInfo, 0, 0)
output = self.GetOutputData(outInfo, 0)
output.DeepCopy(pdi) # ShallowCopy if you want changes to propagate upstream
# Perform task:
if self.__need_to_read:
self._read_up_front()
# Place the model data for given timestep onto the mesh
self._place_on_mesh(output)
return 1
#### Setters and Getters ####
def clear_topo_file(self):
"""Use to clear data file name."""
self._topoFileName = None
self.Modified(read_again=True)
def set_topo_filename(self, filename):
"""Use to set the file names for the reader. Handles single strings only"""
if filename is None:
return # do nothing if None is passed by a constructor on accident
elif isinstance(filename, str) and self._topoFileName != filename:
self._topoFileName = filename
self.Modified()
return 1
###############################################################################
#
# import numpy as np
# indices = np.array([[0,0,1],
# [0,1,1],
# [0,2,1],
# [1,0,1],
# [1,1,1],
# [1,2,1],
# [2,0,1],
# [2,1,1],
# [2,2,1],
# ])
#
# topo = np.empty((3,3,3), dtype=float)
# topo[:] = np.nan
#
# for row in indices:
# i, j, k = row
# topo[i, j, k:] = 0
# topo[i, j, :k] = 1
# topo
| StarcoderdataPython |
8151062 | <reponame>iamdanialkamali/zibal-wallet
from mongoengine import *
class Wallet(Document):
id = ObjectIdField()
name = StringField()
credit = FloatField()
class Transaction(Document):
id = ObjectIdField()
source_wallet_id = ObjectIdField()
destination_wallet_id = ObjectIdField(default=None)
amount = FloatField(min_value=0)
| StarcoderdataPython |
1814345 | import encrypt
import decrypt
MORSE_CODE_DICT = {'A': '.-', 'B': '-...',
'C': '-.-.', 'D': '-..', 'E': '.',
'F': '..-.', 'G': '--.', 'H': '....',
'I': '..', 'J': '.---', 'K': '-.-',
'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-',
'R': '.-.', 'S': '...', 'T': '-',
'U': '..-', 'V': '...-', 'W': '.--',
'X': '-..-', 'Y': '-.--', 'Z': '--..',
'1': '.----', '2': '..---', '3': '...--',
'4': '....-', '5': '.....', '6': '-....',
'7': '--...', '8': '---..', '9': '----.',
'0': '-----', ', ': '--..--', '.': '.-.-.-',
'?': '..--..', '/': '-..-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-'}
if __name__ == "__main__":
while True:
print('\nHello welcome to morsecode translator')
print('press 1 for english to morse code')
print('press 2 for morse code to english')
print('press 3 to exit')
x = input('')
if x == '1':
z = input('Enter the text you want to convert to morse code:')
e_msg = encrypt.encrypt(z, MORSE_CODE_DICT)
e_string = ""
for i in e_msg:
e_string = e_string + i + "/ "
e_string = e_string[:-2]
print('encrypted code:' + e_string)
elif x == '2':
z = input('Enter the code you want to convert to english:')
d_msg = decrypt.decrypt(z, MORSE_CODE_DICT)
d_string = ""
for i in d_msg:
d_string = d_string + i + ' '
d_string = d_string[:-1]
print(d_string)
elif x == '3':
            break
else:
print('invalid input')
continue
| StarcoderdataPython |
9759195 | <filename>neptune-notebooks/__init__.py
#
# Copyright (c) 2019, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from IPython.display import display, JSON
import json
def _jupyter_nbextension_paths():
return [{
'section': 'notebook',
'src': '../packages/nbextension',
'dest': 'neptune-notebooks',
'require': 'neptune-notebooks/neptune-notebook'
}]
# A display class that can be used within a notebook.
# from neptune-notebooks import JSON
# JSON(data)
class JSON(JSON):
"""A display class for displaying JSON visualizations in the Jupyter Notebook and IPython kernel.
JSON expects a JSON-able dict, not serialized JSON strings.
Scalar types (None, number, string) are not allowed, only dict containers.
"""
def _ipython_display_(self):
bundle = {
'application/json': self.data,
'text/plain': '<neptune-notebooks.JSON object>'
}
metadata = {
'application/json': self.metadata
}
display(bundle, metadata=metadata, raw=True)
| StarcoderdataPython |
4904555 | import math
from BitTornado.clock import clock
from .CurrentRateMeasure import Measure
DEBUG = False
MAX_RATE_PERIOD = 20.0
MAX_RATE = 10e10
PING_BOUNDARY = 1.2
PING_SAMPLES = 7
PING_DISCARDS = 1
PING_THRESHHOLD = 5
PING_DELAY = 5 # cycles 'til first upward adjustment
PING_DELAY_NEXT = 2 # 'til next
ADJUST_UP = 1.05
ADJUST_DOWN = 0.95
UP_DELAY_FIRST = 5
UP_DELAY_NEXT = 2
SLOTS_STARTING = 6
SLOTS_FACTOR = 1.66 / 1000
class RateLimiter:
def __init__(self, sched, unitsize, slotsfunc=lambda x: None):
self.sched = sched
self.last = None
self.unitsize = unitsize
self.slotsfunc = slotsfunc
self.measure = Measure(MAX_RATE_PERIOD)
self.autoadjust = False
self.upload_rate = MAX_RATE * 1000
self.slots = SLOTS_STARTING # garbage if not automatic
def set_upload_rate(self, rate):
# rate = -1 # test automatic
if rate < 0:
if self.autoadjust:
return
self.autoadjust = True
self.autoadjustup = 0
self.pings = []
rate = MAX_RATE
self.slots = SLOTS_STARTING
self.slotsfunc(self.slots)
else:
self.autoadjust = False
if not rate:
rate = MAX_RATE
self.upload_rate = rate * 1000
self.lasttime = clock()
self.bytes_sent = 0
def queue(self, conn):
assert conn.next_upload is None
if self.last is None:
self.last = conn
conn.next_upload = conn
self.try_send(True)
else:
conn.next_upload = self.last.next_upload
self.last.next_upload = conn
self.last = conn
def try_send(self, check_time=False):
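        # Leaky-bucket pacing: bytes_sent drains at upload_rate over time; while
        # credit remains, send unitsize chunks round-robin through the circular
        # next_upload list, otherwise schedule a retry once the deficit drains.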
t = clock()
self.bytes_sent -= (t - self.lasttime) * self.upload_rate
self.lasttime = t
if check_time:
self.bytes_sent = max(self.bytes_sent, 0)
cur = self.last.next_upload
while self.bytes_sent <= 0:
bytes = cur.send_partial(self.unitsize)
self.bytes_sent += bytes
self.measure.update_rate(bytes)
if bytes == 0 or cur.backlogged():
if self.last is cur:
self.last = None
cur.next_upload = None
break
else:
self.last.next_upload = cur.next_upload
cur.next_upload = None
cur = self.last.next_upload
else:
self.last = cur
cur = cur.next_upload
else:
self.sched(self.try_send, self.bytes_sent / self.upload_rate)
def adjust_sent(self, bytes):
self.bytes_sent = min(self.bytes_sent + bytes, self.upload_rate * 3)
self.measure.update_rate(bytes)
def ping(self, delay):
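        # Automatic rate tuning: collect PING_SAMPLES delay samples (after
        # dropping PING_DISCARDS); if at least PING_THRESHHOLD exceed
        # PING_BOUNDARY the link is assumed saturated and the cap is lowered,
        # otherwise it is gradually raised.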
if DEBUG:
print(delay)
if not self.autoadjust:
return
self.pings.append(delay > PING_BOUNDARY)
if len(self.pings) < PING_SAMPLES + PING_DISCARDS:
return
if DEBUG:
print('cycle')
pings = sum(self.pings[PING_DISCARDS:])
del self.pings[:]
if pings >= PING_THRESHHOLD: # assume flooded
if self.upload_rate == MAX_RATE:
self.upload_rate = self.measure.get_rate() * ADJUST_DOWN
else:
self.upload_rate = min(self.upload_rate,
self.measure.get_rate() * 1.1)
self.upload_rate = max(int(self.upload_rate * ADJUST_DOWN), 2)
self.slots = int(math.sqrt(self.upload_rate * SLOTS_FACTOR))
self.slotsfunc(self.slots)
if DEBUG:
print('adjust down to ', self.upload_rate)
self.lasttime = clock()
self.bytes_sent = 0
self.autoadjustup = UP_DELAY_FIRST
else: # not flooded
if self.upload_rate == MAX_RATE:
return
self.autoadjustup -= 1
if self.autoadjustup:
return
self.upload_rate = int(self.upload_rate * ADJUST_UP)
self.slots = int(math.sqrt(self.upload_rate * SLOTS_FACTOR))
self.slotsfunc(self.slots)
if DEBUG:
print('adjust up to ', self.upload_rate)
self.lasttime = clock()
self.bytes_sent = 0
self.autoadjustup = UP_DELAY_NEXT
| StarcoderdataPython |
11342323 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from openstackclient.tests.unit import utils
from otcextensions.common import sdk_utils
from otcextensions.osclient.load_balancer.v1 import listener
from otcextensions.tests.unit.osclient.load_balancer.v1 import fakes
class TestListListener(fakes.TestLoadBalancer):
_objects = fakes.FakeListener.create_multiple(3)
columns = (
'id', 'default_pool_id', 'name', 'project_id',
'protocol', 'protocol_port', 'admin_state_up')
data = []
for s in _objects:
data.append((
s.id,
s.default_pool_id,
s.name,
s.project_id,
s.protocol,
s.protocol_port,
# sdk_utils.ListOfIdsColumn(s.load_balancer_ids),
s.is_admin_state_up,
))
def setUp(self):
super(TestListListener, self).setUp()
self.cmd = listener.ListListener(self.app, None)
self.client.listeners = mock.Mock()
def test_list_default(self):
arglist = [
]
verifylist = [
]
        # Verify cmd is triggered with default parameters
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# Set the response
self.client.listeners.side_effect = [
self._objects
]
# Trigger the action
columns, data = self.cmd.take_action(parsed_args)
self.client.listeners.assert_called_once_with()
self.assertEqual(self.columns, columns)
self.assertListItemEqual(self.data, list(data))
def test_list_filters(self):
arglist = [
'--protocol', 'TCP',
'--protocol_port', '12',
'--name', 'some_name',
'--load_balancer', 'lb'
]
verifylist = [
('protocol', 'TCP'),
('protocol_port', 12),
('name', 'some_name'),
('load_balancer', 'lb')
]
# Verify cm is triggereg with default parameters
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# Set the response
self.client.listeners.side_effect = [
self._objects
]
# Trigger the action
columns, data = self.cmd.take_action(parsed_args)
self.client.listeners.assert_called_once_with(
protocol='TCP',
protocol_port=12,
name='some_name',
load_balancer_id='lb'
)
self.assertEqual(self.columns, columns)
self.assertListItemEqual(self.data, list(data))
def test_list_filters_exceptions_proto(self):
arglist = [
'--protocol', 'SMTP',
'--protocol_port', '12'
]
verifylist = [
('protocol', 'SMTP'),
('protocol_port', 12)
]
self.assertRaises(
utils.ParserException,
self.check_parser, self.cmd, arglist, verifylist)
def test_list_filters_exceptions_port(self):
arglist = [
'--protocol_port', '12x'
]
verifylist = [
('protocol_port', 12)
]
# Ensure exception is raised
self.assertRaises(
utils.ParserException,
self.check_parser, self.cmd, arglist, verifylist)
class TestShowListener(fakes.TestLoadBalancer):
_object = fakes.FakeListener.create_one()
columns = (
'admin_state_up', 'connection_limit', 'default_pool_id',
'description', 'id', 'loadbalancers', 'name', 'protocol',
'protocol_port')
data = (
_object.is_admin_state_up,
_object.connection_limit,
_object.default_pool_id,
_object.description,
_object.id,
sdk_utils.ListOfIdsColumnBR(_object.load_balancer_ids),
_object.name,
_object.protocol,
_object.protocol_port,
)
def setUp(self):
super(TestShowListener, self).setUp()
self.cmd = listener.ShowListener(self.app, None)
self.client.find_listener = mock.Mock()
def test_show_default(self):
arglist = [
'lb'
]
verifylist = [
('listener', 'lb')
]
        # Verify cmd is triggered with default parameters
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# Set the response
self.client.find_listener.side_effect = [
self._object
]
# Trigger the action
columns, data = self.cmd.take_action(parsed_args)
self.client.find_listener.assert_called_once_with(
name_or_id='lb',
ignore_missing=False
)
self.assertEqual(self.columns, columns)
self.assertItemEqual(self.data, data)
class TestCreateListener(fakes.TestLoadBalancer):
_object = fakes.FakeListener.create_one()
columns = (
'admin_state_up', 'connection_limit', 'default_pool_id',
'description', 'id', 'loadbalancers', 'name', 'protocol',
'protocol_port')
data = (
_object.is_admin_state_up,
_object.connection_limit,
_object.default_pool_id,
_object.description,
_object.id,
sdk_utils.ListOfIdsColumnBR(_object.load_balancer_ids),
_object.name,
_object.protocol,
_object.protocol_port,
)
def setUp(self):
super(TestCreateListener, self).setUp()
self.cmd = listener.CreateListener(self.app, None)
self.client.create_listener = mock.Mock()
def test_create_default(self):
arglist = [
'lb',
'--protocol', 'tcp',
'--protocol_port', '134',
'--disable',
'--connection_limit', '-1',
'--default_pool', 'pool',
'--default_tls_container_ref', 'default_tls_container_ref',
'--description', 'description',
'--name', 'name'
]
verifylist = [
('protocol', 'TCP'),
('protocol_port', 134),
('load_balancer', 'lb'),
('disable', True),
('connection_limit', -1),
('default_pool', 'pool'),
('default_tls_container_ref', 'default_tls_container_ref'),
('description', 'description'),
('name', 'name')
]
        # Verify cmd is triggered with default parameters
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# Set the response
self.client.create_listener.side_effect = [
self._object
]
# Trigger the action
columns, data = self.cmd.take_action(parsed_args)
self.client.create_listener.assert_called_once_with(
connection_limit=-1,
default_pool_id='pool',
default_tls_container_ref='default_tls_container_ref',
description='description',
is_admin_state_up=False,
load_balancer_id='lb',
name='name',
protocol='TCP',
protocol_port=134
)
self.assertEqual(self.columns, columns)
self.assertItemEqual(self.data, data)
class TestUpdateListener(fakes.TestLoadBalancer):
_object = fakes.FakeListener.create_one()
columns = (
'ID', 'Name', 'description',
'is_admin_state_up', 'protocol', 'protocol_port',
'load_balancer_ids', 'default_pool_id',
'connection_limit')
data = (
_object.id,
_object.name,
_object.description,
_object.is_admin_state_up,
_object.protocol,
_object.protocol_port,
sdk_utils.ListOfIdsColumn(_object.load_balancer_ids),
_object.default_pool_id,
_object.connection_limit,
)
def setUp(self):
super(TestUpdateListener, self).setUp()
self.cmd = listener.SetListener(self.app, None)
self.client.update_listener = mock.Mock()
def test_update_default(self):
arglist = [
'lsnr',
'--disable',
'--connection_limit', '-1',
'--default_pool', 'pool',
'--default_tls_container_ref', 'default_tls_container_ref',
'--description', 'description',
'--name', 'name'
]
verifylist = [
('listener', 'lsnr'),
('disable', True),
('connection_limit', -1),
('default_pool', 'pool'),
('default_tls_container_ref', 'default_tls_container_ref'),
('description', 'description'),
('name', 'name')
]
        # Verify cmd is triggered with default parameters
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# Set the response
self.client.update_listener.side_effect = [
self._object
]
# Trigger the action
columns, data = self.cmd.take_action(parsed_args)
self.client.update_listener.assert_called_once_with(
is_admin_state_up=False,
listener='lsnr',
connection_limit=-1,
default_pool_id='pool',
default_tls_container_ref='default_tls_container_ref',
description='description',
name='name',
)
class TestDeleteListener(fakes.TestLoadBalancer):
_object = fakes.FakeListener.create_one()
def setUp(self):
super(TestDeleteListener, self).setUp()
self.cmd = listener.DeleteListener(self.app, None)
self.client.delete_listener = mock.Mock()
self.client.find_listener = mock.Mock()
def test_delete_default(self):
arglist = [
'lsnr',
]
verifylist = [
('listener', ['lsnr']),
]
        # Verify cmd is triggered with default parameters
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# Set the response
self.client.delete_listener.side_effect = [
{}
]
self.client.find_listener.side_effect = [
self._object
]
# Trigger the action
self.cmd.take_action(parsed_args)
self.client.delete_listener.assert_called_once_with(
listener=self._object.id,
ignore_missing=False
)
| StarcoderdataPython |
136283 | __all__ = ["InvalidECFGFormatException"]
class InvalidECFGFormatException(Exception):
pass
| StarcoderdataPython |
6468891 | # Copyright (C) 2019-2021 by Vd.
# This file is part of Wheelchair, the async CouchDB connector.
# Wheelchair is released under the MIT License (see LICENSE).
import pytest
from wheelchair import Connection
@pytest.mark.asyncio
async def test_session(admin_connection: Connection):
await admin_connection.authenticate()
res = await admin_connection.session()
assert res['userCtx']['name'] == 'admin'
assert len(res['userCtx']['roles']) > 0
@pytest.mark.asyncio
async def test_delete(admin_connection: Connection):
await admin_connection.authenticate()
await admin_connection.session.delete()
res = await admin_connection.session()
assert res['userCtx']['name'] is None
assert res['userCtx']['roles'] == []
| StarcoderdataPython |
11269993 | <reponame>leon3110l/katana_tsl_patch
from typing import List
from .pedals.pedal import BasePedal, FXPedal
from .pedals.delay import Delay
from .pedals.fx import FX
from importlib import import_module
import json
import os
class Patch():
    def __init__(self, name: str, pedals: List[BasePedal] = None):
        self.name = name
        # avoid a shared mutable default argument; each Patch gets its own list
        self.pedals = pedals if pedals is not None else []
        self._fix_pedals()
def _fix_pedals(self):
new_pedals = []
for pedal in self.pedals:
if(isinstance(pedal, FXPedal)):
new_pedals.append(FX(pedal))
elif(not isinstance(pedal, BasePedal)):
raise TypeError(pedal)
else:
new_pedals.append(pedal)
self.pedals = new_pedals
if(len(self.pedals) > 12):
raise Exception('too many pedals given')
if(len(self.pedals) < 12):
# fix the amount by adding pedals that are turned off
pedal_amount = {
"katana.pedals.wah.Wah": 0,
"katana.pedals.booster.Booster": 0,
"katana.pedals.fx.FX": 0,
"katana.pedals.ns.NoiseSuppressor": 0,
"katana.pedals.volume.Volume": 0,
"katana.pedals.amp.Amp": 0,
"katana.pedals.eq.EQ": 0,
"katana.pedals.sr.SendReturn": 0,
"katana.pedals.delay.Delay": 0,
"katana.pedals.reverb.Reverb": 0
}
for pedal in self.pedals:
pedal_type = type(pedal).__module__ + '.' + type(pedal).__name__
pedal_amount[pedal_type] += 1
for cp_name, amount in pedal_amount.items():
if(amount == 0):
if(cp_name == 'katana.pedals.amp.Amp'):
self._add_missing_pedal(cp_name, on=True)
else:
self._add_missing_pedal(cp_name)
if((cp_name == 'katana.pedals.delay.Delay' or cp_name == 'katana.pedals.fx.FX') and amount < 2):
self._add_missing_pedal(cp_name)
self._fix_num_pedals()
def _add_missing_pedal(self, cp_name: str, on: bool=False):
cp_split = cp_name.split('.')
c_name = cp_split.pop()
p_name = '.'.join(cp_split)
module = import_module(p_name)
target_class = getattr(module, c_name)
self.pedals.append(target_class(on=on))
def _fix_num_pedals(self):
delay_count = 0
fx_count = 1
for pedal in self.pedals:
# there can be two of these pedals
if(isinstance(pedal, Delay)):
pedal._num = delay_count
delay_count += 2
if(isinstance(pedal, FX)):
pedal._num = fx_count
fx_count += 1
def _get_chain_positions(self):
out = {}
for pos in range(2, 14):
out['fx_chain_position' + str(pos)] = self.pedals[pos-2].get_chain_index()
return out
def get_pedal_params(self):
out = {}
for value in self.pedals:
out = {**out, **value.to_dict()}
return out
def to_tsl(self):
        with open(os.path.join(os.path.dirname(__file__), 'clean_patch.tsl')) as tsl_raw:
tsl = json.load(tsl_raw)
tsl['patchList']['params'] = self.get_pedal_params()
tsl['patchList']['params'] = { **tsl['patchList']['params_always_include'], **tsl['patchList']['params'], **self._get_chain_positions() }
del tsl['patchList']['params_always_include']
return tsl
def save(self, file: str):
with open(file, 'w') as file:
file.write(json.dumps(self.to_tsl(), indent=4))
file.close() | StarcoderdataPython |
3364124 | <reponame>ggsdc/corn
"""
This file contains the base for a sign up endpoint
"""
from flask import current_app
from cornflow_core.authentication import BaseAuth
from cornflow_core.constants import AUTH_LDAP, AUTH_OID
from cornflow_core.exceptions import (
EndpointNotImplemented,
InvalidCredentials,
InvalidUsage,
)
from cornflow_core.models import UserBaseModel, UserRoleBaseModel
from .meta_resource import BaseMetaResource
class SignupBaseEndpoint(BaseMetaResource):
"""
    The base for the sign up endpoint
"""
def __init__(self):
super().__init__()
self.data_model = UserBaseModel
self.auth_class = BaseAuth
self.user_role_association = UserRoleBaseModel
def sign_up(self, **kwargs):
"""
The method in charge of performing the sign up of users
:param kwargs: the keyword arguments needed to perform the sign up
:return: a dictionary with the newly issued token and the user id, and a status code
"""
auth_type = current_app.config["AUTH_TYPE"]
if auth_type == AUTH_LDAP:
raise EndpointNotImplemented(
"The user has to sign up on the active directory"
)
elif auth_type == AUTH_OID:
raise EndpointNotImplemented(
"The user has to sign up with the OpenID protocol"
)
user = self.data_model(kwargs)
if user.check_username_in_use():
raise InvalidCredentials(
error="Username already in use, please supply another username"
)
if user.check_email_in_use():
raise InvalidCredentials(
error="Email already in use, please supply another email address"
)
user.save()
user_role = self.user_role_association(
{"user_id": user.id, "role_id": current_app.config["DEFAULT_ROLE"]}
)
user_role.save()
try:
token = self.auth_class.generate_token(user.id)
except Exception as e:
raise InvalidUsage(
error="Error in generating user token: " + str(e), status_code=400
)
return {"token": token, "id": user.id}, 201
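# A minimal sketch of how this base class could be wired into a concrete
# endpoint (hypothetical subclass name; real endpoints would also add request
# parsing/marshalling before delegating to sign_up):
#     class SignupEndpoint(SignupBaseEndpoint):
#         def post(self, **kwargs):
#             return self.sign_up(**kwargs)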
| StarcoderdataPython |
5057439 | <gh_stars>1-10
#!/usr/bin/env python
"""
@file fixedTimeControl.py
@author <NAME>
@date 31/01/2013
class for fixed time signal control
"""
import signalControl, readJunctionData, traci
class fixedTimeControl(signalControl.signalControl):
def __init__(self, junctionData):
super(fixedTimeControl, self).__init__()
self.junctionData = junctionData
self.firstCalled = self.getCurrentSUMOtime()
self.lastCalled = self.getCurrentSUMOtime()
self.lastStageIndex = 0
traci.trafficlights.setRedYellowGreenState(self.junctionData.id,
self.junctionData.stages[self.lastStageIndex].controlString)
def process(self):
if self.transitionObject.active:
# If the transition object is active i.e. processing a transition
pass
elif (self.getCurrentSUMOtime() - self.firstCalled) < (self.junctionData.offset*1000):
# Process offset first
pass
elif (self.getCurrentSUMOtime() - self.lastCalled) < (self.junctionData.stages[self.lastStageIndex].period*1000):
# Before the period of the next stage
pass
else:
# Not active, not in offset, stage not finished
if len(self.junctionData.stages) != (self.lastStageIndex)+1:
# Loop from final stage to first stage
self.transitionObject.newTransition(
self.junctionData.id,
self.junctionData.stages[self.lastStageIndex].controlString,
self.junctionData.stages[self.lastStageIndex+1].controlString)
self.lastStageIndex += 1
else:
# Proceed to next stage
#print(0.001*(self.getCurrentSUMOtime() - self.lastCalled))
self.transitionObject.newTransition(
self.junctionData.id,
self.junctionData.stages[self.lastStageIndex].controlString,
self.junctionData.stages[0].controlString)
self.lastStageIndex = 0
self.lastCalled = self.getCurrentSUMOtime()
super(fixedTimeControl, self).process()
def getTimeToSignalChange(self):
return (self.junctionData.stages[self.lastStageIndex].period*1000 -
(self.getCurrentSUMOtime() - self.lastCalled))
| StarcoderdataPython |
5009493 | <filename>community_tca9555.py
# SPDX-FileCopyrightText: 2017 <NAME>, written for Adafruit Industries
# SPDX-FileCopyrightText: Copyright (c) 2021 <NAME>
#
# SPDX-License-Identifier: MIT
"""
`community_tca9555`
================================================================================
CircuitPython library for connecting a TCA9555 16-Bit I2C GPIO expander
Library for TCA9555 Low-Voltage 16-Bit I2C and SMBus I/O Expander with Interrupt Output
and Configuration Registers
* Author(s): <NAME>
Implementation Notes
--------------------
**Hardware:**
* `Pimoroni Pico RGB Keypad Base <https://shop.pimoroni.com/products/pico-rgb-keypad-base>`_
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
* Adafruit's Bus Device library: https://github.com/adafruit/Adafruit_CircuitPython_BusDevice
* Adafruit's Register library: https://github.com/adafruit/Adafruit_CircuitPython_Register
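Usage example (illustrative only; assumes a board exposing ``board.I2C()`` and
a TCA9555 at the default address 0x20)::
    import board
    from community_tca9555 import TCA9555
    expander = TCA9555(board.I2C())
    expander.configuration_port_0 = 0xFF   # port 0: all pins as inputs
    expander.configuration_port_1 = 0x00   # port 1: all pins as outputs
    expander.output_port_1 = 0b10101010    # drive alternating levels on port 1
    print(hex(expander.input_port_0))      # read the port 0 pin levels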
"""
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/lesamouraipourpre/Community_CircuitPython_TCA9555.git"
import busio
from micropython import const
from adafruit_bus_device.i2c_device import I2CDevice
from adafruit_register.i2c_bit import ROBit, RWBit
from adafruit_register.i2c_bits import ROBits, RWBits
ADDRESS_MINIMUM = const(0x20)
"""The minimum I2C address the TCA9555 supports"""
ADDRESS_MAXIMUM = const(0x27)
"""The maximum I2C address the TCA9555 supports"""
INPUT_PORT_0 = const(0x00)
INPUT_PORT_1 = const(0x01)
OUTPUT_PORT_0 = const(0x02)
OUTPUT_PORT_1 = const(0x03)
POLARITY_INVERSION_PORT_0 = const(0x04)
POLARITY_INVERSION_PORT_1 = const(0x05)
CONFIGURATION_PORT_0 = const(0x06)
CONFIGURATION_PORT_1 = const(0x07)
class TCA9555:
# pylint: disable=too-few-public-methods
"""CircuitPython driver for the Texas Instruments TCA9555 expander."""
def __init__(self, i2c: busio.I2C, address: int = ADDRESS_MINIMUM):
"""
:param busio.I2C i2c: the I2C bus object to use. *Note:* This will
be converted to an `adafruit_bus_device.i2c_device.I2CDevice`
internally.
:param int address: The I2C address of the TCA9555. This must be in
the range `ADDRESS_MINIMUM` to `ADDRESS_MAXIMUM`. (Defaults to
`ADDRESS_MINIMUM`)
"""
if not ADDRESS_MINIMUM <= address <= ADDRESS_MAXIMUM:
raise ValueError(
"Address '{}' is not in the allowed range: {}-{}".format(
address, ADDRESS_MINIMUM, ADDRESS_MAXIMUM
)
)
# This MUST be named i2c_device for register to work
self.i2c_device = I2CDevice(i2c, address)
input_ports = ROBits(16, INPUT_PORT_0, 0, register_width=2)
"""
Read all 16 bits from port 0 and 1.
The Input Port registers reflect the incoming logic levels of the pins,
regardless of whether the pin is defined as an input or an output by the
Configuration register :py:attr:`configuration_ports`.
"""
input_port_0 = ROBits(8, INPUT_PORT_0, 0)
"""
Read all 8 bits from port 0.
    The Input Port register reflects the incoming logic levels of the pins,
regardless of whether the pin is defined as an input or an output by the
Configuration register :py:attr:`configuration_port_0`.
"""
input_port_0_pin_0 = ROBit(INPUT_PORT_0, 0)
"""
Read the state of port 0 pin 0.
    The Input Port register reflects the incoming logic level of the pin,
regardless of whether the pin is defined as an input or an output by the
Configuration register :py:attr:`configuration_port_0_pin_0`.
"""
input_port_0_pin_1 = ROBit(INPUT_PORT_0, 1)
"""
Read the state of port 0 pin 1.
    The Input Port register reflects the incoming logic level of the pin,
regardless of whether the pin is defined as an input or an output by the
Configuration register :py:attr:`configuration_port_0_pin_1`.
"""
input_port_0_pin_2 = ROBit(INPUT_PORT_0, 2)
"""
Read the state of port 0 pin 2.
    The Input Port register reflects the incoming logic level of the pin,
regardless of whether the pin is defined as an input or an output by the
Configuration register :py:attr:`configuration_port_0_pin_2`.
"""
input_port_0_pin_3 = ROBit(INPUT_PORT_0, 3)
"""
Read the state of port 0 pin 3.
    The Input Port register reflects the incoming logic level of the pin,
regardless of whether the pin is defined as an input or an output by the
Configuration register :py:attr:`configuration_port_0_pin_3`.
"""
input_port_0_pin_4 = ROBit(INPUT_PORT_0, 4)
"""
Read the state of port 0 pin 4.
    The Input Port register reflects the incoming logic level of the pin,
regardless of whether the pin is defined as an input or an output by the
Configuration register :py:attr:`configuration_port_0_pin_4`.
"""
input_port_0_pin_5 = ROBit(INPUT_PORT_0, 5)
"""
Read the state of port 0 pin 5.
    The Input Port register reflects the incoming logic level of the pin,
regardless of whether the pin is defined as an input or an output by the
Configuration register :py:attr:`configuration_port_0_pin_5`.
"""
input_port_0_pin_6 = ROBit(INPUT_PORT_0, 6)
"""
Read the state of port 0 pin 6.
    The Input Port register reflects the incoming logic level of the pin,
regardless of whether the pin is defined as an input or an output by the
Configuration register :py:attr:`configuration_port_0_pin_6`.
"""
input_port_0_pin_7 = ROBit(INPUT_PORT_0, 7)
"""
Read the state of port 0 pin 7.
    The Input Port register reflects the incoming logic level of the pin,
regardless of whether the pin is defined as an input or an output by the
Configuration register :py:attr:`configuration_port_0_pin_7`.
"""
input_port_1 = ROBits(8, INPUT_PORT_1, 0)
"""
Read all 8 bits from port 1.
    The Input Port register reflects the incoming logic levels of the pins,
regardless of whether the pin is defined as an input or an output by the
Configuration register :py:attr:`configuration_port_1`.
"""
input_port_1_pin_0 = ROBit(INPUT_PORT_1, 0)
"""
Read the state of port 1 pin 0.
    The Input Port register reflects the incoming logic level of the pin,
regardless of whether the pin is defined as an input or an output by the
Configuration register :py:attr:`configuration_port_1_pin_0`.
"""
input_port_1_pin_1 = ROBit(INPUT_PORT_1, 1)
"""
Read the state of port 1 pin 1.
    The Input Port register reflects the incoming logic level of the pin,
regardless of whether the pin is defined as an input or an output by the
Configuration register :py:attr:`configuration_port_1_pin_1`.
"""
input_port_1_pin_2 = ROBit(INPUT_PORT_1, 2)
"""
Read the state of port 1 pin 2.
    The Input Port register reflects the incoming logic level of the pin,
regardless of whether the pin is defined as an input or an output by the
Configuration register :py:attr:`configuration_port_1_pin_2`.
"""
input_port_1_pin_3 = ROBit(INPUT_PORT_1, 3)
"""
Read the state of port 1 pin 3.
    The Input Port register reflects the incoming logic level of the pin,
regardless of whether the pin is defined as an input or an output by the
Configuration register :py:attr:`configuration_port_1_pin_3`.
"""
input_port_1_pin_4 = ROBit(INPUT_PORT_1, 4)
"""
Read the state of port 1 pin 4.
    The Input Port register reflects the incoming logic level of the pin,
regardless of whether the pin is defined as an input or an output by the
Configuration register :py:attr:`configuration_port_1_pin_4`.
"""
input_port_1_pin_5 = ROBit(INPUT_PORT_1, 5)
"""
Read the state of port 1 pin 5.
    The Input Port register reflects the incoming logic level of the pin,
regardless of whether the pin is defined as an input or an output by the
Configuration register :py:attr:`configuration_port_1_pin_5`.
"""
input_port_1_pin_6 = ROBit(INPUT_PORT_1, 6)
"""
Read the state of port 1 pin 6.
    The Input Port register reflects the incoming logic level of the pin,
regardless of whether the pin is defined as an input or an output by the
Configuration register :py:attr:`configuration_port_1_pin_6`.
"""
input_port_1_pin_7 = ROBit(INPUT_PORT_1, 7)
"""
Read the state of port 1 pin 7.
    The Input Port register reflects the incoming logic level of the pin,
regardless of whether the pin is defined as an input or an output by the
Configuration register :py:attr:`configuration_port_1_pin_7`.
"""
output_ports = RWBits(16, OUTPUT_PORT_0, 0, register_width=2)
"""
Write 16 bits of state to the outputs. This will only apply to pins that
are configured as outputs.
"""
output_port_0 = RWBits(8, OUTPUT_PORT_0, 0)
"""
Write 8 bits of state to port 0. This will only apply to pins that are
configured as outputs.
"""
output_port_0_pin_0 = RWBit(OUTPUT_PORT_0, 0)
"""
Write boolean state to port 0 pin 0. This will only apply if the pin is
configured as an output.
"""
output_port_0_pin_1 = RWBit(OUTPUT_PORT_0, 1)
"""
Write boolean state to port 0 pin 1. This will only apply if the pin is
configured as an output.
"""
output_port_0_pin_2 = RWBit(OUTPUT_PORT_0, 2)
"""
Write boolean state to port 0 pin 2. This will only apply if the pin is
configured as an output.
"""
output_port_0_pin_3 = RWBit(OUTPUT_PORT_0, 3)
"""
Write boolean state to port 0 pin 3. This will only apply if the pin is
configured as an output.
"""
output_port_0_pin_4 = RWBit(OUTPUT_PORT_0, 4)
"""
Write boolean state to port 0 pin 4. This will only apply if the pin is
configured as an output.
"""
output_port_0_pin_5 = RWBit(OUTPUT_PORT_0, 5)
"""
Write boolean state to port 0 pin 5. This will only apply if the pin is
configured as an output.
"""
output_port_0_pin_6 = RWBit(OUTPUT_PORT_0, 6)
"""
Write boolean state to port 0 pin 6. This will only apply if the pin is
configured as an output.
"""
output_port_0_pin_7 = RWBit(OUTPUT_PORT_0, 7)
"""
Write boolean state to port 0 pin 7. This will only apply if the pin is
configured as an output.
"""
output_port_1 = RWBits(8, OUTPUT_PORT_1, 0)
"""
Write 8 bits of state to port 1. This will only apply to pins that are
configured as outputs.
"""
output_port_1_pin_0 = RWBit(OUTPUT_PORT_1, 0)
"""
Write boolean state to port 1 pin 0. This will only apply if the pin is
configured as an output.
"""
output_port_1_pin_1 = RWBit(OUTPUT_PORT_1, 1)
"""
Write boolean state to port 1 pin 1. This will only apply if the pin is
configured as an output.
"""
output_port_1_pin_2 = RWBit(OUTPUT_PORT_1, 2)
"""
Write boolean state to port 1 pin 2. This will only apply if the pin is
configured as an output.
"""
output_port_1_pin_3 = RWBit(OUTPUT_PORT_1, 3)
"""
Write boolean state to port 1 pin 3. This will only apply if the pin is
configured as an output.
"""
output_port_1_pin_4 = RWBit(OUTPUT_PORT_1, 4)
"""
Write boolean state to port 1 pin 4. This will only apply if the pin is
configured as an output.
"""
output_port_1_pin_5 = RWBit(OUTPUT_PORT_1, 5)
"""
Write boolean state to port 1 pin 5. This will only apply if the pin is
configured as an output.
"""
output_port_1_pin_6 = RWBit(OUTPUT_PORT_1, 6)
"""
Write boolean state to port 1 pin 6. This will only apply if the pin is
configured as an output.
"""
output_port_1_pin_7 = RWBit(OUTPUT_PORT_1, 7)
"""
Write boolean state to port 1 pin 7. This will only apply if the pin is
configured as an output.
"""
polarity_inversions = RWBits(16, POLARITY_INVERSION_PORT_0, 0, register_width=2)
"""Read or write 16 bits of polarity inversion state."""
polarity_inversion_port_0 = RWBits(8, POLARITY_INVERSION_PORT_0, 0)
"""Read or write 8 bits of port 0 polarity inversion state."""
polarity_inversion_port_0_pin_0 = RWBit(POLARITY_INVERSION_PORT_0, 0)
"""Read or write port 0 pin 0 polarity inversion state."""
polarity_inversion_port_0_pin_1 = RWBit(POLARITY_INVERSION_PORT_0, 1)
"""Read or write port 0 pin 1 polarity inversion state."""
polarity_inversion_port_0_pin_2 = RWBit(POLARITY_INVERSION_PORT_0, 2)
"""Read or write port 0 pin 2 polarity inversion state."""
polarity_inversion_port_0_pin_3 = RWBit(POLARITY_INVERSION_PORT_0, 3)
"""Read or write port 0 pin 3 polarity inversion state."""
polarity_inversion_port_0_pin_4 = RWBit(POLARITY_INVERSION_PORT_0, 4)
"""Read or write port 0 pin 4 polarity inversion state."""
polarity_inversion_port_0_pin_5 = RWBit(POLARITY_INVERSION_PORT_0, 5)
"""Read or write port 0 pin 5 polarity inversion state."""
polarity_inversion_port_0_pin_6 = RWBit(POLARITY_INVERSION_PORT_0, 6)
"""Read or write port 0 pin 6 polarity inversion state."""
polarity_inversion_port_0_pin_7 = RWBit(POLARITY_INVERSION_PORT_0, 7)
"""Read or write port 0 pin 7 polarity inversion state."""
polarity_inversion_port_1 = RWBits(8, POLARITY_INVERSION_PORT_1, 0)
"""Read or write 8 bits of port 1 polarity inversion state."""
polarity_inversion_port_1_pin_0 = RWBit(POLARITY_INVERSION_PORT_1, 0)
"""Read or write port 1 pin 0 polarity inversion state."""
polarity_inversion_port_1_pin_1 = RWBit(POLARITY_INVERSION_PORT_1, 1)
"""Read or write port 1 pin 1 polarity inversion state."""
polarity_inversion_port_1_pin_2 = RWBit(POLARITY_INVERSION_PORT_1, 2)
"""Read or write port 1 pin 2 polarity inversion state."""
polarity_inversion_port_1_pin_3 = RWBit(POLARITY_INVERSION_PORT_1, 3)
"""Read or write port 1 pin 3 polarity inversion state."""
polarity_inversion_port_1_pin_4 = RWBit(POLARITY_INVERSION_PORT_1, 4)
"""Read or write port 1 pin 4 polarity inversion state."""
polarity_inversion_port_1_pin_5 = RWBit(POLARITY_INVERSION_PORT_1, 5)
"""Read or write port 1 pin 5 polarity inversion state."""
polarity_inversion_port_1_pin_6 = RWBit(POLARITY_INVERSION_PORT_1, 6)
"""Read or write port 1 pin 6 polarity inversion state."""
polarity_inversion_port_1_pin_7 = RWBit(POLARITY_INVERSION_PORT_1, 7)
"""Read or write port 1 pin 7 polarity inversion state."""
configuration_ports = RWBits(16, CONFIGURATION_PORT_0, 0, register_width=2)
"""
Read or write 16 bits of configuration state.
If a bit is set to 1, the corresponding port pin is enabled as an input
with a high-impedance output driver. If a bit in this register is cleared
to 0, the corresponding port pin is enabled as an output.
"""
configuration_port_0 = RWBits(8, CONFIGURATION_PORT_0, 0)
"""Read or write 8 bits of port 0 configuration state. 0 = Output, 1 = Input"""
configuration_port_0_pin_0 = RWBit(CONFIGURATION_PORT_0, 0)
"""Read or write port 0 pin 0 configuration state. 0 = Output, 1 = Input"""
configuration_port_0_pin_1 = RWBit(CONFIGURATION_PORT_0, 1)
"""Read or write port 0 pin 1 configuration state. 0 = Output, 1 = Input"""
configuration_port_0_pin_2 = RWBit(CONFIGURATION_PORT_0, 2)
"""Read or write port 0 pin 2 configuration state. 0 = Output, 1 = Input"""
configuration_port_0_pin_3 = RWBit(CONFIGURATION_PORT_0, 3)
"""Read or write port 0 pin 3 configuration state. 0 = Output, 1 = Input"""
configuration_port_0_pin_4 = RWBit(CONFIGURATION_PORT_0, 4)
"""Read or write port 0 pin 4 configuration state. 0 = Output, 1 = Input"""
configuration_port_0_pin_5 = RWBit(CONFIGURATION_PORT_0, 5)
"""Read or write port 0 pin 5 configuration state. 0 = Output, 1 = Input"""
configuration_port_0_pin_6 = RWBit(CONFIGURATION_PORT_0, 6)
"""Read or write port 0 pin 6 configuration state. 0 = Output, 1 = Input"""
configuration_port_0_pin_7 = RWBit(CONFIGURATION_PORT_0, 7)
"""Read or write port 0 pin 7 configuration state. 0 = Output, 1 = Input"""
configuration_port_1 = RWBits(8, CONFIGURATION_PORT_1, 0)
"""Read or write 8 bits of port 1 configuration state. 0 = Output, 1 = Input"""
configuration_port_1_pin_0 = RWBit(CONFIGURATION_PORT_1, 0)
"""Read or write port 1 pin 0 configuration state. 0 = Output, 1 = Input"""
configuration_port_1_pin_1 = RWBit(CONFIGURATION_PORT_1, 1)
"""Read or write port 1 pin 1 configuration state. 0 = Output, 1 = Input"""
configuration_port_1_pin_2 = RWBit(CONFIGURATION_PORT_1, 2)
"""Read or write port 1 pin 2 configuration state. 0 = Output, 1 = Input"""
configuration_port_1_pin_3 = RWBit(CONFIGURATION_PORT_1, 3)
"""Read or write port 1 pin 3 configuration state. 0 = Output, 1 = Input"""
configuration_port_1_pin_4 = RWBit(CONFIGURATION_PORT_1, 4)
"""Read or write port 1 pin 4 configuration state. 0 = Output, 1 = Input"""
configuration_port_1_pin_5 = RWBit(CONFIGURATION_PORT_1, 5)
"""Read or write port 1 pin 5 configuration state. 0 = Output, 1 = Input"""
configuration_port_1_pin_6 = RWBit(CONFIGURATION_PORT_1, 6)
"""Read or write port 1 pin 6 configuration state. 0 = Output, 1 = Input"""
configuration_port_1_pin_7 = RWBit(CONFIGURATION_PORT_1, 7)
"""Read or write port 1 pin 7 configuration state. 0 = Output, 1 = Input"""
| StarcoderdataPython |
12850529 | <filename>src/Noncircular/Calculations/_Appendix13_7_c.py<gh_stars>1-10
import math
# TODO: Implement acceptability tests
class Appendix13_7_cParams:
def __init__(
self,
internal_pressure,
corner_radius,
short_side_half_length,
long_side_half_length,
thickness,
eval_at_outer_walls = False):
self.P = internal_pressure
self.R = corner_radius
self.L_1 = short_side_half_length
self.L_2 = long_side_half_length
self.t_1 = thickness
self.eval_at_outer_walls = eval_at_outer_walls
class Appendix13_7_cCalcs:
def __init__(self, params: Appendix13_7_cParams):
self.P = params.P
self.R = params.R
self.L_1 = params.L_1
self.L_2 = params.L_2
self.t_1 = params.t_1
self.isOuterWallEval = params.eval_at_outer_walls
def c(self):
"""
        :return: The distance from the neutral axis of the cross section to the extreme fiber; returns the inner (c_i) or outer (c_o) value depending on whether the outer wall is being evaluated
"""
sign = 1
if self.isOuterWallEval:
sign = -1
return 0.5 * sign * self.t_1
def I_1(self):
return (1 / 12.0) * self.t_1 ** 3
def alpha3(self):
return self.L_2 / self.L_1
def phi(self):
return self.R / self.L_1
def K_3(self):
"""
:return: Equation 40
"""
return (-1.0) * (self.L_1 ** 2) * (
6.0 * (self.phi() ** 2) * self.alpha3()
- 3.0 * math.pi * (self.phi() ** 2)
+ 6.0 * (self.phi() ** 2)
+ (self.alpha3() ** 3)
+ (3.0 * self.alpha3() ** 2)
- 6.0 * self.phi()
- 2.0
+ 1.5 * math.pi * self.phi() * (self.alpha3() ** 2)
+ 6.0 * self.phi() * self.alpha3()
) / (3.0 * (2.0 * self.alpha3() + math.pi * self.phi() + 2.0))
def M_A(self):
"""
:return: Equation 38
"""
return self.P * self.K_3()
def M_r(self):
"""
:return: equation 39
"""
raise ValueError("Looks like it's time to implement M_r")
def S_m_C(self):
"""
:return: Short side membrane stress at point C for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 25
"""
return (self.P * (self.R + self.L_2)) / self.t_1
def S_m_D(self):
"""
:return: Same as S_m_C
"""
return self.S_m_C()
def S_m_A(self):
"""
:return: Long side membrane stress at point A for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 26
"""
return (self.P *(self.L_1 + self.R)) / self.t_1
def S_m_B(self):
"""
:return: Same as S_m_A
"""
return self.S_m_A()
def S_m_BC(self):
"""
:return: Membrane stress in radius, between points B and C for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 27
"""
return (self.P / self.t_1) * (math.sqrt((self.L_2 ** 2) + self.L_1 ** 2) + self.R)
def S_b_C(self):
"""
:return: Bending stress at C for short side plate for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 28
"""
return (self.c() / (2.0 * self.I_1())) * (2.0 * self.M_A() + self.P * (2 * self.R * self.L_2 - 2.0 * self.R * self.L_1 + self.L_2 ** 2))
def S_b_D(self):
"""
:return: Bending stress at D for short side plate for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 29
"""
return (self.c() / (2.0 * self.I_1())) * (2.0 * self.M_A() + self.P * ((self.L_2 ** 2) + 2 * self.R * self.L_2 - 2.0 * self.R * self.L_1 + self.L_2 ** 2))
def S_b_A(self):
"""
:return: Bending stress at point A for long side plate for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 30
"""
return self.M_A() * self.c() / self.I_1()
def S_b_B(self):
"""
:return: Bending stress at point B for long side plate for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 31
"""
return (self.c() / (2 * self.I_1())) * (2 * self.M_A() + self.P * self.L_2 ** 2)
def S_b_BC(self):
"""
:return: Max bending stress between points B and C for corner sections for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 32
"""
maxStressTheta = math.atan(self.L_1 / self.L_2)
geom = self.c() / self.I_1()
moment = 0.5 * (2 * self.M_A() + self.P * (2 * self.R * (self.L_2 * math.cos(maxStressTheta) - self.L_1 * (1 - math.sin(maxStressTheta))) + self.L_2 ** 2))
return geom * moment
def S_T_C(self):
"""
:return: Total stress at point C for short side plate for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 33
"""
return self.S_m_C() + self.S_b_C()
def S_T_D(self):
"""
:return: Total stress at point D for short side plate for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 34
"""
return self.S_m_D() + self.S_b_D()
def S_T_A(self):
"""
:return: Total stress at point A for long side plate for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 35
"""
return self.S_m_A() + self.S_b_A()
def S_T_B(self):
"""
:return: Total stress at point B for long side plate for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 36
"""
return self.S_m_B() + self.S_b_B()
def S_T_BC(self):
"""
:return: Total stress between points B and C for corner sections for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 37
"""
return self.S_m_BC() + self.S_b_BC()
if __name__ == "__main__":
import copy
params_inner = Appendix13_7_cParams(
internal_pressure=100,
corner_radius=3,
short_side_half_length=5,
long_side_half_length=10,
thickness=1
)
calc_inner = Appendix13_7_cCalcs(params_inner)
params_outer = copy.deepcopy(params_inner)
params_outer.eval_at_outer_walls = True
calc_outer = Appendix13_7_cCalcs(params_outer)
print("*** Input ***")
print("P = " + str(params_inner.P))
print("R = " + str(params_inner.R))
print("L_1 = " + str(params_inner.L_1))
print("L_2 = " + str(params_inner.L_2))
print("t_1 = " + str(params_inner.t_1))
print("")
print("*** Output ***")
print("")
print("*** Inner Walls ***")
print("c = " + str(calc_inner.c()))
print("I_1 = " + str(calc_inner.I_1()))
print("alpha3 = " + str(calc_inner.alpha3()))
print("phi = " + str(calc_inner.phi()))
print("K_3 = " + str(calc_inner.K_3()))
print("M_A = " + str(calc_inner.M_A()))
# print("M_r = " + str(calc_inner.M_r()))
print("S_m_C = " + str(calc_inner.S_m_C()))
print("S_m_D = " + str(calc_inner.S_m_D()))
print("S_m_A = " + str(calc_inner.S_m_A()))
print("S_m_B = " + str(calc_inner.S_m_B()))
print("S_m_BC = " + str(calc_inner.S_m_BC()))
print("S_b_C = " + str(calc_inner.S_b_C()))
print("S_b_D = " + str(calc_inner.S_b_D()))
print("S_b_A = " + str(calc_inner.S_b_A()))
print("S_b_B = " + str(calc_inner.S_b_B()))
print("S_b_BC = " + str(calc_inner.S_b_BC()))
print("S_T_C = " + str(calc_inner.S_T_C()))
print("S_T_D = " + str(calc_inner.S_T_D()))
print("S_T_A = " + str(calc_inner.S_T_A()))
print("S_T_B = " + str(calc_inner.S_T_B()))
print("S_T_BC = " + str(calc_inner.S_T_BC()))
print("")
print("*** Outer Walls ***")
print("c = " + str(calc_outer.c()))
print("I_1 = " + str(calc_outer.I_1()))
print("alpha3 = " + str(calc_outer.alpha3()))
print("phi = " + str(calc_outer.phi()))
print("K_3 = " + str(calc_outer.K_3()))
print("M_A = " + str(calc_outer.M_A()))
# print("M_r = " + str(calc_outer.M_r()))
print("S_m_C = " + str(calc_outer.S_m_C()))
print("S_m_D = " + str(calc_outer.S_m_D()))
print("S_m_A = " + str(calc_outer.S_m_A()))
print("S_m_B = " + str(calc_outer.S_m_B()))
print("S_m_BC = " + str(calc_outer.S_m_BC()))
print("S_b_C = " + str(calc_outer.S_b_C()))
print("S_b_D = " + str(calc_outer.S_b_D()))
print("S_b_A = " + str(calc_outer.S_b_A()))
print("S_b_B = " + str(calc_outer.S_b_B()))
print("S_b_BC = " + str(calc_outer.S_b_BC()))
print("S_T_C = " + str(calc_outer.S_T_C()))
print("S_T_D = " + str(calc_outer.S_T_D()))
print("S_T_A = " + str(calc_outer.S_T_A()))
print("S_T_B = " + str(calc_outer.S_T_B()))
print("S_T_BC = " + str(calc_outer.S_T_BC())) | StarcoderdataPython |
11212762 | <gh_stars>1-10
import time
import random
import re
import os
from requests.sessions import Session
import json
try:
import execjs
is_execjs_imported = True
except:
is_execjs_imported = False
if not is_execjs_imported:
try:
"""
Name: Js2Py
Version: 0.37
Summary: JavaScript to Python Translator & JavaScript interpreter written in 100% pure Python.
Home-page: https://github.com/PiotrDabkowski/Js2Py
Author: <NAME>
Author-email: <EMAIL>
License: MIT
Description: Translates JavaScript to Python code. Js2Py is able to translate and execute virtually any JavaScript code.
"""
import js2py
except:
raise
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
DEFAULT_USER_AGENTS = [
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0",
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:41.0) Gecko/20100101 Firefox/41.0"
]
DEFAULT_USER_AGENT = random.choice(DEFAULT_USER_AGENTS)
def path_to_value(dict_, path, default=None):
for key in dict_.keys():
if key != path[0]:
continue
value = dict_[key]
if type(value) == dict and len(path) != 1:
            return path_to_value(value, path[1:], default)
elif len(path) == 1:
return value
return default
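# Illustrative behaviour of the helper above:
#   path_to_value({"a": {"b": 1}}, ["a", "b"])             -> 1
#   path_to_value({"a": {"b": 1}}, ["a", "x"], default=0)  -> 0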
"""
def number_magic(value, index=0):
number = 0.0
tail_operator = ""
working =
while index < len(value):
char = value
if in ["+","-","*","/","&",">>>","<<",">>","^","&","|"]:
"""
def hira_last_add(ls, appendy):
append_index = None
append_list = None
for x in range(len(ls)):
if type(ls[x]) == list:
append_index = x
append_list = ls[x]
if type(append_index) == int:
ls[append_index] = hira_last_add(append_list, appendy)
else:
ls.append(appendy)
return ls
class CloudflareScraper(Session):
def __init__(self, *args, **kwargs):
self.js_engine = kwargs.pop("js_engine", None)
super(CloudflareScraper, self).__init__(*args, **kwargs)
if "requests" in self.headers["User-Agent"]:
# Spoof Firefox on Linux if no custom User-Agent has been set
self.headers["User-Agent"] = "Mozilla/5.0 (X11; Linux x86_64; rv:66.0) Gecko/20100101 Firefox/66.0"
def request(self, method, url, *args, **kwargs):
resp = super(CloudflareScraper, self).request(method, url, *args, **kwargs)
print(resp.content)
# Check if Cloudflare anti-bot is on
if ("URL=/cdn-cgi/" in resp.headers.get("Refresh", "") or
(resp.status_code == 503 and
re.search(r'<form id="challenge-form".+?DDoS protection by CloudFlare', resp.text, re.I | re.DOTALL)
)
): # Sometimes cloud flare sends a 503 status_code with no "Refresh" header for DDos protection.
return self.solve_cf_challenge(resp, **kwargs)
# Otherwise, no Cloudflare anti-bot detected
return resp
def solve_cf_challenge(self, resp, **kwargs):
time.sleep(4) # Cloudflare requires a delay before solving the challenge
body = resp.text
parsed_url = urlparse(resp.url)
path = parsed_url.path
domain = urlparse(resp.url).netloc
submit_url = "%s://%s/cdn-cgi/l/chk_jschl" % (parsed_url.scheme, domain)
params = kwargs.setdefault("params", {})
headers = kwargs.setdefault("headers", {})
headers["Referer"] = resp.url
try:
params["s"] = re.findall(r'name="s" value="(.+?)"', body)[-1]
params["jschl_vc"] = re.findall(r'name="jschl_vc" value="(\w+)"', body)[-1]
params["pass"] = re.findall(r'name="pass" value="(.+?)"', body)[-1]
# Extract the arithmetic operation
secret = self.extract_js(body)
secret += len(domain+path)
context = js2py.EvalJs({'value': secret})
context.execute("value = value.toFixed(10);")
secret = context.value
print(secret)
params["jschl_answer"] = secret
except Exception:
# Something is wrong with the page.
# This may indicate Cloudflare has changed their anti-bot
# technique. If you see this and are running the latest version,
# please open a GitHub issue so I can update the code accordingly.
print("[!] Unable to parse Cloudflare anti-bots page. "
"Try upgrading cloudflare-scrape, or submit a bug report "
"if you are running the latest version. Please read "
"https://github.com/Anorov/cloudflare-scrape#updates "
"before submitting a bug report.\n")
raise
print(kwargs["params"])
return self.get(submit_url, **kwargs)
def extract_js(self, body):
js = re.search(r"setTimeout\(function\(\){\s+(var "
"s,t,o,p,b,r,e,a,k,i,n,g,f.+?\r?\n[\s\S]+?a\.value =.+?)\r?\n", body).group(1)
# TODO support add all call method of js_dict
# TODO regexless
root_bastard = re.compile('(([ \\t]+|)(;|,|:|=|)([ \t]+|)(var |"|\'|)([ \\t]+|)(?P<key>[A-Za-z.]+)([ \\t]+|)("|\'|)(?P<operator>(:|=|(\\+|\\-|\\*|\\/|\\&|>>>|<<|>>|\\^|\\&|\\|)=))([ \t]+|)((?P<value>[^;]+)(;|,)|))')
statements = []
for statement in re.finditer(root_bastard, js):
statements.append(statement.groupdict())
hidden_value = 0
find_dotted = "(([A-Za-z0-9]+)((\\.[A-Za-z0-9]+)+))"
hidden_path = []
for state in statements:
if state["key"].strip().endswith(".value"):
for may_hidden in re.findall(find_dotted, state["value"]):
if may_hidden[0].endswith(".length"):
continue
hidden_path = may_hidden[0].split(".")
break
context = js2py.EvalJs({'value': None})
calc = None
hidden_keys = ["[\""+"\"][\"".join(hidden_path)+"\"]", "['"+"']['".join(hidden_path)+"']",".".join(hidden_path)]
for state in statements:
is_js_dict = state["value"].strip().startswith("{")
key = state["key"].strip()
operator = state["operator"]
if operator == ":":
operator = "="
value = state["value"]
if (len(hidden_path) > 0 and is_js_dict and key == hidden_path[0]) or (key in hidden_keys):
if is_js_dict:
new_chars = ""
last_chars = ""
for x in range(len(value)):
if last_chars == "\":" or last_chars == "':" or value[x] == ",":
new_chars += "\""
if len(last_chars) == 2:
last_chars = last_chars[1:]
last_chars += value[x]
new_chars += value[x]
if new_chars.endswith("}"):
new_chars = new_chars[:len(new_chars)-1]+"\"}"
else:
new_chars += "\"}"
value = path_to_value(json.loads(new_chars), hidden_path[1:])
if value is None:
continue
print("value"+operator+value)
context.execute("value"+operator+value)
"""
region_mess = []
region_deepness = 0
last_deepness = -1
trues = ["!+[]", "!![]"]
collecty = ""
number = 0
operator = None
for char in value:
if char == "(":
if type(operator) in [str, chr, unicode]:
region_mess.append(operator)
operator = None
region_deepness+=1
continue
elif char == ")":
region_mess.append(number)
number = 0
collecty = ""
operator = ""
region_deepness-=1
continue
if type(operator) == str:
operator += char
else:
collecty += char
for true in trues:
if collecty.endswith(true):
number+=1
break
print(value)
print(region_mess)
"""
print(statements)
print(context.value)
return context.value
"""
js = re.sub(r"a\.value = (parseInt\(.+?\)).+", r"\1", js)
js = re.sub(r"\s{3,}[a-z](?: = |\.).+", "", js)
# Strip characters that could be used to exit the string context
# These characters are not currently used in Cloudflare's arithmetic snippet
js = re.sub(r"[\n\\']", "", js)
if is_execjs_imported:
if "Node" in self.js_engine:
# Use vm.runInNewContext to safely evaluate code
# The sandboxed code cannot use the Node.js standard library
return "return require('vm').runInNewContext('%s');" % js
else:
return js.replace("parseInt", "return parseInt")
else:
return js
"""
@classmethod
def create_scraper(cls, sess=None, js_engine=None):
"""
Convenience function for creating a ready-to-go requests.Session (subclass) object.
"""
if is_execjs_imported:
if js_engine:
os.environ["EXECJS_RUNTIME"] = js_engine
js_engine = execjs.get().name
if not ("Node" in js_engine or "V8" in js_engine):
raise EnvironmentError("Your Javascript runtime '%s' is not supported due to security concerns. "
"Please use Node.js or PyV8. To force a specific engine, "
"such as Node, call create_scraper(js_engine=\"Node\")" % js_engine)
scraper = cls(js_engine=js_engine)
if sess:
attrs = ["auth", "cert", "cookies", "headers", "hooks", "params", "proxies", "data"]
for attr in attrs:
val = getattr(sess, attr, None)
if val:
setattr(scraper, attr, val)
return scraper
## Functions for integrating cloudflare-scrape with other applications and scripts
@classmethod
def get_tokens(cls, url, user_agent=None, js_engine=None):
scraper = cls.create_scraper(js_engine=js_engine)
if user_agent:
scraper.headers["User-Agent"] = user_agent
try:
resp = scraper.get(url)
resp.raise_for_status()
except Exception as e:
print("'%s' returned an error. Could not collect tokens.\n" % url)
raise
domain = urlparse(resp.url).netloc
cookie_domain = None
for d in scraper.cookies.list_domains():
if d.startswith(".") and d in ("." + domain):
cookie_domain = d
break
else:
raise ValueError("Unable to find Cloudflare cookies. Does the site actually have Cloudflare IUAM mode enabled?")
return ({
"__cfduid": scraper.cookies.get("__cfduid", "", domain=cookie_domain),
"cf_clearance": scraper.cookies.get("cf_clearance", "", domain=cookie_domain)
},
scraper.headers["User-Agent"]
)
@classmethod
def get_cookie_string(cls, url, user_agent=None, js_engine=None):
"""
Convenience function for building a Cookie HTTP header value.
"""
        tokens, user_agent = cls.get_tokens(url, user_agent=user_agent, js_engine=js_engine)
return "; ".join("=".join(pair) for pair in tokens.items()), user_agent
create_scraper = CloudflareScraper.create_scraper
get_tokens = CloudflareScraper.get_tokens
get_cookie_string = CloudflareScraper.get_cookie_string
| StarcoderdataPython |
80218 | #!/usr/bin/env python
"""
Created by howie.hu at 08/04/2018.
"""
import asyncio
import sys
import time
sys.path.append('../../')
from hproxy.database import DatabaseSetting
from hproxy.utils import logger
from hproxy.spider.proxy_tools import get_proxy_info
db_client = DatabaseSetting()
async def valid_proxies():
all_res = await db_client.get_all()
start = time.time()
tasks = []
if all_res:
for each in all_res.keys():
tasks.append(asyncio.ensure_future(valid_proxy(each, nums=1)))
done_list, pending_list = await asyncio.wait(tasks)
good_nums = 0
for task in done_list:
if task.result():
good_nums += 1
    logger.info(type="Authentication finished", message="Authentication finished, total proxy num: {0} - valid proxy num: {1}, time cost: {2}".format(
len(tasks),
good_nums,
time.time() - start))
async def valid_proxy(proxy, nums=1):
if nums > 5:
await db_client.delete(proxy)
logger.error(type='Invalid proxy', message="{0} had been abandoned".format(proxy))
return False
else:
ip, port = proxy.split(':')
isOk = await get_proxy_info(ip, port)
if not isOk:
logger.error(type='Invalid proxy', message="{0}:retry times = {1}".format(proxy, nums))
res = await valid_proxy(proxy, nums=nums + 1)
return res
else:
logger.info(type='Valid proxy', message="{0} is valid".format(proxy))
return True
def refresh_proxy():
asyncio.get_event_loop().run_until_complete(valid_proxies())
if __name__ == '__main__':
refresh_proxy()
| StarcoderdataPython |
6465572 | <reponame>Bohdanski/fuzzy-lookup<filename>fuzzy_lookup.py
import os
import sys
import csv
import xlsxwriter
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
# User input
base = "tblTopsMatch.csv"
match = "tblWegmansMatch.csv"
base_field = "topsDesc"
match_field = "wegmansDesc"
method = "sort"
threshold = 60
def fuzzy_match(base, match, method):
    """
    Return a fuzzy similarity score (0-100) between ``base`` and ``match``
    using the selected method: "ratio", "pratio" (partial ratio),
    "sort" (token sort) or "set" (token set).
    """
if method == "ratio":
return fuzz.ratio(base.lower(), match.lower())
elif method == "pratio":
return fuzz.partial_ratio(base.lower(), match.lower())
elif method == "sort":
return fuzz.token_sort_ratio(base, match)
elif method == "set":
return fuzz.token_set_ratio(base, match)
    else:
        print("ERROR: Invalid match method.")
        raise ValueError(f"Invalid match method: {method}")
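# Illustrative scores for the helper above (assumes fuzzywuzzy's standard
# scorers): fuzzy_match("apple pie", "pie apple", "sort") returns 100 because
# token order is ignored, while the plain "ratio" method scores the same pair lower.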
def main():
try:
data_dir = ".\\excel\\data\\"
archive_dir = ".\\excel\\archive\\"
# Open base file
with open(data_dir + base, "r") as file:
base_file = csv.DictReader(file)
base_lst = []
header_lst = []
# Copy dictionary rows into a list, and extract headers into a list
for row in base_file:
for key in row:
if key not in header_lst:
header_lst.append(key)
base_lst.append(row)
# Open file to match records against the base file
with open(data_dir + match, "r") as file:
match_file = csv.DictReader(file)
match_lst = []
# Copy dictionary rows into a list
for row in match_file:
match_lst.append(row)
# For dictionary row in the base file list...
write_lst = []
for base_row in base_lst:
best_match = ("No Match", 0)
row_lst = []
# Match each row against the current base row
for match_row in match_lst:
match_ratio = fuzzy_match(base_row[base_field], match_row[match_field], method)
# If the match ratio is less than threshold, skip record
if match_ratio < threshold:
continue
# Else, assign the highest ratio (linear search)
elif match_ratio > best_match[1]:
best_match = (match_row[match_field], match_ratio)
print(f"[{base_row[base_field]} | {best_match[0]}] Match Ratio: {best_match[1]}")
# For each row bring in all additional fields from base file, using header as key
for header in header_lst:
row_lst.append(base_row[header])
            # For each row, create a list of values to be appended to a master write list
row_lst.extend(list(best_match))
write_lst.append(row_lst)
# Create a new workbook and worksheet
workbook = xlsxwriter.Workbook(archive_dir + f"match-{method}.xlsx")
worksheet = workbook.add_worksheet()
        # Write column headers as the first row
for col_num, data in enumerate(header_lst):
worksheet.write(0, col_num, data)
        # Write each row from the write list to the workbook, offset by one
        # row so the header row written above is not overwritten
        for row_num, row_data in enumerate(write_lst):
            for col_num, col_data in enumerate(row_data):
                worksheet.write(row_num + 1, col_num, col_data)
workbook.close()
except:
if not os.path.exists(archive_dir):
os.makedirs(archive_dir)
if not os.path.exists(data_dir):
os.makedirs(data_dir)
if __name__ == "__main__":
main()
| StarcoderdataPython |
5034720 | <reponame>lavanyashukla/ray<gh_stars>1-10
import ray
from ray import serve
import requests
ray.init()
client = serve.start()
def say_hello(request):
return "hello " + request.query_params["name"] + "!"
# Form a backend from our function and connect it to an endpoint.
client.create_backend("my_backend", say_hello)
client.create_endpoint("my_endpoint", backend="my_backend", route="/hello")
# Query our endpoint in two different ways: from HTTP and from Python.
print(requests.get("http://127.0.0.1:8000/hello?name=serve").text)
# > hello serve!
print(ray.get(client.get_handle("my_endpoint").remote(name="serve")))
# > hello serve!
| StarcoderdataPython |
4827513 | #!/usr/bin/env python
'''
Copyright (c) 2016 anti-XSS developers
'''
pass
| StarcoderdataPython |
1613574 | class UserAlreadyExistsError(Exception):
    #raised when the user already exists
pass
class DatabaseConnectionError(Exception):
#raised when there is a problem with the database
pass
class PasswordsDontMatchError(Exception):
#raised when passwords do not match
pass
class InvalidPasswordError(Exception):
    #raised when the password is not at least 8 characters long,
#does not contain a special character, an uppercase or a number
pass | StarcoderdataPython |
1687162 | <gh_stars>10-100
from accountancy.helpers import (bulk_delete_with_history,
create_historical_records, get_action,
get_historical_change)
from contacts.models import Contact
from django.db import models
from django.test import TestCase
class GetActionTests(TestCase):
def test_action_create(self):
action = get_action("+")
self.assertEqual(
action,
"Create"
)
def test_action_update(self):
action = get_action("~")
self.assertEqual(
action,
"Update"
)
def test_action_delete(self):
action = get_action("-")
self.assertEqual(
action,
"Delete"
)
class GetHistoricalChangeTests(TestCase):
def test_historical_change_for_created_audit_only(self):
"""
Check the changes when only one audit log is provided -
the audit for the creation of the object
"""
contact = Contact.objects.create(code="1", name="11", email="111")
historical_records = Contact.history.all()
self.assertEqual(
len(historical_records),
1
)
audit = get_historical_change(None, historical_records[0])
self.assertEqual(
audit["id"]["old"],
""
)
self.assertEqual(
audit["id"]["new"],
str(contact.id)
)
self.assertEqual(
audit["code"]["old"],
""
)
self.assertEqual(
audit["code"]["new"],
contact.code
)
self.assertEqual(
audit["email"]["old"],
""
)
self.assertEqual(
audit["email"]["new"],
contact.email
)
def test_historical_change_for_updated(self):
contact = Contact.objects.create(code="1", name="11", email="111")
contact.name = "12"
contact.save()
historical_records = Contact.history.all()
self.assertEqual(
len(historical_records),
2
)
audit = get_historical_change(
historical_records[1], historical_records[0]
)
self.assertEqual(
audit["name"]["old"],
"11"
)
self.assertEqual(
audit["name"]["new"],
"12"
)
self.assertEqual(
len(audit.keys()),
2 # the name field - which changed - and the meta field
)
def test_historical_change_for_updated_but_no_change(self):
contact = Contact.objects.create(code="1", name="11", email="111")
contact.name = "11" # No change !!!
contact.save() # Create another log
historical_records = Contact.history.all()
self.assertEqual(
len(historical_records),
2
)
audit = get_historical_change(
historical_records[1], historical_records[0]
)
self.assertIsNone(
audit
)
def test_historical_change_for_deleted(self):
contact = Contact.objects.create(code="1", name="11", email="111")
pk = contact.pk
contact.delete() # Create another log
historical_records = Contact.history.all()
self.assertEqual(
len(historical_records),
2
)
audit = get_historical_change(
historical_records[1], historical_records[0]
)
self.assertEqual(
audit["meta"]["AUDIT_id"],
historical_records[0].pk
)
self.assertEqual(
audit["meta"]["AUDIT_action"],
"Delete"
)
self.assertEqual(
audit["meta"]["object_pk"],
pk
)
self.assertEqual(
audit["code"]["old"],
contact.code
)
self.assertEqual(
audit["code"]["new"],
""
)
self.assertEqual(
audit["name"]["old"],
contact.name
)
self.assertEqual(
audit["name"]["new"],
""
)
self.assertEqual(
audit["email"]["old"],
contact.email
)
self.assertEqual(
audit["email"]["new"],
""
)
class CreateHistoricalRecordsTests(TestCase):
@classmethod
def setUpTestData(cls):
contacts = []
for i in range(10):
c = Contact(code=i, name="duh")
contacts.append(c)
Contact.objects.bulk_create(contacts)
def test_creating_create_audits(self):
contacts = Contact.objects.all().order_by("pk")
audits = create_historical_records(contacts, Contact, "+")
first_audit_pk = audits[0].id
for i, audit in enumerate(audits, 1):
self.assertEqual(
audit.id,
first_audit_pk + (i - 1)
)
self.assertEqual(
audit.code,
str(i - 1)
)
self.assertEqual(
audit.name,
"duh"
)
self.assertEqual(
audit.email,
""
)
self.assertEqual(
audit.customer,
False
)
self.assertEqual(
audit.supplier,
False
)
self.assertEqual(
audit.history_change_reason,
""
)
self.assertEqual(
audit.history_type,
"+"
)
self.assertEqual(
audit.history_user_id,
None
)
def test_creating_update_audits(self):
contacts = Contact.objects.all().order_by("pk")
for c in contacts:
c.name = "duh-duh"
Contact.objects.bulk_update(contacts, ["name"])
audits = create_historical_records(contacts, Contact, "~")
first_audit_pk = audits[0].id
for i, audit in enumerate(audits, 1):
self.assertEqual(
audit.id,
first_audit_pk + (i - 1)
)
self.assertEqual(
audit.code,
str(i - 1)
)
self.assertEqual(
audit.name,
"duh-duh"
)
self.assertEqual(
audit.email,
""
)
self.assertEqual(
audit.customer,
False
)
self.assertEqual(
audit.supplier,
False
)
self.assertEqual(
audit.history_change_reason,
""
)
self.assertEqual(
audit.history_type,
"~"
)
self.assertEqual(
audit.history_user_id,
None
)
def test_creating_delete_audits(self):
contacts = Contact.objects.all().order_by("pk")
audits = create_historical_records(contacts, Contact, "-")
first_audit_pk = audits[0].id
for i, audit in enumerate(audits, 1):
self.assertEqual(
audit.id,
first_audit_pk + (i - 1)
)
self.assertEqual(
audit.code,
str(i - 1)
)
self.assertEqual(
audit.name,
"duh"
)
self.assertEqual(
audit.email,
""
)
self.assertEqual(
audit.customer,
False
)
self.assertEqual(
audit.supplier,
False
)
self.assertEqual(
audit.history_change_reason,
""
)
self.assertEqual(
audit.history_type,
"-"
)
self.assertEqual(
audit.history_user_id,
None
)
class BulkDeleteWithHistoryTests(TestCase):
@classmethod
def setUpTestData(cls):
contacts = []
for i in range(10):
c = Contact(code=i, name="duh")
contacts.append(c)
cls.contacts = Contact.objects.bulk_create(contacts)
def test(self):
contacts = self.contacts
bulk_delete_with_history(contacts, Contact)
audits = Contact.history.all().order_by("pk")
first_audit_pk = audits[0].id
self.assertEqual(
len(audits),
10
)
for i, audit in enumerate(audits, 1):
self.assertEqual(
audit.id,
first_audit_pk + (i - 1)
)
self.assertEqual(
audit.code,
str(i - 1)
)
self.assertEqual(
audit.name,
"duh"
)
self.assertEqual(
audit.email,
""
)
self.assertEqual(
audit.customer,
False
)
self.assertEqual(
audit.supplier,
False
)
self.assertEqual(
audit.history_change_reason,
""
)
self.assertEqual(
audit.history_type,
"-"
)
self.assertEqual(
audit.history_user_id,
None
) | StarcoderdataPython |
11307382 | from torch import nn
from constants import *
from layers import *
import torch
class Transformer(nn.Module):
def __init__(self, src_vocab_size, trg_vocab_size):
super().__init__()
self.src_vocab_size = src_vocab_size
self.trg_vocab_size = trg_vocab_size
self.src_embedding = nn.Embedding(self.src_vocab_size, d_model)
self.trg_embedding = nn.Embedding(self.trg_vocab_size, d_model)
self.positional_encoder = PositionalEncoder()
self.encoder = Encoder()
self.decoder = Decoder()
self.output_linear = nn.Linear(d_model, self.trg_vocab_size)
self.softmax = nn.LogSoftmax(dim=-1)
def forward(self, src_input, trg_input, e_mask=None, d_mask=None):
src_input = self.src_embedding(src_input) # (B, L) => (B, L, d_model)
trg_input = self.trg_embedding(trg_input) # (B, L) => (B, L, d_model)
src_input = self.positional_encoder(src_input) # (B, L, d_model) => (B, L, d_model)
trg_input = self.positional_encoder(trg_input) # (B, L, d_model) => (B, L, d_model)
e_output = self.encoder(src_input, e_mask) # (B, L, d_model)
d_output = self.decoder(trg_input, e_output, e_mask, d_mask) # (B, L, d_model)
output = self.softmax(self.output_linear(d_output)) # (B, L, d_model) => # (B, L, trg_vocab_size)
return output
class Encoder(nn.Module):
def __init__(self):
super().__init__()
self.layers = nn.ModuleList([EncoderLayer() for i in range(num_layers)])
self.layer_norm = LayerNormalization()
def forward(self, x, e_mask):
for i in range(num_layers):
x = self.layers[i](x, e_mask)
return self.layer_norm(x)
class Decoder(nn.Module):
def __init__(self):
super().__init__()
self.layers = nn.ModuleList([DecoderLayer() for i in range(num_layers)])
self.layer_norm = LayerNormalization()
def forward(self, x, e_output, e_mask, d_mask):
for i in range(num_layers):
x = self.layers[i](x, e_output, e_mask, d_mask)
return self.layer_norm(x)
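# Minimal smoke-test sketch (illustrative, left as comments; assumes
# constants.py defines d_model/num_layers, layers.py provides the modules
# imported above, and the attention layers accept mask=None):
#     model = Transformer(src_vocab_size=100, trg_vocab_size=100)
#     src = torch.randint(0, 100, (2, 16))   # (batch, src_len)
#     trg = torch.randint(0, 100, (2, 16))   # (batch, trg_len)
#     out = model(src, trg)                  # (2, 16, 100) log-probabilities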
| StarcoderdataPython |
11229781 | import requests
import json
import argparse
import time
import logging
import sys
HDRS = {
"Accept": "application/json",
"Content-Type": "application/json"
}
LOG = logging.getLogger("setup_aion")
def request(url, data=None, method=None, headers=HDRS, params={}, allow_redirects=True,
files=None, stream=False, verify=True):
if not method:
if data:
method = 'POST'
else:
method = 'GET'
if isinstance(data, dict) or isinstance(data, list):
data = json.dumps(data)
with requests.Session() as s:
req = requests.Request(method, url, headers=headers, params=params, data=data, files=files)
prepreq = s.prepare_request(req)
resp = s.send(prepreq, timeout=15, allow_redirects=allow_redirects, stream=stream, verify=verify)
if not resp.ok:
raise Exception("request error: url:%s, code:%s, data:%s" % (url, str(resp.status_code), resp.content))
return resp
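# Example call shape for the helper above (illustrative): passing a dict as
# ``data`` serializes it to JSON and defaults the method to POST, e.g.
#   request(aion_url + "/api/iam/oauth2/token", data={"grant_type": "password"})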
def csv_list(vstr, sep=','):
''' Convert a string of comma separated values to floats
@returns iterable of floats
'''
values = []
for v in vstr.split(sep):
if v:
values.append(v)
return values
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--aion_url",
help="AION URL. An example URL would be https://example.spirentaion.com", type=str,
default="", required=True)
parser.add_argument("--aion_user", help="AION user", type=str,
required=True)
parser.add_argument("--aion_password", help="AION password", type=str,
required=True)
parser.add_argument("--local_addr",
help="Local API IP/host. Will use platform_addr if not specified.",
type=str, default="")
parser.add_argument("--platform_addr", help="Cluser/Node IP/host", type=str,
required=True)
parser.add_argument("--cluster_name", help="Node Name", type=str,
default="")
parser.add_argument("--node_name", help="Node Name", type=str,
default="")
parser.add_argument("--admin_first_name", help="Admin First Name", type=str,
default="")
parser.add_argument("--admin_last_name", help="Admin Last Name", type=str,
default="")
parser.add_argument("--admin_email", help="Admin Email", type=str,
default="")
    parser.add_argument("--admin_password", help="Admin Password", type=str,
required=True)
parser.add_argument("--org_id", help="Organization ID", type=str,
default="")
parser.add_argument("--org_domains", help="Organization Domains", type=csv_list,
default="")
parser.add_argument("--org_subdomain", help="Organization Subdomain", type=str,
default="")
parser.add_argument("--metrics_opt_out", help="Metrics Opt Out", type=str2bool,
default=False)
parser.add_argument("--http_enabled", help="HTTP Enabled", type=str2bool,
default=False)
    parser.add_argument("--local_admin_password", help="Local Admin Password", type=str,
default="")
parser.add_argument("--node_storage_provider", help="Node Storage Provider", type=str,
default="local")
parser.add_argument("--node_storage_remote_uri", help="Node Storage Remote URL", type=str,
default="")
parser.add_argument("--wait_timeout", help="Time in seconds to wait for platform initialization", type=str,
default=900)
parser.add_argument("-v", "--verbose", help="Verbose logging", type=str2bool,
default=False)
parser.add_argument("--log_file", help="Log file for output. stdout when not set", type=str, default="")
args = parser.parse_args()
if args.admin_password == "":
        raise Exception("admin password must be specified")
return args
def get_server_init_data(c, org, user_info):
# Config Auto Fill
if not c.get("org_id"):
c["org_id"] = org["id"]
if not c.get("org_name"):
c["org_name"] = org["name"]
if not c.get("org_domains"):
c["org_domains"] = org["domains"]
if not c.get("org_subdomain"):
c["org_subdomain"] = org["subdomain"]
if not c.get("cluster_name"):
c["cluster_name"] = c["platform_addr"]
if not c.get("node_name"):
c["node_name"] = c["platform_addr"]
    if not c.get("admin_first_name"):
c["admin_first_name"] = user_info["first"]
if not c.get("admin_last_name"):
c["admin_last_name"] = user_info["last"]
if not c.get("admin_email"):
c["admin_email"] = user_info["email"]
if not c.get("local_admin_password"):
c["local_admin_password"] = c["<PASSWORD>"]
email_settings = None
# Send Initialization
data = {
"cluster": {
"name": c["cluster_name"],
"admin": {
"first": c["admin_first_name"],
"last": c["admin_last_name"],
"password": c["<PASSWORD>"],
"email": c["admin_email"],
},
"organization": {
"id": c["org_id"],
"name": c["org_name"],
"subdomain": c["org_subdomain"],
"domains": c["org_domains"]
},
"email_settings": email_settings,
"metrics_opt_out": c["metrics_opt_out"],
"web_settings": {
"http": {
"enabled": c["http_enabled"],
}
}
},
"node": {
"name": c["node_name"],
"local_admin_password": c["<PASSWORD>"],
"storage": {
"provider": c["node_storage_provider"],
"remote_uri": c["node_storage_remote_uri"]
}
}
}
return data
def main():
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
LOG.addHandler(handler)
args = parse_args()
if args.verbose:
LOG.setLevel(logging.DEBUG)
else:
LOG.setLevel(logging.INFO)
if args.log_file:
log_handler = logging.FileHandler(args.log_file)
log_handler.setFormatter(formatter)
LOG.addHandler(log_handler)
c = args.__dict__
LOG.debug("Config: %s" % json.dumps(c))
if c["local_addr"]:
app_url = "http://" + c["local_addr"]
else:
app_url = "http://" + c["platform_addr"]
aion_url = c["aion_url"]
org_info = request(aion_url + "/api/iam/organizations/default").json()
LOG.debug("org_info: %s" % json.dumps(org_info))
data = {
"grant_type": "password",
"username": c["aion_user"],
"password": c["<PASSWORD>"],
"scope": org_info["id"]
}
r = request(aion_url + "/api/iam/oauth2/token", data=data).json()
access_token = r["access_token"]
LOG.debug("access_token: %s" % access_token)
hdrs = {
"Accept": "application/json",
"Authorization": "Bearer " + access_token,
}
user_info = request(aion_url + "/api/iam/users/my", headers=hdrs).json()
LOG.debug("userInfo: %s" % json.dumps(user_info))
hdrs = {
"Accept": "application/json",
"Content-Type": "application/json",
}
# Local Storage
data = {
"config": {
"provider": "local",
"remote_uri": ""
}
}
local_storage = request(app_url + "/api/local/storage/test", headers=hdrs, data=data).json()
LOG.debug("localStorage: %s" % json.dumps(local_storage))
data = get_server_init_data(c, org_info, user_info)
LOG.debug("ServerFormingNewCluster: %s" % json.dumps(data))
r = request(app_url + "/api/local/initialization/server-forming-new-cluster", headers=hdrs, data=data)
completed = False
start_time = time.time()
wait_time = int(c["wait_timeout"])
if wait_time:
LOG.info("Waiting for AION platform initialization to complete...")
while True:
try:
r = request(app_url + "/api/local/initialization").json()
except Exception as e:
                LOG.debug("installation status exception (may be expected before initialization finishes): %s" % str(e))
r = None
if r:
LOG.debug("initialization status: %s\n" % json.dumps(r))
if r["initialized"]:
completed = True
break
if r.get("status") == "error":
raise Exception("failed to configure platform")
if (time.time() - start_time) > wait_time:
LOG.warning(
"platform initialization didn't complete in %d seconds. platform wait timed out." % wait_time)
break
time.sleep(5)
if not completed:
raise Exception("platform initialization did not complete")
org_info = request(app_url + "/api/iam/organizations/default").json()
LOG.debug("org_info: %s" % json.dumps(org_info))
data = {
"grant_type": "password",
"username": c["admin_email"],
"password": c["<PASSWORD>"],
"scope": org_info["id"]
}
r = request(app_url + "/api/iam/oauth2/token", data=data).json()
app_token = r["access_token"]
hdrs = {
"Accept": "application/json",
"Content-Type": "application/json",
"Authorization": "Bearer " + app_token,
}
data = {
"url": aion_url,
"username": c["aion_user"],
"password": c["<PASSWORD>"]
}
request(app_url + "/api/cluster/settings/temeva/login", headers=hdrs, data=data)
if __name__ == "__main__":
try:
main()
except Exception as e:
LOG.error('%s' % str(e))
LOG.debug('Error in setup-aion', exc_info=True)
sys.exit(str(e))
'''
python3 setup-aion.py --aion_url "https://spirent.spirentaion.com" --platform_addr "10.109.121.113"
--aion_user <user> --aion_password <password> --admin_password <password>
'''
| StarcoderdataPython |
1680091 | import json
from argo_workflows.model.object_field_selector import ObjectFieldSelector
from argo_workflows.models import ConfigMapKeySelector, EnvVarSource, SecretKeySelector
from pydantic import BaseModel
from hera import ConfigMapEnvSpec, EnvSpec, FieldEnvSpec, SecretEnvSpec
class MockModel(BaseModel):
field1: int = 1
field2: int = 2
def test_env_spec_sets_base_model(mock_model):
m = mock_model()
env = EnvSpec(name="model_string", value=m)
argo_spec = env.argo_spec
assert argo_spec.value == '{"field1": 1, "field2": 2}'
model_dict = json.loads(argo_spec.value)
test_model = MockModel(**model_dict)
assert test_model.field1 == m.field1
assert test_model.field2 == m.field2
def test_env_spec_sets_primitive_types_as_expected():
int_val = 1
int_env = EnvSpec(name="int", value=int_val)
int_spec = int_env.argo_spec
assert int_spec.value == '1'
assert json.loads(int_spec.value) == int_val
# values are stringified to env variable values, but strings are already stringified
# so the test here ensures that strings are passed as strings, by comparison to
# other primitive types
str_val = 'str'
str_env = EnvSpec(name="str", value=str_val)
str_spec = str_env.argo_spec
assert str_spec.value == 'str'
dict_val = {'a': 42}
dict_env = EnvSpec(name="dict", value=dict_val)
dict_spec = dict_env.argo_spec
assert dict_spec.value == '{"a": 42}'
assert json.loads(dict_spec.value) == dict_val
def test_secret_env_spec_contains_expected_fields():
env = SecretEnvSpec(name='s', secret_name='a', secret_key='b')
spec = env.argo_spec
assert not hasattr(spec, 'value')
assert spec.name == 's'
assert isinstance(spec.value_from, EnvVarSource)
assert isinstance(spec.value_from.secret_key_ref, SecretKeySelector)
assert spec.value_from.secret_key_ref.name == 'a'
assert spec.value_from.secret_key_ref.key == 'b'
def test_config_map_env_spec_contains_expected_fields():
env = ConfigMapEnvSpec(name='s', config_map_name='a', config_map_key='b')
spec = env.argo_spec
assert not hasattr(spec, 'value')
assert spec.name == 's'
assert isinstance(spec.value_from, EnvVarSource)
assert isinstance(spec.value_from.config_map_key_ref, ConfigMapKeySelector)
assert spec.value_from.config_map_key_ref.name == 'a'
assert spec.value_from.config_map_key_ref.key == 'b'
def test_field_env_spec_contains_expected_fields():
env = FieldEnvSpec(name='s', field_path='a', api_version="b")
spec = env.argo_spec
assert not hasattr(spec, 'value')
assert spec.name == 's'
assert isinstance(spec.value_from, EnvVarSource)
assert isinstance(spec.value_from.field_ref, ObjectFieldSelector)
assert spec.value_from.field_ref.field_path == 'a'
assert spec.value_from.field_ref.api_version == 'b'
| StarcoderdataPython |
6496412 | <gh_stars>0
"""Top-level package for python_template."""
from ._version import get_versions
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = get_versions()['version']
del get_versions
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| StarcoderdataPython |
8020803 | from django.urls import path
from worlds.views import *
urlpatterns = [
path('pipeline/start/', start_pipeline),
path('pipelines/', pipeline_list),
path('jobs/', job_list),
path('job/<int:jid>/', job_details),
path('job/<int:jid>/shelix-logs/', job_shelix_log),
path('job/<int:jid>/kill/', job_kill),
path('job/<int:jid>/<str:pod>.log', job_log),
path('job/<int:jid>/<str:zip>.logs.zip', all_logs),
path('job/<int:jid>/<str:zip>.zip', job_zip),
]
| StarcoderdataPython |
6476636 | <filename>backpack/extensions/secondorder/diag_ggn/convtransposend.py<gh_stars>100-1000
from backpack.extensions.secondorder.diag_ggn.diag_ggn_base import DiagGGNBaseModule
from backpack.utils import conv_transpose as convUtils
class DiagGGNConvTransposeND(DiagGGNBaseModule):
def bias(self, ext, module, grad_inp, grad_out, backproped):
sqrt_ggn = backproped
return convUtils.extract_bias_diagonal(module, sqrt_ggn, sum_batch=True)
def weight(self, ext, module, grad_inp, grad_out, backproped):
X = convUtils.unfold_by_conv_transpose(module.input0, module)
weight_diag = convUtils.extract_weight_diagonal(
module, X, backproped, sum_batch=True
)
return weight_diag
class BatchDiagGGNConvTransposeND(DiagGGNBaseModule):
def bias(self, ext, module, grad_inp, grad_out, backproped):
sqrt_ggn = backproped
return convUtils.extract_bias_diagonal(module, sqrt_ggn, sum_batch=False)
def weight(self, ext, module, grad_inp, grad_out, backproped):
X = convUtils.unfold_by_conv_transpose(module.input0, module)
weight_diag = convUtils.extract_weight_diagonal(
module, X, backproped, sum_batch=False
)
return weight_diag
| StarcoderdataPython |
6541969 | #Data Types
#String
#Hello in quotes is the string that gets output
print("Hello")
#Subscript
#looks at the position within the string and outputs just that character
#below will output just H
print("Hello"[0])
#below will output just o
print("Hello"[4])
#Integer
#actual numbers in the code for calculating
#displayed just as numbers without quotes
#below will add 123+456 = 579
print(123+456)
#in python thousands are separated with an underscore
#this is to make it easier to read in the code
print(1_000)
print(125_000)
#Float
#This is used for decimals
3.14159
#Boolean
#data type which is always either True or False
True
False
print(3 * (3 + 3) / 3 - 3)
#Round Numbers
#Will round into a whole number by default
print(round(8/3))
#If you want to round to two decimal places add a comma and specify which spot
print(round(8/3,2))
print(round(8/3,3))
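#Note (added example): Python 3 rounds ties to the nearest even number ("banker's rounding")
#so 2.5 rounds down to 2 while 3.5 rounds up to 4
print(round(2.5))
print(round(3.5))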
#F Strings
#Used to print an integer/float/booleans in a string
print(f"your score is {3}") | StarcoderdataPython |
4820182 | <filename>scout/parse/cytoband.py
import intervaltree
def parse_cytoband(lines):
"""Parse iterable with cytoband coordinates
Args:
lines(iterable): Strings on format "chr1\t2300000\t5400000\tp36.32\tgpos25"
Returns:
cytobands(dict): Dictionary with chromosome names as keys and
interval trees as values
"""
cytobands = {}
for line in lines:
if line.startswith("#"):
continue
line = line.rstrip()
splitted_line = line.split("\t")
chrom = splitted_line[0].lstrip("chr")
start = int(splitted_line[1])
stop = int(splitted_line[2])
name = splitted_line[3]
if chrom in cytobands:
# Add interval to existing tree
cytobands[chrom][start:stop] = name
else:
# Create a new interval tree
new_tree = intervaltree.IntervalTree()
# create the interval
new_tree[start:stop] = name
# Add the interval tree
cytobands[chrom] = new_tree
return cytobands
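# A minimal usage sketch, shown as comments only; the input line below is a hypothetical
# example in the UCSC cytoBand format this parser expects:
#   lines = ["chr1\t2300000\t5400000\tp36.32\tgpos25"]
#   cytobands = parse_cytoband(lines)
#   cytobands["1"][3000000]  # -> {Interval(2300000, 5400000, 'p36.32')}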
| StarcoderdataPython |
12829784 | <reponame>rahulbahal7/restricted-python<gh_stars>0
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
# This tiny set of safe builtins is extended by users of the module.
# AccessControl.ZopeGuards contains a large set of wrappers for builtins.
# DocumentTemplate.DT_UTil contains a few.
from RestrictedPython import _compat
if _compat.IS_PY2:
import __builtin__ as builtins
else:
# Do not attempt to use this package on Python2.7 as there
# might be backports for this package such as future.
import builtins
safe_builtins = {}
_safe_names = [
'None',
'False',
'True',
'abs',
'bool',
'callable',
'chr',
'complex',
'divmod',
'float',
'hash',
'hex',
'id',
'int',
'isinstance',
'issubclass',
'len',
'oct',
'ord',
'pow',
'range',
'repr',
'round',
'slice',
'str',
'tuple',
'zip'
]
_safe_exceptions = [
'ArithmeticError',
'AssertionError',
'AttributeError',
'BaseException',
'BufferError',
'BytesWarning',
'DeprecationWarning',
'EOFError',
'EnvironmentError',
'Exception',
'FloatingPointError',
'FutureWarning',
'GeneratorExit',
'IOError',
'ImportError',
'ImportWarning',
'IndentationError',
'IndexError',
'KeyError',
'KeyboardInterrupt',
'LookupError',
'MemoryError',
'NameError',
'NotImplementedError',
'OSError',
'OverflowError',
'PendingDeprecationWarning',
'ReferenceError',
'RuntimeError',
'RuntimeWarning',
'StopIteration',
'SyntaxError',
'SyntaxWarning',
'SystemError',
'SystemExit',
'TabError',
'TypeError',
'UnboundLocalError',
'UnicodeDecodeError',
'UnicodeEncodeError',
'UnicodeError',
'UnicodeTranslateError',
'UnicodeWarning',
'UserWarning',
'ValueError',
'Warning',
'ZeroDivisionError',
]
if _compat.IS_PY2:
_safe_names.extend([
'basestring',
'cmp',
'long',
'unichr',
'unicode',
'xrange',
])
_safe_exceptions.extend([
'StandardError',
])
else:
_safe_names.extend([
'__build_class__', # needed to define new classes
])
for name in _safe_names:
safe_builtins[name] = getattr(builtins, name)
for name in _safe_exceptions:
safe_builtins[name] = getattr(builtins, name)
# Wrappers provided by this module:
# delattr
# setattr
# Wrappers provided by ZopeGuards:
# __import__
# apply
# dict
# enumerate
# filter
# getattr
# hasattr
# iter
# list
# map
# max
# min
# sum
# all
# any
# Builtins that are intentionally disabled
# compile - don't let them produce new code
# dir - a general purpose introspector, probably hard to wrap
# execfile - no direct I/O
# file - no direct I/O
# globals - uncontrolled namespace access
# input - no direct I/O
# locals - uncontrolled namespace access
# open - no direct I/O
# raw_input - no direct I/O
# vars - uncontrolled namespace access
# There are several strings that describe Python. I think there's no
# point to including these, although they are obviously safe:
# copyright, credits, exit, help, license, quit
# Not provided anywhere. Do something about these? Several are
# related to new-style classes, which we are too scared of to support
# <0.3 wink>. coerce, buffer, and reload are esoteric enough that no
# one should care.
# buffer
# bytes
# bytearray
# classmethod
# coerce
# eval
# intern
# memoryview
# object
# property
# reload
# staticmethod
# super
# type
def _write_wrapper():
# Construct the write wrapper class
def _handler(secattr, error_msg):
# Make a class method.
def handler(self, *args):
try:
f = getattr(self.ob, secattr)
except AttributeError:
raise TypeError(error_msg)
f(*args)
return handler
class Wrapper(object):
def __init__(self, ob):
self.__dict__['ob'] = ob
__setitem__ = _handler(
'__guarded_setitem__',
'object does not support item or slice assignment')
__delitem__ = _handler(
'__guarded_delitem__',
'object does not support item or slice assignment')
__setattr__ = _handler(
'__guarded_setattr__',
'attribute-less object (assign or del)')
__delattr__ = _handler(
'__guarded_delattr__',
'attribute-less object (assign or del)')
return Wrapper
def _full_write_guard():
# Nested scope abuse!
# safetypes and Wrapper variables are used by guard()
safetypes = {dict, list}
Wrapper = _write_wrapper()
def guard(ob):
# Don't bother wrapping simple types, or objects that claim to
# handle their own write security.
if type(ob) in safetypes or hasattr(ob, '_guarded_writes'):
return ob
# Hand the object to the Wrapper instance, then return the instance.
return Wrapper(ob)
return guard
full_write_guard = _full_write_guard()
def guarded_setattr(object, name, value):
setattr(full_write_guard(object), name, value)
safe_builtins['setattr'] = guarded_setattr
def guarded_delattr(object, name):
delattr(full_write_guard(object), name)
safe_builtins['delattr'] = guarded_delattr
def safer_getattr(object, name, default=None, getattr=getattr):
"""Getattr implementation which prevents using format on string objects.
format() is considered harmful:
http://lucumr.pocoo.org/2016/12/29/careful-with-str-format/
"""
if isinstance(object, _compat.basestring) and name == 'format':
raise NotImplementedError(
'Using format() on a %s is not safe.' % object.__class__.__name__)
if name.startswith('_'):
raise AttributeError(
'"{name}" is an invalid attribute name because it '
'starts with "_"'.format(name=name)
)
return getattr(object, name, default)
safe_builtins['_getattr_'] = safer_getattr
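# Illustrative (non-authoritative) behavior of safer_getattr, as comments only:
#   safer_getattr("abc", "upper")()       -> "ABC"
#   safer_getattr("{}", "format")         -> raises NotImplementedError (format() is blocked)
#   safer_getattr(object(), "_private")   -> raises AttributeError (underscore names are blocked)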
def guarded_iter_unpack_sequence(it, spec, _getiter_):
"""Protect sequence unpacking of targets in a 'for loop'.
The target of a for loop could be a sequence.
For example "for a, b in it"
=> Each object from the iterator needs guarded sequence unpacking.
"""
# The iteration itself needs to be protected as well.
for ob in _getiter_(it):
yield guarded_unpack_sequence(ob, spec, _getiter_)
def guarded_unpack_sequence(it, spec, _getiter_):
"""Protect nested sequence unpacking.
Protect the unpacking of 'it' by wrapping it with '_getiter_'.
Furthermore for each child element, defined by spec,
guarded_unpack_sequence is called again.
Have a look at transformer.py 'gen_unpack_spec' for a more detailed
explanation.
"""
# Do the guarded unpacking of the sequence.
ret = list(_getiter_(it))
    # If the sequence is shorter than expected the interpreter will raise
    # 'ValueError: need more than X value to unpack' anyway
    # => No children are unpacked => nothing to protect.
if len(ret) < spec['min_len']:
return ret
# For all child elements do the guarded unpacking again.
for (idx, child_spec) in spec['childs']:
ret[idx] = guarded_unpack_sequence(ret[idx], child_spec, _getiter_)
return ret
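# A minimal sketch of guarded_unpack_sequence, as comments only; the spec dict below is a
# hand-written assumption (in real use it is generated by the transformer's gen_unpack_spec):
#   spec = {"min_len": 2, "childs": []}
#   guarded_unpack_sequence((1, 2), spec, iter)  -> [1, 2]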
safe_globals = {'__builtins__': safe_builtins}
| StarcoderdataPython |
4958286 | from opencivicdata.core.models import Post, PostContactDetail, PostLink
from .base import BaseImporter
class PostImporter(BaseImporter):
_type = 'post'
model_class = Post
related_models = {'contact_details': (PostContactDetail, 'post_id', {}),
'links': (PostLink, 'post_id', {})
}
def __init__(self, jurisdiction_id, org_importer):
super(PostImporter, self).__init__(jurisdiction_id)
self.org_importer = org_importer
def prepare_for_db(self, data):
data['organization_id'] = self.org_importer.resolve_json_id(data['organization_id'])
return data
def get_object(self, post):
spec = {
'organization_id': post['organization_id'],
'label': post['label'],
}
if post['role']:
spec['role'] = post['role']
return self.model_class.objects.get(**spec)
def limit_spec(self, spec):
spec['organization__jurisdiction_id'] = self.jurisdiction_id
return spec
| StarcoderdataPython |
8156536 | <filename>pre-refactor/reverse.py
#!/usr/bin/env python3
# reverse words or sentences
import random
import sys
def reverse_words(input_array):
output_array = []
for item in input_array:
playstring = ''
for i in range(0, len(item)):
playstring += item[len(item) - 1 - i]
output_array.append(playstring)
return output_array
def reverse_sentences(input_array):
reversed_array = reverse_words(input_array)
output_string = ''
for i in range(0, len(reversed_array)):
output_string += reversed_array[len(reversed_array) - 1 - i] + ' '
return output_string
param = sys.argv[1]
inputs = sys.argv[2:len(sys.argv)]
if param == '-w':
word_array = reverse_words(input_array=inputs)
output = ''
for item in word_array:
output += item + ' '
print(output)
elif param == '-s':
output = reverse_sentences(inputs)
print(output)
else:
print(
'Unrecognized argument \'{}\'. Use \'-w\' to reverse words, or \'-s\' to reverse sentences.'.format(sys.argv[1]))
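# Illustrative invocations, shown as comments only:
#   python3 reverse.py -w hello world   ->  olleh dlrow
#   python3 reverse.py -s hello world   ->  dlrow olleh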
| StarcoderdataPython |
179543 | import streamlit as st
import statsmodels.api as s_api
import matplotlib.pyplot as plt
class lin_mod: #perform linear regression using statsmodels
def __init__(self,x,y):
self.x=x
self.y=y
def model_imp(self):
x=s_api.add_constant(self.x)
y=self.y
mod=s_api.OLS(y,x).fit()
preds=mod.predict(x)
st.write(mod.summary())
mod_plots(preds-y)
hetero(y,y-preds)
def mod_plots(res): # create a qqplot
fig=s_api.qqplot(res,line='45')
st.pyplot(fig)
def hetero(x,y): #check heteroscedasticity
fig=plt.figure(figsize=(5,3))
plt.scatter(x,y,alpha=0.5)
plt.xlabel("response")
plt.ylabel("residuals")
plt.title("response vs residuals")
st.pyplot(fig)
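# A minimal usage sketch inside a Streamlit app, as comments only; the file name and
# column names below are assumptions, not part of this module:
#   import pandas as pd
#   df = pd.read_csv("data.csv")
#   lin_mod(df[["x1", "x2"]], df["y"]).model_imp()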
| StarcoderdataPython |
5106053 | <gh_stars>100-1000
from django.http import HttpResponse
from django.core.exceptions import ImproperlyConfigured
from django.core.serializers.json import DjangoJSONEncoder
from django.utils import six
# Django 1.5+ compat
try:
import json
except ImportError: # pragma: no cover
from django.utils import simplejson as json
class JSONResponseMixin(object):
content_type = None
json_encoder_class = DjangoJSONEncoder
def get_content_type(self):
if (self.content_type is not None and
not isinstance(self.content_type,
(six.string_types, six.text_type))):
raise ImproperlyConfigured(
'{0} is missing content type. Define {0}.content_type, '
'or override {0}.get_content_type().'.format(
self.__class__.__name__))
return self.content_type or "application/json"
def render_json_response(self, context, status=200):
"""
Serialize the context dictionary as JSON and return it
        as an HTTP Response object. This method only allows
serialization of simple objects (i.e. no model instances)
"""
json_context = json.dumps(context, cls=self.json_encoder_class)
return HttpResponse(json_context,
content_type=self.get_content_type(),
status=status)
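# A minimal usage sketch, as comments only; PingView is a hypothetical view and assumes
# django.views.generic.View is importable:
#   class PingView(JSONResponseMixin, View):
#       def get(self, request, *args, **kwargs):
#           return self.render_json_response({"ok": True})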
| StarcoderdataPython |
8114053 | import unittest
from pybooru import Safebooru, SafebooruImage
from .common import CommonTests
object_data = {
"directory": "3375",
"hash": "image_name",
"height": 1,
"id": 1,
"image": "image.ext",
"change": 1,
"owner": "owner",
"parent_id": 0,
"rating": "rating",
"sample": True,
"sample_height": 1,
"sample_width": 1,
"score": 0,
"tags": "tags",
"width": 1
}
class SafebooruTestCase(CommonTests, unittest.TestCase):
def setUp(self):
self.booru = Safebooru(limit=10)
self.image = SafebooruImage
self.object_data = object_data
self.count = '<?xml version="1.0" encoding="UTF-8"?><posts count="11" offset="0"></posts>'
self.page_increment = 0
self.page_parameter = 'pid'
class SafebooruImageTestCase(unittest.TestCase):
def setUp(self):
self.image = SafebooruImage(**object_data)
| StarcoderdataPython |
11399591 | <reponame>deHasara/modin<gh_stars>0
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Dataframe exchange protocol implementation.
See more in https://data-apis.org/dataframe-protocol/latest/index.html.
"""
import enum
import re
import pandas
import numpy as np
from pandas.api.types import is_datetime64_dtype
class DTypeKind(enum.IntEnum): # noqa PR01
"""
Integer enum for data types.
Attributes
----------
INT : int
Matches to integer data type.
UINT : int
Matches to unsigned integer data type.
FLOAT : int
Matches to floating point data type.
BOOL : int
Matches to boolean data type.
STRING : int
Matches to string data type.
DATETIME : int
Matches to datetime data type.
CATEGORICAL : int
Matches to categorical data type.
"""
INT = 0
UINT = 1
FLOAT = 2
BOOL = 20
STRING = 21 # UTF-8
DATETIME = 22
CATEGORICAL = 23
class ColumnNullType(enum.IntEnum): # noqa PR01
"""
Integer enum for null type representation.
Attributes
----------
NON_NULLABLE : int
Non-nullable column.
USE_NAN : int
NaN/NaT value.
USE_SENTINEL : int
Sentinel value besides NaN/NaT.
USE_BITMASK : int
The bit is set/unset representing a null on a certain position.
USE_BYTEMASK : int
The byte is set/unset representing a null on a certain position.
"""
NON_NULLABLE = 0
USE_NAN = 1
USE_SENTINEL = 2
USE_BITMASK = 3
USE_BYTEMASK = 4
class DlpackDeviceType(enum.IntEnum): # noqa PR01
"""Integer enum for device type codes matching DLPack."""
CPU = 1
CUDA = 2
CPU_PINNED = 3
OPENCL = 4
VULKAN = 7
METAL = 8
VPI = 9
ROCM = 10
class ArrowCTypes:
"""
Enum for Apache Arrow C type format strings.
The Arrow C data interface:
https://arrow.apache.org/docs/format/CDataInterface.html#data-type-description-format-strings
"""
NULL = "n"
BOOL = "b"
INT8 = "c"
UINT8 = "C"
INT16 = "s"
UINT16 = "S"
INT32 = "i"
UINT32 = "I"
INT64 = "l"
UINT64 = "L"
FLOAT16 = "e"
FLOAT32 = "f"
FLOAT64 = "g"
STRING = "u" # utf-8
DATE32 = "tdD"
DATE64 = "tdm"
    # Resolution:
# - seconds -> 's'
    # - milliseconds -> 'm'
# - microseconds -> 'u'
# - nanoseconds -> 'n'
TIMESTAMP = "ts{resolution}:{tz}"
def pandas_dtype_to_arrow_c(dtype) -> str:
"""
Represent pandas `dtype` as a format string in Apache Arrow C notation.
Parameters
----------
dtype : np.dtype
Datatype of pandas DataFrame to represent.
Returns
-------
str
Format string in Apache Arrow C notation of the given `dtype`.
"""
if isinstance(dtype, pandas.CategoricalDtype):
return ArrowCTypes.INT64
elif dtype == np.dtype("O"):
return ArrowCTypes.STRING
format_str = getattr(ArrowCTypes, dtype.name.upper(), None)
if format_str is not None:
return format_str
if is_datetime64_dtype(dtype):
# Selecting the first char of resolution string:
# dtype.str -> '<M8[ns]'
resolution = re.findall(r"\[(.*)\]", dtype.str)[0][:1]
return ArrowCTypes.TIMESTAMP.format(resolution=resolution, tz="")
    raise NotImplementedError(
        f"Conversion of {dtype} to Arrow C format string is not implemented."
)
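# Illustrative (non-authoritative) outputs of pandas_dtype_to_arrow_c, as comments only:
#   pandas_dtype_to_arrow_c(np.dtype("int64"))          -> "l"
#   pandas_dtype_to_arrow_c(np.dtype("O"))              -> "u"   (object/str)
#   pandas_dtype_to_arrow_c(np.dtype("datetime64[ns]")) -> "tsn:"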
| StarcoderdataPython |
5182429 | from ocr4all_helper_scripts.helpers import legacyconvert_helper
from pathlib import Path
import click
@click.command("legacy-convert", help="Convert legacy OCR4all projects to latest.")
@click.option("-p", "--path", type=str, required=True, help="Path to the OCR4all project.")
def legacyconvert_cli(path):
for xml in sorted(list(Path(path).glob("*.xml"))):
updated_page = legacyconvert_helper.convert_page(xml)
legacyconvert_helper.write_xml(xml, updated_page)
if __name__ == "__main__":
legacyconvert_cli()
| StarcoderdataPython |
3251871 | # -*- coding: utf-8 -*-
"""
Created on 2017/6/9
@author: MG
"""
import logging
import threading
import inspect
from ctp import ApiStruct, MdApi, TraderApi
import hashlib, os, sys, tempfile, time, re
from config import Config, PeriodType, PositionDateType
from backend.fh_utils import str_2_bytes, bytes_2_str
from datetime import datetime, timedelta, date
from queue import Queue, Empty
from threading import Thread
from collections import OrderedDict
from backend.orm import CommissionRateModel
from event_agent import event_agent, EventType
from md_saver import MdMin1Combiner, MdMinNCombiner, MdSaver, MdPublisher
TOO_SMALL_TO_AVAILABLE = 0.0000001
TOO_LARGE_TO_AVAILABLE = 100000000
OST_Canceled_STR = bytes_2_str(ApiStruct.OST_Canceled)
FRONT_DISCONNECTED_REASON_DIC = {
0x1001: '网络读失败',
0x1002: '网络写失败',
0x2001: '接收心跳超时',
0x2002: '发送心跳失败',
0x2003: '收到错误报文',
}
class RequestInfo:
"""
保存 request 相关信息到 queue中,共其他线程执行
"""
def __init__(self, func, p_struct, request_id, request_timeout=0, max_wait_rsp_time=2):
"""
:param func:
:param p_struct:
:param request_id:
:param request_timeout:
:param max_wait_rsp_time:
"""
self.func = func
self.p_struct = p_struct
self.request_id = request_id
self.request_timeout = request_timeout
self.create_datetime = datetime.now()
self.handle_datetime = datetime.now()
self.max_wait_rsp_time = max_wait_rsp_time
class ApiBase:
def __init__(self,
broker_id=Config.BROKER_ID,
user_id=Config.USER_ID,
password=Config.PASSWORD):
self.logger = logging.getLogger(self.__class__.__name__)
        # TODO: in the future, request_id will be initialized via self.init_request_id()
self._request_id = 0
self._mutex_request_id = threading.Lock()
        # TODO: initialize is_SettlementInfoConfirmed, wrap it as a property and keep it in sync with the database
self.broker_id = broker_id
self.user_id = user_id
self.password = password
self.trading_day = ''
self.front_id = 1
self.session_id = 0
        # logged-in flag
self._has_login = False
        # request/response queue
self._request_info_resp_queue = Queue()
        # dict of requests waiting for a response
self._request_info_wait_resp_dic = {}
        # queue of requests waiting to be sent
self._request_info_will_be_send_queue = Queue()
self._handle_request_in_queue_thread = Thread(target=self._handle_request_in_queue, daemon=True)
self._handle_request_in_queue_thread_running = True
@property
def has_login(self):
return self._has_login
@has_login.setter
def has_login(self, val):
self._has_login = val
if val and not self._handle_request_in_queue_thread.is_alive():
self._handle_request_in_queue_thread_running = True
self._handle_request_in_queue_thread.start()
elif not val:
self._handle_request_in_queue_thread_running = False
@staticmethod
def struc_attr_value_transfor(attr_value, validate_data_type=None):
"""
用于将 OnRsp 接口中返回的类型的对应属性值中bytes 值转换为str ,其他类型原样返回
目前该函数仅供 struct_2_json 函数使用
:param attr_value:
:param validate_data_type: 默认为 None, 验证字段是否有效,例如:date类型,验证字符串是否为日期格式(处于性能考虑,目前只验证字段长度,达到8为就代表是日期格式,对于 StartDelivDate 有时候会传递 b'1'值导致合约更新失败)
:return:
"""
val_type = type(attr_value)
if val_type == bytes:
ret = bytes_2_str(attr_value)
else:
ret = attr_value
if validate_data_type is not None:
if validate_data_type == date:
if len(attr_value)!=8:
ret = None
else:
try:
ret = datetime.strptime(attr_value, Config.DATE_FORMAT_STR_CTP)
except:
ret = None
return ret
@staticmethod
def struct_2_dic(a_struct, validate_data_type:dict=None):
"""
        Converts a struct returned by the OnRsp interfaces into a dict (suitable for JSON serialization)
        :param a_struct:
        :param validate_data_type: defaults to None; validates whether fields are valid
:return:
"""
attr_dic = {k: ApiBase.struc_attr_value_transfor(getattr(a_struct, k),
None if validate_data_type is None else validate_data_type.setdefault(k, None))
for k, v in a_struct._fields_}
# json_str = json.dumps(attr_dic)
return attr_dic
@staticmethod
def insert_one_2_db(a_struct, collection_name=None, **kwargs):
"""
        Converts a struct returned by the OnRsp interfaces into a dict and inserts it into the specified MongoDB collection
:param a_struct:
:param collection_name:
:return:
"""
if collection_name is None:
collection_name = a_struct.__class__.__name__
dic = ApiBase.struct_2_dic(a_struct)
dic.update(kwargs)
# with_mongo_collection(lambda col: col.insert_one(dic), collection_name)
Config.do_for_mongo_collection(lambda col: col.insert_one(dic), collection_name)
return dic
def get_tmp_path(self):
"""
        Returns the directory used to store temporary files when the API starts
:return:
"""
folder_name = b''.join((b'ctp.futures', self.broker_id, self.user_id))
dir_name = hashlib.md5(folder_name).hexdigest()
dir_path = os.path.join(tempfile.gettempdir(), dir_name, self.__class__.__name__) + os.sep
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
return os.fsencode(dir_path) if sys.version_info[0] >= 3 else dir_path
def is_rsp_success(self, rsp_info, stack_num=1):
"""
        Checks the rsp_info status and logs the result under the calling function's name
:param rsp_info:
:param stack_num:
:return:
"""
is_success = rsp_info is None or rsp_info.ErrorID == 0
if not is_success:
stack = inspect.stack()
parent_function_name = stack[stack_num].function
self.logger.error('%s 失败:%s', parent_function_name, bytes_2_str(rsp_info.ErrorMsg))
return is_success
def resp_common(self, rsp_info, request_id, is_last, stack_num=1):
"""
        Checks the RspInfo status together with the bIsLast flag
        :param rsp_info:
        :param is_last:
        :param stack_num: stack depth of the calling function
        :return: 1 finished successfully; 0 successful but not yet finished; -1 failed
"""
# self.logger.debug("resp: %s" % str(rsp_info))
is_success = self.is_rsp_success(rsp_info, stack_num=2)
if not is_success:
self._get_response(request_id)
return -1
elif is_last and is_success:
self._get_response(request_id)
return 1
else:
# try:
# stack = inspect.stack()
# parent_function_name = stack[stack_num].function
# self.logger.debug("%s 等待数据接收完全...", parent_function_name)
# except:
# self.logger.warning("get stack error")
return 0
def init_request_id(self):
"""
        Initializes request_id
:return:
"""
raise NotImplementedError()
def inc_request_id(self):
"""
        Returns an auto-incremented request_id (thread-safe)
:return:
"""
self._mutex_request_id.acquire()
self._request_id += 1
self._mutex_request_id.release()
return self._request_id
def log_req_if_error(self, req_ret, func_name=None, stack_num=1):
"""
        Logs an error message if req returns a value less than 0
        -1 means the network connection failed;
        -2 means the number of pending requests exceeded the limit;
        -3 means the number of requests sent per second exceeded the limit.
        :param req_ret:
        :param stack_num: stack depth of the calling function
:return:
"""
if req_ret is None:
msg = "req 返回为 None"
if func_name is None:
stack = inspect.stack()
func_name = stack[stack_num].function
self.logger.error('%s 返回错误:%s', func_name, msg)
elif req_ret < 0:
if req_ret == -1:
msg = '网络连接失败'
elif req_ret == -2:
msg = '未处理请求超过许可数'
elif req_ret == -3:
msg = '每秒发送请求数超过许可数'
else:
msg = '其他原因'
if func_name is None:
stack = inspect.stack()
func_name = stack[stack_num].function
self.logger.error('%s 返回错误:%s', func_name, msg)
def _send_request_2_queue(self, func, p_struct, add_request_id=False, add_order_ref=False, request_timeout=2, max_wait_rsp_time=2):
"""
        Common request method: pass in the API function and the related struct data.
        A new request_id is generated automatically and the call is logged.
        :param func:
        :param p_struct:
        :param add_request_id:
        :param request_timeout: timeout in seconds (default 2 per the signature); 0 disables the timeout check
        :param max_wait_rsp_time: timeout in seconds (default 2); 0 disables the timeout check
:return:
"""
request_id = self.inc_request_id()
if add_request_id:
p_struct.RequestID = request_id
if add_order_ref:
order_ref = self.inc_order_ref()
p_struct.OrderRef = order_ref
req_info = RequestInfo(func, p_struct, request_id, request_timeout, max_wait_rsp_time)
# self.logger.debug("发送请求到 _request_info_will_be_send_queue")
self._request_info_will_be_send_queue.put(req_info)
# self.logger.debug("发送请求到 _request_info_will_be_send_queue 完成")
return 0
def _handle_request_in_queue(self):
"""
        Fetches requests from the background request queue and sends them
:return:
"""
self.logger.info("%s 请求队列 后台发送线程 启动", self.__class__.__name__)
max_wait_time = 30
datetime_last_req = datetime.now()
        # set the maximum request send rate, in seconds per request
max_seconds_4_freq_request = 1
while self._handle_request_in_queue_thread_running:
            # CTP may refuse to respond when requests are sent too densely, so a request wait mechanism is used:
            # wait at most max_wait_time seconds for the exchange to return a response;
            # past that time the request is considered abandoned and later requests continue to be processed
# self.logger.debug("后台请求队列处理线程 Looping 1")
try:
if len(self._request_info_wait_resp_dic) > 0:
request_id = self._request_info_resp_queue.get(timeout=1)
if request_id in self._request_info_wait_resp_dic:
req_info = self._request_info_wait_resp_dic.pop(request_id)
datetime_last_req = req_info.handle_datetime
self._request_info_resp_queue.task_done()
except Empty:
for request_id in list(self._request_info_wait_resp_dic.keys()):
req_info = self._request_info_wait_resp_dic[request_id]
handle_datetime = req_info.handle_datetime
if (datetime.now() - handle_datetime).seconds > req_info.max_wait_rsp_time:
self.logger.warning('请求超时 %s[%d] %s -> %s', req_info.func.__name__, request_id,
handle_datetime, req_info.p_struct)
del self._request_info_wait_resp_dic[request_id]
else:
self.logger.debug("等待请求响应 %s[%d] %s -> %s", req_info.func.__name__, request_id,
handle_datetime, req_info.p_struct)
pass
continue
except:
self.logger.exception("从 _request_info_will_be_send_queue 获取信息出现异常")
# self.logger.debug("后台请求队列处理线程 Looping 2")
try:
req_info = self._request_info_will_be_send_queue.get(timeout=5)
# self.logger.debug("后台请求队列处理线程 Looping 2.1")
func = req_info.func
func_name = func.__name__
request_id = req_info.request_id
request_timeout = req_info.request_timeout
total_seconds = (datetime.now() - datetime_last_req).total_seconds()
if total_seconds < max_seconds_4_freq_request:
time.sleep(max_seconds_4_freq_request - total_seconds)
p_struct = req_info.p_struct
if (datetime.now() - req_info.create_datetime).total_seconds() > request_timeout > 0:
                    # the timeout check mainly prevents a burst of requests right after a queue backlog clears
logging.warning("请求过期[%d],创建时间:%s 超时时间:%.1f:%s %s ",
request_id, req_info.create_datetime, request_timeout, func_name, p_struct)
continue
self.logger.debug('发送请求[%d] -> %s %s', request_id, func_name, p_struct)
for n_time in range(1, 4):
req_ret = func(p_struct, request_id)
self.log_req_if_error(req_ret, func_name=func_name)
datetime_now = datetime.now()
datetime_last_req = datetime_now
if req_ret == 0:
req_info.handle_datetime = datetime_now
self._request_info_wait_resp_dic[request_id] = req_info
break
elif req_ret == -3:
                        # -3 means the number of requests sent per second exceeded the limit
                        # wait for a while, then send again
time.sleep(0.5 * n_time)
continue
else:
break
except Empty:
pass
except:
self.logger.exception("执行请求失败")
finally:
time.sleep(0.1)
# self.logger.debug("后台请求队列处理线程 Looping 3")
self.logger.info("%s 后台请求队列处理线程 结束", self.__class__.__name__)
def _get_response(self, request_id):
"""
        Called when a response is received for a sent request; handles the related request queues
:param request_id:
:return:
"""
self._request_info_resp_queue.put(request_id)
class StrategyOrder:
"""
    Records all of the user's data from ReqOrderInsert and OnRspOrderInsert through OnRtnTrade
"""
def __init__(self, strategy_id, front_id, session_id, order_ref, input_order):
"""
        Records the strategy_id and input_order from the ReqOrderInsert request
:param strategy_id:
:param input_order:
"""
self.__strategy_id = strategy_id
self.front_id = front_id
self.session_id = session_id
self.order_ref = order_ref
self.__input_order = None
self.trade_list = []
self.input_order = input_order
@property
def strategy_id(self):
return self.__strategy_id
@property
def input_order(self):
return self.__input_order
@input_order.setter
def input_order(self, input_order):
self.__input_order = input_order
# with with_mongo_client() as client:
# db = client[Config.MONGO_DB_NAME]
# collection = db[Config.MONGO_COLLECTION_INPUT_ORDER]
# collection.insert_one(input_order)
def __str__(self):
return "%s(front_id=%d, session_id=%d, order_ref=%r)" % (
self.__class__.__name__, self.front_id, self.session_id, self.order_ref)
__repr__ = __str__
class MyMdApi(MdApi, ApiBase):
"""
    Local wrapper around MdApi
"""
def __init__(self,
instrument_id_list,
broker_id=Config.BROKER_ID,
user_id=Config.USER_ID,
password=Config.PASSWORD):
ApiBase.__init__(self, broker_id=broker_id,
user_id=user_id,
password=password)
self.instrument_id_list = instrument_id_list
self.front_id = 0
self.Create()
self.md_saver_dic = {}
self.md_min1_combiner_dic = {}
self.md_minn_combiner_dic = {}
        # list of subscribed instruments
self.sub_instrument_list = []
# self.pub_sub = None # 似乎已经没用了
# self._md_handler = {}
# def register_md_handler(self, name, md_handler):
# self._md_handler[name] = md_handler
# def _handle_depth_md(self, depth_md_dic):
# error_name_list = []
# for name, handler in self._md_handler.items():
# try:
# handler(depth_md_dic)
# # self.logger.debug('%s data handle finished', name)
# except:
# self.logger.exception('%s run with error will be del on _md_handler', name)
# error_name_list.append(name)
# for name in error_name_list:
# del self._md_handler[name]
# self.logger.warning('从 _md_handler 中移除 %s', name)
# def insert_depth_md_2_queue(self, depth_md_dic):
# """
# 将Tick 数据推送到 md_tick_saver 的队列,供批量保存使用
# 将Tick 数据推送到 md_minute_saver 的队列,供生成分钟K线使用
# :param depth_md_dic:
# :return:
# """
# # 将Tick 数据推送到 md_tick_saver 的队列,供批量保存使用
# self.md_tick_saver.queue_md.put(depth_md_dic)
# # 将Tick 数据推送到 md_minute_saver 的队列,供生成分钟K线使用
# instrument_id = depth_md_dic['InstrumentID']
# md_minute_saver = self.md_minute_saver_dic.setdefault(instrument_id, None)
# if md_minute_saver is not None:
# md_minute_saver.queue_md.put(depth_md_dic)
# else:
# self.logger.warning('%s is not on list for minute saver', instrument_id)
def Create(self): # , pszFlowPath='', bIsUsingUdp=False, bIsMulticast=False
dir_path = self.get_tmp_path()
self.logger.info('cache %s', dir_path)
return super().Create(dir_path)
def RegisterFront(self, front=Config.FRONT_MD_ADDRESS):
self.logger.info('%s', front)
if isinstance(front, bytes):
return super().RegisterFront(front)
for pszFrontAddress in front:
super().RegisterFront(pszFrontAddress)
def OnFrontConnected(self):
self.logger.info('-> ReqUserLogin')
self.ReqUserLogin()
def OnFrontDisconnected(self, nReason):
"""
        Called when the connection between the client and the trading backend is lost. When this happens the API reconnects automatically, so the client does not need to do anything.
:param nReason:
:return:
"""
self.logger.warning('API将自动重新连接,客户端可不做处理 reason=%s %s',
nReason, FRONT_DISCONNECTED_REASON_DIC.setdefault(nReason, '未知原因'))
super().OnFrontDisconnected(self, nReason)
def OnHeartBeatWarning(self, nTimeLapse):
self.logger.debug('nTimeLapse=%s', nTimeLapse)
def ReqUserLogin(self): # , pReqUserLogin, nRequestID
pReqUserLogin = ApiStruct.ReqUserLogin(BrokerID=self.broker_id, UserID=self.user_id, Password=<PASSWORD>)
request_id = self.inc_request_id()
self.logger.debug('MyMdApi:%d', request_id)
req_ret = super().ReqUserLogin(pReqUserLogin, request_id)
self.log_req_if_error(req_ret, stack_num=2)
return req_ret
def OnRspUserLogin(self, pRspUserLogin, pRspInfo, nRequestID, bIsLast):
self.logger.info('%s %s %d %s', pRspUserLogin, pRspInfo, nRequestID, bIsLast)
if not self.is_rsp_success(pRspInfo):
return
self.trading_day = self.GetTradingDay()
self.logger.info('当前交易日: %s', self.trading_day)
        # set sessionid, frontid
self.session_id = pRspUserLogin.SessionID
self.front_id = pRspUserLogin.FrontID
self.has_login = True
        # register events and their handlers in one place  # superseded by each object's own register call
# self._register_event()
        # subscribe to md data
self.SubscribeMarketData(self.instrument_id_list)
self.has_login = True
# def _register_event(self):
# """
# 统一注册事件及相应的处理句柄
# :return:
# """
# # 注册 md handler
# # self.register_md_handler('queue md',
# # lambda depth_md_dic: self.insert_depth_md_2_queue(depth_md_dic)
# # )
# event_agent.register_handler(EventType.Tick_MD_Event,
# self.insert_depth_md_2_queue,
# 'queue md')
# # tick 数据的publish 可以集成到 md_saver_tick 中,不过出于效率考虑,将其在这里直接发布
# # self.register_md_handler('publish md',
# # lambda depth_md_dic: MdPublisher.publish_md(
# # Config.REDIS_CHANNEL[PeriodType.Tick],
# # depth_md_dic)
# # )
# event_agent.register_handler(EventType.Tick_MD_Event,
# lambda depth_md_dic: MdPublisher.publish_md(
# Config.REDIS_CHANNEL[PeriodType.Tick],
# depth_md_dic),
# 'publish md')
def _start_combiner_and_saver_thread(self):
"""
        Starts all md saver threads in one place
:return:
"""
self.logger.info('启动 MdMin1Combiner、MdMinNCombiner')
instrument_id_list = []
period_shift_min_set_dic = {PeriodType.Min5: {0},
PeriodType.Min15: {0}
}
for instrument_id in self.sub_instrument_list:
            # start the 1-minute combiner
combiner = MdMin1Combiner(instrument_id)
combiner.start()
self.md_min1_combiner_dic[instrument_id] = combiner
            # start the N-minute combiner
combiner = MdMinNCombiner(instrument_id, period_shift_min_set_dic)
combiner.start()
self.md_minn_combiner_dic[instrument_id] = combiner
instrument_id_list.append(instrument_id)
# self.logger.info('启动 md_minute_saver[%s]', instrument_id)
self.logger.info('%d 个 MdMin1Combiner、MdMinNCombiner 被启动:\n%s', len(instrument_id_list), instrument_id_list)
self.logger.info('启动 md saver')
for event_type in EventType:
saver = MdSaver.factory(event_type)
saver.start()
self.md_saver_dic[event_type] = saver
publisher = MdPublisher(event_type)
self.logger.info('启动 md saver')
def ReqUserLogout(self):
"""
        Logout request
:return:
"""
pUserLogout = ApiStruct.UserLogout(
BrokerID=Config.BROKER_ID,
UserID=Config.USER_ID)
return self._send_request_2_queue(super().ReqUserLogout, pUserLogout)
def OnRspUserLogout(self, pUserLogout, pRspInfo, nRequestID, bIsLast):
self.logger.info('(%d)%s', nRequestID, pRspInfo)
self.md_tick_saver.keep_running = False
self.logger.info('停止 md_tick_saver')
for instrument_id, saver in self.md_min1_combiner_dic.items():
saver.keep_running = False
self.logger.info('停止 md_minute_saver[%s]', instrument_id)
self.has_login = False
def OnRspSubMarketData(self, pSpecificInstrument, pRspInfo, nRequestID, bIsLast):
status = self.resp_common(pRspInfo, nRequestID, bIsLast)
if status < 0:
return
if pSpecificInstrument is None:
self.logger.warning('(%d):结果为 None', nRequestID)
return
instrument_id = bytes_2_str(pSpecificInstrument.InstrumentID)
self.sub_instrument_list.append(instrument_id)
# self.logger.debug('(%d):%s', nRequestID, pSpecificInstrument)
if status > 0:
self.logger.debug('%d个合约订阅行情完成\n%s', len(self.sub_instrument_list), self.sub_instrument_list)
            # start md saver threads
self._start_combiner_and_saver_thread()
def OnRspUnSubMarketData(self, pSpecificInstrument, pRspInfo, nRequestID, bIsLast):
status = self.resp_common(pRspInfo, nRequestID, bIsLast)
if status < 0:
return
if pSpecificInstrument is None:
self.logger.info('(%d):结果为 None', nRequestID)
return
self.logger.info('(%d):%s', nRequestID, pSpecificInstrument)
if status > 0:
self.logger.debug('取消订阅行情完成')
def OnRspError(self, pRspInfo, nRequestID, bIsLast):
self.logger.info('(%d)%s', nRequestID, pRspInfo)
def OnRtnDepthMarketData(self, pDepthMarketData):
# self.logger.debug(pDepthMarketData)
        # filter out invalid prices; otherwise values like 'AskPrice2': 1.7976931348623157e+308 often show up
upper_limit_price = pDepthMarketData.UpperLimitPrice
lower_limit_price = pDepthMarketData.LowerLimitPrice
# Bid
if pDepthMarketData.BidPrice1 < lower_limit_price or upper_limit_price < pDepthMarketData.BidPrice1:
# self.logger.debug('set BidPrice1 %f --> 0 ', pDepthMarketData.BidPrice1)
pDepthMarketData.BidPrice1 = 0
if pDepthMarketData.BidPrice2 < lower_limit_price or upper_limit_price < pDepthMarketData.BidPrice2:
# self.logger.debug('set BidPrice2 %f --> 0 ', pDepthMarketData.BidPrice2)
pDepthMarketData.BidPrice2 = 0
if pDepthMarketData.BidPrice3 < lower_limit_price or upper_limit_price < pDepthMarketData.BidPrice3:
# self.logger.debug('set BidPrice3 %f --> 0 ', pDepthMarketData.BidPrice3)
pDepthMarketData.BidPrice3 = 0
if pDepthMarketData.BidPrice4 < lower_limit_price or upper_limit_price < pDepthMarketData.BidPrice4:
# self.logger.debug('set BidPrice4 %f --> 0 ', pDepthMarketData.BidPrice4)
pDepthMarketData.BidPrice4 = 0
if pDepthMarketData.BidPrice5 < lower_limit_price or upper_limit_price < pDepthMarketData.BidPrice5:
# self.logger.debug('set BidPrice5 %f --> 0 ', pDepthMarketData.BidPrice5)
pDepthMarketData.BidPrice5 = 0
# Ask
if pDepthMarketData.AskPrice1 < lower_limit_price or upper_limit_price < pDepthMarketData.AskPrice1:
# self.logger.debug('set AskPrice1 %f --> 0 ', pDepthMarketData.AskPrice1)
pDepthMarketData.AskPrice1 = 0
if pDepthMarketData.AskPrice2 < lower_limit_price or upper_limit_price < pDepthMarketData.AskPrice2:
# self.logger.debug('set AskPrice2 %f --> 0 ', pDepthMarketData.AskPrice2)
pDepthMarketData.AskPrice2 = 0
if pDepthMarketData.AskPrice3 < lower_limit_price or upper_limit_price < pDepthMarketData.AskPrice3:
# self.logger.debug('set AskPrice3 %f --> 0 ', pDepthMarketData.AskPrice3)
pDepthMarketData.AskPrice3 = 0
if pDepthMarketData.AskPrice4 < lower_limit_price or upper_limit_price < pDepthMarketData.AskPrice4:
# self.logger.debug('set AskPrice4 %f --> 0 ', pDepthMarketData.AskPrice4)
pDepthMarketData.AskPrice4 = 0
if pDepthMarketData.AskPrice5 < lower_limit_price or upper_limit_price < pDepthMarketData.AskPrice5:
# self.logger.debug('set AskPrice4 %f --> 0 ', pDepthMarketData.AskPrice5)
pDepthMarketData.AskPrice5 = 0
# Others
if pDepthMarketData.AveragePrice < lower_limit_price or upper_limit_price < pDepthMarketData.AveragePrice:
# self.logger.debug('set AveragePrice %f --> 0 ', pDepthMarketData.AveragePrice)
pDepthMarketData.AveragePrice = 0
if pDepthMarketData.SettlementPrice < lower_limit_price or upper_limit_price < pDepthMarketData.SettlementPrice:
# self.logger.debug('set SettlementPrice %f --> 0 ', pDepthMarketData.SettlementPrice)
pDepthMarketData.SettlementPrice = 0
if pDepthMarketData.ClosePrice < lower_limit_price or upper_limit_price < pDepthMarketData.ClosePrice:
# self.logger.debug('set ClosePrice %f --> 0 ', pDepthMarketData.ClosePrice)
pDepthMarketData.ClosePrice = 0
if pDepthMarketData.OpenPrice < lower_limit_price or upper_limit_price < pDepthMarketData.OpenPrice:
# self.logger.debug('set OpenPrice %f --> 0 ', pDepthMarketData.OpenPrice)
pDepthMarketData.OpenPrice = 0
if pDepthMarketData.HighestPrice < lower_limit_price or upper_limit_price < pDepthMarketData.HighestPrice:
# self.logger.debug('set HighestPrice %f --> 0 ', pDepthMarketData.HighestPrice)
pDepthMarketData.HighestPrice = 0
if pDepthMarketData.LowestPrice < lower_limit_price or upper_limit_price < pDepthMarketData.LowestPrice:
# self.logger.debug('set LowestPrice %f --> 0 ', pDepthMarketData.LowestPrice)
pDepthMarketData.LowestPrice = 0
        # reset abnormal values to 0
if pDepthMarketData.PreClosePrice < TOO_SMALL_TO_AVAILABLE or TOO_LARGE_TO_AVAILABLE < pDepthMarketData.PreClosePrice:
pDepthMarketData.PreClosePrice = 0
if pDepthMarketData.Turnover < TOO_SMALL_TO_AVAILABLE or TOO_LARGE_TO_AVAILABLE < pDepthMarketData.Turnover:
pDepthMarketData.Turnover = 0
if pDepthMarketData.PreSettlementPrice < TOO_SMALL_TO_AVAILABLE or TOO_LARGE_TO_AVAILABLE < pDepthMarketData.PreSettlementPrice:
pDepthMarketData.PreSettlementPrice = 0
if pDepthMarketData.OpenInterest < TOO_SMALL_TO_AVAILABLE or TOO_LARGE_TO_AVAILABLE < pDepthMarketData.OpenInterest:
pDepthMarketData.OpenInterest = 0
if pDepthMarketData.LastPrice < TOO_SMALL_TO_AVAILABLE or TOO_LARGE_TO_AVAILABLE < pDepthMarketData.LastPrice:
pDepthMarketData.LastPrice = 0
if pDepthMarketData.PreOpenInterest < TOO_SMALL_TO_AVAILABLE or TOO_LARGE_TO_AVAILABLE < pDepthMarketData.PreOpenInterest:
pDepthMarketData.PreOpenInterest = 0
pDepthMarketData.PreDelta = 0
pDepthMarketData.CurrDelta = 0
depth_md_dic = ApiBase.struct_2_dic(pDepthMarketData)
instrument_id = bytes_2_str(pDepthMarketData.InstrumentID)
trading_day_str = bytes_2_str(pDepthMarketData.TradingDay)
action_day_str = bytes_2_str(pDepthMarketData.ActionDay)
# datetime.strptime(trading_day_str, '%Y%m%d').date()
depth_md_dic['TradingDay'] = '-'.join([trading_day_str[:4], trading_day_str[4:6], trading_day_str[6:]])
# datetime.strptime(action_day_str, '%Y%m%d').date()
        # ActionDay now defaults to the current server date
exch_datetime = Config.exch_datetime
depth_md_dic['ActionDay'] = exch_datetime.strftime(Config.DATE_FORMAT_STR)
depth_md_dic['ActionTime'] = exch_datetime.strftime(Config.TIME_FORMAT_STR)
depth_md_dic['ActionMillisec'] = exch_datetime.microsecond
depth_md_dic['UpdateTime_Sec_ms'] = float(
pDepthMarketData.UpdateTime[6:]) + pDepthMarketData.UpdateMillisec / 1000
# self.logger.debug(depth_md_dic)
        # depth_md_dic needs to be JSON-serialized, so datetime objects cannot be used directly
# action_datetime = action_day_str + ' ' + depth_md_dic['UpdateTime'] + '.' + str(pDepthMarketData.UpdateMillisec)
# depth_md_dic['ActionDateTime'] = action_datetime # datetime.strptime(action_datetime, '%Y%m%d %H:%M:%S.%f')
        # md handling is decoupled and encapsulated via the registry pattern
# self._handle_depth_md(depth_md_dic)
event_agent.send_event(EventType.Tick_MD_Event, depth_md_dic, key=instrument_id)
# 数据推入 md_saver 的 queue
# self.md_saver.queue_md.put(depth_md_dic)
# publish md data
# r = Config.get_redis()
# md_str = json.dumps(depth_md_dic)
# r.publish(Config.REDIS_CHANNEL_HEAD_MD, md_str)
def Release(self):
"""
        Destroys the API object itself and releases its resources
:return:
"""
try:
Config.release()
finally:
super().Release()
class MyTraderApi(TraderApi, ApiBase):
"""
    Local wrapper around TraderApi
"""
def __init__(self, broker_id=Config.BROKER_ID, user_id=Config.USER_ID,
investor_id=Config.INVESTOR_ID, password=Config.PASSWORD,
**kwargs
):
ApiBase.__init__(self, broker_id=broker_id,
user_id=user_id,
password=password)
self.investor_id = investor_id
        # settlement info, content part
self.settlement_info_content = []
        self._order_ref_sn = 0  # max order reference, char[13]
self._mutex_order_ref = threading.Lock()
        # used by the order insert response callback OnRspOrderInsert
self.order_ref_strategy_order_dic = {}
        # whether the settlement info has been confirmed
self._is_settlement_info_confirmed = False
        # set strategy_id
        # TODO: strategy_id should be initialized from configuration
self.strategy_id = 0
        # position info
        # TODO: _investor_position_dic stores the position info returned by ReqQryInvestorPosition;
        # it is refreshed automatically on login initialization, after trade returns and similar events
self._instrument_investor_position_dic = {}
        # position info (temporary record)
self._instrument_investor_position_tmp_dic = {}
        # order info
self._instrument_order_dic = {}
        # order info (temporary record)
self._instrument_order_tmp_dic = {}
        # instrument info
self._instrument_info_dic = {}
        # whether the instrument info has been updated
self.has_update_instruemnt = False
        # automatically update the instrument list after a successful login
self.auto_update_instrument_list = kwargs.setdefault('auto_update_instrument_list', False)
        # automatically update position info after a trade return
self.update_position_after_rtn_trade = True
        # automatically update order info after a trade return
self.update_order_after_rtn_trade = True
        # update position info periodically
self.timedelta_update_position = True
        # time of the most recent position info update
self.datetime_last_update_position = None
self.datetime_last_update_position_dic = {}
        # time of the most recent order request
self.datetime_last_send_order_dic = {}
        # time of the most recent rtn_trade update
self.datetime_last_rtn_trade_dic = {}
        # set up the background thread
self.enable_background_thread = kwargs.setdefault('enable_background_thread', False)
self.background_thread_working = False
self.background_thread = Thread(target=self._time_interval_job, daemon=True)
self.second_of_interval = 10
self.auto_update_position = kwargs.setdefault('auto_update_position', False)
self.Create()
def Create(self): # , pszFlowPath=''
"""
        Creates the TraderApi
:return:
"""
dir_path = self.get_tmp_path()
self.logger.info('cache %s', dir_path)
return super().Create(dir_path)
def RegisterFront(self, front=Config.FRONT_TRADER_ADDRESS):
"""
        Registers the front server network address
:param front:
:return:
"""
self.logger.info('%s', front)
if isinstance(front, bytes):
return super().RegisterFront(front)
for pszFrontAddress in front:
super().RegisterFront(pszFrontAddress)
    # trading initialization
def _time_interval_job(self):
"""
        Handles the various background update actions
:return:
"""
self.logger.info("%s 后台线程 启动", self.__class__.__name__)
has_send_req_qry_instrument = False
has_send_req_qry_investor_position = False
timedelta_interval = timedelta(seconds=self.second_of_interval)
while self.background_thread_working:
try:
if (not has_send_req_qry_instrument) and self.auto_update_instrument_list:
self.logger.info("集中请求全部合约信息")
self.ReqQryInstrument()
has_send_req_qry_instrument = True
if self.auto_update_position and ((not self.auto_update_instrument_list) or self.has_update_instruemnt):
if self.datetime_last_update_position is None or self.datetime_last_update_position + timedelta_interval < datetime.now():
self.logger.debug("定时更新持仓信息")
self.ReqQryInvestorPosition()
has_send_req_qry_investor_position = True
except:
self.logger.exception('后台线程任务失败')
finally:
time.sleep(self.second_of_interval)
self.logger.info("%s 后台线程 结束", self.__class__.__name__)
@property
def is_settlement_info_confirmed(self):
return self._is_settlement_info_confirmed
@is_settlement_info_confirmed.setter
def is_settlement_info_confirmed(self, val):
"""
当结算信息被首次确认,调用 ReqQryInstrument 查询全部可交易合约的信息并更新配置信息
:param val:
:return:
"""
if self._is_settlement_info_confirmed != val:
self._is_settlement_info_confirmed = val
def OnFrontConnected(self):
'''
        Called when the client establishes a connection with the trading backend (before login).
'''
super().OnFrontConnected()
self.logger.info('-> ReqUserLogin')
self.ReqUserLogin()
def OnFrontDisconnected(self, nReason):
"""
        Called when the connection between the client and the trading backend is lost. When this happens the API reconnects automatically, so the client does not need to do anything.
:param nReason:
:return:
"""
self.logger.warning('API将自动重新连接,客户端可不做处理 reason=%s %s',
nReason, FRONT_DISCONNECTED_REASON_DIC.setdefault(nReason, '未知原因'))
super().OnFrontDisconnected(nReason)
def ReqUserLogin(self):
"""
        User login request
:return:
"""
pReqUserLogin = ApiStruct.ReqUserLogin(
BrokerID=Config.BROKER_ID,
UserID=Config.USER_ID,
Password=Config.PASSWORD)
request_id = self.inc_request_id()
self.logger.debug('TraderApi:%d', request_id)
req_ret = super().ReqUserLogin(pReqUserLogin, request_id)
self.log_req_if_error(req_ret, stack_num=2)
return req_ret
def OnRspUserLogin(self, pRspUserLogin, pRspInfo, nRequestID, bIsLast):
"""
        Login request response
:param pRspUserLogin:
:param pRspInfo:
:param nRequestID:
:param bIsLast:
:return:
"""
super().OnRspUserLogin(pRspUserLogin, pRspInfo, nRequestID, bIsLast)
        # take the current time, used later to synchronize with the exchange time
datetime_now = datetime.now()
self.logger.info('%s %s %d %s', pRspUserLogin, pRspInfo, nRequestID, bIsLast)
if not self.is_rsp_success(pRspInfo):
return
        # get_inc_order_ref requires trading_day to have a fixed length of 8
self.trading_day = bytes_2_str(self.GetTradingDay())
if len(self.trading_day) != 8:
raise ValueError('当前交易日期 trading_day %s 必须长度为8' % self.trading_day)
self.logger.info('当前交易日期: %s', self.trading_day)
        # set sessionid, frontid
self.session_id = pRspUserLogin.SessionID
self.front_id = pRspUserLogin.FrontID
        # restore the maximum order reference from the previous login
MaxOrderRef = pRspUserLogin.MaxOrderRef
if MaxOrderRef is None or len(MaxOrderRef) == 0:
self.logger.warning('MaxOrderRef 无效,将被重置为 1', )
self._order_ref_sn = 1
else:
self._order_ref_sn = int(MaxOrderRef)
self.logger.debug('上一次登录 MaxOrderRef = %d', self._order_ref_sn)
        # compute each exchange's time offset relative to local time
date_str = datetime_now.strftime(Config.DATE_FORMAT_STR)
timedelta_default = None
        # SHFE time
is_shfe_timedelta_error = False
if len(pRspUserLogin.SHFETime) == 8:
try:
Config.SHFETimeDelta = datetime.strptime(date_str + ' ' + bytes_2_str(pRspUserLogin.SHFETime),
Config.DATETIME_FORMAT_STR) - datetime_now
self.logger.info(
'比上期所 SHFE(%s)慢 %f 秒', pRspUserLogin.SHFETime, Config.SHFETimeDelta.total_seconds())
if timedelta_default is None:
timedelta_default = Config.SHFETimeDelta
except:
is_shfe_timedelta_error = True
self.logger.error('上期所 SHFE 时间解析错误,使用默认时差')
Config.SHFETimeDelta = timedelta()
        # DCE time
is_dce_timedelta_error = False
if len(pRspUserLogin.DCETime) == 8:
try:
Config.DCETimeDelta = datetime.strptime(date_str + ' ' + bytes_2_str(pRspUserLogin.DCETime),
Config.DATETIME_FORMAT_STR) - datetime_now
self.logger.info(
'比大商所 DCE(%s)慢 %f 秒', pRspUserLogin.DCETime, Config.DCETimeDelta.total_seconds())
if timedelta_default is None:
timedelta_default = Config.DCETimeDelta
except:
is_dce_timedelta_error = True
self.logger.error('大商所 DCE 时间解析错误,使用默认时差')
Config.DCETimeDelta = timedelta()
# 郑商所时间
is_czce_timedelta_error = False
if len(pRspUserLogin.CZCETime) == 8:
try:
Config.CZCETimeDelta = datetime.strptime(date_str + ' ' + bytes_2_str(pRspUserLogin.CZCETime),
Config.DATETIME_FORMAT_STR) - datetime_now
self.logger.info(
'比郑商所 CZCE(%s)慢 %f 秒', pRspUserLogin.CZCETime, Config.CZCETimeDelta.total_seconds())
if timedelta_default is None:
timedelta_default = Config.CZCETimeDelta
except:
is_czce_timedelta_error = True
self.logger.error('郑商所 CZCE 时间解析错误,使用默认时差')
Config.CZCETimeDelta = timedelta()
# 中金所时间
is_ffex_timedelta_error = False
if len(pRspUserLogin.FFEXTime) == 8:
try:
Config.FFEXTimeDelta = datetime.strptime(date_str + ' ' + bytes_2_str(pRspUserLogin.FFEXTime),
Config.DATETIME_FORMAT_STR) - datetime_now
self.logger.info(
'比中金所 FFEX(%s)慢 %f 秒', pRspUserLogin.FFEXTime, Config.FFEXTimeDelta.total_seconds())
if timedelta_default is None:
timedelta_default = Config.FFEXTimeDelta
except:
is_ffex_timedelta_error = True
self.logger.error('中金所 FFEX 时间解析错误,使用默认时差')
Config.FFEXTimeDelta = timedelta()
# 能源中心时间
is_ine_timedelta_error = False
if len(pRspUserLogin.INETime) == 8:
try:
Config.INETimeDelta = datetime.strptime(date_str + ' ' + bytes_2_str(pRspUserLogin.INETime),
Config.DATETIME_FORMAT_STR) - datetime_now
self.logger.info(
'比能源中心 INE(%s)慢 %f 秒', pRspUserLogin.INETime, Config.INETimeDelta.total_seconds())
if timedelta_default is None:
timedelta_default = Config.INETimeDelta
except:
is_ine_timedelta_error = True
self.logger.error('能源中心 INE 时间解析错误,使用默认时差')
Config.INETimeDelta = timedelta()
# For exchanges whose time failed to parse, fall back to the first successfully parsed offset
if timedelta_default is not None:
if is_shfe_timedelta_error:
Config.SHFETimeDelta = timedelta_default
if is_dce_timedelta_error:
Config.DCETimeDelta = timedelta_default
if is_czce_timedelta_error:
Config.CZCETimeDelta = timedelta_default
if is_ffex_timedelta_error:
Config.FFEXTimeDelta = timedelta_default
if is_ine_timedelta_error:
Config.INETimeDelta = timedelta_default
# Query the settlement-info confirm date
self.ReqQrySettlementInfoConfirm()
# req_ret = self.ReqQrySettlementInfo()
# self.ReqSettlementInfoConfirm()
# query instruments
# self.ReqQryInstrument(str_2_bytes('rb17'))
# query instrument margin rates
# self.ReqQryInstrumentMarginRate(str_2_bytes('rb1712'))
# query the trading account
# self.ReqQryTradingAccount()
self.has_login = True
if self.enable_background_thread and not self.background_thread.is_alive():
self.background_thread_working = True
self.background_thread.start()
def ReqUserLogout(self):
"""
登出请求
:return:
"""
pUserLogout = ApiStruct.UserLogout(
BrokerID=Config.BROKER_ID,
UserID=Config.USER_ID)
return self._send_request_2_queue(super().ReqUserLogout, pUserLogout)
def OnRspUserLogout(self, pUserLogout, pRspInfo, nRequestID, bIsLast):
'''
Logout response.
:param pUserLogout:
:param pRspInfo:
:param nRequestID:
:param bIsLast:
:return:
'''
TraderApi.OnRspUserLogout(self, pUserLogout, pRspInfo, nRequestID, bIsLast)
if not self.is_rsp_success(pRspInfo):
return
self.logger.info('%s %s %d %s', pUserLogout, pRspInfo, nRequestID, bIsLast)
self.has_login = False
self.background_thread_working = False
def ReqQrySettlementInfoConfirm(self):
"""
查询结算单确认的日期
:return:
"""
self.logger.info("请求查询结算单确认的日期")
pQrySettlementInfoConfirm = ApiStruct.QrySettlementInfoConfirm(
BrokerID=self.broker_id, InvestorID=self.investor_id)
return self._send_request_2_queue(super().ReqQrySettlementInfoConfirm, pQrySettlementInfoConfirm, request_timeout=0)
def OnRspQrySettlementInfoConfirm(self, pSettlementInfoConfirm, pRspInfo, nRequestID, bIsLast):
"""
查询结算单确认的日期响应
:param pSettlementInfoConfirm:
:param pRspInfo:
:param nRequestID:
:param bIsLast:
:return:
"""
status = self.resp_common(pRspInfo, nRequestID, bIsLast)
if status < 0:
return
self.logger.info('查询结算单确认的日期响应(%d):%s', nRequestID, pSettlementInfoConfirm)
is_need_confirmed = True
confirm_date = ""
if pSettlementInfoConfirm is None or type(pSettlementInfoConfirm) == ApiStruct.QrySettlementInfoConfirm:
pass
else:
confirm_date = bytes_2_str(pSettlementInfoConfirm.ConfirmDate)
is_need_confirmed = int(confirm_date) < int(self.trading_day)
# Confirm the settlement statement if needed
if is_need_confirmed:
self.logger.info('最新结算单未确认,需查询后再确认,最后确认时间=%s,当前日期:%s',
confirm_date, self.trading_day)
# Back-to-back requests here can trigger error -3 (requests per second over the limit)
time.sleep(1)
req = self.ReqQrySettlementInfo()
# -1: network connection failed
# -2: number of pending requests over the limit
# -3: requests per second over the limit
if req < 0:
self.logger.error('-> ReqQrySettlementInfo(%d) 请求失败', nRequestID)
else:
self.logger.info('最新结算单已确认,不需再次确认,最后确认时间=%s,当前日期:%s',
confirm_date, self.trading_day)
self.is_settlement_info_confirmed = True
def ReqQrySettlementInfo(self):
"""
请求查询投资者结算结果
:return:
"""
self.logger.info("请求查询投资者结算结果")
pQrySettlementInfo = ApiStruct.QrySettlementInfo(BrokerID=self.broker_id, InvestorID=self.investor_id)
self.settlement_info_content = []
return self._send_request_2_queue(super().ReqQrySettlementInfo, pQrySettlementInfo, request_timeout=0)
def OnRspQrySettlementInfo(self, pSettlementInfo, pRspInfo, nRequestID, bIsLast):
"""
请求查询投资者结算信息响应
:param pSettlementInfo:
:param pRspInfo:
:param nRequestID:
:param bIsLast:
:return:
"""
status = self.resp_common(pRspInfo, nRequestID, bIsLast)
if status < 0:
return
settlement_id = pSettlementInfo.SettlementID
trading_day_settlement = bytes_2_str(pSettlementInfo.TradingDay)
# content = str_2_bytes(pSettlementInfo.Content)
# self.logger.info('结算单[%d]内容:%s', SettlementID, content)
self.settlement_info_content.append(pSettlementInfo.Content)
if status > 0:
content_all = bytes_2_str(b''.join(self.settlement_info_content))
self.logger.info('%s 结算单内容:\n%s', trading_day_settlement, content_all)
if trading_day_settlement is None or trading_day_settlement == "":
self.logger.warning('结算单日期为空,替换为当前交易日%s', self.trading_day)
trading_day_settlement = self.trading_day
file_name = 'Settlement_%d_%s.txt' % (settlement_id, trading_day_settlement)
try:
with open(file_name, mode='x') as file_tmp:
file_tmp.write(content_all)
except:
self.logger.exception('结算单文件 %s 保存失败', file_name)
self.logger.debug('查询结算单完成')
self.settlement_info_content = content_all
# Confirm the settlement statement
self.ReqSettlementInfoConfirm()
def ReqSettlementInfoConfirm(self):
"""
投资者结算结果确认
:return:
"""
self.logger.info("投资者结算结果确认")
pSettlementInfoConfirm = ApiStruct.SettlementInfoConfirm(BrokerID=self.broker_id, InvestorID=self.investor_id)
return self._send_request_2_queue(super().ReqSettlementInfoConfirm, pSettlementInfoConfirm, request_timeout=0)
def OnRspSettlementInfoConfirm(self, pSettlementInfoConfirm, pRspInfo, nRequestID, bIsLast):
"""
投资者结算结果确认响应
:param pSettlementInfoConfirm:
:param pRspInfo:
:param nRequestID:
:param bIsLast:
:return:
"""
status = self.resp_common(pRspInfo, nRequestID, bIsLast)
if status < 0:
return
ConfirmDate = bytes_2_str(pSettlementInfoConfirm.ConfirmDate)
self.logger.info(
'结算单确认时间: %s %s', ConfirmDate, bytes_2_str(pSettlementInfoConfirm.ConfirmTime))
if ConfirmDate == self.trading_day:
self.logger.info('最新结算确认日期 %s 确认成功', ConfirmDate)
self.is_settlement_info_confirmed = True
def Release(self):
"""
删除接口对象本身 释放资源
:return:
"""
try:
Config.release()
if self.background_thread.is_alive():
self.background_thread_working = False
self.background_thread.join(self.second_of_interval)
finally:
super().Release()
# Trading preparation and query helpers
def inc_order_ref(self):
"""
自动生成自增报单编号,长度 char[13]
:return:
"""
self._mutex_order_ref.acquire()
self._order_ref_sn += 1
order_ref = str_2_bytes(str(self._order_ref_sn))
self._mutex_order_ref.release()
return order_ref
def inc_order_action_ref(self):
"""
自动生成自增报单编号,长度 int
:return:
"""
self._mutex_order_ref.acquire()
self._order_ref_sn += 1
order_ref_int = self._order_ref_sn
self._mutex_order_ref.release()
return order_ref_int
def ReqQryInstrumentMarginRate(self, instrument_id=b''):
"""
请求查询合约保证金率
:return:
"""
pQryInstrumentMarginRate = ApiStruct.QryInstrumentMarginRate(self.broker_id, self.investor_id, instrument_id)
return self._send_request_2_queue(super().ReqQryInstrumentMarginRate, pQryInstrumentMarginRate, request_timeout=0)
def OnRspQryInstrumentMarginRate(self, pInstrumentMarginRate, pRspInfo, nRequestID, bIsLast):
"""
请求查询合约保证金率响应,保证金率回报。返回的必然是绝对值
:param pInstrumentMarginRate:
:param pRspInfo:
:param nRequestID:
:param bIsLast:
:return:
"""
status = self.resp_common(pRspInfo, nRequestID, bIsLast)
if status < 0:
return
# self.logger.info('合约保证金率:%s', pInstrumentMarginRate)
if status > 0:
self.logger.debug('查询合约保证金率完成')
def ReqQryInstrument(self, instrument_id=b'', exchange_id=b'', exchange_inst_id=b'', product_id=b''):
"""
请求查询合约
:param instrument_id:
:param exchange_id:
:param exchange_inst_id:
:param product_id:
:return:
"""
pQryInstrument = ApiStruct.QryInstrument(InstrumentID=instrument_id,
ExchangeID=exchange_id,
ExchangeInstID=exchange_inst_id,
ProductID=product_id)
self.logger.debug("查询合约 %s", pQryInstrument)
return self._send_request_2_queue(super().ReqQryInstrument, pQryInstrument, request_timeout=0, max_wait_rsp_time=30)
def OnRspQryInstrument(self, pInstrument, pRspInfo, nRequestID, bIsLast):
"""
请求查询合约响应
:param pInstrument:
:param pRspInfo:
:param nRequestID:
:param bIsLast:
:return:
"""
status = self.resp_common(pRspInfo, nRequestID, bIsLast)
if status < 0:
return
# self.logger.debug('(%d)pInstrument=%s', nRequestID, pInstrument)
if pInstrument.UnderlyingMultiple > 100000:
pInstrument.UnderlyingMultiple = 0
if pInstrument.StrikePrice > 100000:
pInstrument.StrikePrice = 0
if pInstrument.LongMarginRatio > 100000:
pInstrument.LongMarginRatio = 0
if pInstrument.ShortMarginRatio > 100000:
pInstrument.ShortMarginRatio = 0
instrument_dic = ApiBase.struct_2_dic(pInstrument,
{
"StartDelivDate": date,
"EndDelivDate": date,
'ExpireDate': date,
})
# Drop garbage values
for name, val in instrument_dic.items():
if val == "":
instrument_dic[name] = None
# self.logger.debug("%s: %s", instrument_dic['InstrumentID'], instrument_dic)
self._instrument_info_dic[instrument_dic['InstrumentID']] = instrument_dic
if status > 0:
self.logger.debug('查询合约完成。 %d条合约信息加载完成', len(Config.instrument_info_dic))
try:
Config.update_instrument(self._instrument_info_dic)
except:
self.logger.exception("update_instrument_info")
finally:
self.has_update_instruemnt = True
# self.ReqQryInstrumentCommissionRate(pInstrument.InstrumentID)
# self._req_qry_instrument_commission_rate_all()
def req_qry_instrument_commission_rate_all(self):
"""
更新全部合约的手续费信息
:return:
"""
self.logger.info("开始更新合约手续费数据,当前数据库中包含合约 %d 条", len(Config.instrument_info_dic))
re_pattern_instrument_header = re.compile(r'[A-Za-z]+(?=\d+$)')
query_dic = OrderedDict()
for n, instrument_id in enumerate(Config.instrument_info_dic.keys()):
m = re_pattern_instrument_header.match(instrument_id)
if m is None:
# self.logger.warning("%d) %s 没有找到有效的合约头部字母", n, instrument_id)
continue
instrument_header = m.group()
if instrument_header not in query_dic:
query_dic[instrument_header] = instrument_id
self.logger.info('%d 条合约手续费信息将被请求更新\n%s', len(query_dic), query_dic)
# new_added_instrument_id_list = []
for instrument_header, instrument_id in query_dic.items():
if instrument_header in Config.instrument_commission_rate_dic:
continue
time.sleep(1)
self.ReqQryInstrumentCommissionRate(str_2_bytes(instrument_id))
# Back off and wait until the commission-rate info for this instrument arrives
for n in range(8):
second = 0.2 * (2 ** n)
time.sleep(second)
self.logger.debug('wait %.1f', second)
if instrument_header in Config.instrument_commission_rate_dic:
# new_added_instrument_id_list.append(instrument_header)
break
# push the updated commission data to the server
# try:
# Config.update_instrument_commission_rate(new_added_instrument_id_list)
# except:
# self.logger.exception("update_instrument_commission_rate")
# single-instrument debugging only
# instrument_id = 'SPD TA711&TA806'
# self.ReqQryInstrumentCommissionRate(str_2_bytes(instrument_id))
def ReqQryInstrumentCommissionRate(self, instrument_id=b''):
"""
请求查询合约手续费率
:param instrument_id:
:return:
"""
pQryInstrumentCommissionRate = ApiStruct.QryInstrumentCommissionRate(BrokerID=Config.BROKER_ID,
InvestorID=Config.INVESTOR_ID,
InstrumentID=instrument_id,
)
return self._send_request_2_queue(super().ReqQryInstrumentCommissionRate, pQryInstrumentCommissionRate,
request_timeout=0)
def OnRspQryInstrumentCommissionRate(self, pInstrumentCommissionRate, pRspInfo, nRequestID, bIsLast):
"""
请求查询合约手续费率响应
:param pInstrumentCommissionRate:
:param pRspInfo:
:param nRequestID:
:param bIsLast:
:return:
"""
status = self.resp_common(pRspInfo, nRequestID, bIsLast)
if status < 0:
return
self.logger.debug('(%d)pInstrumentCommissionRate=%s', nRequestID, pInstrumentCommissionRate)
if pInstrumentCommissionRate is not None:
instrument_commission_rate_dic = ApiBase.struct_2_dic(pInstrumentCommissionRate)
# Drop garbage values
for name, val in instrument_commission_rate_dic.items():
if val == "":
instrument_commission_rate_dic[name] = None
Config.instrument_commission_rate_dic[
instrument_commission_rate_dic['InstrumentID']] = instrument_commission_rate_dic
# Persist the commission-rate info to the database
try:
CommissionRateModel.merge_by_commission_rate_dic(instrument_commission_rate_dic)
except:
self.logger.exception('更新合约手续费信息失败')
if status > 0:
self.logger.debug('查询合约手续费率完成。 已累计查询 %d 条合约手续费率信息', len(Config.instrument_commission_rate_dic))
def ReqQryTradingAccount(self):
"""
请求查询资金账户
:return:
"""
pQryTradingAccount = ApiStruct.QryTradingAccount(self.broker_id, self.investor_id)
return self._send_request_2_queue(super().ReqQryTradingAccount, pQryTradingAccount, request_timeout=0)
def OnRspQryTradingAccount(self, pTradingAccount, pRspInfo, nRequestID, bIsLast):
"""
请求查询资金账户响应
:param pTradingAccount:
:param pRspInfo:
:param nRequestID:
:param bIsLast:
:return:
"""
status = self.resp_common(pRspInfo, nRequestID, bIsLast)
if status < 0:
return
self.logger.info('资金账户:%s', pTradingAccount)
if status > 0:
self.logger.info('查询资金账户完成')
def ReqQryInvestorPosition(self, instrument_id=b"", refresh=False):
"""
请求查询投资者持仓
:param instrument_id:
:return:
"""
pQryInvestorPosition = ApiStruct.QryInvestorPosition(self.broker_id, self.investor_id, instrument_id)
if instrument_id != b"":
instrument_id_str = bytes_2_str(instrument_id)
if instrument_id_str in self._instrument_investor_position_tmp_dic:
self._instrument_investor_position_tmp_dic = self._instrument_investor_position_dic.copy()
del self._instrument_investor_position_tmp_dic[instrument_id_str]
else:
self._instrument_investor_position_tmp_dic = self._instrument_investor_position_dic
else:
self._instrument_investor_position_tmp_dic = {}
return self._send_request_2_queue(super().ReqQryInvestorPosition, pQryInvestorPosition, request_timeout=0)
def OnRspQryInvestorPosition(self, pInvestorPosition, pRspInfo, nRequestID, bIsLast):
"""
请求查询投资者持仓响应
:param pInvestorPosition:
:param pRspInfo:
:param nRequestID:
:param bIsLast:
:return:
"""
status = self.resp_common(pRspInfo, nRequestID, bIsLast)
if status < 0:
return
self.logger.info('持仓:%s', pInvestorPosition)
if pInvestorPosition is None:
return
instrument_id_str = bytes_2_str(pInvestorPosition.InstrumentID)
data_dic = ApiBase.struct_2_dic(pInvestorPosition)
# Record when this position was last refreshed
data_dic["RefreshDateTime"] = datetime.now()
position_date = PositionDateType.create_by_position_date(pInvestorPosition.PositionDate)
self._instrument_investor_position_tmp_dic.setdefault(instrument_id_str, {})[position_date] = data_dic
self.datetime_last_update_position_dic[instrument_id_str] = datetime.now()
if status > 0:
self._instrument_investor_position_dic = self._instrument_investor_position_tmp_dic
self.logger.info('查询持仓完成')
self.datetime_last_update_position = datetime.now()
def ReqQryInvestorPositionDetail(self, instrument_id=b""):
"""
请求查询投资者持仓明细
:param instrument_id:
:return:
"""
p_struct = ApiStruct.QryInvestorPositionDetail(self.broker_id, self.investor_id, instrument_id)
return self._send_request_2_queue(super().ReqQryInvestorPositionDetail, p_struct)
def OnRspQryInvestorPositionDetail(self, pInvestorPositionDetail, pRspInfo, nRequestID, bIsLast):
"""请求查询投资者持仓明细响应"""
status = self.resp_common(pRspInfo, nRequestID, bIsLast)
if status < 0:
return
self.logger.info('持仓明细:%s', pInvestorPositionDetail)
if status > 0:
self.logger.info('查询持仓明细完成')
def OnRspError(self, info, RequestId, IsLast):
"""
错误应答
:param info:
:param RequestId:
:param IsLast:
:return:
"""
if IsLast:
self.logger.info('错误应答(%d):%s', RequestId, info)
self.logger.info('接收错误应答完成')
else:
self.logger.info('错误应答(%d):%s', RequestId, info)
def ReqQryOrder(self, instrument_id=b""):
p_struct = ApiStruct.QryOrder(self.broker_id, self.investor_id, InstrumentID=instrument_id)
if instrument_id != b"":
instrument_id_str = bytes_2_str(instrument_id)
# keep orders of other instruments; the queried one is rebuilt from the response
if instrument_id_str in self._instrument_order_dic:
self._instrument_order_tmp_dic = self._instrument_order_dic.copy()
del self._instrument_order_tmp_dic[instrument_id_str]
else:
self._instrument_order_tmp_dic = self._instrument_order_dic
else:
self._instrument_order_tmp_dic = {}
return self._send_request_2_queue(super().ReqQryOrder, p_struct, request_timeout=0)
def OnRspQryOrder(self, pOrder, pRspInfo, nRequestID, bIsLast):
"""
请求查询报单响应
:param pOrder:
:param pRspInfo:
:param nRequestID:
:param bIsLast:
:return:
"""
status = self.resp_common(pRspInfo, nRequestID, bIsLast)
if status < 0:
return
self.logger.info('报单:%s', pOrder)
data_dic = ApiBase.struct_2_dic(pOrder)
instrument_id_str = bytes_2_str(pOrder.InstrumentID)
self._instrument_order_tmp_dic.setdefault(instrument_id_str, {})[pOrder.SequenceNo] = data_dic
if status > 0:
# To avoid mixing in orders queried by other threads while this result set is still loading, self._instrument_order_tmp_dic is used as temporary storage
self._instrument_order_dic = self._instrument_order_tmp_dic
self.logger.info('查询报单完成。 %s', {key: len(val) for key, val in self._instrument_order_dic.items()})
def OnRspQryTrade(self, pTrade, pRspInfo, nRequestID, bIsLast):
"""
请求查询成交响应
:param pTrade:
:param pRspInfo:
:param nRequestID:
:param bIsLast:
:return:
"""
status = self.resp_common(pRspInfo, nRequestID, bIsLast)
if status < 0:
return
self.logger.info('成交:%s', pTrade)
if status > 0:
self.logger.info('查询成交完成')
# Used by the TradeAgent trading interface
def get_position(self, instrument_id) -> dict:
""" 供 TradeAgent get_position 使用"""
if instrument_id in self._instrument_investor_position_dic:
position_date_inv_pos_dic = self._instrument_investor_position_dic[instrument_id]
else:
position_date_inv_pos_dic = None
return position_date_inv_pos_dic
def get_order(self, instrument_id) -> dict:
""" 供 TradeAgent get_order 使用"""
if instrument_id in self._instrument_order_dic:
orders_dic = self._instrument_order_dic[instrument_id]
else:
orders_dic = None
return orders_dic
def cancel_order(self, instrument_id):
"""供 TradeAgent cancel_order 使用"""
if instrument_id in self._instrument_order_dic:
orders_dic = self._instrument_order_dic[instrument_id]
for seq_no, order_dic in orders_dic.items():
# if order_dic['VolumeTotal'] > 0:
self.ReqOrderAction(order_dic)
# Trading operations
def insert_struct_2_db(self, p_struct):
"""
将 struct 插入 mongodb,返回key 以及 dic对象
key:(front_id 、session_id 、order_ref_int)
OrderRef 是 CTP 后台提供给客户端标识一笔报单的字段,客户端可以通过关键
字组(FrontID 、SessionID 、OrderRef)唯一确定一笔报单,客户端在报单发出时未填
写 OrderRef 字段,CTP 后台会自动为该报单的 OrderRef 字段赋值并返回给客户端。
:param p_struct:
:return:
"""
order_ref_int = int(bytes_2_str(p_struct.OrderRef))
key = (self.front_id, self.session_id, order_ref_int)
# dic = ApiBase.insert_one_2_db(p_struct, collection_name=None, **key)
dic = {}
return key, dic
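# Illustration with hypothetical values (not taken from a real session): for front_id=1,
# session_id=123456 and p_struct.OrderRef=b'17', insert_struct_2_db returns
# key == (1, 123456, 17); the same tuple is rebuilt in OnRtnOrder / OnRtnTrade, which is how a
# report is matched back to the StrategyOrder stored in order_ref_strategy_order_dic.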
def _request_action_order_insert(self, p_struct):
"""
公共Request方法,传入方面,及相关struct数据
函数自动生成新的request_id及进行log记录
2017-11-26 统一改为异步方式执行
:param func:
:param p_struct:
:return:
"""
if not self.is_settlement_info_confirmed:
self.logger.warning('当前交易日期%s,最新结算信息尚未确认,发出交易请求可能失败', self.trading_day)
func = super().ReqOrderInsert
# request_id = self.inc_request_id()
# order_ref = self.inc_order_ref()
# p_struct.OrderRef = order_ref
# p_struct.RequestID = request_id
return self._send_request_2_queue(func, p_struct, add_request_id=True, add_order_ref=True, request_timeout=2)
# self.logger.debug('-> %s(%d):\n%s', func.__name__, request_id, p_struct)
# req_ret = func(p_struct, request_id)
# self.log_req_if_error(req_ret, stack_num=2)
# if req_ret >= 0:
# order_ref_int = int(order_ref)
# key = (self.front_id, self.session_id, order_ref_int)
# stg_order = StrategyOrder(
# self.strategy_id, self.front_id, self.session_id, order_ref, p_struct)
# self.order_ref_strategy_order_dic[key] = stg_order
# self.logger.debug('%s(%d) order_ref_strategy_order_dic[%s]=\n%r',
# func.__name__, request_id, key, stg_order)
# return req_ret, order_ref
def open_long(self, instrument_id, price, vol):
"""
开多
:param instrument_id:
:param price:
:param vol:
:return:
"""
p_struct = ApiStruct.InputOrder(BrokerID=self.broker_id,
InvestorID=self.investor_id,
InstrumentID=str_2_bytes(instrument_id),
UserID=self.user_id,
OrderPriceType=ApiStruct.OPT_LimitPrice,
Direction=ApiStruct.D_Buy,
CombOffsetFlag=ApiStruct.OF_Open,
CombHedgeFlag=ApiStruct.HF_Speculation,
LimitPrice=price,
VolumeTotalOriginal=vol,
TimeCondition=ApiStruct.TC_GFD
)
return self._request_action_order_insert(p_struct)
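# Usage sketch (hypothetical contract and price; assumes a logged-in MyTraderApi with the
# settlement statement confirmed): trader_api.open_long('rb1805', 3910, 1) queues a
# limit-price buy-open order for one lot; close_long / open_short / close_short below follow
# the same pattern with the direction and offset flag changed.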
def close_long(self, instrument_id, price, vol, offset_flag=ApiStruct.OF_Close):
"""
平多
上期所的持仓分今仓(当日开仓)和昨仓(历史持仓),平仓时需要指定是平今仓还是昨仓。
上述字段中, 若对上期所的持仓直接使用THOST_FTDC_OF_Close , 则效果同使用
THOST_FTDC_OF_CloseYesterday 。若对其他交易所的持仓使用
:param instrument_id:
:param price:
:param vol:
:return:
"""
p_struct = ApiStruct.InputOrder(BrokerID=self.broker_id,
InvestorID=self.investor_id,
InstrumentID=str_2_bytes(instrument_id),
UserID=self.user_id,
OrderPriceType=ApiStruct.OPT_LimitPrice,
Direction=ApiStruct.D_Sell,
CombOffsetFlag=offset_flag,
CombHedgeFlag=ApiStruct.HF_Speculation,
LimitPrice=price,
VolumeTotalOriginal=vol,
TimeCondition=ApiStruct.TC_GFD
)
return self._request_action_order_insert(p_struct)
def open_short(self, instrument_id, price, vol):
"""
开空
:param instrument_id:
:param price:
:param vol:
:return:
"""
p_struct = ApiStruct.InputOrder(BrokerID=self.broker_id,
InvestorID=self.investor_id,
InstrumentID=str_2_bytes(instrument_id),
UserID=self.user_id,
OrderPriceType=ApiStruct.OPT_LimitPrice,
Direction=ApiStruct.D_Sell,
CombOffsetFlag=ApiStruct.OF_Open,
CombHedgeFlag=ApiStruct.HF_Speculation,
LimitPrice=price,
VolumeTotalOriginal=vol,
TimeCondition=ApiStruct.TC_GFD
)
return self._request_action_order_insert(p_struct)
def close_short(self, instrument_id, price, vol, offset_flag=ApiStruct.OF_Close):
"""
开空
上期所的持仓分今仓(当日开仓)和昨仓(历史持仓),平仓时需要指定是平今仓还是昨仓。
上述字段中, 若对上期所的持仓直接使用THOST_FTDC_OF_Close , 则效果同使用
THOST_FTDC_OF_CloseYesterday 。若对其他交易所的持仓使用
:param instrument_id:
:param price:
:param vol:
:param offset_flag: 平仓标示 OF_Close, OF_ForceClose, OF_CloseToday, OF_CloseYesterday
:return:
"""
p_struct = ApiStruct.InputOrder(BrokerID=self.broker_id,
InvestorID=self.investor_id,
InstrumentID=str_2_bytes(instrument_id),
UserID=self.user_id,
OrderPriceType=ApiStruct.OPT_LimitPrice,
Direction=ApiStruct.D_Buy,
CombOffsetFlag=offset_flag,
CombHedgeFlag=ApiStruct.HF_Speculation,
LimitPrice=price,
VolumeTotalOriginal=vol,
TimeCondition=ApiStruct.TC_GFD
)
return self._request_action_order_insert(p_struct)
def OnRspOrderInsert(self, pInputOrder, pRspInfo, nRequestID, bIsLast):
"""
报单录入请求
:param pInputOrder:
:param pRspInfo:
:param nRequestID:
:param bIsLast:
:return:
"""
status = self.resp_common(pRspInfo, nRequestID, bIsLast)
if status < 0:
return
key, _ = self.insert_struct_2_db(pInputOrder)
self.logger.info('报单回报%s:\n%s', key, pInputOrder)
if status > 0:
self.order_ref_strategy_order_dic[key].input_order = pInputOrder
self.logger.info('提交报单完成')
instrument_id_str = bytes_2_str(pInputOrder.InstrumentID)
self.datetime_last_send_order_dic[instrument_id_str] = datetime.now()
else:
if key in self.order_ref_strategy_order_dic:
del self.order_ref_strategy_order_dic[key]
self.logger.error('提交报单失败')
def OnErrRtnOrderInsert(self, pInputOrder, pRspInfo):
"""
交易所报单录入错误回报
:param pInputOrder:
:param pRspInfo:
:return:
"""
if not self.is_rsp_success(pRspInfo):
return
key, _ = self.insert_struct_2_db(pInputOrder)
self.logger.error('报单错误回报%s:\n%s', key, pInputOrder)
self.ReqQryOrder(pInputOrder.InstrumentID)
def OnRtnOrder(self, pOrder):
"""
报单通知
报盘将通过交易核心检査的报单发送到交易所前置, 交易所会再次校验该报单. 如果交易所认为该报单不合
法,交易所会将该报单撤销,将错误値息返回給报盘.并返回更新后的该报单的状态,当客户端接收到该错
误信息后, 就会调用 OnErrRtnOrderlnsert 函数, 而更新后的报单状态会通过调用函数 OnRtnOrder发送到
户端. 如果交易所认为该报单合法,则只、返回该报単状态(此时的状态应为: “尚未触发”)。
:param pOrder:
:return:
"""
self.logger.info('报单通知:%s', pOrder)
self._get_response(pOrder.RequestID)
# Order status
order_status = pOrder.OrderStatus
order_status_str = ""
if order_status == ApiStruct.OST_AllTraded:
order_status_str = '全部成交'
elif order_status == ApiStruct.OST_PartTradedQueueing:
order_status_str = '部分成交还在队列中'
elif order_status == ApiStruct.OST_PartTradedNotQueueing:
order_status_str = '部分成交不在队列中'
elif order_status == ApiStruct.OST_NoTradeQueueing:
order_status_str = '未成交还在队列中'
elif order_status == ApiStruct.OST_NoTradeNotQueueing:
order_status_str = '未成交不在队列中'
elif order_status == ApiStruct.OST_Canceled:
order_status_str = '撤单'
elif order_status == ApiStruct.OST_Unknown:
order_status_str = '未知'
elif order_status == ApiStruct.OST_NotTouched:
order_status_str = '尚未触发'
elif order_status == ApiStruct.OST_Touched:
order_status_str = '已触发'
# Order submit status
order_submit_status = pOrder.OrderSubmitStatus
order_submit_status_str = ""
if order_submit_status == ApiStruct.OSS_InsertSubmitted:  # "submitted"; the rejected case is handled further below
order_submit_status_str = '已经提交'
elif order_submit_status == ApiStruct.OSS_CancelSubmitted:
order_submit_status_str = '撤单已经提交'
elif order_submit_status == ApiStruct.OSS_ModifySubmitted:
order_submit_status_str = '修改已经提交'
elif order_submit_status == ApiStruct.OSS_Accepted:
order_submit_status_str = '已经接受'
elif order_submit_status == ApiStruct.OSS_InsertRejected:
order_submit_status_str = '报单已经被拒绝'
elif order_submit_status == ApiStruct.OSS_CancelRejected:
order_submit_status_str = '撤单已经被拒绝'
elif order_submit_status == ApiStruct.OSS_ModifyRejected:
order_submit_status_str = '改单已经被拒绝'
# Record the status
key, _ = self.insert_struct_2_db(pOrder)
if order_status in (ApiStruct.OST_Canceled, ApiStruct.OST_Unknown) or \
order_submit_status in (
ApiStruct.OSS_InsertRejected, ApiStruct.OSS_CancelRejected, ApiStruct.OSS_ModifyRejected):
self.logger.warning('报单提交状态:%s 报单状态:%s %s',
order_submit_status_str, order_status_str, bytes_2_str(pOrder.StatusMsg))
else:
self.logger.debug('报单提交状态:%s 报单状态:%s', order_submit_status_str, order_status_str)
self.ReqQryOrder(pOrder.InstrumentID)
def OnRtnTrade(self, pTrade):
"""
成交通知
如果该报単由交易所进行了最合成交.交易所再次返国该报単的状态(已成交) ,并通过此函数返回该笔成交。
报单成交之后, 一个报单回报(0nRtnorder)和一个成交回报(0nRtnTrade)会被发送到客户瑞,报单回报
中报单的状态为“已成交”.但是仍然建议客户端将成交回报作为报单成交的标志,因为CTP的交易核心在
收到 OnRtnTrade之后才会更新该报単的状态. 如.果客户端通过报单回报来判断报単成交与否并立即平仓,有
极小的概率会出现在平仓指令到达cTP交易核心时该报单的状态仍未更新,就会导致无法平仓.
:param pTrade:
:return:
"""
key, _ = self.insert_struct_2_db(pTrade)
self.logger.info('成交通知 %s:%s', key, pTrade)
order_ref = key[2]
if key in self.order_ref_strategy_order_dic:
self.order_ref_strategy_order_dic[key].trade_list = pTrade
else:
self.logger.debug('order_ref_strategy_order_dic.keys:%s', self.order_ref_strategy_order_dic.keys())
self.logger.warning('order_ref: %s 在当前策略报单列表中未找到\n%s', order_ref, pTrade)
instrument_id_str = bytes_2_str(pTrade.InstrumentID)
self.datetime_last_rtn_trade_dic[instrument_id_str] = datetime.now()
# Refresh the position info for this instrument
if self.update_position_after_rtn_trade:
self.ReqQryInvestorPosition(pTrade.InstrumentID)
if self.update_order_after_rtn_trade:
self.ReqQryOrder(pTrade.InstrumentID)
def ReqOrderAction(self, order_dic):
"""
2017-11-26 改为使用 order_dic 作为输入参数
根据《API特别说明》文档介绍需要增加 5 个关键参数,因此,需要从 order 信息中进行提取
报单操作请求:撤单指令
FrontID 、SessionID、OrderRef、ExchangID、OrderSysID
:param order_dic:
:return:
"""
if order_dic['OrderStatus'] == OST_Canceled_STR or order_dic['VolumeTotalOriginal'] == order_dic['VolumeTraded']:
# TODO: the return value of 1 has no particular meaning; it only marks a normal return and will be normalized later
return 1
front_id = int(order_dic['FrontID'])
session_id = int(order_dic['SessionID'])
order_ref = str_2_bytes(order_dic['OrderRef'])
exchang_id = str_2_bytes(order_dic['ExchangeID'])
order_sys_id = str_2_bytes(order_dic['OrderSysID'])
self.logger.debug('请求撤销报单:\n%s', order_dic)
order_action_ref = self.inc_order_action_ref()
p_struct = ApiStruct.InputOrderAction(self.broker_id, self.investor_id, order_action_ref,
OrderRef=order_ref, FrontID=front_id, SessionID=session_id,
ExchangeID=exchang_id, OrderSysID=order_sys_id)
return self._send_request_2_queue(super().ReqOrderAction, p_struct, add_request_id=True, request_timeout=0)
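# Cancel sketch (hypothetical order_dic; a real one comes from get_order()): the five key
# fields named in the docstring must all be present, e.g.
#   order_dic = {'OrderStatus': '3', 'VolumeTotalOriginal': 1, 'VolumeTraded': 0,
#                'FrontID': 1, 'SessionID': 123456, 'OrderRef': '17',
#                'ExchangeID': 'SHFE', 'OrderSysID': '      12345'}
#   trader_api.ReqOrderAction(order_dic)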
def OnRspOrderAction(self, pInputOrderAction, pRspInfo, nRequestID, bIsLast):
"""
报单操作请求响应
:param pInputOrderAction:
:param pRspInfo:
:param nRequestID:
:param bIsLast:
:return:
"""
status = self.resp_common(pRspInfo, nRequestID, bIsLast)
if status < 0:
return
key, _ = self.insert_struct_2_db(pInputOrderAction)
self.logger.info('撤单回报%s:\n%s', key, pInputOrderAction)
self.ReqQryOrder(pInputOrderAction.InstrumentID)
def OnErrRtnOrderAction(self, pOrderAction, pRspInfo):
"""
交易所撤单操作错误回报
正常情况不应该出现
:param pOrderAction:
:param pRspInfo:
:return:
"""
if not self.is_rsp_success(pRspInfo):
return
key, _ = self.insert_struct_2_db(pOrderAction)
self.logger.info('撤单错误回报%s:\n%s', key, pOrderAction)
def test_md_api():
"""
对mdapi 进行创建、登录、订阅等操作
:return:
"""
# Initializing the market-data interface
# 1. Subclass the SPI, create an instance of it as well as the API instance.
# 2. Register the SPI instance with the API instance.
# md_api = MyMdApi(Config.subscribe_instrument_list) # [b'rb1710', b'rb1712']
md_api = MyMdApi([b'au1712', b'rb1712', b'pb1801']) # [b'rb1710', b'rb1712']
# 3. Register the front-end address with the API instance: the trading API registers the trading front, the market-data API registers the market-data front.
md_api.RegisterFront()
md_api.Init()
time.sleep(5)
try:
max_count = 20
# for n in range(max_count):
while md_api.has_login:
time.sleep(1)
# if n == 1:
# md_api.UnSubscribeMarketData([b'rb1712'])
except KeyboardInterrupt:
pass
finally:
md_api.ReqUserLogout()
md_api.Release()
def run_trader_api():
"""
初始化的步骤和代码基本上与行情接口(3.2 节)的初始化一致。
不同之处
1. 创建 API 实例时不能指定数据的传输协议。即第二行中的函数 CreateFtdcTraderApi 中只接受一个参数(即流文件目录)。
2. 需要订阅公有流和私有流。
公有流:交易所向所有连接着的客户端发布的信息。比如说:合约场上交易状态。
私有流:交易所向特定客户端发送的信息。如报单回报,成交回报。
订阅模式
Restart:接收所有交易所当日曾发送过的以及之后可能会发送的所有该类消息。
Resume:接收客户端上次断开连接后交易所曾发送过的以及之后可能会发送的所有该类消息。
Quick:接收客户端登录之后交易所可能会发送的所有该类消息。
3. 注册的前置地址是交易前置机的地址。
:return:
"""
trader_api = MyTraderApi()
trader_api.RegisterFront()
trader_api.SubscribePrivateTopic(ApiStruct.TERT_QUICK)
trader_api.SubscribePublicTopic(ApiStruct.TERT_QUICK)
trader_api.Init()
try:
max_count = 60
# while 1:
wait_count = 0
is_settlement_info_confirmed = False
order_dic = None
for n in range(max_count):
if trader_api.has_login:
wait_count += 1
# if not trader_api.is_settlement_info_confirmed and wait_count % 5 == 0 and wait_count < 10:
# trader_api.ReqQrySettlementInfo()
# elif not trader_api.is_settlement_info_confirmed and wait_count % 5 == 0 and 10 < wait_count < 20:
# trader_api.ReqSettlementInfoConfirm()
# elif trader_api.is_settlement_info_confirmed and not is_settlement_info_confirmed:
# is_settlement_info_confirmed = trader_api.is_settlement_info_confirmed
# trader_api.logger.info("结算信息已经确认")
if not trader_api.is_settlement_info_confirmed and n == 3:
trader_api.is_settlement_info_confirmed = True
time.sleep(1)
if trader_api.is_settlement_info_confirmed:
if n == 5:
logging.info("开仓")
trader_api.open_long('rb1805', 3910, 1)
elif n == 6:
orders_dic = trader_api.get_order('rb1805')
if orders_dic is None:
continue
logging.info("%s", orders_dic)
# keep one order around so the cancel request at n == 8 has something to act on
order_dic = next(iter(orders_dic.values()))
elif n == 8:
if order_dic is not None:
trader_api.ReqOrderAction(order_dic)
except KeyboardInterrupt:
pass
finally:
trader_api.ReqUserLogout()
trader_api.Release()
def test_md_trade_api():
"""
test_md_api test_trader_api 的合体
:return:
"""
# 为了更加准确的生成分钟线数据
# 需要首先初始化 Config中的交易所时间同步数据,因此,trade_api需要被首先启动
# 初始化 trade_api
trader_api = MyTraderApi()
trader_api.RegisterFront()
trader_api.SubscribePrivateTopic(ApiStruct.TERT_QUICK)
trader_api.SubscribePublicTopic(ApiStruct.TERT_QUICK)
trader_api.Init()
for _ in range(5):
if trader_api.has_login:
break
else:
time.sleep(1)
else:
logging.warning('trade api 登录超时,交易所时间同步可能不准,导致分钟线存在误差')
# initialize md_api
# md_api = MyMdApi(Config.subscribe_instrument_list) # [b'rb1710', b'rb1712']
md_api = MyMdApi([b'au1712', b'rb1712', b'pb1801']) # [b'rb1710', b'rb1712']
# 3. Register the front-end address with the API instance: the trading API registers the trading front, the market-data API registers the market-data front.
md_api.RegisterFront()
md_api.Init()
try:
max_count = 20
# while md_api.has_login:
# for n in range(max_count):
# time.sleep(1)
# if n == 1:
# req_ret, order_ref = trader_api.open_long(b'rb1710', 3001, 1)
# elif n == 3:
# trader_api.ReqOrderAction(order_ref)
except KeyboardInterrupt:
pass
finally:
md_api.ReqUserLogout()
md_api.Release()
trader_api.ReqUserLogout()
trader_api.Release()
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format=Config.LOG_FORMAT)
# test_md_api()
run_trader_api()
# test_md_trade_api()
| StarcoderdataPython |
9607161 | <gh_stars>0
from django.db import models
from django.db.models import Avg
from accounts.models import Account
# Create your models here.
class Post(models.Model):
user = models.ForeignKey(Account, on_delete=models.SET_NULL, null=True)
image = models.ImageField(upload_to='images')
title = models.CharField(max_length=20 ,unique=True, null=True)
description = models.CharField(max_length=200)
date_posted = models.DateTimeField(auto_now_add=True)
date_updated = models.DateTimeField(auto_now=True)
post_link = models.CharField(max_length=20, unique=True,null=True)
def __str__(self):
return self.description
def averageReview(self):
reviews = ReviewRating.objects.filter(post=self, status=True).aggregate(average=Avg('rating'))
avg = 0
if reviews['average'] is not None:
avg = float(reviews['average'])
return avg
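# Usage sketch (e.g. in a view or the Django shell, assuming migrations are applied):
#   post = Post.objects.get(post_link='some-link')
#   post.averageReview()  # -> mean rating of its visible reviews, or 0 if there are none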
class ReviewRating(models.Model):
post = models.ForeignKey(Post, on_delete=models.CASCADE)
user = models.ForeignKey(Account, on_delete=models.CASCADE)
subject = models.CharField(max_length=100,blank=True)
review = models.TextField(max_length=500,blank=True)
rating = models.FloatField()
ip = models.CharField(max_length=20, blank=True)
status = models.BooleanField(default=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.subject
| StarcoderdataPython
6438327 | from __future__ import print_function
import os
import numpy as np
from keras.models import Model
from keras.layers import Input, concatenate, Conv1D, MaxPooling1D, Conv2DTranspose,Lambda,BatchNormalization,LSTM
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras import backend as K
import keras
import tensorflow as tf
from keras.layers import ZeroPadding1D
K.set_image_data_format('channels_last') # TF dimension ordering in this code
#size= 2**16
size= 48
channel=60
ss=10
def crossentropy_cut(y_true,y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
y_pred_f= tf.clip_by_value(y_pred_f, 1e-7, (1. - 1e-7))
# mask=K.cast(K.greater_equal(y_true_f,-0.5),dtype='float32')
mask=K.greater_equal(y_true_f,-0.5)
# out = -(y_true_f * K.log(y_pred_f)*mask + (1.0 - y_true_f) * K.log(1.0 - y_pred_f)*mask)
# out=K.mean(out)
losses = -(y_true_f * K.log(y_pred_f) + (1.0 - y_true_f) * K.log(1.0 - y_pred_f))
losses = tf.boolean_mask(losses, mask)
masked_loss = tf.reduce_mean(losses)
return masked_loss
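# Masking sketch: any label below -0.5 (e.g. -1) is treated as "ignore", so with
# y_true = [[1., -1., 0.]] and y_pred = [[0.9, 0.4, 0.2]] only positions 0 and 2 contribute
# to the averaged cross-entropy; position 1 is dropped by the boolean mask above.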
def Conv1DTranspose(input_tensor, filters, kernel_size, strides=2, padding='same'):
x = Lambda(lambda x: K.expand_dims(x, axis=2))(input_tensor)
x = Conv2DTranspose(filters=filters, kernel_size=(kernel_size, 1), strides=(strides, 1), padding=padding)(x)
x = Lambda(lambda x: K.squeeze(x, axis=2))(x)
return x
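# Rough shape check (illustrative sizes): a (batch, 24, 64) tensor run through
# Conv1DTranspose(x, filters=32, kernel_size=7, strides=2) comes back as (batch, 48, 32);
# the expand_dims/squeeze trick exists only because this Keras version has no native
# Conv1DTranspose layer, so Conv2DTranspose is applied on a dummy spatial axis.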
def dice_coef(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
mask=K.cast(K.greater_equal(y_true_f,-0.5),dtype='float32')
intersection = K.sum(y_true_f * y_pred_f * mask)
return (2. * intersection + ss) / (K.sum(y_true_f * mask) + K.sum(y_pred_f * mask) + ss)
def dice_coef_loss(y_true, y_pred):
return -dice_coef(y_true, y_pred)
def get_unet():
inputs = Input((size, channel)) #2**16
print(inputs.shape)
# conv01 = BatchNormalization()(Conv1D(120, 7, activation='relu', padding='same')(inputs))
# conv01 = BatchNormalization()(Conv1D(120, 7, activation='relu', padding='same')(conv01))
# pool01 = MaxPooling1D(pool_size=2)(conv01) #2**15
# conv0 = BatchNormalization()(Conv1D(240, 7, activation='relu', padding='same')(pool01))#+8
# conv0 = BatchNormalization()(Conv1D(180, 7, activation='relu', padding='same')(conv0))
# pool0 = MaxPooling1D(pool_size=2)(conv0) #2**14
lstm0 = LSTM(60,return_sequences=True)(inputs)
conv22 = Conv1D(1, 1, activation='sigmoid')(lstm0)
# conv1 = BatchNormalization()(Conv1D(210, 7, activation='relu', padding='same')(pool0))#+8
# conv1 = BatchNormalization()(Conv1D(210, 7, activation='relu', padding='same')(conv1))
# pool1 = MaxPooling1D(pool_size=2)(conv1) #2**13
#
# conv2 = BatchNormalization()(Conv1D(25, 7, activation='relu', padding='same')(pool1))#+16
# conv2 = BatchNormalization()(Conv1D(25, 7, activation='relu', padding='same')(conv2))
# pool2 = MaxPooling1D(pool_size=4)(conv2) #4096*16
#
# conv3 = BatchNormalization()(Conv1D(30, 7, activation='relu', padding='same')(pool2))#+16
# conv3 = BatchNormalization()(Conv1D(30, 7, activation='relu', padding='same')(conv3))
# pool3 = MaxPooling1D(pool_size=4)(conv3) #4096*4
#
## conv4 = BatchNormalization()(Conv1D(112, 7, activation='relu', padding='same')(pool3))#+32
## conv4 = BatchNormalization()(Conv1D(112, 7, activation='relu', padding='same')(conv4))
## pool4 = MaxPooling1D(pool_size=2)(conv4) #4096*2
#
# conv5 = BatchNormalization()(Conv1D(60, 7, activation='relu', padding='same')(pool3))#+32
# conv5 = BatchNormalization()(Conv1D(60, 7, activation='relu', padding='same')(conv5))
# pool5 = MaxPooling1D(pool_size=4)(conv5) #4096
#
## conv6 = BatchNormalization()(Conv1D(208, 7, activation='relu', padding='same')(pool5))#+64
## conv6 = BatchNormalization()(Conv1D(208, 7, activation='relu', padding='same')(conv6))
## pool6 = MaxPooling1D(pool_size=2)(conv6) #2048
#
# conv7 = BatchNormalization()(Conv1D(120, 7, activation='relu', padding='same')(pool5))#+64
# conv7 = BatchNormalization()(Conv1D(120, 7, activation='relu', padding='same')(conv7))
# pool7 = MaxPooling1D(pool_size=4)(conv7) #1024
#
## conv8 = BatchNormalization()(Conv1D(400, 7, activation='relu', padding='same')(pool7))#+128
## conv8 = BatchNormalization()(Conv1D(400, 7, activation='relu', padding='same')(conv8))
## pool8 = MaxPooling1D(pool_size=2)(conv8) #512
#
# conv9 = BatchNormalization()(Conv1D(240, 7, activation='relu', padding='same')(pool7))#+128
# conv9 = BatchNormalization()(Conv1D(240, 7, activation='relu', padding='same')(conv9))
# pool9 = MaxPooling1D(pool_size=4)(conv9) #256
#
# conv10 = BatchNormalization()(Conv1D(480, 7, activation='relu', padding='same')(pool9))#+496
# conv10 = BatchNormalization()(Conv1D(480, 7, activation='relu', padding='same')(conv10))
# #lstm0 = CuDNNLSTM(1900,return_sequences=True)(conv10)
#
# up11 = concatenate([Conv1DTranspose(conv10,240, 4, strides=4, padding='same'), conv9], axis=2)
# conv11 = BatchNormalization()(Conv1D(240, 7, activation='relu', padding='same')(up11))
# conv11 = BatchNormalization()(Conv1D(240, 7, activation='relu', padding='same')(conv11)) #1024
#
## up12 = concatenate([Conv1DTranspose(conv11,400, 2, strides=2, padding='same'), conv8], axis=2)
## conv12 = BatchNormalization()(Conv1D(400, 7, activation='relu', padding='same')(up12))
## conv12 = BatchNormalization()(Conv1D(400, 7, activation='relu', padding='same')(conv12)) #1024
#
# up13 = concatenate([Conv1DTranspose(conv11,120, 4, strides=4, padding='same'), conv7], axis=2)
# conv13 = BatchNormalization()(Conv1D(120, 7, activation='relu', padding='same')(up13))
# conv13 = BatchNormalization()(Conv1D(120, 7, activation='relu', padding='same')(conv13)) #4096
#
## up14 = concatenate([Conv1DTranspose(conv13,208, 2, strides=2, padding='same'), conv6], axis=2)
## conv14 = BatchNormalization()(Conv1D(208, 7, activation='relu', padding='same')(up14))
## conv14 = BatchNormalization()(Conv1D(208, 7, activation='relu', padding='same')(conv14)) #4096
#
# up15 = concatenate([Conv1DTranspose(conv13,60, 4, strides=4, padding='same'), conv5], axis=2)
# conv15 = BatchNormalization()(Conv1D(60, 7, activation='relu', padding='same')(up15))
# conv15 = BatchNormalization()(Conv1D(60, 7, activation='relu', padding='same')(conv15)) #4096*4
#
## up16 = concatenate([Conv1DTranspose(conv15,112, 2, strides=2, padding='same'), conv4], axis=2)
## conv16 = BatchNormalization()(Conv1D(112, 7, activation='relu', padding='same')(up16))
## conv16 = BatchNormalization()(Conv1D(112, 7, activation='relu', padding='same')(conv16)) #4096*4
#
# up17 = concatenate([Conv1DTranspose(conv15,30, 4, strides=4, padding='same'), conv3], axis=2)
# conv17 = BatchNormalization()(Conv1D(30, 7, activation='relu', padding='same')(up17))
# conv17 = BatchNormalization()(Conv1D(30, 7, activation='relu', padding='same')(conv17)) #4096*16
#
# up18 = concatenate([Conv1DTranspose(conv17,25, 4, strides=4, padding='same'), conv2], axis=2)
# conv18 = BatchNormalization()(Conv1D(25, 7, activation='relu', padding='same')(up18))
# conv18 = BatchNormalization()(Conv1D(25, 7, activation='relu', padding='same')(conv18)) #4096*16
#
# up19 = concatenate([Conv1DTranspose(conv18,21, 4, strides=4, padding='same'), conv1], axis=2)
# conv19 = BatchNormalization()(Conv1D(21, 7, activation='relu', padding='same')(up19))
# conv19 = BatchNormalization()(Conv1D(21, 7, activation='relu', padding='same')(conv19)) #4096*64
#
# up20 = concatenate([Conv1DTranspose(conv19,18, 4, strides=4, padding='same'), conv0], axis=2)
# conv20 = BatchNormalization()(Conv1D(18, 7, activation='relu', padding='same')(up20))
# conv20 = BatchNormalization()(Conv1D(18, 7, activation='relu', padding='same')(conv20)) #4096*64
#
# up21 = concatenate([Conv1DTranspose(conv20,15, 2, strides=2, padding='same'), conv01], axis=2)
# conv21 = BatchNormalization()(Conv1D(15, 7, activation='relu', padding='same')(up21))
# conv21 = BatchNormalization()(Conv1D(15, 7, activation='relu', padding='same')(conv21)) #4096*256
#
# conv22 = Conv1D(1, 1, activation='sigmoid')(conv21)
model = Model(inputs=[inputs], outputs=[conv22])
model.compile(optimizer=Adam(lr=1e-3,beta_1=0.9, beta_2=0.999,decay=1e-5), loss=crossentropy_cut, metrics=[dice_coef])
return model
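if __name__ == '__main__':
    # Minimal usage sketch (assumes the (48, 60) input shape and custom loss defined above):
    # just builds and summarizes the model, no training data involved.
    model = get_unet()
    model.summary()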
| StarcoderdataPython |
5145660 | <filename>mmfewshot/classification/apis/test.py
# Copyright (c) OpenMMLab. All rights reserved.
import copy
from typing import Dict, Optional, Union
import mmcv
import numpy as np
import torch
from mmcls.apis.test import collect_results_cpu
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import build_optimizer, get_dist_info
from mmcv.utils import print_log
from torch import nn
from torch.utils.data import DataLoader
from mmfewshot.classification.datasets import label_wrapper
from mmfewshot.classification.utils import MetaTestParallel
# z scores of different confidence intervals
Z_SCORE = {
0.50: 0.674,
0.80: 1.282,
0.90: 1.645,
0.95: 1.960,
0.98: 2.326,
0.99: 2.576,
}
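# How Z_SCORE is used below (sketch with made-up per-task accuracies): for
# acc = [0.81, 0.79, 0.84] and a 0.95 interval, the reported numbers are
# np.mean(acc) +/- Z_SCORE[0.95] * np.std(acc) / np.sqrt(len(acc)),
# i.e. the mean accuracy with a normal-approximation confidence half-width.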
def single_gpu_meta_test(model: Union[MMDataParallel, nn.Module],
num_test_tasks: int,
support_dataloader: DataLoader,
query_dataloader: DataLoader,
test_set_dataloader: Optional[DataLoader] = None,
meta_test_cfg: Optional[Dict] = None,
eval_kwargs: Optional[Dict] = None,
logger: Optional[object] = None,
confidence_interval: float = 0.95,
show_task_results: bool = False) -> Dict:
"""Meta testing on single gpu.
During meta testing, model might be further fine-tuned or added extra
parameters. While the tested model need to be restored after meta
testing since meta testing can be used as the validation in the middle
of training. To detach model from previous phase, the model will be
copied and wrapped with :obj:`MetaTestParallel`. And it has full
independence from the training model and will be discarded after the
meta testing.
Args:
model (:obj:`MMDataParallel` | nn.Module): Model to be meta tested.
num_test_tasks (int): Number of meta testing tasks.
support_dataloader (:obj:`DataLoader`): A PyTorch dataloader of
support data and it is used to fetch support data for each task.
query_dataloader (:obj:`DataLoader`): A PyTorch dataloader of query
data and it is used to fetch query data for each task.
test_set_dataloader (:obj:`DataLoader`): A PyTorch dataloader of all
test data and it is used for feature extraction from whole dataset
to accelerate the testing. Default: None.
meta_test_cfg (dict): Config for meta testing. Default: None.
eval_kwargs (dict): Any keyword argument to be used for evaluation.
Default: None.
logger (logging.Logger | None): Logger used for printing
related information during evaluation. Default: None.
confidence_interval (float): Confidence interval. Default: 0.95.
show_task_results (bool): Whether to record the eval result of
each task. Default: False.
Returns:
dict: Dict of meta evaluate results, containing `accuracy_mean`
and `accuracy_std` of all test tasks.
"""
assert confidence_interval in Z_SCORE.keys()
# To avoid deep copying the whole :obj:`MMDataParallel`, we simply
# copy the module and wrap it with a :class:`MetaTestParallel`.
# MetaTestParallel will send data to the same device as model.
if isinstance(model, MMDataParallel):
model = MetaTestParallel(copy.deepcopy(model.module))
else:
model = MetaTestParallel(copy.deepcopy(model))
# for the backbone-fixed methods, the features can be pre-computed
# and saved in dataset to achieve acceleration
if meta_test_cfg.get('fast_test', False):
print_log('extracting features from all images.', logger=logger)
extract_features_for_fast_test(model, support_dataloader,
query_dataloader, test_set_dataloader)
print_log('start meta testing', logger=logger)
# prepare for meta test
model.before_meta_test(meta_test_cfg)
results_list = []
prog_bar = mmcv.ProgressBar(num_test_tasks)
for task_id in range(num_test_tasks):
# set support and query dataloader to the same task by task id
query_dataloader.dataset.set_task_id(task_id)
support_dataloader.dataset.set_task_id(task_id)
# test a task
results, gt_labels = test_single_task(model, support_dataloader,
query_dataloader, meta_test_cfg)
# evaluate predict result
eval_result = query_dataloader.dataset.evaluate(
results, gt_labels, logger=logger, **eval_kwargs)
eval_result['task_id'] = task_id
results_list.append(eval_result)
prog_bar.update()
if show_task_results:
# the result of each task will be logged into logger
for results in results_list:
msg = ' '.join([f'{k}: {results[k]}' for k in results.keys()])
print_log(msg, logger=logger)
meta_eval_results = dict()
# get the average accuracy and std
for k in results_list[0].keys():
if k == 'task_id':
continue
mean = np.mean([res[k] for res in results_list])
std = np.std([res[k] for res in results_list])
std = Z_SCORE[confidence_interval] * std / np.sqrt(num_test_tasks)
meta_eval_results[f'{k}_mean'] = mean
meta_eval_results[f'{k}_std'] = std
return meta_eval_results
def multi_gpu_meta_test(model: MMDistributedDataParallel,
num_test_tasks: int,
support_dataloader: DataLoader,
query_dataloader: DataLoader,
test_set_dataloader: Optional[DataLoader] = None,
meta_test_cfg: Optional[Dict] = None,
eval_kwargs: Optional[Dict] = None,
logger: Optional[object] = None,
confidence_interval: float = 0.95,
show_task_results: bool = False) -> Dict:
"""Distributed meta testing on multiple gpus.
During meta testing, model might be further fine-tuned or added extra
parameters. While the tested model need to be restored after meta
testing since meta testing can be used as the validation in the middle
of training. To detach model from previous phase, the model will be
copied and wrapped with :obj:`MetaTestParallel`. And it has full
independence from the training model and will be discarded after the
meta testing.
In the distributed situation, the :obj:`MetaTestParallel` on each GPU
is also independent. The test tasks in few shot leaning usually are very
small and hardly benefit from distributed acceleration. Thus, in
distributed meta testing, each task is done in single GPU and each GPU
is assigned a certain number of tasks. The number of test tasks
for each GPU is ceil(num_test_tasks / world_size). After all GPUs finish
their tasks, the results will be aggregated to get the final result.
Args:
model (:obj:`MMDistributedDataParallel`): Model to be meta tested.
num_test_tasks (int): Number of meta testing tasks.
support_dataloader (:obj:`DataLoader`): A PyTorch dataloader of
support data.
query_dataloader (:obj:`DataLoader`): A PyTorch dataloader of
query data.
test_set_dataloader (:obj:`DataLoader`): A PyTorch dataloader of
all test data. Default: None.
meta_test_cfg (dict): Config for meta testing. Default: None.
eval_kwargs (dict): Any keyword argument to be used for evaluation.
Default: None.
logger (logging.Logger | None): Logger used for printing
related information during evaluation. Default: None.
confidence_interval (float): Confidence interval. Default: 0.95.
show_task_results (bool): Whether to record the eval result of
each task. Default: False.
Returns:
dict | None: Dict of meta evaluate results, containing `accuracy_mean`
and `accuracy_std` of all test tasks.
"""
assert confidence_interval in Z_SCORE.keys()
rank, world_size = get_dist_info()
# Note that each task is tested on a single GPU. Thus the data and model
# on different GPU should be independent. :obj:`MMDistributedDataParallel`
# always automatically synchronizes the grad in different GPUs when doing
# the loss backward, which can not meet the requirements. Thus we simply
# copy the module and wrap it with an :obj:`MetaTestParallel`, which will
# send data to the device model.
model = MetaTestParallel(copy.deepcopy(model.module))
# for the backbone-fixed methods, the features can be pre-computed
# and saved in dataset to achieve acceleration
if meta_test_cfg.get('fast_test', False):
print_log('extracting features from all images.', logger=logger)
extract_features_for_fast_test(model, support_dataloader,
query_dataloader, test_set_dataloader)
print_log('start meta testing', logger=logger)
# prepare for meta test
model.before_meta_test(meta_test_cfg)
results_list = []
# tasks will be evenly distributed on each gpus
sub_num_test_tasks = num_test_tasks // world_size
sub_num_test_tasks += 1 if num_test_tasks % world_size != 0 else 0
if rank == 0:
prog_bar = mmcv.ProgressBar(num_test_tasks)
for i in range(sub_num_test_tasks):
task_id = (i * world_size + rank)
if task_id >= num_test_tasks:
continue
# set support and query dataloader to the same task by task id
query_dataloader.dataset.set_task_id(task_id)
support_dataloader.dataset.set_task_id(task_id)
# test a task
results, gt_labels = test_single_task(model, support_dataloader,
query_dataloader, meta_test_cfg)
# evaluate predict result
eval_result = query_dataloader.dataset.evaluate(
results, gt_labels, logger=logger, **eval_kwargs)
eval_result['task_id'] = task_id
results_list.append(eval_result)
if rank == 0:
prog_bar.update(world_size)
collect_results_list = collect_results_cpu(
results_list, num_test_tasks, tmpdir=None)
if rank == 0:
if show_task_results:
# the result of each task will be logged into logger
for results in collect_results_list:
msg = ' '.join([f'{k}: {results[k]}' for k in results.keys()])
print_log(msg, logger=logger)
meta_eval_results = dict()
print_log(
f'number of tasks: {len(collect_results_list)}', logger=logger)
# get the average accuracy and std
for k in collect_results_list[0].keys():
if k == 'task_id':
continue
mean = np.mean([res[k] for res in collect_results_list])
std = np.std([res[k] for res in collect_results_list])
std = Z_SCORE[confidence_interval] * std / np.sqrt(num_test_tasks)
meta_eval_results[f'{k}_mean'] = mean
meta_eval_results[f'{k}_std'] = std
return meta_eval_results
else:
return None
def extract_features_for_fast_test(model: MetaTestParallel,
support_dataloader: DataLoader,
query_dataloader: DataLoader,
test_set_dataloader: DataLoader) -> None:
"""Extracting and saving features for testing acceleration.
In some methods, the backbone is fixed during meta testing, which results
in the features from backbone are also fixed for whole dataset. So we can
calculate the features in advance and save them into `support_dataloader`
and `query_dataloader`. In this way, the model can skip the feature
extraction phase during the meta testing, which can obviously accelerate
the meta testing.
Args:
model (:obj:`MetaTestParallel`): Model to be meta tested.
support_dataloader (:obj:`DataLoader`): A PyTorch dataloader of
support data.
query_dataloader (:obj:`DataLoader`): A PyTorch dataloader of
query data.
test_set_dataloader (:obj:`DataLoader`): A PyTorch dataloader of
all test data.
"""
feats_list, img_metas_list = [], []
rank, _ = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(test_set_dataloader.dataset))
model.eval()
# traverse the whole dataset and compute the features from backbone
with torch.no_grad():
for i, data in enumerate(test_set_dataloader):
img_metas_list.extend(data['img_metas'].data[0])
# forward in `extract_feat` mode
feats = model(img=data['img'], mode='extract_feat')
feats_list.append(feats)
if rank == 0:
prog_bar.update(num_tasks=len(data['img_metas'].data[0]))
feats = torch.cat(feats_list, dim=0)
# cache the pre-computed features into dataset
query_dataloader.dataset.cache_feats(feats, img_metas_list)
support_dataloader.dataset.cache_feats(feats, img_metas_list)
def test_single_task(model: MetaTestParallel, support_dataloader: DataLoader,
query_dataloader: DataLoader, meta_test_cfg: Dict):
"""Test a single task.
A task has two stages: handling the support set and predicting the
query set. In stage one, it currently supports fine-tune based and
metric based methods. In stage two, it simply forward the query set
and gather all the results.
Args:
model (:obj:`MetaTestParallel`): Model to be meta tested.
support_dataloader (:obj:`DataLoader`): A PyTorch dataloader of
support data.
query_dataloader (:obj:`DataLoader`): A PyTorch dataloader of
query data.
meta_test_cfg (dict): Config for meta testing.
Returns:
tuple:
- results_list (list[np.ndarray]): Predict results.
- gt_labels (np.ndarray): Ground truth labels.
"""
# use copy of model for each task
model = copy.deepcopy(model)
# get ids of all classes in this task
task_class_ids = query_dataloader.dataset.get_task_class_ids()
# forward support set
model.before_forward_support()
support_cfg = meta_test_cfg.get('support', dict())
# methods with fine-tune stage
if support_cfg.get('train', False):
optimizer = build_optimizer(model, support_cfg.train['optimizer'])
num_steps = support_cfg.train['num_steps']
dataloader_iterator = iter(support_dataloader)
for i in range(num_steps):
try:
data = next(dataloader_iterator)
except StopIteration:
dataloader_iterator = iter(support_dataloader)
data = next(dataloader_iterator)
# map input labels into range of 0 to numbers of classes-1
data['gt_label'] = label_wrapper(data['gt_label'], task_class_ids)
optimizer.zero_grad()
# forward in `support` mode
outputs = model.forward(**data, mode='support')
outputs['loss'].backward()
optimizer.step()
# methods without fine-tune stage
else:
for i, data in enumerate(support_dataloader):
# map input labels into range of 0 to numbers of classes-1
data['gt_label'] = label_wrapper(data['gt_label'], task_class_ids)
# forward in `support` mode
model.forward(**data, mode='support')
# forward query set
model.before_forward_query()
results_list, gt_label_list = [], []
model.eval()
with torch.no_grad():
for i, data in enumerate(query_dataloader):
gt_label_list.append(data.pop('gt_label'))
# forward in `query` mode
result = model.forward(**data, mode='query')
results_list.extend(result)
gt_labels = torch.cat(gt_label_list, dim=0).cpu().numpy()
# map gt labels into range of 0 to numbers of classes-1.
gt_labels = label_wrapper(gt_labels, task_class_ids)
return results_list, gt_labels
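# Note on label_wrapper as used above (illustrative values): with task_class_ids = [7, 42, 3],
# gt labels [42, 3, 7, 42] are remapped to [1, 2, 0, 1], i.e. indices into the task's own
# class list, which is what "range of 0 to numbers of classes-1" means in the comments.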
| StarcoderdataPython |
4890536 | <gh_stars>0
"""Tests for move groups."""
import asyncio
from typing import Iterator
import pytest
from _pytest.fixtures import FixtureRequest
from opentrons_hardware.firmware_bindings import NodeId
from opentrons_hardware.firmware_bindings.messages.message_definitions import (
AddLinearMoveRequest,
GetMoveGroupRequest,
GetMoveGroupResponse,
)
from opentrons_hardware.firmware_bindings.messages.payloads import (
AddLinearMoveRequestPayload,
MoveGroupRequestPayload,
)
from opentrons_hardware.firmware_bindings.utils import (
UInt8Field,
Int32Field,
UInt32Field,
)
from opentrons_hardware.drivers.can_bus import CanMessenger, WaitableCallback
@pytest.fixture(
scope="session",
params=list(range(3)),
)
def group_id(request: FixtureRequest) -> Iterator[int]:
"""A group id test fixture."""
yield request.param # type: ignore[attr-defined]
@pytest.mark.requires_emulator
async def test_add_moves(
loop: asyncio.BaseEventLoop,
can_messenger: CanMessenger,
can_messenger_queue: WaitableCallback,
motor_node_id: NodeId,
group_id: int,
) -> None:
"""It should add moves and verify that they were stored correctly."""
durations = 100, 200, 300
moves = (
AddLinearMoveRequest(
payload=AddLinearMoveRequestPayload(
group_id=UInt8Field(group_id),
seq_id=UInt8Field(i),
duration=UInt32Field(duration),
request_stop_condition=UInt8Field(0),
acceleration=Int32Field(0),
velocity=Int32Field(0),
)
)
for i, duration in enumerate(durations)
)
# Add the moves
for move in moves:
await can_messenger.send(node_id=motor_node_id, message=move)
# Get the move group
await can_messenger.send(
node_id=motor_node_id,
message=GetMoveGroupRequest(
payload=MoveGroupRequestPayload(group_id=UInt8Field(group_id))
),
)
response, arbitration_id = await asyncio.wait_for(can_messenger_queue.read(), 1)
assert isinstance(response, GetMoveGroupResponse)
assert response.payload.num_moves.value == len(durations)
assert response.payload.total_duration.value == sum(durations)
| StarcoderdataPython |
12841414 | <filename>lib/data_coll/source_file.py
import os
import config
def walk():
"""
    Walk the configured source directories and yield their files.
    :return: Generator of (path, file) tuples
"""
for root in config.Source.include:
for path, _, files in os.walk(root):
if path not in config.Source.exclude:
for file in files:
yield (path, file)
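# Minimal usage sketch (not part of the original module): print the full path
# of every file discovered under the configured source roots. It assumes the
# directories listed in `config.Source.include` exist on disk.
if __name__ == "__main__":
    for path, file in walk():
        print(os.path.join(path, file))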
| StarcoderdataPython |
5055204 | <filename>test_cmds_txs.py
import binascii
TRANSACTIONS = [
{
"id": "0f51ac8bd9c7413ea9a6ceb1d67688f1786dd43f6bb71b9715e9ff0ebda61136",
"tokens": [
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>"
],
"outputs": [
{
"index": 0,
"value": 74187765000000000,
"height": 284761,
"tree": "101004020e36100204a00b08cd0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798ea02d192a39a8cc7a7017300730110010204020404040004c0fd4f05808c82f5f6030580b8c9e5ae040580f882ad16040204c0944004c0f407040004000580f882ad16d19683030191a38cc7a7019683020193c2b2a57300007473017302830108cdeeac93a38cc7b2a573030001978302019683040193b1a5730493c2a7c2b2a573050093958fa3730673079973089c73097e9a730a9d99a3730b730c0599c1a7c1b2a5730d00938cc7b2a5730e0001a390c1a7730f",
"tokens": [
{"index": 0, "value": 67500123000},
{"index": 1, "value": 12300675000},
],
"registers": "03070327e65711a59378c59359c3e1d0f7abe906479eccb76094e50fe79d743ccc15e60e20e26d41ed030a30cd563681e72f0b9c07825ac983f8c253a87a43c1da21958ece05feaff5de0f"
},
{
"index": 1,
"value": 67500000000,
"height": 284761,
"tree": "100204a00b08cd021dde34603426402615658f1d970cfa7c7bd92ac81a8b16eeebff264d59ce4604ea02d192a39a8cc7a70173007301",
"tokens": [
{"index": 1, "value": 32100675000},
{"index": 2, "value": 300000},
{"index": 3, "value": 123456789}
],
"registers": "02070327e65711a59378c59359c3e1d0f7abe906479eccb76094e50fe79d743ccc15e60e20e26d41ed030a30cd563681e72f0b9c07825ac983f8c253a87a43c1da21958ece"
}
]
}
]
def serialize_box_header(info: dict, tx_id: str) -> bytes:
return binascii.unhexlify(tx_id) + \
info["index"].to_bytes(2, 'big', signed=False) + \
info["value"].to_bytes(8, 'big', signed=False) + \
int(len(info["tree"]) / 2).to_bytes(4, 'big', signed=False) + \
info["height"].to_bytes(4, 'big', signed=False) + \
len(info["tokens"]).to_bytes(1, 'big', signed=False) + \
int(len(info["registers"]) / 2).to_bytes(4, 'big', signed=False)
def serialize_box_header_sign(info: dict) -> bytes:
return info["value"].to_bytes(8, 'big', signed=False) + \
int(len(info["tree"]) / 2).to_bytes(4, 'big', signed=False) + \
info["height"].to_bytes(4, 'big', signed=False) + \
len(info["tokens"]).to_bytes(1, 'big', signed=False) + \
int(len(info["registers"]) / 2).to_bytes(4, 'big', signed=False)
def serialize_tx_tokens(tokens: list) -> bytes:
result = bytes()
for token in tokens:
result += binascii.unhexlify(token)
return result
def serialize_box_tokens(box_tokens: list, tx_tokens: list) -> bytes:
result = bytes()
for token in box_tokens:
result += binascii.unhexlify(tx_tokens[token["index"]])
result += token["value"].to_bytes(8, 'big', signed=False)
return result
def serialize_box_tokens_sign(tokens: list) -> bytes:
result = bytes()
for token in tokens:
result += token["index"].to_bytes(4, 'big', signed=False)
result += token["value"].to_bytes(8, 'big', signed=False)
return result
def print_input_frame(frame: bytes):
offset = 0
print(f"Frame:")
print(f" ID: {binascii.hexlify(frame[offset:offset+32]).decode()}")
offset += 32
print(f" Count: {frame[offset]}")
offset += 1
print(f" Index: {frame[offset]}")
offset += 1
value = int.from_bytes(frame[offset:offset+8], 'big', signed=False)
offset += 8
print(f" Value: {value}")
token_count = int(frame[offset])
offset += 1
print(f" Tokens[{token_count}]:")
for token in range(0, token_count):
        token_id = frame[offset:offset+32]
        offset += 32
        value = int.from_bytes(frame[offset:offset+8], 'big', signed=False)
        offset += 8
        print(f" [{token}] Id: {binascii.hexlify(token_id).decode()}")
print(f" [{token}] Value: {value}")
print(f" Signature: {binascii.hexlify(frame[offset:]).decode()}")
| StarcoderdataPython |