Dataset columns:

| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 2 to 1.02M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 245 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64, nullable | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 4 to 245 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64, nullable | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 4 to 245 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64, nullable | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 2 to 1.02M |
| avg_line_length | float64 | 1 to 417k |
| max_line_length | int64 | 1 to 987k |
| alphanum_fraction | float64 | 0 to 1 |
| content_no_comment | string | length 0 to 1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |
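A minimal sketch of loading and inspecting records with this schema, assuming the table is published as a Hugging Face dataset; the dataset ID below is a placeholder, not the real one.

```python
# Minimal sketch: load one record of this schema and look at its fields.
# NOTE: "user/python-code-with-comments-removed" is a placeholder ID,
# not the actual dataset path.
from datasets import load_dataset

ds = load_dataset("user/python-code-with-comments-removed", split="train")
row = ds[0]

print(row["max_stars_repo_name"], row["size"], row["alphanum_fraction"])
print(row["content"][:200])              # raw file text
print(row["content_no_comment"][:200])   # same file with comments stripped
```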
Example 1: HDPython/ast/ast_classes/ast_op_stream_out.py

| field | value |
|---|---|
| hexsha | f70bd2c627e3404c5ea43791336a763164c6fba4 |
| size | 1,318 |
| ext / lang | py / Python |
| repo_path (stars, issues, forks) | HDPython/ast/ast_classes/ast_op_stream_out.py |
| repo_name (stars, issues, forks) | HardwareDesignWithPython/HDPython |
| repo_head_hexsha (stars, issues, forks) | aade03aaa092b1684fa12bffd17674cf1c45f5ac |
| repo_licenses (stars, issues, forks) | ["MIT"] |
| max_stars_count | null |
| stars_event_min / max_datetime | null / null |
| max_issues_count | null |
| issues_event_min / max_datetime | null / null |
| max_forks_count | 1 |
| forks_event_min / max_datetime | 2021-10-20T20:08:16.000Z / 2021-10-20T20:08:16.000Z |

content:
```python
from HDPython.ast.ast_classes.ast_base import v_ast_base, add_class
import HDPython.hdl_converter as hdl
from HDPython.ast.ast_hdl_error import HDPython_error
from HDPython.base import HDPython_base


class v_re_assigne_rhsift(v_ast_base):
    def __init__(self,lhs, rhs,context=None, astParser=None):
        self.lhs = lhs
        self.rhs = rhs
        self.context =context
        self.astParser = astParser

    def __str__(self):
        if issubclass(type(self.lhs),HDPython_base):
            return hdl.impl_reasign_rshift_(self.lhs, self.rhs, astParser=self.astParser, context_str=self.context )
        return str(self.lhs) + " := " + str(self.rhs)


def body_RShift(astParser,Node):
    rhs = astParser.Unfold_body(Node.right)
    lhs = astParser.Unfold_body(Node.left)
    if issubclass( type(lhs),HDPython_base) and issubclass( type(rhs),HDPython_base):
        rhs.__Driver__ = astParser.ContextName[-1]
        return v_re_assigne_rhsift(lhs, rhs,context=astParser.ContextName[-1],astParser=astParser)

    err_msg = HDPython_error(
        astParser.sourceFileName,
        Node.lineno,
        Node.col_offset,
        type(lhs).__name__,
        "right shift is only supported for HDPyhon objects"
    )
    raise Exception(err_msg,lhs)


add_class("RShift",body_RShift)
```
avg_line_length: 33.794872, max_line_length: 116, alphanum_fraction: 0.695751

content_no_comment:
```python
from HDPython.ast.ast_classes.ast_base import v_ast_base, add_class
import HDPython.hdl_converter as hdl
from HDPython.ast.ast_hdl_error import HDPython_error
from HDPython.base import HDPython_base


class v_re_assigne_rhsift(v_ast_base):
    def __init__(self,lhs, rhs,context=None, astParser=None):
        self.lhs = lhs
        self.rhs = rhs
        self.context =context
        self.astParser = astParser

    def __str__(self):
        if issubclass(type(self.lhs),HDPython_base):
            return hdl.impl_reasign_rshift_(self.lhs, self.rhs, astParser=self.astParser, context_str=self.context )
        return str(self.lhs) + " := " + str(self.rhs)


def body_RShift(astParser,Node):
    rhs = astParser.Unfold_body(Node.right)
    lhs = astParser.Unfold_body(Node.left)
    if issubclass( type(lhs),HDPython_base) and issubclass( type(rhs),HDPython_base):
        rhs.__Driver__ = astParser.ContextName[-1]
        return v_re_assigne_rhsift(lhs, rhs,context=astParser.ContextName[-1],astParser=astParser)

    err_msg = HDPython_error(
        astParser.sourceFileName,
        Node.lineno,
        Node.col_offset,
        type(lhs).__name__,
        "right shift is only supported for HDPyhon objects"
    )
    raise Exception(err_msg,lhs)


add_class("RShift",body_RShift)
```
is_comment_constant_removed: true, is_sharp_comment_removed: true
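The paired content and content_no_comment columns, together with the two boolean flags, lend themselves to a quick consistency check. The helper below is a hypothetical sketch, not part of the dataset tooling.

```python
# Hypothetical helper: how much of a record's text survives comment removal.
def comment_ratio(record: dict) -> float:
    original = record["content"]
    stripped = record["content_no_comment"]
    return len(stripped) / len(original) if original else 1.0

# For Example 1 both removal flags are true, yet the ratio stays close to 1:
# the file contains no '#' comment lines to begin with.
```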
Example 2: dask_kubernetes/conftest.py

| field | value |
|---|---|
| hexsha | f70bd2ee450be0f82157aa65881304ad6a24cb47 |
| size | 1,884 |
| ext / lang | py / Python |
| repo_path (stars, issues, forks) | dask_kubernetes/conftest.py |
| repo_name (stars, issues, forks) | ddelange/dask-kubernetes |
| repo_head_hexsha (stars, issues, forks) | 42bcf9817ea963bf048f9dd06caec1622656302a |
| repo_licenses (stars, issues, forks) | ["BSD-3-Clause"] |
| max_stars_count | 1 |
| stars_event_min / max_datetime | 2022-01-20T12:38:27.000Z / 2022-01-20T12:38:27.000Z |
| max_issues_count | null |
| issues_event_min / max_datetime | null / null |
| max_forks_count | null |
| forks_event_min / max_datetime | null / null |

content:
```python
import pytest
import pathlib
import os
import subprocess
import tempfile
from kopf.testing import KopfRunner
from dask_kubernetes.common.utils import check_dependency

DIR = pathlib.Path(__file__).parent.absolute()

check_dependency("helm")
check_dependency("kubectl")
check_dependency("docker")


@pytest.fixture()
async def kopf_runner(k8s_cluster):
    yield KopfRunner(["run", "-m", "dask_kubernetes.operator", "--verbose"])


@pytest.fixture(scope="session")
def docker_image():
    image_name = "dask-kubernetes:dev"
    subprocess.check_output(["docker", "build", "-t", image_name, "./ci/"])
    return image_name


@pytest.fixture(scope="session")
def k8s_cluster(kind_cluster, docker_image):
    os.environ["KUBECONFIG"] = str(kind_cluster.kubeconfig_path)
    kind_cluster.load_docker_image(docker_image)
    yield kind_cluster
    del os.environ["KUBECONFIG"]


@pytest.fixture(scope="session")
def ns(k8s_cluster):
    return "default"


def run_generate(crd_path, patch_path, temp_path):
    subprocess.run(
        ["k8s-crd-resolver", "-r", "-j", patch_path, crd_path, temp_path],
        check=True,
        env={**os.environ},
    )


@pytest.fixture(scope="session", autouse=True)
def customresources(k8s_cluster):
    temp_dir = tempfile.TemporaryDirectory()
    crd_path = os.path.join(DIR, "operator", "customresources")
    run_generate(
        os.path.join(crd_path, "daskcluster.yaml"),
        os.path.join(crd_path, "daskcluster.patch.yaml"),
        os.path.join(temp_dir.name, "daskcluster.yaml"),
    )
    run_generate(
        os.path.join(crd_path, "daskworkergroup.yaml"),
        os.path.join(crd_path, "daskworkergroup.patch.yaml"),
        os.path.join(temp_dir.name, "daskworkergroup.yaml"),
    )
    k8s_cluster.kubectl("apply", "-f", temp_dir.name)
    yield
    k8s_cluster.kubectl("delete", "-f", temp_dir.name)
    temp_dir.cleanup()
```
avg_line_length: 25.808219, max_line_length: 76, alphanum_fraction: 0.701168

content_no_comment:
```python
import pytest
import pathlib
import os
import subprocess
import tempfile
from kopf.testing import KopfRunner
from dask_kubernetes.common.utils import check_dependency

DIR = pathlib.Path(__file__).parent.absolute()

check_dependency("helm")
check_dependency("kubectl")
check_dependency("docker")


@pytest.fixture()
async def kopf_runner(k8s_cluster):
    yield KopfRunner(["run", "-m", "dask_kubernetes.operator", "--verbose"])


@pytest.fixture(scope="session")
def docker_image():
    image_name = "dask-kubernetes:dev"
    subprocess.check_output(["docker", "build", "-t", image_name, "./ci/"])
    return image_name


@pytest.fixture(scope="session")
def k8s_cluster(kind_cluster, docker_image):
    os.environ["KUBECONFIG"] = str(kind_cluster.kubeconfig_path)
    kind_cluster.load_docker_image(docker_image)
    yield kind_cluster
    del os.environ["KUBECONFIG"]


@pytest.fixture(scope="session")
def ns(k8s_cluster):
    return "default"


def run_generate(crd_path, patch_path, temp_path):
    subprocess.run(
        ["k8s-crd-resolver", "-r", "-j", patch_path, crd_path, temp_path],
        check=True,
        env={**os.environ},
    )


@pytest.fixture(scope="session", autouse=True)
def customresources(k8s_cluster):
    temp_dir = tempfile.TemporaryDirectory()
    crd_path = os.path.join(DIR, "operator", "customresources")
    run_generate(
        os.path.join(crd_path, "daskcluster.yaml"),
        os.path.join(crd_path, "daskcluster.patch.yaml"),
        os.path.join(temp_dir.name, "daskcluster.yaml"),
    )
    run_generate(
        os.path.join(crd_path, "daskworkergroup.yaml"),
        os.path.join(crd_path, "daskworkergroup.patch.yaml"),
        os.path.join(temp_dir.name, "daskworkergroup.yaml"),
    )
    k8s_cluster.kubectl("apply", "-f", temp_dir.name)
    yield
    k8s_cluster.kubectl("delete", "-f", temp_dir.name)
    temp_dir.cleanup()
```
is_comment_constant_removed: true, is_sharp_comment_removed: true
Example 3: bigquery/samples/tests/test_query_to_arrow.py

| field | value |
|---|---|
| hexsha | f70bd49fe5654e00114c7d8e83bb1de6aef33e5b |
| size | 1,019 |
| ext / lang | py / Python |
| repo_path (stars, issues, forks) | bigquery/samples/tests/test_query_to_arrow.py |
| repo_name (stars, issues, forks) | ryanyuan/google-cloud-python |
| repo_head_hexsha (stars, issues, forks) | db481bfdd6816d020d99df0d4caa307358ab1141 |
| repo_licenses (stars, issues, forks) | ["Apache-2.0"] |
| max_stars_count | 2 |
| stars_event_min / max_datetime | 2021-11-26T07:08:43.000Z / 2022-03-07T20:20:04.000Z |
| max_issues_count | null |
| issues_event_min / max_datetime | null / null |
| max_forks_count | null |
| forks_event_min / max_datetime | null / null |

content:
```python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pyarrow
from .. import query_to_arrow


def test_query_to_arrow(capsys, client):
    arrow_table = query_to_arrow.query_to_arrow(client)
    out, err = capsys.readouterr()
    assert "Downloaded 8 rows, 2 columns." in out
    arrow_schema = arrow_table.schema
    assert arrow_schema.names == ["race", "participant"]
    assert pyarrow.types.is_string(arrow_schema.types[0])
    assert pyarrow.types.is_struct(arrow_schema.types[1])
```
avg_line_length: 33.966667, max_line_length: 74, alphanum_fraction: 0.75368

content_no_comment:
```python
import pyarrow
from .. import query_to_arrow


def test_query_to_arrow(capsys, client):
    arrow_table = query_to_arrow.query_to_arrow(client)
    out, err = capsys.readouterr()
    assert "Downloaded 8 rows, 2 columns." in out
    arrow_schema = arrow_table.schema
    assert arrow_schema.names == ["race", "participant"]
    assert pyarrow.types.is_string(arrow_schema.types[0])
    assert pyarrow.types.is_struct(arrow_schema.types[1])
```
is_comment_constant_removed: true, is_sharp_comment_removed: true
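The three numeric columns shown with each record can in principle be recomputed from content. The exact definitions used when the dataset was built are an assumption here: per-line character counts over newline-split lines, and str.isalnum over the whole string.

```python
# Assumed definitions for the per-file statistics; the dataset's actual
# recipe may differ slightly (e.g. in how trailing newlines are counted).
def file_stats(content: str) -> dict:
    lines = content.splitlines()
    return {
        "avg_line_length": sum(len(l) for l in lines) / max(len(lines), 1),
        "max_line_length": max((len(l) for l in lines), default=0),
        "alphanum_fraction": sum(c.isalnum() for c in content) / max(len(content), 1),
    }
```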
Example 4: src/oci/autoscaling/models/auto_scaling_policy_summary.py

| field | value |
|---|---|
| hexsha | f70bd5b228ad260502fad0f468efc7ec516cb86b |
| size | 4,898 |
| ext / lang | py / Python |
| repo_path (stars, issues, forks) | src/oci/autoscaling/models/auto_scaling_policy_summary.py |
| repo_name (stars, issues, forks) | Manny27nyc/oci-python-sdk |
| repo_head_hexsha (stars, issues, forks) | de60b04e07a99826254f7255e992f41772902df7 |
| repo_licenses (stars, issues, forks) | ["Apache-2.0", "BSD-3-Clause"] |
| max_stars_count | 249 |
| stars_event_min / max_datetime | 2017-09-11T22:06:05.000Z / 2022-03-04T17:09:29.000Z |
| max_issues_count | 228 |
| issues_event_min / max_datetime | 2017-09-11T23:07:26.000Z / 2022-03-23T10:58:50.000Z |
| max_forks_count | 224 |
| forks_event_min / max_datetime | 2017-09-27T07:32:43.000Z / 2022-03-25T16:55:42.000Z |

content:
```python
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.

from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs


@init_model_state_from_kwargs
class AutoScalingPolicySummary(object):
    """
    Summary information for an autoscaling policy.
    """

    def __init__(self, **kwargs):
        """
        Initializes a new AutoScalingPolicySummary object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param id:
            The value to assign to the id property of this AutoScalingPolicySummary.
        :type id: str

        :param display_name:
            The value to assign to the display_name property of this AutoScalingPolicySummary.
        :type display_name: str

        :param policy_type:
            The value to assign to the policy_type property of this AutoScalingPolicySummary.
        :type policy_type: str

        :param is_enabled:
            The value to assign to the is_enabled property of this AutoScalingPolicySummary.
        :type is_enabled: bool
        """
        self.swagger_types = {
            'id': 'str',
            'display_name': 'str',
            'policy_type': 'str',
            'is_enabled': 'bool'
        }

        self.attribute_map = {
            'id': 'id',
            'display_name': 'displayName',
            'policy_type': 'policyType',
            'is_enabled': 'isEnabled'
        }

        self._id = None
        self._display_name = None
        self._policy_type = None
        self._is_enabled = None

    @property
    def id(self):
        """
        **[Required]** Gets the id of this AutoScalingPolicySummary.
        The ID of the autoscaling policy that is assigned after creation.

        :return: The id of this AutoScalingPolicySummary.
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """
        Sets the id of this AutoScalingPolicySummary.
        The ID of the autoscaling policy that is assigned after creation.

        :param id: The id of this AutoScalingPolicySummary.
        :type: str
        """
        self._id = id

    @property
    def display_name(self):
        """
        Gets the display_name of this AutoScalingPolicySummary.
        A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.

        :return: The display_name of this AutoScalingPolicySummary.
        :rtype: str
        """
        return self._display_name

    @display_name.setter
    def display_name(self, display_name):
        """
        Sets the display_name of this AutoScalingPolicySummary.
        A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.

        :param display_name: The display_name of this AutoScalingPolicySummary.
        :type: str
        """
        self._display_name = display_name

    @property
    def policy_type(self):
        """
        **[Required]** Gets the policy_type of this AutoScalingPolicySummary.
        The type of autoscaling policy.

        :return: The policy_type of this AutoScalingPolicySummary.
        :rtype: str
        """
        return self._policy_type

    @policy_type.setter
    def policy_type(self, policy_type):
        """
        Sets the policy_type of this AutoScalingPolicySummary.
        The type of autoscaling policy.

        :param policy_type: The policy_type of this AutoScalingPolicySummary.
        :type: str
        """
        self._policy_type = policy_type

    @property
    def is_enabled(self):
        """
        Gets the is_enabled of this AutoScalingPolicySummary.
        Whether the autoscaling policy is enabled.

        :return: The is_enabled of this AutoScalingPolicySummary.
        :rtype: bool
        """
        return self._is_enabled

    @is_enabled.setter
    def is_enabled(self, is_enabled):
        """
        Sets the is_enabled of this AutoScalingPolicySummary.
        Whether the autoscaling policy is enabled.

        :param is_enabled: The is_enabled of this AutoScalingPolicySummary.
        :type: bool
        """
        self._is_enabled = is_enabled

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        if other is None:
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
```
avg_line_length: 29.865854, max_line_length: 245, alphanum_fraction: 0.64067

content_no_comment:
```python
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel
from oci.decorators import init_model_state_from_kwargs


@init_model_state_from_kwargs
class AutoScalingPolicySummary(object):
    def __init__(self, **kwargs):
        self.swagger_types = {
            'id': 'str',
            'display_name': 'str',
            'policy_type': 'str',
            'is_enabled': 'bool'
        }

        self.attribute_map = {
            'id': 'id',
            'display_name': 'displayName',
            'policy_type': 'policyType',
            'is_enabled': 'isEnabled'
        }

        self._id = None
        self._display_name = None
        self._policy_type = None
        self._is_enabled = None

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, id):
        self._id = id

    @property
    def display_name(self):
        return self._display_name

    @display_name.setter
    def display_name(self, display_name):
        self._display_name = display_name

    @property
    def policy_type(self):
        return self._policy_type

    @policy_type.setter
    def policy_type(self, policy_type):
        self._policy_type = policy_type

    @property
    def is_enabled(self):
        return self._is_enabled

    @is_enabled.setter
    def is_enabled(self, is_enabled):
        self._is_enabled = is_enabled

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        if other is None:
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
```
is_comment_constant_removed: true, is_sharp_comment_removed: true
Example 5: python/consumer.py

| field | value |
|---|---|
| hexsha | f70bd624daea884c43b42cf57519b4e3a69a5311 |
| size | 1,413 |
| ext / lang | py / Python |
| repo_path (stars, issues, forks) | python/consumer.py |
| repo_name (stars, issues, forks) | vishnuhd/getting-started-with-apache-kafka |
| repo_head_hexsha (stars, issues, forks) | 7b900fc419cbcda8ab882121c2a72e63bdc8b2c4 |
| repo_licenses (stars, issues, forks) | ["Apache-2.0"] |
| max_stars_count | 14 |
| stars_event_min / max_datetime | 2018-11-01T16:11:41.000Z / 2019-06-03T14:52:03.000Z |
| max_issues_count | 1 |
| issues_event_min / max_datetime | 2018-10-31T15:39:24.000Z / 2018-11-30T03:05:10.000Z |
| max_forks_count | 8 |
| forks_event_min / max_datetime | 2018-10-31T15:39:39.000Z / 2019-06-06T12:21:55.000Z |

content:
```python
from confluent_kafka import Consumer, KafkaException, KafkaError
import sys
import logging
from pprint import pformat


def print_assignment(consumer, partitions):
    print('Assignment:', partitions)


if __name__ == '__main__':
    conf = {
        'bootstrap.servers': 'localhost:9092',
        'group.id': 'devnation-python',
        'session.timeout.ms': 6000,
        'auto.offset.reset': 'earliest'
    }

    c = Consumer(conf)
    c.subscribe(['devnation'], on_assign=print_assignment)

    # Read messages from Kafka, print to stdout
    try:
        while True:
            msg = c.poll(timeout=1.0)
            if msg is None:
                continue
            if msg.error():
                if msg.error().code() == KafkaError._PARTITION_EOF:
                    # Continue -> we reached the end of the partition
                    continue
                else:
                    sys.stderr.write('-E- Something went wrong: %s' % msg.error())
                    break
            else:
                # Proper message
                sys.stderr.write('-I- %s [%d] at offset %d with key %s: ' %
                                 (msg.topic(), msg.partition(), msg.offset(),
                                  str(msg.key())))
                print(msg.value())
    except KeyboardInterrupt:
        sys.stderr.write('%% Aborted by user\n')
    finally:
        c.close()
```
avg_line_length: 31.4, max_line_length: 82, alphanum_fraction: 0.515924

content_no_comment:
```python
from confluent_kafka import Consumer, KafkaException, KafkaError
import sys
import logging
from pprint import pformat


def print_assignment(consumer, partitions):
    print('Assignment:', partitions)


if __name__ == '__main__':
    conf = {
        'bootstrap.servers': 'localhost:9092',
        'group.id': 'devnation-python',
        'session.timeout.ms': 6000,
        'auto.offset.reset': 'earliest'
    }

    c = Consumer(conf)
    c.subscribe(['devnation'], on_assign=print_assignment)

    try:
        while True:
            msg = c.poll(timeout=1.0)
            if msg is None:
                continue
            if msg.error():
                if msg.error().code() == KafkaError._PARTITION_EOF:
                    continue
                else:
                    sys.stderr.write('-E- Something went wrong: %s' % msg.error())
                    break
            else:
                sys.stderr.write('-I- %s [%d] at offset %d with key %s: ' %
                                 (msg.topic(), msg.partition(), msg.offset(),
                                  str(msg.key())))
                print(msg.value())
    except KeyboardInterrupt:
        sys.stderr.write('%% Aborted by user\n')
    finally:
        c.close()
```
is_comment_constant_removed: true, is_sharp_comment_removed: true
Example 6: WrightTools/data/_data.py

| field | value |
|---|---|
| hexsha | f70bd6f163acbc02bb3dfe9ad87f6d700204f840 |
| size | 73,983 |
| ext / lang | py / Python |
| repo_path (stars, issues, forks) | WrightTools/data/_data.py |
| repo_name (stars, issues, forks) | untzag/WrightTools |
| repo_head_hexsha (stars, issues, forks) | 05480d2f91ceeca422d9e5ac381fce1840207cb0 |
| repo_licenses (stars, issues, forks) | ["MIT"] |
| max_stars_count | 12 |
| stars_event_min / max_datetime | 2017-07-11T15:58:12.000Z / 2021-05-10T20:33:26.000Z |
| max_issues_count | 808 |
| issues_event_min / max_datetime | 2015-04-12T00:36:08.000Z / 2022-03-27T21:06:06.000Z |
| max_forks_count | 9 |
| forks_event_min / max_datetime | 2017-07-22T18:54:23.000Z / 2022-02-17T20:31:05.000Z |

content:
"""Central data class and associated."""
# --- import --------------------------------------------------------------------------------------
import collections
import operator
import functools
import warnings
import numpy as np
import h5py
import scipy
from scipy.interpolate import griddata, interp1d
from .._group import Group
from .. import collection as wt_collection
from .. import exceptions as wt_exceptions
from .. import kit as wt_kit
from .. import units as wt_units
from ._axis import Axis, identifier_to_operator
from ._channel import Channel
from ._constant import Constant
from ._variable import Variable
# --- define --------------------------------------------------------------------------------------
__all__ = ["Data"]
# --- class ---------------------------------------------------------------------------------------
class Data(Group):
"""Multidimensional dataset."""
class_name = "Data"
def __init__(self, *args, **kwargs):
self._axes = []
self._constants = []
Group.__init__(self, *args, **kwargs)
# populate axes, constants from attrs string
for identifier in self.attrs.get("axes", []):
if hasattr(identifier, "decode"):
identifier = identifier.decode()
expression, units = identifier.split("{")
units = units.replace("}", "").strip()
if units == "None":
units = None
# Should not be needed for wt5 >= 1.0.3, kept for opening older wt5 files.
for i in identifier_to_operator.keys():
expression = expression.replace(i, identifier_to_operator[i])
expression = expression.replace(" ", "") # remove all whitespace
axis = Axis(self, expression, units)
self._axes.append(axis)
for identifier in self.attrs.get("constants", []):
if hasattr(identifier, "decode"):
identifier = identifier.decode()
expression, units = identifier.split("{")
units = units.replace("}", "").strip()
if units == "None":
units = None
for i in identifier_to_operator.keys():
expression = expression.replace(i, identifier_to_operator[i])
expression = expression.replace(" ", "") # remove all whitespace
const = Constant(self, expression, units)
self._constants.append(const)
self._current_axis_identities_in_natural_namespace = []
if self.file.mode is not None and self.file.mode != "r":
self._on_constants_updated()
self._on_axes_updated()
# the following are populated if not already recorded
self.channel_names
self.source
self.variable_names
def __repr__(self) -> str:
return "<WrightTools.Data '{0}' {1} at {2}>".format(
self.natural_name, str(self.axis_names), "::".join([self.filepath, self.name])
)
@property
def axes(self) -> tuple:
return tuple(self._axes)
@property
def axis_expressions(self) -> tuple:
"""Axis expressions."""
return tuple(a.expression for a in self._axes)
@property
def axis_names(self) -> tuple:
"""Axis names."""
return tuple(a.natural_name for a in self._axes)
@property
def constants(self) -> tuple:
return tuple(self._constants)
@property
def constant_expressions(self) -> tuple:
"""Axis expressions."""
return tuple(a.expression for a in self._constants)
@property
def constant_names(self) -> tuple:
"""Axis names."""
return tuple(a.natural_name for a in self._constants)
@property
def channel_names(self) -> tuple:
"""Channel names."""
if "channel_names" not in self.attrs.keys():
self.attrs["channel_names"] = np.array([], dtype="S")
return tuple(s.decode() for s in self.attrs["channel_names"])
@channel_names.setter
def channel_names(self, value):
"""Set channel names."""
self.attrs["channel_names"] = np.array(value, dtype="S")
@property
def channels(self) -> tuple:
"""Channels."""
return tuple(self[n] for n in self.channel_names)
@property
def datasets(self) -> tuple:
"""Datasets."""
return tuple(v for _, v in self.items() if isinstance(v, h5py.Dataset))
@property
def kind(self):
"""Kind."""
if "kind" not in self.attrs.keys():
self.attrs["kind"] = "None"
value = self.attrs["kind"]
return value if not value == "None" else None
@property
def ndim(self) -> int:
"""Get number of dimensions."""
try:
assert self._ndim is not None
except (AssertionError, AttributeError):
if len(self.variables) == 0:
self._ndim = 0
else:
self._ndim = self.variables[0].ndim
finally:
return self._ndim
@property
def shape(self) -> tuple:
"""Shape."""
try:
assert self._shape is not None
except (AssertionError, AttributeError):
self._shape = wt_kit.joint_shape(*self.variables)
finally:
return self._shape
@property
def size(self) -> int:
"""Size."""
return functools.reduce(operator.mul, self.shape)
@property
def source(self):
"""Source."""
if "source" not in self.attrs.keys():
self.attrs["source"] = "None"
value = self.attrs["source"]
return value if not value == "None" else None
@property
def units(self) -> tuple:
"""All axis units."""
return tuple(a.units for a in self._axes)
@property
def constant_units(self) -> tuple:
"""All constant units."""
return tuple(a.units for a in self._constants)
@property
def variable_names(self) -> tuple:
"""Variable names."""
if "variable_names" not in self.attrs.keys():
self.attrs["variable_names"] = np.array([], dtype="S")
return tuple(s.decode() for s in self.attrs["variable_names"])
@variable_names.setter
def variable_names(self, value):
"""Set variable names."""
self.attrs["variable_names"] = np.array(value, dtype="S")
@property
def variables(self) -> tuple:
"""Variables."""
try:
assert self._variables is not None
except (AssertionError, AttributeError):
self._variables = [self[n] for n in self.variable_names]
finally:
return tuple(self._variables)
@property
def _leaf(self):
return "{0} {1}".format(self.natural_name, self.shape)
def _on_axes_updated(self):
"""Method to run when axes are changed in any way.
Propagates updated axes properly.
"""
# update attrs
self.attrs["axes"] = np.array([a.identity.encode() for a in self._axes], dtype="S")
# remove old attributes
while len(self._current_axis_identities_in_natural_namespace) > 0:
key = self._current_axis_identities_in_natural_namespace.pop(0)
try:
delattr(self, key)
except AttributeError:
pass # already gone
# populate new attributes
for a in self._axes:
key = a.natural_name
setattr(self, key, a)
self._current_axis_identities_in_natural_namespace.append(key)
def _on_constants_updated(self):
"""Method to run when constants are changed in any way.
Propagates updated constants properly.
"""
# update attrs
self.attrs["constants"] = np.array(
[a.identity.encode() for a in self._constants], dtype="S"
)
def _print_branch(self, prefix, depth, verbose):
def print_leaves(prefix, lis, vline=True):
for i, item in enumerate(lis):
if vline:
a = "│ "
else:
a = " "
if i + 1 == len(lis):
b = "└── "
else:
b = "├── "
s = prefix + a + b + "{0}: {1}".format(i, item._leaf)
print(s)
if verbose:
# axes
print(prefix + "├── axes")
print_leaves(prefix, self.axes)
# constants
print(prefix + "├── constants")
print_leaves(prefix, self.constants)
# variables
print(prefix + "├── variables")
print_leaves(prefix, self.variables)
# channels
print(prefix + "└── channels")
print_leaves(prefix, self.channels, vline=False)
else:
# axes
s = "axes: "
s += ", ".join(["{0} ({1})".format(a.expression, a.units) for a in self.axes])
print(prefix + "├── " + s)
# constants
s = "constants: "
s += ", ".join(
["{0} ({1} {2})".format(a.expression, a.value, a.units) for a in self.constants]
)
print(prefix + "├── " + s)
# channels
s = "channels: "
s += ", ".join(self.channel_names)
print(prefix + "└── " + s)
def bring_to_front(self, channel):
"""Bring a specific channel to the zero-indexed position in channels.
All other channels get pushed back but remain in order.
Parameters
----------
channel : int or str
Channel index or name.
"""
channel_index = wt_kit.get_index(self.channel_names, channel)
new = list(self.channel_names)
new.insert(0, new.pop(channel_index))
self.channel_names = new
def chop(self, *args, at={}, parent=None, verbose=True) -> wt_collection.Collection:
"""Divide the dataset into its lower-dimensionality components.
Parameters
----------
axis : str or int (args)
Axes of the returned data objects. Strings refer to the names of
axes in this object, integers refer to their index. Provide multiple
axes to return multidimensional data objects.
at : dict (optional)
Choice of position along an axis. Keys are axis names, values are lists
``[position, input units]``. If exact position does not exist,
the closest valid position is used.
parent : WrightTools Collection instance (optional)
Collection to place the new "chop" collection within. Default is
None (new parent).
verbose : bool (optional)
Toggle talkback. Default is True.
Returns
-------
WrightTools Collection
Collection of chopped data objects.
Examples
--------
>>> data.axis_names
['d2', 'w1', 'w2']
Get all w1 wigners.
>>> datas = data.chop('d2', 'w1')
>>> len(datas)
51
Get 2D frequency at d2=0 fs.
>>> datas = data.chop('w1', 'w2', at={'d2': [0, 'fs']})
>>> len(datas)
0
>>> datas[0].axis_names
['w1', 'w2']
>>> datas[0].d2[:]
0.
See Also
--------
collapse
Collapse the dataset along one axis.
split
Split the dataset while maintaining its dimensionality.
"""
from ._axis import operators, operator_to_identifier
# parse args
args = list(args)
for i, arg in enumerate(args):
if isinstance(arg, int):
args[i] = self._axes[arg].natural_name
elif isinstance(arg, str):
# same normalization that occurs in the natural_name @property
arg = arg.strip()
for op in operators:
arg = arg.replace(op, operator_to_identifier[op])
args[i] = wt_kit.string2identifier(arg)
# normalize the at keys to the natural name
for k in [ak for ak in at.keys() if type(ak) == str]:
for op in operators:
if op in k:
nk = k.replace(op, operator_to_identifier[op])
at[nk] = at[k]
at.pop(k)
k = nk
# get output collection
out = wt_collection.Collection(name="chop", parent=parent)
# get output shape
kept = args + [ak for ak in at.keys() if type(ak) == str]
kept_axes = [self._axes[self.axis_names.index(a)] for a in kept]
removed_axes = [a for a in self._axes if a not in kept_axes]
removed_shape = wt_kit.joint_shape(*removed_axes)
if removed_shape == ():
removed_shape = (1,) * self.ndim
removed_shape = list(removed_shape)
for i in at.keys():
if type(i) == int:
removed_shape[i] = 1
for ax in kept_axes:
if ax.shape.count(1) == ax.ndim - 1:
removed_shape[ax.shape.index(ax.size)] = 1
removed_shape = tuple(removed_shape)
# iterate
i = 0
for idx in np.ndindex(removed_shape):
idx = np.array(idx, dtype=object)
idx[np.array(removed_shape) == 1] = slice(None)
for axis, point in at.items():
if type(axis) == int:
idx[axis] = point
continue
point, units = point
destination_units = self._axes[self.axis_names.index(axis)].units
point = wt_units.converter(point, units, destination_units)
axis_index = self.axis_names.index(axis)
axis = self._axes[axis_index]
idx_index = np.array(axis.shape) > 1
if np.sum(idx_index) > 1:
raise wt_exceptions.MultidimensionalAxisError("chop", axis.natural_name)
idx_index = list(idx_index).index(True)
idx[idx_index] = np.argmin(np.abs(axis[tuple(idx)] - point))
data = out.create_data(name="chop%03i" % i)
for v in self.variables:
kwargs = {}
kwargs["name"] = v.natural_name
kwargs["values"] = v[idx]
kwargs["units"] = v.units
kwargs["label"] = v.label
kwargs.update(v.attrs)
data.create_variable(**kwargs)
for c in self.channels:
kwargs = {}
kwargs["name"] = c.natural_name
kwargs["values"] = c[idx]
kwargs["units"] = c.units
kwargs["label"] = c.label
kwargs["signed"] = c.signed
kwargs.update(c.attrs)
data.create_channel(**kwargs)
new_axes = [a.expression for a in kept_axes if a.expression not in at.keys()]
new_axis_units = [a.units for a in kept_axes if a.expression not in at.keys()]
data.transform(*new_axes)
for const in self.constant_expressions:
data.create_constant(const, verbose=False)
for ax in self.axis_expressions:
if ax not in new_axes:
data.create_constant(ax, verbose=False)
for j, units in enumerate(new_axis_units):
data.axes[j].convert(units)
i += 1
out.flush()
# return
if verbose:
print("chopped data into %d piece(s)" % len(out), "in", new_axes)
return out
def gradient(self, axis, *, channel=0):
"""
Compute the gradient along one axis.
New channels have names ``<channel name>_<axis name>_gradient``.
Parameters
----------
axis : int or str
The axis to differentiate along.
If given as an integer, the axis in the underlying array is used,
and unitary spacing is assumed.
If given as a string, the axis must exist, and be a 1D array-aligned axis.
(i.e. have a shape with a single value which is not ``1``)
The axis to collapse along is inferred from the shape of the axis.
channel : int or str
The channel to differentiate.
Default is the first channel.
"""
# get axis index --------------------------------------------------------------------------
if isinstance(axis, int):
axis_index = axis
elif isinstance(axis, str):
index = self.axis_names.index(axis)
axes = [i for i in range(self.ndim) if self.axes[index].shape[i] > 1]
if len(axes) > 1:
raise wt_exceptions.MultidimensionalAxisError(axis, "collapse")
elif len(axes) == 0:
raise wt_exceptions.ValueError(
"Axis '{}' is a single point, cannot compute gradient".format(axis)
)
axis_index = axes[0]
else:
raise wt_exceptions.TypeError("axis: expected {int, str}, got %s" % type(axis))
channel_index = wt_kit.get_index(self.channel_names, channel)
channel = self.channel_names[channel_index]
if self[channel].shape[axis_index] == 1:
raise wt_exceptions.ValueError(
"Channel '{}' has a single point along Axis '{}', cannot compute gradient".format(
channel, axis
)
)
rtype = np.result_type(self[channel].dtype, float)
new = self.create_channel(
"{}_{}_gradient".format(channel, axis),
values=np.empty(self[channel].shape, dtype=rtype),
)
channel = self[channel]
if axis == axis_index:
new[:] = np.gradient(channel[:], axis=axis_index)
else:
new[:] = np.gradient(channel[:], self[axis].points, axis=axis_index)
def moment(self, axis, channel=0, moment=1, *, resultant=None):
"""Take the nth moment the dataset along one axis, adding lower rank channels.
New channels have names ``<channel name>_<axis name>_moment_<moment num>``.
Moment 0 is the integral of the slice.
Moment 1 is the weighted average or "Center of Mass", normalized by the integral
Moment 2 is the variance, the central moment about the center of mass,
normalized by the integral
Moments 3+ are central moments about the center of mass, normalized by the integral
and by the standard deviation to the power of the moment.
Moments, especially higher order moments, are susceptible to noise and baseline.
It is recommended when used with real data to use :meth:`WrightTools.data.Channel.clip`
in conjunction with moments to reduce effects of noise.
Parameters
----------
axis : int or str
The axis to take the moment along.
If given as an integer, the axis with that index is used.
If given as a string, the axis with that name is used.
The axis must exist, and be a 1D array-aligned axis.
(i.e. have a shape with a single value which is not ``1``)
The collapsed axis must be monotonic to produce correct results.
The axis to collapse along is inferred from the shape of the axis.
channel : int or str
The channel to take the moment.
If given as an integer, the channel with that index is used.
If given as a string, the channel with that name is used.
The channel must have values along the axis
(i.e. its shape must not be ``1`` in the dimension for which the axis is not ``1``)
Default is 0, the first channel.
moment : int or tuple of int
The moments to take.
One channel will be created for each number given.
Default is 1, the center of mass.
resultant : tuple of int
The resultant shape after the moment operation.
By default, it is intuited by the axis along which the moment is being taken.
This default only works if that axis is 1D, so resultant is required if a
multidimensional axis is passed as the first argument.
The requirement of monotonicity applies on a per pixel basis.
See Also
--------
collapse
Reduce dimensionality by some mathematical operation
clip
Set values above/below a threshold to a particular value
WrightTools.kit.joint_shape
Useful for setting `resultant` kwarg based off of axes not collapsed.
"""
# get axis index --------------------------------------------------------------------------
axis_index = None
if resultant is not None:
for i, (s, r) in enumerate(zip(wt_kit.joint_shape(*self.axes), resultant)):
if s != r and r == 1 and axis_index is None:
axis_index = i
elif s == r:
continue
else:
raise wt_exceptions.ValueError(
f"Invalid resultant shape '{resultant}' for shape {wt_kit.joint_shape(*self.axes)}. "
+ "Consider using `wt.kit.joint_shape` to join non-collapsed axes."
)
index = wt_kit.get_index(self.axis_names, axis)
if axis_index is None:
axes = [i for i in range(self.ndim) if self.axes[index].shape[i] > 1]
if len(axes) > 1:
raise wt_exceptions.MultidimensionalAxisError(axis, "moment")
elif len(axes) == 0:
raise wt_exceptions.ValueError(
"Axis {} is a single point, cannot compute moment".format(axis)
)
axis_index = axes[0]
warnings.warn("moment", category=wt_exceptions.EntireDatasetInMemoryWarning)
channel_index = wt_kit.get_index(self.channel_names, channel)
channel = self.channel_names[channel_index]
if self[channel].shape[axis_index] == 1:
raise wt_exceptions.ValueError(
"Channel '{}' has a single point along Axis '{}', cannot compute moment".format(
channel, axis
)
)
new_shape = list(self[channel].shape)
new_shape[axis_index] = 1
channel = self[channel]
axis_inp = axis
axis = self.axes[index]
x = axis[:]
if np.any(np.isnan(x)):
raise wt_exceptions.ValueError("Axis '{}' includes NaN".format(axis_inp))
y = np.nan_to_num(channel[:])
try:
moments = tuple(moment)
except TypeError:
moments = (moment,)
multiplier = 1
if 0 in moments:
# May be possible to optimize, probably doesn't need the sum
# only matters for integral, all others normalize by integral
multiplier = np.sign(
np.sum(np.diff(x, axis=axis_index), axis=axis_index, keepdims=True)
)
for moment in moments:
about = 0
norm = 1
if moment > 0:
norm = np.trapz(y, x, axis=axis_index)
norm = np.array(norm)
norm.shape = new_shape
if moment > 1:
about = np.trapz(x * y, x, axis=axis_index)
about = np.array(about)
about.shape = new_shape
about /= norm
if moment > 2:
sigma = np.trapz((x - about) ** 2 * y, x, axis=axis_index)
sigma = np.array(sigma)
sigma.shape = new_shape
sigma /= norm
sigma **= 0.5
norm *= sigma ** moment
values = np.trapz((x - about) ** moment * y, x, axis=axis_index)
values = np.array(values)
values.shape = new_shape
values /= norm
if moment == 0:
values *= multiplier
self.create_channel(
"{}_{}_{}_{}".format(channel.natural_name, axis_inp, "moment", moment),
values=values,
)
def collapse(self, axis, method="sum"):
"""Collapse the dataset along one axis, adding lower rank channels.
New channels have names ``<channel name>_<axis name>_<method>``.
Parameters
----------
axis : int or str
The axis to collapse along.
If given as an integer, the axis in the underlying array is used.
If given as a string, the axis must exist, and be a 1D array-aligned axis.
(i.e. have a shape with a single value which is not ``1``)
The axis to collapse along is inferred from the shape of the axis.
method : {'average', 'sum', 'max', 'min'} (optional)
The method of collapsing the given axis. Method may also be list
of methods corresponding to the channels of the object. Default
is sum. NaNs are ignored.
Can also be a list, allowing for different treatment for varied channels.
In this case, None indicates that no change to that channel should occur.
See Also
--------
chop
Divide the dataset into its lower-dimensionality components.
split
Split the dataset while maintaining its dimensionality.
moment
Take the moment along a particular axis
"""
if method in ("int", "integrate"):
warnings.warn(
"integrate method of collapse is deprecated, use moment(moment=0) instead",
wt_exceptions.VisibleDeprecationWarning,
)
for channel in self.channel_names:
try:
self.moment(axis, channel, moment=0)
self.rename_channels(
**{self.channel_names[-1]: f"{channel}_{axis}_{method}"}, verbose=False
)
except wt_exceptions.ValueError:
pass # may have some channels which fail, do so silently
return
# get axis index --------------------------------------------------------------------------
if isinstance(axis, int):
axis_index = axis
elif isinstance(axis, str):
index = self.axis_names.index(axis)
axes = [i for i in range(self.ndim) if self.axes[index].shape[i] > 1]
if len(axes) > 1:
raise wt_exceptions.MultidimensionalAxisError(axis, "collapse")
elif len(axes) == 0:
raise wt_exceptions.ValueError(
"Axis {} is a single point, cannot collapse".format(axis)
)
axis_index = axes[0]
else:
raise wt_exceptions.TypeError("axis: expected {int, str}, got %s" % type(axis))
new_shape = list(self.shape)
new_shape[axis_index] = 1
func = {
"sum": np.nansum,
"max": np.nanmax,
"maximum": np.nanmax,
"min": np.nanmin,
"minimum": np.nanmin,
"ave": np.nanmean,
"average": np.nanmean,
"mean": np.nanmean,
}
# methods ---------------------------------------------------------------------------------
if isinstance(method, str):
methods = [method for _ in self.channels]
if isinstance(method, list):
if len(method) == len(self.channels):
methods = method
else:
raise wt_exceptions.ValueError(
"method argument must have same number of elements as there are channels"
)
for m in methods:
if m not in func.keys():
raise wt_exceptions.ValueError("method '{}' not recognized".format(m))
warnings.warn("collapse", category=wt_exceptions.EntireDatasetInMemoryWarning)
# collapse --------------------------------------------------------------------------------
for method, channel in zip(methods, self.channel_names):
if method is None:
continue
if self[channel].shape[axis_index] == 1:
continue # Cannot collapse any further, don't clutter data object
new_shape = list(self[channel].shape)
new_shape[axis_index] = 1
rtype = self[channel].dtype
if method in ["ave", "average", "mean"]:
rtype = np.result_type(self[channel].dtype, float)
new = self.create_channel(
"{}_{}_{}".format(channel, axis, method),
values=np.empty(new_shape, dtype=rtype),
units=self[channel].units,
)
new[:] = func[method](self[channel], axis=axis_index, keepdims=True)
def convert(self, destination_units, *, convert_variables=False, verbose=True):
"""Convert all compatable axes and constants to given units.
Parameters
----------
destination_units : str
Destination units.
convert_variables : boolean (optional)
Toggle conversion of stored arrays. Default is False
verbose : bool (optional)
Toggle talkback. Default is True.
See Also
--------
Axis.convert
Convert a single axis object to compatable units. Call on an
axis object in data.axes.
"""
# apply to all compatible axes
for axis in self.axes:
if wt_units.is_valid_conversion(axis.units, destination_units):
orig = axis.units
axis.convert(destination_units, convert_variables=convert_variables)
if verbose:
print(
"axis {} converted from {} to {}".format(
axis.expression, orig, destination_units
)
)
# apply to all compatible constants
for constant in self.constants:
if wt_units.is_valid_conversion(constant.units, destination_units):
orig = constant.units
constant.convert(destination_units, convert_variables=convert_variables)
if verbose:
print(
"constant {} converted from {} to {}".format(
constant.expression, orig, destination_units
)
)
if convert_variables:
for var in self.variables:
if wt_units.is_valid_conversion(var.units, destination_units):
orig = var.units
var.convert(destination_units)
if verbose:
print(
"variable {} converted from {} to {}".format(
var.natural_name, orig, destination_units
)
)
self._on_axes_updated()
self._on_constants_updated()
def create_channel(
self, name, values=None, *, shape=None, units=None, dtype=None, **kwargs
) -> Channel:
"""Append a new channel.
Parameters
----------
name : string
Unique name for this channel.
values : array (optional)
Array. If None, an empty array equaling the data shape is
created. Default is None.
shape : tuple of int
Shape to use. Must broadcast with the full shape.
Only used if `values` is None.
Default is the full shape of self.
units : string (optional)
Channel units. Default is None.
dtype : numpy.dtype (optional)
dtype to use for dataset, default is np.float64.
Only used if `values` is None.
kwargs : dict
Additional keyword arguments passed to Channel instantiation.
Returns
-------
Channel
Created channel.
"""
if name in self.channel_names:
warnings.warn(name, wt_exceptions.ObjectExistsWarning)
return self[name]
elif name in self.variable_names:
raise wt_exceptions.NameNotUniqueError(name)
require_kwargs = {"chunks": True}
if values is None:
if shape is None:
require_kwargs["shape"] = self.shape
else:
require_kwargs["shape"] = shape
if dtype is None:
require_kwargs["dtype"] = np.dtype(np.float64)
else:
require_kwargs["dtype"] = dtype
if require_kwargs["dtype"].kind in "fcmM":
require_kwargs["fillvalue"] = np.nan
else:
require_kwargs["fillvalue"] = 0
else:
require_kwargs["data"] = values
require_kwargs["shape"] = values.shape
require_kwargs["dtype"] = values.dtype
if np.prod(require_kwargs["shape"]) == 1:
require_kwargs["chunks"] = None
# create dataset
dataset_id = self.require_dataset(name=name, **require_kwargs).id
channel = Channel(self, dataset_id, units=units, **kwargs)
# finish
self.attrs["channel_names"] = np.append(self.attrs["channel_names"], name.encode())
return channel
def create_variable(
self, name, values=None, *, shape=None, units=None, dtype=None, **kwargs
) -> Variable:
"""Add new child variable.
Parameters
----------
name : string
Unique identifier.
values : array-like (optional)
Array to populate variable with. If None, an variable will be filled with NaN.
Default is None.
shape : tuple of int
Shape to use. must broadcast with the full shape.
Only used if `values` is None.
Default is the full shape of self.
units : string (optional)
Variable units. Default is None.
dtype : numpy.dtype (optional)
dtype to use for dataset, default is np.float64.
Only used if `values` is None.
kwargs
Additional kwargs to variable instantiation.
Returns
-------
WrightTools Variable
New child variable.
"""
if name in self.variable_names:
warnings.warn(name, wt_exceptions.ObjectExistsWarning)
return self[name]
elif name in self.channel_names:
raise wt_exceptions.NameNotUniqueError(name)
if values is None:
if shape is None:
shape = self.shape
if dtype is None:
dtype = np.dtype(np.float64)
if dtype.kind in "fcmM":
fillvalue = np.nan
else:
fillvalue = 0
else:
shape = values.shape
dtype = values.dtype
fillvalue = None
# create dataset
id = self.require_dataset(
name=name, data=values, shape=shape, dtype=dtype, fillvalue=fillvalue
).id
variable = Variable(self, id, units=units, **kwargs)
# finish
self._variables = None
self.attrs["variable_names"] = np.append(self.attrs["variable_names"], name.encode())
return variable
def get_nadir(self, channel=0) -> tuple:
"""Get the coordinates, in units, of the minimum in a channel.
Parameters
----------
channel : int or str (optional)
Channel. Default is 0.
Returns
-------
generator of numbers
Coordinates in units for each axis.
"""
# get channel
if isinstance(channel, int):
channel_index = channel
elif isinstance(channel, str):
channel_index = self.channel_names.index(channel)
else:
raise TypeError("channel: expected {int, str}, got %s" % type(channel))
channel = self.channels[channel_index]
# get indicies
idx = channel.argmin()
# finish
return tuple(a[idx] for a in self._axes)
def get_zenith(self, channel=0) -> tuple:
"""Get the coordinates, in units, of the maximum in a channel.
Parameters
----------
channel : int or str (optional)
Channel. Default is 0.
Returns
-------
generator of numbers
Coordinates in units for each axis.
"""
# get channel
if isinstance(channel, int):
channel_index = channel
elif isinstance(channel, str):
channel_index = self.channel_names.index(channel)
else:
raise TypeError("channel: expected {int, str}, got %s" % type(channel))
channel = self.channels[channel_index]
# get indicies
idx = channel.argmax()
# finish
return tuple(a[idx] for a in self._axes)
def heal(self, channel=0, method="linear", fill_value=np.nan, verbose=True):
"""
Remove nans from channel using interpolation.
Parameters
----------
channel : int or str (optional)
Channel to heal. Default is 0.
method : {'linear', 'nearest', 'cubic'} (optional)
The interpolation method. Note that cubic interpolation is only
possible for 1D and 2D data. See `griddata`__ for more information.
Default is linear.
fill_value : number-like (optional)
The value written to pixels that cannot be filled by interpolation.
Default is nan.
verbose : bool (optional)
Toggle talkback. Default is True.
__ http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html
.. note:: Healing may take several minutes for large datasets.
Interpolation time goes as nearest, linear, then cubic.
"""
warnings.warn("heal", category=wt_exceptions.EntireDatasetInMemoryWarning)
timer = wt_kit.Timer(verbose=False)
with timer:
# channel
if isinstance(channel, int):
channel_index = channel
elif isinstance(channel, str):
channel_index = self.channel_names.index(channel)
else:
raise TypeError("channel: expected {int, str}, got %s" % type(channel))
channel = self.channels[channel_index]
values = self.channels[channel_index][:]
points = [axis[:] for axis in self._axes]
xi = tuple(np.meshgrid(*points, indexing="ij"))
# 'undo' gridding
arr = np.zeros((len(self._axes) + 1, values.size))
for i in range(len(self._axes)):
arr[i] = xi[i].flatten()
arr[-1] = values.flatten()
# remove nans
arr = arr[:, ~np.isnan(arr).any(axis=0)]
# grid data wants tuples
tup = tuple([arr[i] for i in range(len(arr) - 1)])
# grid data
out = griddata(tup, arr[-1], xi, method=method, fill_value=fill_value)
self.channels[channel_index][:] = out
# print
if verbose:
print(
"channel {0} healed in {1} seconds".format(
channel.name, np.around(timer.interval, decimals=3)
)
)
def level(self, channel, axis, npts, *, verbose=True):
"""Subtract the average value of npts at the edge of a given axis.
Parameters
----------
channel : int or str
Channel to level.
axis : int
Axis to level along.
npts : int
Number of points to average for each slice. Positive numbers
take points at leading indicies and negative numbers take points
at trailing indicies.
verbose : bool (optional)
Toggle talkback. Default is True.
"""
warnings.warn("level", category=wt_exceptions.EntireDatasetInMemoryWarning)
channel_index = wt_kit.get_index(self.channel_names, channel)
channel = self.channels[channel_index]
# verify npts not zero
npts = int(npts)
if npts == 0:
raise wt_exceptions.ValueError("npts must not be zero")
# get subtrahend
ss = [slice(None)] * self.ndim
if npts > 0:
ss[axis] = slice(0, npts, None)
else:
ss[axis] = slice(npts, None, None)
subtrahend = np.nanmean(channel[ss], axis=axis)
if self.ndim > 1:
subtrahend = np.expand_dims(subtrahend, axis=axis)
# level
channel -= subtrahend
# finish
channel._null = 0
if verbose:
print("channel {0} leveled along axis {1}".format(channel.natural_name, axis))
def map_variable(
self, variable, points, input_units="same", *, name=None, parent=None, verbose=True
) -> "Data":
"""Map points of an axis to new points using linear interpolation.
Out-of-bounds points are written nan.
Parameters
----------
variable : string
The variable to map onto.
points : array-like or int
If array, the new points. If int, new points will have the same
limits, with int defining the number of evenly spaced points
between.
input_units : str (optional)
The units of the new points. Default is same, which assumes
the new points have the same units as the axis.
name : string (optional)
The name of the new data object. If None, generated from
natural_name. Default is None.
parent : WrightTools.Collection (optional)
Parent of new data object. If None, data is made at root of a
new temporary file.
verbose : bool (optional)
Toggle talkback. Default is True.
Returns
-------
WrightTools.Data
New data object.
"""
# get variable index
variable_index = wt_kit.get_index(self.variable_names, variable)
variable = self.variables[variable_index]
# get points
if isinstance(points, int):
points = np.linspace(variable.min(), variable.max(), points)
points = np.array(points)
# points dimensionality
if points.ndim < variable.ndim:
for i, d in enumerate(variable.shape):
if d == 1:
points = np.expand_dims(points, axis=i)
# convert points
if input_units == "same":
pass
else:
points = wt_units.converter(points, input_units, variable.units)
# construct new data object
special = ["name", "axes", "constants", "channel_names", "variable_names"]
kwargs = {k: v for k, v in self.attrs.items() if k not in special}
if name is None:
name = "{0}_{1}_mapped".format(self.natural_name, variable.natural_name)
kwargs["name"] = name
kwargs["parent"] = parent
out = Data(**kwargs)
# mapped variable
values = points
out.create_variable(values=values, **variable.attrs)
# orthogonal variables
for v in self.variables:
if wt_kit.orthogonal(v.shape, variable.shape):
out.create_variable(values=v[:], **v.attrs)
out.transform(*self.axis_expressions)
# interpolate
if self.ndim == 1:
def interpolate(dataset, points):
function = scipy.interpolate.interp1d(variable[:], dataset[:], bounds_error=False)
return function(points)
else:
pts = np.array([a.full.flatten() for a in self.axes]).T
out_pts = np.array([a.full.flatten() for a in out.axes]).T
def interpolate(dataset, points):
values = dataset.full.flatten()
function = scipy.interpolate.LinearNDInterpolator(pts, values, rescale=True)
new = function(out_pts)
new.shape = out.shape
return new
for v in self.variables:
if v.natural_name not in out.variable_names:
out.create_variable(values=interpolate(v, points), **v.attrs)
out.variable_names = self.variable_names # enforce old order
out._variables = None # force regeneration of variables @property
for channel in self.channels:
out.create_channel(values=interpolate(channel, points), **channel.attrs)
# finish
if verbose:
print("data mapped from {0} to {1}".format(self.shape, out.shape))
return out
def offset(
self,
points,
offsets,
along,
offset_axis,
units="same",
offset_units="same",
mode="valid",
method="linear",
verbose=True,
):
"""Offset one axis based on another axis' values.
Useful for correcting instrumental artifacts such as zerotune.
Parameters
----------
points : 1D array-like
Points.
offsets : 1D array-like
Offsets.
along : str or int
Axis that points array lies along.
offset_axis : str or int
Axis to offset using offsets.
units : str (optional)
Units of points array.
offset_units : str (optional)
Units of offsets aray.
mode : {'valid', 'full', 'old'} (optional)
Define how far the new axis will extend. Points outside of valid
interpolation range will be written nan.
method : {'linear', 'nearest', 'cubic'} (optional)
The interpolation method. Note that cubic interpolation is only
possible for 1D and 2D data. See `griddata`__ for more information.
Default is linear.
verbose : bool (optional)
Toggle talkback. Default is True.
__ http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html
>>> points # an array of w1 points
>>> offsets # an array of d1 corrections
>>> data.offset(points, offsets, 'w1', 'd1')
"""
raise NotImplementedError
# axis ------------------------------------------------------------------------------------
if isinstance(along, int):
axis_index = along
elif isinstance(along, str):
axis_index = self.axis_names.index(along)
else:
raise TypeError("along: expected {int, str}, got %s" % type(along))
axis = self._axes[axis_index]
# values & points -------------------------------------------------------------------------
# get values, points, units
if units == "same":
input_units = axis.units
else:
input_units = units
# check offsets is 1D or 0D
if len(offsets.shape) == 1:
pass
else:
raise RuntimeError("values must be 1D or 0D in offset!")
# check if units is compatible, convert
dictionary = getattr(wt_units, axis.units_kind)
if input_units in dictionary.keys():
pass
else:
raise RuntimeError("units incompatible in offset!")
points = wt_units.converter(points, input_units, axis.units)
# create correction array
function = interp1d(points, offsets, bounds_error=False)
corrections = function(axis[:])
# remove nans
finite_indicies = np.where(np.isfinite(corrections))[0]
left_pad_width = finite_indicies[0]
right_pad_width = len(corrections) - finite_indicies[-1] - 1
corrections = np.pad(
corrections[np.isfinite(corrections)],
(int(left_pad_width), int(right_pad_width)),
mode="edge",
)
# do correction ---------------------------------------------------------------------------
# transpose so axis is last
transpose_order = np.arange(len(self._axes))
transpose_order[axis_index] = len(self._axes) - 1
transpose_order[-1] = axis_index
self.transpose(transpose_order, verbose=False)
# get offset axis index
if isinstance(offset_axis, int):
offset_axis_index = offset_axis
elif isinstance(offset_axis, str):
offset_axis_index = self.axis_names.index(offset_axis)
else:
raise TypeError("offset_axis: expected {int, str}, got %s" % type(offset_axis))
# new points
new_points = [a[:] for a in self._axes]
old_offset_axis_points = self._axes[offset_axis_index][:]
spacing = abs(
(old_offset_axis_points.max() - old_offset_axis_points.min())
/ float(len(old_offset_axis_points))
)
if mode == "old":
new_offset_axis_points = old_offset_axis_points
elif mode == "valid":
_max = old_offset_axis_points.max() + corrections.min()
_min = old_offset_axis_points.min() + corrections.max()
n = int(abs(np.ceil((_max - _min) / spacing)))
new_offset_axis_points = np.linspace(_min, _max, n)
elif mode == "full":
_max = old_offset_axis_points.max() + corrections.max()
_min = old_offset_axis_points.min() + corrections.min()
            n = int(np.ceil((_max - _min) / spacing))
new_offset_axis_points = np.linspace(_min, _max, n)
new_points[offset_axis_index] = new_offset_axis_points
new_xi = tuple(np.meshgrid(*new_points, indexing="ij"))
xi = tuple(np.meshgrid(*[a[:] for a in self._axes], indexing="ij"))
for channel in self.channels:
# 'undo' gridding
arr = np.zeros((len(self._axes) + 1, channel[:].size))
for i in range(len(self._axes)):
arr[i] = xi[i].flatten()
arr[-1] = channel[:].flatten()
# do corrections
corrections = list(corrections)
corrections = corrections * int((len(arr[0]) / len(corrections)))
arr[offset_axis_index] += corrections
# grid data
tup = tuple([arr[i] for i in range(len(arr) - 1)])
# note that rescale is crucial in this operation
out = griddata(tup, arr[-1], new_xi, method=method, fill_value=np.nan, rescale=True)
channel[:] = out
self._axes[offset_axis_index][:] = new_offset_axis_points
# transpose out
self.transpose(transpose_order, verbose=False)
def print_tree(self, *, verbose=True):
"""Print a ascii-formatted tree representation of the data contents."""
print("{0} ({1})".format(self.natural_name, self.filepath))
self._print_branch("", depth=0, verbose=verbose)
def prune(self, keep_channels=True, *, verbose=True):
"""Remove unused variables and (optionally) channels from the Data object.
Unused variables are those that are not included in either axes or constants.
        Unused channels are those not specified in keep_channels (when keep_channels is False, only the first channel is kept).
Parameters
----------
keep_channels : boolean or int or str or tuple
If False, removes all but the first channel.
If int or str, removes all but that index/name channel.
If tuple, removes all channels except those in the tuple by index or name.
Default is True: do not delete channels
verbose : boolean
Toggle talkback. Default is True.
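        Examples
        --------
        Illustrative sketch only; ``data`` and the channel names here are
        hypothetical, not part of this module.
        >>> data.prune(keep_channels=("signal", "reference"))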
"""
for v in self.variables:
for var in wt_kit.flatten_list([ax.variables for ax in self._axes + self._constants]):
if v == var:
break
else:
self.remove_variable(v.natural_name, implied=False, verbose=verbose)
if keep_channels is not True:
try:
if isinstance(keep_channels, str):
raise TypeError
indexes = tuple(keep_channels)
except TypeError:
indexes = (keep_channels,)
for i, ch in enumerate(self.channels):
                if i not in indexes and ch.natural_name not in indexes:
self.remove_channel(ch.natural_name, verbose=verbose)
def remove_channel(self, channel, *, verbose=True):
"""Remove channel from data.
Parameters
----------
channel : int or str
Channel index or name to remove.
verbose : boolean (optional)
Toggle talkback. Default is True.
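        Examples
        --------
        Illustrative sketch only; ``data`` and the channel name are hypothetical.
        >>> data.remove_channel("reference")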
"""
channel_index = wt_kit.get_index(self.channel_names, channel)
new = list(self.channel_names)
name = new.pop(channel_index)
del self[name]
self.channel_names = new
if verbose:
print("channel {0} removed".format(name))
def remove_variable(self, variable, *, implied=True, verbose=True):
"""Remove variable from data.
Parameters
----------
variable : int or str
Variable index or name to remove.
implied : boolean (optional)
Toggle deletion of other variables that start with the same
name. Default is True.
verbose : boolean (optional)
Toggle talkback. Default is True.
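        Examples
        --------
        Illustrative sketch only; ``data`` and the variable name are hypothetical
        (the variable must not be used by any current axis).
        >>> data.remove_variable("d2", implied=False)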
"""
if isinstance(variable, int):
variable = self.variable_names[variable]
# find all of the implied variables
removed = []
if implied:
for n in self.variable_names:
if n.startswith(variable):
removed.append(n)
else:
removed = [variable]
# check that axes will not be ruined
for n in removed:
for a in self._axes:
if n in [v.natural_name for v in a.variables]:
message = "{0} is contained in axis {1}".format(n, a.expression)
raise RuntimeError(message)
for c in self._constants:
if n in [v.natural_name for v in c.variables]:
warnings.warn(
"Variable being removed used in a constant",
wt_exceptions.WrightToolsWarning,
)
# do removal
for n in removed:
variable_index = wt_kit.get_index(self.variable_names, n)
new = list(self.variable_names)
name = new.pop(variable_index)
del self[name]
self.variable_names = new
self._variables = None
# finish
if verbose:
print("{0} variable(s) removed:".format(len(removed)))
for n in removed:
print(" {0}".format(n))
def rename_channels(self, *, verbose=True, **kwargs):
"""Rename a set of channels.
Parameters
----------
kwargs
Keyword arguments of the form current:'new'.
verbose : boolean (optional)
Toggle talkback. Default is True
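        Examples
        --------
        Illustrative sketch only; ``data`` and the channel names are hypothetical.
        >>> data.rename_channels(signal="signal_mean", verbose=False)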
"""
# ensure that items will remain unique
changed = kwargs.keys()
for k, v in kwargs.items():
if v not in changed and v in self.keys():
raise wt_exceptions.NameNotUniqueError(v)
# compile references to items that are changing
new = {}
for k, v in kwargs.items():
obj = self[k]
index = self.channel_names.index(k)
# rename
new[v] = obj, index
Group._instances.pop(obj.fullpath, None)
obj.natural_name = str(v)
# remove old references
del self[k]
# apply new references
names = list(self.channel_names)
for v, value in new.items():
obj, index = value
self[v] = obj
names[index] = v
self.channel_names = names
# finish
if verbose:
print("{0} channel(s) renamed:".format(len(kwargs)))
for k, v in kwargs.items():
print(" {0} --> {1}".format(k, v))
def rename_variables(self, *, implied=True, verbose=True, **kwargs):
"""Rename a set of variables.
Parameters
----------
kwargs
Keyword arguments of the form current:'new'.
implied : boolean (optional)
Toggle inclusion of other variables that start with the same
name. Default is True.
verbose : boolean (optional)
Toggle talkback. Default is True
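        Examples
        --------
        Illustrative sketch only; ``data`` and the variable names are
        hypothetical. With implied=True, variables such as ``w1_points`` would
        be renamed alongside ``w1``.
        >>> data.rename_variables(w1="wm")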
"""
# find all of the implied variables
kwargs = collections.OrderedDict(kwargs)
if implied:
new = collections.OrderedDict()
for k, v in kwargs.items():
for n in self.variable_names:
if n.startswith(k):
new[n] = n.replace(k, v, 1)
kwargs = new
# ensure that items will remain unique
changed = kwargs.keys()
for k, v in kwargs.items():
if v not in changed and v in self.keys():
raise wt_exceptions.NameNotUniqueError(v)
# compile references to items that are changing
new = {}
for k, v in kwargs.items():
obj = self[k]
index = self.variable_names.index(k)
# rename
new[v] = obj, index
Group._instances.pop(obj.fullpath, None)
obj.natural_name = str(v)
# remove old references
del self[k]
# apply new references
names = list(self.variable_names)
for v, value in new.items():
obj, index = value
self[v] = obj
names[index] = v
self.variable_names = names
units = self.units
new = list(self.axis_expressions)
for i, v in enumerate(kwargs.keys()):
for j, n in enumerate(new):
new[j] = n.replace(v, "{%i}" % i)
for i, n in enumerate(new):
new[i] = n.format(*kwargs.values())
self.transform(*new)
for a, u in zip(self._axes, units):
a.convert(u)
units = self.constant_units
new = list(self.constant_expressions)
for i, v in enumerate(kwargs.keys()):
for j, n in enumerate(new):
new[j] = n.replace(v, "{%i}" % i)
for i, n in enumerate(new):
new[i] = n.format(*kwargs.values())
self.set_constants(*new)
for c, u in zip(self._constants, units):
c.convert(u)
# finish
if verbose:
print("{0} variable(s) renamed:".format(len(kwargs)))
for k, v in kwargs.items():
print(" {0} --> {1}".format(k, v))
def share_nans(self):
"""Share not-a-numbers between all channels.
If any channel is nan at a given index, all channels will be nan
at that index after this operation.
Uses the share_nans method found in wt.kit.
"""
def f(_, s, channels):
outs = wt_kit.share_nans(*[c[s] for c in channels])
for c, o in zip(channels, outs):
c[s] = o
self.channels[0].chunkwise(f, self.channels)
def smooth(self, factors, channel=None, verbose=True) -> "Data":
"""Smooth a channel using an n-dimenional kaiser window.
Note, all arrays are loaded into memory.
        For more info see the `Kaiser_window`__ Wikipedia entry.
__ https://en.wikipedia.org/wiki/Kaiser_window
Parameters
----------
factors : int or list of int
The smoothing factor. You may provide a list of smoothing factors
for each axis.
channel : int or str or None (optional)
The channel to smooth. If None, all channels will be smoothed.
Default is None.
verbose : bool (optional)
Toggle talkback. Default is True.
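        Examples
        --------
        Illustrative sketch only; ``data`` and the channel name are hypothetical.
        >>> data.smooth(2)  # smooth every channel along every axis
        >>> data.smooth([2, 0], channel="signal")  # per-axis factors, one channel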
"""
warnings.warn("smooth", category=wt_exceptions.EntireDatasetInMemoryWarning)
# get factors -----------------------------------------------------------------------------
if isinstance(factors, list):
pass
else:
dummy = np.zeros(len(self._axes))
dummy[::] = factors
factors = list(dummy)
# get channels ----------------------------------------------------------------------------
if channel is None:
channels = self.channels
else:
if isinstance(channel, int):
channel_index = channel
elif isinstance(channel, str):
channel_index = self.channel_names.index(channel)
else:
raise TypeError("channel: expected {int, str}, got %s" % type(channel))
channels = [self.channels[channel_index]]
# smooth ----------------------------------------------------------------------------------
for channel in channels:
values = channel[:]
for axis_index in range(len(factors)):
factor = factors[axis_index]
# transpose so the axis of interest is last
transpose_order = range(len(values.shape))
# replace axis_index with zero
transpose_order = [
len(values.shape) - 1 if i == axis_index else i for i in transpose_order
]
transpose_order[len(values.shape) - 1] = axis_index
values = values.transpose(transpose_order)
# get kaiser window
beta = 5.0
w = np.kaiser(2 * factor + 1, beta)
# for all slices...
for index in np.ndindex(values[..., 0].shape):
current_slice = values[index]
temp_slice = np.pad(current_slice, int(factor), mode=str("edge"))
values[index] = np.convolve(temp_slice, w / w.sum(), mode=str("valid"))
# transpose out
values = values.transpose(transpose_order)
# return array to channel object
channel[:] = values
if verbose:
print("smoothed data")
def split(
self, expression, positions, *, units=None, parent=None, verbose=True
) -> wt_collection.Collection:
"""
Split the data object along a given expression, in units.
Parameters
----------
expression : int or str
The expression to split along. If given as an integer, the axis at that index
is used.
positions : number-type or 1D array-type
The position(s) to split at, in units.
units : str (optional)
The units of the given positions. Default is same, which assumes
input units are identical to first variable units.
parent : WrightTools.Collection (optional)
The parent collection in which to place the 'split' collection.
Default is a new Collection.
verbose : bool (optional)
Toggle talkback. Default is True.
Returns
-------
WrightTools.collection.Collection
A Collection of data objects.
The order of the objects is such that the axis points retain their original order.
See Also
--------
chop
Divide the dataset into its lower-dimensionality components.
collapse
Collapse the dataset along one axis.
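        Examples
        --------
        Illustrative sketch only; ``data``, the expression, and the units are
        hypothetical.
        >>> col = data.split("w1", [1520.0], units="wn")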
"""
# axis ------------------------------------------------------------------------------------
old_expr = self.axis_expressions
old_units = self.units
out = wt_collection.Collection(name="split", parent=parent)
if isinstance(expression, int):
if units is None:
units = self._axes[expression].units
expression = self._axes[expression].expression
elif isinstance(expression, str):
pass
else:
raise TypeError("expression: expected {int, str}, got %s" % type(expression))
self.transform(expression)
if units:
self.convert(units, verbose=False)
try:
positions = [-np.inf] + sorted(list(positions)) + [np.inf]
except TypeError:
positions = [-np.inf, positions, np.inf]
values = self._axes[0].full
masks = [(values >= lo) & (values < hi) for lo, hi in wt_kit.pairwise(positions)]
omasks = []
cuts = []
for mask in masks:
try:
omasks.append(wt_kit.mask_reduce(mask))
cuts.append([i == 1 for i in omasks[-1].shape])
# Ensure at least one axis is kept
if np.all(cuts[-1]):
cuts[-1][0] = False
except ValueError:
omasks.append(None)
cuts.append(None)
for i in range(len(positions) - 1):
out.create_data("split%03i" % i)
for var in self.variables:
for i, (imask, omask, cut) in enumerate(zip(masks, omasks, cuts)):
if omask is None:
# Zero length split
continue
omask = wt_kit.enforce_mask_shape(omask, var.shape)
omask.shape = tuple([s for s, c in zip(omask.shape, cut) if not c])
out_arr = np.full(omask.shape, np.nan)
imask = wt_kit.enforce_mask_shape(imask, var.shape)
out_arr[omask] = var[:][imask]
out[i].create_variable(values=out_arr, **var.attrs)
for ch in self.channels:
for i, (imask, omask, cut) in enumerate(zip(masks, omasks, cuts)):
if omask is None:
# Zero length split
continue
omask = wt_kit.enforce_mask_shape(omask, ch.shape)
omask.shape = tuple([s for s, c in zip(omask.shape, cut) if not c])
out_arr = np.full(omask.shape, np.nan)
imask = wt_kit.enforce_mask_shape(imask, ch.shape)
out_arr[omask] = ch[:][imask]
out[i].create_channel(values=out_arr, **ch.attrs)
if verbose:
for d in out.values():
try:
d.transform(expression)
except IndexError:
continue
print("split data into {0} pieces along <{1}>:".format(len(positions) - 1, expression))
for i, (lo, hi) in enumerate(wt_kit.pairwise(positions)):
new_data = out[i]
if new_data.shape == ():
print(" {0} : None".format(i))
else:
new_axis = new_data.axes[0]
print(
" {0} : {1:0.2f} to {2:0.2f} {3} {4}".format(
i, lo, hi, self.axes[0].units, new_axis.shape
)
)
for d in out.values():
try:
d.transform(*old_expr)
keep = []
keep_units = []
for ax, u in zip(d.axes, old_units):
if ax.size > 1:
keep.append(ax.expression)
keep_units.append(u)
else:
d.create_constant(ax.expression, verbose=False)
d.transform(*keep)
for ax, u in zip(d.axes, keep_units):
ax.convert(u)
except IndexError:
continue
tempax = Axis(d, expression)
if all(
np.all(
np.sum(~np.isnan(tempax.masked), axis=tuple(set(range(tempax.ndim)) - {j}))
<= 1
)
for j in range(tempax.ndim)
):
d.create_constant(expression, verbose=False)
self.transform(*old_expr)
for ax, u in zip(self.axes, old_units):
ax.convert(u)
return out
def transform(self, *axes, verbose=True):
"""Transform the data.
Parameters
----------
axes : strings
Expressions for the new set of axes.
verbose : boolean (optional)
Toggle talkback. Default is True
See Also
--------
set_constants
Similar method except for constants
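        Examples
        --------
        Illustrative sketch only; ``data`` and the expressions are hypothetical.
        >>> data.transform("w1", "w2")
        >>> data.transform("w1+w2", "d1")  # algebraic expressions are also allowed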
"""
# TODO: ensure that transform does not break data
# create
new = []
newt = "newt" in self.axis_expressions
current = {a.expression: a for a in self._axes}
for expression in axes:
axis = current.get(expression, Axis(self, expression))
new.append(axis)
self._axes = new
# units
for a in self._axes:
if a.units is None:
a.convert(a.variables[0].units)
# finish
self.flush()
self._on_axes_updated()
nownewt = "newt" in self.axis_expressions
if verbose and nownewt and not newt:
print("Look she turned me into a newt")
elif verbose and newt and not nownewt:
print("I got better")
def set_constants(self, *constants, verbose=True):
"""Set the constants associated with the data.
Parameters
----------
constants : str
Expressions for the new set of constants.
verbose : boolean (optional)
Toggle talkback. Default is True
See Also
--------
transform
Similar method except for axes.
create_constant
Add an individual constant.
remove_constant
Remove an individual constant.
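        Examples
        --------
        Illustrative sketch only; ``data`` and the expressions are hypothetical.
        >>> data.set_constants("w3", "d1")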
"""
# create
new = []
current = {c.expression: c for c in self._constants}
for expression in constants:
constant = current.get(expression, Constant(self, expression))
new.append(constant)
self._constants = new
# units
for c in self._constants:
if c.units is None:
c.convert(c.variables[0].units)
# finish
self.flush()
self._on_constants_updated()
def create_constant(self, expression, *, verbose=True):
"""Append a constant to the stored list.
Parameters
----------
expression : str
Expression for the new constant.
verbose : boolean (optional)
Toggle talkback. Default is True
See Also
--------
set_constants
Remove and replace all constants.
remove_constant
Remove an individual constant.
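        Examples
        --------
        Illustrative sketch only; ``data`` and the expression are hypothetical.
        >>> data.create_constant("d2", verbose=False)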
"""
if expression in self.constant_expressions:
wt_exceptions.ObjectExistsWarning.warn(expression)
return self.constants[self.constant_expressions.index(expression)]
constant = Constant(self, expression)
if constant.units is None:
constant.convert(constant.variables[0].units)
self._constants.append(constant)
self.flush()
self._on_constants_updated()
if verbose:
print("Constant '{}' added".format(constant.expression))
return constant
def remove_constant(self, constant, *, verbose=True):
"""Remove a constant from the stored list.
Parameters
----------
constant : str or Constant or int
Expression for the new constant.
verbose : boolean (optional)
Toggle talkback. Default is True
See Also
--------
set_constants
Remove and replace all constants.
create_constant
Add an individual constant.
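        Examples
        --------
        Illustrative sketch only; ``data`` and the expression are hypothetical.
        >>> data.remove_constant("d2")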
"""
if isinstance(constant, (str, int)):
constant_index = wt_kit.get_index(self.constant_expressions, constant)
elif isinstance(constant, Constant):
constant_index = wt_kit.get_index(self.constants, constant)
constant = self._constants[constant_index]
self._constants.pop(constant_index)
self.flush()
self._on_constants_updated()
if verbose:
print("Constant '{}' removed".format(constant.expression))
def zoom(self, factor, order=1, verbose=True):
"""Zoom the data array using spline interpolation of the requested order.
The number of points along each axis is increased by factor.
See `scipy ndimage`__ for more info.
__ http://docs.scipy.org/doc/scipy/reference/
generated/scipy.ndimage.interpolation.zoom.html
Parameters
----------
factor : float
The number of points along each axis will increase by this factor.
order : int (optional)
The order of the spline used to interpolate onto new points.
verbose : bool (optional)
Toggle talkback. Default is True.
"""
raise NotImplementedError
import scipy.ndimage
# axes
for axis in self._axes:
axis[:] = scipy.ndimage.interpolation.zoom(axis[:], factor, order=order)
# channels
for channel in self.channels:
channel[:] = scipy.ndimage.interpolation.zoom(channel[:], factor, order=order)
# return
if verbose:
print("data zoomed to new shape:", self.shape)
| 38.273668
| 109
| 0.541989
|
import collections
import operator
import functools
import warnings
import numpy as np
import h5py
import scipy
from scipy.interpolate import griddata, interp1d
from .._group import Group
from .. import collection as wt_collection
from .. import exceptions as wt_exceptions
from .. import kit as wt_kit
from .. import units as wt_units
from ._axis import Axis, identifier_to_operator
from ._channel import Channel
from ._constant import Constant
from ._variable import Variable
__all__ = ["Data"]
class Data(Group):
class_name = "Data"
def __init__(self, *args, **kwargs):
self._axes = []
self._constants = []
Group.__init__(self, *args, **kwargs)
for identifier in self.attrs.get("axes", []):
if hasattr(identifier, "decode"):
identifier = identifier.decode()
expression, units = identifier.split("{")
units = units.replace("}", "").strip()
if units == "None":
units = None
for i in identifier_to_operator.keys():
expression = expression.replace(i, identifier_to_operator[i])
            expression = expression.replace(" ", "")
            axis = Axis(self, expression, units)
self._axes.append(axis)
for identifier in self.attrs.get("constants", []):
if hasattr(identifier, "decode"):
identifier = identifier.decode()
expression, units = identifier.split("{")
units = units.replace("}", "").strip()
if units == "None":
units = None
for i in identifier_to_operator.keys():
expression = expression.replace(i, identifier_to_operator[i])
            expression = expression.replace(" ", "")
            const = Constant(self, expression, units)
self._constants.append(const)
self._current_axis_identities_in_natural_namespace = []
if self.file.mode is not None and self.file.mode != "r":
self._on_constants_updated()
self._on_axes_updated()
self.channel_names
self.source
self.variable_names
def __repr__(self) -> str:
return "<WrightTools.Data '{0}' {1} at {2}>".format(
self.natural_name, str(self.axis_names), "::".join([self.filepath, self.name])
)
@property
def axes(self) -> tuple:
return tuple(self._axes)
@property
def axis_expressions(self) -> tuple:
return tuple(a.expression for a in self._axes)
@property
def axis_names(self) -> tuple:
return tuple(a.natural_name for a in self._axes)
@property
def constants(self) -> tuple:
return tuple(self._constants)
@property
def constant_expressions(self) -> tuple:
return tuple(a.expression for a in self._constants)
@property
def constant_names(self) -> tuple:
return tuple(a.natural_name for a in self._constants)
@property
def channel_names(self) -> tuple:
if "channel_names" not in self.attrs.keys():
self.attrs["channel_names"] = np.array([], dtype="S")
return tuple(s.decode() for s in self.attrs["channel_names"])
@channel_names.setter
def channel_names(self, value):
self.attrs["channel_names"] = np.array(value, dtype="S")
@property
def channels(self) -> tuple:
return tuple(self[n] for n in self.channel_names)
@property
def datasets(self) -> tuple:
return tuple(v for _, v in self.items() if isinstance(v, h5py.Dataset))
@property
def kind(self):
if "kind" not in self.attrs.keys():
self.attrs["kind"] = "None"
value = self.attrs["kind"]
return value if not value == "None" else None
@property
def ndim(self) -> int:
try:
assert self._ndim is not None
except (AssertionError, AttributeError):
if len(self.variables) == 0:
self._ndim = 0
else:
self._ndim = self.variables[0].ndim
finally:
return self._ndim
@property
def shape(self) -> tuple:
try:
assert self._shape is not None
except (AssertionError, AttributeError):
self._shape = wt_kit.joint_shape(*self.variables)
finally:
return self._shape
@property
def size(self) -> int:
return functools.reduce(operator.mul, self.shape)
@property
def source(self):
if "source" not in self.attrs.keys():
self.attrs["source"] = "None"
value = self.attrs["source"]
return value if not value == "None" else None
@property
def units(self) -> tuple:
return tuple(a.units for a in self._axes)
@property
def constant_units(self) -> tuple:
return tuple(a.units for a in self._constants)
@property
def variable_names(self) -> tuple:
if "variable_names" not in self.attrs.keys():
self.attrs["variable_names"] = np.array([], dtype="S")
return tuple(s.decode() for s in self.attrs["variable_names"])
@variable_names.setter
def variable_names(self, value):
self.attrs["variable_names"] = np.array(value, dtype="S")
@property
def variables(self) -> tuple:
try:
assert self._variables is not None
except (AssertionError, AttributeError):
self._variables = [self[n] for n in self.variable_names]
finally:
return tuple(self._variables)
@property
def _leaf(self):
return "{0} {1}".format(self.natural_name, self.shape)
def _on_axes_updated(self):
self.attrs["axes"] = np.array([a.identity.encode() for a in self._axes], dtype="S")
while len(self._current_axis_identities_in_natural_namespace) > 0:
key = self._current_axis_identities_in_natural_namespace.pop(0)
try:
delattr(self, key)
except AttributeError:
                pass
        for a in self._axes:
key = a.natural_name
setattr(self, key, a)
self._current_axis_identities_in_natural_namespace.append(key)
def _on_constants_updated(self):
self.attrs["constants"] = np.array(
[a.identity.encode() for a in self._constants], dtype="S"
)
def _print_branch(self, prefix, depth, verbose):
def print_leaves(prefix, lis, vline=True):
for i, item in enumerate(lis):
if vline:
a = "│ "
else:
a = " "
if i + 1 == len(lis):
b = "└── "
else:
b = "├── "
s = prefix + a + b + "{0}: {1}".format(i, item._leaf)
print(s)
if verbose:
print(prefix + "├── axes")
print_leaves(prefix, self.axes)
print(prefix + "├── constants")
print_leaves(prefix, self.constants)
print(prefix + "├── variables")
print_leaves(prefix, self.variables)
print(prefix + "└── channels")
print_leaves(prefix, self.channels, vline=False)
else:
s = "axes: "
s += ", ".join(["{0} ({1})".format(a.expression, a.units) for a in self.axes])
print(prefix + "├── " + s)
s = "constants: "
s += ", ".join(
["{0} ({1} {2})".format(a.expression, a.value, a.units) for a in self.constants]
)
print(prefix + "├── " + s)
s = "channels: "
s += ", ".join(self.channel_names)
print(prefix + "└── " + s)
def bring_to_front(self, channel):
channel_index = wt_kit.get_index(self.channel_names, channel)
new = list(self.channel_names)
new.insert(0, new.pop(channel_index))
self.channel_names = new
def chop(self, *args, at={}, parent=None, verbose=True) -> wt_collection.Collection:
from ._axis import operators, operator_to_identifier
args = list(args)
for i, arg in enumerate(args):
if isinstance(arg, int):
args[i] = self._axes[arg].natural_name
elif isinstance(arg, str):
arg = arg.strip()
for op in operators:
arg = arg.replace(op, operator_to_identifier[op])
args[i] = wt_kit.string2identifier(arg)
for k in [ak for ak in at.keys() if type(ak) == str]:
for op in operators:
if op in k:
nk = k.replace(op, operator_to_identifier[op])
at[nk] = at[k]
at.pop(k)
k = nk
out = wt_collection.Collection(name="chop", parent=parent)
kept = args + [ak for ak in at.keys() if type(ak) == str]
kept_axes = [self._axes[self.axis_names.index(a)] for a in kept]
removed_axes = [a for a in self._axes if a not in kept_axes]
removed_shape = wt_kit.joint_shape(*removed_axes)
if removed_shape == ():
removed_shape = (1,) * self.ndim
removed_shape = list(removed_shape)
for i in at.keys():
if type(i) == int:
removed_shape[i] = 1
for ax in kept_axes:
if ax.shape.count(1) == ax.ndim - 1:
removed_shape[ax.shape.index(ax.size)] = 1
removed_shape = tuple(removed_shape)
i = 0
for idx in np.ndindex(removed_shape):
idx = np.array(idx, dtype=object)
idx[np.array(removed_shape) == 1] = slice(None)
for axis, point in at.items():
if type(axis) == int:
idx[axis] = point
continue
point, units = point
destination_units = self._axes[self.axis_names.index(axis)].units
point = wt_units.converter(point, units, destination_units)
axis_index = self.axis_names.index(axis)
axis = self._axes[axis_index]
idx_index = np.array(axis.shape) > 1
if np.sum(idx_index) > 1:
raise wt_exceptions.MultidimensionalAxisError("chop", axis.natural_name)
idx_index = list(idx_index).index(True)
idx[idx_index] = np.argmin(np.abs(axis[tuple(idx)] - point))
data = out.create_data(name="chop%03i" % i)
for v in self.variables:
kwargs = {}
kwargs["name"] = v.natural_name
kwargs["values"] = v[idx]
kwargs["units"] = v.units
kwargs["label"] = v.label
kwargs.update(v.attrs)
data.create_variable(**kwargs)
for c in self.channels:
kwargs = {}
kwargs["name"] = c.natural_name
kwargs["values"] = c[idx]
kwargs["units"] = c.units
kwargs["label"] = c.label
kwargs["signed"] = c.signed
kwargs.update(c.attrs)
data.create_channel(**kwargs)
new_axes = [a.expression for a in kept_axes if a.expression not in at.keys()]
new_axis_units = [a.units for a in kept_axes if a.expression not in at.keys()]
data.transform(*new_axes)
for const in self.constant_expressions:
data.create_constant(const, verbose=False)
for ax in self.axis_expressions:
if ax not in new_axes:
data.create_constant(ax, verbose=False)
for j, units in enumerate(new_axis_units):
data.axes[j].convert(units)
i += 1
out.flush()
if verbose:
print("chopped data into %d piece(s)" % len(out), "in", new_axes)
return out
def gradient(self, axis, *, channel=0):
if isinstance(axis, int):
axis_index = axis
elif isinstance(axis, str):
index = self.axis_names.index(axis)
axes = [i for i in range(self.ndim) if self.axes[index].shape[i] > 1]
if len(axes) > 1:
raise wt_exceptions.MultidimensionalAxisError(axis, "collapse")
elif len(axes) == 0:
raise wt_exceptions.ValueError(
"Axis '{}' is a single point, cannot compute gradient".format(axis)
)
axis_index = axes[0]
else:
raise wt_exceptions.TypeError("axis: expected {int, str}, got %s" % type(axis))
channel_index = wt_kit.get_index(self.channel_names, channel)
channel = self.channel_names[channel_index]
if self[channel].shape[axis_index] == 1:
raise wt_exceptions.ValueError(
"Channel '{}' has a single point along Axis '{}', cannot compute gradient".format(
channel, axis
)
)
rtype = np.result_type(self[channel].dtype, float)
new = self.create_channel(
"{}_{}_gradient".format(channel, axis),
values=np.empty(self[channel].shape, dtype=rtype),
)
channel = self[channel]
if axis == axis_index:
new[:] = np.gradient(channel[:], axis=axis_index)
else:
new[:] = np.gradient(channel[:], self[axis].points, axis=axis_index)
def moment(self, axis, channel=0, moment=1, *, resultant=None):
axis_index = None
if resultant is not None:
for i, (s, r) in enumerate(zip(wt_kit.joint_shape(*self.axes), resultant)):
if s != r and r == 1 and axis_index is None:
axis_index = i
elif s == r:
continue
else:
raise wt_exceptions.ValueError(
f"Invalid resultant shape '{resultant}' for shape {wt_kit.joint_shape(*self.axes)}. "
+ "Consider using `wt.kit.joint_shape` to join non-collapsed axes."
)
index = wt_kit.get_index(self.axis_names, axis)
if axis_index is None:
axes = [i for i in range(self.ndim) if self.axes[index].shape[i] > 1]
if len(axes) > 1:
raise wt_exceptions.MultidimensionalAxisError(axis, "moment")
elif len(axes) == 0:
raise wt_exceptions.ValueError(
"Axis {} is a single point, cannot compute moment".format(axis)
)
axis_index = axes[0]
warnings.warn("moment", category=wt_exceptions.EntireDatasetInMemoryWarning)
channel_index = wt_kit.get_index(self.channel_names, channel)
channel = self.channel_names[channel_index]
if self[channel].shape[axis_index] == 1:
raise wt_exceptions.ValueError(
"Channel '{}' has a single point along Axis '{}', cannot compute moment".format(
channel, axis
)
)
new_shape = list(self[channel].shape)
new_shape[axis_index] = 1
channel = self[channel]
axis_inp = axis
axis = self.axes[index]
x = axis[:]
if np.any(np.isnan(x)):
raise wt_exceptions.ValueError("Axis '{}' includes NaN".format(axis_inp))
y = np.nan_to_num(channel[:])
try:
moments = tuple(moment)
except TypeError:
moments = (moment,)
multiplier = 1
if 0 in moments:
# only matters for integral, all others normalize by integral
multiplier = np.sign(
np.sum(np.diff(x, axis=axis_index), axis=axis_index, keepdims=True)
)
for moment in moments:
about = 0
norm = 1
if moment > 0:
norm = np.trapz(y, x, axis=axis_index)
norm = np.array(norm)
norm.shape = new_shape
if moment > 1:
about = np.trapz(x * y, x, axis=axis_index)
about = np.array(about)
about.shape = new_shape
about /= norm
if moment > 2:
sigma = np.trapz((x - about) ** 2 * y, x, axis=axis_index)
sigma = np.array(sigma)
sigma.shape = new_shape
sigma /= norm
sigma **= 0.5
norm *= sigma ** moment
values = np.trapz((x - about) ** moment * y, x, axis=axis_index)
values = np.array(values)
values.shape = new_shape
values /= norm
if moment == 0:
values *= multiplier
self.create_channel(
"{}_{}_{}_{}".format(channel.natural_name, axis_inp, "moment", moment),
values=values,
)
def collapse(self, axis, method="sum"):
if method in ("int", "integrate"):
warnings.warn(
"integrate method of collapse is deprecated, use moment(moment=0) instead",
wt_exceptions.VisibleDeprecationWarning,
)
for channel in self.channel_names:
try:
self.moment(axis, channel, moment=0)
self.rename_channels(
**{self.channel_names[-1]: f"{channel}_{axis}_{method}"}, verbose=False
)
except wt_exceptions.ValueError:
pass # may have some channels which fail, do so silently
return
# get axis index --------------------------------------------------------------------------
if isinstance(axis, int):
axis_index = axis
elif isinstance(axis, str):
index = self.axis_names.index(axis)
axes = [i for i in range(self.ndim) if self.axes[index].shape[i] > 1]
if len(axes) > 1:
raise wt_exceptions.MultidimensionalAxisError(axis, "collapse")
elif len(axes) == 0:
raise wt_exceptions.ValueError(
"Axis {} is a single point, cannot collapse".format(axis)
)
axis_index = axes[0]
else:
raise wt_exceptions.TypeError("axis: expected {int, str}, got %s" % type(axis))
new_shape = list(self.shape)
new_shape[axis_index] = 1
func = {
"sum": np.nansum,
"max": np.nanmax,
"maximum": np.nanmax,
"min": np.nanmin,
"minimum": np.nanmin,
"ave": np.nanmean,
"average": np.nanmean,
"mean": np.nanmean,
}
# methods ---------------------------------------------------------------------------------
if isinstance(method, str):
methods = [method for _ in self.channels]
if isinstance(method, list):
if len(method) == len(self.channels):
methods = method
else:
raise wt_exceptions.ValueError(
"method argument must have same number of elements as there are channels"
)
for m in methods:
if m not in func.keys():
raise wt_exceptions.ValueError("method '{}' not recognized".format(m))
warnings.warn("collapse", category=wt_exceptions.EntireDatasetInMemoryWarning)
# collapse --------------------------------------------------------------------------------
for method, channel in zip(methods, self.channel_names):
if method is None:
continue
if self[channel].shape[axis_index] == 1:
continue # Cannot collapse any further, don't clutter data object
new_shape = list(self[channel].shape)
new_shape[axis_index] = 1
rtype = self[channel].dtype
if method in ["ave", "average", "mean"]:
rtype = np.result_type(self[channel].dtype, float)
new = self.create_channel(
"{}_{}_{}".format(channel, axis, method),
values=np.empty(new_shape, dtype=rtype),
units=self[channel].units,
)
new[:] = func[method](self[channel], axis=axis_index, keepdims=True)
def convert(self, destination_units, *, convert_variables=False, verbose=True):
for axis in self.axes:
if wt_units.is_valid_conversion(axis.units, destination_units):
orig = axis.units
axis.convert(destination_units, convert_variables=convert_variables)
if verbose:
print(
"axis {} converted from {} to {}".format(
axis.expression, orig, destination_units
)
)
for constant in self.constants:
if wt_units.is_valid_conversion(constant.units, destination_units):
orig = constant.units
constant.convert(destination_units, convert_variables=convert_variables)
if verbose:
print(
"constant {} converted from {} to {}".format(
constant.expression, orig, destination_units
)
)
if convert_variables:
for var in self.variables:
if wt_units.is_valid_conversion(var.units, destination_units):
orig = var.units
var.convert(destination_units)
if verbose:
print(
"variable {} converted from {} to {}".format(
var.natural_name, orig, destination_units
)
)
self._on_axes_updated()
self._on_constants_updated()
def create_channel(
self, name, values=None, *, shape=None, units=None, dtype=None, **kwargs
) -> Channel:
if name in self.channel_names:
warnings.warn(name, wt_exceptions.ObjectExistsWarning)
return self[name]
elif name in self.variable_names:
raise wt_exceptions.NameNotUniqueError(name)
require_kwargs = {"chunks": True}
if values is None:
if shape is None:
require_kwargs["shape"] = self.shape
else:
require_kwargs["shape"] = shape
if dtype is None:
require_kwargs["dtype"] = np.dtype(np.float64)
else:
require_kwargs["dtype"] = dtype
if require_kwargs["dtype"].kind in "fcmM":
require_kwargs["fillvalue"] = np.nan
else:
require_kwargs["fillvalue"] = 0
else:
require_kwargs["data"] = values
require_kwargs["shape"] = values.shape
require_kwargs["dtype"] = values.dtype
if np.prod(require_kwargs["shape"]) == 1:
require_kwargs["chunks"] = None
dataset_id = self.require_dataset(name=name, **require_kwargs).id
channel = Channel(self, dataset_id, units=units, **kwargs)
self.attrs["channel_names"] = np.append(self.attrs["channel_names"], name.encode())
return channel
def create_variable(
self, name, values=None, *, shape=None, units=None, dtype=None, **kwargs
) -> Variable:
if name in self.variable_names:
warnings.warn(name, wt_exceptions.ObjectExistsWarning)
return self[name]
elif name in self.channel_names:
raise wt_exceptions.NameNotUniqueError(name)
if values is None:
if shape is None:
shape = self.shape
if dtype is None:
dtype = np.dtype(np.float64)
if dtype.kind in "fcmM":
fillvalue = np.nan
else:
fillvalue = 0
else:
shape = values.shape
dtype = values.dtype
fillvalue = None
id = self.require_dataset(
name=name, data=values, shape=shape, dtype=dtype, fillvalue=fillvalue
).id
variable = Variable(self, id, units=units, **kwargs)
self._variables = None
self.attrs["variable_names"] = np.append(self.attrs["variable_names"], name.encode())
return variable
def get_nadir(self, channel=0) -> tuple:
if isinstance(channel, int):
channel_index = channel
elif isinstance(channel, str):
channel_index = self.channel_names.index(channel)
else:
raise TypeError("channel: expected {int, str}, got %s" % type(channel))
channel = self.channels[channel_index]
idx = channel.argmin()
return tuple(a[idx] for a in self._axes)
def get_zenith(self, channel=0) -> tuple:
if isinstance(channel, int):
channel_index = channel
elif isinstance(channel, str):
channel_index = self.channel_names.index(channel)
else:
raise TypeError("channel: expected {int, str}, got %s" % type(channel))
channel = self.channels[channel_index]
idx = channel.argmax()
return tuple(a[idx] for a in self._axes)
def heal(self, channel=0, method="linear", fill_value=np.nan, verbose=True):
warnings.warn("heal", category=wt_exceptions.EntireDatasetInMemoryWarning)
timer = wt_kit.Timer(verbose=False)
with timer:
if isinstance(channel, int):
channel_index = channel
elif isinstance(channel, str):
channel_index = self.channel_names.index(channel)
else:
raise TypeError("channel: expected {int, str}, got %s" % type(channel))
channel = self.channels[channel_index]
values = self.channels[channel_index][:]
points = [axis[:] for axis in self._axes]
xi = tuple(np.meshgrid(*points, indexing="ij"))
arr = np.zeros((len(self._axes) + 1, values.size))
for i in range(len(self._axes)):
arr[i] = xi[i].flatten()
arr[-1] = values.flatten()
arr = arr[:, ~np.isnan(arr).any(axis=0)]
tup = tuple([arr[i] for i in range(len(arr) - 1)])
out = griddata(tup, arr[-1], xi, method=method, fill_value=fill_value)
self.channels[channel_index][:] = out
if verbose:
print(
"channel {0} healed in {1} seconds".format(
channel.name, np.around(timer.interval, decimals=3)
)
)
def level(self, channel, axis, npts, *, verbose=True):
warnings.warn("level", category=wt_exceptions.EntireDatasetInMemoryWarning)
channel_index = wt_kit.get_index(self.channel_names, channel)
channel = self.channels[channel_index]
npts = int(npts)
if npts == 0:
raise wt_exceptions.ValueError("npts must not be zero")
ss = [slice(None)] * self.ndim
if npts > 0:
ss[axis] = slice(0, npts, None)
else:
ss[axis] = slice(npts, None, None)
subtrahend = np.nanmean(channel[ss], axis=axis)
if self.ndim > 1:
subtrahend = np.expand_dims(subtrahend, axis=axis)
channel -= subtrahend
channel._null = 0
if verbose:
print("channel {0} leveled along axis {1}".format(channel.natural_name, axis))
def map_variable(
self, variable, points, input_units="same", *, name=None, parent=None, verbose=True
) -> "Data":
variable_index = wt_kit.get_index(self.variable_names, variable)
variable = self.variables[variable_index]
if isinstance(points, int):
points = np.linspace(variable.min(), variable.max(), points)
points = np.array(points)
if points.ndim < variable.ndim:
for i, d in enumerate(variable.shape):
if d == 1:
points = np.expand_dims(points, axis=i)
if input_units == "same":
pass
else:
points = wt_units.converter(points, input_units, variable.units)
special = ["name", "axes", "constants", "channel_names", "variable_names"]
kwargs = {k: v for k, v in self.attrs.items() if k not in special}
if name is None:
name = "{0}_{1}_mapped".format(self.natural_name, variable.natural_name)
kwargs["name"] = name
kwargs["parent"] = parent
out = Data(**kwargs)
values = points
out.create_variable(values=values, **variable.attrs)
for v in self.variables:
if wt_kit.orthogonal(v.shape, variable.shape):
out.create_variable(values=v[:], **v.attrs)
out.transform(*self.axis_expressions)
if self.ndim == 1:
def interpolate(dataset, points):
function = scipy.interpolate.interp1d(variable[:], dataset[:], bounds_error=False)
return function(points)
else:
pts = np.array([a.full.flatten() for a in self.axes]).T
out_pts = np.array([a.full.flatten() for a in out.axes]).T
def interpolate(dataset, points):
values = dataset.full.flatten()
function = scipy.interpolate.LinearNDInterpolator(pts, values, rescale=True)
new = function(out_pts)
new.shape = out.shape
return new
for v in self.variables:
if v.natural_name not in out.variable_names:
out.create_variable(values=interpolate(v, points), **v.attrs)
        out.variable_names = self.variable_names
        out._variables = None
        for channel in self.channels:
out.create_channel(values=interpolate(channel, points), **channel.attrs)
if verbose:
print("data mapped from {0} to {1}".format(self.shape, out.shape))
return out
def offset(
self,
points,
offsets,
along,
offset_axis,
units="same",
offset_units="same",
mode="valid",
method="linear",
verbose=True,
):
raise NotImplementedError
if isinstance(along, int):
axis_index = along
elif isinstance(along, str):
axis_index = self.axis_names.index(along)
else:
raise TypeError("along: expected {int, str}, got %s" % type(along))
axis = self._axes[axis_index]
if units == "same":
input_units = axis.units
else:
input_units = units
if len(offsets.shape) == 1:
pass
else:
raise RuntimeError("values must be 1D or 0D in offset!")
dictionary = getattr(wt_units, axis.units_kind)
if input_units in dictionary.keys():
pass
else:
raise RuntimeError("units incompatible in offset!")
points = wt_units.converter(points, input_units, axis.units)
function = interp1d(points, offsets, bounds_error=False)
corrections = function(axis[:])
finite_indicies = np.where(np.isfinite(corrections))[0]
left_pad_width = finite_indicies[0]
right_pad_width = len(corrections) - finite_indicies[-1] - 1
corrections = np.pad(
corrections[np.isfinite(corrections)],
(int(left_pad_width), int(right_pad_width)),
mode="edge",
)
transpose_order = np.arange(len(self._axes))
transpose_order[axis_index] = len(self._axes) - 1
transpose_order[-1] = axis_index
self.transpose(transpose_order, verbose=False)
if isinstance(offset_axis, int):
offset_axis_index = offset_axis
elif isinstance(offset_axis, str):
offset_axis_index = self.axis_names.index(offset_axis)
else:
raise TypeError("offset_axis: expected {int, str}, got %s" % type(offset_axis))
new_points = [a[:] for a in self._axes]
old_offset_axis_points = self._axes[offset_axis_index][:]
spacing = abs(
(old_offset_axis_points.max() - old_offset_axis_points.min())
/ float(len(old_offset_axis_points))
)
if mode == "old":
new_offset_axis_points = old_offset_axis_points
elif mode == "valid":
_max = old_offset_axis_points.max() + corrections.min()
_min = old_offset_axis_points.min() + corrections.max()
n = int(abs(np.ceil((_max - _min) / spacing)))
new_offset_axis_points = np.linspace(_min, _max, n)
elif mode == "full":
_max = old_offset_axis_points.max() + corrections.max()
_min = old_offset_axis_points.min() + corrections.min()
            n = int(np.ceil((_max - _min) / spacing))
new_offset_axis_points = np.linspace(_min, _max, n)
new_points[offset_axis_index] = new_offset_axis_points
new_xi = tuple(np.meshgrid(*new_points, indexing="ij"))
xi = tuple(np.meshgrid(*[a[:] for a in self._axes], indexing="ij"))
for channel in self.channels:
arr = np.zeros((len(self._axes) + 1, channel[:].size))
for i in range(len(self._axes)):
arr[i] = xi[i].flatten()
arr[-1] = channel[:].flatten()
corrections = list(corrections)
corrections = corrections * int((len(arr[0]) / len(corrections)))
arr[offset_axis_index] += corrections
tup = tuple([arr[i] for i in range(len(arr) - 1)])
out = griddata(tup, arr[-1], new_xi, method=method, fill_value=np.nan, rescale=True)
channel[:] = out
self._axes[offset_axis_index][:] = new_offset_axis_points
self.transpose(transpose_order, verbose=False)
def print_tree(self, *, verbose=True):
print("{0} ({1})".format(self.natural_name, self.filepath))
self._print_branch("", depth=0, verbose=verbose)
def prune(self, keep_channels=True, *, verbose=True):
for v in self.variables:
for var in wt_kit.flatten_list([ax.variables for ax in self._axes + self._constants]):
if v == var:
break
else:
self.remove_variable(v.natural_name, implied=False, verbose=verbose)
if keep_channels is not True:
try:
if isinstance(keep_channels, str):
raise TypeError
indexes = tuple(keep_channels)
except TypeError:
indexes = (keep_channels,)
for i, ch in enumerate(self.channels):
                if i not in indexes and ch.natural_name not in indexes:
self.remove_channel(ch.natural_name, verbose=verbose)
def remove_channel(self, channel, *, verbose=True):
channel_index = wt_kit.get_index(self.channel_names, channel)
new = list(self.channel_names)
name = new.pop(channel_index)
del self[name]
self.channel_names = new
if verbose:
print("channel {0} removed".format(name))
def remove_variable(self, variable, *, implied=True, verbose=True):
if isinstance(variable, int):
variable = self.variable_names[variable]
removed = []
if implied:
for n in self.variable_names:
if n.startswith(variable):
removed.append(n)
else:
removed = [variable]
for n in removed:
for a in self._axes:
if n in [v.natural_name for v in a.variables]:
message = "{0} is contained in axis {1}".format(n, a.expression)
raise RuntimeError(message)
for c in self._constants:
if n in [v.natural_name for v in c.variables]:
warnings.warn(
"Variable being removed used in a constant",
wt_exceptions.WrightToolsWarning,
)
for n in removed:
variable_index = wt_kit.get_index(self.variable_names, n)
new = list(self.variable_names)
name = new.pop(variable_index)
del self[name]
self.variable_names = new
self._variables = None
if verbose:
print("{0} variable(s) removed:".format(len(removed)))
for n in removed:
print(" {0}".format(n))
def rename_channels(self, *, verbose=True, **kwargs):
changed = kwargs.keys()
for k, v in kwargs.items():
if v not in changed and v in self.keys():
raise wt_exceptions.NameNotUniqueError(v)
new = {}
for k, v in kwargs.items():
obj = self[k]
index = self.channel_names.index(k)
new[v] = obj, index
Group._instances.pop(obj.fullpath, None)
obj.natural_name = str(v)
del self[k]
names = list(self.channel_names)
for v, value in new.items():
obj, index = value
self[v] = obj
names[index] = v
self.channel_names = names
if verbose:
print("{0} channel(s) renamed:".format(len(kwargs)))
for k, v in kwargs.items():
print(" {0} --> {1}".format(k, v))
def rename_variables(self, *, implied=True, verbose=True, **kwargs):
kwargs = collections.OrderedDict(kwargs)
if implied:
new = collections.OrderedDict()
for k, v in kwargs.items():
for n in self.variable_names:
if n.startswith(k):
new[n] = n.replace(k, v, 1)
kwargs = new
changed = kwargs.keys()
for k, v in kwargs.items():
if v not in changed and v in self.keys():
raise wt_exceptions.NameNotUniqueError(v)
new = {}
for k, v in kwargs.items():
obj = self[k]
index = self.variable_names.index(k)
new[v] = obj, index
Group._instances.pop(obj.fullpath, None)
obj.natural_name = str(v)
del self[k]
names = list(self.variable_names)
for v, value in new.items():
obj, index = value
self[v] = obj
names[index] = v
self.variable_names = names
units = self.units
new = list(self.axis_expressions)
for i, v in enumerate(kwargs.keys()):
for j, n in enumerate(new):
new[j] = n.replace(v, "{%i}" % i)
for i, n in enumerate(new):
new[i] = n.format(*kwargs.values())
self.transform(*new)
for a, u in zip(self._axes, units):
a.convert(u)
units = self.constant_units
new = list(self.constant_expressions)
for i, v in enumerate(kwargs.keys()):
for j, n in enumerate(new):
new[j] = n.replace(v, "{%i}" % i)
for i, n in enumerate(new):
new[i] = n.format(*kwargs.values())
self.set_constants(*new)
for c, u in zip(self._constants, units):
c.convert(u)
if verbose:
print("{0} variable(s) renamed:".format(len(kwargs)))
for k, v in kwargs.items():
print(" {0} --> {1}".format(k, v))
def share_nans(self):
def f(_, s, channels):
outs = wt_kit.share_nans(*[c[s] for c in channels])
for c, o in zip(channels, outs):
c[s] = o
self.channels[0].chunkwise(f, self.channels)
def smooth(self, factors, channel=None, verbose=True) -> "Data":
warnings.warn("smooth", category=wt_exceptions.EntireDatasetInMemoryWarning)
if isinstance(factors, list):
pass
else:
dummy = np.zeros(len(self._axes))
dummy[::] = factors
factors = list(dummy)
if channel is None:
channels = self.channels
else:
if isinstance(channel, int):
channel_index = channel
elif isinstance(channel, str):
channel_index = self.channel_names.index(channel)
else:
raise TypeError("channel: expected {int, str}, got %s" % type(channel))
channels = [self.channels[channel_index]]
for channel in channels:
values = channel[:]
for axis_index in range(len(factors)):
factor = factors[axis_index]
transpose_order = range(len(values.shape))
transpose_order = [
len(values.shape) - 1 if i == axis_index else i for i in transpose_order
]
transpose_order[len(values.shape) - 1] = axis_index
values = values.transpose(transpose_order)
beta = 5.0
w = np.kaiser(2 * factor + 1, beta)
for index in np.ndindex(values[..., 0].shape):
current_slice = values[index]
temp_slice = np.pad(current_slice, int(factor), mode=str("edge"))
values[index] = np.convolve(temp_slice, w / w.sum(), mode=str("valid"))
values = values.transpose(transpose_order)
channel[:] = values
if verbose:
print("smoothed data")
def split(
self, expression, positions, *, units=None, parent=None, verbose=True
) -> wt_collection.Collection:
old_expr = self.axis_expressions
old_units = self.units
out = wt_collection.Collection(name="split", parent=parent)
if isinstance(expression, int):
if units is None:
units = self._axes[expression].units
expression = self._axes[expression].expression
elif isinstance(expression, str):
pass
else:
raise TypeError("expression: expected {int, str}, got %s" % type(expression))
self.transform(expression)
if units:
self.convert(units, verbose=False)
try:
positions = [-np.inf] + sorted(list(positions)) + [np.inf]
except TypeError:
positions = [-np.inf, positions, np.inf]
values = self._axes[0].full
masks = [(values >= lo) & (values < hi) for lo, hi in wt_kit.pairwise(positions)]
omasks = []
cuts = []
for mask in masks:
try:
omasks.append(wt_kit.mask_reduce(mask))
cuts.append([i == 1 for i in omasks[-1].shape])
if np.all(cuts[-1]):
cuts[-1][0] = False
except ValueError:
omasks.append(None)
cuts.append(None)
for i in range(len(positions) - 1):
out.create_data("split%03i" % i)
for var in self.variables:
for i, (imask, omask, cut) in enumerate(zip(masks, omasks, cuts)):
if omask is None:
continue
omask = wt_kit.enforce_mask_shape(omask, var.shape)
omask.shape = tuple([s for s, c in zip(omask.shape, cut) if not c])
out_arr = np.full(omask.shape, np.nan)
imask = wt_kit.enforce_mask_shape(imask, var.shape)
out_arr[omask] = var[:][imask]
out[i].create_variable(values=out_arr, **var.attrs)
for ch in self.channels:
for i, (imask, omask, cut) in enumerate(zip(masks, omasks, cuts)):
if omask is None:
continue
omask = wt_kit.enforce_mask_shape(omask, ch.shape)
omask.shape = tuple([s for s, c in zip(omask.shape, cut) if not c])
out_arr = np.full(omask.shape, np.nan)
imask = wt_kit.enforce_mask_shape(imask, ch.shape)
out_arr[omask] = ch[:][imask]
out[i].create_channel(values=out_arr, **ch.attrs)
if verbose:
for d in out.values():
try:
d.transform(expression)
except IndexError:
continue
print("split data into {0} pieces along <{1}>:".format(len(positions) - 1, expression))
for i, (lo, hi) in enumerate(wt_kit.pairwise(positions)):
new_data = out[i]
if new_data.shape == ():
print(" {0} : None".format(i))
else:
new_axis = new_data.axes[0]
print(
" {0} : {1:0.2f} to {2:0.2f} {3} {4}".format(
i, lo, hi, self.axes[0].units, new_axis.shape
)
)
for d in out.values():
try:
d.transform(*old_expr)
keep = []
keep_units = []
for ax, u in zip(d.axes, old_units):
if ax.size > 1:
keep.append(ax.expression)
keep_units.append(u)
else:
d.create_constant(ax.expression, verbose=False)
d.transform(*keep)
for ax, u in zip(d.axes, keep_units):
ax.convert(u)
except IndexError:
continue
tempax = Axis(d, expression)
if all(
np.all(
np.sum(~np.isnan(tempax.masked), axis=tuple(set(range(tempax.ndim)) - {j}))
<= 1
)
for j in range(tempax.ndim)
):
d.create_constant(expression, verbose=False)
self.transform(*old_expr)
for ax, u in zip(self.axes, old_units):
ax.convert(u)
return out
def transform(self, *axes, verbose=True):
new = []
newt = "newt" in self.axis_expressions
current = {a.expression: a for a in self._axes}
for expression in axes:
axis = current.get(expression, Axis(self, expression))
new.append(axis)
self._axes = new
for a in self._axes:
if a.units is None:
a.convert(a.variables[0].units)
self.flush()
self._on_axes_updated()
nownewt = "newt" in self.axis_expressions
if verbose and nownewt and not newt:
print("Look she turned me into a newt")
elif verbose and newt and not nownewt:
print("I got better")
def set_constants(self, *constants, verbose=True):
new = []
current = {c.expression: c for c in self._constants}
for expression in constants:
constant = current.get(expression, Constant(self, expression))
new.append(constant)
self._constants = new
for c in self._constants:
if c.units is None:
c.convert(c.variables[0].units)
self.flush()
self._on_constants_updated()
def create_constant(self, expression, *, verbose=True):
if expression in self.constant_expressions:
wt_exceptions.ObjectExistsWarning.warn(expression)
return self.constants[self.constant_expressions.index(expression)]
constant = Constant(self, expression)
if constant.units is None:
constant.convert(constant.variables[0].units)
self._constants.append(constant)
self.flush()
self._on_constants_updated()
if verbose:
print("Constant '{}' added".format(constant.expression))
return constant
def remove_constant(self, constant, *, verbose=True):
if isinstance(constant, (str, int)):
constant_index = wt_kit.get_index(self.constant_expressions, constant)
elif isinstance(constant, Constant):
constant_index = wt_kit.get_index(self.constants, constant)
constant = self._constants[constant_index]
self._constants.pop(constant_index)
self.flush()
self._on_constants_updated()
if verbose:
print("Constant '{}' removed".format(constant.expression))
def zoom(self, factor, order=1, verbose=True):
raise NotImplementedError
import scipy.ndimage
for axis in self._axes:
axis[:] = scipy.ndimage.interpolation.zoom(axis[:], factor, order=order)
for channel in self.channels:
channel[:] = scipy.ndimage.interpolation.zoom(channel[:], factor, order=order)
if verbose:
print("data zoomed to new shape:", self.shape)
| true
| true
|
f70bd75e36b14f005bee3275929a42154f09dfe5
| 54,196
|
py
|
Python
|
tensorflow/python/framework/func_graph.py
|
ahmedsabie/tensorflow
|
1c47355978f562a6a40cd8b0597e2638fb73e07d
|
[
"Apache-2.0"
] | 2
|
2020-04-02T11:52:00.000Z
|
2020-05-29T09:02:00.000Z
|
tensorflow/python/framework/func_graph.py
|
sseung0703/tensorflow
|
be084bd7a4dd241eb781fc704f57bcacc5c9b6dd
|
[
"Apache-2.0"
] | 1
|
2020-05-16T01:56:36.000Z
|
2020-05-16T01:56:36.000Z
|
tensorflow/python/framework/func_graph.py
|
sseung0703/tensorflow
|
be084bd7a4dd241eb781fc704f57bcacc5c9b6dd
|
[
"Apache-2.0"
] | 1
|
2021-12-06T17:11:35.000Z
|
2021-12-06T17:11:35.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FuncGraph and related functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as py_collections
import itertools
import weakref
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import tape
from tensorflow.python.eager.graph_only_ops import graph_placeholder
from tensorflow.python.framework import auto_control_deps
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import compat
from tensorflow.python.util import memory
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
ALLOWLIST_COLLECTIONS = [
ops.GraphKeys.GLOBAL_VARIABLES,
ops.GraphKeys.LOCAL_VARIABLES,
ops.GraphKeys.TRAINABLE_VARIABLES,
variable_scope._VARSTORE_KEY, # pylint: disable=protected-access
variable_scope._VARSCOPESTORE_KEY # pylint: disable=protected-access
]
_EAGER_CONST_THRESHOLD = 128
class UnknownArgument(object):
"""Signifies an argument which is not currently handled."""
pass
def convert_structure_to_signature(structure, arg_names=None):
"""Convert a potentially nested structure to a signature.
Args:
structure: Structure to convert, where top level collection is a list or a
tuple.
    arg_names: Optional list of arguments that has the same number of elements as
`structure` and is used for naming corresponding TensorSpecs.
Returns:
Identical structure that has TensorSpec objects instead of Tensors and
UnknownArgument instead of any unsupported types.
"""
def encode_arg(arg, path):
"""A representation for this argument, for converting into signatures."""
if isinstance(arg, ops.Tensor):
user_specified_name = None
try:
user_specified_name = compat.as_str(
arg.op.get_attr("_user_specified_name"))
except ValueError:
pass
if path and user_specified_name and user_specified_name != path[0]:
# The user has explicitly named the argument differently than the name
# of the function argument.
name = user_specified_name
else:
name = "/".join(str(p) for p in path)
return tensor_spec.TensorSpec(arg.shape, arg.dtype, name)
if isinstance(arg, composite_tensor.CompositeTensor):
# TODO(b/133606651) Do we need to inject arg_name?
return arg._type_spec # pylint: disable=protected-access
if isinstance(arg, resource_variable_ops.BaseResourceVariable):
name = "/".join(str(p) for p in path)
return resource_variable_ops.VariableSpec(arg.shape, arg.dtype, name)
if isinstance(arg, (
int,
float,
bool,
str,
type(None),
dtypes.DType,
tensor_spec.TensorSpec,
type_spec.TypeSpec,
)):
return arg
return UnknownArgument()
# We are using the flattened paths to name the TensorSpecs. We need an
# explicit name for them downstream.
flattened = nest.flatten_with_tuple_paths(structure)
if arg_names:
if len(arg_names) != len(structure):
raise ValueError(
"Passed in arg_names don't match actual signature (%s)." % arg_names)
# Replace all top-level names with their actual arg_names. If a path before
# was "(2,'a',1)", it will become "(arg_names[2],'a',1)".
flattened = [
((arg_names[path[0]],) + path[1:], arg) for path, arg in flattened
]
mapped = [encode_arg(arg, path) for path, arg in flattened]
return nest.pack_sequence_as(structure, mapped)
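# Illustrative usage sketch (not part of the original module): how
# `convert_structure_to_signature` turns graph tensors into named TensorSpecs.
# The placeholder shape and the argument names below are assumptions chosen
# for the example.
#
#   with FuncGraph("signature_example").as_default():
#     x = graph_placeholder(dtypes.float32, [1, 2], name="x")
#     sig = convert_structure_to_signature([x, 0.5], arg_names=["x", "rate"])
#   # -> [TensorSpec(shape=(1, 2), dtype=tf.float32, name="x"), 0.5]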
class FuncGraph(ops.Graph):
"""Graph representing a function body.
Attributes:
name: The name of the function.
inputs: Placeholder tensors representing the inputs to this function. The
tensors are in this FuncGraph. This represents "regular" inputs as well as
captured inputs (i.e. the values of self.captures), with the regular
inputs coming first.
outputs: Tensors that will be returned by this function. The tensors are in
this FuncGraph.
control_outputs: Operations that must be executed before the function
represented by this graph can be said to have been executed.
structured_input_signature: A tuple of (args, kwargs), which are both
possibly-nested python objects that were received by this function. Note
that these structures might contain Python `None`s.
structured_outputs: A possibly-nested python object which will be returned
by this function. The Tensors in this structure are the same as those of
self.outputs. Note that this structure might contain Python `None`s.
variables: Variables that should be watched during function execution.
outer_graph: The graph this function is defined in. May be another FuncGraph
or the global default Graph.
captures: Maps external tensor -> internal tensor (i.e. input placeholder).
The entries are in the order they were captured.
control_captures: Set of external ops on which this graph has a control
dependency.
seed: The graph-level random seed.
capture_by_value: If True, the func graph will capture Variables by value
instead of reference.
"""
def __init__(self, name, collections=None, capture_by_value=None):
"""Construct a new FuncGraph.
The graph will inherit its graph key, collections, seed, and distribution
strategy stack from the current context or graph.
Args:
name: the name of the function.
collections: a dictionary of collections this FuncGraph should start
with. If not specified (None), the FuncGraph will read (but not write
to) the outer graph's collections that are not allowlisted, and both
read and write to the outer graph's collections that are allowlisted.
The current allowlisted collections are the global variables, the
local variables, and the trainable variables.
Defaults to None.
capture_by_value: An optional boolean. If True, the func graph will
capture Variables by value instead of reference. By default inherit
from outer graphs, and failing that will default to False.
"""
super(FuncGraph, self).__init__()
self.name = name
self.inputs = []
self.outputs = []
self.control_outputs = []
self.control_captures = set()
self.structured_input_signature = None
self.structured_outputs = None
self._weak_variables = []
self._watched_variables = object_identity.ObjectIdentityWeakSet()
self.is_control_flow_graph = False
outer_graph = ops.get_default_graph()
self._weak_outer_graph = weakref.ref(outer_graph)
while outer_graph.building_function:
outer_graph = outer_graph.outer_graph
# If self._weak_outer_graph is deleted, we revert to the outermost Graph
# active when the FuncGraph was traced. This will not be a FuncGraph.
self._fallback_outer_graph = outer_graph
self._captures = py_collections.OrderedDict()
# If not None, records the names of output args of this function. Used to
# preserve the output names in the signature of a serialized+deserialized
# function. Private at the moment mostly because it's often out of date.
self._output_names = None
# Maps arbitrary key -> (closure, nest of placeholders), where at function
# call time the value of closure() will be used to feed the nest of
# placeholders.
self._deferred_captures = py_collections.OrderedDict()
# Inherit capture-by-value from outer graph.
if capture_by_value is not None:
self.capture_by_value = capture_by_value
elif self.outer_graph is not None and isinstance(
self.outer_graph, FuncGraph):
self.capture_by_value = self.outer_graph.capture_by_value
else:
self.capture_by_value = False
self._building_function = True
# Map from resource tensor name to last op (in program order) which uses
# this tensor. Used to enforce that execution order matches program order
# for resource tensors.
self._last_op_using_resource_tensor = {}
graph = self.outer_graph
if context.executing_eagerly():
self.seed = context.global_seed()
      # [for tf-data user migration from TF1.0 to 2.0] seed_used keeps track of
# any None op_seed for random_op in the function, in which case we end up
# using function seed, which could be unintended behavior for the op.
self._seed_used = False
else:
self.seed = graph.seed
self._seed_used = False
# TODO(allenl): Figure out if we can remove colocation stack
# specialization (currently used in cond_v2), here and in the cache key.
self._colocation_stack = graph._colocation_stack.copy() # pylint: disable=protected-access
if collections is None:
for collection_name in graph.get_all_collection_keys():
if collection_name not in ALLOWLIST_COLLECTIONS:
self._collections[collection_name] = graph.get_collection(
collection_name)
for collection_name in ALLOWLIST_COLLECTIONS:
self._collections[collection_name] = graph.get_collection_ref(
collection_name)
else:
self._collections = collections
# Keep track of whether this FuncGraph is exportable to SavedModel. Use
# `graph.mark_as_unsaveable(reason)` to mark this FuncGraph and any
# dependent functions as unsaveable.
self._saveable = True
self._saving_errors = set()
# Keep track of callbacks to run when this graph exits default scope
self._scope_exit_callbacks = None
def __str__(self):
return "FuncGraph(name=%s, id=%s)" % (self.name, id(self))
def watch_variable(self, v):
"""Marks the variable v as accessed while building this graph."""
while self is not None and isinstance(self, FuncGraph):
self._watched_variables.add(v)
self = self.outer_graph
def capture_call_time_value(self, closure, spec, key=None):
"""Creates a placeholder which at call time has the value closure().
Useful, for example, to respect TensorFlow context managers, which are often
dynamically scoped.
Args:
closure: function which takes no arguments, to be evaluated at function
call time, returning a nest of tensors compatible with `spec`.
spec: nest of TypeSpec for the value to capture.
      key: optional. If not None, multiple calls to this method with the same
key in the same graph will return the same placeholder, and the
first closure will be used at function call time.
Returns:
Nest of placeholders which, at function call time, will be fed with the
result of calling closure().
Raises:
ValueError: at function call time, if the return value of closure() is
not compatible with `spec`.
"""
if key is None:
key = object()
if key not in self._deferred_captures:
def convert_to_placeholder(s):
if not isinstance(s, tensor_spec.DenseSpec):
raise TypeError(
"Expected a nest of `TypeSpec` objects, found %s of type %s." %
(s, type(s)))
return array_ops.placeholder(dtype=s.dtype, shape=s.shape)
placeholder = nest.map_structure(
convert_to_placeholder, spec, expand_composites=True)
def wrapped_closure():
ret_nest = closure()
nest.assert_same_structure(spec, ret_nest, expand_composites=True)
# This uses the tensor dtype defined in `spec` when converting values
# in `ret_nest` to tensors.
# pylint: disable=protected-access
y = nest.map_structure(lambda s, r: s._to_components(r), spec, ret_nest,
expand_composites=False)
# pylint: enable=protected-access
return nest.flatten(y, expand_composites=True)
self._deferred_captures[key] = (wrapped_closure, placeholder)
return self._deferred_captures[key][1]
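  # Illustrative usage sketch (not part of the original module): a deferred
  # capture feeds the value of a closure into the graph at call time. The
  # graph name and the constant value below are assumptions for the example.
  #
  #   g = FuncGraph("deferred_capture_example")
  #   with g.as_default():
  #     rate = g.capture_call_time_value(
  #         lambda: constant_op.constant(0.1),
  #         tensor_spec.TensorSpec([], dtypes.float32))
  #   # `rate` is a placeholder in `g`; each call evaluates the closure and
  #   # feeds the result into it (see `deferred_external_captures`).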
def control_dependencies(self, control_inputs):
"""Handles control dependencies.
FuncGraph wraps Graph's control_dependencies logic by first filtering out
any external tensors / operations and storing them in the graph's
control_captures member. Any consumers of this function graph must then
decide how to handle the control captures.
Args:
control_inputs: A list of `Operation` or `Tensor` objects which
must be executed or computed before running the operations
defined in the context. Can also be `None` to clear the control
dependencies.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
Raises:
TypeError: If `control_inputs` is not a list of `Operation` or
`Tensor` objects.
"""
if control_inputs is None:
return super(FuncGraph, self).control_dependencies(control_inputs)
filtered_control_inputs = []
for c in control_inputs:
# Check for _UnreadVariable
if (isinstance(c, ops.IndexedSlices) or
(hasattr(c, "_handle") and hasattr(c, "op"))):
c = c.op
graph_element = ops._as_graph_element(c) # pylint: disable=protected-access
if graph_element is None:
graph_element = c
if graph_element is not None and getattr(
graph_element, "graph", None) is not self:
self.control_captures.add(graph_element)
else:
filtered_control_inputs.append(graph_element)
return super(FuncGraph, self).control_dependencies(filtered_control_inputs)
def as_default(self):
outer_cm = super(FuncGraph, self).as_default()
@tf_contextlib.contextmanager
def inner_cm():
"""Context manager for copying distribute.Strategy scope information."""
# pylint: disable=protected-access
# TODO(b/112906995, nareshmodi): distribution strategy depends on
# inheriting this stack from the default graph even in eager mode. Maybe
# it should be part of the eager context? This would also allow us to
# remove a get_default_graph() call from the function cache lookup.
graph = ops.get_default_graph()
old_strategy_stack = self._distribution_strategy_stack
self._distribution_strategy_stack = list(
graph._distribution_strategy_stack)
# We ignore device placements from any outer scopes while tracing the
# function when possible, to avoid hard-coding them in the function
# graph. "Default" placements come from the PartitionedCallOp's placement,
# so that the same trace of the Python function may be placed on several
# different devices and saved functions may be placed on new devices when
# restored.
# However, we need to preserve the outer device stack in the following
# cases in non eager context:
# 1. device stack is callable
# 2. When using distribution strategy with legacy graph mode.
old_device_stack = self._device_function_stack
if (not context.executing_eagerly() and
(device_stack_has_callable(graph._device_function_stack) or
(self._distribution_strategy_stack and
not ops.executing_eagerly_outside_functions()))):
# Hard-code devices from device functions in the function body
self._device_function_stack = graph._device_function_stack.copy()
old_creator_stack = self._variable_creator_stack
self._variable_creator_stack = graph._variable_creator_stack
# Inherit the graph key, since this is used for matching variables in
# optimizers.
old_graph_key = self._graph_key
self._graph_key = graph._graph_key
# pylint: enable=protected-access
old_scope_exit_callbacks = self._scope_exit_callbacks
self._scope_exit_callbacks = []
with outer_cm as g:
try:
yield g
finally:
try:
for fn in self._scope_exit_callbacks:
fn()
finally:
self._scope_exit_callbacks = old_scope_exit_callbacks
self._distribution_strategy_stack = old_strategy_stack
self._device_function_stack = old_device_stack
self._variable_creator_stack = old_creator_stack
self._graph_key = old_graph_key
return inner_cm()
@property
def outer_graph(self):
"""The Graph this FuncGraph is nested in.
Functions may capture Tensors from graphs they are nested in (transitive).
Returns:
A Graph object. Initially set to the current default graph when the
FuncGraph was created. If the previous `outer_graph` was deleted because
the function that owns it was deleted, `outer_graph` is reset to the
outermost default graph active when the FuncGraph was created. This
FuncGraph won't have captured anything from the new `outer_graph` (and
likely not from the previous setting, since that would have created a
strong reference), but it is returned so that FuncGraphs always have a
parent.
"""
current = self._weak_outer_graph()
if current is None:
return self._fallback_outer_graph
return current
@outer_graph.setter
def outer_graph(self, new_outer_graph):
"""Sets `outer_graph` to `new_outer_graph`."""
self._weak_outer_graph = weakref.ref(new_outer_graph)
@property
def output_types(self):
return [t.dtype for t in self.outputs]
@property
def output_shapes(self):
return [t.shape for t in self.outputs]
@property
def trainable_variables(self):
"""A sequence of trainable variables accessed by this FuncGraph.
Note that functions keep only weak references to variables. Calling the
function after a variable it accesses has been deleted is an error.
Returns:
Sequence of trainable variables for this func graph.
"""
return tuple(v for v in self.variables if v.trainable)
@property
def variables(self):
"""A sequence of variables accessed by this FuncGraph.
Note that functions keep only weak references to variables. Calling the
function after a variable it accesses has been deleted is an error.
Returns:
Sequence of variables for this func graph.
"""
def deref(weak_v):
v = weak_v()
if v is None:
raise AssertionError(
"Called a function referencing variables which have been deleted. "
"This likely means that function-local variables were created and "
"not referenced elsewhere in the program. This is generally a "
"mistake; consider storing variables in an object attribute on "
"first call.")
return v
return tuple(deref(v) for v in self._weak_variables)
@variables.setter
def variables(self, var_list):
self._weak_variables = [weakref.ref(v) for v in var_list]
def _capture_by_value(
self,
op_type,
inputs,
dtypes, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_device=True):
# When capturing by value, do the read outside
reverse_captures = dict((id(v), k) for k, v in self.captures)
uncaptured_inputs = [reverse_captures.get(id(t), t) for t in inputs]
with ops.init_scope():
if context.executing_eagerly():
attr_list = ("dtype", int(attrs["dtype"].type))
value, = execute.execute(
compat.as_bytes(op_type), 1, uncaptured_inputs, attr_list,
context.context())
else:
op = ops.get_default_graph()._create_op_internal( # pylint: disable=protected-access
op_type,
uncaptured_inputs,
dtypes,
input_types,
name,
attrs,
op_def,
compute_device)
value = op.outputs[0]
captured_value = self.capture(value)
return captured_value.op
def _create_op_internal(
self,
op_type,
inputs,
dtypes=None, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_device=True):
"""Like Graph.create_op, except handles external input tensors.
This overload adds functionality to create_op to "capture" any external
input tensors, i.e. tensors from the eager context or outer function graphs
if this is a nested function. See `capture` for more information.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: (Optional) A list of `DType` objects that will be the types of the
tensors that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of
the tensors that the operation consumes. By default, uses the base
`DType` of each input in `inputs`. Operations that expect
reference-typed inputs must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_device: (Optional.) If True, device functions will be executed
to compute the device property of the Operation.
Returns:
An `Operation` object.
"""
if self.capture_by_value and op_type in ["ReadVariableOp",
"ResourceGather"]:
return self._capture_by_value(op_type, inputs, dtypes, input_types, name,
attrs, op_def, compute_device)
# This capturing logic interacts poorly with control flow contexts which
# want to replace inputs of ops far too late in the process. This can lead
# the context to get confused and try to create an Enter for an Enter. We
# can detect this here and skip the additional Enter which can confuse loop
# validation logic.
if op_type == "Enter" and inputs[0].op.type == "Enter":
if inputs[0].op.get_attr("frame_name") == attrs["frame_name"].s:
return inputs[0].op
# Calling AddValue on the control flow contexts to force creation of the
# backward accumulators in the original graph before we create placeholders
# to capture the inputs.
ctxt = ops.get_default_graph()._control_flow_context # pylint: disable=protected-access
# Use a different list to avoid modifying the original inputs list.
captured_inputs = []
for inp in inputs:
# TPU Estimator defines a control flow context with no AddValue method.
if ctxt is not None and hasattr(ctxt, "AddValue"):
inp = ctxt.AddValue(inp)
inp = self.capture(inp)
captured_inputs.append(inp)
return super(FuncGraph, self)._create_op_internal( # pylint: disable=protected-access
op_type, captured_inputs, dtypes, input_types, name, attrs, op_def,
compute_device)
def capture(self, tensor, name=None, shape=None):
"""Captures `tensor` if it's external to this graph.
If `tensor` is from a different graph, returns a placeholder for it.
`tensor` and the placeholder will appear in self.captures, and the
placeholder will appear in self.inputs. Multiple calls to this method with
the same `tensor` argument will return the same placeholder. If `tensor` is
from this graph, returns `tensor`.
Args:
tensor: Tensor. May be from this FuncGraph or a different graph.
name: Optional name if a placeholder is created.
shape: Optional shape if a placeholder is created.
Returns:
Tensor from this FuncGraph.
Raises:
InaccessibleTensorError: if any tensors are accessed in a manner that
bypasses the mechanisms required for the data dependencies to be correctly
wired.
"""
if isinstance(tensor, ops.EagerTensor):
if name is None:
name = str(ops.uid())
# Small EagerTensors are captured with Const ops
if (tensor.dtype in dtypes.TF_VALUE_DTYPES and
np.prod(tensor.shape) <= _EAGER_CONST_THRESHOLD):
return self.capture_eager_tensor(tensor, name)
# Large EagerTensors and resources are captured with Placeholder ops
return self._capture_helper(tensor, name, shape)
if tensor.graph is not self:
if name is None:
name = tensor.op.name
inner_graph = tensor.graph
while inner_graph is not None and isinstance(inner_graph, FuncGraph):
if inner_graph is self:
raise errors.InaccessibleTensorError(
"The tensor '%s' cannot be accessed here: it is defined"
" in another function or code block. Use return values,"
" explicit Python locals or TensorFlow collections to access"
" it. Defined in: %s; accessed from: %s.\n"
% (tensor, tensor.graph, self))
inner_graph = inner_graph.outer_graph
return self._capture_helper(tensor, name)
return tensor
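  # Illustrative usage sketch (not part of the original module): capturing a
  # tensor from an enclosing graph creates an internal placeholder and records
  # the pair in `captures`. The graph names below are assumptions.
  #
  #   outer = ops.Graph()
  #   with outer.as_default():
  #     x = constant_op.constant(1.0)
  #     inner = FuncGraph("capture_example")
  #     with inner.as_default():
  #       x_ph = inner.capture(x)          # placeholder living in `inner`
  #     assert list(inner.captures) == [(x, x_ph)]
  #     assert inner.inputs == [x_ph]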
def _capture_helper(self, tensor, name, shape=None):
capture = self._captures.get(id(tensor))
if capture is None:
placeholder = _create_substitute_placeholder(
tensor, name=name, dtype=tensor.dtype, shape=shape)
# Record the composite device as an attribute to the placeholder.
      # This attribute would be propagated into the arg_attr of the FunctionDef.
# Currently, a packed eager tensor is always placed on a CompositeDevice.
if isinstance(tensor, ops.EagerTensor) and tensor.is_packed:
placeholder.op._set_attr( # pylint: disable=protected-access
"_composite_device",
attr_value_pb2.AttrValue(s=compat.as_bytes(tensor.device)))
self.add_capture(tensor, placeholder)
else:
placeholder = capture[1]
tape.record_operation("captured_value", [placeholder], [tensor],
backward_function=lambda x: [x],
forward_function=lambda x: [x])
return placeholder
@property
def captures(self):
"""Order list of tuples containing external and internal captures."""
return self._captures.values()
def add_capture(self, tensor, placeholder):
"""Capture a specific tensor and utilize the provided placeholder.
Args:
      tensor: Tensor to capture.
placeholder: Provided placeholder for the tensor.
"""
self._captures[id(tensor)] = (tensor, placeholder)
self.inputs.append(placeholder)
def replace_capture(self, tensor, placeholder):
"""Replace already existing capture."""
self._captures[id(tensor)] = (tensor, placeholder)
def reset_captures(self, capture_list):
"""Set the captures with the provided list of captures & placeholder."""
self._captures = py_collections.OrderedDict()
for tensor, placeholder in capture_list:
self._captures[id(tensor)] = (tensor, placeholder)
def pop_capture(self, tensor):
"""Remove the capture and return the generated placeholder."""
capture = self._captures.pop(id(tensor), None)
if capture is None:
return None
return capture[1]
def clear_captures(self):
# TODO(b/115366440): Delete this method when a custom OrderedDict is added.
# Clearing captures using clear() leaves some cycles around.
while self._captures:
self._captures.popitem()
memory.dismantle_ordered_dict(self._captures)
while self._deferred_captures:
self._deferred_captures.popitem()
memory.dismantle_ordered_dict(self._deferred_captures)
def capture_distributed_variable(self, variable, placeholder):
"""Add given distributed variable to captures with given placeholder."""
self._captures[id(variable)] = (variable, placeholder)
tape.record_operation("captured_value", [placeholder], [variable],
backward_function=lambda x: [x],
forward_function=lambda x: [x])
def capture_eager_tensor(self, tensor, name):
capture = self._captures.get(id(tensor))
if capture is None:
# We clear all control dependencies and place the Const op on the same
# device as the source tensor. The device placement may be relaxed at
# a later date.
with ops.control_dependencies(None), self.device(tensor.device):
constant_value = tensor_util.constant_value(tensor)
if constant_value is None:
# Some eager tensors, e.g. parallel tensors, are not convertible to a
# single constant. We'll use a placeholder for this case.
return self._capture_helper(tensor, name)
graph_const = constant_op.constant(constant_value, dtype=tensor.dtype,
shape=tensor.shape, name=name)
self.add_capture(tensor, graph_const)
else:
graph_const = capture[1]
tape.record_operation("captured_value", [graph_const], [tensor],
backward_function=lambda x: [x],
forward_function=lambda x: [x])
return graph_const
def captured(self, tensor):
"""Check if the specified tensor has been captured."""
return id(tensor) in self._captures
@property
def external_captures(self):
"""External tensors captured by this function."""
return [c[0] for c in self._captures.values()]
@property
def internal_captures(self):
"""Placeholders in this function corresponding captured tensors."""
return [c[1] for c in self._captures.values()]
@property
def deferred_external_captures(self):
"""Ordered nest of tensors whose placeholders will be fed at call time."""
return [c[0] for c in self._deferred_captures.values()]
@property
def deferred_internal_captures(self):
"""List of nest of placeholders which at call time will be fed."""
return [c[1] for c in self._deferred_captures.values()]
@property
def variable_captures(self):
"""Map of python object ids of variables to variables which are captured."""
return {
id(self._captures[id(v)][1]): v
for v in self.variables
if id(v) in self._captures
}
def mark_as_unsaveable(self, error_message):
"""Marks this FuncGraph as unsaveable.
Any attempts to export this FuncGraph will raise an error with the specified
message.
Args:
error_message: List or string containing the error message to be raised
when saving this FuncGraph to SavedModel.
"""
self._saveable = False
if isinstance(error_message, str):
error_message = [error_message]
self._saving_errors.update(error_message)
@property
def saveable(self):
"""Returns whether this FuncGraph is saveable."""
return self._saveable
@property
def saving_errors(self):
"""Returns set of errors preventing this FuncGraph from being saved."""
return self._saving_errors
def _add_scope_exit_callback(self, fn):
"""Add a function to call when this graph exits the default scope."""
if not callable(fn):
raise TypeError("fn is not callable: {}".format(fn))
if self._scope_exit_callbacks is None:
raise RuntimeError(
"Attempting to add a scope exit callback, but the default graph is "
"not the context scope graph. Did you forget to call "
"'with graph.as_default(): ...'?")
self._scope_exit_callbacks.append(fn)
def func_graph_from_py_func(name,
python_func,
args,
kwargs,
signature=None,
func_graph=None,
autograph=False,
autograph_options=None,
add_control_dependencies=True,
arg_names=None,
op_return_value=None,
collections=None,
capture_by_value=None,
override_flat_arg_shapes=None):
"""Returns a `FuncGraph` generated from `python_func`.
Args:
name: an identifier for the function.
python_func: the Python function to trace.
args: the positional args with which the Python function should be called;
ignored if a signature is provided.
kwargs: the keyword args with which the Python function should be called;
ignored if a signature is provided.
signature: a possibly nested sequence of `TensorSpecs` specifying the shapes
and dtypes of the arguments. When a signature is provided, `args` and
`kwargs` are ignored, and `python_func` is traced with Tensors conforming
to `signature`. If `None`, the shapes and dtypes are inferred from the
inputs.
func_graph: Optional. An instance of FuncGraph. If provided, we will use
      this graph, otherwise a new one is built and returned.
autograph: whether to use autograph to compile `python_func`.
See https://www.tensorflow.org/guide/autograph for more information.
autograph_options: additional knobs to control when `autograph=True`.
See https://www.tensorflow.org/guide/autograph for more information.
add_control_dependencies: If True, automatically adds control dependencies
to ensure program order matches execution order and stateful ops always
execute.
arg_names: Optional list of argument names, used to give input placeholders
recognizable names.
op_return_value: Optional. A Tensor. If set and `python_func` returns
Operations, those return values will be replaced with this value. If not
set, returning an Operation triggers an error.
collections: a dictionary of collections this FuncGraph should start
with. If not specified (None), the FuncGraph will read (but not write to)
the outer graph's collections that are not allowlisted, and both
read and write to the outer graph's collections that are allowlisted.
The current allowlisted collections are the global variables, the
local variables, and the trainable variables.
Defaults to None.
capture_by_value: An optional boolean. If True, the func graph will capture
Variables by value instead of reference. By default inherit from outer
graphs, and failing that will default to False.
override_flat_arg_shapes: An optional list of instances that are either
`None` or `TensorShape`. The length must match that of
`nest.flatten((args, kwargs), expand_composites=True)`. The entries
containing value `None` must match entries in flattened arguments
containing non-tensors, while entries containing a `TensorShape` must
match entries in the flattened arguments containing tensors.
Returns:
A FuncGraph.
Raises:
TypeError: If any of `python_func`'s return values is neither `None` nor a
`Tensor`.
ValueError: If both `signature` and `override_flat_arg_shapes` are
passed in.
"""
if op_return_value is not None:
assert isinstance(op_return_value, ops.Tensor), op_return_value
if func_graph is None:
func_graph = FuncGraph(name, collections=collections,
capture_by_value=capture_by_value)
assert isinstance(func_graph, FuncGraph)
if add_control_dependencies:
deps_control_manager = auto_control_deps.AutomaticControlDependencies()
else:
deps_control_manager = ops.NullContextmanager()
with func_graph.as_default(), deps_control_manager as deps_ctx:
current_scope = variable_scope.get_variable_scope()
    default_use_resource = current_scope.use_resource
current_scope.set_use_resource(True)
if signature is not None and override_flat_arg_shapes is not None:
raise ValueError(
"Passed both signature and override_flat_arg_shapes: %s and %s."
% (signature, override_flat_arg_shapes))
if signature is not None:
args = signature
kwargs = {}
# Creates and names placeholders for all arguments.
if override_flat_arg_shapes is not None:
flat_args = nest.flatten(args, expand_composites=True)
arg_shapes = override_flat_arg_shapes[:len(flat_args)]
kwarg_shapes = override_flat_arg_shapes[len(flat_args):]
else:
arg_shapes = None
kwarg_shapes = None
func_args = _get_defun_inputs_from_args(
args, arg_names, flat_shapes=arg_shapes)
func_kwargs = _get_defun_inputs_from_kwargs(
kwargs, flat_shapes=kwarg_shapes)
# Convert all Tensors into TensorSpecs before saving the structured inputs.
# If storing pure concrete functions that are not called through polymorphic
# functions, we don't have access to FunctionSpec, so we need to call the
# TensorSpecs by their `arg_names` for later binding.
func_graph.structured_input_signature = (
convert_structure_to_signature(func_args, arg_names),
convert_structure_to_signature(func_kwargs))
flat_func_args = nest.flatten(func_args, expand_composites=True)
flat_func_kwargs = nest.flatten(func_kwargs, expand_composites=True)
# Temporarily set inputs to allow graph building code to inspect
# them. Reassigned below.
func_graph.inputs = [arg for arg in flat_func_args + flat_func_kwargs
if isinstance(arg, ops.Tensor)]
# Note: `nest.flatten` sorts by keys, as does `_deterministic_dict_values`.
# Variables to help check whether mutation happens in calling the function
# Copy the recursive list, tuple and map structure, but not base objects
func_args_before = nest.pack_sequence_as(func_args, flat_func_args,
expand_composites=True)
func_kwargs_before = nest.pack_sequence_as(
func_kwargs, flat_func_kwargs, expand_composites=True)
def convert(x):
"""Converts a function output to a Tensor."""
if x is None:
return None
if op_return_value is not None and isinstance(x, ops.Operation):
# TODO(b/79881896): we currently can't capture external control deps, so
# this won't work if x needs to be captured (i.e. if python_func returns
# captured Operations).
with ops.control_dependencies([x]):
x = array_ops.identity(op_return_value)
elif not isinstance(x, tensor_array_ops.TensorArray):
try:
x = ops.convert_to_tensor_or_composite(x)
except (ValueError, TypeError):
raise TypeError(
"To be compatible with tf.eager.defun, Python functions "
"must return zero or more Tensors; in compilation of %s, found "
"return value of type %s, which is not a Tensor." %
(str(python_func), type(x)))
if add_control_dependencies:
x = deps_ctx.mark_as_return(x)
return x
try:
if autograph:
from tensorflow.python import autograph # pylint: disable=g-import-not-at-top
_, original_func = tf_decorator.unwrap(python_func)
def wrapper(*args, **kwargs):
"""Calls a converted version of original_func."""
# TODO(mdan): Push this block higher in tf.function's call stack.
try:
return autograph.converted_call(
original_func,
args,
kwargs,
options=autograph.ConversionOptions(
recursive=True,
optional_features=autograph_options,
user_requested=True,
))
except Exception as e: # pylint:disable=broad-except
if hasattr(e, "ag_error_metadata"):
raise e.ag_error_metadata.to_exception(e)
else:
raise
# Wrapping around a decorator allows checks like tf_inspect.getargspec
# to be accurate.
converted_func = tf_decorator.make_decorator(original_func, wrapper)
python_func = tf_decorator.rewrap(python_func, original_func,
converted_func)
else:
_, original_func = tf_decorator.unwrap(python_func)
func_outputs = python_func(*func_args, **func_kwargs)
# invariant: `func_outputs` contains only Tensors, CompositeTensors,
# TensorArrays and `None`s.
func_outputs = nest.map_structure(convert, func_outputs,
expand_composites=True)
check_mutation(func_args_before, func_args, original_func)
check_mutation(func_kwargs_before, func_kwargs, original_func)
finally:
      current_scope.set_use_resource(default_use_resource)
# Variables in `func_args`, `func_kwargs` should be explicit inputs
# to the function, not captured inputs.
graph_variables = list(func_graph._watched_variables) # pylint: disable=protected-access
arg_variables = object_identity.ObjectIdentitySet()
inputs = []
for arg in (nest.flatten(func_args, expand_composites=True) +
nest.flatten(func_kwargs, expand_composites=True)):
if isinstance(arg, resource_variable_ops.BaseResourceVariable):
# Even if an argument variable was not used in the function, we've
# already manually captured the resource Tensor when creating argument
# placeholders.
resource_placeholder = func_graph.pop_capture(arg.handle)
if resource_placeholder is None:
continue
arg_variables.add(arg)
inputs.append(resource_placeholder)
elif isinstance(arg, ops.Tensor):
inputs.append(arg)
variables = [v for v in graph_variables if v not in arg_variables]
func_graph.inputs = (
inputs + func_graph.internal_captures + nest.flatten(
func_graph.deferred_internal_captures, expand_composites=True))
func_graph.structured_outputs = func_outputs
# Returning a closed-over tensor does not trigger convert_to_tensor.
func_graph.outputs.extend(
func_graph.capture(x)
for x in flatten(func_graph.structured_outputs)
if x is not None)
func_graph.variables = variables
if add_control_dependencies:
func_graph.control_outputs.extend(deps_control_manager.ops_which_must_run)
func_graph.collective_manager_ids_used = (
deps_control_manager.collective_manager_ids_used)
return func_graph
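# Illustrative usage sketch (not part of the original module): tracing a plain
# Python function into a FuncGraph. The function and the eager constants below
# are assumptions chosen for the example.
#
#   def add(a, b):
#     return a + b
#
#   fg = func_graph_from_py_func(
#       "add", add,
#       args=(constant_op.constant(1.0), constant_op.constant(2.0)),
#       kwargs={})
#   # fg.inputs holds the two argument placeholders, fg.outputs the sum, and
#   # fg.structured_input_signature the matching TensorSpecs.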
def maybe_captured(tensor):
"""If t is a captured value placeholder, returns the original captured value.
Args:
tensor: Tensor.
Returns:
A tensor, potentially from a different Graph/FuncGraph.
"""
if (not isinstance(tensor, ops.EagerTensor) and
tensor.op.graph.building_function and tensor.op.type == "Placeholder"):
for input_t, placeholder_t in tensor.op.graph.captures:
if tensor == placeholder_t:
return maybe_captured(input_t)
# pylint: enable=protected-access
return tensor
def device_stack_has_callable(device_stack):
"""Checks whether a device stack contains a callable."""
return any(callable(spec._device_name_or_function) # pylint: disable=protected-access
for spec in device_stack.peek_objs())
def check_mutation(n1, n2, func):
"""Check if two list of arguments are exactly the same."""
func_name = getattr(func, "__name__", func)
errmsg = ("{}() should not modify its Python input arguments."
" Check if it modifies any lists or dicts passed as"
" arguments. Modifying a copy is allowed.".format(func_name))
try:
# TODO(mdan): Compare more robustly so that argument names can be reported.
nest.assert_same_structure(n1, n2, expand_composites=True)
except ValueError:
raise ValueError(errmsg)
for arg1, arg2 in zip(nest.flatten(n1, expand_composites=True),
nest.flatten(n2, expand_composites=True)):
if arg1 is not arg2:
raise ValueError(errmsg)
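# Illustrative usage sketch (not part of the original module): `check_mutation`
# flags arguments that were swapped out or modified in place between the
# "before" snapshot and the post-trace values. The values below are assumptions.
#
#   args = [constant_op.constant(1.0)]
#   snapshot = nest.pack_sequence_as(args, nest.flatten(args))
#   args[0] = constant_op.constant(2.0)            # in-place mutation
#   check_mutation(snapshot, args, lambda: None)   # raises ValueError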
# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.
def flatten(sequence):
"""Like nest.flatten w/ expand_composites, but returns flow for TensorArrays.
Args:
sequence: A nested structure of Tensors, CompositeTensors, and
TensorArrays.
Returns:
A list of tensors.
"""
flat_sequence = nest.flatten(sequence, expand_composites=True)
return [
item.flow if isinstance(item, tensor_array_ops.TensorArray) else item
for item in flat_sequence]
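# Illustrative usage sketch (not part of the original module): `flatten`
# substitutes a TensorArray's flow tensor so the structure can cross function
# boundaries. The values are assumptions chosen for the example.
#
#   ta = tensor_array_ops.TensorArray(dtypes.float32, size=2)
#   leaves = flatten({"ta": ta, "x": constant_op.constant(3.0)})
#   # -> [ta.flow, <tf.Tensor 3.0>]   (dict keys are flattened in sorted order)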
# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.
def pack_sequence_as(structure, flat_sequence):
"""Like `nest.pack_sequence_as` but also builds TensorArrays from flows.
Args:
structure: The structure to pack into. May contain Tensors,
CompositeTensors, or TensorArrays.
flat_sequence: An iterable containing tensors.
Returns:
A nested structure.
Raises:
AssertionError if `structure` and `flat_sequence` are not compatible.
"""
flat_sequence = list(flat_sequence)
flattened_structure = nest.flatten(structure, expand_composites=True)
if len(flattened_structure) != len(flat_sequence):
raise ValueError("Mismatch in element count")
for i in range(len(flat_sequence)):
if isinstance(flattened_structure[i], tensor_array_ops.TensorArray):
flat_sequence[i] = tensor_array_ops.build_ta_with_new_flow(
old_ta=flattened_structure[i], flow=flat_sequence[i])
return nest.pack_sequence_as(structure, flat_sequence, expand_composites=True)
def _create_substitute_placeholder(value, name=None, dtype=None, shape=None):
"""Creates a placeholder for `value` and propagates shape info to it."""
# Note: setting ops.control_dependencies(None) ensures we always put
# capturing placeholders outside of any control flow context.
if shape is None:
shape = value.shape
with ops.control_dependencies(None):
placeholder = graph_placeholder(
dtype=dtype or value.dtype, shape=shape, name=name)
custom_gradient.copy_handle_data(value, placeholder)
return placeholder
def _get_defun_inputs_from_args(args, names, flat_shapes=None):
"""Maps Python function positional args to graph-construction inputs."""
return _get_defun_inputs(
args, names, structure=args, flat_shapes=flat_shapes)
def _get_composite_tensor_spec(x):
"""Returns the TypeSpec for x if it's a composite tensor, or x otherwise."""
return (x._type_spec # pylint: disable=protected-access
if isinstance(x, composite_tensor.CompositeTensor) else x)
def _get_defun_inputs(args, names, structure, flat_shapes=None):
"""Maps python function args to graph-construction inputs.
Args:
args: A flat list of user-specified arguments.
names: A list of strings with user-specified argument names, same length as
`args`. May be `None`, in which case a generic name is used.
structure: The original argument list or dictionary.
flat_shapes: A flat list of values that are either `None` or
instances of `TensorShape`. If provided, then length must match
that of `nest.flatten(args, expand_composites=True)`; and locations where
`args` are instances of `Tensor` must have a corresponding `TensorShape`
in `flat_shapes`. May be `None`, in which case exact shapes are read
directly from the args.
Returns:
Placeholders with the same structure as `structure`.
Raises:
RuntimeError: if `flat_shapes` is provided, but
`len(flat_shapes) != len(nest.flatten(args, expand_composites=True))`.
RuntimeError: if a shape from `flat_shapes` is not None
for an argument that is not a `Tensor`, `TensorSpec`,
or `ResourceVariable`.
"""
func_graph = ops.get_default_graph()
function_inputs = []
if names is None:
names = [None] * len(args)
if flat_shapes is None:
shapes_iter = itertools.repeat(None)
else:
len_flat_args = len(nest.flatten(args, expand_composites=True))
if len_flat_args != len(flat_shapes):
raise RuntimeError(
"Length of fully flat shapes (%d) must match that of "
"flatten(args) (%d). args: %s, flat_shapes: %s"
% (len(flat_shapes),
len_flat_args,
args,
flat_shapes))
shapes_iter = iter(flat_shapes)
for arg_value, name in zip(args, names):
# Replace any composite tensors with their TypeSpecs. This is important
# for ensuring that shape information that's not preserved by the TypeSpec
# (such as the number of values in a SparseTensor) gets properly masked.
arg_value = nest.map_structure(_get_composite_tensor_spec, arg_value)
flattened = nest.flatten(arg_value, expand_composites=True)
for arg in flattened:
# We have a shape entry for each arg, regardless of whether it's a real
# Tensor or not. For non-tensor entries it should be None.
shape = next(shapes_iter)
if isinstance(arg, (ops.Tensor, tensor_spec.TensorSpec)):
arg_is_spec = isinstance(arg, tensor_spec.TensorSpec)
if arg_is_spec and arg.name:
requested_name = arg.name
else:
requested_name = name
placeholder_shape = shape if shape is not None else arg.shape
try:
placeholder = graph_placeholder(
arg.dtype, placeholder_shape,
name=requested_name)
except ValueError:
# Sometimes parameter names are not valid op names, so fall back to
# unnamed placeholders.
placeholder = graph_placeholder(arg.dtype, placeholder_shape)
if not arg_is_spec:
custom_gradient.copy_handle_data(arg, placeholder)
if name is not None:
# Record the requested/user-specified name in case it's different than
# the uniquified name, for validation when exporting signatures.
placeholder.op._set_attr( # pylint: disable=protected-access
"_user_specified_name",
attr_value_pb2.AttrValue(s=compat.as_bytes(requested_name)))
function_inputs.append(placeholder)
elif isinstance(arg, (resource_variable_ops.BaseResourceVariable,
resource_variable_ops.VariableSpec)):
if isinstance(arg, resource_variable_ops.VariableSpec):
name = arg.name or name
with func_graph.outer_graph.as_default():
placeholder = graph_placeholder(dtypes.resource, arg.shape,
name=name)
arg = resource_variable_ops.BaseResourceVariable(
name=name,
shape=arg.shape,
dtype=arg.dtype,
handle=placeholder,
handle_name=name)
# Capture arg variables to create placeholders for them. These will be
# removed as captures after the function is traced (since otherwise we'd
# just add it back with a new placeholder when the variable was
# referenced).
placeholder = func_graph.capture(arg.handle, name=name)
placeholder.op._set_attr( # pylint: disable=protected-access
"_user_specified_name",
attr_value_pb2.AttrValue(s=compat.as_bytes(name)))
function_inputs.append(arg)
else:
if shape is not None:
raise RuntimeError(
"Expected provided shape override to be None for arg that isn't "
"a Tensor, but saw arg: '%s', shape: '%s'. args: %s"
% (arg, shape, args))
function_inputs.append(arg)
return nest.pack_sequence_as(structure, function_inputs,
expand_composites=True)
def _get_defun_inputs_from_kwargs(kwargs, flat_shapes):
"""Maps Python function keyword args to graph-construction inputs."""
if kwargs:
names, args = zip(*sorted(kwargs.items()))
else:
names = []
args = []
return _get_defun_inputs(
args, names, structure=kwargs, flat_shapes=flat_shapes)
def dismantle_func_graph(func_graph):
"""Removes reference cycles in `func_graph` FuncGraph.
Helpful for making sure the garbage collector doesn't need to run when
the FuncGraph goes out of scope, e.g. in tests using defun with
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True).
Args:
func_graph: A `FuncGraph` object to destroy. `func_graph` is unusable
after this function.
"""
func_graph.clear_captures()
ops.dismantle_graph(func_graph)
def override_func_graph_name_scope(func_graph, name_scope):
func_graph._name_stack = name_scope # pylint: disable=protected-access
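# Illustrative usage sketch (not part of the original module): tearing down a
# traced FuncGraph once it is no longer needed. `fg` is assumed to be a
# FuncGraph obtained as in the `func_graph_from_py_func` sketch above.
#
#   override_func_graph_name_scope(fg, "renamed_scope/")
#   dismantle_func_graph(fg)   # `fg` is unusable afterwards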
| 41.721324
| 97
| 0.694811
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as py_collections
import itertools
import weakref
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import tape
from tensorflow.python.eager.graph_only_ops import graph_placeholder
from tensorflow.python.framework import auto_control_deps
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import compat
from tensorflow.python.util import memory
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
ALLOWLIST_COLLECTIONS = [
ops.GraphKeys.GLOBAL_VARIABLES,
ops.GraphKeys.LOCAL_VARIABLES,
ops.GraphKeys.TRAINABLE_VARIABLES,
    variable_scope._VARSTORE_KEY,
    variable_scope._VARSCOPESTORE_KEY
]
_EAGER_CONST_THRESHOLD = 128
class UnknownArgument(object):
pass
def convert_structure_to_signature(structure, arg_names=None):
def encode_arg(arg, path):
if isinstance(arg, ops.Tensor):
user_specified_name = None
try:
user_specified_name = compat.as_str(
arg.op.get_attr("_user_specified_name"))
except ValueError:
pass
if path and user_specified_name and user_specified_name != path[0]:
name = user_specified_name
else:
name = "/".join(str(p) for p in path)
return tensor_spec.TensorSpec(arg.shape, arg.dtype, name)
if isinstance(arg, composite_tensor.CompositeTensor):
      return arg._type_spec
    if isinstance(arg, resource_variable_ops.BaseResourceVariable):
name = "/".join(str(p) for p in path)
return resource_variable_ops.VariableSpec(arg.shape, arg.dtype, name)
if isinstance(arg, (
int,
float,
bool,
str,
type(None),
dtypes.DType,
tensor_spec.TensorSpec,
type_spec.TypeSpec,
)):
return arg
return UnknownArgument()
flattened = nest.flatten_with_tuple_paths(structure)
if arg_names:
if len(arg_names) != len(structure):
raise ValueError(
"Passed in arg_names don't match actual signature (%s)." % arg_names)
# Replace all top-level names with their actual arg_names. If a path before
# was "(2,'a',1)", it will become "(arg_names[2],'a',1)".
flattened = [
((arg_names[path[0]],) + path[1:], arg) for path, arg in flattened
]
mapped = [encode_arg(arg, path) for path, arg in flattened]
return nest.pack_sequence_as(structure, mapped)
class FuncGraph(ops.Graph):
def __init__(self, name, collections=None, capture_by_value=None):
super(FuncGraph, self).__init__()
self.name = name
self.inputs = []
self.outputs = []
self.control_outputs = []
self.control_captures = set()
self.structured_input_signature = None
self.structured_outputs = None
self._weak_variables = []
self._watched_variables = object_identity.ObjectIdentityWeakSet()
self.is_control_flow_graph = False
outer_graph = ops.get_default_graph()
self._weak_outer_graph = weakref.ref(outer_graph)
while outer_graph.building_function:
outer_graph = outer_graph.outer_graph
# If self._weak_outer_graph is deleted, we revert to the outermost Graph
# active when the FuncGraph was traced. This will not be a FuncGraph.
self._fallback_outer_graph = outer_graph
self._captures = py_collections.OrderedDict()
# If not None, records the names of output args of this function. Used to
# preserve the output names in the signature of a serialized+deserialized
# function. Private at the moment mostly because it's often out of date.
self._output_names = None
self._deferred_captures = py_collections.OrderedDict()
if capture_by_value is not None:
self.capture_by_value = capture_by_value
elif self.outer_graph is not None and isinstance(
self.outer_graph, FuncGraph):
self.capture_by_value = self.outer_graph.capture_by_value
else:
self.capture_by_value = False
self._building_function = True
self._last_op_using_resource_tensor = {}
graph = self.outer_graph
if context.executing_eagerly():
self.seed = context.global_seed()
self._seed_used = False
else:
self.seed = graph.seed
self._seed_used = False
self._colocation_stack = graph._colocation_stack.copy()
if collections is None:
for collection_name in graph.get_all_collection_keys():
if collection_name not in ALLOWLIST_COLLECTIONS:
self._collections[collection_name] = graph.get_collection(
collection_name)
for collection_name in ALLOWLIST_COLLECTIONS:
self._collections[collection_name] = graph.get_collection_ref(
collection_name)
else:
self._collections = collections
self._saveable = True
self._saving_errors = set()
self._scope_exit_callbacks = None
def __str__(self):
return "FuncGraph(name=%s, id=%s)" % (self.name, id(self))
def watch_variable(self, v):
while self is not None and isinstance(self, FuncGraph):
self._watched_variables.add(v)
self = self.outer_graph
def capture_call_time_value(self, closure, spec, key=None):
if key is None:
key = object()
if key not in self._deferred_captures:
def convert_to_placeholder(s):
if not isinstance(s, tensor_spec.DenseSpec):
raise TypeError(
"Expected a nest of `TypeSpec` objects, found %s of type %s." %
(s, type(s)))
return array_ops.placeholder(dtype=s.dtype, shape=s.shape)
placeholder = nest.map_structure(
convert_to_placeholder, spec, expand_composites=True)
def wrapped_closure():
ret_nest = closure()
nest.assert_same_structure(spec, ret_nest, expand_composites=True)
y = nest.map_structure(lambda s, r: s._to_components(r), spec, ret_nest,
expand_composites=False)
return nest.flatten(y, expand_composites=True)
self._deferred_captures[key] = (wrapped_closure, placeholder)
return self._deferred_captures[key][1]
def control_dependencies(self, control_inputs):
if control_inputs is None:
return super(FuncGraph, self).control_dependencies(control_inputs)
filtered_control_inputs = []
for c in control_inputs:
if (isinstance(c, ops.IndexedSlices) or
(hasattr(c, "_handle") and hasattr(c, "op"))):
c = c.op
      graph_element = ops._as_graph_element(c)
      if graph_element is None:
graph_element = c
if graph_element is not None and getattr(
graph_element, "graph", None) is not self:
self.control_captures.add(graph_element)
else:
filtered_control_inputs.append(graph_element)
return super(FuncGraph, self).control_dependencies(filtered_control_inputs)
def as_default(self):
outer_cm = super(FuncGraph, self).as_default()
@tf_contextlib.contextmanager
def inner_cm():
graph = ops.get_default_graph()
old_strategy_stack = self._distribution_strategy_stack
self._distribution_strategy_stack = list(
graph._distribution_strategy_stack)
# so that the same trace of the Python function may be placed on several
# different devices and saved functions may be placed on new devices when
# restored.
# However, we need to preserve the outer device stack in the following
# cases in non eager context:
# 1. device stack is callable
# 2. When using distribution strategy with legacy graph mode.
old_device_stack = self._device_function_stack
if (not context.executing_eagerly() and
(device_stack_has_callable(graph._device_function_stack) or
(self._distribution_strategy_stack and
not ops.executing_eagerly_outside_functions()))):
# Hard-code devices from device functions in the function body
self._device_function_stack = graph._device_function_stack.copy()
old_creator_stack = self._variable_creator_stack
self._variable_creator_stack = graph._variable_creator_stack
# Inherit the graph key, since this is used for matching variables in
# optimizers.
old_graph_key = self._graph_key
self._graph_key = graph._graph_key
# pylint: enable=protected-access
old_scope_exit_callbacks = self._scope_exit_callbacks
self._scope_exit_callbacks = []
with outer_cm as g:
try:
yield g
finally:
try:
for fn in self._scope_exit_callbacks:
fn()
finally:
self._scope_exit_callbacks = old_scope_exit_callbacks
self._distribution_strategy_stack = old_strategy_stack
self._device_function_stack = old_device_stack
self._variable_creator_stack = old_creator_stack
self._graph_key = old_graph_key
return inner_cm()
@property
def outer_graph(self):
current = self._weak_outer_graph()
if current is None:
return self._fallback_outer_graph
return current
@outer_graph.setter
def outer_graph(self, new_outer_graph):
self._weak_outer_graph = weakref.ref(new_outer_graph)
@property
def output_types(self):
return [t.dtype for t in self.outputs]
@property
def output_shapes(self):
return [t.shape for t in self.outputs]
@property
def trainable_variables(self):
return tuple(v for v in self.variables if v.trainable)
@property
def variables(self):
def deref(weak_v):
v = weak_v()
if v is None:
raise AssertionError(
"Called a function referencing variables which have been deleted. "
"This likely means that function-local variables were created and "
"not referenced elsewhere in the program. This is generally a "
"mistake; consider storing variables in an object attribute on "
"first call.")
return v
return tuple(deref(v) for v in self._weak_variables)
@variables.setter
def variables(self, var_list):
self._weak_variables = [weakref.ref(v) for v in var_list]
def _capture_by_value(
self,
op_type,
inputs,
dtypes, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_device=True):
# When capturing by value, do the read outside
reverse_captures = dict((id(v), k) for k, v in self.captures)
uncaptured_inputs = [reverse_captures.get(id(t), t) for t in inputs]
with ops.init_scope():
if context.executing_eagerly():
attr_list = ("dtype", int(attrs["dtype"].type))
value, = execute.execute(
compat.as_bytes(op_type), 1, uncaptured_inputs, attr_list,
context.context())
else:
op = ops.get_default_graph()._create_op_internal( # pylint: disable=protected-access
op_type,
uncaptured_inputs,
dtypes,
input_types,
name,
attrs,
op_def,
compute_device)
value = op.outputs[0]
captured_value = self.capture(value)
return captured_value.op
def _create_op_internal(
self,
op_type,
inputs,
dtypes=None, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_device=True):
if self.capture_by_value and op_type in ["ReadVariableOp",
"ResourceGather"]:
return self._capture_by_value(op_type, inputs, dtypes, input_types, name,
attrs, op_def, compute_device)
# This capturing logic interacts poorly with control flow contexts which
# want to replace inputs of ops far too late in the process. This can lead
# the context to get confused and try to create an Enter for an Enter. We
# can detect this here and skip the additional Enter which can confuse loop
# validation logic.
if op_type == "Enter" and inputs[0].op.type == "Enter":
if inputs[0].op.get_attr("frame_name") == attrs["frame_name"].s:
return inputs[0].op
# Calling AddValue on the control flow contexts to force creation of the
# backward accumulators in the original graph before we create placeholders
# to capture the inputs.
ctxt = ops.get_default_graph()._control_flow_context # pylint: disable=protected-access
# Use a different list to avoid modifying the original inputs list.
captured_inputs = []
for inp in inputs:
# TPU Estimator defines a control flow context with no AddValue method.
if ctxt is not None and hasattr(ctxt, "AddValue"):
inp = ctxt.AddValue(inp)
inp = self.capture(inp)
captured_inputs.append(inp)
return super(FuncGraph, self)._create_op_internal( # pylint: disable=protected-access
op_type, captured_inputs, dtypes, input_types, name, attrs, op_def,
compute_device)
def capture(self, tensor, name=None, shape=None):
if isinstance(tensor, ops.EagerTensor):
if name is None:
name = str(ops.uid())
# Small EagerTensors are captured with Const ops
if (tensor.dtype in dtypes.TF_VALUE_DTYPES and
np.prod(tensor.shape) <= _EAGER_CONST_THRESHOLD):
return self.capture_eager_tensor(tensor, name)
# Large EagerTensors and resources are captured with Placeholder ops
return self._capture_helper(tensor, name, shape)
if tensor.graph is not self:
if name is None:
name = tensor.op.name
inner_graph = tensor.graph
while inner_graph is not None and isinstance(inner_graph, FuncGraph):
if inner_graph is self:
raise errors.InaccessibleTensorError(
"The tensor '%s' cannot be accessed here: it is defined"
" in another function or code block. Use return values,"
" explicit Python locals or TensorFlow collections to access"
" it. Defined in: %s; accessed from: %s.\n"
% (tensor, tensor.graph, self))
inner_graph = inner_graph.outer_graph
return self._capture_helper(tensor, name)
return tensor
def _capture_helper(self, tensor, name, shape=None):
capture = self._captures.get(id(tensor))
if capture is None:
placeholder = _create_substitute_placeholder(
tensor, name=name, dtype=tensor.dtype, shape=shape)
# Record the composite device as an attribute to the placeholder.
# This attribute would be propogated into the arg_attr of the FunctionDef.
# Currently, a packed eager tensor is always placed on a CompositeDevice.
if isinstance(tensor, ops.EagerTensor) and tensor.is_packed:
placeholder.op._set_attr( # pylint: disable=protected-access
"_composite_device",
attr_value_pb2.AttrValue(s=compat.as_bytes(tensor.device)))
self.add_capture(tensor, placeholder)
else:
placeholder = capture[1]
tape.record_operation("captured_value", [placeholder], [tensor],
backward_function=lambda x: [x],
forward_function=lambda x: [x])
return placeholder
@property
def captures(self):
return self._captures.values()
def add_capture(self, tensor, placeholder):
self._captures[id(tensor)] = (tensor, placeholder)
self.inputs.append(placeholder)
def replace_capture(self, tensor, placeholder):
self._captures[id(tensor)] = (tensor, placeholder)
def reset_captures(self, capture_list):
self._captures = py_collections.OrderedDict()
for tensor, placeholder in capture_list:
self._captures[id(tensor)] = (tensor, placeholder)
def pop_capture(self, tensor):
capture = self._captures.pop(id(tensor), None)
if capture is None:
return None
return capture[1]
def clear_captures(self):
# TODO(b/115366440): Delete this method when a custom OrderedDict is added.
# Clearing captures using clear() leaves some cycles around.
while self._captures:
self._captures.popitem()
memory.dismantle_ordered_dict(self._captures)
while self._deferred_captures:
self._deferred_captures.popitem()
memory.dismantle_ordered_dict(self._deferred_captures)
def capture_distributed_variable(self, variable, placeholder):
self._captures[id(variable)] = (variable, placeholder)
tape.record_operation("captured_value", [placeholder], [variable],
backward_function=lambda x: [x],
forward_function=lambda x: [x])
def capture_eager_tensor(self, tensor, name):
capture = self._captures.get(id(tensor))
if capture is None:
# We clear all control dependencies and place the Const op on the same
# device as the source tensor. The device placement may be relaxed at
# a later date.
with ops.control_dependencies(None), self.device(tensor.device):
constant_value = tensor_util.constant_value(tensor)
if constant_value is None:
# Some eager tensors, e.g. parallel tensors, are not convertible to a
# single constant. We'll use a placeholder for this case.
return self._capture_helper(tensor, name)
graph_const = constant_op.constant(constant_value, dtype=tensor.dtype,
shape=tensor.shape, name=name)
self.add_capture(tensor, graph_const)
else:
graph_const = capture[1]
tape.record_operation("captured_value", [graph_const], [tensor],
backward_function=lambda x: [x],
forward_function=lambda x: [x])
return graph_const
def captured(self, tensor):
return id(tensor) in self._captures
@property
def external_captures(self):
return [c[0] for c in self._captures.values()]
@property
def internal_captures(self):
return [c[1] for c in self._captures.values()]
@property
def deferred_external_captures(self):
return [c[0] for c in self._deferred_captures.values()]
@property
def deferred_internal_captures(self):
return [c[1] for c in self._deferred_captures.values()]
@property
def variable_captures(self):
return {
id(self._captures[id(v)][1]): v
for v in self.variables
if id(v) in self._captures
}
def mark_as_unsaveable(self, error_message):
self._saveable = False
if isinstance(error_message, str):
error_message = [error_message]
self._saving_errors.update(error_message)
@property
def saveable(self):
return self._saveable
@property
def saving_errors(self):
return self._saving_errors
def _add_scope_exit_callback(self, fn):
if not callable(fn):
raise TypeError("fn is not callable: {}".format(fn))
if self._scope_exit_callbacks is None:
raise RuntimeError(
"Attempting to add a scope exit callback, but the default graph is "
"not the context scope graph. Did you forget to call "
"'with graph.as_default(): ...'?")
self._scope_exit_callbacks.append(fn)
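
# --- Illustrative sketch (added in editing, not part of the original
# TensorFlow source): how the capture machinery above surfaces through the
# public API.  Assumes TensorFlow 2.x is installed; `ConcreteFunction.graph`
# is a FuncGraph, so the `external_captures`/`internal_captures` properties
# inspected below are the ones defined in this class.
def _editor_example_captures():
  import tensorflow as tf

  v = tf.Variable(3.0)  # lives outside the traced graph

  @tf.function
  def f(x):
    return x * v  # `v` is captured rather than passed as an argument

  concrete = f.get_concrete_function(tf.TensorSpec([], tf.float32))
  graph = concrete.graph  # a FuncGraph instance
  # One external capture (the variable's resource handle) paired with the
  # placeholder standing in for it inside the graph.
  return graph.external_captures, graph.internal_captures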
def func_graph_from_py_func(name,
python_func,
args,
kwargs,
signature=None,
func_graph=None,
autograph=False,
autograph_options=None,
add_control_dependencies=True,
arg_names=None,
op_return_value=None,
collections=None,
capture_by_value=None,
override_flat_arg_shapes=None):
if op_return_value is not None:
assert isinstance(op_return_value, ops.Tensor), op_return_value
if func_graph is None:
func_graph = FuncGraph(name, collections=collections,
capture_by_value=capture_by_value)
assert isinstance(func_graph, FuncGraph)
if add_control_dependencies:
deps_control_manager = auto_control_deps.AutomaticControlDependencies()
else:
deps_control_manager = ops.NullContextmanager()
with func_graph.as_default(), deps_control_manager as deps_ctx:
current_scope = variable_scope.get_variable_scope()
default_use_recource = current_scope.use_resource
current_scope.set_use_resource(True)
if signature is not None and override_flat_arg_shapes is not None:
raise ValueError(
"Passed both signature and override_flat_arg_shapes: %s and %s."
% (signature, override_flat_arg_shapes))
if signature is not None:
args = signature
kwargs = {}
if override_flat_arg_shapes is not None:
flat_args = nest.flatten(args, expand_composites=True)
arg_shapes = override_flat_arg_shapes[:len(flat_args)]
kwarg_shapes = override_flat_arg_shapes[len(flat_args):]
else:
arg_shapes = None
kwarg_shapes = None
func_args = _get_defun_inputs_from_args(
args, arg_names, flat_shapes=arg_shapes)
func_kwargs = _get_defun_inputs_from_kwargs(
kwargs, flat_shapes=kwarg_shapes)
    # Convert all Tensors into TensorSpecs before saving the structured inputs.
    # If storing pure concrete functions that are not called through
    # polymorphic functions, we don't have access to FunctionSpec, so we need
    # to call the TensorSpecs by their `arg_names` for later binding.
func_graph.structured_input_signature = (
convert_structure_to_signature(func_args, arg_names),
convert_structure_to_signature(func_kwargs))
flat_func_args = nest.flatten(func_args, expand_composites=True)
flat_func_kwargs = nest.flatten(func_kwargs, expand_composites=True)
# Temporarily set inputs to allow graph building code to inspect
# them. Reassigned below.
func_graph.inputs = [arg for arg in flat_func_args + flat_func_kwargs
if isinstance(arg, ops.Tensor)]
# Note: `nest.flatten` sorts by keys, as does `_deterministic_dict_values`.
# Variables to help check whether mutation happens in calling the function
# Copy the recursive list, tuple and map structure, but not base objects
func_args_before = nest.pack_sequence_as(func_args, flat_func_args,
expand_composites=True)
func_kwargs_before = nest.pack_sequence_as(
func_kwargs, flat_func_kwargs, expand_composites=True)
def convert(x):
if x is None:
return None
if op_return_value is not None and isinstance(x, ops.Operation):
      # TODO(b/79881896): we currently can't capture external control deps, so
      # this won't work if x needs to be captured (i.e. if python_func returns
      # captured Operations).
with ops.control_dependencies([x]):
x = array_ops.identity(op_return_value)
elif not isinstance(x, tensor_array_ops.TensorArray):
try:
x = ops.convert_to_tensor_or_composite(x)
except (ValueError, TypeError):
raise TypeError(
"To be compatible with tf.eager.defun, Python functions "
"must return zero or more Tensors; in compilation of %s, found "
"return value of type %s, which is not a Tensor." %
(str(python_func), type(x)))
if add_control_dependencies:
x = deps_ctx.mark_as_return(x)
return x
try:
if autograph:
from tensorflow.python import autograph # pylint: disable=g-import-not-at-top
_, original_func = tf_decorator.unwrap(python_func)
def wrapper(*args, **kwargs):
# TODO(mdan): Push this block higher in tf.function's call stack.
try:
return autograph.converted_call(
original_func,
args,
kwargs,
options=autograph.ConversionOptions(
recursive=True,
optional_features=autograph_options,
user_requested=True,
))
          except Exception as e:  # pylint:disable=broad-except
            if hasattr(e, "ag_error_metadata"):
raise e.ag_error_metadata.to_exception(e)
else:
raise
converted_func = tf_decorator.make_decorator(original_func, wrapper)
python_func = tf_decorator.rewrap(python_func, original_func,
converted_func)
else:
_, original_func = tf_decorator.unwrap(python_func)
func_outputs = python_func(*func_args, **func_kwargs)
func_outputs = nest.map_structure(convert, func_outputs,
expand_composites=True)
check_mutation(func_args_before, func_args, original_func)
check_mutation(func_kwargs_before, func_kwargs, original_func)
finally:
current_scope.set_use_resource(default_use_recource)
    graph_variables = list(func_graph._watched_variables)  # pylint: disable=protected-access
    arg_variables = object_identity.ObjectIdentitySet()
inputs = []
for arg in (nest.flatten(func_args, expand_composites=True) +
nest.flatten(func_kwargs, expand_composites=True)):
if isinstance(arg, resource_variable_ops.BaseResourceVariable):
        # Even if an argument variable was not used in the function, we've
        # already manually captured the resource Tensor when creating argument
        # placeholders.
resource_placeholder = func_graph.pop_capture(arg.handle)
if resource_placeholder is None:
continue
arg_variables.add(arg)
inputs.append(resource_placeholder)
elif isinstance(arg, ops.Tensor):
inputs.append(arg)
variables = [v for v in graph_variables if v not in arg_variables]
func_graph.inputs = (
inputs + func_graph.internal_captures + nest.flatten(
func_graph.deferred_internal_captures, expand_composites=True))
func_graph.structured_outputs = func_outputs
# Returning a closed-over tensor does not trigger convert_to_tensor.
func_graph.outputs.extend(
func_graph.capture(x)
for x in flatten(func_graph.structured_outputs)
if x is not None)
func_graph.variables = variables
if add_control_dependencies:
func_graph.control_outputs.extend(deps_control_manager.ops_which_must_run)
func_graph.collective_manager_ids_used = (
deps_control_manager.collective_manager_ids_used)
return func_graph
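
# --- Illustrative sketch (added in editing, not part of the original file):
# driving the tracing helper above directly.  `func_graph_from_py_func` is an
# internal API; the import path below is where this module lives in the
# TensorFlow source tree and may change between releases.
def _editor_example_trace():
  import tensorflow as tf
  from tensorflow.python.framework import func_graph as func_graph_module

  def add_fn(x, y):
    return tf.add(x, y)

  graph = func_graph_module.func_graph_from_py_func(
      name="add_graph",
      python_func=add_fn,
      args=(tf.constant(1.0), tf.constant(2.0)),
      kwargs={})
  # `inputs` holds the placeholders built by _get_defun_inputs_from_args;
  # `structured_outputs` is the return value after the `convert` pass above.
  return graph.inputs, graph.structured_outputs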
def maybe_captured(tensor):
if (not isinstance(tensor, ops.EagerTensor) and
tensor.op.graph.building_function and tensor.op.type == "Placeholder"):
for input_t, placeholder_t in tensor.op.graph.captures:
if tensor == placeholder_t:
return maybe_captured(input_t)
# pylint: enable=protected-access
return tensor
def device_stack_has_callable(device_stack):
return any(callable(spec._device_name_or_function) # pylint: disable=protected-access
for spec in device_stack.peek_objs())
def check_mutation(n1, n2, func):
func_name = getattr(func, "__name__", func)
errmsg = ("{}() should not modify its Python input arguments."
" Check if it modifies any lists or dicts passed as"
" arguments. Modifying a copy is allowed.".format(func_name))
try:
# TODO(mdan): Compare more robustly so that argument names can be reported.
nest.assert_same_structure(n1, n2, expand_composites=True)
except ValueError:
raise ValueError(errmsg)
for arg1, arg2 in zip(nest.flatten(n1, expand_composites=True),
nest.flatten(n2, expand_composites=True)):
if arg1 is not arg2:
raise ValueError(errmsg)
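
# --- Illustrative sketch (added in editing, not part of the original file):
# the kind of argument mutation that `check_mutation` above rejects.  Tracing
# a function that appends to a list it received changes the flattened input
# structure, so the ValueError with the message built above is raised.
# Assumes TensorFlow 2.x.
def _editor_example_mutation():
  import tensorflow as tf

  @tf.function
  def bad(values):
    values.append(tf.constant(1.0))  # mutates the caller's Python list
    return values

  try:
    bad([tf.constant(0.0)])
  except ValueError as mutation_error:
    return mutation_error  # "bad() should not modify its Python input arguments. ..."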
# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.
def flatten(sequence):
flat_sequence = nest.flatten(sequence, expand_composites=True)
return [
item.flow if isinstance(item, tensor_array_ops.TensorArray) else item
for item in flat_sequence]
# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.
def pack_sequence_as(structure, flat_sequence):
flat_sequence = list(flat_sequence)
flattened_structure = nest.flatten(structure, expand_composites=True)
if len(flattened_structure) != len(flat_sequence):
raise ValueError("Mismatch in element count")
for i in range(len(flat_sequence)):
if isinstance(flattened_structure[i], tensor_array_ops.TensorArray):
flat_sequence[i] = tensor_array_ops.build_ta_with_new_flow(
old_ta=flattened_structure[i], flow=flat_sequence[i])
return nest.pack_sequence_as(structure, flat_sequence, expand_composites=True)
def _create_substitute_placeholder(value, name=None, dtype=None, shape=None):
# Note: setting ops.control_dependencies(None) ensures we always put
# capturing placeholders outside of any control flow context.
if shape is None:
shape = value.shape
with ops.control_dependencies(None):
placeholder = graph_placeholder(
dtype=dtype or value.dtype, shape=shape, name=name)
custom_gradient.copy_handle_data(value, placeholder)
return placeholder
def _get_defun_inputs_from_args(args, names, flat_shapes=None):
return _get_defun_inputs(
args, names, structure=args, flat_shapes=flat_shapes)
def _get_composite_tensor_spec(x):
return (x._type_spec # pylint: disable=protected-access
if isinstance(x, composite_tensor.CompositeTensor) else x)
def _get_defun_inputs(args, names, structure, flat_shapes=None):
func_graph = ops.get_default_graph()
function_inputs = []
if names is None:
names = [None] * len(args)
if flat_shapes is None:
shapes_iter = itertools.repeat(None)
else:
len_flat_args = len(nest.flatten(args, expand_composites=True))
if len_flat_args != len(flat_shapes):
raise RuntimeError(
"Length of fully flat shapes (%d) must match that of "
"flatten(args) (%d). args: %s, flat_shapes: %s"
% (len(flat_shapes),
len_flat_args,
args,
flat_shapes))
shapes_iter = iter(flat_shapes)
for arg_value, name in zip(args, names):
# Replace any composite tensors with their TypeSpecs. This is important
# for ensuring that shape information that's not preserved by the TypeSpec
arg_value = nest.map_structure(_get_composite_tensor_spec, arg_value)
flattened = nest.flatten(arg_value, expand_composites=True)
for arg in flattened:
      # We have a shape entry for each arg, regardless of whether it's a real
      # Tensor or not.  For non-tensor entries it should be None.
shape = next(shapes_iter)
if isinstance(arg, (ops.Tensor, tensor_spec.TensorSpec)):
arg_is_spec = isinstance(arg, tensor_spec.TensorSpec)
if arg_is_spec and arg.name:
requested_name = arg.name
else:
requested_name = name
placeholder_shape = shape if shape is not None else arg.shape
try:
placeholder = graph_placeholder(
arg.dtype, placeholder_shape,
name=requested_name)
except ValueError:
# Sometimes parameter names are not valid op names, so fall back to
# unnamed placeholders.
placeholder = graph_placeholder(arg.dtype, placeholder_shape)
if not arg_is_spec:
custom_gradient.copy_handle_data(arg, placeholder)
if name is not None:
          # Record the requested/user-specified name in case it's different
          # than the uniquified name, for validation when exporting signatures.
          placeholder.op._set_attr(  # pylint: disable=protected-access
              "_user_specified_name",
attr_value_pb2.AttrValue(s=compat.as_bytes(requested_name)))
function_inputs.append(placeholder)
elif isinstance(arg, (resource_variable_ops.BaseResourceVariable,
resource_variable_ops.VariableSpec)):
if isinstance(arg, resource_variable_ops.VariableSpec):
name = arg.name or name
with func_graph.outer_graph.as_default():
placeholder = graph_placeholder(dtypes.resource, arg.shape,
name=name)
arg = resource_variable_ops.BaseResourceVariable(
name=name,
shape=arg.shape,
dtype=arg.dtype,
handle=placeholder,
handle_name=name)
        # Capture arg variables to create placeholders for them. These will be
        # removed as captures after the function is traced (since otherwise
        # we'd just add it back with a new placeholder when the variable was
        # referenced).
placeholder = func_graph.capture(arg.handle, name=name)
placeholder.op._set_attr( # pylint: disable=protected-access
"_user_specified_name",
attr_value_pb2.AttrValue(s=compat.as_bytes(name)))
function_inputs.append(arg)
else:
if shape is not None:
raise RuntimeError(
"Expected provided shape override to be None for arg that isn't "
"a Tensor, but saw arg: '%s', shape: '%s'. args: %s"
% (arg, shape, args))
function_inputs.append(arg)
return nest.pack_sequence_as(structure, function_inputs,
expand_composites=True)
def _get_defun_inputs_from_kwargs(kwargs, flat_shapes):
if kwargs:
names, args = zip(*sorted(kwargs.items()))
else:
names = []
args = []
return _get_defun_inputs(
args, names, structure=kwargs, flat_shapes=flat_shapes)
def dismantle_func_graph(func_graph):
func_graph.clear_captures()
ops.dismantle_graph(func_graph)
def override_func_graph_name_scope(func_graph, name_scope):
func_graph._name_stack = name_scope
| true
| true
|
f70bd7df44af8f81da661aee94f5ea0c7b3f53cd
| 245
|
py
|
Python
|
scripts/project_package_name/setup.py
|
godzilla-but-nicer/SporeLoss
|
8159a628e5f17191254583c053891070ba3d6e7f
|
[
"MIT"
] | null | null | null |
scripts/project_package_name/setup.py
|
godzilla-but-nicer/SporeLoss
|
8159a628e5f17191254583c053891070ba3d6e7f
|
[
"MIT"
] | null | null | null |
scripts/project_package_name/setup.py
|
godzilla-but-nicer/SporeLoss
|
8159a628e5f17191254583c053891070ba3d6e7f
|
[
"MIT"
] | 1
|
2022-01-10T00:40:05.000Z
|
2022-01-10T00:40:05.000Z
|
#!/usr/bin/env python
# encoding: utf-8
from distutils.core import setup
setup(name='project_package_name',
version='0.1',
description = 'project description',
author = '...',
packages = ['project_package_name'],
)
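
# Editor's note (illustrative, not part of the original file): with the real
# package name and metadata filled in, the package is typically installed from
# this directory with `pip install .`.  `distutils` is deprecated on recent
# Pythons, so `from setuptools import setup` is a common drop-in replacement
# for the import above.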
| 20.416667
| 43
| 0.636735
|
from distutils.core import setup
setup(name='project_package_name',
version='0.1',
description = 'project description',
author = '...',
packages = ['project_package_name'],
)
| true
| true
|
f70bda91f9c7115e3b24b393a5a89e703a6ef8f7
| 2,137
|
py
|
Python
|
model.py
|
JulianNovakovic/Vislice
|
061a252e6aafd60157b740cfcca9b2d76ff27926
|
[
"MIT"
] | null | null | null |
model.py
|
JulianNovakovic/Vislice
|
061a252e6aafd60157b740cfcca9b2d76ff27926
|
[
"MIT"
] | null | null | null |
model.py
|
JulianNovakovic/Vislice
|
061a252e6aafd60157b740cfcca9b2d76ff27926
|
[
"MIT"
] | null | null | null |
STEVILO_DOVOLJENIH_NAPAK = 10
PRAVILNA_CRKA = '+'
PONOVLJENA_CRKA = 'o'
NAPACNA_CRKA = '-'
ZMAGA = 'W'
PORAZ = 'X'
class Igra:
def __init__(self, geslo, crke):
self.geslo = geslo
self.crke = crke[:]
def napacne_crke(self):
return [crka for crka in self.crke if crka not in self.geslo]
def pravilne_crke(self):
return [crka for crka in self.crke if crka in self.geslo]
def stevilo_napak(self):
return len(self.napacne_crke())
def zmaga(self):
vse_crke = True
for crka in self.geslo:
if crka in self.pravilne_crke():
pass
else:
vse_crke = False
break
        # alternative one-liner: vse_crke = all(crka in self.crke for crka in self.geslo)
return vse_crke and STEVILO_DOVOLJENIH_NAPAK >= self.stevilo_napak()
def poraz(self):
return STEVILO_DOVOLJENIH_NAPAK < self.stevilo_napak()
def pravilni_del_gesla(self):
delni = ''
ugibanje = [crka.upper() for crka in self.crke]
for crka in self.geslo:
if crka.upper() in ugibanje:
delni += crka
else:
delni += '_ '
return delni.strip()
def nepravili_ugibi(self):
return ' '.join(self.napacne_crke())
def ugibaj(self, crka):
crka = crka.upper()
if crka in self.crke:
return PONOVLJENA_CRKA
elif crka in self.geslo:
self.crke.append(crka)
if self.zmaga():
return ZMAGA
else:
return PRAVILNA_CRKA
else:
self.crke.append(crka)
if self.poraz():
return PORAZ
else:
return NAPACNA_CRKA
with open('Vislice/besede.txt', 'r') as f:
bazen_besed = [beseda.strip().upper() for beseda in f.readlines()]
import random
def nova_igra():
geslo = random.choice(bazen_besed)
return Igra(geslo, [])
# testno_geslo = 'DEŽUJE'
# testne_crke = ['A', 'E', 'I', 'O', 'U', 'D', 'J', 'K', 'Ž']
# igra = Igra(testno_geslo, testne_crke)
# print(testno_geslo)
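
# --- Illustrative example (editor addition, not part of the original file): a
# scripted round against a fixed word, exercising only the Igra API defined
# above.  The word 'PYTHON' is an arbitrary example.
def _editor_example_round():
    igra = Igra('PYTHON', [])
    assert igra.ugibaj('o') == PRAVILNA_CRKA    # correct letter
    assert igra.ugibaj('o') == PONOVLJENA_CRKA  # repeated letter
    assert igra.ugibaj('x') == NAPACNA_CRKA     # wrong letter, 1 of 10 allowed
    for crka in 'pyth':
        igra.ugibaj(crka)
    return igra.ugibaj('n'), igra.pravilni_del_gesla()  # (ZMAGA, 'PYTHON')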
| 24.848837
| 76
| 0.560131
|
STEVILO_DOVOLJENIH_NAPAK = 10
PRAVILNA_CRKA = '+'
PONOVLJENA_CRKA = 'o'
NAPACNA_CRKA = '-'
ZMAGA = 'W'
PORAZ = 'X'
class Igra:
def __init__(self, geslo, crke):
self.geslo = geslo
self.crke = crke[:]
def napacne_crke(self):
return [crka for crka in self.crke if crka not in self.geslo]
def pravilne_crke(self):
return [crka for crka in self.crke if crka in self.geslo]
def stevilo_napak(self):
return len(self.napacne_crke())
def zmaga(self):
vse_crke = True
for crka in self.geslo:
if crka in self.pravilne_crke():
pass
else:
vse_crke = False
break
return vse_crke and STEVILO_DOVOLJENIH_NAPAK >= self.stevilo_napak()
def poraz(self):
return STEVILO_DOVOLJENIH_NAPAK < self.stevilo_napak()
def pravilni_del_gesla(self):
delni = ''
ugibanje = [crka.upper() for crka in self.crke]
for crka in self.geslo:
if crka.upper() in ugibanje:
delni += crka
else:
delni += '_ '
return delni.strip()
def nepravili_ugibi(self):
return ' '.join(self.napacne_crke())
def ugibaj(self, crka):
crka = crka.upper()
if crka in self.crke:
return PONOVLJENA_CRKA
elif crka in self.geslo:
self.crke.append(crka)
if self.zmaga():
return ZMAGA
else:
return PRAVILNA_CRKA
else:
self.crke.append(crka)
if self.poraz():
return PORAZ
else:
return NAPACNA_CRKA
with open('Vislice/besede.txt', 'r') as f:
bazen_besed = [beseda.strip().upper() for beseda in f.readlines()]
import random
def nova_igra():
geslo = random.choice(bazen_besed)
return Igra(geslo, [])
| true
| true
|
f70bdaa5cd1ef8f895b12e61c5190d35da36ec24
| 9,996
|
py
|
Python
|
tools/yaml-nic-config-2-script.py
|
smolar/tripleo-heat-templates
|
6b858eb39f96cc2a81a115246fd4a2ef6a0b0097
|
[
"Apache-2.0"
] | null | null | null |
tools/yaml-nic-config-2-script.py
|
smolar/tripleo-heat-templates
|
6b858eb39f96cc2a81a115246fd4a2ef6a0b0097
|
[
"Apache-2.0"
] | null | null | null |
tools/yaml-nic-config-2-script.py
|
smolar/tripleo-heat-templates
|
6b858eb39f96cc2a81a115246fd4a2ef6a0b0097
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import collections
import datetime
import os
import re
import shutil
import six
import sys
import traceback
import yaml
def parse_opts(argv):
parser = argparse.ArgumentParser(
description='Convert an old style NIC config file into the new format '
'using run-os-net-config.sh')
parser.add_argument('--script-dir', metavar='<script directory>',
help="Relative path to run-os-net-config.sh",
default="network/scripts/run-os-net-config.sh")
parser.add_argument('files', nargs="+", metavar='<file>',
help='List of one or more NIC config files to convert')
parser.add_argument('--yes',
action='store_true',
help=("Use --yes to skip the confirmation "
"to overwrite the original config file "),
)
opts = parser.parse_args(argv[1:])
return opts
def to_commented_yaml(filename):
"""Convert comments into 'comments<num>: ...' YAML"""
out_str = ''
last_non_comment_spaces = ''
with open(filename, 'r') as f:
comment_count = 0
for line in f:
# skip blank line
if line.isspace():
continue
char_count = 0
spaces = ''
for char in line:
char_count += 1
if char == ' ':
spaces += ' '
next
elif char == '#':
last_non_comment_spaces = spaces
comment_count += 1
comment = line[char_count:-1]
out_str += "%scomment%i_%i: '%s'\n" % \
(last_non_comment_spaces, comment_count, len(spaces),
comment)
break
else:
last_non_comment_spaces = spaces
out_str += line
# inline comments check
m = re.match(".*:.*#(.*)", line)
if m:
comment_count += 1
out_str += "%s inline_comment%i: '%s'\n" % \
(last_non_comment_spaces, comment_count,
m.group(1))
break
with open(filename, 'w') as f:
f.write(out_str)
return out_str
def to_normal_yaml(filename):
"""Convert back to normal #commented YAML"""
with open(filename, 'r') as f:
data = f.read()
out_str = ''
next_line_break = False
for line in data.split('\n'):
# get_input not supported by run-os-net-config.sh script
line = line.replace('get_input: ', '')
# normal comments
m = re.match(" +comment[0-9]+_([0-9]+): '(.*)'.*", line)
# inline comments
i = re.match(" +inline_comment[0-9]+: '(.*)'.*", line)
if m:
if next_line_break:
out_str += '\n'
next_line_break = False
for x in range(0, int(m.group(1))):
out_str += " "
out_str += "#%s\n" % m.group(2)
elif i:
out_str += " #%s\n" % i.group(1)
next_line_break = False
else:
if next_line_break:
out_str += '\n'
out_str += line
next_line_break = True
if next_line_break:
out_str += '\n'
with open(filename, 'w') as f:
f.write(out_str)
return out_str
class description(six.text_type):
pass
# FIXME: Some of this duplicates code from build_endpoint_map.py, we should
# refactor to share the common code
class TemplateDumper(yaml.SafeDumper):
def represent_ordered_dict(self, data):
return self.represent_dict(data.items())
def description_presenter(self, data):
if '\n' in data:
style = '>'
else:
style = ''
return self.represent_scalar(
yaml.resolver.BaseResolver.DEFAULT_SCALAR_TAG, data, style=style)
# We load mappings into OrderedDict to preserve their order
class TemplateLoader(yaml.SafeLoader):
def construct_mapping(self, node):
self.flatten_mapping(node)
return collections.OrderedDict(self.construct_pairs(node))
TemplateDumper.add_representer(description,
TemplateDumper.description_presenter)
TemplateDumper.add_representer(collections.OrderedDict,
TemplateDumper.represent_ordered_dict)
TemplateLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
TemplateLoader.construct_mapping)
def write_template(template, filename=None):
with open(filename, 'w') as f:
yaml.dump(template, f, TemplateDumper, width=120,
default_flow_style=False)
def convert(filename, script_path):
print('Converting %s' % filename)
try:
with open(filename, 'r') as f:
tpl = yaml.load(f.read(), Loader=TemplateLoader)
except Exception:
print(traceback.format_exc())
return 0
for r in (tpl.get('resources', {})).items():
if (r[1].get('type') == 'OS::Heat::StructuredConfig' and
r[1].get('properties', {}).get('group') == 'os-apply-config' and
r[1].get('properties', {}).get('config', {}).get('os_net_config')):
new_r = collections.OrderedDict()
new_r['type'] = 'OS::Heat::SoftwareConfig'
new_r['properties'] = collections.OrderedDict()
new_r['properties']['group'] = 'script'
old_net_config = r[1].get(
'properties', {}).get('config', {}).get('os_net_config')
new_config = {'str_replace': collections.OrderedDict()}
new_config['str_replace']['template'] = {'get_file': script_path}
new_config['str_replace']['params'] = \
{'$network_config': old_net_config}
new_r['properties']['config'] = new_config
tpl['resources'][r[0]] = new_r
else:
print("No match %s" % r[0])
return 0
# Preserve typical HOT template key ordering
od_result = collections.OrderedDict()
# Need to bump the HOT version so str_replace supports serializing to json
od_result['heat_template_version'] = "rocky"
if tpl.get('description'):
od_result['description'] = description(tpl['description'])
od_result['parameters'] = tpl['parameters']
od_result['resources'] = tpl['resources']
od_result['outputs'] = tpl['outputs']
write_template(od_result, filename)
return 1
def check_old_style(filename):
with open(filename, 'r') as f:
tpl = yaml.load(f.read(), Loader=yaml.SafeLoader)
if isinstance(tpl.get('resources', {}), dict):
for r in (tpl.get('resources', {})).items():
if (r[1].get('type') == 'OS::Heat::StructuredConfig' and
r[1].get('properties', {}).get('group') == 'os-apply-config' and
r[1].get('properties', {}).get('config', {}).get('os_net_config')):
return True
return False
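
# --- Illustrative sketch (editor addition, not part of the original script):
# how the helpers above combine for a single file, mirroring the command-line
# loop below.  The relative script path is only an example; any reachable
# run-os-net-config.sh works.
def _example_convert_one(path):
    if not check_old_style(path):
        return 0
    to_commented_yaml(path)       # stash '#' comments as commentN_M keys
    converted = convert(path, '../../scripts/run-os-net-config.sh')
    to_normal_yaml(path)          # turn them back into '#' comments
    return converted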
opts = parse_opts(sys.argv)
exit_val = 0
num_converted = 0
for base_path in opts.files:
if os.path.isfile(base_path) and base_path.endswith('.yaml'):
if check_old_style(base_path):
# Check for script in the user entered (or default) location or in
# path relative to NIC config files
script_paths = [opts.script_dir]
script_paths.append('../../scripts/run-os-net-config.sh')
script_paths.append('../network/scripts/run-os-net-config.sh')
script_paths.append('/usr/share/openstack-tripleo-heat-templates/'
'network/scripts/run-os-net-config.sh')
script_path = None
for p in script_paths:
if os.path.isfile(os.path.join(os.path.dirname(base_path), p)):
script_path = p
break
if script_path is None:
print("Error couldn't find run-os-net-config.sh relative "
"to filename")
sys.exit(1)
print("Using script at %s" % script_path)
extension = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
backup_filename = os.path.realpath(base_path) + '.' + extension
print('The yaml file will be overwritten and the original saved '
'as %s' % backup_filename)
if not (opts.yes or
input("Overwrite %s? [y/n] " % base_path).lower() == 'y'):
print("Skipping file %s" % base_path)
continue
if os.path.exists(backup_filename):
print("Backup file already exists, skipping file %s" %
base_path)
continue
shutil.copyfile(base_path, backup_filename)
to_commented_yaml(base_path)
num_converted += convert(base_path, script_path)
to_normal_yaml(base_path)
else:
print('File %s is not using old style NIC configuration' %
base_path)
else:
print('Unexpected argument %s' % base_path)
if num_converted == 0:
exit_val = 1
sys.exit(exit_val)
| 33.884746
| 87
| 0.560524
|
import argparse
import collections
import datetime
import os
import re
import shutil
import six
import sys
import traceback
import yaml
def parse_opts(argv):
parser = argparse.ArgumentParser(
description='Convert an old style NIC config file into the new format '
'using run-os-net-config.sh')
parser.add_argument('--script-dir', metavar='<script directory>',
help="Relative path to run-os-net-config.sh",
default="network/scripts/run-os-net-config.sh")
parser.add_argument('files', nargs="+", metavar='<file>',
help='List of one or more NIC config files to convert')
parser.add_argument('--yes',
action='store_true',
help=("Use --yes to skip the confirmation "
"to overwrite the original config file "),
)
opts = parser.parse_args(argv[1:])
return opts
def to_commented_yaml(filename):
out_str = ''
last_non_comment_spaces = ''
with open(filename, 'r') as f:
comment_count = 0
for line in f:
if line.isspace():
continue
char_count = 0
spaces = ''
for char in line:
char_count += 1
if char == ' ':
spaces += ' '
next
elif char == '#':
last_non_comment_spaces = spaces
comment_count += 1
comment = line[char_count:-1]
out_str += "%scomment%i_%i: '%s'\n" % \
(last_non_comment_spaces, comment_count, len(spaces),
comment)
break
else:
last_non_comment_spaces = spaces
out_str += line
m = re.match(".*:.*#(.*)", line)
if m:
comment_count += 1
out_str += "%s inline_comment%i: '%s'\n" % \
(last_non_comment_spaces, comment_count,
m.group(1))
break
with open(filename, 'w') as f:
f.write(out_str)
return out_str
def to_normal_yaml(filename):
with open(filename, 'r') as f:
data = f.read()
out_str = ''
next_line_break = False
for line in data.split('\n'):
line = line.replace('get_input: ', '')
m = re.match(" +comment[0-9]+_([0-9]+): '(.*)'.*", line)
i = re.match(" +inline_comment[0-9]+: '(.*)'.*", line)
if m:
if next_line_break:
out_str += '\n'
next_line_break = False
for x in range(0, int(m.group(1))):
out_str += " "
out_str += "#%s\n" % m.group(2)
elif i:
out_str += " #%s\n" % i.group(1)
next_line_break = False
else:
if next_line_break:
out_str += '\n'
out_str += line
next_line_break = True
if next_line_break:
out_str += '\n'
with open(filename, 'w') as f:
f.write(out_str)
return out_str
class description(six.text_type):
pass
class TemplateDumper(yaml.SafeDumper):
def represent_ordered_dict(self, data):
return self.represent_dict(data.items())
def description_presenter(self, data):
if '\n' in data:
style = '>'
else:
style = ''
return self.represent_scalar(
yaml.resolver.BaseResolver.DEFAULT_SCALAR_TAG, data, style=style)
class TemplateLoader(yaml.SafeLoader):
def construct_mapping(self, node):
self.flatten_mapping(node)
return collections.OrderedDict(self.construct_pairs(node))
TemplateDumper.add_representer(description,
TemplateDumper.description_presenter)
TemplateDumper.add_representer(collections.OrderedDict,
TemplateDumper.represent_ordered_dict)
TemplateLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
TemplateLoader.construct_mapping)
def write_template(template, filename=None):
with open(filename, 'w') as f:
yaml.dump(template, f, TemplateDumper, width=120,
default_flow_style=False)
def convert(filename, script_path):
print('Converting %s' % filename)
try:
with open(filename, 'r') as f:
tpl = yaml.load(f.read(), Loader=TemplateLoader)
except Exception:
print(traceback.format_exc())
return 0
for r in (tpl.get('resources', {})).items():
if (r[1].get('type') == 'OS::Heat::StructuredConfig' and
r[1].get('properties', {}).get('group') == 'os-apply-config' and
r[1].get('properties', {}).get('config', {}).get('os_net_config')):
new_r = collections.OrderedDict()
new_r['type'] = 'OS::Heat::SoftwareConfig'
new_r['properties'] = collections.OrderedDict()
new_r['properties']['group'] = 'script'
old_net_config = r[1].get(
'properties', {}).get('config', {}).get('os_net_config')
new_config = {'str_replace': collections.OrderedDict()}
new_config['str_replace']['template'] = {'get_file': script_path}
new_config['str_replace']['params'] = \
{'$network_config': old_net_config}
new_r['properties']['config'] = new_config
tpl['resources'][r[0]] = new_r
else:
print("No match %s" % r[0])
return 0
od_result = collections.OrderedDict()
od_result['heat_template_version'] = "rocky"
if tpl.get('description'):
od_result['description'] = description(tpl['description'])
od_result['parameters'] = tpl['parameters']
od_result['resources'] = tpl['resources']
od_result['outputs'] = tpl['outputs']
write_template(od_result, filename)
return 1
def check_old_style(filename):
with open(filename, 'r') as f:
tpl = yaml.load(f.read(), Loader=yaml.SafeLoader)
if isinstance(tpl.get('resources', {}), dict):
for r in (tpl.get('resources', {})).items():
if (r[1].get('type') == 'OS::Heat::StructuredConfig' and
r[1].get('properties', {}).get('group') == 'os-apply-config' and
r[1].get('properties', {}).get('config', {}).get('os_net_config')):
return True
return False
opts = parse_opts(sys.argv)
exit_val = 0
num_converted = 0
for base_path in opts.files:
if os.path.isfile(base_path) and base_path.endswith('.yaml'):
if check_old_style(base_path):
script_paths = [opts.script_dir]
script_paths.append('../../scripts/run-os-net-config.sh')
script_paths.append('../network/scripts/run-os-net-config.sh')
script_paths.append('/usr/share/openstack-tripleo-heat-templates/'
'network/scripts/run-os-net-config.sh')
script_path = None
for p in script_paths:
if os.path.isfile(os.path.join(os.path.dirname(base_path), p)):
script_path = p
break
if script_path is None:
print("Error couldn't find run-os-net-config.sh relative "
"to filename")
sys.exit(1)
print("Using script at %s" % script_path)
extension = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
backup_filename = os.path.realpath(base_path) + '.' + extension
print('The yaml file will be overwritten and the original saved '
'as %s' % backup_filename)
if not (opts.yes or
input("Overwrite %s? [y/n] " % base_path).lower() == 'y'):
print("Skipping file %s" % base_path)
continue
if os.path.exists(backup_filename):
print("Backup file already exists, skipping file %s" %
base_path)
continue
shutil.copyfile(base_path, backup_filename)
to_commented_yaml(base_path)
num_converted += convert(base_path, script_path)
to_normal_yaml(base_path)
else:
print('File %s is not using old style NIC configuration' %
base_path)
else:
print('Unexpected argument %s' % base_path)
if num_converted == 0:
exit_val = 1
sys.exit(exit_val)
| true
| true
|
f70bdc3a15e7b88c2514b1fcf2a401008c840d78
| 336
|
py
|
Python
|
geekshop/basketapp/urls.py
|
TonyBrother32/Django-shop
|
723a1eb9ff5b74fa968e8c09268bbcbb2fed857c
|
[
"MIT"
] | null | null | null |
geekshop/basketapp/urls.py
|
TonyBrother32/Django-shop
|
723a1eb9ff5b74fa968e8c09268bbcbb2fed857c
|
[
"MIT"
] | null | null | null |
geekshop/basketapp/urls.py
|
TonyBrother32/Django-shop
|
723a1eb9ff5b74fa968e8c09268bbcbb2fed857c
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
app_name = 'basketapp'
urlpatterns = [
path('', views.view, name='view'),
path('add/<int:product_id>/', views.add, name='add'),
    path('remove/<int:basket_item_id>/', views.remove, name='remove'),
path('edit/<int:basket_item_id>/<int:quantity>/', views.edit, name='edit'),
]
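
# --- Illustrative sketch (editor addition, not part of the original file):
# with app_name = 'basketapp' these routes are reversed through the namespace,
# for example from a view or template; the product id is an arbitrary example.
#
#   from django.urls import reverse
#   reverse('basketapp:add', kwargs={'product_id': 1})
#   # -> '/add/1/' below whatever prefix includes basketapp.urls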
| 30.545455
| 78
| 0.666667
|
from django.urls import path
from . import views
app_name = 'basketapp'
urlpatterns = [
path('', views.view, name='view'),
path('add/<int:product_id>/', views.add, name='add'),
    path('remove/<int:basket_item_id>/', views.remove, name='remove'),
path('edit/<int:basket_item_id>/<int:quantity>/', views.edit, name='edit'),
]
| true
| true
|
f70bddf32a2b465ec48d7297b542087af6cbef33
| 1,441
|
py
|
Python
|
google/cloud/monitoring_dashboard/v1/__init__.py
|
vam-google/python-monitoring-dashboards
|
effbff2703ade03269ad8ddacf4ab31637d8a799
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/monitoring_dashboard/v1/__init__.py
|
vam-google/python-monitoring-dashboards
|
effbff2703ade03269ad8ddacf4ab31637d8a799
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/monitoring_dashboard/v1/__init__.py
|
vam-google/python-monitoring-dashboards
|
effbff2703ade03269ad8ddacf4ab31637d8a799
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
import warnings
from google.cloud.monitoring_dashboard.v1 import types
from google.cloud.monitoring_dashboard.v1.gapic import dashboards_service_client
from google.cloud.monitoring_dashboard.v1.gapic import enums
if sys.version_info[:2] == (2, 7):
message = (
"A future version of this library will drop support for Python 2.7."
"More details about Python 2 support for Google Cloud Client Libraries"
"can be found at https://cloud.google.com/python/docs/python2-sunset/"
)
warnings.warn(message, DeprecationWarning)
class DashboardsServiceClient(dashboards_service_client.DashboardsServiceClient):
__doc__ = dashboards_service_client.DashboardsServiceClient.__doc__
enums = enums
__all__ = ("enums", "types", "DashboardsServiceClient")
| 34.309524
| 81
| 0.762665
|
from __future__ import absolute_import
import sys
import warnings
from google.cloud.monitoring_dashboard.v1 import types
from google.cloud.monitoring_dashboard.v1.gapic import dashboards_service_client
from google.cloud.monitoring_dashboard.v1.gapic import enums
if sys.version_info[:2] == (2, 7):
message = (
"A future version of this library will drop support for Python 2.7."
"More details about Python 2 support for Google Cloud Client Libraries"
"can be found at https://cloud.google.com/python/docs/python2-sunset/"
)
warnings.warn(message, DeprecationWarning)
class DashboardsServiceClient(dashboards_service_client.DashboardsServiceClient):
__doc__ = dashboards_service_client.DashboardsServiceClient.__doc__
enums = enums
__all__ = ("enums", "types", "DashboardsServiceClient")
| true
| true
|
f70bdf701bbdb41d790f24af7996716a5faf0ff5
| 7,004
|
py
|
Python
|
python/GafferUI/Editor.py
|
sebaDesmet/gaffer
|
47b2d093c40452bd77947e3b5bd0722a366c8d59
|
[
"BSD-3-Clause"
] | null | null | null |
python/GafferUI/Editor.py
|
sebaDesmet/gaffer
|
47b2d093c40452bd77947e3b5bd0722a366c8d59
|
[
"BSD-3-Clause"
] | null | null | null |
python/GafferUI/Editor.py
|
sebaDesmet/gaffer
|
47b2d093c40452bd77947e3b5bd0722a366c8d59
|
[
"BSD-3-Clause"
] | null | null | null |
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2012-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import types
import IECore
import Gaffer
import GafferUI
from Qt import QtCore
from Qt import QtWidgets
class _EditorMetaclass( Gaffer.Trackable.__class__ ) :
def __call__( cls, *args, **kw ) :
instance = type.__call__( cls, *args, **kw )
while hasattr( cls, "instanceCreatedSignal" ) :
cls.instanceCreatedSignal()( instance )
cls = cls.__bases__[0]
return instance
## Base class for UI components which display or manipulate a ScriptNode
# or its children. These make up the tabs in the UI layout.
class Editor( GafferUI.Widget ) :
__metaclass__ = _EditorMetaclass
def __init__( self, topLevelWidget, scriptNode, **kw ) :
GafferUI.Widget.__init__( self, topLevelWidget, **kw )
self._qtWidget().setFocusPolicy( QtCore.Qt.ClickFocus )
assert( isinstance( scriptNode, Gaffer.ScriptNode ) )
self.__scriptNode = scriptNode
self.__context = None
self.__title = ""
self.__titleChangedSignal = GafferUI.WidgetSignal()
self.enterSignal().connect( Gaffer.WeakMethod( self.__enter ), scoped = False )
self.leaveSignal().connect( Gaffer.WeakMethod( self.__leave ), scoped = False )
self.__setContextInternal( scriptNode.context(), callUpdate=False )
def scriptNode( self ) :
return self.__scriptNode
## May be called to explicitly set the title for this editor. The
# editor itself is not responsible for displaying the title - this
# is left to the enclosing ui.
def setTitle( self, title ) :
if title == self.__title :
return
self.__title = title
self.titleChangedSignal()( self )
## May be overridden to provide sensible default behaviour for
# the title, but must return BaseClass.getTitle() if it is non-empty.
def getTitle( self ) :
if self.__title :
return self.__title
# if there's no explicit title and a derived class
# has overridden getTitle() then we return the empty
# string to signify that the derived class is free
# to return what it wants
c = self.__class__
while c is not Editor :
if "getTitle" in c.__dict__ :
return ""
c = c.__bases__[0]
# otherwise we default to using the classname
return IECore.CamelCase.toSpaced( self.__class__.__name__ )
## A signal emitted whenever the title changes.
def titleChangedSignal( self ) :
return self.__titleChangedSignal
## By default Editors operate in the main context held by the script node. This function
# allows an alternative context to be provided, making it possible for an editor to
# display itself at a custom frame (or with any other context modification).
def setContext( self, context ) :
self.__setContextInternal( context, callUpdate=True )
def getContext( self ) :
return self.__context
def __setContextInternal( self, context, callUpdate ) :
assert( isinstance( context, ( Gaffer.Context, types.NoneType ) ) )
previousContext = self.__context
self.__context = context
if self.__context is not None :
self.__contextChangedConnection = self.__context.changedSignal().connect( Gaffer.WeakMethod( self.__contextChanged ) )
else :
## \todo I'm not sure why this code allows a None context - surely we
# should always have a valid one?
self.__contextChangedConnection = None
if callUpdate :
modifiedItems = set()
if previousContext is not None :
modifiedItems |= set( previousContext.names() )
if self.__context is not None :
modifiedItems |= set( self.__context.names() )
self._updateFromContext( modifiedItems )
## May be implemented by derived classes to update state based on a change of context.
# To temporarily suspend calls to this function, use Gaffer.BlockedConnection( self._contextChangedConnection() ).
def _updateFromContext( self, modifiedItems ) :
pass
def _contextChangedConnection( self ) :
return self.__contextChangedConnection
## This must be implemented by all derived classes as it is used for serialisation of layouts.
# It is not expected that the script being edited is also serialised as part of this operation -
# instead the new script will be provided later as a variable named scriptNode. So a suitable
# serialisation will look like "GafferUI.Editor( scriptNode )".
def __repr__( self ) :
raise NotImplementedError
def __contextChanged( self, context, key ) :
assert( context.isSame( self.getContext() ) )
self._updateFromContext( set( [ key ] ) )
@classmethod
def types( cls ) :
return cls.__namesToCreators.keys()
@classmethod
def create( cls, name, scriptNode ) :
return cls.__namesToCreators[name]( scriptNode = scriptNode )
@classmethod
def registerType( cls, name, creator ) :
cls.__namesToCreators[name] = creator
__namesToCreators = {}
@classmethod
def instanceCreatedSignal( cls ) :
s = cls.__dict__.get( "__instanceCreatedSignal", None )
if s is not None :
return s
s = Gaffer.Signal1()
setattr( cls, "__instanceCreatedSignal", s )
return s
def __enter( self, widget ) :
if not isinstance( QtWidgets.QApplication.focusWidget(), ( QtWidgets.QLineEdit, QtWidgets.QPlainTextEdit ) ) :
self._qtWidget().setFocus()
def __leave( self, widget ) :
self._qtWidget().clearFocus()
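
## --- Illustrative sketch (editor addition, not part of the original file):
# registering a custom editor so layouts can recreate it by name via
# Editor.create().  "MyEditor" and the label it shows are placeholders, and
# scriptNode is assumed to be an existing Gaffer.ScriptNode.
#
#   class MyEditor( Editor ) :
#       def __init__( self, scriptNode, **kw ) :
#           Editor.__init__( self, GafferUI.Label( "Hello" ), scriptNode, **kw )
#       def __repr__( self ) :
#           return "GafferUI.MyEditor( scriptNode )"
#
#   Editor.registerType( "MyEditor", MyEditor )
#   editor = Editor.create( "MyEditor", scriptNode )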
| 32.276498
| 121
| 0.719446
|
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
import types
import IECore
import Gaffer
import GafferUI
from Qt import QtCore
from Qt import QtWidgets
class _EditorMetaclass( Gaffer.Trackable.__class__ ) :
def __call__( cls, *args, **kw ) :
instance = type.__call__( cls, *args, **kw )
while hasattr( cls, "instanceCreatedSignal" ) :
cls.instanceCreatedSignal()( instance )
cls = cls.__bases__[0]
return instance
class Editor( GafferUI.Widget ) :
__metaclass__ = _EditorMetaclass
def __init__( self, topLevelWidget, scriptNode, **kw ) :
GafferUI.Widget.__init__( self, topLevelWidget, **kw )
self._qtWidget().setFocusPolicy( QtCore.Qt.ClickFocus )
assert( isinstance( scriptNode, Gaffer.ScriptNode ) )
self.__scriptNode = scriptNode
self.__context = None
self.__title = ""
self.__titleChangedSignal = GafferUI.WidgetSignal()
self.enterSignal().connect( Gaffer.WeakMethod( self.__enter ), scoped = False )
self.leaveSignal().connect( Gaffer.WeakMethod( self.__leave ), scoped = False )
self.__setContextInternal( scriptNode.context(), callUpdate=False )
def scriptNode( self ) :
return self.__scriptNode
def setTitle( self, title ) :
if title == self.__title :
return
self.__title = title
self.titleChangedSignal()( self )
def getTitle( self ) :
if self.__title :
return self.__title
# has overridden getTitle() then we return the empty
# string to signify that the derived class is free
# to return what it wants
c = self.__class__
while c is not Editor :
if "getTitle" in c.__dict__ :
return ""
c = c.__bases__[0]
# otherwise we default to using the classname
return IECore.CamelCase.toSpaced( self.__class__.__name__ )
## A signal emitted whenever the title changes.
def titleChangedSignal( self ) :
return self.__titleChangedSignal
## By default Editors operate in the main context held by the script node. This function
# allows an alternative context to be provided, making it possible for an editor to
# display itself at a custom frame (or with any other context modification).
def setContext( self, context ) :
self.__setContextInternal( context, callUpdate=True )
def getContext( self ) :
return self.__context
def __setContextInternal( self, context, callUpdate ) :
assert( isinstance( context, ( Gaffer.Context, types.NoneType ) ) )
previousContext = self.__context
self.__context = context
if self.__context is not None :
self.__contextChangedConnection = self.__context.changedSignal().connect( Gaffer.WeakMethod( self.__contextChanged ) )
else :
## \todo I'm not sure why this code allows a None context - surely we
self.__contextChangedConnection = None
if callUpdate :
modifiedItems = set()
if previousContext is not None :
modifiedItems |= set( previousContext.names() )
if self.__context is not None :
modifiedItems |= set( self.__context.names() )
self._updateFromContext( modifiedItems )
def _updateFromContext( self, modifiedItems ) :
pass
def _contextChangedConnection( self ) :
return self.__contextChangedConnection
def __repr__( self ) :
raise NotImplementedError
def __contextChanged( self, context, key ) :
assert( context.isSame( self.getContext() ) )
self._updateFromContext( set( [ key ] ) )
@classmethod
def types( cls ) :
return cls.__namesToCreators.keys()
@classmethod
def create( cls, name, scriptNode ) :
return cls.__namesToCreators[name]( scriptNode = scriptNode )
@classmethod
def registerType( cls, name, creator ) :
cls.__namesToCreators[name] = creator
__namesToCreators = {}
@classmethod
def instanceCreatedSignal( cls ) :
s = cls.__dict__.get( "__instanceCreatedSignal", None )
if s is not None :
return s
s = Gaffer.Signal1()
setattr( cls, "__instanceCreatedSignal", s )
return s
def __enter( self, widget ) :
if not isinstance( QtWidgets.QApplication.focusWidget(), ( QtWidgets.QLineEdit, QtWidgets.QPlainTextEdit ) ) :
self._qtWidget().setFocus()
def __leave( self, widget ) :
self._qtWidget().clearFocus()
| true
| true
|
f70be103caf3bbb85e059906041c636a547700ab
| 4,304
|
py
|
Python
|
tests/fixtures/pooldata.py
|
curvefi/deposit-and-stake-zap
|
2183cfa03d23b9a1e572d46332d73ad30b39845d
|
[
"MIT"
] | null | null | null |
tests/fixtures/pooldata.py
|
curvefi/deposit-and-stake-zap
|
2183cfa03d23b9a1e572d46332d73ad30b39845d
|
[
"MIT"
] | null | null | null |
tests/fixtures/pooldata.py
|
curvefi/deposit-and-stake-zap
|
2183cfa03d23b9a1e572d46332d73ad30b39845d
|
[
"MIT"
] | null | null | null |
import pytest
import brownie
from brownie import Contract, ZERO_ADDRESS
# gusd
gusd_token_address = "0xD2967f45c4f384DEEa880F807Be904762a3DeA07"
gusd_gauge_addresses = "0xC5cfaDA84E902aD92DD40194f0883ad49639b023"
# susd
susd_token_address = '0xC25a3A3b969415c80451098fa907EC722572917F'
susd_gauge_address = '0xA90996896660DEcC6E997655E065b23788857849'
@pytest.fixture(scope="module")
def swap_address(pool_data):
return pool_data['swap_address']
@pytest.fixture(scope="module")
def token_address(pool_data):
return pool_data['lp_token_address']
@pytest.fixture(scope="module")
def gauge_address(pool_data):
return pool_data['gauge_addresses'][0]
@pytest.fixture(scope="module")
def deposit_address(pool_data):
return pool_data['zap_address'] if 'zap_address' in pool_data else pool_data['swap_address']
@pytest.fixture(scope="module")
def other_token_address(pool_data):
return gusd_token_address if gusd_token_address != pool_data["lp_token_address"] else susd_token_address
@pytest.fixture(scope="module")
def other_gauge_address(pool_data):
return gusd_gauge_addresses if gusd_gauge_addresses != pool_data["gauge_addresses"][0] else susd_gauge_address
@pytest.fixture(scope="module")
def gauge(gauge_address):
return Contract(gauge_address)
@pytest.fixture(scope="module")
def underlying_decimals(pool_data, base_pool_data):
# number of decimal places for each underlying coin in the active pool
decimals = [i.get("decimals", i.get("wrapped_decimals")) for i in pool_data["coins"]]
if base_pool_data is None:
return decimals
base_decimals = [i.get("decimals", i.get("wrapped_decimals")) for i in base_pool_data["coins"]]
return decimals[:-1] + base_decimals
@pytest.fixture(scope="module")
def wrapped_decimals(pool_data):
# number of decimal places for each wrapped coin in the active pool
yield [i.get("wrapped_decimals", i.get("decimals")) for i in pool_data["coins"]]
@pytest.fixture(scope="module")
def wrapped_amounts_to_mint(wrapped_decimals):
return [100 * 10 ** i for i in wrapped_decimals]
@pytest.fixture(scope="module")
def underlying_amounts_to_mint(underlying_decimals):
return [100 * 10 ** i for i in underlying_decimals]
@pytest.fixture(scope="module")
def wrong_amounts_to_mint():
return [100 * 10 ** 18] * 5
# Different amounts are needed to always pass test_wrong_order_of_coins
@pytest.fixture(scope="module")
def wrapped_amounts(wrapped_decimals, n_coins_wrapped):
return [(10 + i) * 10 ** wrapped_decimals[i] for i in range(n_coins_wrapped)] + [0] * (5 - n_coins_wrapped)
# Different amounts are needed to always pass test_wrong_order_of_coins
@pytest.fixture(scope="module")
def underlying_amounts(underlying_decimals, n_coins_underlying):
return [(10 + i) * 10 ** underlying_decimals[i] for i in range(n_coins_underlying)] + [0] * (5 - n_coins_underlying)
@pytest.fixture(scope="module")
def n_coins_wrapped(wrapped_decimals):
return len(wrapped_decimals)
@pytest.fixture(scope="module")
def n_coins_underlying(underlying_decimals):
yield len(underlying_decimals)
@pytest.fixture(scope="module")
def value_wrapped(wrapped_amounts, wrapped_coins):
return wrapped_amounts[wrapped_coins.index(brownie.ETH_ADDRESS)] if brownie.ETH_ADDRESS in wrapped_coins else 0
@pytest.fixture(scope="module")
def value_underlying(underlying_amounts, underlying_coins):
return underlying_amounts[underlying_coins.index(brownie.ETH_ADDRESS)] if brownie.ETH_ADDRESS in underlying_coins else 0
@pytest.fixture(scope="module")
def use_underlying(pool_data):
if pool_data['swap_address'] in [
"0xDeBF20617708857ebe4F679508E7b7863a8A8EeE", # aave
"0xeb16ae0052ed37f479f7fe63849198df1765a733", # saave
"0x2dded6Da1BF5DBdF597C45fcFaa3194e53EcfeAF", # ib
"0x8301AE4fc9c624d1D396cbDAa1ed877821D7C511", # crveth (use_eth)
"0xB576491F1E6e5E62f1d8F26062Ee822B40B0E0d4", # cvxeth (use_eth)
]:
return True
return False
@pytest.fixture(scope="module")
def is_meta(pool_data):
return "meta" in pool_data.get("pool_types", [])
@pytest.fixture(scope="module")
def factory_pool_address(pool_data):
return pool_data["swap_address"] if "factory" in pool_data.get("pool_types", []) else ZERO_ADDRESS
| 31.881481
| 124
| 0.766496
|
import pytest
import brownie
from brownie import Contract, ZERO_ADDRESS
gusd_token_address = "0xD2967f45c4f384DEEa880F807Be904762a3DeA07"
gusd_gauge_addresses = "0xC5cfaDA84E902aD92DD40194f0883ad49639b023"
susd_token_address = '0xC25a3A3b969415c80451098fa907EC722572917F'
susd_gauge_address = '0xA90996896660DEcC6E997655E065b23788857849'
@pytest.fixture(scope="module")
def swap_address(pool_data):
return pool_data['swap_address']
@pytest.fixture(scope="module")
def token_address(pool_data):
return pool_data['lp_token_address']
@pytest.fixture(scope="module")
def gauge_address(pool_data):
return pool_data['gauge_addresses'][0]
@pytest.fixture(scope="module")
def deposit_address(pool_data):
return pool_data['zap_address'] if 'zap_address' in pool_data else pool_data['swap_address']
@pytest.fixture(scope="module")
def other_token_address(pool_data):
return gusd_token_address if gusd_token_address != pool_data["lp_token_address"] else susd_token_address
@pytest.fixture(scope="module")
def other_gauge_address(pool_data):
return gusd_gauge_addresses if gusd_gauge_addresses != pool_data["gauge_addresses"][0] else susd_gauge_address
@pytest.fixture(scope="module")
def gauge(gauge_address):
return Contract(gauge_address)
@pytest.fixture(scope="module")
def underlying_decimals(pool_data, base_pool_data):
decimals = [i.get("decimals", i.get("wrapped_decimals")) for i in pool_data["coins"]]
if base_pool_data is None:
return decimals
base_decimals = [i.get("decimals", i.get("wrapped_decimals")) for i in base_pool_data["coins"]]
return decimals[:-1] + base_decimals
@pytest.fixture(scope="module")
def wrapped_decimals(pool_data):
yield [i.get("wrapped_decimals", i.get("decimals")) for i in pool_data["coins"]]
@pytest.fixture(scope="module")
def wrapped_amounts_to_mint(wrapped_decimals):
return [100 * 10 ** i for i in wrapped_decimals]
@pytest.fixture(scope="module")
def underlying_amounts_to_mint(underlying_decimals):
return [100 * 10 ** i for i in underlying_decimals]
@pytest.fixture(scope="module")
def wrong_amounts_to_mint():
return [100 * 10 ** 18] * 5
@pytest.fixture(scope="module")
def wrapped_amounts(wrapped_decimals, n_coins_wrapped):
return [(10 + i) * 10 ** wrapped_decimals[i] for i in range(n_coins_wrapped)] + [0] * (5 - n_coins_wrapped)
@pytest.fixture(scope="module")
def underlying_amounts(underlying_decimals, n_coins_underlying):
return [(10 + i) * 10 ** underlying_decimals[i] for i in range(n_coins_underlying)] + [0] * (5 - n_coins_underlying)
@pytest.fixture(scope="module")
def n_coins_wrapped(wrapped_decimals):
return len(wrapped_decimals)
@pytest.fixture(scope="module")
def n_coins_underlying(underlying_decimals):
yield len(underlying_decimals)
@pytest.fixture(scope="module")
def value_wrapped(wrapped_amounts, wrapped_coins):
return wrapped_amounts[wrapped_coins.index(brownie.ETH_ADDRESS)] if brownie.ETH_ADDRESS in wrapped_coins else 0
@pytest.fixture(scope="module")
def value_underlying(underlying_amounts, underlying_coins):
return underlying_amounts[underlying_coins.index(brownie.ETH_ADDRESS)] if brownie.ETH_ADDRESS in underlying_coins else 0
@pytest.fixture(scope="module")
def use_underlying(pool_data):
if pool_data['swap_address'] in [
"0xDeBF20617708857ebe4F679508E7b7863a8A8EeE", "0xeb16ae0052ed37f479f7fe63849198df1765a733", "0x2dded6Da1BF5DBdF597C45fcFaa3194e53EcfeAF", "0x8301AE4fc9c624d1D396cbDAa1ed877821D7C511", "0xB576491F1E6e5E62f1d8F26062Ee822B40B0E0d4", ]:
return True
return False
@pytest.fixture(scope="module")
def is_meta(pool_data):
return "meta" in pool_data.get("pool_types", [])
@pytest.fixture(scope="module")
def factory_pool_address(pool_data):
return pool_data["swap_address"] if "factory" in pool_data.get("pool_types", []) else ZERO_ADDRESS
| true
| true
|
f70be16b7667d5ef4e1fe1961c226707b73ee15d
| 795
|
py
|
Python
|
todo/task/repository.py
|
matiasjavierlucero/todo-challenge
|
4c0dbc518a5e8fc9d99b6034163be14246fd6666
|
[
"MIT"
] | null | null | null |
todo/task/repository.py
|
matiasjavierlucero/todo-challenge
|
4c0dbc518a5e8fc9d99b6034163be14246fd6666
|
[
"MIT"
] | null | null | null |
todo/task/repository.py
|
matiasjavierlucero/todo-challenge
|
4c0dbc518a5e8fc9d99b6034163be14246fd6666
|
[
"MIT"
] | null | null | null |
from decimal import Decimal
from django.db.models import Sum
from django.shortcuts import get_object_or_404
from datetime import date, timedelta
from .models import Task
class TaskRepository:
"""Repository for tasks."""
def list(self):
return Task.objects.all()
def create(self, title: str, description: str, status: int):
return Task.objects.create(
title=title, description=description, status=status
)
def detail(self, id):
return get_object_or_404(Task, pk=id)
def update(self, request, id):
task = get_object_or_404(Task, pk=id)
task.status = request.data.get('status')
task.save()
return task
def destroy(self, pk=None):
task = Task.objects.get(id=pk)
task.delete()
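For orientation, a minimal sketch of how calling code might drive this repository. It assumes a configured Django project with the Task model migrated; the field values are placeholders, and update() is skipped here because it expects a request object:
# Illustrative only: assumes a configured Django environment with the
# Task model migrated; the values passed in are placeholders.
repo = TaskRepository()
task = repo.create(title="Write report", description="Q3 numbers", status=0)
print(repo.detail(task.id).title)   # fetches one task, 404s if missing
print(repo.list().count())          # number of stored tasks
repo.destroy(pk=task.id)            # removes it again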
| 25.645161
| 64
| 0.654088
|
from decimal import Decimal
from django.db.models import Sum
from django.shortcuts import get_object_or_404
from datetime import date, timedelta
from .models import Task
class TaskRepository:
def list(self):
return Task.objects.all()
def create(self, title: str, description: str, status: int):
return Task.objects.create(
title=title, description=description, status=status
)
def detail(self, id):
return get_object_or_404(Task, pk=id)
def update(self, request, id):
task = get_object_or_404(Task, pk=id)
task.status = request.data.get('status')
task.save()
return task
def destroy(self, pk=None):
task = Task.objects.get(id=pk)
task.delete()
| true
| true
|
f70be1a781ef564608b08997e1b98fa857b29328
| 17,551
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/network/v20190401/express_route_circuit_peering.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20190401/express_route_circuit_peering.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20190401/express_route_circuit_peering.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['ExpressRouteCircuitPeering']
class ExpressRouteCircuitPeering(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
azure_asn: Optional[pulumi.Input[int]] = None,
circuit_name: Optional[pulumi.Input[str]] = None,
connections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteCircuitConnectionArgs']]]]] = None,
gateway_manager_etag: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
ipv6_peering_config: Optional[pulumi.Input[pulumi.InputType['Ipv6ExpressRouteCircuitPeeringConfigArgs']]] = None,
last_modified_by: Optional[pulumi.Input[str]] = None,
microsoft_peering_config: Optional[pulumi.Input[pulumi.InputType['ExpressRouteCircuitPeeringConfigArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
peer_asn: Optional[pulumi.Input[int]] = None,
peering_name: Optional[pulumi.Input[str]] = None,
peering_type: Optional[pulumi.Input[str]] = None,
primary_azure_port: Optional[pulumi.Input[str]] = None,
primary_peer_address_prefix: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_filter: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
secondary_azure_port: Optional[pulumi.Input[str]] = None,
secondary_peer_address_prefix: Optional[pulumi.Input[str]] = None,
shared_key: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
stats: Optional[pulumi.Input[pulumi.InputType['ExpressRouteCircuitStatsArgs']]] = None,
vlan_id: Optional[pulumi.Input[int]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Peering in an ExpressRouteCircuit resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] azure_asn: The Azure ASN.
:param pulumi.Input[str] circuit_name: The name of the express route circuit.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteCircuitConnectionArgs']]]] connections: The list of circuit connections associated with Azure Private Peering for this circuit.
:param pulumi.Input[str] gateway_manager_etag: The GatewayManager Etag.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[pulumi.InputType['Ipv6ExpressRouteCircuitPeeringConfigArgs']] ipv6_peering_config: The IPv6 peering configuration.
:param pulumi.Input[str] last_modified_by: Gets whether the provider or the customer last modified the peering.
:param pulumi.Input[pulumi.InputType['ExpressRouteCircuitPeeringConfigArgs']] microsoft_peering_config: The Microsoft peering configuration.
:param pulumi.Input[str] name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[int] peer_asn: The peer ASN.
:param pulumi.Input[str] peering_name: The name of the peering.
:param pulumi.Input[str] peering_type: The peering type.
:param pulumi.Input[str] primary_azure_port: The primary port.
:param pulumi.Input[str] primary_peer_address_prefix: The primary address prefix.
:param pulumi.Input[str] provisioning_state: Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] route_filter: The reference of the RouteFilter resource.
:param pulumi.Input[str] secondary_azure_port: The secondary port.
:param pulumi.Input[str] secondary_peer_address_prefix: The secondary address prefix.
:param pulumi.Input[str] shared_key: The shared key.
:param pulumi.Input[str] state: The peering state.
:param pulumi.Input[pulumi.InputType['ExpressRouteCircuitStatsArgs']] stats: Gets peering stats.
:param pulumi.Input[int] vlan_id: The VLAN ID.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['azure_asn'] = azure_asn
if circuit_name is None:
raise TypeError("Missing required property 'circuit_name'")
__props__['circuit_name'] = circuit_name
__props__['connections'] = connections
__props__['gateway_manager_etag'] = gateway_manager_etag
__props__['id'] = id
__props__['ipv6_peering_config'] = ipv6_peering_config
__props__['last_modified_by'] = last_modified_by
__props__['microsoft_peering_config'] = microsoft_peering_config
__props__['name'] = name
__props__['peer_asn'] = peer_asn
if peering_name is None:
raise TypeError("Missing required property 'peering_name'")
__props__['peering_name'] = peering_name
__props__['peering_type'] = peering_type
__props__['primary_azure_port'] = primary_azure_port
__props__['primary_peer_address_prefix'] = primary_peer_address_prefix
__props__['provisioning_state'] = provisioning_state
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['route_filter'] = route_filter
__props__['secondary_azure_port'] = secondary_azure_port
__props__['secondary_peer_address_prefix'] = secondary_peer_address_prefix
__props__['shared_key'] = shared_key
__props__['state'] = state
__props__['stats'] = stats
__props__['vlan_id'] = vlan_id
__props__['etag'] = None
__props__['express_route_connection'] = None
__props__['peered_connections'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20150615:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20160330:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20160601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20160901:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20161201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20170301:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20170601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20170801:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20170901:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20171001:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20171101:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180101:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180401:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180701:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20181101:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190801:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20191101:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200401:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200501:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200701:ExpressRouteCircuitPeering")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ExpressRouteCircuitPeering, __self__).__init__(
'azure-nextgen:network/v20190401:ExpressRouteCircuitPeering',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ExpressRouteCircuitPeering':
"""
Get an existing ExpressRouteCircuitPeering resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return ExpressRouteCircuitPeering(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="azureASN")
def azure_asn(self) -> pulumi.Output[Optional[int]]:
"""
The Azure ASN.
"""
return pulumi.get(self, "azure_asn")
@property
@pulumi.getter
def connections(self) -> pulumi.Output[Optional[Sequence['outputs.ExpressRouteCircuitConnectionResponse']]]:
"""
The list of circuit connections associated with Azure Private Peering for this circuit.
"""
return pulumi.get(self, "connections")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="expressRouteConnection")
def express_route_connection(self) -> pulumi.Output[Optional['outputs.ExpressRouteConnectionIdResponse']]:
"""
The ExpressRoute connection.
"""
return pulumi.get(self, "express_route_connection")
@property
@pulumi.getter(name="gatewayManagerEtag")
def gateway_manager_etag(self) -> pulumi.Output[Optional[str]]:
"""
The GatewayManager Etag.
"""
return pulumi.get(self, "gateway_manager_etag")
@property
@pulumi.getter(name="ipv6PeeringConfig")
def ipv6_peering_config(self) -> pulumi.Output[Optional['outputs.Ipv6ExpressRouteCircuitPeeringConfigResponse']]:
"""
The IPv6 peering configuration.
"""
return pulumi.get(self, "ipv6_peering_config")
@property
@pulumi.getter(name="lastModifiedBy")
def last_modified_by(self) -> pulumi.Output[Optional[str]]:
"""
Gets whether the provider or the customer last modified the peering.
"""
return pulumi.get(self, "last_modified_by")
@property
@pulumi.getter(name="microsoftPeeringConfig")
def microsoft_peering_config(self) -> pulumi.Output[Optional['outputs.ExpressRouteCircuitPeeringConfigResponse']]:
"""
The Microsoft peering configuration.
"""
return pulumi.get(self, "microsoft_peering_config")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="peerASN")
def peer_asn(self) -> pulumi.Output[Optional[int]]:
"""
The peer ASN.
"""
return pulumi.get(self, "peer_asn")
@property
@pulumi.getter(name="peeredConnections")
def peered_connections(self) -> pulumi.Output[Sequence['outputs.PeerExpressRouteCircuitConnectionResponse']]:
"""
The list of peered circuit connections associated with Azure Private Peering for this circuit.
"""
return pulumi.get(self, "peered_connections")
@property
@pulumi.getter(name="peeringType")
def peering_type(self) -> pulumi.Output[Optional[str]]:
"""
The peering type.
"""
return pulumi.get(self, "peering_type")
@property
@pulumi.getter(name="primaryAzurePort")
def primary_azure_port(self) -> pulumi.Output[Optional[str]]:
"""
The primary port.
"""
return pulumi.get(self, "primary_azure_port")
@property
@pulumi.getter(name="primaryPeerAddressPrefix")
def primary_peer_address_prefix(self) -> pulumi.Output[Optional[str]]:
"""
The primary address prefix.
"""
return pulumi.get(self, "primary_peer_address_prefix")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="routeFilter")
def route_filter(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
"""
The reference of the RouteFilter resource.
"""
return pulumi.get(self, "route_filter")
@property
@pulumi.getter(name="secondaryAzurePort")
def secondary_azure_port(self) -> pulumi.Output[Optional[str]]:
"""
The secondary port.
"""
return pulumi.get(self, "secondary_azure_port")
@property
@pulumi.getter(name="secondaryPeerAddressPrefix")
def secondary_peer_address_prefix(self) -> pulumi.Output[Optional[str]]:
"""
The secondary address prefix.
"""
return pulumi.get(self, "secondary_peer_address_prefix")
@property
@pulumi.getter(name="sharedKey")
def shared_key(self) -> pulumi.Output[Optional[str]]:
"""
The shared key.
"""
return pulumi.get(self, "shared_key")
@property
@pulumi.getter
def state(self) -> pulumi.Output[Optional[str]]:
"""
The peering state.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter
def stats(self) -> pulumi.Output[Optional['outputs.ExpressRouteCircuitStatsResponse']]:
"""
Gets peering stats.
"""
return pulumi.get(self, "stats")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Type of the resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="vlanId")
def vlan_id(self) -> pulumi.Output[Optional[int]]:
"""
The VLAN ID.
"""
return pulumi.get(self, "vlan_id")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
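As a rough usage sketch: the constructor enforces circuit_name, peering_name and resource_group_name (it raises TypeError when they are missing), so a minimal program would look roughly like the following. The import path is inferred from the package layout, and every name, ASN and address below is a placeholder rather than a value from this SDK:
# Hypothetical usage; resource names, ASN and prefixes are placeholders.
import pulumi
from pulumi_azure_nextgen.network.v20190401 import ExpressRouteCircuitPeering

peering = ExpressRouteCircuitPeering(
    "examplePeering",
    circuit_name="example-circuit",
    peering_name="AzurePrivatePeering",
    resource_group_name="example-rg",
    peering_type="AzurePrivatePeering",
    peer_asn=65001,
    vlan_id=100,
    primary_peer_address_prefix="192.168.1.0/30",
    secondary_peer_address_prefix="192.168.2.0/30",
)
pulumi.export("peering_state", peering.state)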
| 51.469208
| 2,845
| 0.685089
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['ExpressRouteCircuitPeering']
class ExpressRouteCircuitPeering(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
azure_asn: Optional[pulumi.Input[int]] = None,
circuit_name: Optional[pulumi.Input[str]] = None,
connections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteCircuitConnectionArgs']]]]] = None,
gateway_manager_etag: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
ipv6_peering_config: Optional[pulumi.Input[pulumi.InputType['Ipv6ExpressRouteCircuitPeeringConfigArgs']]] = None,
last_modified_by: Optional[pulumi.Input[str]] = None,
microsoft_peering_config: Optional[pulumi.Input[pulumi.InputType['ExpressRouteCircuitPeeringConfigArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
peer_asn: Optional[pulumi.Input[int]] = None,
peering_name: Optional[pulumi.Input[str]] = None,
peering_type: Optional[pulumi.Input[str]] = None,
primary_azure_port: Optional[pulumi.Input[str]] = None,
primary_peer_address_prefix: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_filter: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
secondary_azure_port: Optional[pulumi.Input[str]] = None,
secondary_peer_address_prefix: Optional[pulumi.Input[str]] = None,
shared_key: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
stats: Optional[pulumi.Input[pulumi.InputType['ExpressRouteCircuitStatsArgs']]] = None,
vlan_id: Optional[pulumi.Input[int]] = None,
__props__=None,
__name__=None,
__opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['azure_asn'] = azure_asn
if circuit_name is None:
raise TypeError("Missing required property 'circuit_name'")
__props__['circuit_name'] = circuit_name
__props__['connections'] = connections
__props__['gateway_manager_etag'] = gateway_manager_etag
__props__['id'] = id
__props__['ipv6_peering_config'] = ipv6_peering_config
__props__['last_modified_by'] = last_modified_by
__props__['microsoft_peering_config'] = microsoft_peering_config
__props__['name'] = name
__props__['peer_asn'] = peer_asn
if peering_name is None:
raise TypeError("Missing required property 'peering_name'")
__props__['peering_name'] = peering_name
__props__['peering_type'] = peering_type
__props__['primary_azure_port'] = primary_azure_port
__props__['primary_peer_address_prefix'] = primary_peer_address_prefix
__props__['provisioning_state'] = provisioning_state
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['route_filter'] = route_filter
__props__['secondary_azure_port'] = secondary_azure_port
__props__['secondary_peer_address_prefix'] = secondary_peer_address_prefix
__props__['shared_key'] = shared_key
__props__['state'] = state
__props__['stats'] = stats
__props__['vlan_id'] = vlan_id
__props__['etag'] = None
__props__['express_route_connection'] = None
__props__['peered_connections'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20150615:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20160330:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20160601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20160901:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20161201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20170301:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20170601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20170801:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20170901:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20171001:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20171101:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180101:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180401:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180701:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20181101:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190801:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20191101:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200401:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200501:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200701:ExpressRouteCircuitPeering")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ExpressRouteCircuitPeering, __self__).__init__(
'azure-nextgen:network/v20190401:ExpressRouteCircuitPeering',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ExpressRouteCircuitPeering':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return ExpressRouteCircuitPeering(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="azureASN")
def azure_asn(self) -> pulumi.Output[Optional[int]]:
return pulumi.get(self, "azure_asn")
@property
@pulumi.getter
def connections(self) -> pulumi.Output[Optional[Sequence['outputs.ExpressRouteCircuitConnectionResponse']]]:
return pulumi.get(self, "connections")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="expressRouteConnection")
def express_route_connection(self) -> pulumi.Output[Optional['outputs.ExpressRouteConnectionIdResponse']]:
return pulumi.get(self, "express_route_connection")
@property
@pulumi.getter(name="gatewayManagerEtag")
def gateway_manager_etag(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "gateway_manager_etag")
@property
@pulumi.getter(name="ipv6PeeringConfig")
def ipv6_peering_config(self) -> pulumi.Output[Optional['outputs.Ipv6ExpressRouteCircuitPeeringConfigResponse']]:
return pulumi.get(self, "ipv6_peering_config")
@property
@pulumi.getter(name="lastModifiedBy")
def last_modified_by(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "last_modified_by")
@property
@pulumi.getter(name="microsoftPeeringConfig")
def microsoft_peering_config(self) -> pulumi.Output[Optional['outputs.ExpressRouteCircuitPeeringConfigResponse']]:
return pulumi.get(self, "microsoft_peering_config")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="peerASN")
def peer_asn(self) -> pulumi.Output[Optional[int]]:
return pulumi.get(self, "peer_asn")
@property
@pulumi.getter(name="peeredConnections")
def peered_connections(self) -> pulumi.Output[Sequence['outputs.PeerExpressRouteCircuitConnectionResponse']]:
return pulumi.get(self, "peered_connections")
@property
@pulumi.getter(name="peeringType")
def peering_type(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "peering_type")
@property
@pulumi.getter(name="primaryAzurePort")
def primary_azure_port(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "primary_azure_port")
@property
@pulumi.getter(name="primaryPeerAddressPrefix")
def primary_peer_address_prefix(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "primary_peer_address_prefix")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="routeFilter")
def route_filter(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
return pulumi.get(self, "route_filter")
@property
@pulumi.getter(name="secondaryAzurePort")
def secondary_azure_port(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "secondary_azure_port")
@property
@pulumi.getter(name="secondaryPeerAddressPrefix")
def secondary_peer_address_prefix(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "secondary_peer_address_prefix")
@property
@pulumi.getter(name="sharedKey")
def shared_key(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "shared_key")
@property
@pulumi.getter
def state(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "state")
@property
@pulumi.getter
def stats(self) -> pulumi.Output[Optional['outputs.ExpressRouteCircuitStatsResponse']]:
return pulumi.get(self, "stats")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
@property
@pulumi.getter(name="vlanId")
def vlan_id(self) -> pulumi.Output[Optional[int]]:
return pulumi.get(self, "vlan_id")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true
| true
|
f70be1eb045c7bfc67774263850d50bd2296ed60
| 1,394
|
py
|
Python
|
mdn/yari/testing/integration/conftest.py
|
private-face/webextensions-docset
|
9743056dbb3d3ccd8a20a665fcb0f98d388819a6
|
[
"MIT"
] | 1
|
2021-11-22T20:01:26.000Z
|
2021-11-22T20:01:26.000Z
|
mdn/yari/testing/integration/conftest.py
|
Kapeli/mdn-offline-build
|
e700fcc597be32dde7a2cdadeaa56343ccf0a678
|
[
"MIT"
] | 1
|
2021-07-02T18:35:07.000Z
|
2021-07-02T18:35:07.000Z
|
mdn/yari/testing/integration/conftest.py
|
Kapeli/mdn-offline-build
|
e700fcc597be32dde7a2cdadeaa56343ccf0a678
|
[
"MIT"
] | 2
|
2021-06-21T12:09:37.000Z
|
2021-07-02T12:15:52.000Z
|
from urllib.parse import urlsplit, urlunsplit
import pytest
import requests
_KUMA_STATUS = None
def pytest_configure(config):
"""Configure pytest for the Kuma deployment under test."""
global _KUMA_STATUS
# The pytest-base-url plugin adds --base-url, and sets the default from
# environment variable PYTEST_BASE_URL. If still unset, force to staging.
if config.option.base_url is None:
config.option.base_url = "https://developer.allizom.org"
base_url = config.getoption("base_url")
# Process the server status from _kuma_status.json
base_parts = urlsplit(base_url)
kuma_status_url = urlunsplit(
(base_parts.scheme, base_parts.netloc, "_kuma_status.json", "", "")
)
response = requests.get(kuma_status_url, headers={"Accept": "application/json"})
response.raise_for_status()
_KUMA_STATUS = response.json()
_KUMA_STATUS["response"] = {"headers": response.headers}
@pytest.fixture(scope="session")
def kuma_status(base_url):
return _KUMA_STATUS
@pytest.fixture(scope="session")
def is_behind_cdn(kuma_status):
return "x-amz-cf-id" in kuma_status["response"]["headers"]
@pytest.fixture(scope="session")
def media_url():
return "https://media.prod.mdn.mozit.cloud"
@pytest.fixture(scope="session")
def attachment_url(kuma_status):
return f'https://{kuma_status["settings"]["ATTACHMENT_HOST"]}'
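The hook above resolves the deployment's _kuma_status.json relative to whatever base URL pytest-base-url provides, falling back to the staging host. A small standalone sketch of that URL derivation, using the staging host purely as an example:
# Sketch of the status-URL derivation performed in pytest_configure;
# the host below is only an example value.
from urllib.parse import urlsplit, urlunsplit

base_url = "https://developer.allizom.org"
parts = urlsplit(base_url)
kuma_status_url = urlunsplit((parts.scheme, parts.netloc, "_kuma_status.json", "", ""))
print(kuma_status_url)  # https://developer.allizom.org/_kuma_status.json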
| 27.88
| 84
| 0.721664
|
from urllib.parse import urlsplit, urlunsplit
import pytest
import requests
_KUMA_STATUS = None
def pytest_configure(config):
global _KUMA_STATUS
if config.option.base_url is None:
config.option.base_url = "https://developer.allizom.org"
base_url = config.getoption("base_url")
base_parts = urlsplit(base_url)
kuma_status_url = urlunsplit(
(base_parts.scheme, base_parts.netloc, "_kuma_status.json", "", "")
)
response = requests.get(kuma_status_url, headers={"Accept": "application/json"})
response.raise_for_status()
_KUMA_STATUS = response.json()
_KUMA_STATUS["response"] = {"headers": response.headers}
@pytest.fixture(scope="session")
def kuma_status(base_url):
return _KUMA_STATUS
@pytest.fixture(scope="session")
def is_behind_cdn(kuma_status):
return "x-amz-cf-id" in kuma_status["response"]["headers"]
@pytest.fixture(scope="session")
def media_url():
return "https://media.prod.mdn.mozit.cloud"
@pytest.fixture(scope="session")
def attachment_url(kuma_status):
return f'https://{kuma_status["settings"]["ATTACHMENT_HOST"]}'
| true
| true
|
f70be1fc76ddddc85e3bee71647489d92784fa4f
| 74
|
py
|
Python
|
indoorair/foundations/urls.py
|
juby-gif/indoorair_webapp-b
|
51f8799e8b124748bec7f1e52a3b73bcb4c119a8
|
[
"BSD-3-Clause"
] | null | null | null |
indoorair/foundations/urls.py
|
juby-gif/indoorair_webapp-b
|
51f8799e8b124748bec7f1e52a3b73bcb4c119a8
|
[
"BSD-3-Clause"
] | null | null | null |
indoorair/foundations/urls.py
|
juby-gif/indoorair_webapp-b
|
51f8799e8b124748bec7f1e52a3b73bcb4c119a8
|
[
"BSD-3-Clause"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
]
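urlpatterns ships empty here; once the app grows views they would be registered in the usual Django way. A purely hypothetical example (views.index does not exist in this repository):
# Hypothetical route; views.index is not defined in this app yet.
urlpatterns = [
    path('', views.index, name='index'),
]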
| 9.25
| 28
| 0.675676
|
from django.urls import path
from . import views
urlpatterns = [
]
| true
| true
|
f70be2546519bf0303806a830af9c2a53f69831e
| 3,946
|
py
|
Python
|
test/functional/feature_minchainwork.py
|
puzcoin/catcoin
|
dc3ad8d15b0c3303e8396514dffeb7685f0edf63
|
[
"MIT"
] | null | null | null |
test/functional/feature_minchainwork.py
|
puzcoin/catcoin
|
dc3ad8d15b0c3303e8396514dffeb7685f0edf63
|
[
"MIT"
] | null | null | null |
test/functional/feature_minchainwork.py
|
puzcoin/catcoin
|
dc3ad8d15b0c3303e8396514dffeb7685f0edf63
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017 The Catcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test logic for setting nMinimumChainWork on command line.
Nodes don't consider themselves out of "initial block download" until
their active chain has more work than nMinimumChainWork.
Nodes don't download blocks from a peer unless the peer's best known block
has more work than nMinimumChainWork.
While in initial block download, nodes won't relay blocks to their peers, so
test that this parameter functions as intended by verifying that block relay
only succeeds past a given node once its nMinimumChainWork has been exceeded.
"""
import time
from test_framework.test_framework import CatcoinTestFramework
from test_framework.util import connect_nodes, assert_equal
# 2 hashes required per regtest block (with no difficulty adjustment)
REGTEST_WORK_PER_BLOCK = 2
class MinimumChainWorkTest(CatcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]]
self.node_min_work = [0, 101, 101]
def setup_network(self):
# This test relies on the chain setup being:
# node0 <- node1 <- node2
# Before leaving IBD, nodes prefer to download blocks from outbound
# peers, so ensure that we're mining on an outbound peer and testing
# block relay to inbound peers.
self.setup_nodes()
for i in range(self.num_nodes-1):
connect_nodes(self.nodes[i+1], i)
def run_test(self):
# Start building a chain on node0. node2 shouldn't be able to sync until node1's
# minchainwork is exceeded
starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work
self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1])
starting_blockcount = self.nodes[2].getblockcount()
num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
self.log.info("Generating %d blocks on node0", num_blocks_to_generate)
hashes = self.nodes[0].generate(num_blocks_to_generate)
self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork'])
# Sleep a few seconds and verify that node2 didn't get any new blocks
# or headers. We sleep, rather than sync_blocks(node0, node1) because
# it's reasonable either way for node1 to get the blocks, or not get
# them (since they're below node1's minchainwork).
time.sleep(3)
self.log.info("Verifying node 2 has no more blocks than before")
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
# Node2 shouldn't have any new headers yet, because node1 should not
# have relayed anything.
assert_equal(len(self.nodes[2].getchaintips()), 1)
assert_equal(self.nodes[2].getchaintips()[0]['height'], 0)
assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash()
assert_equal(self.nodes[2].getblockcount(), starting_blockcount)
self.log.info("Generating one more block")
self.nodes[0].generate(1)
self.log.info("Verifying nodes are all synced")
# Because nodes in regtest are all manual connections (eg using
# addnode), node1 should not have disconnected node0. If not for that,
# we'd expect node1 to have disconnected node0 for serving an
# insufficient work chain, in which case we'd need to reconnect them to
# continue the test.
self.sync_all()
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
if __name__ == '__main__':
MinimumChainWorkTest().main()
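The relay condition described in the docstring can be checked by hand: node1 runs with -minimumchainwork=0x65 (101) and each regtest block contributes two units of work on top of the genesis block's two. A back-of-the-envelope check of the block counts the test relies on:
# Back-of-the-envelope check of the test's block budget, using the
# regtest constants from the file above.
REGTEST_WORK_PER_BLOCK = 2
node1_min_work = 0x65                         # 101, passed via -minimumchainwork
starting_chain_work = REGTEST_WORK_PER_BLOCK  # genesis block's work
blocks_generated = int((node1_min_work - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
print(blocks_generated)          # 49 blocks -> total work 2 + 49*2 = 100, still below 101
print(starting_chain_work + (blocks_generated + 1) * REGTEST_WORK_PER_BLOCK)  # one more block: 102 > 101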
| 43.844444
| 108
| 0.701977
|
import time
from test_framework.test_framework import CatcoinTestFramework
from test_framework.util import connect_nodes, assert_equal
REGTEST_WORK_PER_BLOCK = 2
class MinimumChainWorkTest(CatcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]]
self.node_min_work = [0, 101, 101]
def setup_network(self):
# block relay to inbound peers.
self.setup_nodes()
for i in range(self.num_nodes-1):
connect_nodes(self.nodes[i+1], i)
def run_test(self):
# Start building a chain on node0. node2 shouldn't be able to sync until node1's
# minchainwork is exceeded
starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work
self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1])
starting_blockcount = self.nodes[2].getblockcount()
num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
self.log.info("Generating %d blocks on node0", num_blocks_to_generate)
hashes = self.nodes[0].generate(num_blocks_to_generate)
self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork'])
# or headers. We sleep, rather than sync_blocks(node0, node1) because
# it's reasonable either way for node1 to get the blocks, or not get
time.sleep(3)
self.log.info("Verifying node 2 has no more blocks than before")
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
# have relayed anything.
assert_equal(len(self.nodes[2].getchaintips()), 1)
assert_equal(self.nodes[2].getchaintips()[0]['height'], 0)
assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash()
assert_equal(self.nodes[2].getblockcount(), starting_blockcount)
self.log.info("Generating one more block")
self.nodes[0].generate(1)
self.log.info("Verifying nodes are all synced")
# Because nodes in regtest are all manual connections (eg using
# addnode), node1 should not have disconnected node0. If not for that,
# we'd expect node1 to have disconnected node0 for serving an
# continue the test.
self.sync_all()
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
if __name__ == '__main__':
MinimumChainWorkTest().main()
| true
| true
|
f70be36c4203d27c522cdba5e3ca275e4037d7b3
| 15,955
|
py
|
Python
|
qzone/Qzone.py
|
lwpdzq/spiders
|
68f471f3dd92e1a59fe9ccc130fd529f1def3644
|
[
"MIT"
] | null | null | null |
qzone/Qzone.py
|
lwpdzq/spiders
|
68f471f3dd92e1a59fe9ccc130fd529f1def3644
|
[
"MIT"
] | null | null | null |
qzone/Qzone.py
|
lwpdzq/spiders
|
68f471f3dd92e1a59fe9ccc130fd529f1def3644
|
[
"MIT"
] | null | null | null |
import time
import re
import random
import requests
from urllib import parse
import qq_init as qq
import pymongo
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
class Spider(object):
def __init__(self):
        '''
        Initialize the webdriver, the requests session, default headers and the MongoDB connection.
        '''
chrome_options = Options()
# chrome_options.add_argument('--headless')
# chrome_options.add_argument('--disable-gpu')
self.driver = webdriver.Chrome(chrome_options=chrome_options)
self.driver.get('https://i.qq.com/')
self.__username = qq.USERNAME
self.__password = qq.PASSWORD
self.headers = {
'host': 'h5.qzone.qq.com',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.8',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
'connection': 'keep-alive'
}
self.req = requests.Session()
self.cookies = {}
self.client = pymongo.MongoClient(host=qq.HOST, port=qq.PORT)
self.db = self.client[qq.DB]
def login(self):
        '''
        Log in via the QQ login frame, collect cookies, then call get_g_tk() and get_friends().
        :return:
        '''
self.driver.switch_to.frame('login_frame')
self.driver.find_element_by_id('switcher_plogin').click()
self.driver.find_element_by_id('u').clear()
self.driver.find_element_by_id('u').send_keys(self.__username)
self.driver.find_element_by_id('p').clear()
self.driver.find_element_by_id('p').send_keys(self.__password)
self.driver.find_element_by_id('login_button').click()
time.sleep(7)
self.driver.get('http://user.qzone.qq.com/{}'.format(self.__username))
cookie = ''
for item in self.driver.get_cookies():
cookie += item["name"] + '=' + item['value'] + ';'
self.cookies = cookie
self.get_g_tk()
self.headers['Cookie'] = self.cookies
self.get_friends()
self.driver.quit()
def get_friends(self):
        '''
        Fetch the full friend list.
        :return: qq, name
        '''
url = 'https://user.qzone.qq.com/proxy/domain/r.qzone.qq.com/cgi-bin/tfriend/friend_hat_get.cgi?'
params = {
'uin': self.__username,
'fupdate': 1,
'g_tk': self.g_tk
}
url = url + parse.urlencode(params)
friends = self.req.get(url, headers=self.headers).text
name, qq_num = [], []
for _qq, _name in zip(re.findall('"\d+"', friends), re.findall('"realname":.*"', friends)):
name.append(re.sub('"|realname|:', '', _name))
qq_num.append(re.sub('"', '', _qq))
self.name, self.qq_num = name, qq_num
def get_g_tk(self):
        '''
        Derive g_tk from the p_skey cookie.
        :return: the generated g_tk
        '''
p_skey = self.cookies[self.cookies.find('p_skey=') + 7: self.cookies.find(';', self.cookies.find('p_skey='))]
if len(p_skey) > 50:
self.driver.quit()
raise BaseException(
'登录出错'
)
h = 5381
for i in p_skey:
h += (h << 5) + ord(i)
print('g_tk', h & 2147483647)
self.g_tk = h & 2147483647
def get_mood(self):
        '''
        Build the mood (shuoshuo) request URL,
        request every friend's feed,
        fetch the users who liked each post,
        parse the responses with regular expressions,
        store the results in the database,
        and wait a few seconds between requests to avoid getting the account banned.
        :return:
        '''
url = 'https://h5.qzone.qq.com/proxy/domain/taotao.qq.com/cgi-bin/emotion_cgi_msglist_v6?'
params = {
'inCharset': 'utf-8',
'outCharset': 'utf-8',
'sort': 0,
'num': 20,
'repllyunm': 100,
'cgi_host': 'http://taotao.qq.com/cgi-bin/emotion_cgi_msglist_v6',
'callback': '_preloadCallback',
'code_version': 1,
'format': 'jsonp',
'need_private_comment': 1,
'g_tk': self.g_tk
}
url = url + parse.urlencode(params)
for q in self.qq_num:
num = 0
t1, pos = True, 0
url_ = url + '&uin=' + str(q)
black, shuoshuo = self.db['black'], self.db['mood']
while(t1):
url__ = url_ + '&pos=' + str(pos)
mood = self.req.get(url=url__, headers=self.headers)
if '\"msglist\":null' in mood.text or "\"message\":\"对不起,主人设置了保密,您没有权限查看\"" in mood.text:
t1 = False
if '\"message\":\"对不起,主人设置了保密,您没有权限查看\"' in mood.text:
data = {
'name': self.name[self.qq_num.index(q)],
'qq': q
}
black.insert(data)
else:
created_time = re.findall('created_time":\d+', mood.text)
source = re.findall('source_appid":".*?"source_name":".*?"', mood.text)
contents = re.findall('],"content":".*?"', mood.text)
forword = re.findall('fwdnum":\d+', mood.text)
comment_content = re.findall('commentlist":(null|.*?],)', mood.text)
comments = re.findall('cmtnum":\d+', mood.text)
pics = re.findall('","pic(_template|".*?])', mood.text)
like_url = 'https://user.qzone.qq.com/proxy/domain/users.qzone.qq.com/cgi-bin/likes/get_like_list_app?'
tids = re.findall('tid":".*?"', mood.text)
for _time, _source, _content, _forword, _comment_content, _comment, _pic, _tid in \
zip(created_time, source, contents, forword, comment_content, comments, pics, tids):
param = {
'uin': self.__username,
'unikey': 'http://user.qzone.qq.com/{}/mood/'.format(q)+re.sub('tid":"|"', '', _tid)+'.1',
'begin_uin': 0,
'query_count': 60,
'if_first_page': 1,
'g_tk': self.g_tk
}
like_url_current = like_url + parse.urlencode(param)
like = self.req.get(url=like_url_current, headers=self.headers)
likers = like.text.encode(like.encoding).decode('utf-8')
if likers is None:
likers = []
fuin, nick, sex, constellation, address = re.findall('fuin":\d+', likers), re.findall('nick":".*?"', likers), re.findall('gender":".*?"', likers), re.findall('tion":".*?"', likers), re.findall('addr":".*?"', likers)
infos = []
                        # liker information
for _fuin, _nick, _sex, _constellation, _address in zip(fuin, nick, sex, constellation, address):
info = {
'fuin': re.sub('fuin":', '', _fuin),
'nick': re.sub('nick":"|"', '', _nick),
'sex': re.sub('gender":"|"', '', _sex),
'constellation': re.sub('tion":"|"', '', _constellation),
'address': re.sub('addr":"|"', '', _address)
}
infos.append(info)
num = num + 1
print(num)
data = {
# '_id': str(q) + '_' + str(random.random() * 10).replace('.', ''),
'_id': str(q) + '_' + str(num),
'name': self.name[self.qq_num.index(q)],
'CreateTime': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(re.sub('created_time":', '', _time)))),
'source': re.sub('source_appid":".*?"source_name":"|"', '', _source),
'content': re.sub('],"content":"|"', '', _content),
'forward': re.sub('fwdnum":', '', _forword),
'comment_content': re.sub('null|commentlist":', '', _comment_content) if 'null' in _comment_content else str([(re.sub('content":"|"', '', x), re.sub('createTime2":"|"', '', y), re.sub('name":"|"', '', z), re.sub('uin":', '', zz)) for x, y, z, zz in zip(re.findall('content":".*?"', _comment_content), re.findall('createTime2":".*?"', _comment_content), re.findall('name":".*?"', _comment_content), re.findall('uin":\d+', _comment_content))]),
'comment': re.sub('cmtnum":', '', _comment),
'pic': [] if 'template' in _pic else [re.sub('url2":|"', '', i) for i in re.findall('url2":".*?"', _pic)],
'lick_url' : like_url_current
}
try:
data['like'] = re.sub('number":', '', re.search('number":\d+', likers).group())
except Exception as identifier:
print(identifier)
data['like'] = 0
data['likers'] = infos
if shuoshuo.insert(data):
print('%s 的说说写入到数据库成功!' % self.name[self.qq_num.index(q)])
else:
with open('filed', 'a+', encoding='utf-8') as f:
f.write('%s 的说说爬取失败!\n' % self.name[self.qq_num.index(q)])
print('%s 的说说写入到数据库成功!' % self.name[self.qq_num.index(q)])
pos += 20
time.sleep(4)
def get_board(self):
        '''
        Fetch message-board entries; much the same flow as fetching moods.
        :return:
        '''
url = 'https://user.qzone.qq.com/proxy/domain/m.qzone.qq.com/cgi-bin/new/get_msgb?'
params = {
'uin': self.__username,
'num': 10,
'hostword': 0,
'essence': 1,
'inCharset': 'utf-8',
'outCharset': 'utf-8',
'format': 'jsonp',
'g_tk': self.g_tk
}
url = url + parse.urlencode(params)
for q in self.qq_num:
num = 0
t2 = True
url_ = url + '&hostUin=' + str(q)
start = 0
boardb = self.db['board']
while(t2):
url__ = url_ + '&start=' + str(start)
board = self.req.get(url=url__, headers=self.headers)
if '\"message":"空间主人设置了访问权限,您无法进行操作\"' in board.text or '\"message\":\"空间未开通\"' in board.text or '\"commentList\":[]' in board.text or '\"total\":0' in board.text:
t2 = False
else:
text = board.text
ids, nickname, uin, pubtime, content, replyList = \
re.findall('id":"\d+', text), re.findall('nickname":".*?"', text), re.findall('uin":\d+,\n"nick', text),\
re.findall('pubtime":".*?"', text), re.findall('ubbContent":".*?"', text), re.findall('"replyList":(\[\]|.*?\}\])', text, re.S)
for _id, _nickname, _uin, _time, _content, _reply in zip(ids, nickname, uin, pubtime, content, replyList):
num = num + 1
print(num)
data = {
# '_id': str(q) + '_' + re.sub('id":"', '', _id),
'_id': str(q) + '_' + str(num),
'owner': self.name[self.qq_num.index(q)],
'total': re.sub('total":', '', re.search('total":\d+', board.text).group()),
'name': re.sub('nickname":"|"', '', _nickname),
'qq': re.sub('uin":|,\n"nick', '', _uin),
'time': re.sub('pubtime":"|"', '', _time),
                            'content': re.sub('ubbContent":"|"', '', _content), # the line below needs adjusting
'replyList': [] if '[]' in _reply else str([re.sub('nick":"|"', '', name) + re.sub('content"|"', '', con) for name, con in zip(re.findall('nick":".*?"', _reply), re.findall('content":".*?"', _reply))])
}
if boardb.insert(data):
print('%s 的留言存储到Mongodb成功!' % self.name[self.qq_num.index(q)])
start += 10
def get_information(self):
        '''
        Build the profile request and parse the response with regular expressions.
        :return:
        '''
url = 'https://h5.qzone.qq.com/proxy/domain/base.qzone.qq.com/cgi-bin/user/cgi_userinfo_get_all?'
params = {
'vuin': self.__username,
'fupdate': 1,
'g_tk': self.g_tk
}
url = url + parse.urlencode(params)
table = self.db['information']
for q in self.qq_num:
t3 = True
url_ = url + '&uin=' + str(q)
while(t3):
info = self.req.get(url=url_, headers=self.headers)
if '\"message\":\"您无权访问\"' in info.text:
t3 = False
else:
text = info.text
sex = ['其他', '男', '女']
constellation = ['白羊座', '金牛座', '双子座', '巨蟹座', '狮子座', '处女座', '天秤座', '天蝎座', '射手座', '摩羯座', '水瓶座', '双鱼座', '未填写']
data = {
'_id': str(q) + '_' + str(random.random() * 10).replace('.', ''),
'nickname': re.sub('nickname":"|"', '', re.search('nickname":".*?"', text).group()),
'spacename': re.sub('spacename":"|"', '', re.search('spacename":".*?"', text).group()),
'desc': re.sub('desc":"|"', '', re.search('desc":".*?"', text).group()),
'signature': re.sub('signature":"|"', '', re.search('signature":".*?"', text).group()),
'sex': sex[int(re.sub('sex":', '', re.search('sex":\d+', text).group()))],
'age': re.sub('"age":', '', re.search('"age":\d+', text).group()),
'birthday': re.sub('birthyear":', '', re.search('birthyear":\d+', text).group()) + '-' + re.sub('birthday":"|"', '', re.search('birthday":".*"', text).group()),
'constellation': constellation[int(re.sub('constellation":|,', '', re.search('constellation":.*,', text).group()).replace('-1', '12'))],
'country': re.sub('country":"|"', '', re.search('country":".*"', text).group()),
'province': re.sub('province":"|"', '', re.search('province":".*?"', text).group()),
'city': re.sub('city":"|"', '', re.search('city":".*?"', text).group()),
'hometown': re.sub('hco":"|"|,|\n|hc|hp|:', '', re.search('hco":".*\n".*\n".*', text).group()),
# 'marriage': marriage[int(re.sub('marriage":', '', re.search('marriage":\d', text).group()))],
'career': re.sub('career":"|"', '', re.search('career":".*?"', text).group()),
'address': re.sub('cb":"|"', '', re.search('cb":".*?"', text).group())
}
if table.insert(data):
print('%s 的信息写入到数据库成功!' % self.name[self.qq_num.index(q)])
t3 = False
if __name__ == '__main__':
sp = Spider()
sp.login()
sp.get_information()
t = time.perf_counter()
sp.get_board()
sp.get_mood()
End = time.perf_counter() - t
print('所有内容爬取完成!总用时%s!' % End)
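Spider.get_g_tk above derives Qzone's g_tk token with a djb2-style rolling hash over the p_skey cookie, masked to 31 bits. The same logic pulled out as a standalone helper, with a made-up key for demonstration:
# Standalone version of the derivation used by Spider.get_g_tk;
# the p_skey value passed in is a made-up example.
def calc_g_tk(p_skey: str) -> int:
    h = 5381
    for ch in p_skey:
        h += (h << 5) + ord(ch)
    return h & 2147483647

print(calc_g_tk("example-p_skey-value"))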
| 51.302251
| 471
| 0.443936
|
import time
import re
import random
import requests
from urllib import parse
import qq_init as qq
import pymongo
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
class Spider(object):
def __init__(self):
chrome_options = Options()
self.driver = webdriver.Chrome(chrome_options=chrome_options)
self.driver.get('https://i.qq.com/')
self.__username = qq.USERNAME
self.__password = qq.PASSWORD
self.headers = {
'host': 'h5.qzone.qq.com',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.8',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
'connection': 'keep-alive'
}
self.req = requests.Session()
self.cookies = {}
self.client = pymongo.MongoClient(host=qq.HOST, port=qq.PORT)
self.db = self.client[qq.DB]
def login(self):
self.driver.switch_to.frame('login_frame')
self.driver.find_element_by_id('switcher_plogin').click()
self.driver.find_element_by_id('u').clear()
self.driver.find_element_by_id('u').send_keys(self.__username)
self.driver.find_element_by_id('p').clear()
self.driver.find_element_by_id('p').send_keys(self.__password)
self.driver.find_element_by_id('login_button').click()
time.sleep(7)
self.driver.get('http://user.qzone.qq.com/{}'.format(self.__username))
cookie = ''
for item in self.driver.get_cookies():
cookie += item["name"] + '=' + item['value'] + ';'
self.cookies = cookie
self.get_g_tk()
self.headers['Cookie'] = self.cookies
self.get_friends()
self.driver.quit()
def get_friends(self):
url = 'https://user.qzone.qq.com/proxy/domain/r.qzone.qq.com/cgi-bin/tfriend/friend_hat_get.cgi?'
params = {
'uin': self.__username,
'fupdate': 1,
'g_tk': self.g_tk
}
url = url + parse.urlencode(params)
friends = self.req.get(url, headers=self.headers).text
name, qq_num = [], []
for _qq, _name in zip(re.findall('"\d+"', friends), re.findall('"realname":.*"', friends)):
name.append(re.sub('"|realname|:', '', _name))
qq_num.append(re.sub('"', '', _qq))
self.name, self.qq_num = name, qq_num
def get_g_tk(self):
p_skey = self.cookies[self.cookies.find('p_skey=') + 7: self.cookies.find(';', self.cookies.find('p_skey='))]
if len(p_skey) > 50:
self.driver.quit()
raise BaseException(
'登录出错'
)
h = 5381
for i in p_skey:
h += (h << 5) + ord(i)
print('g_tk', h & 2147483647)
self.g_tk = h & 2147483647
def get_mood(self):
url = 'https://h5.qzone.qq.com/proxy/domain/taotao.qq.com/cgi-bin/emotion_cgi_msglist_v6?'
params = {
'inCharset': 'utf-8',
'outCharset': 'utf-8',
'sort': 0,
'num': 20,
'repllyunm': 100,
'cgi_host': 'http://taotao.qq.com/cgi-bin/emotion_cgi_msglist_v6',
'callback': '_preloadCallback',
'code_version': 1,
'format': 'jsonp',
'need_private_comment': 1,
'g_tk': self.g_tk
}
url = url + parse.urlencode(params)
for q in self.qq_num:
num = 0
t1, pos = True, 0
url_ = url + '&uin=' + str(q)
black, shuoshuo = self.db['black'], self.db['mood']
while(t1):
url__ = url_ + '&pos=' + str(pos)
mood = self.req.get(url=url__, headers=self.headers)
if '\"msglist\":null' in mood.text or "\"message\":\"对不起,主人设置了保密,您没有权限查看\"" in mood.text:
t1 = False
if '\"message\":\"对不起,主人设置了保密,您没有权限查看\"' in mood.text:
data = {
'name': self.name[self.qq_num.index(q)],
'qq': q
}
black.insert(data)
else:
created_time = re.findall('created_time":\d+', mood.text)
source = re.findall('source_appid":".*?"source_name":".*?"', mood.text)
contents = re.findall('],"content":".*?"', mood.text)
forword = re.findall('fwdnum":\d+', mood.text)
comment_content = re.findall('commentlist":(null|.*?],)', mood.text)
comments = re.findall('cmtnum":\d+', mood.text)
pics = re.findall('","pic(_template|".*?])', mood.text)
like_url = 'https://user.qzone.qq.com/proxy/domain/users.qzone.qq.com/cgi-bin/likes/get_like_list_app?'
tids = re.findall('tid":".*?"', mood.text)
for _time, _source, _content, _forword, _comment_content, _comment, _pic, _tid in \
zip(created_time, source, contents, forword, comment_content, comments, pics, tids):
param = {
'uin': self.__username,
'unikey': 'http://user.qzone.qq.com/{}/mood/'.format(q)+re.sub('tid":"|"', '', _tid)+'.1',
'begin_uin': 0,
'query_count': 60,
'if_first_page': 1,
'g_tk': self.g_tk
}
like_url_current = like_url + parse.urlencode(param)
like = self.req.get(url=like_url_current, headers=self.headers)
likers = like.text.encode(like.encoding).decode('utf-8')
if likers is None:
likers = []
fuin, nick, sex, constellation, address = re.findall('fuin":\d+', likers), re.findall('nick":".*?"', likers), re.findall('gender":".*?"', likers), re.findall('tion":".*?"', likers), re.findall('addr":".*?"', likers)
infos = []
                        # Collect the users who liked this post
for _fuin, _nick, _sex, _constellation, _address in zip(fuin, nick, sex, constellation, address):
info = {
'fuin': re.sub('fuin":', '', _fuin),
'nick': re.sub('nick":"|"', '', _nick),
'sex': re.sub('gender":"|"', '', _sex),
'constellation': re.sub('tion":"|"', '', _constellation),
'address': re.sub('addr":"|"', '', _address)
}
infos.append(info)
num = num + 1
print(num)
data = {
'_id': str(q) + '_' + str(num),
'name': self.name[self.qq_num.index(q)],
'CreateTime': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(re.sub('created_time":', '', _time)))),
'source': re.sub('source_appid":".*?"source_name":"|"', '', _source),
'content': re.sub('],"content":"|"', '', _content),
'forward': re.sub('fwdnum":', '', _forword),
'comment_content': re.sub('null|commentlist":', '', _comment_content) if 'null' in _comment_content else str([(re.sub('content":"|"', '', x), re.sub('createTime2":"|"', '', y), re.sub('name":"|"', '', z), re.sub('uin":', '', zz)) for x, y, z, zz in zip(re.findall('content":".*?"', _comment_content), re.findall('createTime2":".*?"', _comment_content), re.findall('name":".*?"', _comment_content), re.findall('uin":\d+', _comment_content))]),
'comment': re.sub('cmtnum":', '', _comment),
'pic': [] if 'template' in _pic else [re.sub('url2":|"', '', i) for i in re.findall('url2":".*?"', _pic)],
'lick_url' : like_url_current
}
try:
data['like'] = re.sub('number":', '', re.search('number":\d+', likers).group())
except Exception as identifier:
print(identifier)
data['like'] = 0
data['likers'] = infos
                            if shuoshuo.insert(data):
                                print("%s's mood posts were written to the database." % self.name[self.qq_num.index(q)])
                            else:
                                with open('filed', 'a+', encoding='utf-8') as f:
                                    f.write("Failed to store %s's mood posts.\n" % self.name[self.qq_num.index(q)])
                                print("Failed to store %s's mood posts." % self.name[self.qq_num.index(q)])
pos += 20
time.sleep(4)
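    # Crawl each friend's message board 10 entries at a time and store the results in the 'board' collection.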
def get_board(self):
url = 'https://user.qzone.qq.com/proxy/domain/m.qzone.qq.com/cgi-bin/new/get_msgb?'
params = {
'uin': self.__username,
'num': 10,
'hostword': 0,
'essence': 1,
'inCharset': 'utf-8',
'outCharset': 'utf-8',
'format': 'jsonp',
'g_tk': self.g_tk
}
url = url + parse.urlencode(params)
for q in self.qq_num:
num = 0
t2 = True
url_ = url + '&hostUin=' + str(q)
start = 0
boardb = self.db['board']
            while t2:
url__ = url_ + '&start=' + str(start)
board = self.req.get(url=url__, headers=self.headers)
if '\"message":"空间主人设置了访问权限,您无法进行操作\"' in board.text or '\"message\":\"空间未开通\"' in board.text or '\"commentList\":[]' in board.text or '\"total\":0' in board.text:
t2 = False
else:
text = board.text
ids, nickname, uin, pubtime, content, replyList = \
re.findall('id":"\d+', text), re.findall('nickname":".*?"', text), re.findall('uin":\d+,\n"nick', text),\
re.findall('pubtime":".*?"', text), re.findall('ubbContent":".*?"', text), re.findall('"replyList":(\[\]|.*?\}\])', text, re.S)
for _id, _nickname, _uin, _time, _content, _reply in zip(ids, nickname, uin, pubtime, content, replyList):
num = num + 1
print(num)
data = {
'_id': str(q) + '_' + str(num),
'owner': self.name[self.qq_num.index(q)],
'total': re.sub('total":', '', re.search('total":\d+', board.text).group()),
'name': re.sub('nickname":"|"', '', _nickname),
'qq': re.sub('uin":|,\n"nick', '', _uin),
'time': re.sub('pubtime":"|"', '', _time),
                            'content': re.sub('ubbContent":"|"', '', _content),  # the next line needs adjustment
'replyList': [] if '[]' in _reply else str([re.sub('nick":"|"', '', name) + re.sub('content"|"', '', con) for name, con in zip(re.findall('nick":".*?"', _reply), re.findall('content":".*?"', _reply))])
}
if boardb.insert(data):
                            print("%s's message board entries were stored in MongoDB." % self.name[self.qq_num.index(q)])
start += 10
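    # Fetch each friend's profile (nickname, signature, birthday, location, career, ...) and store it in the 'information' collection.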
def get_information(self):
url = 'https://h5.qzone.qq.com/proxy/domain/base.qzone.qq.com/cgi-bin/user/cgi_userinfo_get_all?'
params = {
'vuin': self.__username,
'fupdate': 1,
'g_tk': self.g_tk
}
url = url + parse.urlencode(params)
table = self.db['information']
for q in self.qq_num:
t3 = True
url_ = url + '&uin=' + str(q)
            while t3:
info = self.req.get(url=url_, headers=self.headers)
if '\"message\":\"您无权访问\"' in info.text:
t3 = False
else:
text = info.text
sex = ['其他', '男', '女']
constellation = ['白羊座', '金牛座', '双子座', '巨蟹座', '狮子座', '处女座', '天秤座', '天蝎座', '射手座', '摩羯座', '水瓶座', '双鱼座', '未填写']
data = {
'_id': str(q) + '_' + str(random.random() * 10).replace('.', ''),
'nickname': re.sub('nickname":"|"', '', re.search('nickname":".*?"', text).group()),
'spacename': re.sub('spacename":"|"', '', re.search('spacename":".*?"', text).group()),
'desc': re.sub('desc":"|"', '', re.search('desc":".*?"', text).group()),
'signature': re.sub('signature":"|"', '', re.search('signature":".*?"', text).group()),
'sex': sex[int(re.sub('sex":', '', re.search('sex":\d+', text).group()))],
'age': re.sub('"age":', '', re.search('"age":\d+', text).group()),
'birthday': re.sub('birthyear":', '', re.search('birthyear":\d+', text).group()) + '-' + re.sub('birthday":"|"', '', re.search('birthday":".*"', text).group()),
'constellation': constellation[int(re.sub('constellation":|,', '', re.search('constellation":.*,', text).group()).replace('-1', '12'))],
'country': re.sub('country":"|"', '', re.search('country":".*"', text).group()),
'province': re.sub('province":"|"', '', re.search('province":".*?"', text).group()),
'city': re.sub('city":"|"', '', re.search('city":".*?"', text).group()),
'hometown': re.sub('hco":"|"|,|\n|hc|hp|:', '', re.search('hco":".*\n".*\n".*', text).group()),
# 'marriage': marriage[int(re.sub('marriage":', '', re.search('marriage":\d', text).group()))],
'career': re.sub('career":"|"', '', re.search('career":".*?"', text).group()),
'address': re.sub('cb":"|"', '', re.search('cb":".*?"', text).group())
}
if table.insert(data):
                        print("%s's profile information was written to the database." % self.name[self.qq_num.index(q)])
t3 = False
if __name__ == '__main__':
sp = Spider()
sp.login()
sp.get_information()
t = time.perf_counter()
sp.get_board()
sp.get_mood()
End = time.perf_counter() - t
    print('All crawling finished! Total time: %s' % End)
| true
| true
|
f70be413dfeca425596a01e87c4bbb46375b6a26
| 1,194
|
py
|
Python
|
src/napalm_digineo_procurve/device.py
|
digineo/napalm-digineo-procurve
|
477befcd09b0ce209c42f9742b2c4bb0986fceb8
|
[
"Apache-2.0"
] | 4
|
2019-06-07T07:59:56.000Z
|
2020-12-09T19:27:56.000Z
|
src/napalm_digineo_procurve/device.py
|
digineo/napalm-digineo-procurve
|
477befcd09b0ce209c42f9742b2c4bb0986fceb8
|
[
"Apache-2.0"
] | 1
|
2021-03-31T19:04:16.000Z
|
2021-03-31T19:04:16.000Z
|
src/napalm_digineo_procurve/device.py
|
digineo/napalm-digineo-procurve
|
477befcd09b0ce209c42f9742b2c4bb0986fceb8
|
[
"Apache-2.0"
] | 1
|
2019-12-24T11:05:24.000Z
|
2019-12-24T11:05:24.000Z
|
import typing
import netmiko
import napalm_digineo_procurve.queries.interfaces
import napalm_digineo_procurve.queries.lldp_neighbors
import napalm_digineo_procurve.queries.device_info
import napalm_digineo_procurve.queries.system_info
import napalm_digineo_procurve.queries.uptime
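# Thin wrapper functions: each getter delegates to its query module and takes an open netmiko connection.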
def get_uptime(device: netmiko.BaseConnection) -> float:
return napalm_digineo_procurve.queries.uptime.query(device)
def get_system_information(
device: netmiko.BaseConnection
) -> napalm_digineo_procurve.queries.system_info.SystemInformation:
return napalm_digineo_procurve.queries.system_info.query(device)
def get_device_manufacturer_info(
device: netmiko.BaseConnection
) -> napalm_digineo_procurve.queries.device_info.DeviceInformation:
return napalm_digineo_procurve.queries.device_info.query(device)
def get_interfaces(
device: netmiko.BaseConnection
) -> typing.Sequence[napalm_digineo_procurve.queries.interfaces.Interface]:
return napalm_digineo_procurve.queries.interfaces.query(device)
def get_lldp_neighbors(
device: netmiko.BaseConnection
) -> typing.List[typing.Mapping[str, str]]:
return napalm_digineo_procurve.queries.lldp_neighbors.query(device)
| 31.421053
| 75
| 0.836683
|
import typing
import netmiko
import napalm_digineo_procurve.queries.interfaces
import napalm_digineo_procurve.queries.lldp_neighbors
import napalm_digineo_procurve.queries.device_info
import napalm_digineo_procurve.queries.system_info
import napalm_digineo_procurve.queries.uptime
def get_uptime(device: netmiko.BaseConnection) -> float:
return napalm_digineo_procurve.queries.uptime.query(device)
def get_system_information(
device: netmiko.BaseConnection
) -> napalm_digineo_procurve.queries.system_info.SystemInformation:
return napalm_digineo_procurve.queries.system_info.query(device)
def get_device_manufacturer_info(
device: netmiko.BaseConnection
) -> napalm_digineo_procurve.queries.device_info.DeviceInformation:
return napalm_digineo_procurve.queries.device_info.query(device)
def get_interfaces(
device: netmiko.BaseConnection
) -> typing.Sequence[napalm_digineo_procurve.queries.interfaces.Interface]:
return napalm_digineo_procurve.queries.interfaces.query(device)
def get_lldp_neighbors(
device: netmiko.BaseConnection
) -> typing.List[typing.Mapping[str, str]]:
return napalm_digineo_procurve.queries.lldp_neighbors.query(device)
| true
| true
|
f70be4df0b57c7639cd90df01dd882374cfb6959
| 413
|
py
|
Python
|
test.py
|
Oversize204/cvbpy_show_cases
|
ccd352761aa9bfab220feb888e6639f3cb9a5ad7
|
[
"MIT"
] | null | null | null |
test.py
|
Oversize204/cvbpy_show_cases
|
ccd352761aa9bfab220feb888e6639f3cb9a5ad7
|
[
"MIT"
] | null | null | null |
test.py
|
Oversize204/cvbpy_show_cases
|
ccd352761aa9bfab220feb888e6639f3cb9a5ad7
|
[
"MIT"
] | null | null | null |
import os
import cvb
print("acquire images from CVMock.vin")
device = cvb.DeviceFactory.open("/opt/cvb/drivers/CVMock.vin")
stream = device.stream
stream.start()
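# Acquire five frames from the mock driver and save each successfully received image under ./.cvb/.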
for i in range(5):
image, status = stream.wait()
if status == cvb.WaitStatus.Ok:
image_file = os.path.join(".", ".cvb", "test" + str(i) + ".jpg")
image.save(image_file)
print("saving: " + image_file)
stream.abort()
| 22.944444
| 72
| 0.644068
|
import os
import cvb
print("acquire images from CVMock.vin")
device = cvb.DeviceFactory.open("/opt/cvb/drivers/CVMock.vin")
stream = device.stream
stream.start()
for i in range(5):
image, status = stream.wait()
if status == cvb.WaitStatus.Ok:
image_file = os.path.join(".", ".cvb", "test" + str(i) + ".jpg")
image.save(image_file)
print("saving: " + image_file)
stream.abort()
| true
| true
|
f70be525727b66a9997338896913e2083a32e400
| 222
|
py
|
Python
|
MinkowskiEngine/MinkowskiFunctional.py
|
dendisuhubdy/MinkowskiEngine
|
a1cdcba68ef925bfefed2fe161f62e1ec78573b9
|
[
"MIT"
] | 1
|
2019-05-12T00:06:10.000Z
|
2019-05-12T00:06:10.000Z
|
MinkowskiEngine/MinkowskiFunctional.py
|
dendisuhubdy/MinkowskiEngine
|
a1cdcba68ef925bfefed2fe161f62e1ec78573b9
|
[
"MIT"
] | null | null | null |
MinkowskiEngine/MinkowskiFunctional.py
|
dendisuhubdy/MinkowskiEngine
|
a1cdcba68ef925bfefed2fe161f62e1ec78573b9
|
[
"MIT"
] | null | null | null |
import torch.nn.functional as F
from SparseTensor import SparseTensor
def relu(input):
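    # Apply ReLU to the feature matrix (input.F) and wrap the result in a SparseTensor that reuses the input's coordinates.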
output = F.relu(input.F)
return SparseTensor(
output, coords_key=input.coords_key, coords_manager=input.coords_man)
| 22.2
| 77
| 0.752252
|
import torch.nn.functional as F
from SparseTensor import SparseTensor
def relu(input):
output = F.relu(input.F)
return SparseTensor(
output, coords_key=input.coords_key, coords_manager=input.coords_man)
| true
| true
|
f70be67a2d18174e2398b18aa0d130f82252a8f8
| 994
|
py
|
Python
|
app/core/models.py
|
shreyask543/Recipe-api
|
34c43db4ee6cdcd90cdcf8e88a536ef66452ddb6
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
shreyask543/Recipe-api
|
34c43db4ee6cdcd90cdcf8e88a536ef66452ddb6
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
shreyask543/Recipe-api
|
34c43db4ee6cdcd90cdcf8e88a536ef66452ddb6
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
if not email:
raise ValueError('User must have an email address')
user=self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
user=self.create_user(email,password)
user.is_staff=True
user.is_superuser=True
user.save(using=self._db)
return user
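# Custom user model that authenticates with the email address instead of a username (USERNAME_FIELD = 'email').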
class User(AbstractBaseUser, PermissionsMixin):
email=models.EmailField(max_length=255, unique=True)
name=models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects=UserManager()
USERNAME_FIELD= 'email'
| 29.235294
| 90
| 0.709256
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
if not email:
raise ValueError('User must have an email address')
user=self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
user=self.create_user(email,password)
user.is_staff=True
user.is_superuser=True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
email=models.EmailField(max_length=255, unique=True)
name=models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects=UserManager()
USERNAME_FIELD= 'email'
| true
| true
|
f70be84014189656325b4993a31615b0adec7c88
| 3,617
|
py
|
Python
|
rbac/common/role/reject_owner.py
|
akgunkel/sawtooth-next-directory
|
a88833033ab30e9091479a38947f04c5e396ca46
|
[
"Apache-2.0"
] | null | null | null |
rbac/common/role/reject_owner.py
|
akgunkel/sawtooth-next-directory
|
a88833033ab30e9091479a38947f04c5e396ca46
|
[
"Apache-2.0"
] | 1
|
2018-09-10T19:12:31.000Z
|
2018-09-10T19:12:31.000Z
|
rbac/common/role/reject_owner.py
|
akgunkel/sawtooth-next-directory
|
a88833033ab30e9091479a38947f04c5e396ca46
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
"""Implements the REJECT_ADD_ROLE_OWNER message
usage: rbac.role.owner.reject.create()"""
import logging
from rbac.common import addresser
from rbac.common.proposal.proposal_reject import ProposalReject
LOGGER = logging.getLogger(__name__)
class RejectAddRoleOwner(ProposalReject):
"""Implements the REJECT_ADD_ROLE_OWNER message
usage: rbac.role.owner.reject.create()"""
def __init__(self):
super().__init__()
self._register()
@property
def message_action_type(self):
"""The action type performed by this message"""
return addresser.MessageActionType.REJECT
@property
def message_subaction_type(self):
"""The subsequent action performed or proposed by this message"""
return addresser.MessageActionType.ADD
@property
def message_object_type(self):
"""The object type this message acts upon"""
return addresser.ObjectType.ROLE
@property
def message_related_type(self):
"""the object type of the related object this message acts upon"""
return addresser.ObjectType.USER
@property
def message_relationship_type(self):
"""The relationship type this message acts upon"""
return addresser.RelationshipType.OWNER
def make_addresses(self, message, signer_user_id):
"""Makes the appropriate inputs & output addresses for the message"""
inputs, outputs = super().make_addresses(message, signer_user_id)
# should be owner not admin
signer_admin_address = addresser.role.admin.address(
message.object_id, signer_user_id
)
inputs.add(signer_admin_address)
signer_owner_address = addresser.role.owner.address(
message.object_id, signer_user_id
)
inputs.add(signer_owner_address)
proposal_address = self.address(
object_id=message.object_id, related_id=message.related_id
)
inputs.add(proposal_address)
outputs.add(proposal_address)
return inputs, outputs
def validate_state(self, context, message, payload, input_state, store):
"""Validates that:
1. the signer is an owner of the role"""
super().validate_state(
context=context,
message=message,
payload=payload,
input_state=input_state,
store=store,
)
# TODO: change to verify proposal assignment and hierarchy
# TODO: should be owners
# if not addresser.role.admin.exists_in_state_inputs(
# inputs=payload.inputs,
# input_state=input_state,
# object_id=message.object_id,
# related_id=payload.signer.user_id,
# ):
# raise ValueError(
# "Signer {} must be an admin of the role {}".format(
# payload.signer.user_id, message.object_id
# )
# )
| 34.447619
| 79
| 0.656345
|
import logging
from rbac.common import addresser
from rbac.common.proposal.proposal_reject import ProposalReject
LOGGER = logging.getLogger(__name__)
class RejectAddRoleOwner(ProposalReject):
def __init__(self):
super().__init__()
self._register()
@property
def message_action_type(self):
return addresser.MessageActionType.REJECT
@property
def message_subaction_type(self):
return addresser.MessageActionType.ADD
@property
def message_object_type(self):
return addresser.ObjectType.ROLE
@property
def message_related_type(self):
return addresser.ObjectType.USER
@property
def message_relationship_type(self):
return addresser.RelationshipType.OWNER
def make_addresses(self, message, signer_user_id):
inputs, outputs = super().make_addresses(message, signer_user_id)
signer_admin_address = addresser.role.admin.address(
message.object_id, signer_user_id
)
inputs.add(signer_admin_address)
signer_owner_address = addresser.role.owner.address(
message.object_id, signer_user_id
)
inputs.add(signer_owner_address)
proposal_address = self.address(
object_id=message.object_id, related_id=message.related_id
)
inputs.add(proposal_address)
outputs.add(proposal_address)
return inputs, outputs
def validate_state(self, context, message, payload, input_state, store):
super().validate_state(
context=context,
message=message,
payload=payload,
input_state=input_state,
store=store,
)
| true
| true
|
f70be9c29d438c8bd7ae0af6ada925b74f12119d
| 781
|
py
|
Python
|
src/c3nav/editor/tasks.py
|
johnjohndoe/c3nav
|
a17f863a3512e305595c16b0300796b6bae81241
|
[
"Apache-2.0"
] | 132
|
2016-11-12T01:45:23.000Z
|
2022-03-08T15:17:10.000Z
|
src/c3nav/editor/tasks.py
|
johnjohndoe/c3nav
|
a17f863a3512e305595c16b0300796b6bae81241
|
[
"Apache-2.0"
] | 66
|
2016-09-29T09:46:19.000Z
|
2022-03-11T23:26:18.000Z
|
src/c3nav/editor/tasks.py
|
johnjohndoe/c3nav
|
a17f863a3512e305595c16b0300796b6bae81241
|
[
"Apache-2.0"
] | 42
|
2016-09-29T08:34:57.000Z
|
2022-03-08T15:17:15.000Z
|
import logging
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import send_mail
from c3nav.celery import app
logger = logging.getLogger('c3nav')
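# Celery task: e-mail every user who holds the review_changesets permission about a newly proposed changeset.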
@app.task(bind=True, max_retries=3)
def send_changeset_proposed_notification(self, pk, author, title, description):
subject = '[c3nav] New Changeset by %s: %s' % (author, title)
for user in User.objects.filter(permissions__review_changesets=True):
if not user.email:
continue
text = (
('Hi %s!\n\n' % user.username) +
('A new Changeset has been proposed by %s:\n\n' % author) +
('---\n\n') +
(title+'\n\n'+description)
)
send_mail(subject, text, settings.MAIL_FROM, [user.email])
| 31.24
| 79
| 0.644046
|
import logging
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import send_mail
from c3nav.celery import app
logger = logging.getLogger('c3nav')
@app.task(bind=True, max_retries=3)
def send_changeset_proposed_notification(self, pk, author, title, description):
subject = '[c3nav] New Changeset by %s: %s' % (author, title)
for user in User.objects.filter(permissions__review_changesets=True):
if not user.email:
continue
text = (
('Hi %s!\n\n' % user.username) +
('A new Changeset has been proposed by %s:\n\n' % author) +
('---\n\n') +
(title+'\n\n'+description)
)
send_mail(subject, text, settings.MAIL_FROM, [user.email])
| true
| true
|
f70bea4fa1a8a385185194767936953325c64e31
| 5,772
|
py
|
Python
|
node_modules/clarifai/scripts/app_and_key_for_tests.py
|
seycileli/facerecognitionapp-api
|
c0c4b2bdb57cd73c9b58178438f033777f72bd5b
|
[
"MIT"
] | 346
|
2016-05-26T20:02:41.000Z
|
2022-03-24T20:43:31.000Z
|
node_modules/clarifai/scripts/app_and_key_for_tests.py
|
seycileli/facerecognitionapp-api
|
c0c4b2bdb57cd73c9b58178438f033777f72bd5b
|
[
"MIT"
] | 76
|
2015-10-25T13:03:47.000Z
|
2022-02-19T09:36:10.000Z
|
node_modules/clarifai/scripts/app_and_key_for_tests.py
|
seycileli/facerecognitionapp-api
|
c0c4b2bdb57cd73c9b58178438f033777f72bd5b
|
[
"MIT"
] | 136
|
2015-09-04T13:48:27.000Z
|
2021-06-12T16:48:36.000Z
|
import json
import os
import sys
try:
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request, build_opener, HTTPHandler
from urllib.error import HTTPError
except ImportError:
from urlparse import urlparse
from urllib import urlencode
from urllib2 import urlopen, Request, HTTPError, build_opener, HTTPHandler
EMAIL = os.environ['CLARIFAI_USER_EMAIL']
PASSWORD = os.environ['CLARIFAI_USER_PASSWORD']
BASE = 'https://api.clarifai.com/v2'
def _request(method, url, payload={}, headers={}):
opener = build_opener(HTTPHandler)
full_url = '%s%s' % (BASE, url)
request = Request(full_url, data=json.dumps(payload).encode())
for k in headers.keys():
request.add_header(k, headers[k])
request.get_method = lambda: method
return json.loads(opener.open(request).read().decode())
def create_app(env_name):
session_token, user_id = _login()
url = '/users/%s/apps' % user_id
payload = {'apps': [{'name': 'auto-created-in-%s-ci-test-run' % env_name}]}
response = _request(method='POST', url=url, payload=payload, headers=_auth_headers(session_token))
_raise_on_http_error(response)
data = response
app_id = data['apps'][0]['id']
# This print needs to be present so we can read the value in CI.
print(app_id)
def create_key(app_id):
session_token, user_id = _login()
url = '/users/%s/keys' % user_id
payload = {
'keys': [{
'description': 'Auto-created in a CI test run',
'scopes': ['All'],
'apps': [{'id': app_id, 'user_id': user_id}]
}]
}
response = _request(method='POST', url=url, payload=payload, headers=_auth_headers(session_token))
_raise_on_http_error(response)
data = response
key_id = data['keys'][0]['id']
# This print needs to be present so we can read the value in CI.
print(key_id)
def delete(app_id):
session_token, user_id = _login()
# All the related keys will be deleted automatically when the app is deleted
_delete_app(session_token, user_id, app_id)
def create_sample_workflow(api_key):
url = '/workflows'
payload = {
'workflows': [
{
'id': 'food-and-general',
'nodes': [
{
'id': 'food-workflow-node',
'model': {
'id': 'bd367be194cf45149e75f01d59f77ba7',
'model_version': {
'id': 'dfebc169854e429086aceb8368662641'
}
}
},
{
'id': 'general-workflow-node',
'model': {
'id': 'aaa03c23b3724a16a56b629203edc62c',
'model_version': {
'id': 'aa9ca48295b37401f8af92ad1af0d91d'
}
}
}
]
}
]
}
response = _request(method='POST', url=url, payload=payload, headers=_auth_headers_for_api_key_key(api_key))
_raise_on_http_error(response)
def _delete_app(session_token, user_id, app_id):
url = '/users/%s/apps/%s' % (user_id, app_id)
response = _request(method='DELETE', url=url, headers=_auth_headers(session_token))
_raise_on_http_error(response)
def _auth_headers(session_token):
headers = {'Content-Type': 'application/json', 'X-Clarifai-Session-Token': session_token}
return headers
def _auth_headers_for_api_key_key(api_key):
headers = {'Content-Type': 'application/json', 'Authorization': 'Key ' + api_key}
return headers
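# Log in with the credentials taken from the environment and return the session token plus the v2 user id.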
def _login():
url = '/login'
payload = {'email': EMAIL, 'password': PASSWORD}
response = _request(method='POST', url=url, payload=payload)
_raise_on_http_error(response)
data = response
user_id = data['v2_user_id']
session_token = data['session_token']
return session_token, user_id
def _raise_on_http_error(response):
# TODO: Make this work with urllib.
# if int(response.status_code) // 100 != 2:
# raise Exception('Unexpected response %s: %s' % (response.status_code, response.text))
pass
def run(arguments):
command = arguments[0] if arguments else '--help'
if command == '--create-app':
if len(arguments) != 2:
raise Exception('--create-app takes one argument')
env_name = arguments[1]
create_app(env_name)
elif command == '--create-key':
if len(arguments) != 2:
raise Exception('--create-key takes one argument')
app_id = arguments[1]
create_key(app_id)
elif command == '--delete-app':
if len(arguments) != 2:
raise Exception('--delete-app takes one argument')
app_id = arguments[1]
delete(app_id)
elif command == '--create-workflow':
if len(arguments) != 2:
raise Exception('--create-workflow takes one argument')
api_key = arguments[1]
create_sample_workflow(api_key)
elif command == '--help':
        print('''DESCRIPTION: Creates and deletes applications and API keys
ARGUMENTS:
--create-app [env_name] ... Creates a new application.
--create-key [app_id] ... Creates a new API key.
--delete-app [app_id] ... Deletes an application (API keys that use it are deleted as well).
--create-workflow [api_key] ... Creates a sample workflow to be used in int. tests.
--help ... This text.''')
else:
print('Unknown argument. Please see --help')
exit(1)
if __name__ == '__main__':
run(arguments=sys.argv[1:])
| 31.889503
| 112
| 0.595461
|
import json
import os
import sys
try:
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request, build_opener, HTTPHandler
from urllib.error import HTTPError
except ImportError:
from urlparse import urlparse
from urllib import urlencode
from urllib2 import urlopen, Request, HTTPError, build_opener, HTTPHandler
EMAIL = os.environ['CLARIFAI_USER_EMAIL']
PASSWORD = os.environ['CLARIFAI_USER_PASSWORD']
BASE = 'https://api.clarifai.com/v2'
def _request(method, url, payload={}, headers={}):
opener = build_opener(HTTPHandler)
full_url = '%s%s' % (BASE, url)
request = Request(full_url, data=json.dumps(payload).encode())
for k in headers.keys():
request.add_header(k, headers[k])
request.get_method = lambda: method
return json.loads(opener.open(request).read().decode())
def create_app(env_name):
session_token, user_id = _login()
url = '/users/%s/apps' % user_id
payload = {'apps': [{'name': 'auto-created-in-%s-ci-test-run' % env_name}]}
response = _request(method='POST', url=url, payload=payload, headers=_auth_headers(session_token))
_raise_on_http_error(response)
data = response
app_id = data['apps'][0]['id']
print(app_id)
def create_key(app_id):
session_token, user_id = _login()
url = '/users/%s/keys' % user_id
payload = {
'keys': [{
'description': 'Auto-created in a CI test run',
'scopes': ['All'],
'apps': [{'id': app_id, 'user_id': user_id}]
}]
}
response = _request(method='POST', url=url, payload=payload, headers=_auth_headers(session_token))
_raise_on_http_error(response)
data = response
key_id = data['keys'][0]['id']
print(key_id)
def delete(app_id):
session_token, user_id = _login()
_delete_app(session_token, user_id, app_id)
def create_sample_workflow(api_key):
url = '/workflows'
payload = {
'workflows': [
{
'id': 'food-and-general',
'nodes': [
{
'id': 'food-workflow-node',
'model': {
'id': 'bd367be194cf45149e75f01d59f77ba7',
'model_version': {
'id': 'dfebc169854e429086aceb8368662641'
}
}
},
{
'id': 'general-workflow-node',
'model': {
'id': 'aaa03c23b3724a16a56b629203edc62c',
'model_version': {
'id': 'aa9ca48295b37401f8af92ad1af0d91d'
}
}
}
]
}
]
}
response = _request(method='POST', url=url, payload=payload, headers=_auth_headers_for_api_key_key(api_key))
_raise_on_http_error(response)
def _delete_app(session_token, user_id, app_id):
url = '/users/%s/apps/%s' % (user_id, app_id)
response = _request(method='DELETE', url=url, headers=_auth_headers(session_token))
_raise_on_http_error(response)
def _auth_headers(session_token):
headers = {'Content-Type': 'application/json', 'X-Clarifai-Session-Token': session_token}
return headers
def _auth_headers_for_api_key_key(api_key):
headers = {'Content-Type': 'application/json', 'Authorization': 'Key ' + api_key}
return headers
def _login():
url = '/login'
payload = {'email': EMAIL, 'password': PASSWORD}
response = _request(method='POST', url=url, payload=payload)
_raise_on_http_error(response)
data = response
user_id = data['v2_user_id']
session_token = data['session_token']
return session_token, user_id
def _raise_on_http_error(response):
pass
def run(arguments):
command = arguments[0] if arguments else '--help'
if command == '--create-app':
if len(arguments) != 2:
raise Exception('--create-app takes one argument')
env_name = arguments[1]
create_app(env_name)
elif command == '--create-key':
if len(arguments) != 2:
raise Exception('--create-key takes one argument')
app_id = arguments[1]
create_key(app_id)
elif command == '--delete-app':
if len(arguments) != 2:
raise Exception('--delete-app takes one argument')
app_id = arguments[1]
delete(app_id)
elif command == '--create-workflow':
if len(arguments) != 2:
raise Exception('--create-workflow takes one argument')
api_key = arguments[1]
create_sample_workflow(api_key)
elif command == '--help':
        print('''DESCRIPTION: Creates and deletes applications and API keys
ARGUMENTS:
--create-app [env_name] ... Creates a new application.
--create-key [app_id] ... Creates a new API key.
--delete-app [app_id] ... Deletes an application (API keys that use it are deleted as well).
--create-workflow [api_key] ... Creates a sample workflow to be used in int. tests.
--help ... This text.''')
else:
print('Unknown argument. Please see --help')
exit(1)
if __name__ == '__main__':
run(arguments=sys.argv[1:])
| true
| true
|
f70beb1fa4370b3f14474cd5e4594b48d60c8bf8
| 7,308
|
py
|
Python
|
test/calibration/experiments/test_drag.py
|
QuantumHardware/qiskit-experiments
|
c09cf35bb922419354955abe8d536a97a9ea286b
|
[
"Apache-2.0"
] | null | null | null |
test/calibration/experiments/test_drag.py
|
QuantumHardware/qiskit-experiments
|
c09cf35bb922419354955abe8d536a97a9ea286b
|
[
"Apache-2.0"
] | null | null | null |
test/calibration/experiments/test_drag.py
|
QuantumHardware/qiskit-experiments
|
c09cf35bb922419354955abe8d536a97a9ea286b
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test drag calibration experiment."""
from test.base import QiskitExperimentsTestCase
import unittest
import numpy as np
from qiskit.circuit import Parameter
from qiskit.exceptions import QiskitError
from qiskit.pulse import DriveChannel, Drag
import qiskit.pulse as pulse
from qiskit.qobj.utils import MeasLevel
from qiskit import transpile
from qiskit_experiments.exceptions import CalibrationError
from qiskit_experiments.library import RoughDrag, RoughDragCal
from qiskit_experiments.test.mock_iq_backend import DragBackend
from qiskit_experiments.calibration_management.basis_gate_library import FixedFrequencyTransmon
from qiskit_experiments.calibration_management import Calibrations
class TestDragEndToEnd(QiskitExperimentsTestCase):
"""Test the drag experiment."""
def setUp(self):
"""Setup some schedules."""
super().setUp()
beta = Parameter("β")
with pulse.build(name="xp") as xp:
pulse.play(Drag(duration=160, amp=0.208519, sigma=40, beta=beta), DriveChannel(0))
self.x_plus = xp
self.test_tol = 0.05
def test_reps(self):
"""Test that setting reps raises and error if reps is not of length three."""
drag = RoughDrag(0, self.x_plus)
with self.assertRaises(CalibrationError):
drag.set_experiment_options(reps=[1, 2, 3, 4])
def test_end_to_end(self):
"""Test the drag experiment end to end."""
backend = DragBackend(gate_name="Drag(xp)")
drag = RoughDrag(1, self.x_plus)
expdata = drag.run(backend)
self.assertExperimentDone(expdata)
result = expdata.analysis_results(1)
self.assertTrue(abs(result.value.n - backend.ideal_beta) < self.test_tol)
self.assertEqual(result.quality, "good")
# Small leakage will make the curves very flat, in this case one should
# rather increase beta.
backend = DragBackend(error=0.0051, gate_name="Drag(xp)")
drag = RoughDrag(0, self.x_plus)
drag.analysis.set_options(p0={"beta": 1.2})
exp_data = drag.run(backend)
self.assertExperimentDone(exp_data)
result = exp_data.analysis_results(1)
self.assertTrue(abs(result.value.n - backend.ideal_beta) < self.test_tol)
self.assertEqual(result.quality, "good")
# Large leakage will make the curves oscillate quickly.
backend = DragBackend(error=0.05, gate_name="Drag(xp)")
drag = RoughDrag(1, self.x_plus, betas=np.linspace(-4, 4, 31))
drag.set_run_options(shots=200)
drag.analysis.set_options(p0={"beta": 1.8, "freq0": 0.08, "freq1": 0.16, "freq2": 0.32})
exp_data = drag.run(backend)
self.assertExperimentDone(exp_data)
result = exp_data.analysis_results(1)
meas_level = exp_data.metadata["job_metadata"][-1]["run_options"]["meas_level"]
self.assertEqual(meas_level, MeasLevel.CLASSIFIED)
self.assertTrue(abs(result.value.n - backend.ideal_beta) < self.test_tol)
self.assertEqual(result.quality, "good")
class TestDragCircuits(QiskitExperimentsTestCase):
"""Test the circuits of the drag calibration."""
def setUp(self):
"""Setup some schedules."""
super().setUp()
beta = Parameter("β")
with pulse.build(name="xp") as xp:
pulse.play(Drag(duration=160, amp=0.208519, sigma=40, beta=beta), DriveChannel(0))
self.x_plus = xp
def test_default_circuits(self):
"""Test the default circuit."""
backend = DragBackend(error=0.005, gate_name="Drag(xp)")
drag = RoughDrag(0, self.x_plus)
drag.set_experiment_options(reps=[2, 4, 8])
drag.backend = DragBackend(gate_name="Drag(xp)")
circuits = drag.circuits()
for idx, expected in enumerate([4, 8, 16]):
ops = transpile(circuits[idx * 51], backend).count_ops()
self.assertEqual(ops["Drag(xp)"], expected)
def test_raise_multiple_parameter(self):
"""Check that the experiment raises with unassigned parameters."""
beta = Parameter("β")
amp = Parameter("amp")
with pulse.build(name="xp") as xp:
pulse.play(Drag(duration=160, amp=amp, sigma=40, beta=beta), DriveChannel(0))
with self.assertRaises(QiskitError):
RoughDrag(1, xp, betas=np.linspace(-3, 3, 21))
class TestRoughDragCalUpdate(QiskitExperimentsTestCase):
"""Test that a Drag calibration experiment properly updates the calibrations."""
def setUp(self):
"""Setup the tests"""
super().setUp()
library = FixedFrequencyTransmon()
self.backend = DragBackend(gate_name="Drag(x)")
self.cals = Calibrations.from_backend(self.backend, library)
self.test_tol = 0.05
def test_update(self):
"""Test that running RoughDragCal updates the calibrations."""
qubit = 0
prev_beta = self.cals.get_parameter_value("β", (0,), "x")
self.assertEqual(prev_beta, 0)
expdata = RoughDragCal(qubit, self.cals, backend=self.backend).run()
self.assertExperimentDone(expdata)
new_beta = self.cals.get_parameter_value("β", (0,), "x")
self.assertTrue(abs(new_beta - self.backend.ideal_beta) < self.test_tol)
self.assertTrue(abs(new_beta) > self.test_tol)
def test_dragcal_experiment_config(self):
"""Test RoughDragCal config can round trip"""
exp = RoughDragCal(0, self.cals, backend=self.backend)
loaded_exp = RoughDragCal.from_config(exp.config())
self.assertNotEqual(exp, loaded_exp)
self.assertTrue(self.json_equiv(exp, loaded_exp))
@unittest.skip("Calibration experiments are not yet JSON serializable")
def test_dragcal_roundtrip_serializable(self):
"""Test round trip JSON serialization"""
exp = RoughDragCal(0, self.cals)
self.assertRoundTripSerializable(exp, self.json_equiv)
def test_drag_experiment_config(self):
"""Test RoughDrag config can roundtrip"""
with pulse.build(name="xp") as sched:
pulse.play(pulse.Drag(160, 0.5, 40, Parameter("β")), pulse.DriveChannel(0))
exp = RoughDrag(0, backend=self.backend, schedule=sched)
loaded_exp = RoughDrag.from_config(exp.config())
self.assertNotEqual(exp, loaded_exp)
self.assertTrue(self.json_equiv(exp, loaded_exp))
@unittest.skip("Schedules are not yet JSON serializable")
def test_drag_roundtrip_serializable(self):
"""Test round trip JSON serialization"""
with pulse.build(name="xp") as sched:
pulse.play(pulse.Drag(160, 0.5, 40, Parameter("β")), pulse.DriveChannel(0))
exp = RoughDrag(0, backend=self.backend, schedule=sched)
self.assertRoundTripSerializable(exp, self.json_equiv)
| 37.096447
| 96
| 0.676382
|
from test.base import QiskitExperimentsTestCase
import unittest
import numpy as np
from qiskit.circuit import Parameter
from qiskit.exceptions import QiskitError
from qiskit.pulse import DriveChannel, Drag
import qiskit.pulse as pulse
from qiskit.qobj.utils import MeasLevel
from qiskit import transpile
from qiskit_experiments.exceptions import CalibrationError
from qiskit_experiments.library import RoughDrag, RoughDragCal
from qiskit_experiments.test.mock_iq_backend import DragBackend
from qiskit_experiments.calibration_management.basis_gate_library import FixedFrequencyTransmon
from qiskit_experiments.calibration_management import Calibrations
class TestDragEndToEnd(QiskitExperimentsTestCase):
def setUp(self):
super().setUp()
beta = Parameter("β")
with pulse.build(name="xp") as xp:
pulse.play(Drag(duration=160, amp=0.208519, sigma=40, beta=beta), DriveChannel(0))
self.x_plus = xp
self.test_tol = 0.05
def test_reps(self):
drag = RoughDrag(0, self.x_plus)
with self.assertRaises(CalibrationError):
drag.set_experiment_options(reps=[1, 2, 3, 4])
def test_end_to_end(self):
backend = DragBackend(gate_name="Drag(xp)")
drag = RoughDrag(1, self.x_plus)
expdata = drag.run(backend)
self.assertExperimentDone(expdata)
result = expdata.analysis_results(1)
self.assertTrue(abs(result.value.n - backend.ideal_beta) < self.test_tol)
self.assertEqual(result.quality, "good")
backend = DragBackend(error=0.0051, gate_name="Drag(xp)")
drag = RoughDrag(0, self.x_plus)
drag.analysis.set_options(p0={"beta": 1.2})
exp_data = drag.run(backend)
self.assertExperimentDone(exp_data)
result = exp_data.analysis_results(1)
self.assertTrue(abs(result.value.n - backend.ideal_beta) < self.test_tol)
self.assertEqual(result.quality, "good")
backend = DragBackend(error=0.05, gate_name="Drag(xp)")
drag = RoughDrag(1, self.x_plus, betas=np.linspace(-4, 4, 31))
drag.set_run_options(shots=200)
drag.analysis.set_options(p0={"beta": 1.8, "freq0": 0.08, "freq1": 0.16, "freq2": 0.32})
exp_data = drag.run(backend)
self.assertExperimentDone(exp_data)
result = exp_data.analysis_results(1)
meas_level = exp_data.metadata["job_metadata"][-1]["run_options"]["meas_level"]
self.assertEqual(meas_level, MeasLevel.CLASSIFIED)
self.assertTrue(abs(result.value.n - backend.ideal_beta) < self.test_tol)
self.assertEqual(result.quality, "good")
class TestDragCircuits(QiskitExperimentsTestCase):
def setUp(self):
super().setUp()
beta = Parameter("β")
with pulse.build(name="xp") as xp:
pulse.play(Drag(duration=160, amp=0.208519, sigma=40, beta=beta), DriveChannel(0))
self.x_plus = xp
def test_default_circuits(self):
backend = DragBackend(error=0.005, gate_name="Drag(xp)")
drag = RoughDrag(0, self.x_plus)
drag.set_experiment_options(reps=[2, 4, 8])
drag.backend = DragBackend(gate_name="Drag(xp)")
circuits = drag.circuits()
for idx, expected in enumerate([4, 8, 16]):
ops = transpile(circuits[idx * 51], backend).count_ops()
self.assertEqual(ops["Drag(xp)"], expected)
def test_raise_multiple_parameter(self):
beta = Parameter("β")
amp = Parameter("amp")
with pulse.build(name="xp") as xp:
pulse.play(Drag(duration=160, amp=amp, sigma=40, beta=beta), DriveChannel(0))
with self.assertRaises(QiskitError):
RoughDrag(1, xp, betas=np.linspace(-3, 3, 21))
class TestRoughDragCalUpdate(QiskitExperimentsTestCase):
def setUp(self):
super().setUp()
library = FixedFrequencyTransmon()
self.backend = DragBackend(gate_name="Drag(x)")
self.cals = Calibrations.from_backend(self.backend, library)
self.test_tol = 0.05
def test_update(self):
qubit = 0
prev_beta = self.cals.get_parameter_value("β", (0,), "x")
self.assertEqual(prev_beta, 0)
expdata = RoughDragCal(qubit, self.cals, backend=self.backend).run()
self.assertExperimentDone(expdata)
new_beta = self.cals.get_parameter_value("β", (0,), "x")
self.assertTrue(abs(new_beta - self.backend.ideal_beta) < self.test_tol)
self.assertTrue(abs(new_beta) > self.test_tol)
def test_dragcal_experiment_config(self):
exp = RoughDragCal(0, self.cals, backend=self.backend)
loaded_exp = RoughDragCal.from_config(exp.config())
self.assertNotEqual(exp, loaded_exp)
self.assertTrue(self.json_equiv(exp, loaded_exp))
@unittest.skip("Calibration experiments are not yet JSON serializable")
def test_dragcal_roundtrip_serializable(self):
exp = RoughDragCal(0, self.cals)
self.assertRoundTripSerializable(exp, self.json_equiv)
def test_drag_experiment_config(self):
with pulse.build(name="xp") as sched:
pulse.play(pulse.Drag(160, 0.5, 40, Parameter("β")), pulse.DriveChannel(0))
exp = RoughDrag(0, backend=self.backend, schedule=sched)
loaded_exp = RoughDrag.from_config(exp.config())
self.assertNotEqual(exp, loaded_exp)
self.assertTrue(self.json_equiv(exp, loaded_exp))
@unittest.skip("Schedules are not yet JSON serializable")
def test_drag_roundtrip_serializable(self):
with pulse.build(name="xp") as sched:
pulse.play(pulse.Drag(160, 0.5, 40, Parameter("β")), pulse.DriveChannel(0))
exp = RoughDrag(0, backend=self.backend, schedule=sched)
self.assertRoundTripSerializable(exp, self.json_equiv)
| true
| true
|
f70beb38a18f8aaf4b1f040a4d2c358773707a65
| 1,278
|
py
|
Python
|
sa/profiles/ZTE/ZXDSL531/get_dot11_associations.py
|
xUndero/noc
|
9fb34627721149fcf7064860bd63887e38849131
|
[
"BSD-3-Clause"
] | 1
|
2019-09-20T09:36:48.000Z
|
2019-09-20T09:36:48.000Z
|
sa/profiles/ZTE/ZXDSL531/get_dot11_associations.py
|
ewwwcha/noc
|
aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb
|
[
"BSD-3-Clause"
] | null | null | null |
sa/profiles/ZTE/ZXDSL531/get_dot11_associations.py
|
ewwwcha/noc
|
aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# ZTE.ZXDSL531.get_dot11_associations
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetdot11associations import IGetDot11Associations
from noc.core.text import strip_html_tags
rx_mac = re.compile(
"(?P<mac>[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2})"
)
class Script(BaseScript):
name = "ZTE.ZXDSL531.get_dot11_associations"
interface = IGetDot11Associations
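    # Query the associated wireless clients via 'wlctl authe_sta_list' over telnet, or by scraping /wlclientview.cmd over HTTP.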
def execute(self):
if self.access_profile.scheme == self.TELNET:
v = self.cli("wlctl authe_sta_list")
elif self.access_profile.scheme == self.HTTP:
v = self.http.get("/wlclientview.cmd")
v = strip_html_tags(v)
else:
raise Exception("Unsupported access scheme")
r = []
for l in v.split("\n"):
m = rx_mac.search(l)
if m:
r.append({"mac": m.group("mac")})
return r
| 31.95
| 86
| 0.514085
|
import re
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetdot11associations import IGetDot11Associations
from noc.core.text import strip_html_tags
rx_mac = re.compile(
"(?P<mac>[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2})"
)
class Script(BaseScript):
name = "ZTE.ZXDSL531.get_dot11_associations"
interface = IGetDot11Associations
def execute(self):
if self.access_profile.scheme == self.TELNET:
v = self.cli("wlctl authe_sta_list")
elif self.access_profile.scheme == self.HTTP:
v = self.http.get("/wlclientview.cmd")
v = strip_html_tags(v)
else:
raise Exception("Unsupported access scheme")
r = []
for l in v.split("\n"):
m = rx_mac.search(l)
if m:
r.append({"mac": m.group("mac")})
return r
| true
| true
|
f70bebf637a63567b98981f041f242cabd79d5e8
| 563
|
py
|
Python
|
apps/bulk/migrations/0007_auto_20150302_1935.py
|
Sult/daf
|
a4da9e8c96f70577e2490c05e82bdf7d0de1a563
|
[
"MIT"
] | null | null | null |
apps/bulk/migrations/0007_auto_20150302_1935.py
|
Sult/daf
|
a4da9e8c96f70577e2490c05e82bdf7d0de1a563
|
[
"MIT"
] | null | null | null |
apps/bulk/migrations/0007_auto_20150302_1935.py
|
Sult/daf
|
a4da9e8c96f70577e2490c05e82bdf7d0de1a563
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('bulk', '0006_auto_20150302_1750'),
]
operations = [
migrations.AlterField(
model_name='sovereigntyholder',
name='last_refresh',
field=models.DateTimeField(default=datetime.datetime(2015, 3, 2, 19, 35, 37, 7218, tzinfo=utc)),
preserve_default=True,
),
]
| 24.478261
| 108
| 0.64476
|
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('bulk', '0006_auto_20150302_1750'),
]
operations = [
migrations.AlterField(
model_name='sovereigntyholder',
name='last_refresh',
field=models.DateTimeField(default=datetime.datetime(2015, 3, 2, 19, 35, 37, 7218, tzinfo=utc)),
preserve_default=True,
),
]
| true
| true
|
f70bec598956cee8807a514f594b2be632fc271b
| 6,033
|
py
|
Python
|
pydotorg/settings/base.py
|
caputomarcos/pythondotorg
|
da96fee61bb5c92b7060bccb6ed467fe00136dd7
|
[
"Apache-2.0"
] | null | null | null |
pydotorg/settings/base.py
|
caputomarcos/pythondotorg
|
da96fee61bb5c92b7060bccb6ed467fe00136dd7
|
[
"Apache-2.0"
] | null | null | null |
pydotorg/settings/base.py
|
caputomarcos/pythondotorg
|
da96fee61bb5c92b7060bccb6ed467fe00136dd7
|
[
"Apache-2.0"
] | null | null | null |
import os
import dj_database_url
### Basic config
BASE = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
DEBUG = TEMPLATE_DEBUG = True
SITE_ID = 1
SECRET_KEY = 'its-a-secret-to-everybody'
# Until Sentry works on Py3, do errors the old-fashioned way.
ADMINS = []
# General project information
# These are available in the template as SITE_INFO.<title>
SITE_VARIABLES = {
'site_name': 'Python.org',
'site_descript': 'The official home of the Python Programming Language',
}
### Databases
DATABASES = {
'default': dj_database_url.config(default='postgres:///python.org')
}
### Locale settings
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
USE_TZ = True
DATE_FORMAT = 'Y-m-d'
### Files (media and static)
MEDIA_ROOT = os.path.join(BASE, 'media')
MEDIA_URL = '/m/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.join(BASE, 'static-root')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE, 'static'),
]
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
### Authentication
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
"django.contrib.auth.backends.ModelBackend",
# `allauth` specific authentication methods, such as login by e-mail
"allauth.account.auth_backends.AuthenticationBackend",
)
LOGIN_REDIRECT_URL = 'home'
ACCOUNT_LOGOUT_REDIRECT_URL = 'home'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
SOCIALACCOUNT_EMAIL_REQUIRED = True
SOCIALACCOUNT_EMAIL_VERIFICATION = True
SOCIALACCOUNT_QUERY_EMAIL = True
### Templates
TEMPLATE_DIRS = [
os.path.join(BASE, 'templates')
]
TEMPLATE_CONTEXT_PROCESSORS = [
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"pydotorg.context_processors.site_info",
"pydotorg.context_processors.url_name",
"pydotorg.context_processors.get_host_with_scheme",
]
### URLs, WSGI, middleware, etc.
ROOT_URLCONF = 'pydotorg.urls'
MIDDLEWARE_CLASSES = (
'pydotorg.middleware.AdminNoCaching',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'waffle.middleware.WaffleMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'pages.middleware.PageFallbackMiddleware',
'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
)
AUTH_USER_MODEL = 'users.User'
WSGI_APPLICATION = 'pydotorg.wsgi.application'
### Apps
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.redirects',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.comments',
'django.contrib.admin',
'django.contrib.admindocs',
'django_comments_xtd',
'jsonfield',
'pipeline',
'sitetree',
'timedelta',
'imagekit',
'haystack',
'honeypot',
'waffle',
'users',
'boxes',
'cms',
'companies',
'feedbacks',
'community',
'jobs',
'pages',
'sponsors',
'successstories',
'events',
'minutes',
'peps',
'blogs',
'downloads',
'codesamples',
'work_groups',
'allauth',
'allauth.account',
'allauth.socialaccount',
#'allauth.socialaccount.providers.facebook',
#'allauth.socialaccount.providers.github',
#'allauth.socialaccount.providers.openid',
#'allauth.socialaccount.providers.twitter',
# Tastypie needs the `users` app to be already loaded.
'tastypie',
'debug_toolbar',
]
# Fixtures
FIXTURE_DIRS = (
os.path.join(BASE, 'fixtures'),
)
### Testing
SKIP_NETWORK_TESTS = True
### Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
### Development
DEV_FIXTURE_URL = 'https://www.python.org/m/fixtures/dev-fixtures.json.gz'
### Comments
COMMENTS_APP = 'django_comments_xtd'
COMMENTS_XTD_MAX_THREAD_LEVEL = 0
COMMENTS_XTD_FORM_CLASS = "jobs.forms.JobCommentForm"
### Honeypot
HONEYPOT_FIELD_NAME = 'email_body_text'
HONEYPOT_VALUE = 'write your message'
### Blog Feed URL
PYTHON_BLOG_FEED_URL = "http://feeds.feedburner.com/PythonInsider"
PYTHON_BLOG_URL = "http://blog.python.org"
### Registration mailing lists
MAILING_LIST_PSF_MEMBERS = "psf-members-announce-request@python.org"
### PEP Repo Location
PEP_REPO_PATH = ''
### Fastly ###
FASTLY_API_KEY = False # Set to Fastly API key in production to allow pages to
# be purged on save
# Jobs
JOB_THRESHOLD_DAYS = 90
JOB_FROM_EMAIL = 'jobs@python.org'
### Pipeline
from .pipeline import (
PIPELINE_CSS, PIPELINE_JS,
PIPELINE_COMPILERS,
PIPELINE_SASS_BINARY, PIPELINE_SASS_ARGUMENTS,
PIPELINE_CSS_COMPRESSOR, PIPELINE_JS_COMPRESSOR,
)
### django-waffle settings
WAFFLE_OVERRIDE = True
| 24.326613
| 79
| 0.697331
|
import os
import dj_database_url
BASE = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
DEBUG = TEMPLATE_DEBUG = True
SITE_ID = 1
SECRET_KEY = 'its-a-secret-to-everybody'
ADMINS = []
SITE_VARIABLES = {
'site_name': 'Python.org',
'site_descript': 'The official home of the Python Programming Language',
}
DATABASES = {
'default': dj_database_url.config(default='postgres:///python.org')
}
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
USE_TZ = True
DATE_FORMAT = 'Y-m-d'
MEDIA_ROOT = os.path.join(BASE, 'media')
MEDIA_URL = '/m/'
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
STATIC_ROOT = os.path.join(BASE, 'static-root')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE, 'static'),
]
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
LOGIN_REDIRECT_URL = 'home'
ACCOUNT_LOGOUT_REDIRECT_URL = 'home'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
SOCIALACCOUNT_EMAIL_REQUIRED = True
SOCIALACCOUNT_EMAIL_VERIFICATION = True
SOCIALACCOUNT_QUERY_EMAIL = True
TEMPLATE_DIRS = [
os.path.join(BASE, 'templates')
]
TEMPLATE_CONTEXT_PROCESSORS = [
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"pydotorg.context_processors.site_info",
"pydotorg.context_processors.url_name",
"pydotorg.context_processors.get_host_with_scheme",
]
ROOT_URLCONF = 'pydotorg.urls'
MIDDLEWARE_CLASSES = (
'pydotorg.middleware.AdminNoCaching',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'waffle.middleware.WaffleMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'pages.middleware.PageFallbackMiddleware',
'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
)
AUTH_USER_MODEL = 'users.User'
WSGI_APPLICATION = 'pydotorg.wsgi.application'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.redirects',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.comments',
'django.contrib.admin',
'django.contrib.admindocs',
'django_comments_xtd',
'jsonfield',
'pipeline',
'sitetree',
'timedelta',
'imagekit',
'haystack',
'honeypot',
'waffle',
'users',
'boxes',
'cms',
'companies',
'feedbacks',
'community',
'jobs',
'pages',
'sponsors',
'successstories',
'events',
'minutes',
'peps',
'blogs',
'downloads',
'codesamples',
'work_groups',
'allauth',
'allauth.account',
'allauth.socialaccount',
'tastypie',
'debug_toolbar',
]
FIXTURE_DIRS = (
os.path.join(BASE, 'fixtures'),
)
SKIP_NETWORK_TESTS = True
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
DEV_FIXTURE_URL = 'https://www.python.org/m/fixtures/dev-fixtures.json.gz'
COMMENTS_APP = 'django_comments_xtd'
COMMENTS_XTD_MAX_THREAD_LEVEL = 0
COMMENTS_XTD_FORM_CLASS = "jobs.forms.JobCommentForm"
HONEYPOT_FIELD_NAME = 'email_body_text'
HONEYPOT_VALUE = 'write your message'
PYTHON_BLOG_FEED_URL = "http://feeds.feedburner.com/PythonInsider"
PYTHON_BLOG_URL = "http://blog.python.org"
MAILING_LIST_PSF_MEMBERS = "psf-members-announce-request@python.org"
PEP_REPO_PATH = ''
FASTLY_API_KEY = False
JOB_THRESHOLD_DAYS = 90
JOB_FROM_EMAIL = 'jobs@python.org'
from .pipeline import (
PIPELINE_CSS, PIPELINE_JS,
PIPELINE_COMPILERS,
PIPELINE_SASS_BINARY, PIPELINE_SASS_ARGUMENTS,
PIPELINE_CSS_COMPRESSOR, PIPELINE_JS_COMPRESSOR,
)
WAFFLE_OVERRIDE = True
| true
| true
|
f70beccc8167dc8794e9af202f98dc483d904d5e
| 464
|
py
|
Python
|
ecom/api/migrations/0001_initial.py
|
Lisgevan/FULL_STACK_DEVELOMENT_DJANGO_REACT
|
9e87db526a4126a6e3cbac9dd2b88b8ea88a2318
|
[
"MIT"
] | null | null | null |
ecom/api/migrations/0001_initial.py
|
Lisgevan/FULL_STACK_DEVELOMENT_DJANGO_REACT
|
9e87db526a4126a6e3cbac9dd2b88b8ea88a2318
|
[
"MIT"
] | null | null | null |
ecom/api/migrations/0001_initial.py
|
Lisgevan/FULL_STACK_DEVELOMENT_DJANGO_REACT
|
9e87db526a4126a6e3cbac9dd2b88b8ea88a2318
|
[
"MIT"
] | null | null | null |
from django.db import migrations
from api.user.models import CustomUser
class Migration(migrations.Migration):
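    # Seed an initial superuser account with hard-coded credentials.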
def seed_data(apps, schema_editor):
user = CustomUser(
name = 'admin',
email = 'admin@admin.dev',
is_staff = True,
is_superuser = True,
phone = "9876554321",
gender = 'Male'
)
user.set_password('qwerty')
user.save()
dependencies = [ ]
operations = [
migrations.RunPython(seed_data),
]
| 22.095238
| 38
| 0.640086
|
from django.db import migrations
from api.user.models import CustomUser
class Migration(migrations.Migration):
def seed_data(apps, schema_editor):
user = CustomUser(
name = 'admin',
email = 'admin@admin.dev',
is_staff = True,
is_superuser = True,
phone = "9876554321",
gender = 'Male'
)
user.set_password('qwerty')
user.save()
dependencies = [ ]
operations = [
migrations.RunPython(seed_data),
]
| true
| true
|
f70bed112c601c3087cd5a890d8455f8167600dd
| 13,335
|
py
|
Python
|
tests/test_type.py
|
thyneb19/lux
|
07a282d6a5f60c05942d866fa6f33636c3428abc
|
[
"Apache-2.0"
] | null | null | null |
tests/test_type.py
|
thyneb19/lux
|
07a282d6a5f60c05942d866fa6f33636c3428abc
|
[
"Apache-2.0"
] | null | null | null |
tests/test_type.py
|
thyneb19/lux
|
07a282d6a5f60c05942d866fa6f33636c3428abc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019-2020 The Lux Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .context import lux
import pytest
import random
import pandas as pd
import warnings
# Suite of tests that checks whether data_type is inferred correctly by Lux
def test_check_cars():
lux.config.set_SQL_connection("")
df = pd.read_csv("lux/data/car.csv")
df.maintain_metadata()
assert df.data_type["Name"] == "nominal"
assert df.data_type["MilesPerGal"] == "quantitative"
assert df.data_type["Cylinders"] == "nominal"
assert df.data_type["Displacement"] == "quantitative"
assert df.data_type["Horsepower"] == "quantitative"
assert df.data_type["Weight"] == "quantitative"
assert df.data_type["Acceleration"] == "quantitative"
assert df.data_type["Year"] == "temporal"
assert df.data_type["Origin"] == "nominal"
def test_check_int_id():
df = pd.read_csv(
"https://github.com/lux-org/lux-datasets/blob/master/data/instacart_sample.csv?raw=true"
)
df._repr_html_()
inverted_data_type = lux.config.executor.invert_data_type(df.data_type)
assert len(inverted_data_type["id"]) == 3
assert (
"<code>order_id</code>, <code>product_id</code>, <code>user_id</code> is not visualized since it resembles an ID field."
in df._message.to_html()
)
def test_check_str_id():
df = pd.read_csv("https://github.com/lux-org/lux-datasets/blob/master/data/churn.csv?raw=true")
df._repr_html_()
assert (
"<code>customerID</code> is not visualized since it resembles an ID field.</li>"
in df._message.to_html()
)
def test_check_hpi():
df = pd.read_csv("https://github.com/lux-org/lux-datasets/blob/master/data/hpi.csv?raw=true")
df.maintain_metadata()
assert df.data_type == {
"HPIRank": "quantitative",
"Country": "geographical",
"SubRegion": "nominal",
"AverageLifeExpectancy": "quantitative",
"AverageWellBeing": "quantitative",
"HappyLifeYears": "quantitative",
"Footprint": "quantitative",
"InequalityOfOutcomes": "quantitative",
"InequalityAdjustedLifeExpectancy": "quantitative",
"InequalityAdjustedWellbeing": "quantitative",
"HappyPlanetIndex": "quantitative",
"GDPPerCapita": "quantitative",
"Population": "quantitative",
}
def test_check_airbnb():
df = pd.read_csv("https://github.com/lux-org/lux-datasets/blob/master/data/airbnb_nyc.csv?raw=true")
df.maintain_metadata()
assert df.data_type == {
"id": "id",
"name": "nominal",
"host_id": "id",
"host_name": "nominal",
"neighbourhood_group": "nominal",
"neighbourhood": "nominal",
"latitude": "quantitative",
"longitude": "quantitative",
"room_type": "nominal",
"price": "quantitative",
"minimum_nights": "quantitative",
"number_of_reviews": "quantitative",
"last_review": "temporal",
"reviews_per_month": "quantitative",
"calculated_host_listings_count": "quantitative",
"availability_365": "quantitative",
}
def test_check_airports():
df = pd.read_csv(
"https://raw.githubusercontent.com/altair-viz/vega_datasets/master/vega_datasets/_data/airports.csv"
)
df.maintain_metadata()
assert df.data_type == {
"iata": "id",
"name": "nominal",
"city": "nominal",
"state": "geographical",
"country": "geographical",
"latitude": "quantitative",
"longitude": "quantitative",
}
def test_check_datetime():
df = pd.DataFrame(
{
"a": ["2020-01-01"],
"b": ["20-01-01"],
"c": ["20-jan-01"],
"d": ["20-january-01"],
"e": ["2020 January 01"],
"f": ["2020 January 01 00:00:00 pm PT"],
"g": ["2020 January 01 13:00:00"],
"h": ["2020 January 01 23:59:59 GTC-6"],
}
)
df.maintain_metadata()
assert df.data_type == {
"a": "temporal",
"b": "temporal",
"c": "temporal",
"d": "temporal",
"e": "temporal",
"f": "temporal",
"g": "temporal",
"h": "temporal",
}
def test_check_datetime_numeric_values():
car_df = pd.read_csv("lux/data/car.csv")
car_df = car_df.rename(columns={"Year": "blah"})
car_df.maintain_metadata()
assert car_df.data_type["blah"] == "temporal"
spotify_df = pd.read_csv(
"https://raw.githubusercontent.com/lux-org/lux-datasets/master/data/spotify.csv"
)
spotify_df = spotify_df.rename(columns={"year": "blah"})
spotify_df.maintain_metadata()
assert spotify_df.data_type["blah"] == "temporal"
assert spotify_df.data_type["release_date"] == "temporal"
def test_check_stock():
df = pd.read_csv("https://github.com/lux-org/lux-datasets/blob/master/data/stocks.csv?raw=true")
df.maintain_metadata()
assert df.data_type == {
"symbol": "nominal",
"monthdate": "temporal",
"price": "quantitative",
}, "Stock dataset type detection error"
def test_check_college():
df = pd.read_csv("lux/data/college.csv")
df.maintain_metadata()
assert df.data_type == {
"Name": "nominal",
"PredominantDegree": "nominal",
"HighestDegree": "nominal",
"FundingModel": "nominal",
"Region": "nominal",
"Geography": "nominal",
"AdmissionRate": "quantitative",
"ACTMedian": "quantitative",
"SATAverage": "quantitative",
"AverageCost": "quantitative",
"Expenditure": "quantitative",
"AverageFacultySalary": "quantitative",
"MedianDebt": "quantitative",
"AverageAgeofEntry": "quantitative",
"MedianFamilyIncome": "quantitative",
"MedianEarnings": "quantitative",
}
def test_float_categorical():
values = [
{"A": 6.0, "B": 1.0, "C": 1.0, "D": 3.0, "E": 2.0, "F": 5.0},
{"A": 5.0, "B": 2.0, "C": 2.0, "D": 2.0, "E": 2.0, "F": 3.0},
{"A": 3.0, "B": 6.0, "C": 3.0, "D": 3.0, "E": 2.0, "F": 5.0},
{"A": 6.0, "B": 3.0, "C": 3.0, "D": 2.0, "E": 2.0, "F": 2.0},
{"A": 7.0, "B": 4.0, "C": 2.0, "D": 2.0, "E": 2.0, "F": 4.0},
{"A": 5.0, "B": 3.0, "C": 6.0, "D": 3.0, "E": 3.0, "F": 4.0},
{"A": 3.0, "B": 4.0, "C": 3.0, "D": 6.0, "E": 5.0, "F": 5.0},
{"A": 3.0, "B": 3.0, "C": 2.0, "D": 2.0, "E": 4.0, "F": 5.0},
{"A": 3.0, "B": 2.0, "C": 2.0, "D": 2.0, "E": 2.0, "F": 4.0},
{"A": 1.0, "B": 2.0, "C": 2.0, "D": 2.0, "E": 2.0, "F": 6.0},
{"A": 3.0, "B": 3.0, "C": 2.0, "D": 3.0, "E": 3.0, "F": 5.0},
{"A": 7.0, "B": 1.0, "C": 1.0, "D": 2.0, "E": 2.0, "F": 3.0},
{"A": 6.0, "B": 2.0, "C": 2.0, "D": 2.0, "E": 2.0, "F": 3.0},
{"A": 2.0, "B": 3.0, "C": 2.0, "D": 3.0, "E": 3.0, "F": 4.0},
{"A": 6.0, "B": 2.0, "C": 3.0, "D": 3.0, "E": 3.0, "F": 5.0},
]
df = pd.DataFrame(values)
df.maintain_metadata()
inverted_data_type = lux.config.executor.invert_data_type(df.data_type)
assert inverted_data_type["nominal"] == [
"A",
"B",
"C",
"D",
"E",
"F",
], "Float column should be detected as categorical"
for x in list(df.dtypes):
assert x == "float64", "Source dataframe preserved as float dtype"
def test_set_data_type():
df = pd.read_csv(
"https://github.com/lux-org/lux-datasets/blob/master/data/real_estate_tutorial.csv?raw=true"
)
with pytest.warns(UserWarning) as w:
df._repr_html_()
assert "starter template that you can use" in str(w[-1].message)
assert "df.set_data_type" in str(w[-1].message)
df.set_data_type({"Month": "nominal", "Year": "nominal"})
assert df.data_type["Month"] == "nominal"
assert df.data_type["Year"] == "nominal"
    with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
df._repr_html_()
assert not w
def test_set_data_type_invalid():
df = pd.read_csv(
"https://github.com/lux-org/lux-datasets/blob/master/data/real_estate_tutorial.csv?raw=true"
)
with pytest.raises(ValueError):
df.set_data_type({"Month": "nomnal", "Year": "nomnal"})
def test_set_wrong_data_type():
df = pd.read_csv(
"https://github.com/lux-org/lux-datasets/blob/master/data/real_estate_tutorial.csv?raw=true"
)
df.set_data_type({"Year": "quantitative"})
assert df.data_type["Year"] == "quantitative"
def test_id_with_label():
df = pd.read_csv(
"https://github.com/lux-org/lux-datasets/blob/master/data/state_timeseries.csv?raw=true"
)
df.maintain_metadata()
assert df.data_type == {"Date": "temporal", "State": "geographical", "Value": "quantitative"}
def test_ID_random():
"""Tests whether a ID column not satisfying other properties of an ID gets recognized."""
values = [
{"ID": random.randint(0, 1000), "A": 6.0, "B": 1.0, "C": 1.0, "D": 3.0, "E": 2.0, "F": 5.0}
for x in range(1000)
]
df = pd.DataFrame(values)
df.maintain_metadata()
assert df.data_type == {
"ID": "quantitative",
"A": "nominal",
"B": "nominal",
"C": "nominal",
"D": "nominal",
"E": "nominal",
"F": "nominal",
}
def test_ID():
"""Tests different ways of writing id"""
values = [{"ID": x, "A": 6.0, "B": 1.0, "C": 1.0, "D": 3.0, "E": 2.0, "F": 5.0} for x in range(1000)]
df = pd.DataFrame(values)
df.maintain_metadata()
assert df.data_type == {
"ID": "id",
"A": "nominal",
"B": "nominal",
"C": "nominal",
"D": "nominal",
"E": "nominal",
"F": "nominal",
}
def test_id_aug_test():
"""Tests in a different dataset
Reference: https://www.kaggle.com/arashnic/hr-analytics-job-change-of-data-scientists
"""
df = pd.read_csv("https://github.com/lux-org/lux-datasets/blob/master/data/aug_test.csv?raw=true")
df.maintain_metadata()
assert df.data_type == {
"enrollee_id": "id",
"city": "nominal",
"city_development_index": "quantitative",
"gender": "nominal",
"relevent_experience": "nominal",
"enrolled_university": "nominal",
"education_level": "nominal",
"major_discipline": "nominal",
"experience": "nominal",
"company_size": "nominal",
"company_type": "nominal",
"last_new_job": "nominal",
"training_hours": "quantitative",
}
def test_id_music_data():
"""Tests in a different dataset if a column not named as an ID is recognized as an identification.
Reference: https://www.kaggle.com/yamaerenay/spotify-dataset-19212020-160k-tracks
"""
df = pd.read_csv("https://github.com/lux-org/lux-datasets/blob/master/data/spotify.csv?raw=true")
df["unique_num"] = df["id"]
df.drop(columns=["id"])
df.maintain_metadata()
assert df.data_type == {
"valence": "quantitative",
"year": "temporal",
"acousticness": "quantitative",
"artists": "nominal",
"danceability": "quantitative",
"duration_ms": "quantitative",
"energy": "quantitative",
"explicit": "nominal",
"unique_num": "id",
"instrumentalness": "quantitative",
"key": "nominal",
"liveness": "quantitative",
"loudness": "quantitative",
"mode": "nominal",
"name": "nominal",
"popularity": "quantitative",
"release_date": "temporal",
"speechiness": "quantitative",
"tempo": "quantitative",
"id": "id",
}
def test_id_absenteeism_data():
""" Tests whether an id named column is not recognized because even though it is named an id, it is not with its nature. """
df = pd.read_csv("https://github.com/lux-org/lux-datasets/blob/master/data/absenteeism.csv?raw=true")
df.maintain_metadata()
assert df.data_type == {
"ID": "quantitative",
"Reason for absence": "quantitative",
"Month of absence": "nominal",
"Day of the week": "nominal",
"Seasons": "nominal",
"Transportation expense": "quantitative",
"Distance from Residence to Work": "quantitative",
"Service time": "nominal",
"Age": "quantitative",
"Work load Average/day ": "quantitative",
"Hit target": "nominal",
"Disciplinary failure": "nominal",
"Education": "nominal",
"Son": "nominal",
"Social drinker": "nominal",
"Social smoker": "nominal",
"Pet": "nominal",
"Weight": "quantitative",
"Height": "nominal",
"Body mass index": "nominal",
"Absenteeism time in hours": "nominal",
}
| 34.546632
| 128
| 0.580427
|
from .context import lux
import pytest
import random
import pandas as pd
import warnings
def test_check_cars():
lux.config.set_SQL_connection("")
df = pd.read_csv("lux/data/car.csv")
df.maintain_metadata()
assert df.data_type["Name"] == "nominal"
assert df.data_type["MilesPerGal"] == "quantitative"
assert df.data_type["Cylinders"] == "nominal"
assert df.data_type["Displacement"] == "quantitative"
assert df.data_type["Horsepower"] == "quantitative"
assert df.data_type["Weight"] == "quantitative"
assert df.data_type["Acceleration"] == "quantitative"
assert df.data_type["Year"] == "temporal"
assert df.data_type["Origin"] == "nominal"
def test_check_int_id():
df = pd.read_csv(
"https://github.com/lux-org/lux-datasets/blob/master/data/instacart_sample.csv?raw=true"
)
df._repr_html_()
inverted_data_type = lux.config.executor.invert_data_type(df.data_type)
assert len(inverted_data_type["id"]) == 3
assert (
"<code>order_id</code>, <code>product_id</code>, <code>user_id</code> is not visualized since it resembles an ID field."
in df._message.to_html()
)
def test_check_str_id():
df = pd.read_csv("https://github.com/lux-org/lux-datasets/blob/master/data/churn.csv?raw=true")
df._repr_html_()
assert (
"<code>customerID</code> is not visualized since it resembles an ID field.</li>"
in df._message.to_html()
)
def test_check_hpi():
df = pd.read_csv("https://github.com/lux-org/lux-datasets/blob/master/data/hpi.csv?raw=true")
df.maintain_metadata()
assert df.data_type == {
"HPIRank": "quantitative",
"Country": "geographical",
"SubRegion": "nominal",
"AverageLifeExpectancy": "quantitative",
"AverageWellBeing": "quantitative",
"HappyLifeYears": "quantitative",
"Footprint": "quantitative",
"InequalityOfOutcomes": "quantitative",
"InequalityAdjustedLifeExpectancy": "quantitative",
"InequalityAdjustedWellbeing": "quantitative",
"HappyPlanetIndex": "quantitative",
"GDPPerCapita": "quantitative",
"Population": "quantitative",
}
def test_check_airbnb():
df = pd.read_csv("https://github.com/lux-org/lux-datasets/blob/master/data/airbnb_nyc.csv?raw=true")
df.maintain_metadata()
assert df.data_type == {
"id": "id",
"name": "nominal",
"host_id": "id",
"host_name": "nominal",
"neighbourhood_group": "nominal",
"neighbourhood": "nominal",
"latitude": "quantitative",
"longitude": "quantitative",
"room_type": "nominal",
"price": "quantitative",
"minimum_nights": "quantitative",
"number_of_reviews": "quantitative",
"last_review": "temporal",
"reviews_per_month": "quantitative",
"calculated_host_listings_count": "quantitative",
"availability_365": "quantitative",
}
def test_check_airports():
df = pd.read_csv(
"https://raw.githubusercontent.com/altair-viz/vega_datasets/master/vega_datasets/_data/airports.csv"
)
df.maintain_metadata()
assert df.data_type == {
"iata": "id",
"name": "nominal",
"city": "nominal",
"state": "geographical",
"country": "geographical",
"latitude": "quantitative",
"longitude": "quantitative",
}
def test_check_datetime():
df = pd.DataFrame(
{
"a": ["2020-01-01"],
"b": ["20-01-01"],
"c": ["20-jan-01"],
"d": ["20-january-01"],
"e": ["2020 January 01"],
"f": ["2020 January 01 00:00:00 pm PT"],
"g": ["2020 January 01 13:00:00"],
"h": ["2020 January 01 23:59:59 GTC-6"],
}
)
df.maintain_metadata()
assert df.data_type == {
"a": "temporal",
"b": "temporal",
"c": "temporal",
"d": "temporal",
"e": "temporal",
"f": "temporal",
"g": "temporal",
"h": "temporal",
}
def test_check_datetime_numeric_values():
car_df = pd.read_csv("lux/data/car.csv")
car_df = car_df.rename(columns={"Year": "blah"})
car_df.maintain_metadata()
assert car_df.data_type["blah"] == "temporal"
spotify_df = pd.read_csv(
"https://raw.githubusercontent.com/lux-org/lux-datasets/master/data/spotify.csv"
)
spotify_df = spotify_df.rename(columns={"year": "blah"})
spotify_df.maintain_metadata()
assert spotify_df.data_type["blah"] == "temporal"
assert spotify_df.data_type["release_date"] == "temporal"
def test_check_stock():
df = pd.read_csv("https://github.com/lux-org/lux-datasets/blob/master/data/stocks.csv?raw=true")
df.maintain_metadata()
assert df.data_type == {
"symbol": "nominal",
"monthdate": "temporal",
"price": "quantitative",
}, "Stock dataset type detection error"
def test_check_college():
df = pd.read_csv("lux/data/college.csv")
df.maintain_metadata()
assert df.data_type == {
"Name": "nominal",
"PredominantDegree": "nominal",
"HighestDegree": "nominal",
"FundingModel": "nominal",
"Region": "nominal",
"Geography": "nominal",
"AdmissionRate": "quantitative",
"ACTMedian": "quantitative",
"SATAverage": "quantitative",
"AverageCost": "quantitative",
"Expenditure": "quantitative",
"AverageFacultySalary": "quantitative",
"MedianDebt": "quantitative",
"AverageAgeofEntry": "quantitative",
"MedianFamilyIncome": "quantitative",
"MedianEarnings": "quantitative",
}
def test_float_categorical():
values = [
{"A": 6.0, "B": 1.0, "C": 1.0, "D": 3.0, "E": 2.0, "F": 5.0},
{"A": 5.0, "B": 2.0, "C": 2.0, "D": 2.0, "E": 2.0, "F": 3.0},
{"A": 3.0, "B": 6.0, "C": 3.0, "D": 3.0, "E": 2.0, "F": 5.0},
{"A": 6.0, "B": 3.0, "C": 3.0, "D": 2.0, "E": 2.0, "F": 2.0},
{"A": 7.0, "B": 4.0, "C": 2.0, "D": 2.0, "E": 2.0, "F": 4.0},
{"A": 5.0, "B": 3.0, "C": 6.0, "D": 3.0, "E": 3.0, "F": 4.0},
{"A": 3.0, "B": 4.0, "C": 3.0, "D": 6.0, "E": 5.0, "F": 5.0},
{"A": 3.0, "B": 3.0, "C": 2.0, "D": 2.0, "E": 4.0, "F": 5.0},
{"A": 3.0, "B": 2.0, "C": 2.0, "D": 2.0, "E": 2.0, "F": 4.0},
{"A": 1.0, "B": 2.0, "C": 2.0, "D": 2.0, "E": 2.0, "F": 6.0},
{"A": 3.0, "B": 3.0, "C": 2.0, "D": 3.0, "E": 3.0, "F": 5.0},
{"A": 7.0, "B": 1.0, "C": 1.0, "D": 2.0, "E": 2.0, "F": 3.0},
{"A": 6.0, "B": 2.0, "C": 2.0, "D": 2.0, "E": 2.0, "F": 3.0},
{"A": 2.0, "B": 3.0, "C": 2.0, "D": 3.0, "E": 3.0, "F": 4.0},
{"A": 6.0, "B": 2.0, "C": 3.0, "D": 3.0, "E": 3.0, "F": 5.0},
]
df = pd.DataFrame(values)
df.maintain_metadata()
inverted_data_type = lux.config.executor.invert_data_type(df.data_type)
assert inverted_data_type["nominal"] == [
"A",
"B",
"C",
"D",
"E",
"F",
], "Float column should be detected as categorical"
for x in list(df.dtypes):
assert x == "float64", "Source dataframe preserved as float dtype"
def test_set_data_type():
df = pd.read_csv(
"https://github.com/lux-org/lux-datasets/blob/master/data/real_estate_tutorial.csv?raw=true"
)
with pytest.warns(UserWarning) as w:
df._repr_html_()
assert "starter template that you can use" in str(w[-1].message)
assert "df.set_data_type" in str(w[-1].message)
df.set_data_type({"Month": "nominal", "Year": "nominal"})
assert df.data_type["Month"] == "nominal"
assert df.data_type["Year"] == "nominal"
    with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
df._repr_html_()
assert not w
def test_set_data_type_invalid():
df = pd.read_csv(
"https://github.com/lux-org/lux-datasets/blob/master/data/real_estate_tutorial.csv?raw=true"
)
with pytest.raises(ValueError):
df.set_data_type({"Month": "nomnal", "Year": "nomnal"})
def test_set_wrong_data_type():
df = pd.read_csv(
"https://github.com/lux-org/lux-datasets/blob/master/data/real_estate_tutorial.csv?raw=true"
)
df.set_data_type({"Year": "quantitative"})
assert df.data_type["Year"] == "quantitative"
def test_id_with_label():
df = pd.read_csv(
"https://github.com/lux-org/lux-datasets/blob/master/data/state_timeseries.csv?raw=true"
)
df.maintain_metadata()
assert df.data_type == {"Date": "temporal", "State": "geographical", "Value": "quantitative"}
def test_ID_random():
values = [
{"ID": random.randint(0, 1000), "A": 6.0, "B": 1.0, "C": 1.0, "D": 3.0, "E": 2.0, "F": 5.0}
for x in range(1000)
]
df = pd.DataFrame(values)
df.maintain_metadata()
assert df.data_type == {
"ID": "quantitative",
"A": "nominal",
"B": "nominal",
"C": "nominal",
"D": "nominal",
"E": "nominal",
"F": "nominal",
}
def test_ID():
values = [{"ID": x, "A": 6.0, "B": 1.0, "C": 1.0, "D": 3.0, "E": 2.0, "F": 5.0} for x in range(1000)]
df = pd.DataFrame(values)
df.maintain_metadata()
assert df.data_type == {
"ID": "id",
"A": "nominal",
"B": "nominal",
"C": "nominal",
"D": "nominal",
"E": "nominal",
"F": "nominal",
}
def test_id_aug_test():
df = pd.read_csv("https://github.com/lux-org/lux-datasets/blob/master/data/aug_test.csv?raw=true")
df.maintain_metadata()
assert df.data_type == {
"enrollee_id": "id",
"city": "nominal",
"city_development_index": "quantitative",
"gender": "nominal",
"relevent_experience": "nominal",
"enrolled_university": "nominal",
"education_level": "nominal",
"major_discipline": "nominal",
"experience": "nominal",
"company_size": "nominal",
"company_type": "nominal",
"last_new_job": "nominal",
"training_hours": "quantitative",
}
def test_id_music_data():
df = pd.read_csv("https://github.com/lux-org/lux-datasets/blob/master/data/spotify.csv?raw=true")
df["unique_num"] = df["id"]
df.drop(columns=["id"])
df.maintain_metadata()
assert df.data_type == {
"valence": "quantitative",
"year": "temporal",
"acousticness": "quantitative",
"artists": "nominal",
"danceability": "quantitative",
"duration_ms": "quantitative",
"energy": "quantitative",
"explicit": "nominal",
"unique_num": "id",
"instrumentalness": "quantitative",
"key": "nominal",
"liveness": "quantitative",
"loudness": "quantitative",
"mode": "nominal",
"name": "nominal",
"popularity": "quantitative",
"release_date": "temporal",
"speechiness": "quantitative",
"tempo": "quantitative",
"id": "id",
}
def test_id_absenteeism_data():
df = pd.read_csv("https://github.com/lux-org/lux-datasets/blob/master/data/absenteeism.csv?raw=true")
df.maintain_metadata()
assert df.data_type == {
"ID": "quantitative",
"Reason for absence": "quantitative",
"Month of absence": "nominal",
"Day of the week": "nominal",
"Seasons": "nominal",
"Transportation expense": "quantitative",
"Distance from Residence to Work": "quantitative",
"Service time": "nominal",
"Age": "quantitative",
"Work load Average/day ": "quantitative",
"Hit target": "nominal",
"Disciplinary failure": "nominal",
"Education": "nominal",
"Son": "nominal",
"Social drinker": "nominal",
"Social smoker": "nominal",
"Pet": "nominal",
"Weight": "quantitative",
"Height": "nominal",
"Body mass index": "nominal",
"Absenteeism time in hours": "nominal",
}
| true
| true
|
f70bed892a69b0a979a1236c72b4e27848a2e38e
| 4,455
|
py
|
Python
|
others/median_two_sorted.py
|
sumitsk/leetcode
|
bb3527b08ca794dea2c9d071efc24b4276bd1c05
|
[
"MIT"
] | null | null | null |
others/median_two_sorted.py
|
sumitsk/leetcode
|
bb3527b08ca794dea2c9d071efc24b4276bd1c05
|
[
"MIT"
] | null | null | null |
others/median_two_sorted.py
|
sumitsk/leetcode
|
bb3527b08ca794dea2c9d071efc24b4276bd1c05
|
[
"MIT"
] | null | null | null |
# INCOMPLETE / UNSUCCESSFUL
# find median of two sorted arrays
import ipdb
class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
n1 = len(nums1)
n2 = len(nums2)
# special cases
if n1==0:
return self.median_sorted_array(nums2)
if n2==0:
return self.median_sorted_array(nums1)
N = n1 + n2
l1, r1 = 0, n1-1
l2, r2 = 0, n2-1
while True:
idx1 = (l1+r1)//2
# find index of largest element in nums2 smaller than v1
idx2 = self.find_largest_elem(nums2[l2:r2+1], nums1[idx1])
t2 = l2 + -1 if idx2 is None else idx2
# arr1[idx1] is at index 'n' in the joint array
n = idx1 + 1 + t2
if n < N//2 - 1:
# this should not be done if idx2 is None
next_l1 = (l1+r1)//2
next_l2 = (l2+r2)//2 if idx2 is not None else l2
next_r1, next_r2 = r1, r2
elif n == N//2 - 1:
next_val = self.next_num(nums1, nums2, idx1, t2)
if N%2==1:
return next_val
else:
return (nums1[idx1] + next_val)/2
elif n == N//2:
if N%2==1:
return nums1[idx1]
else:
prev_val = self.prev_num(nums1, nums2, idx1-1, t2)
return (prev_val + nums1[idx1])/2
else:
next_r1 = (l1+r1)//2
next_r2 = (l2+r2)//2 if idx2 is not None else r2
next_l1, next_l2 = l1, l2
l1, l2, r1, r2 = next_l1, next_l2, next_r1, next_r2
# if (l1,l2,r1,r2) == (next_l1,next_l2,next_r1,next_r2):
if r1-l1<=1 and r2-l2<=1:
# ipdb.set_trace()
# sort them until median index is reached
if n<=N//2-1:
while n!=N//2-1:
idx1, t2, val = self.next_indices_and_num(nums1, nums2, idx1, t2)
n += 1
next_val = self.next_num(nums1, nums2, idx1, t2)
if N%2==1:
return next_val
else:
return (val + next_val)/2
else:
while n!=N//2-1:
idx1, t2, next_val = self.prev_indices_and_num(nums1, nums2, idx1, t2)
n -= 1
if N%2==1:
return next_val
else:
val = self.prev_num(nums1, nums2, idx1, t2)
return (val + next_val)/2
ipdb.set_trace()
def median_sorted_array(self, arr):
# median of a sorted array
n = len(arr)
if n%2==1:
return arr[n//2]
return (arr[n//2-1] + arr[n//2])/2
def find_largest_elem(self, arr, val):
li = 0
ri = len(arr)
done = False
while not done:
if arr[(li+ri)//2] >= val:
ri = (li+ri)//2
else:
li = (li+ri)//2
done = li==ri or li+1==ri
if arr[li]<val:
return li
return None
def next_indices_and_num(self, arr1, arr2, idx1, idx2):
if idx1>=len(arr1)-1:
return idx1, idx2+1, arr2[idx2+1]
if idx2>=len(arr2)-1:
return idx1+1, idx2, arr1[idx1+1]
if arr1[idx1+1] < arr2[idx2+1]:
return idx1+1, idx2, arr1[idx1+1]
else:
return idx1, idx2+1, arr2[idx2+1]
def prev_indices_and_num(self, arr1, arr2, idx1, idx2):
if idx1<0:
return idx1, idx2-1, arr2[idx2]
if idx2<0:
return idx1-1, idx2, arr1[idx1]
if arr1[idx1] >= arr2[idx2]:
return idx1-1, idx2, arr1[idx1]
else:
return idx1, idx2-1, arr2[idx2]
def next_num(self, arr1, arr2, idx1, idx2):
return self.next_indices_and_num(arr1, arr2, idx1, idx2)[-1]
def prev_num(self, arr1, arr2, idx1, idx2):
return self.prev_indices_and_num(arr1, arr2, idx1, idx2)[-1]
nums1 = [1]
nums2 = [2,3,4,5,6]
sol = Solution()
median = sol.findMedianSortedArrays(nums1, nums2)
print(median)
| 33.246269
| 94
| 0.464198
|
import ipdb
class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
n1 = len(nums1)
n2 = len(nums2)
if n1==0:
return self.median_sorted_array(nums2)
if n2==0:
return self.median_sorted_array(nums1)
N = n1 + n2
l1, r1 = 0, n1-1
l2, r2 = 0, n2-1
while True:
idx1 = (l1+r1)//2
idx2 = self.find_largest_elem(nums2[l2:r2+1], nums1[idx1])
t2 = l2 + -1 if idx2 is None else idx2
n = idx1 + 1 + t2
if n < N//2 - 1:
next_l1 = (l1+r1)//2
next_l2 = (l2+r2)//2 if idx2 is not None else l2
next_r1, next_r2 = r1, r2
elif n == N//2 - 1:
next_val = self.next_num(nums1, nums2, idx1, t2)
if N%2==1:
return next_val
else:
return (nums1[idx1] + next_val)/2
elif n == N//2:
if N%2==1:
return nums1[idx1]
else:
prev_val = self.prev_num(nums1, nums2, idx1-1, t2)
return (prev_val + nums1[idx1])/2
else:
next_r1 = (l1+r1)//2
next_r2 = (l2+r2)//2 if idx2 is not None else r2
next_l1, next_l2 = l1, l2
l1, l2, r1, r2 = next_l1, next_l2, next_r1, next_r2
if r1-l1<=1 and r2-l2<=1:
if n<=N//2-1:
while n!=N//2-1:
idx1, t2, val = self.next_indices_and_num(nums1, nums2, idx1, t2)
n += 1
next_val = self.next_num(nums1, nums2, idx1, t2)
if N%2==1:
return next_val
else:
return (val + next_val)/2
else:
while n!=N//2-1:
idx1, t2, next_val = self.prev_indices_and_num(nums1, nums2, idx1, t2)
n -= 1
if N%2==1:
return next_val
else:
val = self.prev_num(nums1, nums2, idx1, t2)
return (val + next_val)/2
ipdb.set_trace()
def median_sorted_array(self, arr):
n = len(arr)
if n%2==1:
return arr[n//2]
return (arr[n//2-1] + arr[n//2])/2
def find_largest_elem(self, arr, val):
li = 0
ri = len(arr)
done = False
while not done:
if arr[(li+ri)//2] >= val:
ri = (li+ri)//2
else:
li = (li+ri)//2
done = li==ri or li+1==ri
if arr[li]<val:
return li
return None
def next_indices_and_num(self, arr1, arr2, idx1, idx2):
if idx1>=len(arr1)-1:
return idx1, idx2+1, arr2[idx2+1]
if idx2>=len(arr2)-1:
return idx1+1, idx2, arr1[idx1+1]
if arr1[idx1+1] < arr2[idx2+1]:
return idx1+1, idx2, arr1[idx1+1]
else:
return idx1, idx2+1, arr2[idx2+1]
def prev_indices_and_num(self, arr1, arr2, idx1, idx2):
if idx1<0:
return idx1, idx2-1, arr2[idx2]
if idx2<0:
return idx1-1, idx2, arr1[idx1]
if arr1[idx1] >= arr2[idx2]:
return idx1-1, idx2, arr1[idx1]
else:
return idx1, idx2-1, arr2[idx2]
def next_num(self, arr1, arr2, idx1, idx2):
return self.next_indices_and_num(arr1, arr2, idx1, idx2)[-1]
def prev_num(self, arr1, arr2, idx1, idx2):
return self.prev_indices_and_num(arr1, arr2, idx1, idx2)[-1]
nums1 = [1]
nums2 = [2,3,4,5,6]
sol = Solution()
median = sol.findMedianSortedArrays(nums1, nums2)
print(median)
| true
| true
|
f70bee32aa74ac635fab5ef016db1d5deccb1d1a
| 594
|
py
|
Python
|
webscraper/__main__.py
|
neerajhp/neighboorhoodgems-webscraper
|
714e34d808225c9d7fac2da8fbfca64ab62a2534
|
[
"MIT"
] | null | null | null |
webscraper/__main__.py
|
neerajhp/neighboorhoodgems-webscraper
|
714e34d808225c9d7fac2da8fbfca64ab62a2534
|
[
"MIT"
] | null | null | null |
webscraper/__main__.py
|
neerajhp/neighboorhoodgems-webscraper
|
714e34d808225c9d7fac2da8fbfca64ab62a2534
|
[
"MIT"
] | null | null | null |
import siteScripts.timeout.scraper as timeoutScraper
import logging
import logging.config
from webscraper.models.landmark import Landmark
from webscraper.services.csv import saveLandmarksCSV
def main():
# File to save landmarks
f = "landmarks.csv"
# Scrapers
timeOutLandmarks = timeoutScraper.scrape()
# Save Data
saveLandmarksCSV(timeOutLandmarks, f)
if __name__ == '__main__':
logging.config.fileConfig(fname="./logs/logging.conf",
disable_existing_loggers=False)
logger = logging.getLogger(__name__)
logger.info("Let's Begin")
main()
| 22
| 61
| 0.700337
|
import siteScripts.timeout.scraper as timeoutScraper
import logging
import logging.config
from webscraper.models.landmark import Landmark
from webscraper.services.csv import saveLandmarksCSV
def main():
f = "landmarks.csv"
timeOutLandmarks = timeoutScraper.scrape()
saveLandmarksCSV(timeOutLandmarks, f)
if __name__ == '__main__':
logging.config.fileConfig(fname="./logs/logging.conf",
disable_existing_loggers=False)
logger = logging.getLogger(__name__)
logger.info("Let's Begin")
main()
| true
| true
|
f70beebd89d334569db63ed5fc6d13fbd127389a
| 9,667
|
py
|
Python
|
src/sv-pipeline/pre_SVCalling_and_QC/raw_vcf_qc/calc_num_svs_pick_outlier.py
|
leipzig/gatk-sv
|
96566cbbaf0f8f9c8452517b38eea1e5dd6ed33a
|
[
"BSD-3-Clause"
] | 76
|
2020-06-18T21:31:43.000Z
|
2022-03-02T18:42:58.000Z
|
src/sv-pipeline/pre_SVCalling_and_QC/raw_vcf_qc/calc_num_svs_pick_outlier.py
|
iamh2o/gatk-sv
|
bf3704bd1d705339577530e267cd4d1b2f77a17f
|
[
"BSD-3-Clause"
] | 195
|
2020-06-22T15:12:28.000Z
|
2022-03-28T18:06:46.000Z
|
src/sv-pipeline/pre_SVCalling_and_QC/raw_vcf_qc/calc_num_svs_pick_outlier.py
|
iamh2o/gatk-sv
|
bf3704bd1d705339577530e267cd4d1b2f77a17f
|
[
"BSD-3-Clause"
] | 39
|
2020-07-03T06:47:18.000Z
|
2022-03-03T03:47:25.000Z
|
#!/usr/bin/env python
import sys
from typing import Sequence, Set
import argparse
import numpy
import pandas
_zero_svs_are_outliers = True
_outlier_std_threshold = 5.0
_column_order = ["CHROM", "SVTYPE", "Mean", "Median", "STD",
"Outlier_Sample", "Outlier_Number", "Outlier_Cate"]
def read_statfile(statfile: str) -> pandas.DataFrame:
"""
Special function needed to read in stats data table because
a) pandas doesn't understand that the '#' means header
b) there are multiple stats files concatenated together, resulting in headers being randomly mixed in
Args:
statfile: str
File name with concatenated tab-separated tables of variant stats
Returns:
stats_data: pandas.DataFrame
Table of variant stats
"""
with open(statfile, 'r') as f_in:
# get column header from first line, stripping '#'
columns = f_in.readline().lstrip('#').split()
# read rest of tsv file, using these columns as header and ignoring any future lines starting with '#'
return pandas.read_csv(statfile, sep='\t', comment='#', names=columns)
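# A sketch of the statfile layout this parser expects (the column names below are
# the ones used elsewhere in this script; the exact column order and the values are
# illustrative guesses). Several such tables may be concatenated, each repeating its
# '#'-prefixed header line:
#
#     #CHROM  SVTYPE  SAMPLE     NUM
#     chr1    DEL     sample_01  1302
#     chr1    DEL     sample_02  1287
#     chr1    DUP     sample_01  211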
def pick_outliers_by_group(
chrom: str,
sv_type: str,
check_stats: pandas.DataFrame,
all_samples: Set[str],
zero_svs_are_outliers: bool = _zero_svs_are_outliers,
outlier_std_threshold: float = _outlier_std_threshold
) -> pandas.DataFrame:
"""
    For a given combination of contig and SV type, find samples that have an outlier number of SVs. Return a table of
    outliers along with statistics about the SV counts.
Args:
chrom: str
Contig for checking SV counts
sv_type: str
SV type for checking SV counts
check_stats: pandas.DataFrame
Table with SV counts on this contig with this sv_type
all_samples: Set[str]
Set of all sample IDs in cohort
zero_svs_are_outliers: bool
Whether to treat samples with no counts as automatic outliers, or explicitly code as zero counts
outlier_std_threshold: float
Threshold for outlier status as multiple of standard deviation of SV counts
Returns:
outliers: pandas.DataFrame
Table of outliers
"""
# find samples that are missing: they have 0 SVs of this type on this contig
missing_samples = pandas.DataFrame(
tuple(
{"CHROM": chrom, "SVTYPE": sv_type, "SAMPLE": sample_id, "NUM": 0}
for sample_id in all_samples.difference(check_stats["SAMPLE"])
)
)
if zero_svs_are_outliers:
# THIS IS THE ORIGINAL PIPELINE BEHAVIOR
# compute basic stats about observed nonzero SV counts
count_mean = check_stats["NUM"].mean()
count_median = check_stats["NUM"].median()
count_std = check_stats["NUM"].std()
# Amongst samples that have SVs, find counts deviating by more than set multiple of std from the median
is_outlier = numpy.abs(
check_stats["NUM"] - count_median) > outlier_std_threshold * count_std
# Treat missing samples as outliers.
outliers = pandas.concat(
(missing_samples, check_stats.loc[is_outlier]), axis=0)
else:
# THIS FINDS FEWER, MORE MEANINGFUL OUTLIERS
# Which samples are missing / included but have zero counts is unpredictable.
# 1) concatenate all samples together
check_stats = pandas.concat((check_stats, missing_samples), axis=0)
# 2) compute stats from non-zero SV counts
nonzero = check_stats["NUM"] > 0
count_mean = check_stats.loc[nonzero, "NUM"].mean()
count_median = check_stats.loc[nonzero, "NUM"].median()
count_std = check_stats.loc[nonzero, "NUM"].std()
# 3) check outliers by usual means from those stats
# Set threshold to be set multiple of greater of: std of counts, sqrt(median of counts)
# (i.e. greater of std or expected Poisson std)
# Find counts those deviating by more than threshold from the median (including zeros)
is_outlier = (
numpy.abs(check_stats["NUM"] - count_median) >
outlier_std_threshold * numpy.maximum(count_std, numpy.sqrt(count_median))
)
outliers = check_stats.loc[is_outlier].copy()
if outliers.empty:
return pandas.DataFrame([], columns=_column_order)
# augment outlier table with some statistics
outliers["Mean"] = count_mean
outliers["Median"] = count_median
outliers["STD"] = count_std
outliers["Outlier_Cate"] = numpy.where(
outliers["NUM"] > count_median, "high", "low")
# rename and re-order columns
return outliers.rename({"NUM": "Outlier_Number", "SAMPLE": "Outlier_Sample"}, axis=1).reindex(_column_order, axis=1)
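# Worked example of the default criterion above (zero_svs_are_outliers=True,
# outlier_std_threshold=5.0, made-up numbers): with a median of 40 SVs and a
# standard deviation of 4, a sample with 65 SVs deviates by 25 > 5.0 * 4 = 20 and is
# flagged as a "high" outlier, while a sample with 30 SVs (deviation 10) is not
# flagged; samples with no SVs of this type on this contig are outliers by definition.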
def pick_outliers(
stats_data: pandas.DataFrame,
zero_svs_are_outliers: bool = _zero_svs_are_outliers,
outlier_std_threshold: float = _outlier_std_threshold
) -> pandas.DataFrame:
"""
    Find samples that have an outlier number of SVs when broken down by contig and SV type. Return a table of outliers
    along with statistics about the SV counts.
Args:
stats_data: pandas.DataFrame
Table with SV counts
zero_svs_are_outliers: bool
Whether to treat samples with no counts as automatic outliers, or explicitly code as zero counts
outlier_std_threshold: float
Threshold for outlier status as multiple of standard deviation of SV counts
Returns:
outliers: pandas.DataFrame
Table of outliers
"""
# get set of all samples in stats data
all_samples = set(stats_data["SAMPLE"])
# loop over unique combinations of contig and sv type
# find outliers from each unique combination
# and concatenate those outliers into one table
outliers = pandas.concat(
tuple(
pick_outliers_by_group(
chrom=chrom, sv_type=sv_type, check_stats=check_stats, all_samples=all_samples,
zero_svs_are_outliers=zero_svs_are_outliers, outlier_std_threshold=outlier_std_threshold
)
for (chrom, sv_type), check_stats in stats_data.groupby(
["CHROM", "SVTYPE"], sort=False, as_index=False, group_keys=False
)
),
axis=0
)
return outliers
def write_outliers_file(
outliers: pandas.DataFrame,
outname: str,
outlier_type: str
):
"""
Write outliers of the appropriate type ("low" or "high") to TSV file.
Args:
outliers: pandas.DataFrame
Table of outlier data
outname: str
Base name of outlier TSV file. Final file name will have ".low" or ".high" appended to it.
outlier_type: str
"low" or "high".
"""
# write outliers to tsv. Add "#" in front of header
with open(outname + "." + outlier_type, 'w') as f_out:
f_out.write("#") # add '#' in front of header
outlier_wanted = outliers["Outlier_Cate"] == outlier_type
outliers.loc[outlier_wanted].to_csv(f_out, sep='\t', index=False)
def calc_num_svs_pick_outlier(
statfile: str,
outname: str,
zero_svs_are_outliers: bool = _zero_svs_are_outliers,
outlier_std_threshold: float = _outlier_std_threshold
):
"""
    Find samples that have an outlier number of SVs when broken down by contig and SV type.
Write two tables of outliers, along with statistics about SV count: one for those with above-median counts ("high")
and one for those at median or below ("low").
Args:
statfile: str
TSV file with table with SV counts
outname: str
Base name for saving outlier files. Low file will have ".low" appended to the name, and high file will have
".high"
zero_svs_are_outliers: bool
Whether to treat samples with no counts as automatic outliers, or explicitly code as zero counts
outlier_std_threshold: float
Threshold for outlier status as multiple of standard deviation of SV counts
"""
stats_data = read_statfile(statfile)
outliers = pick_outliers(stats_data, zero_svs_are_outliers=zero_svs_are_outliers,
outlier_std_threshold=outlier_std_threshold)
write_outliers_file(outliers, outname, "low")
write_outliers_file(outliers, outname, "high")
def _parse_arguments(argv: Sequence[str]) -> argparse.Namespace:
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
description="Find outliers in SV counts broken down by contig and SV type",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("statfile", type=str,
help="name of stats concatinated from all samples")
parser.add_argument("outname", type=str, help="name of output file")
parser.add_argument("-z", "--zero-counts-are-not-outliers", action="store_true",
help="don't make zero SV counts an automatic outlier, check deviation from median as usual")
parser.add_argument("-t", "--outlier-std-threshold", type=float, default=_outlier_std_threshold,
help="threshold multiple of std of counts for outliers")
return parser.parse_args(argv[1:])
if __name__ == "__main__":
args = _parse_arguments(sys.argv)
calc_num_svs_pick_outlier(statfile=args.statfile, outname=args.outname,
zero_svs_are_outliers=not args.zero_counts_are_not_outliers,
outlier_std_threshold=args.outlier_std_threshold)
| 42.774336
| 120
| 0.663701
|
import sys
from typing import Sequence, Set
import argparse
import numpy
import pandas
_zero_svs_are_outliers = True
_outlier_std_threshold = 5.0
_column_order = ["CHROM", "SVTYPE", "Mean", "Median", "STD",
"Outlier_Sample", "Outlier_Number", "Outlier_Cate"]
def read_statfile(statfile: str) -> pandas.DataFrame:
with open(statfile, 'r') as f_in:
columns = f_in.readline().lstrip('#').split()
return pandas.read_csv(statfile, sep='\t', comment='#', names=columns)
def pick_outliers_by_group(
chrom: str,
sv_type: str,
check_stats: pandas.DataFrame,
all_samples: Set[str],
zero_svs_are_outliers: bool = _zero_svs_are_outliers,
outlier_std_threshold: float = _outlier_std_threshold
) -> pandas.DataFrame:
missing_samples = pandas.DataFrame(
tuple(
{"CHROM": chrom, "SVTYPE": sv_type, "SAMPLE": sample_id, "NUM": 0}
for sample_id in all_samples.difference(check_stats["SAMPLE"])
)
)
if zero_svs_are_outliers:
count_mean = check_stats["NUM"].mean()
count_median = check_stats["NUM"].median()
count_std = check_stats["NUM"].std()
is_outlier = numpy.abs(
check_stats["NUM"] - count_median) > outlier_std_threshold * count_std
outliers = pandas.concat(
(missing_samples, check_stats.loc[is_outlier]), axis=0)
else:
check_stats = pandas.concat((check_stats, missing_samples), axis=0)
nonzero = check_stats["NUM"] > 0
count_mean = check_stats.loc[nonzero, "NUM"].mean()
count_median = check_stats.loc[nonzero, "NUM"].median()
count_std = check_stats.loc[nonzero, "NUM"].std()
is_outlier = (
numpy.abs(check_stats["NUM"] - count_median) >
outlier_std_threshold * numpy.maximum(count_std, numpy.sqrt(count_median))
)
outliers = check_stats.loc[is_outlier].copy()
if outliers.empty:
return pandas.DataFrame([], columns=_column_order)
outliers["Mean"] = count_mean
outliers["Median"] = count_median
outliers["STD"] = count_std
outliers["Outlier_Cate"] = numpy.where(
outliers["NUM"] > count_median, "high", "low")
return outliers.rename({"NUM": "Outlier_Number", "SAMPLE": "Outlier_Sample"}, axis=1).reindex(_column_order, axis=1)
def pick_outliers(
stats_data: pandas.DataFrame,
zero_svs_are_outliers: bool = _zero_svs_are_outliers,
outlier_std_threshold: float = _outlier_std_threshold
) -> pandas.DataFrame:
all_samples = set(stats_data["SAMPLE"])
outliers = pandas.concat(
tuple(
pick_outliers_by_group(
chrom=chrom, sv_type=sv_type, check_stats=check_stats, all_samples=all_samples,
zero_svs_are_outliers=zero_svs_are_outliers, outlier_std_threshold=outlier_std_threshold
)
for (chrom, sv_type), check_stats in stats_data.groupby(
["CHROM", "SVTYPE"], sort=False, as_index=False, group_keys=False
)
),
axis=0
)
return outliers
def write_outliers_file(
outliers: pandas.DataFrame,
outname: str,
outlier_type: str
):
with open(outname + "." + outlier_type, 'w') as f_out:
f_out.write("#") # add '#' in front of header
outlier_wanted = outliers["Outlier_Cate"] == outlier_type
outliers.loc[outlier_wanted].to_csv(f_out, sep='\t', index=False)
def calc_num_svs_pick_outlier(
statfile: str,
outname: str,
zero_svs_are_outliers: bool = _zero_svs_are_outliers,
outlier_std_threshold: float = _outlier_std_threshold
):
stats_data = read_statfile(statfile)
outliers = pick_outliers(stats_data, zero_svs_are_outliers=zero_svs_are_outliers,
outlier_std_threshold=outlier_std_threshold)
write_outliers_file(outliers, outname, "low")
write_outliers_file(outliers, outname, "high")
def _parse_arguments(argv: Sequence[str]) -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Find outliers in SV counts broken down by contig and SV type",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("statfile", type=str,
help="name of stats concatinated from all samples")
parser.add_argument("outname", type=str, help="name of output file")
parser.add_argument("-z", "--zero-counts-are-not-outliers", action="store_true",
help="don't make zero SV counts an automatic outlier, check deviation from median as usual")
parser.add_argument("-t", "--outlier-std-threshold", type=float, default=_outlier_std_threshold,
help="threshold multiple of std of counts for outliers")
return parser.parse_args(argv[1:])
if __name__ == "__main__":
args = _parse_arguments(sys.argv)
calc_num_svs_pick_outlier(statfile=args.statfile, outname=args.outname,
zero_svs_are_outliers=not args.zero_counts_are_not_outliers,
outlier_std_threshold=args.outlier_std_threshold)
| true
| true
|
f70bef908070d3279c1f2b01765777a4e765f230
| 1,997
|
py
|
Python
|
model-optimizer/mo/front/caffe/extractors/inner_product_test.py
|
shinh/dldt
|
693ab4e79a428e0801f17f4511b129a3fa8f4a62
|
[
"Apache-2.0"
] | 1
|
2021-02-20T21:48:36.000Z
|
2021-02-20T21:48:36.000Z
|
model-optimizer/mo/front/caffe/extractors/inner_product_test.py
|
erinpark33/dldt
|
edd86d090592f7779f4dbb2681546e1f4e81284f
|
[
"Apache-2.0"
] | null | null | null |
model-optimizer/mo/front/caffe/extractors/inner_product_test.py
|
erinpark33/dldt
|
edd86d090592f7779f4dbb2681546e1f4e81284f
|
[
"Apache-2.0"
] | 1
|
2018-12-14T07:52:51.000Z
|
2018-12-14T07:52:51.000Z
|
"""
Copyright (c) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from mo.front.caffe.extractors.inner_product import inner_product_ext
from mo.front.common.partial_infer.inner_product import caffe_inner_product
from mo.utils.unittest.extractors import FakeMultiParam, FakeModelLayer
class FakeProtoLayer:
def __init__(self, val):
self.inner_product_param = val
class TestInnerProduct(unittest.TestCase):
def test_inner_product_ext(self):
params = {
'num_output': 10,
'bias_term': True
}
mean_blob = np.array([1., 2.])
variance_blob = np.array([3., 4.])
blobs = [mean_blob, variance_blob]
res = inner_product_ext(FakeProtoLayer(FakeMultiParam(params)),
FakeModelLayer(blobs))
exp_res = {
'type': 'FullyConnected',
'out-size': 10,
'infer': caffe_inner_product,
'weights': mean_blob,
'biases': variance_blob,
'embedded_inputs': [
(1, 'weights', {
'bin': 'weights'
}),
(2, 'biases', {
'bin': 'biases'
})
]
}
for i in exp_res:
if i in ('weights', 'biases'):
np.testing.assert_array_equal(res[i], exp_res[i])
else:
self.assertEqual(res[i], exp_res[i])
| 32.209677
| 75
| 0.608413
|
import unittest
import numpy as np
from mo.front.caffe.extractors.inner_product import inner_product_ext
from mo.front.common.partial_infer.inner_product import caffe_inner_product
from mo.utils.unittest.extractors import FakeMultiParam, FakeModelLayer
class FakeProtoLayer:
def __init__(self, val):
self.inner_product_param = val
class TestInnerProduct(unittest.TestCase):
def test_inner_product_ext(self):
params = {
'num_output': 10,
'bias_term': True
}
mean_blob = np.array([1., 2.])
variance_blob = np.array([3., 4.])
blobs = [mean_blob, variance_blob]
res = inner_product_ext(FakeProtoLayer(FakeMultiParam(params)),
FakeModelLayer(blobs))
exp_res = {
'type': 'FullyConnected',
'out-size': 10,
'infer': caffe_inner_product,
'weights': mean_blob,
'biases': variance_blob,
'embedded_inputs': [
(1, 'weights', {
'bin': 'weights'
}),
(2, 'biases', {
'bin': 'biases'
})
]
}
for i in exp_res:
if i in ('weights', 'biases'):
np.testing.assert_array_equal(res[i], exp_res[i])
else:
self.assertEqual(res[i], exp_res[i])
| true
| true
|
f70befa47bcbbb2f37b411233346de8ecbc85bc3
| 57
|
py
|
Python
|
Tester/concurrentDL.py
|
garff/pyTorrent
|
fe8ff606ea0c146517e44ee6d475ebee58996d03
|
[
"MIT"
] | null | null | null |
Tester/concurrentDL.py
|
garff/pyTorrent
|
fe8ff606ea0c146517e44ee6d475ebee58996d03
|
[
"MIT"
] | null | null | null |
Tester/concurrentDL.py
|
garff/pyTorrent
|
fe8ff606ea0c146517e44ee6d475ebee58996d03
|
[
"MIT"
] | null | null | null |
import urllib
import concurrent.futures
import threading
| 19
| 25
| 0.877193
|
import urllib
import concurrent.futures
import threading
| true
| true
|
f70bf15027e783d8f8206b2b5debcae15150d1b6
| 25,199
|
py
|
Python
|
pysteps/io/exporters.py
|
savelov/nowcast
|
9c1168b1ba642f15bc4ffb000bdbca6db27c29b1
|
[
"BSD-3-Clause"
] | 6
|
2019-01-06T07:42:55.000Z
|
2021-02-03T13:59:50.000Z
|
pysteps/io/exporters.py
|
savelov/nowcast
|
9c1168b1ba642f15bc4ffb000bdbca6db27c29b1
|
[
"BSD-3-Clause"
] | 5
|
2018-12-23T15:10:27.000Z
|
2021-01-06T15:03:03.000Z
|
pysteps/io/exporters.py
|
savelov/nowcast
|
9c1168b1ba642f15bc4ffb000bdbca6db27c29b1
|
[
"BSD-3-Clause"
] | 2
|
2019-08-06T14:16:43.000Z
|
2019-08-13T00:36:31.000Z
|
"""
pysteps.io.exporters
====================
Methods for exporting forecasts of 2d precipitation fields into various file
formats.
Each exporter method in this module has its own initialization function that
implements the following interface::
initialize_forecast_exporter_xxx(filename, startdate, timestep,
num_timesteps, shape, num_ens_members,
metadata, incremental=None)
where xxx is the name (or abbreviation) of the file format.
This function creates the file and writes the metadata. The datasets are written
by calling :py:func:`pysteps.io.exporters.export_forecast_dataset`, and
the file is closed by calling :py:func:`pysteps.io.exporters.close_forecast_file`.
The arguments in the above are defined as follows:
.. tabularcolumns:: |p{2cm}|p{2cm}|L|
+---------------+-------------------+-----------------------------------------+
| Argument | Type/values | Description |
+===============+===================+=========================================+
| filename | str | name of the output file |
+---------------+-------------------+-----------------------------------------+
| startdate | datetime.datetime | start date of the forecast |
+---------------+-------------------+-----------------------------------------+
| timestep | int | time step of the forecast (minutes) |
+---------------+-------------------+-----------------------------------------+
| n_timesteps | int | number of time steps in the forecast |
| | | this argument is ignored if |
| | | incremental is set to 'timestep'. |
+---------------+-------------------+-----------------------------------------+
| shape | tuple | two-element tuple defining the shape |
| | | (height,width) of the forecast grids |
+---------------+-------------------+-----------------------------------------+
| n_ens_members | int | number of ensemble members in the |
| | | forecast. This argument is ignored if |
| | | incremental is set to 'member' |
+---------------+-------------------+-----------------------------------------+
| metadata | dict | metadata dictionary containing the |
| | | projection,x1,x2,y1,y2 and unit |
| | | attributes described in the |
| | | documentation of pysteps.io.importers |
+---------------+-------------------+-----------------------------------------+
| incremental | {None, 'timestep',| Allow incremental writing of datasets |
| | 'member'} | into the netCDF file |
| | | the available options are: |
| | | 'timestep' = write a forecast or a |
| | | forecast ensemble for a given |
| | | time step |
| | | 'member' = write a forecast sequence |
| | | for a given ensemble member |
+---------------+-------------------+-----------------------------------------+
The return value is a dictionary containing an exporter object. This can be
used with :py:func:`pysteps.io.exporters.export_forecast_dataset` to write
datasets into the given file format.
Available Exporters
-------------------
.. autosummary::
:toctree: ../generated/
initialize_forecast_exporter_kineros
initialize_forecast_exporter_netcdf
Generic functions
-----------------
.. autosummary::
:toctree: ../generated/
export_forecast_dataset
close_forecast_file
"""
from datetime import datetime
import numpy as np
import os
from pysteps.exceptions import MissingOptionalDependency
try:
import netCDF4
netcdf4_imported = True
except ImportError:
netcdf4_imported = False
try:
import pyproj
pyproj_imported = True
except ImportError:
pyproj_imported = False
# TODO(exporters): This is a draft version of the kineros exporter.
# Revise the variable names and
# the structure of the file if necessary.
def initialize_forecast_exporter_kineros(filename, startdate, timestep,
n_timesteps, shape, n_ens_members,
metadata, incremental=None):
"""Initialize a KINEROS2 Rainfall .pre file as specified
in https://www.tucson.ars.ag.gov/kineros/.
Grid points are treated as individual rain gauges and a separate file is
produced for each ensemble member.
Parameters
----------
filename : str
Name of the output file.
startdate : datetime.datetime
Start date of the forecast as datetime object.
timestep : int
Time step of the forecast (minutes).
n_timesteps : int
        Number of time steps in the forecast. This argument is ignored if
        incremental is set to 'timestep'.
shape : tuple of int
Two-element tuple defining the shape (height,width) of the forecast
grids.
n_ens_members : int
Number of ensemble members in the forecast. This argument is ignored if
incremental is set to 'member'.
metadata: dict
Metadata dictionary containing the projection,x1,x2,y1,y2 and unit
attributes described in the documentation of
:py:mod:`pysteps.io.importers`.
incremental : {None}, optional
Currently not implemented for this method.
Returns
-------
exporter : dict
        The return value is a dictionary containing an exporter object. This
        can be used with :py:func:`pysteps.io.exporters.export_forecast_dataset`
        to write datasets into the given file format.
"""
if incremental is not None:
raise ValueError("unknown option %s: incremental writing is not supported" % incremental)
exporter = {}
basefn, extfn = os.path.splitext(filename)
if extfn == "":
extfn = ".pre"
# one file for each member
n_ens_members = np.min((99, n_ens_members))
fns = []
for i in range(n_ens_members):
fn = "%s_N%02d%s" % (basefn, i, extfn)
with open(fn, "w") as fd:
# write header
fd.writelines("! pysteps-generated nowcast.\n")
fd.writelines("! created the %s.\n" % datetime.now().strftime("%c"))
# TODO(exporters): Add pySTEPS version here
fd.writelines("! Member = %02d.\n" % i)
fd.writelines("! Startdate = %s.\n" % startdate.strftime("%c"))
fns.append(fn)
fd.close()
h, w = shape
if metadata["unit"] == "mm/h":
var_name = "Intensity"
var_long_name = "Intensity in mm/hr"
var_unit = "mm/hr"
elif metadata["unit"] == "mm":
var_name = "Depth"
var_long_name = "Accumulated depth in mm"
var_unit = "mm"
else:
raise ValueError("unsupported unit %s" % metadata["unit"])
xr = np.linspace(metadata["x1"], metadata["x2"], w+1)[:-1]
xr += 0.5 * (xr[1] - xr[0])
yr = np.linspace(metadata["y1"], metadata["y2"], h+1)[:-1]
yr += 0.5 * (yr[1] - yr[0])
X, Y = np.meshgrid(xr, yr)
XY_coords = np.stack([X, Y])
exporter["method"] = "kineros"
exporter["ncfile"] = fns
exporter["XY_coords"] = XY_coords
exporter["var_name"] = var_name
exporter["var_long_name"] = var_long_name
exporter["var_unit"] = var_unit
exporter["startdate"] = startdate
exporter["timestep"] = timestep
exporter["metadata"] = metadata
exporter["incremental"] = incremental
exporter["num_timesteps"] = n_timesteps
exporter["num_ens_members"] = n_ens_members
exporter["shape"] = shape
return exporter
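# A minimal call sketch for the KINEROS2 exporter above (hypothetical values;
# ``metadata`` as produced by :py:mod:`pysteps.io.importers`, with unit "mm/h" or "mm"):
#
#     exporter = initialize_forecast_exporter_kineros(
#         "nowcast.pre", startdate, timestep=5, n_timesteps=12,
#         shape=(256, 256), n_ens_members=3, metadata=metadata)
#     # -> creates nowcast_N00.pre, nowcast_N01.pre and nowcast_N02.pre,
#     #    one rainfall file per ensemble member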
# TODO(exporters): This is a draft version of the netcdf exporter.
# Revise the variable names and
# the structure of the file if necessary.
def initialize_forecast_exporter_netcdf(filename, startdate, timestep,
n_timesteps, shape, n_ens_members,
metadata, product='precip_intensity',
incremental=None):
"""Initialize a netCDF forecast exporter.
Parameters
----------
filename : str
Name of the output file.
startdate : datetime.datetime
Start date of the forecast as datetime object.
timestep : int
Time step of the forecast (minutes).
n_timesteps : int
        Number of time steps in the forecast. This argument is ignored if
        incremental is set to 'timestep'.
shape : tuple of int
Two-element tuple defining the shape (height,width) of the forecast
grids.
n_ens_members : int
Number of ensemble members in the forecast. This argument is ignored if
incremental is set to 'member'.
metadata: dict
Metadata dictionary containing the projection,x1,x2,y1,y2 and unit
attributes described in the documentation of
:py:mod:`pysteps.io.importers`.
product: str
product name can be 'precip_intensity' for intensity export,
'precip_probability' for probability export.
incremental : {None,'timestep','member'}, optional
Allow incremental writing of datasets into the netCDF file.\n
The available options are: 'timestep' = write a forecast or a forecast
ensemble for a given time step; 'member' = write a forecast sequence
for a given ensemble member. If set to None, incremental writing is
disabled.
Returns
-------
exporter : dict
        The return value is a dictionary containing an exporter object. This
        can be used with :py:func:`pysteps.io.exporters.export_forecast_dataset`
        to write datasets into the given file format.
"""
if not netcdf4_imported:
raise MissingOptionalDependency(
"netCDF4 package is required for netcdf "
"exporters but it is not installed")
if not pyproj_imported:
raise MissingOptionalDependency(
"pyproj package is required for netcdf "
"exporters but it is not installed")
if incremental not in [None, "timestep", "member"]:
raise ValueError("unknown option %s: incremental must be 'timestep' or 'member'" % incremental)
if incremental == "timestep":
n_timesteps = None
elif incremental == "member":
n_ens_members = None
elif incremental is not None:
raise ValueError("unknown argument value incremental='%s': must be 'timestep' or 'member'" % str(incremental))
exporter = {}
filename = os.path.realpath(filename)
if not os.path.exists(os.path.dirname(filename)):
os.mkdir(os.path.dirname(filename))
ncf = netCDF4.Dataset(filename, 'w', format="NETCDF4")
ncf.Conventions = "CF-1.7"
ncf.title = "pysteps-generated nowcast"
ncf.institution = "the pySTEPS community (https://pysteps.github.io)"
ncf.source = "pysteps" # TODO(exporters): Add pySTEPS version here
ncf.history = ""
ncf.references = ""
ncf.comment = ""
h, w = shape
# if product != 'precip_probability':
# ncf.createDimension("ens_number", size=n_ens_members)
ncf.createDimension("time", size=n_timesteps)
ncf.createDimension("y", size=h)
ncf.createDimension("x", size=w)
# necessary settings for probability nowcasting
ncf.datetime = str(startdate)
if product == 'precip_probability':
# TODO: Add the 'percent' unit to the source metadata instead of setting it here
metadata["unit"] = "percent"
if metadata["unit"] == "mm/h":
var_name = "precip_intensity"
var_standard_name = None
var_long_name = "instantaneous precipitation rate"
var_unit = "mm h-1"
elif metadata["unit"] == "percent":
var_name = "precip_probability"
var_standard_name = None
var_long_name = "probablistic precipitation"
var_unit = "percent"
elif metadata["unit"] == "mm":
var_name = "precip_accum"
var_standard_name = None
var_long_name = "accumulated precipitation"
var_unit = "mm"
elif metadata["unit"] == "dBZ":
var_name = "reflectivity"
var_long_name = "equivalent reflectivity factor"
var_standard_name = "equivalent_reflectivity_factor"
var_unit = "dBZ"
else:
raise ValueError("unknown unit %s" % metadata["unit"])
xr = np.linspace(metadata["x1"], metadata["x2"], w+1)[:-1]
xr += 0.5 * (xr[1] - xr[0])
yr = np.linspace(metadata["y1"], metadata["y2"], h+1)[:-1]
yr += 0.5 * (yr[1] - yr[0])
var_xc = ncf.createVariable("xc", np.float32, dimensions=("x",))
var_xc[:] = xr
var_xc.axis = 'X'
var_xc.standard_name = "projection_x_coordinate"
var_xc.long_name = "x-coordinate in Cartesian system"
# TODO(exporters): Don't hard-code the unit.
var_xc.units = 'm'
var_yc = ncf.createVariable("yc", np.float32, dimensions=("y",))
var_yc[:] = yr
var_yc.axis = 'Y'
var_yc.standard_name = "projection_y_coordinate"
var_yc.long_name = "y-coordinate in Cartesian system"
# TODO(exporters): Don't hard-code the unit.
var_yc.units = 'm'
X, Y = np.meshgrid(xr, yr)
pr = pyproj.Proj(metadata["projection"])
lon, lat = pr(X.flatten(), Y.flatten(), inverse=True)
new_long, new_lat = np.zeros((h, w), dtype=np.float), np.zeros((h, w), dtype=np.float)
idx = 0
for row in range(h):
for col in range(w):
new_long[row][col] = lon[idx]
idx += 1
idx = 0
for row in range(h):
for col in range(w):
new_lat[row][col] = lat[idx]
idx += 1
var_lon = ncf.createVariable("lon", np.float32, dimensions=("y", "x"))
var_lon[:] = new_long
var_lon.standard_name = "longitude"
var_lon.long_name = "longitude coordinate"
# TODO(exporters): Don't hard-code the unit.
var_lon.units = "degrees_east"
var_lat = ncf.createVariable("lat", np.float, dimensions=("y", "x"))
var_lat[:] = new_lat
var_lat.standard_name = "latitude"
var_lat.long_name = "latitude coordinate"
# TODO(exporters): Don't hard-code the unit.
var_lat.units = "degrees_north"
ncf.projection = metadata["projection"]
grid_mapping_var_name, grid_mapping_name, grid_mapping_params = \
_convert_proj4_to_grid_mapping(metadata["projection"])
# skip writing the grid mapping if a matching name was not found
if grid_mapping_var_name is not None:
var_gm = ncf.createVariable(grid_mapping_var_name, np.int,
dimensions=())
var_gm.grid_mapping_name = grid_mapping_name
for i in grid_mapping_params.items():
var_gm.setncattr(i[0], i[1])
# if product != 'precip_probability':
# var_ens_num = ncf.createVariable("ens_number", np.int,
# dimensions=("ens_number",))
# if incremental != "member":
# var_ens_num[:] = list(range(1, n_ens_members+1))
# var_ens_num.long_name = "ensemble member"
# var_ens_num.units = ""
var_time = ncf.createVariable("time", np.int, dimensions=("time",))
if incremental != "timestep":
if product == 'precip_probability':
var_time[:] = [i*timestep for i in range(1, n_timesteps+1)]
else:
var_time[:] = [i*timestep*60 for i in range(1, n_timesteps+1)]
var_time.long_name = "forecast time"
startdate_str = datetime.strftime(startdate, "%Y-%m-%d %H:%M:%S")
var_time.units = "minutes since %s" % startdate_str if product == 'precip_probability' \
else "seconds since %s" % startdate_str
dimensions = ("time", "y", "x")
var_F = ncf.createVariable(var_name, np.float32,
dimensions=dimensions,
zlib=True, complevel=9)
if var_standard_name is not None:
var_F.standard_name = var_standard_name
var_F.long_name = var_long_name
var_F.coordinates = "y x"
var_F.units = var_unit
exporter["method"] = "netcdf"
exporter["ncfile"] = ncf
exporter["var_F"] = var_F
# if product != 'precip_probability':
# exporter["var_ens_num"] = var_ens_num
exporter["var_time"] = var_time
exporter["var_name"] = var_name
exporter["startdate"] = startdate
exporter["timestep"] = timestep
exporter["metadata"] = metadata
exporter["incremental"] = incremental
exporter["num_timesteps"] = n_timesteps
exporter["num_ens_members"] = n_ens_members
exporter["shape"] = shape
return exporter
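# Illustrative sketch (not part of the original module): one way the exporter
# defined above might be initialized. The output path, projection string, grid
# extent and forecast geometry below are made-up assumptions, not values taken
# from pysteps itself.
def _example_initialize_netcdf_exporter():
    metadata = {
        "projection": "+proj=stere +lon_0=25 +lat_0=90 +lat_ts=60 +x_0=0 +y_0=0",
        "x1": 0.0, "x2": 100000.0, "y1": 0.0, "y2": 100000.0,
        "unit": "mm/h",
    }
    # 12 lead times of 5 minutes on a 100x100 grid, one ensemble member.
    return initialize_forecast_exporter_netcdf(
        "/tmp/example_nowcast.nc", datetime(2020, 1, 1, 12, 0), 5, 12,
        (100, 100), 1, metadata, product="precip_intensity", incremental=None)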
def export_forecast_dataset(F, exporter, mask=None):
"""Write a forecast array into a file.
The written dataset has dimensions
(num_ens_members,num_timesteps,shape[0],shape[1]), where shape refers to
the shape of the two-dimensional forecast grids. If the exporter was
initialized with incremental!=None, the array is appended to the existing
dataset either along the ensemble member or time axis.
Parameters
----------
exporter : dict
An exporter object created with any initialization method implemented
in :py:mod:`pysteps.io.exporters`.
F : array_like
The array to write. The required shape depends on the choice of the
'incremental' parameter the exporter was initialized with:
:TODO: Update this table incorporating 'precip_probability'
+-----------------+---------------------------------------------------+
| incremental | required shape |
+=================+===================================================+
| None | (num_ens_members,num_timesteps,shape[0],shape[1]) |
+-----------------+---------------------------------------------------+
| 'timestep' | (num_ens_members,shape[0],shape[1]) |
+-----------------+---------------------------------------------------+
| 'member' | (num_timesteps,shape[0],shape[1]) |
+-----------------+---------------------------------------------------+
"""
if exporter["method"] == "netcdf" and not netcdf4_imported:
raise MissingOptionalDependency(
"netCDF4 package is required for netcdf "
"exporters but it is not installed")
if exporter["incremental"] is None:
shp = (exporter["num_timesteps"], exporter["shape"][0], exporter["shape"][1])
if F.shape != shp:
raise ValueError("F has invalid shape: %s != %s" % (str(F.shape),str(shp)))
elif exporter["incremental"] == "timestep":
shp = (exporter["num_ens_members"], exporter["shape"][0],
exporter["shape"][1])
if F.shape != shp:
raise ValueError("F has invalid shape: %s != %s" % (str(F.shape),str(shp)))
elif exporter["incremental"] == "member":
shp = (exporter["num_timesteps"], exporter["shape"][0],
exporter["shape"][1])
if F.shape != shp:
raise ValueError("F has invalid shape: %s != %s" % (str(F.shape),str(shp)))
if exporter["method"] == "netcdf":
_export_netcdf(F, exporter, mask)
elif exporter["method"] == "kineros":
_export_kineros(F, exporter)
else:
raise ValueError("unknown exporter method %s" % exporter["method"])
def close_forecast_file(exporter):
"""Close the file associated with a forecast exporter.
Finish writing forecasts and close the file associated with a forecast
exporter.
Parameters
----------
exporter : dict
An exporter object created with any initialization method implemented
in :py:mod:`pysteps.io.exporters`.
"""
if exporter["method"] == "kineros":
pass # no need to close the file
else:
exporter["ncfile"].close()
def _export_kineros(F, exporter):
num_timesteps = exporter["num_timesteps"]
num_ens_members = exporter["num_ens_members"]
startdate = exporter["startdate"]
timestep = exporter["timestep"]
xgrid = exporter["XY_coords"][0, :, :].flatten()
ygrid = exporter["XY_coords"][1, :, :].flatten()
timemin = [(t + 1)*timestep for t in range(num_timesteps)]
for n in range(num_ens_members):
fn = exporter["ncfile"][n]
F_ = F[n, :, :, :].reshape((num_timesteps, -1))
if exporter["var_name"] == "Depth":
F_ = np.cumsum(F_, axis=0)
with open(fn, "a") as fd:
for m in range(F_.shape[1]):
fd.writelines("BEGIN RG%03d\n" % (m + 1))
fd.writelines(" X = %.2f, Y = %.2f\n" % (xgrid[m], ygrid[m]))
fd.writelines(" N = %i\n" % num_timesteps)
fd.writelines(" TIME %s\n" % exporter["var_name"].upper())
fd.writelines("! (min) (%s)\n" % exporter["var_unit"])
for t in range(num_timesteps):
line_new = "{:6.1f} {:11.2f}\n".format(timemin[t], F_[t, m])
fd.writelines(line_new)
fd.writelines("END\n\n")
def _export_netcdf(F, exporter, mask=None):
var_F = exporter["var_F"]
if exporter["incremental"] is None:
var_F[:] = F[:,::-1,:]
elif exporter["incremental"] == "timestep":
var_F[:, var_F.shape[1], :, :] = F
var_time = exporter["var_time"]
var_time[len(var_time)-1] = len(var_time) * exporter["timestep"] * 60
else:
var_F[var_F.shape[0], :, :, :] = F
var_ens_num = exporter["var_time"]
var_ens_num[len(var_ens_num)-1] = len(var_ens_num)
# TODO(exporters): Write methods for converting Proj.4 projection definitions
# into CF grid mapping attributes. Currently this has been implemented for
# the stereographic projection.
# The conversions implemented here are take from:
# https://github.com/cf-convention/cf-convention.github.io/blob/master/wkt-proj-4.md
def _convert_proj4_to_grid_mapping(proj4str):
tokens = proj4str.split('+')
d = {}
for t in tokens[1:]:
t = t.split('=')
if len(t) > 1:
d[t[0]] = t[1].strip()
params = {}
# TODO(exporters): implement more projection types here
if d["proj"] == "stere":
grid_mapping_var_name = "polar_stereographic"
grid_mapping_name = "polar_stereographic"
v = d["lon_0"] if d["lon_0"][-1] not in ["E", "W"] else d["lon_0"][:-1]
params["straight_vertical_longitude_from_pole"] = float(v)
v = d["lat_0"] if d["lat_0"][-1] not in ["N", "S"] else d["lat_0"][:-1]
params["latitude_of_projection_origin"] = float(v)
if "lat_ts" in list(d.keys()):
params["standard_parallel"] = float(d["lat_ts"])
elif "k_0" in list(d.keys()):
params["scale_factor_at_projection_origin"] = float(d["k_0"])
params["false_easting"] = float(d["x_0"])
params["false_northing"] = float(d["y_0"])
elif d["proj"] == "sterea":
grid_mapping_var_name = "oblique_stereographic"
grid_mapping_name = "oblique_stereographic"
v = d["lon_0"] if d["lon_0"][-1] not in ["E", "W"] else d["lon_0"][:-1]
params["longitude_of_projection_origin"] = float(v)
v = d["lat_0"] if d["lat_0"][-1] not in ["N", "S"] else d["lat_0"][:-1]
params["latitude_of_projection_origin"] = float(v)
if "lat_ts" in list(d.keys()):
params["standard_parallel"] = float(d["lat_ts"])
elif "k_0" in list(d.keys()):
params["scale_factor_at_projection_origin"] = float(d["k_0"])
params["false_easting"] = float(d["x_0"])
params["false_northing"] = float(d["y_0"])
elif d["proj"] == "aea": # Albers Conical Equal Area
grid_mapping_var_name = "proj"
grid_mapping_name = "albers_conical_equal_area"
params["false_easting"] = float(d["x_0"]) if "x_0" in d else float(0)
params["false_northing"] = float(d["y_0"]) if "y_0" in d else float(0)
v = d["lon_0"] if "lon_0" in d else float(0)
params["longitude_of_central_meridian"] = float(v)
v = d["lat_0"] if "lat_0" in d else float(0)
params["latitude_of_projection_origin"] = float(v)
v1 = d["lat_1"] if "lat_1" in d else float(0)
v2 = d["lat_2"] if "lat_2" in d else float(0)
params["standard_parallel"] = (float(v1), float(v2))
else:
print('unknown projection', d["proj"])
return None, None, None
return grid_mapping_var_name, grid_mapping_name, params
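# Illustrative sketch (not part of the original module): what the helper above
# returns for a polar-stereographic PROJ.4 string. The projection parameters
# are made-up values.
def _example_grid_mapping_conversion():
    proj4 = ("+proj=stere +lon_0=25 +lat_0=90 +lat_ts=60 "
             "+x_0=380886.31 +y_0=3395677.92 +ellps=WGS84")
    var_name, name, params = _convert_proj4_to_grid_mapping(proj4)
    # var_name == "polar_stereographic", name == "polar_stereographic"
    # params["straight_vertical_longitude_from_pole"] == 25.0
    # params["latitude_of_projection_origin"] == 90.0
    # params["standard_parallel"] == 60.0
    # params["false_easting"] == 380886.31
    # params["false_northing"] == 3395677.92
    return var_name, name, params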
| 39.55887
| 118
| 0.5703
|
from datetime import datetime
import numpy as np
import os
from pysteps.exceptions import MissingOptionalDependency
try:
import netCDF4
netcdf4_imported = True
except ImportError:
netcdf4_imported = False
try:
import pyproj
pyproj_imported = True
except ImportError:
pyproj_imported = False
def initialize_forecast_exporter_kineros(filename, startdate, timestep,
n_timesteps, shape, n_ens_members,
metadata, incremental=None):
if incremental is not None:
raise ValueError("unknown option %s: incremental writing is not supported" % incremental)
exporter = {}
basefn, extfn = os.path.splitext(filename)
if extfn == "":
extfn = ".pre"
n_ens_members = np.min((99, n_ens_members))
fns = []
for i in range(n_ens_members):
fn = "%s_N%02d%s" % (basefn, i, extfn)
with open(fn, "w") as fd:
fd.writelines("! pysteps-generated nowcast.\n")
fd.writelines("! created the %s.\n" % datetime.now().strftime("%c"))
fd.writelines("! Member = %02d.\n" % i)
fd.writelines("! Startdate = %s.\n" % startdate.strftime("%c"))
fns.append(fn)
fd.close()
h, w = shape
if metadata["unit"] == "mm/h":
var_name = "Intensity"
var_long_name = "Intensity in mm/hr"
var_unit = "mm/hr"
elif metadata["unit"] == "mm":
var_name = "Depth"
var_long_name = "Accumulated depth in mm"
var_unit = "mm"
else:
raise ValueError("unsupported unit %s" % metadata["unit"])
xr = np.linspace(metadata["x1"], metadata["x2"], w+1)[:-1]
xr += 0.5 * (xr[1] - xr[0])
yr = np.linspace(metadata["y1"], metadata["y2"], h+1)[:-1]
yr += 0.5 * (yr[1] - yr[0])
X, Y = np.meshgrid(xr, yr)
XY_coords = np.stack([X, Y])
exporter["method"] = "kineros"
exporter["ncfile"] = fns
exporter["XY_coords"] = XY_coords
exporter["var_name"] = var_name
exporter["var_long_name"] = var_long_name
exporter["var_unit"] = var_unit
exporter["startdate"] = startdate
exporter["timestep"] = timestep
exporter["metadata"] = metadata
exporter["incremental"] = incremental
exporter["num_timesteps"] = n_timesteps
exporter["num_ens_members"] = n_ens_members
exporter["shape"] = shape
return exporter
def initialize_forecast_exporter_netcdf(filename, startdate, timestep,
n_timesteps, shape, n_ens_members,
metadata, product='precip_intensity',
incremental=None):
if not netcdf4_imported:
raise MissingOptionalDependency(
"netCDF4 package is required for netcdf "
"exporters but it is not installed")
if not pyproj_imported:
raise MissingOptionalDependency(
"pyproj package is required for netcdf "
"exporters but it is not installed")
if incremental not in [None, "timestep", "member"]:
raise ValueError("unknown option %s: incremental must be 'timestep' or 'member'" % incremental)
if incremental == "timestep":
n_timesteps = None
elif incremental == "member":
n_ens_members = None
elif incremental is not None:
raise ValueError("unknown argument value incremental='%s': must be 'timestep' or 'member'" % str(incremental))
exporter = {}
filename = os.path.realpath(filename)
if not os.path.exists(os.path.dirname(filename)):
os.mkdir(os.path.dirname(filename))
ncf = netCDF4.Dataset(filename, 'w', format="NETCDF4")
ncf.Conventions = "CF-1.7"
ncf.title = "pysteps-generated nowcast"
ncf.institution = "the pySTEPS community (https://pysteps.github.io)"
ncf.source = "pysteps" ncf.history = ""
ncf.references = ""
ncf.comment = ""
h, w = shape
ncf.createDimension("time", size=n_timesteps)
ncf.createDimension("y", size=h)
ncf.createDimension("x", size=w)
ncf.datetime = str(startdate)
if product == 'precip_probability':
metadata["unit"] = "percent"
if metadata["unit"] == "mm/h":
var_name = "precip_intensity"
var_standard_name = None
var_long_name = "instantaneous precipitation rate"
var_unit = "mm h-1"
elif metadata["unit"] == "percent":
var_name = "precip_probability"
var_standard_name = None
var_long_name = "probablistic precipitation"
var_unit = "percent"
elif metadata["unit"] == "mm":
var_name = "precip_accum"
var_standard_name = None
var_long_name = "accumulated precipitation"
var_unit = "mm"
elif metadata["unit"] == "dBZ":
var_name = "reflectivity"
var_long_name = "equivalent reflectivity factor"
var_standard_name = "equivalent_reflectivity_factor"
var_unit = "dBZ"
else:
raise ValueError("unknown unit %s" % metadata["unit"])
xr = np.linspace(metadata["x1"], metadata["x2"], w+1)[:-1]
xr += 0.5 * (xr[1] - xr[0])
yr = np.linspace(metadata["y1"], metadata["y2"], h+1)[:-1]
yr += 0.5 * (yr[1] - yr[0])
var_xc = ncf.createVariable("xc", np.float32, dimensions=("x",))
var_xc[:] = xr
var_xc.axis = 'X'
var_xc.standard_name = "projection_x_coordinate"
var_xc.long_name = "x-coordinate in Cartesian system"
var_xc.units = 'm'
var_yc = ncf.createVariable("yc", np.float32, dimensions=("y",))
var_yc[:] = yr
var_yc.axis = 'Y'
var_yc.standard_name = "projection_y_coordinate"
var_yc.long_name = "y-coordinate in Cartesian system"
# TODO(exporters): Don't hard-code the unit.
var_yc.units = 'm'
X, Y = np.meshgrid(xr, yr)
pr = pyproj.Proj(metadata["projection"])
lon, lat = pr(X.flatten(), Y.flatten(), inverse=True)
new_long, new_lat = np.zeros((h, w), dtype=np.float), np.zeros((h, w), dtype=np.float)
idx = 0
for row in range(h):
for col in range(w):
new_long[row][col] = lon[idx]
idx += 1
idx = 0
for row in range(h):
for col in range(w):
new_lat[row][col] = lat[idx]
idx += 1
var_lon = ncf.createVariable("lon", np.float32, dimensions=("y", "x"))
var_lon[:] = new_long
var_lon.standard_name = "longitude"
var_lon.long_name = "longitude coordinate"
var_lon.units = "degrees_east"
var_lat = ncf.createVariable("lat", np.float, dimensions=("y", "x"))
var_lat[:] = new_lat
var_lat.standard_name = "latitude"
var_lat.long_name = "latitude coordinate"
# TODO(exporters): Don't hard-code the unit.
var_lat.units = "degrees_north"
ncf.projection = metadata["projection"]
grid_mapping_var_name, grid_mapping_name, grid_mapping_params = \
_convert_proj4_to_grid_mapping(metadata["projection"])
if grid_mapping_var_name is not None:
var_gm = ncf.createVariable(grid_mapping_var_name, np.int,
dimensions=())
var_gm.grid_mapping_name = grid_mapping_name
for i in grid_mapping_params.items():
var_gm.setncattr(i[0], i[1])
var_time = ncf.createVariable("time", np.int, dimensions=("time",))
if incremental != "timestep":
if product == 'precip_probability':
var_time[:] = [i*timestep for i in range(1, n_timesteps+1)]
else:
var_time[:] = [i*timestep*60 for i in range(1, n_timesteps+1)]
var_time.long_name = "forecast time"
startdate_str = datetime.strftime(startdate, "%Y-%m-%d %H:%M:%S")
var_time.units = "minutes since %s" % startdate_str if product == 'precip_probability' \
else "seconds since %s" % startdate_str
dimensions = ("time", "y", "x")
var_F = ncf.createVariable(var_name, np.float32,
dimensions=dimensions,
zlib=True, complevel=9)
if var_standard_name is not None:
var_F.standard_name = var_standard_name
var_F.long_name = var_long_name
var_F.coordinates = "y x"
var_F.units = var_unit
exporter["method"] = "netcdf"
exporter["ncfile"] = ncf
exporter["var_F"] = var_F
exporter["var_time"] = var_time
exporter["var_name"] = var_name
exporter["startdate"] = startdate
exporter["timestep"] = timestep
exporter["metadata"] = metadata
exporter["incremental"] = incremental
exporter["num_timesteps"] = n_timesteps
exporter["num_ens_members"] = n_ens_members
exporter["shape"] = shape
return exporter
def export_forecast_dataset(F, exporter, mask=None):
if exporter["method"] == "netcdf" and not netcdf4_imported:
raise MissingOptionalDependency(
"netCDF4 package is required for netcdf "
"exporters but it is not installed")
if exporter["incremental"] is None:
shp = (exporter["num_timesteps"], exporter["shape"][0], exporter["shape"][1])
if F.shape != shp:
raise ValueError("F has invalid shape: %s != %s" % (str(F.shape),str(shp)))
elif exporter["incremental"] == "timestep":
shp = (exporter["num_ens_members"], exporter["shape"][0],
exporter["shape"][1])
if F.shape != shp:
raise ValueError("F has invalid shape: %s != %s" % (str(F.shape),str(shp)))
elif exporter["incremental"] == "member":
shp = (exporter["num_timesteps"], exporter["shape"][0],
exporter["shape"][1])
if F.shape != shp:
raise ValueError("F has invalid shape: %s != %s" % (str(F.shape),str(shp)))
if exporter["method"] == "netcdf":
_export_netcdf(F, exporter, mask)
elif exporter["method"] == "kineros":
_export_kineros(F, exporter)
else:
raise ValueError("unknown exporter method %s" % exporter["method"])
def close_forecast_file(exporter):
if exporter["method"] == "kineros":
pass
else:
exporter["ncfile"].close()
def _export_kineros(F, exporter):
num_timesteps = exporter["num_timesteps"]
num_ens_members = exporter["num_ens_members"]
startdate = exporter["startdate"]
timestep = exporter["timestep"]
xgrid = exporter["XY_coords"][0, :, :].flatten()
ygrid = exporter["XY_coords"][1, :, :].flatten()
timemin = [(t + 1)*timestep for t in range(num_timesteps)]
for n in range(num_ens_members):
fn = exporter["ncfile"][n]
F_ = F[n, :, :, :].reshape((num_timesteps, -1))
if exporter["var_name"] == "Depth":
F_ = np.cumsum(F_, axis=0)
with open(fn, "a") as fd:
for m in range(F_.shape[1]):
fd.writelines("BEGIN RG%03d\n" % (m + 1))
fd.writelines(" X = %.2f, Y = %.2f\n" % (xgrid[m], ygrid[m]))
fd.writelines(" N = %i\n" % num_timesteps)
fd.writelines(" TIME %s\n" % exporter["var_name"].upper())
fd.writelines("! (min) (%s)\n" % exporter["var_unit"])
for t in range(num_timesteps):
line_new = "{:6.1f} {:11.2f}\n".format(timemin[t], F_[t, m])
fd.writelines(line_new)
fd.writelines("END\n\n")
def _export_netcdf(F, exporter, mask=None):
var_F = exporter["var_F"]
if exporter["incremental"] is None:
var_F[:] = F[:,::-1,:]
elif exporter["incremental"] == "timestep":
var_F[:, var_F.shape[1], :, :] = F
var_time = exporter["var_time"]
var_time[len(var_time)-1] = len(var_time) * exporter["timestep"] * 60
else:
var_F[var_F.shape[0], :, :, :] = F
var_ens_num = exporter["var_time"]
var_ens_num[len(var_ens_num)-1] = len(var_ens_num)
def _convert_proj4_to_grid_mapping(proj4str):
tokens = proj4str.split('+')
d = {}
for t in tokens[1:]:
t = t.split('=')
if len(t) > 1:
d[t[0]] = t[1].strip()
params = {}
if d["proj"] == "stere":
grid_mapping_var_name = "polar_stereographic"
grid_mapping_name = "polar_stereographic"
v = d["lon_0"] if d["lon_0"][-1] not in ["E", "W"] else d["lon_0"][:-1]
params["straight_vertical_longitude_from_pole"] = float(v)
v = d["lat_0"] if d["lat_0"][-1] not in ["N", "S"] else d["lat_0"][:-1]
params["latitude_of_projection_origin"] = float(v)
if "lat_ts" in list(d.keys()):
params["standard_parallel"] = float(d["lat_ts"])
elif "k_0" in list(d.keys()):
params["scale_factor_at_projection_origin"] = float(d["k_0"])
params["false_easting"] = float(d["x_0"])
params["false_northing"] = float(d["y_0"])
elif d["proj"] == "sterea":
grid_mapping_var_name = "oblique_stereographic"
grid_mapping_name = "oblique_stereographic"
v = d["lon_0"] if d["lon_0"][-1] not in ["E", "W"] else d["lon_0"][:-1]
params["longitude_of_projection_origin"] = float(v)
v = d["lat_0"] if d["lat_0"][-1] not in ["N", "S"] else d["lat_0"][:-1]
params["latitude_of_projection_origin"] = float(v)
if "lat_ts" in list(d.keys()):
params["standard_parallel"] = float(d["lat_ts"])
elif "k_0" in list(d.keys()):
params["scale_factor_at_projection_origin"] = float(d["k_0"])
params["false_easting"] = float(d["x_0"])
params["false_northing"] = float(d["y_0"])
elif d["proj"] == "aea": grid_mapping_var_name = "proj"
grid_mapping_name = "albers_conical_equal_area"
params["false_easting"] = float(d["x_0"]) if "x_0" in d else float(0)
params["false_northing"] = float(d["y_0"]) if "y_0" in d else float(0)
v = d["lon_0"] if "lon_0" in d else float(0)
params["longitude_of_central_meridian"] = float(v)
v = d["lat_0"] if "lat_0" in d else float(0)
params["latitude_of_projection_origin"] = float(v)
v1 = d["lat_1"] if "lat_1" in d else float(0)
v2 = d["lat_2"] if "lat_2" in d else float(0)
params["standard_parallel"] = (float(v1), float(v2))
else:
print('unknown projection', d["proj"])
return None, None, None
return grid_mapping_var_name, grid_mapping_name, params
| true
| true
|
f70bf420f2d3ab317f714627b80d6cfd01d77b6b
| 3,345
|
py
|
Python
|
update_sheetinRange.py
|
akifislam/CodeforcesAutoTracker
|
d147f6b6639d74a029208bb6e1407aec89212f27
|
[
"Apache-2.0"
] | null | null | null |
update_sheetinRange.py
|
akifislam/CodeforcesAutoTracker
|
d147f6b6639d74a029208bb6e1407aec89212f27
|
[
"Apache-2.0"
] | null | null | null |
update_sheetinRange.py
|
akifislam/CodeforcesAutoTracker
|
d147f6b6639d74a029208bb6e1407aec89212f27
|
[
"Apache-2.0"
] | 1
|
2022-02-15T20:21:47.000Z
|
2022-02-15T20:21:47.000Z
|
from bs4 import BeautifulSoup
import requests
import test
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import datetime
def update_inRange():
print("Today's Date : ",datetime.date.today())
today = datetime.date.today() - datetime.timedelta(1)
yesterday_month = today.strftime("%b")
yesterday_dayno = today.strftime("%d")
yesterday_full_Date = today.strftime("%d %B, %Y")
compareable_date = today.strftime("%b/%d")
print(compareable_date)
print()
print("----- Start -----")
print()
# Test
# todays_month = 'Sep'
# todays_day = '15'
# Test
print("Yesterday was : ",yesterday_full_Date)
scope = ["https://spreadsheets.google.com/feeds", 'https://www.googleapis.com/auth/spreadsheets',
"https://www.googleapis.com/auth/drive.file", "https://www.googleapis.com/auth/drive"]
creds = ServiceAccountCredentials.from_json_keyfile_name("CodeforcesAutoTracker-b2030a7afa6c.json", scope);
client = gspread.authorize(creds)
sheet = client.open("Codeforces Auto Tracker - Akif Islam").worksheet('Sheet2')
data = sheet.get_all_records()
# pprint(data)
date_column = sheet.col_values(1)
no_of_total_submission_column = sheet.col_values(2)
no_of_total_accepted_column = sheet.col_values(3)
source_link = "https://codeforces.com/submissions/miss.progga"
source = requests.get(source_link).text
soup = BeautifulSoup(source, "lxml").find('table', class_="status-frame-datatable")
submission_time = []
# 1. Collecting all dates from 50 submission of First Page of Codeforces Submission
for data in soup.findAll('span', class_="format-time"):
submission_time.append(data.text[0:6])
print("Submission's Time : ", submission_time)
print("OK !")
print()
# Execution
submission_count = int(0)
total_accepted = []
accepted_count = int(0)
accpeted_nonduplicate_set = []
# Total Accepted Count from 50s :
for data in soup.findAll('span', class_="submissionVerdictWrapper"):
total_accepted.append(data.text)
print(total_accepted)
print(len(total_accepted))
print(len(submission_time))
#Total Submission Count
for i in range(0,len(submission_time),1):
if submission_time[i][0:3] == yesterday_month and submission_time[i][4:6] == yesterday_dayno:
submission_count += 1
if(total_accepted[i]== "Accepted"):
str = test.get_problemlist()[i] + " Accepted"
accpeted_nonduplicate_set.append(str)
# Total Submission Count
accpeted_nonduplicate_set = set(accpeted_nonduplicate_set)
print("Accepted List : ",accpeted_nonduplicate_set)
accepted_count = len(accpeted_nonduplicate_set)
print("Total Submission : ", submission_count)
print("Total Accepted : ", accepted_count)
insert_list = [yesterday_full_Date, submission_count, accepted_count]
print(insert_list)
previous_date = sheet.cell(len(date_column), 1).value[0:2]
# if sheet.cell(len(date_column),1)[0:1] != todays_day :
if previous_date != yesterday_dayno:
sheet.insert_row(insert_list, (len(date_column) + 1))
else:
print("Duplicate Date Found ! ")
print()
print("----- Finished !-----")
print()
update_inRange()
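# Illustrative note (not part of the original script): the month/day comparison
# above assumes Codeforces renders submission times like "Sep/15/2021 17:05",
# so data.text[0:6] yields "Sep/15", where [0:3] is the month abbreviation
# compared against strftime("%b") and [4:6] is the zero-padded day compared
# against strftime("%d"). The timestamp shown here is a made-up example.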
| 32.163462
| 111
| 0.681614
|
from bs4 import BeautifulSoup
import requests
import test
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import datetime
def update_inRange():
print("Today's Date : ",datetime.date.today())
today = datetime.date.today() - datetime.timedelta(1)
yesterday_month = today.strftime("%b")
yesterday_dayno = today.strftime("%d")
yesterday_full_Date = today.strftime("%d %B, %Y")
compareable_date = today.strftime("%b/%d")
print(compareable_date)
print()
print("----- Start -----")
print()
# Test
# todays_month = 'Sep'
# todays_day = '15'
# Test
print("Yesterday was : ",yesterday_full_Date)
scope = ["https://spreadsheets.google.com/feeds", 'https://www.googleapis.com/auth/spreadsheets',
"https://www.googleapis.com/auth/drive.file", "https://www.googleapis.com/auth/drive"]
creds = ServiceAccountCredentials.from_json_keyfile_name("CodeforcesAutoTracker-b2030a7afa6c.json", scope);
client = gspread.authorize(creds)
sheet = client.open("Codeforces Auto Tracker - Akif Islam").worksheet('Sheet2')
data = sheet.get_all_records()
# pprint(data)
date_column = sheet.col_values(1)
no_of_total_submission_column = sheet.col_values(2)
no_of_total_accepted_column = sheet.col_values(3)
source_link = "https://codeforces.com/submissions/miss.progga"
source = requests.get(source_link).text
soup = BeautifulSoup(source, "lxml").find('table', class_="status-frame-datatable")
submission_time = []
# 1. Collecting all dates from 50 submission of First Page of Codeforces Submission
for data in soup.findAll('span', class_="format-time"):
submission_time.append(data.text[0:6])
print("Submission's Time : ", submission_time)
print("OK !")
print()
submission_count = int(0)
total_accepted = []
accepted_count = int(0)
accpeted_nonduplicate_set = []
for data in soup.findAll('span', class_="submissionVerdictWrapper"):
total_accepted.append(data.text)
print(total_accepted)
print(len(total_accepted))
print(len(submission_time))
for i in range(0,len(submission_time),1):
if submission_time[i][0:3] == yesterday_month and submission_time[i][4:6] == yesterday_dayno:
submission_count += 1
if(total_accepted[i]== "Accepted"):
str = test.get_problemlist()[i] + " Accepted"
accpeted_nonduplicate_set.append(str)
accpeted_nonduplicate_set = set(accpeted_nonduplicate_set)
print("Accepted List : ",accpeted_nonduplicate_set)
accepted_count = len(accpeted_nonduplicate_set)
print("Total Submission : ", submission_count)
print("Total Accepted : ", accepted_count)
insert_list = [yesterday_full_Date, submission_count, accepted_count]
print(insert_list)
previous_date = sheet.cell(len(date_column), 1).value[0:2]
if previous_date != yesterday_dayno:
sheet.insert_row(insert_list, (len(date_column) + 1))
else:
print("Duplicate Date Found ! ")
print()
print("----- Finished !-----")
print()
update_inRange()
| true
| true
|
f70bf444aeed4ac27e527b05648fdf6fe9dd813e
| 302
|
py
|
Python
|
Linkedin/linkedin-become-a-programmer-foundations/1.programming-foundations-fundamentals-3/challenge_1.py
|
mohammedelzanaty/myRoad2BeFullStack
|
eea3a5edb6c6a999136b04fdaea6ce0c81137a58
|
[
"MIT"
] | 2
|
2021-04-21T12:05:01.000Z
|
2022-01-19T09:58:38.000Z
|
Linkedin/linkedin-become-a-programmer-foundations/1.programming-foundations-fundamentals-3/challenge_1.py
|
mohammedelzanaty/myRoad2BeFullStack
|
eea3a5edb6c6a999136b04fdaea6ce0c81137a58
|
[
"MIT"
] | 34
|
2019-12-26T11:21:42.000Z
|
2022-02-27T19:55:10.000Z
|
Linkedin/linkedin-become-a-programmer-foundations/1.programming-foundations-fundamentals-3/challenge_1.py
|
mohammedelzanaty/myRoad2BeFullStack
|
eea3a5edb6c6a999136b04fdaea6ce0c81137a58
|
[
"MIT"
] | 2
|
2021-08-15T07:59:36.000Z
|
2022-01-16T06:17:32.000Z
|
print("Challenge 1:")
# A message for user
message = "This is goind to be tricky ;"
Message = "Very tricky!"
print(message) # show the message on the screen
# Perform mathematical operations
result = 2**3
print("2**3 =", result)
result = 5 - 3
print("5 - 3 =", result)
print("Challenge complete!")
| 18.875
| 47
| 0.678808
|
print("Challenge 1:")
message = "This is goind to be tricky ;"
Message = "Very tricky!"
print(message)
result = 2**3
print("2**3 =", result)
result = 5 - 3
print("5 - 3 =", result)
print("Challenge complete!")
| true
| true
|
f70bf45fbcab8216b7333ea95959f5208d7eb563
| 2,726
|
py
|
Python
|
blogs/models.py
|
6ba/bbgo
|
dfa9b55b8d40c53940105333c2e03a3c6abddb88
|
[
"MIT"
] | 22
|
2017-07-13T04:07:03.000Z
|
2021-06-10T05:39:29.000Z
|
blogs/models.py
|
genonfire/bbgo
|
5f374f0b620f4dc3e106de5969f26f4585044605
|
[
"MIT"
] | 7
|
2017-08-25T06:33:45.000Z
|
2019-10-14T05:49:32.000Z
|
blogs/models.py
|
6ba/bbgo
|
dfa9b55b8d40c53940105333c2e03a3c6abddb88
|
[
"MIT"
] | 9
|
2017-12-31T02:45:58.000Z
|
2021-01-22T03:09:02.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import models
from django.urls import reverse_lazy
from django.utils.translation import ugettext as _
class Blog(models.Model):
"""Blog of blogs"""
BLOG_STATUS = {
('1normal', _('status_published')),
('2temp', _('status_draft')),
('5hidden', _('status_pending')),
('6deleted', _('status_deleted')),
}
status = models.CharField(
max_length=10, choices=BLOG_STATUS, default='1normal')
user = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now_add=True)
ip = models.GenericIPAddressField()
category = models.CharField(max_length=23, blank=True)
title = models.CharField(max_length=41)
content = models.TextField()
view_count = models.IntegerField(default=0)
comment_count = models.IntegerField(default=0)
like_count = models.IntegerField(default=0)
like_users = models.TextField(default='', blank=True)
image = models.ImageField(
upload_to="featured_images/%Y-%m/", blank=True)
tags = models.TextField(default='', blank=True)
def get_absolute_url(self):
"""Back to list"""
return reverse_lazy('blogs:show_blogs', args=[1])
def get_post_url(self):
"""Back to post"""
return reverse_lazy('blogs:show_post', args=[self.id])
def get_edit_url(self):
"""Stay editing"""
return reverse_lazy('blogs:edit_post', args=[self.id])
def get_status_text(self):
"""Get status text"""
if self.status == '1normal':
return _('status_normal')
elif self.status == '2temp':
return _('status_draft')
elif self.status == '5hidden':
return _('status_pending')
elif self.status == '6deleted':
return _('status_deleted')
class Comment(models.Model):
"""Comment of blogs"""
COMMENT_STATUS = {
('1normal', _('status_normal')),
('6deleted', _('status_deleted')),
('7spam', _('status_spam')),
}
post_id = models.IntegerField(default=0)
comment_id = models.IntegerField(default=0)
status = models.CharField(
max_length=10, choices=COMMENT_STATUS, default='1normal')
userid = models.CharField(max_length=settings.ID_MAX_LENGTH, blank=True)
username = models.CharField(max_length=settings.USERNAME_MAX, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
ip = models.GenericIPAddressField()
content = models.TextField(max_length=settings.COMMENT_TEXT_MAX)
| 34.075
| 77
| 0.662509
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import models
from django.urls import reverse_lazy
from django.utils.translation import ugettext as _
class Blog(models.Model):
BLOG_STATUS = {
('1normal', _('status_published')),
('2temp', _('status_draft')),
('5hidden', _('status_pending')),
('6deleted', _('status_deleted')),
}
status = models.CharField(
max_length=10, choices=BLOG_STATUS, default='1normal')
user = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now_add=True)
ip = models.GenericIPAddressField()
category = models.CharField(max_length=23, blank=True)
title = models.CharField(max_length=41)
content = models.TextField()
view_count = models.IntegerField(default=0)
comment_count = models.IntegerField(default=0)
like_count = models.IntegerField(default=0)
like_users = models.TextField(default='', blank=True)
image = models.ImageField(
upload_to="featured_images/%Y-%m/", blank=True)
tags = models.TextField(default='', blank=True)
def get_absolute_url(self):
return reverse_lazy('blogs:show_blogs', args=[1])
def get_post_url(self):
return reverse_lazy('blogs:show_post', args=[self.id])
def get_edit_url(self):
return reverse_lazy('blogs:edit_post', args=[self.id])
def get_status_text(self):
if self.status == '1normal':
return _('status_normal')
elif self.status == '2temp':
return _('status_draft')
elif self.status == '5hidden':
return _('status_pending')
elif self.status == '6deleted':
return _('status_deleted')
class Comment(models.Model):
COMMENT_STATUS = {
('1normal', _('status_normal')),
('6deleted', _('status_deleted')),
('7spam', _('status_spam')),
}
post_id = models.IntegerField(default=0)
comment_id = models.IntegerField(default=0)
status = models.CharField(
max_length=10, choices=COMMENT_STATUS, default='1normal')
userid = models.CharField(max_length=settings.ID_MAX_LENGTH, blank=True)
username = models.CharField(max_length=settings.USERNAME_MAX, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
ip = models.GenericIPAddressField()
content = models.TextField(max_length=settings.COMMENT_TEXT_MAX)
| true
| true
|
f70bf471cf08b34e3769f50e1b418e61f0ca8aa4
| 2,823
|
py
|
Python
|
python3/koans/about_string_manipulation.py
|
OriginalTsynn/python_koans
|
f35ced3ebbf2c9c19f56183b2997beeb18aae9a9
|
[
"MIT"
] | null | null | null |
python3/koans/about_string_manipulation.py
|
OriginalTsynn/python_koans
|
f35ced3ebbf2c9c19f56183b2997beeb18aae9a9
|
[
"MIT"
] | null | null | null |
python3/koans/about_string_manipulation.py
|
OriginalTsynn/python_koans
|
f35ced3ebbf2c9c19f56183b2997beeb18aae9a9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutStringManipulation(Koan):
def test_use_format_to_interpolate_variables(self):
value1 = 'one'
value2 = 2
string = "The values are {0} and {1}".format(value1, value2)
self.assertEqual("The values are one and 2", string)
def test_formatted_values_can_be_shown_in_any_order_or_be_repeated(self):
value1 = 'doh'
value2 = 'DOH'
string = "The values are {1}, {0}, {0} and {1}!".format(value1, value2)
self.assertEqual("The values are DOH, doh, doh and DOH!", string)
def test_any_python_expression_may_be_interpolated(self):
import math # import a standard python module with math functions
decimal_places = 4
string = "The square root of 5 is {0:.{1}f}".format(math.sqrt(5),
decimal_places)
self.assertEqual("The square root of 5 is 2.2361", string)
def test_you_can_get_a_substring_from_a_string(self):
string = "Bacon, lettuce and tomato"
self.assertEqual("let", string[7:10])
def test_you_can_get_a_single_character_from_a_string(self):
string = "Bacon, lettuce and tomato"
self.assertEqual("a", string[1])
def test_single_characters_can_be_represented_by_integers(self):
self.assertEqual(97, ord('a'))
self.assertEqual(True, ord('b') == (ord('a') + 1))
def test_strings_can_be_split(self):
string = "Sausage Egg Cheese"
words = string.split()
self.assertListEqual(["Sausage", "Egg", "Cheese"], words)
def test_strings_can_be_split_with_different_patterns(self):
import re # import python regular expression library
string = "the,rain;in,spain"
pattern = re.compile(',|;')
words = pattern.split(string)
self.assertListEqual(["the", "rain", "in", "spain"], words)
# Pattern is a Python regular expression pattern which matches ',' or ';'
def test_raw_strings_do_not_interpret_escape_characters(self):
string = r'\n'
self.assertNotEqual('\n', string)
self.assertEqual('\\n', string)
self.assertEqual(2, len(string))
# Useful in regular expressions, file paths, URLs, etc.
def test_strings_can_be_joined(self):
words = ["Now", "is", "the", "time"]
self.assertEqual("Now is the time", ' '.join(words))
def test_strings_can_change_case(self):
self.assertEqual("Guido", 'guido'.capitalize())
self.assertEqual("GUIDO", 'guido'.upper())
self.assertEqual("timbot", 'TimBot'.lower())
self.assertEqual("Guido Van Rossum", 'guido van rossum'.title())
self.assertEqual("tOtAlLy AwEsOmE", 'ToTaLlY aWeSoMe'.swapcase())
| 37.144737
| 81
| 0.636557
|
from runner.koan import *
class AboutStringManipulation(Koan):
def test_use_format_to_interpolate_variables(self):
value1 = 'one'
value2 = 2
string = "The values are {0} and {1}".format(value1, value2)
self.assertEqual("The values are one and 2", string)
def test_formatted_values_can_be_shown_in_any_order_or_be_repeated(self):
value1 = 'doh'
value2 = 'DOH'
string = "The values are {1}, {0}, {0} and {1}!".format(value1, value2)
self.assertEqual("The values are DOH, doh, doh and DOH!", string)
def test_any_python_expression_may_be_interpolated(self):
import math
decimal_places = 4
string = "The square root of 5 is {0:.{1}f}".format(math.sqrt(5),
decimal_places)
self.assertEqual("The square root of 5 is 2.2361", string)
def test_you_can_get_a_substring_from_a_string(self):
string = "Bacon, lettuce and tomato"
self.assertEqual("let", string[7:10])
def test_you_can_get_a_single_character_from_a_string(self):
string = "Bacon, lettuce and tomato"
self.assertEqual("a", string[1])
def test_single_characters_can_be_represented_by_integers(self):
self.assertEqual(97, ord('a'))
self.assertEqual(True, ord('b') == (ord('a') + 1))
def test_strings_can_be_split(self):
string = "Sausage Egg Cheese"
words = string.split()
self.assertListEqual(["Sausage", "Egg", "Cheese"], words)
def test_strings_can_be_split_with_different_patterns(self):
import re
string = "the,rain;in,spain"
pattern = re.compile(',|;')
words = pattern.split(string)
self.assertListEqual(["the", "rain", "in", "spain"], words)
def test_raw_strings_do_not_interpret_escape_characters(self):
string = r'\n'
self.assertNotEqual('\n', string)
self.assertEqual('\\n', string)
self.assertEqual(2, len(string))
def test_strings_can_be_joined(self):
words = ["Now", "is", "the", "time"]
self.assertEqual("Now is the time", ' '.join(words))
def test_strings_can_change_case(self):
self.assertEqual("Guido", 'guido'.capitalize())
self.assertEqual("GUIDO", 'guido'.upper())
self.assertEqual("timbot", 'TimBot'.lower())
self.assertEqual("Guido Van Rossum", 'guido van rossum'.title())
self.assertEqual("tOtAlLy AwEsOmE", 'ToTaLlY aWeSoMe'.swapcase())
| true
| true
|
f70bf669380d96903bd4e90137b76c926924b501
| 15,957
|
py
|
Python
|
build/android/pylib/chrome_test_server_spawner.py
|
GnorTech/chromium
|
e1c7731d5bd099ca5544fcf8eda3867d4ce5bab5
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2018-03-10T13:08:49.000Z
|
2018-03-10T13:08:49.000Z
|
build/android/pylib/chrome_test_server_spawner.py
|
GnorTech/chromium
|
e1c7731d5bd099ca5544fcf8eda3867d4ce5bab5
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
build/android/pylib/chrome_test_server_spawner.py
|
GnorTech/chromium
|
e1c7731d5bd099ca5544fcf8eda3867d4ce5bab5
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2020-11-04T07:19:31.000Z
|
2020-11-04T07:19:31.000Z
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A "Test Server Spawner" that handles killing/stopping per-test test servers.
It's used to accept requests from the device to spawn and kill instances of the
chrome test server on the host.
"""
import BaseHTTPServer
import json
import logging
import os
import select
import struct
import subprocess
import threading
import time
import urlparse
import constants
from forwarder import Forwarder
import ports
# Paths that are needed to import necessary modules when launching a testserver.
os.environ['PYTHONPATH'] = os.environ.get('PYTHONPATH', '') + (':%s:%s:%s:%s:%s'
% (os.path.join(constants.CHROME_DIR, 'third_party'),
os.path.join(constants.CHROME_DIR, 'third_party', 'tlslite'),
os.path.join(constants.CHROME_DIR, 'third_party', 'pyftpdlib', 'src'),
os.path.join(constants.CHROME_DIR, 'net', 'tools', 'testserver'),
os.path.join(constants.CHROME_DIR, 'sync', 'tools', 'testserver')))
SERVER_TYPES = {
'http': '',
'ftp': '-f',
'sync': '', # Sync uses its own script, and doesn't take a server type arg.
'tcpecho': '--tcp-echo',
'udpecho': '--udp-echo',
}
# The timeout (in seconds) of starting up the Python test server.
TEST_SERVER_STARTUP_TIMEOUT = 10
def _CheckPortStatus(port, expected_status):
"""Returns True if port has expected_status.
Args:
port: the port number.
expected_status: boolean of expected status.
Returns:
Returns True if the status is expected. Otherwise returns False.
"""
for timeout in range(1, 5):
if ports.IsHostPortUsed(port) == expected_status:
return True
time.sleep(timeout)
return False
def _GetServerTypeCommandLine(server_type):
"""Returns the command-line by the given server type.
Args:
server_type: the server type to be used (e.g. 'http').
Returns:
A string containing the command-line argument.
"""
if server_type not in SERVER_TYPES:
raise NotImplementedError('Unknown server type: %s' % server_type)
if server_type == 'udpecho':
raise Exception('Please do not run UDP echo tests because we do not have '
'a UDP forwarder tool.')
return SERVER_TYPES[server_type]
class TestServerThread(threading.Thread):
"""A thread to run the test server in a separate process."""
def __init__(self, ready_event, arguments, adb, tool, build_type):
"""Initialize TestServerThread with the following argument.
Args:
ready_event: event which will be set when the test server is ready.
arguments: dictionary of arguments to run the test server.
adb: instance of AndroidCommands.
tool: instance of runtime error detection tool.
build_type: 'Release' or 'Debug'.
"""
threading.Thread.__init__(self)
self.wait_event = threading.Event()
self.stop_flag = False
self.ready_event = ready_event
self.ready_event.clear()
self.arguments = arguments
self.adb = adb
self.tool = tool
self.test_server_process = None
self.is_ready = False
self.host_port = self.arguments['port']
assert isinstance(self.host_port, int)
self._test_server_forwarder = None
# The forwarder device port now is dynamically allocated.
self.forwarder_device_port = 0
# Anonymous pipe in order to get port info from test server.
self.pipe_in = None
self.pipe_out = None
self.command_line = []
self.build_type = build_type
def _WaitToStartAndGetPortFromTestServer(self):
"""Waits for the Python test server to start and gets the port it is using.
The port information is passed by the Python test server with a pipe given
by self.pipe_out. It is written as a result to |self.host_port|.
Returns:
Whether the port used by the test server was successfully fetched.
"""
assert self.host_port == 0 and self.pipe_out and self.pipe_in
(in_fds, _, _) = select.select([self.pipe_in, ], [], [],
TEST_SERVER_STARTUP_TIMEOUT)
if len(in_fds) == 0:
logging.error('Failed to wait for the Python test server to start.')
return False
# First read the data length as an unsigned 4-byte value. This
# is _not_ using network byte ordering since the Python test server packs
# size as native byte order and all Chromium platforms so far are
# configured to use little-endian.
# TODO(jnd): Change the Python test server and local_test_server_*.cc to
# use a unified byte order (either big-endian or little-endian).
data_length = os.read(self.pipe_in, struct.calcsize('=L'))
if data_length:
(data_length,) = struct.unpack('=L', data_length)
assert data_length
if not data_length:
logging.error('Failed to get length of server data.')
return False
port_json = os.read(self.pipe_in, data_length)
if not port_json:
logging.error('Failed to get server data.')
return False
logging.info('Got port json data: %s', port_json)
port_json = json.loads(port_json)
if port_json.has_key('port') and isinstance(port_json['port'], int):
self.host_port = port_json['port']
return _CheckPortStatus(self.host_port, True)
logging.error('Failed to get port information from the server data.')
return False
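# Illustrative sketch (not part of the original module): the framing expected
# above on the startup pipe is a native-endian unsigned 32-bit length followed
# by a JSON payload. A hypothetical writer side could build that message as
# shown here; the port value is arbitrary.
def _ExamplePackStartupMessage(port):
  payload = json.dumps({'port': port})
  return struct.pack('=L', len(payload)) + payload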
def _GenerateCommandLineArguments(self):
"""Generates the command line to run the test server.
Note that all options are processed by following the definitions in
testserver.py.
"""
if self.command_line:
return
# The following arguments must exist.
type_cmd = _GetServerTypeCommandLine(self.arguments['server-type'])
if type_cmd:
self.command_line.append(type_cmd)
self.command_line.append('--port=%d' % self.host_port)
# Use a pipe to get the port given by the instance of Python test server
# if the test does not specify the port.
if self.host_port == 0:
(self.pipe_in, self.pipe_out) = os.pipe()
self.command_line.append('--startup-pipe=%d' % self.pipe_out)
self.command_line.append('--host=%s' % self.arguments['host'])
data_dir = self.arguments['data-dir'] or 'chrome/test/data'
if not os.path.isabs(data_dir):
data_dir = os.path.join(constants.CHROME_DIR, data_dir)
self.command_line.append('--data-dir=%s' % data_dir)
# The following arguments are optional depending on the individual test.
if self.arguments.has_key('log-to-console'):
self.command_line.append('--log-to-console')
if self.arguments.has_key('auth-token'):
self.command_line.append('--auth-token=%s' % self.arguments['auth-token'])
if self.arguments.has_key('https'):
self.command_line.append('--https')
if self.arguments.has_key('cert-and-key-file'):
self.command_line.append('--cert-and-key-file=%s' % os.path.join(
constants.CHROME_DIR, self.arguments['cert-and-key-file']))
if self.arguments.has_key('ocsp'):
self.command_line.append('--ocsp=%s' % self.arguments['ocsp'])
if self.arguments.has_key('https-record-resume'):
self.command_line.append('--https-record-resume')
if self.arguments.has_key('ssl-client-auth'):
self.command_line.append('--ssl-client-auth')
if self.arguments.has_key('tls-intolerant'):
self.command_line.append('--tls-intolerant=%s' %
self.arguments['tls-intolerant'])
if self.arguments.has_key('ssl-client-ca'):
for ca in self.arguments['ssl-client-ca']:
self.command_line.append('--ssl-client-ca=%s' %
os.path.join(constants.CHROME_DIR, ca))
if self.arguments.has_key('ssl-bulk-cipher'):
for bulk_cipher in self.arguments['ssl-bulk-cipher']:
self.command_line.append('--ssl-bulk-cipher=%s' % bulk_cipher)
def run(self):
logging.info('Start running the thread!')
self.wait_event.clear()
self._GenerateCommandLineArguments()
command = constants.CHROME_DIR
if self.arguments['server-type'] == 'sync':
command = [os.path.join(command, 'sync', 'tools', 'testserver',
'sync_testserver.py')] + self.command_line
else:
command = [os.path.join(command, 'net', 'tools', 'testserver',
'testserver.py')] + self.command_line
logging.info('Running: %s', command)
self.process = subprocess.Popen(command)
if self.process:
if self.pipe_out:
self.is_ready = self._WaitToStartAndGetPortFromTestServer()
else:
self.is_ready = _CheckPortStatus(self.host_port, True)
if self.is_ready:
self._test_server_forwarder = Forwarder(self.adb, self.build_type)
self._test_server_forwarder.Run(
[(0, self.host_port)], self.tool, '127.0.0.1')
# Check whether the forwarder is ready on the device.
self.is_ready = False
device_port = self._test_server_forwarder.DevicePortForHostPort(
self.host_port)
if device_port:
for timeout in range(1, 5):
if ports.IsDevicePortUsed(self.adb, device_port, 'LISTEN'):
self.is_ready = True
self.forwarder_device_port = device_port
break
time.sleep(timeout)
# Wake up the request handler thread.
self.ready_event.set()
# Keep thread running until Stop() gets called.
while not self.stop_flag:
time.sleep(1)
if self.process.poll() is None:
self.process.kill()
if self._test_server_forwarder:
self._test_server_forwarder.Close()
self.process = None
self.is_ready = False
if self.pipe_out:
os.close(self.pipe_in)
os.close(self.pipe_out)
self.pipe_in = None
self.pipe_out = None
logging.info('Test-server has died.')
self.wait_event.set()
def Stop(self):
"""Blocks until the loop has finished.
Note that this must be called in another thread.
"""
if not self.process:
return
self.stop_flag = True
self.wait_event.wait()
class SpawningServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""A handler used to process http GET/POST request."""
def _SendResponse(self, response_code, response_reason, additional_headers,
contents):
"""Generates a response sent to the client from the provided parameters.
Args:
response_code: number of the response status.
response_reason: string of reason description of the response.
additional_headers: dict of additional headers. Each key is the name of
the header, each value is the content of the header.
contents: string of the contents we want to send to client.
"""
self.send_response(response_code, response_reason)
self.send_header('Content-Type', 'text/html')
# Specify the content-length as without it the http(s) response will not
# be completed properly (and the browser keeps expecting data).
self.send_header('Content-Length', len(contents))
for header_name in additional_headers:
self.send_header(header_name, additional_headers[header_name])
self.end_headers()
self.wfile.write(contents)
self.wfile.flush()
def _StartTestServer(self):
"""Starts the test server thread."""
logging.info('Handling request to spawn a test server.')
content_type = self.headers.getheader('content-type')
if content_type != 'application/json':
raise Exception('Bad content-type for start request.')
content_length = self.headers.getheader('content-length')
if not content_length:
content_length = 0
try:
content_length = int(content_length)
except:
raise Exception('Bad content-length for start request.')
logging.info(content_length)
test_server_argument_json = self.rfile.read(content_length)
logging.info(test_server_argument_json)
assert not self.server.test_server_instance
ready_event = threading.Event()
self.server.test_server_instance = TestServerThread(
ready_event,
json.loads(test_server_argument_json),
self.server.adb,
self.server.tool,
self.server.build_type)
self.server.test_server_instance.setDaemon(True)
self.server.test_server_instance.start()
ready_event.wait()
if self.server.test_server_instance.is_ready:
self._SendResponse(200, 'OK', {}, json.dumps(
{'port': self.server.test_server_instance.forwarder_device_port,
'message': 'started'}))
logging.info('Test server is running on port: %d.',
self.server.test_server_instance.host_port)
else:
self.server.test_server_instance.Stop()
self.server.test_server_instance = None
self._SendResponse(500, 'Test Server Error.', {}, '')
logging.info('Encounter problem during starting a test server.')
def _KillTestServer(self):
"""Stops the test server instance."""
# There should only ever be one test server at a time. This may do the
# wrong thing if we try and start multiple test servers.
if not self.server.test_server_instance:
return
port = self.server.test_server_instance.host_port
logging.info('Handling request to kill a test server on port: %d.', port)
self.server.test_server_instance.Stop()
# Make sure the status of test server is correct before sending response.
if _CheckPortStatus(port, False):
self._SendResponse(200, 'OK', {}, 'killed')
logging.info('Test server on port %d is killed', port)
else:
self._SendResponse(500, 'Test Server Error.', {}, '')
logging.info('Encounter problem during killing a test server.')
self.server.test_server_instance = None
def do_POST(self):
parsed_path = urlparse.urlparse(self.path)
action = parsed_path.path
logging.info('Action for POST method is: %s.', action)
if action == '/start':
self._StartTestServer()
else:
self._SendResponse(400, 'Unknown request.', {}, '')
logging.info('Encounter unknown request: %s.', action)
def do_GET(self):
parsed_path = urlparse.urlparse(self.path)
action = parsed_path.path
params = urlparse.parse_qs(parsed_path.query, keep_blank_values=1)
logging.info('Action for GET method is: %s.', action)
for param in params:
logging.info('%s=%s', param, params[param][0])
if action == '/kill':
self._KillTestServer()
elif action == '/ping':
# The ping handler is used to check whether the spawner server is ready
# to serve the requests. We don't need to test the status of the test
# server when handling ping request.
self._SendResponse(200, 'OK', {}, 'ready')
logging.info('Handled ping request and sent response.')
else:
self._SendResponse(400, 'Unknown request', {}, '')
logging.info('Encounter unknown request: %s.', action)
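# Illustrative note (not part of the original module): the handler above speaks
# a small HTTP protocol. A hypothetical client could drive it roughly like this
# (the spawner port 8001 and the argument values are made up):
#
#   import urllib2
#   urllib2.urlopen('http://localhost:8001/ping').read()      # -> 'ready'
#   req = urllib2.Request('http://localhost:8001/start',
#                         json.dumps({'server-type': 'http', 'port': 0,
#                                     'host': '127.0.0.1', 'data-dir': ''}),
#                         {'Content-Type': 'application/json'})
#   urllib2.urlopen(req).read()                                # -> port JSON
#   urllib2.urlopen('http://localhost:8001/kill').read()       # -> 'killed'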
class SpawningServer(object):
"""The class used to start/stop a http server."""
def __init__(self, test_server_spawner_port, adb, tool, build_type):
logging.info('Creating new spawner on port: %d.', test_server_spawner_port)
self.server = BaseHTTPServer.HTTPServer(('', test_server_spawner_port),
SpawningServerRequestHandler)
self.port = test_server_spawner_port
self.server.adb = adb
self.server.tool = tool
self.server.test_server_instance = None
self.server.build_type = build_type
def _Listen(self):
logging.info('Starting test server spawner')
self.server.serve_forever()
def Start(self):
listener_thread = threading.Thread(target=self._Listen)
listener_thread.setDaemon(True)
listener_thread.start()
time.sleep(1)
def Stop(self):
if self.server.test_server_instance:
self.server.test_server_instance.Stop()
self.server.shutdown()
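# Illustrative sketch (not part of the original module): how a test harness
# might drive the spawner. The port number is arbitrary, and the adb and tool
# objects would come from the Android test runner.
#
#   spawner = SpawningServer(8001, adb, tool, 'Release')
#   spawner.Start()
#   # ... run device tests that talk to the spawner over the forwarded port ...
#   spawner.Stop()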
| 39.01467
| 80
| 0.682522
|
import BaseHTTPServer
import json
import logging
import os
import select
import struct
import subprocess
import threading
import time
import urlparse
import constants
from forwarder import Forwarder
import ports
os.environ['PYTHONPATH'] = os.environ.get('PYTHONPATH', '') + (':%s:%s:%s:%s:%s'
% (os.path.join(constants.CHROME_DIR, 'third_party'),
os.path.join(constants.CHROME_DIR, 'third_party', 'tlslite'),
os.path.join(constants.CHROME_DIR, 'third_party', 'pyftpdlib', 'src'),
os.path.join(constants.CHROME_DIR, 'net', 'tools', 'testserver'),
os.path.join(constants.CHROME_DIR, 'sync', 'tools', 'testserver')))
SERVER_TYPES = {
'http': '',
'ftp': '-f',
'sync': '', 'tcpecho': '--tcp-echo',
'udpecho': '--udp-echo',
}
# The timeout (in seconds) of starting up the Python test server.
TEST_SERVER_STARTUP_TIMEOUT = 10
def _CheckPortStatus(port, expected_status):
for timeout in range(1, 5):
if ports.IsHostPortUsed(port) == expected_status:
return True
time.sleep(timeout)
return False
def _GetServerTypeCommandLine(server_type):
if server_type not in SERVER_TYPES:
raise NotImplementedError('Unknown server type: %s' % server_type)
if server_type == 'udpecho':
raise Exception('Please do not run UDP echo tests because we do not have '
'a UDP forwarder tool.')
return SERVER_TYPES[server_type]
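# Illustrative examples (not part of the original file): 'ftp' maps to '-f',
# 'http' maps to an empty string, and 'udpecho' raises because there is no
# UDP forwarder tool.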
class TestServerThread(threading.Thread):
def __init__(self, ready_event, arguments, adb, tool, build_type):
threading.Thread.__init__(self)
self.wait_event = threading.Event()
self.stop_flag = False
self.ready_event = ready_event
self.ready_event.clear()
self.arguments = arguments
self.adb = adb
self.tool = tool
self.test_server_process = None
self.is_ready = False
self.host_port = self.arguments['port']
assert isinstance(self.host_port, int)
self._test_server_forwarder = None
# The forwarder device port now is dynamically allocated.
self.forwarder_device_port = 0
# Anonymous pipe in order to get port info from test server.
self.pipe_in = None
self.pipe_out = None
self.command_line = []
self.build_type = build_type
def _WaitToStartAndGetPortFromTestServer(self):
assert self.host_port == 0 and self.pipe_out and self.pipe_in
(in_fds, _, _) = select.select([self.pipe_in, ], [], [],
TEST_SERVER_STARTUP_TIMEOUT)
if len(in_fds) == 0:
      logging.error('Failed waiting for the Python test server to start.')
return False
# First read the data length as an unsigned 4-byte value. This
# is _not_ using network byte ordering since the Python test server packs
# size as native byte order and all Chromium platforms so far are
# configured to use little-endian.
# TODO(jnd): Change the Python test server and local_test_server_*.cc to
# use a unified byte order (either big-endian or little-endian).
data_length = os.read(self.pipe_in, struct.calcsize('=L'))
if data_length:
(data_length,) = struct.unpack('=L', data_length)
assert data_length
if not data_length:
logging.error('Failed to get length of server data.')
return False
port_json = os.read(self.pipe_in, data_length)
if not port_json:
logging.error('Failed to get server data.')
return False
logging.info('Got port json data: %s', port_json)
port_json = json.loads(port_json)
if port_json.has_key('port') and isinstance(port_json['port'], int):
self.host_port = port_json['port']
return _CheckPortStatus(self.host_port, True)
logging.error('Failed to get port information from the server data.')
return False
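  # Illustrative sketch (not part of the original file): the test-server side
  # is expected to write a native-byte-order ('=L') length prefix followed by
  # a JSON payload naming the chosen port, e.g.
  #
  #   payload = json.dumps({'port': host_port})
  #   os.write(startup_pipe_fd, struct.pack('=L', len(payload)) + payload)
  #
  # where 'startup_pipe_fd' and 'host_port' are hypothetical names.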
def _GenerateCommandLineArguments(self):
if self.command_line:
return
# The following arguments must exist.
type_cmd = _GetServerTypeCommandLine(self.arguments['server-type'])
if type_cmd:
self.command_line.append(type_cmd)
self.command_line.append('--port=%d' % self.host_port)
# Use a pipe to get the port given by the instance of Python test server
# if the test does not specify the port.
if self.host_port == 0:
(self.pipe_in, self.pipe_out) = os.pipe()
self.command_line.append('--startup-pipe=%d' % self.pipe_out)
self.command_line.append('--host=%s' % self.arguments['host'])
data_dir = self.arguments['data-dir'] or 'chrome/test/data'
if not os.path.isabs(data_dir):
data_dir = os.path.join(constants.CHROME_DIR, data_dir)
self.command_line.append('--data-dir=%s' % data_dir)
# The following arguments are optional depending on the individual test.
if self.arguments.has_key('log-to-console'):
self.command_line.append('--log-to-console')
if self.arguments.has_key('auth-token'):
self.command_line.append('--auth-token=%s' % self.arguments['auth-token'])
if self.arguments.has_key('https'):
self.command_line.append('--https')
if self.arguments.has_key('cert-and-key-file'):
self.command_line.append('--cert-and-key-file=%s' % os.path.join(
constants.CHROME_DIR, self.arguments['cert-and-key-file']))
if self.arguments.has_key('ocsp'):
self.command_line.append('--ocsp=%s' % self.arguments['ocsp'])
if self.arguments.has_key('https-record-resume'):
self.command_line.append('--https-record-resume')
if self.arguments.has_key('ssl-client-auth'):
self.command_line.append('--ssl-client-auth')
if self.arguments.has_key('tls-intolerant'):
self.command_line.append('--tls-intolerant=%s' %
self.arguments['tls-intolerant'])
if self.arguments.has_key('ssl-client-ca'):
for ca in self.arguments['ssl-client-ca']:
self.command_line.append('--ssl-client-ca=%s' %
os.path.join(constants.CHROME_DIR, ca))
if self.arguments.has_key('ssl-bulk-cipher'):
for bulk_cipher in self.arguments['ssl-bulk-cipher']:
self.command_line.append('--ssl-bulk-cipher=%s' % bulk_cipher)
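  # Illustrative sketch (not part of the original file): a hypothetical
  # arguments dict such as
  #   {'server-type': 'ftp', 'port': 0, 'host': '127.0.0.1', 'data-dir': ''}
  # would yield a command line along the lines of
  #   ['-f', '--port=0', '--startup-pipe=<fd>', '--host=127.0.0.1',
  #    '--data-dir=<CHROME_DIR>/chrome/test/data']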
def run(self):
logging.info('Start running the thread!')
self.wait_event.clear()
self._GenerateCommandLineArguments()
command = constants.CHROME_DIR
if self.arguments['server-type'] == 'sync':
command = [os.path.join(command, 'sync', 'tools', 'testserver',
'sync_testserver.py')] + self.command_line
else:
command = [os.path.join(command, 'net', 'tools', 'testserver',
'testserver.py')] + self.command_line
logging.info('Running: %s', command)
self.process = subprocess.Popen(command)
if self.process:
if self.pipe_out:
self.is_ready = self._WaitToStartAndGetPortFromTestServer()
else:
self.is_ready = _CheckPortStatus(self.host_port, True)
if self.is_ready:
self._test_server_forwarder = Forwarder(self.adb, self.build_type)
self._test_server_forwarder.Run(
[(0, self.host_port)], self.tool, '127.0.0.1')
# Check whether the forwarder is ready on the device.
self.is_ready = False
device_port = self._test_server_forwarder.DevicePortForHostPort(
self.host_port)
if device_port:
for timeout in range(1, 5):
if ports.IsDevicePortUsed(self.adb, device_port, 'LISTEN'):
self.is_ready = True
self.forwarder_device_port = device_port
break
time.sleep(timeout)
# Wake up the request handler thread.
self.ready_event.set()
# Keep thread running until Stop() gets called.
while not self.stop_flag:
time.sleep(1)
if self.process.poll() is None:
self.process.kill()
if self._test_server_forwarder:
self._test_server_forwarder.Close()
self.process = None
self.is_ready = False
if self.pipe_out:
os.close(self.pipe_in)
os.close(self.pipe_out)
self.pipe_in = None
self.pipe_out = None
logging.info('Test-server has died.')
self.wait_event.set()
def Stop(self):
if not self.process:
return
self.stop_flag = True
self.wait_event.wait()
class SpawningServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def _SendResponse(self, response_code, response_reason, additional_headers,
contents):
self.send_response(response_code, response_reason)
self.send_header('Content-Type', 'text/html')
# Specify the content-length as without it the http(s) response will not
# be completed properly (and the browser keeps expecting data).
self.send_header('Content-Length', len(contents))
for header_name in additional_headers:
self.send_header(header_name, additional_headers[header_name])
self.end_headers()
self.wfile.write(contents)
self.wfile.flush()
def _StartTestServer(self):
logging.info('Handling request to spawn a test server.')
content_type = self.headers.getheader('content-type')
if content_type != 'application/json':
raise Exception('Bad content-type for start request.')
content_length = self.headers.getheader('content-length')
if not content_length:
content_length = 0
try:
content_length = int(content_length)
except:
raise Exception('Bad content-length for start request.')
logging.info(content_length)
test_server_argument_json = self.rfile.read(content_length)
logging.info(test_server_argument_json)
assert not self.server.test_server_instance
ready_event = threading.Event()
self.server.test_server_instance = TestServerThread(
ready_event,
json.loads(test_server_argument_json),
self.server.adb,
self.server.tool,
self.server.build_type)
self.server.test_server_instance.setDaemon(True)
self.server.test_server_instance.start()
ready_event.wait()
if self.server.test_server_instance.is_ready:
self._SendResponse(200, 'OK', {}, json.dumps(
{'port': self.server.test_server_instance.forwarder_device_port,
'message': 'started'}))
logging.info('Test server is running on port: %d.',
self.server.test_server_instance.host_port)
else:
self.server.test_server_instance.Stop()
self.server.test_server_instance = None
self._SendResponse(500, 'Test Server Error.', {}, '')
      logging.info('Encountered a problem while starting the test server.')
def _KillTestServer(self):
# There should only ever be one test server at a time. This may do the
# wrong thing if we try and start multiple test servers.
if not self.server.test_server_instance:
return
port = self.server.test_server_instance.host_port
logging.info('Handling request to kill a test server on port: %d.', port)
self.server.test_server_instance.Stop()
# Make sure the status of test server is correct before sending response.
if _CheckPortStatus(port, False):
self._SendResponse(200, 'OK', {}, 'killed')
logging.info('Test server on port %d is killed', port)
else:
self._SendResponse(500, 'Test Server Error.', {}, '')
      logging.info('Encountered a problem while killing the test server.')
self.server.test_server_instance = None
def do_POST(self):
parsed_path = urlparse.urlparse(self.path)
action = parsed_path.path
logging.info('Action for POST method is: %s.', action)
if action == '/start':
self._StartTestServer()
else:
self._SendResponse(400, 'Unknown request.', {}, '')
      logging.info('Encountered unknown request: %s.', action)
def do_GET(self):
parsed_path = urlparse.urlparse(self.path)
action = parsed_path.path
params = urlparse.parse_qs(parsed_path.query, keep_blank_values=1)
logging.info('Action for GET method is: %s.', action)
for param in params:
logging.info('%s=%s', param, params[param][0])
if action == '/kill':
self._KillTestServer()
elif action == '/ping':
# The ping handler is used to check whether the spawner server is ready
      # to serve the requests. We don't need to test the status of the test
      # server when handling ping request.
self._SendResponse(200, 'OK', {}, 'ready')
logging.info('Handled ping request and sent response.')
else:
self._SendResponse(400, 'Unknown request', {}, '')
      logging.info('Encountered unknown request: %s.', action)
class SpawningServer(object):
def __init__(self, test_server_spawner_port, adb, tool, build_type):
logging.info('Creating new spawner on port: %d.', test_server_spawner_port)
self.server = BaseHTTPServer.HTTPServer(('', test_server_spawner_port),
SpawningServerRequestHandler)
self.port = test_server_spawner_port
self.server.adb = adb
self.server.tool = tool
self.server.test_server_instance = None
self.server.build_type = build_type
def _Listen(self):
logging.info('Starting test server spawner')
self.server.serve_forever()
def Start(self):
listener_thread = threading.Thread(target=self._Listen)
listener_thread.setDaemon(True)
listener_thread.start()
time.sleep(1)
def Stop(self):
if self.server.test_server_instance:
self.server.test_server_instance.Stop()
self.server.shutdown()
| true
| true
|
f70bf6ee38f2719e916cda8cb70d9a8dda8c9666
| 8,303
|
py
|
Python
|
whoville/cloudbreak/models/reinstall_request_v2.py
|
mikchaos/whoville
|
6eabaea4b74ac0b632c03db8252590131c6ce63b
|
[
"Apache-2.0"
] | null | null | null |
whoville/cloudbreak/models/reinstall_request_v2.py
|
mikchaos/whoville
|
6eabaea4b74ac0b632c03db8252590131c6ce63b
|
[
"Apache-2.0"
] | null | null | null |
whoville/cloudbreak/models/reinstall_request_v2.py
|
mikchaos/whoville
|
6eabaea4b74ac0b632c03db8252590131c6ce63b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Cloudbreak API
    Cloudbreak is a powerful left surf that breaks over a coral reef, a mile southwest of the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. It abstracts the provisioning and eases the management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing you to spin up Hadoop clusters of arbitrary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of the cloud providers' APIs (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.7.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ReinstallRequestV2(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'instance_groups': 'list[InstanceGroupsV2]',
'ambari_stack_details': 'AmbariStackDetails',
'blueprint_name': 'str',
'kerberos_password': 'str',
'kerberos_principal': 'str'
}
attribute_map = {
'instance_groups': 'instanceGroups',
'ambari_stack_details': 'ambariStackDetails',
'blueprint_name': 'blueprintName',
'kerberos_password': 'kerberosPassword',
'kerberos_principal': 'kerberosPrincipal'
}
def __init__(self, instance_groups=None, ambari_stack_details=None, blueprint_name=None, kerberos_password=None, kerberos_principal=None):
"""
ReinstallRequestV2 - a model defined in Swagger
"""
self._instance_groups = None
self._ambari_stack_details = None
self._blueprint_name = None
self._kerberos_password = None
self._kerberos_principal = None
if instance_groups is not None:
self.instance_groups = instance_groups
if ambari_stack_details is not None:
self.ambari_stack_details = ambari_stack_details
self.blueprint_name = blueprint_name
if kerberos_password is not None:
self.kerberos_password = kerberos_password
if kerberos_principal is not None:
self.kerberos_principal = kerberos_principal
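    # Illustrative sketch (not part of the generated file): building a request
    # body with hypothetical values and serialising it.
    #
    #   req = ReinstallRequestV2(blueprint_name='my-blueprint',
    #                            kerberos_password='secret',
    #                            kerberos_principal='admin/admin')
    #   body = req.to_dict()  # plain dict keyed by attribute name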
@property
def instance_groups(self):
"""
Gets the instance_groups of this ReinstallRequestV2.
        collection of instance groups
:return: The instance_groups of this ReinstallRequestV2.
:rtype: list[InstanceGroupsV2]
"""
return self._instance_groups
@instance_groups.setter
def instance_groups(self, instance_groups):
"""
Sets the instance_groups of this ReinstallRequestV2.
        collection of instance groups
:param instance_groups: The instance_groups of this ReinstallRequestV2.
:type: list[InstanceGroupsV2]
"""
self._instance_groups = instance_groups
@property
def ambari_stack_details(self):
"""
Gets the ambari_stack_details of this ReinstallRequestV2.
details of the Ambari stack
:return: The ambari_stack_details of this ReinstallRequestV2.
:rtype: AmbariStackDetails
"""
return self._ambari_stack_details
@ambari_stack_details.setter
def ambari_stack_details(self, ambari_stack_details):
"""
Sets the ambari_stack_details of this ReinstallRequestV2.
details of the Ambari stack
:param ambari_stack_details: The ambari_stack_details of this ReinstallRequestV2.
:type: AmbariStackDetails
"""
self._ambari_stack_details = ambari_stack_details
@property
def blueprint_name(self):
"""
Gets the blueprint_name of this ReinstallRequestV2.
blueprint name for the cluster
:return: The blueprint_name of this ReinstallRequestV2.
:rtype: str
"""
return self._blueprint_name
@blueprint_name.setter
def blueprint_name(self, blueprint_name):
"""
Sets the blueprint_name of this ReinstallRequestV2.
blueprint name for the cluster
:param blueprint_name: The blueprint_name of this ReinstallRequestV2.
:type: str
"""
if blueprint_name is None:
raise ValueError("Invalid value for `blueprint_name`, must not be `None`")
self._blueprint_name = blueprint_name
@property
def kerberos_password(self):
"""
Gets the kerberos_password of this ReinstallRequestV2.
kerberos admin password
:return: The kerberos_password of this ReinstallRequestV2.
:rtype: str
"""
return self._kerberos_password
@kerberos_password.setter
def kerberos_password(self, kerberos_password):
"""
Sets the kerberos_password of this ReinstallRequestV2.
kerberos admin password
:param kerberos_password: The kerberos_password of this ReinstallRequestV2.
:type: str
"""
if kerberos_password is not None and len(kerberos_password) > 50:
raise ValueError("Invalid value for `kerberos_password`, length must be less than or equal to `50`")
if kerberos_password is not None and len(kerberos_password) < 5:
raise ValueError("Invalid value for `kerberos_password`, length must be greater than or equal to `5`")
self._kerberos_password = kerberos_password
@property
def kerberos_principal(self):
"""
Gets the kerberos_principal of this ReinstallRequestV2.
kerberos principal
:return: The kerberos_principal of this ReinstallRequestV2.
:rtype: str
"""
return self._kerberos_principal
@kerberos_principal.setter
def kerberos_principal(self, kerberos_principal):
"""
Sets the kerberos_principal of this ReinstallRequestV2.
kerberos principal
:param kerberos_principal: The kerberos_principal of this ReinstallRequestV2.
:type: str
"""
self._kerberos_principal = kerberos_principal
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ReinstallRequestV2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 34.168724
| 984
| 0.648802
|
from pprint import pformat
from six import iteritems
import re
class ReinstallRequestV2(object):
swagger_types = {
'instance_groups': 'list[InstanceGroupsV2]',
'ambari_stack_details': 'AmbariStackDetails',
'blueprint_name': 'str',
'kerberos_password': 'str',
'kerberos_principal': 'str'
}
attribute_map = {
'instance_groups': 'instanceGroups',
'ambari_stack_details': 'ambariStackDetails',
'blueprint_name': 'blueprintName',
'kerberos_password': 'kerberosPassword',
'kerberos_principal': 'kerberosPrincipal'
}
def __init__(self, instance_groups=None, ambari_stack_details=None, blueprint_name=None, kerberos_password=None, kerberos_principal=None):
self._instance_groups = None
self._ambari_stack_details = None
self._blueprint_name = None
self._kerberos_password = None
self._kerberos_principal = None
if instance_groups is not None:
self.instance_groups = instance_groups
if ambari_stack_details is not None:
self.ambari_stack_details = ambari_stack_details
self.blueprint_name = blueprint_name
if kerberos_password is not None:
self.kerberos_password = kerberos_password
if kerberos_principal is not None:
self.kerberos_principal = kerberos_principal
@property
def instance_groups(self):
return self._instance_groups
@instance_groups.setter
def instance_groups(self, instance_groups):
self._instance_groups = instance_groups
@property
def ambari_stack_details(self):
return self._ambari_stack_details
@ambari_stack_details.setter
def ambari_stack_details(self, ambari_stack_details):
self._ambari_stack_details = ambari_stack_details
@property
def blueprint_name(self):
return self._blueprint_name
@blueprint_name.setter
def blueprint_name(self, blueprint_name):
if blueprint_name is None:
raise ValueError("Invalid value for `blueprint_name`, must not be `None`")
self._blueprint_name = blueprint_name
@property
def kerberos_password(self):
return self._kerberos_password
@kerberos_password.setter
def kerberos_password(self, kerberos_password):
if kerberos_password is not None and len(kerberos_password) > 50:
raise ValueError("Invalid value for `kerberos_password`, length must be less than or equal to `50`")
if kerberos_password is not None and len(kerberos_password) < 5:
raise ValueError("Invalid value for `kerberos_password`, length must be greater than or equal to `5`")
self._kerberos_password = kerberos_password
@property
def kerberos_principal(self):
return self._kerberos_principal
@kerberos_principal.setter
def kerberos_principal(self, kerberos_principal):
self._kerberos_principal = kerberos_principal
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, ReinstallRequestV2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
f70bf6f1dc5f4faa2e79e0059d5d2beeba7eb784
| 41,081
|
py
|
Python
|
venv/Lib/site-packages/matplotlib/backends/backend_qt5.py
|
StewSchrieff/riddlerHoopGame
|
3d63f494aa803c7571ace83f87a40ce5d6b0dfc1
|
[
"MIT"
] | 69
|
2020-03-31T06:40:17.000Z
|
2022-02-25T11:48:18.000Z
|
venv/Lib/site-packages/matplotlib/backends/backend_qt5.py
|
StewSchrieff/riddlerHoopGame
|
3d63f494aa803c7571ace83f87a40ce5d6b0dfc1
|
[
"MIT"
] | 6
|
2018-08-28T12:33:14.000Z
|
2019-05-07T20:32:42.000Z
|
venv/Lib/site-packages/matplotlib/backends/backend_qt5.py
|
StewSchrieff/riddlerHoopGame
|
3d63f494aa803c7571ace83f87a40ce5d6b0dfc1
|
[
"MIT"
] | 28
|
2020-04-15T15:24:17.000Z
|
2021-12-26T04:05:02.000Z
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import functools
import os
import re
import signal
import sys
from six import unichr
import traceback
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, NavigationToolbar2,
TimerBase, cursors, ToolContainerBase, StatusbarBase)
import matplotlib.backends.qt_editor.figureoptions as figureoptions
from matplotlib.backends.qt_editor.formsubplottool import UiSubplotTool
from matplotlib.figure import Figure
from matplotlib.backend_managers import ToolManager
from matplotlib import backend_tools
from .qt_compat import (
QtCore, QtGui, QtWidgets, _getSaveFileName, is_pyqt5, __version__, QT_API)
backend_version = __version__
# SPECIAL_KEYS are keys that do *not* return their unicode name
# instead they have manually specified names
SPECIAL_KEYS = {QtCore.Qt.Key_Control: 'control',
QtCore.Qt.Key_Shift: 'shift',
QtCore.Qt.Key_Alt: 'alt',
QtCore.Qt.Key_Meta: 'super',
QtCore.Qt.Key_Return: 'enter',
QtCore.Qt.Key_Left: 'left',
QtCore.Qt.Key_Up: 'up',
QtCore.Qt.Key_Right: 'right',
QtCore.Qt.Key_Down: 'down',
QtCore.Qt.Key_Escape: 'escape',
QtCore.Qt.Key_F1: 'f1',
QtCore.Qt.Key_F2: 'f2',
QtCore.Qt.Key_F3: 'f3',
QtCore.Qt.Key_F4: 'f4',
QtCore.Qt.Key_F5: 'f5',
QtCore.Qt.Key_F6: 'f6',
QtCore.Qt.Key_F7: 'f7',
QtCore.Qt.Key_F8: 'f8',
QtCore.Qt.Key_F9: 'f9',
QtCore.Qt.Key_F10: 'f10',
QtCore.Qt.Key_F11: 'f11',
QtCore.Qt.Key_F12: 'f12',
QtCore.Qt.Key_Home: 'home',
QtCore.Qt.Key_End: 'end',
QtCore.Qt.Key_PageUp: 'pageup',
QtCore.Qt.Key_PageDown: 'pagedown',
QtCore.Qt.Key_Tab: 'tab',
QtCore.Qt.Key_Backspace: 'backspace',
QtCore.Qt.Key_Enter: 'enter',
QtCore.Qt.Key_Insert: 'insert',
QtCore.Qt.Key_Delete: 'delete',
QtCore.Qt.Key_Pause: 'pause',
QtCore.Qt.Key_SysReq: 'sysreq',
QtCore.Qt.Key_Clear: 'clear', }
# define which modifier keys are collected on keyboard events.
# elements are (mpl names, Modifier Flag, Qt Key) tuples
SUPER = 0
ALT = 1
CTRL = 2
SHIFT = 3
MODIFIER_KEYS = [('super', QtCore.Qt.MetaModifier, QtCore.Qt.Key_Meta),
('alt', QtCore.Qt.AltModifier, QtCore.Qt.Key_Alt),
('ctrl', QtCore.Qt.ControlModifier, QtCore.Qt.Key_Control),
('shift', QtCore.Qt.ShiftModifier, QtCore.Qt.Key_Shift),
]
if sys.platform == 'darwin':
# in OSX, the control and super (aka cmd/apple) keys are switched, so
# switch them back.
SPECIAL_KEYS.update({QtCore.Qt.Key_Control: 'cmd', # cmd/apple key
QtCore.Qt.Key_Meta: 'control',
})
MODIFIER_KEYS[0] = ('cmd', QtCore.Qt.ControlModifier,
QtCore.Qt.Key_Control)
MODIFIER_KEYS[2] = ('ctrl', QtCore.Qt.MetaModifier,
QtCore.Qt.Key_Meta)
cursord = {
cursors.MOVE: QtCore.Qt.SizeAllCursor,
cursors.HAND: QtCore.Qt.PointingHandCursor,
cursors.POINTER: QtCore.Qt.ArrowCursor,
cursors.SELECT_REGION: QtCore.Qt.CrossCursor,
cursors.WAIT: QtCore.Qt.WaitCursor,
}
# make place holder
qApp = None
def _create_qApp():
"""
Only one qApp can exist at a time, so check before creating one.
"""
global qApp
if qApp is None:
app = QtWidgets.QApplication.instance()
if app is None:
# check for DISPLAY env variable on X11 build of Qt
if is_pyqt5():
try:
from PyQt5 import QtX11Extras
is_x11_build = True
except ImportError:
is_x11_build = False
else:
is_x11_build = hasattr(QtGui, "QX11Info")
if is_x11_build:
display = os.environ.get('DISPLAY')
if display is None or not re.search(r':\d', display):
raise RuntimeError('Invalid DISPLAY variable')
qApp = QtWidgets.QApplication([b"matplotlib"])
qApp.lastWindowClosed.connect(qApp.quit)
else:
qApp = app
if is_pyqt5():
try:
qApp.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps)
qApp.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
except AttributeError:
pass
def _allow_super_init(__init__):
"""
Decorator for ``__init__`` to allow ``super().__init__`` on PyQt4/PySide2.
"""
if QT_API == "PyQt5":
return __init__
else:
# To work around lack of cooperative inheritance in PyQt4, PySide,
# and PySide2, when calling FigureCanvasQT.__init__, we temporarily
# patch QWidget.__init__ by a cooperative version, that first calls
# QWidget.__init__ with no additional arguments, and then finds the
# next class in the MRO with an __init__ that does support cooperative
# inheritance (i.e., not defined by the PyQt4, PySide, PySide2, sip
# or Shiboken packages), and manually call its `__init__`, once again
# passing the additional arguments.
qwidget_init = QtWidgets.QWidget.__init__
def cooperative_qwidget_init(self, *args, **kwargs):
qwidget_init(self)
mro = type(self).__mro__
next_coop_init = next(
cls for cls in mro[mro.index(QtWidgets.QWidget) + 1:]
if cls.__module__.split(".")[0] not in [
"PyQt4", "sip", "PySide", "PySide2", "Shiboken"])
next_coop_init.__init__(self, *args, **kwargs)
@functools.wraps(__init__)
def wrapper(self, **kwargs):
try:
QtWidgets.QWidget.__init__ = cooperative_qwidget_init
__init__(self, **kwargs)
finally:
# Restore __init__
QtWidgets.QWidget.__init__ = qwidget_init
return wrapper
class TimerQT(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses Qt timer events.
Attributes
----------
interval : int
The time between timer events in milliseconds. Default is 1000 ms.
single_shot : bool
Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
callbacks : list
Stores list of (func, args) tuples that will be called upon timer
events. This list can be manipulated directly, or the functions
`add_callback` and `remove_callback` can be used.
'''
def __init__(self, *args, **kwargs):
TimerBase.__init__(self, *args, **kwargs)
# Create a new timer and connect the timeout() signal to the
# _on_timer method.
self._timer = QtCore.QTimer()
self._timer.timeout.connect(self._on_timer)
self._timer_set_interval()
def _timer_set_single_shot(self):
self._timer.setSingleShot(self._single)
def _timer_set_interval(self):
self._timer.setInterval(self._interval)
def _timer_start(self):
self._timer.start()
def _timer_stop(self):
self._timer.stop()
class FigureCanvasQT(QtWidgets.QWidget, FigureCanvasBase):
# map Qt button codes to MouseEvent's ones:
buttond = {QtCore.Qt.LeftButton: 1,
QtCore.Qt.MidButton: 2,
QtCore.Qt.RightButton: 3,
# QtCore.Qt.XButton1: None,
# QtCore.Qt.XButton2: None,
}
@_allow_super_init
def __init__(self, figure):
_create_qApp()
super(FigureCanvasQT, self).__init__(figure=figure)
self.figure = figure
# We don't want to scale up the figure DPI more than once.
# Note, we don't handle a signal for changing DPI yet.
figure._original_dpi = figure.dpi
self._update_figure_dpi()
# In cases with mixed resolution displays, we need to be careful if the
# dpi_ratio changes - in this case we need to resize the canvas
# accordingly. We could watch for screenChanged events from Qt, but
# the issue is that we can't guarantee this will be emitted *before*
# the first paintEvent for the canvas, so instead we keep track of the
# dpi_ratio value here and in paintEvent we resize the canvas if
# needed.
self._dpi_ratio_prev = None
self._draw_pending = False
self._is_drawing = False
self._draw_rect_callback = lambda painter: None
self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
self.setMouseTracking(True)
self.resize(*self.get_width_height())
# Key auto-repeat enabled by default
self._keyautorepeat = True
palette = QtGui.QPalette(QtCore.Qt.white)
self.setPalette(palette)
def _update_figure_dpi(self):
dpi = self._dpi_ratio * self.figure._original_dpi
self.figure._set_dpi(dpi, forward=False)
@property
def _dpi_ratio(self):
# Not available on Qt4 or some older Qt5.
try:
# self.devicePixelRatio() returns 0 in rare cases
return self.devicePixelRatio() or 1
except AttributeError:
return 1
def _update_dpi(self):
# As described in __init__ above, we need to be careful in cases with
# mixed resolution displays if dpi_ratio is changing between painting
# events.
# Return whether we triggered a resizeEvent (and thus a paintEvent)
# from within this function.
if self._dpi_ratio != self._dpi_ratio_prev:
# We need to update the figure DPI.
self._update_figure_dpi()
self._dpi_ratio_prev = self._dpi_ratio
# The easiest way to resize the canvas is to emit a resizeEvent
# since we implement all the logic for resizing the canvas for
# that event.
event = QtGui.QResizeEvent(self.size(), self.size())
self.resizeEvent(event)
# resizeEvent triggers a paintEvent itself, so we exit this one
# (after making sure that the event is immediately handled).
return True
return False
def get_width_height(self):
w, h = FigureCanvasBase.get_width_height(self)
return int(w / self._dpi_ratio), int(h / self._dpi_ratio)
def enterEvent(self, event):
FigureCanvasBase.enter_notify_event(self, guiEvent=event)
def leaveEvent(self, event):
QtWidgets.QApplication.restoreOverrideCursor()
FigureCanvasBase.leave_notify_event(self, guiEvent=event)
def mouseEventCoords(self, pos):
"""Calculate mouse coordinates in physical pixels
        Qt5 uses logical pixels, but the figure is scaled to physical
pixels for rendering. Transform to physical pixels so that
all of the down-stream transforms work as expected.
Also, the origin is different and needs to be corrected.
"""
dpi_ratio = self._dpi_ratio
x = pos.x()
# flip y so y=0 is bottom of canvas
y = self.figure.bbox.height / dpi_ratio - pos.y()
return x * dpi_ratio, y * dpi_ratio
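    # Illustrative worked example (not part of the original file): with a
    # dpi_ratio of 2 and a figure bbox height of 800 physical pixels, a click
    # at the logical position (100, 50) maps to (200, (800 / 2 - 50) * 2),
    # i.e. (200, 700) in physical pixels with the origin at the bottom left.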
def mousePressEvent(self, event):
x, y = self.mouseEventCoords(event.pos())
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_press_event(self, x, y, button,
guiEvent=event)
def mouseDoubleClickEvent(self, event):
x, y = self.mouseEventCoords(event.pos())
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_press_event(self, x, y,
button, dblclick=True,
guiEvent=event)
def mouseMoveEvent(self, event):
x, y = self.mouseEventCoords(event)
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
def mouseReleaseEvent(self, event):
x, y = self.mouseEventCoords(event)
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_release_event(self, x, y, button,
guiEvent=event)
if is_pyqt5():
def wheelEvent(self, event):
x, y = self.mouseEventCoords(event)
# from QWheelEvent::delta doc
if event.pixelDelta().x() == 0 and event.pixelDelta().y() == 0:
steps = event.angleDelta().y() / 120
else:
steps = event.pixelDelta().y()
if steps:
FigureCanvasBase.scroll_event(
self, x, y, steps, guiEvent=event)
else:
def wheelEvent(self, event):
x = event.x()
            # flip y so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
# from QWheelEvent::delta doc
steps = event.delta() / 120
if event.orientation() == QtCore.Qt.Vertical:
FigureCanvasBase.scroll_event(
self, x, y, steps, guiEvent=event)
def keyPressEvent(self, event):
key = self._get_key(event)
if key is not None:
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
def keyReleaseEvent(self, event):
key = self._get_key(event)
if key is not None:
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
@property
def keyAutoRepeat(self):
"""
If True, enable auto-repeat for key events.
"""
return self._keyautorepeat
@keyAutoRepeat.setter
def keyAutoRepeat(self, val):
self._keyautorepeat = bool(val)
def resizeEvent(self, event):
# _dpi_ratio_prev will be set the first time the canvas is painted, and
        # the rendered buffer is useless before then anyway.
if self._dpi_ratio_prev is None:
return
w = event.size().width() * self._dpi_ratio
h = event.size().height() * self._dpi_ratio
dpival = self.figure.dpi
winch = w / dpival
hinch = h / dpival
self.figure.set_size_inches(winch, hinch, forward=False)
# pass back into Qt to let it finish
QtWidgets.QWidget.resizeEvent(self, event)
# emit our resize events
FigureCanvasBase.resize_event(self)
def sizeHint(self):
w, h = self.get_width_height()
return QtCore.QSize(w, h)
    def minimumSizeHint(self):
return QtCore.QSize(10, 10)
def _get_key(self, event):
if not self._keyautorepeat and event.isAutoRepeat():
return None
event_key = event.key()
event_mods = int(event.modifiers()) # actually a bitmask
# get names of the pressed modifier keys
# bit twiddling to pick out modifier keys from event_mods bitmask,
# if event_key is a MODIFIER, it should not be duplicated in mods
mods = [name for name, mod_key, qt_key in MODIFIER_KEYS
if event_key != qt_key and (event_mods & mod_key) == mod_key]
try:
# for certain keys (enter, left, backspace, etc) use a word for the
# key, rather than unicode
key = SPECIAL_KEYS[event_key]
except KeyError:
# unicode defines code points up to 0x0010ffff
            # QT will use Key_Codes larger than that for keyboard keys that
            # are not unicode characters (like multimedia keys)
# skip these
# if you really want them, you should add them to SPECIAL_KEYS
MAX_UNICODE = 0x10ffff
if event_key > MAX_UNICODE:
return None
key = unichr(event_key)
# qt delivers capitalized letters. fix capitalization
# note that capslock is ignored
if 'shift' in mods:
mods.remove('shift')
else:
key = key.lower()
mods.reverse()
return '+'.join(mods + [key])
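    # Illustrative examples (not part of the original file) of the strings
    # produced above: Ctrl+A yields 'ctrl+a', Ctrl+Shift+A yields 'ctrl+A'
    # (shift is dropped and the letter stays upper case), and F5 yields 'f5'
    # via SPECIAL_KEYS.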
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of
:class:`backend_bases.Timer`. This is useful for getting
periodic events through the backend's native event
loop. Implemented only for backends with GUIs.
Other Parameters
----------------
interval : scalar
Timer interval in milliseconds
callbacks : list
Sequence of (func, args, kwargs) where ``func(*args, **kwargs)``
will be executed by the timer every *interval*.
"""
return TimerQT(*args, **kwargs)
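    # Illustrative usage sketch (not part of the original file): a periodic
    # redraw driven by the Qt event loop; 'canvas' is a hypothetical
    # FigureCanvasQT instance.
    #
    #   timer = canvas.new_timer(interval=1000)
    #   timer.add_callback(canvas.draw_idle)
    #   timer.start()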
def flush_events(self):
qApp.processEvents()
def start_event_loop(self, timeout=0):
if hasattr(self, "_event_loop") and self._event_loop.isRunning():
raise RuntimeError("Event loop already running")
self._event_loop = event_loop = QtCore.QEventLoop()
if timeout:
timer = QtCore.QTimer.singleShot(timeout * 1000, event_loop.quit)
event_loop.exec_()
def stop_event_loop(self, event=None):
if hasattr(self, "_event_loop"):
self._event_loop.quit()
def draw(self):
"""Render the figure, and queue a request for a Qt draw.
"""
# The renderer draw is done here; delaying causes problems with code
# that uses the result of the draw() to update plot elements.
if self._is_drawing:
return
self._is_drawing = True
try:
super(FigureCanvasQT, self).draw()
finally:
self._is_drawing = False
self.update()
def draw_idle(self):
"""Queue redraw of the Agg buffer and request Qt paintEvent.
"""
# The Agg draw needs to be handled by the same thread matplotlib
# modifies the scene graph from. Post Agg draw request to the
# current event loop in order to ensure thread affinity and to
# accumulate multiple draw requests from event handling.
# TODO: queued signal connection might be safer than singleShot
if not (self._draw_pending or self._is_drawing):
self._draw_pending = True
QtCore.QTimer.singleShot(0, self._draw_idle)
def _draw_idle(self):
if self.height() < 0 or self.width() < 0:
self._draw_pending = False
if not self._draw_pending:
return
try:
self.draw()
except Exception:
# Uncaught exceptions are fatal for PyQt5, so catch them instead.
traceback.print_exc()
finally:
self._draw_pending = False
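    # Illustrative usage sketch (not part of the original file): code that
    # mutates the figure only needs to request an idle redraw; repeated
    # requests made before the event loop runs are coalesced into one draw.
    #
    #   line.set_ydata(new_values)        # 'line' and 'new_values' are
    #   line.figure.canvas.draw_idle()    # hypothetical names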
def drawRectangle(self, rect):
# Draw the zoom rectangle to the QPainter. _draw_rect_callback needs
# to be called at the end of paintEvent.
if rect is not None:
def _draw_rect_callback(painter):
pen = QtGui.QPen(QtCore.Qt.black, 1 / self._dpi_ratio,
QtCore.Qt.DotLine)
painter.setPen(pen)
painter.drawRect(*(pt / self._dpi_ratio for pt in rect))
else:
def _draw_rect_callback(painter):
return
self._draw_rect_callback = _draw_rect_callback
self.update()
class MainWindow(QtWidgets.QMainWindow):
closing = QtCore.Signal()
def closeEvent(self, event):
self.closing.emit()
QtWidgets.QMainWindow.closeEvent(self, event)
class FigureManagerQT(FigureManagerBase):
"""
Attributes
----------
canvas : `FigureCanvas`
The FigureCanvas instance
num : int or str
The Figure number
toolbar : qt.QToolBar
The qt.QToolBar
window : qt.QMainWindow
The qt.QMainWindow
"""
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
self.canvas = canvas
self.window = MainWindow()
self.window.closing.connect(canvas.close_event)
self.window.closing.connect(self._widgetclosed)
self.window.setWindowTitle("Figure %d" % num)
image = os.path.join(matplotlib.rcParams['datapath'],
'images', 'matplotlib.svg')
self.window.setWindowIcon(QtGui.QIcon(image))
# Give the keyboard focus to the figure instead of the
# manager; StrongFocus accepts both tab and click to focus and
        # will enable the canvas to process events without clicking.
        # ClickFocus only takes the focus if the window has been
        # clicked on.
        # http://qt-project.org/doc/qt-4.8/qt.html#FocusPolicy-enum or
# http://doc.qt.digia.com/qt/qt.html#FocusPolicy-enum
self.canvas.setFocusPolicy(QtCore.Qt.StrongFocus)
self.canvas.setFocus()
self.window._destroying = False
self.toolmanager = self._get_toolmanager()
self.toolbar = self._get_toolbar(self.canvas, self.window)
self.statusbar = None
if self.toolmanager:
backend_tools.add_tools_to_manager(self.toolmanager)
if self.toolbar:
backend_tools.add_tools_to_container(self.toolbar)
self.statusbar = StatusbarQt(self.window, self.toolmanager)
if self.toolbar is not None:
self.window.addToolBar(self.toolbar)
if not self.toolmanager:
# add text label to status bar
statusbar_label = QtWidgets.QLabel()
self.window.statusBar().addWidget(statusbar_label)
self.toolbar.message.connect(statusbar_label.setText)
tbs_height = self.toolbar.sizeHint().height()
else:
tbs_height = 0
# resize the main window so it will display the canvas with the
# requested size:
cs = canvas.sizeHint()
sbs = self.window.statusBar().sizeHint()
self._status_and_tool_height = tbs_height + sbs.height()
height = cs.height() + self._status_and_tool_height
self.window.resize(cs.width(), height)
self.window.setCentralWidget(self.canvas)
if matplotlib.is_interactive():
self.window.show()
self.canvas.draw_idle()
def notify_axes_change(fig):
# This will be called whenever the current axes is changed
if self.toolbar is not None:
self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
self.window.raise_()
def full_screen_toggle(self):
if self.window.isFullScreen():
self.window.showNormal()
else:
self.window.showFullScreen()
def _widgetclosed(self):
if self.window._destroying:
return
self.window._destroying = True
try:
Gcf.destroy(self.num)
except AttributeError:
pass
# It seems that when the python session is killed,
# Gcf can get destroyed before the Gcf.destroy
# line is run, leading to a useless AttributeError.
def _get_toolbar(self, canvas, parent):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2QT(canvas, parent, False)
elif matplotlib.rcParams['toolbar'] == 'toolmanager':
toolbar = ToolbarQt(self.toolmanager, self.window)
else:
toolbar = None
return toolbar
def _get_toolmanager(self):
if matplotlib.rcParams['toolbar'] == 'toolmanager':
toolmanager = ToolManager(self.canvas.figure)
else:
toolmanager = None
return toolmanager
def resize(self, width, height):
'set the canvas size in pixels'
self.window.resize(width, height + self._status_and_tool_height)
def show(self):
self.window.show()
self.window.activateWindow()
self.window.raise_()
def destroy(self, *args):
# check for qApp first, as PySide deletes it in its atexit handler
if QtWidgets.QApplication.instance() is None:
return
if self.window._destroying:
return
self.window._destroying = True
if self.toolbar:
self.toolbar.destroy()
self.window.close()
def get_window_title(self):
return six.text_type(self.window.windowTitle())
def set_window_title(self, title):
self.window.setWindowTitle(title)
class NavigationToolbar2QT(NavigationToolbar2, QtWidgets.QToolBar):
message = QtCore.Signal(str)
def __init__(self, canvas, parent, coordinates=True):
""" coordinates: should we show the coordinates on the right? """
self.canvas = canvas
self.parent = parent
self.coordinates = coordinates
self._actions = {}
"""A mapping of toolitem method names to their QActions"""
QtWidgets.QToolBar.__init__(self, parent)
NavigationToolbar2.__init__(self, canvas)
def _icon(self, name):
if is_pyqt5():
name = name.replace('.png', '_large.png')
pm = QtGui.QPixmap(os.path.join(self.basedir, name))
if hasattr(pm, 'setDevicePixelRatio'):
pm.setDevicePixelRatio(self.canvas._dpi_ratio)
return QtGui.QIcon(pm)
def _init_toolbar(self):
self.basedir = os.path.join(matplotlib.rcParams['datapath'], 'images')
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.addSeparator()
else:
a = self.addAction(self._icon(image_file + '.png'),
text, getattr(self, callback))
self._actions[callback] = a
if callback in ['zoom', 'pan']:
a.setCheckable(True)
if tooltip_text is not None:
a.setToolTip(tooltip_text)
if text == 'Subplots':
a = self.addAction(self._icon("qt4_editor_options.png"),
'Customize', self.edit_parameters)
a.setToolTip('Edit axis, curve and image parameters')
self.buttons = {}
# Add the x,y location widget at the right side of the toolbar
# The stretch factor is 1 which means any resizing of the toolbar
# will resize this label instead of the buttons.
if self.coordinates:
self.locLabel = QtWidgets.QLabel("", self)
self.locLabel.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
self.locLabel.setSizePolicy(
QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Ignored))
labelAction = self.addWidget(self.locLabel)
labelAction.setVisible(True)
# reference holder for subplots_adjust window
self.adj_window = None
# Esthetic adjustments - we need to set these explicitly in PyQt5
# otherwise the layout looks different - but we don't want to set it if
# not using HiDPI icons otherwise they look worse than before.
if is_pyqt5():
self.setIconSize(QtCore.QSize(24, 24))
self.layout().setSpacing(12)
if is_pyqt5():
# For some reason, self.setMinimumHeight doesn't seem to carry over to
# the actual sizeHint, so override it instead in order to make the
# aesthetic adjustments noted above.
def sizeHint(self):
size = super(NavigationToolbar2QT, self).sizeHint()
size.setHeight(max(48, size.height()))
return size
def edit_parameters(self):
allaxes = self.canvas.figure.get_axes()
if not allaxes:
QtWidgets.QMessageBox.warning(
self.parent, "Error", "There are no axes to edit.")
return
elif len(allaxes) == 1:
axes, = allaxes
else:
titles = []
for axes in allaxes:
name = (axes.get_title() or
" - ".join(filter(None, [axes.get_xlabel(),
axes.get_ylabel()])) or
"<anonymous {} (id: {:#x})>".format(
type(axes).__name__, id(axes)))
titles.append(name)
item, ok = QtWidgets.QInputDialog.getItem(
self.parent, 'Customize', 'Select axes:', titles, 0, False)
if ok:
axes = allaxes[titles.index(six.text_type(item))]
else:
return
figureoptions.figure_edit(axes, self)
def _update_buttons_checked(self):
# sync button checkstates to match active mode
self._actions['pan'].setChecked(self._active == 'PAN')
self._actions['zoom'].setChecked(self._active == 'ZOOM')
def pan(self, *args):
super(NavigationToolbar2QT, self).pan(*args)
self._update_buttons_checked()
def zoom(self, *args):
super(NavigationToolbar2QT, self).zoom(*args)
self._update_buttons_checked()
def set_message(self, s):
self.message.emit(s)
if self.coordinates:
self.locLabel.setText(s)
def set_cursor(self, cursor):
self.canvas.setCursor(cursord[cursor])
def draw_rubberband(self, event, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
rect = [int(val) for val in (x0, y0, x1 - x0, y1 - y0)]
self.canvas.drawRectangle(rect)
def remove_rubberband(self):
self.canvas.drawRectangle(None)
def configure_subplots(self):
image = os.path.join(matplotlib.rcParams['datapath'],
'images', 'matplotlib.png')
dia = SubplotToolQt(self.canvas.figure, self.parent)
dia.setWindowIcon(QtGui.QIcon(image))
dia.exec_()
def save_figure(self, *args):
filetypes = self.canvas.get_supported_filetypes_grouped()
sorted_filetypes = sorted(six.iteritems(filetypes))
default_filetype = self.canvas.get_default_filetype()
startpath = os.path.expanduser(
matplotlib.rcParams['savefig.directory'])
start = os.path.join(startpath, self.canvas.get_default_filename())
filters = []
selectedFilter = None
for name, exts in sorted_filetypes:
exts_list = " ".join(['*.%s' % ext for ext in exts])
filter = '%s (%s)' % (name, exts_list)
if default_filetype in exts:
selectedFilter = filter
filters.append(filter)
filters = ';;'.join(filters)
fname, filter = _getSaveFileName(self.parent,
"Choose a filename to save to",
start, filters, selectedFilter)
if fname:
# Save dir for next time, unless empty str (i.e., use cwd).
if startpath != "":
matplotlib.rcParams['savefig.directory'] = (
os.path.dirname(six.text_type(fname)))
try:
self.canvas.figure.savefig(six.text_type(fname))
except Exception as e:
QtWidgets.QMessageBox.critical(
self, "Error saving file", six.text_type(e),
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.NoButton)
class SubplotToolQt(UiSubplotTool):
def __init__(self, targetfig, parent):
UiSubplotTool.__init__(self, None)
self._figure = targetfig
for lower, higher in [("bottom", "top"), ("left", "right")]:
self._widgets[lower].valueChanged.connect(
lambda val: self._widgets[higher].setMinimum(val + .001))
self._widgets[higher].valueChanged.connect(
lambda val: self._widgets[lower].setMaximum(val - .001))
self._attrs = ["top", "bottom", "left", "right", "hspace", "wspace"]
self._defaults = {attr: vars(self._figure.subplotpars)[attr]
for attr in self._attrs}
# Set values after setting the range callbacks, but before setting up
# the redraw callbacks.
self._reset()
for attr in self._attrs:
self._widgets[attr].valueChanged.connect(self._on_value_changed)
for action, method in [("Export values", self._export_values),
("Tight layout", self._tight_layout),
("Reset", self._reset),
("Close", self.close)]:
self._widgets[action].clicked.connect(method)
def _export_values(self):
# Explicitly round to 3 decimals (which is also the spinbox precision)
# to avoid numbers of the form 0.100...001.
dialog = QtWidgets.QDialog()
layout = QtWidgets.QVBoxLayout()
dialog.setLayout(layout)
text = QtWidgets.QPlainTextEdit()
text.setReadOnly(True)
layout.addWidget(text)
text.setPlainText(
",\n".join("{}={:.3}".format(attr, self._widgets[attr].value())
for attr in self._attrs))
# Adjust the height of the text widget to fit the whole text, plus
# some padding.
size = text.maximumSize()
size.setHeight(
QtGui.QFontMetrics(text.document().defaultFont())
.size(0, text.toPlainText()).height() + 20)
text.setMaximumSize(size)
dialog.exec_()
def _on_value_changed(self):
self._figure.subplots_adjust(**{attr: self._widgets[attr].value()
for attr in self._attrs})
self._figure.canvas.draw_idle()
def _tight_layout(self):
self._figure.tight_layout()
for attr in self._attrs:
widget = self._widgets[attr]
widget.blockSignals(True)
widget.setValue(vars(self._figure.subplotpars)[attr])
widget.blockSignals(False)
self._figure.canvas.draw_idle()
def _reset(self):
for attr, value in self._defaults.items():
self._widgets[attr].setValue(value)
class ToolbarQt(ToolContainerBase, QtWidgets.QToolBar):
def __init__(self, toolmanager, parent):
ToolContainerBase.__init__(self, toolmanager)
QtWidgets.QToolBar.__init__(self, parent)
self._toolitems = {}
self._groups = {}
self._last = None
@property
def _icon_extension(self):
if is_pyqt5():
return '_large.png'
return '.png'
def add_toolitem(
self, name, group, position, image_file, description, toggle):
button = QtWidgets.QToolButton(self)
button.setIcon(self._icon(image_file))
button.setText(name)
if description:
button.setToolTip(description)
def handler():
self.trigger_tool(name)
if toggle:
button.setCheckable(True)
button.toggled.connect(handler)
else:
button.clicked.connect(handler)
self._last = button
self._toolitems.setdefault(name, [])
self._add_to_group(group, name, button, position)
self._toolitems[name].append((button, handler))
def _add_to_group(self, group, name, button, position):
gr = self._groups.get(group, [])
if not gr:
sep = self.addSeparator()
gr.append(sep)
before = gr[position]
widget = self.insertWidget(before, button)
gr.insert(position, widget)
self._groups[group] = gr
def _icon(self, name):
pm = QtGui.QPixmap(name)
if hasattr(pm, 'setDevicePixelRatio'):
pm.setDevicePixelRatio(self.toolmanager.canvas._dpi_ratio)
return QtGui.QIcon(pm)
def toggle_toolitem(self, name, toggled):
if name not in self._toolitems:
return
for button, handler in self._toolitems[name]:
button.toggled.disconnect(handler)
button.setChecked(toggled)
button.toggled.connect(handler)
def remove_toolitem(self, name):
for button, handler in self._toolitems[name]:
button.setParent(None)
del self._toolitems[name]
class StatusbarQt(StatusbarBase, QtWidgets.QLabel):
def __init__(self, window, *args, **kwargs):
StatusbarBase.__init__(self, *args, **kwargs)
QtWidgets.QLabel.__init__(self)
window.statusBar().addWidget(self)
def set_message(self, s):
self.setText(s)
class ConfigureSubplotsQt(backend_tools.ConfigureSubplotsBase):
def trigger(self, *args):
image = os.path.join(matplotlib.rcParams['datapath'],
'images', 'matplotlib.png')
parent = self.canvas.manager.window
dia = SubplotToolQt(self.figure, parent)
dia.setWindowIcon(QtGui.QIcon(image))
dia.exec_()
class SaveFigureQt(backend_tools.SaveFigureBase):
def trigger(self, *args):
filetypes = self.canvas.get_supported_filetypes_grouped()
sorted_filetypes = sorted(six.iteritems(filetypes))
default_filetype = self.canvas.get_default_filetype()
startpath = os.path.expanduser(
matplotlib.rcParams['savefig.directory'])
start = os.path.join(startpath, self.canvas.get_default_filename())
filters = []
selectedFilter = None
for name, exts in sorted_filetypes:
exts_list = " ".join(['*.%s' % ext for ext in exts])
filter = '%s (%s)' % (name, exts_list)
if default_filetype in exts:
selectedFilter = filter
filters.append(filter)
filters = ';;'.join(filters)
parent = self.canvas.manager.window
fname, filter = _getSaveFileName(parent,
"Choose a filename to save to",
start, filters, selectedFilter)
if fname:
# Save dir for next time, unless empty str (i.e., use cwd).
if startpath != "":
matplotlib.rcParams['savefig.directory'] = (
os.path.dirname(six.text_type(fname)))
try:
self.canvas.figure.savefig(six.text_type(fname))
except Exception as e:
QtWidgets.QMessageBox.critical(
self, "Error saving file", six.text_type(e),
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.NoButton)
class SetCursorQt(backend_tools.SetCursorBase):
def set_cursor(self, cursor):
self.canvas.setCursor(cursord[cursor])
class RubberbandQt(backend_tools.RubberbandBase):
def draw_rubberband(self, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
rect = [int(val) for val in (x0, y0, x1 - x0, y1 - y0)]
self.canvas.drawRectangle(rect)
def remove_rubberband(self):
self.canvas.drawRectangle(None)
backend_tools.ToolSaveFigure = SaveFigureQt
backend_tools.ToolConfigureSubplots = ConfigureSubplotsQt
backend_tools.ToolSetCursor = SetCursorQt
backend_tools.ToolRubberband = RubberbandQt
def error_msg_qt(msg, parent=None):
if not isinstance(msg, six.string_types):
msg = ','.join(map(str, msg))
QtWidgets.QMessageBox.warning(None, "Matplotlib",
                                  msg, QtWidgets.QMessageBox.Ok)
def exception_handler(type, value, tb):
"""Handle uncaught exceptions
It does not catch SystemExit
"""
msg = ''
# get the filename attribute if available (for IOError)
if hasattr(value, 'filename') and value.filename is not None:
msg = value.filename + ': '
if hasattr(value, 'strerror') and value.strerror is not None:
msg += value.strerror
else:
msg += six.text_type(value)
if len(msg):
error_msg_qt(msg)
@_Backend.export
class _BackendQT5(_Backend):
FigureCanvas = FigureCanvasQT
FigureManager = FigureManagerQT
@staticmethod
def trigger_manager_draw(manager):
manager.canvas.draw_idle()
@staticmethod
def mainloop():
# allow KeyboardInterrupt exceptions to close the plot window.
signal.signal(signal.SIGINT, signal.SIG_DFL)
qApp.exec_()
| 36.712243
| 79
| 0.601495
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import functools
import os
import re
import signal
import sys
from six import unichr
import traceback
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, NavigationToolbar2,
TimerBase, cursors, ToolContainerBase, StatusbarBase)
import matplotlib.backends.qt_editor.figureoptions as figureoptions
from matplotlib.backends.qt_editor.formsubplottool import UiSubplotTool
from matplotlib.figure import Figure
from matplotlib.backend_managers import ToolManager
from matplotlib import backend_tools
from .qt_compat import (
QtCore, QtGui, QtWidgets, _getSaveFileName, is_pyqt5, __version__, QT_API)
backend_version = __version__
SPECIAL_KEYS = {QtCore.Qt.Key_Control: 'control',
QtCore.Qt.Key_Shift: 'shift',
QtCore.Qt.Key_Alt: 'alt',
QtCore.Qt.Key_Meta: 'super',
QtCore.Qt.Key_Return: 'enter',
QtCore.Qt.Key_Left: 'left',
QtCore.Qt.Key_Up: 'up',
QtCore.Qt.Key_Right: 'right',
QtCore.Qt.Key_Down: 'down',
QtCore.Qt.Key_Escape: 'escape',
QtCore.Qt.Key_F1: 'f1',
QtCore.Qt.Key_F2: 'f2',
QtCore.Qt.Key_F3: 'f3',
QtCore.Qt.Key_F4: 'f4',
QtCore.Qt.Key_F5: 'f5',
QtCore.Qt.Key_F6: 'f6',
QtCore.Qt.Key_F7: 'f7',
QtCore.Qt.Key_F8: 'f8',
QtCore.Qt.Key_F9: 'f9',
QtCore.Qt.Key_F10: 'f10',
QtCore.Qt.Key_F11: 'f11',
QtCore.Qt.Key_F12: 'f12',
QtCore.Qt.Key_Home: 'home',
QtCore.Qt.Key_End: 'end',
QtCore.Qt.Key_PageUp: 'pageup',
QtCore.Qt.Key_PageDown: 'pagedown',
QtCore.Qt.Key_Tab: 'tab',
QtCore.Qt.Key_Backspace: 'backspace',
QtCore.Qt.Key_Enter: 'enter',
QtCore.Qt.Key_Insert: 'insert',
QtCore.Qt.Key_Delete: 'delete',
QtCore.Qt.Key_Pause: 'pause',
QtCore.Qt.Key_SysReq: 'sysreq',
QtCore.Qt.Key_Clear: 'clear', }
SUPER = 0
ALT = 1
CTRL = 2
SHIFT = 3
MODIFIER_KEYS = [('super', QtCore.Qt.MetaModifier, QtCore.Qt.Key_Meta),
('alt', QtCore.Qt.AltModifier, QtCore.Qt.Key_Alt),
('ctrl', QtCore.Qt.ControlModifier, QtCore.Qt.Key_Control),
('shift', QtCore.Qt.ShiftModifier, QtCore.Qt.Key_Shift),
]
if sys.platform == 'darwin':
SPECIAL_KEYS.update({QtCore.Qt.Key_Control: 'cmd', QtCore.Qt.Key_Meta: 'control',
})
MODIFIER_KEYS[0] = ('cmd', QtCore.Qt.ControlModifier,
QtCore.Qt.Key_Control)
MODIFIER_KEYS[2] = ('ctrl', QtCore.Qt.MetaModifier,
QtCore.Qt.Key_Meta)
cursord = {
cursors.MOVE: QtCore.Qt.SizeAllCursor,
cursors.HAND: QtCore.Qt.PointingHandCursor,
cursors.POINTER: QtCore.Qt.ArrowCursor,
cursors.SELECT_REGION: QtCore.Qt.CrossCursor,
cursors.WAIT: QtCore.Qt.WaitCursor,
}
qApp = None
def _create_qApp():
global qApp
if qApp is None:
app = QtWidgets.QApplication.instance()
if app is None:
if is_pyqt5():
try:
from PyQt5 import QtX11Extras
is_x11_build = True
except ImportError:
is_x11_build = False
else:
is_x11_build = hasattr(QtGui, "QX11Info")
if is_x11_build:
display = os.environ.get('DISPLAY')
if display is None or not re.search(r':\d', display):
raise RuntimeError('Invalid DISPLAY variable')
qApp = QtWidgets.QApplication([b"matplotlib"])
qApp.lastWindowClosed.connect(qApp.quit)
else:
qApp = app
if is_pyqt5():
try:
qApp.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps)
qApp.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
except AttributeError:
pass
def _allow_super_init(__init__):
if QT_API == "PyQt5":
return __init__
else:
qwidget_init = QtWidgets.QWidget.__init__
def cooperative_qwidget_init(self, *args, **kwargs):
qwidget_init(self)
mro = type(self).__mro__
next_coop_init = next(
cls for cls in mro[mro.index(QtWidgets.QWidget) + 1:]
if cls.__module__.split(".")[0] not in [
"PyQt4", "sip", "PySide", "PySide2", "Shiboken"])
next_coop_init.__init__(self, *args, **kwargs)
@functools.wraps(__init__)
def wrapper(self, **kwargs):
try:
QtWidgets.QWidget.__init__ = cooperative_qwidget_init
__init__(self, **kwargs)
finally:
QtWidgets.QWidget.__init__ = qwidget_init
return wrapper
class TimerQT(TimerBase):
def __init__(self, *args, **kwargs):
TimerBase.__init__(self, *args, **kwargs)
self._timer = QtCore.QTimer()
self._timer.timeout.connect(self._on_timer)
self._timer_set_interval()
def _timer_set_single_shot(self):
self._timer.setSingleShot(self._single)
def _timer_set_interval(self):
self._timer.setInterval(self._interval)
def _timer_start(self):
self._timer.start()
def _timer_stop(self):
self._timer.stop()
class FigureCanvasQT(QtWidgets.QWidget, FigureCanvasBase):
buttond = {QtCore.Qt.LeftButton: 1,
QtCore.Qt.MidButton: 2,
QtCore.Qt.RightButton: 3,
# QtCore.Qt.XButton1: None,
# QtCore.Qt.XButton2: None,
}
@_allow_super_init
def __init__(self, figure):
_create_qApp()
super(FigureCanvasQT, self).__init__(figure=figure)
self.figure = figure
# We don't want to scale up the figure DPI more than once.
figure._original_dpi = figure.dpi
self._update_figure_dpi()
        # In cases with mixed resolution displays, we need to be careful if the
        # dpi_ratio changes - in this case we need to resize the canvas
        # accordingly. We could watch for screenChanged events from Qt, but
        # the issue is that we can't guarantee this will be emitted *before*
        # the first paintEvent for the canvas, so instead we keep track of the
        # dpi_ratio value here and resize the canvas in paintEvent if it has
        # changed.
self._dpi_ratio_prev = None
self._draw_pending = False
self._is_drawing = False
self._draw_rect_callback = lambda painter: None
self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
self.setMouseTracking(True)
self.resize(*self.get_width_height())
self._keyautorepeat = True
palette = QtGui.QPalette(QtCore.Qt.white)
self.setPalette(palette)
def _update_figure_dpi(self):
dpi = self._dpi_ratio * self.figure._original_dpi
self.figure._set_dpi(dpi, forward=False)
@property
def _dpi_ratio(self):
try:
return self.devicePixelRatio() or 1
except AttributeError:
return 1
def _update_dpi(self):
if self._dpi_ratio != self._dpi_ratio_prev:
self._update_figure_dpi()
self._dpi_ratio_prev = self._dpi_ratio
event = QtGui.QResizeEvent(self.size(), self.size())
self.resizeEvent(event)
return True
return False
def get_width_height(self):
w, h = FigureCanvasBase.get_width_height(self)
return int(w / self._dpi_ratio), int(h / self._dpi_ratio)
def enterEvent(self, event):
FigureCanvasBase.enter_notify_event(self, guiEvent=event)
def leaveEvent(self, event):
QtWidgets.QApplication.restoreOverrideCursor()
FigureCanvasBase.leave_notify_event(self, guiEvent=event)
def mouseEventCoords(self, pos):
dpi_ratio = self._dpi_ratio
x = pos.x()
y = self.figure.bbox.height / dpi_ratio - pos.y()
return x * dpi_ratio, y * dpi_ratio
def mousePressEvent(self, event):
x, y = self.mouseEventCoords(event.pos())
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_press_event(self, x, y, button,
guiEvent=event)
def mouseDoubleClickEvent(self, event):
x, y = self.mouseEventCoords(event.pos())
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_press_event(self, x, y,
button, dblclick=True,
guiEvent=event)
def mouseMoveEvent(self, event):
x, y = self.mouseEventCoords(event)
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
def mouseReleaseEvent(self, event):
x, y = self.mouseEventCoords(event)
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_release_event(self, x, y, button,
guiEvent=event)
if is_pyqt5():
def wheelEvent(self, event):
x, y = self.mouseEventCoords(event)
if event.pixelDelta().x() == 0 and event.pixelDelta().y() == 0:
steps = event.angleDelta().y() / 120
else:
steps = event.pixelDelta().y()
if steps:
FigureCanvasBase.scroll_event(
self, x, y, steps, guiEvent=event)
else:
def wheelEvent(self, event):
x = event.x()
y = self.figure.bbox.height - event.y()
steps = event.delta() / 120
if event.orientation() == QtCore.Qt.Vertical:
FigureCanvasBase.scroll_event(
self, x, y, steps, guiEvent=event)
def keyPressEvent(self, event):
key = self._get_key(event)
if key is not None:
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
def keyReleaseEvent(self, event):
key = self._get_key(event)
if key is not None:
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
@property
def keyAutoRepeat(self):
return self._keyautorepeat
@keyAutoRepeat.setter
def keyAutoRepeat(self, val):
self._keyautorepeat = bool(val)
def resizeEvent(self, event):
if self._dpi_ratio_prev is None:
return
w = event.size().width() * self._dpi_ratio
h = event.size().height() * self._dpi_ratio
dpival = self.figure.dpi
winch = w / dpival
hinch = h / dpival
self.figure.set_size_inches(winch, hinch, forward=False)
QtWidgets.QWidget.resizeEvent(self, event)
FigureCanvasBase.resize_event(self)
def sizeHint(self):
w, h = self.get_width_height()
return QtCore.QSize(w, h)
    def minimumSizeHint(self):
return QtCore.QSize(10, 10)
def _get_key(self, event):
if not self._keyautorepeat and event.isAutoRepeat():
return None
event_key = event.key()
event_mods = int(event.modifiers())
mods = [name for name, mod_key, qt_key in MODIFIER_KEYS
if event_key != qt_key and (event_mods & mod_key) == mod_key]
try:
key = SPECIAL_KEYS[event_key]
except KeyError:
MAX_UNICODE = 0x10ffff
if event_key > MAX_UNICODE:
return None
key = unichr(event_key)
if 'shift' in mods:
mods.remove('shift')
else:
key = key.lower()
mods.reverse()
return '+'.join(mods + [key])
def new_timer(self, *args, **kwargs):
return TimerQT(*args, **kwargs)
def flush_events(self):
qApp.processEvents()
def start_event_loop(self, timeout=0):
if hasattr(self, "_event_loop") and self._event_loop.isRunning():
raise RuntimeError("Event loop already running")
self._event_loop = event_loop = QtCore.QEventLoop()
if timeout:
timer = QtCore.QTimer.singleShot(timeout * 1000, event_loop.quit)
event_loop.exec_()
def stop_event_loop(self, event=None):
if hasattr(self, "_event_loop"):
self._event_loop.quit()
def draw(self):
if self._is_drawing:
return
self._is_drawing = True
try:
super(FigureCanvasQT, self).draw()
finally:
self._is_drawing = False
self.update()
def draw_idle(self):
if not (self._draw_pending or self._is_drawing):
self._draw_pending = True
QtCore.QTimer.singleShot(0, self._draw_idle)
def _draw_idle(self):
if self.height() < 0 or self.width() < 0:
self._draw_pending = False
if not self._draw_pending:
return
try:
self.draw()
except Exception:
traceback.print_exc()
finally:
self._draw_pending = False
def drawRectangle(self, rect):
if rect is not None:
def _draw_rect_callback(painter):
pen = QtGui.QPen(QtCore.Qt.black, 1 / self._dpi_ratio,
QtCore.Qt.DotLine)
painter.setPen(pen)
painter.drawRect(*(pt / self._dpi_ratio for pt in rect))
else:
def _draw_rect_callback(painter):
return
self._draw_rect_callback = _draw_rect_callback
self.update()
class MainWindow(QtWidgets.QMainWindow):
closing = QtCore.Signal()
def closeEvent(self, event):
self.closing.emit()
QtWidgets.QMainWindow.closeEvent(self, event)
class FigureManagerQT(FigureManagerBase):
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
self.canvas = canvas
self.window = MainWindow()
self.window.closing.connect(canvas.close_event)
self.window.closing.connect(self._widgetclosed)
self.window.setWindowTitle("Figure %d" % num)
image = os.path.join(matplotlib.rcParams['datapath'],
'images', 'matplotlib.svg')
self.window.setWindowIcon(QtGui.QIcon(image))
self.canvas.setFocusPolicy(QtCore.Qt.StrongFocus)
self.canvas.setFocus()
self.window._destroying = False
self.toolmanager = self._get_toolmanager()
self.toolbar = self._get_toolbar(self.canvas, self.window)
self.statusbar = None
if self.toolmanager:
backend_tools.add_tools_to_manager(self.toolmanager)
if self.toolbar:
backend_tools.add_tools_to_container(self.toolbar)
self.statusbar = StatusbarQt(self.window, self.toolmanager)
if self.toolbar is not None:
self.window.addToolBar(self.toolbar)
if not self.toolmanager:
statusbar_label = QtWidgets.QLabel()
self.window.statusBar().addWidget(statusbar_label)
self.toolbar.message.connect(statusbar_label.setText)
tbs_height = self.toolbar.sizeHint().height()
else:
tbs_height = 0
cs = canvas.sizeHint()
sbs = self.window.statusBar().sizeHint()
self._status_and_tool_height = tbs_height + sbs.height()
height = cs.height() + self._status_and_tool_height
self.window.resize(cs.width(), height)
self.window.setCentralWidget(self.canvas)
if matplotlib.is_interactive():
self.window.show()
self.canvas.draw_idle()
def notify_axes_change(fig):
if self.toolbar is not None:
self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
self.window.raise_()
def full_screen_toggle(self):
if self.window.isFullScreen():
self.window.showNormal()
else:
self.window.showFullScreen()
def _widgetclosed(self):
if self.window._destroying:
return
self.window._destroying = True
try:
Gcf.destroy(self.num)
except AttributeError:
pass
def _get_toolbar(self, canvas, parent):
if matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2QT(canvas, parent, False)
elif matplotlib.rcParams['toolbar'] == 'toolmanager':
toolbar = ToolbarQt(self.toolmanager, self.window)
else:
toolbar = None
return toolbar
def _get_toolmanager(self):
if matplotlib.rcParams['toolbar'] == 'toolmanager':
toolmanager = ToolManager(self.canvas.figure)
else:
toolmanager = None
return toolmanager
def resize(self, width, height):
self.window.resize(width, height + self._status_and_tool_height)
def show(self):
self.window.show()
self.window.activateWindow()
self.window.raise_()
def destroy(self, *args):
if QtWidgets.QApplication.instance() is None:
return
if self.window._destroying:
return
self.window._destroying = True
if self.toolbar:
self.toolbar.destroy()
self.window.close()
def get_window_title(self):
return six.text_type(self.window.windowTitle())
def set_window_title(self, title):
self.window.setWindowTitle(title)
class NavigationToolbar2QT(NavigationToolbar2, QtWidgets.QToolBar):
message = QtCore.Signal(str)
def __init__(self, canvas, parent, coordinates=True):
self.canvas = canvas
self.parent = parent
self.coordinates = coordinates
self._actions = {}
QtWidgets.QToolBar.__init__(self, parent)
NavigationToolbar2.__init__(self, canvas)
def _icon(self, name):
if is_pyqt5():
name = name.replace('.png', '_large.png')
pm = QtGui.QPixmap(os.path.join(self.basedir, name))
if hasattr(pm, 'setDevicePixelRatio'):
pm.setDevicePixelRatio(self.canvas._dpi_ratio)
return QtGui.QIcon(pm)
def _init_toolbar(self):
self.basedir = os.path.join(matplotlib.rcParams['datapath'], 'images')
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.addSeparator()
else:
a = self.addAction(self._icon(image_file + '.png'),
text, getattr(self, callback))
self._actions[callback] = a
if callback in ['zoom', 'pan']:
a.setCheckable(True)
if tooltip_text is not None:
a.setToolTip(tooltip_text)
if text == 'Subplots':
a = self.addAction(self._icon("qt4_editor_options.png"),
'Customize', self.edit_parameters)
a.setToolTip('Edit axis, curve and image parameters')
self.buttons = {}
if self.coordinates:
self.locLabel = QtWidgets.QLabel("", self)
self.locLabel.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
self.locLabel.setSizePolicy(
QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Ignored))
labelAction = self.addWidget(self.locLabel)
labelAction.setVisible(True)
self.adj_window = None
        # Aesthetic adjustments - we need to set these explicitly in PyQt5
        # otherwise the layout looks different - but we don't want to set it if
        # not using HiDPI icons otherwise they look worse than before.
if is_pyqt5():
self.setIconSize(QtCore.QSize(24, 24))
self.layout().setSpacing(12)
if is_pyqt5():
        # For some reason, self.setMinimumHeight doesn't seem to carry over to
        # the actual sizeHint, so override it instead in order to make the
        # aesthetic adjustments noted above.
def sizeHint(self):
size = super(NavigationToolbar2QT, self).sizeHint()
size.setHeight(max(48, size.height()))
return size
def edit_parameters(self):
allaxes = self.canvas.figure.get_axes()
if not allaxes:
QtWidgets.QMessageBox.warning(
self.parent, "Error", "There are no axes to edit.")
return
elif len(allaxes) == 1:
axes, = allaxes
else:
titles = []
for axes in allaxes:
name = (axes.get_title() or
" - ".join(filter(None, [axes.get_xlabel(),
axes.get_ylabel()])) or
"<anonymous {} (id: {:#x})>".format(
type(axes).__name__, id(axes)))
titles.append(name)
item, ok = QtWidgets.QInputDialog.getItem(
self.parent, 'Customize', 'Select axes:', titles, 0, False)
if ok:
axes = allaxes[titles.index(six.text_type(item))]
else:
return
figureoptions.figure_edit(axes, self)
def _update_buttons_checked(self):
self._actions['pan'].setChecked(self._active == 'PAN')
self._actions['zoom'].setChecked(self._active == 'ZOOM')
def pan(self, *args):
super(NavigationToolbar2QT, self).pan(*args)
self._update_buttons_checked()
def zoom(self, *args):
super(NavigationToolbar2QT, self).zoom(*args)
self._update_buttons_checked()
def set_message(self, s):
self.message.emit(s)
if self.coordinates:
self.locLabel.setText(s)
def set_cursor(self, cursor):
self.canvas.setCursor(cursord[cursor])
def draw_rubberband(self, event, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
rect = [int(val) for val in (x0, y0, x1 - x0, y1 - y0)]
self.canvas.drawRectangle(rect)
def remove_rubberband(self):
self.canvas.drawRectangle(None)
def configure_subplots(self):
image = os.path.join(matplotlib.rcParams['datapath'],
'images', 'matplotlib.png')
dia = SubplotToolQt(self.canvas.figure, self.parent)
dia.setWindowIcon(QtGui.QIcon(image))
dia.exec_()
def save_figure(self, *args):
filetypes = self.canvas.get_supported_filetypes_grouped()
sorted_filetypes = sorted(six.iteritems(filetypes))
default_filetype = self.canvas.get_default_filetype()
startpath = os.path.expanduser(
matplotlib.rcParams['savefig.directory'])
start = os.path.join(startpath, self.canvas.get_default_filename())
filters = []
selectedFilter = None
for name, exts in sorted_filetypes:
exts_list = " ".join(['*.%s' % ext for ext in exts])
filter = '%s (%s)' % (name, exts_list)
if default_filetype in exts:
selectedFilter = filter
filters.append(filter)
filters = ';;'.join(filters)
fname, filter = _getSaveFileName(self.parent,
"Choose a filename to save to",
start, filters, selectedFilter)
if fname:
if startpath != "":
matplotlib.rcParams['savefig.directory'] = (
os.path.dirname(six.text_type(fname)))
try:
self.canvas.figure.savefig(six.text_type(fname))
except Exception as e:
QtWidgets.QMessageBox.critical(
self, "Error saving file", six.text_type(e),
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.NoButton)
class SubplotToolQt(UiSubplotTool):
def __init__(self, targetfig, parent):
UiSubplotTool.__init__(self, None)
self._figure = targetfig
for lower, higher in [("bottom", "top"), ("left", "right")]:
            # Bind lower/higher per iteration; a bare closure would only see the
            # last ("left", "right") pair once the loop has finished.
            self._widgets[lower].valueChanged.connect(
                lambda val, higher=higher: self._widgets[higher].setMinimum(val + .001))
            self._widgets[higher].valueChanged.connect(
                lambda val, lower=lower: self._widgets[lower].setMaximum(val - .001))
self._attrs = ["top", "bottom", "left", "right", "hspace", "wspace"]
self._defaults = {attr: vars(self._figure.subplotpars)[attr]
for attr in self._attrs}
self._reset()
for attr in self._attrs:
self._widgets[attr].valueChanged.connect(self._on_value_changed)
for action, method in [("Export values", self._export_values),
("Tight layout", self._tight_layout),
("Reset", self._reset),
("Close", self.close)]:
self._widgets[action].clicked.connect(method)
def _export_values(self):
dialog = QtWidgets.QDialog()
layout = QtWidgets.QVBoxLayout()
dialog.setLayout(layout)
text = QtWidgets.QPlainTextEdit()
text.setReadOnly(True)
layout.addWidget(text)
text.setPlainText(
",\n".join("{}={:.3}".format(attr, self._widgets[attr].value())
for attr in self._attrs))
size = text.maximumSize()
size.setHeight(
QtGui.QFontMetrics(text.document().defaultFont())
.size(0, text.toPlainText()).height() + 20)
text.setMaximumSize(size)
dialog.exec_()
def _on_value_changed(self):
self._figure.subplots_adjust(**{attr: self._widgets[attr].value()
for attr in self._attrs})
self._figure.canvas.draw_idle()
def _tight_layout(self):
self._figure.tight_layout()
for attr in self._attrs:
widget = self._widgets[attr]
widget.blockSignals(True)
widget.setValue(vars(self._figure.subplotpars)[attr])
widget.blockSignals(False)
self._figure.canvas.draw_idle()
def _reset(self):
for attr, value in self._defaults.items():
self._widgets[attr].setValue(value)
class ToolbarQt(ToolContainerBase, QtWidgets.QToolBar):
def __init__(self, toolmanager, parent):
ToolContainerBase.__init__(self, toolmanager)
QtWidgets.QToolBar.__init__(self, parent)
self._toolitems = {}
self._groups = {}
self._last = None
@property
def _icon_extension(self):
if is_pyqt5():
return '_large.png'
return '.png'
def add_toolitem(
self, name, group, position, image_file, description, toggle):
button = QtWidgets.QToolButton(self)
button.setIcon(self._icon(image_file))
button.setText(name)
if description:
button.setToolTip(description)
def handler():
self.trigger_tool(name)
if toggle:
button.setCheckable(True)
button.toggled.connect(handler)
else:
button.clicked.connect(handler)
self._last = button
self._toolitems.setdefault(name, [])
self._add_to_group(group, name, button, position)
self._toolitems[name].append((button, handler))
def _add_to_group(self, group, name, button, position):
gr = self._groups.get(group, [])
if not gr:
sep = self.addSeparator()
gr.append(sep)
before = gr[position]
widget = self.insertWidget(before, button)
gr.insert(position, widget)
self._groups[group] = gr
def _icon(self, name):
pm = QtGui.QPixmap(name)
if hasattr(pm, 'setDevicePixelRatio'):
pm.setDevicePixelRatio(self.toolmanager.canvas._dpi_ratio)
return QtGui.QIcon(pm)
def toggle_toolitem(self, name, toggled):
if name not in self._toolitems:
return
for button, handler in self._toolitems[name]:
button.toggled.disconnect(handler)
button.setChecked(toggled)
button.toggled.connect(handler)
def remove_toolitem(self, name):
for button, handler in self._toolitems[name]:
button.setParent(None)
del self._toolitems[name]
class StatusbarQt(StatusbarBase, QtWidgets.QLabel):
def __init__(self, window, *args, **kwargs):
StatusbarBase.__init__(self, *args, **kwargs)
QtWidgets.QLabel.__init__(self)
window.statusBar().addWidget(self)
def set_message(self, s):
self.setText(s)
class ConfigureSubplotsQt(backend_tools.ConfigureSubplotsBase):
def trigger(self, *args):
image = os.path.join(matplotlib.rcParams['datapath'],
'images', 'matplotlib.png')
parent = self.canvas.manager.window
dia = SubplotToolQt(self.figure, parent)
dia.setWindowIcon(QtGui.QIcon(image))
dia.exec_()
class SaveFigureQt(backend_tools.SaveFigureBase):
def trigger(self, *args):
filetypes = self.canvas.get_supported_filetypes_grouped()
sorted_filetypes = sorted(six.iteritems(filetypes))
default_filetype = self.canvas.get_default_filetype()
startpath = os.path.expanduser(
matplotlib.rcParams['savefig.directory'])
start = os.path.join(startpath, self.canvas.get_default_filename())
filters = []
selectedFilter = None
for name, exts in sorted_filetypes:
exts_list = " ".join(['*.%s' % ext for ext in exts])
filter = '%s (%s)' % (name, exts_list)
if default_filetype in exts:
selectedFilter = filter
filters.append(filter)
filters = ';;'.join(filters)
parent = self.canvas.manager.window
fname, filter = _getSaveFileName(parent,
"Choose a filename to save to",
start, filters, selectedFilter)
if fname:
if startpath != "":
matplotlib.rcParams['savefig.directory'] = (
os.path.dirname(six.text_type(fname)))
try:
self.canvas.figure.savefig(six.text_type(fname))
except Exception as e:
QtWidgets.QMessageBox.critical(
self, "Error saving file", six.text_type(e),
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.NoButton)
class SetCursorQt(backend_tools.SetCursorBase):
def set_cursor(self, cursor):
self.canvas.setCursor(cursord[cursor])
class RubberbandQt(backend_tools.RubberbandBase):
def draw_rubberband(self, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
rect = [int(val) for val in (x0, y0, x1 - x0, y1 - y0)]
self.canvas.drawRectangle(rect)
def remove_rubberband(self):
self.canvas.drawRectangle(None)
backend_tools.ToolSaveFigure = SaveFigureQt
backend_tools.ToolConfigureSubplots = ConfigureSubplotsQt
backend_tools.ToolSetCursor = SetCursorQt
backend_tools.ToolRubberband = RubberbandQt
def error_msg_qt(msg, parent=None):
if not isinstance(msg, six.string_types):
msg = ','.join(map(str, msg))
QtWidgets.QMessageBox.warning(None, "Matplotlib",
msg, QtGui.QMessageBox.Ok)
def exception_handler(type, value, tb):
msg = ''
if hasattr(value, 'filename') and value.filename is not None:
msg = value.filename + ': '
if hasattr(value, 'strerror') and value.strerror is not None:
msg += value.strerror
else:
msg += six.text_type(value)
if len(msg):
error_msg_qt(msg)
@_Backend.export
class _BackendQT5(_Backend):
FigureCanvas = FigureCanvasQT
FigureManager = FigureManagerQT
@staticmethod
def trigger_manager_draw(manager):
manager.canvas.draw_idle()
@staticmethod
def mainloop():
signal.signal(signal.SIGINT, signal.SIG_DFL)
qApp.exec_()
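The draw_idle/_draw_idle pair above coalesces bursts of redraw requests into a single draw scheduled on the Qt event loop. A minimal, Qt-free sketch of the same coalescing idea (schedule_soon and Canvas below are hypothetical stand-ins for QtCore.QTimer.singleShot(0, ...) and FigureCanvasQT, not part of the backend):

```python
# Sketch of the redraw-coalescing pattern used by draw_idle above.
# "schedule_soon" is a hypothetical stand-in for QtCore.QTimer.singleShot(0, fn).
pending_calls = []

def schedule_soon(fn):
    pending_calls.append(fn)  # queued, run later by the "event loop"

class Canvas:
    def __init__(self):
        self._draw_pending = False
        self._is_drawing = False
        self.draw_count = 0

    def draw(self):
        self.draw_count += 1  # stands in for the expensive repaint

    def draw_idle(self):
        # Many calls in a row schedule only one real draw.
        if not (self._draw_pending or self._is_drawing):
            self._draw_pending = True
            schedule_soon(self._draw_idle)

    def _draw_idle(self):
        if not self._draw_pending:
            return
        try:
            self.draw()
        finally:
            self._draw_pending = False

canvas = Canvas()
for _ in range(5):
    canvas.draw_idle()         # five requests...
for fn in pending_calls:
    fn()                       # ...the "event loop" drains the queue
assert canvas.draw_count == 1  # ...but only one draw actually happened
```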
| true
| true
|
f70bf89b51c9499b4cc42a25db5b53866df60828
| 262
|
py
|
Python
|
src/baseClients.py
|
tokarzmaciej/testing-simple-app-online-shop
|
446d063585f50b96a57bf6e7c23d2042df9eecc0
|
[
"MIT"
] | null | null | null |
src/baseClients.py
|
tokarzmaciej/testing-simple-app-online-shop
|
446d063585f50b96a57bf6e7c23d2042df9eecc0
|
[
"MIT"
] | null | null | null |
src/baseClients.py
|
tokarzmaciej/testing-simple-app-online-shop
|
446d063585f50b96a57bf6e7c23d2042df9eecc0
|
[
"MIT"
] | null | null | null |
class ClientStorage:
def getAllClients(self):
pass
def postClient(self, name, surname, email):
pass
def delClient(self, id_client):
pass
def patchClient(self, id_client, new_name, new_surname, new_email):
pass
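ClientStorage above only declares the CRUD interface; every method is a stub. A minimal in-memory sketch of how it might be implemented (InMemoryClientStorage and its dict-based storage are assumptions for illustration, not part of the original project):

```python
# Hypothetical in-memory implementation of the ClientStorage interface above.
class InMemoryClientStorage(ClientStorage):
    def __init__(self):
        self._clients = {}   # id_client -> client dict
        self._next_id = 1

    def getAllClients(self):
        return list(self._clients.values())

    def postClient(self, name, surname, email):
        client = {"id": self._next_id, "name": name,
                  "surname": surname, "email": email}
        self._clients[self._next_id] = client
        self._next_id += 1
        return client

    def delClient(self, id_client):
        return self._clients.pop(id_client, None)

    def patchClient(self, id_client, new_name, new_surname, new_email):
        client = self._clients.get(id_client)
        if client is not None:
            client.update(name=new_name, surname=new_surname, email=new_email)
        return client
```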
| 18.714286
| 71
| 0.637405
|
class ClientStorage:
def getAllClients(self):
pass
def postClient(self, name, surname, email):
pass
def delClient(self, id_client):
pass
def patchClient(self, id_client, new_name, new_surname, new_email):
pass
| true
| true
|
f70bf8bff2b888f43bbda54839a20ead612a3997
| 453
|
py
|
Python
|
PythonChallenge/Ex06/06_02.py
|
YorkFish/git_study
|
6e023244daaa22e12b24e632e76a13e5066f2947
|
[
"MIT"
] | null | null | null |
PythonChallenge/Ex06/06_02.py
|
YorkFish/git_study
|
6e023244daaa22e12b24e632e76a13e5066f2947
|
[
"MIT"
] | null | null | null |
PythonChallenge/Ex06/06_02.py
|
YorkFish/git_study
|
6e023244daaa22e12b24e632e76a13e5066f2947
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# coding:utf-8
from zipfile import ZipFile
comments = []
filename = "90052"
channel = ZipFile("channel.zip", 'r')
while filename.isdigit():
filename += ".txt"
f = channel.open(filename, 'r')
line = f.readline()
f.close()
t = channel.getinfo(filename).comment
comments.append(str(t, encoding="utf-8")) # bytes -> str
filename = bytes.decode(line.split()[-1]) # bytes -> str
print(''.join(comments))
| 25.166667
| 61
| 0.637969
|
from zipfile import ZipFile
comments = []
filename = "90052"
channel = ZipFile("channel.zip", 'r')
while filename.isdigit():
filename += ".txt"
f = channel.open(filename, 'r')
line = f.readline()
f.close()
t = channel.getinfo(filename).comment
comments.append(str(t, encoding="utf-8")) filename = bytes.decode(line.split()[-1]) print(''.join(comments))
| true
| true
|
f70bf8e6fb1f119b8653e64d00e8152613bdc1fe
| 208
|
py
|
Python
|
pythran/tests/cases/fibo_seq.py
|
davidbrochart/pythran
|
24b6c8650fe99791a4091cbdc2c24686e86aa67c
|
[
"BSD-3-Clause"
] | 1,647
|
2015-01-13T01:45:38.000Z
|
2022-03-28T01:23:41.000Z
|
pythran/tests/cases/fibo_seq.py
|
davidbrochart/pythran
|
24b6c8650fe99791a4091cbdc2c24686e86aa67c
|
[
"BSD-3-Clause"
] | 1,116
|
2015-01-01T09:52:05.000Z
|
2022-03-18T21:06:40.000Z
|
pythran/tests/cases/fibo_seq.py
|
davidbrochart/pythran
|
24b6c8650fe99791a4091cbdc2c24686e86aa67c
|
[
"BSD-3-Clause"
] | 180
|
2015-02-12T02:47:28.000Z
|
2022-03-14T10:28:18.000Z
|
""" Nom recursive version of fibo. """
# pythran export fibo(int)
# runas fibo(7)
def fibo(n):
""" fibonaccie compuation. """
a, b = 1, 1
for _ in range(n):
a, b = a + b, a
return a
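A quick check of the indexing convention: with a, b = 1, 1 the loop yields 2, 3, 5, 8, 13, 21, 34, so fibo(7) matches the `runas fibo(7)` hint above (the check values are added here for illustration):

```python
# Sanity checks for the iterative fibo above (illustrative, not from the source).
assert fibo(7) == 34   # 1, 1 -> 2, 3, 5, 8, 13, 21, 34 after seven iterations
assert fibo(0) == 1    # zero iterations leave the initial value of a
```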
| 17.333333
| 38
| 0.533654
|
def fibo(n):
a, b = 1, 1
for _ in range(n):
a, b = a + b, a
return a
| true
| true
|
f70bf9092d547f667f305a39ab84d812eb782f20
| 5,060
|
py
|
Python
|
common/blockchain_util.py
|
vinthedark/snet-marketplace-service
|
66ed9d093b00f09d3e28ef4d86c4e4c125037d06
|
[
"MIT"
] | null | null | null |
common/blockchain_util.py
|
vinthedark/snet-marketplace-service
|
66ed9d093b00f09d3e28ef4d86c4e4c125037d06
|
[
"MIT"
] | null | null | null |
common/blockchain_util.py
|
vinthedark/snet-marketplace-service
|
66ed9d093b00f09d3e28ef4d86c4e4c125037d06
|
[
"MIT"
] | null | null | null |
import json
import uuid
from enum import Enum
import web3
from eth_account.messages import defunct_hash_message
from web3 import Web3
from common.logger import get_logger
logger = get_logger(__name__)
class ContractType(Enum):
REGISTRY = "REGISTRY"
MPE = "MPE"
RFAI = "RFAI"
class BlockChainUtil(object):
def __init__(self, provider_type, provider):
if provider_type == "HTTP_PROVIDER":
self.provider = Web3.HTTPProvider(provider)
elif provider_type == "WS_PROVIDER":
self.provider = web3.providers.WebsocketProvider(provider)
else:
raise Exception("Only HTTP_PROVIDER and WS_PROVIDER provider type are supported.")
self.web3_object = Web3(self.provider)
def load_contract(self, path):
with open(path) as f:
contract = json.load(f)
return contract
def read_contract_address(self, net_id, path, key):
contract = self.load_contract(path)
return Web3.toChecksumAddress(contract[str(net_id)][key])
def contract_instance(self, contract_abi, address):
return self.web3_object.eth.contract(abi=contract_abi, address=address)
def get_contract_instance(self, base_path, contract_name, net_id):
contract_network_path, contract_abi_path = self.get_contract_file_paths(base_path, contract_name)
contract_address = self.read_contract_address(net_id=net_id, path=contract_network_path,
key='address')
contract_abi = self.load_contract(contract_abi_path)
logger.debug(f"contract address is {contract_address}")
contract_instance = self.contract_instance(contract_abi=contract_abi, address=contract_address)
return contract_instance
def generate_signature(self, data_types, values, signer_key):
signer_key = "0x" + signer_key if not signer_key.startswith("0x") else signer_key
message = web3.Web3.soliditySha3(data_types, values)
signature = self.web3_object.eth.account.signHash(defunct_hash_message(message), signer_key)
return signature.signature.hex()
def generate_signature_bytes(self, data_types, values, signer_key):
signer_key = "0x" + signer_key if not signer_key.startswith("0x") else signer_key
message = web3.Web3.soliditySha3(data_types, values)
signature = self.web3_object.eth.account.signHash(defunct_hash_message(message), signer_key)
return bytes(signature.signature)
def get_nonce(self, address):
""" transaction count includes pending transaction also. """
nonce = self.web3_object.eth.getTransactionCount(address)
return nonce
def sign_transaction_with_private_key(self, private_key, transaction_object):
return self.web3_object.eth.account.signTransaction(transaction_object, private_key).rawTransaction
def create_transaction_object(self, *positional_inputs, method_name, address, contract_path, contract_address_path,
net_id):
nonce = self.get_nonce(address=address)
self.contract = self.load_contract(path=contract_path)
self.contract_address = self.read_contract_address(net_id=net_id, path=contract_address_path, key='address')
self.contract_instance = self.contract_instance(contract_abi=self.contract, address=self.contract_address)
print("gas_price == ", self.web3_object.eth.gasPrice)
print("nonce == ", nonce)
gas_price = 3 * (self.web3_object.eth.gasPrice)
transaction_object = getattr(self.contract_instance.functions, method_name)(
*positional_inputs).buildTransaction({
"from": address,
"nonce": nonce,
"gasPrice": gas_price,
"chainId": net_id
})
return transaction_object
def process_raw_transaction(self, raw_transaction):
return self.web3_object.eth.sendRawTransaction(raw_transaction).hex()
def create_account(self):
account = self.web3_object.eth.account.create(uuid.uuid4().hex)
return account.address, account.privateKey.hex()
def get_current_block_no(self):
return self.web3_object.eth.blockNumber
def get_transaction_receipt_from_blockchain(self, transaction_hash):
return self.web3_object.eth.getTransactionReceipt(transaction_hash)
def get_contract_file_paths(self, base_path, contract_name):
if contract_name == ContractType.REGISTRY.value:
json_file = "Registry.json"
elif contract_name == ContractType.MPE.value:
json_file = "MultiPartyEscrow.json"
elif contract_name == ContractType.RFAI.value:
json_file = "ServiceRequest.json"
else:
raise Exception("Invalid contract Type {}".format(contract_name))
contract_network_path = base_path + "/{}/{}".format("networks", json_file)
contract_abi_path = base_path + "/{}/{}".format("abi", json_file)
return contract_network_path, contract_abi_path
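A hedged usage sketch of BlockChainUtil; the node endpoint, signer key and payload below are placeholders for illustration and do not come from the original service code:

```python
# Hypothetical usage of the BlockChainUtil class above.
# The endpoint and key are placeholders - substitute real values before running.
if __name__ == "__main__":
    util = BlockChainUtil(provider_type="HTTP_PROVIDER",
                          provider="http://localhost:8545")
    print("current block:", util.get_current_block_no())
    # Sign an (order_id, amount) pair the same way the service methods would.
    signature = util.generate_signature(
        data_types=["string", "uint256"],
        values=["order-123", 1000],
        signer_key="<hex-encoded-private-key>")
    print("signature:", signature)
```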
| 42.166667
| 119
| 0.701186
|
import json
import uuid
from enum import Enum
import web3
from eth_account.messages import defunct_hash_message
from web3 import Web3
from common.logger import get_logger
logger = get_logger(__name__)
class ContractType(Enum):
REGISTRY = "REGISTRY"
MPE = "MPE"
RFAI = "RFAI"
class BlockChainUtil(object):
def __init__(self, provider_type, provider):
if provider_type == "HTTP_PROVIDER":
self.provider = Web3.HTTPProvider(provider)
elif provider_type == "WS_PROVIDER":
self.provider = web3.providers.WebsocketProvider(provider)
else:
raise Exception("Only HTTP_PROVIDER and WS_PROVIDER provider type are supported.")
self.web3_object = Web3(self.provider)
def load_contract(self, path):
with open(path) as f:
contract = json.load(f)
return contract
def read_contract_address(self, net_id, path, key):
contract = self.load_contract(path)
return Web3.toChecksumAddress(contract[str(net_id)][key])
def contract_instance(self, contract_abi, address):
return self.web3_object.eth.contract(abi=contract_abi, address=address)
def get_contract_instance(self, base_path, contract_name, net_id):
contract_network_path, contract_abi_path = self.get_contract_file_paths(base_path, contract_name)
contract_address = self.read_contract_address(net_id=net_id, path=contract_network_path,
key='address')
contract_abi = self.load_contract(contract_abi_path)
logger.debug(f"contract address is {contract_address}")
contract_instance = self.contract_instance(contract_abi=contract_abi, address=contract_address)
return contract_instance
def generate_signature(self, data_types, values, signer_key):
signer_key = "0x" + signer_key if not signer_key.startswith("0x") else signer_key
message = web3.Web3.soliditySha3(data_types, values)
signature = self.web3_object.eth.account.signHash(defunct_hash_message(message), signer_key)
return signature.signature.hex()
def generate_signature_bytes(self, data_types, values, signer_key):
signer_key = "0x" + signer_key if not signer_key.startswith("0x") else signer_key
message = web3.Web3.soliditySha3(data_types, values)
signature = self.web3_object.eth.account.signHash(defunct_hash_message(message), signer_key)
return bytes(signature.signature)
def get_nonce(self, address):
nonce = self.web3_object.eth.getTransactionCount(address)
return nonce
def sign_transaction_with_private_key(self, private_key, transaction_object):
return self.web3_object.eth.account.signTransaction(transaction_object, private_key).rawTransaction
def create_transaction_object(self, *positional_inputs, method_name, address, contract_path, contract_address_path,
net_id):
nonce = self.get_nonce(address=address)
self.contract = self.load_contract(path=contract_path)
self.contract_address = self.read_contract_address(net_id=net_id, path=contract_address_path, key='address')
self.contract_instance = self.contract_instance(contract_abi=self.contract, address=self.contract_address)
print("gas_price == ", self.web3_object.eth.gasPrice)
print("nonce == ", nonce)
gas_price = 3 * (self.web3_object.eth.gasPrice)
transaction_object = getattr(self.contract_instance.functions, method_name)(
*positional_inputs).buildTransaction({
"from": address,
"nonce": nonce,
"gasPrice": gas_price,
"chainId": net_id
})
return transaction_object
def process_raw_transaction(self, raw_transaction):
return self.web3_object.eth.sendRawTransaction(raw_transaction).hex()
def create_account(self):
account = self.web3_object.eth.account.create(uuid.uuid4().hex)
return account.address, account.privateKey.hex()
def get_current_block_no(self):
return self.web3_object.eth.blockNumber
def get_transaction_receipt_from_blockchain(self, transaction_hash):
return self.web3_object.eth.getTransactionReceipt(transaction_hash)
def get_contract_file_paths(self, base_path, contract_name):
if contract_name == ContractType.REGISTRY.value:
json_file = "Registry.json"
elif contract_name == ContractType.MPE.value:
json_file = "MultiPartyEscrow.json"
elif contract_name == ContractType.RFAI.value:
json_file = "ServiceRequest.json"
else:
raise Exception("Invalid contract Type {}".format(contract_name))
contract_network_path = base_path + "/{}/{}".format("networks", json_file)
contract_abi_path = base_path + "/{}/{}".format("abi", json_file)
return contract_network_path, contract_abi_path
| true
| true
|
f70bfa4b74f327eaff6e74deeff7234d2edf2d5a
| 4,717
|
py
|
Python
|
quantum/common/config.py
|
cuiwow/quantum
|
ce11b62046a0501e9fcd8442524d3c151d315dfb
|
[
"Apache-2.0"
] | 1
|
2019-04-11T10:27:47.000Z
|
2019-04-11T10:27:47.000Z
|
quantum/common/config.py
|
cuiwow/quantum
|
ce11b62046a0501e9fcd8442524d3c151d315dfb
|
[
"Apache-2.0"
] | null | null | null |
quantum/common/config.py
|
cuiwow/quantum
|
ce11b62046a0501e9fcd8442524d3c151d315dfb
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Quantum
"""
import os
from paste import deploy
from quantum.api.v2 import attributes
from quantum.common import utils
from quantum.openstack.common import cfg
from quantum.openstack.common import log as logging
from quantum.openstack.common import rpc
from quantum.version import version_info as quantum_version
LOG = logging.getLogger(__name__)
core_opts = [
cfg.StrOpt('bind_host', default='0.0.0.0',
help=_("The host IP to bind to")),
cfg.IntOpt('bind_port', default=9696,
help=_("The port to bind to")),
cfg.StrOpt('api_paste_config', default="api-paste.ini",
help=_("The API paste config file to use")),
cfg.StrOpt('api_extensions_path', default="",
help=_("The path for API extensions")),
cfg.StrOpt('policy_file', default="policy.json",
help=_("The policy file to use")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.StrOpt('core_plugin',
help=_("The core plugin Quantum will use")),
cfg.ListOpt('service_plugins', default=[],
help=_("The service plugins Quantum will use")),
cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00",
help=_("The base MAC address Quantum will use for VIFs")),
cfg.IntOpt('mac_generation_retries', default=16,
help=_("How many times Quantum will retry MAC generation")),
cfg.BoolOpt('allow_bulk', default=True,
help=_("Allow the usage of the bulk API")),
cfg.IntOpt('max_dns_nameservers', default=5,
help=_("Maximum number of DNS nameservers")),
cfg.IntOpt('max_subnet_host_routes', default=20,
help=_("Maximum number of host routes per subnet")),
cfg.IntOpt('dhcp_lease_duration', default=120,
help=_("DHCP lease duration")),
cfg.BoolOpt('allow_overlapping_ips', default=False,
help=_("Allow overlapping IP support in Quantum")),
cfg.StrOpt('host', default=utils.get_hostname(),
help=_("The hostname Quantum is running on")),
cfg.BoolOpt('force_gateway_on_subnet', default=False,
help=_("Ensure that configured gateway is on subnet")),
]
core_cli_opts = [
cfg.StrOpt('state_path', default='/var/lib/quantum'),
]
# Register the configuration options
cfg.CONF.register_opts(core_opts)
cfg.CONF.register_cli_opts(core_cli_opts)
# Ensure that the control exchange is set correctly
rpc.set_defaults(control_exchange='quantum')
def parse(args):
cfg.CONF(args=args, project='quantum',
version='%%prog %s' % quantum_version.version_string_with_vcs())
# Validate that the base_mac is of the correct format
msg = attributes._validate_regex(cfg.CONF.base_mac,
attributes.MAC_PATTERN)
if msg:
msg = _("Base MAC: %s") % msg
raise Exception(msg)
def setup_logging(conf):
"""
Sets up the logging options for a log with supplied name
:param conf: a cfg.ConfOpts object
"""
product_name = "quantum"
logging.setup(product_name)
log_root = logging.getLogger(product_name).logger
log_root.propagate = 0
LOG.info(_("Logging enabled!"))
def load_paste_app(app_name):
"""
Builds and returns a WSGI app from a paste config file.
:param app_name: Name of the application to load
:raises RuntimeError when config file cannot be located or application
cannot be loaded from config file
"""
config_path = os.path.abspath(cfg.CONF.find_file(
cfg.CONF.api_paste_config))
LOG.info(_("Config paste file: %s"), config_path)
try:
app = deploy.loadapp("config:%s" % config_path, name=app_name)
except (LookupError, ImportError):
msg = _("Unable to load %(app_name)s from "
"configuration file %(config_path)s.") % locals()
LOG.exception(msg)
raise RuntimeError(msg)
return app
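The helpers above are normally driven in three steps: parse the CLI/config arguments, set up logging, then build the WSGI app from the paste config. A minimal sketch of that sequence (the boot name and the 'quantum' app name are assumptions, the latter mirroring the usual api-paste.ini convention):

```python
# Hypothetical startup sequence using the helpers defined above.
import sys

def boot():
    parse(sys.argv[1:])               # populate cfg.CONF and validate base_mac
    setup_logging(cfg.CONF)           # product logger named "quantum"
    return load_paste_app("quantum")  # app name assumed from api-paste.ini
```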
| 36.007634
| 78
| 0.665889
|
import os
from paste import deploy
from quantum.api.v2 import attributes
from quantum.common import utils
from quantum.openstack.common import cfg
from quantum.openstack.common import log as logging
from quantum.openstack.common import rpc
from quantum.version import version_info as quantum_version
LOG = logging.getLogger(__name__)
core_opts = [
cfg.StrOpt('bind_host', default='0.0.0.0',
help=_("The host IP to bind to")),
cfg.IntOpt('bind_port', default=9696,
help=_("The port to bind to")),
cfg.StrOpt('api_paste_config', default="api-paste.ini",
help=_("The API paste config file to use")),
cfg.StrOpt('api_extensions_path', default="",
help=_("The path for API extensions")),
cfg.StrOpt('policy_file', default="policy.json",
help=_("The policy file to use")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.StrOpt('core_plugin',
help=_("The core plugin Quantum will use")),
cfg.ListOpt('service_plugins', default=[],
help=_("The service plugins Quantum will use")),
cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00",
help=_("The base MAC address Quantum will use for VIFs")),
cfg.IntOpt('mac_generation_retries', default=16,
help=_("How many times Quantum will retry MAC generation")),
cfg.BoolOpt('allow_bulk', default=True,
help=_("Allow the usage of the bulk API")),
cfg.IntOpt('max_dns_nameservers', default=5,
help=_("Maximum number of DNS nameservers")),
cfg.IntOpt('max_subnet_host_routes', default=20,
help=_("Maximum number of host routes per subnet")),
cfg.IntOpt('dhcp_lease_duration', default=120,
help=_("DHCP lease duration")),
cfg.BoolOpt('allow_overlapping_ips', default=False,
help=_("Allow overlapping IP support in Quantum")),
cfg.StrOpt('host', default=utils.get_hostname(),
help=_("The hostname Quantum is running on")),
cfg.BoolOpt('force_gateway_on_subnet', default=False,
help=_("Ensure that configured gateway is on subnet")),
]
core_cli_opts = [
cfg.StrOpt('state_path', default='/var/lib/quantum'),
]
cfg.CONF.register_opts(core_opts)
cfg.CONF.register_cli_opts(core_cli_opts)
rpc.set_defaults(control_exchange='quantum')
def parse(args):
cfg.CONF(args=args, project='quantum',
version='%%prog %s' % quantum_version.version_string_with_vcs())
msg = attributes._validate_regex(cfg.CONF.base_mac,
attributes.MAC_PATTERN)
if msg:
msg = _("Base MAC: %s") % msg
raise Exception(msg)
def setup_logging(conf):
product_name = "quantum"
logging.setup(product_name)
log_root = logging.getLogger(product_name).logger
log_root.propagate = 0
LOG.info(_("Logging enabled!"))
def load_paste_app(app_name):
config_path = os.path.abspath(cfg.CONF.find_file(
cfg.CONF.api_paste_config))
LOG.info(_("Config paste file: %s"), config_path)
try:
app = deploy.loadapp("config:%s" % config_path, name=app_name)
except (LookupError, ImportError):
msg = _("Unable to load %(app_name)s from "
"configuration file %(config_path)s.") % locals()
LOG.exception(msg)
raise RuntimeError(msg)
return app
| true
| true
|
f70bfbfd82da3d2f03a5deda6f9ab6cf8be40de4
| 529
|
py
|
Python
|
Code_Socke/test.py
|
Jugendhackt/Maladidea
|
dfee3f2ee6006c0d2bcb4117d62afb1404f4bdee
|
[
"MIT"
] | null | null | null |
Code_Socke/test.py
|
Jugendhackt/Maladidea
|
dfee3f2ee6006c0d2bcb4117d62afb1404f4bdee
|
[
"MIT"
] | null | null | null |
Code_Socke/test.py
|
Jugendhackt/Maladidea
|
dfee3f2ee6006c0d2bcb4117d62afb1404f4bdee
|
[
"MIT"
] | null | null | null |
import RPi.GPIO as G
import time as t
G.setmode(G.BCM)
G.setup(19, G.OUT)
G.setup(26, G.IN)# pull_up_down=G.PUD_UP)
G.setup(21, G.OUT)
G.setup(20, G.IN, pull_up_down=G.PUD_UP)
print("setup done")
G.output(21, True)
print("output on")
while True:
input_sensor = G.input(26)
if input_sensor == False:
print("sensor triggered")
G.output(19, True)
t.sleep(.5)
G.output(19, False)
input_taster = G.input(20)
if input_taster == False:
print("break")
break
G.cleanup()
| 20.346154
| 41
| 0.621928
|
import RPi.GPIO as G
import time as t
G.setmode(G.BCM)
G.setup(19, G.OUT)
G.setup(26, G.IN)
G.setup(21, G.OUT)
G.setup(20, G.IN, pull_up_down=G.PUD_UP)
print("setup done")
G.output(21, True)
print("output on")
while True:
input_sensor = G.input(26)
if input_sensor == False:
print("sensor triggered")
G.output(19, True)
t.sleep(.5)
G.output(19, False)
input_taster = G.input(20)
if input_taster == False:
print("break")
break
G.cleanup()
| true
| true
|
f70bfcff5507413149e7bbff65a7ae2fd88753fc
| 12,293
|
py
|
Python
|
postfinancecheckout/api/bank_account_service_api.py
|
pfpayments/python-sdk
|
b8ef159ea3c843a8d0361d1e0b122a9958adbcb4
|
[
"Apache-2.0"
] | 1
|
2022-03-08T12:51:53.000Z
|
2022-03-08T12:51:53.000Z
|
postfinancecheckout/api/bank_account_service_api.py
|
pfpayments/python-sdk
|
b8ef159ea3c843a8d0361d1e0b122a9958adbcb4
|
[
"Apache-2.0"
] | null | null | null |
postfinancecheckout/api/bank_account_service_api.py
|
pfpayments/python-sdk
|
b8ef159ea3c843a8d0361d1e0b122a9958adbcb4
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
from __future__ import absolute_import
import six
from postfinancecheckout.api_client import ApiClient
class BankAccountServiceApi:
def __init__(self, configuration):
self.api_client = ApiClient(configuration=configuration)
def count(self, space_id, **kwargs):
"""Count
Counts the number of items in the database as restricted by the given filter.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.count(space_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int space_id: (required)
:param EntityQueryFilter filter: The filter which restricts the entities which are used to calculate the count.
:return: int
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.count_with_http_info(space_id, **kwargs)
else:
(data) = self.count_with_http_info(space_id, **kwargs)
return data
def count_with_http_info(self, space_id, **kwargs):
"""Count
Counts the number of items in the database as restricted by the given filter.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.count_with_http_info(space_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int space_id: (required)
:param EntityQueryFilter filter: The filter which restricts the entities which are used to calculate the count.
:return: int
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['space_id', 'filter']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method count" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'space_id' is set
if ('space_id' not in params or
params['space_id'] is None):
raise ValueError("Missing the required parameter `space_id` when calling `count`")
collection_formats = {}
path_params = {}
query_params = []
if 'space_id' in params:
query_params.append(('spaceId', params['space_id']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'filter' in params:
body_params = params['filter']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=utf-8'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json;charset=utf-8'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/bank-account/count', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='int',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read(self, space_id, id, **kwargs):
"""Read
Reads the entity with the given 'id' and returns it.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read(space_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int space_id: (required)
:param int id: The ID of the bank account which should be returned. (required)
:return: BankAccount
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_with_http_info(space_id, id, **kwargs)
else:
(data) = self.read_with_http_info(space_id, id, **kwargs)
return data
def read_with_http_info(self, space_id, id, **kwargs):
"""Read
Reads the entity with the given 'id' and returns it.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_with_http_info(space_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int space_id: (required)
:param int id: The ID of the bank account which should be returned. (required)
:return: BankAccount
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['space_id', 'id']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'space_id' is set
if ('space_id' not in params or
params['space_id'] is None):
raise ValueError("Missing the required parameter `space_id` when calling `read`")
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `read`")
collection_formats = {}
path_params = {}
query_params = []
if 'space_id' in params:
query_params.append(('spaceId', params['space_id']))
if 'id' in params:
query_params.append(('id', params['id']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=utf-8'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['*/*'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/bank-account/read', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BankAccount',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search(self, space_id, query, **kwargs):
"""Search
Searches for the entities as specified by the given query.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search(space_id, query, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int space_id: (required)
:param EntityQuery query: The query restricts the bank accounts which are returned by the search. (required)
:return: list[BankAccount]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_with_http_info(space_id, query, **kwargs)
else:
(data) = self.search_with_http_info(space_id, query, **kwargs)
return data
def search_with_http_info(self, space_id, query, **kwargs):
"""Search
Searches for the entities as specified by the given query.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_with_http_info(space_id, query, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int space_id: (required)
:param EntityQuery query: The query restricts the bank accounts which are returned by the search. (required)
:return: list[BankAccount]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['space_id', 'query']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'space_id' is set
if ('space_id' not in params or
params['space_id'] is None):
raise ValueError("Missing the required parameter `space_id` when calling `search`")
# verify the required parameter 'query' is set
if ('query' not in params or
params['query'] is None):
raise ValueError("Missing the required parameter `query` when calling `search`")
collection_formats = {}
path_params = {}
query_params = []
if 'space_id' in params:
query_params.append(('spaceId', params['space_id']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'query' in params:
body_params = params['query']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=utf-8'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json;charset=utf-8'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/bank-account/search', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[BankAccount]',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
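A hedged usage sketch of BankAccountServiceApi; construction of the SDK Configuration object is not shown in this file, so it is left as an opaque `config` argument, and the id value is a placeholder:

```python
# Hypothetical usage of BankAccountServiceApi above.
# "config" must be whatever Configuration object ApiClient expects; its
# construction is omitted because it is not defined in this module.
def summarize_bank_accounts(config, space_id):
    api = BankAccountServiceApi(configuration=config)
    total = api.count(space_id)                          # no filter: count everything
    first = api.read(space_id, id=1) if total else None  # id=1 is a placeholder
    return total, first
```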
| 36.58631
| 119
| 0.602619
|
from __future__ import absolute_import
import six
from postfinancecheckout.api_client import ApiClient
class BankAccountServiceApi:
def __init__(self, configuration):
self.api_client = ApiClient(configuration=configuration)
def count(self, space_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.count_with_http_info(space_id, **kwargs)
else:
(data) = self.count_with_http_info(space_id, **kwargs)
return data
def count_with_http_info(self, space_id, **kwargs):
all_params = ['space_id', 'filter']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method count" % key
)
params[key] = val
del params['kwargs']
if ('space_id' not in params or
params['space_id'] is None):
raise ValueError("Missing the required parameter `space_id` when calling `count`")
collection_formats = {}
path_params = {}
query_params = []
if 'space_id' in params:
query_params.append(('spaceId', params['space_id']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'filter' in params:
body_params = params['filter']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=utf-8'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json;charset=utf-8'])
auth_settings = []
return self.api_client.call_api(
'/bank-account/count', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='int',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read(self, space_id, id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_with_http_info(space_id, id, **kwargs)
else:
(data) = self.read_with_http_info(space_id, id, **kwargs)
return data
def read_with_http_info(self, space_id, id, **kwargs):
all_params = ['space_id', 'id']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read" % key
)
params[key] = val
del params['kwargs']
if ('space_id' not in params or
params['space_id'] is None):
raise ValueError("Missing the required parameter `space_id` when calling `read`")
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `read`")
collection_formats = {}
path_params = {}
query_params = []
if 'space_id' in params:
query_params.append(('spaceId', params['space_id']))
if 'id' in params:
query_params.append(('id', params['id']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=utf-8'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['*/*'])
auth_settings = []
return self.api_client.call_api(
'/bank-account/read', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BankAccount',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search(self, space_id, query, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_with_http_info(space_id, query, **kwargs)
else:
(data) = self.search_with_http_info(space_id, query, **kwargs)
return data
def search_with_http_info(self, space_id, query, **kwargs):
all_params = ['space_id', 'query']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search" % key
)
params[key] = val
del params['kwargs']
if ('space_id' not in params or
params['space_id'] is None):
raise ValueError("Missing the required parameter `space_id` when calling `search`")
if ('query' not in params or
params['query'] is None):
raise ValueError("Missing the required parameter `query` when calling `search`")
collection_formats = {}
path_params = {}
query_params = []
if 'space_id' in params:
query_params.append(('spaceId', params['space_id']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'query' in params:
body_params = params['query']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=utf-8'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json;charset=utf-8'])
auth_settings = []
return self.api_client.call_api(
'/bank-account/search', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[BankAccount]',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| true
| true
|
f70bfd48b4dffc44103f3819522ad40aac7dc5c2
| 759
|
py
|
Python
|
assignments/a2/containerWithMostWater.py
|
jcdiv47/geekbang-algorithms
|
38dae85aeadb684b2c44945bd07a32cdede4ad5a
|
[
"MIT"
] | null | null | null |
assignments/a2/containerWithMostWater.py
|
jcdiv47/geekbang-algorithms
|
38dae85aeadb684b2c44945bd07a32cdede4ad5a
|
[
"MIT"
] | null | null | null |
assignments/a2/containerWithMostWater.py
|
jcdiv47/geekbang-algorithms
|
38dae85aeadb684b2c44945bd07a32cdede4ad5a
|
[
"MIT"
] | null | null | null |
import unittest
"""
Leetcode(https://leetcode.com/problems/container-with-most-water/solution/)
"""
def maxArea(height):
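    # Two-pointer sweep: the contained area is limited by the shorter of the two
    # lines, so after recording the current area we advance the pointer at the
    # shorter line; keeping it could never give a larger area at a smaller width.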
ans = 0
left, right = 0, len(height) - 1
while left < right:
ans = max(ans, min(height[left], height[right]) * (right - left))
if height[left] < height[right]:
left += 1
else:
right -= 1
return ans
class MyTestCase(unittest.TestCase):
def test1(self):
height = [4, 3, 2, 1, 4]
self.assertEqual(maxArea(height), 16)
def test2(self):
height = [1, 1]
self.assertEqual(maxArea(height), 1)
def test3(self):
height = [1, 2, 1]
self.assertEqual(maxArea(height), 2)
if __name__ == '__main__':
unittest.main()
| 21.083333
| 75
| 0.56917
|
import unittest
def maxArea(height):
ans = 0
left, right = 0, len(height) - 1
while left < right:
ans = max(ans, min(height[left], height[right]) * (right - left))
if height[left] < height[right]:
left += 1
else:
right -= 1
return ans
class MyTestCase(unittest.TestCase):
def test1(self):
height = [4, 3, 2, 1, 4]
self.assertEqual(maxArea(height), 16)
def test2(self):
height = [1, 1]
self.assertEqual(maxArea(height), 1)
def test3(self):
height = [1, 2, 1]
self.assertEqual(maxArea(height), 2)
if __name__ == '__main__':
unittest.main()
| true
| true
|
f70bfec06d09bcd09179f6c36f86fd581e0bfd1d
| 1,357
|
py
|
Python
|
test/test_flake8.py
|
Theosakamg/colcon-powershell
|
86657800695097ec4d5f1cd0035d15fd5cde2eb0
|
[
"Apache-2.0"
] | null | null | null |
test/test_flake8.py
|
Theosakamg/colcon-powershell
|
86657800695097ec4d5f1cd0035d15fd5cde2eb0
|
[
"Apache-2.0"
] | null | null | null |
test/test_flake8.py
|
Theosakamg/colcon-powershell
|
86657800695097ec4d5f1cd0035d15fd5cde2eb0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016-2018 Dirk Thomas
# Licensed under the Apache License, Version 2.0
import logging
from pathlib import Path
import sys
from flake8 import LOG
from flake8.api.legacy import get_style_guide
# avoid debug and info messages from flake8 internals
LOG.setLevel(logging.WARN)
def test_flake8():
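    # Note: the ignored D1xx codes come from the pydocstyle plugin and flag
    # missing docstrings (e.g. D100 for modules, D104 for packages); the test
    # style guide below relaxes more of them, since test helpers rarely need docstrings.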
style_guide = get_style_guide(
ignore=['D100', 'D104'],
show_source=True,
)
style_guide_tests = get_style_guide(
ignore=['D100', 'D101', 'D102', 'D103', 'D104', 'D105', 'D107'],
show_source=True,
)
stdout = sys.stdout
sys.stdout = sys.stderr
# implicitly calls report_errors()
report = style_guide.check_files([
str(Path(__file__).parents[1] / 'colcon_powershell'),
])
report_tests = style_guide_tests.check_files([
str(Path(__file__).parents[1] / 'test'),
])
sys.stdout = stdout
total_errors = report.total_errors + report_tests.total_errors
if total_errors: # pragma: no cover
# output summary with per-category counts
print()
report._application.formatter.show_statistics(report._stats)
print(
'flake8 reported {total_errors} errors'
.format_map(locals()), file=sys.stderr)
assert not report.total_errors, \
'flake8 reported {total_errors} errors'.format_map(locals())
| 28.270833
| 72
| 0.66986
|
import logging
from pathlib import Path
import sys
from flake8 import LOG
from flake8.api.legacy import get_style_guide
LOG.setLevel(logging.WARN)
def test_flake8():
style_guide = get_style_guide(
ignore=['D100', 'D104'],
show_source=True,
)
style_guide_tests = get_style_guide(
ignore=['D100', 'D101', 'D102', 'D103', 'D104', 'D105', 'D107'],
show_source=True,
)
stdout = sys.stdout
sys.stdout = sys.stderr
report = style_guide.check_files([
str(Path(__file__).parents[1] / 'colcon_powershell'),
])
report_tests = style_guide_tests.check_files([
str(Path(__file__).parents[1] / 'test'),
])
sys.stdout = stdout
total_errors = report.total_errors + report_tests.total_errors
    if total_errors:
        print()
report._application.formatter.show_statistics(report._stats)
print(
'flake8 reported {total_errors} errors'
.format_map(locals()), file=sys.stderr)
assert not report.total_errors, \
'flake8 reported {total_errors} errors'.format_map(locals())
| true
| true
|
f70bffe4d8ea3902244e43a68f75def149a1c37f
| 1,705
|
py
|
Python
|
lib/sim/tests/test_onebit_counter.py
|
pp-mo/bbc
|
33c20ab511a88a9e7236e82477fae3256d41e38a
|
[
"BSD-3-Clause"
] | 2
|
2020-10-01T09:05:01.000Z
|
2021-05-30T17:34:46.000Z
|
lib/sim/tests/test_onebit_counter.py
|
pp-mo/bbc
|
33c20ab511a88a9e7236e82477fae3256d41e38a
|
[
"BSD-3-Clause"
] | null | null | null |
lib/sim/tests/test_onebit_counter.py
|
pp-mo/bbc
|
33c20ab511a88a9e7236e82477fae3256d41e38a
|
[
"BSD-3-Clause"
] | null | null | null |
from sim.signal import Signal, SIG_UNDEF
from sim.sequencer import DEFAULT_SEQUENCER as SEQ
from sim.tests import okeq, okin, setsig, fails
from sim.device.arith import CounterOnebit
counter = CounterOnebit(
'c1b',
t_toggle_0_to_1=3.,
t_toggle_1_to_0=3.,
t_out_2_carry=1.,
t_clear_2_carry=2.,
t_clear_onoff=4.,
t_eor_onoff=2.)
din = Signal('d_in')
clr = Signal('clr')
ore = Signal('ore')
counter.connect('input', din)
counter.connect('clear', clr)
counter.connect('enable_or', ore)
din.trace()
clr.trace()
counter.output.trace()
counter.x_carry_out.trace()
COUNTER_CARRYOUTS_COUNT = 0
def carry_out_callback(time, signal):
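    # Connected to the counter's carry output below: each invocation records one
    # carry event, so the okeq checks can assert how many carries have occurred.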
global COUNTER_CARRYOUTS_COUNT
COUNTER_CARRYOUTS_COUNT += 1
counter.x_carry_out.add_connection(carry_out_callback)
SEQ.addall([
setsig(100.0, din, 1),
setsig(110.0, din, 1),
setsig(120.0, din, 1),
setsig(150.0, clr, 1),
setsig(160.0, clr, 0),
setsig(180.0, clr, 1),
setsig(181.0, ore, 1),
setsig(190.0, clr, 0),
setsig(200.0, din, 1),
setsig(210.0, din, 1),
setsig(220.0, din, 1),
setsig(223.0, clr, 0),
])
okeq(counter.output.state, 0)
okeq(COUNTER_CARRYOUTS_COUNT, 0)
SEQ.run(101.)
okeq(counter.output.state, SIG_UNDEF)
okeq(COUNTER_CARRYOUTS_COUNT, 0)
SEQ.run(109.)
okeq(counter.output.state, 1)
okeq(COUNTER_CARRYOUTS_COUNT, 0)
SEQ.run(111.)
okeq(counter.output.state, SIG_UNDEF)
okeq(COUNTER_CARRYOUTS_COUNT, 0)
SEQ.run(119.)
okeq(counter.output.state, 0)
okeq(COUNTER_CARRYOUTS_COUNT, 1)
SEQ.run()
# Check we get an error when clear active period is too short.
SEQ.addall([
setsig(250.0, clr, 1),
setsig(251.0, clr, 0)
])
with fails(ValueError):
SEQ.run()
| 20.792683
| 62
| 0.695015
|
from sim.signal import Signal, SIG_UNDEF
from sim.sequencer import DEFAULT_SEQUENCER as SEQ
from sim.tests import okeq, okin, setsig, fails
from sim.device.arith import CounterOnebit
counter = CounterOnebit(
'c1b',
t_toggle_0_to_1=3.,
t_toggle_1_to_0=3.,
t_out_2_carry=1.,
t_clear_2_carry=2.,
t_clear_onoff=4.,
t_eor_onoff=2.)
din = Signal('d_in')
clr = Signal('clr')
ore = Signal('ore')
counter.connect('input', din)
counter.connect('clear', clr)
counter.connect('enable_or', ore)
din.trace()
clr.trace()
counter.output.trace()
counter.x_carry_out.trace()
COUNTER_CARRYOUTS_COUNT = 0
def carry_out_callback(time, signal):
global COUNTER_CARRYOUTS_COUNT
COUNTER_CARRYOUTS_COUNT += 1
counter.x_carry_out.add_connection(carry_out_callback)
SEQ.addall([
setsig(100.0, din, 1),
setsig(110.0, din, 1),
setsig(120.0, din, 1),
setsig(150.0, clr, 1),
setsig(160.0, clr, 0),
setsig(180.0, clr, 1),
setsig(181.0, ore, 1),
setsig(190.0, clr, 0),
setsig(200.0, din, 1),
setsig(210.0, din, 1),
setsig(220.0, din, 1),
setsig(223.0, clr, 0),
])
okeq(counter.output.state, 0)
okeq(COUNTER_CARRYOUTS_COUNT, 0)
SEQ.run(101.)
okeq(counter.output.state, SIG_UNDEF)
okeq(COUNTER_CARRYOUTS_COUNT, 0)
SEQ.run(109.)
okeq(counter.output.state, 1)
okeq(COUNTER_CARRYOUTS_COUNT, 0)
SEQ.run(111.)
okeq(counter.output.state, SIG_UNDEF)
okeq(COUNTER_CARRYOUTS_COUNT, 0)
SEQ.run(119.)
okeq(counter.output.state, 0)
okeq(COUNTER_CARRYOUTS_COUNT, 1)
SEQ.run()
SEQ.addall([
setsig(250.0, clr, 1),
setsig(251.0, clr, 0)
])
with fails(ValueError):
SEQ.run()
| true
| true
|
f70c01b6d44231ba57c5bf3a9a4f685924e7a5d2
| 5,927
|
py
|
Python
|
server.py
|
project-anuvaad/OpenNMT-py
|
267d097b9e90d59709fe1c26ea8b8e2c43c755c9
|
[
"MIT"
] | null | null | null |
server.py
|
project-anuvaad/OpenNMT-py
|
267d097b9e90d59709fe1c26ea8b8e2c43c755c9
|
[
"MIT"
] | 29
|
2019-07-18T10:21:57.000Z
|
2019-10-24T11:41:59.000Z
|
server.py
|
project-anuvaad/OpenNMT-py
|
267d097b9e90d59709fe1c26ea8b8e2c43c755c9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import unicode_literals
import configargparse
import sys
from config.config import statusCode,benchmark_types, language_supported, file_location
import config.bleu_results as bleu_results
import tools.sp_enc_dec as sp
import ancillary_functions_anuvaad.ancillary_functions as ancillary_functions
import ancillary_functions_anuvaad.sc_preface_handler as sc_preface_handler
import ancillary_functions_anuvaad.handle_date_url as date_url_util
from flask import Flask, jsonify, request,send_file,abort,send_from_directory
from flask_cors import CORS
from onmt.translate import TranslationServer, ServerModelError
from itertools import repeat
from onmt.utils.logging import init_logger,logger,entry_exit_log,LOG_TAGS
from onmt.utils.misc import split_corpus
from onmt.translate.translator import build_translator
import os
import onmt.opts as opts
from onmt.utils.parse import ArgumentParser
from config.mongo_model import db,Benchmarks
import datetime
from kafka_utils.document_translator import doc_translator
import threading
import translation_util.translate_util as translate_util
import translation_util.interactive_translate as interactive_translation
from config.kafka_topics import consumer_topics,producer_topics,kafka_topic
STATUS_OK = "ok"
STATUS_ERROR = "error"
mongo_config_dir = "config/mongo_config.py"
IS_RUN_KAFKA = 'IS_RUN_KAFKA'
IS_RUN_KAFKA_DEFAULT_VALUE = False
bootstrap_server_boolean = os.environ.get(IS_RUN_KAFKA, IS_RUN_KAFKA_DEFAULT_VALUE)
def start(config_file,
url_root="/translator",
host="0.0.0.0",
port=3003,
debug=True):
def prefix_route(route_function, prefix='', mask='{0}{1}'):
def newroute(route, *args, **kwargs):
return route_function(mask.format(prefix, route), *args, **kwargs)
return newroute
app = Flask(__name__)
CORS(app)
app.config.from_pyfile(mongo_config_dir)
db.init_app(app)
app.route = prefix_route(app.route, url_root)
translation_server = TranslationServer()
translation_server.start(config_file)
def kafka_function():
logger.info('starting kafka from nmt-server on thread-1')
doc_translator(translation_server,[kafka_topic[0]['consumer'],kafka_topic[1]['consumer'],kafka_topic[2]['consumer']])
if bootstrap_server_boolean:
t1 = threading.Thread(target=kafka_function)
# t1.start()
@app.route('/models', methods=['GET'])
def get_models():
out = {}
try:
out['status'] = statusCode["SUCCESS"]
out['response_body'] = translation_server.list_models()
except:
out['status'] = statusCode["SYSTEM_ERR"]
logger.info("Unexpected error: %s"% sys.exc_info()[0])
return jsonify(out)
@app.route('/clone_model/<int:model_id>', methods=['POST'])
def clone_model(model_id):
out = {}
data = request.get_json(force=True)
timeout = -1
if 'timeout' in data:
timeout = data['timeout']
del data['timeout']
opt = data.get('opt', None)
try:
model_id, load_time = translation_server.clone_model(
model_id, opt, timeout)
except ServerModelError as e:
out['status'] = STATUS_ERROR
out['error'] = str(e)
else:
out['status'] = STATUS_OK
out['model_id'] = model_id
out['load_time'] = load_time
return jsonify(out)
@app.route('/unload_model/<int:model_id>', methods=['GET'])
def unload_model(model_id):
out = {"model_id": model_id}
try:
translation_server.unload_model(model_id)
out['status'] = STATUS_OK
except Exception as e:
out['status'] = STATUS_ERROR
out['error'] = str(e)
return jsonify(out)
@app.route('/translate-anuvaad', methods=['POST'])
def translate():
inputs = request.get_json(force=True)
if len(inputs)>0:
logger.info("Making translate-anuvaad API call")
logger.info(entry_exit_log(LOG_TAGS["input"],inputs))
out = translate_util.translate_func(inputs, translation_server)
logger.info("out from translate_func-trans_util done{}".format(out))
logger.info(entry_exit_log(LOG_TAGS["output"],out))
return jsonify(out)
else:
logger.info("null inputs in request in translate-anuvaad API")
return jsonify({'status':statusCode["INVALID_API_REQUEST"]})
@app.route('/to_cpu/<int:model_id>', methods=['GET'])
def to_cpu(model_id):
out = {'model_id': model_id}
translation_server.models[model_id].to_cpu()
out['status'] = STATUS_OK
return jsonify(out)
@app.route('/to_gpu/<int:model_id>', methods=['GET'])
def to_gpu(model_id):
out = {'model_id': model_id}
translation_server.models[model_id].to_gpu()
out['status'] = STATUS_OK
return jsonify(out)
app.run(debug=debug, host=host, port=port, use_reloader=False,
threaded=True)
def _get_parser():
parser = configargparse.ArgumentParser(
config_file_parser_class=configargparse.YAMLConfigFileParser,
description="OpenNMT-py REST Server")
parser.add_argument("--ip", type=str, default="0.0.0.0")
parser.add_argument("--port", type=int, default="3003")
parser.add_argument("--url_root", type=str, default="/translator")
parser.add_argument("--debug", "-d", action="store_true")
parser.add_argument("--config", "-c", type=str,
default="./available_models/conf.json")
return parser
if __name__ == '__main__':
parser = _get_parser()
args = parser.parse_args()
start(args.config, url_root=args.url_root, host=args.ip, port=args.port,
debug=args.debug)
| 35.704819
| 130
| 0.672516
|
from __future__ import unicode_literals
import configargparse
import sys
from config.config import statusCode,benchmark_types, language_supported, file_location
import config.bleu_results as bleu_results
import tools.sp_enc_dec as sp
import ancillary_functions_anuvaad.ancillary_functions as ancillary_functions
import ancillary_functions_anuvaad.sc_preface_handler as sc_preface_handler
import ancillary_functions_anuvaad.handle_date_url as date_url_util
from flask import Flask, jsonify, request,send_file,abort,send_from_directory
from flask_cors import CORS
from onmt.translate import TranslationServer, ServerModelError
from itertools import repeat
from onmt.utils.logging import init_logger,logger,entry_exit_log,LOG_TAGS
from onmt.utils.misc import split_corpus
from onmt.translate.translator import build_translator
import os
import onmt.opts as opts
from onmt.utils.parse import ArgumentParser
from config.mongo_model import db,Benchmarks
import datetime
from kafka_utils.document_translator import doc_translator
import threading
import translation_util.translate_util as translate_util
import translation_util.interactive_translate as interactive_translation
from config.kafka_topics import consumer_topics,producer_topics,kafka_topic
STATUS_OK = "ok"
STATUS_ERROR = "error"
mongo_config_dir = "config/mongo_config.py"
IS_RUN_KAFKA = 'IS_RUN_KAFKA'
IS_RUN_KAFKA_DEFAULT_VALUE = False
bootstrap_server_boolean = os.environ.get(IS_RUN_KAFKA, IS_RUN_KAFKA_DEFAULT_VALUE)
def start(config_file,
url_root="/translator",
host="0.0.0.0",
port=3003,
debug=True):
def prefix_route(route_function, prefix='', mask='{0}{1}'):
def newroute(route, *args, **kwargs):
return route_function(mask.format(prefix, route), *args, **kwargs)
return newroute
app = Flask(__name__)
CORS(app)
app.config.from_pyfile(mongo_config_dir)
db.init_app(app)
app.route = prefix_route(app.route, url_root)
translation_server = TranslationServer()
translation_server.start(config_file)
def kafka_function():
logger.info('starting kafka from nmt-server on thread-1')
doc_translator(translation_server,[kafka_topic[0]['consumer'],kafka_topic[1]['consumer'],kafka_topic[2]['consumer']])
if bootstrap_server_boolean:
t1 = threading.Thread(target=kafka_function)
@app.route('/models', methods=['GET'])
def get_models():
out = {}
try:
out['status'] = statusCode["SUCCESS"]
out['response_body'] = translation_server.list_models()
except:
out['status'] = statusCode["SYSTEM_ERR"]
logger.info("Unexpected error: %s"% sys.exc_info()[0])
return jsonify(out)
@app.route('/clone_model/<int:model_id>', methods=['POST'])
def clone_model(model_id):
out = {}
data = request.get_json(force=True)
timeout = -1
if 'timeout' in data:
timeout = data['timeout']
del data['timeout']
opt = data.get('opt', None)
try:
model_id, load_time = translation_server.clone_model(
model_id, opt, timeout)
except ServerModelError as e:
out['status'] = STATUS_ERROR
out['error'] = str(e)
else:
out['status'] = STATUS_OK
out['model_id'] = model_id
out['load_time'] = load_time
return jsonify(out)
@app.route('/unload_model/<int:model_id>', methods=['GET'])
def unload_model(model_id):
out = {"model_id": model_id}
try:
translation_server.unload_model(model_id)
out['status'] = STATUS_OK
except Exception as e:
out['status'] = STATUS_ERROR
out['error'] = str(e)
return jsonify(out)
@app.route('/translate-anuvaad', methods=['POST'])
def translate():
inputs = request.get_json(force=True)
if len(inputs)>0:
logger.info("Making translate-anuvaad API call")
logger.info(entry_exit_log(LOG_TAGS["input"],inputs))
out = translate_util.translate_func(inputs, translation_server)
logger.info("out from translate_func-trans_util done{}".format(out))
logger.info(entry_exit_log(LOG_TAGS["output"],out))
return jsonify(out)
else:
logger.info("null inputs in request in translate-anuvaad API")
return jsonify({'status':statusCode["INVALID_API_REQUEST"]})
@app.route('/to_cpu/<int:model_id>', methods=['GET'])
def to_cpu(model_id):
out = {'model_id': model_id}
translation_server.models[model_id].to_cpu()
out['status'] = STATUS_OK
return jsonify(out)
@app.route('/to_gpu/<int:model_id>', methods=['GET'])
def to_gpu(model_id):
out = {'model_id': model_id}
translation_server.models[model_id].to_gpu()
out['status'] = STATUS_OK
return jsonify(out)
app.run(debug=debug, host=host, port=port, use_reloader=False,
threaded=True)
def _get_parser():
parser = configargparse.ArgumentParser(
config_file_parser_class=configargparse.YAMLConfigFileParser,
description="OpenNMT-py REST Server")
parser.add_argument("--ip", type=str, default="0.0.0.0")
parser.add_argument("--port", type=int, default="3003")
parser.add_argument("--url_root", type=str, default="/translator")
parser.add_argument("--debug", "-d", action="store_true")
parser.add_argument("--config", "-c", type=str,
default="./available_models/conf.json")
return parser
if __name__ == '__main__':
parser = _get_parser()
args = parser.parse_args()
start(args.config, url_root=args.url_root, host=args.ip, port=args.port,
debug=args.debug)
| true
| true
|
f70c02307b230273bb150b0e7528be384ec81c1c
| 284
|
py
|
Python
|
CellState.py
|
WilliamPJSmith/CM4-A
|
bf2a0f2a49ea7e77454bacba25e6cbb2f282572f
|
[
"Unlicense"
] | null | null | null |
CellState.py
|
WilliamPJSmith/CM4-A
|
bf2a0f2a49ea7e77454bacba25e6cbb2f282572f
|
[
"Unlicense"
] | null | null | null |
CellState.py
|
WilliamPJSmith/CM4-A
|
bf2a0f2a49ea7e77454bacba25e6cbb2f282572f
|
[
"Unlicense"
] | null | null | null |
class CellState:
# Don't show these attributes in gui (not used any more?)
    excludeAttr = ['divideFlag', 'deathFlag']
def __init__(self, cid):
self.id = cid
self.growthRate = 1.0
self.color = [0.5,0.5,0.5]
self.divideFlag = False
self.deathFlag = False
| 20.285714
| 58
| 0.676056
|
class CellState:
    excludeAttr = ['divideFlag', 'deathFlag']
def __init__(self, cid):
self.id = cid
self.growthRate = 1.0
self.color = [0.5,0.5,0.5]
self.divideFlag = False
self.deathFlag = False
| true
| true
|
f70c02e67d29d64736ba5ecb710a7508719ba359
| 1,184
|
py
|
Python
|
pyside/lesson_08_main.py
|
LueyEscargot/pyGuiTest
|
c072fe29a7c94dc60ec54344a5d4a91253d25f3f
|
[
"MIT"
] | null | null | null |
pyside/lesson_08_main.py
|
LueyEscargot/pyGuiTest
|
c072fe29a7c94dc60ec54344a5d4a91253d25f3f
|
[
"MIT"
] | null | null | null |
pyside/lesson_08_main.py
|
LueyEscargot/pyGuiTest
|
c072fe29a7c94dc60ec54344a5d4a91253d25f3f
|
[
"MIT"
] | null | null | null |
import sys
import argparse
import pandas as pd
from PySide2.QtCore import QDateTime, QTimeZone
from PySide2.QtWidgets import QApplication
from lesson_08_main_window import MainWindow
from lesson_08_mainWidget import Widget
def transform_date(utc, timezone=None):
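    # Parse ISO-8601 style timestamps such as 2016-02-03T04:05:06.789Z using a
    # Qt date-format string, then optionally re-express them in a timezone.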
utc_fmt = "yyyy-MM-ddTHH:mm:ss.zzzZ"
new_date = QDateTime().fromString(utc, utc_fmt)
if timezone:
new_date.setTimeZone(timezone)
return new_date
def read_data(fname):
# Read the CSV content
df = pd.read_csv(fname)
# Remove wrong magnitudes
df = df.drop(df[df.mag < 0].index)
magnitudes = df["mag"]
# My local timezone
    timezone = QTimeZone(b"Asia/Shanghai")
# Get timestamp transformed to our timezone
times = df["time"].apply(lambda x: transform_date(x, timezone))
return times, magnitudes
if __name__ == "__main__":
options = argparse.ArgumentParser()
options.add_argument("-f", "--file", type=str, required=True)
args = options.parse_args()
data = read_data(args.file)
# Qt Application
app = QApplication(sys.argv)
widget = Widget(data)
window = MainWindow(widget)
window.show()
sys.exit(app.exec_())
| 23.68
| 67
| 0.69848
|
import sys
import argparse
import pandas as pd
from PySide2.QtCore import QDateTime, QTimeZone
from PySide2.QtWidgets import QApplication
from lesson_08_main_window import MainWindow
from lesson_08_mainWidget import Widget
def transform_date(utc, timezone=None):
utc_fmt = "yyyy-MM-ddTHH:mm:ss.zzzZ"
new_date = QDateTime().fromString(utc, utc_fmt)
if timezone:
new_date.setTimeZone(timezone)
return new_date
def read_data(fname):
df = pd.read_csv(fname)
df = df.drop(df[df.mag < 0].index)
magnitudes = df["mag"]
    timezone = QTimeZone(b"Asia/Shanghai")
times = df["time"].apply(lambda x: transform_date(x, timezone))
return times, magnitudes
if __name__ == "__main__":
options = argparse.ArgumentParser()
options.add_argument("-f", "--file", type=str, required=True)
args = options.parse_args()
data = read_data(args.file)
app = QApplication(sys.argv)
widget = Widget(data)
window = MainWindow(widget)
window.show()
sys.exit(app.exec_())
| true
| true
|
f70c042f92fd9bc243b435404e81962654d0d10f
| 5,051
|
py
|
Python
|
mars/tensor/fft/ifftn.py
|
wjsi/mars
|
a69fb19edfe748d4393b90ff2c4941a76c084596
|
[
"Apache-2.0"
] | 2,413
|
2018-12-06T09:37:11.000Z
|
2022-03-30T15:47:39.000Z
|
mars/tensor/fft/ifftn.py
|
wjsi/mars
|
a69fb19edfe748d4393b90ff2c4941a76c084596
|
[
"Apache-2.0"
] | 1,335
|
2018-12-07T03:06:18.000Z
|
2022-03-31T11:45:57.000Z
|
mars/tensor/fft/ifftn.py
|
wjsi/mars
|
a69fb19edfe748d4393b90ff2c4941a76c084596
|
[
"Apache-2.0"
] | 329
|
2018-12-07T03:12:41.000Z
|
2022-03-29T21:49:57.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes as OperandDef
from ..datasource import tensor as astensor
from .core import TensorComplexFFTNMixin, validate_fftn, TensorStandardFFTN
class TensorIFFTN(TensorStandardFFTN, TensorComplexFFTNMixin):
_op_type_ = OperandDef.IFFTN
def __init__(self, shape=None, axes=None, norm=None, **kw):
super().__init__(_shape=shape, _axes=axes, _norm=norm, **kw)
def ifftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional tensor by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `mt.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input tensor, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
Normalization mode (see `mt.fft`). Default is None.
Returns
-------
out : complex Tensor
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
mt.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of tensor.
Notes
-----
See `mt.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> import mars.tensor as mt
>>> a = mt.eye(4)
>>> mt.fft.ifftn(mt.fft.fftn(a, axes=(0,)), axes=(1,)).execute()
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = mt.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = mt.exp(1j*mt.random.uniform(0, 2*mt.pi, (20, 20)))
>>> im = mt.fft.ifftn(n).real
>>> plt.imshow(im.execute())
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
a = astensor(a)
axes = validate_fftn(a, s=s, axes=axes, norm=norm)
op = TensorIFFTN(shape=s, axes=axes, norm=norm, dtype=np.dtype(np.complex_))
return op(a)
| 39.460938
| 80
| 0.659077
|
import numpy as np
from ... import opcodes as OperandDef
from ..datasource import tensor as astensor
from .core import TensorComplexFFTNMixin, validate_fftn, TensorStandardFFTN
class TensorIFFTN(TensorStandardFFTN, TensorComplexFFTNMixin):
_op_type_ = OperandDef.IFFTN
def __init__(self, shape=None, axes=None, norm=None, **kw):
super().__init__(_shape=shape, _axes=axes, _norm=norm, **kw)
def ifftn(a, s=None, axes=None, norm=None):
a = astensor(a)
axes = validate_fftn(a, s=s, axes=axes, norm=norm)
op = TensorIFFTN(shape=s, axes=axes, norm=norm, dtype=np.dtype(np.complex_))
return op(a)
| true
| true
|
f70c0476b617936a3fedaa107dd5a21cbd494991
| 4,387
|
py
|
Python
|
research/cv/Neighbor2Neighbor/src/dataset.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | null | null | null |
research/cv/Neighbor2Neighbor/src/dataset.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | null | null | null |
research/cv/Neighbor2Neighbor/src/dataset.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 2
|
2019-09-01T06:17:04.000Z
|
2019-10-04T08:39:45.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''dataloader'''
import os
import glob
import numpy as np
import PIL.Image as Image
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as CV
class DataLoader_Imagenet_val:
'''DataLoader_Imagenet_val'''
def __init__(self, data_dir, patch=256, noise_style="gauss25", batch_size=4):
super(DataLoader_Imagenet_val, self).__init__()
self.data_dir = data_dir
self.patch = patch
self.train_fns = glob.glob(os.path.join(self.data_dir, "*"))
self.train_fns.sort()
print('fetch {} samples for training'.format(len(self.train_fns)))
self.noise_generator = AugmentNoise(noise_style)
self.batch_size = batch_size
self.test = 1
def __getitem__(self, index):
# fetch image
fn = self.train_fns[index]
im = Image.open(fn)
im = np.array(im, dtype=np.float32)
# random crop
H = im.shape[0]
W = im.shape[1]
if H - self.patch > 0:
xx = np.random.randint(0, H - self.patch)
im = im[xx:xx + self.patch, :, :]
if W - self.patch > 0:
yy = np.random.randint(0, W - self.patch)
im = im[:, yy:yy + self.patch, :]
im /= 255.0 #clean image
noisy = self.noise_generator.add_noise(im)
return im, noisy
def __len__(self):
return len(self.train_fns)
class AugmentNoise():
'''AugmentNoise'''
def __init__(self, style):
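        # The style string encodes the noise model and its parameters, e.g.
        # "gauss25" (fixed sigma 25/255), "gauss5_50" (sigma drawn from a range),
        # "poisson30" (fixed lambda) or "poisson30_50" (lambda drawn from a range);
        # underscores separate the one or two numeric parameters parsed below.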
if style.startswith('gauss'):
self.params = [
float(p) / 255.0 for p in style.replace('gauss', '').split('_')
]
if len(self.params) == 1:
self.style = "gauss_fix"
elif len(self.params) == 2:
self.style = "gauss_range"
elif style.startswith('poisson'):
self.params = [
float(p) for p in style.replace('poisson', '').split('_')
]
if len(self.params) == 1:
self.style = "poisson_fix"
elif len(self.params) == 2:
self.style = "poisson_range"
def add_noise(self, x):
'''add_noise'''
shape = x.shape
if self.style == "gauss_fix":
std = self.params[0]
return np.array(x + np.random.normal(size=shape) * std,
dtype=np.float32)
if self.style == "gauss_range":
min_std, max_std = self.params
std = np.random.uniform(low=min_std, high=max_std, size=(1, 1, 1))
return np.array(x + np.random.normal(size=shape) * std,
dtype=np.float32)
if self.style == "poisson_fix":
lam = self.params[0]
return np.array(np.random.poisson(lam * x) / lam, dtype=np.float32)
assert self.style == "poisson_range"
min_lam, max_lam = self.params
lam = np.random.uniform(low=min_lam, high=max_lam, size=(1, 1, 1))
return np.array(np.random.poisson(lam * x) / lam, dtype=np.float32)
def create_Dataset(data_dir, patch, noise_style, batch_size, device_num, rank, shuffle):
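    # Wrap the generator in a MindSpore GeneratorDataset, shard it across devices,
    # convert both clean and noisy images from HWC to CHW layout, and batch them,
    # dropping the remainder so every step sees a full batch.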
dataset = DataLoader_Imagenet_val(data_dir, patch, noise_style, batch_size)
hwc_to_chw = CV.HWC2CHW()
data_set = ds.GeneratorDataset(dataset, column_names=["image", "noisy"], \
num_parallel_workers=8, shuffle=shuffle, num_shards=device_num, shard_id=rank)
data_set = data_set.map(input_columns=["image"], operations=hwc_to_chw, num_parallel_workers=8)
data_set = data_set.map(input_columns=["noisy"], operations=hwc_to_chw, num_parallel_workers=8)
data_set = data_set.batch(batch_size, drop_remainder=True)
return data_set, data_set.get_dataset_size()
| 40.62037
| 99
| 0.605197
|
import os
import glob
import numpy as np
import PIL.Image as Image
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as CV
class DataLoader_Imagenet_val:
def __init__(self, data_dir, patch=256, noise_style="gauss25", batch_size=4):
super(DataLoader_Imagenet_val, self).__init__()
self.data_dir = data_dir
self.patch = patch
self.train_fns = glob.glob(os.path.join(self.data_dir, "*"))
self.train_fns.sort()
print('fetch {} samples for training'.format(len(self.train_fns)))
self.noise_generator = AugmentNoise(noise_style)
self.batch_size = batch_size
self.test = 1
def __getitem__(self, index):
fn = self.train_fns[index]
im = Image.open(fn)
im = np.array(im, dtype=np.float32)
H = im.shape[0]
W = im.shape[1]
if H - self.patch > 0:
xx = np.random.randint(0, H - self.patch)
im = im[xx:xx + self.patch, :, :]
if W - self.patch > 0:
yy = np.random.randint(0, W - self.patch)
im = im[:, yy:yy + self.patch, :]
        im /= 255.0
        noisy = self.noise_generator.add_noise(im)
return im, noisy
def __len__(self):
return len(self.train_fns)
class AugmentNoise():
def __init__(self, style):
if style.startswith('gauss'):
self.params = [
float(p) / 255.0 for p in style.replace('gauss', '').split('_')
]
if len(self.params) == 1:
self.style = "gauss_fix"
elif len(self.params) == 2:
self.style = "gauss_range"
elif style.startswith('poisson'):
self.params = [
float(p) for p in style.replace('poisson', '').split('_')
]
if len(self.params) == 1:
self.style = "poisson_fix"
elif len(self.params) == 2:
self.style = "poisson_range"
def add_noise(self, x):
shape = x.shape
if self.style == "gauss_fix":
std = self.params[0]
return np.array(x + np.random.normal(size=shape) * std,
dtype=np.float32)
if self.style == "gauss_range":
min_std, max_std = self.params
std = np.random.uniform(low=min_std, high=max_std, size=(1, 1, 1))
return np.array(x + np.random.normal(size=shape) * std,
dtype=np.float32)
if self.style == "poisson_fix":
lam = self.params[0]
return np.array(np.random.poisson(lam * x) / lam, dtype=np.float32)
assert self.style == "poisson_range"
min_lam, max_lam = self.params
lam = np.random.uniform(low=min_lam, high=max_lam, size=(1, 1, 1))
return np.array(np.random.poisson(lam * x) / lam, dtype=np.float32)
def create_Dataset(data_dir, patch, noise_style, batch_size, device_num, rank, shuffle):
dataset = DataLoader_Imagenet_val(data_dir, patch, noise_style, batch_size)
hwc_to_chw = CV.HWC2CHW()
data_set = ds.GeneratorDataset(dataset, column_names=["image", "noisy"], \
num_parallel_workers=8, shuffle=shuffle, num_shards=device_num, shard_id=rank)
data_set = data_set.map(input_columns=["image"], operations=hwc_to_chw, num_parallel_workers=8)
data_set = data_set.map(input_columns=["noisy"], operations=hwc_to_chw, num_parallel_workers=8)
data_set = data_set.batch(batch_size, drop_remainder=True)
return data_set, data_set.get_dataset_size()
| true
| true
|
f70c04e3484409a9478edc1a27fe0c1fc78a80c3
| 402
|
py
|
Python
|
app/route/route_sales.py
|
LifeLaboratory/skb_sudo_su
|
92f29cc8e7fbd30624ee0d8634d61b8ecbcace35
|
[
"MIT"
] | null | null | null |
app/route/route_sales.py
|
LifeLaboratory/skb_sudo_su
|
92f29cc8e7fbd30624ee0d8634d61b8ecbcace35
|
[
"MIT"
] | null | null | null |
app/route/route_sales.py
|
LifeLaboratory/skb_sudo_su
|
92f29cc8e7fbd30624ee0d8634d61b8ecbcace35
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from app.api.base import base_name as names
from app.api.src.sales import *
from app.api.base.base_router import BaseRouter
class Sales(BaseRouter):
def __init__(self):
super().__init__()
self.args = [names.LOGIN, names.PASSWORD]
def get(self, id_user):
args = {
names.ID_USER: id_user
}
return get_sales_user(args) or {}
| 21.157895
| 49
| 0.636816
|
from app.api.base import base_name as names
from app.api.src.sales import *
from app.api.base.base_router import BaseRouter
class Sales(BaseRouter):
def __init__(self):
super().__init__()
self.args = [names.LOGIN, names.PASSWORD]
def get(self, id_user):
args = {
names.ID_USER: id_user
}
return get_sales_user(args) or {}
| true
| true
|
f70c0525ea06688b28847ae0a8658955c449019c
| 15,211
|
py
|
Python
|
salt/states/cloud.py
|
ContinuumIO/salt
|
91c0955cfc24b13f07f4917d3d44a9fd9636347a
|
[
"Apache-2.0"
] | 2
|
2017-09-17T21:10:35.000Z
|
2019-08-26T03:00:12.000Z
|
salt/states/cloud.py
|
ContinuumIO/salt
|
91c0955cfc24b13f07f4917d3d44a9fd9636347a
|
[
"Apache-2.0"
] | null | null | null |
salt/states/cloud.py
|
ContinuumIO/salt
|
91c0955cfc24b13f07f4917d3d44a9fd9636347a
|
[
"Apache-2.0"
] | 3
|
2021-02-23T08:12:48.000Z
|
2021-02-23T08:13:13.000Z
|
# -*- coding: utf-8 -*-
'''
Using states instead of maps to deploy clouds
=============================================
.. versionadded:: 2014.1.0 (Hydrogen)
Use this minion to spin up a cloud instance:
.. code-block:: yaml
my-ec2-instance:
cloud.profile:
my-ec2-config
'''
import pprint
from salt._compat import string_types
import salt.utils.cloud as suc
def __virtual__():
'''
Only load if the cloud module is available in __salt__
'''
return 'cloud.profile' in __salt__
def _check_name(name):
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
if suc.check_name(name, 'a-zA-Z0-9._-'):
ret['comment'] = 'Invalid characters in name.'
ret['result'] = False
return ret
else:
ret['result'] = True
return ret
def _valid(name, comment='', changes=None):
if not changes:
changes = {}
return {'name': name,
'result': True,
'changes': changes,
'comment': comment}
def present(name, cloud_provider, onlyif=None, unless=None, **kwargs):
'''
Spin up a single instance on a cloud provider, using salt-cloud. This state
does not take a profile argument; rather, it takes the arguments that would
normally be configured as part of the state.
Note that while this function does take any configuration argument that
would normally be used to create an instance, it will not verify the state
of any of those arguments on an existing instance. Stateful properties of
an instance should be configured using their own individual state (i.e.,
cloud.tagged, cloud.untagged, etc).
name
The name of the instance to create
cloud_provider
The name of the cloud provider to use
    onlyif
        Only run the state if the given command or condition succeeds
    unless
        Do not run the state if the given command or condition succeeds
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
instance = __salt__['cloud.action'](
fun='show_instance', names=[name])
retcode = __salt__['cmd.retcode']
prov = str([a for a in instance][0])
if onlyif is not None:
if not isinstance(onlyif, string_types):
if not onlyif:
return _valid(name, comment='onlyif execution failed')
elif isinstance(onlyif, string_types):
if retcode(onlyif) != 0:
return _valid(name, comment='onlyif execution failed')
if unless is not None:
if not isinstance(unless, string_types):
if unless:
return _valid(name, comment='unless execution succeeded')
elif isinstance(unless, string_types):
if retcode(unless) == 0:
return _valid(name, comment='unless execution succeeded')
if instance and 'Not Actioned' not in prov:
ret['result'] = True
ret['comment'] = 'Instance {0} already exists in {1}'.format(name,
prov)
return ret
if __opts__['test']:
ret['comment'] = 'Instance {0} needs to be created'.format(name)
return ret
info = __salt__['cloud.create'](cloud_provider, name, **kwargs)
if info and not 'Error' in info:
ret['changes'] = info
ret['result'] = True
ret['comment'] = ('Created instance {0} using provider {1}'
' and the following options: {2}').format(
name,
cloud_provider,
pprint.pformat(kwargs)
)
    elif info and 'Error' in info:
        ret['result'] = False
        ret['comment'] = ('Failed to create instance {0}'
                          ' using provider {1}: {2}').format(
                              name,
                              cloud_provider,
                              info['Error'],
                          )
    else:
        ret['result'] = False
        ret['comment'] = ('Failed to create instance {0}'
                          ' using provider {1},'
                          ' please check your configuration').format(name,
                                                                     cloud_provider)
return ret
def absent(name, onlyif=None, unless=None):
'''
Ensure that no instances with the specified names exist.
CAUTION: This is a destructive state, which will search all
configured cloud providers for the named instance,
and destroy it.
name
The name of the instance to destroy
    onlyif
        Only run the state if the given command or condition succeeds
    unless
        Do not run the state if the given command or condition succeeds
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
retcode = __salt__['cmd.retcode']
instance = __salt__['cloud.action'](fun='show_instance', names=[name])
if not instance or \
        ('Not Actioned/Not Running' in instance
         and name in instance['Not Actioned/Not Running']):
ret['result'] = True
ret['comment'] = 'Instance {0} already absent'.format(name)
return ret
if __opts__['test']:
ret['comment'] = 'Instance {0} needs to be destroyed'.format(name)
return ret
if onlyif is not None:
if not isinstance(onlyif, string_types):
if not onlyif:
return _valid(name, comment='onlyif execution failed')
elif isinstance(onlyif, string_types):
if retcode(onlyif) != 0:
return _valid(name, comment='onlyif execution failed')
if unless is not None:
if not isinstance(unless, string_types):
if unless:
return _valid(name, comment='unless execution succeeded')
elif isinstance(unless, string_types):
if retcode(unless) == 0:
return _valid(name, comment='unless execution succeeded')
info = __salt__['cloud.destroy'](name)
if info and not 'Error' in info:
ret['changes'] = info
ret['result'] = True
ret['comment'] = ('Destroyed instance {0}').format(
name,
)
elif 'Error' in info:
ret['result'] = False
ret['comment'] = ('Failed to destroy instance {0}: {1}').format(
name,
info['Error'],
)
else:
ret['result'] = False
ret['comment'] = 'Failed to destroy instance {0}'.format(name)
return ret
def profile(name, profile, onlyif=None, unless=None, **kwargs):
'''
Create a single instance on a cloud provider, using a salt-cloud profile.
Note that while profiles used this function do take any configuration
argument that would normally be used to create an instance using a profile,
this state will not verify the state of any of those arguments on an
existing instance. Stateful properties of an instance should be configured
using their own individual state (i.e., cloud.tagged, cloud.untagged, etc).
name
The name of the instance to create
profile
The name of the cloud profile to use
    onlyif
        Only run the state if the given command or condition succeeds
    unless
        Do not run the state if the given command or condition succeeds
kwargs
Any profile override or addition
'''
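    # Illustrative SLS usage (the profile and override names below are
    # hypothetical, not taken from this module):
    #
    # web-instance:
    #   cloud.profile:
    #     - profile: my-ec2-config
    #     - image: ami-1234abcd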
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
retcode = __salt__['cmd.retcode']
if onlyif is not None:
if not isinstance(onlyif, string_types):
if not onlyif:
return _valid(name, comment='onlyif execution failed')
elif isinstance(onlyif, string_types):
if retcode(onlyif) != 0:
return _valid(name, comment='onlyif execution failed')
if unless is not None:
if not isinstance(unless, string_types):
if unless:
return _valid(name, comment='unless execution succeeded')
elif isinstance(unless, string_types):
if retcode(unless) == 0:
return _valid(name, comment='unless execution succeeded')
instance = __salt__['cloud.action'](fun='show_instance', names=[name])
prov = str(instance.keys()[0])
if instance and 'Not Actioned' not in prov:
ret['result'] = True
ret['comment'] = 'Instance {0} already exists in {1}'.format(
name, prov)
return ret
if __opts__['test']:
ret['comment'] = 'Instance {0} needs to be created'.format(name)
return ret
info = __salt__['cloud.profile'](profile, name, vm_overrides=kwargs)
# get either {Error: ''} or {namestring: {Error: ''}}
# which is what we can get from providers returns
main_error = info.get('Error', '')
name_error = ''
if isinstance(info, dict):
subinfo = info.get(name, {})
if isinstance(subinfo, dict):
name_error = subinfo.get('Error', None)
error = main_error or name_error
if info and not error:
node_info = info.get(name)
ret['result'] = True
default_msg = 'Created instance {0} using profile {1}'.format(
name, profile,)
# some providers support changes
if 'changes' in node_info:
ret['changes'] = node_info['changes']
ret['comment'] = node_info.get('comment', default_msg)
else:
ret['changes'] = info
ret['comment'] = default_msg
elif error:
ret['result'] = False
ret['comment'] = ('Failed to create instance {0}'
' using profile {1}: {2}').format(
name,
profile,
'{0}\n{1}\n'.format(main_error, name_error).strip(),
)
else:
ret['result'] = False
ret['comment'] = ('Failed to create instance {0}'
                          ' using profile {1}').format(
name,
profile,
)
return ret
def volume_present(name, provider=None, **kwargs):
'''
Check that a block volume exists.
'''
ret = _check_name(name)
if not ret['result']:
return ret
volumes = __salt__['cloud.volume_list'](provider=provider)
if name in volumes.keys():
ret['comment'] = 'Volume exists: {0}'.format(name)
ret['result'] = True
return ret
elif __opts__['test']:
ret['comment'] = 'Volume {0} will be created.'.format(name)
ret['result'] = None
return ret
response = __salt__['cloud.volume_create'](
names=name,
provider=provider,
**kwargs
)
if response:
ret['result'] = True
ret['comment'] = 'Volume {0} was created'.format(name)
ret['changes'] = {'old': None, 'new': response}
else:
ret['result'] = False
ret['comment'] = 'Volume {0} failed to create.'.format(name)
return ret
def volume_absent(name, provider=None, **kwargs):
'''
    Check that a block volume does not exist.
'''
ret = _check_name(name)
if not ret['result']:
return ret
volumes = __salt__['cloud.volume_list'](provider=provider)
if not name in volumes.keys():
ret['comment'] = 'Volume is absent.'
ret['result'] = True
return ret
elif __opts__['test']:
ret['comment'] = 'Volume {0} will be deleted.'.format(name)
ret['result'] = None
return ret
response = __salt__['cloud.volume_delete'](
names=name,
provider=provider,
**kwargs
)
if response:
ret['result'] = True
ret['comment'] = 'Volume {0} was deleted'.format(name)
ret['changes'] = {'old': volumes[name], 'new': response}
else:
ret['result'] = False
ret['comment'] = 'Volume {0} failed to delete.'.format(name)
return ret
def volume_attached(name, server_name, provider=None, **kwargs):
'''
Check if a block volume is attached.
'''
ret = _check_name(name)
if not ret['result']:
return ret
ret = _check_name(server_name)
if not ret['result']:
return ret
volumes = __salt__['cloud.volume_list'](provider=provider)
instance = __salt__['cloud.action'](
fun='show_instance',
names=server_name
)
if name in volumes.keys() and volumes[name]['attachments']:
volume = volumes[name]
        ret['comment'] = ('Volume {name} is already'
                          ' attached: {attachments}').format(**volumes[name])
ret['result'] = True
return ret
elif not name in volumes.keys():
ret['comment'] = 'Volume {0} does not exist'.format(name)
ret['result'] = False
return ret
elif not instance:
ret['comment'] = 'Server {0} does not exist'.format(server_name)
ret['result'] = False
return ret
elif __opts__['test']:
        ret['comment'] = 'Volume {0} will be attached.'.format(
name
)
ret['result'] = None
return ret
response = __salt__['cloud.volume_attach'](
provider=provider,
names=name,
server_name=server_name,
**kwargs
)
if response:
ret['result'] = True
        ret['comment'] = 'Volume {0} was attached'.format(name)
ret['changes'] = {'old': volumes[name], 'new': response}
else:
ret['result'] = False
ret['comment'] = 'Volume {0} failed to attach.'.format(name)
return ret
def volume_detached(name, server_name=None, provider=None, **kwargs):
'''
    Check that a block volume is detached.
    Returns True if the server or volume does not exist.
'''
ret = _check_name(name)
if not ret['result']:
return ret
if not server_name is None:
ret = _check_name(server_name)
if not ret['result']:
return ret
volumes = __salt__['cloud.volume_list'](provider=provider)
if server_name:
instance = __salt__['cloud.action'](fun='show_instance', names=[name])
else:
instance = None
if name in volumes.keys() and not volumes[name]['attachments']:
volume = volumes[name]
ret['comment'] = (
'Volume {name} is not currently attached to anything.'
).format(**volumes[name])
ret['result'] = True
return ret
elif not name in volumes.keys():
ret['comment'] = 'Volume {0} does not exist'.format(name)
ret['result'] = True
return ret
elif not instance and not server_name is None:
ret['comment'] = 'Server {0} does not exist'.format(server_name)
ret['result'] = True
return ret
elif __opts__['test']:
        ret['comment'] = 'Volume {0} will be detached.'.format(
name
)
ret['result'] = None
return ret
response = __salt__['cloud.volume_detach'](
provider=provider,
names=name,
server_name=server_name,
**kwargs
)
if response:
ret['result'] = True
        ret['comment'] = 'Volume {0} was detached'.format(name)
ret['changes'] = {'old': volumes[name], 'new': response}
else:
ret['result'] = False
ret['comment'] = 'Volume {0} failed to detach.'.format(name)
return ret
| 32.023158
| 79
| 0.571297
|
import pprint
from salt._compat import string_types
import salt.utils.cloud as suc
def __virtual__():
return 'cloud.profile' in __salt__
def _check_name(name):
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
if suc.check_name(name, 'a-zA-Z0-9._-'):
ret['comment'] = 'Invalid characters in name.'
ret['result'] = False
return ret
else:
ret['result'] = True
return ret
def _valid(name, comment='', changes=None):
if not changes:
changes = {}
return {'name': name,
'result': True,
'changes': changes,
'comment': comment}
def present(name, cloud_provider, onlyif=None, unless=None, **kwargs):
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
instance = __salt__['cloud.action'](
fun='show_instance', names=[name])
retcode = __salt__['cmd.retcode']
prov = str([a for a in instance][0])
if onlyif is not None:
if not isinstance(onlyif, string_types):
if not onlyif:
return _valid(name, comment='onlyif execution failed')
elif isinstance(onlyif, string_types):
if retcode(onlyif) != 0:
return _valid(name, comment='onlyif execution failed')
if unless is not None:
if not isinstance(unless, string_types):
if unless:
return _valid(name, comment='unless execution succeeded')
elif isinstance(unless, string_types):
if retcode(unless) == 0:
return _valid(name, comment='unless execution succeeded')
if instance and 'Not Actioned' not in prov:
ret['result'] = True
ret['comment'] = 'Instance {0} already exists in {1}'.format(name,
prov)
return ret
if __opts__['test']:
ret['comment'] = 'Instance {0} needs to be created'.format(name)
return ret
info = __salt__['cloud.create'](cloud_provider, name, **kwargs)
if info and not 'Error' in info:
ret['changes'] = info
ret['result'] = True
ret['comment'] = ('Created instance {0} using provider {1}'
' and the following options: {2}').format(
name,
cloud_provider,
pprint.pformat(kwargs)
)
    elif info and 'Error' in info:
        ret['result'] = False
        ret['comment'] = ('Failed to create instance {0}'
                          ' using provider {1}: {2}').format(
                              name,
                              cloud_provider,
                              info['Error'],
                          )
    else:
        ret['result'] = False
        ret['comment'] = ('Failed to create instance {0}'
                          ' using provider {1},'
                          ' please check your configuration').format(name,
                                                                     cloud_provider)
return ret
def absent(name, onlyif=None, unless=None):
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
retcode = __salt__['cmd.retcode']
instance = __salt__['cloud.action'](fun='show_instance', names=[name])
if not instance or \
        ('Not Actioned/Not Running' in instance
         and name in instance['Not Actioned/Not Running']):
ret['result'] = True
ret['comment'] = 'Instance {0} already absent'.format(name)
return ret
if __opts__['test']:
ret['comment'] = 'Instance {0} needs to be destroyed'.format(name)
return ret
if onlyif is not None:
if not isinstance(onlyif, string_types):
if not onlyif:
return _valid(name, comment='onlyif execution failed')
elif isinstance(onlyif, string_types):
if retcode(onlyif) != 0:
return _valid(name, comment='onlyif execution failed')
if unless is not None:
if not isinstance(unless, string_types):
if unless:
return _valid(name, comment='unless execution succeeded')
elif isinstance(unless, string_types):
if retcode(unless) == 0:
return _valid(name, comment='unless execution succeeded')
info = __salt__['cloud.destroy'](name)
if info and not 'Error' in info:
ret['changes'] = info
ret['result'] = True
ret['comment'] = ('Destroyed instance {0}').format(
name,
)
elif 'Error' in info:
ret['result'] = False
ret['comment'] = ('Failed to destroy instance {0}: {1}').format(
name,
info['Error'],
)
else:
ret['result'] = False
ret['comment'] = 'Failed to destroy instance {0}'.format(name)
return ret
def profile(name, profile, onlyif=None, unless=None, **kwargs):
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
retcode = __salt__['cmd.retcode']
if onlyif is not None:
if not isinstance(onlyif, string_types):
if not onlyif:
return _valid(name, comment='onlyif execution failed')
elif isinstance(onlyif, string_types):
if retcode(onlyif) != 0:
return _valid(name, comment='onlyif execution failed')
if unless is not None:
if not isinstance(unless, string_types):
if unless:
return _valid(name, comment='unless execution succeeded')
elif isinstance(unless, string_types):
if retcode(unless) == 0:
return _valid(name, comment='unless execution succeeded')
instance = __salt__['cloud.action'](fun='show_instance', names=[name])
prov = str(instance.keys()[0])
if instance and 'Not Actioned' not in prov:
ret['result'] = True
ret['comment'] = 'Instance {0} already exists in {1}'.format(
name, prov)
return ret
if __opts__['test']:
ret['comment'] = 'Instance {0} needs to be created'.format(name)
return ret
info = __salt__['cloud.profile'](profile, name, vm_overrides=kwargs)
main_error = info.get('Error', '')
name_error = ''
if isinstance(info, dict):
subinfo = info.get(name, {})
if isinstance(subinfo, dict):
name_error = subinfo.get('Error', None)
error = main_error or name_error
if info and not error:
node_info = info.get(name)
ret['result'] = True
default_msg = 'Created instance {0} using profile {1}'.format(
name, profile,)
if 'changes' in node_info:
ret['changes'] = node_info['changes']
ret['comment'] = node_info.get('comment', default_msg)
else:
ret['changes'] = info
ret['comment'] = default_msg
elif error:
ret['result'] = False
ret['comment'] = ('Failed to create instance {0}'
' using profile {1}: {2}').format(
name,
profile,
'{0}\n{1}\n'.format(main_error, name_error).strip(),
)
else:
ret['result'] = False
ret['comment'] = ('Failed to create instance {0}'
                          ' using profile {1}').format(
name,
profile,
)
return ret
def volume_present(name, provider=None, **kwargs):
ret = _check_name(name)
if not ret['result']:
return ret
volumes = __salt__['cloud.volume_list'](provider=provider)
if name in volumes.keys():
ret['comment'] = 'Volume exists: {0}'.format(name)
ret['result'] = True
return ret
elif __opts__['test']:
ret['comment'] = 'Volume {0} will be created.'.format(name)
ret['result'] = None
return ret
response = __salt__['cloud.volume_create'](
names=name,
provider=provider,
**kwargs
)
if response:
ret['result'] = True
ret['comment'] = 'Volume {0} was created'.format(name)
ret['changes'] = {'old': None, 'new': response}
else:
ret['result'] = False
ret['comment'] = 'Volume {0} failed to create.'.format(name)
return ret
def volume_absent(name, provider=None, **kwargs):
ret = _check_name(name)
if not ret['result']:
return ret
volumes = __salt__['cloud.volume_list'](provider=provider)
    if name not in volumes.keys():
ret['comment'] = 'Volume is absent.'
ret['result'] = True
return ret
elif __opts__['test']:
ret['comment'] = 'Volume {0} will be deleted.'.format(name)
ret['result'] = None
return ret
response = __salt__['cloud.volume_delete'](
names=name,
provider=provider,
**kwargs
)
if response:
ret['result'] = True
ret['comment'] = 'Volume {0} was deleted'.format(name)
ret['changes'] = {'old': volumes[name], 'new': response}
else:
ret['result'] = False
ret['comment'] = 'Volume {0} failed to delete.'.format(name)
return ret
def volume_attached(name, server_name, provider=None, **kwargs):
ret = _check_name(name)
if not ret['result']:
return ret
ret = _check_name(server_name)
if not ret['result']:
return ret
volumes = __salt__['cloud.volume_list'](provider=provider)
instance = __salt__['cloud.action'](
fun='show_instance',
names=server_name
)
if name in volumes.keys() and volumes[name]['attachments']:
volume = volumes[name]
        ret['comment'] = ('Volume {name} is already '
'attached: {attachments}').format(**volumes[name])
ret['result'] = True
return ret
    elif name not in volumes.keys():
ret['comment'] = 'Volume {0} does not exist'.format(name)
ret['result'] = False
return ret
elif not instance:
ret['comment'] = 'Server {0} does not exist'.format(server_name)
ret['result'] = False
return ret
elif __opts__['test']:
        ret['comment'] = 'Volume {0} will be attached.'.format(
name
)
ret['result'] = None
return ret
response = __salt__['cloud.volume_attach'](
provider=provider,
names=name,
server_name=server_name,
**kwargs
)
if response:
ret['result'] = True
        ret['comment'] = 'Volume {0} was attached'.format(name)
ret['changes'] = {'old': volumes[name], 'new': response}
else:
ret['result'] = False
ret['comment'] = 'Volume {0} failed to attach.'.format(name)
return ret
def volume_detached(name, server_name=None, provider=None, **kwargs):
ret = _check_name(name)
if not ret['result']:
return ret
    if server_name is not None:
ret = _check_name(server_name)
if not ret['result']:
return ret
volumes = __salt__['cloud.volume_list'](provider=provider)
if server_name:
        instance = __salt__['cloud.action'](fun='show_instance', names=[server_name])
else:
instance = None
if name in volumes.keys() and not volumes[name]['attachments']:
volume = volumes[name]
ret['comment'] = (
'Volume {name} is not currently attached to anything.'
).format(**volumes[name])
ret['result'] = True
return ret
    elif name not in volumes.keys():
ret['comment'] = 'Volume {0} does not exist'.format(name)
ret['result'] = True
return ret
    elif not instance and server_name is not None:
ret['comment'] = 'Server {0} does not exist'.format(server_name)
ret['result'] = True
return ret
elif __opts__['test']:
        ret['comment'] = 'Volume {0} will be detached.'.format(
name
)
ret['result'] = None
return ret
response = __salt__['cloud.volume_detach'](
provider=provider,
names=name,
server_name=server_name,
**kwargs
)
if response:
ret['result'] = True
        ret['comment'] = 'Volume {0} was detached'.format(name)
ret['changes'] = {'old': volumes[name], 'new': response}
else:
ret['result'] = False
ret['comment'] = 'Volume {0} failed to detach.'.format(name)
return ret
| true
| true
|
f70c054e7f9c75daf40ce7a574ccf0b3546d13eb
| 3,655
|
py
|
Python
|
iotronic_lightningrod/modules/utils.py
|
Zakaria-Ben/iotronic-lightning-rod
|
4a3eff68bd1db2d57beee0e8c51fbb14fcc0877a
|
[
"Apache-2.0"
] | null | null | null |
iotronic_lightningrod/modules/utils.py
|
Zakaria-Ben/iotronic-lightning-rod
|
4a3eff68bd1db2d57beee0e8c51fbb14fcc0877a
|
[
"Apache-2.0"
] | null | null | null |
iotronic_lightningrod/modules/utils.py
|
Zakaria-Ben/iotronic-lightning-rod
|
4a3eff68bd1db2d57beee0e8c51fbb14fcc0877a
|
[
"Apache-2.0"
] | 1
|
2018-05-18T13:01:03.000Z
|
2018-05-18T13:01:03.000Z
|
# Copyright 2017 MDSLAB - University of Messina
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__author__ = "Nicola Peditto <npeditto@unime.it"
import asyncio
import inspect
import pkg_resources
from six import moves
from stevedore import extension
import sys
from iotronic_lightningrod.config import entry_points_name
from iotronic_lightningrod.lightningrod import SESSION
from iotronic_lightningrod.modules import Module
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
def getFuncName():
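    # Name of the calling function, taken one frame up the stack.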
return inspect.stack()[1][3]
def refresh_stevedore(namespace=None):
"""Trigger reload of entry points.
Useful to have dynamic loading/unloading of stevedore modules.
"""
# NOTE(sheeprine): pkg_resources doesn't support reload on python3 due to
# defining basestring which is still there on reload hence executing
# python2 related code.
try:
del sys.modules['pkg_resources'].basestring
except AttributeError:
# python2, do nothing
pass
# Force working_set reload
moves.reload_module(sys.modules['pkg_resources'])
# Clear stevedore cache
cache = extension.ExtensionManager.ENTRY_POINT_CACHE
if namespace:
if namespace in cache:
del cache[namespace]
else:
cache.clear()
class Utility(Module.Module):
def __init__(self, board, session):
super(Utility, self).__init__("Utility", board)
def finalize(self):
pass
def restore(self):
pass
async def hello(self, client_name, message):
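        # Test RPC: wait a random interval, then echo the caller's message back.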
import random
s = random.uniform(0.5, 3.0)
await asyncio.sleep(s)
result = "Hello by board to Conductor " + client_name + \
" that said me " + message + " - Time: " + '%.2f' % s
LOG.info("DEVICE hello result: " + str(result))
return result
async def plug_and_play(self, new_module, new_class):
LOG.info("LR modules loaded:\n\t" + new_module)
# Updating entry_points
with open(entry_points_name, 'a') as entry_points:
entry_points.write(
new_module +
'= iotronic_lightningrod.modules.' + new_module + ':'
+ new_class
)
# Reload entry_points
refresh_stevedore('s4t.modules')
LOG.info("New entry_points loaded!")
# Reading updated entry_points
named_objects = {}
for ep in pkg_resources.iter_entry_points(group='s4t.modules'):
named_objects.update({ep.name: ep.load()})
await named_objects
SESSION.disconnect()
return str(named_objects)
async def changeConf(self, conf):
await self.board.getConf(conf)
self.board.setUpdateTime()
result = "Board configuration changed!"
LOG.info("PROVISIONING RESULT: " + str(result))
return result
async def destroyNode(self, conf):
await self.board.setConf(conf)
result = "Board configuration cleaned!"
LOG.info("DESTROY RESULT: " + str(result))
return result
| 28.554688
| 78
| 0.661012
|
__author__ = "Nicola Peditto <npeditto@unime.it"
import asyncio
import inspect
import pkg_resources
from six import moves
from stevedore import extension
import sys
from iotronic_lightningrod.config import entry_points_name
from iotronic_lightningrod.lightningrod import SESSION
from iotronic_lightningrod.modules import Module
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
def getFuncName():
return inspect.stack()[1][3]
def refresh_stevedore(namespace=None):
# defining basestring which is still there on reload hence executing
# python2 related code.
try:
del sys.modules['pkg_resources'].basestring
except AttributeError:
# python2, do nothing
pass
# Force working_set reload
moves.reload_module(sys.modules['pkg_resources'])
# Clear stevedore cache
cache = extension.ExtensionManager.ENTRY_POINT_CACHE
if namespace:
if namespace in cache:
del cache[namespace]
else:
cache.clear()
class Utility(Module.Module):
def __init__(self, board, session):
super(Utility, self).__init__("Utility", board)
def finalize(self):
pass
def restore(self):
pass
async def hello(self, client_name, message):
import random
s = random.uniform(0.5, 3.0)
await asyncio.sleep(s)
result = "Hello by board to Conductor " + client_name + \
" that said me " + message + " - Time: " + '%.2f' % s
LOG.info("DEVICE hello result: " + str(result))
return result
async def plug_and_play(self, new_module, new_class):
LOG.info("LR modules loaded:\n\t" + new_module)
# Updating entry_points
with open(entry_points_name, 'a') as entry_points:
entry_points.write(
new_module +
'= iotronic_lightningrod.modules.' + new_module + ':'
+ new_class
)
# Reload entry_points
refresh_stevedore('s4t.modules')
LOG.info("New entry_points loaded!")
# Reading updated entry_points
named_objects = {}
for ep in pkg_resources.iter_entry_points(group='s4t.modules'):
named_objects.update({ep.name: ep.load()})
await named_objects
SESSION.disconnect()
return str(named_objects)
async def changeConf(self, conf):
await self.board.getConf(conf)
self.board.setUpdateTime()
result = "Board configuration changed!"
LOG.info("PROVISIONING RESULT: " + str(result))
return result
async def destroyNode(self, conf):
await self.board.setConf(conf)
result = "Board configuration cleaned!"
LOG.info("DESTROY RESULT: " + str(result))
return result
| true
| true
|
f70c0553ea487881fbca1e06b6c31bb278cd4251
| 4,069
|
py
|
Python
|
Project/EnhancedDeepPath/scripts/sl_policy.py
|
iust-projects/Data-Mining-IUST
|
88f7a5541278f1fe907ca9b70c990a27f60900b2
|
[
"Apache-2.0"
] | null | null | null |
Project/EnhancedDeepPath/scripts/sl_policy.py
|
iust-projects/Data-Mining-IUST
|
88f7a5541278f1fe907ca9b70c990a27f60900b2
|
[
"Apache-2.0"
] | 2
|
2020-07-10T17:58:07.000Z
|
2020-12-22T09:02:39.000Z
|
Project/EnhancedDeepPath/scripts/sl_policy.py
|
iust-projects/Data-Mining-IUST
|
88f7a5541278f1fe907ca9b70c990a27f60900b2
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
from itertools import count
import sys
from networks import policy_nn
from utils import *
from env import Env
from BFS.KB import KB
from BFS.BFS import BFS
import time
relation = sys.argv[1]
# episodes = int(sys.argv[2])
graphpath = dataPath + 'tasks/' + relation + '/' + 'graph.txt'
relationPath = dataPath + 'tasks/' + relation + '/' + 'train_pos'
class SupervisedPolicy(object):
"""docstring for SupervisedPolicy"""
def __init__(self, learning_rate = 0.001):
self.initializer = tf.contrib.layers.xavier_initializer()
with tf.variable_scope('supervised_policy'):
self.state = tf.placeholder(tf.float32, [None, state_dim], name = 'state')
self.action = tf.placeholder(tf.int32, [None], name = 'action')
self.action_prob = policy_nn(self.state, state_dim, action_space, self.initializer)
action_mask = tf.cast(tf.one_hot(self.action, depth = action_space), tf.bool)
self.picked_action_prob = tf.boolean_mask(self.action_prob, action_mask)
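            # Loss: negative log-likelihood of the expert actions plus the collected L2 regularization terms.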
self.loss = tf.reduce_sum(-tf.log(self.picked_action_prob)) + sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope = 'supervised_policy'))
self.optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate)
self.train_op = self.optimizer.minimize(self.loss)
def predict(self, state, sess = None):
sess = sess or tf.get_default_session()
return sess.run(self.action_prob, {self.state: state})
def update(self, state, action, sess = None):
sess = sess or tf.get_default_session()
_, loss = sess.run([self.train_op, self.loss], {self.state: state, self.action: action})
return loss
def train():
tf.reset_default_graph()
policy_nn = SupervisedPolicy()
f = open(relationPath)
train_data = f.readlines()
f.close()
num_samples = len(train_data)
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
if num_samples > 500:
num_samples = 500
else:
num_episodes = num_samples
for episode in range(num_samples):
print("Episode %d" % episode)
print('Training Sample:', train_data[episode%num_samples][:-1])
env = Env(dataPath, train_data[episode%num_samples])
sample = train_data[episode%num_samples].split()
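            # Ask the teacher to extract expert episodes between the two entities from the KB graph.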
try:
good_episodes = teacher(sample[0], sample[1], 5, env, graphpath)
except Exception as e:
print('Cannot find a path')
continue
for item in good_episodes:
state_batch = []
action_batch = []
for t, transition in enumerate(item):
state_batch.append(transition.state)
action_batch.append(transition.action)
state_batch = np.squeeze(state_batch)
state_batch = np.reshape(state_batch, [-1, state_dim])
policy_nn.update(state_batch, action_batch)
saver.save(sess, 'models/policy_supervised_' + relation)
print('Model saved')
def test(test_episodes):
tf.reset_default_graph()
policy_nn = SupervisedPolicy()
f = open(relationPath)
test_data = f.readlines()
f.close()
test_num = len(test_data)
test_data = test_data[-test_episodes:]
print(len(test_data))
success = 0
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, 'models/policy_supervised_'+ relation)
print('Model reloaded')
for episode in range(len(test_data)):
print('Test sample %d: %s' % (episode,test_data[episode][:-1]))
env = Env(dataPath, test_data[episode])
sample = test_data[episode].split()
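            # Initial state: [source entity id, target entity id, 0] for this test triple.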
state_idx = [env.entity2id_[sample[0]], env.entity2id_[sample[1]], 0]
for t in count():
state_vec = env.idx_state(state_idx)
action_probs = policy_nn.predict(state_vec)
action_chosen = np.random.choice(np.arange(action_space), p = np.squeeze(action_probs))
reward, new_state, done = env.interact(state_idx, action_chosen)
if done or t == max_steps_test:
if done:
print('Success')
success += 1
print('Episode ends\n')
break
state_idx = new_state
        print('Success percentage:', success/test_episodes)
if __name__ == "__main__":
train()
# test(50)
| 30.593985
| 152
| 0.717375
|
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
from itertools import count
import sys
from networks import policy_nn
from utils import *
from env import Env
from BFS.KB import KB
from BFS.BFS import BFS
import time
relation = sys.argv[1]
graphpath = dataPath + 'tasks/' + relation + '/' + 'graph.txt'
relationPath = dataPath + 'tasks/' + relation + '/' + 'train_pos'
class SupervisedPolicy(object):
def __init__(self, learning_rate = 0.001):
self.initializer = tf.contrib.layers.xavier_initializer()
with tf.variable_scope('supervised_policy'):
self.state = tf.placeholder(tf.float32, [None, state_dim], name = 'state')
self.action = tf.placeholder(tf.int32, [None], name = 'action')
self.action_prob = policy_nn(self.state, state_dim, action_space, self.initializer)
action_mask = tf.cast(tf.one_hot(self.action, depth = action_space), tf.bool)
self.picked_action_prob = tf.boolean_mask(self.action_prob, action_mask)
self.loss = tf.reduce_sum(-tf.log(self.picked_action_prob)) + sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope = 'supervised_policy'))
self.optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate)
self.train_op = self.optimizer.minimize(self.loss)
def predict(self, state, sess = None):
sess = sess or tf.get_default_session()
return sess.run(self.action_prob, {self.state: state})
def update(self, state, action, sess = None):
sess = sess or tf.get_default_session()
_, loss = sess.run([self.train_op, self.loss], {self.state: state, self.action: action})
return loss
def train():
tf.reset_default_graph()
policy_nn = SupervisedPolicy()
f = open(relationPath)
train_data = f.readlines()
f.close()
num_samples = len(train_data)
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
if num_samples > 500:
num_samples = 500
else:
num_episodes = num_samples
for episode in range(num_samples):
print("Episode %d" % episode)
print('Training Sample:', train_data[episode%num_samples][:-1])
env = Env(dataPath, train_data[episode%num_samples])
sample = train_data[episode%num_samples].split()
try:
good_episodes = teacher(sample[0], sample[1], 5, env, graphpath)
except Exception as e:
print('Cannot find a path')
continue
for item in good_episodes:
state_batch = []
action_batch = []
for t, transition in enumerate(item):
state_batch.append(transition.state)
action_batch.append(transition.action)
state_batch = np.squeeze(state_batch)
state_batch = np.reshape(state_batch, [-1, state_dim])
policy_nn.update(state_batch, action_batch)
saver.save(sess, 'models/policy_supervised_' + relation)
print('Model saved')
def test(test_episodes):
tf.reset_default_graph()
policy_nn = SupervisedPolicy()
f = open(relationPath)
test_data = f.readlines()
f.close()
test_num = len(test_data)
test_data = test_data[-test_episodes:]
print(len(test_data))
success = 0
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, 'models/policy_supervised_'+ relation)
print('Model reloaded')
for episode in range(len(test_data)):
print('Test sample %d: %s' % (episode,test_data[episode][:-1]))
env = Env(dataPath, test_data[episode])
sample = test_data[episode].split()
state_idx = [env.entity2id_[sample[0]], env.entity2id_[sample[1]], 0]
for t in count():
state_vec = env.idx_state(state_idx)
action_probs = policy_nn.predict(state_vec)
action_chosen = np.random.choice(np.arange(action_space), p = np.squeeze(action_probs))
reward, new_state, done = env.interact(state_idx, action_chosen)
if done or t == max_steps_test:
if done:
print('Success')
success += 1
print('Episode ends\n')
break
state_idx = new_state
        print('Success percentage:', success/test_episodes)
if __name__ == "__main__":
train()
| true
| true
|
f70c06fd558229c1a63658ce7eb7a0987e13c526
| 415
|
py
|
Python
|
students/K33422/Iskhakova_Emina/labs/lab3/admin.py
|
emina13/ITMO_ICT_WebDevelopment_2021-2022
|
498a6138e352e7e0ca40d1eb301bc29416158f51
|
[
"MIT"
] | null | null | null |
students/K33422/Iskhakova_Emina/labs/lab3/admin.py
|
emina13/ITMO_ICT_WebDevelopment_2021-2022
|
498a6138e352e7e0ca40d1eb301bc29416158f51
|
[
"MIT"
] | null | null | null |
students/K33422/Iskhakova_Emina/labs/lab3/admin.py
|
emina13/ITMO_ICT_WebDevelopment_2021-2022
|
498a6138e352e7e0ca40d1eb301bc29416158f51
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import *
admin.site.register(Client)
admin.site.register(ServicesPL)
admin.site.register(MaterialsPL)
admin.site.register(Request)
admin.site.register(ChosenServices)
admin.site.register(ChosenMaterials)
admin.site.register(WorkGroup)
admin.site.register(Executor)
admin.site.register(Invoice)
admin.site.register(PaymentOrder)
admin.site.register(User)
| 27.666667
| 37
| 0.804819
|
from django.contrib import admin
from .models import *
admin.site.register(Client)
admin.site.register(ServicesPL)
admin.site.register(MaterialsPL)
admin.site.register(Request)
admin.site.register(ChosenServices)
admin.site.register(ChosenMaterials)
admin.site.register(WorkGroup)
admin.site.register(Executor)
admin.site.register(Invoice)
admin.site.register(PaymentOrder)
admin.site.register(User)
| true
| true
|
f70c0739ded8c4ed003bf1865ab7f1e637ca68d0
| 1,378
|
py
|
Python
|
pytorch-frontend/caffe2/python/operator_test/glu_op_test.py
|
AndreasKaratzas/stonne
|
2915fcc46cc94196303d81abbd1d79a56d6dd4a9
|
[
"MIT"
] | 40
|
2021-06-01T07:37:59.000Z
|
2022-03-25T01:42:09.000Z
|
pytorch-frontend/caffe2/python/operator_test/glu_op_test.py
|
AndreasKaratzas/stonne
|
2915fcc46cc94196303d81abbd1d79a56d6dd4a9
|
[
"MIT"
] | 14
|
2021-06-01T11:52:46.000Z
|
2022-03-25T02:13:08.000Z
|
pytorch-frontend/caffe2/python/operator_test/glu_op_test.py
|
AndreasKaratzas/stonne
|
2915fcc46cc94196303d81abbd1d79a56d6dd4a9
|
[
"MIT"
] | 7
|
2021-07-20T19:34:26.000Z
|
2022-03-13T21:07:36.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import assume, given, settings, HealthCheck
import hypothesis.strategies as st
import numpy as np
import unittest
@st.composite
def _glu_old_input(draw):
dims = draw(st.lists(st.integers(min_value=1, max_value=5), min_size=1, max_size=3))
axis = draw(st.integers(min_value=0, max_value=len(dims)))
# The axis dimension must be divisible by two
axis_dim = 2 * draw(st.integers(min_value=1, max_value=2))
dims.insert(axis, axis_dim)
X = draw(hu.arrays(dims, np.float32, None))
return (X, axis)
class TestGlu(serial.SerializedTestCase):
@given(
X_axis=_glu_old_input(),
**hu.gcs
)
@settings(deadline=10000)
def test_glu_old(self, X_axis, gc, dc):
X, axis = X_axis
def glu_ref(X):
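            # Reference GLU: split along `axis`, gate the first half with sigmoid of the second.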
x1, x2 = np.split(X, [X.shape[axis] // 2], axis=axis)
Y = x1 * (1. / (1. + np.exp(-x2)))
return [Y]
op = core.CreateOperator("Glu", ["X"], ["Y"], dim=axis)
self.assertReferenceChecks(gc, op, [X], glu_ref)
if __name__ == "__main__":
unittest.main()
| 29.956522
| 88
| 0.681422
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import assume, given, settings, HealthCheck
import hypothesis.strategies as st
import numpy as np
import unittest
@st.composite
def _glu_old_input(draw):
dims = draw(st.lists(st.integers(min_value=1, max_value=5), min_size=1, max_size=3))
axis = draw(st.integers(min_value=0, max_value=len(dims)))
axis_dim = 2 * draw(st.integers(min_value=1, max_value=2))
dims.insert(axis, axis_dim)
X = draw(hu.arrays(dims, np.float32, None))
return (X, axis)
class TestGlu(serial.SerializedTestCase):
@given(
X_axis=_glu_old_input(),
**hu.gcs
)
@settings(deadline=10000)
def test_glu_old(self, X_axis, gc, dc):
X, axis = X_axis
def glu_ref(X):
x1, x2 = np.split(X, [X.shape[axis] // 2], axis=axis)
Y = x1 * (1. / (1. + np.exp(-x2)))
return [Y]
op = core.CreateOperator("Glu", ["X"], ["Y"], dim=axis)
self.assertReferenceChecks(gc, op, [X], glu_ref)
if __name__ == "__main__":
unittest.main()
| true
| true
|
f70c079999af8cc5d3c6169652b547016bc9d133
| 20,765
|
py
|
Python
|
diofant/tests/matrices/test_sparse.py
|
rajkk1/diofant
|
6b361334569e4ec2e8c7d30dc324387a4ad417c2
|
[
"BSD-3-Clause"
] | null | null | null |
diofant/tests/matrices/test_sparse.py
|
rajkk1/diofant
|
6b361334569e4ec2e8c7d30dc324387a4ad417c2
|
[
"BSD-3-Clause"
] | null | null | null |
diofant/tests/matrices/test_sparse.py
|
rajkk1/diofant
|
6b361334569e4ec2e8c7d30dc324387a4ad417c2
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
from diofant import (I, Matrix, MutableDenseMatrix, MutableSparseMatrix,
PurePoly, Rational, ShapeError, SparseMatrix, eye, ones,
zeros)
from diofant.abc import x, y, z
__all__ = ()
def test_sparse_matrix():
def sparse_eye(n):
return SparseMatrix.eye(n)
def sparse_zeros(n):
return SparseMatrix.zeros(n)
# creation args
pytest.raises(TypeError, lambda: SparseMatrix(1, 2))
pytest.raises(ValueError, lambda: SparseMatrix(2, 2, (1, 3, 4, 5, 6)))
a = SparseMatrix((
(1, 0),
(0, 1)
))
assert SparseMatrix(a) == a
a = MutableSparseMatrix([])
b = MutableDenseMatrix([1, 2])
assert a.row_join(b) == b
assert a.col_join(b) == b
assert type(a.row_join(b)) == type(a)
assert type(a.col_join(b)) == type(a)
# test element assignment
a = SparseMatrix((
(1, 0),
(0, 1)
))
a[3] = 4
assert a[1, 1] == 4
a[3] = 1
a[0, 0] = 2
assert a == SparseMatrix((
(2, 0),
(0, 1)
))
a[1, 0] = 5
assert a == SparseMatrix((
(2, 0),
(5, 1)
))
a[1, 1] = 0
assert a == SparseMatrix((
(2, 0),
(5, 0)
))
assert a._smat == {(0, 0): 2, (1, 0): 5}
# test_multiplication
a = SparseMatrix((
(1, 2),
(3, 1),
(0, 6),
))
b = SparseMatrix((
(1, 2),
(3, 0),
))
c = a*b
assert c[0, 0] == 7
assert c[0, 1] == 2
assert c[1, 0] == 6
assert c[1, 1] == 6
assert c[2, 0] == 18
assert c[2, 1] == 0
c = b * x
assert isinstance(c, SparseMatrix)
assert c[0, 0] == x
assert c[0, 1] == 2*x
assert c[1, 0] == 3*x
assert c[1, 1] == 0
c = 5 * b
assert isinstance(c, SparseMatrix)
assert c[0, 0] == 5
assert c[0, 1] == 2*5
assert c[1, 0] == 3*5
assert c[1, 1] == 0
# test_power
A = SparseMatrix([[2, 3], [4, 5]])
assert (A**5)[:] == [6140, 8097, 10796, 14237]
A = SparseMatrix([[2, 1, 3], [4, 2, 4], [6, 12, 1]])
assert (A**3)[:] == [290, 262, 251, 448, 440, 368, 702, 954, 433]
# test_creation
a = SparseMatrix([[x, 0], [0, 0]])
m = a
assert m.cols == m.rows
assert m.cols == 2
assert m[:] == [x, 0, 0, 0]
b = SparseMatrix(2, 2, [x, 0, 0, 0])
m = b
assert m.cols == m.rows
assert m.cols == 2
assert m[:] == [x, 0, 0, 0]
assert a == b
S = sparse_eye(3)
del S[1, :]
assert S == SparseMatrix([
[1, 0, 0],
[0, 0, 1]])
S = sparse_eye(3)
del S[:, 1]
assert S == SparseMatrix([
[1, 0],
[0, 0],
[0, 1]])
S = SparseMatrix.eye(3)
S[2, 1] = 2
S.col_swap(1, 0)
assert S == SparseMatrix([[0, 1, 0],
[1, 0, 0],
[2, 0, 1]])
S.row_swap(0, 1)
assert S == SparseMatrix([[1, 0, 0],
[0, 1, 0],
[2, 0, 1]])
S.col_swap(0, 1)
assert S == SparseMatrix([[0, 1, 0],
[1, 0, 0],
[0, 2, 1]])
S.row_swap(0, 2)
assert S == SparseMatrix([[0, 2, 1],
[1, 0, 0],
[0, 1, 0]])
S.col_swap(0, 2)
assert S == SparseMatrix([[1, 2, 0],
[0, 0, 1],
[0, 1, 0]])
a = SparseMatrix(1, 2, [1, 2])
b = a.copy()
c = a.copy()
assert a[0] == 1
del a[0, :]
assert a == SparseMatrix(0, 2, [])
del b[:, 1]
assert b == SparseMatrix(1, 1, [1])
# test_determinant
assert SparseMatrix(1, 1, [0]).det() == 0
assert SparseMatrix([[1]]).det() == 1
assert SparseMatrix(((-3, 2), (8, -5))).det() == -1
assert SparseMatrix(((x, 1), (y, 2*y))).det() == 2*x*y - y
assert SparseMatrix(( (1, 1, 1),
(1, 2, 3),
(1, 3, 6) )).det() == 1
assert SparseMatrix(( ( 3, -2, 0, 5),
(-2, 1, -2, 2),
( 0, -2, 5, 0),
( 5, 0, 3, 4) )).det() == -289
assert SparseMatrix(( ( 1, 2, 3, 4),
( 5, 6, 7, 8),
( 9, 10, 11, 12),
(13, 14, 15, 16) )).det() == 0
assert SparseMatrix(( (3, 2, 0, 0, 0),
(0, 3, 2, 0, 0),
(0, 0, 3, 2, 0),
(0, 0, 0, 3, 2),
(2, 0, 0, 0, 3) )).det() == 275
assert SparseMatrix(( (1, 0, 1, 2, 12),
(2, 0, 1, 1, 4),
(2, 1, 1, -1, 3),
(3, 2, -1, 1, 8),
(1, 1, 1, 0, 6) )).det() == -55
assert SparseMatrix(( (-5, 2, 3, 4, 5),
( 1, -4, 3, 4, 5),
( 1, 2, -3, 4, 5),
( 1, 2, 3, -2, 5),
( 1, 2, 3, 4, -1) )).det() == 11664
assert SparseMatrix(( ( 2, 7, -1, 3, 2),
( 0, 0, 1, 0, 1),
(-2, 0, 7, 0, 2),
(-3, -2, 4, 5, 3),
( 1, 0, 0, 0, 1) )).det() == 123
# test_slicing
m0 = sparse_eye(4)
assert m0[:3, :3] == sparse_eye(3)
assert m0[2:4, 0:2] == sparse_zeros(2)
m1 = SparseMatrix(3, 3, lambda i, j: i + j)
assert m1[0, :] == SparseMatrix(1, 3, (0, 1, 2))
assert m1[1:3, 1] == SparseMatrix(2, 1, (2, 3))
m2 = SparseMatrix(
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
assert m2[:, -1] == SparseMatrix(4, 1, [3, 7, 11, 15])
assert m2[-2:, :] == SparseMatrix([[8, 9, 10, 11], [12, 13, 14, 15]])
assert SparseMatrix([[1, 2], [3, 4]])[[1], [1]] == Matrix([[4]])
# test_submatrix_assignment
m = sparse_zeros(4)
m[2:4, 2:4] = sparse_eye(2)
assert m == SparseMatrix([(0, 0, 0, 0),
(0, 0, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1)])
assert len(m._smat) == 2
m[:2, :2] = sparse_eye(2)
assert m == sparse_eye(4)
m[:, 0] = SparseMatrix(4, 1, (1, 2, 3, 4))
assert m == SparseMatrix([(1, 0, 0, 0),
(2, 1, 0, 0),
(3, 0, 1, 0),
(4, 0, 0, 1)])
m[:, :] = sparse_zeros(4)
assert m == sparse_zeros(4)
m[:, :] = ((1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12), (13, 14, 15, 16))
assert m == SparseMatrix((( 1, 2, 3, 4),
( 5, 6, 7, 8),
( 9, 10, 11, 12),
(13, 14, 15, 16)))
m[:2, 0] = [0, 0]
assert m == SparseMatrix((( 0, 2, 3, 4),
( 0, 6, 7, 8),
( 9, 10, 11, 12),
(13, 14, 15, 16)))
# test_reshape
m0 = sparse_eye(3)
assert m0.reshape(1, 9) == SparseMatrix(1, 9, (1, 0, 0, 0, 1, 0, 0, 0, 1))
m1 = SparseMatrix(3, 4, lambda i, j: i + j)
assert m1.reshape(4, 3) == \
SparseMatrix([(0, 1, 2), (3, 1, 2), (3, 4, 2), (3, 4, 5)])
assert m1.reshape(2, 6) == \
SparseMatrix([(0, 1, 2, 3, 1, 2), (3, 4, 2, 3, 4, 5)])
# test_applyfunc
m0 = sparse_eye(3)
assert m0.applyfunc(lambda x: 2*x) == sparse_eye(3)*2
assert m0.applyfunc(lambda x: 0 ) == sparse_zeros(3)
# test_LUdecomp
testmat = SparseMatrix([[ 0, 2, 5, 3],
[ 3, 3, 7, 4],
[ 8, 4, 0, 2],
[-2, 6, 3, 4]])
L, U, p = testmat.LUdecomposition()
assert L.is_lower
assert U.is_upper
assert (L*U).permuteBkwd(p) - testmat == sparse_zeros(4)
testmat = SparseMatrix([[ 6, -2, 7, 4],
[ 0, 3, 6, 7],
[ 1, -2, 7, 4],
[-9, 2, 6, 3]])
L, U, p = testmat.LUdecomposition()
assert L.is_lower
assert U.is_upper
assert (L*U).permuteBkwd(p) - testmat == sparse_zeros(4)
M = Matrix(((1, x, 1), (2, y, 0), (y, 0, z)))
L, U, p = M.LUdecomposition()
assert L.is_lower
assert U.is_upper
assert (L*U).permuteBkwd(p) - M == sparse_zeros(3)
# test_LUsolve
A = SparseMatrix([[2, 3, 5],
[3, 6, 2],
[8, 3, 6]])
B = SparseMatrix(3, 1, [3, 7, 5])
b = A*B
soln = A.LUsolve(b)
assert soln == B
A = SparseMatrix([[0, -1, 2],
[5, 10, 7],
[8, 3, 4]])
B = SparseMatrix(3, 1, [-1, 2, 5])
b = A*B
soln = A.LUsolve(b)
assert soln == B
# test_inverse
A = sparse_eye(4)
assert A.inv() == sparse_eye(4)
assert A.inv(method='CH') == sparse_eye(4)
assert A.inv(method='LDL') == sparse_eye(4)
A = SparseMatrix([[2, 3, 5],
[3, 6, 2],
[7, 2, 6]])
Ainv = SparseMatrix(Matrix(A).inv())
assert A*Ainv == sparse_eye(3)
assert A.inv(method='CH') == Ainv
assert A.inv(method='LDL') == Ainv
A = SparseMatrix([[2, 3, 5],
[3, 6, 2],
[5, 2, 6]])
Ainv = SparseMatrix(Matrix(A).inv())
assert A*Ainv == sparse_eye(3)
assert A.inv(method='CH') == Ainv
assert A.inv(method='LDL') == Ainv
# test_cross
v1 = Matrix(1, 3, [1, 2, 3])
v2 = Matrix(1, 3, [3, 4, 5])
assert v1.cross(v2) == Matrix(1, 3, [-2, 4, -2])
assert v1.norm(2)**2 == 14
# conjugate
a = SparseMatrix(((1, 2 + I), (3, 4)))
assert a.C == SparseMatrix([
[1, 2 - I],
[3, 4]
])
# mul
assert a*Matrix(2, 2, [1, 0, 0, 1]) == a
assert a + Matrix(2, 2, [1, 1, 1, 1]) == SparseMatrix([
[2, 3 + I],
[4, 5]
])
assert a*0 == Matrix([[0, 0], [0, 0]])
# col join
assert a.col_join(sparse_eye(2)) == SparseMatrix([
[1, 2 + I],
[3, 4],
[1, 0],
[0, 1]
])
A = SparseMatrix(ones(3))
B = eye(3)
assert A.col_join(B) == Matrix([[1, 1, 1], [1, 1, 1], [1, 1, 1],
[1, 0, 0], [0, 1, 0], [0, 0, 1]])
# row join
A = SparseMatrix(((1, 0, 1), (0, 1, 0), (1, 1, 0)))
B = Matrix(((1, 0, 0), (0, 1, 0), (0, 0, 1)))
assert A.row_join(B) == Matrix([[1, 0, 1, 1, 0, 0],
[0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 1]])
# symmetric
assert not a.is_symmetric(simplify=False)
assert sparse_eye(3).is_symmetric(simplify=False)
# test_cofactor
assert sparse_eye(3) == sparse_eye(3).cofactorMatrix()
test = SparseMatrix([[1, 3, 2], [2, 6, 3], [2, 3, 6]])
assert test.cofactorMatrix() == \
SparseMatrix([[27, -6, -6], [-12, 2, 3], [-3, 1, 0]])
test = SparseMatrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert test.cofactorMatrix() == \
SparseMatrix([[-3, 6, -3], [6, -12, 6], [-3, 6, -3]])
# test_jacobian
L = SparseMatrix(1, 2, [x**2*y, 2*y**2 + x*y])
syms = [x, y]
assert L.jacobian(syms) == Matrix([[2*x*y, x**2], [y, 4*y + x]])
L = SparseMatrix(1, 2, [x, x**2*y**3])
assert L.jacobian(syms) == SparseMatrix([[1, 0], [2*x*y**3, x**2*3*y**2]])
# test_QR
A = Matrix([[1, 2], [2, 3]])
Q, S = A.QRdecomposition()
R = Rational
assert Q == Matrix([
[ 5**R(-1, 2), (R(2)/5)*(R(1)/5)**R(-1, 2)],
[2*5**R(-1, 2), (-R(1)/5)*(R(1)/5)**R(-1, 2)]])
assert S == Matrix([
[5**R(1, 2), 8*5**R(-1, 2)],
[ 0, (R(1)/5)**R(1, 2)]])
assert Q*S == A
assert Q.T * Q == sparse_eye(2)
R = Rational
# test nullspace
# first test reduced row-ech form
M = SparseMatrix([[5, 7, 2, 1],
[1, 6, 2, -1]])
out, tmp = M.rref()
assert out == Matrix([[1, 0, -R(2)/23, R(13)/23],
[0, 1, R(8)/23, R(-6)/23]])
M = SparseMatrix([[ 1, 3, 0, 2, 6, 3, 1],
[-2, -6, 0, -2, -8, 3, 1],
[ 3, 9, 0, 0, 6, 6, 2],
[-1, -3, 0, 1, 0, 9, 3]])
out, tmp = M.rref()
assert out == Matrix([[1, 3, 0, 0, 2, 0, 0],
[0, 0, 0, 1, 2, 0, 0],
[0, 0, 0, 0, 0, 1, R(1)/3],
[0, 0, 0, 0, 0, 0, 0]])
# now check the vectors
basis = M.nullspace()
assert basis[0] == Matrix([-3, 1, 0, 0, 0, 0, 0])
assert basis[1] == Matrix([0, 0, 1, 0, 0, 0, 0])
assert basis[2] == Matrix([-2, 0, 0, -2, 1, 0, 0])
assert basis[3] == Matrix([0, 0, 0, 0, 0, R(-1)/3, 1])
# test eigen
sparse_eye3 = sparse_eye(3)
assert sparse_eye3.charpoly(x) == PurePoly(((x - 1)**3))
assert sparse_eye3.charpoly(y) == PurePoly(((y - 1)**3))
# test values
M = Matrix([( 0, 1, -1),
( 1, 1, 0),
(-1, 0, 1)])
vals = M.eigenvals()
assert sorted(vals) == [-1, 1, 2]
R = Rational
M = Matrix([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
assert M.eigenvects() == [(1, 3, [
Matrix([1, 0, 0]),
Matrix([0, 1, 0]),
Matrix([0, 0, 1])])]
M = Matrix([[5, 0, 2],
[3, 2, 0],
[0, 0, 1]])
assert M.eigenvects() == [(1, 1, [Matrix([R(-1)/2, R(3)/2, 1])]),
(2, 1, [Matrix([0, 1, 0])]),
(5, 1, [Matrix([1, 1, 0])])]
assert M.zeros(3, 5) == SparseMatrix(3, 5, {})
A = SparseMatrix(10, 10, {(0, 0): 18, (0, 9): 12, (1, 4): 18, (2, 7): 16, (3, 9): 12, (4, 2): 19, (5, 7): 16, (6, 2): 12, (9, 7): 18})
assert A.row_list() == [(0, 0, 18), (0, 9, 12), (1, 4, 18), (2, 7, 16), (3, 9, 12), (4, 2, 19), (5, 7, 16), (6, 2, 12), (9, 7, 18)]
assert A.col_list() == [(0, 0, 18), (4, 2, 19), (6, 2, 12), (1, 4, 18), (2, 7, 16), (5, 7, 16), (9, 7, 18), (0, 9, 12), (3, 9, 12)]
assert SparseMatrix.eye(2).nnz() == 2
M = SparseMatrix.eye(3)*2
M[1, 0] = -1
M.col_op(1, lambda v, i: v + 2*M[i, 0])
assert M == Matrix([[ 2, 4, 0], [-1, 0, 0], [ 0, 0, 2]])
M = SparseMatrix.zeros(3)
M.fill(1)
assert M == ones(3)
assert SparseMatrix(ones(0, 3)).tolist() == []
def test_eq():
A = SparseMatrix(((1, 2), (3, 4)))
assert A != 1
assert A != zeros(2, 1)
def test_transpose():
assert SparseMatrix(((1, 2), (3, 4))).transpose() == \
SparseMatrix(((1, 3), (2, 4)))
def test_trace():
assert SparseMatrix(((1, 2), (3, 4))).trace() == 5
assert SparseMatrix(((0, 0), (0, 4))).trace() == 4
def test_CL_RL():
assert SparseMatrix(((1, 2), (3, 4))).row_list() == \
[(0, 0, 1), (0, 1, 2), (1, 0, 3), (1, 1, 4)]
assert SparseMatrix(((1, 2), (3, 4))).col_list() == \
[(0, 0, 1), (1, 0, 3), (0, 1, 2), (1, 1, 4)]
def test_add():
assert SparseMatrix(((1, 0), (0, 1))) + SparseMatrix(((0, 1), (1, 0))) == \
SparseMatrix(((1, 1), (1, 1)))
a = SparseMatrix(100, 100, lambda i, j: int(j != 0 and i % j == 0))
b = SparseMatrix(100, 100, lambda i, j: int(i != 0 and j % i == 0))
assert (len(a._smat) + len(b._smat) - len((a + b)._smat) > 0)
def test_errors():
pytest.raises(ValueError, lambda: SparseMatrix(1.4, 2, lambda i, j: 0))
pytest.raises(ValueError, lambda: SparseMatrix(2, 2, 1))
pytest.raises(TypeError, lambda: SparseMatrix([1, 2, 3], [1, 2]))
pytest.raises(ValueError, lambda: SparseMatrix([[1, 2], [3, 4]])[(1, 2, 3)])
pytest.raises(IndexError, lambda: SparseMatrix([[1, 2], [3, 4]])[5])
pytest.raises(ValueError, lambda: SparseMatrix([[1, 2], [3, 4]])[1, 2, 3])
pytest.raises(TypeError,
lambda: SparseMatrix([[1, 2],
[3, 4]]).copyin_list([0, 1], set()))
pytest.raises(IndexError, lambda: SparseMatrix([[1, 2], [3, 4]])[1, 2])
pytest.raises(TypeError, lambda: SparseMatrix([1, 2, 3]).cross(1))
pytest.raises(IndexError, lambda: SparseMatrix(1, 2, [1, 2])[3])
pytest.raises(ShapeError,
lambda: SparseMatrix(1, 2,
[1, 2]) + SparseMatrix(2, 1, [2, 1]))
pytest.raises(IndexError, lambda: SparseMatrix([1, 2, 3])[3, 0])
pytest.raises(TypeError, lambda: SparseMatrix([1, 2, 3]).applyfunc(1))
pytest.raises(ValueError, lambda: SparseMatrix([1, 2, 3]).reshape(2, 2))
pytest.raises(ValueError,
lambda: SparseMatrix([[2, 3], [4, 1]]).cholesky())
pytest.raises(ValueError,
lambda: SparseMatrix([[2, 3], [4, 1]]).LDLdecomposition())
pytest.raises(ValueError, lambda: SparseMatrix([[2, 3], [4, 1]]).add(1))
pytest.raises(ShapeError,
lambda: SparseMatrix([[1, 2],
[3, 4]]).row_join(Matrix([[1, 2]])))
pytest.raises(ShapeError,
lambda: SparseMatrix([[1, 2],
[3, 4]]).col_join(Matrix([1, 2])))
pytest.raises(ShapeError,
lambda: SparseMatrix([[1, 2],
[3, 4]]).copyin_matrix([1, 0],
Matrix([1, 2])))
def test_len():
assert not SparseMatrix()
assert SparseMatrix() == SparseMatrix([])
assert SparseMatrix() == SparseMatrix([[]])
def test_sparse_zeros_sparse_eye():
assert SparseMatrix.eye(3) == eye(3, cls=SparseMatrix)
assert len(SparseMatrix.eye(3)._smat) == 3
assert SparseMatrix.zeros(3) == zeros(3, cls=SparseMatrix)
assert len(SparseMatrix.zeros(3)._smat) == 0
def test_copyin():
s = SparseMatrix(3, 3, {})
s[1, 0] = 1
assert s[:, 0] == SparseMatrix(Matrix([0, 1, 0]))
assert s[3] == 1
assert s[3: 4] == [1]
s[1, 1] = 42
assert s[1, 1] == 42
assert s[1, 1:] == SparseMatrix([[42, 0]])
s[1, 1:] = Matrix([[5, 6]])
assert s[1, :] == SparseMatrix([[1, 5, 6]])
s[1, 1:] = [[42, 43]]
assert s[1, :] == SparseMatrix([[1, 42, 43]])
s[0, 0] = 17
assert s[:, :1] == SparseMatrix([17, 1, 0])
s[0, 0] = [1, 1, 1]
assert s[:, 0] == SparseMatrix([1, 1, 1])
s[0, 0] = Matrix([1, 1, 1])
assert s[:, 0] == SparseMatrix([1, 1, 1])
s[0, 0] = SparseMatrix([1, 1, 1])
assert s[:, 0] == SparseMatrix([1, 1, 1])
def test_sparse_solve():
A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
assert A.cholesky() == Matrix([
[ 5, 0, 0],
[ 3, 3, 0],
[-1, 1, 3]])
assert A.cholesky() * A.cholesky().T == Matrix([
[25, 15, -5],
[15, 18, 0],
[-5, 0, 11]])
A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
L, D = A.LDLdecomposition()
assert 15*L == Matrix([
[15, 0, 0],
[ 9, 15, 0],
[-3, 5, 15]])
assert D == Matrix([
[25, 0, 0],
[ 0, 9, 0],
[ 0, 0, 9]])
assert L * D * L.T == A
A = SparseMatrix(((3, 0, 2), (0, 0, 1), (1, 2, 0)))
assert A.inv() * A == SparseMatrix(eye(3))
A = SparseMatrix([
[ 2, -1, 0],
[-1, 2, -1],
[ 0, 0, 2]])
ans = SparseMatrix([
[Rational(2, 3), Rational(1, 3), Rational(1, 6)],
[Rational(1, 3), Rational(2, 3), Rational(1, 3)],
[ 0, 0, Rational(1, 2)]])
assert A.inv(method='CH') == ans
assert A.inv(method='LDL') == ans
assert A * ans == SparseMatrix(eye(3))
s = A.solve(A[:, 0], 'LDL')
assert A*s == A[:, 0]
s = A.solve(A[:, 0], 'CH')
assert A*s == A[:, 0]
A = A.col_join(A)
s = A.solve_least_squares(A[:, 0], 'CH')
assert A*s == A[:, 0]
s = A.solve_least_squares(A[:, 0], 'LDL')
assert A*s == A[:, 0]
pytest.raises(ValueError, lambda: SparseMatrix([[1, 0, 1],
[0, 0, 1]]).solve([1, 1]))
pytest.raises(ValueError, lambda: SparseMatrix([[1, 0], [0, 0],
[2, 1]]).solve([1, 1, 1]))
def test_hermitian():
a = SparseMatrix([[0, I], [-I, 0]])
assert a.is_hermitian
a = SparseMatrix([[1, I], [-I, 1]])
assert a.is_hermitian
a[0, 0] = 2*I
assert a.is_hermitian is False
a[0, 0] = x
assert a.is_hermitian is None
a[0, 1] = a[1, 0]*I
assert a.is_hermitian is False
def test_fill():
a = SparseMatrix([[0, I], [-I, 0]])
a.fill(0)
assert a == Matrix([[0, 0], [0, 0]])
| 32.547022
| 138
| 0.420997
|
import pytest
from diofant import (I, Matrix, MutableDenseMatrix, MutableSparseMatrix,
PurePoly, Rational, ShapeError, SparseMatrix, eye, ones,
zeros)
from diofant.abc import x, y, z
__all__ = ()
def test_sparse_matrix():
def sparse_eye(n):
return SparseMatrix.eye(n)
def sparse_zeros(n):
return SparseMatrix.zeros(n)
pytest.raises(TypeError, lambda: SparseMatrix(1, 2))
pytest.raises(ValueError, lambda: SparseMatrix(2, 2, (1, 3, 4, 5, 6)))
a = SparseMatrix((
(1, 0),
(0, 1)
))
assert SparseMatrix(a) == a
a = MutableSparseMatrix([])
b = MutableDenseMatrix([1, 2])
assert a.row_join(b) == b
assert a.col_join(b) == b
assert type(a.row_join(b)) == type(a)
assert type(a.col_join(b)) == type(a)
a = SparseMatrix((
(1, 0),
(0, 1)
))
a[3] = 4
assert a[1, 1] == 4
a[3] = 1
a[0, 0] = 2
assert a == SparseMatrix((
(2, 0),
(0, 1)
))
a[1, 0] = 5
assert a == SparseMatrix((
(2, 0),
(5, 1)
))
a[1, 1] = 0
assert a == SparseMatrix((
(2, 0),
(5, 0)
))
assert a._smat == {(0, 0): 2, (1, 0): 5}
a = SparseMatrix((
(1, 2),
(3, 1),
(0, 6),
))
b = SparseMatrix((
(1, 2),
(3, 0),
))
c = a*b
assert c[0, 0] == 7
assert c[0, 1] == 2
assert c[1, 0] == 6
assert c[1, 1] == 6
assert c[2, 0] == 18
assert c[2, 1] == 0
c = b * x
assert isinstance(c, SparseMatrix)
assert c[0, 0] == x
assert c[0, 1] == 2*x
assert c[1, 0] == 3*x
assert c[1, 1] == 0
c = 5 * b
assert isinstance(c, SparseMatrix)
assert c[0, 0] == 5
assert c[0, 1] == 2*5
assert c[1, 0] == 3*5
assert c[1, 1] == 0
A = SparseMatrix([[2, 3], [4, 5]])
assert (A**5)[:] == [6140, 8097, 10796, 14237]
A = SparseMatrix([[2, 1, 3], [4, 2, 4], [6, 12, 1]])
assert (A**3)[:] == [290, 262, 251, 448, 440, 368, 702, 954, 433]
a = SparseMatrix([[x, 0], [0, 0]])
m = a
assert m.cols == m.rows
assert m.cols == 2
assert m[:] == [x, 0, 0, 0]
b = SparseMatrix(2, 2, [x, 0, 0, 0])
m = b
assert m.cols == m.rows
assert m.cols == 2
assert m[:] == [x, 0, 0, 0]
assert a == b
S = sparse_eye(3)
del S[1, :]
assert S == SparseMatrix([
[1, 0, 0],
[0, 0, 1]])
S = sparse_eye(3)
del S[:, 1]
assert S == SparseMatrix([
[1, 0],
[0, 0],
[0, 1]])
S = SparseMatrix.eye(3)
S[2, 1] = 2
S.col_swap(1, 0)
assert S == SparseMatrix([[0, 1, 0],
[1, 0, 0],
[2, 0, 1]])
S.row_swap(0, 1)
assert S == SparseMatrix([[1, 0, 0],
[0, 1, 0],
[2, 0, 1]])
S.col_swap(0, 1)
assert S == SparseMatrix([[0, 1, 0],
[1, 0, 0],
[0, 2, 1]])
S.row_swap(0, 2)
assert S == SparseMatrix([[0, 2, 1],
[1, 0, 0],
[0, 1, 0]])
S.col_swap(0, 2)
assert S == SparseMatrix([[1, 2, 0],
[0, 0, 1],
[0, 1, 0]])
a = SparseMatrix(1, 2, [1, 2])
b = a.copy()
c = a.copy()
assert a[0] == 1
del a[0, :]
assert a == SparseMatrix(0, 2, [])
del b[:, 1]
assert b == SparseMatrix(1, 1, [1])
assert SparseMatrix(1, 1, [0]).det() == 0
assert SparseMatrix([[1]]).det() == 1
assert SparseMatrix(((-3, 2), (8, -5))).det() == -1
assert SparseMatrix(((x, 1), (y, 2*y))).det() == 2*x*y - y
assert SparseMatrix(( (1, 1, 1),
(1, 2, 3),
(1, 3, 6) )).det() == 1
assert SparseMatrix(( ( 3, -2, 0, 5),
(-2, 1, -2, 2),
( 0, -2, 5, 0),
( 5, 0, 3, 4) )).det() == -289
assert SparseMatrix(( ( 1, 2, 3, 4),
( 5, 6, 7, 8),
( 9, 10, 11, 12),
(13, 14, 15, 16) )).det() == 0
assert SparseMatrix(( (3, 2, 0, 0, 0),
(0, 3, 2, 0, 0),
(0, 0, 3, 2, 0),
(0, 0, 0, 3, 2),
(2, 0, 0, 0, 3) )).det() == 275
assert SparseMatrix(( (1, 0, 1, 2, 12),
(2, 0, 1, 1, 4),
(2, 1, 1, -1, 3),
(3, 2, -1, 1, 8),
(1, 1, 1, 0, 6) )).det() == -55
assert SparseMatrix(( (-5, 2, 3, 4, 5),
( 1, -4, 3, 4, 5),
( 1, 2, -3, 4, 5),
( 1, 2, 3, -2, 5),
( 1, 2, 3, 4, -1) )).det() == 11664
assert SparseMatrix(( ( 2, 7, -1, 3, 2),
( 0, 0, 1, 0, 1),
(-2, 0, 7, 0, 2),
(-3, -2, 4, 5, 3),
( 1, 0, 0, 0, 1) )).det() == 123
m0 = sparse_eye(4)
assert m0[:3, :3] == sparse_eye(3)
assert m0[2:4, 0:2] == sparse_zeros(2)
m1 = SparseMatrix(3, 3, lambda i, j: i + j)
assert m1[0, :] == SparseMatrix(1, 3, (0, 1, 2))
assert m1[1:3, 1] == SparseMatrix(2, 1, (2, 3))
m2 = SparseMatrix(
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
assert m2[:, -1] == SparseMatrix(4, 1, [3, 7, 11, 15])
assert m2[-2:, :] == SparseMatrix([[8, 9, 10, 11], [12, 13, 14, 15]])
assert SparseMatrix([[1, 2], [3, 4]])[[1], [1]] == Matrix([[4]])
m = sparse_zeros(4)
m[2:4, 2:4] = sparse_eye(2)
assert m == SparseMatrix([(0, 0, 0, 0),
(0, 0, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1)])
assert len(m._smat) == 2
m[:2, :2] = sparse_eye(2)
assert m == sparse_eye(4)
m[:, 0] = SparseMatrix(4, 1, (1, 2, 3, 4))
assert m == SparseMatrix([(1, 0, 0, 0),
(2, 1, 0, 0),
(3, 0, 1, 0),
(4, 0, 0, 1)])
m[:, :] = sparse_zeros(4)
assert m == sparse_zeros(4)
m[:, :] = ((1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12), (13, 14, 15, 16))
assert m == SparseMatrix((( 1, 2, 3, 4),
( 5, 6, 7, 8),
( 9, 10, 11, 12),
(13, 14, 15, 16)))
m[:2, 0] = [0, 0]
assert m == SparseMatrix((( 0, 2, 3, 4),
( 0, 6, 7, 8),
( 9, 10, 11, 12),
(13, 14, 15, 16)))
m0 = sparse_eye(3)
assert m0.reshape(1, 9) == SparseMatrix(1, 9, (1, 0, 0, 0, 1, 0, 0, 0, 1))
m1 = SparseMatrix(3, 4, lambda i, j: i + j)
assert m1.reshape(4, 3) == \
SparseMatrix([(0, 1, 2), (3, 1, 2), (3, 4, 2), (3, 4, 5)])
assert m1.reshape(2, 6) == \
SparseMatrix([(0, 1, 2, 3, 1, 2), (3, 4, 2, 3, 4, 5)])
m0 = sparse_eye(3)
assert m0.applyfunc(lambda x: 2*x) == sparse_eye(3)*2
assert m0.applyfunc(lambda x: 0 ) == sparse_zeros(3)
testmat = SparseMatrix([[ 0, 2, 5, 3],
[ 3, 3, 7, 4],
[ 8, 4, 0, 2],
[-2, 6, 3, 4]])
L, U, p = testmat.LUdecomposition()
assert L.is_lower
assert U.is_upper
assert (L*U).permuteBkwd(p) - testmat == sparse_zeros(4)
testmat = SparseMatrix([[ 6, -2, 7, 4],
[ 0, 3, 6, 7],
[ 1, -2, 7, 4],
[-9, 2, 6, 3]])
L, U, p = testmat.LUdecomposition()
assert L.is_lower
assert U.is_upper
assert (L*U).permuteBkwd(p) - testmat == sparse_zeros(4)
M = Matrix(((1, x, 1), (2, y, 0), (y, 0, z)))
L, U, p = M.LUdecomposition()
assert L.is_lower
assert U.is_upper
assert (L*U).permuteBkwd(p) - M == sparse_zeros(3)
A = SparseMatrix([[2, 3, 5],
[3, 6, 2],
[8, 3, 6]])
B = SparseMatrix(3, 1, [3, 7, 5])
b = A*B
soln = A.LUsolve(b)
assert soln == B
A = SparseMatrix([[0, -1, 2],
[5, 10, 7],
[8, 3, 4]])
B = SparseMatrix(3, 1, [-1, 2, 5])
b = A*B
soln = A.LUsolve(b)
assert soln == B
A = sparse_eye(4)
assert A.inv() == sparse_eye(4)
assert A.inv(method='CH') == sparse_eye(4)
assert A.inv(method='LDL') == sparse_eye(4)
A = SparseMatrix([[2, 3, 5],
[3, 6, 2],
[7, 2, 6]])
Ainv = SparseMatrix(Matrix(A).inv())
assert A*Ainv == sparse_eye(3)
assert A.inv(method='CH') == Ainv
assert A.inv(method='LDL') == Ainv
A = SparseMatrix([[2, 3, 5],
[3, 6, 2],
[5, 2, 6]])
Ainv = SparseMatrix(Matrix(A).inv())
assert A*Ainv == sparse_eye(3)
assert A.inv(method='CH') == Ainv
assert A.inv(method='LDL') == Ainv
v1 = Matrix(1, 3, [1, 2, 3])
v2 = Matrix(1, 3, [3, 4, 5])
assert v1.cross(v2) == Matrix(1, 3, [-2, 4, -2])
assert v1.norm(2)**2 == 14
a = SparseMatrix(((1, 2 + I), (3, 4)))
assert a.C == SparseMatrix([
[1, 2 - I],
[3, 4]
])
assert a*Matrix(2, 2, [1, 0, 0, 1]) == a
assert a + Matrix(2, 2, [1, 1, 1, 1]) == SparseMatrix([
[2, 3 + I],
[4, 5]
])
assert a*0 == Matrix([[0, 0], [0, 0]])
assert a.col_join(sparse_eye(2)) == SparseMatrix([
[1, 2 + I],
[3, 4],
[1, 0],
[0, 1]
])
A = SparseMatrix(ones(3))
B = eye(3)
assert A.col_join(B) == Matrix([[1, 1, 1], [1, 1, 1], [1, 1, 1],
[1, 0, 0], [0, 1, 0], [0, 0, 1]])
A = SparseMatrix(((1, 0, 1), (0, 1, 0), (1, 1, 0)))
B = Matrix(((1, 0, 0), (0, 1, 0), (0, 0, 1)))
assert A.row_join(B) == Matrix([[1, 0, 1, 1, 0, 0],
[0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 1]])
assert not a.is_symmetric(simplify=False)
assert sparse_eye(3).is_symmetric(simplify=False)
assert sparse_eye(3) == sparse_eye(3).cofactorMatrix()
test = SparseMatrix([[1, 3, 2], [2, 6, 3], [2, 3, 6]])
assert test.cofactorMatrix() == \
SparseMatrix([[27, -6, -6], [-12, 2, 3], [-3, 1, 0]])
test = SparseMatrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert test.cofactorMatrix() == \
SparseMatrix([[-3, 6, -3], [6, -12, 6], [-3, 6, -3]])
L = SparseMatrix(1, 2, [x**2*y, 2*y**2 + x*y])
syms = [x, y]
assert L.jacobian(syms) == Matrix([[2*x*y, x**2], [y, 4*y + x]])
L = SparseMatrix(1, 2, [x, x**2*y**3])
assert L.jacobian(syms) == SparseMatrix([[1, 0], [2*x*y**3, x**2*3*y**2]])
A = Matrix([[1, 2], [2, 3]])
Q, S = A.QRdecomposition()
R = Rational
assert Q == Matrix([
[ 5**R(-1, 2), (R(2)/5)*(R(1)/5)**R(-1, 2)],
[2*5**R(-1, 2), (-R(1)/5)*(R(1)/5)**R(-1, 2)]])
assert S == Matrix([
[5**R(1, 2), 8*5**R(-1, 2)],
[ 0, (R(1)/5)**R(1, 2)]])
assert Q*S == A
assert Q.T * Q == sparse_eye(2)
R = Rational
M = SparseMatrix([[5, 7, 2, 1],
[1, 6, 2, -1]])
out, tmp = M.rref()
assert out == Matrix([[1, 0, -R(2)/23, R(13)/23],
[0, 1, R(8)/23, R(-6)/23]])
M = SparseMatrix([[ 1, 3, 0, 2, 6, 3, 1],
[-2, -6, 0, -2, -8, 3, 1],
[ 3, 9, 0, 0, 6, 6, 2],
[-1, -3, 0, 1, 0, 9, 3]])
out, tmp = M.rref()
assert out == Matrix([[1, 3, 0, 0, 2, 0, 0],
[0, 0, 0, 1, 2, 0, 0],
[0, 0, 0, 0, 0, 1, R(1)/3],
[0, 0, 0, 0, 0, 0, 0]])
basis = M.nullspace()
assert basis[0] == Matrix([-3, 1, 0, 0, 0, 0, 0])
assert basis[1] == Matrix([0, 0, 1, 0, 0, 0, 0])
assert basis[2] == Matrix([-2, 0, 0, -2, 1, 0, 0])
assert basis[3] == Matrix([0, 0, 0, 0, 0, R(-1)/3, 1])
sparse_eye3 = sparse_eye(3)
assert sparse_eye3.charpoly(x) == PurePoly(((x - 1)**3))
assert sparse_eye3.charpoly(y) == PurePoly(((y - 1)**3))
M = Matrix([( 0, 1, -1),
( 1, 1, 0),
(-1, 0, 1)])
vals = M.eigenvals()
assert sorted(vals) == [-1, 1, 2]
R = Rational
M = Matrix([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
assert M.eigenvects() == [(1, 3, [
Matrix([1, 0, 0]),
Matrix([0, 1, 0]),
Matrix([0, 0, 1])])]
M = Matrix([[5, 0, 2],
[3, 2, 0],
[0, 0, 1]])
assert M.eigenvects() == [(1, 1, [Matrix([R(-1)/2, R(3)/2, 1])]),
(2, 1, [Matrix([0, 1, 0])]),
(5, 1, [Matrix([1, 1, 0])])]
assert M.zeros(3, 5) == SparseMatrix(3, 5, {})
A = SparseMatrix(10, 10, {(0, 0): 18, (0, 9): 12, (1, 4): 18, (2, 7): 16, (3, 9): 12, (4, 2): 19, (5, 7): 16, (6, 2): 12, (9, 7): 18})
assert A.row_list() == [(0, 0, 18), (0, 9, 12), (1, 4, 18), (2, 7, 16), (3, 9, 12), (4, 2, 19), (5, 7, 16), (6, 2, 12), (9, 7, 18)]
assert A.col_list() == [(0, 0, 18), (4, 2, 19), (6, 2, 12), (1, 4, 18), (2, 7, 16), (5, 7, 16), (9, 7, 18), (0, 9, 12), (3, 9, 12)]
assert SparseMatrix.eye(2).nnz() == 2
M = SparseMatrix.eye(3)*2
M[1, 0] = -1
M.col_op(1, lambda v, i: v + 2*M[i, 0])
assert M == Matrix([[ 2, 4, 0], [-1, 0, 0], [ 0, 0, 2]])
M = SparseMatrix.zeros(3)
M.fill(1)
assert M == ones(3)
assert SparseMatrix(ones(0, 3)).tolist() == []
def test_eq():
A = SparseMatrix(((1, 2), (3, 4)))
assert A != 1
assert A != zeros(2, 1)
def test_transpose():
assert SparseMatrix(((1, 2), (3, 4))).transpose() == \
SparseMatrix(((1, 3), (2, 4)))
def test_trace():
assert SparseMatrix(((1, 2), (3, 4))).trace() == 5
assert SparseMatrix(((0, 0), (0, 4))).trace() == 4
def test_CL_RL():
assert SparseMatrix(((1, 2), (3, 4))).row_list() == \
[(0, 0, 1), (0, 1, 2), (1, 0, 3), (1, 1, 4)]
assert SparseMatrix(((1, 2), (3, 4))).col_list() == \
[(0, 0, 1), (1, 0, 3), (0, 1, 2), (1, 1, 4)]
def test_add():
assert SparseMatrix(((1, 0), (0, 1))) + SparseMatrix(((0, 1), (1, 0))) == \
SparseMatrix(((1, 1), (1, 1)))
a = SparseMatrix(100, 100, lambda i, j: int(j != 0 and i % j == 0))
b = SparseMatrix(100, 100, lambda i, j: int(i != 0 and j % i == 0))
assert (len(a._smat) + len(b._smat) - len((a + b)._smat) > 0)
def test_errors():
pytest.raises(ValueError, lambda: SparseMatrix(1.4, 2, lambda i, j: 0))
pytest.raises(ValueError, lambda: SparseMatrix(2, 2, 1))
pytest.raises(TypeError, lambda: SparseMatrix([1, 2, 3], [1, 2]))
pytest.raises(ValueError, lambda: SparseMatrix([[1, 2], [3, 4]])[(1, 2, 3)])
pytest.raises(IndexError, lambda: SparseMatrix([[1, 2], [3, 4]])[5])
pytest.raises(ValueError, lambda: SparseMatrix([[1, 2], [3, 4]])[1, 2, 3])
pytest.raises(TypeError,
lambda: SparseMatrix([[1, 2],
[3, 4]]).copyin_list([0, 1], set()))
pytest.raises(IndexError, lambda: SparseMatrix([[1, 2], [3, 4]])[1, 2])
pytest.raises(TypeError, lambda: SparseMatrix([1, 2, 3]).cross(1))
pytest.raises(IndexError, lambda: SparseMatrix(1, 2, [1, 2])[3])
pytest.raises(ShapeError,
lambda: SparseMatrix(1, 2,
[1, 2]) + SparseMatrix(2, 1, [2, 1]))
pytest.raises(IndexError, lambda: SparseMatrix([1, 2, 3])[3, 0])
pytest.raises(TypeError, lambda: SparseMatrix([1, 2, 3]).applyfunc(1))
pytest.raises(ValueError, lambda: SparseMatrix([1, 2, 3]).reshape(2, 2))
pytest.raises(ValueError,
lambda: SparseMatrix([[2, 3], [4, 1]]).cholesky())
pytest.raises(ValueError,
lambda: SparseMatrix([[2, 3], [4, 1]]).LDLdecomposition())
pytest.raises(ValueError, lambda: SparseMatrix([[2, 3], [4, 1]]).add(1))
pytest.raises(ShapeError,
lambda: SparseMatrix([[1, 2],
[3, 4]]).row_join(Matrix([[1, 2]])))
pytest.raises(ShapeError,
lambda: SparseMatrix([[1, 2],
[3, 4]]).col_join(Matrix([1, 2])))
pytest.raises(ShapeError,
lambda: SparseMatrix([[1, 2],
[3, 4]]).copyin_matrix([1, 0],
Matrix([1, 2])))
def test_len():
assert not SparseMatrix()
assert SparseMatrix() == SparseMatrix([])
assert SparseMatrix() == SparseMatrix([[]])
def test_sparse_zeros_sparse_eye():
assert SparseMatrix.eye(3) == eye(3, cls=SparseMatrix)
assert len(SparseMatrix.eye(3)._smat) == 3
assert SparseMatrix.zeros(3) == zeros(3, cls=SparseMatrix)
assert len(SparseMatrix.zeros(3)._smat) == 0
def test_copyin():
s = SparseMatrix(3, 3, {})
s[1, 0] = 1
assert s[:, 0] == SparseMatrix(Matrix([0, 1, 0]))
assert s[3] == 1
assert s[3: 4] == [1]
s[1, 1] = 42
assert s[1, 1] == 42
assert s[1, 1:] == SparseMatrix([[42, 0]])
s[1, 1:] = Matrix([[5, 6]])
assert s[1, :] == SparseMatrix([[1, 5, 6]])
s[1, 1:] = [[42, 43]]
assert s[1, :] == SparseMatrix([[1, 42, 43]])
s[0, 0] = 17
assert s[:, :1] == SparseMatrix([17, 1, 0])
s[0, 0] = [1, 1, 1]
assert s[:, 0] == SparseMatrix([1, 1, 1])
s[0, 0] = Matrix([1, 1, 1])
assert s[:, 0] == SparseMatrix([1, 1, 1])
s[0, 0] = SparseMatrix([1, 1, 1])
assert s[:, 0] == SparseMatrix([1, 1, 1])
def test_sparse_solve():
A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
assert A.cholesky() == Matrix([
[ 5, 0, 0],
[ 3, 3, 0],
[-1, 1, 3]])
assert A.cholesky() * A.cholesky().T == Matrix([
[25, 15, -5],
[15, 18, 0],
[-5, 0, 11]])
A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
L, D = A.LDLdecomposition()
assert 15*L == Matrix([
[15, 0, 0],
[ 9, 15, 0],
[-3, 5, 15]])
assert D == Matrix([
[25, 0, 0],
[ 0, 9, 0],
[ 0, 0, 9]])
assert L * D * L.T == A
A = SparseMatrix(((3, 0, 2), (0, 0, 1), (1, 2, 0)))
assert A.inv() * A == SparseMatrix(eye(3))
A = SparseMatrix([
[ 2, -1, 0],
[-1, 2, -1],
[ 0, 0, 2]])
ans = SparseMatrix([
[Rational(2, 3), Rational(1, 3), Rational(1, 6)],
[Rational(1, 3), Rational(2, 3), Rational(1, 3)],
[ 0, 0, Rational(1, 2)]])
assert A.inv(method='CH') == ans
assert A.inv(method='LDL') == ans
assert A * ans == SparseMatrix(eye(3))
s = A.solve(A[:, 0], 'LDL')
assert A*s == A[:, 0]
s = A.solve(A[:, 0], 'CH')
assert A*s == A[:, 0]
A = A.col_join(A)
s = A.solve_least_squares(A[:, 0], 'CH')
assert A*s == A[:, 0]
s = A.solve_least_squares(A[:, 0], 'LDL')
assert A*s == A[:, 0]
pytest.raises(ValueError, lambda: SparseMatrix([[1, 0, 1],
[0, 0, 1]]).solve([1, 1]))
pytest.raises(ValueError, lambda: SparseMatrix([[1, 0], [0, 0],
[2, 1]]).solve([1, 1, 1]))
def test_hermitian():
a = SparseMatrix([[0, I], [-I, 0]])
assert a.is_hermitian
a = SparseMatrix([[1, I], [-I, 1]])
assert a.is_hermitian
a[0, 0] = 2*I
assert a.is_hermitian is False
a[0, 0] = x
assert a.is_hermitian is None
a[0, 1] = a[1, 0]*I
assert a.is_hermitian is False
def test_fill():
a = SparseMatrix([[0, I], [-I, 0]])
a.fill(0)
assert a == Matrix([[0, 0], [0, 0]])
| true
| true
|
f70c087df6136cae52ef50e6b06ba60de3007853
| 1,505
|
py
|
Python
|
tests/test_resourcerelease.py
|
asears/moviepy
|
6ab3efba36cf7fc5d3245f0ee0dc9244cb141c9e
|
[
"MIT"
] | 1
|
2020-12-20T20:38:52.000Z
|
2020-12-20T20:38:52.000Z
|
tests/test_resourcerelease.py
|
asears/moviepy
|
6ab3efba36cf7fc5d3245f0ee0dc9244cb141c9e
|
[
"MIT"
] | 1
|
2022-03-12T01:04:31.000Z
|
2022-03-12T01:04:31.000Z
|
tests/test_resourcerelease.py
|
asears/moviepy
|
6ab3efba36cf7fc5d3245f0ee0dc9244cb141c9e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Tool tests meant to be run with pytest.
Testing whether issue #596 has been repaired.
Note: Platform dependent test. Will only fail on Windows > NT. """
import time
from os import remove
from os.path import join
from moviepy.video.compositing.CompositeVideoClip import clips_array
from moviepy.video.io.VideoFileClip import VideoFileClip
from moviepy.video.VideoClip import ColorClip
from tests.test_helper import TMP_DIR
def test_release_of_file_via_close():
# Create a random video file.
red = ColorClip((256, 200), color=(255, 0, 0))
green = ColorClip((256, 200), color=(0, 255, 0))
blue = ColorClip((256, 200), color=(0, 0, 255))
red.fps = green.fps = blue.fps = 10
# Repeat this so we can see no conflicts.
for i in range(3):
# Get the name of a temporary file we can use.
local_video_filename = join(
TMP_DIR, "test_release_of_file_via_close_%s.mp4" % int(time.time())
)
clip = clips_array([[red, green, blue]]).with_duration(0.5)
clip.write_videofile(local_video_filename)
# Open it up with VideoFileClip.
video = VideoFileClip(local_video_filename)
video.close()
clip.close()
# Now remove the temporary file.
# This would fail on Windows if the file is still locked.
# This should succeed without exceptions.
remove(local_video_filename)
red.close()
green.close()
blue.close()
| 28.396226
| 79
| 0.662458
|
import time
from os import remove
from os.path import join
from moviepy.video.compositing.CompositeVideoClip import clips_array
from moviepy.video.io.VideoFileClip import VideoFileClip
from moviepy.video.VideoClip import ColorClip
from tests.test_helper import TMP_DIR
def test_release_of_file_via_close():
red = ColorClip((256, 200), color=(255, 0, 0))
green = ColorClip((256, 200), color=(0, 255, 0))
blue = ColorClip((256, 200), color=(0, 0, 255))
red.fps = green.fps = blue.fps = 10
for i in range(3):
local_video_filename = join(
TMP_DIR, "test_release_of_file_via_close_%s.mp4" % int(time.time())
)
clip = clips_array([[red, green, blue]]).with_duration(0.5)
clip.write_videofile(local_video_filename)
video = VideoFileClip(local_video_filename)
video.close()
clip.close()
remove(local_video_filename)
red.close()
green.close()
blue.close()
| true
| true
|
f70c0a607295a9f836d9c828a3c177e182d6a1d4
| 416
|
py
|
Python
|
report_builder/migrations/0007_auto_20190214_1405.py
|
nazmizorlu/django-report-builder
|
0b37cd0c94af15531e487554c774a01dad3b5500
|
[
"BSD-3-Clause"
] | 560
|
2015-01-05T07:14:50.000Z
|
2022-03-11T13:27:42.000Z
|
report_builder/migrations/0007_auto_20190214_1405.py
|
nazmizorlu/django-report-builder
|
0b37cd0c94af15531e487554c774a01dad3b5500
|
[
"BSD-3-Clause"
] | 189
|
2015-01-15T16:55:55.000Z
|
2020-10-29T07:36:51.000Z
|
report_builder/migrations/0007_auto_20190214_1405.py
|
nazmizorlu/django-report-builder
|
0b37cd0c94af15531e487554c774a01dad3b5500
|
[
"BSD-3-Clause"
] | 235
|
2015-01-10T16:56:17.000Z
|
2022-03-29T15:57:03.000Z
|
# Generated by Django 2.1 on 2019-02-14 14:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('report_builder', '0006_auto_20180413_0747'),
]
operations = [
migrations.AlterField(
model_name='filterfield',
name='filter_value',
field=models.CharField(blank=True, max_length=2000),
),
]
| 21.894737
| 64
| 0.622596
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('report_builder', '0006_auto_20180413_0747'),
]
operations = [
migrations.AlterField(
model_name='filterfield',
name='filter_value',
field=models.CharField(blank=True, max_length=2000),
),
]
| true
| true
|
f70c0b2de377270eaba5653ab6d25d86078e095d
| 1,810
|
py
|
Python
|
Bot tiles.py
|
Santiagorich/Piano-tiles-bot
|
bc71c331c4350bfc1949840674ba48a957617686
|
[
"MIT"
] | null | null | null |
Bot tiles.py
|
Santiagorich/Piano-tiles-bot
|
bc71c331c4350bfc1949840674ba48a957617686
|
[
"MIT"
] | null | null | null |
Bot tiles.py
|
Santiagorich/Piano-tiles-bot
|
bc71c331c4350bfc1949840674ba48a957617686
|
[
"MIT"
] | null | null | null |
from pyautogui import *
import pyautogui
import time
import keyboard
import random
import win32api, win32con
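# How the script works: tile screen positions are recorded by pressing Q over
# each tile, a reference pixel colour is sampled the same way, and then holding
# K clicks every recorded tile whose pixel currently matches that colour (J
# clicks the tiles that do not match); pressing P ends the main loop.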
px = 0
py = 0
class Tile():
def __init__(self,px,py):
self.x = px
self.y = py
def click(x,y):
win32api.SetCursorPos((x,y))
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0)
time.sleep(0.1)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0)
tiles = int(input("How many tiles?\n"))
tilesp = []
for tile in range(tiles):
print("Press Q on tile " + str(tile))
while True:
if keyboard.read_key() == "q":
px = position().x
py = position().y
tilesp.append(Tile(px,py))
time.sleep(0.1)
break
print("Press Q on the color we want to snipe")
while True:
if keyboard.read_key() == "q":
px = position().x
py = position().y
color = pyautogui.pixel(px, py)
time.sleep(0.1)
break
print(color)
print("Hold K to snipe or J to reverse snipe! - Press P to stop the bot")
toggle = False
while True:
#if keyboard.read_key() == "l":
# toggle = not toggle
# print(toggle)
# time.sleep(0.1)
#if toggle:
# for tile in tilesp:
# if pyautogui.pixel(tile.x,tile.y) == color:
# click(tile.x,tile.y)
#else:
if keyboard.is_pressed("k") == True:
for tile in tilesp:
if pyautogui.pixel(tile.x,tile.y) == color:
click(tile.x,tile.y)
if keyboard.is_pressed("j") == True:
for tile in tilesp:
if pyautogui.pixel(tile.x,tile.y) != color:
click(tile.x,tile.y)
if keyboard.is_pressed("p") == True:
print("Exiting!")
break
| 27.424242
| 74
| 0.545856
|
from pyautogui import *
import pyautogui
import time
import keyboard
import random
import win32api, win32con
px = 0
py = 0
class Tile():
def __init__(self,px,py):
self.x = px
self.y = py
def click(x,y):
win32api.SetCursorPos((x,y))
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0)
time.sleep(0.1)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0)
tiles = int(input("How many tiles?\n"))
tilesp = []
for tile in range(tiles):
print("Press Q on tile " + str(tile))
while True:
if keyboard.read_key() == "q":
px = position().x
py = position().y
tilesp.append(Tile(px,py))
time.sleep(0.1)
break
print("Press Q on the color we want to snipe")
while True:
if keyboard.read_key() == "q":
px = position().x
py = position().y
color = pyautogui.pixel(px, py)
time.sleep(0.1)
break
print(color)
print("Hold K to snipe or J to reverse snipe! - Press P to stop the bot")
toggle = False
while True:
if keyboard.is_pressed("k") == True:
for tile in tilesp:
if pyautogui.pixel(tile.x,tile.y) == color:
click(tile.x,tile.y)
if keyboard.is_pressed("j") == True:
for tile in tilesp:
if pyautogui.pixel(tile.x,tile.y) != color:
click(tile.x,tile.y)
if keyboard.is_pressed("p") == True:
print("Exiting!")
break
| true
| true
|
f70c0cda81ea85e278d39235993b2c823742b388
| 937
|
py
|
Python
|
isi_sdk_9_0_0/test/test_cluster_node_hardware.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_9_0_0/test/test_cluster_node_hardware.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_9_0_0/test/test_cluster_node_hardware.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 10
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_9_0_0
from isi_sdk_9_0_0.models.cluster_node_hardware import ClusterNodeHardware # noqa: E501
from isi_sdk_9_0_0.rest import ApiException
class TestClusterNodeHardware(unittest.TestCase):
"""ClusterNodeHardware unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testClusterNodeHardware(self):
"""Test ClusterNodeHardware"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_9_0_0.models.cluster_node_hardware.ClusterNodeHardware() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 22.853659
| 96
| 0.716115
|
from __future__ import absolute_import
import unittest
import isi_sdk_9_0_0
from isi_sdk_9_0_0.models.cluster_node_hardware import ClusterNodeHardware
from isi_sdk_9_0_0.rest import ApiException
class TestClusterNodeHardware(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testClusterNodeHardware(self):
pass
if __name__ == '__main__':
unittest.main()
| true
| true
|
f70c0dcdfb66f70f05cade13558581da90e8fcba
| 2,032
|
py
|
Python
|
test/functional/rpc_help.py
|
VeriBlock/b
|
1c2dccb1f87251b72049b75cc4db630c4da1b5c9
|
[
"MIT"
] | 4
|
2020-05-14T11:49:20.000Z
|
2022-01-19T19:54:54.000Z
|
test/functional/rpc_help.py
|
VeriBlock/b
|
1c2dccb1f87251b72049b75cc4db630c4da1b5c9
|
[
"MIT"
] | 125
|
2020-01-16T11:02:04.000Z
|
2022-03-24T12:27:13.000Z
|
test/functional/rpc_help.py
|
VeriBlock/b
|
1c2dccb1f87251b72049b75cc4db630c4da1b5c9
|
[
"MIT"
] | 9
|
2020-04-06T14:31:16.000Z
|
2021-09-30T07:50:29.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Copyright (c) 2019-2021 Xenios SEZC
# https://www.veriblock.org
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC help output."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
import os
class HelpRpcTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.supports_cli = False
def run_test(self):
self.test_categories()
self.dump_help()
def test_categories(self):
node = self.nodes[0]
# wrong argument count
assert_raises_rpc_error(-1, 'help', node.help, 'foo', 'bar')
# invalid argument
assert_raises_rpc_error(-1, 'JSON value is not a string as expected', node.help, 0)
# help of unknown command
assert_equal(node.help('foo'), 'help: unknown command: foo')
# command titles
titles = [line[3:-3] for line in node.help().splitlines() if line.startswith('==')]
components = ['Blockchain', 'Control', 'Generating', 'Mining', 'Network', 'Pop_mining', 'Rawtransactions', 'Util']
if self.is_wallet_compiled():
components.append('Wallet')
if self.is_zmq_compiled():
components.append('Zmq')
assert_equal(titles, components)
def dump_help(self):
dump_dir = os.path.join(self.options.tmpdir, 'rpc_help_dump')
os.mkdir(dump_dir)
calls = [line.split(' ', 1)[0] for line in self.nodes[0].help().splitlines() if line and not line.startswith('==')]
for call in calls:
with open(os.path.join(dump_dir, call), 'w', encoding='utf-8') as f:
# Make sure the node can generate the help at runtime without crashing
f.write(self.nodes[0].help(call))
if __name__ == '__main__':
HelpRpcTest().main()
| 33.311475
| 123
| 0.652067
|
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
import os
class HelpRpcTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.supports_cli = False
def run_test(self):
self.test_categories()
self.dump_help()
def test_categories(self):
node = self.nodes[0]
assert_raises_rpc_error(-1, 'help', node.help, 'foo', 'bar')
assert_raises_rpc_error(-1, 'JSON value is not a string as expected', node.help, 0)
assert_equal(node.help('foo'), 'help: unknown command: foo')
titles = [line[3:-3] for line in node.help().splitlines() if line.startswith('==')]
components = ['Blockchain', 'Control', 'Generating', 'Mining', 'Network', 'Pop_mining', 'Rawtransactions', 'Util']
if self.is_wallet_compiled():
components.append('Wallet')
if self.is_zmq_compiled():
components.append('Zmq')
assert_equal(titles, components)
def dump_help(self):
dump_dir = os.path.join(self.options.tmpdir, 'rpc_help_dump')
os.mkdir(dump_dir)
calls = [line.split(' ', 1)[0] for line in self.nodes[0].help().splitlines() if line and not line.startswith('==')]
for call in calls:
with open(os.path.join(dump_dir, call), 'w', encoding='utf-8') as f:
f.write(self.nodes[0].help(call))
if __name__ == '__main__':
HelpRpcTest().main()
| true
| true
|
f70c0e3404bc51df606439e4d7d3727526d29eb8
| 8,667
|
py
|
Python
|
astropy/table/serialize.py
|
jbkalmbach/astropy
|
88ae8c615533efd1e60de4aded204943f66f881c
|
[
"BSD-3-Clause"
] | 1
|
2022-03-02T17:07:20.000Z
|
2022-03-02T17:07:20.000Z
|
astropy/table/serialize.py
|
jbkalmbach/astropy
|
88ae8c615533efd1e60de4aded204943f66f881c
|
[
"BSD-3-Clause"
] | 1
|
2017-09-22T21:10:10.000Z
|
2017-09-22T21:10:10.000Z
|
astropy/table/serialize.py
|
jbkalmbach/astropy
|
88ae8c615533efd1e60de4aded204943f66f881c
|
[
"BSD-3-Clause"
] | 1
|
2019-10-09T21:30:57.000Z
|
2019-10-09T21:30:57.000Z
|
from importlib import import_module
import re
from copy import deepcopy
from ..utils.data_info import MixinInfo
from .column import Column
from .table import Table, QTable, has_info_class
from ..units.quantity import QuantityInfo
__construct_mixin_classes = ('astropy.time.core.Time',
'astropy.time.core.TimeDelta',
'astropy.units.quantity.Quantity',
'astropy.coordinates.angles.Latitude',
'astropy.coordinates.angles.Longitude',
'astropy.coordinates.angles.Angle',
'astropy.coordinates.distances.Distance',
'astropy.coordinates.earth.EarthLocation',
'astropy.coordinates.sky_coordinate.SkyCoord',
'astropy.table.table.NdarrayMixin')
class SerializedColumn(dict):
"""
    Subclass of dict that is used in the representation to contain the name
    (and possibly other info) for a mixin attribute (either primary data or an
array-like attribute) that is serialized as a column in the table.
Normally contains the single key ``name`` with the name of the column in the
table.
"""
pass
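    # For example, a mixin column named "skycoord" whose "ra" attribute ends up
    # in the plain column "skycoord.ra" is referenced as
    # SerializedColumn({'name': 'skycoord.ra'}).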
def _represent_mixin_as_column(col, name, new_cols, mixin_cols,
exclude_classes=()):
"""Convert a mixin column to a plain columns or a set of mixin columns."""
# If not a mixin, or if class in ``exclude_classes`` tuple then
# treat as a normal column. Excluded sub-classes must be explicitly
# specified.
if not has_info_class(col, MixinInfo) or col.__class__ in exclude_classes:
new_cols.append(col)
return
# Subtlety here is handling mixin info attributes. The basic list of such
# attributes is: 'name', 'unit', 'dtype', 'format', 'description', 'meta'.
# - name: handled directly [DON'T store]
# - unit: DON'T store if this is a parent attribute
# - dtype: captured in plain Column if relevant [DON'T store]
# - format: possibly irrelevant but settable post-object creation [DO store]
# - description: DO store
# - meta: DO store
info = {}
for attr, nontrivial, xform in (('unit', lambda x: x not in (None, ''), str),
('format', lambda x: x is not None, None),
('description', lambda x: x is not None, None),
('meta', lambda x: x, None)):
col_attr = getattr(col.info, attr)
if nontrivial(col_attr):
info[attr] = xform(col_attr) if xform else col_attr
obj_attrs = col.info._represent_as_dict()
ordered_keys = col.info._represent_as_dict_attrs
data_attrs = [key for key in ordered_keys if key in obj_attrs and
getattr(obj_attrs[key], 'shape', ())[:1] == col.shape[:1]]
for data_attr in data_attrs:
data = obj_attrs[data_attr]
if len(data_attrs) == 1 and not has_info_class(data, MixinInfo):
# For one non-mixin attribute, we need only one serialized column.
# We can store info there, and keep the column name as is.
new_cols.append(Column(data, name=name, **info))
obj_attrs[data_attr] = SerializedColumn({'name': name})
# Remove attributes that are already on the serialized column.
for attr in info:
if attr in obj_attrs:
del obj_attrs[attr]
else:
# New column name combines the old name and attribute
# (e.g. skycoord.ra, skycoord.dec).
new_name = name + '.' + data_attr
# TODO masking, MaskedColumn
if not has_info_class(data, MixinInfo):
new_cols.append(Column(data, name=new_name))
obj_attrs[data_attr] = SerializedColumn({'name': new_name})
else:
# recurse. This will define obj_attrs[new_name].
_represent_mixin_as_column(data, new_name, new_cols, obj_attrs)
obj_attrs[data_attr] = SerializedColumn(obj_attrs.pop(new_name))
# Strip out from info any attributes defined by the parent
for attr in col.info.attrs_from_parent:
if attr in info:
del info[attr]
if info:
obj_attrs['__info__'] = info
# Store the fully qualified class name
obj_attrs['__class__'] = col.__module__ + '.' + col.__class__.__name__
mixin_cols[name] = obj_attrs
def _represent_mixins_as_columns(tbl, exclude_classes=()):
"""
Convert any mixin columns to plain Column or MaskedColumn and
return a new table. Exclude any mixin columns in ``exclude_classes``,
which must be a tuple of classes.
"""
if not tbl.has_mixin_columns:
return tbl
mixin_cols = {}
new_cols = []
for col in tbl.itercols():
_represent_mixin_as_column(col, col.info.name, new_cols, mixin_cols,
exclude_classes=exclude_classes)
meta = deepcopy(tbl.meta)
meta['__serialized_columns__'] = mixin_cols
out = Table(new_cols, meta=meta, copy=False)
return out
def _construct_mixin_from_obj_attrs_and_info(obj_attrs, info):
cls_full_name = obj_attrs.pop('__class__')
# If this is a supported class then import the class and run
# the _construct_from_col method. Prevent accidentally running
# untrusted code by only importing known astropy classes.
if cls_full_name not in __construct_mixin_classes:
raise ValueError('unsupported class for construct {}'.format(cls_full_name))
mod_name, cls_name = re.match(r'(.+)\.(\w+)', cls_full_name).groups()
module = import_module(mod_name)
cls = getattr(module, cls_name)
for attr, value in info.items():
if attr in cls.info.attrs_from_parent:
obj_attrs[attr] = value
mixin = cls.info._construct_from_dict(obj_attrs)
for attr, value in info.items():
if attr not in obj_attrs:
setattr(mixin.info, attr, value)
return mixin
def _construct_mixin_from_columns(new_name, obj_attrs, out):
data_attrs_map = {}
for name, val in obj_attrs.items():
if isinstance(val, SerializedColumn):
if 'name' in val:
data_attrs_map[val['name']] = name
else:
_construct_mixin_from_columns(name, val, out)
data_attrs_map[name] = name
for name in data_attrs_map.values():
del obj_attrs[name]
# Get the index where to add new column
idx = min(out.colnames.index(name) for name in data_attrs_map)
# Name is the column name in the table (e.g. "coord.ra") and
# data_attr is the object attribute name (e.g. "ra"). A different
# example would be a formatted time object that would have (e.g.)
# "time_col" and "value", respectively.
for name, data_attr in data_attrs_map.items():
col = out[name]
obj_attrs[data_attr] = col
del out[name]
info = obj_attrs.pop('__info__', {})
if len(data_attrs_map) == 1:
# col is the first and only serialized column; in that case, use info
# stored on the column.
for attr, nontrivial in (('unit', lambda x: x not in (None, '')),
('format', lambda x: x is not None),
('description', lambda x: x is not None),
('meta', lambda x: x)):
col_attr = getattr(col.info, attr)
if nontrivial(col_attr):
info[attr] = col_attr
info['name'] = new_name
col = _construct_mixin_from_obj_attrs_and_info(obj_attrs, info)
out.add_column(col, index=idx)
def _construct_mixins_from_columns(tbl):
if '__serialized_columns__' not in tbl.meta:
return tbl
# Don't know final output class but assume QTable so no columns get
# downgraded.
out = QTable(tbl, copy=False)
mixin_cols = out.meta.pop('__serialized_columns__')
for new_name, obj_attrs in mixin_cols.items():
_construct_mixin_from_columns(new_name, obj_attrs, out)
# If no quantity subclasses are in the output then output as Table.
# For instance ascii.read(file, format='ecsv') doesn't specify an
# output class and should return the minimal table class that
# represents the table file.
has_quantities = any(isinstance(col.info, QuantityInfo)
for col in out.itercols())
if not has_quantities:
out = Table(out, copy=False)
return out
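# Round-trip sketch: for a table ``t`` containing mixin columns,
#     plain = _represent_mixins_as_columns(t)
#     restored = _construct_mixins_from_columns(plain)
# recovers the mixins; ``restored`` is a QTable, or a plain Table when no
# Quantity-like columns are present.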
| 39.756881
| 84
| 0.62063
|
from importlib import import_module
import re
from copy import deepcopy
from ..utils.data_info import MixinInfo
from .column import Column
from .table import Table, QTable, has_info_class
from ..units.quantity import QuantityInfo
__construct_mixin_classes = ('astropy.time.core.Time',
'astropy.time.core.TimeDelta',
'astropy.units.quantity.Quantity',
'astropy.coordinates.angles.Latitude',
'astropy.coordinates.angles.Longitude',
'astropy.coordinates.angles.Angle',
'astropy.coordinates.distances.Distance',
'astropy.coordinates.earth.EarthLocation',
'astropy.coordinates.sky_coordinate.SkyCoord',
'astropy.table.table.NdarrayMixin')
class SerializedColumn(dict):
pass
def _represent_mixin_as_column(col, name, new_cols, mixin_cols,
exclude_classes=()):
if not has_info_class(col, MixinInfo) or col.__class__ in exclude_classes:
new_cols.append(col)
return
# - unit: DON'T store if this is a parent attribute
# - format: possibly irrelevant but settable post-object creation [DO store]
# - description: DO store
# - meta: DO store
info = {}
for attr, nontrivial, xform in (('unit', lambda x: x not in (None, ''), str),
('format', lambda x: x is not None, None),
('description', lambda x: x is not None, None),
('meta', lambda x: x, None)):
col_attr = getattr(col.info, attr)
if nontrivial(col_attr):
info[attr] = xform(col_attr) if xform else col_attr
obj_attrs = col.info._represent_as_dict()
ordered_keys = col.info._represent_as_dict_attrs
data_attrs = [key for key in ordered_keys if key in obj_attrs and
getattr(obj_attrs[key], 'shape', ())[:1] == col.shape[:1]]
for data_attr in data_attrs:
data = obj_attrs[data_attr]
if len(data_attrs) == 1 and not has_info_class(data, MixinInfo):
# For one non-mixin attribute, we need only one serialized column.
# We can store info there, and keep the column name as is.
new_cols.append(Column(data, name=name, **info))
obj_attrs[data_attr] = SerializedColumn({'name': name})
# Remove attributes that are already on the serialized column.
for attr in info:
if attr in obj_attrs:
del obj_attrs[attr]
else:
# New column name combines the old name and attribute
# (e.g. skycoord.ra, skycoord.dec).
new_name = name + '.' + data_attr
# TODO masking, MaskedColumn
if not has_info_class(data, MixinInfo):
new_cols.append(Column(data, name=new_name))
obj_attrs[data_attr] = SerializedColumn({'name': new_name})
else:
# recurse. This will define obj_attrs[new_name].
_represent_mixin_as_column(data, new_name, new_cols, obj_attrs)
obj_attrs[data_attr] = SerializedColumn(obj_attrs.pop(new_name))
# Strip out from info any attributes defined by the parent
for attr in col.info.attrs_from_parent:
if attr in info:
del info[attr]
if info:
obj_attrs['__info__'] = info
# Store the fully qualified class name
obj_attrs['__class__'] = col.__module__ + '.' + col.__class__.__name__
mixin_cols[name] = obj_attrs
def _represent_mixins_as_columns(tbl, exclude_classes=()):
if not tbl.has_mixin_columns:
return tbl
mixin_cols = {}
new_cols = []
for col in tbl.itercols():
_represent_mixin_as_column(col, col.info.name, new_cols, mixin_cols,
exclude_classes=exclude_classes)
meta = deepcopy(tbl.meta)
meta['__serialized_columns__'] = mixin_cols
out = Table(new_cols, meta=meta, copy=False)
return out
def _construct_mixin_from_obj_attrs_and_info(obj_attrs, info):
cls_full_name = obj_attrs.pop('__class__')
# If this is a supported class then import the class and run
# the _construct_from_col method. Prevent accidentally running
# untrusted code by only importing known astropy classes.
if cls_full_name not in __construct_mixin_classes:
raise ValueError('unsupported class for construct {}'.format(cls_full_name))
mod_name, cls_name = re.match(r'(.+)\.(\w+)', cls_full_name).groups()
module = import_module(mod_name)
cls = getattr(module, cls_name)
for attr, value in info.items():
if attr in cls.info.attrs_from_parent:
obj_attrs[attr] = value
mixin = cls.info._construct_from_dict(obj_attrs)
for attr, value in info.items():
if attr not in obj_attrs:
setattr(mixin.info, attr, value)
return mixin
def _construct_mixin_from_columns(new_name, obj_attrs, out):
data_attrs_map = {}
for name, val in obj_attrs.items():
if isinstance(val, SerializedColumn):
if 'name' in val:
data_attrs_map[val['name']] = name
else:
_construct_mixin_from_columns(name, val, out)
data_attrs_map[name] = name
for name in data_attrs_map.values():
del obj_attrs[name]
# Get the index where to add new column
idx = min(out.colnames.index(name) for name in data_attrs_map)
# Name is the column name in the table (e.g. "coord.ra") and
# data_attr is the object attribute name (e.g. "ra"). A different
# example would be a formatted time object that would have (e.g.)
# "time_col" and "value", respectively.
for name, data_attr in data_attrs_map.items():
col = out[name]
obj_attrs[data_attr] = col
del out[name]
info = obj_attrs.pop('__info__', {})
if len(data_attrs_map) == 1:
# col is the first and only serialized column; in that case, use info
# stored on the column.
for attr, nontrivial in (('unit', lambda x: x not in (None, '')),
('format', lambda x: x is not None),
('description', lambda x: x is not None),
('meta', lambda x: x)):
col_attr = getattr(col.info, attr)
if nontrivial(col_attr):
info[attr] = col_attr
info['name'] = new_name
col = _construct_mixin_from_obj_attrs_and_info(obj_attrs, info)
out.add_column(col, index=idx)
def _construct_mixins_from_columns(tbl):
if '__serialized_columns__' not in tbl.meta:
return tbl
# Don't know final output class but assume QTable so no columns get
out = QTable(tbl, copy=False)
mixin_cols = out.meta.pop('__serialized_columns__')
for new_name, obj_attrs in mixin_cols.items():
_construct_mixin_from_columns(new_name, obj_attrs, out)
# output class and should return the minimal table class that
# represents the table file.
has_quantities = any(isinstance(col.info, QuantityInfo)
for col in out.itercols())
if not has_quantities:
out = Table(out, copy=False)
return out
| true
| true
|
f70c0f56841d3e49809f2c21b43b9bac19f6cda2
| 389
|
py
|
Python
|
social_network/urls.py
|
zareisajad/social-network-django
|
991c8075a9fb51b7fbdb17704325ebc4c9d2e0fa
|
[
"MIT"
] | null | null | null |
social_network/urls.py
|
zareisajad/social-network-django
|
991c8075a9fb51b7fbdb17704325ebc4c9d2e0fa
|
[
"MIT"
] | null | null | null |
social_network/urls.py
|
zareisajad/social-network-django
|
991c8075a9fb51b7fbdb17704325ebc4c9d2e0fa
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('posts.urls')),
path('accounts/', include('accounts.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
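# Serving MEDIA_ROOT through static() is a development convenience; with DEBUG
# off, the deployment's web server is expected to serve uploaded media instead.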
| 29.923077
| 80
| 0.737789
|
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('posts.urls')),
path('accounts/', include('accounts.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| true
| true
|
f70c0f57d7596b54aedf93375dc3a812baaafc4a
| 1,126
|
py
|
Python
|
semseg_vaihingen/tests/test_unit_model.py
|
SilkeDH/semseg_vaihingen
|
0a8bed71836fa892b8a13b7d2c5109dbcae3c549
|
[
"MIT"
] | 3
|
2020-02-03T16:55:50.000Z
|
2020-12-12T15:29:49.000Z
|
semseg_vaihingen/tests/test_unit_model.py
|
SilkeDH/semseg_vaihingen
|
0a8bed71836fa892b8a13b7d2c5109dbcae3c549
|
[
"MIT"
] | 8
|
2020-03-24T17:39:59.000Z
|
2022-02-10T00:20:46.000Z
|
semseg_vaihingen/tests/test_unit_model.py
|
SilkeDH/semseg_vaihingen
|
0a8bed71836fa892b8a13b7d2c5109dbcae3c549
|
[
"MIT"
] | 1
|
2020-02-27T09:48:53.000Z
|
2020-02-27T09:48:53.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 - 2019 Karlsruhe Institute of Technology - Steinbuch Centre for Computing
# This code is distributed under the MIT License
# Please, see the LICENSE file
#
"""
Created on Sat Aug 10 08:47:51 2019
@author: vykozlov
"""
import unittest
import semseg_vaihingen.models.deepaas_api as deepaas_api
class TestModelMethods(unittest.TestCase):
def setUp(self):
self.meta = deepaas_api.get_metadata()
def test_model_metadata_type(self):
"""
Test that get_metadata() returns dict
"""
self.assertTrue(type(self.meta) is dict)
def test_model_metadata_values(self):
"""
Test that get_metadata() returns right values (subset)
"""
self.assertEqual(self.meta['Name'].replace('-','_'),
'semseg_vaihingen'.replace('-','_'))
self.assertEqual(self.meta['Author'], 'G.Cavallaro (FZJ), M.Goetz (KIT), V.Kozlov (KIT), A.Grupp (KIT)')
self.assertEqual(self.meta['Author-email'], 'valentin.kozlov@kit.edu')
if __name__ == '__main__':
unittest.main()
| 29.631579
| 112
| 0.637655
|
import unittest
import semseg_vaihingen.models.deepaas_api as deepaas_api
class TestModelMethods(unittest.TestCase):
def setUp(self):
self.meta = deepaas_api.get_metadata()
def test_model_metadata_type(self):
self.assertTrue(type(self.meta) is dict)
def test_model_metadata_values(self):
self.assertEqual(self.meta['Name'].replace('-','_'),
'semseg_vaihingen'.replace('-','_'))
self.assertEqual(self.meta['Author'], 'G.Cavallaro (FZJ), M.Goetz (KIT), V.Kozlov (KIT), A.Grupp (KIT)')
self.assertEqual(self.meta['Author-email'], 'valentin.kozlov@kit.edu')
if __name__ == '__main__':
unittest.main()
| true
| true
|
f70c10807ab4db35e019d3e3323ba978f7699588
| 47,600
|
py
|
Python
|
cvxpy/problems/problem.py
|
adshieh/cvxpy
|
73b696b71dbb2ceb66a805798c922461e33afc6b
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-12-21T03:11:12.000Z
|
2022-03-02T16:56:24.000Z
|
cvxpy/problems/problem.py
|
adshieh/cvxpy
|
73b696b71dbb2ceb66a805798c922461e33afc6b
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cvxpy/problems/problem.py
|
adshieh/cvxpy
|
73b696b71dbb2ceb66a805798c922461e33afc6b
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
"""
Copyright 2013 Steven Diamond, 2017 Akshay Agrawal
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy import settings as s
from cvxpy import error
from cvxpy.problems.objective import Minimize, Maximize
from cvxpy.reductions.chain import Chain
from cvxpy.reductions.dgp2dcp.dgp2dcp import Dgp2Dcp
from cvxpy.reductions.dqcp2dcp import dqcp2dcp
from cvxpy.reductions.eval_params import EvalParams
from cvxpy.reductions.flip_objective import FlipObjective
from cvxpy.reductions.solvers.solving_chain import construct_solving_chain
from cvxpy.interface.matrix_utilities import scalar_value
from cvxpy.reductions.solvers import bisection
from cvxpy.reductions.solvers import defines as slv_def
from cvxpy.utilities.deterministic import unique_list
import cvxpy.utilities.performance_utils as perf
from cvxpy.constraints import Equality, Inequality, NonPos, Zero, NonNeg
import cvxpy.utilities as u
from collections import namedtuple
import numpy as np
import time
SolveResult = namedtuple(
'SolveResult',
['opt_value', 'status', 'primal_values', 'dual_values'])
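# The Cache below keeps the compiled solving chain, the parametrized program and
# the reduction inverse data, keyed on the (solver, gp) pair, so that repeated
# solves of the same problem can reuse the compilation instead of redoing it.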
class Cache(object):
def __init__(self):
self.key = None
self.solving_chain = None
self.param_prog = None
self.inverse_data = None
def invalidate(self):
self.key = None
self.solving_chain = None
self.param_prog = None
self.inverse_data = None
def make_key(self, solver, gp):
return (solver, gp)
def gp(self):
return self.key is not None and self.key[1]
class Problem(u.Canonical):
"""A convex optimization problem.
Problems are immutable, save for modification through the specification
of :class:`~cvxpy.expressions.constants.parameters.Parameter`
Arguments
---------
objective : Minimize or Maximize
The problem's objective.
constraints : list
The constraints on the problem variables.
"""
# The solve methods available.
REGISTERED_SOLVE_METHODS = {}
def __init__(self, objective, constraints=None):
if constraints is None:
constraints = []
# Check that objective is Minimize or Maximize.
if not isinstance(objective, (Minimize, Maximize)):
raise error.DCPError("Problem objective must be Minimize or Maximize.")
# Constraints and objective are immutable.
self._objective = objective
self._constraints = [c for c in constraints]
self._value = None
self._status = None
self._solution = None
self._cache = Cache()
self._solver_cache = {}
# Information about the shape of the problem and its constituent parts
self._size_metrics = None
# Benchmarks reported by the solver:
self._solver_stats = None
self.args = [self._objective, self._constraints]
@property
def value(self):
"""float : The value from the last time the problem was solved
(or None if not solved).
"""
if self._value is None:
return None
else:
return scalar_value(self._value)
@property
def status(self):
"""str : The status from the last time the problem was solved; one
of optimal, infeasible, or unbounded (with or without
suffix inaccurate).
"""
return self._status
@property
def solution(self):
"""Solution : The solution from the last time the problem was solved.
"""
return self._solution
@property
def objective(self):
"""Minimize or Maximize : The problem's objective.
Note that the objective cannot be reassigned after creation,
and modifying the objective after creation will result in
undefined behavior.
"""
return self._objective
@property
def constraints(self):
"""A shallow copy of the problem's constraints.
Note that constraints cannot be reassigned, appended to, or otherwise
modified after creation, except through parameters.
"""
return self._constraints[:]
@perf.compute_once
def is_dcp(self, dpp=False):
"""Does the problem satisfy DCP rules?
Arguments
---------
dpp : bool, optional
If True, enforce the disciplined parametrized programming (DPP)
ruleset; only relevant when the problem involves Parameters.
DPP is a mild restriction of DCP. When a problem involving
Parameters is DPP, subsequent solves can be much faster than
the first one. For more information, consult the documentation at
https://www.cvxpy.org/tutorial/advanced/index.html#disciplined-parametrized-programming
Returns
-------
bool
            True if the problem is DCP, False otherwise.
"""
return all(
expr.is_dcp(dpp) for expr in self.constraints + [self.objective])
@perf.compute_once
def is_dgp(self, dpp=False):
"""Does the problem satisfy DGP rules?
Arguments
---------
dpp : bool, optional
If True, enforce the disciplined parametrized programming (DPP)
ruleset; only relevant when the problem involves Parameters.
DPP is a mild restriction of DGP. When a problem involving
Parameters is DPP, subsequent solves can be much faster than
the first one. For more information, consult the documentation at
https://www.cvxpy.org/tutorial/advanced/index.html#disciplined-parametrized-programming
Returns
-------
bool
True if the Expression is DGP, False otherwise.
"""
return all(
expr.is_dgp(dpp) for expr in self.constraints + [self.objective])
@perf.compute_once
def is_dqcp(self):
"""Does the problem satisfy the DQCP rules?
"""
return all(
expr.is_dqcp() for expr in self.constraints + [self.objective])
@perf.compute_once
def is_dpp(self, context='dcp'):
"""Does the problem satisfy DPP rules?
        DPP is a mild restriction of DCP and DGP. When a problem involving
Parameters is DPP, subsequent solves can be much faster than
the first one. For more information, consult the documentation at
https://www.cvxpy.org/tutorial/advanced/index.html#disciplined-parametrized-programming
Arguments
---------
context : str
Whether to check DPP-compliance for DCP or DGP; ``context`` should
be either ``'dcp'`` or ``'dgp'``. Calling ``problem.is_dpp('dcp')``
is equivalent to ``problem.is_dcp(dpp=True)``, and
`problem.is_dpp('dgp')`` is equivalent to
`problem.is_dgp(dpp=True)`.
Returns
-------
bool
Whether the problem satisfies the DPP rules.
"""
if context.lower() == 'dcp':
return self.is_dcp(dpp=True)
elif context.lower() == 'dgp':
return self.is_dgp(dpp=True)
else:
raise ValueError("Unsupported context ", context)
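    # Rough intuition for DPP (illustrative, with hypothetical names): for a
    # Parameter p and Variable x, an objective like p * x follows the DPP
    # rules, whereas (p * p) * x does not, because multiplying two
    # parametrized expressions is outside the DPP ruleset.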
@perf.compute_once
def is_qp(self):
"""Is problem a quadratic program?
"""
for c in self.constraints:
if not (isinstance(c, (Equality, Zero)) or c.args[0].is_pwl()):
return False
for var in self.variables():
if var.is_psd() or var.is_nsd():
return False
return (self.is_dcp() and self.objective.args[0].is_qpwa())
@perf.compute_once
def is_mixed_integer(self):
return any(v.attributes['boolean'] or v.attributes['integer']
for v in self.variables())
@perf.compute_once
def variables(self):
"""Accessor method for variables.
Returns
-------
list of :class:`~cvxpy.expressions.variable.Variable`
A list of the variables in the problem.
"""
vars_ = self.objective.variables()
for constr in self.constraints:
vars_ += constr.variables()
return unique_list(vars_)
@perf.compute_once
def parameters(self):
"""Accessor method for parameters.
Returns
-------
list of :class:`~cvxpy.expressions.constants.parameter.Parameter`
A list of the parameters in the problem.
"""
params = self.objective.parameters()
for constr in self.constraints:
params += constr.parameters()
return unique_list(params)
@perf.compute_once
def constants(self):
"""Accessor method for constants.
Returns
-------
list of :class:`~cvxpy.expressions.constants.constant.Constant`
A list of the constants in the problem.
"""
const_dict = {}
constants_ = self.objective.constants()
for constr in self.constraints:
constants_ += constr.constants()
# Note that numpy matrices are not hashable, so we use the built-in
# function "id"
const_dict = {id(constant): constant for constant in constants_}
return list(const_dict.values())
def atoms(self):
"""Accessor method for atoms.
Returns
-------
list of :class:`~cvxpy.atoms.Atom`
A list of the atom types in the problem; note that this list
contains classes, not instances.
"""
atoms = self.objective.atoms()
for constr in self.constraints:
atoms += constr.atoms()
return unique_list(atoms)
@property
def size_metrics(self):
""":class:`~cvxpy.problems.problem.SizeMetrics` : Information about the problem's size.
"""
if self._size_metrics is None:
self._size_metrics = SizeMetrics(self)
return self._size_metrics
@property
def solver_stats(self):
""":class:`~cvxpy.problems.problem.SolverStats` : Information returned by the solver.
"""
return self._solver_stats
def solve(self, *args, **kwargs):
"""Solves the problem using the specified method.
Populates the :code:`status` and :code:`value` attributes on the
problem object as a side-effect.
Arguments
---------
solver : str, optional
The solver to use. For example, 'ECOS', 'SCS', or 'OSQP'.
verbose : bool, optional
Overrides the default of hiding solver output.
gp : bool, optional
If True, parses the problem as a disciplined geometric program
instead of a disciplined convex program.
qcp : bool, optional
If True, parses the problem as a disciplined quasiconvex program
instead of a disciplined convex program.
requires_grad : bool, optional
Makes it possible to compute gradients of a solution with respect to
Parameters by calling ``problem.backward()`` after solving, or to
compute perturbations to the variables given perturbations to Parameters by
calling ``problem.derivative()``.
Gradients are only supported for DCP and DGP problems, not
quasiconvex problems. When computing gradients (i.e., when
this argument is True), the problem must satisfy the DPP rules.
enforce_dpp : bool, optional
When True, a DPPError will be thrown when trying to solve a non-DPP
problem (instead of just a warning). Only relevant for problems
involving Parameters. Defaults to False.
method : function, optional
A custom solve method to use.
kwargs : keywords, optional
Additional solver specific arguments. See Notes below.
Notes
------
CVXPY interfaces with a wide range of solvers; the algorithms used by these solvers
have arguments relating to stopping criteria, and strategies to improve solution quality.
There is no one choice of arguments which is perfect for every problem. If you are not
getting satisfactory results from a solver, you can try changing its arguments. The
exact way this is done depends on the specific solver. Here are some examples:
prob.solve(solver='ECOS', abstol=1e-6)
prob.solve(solver='OSQP', max_iter=10000).
mydict = {"MSK_DPAR_INTPNT_CO_TOL_NEAR_REL": 10}
prob.solve(solver='MOSEK', mosek_params=mydict).
You should refer to CVXPY's web documentation for details on how to pass solver
solver arguments, available at
https://www.cvxpy.org/tutorial/advanced/index.html#setting-solver-options
Returns
-------
float
The optimal value for the problem, or a string indicating
why the problem could not be solved.
Raises
------
cvxpy.error.DCPError
Raised if the problem is not DCP and `gp` is False.
cvxpy.error.DGPError
Raised if the problem is not DGP and `gp` is True.
cvxpy.error.SolverError
Raised if no suitable solver exists among the installed solvers,
or if an unanticipated error is encountered.
"""
func_name = kwargs.pop("method", None)
if func_name is not None:
solve_func = Problem.REGISTERED_SOLVE_METHODS[func_name]
else:
solve_func = Problem._solve
return solve_func(self, *args, **kwargs)
@classmethod
def register_solve(cls, name, func):
"""Adds a solve method to the Problem class.
Arguments
---------
name : str
The keyword for the method.
func : function
The function that executes the solve method. This function must
take as its first argument the problem instance to solve.
"""
cls.REGISTERED_SOLVE_METHODS[name] = func
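    # Minimal usage sketch (hypothetical names; only register_solve itself is
    # part of the API shown above):
    #
    #     def my_solve(problem, *args, **kwargs):
    #         # any callable works, provided the problem instance comes first
    #         return problem._solve(*args, **kwargs)
    #
    #     Problem.register_solve("my_solve", my_solve)
    #     problem.solve(method="my_solve")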
def get_problem_data(self, solver, gp=False, enforce_dpp=False):
"""Returns the problem data used in the call to the solver.
When a problem is solved, CVXPY creates a chain of reductions enclosed
in a :class:`~cvxpy.reductions.solvers.solving_chain.SolvingChain`,
and compiles it to some low-level representation that is
compatible with the targeted solver. This method returns that low-level
representation.
For some solving chains, this low-level representation is a dictionary
that contains exactly those arguments that were supplied to the solver;
however, for other solving chains, the data is an intermediate
representation that is compiled even further by the solver interfaces.
A solution to the equivalent low-level problem can be obtained via the
data by invoking the `solve_via_data` method of the returned solving
chain, a thin wrapper around the code external to CVXPY that further
processes and solves the problem. Invoke the unpack_results method
to recover a solution to the original problem.
For example:
::
objective = ...
constraints = ...
problem = cp.Problem(objective, constraints)
data, chain, inverse_data = problem.get_problem_data(cp.SCS)
# calls SCS using `data`
soln = chain.solve_via_data(problem, data)
# unpacks the solution returned by SCS into `problem`
problem.unpack_results(soln, chain, inverse_data)
Alternatively, the `data` dictionary returned by this method
contains enough information to bypass CVXPY and call the solver
directly.
For example:
::
problem = cp.Problem(objective, constraints)
data, _, _ = problem.get_problem_data(cp.SCS)
import scs
probdata = {
'A': data['A'],
'b': data['b'],
'c': data['c'],
}
cone_dims = data['dims']
cones = {
"f": cone_dims.zero,
"l": cone_dims.nonpos,
"q": cone_dims.soc,
"ep": cone_dims.exp,
"s": cone_dims.psd,
}
soln = scs.solve(data, cones)
The structure of the data dict that CVXPY returns depends on the
solver. For details, consult the solver interfaces in
`cvxpy/reductions/solvers`.
Arguments
---------
solver : str
The solver the problem data is for.
gp : bool, optional
If True, then parses the problem as a disciplined geometric program
instead of a disciplined convex program.
enforce_dpp : bool, optional
When True, a DPPError will be thrown when trying to parse a non-DPP
problem (instead of just a warning). Defaults to False.
Returns
-------
dict or object
lowest level representation of problem
SolvingChain
The solving chain that created the data.
list
The inverse data generated by the chain.
"""
key = self._cache.make_key(solver, gp)
if key != self._cache.key:
self._cache.invalidate()
solving_chain = self._construct_chain(
solver=solver, gp=gp, enforce_dpp=enforce_dpp)
self._cache.key = key
self._cache.solving_chain = solving_chain
self._solver_cache = {}
else:
solving_chain = self._cache.solving_chain
if self._cache.param_prog is not None:
# fast path, bypasses application of reductions
if gp:
dgp2dcp = self._cache.solving_chain.get(Dgp2Dcp)
# Parameters in the param cone prog are the logs
# of parameters in the original problem (with one exception:
# parameters appearing as exponents (in power and gmatmul
                # atoms) are unchanged).
old_params_to_new_params = dgp2dcp.canon_methods._parameters
for param in self.parameters():
if param in old_params_to_new_params:
old_params_to_new_params[param].value = np.log(
param.value)
data, solver_inverse_data = solving_chain.solver.apply(
self._cache.param_prog)
inverse_data = self._cache.inverse_data + [solver_inverse_data]
else:
data, inverse_data = solving_chain.apply(self)
safe_to_cache = (
isinstance(data, dict)
and s.PARAM_PROB in data
and not any(isinstance(reduction, EvalParams)
for reduction in solving_chain.reductions)
)
if safe_to_cache:
self._cache.param_prog = data[s.PARAM_PROB]
# the last datum in inverse_data corresponds to the solver,
# so we shouldn't cache it
self._cache.inverse_data = inverse_data[:-1]
return data, solving_chain, inverse_data
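    # Note on caching: when this method is called again with the same
    # (solver, gp) key and the parametrized program was safe to cache, only the
    # solver's apply step is re-run on the cached program, so changing
    # parameter values does not trigger a full pass through the reductions.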
def _find_candidate_solvers(self,
solver=None,
gp=False):
"""
        Find candidate solvers for the current problem. If solver
is not None, it checks if the specified solver is compatible
with the problem passed.
Arguments
---------
solver : string
The name of the solver with which to solve the problem. If no
solver is supplied (i.e., if solver is None), then the targeted
solver may be any of those that are installed. If the problem
is variable-free, then this parameter is ignored.
gp : bool
If True, the problem is parsed as a Disciplined Geometric Program
instead of as a Disciplined Convex Program.
Returns
-------
dict
A dictionary of compatible solvers divided in `qp_solvers`
and `conic_solvers`.
Raises
------
cvxpy.error.SolverError
Raised if the problem is not DCP and `gp` is False.
cvxpy.error.DGPError
Raised if the problem is not DGP and `gp` is True.
"""
candidates = {'qp_solvers': [],
'conic_solvers': []}
if solver is not None:
if solver not in slv_def.INSTALLED_SOLVERS:
raise error.SolverError("The solver %s is not installed." % solver)
if solver in slv_def.CONIC_SOLVERS:
candidates['conic_solvers'] += [solver]
if solver in slv_def.QP_SOLVERS:
candidates['qp_solvers'] += [solver]
else:
candidates['qp_solvers'] = [s for s in slv_def.INSTALLED_SOLVERS
if s in slv_def.QP_SOLVERS]
candidates['conic_solvers'] = [s for s in slv_def.INSTALLED_SOLVERS
if s in slv_def.CONIC_SOLVERS]
# If gp we must have only conic solvers
if gp:
if solver is not None and solver not in slv_def.CONIC_SOLVERS:
raise error.SolverError(
"When `gp=True`, `solver` must be a conic solver "
"(received '%s'); try calling " % solver +
" `solve()` with `solver=cvxpy.ECOS`."
)
elif solver is None:
candidates['qp_solvers'] = [] # No QP solvers allowed
if self.is_mixed_integer():
if len(slv_def.INSTALLED_MI_SOLVERS) == 0:
msg = """
CVXPY needs additional software (a `mixed-integer solver`) to handle this model.
The web documentation
https://www.cvxpy.org/tutorial/advanced/index.html#mixed-integer-programs
reviews open-source and commercial options for mixed-integer solvers.
Quick fix: if you install the python package CVXOPT (pip install cvxopt),
then CVXPY can use the open-source mixed-integer solver `GLPK`.
"""
raise error.SolverError(msg)
candidates['qp_solvers'] = [
s for s in candidates['qp_solvers']
if slv_def.SOLVER_MAP_QP[s].MIP_CAPABLE]
candidates['conic_solvers'] = [
s for s in candidates['conic_solvers']
if slv_def.SOLVER_MAP_CONIC[s].MIP_CAPABLE]
if not candidates['conic_solvers'] and \
not candidates['qp_solvers']:
raise error.SolverError(
"Problem is mixed-integer, but candidate "
"QP/Conic solvers (%s) are not MIP-capable." %
(candidates['qp_solvers'] +
candidates['conic_solvers']))
return candidates
def _construct_chain(self, solver=None, gp=False, enforce_dpp=False):
"""
Construct the chains required to reformulate and solve the problem.
        In particular, this function
        1. finds the candidate solvers, and
        2. constructs the solving chain that performs the numeric
           reductions and solves the problem.
Arguments
---------
solver : str, optional
The solver to use. Defaults to ECOS.
gp : bool, optional
If True, the problem is parsed as a Disciplined Geometric Program
instead of as a Disciplined Convex Program.
enforce_dpp : bool, optional
Whether to error on DPP violations.
Returns
-------
A solving chain
"""
candidate_solvers = self._find_candidate_solvers(solver=solver, gp=gp)
return construct_solving_chain(self, candidate_solvers, gp=gp,
enforce_dpp=enforce_dpp)
def _invalidate_cache(self):
self._cache_key = None
self._solving_chain = None
self._param_prog = None
self._inverse_data = None
def _solve(self,
solver=None,
warm_start=True,
verbose=False,
gp=False, qcp=False, requires_grad=False, enforce_dpp=False, **kwargs):
"""Solves a DCP compliant optimization problem.
Saves the values of primal and dual variables in the variable
and constraint objects, respectively.
Arguments
---------
solver : str, optional
The solver to use. Defaults to ECOS.
warm_start : bool, optional
Should the previous solver result be used to warm start?
verbose : bool, optional
Overrides the default of hiding solver output.
gp : bool, optional
If True, parses the problem as a disciplined geometric program.
qcp : bool, optional
If True, parses the problem as a disciplined quasiconvex program.
requires_grad : bool, optional
Makes it possible to compute gradients with respect to
parameters by calling `backward()` after solving, or to compute
perturbations to the variables by calling `derivative()`. When
True, the solver must be SCS, and dqcp must be False.
A DPPError is thrown when problem is not DPP.
enforce_dpp : bool, optional
When True, a DPPError will be thrown when trying to solve a non-DPP
problem (instead of just a warning). Defaults to False.
kwargs : dict, optional
A dict of options that will be passed to the specific solver.
In general, these options will override any default settings
imposed by cvxpy.
Returns
-------
float
The optimal value for the problem, or a string indicating
why the problem could not be solved.
"""
for parameter in self.parameters():
if parameter.value is None:
raise error.ParameterError(
"A Parameter (whose name is '%s') does not have a value "
"associated with it; all Parameter objects must have "
"values before solving a problem." % parameter.name())
if requires_grad:
dpp_context = 'dgp' if gp else 'dcp'
if qcp:
raise ValueError("Cannot compute gradients of DQCP problems.")
elif not self.is_dpp(dpp_context):
raise error.DPPError("Problem is not DPP (when requires_grad "
"is True, problem must be DPP).")
elif solver is not None and solver not in [s.SCS, s.DIFFCP]:
raise ValueError("When requires_grad is True, the only "
"supported solver is SCS "
"(received %s)." % solver)
elif s.DIFFCP not in slv_def.INSTALLED_SOLVERS:
raise ImportError(
"The Python package diffcp must be installed to "
"differentiate through problems. Please follow the "
"installation instructions at "
"https://github.com/cvxgrp/diffcp")
else:
solver = s.DIFFCP
else:
if gp and qcp:
raise ValueError("At most one of `gp` and `qcp` can be True.")
if qcp and not self.is_dcp():
if not self.is_dqcp():
raise error.DQCPError("The problem is not DQCP.")
reductions = [dqcp2dcp.Dqcp2Dcp()]
if type(self.objective) == Maximize:
reductions = [FlipObjective()] + reductions
chain = Chain(problem=self, reductions=reductions)
soln = bisection.bisect(
chain.reduce(), solver=solver, verbose=verbose, **kwargs)
self.unpack(chain.retrieve(soln))
return self.value
data, solving_chain, inverse_data = self.get_problem_data(
solver, gp, enforce_dpp)
solution = solving_chain.solve_via_data(
self, data, warm_start, verbose, kwargs)
self.unpack_results(solution, solving_chain, inverse_data)
return self.value
def backward(self):
"""Compute the gradient of a solution with respect to Parameters.
This method differentiates through the solution map of the problem,
obtaining the gradient of a solution with respect to the Parameters.
In other words, it calculates the sensitivities of the Parameters
with respect to perturbations in the optimal Variable values. This
        can be useful for integrating CVXPY into automatic differentiation
toolkits.
``backward()`` populates the ``gradient`` attribute of each Parameter
in the problem as a side-effect. It can only be called after calling
``solve()`` with ``requires_grad=True``.
Below is a simple example:
::
import cvxpy as cp
import numpy as np
p = cp.Parameter()
x = cp.Variable()
quadratic = cp.square(x - 2 * p)
problem = cp.Problem(cp.Minimize(quadratic), [x >= 0])
p.value = 3.0
problem.solve(requires_grad=True, eps=1e-10)
# backward() populates the gradient attribute of the parameters
problem.backward()
# Because x* = 2 * p, dx*/dp = 2
np.testing.assert_allclose(p.gradient, 2.0)
In the above example, the gradient could easily be computed by hand.
        The ``backward()`` method is useful because for almost all problems, the
gradient cannot be computed analytically.
This method can be used to differentiate through any DCP or DGP
problem, as long as the problem is DPP compliant (i.e.,
``problem.is_dcp(dpp=True)`` or ``problem.is_dgp(dpp=True)`` evaluates to
``True``).
This method uses the chain rule to evaluate the gradients of a
scalar-valued function of the Variables with respect to the Parameters.
For example, let x be a variable and p a Parameter; x and p might be
scalars, vectors, or matrices. Let f be a scalar-valued function, with
        z = f(x). Then this method computes dz/dp = (dz/dx) (dx/dp). dz/dx
is chosen as the all-ones vector by default, corresponding to
choosing f to be the sum function. You can specify a custom value for
dz/dx by setting the ``gradient`` attribute on your variables. For example,
::
import cvxpy as cp
import numpy as np
b = cp.Parameter()
x = cp.Variable()
quadratic = cp.square(x - 2 * b)
problem = cp.Problem(cp.Minimize(quadratic), [x >= 0])
b.value = 3.
problem.solve(requires_grad=True, eps=1e-10)
x.gradient = 4.
problem.backward()
# dz/dp = dz/dx dx/dp = 4. * 2. == 8.
np.testing.assert_allclose(b.gradient, 8.)
The ``gradient`` attribute on a variable can also be interpreted as a
perturbation to its optimal value.
Raises
------
ValueError
if solve was not called with ``requires_grad=True``
SolverError
if the problem is infeasible or unbounded
"""
if s.DIFFCP not in self._solver_cache:
raise ValueError("backward can only be called after calling "
"solve with `requires_grad=True`")
elif self.status not in s.SOLUTION_PRESENT:
raise error.SolverError("Backpropagating through "
"infeasible/unbounded problems is not "
"yet supported. Please file an issue on "
"Github if you need this feature.")
# TODO(akshayka): Backpropagate through dual variables as well.
backward_cache = self._solver_cache[s.DIFFCP]
DT = backward_cache["DT"]
zeros = np.zeros(backward_cache["s"].shape)
del_vars = {}
gp = self._cache.gp()
for variable in self.variables():
if variable.gradient is None:
del_vars[variable.id] = np.ones(variable.shape)
else:
del_vars[variable.id] = np.asarray(variable.gradient,
dtype=np.float64)
if gp:
# x_gp = exp(x_cone_program),
# dx_gp/d x_cone_program = exp(x_cone_program) = x_gp
del_vars[variable.id] *= variable.value
dx = self._cache.param_prog.split_adjoint(del_vars)
start = time.time()
dA, db, dc = DT(dx, zeros, zeros)
end = time.time()
backward_cache['DT_TIME'] = end - start
dparams = self._cache.param_prog.apply_param_jac(dc, -dA, db)
if not gp:
for param in self.parameters():
param.gradient = dparams[param.id]
else:
dgp2dcp = self._cache.solving_chain.get(Dgp2Dcp)
old_params_to_new_params = dgp2dcp.canon_methods._parameters
for param in self.parameters():
# Note: if param is an exponent in a power or gmatmul atom,
# then the parameter passes through unchanged to the DCP
# program; if the param is also used elsewhere (not as an
# exponent), then param will also be in
# old_params_to_new_params. Therefore, param.gradient =
# dparams[param.id] (or 0) + 1/param*dparams[new_param.id]
#
# Note that param.id is in dparams if and only if
# param was used as an exponent (because this means that
# the parameter entered the DCP problem unchanged.)
grad = 0.0 if param.id not in dparams else dparams[param.id]
if param in old_params_to_new_params:
new_param = old_params_to_new_params[param]
# new_param.value == log(param), apply chain rule
grad += (1.0 / param.value) * dparams[new_param.id]
param.gradient = grad
def derivative(self):
"""Apply the derivative of the solution map to perturbations in the Parameters
This method applies the derivative of the solution map to perturbations
in the Parameters to obtain perturbations in the optimal values of the
Variables. In other words, it tells you how the optimal values of the
Variables would be changed by small changes to the Parameters.
You can specify perturbations in a Parameter by setting its ``delta``
attribute (if unspecified, the perturbation defaults to 0).
This method populates the ``delta`` attribute of the Variables as a
side-effect.
This method can only be called after calling ``solve()`` with
``requires_grad=True``. It is compatible with both DCP and DGP
problems (that are also DPP-compliant).
Below is a simple example:
::
import cvxpy as cp
import numpy as np
p = cp.Parameter()
x = cp.Variable()
quadratic = cp.square(x - 2 * p)
problem = cp.Problem(cp.Minimize(quadratic), [x >= 0])
p.value = 3.0
problem.solve(requires_grad=True, eps=1e-10)
# derivative() populates the delta attribute of the variables
problem.derivative()
p.delta = 1e-3
# Because x* = 2 * p, dx*/dp = 2, so (dx*/dp)(p.delta) == 2e-3
np.testing.assert_allclose(x.delta, 2e-3)
Raises
------
ValueError
if solve was not called with ``requires_grad=True``
SolverError
if the problem is infeasible or unbounded
"""
if s.DIFFCP not in self._solver_cache:
raise ValueError("derivative can only be called after calling "
"solve with `requires_grad=True`")
elif self.status not in s.SOLUTION_PRESENT:
raise ValueError("Differentiating through infeasible/unbounded "
"problems is not yet supported. Please file an "
"issue on Github if you need this feature.")
# TODO(akshayka): Forward differentiate dual variables as well
backward_cache = self._solver_cache[s.DIFFCP]
param_prog = self._cache.param_prog
D = backward_cache["D"]
param_deltas = {}
gp = self._cache.gp()
if gp:
dgp2dcp = self._cache.solving_chain.get(Dgp2Dcp)
if not self.parameters():
for variable in self.variables():
variable.delta = np.zeros(variable.shape)
return
for param in self.parameters():
delta = param.delta if param.delta is not None else np.zeros(param.shape)
if gp:
if param in dgp2dcp.canon_methods._parameters:
new_param_id = dgp2dcp.canon_methods._parameters[param].id
else:
new_param_id = param.id
param_deltas[new_param_id] = (
1.0/param.value * np.asarray(delta, dtype=np.float64))
if param.id in param_prog.param_id_to_col:
# here, param generated a new parameter and also
# passed through to the param cone prog unchanged
# (because it was an exponent of a power)
param_deltas[param.id] = np.asarray(delta,
dtype=np.float64)
else:
param_deltas[param.id] = np.asarray(delta, dtype=np.float64)
dc, _, dA, db = param_prog.apply_parameters(param_deltas,
zero_offset=True)
start = time.time()
dx, _, _ = D(-dA, db, dc)
end = time.time()
backward_cache['D_TIME'] = end - start
dvars = param_prog.split_solution(
dx, [v.id for v in self.variables()])
for variable in self.variables():
variable.delta = dvars[variable.id]
if gp:
# x_gp = exp(x_cone_program),
# dx_gp/d x_cone_program = exp(x_cone_program) = x_gp
variable.delta *= variable.value
def _clear_solution(self):
for v in self.variables():
v.save_value(None)
for c in self.constraints:
for dv in c.dual_variables:
dv.save_value(None)
self._value = None
self._status = None
self._solution = None
def unpack(self, solution):
"""Updates the problem state given a Solution.
Updates problem.status, problem.value and value of primal and dual
variables. If solution.status is in cvxpy.settings.ERROR, this method
is a no-op.
Arguments
---------
solution : cvxpy.Solution
A Solution object.
Raises
------
ValueError
If the solution object has an invalid status
"""
if solution.status in s.SOLUTION_PRESENT:
for v in self.variables():
v.save_value(solution.primal_vars[v.id])
for c in self.constraints:
if c.id in solution.dual_vars:
c.save_dual_value(solution.dual_vars[c.id])
elif solution.status in s.INF_OR_UNB:
for v in self.variables():
v.save_value(None)
for constr in self.constraints:
for dv in constr.dual_variables:
dv.save_value(None)
else:
raise ValueError("Cannot unpack invalid solution: %s" % solution)
self._value = solution.opt_val
self._status = solution.status
self._solution = solution
def unpack_results(self, solution, chain, inverse_data):
"""Updates the problem state given the solver results.
Updates problem.status, problem.value and value of
primal and dual variables.
Arguments
---------
solution : object
The solution returned by applying the chain to the problem
and invoking the solver on the resulting data.
chain : SolvingChain
A solving chain that was used to solve the problem.
inverse_data : list
The inverse data returned by applying the chain to the problem.
Raises
------
cvxpy.error.SolverError
If the solver failed
"""
solution = chain.invert(solution, inverse_data)
if solution.status in s.ERROR:
raise error.SolverError(
"Solver '%s' failed. " % chain.solver.name() +
"Try another solver, or solve with verbose=True for more "
"information.")
self.unpack(solution)
self._solver_stats = SolverStats(self._solution.attr,
chain.solver.name())
def __str__(self):
if len(self.constraints) == 0:
return str(self.objective)
else:
subject_to = "subject to "
lines = [str(self.objective),
subject_to + str(self.constraints[0])]
for constr in self.constraints[1:]:
lines += [len(subject_to) * " " + str(constr)]
return '\n'.join(lines)
def __repr__(self):
return "Problem(%s, %s)" % (repr(self.objective),
repr(self.constraints))
def __neg__(self):
return Problem(-self.objective, self.constraints)
def __add__(self, other):
if other == 0:
return self
elif not isinstance(other, Problem):
return NotImplemented
return Problem(self.objective + other.objective,
unique_list(self.constraints + other.constraints))
def __radd__(self, other):
if other == 0:
return self
else:
return NotImplemented
def __sub__(self, other):
if not isinstance(other, Problem):
return NotImplemented
return Problem(self.objective - other.objective,
unique_list(self.constraints + other.constraints))
def __rsub__(self, other):
if other == 0:
return -self
else:
return NotImplemented
def __mul__(self, other):
if not isinstance(other, (int, float)):
return NotImplemented
return Problem(self.objective * other, self.constraints)
__rmul__ = __mul__
def __div__(self, other):
if not isinstance(other, (int, float)):
return NotImplemented
return Problem(self.objective * (1.0 / other), self.constraints)
def is_constant(self):
return False
__truediv__ = __div__
class SolverStats(object):
"""Reports some of the miscellaneous information that is returned
by the solver after solving but that is not captured directly by
the Problem instance.
Attributes
----------
solve_time : double
The time (in seconds) it took for the solver to solve the problem.
setup_time : double
The time (in seconds) it took for the solver to setup the problem.
num_iters : int
The number of iterations the solver had to go through to find a solution.
"""
def __init__(self, results_dict, solver_name):
self.solver_name = solver_name
self.solve_time = None
self.setup_time = None
self.num_iters = None
if s.SOLVE_TIME in results_dict:
self.solve_time = results_dict[s.SOLVE_TIME]
if s.SETUP_TIME in results_dict:
self.setup_time = results_dict[s.SETUP_TIME]
if s.NUM_ITERS in results_dict:
self.num_iters = results_dict[s.NUM_ITERS]
class SizeMetrics(object):
"""Reports various metrics regarding the problem.
Attributes
----------
num_scalar_variables : integer
The number of scalar variables in the problem.
num_scalar_data : integer
The number of scalar constants and parameters in the problem, counting the
entries of every constant matrix and vector used in the problem.
Some constants are not apparent when the problem is constructed: for example,
the sum_squares expression is a wrapper for a quad_over_lin expression with a
constant 1 in the denominator.
num_scalar_eq_constr : integer
The number of scalar equality constraints in the problem.
num_scalar_leq_constr : integer
The number of scalar inequality constraints in the problem.
max_data_dimension : integer
The longest dimension of any data block (constant or parameter).
max_big_small_squared : integer
The maximum value of (big)(small)^2 over all data blocks of the problem, where
(big) is the larger dimension and (small) is the smaller dimension
for each data block.
"""
def __init__(self, problem):
# num_scalar_variables
self.num_scalar_variables = 0
for var in problem.variables():
self.num_scalar_variables += var.size
# num_scalar_data, max_data_dimension, and max_big_small_squared
self.max_data_dimension = 0
self.num_scalar_data = 0
self.max_big_small_squared = 0
for const in problem.constants()+problem.parameters():
big = 0
# Compute number of data
self.num_scalar_data += const.size
big = 1 if len(const.shape) == 0 else max(const.shape)
small = 1 if len(const.shape) == 0 else min(const.shape)
# Get max data dimension:
if self.max_data_dimension < big:
self.max_data_dimension = big
max_big_small_squared = float(big)*(float(small)**2)
if self.max_big_small_squared < max_big_small_squared:
self.max_big_small_squared = max_big_small_squared
# num_scalar_eq_constr
self.num_scalar_eq_constr = 0
for constraint in problem.constraints:
if isinstance(constraint, (Equality, Zero)):
self.num_scalar_eq_constr += constraint.expr.size
# num_scalar_leq_constr
self.num_scalar_leq_constr = 0
for constraint in problem.constraints:
if isinstance(constraint, (Inequality, NonPos, NonNeg)):
self.num_scalar_leq_constr += constraint.expr.size
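# --- Editor's illustrative sketch (not part of the original module) ---------
# A minimal, hypothetical demonstration of the SolverStats and SizeMetrics
# objects defined above, as exposed through Problem.solver_stats and
# Problem.size_metrics. Assumes the cvxpy package is installed; the variable
# names and problem below are examples only.
if __name__ == "__main__":
    import cvxpy as cp
    _x = cp.Variable()
    _prob = cp.Problem(cp.Minimize(cp.square(_x - 1)), [_x >= 0])
    _prob.solve()
    # Populated from the solver's results dict (fields may be None for some solvers).
    print(_prob.solver_stats.solve_time, _prob.solver_stats.num_iters)
    # Computed on first access from the problem's variables, constants, and constraints.
    print(_prob.size_metrics.num_scalar_variables, _prob.size_metrics.num_scalar_leq_constr)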
| 39.209226
| 100
| 0.597017
|
from cvxpy import settings as s
from cvxpy import error
from cvxpy.problems.objective import Minimize, Maximize
from cvxpy.reductions.chain import Chain
from cvxpy.reductions.dgp2dcp.dgp2dcp import Dgp2Dcp
from cvxpy.reductions.dqcp2dcp import dqcp2dcp
from cvxpy.reductions.eval_params import EvalParams
from cvxpy.reductions.flip_objective import FlipObjective
from cvxpy.reductions.solvers.solving_chain import construct_solving_chain
from cvxpy.interface.matrix_utilities import scalar_value
from cvxpy.reductions.solvers import bisection
from cvxpy.reductions.solvers import defines as slv_def
from cvxpy.utilities.deterministic import unique_list
import cvxpy.utilities.performance_utils as perf
from cvxpy.constraints import Equality, Inequality, NonPos, Zero, NonNeg
import cvxpy.utilities as u
from collections import namedtuple
import numpy as np
import time
SolveResult = namedtuple(
'SolveResult',
['opt_value', 'status', 'primal_values', 'dual_values'])
class Cache(object):
def __init__(self):
self.key = None
self.solving_chain = None
self.param_prog = None
self.inverse_data = None
def invalidate(self):
self.key = None
self.solving_chain = None
self.param_prog = None
self.inverse_data = None
def make_key(self, solver, gp):
return (solver, gp)
def gp(self):
return self.key is not None and self.key[1]
class Problem(u.Canonical):
REGISTERED_SOLVE_METHODS = {}
def __init__(self, objective, constraints=None):
if constraints is None:
constraints = []
if not isinstance(objective, (Minimize, Maximize)):
raise error.DCPError("Problem objective must be Minimize or Maximize.")
self._objective = objective
self._constraints = [c for c in constraints]
self._value = None
self._status = None
self._solution = None
self._cache = Cache()
self._solver_cache = {}
self._size_metrics = None
self._solver_stats = None
self.args = [self._objective, self._constraints]
@property
def value(self):
if self._value is None:
return None
else:
return scalar_value(self._value)
@property
def status(self):
return self._status
@property
def solution(self):
return self._solution
@property
def objective(self):
return self._objective
@property
def constraints(self):
return self._constraints[:]
@perf.compute_once
def is_dcp(self, dpp=False):
return all(
expr.is_dcp(dpp) for expr in self.constraints + [self.objective])
@perf.compute_once
def is_dgp(self, dpp=False):
return all(
expr.is_dgp(dpp) for expr in self.constraints + [self.objective])
@perf.compute_once
def is_dqcp(self):
return all(
expr.is_dqcp() for expr in self.constraints + [self.objective])
@perf.compute_once
def is_dpp(self, context='dcp'):
if context.lower() == 'dcp':
return self.is_dcp(dpp=True)
elif context.lower() == 'dgp':
return self.is_dgp(dpp=True)
else:
raise ValueError("Unsupported context ", context)
@perf.compute_once
def is_qp(self):
for c in self.constraints:
if not (isinstance(c, (Equality, Zero)) or c.args[0].is_pwl()):
return False
for var in self.variables():
if var.is_psd() or var.is_nsd():
return False
return (self.is_dcp() and self.objective.args[0].is_qpwa())
@perf.compute_once
def is_mixed_integer(self):
return any(v.attributes['boolean'] or v.attributes['integer']
for v in self.variables())
@perf.compute_once
def variables(self):
vars_ = self.objective.variables()
for constr in self.constraints:
vars_ += constr.variables()
return unique_list(vars_)
@perf.compute_once
def parameters(self):
params = self.objective.parameters()
for constr in self.constraints:
params += constr.parameters()
return unique_list(params)
@perf.compute_once
def constants(self):
const_dict = {}
constants_ = self.objective.constants()
for constr in self.constraints:
constants_ += constr.constants()
const_dict = {id(constant): constant for constant in constants_}
return list(const_dict.values())
def atoms(self):
atoms = self.objective.atoms()
for constr in self.constraints:
atoms += constr.atoms()
return unique_list(atoms)
@property
def size_metrics(self):
if self._size_metrics is None:
self._size_metrics = SizeMetrics(self)
return self._size_metrics
@property
def solver_stats(self):
return self._solver_stats
def solve(self, *args, **kwargs):
func_name = kwargs.pop("method", None)
if func_name is not None:
solve_func = Problem.REGISTERED_SOLVE_METHODS[func_name]
else:
solve_func = Problem._solve
return solve_func(self, *args, **kwargs)
@classmethod
def register_solve(cls, name, func):
cls.REGISTERED_SOLVE_METHODS[name] = func
def get_problem_data(self, solver, gp=False, enforce_dpp=False):
key = self._cache.make_key(solver, gp)
if key != self._cache.key:
self._cache.invalidate()
solving_chain = self._construct_chain(
solver=solver, gp=gp, enforce_dpp=enforce_dpp)
self._cache.key = key
self._cache.solving_chain = solving_chain
self._solver_cache = {}
else:
solving_chain = self._cache.solving_chain
if self._cache.param_prog is not None:
if gp:
dgp2dcp = self._cache.solving_chain.get(Dgp2Dcp)
old_params_to_new_params = dgp2dcp.canon_methods._parameters
for param in self.parameters():
if param in old_params_to_new_params:
old_params_to_new_params[param].value = np.log(
param.value)
data, solver_inverse_data = solving_chain.solver.apply(
self._cache.param_prog)
inverse_data = self._cache.inverse_data + [solver_inverse_data]
else:
data, inverse_data = solving_chain.apply(self)
safe_to_cache = (
isinstance(data, dict)
and s.PARAM_PROB in data
and not any(isinstance(reduction, EvalParams)
for reduction in solving_chain.reductions)
)
if safe_to_cache:
self._cache.param_prog = data[s.PARAM_PROB]
self._cache.inverse_data = inverse_data[:-1]
return data, solving_chain, inverse_data
def _find_candidate_solvers(self,
solver=None,
gp=False):
candidates = {'qp_solvers': [],
'conic_solvers': []}
if solver is not None:
if solver not in slv_def.INSTALLED_SOLVERS:
raise error.SolverError("The solver %s is not installed." % solver)
if solver in slv_def.CONIC_SOLVERS:
candidates['conic_solvers'] += [solver]
if solver in slv_def.QP_SOLVERS:
candidates['qp_solvers'] += [solver]
else:
candidates['qp_solvers'] = [s for s in slv_def.INSTALLED_SOLVERS
if s in slv_def.QP_SOLVERS]
candidates['conic_solvers'] = [s for s in slv_def.INSTALLED_SOLVERS
if s in slv_def.CONIC_SOLVERS]
# If gp we must have only conic solvers
if gp:
if solver is not None and solver not in slv_def.CONIC_SOLVERS:
raise error.SolverError(
"When `gp=True`, `solver` must be a conic solver "
"(received '%s'); try calling " % solver +
" `solve()` with `solver=cvxpy.ECOS`."
)
elif solver is None:
candidates['qp_solvers'] = [] # No QP solvers allowed
if self.is_mixed_integer():
if len(slv_def.INSTALLED_MI_SOLVERS) == 0:
msg = """
CVXPY needs additional software (a `mixed-integer solver`) to handle this model.
The web documentation
https://www.cvxpy.org/tutorial/advanced/index.html#mixed-integer-programs
reviews open-source and commercial options for mixed-integer solvers.
Quick fix: if you install the python package CVXOPT (pip install cvxopt),
then CVXPY can use the open-source mixed-integer solver `GLPK`.
"""
raise error.SolverError(msg)
candidates['qp_solvers'] = [
s for s in candidates['qp_solvers']
if slv_def.SOLVER_MAP_QP[s].MIP_CAPABLE]
candidates['conic_solvers'] = [
s for s in candidates['conic_solvers']
if slv_def.SOLVER_MAP_CONIC[s].MIP_CAPABLE]
if not candidates['conic_solvers'] and \
not candidates['qp_solvers']:
raise error.SolverError(
"Problem is mixed-integer, but candidate "
"QP/Conic solvers (%s) are not MIP-capable." %
(candidates['qp_solvers'] +
candidates['conic_solvers']))
return candidates
def _construct_chain(self, solver=None, gp=False, enforce_dpp=False):
candidate_solvers = self._find_candidate_solvers(solver=solver, gp=gp)
return construct_solving_chain(self, candidate_solvers, gp=gp,
enforce_dpp=enforce_dpp)
def _invalidate_cache(self):
self._cache_key = None
self._solving_chain = None
self._param_prog = None
self._inverse_data = None
def _solve(self,
solver=None,
warm_start=True,
verbose=False,
gp=False, qcp=False, requires_grad=False, enforce_dpp=False, **kwargs):
for parameter in self.parameters():
if parameter.value is None:
raise error.ParameterError(
"A Parameter (whose name is '%s') does not have a value "
"associated with it; all Parameter objects must have "
"values before solving a problem." % parameter.name())
if requires_grad:
dpp_context = 'dgp' if gp else 'dcp'
if qcp:
raise ValueError("Cannot compute gradients of DQCP problems.")
elif not self.is_dpp(dpp_context):
raise error.DPPError("Problem is not DPP (when requires_grad "
"is True, problem must be DPP).")
elif solver is not None and solver not in [s.SCS, s.DIFFCP]:
raise ValueError("When requires_grad is True, the only "
"supported solver is SCS "
"(received %s)." % solver)
elif s.DIFFCP not in slv_def.INSTALLED_SOLVERS:
raise ImportError(
"The Python package diffcp must be installed to "
"differentiate through problems. Please follow the "
"installation instructions at "
"https://github.com/cvxgrp/diffcp")
else:
solver = s.DIFFCP
else:
if gp and qcp:
raise ValueError("At most one of `gp` and `qcp` can be True.")
if qcp and not self.is_dcp():
if not self.is_dqcp():
raise error.DQCPError("The problem is not DQCP.")
reductions = [dqcp2dcp.Dqcp2Dcp()]
if type(self.objective) == Maximize:
reductions = [FlipObjective()] + reductions
chain = Chain(problem=self, reductions=reductions)
soln = bisection.bisect(
chain.reduce(), solver=solver, verbose=verbose, **kwargs)
self.unpack(chain.retrieve(soln))
return self.value
data, solving_chain, inverse_data = self.get_problem_data(
solver, gp, enforce_dpp)
solution = solving_chain.solve_via_data(
self, data, warm_start, verbose, kwargs)
self.unpack_results(solution, solving_chain, inverse_data)
return self.value
def backward(self):
if s.DIFFCP not in self._solver_cache:
raise ValueError("backward can only be called after calling "
"solve with `requires_grad=True`")
elif self.status not in s.SOLUTION_PRESENT:
raise error.SolverError("Backpropagating through "
"infeasible/unbounded problems is not "
"yet supported. Please file an issue on "
"Github if you need this feature.")
# TODO(akshayka): Backpropagate through dual variables as well.
backward_cache = self._solver_cache[s.DIFFCP]
DT = backward_cache["DT"]
zeros = np.zeros(backward_cache["s"].shape)
del_vars = {}
gp = self._cache.gp()
for variable in self.variables():
if variable.gradient is None:
del_vars[variable.id] = np.ones(variable.shape)
else:
del_vars[variable.id] = np.asarray(variable.gradient,
dtype=np.float64)
if gp:
# x_gp = exp(x_cone_program),
# dx_gp/d x_cone_program = exp(x_cone_program) = x_gp
del_vars[variable.id] *= variable.value
dx = self._cache.param_prog.split_adjoint(del_vars)
start = time.time()
dA, db, dc = DT(dx, zeros, zeros)
end = time.time()
backward_cache['DT_TIME'] = end - start
dparams = self._cache.param_prog.apply_param_jac(dc, -dA, db)
if not gp:
for param in self.parameters():
param.gradient = dparams[param.id]
else:
dgp2dcp = self._cache.solving_chain.get(Dgp2Dcp)
old_params_to_new_params = dgp2dcp.canon_methods._parameters
for param in self.parameters():
# Note: if param is an exponent in a power or gmatmul atom,
# then the parameter passes through unchanged to the DCP
# program; if the param is also used elsewhere (not as an
# exponent), then param will also be in
# old_params_to_new_params. Therefore, param.gradient =
# dparams[param.id] (or 0) + 1/param*dparams[new_param.id]
#
# Note that param.id is in dparams if and only if
# param was used as an exponent (because this means that
# the parameter entered the DCP problem unchanged.)
grad = 0.0 if param.id not in dparams else dparams[param.id]
if param in old_params_to_new_params:
new_param = old_params_to_new_params[param]
# new_param.value == log(param), apply chain rule
grad += (1.0 / param.value) * dparams[new_param.id]
param.gradient = grad
def derivative(self):
if s.DIFFCP not in self._solver_cache:
raise ValueError("derivative can only be called after calling "
"solve with `requires_grad=True`")
elif self.status not in s.SOLUTION_PRESENT:
raise ValueError("Differentiating through infeasible/unbounded "
"problems is not yet supported. Please file an "
"issue on Github if you need this feature.")
# TODO(akshayka): Forward differentiate dual variables as well
backward_cache = self._solver_cache[s.DIFFCP]
param_prog = self._cache.param_prog
D = backward_cache["D"]
param_deltas = {}
gp = self._cache.gp()
if gp:
dgp2dcp = self._cache.solving_chain.get(Dgp2Dcp)
if not self.parameters():
for variable in self.variables():
variable.delta = np.zeros(variable.shape)
return
for param in self.parameters():
delta = param.delta if param.delta is not None else np.zeros(param.shape)
if gp:
if param in dgp2dcp.canon_methods._parameters:
new_param_id = dgp2dcp.canon_methods._parameters[param].id
else:
new_param_id = param.id
param_deltas[new_param_id] = (
1.0/param.value * np.asarray(delta, dtype=np.float64))
if param.id in param_prog.param_id_to_col:
# here, param generated a new parameter and also
# passed through to the param cone prog unchanged
# (because it was an exponent of a power)
param_deltas[param.id] = np.asarray(delta,
dtype=np.float64)
else:
param_deltas[param.id] = np.asarray(delta, dtype=np.float64)
dc, _, dA, db = param_prog.apply_parameters(param_deltas,
zero_offset=True)
start = time.time()
dx, _, _ = D(-dA, db, dc)
end = time.time()
backward_cache['D_TIME'] = end - start
dvars = param_prog.split_solution(
dx, [v.id for v in self.variables()])
for variable in self.variables():
variable.delta = dvars[variable.id]
if gp:
# x_gp = exp(x_cone_program),
# dx_gp/d x_cone_program = exp(x_cone_program) = x_gp
variable.delta *= variable.value
def _clear_solution(self):
for v in self.variables():
v.save_value(None)
for c in self.constraints:
for dv in c.dual_variables:
dv.save_value(None)
self._value = None
self._status = None
self._solution = None
def unpack(self, solution):
if solution.status in s.SOLUTION_PRESENT:
for v in self.variables():
v.save_value(solution.primal_vars[v.id])
for c in self.constraints:
if c.id in solution.dual_vars:
c.save_dual_value(solution.dual_vars[c.id])
elif solution.status in s.INF_OR_UNB:
for v in self.variables():
v.save_value(None)
for constr in self.constraints:
for dv in constr.dual_variables:
dv.save_value(None)
else:
raise ValueError("Cannot unpack invalid solution: %s" % solution)
self._value = solution.opt_val
self._status = solution.status
self._solution = solution
def unpack_results(self, solution, chain, inverse_data):
solution = chain.invert(solution, inverse_data)
if solution.status in s.ERROR:
raise error.SolverError(
"Solver '%s' failed. " % chain.solver.name() +
"Try another solver, or solve with verbose=True for more "
"information.")
self.unpack(solution)
self._solver_stats = SolverStats(self._solution.attr,
chain.solver.name())
def __str__(self):
if len(self.constraints) == 0:
return str(self.objective)
else:
subject_to = "subject to "
lines = [str(self.objective),
subject_to + str(self.constraints[0])]
for constr in self.constraints[1:]:
lines += [len(subject_to) * " " + str(constr)]
return '\n'.join(lines)
def __repr__(self):
return "Problem(%s, %s)" % (repr(self.objective),
repr(self.constraints))
def __neg__(self):
return Problem(-self.objective, self.constraints)
def __add__(self, other):
if other == 0:
return self
elif not isinstance(other, Problem):
return NotImplemented
return Problem(self.objective + other.objective,
unique_list(self.constraints + other.constraints))
def __radd__(self, other):
if other == 0:
return self
else:
return NotImplemented
def __sub__(self, other):
if not isinstance(other, Problem):
return NotImplemented
return Problem(self.objective - other.objective,
unique_list(self.constraints + other.constraints))
def __rsub__(self, other):
if other == 0:
return -self
else:
return NotImplemented
def __mul__(self, other):
if not isinstance(other, (int, float)):
return NotImplemented
return Problem(self.objective * other, self.constraints)
__rmul__ = __mul__
def __div__(self, other):
if not isinstance(other, (int, float)):
return NotImplemented
return Problem(self.objective * (1.0 / other), self.constraints)
def is_constant(self):
return False
__truediv__ = __div__
class SolverStats(object):
def __init__(self, results_dict, solver_name):
self.solver_name = solver_name
self.solve_time = None
self.setup_time = None
self.num_iters = None
if s.SOLVE_TIME in results_dict:
self.solve_time = results_dict[s.SOLVE_TIME]
if s.SETUP_TIME in results_dict:
self.setup_time = results_dict[s.SETUP_TIME]
if s.NUM_ITERS in results_dict:
self.num_iters = results_dict[s.NUM_ITERS]
class SizeMetrics(object):
def __init__(self, problem):
# num_scalar_variables
self.num_scalar_variables = 0
for var in problem.variables():
self.num_scalar_variables += var.size
# num_scalar_data, max_data_dimension, and max_big_small_squared
self.max_data_dimension = 0
self.num_scalar_data = 0
self.max_big_small_squared = 0
for const in problem.constants()+problem.parameters():
big = 0
# Compute number of data
self.num_scalar_data += const.size
big = 1 if len(const.shape) == 0 else max(const.shape)
small = 1 if len(const.shape) == 0 else min(const.shape)
# Get max data dimension:
if self.max_data_dimension < big:
self.max_data_dimension = big
max_big_small_squared = float(big)*(float(small)**2)
if self.max_big_small_squared < max_big_small_squared:
self.max_big_small_squared = max_big_small_squared
# num_scalar_eq_constr
self.num_scalar_eq_constr = 0
for constraint in problem.constraints:
if isinstance(constraint, (Equality, Zero)):
self.num_scalar_eq_constr += constraint.expr.size
# num_scalar_leq_constr
self.num_scalar_leq_constr = 0
for constraint in problem.constraints:
if isinstance(constraint, (Inequality, NonPos, NonNeg)):
self.num_scalar_leq_constr += constraint.expr.size
| true
| true
|
f70c10ef1da6e4113a7dda715729c1f83bb1a8dc
| 2,545
|
py
|
Python
|
models/pointnet2_part_seg_msg.py
|
Danielznn16/RoboticHand-in-KG
|
27e4eee97ea4ecab40fbd13b24a97e1f94c10258
|
[
"MIT"
] | null | null | null |
models/pointnet2_part_seg_msg.py
|
Danielznn16/RoboticHand-in-KG
|
27e4eee97ea4ecab40fbd13b24a97e1f94c10258
|
[
"MIT"
] | null | null | null |
models/pointnet2_part_seg_msg.py
|
Danielznn16/RoboticHand-in-KG
|
27e4eee97ea4ecab40fbd13b24a97e1f94c10258
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
import torch
import torch.nn.functional as F
from models.pointnet_util import PointNetSetAbstractionMsg,PointNetSetAbstraction,PointNetFeaturePropagation
class get_model(nn.Module):
def __init__(self, num_classes, normal_channel=False):
super(get_model, self).__init__()
if normal_channel:
additional_channel = 3
else:
additional_channel = 0
self.normal_channel = normal_channel
self.sa1 = PointNetSetAbstractionMsg(512, [0.1, 0.2, 0.4], [32, 64, 128], 3+additional_channel, [[32, 32, 64], [64, 64, 128], [64, 96, 128]])
self.sa2 = PointNetSetAbstractionMsg(128, [0.4,0.8], [64, 128], 128+128+64, [[128, 128, 256], [128, 196, 256]])
self.sa3 = PointNetSetAbstraction(npoint=None, radius=None, nsample=None, in_channel=512 + 3, mlp=[256, 512, 1024], group_all=True)
self.fp3 = PointNetFeaturePropagation(in_channel=1536, mlp=[256, 256])
self.fp2 = PointNetFeaturePropagation(in_channel=576, mlp=[256, 128])
self.fp1 = PointNetFeaturePropagation(in_channel=150+additional_channel, mlp=[128, 128])
self.conv1 = nn.Conv1d(128, 128, 1)
self.bn1 = nn.BatchNorm1d(128)
self.drop1 = nn.Dropout(0.5)
self.conv2 = nn.Conv1d(128, num_classes, 1)
def forward(self, xyz, cls_label):
# Set Abstraction layers
B,C,N = xyz.shape
if self.normal_channel:
l0_points = xyz
l0_xyz = xyz[:,:3,:]
else:
l0_points = xyz
l0_xyz = xyz
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l3_xyz, l3_points = self.sa3(l2_xyz, l2_points)
# Feature Propagation layers
l2_points = self.fp3(l2_xyz, l3_xyz, l2_points, l3_points)
l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)
cls_label_one_hot = cls_label.view(B,16,1).repeat(1,1,N)
# print(cls_label_one_hot)
l0_points = self.fp1(l0_xyz, l1_xyz, torch.cat([cls_label_one_hot,l0_xyz,l0_points],1), l1_points)
# FC layers
feat = F.relu(self.bn1(self.conv1(l0_points)))
x = self.drop1(feat)
x = self.conv2(x)
x = F.log_softmax(x, dim=1)
x = x.permute(0, 2, 1)
return x, l3_points
class get_loss(nn.Module):
def __init__(self):
super(get_loss, self).__init__()
def forward(self, pred, target, trans_feat):
total_loss = F.nll_loss(pred, target)
return total_loss
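# --- Editor's illustrative sketch (not part of the original file) -----------
# A minimal smoke test for the part-segmentation model above, assuming PyTorch
# and models.pointnet_util are importable. Shapes follow forward(): xyz is
# (B, C, N) and cls_label is a (B, 16) one-hot object-category tensor; the
# values below (batch of 2, 1024 points, 50 part classes) are examples only.
if __name__ == "__main__":
    model = get_model(num_classes=50, normal_channel=False)
    xyz = torch.rand(2, 3, 1024)        # 2 point clouds with 1024 xyz points each
    cls_label = torch.zeros(2, 16)      # one-hot object category per cloud
    cls_label[:, 0] = 1.0
    seg_log_probs, global_feat = model(xyz, cls_label)
    print(seg_log_probs.shape)          # expected: torch.Size([2, 1024, 50])
    # The loss module expects per-point log-probabilities and integer part labels.
    dummy_target = torch.zeros(2 * 1024, dtype=torch.long)
    loss = get_loss()(seg_log_probs.reshape(-1, 50), dummy_target, global_feat)
    print(loss.item())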
| 42.416667
| 149
| 0.6389
|
import torch.nn as nn
import torch
import torch.nn.functional as F
from models.pointnet_util import PointNetSetAbstractionMsg,PointNetSetAbstraction,PointNetFeaturePropagation
class get_model(nn.Module):
def __init__(self, num_classes, normal_channel=False):
super(get_model, self).__init__()
if normal_channel:
additional_channel = 3
else:
additional_channel = 0
self.normal_channel = normal_channel
self.sa1 = PointNetSetAbstractionMsg(512, [0.1, 0.2, 0.4], [32, 64, 128], 3+additional_channel, [[32, 32, 64], [64, 64, 128], [64, 96, 128]])
self.sa2 = PointNetSetAbstractionMsg(128, [0.4,0.8], [64, 128], 128+128+64, [[128, 128, 256], [128, 196, 256]])
self.sa3 = PointNetSetAbstraction(npoint=None, radius=None, nsample=None, in_channel=512 + 3, mlp=[256, 512, 1024], group_all=True)
self.fp3 = PointNetFeaturePropagation(in_channel=1536, mlp=[256, 256])
self.fp2 = PointNetFeaturePropagation(in_channel=576, mlp=[256, 128])
self.fp1 = PointNetFeaturePropagation(in_channel=150+additional_channel, mlp=[128, 128])
self.conv1 = nn.Conv1d(128, 128, 1)
self.bn1 = nn.BatchNorm1d(128)
self.drop1 = nn.Dropout(0.5)
self.conv2 = nn.Conv1d(128, num_classes, 1)
def forward(self, xyz, cls_label):
B,C,N = xyz.shape
if self.normal_channel:
l0_points = xyz
l0_xyz = xyz[:,:3,:]
else:
l0_points = xyz
l0_xyz = xyz
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l3_xyz, l3_points = self.sa3(l2_xyz, l2_points)
l2_points = self.fp3(l2_xyz, l3_xyz, l2_points, l3_points)
l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)
cls_label_one_hot = cls_label.view(B,16,1).repeat(1,1,N)
l0_points = self.fp1(l0_xyz, l1_xyz, torch.cat([cls_label_one_hot,l0_xyz,l0_points],1), l1_points)
feat = F.relu(self.bn1(self.conv1(l0_points)))
x = self.drop1(feat)
x = self.conv2(x)
x = F.log_softmax(x, dim=1)
x = x.permute(0, 2, 1)
return x, l3_points
class get_loss(nn.Module):
def __init__(self):
super(get_loss, self).__init__()
def forward(self, pred, target, trans_feat):
total_loss = F.nll_loss(pred, target)
return total_loss
| true
| true
|
f70c11f8a5b4f0874903f8ba8e3d38d1b62f1537
| 12,511
|
py
|
Python
|
shared/common.py
|
jonnyCodev/cloudmapper
|
10fd533e318f0a18f58929f1759e32005347254e
|
[
"BSD-3-Clause"
] | null | null | null |
shared/common.py
|
jonnyCodev/cloudmapper
|
10fd533e318f0a18f58929f1759e32005347254e
|
[
"BSD-3-Clause"
] | null | null | null |
shared/common.py
|
jonnyCodev/cloudmapper
|
10fd533e318f0a18f58929f1759e32005347254e
|
[
"BSD-3-Clause"
] | 1
|
2021-12-23T12:42:14.000Z
|
2021-12-23T12:42:14.000Z
|
from __future__ import print_function
import argparse
import json
import datetime
import pyjq
import yaml
import sys
from netaddr import IPNetwork
from shared.nodes import Account, Region
from shared.query import query_aws, get_parameter_file
class Severity:
# For logging
DEBUG = 0
INFO = 1
WARN = 2
ERROR = 3
@classmethod
def str_to_int(cls, level):
if level == "DEBUG":
return cls.DEBUG
elif level == "INFO":
return cls.INFO
elif level == "WARN":
return cls.WARN
elif level == "ERROR":
return cls.ERROR
else:
raise Exception("Unknown log level {}".format(level))
@staticmethod
def string(severity_level):
if severity_level == Severity.DEBUG:
return "DEBUG"
elif severity_level == Severity.INFO:
return "INFO"
elif severity_level == Severity.WARN:
return "WARN"
elif severity_level == Severity.ERROR:
return "ERROR"
else:
raise Exception("Unknown severity level")
LOG_LEVEL = Severity.INFO
def log_debug(msg, location=None, reasons=[]):
log_issue(Severity.DEBUG, msg, location, reasons)
def log_info(msg, location=None, reasons=[]):
log_issue(Severity.INFO, msg, location, reasons)
def log_warning(msg, location=None, reasons=[]):
log_issue(Severity.WARN, msg, location, reasons)
def log_error(msg, location=None, reasons=[]):
log_issue(Severity.ERROR, msg, location, reasons)
def log_issue(severity, msg, location=None, reasons=[]):
if severity >= LOG_LEVEL:
json_issue = {
"Severity": Severity.string(severity),
"Issue": msg,
"Location": location,
"Reasons": reasons,
}
print(json.dumps(json_issue, sort_keys=True), file=sys.stderr)
class Finding(object):
"""Used for auditing"""
region = None
issue_id = None
resource_id = None
resource_details = None
def __init__(self, region, issue_id, resource_id, resource_details=None):
self.region = region
self.issue_id = issue_id
self.resource_id = resource_id
self.resource_details = resource_details
def __str__(self):
return json.dumps(
{
"account_id": self.region.account.local_id,
"account_name": self.region.account.name,
"region": self.region.name,
"issue": self.issue_id,
"resource": self.resource_id,
"details": self.resource_details,
}
)
@property
def account_name(self):
return self.region.account.name
def custom_serializer(x):
if isinstance(x, datetime.datetime):
return x.isoformat()
elif isinstance(x, bytes):
return x.decode()
raise TypeError("Unknown type")
def make_list(v):
if not isinstance(v, list):
return [v]
return v
def is_external_cidr(cidr):
ipnetwork = IPNetwork(cidr)
if (
ipnetwork in IPNetwork("10.0.0.0/8")
or ipnetwork in IPNetwork("172.16.0.0/12")
or ipnetwork in IPNetwork("192.168.0.0/16")
):
return False
return True
def is_unblockable_cidr(cidr):
ipnetwork = IPNetwork(cidr)
if (
ipnetwork in IPNetwork("169.254.0.0/16")
or ipnetwork in IPNetwork("127.0.0.0/8") # link local
or ipnetwork in IPNetwork("192.0.2.0/24") # loopback
or ipnetwork in IPNetwork("198.51.100.0/24") # Test network from RFC 5737
or ipnetwork in IPNetwork("203.0.113.0/24") # Test network
or ipnetwork in IPNetwork("224.0.0.0/4") # Test network
or ipnetwork in IPNetwork("240.0.0.0/5") # class D multicast
or ipnetwork in IPNetwork("248.0.0.0/5") # class E reserved
or ipnetwork in IPNetwork("255.255.255.255/32") # reserved # broadcast
):
return True
return False
def get_regions(account, outputfilter={}):
# aws ec2 describe-regions
region_data = query_aws(account, "describe-regions")
region_filter = ""
if "regions" in outputfilter:
region_filter = "| select(.RegionName | contains({}))".format(
outputfilter["regions"]
)
regions = pyjq.all(".Regions[]{}".format(region_filter), region_data)
return regions
def get_account(account_name, config=None, config_filename="config.json.demo"):
if config is None:
config = json.load(open(config_filename))
for account in config["accounts"]:
if account["name"] == account_name:
return account
if account_name is None and account.get("default", False):
return account
# Else could not find account
if account_name is None:
exit(
"ERROR: Must specify an account, or set one in {} as a default".format(
config_filename
)
)
exit(
'ERROR: Account named "{}" not found in {}'.format(
account_name, config_filename
)
)
def parse_arguments(arguments, parser=None):
"""Returns (args, accounts, config)"""
if parser is None:
parser = argparse.ArgumentParser()
parser.add_argument(
"--config", help="Config file name", default="config.json", type=str
)
parser.add_argument(
"--accounts", help="Accounts to collect from", required=True, type=str
)
parser.add_argument(
"--log_level",
help="Log level to record (DEBUG, INFO, WARN, ERROR)",
default="INFO",
required=False,
type=str,
)
args = parser.parse_args(arguments)
global LOG_LEVEL
LOG_LEVEL = Severity.str_to_int(args.log_level)
# Read accounts file
try:
config = json.load(open(args.config))
except IOError:
exit('ERROR: Unable to load config file "{}"'.format(args.config))
except ValueError as e:
exit(
'ERROR: Config file "{}" could not be loaded ({}), see config.json.demo for an example'.format(
args.config, e
)
)
# Get accounts
account_names = args.accounts.split(",")
accounts = []
# TODO Need to be able to tag accounts into sets (ex. Prod, or by business unit) so the tag can be referenced
# as opposed to the individual account names.
for account_name in account_names:
if account_name == "all":
for account in config["accounts"]:
accounts.append(account)
break
accounts.append(get_account(account_name, config, args.config))
return (args, accounts, config)
def get_account_stats(account, all_resources=False):
"""Returns stats for an account"""
with open("stats_config.yaml", "r") as f:
resources = yaml.safe_load(f)
account = Account(None, account)
log_debug(
"Collecting stats in account {} ({})".format(account.name, account.local_id)
)
stats = {}
stats["keys"] = []
for resource in resources:
# If the resource is marked as verbose, and we're not showing all resources, skip it.
if resource.get("verbose", False) and not all_resources:
continue
stats["keys"].append(resource["name"])
stats[resource["name"]] = {}
for region_json in get_regions(account):
region = Region(account, region_json)
for resource in resources:
if resource.get("verbose", False) and not all_resources:
continue
# Skip global services (just CloudFront)
if ("region" in resource) and (resource["region"] != region.name):
continue
# S3 buckets require special code to identify their location
if resource["name"] == "S3 buckets":
if region.name == "us-east-1":
buckets = pyjq.all(
".Buckets[].Name",
query_aws(region.account, "s3-list-buckets", region),
)
for bucket in buckets:
# Get the bucket's location
bucket_region = get_parameter_file(
region, "s3", "get-bucket-location", bucket
)["LocationConstraint"]
# Convert the value to a name.
# See https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
if bucket_region is None:
bucket_region = "us-east-1"
elif bucket_region == "EU":
bucket_region = "eu-west-1"
# Increment the count
tmp = stats[resource["name"]].get(bucket_region, 0)
stats[resource["name"]][bucket_region] = tmp + 1
else:
if region.name != 'ap-east-1':
# Normal path
stats[resource["name"]][region.name] = sum(
pyjq.all(
resource["query"],
query_aws(region.account, resource["source"], region),
)
)
return stats
def get_us_east_1(account):
for region_json in get_regions(account):
region = Region(account, region_json)
if region.name == "us-east-1":
return region
raise Exception("us-east-1 not found")
def iso_date(d):
""" Convert ISO format date string such as 2018-04-08T23:33:20+00:00"""
time_format = "%Y-%m-%dT%H:%M:%S"
return datetime.datetime.strptime(d.split("+")[0], time_format)
def days_between(s1, s2):
"""s1 and s2 are date strings"""
d1 = iso_date(s1)
d2 = iso_date(s2)
return abs((d1 - d2).days)
def get_collection_date(account):
if type(account) is not Account:
account = Account(None, account)
account_struct = account
json_blob = query_aws(
account_struct, "iam-get-credential-report", get_us_east_1(account_struct)
)
if not json_blob:
raise Exception(
"File iam-get-credential-report.json does not exist or is not well-formed. Likely cause is you did not run the collect command for this account."
)
# GeneratedTime looks like "2019-01-30T15:43:24+00:00"
return json_blob["GeneratedTime"]
def get_access_advisor_active_counts(account, max_age=90):
region = get_us_east_1(account)
json_account_auth_details = query_aws(
region.account, "iam-get-account-authorization-details", region
)
account_stats = {
"users": {"active": 0, "inactive": 0},
"roles": {"active": 0, "inactive": 0},
}
for principal_auth in [
*json_account_auth_details["UserDetailList"],
*json_account_auth_details["RoleDetailList"],
]:
stats = {}
stats["auth"] = principal_auth
principal_type = "roles"
if "UserName" in principal_auth:
principal_type = "users"
job_id = get_parameter_file(
region,
"iam",
"generate-service-last-accessed-details",
principal_auth["Arn"],
)["JobId"]
json_last_access_details = get_parameter_file(
region, "iam", "get-service-last-accessed-details", job_id
)
stats["last_access"] = json_last_access_details
stats["is_inactive"] = True
job_completion_date = datetime.datetime.strptime(
json_last_access_details["JobCompletionDate"][0:10], "%Y-%m-%d"
)
for service in json_last_access_details["ServicesLastAccessed"]:
if "LastAuthenticated" in service:
last_access_date = datetime.datetime.strptime(
service["LastAuthenticated"][0:10], "%Y-%m-%d"
)
if (job_completion_date - last_access_date).days < max_age:
stats["is_inactive"] = False
break
if stats["is_inactive"]:
account_stats[principal_type]["inactive"] += 1
else:
account_stats[principal_type]["active"] += 1
return account_stats
def get_current_policy_doc(policy):
for doc in policy["PolicyVersionList"]:
if doc["IsDefaultVersion"]:
return doc["Document"]
raise Exception("No default document version in policy {}".format(policy["Arn"]))
| 31.044665
| 157
| 0.587963
|
from __future__ import print_function
import argparse
import json
import datetime
import pyjq
import yaml
import sys
from netaddr import IPNetwork
from shared.nodes import Account, Region
from shared.query import query_aws, get_parameter_file
class Severity:
DEBUG = 0
INFO = 1
WARN = 2
ERROR = 3
@classmethod
def str_to_int(cls, level):
if level == "DEBUG":
return cls.DEBUG
elif level == "INFO":
return cls.INFO
elif level == "WARN":
return cls.WARN
elif level == "ERROR":
return cls.ERROR
else:
raise Exception("Unknown log level {}".format(level))
@staticmethod
def string(severity_level):
if severity_level == Severity.DEBUG:
return "DEBUG"
elif severity_level == Severity.INFO:
return "INFO"
elif severity_level == Severity.WARN:
return "WARN"
elif severity_level == Severity.ERROR:
return "ERROR"
else:
raise Exception("Unknown severity level")
LOG_LEVEL = Severity.INFO
def log_debug(msg, location=None, reasons=[]):
log_issue(Severity.DEBUG, msg, location, reasons)
def log_info(msg, location=None, reasons=[]):
log_issue(Severity.INFO, msg, location, reasons)
def log_warning(msg, location=None, reasons=[]):
log_issue(Severity.WARN, msg, location, reasons)
def log_error(msg, location=None, reasons=[]):
log_issue(Severity.ERROR, msg, location, reasons)
def log_issue(severity, msg, location=None, reasons=[]):
if severity >= LOG_LEVEL:
json_issue = {
"Severity": Severity.string(severity),
"Issue": msg,
"Location": location,
"Reasons": reasons,
}
print(json.dumps(json_issue, sort_keys=True), file=sys.stderr)
class Finding(object):
region = None
issue_id = None
resource_id = None
resource_details = None
def __init__(self, region, issue_id, resource_id, resource_details=None):
self.region = region
self.issue_id = issue_id
self.resource_id = resource_id
self.resource_details = resource_details
def __str__(self):
return json.dumps(
{
"account_id": self.region.account.local_id,
"account_name": self.region.account.name,
"region": self.region.name,
"issue": self.issue_id,
"resource": self.resource_id,
"details": self.resource_details,
}
)
@property
def account_name(self):
return self.region.account.name
def custom_serializer(x):
if isinstance(x, datetime.datetime):
return x.isoformat()
elif isinstance(x, bytes):
return x.decode()
raise TypeError("Unknown type")
def make_list(v):
if not isinstance(v, list):
return [v]
return v
def is_external_cidr(cidr):
ipnetwork = IPNetwork(cidr)
if (
ipnetwork in IPNetwork("10.0.0.0/8")
or ipnetwork in IPNetwork("172.16.0.0/12")
or ipnetwork in IPNetwork("192.168.0.0/16")
):
return False
return True
def is_unblockable_cidr(cidr):
ipnetwork = IPNetwork(cidr)
if (
ipnetwork in IPNetwork("169.254.0.0/16")
or ipnetwork in IPNetwork("127.0.0.0/8") or ipnetwork in IPNetwork("192.0.2.0/24") or ipnetwork in IPNetwork("198.51.100.0/24") or ipnetwork in IPNetwork("203.0.113.0/24") or ipnetwork in IPNetwork("224.0.0.0/4") or ipnetwork in IPNetwork("240.0.0.0/5") or ipnetwork in IPNetwork("248.0.0.0/5") or ipnetwork in IPNetwork("255.255.255.255/32") ):
return True
return False
def get_regions(account, outputfilter={}):
region_data = query_aws(account, "describe-regions")
region_filter = ""
if "regions" in outputfilter:
region_filter = "| select(.RegionName | contains({}))".format(
outputfilter["regions"]
)
regions = pyjq.all(".Regions[]{}".format(region_filter), region_data)
return regions
def get_account(account_name, config=None, config_filename="config.json.demo"):
if config is None:
config = json.load(open(config_filename))
for account in config["accounts"]:
if account["name"] == account_name:
return account
if account_name is None and account.get("default", False):
return account
if account_name is None:
exit(
"ERROR: Must specify an account, or set one in {} as a default".format(
config_filename
)
)
exit(
'ERROR: Account named "{}" not found in {}'.format(
account_name, config_filename
)
)
def parse_arguments(arguments, parser=None):
if parser is None:
parser = argparse.ArgumentParser()
parser.add_argument(
"--config", help="Config file name", default="config.json", type=str
)
parser.add_argument(
"--accounts", help="Accounts to collect from", required=True, type=str
)
parser.add_argument(
"--log_level",
help="Log level to record (DEBUG, INFO, WARN, ERROR)",
default="INFO",
required=False,
type=str,
)
args = parser.parse_args(arguments)
global LOG_LEVEL
LOG_LEVEL = Severity.str_to_int(args.log_level)
try:
config = json.load(open(args.config))
except IOError:
exit('ERROR: Unable to load config file "{}"'.format(args.config))
except ValueError as e:
exit(
'ERROR: Config file "{}" could not be loaded ({}), see config.json.demo for an example'.format(
args.config, e
)
)
account_names = args.accounts.split(",")
accounts = []
for account_name in account_names:
if account_name == "all":
for account in config["accounts"]:
accounts.append(account)
break
accounts.append(get_account(account_name, config, args.config))
return (args, accounts, config)
def get_account_stats(account, all_resources=False):
with open("stats_config.yaml", "r") as f:
resources = yaml.safe_load(f)
account = Account(None, account)
log_debug(
"Collecting stats in account {} ({})".format(account.name, account.local_id)
)
stats = {}
stats["keys"] = []
for resource in resources:
if resource.get("verbose", False) and not all_resources:
continue
stats["keys"].append(resource["name"])
stats[resource["name"]] = {}
for region_json in get_regions(account):
region = Region(account, region_json)
for resource in resources:
if resource.get("verbose", False) and not all_resources:
continue
# Skip global services (just CloudFront)
if ("region" in resource) and (resource["region"] != region.name):
continue
# S3 buckets require special code to identify their location
if resource["name"] == "S3 buckets":
if region.name == "us-east-1":
buckets = pyjq.all(
".Buckets[].Name",
query_aws(region.account, "s3-list-buckets", region),
)
for bucket in buckets:
# Get the bucket's location
bucket_region = get_parameter_file(
region, "s3", "get-bucket-location", bucket
)["LocationConstraint"]
if bucket_region is None:
bucket_region = "us-east-1"
elif bucket_region == "EU":
bucket_region = "eu-west-1"
tmp = stats[resource["name"]].get(bucket_region, 0)
stats[resource["name"]][bucket_region] = tmp + 1
else:
if region.name != 'ap-east-1':
stats[resource["name"]][region.name] = sum(
pyjq.all(
resource["query"],
query_aws(region.account, resource["source"], region),
)
)
return stats
def get_us_east_1(account):
for region_json in get_regions(account):
region = Region(account, region_json)
if region.name == "us-east-1":
return region
raise Exception("us-east-1 not found")
def iso_date(d):
time_format = "%Y-%m-%dT%H:%M:%S"
return datetime.datetime.strptime(d.split("+")[0], time_format)
def days_between(s1, s2):
d1 = iso_date(s1)
d2 = iso_date(s2)
return abs((d1 - d2).days)
def get_collection_date(account):
if type(account) is not Account:
account = Account(None, account)
account_struct = account
json_blob = query_aws(
account_struct, "iam-get-credential-report", get_us_east_1(account_struct)
)
if not json_blob:
raise Exception(
"File iam-get-credential-report.json does not exist or is not well-formed. Likely cause is you did not run the collect command for this account."
)
return json_blob["GeneratedTime"]
def get_access_advisor_active_counts(account, max_age=90):
region = get_us_east_1(account)
json_account_auth_details = query_aws(
region.account, "iam-get-account-authorization-details", region
)
account_stats = {
"users": {"active": 0, "inactive": 0},
"roles": {"active": 0, "inactive": 0},
}
for principal_auth in [
*json_account_auth_details["UserDetailList"],
*json_account_auth_details["RoleDetailList"],
]:
stats = {}
stats["auth"] = principal_auth
principal_type = "roles"
if "UserName" in principal_auth:
principal_type = "users"
job_id = get_parameter_file(
region,
"iam",
"generate-service-last-accessed-details",
principal_auth["Arn"],
)["JobId"]
json_last_access_details = get_parameter_file(
region, "iam", "get-service-last-accessed-details", job_id
)
stats["last_access"] = json_last_access_details
stats["is_inactive"] = True
job_completion_date = datetime.datetime.strptime(
json_last_access_details["JobCompletionDate"][0:10], "%Y-%m-%d"
)
for service in json_last_access_details["ServicesLastAccessed"]:
if "LastAuthenticated" in service:
last_access_date = datetime.datetime.strptime(
service["LastAuthenticated"][0:10], "%Y-%m-%d"
)
if (job_completion_date - last_access_date).days < max_age:
stats["is_inactive"] = False
break
if stats["is_inactive"]:
account_stats[principal_type]["inactive"] += 1
else:
account_stats[principal_type]["active"] += 1
return account_stats
def get_current_policy_doc(policy):
for doc in policy["PolicyVersionList"]:
if doc["IsDefaultVersion"]:
return doc["Document"]
raise Exception("No default document version in policy {}".format(policy["Arn"]))
| true
| true
|
f70c127f5f194424081088d7da6167ddefd1d0fc
| 3,957
|
py
|
Python
|
monitorrent/plugins/clients/transmission.py
|
mortifactor/monitorrent
|
2388ec5b82af5d078fa7e37930d3b66b4a797954
|
[
"WTFPL"
] | 465
|
2015-08-31T09:16:41.000Z
|
2022-03-12T10:33:04.000Z
|
monitorrent/plugins/clients/transmission.py
|
mortifactor/monitorrent
|
2388ec5b82af5d078fa7e37930d3b66b4a797954
|
[
"WTFPL"
] | 340
|
2015-07-18T17:31:54.000Z
|
2022-03-30T15:16:25.000Z
|
monitorrent/plugins/clients/transmission.py
|
mortifactor/monitorrent
|
2388ec5b82af5d078fa7e37930d3b66b4a797954
|
[
"WTFPL"
] | 87
|
2015-07-18T10:52:24.000Z
|
2022-03-27T09:52:35.000Z
|
import six
import transmissionrpc
from pytz import reference, utc
from sqlalchemy import Column, Integer, String
from monitorrent.db import Base, DBSession
from monitorrent.plugin_managers import register_plugin
import base64
class TransmissionCredentials(Base):
__tablename__ = "transmission_credentials"
id = Column(Integer, primary_key=True)
host = Column(String, nullable=False)
port = Column(Integer, nullable=False)
username = Column(String, nullable=True)
password = Column(String, nullable=True)
class TransmissionClientPlugin(object):
name = "transmission"
form = [{
'type': 'row',
'content': [{
'type': 'text',
'label': 'Host',
'model': 'host',
'flex': 80
}, {
'type': 'text',
'label': 'Port',
'model': 'port',
'flex': 20
}]
}, {
'type': 'row',
'content': [{
'type': 'text',
'label': 'Username',
'model': 'username',
'flex': 50
}, {
'type': 'password',
'label': 'Password',
'model': 'password',
'flex': 50
}]
}]
DEFAULT_PORT = 9091
SUPPORTED_FIELDS = ['download_dir']
def get_settings(self):
with DBSession() as db:
cred = db.query(TransmissionCredentials).first()
if not cred:
return None
return {'host': cred.host, 'port': cred.port, 'username': cred.username}
def set_settings(self, settings):
with DBSession() as db:
cred = db.query(TransmissionCredentials).first()
if not cred:
cred = TransmissionCredentials()
db.add(cred)
cred.host = settings['host']
cred.port = settings.get('port', self.DEFAULT_PORT)
cred.username = settings.get('username', None)
cred.password = settings.get('password', None)
def check_connection(self):
with DBSession() as db:
cred = db.query(TransmissionCredentials).first()
if not cred:
return False
client = transmissionrpc.Client(address=cred.host, port=cred.port,
user=cred.username, password=cred.password)
return client
def find_torrent(self, torrent_hash):
client = self.check_connection()
if not client:
return False
try:
torrent = client.get_torrent(torrent_hash.lower(), ['id', 'hashString', 'addedDate', 'name'])
return {
"name": torrent.name,
"date_added": torrent.date_added.replace(tzinfo=reference.LocalTimezone()).astimezone(utc)
}
except KeyError:
return False
def get_download_dir(self):
client = self.check_connection()
if not client:
return None
session = client.get_session()
return six.text_type(session.download_dir)
def add_torrent(self, torrent, torrent_settings):
"""
:type torrent: str
:type torrent_settings: clients.TopicSettings | None
"""
client = self.check_connection()
if not client:
return False
torrent_settings_dict = {}
if torrent_settings is not None:
if torrent_settings.download_dir is not None:
torrent_settings_dict['download_dir'] = torrent_settings.download_dir
client.add_torrent(base64.b64encode(torrent).decode('utf-8'), **torrent_settings_dict)
return True
def remove_torrent(self, torrent_hash):
client = self.check_connection()
if not client:
return False
client.remove_torrent(torrent_hash.lower(), delete_data=False)
return True
register_plugin('client', 'transmission', TransmissionClientPlugin())
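# --- Editor's illustrative sketch (not part of the original plugin) ---------
# Shows the settings dict shape consumed by set_settings() above; host is
# required, while port/username/password fall back to DEFAULT_PORT / None.
# This block only inspects class-level metadata, because set_settings() and
# check_connection() additionally need an initialized monitorrent database and
# a reachable Transmission RPC endpoint (the values below are hypothetical).
if __name__ == "__main__":
    plugin = TransmissionClientPlugin()
    print(plugin.name, plugin.DEFAULT_PORT)   # transmission 9091
    example_settings = {'host': 'localhost', 'port': 9091,
                        'username': 'admin', 'password': 'secret'}
    # With a configured DB session one would call:
    #   plugin.set_settings(example_settings)
    #   client = plugin.check_connection()    # transmissionrpc.Client or False
    print(example_settings)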
| 32.434426
| 106
| 0.573414
|
import six
import transmissionrpc
from pytz import reference, utc
from sqlalchemy import Column, Integer, String
from monitorrent.db import Base, DBSession
from monitorrent.plugin_managers import register_plugin
import base64
class TransmissionCredentials(Base):
__tablename__ = "transmission_credentials"
id = Column(Integer, primary_key=True)
host = Column(String, nullable=False)
port = Column(Integer, nullable=False)
username = Column(String, nullable=True)
password = Column(String, nullable=True)
class TransmissionClientPlugin(object):
name = "transmission"
form = [{
'type': 'row',
'content': [{
'type': 'text',
'label': 'Host',
'model': 'host',
'flex': 80
}, {
'type': 'text',
'label': 'Port',
'model': 'port',
'flex': 20
}]
}, {
'type': 'row',
'content': [{
'type': 'text',
'label': 'Username',
'model': 'username',
'flex': 50
}, {
'type': 'password',
'label': 'Password',
'model': 'password',
'flex': 50
}]
}]
DEFAULT_PORT = 9091
SUPPORTED_FIELDS = ['download_dir']
def get_settings(self):
with DBSession() as db:
cred = db.query(TransmissionCredentials).first()
if not cred:
return None
return {'host': cred.host, 'port': cred.port, 'username': cred.username}
def set_settings(self, settings):
with DBSession() as db:
cred = db.query(TransmissionCredentials).first()
if not cred:
cred = TransmissionCredentials()
db.add(cred)
cred.host = settings['host']
cred.port = settings.get('port', self.DEFAULT_PORT)
cred.username = settings.get('username', None)
cred.password = settings.get('password', None)
def check_connection(self):
with DBSession() as db:
cred = db.query(TransmissionCredentials).first()
if not cred:
return False
client = transmissionrpc.Client(address=cred.host, port=cred.port,
user=cred.username, password=cred.password)
return client
def find_torrent(self, torrent_hash):
client = self.check_connection()
if not client:
return False
try:
torrent = client.get_torrent(torrent_hash.lower(), ['id', 'hashString', 'addedDate', 'name'])
return {
"name": torrent.name,
"date_added": torrent.date_added.replace(tzinfo=reference.LocalTimezone()).astimezone(utc)
}
except KeyError:
return False
def get_download_dir(self):
client = self.check_connection()
if not client:
return None
session = client.get_session()
return six.text_type(session.download_dir)
def add_torrent(self, torrent, torrent_settings):
client = self.check_connection()
if not client:
return False
torrent_settings_dict = {}
if torrent_settings is not None:
if torrent_settings.download_dir is not None:
torrent_settings_dict['download_dir'] = torrent_settings.download_dir
client.add_torrent(base64.b64encode(torrent).decode('utf-8'), **torrent_settings_dict)
return True
def remove_torrent(self, torrent_hash):
client = self.check_connection()
if not client:
return False
client.remove_torrent(torrent_hash.lower(), delete_data=False)
return True
register_plugin('client', 'transmission', TransmissionClientPlugin())
| true
| true
|
f70c12954517a7ab92741620556d0e5dde45046e
| 3,476
|
py
|
Python
|
examples/generate_notices_report_for_project_version.py
|
AvneetKhaira/hub-rest-api-python
|
d9fac065d8cae72aded87f7326477b03f52f45f8
|
[
"Apache-2.0"
] | null | null | null |
examples/generate_notices_report_for_project_version.py
|
AvneetKhaira/hub-rest-api-python
|
d9fac065d8cae72aded87f7326477b03f52f45f8
|
[
"Apache-2.0"
] | null | null | null |
examples/generate_notices_report_for_project_version.py
|
AvneetKhaira/hub-rest-api-python
|
d9fac065d8cae72aded87f7326477b03f52f45f8
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on Dec 19, 2018
@author: gsnyder
Generate notices report for a given project-version
'''
from blackduck.HubRestApi import HubInstance
import argparse
import json
import logging
import sys
import time
import zipfile
parser = argparse.ArgumentParser("A program to generate the notices file for a given project-version")
parser.add_argument("project_name")
parser.add_argument("version_name")
# TODO: Add the copyright checkbox option
parser.add_argument('-f', "--file_name_base", default="notices_report", help="Base file name to write the report data into. If the report format is TEXT a .zip file will be created, otherwise a .json file")
parser.add_argument('-r', '--report_format', default='TEXT', choices=["JSON", "TEXT"], help="Report format - choices are TEXT or JSON")
parser.add_argument('-c', '--include_copyright_info', action='store_true', help="Set this option to have additional copyright information from the Black Duck KB included in the notices file report.")
args = parser.parse_args()
hub = HubInstance()
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', stream=sys.stderr, level=logging.DEBUG)
class FailedReportDownload(Exception):
pass
def download_report(location, file_name_base, retries=10):
report_id = location.split("/")[-1]
if retries:
logging.debug("Retrieving generated report from {}".format(location))
# response = hub.download_report(report_id)
response, report_format = hub.download_notification_report(location)
if response.status_code == 200:
if report_format == "TEXT":
filename = file_name_base + ".zip"
with open(filename, "wb") as f:
f.write(response.content)
else:
# JSON format
filename = file_name_base + ".json"
with open(filename, "w") as f:
json.dump(response.json(), f, indent=3)
logging.info("Successfully downloaded json file to {} for report {}".format(
filename, report_id))
else:
logging.warning("Failed to retrieve report {}".format(report_id))
logging.warning("Probably not ready yet, waiting 5 seconds then retrying (remaining retries={}".format(retries))
time.sleep(5)
retries -= 1
download_report(location, file_name_base, retries)
else:
raise FailedReportDownload("Failed to retrieve report {} after multiple retries".format(report_id))
project = hub.get_project_by_name(args.project_name)
if project:
version = hub.get_version_by_name(project, args.version_name)
response = hub.create_version_notices_report(version, args.report_format, include_copyright_info=args.include_copyright_info)
if response.status_code == 201:
logging.info("Successfully created notices report in {} format for project {} and version {}".format(
args.report_format, args.project_name, args.version_name))
location = response.headers['Location']
download_report(location, args.file_name_base)
# Showing how you can interact with the downloaded zip and where to find the
# output content. Uncomment the lines below to see how it works.
# with zipfile.ZipFile(zip_file_name_base, 'r') as zipf:
# with zipf.open("{}/{}/version-license.txt".format(args.project_name, args.version_name), "r") as license_file:
# print(license_file.read())
else:
logging.error("Failed to create reports for project {} version {}, status code returned {}".format(
args.project_name, args.version_name, response.status_code))
else:
logging.warning("Did not find project with name {}".format(args.project_name))
| 39.05618
| 206
| 0.750575
|
from blackduck.HubRestApi import HubInstance
import argparse
import json
import logging
import sys
import time
import zipfile
parser = argparse.ArgumentParser("A program to generate the notices file for a given project-version")
parser.add_argument("project_name")
parser.add_argument("version_name")
parser.add_argument('-f', "--file_name_base", default="notices_report", help="Base file name to write the report data into. If the report format is TEXT a .zip file will be created, otherwise a .json file")
parser.add_argument('-r', '--report_format', default='TEXT', choices=["JSON", "TEXT"], help="Report format - choices are TEXT or JSON")
parser.add_argument('-c', '--include_copyright_info', action='store_true', help="Set this option to have additional copyright information from the Black Duck KB included in the notices file report.")
args = parser.parse_args()
hub = HubInstance()
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', stream=sys.stderr, level=logging.DEBUG)
class FailedReportDownload(Exception):
pass
def download_report(location, file_name_base, retries=10):
report_id = location.split("/")[-1]
if retries:
logging.debug("Retrieving generated report from {}".format(location))
response, report_format = hub.download_notification_report(location)
if response.status_code == 200:
if report_format == "TEXT":
filename = file_name_base + ".zip"
with open(filename, "wb") as f:
f.write(response.content)
else:
filename = file_name_base + ".json"
with open(filename, "w") as f:
json.dump(response.json(), f, indent=3)
logging.info("Successfully downloaded json file to {} for report {}".format(
filename, report_id))
else:
logging.warning("Failed to retrieve report {}".format(report_id))
logging.warning("Probably not ready yet, waiting 5 seconds then retrying (remaining retries={}".format(retries))
time.sleep(5)
retries -= 1
download_report(location, file_name_base, retries)
else:
raise FailedReportDownload("Failed to retrieve report {} after multiple retries".format(report_id))
project = hub.get_project_by_name(args.project_name)
if project:
version = hub.get_version_by_name(project, args.version_name)
response = hub.create_version_notices_report(version, args.report_format, include_copyright_info=args.include_copyright_info)
if response.status_code == 201:
logging.info("Successfully created notices report in {} format for project {} and version {}".format(
args.report_format, args.project_name, args.version_name))
location = response.headers['Location']
download_report(location, args.file_name_base)
else:
logging.error("Failed to create reports for project {} version {}, status code returned {}".format(
args.project_name, args.version_name, response.status_code))
else:
logging.warning("Did not find project with name {}".format(args.project_name))
| true
| true
|
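One design note on the script above: download_report retries by calling itself, so a long wait for report generation deepens the call stack. The sketch below is an equivalent iterative polling loop, under the assumption that fetch is a zero-argument callable standing in for hub.download_notification_report(location); the helper name and signature are illustrative, not part of the blackduck package.
# Hypothetical iterative variant of the retry logic in download_report above.
import time
def poll_until_ready(fetch, retries=10, delay=5):
    for _ in range(retries):
        response = fetch()
        if response.status_code == 200:
            return response
        time.sleep(delay)  # report not generated yet; wait and try again
    raise RuntimeError("report not ready after {} attempts".format(retries))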
f70c13aa147eaeb39388692f9ff8fa426bc19476
| 1,735
|
py
|
Python
|
napari_assistant/_gui/_button_grid.py
|
Cryaaa/napari-assistant
|
efdde41368885ccc6cc0e40c4eba236e3883215c
|
[
"BSD-3-Clause"
] | null | null | null |
napari_assistant/_gui/_button_grid.py
|
Cryaaa/napari-assistant
|
efdde41368885ccc6cc0e40c4eba236e3883215c
|
[
"BSD-3-Clause"
] | 8
|
2022-03-07T20:38:01.000Z
|
2022-03-20T14:50:52.000Z
|
napari_assistant/_gui/_button_grid.py
|
Cryaaa/napari-assistant
|
efdde41368885ccc6cc0e40c4eba236e3883215c
|
[
"BSD-3-Clause"
] | null | null | null |
from qtpy.QtCore import QSize
from qtpy.QtGui import QIcon
from qtpy.QtWidgets import QListWidget, QListWidgetItem
from pathlib import Path
ICON_ROOT = Path(__file__).parent / "icons"
STYLES = r"""
QListWidget{
min-width: 294;
background: none;
font-size: 8pt;
color: #eee;
}
QListWidget::item {
width: 68;
height: 85;
border-radius: 0;
margin: 1;
padding: 4;
background: #414851;
}
QListWidget::item::hover {
background: #5A626C;
}
"""
def _get_icon(name):
path = ICON_ROOT / f'{name.lower().replace(" ", "_")}.png'
if not path.exists():
return ""
return str(path)
class ButtonGrid(QListWidget):
def __init__(self, parent=None):
super().__init__(parent=parent)
self.setMovement(self.Static) # The items cannot be moved by the user.
self.setViewMode(self.IconMode) # make items icons
self.setResizeMode(self.Adjust) # relayout when view is resized.
self.setUniformItemSizes(True) # better performance
self.setIconSize(QSize(64, 44))
self.setWordWrap(True)
self.setStyleSheet(STYLES)
def addItem(self, label : str, tool_tip : str = None):
if isinstance(label, QListWidgetItem):
            return super().addItem(label)
item = QListWidgetItem(QIcon(_get_icon(label)), label)
if tool_tip is not None:
item.setToolTip(tool_tip)
super().addItem(item)
def addItems(self, labels) -> None:
for label in labels:
if hasattr(labels[label], "tool_tip"):
self.addItem(label, labels[label].tool_tip)
else:
self.addItem(label)
| 27.983871
| 79
| 0.605764
|
from qtpy.QtCore import QSize
from qtpy.QtGui import QIcon
from qtpy.QtWidgets import QListWidget, QListWidgetItem
from pathlib import Path
ICON_ROOT = Path(__file__).parent / "icons"
STYLES = r"""
QListWidget{
min-width: 294;
background: none;
font-size: 8pt;
color: #eee;
}
QListWidget::item {
width: 68;
height: 85;
border-radius: 0;
margin: 1;
padding: 4;
background: #414851;
}
QListWidget::item::hover {
background: #5A626C;
}
"""
def _get_icon(name):
path = ICON_ROOT / f'{name.lower().replace(" ", "_")}.png'
if not path.exists():
return ""
return str(path)
class ButtonGrid(QListWidget):
def __init__(self, parent=None):
super().__init__(parent=parent)
        self.setMovement(self.Static)
        self.setViewMode(self.IconMode)
        self.setResizeMode(self.Adjust)
        self.setUniformItemSizes(True)
        self.setIconSize(QSize(64, 44))
self.setWordWrap(True)
self.setStyleSheet(STYLES)
def addItem(self, label : str, tool_tip : str = None):
if isinstance(label, QListWidgetItem):
            return super().addItem(label)
item = QListWidgetItem(QIcon(_get_icon(label)), label)
if tool_tip is not None:
item.setToolTip(tool_tip)
super().addItem(item)
def addItems(self, labels) -> None:
for label in labels:
if hasattr(labels[label], "tool_tip"):
self.addItem(label, labels[label].tool_tip)
else:
self.addItem(label)
| true
| true
|
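A short usage sketch for the ButtonGrid above. It assumes a Qt binding is available through qtpy and that the class is importable from the module path given in the record; the labels passed in follow the shape addItems expects (keys are labels, values may carry a tool_tip attribute).
# Usage sketch only; requires a Qt binding and the napari_assistant package.
import sys
from qtpy.QtWidgets import QApplication
from napari_assistant._gui._button_grid import ButtonGrid
app = QApplication(sys.argv)
grid = ButtonGrid()
grid.addItems({"Remove noise": None, "Filter": None})  # plain labels, no tool tips
grid.addItem("Label", tool_tip="Draw a label image")   # single item with a tool tip
grid.show()
# app.exec_()  # uncomment to start the event loop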
f70c144e6451a175f12753b62c7803ebe9b46b98
| 3,156
|
py
|
Python
|
day14/day14.py
|
elp2/advent_of_code_2019
|
af3ce232fb6597dbc80e96bdfd5a6248f07aa3c6
|
[
"Apache-2.0"
] | 1
|
2021-12-02T15:19:36.000Z
|
2021-12-02T15:19:36.000Z
|
day14/day14.py
|
elp2/advent_of_code_2019
|
af3ce232fb6597dbc80e96bdfd5a6248f07aa3c6
|
[
"Apache-2.0"
] | null | null | null |
day14/day14.py
|
elp2/advent_of_code_2019
|
af3ce232fb6597dbc80e96bdfd5a6248f07aa3c6
|
[
"Apache-2.0"
] | null | null | null |
from collections import defaultdict
from copy import copy
from math import ceil, floor
def parse_item(item):
[num, name] = item.strip().split(' ')
return {}
def filter_zeroes(d):
ret = defaultdict(lambda: 0)
for k, v in d.items():
if v != 0:
ret[k] = v
return ret
output_to_formula = {}
def parse_input():
lines = open('input').readlines()
for line in lines:
[input_string, output_string] = line.split('=>')
[output_number, output_chemical] = output_string.strip().split(' ')
formula = {'num': int(output_number)}
input_formula = defaultdict(lambda: 0)
for inp in input_string.strip().split(','):
[num, name] = inp.strip().split(' ')
input_formula[name] = int(num)
formula['inputs'] = input_formula
output_to_formula[output_chemical] = formula
def subtract_from_extras(extras, chem, num):
ret = num
if chem in extras:
from_extras = min(num, extras[chem])
ret -= from_extras
extras[chem] -= from_extras
return ret
def expand_one(chem, needed, extras):
if chem == 'ORE':
return {chem: needed}
formula = output_to_formula[chem]
fnum = formula['num']
scaling = ceil(needed / fnum)
extra = fnum * scaling - needed
if extra != 0:
extras[chem] += extra
ins = copy(formula['inputs'])
for key in ins.keys():
ins[key] *= scaling
ins[key] = subtract_from_extras(extras, key, ins[key])
return ins
def expand(chemicals):
extras = defaultdict(lambda: 0)
while list(chemicals.keys()) != ['ORE']:
new = defaultdict(lambda: 0)
for chem, num in chemicals.items():
num = subtract_from_extras(extras, chem, num)
expanded = expand_one(chem, num, extras)
for key in expanded.keys():
new[key] += expanded[key]
print('Round! ', chemicals, '->', new)
chemicals = new
ret = defaultdict(lambda: 0)
for key, value in extras.items():
if value != 0:
ret[key] = value
for key, value in chemicals.items():
if value != 0:
ret[key] = value
return chemicals
def part1():
parse_input()
chemicals = defaultdict(lambda: 0)
chemicals['FUEL'] = 1
while list(chemicals.keys()) != ['ORE']:
chemicals = expand(chemicals)
print('Expanded: ', chemicals)
# part1() # 892207
ONE_TRILLION = 1_000_000_000_000
START_FUELS = floor(ONE_TRILLION / 892207)
START_STEP = floor(START_FUELS / 2)
def part2():
parse_input()
fuels = START_FUELS
step = START_STEP
while True:
chemicals = defaultdict(lambda: 0)
chemicals['FUEL'] = fuels + step
while list(chemicals.keys()) != ['ORE']:
chemicals = expand(chemicals)
ores = chemicals['ORE']
if ores == ONE_TRILLION or step == 0:
print('FUELS = ', fuels)
break
elif ores < ONE_TRILLION:
fuels += step
elif ores > ONE_TRILLION:
step = floor(step / 2)
print(ores - ONE_TRILLION, step)
part2() # 1935265
| 28.178571
| 75
| 0.585234
|
from collections import defaultdict
from copy import copy
from math import ceil, floor
def parse_item(item):
[num, name] = item.strip().split(' ')
return {}
def filter_zeroes(d):
ret = defaultdict(lambda: 0)
for k, v in d.items():
if v != 0:
ret[k] = v
return ret
output_to_formula = {}
def parse_input():
lines = open('input').readlines()
for line in lines:
[input_string, output_string] = line.split('=>')
[output_number, output_chemical] = output_string.strip().split(' ')
formula = {'num': int(output_number)}
input_formula = defaultdict(lambda: 0)
for inp in input_string.strip().split(','):
[num, name] = inp.strip().split(' ')
input_formula[name] = int(num)
formula['inputs'] = input_formula
output_to_formula[output_chemical] = formula
def subtract_from_extras(extras, chem, num):
ret = num
if chem in extras:
from_extras = min(num, extras[chem])
ret -= from_extras
extras[chem] -= from_extras
return ret
def expand_one(chem, needed, extras):
if chem == 'ORE':
return {chem: needed}
formula = output_to_formula[chem]
fnum = formula['num']
scaling = ceil(needed / fnum)
extra = fnum * scaling - needed
if extra != 0:
extras[chem] += extra
ins = copy(formula['inputs'])
for key in ins.keys():
ins[key] *= scaling
ins[key] = subtract_from_extras(extras, key, ins[key])
return ins
def expand(chemicals):
extras = defaultdict(lambda: 0)
while list(chemicals.keys()) != ['ORE']:
new = defaultdict(lambda: 0)
for chem, num in chemicals.items():
num = subtract_from_extras(extras, chem, num)
expanded = expand_one(chem, num, extras)
for key in expanded.keys():
new[key] += expanded[key]
print('Round! ', chemicals, '->', new)
chemicals = new
ret = defaultdict(lambda: 0)
for key, value in extras.items():
if value != 0:
ret[key] = value
for key, value in chemicals.items():
if value != 0:
ret[key] = value
return chemicals
def part1():
parse_input()
chemicals = defaultdict(lambda: 0)
chemicals['FUEL'] = 1
while list(chemicals.keys()) != ['ORE']:
chemicals = expand(chemicals)
print('Expanded: ', chemicals)
ONE_TRILLION = 1_000_000_000_000
START_FUELS = floor(ONE_TRILLION / 892207)
START_STEP = floor(START_FUELS / 2)
def part2():
parse_input()
fuels = START_FUELS
step = START_STEP
while True:
chemicals = defaultdict(lambda: 0)
chemicals['FUEL'] = fuels + step
while list(chemicals.keys()) != ['ORE']:
chemicals = expand(chemicals)
ores = chemicals['ORE']
if ores == ONE_TRILLION or step == 0:
print('FUELS = ', fuels)
break
elif ores < ONE_TRILLION:
fuels += step
elif ores > ONE_TRILLION:
step = floor(step / 2)
print(ores - ONE_TRILLION, step)
part2()
| true
| true
|
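The heart of expand_one above is the batch-rounding arithmetic: when a reaction yields fnum units per batch and `needed` units are required, ceil(needed / fnum) batches run and the surplus is banked in extras for later passes. A tiny worked example of that arithmetic with invented numbers:
# Worked example of the batch rounding used by expand_one above (numbers invented).
from math import ceil
fnum, needed = 3, 7                 # reaction yields 3 per batch, 7 are required
scaling = ceil(needed / fnum)       # 3 batches must run
extra = fnum * scaling - needed     # 2 surplus units are stored in `extras`
assert (scaling, extra) == (3, 2)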
f70c15bf0053d4434cfa71056c4b147777a06236
| 547
|
py
|
Python
|
ufdl-image-segmentation-app/src/ufdl/image_segmentation_app/migrations/0006_job_templates.py
|
waikato-ufdl/ufdl-backend
|
776fc906c61eba6c2f2e6324758e7b8a323e30d7
|
[
"Apache-2.0"
] | null | null | null |
ufdl-image-segmentation-app/src/ufdl/image_segmentation_app/migrations/0006_job_templates.py
|
waikato-ufdl/ufdl-backend
|
776fc906c61eba6c2f2e6324758e7b8a323e30d7
|
[
"Apache-2.0"
] | 85
|
2020-07-24T00:04:28.000Z
|
2022-02-10T10:35:15.000Z
|
ufdl-image-segmentation-app/src/ufdl/image_segmentation_app/migrations/0006_job_templates.py
|
waikato-ufdl/ufdl-backend
|
776fc906c61eba6c2f2e6324758e7b8a323e30d7
|
[
"Apache-2.0"
] | null | null | null |
from django.db import migrations
from ufdl.core_app.migrations import DataMigration
from ufdl.core_app.migrations.job_templates import get_python_job_template_migration
from .job_templates import iterate_job_templates
class Migration(migrations.Migration):
"""
Migration inserting the pre-trained model presets into the database.
"""
dependencies = [
('ufdl-image-segmentation', '0005_pretrained_models')
]
operations = [
DataMigration(get_python_job_template_migration(iterate_job_templates()))
]
| 27.35
| 84
| 0.764168
|
from django.db import migrations
from ufdl.core_app.migrations import DataMigration
from ufdl.core_app.migrations.job_templates import get_python_job_template_migration
from .job_templates import iterate_job_templates
class Migration(migrations.Migration):
dependencies = [
('ufdl-image-segmentation', '0005_pretrained_models')
]
operations = [
DataMigration(get_python_job_template_migration(iterate_job_templates()))
]
| true
| true
|
f70c15fdf850b63bd6c15c2688bbc3eb9b82f421
| 13,299
|
py
|
Python
|
qucumber/nn_states/density_matrix.py
|
ZvonimirBandic/QuCumber
|
81f0291951e89346fd8ab5c35cc90341fd8acf35
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 163
|
2018-07-18T15:00:57.000Z
|
2022-03-31T09:05:06.000Z
|
qucumber/nn_states/density_matrix.py
|
ZvonimirBandic/QuCumber
|
81f0291951e89346fd8ab5c35cc90341fd8acf35
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 101
|
2018-07-17T17:36:06.000Z
|
2021-10-19T01:40:10.000Z
|
qucumber/nn_states/density_matrix.py
|
ZvonimirBandic/QuCumber
|
81f0291951e89346fd8ab5c35cc90341fd8acf35
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 32
|
2018-08-18T21:56:02.000Z
|
2022-03-12T22:04:16.000Z
|
# Copyright 2019 PIQuIL - All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import torch
from torch.nn import functional as F
from qucumber import _warn_on_missing_gpu
from qucumber.utils import cplx, unitaries
from qucumber.rbm import PurificationRBM
from .neural_state import NeuralStateBase
class DensityMatrix(NeuralStateBase):
r"""
:param num_visible: The number of visible units, i.e. the size of the system
:type num_visible: int
:param num_hidden: The number of units in the hidden layer
:type num_hidden: int
:param num_aux: The number of units in the purification layer
:type num_aux: int
:param unitary_dict: A dictionary associating bases with their unitary rotations
:type unitary_dict: dict[str, torch.Tensor]
:param gpu: Whether to perform computations on the default gpu.
:type gpu: bool
"""
_rbm_am = None
_rbm_ph = None
_device = None
def __init__(
self,
num_visible,
num_hidden=None,
num_aux=None,
unitary_dict=None,
gpu=False,
module=None,
):
if gpu and torch.cuda.is_available():
warnings.warn(
"Using DensityMatrix on GPU is not recommended due to poor performance compared to CPU.",
ResourceWarning,
2,
)
self.device = torch.device("cuda")
else:
self.device = torch.device("cpu")
if module is None:
self.rbm_am = PurificationRBM(num_visible, num_hidden, num_aux, gpu=gpu)
self.rbm_ph = PurificationRBM(num_visible, num_hidden, num_aux, gpu=gpu)
else:
_warn_on_missing_gpu(gpu)
self.rbm_am = module.to(self.device)
self.rbm_am.device = self.device
self.rbm_ph = module.to(self.device).clone()
self.rbm_ph.device = self.device
self.num_visible = self.rbm_am.num_visible
self.num_hidden = self.rbm_am.num_hidden
self.num_aux = self.rbm_am.num_aux
self.device = self.rbm_am.device
self.unitary_dict = unitary_dict if unitary_dict else unitaries.create_dict()
self.unitary_dict = {
k: v.to(device=self.device) for k, v in self.unitary_dict.items()
}
@property
def networks(self):
return ["rbm_am", "rbm_ph"]
@property
def rbm_am(self):
return self._rbm_am
@rbm_am.setter
def rbm_am(self, new_val):
self._rbm_am = new_val
@property
def rbm_ph(self):
"""RBM used to learn the wavefunction phase."""
return self._rbm_ph
@rbm_ph.setter
def rbm_ph(self, new_val):
self._rbm_ph = new_val
@property
def device(self):
return self._device
@device.setter
def device(self, new_val):
self._device = new_val
def pi(self, v, vp, expand=True):
r"""Calculates elements of the :math:`\Pi` matrix.
If `expand` is `True`, will return a complex matrix
:math:`A_{ij} = \langle\sigma_i|\Pi|\sigma'_j\rangle`.
Otherwise will return a complex vector
:math:`A_{i} = \langle\sigma_i|\Pi|\sigma'_i\rangle`.
:param v: A batch of visible states, :math:`\sigma`.
:type v: torch.Tensor
:param vp: The other batch of visible state, :math:`\sigma'`.
:type vp: torch.Tensor
:param expand: Whether to return a matrix (`True`) or a vector (`False`).
:type expand: bool
:returns: The matrix elements given by :math:`\langle\sigma|\Pi|\sigma'\rangle`
:rtype: torch.Tensor
"""
m_am = F.linear(v, self.rbm_am.weights_U, self.rbm_am.aux_bias)
mp_am = F.linear(vp, self.rbm_am.weights_U, self.rbm_am.aux_bias)
m_ph = F.linear(v, self.rbm_ph.weights_U)
mp_ph = F.linear(vp, self.rbm_ph.weights_U)
if expand and v.dim() >= 2:
m_am = m_am.unsqueeze_(1)
m_ph = m_ph.unsqueeze_(1)
if expand and vp.dim() >= 2:
mp_am = mp_am.unsqueeze_(0)
mp_ph = mp_ph.unsqueeze_(0)
exp_arg = (m_am + mp_am) / 2
phase = (m_ph - mp_ph) / 2
real = (
(1 + 2 * exp_arg.exp() * phase.cos() + (2 * exp_arg).exp())
.sqrt()
.log()
.sum(-1)
)
imag = torch.atan2(
(exp_arg.exp() * phase.sin()), (1 + exp_arg.exp() * phase.cos())
).sum(-1)
return cplx.make_complex(real, imag)
def pi_grad(self, v, vp, phase=False, expand=False):
r"""Calculates the gradient of the :math:`\Pi` matrix with
respect to the amplitude RBM parameters for two input states
:param v: One of the visible states, :math:`\sigma`
:type v: torch.Tensor
:param vp: The other visible state, :math`\sigma'`
:type vp: torch.Tensor
:param phase: Whether to compute the gradients for the phase RBM (`True`)
or the amplitude RBM (`False`)
:type phase: bool
:returns: The matrix element of the gradient given by
:math:`\langle\sigma|\nabla_\lambda\Pi|\sigma'\rangle`
:rtype: torch.Tensor
"""
unsqueezed = v.dim() < 2 or vp.dim() < 2
v = (v.unsqueeze(0) if v.dim() < 2 else v).to(self.rbm_am.weights_W)
vp = (vp.unsqueeze(0) if vp.dim() < 2 else vp).to(self.rbm_am.weights_W)
if expand:
arg_real = 0.5 * (
F.linear(v, self.rbm_am.weights_U, self.rbm_am.aux_bias).unsqueeze_(1)
+ F.linear(vp, self.rbm_am.weights_U, self.rbm_am.aux_bias).unsqueeze_(
0
)
)
arg_imag = 0.5 * (
F.linear(v, self.rbm_ph.weights_U).unsqueeze_(1)
- F.linear(vp, self.rbm_ph.weights_U).unsqueeze_(0)
)
else:
arg_real = self.rbm_am.mixing_term(v + vp)
arg_imag = self.rbm_ph.mixing_term(v - vp)
sig = cplx.sigmoid(arg_real, arg_imag)
batch_sizes = (
(v.shape[0], vp.shape[0], *v.shape[1:-1]) if expand else (*v.shape[:-1],)
)
W_grad = torch.zeros_like(self.rbm_am.weights_W).expand(*batch_sizes, -1, -1)
vb_grad = torch.zeros_like(self.rbm_am.visible_bias).expand(*batch_sizes, -1)
hb_grad = torch.zeros_like(self.rbm_am.hidden_bias).expand(*batch_sizes, -1)
if phase:
temp = (v.unsqueeze(1) - vp.unsqueeze(0)) if expand else (v - vp)
sig = cplx.scalar_mult(sig, cplx.I)
ab_grad_real = torch.zeros_like(self.rbm_ph.aux_bias).expand(
*batch_sizes, -1
)
ab_grad_imag = ab_grad_real.clone()
else:
temp = (v.unsqueeze(1) + vp.unsqueeze(0)) if expand else (v + vp)
ab_grad_real = cplx.real(sig)
ab_grad_imag = cplx.imag(sig)
U_grad = 0.5 * torch.einsum("c...j,...k->c...jk", sig, temp)
U_grad_real = cplx.real(U_grad)
U_grad_imag = cplx.imag(U_grad)
vec_real = [
W_grad.view(*batch_sizes, -1),
U_grad_real.view(*batch_sizes, -1),
vb_grad,
hb_grad,
ab_grad_real,
]
vec_imag = [
W_grad.view(*batch_sizes, -1).clone(),
U_grad_imag.view(*batch_sizes, -1),
vb_grad.clone(),
hb_grad.clone(),
ab_grad_imag,
]
if unsqueezed and not expand:
vec_real = [grad.squeeze_(0) for grad in vec_real]
vec_imag = [grad.squeeze_(0) for grad in vec_imag]
return cplx.make_complex(
torch.cat(vec_real, dim=-1), torch.cat(vec_imag, dim=-1)
)
def rho(self, v, vp=None, expand=True):
r"""Computes the matrix elements of the (unnormalized) density matrix.
If `expand` is `True`, will return a complex matrix
:math:`A_{ij} = \langle\sigma_i|\widetilde{\rho}|\sigma'_j\rangle`.
Otherwise will return a complex vector
:math:`A_{i} = \langle\sigma_i|\widetilde{\rho}|\sigma'_i\rangle`.
:param v: One of the visible states, :math:`\sigma`.
:type v: torch.Tensor
:param vp: The other visible state, :math:`\sigma'`.
If `None`, will be set to `v`.
:type vp: torch.Tensor
:param expand: Whether to return a matrix (`True`) or a vector (`False`).
:type expand: bool
:returns: The elements of the current density matrix
:math:`\langle\sigma|\widetilde{\rho}|\sigma'\rangle`
:rtype: torch.Tensor
"""
if expand is False and vp is None:
return cplx.make_complex(self.probability(v))
elif vp is None:
vp = v
pi_ = self.pi(v, vp, expand=expand)
amp = (self.rbm_am.gamma(v, vp, eta=+1, expand=expand) + cplx.real(pi_)).exp()
phase = self.rbm_ph.gamma(v, vp, eta=-1, expand=expand) + cplx.imag(pi_)
return cplx.make_complex(amp * phase.cos(), amp * phase.sin())
def importance_sampling_numerator(self, vp, v):
return self.rho(vp, v, expand=False)
def importance_sampling_denominator(self, v):
return cplx.make_complex(self.probability(v))
def rotated_gradient(self, basis, sample):
r"""Computes the gradients rotated into the measurement basis
:param basis: The bases in which the measurement is made
:type basis: numpy.ndarray
:param sample: The measurement (either 0 or 1)
:type sample: torch.Tensor
:returns: A list of two tensors, representing the rotated gradients
of the amplitude and phase RBMs
:rtype: list[torch.Tensor, torch.Tensor]
"""
UrhoU, UrhoU_v, v = unitaries.rotate_rho_probs(
self, basis, sample, include_extras=True
)
inv_UrhoU = 1 / (UrhoU + 1e-8) # avoid dividing by zero
raw_grads = [self.am_grads(v), self.ph_grads(v)]
rotated_grad = [
-cplx.einsum("ijb,ijbg->bg", UrhoU_v, g, imag_part=False) for g in raw_grads
]
return [torch.einsum("b,bg->g", inv_UrhoU, g) for g in rotated_grad]
def am_grads(self, v):
r"""Computes the gradients of the amplitude RBM for given input states
:param v: The first input state, :math:`\sigma`
:type v: torch.Tensor
:returns: The gradients of all amplitude RBM parameters
:rtype: torch.Tensor
"""
return self.rbm_am.gamma_grad(v, v, eta=+1, expand=True) + self.pi_grad(
v, v, phase=False, expand=True
)
def ph_grads(self, v):
r"""Computes the gradients of the phase RBM for given input states
:param v: The first input state, :math:`\sigma`
:type v: torch.Tensor
:returns: The gradients of all phase RBM parameters
:rtype: torch.Tensor
"""
return cplx.scalar_mult( # need to multiply Gamma- by i
self.rbm_ph.gamma_grad(v, v, eta=-1, expand=True), cplx.I
) + self.pi_grad(v, v, phase=True, expand=True)
def fit(
self,
data,
epochs=100,
pos_batch_size=100,
neg_batch_size=None,
k=1,
lr=1,
input_bases=None,
progbar=False,
starting_epoch=1,
time=False,
callbacks=None,
optimizer=torch.optim.SGD,
optimizer_args=None,
scheduler=None,
scheduler_args=None,
**kwargs,
):
if input_bases is None:
raise ValueError("input_bases must be provided to train a DensityMatrix!")
else:
super().fit(
data=data,
epochs=epochs,
pos_batch_size=pos_batch_size,
neg_batch_size=neg_batch_size,
k=k,
lr=lr,
input_bases=input_bases,
progbar=progbar,
starting_epoch=starting_epoch,
time=time,
callbacks=callbacks,
optimizer=optimizer,
optimizer_args=optimizer_args,
scheduler=scheduler,
scheduler_args=scheduler_args,
**kwargs,
)
@staticmethod
def autoload(location, gpu=False):
state_dict = torch.load(location)
nn_state = DensityMatrix(
unitary_dict=state_dict["unitary_dict"],
num_visible=len(state_dict["rbm_am"]["visible_bias"]),
num_hidden=len(state_dict["rbm_am"]["hidden_bias"]),
num_aux=len(state_dict["rbm_am"]["aux_bias"]),
gpu=gpu,
)
nn_state.load(location)
return nn_state
| 34.542857
| 105
| 0.585608
|
import warnings
import torch
from torch.nn import functional as F
from qucumber import _warn_on_missing_gpu
from qucumber.utils import cplx, unitaries
from qucumber.rbm import PurificationRBM
from .neural_state import NeuralStateBase
class DensityMatrix(NeuralStateBase):
_rbm_am = None
_rbm_ph = None
_device = None
def __init__(
self,
num_visible,
num_hidden=None,
num_aux=None,
unitary_dict=None,
gpu=False,
module=None,
):
if gpu and torch.cuda.is_available():
warnings.warn(
"Using DensityMatrix on GPU is not recommended due to poor performance compared to CPU.",
ResourceWarning,
2,
)
self.device = torch.device("cuda")
else:
self.device = torch.device("cpu")
if module is None:
self.rbm_am = PurificationRBM(num_visible, num_hidden, num_aux, gpu=gpu)
self.rbm_ph = PurificationRBM(num_visible, num_hidden, num_aux, gpu=gpu)
else:
_warn_on_missing_gpu(gpu)
self.rbm_am = module.to(self.device)
self.rbm_am.device = self.device
self.rbm_ph = module.to(self.device).clone()
self.rbm_ph.device = self.device
self.num_visible = self.rbm_am.num_visible
self.num_hidden = self.rbm_am.num_hidden
self.num_aux = self.rbm_am.num_aux
self.device = self.rbm_am.device
self.unitary_dict = unitary_dict if unitary_dict else unitaries.create_dict()
self.unitary_dict = {
k: v.to(device=self.device) for k, v in self.unitary_dict.items()
}
@property
def networks(self):
return ["rbm_am", "rbm_ph"]
@property
def rbm_am(self):
return self._rbm_am
@rbm_am.setter
def rbm_am(self, new_val):
self._rbm_am = new_val
@property
def rbm_ph(self):
return self._rbm_ph
@rbm_ph.setter
def rbm_ph(self, new_val):
self._rbm_ph = new_val
@property
def device(self):
return self._device
@device.setter
def device(self, new_val):
self._device = new_val
def pi(self, v, vp, expand=True):
m_am = F.linear(v, self.rbm_am.weights_U, self.rbm_am.aux_bias)
mp_am = F.linear(vp, self.rbm_am.weights_U, self.rbm_am.aux_bias)
m_ph = F.linear(v, self.rbm_ph.weights_U)
mp_ph = F.linear(vp, self.rbm_ph.weights_U)
if expand and v.dim() >= 2:
m_am = m_am.unsqueeze_(1)
m_ph = m_ph.unsqueeze_(1)
if expand and vp.dim() >= 2:
mp_am = mp_am.unsqueeze_(0)
mp_ph = mp_ph.unsqueeze_(0)
exp_arg = (m_am + mp_am) / 2
phase = (m_ph - mp_ph) / 2
real = (
(1 + 2 * exp_arg.exp() * phase.cos() + (2 * exp_arg).exp())
.sqrt()
.log()
.sum(-1)
)
imag = torch.atan2(
(exp_arg.exp() * phase.sin()), (1 + exp_arg.exp() * phase.cos())
).sum(-1)
return cplx.make_complex(real, imag)
def pi_grad(self, v, vp, phase=False, expand=False):
unsqueezed = v.dim() < 2 or vp.dim() < 2
v = (v.unsqueeze(0) if v.dim() < 2 else v).to(self.rbm_am.weights_W)
vp = (vp.unsqueeze(0) if vp.dim() < 2 else vp).to(self.rbm_am.weights_W)
if expand:
arg_real = 0.5 * (
F.linear(v, self.rbm_am.weights_U, self.rbm_am.aux_bias).unsqueeze_(1)
+ F.linear(vp, self.rbm_am.weights_U, self.rbm_am.aux_bias).unsqueeze_(
0
)
)
arg_imag = 0.5 * (
F.linear(v, self.rbm_ph.weights_U).unsqueeze_(1)
- F.linear(vp, self.rbm_ph.weights_U).unsqueeze_(0)
)
else:
arg_real = self.rbm_am.mixing_term(v + vp)
arg_imag = self.rbm_ph.mixing_term(v - vp)
sig = cplx.sigmoid(arg_real, arg_imag)
batch_sizes = (
(v.shape[0], vp.shape[0], *v.shape[1:-1]) if expand else (*v.shape[:-1],)
)
W_grad = torch.zeros_like(self.rbm_am.weights_W).expand(*batch_sizes, -1, -1)
vb_grad = torch.zeros_like(self.rbm_am.visible_bias).expand(*batch_sizes, -1)
hb_grad = torch.zeros_like(self.rbm_am.hidden_bias).expand(*batch_sizes, -1)
if phase:
temp = (v.unsqueeze(1) - vp.unsqueeze(0)) if expand else (v - vp)
sig = cplx.scalar_mult(sig, cplx.I)
ab_grad_real = torch.zeros_like(self.rbm_ph.aux_bias).expand(
*batch_sizes, -1
)
ab_grad_imag = ab_grad_real.clone()
else:
temp = (v.unsqueeze(1) + vp.unsqueeze(0)) if expand else (v + vp)
ab_grad_real = cplx.real(sig)
ab_grad_imag = cplx.imag(sig)
U_grad = 0.5 * torch.einsum("c...j,...k->c...jk", sig, temp)
U_grad_real = cplx.real(U_grad)
U_grad_imag = cplx.imag(U_grad)
vec_real = [
W_grad.view(*batch_sizes, -1),
U_grad_real.view(*batch_sizes, -1),
vb_grad,
hb_grad,
ab_grad_real,
]
vec_imag = [
W_grad.view(*batch_sizes, -1).clone(),
U_grad_imag.view(*batch_sizes, -1),
vb_grad.clone(),
hb_grad.clone(),
ab_grad_imag,
]
if unsqueezed and not expand:
vec_real = [grad.squeeze_(0) for grad in vec_real]
vec_imag = [grad.squeeze_(0) for grad in vec_imag]
return cplx.make_complex(
torch.cat(vec_real, dim=-1), torch.cat(vec_imag, dim=-1)
)
def rho(self, v, vp=None, expand=True):
if expand is False and vp is None:
return cplx.make_complex(self.probability(v))
elif vp is None:
vp = v
pi_ = self.pi(v, vp, expand=expand)
amp = (self.rbm_am.gamma(v, vp, eta=+1, expand=expand) + cplx.real(pi_)).exp()
phase = self.rbm_ph.gamma(v, vp, eta=-1, expand=expand) + cplx.imag(pi_)
return cplx.make_complex(amp * phase.cos(), amp * phase.sin())
def importance_sampling_numerator(self, vp, v):
return self.rho(vp, v, expand=False)
def importance_sampling_denominator(self, v):
return cplx.make_complex(self.probability(v))
def rotated_gradient(self, basis, sample):
UrhoU, UrhoU_v, v = unitaries.rotate_rho_probs(
self, basis, sample, include_extras=True
)
inv_UrhoU = 1 / (UrhoU + 1e-8)
raw_grads = [self.am_grads(v), self.ph_grads(v)]
rotated_grad = [
-cplx.einsum("ijb,ijbg->bg", UrhoU_v, g, imag_part=False) for g in raw_grads
]
return [torch.einsum("b,bg->g", inv_UrhoU, g) for g in rotated_grad]
def am_grads(self, v):
return self.rbm_am.gamma_grad(v, v, eta=+1, expand=True) + self.pi_grad(
v, v, phase=False, expand=True
)
def ph_grads(self, v):
return cplx.scalar_mult( self.rbm_ph.gamma_grad(v, v, eta=-1, expand=True), cplx.I
) + self.pi_grad(v, v, phase=True, expand=True)
def fit(
self,
data,
epochs=100,
pos_batch_size=100,
neg_batch_size=None,
k=1,
lr=1,
input_bases=None,
progbar=False,
starting_epoch=1,
time=False,
callbacks=None,
optimizer=torch.optim.SGD,
optimizer_args=None,
scheduler=None,
scheduler_args=None,
**kwargs,
):
if input_bases is None:
raise ValueError("input_bases must be provided to train a DensityMatrix!")
else:
super().fit(
data=data,
epochs=epochs,
pos_batch_size=pos_batch_size,
neg_batch_size=neg_batch_size,
k=k,
lr=lr,
input_bases=input_bases,
progbar=progbar,
starting_epoch=starting_epoch,
time=time,
callbacks=callbacks,
optimizer=optimizer,
optimizer_args=optimizer_args,
scheduler=scheduler,
scheduler_args=scheduler_args,
**kwargs,
)
@staticmethod
def autoload(location, gpu=False):
state_dict = torch.load(location)
nn_state = DensityMatrix(
unitary_dict=state_dict["unitary_dict"],
num_visible=len(state_dict["rbm_am"]["visible_bias"]),
num_hidden=len(state_dict["rbm_am"]["hidden_bias"]),
num_aux=len(state_dict["rbm_am"]["aux_bias"]),
gpu=gpu,
)
nn_state.load(location)
return nn_state
| true
| true
|
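The sqrt/log and atan2 expressions in pi() above are the real and imaginary parts of log(1 + e^(m + i*phi)), accumulated over the hidden/auxiliary units. A stand-alone check of that identity with plain complex arithmetic (the inputs are arbitrary):
# Stand-alone check of the complex-log identity behind pi() above (arbitrary inputs).
import cmath, math
m, phi = 0.3, 1.1                                   # one exp_arg / phase entry
z = 1 + cmath.exp(complex(m, phi))                  # 1 + e^(m + i*phi)
re = math.log(math.sqrt(1 + 2 * math.exp(m) * math.cos(phi) + math.exp(2 * m)))
im = math.atan2(math.exp(m) * math.sin(phi), 1 + math.exp(m) * math.cos(phi))
assert abs(cmath.log(z) - complex(re, im)) < 1e-12  # both forms agree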
f70c16978f371e597599b34590bfb00c27d46526
| 1,589
|
py
|
Python
|
venv/lib/python3.8/site-packages/vsts/task_agent/v4_1/models/task_group_definition.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/vsts/task_agent/v4_1/models/task_group_definition.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/vsts/task_agent/v4_1/models/task_group_definition.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class TaskGroupDefinition(Model):
"""TaskGroupDefinition.
:param display_name:
:type display_name: str
:param is_expanded:
:type is_expanded: bool
:param name:
:type name: str
:param tags:
:type tags: list of str
:param visible_rule:
:type visible_rule: str
"""
_attribute_map = {
'display_name': {'key': 'displayName', 'type': 'str'},
'is_expanded': {'key': 'isExpanded', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'},
'tags': {'key': 'tags', 'type': '[str]'},
'visible_rule': {'key': 'visibleRule', 'type': 'str'}
}
def __init__(self, display_name=None, is_expanded=None, name=None, tags=None, visible_rule=None):
super(TaskGroupDefinition, self).__init__()
self.display_name = display_name
self.is_expanded = is_expanded
self.name = name
self.tags = tags
self.visible_rule = visible_rule
| 37.833333
| 102
| 0.512901
|
from msrest.serialization import Model
class TaskGroupDefinition(Model):
_attribute_map = {
'display_name': {'key': 'displayName', 'type': 'str'},
'is_expanded': {'key': 'isExpanded', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'},
'tags': {'key': 'tags', 'type': '[str]'},
'visible_rule': {'key': 'visibleRule', 'type': 'str'}
}
def __init__(self, display_name=None, is_expanded=None, name=None, tags=None, visible_rule=None):
super(TaskGroupDefinition, self).__init__()
self.display_name = display_name
self.is_expanded = is_expanded
self.name = name
self.tags = tags
self.visible_rule = visible_rule
| true
| true
|
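A minimal construction sketch for the generated model above; the values are invented and the import path is taken directly from the record's file path. The point of _attribute_map is that msrest serializes each Python attribute under its camelCase wire name (display_name becomes displayName, and so on).
# Illustrative instantiation of the generated model above (values are made up).
from vsts.task_agent.v4_1.models.task_group_definition import TaskGroupDefinition
group = TaskGroupDefinition(display_name="Build steps", is_expanded=True,
                            name="build", tags=["ci", "demo"], visible_rule=None)
print(group.display_name, group.tags)  # carried as "displayName"/"tags" on the wire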
f70c16f14cc1f04dbb6d4ae81ba7699c84f9eca6
| 2,431
|
py
|
Python
|
apps/core/views.py
|
RobertArzolaC/base_django
|
3fc368000b418d387ccb57b30fa223ac916f2895
|
[
"MIT"
] | null | null | null |
apps/core/views.py
|
RobertArzolaC/base_django
|
3fc368000b418d387ccb57b30fa223ac916f2895
|
[
"MIT"
] | 7
|
2020-02-12T00:30:41.000Z
|
2022-02-10T08:03:46.000Z
|
apps/core/views.py
|
RobertArzolaC/base_django
|
3fc368000b418d387ccb57b30fa223ac916f2895
|
[
"MIT"
] | 2
|
2020-09-21T23:32:11.000Z
|
2021-01-10T17:29:24.000Z
|
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
from rest_framework import generics
from rest_framework import permissions
from rest_framework.views import status
from rest_framework.response import Response
from rest_framework_jwt.settings import api_settings
from .serializers import TokenSerializer, UserSerializer
# Get the JWT settings
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
# Create your views here.
class LoginView(generics.CreateAPIView):
"""
POST auth/login/
"""
# This permission class will over ride the global permission
# class setting
permission_classes = (permissions.AllowAny,)
queryset = User.objects.all()
def post(self, request, *args, **kwargs):
username = request.data.get("username", "")
password = request.data.get("password", "")
user = authenticate(request, username=username, password=password)
if user is not None:
# login saves the user’s ID in the session,
# using Django’s session framework.
login(request, user)
serializer = TokenSerializer(data={
# using drf jwt utility functions to generate a token
"token": jwt_encode_handler(
jwt_payload_handler(user)
)})
serializer.is_valid()
return Response(data=serializer.data, status=status.HTTP_200_OK)
return Response(status=status.HTTP_401_UNAUTHORIZED)
class RegisterUsers(generics.CreateAPIView):
"""
POST auth/register/
"""
permission_classes = (permissions.AllowAny,)
def post(self, request, *args, **kwargs):
username = request.data.get("username", "")
password = request.data.get("password", "")
email = request.data.get("email", "")
if not username and not password and not email:
return Response(
data={
"message": "username, password and email is required to register a user"
},
status=status.HTTP_400_BAD_REQUEST
)
new_user = User.objects.create_user(
username=username, password=password, email=email
)
return Response(
data=UserSerializer(new_user).data,
status=status.HTTP_201_CREATED
)
| 33.763889
| 92
| 0.653229
|
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
from rest_framework import generics
from rest_framework import permissions
from rest_framework.views import status
from rest_framework.response import Response
from rest_framework_jwt.settings import api_settings
from .serializers import TokenSerializer, UserSerializer
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
class LoginView(generics.CreateAPIView):
permission_classes = (permissions.AllowAny,)
queryset = User.objects.all()
def post(self, request, *args, **kwargs):
username = request.data.get("username", "")
password = request.data.get("password", "")
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
serializer = TokenSerializer(data={
"token": jwt_encode_handler(
jwt_payload_handler(user)
)})
serializer.is_valid()
return Response(data=serializer.data, status=status.HTTP_200_OK)
return Response(status=status.HTTP_401_UNAUTHORIZED)
class RegisterUsers(generics.CreateAPIView):
permission_classes = (permissions.AllowAny,)
def post(self, request, *args, **kwargs):
username = request.data.get("username", "")
password = request.data.get("password", "")
email = request.data.get("email", "")
if not username and not password and not email:
return Response(
data={
"message": "username, password and email is required to register a user"
},
status=status.HTTP_400_BAD_REQUEST
)
new_user = User.objects.create_user(
username=username, password=password, email=email
)
return Response(
data=UserSerializer(new_user).data,
status=status.HTTP_201_CREATED
)
| true
| true
|
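The docstrings above give the intended routes (auth/login/ and auth/register/). Below is a hedged client-side sketch of the expected request and response shapes, assuming the views are mounted at those URLs on a locally running server; the host, credentials and route prefixes are placeholders.
# Hypothetical client calls against the views above (host/credentials are placeholders).
import requests
base = "http://localhost:8000"
requests.post(base + "/auth/register/",  # the view requires username, password and email
              data={"username": "alice", "password": "s3cret", "email": "alice@example.com"})
resp = requests.post(base + "/auth/login/", data={"username": "alice", "password": "s3cret"})
print(resp.status_code, resp.json().get("token") if resp.ok else None)  # token on success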
f70c1a0ce799afb0a85a95fec93286da54ddca94
| 4,890
|
py
|
Python
|
src/primaires/scripting/actions/remplir.py
|
vlegoff/tsunami
|
36b3b974f6eefbf15cd5d5f099fc14630e66570b
|
[
"BSD-3-Clause"
] | 14
|
2015-08-21T19:15:21.000Z
|
2017-11-26T13:59:17.000Z
|
src/primaires/scripting/actions/remplir.py
|
vincent-lg/tsunami
|
36b3b974f6eefbf15cd5d5f099fc14630e66570b
|
[
"BSD-3-Clause"
] | 20
|
2015-09-29T20:50:45.000Z
|
2018-06-21T12:58:30.000Z
|
src/primaires/scripting/actions/remplir.py
|
vlegoff/tsunami
|
36b3b974f6eefbf15cd5d5f099fc14630e66570b
|
[
"BSD-3-Clause"
] | 3
|
2015-05-02T19:42:03.000Z
|
2018-09-06T10:55:00.000Z
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant l'action remplir."""
from primaires.scripting.action import Action
from primaires.scripting.instruction import ErreurExecution
class ClasseAction(Action):
"""Remplit un conteneur de nourriture ou de potion."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.remplir_objet, "Objet", "Objet")
cls.ajouter_types(cls.remplir_proto_nb, "Objet", "str",
"Fraction")
@staticmethod
def remplir_objet(conteneur, objet):
"""Met l'objet dans le conteneur de nourriture.
Attention, l'objet conteneur ne peut en aucun cas être "flottant" mais
doit lui-même être contenu quelque part (sol d'une salle, inventaire
d'un personnage, autre conteneur...).
"""
if not conteneur.contenu:
raise ErreurExecution("{} n'est contenu nul part".format(
conteneur.get_nom()))
if conteneur.est_de_type("conteneur de potion"):
if conteneur.potion:
raise ErreurExecution("{} est plein".format(
conteneur.get_nom()))
if objet.contenu:
objet.contenu.retirer(objet)
conteneur.potion = objet
conteneur.onces = conteneur.onces_max
return
if not conteneur.est_de_type("conteneur de nourriture"):
raise ErreurExecution("{} n'est pas un conteneur".format(
conteneur.get_nom()))
if objet.poids_unitaire > conteneur.poids_max:
raise ErreurExecution("{} est plein".format(conteneur.get_nom()))
if objet.contenu:
objet.contenu.retirer(objet)
conteneur.nourriture.append(objet)
@staticmethod
def remplir_proto_nb(conteneur, prototype, nb):
"""Pose dans le conteneur nb objets du prototype précisé.
Attention, l'objet conteneur ne peut en aucun cas être "flottant" mais
doit lui-même être contenu quelque part (sol d'une salle, inventaire
d'un personnage, autre conteneur...).
"""
nb = int(nb)
if not prototype in importeur.objet.prototypes:
raise ErreurExecution("prototype {} introuvable".format(prototype))
prototype = importeur.objet.prototypes[prototype]
if not conteneur.contenu:
raise ErreurExecution("{} n'est contenu nul part".format(
conteneur.get_nom()))
if conteneur.est_de_type("conteneur de potion"):
if conteneur.potion:
raise ErreurExecution("{} est plein".format(
conteneur.get_nom()))
objet = importeur.objet.creer_objet(prototype)
conteneur.potion = objet
conteneur.onces = conteneur.onces_max
return
if not conteneur.est_de_type("conteneur de nourriture"):
raise ErreurExecution("{} n'est pas un conteneur".format(
conteneur.get_nom()))
poids_total = 0
for i in range(nb):
poids_total += prototype.poids
if poids_total > conteneur.poids_max:
raise ErreurExecution("{} est plein".format(
conteneur.get_nom()))
objet = importeur.objet.creer_objet(prototype)
conteneur.nourriture.append(objet)
| 44.054054
| 79
| 0.666871
|
from primaires.scripting.action import Action
from primaires.scripting.instruction import ErreurExecution
class ClasseAction(Action):
@classmethod
def init_types(cls):
cls.ajouter_types(cls.remplir_objet, "Objet", "Objet")
cls.ajouter_types(cls.remplir_proto_nb, "Objet", "str",
"Fraction")
@staticmethod
def remplir_objet(conteneur, objet):
if not conteneur.contenu:
raise ErreurExecution("{} n'est contenu nul part".format(
conteneur.get_nom()))
if conteneur.est_de_type("conteneur de potion"):
if conteneur.potion:
raise ErreurExecution("{} est plein".format(
conteneur.get_nom()))
if objet.contenu:
objet.contenu.retirer(objet)
conteneur.potion = objet
conteneur.onces = conteneur.onces_max
return
if not conteneur.est_de_type("conteneur de nourriture"):
raise ErreurExecution("{} n'est pas un conteneur".format(
conteneur.get_nom()))
if objet.poids_unitaire > conteneur.poids_max:
raise ErreurExecution("{} est plein".format(conteneur.get_nom()))
if objet.contenu:
objet.contenu.retirer(objet)
conteneur.nourriture.append(objet)
@staticmethod
def remplir_proto_nb(conteneur, prototype, nb):
nb = int(nb)
if not prototype in importeur.objet.prototypes:
raise ErreurExecution("prototype {} introuvable".format(prototype))
prototype = importeur.objet.prototypes[prototype]
if not conteneur.contenu:
raise ErreurExecution("{} n'est contenu nul part".format(
conteneur.get_nom()))
if conteneur.est_de_type("conteneur de potion"):
if conteneur.potion:
raise ErreurExecution("{} est plein".format(
conteneur.get_nom()))
objet = importeur.objet.creer_objet(prototype)
conteneur.potion = objet
conteneur.onces = conteneur.onces_max
return
if not conteneur.est_de_type("conteneur de nourriture"):
raise ErreurExecution("{} n'est pas un conteneur".format(
conteneur.get_nom()))
poids_total = 0
for i in range(nb):
poids_total += prototype.poids
if poids_total > conteneur.poids_max:
raise ErreurExecution("{} est plein".format(
conteneur.get_nom()))
objet = importeur.objet.creer_objet(prototype)
conteneur.nourriture.append(objet)
| true
| true
|
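A side note on remplir_proto_nb above: every loop iteration adds the same prototype.poids, so whether all nb copies fit in the container reduces to one multiplication and one comparison. Sketch with invented numbers:
# The capacity check in remplir_proto_nb above, reduced to a multiplication (invented numbers).
nb, poids_prototype, poids_max = 4, 1.5, 5.0
poids_total = nb * poids_prototype                        # same total as adding poids nb times
assert poids_total == sum(poids_prototype for _ in range(nb))
print(poids_total, poids_total > poids_max)               # 6.0 True: the batch would not fit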
f70c1a7906ca430885a53e98cf618a681cf5345c
| 1,871
|
py
|
Python
|
nova/tests/unit/objects/test_numa.py
|
gabriel-samfira/nova
|
5ef07cc04dbf0216452ae358e57d9ddac51f1803
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/objects/test_numa.py
|
gabriel-samfira/nova
|
5ef07cc04dbf0216452ae358e57d9ddac51f1803
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/objects/test_numa.py
|
gabriel-samfira/nova
|
5ef07cc04dbf0216452ae358e57d9ddac51f1803
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import objects
from nova.tests.unit.objects import test_objects
fake_obj_numa = objects.NUMATopology(
cells=[
objects.NUMACell(
id=0, cpuset=set([1, 2]), memory=512,
cpu_usage=2, memory_usage=256),
objects.NUMACell(
id=1, cpuset=set([3, 4]), memory=512,
cpu_usage=1, memory_usage=128)])
class _TestNUMA(object):
def test_convert_wipe(self):
d1 = fake_obj_numa._to_dict()
d2 = objects.NUMATopology.obj_from_primitive(d1)._to_dict()
self.assertEqual(d1, d2)
def test_pinning_logic(self):
obj = objects.NUMATopology(cells=[
objects.NUMACell(
id=0, cpuset=set([1, 2]), memory=512,
cpu_usage=2, memory_usage=256,
pinned_cpus=set([1])),
objects.NUMACell(
id=1, cpuset=set([3, 4]), memory=512,
cpu_usage=1, memory_usage=128,
pinned_cpus=set([]))
]
)
self.assertEqual(set([2]), obj.cells[0].free_cpus)
self.assertEqual(set([3, 4]), obj.cells[1].free_cpus)
class TestNUMA(test_objects._LocalTest,
_TestNUMA):
pass
class TestNUMARemote(test_objects._RemoteTest,
_TestNUMA):
pass
| 31.711864
| 78
| 0.621058
|
from nova import objects
from nova.tests.unit.objects import test_objects
fake_obj_numa = objects.NUMATopology(
cells=[
objects.NUMACell(
id=0, cpuset=set([1, 2]), memory=512,
cpu_usage=2, memory_usage=256),
objects.NUMACell(
id=1, cpuset=set([3, 4]), memory=512,
cpu_usage=1, memory_usage=128)])
class _TestNUMA(object):
def test_convert_wipe(self):
d1 = fake_obj_numa._to_dict()
d2 = objects.NUMATopology.obj_from_primitive(d1)._to_dict()
self.assertEqual(d1, d2)
def test_pinning_logic(self):
obj = objects.NUMATopology(cells=[
objects.NUMACell(
id=0, cpuset=set([1, 2]), memory=512,
cpu_usage=2, memory_usage=256,
pinned_cpus=set([1])),
objects.NUMACell(
id=1, cpuset=set([3, 4]), memory=512,
cpu_usage=1, memory_usage=128,
pinned_cpus=set([]))
]
)
self.assertEqual(set([2]), obj.cells[0].free_cpus)
self.assertEqual(set([3, 4]), obj.cells[1].free_cpus)
class TestNUMA(test_objects._LocalTest,
_TestNUMA):
pass
class TestNUMARemote(test_objects._RemoteTest,
_TestNUMA):
pass
| true
| true
|
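test_pinning_logic above pins CPU 1 on the first cell and nothing on the second, then expects free_cpus to hold whatever is left of each cpuset. That expectation is plain set difference, sketched here without the nova objects:
# The free-CPU expectation from test_pinning_logic above, as plain set arithmetic.
cpuset0, pinned0 = {1, 2}, {1}
cpuset1, pinned1 = {3, 4}, set()
assert cpuset0 - pinned0 == {2}       # first assertEqual in the test
assert cpuset1 - pinned1 == {3, 4}    # second assertEqual in the test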
f70c1a9e14e34afc1f891dc6e43eba44f38e5062
| 6,564
|
py
|
Python
|
tests/shell/test_configadmin.py
|
svidoso/ipopo
|
1d4b81207e67890dfccc8f562336c7104f194c17
|
[
"Apache-2.0"
] | 65
|
2015-04-21T10:41:18.000Z
|
2022-01-02T16:25:40.000Z
|
tests/shell/test_configadmin.py
|
svidoso/ipopo
|
1d4b81207e67890dfccc8f562336c7104f194c17
|
[
"Apache-2.0"
] | 85
|
2015-01-20T14:23:52.000Z
|
2022-02-19T17:08:46.000Z
|
tests/shell/test_configadmin.py
|
svidoso/ipopo
|
1d4b81207e67890dfccc8f562336c7104f194c17
|
[
"Apache-2.0"
] | 32
|
2015-03-13T07:43:05.000Z
|
2020-04-24T07:56:53.000Z
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Tests the ConfigurationAdmin shell commands
:author: Thomas Calmant
"""
# Pelix
import pelix.framework
import pelix.services
import pelix.shell
import pelix.shell.beans as beans
# Standard library
import os
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
import unittest2 as unittest
except ImportError:
import unittest
# ------------------------------------------------------------------------------
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# ------------------------------------------------------------------------------
class ConfigAdminShellTest(unittest.TestCase):
"""
    Tests the ConfigurationAdmin shell commands
"""
def setUp(self):
"""
        Prepares a framework and retrieves the shell and ConfigurationAdmin services
"""
# Use a local configuration folder
conf_folder = os.path.join(os.path.dirname(__file__), "conf")
# Create the framework
self.framework = pelix.framework.create_framework(
('pelix.ipopo.core', 'pelix.shell.core',
'pelix.services.configadmin', 'pelix.shell.configadmin'),
{'configuration.folder': conf_folder})
self.framework.start()
# Get the Shell service
context = self.framework.get_bundle_context()
svc_ref = context.get_service_reference(pelix.shell.SERVICE_SHELL)
self.shell = context.get_service(svc_ref)
        # Get the ConfigurationAdmin service
context = self.framework.get_bundle_context()
# Get the service
self.config_ref = context.get_service_reference(
pelix.services.SERVICE_CONFIGURATION_ADMIN)
self.config = context.get_service(self.config_ref)
# Remove existing configurations
for config in self.config.list_configurations():
config.delete()
def _run_command(self, command, *args):
"""
Runs the given shell command
"""
# String output
str_output = StringIO()
# Format command
if args:
command = command.format(*args)
# Add the namespace prefix
command = 'config.{0}'.format(command)
# Run command
session = beans.ShellSession(beans.IOHandler(None, str_output))
self.shell.execute(command, session)
return str_output.getvalue()
def tearDown(self):
"""
Cleans up for next test
"""
# Remove existing configurations
for config in self.config.list_configurations():
config.delete()
# Stop the framework
pelix.framework.FrameworkFactory.delete_framework()
self.framework = None
def testLifeCycle(self):
"""
Tests a configuration life cycle
"""
# Create a factory configuration
key = "testConfig"
first_value = "first"
factory_name = "testFactory"
output = self._run_command("create {0} {1}={2}", factory_name,
key, first_value)
# Get the generated configuration
config = next(iter(self.config.list_configurations()))
# Check validity
self.assertIn(config.get_pid(), output)
self.assertEqual(factory_name, config.get_factory_pid())
self.assertDictContainsSubset({key: first_value},
config.get_properties())
# Update it
second_value = "second"
self._run_command("update {0} {1}={2}", config.get_pid(),
key, second_value)
self.assertDictContainsSubset({key: second_value},
config.get_properties())
# Reload it
self._run_command("reload {0}", config.get_pid())
# List it
output = self._run_command('list')
self.assertIn(config.get_pid(), output)
output = self._run_command('list {0}', config.get_pid())
self.assertIn(config.get_pid(), output)
# Delete it
self._run_command("delete {0}", config.get_pid())
self.assertEqual(self.config.list_configurations(), set())
def testInvalidPid(self):
"""
Tests commands with invalid PIDs
"""
self._run_command("delete <invalid>")
self._run_command("list <invalid>")
self._run_command("reload <invalid>")
def testUpdate(self):
"""
Tests the update command
"""
pid = "testPid"
key = "testConfig"
value = "testValue"
# Create the configuration, with no property
self._run_command("update {0}", pid)
# Get the generated configuration
config = next(iter(self.config.list_configurations()))
self.assertEqual(config.get_pid(), pid)
self.assertIsNone(config.get_properties())
# Set a key
self._run_command("update {0} {1}={2}", pid, key, value)
self.assertDictContainsSubset({key: value}, config.get_properties())
# Remove a key
self._run_command("update {0} {1}=None", pid, key)
self.assertNotIn(key, config.get_properties())
def testList(self):
"""
Other tests for the list command
"""
pid = "testPid"
pid2 = "testPidBis"
key = "testConfig"
value = "testValue"
# Nothing at first
output = self._run_command("list")
self.assertIn("No configuration", output)
# List inexistent PID
output = self._run_command("list {0}", pid)
self.assertIn("No configuration", output)
# Create a configuration without properties
config = self.config.get_configuration(pid)
# List it
output = self._run_command("list {0}", pid)
self.assertIn("Not yet updated", output)
# Update it
config.update({key: value})
output = self._run_command("list {0}", pid)
self.assertIn(pid, output)
self.assertIn(key, output)
self.assertIn(value, output)
# Create a second one
config2 = self.config.get_configuration(pid2)
# Delete the first one
config.delete()
self.assertNotIn(config, self.config.list_configurations())
self.assertIn(config2, self.config.list_configurations())
# List it
output = self._run_command("list {0}", pid)
self.assertIn("No configuration", output)
self.assertIn(pid, output)
| 30.248848
| 80
| 0.591865
|
import pelix.framework
import pelix.services
import pelix.shell
import pelix.shell.beans as beans
import os
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
import unittest2 as unittest
except ImportError:
import unittest
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
class ConfigAdminShellTest(unittest.TestCase):
def setUp(self):
conf_folder = os.path.join(os.path.dirname(__file__), "conf")
self.framework = pelix.framework.create_framework(
('pelix.ipopo.core', 'pelix.shell.core',
'pelix.services.configadmin', 'pelix.shell.configadmin'),
{'configuration.folder': conf_folder})
self.framework.start()
context = self.framework.get_bundle_context()
svc_ref = context.get_service_reference(pelix.shell.SERVICE_SHELL)
self.shell = context.get_service(svc_ref)
context = self.framework.get_bundle_context()
self.config_ref = context.get_service_reference(
pelix.services.SERVICE_CONFIGURATION_ADMIN)
self.config = context.get_service(self.config_ref)
for config in self.config.list_configurations():
config.delete()
def _run_command(self, command, *args):
str_output = StringIO()
if args:
command = command.format(*args)
command = 'config.{0}'.format(command)
session = beans.ShellSession(beans.IOHandler(None, str_output))
self.shell.execute(command, session)
return str_output.getvalue()
def tearDown(self):
for config in self.config.list_configurations():
config.delete()
pelix.framework.FrameworkFactory.delete_framework()
self.framework = None
def testLifeCycle(self):
key = "testConfig"
first_value = "first"
factory_name = "testFactory"
output = self._run_command("create {0} {1}={2}", factory_name,
key, first_value)
config = next(iter(self.config.list_configurations()))
self.assertIn(config.get_pid(), output)
self.assertEqual(factory_name, config.get_factory_pid())
self.assertDictContainsSubset({key: first_value},
config.get_properties())
second_value = "second"
self._run_command("update {0} {1}={2}", config.get_pid(),
key, second_value)
self.assertDictContainsSubset({key: second_value},
config.get_properties())
self._run_command("reload {0}", config.get_pid())
output = self._run_command('list')
self.assertIn(config.get_pid(), output)
output = self._run_command('list {0}', config.get_pid())
self.assertIn(config.get_pid(), output)
self._run_command("delete {0}", config.get_pid())
self.assertEqual(self.config.list_configurations(), set())
def testInvalidPid(self):
self._run_command("delete <invalid>")
self._run_command("list <invalid>")
self._run_command("reload <invalid>")
def testUpdate(self):
pid = "testPid"
key = "testConfig"
value = "testValue"
self._run_command("update {0}", pid)
config = next(iter(self.config.list_configurations()))
self.assertEqual(config.get_pid(), pid)
self.assertIsNone(config.get_properties())
self._run_command("update {0} {1}={2}", pid, key, value)
self.assertDictContainsSubset({key: value}, config.get_properties())
self._run_command("update {0} {1}=None", pid, key)
self.assertNotIn(key, config.get_properties())
def testList(self):
pid = "testPid"
pid2 = "testPidBis"
key = "testConfig"
value = "testValue"
output = self._run_command("list")
self.assertIn("No configuration", output)
output = self._run_command("list {0}", pid)
self.assertIn("No configuration", output)
config = self.config.get_configuration(pid)
output = self._run_command("list {0}", pid)
self.assertIn("Not yet updated", output)
config.update({key: value})
output = self._run_command("list {0}", pid)
self.assertIn(pid, output)
self.assertIn(key, output)
self.assertIn(value, output)
config2 = self.config.get_configuration(pid2)
config.delete()
self.assertNotIn(config, self.config.list_configurations())
self.assertIn(config2, self.config.list_configurations())
output = self._run_command("list {0}", pid)
self.assertIn("No configuration", output)
self.assertIn(pid, output)
| true
| true
|
f70c1c985b94a376cb5ae57f8f742ff430e60c99
| 8,056
|
py
|
Python
|
src/models/densenet/model.py
|
gsc2001/ConvexNet
|
a17609bd5bca0a02b6330b1ad8035f2b280109f0
|
[
"MIT"
] | null | null | null |
src/models/densenet/model.py
|
gsc2001/ConvexNet
|
a17609bd5bca0a02b6330b1ad8035f2b280109f0
|
[
"MIT"
] | null | null | null |
src/models/densenet/model.py
|
gsc2001/ConvexNet
|
a17609bd5bca0a02b6330b1ad8035f2b280109f0
|
[
"MIT"
] | null | null | null |
"""
Vanilla DenseNet implementation
Paper: https://arxiv.org/abs/1608.06993
Implementation taken from: https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py
"""
import re
from collections import OrderedDict
from functools import partial
from typing import Any, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from torch import Tensor
class _DenseLayer(nn.Module):
def __init__(
self, num_input_features: int, growth_rate: int, bn_size: int, drop_rate: float, memory_efficient: bool = False
) -> None:
super().__init__()
self.norm1: nn.BatchNorm2d
self.add_module("norm1", nn.BatchNorm2d(num_input_features))
self.relu1: nn.ReLU
self.add_module("relu1", nn.ReLU(inplace=True))
self.conv1: nn.Conv2d
self.add_module(
"conv1", nn.Conv2d(num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)
)
self.norm2: nn.BatchNorm2d
self.add_module("norm2", nn.BatchNorm2d(bn_size * growth_rate))
self.relu2: nn.ReLU
self.add_module("relu2", nn.ReLU(inplace=True))
self.conv2: nn.Conv2d
self.add_module(
"conv2", nn.Conv2d(bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)
)
self.drop_rate = float(drop_rate)
self.memory_efficient = memory_efficient
def bn_function(self, inputs: List[Tensor]) -> Tensor:
concated_features = torch.cat(inputs, 1)
bottleneck_output = self.conv1(self.relu1(self.norm1(concated_features))) # noqa: T484
return bottleneck_output
# todo: rewrite when torchscript supports any
def any_requires_grad(self, input: List[Tensor]) -> bool:
for tensor in input:
if tensor.requires_grad:
return True
return False
@torch.jit.unused # noqa: T484
def call_checkpoint_bottleneck(self, input: List[Tensor]) -> Tensor:
def closure(*inputs):
return self.bn_function(inputs)
return cp.checkpoint(closure, *input)
@torch.jit._overload_method # noqa: F811
def forward(self, input: List[Tensor]) -> Tensor: # noqa: F811
pass
@torch.jit._overload_method # noqa: F811
def forward(self, input: Tensor) -> Tensor: # noqa: F811
pass
# torchscript does not yet support *args, so we overload method
# allowing it to take either a List[Tensor] or single Tensor
def forward(self, input: Tensor) -> Tensor: # noqa: F811
if isinstance(input, Tensor):
prev_features = [input]
else:
prev_features = input
if self.memory_efficient and self.any_requires_grad(prev_features):
if torch.jit.is_scripting():
raise Exception("Memory Efficient not supported in JIT")
bottleneck_output = self.call_checkpoint_bottleneck(prev_features)
else:
bottleneck_output = self.bn_function(prev_features)
new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
if self.drop_rate > 0:
new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
return new_features
class _DenseBlock(nn.ModuleDict):
_version = 2
def __init__(
self,
num_layers: int,
num_input_features: int,
bn_size: int,
growth_rate: int,
drop_rate: float,
memory_efficient: bool = False,
) -> None:
super().__init__()
for i in range(num_layers):
layer = _DenseLayer(
num_input_features + i * growth_rate,
growth_rate=growth_rate,
bn_size=bn_size,
drop_rate=drop_rate,
memory_efficient=memory_efficient,
)
self.add_module("denselayer%d" % (i + 1), layer)
def forward(self, init_features: Tensor) -> Tensor:
features = [init_features]
for name, layer in self.items():
new_features = layer(features)
features.append(new_features)
return torch.cat(features, 1)
class _Transition(nn.Sequential):
def __init__(self, num_input_features: int, num_output_features: int) -> None:
super().__init__()
self.add_module("norm", nn.BatchNorm2d(num_input_features))
self.add_module("relu", nn.ReLU(inplace=True))
self.add_module("conv", nn.Conv2d(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False))
self.add_module("pool", nn.AvgPool2d(kernel_size=2, stride=2))
class DenseNet(nn.Module):
r"""Densenet-BC model class, based on
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.
Args:
growth_rate (int) - how many filters to add each layer (`k` in paper)
block_config (list of 4 ints) - how many layers in each pooling block
num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottleneck layers
(i.e. bn_size * k features in the bottleneck layer)
drop_rate (float) - dropout rate after each dense layer
num_classes (int) - number of classification classes
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_.
"""
def __init__(
self,
growth_rate: int = 32,
block_config: Tuple[int, int, int, int] = (6, 12, 24, 16),
num_init_features: int = 64,
bn_size: int = 4,
drop_rate: float = 0,
num_classes: int = 1000,
memory_efficient: bool = False,
) -> None:
super().__init__()
# First convolution
self.features = nn.Sequential(
OrderedDict(
[
("conv0", nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
("norm0", nn.BatchNorm2d(num_init_features)),
("relu0", nn.ReLU(inplace=True)),
("pool0", nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
]
)
)
# Each denseblock
num_features = num_init_features
for i, num_layers in enumerate(block_config):
block = _DenseBlock(
num_layers=num_layers,
num_input_features=num_features,
bn_size=bn_size,
growth_rate=growth_rate,
drop_rate=drop_rate,
memory_efficient=memory_efficient,
)
self.features.add_module("denseblock%d" % (i + 1), block)
num_features = num_features + num_layers * growth_rate
if i != len(block_config) - 1:
trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
self.features.add_module("transition%d" % (i + 1), trans)
num_features = num_features // 2
# Final batch norm
self.features.add_module("norm5", nn.BatchNorm2d(num_features))
# Linear layer
self.classifier = nn.Linear(num_features, num_classes)
# Official init from torch repo.
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.constant_(m.bias, 0)
def forward(self, x: Tensor) -> Tensor:
features = self.features(x)
out = F.relu(features, inplace=True)
out = F.adaptive_avg_pool2d(out, (1, 1))
out = torch.flatten(out, 1)
out = self.classifier(out)
return out
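
# Usage sketch (illustrative only, not part of the reference implementation above):
# the arguments below reproduce the DenseNet-121 layout with a hypothetical 10-way
# classifier head; any ImageNet-sized RGB batch works as input.
if __name__ == "__main__":
    model = DenseNet(growth_rate=32, block_config=(6, 12, 24, 16),
                     num_init_features=64, num_classes=10)
    dummy = torch.randn(2, 3, 224, 224)  # two random RGB images
    logits = model(dummy)                # full forward pass through all dense blocks
    print(logits.shape)                  # expected: torch.Size([2, 10])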
| 38.180095
| 120
| 0.620531
|
import re
from collections import OrderedDict
from functools import partial
from typing import Any, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from torch import Tensor
class _DenseLayer(nn.Module):
def __init__(
self, num_input_features: int, growth_rate: int, bn_size: int, drop_rate: float, memory_efficient: bool = False
) -> None:
super().__init__()
self.norm1: nn.BatchNorm2d
self.add_module("norm1", nn.BatchNorm2d(num_input_features))
self.relu1: nn.ReLU
self.add_module("relu1", nn.ReLU(inplace=True))
self.conv1: nn.Conv2d
self.add_module(
"conv1", nn.Conv2d(num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)
)
self.norm2: nn.BatchNorm2d
self.add_module("norm2", nn.BatchNorm2d(bn_size * growth_rate))
self.relu2: nn.ReLU
self.add_module("relu2", nn.ReLU(inplace=True))
self.conv2: nn.Conv2d
self.add_module(
"conv2", nn.Conv2d(bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)
)
self.drop_rate = float(drop_rate)
self.memory_efficient = memory_efficient
def bn_function(self, inputs: List[Tensor]) -> Tensor:
concated_features = torch.cat(inputs, 1)
        bottleneck_output = self.conv1(self.relu1(self.norm1(concated_features)))
        return bottleneck_output
def any_requires_grad(self, input: List[Tensor]) -> bool:
for tensor in input:
if tensor.requires_grad:
return True
return False
    @torch.jit.unused
    def call_checkpoint_bottleneck(self, input: List[Tensor]) -> Tensor:
def closure(*inputs):
return self.bn_function(inputs)
return cp.checkpoint(closure, *input)
    @torch.jit._overload_method
    def forward(self, input: List[Tensor]) -> Tensor:
        pass
    @torch.jit._overload_method
    def forward(self, input: Tensor) -> Tensor:
        pass
    def forward(self, input: Tensor) -> Tensor:
        if isinstance(input, Tensor):
prev_features = [input]
else:
prev_features = input
if self.memory_efficient and self.any_requires_grad(prev_features):
if torch.jit.is_scripting():
raise Exception("Memory Efficient not supported in JIT")
bottleneck_output = self.call_checkpoint_bottleneck(prev_features)
else:
bottleneck_output = self.bn_function(prev_features)
new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
if self.drop_rate > 0:
new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
return new_features
class _DenseBlock(nn.ModuleDict):
_version = 2
def __init__(
self,
num_layers: int,
num_input_features: int,
bn_size: int,
growth_rate: int,
drop_rate: float,
memory_efficient: bool = False,
) -> None:
super().__init__()
for i in range(num_layers):
layer = _DenseLayer(
num_input_features + i * growth_rate,
growth_rate=growth_rate,
bn_size=bn_size,
drop_rate=drop_rate,
memory_efficient=memory_efficient,
)
self.add_module("denselayer%d" % (i + 1), layer)
def forward(self, init_features: Tensor) -> Tensor:
features = [init_features]
for name, layer in self.items():
new_features = layer(features)
features.append(new_features)
return torch.cat(features, 1)
class _Transition(nn.Sequential):
def __init__(self, num_input_features: int, num_output_features: int) -> None:
super().__init__()
self.add_module("norm", nn.BatchNorm2d(num_input_features))
self.add_module("relu", nn.ReLU(inplace=True))
self.add_module("conv", nn.Conv2d(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False))
self.add_module("pool", nn.AvgPool2d(kernel_size=2, stride=2))
class DenseNet(nn.Module):
def __init__(
self,
growth_rate: int = 32,
block_config: Tuple[int, int, int, int] = (6, 12, 24, 16),
num_init_features: int = 64,
bn_size: int = 4,
drop_rate: float = 0,
num_classes: int = 1000,
memory_efficient: bool = False,
) -> None:
super().__init__()
self.features = nn.Sequential(
OrderedDict(
[
("conv0", nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
("norm0", nn.BatchNorm2d(num_init_features)),
("relu0", nn.ReLU(inplace=True)),
("pool0", nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
]
)
)
num_features = num_init_features
for i, num_layers in enumerate(block_config):
block = _DenseBlock(
num_layers=num_layers,
num_input_features=num_features,
bn_size=bn_size,
growth_rate=growth_rate,
drop_rate=drop_rate,
memory_efficient=memory_efficient,
)
self.features.add_module("denseblock%d" % (i + 1), block)
num_features = num_features + num_layers * growth_rate
if i != len(block_config) - 1:
trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
self.features.add_module("transition%d" % (i + 1), trans)
num_features = num_features // 2
self.features.add_module("norm5", nn.BatchNorm2d(num_features))
self.classifier = nn.Linear(num_features, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.constant_(m.bias, 0)
def forward(self, x: Tensor) -> Tensor:
features = self.features(x)
out = F.relu(features, inplace=True)
out = F.adaptive_avg_pool2d(out, (1, 1))
out = torch.flatten(out, 1)
out = self.classifier(out)
return out
| true
| true
|
f70c1d6a132dc5c4f214794bc2ddfb198c8735bf
| 10,736
|
py
|
Python
|
modules/andforensics_connector.py
|
KimVegetable/carpe
|
8325b680898970c02e1fcfc1929490bf31b9ea49
|
[
"Apache-2.0"
] | null | null | null |
modules/andforensics_connector.py
|
KimVegetable/carpe
|
8325b680898970c02e1fcfc1929490bf31b9ea49
|
[
"Apache-2.0"
] | null | null | null |
modules/andforensics_connector.py
|
KimVegetable/carpe
|
8325b680898970c02e1fcfc1929490bf31b9ea49
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""module for android forensics."""
import os
import io
import subprocess
import sqlite3
from datetime import datetime
from modules import logger
from modules import manager
from modules import interface
class AndForensicsConnector(interface.ModuleConnector):
NAME = 'andforensics_connector'
DESCRIPTION = 'Module for android'
TABLE_NAME = 'lv1_os_android_andforensics'
_plugin_classes = {}
def __init__(self):
super(AndForensicsConnector, self).__init__()
def Connect(self, par_id, configuration, source_path_spec, knowledge_base):
"""Connector to connect to AndForensics.
Args:
par_id: partition id.
configuration: configuration values.
source_path_spec (dfvfs.PathSpec): path specification of the source file.
knowledge_base (KnowledgeBase): knowledge base.
"""
        # The image needs to be copied over for andForensics
if os.path.exists(configuration.source_path):
cmd = 'python3.6 /home/byeongchan/modules/andForensics/andForensics.py -i \'{0:s}\' -o \'{1:s}\' ' \
'-proc {2:d}'.format(os.path.dirname(configuration.source_path),
configuration.tmp_path + os.sep + 'andForensics', 10)
proc = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
ret_code = proc.stdout.read()
f = io.StringIO(str(ret_code))
result_msg = f.readline()
print(result_msg)
f.close()
if result_msg[-14:-3] == 'Process End':
base_name = os.path.basename(configuration.source_path)
output_path = configuration.tmp_path + os.sep + 'andForensics' + os.sep \
+ os.path.basename(configuration.source_path)
analysis_db_path = output_path + os.sep + 'analysis_' + base_name + '.db'
load_db_path = output_path + os.sep + 'loaddb_' + base_name + '.db'
preprocess_db_path = output_path + os.sep + 'preprocess_' + base_name + '.db'
this_file_path = os.path.dirname(
os.path.abspath(__file__)) + os.sep + 'schema' + os.sep + 'android' + os.sep
yaml_list = [this_file_path + 'lv1_os_and_app_list.yaml',
this_file_path + 'lv1_os_and_call_history.yaml',
this_file_path + 'lv1_os_and_emb_file.yaml',
this_file_path + 'lv1_os_and_file_history.yaml',
this_file_path + 'lv1_os_and_geodata.yaml',
this_file_path + 'lv1_os_and_id_pw_hash.yaml',
this_file_path + 'lv1_os_and_web_browser_history.yaml']
old_table_list = ['application_list', 'call_history', 'embedded_file', 'file_history',
'geodata', 'id_password_hash', 'web_browser_history']
new_table_list = ['lv1_os_and_app_list', 'lv1_os_and_call_history', 'lv1_os_and_emb_file',
'lv1_os_and_file_history', 'lv1_os_and_geodata', 'lv1_os_and_id_pw_hash',
'lv1_os_and_web_browser_history']
if not self.check_table_from_yaml(configuration, yaml_list, new_table_list):
return False
info = tuple([par_id, configuration.case_id, configuration.evidence_id])
try:
conn = sqlite3.connect(analysis_db_path)
cursor = conn.cursor()
for idx, table in enumerate(old_table_list):
cursor.execute(f'select * from {table}')
rows = cursor.fetchall()
rows_list = []
for row in rows:
                            if table == 'application_list':
row = row[:5] + _convert_timestamp(row[5:13]) + row[13:]
rows_list.append(info + row)
print(rows_list)
query = ""
                        if table == 'application_list':
query = f"Insert into {new_table_list[idx]} values (%s, %s, %s, %s, %s, %s, %s, %s, %s, " \
f"%s, %s, %s, %s, %s, %s, %s, %s, %s);"
                        elif table == 'call_history':
query = f"Insert into {new_table_list[idx]} values (%s, %s, %s, %s, %s, %s, %s, %s, %s, " \
f"%s, %s, %s)"
                        elif table == 'embedded_file':
query = f"Insert into {new_table_list[idx]} values (%s, %s, %s, %s, %s, %s, %s, %s, %s, " \
f"%s, %s, %s, %s, %s)"
                        elif table == 'file_history' or table == 'id_password_hash':
query = f"Insert into {new_table_list[idx]} values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
                        elif table == 'geodata':
query = f"Insert into {new_table_list[idx]} values (%s, %s, %s, %s, %s, %s, %s, %s, %s)"
                        elif table == 'web_browser_history':
query = f"Insert into {new_table_list[idx]} values (%s, %s, %s, %s, %s, %s, %s, %s, " \
f"%s, %s, %s)"
configuration.cursor.bulk_execute(query, rows_list)
self.mask_table(configuration, 'call_history')
except Exception as exception:
logger.error('Database error : {0!s}'.format(exception))
finally:
conn.close()
else:
logger.info('')
def mask_table(self, configuration, table_name):
        if table_name == 'call_history':
query = "update lv1_os_and_call_history set timestamp = regexp_replace(timestamp, " \
"'(\\\\d{2,3}-)\\\\d{1,2}(\\\\d{2}-)\\\\d{2}(\\\\d{2})', " \
"'\\\\1**\\\\2**\\\\3');"
configuration.cursor.execute_query(query)
query = "update lv1_os_and_call_history set phonenumber = regexp_replace(phonenumber, " \
"'((?:(?:0|\\\\+82)(?:10|2|3[1-3]|4[1-4]|5[0-5]|6[1-4]|70)-?)\\\\d{1,2})\\\\d{2}(-?)\\\\d{2}(\\\\d{2})', " \
"'\\\\1**\\\\2**\\\\3')"
configuration.cursor.execute_query(query)
query = "update lv1_os_and_call_history set file = regexp_replace(file, " \
"'(통화 녹음 )([가-힣]|(?:\\\\d{6}))(?:\\\\s|\\\\S)*(_\\\\d{6}_\\\\d{6})', " \
"'\\\\1\\\\2*\\\\3')"
configuration.cursor.execute_query(query)
query = "update lv1_os_and_call_history SET contents = if(CHAR_LENGTH(contents)-CHAR_LENGTH(REPLACE(contents,'|',''))=2," \
" CONCAT_WS('|'," \
" REGEXP_REPLACE(SUBSTRING_INDEX(contents, '|', 1)," \
" '(^\\\\S|\\\\s)(?:\\\\S|\\\\s)*(\\\\(contact_name:string_num\\\\))', '\\\\1*\\\\2')," \
" REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 2), '|', -1)," \
" '(^\\\\S|\\\\s)(?:\\\\S|\\\\s)*(\\\\(contact_name:string\\\\))', '\\\\1*\\\\2')," \
" REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 3), '|', -1)," \
" '(^\\\\S|\\\\s)(?:\\\\S|\\\\s)*(\\\\(contact_name:string_num_mixed\\\\))', '\\\\1*\\\\2')" \
" )," \
" CONCAT_WS('|'," \
" SUBSTRING_INDEX(contents, '|', 1)," \
" REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 2), '|', -1)," \
" '(^\\\\S|\\\\s)(?:\\\\S|\\\\s)*(\\\\(name:string\\\\))', '\\\\1*\\\\2')," \
" REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 3), '|', -1)," \
" '(^(?:\\\\S|\\\\s){2})(?:\\\\S|\\\\s)*(\\\\(m_subject:string_num\\\\))', '\\\\1*\\\\2')," \
" REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 4), '|', -1)," \
" '(^(?:\\\\S|\\\\s){2})(?:\\\\S|\\\\s)*(\\\\(m_subject:string\\\\))', '\\\\1*\\\\2')," \
" REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 5), '|', -1)," \
" '(^(?:\\\\S|\\\\s){2})(?:\\\\S|\\\\s)*(\\\\(m_subject:string_num_mixed\\\\))', '\\\\1*\\\\2')," \
" REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 6), '|', -1)," \
" '(^(?:\\\\S|\\\\s){3})(?:\\\\S|\\\\s)*(\\\\(m_content:string_num_mixed\\\\))', '\\\\1*\\\\2')," \
" REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 7), '|', -1)," \
" '(^(?:\\\\S|\\\\s){3})(?:\\\\S|\\\\s)*(\\\\(m_content:string_num\\\\))', '\\\\1*\\\\2')," \
" REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 8), '|', -1)," \
" '(^(?:\\\\S|\\\\s){3})(?:\\\\S|\\\\s)*(\\\\(m_content:string\\\\))', '\\\\1*\\\\2')," \
" REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 9), '|', -1)," \
" '(^\\\\S|\\\\s)(?:\\\\S|\\\\s)*(\\\\(cnap_name:string_num_mixed\\\\))', '\\\\1*\\\\2')," \
" REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 10), '|', -1)," \
" '(^\\\\S|\\\\s)(?:\\\\S|\\\\s)*(\\\\(cnap_name:string\\\\))', '\\\\1*\\\\2')" \
" )" \
")"
configuration.cursor.execute_query(query)
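        # Worked example of the masking above (hypothetical input, not taken from any
        # evidence image): with the phonenumber pattern, a value such as
        # '010-1234-5678' becomes '010-12**-**78', i.e. the middle digits are starred
        # out while the carrier prefix and the last two digits remain readable.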
manager.ModulesManager.RegisterModule(AndForensicsConnector)
def _convert_timestamp(timestamp):
if timestamp is None:
return 'N/A'
if isinstance(timestamp, tuple):
to_timestamp = []
for t in timestamp:
to_timestamp.append(datetime.fromtimestamp(t).strftime('%Y-%m-%dT%H:%M:%SZ'))
return tuple(to_timestamp)
else:
return datetime.fromtimestamp(timestamp).strftime('%Y-%m-%dT%H:%M:%SZ')
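
# Minimal usage sketch for _convert_timestamp() (illustrative epoch values; running
# this standalone still requires the surrounding carpe "modules" package to import):
# a single epoch yields one '%Y-%m-%dT%H:%M:%SZ' string in the local timezone,
# a tuple yields a tuple of such strings, and None yields 'N/A'.
if __name__ == '__main__':
    print(_convert_timestamp(1533035300))                # single value
    print(_convert_timestamp((1533035300, 1535464383)))  # tuple in, tuple out
    print(_convert_timestamp(None))                      # 'N/A'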
| 59.644444
| 147
| 0.454359
|
import os
import io
import subprocess
import sqlite3
from datetime import datetime
from modules import logger
from modules import manager
from modules import interface
class AndForensicsConnector(interface.ModuleConnector):
NAME = 'andforensics_connector'
DESCRIPTION = 'Module for android'
TABLE_NAME = 'lv1_os_android_andforensics'
_plugin_classes = {}
def __init__(self):
super(AndForensicsConnector, self).__init__()
def Connect(self, par_id, configuration, source_path_spec, knowledge_base):
if os.path.exists(configuration.source_path):
cmd = 'python3.6 /home/byeongchan/modules/andForensics/andForensics.py -i \'{0:s}\' -o \'{1:s}\' ' \
'-proc {2:d}'.format(os.path.dirname(configuration.source_path),
configuration.tmp_path + os.sep + 'andForensics', 10)
proc = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
ret_code = proc.stdout.read()
f = io.StringIO(str(ret_code))
result_msg = f.readline()
print(result_msg)
f.close()
if result_msg[-14:-3] == 'Process End':
base_name = os.path.basename(configuration.source_path)
output_path = configuration.tmp_path + os.sep + 'andForensics' + os.sep \
+ os.path.basename(configuration.source_path)
analysis_db_path = output_path + os.sep + 'analysis_' + base_name + '.db'
load_db_path = output_path + os.sep + 'loaddb_' + base_name + '.db'
preprocess_db_path = output_path + os.sep + 'preprocess_' + base_name + '.db'
this_file_path = os.path.dirname(
os.path.abspath(__file__)) + os.sep + 'schema' + os.sep + 'android' + os.sep
yaml_list = [this_file_path + 'lv1_os_and_app_list.yaml',
this_file_path + 'lv1_os_and_call_history.yaml',
this_file_path + 'lv1_os_and_emb_file.yaml',
this_file_path + 'lv1_os_and_file_history.yaml',
this_file_path + 'lv1_os_and_geodata.yaml',
this_file_path + 'lv1_os_and_id_pw_hash.yaml',
this_file_path + 'lv1_os_and_web_browser_history.yaml']
old_table_list = ['application_list', 'call_history', 'embedded_file', 'file_history',
'geodata', 'id_password_hash', 'web_browser_history']
new_table_list = ['lv1_os_and_app_list', 'lv1_os_and_call_history', 'lv1_os_and_emb_file',
'lv1_os_and_file_history', 'lv1_os_and_geodata', 'lv1_os_and_id_pw_hash',
'lv1_os_and_web_browser_history']
if not self.check_table_from_yaml(configuration, yaml_list, new_table_list):
return False
info = tuple([par_id, configuration.case_id, configuration.evidence_id])
try:
conn = sqlite3.connect(analysis_db_path)
cursor = conn.cursor()
for idx, table in enumerate(old_table_list):
cursor.execute(f'select * from {table}')
rows = cursor.fetchall()
rows_list = []
for row in rows:
                            if table == 'application_list':
row = row[:5] + _convert_timestamp(row[5:13]) + row[13:]
rows_list.append(info + row)
print(rows_list)
query = ""
                        if table == 'application_list':
query = f"Insert into {new_table_list[idx]} values (%s, %s, %s, %s, %s, %s, %s, %s, %s, " \
f"%s, %s, %s, %s, %s, %s, %s, %s, %s);"
                        elif table == 'call_history':
query = f"Insert into {new_table_list[idx]} values (%s, %s, %s, %s, %s, %s, %s, %s, %s, " \
f"%s, %s, %s)"
                        elif table == 'embedded_file':
query = f"Insert into {new_table_list[idx]} values (%s, %s, %s, %s, %s, %s, %s, %s, %s, " \
f"%s, %s, %s, %s, %s)"
                        elif table == 'file_history' or table == 'id_password_hash':
query = f"Insert into {new_table_list[idx]} values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
                        elif table == 'geodata':
query = f"Insert into {new_table_list[idx]} values (%s, %s, %s, %s, %s, %s, %s, %s, %s)"
                        elif table == 'web_browser_history':
query = f"Insert into {new_table_list[idx]} values (%s, %s, %s, %s, %s, %s, %s, %s, " \
f"%s, %s, %s)"
configuration.cursor.bulk_execute(query, rows_list)
self.mask_table(configuration, 'call_history')
except Exception as exception:
logger.error('Database error : {0!s}'.format(exception))
finally:
conn.close()
else:
logger.info('')
def mask_table(self, configuration, table_name):
        if table_name == 'call_history':
query = "update lv1_os_and_call_history set timestamp = regexp_replace(timestamp, " \
"'(\\\\d{2,3}-)\\\\d{1,2}(\\\\d{2}-)\\\\d{2}(\\\\d{2})', " \
"'\\\\1**\\\\2**\\\\3');"
configuration.cursor.execute_query(query)
query = "update lv1_os_and_call_history set phonenumber = regexp_replace(phonenumber, " \
"'((?:(?:0|\\\\+82)(?:10|2|3[1-3]|4[1-4]|5[0-5]|6[1-4]|70)-?)\\\\d{1,2})\\\\d{2}(-?)\\\\d{2}(\\\\d{2})', " \
"'\\\\1**\\\\2**\\\\3')"
configuration.cursor.execute_query(query)
query = "update lv1_os_and_call_history set file = regexp_replace(file, " \
"'(통화 녹음 )([가-힣]|(?:\\\\d{6}))(?:\\\\s|\\\\S)*(_\\\\d{6}_\\\\d{6})', " \
"'\\\\1\\\\2*\\\\3')"
configuration.cursor.execute_query(query)
query = "update lv1_os_and_call_history SET contents = if(CHAR_LENGTH(contents)-CHAR_LENGTH(REPLACE(contents,'|',''))=2," \
" CONCAT_WS('|'," \
" REGEXP_REPLACE(SUBSTRING_INDEX(contents, '|', 1)," \
" '(^\\\\S|\\\\s)(?:\\\\S|\\\\s)*(\\\\(contact_name:string_num\\\\))', '\\\\1*\\\\2')," \
" REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 2), '|', -1)," \
" '(^\\\\S|\\\\s)(?:\\\\S|\\\\s)*(\\\\(contact_name:string\\\\))', '\\\\1*\\\\2')," \
" REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 3), '|', -1)," \
" '(^\\\\S|\\\\s)(?:\\\\S|\\\\s)*(\\\\(contact_name:string_num_mixed\\\\))', '\\\\1*\\\\2')" \
" )," \
" CONCAT_WS('|'," \
" SUBSTRING_INDEX(contents, '|', 1)," \
" REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 2), '|', -1)," \
" '(^\\\\S|\\\\s)(?:\\\\S|\\\\s)*(\\\\(name:string\\\\))', '\\\\1*\\\\2')," \
" REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 3), '|', -1)," \
" '(^(?:\\\\S|\\\\s){2})(?:\\\\S|\\\\s)*(\\\\(m_subject:string_num\\\\))', '\\\\1*\\\\2')," \
" REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 4), '|', -1)," \
" '(^(?:\\\\S|\\\\s){2})(?:\\\\S|\\\\s)*(\\\\(m_subject:string\\\\))', '\\\\1*\\\\2')," \
" REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 5), '|', -1)," \
" '(^(?:\\\\S|\\\\s){2})(?:\\\\S|\\\\s)*(\\\\(m_subject:string_num_mixed\\\\))', '\\\\1*\\\\2')," \
" REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 6), '|', -1)," \
" '(^(?:\\\\S|\\\\s){3})(?:\\\\S|\\\\s)*(\\\\(m_content:string_num_mixed\\\\))', '\\\\1*\\\\2')," \
" REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 7), '|', -1)," \
" '(^(?:\\\\S|\\\\s){3})(?:\\\\S|\\\\s)*(\\\\(m_content:string_num\\\\))', '\\\\1*\\\\2')," \
" REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 8), '|', -1)," \
" '(^(?:\\\\S|\\\\s){3})(?:\\\\S|\\\\s)*(\\\\(m_content:string\\\\))', '\\\\1*\\\\2')," \
" REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 9), '|', -1)," \
" '(^\\\\S|\\\\s)(?:\\\\S|\\\\s)*(\\\\(cnap_name:string_num_mixed\\\\))', '\\\\1*\\\\2')," \
" REGEXP_REPLACE(SUBSTRING_INDEX(SUBSTRING_INDEX(contents, '|', 10), '|', -1)," \
" '(^\\\\S|\\\\s)(?:\\\\S|\\\\s)*(\\\\(cnap_name:string\\\\))', '\\\\1*\\\\2')" \
" )" \
")"
configuration.cursor.execute_query(query)
manager.ModulesManager.RegisterModule(AndForensicsConnector)
def _convert_timestamp(timestamp):
if timestamp is None:
return 'N/A'
if isinstance(timestamp, tuple):
to_timestamp = []
for t in timestamp:
to_timestamp.append(datetime.fromtimestamp(t).strftime('%Y-%m-%dT%H:%M:%SZ'))
return tuple(to_timestamp)
else:
return datetime.fromtimestamp(timestamp).strftime('%Y-%m-%dT%H:%M:%SZ')
| true
| true
|
f70c1db71c0d85aad1438342c2764a0e1cfb70f9
| 48,153
|
py
|
Python
|
python/ccxt/bitz.py
|
atommy1966/ccxt
|
928243ed26a268659723c0965c4c5d6ee128d70a
|
[
"MIT"
] | 1
|
2020-12-21T04:04:24.000Z
|
2020-12-21T04:04:24.000Z
|
python/ccxt/bitz.py
|
atommy1966/ccxt
|
928243ed26a268659723c0965c4c5d6ee128d70a
|
[
"MIT"
] | 1
|
2020-05-08T09:19:46.000Z
|
2020-09-12T14:55:58.000Z
|
python/ccxt/bitz.py
|
atommy1966/ccxt
|
928243ed26a268659723c0965c4c5d6ee128d70a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
class bitz(Exchange):
def describe(self):
return self.deep_extend(super(bitz, self).describe(), {
'id': 'bitz',
'name': 'Bit-Z',
'countries': ['HK'],
'rateLimit': 2000,
'version': 'v2',
'userAgent': self.userAgents['chrome'],
'has': {
'fetchTickers': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchOrders': True,
'fetchOrder': True,
'createMarketOrder': False,
'fetchDeposits': True,
'fetchWithdrawals': True,
'fetchTransactions': False,
},
'timeframes': {
'1m': '1min',
'5m': '5min',
'15m': '15min',
'30m': '30min',
'1h': '60min',
'4h': '4hour',
'1d': '1day',
'5d': '5day',
'1w': '1week',
'1M': '1mon',
},
'hostname': 'apiv2.bitz.com',
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/35862606-4f554f14-0b5d-11e8-957d-35058c504b6f.jpg',
'api': {
'market': 'https://{hostname}',
'trade': 'https://{hostname}',
'assets': 'https://{hostname}',
},
'www': 'https://www.bitz.com',
'doc': 'https://apidoc.bitz.com/en/',
'fees': 'https://www.bitz.com/fee?type=1',
'referral': 'https://u.bitz.com/register?invite_code=1429193',
},
'api': {
'market': {
'get': [
'ticker',
'depth',
'order', # trades
'tickerall',
'kline',
'symbolList',
'currencyRate',
'currencyCoinRate',
'coinRate',
],
},
'trade': {
'post': [
'addEntrustSheet',
'cancelEntrustSheet',
'cancelAllEntrustSheet',
'getUserHistoryEntrustSheet', # closed orders
'getUserNowEntrustSheet', # open orders
'getEntrustSheetInfo', # order
'depositOrWithdraw', # transactions
],
},
'assets': {
'post': [
'getUserAssets',
],
},
},
'fees': {
'trading': {
'maker': 0.002,
'taker': 0.002,
},
'funding': {
'withdraw': {
'BTC': '0.5%',
'DKKT': '0.5%',
'ETH': 0.01,
'USDT': '0.5%',
'LTC': '0.5%',
'FCT': '0.5%',
'LSK': '0.5%',
'HXI': '0.8%',
'ZEC': '0.5%',
'DOGE': '0.5%',
'MZC': '0.5%',
'ETC': '0.5%',
'GXS': '0.5%',
'XPM': '0.5%',
'PPC': '0.5%',
'BLK': '0.5%',
'XAS': '0.5%',
'HSR': '0.5%',
'NULS': 5.0,
'VOISE': 350.0,
'PAY': 1.5,
'EOS': 0.6,
'YBCT': 35.0,
'OMG': 0.3,
'OTN': 0.4,
'BTX': '0.5%',
'QTUM': '0.5%',
'DASH': '0.5%',
'GAME': '0.5%',
'BCH': '0.5%',
'GNT': 9.0,
'SSS': 1500.0,
'ARK': '0.5%',
'PART': '0.5%',
'LEO': '0.5%',
'DGB': '0.5%',
'ZSC': 130.0,
'VIU': 350.0,
'BTG': '0.5%',
'ARN': 10.0,
'VTC': '0.5%',
'BCD': '0.5%',
'TRX': 200.0,
'HWC': '0.5%',
'UNIT': '0.5%',
'OXY': '0.5%',
'MCO': 0.3500,
'SBTC': '0.5%',
'BCX': '0.5%',
'ETF': '0.5%',
'PYLNT': 0.4000,
'XRB': '0.5%',
'ETP': '0.5%',
},
},
},
'precision': {
'amount': 8,
'price': 8,
},
'options': {
'fetchOHLCVVolume': True,
'fetchOHLCVWarning': True,
'lastNonceTimestamp': 0,
},
'commonCurrencies': {
# https://github.com/ccxt/ccxt/issues/3881
# https://support.bit-z.pro/hc/en-us/articles/360007500654-BOX-BOX-Token-
'BOX': 'BOX Token',
'LEO': 'LeoCoin',
'XRB': 'NANO',
'PXC': 'Pixiecoin',
'VTC': 'VoteCoin',
'TTC': 'TimesChain',
},
'exceptions': {
# '200': Success
'-102': ExchangeError, # Invalid parameter
'-103': AuthenticationError, # Verification failed
'-104': ExchangeNotAvailable, # Network Error-1
'-105': AuthenticationError, # Invalid api signature
'-106': ExchangeNotAvailable, # Network Error-2
'-109': AuthenticationError, # Invalid scretKey
'-110': DDoSProtection, # The number of access requests exceeded
'-111': PermissionDenied, # Current IP is not in the range of trusted IP
'-112': OnMaintenance, # Service is under maintenance
'-114': RateLimitExceeded, # The number of daily requests has reached the limit
'-117': AuthenticationError, # The apikey expires
'-100015': AuthenticationError, # Trade password error
'-100044': ExchangeError, # Fail to request data
'-100101': ExchangeError, # Invalid symbol
'-100201': ExchangeError, # Invalid symbol
'-100301': ExchangeError, # Invalid symbol
'-100401': ExchangeError, # Invalid symbol
'-100302': ExchangeError, # Type of K-line error
'-100303': ExchangeError, # Size of K-line error
'-200003': AuthenticationError, # Please set trade password
'-200005': PermissionDenied, # This account can not trade
'-200025': ExchangeNotAvailable, # Temporary trading halt
'-200027': InvalidOrder, # Price Error
'-200028': InvalidOrder, # Amount must be greater than 0
'-200029': InvalidOrder, # Number must be between %s and %d
'-200030': InvalidOrder, # Over price range
'-200031': InsufficientFunds, # Insufficient assets
'-200032': ExchangeError, # System error. Please contact customer service
'-200033': ExchangeError, # Fail to trade
'-200034': OrderNotFound, # The order does not exist
'-200035': OrderNotFound, # Cancellation error, order filled
'-200037': InvalidOrder, # Trade direction error
'-200038': ExchangeError, # Trading Market Error
'-200055': OrderNotFound, # Order record does not exist
'-300069': AuthenticationError, # api_key is illegal
'-300101': ExchangeError, # Transaction type error
'-300102': InvalidOrder, # Price or number cannot be less than 0
'-300103': AuthenticationError, # Trade password error
'-301001': ExchangeNotAvailable, # Network Error-3
},
})
def fetch_markets(self, params={}):
response = self.marketGetSymbolList(params)
#
# { status: 200,
# msg: "",
# data: { ltc_btc: { id: "1",
# name: "ltc_btc",
# coinFrom: "ltc",
# coinTo: "btc",
# numberFloat: "4",
# priceFloat: "8",
# status: "1",
# minTrade: "0.010",
# maxTrade: "500000000.000"},
# qtum_usdt: { id: "196",
# name: "qtum_usdt",
# coinFrom: "qtum",
# coinTo: "usdt",
# numberFloat: "4",
# priceFloat: "2",
# status: "1",
# minTrade: "0.100",
# maxTrade: "500000000.000"}, },
# time: 1535969146,
# microtime: "0.66955600 1535969146",
# source: "api" }
#
markets = self.safe_value(response, 'data')
ids = list(markets.keys())
result = []
for i in range(0, len(ids)):
id = ids[i]
market = markets[id]
numericId = self.safe_string(market, 'id')
baseId = self.safe_string(market, 'coinFrom')
quoteId = self.safe_string(market, 'coinTo')
base = baseId.upper()
quote = quoteId.upper()
base = self.safe_currency_code(base)
quote = self.safe_currency_code(quote)
symbol = base + '/' + quote
precision = {
'amount': self.safe_integer(market, 'numberFloat'),
'price': self.safe_integer(market, 'priceFloat'),
}
result.append({
'info': market,
'id': id,
'numericId': numericId,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'precision': precision,
'limits': {
'amount': {
'min': self.safe_float(market, 'minTrade'),
'max': self.safe_float(market, 'maxTrade'),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
})
return result
def fetch_balance(self, params={}):
self.load_markets()
response = self.assetsPostGetUserAssets(params)
#
# {
# status: 200,
# msg: "",
# data: {
# cny: 0,
# usd: 0,
# btc_total: 0,
# info: [{
# "name": "zpr",
# "num": "37.49067275",
# "over": "37.49067275",
# "lock": "0.00000000",
# "btc": "0.00000000",
# "usd": "0.00000000",
# "cny": "0.00000000",
# }],
# },
# time: 1535983966,
# microtime: "0.70400500 1535983966",
# source: "api",
# }
#
balances = self.safe_value(response['data'], 'info')
result = {'info': response}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'name')
code = self.safe_currency_code(currencyId)
account = self.account()
account['used'] = self.safe_float(balance, 'lock')
account['total'] = self.safe_float(balance, 'num')
account['free'] = self.safe_float(balance, 'over')
result[code] = account
return self.parse_balance(result)
def parse_ticker(self, ticker, market=None):
#
# { symbol: "eth_btc",
# quoteVolume: "3905.72",
# volume: "97058.21",
# priceChange: "-1.72",
# priceChange24h: "-1.65",
# askPrice: "0.03971272",
# askQty: "0.0663",
# bidPrice: "0.03961469",
# bidQty: "19.5451",
# open: "0.04036769",
# high: "0.04062988",
# low: "0.03956123",
# now: "0.03970100",
# firstId: 115567767,
# lastId: 115795316,
# dealCount: 14078,
# numberPrecision: 4,
# pricePrecision: 8,
# cny: "1959.05",
# usd: "287.10",
# krw: "318655.82" }
#
timestamp = None
symbol = None
if market is None:
marketId = self.safe_string(ticker, 'symbol')
market = self.safe_value(self.markets_by_id, marketId)
if market is not None:
symbol = market['symbol']
last = self.safe_float(ticker, 'now')
open = self.safe_float(ticker, 'open')
change = None
average = None
if last is not None and open is not None:
change = last - open
average = self.sum(last, open) / 2
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'bidPrice'),
'bidVolume': self.safe_float(ticker, 'bidQty'),
'ask': self.safe_float(ticker, 'askPrice'),
'askVolume': self.safe_float(ticker, 'askQty'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': self.safe_float(ticker, 'priceChange24h'),
'average': average,
'baseVolume': self.safe_float(ticker, 'volume'),
'quoteVolume': self.safe_float(ticker, 'quoteVolume'),
'info': ticker,
}
def parse_microtime(self, microtime):
if microtime is None:
return microtime
parts = microtime.split(' ')
milliseconds = float(parts[0])
seconds = int(parts[1])
total = self.sum(seconds, milliseconds)
return int(total * 1000)
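    # Worked example for parse_microtime(), using the microtime string from the
    # sample payloads below: "0.76341900 1535970397" splits into 0.763419 fractional
    # seconds and 1535970397 whole seconds, giving int(1535970397.763419 * 1000)
    # = 1535970397763 milliseconds.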
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.marketGetTicker(self.extend(request, params))
#
# { status: 200,
# msg: "",
# data: { symbol: "eth_btc",
# quoteVolume: "3905.72",
# volume: "97058.21",
# priceChange: "-1.72",
# priceChange24h: "-1.65",
# askPrice: "0.03971272",
# askQty: "0.0663",
# bidPrice: "0.03961469",
# bidQty: "19.5451",
# open: "0.04036769",
# high: "0.04062988",
# low: "0.03956123",
# now: "0.03970100",
# firstId: 115567767,
# lastId: 115795316,
# dealCount: 14078,
# numberPrecision: 4,
# pricePrecision: 8,
# cny: "1959.05",
# usd: "287.10",
# krw: "318655.82" },
# time: 1535970397,
# microtime: "0.76341900 1535970397",
# source: "api" }
#
ticker = self.parse_ticker(response['data'], market)
timestamp = self.parse_microtime(self.safe_string(response, 'microtime'))
return self.extend(ticker, {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
})
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
request = {}
if symbols is not None:
ids = self.market_ids(symbols)
request['symbols'] = ','.join(ids)
response = self.marketGetTickerall(self.extend(request, params))
#
# { status: 200,
# msg: "",
# data: { ela_btc: { symbol: "ela_btc",
# quoteVolume: "0.00",
# volume: "3.28",
# priceChange: "0.00",
# priceChange24h: "0.00",
# askPrice: "0.00147984",
# askQty: "5.4580",
# bidPrice: "0.00120230",
# bidQty: "12.5384",
# open: "0.00149078",
# high: "0.00149078",
# low: "0.00149078",
# now: "0.00149078",
# firstId: 115581219,
# lastId: 115581219,
# dealCount: 1,
# numberPrecision: 4,
# pricePrecision: 8,
# cny: "73.66",
# usd: "10.79",
# krw: "11995.03" } },
# time: 1535971578,
# microtime: "0.39854200 1535971578",
# source: "api" }
#
tickers = self.safe_value(response, 'data')
timestamp = self.parse_microtime(self.safe_string(response, 'microtime'))
result = {}
ids = list(tickers.keys())
for i in range(0, len(ids)):
id = ids[i]
ticker = tickers[id]
market = None
if id in self.markets_by_id:
market = self.markets_by_id[id]
ticker = self.parse_ticker(tickers[id], market)
symbol = ticker['symbol']
if symbol is None:
if market is not None:
symbol = market['symbol']
else:
baseId, quoteId = id.split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if symbol is not None:
result[symbol] = self.extend(ticker, {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
})
return result
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
request = {
'symbol': self.market_id(symbol),
}
response = self.marketGetDepth(self.extend(request, params))
#
# { status: 200,
# msg: "",
# data: { asks: [["10.00000000", "0.4426", "4.4260"],
# ["1.00000000", "0.8339", "0.8339"],
# ["0.91700000", "0.0500", "0.0458"],
# ["0.20000000", "0.1000", "0.0200"],
# ["0.03987120", "16.1262", "0.6429"],
# ["0.03986120", "9.7523", "0.3887"] ],
# bids: [["0.03976145", "0.0359", "0.0014"],
# ["0.03973401", "20.9493", "0.8323"],
# ["0.03967970", "0.0328", "0.0013"],
# ["0.00000002", "10000.0000", "0.0002"],
# ["0.00000001", "231840.7500", "0.0023"]],
# coinPair: "eth_btc" },
# time: 1535974778,
# microtime: "0.04017400 1535974778",
# source: "api" }
#
orderbook = self.safe_value(response, 'data')
timestamp = self.parse_microtime(self.safe_string(response, 'microtime'))
return self.parse_order_book(orderbook, timestamp)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {id: 115807453,
# t: "19:36:24",
# T: 1535974584,
# p: "0.03983296",
# n: "0.1000",
# s: "buy" },
#
id = self.safe_string(trade, 'id')
timestamp = self.safe_timestamp(trade, 'T')
symbol = None
if market is not None:
symbol = market['symbol']
price = self.safe_float(trade, 'p')
amount = self.safe_float(trade, 'n')
cost = None
if price is not None:
if amount is not None:
cost = self.price_to_precision(symbol, amount * price)
side = self.safe_string(trade, 's')
return {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': None,
'type': 'limit',
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
'info': trade,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.marketGetOrder(self.extend(request, params))
#
# { status: 200,
# msg: "",
# data: [{id: 115807453,
# t: "19:36:24",
# T: 1535974584,
# p: "0.03983296",
# n: "0.1000",
# s: "buy" },
# {id: 115806811,
# t: "19:33:19",
# T: 1535974399,
# p: "0.03981135",
# n: "9.4612",
# s: "sell" } ],
# time: 1535974583,
# microtime: "0.57118100 1535974583",
# source: "api" }
#
return self.parse_trades(response['data'], market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
#
# {
# time: "1535973420000",
# open: "0.03975084",
# high: "0.03975084",
# low: "0.03967700",
# close: "0.03967700",
# volume: "12.4733",
# datetime: "2018-09-03 19:17:00"
# }
#
return [
self.safe_integer(ohlcv, 'time'),
self.safe_float(ohlcv, 'open'),
self.safe_float(ohlcv, 'high'),
self.safe_float(ohlcv, 'low'),
self.safe_float(ohlcv, 'close'),
self.safe_float(ohlcv, 'volume'),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
duration = self.parse_timeframe(timeframe) * 1000
market = self.market(symbol)
request = {
'symbol': market['id'],
'resolution': self.timeframes[timeframe],
}
if limit is not None:
request['size'] = min(limit, 300) # 1-300
if since is not None:
request['to'] = self.sum(since, limit * duration * 1000)
else:
if since is not None:
raise ArgumentsRequired(self.id + ' fetchOHLCV requires a limit argument if the since argument is specified')
response = self.marketGetKline(self.extend(request, params))
#
# {
# status: 200,
# msg: "",
# data: {
# bars: [
# {time: "1535973420000", open: "0.03975084", high: "0.03975084", low: "0.03967700", close: "0.03967700", volume: "12.4733", datetime: "2018-09-03 19:17:00"},
# {time: "1535955480000", open: "0.04009900", high: "0.04016745", low: "0.04009900", close: "0.04012074", volume: "74.4803", datetime: "2018-09-03 14:18:00"},
# ],
# resolution: "1min",
# symbol: "eth_btc",
# from: "1535973420000",
# to: "1535955480000",
# size: 300
# },
# time: 1535973435,
# microtime: "0.56462100 1535973435",
# source: "api"
# }
#
data = self.safe_value(response, 'data', {})
bars = self.safe_value(data, 'bars', [])
return self.parse_ohlcvs(bars, market, timeframe, since, limit)
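    # Usage sketch (hypothetical symbol and timestamp): when "since" is passed, a
    # "limit" is mandatory, e.g.
    #     bitz().fetch_ohlcv('ETH/BTC', '5m', since=1535970000000, limit=100)
    # whereas "limit" alone simply caps the request at 300 candles.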
def parse_order_status(self, status):
statuses = {
'0': 'open',
'1': 'open', # partially filled
'2': 'closed', # filled
'3': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "id": "693248739", # order id
# "uId": "2074056", # uid
# "price": "100", # price
# "number": "10", # number
# "numberOver": "10", # undealed
# "flag": "sale", # flag
# "status": "0", # unfilled
# "coinFrom": "vtc",
# "coinTo": "dkkt",
# "numberDeal": "0" # dealed
# }
#
id = self.safe_string(order, 'id')
symbol = None
if market is None:
baseId = self.safe_string(order, 'coinFrom')
quoteId = self.safe_string(order, 'coinTo')
if (baseId is not None) and (quoteId is not None):
marketId = baseId + '_' + quoteId
if marketId in self.markets_by_id:
market = self.safe_value(self.markets_by_id, marketId)
else:
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if market is not None:
symbol = market['symbol']
side = self.safe_string(order, 'flag')
if side is not None:
side = 'sell' if (side == 'sale') else 'buy'
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'number')
remaining = self.safe_float(order, 'numberOver')
filled = self.safe_float(order, 'numberDeal')
timestamp = self.safe_integer(order, 'timestamp')
if timestamp is None:
timestamp = self.safe_timestamp(order, 'created')
cost = self.safe_float(order, 'orderTotalPrice')
if price is not None:
if filled is not None:
cost = filled * price
status = self.parse_order_status(self.safe_string(order, 'status'))
return {
'id': id,
'clientOrderId': None,
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': 'limit',
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'trades': None,
'fee': None,
'info': order,
'average': None,
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
if type != 'limit':
raise ExchangeError(self.id + ' createOrder allows limit orders only')
market = self.market(symbol)
orderType = '1' if (side == 'buy') else '2'
if not self.password:
raise ExchangeError(self.id + ' createOrder() requires you to set exchange.password = "YOUR_TRADING_PASSWORD"(a trade password is NOT THE SAME as your login password)')
request = {
'symbol': market['id'],
'type': orderType,
'price': self.price_to_precision(symbol, price),
'number': self.amount_to_precision(symbol, amount),
'tradePwd': self.password,
}
response = self.tradePostAddEntrustSheet(self.extend(request, params))
#
# {
# "status": 200,
# "msg": "",
# "data": {
# "id": "693248739", # order id
# "uId": "2074056", # uid
# "price": "100", # price
# "number": "10", # number
# "numberOver": "10", # undealed
# "flag": "sale", # flag
# "status": "0", # unfilled
# "coinFrom": "vtc",
# "coinTo": "dkkt",
# "numberDeal": "0" # dealed
# },
# "time": "1533035297",
# "microtime": "0.41892000 1533035297",
# "source": "api",
# }
#
timestamp = self.parse_microtime(self.safe_string(response, 'microtime'))
order = self.extend({
'timestamp': timestamp,
}, response['data'])
return self.parse_order(order, market)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'entrustSheetId': id,
}
response = self.tradePostCancelEntrustSheet(self.extend(request, params))
#
# {
# "status":200,
# "msg":"",
# "data":{
# "updateAssetsData":{
# "coin":"bz",
# "over":"1000.00000000",
# "lock":"-1000.00000000"
# },
# "assetsInfo":{
# "coin":"bz",
# "over":"9999.99999999",
# "lock":"9999.99999999"
# }
# },
# "time":"1535464383",
# "microtime":"0.91558000 1535464383",
# "source":"api"
# }
#
return response
def cancel_orders(self, ids, symbol=None, params={}):
self.load_markets()
request = {
'ids': ','.join(ids),
}
response = self.tradePostCancelEntrustSheet(self.extend(request, params))
#
# {
# "status":200,
# "msg":"",
# "data":{
# "744173808":{
# "updateAssetsData":{
# "coin":"bz",
# "over":"100.00000000",
# "lock":"-100.00000000"
# },
# "assetsInfo":{
# "coin":"bz",
# "over":"899.99999999",
# "lock":"19099.99999999"
# }
# },
# "744173809":{
# "updateAssetsData":{
# "coin":"bz",
# "over":"100.00000000",
# "lock":"-100.00000000"
# },
# "assetsInfo":{
# "coin":"bz",
# "over":"999.99999999",
# "lock":"18999.99999999"
# }
# }
# },
# "time":"1535525649",
# "microtime":"0.05009400 1535525649",
# "source":"api"
# }
#
return response
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'entrustSheetId': id,
}
response = self.tradePostGetEntrustSheetInfo(self.extend(request, params))
#
# {
# "status":200,
# "msg":"",
# "data":{
# "id":"708279852",
# "uId":"2074056",
# "price":"100.00000000",
# "number":"10.0000",
# "total":"0.00000000",
# "numberOver":"10.0000",
# "numberDeal":"0.0000",
# "flag":"sale",
# "status":"0", #0:unfilled, 1:partial deal, 2:all transactions, 3:already cancelled
# "coinFrom":"bz",
# "coinTo":"usdt",
# "orderTotalPrice":"0",
# "created":"1533279876"
# },
# "time":"1533280294",
# "microtime":"0.36859200 1533280294",
# "source":"api"
# }
#
return self.parse_order(response['data'])
def fetch_orders_with_method(self, method, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'coinFrom': market['baseId'],
'coinTo': market['quoteId'],
# 'type': 1, # optional integer, 1 = buy, 2 = sell
# 'page': 1, # optional integer
# 'pageSize': 100, # optional integer, max 100
# 'startTime': 1510235730, # optional integer timestamp in seconds
# 'endTime': 1510235730, # optional integer timestamp in seconds
}
if limit is not None:
request['page'] = 1
request['pageSize'] = limit
if since is not None:
request['startTime'] = int(since / 1000)
# request['endTime'] = int(since / 1000)
response = getattr(self, method)(self.extend(request, params))
#
# {
# "status": 200,
# "msg": "",
# "data": {
# "data": [
# {
# "id": "693248739",
# "uid": "2074056",
# "price": "100.00000000",
# "number": "10.0000",
# "total": "0.00000000",
# "numberOver": "0.0000",
# "numberDeal": "0.0000",
# "flag": "sale",
# "status": "3", # 0:unfilled, 1:partial deal, 2:all transactions, 3:already cancelled
# "isNew": "N",
# "coinFrom": "vtc",
# "coinTo": "dkkt",
# "created": "1533035300",
# },
# {
# "id": "723086996",
# "uid": "2074056",
# "price": "100.00000000",
# "number": "10.0000",
# "total": "0.00000000",
# "numberOver": "0.0000",
# "numberDeal": "0.0000",
# "flag": "sale",
# "status": "3",
# "isNew": "N",
# "coinFrom": "bz",
# "coinTo": "usdt",
# "created": "1533523568",
# },
# ],
# "pageInfo": {
# "limit": "10",
# "offest": "0",
# "current_page": "1",
# "page_size": "10",
# "total_count": "17",
# "page_count": "2",
# }
# },
# "time": "1533279329",
# "microtime": "0.15305300 1533279329",
# "source": "api"
# }
#
orders = self.safe_value(response['data'], 'data', [])
return self.parse_orders(orders, None, since, limit)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_with_method('tradePostGetUserHistoryEntrustSheet', symbol, since, limit, params)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_with_method('tradePostGetUserNowEntrustSheet', symbol, since, limit, params)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_with_method('tradePostGetUserHistoryEntrustSheet', symbol, since, limit, params)
def parse_transaction_status(self, status):
statuses = {
'1': 'pending',
'2': 'pending',
'3': 'pending',
'4': 'ok',
'5': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# {
# "id": '96275',
# "uid": '2109073',
# "wallet": '0xf4c4141c0127bc37b1d0c409a091920eba13ada7',
# "txid": '0xb7adfa52aa566f9ac112e3c01f77bd91179b19eab12092a9a5a8b33d5086e31d',
# "confirm": '12',
# "number": '0.50000000',
# "status": 4,
# "updated": '1534944168605',
# "addressUrl": 'https://etherscan.io/address/',
# "txidUrl": 'https://etherscan.io/tx/',
# "description": 'Ethereum',
# "coin": 'eth',
# "memo": ''
# }
#
# {
# "id":"397574",
# "uid":"2033056",
# "wallet":"1AG1gZvQAYu3WBvgg7p4BMMghQD2gE693k",
# "txid":"",
# "confirm":"0",
# "number":"1000.00000000",
# "status":1,
# "updated":"0",
# "addressUrl":"http://omniexplorer.info/lookupadd.aspx?address=",
# "txidUrl":"http://omniexplorer.info/lookuptx.aspx?txid=",
# "description":"Tether",
# "coin":"usdt",
# "memo":""
# }
#
# {
# "id":"153606",
# "uid":"2033056",
# "wallet":"1AG1gZvQAYu3WBvgg7p4BMMghQD2gE693k",
# "txid":"aa2b179f84cd6dedafd41845e0fbf7f01e14c0d71ea3140d03d6f5a9ccd93199",
# "confirm":"0",
# "number":"761.11110000",
# "status":4,
# "updated":"1536726133579",
# "addressUrl":"http://omniexplorer.info/lookupadd.aspx?address=",
# "txidUrl":"http://omniexplorer.info/lookuptx.aspx?txid=",
# "description":"Tether",
# "coin":"usdt",
# "memo":""
# }
#
timestamp = self.safe_integer(transaction, 'updated')
if timestamp == 0:
timestamp = None
currencyId = self.safe_string(transaction, 'coin')
code = self.safe_currency_code(currencyId, currency)
type = self.safe_string_lower(transaction, 'type')
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
return {
'id': self.safe_string(transaction, 'id'),
'txid': self.safe_string(transaction, 'txid'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': self.safe_string(transaction, 'wallet'),
'tag': self.safe_string(transaction, 'memo'),
'type': type,
'amount': self.safe_float(transaction, 'number'),
'currency': code,
'status': status,
'updated': timestamp,
'fee': None,
'info': transaction,
}
def parse_transactions_by_type(self, type, transactions, code=None, since=None, limit=None):
result = []
for i in range(0, len(transactions)):
transaction = self.parse_transaction(self.extend({
'type': type,
}, transactions[i]))
result.append(transaction)
return self.filter_by_currency_since_limit(result, code, since, limit)
def parse_transaction_type(self, type):
types = {
'deposit': 1,
'withdrawal': 2,
}
return self.safe_integer(types, type, type)
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
return self.fetch_transactions_for_type('deposit', code, since, limit, params)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
return self.fetch_transactions_for_type('withdrawal', code, since, limit, params)
def fetch_transactions_for_type(self, type, code=None, since=None, limit=None, params={}):
if code is None:
raise ArgumentsRequired(self.id + ' fetchTransactions() requires a currency `code` argument')
self.load_markets()
currency = self.currency(code)
request = {
'coin': currency['id'],
'type': self.parse_transaction_type(type),
}
if since is not None:
            request['startTime'] = int(since / 1000)
if limit is not None:
request['page'] = 1
request['pageSize'] = limit
response = self.tradePostDepositOrWithdraw(self.extend(request, params))
transactions = self.safe_value(response['data'], 'data', [])
return self.parse_transactions_by_type(type, transactions, code, since, limit)
def nonce(self):
currentTimestamp = self.seconds()
if currentTimestamp > self.options['lastNonceTimestamp']:
self.options['lastNonceTimestamp'] = currentTimestamp
self.options['lastNonce'] = 100000
self.options['lastNonce'] = self.sum(self.options['lastNonce'], 1)
return self.options['lastNonce']
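    # How the nonce above behaves: entering a new second resets the counter to 100000,
    # and every call signed within that same second increments it by one, so e.g.
    # three requests in one second receive the nonces 100001, 100002 and 100003.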
def sign(self, path, api='market', method='GET', params={}, headers=None, body=None):
baseUrl = self.implode_params(self.urls['api'][api], {'hostname': self.hostname})
url = baseUrl + '/' + self.capitalize(api) + '/' + path
query = None
if api == 'market':
query = self.urlencode(params)
if len(query):
url += '?' + query
else:
self.check_required_credentials()
body = self.rawencode(self.keysort(self.extend({
'apiKey': self.apiKey,
'timeStamp': self.seconds(),
'nonce': self.nonce(),
}, params)))
body += '&sign=' + self.hash(self.encode(body + self.secret))
headers = {'Content-type': 'application/x-www-form-urlencoded'}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
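    # Signing sketch for private endpoints (illustrative values only): the sorted,
    # raw-encoded parameters become the POST body, e.g.
    #     apiKey=KEY&nonce=100001&timeStamp=1533279876&...
    # and '&sign=' + hash(body + secret) is appended, where hash() is the library's
    # default digest; public "market" calls are plain GET requests with a query string.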
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
status = self.safe_string(response, 'status')
if status is not None:
feedback = self.id + ' ' + body
#
# {"status":-107,"msg":"","data":"","time":1535968848,"microtime":"0.89092200 1535968848","source":"api"}
#
if status == '200':
#
# {"status":200,"msg":"","data":-200031,"time":1535999806,"microtime":"0.85476800 1535999806","source":"api"}
#
code = self.safe_integer(response, 'data')
if code is not None:
self.throw_exactly_matched_exception(self.exceptions, code, feedback)
raise ExchangeError(feedback)
else:
return # no error
self.throw_exactly_matched_exception(self.exceptions, status, feedback)
raise ExchangeError(feedback)
| 41.981691
| 182
| 0.415114
|
from ccxt.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
class bitz(Exchange):
def describe(self):
return self.deep_extend(super(bitz, self).describe(), {
'id': 'bitz',
'name': 'Bit-Z',
'countries': ['HK'],
'rateLimit': 2000,
'version': 'v2',
'userAgent': self.userAgents['chrome'],
'has': {
'fetchTickers': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchOrders': True,
'fetchOrder': True,
'createMarketOrder': False,
'fetchDeposits': True,
'fetchWithdrawals': True,
'fetchTransactions': False,
},
'timeframes': {
'1m': '1min',
'5m': '5min',
'15m': '15min',
'30m': '30min',
'1h': '60min',
'4h': '4hour',
'1d': '1day',
'5d': '5day',
'1w': '1week',
'1M': '1mon',
},
'hostname': 'apiv2.bitz.com',
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/35862606-4f554f14-0b5d-11e8-957d-35058c504b6f.jpg',
'api': {
'market': 'https://{hostname}',
'trade': 'https://{hostname}',
'assets': 'https://{hostname}',
},
'www': 'https://www.bitz.com',
'doc': 'https://apidoc.bitz.com/en/',
'fees': 'https://www.bitz.com/fee?type=1',
'referral': 'https://u.bitz.com/register?invite_code=1429193',
},
'api': {
'market': {
'get': [
'ticker',
'depth',
                        'order',
                        'tickerall',
'kline',
'symbolList',
'currencyRate',
'currencyCoinRate',
'coinRate',
],
},
'trade': {
'post': [
'addEntrustSheet',
'cancelEntrustSheet',
'cancelAllEntrustSheet',
                        'getUserHistoryEntrustSheet',
                        'getUserNowEntrustSheet',
                        'getEntrustSheetInfo',
                        'depositOrWithdraw',
                    ],
},
'assets': {
'post': [
'getUserAssets',
],
},
},
'fees': {
'trading': {
'maker': 0.002,
'taker': 0.002,
},
'funding': {
'withdraw': {
'BTC': '0.5%',
'DKKT': '0.5%',
'ETH': 0.01,
'USDT': '0.5%',
'LTC': '0.5%',
'FCT': '0.5%',
'LSK': '0.5%',
'HXI': '0.8%',
'ZEC': '0.5%',
'DOGE': '0.5%',
'MZC': '0.5%',
'ETC': '0.5%',
'GXS': '0.5%',
'XPM': '0.5%',
'PPC': '0.5%',
'BLK': '0.5%',
'XAS': '0.5%',
'HSR': '0.5%',
'NULS': 5.0,
'VOISE': 350.0,
'PAY': 1.5,
'EOS': 0.6,
'YBCT': 35.0,
'OMG': 0.3,
'OTN': 0.4,
'BTX': '0.5%',
'QTUM': '0.5%',
'DASH': '0.5%',
'GAME': '0.5%',
'BCH': '0.5%',
'GNT': 9.0,
'SSS': 1500.0,
'ARK': '0.5%',
'PART': '0.5%',
'LEO': '0.5%',
'DGB': '0.5%',
'ZSC': 130.0,
'VIU': 350.0,
'BTG': '0.5%',
'ARN': 10.0,
'VTC': '0.5%',
'BCD': '0.5%',
'TRX': 200.0,
'HWC': '0.5%',
'UNIT': '0.5%',
'OXY': '0.5%',
'MCO': 0.3500,
'SBTC': '0.5%',
'BCX': '0.5%',
'ETF': '0.5%',
'PYLNT': 0.4000,
'XRB': '0.5%',
'ETP': '0.5%',
},
},
},
'precision': {
'amount': 8,
'price': 8,
},
'options': {
'fetchOHLCVVolume': True,
'fetchOHLCVWarning': True,
'lastNonceTimestamp': 0,
},
'commonCurrencies': {
'BOX': 'BOX Token',
'LEO': 'LeoCoin',
'XRB': 'NANO',
'PXC': 'Pixiecoin',
'VTC': 'VoteCoin',
'TTC': 'TimesChain',
},
'exceptions': {
                '-102': ExchangeError,
                '-103': AuthenticationError,
                '-104': ExchangeNotAvailable,
                '-105': AuthenticationError,
                '-106': ExchangeNotAvailable,
                '-109': AuthenticationError,
                '-110': DDoSProtection,
                '-111': PermissionDenied,
                '-112': OnMaintenance,
                '-114': RateLimitExceeded,
                '-117': AuthenticationError,
                '-100015': AuthenticationError,
                '-100044': ExchangeError,
                '-100101': ExchangeError,
                '-100201': ExchangeError,
                '-100301': ExchangeError,
                '-100401': ExchangeError,
                '-100302': ExchangeError,
                '-100303': ExchangeError,
                '-200003': AuthenticationError,
                '-200005': PermissionDenied,
                '-200025': ExchangeNotAvailable,
                '-200027': InvalidOrder,
                '-200028': InvalidOrder,
                '-200029': InvalidOrder,
                '-200030': InvalidOrder,
                '-200031': InsufficientFunds,
                '-200032': ExchangeError,
                '-200033': ExchangeError,
                '-200034': OrderNotFound,
                '-200035': OrderNotFound,
                '-200037': InvalidOrder,
                '-200038': ExchangeError,
                '-200055': OrderNotFound,
                '-300069': AuthenticationError,
                '-300101': ExchangeError,
                '-300102': InvalidOrder,
                '-300103': AuthenticationError,
                '-301001': ExchangeNotAvailable,
            },
})
def fetch_markets(self, params={}):
response = self.marketGetSymbolList(params)
markets = self.safe_value(response, 'data')
ids = list(markets.keys())
result = []
for i in range(0, len(ids)):
id = ids[i]
market = markets[id]
numericId = self.safe_string(market, 'id')
baseId = self.safe_string(market, 'coinFrom')
quoteId = self.safe_string(market, 'coinTo')
base = baseId.upper()
quote = quoteId.upper()
base = self.safe_currency_code(base)
quote = self.safe_currency_code(quote)
symbol = base + '/' + quote
precision = {
'amount': self.safe_integer(market, 'numberFloat'),
'price': self.safe_integer(market, 'priceFloat'),
}
result.append({
'info': market,
'id': id,
'numericId': numericId,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'precision': precision,
'limits': {
'amount': {
'min': self.safe_float(market, 'minTrade'),
'max': self.safe_float(market, 'maxTrade'),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
})
return result
def fetch_balance(self, params={}):
self.load_markets()
response = self.assetsPostGetUserAssets(params)
balances = self.safe_value(response['data'], 'info')
result = {'info': response}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'name')
code = self.safe_currency_code(currencyId)
account = self.account()
account['used'] = self.safe_float(balance, 'lock')
account['total'] = self.safe_float(balance, 'num')
account['free'] = self.safe_float(balance, 'over')
result[code] = account
return self.parse_balance(result)
def parse_ticker(self, ticker, market=None):
timestamp = None
symbol = None
if market is None:
marketId = self.safe_string(ticker, 'symbol')
market = self.safe_value(self.markets_by_id, marketId)
if market is not None:
symbol = market['symbol']
last = self.safe_float(ticker, 'now')
open = self.safe_float(ticker, 'open')
change = None
average = None
if last is not None and open is not None:
change = last - open
average = self.sum(last, open) / 2
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'bidPrice'),
'bidVolume': self.safe_float(ticker, 'bidQty'),
'ask': self.safe_float(ticker, 'askPrice'),
'askVolume': self.safe_float(ticker, 'askQty'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': self.safe_float(ticker, 'priceChange24h'),
'average': average,
'baseVolume': self.safe_float(ticker, 'volume'),
'quoteVolume': self.safe_float(ticker, 'quoteVolume'),
'info': ticker,
}
def parse_microtime(self, microtime):
if microtime is None:
return microtime
parts = microtime.split(' ')
milliseconds = float(parts[0])
seconds = int(parts[1])
total = self.sum(seconds, milliseconds)
return int(total * 1000)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.marketGetTicker(self.extend(request, params))
ticker = self.parse_ticker(response['data'], market)
timestamp = self.parse_microtime(self.safe_string(response, 'microtime'))
return self.extend(ticker, {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
})
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
request = {}
if symbols is not None:
ids = self.market_ids(symbols)
request['symbols'] = ','.join(ids)
response = self.marketGetTickerall(self.extend(request, params))
tickers = self.safe_value(response, 'data')
timestamp = self.parse_microtime(self.safe_string(response, 'microtime'))
result = {}
ids = list(tickers.keys())
for i in range(0, len(ids)):
id = ids[i]
ticker = tickers[id]
market = None
if id in self.markets_by_id:
market = self.markets_by_id[id]
ticker = self.parse_ticker(tickers[id], market)
symbol = ticker['symbol']
if symbol is None:
if market is not None:
symbol = market['symbol']
else:
baseId, quoteId = id.split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if symbol is not None:
result[symbol] = self.extend(ticker, {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
})
return result
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
request = {
'symbol': self.market_id(symbol),
}
response = self.marketGetDepth(self.extend(request, params))
orderbook = self.safe_value(response, 'data')
timestamp = self.parse_microtime(self.safe_string(response, 'microtime'))
return self.parse_order_book(orderbook, timestamp)
def parse_trade(self, trade, market=None):
id = self.safe_string(trade, 'id')
timestamp = self.safe_timestamp(trade, 'T')
symbol = None
if market is not None:
symbol = market['symbol']
price = self.safe_float(trade, 'p')
amount = self.safe_float(trade, 'n')
cost = None
if price is not None:
if amount is not None:
cost = self.price_to_precision(symbol, amount * price)
side = self.safe_string(trade, 's')
return {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': None,
'type': 'limit',
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
'info': trade,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.marketGetOrder(self.extend(request, params))
return self.parse_trades(response['data'], market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return [
self.safe_integer(ohlcv, 'time'),
self.safe_float(ohlcv, 'open'),
self.safe_float(ohlcv, 'high'),
self.safe_float(ohlcv, 'low'),
self.safe_float(ohlcv, 'close'),
self.safe_float(ohlcv, 'volume'),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
duration = self.parse_timeframe(timeframe) * 1000
market = self.market(symbol)
request = {
'symbol': market['id'],
'resolution': self.timeframes[timeframe],
}
if limit is not None:
            request['size'] = min(limit, 300)
            if since is not None:
request['to'] = self.sum(since, limit * duration * 1000)
else:
if since is not None:
raise ArgumentsRequired(self.id + ' fetchOHLCV requires a limit argument if the since argument is specified')
response = self.marketGetKline(self.extend(request, params))
data = self.safe_value(response, 'data', {})
bars = self.safe_value(data, 'bars', [])
return self.parse_ohlcvs(bars, market, timeframe, since, limit)
def parse_order_status(self, status):
statuses = {
'0': 'open',
            '1': 'open',
            '2': 'closed',
            '3': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
id = self.safe_string(order, 'id')
symbol = None
if market is None:
baseId = self.safe_string(order, 'coinFrom')
quoteId = self.safe_string(order, 'coinTo')
if (baseId is not None) and (quoteId is not None):
marketId = baseId + '_' + quoteId
if marketId in self.markets_by_id:
market = self.safe_value(self.markets_by_id, marketId)
else:
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if market is not None:
symbol = market['symbol']
side = self.safe_string(order, 'flag')
if side is not None:
side = 'sell' if (side == 'sale') else 'buy'
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'number')
remaining = self.safe_float(order, 'numberOver')
filled = self.safe_float(order, 'numberDeal')
timestamp = self.safe_integer(order, 'timestamp')
if timestamp is None:
timestamp = self.safe_timestamp(order, 'created')
cost = self.safe_float(order, 'orderTotalPrice')
if price is not None:
if filled is not None:
cost = filled * price
status = self.parse_order_status(self.safe_string(order, 'status'))
return {
'id': id,
'clientOrderId': None,
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': 'limit',
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'trades': None,
'fee': None,
'info': order,
'average': None,
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
if type != 'limit':
raise ExchangeError(self.id + ' createOrder allows limit orders only')
market = self.market(symbol)
orderType = '1' if (side == 'buy') else '2'
if not self.password:
raise ExchangeError(self.id + ' createOrder() requires you to set exchange.password = "YOUR_TRADING_PASSWORD"(a trade password is NOT THE SAME as your login password)')
request = {
'symbol': market['id'],
'type': orderType,
'price': self.price_to_precision(symbol, price),
'number': self.amount_to_precision(symbol, amount),
'tradePwd': self.password,
}
response = self.tradePostAddEntrustSheet(self.extend(request, params))
timestamp = self.parse_microtime(self.safe_string(response, 'microtime'))
order = self.extend({
'timestamp': timestamp,
}, response['data'])
return self.parse_order(order, market)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'entrustSheetId': id,
}
response = self.tradePostCancelEntrustSheet(self.extend(request, params))
return response
def cancel_orders(self, ids, symbol=None, params={}):
self.load_markets()
request = {
'ids': ','.join(ids),
}
response = self.tradePostCancelEntrustSheet(self.extend(request, params))
return response
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'entrustSheetId': id,
}
response = self.tradePostGetEntrustSheetInfo(self.extend(request, params))
return self.parse_order(response['data'])
def fetch_orders_with_method(self, method, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'coinFrom': market['baseId'],
'coinTo': market['quoteId'],
}
if limit is not None:
request['page'] = 1
request['pageSize'] = limit
if since is not None:
request['startTime'] = int(since / 1000)
response = getattr(self, method)(self.extend(request, params))
orders = self.safe_value(response['data'], 'data', [])
return self.parse_orders(orders, None, since, limit)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_with_method('tradePostGetUserHistoryEntrustSheet', symbol, since, limit, params)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_with_method('tradePostGetUserNowEntrustSheet', symbol, since, limit, params)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_with_method('tradePostGetUserHistoryEntrustSheet', symbol, since, limit, params)
def parse_transaction_status(self, status):
statuses = {
'1': 'pending',
'2': 'pending',
'3': 'pending',
'4': 'ok',
'5': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
timestamp = self.safe_integer(transaction, 'updated')
if timestamp == 0:
timestamp = None
currencyId = self.safe_string(transaction, 'coin')
code = self.safe_currency_code(currencyId, currency)
type = self.safe_string_lower(transaction, 'type')
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
return {
'id': self.safe_string(transaction, 'id'),
'txid': self.safe_string(transaction, 'txid'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': self.safe_string(transaction, 'wallet'),
'tag': self.safe_string(transaction, 'memo'),
'type': type,
'amount': self.safe_float(transaction, 'number'),
'currency': code,
'status': status,
'updated': timestamp,
'fee': None,
'info': transaction,
}
def parse_transactions_by_type(self, type, transactions, code=None, since=None, limit=None):
result = []
for i in range(0, len(transactions)):
transaction = self.parse_transaction(self.extend({
'type': type,
}, transactions[i]))
result.append(transaction)
return self.filter_by_currency_since_limit(result, code, since, limit)
def parse_transaction_type(self, type):
types = {
'deposit': 1,
'withdrawal': 2,
}
return self.safe_integer(types, type, type)
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
return self.fetch_transactions_for_type('deposit', code, since, limit, params)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
return self.fetch_transactions_for_type('withdrawal', code, since, limit, params)
def fetch_transactions_for_type(self, type, code=None, since=None, limit=None, params={}):
if code is None:
raise ArgumentsRequired(self.id + ' fetchTransactions() requires a currency `code` argument')
self.load_markets()
currency = self.currency(code)
request = {
'coin': currency['id'],
'type': self.parse_transaction_type(type),
}
if since is not None:
            request['startTime'] = int(since / 1000)
if limit is not None:
request['page'] = 1
request['pageSize'] = limit
response = self.tradePostDepositOrWithdraw(self.extend(request, params))
transactions = self.safe_value(response['data'], 'data', [])
return self.parse_transactions_by_type(type, transactions, code, since, limit)
def nonce(self):
currentTimestamp = self.seconds()
if currentTimestamp > self.options['lastNonceTimestamp']:
self.options['lastNonceTimestamp'] = currentTimestamp
self.options['lastNonce'] = 100000
self.options['lastNonce'] = self.sum(self.options['lastNonce'], 1)
return self.options['lastNonce']
def sign(self, path, api='market', method='GET', params={}, headers=None, body=None):
baseUrl = self.implode_params(self.urls['api'][api], {'hostname': self.hostname})
url = baseUrl + '/' + self.capitalize(api) + '/' + path
query = None
if api == 'market':
query = self.urlencode(params)
if len(query):
url += '?' + query
else:
self.check_required_credentials()
body = self.rawencode(self.keysort(self.extend({
'apiKey': self.apiKey,
'timeStamp': self.seconds(),
'nonce': self.nonce(),
}, params)))
body += '&sign=' + self.hash(self.encode(body + self.secret))
headers = {'Content-type': 'application/x-www-form-urlencoded'}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
            return
        status = self.safe_string(response, 'status')
if status is not None:
feedback = self.id + ' ' + body
if status == '200':
code = self.safe_integer(response, 'data')
if code is not None:
self.throw_exactly_matched_exception(self.exceptions, code, feedback)
raise ExchangeError(feedback)
else:
                    return
            self.throw_exactly_matched_exception(self.exceptions, status, feedback)
raise ExchangeError(feedback)
| true
| true
|
f70c1e0a237bb4b5225ab23400b8c3d6fa0b725c
| 3,648
|
py
|
Python
|
_scripts/nblint.py
|
dfreeman06/ipyradiant
|
6298889eb0d28c0dda01c4fc9d422814b9858878
|
[
"BSD-3-Clause"
] | null | null | null |
_scripts/nblint.py
|
dfreeman06/ipyradiant
|
6298889eb0d28c0dda01c4fc9d422814b9858878
|
[
"BSD-3-Clause"
] | null | null | null |
_scripts/nblint.py
|
dfreeman06/ipyradiant
|
6298889eb0d28c0dda01c4fc9d422814b9858878
|
[
"BSD-3-Clause"
] | null | null | null |
""" linter and formatter of notebooks
"""
# Copyright (c) 2020 ipyradiant contributors.
# Distributed under the terms of the Modified BSD License.
import json
import shutil
import subprocess
import sys
from hashlib import sha256
from pathlib import Path
import black
import isort
import nbformat
from . import project as P
NODE = [shutil.which("node") or shutil.which("node.exe") or shutil.which("node.cmd")]
NB_METADATA_KEYS = ["kernelspec", "language_info"]
def blacken(source):
"""apply black to a source string"""
return black.format_str(source, mode=black.FileMode(line_length=88))
def nblint_one(nb_node):
"""format/lint one notebook"""
changes = 0
has_empty = 0
nb_metadata_keys = list(nb_node.metadata.keys())
for key in nb_metadata_keys:
if key not in NB_METADATA_KEYS:
nb_node.metadata.pop(key)
for cell in nb_node.cells:
cell_type = cell["cell_type"]
source = "".join(cell["source"])
if not source.strip():
has_empty += 1
if cell_type == "markdown":
args = [
*P.PRETTIER,
"--stdin-filepath",
"foo.md",
"--prose-wrap",
"always",
]
prettier = subprocess.Popen(
list(map(str, args)),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
)
out, _err = prettier.communicate(source.encode("utf-8"))
new = out.decode("utf-8").rstrip()
if new != source:
cell["source"] = new.splitlines(True)
changes += 1
elif cell_type == "code":
if cell["outputs"] or cell["execution_count"]:
cell["outputs"] = []
cell["execution_count"] = None
changes += 1
if [line for line in source.splitlines() if line.strip().startswith("!")]:
continue
if source.startswith("%"):
continue
new = isort.SortImports(file_contents=source).output
new = blacken(new).rstrip()
if new != source:
cell["source"] = new.splitlines(True)
changes += 1
if has_empty:
changes += 1
nb_node.cells = [
cell for cell in nb_node.cells if "".join(cell["source"]).strip()
]
return nb_node
def nb_hash(nb_text):
"""hash one notebook"""
return sha256(nb_text.encode("utf-8")).hexdigest()
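# The sha256 digest above doubles as a cache key: nblint() stores it per notebook in
# P.NBLINT_HASHES, and any notebook whose current hash matches the recorded one is
# skipped on the next run, so only changed notebooks get re-formatted.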
def nblint(nb_paths):
"""lint a number of notebook paths"""
nb_hashes = {}
if P.NBLINT_HASHES.exists():
nb_hashes = json.loads(P.NBLINT_HASHES.read_text())
len_paths = len(nb_paths)
for i, nb_path in enumerate(nb_paths):
hash_key = f"{nb_path}"
log_hash = nb_hashes.get(hash_key)
nb_text = nb_path.read_text()
pre_hash = nb_hash(nb_text)
print(f"[{i + 1} of {len_paths}] {nb_path}")
if log_hash == pre_hash:
continue
nb_node = nblint_one(nbformat.reads(nb_text, 4))
with nb_path.open("w") as fpt:
nbformat.write(nb_node, fpt)
post_hash = nb_hash(nb_path.read_text())
if post_hash != pre_hash:
print("\tformatted")
else:
print("\tno change")
nb_hashes[hash_key] = post_hash
P.NBLINT_HASHES.parent.mkdir(exist_ok=True, parents=True)
P.NBLINT_HASHES.write_text(json.dumps(nb_hashes, indent=2, sort_keys=True))
return 0
if __name__ == "__main__":
sys.exit(nblint([Path(p) for p in sys.argv[1:]] or P.EXAMPLE_IPYNB))
| 27.847328
| 86
| 0.573739
|
import json
import shutil
import subprocess
import sys
from hashlib import sha256
from pathlib import Path
import black
import isort
import nbformat
from . import project as P
NODE = [shutil.which("node") or shutil.which("node.exe") or shutil.which("node.cmd")]
NB_METADATA_KEYS = ["kernelspec", "language_info"]
def blacken(source):
return black.format_str(source, mode=black.FileMode(line_length=88))
def nblint_one(nb_node):
changes = 0
has_empty = 0
nb_metadata_keys = list(nb_node.metadata.keys())
for key in nb_metadata_keys:
if key not in NB_METADATA_KEYS:
nb_node.metadata.pop(key)
for cell in nb_node.cells:
cell_type = cell["cell_type"]
source = "".join(cell["source"])
if not source.strip():
has_empty += 1
if cell_type == "markdown":
args = [
*P.PRETTIER,
"--stdin-filepath",
"foo.md",
"--prose-wrap",
"always",
]
prettier = subprocess.Popen(
list(map(str, args)),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
)
out, _err = prettier.communicate(source.encode("utf-8"))
new = out.decode("utf-8").rstrip()
if new != source:
cell["source"] = new.splitlines(True)
changes += 1
elif cell_type == "code":
if cell["outputs"] or cell["execution_count"]:
cell["outputs"] = []
cell["execution_count"] = None
changes += 1
if [line for line in source.splitlines() if line.strip().startswith("!")]:
continue
if source.startswith("%"):
continue
new = isort.SortImports(file_contents=source).output
new = blacken(new).rstrip()
if new != source:
cell["source"] = new.splitlines(True)
changes += 1
if has_empty:
changes += 1
nb_node.cells = [
cell for cell in nb_node.cells if "".join(cell["source"]).strip()
]
return nb_node
def nb_hash(nb_text):
return sha256(nb_text.encode("utf-8")).hexdigest()
def nblint(nb_paths):
nb_hashes = {}
if P.NBLINT_HASHES.exists():
nb_hashes = json.loads(P.NBLINT_HASHES.read_text())
len_paths = len(nb_paths)
for i, nb_path in enumerate(nb_paths):
hash_key = f"{nb_path}"
log_hash = nb_hashes.get(hash_key)
nb_text = nb_path.read_text()
pre_hash = nb_hash(nb_text)
print(f"[{i + 1} of {len_paths}] {nb_path}")
if log_hash == pre_hash:
continue
nb_node = nblint_one(nbformat.reads(nb_text, 4))
with nb_path.open("w") as fpt:
nbformat.write(nb_node, fpt)
post_hash = nb_hash(nb_path.read_text())
if post_hash != pre_hash:
print("\tformatted")
else:
print("\tno change")
nb_hashes[hash_key] = post_hash
P.NBLINT_HASHES.parent.mkdir(exist_ok=True, parents=True)
P.NBLINT_HASHES.write_text(json.dumps(nb_hashes, indent=2, sort_keys=True))
return 0
if __name__ == "__main__":
sys.exit(nblint([Path(p) for p in sys.argv[1:]] or P.EXAMPLE_IPYNB))
| true
| true
|
f70c1e5e6020c7c8e558bc2ed17aaf6cfa5c8b3f
| 812
|
py
|
Python
|
CraftProtocol/Protocol/v1_12_2/Packet/Play/KeepAliveServerPacket.py
|
Toranktto/CraftProtocol
|
a6f4a67756c3868820ab76df5e148d76b020d990
|
[
"MIT"
] | 21
|
2018-05-12T20:18:02.000Z
|
2022-02-18T17:33:50.000Z
|
CraftProtocol/Protocol/v1_12_2/Packet/Play/KeepAliveServerPacket.py
|
Toranktto/CraftProtocol
|
a6f4a67756c3868820ab76df5e148d76b020d990
|
[
"MIT"
] | 1
|
2018-06-23T09:13:39.000Z
|
2018-06-27T01:22:27.000Z
|
CraftProtocol/Protocol/v1_12_2/Packet/Play/KeepAliveServerPacket.py
|
Toranktto/CraftProtocol
|
a6f4a67756c3868820ab76df5e148d76b020d990
|
[
"MIT"
] | 2
|
2018-05-19T21:36:00.000Z
|
2020-10-02T03:23:13.000Z
|
#!/usr/bin/env python
from CraftProtocol.Protocol.Packet.BasePacket import BasePacket
from CraftProtocol.Protocol.Packet.PacketDirection import PacketDirection
from CraftProtocol.StreamIO import StreamIO
class KeepAliveServerPacket(BasePacket):
PACKET_ID = 0x0B
PACKET_DIRECTION = PacketDirection.SERVERBOUND
def __init__(self, keepalive_id):
BasePacket.__init__(self)
self._id = long(keepalive_id)
def get_id(self):
return self._id
def set_id(self, keepalive_id):
self._id = long(keepalive_id)
@staticmethod
def write(stream, packet):
StreamIO.write_long(stream, packet.get_id())
@staticmethod
def read(stream, packet_size):
keepalive_id = StreamIO.read_long(stream)
return KeepAliveServerPacket(keepalive_id)
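    # Wire format note: the packet body is a single long field, written by write()
    # via StreamIO.write_long() and read back symmetrically in read(); the long()
    # casts above assume Python 2.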
| 26.193548
| 73
| 0.730296
|
from CraftProtocol.Protocol.Packet.BasePacket import BasePacket
from CraftProtocol.Protocol.Packet.PacketDirection import PacketDirection
from CraftProtocol.StreamIO import StreamIO
class KeepAliveServerPacket(BasePacket):
PACKET_ID = 0x0B
PACKET_DIRECTION = PacketDirection.SERVERBOUND
def __init__(self, keepalive_id):
BasePacket.__init__(self)
self._id = long(keepalive_id)
def get_id(self):
return self._id
def set_id(self, keepalive_id):
self._id = long(keepalive_id)
@staticmethod
def write(stream, packet):
StreamIO.write_long(stream, packet.get_id())
@staticmethod
def read(stream, packet_size):
keepalive_id = StreamIO.read_long(stream)
return KeepAliveServerPacket(keepalive_id)
| true
| true
|
f70c1fe0165b3d3a65f17e6891bf1b312cb2442d
| 7,155
|
py
|
Python
|
src/oci/log_analytics/models/classify_command_descriptor.py
|
xjuarez/oci-python-sdk
|
3c1604e4e212008fb6718e2f68cdb5ef71fd5793
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 3
|
2020-09-10T22:09:45.000Z
|
2021-12-24T17:00:07.000Z
|
src/oci/log_analytics/models/classify_command_descriptor.py
|
xjuarez/oci-python-sdk
|
3c1604e4e212008fb6718e2f68cdb5ef71fd5793
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/log_analytics/models/classify_command_descriptor.py
|
xjuarez/oci-python-sdk
|
3c1604e4e212008fb6718e2f68cdb5ef71fd5793
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .abstract_command_descriptor import AbstractCommandDescriptor
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ClassifyCommandDescriptor(AbstractCommandDescriptor):
"""
Command descriptor for querylanguage CLASSIFY command.
"""
def __init__(self, **kwargs):
"""
Initializes a new ClassifyCommandDescriptor object with values from keyword arguments. The default value of the :py:attr:`~oci.log_analytics.models.ClassifyCommandDescriptor.name` attribute
of this class is ``CLASSIFY`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param name:
The value to assign to the name property of this ClassifyCommandDescriptor.
Allowed values for this property are: "COMMAND", "SEARCH", "STATS", "GEO_STATS", "TIME_STATS", "SORT", "FIELDS", "ADD_FIELDS", "LINK", "LINK_DETAILS", "CLUSTER", "CLUSTER_DETAILS", "CLUSTER_SPLIT", "EVAL", "EXTRACT", "JSON_EXTRACT", "XML_EXTRACT", "EVENT_STATS", "BUCKET", "CLASSIFY", "TOP", "BOTTOM", "HEAD", "TAIL", "FIELD_SUMMARY", "REGEX", "RENAME", "TIME_COMPARE", "WHERE", "CLUSTER_COMPARE", "DELETE", "DELTA", "DISTINCT", "SEARCH_LOOKUP", "LOOKUP", "DEMO_MODE", "MACRO", "MULTI_SEARCH", "HIGHLIGHT", "HIGHLIGHT_ROWS", "HIGHLIGHT_GROUPS", "CREATE_VIEW", "MAP", "NLP", "COMPARE"
:type name: str
:param display_query_string:
The value to assign to the display_query_string property of this ClassifyCommandDescriptor.
:type display_query_string: str
:param internal_query_string:
The value to assign to the internal_query_string property of this ClassifyCommandDescriptor.
:type internal_query_string: str
:param category:
The value to assign to the category property of this ClassifyCommandDescriptor.
:type category: str
:param referenced_fields:
The value to assign to the referenced_fields property of this ClassifyCommandDescriptor.
:type referenced_fields: list[oci.log_analytics.models.AbstractField]
:param declared_fields:
The value to assign to the declared_fields property of this ClassifyCommandDescriptor.
:type declared_fields: list[oci.log_analytics.models.AbstractField]
:param top_count:
The value to assign to the top_count property of this ClassifyCommandDescriptor.
:type top_count: int
:param bottom_count:
The value to assign to the bottom_count property of this ClassifyCommandDescriptor.
:type bottom_count: int
:param correlate:
The value to assign to the correlate property of this ClassifyCommandDescriptor.
:type correlate: list[oci.log_analytics.models.FieldsAddRemoveField]
"""
self.swagger_types = {
'name': 'str',
'display_query_string': 'str',
'internal_query_string': 'str',
'category': 'str',
'referenced_fields': 'list[AbstractField]',
'declared_fields': 'list[AbstractField]',
'top_count': 'int',
'bottom_count': 'int',
'correlate': 'list[FieldsAddRemoveField]'
}
self.attribute_map = {
'name': 'name',
'display_query_string': 'displayQueryString',
'internal_query_string': 'internalQueryString',
'category': 'category',
'referenced_fields': 'referencedFields',
'declared_fields': 'declaredFields',
'top_count': 'topCount',
'bottom_count': 'bottomCount',
'correlate': 'correlate'
}
self._name = None
self._display_query_string = None
self._internal_query_string = None
self._category = None
self._referenced_fields = None
self._declared_fields = None
self._top_count = None
self._bottom_count = None
self._correlate = None
self._name = 'CLASSIFY'
@property
def top_count(self):
"""
Gets the top_count of this ClassifyCommandDescriptor.
Value specified in CLASSIFY command in queryString if set limits the results returned to top N.
:return: The top_count of this ClassifyCommandDescriptor.
:rtype: int
"""
return self._top_count
@top_count.setter
def top_count(self, top_count):
"""
Sets the top_count of this ClassifyCommandDescriptor.
Value specified in CLASSIFY command in queryString if set limits the results returned to top N.
:param top_count: The top_count of this ClassifyCommandDescriptor.
:type: int
"""
self._top_count = top_count
@property
def bottom_count(self):
"""
Gets the bottom_count of this ClassifyCommandDescriptor.
Value specified in CLASSIFY command in queryString if set limits the results returned to bottom N.
:return: The bottom_count of this ClassifyCommandDescriptor.
:rtype: int
"""
return self._bottom_count
@bottom_count.setter
def bottom_count(self, bottom_count):
"""
Sets the bottom_count of this ClassifyCommandDescriptor.
Value specified in CLASSIFY command in queryString if set limits the results returned to bottom N.
:param bottom_count: The bottom_count of this ClassifyCommandDescriptor.
:type: int
"""
self._bottom_count = bottom_count
@property
def correlate(self):
"""
Gets the correlate of this ClassifyCommandDescriptor.
Fields specified in CLASSIFY command in queryString if set include / exclude fields in correlate results.
:return: The correlate of this ClassifyCommandDescriptor.
:rtype: list[oci.log_analytics.models.FieldsAddRemoveField]
"""
return self._correlate
@correlate.setter
def correlate(self, correlate):
"""
Sets the correlate of this ClassifyCommandDescriptor.
Fields specified in CLASSIFY command in queryString if set include / exclude fields in correlate results.
:param correlate: The correlate of this ClassifyCommandDescriptor.
:type: list[oci.log_analytics.models.FieldsAddRemoveField]
"""
self._correlate = correlate
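    # Minimal usage sketch (hypothetical values): the @init_model_state_from_kwargs
    # decorator maps the documented keyword arguments onto these properties, e.g.
    #     ClassifyCommandDescriptor(top_count=10, bottom_count=5)
    # yields an instance whose name is fixed to 'CLASSIFY' and whose remaining
    # fields stay None until set.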
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 40.196629
| 595
| 0.672397
|
from .abstract_command_descriptor import AbstractCommandDescriptor
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ClassifyCommandDescriptor(AbstractCommandDescriptor):
def __init__(self, **kwargs):
self.swagger_types = {
'name': 'str',
'display_query_string': 'str',
'internal_query_string': 'str',
'category': 'str',
'referenced_fields': 'list[AbstractField]',
'declared_fields': 'list[AbstractField]',
'top_count': 'int',
'bottom_count': 'int',
'correlate': 'list[FieldsAddRemoveField]'
}
self.attribute_map = {
'name': 'name',
'display_query_string': 'displayQueryString',
'internal_query_string': 'internalQueryString',
'category': 'category',
'referenced_fields': 'referencedFields',
'declared_fields': 'declaredFields',
'top_count': 'topCount',
'bottom_count': 'bottomCount',
'correlate': 'correlate'
}
self._name = None
self._display_query_string = None
self._internal_query_string = None
self._category = None
self._referenced_fields = None
self._declared_fields = None
self._top_count = None
self._bottom_count = None
self._correlate = None
self._name = 'CLASSIFY'
@property
def top_count(self):
return self._top_count
@top_count.setter
def top_count(self, top_count):
self._top_count = top_count
@property
def bottom_count(self):
return self._bottom_count
@bottom_count.setter
def bottom_count(self, bottom_count):
self._bottom_count = bottom_count
@property
def correlate(self):
return self._correlate
@correlate.setter
def correlate(self, correlate):
self._correlate = correlate
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
f70c229a3106b39a3efca6e2d4e211e7c73f62f0
| 1,626
|
py
|
Python
|
projects/pub_utils.py
|
ChameleonCloud/portal
|
92a06fb926dc36e997b94fb8dcd22b7e0d24d3ee
|
[
"Apache-2.0"
] | 3
|
2015-08-04T20:53:41.000Z
|
2020-02-14T22:58:20.000Z
|
projects/pub_utils.py
|
ChameleonCloud/portal
|
92a06fb926dc36e997b94fb8dcd22b7e0d24d3ee
|
[
"Apache-2.0"
] | 103
|
2015-01-15T14:21:00.000Z
|
2022-03-31T19:14:20.000Z
|
projects/pub_utils.py
|
ChameleonCloud/portal
|
92a06fb926dc36e997b94fb8dcd22b7e0d24d3ee
|
[
"Apache-2.0"
] | 4
|
2016-02-22T16:48:20.000Z
|
2021-01-08T17:13:21.000Z
|
import datetime
import re
class PublicationUtils:
@staticmethod
def get_month(bibtex_entry):
month = bibtex_entry.get("month")
m = None
try:
m = int(month)
except Exception:
pass
try:
m = datetime.datetime.strptime(month, "%b").month
except Exception:
pass
try:
m = datetime.datetime.strptime(month, "%B").month
except Exception:
pass
return m
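    # get_month() tolerates the common BibTeX month spellings: "6", "Jun" and "June"
    # all resolve to 6, while a missing or unparseable month falls through every
    # try/except block and returns None.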
@staticmethod
def get_forum(bibtex_entry):
forum = []
if "journal" in bibtex_entry:
forum.append(bibtex_entry["journal"])
if "booktitle" in bibtex_entry:
forum.append(bibtex_entry["booktitle"])
if "series" in bibtex_entry:
forum.append(bibtex_entry["series"])
if "publisher" in bibtex_entry:
forum.append(bibtex_entry["publisher"])
if "school" in bibtex_entry:
forum.append(bibtex_entry["school"])
if "institution" in bibtex_entry:
forum.append(bibtex_entry["institution"])
if "address" in bibtex_entry:
forum.append(bibtex_entry["address"])
return ",".join(forum)
@staticmethod
def get_link(bibtex_entry):
if "note" in bibtex_entry:
m = re.search("^\\\\url{(.+?)}$", bibtex_entry["note"])
if m:
return m.group(1)
if "howpublished" in bibtex_entry:
m = re.search("^\\\\url{(.+?)}$", bibtex_entry["howpublished"])
if m:
return m.group(1)
return None
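    # Example (hypothetical entry): a field stored as "\\url{https://example.org/paper}"
    # in either 'note' or 'howpublished' yields "https://example.org/paper"; anything
    # that does not match the \url{...} pattern results in None.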
| 29.563636
| 75
| 0.54797
|
import datetime
import re
class PublicationUtils:
@staticmethod
def get_month(bibtex_entry):
month = bibtex_entry.get("month")
m = None
try:
m = int(month)
except Exception:
pass
try:
m = datetime.datetime.strptime(month, "%b").month
except Exception:
pass
try:
m = datetime.datetime.strptime(month, "%B").month
except Exception:
pass
return m
@staticmethod
def get_forum(bibtex_entry):
forum = []
if "journal" in bibtex_entry:
forum.append(bibtex_entry["journal"])
if "booktitle" in bibtex_entry:
forum.append(bibtex_entry["booktitle"])
if "series" in bibtex_entry:
forum.append(bibtex_entry["series"])
if "publisher" in bibtex_entry:
forum.append(bibtex_entry["publisher"])
if "school" in bibtex_entry:
forum.append(bibtex_entry["school"])
if "institution" in bibtex_entry:
forum.append(bibtex_entry["institution"])
if "address" in bibtex_entry:
forum.append(bibtex_entry["address"])
return ",".join(forum)
@staticmethod
def get_link(bibtex_entry):
if "note" in bibtex_entry:
m = re.search("^\\\\url{(.+?)}$", bibtex_entry["note"])
if m:
return m.group(1)
if "howpublished" in bibtex_entry:
m = re.search("^\\\\url{(.+?)}$", bibtex_entry["howpublished"])
if m:
return m.group(1)
return None
| true
| true
|
f70c238c07f15da6c86f56503d1173d7e0edad0d
| 498
|
py
|
Python
|
blog/migrations/0019_user_avatar.py
|
dijiudu/django_blog-django2.0.3
|
b18889c4b9053b2a39c734c10a3bed84554d4303
|
[
"MIT"
] | 137
|
2017-05-05T11:57:11.000Z
|
2021-01-06T18:56:56.000Z
|
blog/migrations/0019_user_avatar.py
|
dijiudu/django_blog-django2.0.3
|
b18889c4b9053b2a39c734c10a3bed84554d4303
|
[
"MIT"
] | 10
|
2018-05-20T06:36:10.000Z
|
2022-03-11T23:19:21.000Z
|
blog/migrations/0019_user_avatar.py
|
wangchaocc21/django_blog
|
3fe8215e627960e933abe9548eda123987e94f13
|
[
"MIT"
] | 24
|
2017-06-19T18:08:59.000Z
|
2019-02-02T04:15:13.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-25 08:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0018_auto_20170625_1616'),
]
operations = [
migrations.AddField(
model_name='user',
name='avatar',
field=models.ImageField(blank=True, default='avatar/default.png', upload_to='avatar/%Y/%m'),
),
]
| 23.714286
| 104
| 0.620482
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0018_auto_20170625_1616'),
]
operations = [
migrations.AddField(
model_name='user',
name='avatar',
field=models.ImageField(blank=True, default='avatar/default.png', upload_to='avatar/%Y/%m'),
),
]
| true
| true
|
f70c259df5a8437d1f62a29d06994107e133cb19
| 7,245
|
py
|
Python
|
seamseg/utils/coco_ap.py
|
gladcolor/seamseg
|
9e6c7e2828f32b311a7b0c16b279ac194e8aaf94
|
[
"BSD-3-Clause"
] | 2
|
2021-01-11T08:57:40.000Z
|
2021-01-11T08:57:44.000Z
|
seamseg/utils/coco_ap.py
|
gladcolor/seamseg
|
9e6c7e2828f32b311a7b0c16b279ac194e8aaf94
|
[
"BSD-3-Clause"
] | null | null | null |
seamseg/utils/coco_ap.py
|
gladcolor/seamseg
|
9e6c7e2828f32b311a7b0c16b279ac194e8aaf94
|
[
"BSD-3-Clause"
] | 1
|
2020-09-28T07:55:50.000Z
|
2020-09-28T07:55:50.000Z
|
import json
import tempfile
import time
from collections import defaultdict
from os import path, remove
import numpy as np
import torch
import torch.distributed as dist
from PIL import Image
from pycocotools.coco import COCO as _COCO
from pycocotools.cocoeval import COCOeval
from pycocotools.mask import encode as mask_encode
from .bbx import invert_roi_bbx, extract_boxes
from .parallel import PackedSequence
from .roi_sampling import roi_sampling
def process_prediction(bbx_pred, cls_pred, obj_pred, msk_pred, img_size, idx, original_size):
# Move everything to CPU
bbx_pred, cls_pred, obj_pred = (t.cpu() for t in (bbx_pred, cls_pred, obj_pred))
msk_pred = msk_pred.cpu() if msk_pred is not None else None
if msk_pred is not None:
if isinstance(msk_pred, torch.Tensor):
# ROI-stile prediction
bbx_inv = invert_roi_bbx(bbx_pred, list(msk_pred.shape[-2:]), list(img_size))
bbx_idx = torch.arange(0, msk_pred.size(0), dtype=torch.long)
msk_pred = roi_sampling(msk_pred.unsqueeze(1).sigmoid(), bbx_inv, bbx_idx, list(img_size), padding="zero")
msk_pred = msk_pred.squeeze(1) > 0.5
elif isinstance(msk_pred, PackedSequence):
# Seeds-style prediction
msk_pred.data = msk_pred.data > 0.5
msk_pred_exp = msk_pred.data.new_zeros(len(msk_pred), img_size[0], img_size[1])
for it, (msk_pred_i, bbx_pred_i) in enumerate(zip(msk_pred, bbx_pred)):
i, j = int(bbx_pred_i[0].item()), int(bbx_pred_i[1].item())
msk_pred_exp[it, i:i + msk_pred_i.size(0), j:j + msk_pred_i.size(1)] = msk_pred_i
msk_pred = msk_pred_exp
# Convert bbx and redo clamping
bbx_pred[:, [0, 2]] = (bbx_pred[:, [0, 2]] / img_size[0] * original_size[0]).clamp(min=0, max=original_size[0])
bbx_pred[:, [1, 3]] = (bbx_pred[:, [1, 3]] / img_size[1] * original_size[1]).clamp(min=0, max=original_size[1])
bbx_pred_size = bbx_pred[:, 2:] - bbx_pred[:, :2]
outs = []
for i, (bbx_pred_i, bbx_pred_size_i, cls_pred_i, obj_pred_i) in \
enumerate(zip(bbx_pred, bbx_pred_size, cls_pred, obj_pred)):
out = dict(image_id=idx, category_id=int(cls_pred_i.item()), score=float(obj_pred_i.item()))
out["bbox"] = [
float(bbx_pred_i[1].item()),
float(bbx_pred_i[0].item()),
float(bbx_pred_size_i[1].item()),
float(bbx_pred_size_i[0].item()),
]
# Expand and convert mask if present
if msk_pred is not None:
segmentation = Image.fromarray(msk_pred[i].numpy()).resize(original_size[::-1], Image.NEAREST)
out["segmentation"] = mask_encode(np.asfortranarray(np.array(segmentation)))
out["segmentation"]["counts"] = str(out["segmentation"]["counts"], "utf-8")
outs.append(out)
return outs
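# Note on the detections emitted above: the internal boxes are [y0, x0, y1, x1] in
# image coordinates, so indices are swapped when building the COCO-style "bbox" entry
# [x, y, width, height], and masks are RLE-encoded at the original image resolution.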
def process_panoptic_prediction(panoptic_pred, num_stuff, idx, img_size, original_size):
# Extract panoptic prediction
msk_pred, cat_pred, obj_pred, iscrowd_pred = panoptic_pred
bbx_pred = extract_boxes(msk_pred, cat_pred.numel())
# Convert bbx and redo clamping
bbx_pred[:, [0, 2]] = (bbx_pred[:, [0, 2]] / img_size[0] * original_size[0]).clamp(min=0, max=original_size[0])
bbx_pred[:, [1, 3]] = (bbx_pred[:, [1, 3]] / img_size[1] * original_size[1]).clamp(min=0, max=original_size[1])
bbx_pred_size = bbx_pred[:, 2:] - bbx_pred[:, :2]
outs = []
for i, (obj_i, cat_i, bbx_i, iscrowd_i, bbx_size_i) in enumerate(zip(
obj_pred, cat_pred, bbx_pred, iscrowd_pred, bbx_pred_size)):
if iscrowd_i.item() == 1 or cat_i.item() < num_stuff or cat_i.item() == 255:
continue
out = dict(image_id=idx, category_id=int(cat_i.item()), score=float(obj_i.item()))
out["bbox"] = [
float(bbx_i[1].item()),
float(bbx_i[0].item()),
float(bbx_size_i[1].item()),
float(bbx_size_i[0].item()),
]
segmentation = msk_pred == i
segmentation = Image.fromarray(segmentation.numpy()).resize(original_size[::-1], Image.NEAREST)
out["segmentation"] = mask_encode(np.asfortranarray(np.array(segmentation)))
out["segmentation"]["counts"] = str(out["segmentation"]["counts"], "utf-8")
outs.append(out)
return outs
def summarize(predictions, annotations_file, img_list, mask=False):
msk_map = 0
with tempfile.NamedTemporaryFile("w") as fid:
json.dump(predictions, fid)
fid.flush()
# Detection
gt = COCO(annotations_file, img_list)
pred = gt.loadRes(fid.name)
pred_eval = COCOeval(gt, pred, "bbox")
pred_eval.evaluate()
pred_eval.accumulate()
pred_eval.summarize()
det_map = pred_eval.stats[0]
if mask:
pred_eval = COCOeval(gt, pred, "segm")
pred_eval.evaluate()
pred_eval.accumulate()
pred_eval.summarize()
msk_map = pred_eval.stats[0]
return det_map, msk_map
def summarize_mp(predictions, annotations_file, img_list, log_dir, mask=False):
# Write partial results to file (all workers)
rank = dist.get_rank()
with open(path.join(log_dir, "coco_ap_{:02d}.json".format(rank)), "w") as fid:
json.dump(predictions, fid)
with open(path.join(log_dir, "img_list_{:02d}.json".format(rank)), "w") as fid:
json.dump(img_list, fid)
dist.barrier()
# Merge results from all workers and run evaluation (only rank 0)
if rank == 0:
predictions = []
img_list = []
for i in range(dist.get_world_size()):
coco_ap_file = path.join(log_dir, "coco_ap_{:02d}.json".format(i))
with open(coco_ap_file) as fid:
predictions += json.load(fid)
remove(coco_ap_file)
img_list_file = path.join(log_dir, "img_list_{:02d}.json".format(i))
with open(img_list_file) as fid:
img_list += json.load(fid)
remove(img_list_file)
det_map, msk_map = summarize(predictions, annotations_file, img_list, mask)
else:
det_map, msk_map = 0, 0
dist.barrier()
return det_map, msk_map
class COCO(_COCO):
"""Modified COCO class that loads only a subset of"""
def __init__(self, annotation_file, img_list):
# load dataset
self.dataset, self.anns, self.cats, self.imgs = dict(), dict(), dict(), dict()
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
print('loading annotations into memory...')
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
assert type(dataset) == dict, 'annotation file format {} not supported'.format(type(dataset))
print('Done (t={:0.2f}s)'.format(time.time() - tic))
# Clean-up dataset, removing all images and annotations that are not in img_list
img_list = set(img_list)
dataset["images"] = [img for img in dataset["images"] if img["id"] in img_list]
dataset["annotations"] = [ann for ann in dataset["annotations"] if ann["image_id"] in img_list]
self.dataset = dataset
self.createIndex()
| 38.537234
| 118
| 0.633402
|
import json
import tempfile
import time
from collections import defaultdict
from os import path, remove
import numpy as np
import torch
import torch.distributed as dist
from PIL import Image
from pycocotools.coco import COCO as _COCO
from pycocotools.cocoeval import COCOeval
from pycocotools.mask import encode as mask_encode
from .bbx import invert_roi_bbx, extract_boxes
from .parallel import PackedSequence
from .roi_sampling import roi_sampling
def process_prediction(bbx_pred, cls_pred, obj_pred, msk_pred, img_size, idx, original_size):
bbx_pred, cls_pred, obj_pred = (t.cpu() for t in (bbx_pred, cls_pred, obj_pred))
msk_pred = msk_pred.cpu() if msk_pred is not None else None
if msk_pred is not None:
if isinstance(msk_pred, torch.Tensor):
bbx_inv = invert_roi_bbx(bbx_pred, list(msk_pred.shape[-2:]), list(img_size))
bbx_idx = torch.arange(0, msk_pred.size(0), dtype=torch.long)
msk_pred = roi_sampling(msk_pred.unsqueeze(1).sigmoid(), bbx_inv, bbx_idx, list(img_size), padding="zero")
msk_pred = msk_pred.squeeze(1) > 0.5
elif isinstance(msk_pred, PackedSequence):
msk_pred.data = msk_pred.data > 0.5
msk_pred_exp = msk_pred.data.new_zeros(len(msk_pred), img_size[0], img_size[1])
for it, (msk_pred_i, bbx_pred_i) in enumerate(zip(msk_pred, bbx_pred)):
i, j = int(bbx_pred_i[0].item()), int(bbx_pred_i[1].item())
msk_pred_exp[it, i:i + msk_pred_i.size(0), j:j + msk_pred_i.size(1)] = msk_pred_i
msk_pred = msk_pred_exp
bbx_pred[:, [0, 2]] = (bbx_pred[:, [0, 2]] / img_size[0] * original_size[0]).clamp(min=0, max=original_size[0])
bbx_pred[:, [1, 3]] = (bbx_pred[:, [1, 3]] / img_size[1] * original_size[1]).clamp(min=0, max=original_size[1])
bbx_pred_size = bbx_pred[:, 2:] - bbx_pred[:, :2]
outs = []
for i, (bbx_pred_i, bbx_pred_size_i, cls_pred_i, obj_pred_i) in \
enumerate(zip(bbx_pred, bbx_pred_size, cls_pred, obj_pred)):
out = dict(image_id=idx, category_id=int(cls_pred_i.item()), score=float(obj_pred_i.item()))
out["bbox"] = [
float(bbx_pred_i[1].item()),
float(bbx_pred_i[0].item()),
float(bbx_pred_size_i[1].item()),
float(bbx_pred_size_i[0].item()),
]
if msk_pred is not None:
segmentation = Image.fromarray(msk_pred[i].numpy()).resize(original_size[::-1], Image.NEAREST)
out["segmentation"] = mask_encode(np.asfortranarray(np.array(segmentation)))
out["segmentation"]["counts"] = str(out["segmentation"]["counts"], "utf-8")
outs.append(out)
return outs
def process_panoptic_prediction(panoptic_pred, num_stuff, idx, img_size, original_size):
msk_pred, cat_pred, obj_pred, iscrowd_pred = panoptic_pred
bbx_pred = extract_boxes(msk_pred, cat_pred.numel())
bbx_pred[:, [0, 2]] = (bbx_pred[:, [0, 2]] / img_size[0] * original_size[0]).clamp(min=0, max=original_size[0])
bbx_pred[:, [1, 3]] = (bbx_pred[:, [1, 3]] / img_size[1] * original_size[1]).clamp(min=0, max=original_size[1])
bbx_pred_size = bbx_pred[:, 2:] - bbx_pred[:, :2]
outs = []
for i, (obj_i, cat_i, bbx_i, iscrowd_i, bbx_size_i) in enumerate(zip(
obj_pred, cat_pred, bbx_pred, iscrowd_pred, bbx_pred_size)):
if iscrowd_i.item() == 1 or cat_i.item() < num_stuff or cat_i.item() == 255:
continue
out = dict(image_id=idx, category_id=int(cat_i.item()), score=float(obj_i.item()))
out["bbox"] = [
float(bbx_i[1].item()),
float(bbx_i[0].item()),
float(bbx_size_i[1].item()),
float(bbx_size_i[0].item()),
]
segmentation = msk_pred == i
segmentation = Image.fromarray(segmentation.numpy()).resize(original_size[::-1], Image.NEAREST)
out["segmentation"] = mask_encode(np.asfortranarray(np.array(segmentation)))
out["segmentation"]["counts"] = str(out["segmentation"]["counts"], "utf-8")
outs.append(out)
return outs
def summarize(predictions, annotations_file, img_list, mask=False):
msk_map = 0
with tempfile.NamedTemporaryFile("w") as fid:
json.dump(predictions, fid)
fid.flush()
gt = COCO(annotations_file, img_list)
pred = gt.loadRes(fid.name)
pred_eval = COCOeval(gt, pred, "bbox")
pred_eval.evaluate()
pred_eval.accumulate()
pred_eval.summarize()
det_map = pred_eval.stats[0]
if mask:
pred_eval = COCOeval(gt, pred, "segm")
pred_eval.evaluate()
pred_eval.accumulate()
pred_eval.summarize()
msk_map = pred_eval.stats[0]
return det_map, msk_map
def summarize_mp(predictions, annotations_file, img_list, log_dir, mask=False):
rank = dist.get_rank()
with open(path.join(log_dir, "coco_ap_{:02d}.json".format(rank)), "w") as fid:
json.dump(predictions, fid)
with open(path.join(log_dir, "img_list_{:02d}.json".format(rank)), "w") as fid:
json.dump(img_list, fid)
dist.barrier()
if rank == 0:
predictions = []
img_list = []
for i in range(dist.get_world_size()):
coco_ap_file = path.join(log_dir, "coco_ap_{:02d}.json".format(i))
with open(coco_ap_file) as fid:
predictions += json.load(fid)
remove(coco_ap_file)
img_list_file = path.join(log_dir, "img_list_{:02d}.json".format(i))
with open(img_list_file) as fid:
img_list += json.load(fid)
remove(img_list_file)
det_map, msk_map = summarize(predictions, annotations_file, img_list, mask)
else:
det_map, msk_map = 0, 0
dist.barrier()
return det_map, msk_map
class COCO(_COCO):
def __init__(self, annotation_file, img_list):
self.dataset, self.anns, self.cats, self.imgs = dict(), dict(), dict(), dict()
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
print('loading annotations into memory...')
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
assert type(dataset) == dict, 'annotation file format {} not supported'.format(type(dataset))
print('Done (t={:0.2f}s)'.format(time.time() - tic))
img_list = set(img_list)
dataset["images"] = [img for img in dataset["images"] if img["id"] in img_list]
dataset["annotations"] = [ann for ann in dataset["annotations"] if ann["image_id"] in img_list]
self.dataset = dataset
self.createIndex()
| true
| true
|
f70c25aac3ff734e0ee87c124fe1d3e6037c8415
| 364
|
py
|
Python
|
examples/quotes_avro/quotes_avro/pipelines.py
|
ZuInnoTe/scrapy-contrib-bigexporters
|
45428fcfc2c1531ac93a66d381f46ef70ccef1fe
|
[
"MIT"
] | 9
|
2020-10-11T16:48:26.000Z
|
2022-03-22T22:49:55.000Z
|
examples/quotes_avro/quotes_avro/pipelines.py
|
ZuInnoTe/scrapy-contrib-bigexporters
|
45428fcfc2c1531ac93a66d381f46ef70ccef1fe
|
[
"MIT"
] | 4
|
2020-10-11T18:23:16.000Z
|
2022-03-24T16:50:34.000Z
|
examples/quotes_avro/quotes_avro/pipelines.py
|
ZuInnoTe/scrapy-contrib-bigexporters
|
45428fcfc2c1531ac93a66d381f46ef70ccef1fe
|
[
"MIT"
] | 1
|
2022-03-31T20:00:04.000Z
|
2022-03-31T20:00:04.000Z
|
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
class QuotesAvroPipeline:
def process_item(self, item, spider):
return item
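# To activate this pass-through pipeline, reference it from the project settings,
# e.g. in settings.py (standard Scrapy configuration, priority value illustrative):
#     ITEM_PIPELINES = {"quotes_avro.pipelines.QuotesAvroPipeline": 300}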
| 26
| 66
| 0.769231
|
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
class QuotesAvroPipeline:
def process_item(self, item, spider):
return item
| true
| true
|