hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5f8d1298f2a7c3c98e0c4dc2d436a9160dbc1815 | 424 | py | Python | StackOverflow-Playgrounds/Random-Questions/main_01.py | AdrKacz/Dev-Learning | d75ef6a0430c2b1109e71f74d017598e024ca518 | [
"MIT"
] | null | null | null | StackOverflow-Playgrounds/Random-Questions/main_01.py | AdrKacz/Dev-Learning | d75ef6a0430c2b1109e71f74d017598e024ca518 | [
"MIT"
] | null | null | null | StackOverflow-Playgrounds/Random-Questions/main_01.py | AdrKacz/Dev-Learning | d75ef6a0430c2b1109e71f74d017598e024ca518 | [
"MIT"
] | null | null | null | def FindDifference(word):
    # Reference pattern the input is matched against.
    letters = list("programmer")
    i = 0
    j = 0
    # Forward greedy scan: advance through `word`, consuming the next
    # expected letter of "programmer" whenever it appears.
    while i < len(word) and j < len(letters):
        if word[i] == letters[j]:
            j+=1
        i+=1
    start = i - 1  # index in `word` where the forward match stopped
    if i == len(word):
        # The forward scan consumed all of `word` — presumably nothing
        # extra was inserted; -1 is the sentinel. TODO confirm intended
        # semantics for words that don't contain "programmer" at all.
        return -1
    i = len(word)
    j = len(letters)
    # Backward scan: match "programmer" from the end of `word` toward the front.
    while i > 0 and j > 0:
        i -= 1
        if word[i] == letters[j - 1]:
            j-=1
        end = i  # index inspected on the final backward iteration
    # Length of the span strictly between the forward and backward matches.
    return end - start - 1
if __name__ == '__main__':
    # Demo input from the original StackOverflow question.
    print(FindDifference("progxrammerrxproxgrammer")) | 16.307692 | 50 | 0.582547 |
33dbbec6777e2f4c9bdd173a060c40ff23de48c1 | 699 | py | Python | tests/integration/blueprints/site/ticketing/test_views_mytickets.py | GSH-LAN/byceps | ab8918634e90aaa8574bd1bb85627759cef122fe | [
"BSD-3-Clause"
] | null | null | null | tests/integration/blueprints/site/ticketing/test_views_mytickets.py | GSH-LAN/byceps | ab8918634e90aaa8574bd1bb85627759cef122fe | [
"BSD-3-Clause"
] | null | null | null | tests/integration/blueprints/site/ticketing/test_views_mytickets.py | GSH-LAN/byceps | ab8918634e90aaa8574bd1bb85627759cef122fe | [
"BSD-3-Clause"
] | null | null | null | """
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from tests.helpers import http_client, login_user
def test_when_logged_in(site_app, site, user):
    """A logged-in user should receive their tickets page as HTML."""
    login_user(user.id)

    resp = send_request(site_app, user_id=user.id)

    assert resp.status_code == 200
    assert resp.mimetype == 'text/html'
def test_when_not_logged_in(site_app, site):
    """An anonymous visitor should be redirected away."""
    resp = send_request(site_app)

    assert resp.status_code == 302
    assert 'Location' in resp.headers
# helpers
def send_request(app, user_id=None):
    """GET the 'my tickets' page, optionally authenticated as `user_id`."""
    with http_client(app, user_id=user_id) as client:
        return client.get('/tickets/mine')
| 21.84375 | 54 | 0.723891 |
81b213ee8f4a3cacc8e27a9766c34f4abd091dc6 | 34,202 | py | Python | onnx/helper.py | jacky82226/onnx | 9b3524511003e11998d5b58ec9d0add3ce568506 | [
"MIT"
] | 1 | 2022-02-04T07:45:14.000Z | 2022-02-04T07:45:14.000Z | onnx/helper.py | developerChans/onnx | 5cf5feef5ec3fd5527b2fdb6c29780e3b705059f | [
"Apache-2.0"
] | null | null | null | onnx/helper.py | developerChans/onnx | 5cf5feef5ec3fd5527b2fdb6c29780e3b705059f | [
"Apache-2.0"
] | null | null | null | # SPDX-License-Identifier: Apache-2.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections.abc # type: ignore
import numbers
import google.protobuf.message
from onnx import TensorProto, SparseTensorProto, AttributeProto, ValueInfoProto, \
TensorShapeProto, NodeProto, ModelProto, GraphProto, OperatorSetIdProto, \
TypeProto, SequenceProto, MapProto, IR_VERSION, TrainingInfoProto, OptionalProto, \
FunctionProto
from onnx import defs
from onnx import mapping
from onnx.mapping import STORAGE_TENSOR_TYPE_TO_FIELD
from typing import Text, Sequence, Any, Optional, Dict, Union, TypeVar, Callable, Tuple, List, cast
import numpy as np # type: ignore
import warnings
VersionRowType = Union[Tuple[Text, int, int, int], Tuple[Text, int, int, int, int]]
VersionTableType = List[VersionRowType]
AssignmentBindingType = List[Tuple[Text, Text]]
# This is a copy of the documented version in https://github.com/onnx/onnx/blob/main/docs/Versioning.md#released-versions
# Both must be updated whenever a new version of ONNX is released.
VERSION_TABLE: VersionTableType = [
# Release-version, IR version, ai.onnx version, ai.onnx.ml version, (optional) ai.onnx.training version
('1.0', 3, 1, 1),
('1.1', 3, 5, 1),
('1.1.2', 3, 6, 1),
('1.2', 3, 7, 1),
('1.3', 3, 8, 1),
('1.4.1', 4, 9, 1),
('1.5.0', 5, 10, 1),
('1.6.0', 6, 11, 2),
('1.7.0', 7, 12, 2, 1),
('1.8.0', 7, 13, 2, 1),
('1.8.1', 7, 13, 2, 1),
('1.9.0', 7, 14, 2, 1),
('1.10.0', 8, 15, 2, 1),
('1.10.1', 8, 15, 2, 1),
('1.10.2', 8, 15, 2, 1),
('1.11.0', 8, 16, 3, 1)
]
VersionMapType = Dict[Tuple[Text, int], int]
# create a map from (opset-domain, opset-version) to ir-version from above table
def create_op_set_id_version_map(table: VersionTableType) -> VersionMapType:
    """Build a map from (opset-domain, opset-version) to the IR version of
    the first (earliest) release that shipped that opset version."""
    version_map: VersionMapType = dict()

    def register(release_version: Text, ir_version: int, *opset_versions: Any) -> None:
        domains = ['ai.onnx', 'ai.onnx.ml', 'ai.onnx.training']
        for key in zip(domains, opset_versions):
            # setdefault keeps the first-seen (earliest-release) IR version.
            version_map.setdefault(key, ir_version)

    for row in table:
        register(*row)
    return version_map
OP_SET_ID_VERSION_MAP = create_op_set_id_version_map(VERSION_TABLE)
# Given list of opset ids, determine minimum IR version required
def find_min_ir_version_for(opsetidlist: List[OperatorSetIdProto]) -> int:
    """Return the minimum IR version able to express the given opset imports.

    Falls back to IR version 3 when no opset ids are supplied.
    Raises ValueError for an unknown (domain, version) pair.
    """
    def lookup(domain: Union[Text, None], version: int) -> int:
        key = (domain or 'ai.onnx', version)
        if key not in OP_SET_ID_VERSION_MAP:
            raise ValueError("Unsupported opset-version.")
        return OP_SET_ID_VERSION_MAP[key]

    if not opsetidlist:
        return 3  # default minimum IR version
    return max(lookup(entry.domain, entry.version) for entry in opsetidlist)
def make_node(
        op_type: Text,
        inputs: Sequence[Text],
        outputs: Sequence[Text],
        name: Optional[Text] = None,
        doc_string: Optional[Text] = None,
        domain: Optional[Text] = None,
        **kwargs: Any
) -> NodeProto:
    """Construct a NodeProto.

    Arguments:
        op_type (string): name of the operator to construct
        inputs (list of string): names of the node's inputs
        outputs (list of string): names of the node's outputs
        name (string, default None): optional unique node identifier
        doc_string (string, default None): optional documentation string
        domain (string, default None): optional operator domain; when None
            the default (empty) domain is used
        **kwargs (dict): node attributes; accepted value types are those of
            :func:`make_attribute`. None-valued attributes are skipped.
    """
    node = NodeProto()
    node.op_type = op_type
    node.input.extend(inputs)
    node.output.extend(outputs)
    if name:
        node.name = name
    if doc_string:
        node.doc_string = doc_string
    if domain is not None:
        node.domain = domain
    if kwargs:
        # Attributes are added in sorted key order for determinism.
        present_attrs = sorted(
            (attr_name, attr_value)
            for attr_name, attr_value in kwargs.items()
            if attr_value is not None)
        node.attribute.extend(
            make_attribute(attr_name, attr_value)
            for attr_name, attr_value in present_attrs)
    return node
def make_operatorsetid(
        domain: Text,
        version: int,
) -> OperatorSetIdProto:
    """Construct an OperatorSetIdProto.

    Arguments:
        domain (string): The domain of the operator set id
        version (integer): Version of operator set id

    Note: this helper is an exact duplicate of :func:`make_opsetid`;
    delegating keeps the two from drifting apart.
    """
    return make_opsetid(domain, version)
def make_graph(
        nodes: Sequence[NodeProto],
        name: Text,
        inputs: Sequence[ValueInfoProto],
        outputs: Sequence[ValueInfoProto],
        initializer: Optional[Sequence[TensorProto]] = None,
        doc_string: Optional[Text] = None,
        value_info: Sequence[ValueInfoProto] = [],
        sparse_initializer: Optional[Sequence[SparseTensorProto]] = None,
) -> GraphProto:
    """Construct a GraphProto from nodes, graph inputs/outputs and
    optional (sparse) initializers and value_info entries."""
    graph = GraphProto()
    graph.name = name
    graph.node.extend(nodes)
    graph.input.extend(inputs)
    graph.output.extend(outputs)
    # None collapses to "no entries" for every optional sequence.
    graph.initializer.extend(initializer if initializer is not None else [])
    graph.sparse_initializer.extend(
        sparse_initializer if sparse_initializer is not None else [])
    graph.value_info.extend(value_info if value_info is not None else [])
    if doc_string:
        graph.doc_string = doc_string
    return graph
def make_opsetid(domain: Text, version: int) -> OperatorSetIdProto:
    """Construct an OperatorSetIdProto from a domain name and opset version."""
    opsetid = OperatorSetIdProto()
    opsetid.domain = domain
    opsetid.version = version
    return opsetid
def make_function(
        domain: Text,
        fname: Text,
        inputs: Sequence[Text],
        outputs: Sequence[Text],
        nodes: Sequence[NodeProto],
        opset_imports: Sequence[OperatorSetIdProto],
        attributes: Optional[Sequence[Text]] = None,
        doc_string: Optional[Text] = None
) -> FunctionProto:
    """Construct a FunctionProto.

    Arguments:
        domain (string): function domain
        fname (string): function name
        inputs (list of string): formal input parameter names
        outputs (list of string): formal output parameter names
        nodes (list of NodeProto): function body
        opset_imports (list of OperatorSetIdProto): opsets the body relies on
        attributes (list of string, default None): attribute parameter names
        doc_string (string, default None): optional documentation string
    """
    f = FunctionProto()
    f.domain = domain
    f.name = fname
    f.input.extend(inputs)
    f.output.extend(outputs)
    f.node.extend(nodes)
    f.opset_import.extend(opset_imports)
    # The default used to be a mutable [] (shared-default anti-pattern);
    # None is the safe spelling and also tolerates explicit None callers.
    f.attribute.extend(attributes if attributes is not None else [])
    if doc_string:
        f.doc_string = doc_string
    return f
def make_model(graph: GraphProto, **kwargs: Any) -> ModelProto:
    """Construct a ModelProto wrapping *graph*.

    Recognized keyword arguments: ``opset_imports`` (list of
    OperatorSetIdProto) and ``functions`` (list of FunctionProto); any other
    keyword is set verbatim as a ModelProto field.
    """
    model = ModelProto()
    # Record the IR version this helper was generated against.
    model.ir_version = IR_VERSION
    model.graph.CopyFrom(graph)

    opset_imports = kwargs.pop('opset_imports', None)  # type: ignore
    if opset_imports is None:
        # No explicit opsets: import the default ai.onnx opset.
        default_import = model.opset_import.add()
        default_import.version = defs.onnx_opset_version()
    else:
        model.opset_import.extend(opset_imports)

    functions = kwargs.pop('functions', None)  # type: ignore
    if functions is not None:
        model.functions.extend(functions)

    for field_name, field_value in kwargs.items():
        # TODO: Does this work with repeated fields?
        setattr(model, field_name, field_value)
    return model
# An extension of make_model that infers an IR_VERSION for the model,
# if not specified, using a best-effort-basis.
def make_model_gen_version(graph: GraphProto, **kwargs: Any) -> ModelProto:
    """Like :func:`make_model`, but infers a minimal ``ir_version`` from the
    supplied ``opset_imports`` when the caller does not specify one."""
    if 'ir_version' not in kwargs:
        imports = kwargs.get('opset_imports', [])
        kwargs['ir_version'] = find_min_ir_version_for(imports)
    return make_model(graph, **kwargs)
def set_model_props(model: ModelProto, dict_value: Dict[Text, Text]) -> None:
    """Replace ``model.metadata_props`` with the given key/value mapping."""
    del model.metadata_props[:]  # clear any existing entries first
    for prop_key, prop_value in dict_value.items():
        prop = model.metadata_props.add()
        prop.key = prop_key
        prop.value = prop_value
def split_complex_to_pairs(ca: Sequence[np.complex64]) -> Sequence[float]:
    """Flatten a sequence of complex numbers into interleaved
    [real0, imag0, real1, imag1, ...] components.

    The return annotation was previously ``Sequence[int]``, which is wrong:
    ``.real``/``.imag`` are floats.
    """
    return [(ca[i // 2].real if (i % 2 == 0) else ca[i // 2].imag)
            for i in range(len(ca) * 2)]
def make_tensor(
        name: Text,
        data_type: int,
        dims: Sequence[int],
        vals: Any,
        raw: bool = False
) -> TensorProto:
    '''
    Make a TensorProto with specified arguments. If raw is False, this
    function will choose the corresponding proto field to store the
    values based on data_type. If raw is True, use "raw_data" proto
    field to store the values, and values should be of type bytes in
    this case.
    '''
    tensor = TensorProto()
    tensor.data_type = data_type
    tensor.name = name

    if data_type == TensorProto.STRING:
        assert not raw, "Can not use raw_data to store string type"

    # Check number of vals specified equals tensor size.
    # For raw mode the expected size is in bytes (itemsize * prod(dims));
    # otherwise it is the element count (prod(dims)).
    expected_size = 1 if (not raw) else (mapping.TENSOR_TYPE_TO_NP_TYPE[data_type].itemsize)
    # Flatten a numpy array if its rank > 1
    if type(vals) is np.ndarray and len(vals.shape) > 1:
        vals = vals.flatten()
    for d in dims:
        expected_size = expected_size * d

    if len(vals) != expected_size:
        raise ValueError("Number of values does not match tensor's size. Expected {}, but it is {}. "
                         .format(expected_size, len(vals)))

    if raw:
        tensor.raw_data = vals
    else:
        if (data_type == TensorProto.COMPLEX64
                or data_type == TensorProto.COMPLEX128):
            # Complex values are stored as interleaved real/imag floats.
            vals = split_complex_to_pairs(vals)
        # float16/bfloat16 are stored as uint16
        # NOTE(review): BFLOAT16 values are round-tripped through np.float16
        # here, which does not preserve bfloat16 semantics — confirm intended.
        elif (data_type == TensorProto.FLOAT16
                or data_type == TensorProto.BFLOAT16):
            vals = np.array(vals).astype(np.float16).view(dtype=np.uint16).flatten().tolist()
        # Pick the typed proto field (float_data, int32_data, ...) that
        # matches this data type's storage type.
        field = mapping.STORAGE_TENSOR_TYPE_TO_FIELD[
            mapping.TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE[data_type]]
        getattr(tensor, field).extend(vals)
    tensor.dims.extend(dims)
    return tensor
def make_sparse_tensor(
        values: TensorProto,
        indices: TensorProto,
        dims: Sequence[int]
) -> SparseTensorProto:
    """Construct a SparseTensorProto from a values tensor, an indices
    tensor and the dense shape ``dims``."""
    sparse_tensor = SparseTensorProto()
    sparse_tensor.values.CopyFrom(values)
    sparse_tensor.indices.CopyFrom(indices)
    sparse_tensor.dims.extend(dims)
    return sparse_tensor
def make_sequence(
        name: Text,
        elem_type: SequenceProto.DataType,
        values: Sequence[Any],
) -> SequenceProto:
    """Construct a SequenceProto holding *values* of the given element type."""
    sequence = SequenceProto()
    sequence.name = name
    sequence.elem_type = elem_type
    # The storage field (tensor_values, string_values, ...) depends on the
    # element type.
    storage_field = mapping.STORAGE_ELEMENT_TYPE_TO_FIELD[elem_type]
    getattr(sequence, storage_field).extend(values)
    return sequence
def make_map(
        name: Text,
        key_type: int,
        keys: List[Any],
        values: SequenceProto
) -> MapProto:
    '''
    Make a Map with specified key-value pair arguments.

    Criteria for conversion:
    - Keys and Values must have the same number of elements
    - Every key in keys must be of the same type
    - Every value in values must be of the same type

    Raises:
        TypeError: if key_type is not STRING or an integer tensor type.
    '''
    map_proto = MapProto()  # renamed from `map`, which shadowed the builtin
    valid_key_int_types = [TensorProto.INT8, TensorProto.INT16, TensorProto.INT32,
                           TensorProto.INT64, TensorProto.UINT8, TensorProto.UINT16,
                           TensorProto.UINT32, TensorProto.UINT64]
    map_proto.name = name
    map_proto.key_type = key_type
    if key_type == TensorProto.STRING:
        map_proto.string_keys.extend(keys)
    elif key_type in valid_key_int_types:
        map_proto.keys.extend(keys)
    else:
        # Previously an unsupported key_type was silently ignored, yielding a
        # map with values but no keys; fail loudly instead.
        raise TypeError('Invalid key type {} for MapProto.'.format(key_type))
    map_proto.values.CopyFrom(values)
    return map_proto
def make_optional(
        name: Text,
        elem_type: OptionalProto.DataType,
        value: Optional[Any],
) -> OptionalProto:
    '''
    Make an Optional with specified value arguments.

    elem_type 0 (presumably the UNDEFINED sentinel) denotes an absent value;
    no payload field is populated in that case — TODO confirm against the
    OptionalProto definition.
    '''
    optional = OptionalProto()
    optional.name = name
    optional.elem_type = elem_type
    if elem_type != 0:
        # Pick the payload field matching the element type and copy the value in.
        values_field = mapping.OPTIONAL_ELEMENT_TYPE_TO_FIELD[elem_type]
        getattr(optional, values_field).CopyFrom(value)
    return optional
def _to_bytes_or_false(val: Union[Text, bytes]) -> Union[bytes, bool]:
    """Internal helper converting the input to bytes, or to False.

    - If val is bytes: return it unchanged.
    - If val is str: return val.encode('utf-8').
    - Otherwise (no .encode attribute): return False, signalling that the
      value is not string-like.
    """
    if isinstance(val, bytes):
        return val
    try:
        return val.encode('utf-8')
    except AttributeError:
        return False
def make_attribute(
        key: Text,
        value: Any,
        doc_string: Optional[Text] = None
) -> AttributeProto:
    """Makes an AttributeProto based on the value type.

    The branch order below matters: singular types are tested before
    iterables, and strings (which are iterable) are caught by the
    bytes conversion before the generic iterable path.
    """
    attr = AttributeProto()
    attr.name = key
    if doc_string:
        attr.doc_string = doc_string

    is_iterable = isinstance(value, collections.abc.Iterable)
    bytes_or_false = _to_bytes_or_false(value)
    # First, singular cases
    # float
    if isinstance(value, float):
        attr.f = value
        attr.type = AttributeProto.FLOAT
    # integer
    # NOTE: bool is a subclass of numbers.Integral, so True/False land here
    # and are stored as INT (1/0).
    elif isinstance(value, numbers.Integral):
        attr.i = cast(int, value)
        attr.type = AttributeProto.INT
    # string
    elif bytes_or_false is not False:
        assert isinstance(bytes_or_false, bytes)
        attr.s = bytes_or_false
        attr.type = AttributeProto.STRING
    elif isinstance(value, TensorProto):
        attr.t.CopyFrom(value)
        attr.type = AttributeProto.TENSOR
    elif isinstance(value, SparseTensorProto):
        attr.sparse_tensor.CopyFrom(value)
        attr.type = AttributeProto.SPARSE_TENSOR
    elif isinstance(value, GraphProto):
        attr.g.CopyFrom(value)
        attr.type = AttributeProto.GRAPH
    elif isinstance(value, TypeProto):
        attr.tp.CopyFrom(value)
        attr.type = AttributeProto.TYPE_PROTO
    # third, iterable cases
    elif is_iterable:
        byte_array = [_to_bytes_or_false(v) for v in value]
        # NOTE: an empty iterable satisfies the first all() test and is
        # therefore stored as an (empty) INTS attribute.
        if all(isinstance(v, numbers.Integral) for v in value):
            # Turn np.int32/64 into Python built-in int.
            attr.ints.extend(int(v) for v in value)
            attr.type = AttributeProto.INTS
        elif all(isinstance(v, numbers.Real) for v in value):
            # Since ints and floats are members of Real, this allows a mix of ints and floats
            # (and converts the ints to floats).
            attr.floats.extend(float(v) for v in value)
            attr.type = AttributeProto.FLOATS
        elif all(map(lambda bytes_or_false: bytes_or_false is not False, byte_array)):
            attr.strings.extend(cast(List[bytes], byte_array))
            attr.type = AttributeProto.STRINGS
        elif all(isinstance(v, TensorProto) for v in value):
            attr.tensors.extend(value)
            attr.type = AttributeProto.TENSORS
        elif all(isinstance(v, SparseTensorProto) for v in value):
            attr.sparse_tensors.extend(value)
            attr.type = AttributeProto.SPARSE_TENSORS
        elif all(isinstance(v, GraphProto) for v in value):
            attr.graphs.extend(value)
            attr.type = AttributeProto.GRAPHS
        elif all(isinstance(tp, TypeProto) for tp in value):
            attr.type_protos.extend(value)
            attr.type = AttributeProto.TYPE_PROTOS
        else:
            raise ValueError(
                "You passed in an iterable attribute but I cannot figure out "
                "its applicable type.")
    else:
        raise TypeError(
            'value "{}" is not valid attribute data type.'.format(value))
    return attr
def get_attribute_value(attr: AttributeProto) -> Any:
    """Return the Python value stored in *attr*, dispatched on its type.

    Repeated fields are returned as plain lists; singular fields are
    returned as-is. Raises ValueError for an unrecognized attribute type.
    """
    extractors = {
        AttributeProto.FLOAT: lambda a: a.f,
        AttributeProto.INT: lambda a: a.i,
        AttributeProto.STRING: lambda a: a.s,
        AttributeProto.TENSOR: lambda a: a.t,
        AttributeProto.SPARSE_TENSOR: lambda a: a.sparse_tensor,
        AttributeProto.GRAPH: lambda a: a.g,
        AttributeProto.TYPE_PROTO: lambda a: a.tp,
        AttributeProto.FLOATS: lambda a: list(a.floats),
        AttributeProto.INTS: lambda a: list(a.ints),
        AttributeProto.STRINGS: lambda a: list(a.strings),
        AttributeProto.TENSORS: lambda a: list(a.tensors),
        AttributeProto.SPARSE_TENSORS: lambda a: list(a.sparse_tensors),
        AttributeProto.GRAPHS: lambda a: list(a.graphs),
        AttributeProto.TYPE_PROTOS: lambda a: list(a.type_protos),
    }
    extractor = extractors.get(attr.type)
    if extractor is None:
        raise ValueError("Unsupported ONNX attribute: {}".format(attr))
    return extractor(attr)
def make_empty_tensor_value_info(name: Text) -> ValueInfoProto:
    """Create a ValueInfoProto carrying only a name; its type is left unset."""
    value_info = ValueInfoProto()
    value_info.name = name
    return value_info
def make_tensor_type_proto(
        elem_type: int,
        shape: Optional[Sequence[Union[Text, int, None]]],
        shape_denotation: Optional[List[Text]] = None,
) -> TypeProto:
    """Makes a Tensor TypeProto based on the data type and shape.

    Arguments:
        elem_type: TensorProto.DataType value for the element type.
        shape: sequence of dimensions; an int is a concrete dim_value, a
            string is a symbolic dim_param, and None leaves the dimension
            unset (unknown). shape=None omits the shape entirely.
        shape_denotation: optional per-dimension denotation strings; must
            have the same length as shape when provided.
    """
    type_proto = TypeProto()
    tensor_type_proto = type_proto.tensor_type
    tensor_type_proto.elem_type = elem_type
    tensor_shape_proto = tensor_type_proto.shape

    if shape is not None:
        # You might think this is a no-op (extending a normal Python
        # list by [] certainly is), but protobuf lists work a little
        # differently; if a field is never set, it is omitted from the
        # resulting protobuf; a list that is explicitly set to be
        # empty will get an (empty) entry in the protobuf. This
        # difference is visible to our consumers, so make sure we emit
        # an empty shape!
        tensor_shape_proto.dim.extend([])

        if shape_denotation:
            if len(shape_denotation) != len(shape):
                raise ValueError(
                    'Invalid shape_denotation. '
                    'Must be of the same length as shape.')

        for i, d in enumerate(shape):
            dim = tensor_shape_proto.dim.add()
            if d is None:
                pass  # unknown dimension: neither dim_value nor dim_param set
            elif isinstance(d, int):
                dim.dim_value = d
            elif isinstance(d, str):
                dim.dim_param = d
            else:
                raise ValueError(
                    'Invalid item in shape: {}. '
                    'Needs to be of int or str.'.format(d))

            if shape_denotation:
                dim.denotation = shape_denotation[i]

    return type_proto
def make_tensor_value_info(
        name: Text,
        elem_type: int,
        shape: Optional[Sequence[Union[Text, int, None]]],
        doc_string: Text = "",
        shape_denotation: Optional[List[Text]] = None,
) -> ValueInfoProto:
    """Makes a ValueInfoProto for a tensor of the given type and shape."""
    tensor_type = make_tensor_type_proto(elem_type, shape, shape_denotation)
    value_info = ValueInfoProto()
    value_info.name = name
    if doc_string:
        value_info.doc_string = doc_string
    value_info.type.CopyFrom(tensor_type)
    return value_info
def make_sparse_tensor_type_proto(
        elem_type: int,
        shape: Optional[Sequence[Union[Text, int, None]]],
        shape_denotation: Optional[List[Text]] = None,
) -> TypeProto:
    """Makes a SparseTensor TypeProto based on the data type and shape.

    Arguments mirror :func:`make_tensor_type_proto`: int dims become
    dim_value, strings become dim_param, None leaves a dimension unset,
    and shape=None omits the shape entirely.
    """
    type_proto = TypeProto()
    sparse_tensor_type_proto = type_proto.sparse_tensor_type
    sparse_tensor_type_proto.elem_type = elem_type
    sparse_tensor_shape_proto = sparse_tensor_type_proto.shape

    if shape is not None:
        # You might think this is a no-op (extending a normal Python
        # list by [] certainly is), but protobuf lists work a little
        # differently; if a field is never set, it is omitted from the
        # resulting protobuf; a list that is explicitly set to be
        # empty will get an (empty) entry in the protobuf. This
        # difference is visible to our consumers, so make sure we emit
        # an empty shape!
        sparse_tensor_shape_proto.dim.extend([])

        if shape_denotation:
            if len(shape_denotation) != len(shape):
                raise ValueError(
                    'Invalid shape_denotation. '
                    'Must be of the same length as shape.')

        for i, d in enumerate(shape):
            dim = sparse_tensor_shape_proto.dim.add()
            if d is None:
                pass  # unknown dimension: neither dim_value nor dim_param set
            elif isinstance(d, int):
                dim.dim_value = d
            elif isinstance(d, str):
                dim.dim_param = d
            else:
                raise ValueError(
                    'Invalid item in shape: {}. '
                    'Needs to be of int or text.'.format(d))

            if shape_denotation:
                dim.denotation = shape_denotation[i]

    return type_proto
def make_sparse_tensor_value_info(
        name: Text,
        elem_type: int,
        shape: Optional[Sequence[Union[Text, int, None]]],
        doc_string: Text = "",
        shape_denotation: Optional[List[Text]] = None,
) -> ValueInfoProto:
    """Makes a SparseTensor ValueInfoProto based on the data type and shape."""
    value_info = ValueInfoProto()
    value_info.name = name
    if doc_string:
        value_info.doc_string = doc_string
    sparse_type = make_sparse_tensor_type_proto(elem_type, shape, shape_denotation)
    # Copy only the sparse_tensor_type payload into the value info's type.
    value_info.type.sparse_tensor_type.CopyFrom(sparse_type.sparse_tensor_type)
    return value_info
def make_sequence_type_proto(
        inner_type_proto: TypeProto,
) -> TypeProto:
    """Wrap *inner_type_proto* in a sequence TypeProto."""
    sequence_type = TypeProto()
    sequence_type.sequence_type.elem_type.CopyFrom(inner_type_proto)
    return sequence_type
def make_optional_type_proto(
        inner_type_proto: TypeProto,
) -> TypeProto:
    """Wrap *inner_type_proto* in an optional TypeProto."""
    optional_type = TypeProto()
    optional_type.optional_type.elem_type.CopyFrom(inner_type_proto)
    return optional_type
def make_value_info(
        name: Text,
        type_proto: TypeProto,
        doc_string: Text = "",
) -> ValueInfoProto:
    """Construct a ValueInfoProto with the given name and type_proto."""
    value_info = ValueInfoProto()
    value_info.name = name
    value_info.type.CopyFrom(type_proto)
    if doc_string:
        value_info.doc_string = doc_string
    return value_info
def _sanitize_str(s: Union[Text, bytes]) -> Text:
    """Coerce *s* to text, truncating values of 64+ chars for display."""
    if isinstance(s, bytes):
        text = s.decode('utf-8', errors='ignore')
    elif isinstance(s, str):
        text = s
    else:
        text = str(s)
    if len(text) >= 64:
        # Keep a 64-char prefix plus a marker showing how much was cut.
        return text[:64] + '...<+len=%d>' % (len(text) - 64)
    return text
def make_tensor_sequence_value_info(
        name: Text,
        elem_type: int,
        shape: Optional[Sequence[Union[Text, int, None]]],
        doc_string: Text = "",
        elem_shape_denotation: Optional[List[Text]] = None,
) -> ValueInfoProto:
    """Makes a Sequence[Tensors] ValueInfoProto based on the data type and shape."""
    value_info = ValueInfoProto()
    value_info.name = name
    if doc_string:
        value_info.doc_string = doc_string
    elem_type_proto = make_tensor_type_proto(elem_type, shape, elem_shape_denotation)
    seq_type_proto = make_sequence_type_proto(elem_type_proto)
    # Copy only the sequence_type payload into the value info's type.
    value_info.type.sequence_type.CopyFrom(seq_type_proto.sequence_type)
    return value_info
def printable_attribute(attr: AttributeProto, subgraphs: bool = False) -> Union[Text, Tuple[Text, List[GraphProto]]]:
    """Render an AttributeProto as 'name = value' text.

    When subgraphs is True, returns a (text, graphs) tuple where graphs
    collects any GraphProto values found, so the caller can print them
    separately; otherwise returns just the text.
    """
    content = []
    content.append(attr.name)
    content.append("=")

    def str_float(f: float) -> Text:
        # NB: Different Python versions print different numbers of trailing
        # decimals, specifying this explicitly keeps it consistent for all
        # versions
        return '{:.15g}'.format(f)

    def str_int(i: int) -> Text:
        # NB: In Python 2, longs will repr() as '2L', which is ugly and
        # unnecessary. Explicitly format it to keep it consistent.
        return '{:d}'.format(i)

    def str_str(s: Text) -> Text:
        return repr(s)

    _T = TypeVar('_T')  # noqa

    def str_list(str_elem: Callable[[_T], Text], xs: Sequence[_T]) -> Text:
        return '[' + ', '.join(map(str_elem, xs)) + ']'

    # for now, this logic should continue to work as long as we are running on a proto3
    # implementation. If/when we switch to proto3, we will need to use attr.type

    # To support printing subgraphs, if we find a graph attribute, print out
    # its name here and pass the graph itself up to the caller for later
    # printing.
    graphs = []
    # Singular fields are detected with HasField; repeated fields below are
    # detected by truthiness (non-empty).
    if attr.HasField("f"):
        content.append(str_float(attr.f))
    elif attr.HasField("i"):
        content.append(str_int(attr.i))
    elif attr.HasField("s"):
        # TODO: Bit nervous about Python 2 / Python 3 determinism implications
        content.append(repr(_sanitize_str(attr.s)))
    elif attr.HasField("t"):
        if len(attr.t.dims) > 0:
            content.append("<Tensor>")
        else:
            # special case to print scalars
            field = STORAGE_TENSOR_TYPE_TO_FIELD[attr.t.data_type]
            content.append('<Scalar Tensor {}>'.format(str(getattr(attr.t, field))))
    elif attr.HasField("g"):
        content.append("<graph {}>".format(attr.g.name))
        graphs.append(attr.g)
    elif attr.HasField("tp"):
        content.append("<Type Proto {}>".format(attr.tp))
    elif attr.floats:
        content.append(str_list(str_float, attr.floats))
    elif attr.ints:
        content.append(str_list(str_int, attr.ints))
    elif attr.strings:
        # TODO: Bit nervous about Python 2 / Python 3 determinism implications
        content.append(str(list(map(_sanitize_str, attr.strings))))
    elif attr.tensors:
        content.append("[<Tensor>, ...]")
    elif attr.type_protos:
        content.append('[')
        for i, tp in enumerate(attr.type_protos):
            comma = ',' if i != len(attr.type_protos) - 1 else ''
            content.append('<Type Proto {}>{}'.format(tp, comma))
        content.append(']')
    elif attr.graphs:
        content.append('[')
        for i, g in enumerate(attr.graphs):
            comma = ',' if i != len(attr.graphs) - 1 else ''
            content.append('<graph {}>{}'.format(g.name, comma))
        content.append(']')
        graphs.extend(attr.graphs)
    else:
        content.append("<Unknown>")
    if subgraphs:
        return ' '.join(content), graphs
    else:
        return ' '.join(content)
def printable_dim(dim: TensorShapeProto.Dimension) -> Text:
    """Render one dimension (its dim_value or dim_param) as text."""
    set_field = dim.WhichOneof('value')
    assert set_field is not None
    return str(getattr(dim, set_field))
def printable_type(t: TypeProto) -> Text:
    """Render a TypeProto as short human-readable text."""
    kind = t.WhichOneof('value')
    if kind == "tensor_type":
        text = TensorProto.DataType.Name(t.tensor_type.elem_type)
        if t.tensor_type.HasField('shape'):
            dims = t.tensor_type.shape.dim
            if len(dims):
                text += ', ' + 'x'.join(map(printable_dim, dims))
            else:
                # A present-but-empty shape denotes a scalar.
                text += ', scalar'
        return text
    if kind is None:
        return ""
    return 'Unknown type {}'.format(kind)
def printable_value_info(v: ValueInfoProto) -> Text:
    """Render a ValueInfoProto as '%name' or '%name[type]'."""
    rendered = '%{}'.format(v.name)
    if v.type:
        rendered = '{}[{}]'.format(rendered, printable_type(v.type))
    return rendered
def printable_tensor_proto(t: TensorProto) -> Text:
    """Render a TensorProto as '%name[DTYPE, d0xd1x...]' (or ', scalar')."""
    pieces = ['%{}['.format(t.name), TensorProto.DataType.Name(t.data_type)]
    if t.dims is not None:
        if len(t.dims):
            pieces.append(', ' + 'x'.join(map(str, t.dims)))
        else:
            pieces.append(', scalar')
    pieces.append(']')
    return ''.join(pieces)
def printable_node(node: NodeProto, prefix: Text = '', subgraphs: bool = False) -> Union[Text, Tuple[Text, List[GraphProto]]]:
    """Render a NodeProto as '%out = OpType[attrs](%inputs)' text.

    When subgraphs is True, returns (text, graphs) where graphs collects
    GraphProto attribute values for the caller to print separately.
    """
    content = []
    if len(node.output):
        content.append(
            ', '.join(['%{}'.format(name) for name in node.output]))
        content.append('=')
    # To deal with nested graphs
    graphs: List[GraphProto] = []
    printed_attrs = []
    for attr in node.attribute:
        if subgraphs:
            printed_attr_subgraphs = printable_attribute(attr, subgraphs)
            assert isinstance(printed_attr_subgraphs[1], list)
            graphs.extend(printed_attr_subgraphs[1])
            printed_attrs.append(printed_attr_subgraphs[0])
        else:
            printed = printable_attribute(attr)
            assert isinstance(printed, Text)
            printed_attrs.append(printed)
    # Attributes are sorted for deterministic output.
    printed_attributes = ', '.join(sorted(printed_attrs))
    printed_inputs = ', '.join(['%{}'.format(name) for name in node.input])
    if node.attribute:
        content.append("{}[{}]({})".format(node.op_type, printed_attributes, printed_inputs))
    else:
        content.append("{}({})".format(node.op_type, printed_inputs))
    if subgraphs:
        return prefix + ' '.join(content), graphs
    else:
        return prefix + ' '.join(content)
def printable_graph(graph: GraphProto, prefix: Text = '') -> Text:
    """Render a GraphProto (and, recursively, its subgraphs) as text.

    The header lists required inputs, inputs backed by initializers, and
    initializers without matching inputs; the body prints each node; the
    tail prints the graph outputs.
    """
    content = []
    indent = prefix + '  '
    # header
    header = ['graph', graph.name]
    initializers = {t.name for t in graph.initializer}
    if len(graph.input):
        header.append("(")
        in_strs = []  # required inputs
        in_with_init_strs = []  # optional inputs with initializer providing default value
        for inp in graph.input:
            if inp.name not in initializers:
                in_strs.append(printable_value_info(inp))
            else:
                in_with_init_strs.append(printable_value_info(inp))
        if in_strs:
            content.append(prefix + ' '.join(header))
            # `header` is emptied after each flush so already-printed words
            # are not repeated on the next line.
            header = []
            for line in in_strs:
                content.append(prefix + '  ' + line)
        header.append(")")

        if in_with_init_strs:
            header.append("optional inputs with matching initializers (")
            content.append(prefix + ' '.join(header))
            header = []
            for line in in_with_init_strs:
                content.append(prefix + '  ' + line)
            header.append(")")

        # from IR 4 onwards an initializer is not required to have a matching graph input
        # so output the name, type and shape of those as well
        if len(in_with_init_strs) < len(initializers):
            graph_inputs = {i.name for i in graph.input}
            init_strs = [printable_tensor_proto(i) for i in graph.initializer
                         if i.name not in graph_inputs]
            header.append("initializers (")
            content.append(prefix + ' '.join(header))
            header = []
            for line in init_strs:
                content.append(prefix + '  ' + line)
            header.append(")")

    header.append('{')
    content.append(prefix + ' '.join(header))
    graphs: List[GraphProto] = []
    # body
    for node in graph.node:
        contents_subgraphs = printable_node(node, indent, subgraphs=True)
        assert isinstance(contents_subgraphs[1], list)
        content.append(contents_subgraphs[0])
        graphs.extend(contents_subgraphs[1])
    # tail
    tail = ['return']
    if len(graph.output):
        tail.append(
            ', '.join(['%{}'.format(out.name) for out in graph.output]))
    content.append(indent + ' '.join(tail))
    # closing bracket
    content.append(prefix + '}')
    # Subgraphs collected from node attributes are printed after the
    # enclosing graph.
    for g in graphs:
        content.append('\n' + printable_graph(g))
    return '\n'.join(content)
def strip_doc_string(proto: google.protobuf.message.Message) -> None:
    """
    Empties `doc_string` field on any nested protobuf messages

    Mutates *proto* in place, recursing into both singular and repeated
    message-typed fields.
    """
    assert isinstance(proto, google.protobuf.message.Message)
    for descriptor in proto.DESCRIPTOR.fields:
        if descriptor.name == 'doc_string':
            proto.ClearField(descriptor.name)
        elif descriptor.type == descriptor.TYPE_MESSAGE:
            if descriptor.label == descriptor.LABEL_REPEATED:
                for x in getattr(proto, descriptor.name):
                    strip_doc_string(x)
            elif proto.HasField(descriptor.name):
                # Only recurse into singular sub-messages that are present.
                strip_doc_string(getattr(proto, descriptor.name))
def make_training_info(algorithm: GraphProto, algorithm_bindings: AssignmentBindingType, initialization: Optional[GraphProto], initialization_bindings: Optional[AssignmentBindingType]) -> TrainingInfoProto:
    """Construct a TrainingInfoProto from an algorithm graph with its
    update bindings and an optional initialization graph with bindings."""
    training_info = TrainingInfoProto()
    training_info.algorithm.CopyFrom(algorithm)
    for binding_key, binding_value in algorithm_bindings:
        entry = training_info.update_binding.add()
        entry.key = binding_key
        entry.value = binding_value

    if initialization:
        training_info.initialization.CopyFrom(initialization)
    if initialization_bindings:
        for binding_key, binding_value in initialization_bindings:
            entry = training_info.initialization_binding.add()
            entry.key = binding_key
            entry.value = binding_value

    return training_info
# For backwards compatibility
def make_sequence_value_info(
    name: Text,
    elem_type: int,
    shape: Optional[Sequence[Union[Text, int, None]]],
    doc_string: Text = "",
    elem_shape_denotation: Optional[List[Text]] = None,
) -> ValueInfoProto:
    """Makes a Sequence[Tensors] ValueInfoProto based on the data type and shape.

    Deprecated alias: emits a DeprecationWarning and forwards every argument
    to make_tensor_sequence_value_info unchanged.
    """
    warnings.warn(
        str("`onnx.helper.make_sequence_value_info` is a deprecated alias for `onnx.helper.make_tensor_sequence_value_info`. To silence this warning, please use `make_tensor_sequence_value_info` for `TensorProto` sequences. Deprecated in ONNX v1.10.0, `onnx.helper.make_sequence_value_info alias` will be removed in an upcoming release."),
        DeprecationWarning,
        stacklevel=2,
    )
    return make_tensor_sequence_value_info(
        name, elem_type, shape, doc_string, elem_shape_denotation)
| 35.59001 | 383 | 0.64543 |
6bf82f9b2d8b50ec81c9a247770281b03f737a55 | 609 | py | Python | sitegen/stamper/stamper.py | hacktoon/sitegen | bedead6b8601d990832dc195c4b7f52cf8acb534 | [
"WTFPL"
] | null | null | null | sitegen/stamper/stamper.py | hacktoon/sitegen | bedead6b8601d990832dc195c4b7f52cf8acb534 | [
"WTFPL"
] | null | null | null | sitegen/stamper/stamper.py | hacktoon/sitegen | bedead6b8601d990832dc195c4b7f52cf8acb534 | [
"WTFPL"
] | null | null | null | # coding: utf-8
'''
===============================================================================
Sitegen
Author: Karlisson M. Bezerra
E-mail: contact@hacktoon.com
URL: https://github.com/hacktoon/sitegen
License: WTFPL - http://sam.zoy.org/wtfpl/COPYING
===============================================================================
'''
from . import parser
import sys
class Stamper:
    """Parse template text once at construction, then render it on demand."""
    def __init__(self, text, include_path=''):
        # include_path is kept for callers; the parser also receives it
        self.include_path = include_path
        source_parser = parser.Parser(text, include_path=include_path)
        self.tree = source_parser.parse()
    def render(self, context):
        """Render the parsed tree against *context* and return the result."""
        return self.tree.render(context)
| 25.375 | 79 | 0.545156 |
993141ede465ac8842effdf0a57c401511a573be | 585 | py | Python | tests/test_gather.py | rickproza/twill | 7a98e4912a8ff929a94e35d35e7a027472ee4f46 | [
"MIT"
] | 13 | 2020-04-18T15:17:58.000Z | 2022-02-24T13:25:46.000Z | tests/test_gather.py | rickproza/twill | 7a98e4912a8ff929a94e35d35e7a027472ee4f46 | [
"MIT"
] | 5 | 2020-04-04T21:16:00.000Z | 2022-02-10T00:26:20.000Z | tests/test_gather.py | rickproza/twill | 7a98e4912a8ff929a94e35d35e7a027472ee4f46 | [
"MIT"
] | 3 | 2020-06-06T17:26:19.000Z | 2022-02-10T00:30:39.000Z | import os
from twill.utils import gather_filenames
def test_gather_dir():
    """gather_filenames finds every *.twill script under the fixture tree."""
    base = os.path.dirname(__file__)
    gather_root = os.path.join(base, 'test_gather')
    previous_cwd = os.getcwd()
    os.chdir(gather_root)
    try:
        found = gather_filenames(('.',))
        # normalise Windows separators so the expectations below match
        if os.sep != '/':
            found = [name.replace(os.sep, '/') for name in found]
        expected = [
            './00-testme/x-script.twill',
            './01-test/b.twill',
            './02-test2/c.twill',
            './02-test2/02-subtest/d.twill',
        ]
        assert sorted(found) == sorted(expected), found
    finally:
        # always restore the working directory for later tests
        os.chdir(previous_cwd)
| 26.590909 | 59 | 0.567521 |
9fa29b5c142cf0e1a427dcd57e1aca98fdecfc78 | 71 | py | Python | weapy/__init__.py | TRNSYSJP/weapy | 6e0cebe8be9f7d89894f2800dbf3b3074184d265 | [
"MIT"
] | null | null | null | weapy/__init__.py | TRNSYSJP/weapy | 6e0cebe8be9f7d89894f2800dbf3b3074184d265 | [
"MIT"
] | 4 | 2020-08-24T07:04:53.000Z | 2020-10-25T09:57:08.000Z | weapy/__init__.py | TRNSYSJP/weapy | 6e0cebe8be9f7d89894f2800dbf3b3074184d265 | [
"MIT"
] | null | null | null | from weapy.weatherdata import WeatherDataFile
# import wea.weatherdata
| 23.666667 | 45 | 0.859155 |
8bf6e4eed8dd6d520214f3887dea26f50ee5fb96 | 631 | py | Python | cmstk/vasp/oszicar_test.py | seatonullberg/cmstk | f8dd4f698723053c06d181ecdd918d8e5fc98a92 | [
"MIT"
] | 1 | 2019-12-23T14:43:58.000Z | 2019-12-23T14:43:58.000Z | cmstk/vasp/oszicar_test.py | seatonullberg/cmstk | f8dd4f698723053c06d181ecdd918d8e5fc98a92 | [
"MIT"
] | 6 | 2019-04-25T22:08:40.000Z | 2019-12-18T21:46:09.000Z | cmstk/vasp/oszicar_test.py | seatonullberg/cmstk | f8dd4f698723053c06d181ecdd918d8e5fc98a92 | [
"MIT"
] | null | null | null | from cmstk.vasp.oszicar import OszicarFile
from cmstk.util import data_directory
import os
def test_oszicar_file():
    """Tests the initialization of an OszicarFile object."""
    oszicar_path = os.path.join(data_directory(), "vasp",
                                "Fe75Cr25_BCC_bulk.oszicar")
    oszicar = OszicarFile(oszicar_path)
    with oszicar:
        # first and last ionic steps of each parsed series
        assert oszicar.total_free_energy[0] == -.13644212E+03
        assert oszicar.total_free_energy[-1] == -.13652019E+03
        assert oszicar.e0[0] == -.13644801E+03
        assert oszicar.e0[-1] == -.13652664E+03
        assert oszicar.magnetization[0] == 24.9856
        assert oszicar.magnetization[-1] == 24.9537
29db84119b07dca32d1732419d3cacd173fe21c0 | 305 | py | Python | src/cache.py | SwapnilBhosale/tomasula-simulator | 2ae152e0574159314ccf7fc298b82d6865a03169 | [
"Apache-2.0"
] | null | null | null | src/cache.py | SwapnilBhosale/tomasula-simulator | 2ae152e0574159314ccf7fc298b82d6865a03169 | [
"Apache-2.0"
] | null | null | null | src/cache.py | SwapnilBhosale/tomasula-simulator | 2ae152e0574159314ccf7fc298b82d6865a03169 | [
"Apache-2.0"
] | null | null | null |
'''
This is the base class for cache
defines get and put methods
'''
class Cache:
    """Abstract cache interface: subclasses provide get/put implementations."""

    def __init__(self, name):
        # human-readable identifier for this cache level
        self.name = name

    def get_from_cache(self, address):
        """Fetch the data stored at *address*; must be overridden."""
        raise NotImplementedError()

    def put_into_cache(self, address, data):
        """Store *data* at *address*; must be overridden."""
        raise NotImplementedError()
0f969c75242735022743652961bd11aeadc5399c | 1,117 | py | Python | datasets/data_utils.py | dolphintear/pytorch-kaggle-starter | 7f993161afca8809e8a6ea46bffe76b4d6163082 | [
"MIT"
] | 336 | 2017-08-22T18:54:19.000Z | 2022-03-22T04:07:08.000Z | datasets/data_utils.py | emeraldic/kaggle-starter | 7f993161afca8809e8a6ea46bffe76b4d6163082 | [
"MIT"
] | 1 | 2020-02-14T14:12:15.000Z | 2020-02-14T14:12:15.000Z | datasets/data_utils.py | emeraldic/kaggle-starter | 7f993161afca8809e8a6ea46bffe76b4d6163082 | [
"MIT"
] | 73 | 2017-08-26T22:09:58.000Z | 2022-03-29T13:00:02.000Z | import os
import shutil
import numpy as np
import utils
from glob import glob
from PIL import Image
from skimage import io
import torch
import config as cfg
import constants as c
from datasets import metadata
def pil_loader(path):
    """Open the image at *path* with PIL and return it forced to RGB mode."""
    image = Image.open(path)
    return image.convert('RGB')
def tensor_loader(path):
    """Deserialize and return the torch object saved at *path*."""
    loaded = torch.load(path)
    return loaded
def numpy_loader(path):
    """Load and return the NumPy array saved at *path* (.npy/.npz)."""
    array = np.load(path)
    return array
def io_loader(path):
    # Read the image file at *path* into an array via scikit-image's io.imread.
    return io.imread(path)
def tif_loader(path):
    # Read a 4-channel TIFF and reorder channels (2,1,0,3), i.e. swap the
    # first and third bands while keeping the fourth.
    # NOTE(review): calibrate_image (defined below) requires ref_stds and
    # ref_means, but only one argument is passed here -- as written this
    # call raises TypeError. Confirm the intended reference statistics.
    return calibrate_image(io.imread(path)[:,:,(2,1,0,3)])
def calibrate_image(rgb_image, ref_stds, ref_means):
    """Standardize each channel of *rgb_image* and rescale it to the given
    reference statistics, clipping to the displayable 0..255 uint8 range."""
    pixels = rgb_image.astype('float32')
    # per-channel z-score over the spatial axes (height, width)
    standardized = (pixels - np.mean(pixels, axis=(0, 1))) / np.std(pixels, axis=(0, 1))
    rescaled = standardized * ref_stds + ref_means
    return np.clip(rescaled, 0, 255).astype('uint8')
def get_inputs_targets(fpaths, dframe):
    # Build (inputs, targets) arrays for the given image file paths:
    # inputs are loaded image arrays, targets are one-hot label vectors.
    # NOTE(review): METADATA_DF, img_utils and meta are not defined or
    # imported in this module's visible imports -- as written this function
    # raises NameError. The author's own "REFACTOR" markers suggest it was
    # mid-rewrite; confirm the intended helpers before use.
    ## REFACTOR
    inputs = []
    targets = []
    for fpath in fpaths:
        # Refactor
        name, tags = metadata.get_img_name_and_tags(METADATA_DF, fpath)
        inputs.append(img_utils.load_img_as_arr(fpath))
        targets.append(meta.get_one_hots_by_name(name, dframe))
    return np.array(inputs), np.array(targets)
82acb7c550ec21df9ee76027450843802b58882e | 12,289 | py | Python | sympy/physics/quantum/tests/test_gate.py | JMSS-Unknown/sympy | cd98ba006b5c6d6a6d072eafa28ea6d0ebdaf0e7 | [
"BSD-3-Clause"
] | 8 | 2019-05-29T09:38:30.000Z | 2021-01-20T03:36:59.000Z | sympy/physics/quantum/tests/test_gate.py | JMSS-Unknown/sympy | cd98ba006b5c6d6a6d072eafa28ea6d0ebdaf0e7 | [
"BSD-3-Clause"
] | 12 | 2021-03-09T03:01:16.000Z | 2022-03-11T23:59:36.000Z | sympy/physics/quantum/tests/test_gate.py | JMSS-Unknown/sympy | cd98ba006b5c6d6a6d072eafa28ea6d0ebdaf0e7 | [
"BSD-3-Clause"
] | 1 | 2018-10-21T06:32:46.000Z | 2018-10-21T06:32:46.000Z | from sympy import exp, symbols, sqrt, I, pi, Mul, Integer, Wild
from sympy.core.compatibility import range
from sympy.matrices import Matrix, ImmutableMatrix
from sympy.physics.quantum.gate import (XGate, YGate, ZGate, random_circuit,
CNOT, IdentityGate, H, X, Y, S, T, Z, SwapGate, gate_simp, gate_sort,
CNotGate, TGate, HadamardGate, PhaseGate, UGate, CGate)
from sympy.physics.quantum.commutator import Commutator
from sympy.physics.quantum.anticommutator import AntiCommutator
from sympy.physics.quantum.represent import represent
from sympy.physics.quantum.qapply import qapply
from sympy.physics.quantum.qubit import Qubit, IntQubit, qubit_to_matrix, \
matrix_to_qubit
from sympy.physics.quantum.matrixutils import matrix_to_zero
from sympy.physics.quantum.matrixcache import sqrt2_inv
from sympy.physics.quantum import Dagger
def test_gate():
    """Test a basic gate."""
    hadamard = HadamardGate(1)
    assert hadamard.min_qubits == 2
    assert hadamard.nqubits == 1
    # Gates built on the same Wild compare equal; different Wilds do not.
    wild_a = Wild('i0')
    wild_b = Wild('i1')
    first = HadamardGate(wild_a)
    second = HadamardGate(wild_a)
    other = HadamardGate(wild_b)
    assert first == second
    assert first != other
    assert other != second
    # The same equality rules hold for CNOT with swapped wild arguments.
    cnot_ba_1 = CNOT(wild_b, wild_a)
    cnot_ba_2 = CNOT(wild_b, wild_a)
    cnot_ab = CNOT(wild_a, wild_b)
    assert cnot_ba_1 == cnot_ba_2
    assert cnot_ba_1 != cnot_ab
    assert cnot_ba_2 != cnot_ab
def test_UGate():
a, b, c, d = symbols('a,b,c,d')
uMat = Matrix([[a, b], [c, d]])
# Test basic case where gate exists in 1-qubit space
u1 = UGate((0,), uMat)
assert represent(u1, nqubits=1) == uMat
assert qapply(u1*Qubit('0')) == a*Qubit('0') + c*Qubit('1')
assert qapply(u1*Qubit('1')) == b*Qubit('0') + d*Qubit('1')
# Test case where gate exists in a larger space
u2 = UGate((1,), uMat)
u2Rep = represent(u2, nqubits=2)
for i in range(4):
assert u2Rep*qubit_to_matrix(IntQubit(i, 2)) == \
qubit_to_matrix(qapply(u2*IntQubit(i, 2)))
def test_cgate():
"""Test the general CGate."""
# Test single control functionality
CNOTMatrix = Matrix(
[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
assert represent(CGate(1, XGate(0)), nqubits=2) == CNOTMatrix
# Test multiple control bit functionality
ToffoliGate = CGate((1, 2), XGate(0))
assert represent(ToffoliGate, nqubits=3) == \
Matrix(
[[1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0,
1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 0]])
ToffoliGate = CGate((3, 0), XGate(1))
assert qapply(ToffoliGate*Qubit('1001')) == \
matrix_to_qubit(represent(ToffoliGate*Qubit('1001'), nqubits=4))
assert qapply(ToffoliGate*Qubit('0000')) == \
matrix_to_qubit(represent(ToffoliGate*Qubit('0000'), nqubits=4))
CYGate = CGate(1, YGate(0))
CYGate_matrix = Matrix(
((1, 0, 0, 0), (0, 1, 0, 0), (0, 0, 0, -I), (0, 0, I, 0)))
# Test 2 qubit controlled-Y gate decompose method.
assert represent(CYGate.decompose(), nqubits=2) == CYGate_matrix
CZGate = CGate(0, ZGate(1))
CZGate_matrix = Matrix(
((1, 0, 0, 0), (0, 1, 0, 0), (0, 0, 1, 0), (0, 0, 0, -1)))
assert qapply(CZGate*Qubit('11')) == -Qubit('11')
assert matrix_to_qubit(represent(CZGate*Qubit('11'), nqubits=2)) == \
-Qubit('11')
# Test 2 qubit controlled-Z gate decompose method.
assert represent(CZGate.decompose(), nqubits=2) == CZGate_matrix
CPhaseGate = CGate(0, PhaseGate(1))
assert qapply(CPhaseGate*Qubit('11')) == \
I*Qubit('11')
assert matrix_to_qubit(represent(CPhaseGate*Qubit('11'), nqubits=2)) == \
I*Qubit('11')
# Test that the dagger, inverse, and power of CGate is evaluated properly
assert Dagger(CZGate) == CZGate
assert pow(CZGate, 1) == Dagger(CZGate)
assert Dagger(CZGate) == CZGate.inverse()
assert Dagger(CPhaseGate) != CPhaseGate
assert Dagger(CPhaseGate) == CPhaseGate.inverse()
assert Dagger(CPhaseGate) == pow(CPhaseGate, -1)
assert pow(CPhaseGate, -1) == CPhaseGate.inverse()
def test_UGate_CGate_combo():
a, b, c, d = symbols('a,b,c,d')
uMat = Matrix([[a, b], [c, d]])
cMat = Matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, a, b], [0, 0, c, d]])
# Test basic case where gate exists in 1-qubit space.
u1 = UGate((0,), uMat)
cu1 = CGate(1, u1)
assert represent(cu1, nqubits=2) == cMat
assert qapply(cu1*Qubit('10')) == a*Qubit('10') + c*Qubit('11')
assert qapply(cu1*Qubit('11')) == b*Qubit('10') + d*Qubit('11')
assert qapply(cu1*Qubit('01')) == Qubit('01')
assert qapply(cu1*Qubit('00')) == Qubit('00')
# Test case where gate exists in a larger space.
u2 = UGate((1,), uMat)
u2Rep = represent(u2, nqubits=2)
for i in range(4):
assert u2Rep*qubit_to_matrix(IntQubit(i, 2)) == \
qubit_to_matrix(qapply(u2*IntQubit(i, 2)))
def test_UGate_OneQubitGate_combo():
v, w, f, g = symbols('v w f g')
uMat1 = ImmutableMatrix([[v, w], [f, g]])
cMat1 = Matrix([[v, w + 1, 0, 0], [f + 1, g, 0, 0], [0, 0, v, w + 1], [0, 0, f + 1, g]])
u1 = X(0) + UGate(0, uMat1)
assert represent(u1, nqubits=2) == cMat1
uMat2 = ImmutableMatrix([[1/sqrt(2), 1/sqrt(2)], [I/sqrt(2), -I/sqrt(2)]])
cMat2_1 = Matrix([[1/2 + I/2, 1/2 - I/2], [1/2 - I/2, 1/2 + I/2]])
cMat2_2 = Matrix([[1, 0], [0, I]])
u2 = UGate(0, uMat2)
assert represent(H(0)*u2, nqubits=1) == cMat2_1
assert represent(u2*H(0), nqubits=1) == cMat2_2
def test_represent_hadamard():
    """Test the representation of the hadamard gate."""
    state = HadamardGate(0)*Qubit('00')
    matrix = represent(state, nqubits=2)
    # H on the low qubit maps |00> to (|00> + |01>)/sqrt(2).
    assert matrix == Matrix([sqrt2_inv, sqrt2_inv, 0, 0])
def test_represent_xgate():
    """Test the representation of the X gate."""
    flipped = represent(XGate(0)*Qubit('00'), nqubits=2)
    # X flips the low qubit, so |00> becomes |01> (second basis vector).
    assert Matrix([0, 1, 0, 0]) == flipped
def test_represent_ygate():
"""Test the representation of the Y gate."""
circuit = YGate(0)*Qubit('00')
answer = represent(circuit, nqubits=2)
assert answer[0] == 0 and answer[1] == I and \
answer[2] == 0 and answer[3] == 0
def test_represent_zgate():
"""Test the representation of the Z gate."""
circuit = ZGate(0)*Qubit('00')
answer = represent(circuit, nqubits=2)
assert Matrix([1, 0, 0, 0]) == answer
def test_represent_phasegate():
"""Test the representation of the S gate."""
circuit = PhaseGate(0)*Qubit('01')
answer = represent(circuit, nqubits=2)
assert Matrix([0, I, 0, 0]) == answer
def test_represent_tgate():
"""Test the representation of the T gate."""
circuit = TGate(0)*Qubit('01')
assert Matrix([0, exp(I*pi/4), 0, 0]) == represent(circuit, nqubits=2)
def test_compound_gates():
"""Test a compound gate representation."""
circuit = YGate(0)*ZGate(0)*XGate(0)*HadamardGate(0)*Qubit('00')
answer = represent(circuit, nqubits=2)
assert Matrix([I/sqrt(2), I/sqrt(2), 0, 0]) == answer
def test_cnot_gate():
    """Test the CNOT gate."""
    cnot = CNotGate(1, 0)
    # Control on qubit 1, target on qubit 0: swaps the |10> and |11> rows.
    expected = Matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
    assert represent(cnot, nqubits=2) == expected
    applied = cnot*Qubit('111')
    # The matrix representation and symbolic application must agree.
    assert matrix_to_qubit(represent(applied, nqubits=3)) == qapply(applied)
    cnot = CNotGate(1, 0)
    # CNOT is Hermitian and self-inverse.
    assert Dagger(cnot) == cnot
    assert Dagger(Dagger(cnot)) == cnot
    assert cnot*cnot == 1
def test_gate_sort():
"""Test gate_sort."""
for g in (X, Y, Z, H, S, T):
assert gate_sort(g(2)*g(1)*g(0)) == g(0)*g(1)*g(2)
e = gate_sort(X(1)*H(0)**2*CNOT(0, 1)*X(1)*X(0))
assert e == H(0)**2*CNOT(0, 1)*X(0)*X(1)**2
assert gate_sort(Z(0)*X(0)) == -X(0)*Z(0)
assert gate_sort(Z(0)*X(0)**2) == X(0)**2*Z(0)
assert gate_sort(Y(0)*H(0)) == -H(0)*Y(0)
assert gate_sort(Y(0)*X(0)) == -X(0)*Y(0)
assert gate_sort(Z(0)*Y(0)) == -Y(0)*Z(0)
assert gate_sort(T(0)*S(0)) == S(0)*T(0)
assert gate_sort(Z(0)*S(0)) == S(0)*Z(0)
assert gate_sort(Z(0)*T(0)) == T(0)*Z(0)
assert gate_sort(Z(0)*CNOT(0, 1)) == CNOT(0, 1)*Z(0)
assert gate_sort(S(0)*CNOT(0, 1)) == CNOT(0, 1)*S(0)
assert gate_sort(T(0)*CNOT(0, 1)) == CNOT(0, 1)*T(0)
assert gate_sort(X(1)*CNOT(0, 1)) == CNOT(0, 1)*X(1)
# This takes a long time and should only be uncommented once in a while.
# nqubits = 5
# ngates = 10
# trials = 10
# for i in range(trials):
# c = random_circuit(ngates, nqubits)
# assert represent(c, nqubits=nqubits) == \
# represent(gate_sort(c), nqubits=nqubits)
def test_gate_simp():
"""Test gate_simp."""
e = H(0)*X(1)*H(0)**2*CNOT(0, 1)*X(1)**3*X(0)*Z(3)**2*S(4)**3
assert gate_simp(e) == H(0)*CNOT(0, 1)*S(4)*X(0)*Z(4)
assert gate_simp(X(0)*X(0)) == 1
assert gate_simp(Y(0)*Y(0)) == 1
assert gate_simp(Z(0)*Z(0)) == 1
assert gate_simp(H(0)*H(0)) == 1
assert gate_simp(T(0)*T(0)) == S(0)
assert gate_simp(S(0)*S(0)) == Z(0)
assert gate_simp(Integer(1)) == Integer(1)
assert gate_simp(X(0)**2 + Y(0)**2) == Integer(2)
def test_swap_gate():
"""Test the SWAP gate."""
swap_gate_matrix = Matrix(
((1, 0, 0, 0), (0, 0, 1, 0), (0, 1, 0, 0), (0, 0, 0, 1)))
assert represent(SwapGate(1, 0).decompose(), nqubits=2) == swap_gate_matrix
assert qapply(SwapGate(1, 3)*Qubit('0010')) == Qubit('1000')
nqubits = 4
for i in range(nqubits):
for j in range(i):
assert represent(SwapGate(i, j), nqubits=nqubits) == \
represent(SwapGate(i, j).decompose(), nqubits=nqubits)
def test_one_qubit_commutators():
"""Test single qubit gate commutation relations."""
for g1 in (IdentityGate, X, Y, Z, H, T, S):
for g2 in (IdentityGate, X, Y, Z, H, T, S):
e = Commutator(g1(0), g2(0))
a = matrix_to_zero(represent(e, nqubits=1, format='sympy'))
b = matrix_to_zero(represent(e.doit(), nqubits=1, format='sympy'))
assert a == b
e = Commutator(g1(0), g2(1))
assert e.doit() == 0
def test_one_qubit_anticommutators():
"""Test single qubit gate anticommutation relations."""
for g1 in (IdentityGate, X, Y, Z, H):
for g2 in (IdentityGate, X, Y, Z, H):
e = AntiCommutator(g1(0), g2(0))
a = matrix_to_zero(represent(e, nqubits=1, format='sympy'))
b = matrix_to_zero(represent(e.doit(), nqubits=1, format='sympy'))
assert a == b
e = AntiCommutator(g1(0), g2(1))
a = matrix_to_zero(represent(e, nqubits=2, format='sympy'))
b = matrix_to_zero(represent(e.doit(), nqubits=2, format='sympy'))
assert a == b
def test_cnot_commutators():
"""Test commutators of involving CNOT gates."""
assert Commutator(CNOT(0, 1), Z(0)).doit() == 0
assert Commutator(CNOT(0, 1), T(0)).doit() == 0
assert Commutator(CNOT(0, 1), S(0)).doit() == 0
assert Commutator(CNOT(0, 1), X(1)).doit() == 0
assert Commutator(CNOT(0, 1), CNOT(0, 1)).doit() == 0
assert Commutator(CNOT(0, 1), CNOT(0, 2)).doit() == 0
assert Commutator(CNOT(0, 2), CNOT(0, 1)).doit() == 0
assert Commutator(CNOT(1, 2), CNOT(1, 0)).doit() == 0
def test_random_circuit():
c = random_circuit(10, 3)
assert isinstance(c, Mul)
m = represent(c, nqubits=3)
assert m.shape == (8, 8)
assert isinstance(m, Matrix)
def test_hermitian_XGate():
    # X is Hermitian: it equals its own adjoint.
    gate = XGate(1, 2)
    assert gate == Dagger(gate)
def test_hermitian_YGate():
y = YGate(1, 2)
y_dagger = Dagger(y)
assert (y == y_dagger)
def test_hermitian_ZGate():
z = ZGate(1, 2)
z_dagger = Dagger(z)
assert (z == z_dagger)
def test_unitary_XGate():
    # X is unitary: composing it with its adjoint gives the identity.
    gate = XGate(1, 2)
    assert gate*Dagger(gate) == 1
def test_unitary_YGate():
y = YGate(1, 2)
y_dagger = Dagger(y)
assert (y*y_dagger == 1)
def test_unitary_ZGate():
z = ZGate(1, 2)
z_dagger = Dagger(z)
assert (z*z_dagger == 1)
| 34.422969 | 92 | 0.59419 |
0151e0e177f1c7ad3f6e59338594873e81ad1ad4 | 2,035 | py | Python | api/advanced/player_lookup.py | Major-League-Summer-Baseball/mlsb-platform | ecb2a6a15dcaa12c4e18a6d9c5d1b4caf83e05a4 | [
"Apache-2.0"
] | 1 | 2021-04-22T02:06:33.000Z | 2021-04-22T02:06:33.000Z | api/advanced/player_lookup.py | Major-League-Summer-Baseball/mlsb-platform | ecb2a6a15dcaa12c4e18a6d9c5d1b4caf83e05a4 | [
"Apache-2.0"
] | 42 | 2021-03-12T23:18:30.000Z | 2022-03-13T20:57:36.000Z | api/advanced/player_lookup.py | Major-League-Summer-Baseball/mlsb-platform | ecb2a6a15dcaa12c4e18a6d9c5d1b4caf83e05a4 | [
"Apache-2.0"
] | 1 | 2019-04-21T23:24:54.000Z | 2019-04-21T23:24:54.000Z | '''
@author: Dallas Fraser
@author: 2016-04-12
@organization: MLSB API
@summary: The views for looking up a player
'''
from flask_restful import Resource, reqparse
from flask import Response
from json import dumps
from api.model import Player
parser = reqparse.RequestParser()
parser.add_argument('email', type=str)
parser.add_argument('player_name', type=str)
parser.add_argument("active", type=int)
class PlayerLookupAPI(Resource):
    def post(self):
        """
        POST request to lookup Player
        Route: Route['player_lookup']
        Parameters:
            email: the league id (str)
            player_name: the player id (str)
        Returns:
            status: 200
            mimetype: application/json
            data: list of possible Players
        """
        args = parser.parse_args()
        # the active filter is applied only when the caller sends active=1
        active = bool(args['active'] and args['active'] == 1)
        query = None
        if args['email']:
            # email is normalized and unique, so this finds the player exactly
            email = args['email'].strip().lower()
            query = Player.query.filter(Player.email == email)
        elif args['player_name']:
            # name lookup is a substring match and may return several players
            query = Player.query.filter(Player.name.contains(args['player_name']))
        # previously the active filter was duplicated in four branches;
        # apply it once here instead
        if query is not None and active:
            query = query.filter(Player.active == active)
        data = [] if query is None else [player.json() for player in query.all()]
        return Response(dumps(data), status=200, mimetype="application/json")
| 33.360656 | 77 | 0.528256 |
2ff1eab2816839cd7fd6ca7d390929d1cd2c2911 | 752 | py | Python | guru/users/tests/factories.py | Jeromeschmidt/Guru | 3128a539e55b46afceb33b59c0bafaec7e9f630a | [
"MIT"
] | null | null | null | guru/users/tests/factories.py | Jeromeschmidt/Guru | 3128a539e55b46afceb33b59c0bafaec7e9f630a | [
"MIT"
] | 1 | 2021-02-26T02:49:34.000Z | 2021-02-26T02:49:34.000Z | guru/users/tests/factories.py | Jeromeschmidt/Guru | 3128a539e55b46afceb33b59c0bafaec7e9f630a | [
"MIT"
] | 1 | 2020-02-24T18:09:00.000Z | 2020-02-24T18:09:00.000Z | from typing import Any, Sequence
from django.contrib.auth import get_user_model
from factory import DjangoModelFactory, Faker, post_generation
class UserFactory(DjangoModelFactory):
    # Randomized identity fields supplied by Faker providers.
    username = Faker("user_name")
    email = Faker("email")
    name = Faker("name")
    @post_generation
    def password(self, create: bool, extracted: Sequence[Any], **kwargs):
        # Runs after the model instance is built/created.
        # *extracted* is the value passed as UserFactory(password=...);
        # when absent, generate a strong 42-character random password.
        password = (extracted if extracted else Faker(
            "password",
            length=42,
            special_chars=True,
            digits=True,
            upper_case=True,
            lower_case=True,
        ).generate(extra_kwargs={}))
        # set_password hashes the raw password on the user instance
        self.set_password(password)
    class Meta:
        # target the project's active user model; reuse an existing row
        # when a user with the same username already exists
        model = get_user_model()
        django_get_or_create = ["username"]
| 26.857143 | 73 | 0.639628 |
c399d31a95b6eba4b7365300884a30cd7dc3ea62 | 2,079 | py | Python | homeassistant/components/binary_sensor/insteon.py | dauden1184/home-assistant | f4c6d389b77d0efa86644e76604eaea5d21abdb5 | [
"Apache-2.0"
] | 3 | 2019-01-31T13:41:37.000Z | 2020-05-20T14:22:18.000Z | homeassistant/components/binary_sensor/insteon.py | dauden1184/home-assistant | f4c6d389b77d0efa86644e76604eaea5d21abdb5 | [
"Apache-2.0"
] | 5 | 2021-02-08T20:32:11.000Z | 2022-01-13T01:19:23.000Z | homeassistant/components/binary_sensor/insteon.py | dauden1184/home-assistant | f4c6d389b77d0efa86644e76604eaea5d21abdb5 | [
"Apache-2.0"
] | 1 | 2020-11-04T07:35:32.000Z | 2020-11-04T07:35:32.000Z | """
Support for INSTEON dimmers via PowerLinc Modem.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.insteon/
"""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.components.insteon import InsteonEntity
DEPENDENCIES = ['insteon']
_LOGGER = logging.getLogger(__name__)
SENSOR_TYPES = {'openClosedSensor': 'opening',
'motionSensor': 'motion',
'doorSensor': 'door',
'wetLeakSensor': 'moisture',
'lightSensor': 'light',
'batterySensor': 'battery'}
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up the INSTEON device class for the hass platform."""
    # look up the device announced by discovery on the shared modem
    insteon_modem = hass.data['insteon'].get('modem')
    address = discovery_info['address']
    device = insteon_modem.devices[address]
    state_key = discovery_info['state_key']
    name = device.states[state_key].name
    # skip the 'dryLeakSensor' state -- only one half of the wet/dry pair
    # is registered as an entity here (presumably the wet side; confirm)
    if name != 'dryLeakSensor':
        _LOGGER.debug('Adding device %s entity %s to Binary Sensor platform',
                      device.address.hex, device.states[state_key].name)
        new_entity = InsteonBinarySensor(device, state_key)
        async_add_entities([new_entity])
class InsteonBinarySensor(InsteonEntity, BinarySensorDevice):
    """A Class for an Insteon device entity."""
    def __init__(self, device, state_key):
        """Initialize the INSTEON binary sensor."""
        super().__init__(device, state_key)
        # map the device state name to a Home Assistant device class
        state_name = self._insteon_device_state.name
        self._sensor_type = SENSOR_TYPES.get(state_name)
    @property
    def device_class(self):
        """Return the class of this sensor."""
        return self._sensor_type
    @property
    def is_on(self):
        """Return the boolean response if the node is on."""
        value = bool(self._insteon_device_state.value)
        # light sensors report the inverse of their raw value
        if self._insteon_device_state.name == 'lightSensor':
            return not value
        return value
| 32.484375 | 77 | 0.671477 |
e7527d893fafec4b9d43cbcc6eb4a5f28d5e1e29 | 1,965 | py | Python | tom_calculator/cli.py | andribas404/tom-calculator | b6e04055ca425dcd86e82d9651ad1dcef08d000f | [
"MIT"
] | null | null | null | tom_calculator/cli.py | andribas404/tom-calculator | b6e04055ca425dcd86e82d9651ad1dcef08d000f | [
"MIT"
] | null | null | null | tom_calculator/cli.py | andribas404/tom-calculator | b6e04055ca425dcd86e82d9651ad1dcef08d000f | [
"MIT"
] | null | null | null | """CLI.
Contains CLI application.
CLI is invoked from entrypoint.
For the manual invoking use command `docker exec -it tom-calculator_app_1 bash`
Usage: tom-calculator [OPTIONS] COMMAND [ARGS]...
Options:
--install-completion [bash|zsh|fish|powershell|pwsh]
Install completion for the specified shell.
--show-completion [bash|zsh|fish|powershell|pwsh]
Show completion for the specified shell, to
copy it or customize the installation.
--help Show this message and exit.
Commands:
migrate
migrate-data
"""
import asyncio
import logging
import subprocess
import typer
from dependency_injector.wiring import Provide, inject
from tom_calculator import services
from tom_calculator.application import create_container
from tom_calculator.util import get_datadir
logger = logging.getLogger(__name__)
app = typer.Typer()
@inject
def load(
    datadir: str,
    loader_service: services.LoaderService = Provide['loader_service'],
) -> None:
    """Load data from datadir to database.
    1. Injects loader_service from container.
    2. Run service in async mode.
    """
    # the Provide[...] default is resolved by @inject against the wired
    # container; callers never pass loader_service explicitly
    asyncio.run(loader_service.load(datadir))
@app.callback()
def main(ctx: typer.Context) -> None:
    """Main callback.
    1. Used to add container to the context.
    2. Invoked before every command.
    """
    # a fresh DI container is built per invocation and stashed on the
    # typer context so commands can reach it via ctx.obj
    container = create_container()
    ctx.obj = container
@app.command()
def migrate() -> None:
    """Command to migrate schema via alembic."""
    typer.echo('Starting migration...')
    # NOTE(review): the alembic return code is not checked (no check=True),
    # so a failed migration passes silently -- confirm this is intended
    subprocess.run(['alembic', 'upgrade', 'head'])
@app.command()
def migrate_data() -> None:
    """Command to migrate data via container's service.
    Requires TOM_DATA variable from env.
    """
    typer.echo('Migrating data...')
    # resolve the data directory from the environment, then delegate to
    # the injected loader via load()
    datadir = str(get_datadir())
    load(datadir)
if __name__ == '__main__': # pragma: no cover
app()
| 23.674699 | 79 | 0.668702 |
9e53e22027ac9b9f00991cdf206817dfb0b818f3 | 1,282 | py | Python | gary/mhacks/urls.py | anshulkgupta/viznow | 119511770e1f5e137fa01e5f3cd56005a2871268 | [
"MIT"
] | null | null | null | gary/mhacks/urls.py | anshulkgupta/viznow | 119511770e1f5e137fa01e5f3cd56005a2871268 | [
"MIT"
] | null | null | null | gary/mhacks/urls.py | anshulkgupta/viznow | 119511770e1f5e137fa01e5f3cd56005a2871268 | [
"MIT"
] | null | null | null |
from django.conf.urls import patterns, include, url
from mhacks import settings
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'mhacks.views.enter_page'),
url(r'^trial/?$', 'mhacks.views.airline_page'),
url(r'^home/?$', 'mhacks.views.home_page'),
url(r'^uber/?$', 'mhacks.views.uber_page'),
url(r'^page/Custom/fileupload/Bubble/YES/?$', 'mhacks.views.bubble_page'),
url(r'^page/Custom/fileupload/Globe/YES/?$', 'mhacks.views.globe_page'),
url(r'^page/Custom/fileupload/Chloropleth/YES/?$', 'mhacks.views.chloropleth_page'),
url(r'^page/Custom/fileupload/Chord/YES/?$', 'mhacks.views.chord_page'),
url(r'^page/Custom/fileupload/Line/YES/?$', 'mhacks.views.line_page'),
url(r'^page/(?P<page>[A-Za-z0-9-_]+)/fileupload/(?P<id>[A-Za-z0-9-_]+)/?$', 'mhacks.views.fileupload_page'),
url(r'^page/(?P<page>[A-Za-z0-9-_]+)/fileupload/(?P<id>[A-Za-z0-9-_]+)/final?$', 'mhacks.views.final_custom_page'),
url(r'^upload/?$', 'mhacks.views.upload_page'),
url(r'^upload/submit/?$', 'mhacks.views.handle_upload'),
url(r'^page/(?P<id>[A-Za-z0-9-_]+)/*$', 'mhacks.views.upload_unique_page'),
url(r'^page/(?P<page>[A-Za-z0-9-_]+)/(?P<id>[A-Za-z0-9-_]+)/?$', 'mhacks.views.visualization_page')
) | 55.73913 | 119 | 0.652886 |
2d21d6a9ebf0f2beeac65479f4566ee962cb150e | 2,555 | py | Python | Second course/4th semester/Computer Graphics/Lab7/SLGraphic.py | tekcellat/University | 9a0196a45c9cf33ac58018d636c3e4857eba0330 | [
"MIT"
] | null | null | null | Second course/4th semester/Computer Graphics/Lab7/SLGraphic.py | tekcellat/University | 9a0196a45c9cf33ac58018d636c3e4857eba0330 | [
"MIT"
] | null | null | null | Second course/4th semester/Computer Graphics/Lab7/SLGraphic.py | tekcellat/University | 9a0196a45c9cf33ac58018d636c3e4857eba0330 | [
"MIT"
] | 7 | 2020-12-04T07:26:46.000Z | 2022-03-08T17:47:47.000Z | from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
WIDTH = 500
HIGHT = 480
class SLGraphicsScene(QGraphicsScene):
    # Graphics scene that live-previews a rectangle (rb2 mode) or a line
    # segment (rb1 mode) while the mouse moves, redrawing onto the parent
    # widget's image through its Bresenham/draw_borders helpers.
    def __init__(self, parent):
        super().__init__()
        # parent widget owning the image, radio buttons and draw helpers
        self.parent = parent
    def mouseMoveEvent(self, event):
        parent = self.parent
        # rb2 checked: rubber-band preview of an axis-aligned rectangle
        if parent.rb2.isChecked():
            parent.image.fill(Qt.white)
            parent.draw_borders()
            cord = event.scenePos()
            x = cord.x()
            y = cord.y()
            # react only while the cursor is inside the drawing area
            if (x >= 10 and y >= 10 and y <= HIGHT and x <= WIDTH):
                # offsets map scene coordinates onto the image canvas
                # (presumably compensating the border margin -- confirm)
                x += 2
                y += 10
                num = len(parent.edges)
                if num > 0 and not parent.cutter_flag:
                    parent.image.fill(Qt.white)
                    parent.draw_borders()
                    # draw the four sides between the last stored corner
                    # (edges[num-1]) and the current cursor position
                    parent.Bresenham(parent.edges[num-1][0],
                                     parent.edges[num-1][1],
                                     x,parent.edges[num-1][1])
                    parent.Bresenham(x,parent.edges[num-1][1],
                                     x,y)
                    parent.Bresenham(parent.edges[num-1][0],
                                     y,x,y)
                    parent.Bresenham(parent.edges[num-1][0],
                                     parent.edges[num-1][1],
                                     parent.edges[num-1][0],y)
        # rb1 checked: rubber-band preview of a single line segment
        if parent.rb1.isChecked():
            parent.image.fill(Qt.white)
            parent.draw_borders()
            cord = event.scenePos()
            x = cord.x()
            y = cord.y()
            if (x >= 10 and y >= 10 and y <= HIGHT and x <= WIDTH):
                x += 2
                y += 10
                num = len(parent.one_slave)
                # caps lock constrains the line to horizontal/vertical
                if parent.capslock and num:
                    if y != parent.one_slave[1]:
                        der = ((x - parent.one_slave[0])/
                               (y - parent.one_slave[1]))
                    else:
                        # purely horizontal cursor move: force the
                        # |der| > 1 branch below
                        der = 2
                    if abs(der) <= 1:
                        # mostly vertical: pin x to the anchor point
                        x = parent.one_slave[0]
                    else:
                        # mostly horizontal: pin y to the anchor point
                        y = parent.one_slave[1]
                if num > 0:
                    parent.image.fill(Qt.white)
                    parent.draw_borders()
                    # line from the stored anchor to the cursor
                    parent.Bresenham(parent.one_slave[0],
                                     parent.one_slave[1],
                                     x,y,parent.colorhelp)
pass
| 33.181818 | 67 | 0.399609 |
81b99262129ea0b1208ef9c8d69646d1a90e841d | 1,828 | py | Python | apps/trader/forms.py | ncabelin/ebook-trading-club | e52df18203f87e0ca06ed31e9113e65dc29720e5 | [
"MIT"
] | null | null | null | apps/trader/forms.py | ncabelin/ebook-trading-club | e52df18203f87e0ca06ed31e9113e65dc29720e5 | [
"MIT"
] | null | null | null | apps/trader/forms.py | ncabelin/ebook-trading-club | e52df18203f87e0ca06ed31e9113e65dc29720e5 | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import Item, Proposal
class AlertForm(forms.Form):
error = forms.CharField(max_length=255, required=False)
message = forms.CharField(max_length=255, required=False)
class LoginForm(forms.Form):
username = forms.CharField(label='User Name', max_length=64)
password = forms.CharField(widget=forms.PasswordInput())
class RegisterForm(UserCreationForm):
first_name = forms.CharField(max_length=30, required=False, help_text='Optional.')
last_name = forms.CharField(max_length=30, required=False, help_text='Optional.')
email = forms.EmailField(max_length=254, help_text='Required. Inform a valid email address.')
class Meta:
model = User
fields = ('username','first_name','last_name','email','password1','password2')
class ItemForm(forms.Form):
name = forms.CharField(max_length=255)
description = forms.CharField(widget=forms.Textarea)
image = forms.CharField(max_length=255)
class DeleteItemForm(forms.Form):
id = forms.IntegerField()
class EditItemForm(forms.Form):
id = forms.IntegerField(widget=forms.HiddenInput())
name = forms.CharField(max_length=255)
description = forms.CharField(widget=forms.Textarea)
image = forms.CharField(max_length=255)
class EditUserForm(forms.Form):
    """Profile edit form for the basic user fields."""
    username = forms.CharField(max_length=255)
    first_name = forms.CharField(max_length=255)
    last_name = forms.CharField(max_length=255)
    email = forms.CharField(max_length=255)
class ChangePasswordForm(forms.Form):
    """Password change form with two masked password entries.

    NOTE(review): no ``clean()`` here, so matching of the two entries must
    be enforced by the view that processes this form.
    """
    password1 = forms.CharField(widget=forms.PasswordInput, max_length=255, label='Password')
    password2 = forms.CharField(widget=forms.PasswordInput, max_length=255, label='Repeat Password')
| 39.73913 | 100 | 0.753282 |
b24f694e79a2ad7200e9b919e78bab0b8d677a60 | 2,842 | py | Python | B4S2 - Digital Learning Technology/Week 11/main.py | abc1236762/UniversityHomework | 688f6fc45d610f84c0c24a6d5ab75ea70ea6a59f | [
"MIT"
] | null | null | null | B4S2 - Digital Learning Technology/Week 11/main.py | abc1236762/UniversityHomework | 688f6fc45d610f84c0c24a6d5ab75ea70ea6a59f | [
"MIT"
] | 4 | 2021-03-28T14:06:09.000Z | 2021-03-28T14:06:10.000Z | B4S2 - Digital Learning Technology/Week 11/main.py | abc1236762/UniversityHomework | 688f6fc45d610f84c0c24a6d5ab75ea70ea6a59f | [
"MIT"
] | 1 | 2020-04-29T16:00:32.000Z | 2020-04-29T16:00:32.000Z | from os import path
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
# URL of the UCI abalone data set
DATA_URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data'
# Column labels for the data set
DATA_LABEL = ['sex', 'length', 'diameter', 'height', 'whole weight',
'shucked weight', 'viscera weight', 'shell weight', 'rings']
# Fetch the data
def get_data() -> 'tuple[np.ndarray, np.ndarray]':
    """Load the abalone data set, caching it locally as ``data.csv``.

    Returns:
        x: the three part-weight columns (shucked, viscera, shell weight).
        y: the ``whole weight`` column.
    """
    if not path.exists('data.csv'):
        # No local copy yet, so download from the UCI repository.
        # BUG FIX: the source file has no header row (as the original
        # comment stated), so pass header=None and name the columns up
        # front; previously pandas consumed the first record as a header
        # and that row was silently lost.
        df = pd.read_csv(DATA_URL, header=None, names=DATA_LABEL)
        # Cache locally (with a header) for subsequent runs.
        df.to_csv('data.csv', index=False)
    else:
        # Reuse the cached copy.
        df = pd.read_csv('data.csv')
    # Features: the three part weights (DATA_LABEL[5:8]).
    x = np.array(df[DATA_LABEL[5:8]])
    # Target: whole weight (DATA_LABEL[4]).
    y = np.array(df[DATA_LABEL[4]])
    return x, y
# Configure common chart properties
def config_plt(title: str, xlabel: str, ylabel: str):
    """Create a new figure and apply the shared chart styling."""
    # Figure size, title, axis labels, tight layout and grid lines.
    plt.figure(figsize=(12.0, 6.75))
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.tight_layout()
    plt.grid(True)
# Generate the raw-data chart
def gen_data_polt(x: np.ndarray, y: np.ndarray):
    """Plot the three part-weight columns against whole weight and save a PNG."""
    weight_cols = DATA_LABEL[5:8]
    # Title: first word of each part-weight label, comma joined, then the
    # target label, e.g. "shucked,viscera,shell - whole weight".
    title = '{} - {}'.format(
        ','.join(col.split()[0] for col in weight_cols), DATA_LABEL[4])
    config_plt(title, DATA_LABEL[4].split()[1], DATA_LABEL[4])
    # One scatter series per weight column, each in its own colour.
    for idx, (colour, col_label) in enumerate(zip(('r', 'g', 'b'),
                                                  weight_cols)):
        plt.scatter(x[..., idx], y, color=colour, label=col_label)
    # Show which colour belongs to which column.
    plt.legend(loc='lower right')
    plt.savefig(f'{title}.png')
# Generate the prediction-vs-answer chart
def gen_result_polt(y_pred: np.ndarray, y: np.ndarray, note: str):
    """Scatter predictions against ground truth and save the figure as PNG."""
    title = 'prediction - answer results ({})'.format(note)
    config_plt(title, 'prediction', 'answer')
    # Black dots: one point per (prediction, answer) pair.
    plt.scatter(y_pred, y, color='black')
    plt.savefig('{}.png'.format(title))
# Main program
def main():
    """End-to-end workflow: load data, plot it, fit and evaluate the model."""
    # Load the data set and plot the raw relationships.
    features, target = get_data()
    gen_data_polt(features, target)
    # Hold out 10 samples for testing, with a fixed seed.
    x_train, x_test, y_train, y_test = train_test_split(
        features, target, test_size=10, random_state=0x749487)
    # Multiple linear regression on the three part-weight features.
    model = LinearRegression().fit(x_train, y_train)
    # Compare predictions with answers on the training split...
    y_train_pred = model.predict(x_train)
    gen_result_polt(y_train_pred, y_train, 'train')
    # ...and on the held-out test split.
    y_test_pred = model.predict(x_test)
    gen_result_polt(y_test_pred, y_test, 'test')
    # Dump the test inputs, their answers and the predictions.
    print(f'x_test\n{x_test}')
    print(f'y_test\n{y_test}')
    print(f'y_test_pred\n{y_test_pred}')
if __name__ == '__main__':
    # Run the full workflow when executed as a script.
    main()
| 28.42 | 91 | 0.654821 |
41427f394b3e7a0318de679494ed956a5bd82c72 | 5,240 | py | Python | setup.py | Liam-Deacon/antlr4-vba-parser | af273e6d7c4efd7660d647ad5b6e338a4ff46bd3 | [
"BSD-3-Clause"
] | 1 | 2021-07-23T19:28:59.000Z | 2021-07-23T19:28:59.000Z | setup.py | Liam-Deacon/antlr4-vba-parser | af273e6d7c4efd7660d647ad5b6e338a4ff46bd3 | [
"BSD-3-Clause"
] | null | null | null | setup.py | Liam-Deacon/antlr4-vba-parser | af273e6d7c4efd7660d647ad5b6e338a4ff46bd3 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# type: ignore
"""Setup script for package."""
import os
import sys
import configparser
import datetime
import distutils.cmd
import distutils.log
import subprocess
import glob
import shutil
from pathlib import Path
# Keyword arguments accumulated for the final setup() call.
setup_kwargs = {}
from setuptools import find_packages, setup
import setuptools.command.build_py
# Prefer PBR-based metadata handling when the package is installed.
try:
    import pbr
    setup_kwargs['pbr'] = True
except ImportError:
    setup_kwargs['pbr'] = False
# Absolute path of (and the directory name containing) this setup script.
here = os.path.abspath(os.path.dirname(__file__))
basename = os.path.basename(os.path.dirname(__file__))
# give a list of scripts and how they map to a package module
CONSOLE_SCRIPTS = []
class VirtualenvCommand(distutils.cmd.Command):
    """Custom ``setup.py venv`` command creating a project virtualenv."""

    description = 'create virtual environment for project'

    user_options = [
        # The format is (long option, short option, description).
    ]

    def initialize_options(self):
        """Set default values for options."""
        # Each user option must be listed here with their default value.
        ...

    def finalize_options(self):
        """Post-process options."""
        ...

    def run(self):
        """Create a ``venv`` directory using the running interpreter."""
        cmd = [sys.executable, '-m', 'venv', 'venv']
        self.announce(
            'Running command: %s' % str(cmd),
            level=distutils.log.INFO)
        subprocess.check_call(cmd)
class AntlrBuildCommand(distutils.cmd.Command):
    """A custom command to generate antlr4 files from vba.g4 grammar file."""

    # BUG FIX: corrected user-visible typo ("form" -> "from").
    description = 'generate antlr4 files from grammar'

    user_options = [
        # The format is (long option, short option, description).
    ]

    def initialize_options(self):
        """Set default values for options."""
        # Each user option must be listed here with their default value.
        ...

    def finalize_options(self):
        """Post-process options."""
        ...

    def run(self):
        """Run command."""
        # Fetch the ANTLR jar and the grammar before generating sources.
        command = [sys.executable, 'download_external_files.py']
        self.announce(
            'Running command {}'.format(' '.join(command))
        )
        subprocess.check_call(command)
        source_dir = 'data'
        antlr4_jar = os.path.join(source_dir, 'antlr-4.9.2-complete.jar')
        vba_g4 = os.path.join(source_dir, 'vba.g4')
        # Generate the Python3 parser from the grammar.
        command = ['java', '-jar', antlr4_jar, '-Dlanguage=Python3', vba_g4]
        self.announce(
            'Running command: %s' % " ".join(command),
            level=distutils.log.INFO)
        subprocess.check_call(command)
        # Copy everything generated into the package directory.
        dest_dir = 'antlr4_vba_parser'
        for filename in glob.glob(os.path.join(source_dir, '*.*')):
            shutil.copy2(filename, dest_dir)
            # BUG FIX: the copied file name was not interpolated before
            # (the message printed a literal placeholder).
            print('Copied {} -> {}'.format(filename, dest_dir))
class BuildPyCommand(setuptools.command.build_py.build_py):
    """Custom build step that generates ANTLR sources before building."""

    def run(self):
        # Generate the parser first, then defer to the stock build_py.
        self.run_command('build_antlr4')
        super().run()
# Load setup.cfg so metadata can be merged into setup_kwargs below.
parser = configparser.ConfigParser()
parser.read('%s/setup.cfg' % here)
# Collect install requirements from requirements.txt, stripping inline
# comments and skipping empty/comment-only and git+ URL lines.
install_requirements = [line.split('#')[0].strip(' ')
                        for line in open('%s/requirements.txt' % here).readlines()
                        if line and line.split('#')[0] and
                        not line.startswith('git+')]  # can't currently handle git URLs unless using PBR
setup_kwargs['install_requires'] = install_requirements
# add setup.cfg information back from metadata
try:
    from setuptools.config import read_configuration
    config = read_configuration('%s/setup.cfg' % here)
    metadata = config['metadata']
    # Default the summary to the first line of the long description.
    metadata['summary'] = metadata.get('summary', metadata['description'].split('\n')[0])
    if setup_kwargs.pop('pbr', False) is not True:
        # Without PBR, fold the cfg metadata into the setup() kwargs.
        setup_kwargs.update(metadata)
        # explicitly compile a master list of install requirements - workaround for bug with PBR & bdist_wheel
        setup_kwargs['install_requires'] = list(set(list(setup_kwargs.get('install_requires',
                                                         config.get('options', {})
                                                         .get('install_requires', []))) +
                                                install_requirements))
except ImportError:
    metadata = {}
finally:
    # Always read the long description from the file named in setup.cfg.
    readme_filename = '%s/%s' % (here, parser['metadata']['description-file'].strip())
    with open(readme_filename) as f_desc:
        long_description = f_desc.read()
        setup_kwargs['long_description'] = long_description
    # check whether we are using Markdown instead of Restructured Text and update setup accordingly
    if readme_filename.lower().endswith('.md'):
        setup_kwargs['long_description_content_type'] = 'text/markdown'
# update with further information for sphinx
metadata.update(parser['metadata'])
if __name__ == '__main__':
    # actually perform setup here
    setup(
        setup_requires=['pbr', 'setuptools'],
        packages=find_packages(),
        entry_points={
            'console_scripts': CONSOLE_SCRIPTS
        },
        tests_require=['pytest', 'coverage'],
        include_package_data=True,
        # Register the custom commands defined above.
        cmdclass={
            'venv': VirtualenvCommand,
            'build_antlr4': AntlrBuildCommand,
            'build_py': BuildPyCommand,
        },
        **setup_kwargs
    )
| 30.465116 | 112 | 0.644084 |
bdda42cc76c0533cf7908bddea1b491c2bc92a55 | 812 | py | Python | src/zenml/integrations/airflow/orchestrators/__init__.py | dumpmemory/zenml | ec3f6994ae9666493519d600471c035eb9109ac4 | [
"Apache-2.0"
] | 1 | 2022-03-11T10:15:22.000Z | 2022-03-11T10:15:22.000Z | src/zenml/integrations/airflow/orchestrators/__init__.py | dumpmemory/zenml | ec3f6994ae9666493519d600471c035eb9109ac4 | [
"Apache-2.0"
] | null | null | null | src/zenml/integrations/airflow/orchestrators/__init__.py | dumpmemory/zenml | ec3f6994ae9666493519d600471c035eb9109ac4 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) ZenML GmbH 2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
The Airflow integration enables the use of Airflow as a pipeline orchestrator.
"""
from zenml.integrations.airflow.orchestrators.airflow_orchestrator import ( # noqa
AirflowOrchestrator,
)
| 38.666667 | 83 | 0.754926 |
1be7ca526125cc0b9b03ec2748c13c201564cf00 | 3,387 | py | Python | pyasn1_modules/rfc5544.py | inexio/pyasn1-modules | 13b84f74541ec442037273ddf8ba62bbba2cd974 | [
"BSD-2-Clause"
] | 2 | 2020-12-29T07:13:05.000Z | 2021-02-07T15:32:26.000Z | pyasn1_modules/rfc5544.py | inexio/pyasn1-modules | 13b84f74541ec442037273ddf8ba62bbba2cd974 | [
"BSD-2-Clause"
] | 3 | 2020-12-22T23:21:43.000Z | 2021-04-06T16:24:39.000Z | pyasn1_modules/rfc5544.py | inexio/pyasn1-modules | 13b84f74541ec442037273ddf8ba62bbba2cd974 | [
"BSD-2-Clause"
] | 1 | 2021-01-17T17:45:03.000Z | 2021-01-17T17:45:03.000Z | #
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley with assistance from asn1ate v.0.6.0.
#
# Copyright (c) 2021, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
# TimeStampedData
#
# ASN.1 source from:
# https://www.rfc-editor.org/rfc/rfc5544.txt
#
from pyasn1.type import char
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import opentype
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1_modules import rfc3161
from pyasn1_modules import rfc4998
from pyasn1_modules import rfc5280
from pyasn1_modules import rfc5652
# Effectively unbounded upper limit for SIZE constraints.
MAX = float('inf')
# OID -> ASN.1 type map backing the OtherEvidence open type; callers fill it.
otherEvidenceMap = { }
# Imports from RFC 5652
Attribute = rfc5652.Attribute
# Imports from RFC 5280
CertificateList = rfc5280.CertificateList
# Imports from RFC 3161
TimeStampToken = rfc3161.TimeStampToken
# Imports from RFC 4998
EvidenceRecord = rfc4998.EvidenceRecord
# TimeStampedData
class Attributes(univ.SetOf):
    # Attributes ::= SET SIZE(1..MAX) OF Attribute
    componentType = Attribute()
    subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class TimeStampAndCRL(univ.Sequence):
    # TimeStampAndCRL ::= SEQUENCE {
    #     timeStamp TimeStampToken, crl CertificateList OPTIONAL }
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('timeStamp', TimeStampToken()),
        namedtype.OptionalNamedType('crl', CertificateList())
    )
class TimeStampTokenEvidence(univ.SequenceOf):
    # TimeStampTokenEvidence ::= SEQUENCE SIZE(1..MAX) OF TimeStampAndCRL
    componentType = TimeStampAndCRL()
    subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class OtherEvidence(univ.Sequence):
    # OtherEvidence ::= SEQUENCE { oeType OBJECT IDENTIFIER, oeValue ANY }
    # oeValue is an open type resolved via otherEvidenceMap keyed by oeType.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('oeType', univ.ObjectIdentifier()),
        namedtype.NamedType('oeValue', univ.Any(),
            openType=opentype.OpenType('oeType', otherEvidenceMap))
    )
class Evidence(univ.Choice):
    # Evidence ::= CHOICE with IMPLICIT context tags [0], [1] and [2]
    # for timestamp, evidence-record and other evidence respectively.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('tstEvidence',
            TimeStampTokenEvidence().subtype(implicitTag=tag.Tag(
                tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.NamedType('ersEvidence',
            EvidenceRecord().subtype(implicitTag=tag.Tag(
                tag.tagClassContext, tag.tagFormatSimple, 1))),
        namedtype.NamedType('otherEvidence',
            OtherEvidence().subtype(implicitTag=tag.Tag(
                tag.tagClassContext, tag.tagFormatConstructed, 2)))
    )
class MetaData(univ.Sequence):
    # MetaData ::= SEQUENCE { hashProtected BOOLEAN, fileName UTF8String
    # OPTIONAL, mediaType IA5String OPTIONAL, otherMetaData Attributes OPTIONAL }
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('hashProtected', univ.Boolean()),
        namedtype.OptionalNamedType('fileName', char.UTF8String()),
        namedtype.OptionalNamedType('mediaType', char.IA5String()),
        namedtype.OptionalNamedType('otherMetaData', Attributes())
    )
class TimeStampedData(univ.Sequence):
    # TimeStampedData ::= SEQUENCE { version INTEGER {v1(1)},
    # dataUri IA5String OPTIONAL, metaData MetaData OPTIONAL,
    # content OCTET STRING OPTIONAL, temporalEvidence Evidence }
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('version',
            univ.Integer(namedValues=namedval.NamedValues(('v1', 1)))),
        namedtype.OptionalNamedType('dataUri', char.IA5String()),
        namedtype.OptionalNamedType('metaData', MetaData()),
        namedtype.OptionalNamedType('content', univ.OctetString()),
        namedtype.NamedType('temporalEvidence', Evidence())
    )
# id-ct-timestampedData content type identifier.
id_ct_timestampedData = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.31')
# Update the CMS Content Type Map in rfc5652.py
_cmsContentTypesMapUpdate = {
    id_ct_timestampedData: TimeStampedData(),
}
rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
| 27.314516 | 75 | 0.725716 |
c234943eda72fe5b5645ce912b43f8982d9bcf3c | 23,362 | py | Python | f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py | Sinan828/mitaka_agent | 82b65db257e8b9d05d57ca21133352bc5d6a9c94 | [
"Apache-2.0"
] | null | null | null | f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py | Sinan828/mitaka_agent | 82b65db257e8b9d05d57ca21133352bc5d6a9c94 | [
"Apache-2.0"
] | null | null | null | f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py | Sinan828/mitaka_agent | 82b65db257e8b9d05d57ca21133352bc5d6a9c94 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright (c) 2014-2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from time import time
from oslo_log import log as logging
from f5_openstack_agent.lbaasv2.drivers.bigip import constants_v2
from f5_openstack_agent.lbaasv2.drivers.bigip import l7policy_service
from f5_openstack_agent.lbaasv2.drivers.bigip.lbaas_service import \
LbaasServiceObject
from f5_openstack_agent.lbaasv2.drivers.bigip import listener_service
from f5_openstack_agent.lbaasv2.drivers.bigip import pool_service
from f5_openstack_agent.lbaasv2.drivers.bigip import virtual_address
from requests import HTTPError
LOG = logging.getLogger(__name__)
class LBaaSBuilder(object):
# F5 LBaaS Driver using iControl for BIG-IP to
# create objects (vips, pools) - not using an iApp."""
    def __init__(self, conf, driver, l2_service=None):
        """Wire up the helper services used to build LBaaS objects.

        :param conf: agent configuration object.
        :param driver: iControl driver supplying adapters and BIG-IPs.
        :param l2_service: optional L2 service helper.
        """
        self.conf = conf
        self.driver = driver
        self.l2_service = l2_service
        self.service_adapter = driver.service_adapter
        # Builders translate neutron service objects into BIG-IP config.
        self.listener_builder = listener_service.ListenerServiceBuilder(
            self.service_adapter,
            driver.cert_manager,
            conf.f5_parent_ssl_profile)
        self.pool_builder = pool_service.PoolServiceBuilder(
            self.service_adapter
        )
        self.l7service = l7policy_service.L7PolicyService(conf)
        # Enhanced Service Definition table; populated via init_esd().
        self.esd = None
    def init_esd(self, esd):
        """Store the Enhanced Service Definition (ESD) table."""
        self.esd = esd
    def is_esd(self, esd):
        """Return True if *esd* names a known Enhanced Service Definition."""
        return self.esd.is_esd(esd)
def assure_service(self, service, traffic_group, all_subnet_hints):
"""Assure that a service is configured on the BIGIP."""
start_time = time()
LOG.debug("assuring loadbalancers")
self._assure_loadbalancer_created(service, all_subnet_hints)
LOG.debug("assuring monitors")
self._assure_monitors_created(service)
LOG.debug("assuring pools")
self._assure_pools_created(service)
LOG.debug("assuring pool members")
self._assure_members(service, all_subnet_hints)
LOG.debug("assuring l7 policies")
self._assure_l7policies_created(service)
LOG.debug("assuring listeners")
self._assure_listeners_created(service)
LOG.debug("deleting listeners")
self._assure_listeners_deleted(service)
LOG.debug("deleting l7 policies")
self._assure_l7policies_deleted(service)
LOG.debug("deleting pools")
self._assure_pools_deleted(service)
LOG.debug("deleting monitors")
self._assure_monitors_deleted(service)
LOG.debug("deleting loadbalancers")
self._assure_loadbalancer_deleted(service)
LOG.debug(" _assure_service took %.5f secs" %
(time() - start_time))
return all_subnet_hints
@staticmethod
def _set_status_as_active(svc_obj, force=False):
# If forced, then set to ACTIVE else hold ERROR
preserve_statuses = \
tuple([constants_v2.F5_ERROR, constants_v2.F5_PENDING_DELETE])
ps = svc_obj['provisioning_status']
svc_obj['provisioning_status'] = constants_v2.F5_ACTIVE \
if ps not in preserve_statuses or force else ps
    @staticmethod
    def _set_status_as_error(svc_obj):
        """Mark *svc_obj*'s provisioning status as ERROR."""
        svc_obj['provisioning_status'] = constants_v2.F5_ERROR
    @staticmethod
    def _is_not_pending_delete(svc_obj):
        """Return True if *svc_obj* is NOT marked for deletion."""
        return svc_obj['provisioning_status'] != constants_v2.F5_PENDING_DELETE
    @staticmethod
    def _is_pending_delete(svc_obj):
        """Return True if *svc_obj* is marked for deletion."""
        return svc_obj['provisioning_status'] == constants_v2.F5_PENDING_DELETE
    @staticmethod
    def _is_not_error(svc_obj):
        """Return True if *svc_obj* is not in the ERROR state."""
        return svc_obj['provisioning_status'] != constants_v2.F5_ERROR
    def _assure_loadbalancer_created(self, service, all_subnet_hints):
        """Ensure the loadbalancer's virtual address exists on all BIG-IPs.

        Records subnet hints so unused subnets can be cleaned up later.
        """
        if 'loadbalancer' not in service:
            return
        bigips = self.driver.get_config_bigips()
        loadbalancer = service["loadbalancer"]
        set_active = True
        if self._is_not_pending_delete(loadbalancer):
            vip_address = virtual_address.VirtualAddress(
                self.service_adapter,
                loadbalancer)
            for bigip in bigips:
                try:
                    vip_address.assure(bigip)
                except Exception as error:
                    # A failure on any BIG-IP flips the LB to ERROR and
                    # prevents forcing ACTIVE below.
                    LOG.error(str(error))
                    self._set_status_as_error(loadbalancer)
                    set_active = False
        self._set_status_as_active(loadbalancer, force=set_active)
        if self.driver.l3_binding:
            loadbalancer = service["loadbalancer"]
            self.driver.l3_binding.bind_address(
                subnet_id=loadbalancer["vip_subnet_id"],
                ip_address=loadbalancer["vip_address"])
        self._update_subnet_hints(loadbalancer["provisioning_status"],
                                  loadbalancer["vip_subnet_id"],
                                  loadbalancer["network_id"],
                                  all_subnet_hints,
                                  False)
    def _assure_listeners_created(self, service):
        """Create (or update) a BIG-IP virtual server per live listener."""
        if 'listeners' not in service:
            return
        listeners = service["listeners"]
        loadbalancer = service["loadbalancer"]
        networks = service.get("networks", list())
        pools = service.get("pools", list())
        l7policies = service.get("l7policies", list())
        l7rules = service.get("l7policy_rules", list())
        bigips = self.driver.get_config_bigips()
        for listener in listeners:
            error = False
            if self._is_not_pending_delete(listener):
                svc = {"loadbalancer": loadbalancer,
                       "listener": listener,
                       "pools": pools,
                       "l7policies": l7policies,
                       "l7policy_rules": l7rules,
                       "networks": networks}
                # create_listener() will do an update if VS exists
                error = self.listener_builder.create_listener(
                    svc, bigips)
                if error:
                    # A listener failure also marks the loadbalancer.
                    loadbalancer['provisioning_status'] = \
                        constants_v2.F5_ERROR
                    listener['provisioning_status'] = constants_v2.F5_ERROR
                else:
                    listener['provisioning_status'] = constants_v2.F5_ACTIVE
                    if listener['admin_state_up']:
                        listener['operating_status'] = constants_v2.F5_ONLINE
    def _assure_pools_created(self, service):
        """Create BIG-IP pools for every pool not pending deletion."""
        if "pools" not in service:
            return
        pools = service.get("pools", list())
        loadbalancer = service.get("loadbalancer", dict())
        # Only monitors that are not being deleted are attached.
        monitors = \
            [monitor for monitor in service.get("healthmonitors", list())
             if monitor['provisioning_status'] !=
             constants_v2.F5_PENDING_DELETE]
        bigips = self.driver.get_config_bigips()
        error = None
        for pool in pools:
            if pool['provisioning_status'] != constants_v2.F5_PENDING_DELETE:
                svc = {"loadbalancer": loadbalancer,
                       "pool": pool}
                svc['members'] = self._get_pool_members(service, pool['id'])
                svc['healthmonitors'] = monitors
                error = self.pool_builder.create_pool(svc, bigips)
                if error:
                    # Pool failure marks both pool and loadbalancer.
                    pool['provisioning_status'] = constants_v2.F5_ERROR
                    loadbalancer['provisioning_status'] = constants_v2.F5_ERROR
                else:
                    pool['provisioning_status'] = constants_v2.F5_ACTIVE
                    pool['operating_status'] = constants_v2.F5_ONLINE
def _get_pool_members(self, service, pool_id):
"""Return a list of members associated with given pool."""
members = []
for member in service['members']:
if member['pool_id'] == pool_id:
members.append(member)
return members
    def _assure_monitors_created(self, service):
        """Create BIG-IP health monitors for monitors not pending delete."""
        monitors = service.get("healthmonitors", list())
        loadbalancer = service.get("loadbalancer", dict())
        bigips = self.driver.get_config_bigips()
        force_active_status = True
        for monitor in monitors:
            svc = {"loadbalancer": loadbalancer,
                   "healthmonitor": monitor}
            if monitor['provisioning_status'] != \
                    constants_v2.F5_PENDING_DELETE:
                if self.pool_builder.create_healthmonitor(svc, bigips):
                    # One failure stops forcing ACTIVE for later monitors.
                    monitor['provisioning_status'] = constants_v2.F5_ERROR
                    force_active_status = False
            self._set_status_as_active(monitor, force=force_active_status)
def _assure_monitors_deleted(self, service):
monitors = service["healthmonitors"]
loadbalancer = service["loadbalancer"]
bigips = self.driver.get_config_bigips()
for monitor in monitors:
svc = {"loadbalancer": loadbalancer,
"healthmonitor": monitor}
if monitor['provisioning_status'] == \
constants_v2.F5_PENDING_DELETE:
if self.pool_builder.delete_healthmonitor(svc, bigips):
monitor['provisioning_status'] = constants_v2.F5_ERROR
    def _assure_members(self, service, all_subnet_hints):
        """Assure pool members per pool and update their statuses/hints.

        NOTE: uses dict.iteritems(), i.e. this module targets Python 2.
        """
        if not (("pools" in service) and ("members" in service)):
            return
        members = service["members"]
        loadbalancer = service["loadbalancer"]
        bigips = self.driver.get_config_bigips()
        # Group the members by pool.
        pool_to_member_map = dict()
        for member in members:
            if 'port' not in member and \
                    member['provisioning_status'] != constants_v2.F5_PENDING_DELETE:
                LOG.debug("Member definition does not include Neutron port")
            pool_id = member.get('pool_id', None)
            if not pool_id:
                # Members without a pool id cannot be provisioned.
                LOG.error("Pool member %s does not have a valid pool id",
                          member.get('id', "NO MEMBER ID"))
                continue
            if pool_id not in pool_to_member_map:
                pool_to_member_map[pool_id] = list()
            pool_to_member_map[pool_id].append(member)
        # Assure members by pool
        for pool_id, pool_members in pool_to_member_map.iteritems():
            pool = self.get_pool_by_id(service, pool_id)
            svc = {"loadbalancer": loadbalancer,
                   "members": pool_members,
                   "pool": pool}
            self.pool_builder.assure_pool_members(svc, bigips)
            pool_deleted = self._is_pending_delete(pool)
            for member in pool_members:
                if pool_deleted:
                    # Cascade the pool deletion down to its members.
                    member['provisioning_status'] = "PENDING_DELETE"
                    member['parent_pool_deleted'] = True
                provisioning = member.get('provisioning_status')
                if 'missing' not in member \
                        and provisioning != "PENDING_DELETE":
                    member['provisioning_status'] = "ACTIVE"
                elif 'missing' in member:
                    # 'missing' is set by the builder when the member is
                    # absent from the BIG-IP.
                    member['provisioning_status'] = "ERROR"
                self._update_subnet_hints(member["provisioning_status"],
                                          member["subnet_id"],
                                          member["network_id"],
                                          all_subnet_hints,
                                          True)
    def _assure_loadbalancer_deleted(self, service):
        """Remove the virtual address when the LB is pending deletion."""
        if (service['loadbalancer']['provisioning_status'] !=
                constants_v2.F5_PENDING_DELETE):
            return
        loadbalancer = service["loadbalancer"]
        bigips = self.driver.get_config_bigips()
        # Unbind the L3 address first, if an L3 binding service exists.
        if self.driver.l3_binding:
            self.driver.l3_binding.unbind_address(
                subnet_id=loadbalancer["vip_subnet_id"],
                ip_address=loadbalancer["vip_address"])
        vip_address = virtual_address.VirtualAddress(
            self.service_adapter,
            loadbalancer)
        for bigip in bigips:
            vip_address.assure(bigip, delete=True)
def _assure_pools_deleted(self, service):
if 'pools' not in service:
return
pools = service["pools"]
loadbalancer = service["loadbalancer"]
bigips = self.driver.get_config_bigips()
service_members = service.get('members', list())
for pool in pools:
pool_members = [member for member in service_members
if member.get('pool_id') == pool['id']]
svc = {"loadbalancer": loadbalancer,
"pool": pool, "members": pool_members}
# Is the pool being deleted?
if pool['provisioning_status'] == constants_v2.F5_PENDING_DELETE:
# Delete pool
error = self.pool_builder.delete_pool(svc, bigips)
if error:
pool['provisioning_status'] = constants_v2.F5_ERROR
    def _assure_listeners_deleted(self, service):
        """Delete virtual servers for listeners pending deletion."""
        bigips = self.driver.get_config_bigips()
        if 'listeners' in service:
            listeners = service["listeners"]
            loadbalancer = service["loadbalancer"]
            for listener in listeners:
                error = False
                if listener['provisioning_status'] == \
                        constants_v2.F5_PENDING_DELETE:
                    svc = {"loadbalancer": loadbalancer,
                           "listener": listener}
                    error = \
                        self.listener_builder.delete_listener(svc, bigips)
                if error:
                    listener['provisioning_status'] = constants_v2.F5_ERROR
        # Always sweep for virtual servers with no neutron counterpart.
        self.listener_builder.delete_orphaned_listeners(service, bigips)
@staticmethod
def get_pool_by_id(service, pool_id):
if pool_id and "pools" in service:
pools = service["pools"]
for pool in pools:
if pool["id"] == pool_id:
return pool
return None
    def _update_subnet_hints(self, status, subnet_id,
                             network_id, all_subnet_hints, is_member):
        """Track per-BIG-IP subnet usage for later subnet cleanup.

        A live object moves its subnet to 'do_not_delete_subnets'; a
        PENDING_DELETE object marks the subnet as a deletion candidate
        unless the subnet is already known to be in use.
        """
        bigips = self.driver.get_config_bigips()
        for bigip in bigips:
            subnet_hints = all_subnet_hints[bigip.device_name]
            if status != constants_v2.F5_PENDING_DELETE:
                if subnet_id in subnet_hints['check_for_delete_subnets']:
                    del subnet_hints['check_for_delete_subnets'][subnet_id]
                if subnet_id not in subnet_hints['do_not_delete_subnets']:
                    subnet_hints['do_not_delete_subnets'].append(subnet_id)
            else:
                if subnet_id not in subnet_hints['do_not_delete_subnets']:
                    subnet_hints['check_for_delete_subnets'][subnet_id] = \
                        {'network_id': network_id,
                         'subnet_id': subnet_id,
                         'is_for_member': is_member}
    def listener_exists(self, bigip, service):
        """Test the existence of the listener defined by service."""
        try:
            # Throw an exception if the listener does not exist.
            self.listener_builder.get_listener(service, bigip)
        except HTTPError as err:
            # NOTE(review): ``err.message`` is Python 2-era; confirm before
            # any Python 3 port of this module.
            LOG.debug("Virtual service service discovery error, %s." %
                      err.message)
            return False
        return True
    def _assure_l7policies_created(self, service):
        """Build and deploy one F5 wrapper policy per listener.

        ESD-named policies are skipped; non-ESD policies are folded into a
        single BIG-IP policy per listener via the L7 service builder.
        """
        if 'l7policies' not in service:
            return
        listener_policy_map = dict()
        bigips = self.driver.get_config_bigips()
        lbaas_service = LbaasServiceObject(service)
        l7policies = service['l7policies']
        LOG.debug("L7 debug: processing policies: %s", l7policies)
        for l7policy in l7policies:
            LOG.debug("L7 debug: assuring policy: %s", l7policy)
            name = l7policy.get('name', None)
            if not self.esd.is_esd(name):
                listener_id = l7policy.get('listener_id', None)
                # One combined policy per listener; skip duplicates.
                if not listener_id or listener_id in listener_policy_map:
                    LOG.debug(
                        "L7 debug: listener policies already added: %s",
                        listener_id)
                    continue
                listener_policy_map[listener_id] = \
                    self.l7service.build_policy(l7policy, lbaas_service)
        for listener_id, policy in listener_policy_map.items():
            error = False
            # Only deploy policies that actually contain rules.
            if policy['f5_policy'].get('rules', list()):
                error = self.l7service.create_l7policy(
                    policy['f5_policy'], bigips)
            for p in service['l7policies']:
                if self._is_not_pending_delete(p):
                    if not error:
                        self._set_status_as_active(p, force=True)
                    else:
                        self._set_status_as_error(p)
            loadbalancer = service.get('loadbalancer', {})
            if not error:
                # Remember the deployed policy on the listener object.
                listener = lbaas_service.get_listener(listener_id)
                if listener:
                    listener['f5_policy'] = policy['f5_policy']
            else:
                loadbalancer['provisioning_status'] = \
                    constants_v2.F5_ERROR
    def _assure_l7policies_deleted(self, service):
        """Delete per-listener wrapper policies that no longer have rules.

        Also removes the legacy 'wrapper_policy' object, tolerating 404s.
        """
        if 'l7policies' not in service:
            return
        listener_policy_map = dict()
        bigips = self.driver.get_config_bigips()
        lbaas_service = LbaasServiceObject(service)
        l7policies = service['l7policies']
        for l7policy in l7policies:
            name = l7policy.get('name', None)
            if not self.esd.is_esd(name):
                listener_id = l7policy.get('listener_id', None)
                if not listener_id or listener_id in listener_policy_map:
                    continue
                listener_policy_map[listener_id] = \
                    self.l7service.build_policy(l7policy, lbaas_service)
        # Clean wrapper policy this is the legacy name of a policy
        loadbalancer = service.get('loadbalancer', dict())
        tenant_id = loadbalancer.get('tenant_id', "")
        try:
            wrapper_policy = {
                'name': 'wrapper_policy',
                'partition': self.service_adapter.get_folder_name(
                    tenant_id)}
            self.l7service.delete_l7policy(wrapper_policy, bigips)
        except HTTPError as err:
            # 404 means the legacy policy was already gone; ignore it.
            if err.response.status_code != 404:
                LOG.error("Failed to remove wrapper policy: %s",
                          err.message)
        except Exception as err:
            LOG.error("Failed to remove wrapper policy: %s",
                      err.message)
        for _, policy in listener_policy_map.items():
            error = False
            # An empty rule set means the BIG-IP policy must be removed.
            if not policy['f5_policy'].get('rules', list()):
                error = self.l7service.delete_l7policy(
                    policy['f5_policy'], bigips)
            for p in policy['l7policies']:
                if self._is_not_pending_delete(p):
                    if not error:
                        self._set_status_as_active(p, force=True)
                    else:
                        self._set_status_as_error(p)
                else:
                    if error:
                        self._set_status_as_error(p)
def get_listener_stats(self, service, stats):
"""Get statistics for a loadbalancer service.
Sums values for stats defined in stats dictionary for all listeners
defined in service object. For example, if loadbalancer has two
listeners and stats defines a stat 'clientside.bitsIn' as a key, the
sum of all pools' clientside.bitsIn will be returned in stats.
Provisioning status is ignored -- PENDING_DELETE objects are
included.
:param service: defines loadbalancer and set of pools.
:param stats: a dictionary that defines which stats to get.
Should be initialized by caller with 0 values.
:return: stats are appended to input stats dict (i.e., contains
the sum of given stats for all BIG-IPs).
"""
listeners = service["listeners"]
loadbalancer = service["loadbalancer"]
bigips = self.driver.get_config_bigips()
collected_stats = {}
for stat in stats:
collected_stats[stat] = 0
for listener in listeners:
svc = {"loadbalancer": loadbalancer, "listener": listener}
vs_stats = self.listener_builder.get_stats(svc, bigips, stats)
for stat in stats:
collected_stats[stat] += vs_stats[stat]
return collected_stats
    def update_operating_status(self, service):
        """Refresh member operating status from the active BIG-IP."""
        bigip = self.driver.get_active_bigip()
        loadbalancer = service["loadbalancer"]
        status_keys = ['status.availabilityState',
                       'status.enabledState']
        members = service["members"]
        for member in members:
            # Only members that provisioned cleanly are polled.
            if member['provisioning_status'] == constants_v2.F5_ACTIVE:
                pool = self.get_pool_by_id(service, member["pool_id"])
                svc = {"loadbalancer": loadbalancer,
                       "member": member,
                       "pool": pool}
                status = self.pool_builder.get_member_status(
                    svc, bigip, status_keys)
                member['operating_status'] = self.convert_operating_status(
                    status)
@staticmethod
def convert_operating_status(status):
"""Convert object status to LBaaS operating status.
status.availabilityState and status.enabledState = Operating Status
available enabled ONLINE
available disabled DISABLED
offline - OFFLINE
unknown - NO_MONITOR
"""
op_status = None
available = status.get('status.availabilityState', '')
if available == 'available':
enabled = status.get('status.enabledState', '')
if enabled == 'enabled':
op_status = constants_v2.F5_ONLINE
elif enabled == 'disabled':
op_status = constants_v2.F5_DISABLED
else:
LOG.warning('Unexpected value %s for status.enabledState',
enabled)
elif available == 'offline':
op_status = constants_v2.F5_OFFLINE
elif available == 'unknown':
op_status = constants_v2.F5_NO_MONITOR
return op_status
| 38.742952 | 79 | 0.587963 |
693af3d47073b2ee726af8b366939d2e5b2f3b14 | 4,574 | py | Python | test/api.test.py | industrydive/datawrapper | 073429e25f3923c7fb19469a298f9591b97cf287 | [
"MIT"
] | 1 | 2017-02-16T16:36:44.000Z | 2017-02-16T16:36:44.000Z | test/api.test.py | pl3442/datawrapper | bd28a50f88a07199e9e6cd1a96e1e53854ca6282 | [
"MIT"
] | null | null | null | test/api.test.py | pl3442/datawrapper | bd28a50f88a07199e9e6cd1a96e1e53854ca6282 | [
"MIT"
] | null | null | null | #
# test script for Datawrapper API
#
import requests
import os
import json
from random import randint
import yaml
config = yaml.load(open('../config.yaml').read())
domain = 'http://' + config['domain']
if 'DATAWRAPPER_DOMAIN' in os.environ:
domain = os.environ['DATAWRAPPER_DOMAIN']
endpoint = domain + '/api/'
import unittest
print 'testing on ' + domain
ns = {
'chartId': None,
'session': requests.Session()
}
# create new chart
class TestDatawrapperAPI(unittest.TestCase):
    """End-to-end smoke tests against a live Datawrapper API instance.

    The numbered ``test_NN_*`` method names matter: unittest runs tests in
    alphabetical order, and later tests depend on state (``ns['chartId']``,
    session cookies) created by earlier ones. These tests perform real HTTP
    requests against ``endpoint``.
    """
    def checkRes(self, r):
        # Every API response is expected to be a JSON object with
        # status == 'ok'; print the server message to help debug failures.
        self.assertIsInstance(r.json(), dict)
        self.assertEqual(r.json()['status'], 'ok')
        if r.json()['status'] == 'error':
            print r.json()['message']
    def test_01_create_new_chart(self):
        # Creates the chart all subsequent tests operate on.
        global ns
        r = ns['session'].post(endpoint + 'charts')
        self.checkRes(r)
        ns['chartId'] = r.json()['data'][0]['id']
    def test_02_set_chart_data(self):
        data = 'some,data,to,send\nanother,row,to,send\n'
        url = endpoint + 'charts/%s/data' % ns['chartId']
        r = ns['session'].put(url, data=data)
        self.checkRes(r)
        # check that data was set correctly
        r = ns['session'].get(url)
        self.assertEqual(r.text, data)
    def test_03_upload_chart_data(self):
        # Same as test_02 but via multipart file upload (qqfile field).
        files = {'qqfile': (
            'report.csv', 'other,data,to,send\nanother,row,to,send\n')}
        url = endpoint + 'charts/%s/data' % ns['chartId']
        r = ns['session'].post(url, files=files)
        self.checkRes(r)
        # check that data was set correctly
        r = ns['session'].get(url)
        self.assertEqual(r.text, files['qqfile'][1])
    def test_04_get_chart_meta(self):
        url = endpoint + 'charts/%s' % ns['chartId']
        r = ns['session'].get(url)
        self.checkRes(r)
        # New charts inherit the configured gallery default (False if unset).
        gallery_default = False
        if 'defaults' in config and 'show_in_gallery' in config['defaults']:
            gallery_default = config['defaults']['show_in_gallery']
        self.assertEqual(r.json()['data']['showInGallery'], gallery_default)
    def test_05_saveMetadata(self):
        # Round-trip: read chart metadata, modify, write it back.
        url = endpoint + 'charts/%s' % ns['chartId']
        r = ns['session'].get(url)
        self.checkRes(r)
        data = r.json()['data']
        data['title'] = 'My cool new chart'
        data['metadata']['describe']['source-name'] = 'Example Data Source'
        data['metadata']['describe']['source-url'] = 'http://example.org'
        r = ns['session'].put(url, data=json.dumps(data))
        self.checkRes(r)
        # self.assertEqual(r.json()['data']['showInGallery'], False)
    def test_06_gallery(self):
        url = endpoint + 'gallery'
        r = ns['session'].get(url)
        self.checkRes(r)
    def test_06_visualizations(self):
        url = endpoint + 'visualizations'
        r = ns['session'].get(url)
        self.checkRes(r)
        self.assertIsInstance(r.json()['data'], list)
    def test_07_bar_chart(self):
        url = endpoint + 'visualizations/bar-chart'
        r = ns['session'].get(url)
        self.checkRes(r)
        self.assertIsInstance(r.json()['data'], dict)
    def test_08_account(self):
        url = endpoint + 'account'
        r = ns['session'].get(url)
        self.checkRes(r)
        self.assertIn('user', r.json()['data'])
        self.assertIsInstance(r.json()['data']['user'], dict)
    def test_09_set_lang_to_fr(self):
        url = endpoint + 'account/lang'
        r = ns['session'].put(url, data=json.dumps(dict(lang='fr')))
        self.checkRes(r)
    def test_10_check_lang_is_fr(self):
        # Depends on test_09 having set the language on this session.
        url = endpoint + 'account/lang'
        r = ns['session'].get(url)
        self.checkRes(r)
        self.assertEqual(r.json()['data'], 'fr')
    def test_11_charts(self):
        # Exactly one chart expected: the one created in test_01.
        url = endpoint + 'charts'
        r = ns['session'].get(url)
        self.checkRes(r)
        self.assertEqual(len(r.json()['data']), 1)
    def test_11a_charts_sorted(self):
        url = endpoint + 'charts?order=theme'
        r = ns['session'].get(url)
        self.checkRes(r)
        self.assertEqual(len(r.json()['data']), 1)
    def test_12_estimate_job(self):
        url = endpoint + 'jobs/export/estimate'
        r = ns['session'].get(url)
        self.checkRes(r)
    def test_13_create_user(self):
        # Random email avoids collisions on repeated runs.
        url = endpoint + '/users'
        password = '1234'
        body = dict(pwd=password, pwd2=password,
                    email=('test-%d@' + config['domain']) % randint(10000, 99999))
        r = ns['session'].post(url, data=json.dumps(body))
        self.checkRes(r)
# Run the ordered API test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
| 31.115646 | 82 | 0.586139 |
7d977c0ade47bccf286ef68982818a4a8a052d48 | 762 | py | Python | paginas/migrations/0010_remove_publicacao_descricao_remove_publicacao_hora_and_more.py | DSheridanmt/Safety-Life | 522578858f8e063e14d0274de008c345ef2c0a75 | [
"MIT"
] | null | null | null | paginas/migrations/0010_remove_publicacao_descricao_remove_publicacao_hora_and_more.py | DSheridanmt/Safety-Life | 522578858f8e063e14d0274de008c345ef2c0a75 | [
"MIT"
] | null | null | null | paginas/migrations/0010_remove_publicacao_descricao_remove_publicacao_hora_and_more.py | DSheridanmt/Safety-Life | 522578858f8e063e14d0274de008c345ef2c0a75 | [
"MIT"
] | null | null | null | # Generated by Django 4.0 on 2022-03-13 19:19
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the legacy columns from the Publicacao model."""

    dependencies = [
        ('paginas', '0009_remove_publicacao_imagem'),
    ]

    # One RemoveField operation per dropped column, exactly as the
    # auto-generated migration listed them.
    operations = [
        migrations.RemoveField(model_name='publicacao', name=field_name)
        for field_name in ('descricao', 'hora', 'tag', 'titulo', 'upload')
    ]
| 22.411765 | 53 | 0.531496 |
d9f99ccda93f0d45e8ddcb5f99fc832c52354cd2 | 2,465 | py | Python | sdk/python/pulumi_azure_native/containerservice/v20200201/list_managed_cluster_user_credentials.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/containerservice/v20200201/list_managed_cluster_user_credentials.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/containerservice/v20200201/list_managed_cluster_user_credentials.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'ListManagedClusterUserCredentialsResult',
'AwaitableListManagedClusterUserCredentialsResult',
'list_managed_cluster_user_credentials',
]
# NOTE: @pulumi.output_type introspects this class (properties, getters) to
# map invoke results onto it, so the structure must stay as generated.
@pulumi.output_type
class ListManagedClusterUserCredentialsResult:
    """
    The list of credential result response.
    """
    def __init__(__self__, kubeconfigs=None):
        # Validate then store via pulumi.set, the output-type storage
        # convention (not plain attribute assignment).
        if kubeconfigs and not isinstance(kubeconfigs, list):
            raise TypeError("Expected argument 'kubeconfigs' to be a list")
        pulumi.set(__self__, "kubeconfigs", kubeconfigs)

    @property
    @pulumi.getter
    def kubeconfigs(self) -> Sequence['outputs.CredentialResultResponse']:
        """
        Base64-encoded Kubernetes configuration file.
        """
        return pulumi.get(self, "kubeconfigs")
class AwaitableListManagedClusterUserCredentialsResult(ListManagedClusterUserCredentialsResult):
    """Awaitable wrapper so the invoke result can be used with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes this method a generator, which is
        # what `await` requires; it resolves immediately to a plain result.
        if False:
            yield self
        return ListManagedClusterUserCredentialsResult(
            kubeconfigs=self.kubeconfigs)
def list_managed_cluster_user_credentials(resource_group_name: Optional[str] = None,
                                          resource_name: Optional[str] = None,
                                          opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListManagedClusterUserCredentialsResult:
    """
    The list of credential result response.


    :param str resource_group_name: The name of the resource group.
    :param str resource_name: The name of the managed cluster resource.
    """
    __args__ = {
        'resourceGroupName': resource_group_name,
        'resourceName': resource_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronously invoke the provider function and wrap the typed result
    # so callers may also `await` it.
    invoke_result = pulumi.runtime.invoke(
        'azure-native:containerservice/v20200201:listManagedClusterUserCredentials',
        __args__,
        opts=opts,
        typ=ListManagedClusterUserCredentialsResult).value
    return AwaitableListManagedClusterUserCredentialsResult(
        kubeconfigs=invoke_result.kubeconfigs)
| 36.791045 | 184 | 0.712779 |
c2d64e74af4c8ed3b70d51de04aa1d503243a71c | 7,412 | py | Python | courses/machine_learning/feateng/taxifare/trainer/model.py | ismailbaigteg/python | 50a15a786dbd13d097a3cf89d35a70918ae48b81 | [
"Apache-2.0"
] | 58 | 2019-05-16T00:12:11.000Z | 2022-03-14T06:12:12.000Z | courses/machine_learning/feateng/taxifare/trainer/model.py | ismailbaigteg/python | 50a15a786dbd13d097a3cf89d35a70918ae48b81 | [
"Apache-2.0"
] | 1 | 2021-03-26T00:38:05.000Z | 2021-03-26T00:38:05.000Z | courses/machine_learning/feateng/taxifare/trainer/model.py | ismailbaigteg/python | 50a15a786dbd13d097a3cf89d35a70918ae48b81 | [
"Apache-2.0"
] | 46 | 2018-03-03T17:17:27.000Z | 2022-03-24T14:56:46.000Z | #!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import layers
import tensorflow.contrib.learn as tflearn
from tensorflow.contrib import metrics
import numpy as np
# Emit INFO-level TensorFlow logs (training progress, checkpoints).
tf.logging.set_verbosity(tf.logging.INFO)
# Column order of the input CSV; must stay in one-to-one correspondence
# with DEFAULTS below.
CSV_COLUMNS = 'fare_amount,dayofweek,hourofday,pickuplon,pickuplat,dropofflon,dropofflat,passengers,key'.split(',')
# Real-valued input columns (also used for the TFRecord schema below).
SCALE_COLUMNS = ['pickuplon','pickuplat','dropofflon','dropofflat','passengers']
LABEL_COLUMN = 'fare_amount'
KEY_FEATURE_COLUMN = 'key'
# Per-column defaults for tf.decode_csv; position matches CSV_COLUMNS.
DEFAULTS = [[0.0], ['Sun'], [0], [-74.0], [40.0], [-74.0], [40.7], [1.0], ['nokey']]
# These are the raw input columns, and will be provided for prediction also
INPUT_COLUMNS = [
    # sparse categorical features
    layers.sparse_column_with_keys('dayofweek', keys=['Sun', 'Mon', 'Tues', 'Wed', 'Thu', 'Fri', 'Sat']),
    layers.sparse_column_with_integerized_feature('hourofday', bucket_size=24),
    # engineered features that are created in the input_fn (see add_engineered)
    layers.real_valued_column('latdiff'),
    layers.real_valued_column('londiff'),
    layers.real_valued_column('euclidean'),
    # raw coordinate / passenger-count columns
    layers.real_valued_column('pickuplon'),
    layers.real_valued_column('pickuplat'),
    layers.real_valued_column('dropofflat'),
    layers.real_valued_column('dropofflon'),
    layers.real_valued_column('passengers'),
]
def build_estimator(model_dir, nbuckets, hidden_units):
    """
    Build an estimator starting from INPUT COLUMNS.
    These include feature transformations and synthetic features.
    The model is a wide-and-deep model.

    Args:
        model_dir: directory for checkpoints/exports.
        nbuckets: number of buckets per lat/lon axis (crosses grow as
            nbuckets**4, so keep this modest).
        hidden_units: list of DNN layer sizes; falsy value selects the
            default [128, 32, 4].
    """
    # Unpack in the exact order the columns are declared in INPUT_COLUMNS.
    (dayofweek, hourofday, latdiff, londiff, euclidean, plon, plat, dlon, dlat, pcount) = INPUT_COLUMNS
    # Bucketize lats & lons over a box around NYC (38..42 N, -76..-72 E).
    latbuckets = np.linspace(38.0, 42.0, nbuckets).tolist()
    lonbuckets = np.linspace(-76.0, -72.0, nbuckets).tolist()
    b_plat = layers.bucketized_column(plat, latbuckets)
    b_dlat = layers.bucketized_column(dlat, latbuckets)
    b_plon = layers.bucketized_column(plon, lonbuckets)
    b_dlon = layers.bucketized_column(dlon, lonbuckets)
    # Feature crosses: pickup cell, dropoff cell, their pair, and day x hour.
    ploc = layers.crossed_column([b_plat, b_plon], nbuckets*nbuckets)
    dloc = layers.crossed_column([b_dlat, b_dlon], nbuckets*nbuckets)
    pd_pair = layers.crossed_column([ploc, dloc], nbuckets ** 4 )
    day_hr = layers.crossed_column([dayofweek, hourofday], 24*7)
    # Wide columns and deep columns.
    wide_columns = [
        # feature crosses
        dloc, ploc, pd_pair,
        day_hr,
        # sparse columns
        dayofweek, hourofday,
        # anything with a linear relationship
        pcount
    ]
    deep_columns = [
        # embedding_column to "group" together sparse crosses for the DNN
        layers.embedding_column(pd_pair, 10),
        layers.embedding_column(day_hr, 10),
        # dense real-valued inputs
        plat, plon, dlat, dlon,
        latdiff, londiff, euclidean
    ]
    return tf.contrib.learn.DNNLinearCombinedRegressor(
        model_dir=model_dir,
        linear_feature_columns=wide_columns,
        dnn_feature_columns=deep_columns,
        dnn_hidden_units=hidden_units or [128, 32, 4])
def add_engineered(features):
    """Add engineered distance features to the feature dict, in place.

    Computes signed lat/lon deltas between pickup and dropoff (sign keeps
    the direction of travel) plus their Euclidean norm, storing them under
    'latdiff', 'londiff' and 'euclidean'. Returns the same dict.
    """
    pickup_lat = features['pickuplat']
    dropoff_lat = features['dropofflat']
    pickup_lon = features['pickuplon']
    dropoff_lon = features['dropofflon']
    lat_delta = pickup_lat - dropoff_lat
    lon_delta = pickup_lon - dropoff_lon
    features['latdiff'] = lat_delta
    features['londiff'] = lon_delta
    features['euclidean'] = tf.sqrt(lat_delta * lat_delta + lon_delta * lon_delta)
    return features
def serving_input_fn():
    """Serving-time input function: raw placeholders -> model features."""
    # One placeholder per raw input column; the first two INPUT_COLUMNS
    # entries are the sparse dayofweek/hourofday columns, handled below.
    feature_placeholders = {}
    for column in INPUT_COLUMNS[2:]:
        feature_placeholders[column.name] = tf.placeholder(tf.float32, [None])
    feature_placeholders['dayofweek'] = tf.placeholder(tf.string, [None])
    feature_placeholders['hourofday'] = tf.placeholder(tf.int32, [None])
    # The model graph expects rank-2 tensors: add a trailing dimension.
    features = {}
    for key, tensor in feature_placeholders.items():
        features[key] = tf.expand_dims(tensor, -1)
    return tflearn.utils.input_fn_utils.InputFnOps(
        add_engineered(features),
        None,
        feature_placeholders
    )
def generate_csv_input_fn(filename, num_epochs=None, batch_size=512, mode=tf.contrib.learn.ModeKeys.TRAIN):
    """Return an input_fn that reads shuffled batches of CSV rows.

    *filename* may be a single path or a glob pattern; rows are decoded
    with DEFAULTS/CSV_COLUMNS and the label column is split off.
    """
    def _input_fn():
        matched_files = tf.train.match_filenames_once(filename)
        filename_queue = tf.train.string_input_producer(
            matched_files, num_epochs=num_epochs, shuffle=True)
        reader = tf.TextLineReader()
        _, rows = reader.read_up_to(filename_queue, num_records=batch_size)
        # decode_csv wants rank-2 input; add the trailing dimension.
        row_tensor = tf.expand_dims(rows, -1)
        decoded = tf.decode_csv(row_tensor, record_defaults=DEFAULTS)
        features = dict(zip(CSV_COLUMNS, decoded))
        label = features.pop(LABEL_COLUMN)
        return add_engineered(features), label
    return _input_fn
def gzip_reader_fn():
    """Return a TFRecordReader configured for GZIP-compressed record files."""
    gzip_options = tf.python_io.TFRecordOptions(
        compression_type=tf.python_io.TFRecordCompressionType.GZIP)
    return tf.TFRecordReader(options=gzip_options)
def generate_tfrecord_input_fn(data_paths, num_epochs=None, batch_size=512, mode=tf.contrib.learn.ModeKeys.TRAIN):
    """Return an input_fn reading gzipped TFRecords produced by preprocess.

    In EVAL mode the input is read once, unshuffled; in INFER mode the
    label column is absent from the schema.
    """
    def get_input_features():
        # Read the tfrecords. Same input schema as in preprocess.
        input_schema = {}
        if mode != tf.contrib.learn.ModeKeys.INFER:
            input_schema[LABEL_COLUMN] = tf.FixedLenFeature(shape=[1], dtype=tf.float32, default_value=0.0)
        for name in ['dayofweek', 'key']:
            input_schema[name] = tf.FixedLenFeature(shape=[1], dtype=tf.string, default_value='null')
        for name in ['hourofday']:
            input_schema[name] = tf.FixedLenFeature(shape=[1], dtype=tf.int64, default_value=0)
        for name in SCALE_COLUMNS:
            input_schema[name] = tf.FixedLenFeature(shape=[1], dtype=tf.float32, default_value=0.0)
        # Batched, keyed read; shuffling is on except in EVAL mode.
        keys, features = tf.contrib.learn.io.read_keyed_batch_features(
            data_paths[0] if len(data_paths) == 1 else data_paths,
            batch_size,
            input_schema,
            reader=gzip_reader_fn,
            reader_num_threads=4,
            queue_capacity=batch_size * 2,
            randomize_input=(mode != tf.contrib.learn.ModeKeys.EVAL),
            num_epochs=(1 if mode == tf.contrib.learn.ModeKeys.EVAL else num_epochs))
        target = features.pop(LABEL_COLUMN)
        features[KEY_FEATURE_COLUMN] = keys
        return add_engineered(features), target
    # Return a function to input the features into the model from a data path.
    return get_input_features
def get_eval_metrics():
    """Evaluation metrics: RMSE, also exported under the name that Cloud ML
    Engine hyperparameter tuning reads ('training/hptuning/metric')."""
    def _rmse_spec():
        return tflearn.MetricSpec(metric_fn=metrics.streaming_root_mean_squared_error)
    return {
        'rmse': _rmse_spec(),
        'training/hptuning/metric': _rmse_spec(),
    }
| 36.156098 | 115 | 0.722342 |
10e679bb3dbead21401041b3087e73305d3898a4 | 7,886 | py | Python | double_dqn_agent.py | and-buk/Learning-from-Pixels | 7320f08de7b52308b0f36a3759001c85bcdb797a | [
"MIT"
] | null | null | null | double_dqn_agent.py | and-buk/Learning-from-Pixels | 7320f08de7b52308b0f36a3759001c85bcdb797a | [
"MIT"
] | null | null | null | double_dqn_agent.py | and-buk/Learning-from-Pixels | 7320f08de7b52308b0f36a3759001c85bcdb797a | [
"MIT"
] | null | null | null | import numpy as np
import random
from collections import namedtuple, deque
from model import VQNetwork
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e4) # replay buffer size (max stored transitions)
BATCH_SIZE = 64 # minibatch size sampled per learning step
GAMMA = 0.99 # discount factor
TAU = 1e-3 # interpolation factor for soft update of target parameters
LR = 5e-4 # Adam learning rate
LR_DECAY = 0.99999 # multiplicative lr decay factor
# NOTE(review): LR_DECAY is not referenced by the Agent below, which uses a
# MultiStepLR schedule instead — TODO confirm whether it is still needed.
UPDATE_EVERY = 4 # how often (in env steps) to run a learning update
# Device to run the training on: 'cuda:0' when available, else 'cpu'.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
    """Double-DQN agent: interacts with and learns from the environment."""
    def __init__(self, state_size, action_size, frames_num):
        """Initialize an Agent object.
        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            frames_num (int): number of stacked RGB images
        """
        self.state_size = state_size
        self.action_size = action_size
        self.frames_num = frames_num
        # Q-Networks: 'local' is trained, 'target' is softly tracked.
        self.qnetwork_local = VQNetwork(action_size, state_size, frames_num).to(device)
        self.qnetwork_target = VQNetwork(action_size, state_size, frames_num).to(device)
        # Optimization method
        self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
        # Learning rate schedule
        # NOTE(review): the scheduler is created but .step() is never called
        # in this class — TODO confirm whether the schedule is intended.
        self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[570], gamma=0.02)
        # Replay memory
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE)
        # Initialize time step (for updating every UPDATE_EVERY steps)
        self.t_step = 0
    def step(self, state, action, reward, next_state, done):
        """Record one transition and learn every UPDATE_EVERY steps."""
        # Save experience in replay memory
        self.memory.add(state, action, reward, next_state, done)
        # Learn every UPDATE_EVERY time steps.
        self.t_step = (self.t_step + 1) % UPDATE_EVERY
        if self.t_step == 0:
            # If enough samples are available in memory, get random subset and learn
            if len(self.memory) > BATCH_SIZE:
                experiences = self.memory.sample()
                self.learn(experiences, GAMMA)
    def act(self, state, eps=0.):
        """Returns actions for given state as per current policy.

        Params
        ======
            state (ndarray): current state
            eps (float): epsilon, for epsilon-greedy action selection
        """
        # Convert a numpy array to a new float tensor and upload it to device
        state = torch.from_numpy(state).float().to(device)
        # Temporarily switch to inference mode for the forward pass.
        self.qnetwork_local.eval()
        with torch.no_grad():
            action_values = self.qnetwork_local(state)
        self.qnetwork_local.train()
        # Epsilon-greedy action selection
        if random.random() > eps:
            return np.argmax(action_values.cpu().data.numpy())
        else:
            return random.choice(np.arange(self.action_size))
    def learn(self, experiences, gamma):
        """Update value parameters using given batch of experience tuples.

        Implements the double-DQN target: the local network selects the
        best next action, the target network evaluates it.

        Params
        ======
            experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done) tuples
            gamma (float): discount factor
        """
        states, actions, rewards, next_states, dones = experiences
        self.qnetwork_local.eval()
        # Use the local network to get the index of highest-valued action (best action) of the next state
        with torch.no_grad():
            action_selection = self.qnetwork_local(next_states).max(1)[1].unsqueeze(1)
        self.qnetwork_local.train()
        # Get predicted Q values (for next states) from target network
        Q_targets_next = self.qnetwork_target(next_states).detach().gather(1, action_selection)
        # Compute Q targets for current states; (1 - dones) zeroes the
        # bootstrap term for terminal transitions.
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
        # Get expected Q values from local model
        # Get values for corresponding actions along the rows action-value matrix output:
        # (BATCH_SIZE, action_size) -> (BATCH_SIZE, 1)
        Q_expected = self.qnetwork_local(states).gather(1, actions)
        # Compute loss (Huber loss is more robust to outliers than MSE)
        loss = F.smooth_l1_loss(Q_expected, Q_targets)
        # Minimize the loss
        # Clear the gradients, do this because gradients are accumulated
        self.optimizer.zero_grad()
        # Perfom a backward pass through the network to calculate the gradients (backpropagate the error)
        loss.backward()
        # Take a step with optimaizer to update the weights
        self.optimizer.step()
        # ------------------- update target network ------------------- #
        self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)
    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.
        θ_target = τ*θ_local + (1 - τ)*θ_target
        Params
        ======
            local_model (PyTorch model): weights will be copied from
            target_model (PyTorch model): weights will be copied to
            tau (float): interpolation parameter
        """
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class ReplayBuffer:
    """Fixed-capacity store of experience tuples for experience replay."""

    def __init__(self, action_size, buffer_size, batch_size):
        """Initialize a ReplayBuffer object.

        Params
        ======
            action_size (int): dimension of each action
            buffer_size (int): maximum size of buffer
            batch_size (int): size of each training batch
        """
        self.action_size = action_size
        # deque(maxlen=...) silently evicts the oldest transition when full.
        self.memory = deque(maxlen=buffer_size)
        self.batch_size = batch_size
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])

    def add(self, state, action, reward, next_state, done):
        """Append a single (s, a, r, s', done) transition to memory."""
        self.memory.append(self.experience(state, action, reward, next_state, done))

    def sample(self):
        """Draw a uniform random minibatch and stack it into device tensors.

        Returns
        ======
            (states, actions, rewards, next_states, dones): float/long
            tensors of shape (batch_size, ...) on the module-level device;
            dones is float 0/1.
        """
        drawn = random.sample(self.memory, k=self.batch_size)
        picked = [e for e in drawn if e is not None]
        states = torch.from_numpy(np.vstack([e.state for e in picked])).float().to(device)
        actions = torch.from_numpy(np.vstack([e.action for e in picked])).long().to(device)
        rewards = torch.from_numpy(np.vstack([e.reward for e in picked])).float().to(device)
        next_states = torch.from_numpy(np.vstack([e.next_state for e in picked])).float().to(device)
        dones = torch.from_numpy(np.vstack([e.done for e in picked]).astype(np.uint8)).float().to(device)
        return (states, actions, rewards, next_states, dones)

    def __len__(self):
        """Return the current number of stored transitions."""
        return len(self.memory)
207055ef15e1cd4f4ac10fa7cdcdd5eec2d34dee | 532 | py | Python | install/app_store/tk-framework-qtwidgets/v2.6.6/python/shotgun_menus/__init__.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | [
"MIT"
] | null | null | null | install/app_store/tk-framework-qtwidgets/v2.6.6/python/shotgun_menus/__init__.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | [
"MIT"
] | null | null | null | install/app_store/tk-framework-qtwidgets/v2.6.6/python/shotgun_menus/__init__.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | [
"MIT"
] | 1 | 2020-02-15T10:42:56.000Z | 2020-02-15T10:42:56.000Z | # Copyright (c) 2016 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
from .entity_field_menu import EntityFieldMenu
from .shotgun_menu import ShotgunMenu
| 40.923077 | 75 | 0.804511 |
b6fb8bc1c27545e3ace65a10fce55d97ab156acb | 16,749 | py | Python | webapp/tests/test_attime.py | techonomics69/graphite-web | b2b2a0bf708889e5dd7ce3bec7e521195584b951 | [
"Apache-2.0"
] | null | null | null | webapp/tests/test_attime.py | techonomics69/graphite-web | b2b2a0bf708889e5dd7ce3bec7e521195584b951 | [
"Apache-2.0"
] | 1 | 2020-04-27T00:55:37.000Z | 2020-04-27T00:55:37.000Z | webapp/tests/test_attime.py | techonomics69/graphite-web | b2b2a0bf708889e5dd7ce3bec7e521195584b951 | [
"Apache-2.0"
] | null | null | null | try:
import unittest2 as unittest
except ImportError:
import unittest
from graphite.render.attime import parseTimeReference, parseATTime, parseTimeOffset, getUnitString
from datetime import datetime, timedelta
from django.utils import timezone
from .base import TestCase
import pytz
import mock
class MockedDateTime(datetime):
    """datetime double whose now() is frozen at 2015-03-08 12:00:00."""

    def __new__(cls, *args, **kwargs):
        # Hand back a plain datetime so constructed values compare and
        # behave exactly like real datetimes.
        return datetime(*args, **kwargs)

    @classmethod
    def now(cls, tzinfo=None):
        # Frozen clock; honors an optional tzinfo like datetime.now(tz).
        return cls(2015, 3, 8, 12, 0, 0, tzinfo=tzinfo)
# Patch the datetime used inside graphite.render.attime so "now" is the
# fixed instant provided by MockedDateTime (2015-03-08 12:00).
@mock.patch('graphite.render.attime.datetime', MockedDateTime)
class ATTimeTimezoneTests(TestCase):
    """parseATTime behavior with and without an explicit timezone."""
    default_tz = timezone.get_current_timezone()
    specified_tz = pytz.timezone("America/Los_Angeles")
    def test_should_return_absolute_time(self):
        time_string = '12:0020150308'
        expected_time = self.default_tz.localize(datetime.strptime(time_string,'%H:%M%Y%m%d'))
        actual_time = parseATTime(time_string)
        self.assertEqual(actual_time, expected_time)
    def test_absolute_time_should_respect_tz(self):
        time_string = '12:0020150308'
        expected_time = self.specified_tz.localize(datetime.strptime(time_string, '%H:%M%Y%m%d'))
        actual_time = parseATTime(time_string, self.specified_tz)
        self.assertEqual(actual_time, expected_time)
    def test_absolute_time_YYMMDD(self):
        time_string = '20150110'
        expected_time = self.default_tz.localize(datetime.strptime(time_string, '%Y%m%d'))
        actual_time = parseATTime(time_string, self.specified_tz)
        self.assertEqual(actual_time, expected_time.astimezone(self.specified_tz))
    def test_midnight(self):
        expected_time = self.default_tz.localize(datetime.strptime("0:00_20150308", '%H:%M_%Y%m%d'))
        actual_time = parseATTime("midnight", self.specified_tz)
        self.assertEqual(actual_time, expected_time.astimezone(self.specified_tz))
    def test_offset_with_tz(self):
        expected_time = self.default_tz.localize(datetime.strptime("5:00_20150308", '%H:%M_%Y%m%d'))
        actual_time = parseATTime("midnight+5h", self.specified_tz)
        self.assertEqual(actual_time, expected_time.astimezone(self.specified_tz))
    def test_relative_day_with_tz(self):
        expected_time = self.default_tz.localize(datetime.strptime("0:00_20150309", '%H:%M_%Y%m%d'))
        actual_time = parseATTime("midnight_tomorrow", self.specified_tz)
        self.assertEqual(actual_time, expected_time)
    def test_relative_day_and_offset_with_tz(self):
        expected_time = self.default_tz.localize(datetime.strptime("3:00_20150309", '%H:%M_%Y%m%d'))
        actual_time = parseATTime("midnight_tomorrow+3h", self.specified_tz)
        self.assertEqual(actual_time, expected_time)
    def test_should_return_current_time(self):
        expected_time = self.default_tz.localize(datetime.strptime("12:00_20150308", '%H:%M_%Y%m%d'))
        actual_time = parseATTime("now")
        self.assertEqual(actual_time, expected_time)
    def test_now_should_respect_tz(self):
        expected_time = self.default_tz.localize(datetime.strptime("12:00_20150308", '%H:%M_%Y%m%d'))
        actual_time = parseATTime("now", self.specified_tz)
        self.assertEqual(actual_time, expected_time)
    def test_relative_time_in_alternate_zone(self):
        # Only the hour is compared here, not the full timestamp.
        expected_time = self.specified_tz.localize(datetime.strptime("04:00_20150308", '%H:%M_%Y%m%d'))
        actual_time = parseATTime("-1h", self.specified_tz)
        self.assertEqual(actual_time.hour, expected_time.hour)
    def test_should_handle_dst_boundary(self):
        # 2015-03-08 is a US DST transition day in America/Los_Angeles.
        expected_time = self.default_tz.localize(datetime.strptime("02:00_20150308", '%H:%M_%Y%m%d'))
        actual_time = parseATTime("midnight+2h", self.specified_tz)
        self.assertEqual(actual_time, expected_time)
class AnotherMockedDateTime(datetime):
    """datetime double whose now() is frozen at 2015-01-01 11:00:00."""

    def __new__(cls, *args, **kwargs):
        # Produce plain datetime instances so comparisons behave normally.
        return datetime(*args, **kwargs)

    @classmethod
    def now(cls, tzinfo=None):
        # Frozen clock; honors an optional tzinfo like datetime.now(tz).
        return cls(2015, 1, 1, 11, 0, 0, tzinfo=tzinfo)
@mock.patch('graphite.render.attime.datetime', AnotherMockedDateTime)
class parseTimeReferenceTest(TestCase):
    """Unit tests for parseTimeReference with "now" frozen at
    2015-01-01 11:00 UTC via AnotherMockedDateTime.

    Uses assertEqual throughout: assertEquals is a deprecated alias
    (removed in Python 3.12) and the rest of this file already uses
    assertEqual.
    """
    zone = pytz.utc
    MOCK_DATE = zone.localize(datetime(2015, 1, 1, 11, 00))
    def test_parse_empty_return_now(self):
        time_ref = parseTimeReference('')
        self.assertEqual(time_ref, self.MOCK_DATE)
    def test_parse_None_return_now(self):
        time_ref = parseTimeReference(None)
        self.assertEqual(time_ref, self.MOCK_DATE)
    def test_parse_random_string_raise_Exception(self):
        with self.assertRaises(Exception):
            parseTimeReference("random")
    def test_parse_now_return_now(self):
        time_ref = parseTimeReference("now")
        self.assertEqual(time_ref, self.MOCK_DATE)
    def test_parse_colon_raises_ValueError(self):
        with self.assertRaises(ValueError):
            parseTimeReference(":")
    def test_parse_hour_return_hour_of_today(self):
        time_ref = parseTimeReference("8:50")
        expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
        self.assertEqual(time_ref, expected)
    def test_parse_hour_am(self):
        time_ref = parseTimeReference("8:50am")
        expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
        self.assertEqual(time_ref, expected)
    def test_parse_hour_pm(self):
        time_ref = parseTimeReference("8:50pm")
        expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 20, 50))
        self.assertEqual(time_ref, expected)
    def test_parse_noon(self):
        time_ref = parseTimeReference("noon")
        expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 12, 0))
        self.assertEqual(time_ref, expected)
    def test_parse_midnight(self):
        time_ref = parseTimeReference("midnight")
        expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 0, 0))
        self.assertEqual(time_ref, expected)
    def test_parse_teatime(self):
        time_ref = parseTimeReference("teatime")
        expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 16, 0))
        self.assertEqual(time_ref, expected)
    def test_parse_yesterday(self):
        time_ref = parseTimeReference("yesterday")
        expected = self.zone.localize(datetime(2014, 12, 31, 0, 0))
        self.assertEqual(time_ref, expected)
    def test_parse_tomorrow(self):
        time_ref = parseTimeReference("tomorrow")
        expected = self.zone.localize(datetime(2015, 1, 2, 0, 0))
        self.assertEqual(time_ref, expected)
    def test_parse_MM_slash_DD_slash_YY(self):
        time_ref = parseTimeReference("02/25/15")
        expected = self.zone.localize(datetime(2015, 2, 25, 0, 0))
        self.assertEqual(time_ref, expected)
    def test_parse_MM_slash_DD_slash_YYYY(self):
        time_ref = parseTimeReference("02/25/2015")
        expected = self.zone.localize(datetime(2015, 2, 25, 0, 0))
        self.assertEqual(time_ref, expected)
    def test_parse_YYYYMMDD(self):
        time_ref = parseTimeReference("20140606")
        expected = self.zone.localize(datetime(2014, 6, 6, 0, 0))
        self.assertEqual(time_ref, expected)
    def test_parse_MonthName_DayOfMonth_onedigits(self):
        time_ref = parseTimeReference("january8")
        expected = self.zone.localize(datetime(2015, 1, 8, 0, 0))
        self.assertEqual(time_ref, expected)
    def test_parse_MonthName_DayOfMonth_twodigits(self):
        time_ref = parseTimeReference("january10")
        expected = self.zone.localize(datetime(2015, 1, 10, 0, 0))
        self.assertEqual(time_ref, expected)
    def test_parse_MonthName_DayOfMonth_threedigits_raise_ValueError(self):
        with self.assertRaises(ValueError):
            parseTimeReference("january800")
    def test_parse_MonthName_without_DayOfMonth_raise_Exception(self):
        with self.assertRaises(Exception):
            parseTimeReference("january")
    def test_parse_monday_return_monday_before_now(self):
        time_ref = parseTimeReference("monday")
        expected = self.zone.localize(datetime(2014, 12, 29, 0, 0))
        self.assertEqual(time_ref, expected)
class Bug551771MockedDateTime(datetime):
    """datetime double whose now() is frozen at 2010-03-30 00:00:00,
    the reference instant for the bug-551771 regression tests."""

    def __new__(cls, *args, **kwargs):
        # Produce plain datetime instances so comparisons behave normally.
        return datetime(*args, **kwargs)

    @classmethod
    def now(cls, tzinfo=None):
        # Frozen clock; honors an optional tzinfo like datetime.now(tz).
        return cls(2010, 3, 30, 0, 0, 0, tzinfo=tzinfo)
@mock.patch('graphite.render.attime.datetime', Bug551771MockedDateTime)
class parseTimeReferenceTestBug551771(TestCase):
    """Regression tests run with now() mocked to 2010-03-30 00:00 UTC.

    Uses assertEqual (assertEquals is a deprecated alias, removed in
    Python 3.12), matching the rest of this file.
    """
    zone = pytz.utc
    def test_parse_MM_slash_DD_slash_YY(self):
        time_ref = parseTimeReference("02/23/10")
        expected = self.zone.localize(datetime(2010, 2, 23, 0, 0))
        self.assertEqual(time_ref, expected)
    def test_parse_YYYYMMDD(self):
        time_ref = parseTimeReference("20100223")
        expected = self.zone.localize(datetime(2010, 2, 23, 0, 0))
        self.assertEqual(time_ref, expected)
class parseTimeOffsetTest(TestCase):
    """Unit tests for parseTimeOffset: sign/amount/unit strings mapped to timedeltas."""
    def test_parse_None_returns_empty_timedelta(self):
        time_ref = parseTimeOffset(None)
        expected = timedelta(0)
        # assertEquals is a deprecated alias; assertEqual is used throughout.
        self.assertEqual(time_ref, expected)
    def test_parse_integer_raises_TypeError(self):
        with self.assertRaises(TypeError):
            time_ref = parseTimeOffset(1)
    def test_parse_string_starting_neither_with_minus_nor_digit_raises_KeyError(self):
        with self.assertRaises(KeyError):
            time_ref = parseTimeOffset("Something")
    def test_parse_m_as_unit_raises_Exception(self):
        # 'm' alone is ambiguous (minutes vs months) and must be rejected.
        with self.assertRaises(Exception):
            time_ref = parseTimeOffset("1m")
    def test_parse_digits_only_raises_exception(self):
        with self.assertRaises(Exception):
            time_ref = parseTimeOffset("10")
    def test_parse_alpha_only_raises_KeyError(self):
        with self.assertRaises(KeyError):
            time_ref = parseTimeOffset("month")
    def test_parse_minus_only_returns_zero(self):
        time_ref = parseTimeOffset("-")
        expected = timedelta(0)
        self.assertEqual(time_ref, expected)
    def test_parse_plus_only_returns_zero(self):
        time_ref = parseTimeOffset("+")
        expected = timedelta(0)
        self.assertEqual(time_ref, expected)
    def test_parse_ten_days(self):
        time_ref = parseTimeOffset("10days")
        expected = timedelta(10)
        self.assertEqual(time_ref, expected)
    def test_parse_zero_days(self):
        time_ref = parseTimeOffset("0days")
        expected = timedelta(0)
        self.assertEqual(time_ref, expected)
    def test_parse_minus_ten_days(self):
        time_ref = parseTimeOffset("-10days")
        expected = timedelta(-10)
        self.assertEqual(time_ref, expected)
    def test_parse_five_seconds(self):
        time_ref = parseTimeOffset("5seconds")
        expected = timedelta(seconds=5)
        self.assertEqual(time_ref, expected)
    def test_parse_five_minutes(self):
        time_ref = parseTimeOffset("5minutes")
        expected = timedelta(minutes=5)
        self.assertEqual(time_ref, expected)
    def test_parse_five_hours(self):
        time_ref = parseTimeOffset("5hours")
        expected = timedelta(hours=5)
        self.assertEqual(time_ref, expected)
    def test_parse_five_weeks(self):
        time_ref = parseTimeOffset("5weeks")
        expected = timedelta(weeks=5)
        self.assertEqual(time_ref, expected)
    def test_parse_one_month_returns_thirty_days(self):
        # Months are approximated as 30 days, years as 365 days.
        time_ref = parseTimeOffset("1month")
        expected = timedelta(30)
        self.assertEqual(time_ref, expected)
    def test_parse_two_months_returns_sixty_days(self):
        time_ref = parseTimeOffset("2months")
        expected = timedelta(60)
        self.assertEqual(time_ref, expected)
    def test_parse_twelve_months_returns_360_days(self):
        time_ref = parseTimeOffset("12months")
        expected = timedelta(360)
        self.assertEqual(time_ref, expected)
    def test_parse_one_year_returns_365_days(self):
        time_ref = parseTimeOffset("1year")
        expected = timedelta(365)
        self.assertEqual(time_ref, expected)
    def test_parse_two_years_returns_730_days(self):
        time_ref = parseTimeOffset("2years")
        expected = timedelta(730)
        self.assertEqual(time_ref, expected)
class getUnitStringTest(TestCase):
    """getUnitString expands unit abbreviations to canonical names; 'm' is rejected."""
    def test_get_seconds(self):
        test_cases = ['s', 'se', 'sec', 'second', 'seconds']
        for test_case in test_cases:
            result = getUnitString(test_case)
            # assertEquals is a deprecated alias; assertEqual is used throughout.
            self.assertEqual(result, 'seconds')
    def test_get_minutes(self):
        test_cases = ['min', 'minute', 'minutes']
        for test_case in test_cases:
            result = getUnitString(test_case)
            self.assertEqual(result, 'minutes')
    def test_get_hours(self):
        test_cases = ['h', 'ho', 'hour', 'hours']
        for test_case in test_cases:
            result = getUnitString(test_case)
            self.assertEqual(result, 'hours')
    def test_get_days(self):
        test_cases = ['d', 'da', 'day', 'days']
        for test_case in test_cases:
            result = getUnitString(test_case)
            self.assertEqual(result, 'days')
    def test_get_weeks(self):
        test_cases = ['w', 'we', 'week', 'weeks']
        for test_case in test_cases:
            result = getUnitString(test_case)
            self.assertEqual(result, 'weeks')
    def test_get_months(self):
        test_cases = ['mon', 'month', 'months']
        for test_case in test_cases:
            result = getUnitString(test_case)
            self.assertEqual(result, 'months')
    def test_get_years(self):
        test_cases = ['y', 'ye', 'year', 'years']
        for test_case in test_cases:
            result = getUnitString(test_case)
            self.assertEqual(result, 'years')
    def test_m_raises_Exception(self):
        # 'm' alone is ambiguous between minutes and months.
        with self.assertRaises(Exception):
            result = getUnitString("m")
    def test_integer_raises_Exception(self):
        with self.assertRaises(Exception):
            result = getUnitString(1)
class LeapYearMockedDateTime(datetime):
    """datetime stand-in whose now() is pinned to 2016-02-29 (a leap day)."""
    def __new__(cls, *args, **kwargs):
        # Construct plain datetime instances so equality with real datetimes still holds.
        return datetime.__new__(datetime, *args, **kwargs)
    @classmethod
    def now(cls, tzinfo=None):
        return cls(2016, 2, 29, 00, 0, 0, tzinfo=tzinfo)
@mock.patch('graphite.render.attime.datetime', LeapYearMockedDateTime)
class parseATTimeTestLeapYear(TestCase):
    """Relative offsets evaluated from a mocked 'now' of 2016-02-29 (leap day)."""
    zone = pytz.utc
    def test_parse_last_year(self):
        time_ref = parseATTime("-1year")
        expected = self.zone.localize(datetime(2015, 3, 1, 0, 0))
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(time_ref, expected)
    def test_parse_last_leap_year(self):
        time_ref = parseATTime("-4years")
        expected = self.zone.localize(datetime(2012, 3, 1, 0, 0))
        self.assertEqual(time_ref, expected)
    def test_parse_last_month(self):
        time_ref = parseATTime("-1month")
        expected = self.zone.localize(datetime(2016, 1, 30, 0, 0))
        self.assertEqual(time_ref, expected)
class LeapYearMockedDateTime2(datetime):
    """datetime stand-in whose now() is pinned to 2013-02-28 (day before a non-leap Mar 1)."""
    def __new__(cls, *args, **kwargs):
        # Construct plain datetime instances so equality with real datetimes still holds.
        return datetime.__new__(datetime, *args, **kwargs)
    @classmethod
    def now(cls, tzinfo=None):
        return cls(2013, 2, 28, 00, 0, 0, tzinfo=tzinfo)
@mock.patch('graphite.render.attime.datetime', LeapYearMockedDateTime2)
class parseATTimeTestLeapYear2(TestCase):
    """Relative offsets evaluated from a mocked 'now' of 2013-02-28 (non-leap year)."""
    zone = pytz.utc
    def test_parse_last_year(self):
        time_ref = parseATTime("-1year")
        expected = self.zone.localize(datetime(2012, 2, 29, 0, 0))
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(time_ref, expected)
    def test_parse_last_leap_year(self):
        time_ref = parseATTime("-4years")
        expected = self.zone.localize(datetime(2009, 3, 1, 0, 0))
        self.assertEqual(time_ref, expected)
    def test_parse_last_month(self):
        time_ref = parseATTime("-1month")
        expected = self.zone.localize(datetime(2013, 1, 29, 0, 0))
        self.assertEqual(time_ref, expected)
class parseATTimeTest(TestCase):
    zone = pytz.utc
    MOCK_DATE = zone.localize(datetime(2015, 1, 1, 11, 00))
    @unittest.expectedFailure
    def test_parse_noon_plus_yesterday(self):
        """Known-broken: MOCK_DATE is Jan 1, so ``day - 1`` is 0 and datetime() raises ValueError."""
        time_ref = parseATTime("noon+yesterday")
        expected = datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day - 1, 12, 00)
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(time_ref, expected)
| 39.224824 | 118 | 0.690668 |
74e87f5d133f764f1c12dc6f39ff78f7e3b5ffa9 | 203 | py | Python | petromark/config/desktop.py | exvas/petromark | 7c8fd7ee33418d4e2bdc086562a311955be35b70 | [
"MIT"
] | null | null | null | petromark/config/desktop.py | exvas/petromark | 7c8fd7ee33418d4e2bdc086562a311955be35b70 | [
"MIT"
] | null | null | null | petromark/config/desktop.py | exvas/petromark | 7c8fd7ee33418d4e2bdc086562a311955be35b70 | [
"MIT"
] | null | null | null | from frappe import _
def get_data():
    """Desk module descriptor for the Petromark app (consumed by Frappe's desktop config)."""
    module_card = {
        "module_name": "Petromark",
        "color": "grey",
        "icon": "octicon octicon-file-directory",
        "type": "module",
        "label": _("Petromark"),
    }
    return [module_card]
| 15.615385 | 44 | 0.586207 |
0b079d50fb753c8618033ad6ae73a54d872809e3 | 9,342 | py | Python | rpython/rtyper/module/test/test_ll_os.py | kantai/passe-pypy-taint-tracking | b60a3663f8fe89892dc182c8497aab97e2e75d69 | [
"MIT"
] | 2 | 2016-07-06T23:30:20.000Z | 2017-05-30T15:59:31.000Z | rpython/rtyper/module/test/test_ll_os.py | kantai/passe-pypy-taint-tracking | b60a3663f8fe89892dc182c8497aab97e2e75d69 | [
"MIT"
] | null | null | null | rpython/rtyper/module/test/test_ll_os.py | kantai/passe-pypy-taint-tracking | b60a3663f8fe89892dc182c8497aab97e2e75d69 | [
"MIT"
] | 2 | 2020-07-09T08:14:22.000Z | 2021-01-15T18:01:25.000Z | import os
from rpython.tool.udir import udir
from rpython.translator.c.test.test_genc import compile
from rpython.rtyper.module import ll_os
#has side effect of registering functions
from rpython.tool.pytest.expecttest import ExpectTest
from rpython.rtyper import extregistry
import errno
import sys
import py
def getllimpl(fn):
    """Return the low-level (lltype) implementation registered for *fn*."""
    entry = extregistry.lookup(fn)
    return entry.lltypeimpl
def test_access():
    # The ll implementation of os.access must agree with the host os.access
    # for every read/write/execute mode combination on a fresh file.
    filename = str(udir.join('test_access.txt'))
    fd = file(filename, 'w')
    fd.close()
    for mode in os.R_OK, os.W_OK, os.X_OK, os.R_OK | os.W_OK | os.X_OK:
        result = getllimpl(os.access)(filename, mode)
        assert result == os.access(filename, mode)
def test_times():
    """
    posix.times should compile as an RPython function and should return a
    five-tuple giving float-representations (seconds, effectively) of the four
    fields from the underlying struct tms and the return value.
    """
    # compile() translates the lambda to C; eval() parses the stringified tuple back.
    times = eval(compile(lambda: str(os.times()), ())())
    assert isinstance(times, tuple)
    assert len(times) == 5
    for value in times:
        assert isinstance(value, float)
def test_getlogin():
    # Compare the ll implementation of os.getlogin against the host's answer.
    if not hasattr(os, 'getlogin'):
        py.test.skip('posix specific function')
    try:
        expected = os.getlogin()
    except OSError, e:
        # No controlling terminal (common on CI boxes): nothing to compare against.
        py.test.skip("the underlying os.getlogin() failed: %s" % e)
    data = getllimpl(os.getlogin)()
    assert data == expected
def test_utimes():
    # Windows-only: a compiled os.utime must preserve sub-second mtime precision.
    if os.name != 'nt':
        py.test.skip('Windows specific feature')
    # Windows support centiseconds
    def f(fname, t1):
        os.utime(fname, (t1, t1))
    fname = udir.join('test_utimes.txt')
    fname.ensure()
    t1 = 1159195039.25
    compile(f, (str, float))(str(fname), t1)
    assert t1 == os.stat(str(fname)).st_mtime
def test__getfullpathname():
    # nt-only helper used by ntpath: must expand a drive-relative path like "C:stuff".
    if os.name != 'nt':
        py.test.skip('nt specific function')
    posix = __import__(os.name)
    sysdrv = os.getenv('SystemDrive', 'C:')
    stuff = sysdrv + 'stuff'
    data = getllimpl(posix._getfullpathname)(stuff)
    assert data == posix._getfullpathname(stuff)
    # the most intriguing failure of ntpath.py should not repeat, here:
    assert not data.endswith(stuff)
def test_getcwd():
    """The ll implementation of os.getcwd must match the host's value."""
    ll_cwd = getllimpl(os.getcwd)()
    assert ll_cwd == os.getcwd()
def test_chdir():
    # os.chdir through the ll implementation; on Windows the hidden per-drive
    # "=X:" environment variable must be kept in sync, which the nested helper
    # checks via a direct Win32 call.
    def check_special_envvar():
        if sys.platform != 'win32':
            return
        pwd = os.getcwd()
        import ctypes
        buf = ctypes.create_string_buffer(1000)
        len = ctypes.windll.kernel32.GetEnvironmentVariableA('=%c:' % pwd[0], buf, 1000)
        if (len == 0) and "WINGDB_PYTHON" in os.environ:
            # the ctypes call seems not to work in the Wing debugger
            return
        assert str(buf.value).lower() == pwd.lower()
        # ctypes returns the drive letter in uppercase,
        # os.getcwd does not,
        # but there may be uppercase in os.getcwd path
    pwd = os.getcwd()
    try:
        check_special_envvar()
        getllimpl(os.chdir)('..')
        assert os.getcwd() == os.path.dirname(pwd)
        check_special_envvar()
    finally:
        # Always restore the original working directory for later tests.
        os.chdir(pwd)
def test_mkdir():
    filename = str(udir.join('test_mkdir.dir'))
    getllimpl(os.mkdir)(filename, 0)
    # A second mkdir on the same path must fail with EEXIST.
    exc = py.test.raises(OSError, getllimpl(os.mkdir), filename, 0)
    assert exc.value.errno == errno.EEXIST
    if sys.platform == 'win32':
        # On Windows the ll implementation is expected to raise WindowsError.
        assert exc.type is WindowsError
def test_strerror():
    """os.strerror through the ll implementation matches the host message for errno 2."""
    message = getllimpl(os.strerror)(2)
    assert message == os.strerror(2)
def test_system():
    # Run a shell command through the ll os.system; check the exit status and
    # that the shell redirection actually produced the expected output file.
    filename = str(udir.join('test_system.txt'))
    arg = '%s -c "print 1+1" > %s' % (sys.executable, filename)
    data = getllimpl(os.system)(arg)
    assert data == 0
    assert file(filename).read().strip() == '2'
    os.unlink(filename)
# Environment handed to /usr/bin/env in test_execve; the child must echo exactly this mapping.
EXECVE_ENV = {"foo": "bar", "baz": "quux"}
def test_execve():
    if os.name != 'posix':
        py.test.skip('posix specific function')
    ll_execve = getllimpl(os.execve)
    def run_execve(program, args=None, env=None, do_path_lookup=False):
        """Fork, exec *program* in the child with stdout piped back, and
        return (wait status, captured stdout)."""
        if args is None:
            args = [program]
        else:
            args = [program] + args
        if env is None:
            env = {}
        # we cannot directly call ll_execve() because it replaces the
        # current process.
        fd_read, fd_write = os.pipe()
        childpid = os.fork()
        if childpid == 0:
            # in the child
            os.close(fd_read)
            os.dup2(fd_write, 1)  # stdout
            os.close(fd_write)
            if do_path_lookup:
                os.execvp(program, args)
            else:
                ll_execve(program, args, env)
            assert 0, "should not arrive here"
        else:
            # in the parent
            os.close(fd_write)
            child_stdout = []
            while True:
                data = os.read(fd_read, 4096)
                if not data: break  # closed
                child_stdout.append(data)
            pid, status = os.waitpid(childpid, 0)
            os.close(fd_read)
            return status, ''.join(child_stdout)
    # Test exit status and code
    result, child_stdout = run_execve("/usr/bin/which", ["true"], do_path_lookup=True)
    result, child_stdout = run_execve(child_stdout.strip())  # /bin/true or /usr/bin/true
    assert os.WIFEXITED(result)
    assert os.WEXITSTATUS(result) == 0
    result, child_stdout = run_execve("/usr/bin/which", ["false"], do_path_lookup=True)
    result, child_stdout = run_execve(child_stdout.strip())  # /bin/false or /usr/bin/false
    assert os.WIFEXITED(result)
    assert os.WEXITSTATUS(result) == 1
    # Test environment
    result, child_stdout = run_execve("/usr/bin/env", env=EXECVE_ENV)
    assert os.WIFEXITED(result)
    assert os.WEXITSTATUS(result) == 0
    assert dict([line.split('=') for line in child_stdout.splitlines()]) == EXECVE_ENV
    # The following won't actually execute anything, so they don't need
    # a child process helper.
    # If the target does not exist, an OSError should result
    info = py.test.raises(
        OSError, ll_execve, "this/file/is/non/existent", [], {})
    assert info.value.errno == errno.ENOENT
    # If the target is not executable, an OSError should result
    info = py.test.raises(
        OSError, ll_execve, "/etc/passwd", [], {})
    assert info.value.errno == errno.EACCES
def test_os_write():
    #Same as test in rpython/test/test_rbuiltin
    fname = str(udir.join('os_test.txt'))
    fd = os.open(fname, os.O_WRONLY|os.O_CREAT, 0777)
    assert fd >= 0
    f = getllimpl(os.write)
    f(fd, 'Hello world')
    os.close(fd)
    with open(fname) as fid:
        assert fid.read() == "Hello world"
    # Writing to an already-closed descriptor must raise OSError.
    fd = os.open(fname, os.O_WRONLY|os.O_CREAT, 0777)
    os.close(fd)
    py.test.raises(OSError, f, fd, 'Hello world')
def test_os_close():
    fname = str(udir.join('os_test.txt'))
    fd = os.open(fname, os.O_WRONLY|os.O_CREAT, 0777)
    assert fd >= 0
    os.write(fd, 'Hello world')
    f = getllimpl(os.close)
    f(fd)
    # Closing the same descriptor twice must raise OSError.
    py.test.raises(OSError, f, fd)
def test_os_lseek():
    fname = str(udir.join('os_test.txt'))
    fd = os.open(fname, os.O_RDWR|os.O_CREAT, 0777)
    assert fd >= 0
    os.write(fd, 'Hello world')
    f = getllimpl(os.lseek)
    # Rewind to the start and re-read what was just written.
    f(fd,0,0)
    assert os.read(fd, 11) == 'Hello world'
    os.close(fd)
    # Seeking on a closed descriptor must raise OSError.
    py.test.raises(OSError, f, fd, 0, 0)
def test_os_fsync():
    fname = str(udir.join('os_test.txt'))
    fd = os.open(fname, os.O_WRONLY|os.O_CREAT, 0777)
    assert fd >= 0
    os.write(fd, 'Hello world')
    f = getllimpl(os.fsync)
    f(fd)
    os.close(fd)
    # After fsync the data must be visible to an independent reader.
    fid = open(fname)
    assert fid.read() == 'Hello world'
    fid.close()
    # fsync on a closed descriptor must raise OSError.
    py.test.raises(OSError, f, fd)
def test_os_fdatasync():
try:
f = getllimpl(os.fdatasync)
except:
py.test.skip('No fdatasync in os')
fname = str(udir.join('os_test.txt'))
fd = os.open(fname, os.O_WRONLY|os.O_CREAT, 0777)
assert fd >= 0
os.write(fd, 'Hello world')
f(fd)
fid = open(fname)
assert fid.read() == 'Hello world'
os.close(fd)
py.test.raises(OSError, f, fd)
def test_os_kill():
    # Spawn a sleeping child, SIGTERM it through the ll os.kill, and check
    # that the child's wait status reflects that signal.
    if not hasattr(os,'kill') or sys.platform == 'win32':
        py.test.skip('No kill in os')
    f = getllimpl(os.kill)
    import subprocess
    import signal
    proc = subprocess.Popen([sys.executable, "-c",
                             "import time;"
                             "time.sleep(10)",
                             ],
                            )
    f(proc.pid, signal.SIGTERM)
    # Popen.wait() reports death-by-signal as the negated signal number.
    expected = -signal.SIGTERM
    assert proc.wait() == expected
def test_isatty():
    """isatty on an invalid descriptor must report False (and not raise)."""
    try:
        f = getllimpl(os.isatty)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit propagate.
        py.test.skip('No isatty in os')
    assert f(-1) == False
class TestOsExpect(ExpectTest):
    # Runs the test body in a pexpect child (ExpectTest), since ttyname needs
    # file descriptors attached to a real terminal.
    def setup_class(cls):
        if not hasattr(os, 'ttyname'):
            py.test.skip("no ttyname")
    def test_ttyname(self):
        def f():
            import os
            import py
            from rpython.rtyper.test.test_llinterp import interpret
            def ll_to_string(s):
                return ''.join(s.chars)
            def f(num):
                try:
                    return os.ttyname(num)
                except OSError:
                    return ''
            # fd 0 is presumably a tty inside the expect child; 338 is an
            # invalid descriptor, so the interpreted call must return ''.
            assert ll_to_string(interpret(f, [0])) == f(0)
            assert ll_to_string(interpret(f, [338])) == ''
        self.run_test(f)
| 30.831683 | 90 | 0.603083 |
b70f60088dbc8cef5fae49b4e52d56c39b0c5ccb | 6,434 | py | Python | lib/metrics.py | alabrashJr/Maha-Odd | cce4bab1f30589cf3d52636fe511c0269058679e | [
"MIT"
] | null | null | null | lib/metrics.py | alabrashJr/Maha-Odd | cce4bab1f30589cf3d52636fe511c0269058679e | [
"MIT"
] | null | null | null | lib/metrics.py | alabrashJr/Maha-Odd | cce4bab1f30589cf3d52636fe511c0269058679e | [
"MIT"
] | null | null | null | # Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
from bisect import bisect_right
from logging import warning
from typing import Union
import torch
from numpy import asarray, where
from sklearn.metrics import accuracy_score, roc_curve, auc, average_precision_score, f1_score
def _maybe_cast_torch_objects_to_numpy(logits, labels):
    """Coerce *logits* and *labels* to NumPy arrays, moving torch tensors off the GPU first.

    :param logits: ood or classification logits
    :param labels: ood or classification labels
    :return: (logits, labels) as numpy arrays
    """
    is_tensor = isinstance(logits, torch.Tensor)
    if is_tensor:
        warning("Better not to pass torch tensors for logits. Too much copyting from GPU")
        logits = logits.detach().cpu().numpy()
    return asarray(logits), asarray(labels)
def _validate_ood_labels(labels):
    """Raise RuntimeError unless every label is 0 or 1 (lists and numpy arrays accepted)."""
    arr = asarray(labels)
    is_binary = (arr == 0) | (arr == 1)
    if not is_binary.all():
        raise RuntimeError("OOD labels can only be 0 or 1")
def _validate_sizes(logits, labels, only_batch_size=False):
    """Raise RuntimeError on empty arrays or mismatched shapes.

    With ``only_batch_size=True`` only the first dimension must agree.
    """
    if not logits.size or not labels.size:
        raise RuntimeError("Passed empty array to metric")
    if only_batch_size:
        if logits.shape[0] != labels.shape[0]:
            raise RuntimeError("Predictions and labels should have same batch size")
    elif logits.shape != labels.shape:
        raise RuntimeError("Predictions and labels should have same shape")
def classification_accuracy(predictions, labels):
    """Accuracy of classification predictions against labels (both batch_size x 1)."""
    predictions, labels = _maybe_cast_torch_objects_to_numpy(predictions, labels)
    _validate_sizes(predictions, labels)
    flat_predictions = predictions.flatten()
    flat_labels = labels.flatten()
    return accuracy_score(flat_predictions, flat_labels)
def classification_f1_macro_score(predictions, labels):
    """Macro-averaged F1 score of classification predictions against labels."""
    preds, targets = _maybe_cast_torch_objects_to_numpy(predictions, labels)
    _validate_sizes(preds, targets)
    return f1_score(targets, preds, average='macro')
def classification_f1_micro_score(predictions, labels):
    """Micro-averaged F1 score of classification predictions against labels."""
    preds, targets = _maybe_cast_torch_objects_to_numpy(predictions, labels)
    _validate_sizes(preds, targets)
    return f1_score(targets, preds, average='micro')
def _cast_and_validate_ood(ood_scores, labels):
    """Combine the casting and validation helpers used by every OOD metric."""
    scores, targets = _maybe_cast_torch_objects_to_numpy(ood_scores, labels)
    _validate_ood_labels(targets)
    _validate_sizes(scores, targets)
    return scores, targets
def ood_classification_accuracy(ood_scores, labels, threshold):
    """
    Classification accuracy for the OOD task.
    :param ood_scores: OOD certainty scores: batch_size X 1
    :param labels: OOD labels, 1 for OOD, 0 for in-domain: batch_size X 1
    :param threshold: decision rule applied to ``ood_scores``
    :return: OOD classification accuracy
    """
    scores, targets = _cast_and_validate_ood(ood_scores, labels)
    predicted_ood = scores >= threshold
    return accuracy_score(predicted_ood, targets)
def roc_auc(ood_scores, labels, swap_labels: bool = False):
    """
    Area under the ROC curve for the OOD task.
    :param ood_scores: OOD certainty scores: batch_size X 1
    :param labels: OOD labels, 1 for OOD, 0 for in-domain: batch_size X 1
    :param swap_labels: treat the negative class as positive and vice versa.
    :return: AUROC
    """
    scores, targets = _cast_and_validate_ood(ood_scores, labels)
    if swap_labels:
        scores, targets = swap_labels_scores(scores, targets)
    false_pos, true_pos, _ = roc_curve(targets, scores)
    return auc(false_pos, true_pos)
def roc_aupr(ood_scores, labels, swap_labels: bool = False):
    """
    Area under the precision-recall curve for the OOD task.
    :param ood_scores: OOD certainty scores: batch_size X 1
    :param labels: OOD labels, 1 for OOD, 0 for in-domain: batch_size X 1
    :param swap_labels: treat the negative class as positive and vice versa.
    :return: AUPR
    """
    scores, targets = _cast_and_validate_ood(ood_scores, labels)
    if swap_labels:
        scores, targets = swap_labels_scores(scores, targets)
    return average_precision_score(targets, scores)
def _custom_bisect(tpr, tpr_level):
    """bisect_right, then walk left past any preceding entries >= tpr_level.

    Yields the leftmost index whose predecessor is below the requested level.
    """
    position = bisect_right(tpr, tpr_level)
    while position > -1 and tpr[position - 1] >= tpr_level:
        position -= 1
    return position
def fpr_at_x_tpr(ood_scores, labels, tpr_level: Union[int, float], swap_labels: bool = False):
    """
    Computer False Positive rate (1 - in-domain recall) at fixed True Positive rate (OOD recall)
    :param ood_scores: OOD certainty scores: batch_size X 1
    :param labels: OOD labels, 1 for OOD, 0 for in-domain: batch_size X 1
    :param tpr_level: OOD recall, 0-100 for int arg, 0.0-1.0 for float arg
    :param swap_labels: whether to swap labels, i.e. positive class would be a negative and vice versa.
    :return: FPR@{trp_level}TPR
    """
    assert isinstance(tpr_level, (int, float))
    if isinstance(tpr_level, int):
        # Integer levels are percentages; normalize into the 0.0-1.0 range.
        assert 0 <= tpr_level <= 100
        tpr_level /= 100
    assert 0 <= tpr_level <= 1
    ood_scores, labels = _cast_and_validate_ood(ood_scores, labels)
    if swap_labels:
        ood_scores, labels = swap_labels_scores(ood_scores, labels)
    fpr, tpr, _ = roc_curve(labels, ood_scores, drop_intermediate=False)
    # Find the leftmost point reaching the requested TPR, then clamp the index
    # into [0, len(fpr) - 1] before reading off the FPR.
    closest_index = _custom_bisect(tpr, tpr_level)
    idx = max(closest_index, 0)
    idx = min(idx, len(fpr) - 1)
    return fpr[idx]
def swap_labels_scores(scores, labels):
    """
    Swaps positive class with negative one, revert scores order.
    :param scores: certainty scores
    :param labels: binary labels, 1 for positive class, 0 for negative class
    :return: (negated scores, labels with 0 and 1 exchanged)
    """
    return -scores, where(labels, 0, 1)
| 38.993939 | 103 | 0.722878 |
f7268b17e5afdf9edaac16ec22aa1865bf00ab9e | 6,337 | py | Python | RecoTracker/ConversionSeedGenerators/python/PhotonConversionTrajectorySeedProducerFromSingleLeg_cfi.py | gputtley/cmssw | c1ef8454804e4ebea8b65f59c4a952a6c94fde3b | [
"Apache-2.0"
] | 3 | 2018-08-24T19:10:26.000Z | 2019-02-19T11:45:32.000Z | RecoTracker/ConversionSeedGenerators/python/PhotonConversionTrajectorySeedProducerFromSingleLeg_cfi.py | gputtley/cmssw | c1ef8454804e4ebea8b65f59c4a952a6c94fde3b | [
"Apache-2.0"
] | 26 | 2018-10-30T12:47:58.000Z | 2022-03-29T08:39:00.000Z | RecoTracker/ConversionSeedGenerators/python/PhotonConversionTrajectorySeedProducerFromSingleLeg_cfi.py | p2l1pfp/cmssw | 9bda22bf33ecf18dd19a3af2b3a8cbdb1de556a9 | [
"Apache-2.0"
] | 5 | 2018-08-21T16:37:52.000Z | 2020-01-09T13:33:17.000Z | import FWCore.ParameterSet.Config as cms
from RecoTracker.TkSeedGenerator.SeedGeneratorFromRegionHitsEDProducer_cfi import seedGeneratorFromRegionHitsEDProducer
# Reuse the cluster-multiplicity check PSet from the generic seed producer.
CommonClusterCheckPSet = seedGeneratorFromRegionHitsEDProducer.ClusterCheckPSet
# EDProducer building trajectory seeds for single-leg photon conversions.
photonConvTrajSeedFromSingleLeg = cms.EDProducer("PhotonConversionTrajectorySeedProducerFromSingleLeg",
    TrackRefitter = cms.InputTag('TrackRefitter',''),
    primaryVerticesTag = cms.InputTag("offlinePrimaryVertices"),
    beamSpotInputTag = cms.InputTag("offlineBeamSpot"),
    newSeedCandidates = cms.string("convSeedCandidates"),
    xcheckSeedCandidates = cms.string("xcheckSeedCandidates"),
    vtxMinDoF = cms.double(4),
    maxDZSigmas = cms.double(10.),
    maxNumSelVtx = cms.uint32(2),
    applyTkVtxConstraint = cms.bool(True),
    DoxcheckSeedCandidates = cms.bool(False),
    OrderedHitsFactoryPSet = cms.PSet(
        maxHitPairsPerTrackAndGenerator = cms.uint32(10),
        maxElement = cms.uint32(40000),
        SeedingLayers = cms.InputTag('convLayerPairs')
    ),
    SeedComparitorPSet = cms.PSet(
        ComponentName = cms.string('none')
    ),
    ClusterCheckPSet = CommonClusterCheckPSet,
    RegionFactoryPSet = cms.PSet(
        RegionPSet = cms.PSet( precise = cms.bool(True),
                               beamSpot = cms.InputTag("offlineBeamSpot"),
                               originRadius = cms.double(3.0),
                               ptMin = cms.double(0.2),
                               originHalfLength = cms.double(12.0)
                               ),
        ComponentName = cms.string('GlobalRegionProducerFromBeamSpot')
    ),
    SeedCreatorPSet = cms.PSet(
        ComponentName = cms.string('SeedForPhotonConversion1Leg'),
        SeedMomentumForBOFF = cms.double(5.0),
        propagator = cms.string('PropagatorWithMaterial'),
        TTRHBuilder = cms.string('WithTrackAngle')
    )
)
from Configuration.Eras.Modifier_trackingLowPU_cff import trackingLowPU
# trackingLowPU era: lower the hit-pair cap (40000 -> 10000) and set
# low-pileup cluster-multiplicity limits / cut expression.
trackingLowPU.toModify(photonConvTrajSeedFromSingleLeg,
    OrderedHitsFactoryPSet = dict(maxElement = 10000),
    ClusterCheckPSet = dict(
        MaxNumberOfCosmicClusters = 150000,
        MaxNumberOfPixelClusters = 20000,
        cut = "strip < 150000 && pixel < 20000 && (strip < 20000 + 7* pixel)"
    )
)
from Configuration.Eras.Modifier_trackingPhase2PU140_cff import trackingPhase2PU140
# Phase-2 140-pileup era: larger cluster limits, no cut expression (cut = None),
# higher hit-pair cap, and ptMin raised from 0.2 to 0.3.
trackingPhase2PU140.toModify(photonConvTrajSeedFromSingleLeg,
    ClusterCheckPSet = dict(
        MaxNumberOfCosmicClusters = 1000000,
        MaxNumberOfPixelClusters = 100000,
        cut = None
    ),
    OrderedHitsFactoryPSet = dict(maxElement = 100000),
    RegionFactoryPSet = dict(RegionPSet = dict(ptMin = 0.3)),
)
from Configuration.Eras.Modifier_peripheralPbPb_cff import peripheralPbPb
# Peripheral PbPb era: only the cluster-check cut expression changes.
peripheralPbPb.toModify(photonConvTrajSeedFromSingleLeg,
    ClusterCheckPSet = dict(cut = "strip < 400000 && pixel < 40000 && (strip < 60000 + 7.0*pixel) && (pixel < 8000 + 0.14*strip)")
)
from Configuration.Eras.Modifier_pp_on_XeXe_2017_cff import pp_on_XeXe_2017
from Configuration.Eras.Modifier_pp_on_AA_2018_cff import pp_on_AA_2018
# XeXe 2017 / PbPb 2018 eras: heavy-ion cluster limits, cut expression and
# hit-pair cap (applied by either era modifier).
(pp_on_XeXe_2017 | pp_on_AA_2018 ).toModify(photonConvTrajSeedFromSingleLeg,
    ClusterCheckPSet = dict(MaxNumberOfPixelClusters = 100000,
                            cut = "strip < 1000000 && pixel < 100000 && (strip < 50000 + 10*pixel) && (pixel < 5000 + strip/2.)"
                            ),
    OrderedHitsFactoryPSet = dict(maxElement = 100000)
)
from RecoTracker.TkTrackingRegions.globalTrackingRegionWithVertices_cff import globalTrackingRegionWithVertices as _globalTrackingRegionWithVertices
# Same heavy-ion eras: switch to the vertex-based tracking-region producer with
# originRadius 0 and big-event radius scaling effectively disabled immediately.
(pp_on_XeXe_2017 | pp_on_AA_2018 ).toModify(photonConvTrajSeedFromSingleLeg,
    RegionFactoryPSet = dict(ComponentName = 'GlobalTrackingRegionWithVerticesProducer',
                             RegionPSet = _globalTrackingRegionWithVertices.RegionPSet.clone(
                                 originRadius = 0,
                                 originRScaling4BigEvts = True,
                                 minOriginR = 0,
                                 scalingStartNPix = 0,
                                 scalingEndNPix = 1#essentially turn off immediately
                             ),
    )
)
| 70.411111 | 153 | 0.453369 |
3852afa9428538bf641528d6317e6f40f3ad4557 | 1,929 | py | Python | ui/controller/workspace_handler.py | ctwardy/sitehound | 0f928a82f761e3d0335d1d4d01f6105b726fd889 | [
"Apache-2.0"
] | null | null | null | ui/controller/workspace_handler.py | ctwardy/sitehound | 0f928a82f761e3d0335d1d4d01f6105b726fd889 | [
"Apache-2.0"
] | null | null | null | ui/controller/workspace_handler.py | ctwardy/sitehound | 0f928a82f761e3d0335d1d4d01f6105b726fd889 | [
"Apache-2.0"
] | 1 | 2018-10-02T22:03:23.000Z | 2018-10-02T22:03:23.000Z | from flask_login import login_required
from controller.InvalidException import InvalidUsage
__author__ = 'tomas'
import json
from ui import app
from flask import Response, request, jsonify
from service.workspace_service import list_workspace, add_workspace, delete_workspace, get_workspace
from utils.json_encoder import JSONEncoder
from mongoutils.errors import AddingWorkspaceError
@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
    """Serialize an InvalidUsage exception into a JSON response with its status code."""
    payload = jsonify(error.to_dict())
    payload.status_code = error.status_code
    return payload
@app.route("/api/workspace", methods=['GET'])
@login_required
def get_workspaces_api():
    """GET /api/workspace -- list every workspace as JSON."""
    encoded = JSONEncoder().encode(list_workspace())
    return Response(encoded, mimetype="application/json")
@app.route("/api/workspace/<workspace_id>", methods=['GET'])
@login_required
def get_workspace_api(workspace_id):
    """GET /api/workspace/<workspace_id> -- fetch one workspace as JSON."""
    encoded = JSONEncoder().encode(get_workspace(workspace_id))
    return Response(encoded, mimetype="application/json")
@app.route("/api/workspace", methods=['POST'])
@login_required
def add_workspace_api():
    """POST /api/workspace -- create a workspace named by the raw request body.

    Returns the refreshed workspace list; raises InvalidUsage (409) when the
    name already exists, which the registered ``handle_invalid_usage`` handler
    turns into a JSON error.  The stray ``@app.errorhandler(InvalidUsage)``
    decorator was removed: it re-registered this view (whose signature takes no
    error argument) as the app-wide InvalidUsage handler, shadowing
    ``handle_invalid_usage``.
    """
    try:
        name = request.data
        add_workspace(name)
        in_doc = list_workspace()
        out_doc = JSONEncoder().encode(in_doc)
        return Response(out_doc, mimetype="application/json")
    except AddingWorkspaceError:
        raise InvalidUsage('A workspace with that name already exists', status_code=409)
@app.route("/api/workspace/<name>", methods=['PUT'])
@login_required
def update_workspace_api(name):
    """PUT /api/workspace/<name> -- upsert a workspace, then return the full list."""
    add_workspace(name)
    encoded = JSONEncoder().encode(list_workspace())
    return Response(encoded, mimetype="application/json")
@app.route("/api/workspace/<id>", methods=['DELETE'])
@login_required
def delete_workspace_api(id):
    """DELETE /api/workspace/<id> -- remove a workspace; responds with an empty JSON object."""
    delete_workspace(id)
    return Response("{}", mimetype="application/json")
974260a320e4b2558fd26ce72234b171b0c40614 | 5,382 | py | Python | vinzclortho/store.py | parbo/vinzclortho | bc75bceda06a6c6354c8f4759f27406056a1d605 | [
"MIT"
] | 1 | 2022-01-07T15:50:28.000Z | 2022-01-07T15:50:28.000Z | vinzclortho/store.py | parbo/vinzclortho | bc75bceda06a6c6354c8f4759f27406056a1d605 | [
"MIT"
] | null | null | null | vinzclortho/store.py | parbo/vinzclortho | bc75bceda06a6c6354c8f4759f27406056a1d605 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2001-2010 Pär Bohrarper.
# See LICENSE for details.
import sqlite3
import bsddb
import unittest
class Store(object):
    """Abstract key/value store interface.

    Concrete stores implement put/get/delete plus an iteration protocol:
    get_iterator() hands back an opaque cursor and iterate() advances it,
    gathering roughly *threshold* bytes of key+value data per call.
    """
    def put(self, key, value):
        raise NotImplementedError
    def get(self, key):
        raise NotImplementedError
    def delete(self, key):
        raise NotImplementedError
    def get_iterator(self):
        """Return an opaque cursor understood by iterate(); it need not be a real iterator."""
        raise NotImplementedError
    def iterate(self, iterator, threshold):
        """Advance *iterator* and return (pairs, iterator) once about *threshold* bytes accumulate."""
        raise NotImplementedError
    def multi_put(self, kvlist, resolver):
        """Store each (key, value) pair, letting *resolver* merge with any existing value."""
        for key, value in kvlist:
            try:
                value = resolver(value, self.get(key))
            except KeyError:
                # Key absent in this store: keep the incoming value unresolved.
                pass
            # TODO: probably should check if the value was changed...
            self.put(key, value)
class DictStore(Store):
    """In-memory store backed by a plain dict; its cursor is a dict item iterator."""
    def __init__(self):
        self._store = {}
    def put(self, key, value):
        self._store[key] = value
    def get(self, key):
        return self._store[key]
    def delete(self, key):
        del self._store[key]
    def get_iterator(self):
        return self._store.iteritems()
    def iterate(self, iterator, threshold):
        gathered = []
        seen_bytes = 0
        while seen_bytes < threshold:
            try:
                key, value = iterator.next()
            except StopIteration:
                # Exhausted: hand back whatever was collected so far.
                break
            seen_bytes += len(key) + len(value)
            gathered.append((key, value))
        return gathered, iterator
class BerkeleyDBStore(Store):
    """Store using BerkeleyDB, specifically the B-Tree version"""
    def __init__(self, filename):
        # btopen creates the file on demand; keys are kept in sorted (B-tree) order.
        self._store = bsddb.btopen(filename)
    def put(self, key, value):
        self._store[key] = value
        # Flush after every mutation so the file stays consistent on disk.
        self._store.sync()
    def get(self, key):
        return self._store[key]
    def delete(self, key):
        del self._store[key]
        self._store.sync()
    def get_iterator(self):
        # The "cursor" is simply the first key; iterate() re-seeks with set_location().
        try:
            k, v = self._store.first()
            return k
        except bsddb.error:
            # Empty database: nothing to iterate.
            return None
    def iterate(self, iterator, threshold):
        if iterator is None:
            return [], None
        # Re-position the DB cursor on the saved key, then walk forward until
        # roughly `threshold` bytes of key+value data have been collected.
        iterator, v = self._store.set_location(iterator)
        tot = 0
        ret = [(iterator, v)]
        try:
            while tot < threshold:
                iterator, v = self._store.next()
                tot = tot + len(iterator) + len(v)
                ret.append((iterator, v))
            return ret, iterator
        except bsddb.error:
            # Walked off the end of the tree: signal completion with a None cursor.
            return ret, None
class SQLiteStore(Store):
    """Store that uses SQLite for storage.

    Keys and values live in a single ``blobkey`` table; every mutation is
    committed immediately so the database stays consistent on disk.
    """
    def __init__(self, filename):
        self._db = filename
        self.conn = sqlite3.connect(self._db)
        c = self.conn.cursor()
        # Create table
        c.execute("CREATE TABLE IF NOT EXISTS blobkey(k BLOB PRIMARY KEY, v BLOB)")
        self.conn.commit()
        c.close()
    def put(self, key, value):
        # INSERT OR REPLACE makes put() an upsert.
        c = self.conn.cursor()
        c.execute("INSERT OR REPLACE INTO blobkey(k, v) VALUES(?, ?)", (key, sqlite3.Binary(value)))
        self.conn.commit()
        c.close()
    def get(self, key):
        c = self.conn.cursor()
        c.execute("SELECT v FROM blobkey WHERE k = ?", (key,))
        value = c.fetchone()
        c.close()
        if value is None:
            raise KeyError(key)
        return value[0]
    def delete(self, key):
        c = self.conn.cursor()
        c.execute("DELETE FROM blobkey WHERE k = ?", (key,))
        self.conn.commit()
        rows = c.rowcount
        c.close()
        if rows == 0:
            # Include the key for parity with get() (was a bare ``raise KeyError``).
            raise KeyError(key)
    def get_iterator(self):
        # The live cursor itself is the iteration state; it yields (k, v) rows lazily.
        c = self.conn.cursor()
        c.execute("SELECT k, v FROM blobkey")
        return c
    def iterate(self, iterator, threshold):
        """Pull rows off the cursor until about *threshold* bytes have accumulated."""
        tot = 0
        ret = []
        try:
            while tot < threshold:
                # Builtin next() works on py2 (2.6+) and py3 cursors alike,
                # unlike the py2-only ``iterator.next()`` the original used.
                k, v = next(iterator)
                tot = tot + len(k) + len(v)
                ret.append((k, v))
            return ret, iterator
        except StopIteration:
            return ret, iterator
class TestStores(unittest.TestCase):
    """Runs the same insert/iterate scenario against every Store backend."""

    def _test_iterate(self, d):
        # Insert 100 known pairs, then page through the store roughly 100
        # bytes at a time and check the union of pages equals the input.
        contents = [("Key_%d"%i, "Val_%d"%i) for i in range(100)]
        for k, v in contents:
            d.put(k, v)
        iterator = d.get_iterator()
        kvlist = []
        while True:
            kv, iterator = d.iterate(iterator, 100)
            if not kv:
                break
            kvlist.extend(kv)
        # str() normalises whatever value type the backend hands back before
        # comparing against the plain-string inputs.
        self.assertEqual(set(contents), set([(str(k), str(v)) for k, v in kvlist]))

    def test_iterate_dict(self):
        d = DictStore()
        self._test_iterate(d)

    def test_iterate_bdb(self):
        # Creates/overwrites a file named "bdb" in the working directory.
        d = BerkeleyDBStore("bdb")
        self._test_iterate(d)

    def test_iterate_sqlite(self):
        # Creates/overwrites a file named "sqlite" in the working directory.
        d = SQLiteStore("sqlite")
        self._test_iterate(d)
# Run the store test-suite when this module is executed directly.
if __name__=="__main__":
    unittest.main()
| 27.181818 | 100 | 0.549238 |
79a966bf47562e4d6f847fed524575a517ed81ea | 1,488 | py | Python | Code/randomKmeans.py | suraj-ravishankar/Random-Global-Fast-global-k-means | 24bef99f30de188b63238e6ea0b35510f4f89d38 | [
"MIT"
] | null | null | null | Code/randomKmeans.py | suraj-ravishankar/Random-Global-Fast-global-k-means | 24bef99f30de188b63238e6ea0b35510f4f89d38 | [
"MIT"
] | null | null | null | Code/randomKmeans.py | suraj-ravishankar/Random-Global-Fast-global-k-means | 24bef99f30de188b63238e6ea0b35510f4f89d38 | [
"MIT"
] | 1 | 2020-03-06T03:41:11.000Z | 2020-03-06T03:41:11.000Z | import numpy as np
import math
import random
from itertools import repeat
def randomKmeans(samples, k, TOL, C):
    """Lloyd's k-means starting from the supplied centroids C.

    Alternates assignment and centroid-update steps until the change in the
    total squared error drops below TOL.  Returns (SSE rounded to 4 decimals,
    centroids, partition).  Note: C is updated in place; relies on the
    module-level eucliDist/sqEucliDist helpers.
    """
    SS_Previous = 0
    samplesLength = len(samples)
    dim = len(samples[0])
    while 1:
        P = [[] for i in repeat(None, k)]
        # find the closest cluster centroid
        for i in range(samplesLength):
            minIdx = 0
            minVal = eucliDist(samples[i], C[0], dim)
            for j in range(1,k):
                dist = eucliDist(samples[i], C[j], dim)
                if (dist < minVal):
                    minIdx = j
                    minVal = dist
            # assign the point to the correct cluster
            P[minIdx].append(samples[i])
        # recalculating cluster centroid
        for j in range(k):
            coords = P[j]
            if(len(coords) == 0):
                # Empty cluster: re-seed with one random sample so the mean
                # below is well defined (makes the run non-deterministic).
                coords = random.sample(samples, 1)
            zipped = zip(*coords)
            num = len(coords)
            # Coordinate-wise mean; math.fsum keeps float rounding error low.
            C[j] = [math.fsum(dList)/num for dList in zipped]
        SS_Error = 0
        # calculating total clustering error
        for idx in range(k):
            for p_idx in range(len(P[idx])):
                SS_Error += sqEucliDist(P[idx][p_idx], C[idx], dim)
        # check if no change in SSE
        delta = abs(SS_Error - SS_Previous)
        if (delta < TOL):
            break
        SS_Previous = SS_Error
    return round(SS_Error, 4), C, P
# calculating Euclidean Distance
def eucliDist(sample, center, dim):
    """Euclidean distance over the first *dim* coordinates of two points."""
    squared = sum((sample[axis] - center[axis]) ** 2 for axis in range(dim))
    return squared ** 0.5
# calculating Squared Euclidean Distance
def sqEucliDist(sample, center, dim):
    """Squared Euclidean distance over the first *dim* coordinates."""
    return sum((sample[axis] - center[axis]) ** 2 for axis in range(dim))
| 23.25 | 55 | 0.65457 |
c85f8f0c4745e4f18b6448cdc44f37f4459b0d6a | 6,011 | py | Python | experiments/cascading/sim_cascading.py | lanxuedang/TIGER | a134b49f9c64321cb521a25953f9771ced9b597e | [
"MIT"
] | null | null | null | experiments/cascading/sim_cascading.py | lanxuedang/TIGER | a134b49f9c64321cb521a25953f9771ced9b597e | [
"MIT"
] | null | null | null | experiments/cascading/sim_cascading.py | lanxuedang/TIGER | a134b49f9c64321cb521a25953f9771ced9b597e | [
"MIT"
] | null | null | null | import os
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
import sys
sys.path.insert(0, os.getcwd() + '/../../')
from graph_tiger.graphs import electrical
from graph_tiger.cascading import Cascading
def plot_results(graph, params, results, xlabel='Steps', line_label='', experiment=''):
    """Plot each result series normalised by graph size and save it as a PDF
    under plots/<experiment>/ (directory created on demand)."""
    plt.figure(figsize=(6.4, 4.8))
    title = '{}:step={},l={},r={},k_a={},attack={},k_d={},defense={}'.format(
        experiment, params['steps'], params['l'], params['r'], params['k_a'],
        params['attack'], params['k_d'], params['defense'])
    n_nodes = len(graph)
    for strength, series in results.items():
        plt.plot([value / n_nodes for value in series],
                 label="{}: {}".format(line_label, strength))
    plt.xlabel(xlabel)
    plt.ylabel(params['robust_measure'])
    plt.ylim(0, 1)
    out_dir = os.getcwd() + '/plots/' + experiment + '/'
    os.makedirs(out_dir, exist_ok=True)
    plt.legend()
    plt.title(title)
    plt.savefig(out_dir + title + '.pdf')
    plt.show()
    plt.clf()
def experiment_redundancy(graph):
    """Sweep redundancy r over {0.0, 0.1, 0.2, 0.3, 0.4} and plot the impact."""
    params = {
        'runs': 10,
        'steps': 100,
        'seed': 1,
        'l': 0.8,
        'r': 0.2,  # overwritten by the sweep loop below
        'c': int(0.1 * len(graph)),
        'k_a': 5,
        'attack': 'id_node',
        'attack_approx': None,  # int(0.1 * len(graph)),
        'k_d': 0,
        'defense': None,
        'robust_measure': 'largest_connected_component',
        'plot_transition': False,
        'gif_animation': False,
        'edge_style': None,
        'node_style': 'spectral',
        'fa_iter': 2000,
    }
    results = defaultdict(list)
    redundancy = np.arange(0, 0.5, .1)
    for idx, r in enumerate(redundancy):
        params['r'] = r
        # Only the middle setting (idx == 2, i.e. r = 0.2) renders the
        # transition plot, GIF animation and snapshots.
        if idx == 2:
            params['plot_transition'] = True
            params['gif_animation'] = True
            params['gif_snaps'] = True
        else:
            params['plot_transition'] = False
            params['gif_animation'] = False
            params['gif_snaps'] = False
        cf = Cascading(graph, **params)
        results[r] = cf.run_simulation()
    plot_results(graph, params, results, xlabel='Steps', line_label='Redundancy', experiment='redundancy')
def experiment_attack(graph):
    """Sweep attack strength k_a in {2, 4, 6, 8, 10} for random ('rnd_node')
    and targeted ('id_node') node attacks, plotting one figure per sweep.

    Fix: the per-iteration ``if idx == 2: ... else: ...`` blocks assigned
    plot_transition/gif_animation to False in BOTH arms (the values they
    already hold from params), so the dead conditionals were removed.
    """
    params = {
        'runs': 10,
        'steps': 100,
        'seed': 1,
        'l': 0.8,
        'r': 0.4,
        'c': int(0.1 * len(graph)),
        'k_a': 5,
        'attack': 'rnd_node',
        'attack_approx': None,  # int(0.1 * len(graph)),
        'k_d': 0,
        'defense': None,
        'robust_measure': 'largest_connected_component',
        'plot_transition': False,
        'gif_animation': False,
        'edge_style': None,
        'node_style': 'spectral',
        'fa_iter': 2000,
    }

    # rnd_node attack
    results = defaultdict(list)
    attack_strength = np.arange(2, 11, 2)
    for k_a in attack_strength:
        params['k_a'] = k_a
        cf = Cascading(graph, **params)
        results[k_a] = cf.run_simulation()
    plot_results(graph, params, results, xlabel='Steps', line_label='k_a', experiment='rnd_node_attack')

    # targeted attack
    params['attack'] = 'id_node'
    results = defaultdict(list)
    for k_a in attack_strength:
        params['k_a'] = k_a
        cf = Cascading(graph, **params)
        results[k_a] = cf.run_simulation()
    plot_results(graph, params, results, xlabel='Steps', line_label='k_a', experiment='id_node_attack')
def experiment_defense(graph):
    """Sweep defense strength k_d for an edge defense ('add_edge_preferential',
    k_d in {10..50}) and a node defense ('pr_node', k_d in {1,3,5,7,9}).

    Fix: the per-iteration ``if idx == 2: ... else: ...`` blocks assigned
    plot_transition/gif_animation to False in BOTH arms (the values they
    already hold from params), so the dead conditionals were removed.
    """
    params = {
        'runs': 10,
        'steps': 100,
        'seed': 1,
        'l': 0.8,
        'r': 0.2,
        'c': int(0.1 * len(graph)),
        'k_a': 5,
        'attack': 'id_node',
        'attack_approx': None,  # int(0.1 * len(graph)),
        'k_d': 0,
        'defense': 'add_edge_preferential',
        'robust_measure': 'largest_connected_component',
        'plot_transition': False,
        'gif_animation': False,
        'edge_style': None,
        'node_style': 'spectral',
        'fa_iter': 2000,
    }

    # edge defense
    results = defaultdict(list)
    defense_strength = np.arange(10, 51, 10)
    for k_d in defense_strength:
        params['k_d'] = k_d
        cf = Cascading(graph, **params)
        results[k_d] = cf.run_simulation()
    plot_results(graph, params, results, xlabel='Steps', line_label='k_d', experiment='add_edge_pref')

    # node defense
    params['defense'] = 'pr_node'
    defense_strength = np.arange(1, 10, 2)
    results = defaultdict(list)
    for k_d in defense_strength:
        params['k_d'] = k_d
        cf = Cascading(graph, **params)
        results[k_d] = cf.run_simulation()
    plot_results(graph, params, results, xlabel='Steps', line_label='k_d', experiment='add_node_pr')
def main():
    # Entry point: run only the redundancy sweep on the electrical-grid
    # graph; the attack/defense sweeps are kept available but disabled.
    graph = electrical().copy()
    experiment_redundancy(graph)
    # experiment_attack(graph)
    # experiment_defense(graph)


if __name__ == '__main__':
    main()
| 25.578723 | 123 | 0.55282 |
8fb61e642a150dc242f16101c9ea63de0c499adb | 211 | py | Python | simp_py_examples/course/SM1801/t001.py | kcfkwok2003/Simp_py | f75e66da01b45dc8688dda602f8b33d4258f0c31 | [
"MIT"
] | null | null | null | simp_py_examples/course/SM1801/t001.py | kcfkwok2003/Simp_py | f75e66da01b45dc8688dda602f8b33d4258f0c31 | [
"MIT"
] | null | null | null | simp_py_examples/course/SM1801/t001.py | kcfkwok2003/Simp_py | f75e66da01b45dc8688dda602f8b33d4258f0c31 | [
"MIT"
] | null | null | null | """
t001.py
This is M5Stack micropython example
"""
import simp_py # keyword

# clear screen
simp_py.lcd.clear() # module -> object -> method()
# show message on screen at pixel column 0, row 0
simp_py.lcd.text(0,0,'hello')
| 21.1 | 54 | 0.668246 |
595a0119c2a4573a9750c525527f911524d0f312 | 2,159 | py | Python | tools/xi_format_code.py | Rhunter1/xively-client-c | bbb0a472c7b2f592c8d167eedf46221626f881df | [
"BSD-3-Clause"
] | null | null | null | tools/xi_format_code.py | Rhunter1/xively-client-c | bbb0a472c7b2f592c8d167eedf46221626f881df | [
"BSD-3-Clause"
] | null | null | null | tools/xi_format_code.py | Rhunter1/xively-client-c | bbb0a472c7b2f592c8d167eedf46221626f881df | [
"BSD-3-Clause"
] | 2 | 2019-09-18T11:26:52.000Z | 2019-10-25T19:27:31.000Z | #!/usr/bin/env python
# Copyright (c) 2003-2018, Xively All rights reserved.
#
# This is part of the Xively C Client library,
# it is licensed under the BSD 3-Clause license.
import os
import argparse
import uuid
import subprocess
import shlex
CLANG_FORMAT = "clang-format -style=file"
def clangFormatFile( filename ):
    """Format *filename* in place with the module-level CLANG_FORMAT command.

    Waits for clang-format to finish before returning; the previous
    fire-and-forget Popen (its p.wait() was commented out) could leave an
    unbounded number of unreaped child processes running at script exit.
    The dead commented-out file-swap code (which referenced undefined names)
    was removed.
    """
    args = shlex.split( CLANG_FORMAT )
    args += [ "-i", filename ]
    print( args )
    p = subprocess.Popen( args )
    p.wait()
def findFiles( startDir, fileExt, recLevel, currLevel = 0, ignore = None ):
    """Recursively clang-format every file under *startDir* ending in *fileExt*.

    recLevel bounds the recursion depth (0 means unlimited); currLevel is the
    internal depth counter.  *ignore* holds the parsed .clang-format-ignore
    entries; callers normally omit it, and it is now read once at the
    top-level call and passed down instead of re-opening the ignore file for
    every directory visited (the previous behaviour).
    """
    if ignore is None:
        with open(".clang-format-ignore") as ignore_file:
            ignore = [line.strip('\n') for line in ignore_file.readlines()]
    contents = os.listdir( startDir )
    files = [ x for x in contents
              if os.path.isfile( os.path.join( startDir, x ) )
              and x.endswith( fileExt ) and x not in ignore ]
    # Hidden directories (leading '.') and ignored names are not descended into.
    dirs = [ x for x in contents
             if os.path.isdir( os.path.join( startDir, x ) )
             and x[ 0 ] != '.' and x not in ignore ]
    for name in files:
        clangFormatFile( os.path.join( startDir, name ) )
    if recLevel == 0 or ( recLevel > 0 and currLevel < recLevel ):
        for d in dirs:
            findFiles( os.path.join( startDir, d ), fileExt, recLevel,
                       currLevel + 1, ignore )
if __name__ == '__main__':
    parser = argparse.ArgumentParser( description='Source code formatter' )
    # -r bounds the recursion depth passed to findFiles (0 = unlimited).
    parser.add_argument( '-r', dest='recursive', type=int, default=100,
                         help='recursive mode, default 1, set 0 if you want to enable unlimited recursion')
    args = parser.parse_args()
    recursive = args.recursive
    # Format the .h and .c files of every tracked source tree.
    directories = [ 'src/', 'include/', 'include_senml/', 'examples/' ];
    for dir in directories:
        findFiles( dir, ".h", recursive )
        findFiles( dir, ".c", recursive )
| 31.75 | 141 | 0.624826 |
b75a0224ff7c2281626b6011975f29f456f0b2e6 | 2,570 | py | Python | kol/request/ItemDescriptionRequest.py | DamianDominoDavis/cwbot-ndy | 53b826232eadb7ef558f568872a945d04d8d4252 | [
"BSD-3-Clause"
] | null | null | null | kol/request/ItemDescriptionRequest.py | DamianDominoDavis/cwbot-ndy | 53b826232eadb7ef558f568872a945d04d8d4252 | [
"BSD-3-Clause"
] | null | null | null | kol/request/ItemDescriptionRequest.py | DamianDominoDavis/cwbot-ndy | 53b826232eadb7ef558f568872a945d04d8d4252 | [
"BSD-3-Clause"
] | null | null | null | from .GenericRequest import GenericRequest
from kol.manager import PatternManager
class ItemDescriptionRequest(GenericRequest):
    "Gets the description of an item and then parses various information from the response."

    def __init__(self, session, descId):
        super(ItemDescriptionRequest, self).__init__(session)
        self.url = session.serverURL + "desc_item.php?whichitem=%s" % descId

    def parseResponse(self):
        """Populate self.responseData from the item-description page.

        Always sets "name", "image" and "autosell"; sets "type" and the
        crafting-ingredient flags only when the corresponding pattern
        matches (flags are never set to False, matching the original
        contract).
        """
        # Get the item name (assumed always present in a valid response).
        itemNamePattern = PatternManager.getOrCompilePattern("itemName")
        match = itemNamePattern.search(self.responseText)
        self.responseData["name"] = match.group(1)

        # Get the item image.
        imagePattern = PatternManager.getOrCompilePattern("itemImage")
        match = imagePattern.search(self.responseText)
        self.responseData["image"] = match.group(1)

        # Get the item type (optional).
        typePattern = PatternManager.getOrCompilePattern("itemType")
        match = typePattern.search(self.responseText)
        if match:
            self.responseData["type"] = match.group(1).rstrip()

        # Get the autosell value, defaulting to 0 when absent.
        autosellPattern = PatternManager.getOrCompilePattern("itemAutosell")
        match = autosellPattern.search(self.responseText)
        if match:
            self.responseData["autosell"] = int(match.group(1))
        else:
            self.responseData["autosell"] = 0

        # Crafting-ingredient flags: pattern name and response key coincide,
        # so the four copy-pasted blocks collapse into one loop.
        for flag in ("isCookingIngredient", "isCocktailcraftingIngredient",
                     "isMeatsmithingComponent", "isJewelrymakingComponent"):
            pattern = PatternManager.getOrCompilePattern(flag)
            if pattern.search(self.responseText):
                self.responseData[flag] = True
| 43.559322 | 100 | 0.695331 |
1dc22a617f2373e16e90111adf2b74d8284fcc2a | 944 | py | Python | kubernetes/test/test_v1_initializers.py | iguazio/python | c2684bb479d44a49a2010ec4ede5ffa7b17349dd | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_initializers.py | iguazio/python | c2684bb479d44a49a2010ec4ede5ffa7b17349dd | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_initializers.py | iguazio/python | c2684bb479d44a49a2010ec4ede5ffa7b17349dd | [
"Apache-2.0"
] | 1 | 2019-01-10T11:13:52.000Z | 2019-01-10T11:13:52.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_initializers import V1Initializers
class TestV1Initializers(unittest.TestCase):
    """Unit-test stubs for the V1Initializers model (Swagger-generated)."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testV1Initializers(self):
        """Smoke-test placeholder for constructing V1Initializers."""
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes.client.models.v1_initializers.V1Initializers()
        pass


if __name__ == '__main__':
    unittest.main()
| 20.977778 | 105 | 0.70339 |
b3bd49f3fc7648e971ef782e8bc6f95b7ace34a5 | 15,918 | py | Python | caas_reg.py | gfragi/py_regression | 8287a3df760fc17af7e9ea9ee6df0ace7401ef75 | [
"MIT"
] | null | null | null | caas_reg.py | gfragi/py_regression | 8287a3df760fc17af7e9ea9ee6df0ace7401ef75 | [
"MIT"
] | 1 | 2022-03-09T00:51:16.000Z | 2022-03-09T00:51:16.000Z | caas_reg.py | gfragi/py_regression | 8287a3df760fc17af7e9ea9ee6df0ace7401ef75 | [
"MIT"
] | null | null | null | # ============== Import libraries =========
import numpy as np
import pandas as pd
import warnings
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
from pandas.core.dtypes.common import is_numeric_dtype, is_string_dtype
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from statsmodels.stats.outliers_influence import variance_inflation_factor
from yellowbrick.regressor import ResidualsPlot
from scipy import stats
import my_functions as mf
warnings.filterwarnings('ignore') # it is used for some minor warnings in seaborn
network = False  # when True, keep the network-traffic columns in the model

# ============= Load the Data ============================================================
# %% Load the csv & print columns' info
# df = pd.read_csv('cn_provider_pricing_dummy.csv') # dummy data
df = pd.read_csv('datasets/caas_data.csv') # real data

# Drop some not useful for calculation columns (sum calculation for total price)
if network:
    df = df.drop(['CPU_RAM_price', 'Storage_price', 'Cluster_fee_price', 'licensed_OS_price', 'Hybrid_support_price',
                  'external_egress_price', 'internal_egress_price', 'product'], axis=1)
else:
    # Without network modelling, the traffic features themselves go too.
    df = df.drop(['CPU_RAM_price', 'Storage_price', 'Cluster_fee_price', 'licensed_OS_price', 'Hybrid_support_price',
                  'external_egress_price', 'internal_egress_price', 'product', 'Internal_traffic', 'External_traffic'],
                 axis=1)

# %% ========== Select provider =======
# df = df.loc[df['Provider'] == 'Amazon']
print('rows x columns:', df.shape)
print('Columns info:', df.info())
print('Data highlights: \n', df.describe())

# Check for null values (percentage of rows per column)
print(df.isnull().sum() * 100 / df.shape[0])

uniqueList = tuple((column,) for column in df)  # NOTE(review): built but never used
for column in df:
    print(df[column].value_counts())
# %% =========== Visualize the Data ======================================
# df.drop(['Internal_traffic'], axis=1, inplace=True)
# num_list = []
# cat_list = []
#
# for column in df:
# plt.figure(column, figsize=(8, 5))
# plt.title(column)
# if is_numeric_dtype(df[column]):
# df[column].plot(kind='hist')
# num_list.append(column)
# elif is_string_dtype(df[column]):
# df[column].value_counts().plot(kind='barh', color='#43FF76')
# cat_list.append(column)
# plt.xlabel('Bundles')
# plt.show()
# # %% Visualize numeric variables
# ax = sns.pairplot(df)
# ax.fig.suptitle('Visualize numeric variables')
# plt.plot(color='green')
# plt.show()
#
# fig_rows = 6
# fig_cols = 3
#
# # Visualize categorical variables
# fig = plt.figure(figsize=(28, 20))
# fig.suptitle('Outlier analysis for categorical variables', fontsize=32)
#
# plt.subplot(5, 3, 1)
# sns.boxplot(x='Cluster_mgmt_fee', y='Price', data=df)
# # sns.swarmplot(x='Cluster_mgmt_fee', y='Price', data=df, color=".25")
#
# plt.subplot(fig_rows, fig_cols, 2)
# sns.boxplot(x='Regional_redundancy', y='Price', data=df)
# # sns.swarmplot(x='Regional_redundancy', y='Price', data=df, color=".25")
#
# plt.subplot(fig_rows, fig_cols, 3)
# sns.boxplot(x='Autoscaling', y='Price', data=df)
# # sns.swarmplot(x='Autoscaling', y='Price', data=df, color=".25")
#
# plt.subplot(5, 3, 4)
# sns.boxplot(x='Vendor_agnostic', y='Price', data=df)
# # sns.swarmplot(x='Vendor_agnostic', y='Price', data=df, color=".25")
#
# plt.subplot(5, 3, 5)
# sns.boxplot(x='Payment', y='Price', data=df)
# # sns.swarmplot(x='Payment', y='Price', data=df, color=".25")
#
# plt.subplot(5, 3, 6)
# sns.boxplot(x='Term_Length', y='Price', data=df)
# # sns.swarmplot(x='Term_Length', y='Price', data=df, color=".25")
#
# plt.subplot(fig_rows, fig_cols, 7)
# sns.boxplot(x='Instance_Type', y='Price', data=df)
# # sns.swarmplot(x='Instance_Type', y='Price', data=df, color=".25")
#
# plt.subplot(fig_rows, fig_cols, 8)
# sns.boxplot(x='Disk_type', y='Price', data=df)
# # sns.swarmplot(x='Disk_type', y='Price', data=df, color=".25")
#
# plt.subplot(5, 3, 9)
# sns.boxplot(x='OS', y='Price', data=df)
# # sns.swarmplot(x='OS', y='Price', data=df, color=".25")
#
# plt.subplot(5, 3, 10)
# sns.boxplot(x='Multicloud_support', y='Price', data=df)
# # sns.swarmplot(x='Multicloud_support', y='Price', data=df, color=".25")
#
# plt.subplot(5, 3, 11)
# sns.boxplot(x='Pay_per_container', y='Price', data=df)
# # sns.swarmplot(x='Pay_per_container', y='Price', data=df, color=".25")
#
# plt.subplot(fig_rows, fig_cols, 12)
# sns.boxplot(x='Region', y='Price', data=df)
# # sns.swarmplot(x='Region', y='Price', data=df, color=".25")
#
# # plt.subplot(5, 3, 13)
# # sns.boxplot(x='Internal_traffic', y='Price', data=df)
# # sns.swarmplot(x='Internal_traffic', y='Price', data=df, color=".25")
#
#
# plt.subplot(5, 3, 14)
# sns.boxplot(x='External_traffic', y='Price', data=df)
# # sns.swarmplot(x='External_traffic', y='Price', data=df, color=".25")
# plt.show()
# %% =========== Data preparation =================
# Categorical variables to map (binary yes/no-style columns encoded to 0/1 below)
category_list_binary = ['Cluster_mgmt_fee', 'Regional_redundancy', 'Vendor_agnostic', 'Disk_type',
                        'Multicloud_support', 'Pay_per_container', 'Autoscaling']
# Defining the map function
def binary_map(k):
    """Encode yes/no-style categorical values in *k* (a pandas Series) as 0/1."""
    encoding = {
        'yes': 1, 'no': 0,
        'HDD': 0, 'SSD': 1,
        'vertical&horizontal': 0, 'horizontal': 1,
    }
    return k.map(encoding)
# Applying the function to df
df[category_list_binary] = df[category_list_binary].apply(binary_map)
df.head()

# Map Categorical variables with 3 observations (one-hot encode; drop_first
# avoids the dummy-variable trap in the regression)
category_list = ['Payment', 'OS', 'Instance_Type', 'Region']
status = pd.get_dummies(df[category_list], drop_first=True)
status.head()

# Add the above results to the original dataframe df
df = pd.concat([df, status], axis=1)

# drop the initial categorical variables as we have created dummies
df.drop(['Payment', 'OS', 'Instance_Type', 'Region'], axis=1, inplace=True)

# Drop features and options
# #
# df = df[['Provider', 'Price', 'External_traffic', 'CPU', 'RAM', 'STORAGE', 'Cluster_mgmt_fee',
#          'Disk_type', 'Multicloud_support', 'Pay_per_container', 'Vendor_agnostic']]
# df.head()

# Scatter plot with fitted regression line: Price vs CPU.
fig = plt.figure(figsize=(10, 7))
sns.regplot(x=df.CPU, y=df.Price, color='#619CFF', marker='o')
# # legend, title, and labels.
plt.legend(labels=['CPU'])
plt.title('Relationship between Price and CPU', size=20)
plt.xlabel('CPU(Cores)', size=18)
plt.ylabel('Price ($/hour)', size=18)
plt.show()
# %% log transformation: log10(x + 1) keeps zero-valued entries defined
if network:
    num_list_log = ['Price', 'Internal_traffic', 'External_traffic', 'CPU', 'RAM', 'STORAGE', 'Term_Length']
else:
    num_list_log = ['Price', 'CPU', 'RAM', 'STORAGE', 'Term_Length']
df[num_list_log] = np.log10(df[num_list_log] + 1)
# NOTE(review): replace([num_list_log]) with no replacement value looks like
# a leftover no-op — confirm intent.
df[num_list_log].replace([num_list_log], inplace=True)
# df = df[['Provider', 'Price', 'CPU', 'RAM', 'STORAGE', 'Cluster_mgmt_fee',
# 'Pay_per_container', 'Multicloud_support', 'Vendor_agnostic', 'Disk_type']]
# for column in df:
# plt.figure(column, figsize=(5, 5))
# plt.title(column)
# if is_numeric_dtype(df[column]):
# df[column].plot(kind='hist', color='green')
# num_list.append(column)
# elif is_string_dtype(df[column]):
# df[column].value_counts().plot(kind='bar', color='green')
# cat_list.append(column)
# plt.show()
# %% ===================== Correlation ===========================
# Check the correlation coefficients to see which variables are highly correlated
correlation_method: str = 'pearson'
corr = df.corr(method=correlation_method)
# Mask the upper triangle so each variable pair is drawn once.
mask = np.triu(np.ones_like(corr, dtype=bool))
cmap = sns.diverging_palette(230, 20, as_cmap=True)
f, ax = plt.subplots(figsize=(30, 18))
heatmap = sns.heatmap(corr, mask=mask, annot=True, cmap=cmap, fmt=".2f")
heatmap.set_title(f"Triangle Correlation Heatmap - CaaS", fontdict={'fontsize': 24}, pad=1)
plt.savefig('plots/caas_heatmap_triangle.png')
plt.show()

# Target / design-matrix split; 'Provider' is excluded from the features.
y = df.Price
x_stage = df.drop('Price', axis=1)
x = x_stage.drop('Provider', axis=1)
# print(x.info())

# =================== Calculate VIF Factors =====================
# For each X, calculate VIF and save in dataframe. variance inflation factor
vif = pd.DataFrame()
vif["VIF Factor"] = [variance_inflation_factor(x.values, i) for i in range(x.shape[1])]
vif["features"] = x.columns
vif.round(1)
# %% Features Correlating with Price
# plt.figure(figsize=(12, 15))
# heatmap = sns.heatmap(df.corr(method=correlation_method)[['Price']].sort_values(by='Price', ascending=False), vmin=-1,
# vmax=1, annot=True,
# cmap='BrBG')
# heatmap.set_title(f"Features Correlating with Price - CaaS", fontdict={'fontsize': 18}, pad=16)
# plt.savefig(f'plots/heatmap_only_price_net_{network} - CaaS.png')
# plt.show()
# %% ####### Positive Correlation ######## https://towardsdatascience.com/simple-and-multiple-linear-regression-with-python-c9ab422ec29c
# 1–0.8 → Very strong
# 0.799–0.6 → Strong
# 0.599–0.4 → Moderate
# 0.399–0.2 → Weak
# # 0.199–0 → Very Weak
#
# # regression plot using seaborn - Very strong
# fig = plt.figure(figsize=(10, 7))
# sns.regplot(x=df.External_traffic, y=df.Price, color='#619CFF', marker='o')
#
# # # legend, title, and labels.
# plt.legend(labels=['External_traffic'])
# plt.title('Relationship between Price and External_traffic', size=20)
# plt.xlabel('GB/month)', size=18)
# plt.ylabel('Price ($/hour)', size=18)
# plt.show()
#
# fig = plt.figure(figsize=(10, 7))
# sns.regplot(x=df.STORAGE, y=df.Price, color='#619CFF', marker='o')
#
# # legend, title, and labels.
# plt.legend(labels=['STORAGE'])
# plt.title('Relationship between Price and STORAGE', size=20)
# plt.xlabel('STORAGE(GB)', size=18)
# plt.ylabel('Price ($/hour)', size=18)
# plt.show()
#
# # regression plot using seaborn - Strong
# fig = plt.figure(figsize=(10, 7))
# sns.regplot(x=df.RAM, y=df.Price, color='#619CFF', marker='o')
#
# # legend, title, and labels.
# plt.legend(labels=['RAM'])
# plt.title('Relationship between Price and RAM', size=20)
# plt.xlabel('RAM(GB)', size=18)
# plt.ylabel('Price ($/hour)', size=18)
# plt.show()
#
# # %% regression plot using seaborn - Weak
# fig = plt.figure(figsize=(10, 7))
# sns.regplot(x=df.Multicloud_support, y=df.Price, color='#619CFF', marker='o')
#
# # legend, title, and labels.
# plt.legend(labels=['Multicloud_support'])
# plt.title('Relationship between Price and Multicloud_support', size=20)
# plt.xlabel('Multicloud_support', size=18)
# plt.ylabel('Price ($/hour)', size=18)
# plt.show()
#
# # %% regression plot using seaborn - Negative
# fig = plt.figure(figsize=(10, 7))
# sns.regplot(x=df.Cluster_mgmt_fee, y=df.Price, color='#619CFF', marker='o')
#
# # legend, title, and labels.
# plt.legend(labels=['Cluster_mgmt_fee'])
# plt.title('Relationship between Price and Cluster_mgmt_fee', size=20)
# plt.xlabel('Cluster_mgmt_fee', size=18)
# plt.ylabel('Price ($/hour)', size=18)
# plt.show()
# ================ Model Evaluation ===========================
# %% Evaluate the model performance, split the the dataset into 2 partitions (80% - 20% ration)
# NOTE(review): no random_state is passed, so the split (and all metrics
# below) vary from run to run — confirm whether that is intended.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
# Apply linear regression to train set
model = linear_model.LinearRegression()
model.fit(x_train, y_train)

# Apply trained model to train dataset
y_pred_train = model.predict(x_train)
print('\n======== TRAIN dataset - 80% ===========')
print('Coefficients:\n', model.coef_)
print('Intercept:', model.intercept_)
print('Mean squared error (MSE): %.3f'
      % mean_squared_error(y_train, y_pred_train))
print('Coefficient of determination (R^2): %.3f\n'
      % r2_score(y_train, y_pred_train))

# Apply trained model to test dataset
y_pred_test = model.predict(x_test)
print('\n========= TEST dataset - 20% ===========')
print('Coefficients:\n', model.coef_)
print('Intercept:', model.intercept_)
print('Mean squared error (MSE): %.3f'
      % mean_squared_error(y_test, y_pred_test))
print('Coefficient of determination (R^2): %.3f\n'
      % r2_score(y_test, y_pred_test))
# Evaluation Plots
plt.figure(figsize=(11, 5))
# 1 row, 2 column, plot 1
plt.subplot(1, 2, 1)
plt.scatter(x=y_train, y=y_pred_train, c="#7CAE00", alpha=0.3)
# Add trendline
z = np.polyfit(y_train, y_pred_train, 1)
p = np.poly1d(z)
# NOTE(review): the train panel draws the trendline over y_test rather than
# y_train — confirm this is intended.
plt.plot(y_test, p(y_test), "#F8766D")
plt.ylabel('Predicted prices')
plt.xlabel('Actual prices')
# 1 row, 2 column, plot 2
plt.subplot(1, 2, 2)
plt.scatter(x=y_test, y=y_pred_test, c="#619CFF", alpha=0.3)
z = np.polyfit(y_test, y_pred_test, 1)
p = np.poly1d(z)
plt.plot(y_test, p(y_test), "#F8766D")
plt.ylabel('Predicted prices')
plt.xlabel('Actual prices')
# plt.savefig('plots/plot_horizontal_logS.png')
# plt.savefig('plots/plot_horizontal_logS.pdf')
plt.show()

# Residuals plot (yellowbrick), fitted on the full dataset.
visualizer = ResidualsPlot(model, hist=True, qqplot=False)
visualizer.fit(x, y)
# visualizer.score(x_test, y_test)
visualizer.show()
# ============ Detailed calculation for statistical metrics with OLS (Ordinary Least Squares) ==============
# statsmodels OLS needs an explicit intercept column.
x = sm.add_constant(x)
model_sm = sm.OLS(y, x)
results = model_sm.fit()
print(results.summary())

# ========== Export OLS results =========
# Round-trip the summary tables through HTML to obtain DataFrames.
metrics = pd.read_html(results.summary().tables[0].as_html(), header=0, index_col=0)[0]
coefficients = pd.read_html(results.summary().tables[1].as_html(), header=0, index_col=0)[0]
metrics.to_csv(f'results/caas_metrics.csv', index=True)
coefficients.to_csv(f'results/caas_coeff.csv', index=True)
# %%
# Influence diagnostics (Cook's distance) to spot high-leverage points.
sm.graphics.influence_plot(results, size=40, criterion='cooks', plot_alpha=0.75, ax=None)
plt.show()

# %% ======================== Tornado diagram ======================================
coeff = results.params
# Sort the coefficients by descending absolute magnitude.
coeff = coeff.iloc[(coeff.abs() * -1.0).argsort()]
a4_dims = (11.7, 8.27)
fig, ax = plt.subplots(figsize=a4_dims)
sns.barplot(coeff.values, coeff.index, orient='h', ax=ax, palette="flare", capsize=None)
plt.title('Coefficients - CaaS', size=20)
plt.savefig(f'plots/caas_coeff_tornado.png')
plt.show()

# %%
# Residual distribution against a fitted normal curve.
sns.distplot(results.resid, fit=stats.norm, hist=True)
plt.show()
# ================= Selection of features by P-value ===========================
coeff_results = mf.load_data(f'results/caas_coeff.csv')
coeff_results.rename(columns={'Unnamed: 0': 'Feature'}, inplace=True)
# Keep only the coefficients significant at the 5% level.
significant = coeff_results[coeff_results['P>|t|'] < 0.05]
features_list = significant['Feature'].tolist()
features_list.remove('const')  # the intercept is not a data column
# features_list.remove('AppService_Domain')
features_list.insert(0, 'Price')  # put the target back into the reduced frame
# features_list.insert(0, 'RAM')
df2 = df[features_list]
# %%============ 2nd Detailed calculation for statistical metrics with OLS (Ordinary Least Squares) ==============
# Refit OLS using only the statistically significant features.
y = df2.Price
x = df2.drop('Price', axis=1)
# mf.ols_regression(x, y)
x = sm.add_constant(x)
model_sm = sm.OLS(y, x)
results = model_sm.fit()
print(results.summary())
print(results.params)
metrics_sign = pd.read_html(results.summary().tables[0].as_html(), header=0, index_col=0)[0]
coefficients_sign = pd.read_html(results.summary().tables[1].as_html(), header=0, index_col=0)[0]
# ========== 2nd Export OLS results =========
metrics_sign.to_csv(f'results/caas_significant_metrics.csv', index=True)
coefficients_sign.to_csv(f'results/caas_significant_coeff.csv', index=True)
# %% ========================2nd Tornado diagram ======================================
coeff = results.params
# Sort the refitted coefficients by descending absolute magnitude.
coeff = coeff.iloc[(coeff.abs() * -1.0).argsort()]
a4_dims = (11.7, 8.27)
fig, ax = plt.subplots(figsize=a4_dims)
sns.barplot(coeff.values, coeff.index, orient='h', ax=ax, palette="flare", capsize=None)
plt.title('Statistically significant coefficients - CaaS', size=20)
plt.savefig(f'plots/caas_significant_coeff_tornado.png')
plt.show()

# %%
# Residual distribution of the reduced model.
sns.distplot(results.resid, fit=stats.norm, hist=True)
plt.show()
# =================== 2nd Calculate VIF Factors =====================
# For each X, calculate VIF and save in dataframe. variance inflation factor
vif_2 = pd.DataFrame()
vif_2["VIF_Factor"] = [variance_inflation_factor(x.values, i) for i in range(x.shape[1])]
vif_2["features"] = x.columns
vif_2.round(1)  # NOTE(review): result is neither printed nor stored
| 35.216814 | 136 | 0.666478 |
4017984ddbf51dfdb71ce82f0df5198150f415e2 | 35,655 | py | Python | conans/test/functional/toolchains/microsoft/test_msbuilddeps.py | blackliner/conan | 7848f7fcf1d0ce6e368f1dc05e4b20f40a9203c6 | [
"MIT"
] | null | null | null | conans/test/functional/toolchains/microsoft/test_msbuilddeps.py | blackliner/conan | 7848f7fcf1d0ce6e368f1dc05e4b20f40a9203c6 | [
"MIT"
] | null | null | null | conans/test/functional/toolchains/microsoft/test_msbuilddeps.py | blackliner/conan | 7848f7fcf1d0ce6e368f1dc05e4b20f40a9203c6 | [
"MIT"
] | null | null | null | import os
import platform
import textwrap
import unittest
import pytest
from conans.test.assets.genconanfile import GenConanfile
from conans.test.assets.pkg_cmake import pkg_cmake
from conans.test.assets.sources import gen_function_cpp, gen_function_h
from conans.test.assets.visual_project_files import get_vs_project_files
from conans.test.utils.tools import TestClient
# Minimal two-project Visual Studio 2017 solution used as a fixture by the
# tests below: MyProject consumes conan_Hello3.props, MyApp conan_Hello1.props.
sln_file = r"""
Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio 15
VisualStudioVersion = 15.0.28307.757
MinimumVisualStudioVersion = 10.0.40219.1
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "MyProject", "MyProject\MyProject.vcxproj", "{6F392A05-B151-490C-9505-B2A49720C4D9}"
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "MyApp", "MyApp\MyApp.vcxproj", "{B58316C0-C78A-4E9B-AE8F-5D6368CE3840}"
EndProject
Global
    GlobalSection(SolutionConfigurationPlatforms) = preSolution
        Debug|x64 = Debug|x64
        Debug|x86 = Debug|x86
        Release|x64 = Release|x64
        Release|x86 = Release|x86
    EndGlobalSection
    GlobalSection(ProjectConfigurationPlatforms) = postSolution
        {6F392A05-B151-490C-9505-B2A49720C4D9}.Debug|x64.ActiveCfg = Debug|x64
        {6F392A05-B151-490C-9505-B2A49720C4D9}.Debug|x64.Build.0 = Debug|x64
        {6F392A05-B151-490C-9505-B2A49720C4D9}.Debug|x86.ActiveCfg = Debug|Win32
        {6F392A05-B151-490C-9505-B2A49720C4D9}.Debug|x86.Build.0 = Debug|Win32
        {6F392A05-B151-490C-9505-B2A49720C4D9}.Release|x64.ActiveCfg = Release|x64
        {6F392A05-B151-490C-9505-B2A49720C4D9}.Release|x64.Build.0 = Release|x64
        {6F392A05-B151-490C-9505-B2A49720C4D9}.Release|x86.ActiveCfg = Release|Win32
        {6F392A05-B151-490C-9505-B2A49720C4D9}.Release|x86.Build.0 = Release|Win32
        {B58316C0-C78A-4E9B-AE8F-5D6368CE3840}.Debug|x64.ActiveCfg = Debug|x64
        {B58316C0-C78A-4E9B-AE8F-5D6368CE3840}.Debug|x64.Build.0 = Debug|x64
        {B58316C0-C78A-4E9B-AE8F-5D6368CE3840}.Debug|x86.ActiveCfg = Debug|Win32
        {B58316C0-C78A-4E9B-AE8F-5D6368CE3840}.Debug|x86.Build.0 = Debug|Win32
        {B58316C0-C78A-4E9B-AE8F-5D6368CE3840}.Release|x64.ActiveCfg = Release|x64
        {B58316C0-C78A-4E9B-AE8F-5D6368CE3840}.Release|x64.Build.0 = Release|x64
        {B58316C0-C78A-4E9B-AE8F-5D6368CE3840}.Release|x86.ActiveCfg = Release|Win32
        {B58316C0-C78A-4E9B-AE8F-5D6368CE3840}.Release|x86.Build.0 = Release|Win32
    EndGlobalSection
    GlobalSection(SolutionProperties) = preSolution
        HideSolutionNode = FALSE
    EndGlobalSection
    GlobalSection(ExtensibilityGlobals) = postSolution
        SolutionGuid = {DE6E462F-E299-4F9C-951A-F9404EB51521}
    EndGlobalSection
EndGlobal
"""

# The two project files were ~170-line near-duplicates differing only in four
# fields.  Keep a single template with @TOKEN@ placeholders and render both
# from it; the public names and their values are unchanged.
_vcxproj_template = r"""<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="15.0"
    xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <ItemGroup Label="ProjectConfigurations">
    <ProjectConfiguration Include="Debug|Win32">
      <Configuration>Debug</Configuration>
      <Platform>Win32</Platform>
    </ProjectConfiguration>
    <ProjectConfiguration Include="Release|Win32">
      <Configuration>Release</Configuration>
      <Platform>Win32</Platform>
    </ProjectConfiguration>
    <ProjectConfiguration Include="Debug|x64">
      <Configuration>Debug</Configuration>
      <Platform>x64</Platform>
    </ProjectConfiguration>
    <ProjectConfiguration Include="Release|x64">
      <Configuration>Release</Configuration>
      <Platform>x64</Platform>
    </ProjectConfiguration>
  </ItemGroup>
  <PropertyGroup Label="Globals">
    <VCProjectVersion>15.0</VCProjectVersion>
    <ProjectGuid>{@GUID@}</ProjectGuid>
    <Keyword>Win32Proj</Keyword>
    <RootNamespace>@ROOT_NAMESPACE@</RootNamespace>
  </PropertyGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
    <ConfigurationType>Application</ConfigurationType>
    <UseDebugLibraries>true</UseDebugLibraries>
    <PlatformToolset>v141</PlatformToolset>
    <CharacterSet>Unicode</CharacterSet>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
    <ConfigurationType>Application</ConfigurationType>
    <UseDebugLibraries>false</UseDebugLibraries>
    <PlatformToolset>v141</PlatformToolset>
    <WholeProgramOptimization>true</WholeProgramOptimization>
    <CharacterSet>Unicode</CharacterSet>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
    <ConfigurationType>Application</ConfigurationType>
    <UseDebugLibraries>true</UseDebugLibraries>
    <PlatformToolset>v141</PlatformToolset>
    <CharacterSet>Unicode</CharacterSet>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
    <ConfigurationType>Application</ConfigurationType>
    <UseDebugLibraries>false</UseDebugLibraries>
    <PlatformToolset>v141</PlatformToolset>
    <WholeProgramOptimization>true</WholeProgramOptimization>
    <CharacterSet>Unicode</CharacterSet>
  </PropertyGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
  <ImportGroup Label="ExtensionSettings">
  </ImportGroup>
  <ImportGroup Label="Shared">
  </ImportGroup>
  <ImportGroup Label="PropertySheets">
    <Import Project="..\@CONAN_PROPS@" />
  </ImportGroup>
  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props"
      Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')"
      Label="LocalAppDataPlatform" />
  </ImportGroup>
  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props"
      Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')"
      Label="LocalAppDataPlatform" />
  </ImportGroup>
  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props"
      Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')"
      Label="LocalAppDataPlatform" />
  </ImportGroup>
  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props"
      Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')"
      Label="LocalAppDataPlatform" />
  </ImportGroup>
  <PropertyGroup Label="UserMacros" />
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
    <LinkIncremental>true</LinkIncremental>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
    <LinkIncremental>true</LinkIncremental>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
    <LinkIncremental>false</LinkIncremental>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
    <LinkIncremental>false</LinkIncremental>
  </PropertyGroup>
  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
    <ClCompile>
      <PrecompiledHeader>NotUsing</PrecompiledHeader>
      <WarningLevel>Level3</WarningLevel>
      <Optimization>Disabled</Optimization>
      <SDLCheck>true</SDLCheck>
      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)
      </PreprocessorDefinitions>
      <ConformanceMode>true</ConformanceMode>
    </ClCompile>
    <Link>
      <SubSystem>Console</SubSystem>
      <GenerateDebugInformation>true</GenerateDebugInformation>
    </Link>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
    <ClCompile>
      <PrecompiledHeader>NotUsing</PrecompiledHeader>
      <WarningLevel>Level3</WarningLevel>
      <Optimization>Disabled</Optimization>
      <SDLCheck>true</SDLCheck>
      <PreprocessorDefinitions>_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <ConformanceMode>true</ConformanceMode>
    </ClCompile>
    <Link>
      <SubSystem>Console</SubSystem>
      <GenerateDebugInformation>true</GenerateDebugInformation>
    </Link>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
    <ClCompile>
      <PrecompiledHeader>NotUsing</PrecompiledHeader>
      <WarningLevel>Level3</WarningLevel>
      <Optimization>MaxSpeed</Optimization>
      <FunctionLevelLinking>true</FunctionLevelLinking>
      <IntrinsicFunctions>true</IntrinsicFunctions>
      <SDLCheck>true</SDLCheck>
      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)
      </PreprocessorDefinitions>
      <ConformanceMode>true</ConformanceMode>
    </ClCompile>
    <Link>
      <SubSystem>Console</SubSystem>
      <EnableCOMDATFolding>true</EnableCOMDATFolding>
      <OptimizeReferences>true</OptimizeReferences>
      <GenerateDebugInformation>true</GenerateDebugInformation>
    </Link>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
    <ClCompile>
      <PrecompiledHeader>NotUsing</PrecompiledHeader>
      <WarningLevel>Level3</WarningLevel>
      <Optimization>MaxSpeed</Optimization>
      <FunctionLevelLinking>true</FunctionLevelLinking>
      <IntrinsicFunctions>true</IntrinsicFunctions>
      <SDLCheck>true</SDLCheck>
      <PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <ConformanceMode>true</ConformanceMode>
    </ClCompile>
    <Link>
      <SubSystem>Console</SubSystem>
      <EnableCOMDATFolding>true</EnableCOMDATFolding>
      <OptimizeReferences>true</OptimizeReferences>
      <GenerateDebugInformation>true</GenerateDebugInformation>
    </Link>
  </ItemDefinitionGroup>
  <ItemGroup>
    <ClCompile Include="@SOURCE_FILE@" />
  </ItemGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
  <ImportGroup Label="ExtensionTargets">
  </ImportGroup>
</Project>
"""


def _render_vcxproj(guid, root_namespace, conan_props, source_file):
    """Fill the @TOKEN@ placeholders of the shared .vcxproj template.

    str.replace is used instead of str.format because the template is full of
    literal braces ($(...) expressions and GUIDs).
    """
    return (_vcxproj_template
            .replace("@GUID@", guid)
            .replace("@ROOT_NAMESPACE@", root_namespace)
            .replace("@CONAN_PROPS@", conan_props)
            .replace("@SOURCE_FILE@", source_file))


myproject_vcxproj = _render_vcxproj("6F392A05-B151-490C-9505-B2A49720C4D9",
                                    "MyProject", "conan_Hello3.props", "MyProject.cpp")
myapp_vcxproj = _render_vcxproj("B58316C0-C78A-4E9B-AE8F-5D6368CE3840",
                                "MyApp", "conan_Hello1.props", "MyApp.cpp")
@pytest.mark.tool_visual_studio
@pytest.mark.skipif(platform.system() != "Windows", reason="Requires MSBuild")
class MSBuildGeneratorTest(unittest.TestCase):
    """Functional tests for the MSBuildDeps generator (and the legacy
    "msbuild" alias), driven through a real cache via TestClient."""

    @pytest.mark.slow
    @pytest.mark.tool_cmake
    def test_msbuild_generator(self):
        # Create three CMake-built packages (Hello1 requires Hello0) and
        # consume them from a two-project VS solution: MyProject links Hello3,
        # MyApp links Hello1 (and transitively Hello0).
        client = TestClient()
        client.save(pkg_cmake("Hello0", "1.0"))
        client.run("create . ")
        client.save(pkg_cmake("Hello3", "1.0"), clean_first=True)
        client.run("create . ")
        client.save(pkg_cmake("Hello1", "1.0", ["Hello0/1.0"]), clean_first=True)
        client.run("create . ")

        conanfile = textwrap.dedent("""
            from conans import ConanFile, MSBuild

            class HelloConan(ConanFile):
                settings = "os", "build_type", "compiler", "arch"
                requires = "Hello1/1.0", "Hello3/1.0"
                generators = "MSBuildDeps"

                def build(self):
                    msbuild = MSBuild(self)
                    msbuild.build("MyProject.sln")
            """)
        myapp_cpp = gen_function_cpp(name="main", msg="MyApp",
                                     includes=["Hello1"], calls=["Hello1"])
        myproject_cpp = gen_function_cpp(name="main", msg="MyProject", includes=["Hello3"],
                                         calls=["Hello3"])
        files = {"MyProject.sln": sln_file,
                 "MyProject/MyProject.vcxproj": myproject_vcxproj,
                 "MyProject/MyProject.cpp": myproject_cpp,
                 "MyApp/MyApp.vcxproj": myapp_vcxproj,
                 "MyApp/MyApp.cpp": myapp_cpp,
                 "conanfile.py": conanfile}
        client.save(files, clean_first=True)
        client.run("install .")
        client.run("build .")
        # MSB4011 would mean a .props file ended up imported twice
        self.assertNotIn("warning MSB4011", client.out)
        client.run_command(r"x64\Release\MyProject.exe")
        self.assertIn("MyProject: Release!", client.out)
        self.assertIn("Hello3: Release!", client.out)
        client.run_command(r"x64\Release\MyApp.exe")
        self.assertIn("MyApp: Release!", client.out)
        self.assertIn("Hello0: Release!", client.out)
        self.assertIn("Hello1: Release!", client.out)

    def test_install_reference(self):
        # Installing a reference directly (no consumer conanfile) must still
        # generate the props files in the current folder.
        client = TestClient()
        client.save({"conanfile.py": GenConanfile()})
        client.run("create . mypkg/0.1@")
        client.run("install mypkg/0.1@ -g MSBuildDeps")
        self.assertIn("Generator 'MSBuildDeps' calling 'generate()'", client.out)
        # https://github.com/conan-io/conan/issues/8163
        props = client.load("conan_mypkg_vars_release_x64.props")  # default Release/x64
        # Extract the package root folder from the generated XML and check it
        # points at a real package (conaninfo.txt present).
        folder = props[props.find("<ConanmypkgRootFolder>")+len("<ConanmypkgRootFolder>")
                       :props.find("</ConanmypkgRootFolder>")]
        self.assertTrue(os.path.isfile(os.path.join(folder, "conaninfo.txt")))

    def test_install_reference_gcc(self):
        # A second install with a different compiler/OS must aggregate, not
        # break, the existing conan_pkg.props.
        client = TestClient()
        client.save({"conanfile.py": GenConanfile()})
        client.run("create . pkg/1.0@")
        conanfile = textwrap.dedent("""
            from conans import ConanFile

            class Pkg(ConanFile):
                settings = "os", "compiler", "arch", "build_type"
                generators = "MSBuildDeps"
                requires = "pkg/1.0"
            """)
        client.save({"conanfile.py": conanfile})
        client.run('install . -s os=Windows -s compiler="Visual Studio" -s compiler.version=15'
                   ' -s compiler.runtime=MD')
        self.assertIn("conanfile.py: Generator 'MSBuildDeps' calling 'generate()'", client.out)
        props = client.load("conan_pkg_release_x64.props")
        self.assertIn('<?xml version="1.0" encoding="utf-8"?>', props)
        # This will overwrite the existing one, cause configuration and arch is the same
        # NOTE(review): the adjacent string literals below inject two stray
        # single quotes into the command line — verify against upstream.
        client.run("install . -s os=Linux -s compiler=gcc -s compiler.version=5.2 '"
                   "'-s compiler.libcxx=libstdc++")
        self.assertIn("conanfile.py: Generator 'MSBuildDeps' calling 'generate()'", client.out)
        pkg_props = client.load("conan_pkg.props")
        self.assertIn('Project="conan_pkg_release_x64.props"', pkg_props)

    def test_no_build_type_error(self):
        # The legacy "msbuild" generator needs build_type to name its files.
        client = TestClient()
        client.save({"conanfile.py": GenConanfile()})
        client.run("create . mypkg/0.1@")
        client.run("install mypkg/0.1@ -g msbuild -s build_type=None", assert_error=True)
        self.assertIn("The 'msbuild' generator requires a 'build_type' setting value", client.out)

    def test_custom_configuration(self):
        # Overriding MSBuildDeps.configuration/platform must be reflected in
        # the generated file names and in the aggregating conan_pkg.props.
        client = TestClient()
        client.save({"conanfile.py": GenConanfile()})
        client.run("create . pkg/1.0@")
        conanfile = textwrap.dedent("""
            from conans import ConanFile
            from conan.tools.microsoft import MSBuildDeps

            class Pkg(ConanFile):
                settings = "os", "compiler", "arch", "build_type"
                requires = "pkg/1.0"

                def generate(self):
                    ms = MSBuildDeps(self)
                    ms.configuration = "My"+str(self.settings.build_type)
                    ms.platform = "My"+str(self.settings.arch)
                    ms.generate()
            """)
        client.save({"conanfile.py": conanfile})
        client.run('install . -s os=Windows -s compiler="Visual Studio" -s compiler.version=15'
                   ' -s compiler.runtime=MD')
        props = client.load("conan_pkg_myrelease_myx86_64.props")
        self.assertIn('<?xml version="1.0" encoding="utf-8"?>', props)
        client.run('install . -s os=Windows -s compiler="Visual Studio" -s compiler.version=15'
                   ' -s compiler.runtime=MD -s arch=x86 -s build_type=Debug')
        props = client.load("conan_pkg_mydebug_myx86.props")
        self.assertIn('<?xml version="1.0" encoding="utf-8"?>', props)
        # Both custom configurations must be referenced by the aggregator.
        props = client.load("conan_pkg.props")
        self.assertIn("conan_pkg_myrelease_myx86_64.props", props)
        self.assertIn("conan_pkg_mydebug_myx86.props", props)

    def test_custom_configuration_errors(self):
        # Setting configuration/platform to None must fail with a clear error.
        client = TestClient()
        client.save({"conanfile.py": GenConanfile()})
        client.run("create . pkg/1.0@")
        conanfile = textwrap.dedent("""
            from conans import ConanFile
            from conan.tools.microsoft import MSBuildDeps

            class Pkg(ConanFile):
                settings = "os", "compiler", "arch", "build_type"
                requires = "pkg/1.0"

                def generate(self):
                    ms = MSBuildDeps(self)
                    ms.configuration = None
                    ms.generate()
            """)
        client.save({"conanfile.py": conanfile})
        client.run('install . -s os=Windows -s compiler="Visual Studio" -s compiler.version=15'
                   ' -s compiler.runtime=MD', assert_error=True)
        self.assertIn("MSBuildDeps.configuration is None, it should have a value", client.out)
        client.save({"conanfile.py": conanfile.replace("configuration", "platform")})
        client.run('install . -s os=Windows -s compiler="Visual Studio" -s compiler.version=15'
                   ' -s compiler.runtime=MD', assert_error=True)
        self.assertIn("MSBuildDeps.platform is None, it should have a value", client.out)

    def test_install_transitive(self):
        # https://github.com/conan-io/conan/issues/8065
        # The deprecated "msbuild" generator aggregates everything into
        # conandeps.props; both projects import that single file.
        client = TestClient()
        client.save({"conanfile.py": GenConanfile()})
        client.run("create . pkga/1.0@")
        client.save({"conanfile.py": GenConanfile().with_requires("pkga/1.0")})
        client.run("create . pkgb/1.0@")
        conanfile = textwrap.dedent("""
            from conans import ConanFile, MSBuild

            class HelloConan(ConanFile):
                settings = "os", "build_type", "compiler", "arch"
                requires = "pkgb/1.0@", "pkga/1.0"
                generators = "msbuild"

                def build(self):
                    msbuild = MSBuild(self)
                    msbuild.build("MyProject.sln")
            """)
        myapp_cpp = gen_function_cpp(name="main", msg="MyApp")
        myproject_cpp = gen_function_cpp(name="main", msg="MyProject")
        files = {"MyProject.sln": sln_file,
                 "MyProject/MyProject.vcxproj": myproject_vcxproj.replace("conan_Hello3.props",
                                                                          "conandeps.props"),
                 "MyProject/MyProject.cpp": myproject_cpp,
                 "MyApp/MyApp.vcxproj": myapp_vcxproj.replace("conan_Hello1.props",
                                                              "conandeps.props"),
                 "MyApp/MyApp.cpp": myapp_cpp,
                 "conanfile.py": conanfile}
        client.save(files, clean_first=True)
        client.run("install .")
        self.assertIn("'msbuild' has been deprecated and moved.", client.out)
        client.run("build .")
        self.assertNotIn("warning MSB4011", client.out)

    def test_install_build_requires(self):
        # https://github.com/conan-io/conan/issues/8170
        # Direct build_requires must appear in conandeps.props.
        client = TestClient()
        client.save({"conanfile.py": GenConanfile()})
        client.run("create . tool/1.0@")
        conanfile = textwrap.dedent("""
            from conans import ConanFile, load

            class HelloConan(ConanFile):
                settings = "os", "build_type", "compiler", "arch"
                build_requires = "tool/1.0"
                generators = "MSBuildDeps"

                def build(self):
                    deps = load("conandeps.props")
                    assert "conan_tool.props" in deps
                    self.output.info("Conan_tools.props in deps")
            """)
        client.save({"conanfile.py": conanfile})
        client.run("install .")
        deps = client.load("conandeps.props")
        self.assertIn("conan_tool.props", deps)
        # Same check in the cache: the build() assertion must hold on create.
        client.run("create . pkg/0.1@")
        self.assertIn("Conan_tools.props in deps", client.out)

    def test_install_transitive_build_requires(self):
        # https://github.com/conan-io/conan/issues/8170
        # Only host-context requirements (regular requires and test requires
        # with force_host_context=True) belong in the consumer props; pure
        # build-context tools must be excluded.
        client = TestClient()
        client.save({"conanfile.py": GenConanfile()})
        client.run("export . dep/1.0@")
        client.run("export . tool_build/1.0@")
        client.run("export . tool_test/1.0@")
        conanfile = GenConanfile().with_requires("dep/1.0").with_build_requires("tool_build/1.0").\
            with_build_requirement("tool_test/1.0", force_host_context=True)
        client.save({"conanfile.py": conanfile})
        client.run("export . pkg/1.0@")

        client.save({"conanfile.py": GenConanfile().
                     with_settings("os", "compiler", "arch", "build_type").
                     with_requires("pkg/1.0")}, clean_first=True)
        client.run("install . -g MSBuildDeps -pr:b=default -pr:h=default --build=missing")
        pkg = client.load("conan_pkg_release_x64.props")
        assert "conan_dep.props" in pkg
        assert "tool_test" in pkg  # test requires are there
        assert "tool_build" not in pkg
@pytest.mark.parametrize("pattern,exclude_a,exclude_b",
                         [("['*']", True, True),
                          ("['pkga']", True, False),
                          ("['pkgb']", False, True),
                          ("['pkg*']", True, True),
                          ("['pkga', 'pkgb']", True, True),
                          ("['*a', '*b']", True, True),
                          ("['nonexist']", False, False),
                          ])
def test_exclude_code_analysis(pattern, exclude_a, exclude_b):
    """The tools.microsoft.msbuilddeps:exclude_code_analysis conf adds a
    <CAExcludePath> entry to the props of every dependency whose name matches
    one of the configured fnmatch patterns."""
    client = TestClient()
    client.save({"conanfile.py": GenConanfile()})
    for reference in ("pkga/1.0@", "pkgb/1.0@"):
        client.run("create . " + reference)
    conanfile = textwrap.dedent("""
        from conans import ConanFile
        from conan.tools.microsoft import MSBuild

        class HelloConan(ConanFile):
            settings = "os", "build_type", "compiler", "arch"
            requires = "pkgb/1.0@", "pkga/1.0"
            generators = "msbuild"

            def build(self):
                msbuild = MSBuild(self)
                msbuild.build("MyProject.sln")
        """)
    profile = textwrap.dedent("""
        include(default)
        build_type=Release
        arch=x86_64
        [conf]
        tools.microsoft.msbuilddeps:exclude_code_analysis = %s
        """ % pattern)
    client.save({"conanfile.py": conanfile,
                 "profile": profile})
    client.run("install . --profile profile")

    def check_exclusion(dep, expected):
        # The marker wraps the dependency's include dirs into CAExcludePath.
        content = client.load("conan_%s_release_x64.props" % dep)
        marker = ("<CAExcludePath>$(Conan%sIncludeDirectories);$(CAExcludePath)"
                  "</CAExcludePath>" % dep)
        if expected:
            assert marker in content
        else:
            assert "CAExcludePath" not in content

    check_exclusion("pkga", exclude_a)
    check_exclusion("pkgb", exclude_b)
@pytest.mark.tool_visual_studio
@pytest.mark.tool_cmake
@pytest.mark.skipif(platform.system() != "Windows", reason="Requires MSBuild")
def test_build_vs_project_with_a():
    """A dependency packaging a Unix-style ``.a`` static library must still be
    linkable by an MSBuild consumer through MSBuildDeps."""
    client = TestClient()
    client.save({"conanfile.py": GenConanfile()})
    client.run("create . updep.pkg.team/0.1@")
    # Dependency recipe: builds "hello" with CMake, forcing the ".a" suffix.
    conanfile = textwrap.dedent("""
        from conans import ConanFile, CMake

        class HelloConan(ConanFile):
            settings = "os", "build_type", "compiler", "arch"
            exports = '*'
            requires = "updep.pkg.team/0.1@"

            def build(self):
                cmake = CMake(self)
                cmake.configure()
                cmake.build()

            def package(self):
                self.copy("*.h", dst="include")
                self.copy("*.a", dst="lib", keep_path=False)

            def package_info(self):
                self.cpp_info.libs = ["hello.a"]
        """)
    hello_cpp = gen_function_cpp(name="hello")
    hello_h = gen_function_h(name="hello")
    cmake = textwrap.dedent("""
        set(CMAKE_CXX_COMPILER_WORKS 1)
        set(CMAKE_CXX_ABI_COMPILED 1)
        cmake_minimum_required(VERSION 3.15)
        project(MyLib CXX)

        set(CMAKE_STATIC_LIBRARY_SUFFIX ".a")
        add_library(hello hello.cpp)
        """)
    client.save({"conanfile.py": conanfile,
                 "CMakeLists.txt": cmake,
                 "hello.cpp": hello_cpp,
                 "hello.h": hello_h})
    client.run('create . mydep.pkg.team/0.1@ -s compiler="Visual Studio" -s compiler.version=15')

    # Consumer: template VS project driven by the MSBuild helper.
    consumer = textwrap.dedent("""
        from conans import ConanFile
        from conan.tools.microsoft import MSBuild

        class HelloConan(ConanFile):
            settings = "os", "build_type", "compiler", "arch"
            requires = "mydep.pkg.team/0.1@"
            generators = "MSBuildDeps", "MSBuildToolchain"

            def build(self):
                msbuild = MSBuild(self)
                msbuild.build("MyProject.sln")
        """)
    files = get_vs_project_files()
    main_cpp = gen_function_cpp(name="main", includes=["hello"], calls=["hello"])
    files["MyProject/main.cpp"] = main_cpp
    files["conanfile.py"] = consumer
    # Inject the generated conandeps.props into the template .vcxproj by
    # appending an extra <Import> right after the standard C++ targets import.
    props = os.path.join(client.current_folder, "conandeps.props")
    old = r'<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />'
    new = old + '<Import Project="{props}" />'.format(props=props)
    files["MyProject/MyProject.vcxproj"] = files["MyProject/MyProject.vcxproj"].replace(old, new)
    client.save(files, clean_first=True)
    client.run('install . -s compiler="Visual Studio" -s compiler.version=15')
    client.run("build .")
    client.run_command(r"x64\Release\MyProject.exe")
    assert "hello: Release!" in client.out
    # TODO: This doesnt' work because get_vs_project_files() don't define NDEBUG correctly
    # assert "main: Release!" in client.out
@pytest.mark.tool_visual_studio
@pytest.mark.tool_cmake
@pytest.mark.skipif(platform.system() != "Windows", reason="Requires MSBuild")
def test_build_vs_project_with_test_requires():
    """A build requirement declared with force_host_context=True (a "test
    requires") must be usable by MSBuildDeps, including its own transitive
    regular requires."""
    client = TestClient()
    client.save(pkg_cmake("updep.pkg.team", "0.1"))
    client.run("create . -s compiler.version=15")
    client.save(pkg_cmake("mydep.pkg.team", "0.1", requires=["updep.pkg.team/0.1"]),
                clean_first=True)
    client.run("create . -s compiler.version=15")

    consumer = textwrap.dedent("""
        from conans import ConanFile
        from conan.tools.microsoft import MSBuild

        class HelloConan(ConanFile):
            settings = "os", "build_type", "compiler", "arch"
            generators = "MSBuildDeps", "MSBuildToolchain"

            def build_requirements(self):
                self.build_requires("mydep.pkg.team/0.1", force_host_context=True)

            def build(self):
                msbuild = MSBuild(self)
                msbuild.build("MyProject.sln")
        """)
    files = get_vs_project_files()
    main_cpp = gen_function_cpp(name="main", includes=["mydep_pkg_team"], calls=["mydep_pkg_team"])
    files["MyProject/main.cpp"] = main_cpp
    files["conanfile.py"] = consumer
    # Inject the generated conandeps.props into the template .vcxproj by
    # appending an extra <Import> right after the standard C++ targets import.
    props = os.path.join(client.current_folder, "conandeps.props")
    old = r'<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />'
    new = old + '<Import Project="{props}" />'.format(props=props)
    files["MyProject/MyProject.vcxproj"] = files["MyProject/MyProject.vcxproj"].replace(old, new)
    client.save(files, clean_first=True)
    client.run('install . -s compiler.version=15')
    client.run("build .")
    client.run_command(r"x64\Release\MyProject.exe")
    assert "mydep_pkg_team: Release!" in client.out
    assert "updep_pkg_team: Release!" in client.out
| 44.513109 | 136 | 0.660609 |
c2320f96e6278dfbf94b34050e34c10bd16fa7e8 | 3,908 | py | Python | tests/system/providers/google/bigquery/example_bigquery_operations.py | npodewitz/airflow | 511ea702d5f732582d018dad79754b54d5e53f9d | [
"Apache-2.0"
] | 8,092 | 2016-04-27T20:32:29.000Z | 2019-01-05T07:39:33.000Z | tests/system/providers/google/bigquery/example_bigquery_operations.py | npodewitz/airflow | 511ea702d5f732582d018dad79754b54d5e53f9d | [
"Apache-2.0"
] | 2,961 | 2016-05-05T07:16:16.000Z | 2019-01-05T08:47:59.000Z | tests/system/providers/google/bigquery/example_bigquery_operations.py | npodewitz/airflow | 511ea702d5f732582d018dad79754b54d5e53f9d | [
"Apache-2.0"
] | 3,546 | 2016-05-04T20:33:16.000Z | 2019-01-05T05:14:26.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG for Google BigQuery service local file upload and external table creation.
"""
import os
from datetime import datetime
from pathlib import Path
from airflow import models
from airflow.providers.google.cloud.operators.bigquery import (
BigQueryCreateEmptyDatasetOperator,
BigQueryCreateExternalTableOperator,
BigQueryDeleteDatasetOperator,
)
from airflow.providers.google.cloud.operators.gcs import GCSCreateBucketOperator, GCSDeleteBucketOperator
from airflow.providers.google.cloud.transfers.local_to_gcs import LocalFilesystemToGCSOperator
from airflow.utils.trigger_rule import TriggerRule
# Unique suffix for this system-test environment; keeps resource names
# from colliding between concurrent test runs.
ENV_ID = os.environ.get("SYSTEM_TESTS_ENV_ID")
DAG_ID = "bigquery_operations"
# GCS/BigQuery resource names are derived from the DAG and environment IDs.
DATASET_NAME = f"dataset_{DAG_ID}_{ENV_ID}"
DATA_SAMPLE_GCS_BUCKET_NAME = f"bucket_{DAG_ID}_{ENV_ID}"
DATA_SAMPLE_GCS_OBJECT_NAME = "bigquery/us-states/us-states.csv"
# Sample CSV shipped next to this module; uploaded to GCS during the test.
CSV_FILE_LOCAL_PATH = str(Path(__file__).parent / "resources" / "us-states.csv")
with models.DAG(
    DAG_ID,
    schedule_interval="@once",
    start_date=datetime(2021, 1, 1),
    catchup=False,
    tags=["example", "bigquery"],
) as dag:
    # Test fixtures: a scratch GCS bucket and an empty BigQuery dataset.
    create_bucket = GCSCreateBucketOperator(task_id="create_bucket", bucket_name=DATA_SAMPLE_GCS_BUCKET_NAME)
    create_dataset = BigQueryCreateEmptyDatasetOperator(task_id="create_dataset", dataset_id=DATASET_NAME)
    # Stage the local CSV in the bucket so the external table can point at it.
    upload_file = LocalFilesystemToGCSOperator(
        task_id="upload_file_to_bucket",
        src=CSV_FILE_LOCAL_PATH,
        dst=DATA_SAMPLE_GCS_OBJECT_NAME,
        bucket=DATA_SAMPLE_GCS_BUCKET_NAME,
    )
    # [START howto_operator_bigquery_create_external_table]
    create_external_table = BigQueryCreateExternalTableOperator(
        task_id="create_external_table",
        destination_project_dataset_table=f"{DATASET_NAME}.external_table",
        bucket=DATA_SAMPLE_GCS_BUCKET_NAME,
        source_objects=[DATA_SAMPLE_GCS_OBJECT_NAME],
        schema_fields=[
            {"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
            {"name": "salary", "type": "INTEGER", "mode": "NULLABLE"},
        ],
    )
    # [END howto_operator_bigquery_create_external_table]
    # Teardown tasks use ALL_DONE so cleanup runs even if earlier tasks failed.
    delete_dataset = BigQueryDeleteDatasetOperator(
        task_id="delete_dataset",
        dataset_id=DATASET_NAME,
        delete_contents=True,
        trigger_rule=TriggerRule.ALL_DONE,
    )
    delete_bucket = GCSDeleteBucketOperator(
        task_id="delete_bucket", bucket_name=DATA_SAMPLE_GCS_BUCKET_NAME, trigger_rule=TriggerRule.ALL_DONE
    )
    (
        # TEST SETUP
        [create_bucket, create_dataset]
        # TEST BODY
        >> upload_file
        >> create_external_table
        # TEST TEARDOWN
        >> delete_dataset
        >> delete_bucket
    )
    from tests.system.utils.watcher import watcher
    # This test needs watcher in order to properly mark success/failure
    # when "tearDown" task with trigger rule is part of the DAG
    list(dag.tasks) >> watcher()
from tests.system.utils import get_test_run # noqa: E402
# Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)
test_run = get_test_run(dag)
| 35.853211 | 109 | 0.744115 |
e857a6eea02f07024ecffece8bfc9f185aba1bfb | 118 | py | Python | 02. ePuskesmas Splitter v1.5/_constants.py | ivanwilliammd/Smale-Scale-Information-System-mini-apps | 927850728c92837e86ab60f357b383ab6dec0d87 | [
"Apache-2.0"
] | 1 | 2021-07-20T15:07:57.000Z | 2021-07-20T15:07:57.000Z | 02. ePuskesmas Splitter v1.5/_constants.py | ivanwilliammd/Smale-Scale-Information-System-mini-apps | 927850728c92837e86ab60f357b383ab6dec0d87 | [
"Apache-2.0"
] | null | null | null | 02. ePuskesmas Splitter v1.5/_constants.py | ivanwilliammd/Smale-Scale-Information-System-mini-apps | 927850728c92837e86ab60f357b383ab6dec0d87 | [
"Apache-2.0"
] | null | null | null | VERSION = "1.5.0"
# Build timestamp of this release (ISO-8601 with microseconds).
BUILD_DATE = "2020-04-07T13:12:44.751234"
# Author credit string.
AUTHOR = "dr. Ivan William Harsono, MTI"
DEBUGGING = False | 29.5 | 41 | 0.711864 |
f76ed4233c319cc4eed7d60f095a15a8b8fe4aaf | 6,189 | py | Python | train/tasks/semantic/visualize_uncertainty.py | inkyusa/SalsaNext | c72cfb643add90cf51ab87e2b4eaef53bb457729 | [
"MIT"
] | null | null | null | train/tasks/semantic/visualize_uncertainty.py | inkyusa/SalsaNext | c72cfb643add90cf51ab87e2b4eaef53bb457729 | [
"MIT"
] | null | null | null | train/tasks/semantic/visualize_uncertainty.py | inkyusa/SalsaNext | c72cfb643add90cf51ab87e2b4eaef53bb457729 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# This file is covered by the LICENSE file in the root of this project.
import argparse
import os
import yaml
import __init__ as booger
from common.laserscan import LaserScan, SemLaserScan
from common.laserscanvis_uncert import LaserScanVisUncert
import glob
if __name__ == '__main__':
parser = argparse.ArgumentParser("./visualize.py")
parser.add_argument(
'--dataset', '-d',
type=str,
required=True,
help='Dataset to visualize. No Default',
)
parser.add_argument(
'--config', '-c',
type=str,
required=False,
default="config/labels/semantic-kitti.yaml",
help='Dataset config file. Defaults to %(default)s',
)
parser.add_argument(
'--sequence', '-s',
type=str,
default="00",
required=False,
help='Sequence to visualize. Defaults to %(default)s',
)
parser.add_argument(
'--predictions', '-p',
type=str,
default=None,
required=False,
help='Alternate location for labels, to use predictions folder. '
'Must point to directory containing the predictions in the proper format '
' (see readme)'
'Defaults to %(default)s',
)
parser.add_argument(
'--ignore_semantics', '-i',
dest='ignore_semantics',
default=False,
action='store_true',
help='Ignore semantics. Visualizes uncolored pointclouds.'
'Defaults to %(default)s',
)
parser.add_argument(
'--offset',
type=int,
default=0,
required=False,
help='Sequence to start. Defaults to %(default)s',
)
parser.add_argument(
'--ignore_safety',
dest='ignore_safety',
default=False,
action='store_true',
help='Normally you want the number of labels and ptcls to be the same,'
', but if you are not done inferring this is not the case, so this disables'
' that safety.'
'Defaults to %(default)s',
)
FLAGS, unparsed = parser.parse_known_args()
# print summary of what we will do
print("*" * 80)
print("INTERFACE:")
print("Dataset", FLAGS.dataset)
print("Config", FLAGS.config)
print("Sequence", FLAGS.sequence)
print("Predictions", FLAGS.predictions)
print("ignore_semantics", FLAGS.ignore_semantics)
print("ignore_safety", FLAGS.ignore_safety)
print("offset", FLAGS.offset)
print("*" * 80)
# open config file
try:
print("Opening config file %s" % FLAGS.config)
CFG = yaml.safe_load(open(FLAGS.config, 'r'))
except Exception as e:
print(e)
print("Error opening yaml file.")
quit()
# fix sequence name
FLAGS.sequence = '{0:02d}'.format(int(FLAGS.sequence))
# does sequence folder exist?
scan_paths = os.path.join(FLAGS.dataset, "sequences",
FLAGS.sequence, "velodyne")
if os.path.isdir(scan_paths):
print("Sequence folder exists! Using sequence from %s" % scan_paths)
else:
print("Sequence folder doesn't exist! Exiting...")
quit()
# populate the pointclouds
scan_names = [os.path.join(dp, f) for dp, dn, fn in os.walk(
os.path.expanduser(scan_paths)) for f in fn]
scan_names.sort()
proj_pred_img_names = None
# does sequence folder exist?
if not FLAGS.ignore_semantics:
if FLAGS.predictions is not None:
pred_label_paths = os.path.join(FLAGS.predictions, "sequences",
FLAGS.sequence, "predictions")
gt_label_paths = os.path.join(FLAGS.dataset, "sequences",
FLAGS.sequence, "labels")
else:
gt_label_paths = os.path.join(FLAGS.dataset, "sequences",
FLAGS.sequence, "labels")
if os.path.isdir(pred_label_paths):
print("Labels folder exists! Using labels from %s" % pred_label_paths)
else:
print("Labels folder doesn't exist! Exiting...")
quit()
# populate the pointclouds
pred_label_names = [os.path.join(dp, f) for dp, dn, fn in os.walk(
os.path.expanduser(pred_label_paths)) for f in fn]
pred_label_names.sort()
gt_label_names = [os.path.join(dp, f) for dp, dn, fn in os.walk(
os.path.expanduser(gt_label_paths)) for f in fn]
gt_label_names.sort()
#get the list of prediction projected images
proj_pred_img_names = glob.glob('/home/sa001/workspace/SalsaNext/prediction/second_trained_with_uncert/sequences/08/proj_label_with_uncert/*.png')
proj_pred_img_names.sort()
proj_uncert_img_names = glob.glob('/home/sa001/workspace/SalsaNext/prediction/second_trained_with_uncert/sequences/08/proj_uncert/*.png')
proj_uncert_img_names.sort()
# check that there are same amount of labels and scans
if not FLAGS.ignore_safety:
assert (len(pred_label_names) == len(scan_names))
# create a scan
if FLAGS.ignore_semantics:
scan = LaserScan(project=True) # project all opened scans to spheric proj
else:
color_dict = CFG["color_map"]
scan = SemLaserScan(color_dict, project=True)
# create a visualizer
semantics = not FLAGS.ignore_semantics
if not semantics:
label_names = None
vis = LaserScanVisUncert(scan=scan,
scan_names=scan_names,
pred_label_names=pred_label_names,
gt_label_names=gt_label_names,
proj_pred_img_names = proj_pred_img_names,
proj_uncert_img_names = proj_uncert_img_names,
offset=FLAGS.offset,
semantics=semantics,
instances=False)
# print instructions
print("To navigate:")
print("\tb: back (previous scan)")
print("\tn: next (next scan)")
print("\tspace: toggle continous play")
print("\tq: quit (exit program)")
# run the visualizer
vis.run()
| 34.383333 | 154 | 0.606237 |
a2997ccce861ce7cb77b6b6ced6953e49d28ee03 | 3,434 | py | Python | register_bucket_policy/schema/aws/s3/awsapicallviacloudtrail/UserIdentity.py | aws-samples/amazon-s3-bucket-policies-versioning | df5ec498c922b6a0aa944a5205e9bc3a42d9ddfe | [
"MIT-0"
] | null | null | null | register_bucket_policy/schema/aws/s3/awsapicallviacloudtrail/UserIdentity.py | aws-samples/amazon-s3-bucket-policies-versioning | df5ec498c922b6a0aa944a5205e9bc3a42d9ddfe | [
"MIT-0"
] | null | null | null | register_bucket_policy/schema/aws/s3/awsapicallviacloudtrail/UserIdentity.py | aws-samples/amazon-s3-bucket-policies-versioning | df5ec498c922b6a0aa944a5205e9bc3a42d9ddfe | [
"MIT-0"
] | null | null | null | # coding: utf-8
import pprint
import re # noqa: F401
import six
from enum import Enum
from schema.aws.s3.awsapicallviacloudtrail.SessionContext import SessionContext # noqa: F401,E501
class UserIdentity(object):
    """CloudTrail ``userIdentity`` element of an AWS API call event.

    Generated swagger-style model: ``_types`` records the declared type of
    each attribute and ``_attribute_map`` maps attribute names to the JSON
    keys used on the wire.  All fields are exposed as simple properties.
    """
    # Attribute name -> declared type name (consumed by generated serializers).
    _types = {
        'sessionContext': 'SessionContext',
        'accessKeyId': 'str',
        'accountId': 'str',
        'principalId': 'str',
        'type': 'str',
        'arn': 'str'
    }
    # Attribute name -> JSON key (identical here, kept for codegen symmetry).
    _attribute_map = {
        'sessionContext': 'sessionContext',
        'accessKeyId': 'accessKeyId',
        'accountId': 'accountId',
        'principalId': 'principalId',
        'type': 'type',
        'arn': 'arn'
    }
    def __init__(self, sessionContext=None, accessKeyId=None, accountId=None, principalId=None, type=None, arn=None):  # noqa: E501
        """Initialize the model; every field defaults to ``None``."""
        self._sessionContext = None
        self._accessKeyId = None
        self._accountId = None
        self._principalId = None
        self._type = None
        self._arn = None
        self.discriminator = None
        self.sessionContext = sessionContext
        self.accessKeyId = accessKeyId
        self.accountId = accountId
        self.principalId = principalId
        self.type = type
        self.arn = arn
    @property
    def sessionContext(self):
        """The sessionContext of this UserIdentity."""
        return self._sessionContext
    @sessionContext.setter
    def sessionContext(self, sessionContext):
        self._sessionContext = sessionContext
    @property
    def accessKeyId(self):
        """The accessKeyId of this UserIdentity."""
        return self._accessKeyId
    @accessKeyId.setter
    def accessKeyId(self, accessKeyId):
        self._accessKeyId = accessKeyId
    @property
    def accountId(self):
        """The accountId of this UserIdentity."""
        return self._accountId
    @accountId.setter
    def accountId(self, accountId):
        self._accountId = accountId
    @property
    def principalId(self):
        """The principalId of this UserIdentity."""
        return self._principalId
    @principalId.setter
    def principalId(self, principalId):
        self._principalId = principalId
    @property
    def type(self):
        """The type of this UserIdentity."""
        return self._type
    @type.setter
    def type(self, type):
        self._type = type
    @property
    def arn(self):
        """The arn of this UserIdentity."""
        return self._arn
    @arn.setter
    def arn(self, arn):
        self._arn = arn
    def to_dict(self):
        """Return the model's properties as a dict, recursing into sub-models."""
        result = {}
        # Modernized from six.iteritems(): this module runs on Python 3, so
        # the py2 compatibility shim was unnecessary.
        for attr, _ in self._types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Codegen artifact: merged only if this class ever subclasses dict.
        if issubclass(UserIdentity, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """True when *other* is a UserIdentity with equal attributes."""
        if not isinstance(other, UserIdentity):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| 21.734177 | 131 | 0.570472 |
f93f9275da7a6c69c754c34e11435243c0934b16 | 1,153 | py | Python | vi_municipales_2016/scraper.py | lfalvarez/vi-municipales-2016 | b76ec2d4033ea7e106219452949da6e6815584d5 | [
"MIT"
] | null | null | null | vi_municipales_2016/scraper.py | lfalvarez/vi-municipales-2016 | b76ec2d4033ea7e106219452949da6e6815584d5 | [
"MIT"
] | null | null | null | vi_municipales_2016/scraper.py | lfalvarez/vi-municipales-2016 | b76ec2d4033ea7e106219452949da6e6815584d5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import facebook
from django.conf import settings
TOKEN = settings.FACEBOOK_ACCESS_TOKEN
def string_for_search_generator(candidate):
    """Build the Facebook search queries for *candidate*.

    Returns two strings: the candidate's name minus the final surname,
    combined first with the election area's name and then with the
    position being contested.
    """
    base_name = candidate.name.rsplit(' ', 1)[0]
    election = candidate.election
    return [
        u' '.join((base_name, election.area.name)),
        u' '.join((base_name, election.position)),
    ]
class Scraper(object):
    """Searches Facebook for pages that may belong to election candidates."""
    def scrape(self, election):
        """Record possible Facebook pages for every candidate of *election*."""
        # Imported here rather than at module top - presumably to avoid an
        # import cycle with the models module; confirm before moving.
        from vi_municipales_2016.models import PosibleFacebookPage
        api = facebook.GraphAPI(access_token=TOKEN, version='2.5')
        for candidate in election.candidates.all():
            for query in string_for_search_generator(candidate):
                response = api.request('search', {'q': query, 'type': 'page'})
                for page in response['data']:
                    PosibleFacebookPage.objects.get_or_create(
                        url='http://www.facebook.com/' + page['id'],
                        name=page['name'],
                        candidate=candidate,
                    )
ff64f757224be03b87121c6d18cd332840adb63b | 7,634 | py | Python | swifter/swifter_tests.py | def-mycroft/swifter | c93f0cebad68526a024ca3c4713cec2acdddcd02 | [
"MIT"
] | null | null | null | swifter/swifter_tests.py | def-mycroft/swifter | c93f0cebad68526a024ca3c4713cec2acdddcd02 | [
"MIT"
] | null | null | null | swifter/swifter_tests.py | def-mycroft/swifter | c93f0cebad68526a024ca3c4713cec2acdddcd02 | [
"MIT"
] | null | null | null | import unittest
import time
import numpy as np
import pandas as pd
import swifter
print(f"Version {swifter.__version__}")
def math_vec_square(x):
    """Square *x*; a trivially vectorizable elementwise function."""
    return pow(x, 2)
def math_foo(x, compare_to=1):
    """Square values below *compare_to*; take the square root otherwise."""
    if x < compare_to:
        return x ** 2
    return x ** 0.5
def math_vec_multiply(row):
    """Product of a row's "x" and "y" fields."""
    x, y = row["x"], row["y"]
    return x * y
def math_agg_foo(row):
    """Sum of the row's values minus its single smallest value."""
    total = row.sum()
    return total - row.min()
def text_foo(row):
    """Apply a per-letter transformation to row["value"].

    "A" multiplies by 3, "B" cubes, "C" divides by 3, "D" takes the cube
    root, and "E" returns the value untouched; any other letter yields None.
    """
    transforms = {
        "A": lambda v: v * 3,
        "B": lambda v: v ** 3,
        "C": lambda v: v / 3,
        "D": lambda v: v ** (1 / 3),
        "E": lambda v: v,
    }
    transform = transforms.get(row["letter"])
    if transform is not None:
        return transform(row["value"])
class TestSwifter(unittest.TestCase):
    """Checks that swifter matches pandas results while running faster.

    The original suite duplicated the same time-and-compare scaffolding in
    every benchmark test; it is factored into ``_timed`` /
    ``_assert_equal_and_faster`` here, and the repeated accessor list into
    ``_swifter_accessors``.
    """
    def assertSeriesEqual(self, a, b, msg):
        """assertEqual hook comparing two pandas Series."""
        try:
            pd.testing.assert_series_equal(a, b)
        except AssertionError as e:
            raise self.failureException(msg) from e
    def assertDataFrameEqual(self, a, b, msg):
        """assertEqual hook comparing two pandas DataFrames."""
        try:
            pd.testing.assert_frame_equal(a, b)
        except AssertionError as e:
            raise self.failureException(msg) from e
    def setUp(self):
        """Register the pandas equality hooks used by assertEqual."""
        self.addTypeEqualityFunc(pd.Series, self.assertSeriesEqual)
        self.addTypeEqualityFunc(pd.DataFrame, self.assertDataFrameEqual)
    def _swifter_accessors(self):
        """One swifter accessor of each flavor: frame, series, rolling."""
        return [
            pd.DataFrame().swifter,
            pd.Series().swifter,
            pd.DataFrame(
                {"x": np.arange(0, 10)}, index=pd.date_range("2019-01-1", "2020-01-1", periods=10)
            ).swifter.rolling("1d"),
        ]
    def _timed(self, fn):
        """Execute *fn* and return ``(result, wall_clock_seconds)``."""
        start = time.time()
        result = fn()
        return result, time.time() - start
    def _assert_equal_and_faster(self, pandas_fn, swifter_fn):
        """Run the pandas and swifter variants of the same computation.

        Asserts both produce equal results and the swifter variant takes
        less wall-clock time.  The pandas variant runs first, mirroring
        the original benchmark ordering.
        """
        pd_val, pd_time = self._timed(pandas_fn)
        swifter_val, swifter_time = self._timed(swifter_fn)
        self.assertEqual(pd_val, swifter_val)
        self.assertLess(swifter_time, pd_time)
    def test_set_npartitions(self):
        """set_npartitions updates the private partition count everywhere."""
        expected = 1000
        for swifter_df in self._swifter_accessors():
            before = swifter_df._npartitions
            swifter_df.set_npartitions(expected)
            actual = swifter_df._npartitions
            self.assertEqual(actual, expected)
            self.assertNotEqual(before, actual)
    def test_set_dask_scheduler(self):
        """set_dask_scheduler updates the private scheduler everywhere."""
        expected = "my-scheduler"
        for swifter_df in self._swifter_accessors():
            before = swifter_df._scheduler
            swifter_df.set_dask_scheduler(expected)
            actual = swifter_df._scheduler
            self.assertEqual(actual, expected)
            self.assertNotEqual(before, actual)
    def test_disable_progress_bar(self):
        """progress_bar(False) disables the progress bar everywhere."""
        expected = False
        for swifter_df in self._swifter_accessors():
            before = swifter_df._progress_bar
            swifter_df.progress_bar(expected)
            actual = swifter_df._progress_bar
            self.assertEqual(actual, expected)
            self.assertNotEqual(before, actual)
    def test_allow_dask_on_strings(self):
        """allow_dask_on_strings toggles the string flag (frames only)."""
        expected = True
        swifter_df = pd.DataFrame().swifter
        before = swifter_df._allow_dask_on_strings
        swifter_df.allow_dask_on_strings(expected)
        actual = swifter_df._allow_dask_on_strings
        self.assertEqual(actual, expected)
        self.assertNotEqual(before, actual)
    def test_vectorized_math_apply_on_large_series(self):
        series = pd.DataFrame({"x": np.random.normal(size=1_000_000)})["x"]
        self._assert_equal_and_faster(
            lambda: series.apply(math_vec_square),
            lambda: series.swifter.apply(math_vec_square),
        )
    def test_nonvectorized_math_apply_on_large_series(self):
        series = pd.DataFrame({"x": np.random.normal(size=5_000_000)})["x"]
        self._assert_equal_and_faster(
            lambda: series.apply(math_foo, compare_to=1),
            lambda: series.swifter.apply(math_foo, compare_to=1),
        )
    def test_vectorized_math_apply_on_large_dataframe(self):
        df = pd.DataFrame({"x": np.random.normal(size=1_000_000), "y": np.random.uniform(size=1_000_000)})
        self._assert_equal_and_faster(
            lambda: df.apply(math_vec_multiply, axis=1),
            lambda: df.swifter.apply(math_vec_multiply, axis=1),
        )
    def test_nonvectorized_math_apply_on_large_dataframe(self):
        df = pd.DataFrame({"x": np.random.normal(size=1_000_000), "y": np.random.uniform(size=1_000_000)})
        self._assert_equal_and_faster(
            lambda: df.apply(math_agg_foo, axis=1),
            lambda: df.swifter.apply(math_agg_foo, axis=1),
        )
    def test_nonvectorized_text_apply_on_large_dataframe(self):
        df = pd.DataFrame({"letter": ["A", "B", "C", "D", "E"] * 200_000, "value": np.random.normal(size=1_000_000)})
        self._assert_equal_and_faster(
            lambda: df.apply(text_foo, axis=1),
            lambda: df.swifter.allow_dask_on_strings(True).apply(text_foo, axis=1),
        )
    def test_vectorized_math_apply_on_large_rolling_dataframe(self):
        df = pd.DataFrame(
            {"x": np.arange(0, 1_000_000)}, index=pd.date_range("2019-01-1", "2020-01-1", periods=1_000_000)
        )
        self._assert_equal_and_faster(
            lambda: df.rolling("1d").apply(sum),
            lambda: df.swifter.rolling("1d").apply(sum),
        )
    def test_nonvectorized_math_apply_on_large_rolling_dataframe(self):
        df = pd.DataFrame(
            {"x": np.arange(0, 1_000_000)}, index=pd.date_range("2019-01-1", "2020-01-1", periods=1_000_000)
        )
        self._assert_equal_and_faster(
            lambda: df.rolling("1d").apply(math_agg_foo),
            lambda: df.swifter.rolling("1d").apply(math_agg_foo),
        )
| 33.482456 | 117 | 0.620644 |
57499fed498ea717f1b93a0dd370d93714f785ff | 13,124 | py | Python | paleomix/node.py | MikkelSchubert/paleomix | 5c6414060088ba178ff1c400bdbd45d2f6b1aded | [
"MIT"
] | 33 | 2015-04-08T10:44:19.000Z | 2021-11-01T14:23:40.000Z | paleomix/node.py | MikkelSchubert/paleomix | 5c6414060088ba178ff1c400bdbd45d2f6b1aded | [
"MIT"
] | 41 | 2015-07-17T12:46:16.000Z | 2021-10-13T06:47:25.000Z | paleomix/node.py | MikkelSchubert/paleomix | 5c6414060088ba178ff1c400bdbd45d2f6b1aded | [
"MIT"
] | 19 | 2015-01-23T07:09:39.000Z | 2021-04-06T09:30:21.000Z | #!/usr/bin/python3
#
# Copyright (c) 2012 Mikkel Schubert <MikkelSch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import itertools
import logging
import os
import shutil
import sys
from pathlib import Path
from typing import Any, FrozenSet, Iterable, List, Optional, Union
import paleomix
import paleomix.common.fileutils as fileutils
from paleomix.common.command import AtomicCmd, CmdError, ParallelCmds, SequentialCmds
from paleomix.common.utilities import safe_coerce_to_frozenset
from paleomix.common.versions import Requirement
_GLOBAL_ID = itertools.count()
class NodeError(RuntimeError):
    """Error raised when a pipeline node fails.

    Optionally carries ``path``, the temporary directory of the failed
    node, so callers can point users at its logs.
    """
    def __init__(self, *args: Any, path: Optional[str] = None):
        self.path = path
        super().__init__(*args)
class NodeMissingFilesError(NodeError):
    """Raised when required input files for a node do not exist."""
    pass
class CmdNodeError(NodeError):
    """Raised when an external command run by a CommandNode fails."""
    pass
class NodeUnhandledException(NodeError):
    """This exception is thrown by Node.run() if a non-NodeError exception
    is raised in a subfunction (e.g. _setup, _run, or _teardown). The text
    for this exception will include both the original error message and a
    stacktrace for that error."""
    pass
class Node:
    """A single task in the pipeline: declares its input/output files,
    executables, version requirements, and dependency nodes, and runs its
    work inside a private temporary directory via :meth:`run`."""
    def __init__(
        self,
        description: Optional[str] = None,
        threads: int = 1,
        input_files: Iterable[str] = (),
        output_files: Iterable[str] = (),
        executables: Iterable[str] = (),
        auxiliary_files: Iterable[str] = (),
        requirements: Iterable[Requirement] = (),
        dependencies: Iterable["Node"] = (),
    ):
        """Validate and store the node's metadata; raises TypeError/ValueError
        on malformed arguments and NodeError for output without input."""
        if not (description is None or isinstance(description, str)):
            raise TypeError(description)
        self.__description = description
        self.input_files = self._validate_files(input_files)
        self.output_files = self._validate_files(output_files)
        self.executables = self._validate_files(executables)
        self.auxiliary_files = self._validate_files(auxiliary_files)
        self.requirements = self._validate_requirements(requirements)
        self.threads = self._validate_nthreads(threads)
        self.dependencies = self._collect_nodes(dependencies)
        # If there are no input files, the node cannot be re-run based on
        # changes to the input, and nodes with output but no input are not
        # expected based on current usage.
        if not self.input_files and self.output_files:
            raise NodeError("Node not dependent upon input files: %s" % self)
        # Globally unique node ID
        self.id = next(_GLOBAL_ID)
    def run(self, temp_root: str) -> None:
        """Runs the node, by calling _setup, _run, and _teardown in that order.
        Prior to calling these functions, a temporary dir is created using the
        'temp_root' prefix from the config object. Both the config object and
        the temporary dir are passed to the above functions. The temporary
        dir is removed after _teardown is called, and all expected files
        should have been removed/renamed at that point.
        Any non-NodeError exception raised in this function is wrapped in a
        NodeUnhandledException, which includes a full backtrace. This is needed
        to allow showing these in the main process."""
        temp = None
        try:
            # Generate directory name and create dir at temp_root
            temp = self._create_temp_dir(temp_root)
            self._setup(temp)
            self._run(temp)
            self._teardown(temp)
            self._remove_temp_dir(temp)
        except NodeMissingFilesError:
            try:
                # The folder is most likely empty, but it is possible to re-use temp
                # directories for resumable tasks so we cannot delete it outrigth
                if temp is not None:
                    os.rmdir(temp)
            except OSError:
                pass
            raise
        except NodeError as error:
            # Persist a failure log in the temp dir, then re-raise with the
            # node description and temp path attached for the operator.
            self._write_error_log(temp, error)
            raise NodeError(
                "Error while running {}:\n {}".format(
                    self, "\n ".join(str(error).split("\n"))
                ),
                path=temp,
            )
        except Exception as error:
            self._write_error_log(temp, error)
            raise NodeUnhandledException(
                "Error while running %s" % (self,), path=temp
            ) from error
    def _create_temp_dir(self, temp_root: str) -> str:
        """Called by 'run' in order to create a temporary folder."""
        return fileutils.create_temp_dir(temp_root)
    def _remove_temp_dir(self, temp: str) -> None:
        """Called by 'run' in order to remove an (now) empty temporary folder."""
        temp = fileutils.fspath(temp)
        log = logging.getLogger(__name__)
        # Warn about leftovers before deleting; they indicate output files
        # the node produced but did not declare or commit.
        for filename in self._collect_files(temp):
            log.warning(
                "Unexpected file in temporary directory: %r",
                os.path.join(temp, filename),
            )
        shutil.rmtree(temp)
    def _setup(self, _temp: str) -> None:
        """Is called prior to '_run()' by 'run()'. Any code used to copy/link files,
        or other steps needed to ready the node for running may be carried out in this
        function. Checks that required input files exist, and raises an NodeError if
        this is not the case."""
        executables = [] # type: List[str]
        for executable in self.executables:
            # "%(PYTHON)s" is a placeholder for the current interpreter.
            if executable == "%(PYTHON)s":
                executable = sys.executable
            executables.append(executable)
        missing_executables = fileutils.missing_executables(executables)
        if missing_executables:
            raise NodeError("Executable(s) not found: %s" % (missing_executables,))
        self._check_for_input_files(self.input_files | self.auxiliary_files)
    def _run(self, _temp: str) -> None:
        """Hook performing the node's actual work; no-op in the base class.
        Subclasses override this (see CommandNode)."""
        pass
    def _teardown(self, _temp: str) -> None:
        """Called after '_run()'; verifies that the declared output files
        were produced. Subclasses may extend this with commit logic."""
        self._check_for_missing_files(self.output_files, "output")
    def __str__(self) -> str:
        """Returns the description passed to the constructor, or a default
        description if no description was passed to the constructor."""
        if self.__description:
            return self.__description
        return repr(self)
    def __getstate__(self):
        """Called by pickle/cPickle to determine what to pickle; this is
        overridden to avoid pickling of requirements, dependencies, which would
        otherwise greatly inflate the amount of information that needs to be
        pickled."""
        obj_dict = self.__dict__.copy()
        obj_dict["requirements"] = ()
        obj_dict["dependencies"] = ()
        return obj_dict
    def _write_error_log(self, temp: Optional[str], error: Exception) -> None:
        """Best-effort dump of the failure context to '<temp>/pipe.errors';
        silently skipped if the temporary directory is not available."""
        if not (temp and os.path.isdir(temp)):
            return
        def _fmt(values: Iterable[str]):
            # One sorted filename per line for readable logs.
            return "\n ".join(sorted(values))
        message = [
            "PALEOMIX = v%s" % (paleomix.__version__,),
            "Command = %r" % (" ".join(sys.argv),),
            "CWD = %r" % (os.getcwd(),),
            "PATH = %r" % (os.environ.get("PATH", ""),),
            "Node = %s" % (str(self),),
            "Threads = %i" % (self.threads,),
            "Input files = %s" % (_fmt(self.input_files),),
            "Output files = %s" % (_fmt(self.output_files),),
            "Auxiliary files = %s" % (_fmt(self.auxiliary_files),),
            "Executables = %s" % (_fmt(self.executables),),
            "",
            "Errors =\n%s\n" % (error,),
        ]
        message = "\n".join(message)
        try:
            with open(os.path.join(temp, "pipe.errors"), "w") as handle:
                handle.write(message)
        except OSError as oserror:
            # Logging must never mask the original node failure.
            sys.stderr.write("ERROR: Could not write failure log: %s\n" % (oserror,))
    def _collect_nodes(self, nodes: Iterable["Node"]) -> FrozenSet["Node"]:
        """Coerce *nodes* to a frozenset, requiring every member be a Node."""
        nodes = safe_coerce_to_frozenset(nodes)
        for node in nodes:
            if not isinstance(node, Node):
                raise TypeError(node)
        return nodes
    def _check_for_input_files(self, filenames: Iterable[str]) -> None:
        """Raise NodeMissingFilesError listing any *filenames* that do not exist."""
        missing_files = fileutils.missing_files(filenames)
        if missing_files:
            raise NodeMissingFilesError(
                "Missing input files for command:\n\t- Command: %s\n\t- Files: %s"
                % (self, "\n\t ".join(missing_files))
            )
    def _check_for_missing_files(self, filenames: Iterable[str], description: str):
        """Raise NodeError if any *filenames* are missing; *description*
        names the file category (e.g. "output") in the message."""
        missing_files = fileutils.missing_files(filenames)
        if missing_files:
            message = (
                "Missing %s files for command:\n\t- Command: %s\n\t- Files: %s"
                % (description, self, "\n\t ".join(missing_files))
            )
            raise NodeError(message)
    @classmethod
    def _validate_requirements(
        cls, requirements: Iterable[Requirement]
    ) -> FrozenSet[Requirement]:
        """Coerce to a frozenset and require every member be a Requirement."""
        requirements = safe_coerce_to_frozenset(requirements)
        for requirement in requirements:
            if not isinstance(requirement, Requirement):
                raise TypeError(requirement)
        return requirements
    @classmethod
    def _validate_files(cls, files: Iterable[str]):
        """Return *files* as a frozenset of validated filenames."""
        return frozenset(fileutils.validate_filenames(files))
    @classmethod
    def _validate_nthreads(cls, threads: Any) -> int:
        """Return *threads* if it is a positive int; raise otherwise.
        NOTE(review): bool instances pass the isinstance(int) check."""
        if not isinstance(threads, int):
            raise TypeError("'threads' must be a positive integer, not %r" % (threads,))
        elif threads < 1:
            raise ValueError(
                "'threads' must be a positive integer, not %i" % (threads,)
            )
        return threads
    @staticmethod
    def _collect_files(root: str) -> Iterable[str]:
        """Yield every file under *root* (recursively) as a path relative to it."""
        root = fileutils.fspath(root)
        def _walk_dir(path: str) -> Iterable[str]:
            for entry in os.scandir(path):
                if entry.is_file():
                    yield str(Path(entry.path).relative_to(root))
                elif entry.is_dir():
                    yield from _walk_dir(entry.path)
        yield from _walk_dir(root)
class CommandNode(Node):
    """Node that runs a single (possibly composite) command object and
    commits its expected temporary files on success."""
    def __init__(
        self,
        command: Union[AtomicCmd, ParallelCmds, SequentialCmds],
        description: Optional[str] = None,
        threads: int = 1,
        dependencies: Iterable[Node] = (),
    ):
        """Expose the command's declared files/executables/requirements
        through the Node interface."""
        Node.__init__(
            self,
            description=description,
            input_files=command.input_files,
            output_files=command.output_files,
            auxiliary_files=command.auxiliary_files,
            executables=command.executables,
            requirements=command.requirements,
            threads=threads,
            dependencies=dependencies,
        )
        self._command = command
    def _run(self, temp: str) -> None:
        """Runs the command object provided in the constructor, and waits for it to
        terminate. If any errors during the running of the command, this function
        raises a NodeError detailing the returned error-codes."""
        try:
            self._command.run(temp)
        except CmdError as error:
            raise CmdNodeError("%s\n\n%s" % (str(self._command), error))
        return_codes = self._command.join()
        # Any non-zero return code indicates failure.
        if any(return_codes):
            raise CmdNodeError(str(self._command))
    def _teardown(self, temp: str) -> None:
        """Verify the command created its expected temp files, commit them
        to their final locations, then run the base-class output check."""
        required_files = self._command.expected_temp_files
        current_files = set(self._collect_files(temp))
        missing_files = required_files - current_files
        if missing_files:
            raise CmdNodeError(
                (
                    "Error running Node, required files not created:\n"
                    "Temporary directory: %r\n"
                    "\tRequired files missing from temporary directory:\n\t - %s"
                )
                % (temp, "\n\t - ".join(sorted(map(repr, missing_files))))
            )
        self._command.commit(temp)
        Node._teardown(self, temp)
| 38.151163 | 88 | 0.618028 |
e87c0509551e34d5e5a373a9b781282b325e1075 | 1,202 | py | Python | pipetter/views.py | melinath/django-pipetter | fd21254f64e3538fd6dcd5ddc4d5dc7444f5fafb | [
"0BSD"
] | 1 | 2017-10-14T16:32:21.000Z | 2017-10-14T16:32:21.000Z | pipetter/views.py | melinath/django-pipetter | fd21254f64e3538fd6dcd5ddc4d5dc7444f5fafb | [
"0BSD"
] | null | null | null | pipetter/views.py | melinath/django-pipetter | fd21254f64e3538fd6dcd5ddc4d5dc7444f5fafb | [
"0BSD"
] | null | null | null | from django.http import HttpResponse, Http404
import django.utils.simplejson as json
from pipetter.utils import refresh_cache as refresh, create_cache as create, get_cache_or_new
from pipetter import registry, NotRegistered
def refresh_cache(request, pipette_names):
    """Perform a hard refresh of the cache for any of the named pipettes."""
    # Pipette names arrive as a '/'-separated URL path fragment.
    names = pipette_names.strip('/').split('/')
    refresh(names)
    return HttpResponse('')
def create_cache(request, pipette_name, argstr):
    """Create the cache for ``pipette_name`` from '/'-separated ``argstr``."""
    args = tuple(argstr.strip('/').split('/'))
    try:
        create(pipette_name, args)
    except NotRegistered:
        raise Http404('The specified pipette "%s" does not exist or is not registered.' % pipette_name)
    return HttpResponse('')
def json_response(request, pipette_name, argstr=None):
    """Return the results of a pipette as a JSON response.

    Expects arguments as a '/' separated string (``argstr``)."""
    args = tuple(argstr.strip('/').split('/')) if argstr else ()
    try:
        response_data = get_cache_or_new(pipette_name, args)
    except NotRegistered:
        raise Http404('The specified pipette "%s" does not exist or is not registered.' % pipette_name)
    return HttpResponse(json.dumps(response_data), mimetype='application/json')
637015103a34066ba732d25e3da635dcdaedeeb7 | 1,307 | py | Python | src/eduid_common/api/oidc.py | SUNET/eduid-common | d666aec7e47e6b0ccb575d621bb6e9f40bcea4e4 | [
"BSD-3-Clause"
] | 1 | 2016-04-14T13:45:10.000Z | 2016-04-14T13:45:10.000Z | src/eduid_common/api/oidc.py | SUNET/eduid-common | d666aec7e47e6b0ccb575d621bb6e9f40bcea4e4 | [
"BSD-3-Clause"
] | 16 | 2017-03-10T11:47:59.000Z | 2020-03-19T13:51:01.000Z | src/eduid_common/api/oidc.py | SUNET/eduid-common | d666aec7e47e6b0ccb575d621bb6e9f40bcea4e4 | [
"BSD-3-Clause"
] | 3 | 2016-11-21T11:39:49.000Z | 2019-09-18T12:32:02.000Z | # -*- coding: utf-8 -*-
import logging
from sys import exit
from time import sleep
from typing import Any, Mapping
from oic.oic import Client
from oic.oic.message import RegistrationRequest
from oic.utils.authn.client import CLIENT_AUTHN_METHOD
from requests.exceptions import ConnectionError
__author__ = 'lundberg'
logger = logging.getLogger(__name__)
def init_client(client_registration_info: Mapping[str, Any], provider_configuration_info: Mapping[str, Any]) -> Client:
    """Build an OIDC ``Client``, register it, and load the provider config.

    If the provider cannot be reached, retry once after 20 seconds; if it is
    still unreachable, log critically and exit the process.

    :param client_registration_info: kwargs for ``RegistrationRequest``.
    :param provider_configuration_info: mapping containing the provider ``issuer``.
    :return: a configured ``oic`` Client instance.
    """
    oidc_client = Client(client_authn_method=CLIENT_AUTHN_METHOD)
    oidc_client.store_registration_info(RegistrationRequest(**client_registration_info))
    provider = provider_configuration_info['issuer']
    # Two attempts replace the previous copy-pasted try/except blocks.
    for attempt in (1, 2):
        try:
            oidc_client.provider_config(provider)
            return oidc_client
        except ConnectionError:
            if attempt == 1:
                logger.warning(
                    f'No connection to provider {provider}. Can not start without provider configuration. Retrying...'
                )
                # Retry after 20 seconds so we don't get an excessive exit-restart loop
                sleep(20)
    logger.critical(
        f'No connection to provider {provider}. Can not start without provider configuration. Exiting.'
    )
    exit(1)
| 34.394737 | 119 | 0.719969 |
b557e6408fa04f80939aa7595b9f9a2fa6cf19b6 | 683 | py | Python | app/venues/migrations/0001_initial.py | swelanauguste/friendly-palm-tree | 9e9709b87b645b709b3ac8aa2f57cf29dd98e2cb | [
"MIT"
] | null | null | null | app/venues/migrations/0001_initial.py | swelanauguste/friendly-palm-tree | 9e9709b87b645b709b3ac8aa2f57cf29dd98e2cb | [
"MIT"
] | null | null | null | app/venues/migrations/0001_initial.py | swelanauguste/friendly-palm-tree | 9e9709b87b645b709b3ac8aa2f57cf29dd98e2cb | [
"MIT"
] | null | null | null | # Generated by Django 4.0.2 on 2022-02-27 03:51
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 4.0.2 (see file header).
    # Initial migration for the app: creates the Venue model/table.

    initial = True

    # No prior migrations to depend on.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Venue',
            fields=[
                # Default auto-incrementing primary key.
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('description', models.TextField()),
                # Timestamps maintained automatically on insert/update.
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
| 27.32 | 117 | 0.575403 |
e2d82bd5eb72c616ad8052fe30120e3acb7e5a79 | 1,925 | py | Python | forum/moderation/migrations/0004_auto_20200101_1738.py | successIA/Forum | 08de91a033da2c3779acbf95dfe0210eb1276a26 | [
"MIT"
] | null | null | null | forum/moderation/migrations/0004_auto_20200101_1738.py | successIA/Forum | 08de91a033da2c3779acbf95dfe0210eb1276a26 | [
"MIT"
] | 6 | 2020-08-13T18:54:33.000Z | 2021-06-10T20:20:16.000Z | forum/moderation/migrations/0004_auto_20200101_1738.py | successIA/ClassicForum | 08de91a033da2c3779acbf95dfe0210eb1276a26 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2020-01-01 16:38
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.22 (see file header). Adds foreign keys
    # to ModeratorEvent (comment/thread targets plus hidden_by/unhidden_by
    # actor references) and extends event_type with hide/unhide choices.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('comments', '0004_auto_20191122_0252'),
        ('threads', '0012_auto_20191124_0435'),
        ('moderation', '0003_auto_20200101_1650'),
    ]

    operations = [
        migrations.AddField(
            model_name='moderatorevent',
            name='comment',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='comments.Comment'),
        ),
        migrations.AddField(
            model_name='moderatorevent',
            name='hidden_by',
            # SET_NULL keeps the event row if the acting user is deleted.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='moderatorevent',
            name='thread',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='threads.Thread'),
        ),
        migrations.AddField(
            model_name='moderatorevent',
            name='unhidden_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='moderatorevent',
            name='event_type',
            field=models.PositiveSmallIntegerField(choices=[(0, 'added_moderator'), (1, 'removed_moderator'), (2, 'added_category'), (3, 'removed_category'), (4, 'Make thread invisible'), (5, 'Make thread visible'), (6, 'Make comment invisible'), (7, 'Make comment visible')]),
        ),
    ]
| 41.847826 | 277 | 0.648312 |
eb3ae07d5d372198fe147ad2f30a5658448aad33 | 7,011 | py | Python | venv/Lib/site-packages/pycparser/lextab.py | asanka9/Quession-Discussion-App-Socket.Io-NLP | 95a49a8afa572dc3908a0bade45e424c3751f191 | [
"Apache-2.0"
] | 9,953 | 2019-04-03T23:41:04.000Z | 2022-03-31T11:54:44.000Z | venv/Lib/site-packages/pycparser/lextab.py | asanka9/Quession-Discussion-App-Socket.Io-NLP | 95a49a8afa572dc3908a0bade45e424c3751f191 | [
"Apache-2.0"
] | 2,695 | 2015-07-01T16:01:35.000Z | 2022-03-31T19:17:44.000Z | lib/python2.7/site-packages/pycparser/lextab.py | anish03/weather-dash | d517fa9da9028d1fc5d8fd71d77cee829ddee87b | [
"MIT"
] | 2,803 | 2019-04-06T13:15:33.000Z | 2022-03-31T07:42:01.000Z | # lextab.py. This file automatically created by PLY (version 3.10). Don't edit!
_tabversion = '3.10'
_lextokens = set(('VOID', 'LBRACKET', 'WCHAR_CONST', 'FLOAT_CONST', 'MINUS', 'RPAREN', 'LONG', 'PLUS', 'ELLIPSIS', 'GT', 'GOTO', 'ENUM', 'PERIOD', 'GE', 'INT_CONST_DEC', 'ARROW', '__INT128', 'HEX_FLOAT_CONST', 'DOUBLE', 'MINUSEQUAL', 'INT_CONST_OCT', 'TIMESEQUAL', 'OR', 'SHORT', 'RETURN', 'RSHIFTEQUAL', 'RESTRICT', 'STATIC', 'SIZEOF', 'UNSIGNED', 'UNION', 'COLON', 'WSTRING_LITERAL', 'DIVIDE', 'FOR', 'PLUSPLUS', 'EQUALS', 'ELSE', 'INLINE', 'EQ', 'AND', 'TYPEID', 'LBRACE', 'PPHASH', 'INT', 'SIGNED', 'CONTINUE', 'NOT', 'OREQUAL', 'MOD', 'RSHIFT', 'DEFAULT', 'CHAR', 'WHILE', 'DIVEQUAL', 'EXTERN', 'CASE', 'LAND', 'REGISTER', 'MODEQUAL', 'NE', 'SWITCH', 'INT_CONST_HEX', '_COMPLEX', 'PPPRAGMASTR', 'PLUSEQUAL', 'STRUCT', 'CONDOP', 'BREAK', 'VOLATILE', 'PPPRAGMA', 'ANDEQUAL', 'INT_CONST_BIN', 'DO', 'LNOT', 'CONST', 'LOR', 'CHAR_CONST', 'LSHIFT', 'RBRACE', '_BOOL', 'LE', 'SEMI', 'LT', 'COMMA', 'OFFSETOF', 'TYPEDEF', 'XOR', 'AUTO', 'TIMES', 'LPAREN', 'MINUSMINUS', 'ID', 'IF', 'STRING_LITERAL', 'FLOAT', 'XOREQUAL', 'LSHIFTEQUAL', 'RBRACKET'))
_lexreflags = 64
_lexliterals = ''
_lexstateinfo = {'ppline': 'exclusive', 'pppragma': 'exclusive', 'INITIAL': 'inclusive'}
_lexstatere = {'ppline': [('(?P<t_ppline_FILENAME>"([^"\\\\\\n]|(\\\\(([a-zA-Z._~!=&\\^\\-\\\\?\'"])|(\\d+)|(x[0-9a-fA-F]+))))*")|(?P<t_ppline_LINE_NUMBER>(0(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?)|([1-9][0-9]*(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?))|(?P<t_ppline_NEWLINE>\\n)|(?P<t_ppline_PPLINE>line)', [None, ('t_ppline_FILENAME', 'FILENAME'), None, None, None, None, None, None, ('t_ppline_LINE_NUMBER', 'LINE_NUMBER'), None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ('t_ppline_NEWLINE', 'NEWLINE'), ('t_ppline_PPLINE', 'PPLINE')])], 'pppragma': [('(?P<t_pppragma_NEWLINE>\\n)|(?P<t_pppragma_PPPRAGMA>pragma)|(?P<t_pppragma_STR>.+)', [None, ('t_pppragma_NEWLINE', 'NEWLINE'), ('t_pppragma_PPPRAGMA', 'PPPRAGMA'), ('t_pppragma_STR', 'STR')])], 'INITIAL': [('(?P<t_PPHASH>[ \\t]*\\#)|(?P<t_NEWLINE>\\n+)|(?P<t_LBRACE>\\{)|(?P<t_RBRACE>\\})|(?P<t_FLOAT_CONST>((((([0-9]*\\.[0-9]+)|([0-9]+\\.))([eE][-+]?[0-9]+)?)|([0-9]+([eE][-+]?[0-9]+)))[FfLl]?))|(?P<t_HEX_FLOAT_CONST>(0[xX]([0-9a-fA-F]+|((([0-9a-fA-F]+)?\\.[0-9a-fA-F]+)|([0-9a-fA-F]+\\.)))([pP][+-]?[0-9]+)[FfLl]?))|(?P<t_INT_CONST_HEX>0[xX][0-9a-fA-F]+(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?)', [None, ('t_PPHASH', 'PPHASH'), ('t_NEWLINE', 'NEWLINE'), ('t_LBRACE', 'LBRACE'), ('t_RBRACE', 'RBRACE'), ('t_FLOAT_CONST', 'FLOAT_CONST'), None, None, None, None, None, None, None, None, None, ('t_HEX_FLOAT_CONST', 'HEX_FLOAT_CONST'), None, None, None, None, None, None, None, ('t_INT_CONST_HEX', 'INT_CONST_HEX')]), 
('(?P<t_INT_CONST_BIN>0[bB][01]+(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?)|(?P<t_BAD_CONST_OCT>0[0-7]*[89])|(?P<t_INT_CONST_OCT>0[0-7]*(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?)|(?P<t_INT_CONST_DEC>(0(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?)|([1-9][0-9]*(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?))|(?P<t_CHAR_CONST>\'([^\'\\\\\\n]|(\\\\(([a-zA-Z._~!=&\\^\\-\\\\?\'"])|(\\d+)|(x[0-9a-fA-F]+))))\')|(?P<t_WCHAR_CONST>L\'([^\'\\\\\\n]|(\\\\(([a-zA-Z._~!=&\\^\\-\\\\?\'"])|(\\d+)|(x[0-9a-fA-F]+))))\')|(?P<t_UNMATCHED_QUOTE>(\'([^\'\\\\\\n]|(\\\\(([a-zA-Z._~!=&\\^\\-\\\\?\'"])|(\\d+)|(x[0-9a-fA-F]+))))*\\n)|(\'([^\'\\\\\\n]|(\\\\(([a-zA-Z._~!=&\\^\\-\\\\?\'"])|(\\d+)|(x[0-9a-fA-F]+))))*$))|(?P<t_BAD_CHAR_CONST>(\'([^\'\\\\\\n]|(\\\\(([a-zA-Z._~!=&\\^\\-\\\\?\'"])|(\\d+)|(x[0-9a-fA-F]+))))[^\'\n]+\')|(\'\')|(\'([\\\\][^a-zA-Z._~^!=&\\^\\-\\\\?\'"x0-7])[^\'\\n]*\'))', [None, ('t_INT_CONST_BIN', 'INT_CONST_BIN'), None, None, None, None, None, None, None, ('t_BAD_CONST_OCT', 'BAD_CONST_OCT'), ('t_INT_CONST_OCT', 'INT_CONST_OCT'), None, None, None, None, None, None, None, ('t_INT_CONST_DEC', 'INT_CONST_DEC'), None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ('t_CHAR_CONST', 'CHAR_CONST'), None, None, None, None, None, None, ('t_WCHAR_CONST', 'WCHAR_CONST'), None, None, None, None, None, None, ('t_UNMATCHED_QUOTE', 'UNMATCHED_QUOTE'), None, None, None, None, None, None, None, None, None, None, None, None, None, None, ('t_BAD_CHAR_CONST', 'BAD_CHAR_CONST')]), 
('(?P<t_WSTRING_LITERAL>L"([^"\\\\\\n]|(\\\\(([a-zA-Z._~!=&\\^\\-\\\\?\'"])|(\\d+)|(x[0-9a-fA-F]+))))*")|(?P<t_BAD_STRING_LITERAL>"([^"\\\\\\n]|(\\\\(([a-zA-Z._~!=&\\^\\-\\\\?\'"])|(\\d+)|(x[0-9a-fA-F]+))))*?([\\\\][^a-zA-Z._~^!=&\\^\\-\\\\?\'"x0-7])([^"\\\\\\n]|(\\\\(([a-zA-Z._~!=&\\^\\-\\\\?\'"])|(\\d+)|(x[0-9a-fA-F]+))))*")|(?P<t_ID>[a-zA-Z_$][0-9a-zA-Z_$]*)|(?P<t_STRING_LITERAL>"([^"\\\\\\n]|(\\\\(([a-zA-Z._~!=&\\^\\-\\\\?\'"])|(\\d+)|(x[0-9a-fA-F]+))))*")|(?P<t_ELLIPSIS>\\.\\.\\.)|(?P<t_PLUSPLUS>\\+\\+)|(?P<t_LOR>\\|\\|)|(?P<t_XOREQUAL>\\^=)|(?P<t_OREQUAL>\\|=)|(?P<t_LSHIFTEQUAL><<=)|(?P<t_RSHIFTEQUAL>>>=)|(?P<t_PLUSEQUAL>\\+=)|(?P<t_TIMESEQUAL>\\*=)|(?P<t_PLUS>\\+)|(?P<t_MODEQUAL>%=)|(?P<t_DIVEQUAL>/=)', [None, ('t_WSTRING_LITERAL', 'WSTRING_LITERAL'), None, None, None, None, None, None, ('t_BAD_STRING_LITERAL', 'BAD_STRING_LITERAL'), None, None, None, None, None, None, None, None, None, None, None, None, None, ('t_ID', 'ID'), (None, 'STRING_LITERAL'), None, None, None, None, None, None, (None, 'ELLIPSIS'), (None, 'PLUSPLUS'), (None, 'LOR'), (None, 'XOREQUAL'), (None, 'OREQUAL'), (None, 'LSHIFTEQUAL'), (None, 'RSHIFTEQUAL'), (None, 'PLUSEQUAL'), (None, 'TIMESEQUAL'), (None, 'PLUS'), (None, 'MODEQUAL'), (None, 'DIVEQUAL')]), ('(?P<t_RBRACKET>\\])|(?P<t_CONDOP>\\?)|(?P<t_XOR>\\^)|(?P<t_LSHIFT><<)|(?P<t_LE><=)|(?P<t_LPAREN>\\()|(?P<t_ARROW>->)|(?P<t_EQ>==)|(?P<t_NE>!=)|(?P<t_MINUSMINUS>--)|(?P<t_OR>\\|)|(?P<t_TIMES>\\*)|(?P<t_LBRACKET>\\[)|(?P<t_GE>>=)|(?P<t_RPAREN>\\))|(?P<t_LAND>&&)|(?P<t_RSHIFT>>>)|(?P<t_MINUSEQUAL>-=)|(?P<t_PERIOD>\\.)|(?P<t_ANDEQUAL>&=)|(?P<t_EQUALS>=)|(?P<t_LT><)|(?P<t_COMMA>,)|(?P<t_DIVIDE>/)|(?P<t_AND>&)|(?P<t_MOD>%)|(?P<t_SEMI>;)|(?P<t_MINUS>-)|(?P<t_GT>>)|(?P<t_COLON>:)|(?P<t_NOT>~)|(?P<t_LNOT>!)', [None, (None, 'RBRACKET'), (None, 'CONDOP'), (None, 'XOR'), (None, 'LSHIFT'), (None, 'LE'), (None, 'LPAREN'), (None, 'ARROW'), (None, 'EQ'), (None, 'NE'), (None, 'MINUSMINUS'), (None, 'OR'), (None, 'TIMES'), (None, 'LBRACKET'), (None, 'GE'), 
(None, 'RPAREN'), (None, 'LAND'), (None, 'RSHIFT'), (None, 'MINUSEQUAL'), (None, 'PERIOD'), (None, 'ANDEQUAL'), (None, 'EQUALS'), (None, 'LT'), (None, 'COMMA'), (None, 'DIVIDE'), (None, 'AND'), (None, 'MOD'), (None, 'SEMI'), (None, 'MINUS'), (None, 'GT'), (None, 'COLON'), (None, 'NOT'), (None, 'LNOT')])]}
_lexstateignore = {'ppline': ' \t', 'pppragma': ' \t', 'INITIAL': ' \t'}
_lexstateerrorf = {'ppline': 't_ppline_error', 'pppragma': 't_pppragma_error', 'INITIAL': 't_error'}
_lexstateeoff = {}
| 637.363636 | 5,537 | 0.534589 |
858b5dbbcb7eca6b95fff8297e434aebfe636372 | 3,251 | py | Python | yaep/test/parse/testearley.py | kruskod/nltk | dba7b5431b1d57a75d50e048961c1a203b98c3da | [
"Apache-2.0"
] | 1 | 2015-11-25T00:47:58.000Z | 2015-11-25T00:47:58.000Z | yaep/test/parse/testearley.py | kruskod/nltk | dba7b5431b1d57a75d50e048961c1a203b98c3da | [
"Apache-2.0"
] | null | null | null | yaep/test/parse/testearley.py | kruskod/nltk | dba7b5431b1d57a75d50e048961c1a203b98c3da | [
"Apache-2.0"
] | null | null | null | import unittest
from nltk import CFG
from nltk.grammar import Nonterminal
from yaep.parse.earley import Rule, Grammar, EarleyParser, \
nonterminal_to_term
class TestRule(unittest.TestCase):
    """Unit tests for the ``Rule`` wrapper around CFG productions."""

    def setUp(self):
        # Two identical productions plus one differing production, so the
        # equality/hash tests can compare both like and unlike pairs.
        self.production = CFG.fromstring("S -> A 'b'").productions()[0]
        self.production2 = CFG.fromstring("S -> A 'b'").productions()[0]
        self.production3 = CFG.fromstring("S -> A B").productions()[0]
        self.rule = Rule(self.production.lhs(), self.production.rhs())

    def tearDown(self):
        # Drop references created in setUp.
        self.production = self.production2 = self.production3 = None
        self.rule = None

    def test__eq__(self):
        self.assertEqual(self.rule, Rule(self.production2.lhs(), self.production2.rhs()))
        # assertNotEqual replaces the deprecated failIfEqual alias
        # (deprecated since Python 2.7/3.1, removed in Python 3.12).
        self.assertNotEqual(self.rule, Rule(self.production3.lhs(), self.production3.rhs()))
        self.assertTrue(self.rule != Rule(self.production3.lhs(), self.production3.rhs()))
        self.assertTrue(self.rule.is_nonterminal(0))
        self.assertFalse(self.rule.is_terminal(0))
        self.assertTrue(self.rule.is_terminal(1))
        self.assertFalse(self.rule.is_nonterminal(1))

    def testget_symbol(self):
        self.assertEqual(self.rule.get_symbol(0), Nonterminal("A"))

    def test__hash__(self):
        self.assertEqual(hash(self.rule), hash(Rule(self.production2.lhs(), self.production2.rhs())))
        self.assertNotEqual(hash(self.rule), hash(Rule(self.production3.lhs(), self.production3.rhs())))

    def testLen(self):
        self.assertEqual(len(self.rule), 2)
class TestEarleyParser(unittest.TestCase):
    """Integration-style tests for ``EarleyParser`` driven by grammar.txt."""

    def setUp(self):
        # Two token sequences: with and without the PP-attachment suffix.
        self.tokens1 = ["Mary", "called", "Jan"]
        self.tokens2 = ["Mary", "called", "Jan", "from", "Frankfurt"]
        with open("grammar.txt") as f:
            grammar = CFG.fromstring(f.readlines())
        self.start_nonterminal = nonterminal_to_term(grammar.start())
        earley_grammar = Grammar((Rule(nonterminal_to_term(production.lhs()),
                                       (nonterminal_to_term(fs) for fs in production.rhs())) for production
                                  in grammar.productions()), None)
        self.parser = EarleyParser(earley_grammar)

    def tearDown(self):
        # Fix for copy-pasted tearDown from TestRule: clear the attributes
        # this class actually creates in setUp (the old version nulled
        # production/rule fields that were never set here).
        self.parser = None
        self.start_nonterminal = None
        self.tokens1 = self.tokens2 = None

    def testparse(self):
        self.parse(self.tokens1)
        self.parse(self.tokens2)

    def parse(self, tokens):
        """Parse ``tokens`` and sanity-check the resulting chart manager."""
        chart_manager = self.parser.parse(tokens, self.start_nonterminal)
        # One chart per consumed token plus the initial chart.
        self.assertEqual(len(chart_manager.charts()), len(tokens) + 1)
        self.assertEqual(len(tuple(chart_manager.initial_states())), 1)
        self.assertTrue(chart_manager.is_recognized())
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
ed6c42a12957a5175b302f067f20c8c8d2f00a76 | 8,617 | py | Python | sovtoken/sovtoken/test/test_public_xfer_1.py | ryanwest6/token-plugin | 806ce55517bb545d9a90bfe94bbb0ce250efeb95 | [
"Apache-2.0"
] | null | null | null | sovtoken/sovtoken/test/test_public_xfer_1.py | ryanwest6/token-plugin | 806ce55517bb545d9a90bfe94bbb0ce250efeb95 | [
"Apache-2.0"
] | null | null | null | sovtoken/sovtoken/test/test_public_xfer_1.py | ryanwest6/token-plugin | 806ce55517bb545d9a90bfe94bbb0ce250efeb95 | [
"Apache-2.0"
] | 1 | 2020-05-27T10:06:42.000Z | 2020-05-27T10:06:42.000Z | import pytest
from plenum.common.txn_util import get_seq_no
from plenum.common.exceptions import RequestNackedException
from plenum.common.types import OPERATION
from sovtoken.constants import SIGS, ADDRESS, SEQNO, AMOUNT, OUTPUTS
from sovtoken.test.helper import user1_token_wallet
@pytest.fixture
def addresses(helpers, user1_token_wallet):
    """Create five fresh payment addresses in user1's token wallet."""
    return helpers.wallet.add_new_addresses(user1_token_wallet, 5)
@pytest.fixture
def initial_mint(helpers, addresses):
    """Mint 100 tokens to every address and return the first ledger result."""
    outputs = [{"address": addr, "amount": 100} for addr in addresses]
    request = helpers.request.mint(outputs)
    replies = helpers.sdk.send_and_check_request_objects([request])
    return helpers.sdk.get_first_result(replies)
def test_multiple_inputs_with_1_incorrect_input_sig(  # noqa
    helpers,
    addresses,
    initial_mint,
):
    """A transfer whose second input carries the wrong signature is nacked."""
    seq_no = get_seq_no(initial_mint)
    addr_a, addr_b, addr_c = addresses[:3]
    inputs = [
        {"address": addr_a, "seqNo": seq_no},
        {"address": addr_b, "seqNo": seq_no},
    ]
    outputs = [{"address": addr_c, "amount": 200}]
    request = helpers.request.transfer(inputs, outputs)
    operation = getattr(request, OPERATION)
    # Corrupt the 2nd input's signature by overwriting it with the 1st one's.
    operation[SIGS][1] = operation[SIGS][0]
    with pytest.raises(RequestNackedException):
        helpers.sdk.send_and_check_request_objects([request])
def test_multiple_inputs_with_1_missing_sig(  # noqa
    helpers,
    addresses,
    initial_mint,
):
    """A transfer carrying fewer signatures than inputs is nacked."""
    seq_no = get_seq_no(initial_mint)
    addr_a, addr_b, addr_c = addresses[:3]
    inputs = [
        {"address": addr_a, "seqNo": seq_no},
        {"address": addr_b, "seqNo": seq_no},
    ]
    outputs = [{"address": addr_c, "amount": 200}]
    request = helpers.request.transfer(inputs, outputs)
    # Drop the signature for the 2nd input: 2 inputs, only 1 signature left.
    request.operation[SIGS].pop()
    assert len(request.operation[SIGS]) == len(inputs) - 1
    with pytest.raises(RequestNackedException):
        helpers.sdk.send_and_check_request_objects([request])
def test_inputs_contain_signature_not_in_inputs(
    helpers,
    addresses,
    initial_mint
):
    """A transfer signed by an address that is not among the inputs is nacked."""
    seq_no = get_seq_no(initial_mint)
    addr_a, addr_b, addr_c, addr_d = addresses[:4]
    inputs = [
        {"address": addr_a, "seqNo": seq_no},
        {"address": addr_b, "seqNo": seq_no},
    ]
    outputs = [{"address": addr_c, "amount": 200}]
    request = helpers.request.transfer(inputs, outputs)
    # Replace the 2nd signature with one produced by an unrelated address.
    foreign_sig = helpers.wallet.payment_signatures(
        [{"address": addr_d, "seqNo": seq_no}],
        outputs
    )[0]
    request.operation[SIGS][1] = foreign_sig
    assert len(request.operation[SIGS]) == len(inputs)
    with pytest.raises(RequestNackedException):
        helpers.sdk.send_and_check_request_objects([request])
def test_empty_xfer(helpers):
    """A transfer with no inputs and no outputs is nacked."""
    identifier = "5oXnyuywuz6TvnMDXjjGUm47gToPzdCKZbDvsNdYB4Cy"
    with pytest.raises(RequestNackedException):
        helpers.general.do_transfer([], [], identifier=identifier)
def test_invalid_output_numeric_amounts(helpers, addresses, initial_mint):
    """
    Test transfer with different invalid numeric amounts.

    Each case spends address1's 100-token UTXO but includes one invalid
    amount (float, None, string, negative, zero, or missing); every case
    must be nacked. The copy-pasted try/except blocks were folded into a
    data-driven loop.
    """
    [address1, address2, *_] = addresses
    seq_no = get_seq_no(initial_mint)
    inputs = [{ADDRESS: address1, SEQNO: seq_no}]

    # Invalid cases, in the original order:
    # floats, None value, string number, negative number, zero value.
    invalid_outputs = [
        [{ADDRESS: address2, AMOUNT: 40.5}, {ADDRESS: address1, AMOUNT: 59.5}],
        [{ADDRESS: address2, AMOUNT: 100}, {ADDRESS: address1, AMOUNT: None}],
        [{ADDRESS: address2, AMOUNT: 80}, {ADDRESS: address1, AMOUNT: "20"}],
        [{ADDRESS: address2, AMOUNT: -50}, {ADDRESS: address1, AMOUNT: 150}],
        [{ADDRESS: address1, AMOUNT: 100}, {ADDRESS: address2, AMOUNT: 0}],
    ]
    for outputs in invalid_outputs:
        with pytest.raises(RequestNackedException):
            helpers.general.do_transfer(inputs, outputs)

    # Output missing the amount field entirely.
    outputs = [{ADDRESS: address1, AMOUNT: 100}, {ADDRESS: address2, AMOUNT: 0}]
    request = helpers.request.transfer(inputs, outputs)
    request.operation[OUTPUTS][1].pop(AMOUNT)
    with pytest.raises(RequestNackedException):
        helpers.sdk.send_and_check_request_objects([request])
def test_invalid_input_seq_no(helpers, addresses, initial_mint):
    """
    Test transfer with different invalid numeric seq_no.
    """
    [address1, address2, *_] = addresses
    seq_no = get_seq_no(initial_mint)
    outputs = [{ADDRESS: address2, AMOUNT: 100}]

    # Zero, negative, stringified, None, and float sequence numbers must
    # all be rejected (same cases and order as before).
    for bad_seq_no in (0, -1, str(seq_no), None, 1.0):
        inputs = [{ADDRESS: address1, SEQNO: bad_seq_no}]
        with pytest.raises(RequestNackedException):
            helpers.general.do_transfer(inputs, outputs)
def test_multiple_inputs_outputs_without_change(
    helpers,
    addresses,
    initial_mint
):
    """Spend three 100-token UTXOs into two outputs (200 + 100) with no
    change output, then verify the resulting UTXO state of every address."""
    [address1, address2, address3, address4, address5] = addresses
    mint_seq_no = get_seq_no(initial_mint)
    inputs = [
        {"address": address1, "seqNo": mint_seq_no},
        {"address": address2, "seqNo": mint_seq_no},
        {"address": address3, "seqNo": mint_seq_no},
    ]
    outputs = [
        {"address": address4, "amount": 200},
        {"address": address5, "amount": 100},
    ]

    request = helpers.request.transfer(inputs, outputs)
    response = helpers.sdk.send_and_check_request_objects([request])
    # The committed write must carry request signatures.
    assert response[0][1]["result"]["reqSignature"] != {}
    result = helpers.sdk.get_first_result(response)
    xfer_seq_no = get_seq_no(result)

    [
        address1_utxos,
        address2_utxos,
        address3_utxos,
        address4_utxos,
        address5_utxos
    ] = helpers.general.get_utxo_addresses(addresses)

    # Spent inputs are emptied; each recipient keeps its original mint UTXO
    # plus the new transfer UTXO.
    assert address1_utxos == []
    assert address2_utxos == []
    assert address3_utxos == []
    assert address4_utxos == [
        {"address": address4, "seqNo": mint_seq_no, "amount": 100},
        {"address": address4, "seqNo": xfer_seq_no, "amount": 200},
    ]
    assert address5_utxos == [
        {"address": address5, "seqNo": mint_seq_no, "amount": 100},
        {"address": address5, "seqNo": xfer_seq_no, "amount": 100},
    ]
def test_multiple_inputs_outputs_with_change(
    helpers,
    addresses,
    initial_mint,
    user1_token_wallet,  # NOTE(review): not referenced in the body — presumably kept for fixture setup; confirm
):
    """Spend three 100-token UTXOs into 270 + 10 plus 20 tokens of change
    back to the first input address, then verify every address's UTXOs."""
    [address1, address2, address3, address4, address5] = addresses
    mint_seq_no = get_seq_no(initial_mint)
    inputs = [
        {"address": address1, "seqNo": mint_seq_no},
        {"address": address2, "seqNo": mint_seq_no},
        {"address": address3, "seqNo": mint_seq_no},
    ]
    outputs = [
        {"address": address4, "amount": 270},
        {"address": address5, "amount": 10},
        {"address": address1, "amount": 20},
    ]

    request = helpers.request.transfer(inputs, outputs)
    response = helpers.sdk.send_and_check_request_objects([request])
    # The committed write must carry request signatures.
    assert response[0][1]["result"]["reqSignature"] != {}
    result = helpers.sdk.get_first_result(response)
    xfer_seq_no = get_seq_no(result)

    [
        address1_utxos,
        address2_utxos,
        address3_utxos,
        address4_utxos,
        address5_utxos
    ] = helpers.general.get_utxo_addresses(addresses)

    # address1 keeps only its 20-token change UTXO; the other spent inputs
    # are emptied, and recipients gain the new transfer UTXOs.
    assert address1_utxos == [{"address": address1, "seqNo": xfer_seq_no, "amount": 20}]
    assert address2_utxos == []
    assert address3_utxos == []
    assert address4_utxos == [
        {"address": address4, "seqNo": mint_seq_no, "amount": 100},
        {"address": address4, "seqNo": xfer_seq_no, "amount": 270},
    ]
    assert address5_utxos == [
        {"address": address5, "seqNo": mint_seq_no, "amount": 100},
        {"address": address5, "seqNo": xfer_seq_no, "amount": 10},
    ]
| 30.775 | 103 | 0.675989 |
86a439915c059c017a27f618c5a51ac97b8cdf6e | 15,707 | py | Python | tests/integration/fileserver/fileclient_test.py | cbosdo/salt-1 | 9084d662781f9c0944804ba087e652c2ddb730bf | [
"Apache-2.0"
] | null | null | null | tests/integration/fileserver/fileclient_test.py | cbosdo/salt-1 | 9084d662781f9c0944804ba087e652c2ddb730bf | [
"Apache-2.0"
] | null | null | null | tests/integration/fileserver/fileclient_test.py | cbosdo/salt-1 | 9084d662781f9c0944804ba087e652c2ddb730bf | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Mike Place <mp@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import
import errno
import logging
import os
import shutil
log = logging.getLogger(__name__)
# Import Salt Testing libs
from salttesting.unit import skipIf
from salttesting.helpers import ensure_in_syspath, destructiveTest
from salttesting.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON
ensure_in_syspath('../..')
# Import salt libs
import integration
import salt.utils
from salt import fileclient
from salt.ext import six
# Saltenvs exercised by the caching tests below.
SALTENVS = ('base', 'dev')
# On-disk fileserver root and minion cache root used by FileclientCacheTest.
FS_ROOT = os.path.join(integration.TMP, 'fileclient_fs_root')
CACHE_ROOT = os.path.join(integration.TMP, 'fileclient_cache_root')
# Subdirectory (and the files inside it) created in each saltenv root.
SUBDIR = 'subdir'
SUBDIR_FILES = ('foo.txt', 'bar.txt', 'baz.txt')
def _get_file_roots():
    """Map each test saltenv to its directory under FS_ROOT.

    Returns a ``file_roots``-style dict, e.g. ``{'base': ['<FS_ROOT>/base']}``.
    """
    # Dict comprehension instead of dict() over a list of tuples.
    return {saltenv: [os.path.join(FS_ROOT, saltenv)] for saltenv in SALTENVS}
fileclient.__opts__ = {}  # ensure the module-level opts dict exists before patching

# Minimal opts used to patch fileclient.__opts__ so tests use the on-the-fly
# 'roots' fileserver created in FileclientCacheTest.setUp().
MOCKED_OPTS = {
    'file_roots': _get_file_roots(),
    'fileserver_backend': ['roots'],
    'cachedir': CACHE_ROOT,
    'file_client': 'local',
}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class FileClientTest(integration.ModuleCase):
    '''
    Tests for the abstract base fileclient.Client class.
    '''

    def setUp(self):
        # Base Client built from the master opts; shared by the tests below.
        self.file_client = fileclient.Client(self.master_opts)

    def test_file_list_emptydirs(self):
        '''
        Ensure that the fileclient class won't allow a direct call to file_list_emptydirs()
        '''
        with self.assertRaises(NotImplementedError):
            self.file_client.file_list_emptydirs()

    def test_get_file(self):
        '''
        Ensure that the fileclient class won't allow a direct call to get_file()
        '''
        with self.assertRaises(NotImplementedError):
            self.file_client.get_file(None)

    def test_get_file_client(self):
        # With file_client: remote, get_file_client() must hand back a
        # RemoteClient (mocked here so no master connection is attempted).
        with patch.dict(self.get_config('minion', from_scratch=True), {'file_client': 'remote'}):
            with patch('salt.fileclient.RemoteClient', MagicMock(return_value='remote_client')):
                ret = fileclient.get_file_client(self.minion_opts)
                self.assertEqual('remote_client', ret)
@skipIf(NO_MOCK, NO_MOCK_REASON)
@destructiveTest
class FileclientCacheTest(integration.ModuleCase):
'''
Tests for the fileclient caching. The LocalClient is the only thing we can
test as it is the only way we can mock the fileclient (the tests run from
the minion process, so the master cannot be mocked from test code).
'''
    def setUp(self):
        '''
        No need to add a dummy foo.txt to muddy up the github repo, just make
        our own fileserver root on-the-fly.
        '''
        def _new_dir(path):
            '''
            Add a new dir at ``path`` using os.makedirs. If the directory
            already exists, remove it recursively and then try to create it
            again.
            '''
            try:
                os.makedirs(path)
            except OSError as exc:
                if exc.errno == errno.EEXIST:
                    # Just in case a previous test was interrupted, remove the
                    # directory and try adding it again.
                    shutil.rmtree(path)
                    os.makedirs(path)
                else:
                    raise

        # Create the FS_ROOT
        for saltenv in SALTENVS:
            saltenv_root = os.path.join(FS_ROOT, saltenv)
            # Make sure we have a fresh root dir for this saltenv
            _new_dir(saltenv_root)

            # foo.txt at the top of the saltenv root; its content names the
            # saltenv so cached copies can be attributed to their source env.
            path = os.path.join(saltenv_root, 'foo.txt')
            with salt.utils.fopen(path, 'w') as fp_:
                fp_.write(
                    'This is a test file in the \'{0}\' saltenv.\n'
                    .format(saltenv)
                )

            # A subdir with several files, each likewise tagged with its
            # saltenv in the file body.
            subdir_abspath = os.path.join(saltenv_root, SUBDIR)
            os.makedirs(subdir_abspath)
            for subdir_file in SUBDIR_FILES:
                path = os.path.join(subdir_abspath, subdir_file)
                with salt.utils.fopen(path, 'w') as fp_:
                    fp_.write(
                        'This is file \'{0}\' in subdir \'{1} from saltenv '
                        '\'{2}\''.format(subdir_file, SUBDIR, saltenv)
                    )

        # Create the CACHE_ROOT
        _new_dir(CACHE_ROOT)
def tearDown(self):
'''
Remove the directories created for these tests
'''
shutil.rmtree(FS_ROOT)
shutil.rmtree(CACHE_ROOT)
    def test_cache_dir(self):
        '''
        Ensure entire directory is cached to correct location (under the
        default cachedir, i.e. cachedir=None).
        '''
        # Copy of the minion opts overlaid with the mocked roots fileserver.
        patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
        patched_opts.update(MOCKED_OPTS)

        with patch.dict(fileclient.__opts__, patched_opts):
            client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
            for saltenv in SALTENVS:
                self.assertTrue(
                    client.cache_dir(
                        'salt://{0}'.format(SUBDIR),
                        saltenv,
                        cachedir=None
                    )
                )
                for subdir_file in SUBDIR_FILES:
                    cache_loc = os.path.join(fileclient.__opts__['cachedir'],
                                             'files',
                                             saltenv,
                                             SUBDIR,
                                             subdir_file)
                    # Double check that the content of the cached file
                    # identifies it as being from the correct saltenv. The
                    # setUp function creates the file with the name of the
                    # saltenv mentioned in the file, so a simple 'in' check is
                    # sufficient here. If opening the file raises an exception,
                    # this is a problem, so we are not catching the exception
                    # and letting it be raised so that the test fails.
                    with salt.utils.fopen(cache_loc) as fp_:
                        content = fp_.read()
                    log.debug('cache_loc = %s', cache_loc)
                    log.debug('content = %s', content)
                    self.assertTrue(subdir_file in content)
                    self.assertTrue(SUBDIR in content)
                    self.assertTrue(saltenv in content)
    def test_cache_dir_with_alternate_cachedir_and_absolute_path(self):
        '''
        Ensure entire directory is cached to correct location when an alternate
        cachedir is specified and that cachedir is an absolute path (an
        absolute cachedir is used as-is, not joined under opts['cachedir']).
        '''
        patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
        patched_opts.update(MOCKED_OPTS)
        alt_cachedir = os.path.join(integration.TMP, 'abs_cachedir')

        with patch.dict(fileclient.__opts__, patched_opts):
            client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
            for saltenv in SALTENVS:
                self.assertTrue(
                    client.cache_dir(
                        'salt://{0}'.format(SUBDIR),
                        saltenv,
                        cachedir=alt_cachedir
                    )
                )
                for subdir_file in SUBDIR_FILES:
                    cache_loc = os.path.join(alt_cachedir,
                                             'files',
                                             saltenv,
                                             SUBDIR,
                                             subdir_file)
                    # Double check that the content of the cached file
                    # identifies it as being from the correct saltenv. The
                    # setUp function creates the file with the name of the
                    # saltenv mentioned in the file, so a simple 'in' check is
                    # sufficient here. If opening the file raises an exception,
                    # this is a problem, so we are not catching the exception
                    # and letting it be raised so that the test fails.
                    with salt.utils.fopen(cache_loc) as fp_:
                        content = fp_.read()
                    log.debug('cache_loc = %s', cache_loc)
                    log.debug('content = %s', content)
                    self.assertTrue(subdir_file in content)
                    self.assertTrue(SUBDIR in content)
                    self.assertTrue(saltenv in content)
def test_cache_dir_with_alternate_cachedir_and_relative_path(self):
'''
Ensure entire directory is cached to correct location when an alternate
cachedir is specified and that cachedir is a relative path
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(MOCKED_OPTS)
alt_cachedir = 'foo'
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_dir(
'salt://{0}'.format(SUBDIR),
saltenv,
cachedir=alt_cachedir
)
)
for subdir_file in SUBDIR_FILES:
cache_loc = os.path.join(fileclient.__opts__['cachedir'],
alt_cachedir,
'files',
saltenv,
SUBDIR,
subdir_file)
# Double check that the content of the cached file
# identifies it as being from the correct saltenv. The
# setUp function creates the file with the name of the
# saltenv mentioned in the file, so a simple 'in' check is
# sufficient here. If opening the file raises an exception,
# this is a problem, so we are not catching the exception
# and letting it be raised so that the test fails.
with salt.utils.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(subdir_file in content)
self.assertTrue(SUBDIR in content)
self.assertTrue(saltenv in content)
def test_cache_file(self):
'''
Ensure file is cached to correct location
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(MOCKED_OPTS)
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_file('salt://foo.txt', saltenv, cachedir=None)
)
cache_loc = os.path.join(
fileclient.__opts__['cachedir'], 'files', saltenv, 'foo.txt')
# Double check that the content of the cached file identifies
# it as being from the correct saltenv. The setUp function
# creates the file with the name of the saltenv mentioned in
# the file, so a simple 'in' check is sufficient here. If
# opening the file raises an exception, this is a problem, so
# we are not catching the exception and letting it be raised so
# that the test fails.
with salt.utils.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(saltenv in content)
def test_cache_file_with_alternate_cachedir_and_absolute_path(self):
'''
Ensure file is cached to correct location when an alternate cachedir is
specified and that cachedir is an absolute path
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(MOCKED_OPTS)
alt_cachedir = os.path.join(integration.TMP, 'abs_cachedir')
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_file('salt://foo.txt',
saltenv,
cachedir=alt_cachedir)
)
cache_loc = os.path.join(alt_cachedir,
'files',
saltenv,
'foo.txt')
# Double check that the content of the cached file identifies
# it as being from the correct saltenv. The setUp function
# creates the file with the name of the saltenv mentioned in
# the file, so a simple 'in' check is sufficient here. If
# opening the file raises an exception, this is a problem, so
# we are not catching the exception and letting it be raised so
# that the test fails.
with salt.utils.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(saltenv in content)
def test_cache_file_with_alternate_cachedir_and_relative_path(self):
'''
Ensure file is cached to correct location when an alternate cachedir is
specified and that cachedir is a relative path
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(MOCKED_OPTS)
alt_cachedir = 'foo'
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_file('salt://foo.txt',
saltenv,
cachedir=alt_cachedir)
)
cache_loc = os.path.join(fileclient.__opts__['cachedir'],
alt_cachedir,
'files',
saltenv,
'foo.txt')
# Double check that the content of the cached file identifies
# it as being from the correct saltenv. The setUp function
# creates the file with the name of the saltenv mentioned in
# the file, so a simple 'in' check is sufficient here. If
# opening the file raises an exception, this is a problem, so
# we are not catching the exception and letting it be raised so
# that the test fails.
with salt.utils.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(saltenv in content)
if __name__ == '__main__':
    # Allow running this test module directly, outside the normal test runner.
    from integration import run_tests
    run_tests(FileClientTest)
| 43.752089 | 97 | 0.544343 |
3f9048c334fd74983fda33bd5ce2ae927ab4496d | 12,607 | py | Python | modules/xia2/Modules/Report/__init__.py | jorgediazjr/dials-dev20191018 | 77d66c719b5746f37af51ad593e2941ed6fbba17 | [
"BSD-3-Clause"
] | null | null | null | modules/xia2/Modules/Report/__init__.py | jorgediazjr/dials-dev20191018 | 77d66c719b5746f37af51ad593e2941ed6fbba17 | [
"BSD-3-Clause"
] | null | null | null | modules/xia2/Modules/Report/__init__.py | jorgediazjr/dials-dev20191018 | 77d66c719b5746f37af51ad593e2941ed6fbba17 | [
"BSD-3-Clause"
] | 1 | 2020-02-04T15:39:06.000Z | 2020-02-04T15:39:06.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import os
from collections import OrderedDict
from six.moves import cStringIO as StringIO
import xia2.Handlers.Environment
import xia2.Handlers.Files
from cctbx.array_family import flex
import libtbx.phil
from iotbx import merging_statistics
from iotbx.reflection_file_reader import any_reflection_file
from mmtbx.scaling import printed_output
from dials.util.batch_handling import batch_manager
from dials.report.analysis import batch_dependent_properties
from dials.report.plots import (
i_over_sig_i_vs_batch_plot,
scale_rmerge_vs_batch_plot,
ResolutionPlotsAndStats,
IntensityStatisticsPlots,
)
from xia2.Modules.Analysis import batch_phil_scope, phil_scope, separate_unmerged
class xtriage_output(printed_output):
    """printed_output subclass that captures xtriage text per sub-header.

    Output written between two show_sub_header() calls is collected into a
    dedicated StringIO, keyed by the sub-header title in
    ``_sub_header_to_out``, so each xtriage section's text can later be
    looked up individually (see Report.xtriage_report).
    """
    def __init__(self, out):
        super(xtriage_output, self).__init__(out)
        self.gui_output = True
        # Keep the real sink; self.out is swapped for a fresh StringIO so we
        # can intercept everything written until the next (sub-)header.
        self._out_orig = self.out
        self.out = StringIO()
        self._sub_header_to_out = {}
    def show_big_header(self, text):
        # Suppress the large banner headers entirely.
        pass
    def show_header(self, text):
        # Flush whatever was buffered so far to the real sink, then start a
        # fresh buffer for the new section.
        self._out_orig.write(self.out.getvalue())
        self.out = StringIO()
        super(xtriage_output, self).show_header(text)
    def show_sub_header(self, title):
        # Flush the previous buffer, then record the new buffer under the
        # sub-header title; each title must only occur once.
        self._out_orig.write(self.out.getvalue())
        self.out = StringIO()
        self._current_sub_header = title
        assert title not in self._sub_header_to_out
        self._sub_header_to_out[title] = self.out
    def flush(self):
        # Push any remaining buffered text through to the real sink.
        self._out_orig.write(self.out.getvalue())
        self.out.flush()
        self._out_orig.flush()
class Report(object):
    """Build merging-statistics tables and plot data for a reflection dataset.

    Wraps an (unmerged) intensity array plus optional per-observation batch,
    scale and dose arrays, and produces the tables/plot dictionaries used by
    the xia2 HTML report.
    """
    def __init__(
        self, intensities, params, batches=None, scales=None, dose=None, report_dir=None
    ):
        """Store inputs, derive per-run batch ranges and merged intensities.

        :param intensities: unmerged miller array of intensities (required)
        :param params: extracted phil params (see xia2.Modules.Analysis)
        :param batches: optional miller array of batch numbers
        :param scales: optional miller array of applied scale factors
        :param dose: optional miller array of dose values
        :param report_dir: directory to write log/image files into
        """
        self.params = params
        self.intensities = intensities
        self.batches = batches
        self.scales = scales
        self.dose = dose
        self.report_dir = report_dir
        self._xanalysis = None
        assert self.intensities is not None
        # assert self.batches is not None
        # If the caller did not describe the batch layout explicitly, derive
        # one batch range per contiguous run from the batch array itself.
        if self.batches is not None and len(self.params.batch) == 0:
            separate = separate_unmerged(self.intensities, self.batches)
            scope = libtbx.phil.parse(batch_phil_scope)
            for i, batches in separate.batches.iteritems():
                batch_params = scope.extract().batch[0]
                batch_params.id = i
                batch_params.range = (
                    flex.min(batches.data()),
                    flex.max(batches.data()),
                )
                self.params.batch.append(batch_params)
        if self.params.anomalous:
            self.intensities = self.intensities.as_anomalous_array()
            if self.batches is not None:
                self.batches = self.batches.as_anomalous_array()
        self.intensities.setup_binner(n_bins=self.params.resolution_bins)
        self.merged_intensities = self.intensities.merge_equivalents().array()
    def multiplicity_plots(self):
        """Render h/k/l multiplicity slice images and return them base64-encoded.

        Writes one PNG + JSON per slice axis into report_dir (or cwd) and
        returns an OrderedDict mapping 'multiplicity_<axis>' to base64 PNG
        data suitable for embedding in HTML.
        """
        from xia2.command_line.plot_multiplicity import plot_multiplicity, master_phil
        settings = master_phil.extract()
        settings.size_inches = (5, 5)
        settings.show_missing = True
        settings.slice_index = 0
        mult_json_files = {}
        mult_img_files = {}
        rd = self.report_dir or "."
        for settings.slice_axis in ("h", "k", "l"):
            settings.plot.filename = os.path.join(
                rd,
                "multiplicities_%s_%i.png"
                % (settings.slice_axis, settings.slice_index),
            )
            settings.json.filename = os.path.join(
                rd,
                "multiplicities_%s_%i.json"
                % (settings.slice_axis, settings.slice_index),
            )
            plot_multiplicity(self.intensities, settings)
            mult_json_files[settings.slice_axis] = settings.json.filename
            # NOTE: str.encode("base64") is Python 2 only, consistent with the
            # rest of this module (e.g. dict.iteritems above).
            with open(settings.plot.filename, "rb") as fh:
                mult_img_files[settings.slice_axis] = (
                    fh.read().encode("base64").replace("\n", "")
                )
        return OrderedDict(
            ("multiplicity_%s" % axis, mult_img_files[axis]) for axis in ("h", "k", "l")
        )
    def symmetry_table_html(self):
        """Return an HTML snippet stating the unit cell and space group.

        Bug fix: the two format arguments were previously swapped, labelling
        the space group symbol as the unit cell and vice versa.
        """
        symmetry_table_html = """
  <p>
    <b>Unit cell:</b> %s
    <br>
    <b>Space group:</b> %s
  </p>
""" % (
            str(self.intensities.unit_cell()),
            self.intensities.space_group_info().symbol_and_number(),
        )
        return symmetry_table_html
    def xtriage_report(self):
        """Run xtriage and bucket its findings by severity.

        Returns three lists of dicts (success, warnings, danger), each dict
        holding the issue level, text, section summary and header. Also
        stores the analysis object on self._xanalysis for reuse by
        intensity_stats_plots, and writes xtriage.log into report_dir when
        one is set.
        """
        xtriage_success = []
        xtriage_warnings = []
        xtriage_danger = []
        s = StringIO()
        pout = printed_output(out=s)
        from mmtbx.scaling.xtriage import xtriage_analyses
        from mmtbx.scaling.xtriage import master_params as xtriage_master_params
        xtriage_params = xtriage_master_params.fetch(sources=[]).extract()
        xtriage_params.scaling.input.xray_data.skip_sanity_checks = True
        xanalysis = xtriage_analyses(
            miller_obs=self.merged_intensities,
            unmerged_obs=self.intensities,
            text_out=pout,
            params=xtriage_params,
        )
        if self.report_dir is not None:
            with open(os.path.join(self.report_dir, "xtriage.log"), "wb") as f:
                f.write(s.getvalue())
            xia2.Handlers.Files.FileHandler.record_log_file(
                "Xtriage", os.path.join(self.report_dir, "xtriage.log")
            )
        # Re-run the display through xtriage_output so each sub-section's
        # text can be attached to the matching issue below.
        xs = StringIO()
        xout = xtriage_output(xs)
        xanalysis.show(out=xout)
        xout.flush()
        sub_header_to_out = xout._sub_header_to_out
        issues = xanalysis.summarize_issues()
        for level, text, sub_header in issues._issues:
            summary = sub_header_to_out.get(sub_header, StringIO()).getvalue()
            d = {"level": level, "text": text, "summary": summary, "header": sub_header}
            if level == 0:
                xtriage_success.append(d)
            elif level == 1:
                xtriage_warnings.append(d)
            elif level == 2:
                xtriage_danger.append(d)
        self._xanalysis = xanalysis
        return xtriage_success, xtriage_warnings, xtriage_danger
    def batch_dependent_plots(self):
        """Return plotly dicts of I/sigI, Rmerge and scale factor vs batch."""
        binned_batches, rmerge, isigi, scalesvsbatch = batch_dependent_properties(
            self.batches, self.intensities, self.scales
        )
        batches = [{"id": b.id, "range": b.range} for b in self.params.batch]
        bm = batch_manager(binned_batches, batches)
        d = {}
        d.update(i_over_sig_i_vs_batch_plot(bm, isigi))
        d.update(scale_rmerge_vs_batch_plot(bm, rmerge, scalesvsbatch))
        return d
    def resolution_plots_and_stats(self):
        """Compute merging statistics and resolution-dependent plots.

        Stores merging_stats / merging_stats_anom on self and returns
        (overall statistics table, per-shell merging statistics table,
        dict of plotly plots).
        """
        self.merging_stats = merging_statistics.dataset_statistics(
            self.intensities,
            n_bins=self.params.resolution_bins,
            cc_one_half_significance_level=self.params.cc_half_significance_level,
            eliminate_sys_absent=self.params.eliminate_sys_absent,
            use_internal_variance=self.params.use_internal_variance,
            assert_is_not_unique_set_under_symmetry=False,
        )
        intensities_anom = self.intensities.as_anomalous_array()
        intensities_anom = intensities_anom.map_to_asu().customized_copy(
            info=self.intensities.info()
        )
        self.merging_stats_anom = merging_statistics.dataset_statistics(
            intensities_anom,
            n_bins=self.params.resolution_bins,
            anomalous=True,
            cc_one_half_significance_level=self.params.cc_half_significance_level,
            eliminate_sys_absent=self.params.eliminate_sys_absent,
            use_internal_variance=self.params.use_internal_variance,
            assert_is_not_unique_set_under_symmetry=False,
        )
        is_centric = self.intensities.space_group().is_centric()
        plotter = ResolutionPlotsAndStats(
            self.merging_stats, self.merging_stats_anom, is_centric
        )
        d = OrderedDict()
        d.update(plotter.cc_one_half_plot(method=self.params.cc_half_method))
        d.update(plotter.i_over_sig_i_plot())
        d.update(plotter.completeness_plot())
        d.update(plotter.multiplicity_vs_resolution_plot())
        overall_stats = plotter.overall_statistics_table(self.params.cc_half_method)
        merging_stats = plotter.merging_statistics_table(self.params.cc_half_method)
        return overall_stats, merging_stats, d
    def intensity_stats_plots(self, run_xtriage=True):
        """Return miscellaneous intensity-statistics plots as plotly dicts.

        Reuses a previously-computed xtriage analysis (self._xanalysis) when
        available; set run_xtriage=False to skip running it afresh.
        """
        plotter = IntensityStatisticsPlots(
            self.intensities,
            anomalous=self.params.anomalous,
            n_resolution_bins=self.params.resolution_bins,
            xtriage_analyses=self._xanalysis,
            run_xtriage_analysis=run_xtriage,
        )
        d = {}
        d.update(plotter.generate_resolution_dependent_plots())
        d.update(plotter.generate_miscellanous_plots())
        return d
    def pychef_plots(self, n_bins=8):
        """Return dose/damage (CHEF) statistics as a plot dict.

        When params.chef_min_completeness is set, the data are first cut at
        the resolution giving that completeness.
        """
        import dials.pychef
        intensities = self.intensities
        batches = self.batches
        dose = self.dose
        if self.params.chef_min_completeness:
            # NOTE(review): self.unmerged_mtz is not set anywhere in this
            # class (from_unmerged_mtz sets self.mtz_object) -- this branch
            # looks like it would raise AttributeError; confirm with callers.
            d_min = dials.pychef.resolution_limit(
                mtz_file=self.unmerged_mtz,
                min_completeness=self.params.chef_min_completeness,
                n_bins=n_bins,
            )
            print("Estimated d_min for CHEF analysis: %.2f" % d_min)
            sel = flex.bool(intensities.size(), True)
            d_spacings = intensities.d_spacings().data()
            sel &= d_spacings >= d_min
            intensities = intensities.select(sel)
            batches = batches.select(sel)
            if dose is not None:
                dose = dose.select(sel)
        if dose is None:
            dose = dials.pychef.batches_to_dose(batches.data(), self.params.dose)
        else:
            dose = dose.data()
        pychef_stats = dials.pychef.Statistics(intensities, dose, n_bins=n_bins)
        return pychef_stats.to_dict()
    @classmethod
    def from_unmerged_mtz(cls, unmerged_mtz, params, report_dir=None):
        """Construct a Report from an unmerged MTZ file.

        Reads intensity, batch and (optional) scale columns, restores the
        original (unmerged) miller indices, and attaches the mtz object on
        the returned report as .mtz_object.
        """
        reader = any_reflection_file(unmerged_mtz)
        assert reader.file_type() == "ccp4_mtz"
        arrays = reader.as_miller_arrays(merge_equivalents=False)
        # Pre-initialise so a missing column gives a clean assertion (or a
        # None scales array) instead of an unrelated NameError.
        batches = None
        intensities = None
        scales = None
        for ma in arrays:
            if ma.info().labels == ["BATCH"]:
                batches = ma
            elif ma.info().labels == ["I", "SIGI"]:
                intensities = ma
            elif ma.info().labels == ["I(+)", "SIGI(+)", "I(-)", "SIGI(-)"]:
                intensities = ma
            elif ma.info().labels == ["SCALEUSED"]:
                scales = ma
        assert intensities is not None
        assert batches is not None
        mtz_object = reader.file_content()
        # First crystal name that isn't the HKL_base placeholder, else a
        # default (Python 2 filter() returns a list here).
        crystal_name = (
            filter(
                lambda c: c != "HKL_base",
                map(lambda c: c.name(), mtz_object.crystals()),
            )
            or ["DEFAULT"]
        )[0]
        report_dir = (
            report_dir
            or xia2.Handlers.Environment.Environment.generate_directory(
                [crystal_name, "report"]
            )
        )
        indices = mtz_object.extract_original_index_miller_indices()
        intensities = intensities.customized_copy(
            indices=indices, info=intensities.info()
        )
        batches = batches.customized_copy(indices=indices, info=batches.info())
        report = cls(
            intensities, params, batches=batches, scales=scales, report_dir=report_dir
        )
        report.mtz_object = mtz_object  # nasty but xia2.report relies on this attribute
        return report
    @classmethod
    def from_data_manager(cls, data_manager, params=None):
        """Construct a Report from a dials data manager (scaled experiments)."""
        if params is None:
            params = phil_scope.extract()
            params.dose.batch = []
        intensities, batches, scales = data_manager.reflections_as_miller_arrays(
            combined=True
        )
        params.batch = []
        scope = libtbx.phil.parse(batch_phil_scope)
        for expt in data_manager.experiments:
            batch_params = scope.extract().batch[0]
            batch_params.id = expt.identifier
            batch_params.range = expt.scan.get_batch_range()
            params.batch.append(batch_params)
        intensities.set_observation_type_xray_intensity()
        return cls(intensities, params, batches=batches, scales=scales)
| 36.542029 | 88 | 0.626398 |
a627bf4b29253b5505aa4933ff01f67e985a077d | 4,229 | py | Python | grapheditor/settings.py | Chudopal/Graph_editor | 133cced79d723b8b77cceffd5c44485bbdbb0822 | [
"MIT"
] | 4 | 2020-05-25T15:20:49.000Z | 2020-06-13T14:22:40.000Z | grapheditor/settings.py | Chudopal/Graph_editor | 133cced79d723b8b77cceffd5c44485bbdbb0822 | [
"MIT"
] | 4 | 2021-03-30T13:42:22.000Z | 2021-09-22T19:08:20.000Z | grapheditor/settings.py | Chudopal/Graph_editor | 133cced79d723b8b77cceffd5c44485bbdbb0822 | [
"MIT"
] | 1 | 2021-04-15T02:52:35.000Z | 2021-04-15T02:52:35.000Z | """
Django settings for grapheditor project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
#SECRET_KEY = 'akv&dlfj7sx69vz#k=sqj)n8ca=63qg&$rp!+v-byb8*n_eks7'
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'akv&dlfj7sx69vz#k=sqj)n8ca=63qg&$rp!+v-byb8*n_eks7')
# SECURITY WARNING: don't run with debug turned on in production!
#DEBUG = True
DEBUG = bool( os.environ.get('DJANGO_DEBUG', True))
ALLOWED_HOSTS = []
# Application definition

# Standard contrib apps plus the project's single app ('graph').
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'graph',
]

# Order matters: WhiteNoise sits directly after SecurityMiddleware so it can
# serve static files early, per the whitenoise documentation.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'grapheditor.urls'

# Default Django template engine; APP_DIRS makes each app's templates/
# directory discoverable.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'grapheditor.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases

# Local SQLite by default; this is overridden by dj_database_url at the end
# of this file whenever a DATABASE_URL environment variable is present.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/

# NOTE(review): STATIC_URL is assigned again near the end of this file with
# the same value; harmless but one of the two should be removed.
STATIC_URL = '/static/'

# Extra locations collectstatic/runserver search in addition to each app's
# static/ directory.
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static"),
    'graph/static',
]

#STATIC_ROOT = "/var/www/example.com/static/"

# User-uploaded files live under MEDIA_ROOT and are served from MEDIA_URL.
MEDIA_ROOT = os.path.join(BASE_DIR, "media")

MEDIA_URL = '/media/'
# Heroku: Update database configuration from $DATABASE_URL.
import dj_database_url
db_from_env = dj_database_url.config(conn_max_age=500)
# Replaces the SQLite default defined above whenever DATABASE_URL is set.
DATABASES['default'].update(db_from_env)

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/

# The absolute path to the directory where collectstatic will collect static files for deployment.
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')

# The URL to use when referring to static files (where they will be served from)
# NOTE(review): duplicate assignment -- STATIC_URL is already set to the same
# value earlier in this file.
STATIC_URL = '/static/'

# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
d417ae17adef507fe60be82ea2d2799ad66bf948 | 421 | py | Python | config.py | anqurvanillapy/zhai-classroom | 4c86e9c7d3d2a8d4fb97a91b76c8e654dc41335c | [
"MIT"
] | null | null | null | config.py | anqurvanillapy/zhai-classroom | 4c86e9c7d3d2a8d4fb97a91b76c8e654dc41335c | [
"MIT"
] | null | null | null | config.py | anqurvanillapy/zhai-classroom | 4c86e9c7d3d2a8d4fb97a91b76c8e654dc41335c | [
"MIT"
] | null | null | null | """Server config loader
"""
import json
import datetime as dt
CFG_NAME = ".apprc"
def read():
_cfg = {
"timedelta": dt.timedelta(weeks=1),
"img_path": "img",
"max_filename_length": 128,
"max_content_length": 5 * 1024 * 1024,
"allowed_fileexts": ["png", "jpg", "jpeg"],
}
with open(CFG_NAME, "r") as f:
_cfg.update(json.loads(f.read()))
return _cfg
| 19.136364 | 51 | 0.56057 |
adf839db3ce8b76bcd6e8e711875e74592c6e9f6 | 819 | py | Python | tensorflow/evaluate.py | tagny/iLID | 38f5dcae0dc84fd9b78e170748aa38cd8f524c70 | [
"MIT"
] | 90 | 2016-02-19T12:37:20.000Z | 2022-02-25T19:52:46.000Z | tensorflow/evaluate.py | vyas97/iLID | 4d124b76fdbc37fbafd12e860281a4bc3ddf87d9 | [
"MIT"
] | 7 | 2017-03-24T04:12:09.000Z | 2020-06-16T11:27:54.000Z | tensorflow/evaluate.py | vyas97/iLID | 4d124b76fdbc37fbafd12e860281a4bc3ddf87d9 | [
"MIT"
] | 31 | 2016-02-01T12:52:51.000Z | 2021-08-16T04:27:59.000Z | import tensorflow as tf
import numpy as np
import yaml
from scipy.ndimage import imread
from network.instances.berlinnet_unnormal import net
import networkinput
import argparse
config = yaml.load(file("config.yaml"))
def evaluate(model_path):
    """Restore a saved model from *model_path* and evaluate it on the
    training/test CSV datasets named in the loaded config."""
    input_shape = config['INPUT_SHAPE']
    num_labels = config['OUTPUT_SHAPE'][0]

    train_data = networkinput.CSVInput(config['TRAINING_DATA'], input_shape, num_labels, mode="L")
    test_data = networkinput.CSVInput(config['TEST_DATA'], input_shape, num_labels, mode="L")

    net.set_training_input(train_data, test_data)
    net.load_and_evaluate(model_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--model', dest='model_path', required=True, help='Path to saved tensorflow model')
args = parser.parse_args()
evaluate(args.model_path) | 32.76 | 125 | 0.750916 |
786c361bde5b6d117e5df2d4c56c8ecd668c990e | 1,852 | py | Python | midterm_takehome/gemm_scratch/gemm_two_features.py | mengjian0502/eee511_team3_assignment | 6ba0015a9b49db42a4ae77e51909ef8901b7459f | [
"MIT"
] | null | null | null | midterm_takehome/gemm_scratch/gemm_two_features.py | mengjian0502/eee511_team3_assignment | 6ba0015a9b49db42a4ae77e51909ef8901b7459f | [
"MIT"
] | null | null | null | midterm_takehome/gemm_scratch/gemm_two_features.py | mengjian0502/eee511_team3_assignment | 6ba0015a9b49db42a4ae77e51909ef8901b7459f | [
"MIT"
] | null | null | null | import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
from kmeans import k_means
# NOTE(review): parse_args() runs at import time, so importing this module
# without the expected CLI flags will fail; consider moving parsing into
# main().
parser = argparse.ArgumentParser(description='Kmeans clustering')
# parameters
parser.add_argument('--clusters', type=int, default=4, help='number of clusters')
args = parser.parse_args()
def plotting(predict_labels, data, num_clusters, centroids, f1, f2):
    """Scatter-plot 2-D clustered samples and their centroids, then save a PNG.

    Args:
        predict_labels: per-sample cluster assignment (array of ints)
        data: (n_samples, 2) array of the two selected features
        num_clusters: number of clusters to draw (styles support up to 10)
        centroids: (num_clusters, 2) array of cluster centres
        f1, f2: feature/axis names, also used in the output filename
    """
    color = ['lightgreen', 'orange', 'lightblue', 'steelblue', 'red', 'blueviolet', 'aqua', 'g', 'tan', 'darkcyan', 'darkblue']
    markers = ['s', 'o', 'v', '^', 'x', 'D', 'P', 'X', 'h', '+']
    plt.figure(figsize=(8,8), dpi=300)
    for ii in range(num_clusters):
        plt.scatter(
            data[predict_labels == ii, 0], data[predict_labels == ii, 1],
            s=50, c=color[ii],
            marker=markers[ii], edgecolor='black',
            label=f'cluster {ii+1}'
        )
        plt.scatter(
            centroids[ii, 0],
            centroids[ii, 1],
            marker='X',
            s=100,
            c='r'
        )
    # Use the num_clusters parameter instead of the global CLI args (was
    # args.clusters) so the function is correct for any caller.
    plt.title(f'Kmeans: after clustering | Number of clusters={num_clusters}')
    plt.xlabel(f1)
    plt.ylabel(f2)
    plt.legend(loc='best')
    plt.savefig(f'./figs/kmeans_cluster_{num_clusters}_{f1}_{f2}.png', bbox_inches = 'tight', pad_inches = 0)
def main():
    """Load two mall-customer features, validate --clusters, report shape.

    NOTE(review): the clustering step itself (k_means / plotting) is not
    invoked here yet -- presumably still to be wired up; confirm intent.
    """
    clusters = args.clusters
    if clusters not in [4, 6, 8, 10]:
        raise ValueError("Number of clusters must be 4, 6, 8, or 10!")

    data_path = './data/Mall_Customers.csv'
    f1, f2 = 'Age', 'Spending Score (1-100)'

    df = pd.read_csv(data_path)
    data = df[[f1, f2]].iloc[: , :].to_numpy()
    print(f'Shape of the data: {data.shape}')

    # ================ GEMM ================= #
# ================ GEMM ================= #
if __name__ == '__main__':
main() | 29.396825 | 127 | 0.579374 |
aafe1ff1ce81a142a2a96d0147c11324586a0888 | 13,331 | py | Python | tests/sentry/integrations/github/test_integration.py | JeffHeon/sentry | 514bea52de53a119cf1a01b98d071f062fe13c9c | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/integrations/github/test_integration.py | JeffHeon/sentry | 514bea52de53a119cf1a01b98d071f062fe13c9c | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/integrations/github/test_integration.py | JeffHeon/sentry | 514bea52de53a119cf1a01b98d071f062fe13c9c | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
import responses
import six
import sentry
from mock import MagicMock
from six.moves.urllib.parse import parse_qs, urlencode, urlparse
from sentry.constants import ObjectStatus
from sentry.integrations.github import GitHubIntegrationProvider
from sentry.models import (
Identity, IdentityProvider, IdentityStatus, Integration, OrganizationIntegration,
Repository, Project
)
from sentry.plugins import plugins
from sentry.testutils import IntegrationTestCase
from tests.sentry.plugins.testutils import GitHubPlugin # NOQA
class GitHubIntegrationTest(IntegrationTestCase):
provider = GitHubIntegrationProvider
    def setUp(self):
        """Seed fake GitHub App credentials and register the HTTP stubs."""
        super(GitHubIntegrationTest, self).setUp()
        self.installation_id = 'install_1'
        self.user_id = 'user_1'
        self.app_id = 'app_1'
        self.access_token = 'xxxxx-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx'
        # Far-future ISO-8601 expiry so the stubbed token never looks stale.
        self.expires_at = '3000-01-01T00:00:00Z'
        self._stub_github()
    def _stub_github(self):
        """Reset and (re)register all GitHub API stubs used by the flow.

        The registration order matters: assert_setup_flow inspects
        responses.calls[0] (the OAuth token exchange) and responses.calls[2]
        by index, so keep new stubs appended after the existing ones.
        """
        responses.reset()
        # Patch out JWT generation in both modules that compute it, so the
        # outgoing Authorization headers are deterministic.
        sentry.integrations.github.integration.get_jwt = MagicMock(
            return_value='jwt_token_1',
        )
        sentry.integrations.github.client.get_jwt = MagicMock(
            return_value='jwt_token_1',
        )
        # OAuth code -> access token exchange.
        responses.add(
            responses.POST,
            'https://github.com/login/oauth/access_token',
            json={'access_token': self.access_token}
        )
        # App installation token endpoint.
        responses.add(
            responses.POST,
            u'https://api.github.com/installations/{}/access_tokens'.format(
                self.installation_id,
            ),
            json={
                'token': self.access_token,
                'expires_at': self.expires_at,
            }
        )
        # Authenticated user lookup.
        responses.add(
            responses.GET,
            'https://api.github.com/user',
            json={'id': self.user_id}
        )
        # Repositories visible to the installation.
        responses.add(
            responses.GET,
            u'https://api.github.com/installation/repositories',
            json={
                'repositories': [
                    {
                        'id': 1296269,
                        'name': 'foo',
                        'full_name': 'Test-Organization/foo',
                    },
                    {
                        'id': 9876574,
                        'name': 'bar',
                        'full_name': 'Test-Organization/bar',
                    },
                ],
            }
        )
        # Installation details (drives the Integration name/metadata).
        responses.add(
            responses.GET,
            u'https://api.github.com/app/installations/{}'.format(
                self.installation_id,
            ),
            json={
                'id': self.installation_id,
                'app_id': self.app_id,
                'account': {
                    'login': 'Test Organization',
                    'avatar_url': 'http://example.com/avatar.png',
                    'html_url': 'https://github.com/Test-Organization',
                    'type': 'Organization',
                },
            }
        )
        # Installations visible to the authenticated user.
        responses.add(
            responses.GET,
            u'https://api.github.com/user/installations',
            json={
                'installations': [{'id': self.installation_id}],
            }
        )
        # No pre-existing webhooks on the repo.
        responses.add(
            responses.GET,
            u'https://api.github.com/repos/Test-Organization/foo/hooks',
            json=[],
        )
    def assert_setup_flow(self):
        """Drive the full install flow and assert each redirect/exchange.

        Steps: init redirect to the GitHub App page -> callback with
        installation_id redirects to the OAuth authorize URL -> OAuth
        callback exchanges the code for a token. Relies on the stub
        registration order in _stub_github (responses.calls indices).
        Returns the final response.
        """
        resp = self.client.get(self.init_path)
        assert resp.status_code == 302
        redirect = urlparse(resp['Location'])
        assert redirect.scheme == 'https'
        assert redirect.netloc == 'github.com'
        assert redirect.path == '/apps/sentry-test-app'

        # App installation ID is provided
        resp = self.client.get(u'{}?{}'.format(
            self.setup_path,
            urlencode({'installation_id': self.installation_id})
        ))

        redirect = urlparse(resp['Location'])

        assert resp.status_code == 302
        assert redirect.scheme == 'https'
        assert redirect.netloc == 'github.com'
        assert redirect.path == '/login/oauth/authorize'

        params = parse_qs(redirect.query)

        assert params['state']
        assert params['redirect_uri'] == ['http://testserver/extensions/github/setup/']
        assert params['response_type'] == ['code']
        assert params['client_id'] == ['github-client-id']

        # Compact list values into singular values, since there's only ever one.
        authorize_params = {k: v[0] for k, v in six.iteritems(params)}

        resp = self.client.get(u'{}?{}'.format(
            self.setup_path,
            urlencode({
                'code': 'oauth-code',
                'state': authorize_params['state'],
            })
        ))

        # First recorded call is the OAuth code -> token exchange.
        oauth_exchange = responses.calls[0]
        req_params = parse_qs(oauth_exchange.request.body)
        assert req_params['grant_type'] == ['authorization_code']
        assert req_params['code'] == ['oauth-code']
        assert req_params['redirect_uri'] == ['http://testserver/extensions/github/setup/']
        assert req_params['client_id'] == ['github-client-id']
        assert req_params['client_secret'] == ['github-client-secret']

        assert oauth_exchange.response.status_code == 200

        # Third call must carry the mocked JWT (see _stub_github).
        auth_header = responses.calls[2].request.headers['Authorization']
        assert auth_header == 'Bearer jwt_token_1'

        self.assertDialogSuccess(resp)
        return resp
@responses.activate
def test_plugin_migration(self):
accessible_repo = Repository.objects.create(
organization_id=self.organization.id,
name='Test-Organization/foo',
url='https://github.com/Test-Organization/foo',
provider='github',
external_id=123,
config={
'name': 'Test-Organization/foo',
},
)
inaccessible_repo = Repository.objects.create(
organization_id=self.organization.id,
name='Not-My-Org/other',
provider='github',
external_id=321,
config={
'name': 'Not-My-Org/other',
},
)
with self.tasks():
self.assert_setup_flow()
integration = Integration.objects.get(provider=self.provider.key)
# Updates the existing Repository to belong to the new Integration
assert Repository.objects.get(
id=accessible_repo.id,
).integration_id == integration.id
# Doesn't touch Repositories not accessible by the new Integration
assert Repository.objects.get(
id=inaccessible_repo.id,
).integration_id is None
@responses.activate
def test_disables_plugin_when_fully_migrated(self):
project = Project.objects.create(
organization_id=self.organization.id,
)
plugin = plugins.get('github')
plugin.enable(project)
# Accessible to new Integration
Repository.objects.create(
organization_id=self.organization.id,
name='Test-Organization/foo',
url='https://github.com/Test-Organization/foo',
provider='github',
external_id=123,
config={
'name': 'Test-Organization/foo',
},
)
assert 'github' in [p.slug for p in plugins.for_project(project)]
with self.tasks():
self.assert_setup_flow()
assert 'github' not in [p.slug for p in plugins.for_project(project)]
@responses.activate
def test_basic_flow(self):
with self.tasks():
self.assert_setup_flow()
integration = Integration.objects.get(provider=self.provider.key)
assert integration.external_id == self.installation_id
assert integration.name == 'Test Organization'
assert integration.metadata == {
'access_token': None,
# The metadata doesn't get saved with the timezone "Z" character
# for some reason, so just compare everything but that.
'expires_at': None,
'icon': 'http://example.com/avatar.png',
'domain_name': 'github.com/Test-Organization',
'account_type': 'Organization',
}
oi = OrganizationIntegration.objects.get(
integration=integration,
organization=self.organization,
)
assert oi.config == {}
idp = IdentityProvider.objects.get(type='github')
identity = Identity.objects.get(
idp=idp,
user=self.user,
external_id=self.user_id,
)
assert identity.status == IdentityStatus.VALID
assert identity.data == {
'access_token': self.access_token,
}
@responses.activate
def test_reassign_user(self):
self.assert_setup_flow()
# Associate the identity with a user that has a password.
# Identity should be relinked.
user2 = self.create_user()
Identity.objects.get().update(user=user2)
self.assert_setup_flow()
identity = Identity.objects.get()
assert identity.user == self.user
# Associate the identity with a user without a password.
# Identity should not be relinked.
user2.set_unusable_password()
user2.save()
Identity.objects.get().update(user=user2)
resp = self.assert_setup_flow()
assert '"success":false' in resp.content
assert 'The provided GitHub account is linked to a different user' in resp.content
    @responses.activate
    def test_reinstall_flow(self):
        """Reinstalling a disabled integration walks the app-install and OAuth
        redirects again, re-enables it and records the new installation id."""
        self._stub_github()

        self.assert_setup_flow()

        # Disable the freshly installed integration to simulate a broken install.
        integration = Integration.objects.get(provider=self.provider.key)
        integration.update(status=ObjectStatus.DISABLED)
        assert integration.status == ObjectStatus.DISABLED
        assert integration.external_id == self.installation_id

        # Kicking off a reinstall redirects to the GitHub app page.
        resp = self.client.get(u'{}?{}'.format(
            self.init_path,
            urlencode({'reinstall_id': integration.id})
        ))

        assert resp.status_code == 302
        redirect = urlparse(resp['Location'])
        assert redirect.scheme == 'https'
        assert redirect.netloc == 'github.com'
        assert redirect.path == '/apps/sentry-test-app'

        # New Installation
        self.installation_id = 'install_2'

        resp = self.client.get(u'{}?{}'.format(
            self.setup_path,
            urlencode({'installation_id': self.installation_id})
        ))

        # Second hop: redirected to GitHub's OAuth authorize endpoint.
        redirect = urlparse(resp['Location'])
        assert resp.status_code == 302
        assert redirect.scheme == 'https'
        assert redirect.netloc == 'github.com'
        assert redirect.path == '/login/oauth/authorize'

        params = parse_qs(redirect.query)
        assert params['state']
        assert params['redirect_uri'] == ['http://testserver/extensions/github/setup/']
        assert params['response_type'] == ['code']
        assert params['client_id'] == ['github-client-id']

        # Compact list values to make the rest of this easier
        authorize_params = {k: v[0] for k, v in six.iteritems(params)}

        self._stub_github()

        # Complete the OAuth exchange with the code + state pair.
        resp = self.client.get(u'{}?{}'.format(
            self.setup_path,
            urlencode({
                'code': 'oauth-code',
                'state': authorize_params['state'],
            })
        ))

        # Verify the access-token request GitHub received from us.
        mock_access_token_request = responses.calls[0].request
        req_params = parse_qs(mock_access_token_request.body)
        assert req_params['grant_type'] == ['authorization_code']
        assert req_params['code'] == ['oauth-code']
        assert req_params['redirect_uri'] == ['http://testserver/extensions/github/setup/']
        assert req_params['client_id'] == ['github-client-id']
        assert req_params['client_secret'] == ['github-client-secret']

        assert resp.status_code == 200

        # App-level API calls authenticate with the JWT, not the OAuth token.
        auth_header = responses.calls[2].request.headers['Authorization']
        assert auth_header == 'Bearer jwt_token_1'

        # The integration is visible again and points at the new install.
        integration = Integration.objects.get(provider=self.provider.key)
        assert integration.status == ObjectStatus.VISIBLE
        assert integration.external_id == self.installation_id
    @responses.activate
    def test_disable_plugin_when_fully_migrated(self):
        """When every legacy-plugin repo is reachable by the new integration,
        installing it disables the old 'github' plugin on the project."""
        self._stub_github()

        project = Project.objects.create(
            organization_id=self.organization.id,
        )

        plugin = plugins.get('github')
        plugin.enable(project)

        # Accessible to new Integration - mocked in _stub_github
        Repository.objects.create(
            organization_id=self.organization.id,
            name='Test-Organization/foo',
            url='https://github.com/Test-Organization/foo',
            provider='github',
            external_id='123',
            config={
                'name': 'Test-Organization/foo',
            },
        )

        # Enabled before
        assert 'github' in [p.slug for p in plugins.for_project(project)]

        with self.tasks():
            self.assert_setup_flow()

        # Disabled after Integration installed
        assert 'github' not in [p.slug for p in plugins.for_project(project)]
| 33.3275 | 91 | 0.585252 |
a09a3961b4b3c0bbb3b3210ba93165220e3fa7e3 | 6,248 | py | Python | addons/io_scene_gltf2/blender/exp/gltf2_blender_gather_texture_info.py | emackey/glTF-Blender-IO | 3ab37ba38a3ae483d69a029f979286ded8b9b94b | [
"Apache-2.0"
] | null | null | null | addons/io_scene_gltf2/blender/exp/gltf2_blender_gather_texture_info.py | emackey/glTF-Blender-IO | 3ab37ba38a3ae483d69a029f979286ded8b9b94b | [
"Apache-2.0"
] | null | null | null | addons/io_scene_gltf2/blender/exp/gltf2_blender_gather_texture_info.py | emackey/glTF-Blender-IO | 3ab37ba38a3ae483d69a029f979286ded8b9b94b | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2019 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bpy
import typing
from io_scene_gltf2.blender.exp.gltf2_blender_gather_cache import cached
from io_scene_gltf2.io.com import gltf2_io
from io_scene_gltf2.blender.exp import gltf2_blender_gather_texture
from io_scene_gltf2.blender.exp import gltf2_blender_search_node_tree
from io_scene_gltf2.blender.exp import gltf2_blender_get
from io_scene_gltf2.io.com.gltf2_io_debug import print_console
from io_scene_gltf2.io.com.gltf2_io_extensions import Extension
from io_scene_gltf2.io.exp.gltf2_io_user_extensions import export_user_extensions
@cached
def gather_texture_info(blender_shader_sockets_or_texture_slots: typing.Union[
        typing.Tuple[bpy.types.NodeSocket], typing.Tuple[bpy.types.Texture]],
        export_settings):
    """Build a glTF TextureInfo from Blender shader sockets or texture slots.

    Returns None when the inputs do not pass filtering or do not resolve
    to an exportable texture.
    """
    if not __filter_texture_info(blender_shader_sockets_or_texture_slots, export_settings):
        return None

    info = gltf2_io.TextureInfo(
        extensions=__gather_extensions(blender_shader_sockets_or_texture_slots, export_settings),
        extras=__gather_extras(blender_shader_sockets_or_texture_slots, export_settings),
        index=__gather_index(blender_shader_sockets_or_texture_slots, export_settings),
        tex_coord=__gather_tex_coord(blender_shader_sockets_or_texture_slots, export_settings),
    )

    # Without a texture index there is nothing for the material to reference.
    if info.index is None:
        return None

    export_user_extensions('gather_texture_info_hook', export_settings, info,
                           blender_shader_sockets_or_texture_slots)

    return info
def __filter_texture_info(blender_shader_sockets_or_texture_slots, export_settings):
    """Return True when the inputs can be exported as a single texture.

    Rejects empty/None inputs, sockets that do not lead to an image texture,
    and socket groups whose images differ in resolution (they could not be
    merged into one image file).
    """
    if not blender_shader_sockets_or_texture_slots:
        return False
    if not all([elem is not None for elem in blender_shader_sockets_or_texture_slots]):
        return False
    if isinstance(blender_shader_sockets_or_texture_slots[0], bpy.types.NodeSocket):
        if any([__get_tex_from_socket(socket) is None for socket in blender_shader_sockets_or_texture_slots]):
            # sockets do not lead to a texture --> discard
            return False

        # All images must share the first image's resolution to be mergeable.
        resolution = __get_tex_from_socket(blender_shader_sockets_or_texture_slots[0]).shader_node.image.size
        if any(any(a != b for a, b in zip(__get_tex_from_socket(elem).shader_node.image.size, resolution))
               for elem in blender_shader_sockets_or_texture_slots):
            def format_image(image_node):
                return "{} ({}x{})".format(image_node.image.name, image_node.image.size[0], image_node.image.size[1])

            images = [format_image(__get_tex_from_socket(elem).shader_node) for elem in
                      blender_shader_sockets_or_texture_slots]

            print_console("ERROR", "Image sizes do not match. In order to be merged into one image file, "
                          "images need to be of the same size. Images: {}".format(images))
            return False

    return True
def __gather_extensions(blender_shader_sockets_or_texture_slots, export_settings):
    """Return the KHR_texture_transform extension dict for the first texture
    node reachable from the sockets, or None when not applicable."""
    # Texture slots (no 'links' attribute) never carry a texture transform.
    if not hasattr(blender_shader_sockets_or_texture_slots[0], 'links'):
        return None

    shader_nodes = [__get_tex_from_socket(sock).shader_node
                    for sock in blender_shader_sockets_or_texture_slots]
    if not shader_nodes:
        return None

    transform = gltf2_blender_get.get_texture_transform_from_texture_node(shader_nodes[0])
    if transform is None:
        return None

    return {"KHR_texture_transform": Extension("KHR_texture_transform", transform)}
def __gather_extras(blender_shader_sockets_or_texture_slots, export_settings):
    """Texture infos carry no extras; always returns None."""
    return None
def __gather_index(blender_shader_sockets_or_texture_slots, export_settings):
    """Delegate to the texture gatherer; the gathered texture object itself
    is stored in the TextureInfo 'index' member."""
    return gltf2_blender_gather_texture.gather_texture(
        blender_shader_sockets_or_texture_slots, export_settings)
def __gather_tex_coord(blender_shader_sockets_or_texture_slots, export_settings):
    """Determine which TEXCOORD_n set the texture uses (0 when unspecified)."""
    if isinstance(blender_shader_sockets_or_texture_slots[0], bpy.types.NodeSocket):
        blender_shader_node = __get_tex_from_socket(blender_shader_sockets_or_texture_slots[0]).shader_node
        # No link into the Vector input -> implicit default UV map (index 0).
        if len(blender_shader_node.inputs['Vector'].links) == 0:
            return 0

        input_node = blender_shader_node.inputs['Vector'].links[0].from_node

        # Look through an optional Mapping node to whatever feeds it.
        if isinstance(input_node, bpy.types.ShaderNodeMapping):
            if len(input_node.inputs['Vector'].links) == 0:
                return 0
            input_node = input_node.inputs['Vector'].links[0].from_node

        # Only an explicit, named UVMap node can select a non-default set.
        if not isinstance(input_node, bpy.types.ShaderNodeUVMap):
            return 0

        if input_node.uv_map == '':
            return 0

        # Try to gather map index.
        # NOTE(review): scans every mesh and returns the first layer whose
        # name matches -- assumes the UV-map name is unambiguous; confirm.
        for blender_mesh in bpy.data.meshes:
            if bpy.app.version < (2, 80, 0):
                # Blender < 2.80 stored UV layers under uv_textures.
                texCoordIndex = blender_mesh.uv_textures.find(input_node.uv_map)
            else:
                texCoordIndex = blender_mesh.uv_layers.find(input_node.uv_map)
            if texCoordIndex >= 0:
                return texCoordIndex
        return 0
    elif isinstance(blender_shader_sockets_or_texture_slots[0], bpy.types.MaterialTextureSlot):
        # TODO: implement for texture slots
        return 0
    else:
        raise NotImplementedError()
def __get_tex_from_socket(socket):
    """Follow *socket* back through the node tree to a ShaderNodeTexImage.

    Returns the first search result, or None when no image-texture node is
    reachable or the node has no image assigned.
    """
    found = gltf2_blender_search_node_tree.from_socket(
        socket,
        gltf2_blender_search_node_tree.FilterByType(bpy.types.ShaderNodeTexImage))
    if not found:
        return None
    if found[0].shader_node.image is None:
        return None
    return found[0]
| 42.503401 | 126 | 0.744238 |
732137cb0875348e4f5e751639760fa1a9d47a90 | 2,715 | py | Python | env/lib/python3.10/site-packages/ExceptionHandling/_metadata.py | Arcfrost/MyBlog---TextToSpeech | 861db3881fde00397a9b826c900fa96f5c5d9ae4 | [
"MIT"
] | null | null | null | env/lib/python3.10/site-packages/ExceptionHandling/_metadata.py | Arcfrost/MyBlog---TextToSpeech | 861db3881fde00397a9b826c900fa96f5c5d9ae4 | [
"MIT"
] | null | null | null | env/lib/python3.10/site-packages/ExceptionHandling/_metadata.py | Arcfrost/MyBlog---TextToSpeech | 861db3881fde00397a9b826c900fa96f5c5d9ae4 | [
"MIT"
] | null | null | null | # This file is generated by objective.metadata
#
# Last update: Sun Jul 11 21:37:16 2021
#
# flake8: noqa
import objc, sys
# Helpers used by generated metadata below to pick the value variant that
# matches the running interpreter (file is generated by objective.metadata).
if sys.maxsize > 2 ** 32:
    # 64-bit process: select the second (64-bit) variant.
    def sel32or64(a, b):
        return b
else:
    # 32-bit process: select the first (32-bit) variant.
    def sel32or64(a, b):
        return a

if objc.arch == "arm64":
    # Apple Silicon: select the ARM variant.
    def selAorI(a, b):
        return a
else:
    # Intel: select the Intel variant.
    def selAorI(a, b):
        return b
misc = {}
constants = """$NSStackTraceKey$NSUncaughtRuntimeErrorException$NSUncaughtSystemExceptionException$"""
enums = """$NSHandleOtherExceptionMask@512$NSHandleTopLevelExceptionMask@128$NSHandleUncaughtExceptionMask@2$NSHandleUncaughtRuntimeErrorMask@32$NSHandleUncaughtSystemExceptionMask@8$NSHangOnOtherExceptionMask@16$NSHangOnTopLevelExceptionMask@8$NSHangOnUncaughtExceptionMask@1$NSHangOnUncaughtRuntimeErrorMask@4$NSHangOnUncaughtSystemExceptionMask@2$NSLogOtherExceptionMask@256$NSLogTopLevelExceptionMask@64$NSLogUncaughtExceptionMask@1$NSLogUncaughtRuntimeErrorMask@16$NSLogUncaughtSystemExceptionMask@4$"""
misc.update({})
functions = {"NSExceptionHandlerResume": (b"v",)}
r = objc.registerMetaDataForSelector
objc._updatingMetadata(True)
try:
r(
b"NSObject",
b"exceptionHandler:shouldHandleException:mask:",
{
"retval": {"type": "Z"},
"arguments": {2: {"type": b"@"}, 3: {"type": b"@"}, 4: {"type": b"Q"}},
},
)
r(
b"NSObject",
b"exceptionHandler:shouldLogException:mask:",
{
"retval": {"type": "Z"},
"arguments": {2: {"type": b"@"}, 3: {"type": b"@"}, 4: {"type": b"Q"}},
},
)
finally:
objc._updatingMetadata(False)
protocols = {
"NSExceptionHandlerDelegate": objc.informal_protocol(
"NSExceptionHandlerDelegate",
[
objc.selector(
None,
b"exceptionHandler:shouldLogException:mask:",
b"Z@:@@Q",
isRequired=False,
),
objc.selector(
None,
b"exceptionHandler:shouldHandleException:mask:",
b"Z@:@@Q",
isRequired=False,
),
],
)
}
expressions = {
"NSHangOnEveryExceptionMask": "(NSHangOnUncaughtExceptionMask|NSHangOnUncaughtSystemExceptionMask|NSHangOnUncaughtRuntimeErrorMask|NSHangOnTopLevelExceptionMask|NSHangOnOtherExceptionMask)",
"NSLogAndHandleEveryExceptionMask": "(NSLogUncaughtExceptionMask|NSLogUncaughtSystemExceptionMask|NSLogUncaughtRuntimeErrorMask|NSHandleUncaughtExceptionMask|NSHandleUncaughtSystemExceptionMask|NSHandleUncaughtRuntimeErrorMask|NSLogTopLevelExceptionMask|NSHandleTopLevelExceptionMask|NSLogOtherExceptionMask|NSHandleOtherExceptionMask)",
}
# END OF FILE
| 32.321429 | 508 | 0.67477 |
e8fddbc838267d165c049da4bb8d63f317b3e132 | 652 | py | Python | app.py | Guts76/tp-gcp-flask | e3057e46676b0dc56bab474af8672ce6ce3cec88 | [
"MIT"
] | null | null | null | app.py | Guts76/tp-gcp-flask | e3057e46676b0dc56bab474af8672ce6ce3cec88 | [
"MIT"
] | null | null | null | app.py | Guts76/tp-gcp-flask | e3057e46676b0dc56bab474af8672ce6ce3cec88 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request, make_response, g
import os
import socket
import random
import json
import logging
# Defaults retained from the original voting-app template; not referenced
# by the single route defined below.
option_a = os.getenv('OPTION_A', "Cats")
option_b = os.getenv('OPTION_B', "Dogs")
hostname = socket.gethostname()

app = Flask(__name__)

# Route application logs through gunicorn's error logger so they appear
# when the app is served behind gunicorn.
gunicorn_error_logger = logging.getLogger('gunicorn.error')
app.logger.handlers.extend(gunicorn_error_logger.handlers)
app.logger.setLevel(logging.INFO)
@app.route("/", methods=['POST', 'GET'])
def hello():
    """Render a greeting followed by the NAME environment variable.

    Bug fix: ``os.getenv("NAME")`` returns ``None`` when the variable is
    unset, and concatenating ``None`` to a str raises ``TypeError`` (a 500
    for every request). Default to the empty string instead; behavior with
    NAME set is unchanged.
    """
    name = os.getenv("NAME", "")
    return '<h1>Bonjour tout le monde</h1>' + name
if __name__ == "__main__":
    # Listen on all interfaces, port 80; debug/threaded settings are for
    # development use only.
    app.run(host='0.0.0.0', port=80, debug=True, threaded=True)
| 23.285714 | 67 | 0.728528 |
bf3a2fc52c1cd67e9ce24516b4fc3319344f2330 | 1,964 | py | Python | architecture.py | orffen/swn-py | 8cbff08e02bb761bf98c6c30b76865a49d31a3a3 | [
"MIT"
] | 1 | 2018-02-19T04:26:19.000Z | 2018-02-19T04:26:19.000Z | architecture.py | orffen/swn-py | 8cbff08e02bb761bf98c6c30b76865a49d31a3a3 | [
"MIT"
] | null | null | null | architecture.py | orffen/swn-py | 8cbff08e02bb761bf98c6c30b76865a49d31a3a3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# architecture.py
# SWN Architecture Generator
#
# Copyright (c) 2014 Steve Simenic <orffen@orffenspace.com>
#
# This file is part of the SWN Toolbox.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import json
import random
import sys
class Architecture:
    """A random architecture element drawn from tables/architecture.json.

    The chosen value is exposed through the ``element`` attribute and by
    ``str()`` on the instance.
    """

    def __init__(self):
        # Each instantiation re-reads the table and draws a fresh element.
        with open("tables/architecture.json", "r") as fh:
            table = json.load(fh)
        self.element = str(random.choice(table["element"]))

    def __str__(self):
        return self.element
if __name__ == "__main__":
    # Number of elements to print; defaults to 1 when the argument is
    # missing or not an integer. Previously a bare ``except:`` was used,
    # which would also swallow SystemExit/KeyboardInterrupt -- catch only
    # the two failure modes int(sys.argv[1]) can actually produce.
    try:
        times = int(sys.argv[1])
    except (IndexError, ValueError):
        times = 1
    for i in range(times):
        if i != 0:
            # Separator between consecutive elements.
            print("-----------+-+-+-----------")
        print(Architecture())
| 33.288136 | 79 | 0.689919 |
1bfff7c36e5ef22964811e966f253d09d9fabfe0 | 2,160 | py | Python | docs/example.py | ausaki/python-validator | c795b038b53cb54adf4acceb223b156eb903002c | [
"MIT"
] | 44 | 2018-07-30T07:09:15.000Z | 2021-11-30T02:37:00.000Z | docs/example.py | ausaki/python-validator | c795b038b53cb54adf4acceb223b156eb903002c | [
"MIT"
] | 8 | 2019-02-18T15:00:31.000Z | 2021-02-02T07:20:57.000Z | docs/example.py | ausaki/python-validator | c795b038b53cb54adf4acceb223b156eb903002c | [
"MIT"
] | 6 | 2019-03-10T20:34:23.000Z | 2022-01-18T05:34:13.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function
from validator import Validator, StringField, IntegerField, EnumField, ListField, DictField, create_validator
from validator.exceptions import FieldRequiredError
import json
import pprint
class UserInfoValidator(Validator):
    """Declarative validation schema: each class attribute is a field rule."""
    name = StringField(max_length=50, required=True)        # mandatory, at most 50 chars
    age = IntegerField(min_value=1, max_value=120, default=20)  # 1..120, defaults to 20
    sex = EnumField(choices=['f', 'm'])                     # restricted to 'f' or 'm'
data = {
'name': 'Michael',
'age': 24,
'sex': 'f'
}
v = UserInfoValidator(data)
print('正确数据')
print('data: ', data)
print('is_valid:', v.is_valid())
print('errors:', v.errors)
print('validated_data:', v.validated_data)
data = {
'age': '24',
'sex': 'f'
}
v = UserInfoValidator(data)
print('错误数据')
print('data: ', data)
print('is_valid:', v.is_valid())
print('errors:', v.errors['age'])
print('str_errors:', v.str_errors)
print('validated_data:', v.validated_data)
data = {
'name': 'abc' * 20,
'age': 24,
'sex': 'f'
}
v = UserInfoValidator(data)
print('错误数据')
print('data: ', data)
print('is_valid:', v.is_valid())
print('errors:', v.str_errors)
print('validated_data:', v.validated_data)
data = {
'name': 'Michael',
'age': 24,
'sex': 'c'
}
v = UserInfoValidator(data)
print('错误数据')
print('data: ', data)
print('is_valid:', v.is_valid())
print('errors:', v.str_errors)
print('validated_data:', v.validated_data)
data = UserInfoValidator.mock_data()
print('mock_data:', data)
print('to_dict:')
pprint.pprint(UserInfoValidator.to_dict())
# ListField dict
class V(Validator):
    # A hand of 1-52 cards, each card an integer rank between 1 and 13.
    cards = ListField(min_length=1, max_length=52,
                      field=IntegerField(min_value=1, max_value=13))
print(json.dumps(V.to_dict(), indent=4))
V = create_validator(V.to_dict())
print(json.dumps(V.to_dict(), indent=4))
data = {
'rectangle': {
'type': 'dict',
'validator': {
'width': {
'type': 'integer',
'default': '__empty__'
},
'height': {
'type': 'integer',
}
},
}
}
V = create_validator(data)
print(json.dumps(V.to_dict(), indent=4))
| 21.176471 | 109 | 0.623611 |
423036f6e84ffed39bb6d12589bbe354fcf8b883 | 1,429 | py | Python | lite/tools/cmake_tools/parse_op_registry.py | banbishan/Paddle-Lite | 02517c12c31609f413a1c47a83e25d3fbff07074 | [
"Apache-2.0"
] | null | null | null | lite/tools/cmake_tools/parse_op_registry.py | banbishan/Paddle-Lite | 02517c12c31609f413a1c47a83e25d3fbff07074 | [
"Apache-2.0"
] | null | null | null | lite/tools/cmake_tools/parse_op_registry.py | banbishan/Paddle-Lite | 02517c12c31609f413a1c47a83e25d3fbff07074 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' Collect op registry information. '''
import sys
import logging
ops_list_path = sys.argv[1]  # file listing op-registry source paths, one per line
dest_path = sys.argv[2]      # generated header to write

# Preamble of the generated header.
out_lines = [
    '#pragma once',
    '#include "paddle_lite_factory_helper.h"',
    '',
]

with open(ops_list_path) as f:
    for line in f:
        path = line.strip()
        with open(path) as g:
            for line in g:
                key = 'REGISTER_LITE_OP'
                if line.startswith(key):
                    # REGISTER_LITE_OP(<op>, ...): take the text between
                    # the opening '(' and the first ','.
                    end = line.find(',')
                    op = line[len(key) + 1:end]
                    if not op: continue
                    # Gradient ops are not exported.
                    if "_grad" in op: continue
                    out = "USE_LITE_OP(%s);" % op
                    out_lines.append(out)

with open(dest_path, 'w') as f:
    logging.info("write op list to %s" % dest_path)
    f.write('\n'.join(out_lines))
| 31.065217 | 74 | 0.621414 |
2189c69e2164afa57ffa595737a8431caf2bb3f1 | 4,127 | py | Python | src/GitHub.py | salob/python-graphql | b69afc43e29da9855767d32599c2e366478c7799 | [
"MIT"
] | null | null | null | src/GitHub.py | salob/python-graphql | b69afc43e29da9855767d32599c2e366478c7799 | [
"MIT"
] | null | null | null | src/GitHub.py | salob/python-graphql | b69afc43e29da9855767d32599c2e366478c7799 | [
"MIT"
] | null | null | null |
'''
Created on 18 Feb 2021
@author: salob
'''
import requests
from datetime import datetime
class GitHub(object):
    '''
    Minimal client for the GitHub GraphQL v4 API.

    Queries and mutations are raw GraphQL documents built with
    %-interpolation, so callers must supply trusted argument values.
    '''

    def __init__(self, ghKey, apiUrl="https://api.github.com/graphql"):
        '''
        Constructor.

        ghKey  -- personal access token, sent as a Bearer credential
        apiUrl -- GraphQL endpoint (defaults to github.com)
        '''
        self.key = ghKey
        self.apiurl = apiUrl
        self.header = {"Authorization": "Bearer " + self.key}

    def runQuery(self, query):
        # POST the GraphQL document; any non-200 transport status is fatal.
        # Note: GraphQL-level errors still come back with HTTP 200 and are
        # returned to the caller inside the JSON payload.
        s = requests.Session()
        request = s.post(self.apiurl, headers=self.header, json={'query': query})
        if request.status_code == 200:
            return request.json()
        else:
            raise Exception("Query failed to run by returning code of {}. {}".format(request.status_code, query))

    def getIssueByNumber(self, owner, repo, issueNumber):
        # Fetch title/createdAt/body of a single issue by its number.
        query = """
        query{
          repository(owner: "%s" , name: "%s") {
            issue30: issue(number: %i) {
              ...IssueFragment
            }
          }
        }
        fragment IssueFragment on Issue {
          title
          createdAt
          body
        }
        """ % (owner, repo, issueNumber)
        results = self.runQuery(query)
        return results

    def getLabelByName(self, owner, repo, label):
        # Resolve a label name to its GraphQL node id.
        query = """
        query {
          repository(owner:"%s", name:"%s") {
            label(name:"%s") {
              id
            }
          }
        }
        """ % (owner, repo, label)
        results = self.runQuery(query)
        return results

    # There should only be one actual release wiki issue per release ID
    def getIssueByExactTitle(self, owner, repo, label, issueTitle):
        # Search by keyword, then keep only the exact-title match.
        # Returns None when no issue title matches exactly.
        issues = self.getIssuesByTitleKeywordAndLabel(owner, repo, label, issueTitle)
        for issue in issues['data']['search']['nodes']:
            if issue['title'] == issueTitle:
                return issue

    def getCommentByAuthorAndTitle(self, issue, commentAuthor, commentTitle):
        # First comment on *issue* whose body starts with commentTitle and
        # whose author login equals commentAuthor; None when absent.
        comments = issue['comments']['edges']
        for comment in comments:
            if comment['node']['body'].startswith(commentTitle) and comment['node']['author']['login'] == commentAuthor:
                return comment

    # returns list of issues containing keyword
    def getIssuesByTitleKeywordAndLabel(self, owner, repo, label, keyword):
        # Search is capped at the first 100 issues / 100 comments each.
        query = """
        {
          search(query: "repo:%s/%s label:\\"%s\\" in:title %s", type: ISSUE, first: 100) {
            nodes {
              ... on Issue {
                id
                number
                title
                body
                comments(first:100){
                  edges{
                    node{
                      id
                      author{
                        login
                      }
                      body
                    }
                  }
                }
              }
            }
          }
        }
        """ % (owner, repo, label, keyword)
        results = self.runQuery(query)
        return results

    def createIssue(self, input):
        # *input* is a pre-rendered CreateIssueInput field list, e.g.
        # 'repositoryId:"...",title:"..."'.
        mutation = """
        mutation{
          createIssue(input:{%s}) {
            issue{
              title
              id
            }
          }
        }
        """ % (input)
        results = self.runQuery(mutation)
        return results

    def updateIssueComment(self, commentId, newText):
        # Replace the body of an existing issue comment.
        mutation = """
        mutation{
          updateIssueComment(input:{id:"%s",body:"%s"}) {
            issueComment{
              body
            }
          }
        }
        """ % (commentId, newText)
        results = self.runQuery(mutation)
        return results

    def addIssueComment(self, issueId, commentTitle):
        # Append a new comment to the issue identified by node id.
        mutation = """
        mutation{
          addComment(input:{subjectId:"%s",body:"%s"}) {
            commentEdge{
              node{
                body
              }
            }
          }
        }
        """ % (issueId, commentTitle)
        results = self.runQuery(mutation)
        return results
| 26.286624 | 120 | 0.464744 |
b18628795a59867692f9921925b8ac82a4fa1bac | 59,238 | py | Python | ec2/spark_ec2.py | bopopescu/wso2-spark | 6982456ded39a8fef0ad26600218f8f575aac2a5 | [
"Apache-2.0",
"MIT"
] | 11 | 2016-05-26T12:06:38.000Z | 2020-07-06T20:37:07.000Z | ec2/spark_ec2.py | bopopescu/wso2-spark | 6982456ded39a8fef0ad26600218f8f575aac2a5 | [
"Apache-2.0",
"MIT"
] | null | null | null | ec2/spark_ec2.py | bopopescu/wso2-spark | 6982456ded39a8fef0ad26600218f8f575aac2a5 | [
"Apache-2.0",
"MIT"
] | 9 | 2016-07-29T01:13:50.000Z | 2020-07-23T16:16:17.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division, print_function, with_statement
import codecs
import hashlib
import itertools
import logging
import os
import os.path
import pipes
import random
import shutil
import string
from stat import S_IRUSR
import subprocess
import sys
import tarfile
import tempfile
import textwrap
import time
import warnings
from datetime import datetime
from optparse import OptionParser
from sys import stderr
if sys.version < "3":
from urllib2 import urlopen, Request, HTTPError
else:
from urllib.request import urlopen, Request
from urllib.error import HTTPError
raw_input = input
xrange = range
SPARK_EC2_VERSION = "1.4.1"
SPARK_EC2_DIR = os.path.dirname(os.path.realpath(__file__))
VALID_SPARK_VERSIONS = set([
"0.7.3",
"0.8.0",
"0.8.1",
"0.9.0",
"0.9.1",
"0.9.2",
"1.0.0",
"1.0.1",
"1.0.2",
"1.1.0",
"1.1.1",
"1.2.0",
"1.2.1",
"1.3.0",
"1.3.1",
"1.4.0",
"1.4.1",
"1.4.2"
])
SPARK_TACHYON_MAP = {
"1.0.0": "0.4.1",
"1.0.1": "0.4.1",
"1.0.2": "0.4.1",
"1.1.0": "0.5.0",
"1.1.1": "0.5.0",
"1.2.0": "0.5.0",
"1.2.1": "0.5.0",
"1.3.0": "0.5.0",
"1.3.1": "0.5.0",
"1.4.0": "0.6.4",
"1.4.1": "0.6.4",
"1.4.2": "0.6.4"
}
DEFAULT_SPARK_VERSION = SPARK_EC2_VERSION
DEFAULT_SPARK_GITHUB_REPO = "https://github.com/apache/spark"
# Default location to get the spark-ec2 scripts (and ami-list) from
DEFAULT_SPARK_EC2_GITHUB_REPO = "https://github.com/mesos/spark-ec2"
DEFAULT_SPARK_EC2_BRANCH = "branch-1.4"
def setup_external_libs(libs):
    """
    Download external libraries from PyPI to SPARK_EC2_DIR/lib/ and prepend them to our PATH.

    Each entry of *libs* is a dict with "name", "version" and "md5" keys.
    Tarballs are md5-verified before being unpacked; on mismatch the
    program exits. Unpacked directories are inserted near the front of
    sys.path so they shadow any system-installed versions.
    """
    PYPI_URL_PREFIX = "https://pypi.python.org/packages/source"
    SPARK_EC2_LIB_DIR = os.path.join(SPARK_EC2_DIR, "lib")

    if not os.path.exists(SPARK_EC2_LIB_DIR):
        print("Downloading external libraries that spark-ec2 needs from PyPI to {path}...".format(
            path=SPARK_EC2_LIB_DIR
        ))
        print("This should be a one-time operation.")
        os.mkdir(SPARK_EC2_LIB_DIR)

    for lib in libs:
        versioned_lib_name = "{n}-{v}".format(n=lib["name"], v=lib["version"])
        lib_dir = os.path.join(SPARK_EC2_LIB_DIR, versioned_lib_name)
        if not os.path.isdir(lib_dir):
            # Not yet downloaded: fetch, verify, unpack, then delete the tarball.
            tgz_file_path = os.path.join(SPARK_EC2_LIB_DIR, versioned_lib_name + ".tar.gz")
            print(" - Downloading {lib}...".format(lib=lib["name"]))
            download_stream = urlopen(
                "{prefix}/{first_letter}/{lib_name}/{lib_name}-{lib_version}.tar.gz".format(
                    prefix=PYPI_URL_PREFIX,
                    first_letter=lib["name"][:1],
                    lib_name=lib["name"],
                    lib_version=lib["version"]
                )
            )
            with open(tgz_file_path, "wb") as tgz_file:
                tgz_file.write(download_stream.read())
            with open(tgz_file_path, "rb") as tar:
                # Verify integrity before trusting the archive contents.
                if hashlib.md5(tar.read()).hexdigest() != lib["md5"]:
                    print("ERROR: Got wrong md5sum for {lib}.".format(lib=lib["name"]), file=stderr)
                    sys.exit(1)
            tar = tarfile.open(tgz_file_path)
            tar.extractall(path=SPARK_EC2_LIB_DIR)
            tar.close()
            os.remove(tgz_file_path)
            print(" - Finished downloading {lib}.".format(lib=lib["name"]))
        sys.path.insert(1, lib_dir)
# Only PyPI libraries are supported.
# Pinned boto release used for all EC2 API calls below; fetched (with md5
# verification) at first run by setup_external_libs.
external_libs = [
    {
        "name": "boto",
        "version": "2.34.0",
        "md5": "5556223d2d0cc4d06dd4829e671dcecd"
    }
]

setup_external_libs(external_libs)
import boto
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType, EBSBlockDeviceType
from boto import ec2
class UsageError(Exception):
    """Signals invalid command-line usage of the spark-ec2 script."""
    pass
# Configure and parse our command-line arguments
# Configure and parse our command-line arguments
def parse_args():
    """Parse spark-ec2's command line.

    Returns an ``(opts, action, cluster_name)`` tuple. Exits with usage
    help when the two positional arguments are missing, and exits with an
    error when no AWS credentials are found in ~/.boto, /etc/boto.cfg or
    the AWS_* environment variables.
    """
    parser = OptionParser(
        prog="spark-ec2",
        version="%prog {v}".format(v=SPARK_EC2_VERSION),
        usage="%prog [options] <action> <cluster_name>\n\n"
        + "<action> can be: launch, destroy, login, stop, start, get-master, reboot-slaves")

    # Cluster sizing and instance selection.
    parser.add_option(
        "-s", "--slaves", type="int", default=1,
        help="Number of slaves to launch (default: %default)")
    parser.add_option(
        "-w", "--wait", type="int",
        help="DEPRECATED (no longer necessary) - Seconds to wait for nodes to start")
    parser.add_option(
        "-k", "--key-pair",
        help="Key pair to use on instances")
    parser.add_option(
        "-i", "--identity-file",
        help="SSH private key file to use for logging into instances")
    parser.add_option(
        "-t", "--instance-type", default="m1.large",
        help="Type of instance to launch (default: %default). " +
             "WARNING: must be 64-bit; small instances won't work")
    parser.add_option(
        "-m", "--master-instance-type", default="",
        help="Master instance type (leave empty for same as instance-type)")
    parser.add_option(
        "-r", "--region", default="us-east-1",
        help="EC2 region used to launch instances in, or to find them in (default: %default)")
    parser.add_option(
        "-z", "--zone", default="",
        help="Availability zone to launch instances in, or 'all' to spread " +
             "slaves across multiple (an additional $0.01/Gb for bandwidth" +
             "between zones applies) (default: a single zone chosen at random)")
    parser.add_option(
        "-a", "--ami",
        help="Amazon Machine Image ID to use")
    # Spark / spark-ec2 version and repository selection.
    parser.add_option(
        "-v", "--spark-version", default=DEFAULT_SPARK_VERSION,
        help="Version of Spark to use: 'X.Y.Z' or a specific git hash (default: %default)")
    parser.add_option(
        "--spark-git-repo",
        default=DEFAULT_SPARK_GITHUB_REPO,
        help="Github repo from which to checkout supplied commit hash (default: %default)")
    parser.add_option(
        "--spark-ec2-git-repo",
        default=DEFAULT_SPARK_EC2_GITHUB_REPO,
        help="Github repo from which to checkout spark-ec2 (default: %default)")
    parser.add_option(
        "--spark-ec2-git-branch",
        default=DEFAULT_SPARK_EC2_BRANCH,
        help="Github repo branch of spark-ec2 to use (default: %default)")
    parser.add_option(
        "--deploy-root-dir",
        default=None,
        help="A directory to copy into / on the first master. " +
             "Must be absolute. Note that a trailing slash is handled as per rsync: " +
             "If you omit it, the last directory of the --deploy-root-dir path will be created " +
             "in / before copying its contents. If you append the trailing slash, " +
             "the directory is not created and its contents are copied directly into /. " +
             "(default: %default).")
    parser.add_option(
        "--hadoop-major-version", default="1",
        help="Major version of Hadoop. Valid options are 1 (Hadoop 1.0.4), 2 (CDH 4.2.0), yarn " +
             "(Hadoop 2.4.0) (default: %default)")
    parser.add_option(
        "-D", metavar="[ADDRESS:]PORT", dest="proxy_port",
        help="Use SSH dynamic port forwarding to create a SOCKS proxy at " +
             "the given local address (for use with login)")
    parser.add_option(
        "--resume", action="store_true", default=False,
        help="Resume installation on a previously launched cluster " +
             "(for debugging)")
    # EBS storage options.
    parser.add_option(
        "--ebs-vol-size", metavar="SIZE", type="int", default=0,
        help="Size (in GB) of each EBS volume.")
    parser.add_option(
        "--ebs-vol-type", default="standard",
        help="EBS volume type (e.g. 'gp2', 'standard').")
    parser.add_option(
        "--ebs-vol-num", type="int", default=1,
        help="Number of EBS volumes to attach to each node as /vol[x]. " +
             "The volumes will be deleted when the instances terminate. " +
             "Only possible on EBS-backed AMIs. " +
             "EBS volumes are only attached if --ebs-vol-size > 0." +
             "Only support up to 8 EBS volumes.")
    parser.add_option(
        "--placement-group", type="string", default=None,
        help="Which placement group to try and launch " +
             "instances into. Assumes placement group is already " +
             "created.")
    parser.add_option(
        "--swap", metavar="SWAP", type="int", default=1024,
        help="Swap space to set up per node, in MB (default: %default)")
    parser.add_option(
        "--spot-price", metavar="PRICE", type="float",
        help="If specified, launch slaves as spot instances with the given " +
             "maximum price (in dollars)")
    parser.add_option(
        "--ganglia", action="store_true", default=True,
        help="Setup Ganglia monitoring on cluster (default: %default). NOTE: " +
             "the Ganglia page will be publicly accessible")
    parser.add_option(
        "--no-ganglia", action="store_false", dest="ganglia",
        help="Disable Ganglia monitoring for the cluster")
    parser.add_option(
        "-u", "--user", default="root",
        help="The SSH user you want to connect as (default: %default)")
    parser.add_option(
        "--delete-groups", action="store_true", default=False,
        help="When destroying a cluster, delete the security groups that were created")
    parser.add_option(
        "--use-existing-master", action="store_true", default=False,
        help="Launch fresh slaves, but use an existing stopped master if possible")
    parser.add_option(
        "--worker-instances", type="int", default=1,
        help="Number of instances per worker: variable SPARK_WORKER_INSTANCES. Not used if YARN " +
             "is used as Hadoop major version (default: %default)")
    parser.add_option(
        "--master-opts", type="string", default="",
        help="Extra options to give to master through SPARK_MASTER_OPTS variable " +
             "(e.g -Dspark.worker.timeout=180)")
    parser.add_option(
        "--user-data", type="string", default="",
        help="Path to a user-data file (most AMIs interpret this as an initialization script)")
    parser.add_option(
        "--authorized-address", type="string", default="0.0.0.0/0",
        help="Address to authorize on created security groups (default: %default)")
    parser.add_option(
        "--additional-security-group", type="string", default="",
        help="Additional security group to place the machines in")
    parser.add_option(
        "--copy-aws-credentials", action="store_true", default=False,
        help="Add AWS credentials to hadoop configuration to allow Spark to access S3")
    # VPC / networking options.
    parser.add_option(
        "--subnet-id", default=None,
        help="VPC subnet to launch instances in")
    parser.add_option(
        "--vpc-id", default=None,
        help="VPC to launch instances in")
    parser.add_option(
        "--private-ips", action="store_true", default=False,
        help="Use private IPs for instances rather than public if VPC/subnet " +
             "requires that.")

    (opts, args) = parser.parse_args()
    if len(args) != 2:
        parser.print_help()
        sys.exit(1)
    (action, cluster_name) = args

    # Boto config check
    # http://boto.cloudhackers.com/en/latest/boto_config_tut.html
    home_dir = os.getenv('HOME')
    if home_dir is None or not os.path.isfile(home_dir + '/.boto'):
        if not os.path.isfile('/etc/boto.cfg'):
            if os.getenv('AWS_ACCESS_KEY_ID') is None:
                print("ERROR: The environment variable AWS_ACCESS_KEY_ID must be set",
                      file=stderr)
                sys.exit(1)
            if os.getenv('AWS_SECRET_ACCESS_KEY') is None:
                print("ERROR: The environment variable AWS_SECRET_ACCESS_KEY must be set",
                      file=stderr)
                sys.exit(1)
    return (opts, action, cluster_name)
# Get the EC2 security group of the given name, creating it if it doesn't exist
def get_or_make_group(conn, name, vpc_id):
    """Return the EC2 security group called `name`, creating it if absent."""
    matching = [sg for sg in conn.get_all_security_groups() if sg.name == name]
    if matching:
        return matching[0]
    print("Creating security group " + name)
    return conn.create_security_group(name, "Spark EC2 group", vpc_id)
def get_validate_spark_version(version, repo):
    """Validate a Spark release number or git commit hash and return it.

    Release-style versions (containing a dot) are checked against
    VALID_SPARK_VERSIONS after stripping any leading "v"; anything else is
    treated as a git commit hash and probed with an HTTP HEAD request against
    the given GitHub repo. Exits the process if validation fails.
    """
    if "." not in version:
        # Looks like a git commit hash: check that the commit exists on GitHub.
        github_commit_url = "{repo}/commit/{commit_hash}".format(repo=repo, commit_hash=version)
        request = Request(github_commit_url)
        request.get_method = lambda: 'HEAD'
        try:
            urlopen(request)
        except HTTPError as e:
            print("Couldn't validate Spark commit: {url}".format(url=github_commit_url),
                  file=stderr)
            print("Received HTTP response code of {code}.".format(code=e.code), file=stderr)
            sys.exit(1)
        return version
    # Release-style version string such as "1.3.1" (or "v1.3.1").
    version = version.replace("v", "")
    if version not in VALID_SPARK_VERSIONS:
        print("Don't know about Spark version: {v}".format(v=version), file=stderr)
        sys.exit(1)
    return version
# Source: http://aws.amazon.com/amazon-linux-ami/instance-type-matrix/
# Last Updated: 2015-05-08
# For easy maintainability, please keep this manually-inputted dictionary sorted by key.
# Maps each known EC2 instance type to the AMI virtualization type it
# requires: "pvm" (paravirtual) or "hvm" (hardware virtual machine).
# Consulted by get_spark_ami() when resolving which AMI to launch.
EC2_INSTANCE_TYPES = {
    "c1.medium": "pvm",
    "c1.xlarge": "pvm",
    "c3.large": "pvm",
    "c3.xlarge": "pvm",
    "c3.2xlarge": "pvm",
    "c3.4xlarge": "pvm",
    "c3.8xlarge": "pvm",
    "c4.large": "hvm",
    "c4.xlarge": "hvm",
    "c4.2xlarge": "hvm",
    "c4.4xlarge": "hvm",
    "c4.8xlarge": "hvm",
    "cc1.4xlarge": "hvm",
    "cc2.8xlarge": "hvm",
    "cg1.4xlarge": "hvm",
    "cr1.8xlarge": "hvm",
    "d2.xlarge": "hvm",
    "d2.2xlarge": "hvm",
    "d2.4xlarge": "hvm",
    "d2.8xlarge": "hvm",
    "g2.2xlarge": "hvm",
    "g2.8xlarge": "hvm",
    "hi1.4xlarge": "pvm",
    "hs1.8xlarge": "pvm",
    "i2.xlarge": "hvm",
    "i2.2xlarge": "hvm",
    "i2.4xlarge": "hvm",
    "i2.8xlarge": "hvm",
    "m1.small": "pvm",
    "m1.medium": "pvm",
    "m1.large": "pvm",
    "m1.xlarge": "pvm",
    "m2.xlarge": "pvm",
    "m2.2xlarge": "pvm",
    "m2.4xlarge": "pvm",
    "m3.medium": "hvm",
    "m3.large": "hvm",
    "m3.xlarge": "hvm",
    "m3.2xlarge": "hvm",
    "r3.large": "hvm",
    "r3.xlarge": "hvm",
    "r3.2xlarge": "hvm",
    "r3.4xlarge": "hvm",
    "r3.8xlarge": "hvm",
    "t1.micro": "pvm",
    "t2.micro": "hvm",
    "t2.small": "hvm",
    "t2.medium": "hvm",
}
def get_tachyon_version(spark_version):
    """Return the Tachyon version paired with `spark_version`, or "" if unknown."""
    if spark_version in SPARK_TACHYON_MAP:
        return SPARK_TACHYON_MAP[spark_version]
    return ""
# Attempt to resolve an appropriate AMI given the architecture and region of the request.
def get_spark_ami(opts):
    """Resolve the AMI id to use for the requested instance type and region.

    The AMI list is fetched over HTTP from the spark-ec2 GitHub repository
    configured via --spark-ec2-git-repo / --spark-ec2-git-branch.
    Exits the process if the AMI cannot be resolved.
    """
    if opts.instance_type in EC2_INSTANCE_TYPES:
        instance_type = EC2_INSTANCE_TYPES[opts.instance_type]
    else:
        # Unknown instance types fall back to paravirtual AMIs.
        instance_type = "pvm"
        print("Don't recognize %s, assuming type is pvm" % opts.instance_type, file=stderr)
    # URL prefix from which to fetch AMI information
    ami_prefix = "{r}/{b}/ami-list".format(
        r=opts.spark_ec2_git_repo.replace("https://github.com", "https://raw.github.com", 1),
        b=opts.spark_ec2_git_branch)
    ami_path = "%s/%s/%s" % (ami_prefix, opts.region, instance_type)
    reader = codecs.getreader("ascii")
    try:
        ami = reader(urlopen(ami_path)).read().strip()
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit and so masked Ctrl-C during the fetch; catch only real
        # errors raised by the download/decode.
        print("Could not resolve AMI at: " + ami_path, file=stderr)
        sys.exit(1)
    print("Spark AMI: " + ami)
    return ami
# Launch a cluster of the given name, by setting up its security groups,
# and then starting new instances in them.
# Returns a tuple of EC2 reservation objects for the master and slaves
# Fails if there already instances running in the cluster's groups.
def launch_cluster(conn, opts, cluster_name):
    """Launch (or resume) a Spark cluster named `cluster_name`.

    Creates/authorizes the "<name>-master" and "<name>-slaves" security
    groups, resolves the AMI, launches the slaves (spot or on-demand), then
    launches or restarts the master, tags every instance with a descriptive
    Name, and returns (master_nodes, slave_nodes) as lists of boto instance
    objects. Exits the process if instances are already running in the
    cluster's groups (unless --use-existing-master allows resuming).
    """
    if opts.identity_file is None:
        print("ERROR: Must provide an identity file (-i) for ssh connections.", file=stderr)
        sys.exit(1)
    if opts.key_pair is None:
        print("ERROR: Must provide a key pair name (-k) to use on instances.", file=stderr)
        sys.exit(1)
    user_data_content = None
    if opts.user_data:
        with open(opts.user_data) as user_data_file:
            user_data_content = user_data_file.read()
    print("Setting up security groups...")
    master_group = get_or_make_group(conn, cluster_name + "-master", opts.vpc_id)
    slave_group = get_or_make_group(conn, cluster_name + "-slaves", opts.vpc_id)
    authorized_address = opts.authorized_address
    if master_group.rules == []:  # Group was just now created
        # Inside a VPC, boto needs explicit per-protocol rules; in EC2-Classic
        # a single src_group authorization covers all intra-group traffic.
        if opts.vpc_id is None:
            master_group.authorize(src_group=master_group)
            master_group.authorize(src_group=slave_group)
        else:
            master_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
                                   src_group=master_group)
            master_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535,
                                   src_group=master_group)
            master_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
                                   src_group=master_group)
            master_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
                                   src_group=slave_group)
            master_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535,
                                   src_group=slave_group)
            master_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
                                   src_group=slave_group)
        # External access: ssh, Spark web UIs, history server, Tachyon,
        # Hadoop JobTracker/NameNode UIs, and application UIs (4040-4045).
        master_group.authorize('tcp', 22, 22, authorized_address)
        master_group.authorize('tcp', 8080, 8081, authorized_address)
        master_group.authorize('tcp', 18080, 18080, authorized_address)
        master_group.authorize('tcp', 19999, 19999, authorized_address)
        master_group.authorize('tcp', 50030, 50030, authorized_address)
        master_group.authorize('tcp', 50070, 50070, authorized_address)
        master_group.authorize('tcp', 60070, 60070, authorized_address)
        master_group.authorize('tcp', 4040, 4045, authorized_address)
        # HDFS NFS gateway requires 111,2049,4242 for tcp & udp
        master_group.authorize('tcp', 111, 111, authorized_address)
        master_group.authorize('udp', 111, 111, authorized_address)
        master_group.authorize('tcp', 2049, 2049, authorized_address)
        master_group.authorize('udp', 2049, 2049, authorized_address)
        master_group.authorize('tcp', 4242, 4242, authorized_address)
        master_group.authorize('udp', 4242, 4242, authorized_address)
        # RM in YARN mode uses 8088
        master_group.authorize('tcp', 8088, 8088, authorized_address)
        if opts.ganglia:
            master_group.authorize('tcp', 5080, 5080, authorized_address)
    if slave_group.rules == []:  # Group was just now created
        if opts.vpc_id is None:
            slave_group.authorize(src_group=master_group)
            slave_group.authorize(src_group=slave_group)
        else:
            slave_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
                                  src_group=master_group)
            slave_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535,
                                  src_group=master_group)
            slave_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
                                  src_group=master_group)
            slave_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
                                  src_group=slave_group)
            slave_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535,
                                  src_group=slave_group)
            slave_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
                                  src_group=slave_group)
        slave_group.authorize('tcp', 22, 22, authorized_address)
        slave_group.authorize('tcp', 8080, 8081, authorized_address)
        slave_group.authorize('tcp', 50060, 50060, authorized_address)
        slave_group.authorize('tcp', 50075, 50075, authorized_address)
        slave_group.authorize('tcp', 60060, 60060, authorized_address)
        slave_group.authorize('tcp', 60075, 60075, authorized_address)
    # Check if instances are already running in our groups
    existing_masters, existing_slaves = get_existing_cluster(conn, opts, cluster_name,
                                                             die_on_error=False)
    if existing_slaves or (existing_masters and not opts.use_existing_master):
        print("ERROR: There are already instances running in group %s or %s" %
              (master_group.name, slave_group.name), file=stderr)
        sys.exit(1)
    # Figure out Spark AMI
    if opts.ami is None:
        opts.ami = get_spark_ami(opts)
    # we use group ids to work around https://github.com/boto/boto/issues/350
    additional_group_ids = []
    if opts.additional_security_group:
        additional_group_ids = [sg.id
                                for sg in conn.get_all_security_groups()
                                if opts.additional_security_group in (sg.name, sg.id)]
    print("Launching instances...")
    try:
        image = conn.get_all_images(image_ids=[opts.ami])[0]
    except:
        # NOTE(review): bare `except:` also traps KeyboardInterrupt here;
        # consider narrowing to `except Exception:`.
        print("Could not find AMI " + opts.ami, file=stderr)
        sys.exit(1)
    # Create block device mapping so that we can add EBS volumes if asked to.
    # The first drive is attached as /dev/sds, 2nd as /dev/sdt, ... /dev/sdz
    block_map = BlockDeviceMapping()
    if opts.ebs_vol_size > 0:
        for i in range(opts.ebs_vol_num):
            device = EBSBlockDeviceType()
            device.size = opts.ebs_vol_size
            device.volume_type = opts.ebs_vol_type
            device.delete_on_termination = True
            block_map["/dev/sd" + chr(ord('s') + i)] = device
    # AWS ignores the AMI-specified block device mapping for M3 (see SPARK-3342).
    if opts.instance_type.startswith('m3.'):
        for i in range(get_num_disks(opts.instance_type)):
            dev = BlockDeviceType()
            dev.ephemeral_name = 'ephemeral%d' % i
            # The first ephemeral drive is /dev/sdb.
            # (string.letters is Python 2 only; this script targets Python 2.)
            name = '/dev/sd' + string.letters[i + 1]
            block_map[name] = dev
    # Launch slaves
    if opts.spot_price is not None:
        # Launch spot instances with the requested price
        print("Requesting %d slaves as spot instances with price $%.3f" %
              (opts.slaves, opts.spot_price))
        zones = get_zones(conn, opts)
        num_zones = len(zones)
        i = 0
        my_req_ids = []
        for zone in zones:
            num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
            slave_reqs = conn.request_spot_instances(
                price=opts.spot_price,
                image_id=opts.ami,
                launch_group="launch-group-%s" % cluster_name,
                placement=zone,
                count=num_slaves_this_zone,
                key_name=opts.key_pair,
                security_group_ids=[slave_group.id] + additional_group_ids,
                instance_type=opts.instance_type,
                block_device_map=block_map,
                subnet_id=opts.subnet_id,
                placement_group=opts.placement_group,
                user_data=user_data_content)
            my_req_ids += [req.id for req in slave_reqs]
            i += 1
        print("Waiting for spot instances to be granted...")
        try:
            # Poll every 10s until every spot request we issued is active.
            while True:
                time.sleep(10)
                reqs = conn.get_all_spot_instance_requests()
                id_to_req = {}
                for r in reqs:
                    id_to_req[r.id] = r
                active_instance_ids = []
                for i in my_req_ids:
                    if i in id_to_req and id_to_req[i].state == "active":
                        active_instance_ids.append(id_to_req[i].instance_id)
                if len(active_instance_ids) == opts.slaves:
                    print("All %d slaves granted" % opts.slaves)
                    reservations = conn.get_all_reservations(active_instance_ids)
                    slave_nodes = []
                    for r in reservations:
                        slave_nodes += r.instances
                    break
                else:
                    print("%d of %d slaves granted, waiting longer" % (
                        len(active_instance_ids), opts.slaves))
        except:
            # Any interruption (including Ctrl-C) cancels the outstanding
            # spot requests before exiting.
            print("Canceling spot instance requests")
            conn.cancel_spot_instance_requests(my_req_ids)
            # Log a warning if any of these requests actually launched instances:
            (master_nodes, slave_nodes) = get_existing_cluster(
                conn, opts, cluster_name, die_on_error=False)
            running = len(master_nodes) + len(slave_nodes)
            if running:
                print(("WARNING: %d instances are still running" % running), file=stderr)
            sys.exit(0)
    else:
        # Launch non-spot instances
        zones = get_zones(conn, opts)
        num_zones = len(zones)
        i = 0
        slave_nodes = []
        for zone in zones:
            num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
            if num_slaves_this_zone > 0:
                slave_res = image.run(key_name=opts.key_pair,
                                      security_group_ids=[slave_group.id] + additional_group_ids,
                                      instance_type=opts.instance_type,
                                      placement=zone,
                                      min_count=num_slaves_this_zone,
                                      max_count=num_slaves_this_zone,
                                      block_device_map=block_map,
                                      subnet_id=opts.subnet_id,
                                      placement_group=opts.placement_group,
                                      user_data=user_data_content)
                slave_nodes += slave_res.instances
                print("Launched {s} slave{plural_s} in {z}, regid = {r}".format(
                      s=num_slaves_this_zone,
                      plural_s=('' if num_slaves_this_zone == 1 else 's'),
                      z=zone,
                      r=slave_res.id))
            i += 1
    # Launch or resume masters
    if existing_masters:
        print("Starting master...")
        for inst in existing_masters:
            if inst.state not in ["shutting-down", "terminated"]:
                inst.start()
        master_nodes = existing_masters
    else:
        master_type = opts.master_instance_type
        if master_type == "":
            master_type = opts.instance_type
        if opts.zone == 'all':
            opts.zone = random.choice(conn.get_all_zones()).name
        master_res = image.run(key_name=opts.key_pair,
                               security_group_ids=[master_group.id] + additional_group_ids,
                               instance_type=master_type,
                               placement=opts.zone,
                               min_count=1,
                               max_count=1,
                               block_device_map=block_map,
                               subnet_id=opts.subnet_id,
                               placement_group=opts.placement_group,
                               user_data=user_data_content)
        master_nodes = master_res.instances
        # NOTE(review): `zone` below is the leftover loop variable from the
        # slave launch above; the master was actually placed in opts.zone.
        # Confirm whether the message should read opts.zone instead.
        print("Launched master in %s, regid = %s" % (zone, master_res.id))
    # This wait time corresponds to SPARK-4983
    print("Waiting for AWS to propagate instance metadata...")
    time.sleep(5)
    # Give the instances descriptive names
    for master in master_nodes:
        master.add_tag(
            key='Name',
            value='{cn}-master-{iid}'.format(cn=cluster_name, iid=master.id))
    for slave in slave_nodes:
        slave.add_tag(
            key='Name',
            value='{cn}-slave-{iid}'.format(cn=cluster_name, iid=slave.id))
    # Return all the instances
    return (master_nodes, slave_nodes)
def get_existing_cluster(conn, opts, cluster_name, die_on_error=True):
    """
    Get the EC2 instances in an existing cluster if available.
    Returns a tuple of lists of EC2 instance objects for the masters and slaves.

    With die_on_error=True (the default), exits the process when no master
    instance is found for the cluster.
    """
    print("Searching for existing cluster {c} in region {r}...".format(
        c=cluster_name, r=opts.region))
    def get_instances(group_names):
        """
        Get all non-terminated instances that belong to any of the provided security groups.
        EC2 reservation filters and instance states are documented here:
        http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options
        """
        reservations = conn.get_all_reservations(
            filters={"instance.group-name": group_names})
        instances = itertools.chain.from_iterable(r.instances for r in reservations)
        return [i for i in instances if i.state not in ["shutting-down", "terminated"]]
    # Cluster membership is determined purely by security-group naming.
    master_instances = get_instances([cluster_name + "-master"])
    slave_instances = get_instances([cluster_name + "-slaves"])
    if any((master_instances, slave_instances)):
        print("Found {m} master{plural_m}, {s} slave{plural_s}.".format(
            m=len(master_instances),
            plural_m=('' if len(master_instances) == 1 else 's'),
            s=len(slave_instances),
            plural_s=('' if len(slave_instances) == 1 else 's')))
    if not master_instances and die_on_error:
        print("ERROR: Could not find a master for cluster {c} in region {r}.".format(
            c=cluster_name, r=opts.region), file=sys.stderr)
        sys.exit(1)
    return (master_instances, slave_instances)
# Deploy configuration files and run setup scripts on a newly launched
# or started EC2 cluster.
def setup_cluster(conn, master_nodes, slave_nodes, opts, deploy_ssh_key):
    """Configure a freshly launched/started cluster over SSH.

    Optionally generates an intra-cluster SSH key on the master and copies it
    to every slave, selects the deploy modules, clones the spark-ec2 scripts
    onto the master, deploys templated config files (and any user files), and
    finally runs the setup script on the master.
    """
    master = get_dns_name(master_nodes[0], opts.private_ips)
    if deploy_ssh_key:
        print("Generating cluster's SSH key on master...")
        key_setup = """
          [ -f ~/.ssh/id_rsa ] ||
            (ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa &&
             cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys)
        """
        ssh(master, opts, key_setup)
        # Copy the whole ~/.ssh directory to the slaves as a tar stream.
        dot_ssh_tar = ssh_read(master, opts, ['tar', 'c', '.ssh'])
        print("Transferring cluster's SSH key to slaves...")
        for slave in slave_nodes:
            slave_address = get_dns_name(slave, opts.private_ips)
            print(slave_address)
            ssh_write(slave_address, opts, ['tar', 'x'], dot_ssh_tar)
    modules = ['spark', 'ephemeral-hdfs', 'persistent-hdfs',
               'mapreduce', 'spark-standalone', 'tachyon']
    if opts.hadoop_major_version == "1":
        modules = list(filter(lambda x: x != "mapreduce", modules))
    if opts.ganglia:
        modules.append('ganglia')
    # Clear SPARK_WORKER_INSTANCES if running on YARN
    if opts.hadoop_major_version == "yarn":
        opts.worker_instances = ""
    # NOTE: We should clone the repository before running deploy_files to
    # prevent ec2-variables.sh from being overwritten
    print("Cloning spark-ec2 scripts from {r}/tree/{b} on master...".format(
        r=opts.spark_ec2_git_repo, b=opts.spark_ec2_git_branch))
    ssh(
        host=master,
        opts=opts,
        command="rm -rf spark-ec2"
        + " && "
        + "git clone {r} -b {b} spark-ec2".format(r=opts.spark_ec2_git_repo,
                                                  b=opts.spark_ec2_git_branch)
    )
    print("Deploying files to master...")
    deploy_files(
        conn=conn,
        root_dir=SPARK_EC2_DIR + "/" + "deploy.generic",
        opts=opts,
        master_nodes=master_nodes,
        slave_nodes=slave_nodes,
        modules=modules
    )
    if opts.deploy_root_dir is not None:
        print("Deploying {s} to master...".format(s=opts.deploy_root_dir))
        deploy_user_files(
            root_dir=opts.deploy_root_dir,
            opts=opts,
            master_nodes=master_nodes
        )
    print("Running setup on master...")
    setup_spark_cluster(master, opts)
    print("Done!")
def setup_spark_cluster(master, opts):
    """Run the spark-ec2 setup script on the master and report the web UIs."""
    for remote_cmd in ("chmod u+x spark-ec2/setup.sh", "spark-ec2/setup.sh"):
        ssh(master, opts, remote_cmd)
    print("Spark standalone cluster started at http://%s:8080" % master)
    if opts.ganglia:
        print("Ganglia started at http://%s:5080/ganglia" % master)
def is_ssh_available(host, opts, print_ssh_output=True):
    """
    Check if SSH is available on a host.
    """
    argv = ssh_command(opts) + ['-t', '-t', '-o', 'ConnectTimeout=3',
                                '%s@%s' % (opts.user, host), stringify_command('true')]
    proc = subprocess.Popen(
        argv,
        stdout=subprocess.PIPE,
        # Pipe stderr through stdout to preserve output order.
        stderr=subprocess.STDOUT
    )
    cmd_output = proc.communicate()[0]  # stderr was redirected into stdout above
    ok = proc.returncode == 0
    if not ok and print_ssh_output:
        # extra leading newline is for spacing in wait_for_cluster_state()
        print(textwrap.dedent("""\n
            Warning: SSH connection error. (This could be temporary.)
            Host: {h}
            SSH return code: {r}
            SSH output: {o}
        """).format(
            h=host,
            r=proc.returncode,
            o=cmd_output.strip()
        ))
    return ok
def is_cluster_ssh_available(cluster_instances, opts):
    """
    Check if SSH is available on all the instances in a cluster.
    """
    # all() short-circuits on the first unreachable instance, exactly like
    # the explicit loop it replaces.
    return all(
        is_ssh_available(host=get_dns_name(inst, opts.private_ips), opts=opts)
        for inst in cluster_instances)
def wait_for_cluster_state(conn, opts, cluster_instances, cluster_state):
    """
    Wait for all the instances in the cluster to reach a designated state.
    cluster_instances: a list of boto.ec2.instance.Instance
    cluster_state: a string representing the desired state of all the instances in the cluster
           value can be 'ssh-ready' or a valid value from boto.ec2.instance.InstanceState such as
           'running', 'terminated', etc.
           (would be nice to replace this with a proper enum: http://stackoverflow.com/a/1695250)
    """
    sys.stdout.write(
        "Waiting for cluster to enter '{s}' state.".format(s=cluster_state)
    )
    sys.stdout.flush()
    start_time = datetime.now()
    num_attempts = 0
    while True:
        # Linear back-off: 0s, 5s, 10s, ... between polls.
        time.sleep(5 * num_attempts)  # seconds
        for i in cluster_instances:
            i.update()
        statuses = conn.get_all_instance_status(instance_ids=[i.id for i in cluster_instances])
        if cluster_state == 'ssh-ready':
            # 'ssh-ready' means: running, both AWS status checks OK, and a
            # trivial SSH command succeeds on every instance.
            if all(i.state == 'running' for i in cluster_instances) and \
               all(s.system_status.status == 'ok' for s in statuses) and \
               all(s.instance_status.status == 'ok' for s in statuses) and \
               is_cluster_ssh_available(cluster_instances, opts):
                break
        else:
            if all(i.state == cluster_state for i in cluster_instances):
                break
        num_attempts += 1
        sys.stdout.write(".")
        sys.stdout.flush()
    sys.stdout.write("\n")
    end_time = datetime.now()
    print("Cluster is now in '{s}' state. Waited {t} seconds.".format(
        s=cluster_state,
        t=(end_time - start_time).seconds
    ))
# Get number of local disks available for a given EC2 instance type.
def get_num_disks(instance_type):
    """Return the number of local (instance-store) disks for an EC2 type.

    Unknown instance types are assumed to have one disk, with a warning.
    """
    # Source: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html
    # Last Updated: 2015-05-08
    # For easy maintainability, please keep this manually-inputted dictionary sorted by key.
    disks_by_instance = {
        "c1.medium": 1,
        "c1.xlarge": 4,
        "c3.large": 2,
        "c3.xlarge": 2,
        "c3.2xlarge": 2,
        "c3.4xlarge": 2,
        "c3.8xlarge": 2,
        "c4.large": 0,
        "c4.xlarge": 0,
        "c4.2xlarge": 0,
        "c4.4xlarge": 0,
        "c4.8xlarge": 0,
        "cc1.4xlarge": 2,
        "cc2.8xlarge": 4,
        "cg1.4xlarge": 2,
        "cr1.8xlarge": 2,
        "d2.xlarge": 3,
        "d2.2xlarge": 6,
        "d2.4xlarge": 12,
        "d2.8xlarge": 24,
        "g2.2xlarge": 1,
        "g2.8xlarge": 2,
        "hi1.4xlarge": 2,
        "hs1.8xlarge": 24,
        "i2.xlarge": 1,
        "i2.2xlarge": 2,
        "i2.4xlarge": 4,
        "i2.8xlarge": 8,
        "m1.small": 1,
        "m1.medium": 1,
        "m1.large": 2,
        "m1.xlarge": 4,
        "m2.xlarge": 1,
        "m2.2xlarge": 1,
        "m2.4xlarge": 2,
        "m3.medium": 1,
        "m3.large": 1,
        "m3.xlarge": 2,
        "m3.2xlarge": 2,
        "r3.large": 1,
        "r3.xlarge": 1,
        "r3.2xlarge": 1,
        "r3.4xlarge": 1,
        "r3.8xlarge": 2,
        "t1.micro": 0,
        "t2.micro": 0,
        "t2.small": 0,
        "t2.medium": 0,
    }
    try:
        return disks_by_instance[instance_type]
    except KeyError:
        print("WARNING: Don't know number of disks on instance type %s; assuming 1"
              % instance_type, file=stderr)
        return 1
# Deploy the configuration file templates in a given local directory to
# a cluster, filling in any template parameters with information about the
# cluster (e.g. lists of masters and slaves). Files are only deployed to
# the first master instance in the cluster, and we expect the setup
# script to be run on that instance to copy them to other nodes.
#
# root_dir should be an absolute path to the directory with the files we want to deploy.
def deploy_files(conn, root_dir, opts, master_nodes, slave_nodes, modules):
    """Instantiate the config templates under `root_dir` and rsync them to /
    on the first master.

    Template parameters of the form {{key}} are replaced with cluster
    information (master/slave lists, data directories, Spark version, etc.).
    Files are only deployed to the first master; the on-cluster setup script
    is expected to copy them to the other nodes.
    """
    active_master = get_dns_name(master_nodes[0], opts.private_ips)
    num_disks = get_num_disks(opts.instance_type)
    hdfs_data_dirs = "/mnt/ephemeral-hdfs/data"
    mapred_local_dirs = "/mnt/hadoop/mrlocal"
    spark_local_dirs = "/mnt/spark"
    if num_disks > 1:
        # Additional local disks are mounted as /mnt2, /mnt3, ...
        for i in range(2, num_disks + 1):
            hdfs_data_dirs += ",/mnt%d/ephemeral-hdfs/data" % i
            mapred_local_dirs += ",/mnt%d/hadoop/mrlocal" % i
            spark_local_dirs += ",/mnt%d/spark" % i
    cluster_url = "%s:7077" % active_master
    if "." in opts.spark_version:
        # Pre-built Spark deploy
        spark_v = get_validate_spark_version(opts.spark_version, opts.spark_git_repo)
        tachyon_v = get_tachyon_version(spark_v)
    else:
        # Spark-only custom deploy
        spark_v = "%s|%s" % (opts.spark_git_repo, opts.spark_version)
        tachyon_v = ""
        print("Deploying Spark via git hash; Tachyon won't be set up")
        # NOTE(review): under Python 3 filter() returns a lazy iterator, which
        # '\n'.join() below would still consume once; fine on Python 2, which
        # this script targets.
        modules = filter(lambda x: x != "tachyon", modules)
    master_addresses = [get_dns_name(i, opts.private_ips) for i in master_nodes]
    slave_addresses = [get_dns_name(i, opts.private_ips) for i in slave_nodes]
    worker_instances_str = "%d" % opts.worker_instances if opts.worker_instances else ""
    template_vars = {
        "master_list": '\n'.join(master_addresses),
        "active_master": active_master,
        "slave_list": '\n'.join(slave_addresses),
        "cluster_url": cluster_url,
        "hdfs_data_dirs": hdfs_data_dirs,
        "mapred_local_dirs": mapred_local_dirs,
        "spark_local_dirs": spark_local_dirs,
        "swap": str(opts.swap),
        "modules": '\n'.join(modules),
        "spark_version": spark_v,
        "tachyon_version": tachyon_v,
        "hadoop_major_version": opts.hadoop_major_version,
        "spark_worker_instances": worker_instances_str,
        "spark_master_opts": opts.master_opts
    }
    if opts.copy_aws_credentials:
        template_vars["aws_access_key_id"] = conn.aws_access_key_id
        template_vars["aws_secret_access_key"] = conn.aws_secret_access_key
    else:
        template_vars["aws_access_key_id"] = ""
        template_vars["aws_secret_access_key"] = ""
    # Create a temp directory in which we will place all the files to be
    # deployed after we substitute template parameters in them
    tmp_dir = tempfile.mkdtemp()
    for path, dirs, files in os.walk(root_dir):
        if path.find(".svn") == -1:
            dest_dir = os.path.join('/', path[len(root_dir):])
            local_dir = tmp_dir + dest_dir
            if not os.path.exists(local_dir):
                os.makedirs(local_dir)
            for filename in files:
                # Skip editor backups and hidden/metadata files.
                if filename[0] not in '#.~' and filename[-1] != '~':
                    dest_file = os.path.join(dest_dir, filename)
                    local_file = tmp_dir + dest_file
                    with open(os.path.join(path, filename)) as src:
                        with open(local_file, "w") as dest:
                            text = src.read()
                            for key in template_vars:
                                text = text.replace("{{" + key + "}}", template_vars[key])
                            dest.write(text)
                            # NOTE(review): redundant — the with-block already
                            # closes the file.
                            dest.close()
    # rsync the whole directory over to the master machine
    command = [
        'rsync', '-rv',
        '-e', stringify_command(ssh_command(opts)),
        "%s/" % tmp_dir,
        "%s@%s:/" % (opts.user, active_master)
    ]
    subprocess.check_call(command)
    # Remove the temp directory we created above
    shutil.rmtree(tmp_dir)
# Deploy a given local directory to a cluster, WITHOUT parameter substitution.
# Note that unlike deploy_files, this works for binary files.
# Also, it is up to the user to add (or not) the trailing slash in root_dir.
# Files are only deployed to the first master instance in the cluster.
#
# root_dir should be an absolute path.
def deploy_user_files(root_dir, opts, master_nodes):
    """rsync `root_dir` verbatim (no template substitution) to / on the
    first master. Works for binary files; trailing slash is caller's choice."""
    active_master = get_dns_name(master_nodes[0], opts.private_ips)
    rsync_cmd = [
        'rsync', '-rv',
        '-e', stringify_command(ssh_command(opts)),
        "%s" % root_dir,
        "%s@%s:/" % (opts.user, active_master)
    ]
    subprocess.check_call(rsync_cmd)
def stringify_command(parts):
    """Render a command given as a string or an argv list as one shell string."""
    if not isinstance(parts, str):
        parts = ' '.join(pipes.quote(part) for part in parts)
    return parts
def ssh_args(opts):
    """Common ssh flags: skip host-key prompts, plus -i when an identity
    file was supplied."""
    flags = ['-o', 'StrictHostKeyChecking=no',
             '-o', 'UserKnownHostsFile=/dev/null']
    if opts.identity_file is not None:
        flags += ['-i', opts.identity_file]
    return flags
def ssh_command(opts):
    """Full ssh argv prefix: the ssh binary followed by the common flags."""
    cmd = ['ssh']
    cmd.extend(ssh_args(opts))
    return cmd
# Run a command on a host through ssh, retrying up to five times
# and then throwing an exception if ssh continues to fail.
def ssh(host, opts, command):
    """Run `command` on `host` through ssh, retrying transient failures.

    Retries every 30 seconds. After more than five attempts, a persistent
    ssh failure (exit code 255) becomes a UsageError with hints for the
    user; any other failure is re-raised unchanged.
    """
    attempt = 0
    while True:
        try:
            return subprocess.check_call(
                ssh_command(opts) + ['-t', '-t', '%s@%s' % (opts.user, host),
                                     stringify_command(command)])
        except subprocess.CalledProcessError as e:
            if attempt > 5:
                # If this was an ssh failure, provide the user with hints.
                if e.returncode == 255:
                    raise UsageError(
                        "Failed to SSH to remote host {0}.\n"
                        "Please check that you have provided the correct --identity-file and "
                        "--key-pair parameters and try again.".format(host))
                else:
                    raise e
            print("Error executing remote command, retrying after 30 seconds: {0}".format(e),
                  file=stderr)
            time.sleep(30)
            attempt += 1
# Backported from Python 2.7 for compatiblity with 2.6 (See SPARK-1990)
def _check_output(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def ssh_read(host, opts, command):
    """Run `command` on `host` over ssh and return its captured stdout."""
    argv = ssh_command(opts) + ['%s@%s' % (opts.user, host), stringify_command(command)]
    return _check_output(argv)
def ssh_write(host, opts, command, arguments):
    """Run `command` on `host` over ssh, feeding `arguments` to its stdin.

    Retries every 30 seconds; gives up with a RuntimeError after more than
    five failed attempts.
    """
    attempt = 0
    while True:
        proc = subprocess.Popen(
            ssh_command(opts) + ['%s@%s' % (opts.user, host), stringify_command(command)],
            stdin=subprocess.PIPE)
        proc.stdin.write(arguments)
        proc.stdin.close()
        status = proc.wait()
        if status == 0:
            break
        if attempt > 5:
            raise RuntimeError("ssh_write failed with error %s" % proc.returncode)
        print("Error {0} while executing remote command, retrying after 30 seconds".
              format(status), file=stderr)
        time.sleep(30)
        attempt += 1
# Gets a list of zones to launch instances in
def get_zones(conn, opts):
    """Return the availability-zone names to launch instances in: every zone
    in the region when --zone=all, otherwise just the requested zone."""
    if opts.zone != 'all':
        return [opts.zone]
    return [z.name for z in conn.get_all_zones()]
# Gets the number of items in a partition
def get_partition(total, num_partitions, current_partitions):
    """Return how many of `total` items partition index `current_partitions`
    receives when `total` is split as evenly as possible across
    `num_partitions` partitions (earlier indices absorb the remainder)."""
    per_partition, remainder = divmod(total, num_partitions)
    if remainder - current_partitions > 0:
        per_partition += 1
    return per_partition
# Gets the IP address, taking into account the --private-ips flag
def get_ip_address(instance, private_ips=False):
    """Return the instance's public IP, or its private IP when
    private_ips is set (the --private-ips flag)."""
    if private_ips:
        return instance.private_ip_address
    return instance.ip_address
# Gets the DNS name, taking into account the --private-ips flag
def get_dns_name(instance, private_ips=False):
    """Return the instance's public DNS name; with private_ips set, its
    private IP address is used as the "DNS name" instead."""
    if private_ips:
        return instance.private_ip_address
    return instance.public_dns_name
def real_main():
    """Parse arguments, validate them, connect to EC2, and dispatch the
    requested action: launch, destroy, login, reboot-slaves, get-master,
    stop, or start. Exits with status 1 on any validation failure."""
    (opts, action, cluster_name) = parse_args()
    # Input parameter validation
    get_validate_spark_version(opts.spark_version, opts.spark_git_repo)
    if opts.wait is not None:
        # NOTE: DeprecationWarnings are silent in 2.7+ by default.
        # To show them, run Python with the -Wdefault switch.
        # See: https://docs.python.org/3.5/whatsnew/2.7.html
        warnings.warn(
            "This option is deprecated and has no effect. "
            "spark-ec2 automatically waits as long as necessary for clusters to start up.",
            DeprecationWarning
        )
    if opts.identity_file is not None:
        if not os.path.exists(opts.identity_file):
            print("ERROR: The identity file '{f}' doesn't exist.".format(f=opts.identity_file),
                  file=stderr)
            sys.exit(1)
        # The key must be readable by the owner only (e.g. mode 400/600),
        # otherwise ssh itself would refuse it.
        file_mode = os.stat(opts.identity_file).st_mode
        if not (file_mode & S_IRUSR) or not oct(file_mode)[-2:] == '00':
            print("ERROR: The identity file must be accessible only by you.", file=stderr)
            print('You can fix this with: chmod 400 "{f}"'.format(f=opts.identity_file),
                  file=stderr)
            sys.exit(1)
    if opts.instance_type not in EC2_INSTANCE_TYPES:
        print("Warning: Unrecognized EC2 instance type for instance-type: {t}".format(
            t=opts.instance_type), file=stderr)
    if opts.master_instance_type != "":
        if opts.master_instance_type not in EC2_INSTANCE_TYPES:
            print("Warning: Unrecognized EC2 instance type for master-instance-type: {t}".format(
                t=opts.master_instance_type), file=stderr)
        # Since we try instance types even if we can't resolve them, we check if they resolve first
        # and, if they do, see if they resolve to the same virtualization type.
        if opts.instance_type in EC2_INSTANCE_TYPES and \
           opts.master_instance_type in EC2_INSTANCE_TYPES:
            if EC2_INSTANCE_TYPES[opts.instance_type] != \
               EC2_INSTANCE_TYPES[opts.master_instance_type]:
                print("Error: spark-ec2 currently does not support having a master and slaves "
                      "with different AMI virtualization types.", file=stderr)
                print("master instance virtualization type: {t}".format(
                    t=EC2_INSTANCE_TYPES[opts.master_instance_type]), file=stderr)
                print("slave instance virtualization type: {t}".format(
                    t=EC2_INSTANCE_TYPES[opts.instance_type]), file=stderr)
                sys.exit(1)
    if opts.ebs_vol_num > 8:
        print("ebs-vol-num cannot be greater than 8", file=stderr)
        sys.exit(1)
    # Prevent breaking ami_prefix (/, .git and startswith checks)
    # Prevent forks with non spark-ec2 names for now.
    if opts.spark_ec2_git_repo.endswith("/") or \
            opts.spark_ec2_git_repo.endswith(".git") or \
            not opts.spark_ec2_git_repo.startswith("https://github.com") or \
            not opts.spark_ec2_git_repo.endswith("spark-ec2"):
        print("spark-ec2-git-repo must be a github repo and it must not have a trailing / or .git. "
              "Furthermore, we currently only support forks named spark-ec2.", file=stderr)
        sys.exit(1)
    if not (opts.deploy_root_dir is None or
            (os.path.isabs(opts.deploy_root_dir) and
             os.path.isdir(opts.deploy_root_dir) and
             os.path.exists(opts.deploy_root_dir))):
        print("--deploy-root-dir must be an absolute path to a directory that exists "
              "on the local file system", file=stderr)
        sys.exit(1)
    try:
        conn = ec2.connect_to_region(opts.region)
    except Exception as e:
        print((e), file=stderr)
        sys.exit(1)
    # Select an AZ at random if it was not specified.
    if opts.zone == "":
        opts.zone = random.choice(conn.get_all_zones()).name
    if action == "launch":
        if opts.slaves <= 0:
            print("ERROR: You have to start at least 1 slave", file=sys.stderr)
            sys.exit(1)
        if opts.resume:
            (master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
        else:
            (master_nodes, slave_nodes) = launch_cluster(conn, opts, cluster_name)
        wait_for_cluster_state(
            conn=conn,
            opts=opts,
            cluster_instances=(master_nodes + slave_nodes),
            cluster_state='ssh-ready'
        )
        setup_cluster(conn, master_nodes, slave_nodes, opts, True)
    elif action == "destroy":
        (master_nodes, slave_nodes) = get_existing_cluster(
            conn, opts, cluster_name, die_on_error=False)
        if any(master_nodes + slave_nodes):
            print("The following instances will be terminated:")
            for inst in master_nodes + slave_nodes:
                print("> %s" % get_dns_name(inst, opts.private_ips))
            print("ALL DATA ON ALL NODES WILL BE LOST!!")
        msg = "Are you sure you want to destroy the cluster {c}? (y/N) ".format(c=cluster_name)
        response = raw_input(msg)
        if response == "y":
            print("Terminating master...")
            for inst in master_nodes:
                inst.terminate()
            print("Terminating slaves...")
            for inst in slave_nodes:
                inst.terminate()
            # Delete security groups as well
            if opts.delete_groups:
                group_names = [cluster_name + "-master", cluster_name + "-slaves"]
                wait_for_cluster_state(
                    conn=conn,
                    opts=opts,
                    cluster_instances=(master_nodes + slave_nodes),
                    cluster_state='terminated'
                )
                print("Deleting security groups (this will take some time)...")
                attempt = 1
                while attempt <= 3:
                    print("Attempt %d" % attempt)
                    groups = [g for g in conn.get_all_security_groups() if g.name in group_names]
                    success = True
                    # Delete individual rules in all groups before deleting groups to
                    # remove dependencies between them
                    for group in groups:
                        print("Deleting rules in security group " + group.name)
                        for rule in group.rules:
                            for grant in rule.grants:
                                success &= group.revoke(ip_protocol=rule.ip_protocol,
                                                        from_port=rule.from_port,
                                                        to_port=rule.to_port,
                                                        src_group=grant)
                    # Sleep for AWS eventual-consistency to catch up, and for instances
                    # to terminate
                    time.sleep(30)  # Yes, it does have to be this long :-(
                    for group in groups:
                        try:
                            # It is needed to use group_id to make it work with VPC
                            conn.delete_security_group(group_id=group.id)
                            print("Deleted security group %s" % group.name)
                        except boto.exception.EC2ResponseError:
                            success = False
                            print("Failed to delete security group %s" % group.name)
                    # Unfortunately, group.revoke() returns True even if a rule was not
                    # deleted, so this needs to be rerun if something fails
                    if success:
                        break
                    attempt += 1
                if not success:
                    print("Failed to delete all security groups after 3 tries.")
                    print("Try re-running in a few minutes.")
    elif action == "login":
        (master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
        if not master_nodes[0].public_dns_name and not opts.private_ips:
            print("Master has no public DNS name. Maybe you meant to specify --private-ips?")
        else:
            master = get_dns_name(master_nodes[0], opts.private_ips)
            print("Logging into master " + master + "...")
            proxy_opt = []
            if opts.proxy_port is not None:
                # -D sets up SOCKS port forwarding through the master.
                proxy_opt = ['-D', opts.proxy_port]
            subprocess.check_call(
                ssh_command(opts) + proxy_opt + ['-t', '-t', "%s@%s" % (opts.user, master)])
    elif action == "reboot-slaves":
        response = raw_input(
            "Are you sure you want to reboot the cluster " +
            cluster_name + " slaves?\n" +
            "Reboot cluster slaves " + cluster_name + " (y/N): ")
        if response == "y":
            (master_nodes, slave_nodes) = get_existing_cluster(
                conn, opts, cluster_name, die_on_error=False)
            print("Rebooting slaves...")
            for inst in slave_nodes:
                if inst.state not in ["shutting-down", "terminated"]:
                    print("Rebooting " + inst.id)
                    inst.reboot()
    elif action == "get-master":
        (master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
        if not master_nodes[0].public_dns_name and not opts.private_ips:
            print("Master has no public DNS name. Maybe you meant to specify --private-ips?")
        else:
            print(get_dns_name(master_nodes[0], opts.private_ips))
    elif action == "stop":
        response = raw_input(
            "Are you sure you want to stop the cluster " +
            cluster_name + "?\nDATA ON EPHEMERAL DISKS WILL BE LOST, " +
            "BUT THE CLUSTER WILL KEEP USING SPACE ON\n" +
            "AMAZON EBS IF IT IS EBS-BACKED!!\n" +
            "All data on spot-instance slaves will be lost.\n" +
            "Stop cluster " + cluster_name + " (y/N): ")
        if response == "y":
            (master_nodes, slave_nodes) = get_existing_cluster(
                conn, opts, cluster_name, die_on_error=False)
            print("Stopping master...")
            for inst in master_nodes:
                if inst.state not in ["shutting-down", "terminated"]:
                    inst.stop()
            print("Stopping slaves...")
            for inst in slave_nodes:
                if inst.state not in ["shutting-down", "terminated"]:
                    # Spot instances cannot be stopped, only terminated.
                    if inst.spot_instance_request_id:
                        inst.terminate()
                    else:
                        inst.stop()
    elif action == "start":
        (master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
        print("Starting slaves...")
        for inst in slave_nodes:
            if inst.state not in ["shutting-down", "terminated"]:
                inst.start()
        print("Starting master...")
        for inst in master_nodes:
            if inst.state not in ["shutting-down", "terminated"]:
                inst.start()
        wait_for_cluster_state(
            conn=conn,
            opts=opts,
            cluster_instances=(master_nodes + slave_nodes),
            cluster_state='ssh-ready'
        )
        # Determine types of running instances
        existing_master_type = master_nodes[0].instance_type
        existing_slave_type = slave_nodes[0].instance_type
        # Setting opts.master_instance_type to the empty string indicates we
        # have the same instance type for the master and the slaves
        if existing_master_type == existing_slave_type:
            existing_master_type = ""
        opts.master_instance_type = existing_master_type
        opts.instance_type = existing_slave_type
        setup_cluster(conn, master_nodes, slave_nodes, opts, False)
    else:
        print("Invalid action: %s" % action, file=stderr)
        sys.exit(1)
def main():
    """Script entry point: run real_main(), mapping UsageError to exit code 1."""
    try:
        real_main()
    except UsageError as err:
        # Usage problems are reported on stderr, then the script exits non-zero.
        print("\nError:\n", err, file=stderr)
        sys.exit(1)


if __name__ == "__main__":
    logging.basicConfig()
    main()
| 40.352861 | 100 | 0.596847 |
94dd07c8922bf1415bad0bbc5de2b929739912b6 | 5,530 | py | Python | nipype/interfaces/slicer/legacy/filtering.py | hanke/nipype | 71fb90a1fd55e7c6a42e0315ba6e603d8301b6ab | [
"BSD-3-Clause"
] | null | null | null | nipype/interfaces/slicer/legacy/filtering.py | hanke/nipype | 71fb90a1fd55e7c6a42e0315ba6e603d8301b6ab | [
"BSD-3-Clause"
] | null | null | null | nipype/interfaces/slicer/legacy/filtering.py | hanke/nipype | 71fb90a1fd55e7c6a42e0315ba6e603d8301b6ab | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf8 -*-
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath
import os
# Input trait definitions for the OtsuThresholdImageFilter command-line wrapper.
# NOTE: this module is autogenerated (see file header "DO NOT EDIT"); change the
# generator, not this file.
class OtsuThresholdImageFilterInputSpec(CommandLineInputSpec):
    insideValue = traits.Int(desc="The value assigned to pixels that are inside the computed threshold", argstr="--insideValue %d")
    outsideValue = traits.Int(desc="The value assigned to pixels that are outside the computed threshold", argstr="--outsideValue %d")
    numberOfBins = traits.Int(desc="This is an advanced parameter. The number of bins in the histogram used to model the probability mass function of the two intensity distributions. Small numbers of bins may result in a more conservative threshold. The default should suffice for most applications. Experimentation is the only way to see the effect of varying this parameter.", argstr="--numberOfBins %d")
    inputVolume = File(position=-2, desc="Input volume to be filtered", exists=True, argstr="%s")
    outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", argstr="%s")
# Output trait definitions: the filter writes a single thresholded volume.
class OtsuThresholdImageFilterOutputSpec(TraitedSpec):
    outputVolume = File(position=-1, desc="Output filtered", exists=True)
class OtsuThresholdImageFilter(SEMLikeCommandLine):
    """title: Otsu Threshold Image Filter

category: Legacy.Filtering

description: This filter creates a binary thresholded image that separates an image into foreground and background components. The filter calculates the optimum threshold separating those two classes so that their combined spread (intra-class variance) is minimal (see http://en.wikipedia.org/wiki/Otsu%27s_method). Then the filter applies that threshold to the input image using the itkBinaryThresholdImageFilter. The numberOfHistogram bins can be set for the Otsu Calculator. The insideValue and outsideValue can be set for the BinaryThresholdImageFilter. The filter produces a labeled volume.

The original reference is:

N.Otsu, ‘‘A threshold selection method from gray level histograms,’’ IEEE Trans.Syst.ManCybern.SMC-9,62–66 1979.

version: 0.1.0.$Revision: 19608 $(alpha)

documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/OtsuThresholdImageFilter

contributor: Bill Lorensen (GE)

acknowledgements: This command module was derived from Insight/Examples (copyright) Insight Software Consortium

"""

    input_spec = OtsuThresholdImageFilterInputSpec
    output_spec = OtsuThresholdImageFilterOutputSpec
    # Executable invoked by the SEM wrapper; the trailing space is preserved
    # from the generator output.
    _cmd = "OtsuThresholdImageFilter "
    # Default filename used when outputVolume is requested as a boolean.
    _outputs_filenames = {'outputVolume':'outputVolume.nii'}
# Input trait definitions for the ResampleScalarVolume command-line wrapper
# (autogenerated; see file header).
class ResampleScalarVolumeInputSpec(CommandLineInputSpec):
    spacing = InputMultiPath(traits.Float, desc="Spacing along each dimension (0 means use input spacing)", sep=",", argstr="--spacing %s")
    interpolation = traits.Enum("linear", "nearestNeighbor", "bspline", "hamming", "cosine", "welch", "lanczos", "blackman", desc="Sampling algorithm (linear, nearest neighbor, bspline(cubic) or windowed sinc). There are several sinc algorithms available as described in the following publication: Erik H. W. Meijering, Wiro J. Niessen, Josien P. W. Pluim, Max A. Viergever: Quantitative Comparison of Sinc-Approximating Kernels for Medical Image Interpolation. MICCAI 1999, pp. 210-217. Each window has a radius of 3;", argstr="--interpolation %s")
    InputVolume = File(position=-2, desc="Input volume to be resampled", exists=True, argstr="%s")
    OutputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Resampled Volume", argstr="%s")
# Output trait definitions: the resampled volume produced by the CLI run.
class ResampleScalarVolumeOutputSpec(TraitedSpec):
    OutputVolume = File(position=-1, desc="Resampled Volume", exists=True)
class ResampleScalarVolume(SEMLikeCommandLine):
    """title: Resample Scalar Volume

category: Legacy.Filtering

description: Resampling an image is an important task in image analysis. It is especially important in the frame of image registration. This module implements image resampling through the use of itk Transforms. This module uses an Identity Transform. The resampling is controlled by the Output Spacing. "Resampling" is performed in space coordinates, not pixel/grid coordinates. It is quite important to ensure that image spacing is properly set on the images involved. The interpolator is required since the mapping from one space to the other will often require evaluation of the intensity of the image at non-grid positions. Several interpolators are available: linear, nearest neighbor, bspline and five flavors of sinc. The sinc interpolators, although more precise, are much slower than the linear and nearest neighbor interpolator. To resample label volumnes, nearest neighbor interpolation should be used exclusively.

version: 0.1.0.$Revision: 20594 $(alpha)

documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ResampleVolume

contributor: Bill Lorensen (GE)

acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.

"""

    input_spec = ResampleScalarVolumeInputSpec
    output_spec = ResampleScalarVolumeOutputSpec
    # Executable invoked by the SEM wrapper; trailing space preserved from generator.
    _cmd = "ResampleScalarVolume "
    # Default filename used when OutputVolume is requested as a boolean.
    _outputs_filenames = {'OutputVolume':'OutputVolume.nii'}
| 69.125 | 925 | 0.787703 |
59064a72a9d7b6a05a8cc4e346ff91e700ef3928 | 18,297 | py | Python | tests/unittests/test_pulsar.py | praksinha/hubble | 54062cf07bf2462ea9be149d740f38defd849b25 | [
"Apache-2.0"
] | null | null | null | tests/unittests/test_pulsar.py | praksinha/hubble | 54062cf07bf2462ea9be149d740f38defd849b25 | [
"Apache-2.0"
] | null | null | null | tests/unittests/test_pulsar.py | praksinha/hubble | 54062cf07bf2462ea9be149d740f38defd849b25 | [
"Apache-2.0"
] | null | null | null | """
Test the fim (pulsar) internals for various correctness
"""
import os
import shutil
import logging
import six
from salt.exceptions import CommandExecutionError
import hubblestack.extmods.modules.pulsar as pulsar
log = logging.getLogger(__name__)
class TestPulsar(object):
    """ An older set of pulsar tests.

    These tests monkey-patch pulsar's module-level globals (__salt__,
    __opts__, __context__) directly rather than using fixtures, so each
    test is responsible for its own setup/teardown of that shared state.
    """

    def test_virtual(self):
        # __virtual__ gates module loading; it must report availability.
        var = pulsar.__virtual__()
        assert var is True

    def test_enqueue(self):
        # Only checks the attribute exists/is truthy; does not invoke it.
        pulsar.__context__ = {}
        var = pulsar._enqueue
        assert var != 0

    def test_get_notifier(self):
        # Only checks the attribute exists/is truthy; does not invoke it.
        pulsar.__context__ = {}
        var = pulsar._get_notifier
        assert var != 0

    def test_dict_update_for_merge_dict(self):
        # Recursive update of two disjoint dicts yields their union.
        dest = {'key1': 'val1'}
        upd = {'key_2': 'val_2'}
        test_dict = {'key1': 'val1', 'key_2': 'val_2'}
        var = pulsar._dict_update(dest, upd, recursive_update=True, merge_lists=False)
        assert var == test_dict

    def test_dict_update_for_classic_dictUpdate(self):
        # Non-recursive (classic dict.update) path yields the same union here.
        dest = {'key1': 'val1'}
        upd = {'key_2': 'val_2'}
        test_dict = {'key1': 'val1', 'key_2': 'val_2'}
        var = pulsar._dict_update(dest, upd, recursive_update=False, merge_lists=False)
        assert var == test_dict

    def test_dict_update_for_dest_TypeError(self):
        # A non-dict `dest` is expected to raise TypeError.
        # NOTE(review): if no TypeError is raised this test still passes
        # silently (no assertion after the try), and `var` is unused;
        # consider pytest.raises(TypeError).
        dest = 'TestValue1'
        upd = {'key_1': 'val_1', 'key_2': 'val_2'}
        try:
            var = pulsar._dict_update(dest, upd, recursive_update=True, merge_lists=False)
        except TypeError:
            pass

    def test_dict_update_for_upd_TypeError(self):
        # A non-dict `upd` is expected to raise TypeError.
        # NOTE(review): same weakness as the dest_TypeError test above.
        dest = {'key_1': 'val_1', 'key_2': 'val_2'}
        upd = 'TestValue2'
        try:
            var = pulsar._dict_update(dest, upd, recursive_update=True, merge_lists=False)
        except TypeError:
            pass

    def test_dict_update_recurssive(self):
        # Merging `upd` into `dest` recursively should combine the two
        # blacklist entries (talk1 + talk2) without clobbering either.
        # NOTE(review): `ret` is unused.
        ret = {}
        dest = {'data':
                {'blacklist': {'talk1': {'data': {'Ubuntu-16.04': [{'/etc/inetd.conf': {'pattern': '^talk', 'tag': 'CIS-5.1.4'}}, {'/etc/inetd.conf': {'pattern': '^ntalk', 'tag': 'CIS-5.1.4'}}]}, 'description': 'Ensure talk server is not enabled'}},
                 'whitelist': {'ssh_ignore_rhosts': {'data': {'Ubuntu-16.04': [{'/etc/ssh/sshd_config': {'pattern': 'IgnoreRhosts', 'tag': 'CIS-9.3.6', 'match_output': 'yes'}}]}, 'description': 'Set SSH IgnoreRhosts to Yes'}}}}
        upd = {'data':
               {'blacklist': {'talk2': {'data': {'Ubuntu-16.04': [{'/etc/inetd.conf': {'pattern': '^talk', 'tag': 'CIS-5.1.4'}}, {'/etc/inetd.conf': {'pattern': '^ntalk', 'tag': 'CIS-5.1.4'}}]}, 'description': 'Ensure talk server is not enabled'}}}}
        data_list = [dest, upd]
        for data in data_list:
            val = pulsar._dict_update(dest, data, recursive_update=True, merge_lists=True)
        assert (len(val['data']['blacklist'])) == 2

    def test_process(self):
        # With an empty path config, process() returns an empty event list.
        configfile = 'tests/unittests/resources/hubblestack_pulsar_config.yaml'
        verbose = False

        def config_get(_, default):
            ''' pretend salt[config.get] '''
            return default

        __salt__ = {}
        __salt__['config.get'] = config_get
        pulsar.__salt__ = __salt__
        pulsar.__opts__ = {}
        pulsar.__context__ = {}
        var = pulsar.process(configfile, verbose)
        pulsar.__salt__ = {}
        assert len(var) == 0
        assert isinstance(var, list)

    def test_top_result_for_list(self):
        # Top-file entries expand to salt:// config URLs (dots -> slashes).
        topfile = 'tests/unittests/resources/top.pulsar'

        def cp_cache_file(_):
            ''' pretend salt[cp.cache_file] '''
            return 'tests/unittests/resources/top.pulsar'

        def match_compound(value):
            ''' pretend match.compound '''
            return value

        __salt__ = {}
        __salt__['cp.cache_file'] = cp_cache_file
        __salt__['match.compound'] = match_compound
        pulsar.__salt__ = __salt__
        get_top_data_config = pulsar.get_top_data(topfile)
        configs = ['salt://hubblestack_pulsar/' + config.replace('.', '/') + '.yaml'
                   for config in get_top_data_config]
        assert configs[0] == 'salt://hubblestack_pulsar/hubblestack_pulsar_config.yaml'

    def test_get_top_data(self):
        # get_top_data returns the list of config names from the top file.
        topfile = 'tests/unittests/resources/top.pulsar'

        def cp_cache_file(topfile):
            ''' pretend salt[cp.cache_file] '''
            return topfile

        def match_compound(value):
            ''' pretend match.compound '''
            return value

        __salt__ = {}
        __salt__['cp.cache_file'] = cp_cache_file
        __salt__['match.compound'] = match_compound
        pulsar.__salt__ = __salt__
        result = pulsar.get_top_data(topfile)
        pulsar.__salt__ = {}
        assert isinstance(result, list)
        assert result[0] == 'hubblestack_pulsar_config'

    def test_get_top_data_for_CommandExecutionError(self):
        # A bogus top file path is expected to raise CommandExecutionError.
        # NOTE(review): as with the TypeError tests, this passes silently
        # when no exception is raised; consider pytest.raises.
        topfile = '/testfile'

        def cp_cache_file(_):
            ''' pretend salt[cp.cache_file] '''
            return '/testfile'

        def match_compound(value):
            ''' pretend match.compound '''
            return value

        __salt__ = {}
        __salt__['cp.cache_file'] = cp_cache_file
        __salt__['match.compound'] = match_compound
        pulsar.__salt__ = __salt__
        try:
            result = pulsar.get_top_data(topfile)
            pulsar.__salt__ = {}
        except CommandExecutionError:
            pass
class TestPulsar2(object):
    """ A slightly newer set of pulsar internals tests.

    Exercises the inotify watch manager against a real temporary directory
    (``blah``), so the order of filesystem operations and process() calls
    in each test is significant.
    """

    # Test directory/file and their absolute forms, shared by all tests.
    tdir = 'blah'
    tfile = os.path.join(tdir, 'file')
    atdir = os.path.abspath(tdir)
    atfile = os.path.abspath(tfile)

    def reset(self, **kwargs):
        """Reinstall pulsar globals with the given pulsar config and start
        from a clean (removed) test directory and empty event list."""
        def config_get(_, default):
            ''' pretend salt[config.get] '''
            return default

        if 'paths' not in kwargs:
            kwargs['paths'] = []

        __salt__ = {}
        __salt__['config.get'] = config_get
        pulsar.__salt__ = __salt__
        pulsar.__opts__ = {'pulsar': kwargs}
        pulsar.__context__ = {}
        self.nuke_tdir()
        pulsar._get_notifier() # sets up the dequeue
        self.events = []
        self.notifier = pulsar.__context__['pulsar.notifier']
        self.watch_manager = self.notifier._watch_manager
        self.watch_manager.update_config()

    def process(self):
        # Drain pulsar.process() into self.events as "CHANGE(path)" strings.
        self.events.extend([ "{change}({path})".format(**x) for x in pulsar.process() ])

    def get_clear_events(self):
        """Return the accumulated events and reset the accumulator."""
        ret = self.events
        self.events = list()
        return ret

    def nuke_tdir(self):
        # Remove the test directory tree if present.
        if os.path.isdir(self.tdir):
            shutil.rmtree(self.tdir)

    def mk_tdir_and_write_tfile(self, fname=None, to_write='supz\n'):
        # NOTE(review): the `fname` parameter is computed but never used --
        # the write always targets self.tfile. Callers in this class only
        # use the default, so behavior is unaffected today.
        if fname is None:
            fname = self.tfile
        if not os.path.isdir(self.tdir):
            os.mkdir(self.tdir)
        with open(self.tfile, 'w') as fh:
            fh.write(to_write)

    def mk_subdir_files(self, *files, **kwargs):
        """Create files (optionally nested, e.g. 'a/b/c/blah2') under tdir.

        Per-file contents may be supplied via kwargs keyed on variants of
        the basename ('<name>_out', 'out_<name>', '<name>_to_write') or a
        global 'to_write'; defaults to 'supz\\n'.
        """
        if len(files) == 1 and isinstance(files[0], (list, tuple)):
            files = files[0]
        for file in files:
            file = file if file.startswith(self.tdir + '/') else os.path.join(self.tdir, file)
            split_file = file.split('/')
            if split_file:
                output_fname = split_file.pop()
                dir_to_make = ''
                for i in split_file:
                    dir_to_make = os.path.join(dir_to_make, i)
                    # NOTE(review): existence check uses `i` (last component)
                    # rather than the accumulated `dir_to_make` path.
                    if not os.path.isdir(i):
                        os.mkdir(dir_to_make)
                forms = ('{}_out', 'out_{}', '{}_to_write', 'to_write')
                for form in forms:
                    to_write = kwargs.get(form.format(output_fname))
                    if to_write is not None:
                        break
                if to_write is None:
                    to_write = 'supz\n'
                output_fname = os.path.join(dir_to_make, output_fname)
                with open(output_fname, 'a') as fh:
                    fh.write(to_write if to_write is not None else 'supz\n')

    def more_fname(self, number, base=None):
        """Return the name of the Nth extra file, e.g. 'blah/file_0'."""
        if base is None:
            base = self.tfile
        return '{0}_{1}'.format(base, number)

    def mk_more_files(self, count=1, to_write='supz-{0}\n'):
        # NOTE(review): formats with `count`, not `i`, so every extra file
        # gets identical contents -- presumably `i` was intended; harmless
        # for these tests, which only look at watch/event bookkeeping.
        for i in range(count):
            with open(self.more_fname(i), 'w') as fh:
                fh.write(to_write.format(count))

    def test_listify_anything(self):
        # _listify_anything flattens/uniquifies arbitrary nests and drops Nones.
        listify_fn = pulsar.PulsarWatchManager._listify_anything

        def assert_len_listify_is(list_arg, expected):
            """ compact comparifier """
            assert len( listify_fn(list_arg) ) == expected

        def assert_str_listify_is(list_arg, expected):
            """ compact comparifier """
            assert str(sorted(listify_fn(list_arg))) == str(sorted(expected))

        assert_len_listify_is(None, 0)
        assert_len_listify_is([None], 0)
        assert_len_listify_is(set([None]), 0)
        assert_len_listify_is(set(), 0)
        assert_len_listify_is([], 0)
        assert_len_listify_is([[],[],(),{}, None,[None]], 0)
        oogly_list = [[1],[2],(1,),(5),{2}, None,[None],{'one':1}]
        assert_len_listify_is(oogly_list, 4)
        assert_str_listify_is(oogly_list, [1,2,5,'one'])

    def test_add_watch(self, modality='add-watch'):
        # Shared body for the watch-establishment tests; `modality` selects
        # which API establishes the watch and which options are enabled.
        options = {}
        kwargs = { self.atdir: options }
        if modality in ('watch_new_files', 'watch_files'):
            options[modality] = True
        self.reset(**kwargs)

        # NOTE: without new_files and/or without watch_files parent_db should
        # remain empty, and we shouldn't get a watch on tfile

        os.mkdir(self.tdir)
        if modality == 'add-watch':
            self.watch_manager.add_watch(self.tdir, pulsar.DEFAULT_MASK)
        elif modality in ('watch', 'watch_new_files', 'watch_files'):
            self.watch_manager.watch(self.tdir)
        else:
            raise Exception("unknown modality")

        self.process()
        assert len(self.events) == 0
        # watch_db is keyed on absolute paths only.
        assert self.watch_manager.watch_db.get(self.tdir) is None
        assert self.watch_manager.watch_db.get(self.atdir) > 0
        assert len(self.watch_manager.watch_db) == 1
        assert not isinstance(self.watch_manager.parent_db.get(self.atdir), set)

        self.mk_tdir_and_write_tfile() # write supz to tfile
        self.process()
        assert len(self.events) == 2
        assert self.events[0].startswith('IN_CREATE')
        assert self.events[1].startswith('IN_MODIFY')
        if modality in ('watch_files', 'watch_new_files'):
            # File-watching modes add a second watch (the file) and track
            # it under its parent directory.
            assert len(self.watch_manager.watch_db) == 2
            assert isinstance(self.watch_manager.parent_db.get(self.atdir), set)
        else:
            assert len(self.watch_manager.watch_db) == 1
            assert not isinstance(self.watch_manager.parent_db.get(self.atdir), set)

        self.nuke_tdir()

    def test_watch(self):
        self.test_add_watch(modality='watch')

    def test_watch_new_files(self):
        self.test_add_watch(modality='watch_new_files')

    def test_recurse_without_watch_files(self):
        # recurse=False watches only tdir; recurse=True also watches a/b/c.
        config1 = {self.atdir: { 'recurse': False }}
        config2 = {self.atdir: { 'recurse': True }}

        self.reset(**config1)
        self.mk_subdir_files('blah1','a/b/c/blah2')
        self.watch_manager.watch(self.tdir)
        self.watch_manager.prune()
        set1 = set(self.watch_manager.watch_db)

        self.reset(**config2)
        self.mk_subdir_files('blah1','a/b/c/blah2')
        self.watch_manager.watch(self.tdir)
        self.watch_manager.prune()
        set2 = set(self.watch_manager.watch_db)

        # Expected watch sets: just tdir vs tdir plus each nested dir.
        set0_a = set([self.atdir])
        set0_b = [self.atdir]
        for i in 'abc':
            set0_b.append( os.path.join(set0_b[-1], i) )
        set0_b = set(set0_b)

        assert set1 == set0_a
        assert set2 == set0_b

    def config_make_files_watch_process_reconfig(self, config, reconfig=None, mk_files=0):
        """
        create a config (arg0),
        make tdir and tfile,
        watch the tdir,
        store watch_db in set0,
        make additional files (default: 0),
        execute process(),
        store watch_db in set1,
        reconfigure using reconfig param (named param or arg1) (default: None)
        execute process(),
        store watch_db in set2
        return set0, set1, set2 as a tuple
        """
        self.reset(**config)
        self.mk_tdir_and_write_tfile()
        self.watch_manager.watch(self.tdir)
        set0 = set(self.watch_manager.watch_db)
        if mk_files > 0:
            self.mk_more_files(count=mk_files)
        self.process()
        set1 = set(self.watch_manager.watch_db)
        # reconfig=None means "remove the dir from the config entirely".
        if reconfig is None:
            del self.watch_manager.cm.nc_config[ self.atdir ]
        else:
            self.watch_manager.cm.nc_config[ self.atdir ] = reconfig
        self.process()
        set2 = set(self.watch_manager.watch_db)
        return set0, set1, set2

    def test_pruning_watch_files_false(self):
        # No file-watching options: only the dir is watched, and removing
        # the config prunes everything.
        set0, set1, set2 = self.config_make_files_watch_process_reconfig({self.atdir:{}}, None, mk_files=2)
        assert set0 == set([self.atdir])
        assert set1 == set([self.atdir])
        assert set2 == set()

    def test_pruning_watch_new_files_then_false(self):
        # Turning watch_new_files off should drop the file watches only.
        config1 = {self.atdir: { 'watch_new_files': True }}
        config2 = {self.atdir: { 'watch_new_files': False }}
        set0, set1, set2 = self.config_make_files_watch_process_reconfig(config1, config2, mk_files=2)
        fname1 = self.more_fname(0, base=self.atfile)
        fname2 = self.more_fname(1, base=self.atfile)
        assert set0 == set([self.atdir])
        assert set1 == set([self.atdir, fname1, fname2])
        assert set2 == set([self.atdir])

    def test_pruning_watch_files_then_false(self):
        # watch_files also watches the pre-existing tfile immediately.
        config1 = {self.atdir: { 'watch_files': True }}
        config2 = {self.atdir: { 'watch_files': False }}
        set0, set1, set2 = self.config_make_files_watch_process_reconfig(config1, config2, mk_files=2)
        fname1 = self.more_fname(0, base=self.atfile)
        fname2 = self.more_fname(1, base=self.atfile)
        assert set0 == set([self.atdir, self.atfile])
        assert set1 == set([self.atdir, self.atfile, fname1, fname2])
        assert set2 == set([self.atdir])

    def test_pruning_watch_new_files_then_nothing(self):
        # Removing the config entry prunes dir and file watches alike.
        config1 = {self.atdir: { 'watch_new_files': True }}
        set0, set1, set2 = self.config_make_files_watch_process_reconfig(config1, None, mk_files=2)
        fname1 = self.more_fname(0, base=self.atfile)
        fname2 = self.more_fname(1, base=self.atfile)
        assert set0 == set([self.atdir])
        assert set1 == set([self.atdir, fname1, fname2])
        assert set2 == set()

    def test_pruning_watch_files_then_nothing(self):
        config1 = {self.atdir: { 'watch_files': True }}
        set0, set1, set2 = self.config_make_files_watch_process_reconfig(config1, None, mk_files=2)
        fname1 = self.more_fname(0, base=self.atfile)
        fname2 = self.more_fname(1, base=self.atfile)
        assert set0 == set([self.atdir, self.atfile])
        assert set1 == set([self.atdir, fname1, fname2, self.atfile])
        assert set2 == set()

    def test_watch_files_events(self):
        # Full modify/delete/create/modify event cycle on a watched dir
        # with watch_files enabled.
        config = {self.atdir: { 'watch_files': True }}
        self.reset(**config)
        self.mk_tdir_and_write_tfile()
        set0 = set(self.watch_manager.watch_db)
        pulsar.process()
        set1 = set(self.watch_manager.watch_db)
        levents1 = len(self.events)
        assert set0 == set()
        assert set1 == set([self.atdir, self.atfile])
        assert levents1 == 0

        with open(self.atfile, 'a') as fh:
            fh.write('supz\n')
        self.process()
        set_ = set(self.watch_manager.watch_db)
        events_ = self.get_clear_events()
        assert set_ == set1
        assert events_ == ['IN_MODIFY({})'.format(self.atfile)]

        os.unlink(self.atfile)
        self.process()
        set_ = set(self.watch_manager.watch_db)
        events_ = self.get_clear_events()
        assert set_ == set([self.atdir])
        assert events_ == ['IN_DELETE({})'.format(self.atfile)]

        with open(self.atfile, 'a') as fh:
            fh.write('supz\n')
        self.process()
        set_ = set(self.watch_manager.watch_db)
        events_ = self.get_clear_events()
        assert set_ == set1
        assert events_ == ['IN_CREATE({})'.format(self.atfile)]

        with open(self.atfile, 'a') as fh:
            fh.write('supz\n')
        self.process()
        set_ = set(self.watch_manager.watch_db)
        events_ = self.get_clear_events()
        assert set_ == set1
        assert events_ == ['IN_MODIFY({})'.format(self.atfile)]

    def test_single_file_events(self):
        # Same cycle as above but the config targets the file itself.
        config = {self.atfile: dict()}
        self.reset(**config)
        self.mk_tdir_and_write_tfile()
        set0 = set(self.watch_manager.watch_db)
        assert set0 == set()
        pulsar.process()
        set1 = set(self.watch_manager.watch_db)
        levents1 = len(self.events)
        assert set1 == set([self.atfile])
        assert levents1 == 0

        with open(self.atfile, 'a') as fh:
            fh.write('supz\n')
        self.process()
        set2 = set(self.watch_manager.watch_db)
        events2 = self.get_clear_events()
        assert set2 == set1
        assert events2 == ['IN_MODIFY({})'.format(self.atfile)]

        os.unlink(self.atfile)
        self.process()
        set_ = set(self.watch_manager.watch_db)
        events_ = self.get_clear_events()
        assert set_ == set() # this is DELETE_SELF now (technically)
        assert events_ == ['IN_DELETE({})'.format(self.atfile)]

        with open(self.atfile, 'a') as fh:
            fh.write('supz\n')
        self.process()
        set_ = set(self.watch_manager.watch_db)
        events_ = self.get_clear_events()
        assert set_ == set1
        assert events_ == ['IN_CREATE({})'.format(self.atfile)]

        with open(self.atfile, 'a') as fh:
            fh.write('supz\n')
        self.process()
        set_ = set(self.watch_manager.watch_db)
        events_ = self.get_clear_events()
        assert set_ == set1
        assert events_ == ['IN_MODIFY({})'.format(self.atfile)]
| 36.667335 | 249 | 0.595125 |
7e0035cd6e61b9153652f5b38eb832c6db59a787 | 227 | py | Python | Sem3/Python/assignment4/12_string.py | nsudhanva/mca-code | 812348ce53edbe0f42f85a9c362bfc8aad64e1e7 | [
"MIT"
] | null | null | null | Sem3/Python/assignment4/12_string.py | nsudhanva/mca-code | 812348ce53edbe0f42f85a9c362bfc8aad64e1e7 | [
"MIT"
] | null | null | null | Sem3/Python/assignment4/12_string.py | nsudhanva/mca-code | 812348ce53edbe0f42f85a9c362bfc8aad64e1e7 | [
"MIT"
] | 2 | 2018-10-12T06:38:14.000Z | 2019-01-30T04:38:03.000Z | some_strings = list(input('Enter some strings: ').replace(' ', '').split(','))
def count_ind_strings(some_strings):
    """Print the length of each string in *some_strings*, one per line."""
    for text in some_strings:
        print('Length of string:', text, ':', len(text))
count_ind_strings(some_strings) | 32.428571 | 78 | 0.669604 |
04f86b2e5601e436ae67948cc8416cf8cc831f07 | 1,769 | py | Python | examples/dfp/v201711/custom_field_service/get_all_custom_fields.py | christineyi3898/googleads-python-lib | cd707dc897b93cf1bbb19355f7424e7834e7fb55 | [
"Apache-2.0"
] | 1 | 2019-10-21T04:10:22.000Z | 2019-10-21T04:10:22.000Z | examples/dfp/v201711/custom_field_service/get_all_custom_fields.py | christineyi3898/googleads-python-lib | cd707dc897b93cf1bbb19355f7424e7834e7fb55 | [
"Apache-2.0"
] | null | null | null | examples/dfp/v201711/custom_field_service/get_all_custom_fields.py | christineyi3898/googleads-python-lib | cd707dc897b93cf1bbb19355f7424e7834e7fb55 | [
"Apache-2.0"
] | 1 | 2019-10-21T04:10:51.000Z | 2019-10-21T04:10:51.000Z | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all custom fields.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
    """Fetch and print every custom field in the network, then the total count.

    Args:
        client: an initialized dfp.DfpClient used to obtain the service.
    """
    # Initialize appropriate service.
    custom_field_service = client.GetService(
        'CustomFieldService', version='v201711')

    # Create a statement to select custom fields.
    statement = dfp.StatementBuilder()

    # Retrieve a small amount of custom fields at a time, paging
    # through until all custom fields have been retrieved.
    while True:
        response = custom_field_service.getCustomFieldsByStatement(
            statement.ToStatement())
        if 'results' in response and len(response['results']):
            for custom_field in response['results']:
                # Print out some information for each custom field.
                print('Custom field with ID "%d" and name "%s" was found.\n' %
                      (custom_field['id'], custom_field['name']))
            statement.offset += statement.limit
        else:
            break

    # Fixed: this was a Python-2-only `print` statement while the rest of the
    # function uses print() calls; the function form works on both 2 and 3.
    print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
    # Initialize client object.
    # LoadFromStorage() reads credentials/network settings from the default
    # googleads configuration file.
    dfp_client = dfp.DfpClient.LoadFromStorage()
    main(dfp_client)
| 34.019231 | 74 | 0.722442 |
2ff4b9c1a2a02caca268ec262b43814910202500 | 1,222 | py | Python | tests/datasets.py | sam-atkins/manage-conf | de9d0fd8d512061e4e52766eb3db1ca8eafaa63c | [
"MIT"
] | null | null | null | tests/datasets.py | sam-atkins/manage-conf | de9d0fd8d512061e4e52766eb3db1ca8eafaa63c | [
"MIT"
] | 30 | 2019-05-29T11:04:54.000Z | 2019-07-04T06:23:58.000Z | tests/datasets.py | sam-atkins/manage-conf | de9d0fd8d512061e4e52766eb3db1ca8eafaa63c | [
"MIT"
] | null | null | null | import datetime
# Canned boto3-style response payload used as a test fixture. The shape
# (Parameters list with Name/Type/Value/Version/LastModifiedDate, plus
# ResponseMetadata) matches an AWS SSM Parameter Store `get_parameters`
# result -- NOTE(review): confirm against the code under test.
BOTO_PAYLOAD = {
    "Parameters": [
        {
            "Name": "/portal/dev/ALLOWED_HOSTS",
            "Type": "StringList",
            "Value": "\"['uglyurl.execute-api.us-east-1.amazonaws.com']\"",
            "Version": 5,
            "LastModifiedDate": datetime.datetime(2019, 3, 26, 16, 15, 45, 414000),
        },
        {
            "Name": "/portal/dev/SECRET_KEY",
            "Type": "SecureString",
            "Value": '"not-a-good-secret"',
            "Version": 2,
            "LastModifiedDate": datetime.datetime(2019, 3, 26, 14, 53, 25, 738000),
        },
        {
            "Name": "/portal/dev/STATICFILES_STORAGE",
            "Type": "String",
            "Value": '"S3-storage"',
            "Version": 2,
            "LastModifiedDate": datetime.datetime(2019, 3, 26, 14, 53, 39, 600000),
        },
    ],
    "ResponseMetadata": {
        "RequestId": "XXXXXXXXXX",
        "HTTPStatusCode": 200,
        "HTTPHeaders": {
            "x-amzn-requestid": "XXXXXXXXXX",
            "content-type": "application/x-amz-json-1.1",
            "content-length": "1621",
            "date": "Sat, 30 Mar 2019 08:11:35 GMT",
        },
        "RetryAttempts": 0,
    },
}
| 30.55 | 83 | 0.477087 |
a2bbac2a25474af938c9829739df9f044f486b01 | 1,456 | py | Python | torch/distributed/algorithms/model_averaging/utils.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 3 | 2019-01-21T12:15:39.000Z | 2019-06-08T13:59:44.000Z | torch/distributed/algorithms/model_averaging/utils.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 1 | 2021-06-25T22:00:31.000Z | 2021-06-25T22:00:31.000Z | torch/distributed/algorithms/model_averaging/utils.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 1 | 2021-10-05T07:05:26.000Z | 2021-10-05T07:05:26.000Z | # flake8: noqa C101
import itertools
from typing import Iterator
import torch
import torch.distributed as dist
def average_parameters(
    params: Iterator[torch.nn.Parameter], process_group: dist.ProcessGroup
):
    """
    Averages all the given parameters.
    For allreduce efficiency, all the parameters are flattened into a contiguous buffer.
    Thus, it requires extra memory of the same size as the given parameters.

    Args:
        params: iterator of parameters to average in place.
        process_group: group to allreduce over; None means the WORLD group.
    """
    group_to_use = process_group if process_group is not None else dist.group.WORLD
    # Do not update any parameter if not in the process group.
    if dist._rank_not_in_group(group_to_use):
        return

    # `params` is an iterator and is consumed twice: once to pack, once to
    # unpack -- hence the tee.
    params_it1, params_it2 = itertools.tee(params)
    # If the input parameters have different data types,
    # packing these parameters will trigger an implicit type up-casting.
    # The original parameter data types will be restored during the subsequent unpacking.
    flat_params = torch.cat([p.data.view(-1) for p in params_it1])
    # Pre-divide by world size so the allreduce sum yields the average.
    flat_params /= dist.get_world_size(group_to_use)
    # Make sure the allreduce will not conflict with any other ongoing process group.
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    dist.all_reduce(flat_params, group=group_to_use)

    # Unpack the averaged buffer back into each parameter, restoring its
    # original shape and dtype.
    offset = 0
    for p in params_it2:
        with torch.no_grad():
            p.set_(flat_params[offset : offset + p.numel()].view_as(p).type_as(p))  # type: ignore[call-overload]
            offset += p.numel()
| 38.315789 | 113 | 0.721841 |
43aae35ac1ea566e255f6a96f521ebc14fc594c9 | 428 | py | Python | pythia/datasets/scene_graph_database.py | SCUT-AILab/CRN_tvqa | 0680ed828208ec8c104965438fa0b1cd2010df1f | [
"BSD-3-Clause"
] | 11 | 2020-10-27T08:59:10.000Z | 2022-03-01T10:45:51.000Z | pythia/datasets/scene_graph_database.py | SCUT-AILab/CRN_tvqa | 0680ed828208ec8c104965438fa0b1cd2010df1f | [
"BSD-3-Clause"
] | 2 | 2020-10-27T08:58:47.000Z | 2021-03-02T07:57:54.000Z | pythia/datasets/scene_graph_database.py | SCUT-AILab/CRN_tvqa | 0680ed828208ec8c104965438fa0b1cd2010df1f | [
"BSD-3-Clause"
] | 4 | 2020-09-13T02:39:54.000Z | 2022-03-06T14:23:53.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
from pythia.datasets.image_database import ImageDatabase
class SceneGraphDatabase(ImageDatabase):
    """Image database indexed by ``image_id`` for direct scene-graph lookup."""

    def __init__(self, scene_graph_path):
        super().__init__(scene_graph_path)
        # Build an image_id -> record index so __getitem__ is a dict lookup.
        self.data_dict = {entry["image_id"]: entry for entry in self.data}

    def __getitem__(self, idx):
        return self.data_dict[idx]
| 30.571429 | 57 | 0.668224 |
2bebbafb9f946fb5036d0473ccb16c883da82c12 | 969 | py | Python | profil3r/get/datas/src/classes/core/services/_forum.py | GuillaumeFalourd/formulas-insights | c43f8f96e28343ab0919e10d7dc26b2dfeb0792b | [
"Apache-2.0"
] | 5 | 2020-09-30T19:20:42.000Z | 2022-02-25T22:20:30.000Z | profil3r/get/datas/src/classes/core/services/_forum.py | GuillaumeFalourd/formulas-insights | c43f8f96e28343ab0919e10d7dc26b2dfeb0792b | [
"Apache-2.0"
] | 5 | 2020-09-28T21:53:07.000Z | 2021-05-06T14:58:10.000Z | profil3r/get/datas/src/classes/core/services/_forum.py | GuillaumeFalourd/formulas-insights | c43f8f96e28343ab0919e10d7dc26b2dfeb0792b | [
"Apache-2.0"
] | null | null | null | from classes.modules.forum.zeroxzerozerosec import ZeroxZeroZeroSec
from classes.modules.forum.jeuxvideo import JeuxVideo
from classes.modules.forum.hackernews import Hackernews
from classes.modules.forum.crackedto import CrackedTo
# 0x00sec
def zeroxzerozerosec(self):
    """Search 0x00sec for the generated name permutations and record results."""
    self.result["0x00sec"] = ZeroxZeroZeroSec(self.CONFIG, self.permutations_list).search()

    # print results
    self.print_results("0x00sec")
# jeuxvideo.com
def jeuxvideo(self):
    """Search jeuxvideo.com for the generated name permutations and record results."""
    self.result["jeuxvideo.com"] = JeuxVideo(self.CONFIG, self.permutations_list).search()

    # print results
    self.print_results("jeuxvideo.com")
# Hackernews
def hackernews(self):
    """Search Hacker News for the generated name permutations and record results."""
    self.result["hackernews"] = Hackernews(self.CONFIG, self.permutations_list).search()

    # print results
    self.print_results("hackernews")
# Cracked.to
def crackedto(self):
    """Search Cracked.to for the generated name permutations and record results."""
    self.result["crackedto"] = CrackedTo(self.CONFIG, self.permutations_list).search()
    # print results
    self.print_results("crackedto")
142644c1f4dd5cbd9b1ac44138773daa57461fbc | 360 | py | Python | tasks.py | h4ndzdatm0ld/cloud-mgmt | f21bc1f5c772ef018338c6bc041c7475537c7eb6 | [
"Apache-2.0"
] | null | null | null | tasks.py | h4ndzdatm0ld/cloud-mgmt | f21bc1f5c772ef018338c6bc041c7475537c7eb6 | [
"Apache-2.0"
] | null | null | null | tasks.py | h4ndzdatm0ld/cloud-mgmt | f21bc1f5c772ef018338c6bc041c7475537c7eb6 | [
"Apache-2.0"
] | null | null | null | """Tasks for use with Invoke."""
from invoke import task
@task
def yamllint(context):
    """Run yamllint over the whole repository."""
    context.run("yamllint .")
@task
def tests(context):
    """Run all tests for this repository."""
    # NOTE(review): only yamllint runs today; the success banner below prints
    # as long as yamllint passes, since no other suites exist yet.
    print("Running yamllint")
    yamllint(context)
    print("yamllint succeeded")
    print("All tests have passed!")
| 18 | 44 | 0.652778 |
b27072257c8126bd067b4834b21eb12d2a8d66f6 | 8,268 | py | Python | atom/nucleus/python/nucleus_api/models/page_model.py | sumit4-ttn/SDK | b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff | [
"Apache-2.0"
] | null | null | null | atom/nucleus/python/nucleus_api/models/page_model.py | sumit4-ttn/SDK | b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff | [
"Apache-2.0"
] | null | null | null | atom/nucleus/python/nucleus_api/models/page_model.py | sumit4-ttn/SDK | b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Hydrogen Atom API
The Hydrogen Atom API # noqa: E501
OpenAPI spec version: 1.7.0
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
def _page_model_property(attr):
    """Create the standard getter/setter property for one PageModel field."""
    private_name = "_" + attr

    def fget(self):
        return getattr(self, private_name)

    def fset(self, value):
        setattr(self, private_name, value)

    return property(fget, fset, doc="The %s of this PageModel." % attr)


class PageModel(object):
    """Swagger-generated model for one page of results.

    ``swagger_types`` maps attribute name to its declared swagger type and
    ``attribute_map`` maps attribute name to the JSON key on the wire.
    """

    swagger_types = {
        'content': 'list[Model]',
        'first': 'bool',
        'last': 'bool',
        'number': 'int',
        'number_of_elements': 'int',
        'size': 'int',
        'sort': 'list[Sort]',
        'total_elements': 'int',
        'total_pages': 'int'
    }

    attribute_map = {
        'content': 'content',
        'first': 'first',
        'last': 'last',
        'number': 'number',
        'number_of_elements': 'number_of_elements',
        'size': 'size',
        'sort': 'sort',
        'total_elements': 'total_elements',
        'total_pages': 'total_pages'
    }

    # One plain storage-backed property per swagger attribute, exactly
    # mirroring the getter/setter pairs of the generated original.
    content = _page_model_property('content')
    first = _page_model_property('first')
    last = _page_model_property('last')
    number = _page_model_property('number')
    number_of_elements = _page_model_property('number_of_elements')
    size = _page_model_property('size')
    sort = _page_model_property('sort')
    total_elements = _page_model_property('total_elements')
    total_pages = _page_model_property('total_pages')

    def __init__(self, content=None, first=None, last=None, number=None,
                 number_of_elements=None, size=None, sort=None,
                 total_elements=None, total_pages=None):  # noqa: E501
        """PageModel - a model defined in Swagger"""  # noqa: E501
        provided = {
            'content': content,
            'first': first,
            'last': last,
            'number': number,
            'number_of_elements': number_of_elements,
            'size': size,
            'sort': sort,
            'total_elements': total_elements,
            'total_pages': total_pages,
        }
        # Every backing slot starts at None; values that were supplied are
        # then routed through the property setters, like the original.
        for attr in self.swagger_types:
            setattr(self, '_' + attr, None)
        self.discriminator = None
        for attr, value in provided.items():
            if value is not None:
                setattr(self, attr, value)

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        # Kept from the generated code: merge dict items if the model ever
        # subclasses dict (it does not today, so this branch is inert).
        if issubclass(PageModel, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, PageModel) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 25.518519 | 173 | 0.562409 |
45ed01d62cb0ff174a0a4d9cbc251765998370dd | 2,760 | py | Python | tools/cardiac_py/mesh/read_anat.py | paulkefer/cardioid | 59c07b714d8b066b4f84eb50487c36f6eadf634c | [
"MIT-0",
"MIT"
] | 33 | 2018-12-12T20:05:06.000Z | 2021-09-26T13:30:16.000Z | tools/cardiac_py/mesh/read_anat.py | paulkefer/cardioid | 59c07b714d8b066b4f84eb50487c36f6eadf634c | [
"MIT-0",
"MIT"
] | 5 | 2019-04-25T11:34:43.000Z | 2021-11-14T04:35:37.000Z | tools/cardiac_py/mesh/read_anat.py | paulkefer/cardioid | 59c07b714d8b066b4f84eb50487c36f6eadf634c | [
"MIT-0",
"MIT"
] | 15 | 2018-12-21T22:44:59.000Z | 2021-08-29T10:30:25.000Z | '''
Created on 14/01/2013
@author: butler
'''
import glob
class AnatReader():
'''
classdocs
'''
def __init__(self, file_stem):
g_matcher = file_stem
self.f_list = glob.glob(g_matcher)
self.f_list.sort()
if len(self.f_list) < 0:
print "No files matching"
raise
self.header_vars = {}
self.__parse_header()
def __iter__(self):
return self
def __parse_header(self):
""" Very simple parser"""
self.header_open = False
self.file_index = 0
print self.f_list
self.fd = open(self.f_list[self.file_index])
while True:
line = self.fd.readline()
if not self.header_open:
if "{" in line:
self.header_open = True
else:
if "//" in line:
continue
if "}" in line:
break
fields = line.rstrip("\n").strip(" ").split(";")
# check for split b
if fields[-1] == "":
# we have a standard case and we can treat it how we want
for field in fields:
print field
if len(field) == 0:
continue
keypair = field.split("=")
if len(keypair) == 2:
self.header_vars[keypair[0].strip(" ")] = \
keypair[1].strip(" ")
elif len(fields) == 1:
# we have something else check if h. .. will be first field
field1 = fields[0]
prospective_h = field1.split("=")[0].strip(" ")
prospective_rhs = field1.split("=")[1].strip(" ")
if prospective_h == "h":
self.h = [prospective_rhs]
line = self.fd.readline()
l2 = line.rstrip("\n").strip(" ")
self.h.append(l2)
line = self.fd.readline()
l3 = line.rstrip("\n").rstrip(";").strip(" ")
self.h.append(l3)
# Done with H
line = self.fd.readline()
print "should be nothing: ", line
def next(self):
line = self.fd.readline()
if not line:
if self.file_index < len(self.f_list) - 1:
self.file_index = self.file_index + 1
self.fd.close()
self.fd = open(self.f_list[self.file_index])
line = self.fd.readline()
else:
raise StopIteration
return line
| 33.253012 | 79 | 0.433696 |
364f54bd0c93b616d06f4786bb18e14191523292 | 130 | py | Python | dennis5/src/bias_inits.py | DarkElement75/dennis | 411153b374c48a1e268dd0adffc5d9e5dc84c2c8 | [
"MIT"
] | 2 | 2016-08-09T21:29:46.000Z | 2016-09-17T23:42:06.000Z | dennis5/src/bias_inits.py | DarkElement75/dennis | 411153b374c48a1e268dd0adffc5d9e5dc84c2c8 | [
"MIT"
] | null | null | null | dennis5/src/bias_inits.py | DarkElement75/dennis | 411153b374c48a1e268dd0adffc5d9e5dc84c2c8 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
def standard(shape):
    """Create a trainable TF variable of the given shape, initialized from a
    truncated normal distribution with standard deviation 1.0."""
    initial_values = tf.truncated_normal(shape, stddev=1.0)
    return tf.Variable(initial_values)
| 16.25 | 62 | 0.753846 |
3806300e0887731964c8c04ddf744920697fc2eb | 2,885 | py | Python | pages/admin.py | buketkonuk/pythondotorg | 4d8d9728eea7c7b2fef32eb6f24fda409cf24a06 | [
"Apache-2.0"
] | 1 | 2021-01-03T00:58:16.000Z | 2021-01-03T00:58:16.000Z | pages/admin.py | buketkonuk/pythondotorg | 4d8d9728eea7c7b2fef32eb6f24fda409cf24a06 | [
"Apache-2.0"
] | null | null | null | pages/admin.py | buketkonuk/pythondotorg | 4d8d9728eea7c7b2fef32eb6f24fda409cf24a06 | [
"Apache-2.0"
] | 1 | 2019-09-02T00:51:38.000Z | 2019-09-02T00:51:38.000Z | from django.conf import settings
from django.contrib import admin
from django.db import models
from django.utils.safestring import mark_safe
from bs4 import BeautifulSoup
from cms.admin import ContentManageableModelAdmin
from .models import Page, Image, DocumentFile
class PageAdminImageFileWidget(admin.widgets.AdminFileWidget):
    """AdminFileWidget that rewrites the "currently" file link so it shows
    MEDIA_URL paths instead of exposing the filesystem MEDIA_ROOT path."""

    def render(self, name, value, attrs=None, renderer=None):
        """ Fix admin rendering """
        # Bug fix: the incoming ``attrs`` was previously discarded
        # (``attrs=None`` was hard-coded in the super() call); ``renderer``
        # is also accepted/forwarded now — assumes Django >= 2.1 widget API,
        # TODO confirm against the project's Django version.
        content = super().render(name, value, attrs, renderer)
        soup = BeautifulSoup(content, 'lxml')
        # Show useful link/relationship in admin
        a_href = soup.find('a')
        # .get avoids a KeyError when the anchor has no href attribute.
        if a_href and a_href.attrs.get('href'):
            a_href.attrs['href'] = a_href.attrs['href'].replace(settings.MEDIA_ROOT, settings.MEDIA_URL)
            a_href.string = a_href.text.replace(settings.MEDIA_ROOT, settings.MEDIA_URL)
            if '//' in a_href.attrs['href']:
                a_href.attrs['href'] = a_href.attrs['href'].replace('//', '/')
                a_href.string = a_href.text.replace('//', '/')
        return mark_safe(soup)
class ImageInlineAdmin(admin.StackedInline):
    """Inline editor for page images."""

    model = Image
    extra = 1
    # Swap in the widget that shows MEDIA_URL links instead of MEDIA_ROOT paths.
    formfield_overrides = {models.ImageField: {'widget': PageAdminImageFileWidget}}
class DocumentFileInlineAdmin(admin.StackedInline):
    """Inline editor for page document attachments."""

    model = DocumentFile
    extra = 1
    # Swap in the widget that shows MEDIA_URL links instead of MEDIA_ROOT paths.
    formfield_overrides = {models.FileField: {'widget': PageAdminImageFileWidget}}
class PagePathFilter(admin.SimpleListFilter):
    """ Admin list filter to allow drilling down by first two levels of pages """
    title = 'Path'
    parameter_name = 'pathlimiter'

    def lookups(self, request, model_admin):
        """Build (value, label) choices from the first two path segments.

        Bug fix: the original appended a duplicate root entry for every
        empty path (no dedup on the '' branch) and did O(n) membership
        tests against a list; a ``seen`` set dedups all branches in O(1).
        """
        path_values = Page.objects.order_by('path').values_list('path', flat=True)
        choices = []
        seen = set()
        for value in path_values:
            prefix = "/".join(value.split('/')[:2])
            # The root page ('') is labelled '/', everything else by prefix.
            label = '/' if value == '' else prefix
            if prefix not in seen:
                seen.add(prefix)
                choices.append((prefix, label))
        return choices

    def queryset(self, request, queryset):
        # Returning None (no selection) tells Django to apply no filtering.
        if self.value():
            return queryset.filter(path__startswith=self.value())
class PageAdmin(ContentManageableModelAdmin):
    """Admin for flatpage-style Pages, with inline images and documents."""
    search_fields = ['title', 'path']
    list_display = ('get_title', 'path', 'is_published',)
    list_filter = [PagePathFilter, 'is_published']
    inlines = [ImageInlineAdmin, DocumentFileInlineAdmin]
    fieldsets = [
        (None, {'fields': ('title', 'keywords', 'description', 'path', 'content', 'content_markup_type', 'is_published')}),
        ('Advanced options', {'classes': ('collapse',), 'fields': ('template_name',)}),
    ]
    # Enable "save as new" so editors can clone an existing page.
    save_as = True


admin.site.register(Page, PageAdmin)
59b5971b3c0abded83d1e67b3ba060349c3a44b3 | 12,796 | py | Python | sdk/python/pulumi_google_native/dns/v1beta2/response_policy_rule.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 44 | 2021-04-18T23:00:48.000Z | 2022-02-14T17:43:15.000Z | sdk/python/pulumi_google_native/dns/v1beta2/response_policy_rule.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 354 | 2021-04-16T16:48:39.000Z | 2022-03-31T17:16:39.000Z | sdk/python/pulumi_google_native/dns/v1beta2/response_policy_rule.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 8 | 2021-04-24T17:46:51.000Z | 2022-01-05T10:40:21.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ResponsePolicyRuleArgs', 'ResponsePolicyRule']
@pulumi.input_type
class ResponsePolicyRuleArgs:
def __init__(__self__, *,
response_policy: pulumi.Input[str],
behavior: Optional[pulumi.Input['ResponsePolicyRuleBehavior']] = None,
client_operation_id: Optional[pulumi.Input[str]] = None,
dns_name: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
local_data: Optional[pulumi.Input['ResponsePolicyRuleLocalDataArgs']] = None,
project: Optional[pulumi.Input[str]] = None,
rule_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ResponsePolicyRule resource.
:param pulumi.Input['ResponsePolicyRuleBehavior'] behavior: Answer this query with a behavior rather than DNS data.
:param pulumi.Input[str] dns_name: The DNS name (wildcard or exact) to apply this rule to. Must be unique within the Response Policy Rule.
:param pulumi.Input['ResponsePolicyRuleLocalDataArgs'] local_data: Answer this query directly with DNS data. These ResourceRecordSets override any other DNS behavior for the matched name; in particular they override private zones, the public internet, and GCP internal DNS. No SOA nor NS types are allowed.
:param pulumi.Input[str] rule_name: An identifier for this rule. Must be unique with the ResponsePolicy.
"""
pulumi.set(__self__, "response_policy", response_policy)
if behavior is not None:
pulumi.set(__self__, "behavior", behavior)
if client_operation_id is not None:
pulumi.set(__self__, "client_operation_id", client_operation_id)
if dns_name is not None:
pulumi.set(__self__, "dns_name", dns_name)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if local_data is not None:
pulumi.set(__self__, "local_data", local_data)
if project is not None:
pulumi.set(__self__, "project", project)
if rule_name is not None:
pulumi.set(__self__, "rule_name", rule_name)
@property
@pulumi.getter(name="responsePolicy")
def response_policy(self) -> pulumi.Input[str]:
return pulumi.get(self, "response_policy")
@response_policy.setter
def response_policy(self, value: pulumi.Input[str]):
pulumi.set(self, "response_policy", value)
@property
@pulumi.getter
def behavior(self) -> Optional[pulumi.Input['ResponsePolicyRuleBehavior']]:
"""
Answer this query with a behavior rather than DNS data.
"""
return pulumi.get(self, "behavior")
@behavior.setter
def behavior(self, value: Optional[pulumi.Input['ResponsePolicyRuleBehavior']]):
pulumi.set(self, "behavior", value)
@property
@pulumi.getter(name="clientOperationId")
def client_operation_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "client_operation_id")
@client_operation_id.setter
def client_operation_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_operation_id", value)
@property
@pulumi.getter(name="dnsName")
def dns_name(self) -> Optional[pulumi.Input[str]]:
"""
The DNS name (wildcard or exact) to apply this rule to. Must be unique within the Response Policy Rule.
"""
return pulumi.get(self, "dns_name")
@dns_name.setter
def dns_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dns_name", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter(name="localData")
def local_data(self) -> Optional[pulumi.Input['ResponsePolicyRuleLocalDataArgs']]:
"""
Answer this query directly with DNS data. These ResourceRecordSets override any other DNS behavior for the matched name; in particular they override private zones, the public internet, and GCP internal DNS. No SOA nor NS types are allowed.
"""
return pulumi.get(self, "local_data")
@local_data.setter
def local_data(self, value: Optional[pulumi.Input['ResponsePolicyRuleLocalDataArgs']]):
pulumi.set(self, "local_data", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="ruleName")
def rule_name(self) -> Optional[pulumi.Input[str]]:
"""
An identifier for this rule. Must be unique with the ResponsePolicy.
"""
return pulumi.get(self, "rule_name")
@rule_name.setter
def rule_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "rule_name", value)
class ResponsePolicyRule(pulumi.CustomResource):
    """A Google Cloud DNS Response Policy Rule resource (generated SDK code)."""

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 behavior: Optional[pulumi.Input['ResponsePolicyRuleBehavior']] = None,
                 client_operation_id: Optional[pulumi.Input[str]] = None,
                 dns_name: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 local_data: Optional[pulumi.Input[pulumi.InputType['ResponsePolicyRuleLocalDataArgs']]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 response_policy: Optional[pulumi.Input[str]] = None,
                 rule_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Creates a new Response Policy Rule.
        Auto-naming is currently not supported for this resource.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input['ResponsePolicyRuleBehavior'] behavior: Answer this query with a behavior rather than DNS data.
        :param pulumi.Input[str] dns_name: The DNS name (wildcard or exact) to apply this rule to. Must be unique within the Response Policy Rule.
        :param pulumi.Input[pulumi.InputType['ResponsePolicyRuleLocalDataArgs']] local_data: Answer this query directly with DNS data. These ResourceRecordSets override any other DNS behavior for the matched name; in particular they override private zones, the public internet, and GCP internal DNS. No SOA nor NS types are allowed.
        :param pulumi.Input[str] rule_name: An identifier for this rule. Must be unique with the ResponsePolicy.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ResponsePolicyRuleArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Creates a new Response Policy Rule.
        Auto-naming is currently not supported for this resource.

        :param str resource_name: The name of the resource.
        :param ResponsePolicyRuleArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatcher for the two overloads above: accepts either an args
        # object or keyword arguments and forwards to _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(ResponsePolicyRuleArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       behavior: Optional[pulumi.Input['ResponsePolicyRuleBehavior']] = None,
                       client_operation_id: Optional[pulumi.Input[str]] = None,
                       dns_name: Optional[pulumi.Input[str]] = None,
                       kind: Optional[pulumi.Input[str]] = None,
                       local_data: Optional[pulumi.Input[pulumi.InputType['ResponsePolicyRuleLocalDataArgs']]] = None,
                       project: Optional[pulumi.Input[str]] = None,
                       response_policy: Optional[pulumi.Input[str]] = None,
                       rule_name: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id is only set when rehydrating an existing resource (see
        # get() below); in that case no input properties may be supplied.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            # __new__ bypasses ResponsePolicyRuleArgs.__init__ so optional
            # fields can be written directly into __dict__.
            __props__ = ResponsePolicyRuleArgs.__new__(ResponsePolicyRuleArgs)

            __props__.__dict__["behavior"] = behavior
            __props__.__dict__["client_operation_id"] = client_operation_id
            __props__.__dict__["dns_name"] = dns_name
            __props__.__dict__["kind"] = kind
            __props__.__dict__["local_data"] = local_data
            __props__.__dict__["project"] = project
            if response_policy is None and not opts.urn:
                raise TypeError("Missing required property 'response_policy'")
            __props__.__dict__["response_policy"] = response_policy
            __props__.__dict__["rule_name"] = rule_name
        super(ResponsePolicyRule, __self__).__init__(
            'google-native:dns/v1beta2:ResponsePolicyRule',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'ResponsePolicyRule':
        """
        Get an existing ResponsePolicyRule resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # Output properties start as None; the engine fills them from the
        # provider's recorded state.
        __props__ = ResponsePolicyRuleArgs.__new__(ResponsePolicyRuleArgs)

        __props__.__dict__["behavior"] = None
        __props__.__dict__["dns_name"] = None
        __props__.__dict__["kind"] = None
        __props__.__dict__["local_data"] = None
        __props__.__dict__["rule_name"] = None
        return ResponsePolicyRule(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def behavior(self) -> pulumi.Output[str]:
        """
        Answer this query with a behavior rather than DNS data.
        """
        return pulumi.get(self, "behavior")

    @property
    @pulumi.getter(name="dnsName")
    def dns_name(self) -> pulumi.Output[str]:
        """
        The DNS name (wildcard or exact) to apply this rule to. Must be unique within the Response Policy Rule.
        """
        return pulumi.get(self, "dns_name")

    @property
    @pulumi.getter
    def kind(self) -> pulumi.Output[str]:
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter(name="localData")
    def local_data(self) -> pulumi.Output['outputs.ResponsePolicyRuleLocalDataResponse']:
        """
        Answer this query directly with DNS data. These ResourceRecordSets override any other DNS behavior for the matched name; in particular they override private zones, the public internet, and GCP internal DNS. No SOA nor NS types are allowed.
        """
        return pulumi.get(self, "local_data")

    @property
    @pulumi.getter(name="ruleName")
    def rule_name(self) -> pulumi.Output[str]:
        """
        An identifier for this rule. Must be unique with the ResponsePolicy.
        """
        return pulumi.get(self, "rule_name")
| 45.537367 | 332 | 0.657862 |
4e73479d8e18acd3b3b6af8fce96136a97a20510 | 114 | py | Python | CodeWars/8 Kyu/Century From Year.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | [
"MIT"
] | null | null | null | CodeWars/8 Kyu/Century From Year.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | [
"MIT"
] | null | null | null | CodeWars/8 Kyu/Century From Year.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | [
"MIT"
def century(year):
    """Return the 1-based century containing *year* (1-100 -> 1, 101-200 -> 2, ...)."""
    whole_centuries, remainder = divmod(year, 100)
    return whole_centuries if remainder == 0 else whole_centuries + 1
dba8611418601be75b90b3506212f629b5d33497 | 17,080 | py | Python | core/admin.py | saggins/lynbrook-app-backend | d5bad6e0742853bb39c5a15d3b7332b7114b671d | [
"MIT"
] | null | null | null | core/admin.py | saggins/lynbrook-app-backend | d5bad6e0742853bb39c5a15d3b7332b7114b671d | [
"MIT"
] | null | null | null | core/admin.py | saggins/lynbrook-app-backend | d5bad6e0742853bb39c5a15d3b7332b7114b671d | [
"MIT"
] | null | null | null | import csv
import io
import qrcode
from datauri import DataURI
from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.http.response import Http404, HttpResponse
from django.shortcuts import render
from django.urls import path
from django.urls.base import reverse
from django.utils.safestring import mark_safe
from django.utils.translation import gettext as _
from django_better_admin_arrayfield.admin.mixins import DynamicArrayMixin
from qrcode.image.svg import SvgPathFillImage
from .models import *
def with_inline_organization_permissions(get_organization=lambda x: x):
    """Class decorator factory granting inline-admin access to an
    organization's admins and advisors (and superusers).

    ``get_organization`` maps the inline's parent object to its Organization;
    by default the object itself is the organization.
    """
    def decorate(cls):
        class Admin(cls):
            def has_view_permission(self, request, obj=None):
                # No object (changelist / add view) or superuser: allowed.
                if obj is None:
                    return True
                if request.user.is_superuser:
                    return True
                organization = get_organization(obj)
                return organization.is_admin(request.user) or organization.is_advisor(request.user)

            # Change, add and delete all collapse to the same view check.
            def has_change_permission(self, request, obj=None):
                return self.has_view_permission(request, obj)

            def has_add_permission(self, request, obj=None):
                return self.has_change_permission(request, obj)

            def has_delete_permission(self, request, obj=None):
                return self.has_change_permission(request, obj)

        return Admin

    return decorate
def with_organization_permissions():
    """Class decorator factory for ModelAdmins whose model has an
    ``organization`` FK: superusers see everything, organization
    admins/advisors see and edit only their own organizations' rows.
    """
    def deco(cls):
        class Admin(cls):
            def has_module_permission(self, request):
                # Always show the module; per-object access is checked below.
                return True

            def has_view_permission(self, request, obj=None):
                if obj is None or request.user.is_superuser:
                    return True
                return obj.organization.is_admin(request.user) or obj.organization.is_advisor(request.user)

            def has_change_permission(self, request, obj=None):
                return self.has_view_permission(request, obj)

            def has_delete_permission(self, request, obj=None):
                return self.has_change_permission(request, obj)

            def get_queryset(self, request):
                qs = super().get_queryset(request)
                if request.user.is_superuser:
                    return qs
                # distinct() because a user may be both admin and advisor.
                # (Pointless f-string/dict-unpack kwargs replaced with
                # direct keyword arguments — same query.)
                return qs.filter(
                    Q(organization__admins=request.user) | Q(organization__advisors=request.user)
                ).distinct()

            def get_form(self, request, obj=None, change=False, **kwargs):
                if not request.user.is_superuser:
                    form_class = cls.AdminAdvisorForm

                    class UserForm(form_class):
                        def __init__(self, *args, **kwargs):
                            super().__init__(*args, **kwargs)
                            # Non-superusers may only pick organizations
                            # they manage.
                            q = Q(admins=request.user) | Q(advisors=request.user)
                            if "organization" in self.fields:
                                self.fields["organization"].queryset = (
                                    self.fields["organization"].queryset.filter(q).distinct()
                                )

                    kwargs["form"] = UserForm
                # Bug fix: ``change`` was accepted but silently dropped;
                # forward it so Django sees the real add/change state.
                return super().get_form(request, obj=obj, change=change, **kwargs)

        return Admin

    return deco
class AdminAdvisorListFilter(admin.SimpleListFilter):
    """Changelist filter by organization, offering only organizations the
    requesting user can manage (all of them for superusers)."""

    title = _("organization")
    parameter_name = "organization"

    def lookups(self, request, model_admin):
        candidates = Organization.objects.all()
        if not request.user.is_superuser:
            managed = Q(admins=request.user) | Q(advisors=request.user)
            candidates = Organization.objects.filter(managed).distinct()
        return [(candidate.id, candidate.name) for candidate in candidates]

    def queryset(self, request, queryset):
        chosen = self.value()
        return queryset.filter(organization=chosen) if chosen else queryset
class EventListFilter(admin.SimpleListFilter):
    """Changelist filter by event, offering only events belonging to
    organizations the requesting user can manage (all for superusers)."""

    title = _("event")
    parameter_name = "event"

    def lookups(self, request, model_admin):
        candidates = Event.objects.all()
        if not request.user.is_superuser:
            managed = Q(organization__admins=request.user) | Q(organization__advisors=request.user)
            candidates = Event.objects.filter(managed).distinct()
        return [(candidate.id, candidate) for candidate in candidates]

    def queryset(self, request, queryset):
        chosen = self.value()
        return queryset.filter(event=chosen) if chosen else queryset
@admin.register(User)
class UserAdmin(BaseUserAdmin, DynamicArrayMixin):
    """Admin for the custom email-based User model, with organization
    roles, memberships and push tokens shown inline."""

    class AdvisorOrganizationAdmin(admin.TabularInline, DynamicArrayMixin):
        # Through table of Organization.advisors, shown on the user page.
        model = Organization.advisors.through
        verbose_name = "Organization"
        verbose_name_plural = "Advisor For"
        extra = 0

    class AdminOrganizationAdmin(admin.TabularInline, DynamicArrayMixin):
        # Through table of Organization.admins.
        model = Organization.admins.through
        verbose_name = "Organization"
        verbose_name_plural = "Admin For"
        extra = 0

    class MembershipAdmin(admin.TabularInline, DynamicArrayMixin):
        model = Membership
        extra = 0

    class ExpoPushTokenAdmin(admin.TabularInline, DynamicArrayMixin):
        model = ExpoPushToken
        extra = 0

    fieldsets = (
        (None, {"fields": ("email", "password")}),
        (_("Personal info"), {"fields": ("first_name", "last_name", "type", "grad_year")}),
        (_("Permissions"), {"fields": ("is_active", "is_staff", "is_superuser")}),
    )
    # Fields shown on the "add user" form.
    add_fieldsets = ((None, {"classes": ("wide",), "fields": ("email", "grad_year", "password1", "password2")}),)
    list_display = ("email", "first_name", "last_name", "is_staff")
    list_filter = ("is_staff", "is_superuser", "grad_year")
    search_fields = ("email", "first_name", "last_name")
    # NOTE(review): presumably reset because BaseUserAdmin orders by
    # "username", which this custom model lacks — confirm.
    ordering = None
    inlines = (AdvisorOrganizationAdmin, AdminOrganizationAdmin, MembershipAdmin, ExpoPushTokenAdmin)

    def has_view_permission(self, request, obj=None):
        # Any staff user may view users — likely needed so the
        # advisors/admins autocomplete_fields on OrganizationAdmin can
        # search users; TODO confirm.
        return True
@admin.register(Organization)
class OrganizationAdmin(admin.ModelAdmin, DynamicArrayMixin):
@with_inline_organization_permissions()
class InlineLinkAdmin(admin.TabularInline, DynamicArrayMixin):
model = OrganizationLink
extra = 0
def has_view_permission(self, request, obj=None):
print(obj)
return super().has_view_permission(request, obj=obj)
class AdvisorForm(forms.ModelForm):
class Meta:
fields = (
"advisors",
"admins",
"name",
"description",
"category",
"day",
"time",
"link",
"ical_links",
)
class AdminForm(forms.ModelForm):
class Meta:
fields = (
"admins",
"name",
"description",
"category",
"day",
"time",
"link",
"ical_links",
)
list_display = ("name", "type", "day", "time", "location", "points_link")
list_filter = ("type", "day", "category")
readonly_fields = ("points_link",)
autocomplete_fields = ("advisors", "admins")
inlines = (InlineLinkAdmin,)
def has_module_permission(self, request):
return True
def has_view_permission(self, request, obj=None):
if obj is None or request.user.is_superuser:
return True
return obj.is_admin(request.user) or obj.is_advisor(request.user)
def has_change_permission(self, request, obj=None):
return self.has_view_permission(request, obj)
def get_queryset(self, request):
qs = super().get_queryset(request)
if request.user.is_superuser:
return qs
return qs.filter(Q(admins=request.user) | Q(advisors=request.user)).distinct()
def get_form(self, request, obj=None, **kwargs):
if not request.user.is_superuser:
kwargs["form"] = self.AdvisorForm if obj.is_advisor(request.user) else self.AdminForm
return super().get_form(request, obj=obj, **kwargs)
def points_link(self, obj):
return mark_safe(f'<a href={reverse("admin:core_organization_points", args=[obj.id])}>View Points</a>')
def get_urls(self):
return [
path("<path:object_id>/points/csv/", self.points_csv_view, name="core_organization_points"),
path("<path:object_id>/points/", self.points_view, name="core_organization_points"),
*super().get_urls(),
]
def get_org_with_points(self, request, object_id):
qs = super().get_queryset(request)
qs = qs.prefetch_related(
Prefetch("memberships", Membership.objects.select_related("user").order_by("-points")),
Prefetch("events", Event.objects.prefetch_related("submissions")),
)
return qs.get(id=object_id)
    def points_view(self, request, object_id):
        """Render the HTML points table for one organization.

        For each event a {user_id: effective points} map is built, where a
        NULL submission score falls back to the event's default points.
        """
        try:
            org = self.get_org_with_points(request, object_id)
        except self.model.DoesNotExist:
            return self._get_obj_does_not_exist_redirect(request, self.model._meta, object_id)
        events = [
            (e, {x.user_id: e.points if x.points is None else x.points for x in e.submissions.all()})
            for e in org.events.all()
        ]
        context = dict(
            org=org,
            events=[event.name for event, _ in events],
            members=[
                dict(
                    **membership.user.to_json(),
                    points=membership.points,
                    # None for events the member did not submit to
                    events=[users.get(membership.user.id) for event, users in events],
                )
                for membership in org.memberships.all()
            ],
        )
        return render(request, "core/organization_points.html", context)
    def points_csv_view(self, request, object_id):
        """Stream the organization's points table as a CSV attachment.

        Same per-event points mapping as points_view; one CSV column per
        event name appended after the member columns.
        """
        try:
            org = self.get_org_with_points(request, object_id)
        except self.model.DoesNotExist:
            raise Http404
        events = [
            (e, {x.user_id: e.points if x.points is None else x.points for x in e.submissions.all()})
            for e in org.events.all()
        ]
        response = HttpResponse(
            content_type="text/csv", headers={"Content-Disposition": 'attachment; filename="points.csv"'}
        )
        writer = csv.DictWriter(
            response,
            fieldnames=["id", "email", "first_name", "last_name", "grad_year", "points", *[e.name for e, _ in events]],
        )
        writer.writeheader()
        for membership in org.memberships.all():
            writer.writerow(
                dict(
                    **membership.user.to_json(),
                    points=membership.points,
                    **{event.name: users.get(membership.user.id) for event, users in events},
                )
            )
        return response
@admin.register(Event)
@with_organization_permissions()
class EventAdmin(admin.ModelAdmin, DynamicArrayMixin):
    """Admin for events; renders a sign-in QR code and instructions."""

    class AdminAdvisorForm(forms.ModelForm):
        """Restricted form used for non-superusers by the permissions decorator."""

        class Meta:
            fields = ("organization", "name", "description", "start", "end", "points", "submission_type")

    list_filter = (AdminAdvisorListFilter,)
    date_hierarchy = "start"
    list_display = ("name", "organization", "start", "end", "points", "user_count")
    search_fields = ("name",)
    readonly_fields = ("code", "qr_code", "sign_in")

    def user_count(self, obj):
        """Changelist column: number of users signed in to the event."""
        return obj.users.count()

    @admin.display(description="QR Code")
    def qr_code(self, obj):
        """Render the event's sign-in code as an inline SVG QR image."""
        if obj.code is None:
            return "-"
        qr_svg = qrcode.make(f"lhs://{obj.code}", image_factory=SvgPathFillImage, box_size=50, border=0)
        uri_svg = DataURI.make("image/svg+xml", charset="UTF-8", base64=True, data=qr_svg.to_string())
        return mark_safe(f'<img src="{uri_svg}" alt="lhs://{obj.code}">')

    @admin.display(description="Sign In Instructions")
    def sign_in(self, obj):
        # Fixed malformed markup: the bullet paragraphs carried stray </li>
        # closing tags left over from a former <ul> list.
        return mark_safe(
            """
            <p>Members can sign in in one of the following ways:</p>
            <p>• Scanning the QR Code in the Lynbrook App</p>
            <p>• Entering the 6-digit code manually in the Lynbrook App</p>
            <p>• Entering the 6-digit code in the web form at <a href="https://lynbrookasb.org/">https://lynbrookasb.org/</a></p>
            """
        )

    def has_add_permission(self, request):
        return True
@admin.register(Membership)
@with_organization_permissions()
class MembershipAdmin(admin.ModelAdmin, DynamicArrayMixin):
    """Admin for organization memberships; bookkeeping fields are read-only."""
    class AdminAdvisorForm(forms.ModelForm):
        class Meta:
            # non-superusers may only adjust a member's spent points
            fields = ("points_spent",)
    list_filter = (AdminAdvisorListFilter,)
    list_display = ("user", "organization", "points", "points_spent", "active")
    search_fields = ("user__first_name", "user__last_name")
    readonly_fields = ("organization", "user", "points", "active")
@admin.register(Submission)
class SubmissionAdmin(admin.ModelAdmin, DynamicArrayMixin):
    """Admin for event submissions, scoped to the orgs the user runs."""

    class AdminAdvisorForm(forms.ModelForm):
        """Restricted form used for non-superusers (see get_form)."""

        class Meta:
            fields = ("event", "user", "points")

    list_filter = (EventListFilter,)
    search_fields = ("event__name", "user__first_name", "user__last_name")
    list_display = ("user", "event", "points", "file")
    autocomplete_fields = ("user", "event")
    ordering = ("event", "user")

    def organization(self, obj):
        """Expose the owning organization (via the event) as a column."""
        return obj.event.organization

    def has_module_permission(self, request):
        return True

    def has_view_permission(self, request, obj=None):
        """Superusers always; otherwise admins/advisors of the event's org."""
        if obj is None or request.user.is_superuser:
            return True
        org = obj.event.organization
        return org.is_admin(request.user) or org.is_advisor(request.user)

    def has_change_permission(self, request, obj=None):
        return self.has_view_permission(request, obj)

    def has_delete_permission(self, request, obj=None):
        return self.has_change_permission(request, obj)

    def has_add_permission(self, request):
        return True

    def get_queryset(self, request):
        """Limit non-superusers to submissions of orgs they admin/advise.

        NOTE: the class previously defined ``get_queryset`` twice; the first
        definition (filtering through an Event subquery) was dead code —
        Python kept only this later one — and has been removed.  The pointless
        ``f"..."`` prefixes on the constant Q kwargs were dropped as well.
        """
        qs = super().get_queryset(request)
        if request.user.is_superuser:
            return qs
        return qs.filter(
            Q(event__organization__admins=request.user) | Q(event__organization__advisors=request.user)
        ).distinct()

    def get_form(self, request, obj=None, change=False, **kwargs):
        """Restrict the event choices to the requesting user's organizations."""
        if not request.user.is_superuser:
            form_class = self.AdminAdvisorForm

            class UserForm(form_class):
                def __init__(self, *args, **kwargs):
                    super().__init__(*args, **kwargs)
                    q = Q(organization__admins=request.user) | Q(organization__advisors=request.user)
                    self.fields["event"].queryset = (
                        self.fields["event"].queryset.filter(q).order_by("-start").distinct()
                    )

            kwargs["form"] = UserForm
        return super().get_form(request, obj=obj, **kwargs)
@admin.register(Post)
@with_organization_permissions()
class PostAdmin(admin.ModelAdmin, DynamicArrayMixin):
    """Admin for organization posts with inline polls."""

    @with_inline_organization_permissions(lambda x: x.organization)
    class InlinePollAdmin(admin.StackedInline, DynamicArrayMixin):
        model = Poll
        extra = 0

    class AdminAdvisorForm(forms.ModelForm):
        class Meta:
            fields = ("organization", "title", "content", "published")

    date_hierarchy = "date"
    list_display = ("title", "date", "organization", "published")
    # NOTE(review): list_filter was previously assigned twice; the assignment
    # below silently overrode ``(AdminAdvisorListFilter,)``.  The effective
    # value is kept — confirm AdminAdvisorListFilter was not the intent.
    list_filter = ("organization", "published")
    list_editable = ("published",)
    inlines = (InlinePollAdmin,)

    def has_add_permission(self, request):
        return True
@admin.register(Prize)
@with_organization_permissions()
class PrizeAdmin(admin.ModelAdmin, DynamicArrayMixin):
    """Admin for prizes redeemable with membership points."""
    class AdminAdvisorForm(forms.ModelForm):
        class Meta:
            fields = ("organization", "name", "description", "points")
    list_display = ("name", "description", "organization", "points")
    list_filter = (AdminAdvisorListFilter,)
    def has_add_permission(self, request):
        """Anyone who can reach the module may add prizes."""
        return True
@admin.register(Period)
class PeriodAdmin(admin.ModelAdmin, DynamicArrayMixin):
    """Admin for schedule periods; 'customizable' editable from the list."""
    list_display = ("id", "name", "customizable")
    list_editable = ("customizable",)
@admin.register(Schedule)
class ScheduleAdmin(admin.ModelAdmin, DynamicArrayMixin):
    """Admin for bell schedules with their periods edited inline."""
    class InlinePeriodAdmin(admin.TabularInline, DynamicArrayMixin):
        model = SchedulePeriod
        extra = 0
    list_display = ("name", "start", "end", "weekday", "priority")
    inlines = (InlinePeriodAdmin,)
    # allow "save as new" to clone an existing schedule
    save_as = True
| 35.957895 | 134 | 0.61774 |
2ba8cc4c968ee899cd10b469932cf25a92608a9a | 352 | py | Python | uniset/_category/__init__.py | hukkinj1/uniset | eb1b5831bf282504585c8a384bf649780708f9ad | [
"MIT"
] | null | null | null | uniset/_category/__init__.py | hukkinj1/uniset | eb1b5831bf282504585c8a384bf649780708f9ad | [
"MIT"
] | null | null | null | uniset/_category/__init__.py | hukkinj1/uniset | eb1b5831bf282504585c8a384bf649780708f9ad | [
"MIT"
] | null | null | null | """A package containing category-based sets of Unicode code points.
THIS PACKAGE IS AUTO-GENERATED. DO NOT EDIT!
"""
SUBCATEGORIES = ('Cc', 'Zs', 'Po', 'Sc', 'Ps', 'Pe', 'Sm', 'Pd', 'Nd', 'Lu', 'Sk', 'Pc', 'Ll', 'So', 'Lo', 'Pi', 'Cf', 'No', 'Pf', 'Lt', 'Lm', 'Mn', 'Me', 'Mc', 'Nl', 'Zl', 'Zp', 'Cs')
MAINCATEGORIES = ('L', 'P', 'Z', 'N', 'M', 'S')
| 44 | 184 | 0.505682 |
e5422946c0c064d9d090633ab52b902a1a751112 | 2,321 | py | Python | tests/test_relu.py | SamDM/Paddle2ONNX | 5ae527e966c4ea62b1f35fd326efbc45385c5580 | [
"Apache-2.0"
] | null | null | null | tests/test_relu.py | SamDM/Paddle2ONNX | 5ae527e966c4ea62b1f35fd326efbc45385c5580 | [
"Apache-2.0"
] | null | null | null | tests/test_relu.py | SamDM/Paddle2ONNX | 5ae527e966c4ea62b1f35fd326efbc45385c5580 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from onnxbase import APIOnnx
from onnxbase import randtool
class Net(paddle.nn.Layer):
    """
    Minimal layer applying paddle.nn.functional.relu to its input.
    """
    def __init__(self):
        super(Net, self).__init__()
    def forward(self, inputs):
        """
        forward: elementwise ReLU of ``inputs``
        """
        x = paddle.nn.functional.relu(inputs)
        return x
def _run_relu_test(opset_version):
    """Export the relu Net at the given ONNX opset and compare outputs.

    Shared driver for the per-opset tests below, which previously were four
    identical copies differing only in the opset number.
    """
    op = Net()
    op.eval()
    # net, name, ver_list, delta=1e-6, rtol=1e-5
    obj = APIOnnx(op, 'relu', [opset_version])
    obj.set_input_data(
        "input_data",
        paddle.to_tensor(
            randtool("float", -1, 1, [3, 3, 3]).astype('float32')))
    obj.run()


def test_relu_9():
    """api: paddle.relu, op version: 9"""
    _run_relu_test(9)


def test_relu_10():
    """api: paddle.relu, op version: 10"""
    _run_relu_test(10)


def test_relu_11():
    """api: paddle.relu, op version: 11"""
    _run_relu_test(11)


def test_relu_12():
    """api: paddle.relu, op version: 12"""
    _run_relu_test(12)
2c7dc155f8340f8f1f287ee1a36db08a9af3ad37 | 6,261 | py | Python | GroundedScan/gym_minigrid/rendering.py | czlwang/groundedSCAN | 3d03ac6de37dde8d22d487dc3cc5a53af188fa2e | [
"MIT"
] | null | null | null | GroundedScan/gym_minigrid/rendering.py | czlwang/groundedSCAN | 3d03ac6de37dde8d22d487dc3cc5a53af188fa2e | [
"MIT"
] | null | null | null | GroundedScan/gym_minigrid/rendering.py | czlwang/groundedSCAN | 3d03ac6de37dde8d22d487dc3cc5a53af188fa2e | [
"MIT"
] | null | null | null | import numpy as np
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QImage, QPixmap, QPainter, QColor, QPolygon
from PyQt5.QtCore import QPoint, QRect
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QTextEdit
from PyQt5.QtWidgets import QHBoxLayout, QVBoxLayout, QLabel, QFrame
class Window(QMainWindow):
    """
    Simple application window to render the environment into
    """

    # Qt key code -> name reported to the keyDownCb callback.  Replaces the
    # previous long if/elif chain, which also compared with `== None`.
    _KEY_NAMES = {
        Qt.Key_Left: 'LEFT',
        Qt.Key_Right: 'RIGHT',
        Qt.Key_Up: 'UP',
        Qt.Key_Down: 'DOWN',
        Qt.Key_Space: 'SPACE',
        Qt.Key_Return: 'RETURN',
        Qt.Key_Alt: 'ALT',
        Qt.Key_Control: 'CTRL',
        Qt.Key_PageUp: 'PAGE_UP',
        Qt.Key_PageDown: 'PAGE_DOWN',
        Qt.Key_Backspace: 'BACKSPACE',
        Qt.Key_Escape: 'ESCAPE',
    }

    def __init__(self):
        super().__init__()
        self.setWindowTitle('MiniGrid Gym Environment')
        # Image label to display the rendering
        self.imgLabel = QLabel()
        self.imgLabel.setFrameStyle(QFrame.Panel | QFrame.Sunken)
        # Text box for the mission
        self.missionBox = QTextEdit()
        self.missionBox.setReadOnly(True)
        self.missionBox.setMinimumSize(400, 30)
        # Center the image
        hbox = QHBoxLayout()
        hbox.addStretch(1)
        hbox.addWidget(self.imgLabel)
        hbox.addStretch(1)
        # Arrange widgets vertically
        vbox = QVBoxLayout()
        vbox.addLayout(hbox)
        vbox.addWidget(self.missionBox)
        # Create a main widget for the window
        self.mainWidget = QWidget(self)
        self.setCentralWidget(self.mainWidget)
        self.mainWidget.setLayout(vbox)
        self.setFixedSize(400, 450)
        # Show the application window
        self.show()
        self.setFocus()
        self.closed = False
        # Callback for keyboard events
        self.keyDownCb = None

    def closeEvent(self, event):
        self.closed = True

    def setPixmap(self, pixmap):
        self.imgLabel.setPixmap(pixmap)

    def setText(self, text):
        self.missionBox.setPlainText(text)

    def setKeyDownCb(self, callback):
        self.keyDownCb = callback

    def keyPressEvent(self, e):
        """Translate a Qt key press into a named key and invoke the callback."""
        if self.keyDownCb is None:
            return
        keyName = self._KEY_NAMES.get(e.key())
        if keyName is None:
            return
        self.keyDownCb(keyName)
class Renderer:
    """Offscreen QImage renderer with an optional interactive window."""

    def __init__(self, width, height, ownWindow=False):
        self.width = width
        self.height = height
        self.img = QImage(width, height, QImage.Format_RGB888)
        self.painter = QPainter()
        self.window = None
        if ownWindow:
            self.app = QApplication([])
            self.window = Window()

    def close(self):
        """
        Deallocate resources used
        """
        pass

    def beginFrame(self):
        self.painter.begin(self.img)
        self.painter.setRenderHint(QPainter.Antialiasing, False)
        # Clear the background
        self.painter.setBrush(QColor(0, 0, 0))
        self.painter.drawRect(0, 0, self.width - 1, self.height - 1)

    def endFrame(self):
        self.painter.end()
        if self.window:
            if self.window.closed:
                self.window = None
            else:
                self.window.setPixmap(self.getPixmap())
                self.app.processEvents()

    def getPixmap(self):
        return QPixmap.fromImage(self.img)

    def save(self, save_location):
        """Render the window widget to a pixmap and save it; returns success."""
        self.app.processEvents()
        self.window.show()
        pix = QPixmap(self.window.mainWidget.size())
        self.window.mainWidget.render(pix)
        success = pix.save(save_location)
        return success

    def getArray(self):
        """
        Get a numpy array of RGB pixel values.
        The array will have shape (height, width, 3)
        """
        numBytes = self.width * self.height * 3
        buf = self.img.bits().asstring(numBytes)
        output = np.frombuffer(buf, dtype='uint8')
        output = output.reshape((self.height, self.width, 3))
        return output

    def getFullScreen(self, temp):
        """Grab the whole window as a numpy array and also save it to `temp`.

        NOTE(review): this reshapes to (width, height, 3) while getArray
        uses (height, width, 3) — confirm which orientation callers expect.
        """
        pix = QPixmap(self.window.mainWidget.size())
        self.window.mainWidget.render(pix)
        image = pix.toImage()
        s = image.bits().asstring(image.width() * image.height() * 3)
        # np.fromstring is deprecated; np.frombuffer is the equivalent
        # zero-copy read (and matches getArray above).
        arr = np.frombuffer(s, dtype='uint8').reshape((image.width(), image.height(), 3))
        pix.save(temp)
        return arr

    def push(self):
        self.painter.save()

    def pop(self):
        self.painter.restore()

    def rotate(self, degrees):
        self.painter.rotate(degrees)

    def translate(self, x, y):
        self.painter.translate(x, y)

    def scale(self, x, y):
        self.painter.scale(x, y)

    def setLineColor(self, r, g, b, a=255):
        self.painter.setPen(QColor(r, g, b, a))

    def setColor(self, r, g, b, a=255):
        self.painter.setBrush(QColor(r, g, b, a))

    def setLineWidth(self, width):
        pen = self.painter.pen()
        pen.setWidthF(width)
        self.painter.setPen(pen)

    def drawLine(self, x0, y0, x1, y1):
        self.painter.drawLine(x0, y0, x1, y1)

    def drawCircle(self, x, y, r):
        center = QPoint(x, y)
        self.painter.drawEllipse(center, r, r)

    def drawPolygon(self, points):
        """Takes a list of points (tuples) as input"""
        # Materialize the QPoint list — PyQt's QPolygon constructor does not
        # accept a lazy map object.
        qpoints = [QPoint(p[0], p[1]) for p in points]
        self.painter.drawPolygon(QPolygon(qpoints))

    def drawPolyline(self, points):
        """Takes a list of points (tuples) as input"""
        qpoints = [QPoint(p[0], p[1]) for p in points]
        self.painter.drawPolyline(QPolygon(qpoints))

    def fillRect(self, x, y, width, height, r, g, b, a=255):
        self.painter.fillRect(QRect(x, y, width, height), QColor(r, g, b, a))
a1a2b32a3602fb18610ec6215fc6aed6c6beac1b | 27,957 | py | Python | server/app/scanpy_engine/scanpy_engine.py | hy395/cellxgene | 9d92fd724fb3ed3df2aaa99b655c8b34aa96f68f | [
"MIT"
] | null | null | null | server/app/scanpy_engine/scanpy_engine.py | hy395/cellxgene | 9d92fd724fb3ed3df2aaa99b655c8b34aa96f68f | [
"MIT"
] | null | null | null | server/app/scanpy_engine/scanpy_engine.py | hy395/cellxgene | 9d92fd724fb3ed3df2aaa99b655c8b34aa96f68f | [
"MIT"
] | null | null | null | import warnings
import copy
import threading
from datetime import datetime
import os.path
from hashlib import blake2b
import base64
import numpy as np
import pandas
from pandas.core.dtypes.dtypes import CategoricalDtype
import anndata
from scipy import sparse
from server import __version__ as cellxgene_version
from server.app.driver.driver import CXGDriver
from server.app.util.constants import Axis, DEFAULT_TOP_N, MAX_LAYOUTS
from server.app.util.errors import (
FilterError,
JSONEncodingValueError,
PrepareError,
ScanpyFileError,
DisabledFeatureError,
)
from server.app.util.utils import jsonify_scanpy, requires_data
from server.app.scanpy_engine.diffexp import diffexp_ttest
from server.app.util.fbs.matrix import encode_matrix_fbs, decode_matrix_fbs
from server.app.scanpy_engine.labels import read_labels, write_labels
import server.app.scanpy_engine.matrix_proxy # noqa: F401
from server.app.util.matrix_proxy import MatrixProxy
def has_method(o, name):
    """Return True when `o` exposes a callable attribute called `name`."""
    return callable(getattr(o, name, None))
class ScanpyEngine(CXGDriver):
    def __init__(self, data_locator=None, args={}):
        """Load the dataset at ``data_locator`` and validate/initialize it.

        NOTE(review): the mutable default ``args={}`` is shared across calls;
        it appears to be read-only here, but confirm callers never mutate it.
        """
        super().__init__(data_locator, args)
        # lock used to protect label file write ops
        self.label_lock = threading.RLock()
        if self.data:
            self._validate_and_initialize()
    def update(self, data_locator=None, args={}):
        """Re-point the engine at a new dataset by re-running base init."""
        super().__init__(data_locator, args)
        if self.data:
            self._validate_and_initialize()
@staticmethod
def _get_default_config():
return {
"layout": [],
"max_category_items": 100,
"obs_names": None,
"var_names": None,
"diffexp_lfc_cutoff": 0.01,
"annotations": False,
"annotations_file": None,
"annotations_output_dir": None,
"backed": False,
"disable_diffexp": False,
"diffexp_may_be_slow": False
}
    def get_config_parameters(self, uid=None, collection=None):
        """Return the config parameters exposed to the front-end client.

        When annotations are enabled, also reports the per-user id hash and
        the annotation collection name/read-only status.
        """
        params = {
            "max-category-items": self.config["max_category_items"],
            "disable-diffexp": self.config["disable_diffexp"],
            "diffexp-may-be-slow": self.config["diffexp_may_be_slow"],
            "annotations": self.config["annotations"]
        }
        if self.config["annotations"]:
            if uid is not None:
                params.update({
                    "annotations-user-data-idhash": self.get_userdata_idhash(uid)
                })
            if self.config['annotations_file'] is not None:
                # user has hard-wired the name of the annotation data collection
                fname = os.path.basename(self.config['annotations_file'])
                collection_fname = os.path.splitext(fname)[0]
                params.update({
                    'annotations-data-collection-is-read-only': True,
                    'annotations-data-collection-name': collection_fname
                })
            elif collection is not None:
                params.update({
                    'annotations-data-collection-is-read-only': False,
                    'annotations-data-collection-name': collection
                })
        return params
@staticmethod
def _create_unique_column_name(df, col_name_prefix):
""" given the columns of a dataframe, and a name prefix, return a column name which
does not exist in the dataframe, AND which is prefixed by `prefix`
The approach is to append a numeric suffix, starting at zero and increasing by
one, until an unused name is found (eg, prefix_0, prefix_1, ...).
"""
suffix = 0
while f"{col_name_prefix}{suffix}" in df:
suffix += 1
return f"{col_name_prefix}{suffix}"
def _alias_annotation_names(self):
"""
The front-end relies on the existance of a unique, human-readable
index for obs & var (eg, var is typically gene name, obs the cell name).
The user can specify these via the --obs-names and --var-names config.
If they are not specified, use the existing index to create them, giving
the resulting column a unique name (eg, "name").
In both cases, enforce that the result is unique, and communicate the
index column name to the front-end via the obs_names and var_names config
(which is incorporated into the schema).
"""
self.original_obs_index = self.data.obs.index
for (ax_name, config_name) in ((Axis.OBS, "obs_names"), (Axis.VAR, "var_names")):
name = self.config[config_name]
df_axis = getattr(self.data, str(ax_name))
if name is None:
# Default: create unique names from index
if not df_axis.index.is_unique:
raise KeyError(
f"Values in {ax_name}.index must be unique. "
"Please prepare data to contain unique index values, or specify an "
"alternative with --{ax_name}-name."
)
name = self._create_unique_column_name(df_axis.columns, "name_")
self.config[config_name] = name
# reset index to simple range; alias name to point at the
# previously specified index.
df_axis.rename_axis(name, inplace=True)
df_axis.reset_index(inplace=True)
elif name in df_axis.columns:
# User has specified alternative column for unique names, and it exists
if not df_axis[name].is_unique:
raise KeyError(
f"Values in {ax_name}.{name} must be unique. "
"Please prepare data to contain unique values."
)
df_axis.reset_index(drop=True, inplace=True)
else:
# user specified a non-existent column name
raise KeyError(
f"Annotation name {name}, specified in --{ax_name}-name does not exist."
)
@staticmethod
def _can_cast_to_float32(ann):
if ann.dtype.kind == "f":
if not np.can_cast(ann.dtype, np.float32):
warnings.warn(
f"Annotation {ann.name} will be converted to 32 bit float and may lose precision."
)
return True
return False
@staticmethod
def _can_cast_to_int32(ann):
if ann.dtype.kind in ["i", "u"]:
if np.can_cast(ann.dtype, np.int32):
return True
ii32 = np.iinfo(np.int32)
if ann.min() >= ii32.min and ann.max() <= ii32.max:
return True
return False
    @staticmethod
    def _get_col_type(col):
        """Map a pandas column dtype onto the cellxgene schema type dict.

        Returns {"type": ...}, plus a "categories" list for categorical
        columns; raises TypeError for unsupported dtypes.
        """
        dtype = col.dtype
        data_kind = dtype.kind
        schema = {}
        if ScanpyEngine._can_cast_to_float32(col):
            schema["type"] = "float32"
        elif ScanpyEngine._can_cast_to_int32(col):
            schema["type"] = "int32"
        elif dtype == np.bool_:
            schema["type"] = "boolean"
        elif data_kind == "O" and dtype == "object":
            schema["type"] = "string"
        elif data_kind == "O" and dtype == "category":
            schema["type"] = "categorical"
            schema["categories"] = dtype.categories.tolist()
        else:
            raise TypeError(
                f"Annotations of type {dtype} are unsupported by cellxgene."
            )
        return schema
    @requires_data
    def _create_schema(self):
        """Build the base schema (matrix shape/dtype, annotation columns,
        layouts) served to the front-end; stored on ``self.schema``."""
        self.schema = {
            "dataframe": {
                "nObs": self.cell_count,
                "nVar": self.gene_count,
                "type": str(self.data.X.dtype),
            },
            "annotations": {
                "obs": {
                    "index": self.config["obs_names"],
                    "columns": []
                },
                "var": {
                    "index": self.config["var_names"],
                    "columns": []
                }
            },
            "layout": {"obs": []}
        }
        for ax in Axis:
            curr_axis = getattr(self.data, str(ax))
            for ann in curr_axis:
                ann_schema = {"name": ann, "writable": False}
                ann_schema.update(self._get_col_type(curr_axis[ann]))
                self.schema["annotations"][ax]["columns"].append(ann_schema)
        for layout in self.config['layout']:
            layout_schema = {
                "name": layout,
                "type": "float32",
                "dims": [f"{layout}_0", f"{layout}_1"]
            }
            self.schema["layout"]["obs"].append(layout_schema)
    @requires_data
    def get_schema(self, uid=None, collection=None):
        """Return the schema, augmented with any user label columns (which,
        unlike the base obs columns, are writable)."""
        schema = self.schema  # base schema
        # add label obs annotations as needed
        labels = read_labels(self.get_anno_fname(uid, collection))
        if labels is not None and not labels.empty:
            schema = copy.deepcopy(schema)
            for col in labels.columns:
                col_schema = {
                    "name": col,
                    "writable": True,
                }
                col_schema.update(self._get_col_type(labels[col]))
                schema["annotations"]["obs"]["columns"].append(col_schema)
        return schema
def get_userdata_idhash(self, uid):
"""
Return a short hash that weakly identifies the user and dataset.
Used to create safe annotations output file names.
"""
id = (uid + self.data_locator.abspath()).encode()
idhash = base64.b32encode(blake2b(id, digest_size=5).digest()).decode('utf-8')
return idhash
def get_anno_fname(self, uid=None, collection=None):
""" return the current annotation file name """
if not self.config["annotations"]:
return None
if self.config["annotations_file"] is not None:
return self.config["annotations_file"]
# we need to generate a file name, which we can only do if we have a UID and collection name
if uid is None or collection is None:
return None
idhash = self.get_userdata_idhash(uid)
return os.path.join(self.get_anno_output_dir(), f"{collection}-{idhash}.csv")
def get_anno_output_dir(self):
""" return the current annotation output directory """
if not self.config["annotations"]:
return None
if self.config['annotations_output_dir']:
return self.config['annotations_output_dir']
if self.config['annotations_file']:
return os.path.dirname(os.path.abspath(self.config['annotations_file']))
return os.getcwd()
def get_anno_backup_dir(self, uid, collection=None):
""" return the current annotation backup directory """
if not self.config["annotations"]:
return None
fname = self.get_anno_fname(uid, collection)
root, ext = os.path.splitext(fname)
return f"{root}-backups"
    def _load_data(self, data_locator):
        """Read the .h5ad file at ``data_locator`` into ``self.data``,
        translating load failures into ScanpyFileError."""
        # as of AnnData 0.6.19, backed mode performs initial load fast, but at the
        # cost of significantly slower access to X data.
        try:
            # there is no guarantee data_locator indicates a local file. The AnnData
            # API will only consume local file objects. If we get a non-local object,
            # make a copy in tmp, and delete it after we load into memory.
            with data_locator.local_handle() as lh:
                backed = 'r' if self.config['backed'] else None
                self.data = anndata.read_h5ad(lh, backed=backed)
        except ValueError:
            raise ScanpyFileError(
                "File must be in the .h5ad format. Please read "
                "https://github.com/theislab/scanpy_usage/blob/master/170505_seurat/info_h5ad.md to "
                "learn more about this format. You may be able to convert your file into this format "
                "using `cellxgene prepare`, please run `cellxgene prepare --help` for more "
                "information."
            )
        except MemoryError:
            raise ScanpyFileError("Out of memory - file is too large for available memory.")
        except Exception as e:
            raise ScanpyFileError(
                f"{e} - file not found or is inaccessible. File must be an .h5ad object. "
                f"Please check your input and try again."
            )
    @requires_data
    def _validate_and_initialize(self):
        """Validate the loaded AnnData and derive counts, layouts and schema."""
        # var and obs column names must be unique
        if not self.data.obs.columns.is_unique or not self.data.var.columns.is_unique:
            raise KeyError(f"All annotation column names must be unique.")
        self._alias_annotation_names()
        self._validate_data_types()
        self.cell_count = self.data.shape[0]
        self.gene_count = self.data.shape[1]
        self._default_and_validate_layouts()
        self._create_schema()
        # if the user has specified a fixed label file, go ahead and validate it
        # so that we can remove errors early in the process.
        if self.config["annotations_file"]:
            self._validate_label_data(read_labels(self.get_anno_fname()))
        # heuristic
        n_values = self.data.shape[0] * self.data.shape[1]
        if (n_values > 1e8 and self.config['backed'] is True) or (n_values > 5e8):
            self.config.update({"diffexp_may_be_slow": True})
    @requires_data
    def _default_and_validate_layouts(self):
        """ function:
            a) generate list of default layouts, if not already user specified
            b) validate layouts are legal.  remove/warn on any that are not
            c) cap total list of layouts at global const MAX_LAYOUTS
        """
        layouts = self.config['layout']
        # handle default
        if layouts is None or len(layouts) == 0:
            # load default layouts from the data (obsm keys of the form "X_<name>")
            layouts = [key[2:] for key in self.data.obsm_keys() if type(key) == str and key.startswith("X_")]
            if len(layouts) == 0:
                raise PrepareError(f"Unable to find any precomputed layouts within the dataset.")
        # remove invalid layouts
        valid_layouts = []
        obsm_keys = self.data.obsm_keys()
        for layout in layouts:
            layout_name = f"X_{layout}"
            if layout_name not in obsm_keys:
                warnings.warn(f"Ignoring unknown layout name: {layout}.")
            elif not self._is_valid_layout(self.data.obsm[layout_name]):
                warnings.warn(f"Ignoring layout due to malformed shape or data type: {layout}")
            else:
                valid_layouts.append(layout)
        if len(valid_layouts) == 0:
            raise PrepareError(f"No valid layout data.")
        # cap layouts to MAX_LAYOUTS
        self.config['layout'] = valid_layouts[0:MAX_LAYOUTS]
@requires_data
def _is_valid_layout(self, arr):
""" return True if this layout data is a valid array for front-end presentation:
* ndarray, with shape (n_obs, >= 2), dtype float/int/uint
* contains only finite values
"""
is_valid = type(arr) == np.ndarray and arr.dtype.kind in "fiu"
is_valid = is_valid and arr.shape[0] == self.data.n_obs and arr.shape[1] >= 2
is_valid = is_valid and np.all(np.isfinite(arr))
return is_valid
    @requires_data
    def _validate_data_types(self):
        """Warn about matrix/annotation formats that are legal but slow or
        lossy (non-CSC sparse X, non-float32 X, downcast-prone dtypes,
        huge categorical cardinalities)."""
        if sparse.isspmatrix(self.data.X) and not sparse.isspmatrix_csc(self.data.X):
            warnings.warn(
                f"Scanpy data matrix is sparse, but not a CSC (columnar) matrix.  "
                f"Performance may be improved by using CSC."
            )
        if self.data.X.dtype != "float32":
            warnings.warn(
                f"Scanpy data matrix is in {self.data.X.dtype} format not float32. "
                f"Precision may be truncated."
            )
        for ax in Axis:
            curr_axis = getattr(self.data, str(ax))
            for ann in curr_axis:
                datatype = curr_axis[ann].dtype
                downcast_map = {
                    "int64": "int32",
                    "uint32": "int32",
                    "uint64": "int32",
                    "float64": "float32",
                }
                if datatype in downcast_map:
                    warnings.warn(
                        f"Scanpy annotation {ax}:{ann} is in unsupported format: {datatype}. "
                        f"Data will be downcast to {downcast_map[datatype]}."
                    )
                if isinstance(datatype, CategoricalDtype):
                    category_num = len(curr_axis[ann].dtype.categories)
                    if category_num > 500 and category_num > self.config['max_category_items']:
                        warnings.warn(
                            f"{str(ax).title()} annotation '{ann}' has {category_num} categories, this may be "
                            f"cumbersome or slow to display. We recommend setting the "
                            f"--max-category-items option to 500, this will hide categorical "
                            f"annotations with more than 500 categories in the UI"
                        )
    @requires_data
    def _validate_label_data(self, labels):
        """
        labels is None if disabled, empty if enabled by no data
        """
        if labels is None or labels.empty:
            return
        # all lables must have a name, which must be unique and not used in obs column names
        if not labels.columns.is_unique:
            raise KeyError(f"All column names specified in user annotations must be unique.")
        # the label index must be unique, and must have same values the anndata obs index
        if not labels.index.is_unique:
            raise KeyError(f"All row index values specified in user annotations must be unique.")
        if not labels.index.equals(self.original_obs_index):
            raise KeyError("Label file row index does not match H5AD file index. "
                           "Please ensure that column zero (0) in the label file contain the same "
                           "index values as the H5AD file.")
        duplicate_columns = list(set(labels.columns) & set(self.data.obs.columns))
        if len(duplicate_columns) > 0:
            raise KeyError(f"Labels file may not contain column names which overlap "
                           f"with h5ad obs columns {duplicate_columns}")
        # labels must have same count as obs annotations
        if labels.shape[0] != self.data.obs.shape[0]:
            raise ValueError("Labels file must have same number of rows as h5ad file.")
@staticmethod
def _annotation_filter_to_mask(filter, d_axis, count):
mask = np.ones((count,), dtype=bool)
for v in filter:
if d_axis[v["name"]].dtype.name in ["boolean", "category", "object"]:
key_idx = np.in1d(getattr(d_axis, v["name"]), v["values"])
mask = np.logical_and(mask, key_idx)
else:
min_ = v.get("min", None)
max_ = v.get("max", None)
if min_ is not None:
key_idx = (getattr(d_axis, v["name"]) >= min_).ravel()
mask = np.logical_and(mask, key_idx)
if max_ is not None:
key_idx = (getattr(d_axis, v["name"]) <= max_).ravel()
mask = np.logical_and(mask, key_idx)
return mask
@staticmethod
def _index_filter_to_mask(filter, count):
mask = np.zeros((count,), dtype=bool)
for i in filter:
if type(i) == list:
mask[i[0]: i[1]] = True
else:
mask[i] = True
return mask
@staticmethod
def _axis_filter_to_mask(filter, d_axis, count):
mask = np.ones((count,), dtype=bool)
if "index" in filter:
mask = np.logical_and(
mask, ScanpyEngine._index_filter_to_mask(filter["index"], count)
)
if "annotation_value" in filter:
mask = np.logical_and(
mask,
ScanpyEngine._annotation_filter_to_mask(
filter["annotation_value"], d_axis, count
),
)
return mask
@requires_data
def _filter_to_mask(self, filter, use_slices=True):
if use_slices:
obs_selector = slice(0, self.data.n_obs)
var_selector = slice(0, self.data.n_vars)
else:
obs_selector = None
var_selector = None
if filter is not None:
if Axis.OBS in filter:
obs_selector = self._axis_filter_to_mask(
filter["obs"], self.data.obs, self.data.n_obs
)
if Axis.VAR in filter:
var_selector = self._axis_filter_to_mask(
filter["var"], self.data.var, self.data.n_vars
)
return obs_selector, var_selector
    @requires_data
    def annotation_to_fbs_matrix(self, axis, fields=None, uid=None, collection=None):
        """Encode the annotation dataframe for ``axis`` as a flatbuffer matrix.

        For OBS, user label columns (when the annotations feature is enabled)
        are joined onto the h5ad obs dataframe; any other axis returns the var
        dataframe.  ``fields`` optionally restricts the returned columns.

        :raises ScanpyFileError: if the user label file cannot be read.
        """
        if axis == Axis.OBS:
            if self.config["annotations"]:
                try:
                    labels = read_labels(self.get_anno_fname(uid, collection))
                except Exception as e:
                    raise ScanpyFileError(
                        f"Error while loading label file: {e}, File must be in the .csv format, please check "
                        f"your input and try again."
                    )
            else:
                labels = None
            if labels is not None and not labels.empty:
                # Join user labels onto obs, aligning on the configured obs-name column.
                df = self.data.obs.join(labels, self.config['obs_names'])
            else:
                df = self.data.obs
        else:
            df = self.data.var
        if fields is not None and len(fields) > 0:
            # Subset to the requested annotation columns only.
            df = df[fields]
        return encode_matrix_fbs(df, col_idx=df.columns)
    @requires_data
    def annotation_put_fbs(self, axis, fbs, uid=None, collection=None):
        """Persist user-editable OBS annotations supplied as a flatbuffer matrix.

        Decodes ``fbs`` into a label dataframe, validates it against the h5ad
        obs columns, and writes it (with a provenance header) to the per-user
        annotations CSV under a lock.

        :raises DisabledFeatureError: if writable annotations are disabled.
        :raises ScanpyFileError: if no output file name can be determined.
        :raises ValueError: if ``axis`` is not OBS.
        :raises KeyError: if new columns collide with existing obs columns.
        """
        if not self.config["annotations"]:
            raise DisabledFeatureError("Writable annotations are not enabled")
        fname = self.get_anno_fname(uid, collection)
        if not fname:
            raise ScanpyFileError("Writable annotations - unable to determine file name for annotations")
        if axis != Axis.OBS:
            raise ValueError("Only OBS dimension access is supported")
        new_label_df = decode_matrix_fbs(fbs)
        if not new_label_df.empty:
            # Re-key the decoded labels by the original obs index so rows align.
            new_label_df.index = self.original_obs_index
        self._validate_label_data(new_label_df)  # paranoia
        # if any of the new column labels overlap with our existing labels, raise error
        duplicate_columns = list(set(new_label_df.columns) & set(self.data.obs.columns))
        if not new_label_df.columns.is_unique or len(duplicate_columns) > 0:
            raise KeyError(f"Labels file may not contain column names which overlap "
                           f"with h5ad obs columns {duplicate_columns}")
        # update our internal state and save it.  Multi-threading often enabled,
        # so treat this as a critical section.
        with self.label_lock:
            lastmod = self.data_locator.lastmodtime()
            lastmodstr = "'unknown'" if lastmod is None else lastmod.isoformat(timespec="seconds")
            # Provenance header: when the labels were written and from which input file.
            header = f"# Annotations generated on {datetime.now().isoformat(timespec='seconds')} " \
                     f"using cellxgene version {cellxgene_version}\n" \
                     f"# Input data file was {self.data_locator.uri_or_path}, " \
                     f"which was last modified on {lastmodstr}\n"
            write_labels(fname, new_label_df, header, backup_dir=self.get_anno_backup_dir(uid, collection))
        return jsonify_scanpy({"status": "OK"})
@requires_data
def data_frame_to_fbs_matrix(self, filter, axis):
"""
Retrieves data 'X' and returns in a flatbuffer Matrix.
:param filter: filter: dictionary with filter params
:param axis: string obs or var
:return: flatbuffer Matrix
Caveats:
* currently only supports access on VAR axis
* currently only supports filtering on VAR axis
"""
if axis != Axis.VAR:
raise ValueError("Only VAR dimension access is supported")
try:
obs_selector, var_selector = self._filter_to_mask(filter, use_slices=False)
except (KeyError, IndexError, TypeError) as e:
raise FilterError(f"Error parsing filter: {e}") from e
if obs_selector is not None:
raise FilterError("filtering on obs unsupported")
# Currently only handles VAR dimension
X = MatrixProxy.create(self.data.X if var_selector is None
else self.data.X[:, var_selector])
return encode_matrix_fbs(X, col_idx=np.nonzero(var_selector)[0], row_idx=None)
@requires_data
def diffexp_topN(self, obsFilterA, obsFilterB, top_n=None, interactive_limit=None):
if Axis.VAR in obsFilterA or Axis.VAR in obsFilterB:
raise FilterError("Observation filters may not contain vaiable conditions")
try:
obs_mask_A = self._axis_filter_to_mask(
obsFilterA["obs"], self.data.obs, self.data.n_obs
)
obs_mask_B = self._axis_filter_to_mask(
obsFilterB["obs"], self.data.obs, self.data.n_obs
)
except (KeyError, IndexError) as e:
raise FilterError(f"Error parsing filter: {e}") from e
if top_n is None:
top_n = DEFAULT_TOP_N
result = diffexp_ttest(
self.data, obs_mask_A, obs_mask_B, top_n, self.config['diffexp_lfc_cutoff']
)
try:
return jsonify_scanpy(result)
except ValueError:
raise JSONEncodingValueError(
"Error encoding differential expression to JSON"
)
@requires_data
def layout_to_fbs_matrix(self):
"""
Return the default 2-D layout for cells as a FBS Matrix.
Caveats:
* does not support filtering
* only returns Matrix in columnar layout
All embeddings must be individually centered & scaled (isotropically)
to a [0, 1] range.
"""
try:
layout_data = []
for layout in self.config["layout"]:
full_embedding = self.data.obsm[f"X_{layout}"]
embedding = full_embedding[:, :2]
# scale isotropically
min = embedding.min(axis=0)
max = embedding.max(axis=0)
scale = np.amax(max - min)
normalized_layout = (embedding - min) / scale
# translate to center on both axis
translate = 0.5 - ((max - min) / scale / 2)
normalized_layout = normalized_layout + translate
normalized_layout = normalized_layout.astype(dtype=np.float32)
layout_data.append(pandas.DataFrame(normalized_layout, columns=[f"{layout}_0", f"{layout}_1"]))
except ValueError as e:
raise PrepareError(
f"Layout has not been calculated using {self.config['layout']}, "
f"please prepare your datafile and relaunch cellxgene") from e
df = pandas.concat(layout_data, axis=1, copy=False)
return encode_matrix_fbs(df, col_idx=df.columns, row_idx=None)
| 41.851796 | 111 | 0.58336 |
e4dea0485e697e846e04ecd15cc443326ff1c662 | 21,585 | py | Python | openai/baselines/baselines/deepq/experiments_17_balanced_alpha09/cloud_environment_real.py | habichta/ETHZDeepReinforcementLearning | e1ae22159753724290f20068214bb3d94fcb7be4 | [
"BSD-3-Clause"
] | 7 | 2018-01-23T05:17:50.000Z | 2020-10-30T02:29:59.000Z | openai/baselines/baselines/deepq/experiments_17_balanced_alpha09beta_reward_large_shorter/cloud_environment_real.py | habichta/ETHZDeepReinforcementLearning | e1ae22159753724290f20068214bb3d94fcb7be4 | [
"BSD-3-Clause"
] | null | null | null | openai/baselines/baselines/deepq/experiments_17_balanced_alpha09beta_reward_large_shorter/cloud_environment_real.py | habichta/ETHZDeepReinforcementLearning | e1ae22159753724290f20068214bb3d94fcb7be4 | [
"BSD-3-Clause"
] | 2 | 2018-01-23T05:17:58.000Z | 2018-07-02T00:13:34.000Z | import sys
import numpy as np
import pandas as pd
import random
import os
from scipy import misc
import pickle
import cv2
#TODO: note gradient norm is clipped by baseline at 10
class RealCloudEnvironment():
    def __init__(self, data_path,img_path,train_set_path, image_size=84,
                 sequence_length=4, sequence_stride=9, action_nr=7, action_type=1,adapt_step_size=True, ramp_step=0.1, episode_length_train=None,
                 file="rl_data_sp.csv",load_train_episodes=None,mask_path=None,sample_training_episodes=None,exploration_follow="IRR",start_exploration_deviation=0.2,clip_irradiance=False):
        """RL environment over recorded sky images and irradiance data.

        :param data_path: directory containing the rl_data CSV ``file``
        :param img_path: directory containing the sky images
        :param train_set_path: text file listing the training-day file names
        :param image_size: square side length images are resized to
        :param sequence_length: images stacked per observation
        :param sequence_stride: sample stride between stacked images
        :param action_nr / action_type: discrete action-space configuration
        :param adapt_step_size: scale ramp steps by real time gap between samples
        :param ramp_step: max ramp per minute (normalized units)
        :param episode_length_train: samples per episode (None = one day)
        :param exploration_follow: signal the initial control input follows ("IRR" or "MPC")
        :param start_exploration_deviation: uniform noise added to the initial control input
        :param clip_irradiance: binarize irradiance against 70% of clear-sky model
        """
        # Observation / episode configuration.
        self.sequence_length = sequence_length
        self.sequence_stride = sequence_stride
        self.episode_length_train = episode_length_train
        self.ramp_step = ramp_step
        self.image_size = image_size
        self.load_train_episodes = load_train_episodes
        self.mask_path = mask_path
        self.sample_training_episodes = sample_training_episodes
        self.start_exploration_deviation = start_exploration_deviation
        self.exploration_follow=exploration_follow
        self.adapt_step_size=adapt_step_size
        self.clip_irradiance = clip_irradiance
        # Flattened observation: images + irradiance sequence + control input.
        self.observation_space = self.ObservationSpace(
            (image_size * image_size * sequence_length * 3 + sequence_length + 1, 1))
        self.action_space = self.ActionSpace(action_type, action_nr, ramp_step,adapt_step_size)
        if self.mask_path:
            self.mask=misc.imread(self.mask_path)==0 #255 and 0 values
        else:
            self.mask=None
        self.file_path = os.path.join(data_path, file)
        self.img_path = img_path
        # Episodes:
        self.train_episodes = self.__create_episodes(train_set_path=train_set_path)
        self.nr_train_episodes = len(self.train_episodes)
        self.temp_train_episodes = list(self.train_episodes)
        # Training globals (set by reset()/step()).
        self.current_episode_train_step_pointer = None
        self.current_episode_train = None
        self.current_episode_train_control_input_values = []
        self.start_date = None
        self.end_date = None
    @property
    def current_train_episode(self):
        """DataFrame of the episode currently being trained on."""
        return self.current_episode_train
    @property
    def current_train_control_inputs(self):
        """List of (control_input, timestamp) tuples for the current episode."""
        return self.current_episode_train_control_input_values
    @property
    def episode_n(self):
        """Total number of training episodes."""
        return self.nr_train_episodes
    @property
    def episode_id(self):
        """Timestamp of the first sample of the current episode."""
        return self.start_date
    @property
    def episode_end_id(self):
        """Timestamp of the last sample of the current episode."""
        return self.end_date
    def reset(self):
        """Start a new episode and return the initial observation.

        Samples a random unused episode (re-filling the pool once exhausted),
        builds the strided image-sequence sample index, initializes the control
        input from IRR or MPC (plus exploration noise), and returns the
        flattened observation (images + irradiance sequence + control input).
        """
        print("Resetting environment...")
        if not self.temp_train_episodes:
            print("Epoch finished...")
            # When all training episodes have been sampled at least once, renew the list, start again
            self.temp_train_episodes = list(self.train_episodes)
        print("Sampling episode...")
        # Sample a random episode from the train_episodes list, delete it from list so that it is not sampled in this epoch again
        self.current_episode_train = self.temp_train_episodes.pop(
            random.randrange(len(self.temp_train_episodes)))  # sample episode and remove from temporary list
        print("Episode (from/to): ", str(self.current_episode_train.index[0]),
              str(self.current_episode_train.index[-1]))
        print("Samples in episode:", len(self.current_episode_train))
        # get index from current episode (Datetime)
        index = self.current_episode_train.index.tolist()
        self.start_date =index[0]
        self.end_date = index[-1]
        # Create index for samples depending on image sequence length and stride
        self.train_episode_samples = [index[i:(i + (self.sequence_length * self.sequence_stride)):self.sequence_stride]
                                      for i in
                                      range(len(index) - (self.sequence_length - 1) * self.sequence_stride)]
        # Set pointer to the current sample, advanced by step()
        self.current_episode_train_step_pointer = 0
        # Get first sample index, list of timestamps of the images and irradiance data
        first_state_index = self.train_episode_samples[self.current_episode_train_step_pointer]
        # Load actual data given the timestamps
        current_state = self.current_episode_train.loc[first_state_index]
        # list of image_names
        images_names = current_state['img_name'].values
        # create paths to images of that sample
        image_paths = [os.path.join(self.img_path, name) for name in images_names]
        # Initialize irradiance and control input
        curr_irr =np.array(current_state["irr"].values)
        curr_mpc = np.array(current_state["mpc"].values)
        #MPC follow : current_control_input = current_mpc[-1]
        #Random:
        if self.exploration_follow == "IRR":
            curr_ci = curr_irr[-1]
        elif self.exploration_follow == "MPC":
            curr_ci = curr_mpc[-1]
        else:
            raise ValueError("Choose correct exploration follow: IRR or MPC")
        if self.start_exploration_deviation:
            curr_ci = curr_ci+np.float32(np.random.uniform(-self.start_exploration_deviation,self.start_exploration_deviation)) # at least some different steps in beginning of episodes
        # Control input can never be negative.
        if curr_ci< 0.0:
            curr_ci = 0.0
        #current_control_input = np.random.uniform(200.0,800.0)
        # Reset list that stores all control inputs for an episode and append first control input
        current_timestamp = current_state.index[-1]
        self.current_episode_train_control_input_values = []
        self.current_episode_train_control_input_values.append(
            (curr_ci, current_timestamp))  # add tuple with control input and timestamp
        # Decode jpeg images and preprocess
        image_tensor = self.__decode_image(image_paths)
        env_obs = np.concatenate([image_tensor.ravel(), curr_irr, np.reshape(curr_ci, (1))]).astype(np.float16)[:, None]
        """
        cv2.imshow('next_state_image_32', np.uint8(np.reshape(env_obs[0:-3], (84, 84, 6))[:, :, 3:6]))
        cv2.waitKey(50)
        """
        return env_obs
    def step(self, action):
        """Advance the episode by one transition.

        Applies ``action`` via the ActionSpace to obtain the next control
        input and reward, records the control input, and returns
        ``(next_observation, reward, done, 0)``.
        """
        # Update step variable
        current_step = self.current_episode_train_step_pointer
        self.current_episode_train_step_pointer += 1  # next step to get data of next state
        next_step = self.current_episode_train_step_pointer
        # get state data
        current_state = self.current_episode_train.loc[self.train_episode_samples[current_step]]
        next_state = self.current_episode_train.loc[self.train_episode_samples[next_step]]  # data of next state
        next_irr = np.array(next_state["irr"].values)  # irradiance in next step batch x 1
        curr_irr = np.array(current_state["irr"].values)
        current_control_input = self.current_episode_train_control_input_values[-1][
            0]  # get last control_input from list
        # calculate the next control input given the current input and the time difference + ramp between current and next state
        next_ci, reward = self.action_space.calculate_step(action=action,next_irr=next_irr[-1],curr_irr=curr_irr[-1],current_ci=current_control_input,curr_index=current_state.index.values[
            -1],next_index=next_state.index.values[-1])
        # Update control input list
        next_timestamp = next_state.index[-1]
        self.current_episode_train_control_input_values.append(
            (next_ci, next_timestamp))  # Add next control input value
        # done: whether the next state is the last of the episode, e.g. end of day
        done = bool(next_state.iloc[-1]["done"])
        # Get images of next state
        images_names = next_state['img_name'].values
        image_paths = [os.path.join(self.img_path, name) for name in images_names]
        next_image_tensor = self.__decode_image(image_paths)
        next_env_obs = np.concatenate([next_image_tensor.ravel(), next_irr, np.reshape(next_ci, (1))]).astype(np.float16)[:,None]
        #DEBUG: #######################################################################################################
        # Disabled visual-debugging snippets kept below as a string literal.
        """
        cv2.imshow('next_state_image_32',
                   np.uint8(np.concatenate((np.reshape(next_env_obs[0:-3], (84, 84, 6))[:, :, 0:3],
                                            np.reshape(next_env_obs[0:-3], (84, 84, 6))[:, :, 3:6]), axis=1)))
        cv2.waitKey(5)
        print(next_timestamp, " reward:", reward, " next_irr:", '{0:.8f}'.format(next_irr[-1]), " next_ci:",
              '{0:.8f}'.format(next_ci), " action:", action)
        cv2.imshow('next_state_image_32',
                   np.uint8((np.reshape(next_env_obs[0:-3], (128, 128, 6))[:, :, 0] -
                             np.reshape(next_env_obs[0:-3], (128, 128, 6))[:, :, 3])))
        cv2.waitKey(50)
        #Show difference of both next state images
        cv2.imshow('next_state_image_32',
                   np.uint8((np.reshape(next_env_obs[0:-3], (84, 84, 6))[:, :, 0] -
                             np.reshape(next_env_obs[0:-3], (84, 84, 6))[:, :, 3])))
        images_names = current_state['img_name'].values
        image_paths = [os.path.join(self.img_path, name) for name in images_names]
        current_image_tensor = self.__decode_image(image_paths)
        #Show both current image nad next state image
        cv2.imshow('next_state_image_32',
                   np.uint8(np.concatenate((current_image_tensor[:,:,3:6],
                                            np.reshape(next_env_obs[0:-3], (84, 84, 6))[:, :, 3:6]), axis=0)))
        #Show difference between current state image and next state image. Check if there is at least some difference between them in 84x84
        cv2.imshow('next_state_image_32',
                   np.uint8((current_image_tensor[:, :, 2]-np.reshape(next_env_obs[0:-3], (84, 84, 6))[:, :, 2]), axis=0))
        if done:
            irr_list, mpc_list, ci_list, index_list = [], [], [], []
            # column_list = ["irr", "mpc", "ci"]
            for t in self.current_train_control_inputs:
                index_list.append(t[1])
                ci_list.append(t[0])
            ci_df = pd.DataFrame(data=ci_list, index=index_list, columns=["ci"])
            irrmpc_df = self.current_episode_train.loc[ci_df.index]
            data_df = pd.concat([ci_df, irrmpc_df], axis=1)
            #data_df[["ci", "irr", "mpc"]].plot()
            print(data_df)
            #plt.show()
        #######################################################################################################
        """
        return next_env_obs, reward, done,0  # return s',r,d
def __decode_image(self, image_paths):
#Node newer images are further back in terms of channel coordinates! 0:3 -> first image .... etc. the last iamge is in the last 3 channels
image_np = np.concatenate([self.__preprocess_image(cv2.imread(image)) for image in image_paths], axis=2)
return image_np
    def __preprocess_image(self, image):
        """Apply the optional sky mask and resize to the configured square size.

        NOTE(review): mutates ``image`` in place when a mask is configured;
        ``scipy.misc.imresize`` is deprecated in newer SciPy — confirm the
        pinned SciPy version supports it.
        """
        if self.mask_path:
            # Zero out masked pixels (mask is True where the source image was 0).
            image[self.mask]=0.0
        image = misc.imresize(image, [self.image_size, self.image_size, 3])
        return image
    def __create_episodes(self, train_set_path):
        """Load the rl_data CSV and split the training days into episodes.

        Episodes are either loaded from a pickle (``load_train_episodes``) or
        built per day (optionally chunked to ``episode_length_train`` samples).
        Each episode DataFrame gets a ``done`` column with 1 on its last row.
        Episodes with large time gaps or too few samples are filtered out,
        irradiance is optionally binarized, and the result may be subsampled.
        """
        print("Environment: Loading rl_data file and datasets...")
        rl_pd = pd.DataFrame.from_csv(self.file_path).sort_index()
        #Divide mpc and irr by 1000 to normalize all values between 0 and 1 (more or less, since there is some irradiance >1000):
        rl_pd[['mpc','irr','cs']] = rl_pd[['mpc','irr','cs']]/1000.0
        if train_set_path:
            print("reading " + str(train_set_path))
            with open(str(train_set_path)) as f:
                # Strip the leading "<prefix>-" from each listed file name.
                self.train_list = sorted([os.path.basename(l).split('-', 1)[1] for l in f.read().splitlines()])
        else:
            self.train_list = None
        print("Creating episodes...")
        train_episodes = []
        if self.load_train_episodes:
            # NOTE(review): pickle.load on an externally supplied file is unsafe
            # for untrusted input.
            with open(self.load_train_episodes,'rb') as f:
                train_episodes = pickle.load(f)
        else:
            if self.train_list:
                for train_day_it in self.train_list:
                    td_pd = pd.DataFrame(rl_pd.loc[train_day_it])
                    if self.episode_length_train is None:  # 1 day = 1 episode
                        done_pd = np.zeros(len(td_pd.index)).astype(int)
                        done_pd[-1] = 1
                        td_pd["done"] = done_pd
                        train_episodes.append(td_pd)
                    else:
                        for g, episode in td_pd.groupby(np.arange(len(td_pd)) // self.episode_length_train):
                            episode_df = pd.DataFrame(episode)
                            done_pd = np.zeros(len(episode_df.index)).astype(int)
                            done_pd[-1] = 1
                            episode_df["done"] = done_pd
                            train_episodes.append(episode_df)
        print("Episodes in Set:" ,len(train_episodes))
        train_episodes_filtered = [te for te in train_episodes if self.filter_episodes(te)]  # filter out too small episodes. at 1 step
        train_episodes_final = []
        if self.clip_irradiance:  #changes irradiance to 1.0 or 0.0. 1.0 if irradiance is larger than 70% of clear sky model
            for e_pd in train_episodes_filtered:
                e_pd['irr'] = np.where(e_pd['cs'] * 0.7 < e_pd['irr'], 1.0, 0.0)
                train_episodes_final.append(e_pd)
        else:
            train_episodes_final=train_episodes_filtered
        if self.sample_training_episodes:
            train_episodes_final = np.random.choice(train_episodes_final,size=self.sample_training_episodes)
        # NOTE(review): this prints the pre-filter count, not len(train_episodes_final).
        print("Episodes in Set (after filter and sampling):", len(train_episodes))
        return train_episodes_final
    def filter_episodes(self, df):
        """Return True if the episode ``df`` is usable for training.

        Rejects episodes with a gap larger than 14 seconds between consecutive
        samples, or with too few samples for one full image sequence.
        NOTE(review): mutates ``df`` in place by adding 'tvalue' and 'delta'
        helper columns, which persist in the stored episodes.
        """
        keep = True
        df['tvalue'] = df.index
        df['delta'] = (df['tvalue'] - df['tvalue'].shift()).fillna(0)
        if np.max(df['delta']/ np.timedelta64(1,'s')) > 14.0:  # check there are not too large time differences between samples
            keep = False
        if len(df) < self.sequence_length*self.sequence_stride+1:  # some episodes can be too small since not for all days sample % episode_size == 0 !
            keep = False
        return keep
    def mpc_exploration(self, mpc_prob=0.5):
        """
        env.reset needs to be called first. Create exploration that follows MPC in the training set
        to a certain degree.
        :param mpc_prob: probability of taking the action whose resulting control input gets closest
            to the MPC target; every other action is chosen with probability (1-p)/(n_actions-1)
        :return: action to choose (integer)
        """
        # Look up current and next state for the pending transition.
        current_step = self.current_episode_train_step_pointer
        next_step = self.current_episode_train_step_pointer + 1
        current_state = self.current_episode_train.loc[self.train_episode_samples[current_step]]
        next_state = self.current_episode_train.loc[self.train_episode_samples[next_step]]
        next_irr = np.array(next_state["irr"].values)
        curr_irr = np.array(current_state["irr"].values)
        current_control_input = self.current_episode_train_control_input_values[-1][
            0]  # get last control_input from list
        mpc = np.array(next_state["mpc"].values)[-1]
        # Evaluate every action's distance to the MPC target control input.
        control_inputs = list()
        for a in range(self.action_space.n):
            next_ci, _ = self.action_space.calculate_step(action=a, next_irr=next_irr[-1],
                                                          curr_irr=curr_irr[-1], current_ci=current_control_input,
                                                          curr_index=current_state.index.values[
                                                              -1], next_index=next_state.index.values[-1])
            control_inputs.append(abs(next_ci - mpc))
        #best_action = np.argmin(control_inputs[1:])+1 #do not take 0 action into account, only favour non zero
        best_action = np.argmin(control_inputs)
        # Weighted sample: best action gets mpc_prob, the rest share the remainder.
        action_array = np.arange(0,self.action_space.n, 1)
        normal_action_weight = (1 - mpc_prob) / (self.action_space.n - 1)
        action_weights = np.ones(self.action_space.n) * normal_action_weight
        action_weights[best_action] = mpc_prob
        action = np.random.choice(action_array, replace=False, p=action_weights)
        return action
class ActionSpace():
def __init__(self, type=0, action_nr=2, ramp_step=0.1,adapt_step_size=True):
self.n = action_nr
self.type = type
self.ramp_step = ramp_step
self.adapt_step_size = adapt_step_size
def calculate_step(self, action, next_irr, curr_irr, current_ci,curr_index,next_index):
# return next ci given action
ramp_step_s = self.ramp_step/60.0
if self.adapt_step_size:
stretch_factor = (next_index - curr_index) / np.timedelta64(1, 's')
else:
stretch_factor = 7.0 # median difference between states in 7 seconds
if self.type == 0:
if action == 0: # upper target
next_ci = float(curr_irr)
elif action == 1: # upper target
next_ci = float(current_ci) + ramp_step_s*stretch_factor
elif action == 2: # lower target
next_ci = np.maximum(0.0, current_ci - ramp_step_s*stretch_factor)
elif action == 3:
next_ci = float(current_ci) + ramp_step_s*stretch_factor / 2
elif action == 4:
next_ci = np.maximum(0.0, current_ci - ramp_step_s*stretch_factor / 2)
elif action == 5:
next_ci = float(current_ci) + ramp_step_s*stretch_factor / 4
elif action == 6:
next_ci = np.maximum(0.0, current_ci - ramp_step_s*stretch_factor / 4)
else:
raise ValueError('Illegal action')
elif self.type == 1:
if action == 0:
diff = curr_irr - current_ci
ramp = np.abs(diff) > ramp_step_s*stretch_factor
if ramp:
next_ci = current_ci + np.sign(diff) * ramp_step_s*stretch_factor
else:
next_ci = curr_irr
elif action == 1: # upper target
next_ci = float(current_ci) + ramp_step_s*stretch_factor
elif action == 2: # lower target
next_ci = np.maximum(0.0, current_ci - ramp_step_s*stretch_factor)
elif action == 3:
next_ci = float(current_ci) + ramp_step_s*stretch_factor/2
elif action == 4:
next_ci = np.maximum(0.0, current_ci - ramp_step_s*stretch_factor/2)
elif action == 5:
next_ci = float(current_ci) + ramp_step_s*stretch_factor / 4
elif action == 6:
next_ci = np.maximum(0.0, current_ci - ramp_step_s*stretch_factor / 4)
else:
raise ValueError('Illegal action')
elif self.type == 2:
if action == 0:
diff = next_irr - current_ci
ramp = np.abs(diff) > ramp_step_s*stretch_factor
if ramp:
next_ci = current_ci + np.sign(diff) * ramp_step_s*stretch_factor
else:
next_ci = next_irr
elif action == 1: # upper target
next_ci = float(current_ci) + ramp_step_s*stretch_factor
elif action == 2: # lower target
next_ci = np.maximum(0.0, current_ci - ramp_step_s*stretch_factor)
elif action == 3:
next_ci = float(current_ci) + ramp_step_s*stretch_factor/2
elif action == 4:
next_ci = np.maximum(0.0, current_ci - ramp_step_s*stretch_factor/2)
elif action == 5:
next_ci = float(current_ci) + ramp_step_s*stretch_factor / 4
elif action == 6:
next_ci = np.maximum(0.0, current_ci - ramp_step_s*stretch_factor / 4)
else:
raise ValueError('Illegal action')
elif self.type == -1:
# naive policy with curr irr
diff = curr_irr - current_ci
ramp = np.abs(diff) > ramp_step_s*stretch_factor
if ramp:
next_ci = current_ci + np.sign(diff) * ramp_step_s*stretch_factor
else:
next_ci = curr_irr
else:
raise ValueError('Illegal Action Set')
reward = np.maximum(-np.abs(next_ci - next_irr).squeeze(),-3.0) # clip reward against outliers/errors. should not reach a level of -3.0
return next_ci, reward
    class ObservationSpace():
        """Minimal gym-like observation space: exposes only ``shape``."""
        def __init__(self, shape):
            # shape: tuple describing the flattened observation vector.
            self.shape = shape
56c8c62259e79e6c8982dae3866f07917743ea6d | 64,372 | py | Python | python/pyspark/ml/classification.py | watera/spark | 1386fd28daf798bf152606f4da30a36223d75d18 | [
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | 1 | 2021-09-14T07:31:45.000Z | 2021-09-14T07:31:45.000Z | python/pyspark/ml/classification.py | watera/spark | 1386fd28daf798bf152606f4da30a36223d75d18 | [
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | null | null | null | python/pyspark/ml/classification.py | watera/spark | 1386fd28daf798bf152606f4da30a36223d75d18 | [
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import operator
from pyspark import since, keyword_only
from pyspark.ml import Estimator, Model
from pyspark.ml.param.shared import *
from pyspark.ml.regression import DecisionTreeModel, DecisionTreeRegressionModel, \
RandomForestParams, TreeEnsembleModel, TreeEnsembleParams
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams
from pyspark.ml.wrapper import JavaWrapper
from pyspark.ml.common import inherit_doc
from pyspark.sql import DataFrame
from pyspark.sql.functions import udf, when
from pyspark.sql.types import ArrayType, DoubleType
from pyspark.storagelevel import StorageLevel
# Public estimators, models and summaries exported by pyspark.ml.classification.
__all__ = ['LogisticRegression', 'LogisticRegressionModel',
           'LogisticRegressionSummary', 'LogisticRegressionTrainingSummary',
           'BinaryLogisticRegressionSummary', 'BinaryLogisticRegressionTrainingSummary',
           'DecisionTreeClassifier', 'DecisionTreeClassificationModel',
           'GBTClassifier', 'GBTClassificationModel',
           'RandomForestClassifier', 'RandomForestClassificationModel',
           'NaiveBayes', 'NaiveBayesModel',
           'MultilayerPerceptronClassifier', 'MultilayerPerceptronClassificationModel',
           'OneVsRest', 'OneVsRestModel']
@inherit_doc
class JavaClassificationModel(JavaPredictionModel):
    """
    (Private) Java Model produced by a ``Classifier``.
    Classes are indexed {0, 1, ..., numClasses - 1}.
    To be mixed in with class:`pyspark.ml.JavaModel`
    """
    @property
    @since("2.1.0")
    def numClasses(self):
        """
        Number of classes (values which the label can take).
        """
        # Delegates to the companion JVM model via Py4J.
        return self._call_java("numClasses")
@inherit_doc
class LogisticRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter,
HasRegParam, HasTol, HasProbabilityCol, HasRawPredictionCol,
HasElasticNetParam, HasFitIntercept, HasStandardization, HasThresholds,
HasWeightCol, HasAggregationDepth, JavaMLWritable, JavaMLReadable):
"""
Logistic regression.
This class supports multinomial logistic (softmax) and binomial logistic regression.
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> bdf = sc.parallelize([
... Row(label=1.0, weight=2.0, features=Vectors.dense(1.0)),
... Row(label=0.0, weight=2.0, features=Vectors.sparse(1, [], []))]).toDF()
>>> blor = LogisticRegression(maxIter=5, regParam=0.01, weightCol="weight")
>>> blorModel = blor.fit(bdf)
>>> blorModel.coefficients
DenseVector([5.5...])
>>> blorModel.intercept
-2.68...
>>> mdf = sc.parallelize([
... Row(label=1.0, weight=2.0, features=Vectors.dense(1.0)),
... Row(label=0.0, weight=2.0, features=Vectors.sparse(1, [], [])),
... Row(label=2.0, weight=2.0, features=Vectors.dense(3.0))]).toDF()
>>> mlor = LogisticRegression(maxIter=5, regParam=0.01, weightCol="weight",
... family="multinomial")
>>> mlorModel = mlor.fit(mdf)
>>> print(mlorModel.coefficientMatrix)
DenseMatrix([[-2.3...],
[ 0.2...],
[ 2.1... ]])
>>> mlorModel.interceptVector
DenseVector([2.0..., 0.8..., -2.8...])
>>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0))]).toDF()
>>> result = blorModel.transform(test0).head()
>>> result.prediction
0.0
>>> result.probability
DenseVector([0.99..., 0.00...])
>>> result.rawPrediction
DenseVector([8.22..., -8.22...])
>>> test1 = sc.parallelize([Row(features=Vectors.sparse(1, [0], [1.0]))]).toDF()
>>> blorModel.transform(test1).head().prediction
1.0
>>> blor.setParams("vector")
Traceback (most recent call last):
...
TypeError: Method setParams forces keyword arguments.
>>> lr_path = temp_path + "/lr"
>>> blor.save(lr_path)
>>> lr2 = LogisticRegression.load(lr_path)
>>> lr2.getMaxIter()
5
>>> model_path = temp_path + "/lr_model"
>>> blorModel.save(model_path)
>>> model2 = LogisticRegressionModel.load(model_path)
>>> blorModel.coefficients[0] == model2.coefficients[0]
True
>>> blorModel.intercept == model2.intercept
True
.. versionadded:: 1.3.0
"""
threshold = Param(Params._dummy(), "threshold",
"Threshold in binary classification prediction, in range [0, 1]." +
" If threshold and thresholds are both set, they must match." +
"e.g. if threshold is p, then thresholds must be equal to [1-p, p].",
typeConverter=TypeConverters.toFloat)
family = Param(Params._dummy(), "family",
"The name of family which is a description of the label distribution to " +
"be used in the model. Supported options: auto, binomial, multinomial",
typeConverter=TypeConverters.toString)
    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
                 threshold=0.5, thresholds=None, probabilityCol="probability",
                 rawPredictionCol="rawPrediction", standardization=True, weightCol=None,
                 aggregationDepth=2, family="auto"):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
                 threshold=0.5, thresholds=None, probabilityCol="probability", \
                 rawPredictionCol="rawPrediction", standardization=True, weightCol=None, \
                 aggregationDepth=2, family="auto")
        If the threshold and thresholds Params are both set, they must be equivalent.
        """
        super(LogisticRegression, self).__init__()
        # Create the companion JVM estimator object.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.classification.LogisticRegression", self.uid)
        self._setDefault(maxIter=100, regParam=0.0, tol=1E-6, threshold=0.5, family="auto")
        # @keyword_only stashed the caller's kwargs on the method.
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)
        self._checkThresholdConsistency()
    @keyword_only
    @since("1.3.0")
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
                  threshold=0.5, thresholds=None, probabilityCol="probability",
                  rawPredictionCol="rawPrediction", standardization=True, weightCol=None,
                  aggregationDepth=2, family="auto"):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
                  threshold=0.5, thresholds=None, probabilityCol="probability", \
                  rawPredictionCol="rawPrediction", standardization=True, weightCol=None, \
                  aggregationDepth=2, family="auto")
        Sets params for logistic regression.
        If the threshold and thresholds Params are both set, they must be equivalent.
        """
        # Only the kwargs the caller explicitly passed are applied.
        kwargs = self.setParams._input_kwargs
        self._set(**kwargs)
        self._checkThresholdConsistency()
        return self
    def _create_model(self, java_model):
        """Wrap the fitted JVM model in a :py:class:`LogisticRegressionModel`."""
        return LogisticRegressionModel(java_model)
@since("1.4.0")
def setThreshold(self, value):
"""
Sets the value of :py:attr:`threshold`.
Clears value of :py:attr:`thresholds` if it has been set.
"""
self._set(threshold=value)
self._clear(self.thresholds)
return self
@since("1.4.0")
def getThreshold(self):
"""
Get threshold for binary classification.
If :py:attr:`thresholds` is set with length 2 (i.e., binary classification),
this returns the equivalent threshold:
:math:`\\frac{1}{1 + \\frac{thresholds(0)}{thresholds(1)}}`.
Otherwise, returns :py:attr:`threshold` if set or its default value if unset.
"""
self._checkThresholdConsistency()
if self.isSet(self.thresholds):
ts = self.getOrDefault(self.thresholds)
if len(ts) != 2:
raise ValueError("Logistic Regression getThreshold only applies to" +
" binary classification, but thresholds has length != 2." +
" thresholds: " + ",".join(ts))
return 1.0/(1.0 + ts[0]/ts[1])
else:
return self.getOrDefault(self.threshold)
@since("1.5.0")
def setThresholds(self, value):
"""
Sets the value of :py:attr:`thresholds`.
Clears value of :py:attr:`threshold` if it has been set.
"""
self._set(thresholds=value)
self._clear(self.threshold)
return self
@since("1.5.0")
def getThresholds(self):
"""
If :py:attr:`thresholds` is set, return its value.
Otherwise, if :py:attr:`threshold` is set, return the equivalent thresholds for binary
classification: (1-threshold, threshold).
If neither are set, throw an error.
"""
self._checkThresholdConsistency()
if not self.isSet(self.thresholds) and self.isSet(self.threshold):
t = self.getOrDefault(self.threshold)
return [1.0-t, t]
else:
return self.getOrDefault(self.thresholds)
def _checkThresholdConsistency(self):
    """
    Raise ValueError if both :py:attr:`threshold` and :py:attr:`thresholds`
    are set but disagree on the equivalent binary-classification threshold.
    """
    if self.isSet(self.threshold) and self.isSet(self.thresholds):
        # Bug fix: getOrDefault returns the param *value*; the previous
        # getParam call returned the Param descriptor itself, so len(ts)
        # and the arithmetic below raised TypeError instead of performing
        # the consistency check.
        ts = self.getOrDefault(self.thresholds)
        if len(ts) != 2:
            # str() each element: joining raw floats raises TypeError.
            raise ValueError("Logistic Regression getThreshold only applies to" +
                             " binary classification, but thresholds has length != 2." +
                             " thresholds: " + ",".join(map(str, ts)))
        t = 1.0/(1.0 + ts[0]/ts[1])
        t2 = self.getOrDefault(self.threshold)
        if abs(t2 - t) >= 1E-5:
            raise ValueError("Logistic Regression getThreshold found inconsistent values for" +
                             " threshold (%g) and thresholds (equivalent to %g)" % (t2, t))
@since("2.1.0")
def setFamily(self, value):
"""
Sets the value of :py:attr:`family`.
"""
return self._set(family=value)
@since("2.1.0")
def getFamily(self):
"""
Gets the value of :py:attr:`family` or its default value.
"""
return self.getOrDefault(self.family)
class LogisticRegressionModel(JavaModel, JavaClassificationModel, JavaMLWritable, JavaMLReadable):
    """
    Model fitted by LogisticRegression.

    .. versionadded:: 1.3.0
    """

    # Every accessor below delegates to the wrapped JVM model via _call_java.

    @property
    @since("2.0.0")
    def coefficients(self):
        """
        Model coefficients of binomial logistic regression.
        An exception is thrown in the case of multinomial logistic regression.
        """
        return self._call_java("coefficients")

    @property
    @since("1.4.0")
    def intercept(self):
        """
        Model intercept of binomial logistic regression.
        An exception is thrown in the case of multinomial logistic regression.
        """
        return self._call_java("intercept")

    @property
    @since("2.1.0")
    def coefficientMatrix(self):
        """
        Model coefficients.
        """
        return self._call_java("coefficientMatrix")

    @property
    @since("2.1.0")
    def interceptVector(self):
        """
        Model intercept.
        """
        return self._call_java("interceptVector")

    @property
    @since("2.0.0")
    def summary(self):
        """
        Gets summary (e.g. residuals, mse, r-squared ) of model on
        training set. An exception is thrown if
        `trainingSummary is None`.
        """
        java_blrt_summary = self._call_java("summary")
        # Note: Once multiclass is added, update this to return correct summary
        return BinaryLogisticRegressionTrainingSummary(java_blrt_summary)

    @property
    @since("2.0.0")
    def hasSummary(self):
        """
        Indicates whether a training summary exists for this model
        instance.
        """
        return self._call_java("hasSummary")

    @since("2.0.0")
    def evaluate(self, dataset):
        """
        Evaluates the model on a test dataset.

        :param dataset:
          Test dataset to evaluate model on, where dataset is an
          instance of :py:class:`pyspark.sql.DataFrame`
        :return: a :py:class:`BinaryLogisticRegressionSummary` over ``dataset``
        :raises ValueError: if ``dataset`` is not a DataFrame
        """
        if not isinstance(dataset, DataFrame):
            raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
        java_blr_summary = self._call_java("evaluate", dataset)
        return BinaryLogisticRegressionSummary(java_blr_summary)
class LogisticRegressionSummary(JavaWrapper):
    """
    .. note:: Experimental

    Abstraction for Logistic Regression Results for a given model.

    .. versionadded:: 2.0.0
    """

    # All properties delegate to the underlying Java summary object.

    @property
    @since("2.0.0")
    def predictions(self):
        """
        Dataframe outputted by the model's `transform` method.
        """
        return self._call_java("predictions")

    @property
    @since("2.0.0")
    def probabilityCol(self):
        """
        Field in "predictions" which gives the probability
        of each class as a vector.
        """
        return self._call_java("probabilityCol")

    @property
    @since("2.0.0")
    def labelCol(self):
        """
        Field in "predictions" which gives the true label of each
        instance.
        """
        return self._call_java("labelCol")

    @property
    @since("2.0.0")
    def featuresCol(self):
        """
        Field in "predictions" which gives the features of each instance
        as a vector.
        """
        return self._call_java("featuresCol")
@inherit_doc
class LogisticRegressionTrainingSummary(LogisticRegressionSummary):
    """
    .. note:: Experimental

    Abstraction for multinomial Logistic Regression Training results.
    Currently, the training summary ignores the training weights except
    for the objective trace.

    .. versionadded:: 2.0.0
    """

    @property
    @since("2.0.0")
    def objectiveHistory(self):
        """
        Objective function (scaled loss + regularization) at each
        iteration.
        """
        return self._call_java("objectiveHistory")

    @property
    @since("2.0.0")
    def totalIterations(self):
        """
        Number of training iterations until termination.
        """
        return self._call_java("totalIterations")
@inherit_doc
class BinaryLogisticRegressionSummary(LogisticRegressionSummary):
    """
    .. note:: Experimental

    Binary Logistic regression results for a given model.

    .. versionadded:: 2.0.0
    """

    # Each curve/metric below is computed JVM-side and returned as a
    # DataFrame (or scalar for areaUnderROC) via _call_java.

    @property
    @since("2.0.0")
    def roc(self):
        """
        Returns the receiver operating characteristic (ROC) curve,
        which is a Dataframe having two fields (FPR, TPR) with
        (0.0, 0.0) prepended and (1.0, 1.0) appended to it.

        .. seealso:: `Wikipedia reference \
        <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_

        Note: This ignores instance weights (setting all to 1.0) from
        `LogisticRegression.weightCol`. This will change in later Spark
        versions.
        """
        return self._call_java("roc")

    @property
    @since("2.0.0")
    def areaUnderROC(self):
        """
        Computes the area under the receiver operating characteristic
        (ROC) curve.

        Note: This ignores instance weights (setting all to 1.0) from
        `LogisticRegression.weightCol`. This will change in later Spark
        versions.
        """
        return self._call_java("areaUnderROC")

    @property
    @since("2.0.0")
    def pr(self):
        """
        Returns the precision-recall curve, which is a Dataframe
        containing two fields recall, precision with (0.0, 1.0) prepended
        to it.

        Note: This ignores instance weights (setting all to 1.0) from
        `LogisticRegression.weightCol`. This will change in later Spark
        versions.
        """
        return self._call_java("pr")

    @property
    @since("2.0.0")
    def fMeasureByThreshold(self):
        """
        Returns a dataframe with two fields (threshold, F-Measure) curve
        with beta = 1.0.

        Note: This ignores instance weights (setting all to 1.0) from
        `LogisticRegression.weightCol`. This will change in later Spark
        versions.
        """
        return self._call_java("fMeasureByThreshold")

    @property
    @since("2.0.0")
    def precisionByThreshold(self):
        """
        Returns a dataframe with two fields (threshold, precision) curve.
        Every possible probability obtained in transforming the dataset
        are used as thresholds used in calculating the precision.

        Note: This ignores instance weights (setting all to 1.0) from
        `LogisticRegression.weightCol`. This will change in later Spark
        versions.
        """
        return self._call_java("precisionByThreshold")

    @property
    @since("2.0.0")
    def recallByThreshold(self):
        """
        Returns a dataframe with two fields (threshold, recall) curve.
        Every possible probability obtained in transforming the dataset
        are used as thresholds used in calculating the recall.

        Note: This ignores instance weights (setting all to 1.0) from
        `LogisticRegression.weightCol`. This will change in later Spark
        versions.
        """
        return self._call_java("recallByThreshold")
@inherit_doc
class BinaryLogisticRegressionTrainingSummary(BinaryLogisticRegressionSummary,
                                              LogisticRegressionTrainingSummary):
    """
    .. note:: Experimental

    Binary Logistic regression training results for a given model.

    .. versionadded:: 2.0.0
    """
    # Pure mixin: combines the binary metrics with the training-only
    # metrics (objectiveHistory, totalIterations); adds nothing new.
    pass
class TreeClassifierParams(object):
    """
    Private class to track supported impurity measures.

    .. versionadded:: 1.4.0
    """
    # Impurity criteria accepted by the tree-based classifiers below.
    supportedImpurities = ["entropy", "gini"]

    impurity = Param(Params._dummy(), "impurity",
                     "Criterion used for information gain calculation (case-insensitive). " +
                     "Supported options: " +
                     ", ".join(supportedImpurities), typeConverter=TypeConverters.toString)

    def __init__(self):
        super(TreeClassifierParams, self).__init__()

    @since("1.6.0")
    def setImpurity(self, value):
        """
        Sets the value of :py:attr:`impurity`.
        """
        return self._set(impurity=value)

    @since("1.6.0")
    def getImpurity(self):
        """
        Gets the value of impurity or its default value.
        """
        return self.getOrDefault(self.impurity)
class GBTParams(TreeEnsembleParams):
    """
    Private class to track supported GBT params.

    .. versionadded:: 1.4.0
    """
    # Loss functions accepted by GBTClassifier (see its lossType Param).
    supportedLossTypes = ["logistic"]
@inherit_doc
class DecisionTreeClassifier(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol,
                             HasProbabilityCol, HasRawPredictionCol, DecisionTreeParams,
                             TreeClassifierParams, HasCheckpointInterval, HasSeed, JavaMLWritable,
                             JavaMLReadable):
    """
    `Decision tree <http://en.wikipedia.org/wiki/Decision_tree_learning>`_
    learning algorithm for classification.
    It supports both binary and multiclass labels, as well as both continuous and categorical
    features.

    >>> from pyspark.ml.linalg import Vectors
    >>> from pyspark.ml.feature import StringIndexer
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
    >>> si_model = stringIndexer.fit(df)
    >>> td = si_model.transform(df)
    >>> dt = DecisionTreeClassifier(maxDepth=2, labelCol="indexed")
    >>> model = dt.fit(td)
    >>> model.numNodes
    3
    >>> model.depth
    1
    >>> model.featureImportances
    SparseVector(1, {0: 1.0})
    >>> model.numFeatures
    1
    >>> model.numClasses
    2
    >>> print(model.toDebugString)
    DecisionTreeClassificationModel (uid=...) of depth 1 with 3 nodes...
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> result = model.transform(test0).head()
    >>> result.prediction
    0.0
    >>> result.probability
    DenseVector([1.0, 0.0])
    >>> result.rawPrediction
    DenseVector([1.0, 0.0])
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> model.transform(test1).head().prediction
    1.0
    >>> dtc_path = temp_path + "/dtc"
    >>> dt.save(dtc_path)
    >>> dt2 = DecisionTreeClassifier.load(dtc_path)
    >>> dt2.getMaxDepth()
    2
    >>> model_path = temp_path + "/dtc_model"
    >>> model.save(model_path)
    >>> model2 = DecisionTreeClassificationModel.load(model_path)
    >>> model.featureImportances == model2.featureImportances
    True

    .. versionadded:: 1.4.0
    """

    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 probabilityCol="probability", rawPredictionCol="rawPrediction",
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini",
                 seed=None):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 probabilityCol="probability", rawPredictionCol="rawPrediction", \
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \
                 seed=None)
        """
        super(DecisionTreeClassifier, self).__init__()
        # Create the backing JVM estimator that fit() delegates to.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.classification.DecisionTreeClassifier", self.uid)
        self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                         maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                         impurity="gini")
        # @keyword_only stores only explicitly-passed args in _input_kwargs.
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.4.0")
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  probabilityCol="probability", rawPredictionCol="rawPrediction",
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                  impurity="gini", seed=None):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  probabilityCol="probability", rawPredictionCol="rawPrediction", \
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \
                  seed=None)
        Sets params for the DecisionTreeClassifier.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python companion class.
        return DecisionTreeClassificationModel(java_model)
@inherit_doc
class DecisionTreeClassificationModel(DecisionTreeModel, JavaClassificationModel, JavaMLWritable,
                                      JavaMLReadable):
    """
    Model fitted by DecisionTreeClassifier.

    .. versionadded:: 1.4.0
    """

    @property
    @since("2.0.0")
    def featureImportances(self):
        """
        Estimate of the importance of each feature.

        This generalizes the idea of "Gini" importance to other losses,
        following the explanation of Gini importance from "Random Forests" documentation
        by Leo Breiman and Adele Cutler, and following the implementation from scikit-learn.

        This feature importance is calculated as follows:
          - importance(feature j) = sum (over nodes which split on feature j) of the gain,
            where gain is scaled by the number of instances passing through node
          - Normalize importances for tree to sum to 1.

        Note: Feature importance for single decision trees can have high variance due to
        correlated predictor variables. Consider using a :py:class:`RandomForestClassifier`
        to determine feature importance instead.
        """
        # Computed JVM-side; returned as a Vector.
        return self._call_java("featureImportances")
@inherit_doc
class RandomForestClassifier(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasSeed,
                             HasRawPredictionCol, HasProbabilityCol,
                             RandomForestParams, TreeClassifierParams, HasCheckpointInterval,
                             JavaMLWritable, JavaMLReadable):
    """
    `Random Forest <http://en.wikipedia.org/wiki/Random_forest>`_
    learning algorithm for classification.
    It supports both binary and multiclass labels, as well as both continuous and categorical
    features.

    >>> import numpy
    >>> from numpy import allclose
    >>> from pyspark.ml.linalg import Vectors
    >>> from pyspark.ml.feature import StringIndexer
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
    >>> si_model = stringIndexer.fit(df)
    >>> td = si_model.transform(df)
    >>> rf = RandomForestClassifier(numTrees=3, maxDepth=2, labelCol="indexed", seed=42)
    >>> model = rf.fit(td)
    >>> model.featureImportances
    SparseVector(1, {0: 1.0})
    >>> allclose(model.treeWeights, [1.0, 1.0, 1.0])
    True
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> result = model.transform(test0).head()
    >>> result.prediction
    0.0
    >>> numpy.argmax(result.probability)
    0
    >>> numpy.argmax(result.rawPrediction)
    0
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> model.transform(test1).head().prediction
    1.0
    >>> model.trees
    [DecisionTreeClassificationModel (uid=...) of depth..., DecisionTreeClassificationModel...]
    >>> rfc_path = temp_path + "/rfc"
    >>> rf.save(rfc_path)
    >>> rf2 = RandomForestClassifier.load(rfc_path)
    >>> rf2.getNumTrees()
    3
    >>> model_path = temp_path + "/rfc_model"
    >>> model.save(model_path)
    >>> model2 = RandomForestClassificationModel.load(model_path)
    >>> model.featureImportances == model2.featureImportances
    True

    .. versionadded:: 1.4.0
    """

    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 probabilityCol="probability", rawPredictionCol="rawPrediction",
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini",
                 numTrees=20, featureSubsetStrategy="auto", seed=None, subsamplingRate=1.0):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 probabilityCol="probability", rawPredictionCol="rawPrediction", \
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \
                 numTrees=20, featureSubsetStrategy="auto", seed=None, subsamplingRate=1.0)
        """
        super(RandomForestClassifier, self).__init__()
        # Create the backing JVM estimator that fit() delegates to.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.classification.RandomForestClassifier", self.uid)
        self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                         maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                         impurity="gini", numTrees=20, featureSubsetStrategy="auto",
                         subsamplingRate=1.0)
        # @keyword_only stores only explicitly-passed args in _input_kwargs.
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.4.0")
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  probabilityCol="probability", rawPredictionCol="rawPrediction",
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, seed=None,
                  impurity="gini", numTrees=20, featureSubsetStrategy="auto", subsamplingRate=1.0):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  probabilityCol="probability", rawPredictionCol="rawPrediction", \
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, seed=None, \
                  impurity="gini", numTrees=20, featureSubsetStrategy="auto", subsamplingRate=1.0)
        Sets params for the RandomForestClassifier.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python companion class.
        return RandomForestClassificationModel(java_model)
class RandomForestClassificationModel(TreeEnsembleModel, JavaClassificationModel, JavaMLWritable,
                                      JavaMLReadable):
    """
    Model fitted by RandomForestClassifier.

    .. versionadded:: 1.4.0
    """

    @property
    @since("2.0.0")
    def featureImportances(self):
        """
        Estimate of the importance of each feature.

        Each feature's importance is the average of its importance across all trees in the ensemble
        The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
        (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
        and follows the implementation from scikit-learn.

        .. seealso:: :py:attr:`DecisionTreeClassificationModel.featureImportances`
        """
        return self._call_java("featureImportances")

    @property
    @since("2.0.0")
    def trees(self):
        """Trees in this ensemble. Warning: These have null parent Estimators."""
        # Wrap each JVM tree model in its Python companion class.
        return [DecisionTreeClassificationModel(m) for m in list(self._call_java("trees"))]
@inherit_doc
class GBTClassifier(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter,
                    GBTParams, HasCheckpointInterval, HasStepSize, HasSeed, JavaMLWritable,
                    JavaMLReadable):
    """
    `Gradient-Boosted Trees (GBTs) <http://en.wikipedia.org/wiki/Gradient_boosting>`_
    learning algorithm for classification.
    It supports binary labels, as well as both continuous and categorical features.
    Note: Multiclass labels are not currently supported.

    The implementation is based upon: J.H. Friedman. "Stochastic Gradient Boosting." 1999.

    Notes on Gradient Boosting vs. TreeBoost:
    - This implementation is for Stochastic Gradient Boosting, not for TreeBoost.
    - Both algorithms learn tree ensembles by minimizing loss functions.
    - TreeBoost (Friedman, 1999) additionally modifies the outputs at tree leaf nodes
      based on the loss function, whereas the original gradient boosting method does not.
    - We expect to implement TreeBoost in the future:
      `SPARK-4240 <https://issues.apache.org/jira/browse/SPARK-4240>`_

    >>> from numpy import allclose
    >>> from pyspark.ml.linalg import Vectors
    >>> from pyspark.ml.feature import StringIndexer
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
    >>> si_model = stringIndexer.fit(df)
    >>> td = si_model.transform(df)
    >>> gbt = GBTClassifier(maxIter=5, maxDepth=2, labelCol="indexed", seed=42)
    >>> model = gbt.fit(td)
    >>> model.featureImportances
    SparseVector(1, {0: 1.0})
    >>> allclose(model.treeWeights, [1.0, 0.1, 0.1, 0.1, 0.1])
    True
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> model.transform(test0).head().prediction
    0.0
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> model.transform(test1).head().prediction
    1.0
    >>> model.totalNumNodes
    15
    >>> print(model.toDebugString)
    GBTClassificationModel (uid=...)...with 5 trees...
    >>> gbtc_path = temp_path + "gbtc"
    >>> gbt.save(gbtc_path)
    >>> gbt2 = GBTClassifier.load(gbtc_path)
    >>> gbt2.getMaxDepth()
    2
    >>> model_path = temp_path + "gbtc_model"
    >>> model.save(model_path)
    >>> model2 = GBTClassificationModel.load(model_path)
    >>> model.featureImportances == model2.featureImportances
    True
    >>> model.treeWeights == model2.treeWeights
    True
    >>> model.trees
    [DecisionTreeRegressionModel (uid=...) of depth..., DecisionTreeRegressionModel...]

    .. versionadded:: 1.4.0
    """

    lossType = Param(Params._dummy(), "lossType",
                     "Loss function which GBT tries to minimize (case-insensitive). " +
                     "Supported options: " + ", ".join(GBTParams.supportedLossTypes),
                     typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, lossType="logistic",
                 maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
                 lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0)
        """
        super(GBTClassifier, self).__init__()
        # Create the backing JVM estimator that fit() delegates to.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.classification.GBTClassifier", self.uid)
        self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                         maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                         lossType="logistic", maxIter=20, stepSize=0.1, subsamplingRate=1.0)
        # @keyword_only stores only explicitly-passed args in _input_kwargs.
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.4.0")
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                  lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
                  lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0)
        Sets params for Gradient Boosted Tree Classification.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python companion class.
        return GBTClassificationModel(java_model)

    @since("1.4.0")
    def setLossType(self, value):
        """
        Sets the value of :py:attr:`lossType`.
        """
        return self._set(lossType=value)

    @since("1.4.0")
    def getLossType(self):
        """
        Gets the value of lossType or its default value.
        """
        return self.getOrDefault(self.lossType)
class GBTClassificationModel(TreeEnsembleModel, JavaPredictionModel, JavaMLWritable,
                             JavaMLReadable):
    """
    Model fitted by GBTClassifier.

    .. versionadded:: 1.4.0
    """

    @property
    @since("2.0.0")
    def featureImportances(self):
        """
        Estimate of the importance of each feature.

        Each feature's importance is the average of its importance across all trees in the ensemble
        The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
        (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
        and follows the implementation from scikit-learn.

        .. seealso:: :py:attr:`DecisionTreeClassificationModel.featureImportances`
        """
        return self._call_java("featureImportances")

    @property
    @since("2.0.0")
    def trees(self):
        """Trees in this ensemble. Warning: These have null parent Estimators."""
        # GBT component trees are regression trees (they fit residuals),
        # hence the regression-model wrapper here.
        return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))]
@inherit_doc
class NaiveBayes(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasProbabilityCol,
                 HasRawPredictionCol, HasThresholds, HasWeightCol, JavaMLWritable, JavaMLReadable):
    """
    Naive Bayes Classifiers.
    It supports both Multinomial and Bernoulli NB. `Multinomial NB
    <http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html>`_
    can handle finitely supported discrete data. For example, by converting documents into
    TF-IDF vectors, it can be used for document classification. By making every vector a
    binary (0/1) data, it can also be used as `Bernoulli NB
    <http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html>`_.
    The input feature values must be nonnegative.

    >>> from pyspark.sql import Row
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     Row(label=0.0, weight=0.1, features=Vectors.dense([0.0, 0.0])),
    ...     Row(label=0.0, weight=0.5, features=Vectors.dense([0.0, 1.0])),
    ...     Row(label=1.0, weight=1.0, features=Vectors.dense([1.0, 0.0]))])
    >>> nb = NaiveBayes(smoothing=1.0, modelType="multinomial", weightCol="weight")
    >>> model = nb.fit(df)
    >>> model.pi
    DenseVector([-0.81..., -0.58...])
    >>> model.theta
    DenseMatrix(2, 2, [-0.91..., -0.51..., -0.40..., -1.09...], 1)
    >>> test0 = sc.parallelize([Row(features=Vectors.dense([1.0, 0.0]))]).toDF()
    >>> result = model.transform(test0).head()
    >>> result.prediction
    1.0
    >>> result.probability
    DenseVector([0.32..., 0.67...])
    >>> result.rawPrediction
    DenseVector([-1.72..., -0.99...])
    >>> test1 = sc.parallelize([Row(features=Vectors.sparse(2, [0], [1.0]))]).toDF()
    >>> model.transform(test1).head().prediction
    1.0
    >>> nb_path = temp_path + "/nb"
    >>> nb.save(nb_path)
    >>> nb2 = NaiveBayes.load(nb_path)
    >>> nb2.getSmoothing()
    1.0
    >>> model_path = temp_path + "/nb_model"
    >>> model.save(model_path)
    >>> model2 = NaiveBayesModel.load(model_path)
    >>> model.pi == model2.pi
    True
    >>> model.theta == model2.theta
    True
    >>> nb = nb.setThresholds([0.01, 10.00])
    >>> model3 = nb.fit(df)
    >>> result = model3.transform(test0).head()
    >>> result.prediction
    0.0

    .. versionadded:: 1.5.0
    """

    smoothing = Param(Params._dummy(), "smoothing", "The smoothing parameter, should be >= 0, " +
                      "default is 1.0", typeConverter=TypeConverters.toFloat)
    modelType = Param(Params._dummy(), "modelType", "The model type which is a string " +
                      "(case-sensitive). Supported options: multinomial (default) and bernoulli.",
                      typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0,
                 modelType="multinomial", thresholds=None, weightCol=None):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0, \
                 modelType="multinomial", thresholds=None, weightCol=None)
        """
        super(NaiveBayes, self).__init__()
        # Create the backing JVM estimator that fit() delegates to.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.classification.NaiveBayes", self.uid)
        self._setDefault(smoothing=1.0, modelType="multinomial")
        # @keyword_only stores only explicitly-passed args in _input_kwargs.
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.5.0")
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0,
                  modelType="multinomial", thresholds=None, weightCol=None):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0, \
                  modelType="multinomial", thresholds=None, weightCol=None)
        Sets params for Naive Bayes.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python companion class.
        return NaiveBayesModel(java_model)

    @since("1.5.0")
    def setSmoothing(self, value):
        """
        Sets the value of :py:attr:`smoothing`.
        """
        return self._set(smoothing=value)

    @since("1.5.0")
    def getSmoothing(self):
        """
        Gets the value of smoothing or its default value.
        """
        return self.getOrDefault(self.smoothing)

    @since("1.5.0")
    def setModelType(self, value):
        """
        Sets the value of :py:attr:`modelType`.
        """
        return self._set(modelType=value)

    @since("1.5.0")
    def getModelType(self):
        """
        Gets the value of modelType or its default value.
        """
        return self.getOrDefault(self.modelType)
class NaiveBayesModel(JavaModel, JavaClassificationModel, JavaMLWritable, JavaMLReadable):
    """
    Model fitted by NaiveBayes.

    .. versionadded:: 1.5.0
    """

    @property
    @since("2.0.0")
    def pi(self):
        """
        log of class priors.
        """
        return self._call_java("pi")

    @property
    @since("2.0.0")
    def theta(self):
        """
        log of class conditional probabilities.
        """
        return self._call_java("theta")
@inherit_doc
class MultilayerPerceptronClassifier(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol,
                                     HasMaxIter, HasTol, HasSeed, HasStepSize, JavaMLWritable,
                                     JavaMLReadable):
    """
    .. note:: Experimental

    Classifier trainer based on the Multilayer Perceptron.
    Each layer has sigmoid activation function, output layer has softmax.
    Number of inputs has to be equal to the size of feature vectors.
    Number of outputs has to be equal to the total number of labels.

    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (0.0, Vectors.dense([0.0, 0.0])),
    ...     (1.0, Vectors.dense([0.0, 1.0])),
    ...     (1.0, Vectors.dense([1.0, 0.0])),
    ...     (0.0, Vectors.dense([1.0, 1.0]))], ["label", "features"])
    >>> mlp = MultilayerPerceptronClassifier(maxIter=100, layers=[2, 2, 2], blockSize=1, seed=123)
    >>> model = mlp.fit(df)
    >>> model.layers
    [2, 2, 2]
    >>> model.weights.size
    12
    >>> testDF = spark.createDataFrame([
    ...     (Vectors.dense([1.0, 0.0]),),
    ...     (Vectors.dense([0.0, 0.0]),)], ["features"])
    >>> model.transform(testDF).show()
    +---------+----------+
    | features|prediction|
    +---------+----------+
    |[1.0,0.0]|       1.0|
    |[0.0,0.0]|       0.0|
    +---------+----------+
    ...
    >>> mlp_path = temp_path + "/mlp"
    >>> mlp.save(mlp_path)
    >>> mlp2 = MultilayerPerceptronClassifier.load(mlp_path)
    >>> mlp2.getBlockSize()
    1
    >>> model_path = temp_path + "/mlp_model"
    >>> model.save(model_path)
    >>> model2 = MultilayerPerceptronClassificationModel.load(model_path)
    >>> model.layers == model2.layers
    True
    >>> model.weights == model2.weights
    True
    >>> mlp2 = mlp2.setInitialWeights(list(range(0, 12)))
    >>> model3 = mlp2.fit(df)
    >>> model3.weights != model2.weights
    True
    >>> model3.layers == model.layers
    True

    .. versionadded:: 1.6.0
    """

    layers = Param(Params._dummy(), "layers", "Sizes of layers from input layer to output layer " +
                   "E.g., Array(780, 100, 10) means 780 inputs, one hidden layer with 100 " +
                   "neurons and output layer of 10 neurons.",
                   typeConverter=TypeConverters.toListInt)
    blockSize = Param(Params._dummy(), "blockSize", "Block size for stacking input data in " +
                      "matrices. Data is stacked within partitions. If block size is more than " +
                      "remaining data in a partition then it is adjusted to the size of this " +
                      "data. Recommended size is between 10 and 1000, default is 128.",
                      typeConverter=TypeConverters.toInt)
    solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
                   "options: l-bfgs, gd.", typeConverter=TypeConverters.toString)
    initialWeights = Param(Params._dummy(), "initialWeights", "The initial weights of the model.",
                           typeConverter=TypeConverters.toVector)

    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03,
                 solver="l-bfgs", initialWeights=None):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03, \
                 solver="l-bfgs", initialWeights=None)
        """
        super(MultilayerPerceptronClassifier, self).__init__()
        # Create the backing JVM estimator that fit() delegates to.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.classification.MultilayerPerceptronClassifier", self.uid)
        # Bug fix: the default registered here was tol=1E-4, while the
        # signature and docs advertise tol=1e-6. Because @keyword_only only
        # records explicitly-passed arguments, _setDefault is what actually
        # takes effect when the caller omits tol — keep it at 1E-6 so the
        # effective default matches the documented one.
        self._setDefault(maxIter=100, tol=1E-6, blockSize=128, stepSize=0.03, solver="l-bfgs")
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.6.0")
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03,
                  solver="l-bfgs", initialWeights=None):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03, \
                  solver="l-bfgs", initialWeights=None)
        Sets params for MultilayerPerceptronClassifier.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python companion class.
        return MultilayerPerceptronClassificationModel(java_model)

    @since("1.6.0")
    def setLayers(self, value):
        """
        Sets the value of :py:attr:`layers`.
        """
        return self._set(layers=value)

    @since("1.6.0")
    def getLayers(self):
        """
        Gets the value of layers or its default value.
        """
        return self.getOrDefault(self.layers)

    @since("1.6.0")
    def setBlockSize(self, value):
        """
        Sets the value of :py:attr:`blockSize`.
        """
        return self._set(blockSize=value)

    @since("1.6.0")
    def getBlockSize(self):
        """
        Gets the value of blockSize or its default value.
        """
        return self.getOrDefault(self.blockSize)

    @since("2.0.0")
    def setStepSize(self, value):
        """
        Sets the value of :py:attr:`stepSize`.
        """
        return self._set(stepSize=value)

    @since("2.0.0")
    def getStepSize(self):
        """
        Gets the value of stepSize or its default value.
        """
        return self.getOrDefault(self.stepSize)

    @since("2.0.0")
    def setSolver(self, value):
        """
        Sets the value of :py:attr:`solver`.
        """
        return self._set(solver=value)

    @since("2.0.0")
    def getSolver(self):
        """
        Gets the value of solver or its default value.
        """
        return self.getOrDefault(self.solver)

    @since("2.0.0")
    def setInitialWeights(self, value):
        """
        Sets the value of :py:attr:`initialWeights`.
        """
        return self._set(initialWeights=value)

    @since("2.0.0")
    def getInitialWeights(self):
        """
        Gets the value of initialWeights or its default value.
        """
        return self.getOrDefault(self.initialWeights)
class MultilayerPerceptronClassificationModel(JavaModel, JavaPredictionModel, JavaMLWritable,
                                              JavaMLReadable):
    """
    .. note:: Experimental

    Model produced by fitting a MultilayerPerceptronClassifier.

    .. versionadded:: 1.6.0
    """

    @property
    @since("1.6.0")
    def layers(self):
        """
        Sizes of every layer, from the input layer through to the output layer.
        """
        java_layers = self._call_java("javaLayers")
        return java_layers

    @property
    @since("2.0.0")
    def weights(self):
        """
        Flat vector of the trained connection weights across all layers.
        """
        java_weights = self._call_java("weights")
        return java_weights
class OneVsRestParams(HasFeaturesCol, HasLabelCol, HasPredictionCol):
    """
    Shared parameters for :py:class:`OneVsRest` and :py:class:`OneVsRestModel`.
    """

    classifier = Param(Params._dummy(), "classifier", "base binary classifier")

    @since("2.0.0")
    def setClassifier(self, value):
        """
        Set :py:attr:`classifier` and return self.

        .. note:: Only LogisticRegression and NaiveBayes are supported now.
        """
        return self._set(**{"classifier": value})

    @since("2.0.0")
    def getClassifier(self):
        """
        Return the configured base classifier, falling back to its default.
        """
        classifier_param = self.classifier
        return self.getOrDefault(classifier_param)
@inherit_doc
class OneVsRest(Estimator, OneVsRestParams, MLReadable, MLWritable):
    """
    .. note:: Experimental

    Reduction of Multiclass Classification to Binary Classification.
    Performs reduction using one against all strategy.
    For a multiclass classification with k classes, train k models (one per class).
    Each example is scored against all k models and the model with highest score
    is picked to label the example.

    >>> from pyspark.sql import Row
    >>> from pyspark.ml.linalg import Vectors
    >>> df = sc.parallelize([
    ...     Row(label=0.0, features=Vectors.dense(1.0, 0.8)),
    ...     Row(label=1.0, features=Vectors.sparse(2, [], [])),
    ...     Row(label=2.0, features=Vectors.dense(0.5, 0.5))]).toDF()
    >>> lr = LogisticRegression(maxIter=5, regParam=0.01)
    >>> ovr = OneVsRest(classifier=lr)
    >>> model = ovr.fit(df)
    >>> [x.coefficients for x in model.models]
    [DenseVector([3.3925, 1.8785]), DenseVector([-4.3016, -6.3163]), DenseVector([-4.5855, 6.1785])]
    >>> [x.intercept for x in model.models]
    [-3.64747..., 2.55078..., -1.10165...]
    >>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0, 0.0))]).toDF()
    >>> model.transform(test0).head().prediction
    1.0
    >>> test1 = sc.parallelize([Row(features=Vectors.sparse(2, [0], [1.0]))]).toDF()
    >>> model.transform(test1).head().prediction
    0.0
    >>> test2 = sc.parallelize([Row(features=Vectors.dense(0.5, 0.4))]).toDF()
    >>> model.transform(test2).head().prediction
    2.0

    .. versionadded:: 2.0.0
    """

    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 classifier=None):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 classifier=None)
        """
        super(OneVsRest, self).__init__()
        # Pure-Python estimator: no JVM companion object is created here;
        # conversion happens lazily in _to_java for persistence.
        kwargs = self.__init__._input_kwargs
        self._set(**kwargs)

    @keyword_only
    @since("2.0.0")
    def setParams(self, featuresCol=None, labelCol=None, predictionCol=None, classifier=None):
        """
        setParams(self, featuresCol=None, labelCol=None, predictionCol=None, classifier=None)
        Sets params for OneVsRest.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)

    def _fit(self, dataset):
        """Train one binary model per class and bundle them into a OneVsRestModel."""
        labelCol = self.getLabelCol()
        featuresCol = self.getFeaturesCol()
        predictionCol = self.getPredictionCol()
        classifier = self.getClassifier()
        # Per-class scores are read from the raw prediction column downstream,
        # so the base classifier must expose one.
        assert isinstance(classifier, HasRawPredictionCol),\
            "Classifier %s doesn't extend from HasRawPredictionCol." % type(classifier)
        # Labels are assumed to be consecutive doubles 0.0 .. k-1, so the class
        # count is one more than the maximum observed label.
        numClasses = int(dataset.agg({labelCol: "max"}).head()["max("+labelCol+")"]) + 1
        multiclassLabeled = dataset.select(labelCol, featuresCol)
        # persist if underlying dataset is not persistent.
        handlePersistence = \
            dataset.rdd.getStorageLevel() == StorageLevel(False, False, False, False)
        if handlePersistence:
            multiclassLabeled.persist(StorageLevel.MEMORY_AND_DISK)

        def trainSingleClass(index):
            # Relabel as one-vs-rest: 1.0 for the current class, 0.0 otherwise,
            # then fit the base classifier against that binary column.
            binaryLabelCol = "mc2b$" + str(index)
            trainingDataset = multiclassLabeled.withColumn(
                binaryLabelCol,
                when(multiclassLabeled[labelCol] == float(index), 1.0).otherwise(0.0))
            paramMap = dict([(classifier.labelCol, binaryLabelCol),
                             (classifier.featuresCol, featuresCol),
                             (classifier.predictionCol, predictionCol)])
            return classifier.fit(trainingDataset, paramMap)

        # TODO: Parallel training for all classes.
        models = [trainSingleClass(i) for i in range(numClasses)]
        if handlePersistence:
            multiclassLabeled.unpersist()
        return self._copyValues(OneVsRestModel(models=models))

    @since("2.0.0")
    def copy(self, extra=None):
        """
        Creates a copy of this instance with a randomly generated uid
        and some extra params. This creates a deep copy of the embedded paramMap,
        and copies the embedded and extra parameters over.

        :param extra: Extra parameters to copy to the new instance
        :return: Copy of this instance
        """
        if extra is None:
            extra = dict()
        newOvr = Params.copy(self, extra)
        # The nested classifier must be copied as well so the clone is independent.
        if self.isSet(self.classifier):
            newOvr.setClassifier(self.getClassifier().copy(extra))
        return newOvr

    @since("2.0.0")
    def write(self):
        """Returns an MLWriter instance for this ML instance."""
        return JavaMLWriter(self)

    @since("2.0.0")
    def save(self, path):
        """Save this ML instance to the given path, a shortcut of `write().save(path)`."""
        self.write().save(path)

    @classmethod
    @since("2.0.0")
    def read(cls):
        """Returns an MLReader instance for this class."""
        return JavaMLReader(cls)

    @classmethod
    def _from_java(cls, java_stage):
        """
        Given a Java OneVsRest, create and return a Python wrapper of it.
        Used for ML persistence.
        """
        featuresCol = java_stage.getFeaturesCol()
        labelCol = java_stage.getLabelCol()
        predictionCol = java_stage.getPredictionCol()
        classifier = JavaParams._from_java(java_stage.getClassifier())
        py_stage = cls(featuresCol=featuresCol, labelCol=labelCol, predictionCol=predictionCol,
                       classifier=classifier)
        # Keep the Java stage's uid so round-tripping preserves identity.
        py_stage._resetUid(java_stage.uid())
        return py_stage

    def _to_java(self):
        """
        Transfer this instance to a Java OneVsRest. Used for ML persistence.

        :return: Java object equivalent to this instance.
        """
        _java_obj = JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRest",
                                             self.uid)
        _java_obj.setClassifier(self.getClassifier()._to_java())
        _java_obj.setFeaturesCol(self.getFeaturesCol())
        _java_obj.setLabelCol(self.getLabelCol())
        _java_obj.setPredictionCol(self.getPredictionCol())
        return _java_obj
class OneVsRestModel(Model, OneVsRestParams, MLReadable, MLWritable):
    """
    .. note:: Experimental

    Model fitted by OneVsRest.
    This stores the models resulting from training k binary classifiers: one for each class.
    Each example is scored against all k models, and the model with the highest score
    is picked to label the example.

    .. versionadded:: 2.0.0
    """

    def __init__(self, models):
        super(OneVsRestModel, self).__init__()
        # List of fitted binary models; index i corresponds to class label i.
        self.models = models

    def _transform(self, dataset):
        """Score each row against every binary model; predict the argmax class."""
        # determine the input columns: these need to be passed through
        origCols = dataset.columns
        # add an accumulator column to store predictions of all the models
        # (uuid suffix avoids clashing with any user column name)
        accColName = "mbc$acc" + str(uuid.uuid4())
        initUDF = udf(lambda _: [], ArrayType(DoubleType()))
        newDataset = dataset.withColumn(accColName, initUDF(dataset[origCols[0]]))
        # persist if underlying dataset is not persistent.
        handlePersistence = \
            dataset.rdd.getStorageLevel() == StorageLevel(False, False, False, False)
        if handlePersistence:
            newDataset.persist(StorageLevel.MEMORY_AND_DISK)
        # update the accumulator column with the result of prediction of models
        aggregatedDataset = newDataset
        for index, model in enumerate(self.models):
            rawPredictionCol = model._call_java("getRawPredictionCol")
            columns = origCols + [rawPredictionCol, accColName]
            # add temporary column to store intermediate scores and update
            tmpColName = "mbc$tmp" + str(uuid.uuid4())
            # Raw prediction element [1] is the positive-class score, appended
            # to the accumulator list for this model's class index.
            updateUDF = udf(
                lambda predictions, prediction: predictions + [prediction.tolist()[1]],
                ArrayType(DoubleType()))
            transformedDataset = model.transform(aggregatedDataset).select(*columns)
            updatedDataset = transformedDataset.withColumn(
                tmpColName,
                updateUDF(transformedDataset[accColName], transformedDataset[rawPredictionCol]))
            newColumns = origCols + [tmpColName]
            # switch out the intermediate column with the accumulator column
            aggregatedDataset = updatedDataset\
                .select(*newColumns).withColumnRenamed(tmpColName, accColName)
        if handlePersistence:
            newDataset.unpersist()
        # output the index of the classifier with highest confidence as prediction
        labelUDF = udf(
            lambda predictions: float(max(enumerate(predictions), key=operator.itemgetter(1))[0]),
            DoubleType())
        # output label and label metadata as prediction
        return aggregatedDataset.withColumn(
            self.getPredictionCol(), labelUDF(aggregatedDataset[accColName])).drop(accColName)

    @since("2.0.0")
    def copy(self, extra=None):
        """
        Creates a copy of this instance with a randomly generated uid
        and some extra params. This creates a deep copy of the embedded paramMap,
        and copies the embedded and extra parameters over.

        :param extra: Extra parameters to copy to the new instance
        :return: Copy of this instance
        """
        if extra is None:
            extra = dict()
        newModel = Params.copy(self, extra)
        # Deep-copy every per-class sub-model so the clone is independent.
        newModel.models = [model.copy(extra) for model in self.models]
        return newModel

    @since("2.0.0")
    def write(self):
        """Returns an MLWriter instance for this ML instance."""
        return JavaMLWriter(self)

    @since("2.0.0")
    def save(self, path):
        """Save this ML instance to the given path, a shortcut of `write().save(path)`."""
        self.write().save(path)

    @classmethod
    @since("2.0.0")
    def read(cls):
        """Returns an MLReader instance for this class."""
        return JavaMLReader(cls)

    @classmethod
    def _from_java(cls, java_stage):
        """
        Given a Java OneVsRestModel, create and return a Python wrapper of it.
        Used for ML persistence.
        """
        featuresCol = java_stage.getFeaturesCol()
        labelCol = java_stage.getLabelCol()
        predictionCol = java_stage.getPredictionCol()
        classifier = JavaParams._from_java(java_stage.getClassifier())
        models = [JavaParams._from_java(model) for model in java_stage.models()]
        py_stage = cls(models=models).setPredictionCol(predictionCol).setLabelCol(labelCol)\
            .setFeaturesCol(featuresCol).setClassifier(classifier)
        # Keep the Java stage's uid so round-tripping preserves identity.
        py_stage._resetUid(java_stage.uid())
        return py_stage

    def _to_java(self):
        """
        Transfer this instance to a Java OneVsRestModel. Used for ML persistence.

        :return: Java object equivalent to this instance.
        """
        java_models = [model._to_java() for model in self.models]
        _java_obj = JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRestModel",
                                             self.uid, java_models)
        _java_obj.set("classifier", self.getClassifier()._to_java())
        _java_obj.set("featuresCol", self.getFeaturesCol())
        _java_obj.set("labelCol", self.getLabelCol())
        _java_obj.set("predictionCol", self.getPredictionCol())
        return _java_obj
if __name__ == "__main__":
    # Doctest driver: spins up a local SparkSession, runs this module's
    # doctests against it, and cleans up the scratch directory afterwards.
    import doctest
    import sys
    import tempfile
    from shutil import rmtree

    import pyspark.ml.classification
    from pyspark.sql import SparkSession

    globs = pyspark.ml.classification.__dict__.copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    spark = SparkSession.builder\
        .master("local[2]")\
        .appName("ml.classification tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    temp_path = tempfile.mkdtemp()
    globs['temp_path'] = temp_path
    try:
        (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
        spark.stop()
    finally:
        # Always remove the scratch directory, even if the doctests raise.
        try:
            rmtree(temp_path)
        except OSError:
            pass
    if failure_count:
        # BUG FIX: use sys.exit instead of the bare exit() builtin — exit() is
        # injected by the optional `site` module and is absent under `python -S`.
        sys.exit(-1)
| 38.52304 | 100 | 0.627835 |
68c159e493d653a1dfa8b80aefd0b693c06db686 | 112 | py | Python | skforecast/model_selection/__init__.py | hdiazsqlr/skforecast | 5ee79a51960a27db9e169706014528eae403e1c2 | [
"MIT"
] | 1 | 2022-01-31T19:14:25.000Z | 2022-01-31T19:14:25.000Z | skforecast/model_selection/__init__.py | hdiazsqlr/skforecast | 5ee79a51960a27db9e169706014528eae403e1c2 | [
"MIT"
] | null | null | null | skforecast/model_selection/__init__.py | hdiazsqlr/skforecast | 5ee79a51960a27db9e169706014528eae403e1c2 | [
"MIT"
] | null | null | null | from .model_selection import time_series_splitter, cv_forecaster, backtesting_forecaster, grid_search_forecaster | 112 | 112 | 0.910714 |
832bc381fc237b2bc72088a5ddca51081341693b | 248 | py | Python | tests/legacy_pytests/chdir_abspath_test/testme.py | depaul-dice/provenance-to-use | e16e2824fbbe0b4e09cc50f0d2bcec3400bf4b87 | [
"BSD-3-Clause"
] | null | null | null | tests/legacy_pytests/chdir_abspath_test/testme.py | depaul-dice/provenance-to-use | e16e2824fbbe0b4e09cc50f0d2bcec3400bf4b87 | [
"BSD-3-Clause"
] | null | null | null | tests/legacy_pytests/chdir_abspath_test/testme.py | depaul-dice/provenance-to-use | e16e2824fbbe0b4e09cc50f0d2bcec3400bf4b87 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python2
import sys
sys.path.insert(0, '..')
from cde_test_common import *
def checker_func():
assert os.path.isfile(CDE_ROOT_DIR + '/home/pgbovine/tmp.txt')
generic_test_runner(["python", "chdir_abspath_test.py"], checker_func)
| 22.545455 | 70 | 0.741935 |
aba6e7bdf32605c82dbe00a5be84d07e1179c2f1 | 3,577 | py | Python | pandas_ta/performance/trend_return.py | maxdignan/pandas-ta | c4722d107393965638b77df8d969441e762afe34 | [
"MIT"
] | 2 | 2021-09-09T09:43:13.000Z | 2022-01-02T22:08:29.000Z | pandas_ta/performance/trend_return.py | tristcoil/pandas-ta | cafee0225b62a33b83e4628697262c21484cd2e3 | [
"MIT"
] | null | null | null | pandas_ta/performance/trend_return.py | tristcoil/pandas-ta | cafee0225b62a33b83e4628697262c21484cd2e3 | [
"MIT"
] | 1 | 2021-01-24T15:53:39.000Z | 2021-01-24T15:53:39.000Z | # -*- coding: utf-8 -*-
from pandas import DataFrame, Series
from .log_return import log_return
from .percent_return import percent_return
from pandas_ta.utils import get_offset, verify_series, zero
def trend_return(close, trend, log=True, cumulative=None, trend_reset=0, offset=None, **kwargs):
    """Indicator: Trend Return"""
    # Validate Arguments
    close = verify_series(close)
    trend = verify_series(trend)
    cumulative = cumulative if cumulative is not None and isinstance(cumulative, bool) else False
    trend_reset = int(trend_reset) if trend_reset and isinstance(trend_reset, int) else 0
    offset = get_offset(offset)

    # Calculate Result
    if log:
        returns = log_return(close, cumulative=False)
    else:
        returns = percent_return(close, cumulative=False)

    trends = trend.astype(int)
    # Zero out returns outside the trend so only in-trend bars contribute.
    returns = (trends * returns).apply(zero)

    # Accumulate bar-by-bar, resetting whenever the trend value equals
    # ``trend_reset``. BUG FIX: use positional access (.iat) instead of
    # label-based ``trends[i]`` so the loop is correct for any Series index
    # (e.g. a DatetimeIndex), not only a default RangeIndex.
    tsum = 0
    result = []
    for i in range(trends.size):
        if trends.iat[i] == trend_reset:
            tsum = 0
        else:
            return_ = returns.iat[i]
            if cumulative:
                tsum += return_
            else:
                tsum = return_
        result.append(tsum)

    # Column-name prefixes encode the options used: C=cumulative, L/P=log/percent.
    _cumulative = "C" if cumulative else ""
    _log = "L" if log else "P"
    _returns = "LOGRET" if log else "PCTRET"
    _props = f"{_cumulative}{_log}TR"
    df = DataFrame({
        _props: result,
        f"TR_{_returns}": returns,
        f"{_props}_Trends": trends,
        # Trade markers derived from trend changes: 1 = entry, -1 = exit, 0 = hold.
        f"{_props}_Trades": trends.diff().shift(1).fillna(0).astype(int),
    }, index=close.index)

    # Offset
    if offset != 0:
        df = df.shift(offset)

    # Name & Category
    df.name = _props
    df.category = "performance"

    return df


# Public docstring is attached after the definition (pandas-ta convention),
# replacing the short placeholder docstring set inside the function body.
trend_return.__doc__ = \
"""Trend Return

Calculates the (Cumulative) Returns of a Trend as defined by a sequence of booleans called a 'trend'. One popular example in TA literature is to be long
when the 'close' > 'moving average'. In which case, the trend= close > sma(close, 50). By default it calculates log returns but can also use percent change.

Examples:
ta.trend_return(close, trend= close > ta.sma(close, 50))
ta.trend_return(close, trend= ta.ema(close, 8) > ta.ema(close, 21))

Sources: Kevin Johnson

Calculation:
Default Inputs:
trend_reset=0, log=True, cumulative=False

sum = 0
returns = log_return if log else percent_return # These are not cumulative
returns = (trend * returns).apply(zero)
for i, in range(0, trend.size):
if item == trend_reset:
sum = 0
else:
return_ = returns.iloc[i]
if cumulative:
sum += return_
else:
sum = return_
trend_return.append(sum)

if cumulative and variable:
trend_return += returns

Args:
close (pd.Series): Series of 'close's
trend (pd.Series): Series of 'trend's. Preferably 0's and 1's.
trend_reset (value): Value used to identify if a trend has ended. Default: 0
log (bool): Calculate logarithmic returns. Default: True
cumulative (bool): If True, returns the cumulative returns. Default: False
offset (int): How many periods to offset the result. Default: 0

Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
variable (bool, optional): Whether to include if return fluxuations in the
cumulative returns.

Returns:
pd.DataFrame: Returns columns: Trend Return, Close Return, Trends, and
Trades (Enter: 1, Exit: -1, Otherwise: 0).
"""
fa3ffebd98079edd7bc292c4ff5c240aee08dc2a | 447 | py | Python | scripts/mlp/demo_configs/anymal_platform_random.py | stonneau/multicontact-locomotion-planning | a2c5dd35955a44c5a454d114c9dcaf0fec19424f | [
"BSD-2-Clause"
] | 31 | 2019-11-08T14:46:03.000Z | 2022-03-25T08:09:16.000Z | python/mlp/demo_configs/anymal_platform_random.py | pFernbach/multicontact-locomotion-planning | 86c3e64fd0ee57b1e4061351a16e43e6ba0e15c2 | [
"BSD-2-Clause"
] | 21 | 2019-04-12T13:13:31.000Z | 2021-04-02T14:28:15.000Z | python/mlp/demo_configs/anymal_platform_random.py | pFernbach/multicontact-locomotion-planning | 86c3e64fd0ee57b1e4061351a16e43e6ba0e15c2 | [
"BSD-2-Clause"
] | 11 | 2019-04-12T13:03:55.000Z | 2021-11-22T08:19:06.000Z | TIMEOPT_CONFIG_FILE = "cfg_softConstraints_anymal_kinOrientation.yaml"
from .common_anymal import *
SCRIPT_PATH = "memmo"
ENV_NAME = "multicontact/plateforme_not_flat"
DURATION_INIT = 2. # Time to init the motion
DURATION_FINAL = 2. # Time to stop the robot
DURATION_FINAL_SS = 1.
DURATION_SS = 2.
DURATION_DS = 2.
DURATION_TS = 0.8
DURATION_QS = 0.5
#COM_SHIFT_Z = -0.02
#TIME_SHIFT_COM = 1.
## Override default settings :
YAW_ROT_GAIN = 1.
| 23.526316 | 70 | 0.762864 |
e0b2cd1705a7f6cc73e0684f3b07a317a9d05dfd | 2,472 | py | Python | dist/Lib/site-packages/neo4jrestclient/iterable.py | nelmiux/CS347-Data_Management | 1e9d87097b5a373f9312b0d6b413198e495fd6c0 | [
"CNRI-Jython"
] | 1 | 2021-10-04T18:22:12.000Z | 2021-10-04T18:22:12.000Z | dist/Lib/site-packages/neo4jrestclient/iterable.py | nelmiux/CS347-Data_Management | 1e9d87097b5a373f9312b0d6b413198e495fd6c0 | [
"CNRI-Jython"
] | 10 | 2021-06-16T20:48:32.000Z | 2021-10-04T18:22:02.000Z | try2/lib/python3.9/site-packages/neo4jrestclient-2.1.1-py3.9.egg/neo4jrestclient/iterable.py | diatomsRcool/eco-kg | 4251f42ca2ab353838a39b640cb97593db76d4f4 | [
"BSD-3-Clause"
] | 1 | 2022-01-13T10:05:55.000Z | 2022-01-13T10:05:55.000Z | # -*- coding: utf-8 -*-
class Iterable(list):
"""
Class to iterate among returned objects.
"""
def __init__(self, cls, lst=None, attr=None, auth=None, cypher=None):
if lst is None:
lst = []
self._auth = auth or {}
self._cypher = cypher
self._list = lst
self._index = len(lst)
self._class = cls
self._attribute = attr
super(Iterable, self).__init__(lst)
def __getslice__(self, *args, **kwargs):
eltos = super(Iterable, self).__getslice__(*args, **kwargs)
if self._attribute:
return [self._class(elto[self._attribute], update_dict=elto,
auth=self._auth, cypher=self._cypher)
for elto in eltos]
else:
return [self._class(elto, auth=self._auth, cypher=self._cypher)
for elto in eltos]
def __getitem__(self, index):
elto = super(Iterable, self).__getitem__(index)
if self._attribute:
return self._class(elto[self._attribute], update_dict=elto,
auth=self._auth, cypher=self._cypher)
else:
return self._class(elto, auth=self._auth, cypher=self._cypher)
def __repr__(self):
return self.__unicode__()
def __str__(self):
return self.__unicode__()
def __unicode__(self):
return u"<Neo4j %s: %s>" % (self.__class__.__name__,
self._class.__name__)
def __contains__(self, value):
# TODO: Find a better way to check if value is instance of Base
# avoiding a circular loop of imports
# if isinstance(value, Base) and hasattr(value, "url"):
if (hasattr(value, "url") and hasattr(value, "id")
and hasattr(value, "_dic")):
if self._attribute:
return value.url in [elto[self._attribute]
for elto in self._list]
else:
return value.url in self._list
return False
def __iter__(self):
return self
@property
def single(self):
try:
return self[0]
except KeyError:
return None
def __next__(self):
if self._index == 0:
raise StopIteration
self._index = self._index - 1
return self.__getitem__(self._index)
def next(self):
return self.__next__()
| 31.291139 | 75 | 0.549757 |
983e1db4c504dc45246230f3f268f98dc81539aa | 4,160 | py | Python | data/dataloader.py | QWERDFBAS/remove-stamp | e6462ab6425b07cea840b1e57c3b5a133632e130 | [
"MIT"
] | null | null | null | data/dataloader.py | QWERDFBAS/remove-stamp | e6462ab6425b07cea840b1e57c3b5a133632e130 | [
"MIT"
] | null | null | null | data/dataloader.py | QWERDFBAS/remove-stamp | e6462ab6425b07cea840b1e57c3b5a133632e130 | [
"MIT"
] | null | null | null | import torch
from torch.utils.data import Dataset
from PIL import Image
import numpy as np
import cv2
from os import listdir, walk
from os.path import join
from random import randint
import random
from PIL import Image
from torchvision.transforms import Compose, RandomCrop, ToTensor, ToPILImage, Resize, RandomHorizontalFlip
def random_horizontal_flip(imgs):
    """With probability 0.3, mirror every image in ``imgs`` left-right (in place)."""
    if random.random() < 0.3:
        for position, picture in enumerate(imgs):
            imgs[position] = picture.transpose(Image.FLIP_LEFT_RIGHT)
    return imgs
def random_rotate(imgs):
    """
    With probability 0.3, rotate every image in ``imgs`` (in place) by one
    shared random angle drawn uniformly from [-10, 10] degrees.
    """
    if random.random() < 0.3:
        max_angle = 10
        angle = random.random() * 2 * max_angle - max_angle
        for position, picture in enumerate(imgs):
            pixels = np.array(picture)
            rows, cols = pixels.shape[:2]
            # cv2 expects an (x, y) = (col, row) center and a (width, height) size.
            matrix = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1)
            rotated = cv2.warpAffine(pixels, matrix, (cols, rows))
            imgs[position] = Image.fromarray(rotated)
    return imgs
# Extensions (both cases) accepted as image files.
_IMAGE_EXTENSIONS = ('.png', '.PNG', '.jpg', '.JPG', '.jpeg', '.JPEG', '.bmp', '.BMP')


def CheckImageFile(filename):
    """Return True if ``filename`` ends with a recognized image extension."""
    # str.endswith accepts a tuple of suffixes, replacing the any()-over-list scan.
    return filename.endswith(_IMAGE_EXTENSIONS)
def ImageTransform(loadSize):
    """Build the preprocessing pipeline: bicubic resize to ``loadSize``, then ToTensor."""
    steps = [
        Resize(size=loadSize, interpolation=Image.BICUBIC),
        ToTensor(),
    ]
    return Compose(steps)
class ErasingData(Dataset):
    """
    Paired training dataset for stamp removal. Each sample is
    (stamped input image, clean ground truth, stamp mask, filename).
    """

    def __init__(self, dataRoot, loadSize, training=True):
        super(ErasingData, self).__init__()
        '''
        join()合并新的路径
        返回值:
        第一个:当前访问的文件夹路径
        第二个:当前文件夹下的子目录list
        第三个:当前文件夹下的文件list
        '''
        # (The note above documents os.walk: it yields the current directory
        # path, its subdirectory list, and its file list; join() builds paths.)
        self.imageFiles = [join(dataRootK, files) for dataRootK, dn, filenames in walk(dataRoot) \
            for files in filenames if CheckImageFile(files)] # assumes no subdirectories by default
        self.loadSize = loadSize
        self.ImgTrans = ImageTransform(loadSize) # two operations: 1. resize the image  2. convert to a [C, H, W] tensor scaled to [0, 1.0]
        self.training = training
    '''
    当用实例对象[xxxx]自动调用这个方法。
    '''
    # (The note above: this method is invoked automatically when the instance
    # is indexed with [...].)
    def __getitem__(self, index):
        '''
        Dataset layout: three sibling folders --
        1. ``all_images``: original (stamped) input images
        2. ``all_labels``: ground-truth images with the stamp removed
        3. ``mask``: stamp mask images
        '''
        img = Image.open(self.imageFiles[index])
        mask = Image.open(self.imageFiles[index].replace('all_images','mask'))
        gt = Image.open(self.imageFiles[index].replace('all_images','all_labels'))
        # import pdb;pdb.set_trace()
        if self.training:
            # ### for data augmentation
            all_input = [img, mask, gt]
            # randomly flip left-right (applied jointly to image, mask and label)
            all_input = random_horizontal_flip(all_input)
            # randomly rotate by a shared small angle
            all_input = random_rotate(all_input)
            img = all_input[0]
            mask = all_input[1]
            gt = all_input[2]
        ### for data augmentation
        inputImage = self.ImgTrans(img.convert('RGB'))
        mask = self.ImgTrans(mask.convert('RGB'))
        groundTruth = self.ImgTrans(gt.convert('RGB'))
        # Keep only the filename (no directories) for bookkeeping.
        path = self.imageFiles[index].split('/')[-1]
        # import pdb;pdb.set_trace()
        return inputImage, groundTruth, mask, path

    def __len__(self):
        return len(self.imageFiles)
class devdata(Dataset):
    """
    Evaluation dataset: pairs images from ``dataRoot`` with ground-truth
    images from ``gtRoot`` by directory-walk order (no augmentation).
    """

    def __init__(self, dataRoot, gtRoot, loadSize=512):
        super(devdata, self).__init__()
        self.imageFiles = [join (dataRootK, files) for dataRootK, dn, filenames in walk(dataRoot) \
            for files in filenames if CheckImageFile(files)]
        self.gtFiles = [join (gtRootK, files) for gtRootK, dn, filenames in walk(gtRoot) \
            for files in filenames if CheckImageFile(files)]
        self.loadSize = loadSize
        self.ImgTrans = ImageTransform(loadSize)

    def __getitem__(self, index):
        # NOTE(review): pairing relies on both walks visiting files in the
        # same order — confirm the two trees have matching layouts.
        img = Image.open(self.imageFiles[index])
        gt = Image.open(self.gtFiles[index])
        #import pdb;pdb.set_trace()
        inputImage = self.ImgTrans(img.convert('RGB'))
        groundTruth = self.ImgTrans(gt.convert('RGB'))
        # Keep only the filename (no directories) for bookkeeping.
        path = self.imageFiles[index].split('/')[-1]
        return inputImage, groundTruth,path

    def __len__(self):
        return len(self.imageFiles)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.