Schema of the split (column types and per-column summary statistics):

| column | dtype | stats |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4 to 721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 57 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] | 2012-06-28 18:51:49 to 2023-09-14 21:59:16 (nullable) |
| gha_created_at | timestamp[ns] | 2008-02-11 22:55:26 to 2023-08-10 11:14:58 (nullable) |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 classes |
| filename | string | length 3 to 113 |
| content | string | length 6 to 10.2M |
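
For orientation, here is a minimal sketch of how a split with this schema could be loaded and filtered using the Hugging Face `datasets` library. The dataset path `org/dataset-name` is a placeholder, not the actual repository name.

```python
# Sketch only: assumes the split is hosted on the Hugging Face Hub under a placeholder path.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train", streaming=True)  # hypothetical dataset id

# Keep permissively licensed, non-vendored, non-generated Python files.
subset = ds.filter(
    lambda row: row["license_type"] == "permissive"
    and row["language"] == "Python"
    and not row["is_vendor"]
    and not row["is_generated"]
)

for row in subset.take(3):
    print(row["repo_name"], row["path"], row["length_bytes"])
```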

Example row 1

- repo_name: youtube/cobalt
- path: /third_party/web_platform_tests/XMLHttpRequest/resources/conditional.py
- blob_id: dd5c0cd1865b5ad0d1bf1a2dbf519ca17ecc33c8
- directory_id: da1500e0d3040497614d5327d2461a22e934b4d8
- content_id: 42dfecdfbc3d8090dca55c2e78280d3a3d24e2f0
- snapshot_id: 34085fc93972ebe05b988b15410e99845efd1968
- revision_id: acefdaaadd3ef46f10f63d1acae2259e4024d383
- branch_name: refs/heads/main
- detected_licenses: ["BSD-3-Clause", "GPL-1.0-or-later", "LGPL-2.0-or-later", "Apache-2.0", "MIT"]
- license_type: permissive; gha_license_id: BSD-3-Clause
- visit_date: 2023-09-01T13:09:47.225174; revision_date: 2023-09-01T08:54:54; committer_date: 2023-09-01T08:54:54
- github_id: 50,049,789; star_events_count: 169; fork_events_count: 80
- gha_event_created_at: 2023-09-14T21:50:50; gha_created_at: 2016-01-20T18:11:34; gha_language: null
- src_encoding: UTF-8; language: Python; is_vendor: false; is_generated: false
- length_bytes: 620; extension: py; filename: conditional.py
- content:
def main(request, response):
    tag = request.GET.first("tag", None)
    match = request.headers.get("If-None-Match", None)
    date = request.GET.first("date", "")
    modified = request.headers.get("If-Modified-Since", None)
    if tag:
        response.headers.set("ETag", '"%s"' % tag)
    elif date:
        response.headers.set("Last-Modified", date)
    if ((match is not None and match == tag) or
            (modified is not None and modified == date)):
        response.status = (304, "SUPERCOOL")
        return ""
    else:
        response.headers.set("Content-Type", "text/plain")
        return "MAYBE NOT"

Example row 2

- repo_name: scikit-rf/scikit-rf
- path: /skrf/vi/vna/keysight/__init__.py
- blob_id: 30dcbd5cdec4eb03751956e60aad4255c0787e73
- directory_id: 34305ef03fffd872195fced3d946fcaccbc79ddf
- content_id: 93c9b83d7d1415a86868267e374a82db704d02c3
- snapshot_id: 20477c643883b6b46dca50fca31ad1010a9512e9
- revision_id: 26243ffc45160f17612badc880ad5d022036537a
- branch_name: refs/heads/master
- detected_licenses: ["BSD-3-Clause"]
- license_type: permissive; gha_license_id: BSD-3-Clause
- visit_date: 2023-08-19T03:34:42.208425; revision_date: 2023-07-28T13:56:06; committer_date: 2023-07-28T13:56:06
- github_id: 3,218,028; star_events_count: 555; fork_events_count: 276
- gha_event_created_at: 2023-09-12T21:56:17; gha_created_at: 2012-01-19T14:25:58; gha_language: Python
- src_encoding: UTF-8; language: Python; is_vendor: false; is_generated: false
- length_bytes: 226; extension: py; filename: __init__.py
- content:
"""
.. module:: skrf.vi.vna.keysight
===========================================
Keysight VNAs (:mod:`skrf.vi.vna.keysight`)
===========================================
"""
from .fieldfox import FieldFox
from .pna import PNA

Example row 3

- repo_name: mindspore-ai/akg
- path: /tests/common/test_op/ascend/apply_ftrl.py
- blob_id: abe0f294df3576255b0ae66c52833843d961de7c
- directory_id: 568fa58296378fa129ab3349adf010daa44ed45b
- content_id: 51f8450c789a4db34d0e8e1889afd17afbd53054
- snapshot_id: 37f471badc66de6a831f1f45ad84344f34d23ef2
- revision_id: 99f33858d6972741748cbfc9ab0bf9600428fef7
- branch_name: refs/heads/master
- detected_licenses: ["Apache-2.0", "BSD-3-Clause", "NCSA", "X11-distribute-modifications-variant", "Zlib", "MIT", "LicenseRef-scancode-unknown-license-reference", "Unlicense", "LLVM-exception", "BSD-2-Clause"]
- license_type: permissive; gha_license_id: Apache-2.0
- visit_date: 2023-07-25T23:03:17.672665; revision_date: 2023-07-11T07:33:57; committer_date: 2023-07-11T07:33:57
- github_id: 274,077,856; star_events_count: 319; fork_events_count: 36
- gha_event_created_at: 2021-12-30T13:43:08; gha_created_at: 2020-06-22T08:09:05; gha_language: Python
- src_encoding: UTF-8; language: Python; is_vendor: false; is_generated: false
- length_bytes: 6,209; extension: py; filename: apply_ftrl.py
- content:
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""operator dsl function: apply_ftrl"""
import akg.tvm
import akg.topi
import akg.utils
from akg.utils.format_transform import get_shape
import akg.utils as utils
from akg.utils.kernel_exec import product_is_mini
from akg.utils.dsl_create import TensorUtils
from akg.ops.math import divide


def apply_ftrl_impl(var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power, with_l2_shrinkage=False):
    """Ftrl-proximal Optimization algorithm"""
    dtype = var.dtype
    # cast to float32 for higher accuracy
    compute_dtype = dtype
    if dtype == "float16":
        compute_dtype = "float32"
        var, accum, linear, grad, lr, l1, l2, lr_power = [akg.topi.cast(t, compute_dtype) for t in
                                                          [var, accum, linear, grad, lr, l1, l2, lr_power]]
        if with_l2_shrinkage:
            l2_shrinkage = akg.topi.cast(l2_shrinkage, compute_dtype)

    shape = var.shape

    # grad_shrinkage = grad + 2 * l2_shrinkage * var
    if with_l2_shrinkage:
        l2_shrinkage = akg.topi.broadcast_to(l2_shrinkage, shape)
        grad_shrinkage = akg.tvm.compute(shape, lambda *indice:
                                         grad(*indice) + akg.tvm.const(2.0, compute_dtype) * l2_shrinkage(*indice) *
                                         var(*indice), name="grad_shrinkage")
    else:
        grad_shrinkage = grad

    # accum_new = accum + grad^2
    accum_new = akg.tvm.compute(shape, lambda *indice: accum(*indice) + grad(*indice)*grad(*indice), name="accum_new")

    # linear_new = linear + grad_shrinkage - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
    lr_power_neg = akg.topi.negative(lr_power)
    accum_new_pow = akg.topi.power(accum_new, lr_power_neg)
    accum_pow = akg.topi.power(accum, lr_power_neg)
    accum_pow_sub = akg.topi.subtract(accum_new_pow, accum_pow)
    accum_pow_sub_div_lr = divide(accum_pow_sub, lr, target=utils.CCE)
    linear_add_shrinkage = akg.topi.add(linear, grad_shrinkage)
    linear_new = akg.tvm.compute(shape, lambda *indice:
                                 linear_add_shrinkage(*indice) - accum_pow_sub_div_lr(*indice)*var(*indice),
                                 name="linear_new")

    # x = clip(linear_new, -l1, l1) - linear_new
    l1_neg = akg.topi.negative(l1)
    linear_new_clip = akg.topi.minimum(akg.topi.maximum(linear_new, l1_neg), l1)
    x_res = akg.topi.subtract(linear_new_clip, linear_new)

    # y = accum_new^(-lr_power) / lr + 2 * l2
    accum_new_pow_div_lr = divide(accum_new_pow, lr, target="cce")
    l2_2 = akg.topi.multiply(l2, 2)
    y_res = akg.topi.add(accum_new_pow_div_lr, l2_2)

    # var_new = x / y
    var_new = divide(x_res, y_res, target="cce")

    # cast back to the original type
    if dtype == "float16":
        var_new = akg.topi.cast(var_new, dtype)
        accum_new = akg.topi.cast(accum_new, dtype)
        linear_new = akg.topi.cast(linear_new, dtype)
    return var_new, accum_new, linear_new


@utils.check_input_type(*([akg.tvm.tensor.Tensor]*8), (str, type(None)))
def apply_ftrl(var, accum, linear, grad, lr, l1, l2, lr_power, target=utils.CCE):
    """
    Ftrl-proximal optimization algorithm.

    Note:
        accum_new = accum + grad * grad
        linear_new = linear + grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
        x = clip(linear_new, -l1, l1) - linear_new
        y = accum_new^(-lr_power) / lr + 2 * l2
        var_new = x / y

    Args:
        var (tvm.tensor.Tensor): The tensor to be updated. Should be float16 or float32.
        accum (tvm.tensor.Tensor): A tensor of same shape and type as var. Each entry in it must be
            greater or equal to zero.
        linear (tvm.tensor.Tensor): A tensor of same shape and type as var.
        grad (tvm.tensor.Tensor): A tensor of same shape and type as var.
        lr (tvm.tensor.Tensor): A scalar tensor of the same type as `var`.
        l1 (tvm.tensor.Tensor): A scalar tensor of the same type as `var`.
        l2 (tvm.tensor.Tensor): A scalar tensor of the same type as `var`.
        lr_power (tvm.tensor.Tensor): A scalar tensor of the same type as `var`. Value of it
            must be less or equal to zero.

    Returns:
        tvm.tensor.Tensor, updated var.
        tvm.tensor.Tensor, updated accum.
        tvm.tensor.Tensor, updated linear.
    """
    # The vlog instruction on the mini product has a precision problem, and the mini product
    # is used for inference rather than training
    if product_is_mini():
        raise RuntimeError("The apply_ftrl operator does not support the mini product")

    # check shape
    utils.check_shape(var)
    shape = get_shape(var)
    for tensor in (accum, linear, grad):
        utils.elemwise_shape_check(shape, tensor.shape)
    sclar_shape = (1,)
    for sclar in (lr, l1, l2, lr_power):
        utils.elemwise_shape_check(sclar.shape, sclar_shape)

    # check dtype
    dtype = var.dtype
    utils.ops_dtype_check(dtype, [utils.DtypeForDavinci.FLOAT16, utils.DtypeForDavinci.FLOAT32])
    for tensor in (var, accum, linear, grad, lr, l1, l2, lr_power):
        utils.elemwise_dtype_check(tensor.dtype, dtype)

    var_new, accum_new, linear_new = apply_ftrl_impl(var, accum, linear, grad, lr, l1, l2, None,
                                                     lr_power, with_l2_shrinkage=False)

    # update by inplace
    (var_new, accum_new, linear_new), binds_info = \
        TensorUtils.inplace_set_tensors((var, accum, linear), (var_new, accum_new, linear_new))
    attrs = {utils.BINDS: binds_info}
    return var_new, accum_new, linear_new, attrs

Example row 4

- repo_name: OpenMined/PySyft
- path: /packages/syft/src/syft/types/datetime.py
- blob_id: 5f2b6b239d4f6a0a68d408265a74cbf81f3115fc
- directory_id: 40ca168bbb9c865a13c83ef479838981c5b7a1c0
- content_id: c03e1433fd0b5526d1426109785c53ffb4d405be
- snapshot_id: 6907171bc35062d04c1b6320097c3bcafb65ae68
- revision_id: 1833278212d89e66853f28a7ca365261550bbe4f
- branch_name: refs/heads/dev
- detected_licenses: ["Python-2.0", "Apache-2.0"]
- license_type: permissive; gha_license_id: Apache-2.0
- visit_date: 2023-09-05T05:50:48.773703; revision_date: 2023-09-05T04:00:44; committer_date: 2023-09-05T04:00:44
- github_id: 97,641,933; star_events_count: 9,473; fork_events_count: 2,530
- gha_event_created_at: 2023-09-14T12:50:53; gha_created_at: 2017-07-18T20:41:16; gha_language: Python
- src_encoding: UTF-8; language: Python; is_vendor: false; is_generated: false
- length_bytes: 1,041; extension: py; filename: datetime.py
- content:
# stdlib
from datetime import datetime
from functools import total_ordering
from typing import Optional

# third party
from typing_extensions import Self

# relative
from ..serde.serializable import serializable
from .syft_object import SYFT_OBJECT_VERSION_1
from .syft_object import SyftObject
from .uid import UID


@serializable()
@total_ordering
class DateTime(SyftObject):
    __canonical_name__ = "DateTime"
    __version__ = SYFT_OBJECT_VERSION_1

    id: Optional[UID]
    utc_timestamp: float

    @classmethod
    def now(cls) -> Self:
        return cls(utc_timestamp=datetime.utcnow().timestamp())

    def __str__(self) -> str:
        utc_datetime = datetime.utcfromtimestamp(self.utc_timestamp)
        return utc_datetime.strftime("%Y-%m-%d %H:%M:%S")

    def __hash__(self) -> int:
        return hash(self.utc_timestamp)

    def __eq__(self, other: Self) -> bool:
        return self.utc_timestamp == other.utc_timestamp

    def __lt__(self, other: Self) -> bool:
        return self.utc_timestamp < other.utc_timestamp

Example row 5

- repo_name: camelot-dev/excalibur
- path: /excalibur/utils/database.py
- blob_id: ad9195879182f610d46d5fb338499b07f234fcc1
- directory_id: c46da4655eb03ddf1444476f26b3b9e9474a45e7
- content_id: c4d0604ec3fd8906eafc0e0ec2645f57f9164ba4
- snapshot_id: 7e4c892d5f16d1861840ccadd99ed5886ae84d4c
- revision_id: 2a8e6cabfe8fa1fb2265da0756cf0130d5e52025
- branch_name: refs/heads/master
- detected_licenses: ["MIT"]
- license_type: permissive; gha_license_id: MIT
- visit_date: 2023-08-30T03:10:35.837139; revision_date: 2023-07-15T11:04:02; committer_date: 2023-07-15T11:04:02
- github_id: 153,899,105; star_events_count: 1,356; fork_events_count: 222
- gha_event_created_at: 2023-07-15T11:04:04; gha_created_at: 2018-10-20T11:34:49; gha_language: HTML
- src_encoding: UTF-8; language: Python; is_vendor: false; is_generated: false
- length_bytes: 243; extension: py; filename: database.py
- content:
from ..settings import engine


def initialize_database():
    from ..models import Base
    Base.metadata.create_all(engine)


def reset_database():
    from ..models import Base
    Base.metadata.drop_all(engine)
    initialize_database()

Example row 6

- repo_name: alteryx/featuretools
- path: /featuretools/primitives/standard/transform/binary/multiply_numeric.py
- blob_id: 938d85229eab01b9736d9cb02c0d4d42e3ff6767
- directory_id: 5e9576c368e98927e2965bd2fb23bd35d9993d69
- content_id: 176eed3bc405905de6e8eb1301b06cb614e46915
- snapshot_id: c6e319e063e8e84e7684bf232376f95dc5272160
- revision_id: c284c2d27a95b81e0bae913ac90df2b02c8f3b37
- branch_name: refs/heads/main
- detected_licenses: ["BSD-3-Clause"]
- license_type: permissive; gha_license_id: BSD-3-Clause
- visit_date: 2023-08-25T12:21:33.945418; revision_date: 2023-08-23T16:30:25; committer_date: 2023-08-23T16:30:25
- github_id: 102,908,804; star_events_count: 1,783; fork_events_count: 201
- gha_event_created_at: 2023-09-07T18:53:19; gha_created_at: 2017-09-08T22:15:17; gha_language: Python
- src_encoding: UTF-8; language: Python; is_vendor: false; is_generated: false
- length_bytes: 1,158; extension: py; filename: multiply_numeric.py
- content:
import numpy as np
from woodwork.column_schema import ColumnSchema

from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library


class MultiplyNumeric(TransformPrimitive):
    """Performs element-wise multiplication of two lists.

    Description:
        Given a list of values X and a list of values
        Y, determine the product of each value in X
        with its corresponding value in Y.

    Examples:
        >>> multiply_numeric = MultiplyNumeric()
        >>> multiply_numeric([2, 1, 2], [1, 2, 2]).tolist()
        [2, 2, 4]
    """

    name = "multiply_numeric"
    input_types = [
        ColumnSchema(semantic_tags={"numeric"}),
        ColumnSchema(semantic_tags={"numeric"}),
    ]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    commutative = True
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the product of {} and {}"

    def get_function(self):
        return np.multiply

    def generate_name(self, base_feature_names):
        return "%s * %s" % (base_feature_names[0], base_feature_names[1])

Example row 7

- repo_name: sorgerlab/indra
- path: /indra/preassembler/__init__.py
- blob_id: 52c08dbd8f23451732d465bc8ff35b17765845e7
- directory_id: 2d9a3ce2a04190d0032e8a298829022260b1d76b
- content_id: c08362655bd48a1f694fe2f4a211e4851cc3f550
- snapshot_id: f127a0f9bdd2d3f48df14575883fd31e2f4de4bf
- revision_id: 6d6ca1174792b6c5a05cbf3afcb9f138fabcec6a
- branch_name: refs/heads/master
- detected_licenses: ["BSD-2-Clause", "BSD-2-Clause-Views"]
- license_type: permissive; gha_license_id: BSD-2-Clause
- visit_date: 2023-08-21T13:25:54.654995; revision_date: 2023-06-11T16:46:41; committer_date: 2023-06-11T16:46:41
- github_id: 22,848,436; star_events_count: 158; fork_events_count: 61
- gha_event_created_at: 2023-08-30T21:47:59; gha_created_at: 2014-08-11T17:44:05; gha_language: Python
- src_encoding: UTF-8; language: Python; is_vendor: false; is_generated: false
- length_bytes: 37,882; extension: py; filename: __init__.py
- content:
import time
import tqdm
import logging
import itertools
import functools
import collections
import networkx as nx
from indra.util import fast_deepcopy
from indra.statements import *
from indra.statements import stmt_type as indra_stmt_type
from .refinement import *
logger = logging.getLogger(__name__)
class Preassembler(object):
"""De-duplicates statements and arranges them in a specificity hierarchy.
Parameters
----------
ontology : :py:class:`indra.ontology.IndraOntology`
An INDRA Ontology object.
stmts : list of :py:class:`indra.statements.Statement` or None
A set of statements to perform pre-assembly on. If None, statements
should be added using the :py:meth:`add_statements` method.
matches_fun : Optional[function]
A functon which takes a Statement object as argument and
returns a string key that is used for duplicate recognition. If
supplied, it overrides the use of the built-in matches_key method of
each Statement being assembled.
refinement_fun : Optional[function]
A function which takes two Statement objects and an ontology
as an argument and returns True or False. If supplied, it overrides
the built-in refinement_of method of each Statement being assembled.
Attributes
----------
stmts : list of :py:class:`indra.statements.Statement`
Starting set of statements for preassembly.
unique_stmts : list of :py:class:`indra.statements.Statement`
Statements resulting from combining duplicates.
related_stmts : list of :py:class:`indra.statements.Statement`
Top-level statements after building the refinement hierarchy.
ontology : dict[:py:class:`indra.preassembler.ontology_graph.IndraOntology`]
An INDRA Ontology object.
"""
def __init__(self, ontology, stmts=None, matches_fun=None,
refinement_fun=None):
self.ontology = ontology
if stmts:
logger.debug("Deepcopying stmts in __init__")
self.stmts = fast_deepcopy(stmts)
else:
self.stmts = []
self.unique_stmts = None
self.related_stmts = None
self.matches_fun = matches_fun if matches_fun else \
default_matches_fun
self.refinement_fun = refinement_fun if refinement_fun else \
default_refinement_fun
self._comparison_counter = 0
def add_statements(self, stmts):
"""Add to the current list of statements.
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
Statements to add to the current list.
"""
self.stmts += fast_deepcopy(stmts)
def combine_duplicates(self):
"""Combine duplicates among `stmts` and save result in `unique_stmts`.
A wrapper around the method :py:meth:`combine_duplicate_stmts`.
"""
if self.unique_stmts is None:
self.unique_stmts = self.combine_duplicate_stmts(self.stmts)
return self.unique_stmts
def _get_stmt_matching_groups(self, stmts):
"""Use the matches_fun method to get sets of matching statements."""
# Remove exact duplicates using a set() call, then make copies:
logger.debug('%d statements before removing object duplicates.' %
len(stmts))
st = list(set(stmts))
logger.debug('%d statements after removing object duplicates.' %
len(stmts))
# Group statements according to whether they are matches (differing
# only in their evidence).
# Sort the statements in place by matches_key()
st.sort(key=self.matches_fun)
return itertools.groupby(st, key=self.matches_fun)
def combine_duplicate_stmts(self, stmts):
"""Combine evidence from duplicate Statements.
Statements are deemed to be duplicates if they have the same key
returned by the `matches_key()` method of the Statement class. This
generally means that statements must be identical in terms of their
arguments and can differ only in their associated `Evidence` objects.
This function keeps the first instance of each set of duplicate
statements and merges the lists of Evidence from all of the other
statements.
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
Set of statements to de-duplicate.
Returns
-------
list of :py:class:`indra.statements.Statement`
Unique statements with accumulated evidence across duplicates.
Examples
--------
De-duplicate and combine evidence for two statements differing only
in their evidence lists:
>>> from indra.ontology.bio import bio_ontology
>>> map2k1 = Agent('MAP2K1')
>>> mapk1 = Agent('MAPK1')
>>> stmt1 = Phosphorylation(map2k1, mapk1, 'T', '185',
... evidence=[Evidence(text='evidence 1')])
>>> stmt2 = Phosphorylation(map2k1, mapk1, 'T', '185',
... evidence=[Evidence(text='evidence 2')])
>>> pa = Preassembler(bio_ontology)
>>> uniq_stmts = pa.combine_duplicate_stmts([stmt1, stmt2])
>>> uniq_stmts
[Phosphorylation(MAP2K1(), MAPK1(), T, 185)]
>>> sorted([e.text for e in uniq_stmts[0].evidence])
['evidence 1', 'evidence 2']
"""
# Helper function to get a list of evidence matches keys
def _ev_keys(sts):
ev_keys = []
for stmt in sts:
for ev in stmt.evidence:
ev_keys.append(ev.matches_key())
return ev_keys
# Iterate over groups of duplicate statements
unique_stmts = []
for _, duplicates in self._get_stmt_matching_groups(stmts):
ev_keys = set()
# Get the first statement and add the evidence of all subsequent
# Statements to it
duplicates = list(duplicates)
start_ev_keys = _ev_keys(duplicates)
for stmt_ix, stmt in enumerate(duplicates):
if stmt_ix == 0:
new_stmt = stmt.make_generic_copy()
if len(duplicates) == 1:
new_stmt.uuid = stmt.uuid
raw_text = [None if ag is None else ag.db_refs.get('TEXT')
for ag in stmt.agent_list(deep_sorted=True)]
raw_grounding = [None if ag is None else ag.db_refs
for ag in stmt.agent_list(deep_sorted=True)]
for ev in stmt.evidence:
ev_key = ev.matches_key() + str(raw_text) + \
str(raw_grounding)
if ev_key not in ev_keys:
# In case there are already agents annotations, we
# just add a new key for raw_text, otherwise create
# a new key
if 'agents' in ev.annotations:
ev.annotations['agents']['raw_text'] = raw_text
ev.annotations['agents']['raw_grounding'] = \
raw_grounding
else:
ev.annotations['agents'] = \
{'raw_text': raw_text,
'raw_grounding': raw_grounding}
if 'prior_uuids' not in ev.annotations:
ev.annotations['prior_uuids'] = []
ev.annotations['prior_uuids'].append(stmt.uuid)
new_stmt.evidence.append(ev)
ev_keys.add(ev_key)
end_ev_keys = _ev_keys([new_stmt])
if len(end_ev_keys) != len(start_ev_keys):
logger.debug('%d redundant evidences eliminated.' %
(len(start_ev_keys) - len(end_ev_keys)))
# This should never be None or anything else
assert isinstance(new_stmt, Statement)
unique_stmts.append(new_stmt)
# At this point, we should do a hash refresh so that the statements
# returned don't have stale hashes.
for stmt in unique_stmts:
for shallow in (True, False):
stmt.get_hash(shallow=shallow, refresh=True,
matches_fun=self.matches_fun)
return unique_stmts
# Note that the kwargs here are just there for backwards compatibility
# with old code that uses arguments related to multiprocessing.
def combine_related(self, return_toplevel=True, filters=None, **kwargs):
"""Connect related statements based on their refinement relationships.
This function takes as a starting point the unique statements (with
duplicates removed) and returns a modified flat list of statements
containing only those statements which do not represent a refinement of
other existing statements. In other words, the more general versions of
a given statement do not appear at the top level, but instead are
listed in the `supports` field of the top-level statements.
If :py:attr:`unique_stmts` has not been initialized with the
de-duplicated statements, :py:meth:`combine_duplicates` is called
internally.
After this function is called the attribute :py:attr:`related_stmts` is
set as a side-effect.
The procedure for combining statements in this way involves a series
of steps:
1. The statements are subjected to (built-in or user-supplied) filters
that group them based on potential refinement relationships. For
instance, the ontology-based filter positions each statement,
based on its agent arguments, with the ontology, and determines
potential refinements based on paths in the ontology graph.
2. Each statement is then compared with the set of statements it
can potentially refine, as determined by the pre-filters.
If the statement represents a refinement of
the other (as defined by the `refinement_of()` method implemented
for the Statement), then the more refined statement is added
to the `supports` field of the more general statement, and the
more general statement is added to the `supported_by` field of
the more refined statement.
3. A new flat list of statements is created that contains only those
statements that have no `supports` entries (statements containing
such entries are not eliminated, because they will be retrievable
from the `supported_by` fields of other statements). This list
is returned to the caller.
.. note:: Subfamily relationships must be consistent across arguments
For now, we require that merges can only occur if the *isa*
relationships are all in the *same direction for all the agents* in
a Statement. For example, the two statement groups: `RAF_family ->
MEK1` and `BRAF -> MEK_family` would not be merged, since BRAF
*isa* RAF_family, but MEK_family is not a MEK1. In the future this
restriction could be revisited.
Parameters
----------
return_toplevel : Optional[bool]
If True only the top level statements are returned.
If False, all statements are returned. Default: True
filters : Optional[list[:py:class:`indra.preassembler.refinement.RefinementFilter`]]
A list of RefinementFilter classes that implement filters on
possible statement refinements. For details on how to
construct such a filter, see the documentation of
:py:class:`indra.preassembler.refinement.RefinementFilter`.
If no user-supplied filters are provided, the default ontology-based
filter is applied. If a list of filters is provided here, the
:py:class:`indra.preassembler.refinement.OntologyRefinementFilter`
isn't appended by default, and should be added by the user, if
necessary. Default: None
Returns
-------
list of :py:class:`indra.statement.Statement`
The returned list contains Statements representing the more
concrete/refined versions of the Statements involving particular
entities. The attribute :py:attr:`related_stmts` is also set to
this list. However, if return_toplevel is False then all
statements are returned, irrespective of level of specificity.
In this case the relationships between statements can
be accessed via the supports/supported_by attributes.
Examples
--------
A more general statement with no information about a Phosphorylation
site is identified as supporting a more specific statement:
>>> from indra.ontology.bio import bio_ontology
>>> braf = Agent('BRAF')
>>> map2k1 = Agent('MAP2K1')
>>> st1 = Phosphorylation(braf, map2k1)
>>> st2 = Phosphorylation(braf, map2k1, residue='S')
>>> pa = Preassembler(bio_ontology, [st1, st2])
>>> combined_stmts = pa.combine_related() # doctest:+ELLIPSIS
>>> combined_stmts
[Phosphorylation(BRAF(), MAP2K1(), S)]
>>> combined_stmts[0].supported_by
[Phosphorylation(BRAF(), MAP2K1())]
>>> combined_stmts[0].supported_by[0].supports
[Phosphorylation(BRAF(), MAP2K1(), S)]
"""
if self.related_stmts is not None:
if return_toplevel:
return self.related_stmts
else:
assert self.unique_stmts is not None
return self.unique_stmts
# Call combine_duplicates, which lazily initializes self.unique_stmts
unique_stmts = self.combine_duplicates()
# Generate the index map, linking related statements.
idx_map = self._generate_id_maps(unique_stmts,
filters=filters)
# Now iterate over all indices and set supports/supported by
for ix1, ix2 in idx_map:
unique_stmts[ix1].supported_by.append(unique_stmts[ix2])
unique_stmts[ix2].supports.append(unique_stmts[ix1])
# Get the top level statements
self.related_stmts = [st for st in unique_stmts if not st.supports]
logger.debug('%d top level' % len(self.related_stmts))
if return_toplevel:
return self.related_stmts
else:
return unique_stmts
def _generate_relation_tuples(self, unique_stmts, split_idx=None,
filters=None):
"""Return refinement relations as a set of statement hash tuples."""
relations = self._generate_relations(unique_stmts=unique_stmts,
split_idx=split_idx,
filters=filters)
relation_tuples = set()
for refiner, refineds in relations.items():
relation_tuples |= {(refiner, refined) for refined in refineds}
return relation_tuples
def _generate_relations(self, unique_stmts, split_idx=None,
filters=None):
"""Return refinement relations as a dict using statement hashes."""
ts = time.time()
# Statements keyed by their hashes
stmts_by_hash = {stmt.get_hash(matches_fun=self.matches_fun):
stmt for stmt in unique_stmts}
# Here we apply any additional filters to cut down the number of
# potential comparisons before actually making comparisons
if not filters:
filters = [OntologyRefinementFilter(ontology=self.ontology)]
# Here we handle split_idx to allow finding refinements between
# two distinct groups of statements (identified by an index at which we
# split the unique_statements list) rather than globally across
# all unique statements.
if split_idx:
stmt_to_idx = {stmt.get_hash(matches_fun=self.matches_fun): idx
for idx, stmt in enumerate(unique_stmts)}
split_groups = {sh: (idx <= split_idx)
for sh, idx in stmt_to_idx.items()}
sgf = SplitGroupFilter(split_groups=split_groups)
filters.append(sgf)
# We can now append the confirmation filter
confirm_filter = \
RefinementConfirmationFilter(ontology=self.ontology,
refinement_fun=self.refinement_fun)
filters.append(confirm_filter)
# Initialize all filters
for filt in filters:
filt.initialize(stmts_by_hash=stmts_by_hash)
# This is the core of refinement finding. Here we apply filter functions
# per statement, sequentially.
# Since the actual comparison which evaluates the refinement_fun on
# potentially related statements is the last filter, we don't need to
# do any further operations after this loop.
relations = {}
for stmt_hash, stmt in tqdm.tqdm(stmts_by_hash.items(),
desc='Finding refinement relations'):
rels = find_refinements_for_statement(stmt, filters)
if rels:
relations[stmt_hash] = rels
te = time.time()
logger.info('Found %d refinements in %.2fs' %
(sum([len(v) for v in relations.values()]), te-ts))
self._comparison_counter = confirm_filter.comparison_counter
logger.info('Total comparisons: %d' % self._comparison_counter)
return relations
# Note that the kwargs here are just there for backwards compatibility
# with old code that uses arguments related to multiprocessing.
def _generate_id_maps(self, unique_stmts, split_idx=None,
filters=None, **kwargs):
"""Return pairs of statement indices representing refinement relations.
Parameters
----------
unique_stmts : list[indra.statements.Statement]
A list of de-duplicated INDRA Statements.
split_idx : Optional[int]
An index at which the flat list of unique statements should be split
and compared for refinements only across the two groups, not
within each group. By default, no splitting is done and all statements
are compared for refinements.
filters : Optional[list[:py:class:`indra.preassembler.refinement.RefinementFilter`]]
A list of RefinementFilter classes that implement filters on
possible statement refinements. For details on how to
construct such a filter, see the documentation of
:py:class:`indra.preassembler.refinement.RefinementFilter`.
If no user-supplied filters are provided, the default ontology-based
filter is applied. If a list of filters is provided here, the
:py:class:`indra.preassembler.refinement.OntologyRefinementFilter`
isn't appended by default, and should be added by the user, if
necessary. Default: None
Returns
-------
list[tuple]
A list of tuples where the first element of each tuple is
the linear index of a statement in the unique stmts list
which refines the statement whose index is the second
element of the tuple.
"""
stmt_to_idx = {stmt.get_hash(matches_fun=self.matches_fun): idx
for idx, stmt in enumerate(unique_stmts)}
# Make a list of Statement types
if len(unique_stmts) != len(stmt_to_idx):
raise ValueError('The unique statements used as an input for '
'finding refinements do not all have distinct '
'matches key hashes. This could be due to cached '
'hashes being outdated or hashes not having been '
'calculated according to a custom matches key '
'function used for refinement finding.')
relation_tuples = \
self._generate_relation_tuples(unique_stmts,
split_idx=split_idx,
filters=filters)
idx_maps = [(stmt_to_idx[refiner], stmt_to_idx[refined])
for refiner, refined in relation_tuples]
return idx_maps
def find_contradicts(self):
"""Return pairs of contradicting Statements.
Returns
-------
contradicts : list(tuple(Statement, Statement))
A list of Statement pairs that are contradicting.
"""
# Make a dict of Statement by type
stmts_by_type = collections.defaultdict(list)
for stmt in self.stmts:
stmts_by_type[indra_stmt_type(stmt)].append(stmt)
stmts_by_type = dict(stmts_by_type)
# Handle Statements with polarity first
pos_stmts = AddModification.__subclasses__()
neg_stmts = [modclass_to_inverse[c] for c in pos_stmts]
pos_stmts += [Activation, IncreaseAmount]
neg_stmts += [Inhibition, DecreaseAmount]
contradicts = []
# Handle statements with polarity first
# TODO: we could probably do some optimization here
# to not have to check statements combinatorially
for pst, nst in zip(pos_stmts, neg_stmts):
poss = stmts_by_type.get(pst, [])
negs = stmts_by_type.get(nst, [])
for ps, ns in itertools.product(poss, negs):
if ps.contradicts(ns, self.ontology):
contradicts.append((ps, ns))
# Handle neutral Statements next
neu_stmts = [Influence, ActiveForm]
for stt in neu_stmts:
stmts = stmts_by_type.get(stt, [])
for st1, st2 in itertools.combinations(stmts, 2):
if st1.contradicts(st2, self.ontology):
contradicts.append((st1, st2))
return contradicts
def _normalize_relations(self, ns, rank_key, rel_fun, flip_polarity):
# Find related entries, sort them, and return the first one which is
# the one that will be normalized to
def _replace_grounding(ns, entry, rank_key, rel_fun):
rel_ents = rel_fun(ns, entry)
if rel_ents:
rel_ents = [(ns, e.split('#')[1] if '#' in e else e)
for ns, e in rel_ents]
sorted_entries = sorted([(ns, entry)] + rel_ents,
key=rank_key)
_, chosen = sorted_entries[0]
return chosen, chosen != entry
else:
return entry, False
# If no custom rank_key was provided we use the original value to
# sort by
if rank_key is None:
def polarity_rank_key(args):
ns, entry = args
pol = self.ontology.get_polarity(ns, entry)
# Here we flip polarities to rank positive polarity before
# negative
pol_rank = -1 if pol is None else -pol
return pol_rank, entry
rank_key = polarity_rank_key
# We now go agent by agent to normalize grounding
for stmt in self.stmts:
for agent_idx, agent in enumerate(stmt.agent_list()):
# If the relevant namespace is an entry
if agent is not None and ns in agent.db_refs:
grounding = agent.db_refs[ns]
# If we have a list, we iterate over it and normalize
# each entry separately
if isinstance(grounding, list):
new_grounding = []
for idx, (entry, score) in enumerate(grounding):
chosen, changed = _replace_grounding(ns, entry,
rank_key,
rel_fun)
new_grounding.append((chosen, score))
# If the top grounding was changed and we need
# to flip polarity then the Statement's polarity
# is flipped
if idx == 0 and changed and flip_polarity:
stmt.flip_polarity(agent_idx=agent_idx)
agent.db_refs[ns] = new_grounding
# If there's only one grounding then we just normalize
# that one
else:
chosen, changed = _replace_grounding(ns, grounding,
rank_key, rel_fun)
agent.db_refs[ns] = chosen
if changed and flip_polarity:
stmt.flip_polarity(agent_idx=agent_idx)
def normalize_equivalences(self, ns, rank_key=None):
"""Normalize to one of a set of equivalent concepts across statements.
This function changes Statements in place without returning a value.
Parameters
----------
ns : str
The db_refs namespace for which the equivalence relation should
be applied.
rank_key : Optional[function]
A function handle which assigns a sort key to each entry in the
given namespace to allow prioritizing in a controlled way which
concept is normalized to.
"""
rel_fun = functools.partial(self.ontology.child_rel,
rel_types={'is_equal'})
self._normalize_relations(ns, rank_key, rel_fun, False)
def normalize_opposites(self, ns, rank_key=None):
"""Normalize to one of a pair of opposite concepts across statements.
This function changes Statements in place without returning a value.
Parameters
----------
ns : str
The db_refs namespace for which the opposite relation should
be applied.
rank_key : Optional[function]
A function handle which assigns a sort key to each entry in the
given namespace to allow prioritizing in a controlled way which
concept is normalized to.
"""
rel_fun = functools.partial(self.ontology.child_rel,
rel_types={'is_opposite'})
self._normalize_relations(ns, rank_key, rel_fun, True)
def find_refinements_for_statement(stmt, filters):
"""Return refinements for a single statement given initialized filters.
Parameters
----------
stmt : indra.statements.Statement
The statement whose relations should be found.
filters : list[:py:class:`indra.preassembler.refinement.RefinementFilter`]
A list of refinement filter instances. The filters passed to this
function need to have been initialized with stmts_by_hash.
Returns
-------
set
A set of statement hashes that this statement refines.
"""
first_filter = True
relations = {}
for filt in filters:
# The first filter outputs all the possible relations that it
# can find, while subsequent filters are taking the results of
# the previous filter as the basis of further filtering down
# on possible refinements.
possibly_related = None if first_filter else relations
# We pass in the specific statement and any constraints on
# previously determined possible relations to the filter.
relations = filt.get_less_specifics(stmt,
possibly_related=possibly_related)
first_filter = False
return relations
def render_stmt_graph(statements, reduce=True, english=False, rankdir=None,
agent_style=None):
"""Render the statement hierarchy as a pygraphviz graph.
Parameters
----------
statements : list of :py:class:`indra.statements.Statement`
A list of top-level statements with associated supporting statements
resulting from building a statement hierarchy with
:py:meth:`combine_related`.
reduce : bool
Whether to perform a transitive reduction of the edges in the graph.
Default is True.
english : bool
If True, the statements in the graph are represented by their
English-assembled equivalent; otherwise they are represented as
text-formatted Statements.
rankdir : str or None
Argument to pass through to the pygraphviz `AGraph` constructor
specifying graph layout direction. In particular, a value of 'LR'
specifies a left-to-right direction. If None, the pygraphviz default
is used.
agent_style : dict or None
Dict of attributes specifying the visual properties of nodes. If None,
the following default attributes are used::
agent_style = {'color': 'lightgray', 'style': 'filled',
'fontname': 'arial'}
Returns
-------
pygraphviz.AGraph
Pygraphviz graph with nodes representing statements and edges pointing
from supported statements to supported_by statements.
Examples
--------
Pattern for getting statements and rendering as a Graphviz graph:
>>> from indra.ontology.bio import bio_ontology
>>> braf = Agent('BRAF')
>>> map2k1 = Agent('MAP2K1')
>>> st1 = Phosphorylation(braf, map2k1)
>>> st2 = Phosphorylation(braf, map2k1, residue='S')
>>> pa = Preassembler(bio_ontology, [st1, st2])
>>> pa.combine_related() # doctest:+ELLIPSIS
[Phosphorylation(BRAF(), MAP2K1(), S)]
>>> graph = render_stmt_graph(pa.related_stmts)
>>> graph.write('example_graph.dot') # To make the DOT file
>>> graph.draw('example_graph.png', prog='dot') # To make an image
Resulting graph:
.. image:: /images/example_graph.png
:align: center
:alt: Example statement graph rendered by Graphviz
"""
import pygraphviz as pgv
from indra.assemblers.english import EnglishAssembler
# Set the default agent formatting properties
if agent_style is None:
agent_style = {'color': 'lightgray', 'style': 'filled',
'fontname': 'arial'}
# Sets to store all of the nodes and edges as we recursively process all
# of the statements
nodes = set([])
edges = set([])
stmt_dict = {}
# Recursive function for processing all statements
def process_stmt(stmt):
nodes.add(str(stmt.matches_key()))
stmt_dict[str(stmt.matches_key())] = stmt
for sby_ix, sby_stmt in enumerate(stmt.supported_by):
edges.add((str(stmt.matches_key()), str(sby_stmt.matches_key())))
process_stmt(sby_stmt)
# Process all of the top-level statements, getting the supporting statements
# recursively
for stmt in statements:
process_stmt(stmt)
# Create a networkx graph from the nodes
nx_graph = nx.DiGraph()
nx_graph.add_edges_from(edges)
# Perform transitive reduction if desired
if reduce:
nx_graph = nx.algorithms.dag.transitive_reduction(nx_graph)
# Create a pygraphviz graph from the nx graph
try:
pgv_graph = pgv.AGraph(name='statements', directed=True,
rankdir=rankdir)
except NameError:
logger.error('Cannot generate graph because '
'pygraphviz could not be imported.')
return None
for node in nx_graph.nodes():
stmt = stmt_dict[node]
if english:
ea = EnglishAssembler([stmt])
stmt_str = ea.make_model()
else:
stmt_str = str(stmt)
pgv_graph.add_node(node,
label='%s (%d)' % (stmt_str, len(stmt.evidence)),
**agent_style)
pgv_graph.add_edges_from(nx_graph.edges())
return pgv_graph
def flatten_stmts(stmts):
"""Return the full set of unique stms in a pre-assembled stmt graph.
The flattened list of statements returned by this function can be
compared to the original set of unique statements to make sure no
statements have been lost during the preassembly process.
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
A list of top-level statements with associated supporting statements
resulting from building a statement hierarchy with
:py:meth:`combine_related`.
Returns
-------
stmts : list of :py:class:`indra.statements.Statement`
List of all statements contained in the hierarchical statement graph.
Examples
--------
Calling :py:meth:`combine_related` on two statements results in one
top-level statement; calling :py:func:`flatten_stmts` recovers both:
>>> from indra.ontology.bio import bio_ontology
>>> braf = Agent('BRAF')
>>> map2k1 = Agent('MAP2K1')
>>> st1 = Phosphorylation(braf, map2k1)
>>> st2 = Phosphorylation(braf, map2k1, residue='S')
>>> pa = Preassembler(bio_ontology, [st1, st2])
>>> pa.combine_related() # doctest:+ELLIPSIS
[Phosphorylation(BRAF(), MAP2K1(), S)]
>>> flattened = flatten_stmts(pa.related_stmts)
>>> flattened.sort(key=lambda x: x.matches_key())
>>> flattened
[Phosphorylation(BRAF(), MAP2K1()), Phosphorylation(BRAF(), MAP2K1(), S)]
"""
total_stmts = set(stmts)
for stmt in stmts:
if stmt.supported_by:
children = flatten_stmts(stmt.supported_by)
total_stmts = total_stmts.union(children)
return list(total_stmts)
def flatten_evidence(stmts, collect_from=None):
"""Add evidence from *supporting* stmts to evidence for *supported* stmts.
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
A list of top-level statements with associated supporting statements
resulting from building a statement hierarchy with
:py:meth:`combine_related`.
collect_from : str in ('supports', 'supported_by')
String indicating whether to collect and flatten evidence from the
`supports` attribute of each statement or the `supported_by` attribute.
If not set, defaults to 'supported_by'.
Returns
-------
stmts : list of :py:class:`indra.statements.Statement`
Statement hierarchy identical to the one passed, but with the
evidence lists for each statement now containing all of the evidence
associated with the statements they are supported by.
Examples
--------
Flattening evidence adds the two pieces of evidence from the supporting
statement to the evidence list of the top-level statement:
>>> from indra.ontology.bio import bio_ontology
>>> braf = Agent('BRAF')
>>> map2k1 = Agent('MAP2K1')
>>> st1 = Phosphorylation(braf, map2k1,
... evidence=[Evidence(text='foo'), Evidence(text='bar')])
>>> st2 = Phosphorylation(braf, map2k1, residue='S',
... evidence=[Evidence(text='baz'), Evidence(text='bak')])
>>> pa = Preassembler(bio_ontology, [st1, st2])
>>> pa.combine_related() # doctest:+ELLIPSIS
[Phosphorylation(BRAF(), MAP2K1(), S)]
>>> [e.text for e in pa.related_stmts[0].evidence]
['baz', 'bak']
>>> flattened = flatten_evidence(pa.related_stmts)
>>> sorted([e.text for e in flattened[0].evidence])
['bak', 'bar', 'baz', 'foo']
"""
if collect_from is None:
collect_from = 'supported_by'
if collect_from not in ('supports', 'supported_by'):
raise ValueError('collect_from must be one of "supports", '
'"supported_by"')
logger.info('Flattening evidence based on %s' % collect_from)
# Copy all of the statements--these will be the ones where we update
# the evidence lists
stmts = fast_deepcopy(stmts)
for stmt in stmts:
# We get the original evidence keys here so we can differentiate them
# from ones added during flattening.
orig_ev_keys = [ev.matches_key() for ev in stmt.evidence]
# We now do the flattening
total_evidence = _flatten_evidence_for_stmt(stmt, collect_from)
# Here we add annotations for each evidence in the list,
# depending on whether it's an original direct evidence or one that
# was added during flattening
new_evidence = []
for ev in total_evidence:
ev_key = ev.matches_key()
if ev_key in orig_ev_keys:
ev.annotations['support_type'] = 'direct'
new_evidence.append(ev)
else:
ev_copy = fast_deepcopy(ev)
ev_copy.annotations['support_type'] = collect_from
new_evidence.append(ev_copy)
# Now set the new evidence list as the copied statement's evidence
stmt.evidence = new_evidence
return stmts
def _flatten_evidence_for_stmt(stmt, collect_from):
supp_stmts = (stmt.supports if collect_from == 'supports'
else stmt.supported_by)
total_evidence = set(stmt.evidence)
for supp_stmt in supp_stmts:
child_evidence = _flatten_evidence_for_stmt(supp_stmt, collect_from)
total_evidence = total_evidence.union(child_evidence)
return list(total_evidence)
def default_matches_fun(st):
return st.matches_key()

Example row 8

- repo_name: simonw/sqlite-utils
- path: /tests/test_suggest_column_types.py
- blob_id: d7b70300a993375a9d08dd7c0cf5bb59e0510036
- directory_id: e7252e004fcc5f6f1c290c65a8612fa68ed9a17c
- content_id: e36c58fa8716937759e4ed4407de2cbb3e01a378
- snapshot_id: 30978f99d921afa9e9dae741d0197a7c647e033b
- revision_id: 1260bdc7bfe31c36c272572c6389125f8de6ef71
- branch_name: refs/heads/main
- detected_licenses: ["Apache-2.0"]
- license_type: permissive; gha_license_id: Apache-2.0
- visit_date: 2023-09-03T22:26:14.952114; revision_date: 2023-08-18T17:47:10; committer_date: 2023-08-18T17:47:10
- github_id: 140,912,432; star_events_count: 1,288; fork_events_count: 93
- gha_event_created_at: 2023-09-09T00:45:31; gha_created_at: 2018-07-14T03:21:46; gha_language: Python
- src_encoding: UTF-8; language: Python; is_vendor: false; is_generated: false
- length_bytes: 989; extension: py; filename: test_suggest_column_types.py
- content:
import pytest
from collections import OrderedDict
from sqlite_utils.utils import suggest_column_types


@pytest.mark.parametrize(
    "records,types",
    [
        ([{"a": 1}], {"a": int}),
        ([{"a": 1}, {"a": None}], {"a": int}),
        ([{"a": "baz"}], {"a": str}),
        ([{"a": "baz"}, {"a": None}], {"a": str}),
        ([{"a": 1.2}], {"a": float}),
        ([{"a": 1.2}, {"a": None}], {"a": float}),
        ([{"a": [1]}], {"a": str}),
        ([{"a": [1]}, {"a": None}], {"a": str}),
        ([{"a": (1,)}], {"a": str}),
        ([{"a": {"b": 1}}], {"a": str}),
        ([{"a": {"b": 1}}, {"a": None}], {"a": str}),
        ([{"a": OrderedDict({"b": 1})}], {"a": str}),
        ([{"a": 1}, {"a": 1.1}], {"a": float}),
        ([{"a": b"b"}], {"a": bytes}),
        ([{"a": b"b"}, {"a": None}], {"a": bytes}),
        ([{"a": "a", "b": None}], {"a": str, "b": str}),
    ],
)
def test_suggest_column_types(records, types):
    assert types == suggest_column_types(records)

Example row 9

- repo_name: conan-io/conan
- path: /conans/model/options.py
- blob_id: 0cb77a55f0d838d78635090b228c7e6e2cf194aa
- directory_id: 8c39ba92cc71ff78242477d3256f6ee3daa872c7
- content_id: 478b15034cf28f859decdc195ea035bc6c5cf977
- snapshot_id: eb4427e534a0edbb1fb06c753d5d9587faaef93c
- revision_id: bac455d1329b6744cdc41747354a727c9233179f
- branch_name: refs/heads/release/2.0
- detected_licenses: ["MIT"]
- license_type: permissive; gha_license_id: MIT
- visit_date: 2023-09-03T18:51:54.345761; revision_date: 2023-09-03T17:30:43; committer_date: 2023-09-03T17:30:43
- github_id: 47,190,624; star_events_count: 7,754; fork_events_count: 1,182
- gha_event_created_at: 2023-09-14T15:16:09; gha_created_at: 2015-12-01T13:17:02; gha_language: Python
- src_encoding: UTF-8; language: Python; is_vendor: false; is_generated: false
- length_bytes: 17,135; extension: py; filename: options.py
- content:
from conans.errors import ConanException
from conans.model.recipe_ref import ref_matches
_falsey_options = ["false", "none", "0", "off", ""]
def option_not_exist_msg(option_name, existing_options):
""" Someone is referencing an option that is not available in the current package
options
"""
result = ["option '%s' doesn't exist" % option_name,
"Possible options are %s" % existing_options or "none"]
return "\n".join(result)
class _PackageOption:
def __init__(self, name, value, possible_values=None):
self._name = name
self._value = value # Value None = not defined
# possible_values only possible origin is recipes
if possible_values is None:
self._possible_values = None
else:
# This can contain "ANY"
self._possible_values = [str(v) if v is not None else None for v in possible_values]
def dumps(self, scope=None):
if self._value is None:
return None
if scope:
return "%s:%s=%s" % (scope, self._name, self._value)
else:
return "%s=%s" % (self._name, self._value)
def copy_conaninfo_option(self):
# To generate a copy without validation, for package_id info.options value
assert self._possible_values is not None # this should always come from recipe, with []
return _PackageOption(self._name, self._value, self._possible_values + ["ANY"])
def __bool__(self):
if self._value is None:
return False
return self._value.lower() not in _falsey_options
def __str__(self):
return str(self._value)
def __int__(self):
return int(self._value)
def _check_valid_value(self, value):
""" checks that the provided value is allowed by current restrictions
"""
if self._possible_values is None: # validation not defined (profile)
return
if value in self._possible_values:
return
if value is not None and "ANY" in self._possible_values:
return
msg = ("'%s' is not a valid 'options.%s' value.\nPossible values are %s"
% (value, self._name, self._possible_values))
raise ConanException(msg)
def __eq__(self, other):
# To promote the other to string, and always compare as strings
# if self.options.myoption == 1 => will convert 1 to "1"
if other is None:
return self._value is None
other = str(other)
self._check_valid_value(other)
if self._value is None:
return False # Other is not None here
return other == self.__str__()
@property
def value(self):
return self._value
@value.setter
def value(self, v):
v = str(v) if v is not None else None
self._check_valid_value(v)
self._value = v
def validate(self):
# check that this has a valid option value defined
if self._value is not None:
return
if None not in self._possible_values:
raise ConanException("'options.%s' value not defined" % self._name)
class _PackageOptions:
def __init__(self, recipe_options_definition=None):
if recipe_options_definition is None:
self._constrained = False
self._data = {}
else:
self._constrained = True
self._data = {str(option): _PackageOption(str(option), None, possible_values)
for option, possible_values in recipe_options_definition.items()}
self._freeze = False
def dumps(self, scope=None):
result = []
for _, package_option in sorted(list(self._data.items())):
dump = package_option.dumps(scope)
if dump:
result.append(dump)
return "\n".join(result)
@property
def possible_values(self):
return {k: v._possible_values for k, v in self._data.items()}
def update(self, options):
"""
@type options: _PackageOptions
"""
# Necessary for init() extending of options for python_requires_extend
for k, v in options._data.items():
self._data[k] = v
def clear(self):
# for header_only() clearing
self._data.clear()
def freeze(self):
self._freeze = True
def __contains__(self, option):
return str(option) in self._data
def get_safe(self, field, default=None):
return self._data.get(field, default)
def rm_safe(self, field):
# This should never raise any exception, in any case
self._data.pop(field, None)
def validate(self):
for child in self._data.values():
child.validate()
def copy_conaninfo_options(self):
# To generate a copy without validation, for package_id info.options value
result = _PackageOptions()
for k, v in self._data.items():
result._data[k] = v.copy_conaninfo_option()
return result
def _ensure_exists(self, field):
if self._constrained and field not in self._data:
raise ConanException(option_not_exist_msg(field, list(self._data.keys())))
def __getattr__(self, field):
assert field[0] != "_", "ERROR %s" % field
try:
return self._data[field]
except KeyError:
raise ConanException(option_not_exist_msg(field, list(self._data.keys())))
def __delattr__(self, field):
assert field[0] != "_", "ERROR %s" % field
# It is always possible to remove an option, even if it is frozen (freeze=True),
# and it got a value, because it is the only way an option could be removed
# conditionally to other option value (like fPIC if shared)
self._ensure_exists(field)
del self._data[field]
def __setattr__(self, field, value):
if field[0] == "_":
return super(_PackageOptions, self).__setattr__(field, value)
self._set(field, value)
def __setitem__(self, item, value):
self._set(item, value)
def _set(self, item, value):
# programmatic way to define values, for Conan codebase
current_value = self._data.get(item)
if self._freeze and current_value.value is not None and current_value != value:
raise ConanException(f"Incorrect attempt to modify option '{item}' "
f"from '{current_value}' to '{value}'")
self._ensure_exists(item)
self._data.setdefault(item, _PackageOption(item, None)).value = value
def items(self):
result = []
for field, package_option in sorted(list(self._data.items())):
result.append((field, package_option.value))
return result
def update_options(self, other, is_pattern=False):
"""
@param is_pattern: if True, then the value might not exist and won't be updated
@type other: _PackageOptions
"""
for k, v in other._data.items():
if is_pattern and k not in self._data:
continue
self._set(k, v)
class Options:
def __init__(self, options=None, options_values=None):
# options=None means an unconstrained/profile definition
try:
self._package_options = _PackageOptions(options)
# Addressed only by name, as only 1 configuration is allowed
# if more than 1 is present, 1 should be "private" requirement and its options
# are not public, not overridable
self._deps_package_options = {} # {name("Boost": PackageOptions}
if options_values:
for k, v in options_values.items():
if v is None:
continue # defining a None value means same as not giving value
k = str(k).strip()
v = str(v).strip()
tokens = k.split(":", 1)
if len(tokens) == 2:
package, option = tokens
if "/" not in package and "*" not in package and "&" not in package:
msg = "The usage of package names `{}` in options is " \
"deprecated, use a pattern like `{}/*:{}` " \
"instead".format(k, package, option)
raise ConanException(msg)
self._deps_package_options.setdefault(package, _PackageOptions())[option] = v
else:
self._package_options[k] = v
except Exception as e:
raise ConanException("Error while initializing options. %s" % str(e))
def __repr__(self):
return self.dumps()
@property
def possible_values(self):
return self._package_options.possible_values
def dumps(self):
""" produces a multiline text representation of all values, first self then others.
In alphabetical order, skipping real None (not string "None") values:
option1=value1
other_option=3
OtherPack:opt3=12.1
"""
result = []
pkg_options_dumps = self._package_options.dumps()
if pkg_options_dumps:
result.append(pkg_options_dumps)
for pkg_pattern, pkg_option in sorted(self._deps_package_options.items()):
dep_pkg_option = pkg_option.dumps(scope=pkg_pattern)
if dep_pkg_option:
result.append(dep_pkg_option)
return "\n".join(result)
@staticmethod
def loads(text):
""" parses a multiline text in the form produced by dumps(), NO validation here
"""
values = {}
for line in text.splitlines():
line = line.strip()
if not line or line.startswith("#"):
continue
name, value = line.split("=", 1)
values[name] = value
return Options(options_values=values)
def serialize(self):
# used by ConanInfo serialization, involved in "list package-ids" output
# we need to maintain the "options" and "req_options" first level or servers will break
# This happens always after reading from conaninfo.txt => all str and not None
result = {k: v for k, v in self._package_options.items()}
# Include the dependencies ones, in case they have been explicitly added in package_id()
# to the conaninfo.txt, we want to report them
for pkg_pattern, pkg_option in sorted(self._deps_package_options.items()):
for key, value in pkg_option.items():
result["%s:%s" % (pkg_pattern, key)] = value
return result
def clear(self):
# for header_only() clearing
self._package_options.clear()
self._deps_package_options.clear()
def __contains__(self, option):
return option in self._package_options
def __getattr__(self, attr):
return getattr(self._package_options, attr)
def __setattr__(self, attr, value):
if attr[0] == "_" or attr == "values":
return super(Options, self).__setattr__(attr, value)
return setattr(self._package_options, attr, value)
def __delattr__(self, field):
self._package_options.__delattr__(field)
def __getitem__(self, item):
if isinstance(item, str):
if "/" not in item and "*" not in item: # FIXME: To allow patterns like "*" or "foo*"
item += "/*"
return self._deps_package_options.setdefault(item, _PackageOptions())
def scope(self, ref):
""" when there are free options like "shared=True", they apply to the "consumer" package
Once we know the name of such consumer package, it can be defined in the data, so it will
be later correctly apply when processing options """
package_options = self._deps_package_options.setdefault(str(ref), _PackageOptions())
package_options.update_options(self._package_options)
self._package_options = _PackageOptions()
def copy_conaninfo_options(self):
# To generate the package_id info.options copy, that can destroy, change and remove things
result = Options()
result._package_options = self._package_options.copy_conaninfo_options()
# In most scenarios this should be empty at this stage, because it was cleared
if self._deps_package_options:
raise ConanException("Dependencies options were defined incorrectly. Maybe you"
" tried to define options values in 'requirements()' or other"
" invalid place")
return result
def update(self, options=None, options_values=None):
# Necessary for init() extending of options for python_requires_extend
new_options = Options(options, options_values)
self._package_options.update(new_options._package_options)
for pkg, pkg_option in new_options._deps_package_options.items():
self._deps_package_options.setdefault(pkg, _PackageOptions()).update(pkg_option)
def update_options(self, other):
"""
dict-like update of options, "other" has priority, overwrite existing
@type other: Options
"""
self._package_options.update_options(other._package_options)
for pkg, pkg_option in other._deps_package_options.items():
self._deps_package_options.setdefault(pkg, _PackageOptions()).update_options(pkg_option)
def apply_downstream(self, down_options, profile_options, own_ref, is_consumer):
""" compute the current package options, starting from the self defined ones and applying
the options defined by the downstrream consumers and the profile
Only modifies the current package_options, not the dependencies ones
"""
assert isinstance(down_options, Options)
assert isinstance(profile_options, Options)
for defined_options in down_options, profile_options:
if own_ref is None or own_ref.name is None:
# If the current package doesn't have a name defined, is a pure consumer without name
# Get the non-scoped options, plus the "all-matching=*" pattern
self._package_options.update_options(defined_options._package_options)
for pattern, options in defined_options._deps_package_options.items():
if ref_matches(None, pattern, is_consumer=is_consumer):
self._package_options.update_options(options, is_pattern=True)
else:
# If the current package has a name, there should be a match, either exact name
# match, or a fnmatch approximate one
for pattern, options in defined_options._deps_package_options.items():
if ref_matches(own_ref, pattern, is_consumer=is_consumer):
self._package_options.update_options(options, is_pattern="*" in pattern)
self._package_options.freeze()
def get_upstream_options(self, down_options, own_ref, is_consumer):
""" compute which options should be propagated to the dependencies, a combination of the
downstream defined default_options with the current default_options ones. This happens
at "configure()" time, while building the graph. Also compute the minimum "self_options"
        which is the state that a package should define in order to reproduce its configuration
"""
assert isinstance(down_options, Options)
# We need to store a copy for internal propagation for test_requires and tool_requires
private_deps_options = Options()
private_deps_options._deps_package_options = self._deps_package_options.copy()
# self_options are the minimal necessary for a build-order
# TODO: check this, isn't this just a copy?
self_options = Options()
self_options._deps_package_options = down_options._deps_package_options.copy()
# compute now the necessary to propagate all down - self + self deps
upstream_options = Options()
for pattern, options in down_options._deps_package_options.items():
if ref_matches(own_ref, pattern, is_consumer=is_consumer):
# Remove the exact match-name to this package, don't further propagate up
pattern_name = pattern.split("/", 1)[0]
if "*" not in pattern_name:
continue
self._deps_package_options.setdefault(pattern, _PackageOptions()).update_options(options)
upstream_options._deps_package_options = self._deps_package_options
# When the upstream is computed, the current dependencies are invalidated, so users will
# not be able to do ``self.options["mydep"]`` because it will be empty. self.dependencies
# is the way to access dependencies (in other methods)
self._deps_package_options = {}
return self_options, upstream_options, private_deps_options
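# Illustrative usage sketch (an assumption-labelled example, not taken from this
# module's tests; it assumes attribute assignment on the scoped _PackageOptions
# works the same way as on the package's own options):
#
#   opts = Options()
#   opts.shared = True             # option on the current (consumer) package
#   opts["zlib/*"].fPIC = True     # option scoped to any dependency matching "zlib/*"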
|
182ddbdc2f0693d5634c5af2765a23ea808ef595
|
61673ab9a42f7151de7337608c442fa6247f13bb
|
/tkinter/__canvas__/canvas-solar-system/main.py
|
0ecc842cffc120c097b9f588b0ad82d31728d935
|
[
"MIT"
] |
permissive
|
furas/python-examples
|
22d101670ecd667a29376d7c7d7d86f8ec71f6cf
|
95cb53b664f312e0830f010c0c96be94d4a4db90
|
refs/heads/master
| 2022-08-23T23:55:08.313936
| 2022-08-01T14:48:33
| 2022-08-01T14:48:33
| 45,575,296
| 176
| 91
|
MIT
| 2021-02-17T23:33:37
| 2015-11-04T23:54:32
|
Python
|
UTF-8
|
Python
| false
| false
| 2,090
|
py
|
main.py
|
#!/usr/bin/env python3
import tkinter as tk
import math
# --- functions ---
def calculate_position(data):
#unpack data
center_x, center_y, radius, distance, angle, angle_speed, x, y = data
# calculate new position of object
x = center_x - distance * math.sin(math.radians(-angle))
y = center_y - distance * math.cos(math.radians(-angle))
    # save position so another object can use it as its center of rotation
data[6] = x
data[7] = y
    # calculate oval coordinates
x1 = x - radius
y1 = y - radius
x2 = x + radius
y2 = y + radius
return x1, y1, x2, y2
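# Worked example: with center_x = 250, distance = 100 and angle = 90, we get
# sin(radians(-90)) = -1 and cos(radians(-90)) = 0, so the object ends up at
# (250 + 100, center_y) = (350, center_y), a quarter turn along its orbit.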
def create_object(data):
# calculate oval coordinates
x1, y1, x2, y2 = calculate_position(data)
# create oval
return c.create_oval(x1, y1, x2, y2)
def move_object(object_id, data):
# calculate oval coordinates
x1, y1, x2, y2 = calculate_position(data)
# move oval
c.coords(object_id, x1, y1, x2, y2)
def animate():
# move earth - angle += angle_speed
earth[4] += earth[5]
move_object(e_id, earth)
# moon uses earth position as center of rotation
moon[0] = earth[6]
moon[1] = earth[7]
    # move moon - angle += angle_speed
moon[4] += moon[5]
move_object(m_id, moon)
# animate again after 100ms
root.after(100, animate)
# --- main ---
# canvas size
WIDTH = 500
HEIGHT = 500
# center of solar system
center_x = WIDTH//2
center_y = HEIGHT//2
# objects data
# [center of rotation x and y, radius, distance from center, current angle, angle speed, current position x and y]
sun = [center_x, center_y, 30, 0, 0, 0, 0, 0]
earth = [center_x, center_y, 10, 100, 0, 1, 0, 0]
moon = [0, 0, 5, 40, 0, 3, 0, 0]
# - init -
root = tk.Tk()
root.title("Solar System")
# - canvas -
c = tk.Canvas(root, width=WIDTH, height=HEIGHT)
c.pack()
# create sun and earth
s_id = create_object(sun)
e_id = create_object(earth)
# moon uses earth position as center of rotation
moon[0] = earth[6]
moon[1] = earth[7]
# create moon
m_id = create_object(moon)
# start animation
animate()
# - start program -
root.mainloop()
|
3c915cb810a71d285f04f60ecf52f5c4f6105491
|
84bdf2b9e7a684b3322f08fe995fec0fb6ef0b43
|
/pycaret/utils/time_series/forecasting/models.py
|
87a653550ac66f5e758ff255f0b7f1e3c358f835
|
[
"MIT"
] |
permissive
|
pycaret/pycaret
|
ea613de9b8da2e38b1988ff7a2952abe59a3b93c
|
ea88b2d3ee22c080ca5059f2f6f1547952b9451c
|
refs/heads/master
| 2023-08-31T23:33:50.953586
| 2023-08-30T09:31:32
| 2023-08-30T09:31:32
| 223,636,350
| 7,915
| 1,994
|
MIT
| 2023-09-11T03:58:59
| 2019-11-23T18:40:48
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,817
|
py
|
models.py
|
import pandas as pd
from sktime.forecasting.base import BaseForecaster
from pycaret.utils.time_series import TSExogenousPresent
# from pycaret.time_series import TSForecastingExperiment
def _disable_pred_int_enforcement(forecaster, enforce_pi: bool) -> bool:
"""Checks to see if prediction interval should be enforced. If it should
but the forecaster does not support it, the container will be disabled.
Parameters
----------
forecaster : `sktime` compatible forecaster
forecaster to check for prediction interval capability.
Can be a dummy object of the forecasting class
enforce_pi : bool
Should prediction interval be enforced?
Returns
-------
bool
        True if the user wants to enforce prediction intervals and the forecaster
        does not support them. False otherwise.
"""
if enforce_pi and not forecaster.get_tag("capability:pred_int"):
return True
return False
def _disable_exogenous_enforcement(
forecaster, enforce_exogenous: bool, exp_has_exogenous: TSExogenousPresent
) -> bool:
"""Checks to see if exogenous support should be enforced. If it should
but the forecaster does not support it, the container will be disabled.
NOTE: Only enforced if the experiment has exogenous variables. If it does not,
then no models are disabled.
Parameters
----------
forecaster : `sktime` compatible forecaster
forecaster to check for prediction interval capability.
Can be a dummy object of the forecasting class
enforce_exogenous : bool
Should exogenous support be enforced?
    exp_has_exogenous : TSExogenousPresent
        Whether the experiment has exogenous variables or not.
Returns
-------
bool
        True if the user wants to enforce exogenous support and the forecaster
        does not support it. False otherwise.
"""
# Disable models only if the experiment has exogenous variables
if (
exp_has_exogenous == TSExogenousPresent.YES
and enforce_exogenous
and forecaster.get_tag("ignores-exogeneous-X")
):
return True
return False
def _check_enforcements(forecaster, experiment) -> bool:
"""Checks whether the model supports certain features such as
(1) Prediction Interval, and (2) support for exogenous variables. The checks
depend on what features are requested by the user during the experiment setup.
Parameters
----------
forecaster : sktime compatible forecaster
The forecaster which needs to be checked for feature support
experiment : TSForecastingExperiment
Used to check what features are requested by the user during setup.
Returns
-------
bool
True if the model should remain active in the experiment, False otherwise.
"""
active = True
# Pred Interval Enforcement ----
disable_pred_int = _disable_pred_int_enforcement(
forecaster=forecaster, enforce_pi=experiment.enforce_pi
)
# Exogenous variable support Enforcement ----
disable_exog_enforcement = _disable_exogenous_enforcement(
forecaster=forecaster,
enforce_exogenous=experiment.enforce_exogenous,
exp_has_exogenous=experiment.exogenous_present,
)
if disable_pred_int or disable_exog_enforcement:
active = False
return active
class DummyForecaster(BaseForecaster):
"""Dummy Forecaster for initial pycaret pipeline"""
_tags = {
"scitype:y": "univariate", # which y are fine? univariate/multivariate/both
"ignores-exogeneous-X": False, # does estimator use the exogenous X?
"handles-missing-data": False, # can estimator handle missing data?
"y_inner_mtype": "pd.Series", # which types do _fit, _predict, assume for y?
"X_inner_mtype": "pd.DataFrame", # which types do _fit, _predict, assume for X?
"requires-fh-in-fit": False, # is forecasting horizon already required in fit?
"X-y-must-have-same-index": True, # can estimator handle different X/y index?
"enforce-index-type": None, # index type that needs to be enforced in X/y
"capability:pred_int": False,
}
def _fit(self, y, X=None, fh=None):
self._fh_len = None
if fh is not None:
self._fh_len = len(fh)
self._is_fitted = True
return self
def _predict(self, fh=None, X=None):
self.check_is_fitted()
if fh is not None:
preds = pd.Series([-99_999] * len(fh))
elif self._fh_len is not None:
# fh seen during fit
preds = pd.Series([-99_999] * self._fh_len)
else:
raise ValueError(
f"{type(self).__name__}: `fh` is unknown. Unable to make predictions."
)
return preds
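# Illustrative sketch (hypothetical values, relying on the usual sktime fit/predict
# wrappers around _fit/_predict): the dummy simply emits the -99999 placeholder for
# every requested step.
#
#   f = DummyForecaster()
#   f.fit(pd.Series([1.0, 2.0, 3.0]), fh=[1, 2])
#   f.predict()   # roughly pd.Series([-99999, -99999])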
|
a6293be14e0f977010683aa057ae04ce7f56e34d
|
4d28185e7a78a569f9a449f39f183cac3024f711
|
/packages/Python/lldbsuite/test/python_api/lldbutil/iter/TestRegistersIterator.py
|
fbb8bff412879c88f8451739a8706f331d1a97d8
|
[
"NCSA",
"Apache-2.0",
"LLVM-exception"
] |
permissive
|
apple/swift-lldb
|
2789bf44f648609a1674ee520ac20b64c95de072
|
d74be846ef3e62de946df343e8c234bde93a8912
|
refs/heads/stable
| 2023-04-06T00:28:15.882479
| 2019-10-25T22:46:59
| 2019-10-25T22:46:59
| 44,838,862
| 780
| 291
|
Apache-2.0
| 2020-01-10T19:28:43
| 2015-10-23T21:13:18
|
C++
|
UTF-8
|
Python
| false
| false
| 4,125
|
py
|
TestRegistersIterator.py
|
"""
Test the iteration protocol for frame registers.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class RegistersIteratorTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.line1 = line_number(
'main.cpp', '// Set break point at this line.')
@add_test_categories(['pyapi'])
def test_iter_registers(self):
"""Test iterator works correctly for lldbutil.iter_registers()."""
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation("main.cpp", self.line1)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
if not process:
self.fail("SBTarget.LaunchProcess() failed")
import lldbsuite.test.lldbutil as lldbutil
for thread in process:
if thread.GetStopReason() == lldb.eStopReasonBreakpoint:
for frame in thread:
# Dump the registers of this frame using
# lldbutil.get_GPRs() and friends.
if self.TraceOn():
print(frame)
REGs = lldbutil.get_GPRs(frame)
num = len(REGs)
if self.TraceOn():
print(
"\nNumber of general purpose registers: %d" %
num)
for reg in REGs:
self.assertTrue(reg)
if self.TraceOn():
print("%s => %s" % (reg.GetName(), reg.GetValue()))
REGs = lldbutil.get_FPRs(frame)
num = len(REGs)
if self.TraceOn():
print("\nNumber of floating point registers: %d" % num)
for reg in REGs:
self.assertTrue(reg)
if self.TraceOn():
print("%s => %s" % (reg.GetName(), reg.GetValue()))
REGs = lldbutil.get_ESRs(frame)
if self.platformIsDarwin():
if self.getArchitecture() != 'armv7' and self.getArchitecture() != 'armv7k':
num = len(REGs)
if self.TraceOn():
print(
"\nNumber of exception state registers: %d" %
num)
for reg in REGs:
self.assertTrue(reg)
if self.TraceOn():
print(
"%s => %s" %
(reg.GetName(), reg.GetValue()))
else:
self.assertIsNone(REGs)
# And these should also work.
for kind in ["General Purpose Registers",
"Floating Point Registers"]:
REGs = lldbutil.get_registers(frame, kind)
self.assertTrue(REGs)
REGs = lldbutil.get_registers(
frame, "Exception State Registers")
if self.platformIsDarwin():
if self.getArchitecture() != 'armv7' and self.getArchitecture() != 'armv7k':
self.assertIsNotNone(REGs)
else:
self.assertIsNone(REGs)
# We've finished dumping the registers for frame #0.
break
|
1b72b160d54cb6b98b0dcc08a4dd4e6b301ad0b2
|
56d6257e932e1397ab03b1e7ccc6231378665b04
|
/Push2/settings.py
|
c344b053269958ce47a2926de4b280a10a1fa059
|
[] |
no_license
|
gluon/AbletonLive10.1_MIDIRemoteScripts
|
e6c8dc4956cff9630aaa36f3667994387ad1d0cf
|
2468b51eba7e5082b06f9e381b3e72027c5f272c
|
refs/heads/master
| 2023-01-10T18:37:46.504180
| 2022-12-23T09:21:48
| 2022-12-23T09:21:48
| 213,423,555
| 205
| 59
| null | 2021-02-12T16:15:01
| 2019-10-07T15:44:52
|
Python
|
UTF-8
|
Python
| false
| false
| 496
|
py
|
settings.py
|
#Embedded file name: /Users/versonator/Jenkins/live/output/Live/mac_64_static/Release/python-bundle/MIDI Remote Scripts/Push2/settings.py
from __future__ import absolute_import, print_function, unicode_literals
from pushbase.setting import OnOffSetting
def create_settings(preferences = None):
preferences = preferences if preferences is not None else {}
return {u'workflow': OnOffSetting(name=u'Workflow', value_labels=[u'Scene', u'Clip'], default_value=True, preferences=preferences)}
|
7cde62188710f3d5e4b3ff07da5d1ee8072cc234
|
755d990ce11af7d4f76d9fa858bd00dc6c249b16
|
/ai_economist/foundation/entities/endogenous.py
|
b6eb42a8bb5487d4cfffb0ef1d3ca6f2b30c25e6
|
[
"BSD-3-Clause"
] |
permissive
|
salesforce/ai-economist
|
50703073355e3ec8428531c5d21790a7b39254e7
|
a84d5f3fdcabb207d9fde7754d34906903b3e184
|
refs/heads/master
| 2023-09-05T07:32:13.070833
| 2022-05-09T21:28:55
| 2022-05-09T21:28:55
| 276,688,708
| 1,063
| 227
|
BSD-3-Clause
| 2023-08-20T13:42:31
| 2020-07-02T15:56:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,153
|
py
|
endogenous.py
|
# Copyright (c) 2020, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root
# or https://opensource.org/licenses/BSD-3-Clause
from ai_economist.foundation.base.registrar import Registry
class Endogenous:
"""Base class for endogenous entity classes.
Endogenous entities are those that, conceptually, describe the internal state
of an agent. This provides a convenient way to separate physical entities (which
    may exist in the world, be exchanged among agents, or are otherwise in principle
observable by others) from endogenous entities (such as the amount of labor
effort an agent has experienced).
Endogenous entities are registered in the "endogenous" portion of an agent's
state and should only be observable by the agent itself.
"""
name = None
def __init__(self):
assert self.name is not None
endogenous_registry = Registry(Endogenous)
@endogenous_registry.add
class Labor(Endogenous):
"""Labor accumulated through working. Included in all environments by default."""
name = "Labor"
|
e341bfe1fb160c3165fb30223e0a76500b81500d
|
364774e29ef2474552ea3839de0951e63cbae0a6
|
/wouso/core/common.py
|
53b62cd668477903600a9135270f15a817d0d4aa
|
[
"Apache-2.0"
] |
permissive
|
rosedu/wouso
|
66c50ef750cf79d6959768f7df93cc08607cc266
|
ed34c62ac925db719388f27fe5acb40376d8d0c1
|
refs/heads/master
| 2022-10-29T14:28:51.818073
| 2022-09-24T18:54:04
| 2022-09-24T18:54:04
| 2,965,476
| 121
| 97
|
NOASSERTION
| 2019-11-15T09:33:50
| 2011-12-12T16:15:01
|
Python
|
UTF-8
|
Python
| false
| false
| 4,832
|
py
|
common.py
|
from datetime import datetime
from django.core.cache import cache
import sys
class App:
""" Interface extended by Game and by Top and Qproposal Activity"""
@classmethod
def name(kls):
return kls.__name__.lower()
@classmethod
def disabled(kls):
""" Search for a disabled config setting.
"""
from wouso.core.config.models import BoolSetting
return BoolSetting.get('setting-%s' % kls.name()).get_value() is False
@classmethod
def get_modifiers(kls):
""" Return a list of modifiers - as names (this translates to artifact names)
Player has_modifier checks if the user has an artifact with the modifier id.
"""
return []
@classmethod
def get_sidebar_widget(kls, request):
""" Return the sidebar widget, for current HttpRequest request.
This is called in interface.context_processors.sidebar """
# DEPRECATED
return None
@classmethod
def get_unread_count(kls, request):
""" Return the app-specific unread counter.
"""
return 0
@classmethod
def get_header_link(kls, request):
""" Return dictionary containing (link, text, count) for the content
to be displayed in the page header.
Called in interface.context_processors.header """
# DEPRECATED
return None
@classmethod
def get_footer_link(kls, request):
""" Return html content to be displayed in the footer
Called in interface.context_processors.footer """
# DEPRECATED
return None
@classmethod
def get_profile_actions(kls, request, player):
""" Return html content for player's profile view """
return ''
@classmethod
def get_profile_superuser_actions(kls, request, player):
""" Return html content for player's profile view
in the superuser row """
return ''
@classmethod
def get_api(kls):
""" Return a dictionary with url-regex keys, and PistonHandler values.
"""
return {}
@classmethod
def management_task(cls, datetime=lambda: datetime.now(), stdout=sys.stdout):
""" Execute maintance task, such as:
- calculate top ranks
- inactivate expired spells
- expire challenges not played
This method is called from wousocron management task, and the datetime might be faked.
"""
pass
management_task = None # Disable it by default
class Item(object):
"""
Interface for items that can and should be cached. Usually, they have a string id as the SQL key.
"""
CREATE_IF_NOT_EXISTS = False
@classmethod
def add(cls, name, **data):
if isinstance(name, cls):
name.save()
obj = name
elif isinstance(name, dict):
obj = cls.objects.get_or_create(**name)[0]
else:
obj = cls.objects.get_or_create(name=name, **data)[0]
return obj
@classmethod
def get(cls, id):
# TODO: deprecate, poor design
if isinstance(id, cls):
return id
if isinstance(id, dict):
id = id.get('id', '')
try:
return cls.objects.get(name=id)
except cls.DoesNotExist:
try:
return cls.objects.get(id=id)
            except Exception:
return None
def __str__(self):
return u'%s' % self.id
def __unicode__(self):
return self.name if hasattr(self, 'name') else str(self)
class CachedItem(Item):
"""
Interface for standard cached objects
"""
CACHE_PART = 'id'
@classmethod
def _cache_key(cls, part):
return cls.__name__ + str(part)
def _get_cache_key(self, part):
return self.__class__._cache_key(part)
def _cache_key_part(self):
return getattr(self, self.CACHE_PART)
@classmethod
def _get_fresh(cls, part):
try:
return cls.objects.get(**{cls.CACHE_PART: part})
except cls.DoesNotExist:
return None
def save(self, **kwargs):
r = super(CachedItem, self).save(**kwargs)
if hasattr(self, self.CACHE_PART) and getattr(self, self.CACHE_PART):
key = self._get_cache_key(self._cache_key_part())
cache.delete(key)
return r
def delete(self):
key = self._get_cache_key(self._cache_key_part())
cache.delete(key)
return super(CachedItem, self).delete()
@classmethod
def get(cls, part):
if isinstance(part, cls):
return part
key = cls._cache_key(part)
if key in cache:
return cache.get(key)
obj = cls._get_fresh(part)
cache.set(key, obj)
return obj
def __str__(self):
return str(getattr(self, self.CACHE_PART))
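# Illustrative usage sketch (hypothetical Django model; assumes it mixes in
# CachedItem and defines the field named by CACHE_PART):
#
#   class Artifact(CachedItem, models.Model):
#       CACHE_PART = 'name'
#       name = models.CharField(max_length=100)
#
#   Artifact.get('gold-medal')   # first call hits the database and fills the cache
#   Artifact.get('gold-medal')   # later calls are served straight from the cache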
|
02f47ef4c9841eba2fdbf77eabcfb4caf958aebb
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/subaru/__init__.py
|
26b81c84a1ea6285d76587c28143be8509df223a
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 40
|
py
|
__init__.py
|
"""Tests for the Subaru integration."""
|
c95b6cd93adf7e447b74be8bcb8b8524cb0f9370
|
2ae0b8d95d439ccfd55ea7933ad4a2994ad0f6c5
|
/tools/mo/unit_tests/mo/front/Log1p_test.py
|
ae3b23e0ae774780cd8e6cb82c612066e32e7573
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/openvino
|
38ea745a247887a4e14580dbc9fc68005e2149f9
|
e4bed7a31c9f00d8afbfcabee3f64f55496ae56a
|
refs/heads/master
| 2023-08-18T03:47:44.572979
| 2023-08-17T21:24:59
| 2023-08-17T21:24:59
| 153,097,643
| 3,953
| 1,492
|
Apache-2.0
| 2023-09-14T21:42:24
| 2018-10-15T10:54:40
|
C++
|
UTF-8
|
Python
| false
| false
| 1,637
|
py
|
Log1p_test.py
|
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from openvino.tools.mo.front.Log1p import Log1p
from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs
from unit_tests.utils.graph import build_graph
nodes_attributes = {
'placeholder': {'shape': np.array([4, 5, 6]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
# Log1p operation
'Log1p': {'kind': 'op', 'op': 'Log1p'},
# Test operation
'last': {'type': None, 'value': None, 'kind': 'op', 'op': None},
# Add and Log operations
'const': {'kind': 'op', 'op': 'Const', 'value': np.ones([1], dtype=np.float32)},
'add': {'type': 'Add', 'kind': 'op', 'op': 'Add'},
'log': {'type': 'Log', 'kind': 'op', 'op': 'Log'},
}
class TestLog1p(unittest.TestCase):
def test_log1p_test(self):
graph = build_graph(nodes_attributes,
[('placeholder', 'Log1p'),
('Log1p', 'last')
], nodes_with_edges_only=True)
graph_ref = build_graph(nodes_attributes,
[('const', 'add'),
('placeholder', 'add'),
('add', 'log'),
('log', 'last'),
], nodes_with_edges_only=True)
graph.stage = 'front'
tested_class = Log1p()
tested_class.find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True)
self.assertTrue(flag, resp)
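# The reference graph above encodes the decomposition that the Log1p pass is
# expected to perform: Log1p(x) is rewritten as Log(Add(x, 1)).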
|
65926ad29ad9d3501de6abbaeeb4beb43b2bb6a0
|
3d74f759ee48d383aa82eeff0a55864a93a001ba
|
/tools/fuchsia/interpolate_test_suite.py
|
bfa386954661487e6560a6050bf1ddb44e8bc21b
|
[
"BSD-3-Clause"
] |
permissive
|
flutter/engine
|
78be5418a9b2f7730dda9ca9fcb25b7055f3da85
|
902ece7f89d7730cc69f35e098b223cbbf4e25f1
|
refs/heads/main
| 2023-09-04T06:12:34.462953
| 2023-09-04T05:33:32
| 2023-09-04T05:33:32
| 39,211,337
| 7,090
| 6,862
|
BSD-3-Clause
| 2023-09-14T21:58:17
| 2015-07-16T17:39:56
|
C++
|
UTF-8
|
Python
| false
| false
| 806
|
py
|
interpolate_test_suite.py
|
#!/usr/bin/env python3
#
# Copyright 2013 The Flutter Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Interpolates test suite information into a cml file.
"""
from argparse import ArgumentParser
import sys
def main():
# Parse arguments.
parser = ArgumentParser()
parser.add_argument('--input', action='store', required=True)
parser.add_argument('--test-suite', action='store', required=True)
parser.add_argument('--output', action='store', required=True)
args = parser.parse_args()
# Read, interpolate, write.
with open(args.input, 'r') as i, open(args.output, 'w') as o:
o.write(i.read().replace('{{TEST_SUITE}}', args.test_suite))
return 0
if __name__ == '__main__':
sys.exit(main())
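# Example invocation (hypothetical file names):
#
#   interpolate_test_suite.py --input meta.cml.tmpl --test-suite fml_tests \
#       --output meta.cml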
|
234c8e7d4055152014d1451f369a189f25008405
|
0760fb4901a75766921a205b55686d6d6f049b30
|
/rllib/core/learner/reduce_result_dict_fn.py
|
47ca597d2b11f08d9b008afee1d65af6d4118880
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ray-project/ray
|
a4bb6940b08b59a61ef0b8e755a52d8563a2f867
|
edba68c3e7cf255d1d6479329f305adb7fa4c3ed
|
refs/heads/master
| 2023-08-31T03:36:48.164405
| 2023-08-31T03:20:38
| 2023-08-31T03:20:38
| 71,932,349
| 29,482
| 5,669
|
Apache-2.0
| 2023-09-14T21:48:14
| 2016-10-25T19:38:30
|
Python
|
UTF-8
|
Python
| false
| false
| 508
|
py
|
reduce_result_dict_fn.py
|
"""The following is set of default rllib reduction methods for ResultDicts"""
from typing import List
import numpy as np
import tree # pip install dm-tree
from ray.rllib.utils.typing import ResultDict
def _reduce_mean_results(results: List[ResultDict]) -> ResultDict:
"""Takes the average of all the leaves in the result dict
Args:
results: list of result dicts to average
Returns:
Averaged result dict
"""
return tree.map_structure(lambda *x: np.mean(x), *results)
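# Minimal check (an illustrative sketch, not part of the original module): leaves
# that share the same path are averaged element-wise across the result dicts.
if __name__ == "__main__":
    assert _reduce_mean_results([{"loss": 1.0}, {"loss": 3.0}])["loss"] == 2.0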
|
d09931910e8d21e26583f3f4fce373df4fc13309
|
2d05050d0ada29f7680b4df20c10bb85b0530e45
|
/python/tvm/runtime/script_printer.py
|
2ed2b8ddd4bcbe4a707860ea7f74256c2ed45cc1
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Zlib",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
apache/tvm
|
87cb617f9a131fa44e1693303aaddf70e7a4c403
|
d75083cd97ede706338ab413dbc964009456d01b
|
refs/heads/main
| 2023-09-04T11:24:26.263032
| 2023-09-04T07:26:00
| 2023-09-04T07:26:00
| 70,746,484
| 4,575
| 1,903
|
Apache-2.0
| 2023-09-14T19:06:33
| 2016-10-12T22:20:28
|
Python
|
UTF-8
|
Python
| false
| false
| 11,126
|
py
|
script_printer.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Configuration of TVMScript printer"""
from typing import Dict, List, Optional, Sequence
from tvm._ffi import get_global_func, register_object
from tvm.runtime import Object
from . import _ffi_node_api
from .object_path import ObjectPath
@register_object("node.PrinterConfig")
class PrinterConfig(Object):
"""Configuration of TVMScript printer"""
binding_names: Sequence[str]
show_meta: bool
ir_prefix: str
tir_prefix: str
buffer_dtype: str
int_dtype: str
float_dtype: str
verbose_expr: bool
indent_spaces: int
print_line_numbers: bool
num_context_lines: int
syntax_sugar: bool
show_object_address: bool
path_to_underline: Optional[List[ObjectPath]]
path_to_annotate: Optional[Dict[ObjectPath, str]]
obj_to_underline: Optional[List[Object]]
obj_to_annotate: Optional[Dict[Object, str]]
def __init__(
self,
*,
name: Optional[str] = None,
show_meta: bool = False,
ir_prefix: str = "I",
tir_prefix: str = "T",
buffer_dtype: str = "float32",
int_dtype: str = "int32",
float_dtype: str = "void",
verbose_expr: bool = False,
indent_spaces: int = 4,
print_line_numbers: bool = False,
num_context_lines: Optional[int] = None,
syntax_sugar: bool = True,
show_object_address: bool = True,
path_to_underline: Optional[List[ObjectPath]] = None,
path_to_annotate: Optional[Dict[ObjectPath, str]] = None,
obj_to_underline: Optional[List[Object]] = None,
obj_to_annotate: Optional[Dict[Object, str]] = None,
) -> None:
if num_context_lines is None:
num_context_lines = -1
cfg = {
"show_meta": show_meta,
"ir_prefix": ir_prefix,
"tir_prefix": tir_prefix,
"buffer_dtype": buffer_dtype,
"int_dtype": int_dtype,
"float_dtype": float_dtype,
"verbose_expr": verbose_expr,
"indent_spaces": indent_spaces,
"print_line_numbers": print_line_numbers,
"num_context_lines": num_context_lines,
"syntax_sugar": syntax_sugar,
"show_object_address": show_object_address,
"path_to_underline": path_to_underline,
"path_to_annotate": path_to_annotate,
"obj_to_underline": obj_to_underline,
"obj_to_annotate": obj_to_annotate,
}
if name is not None:
cfg["name"] = name
self.__init_handle_by_constructor__(
_ffi_node_api.PrinterConfig, cfg # type: ignore # pylint: disable=no-member
)
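# Illustrative construction (keyword arguments mirror the fields documented above):
#
#   cfg = PrinterConfig(tir_prefix="T", indent_spaces=2, print_line_numbers=True)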
def _script(obj: Object, config: PrinterConfig) -> str:
return _ffi_node_api.TVMScriptPrinterScript(obj, config) # type: ignore # pylint: disable=no-member
def _relax_script(obj: Object, config: PrinterConfig) -> str:
func = get_global_func("script.printer.ReprPrintRelax")
return func(obj, config)
class Scriptable:
"""A base class that enables the script() and show() method."""
def script(
self,
*,
name: Optional[str] = None,
show_meta: bool = False,
ir_prefix: str = "I",
tir_prefix: str = "T",
buffer_dtype: str = "float32",
int_dtype: str = "int32",
float_dtype: str = "void",
verbose_expr: bool = False,
indent_spaces: int = 4,
print_line_numbers: bool = False,
num_context_lines: int = -1,
syntax_sugar: bool = True,
show_object_address: bool = False,
path_to_underline: Optional[List[ObjectPath]] = None,
path_to_annotate: Optional[Dict[ObjectPath, str]] = None,
obj_to_underline: Optional[List[Object]] = None,
obj_to_annotate: Optional[Dict[Object, str]] = None,
) -> str:
"""Print TVM IR into TVMScript text format
Parameters
----------
name : Optional[str] = None
The name of the object
show_meta : bool = False
Whether to print the meta data of the object
ir_prefix : str = "I"
The prefix of AST nodes from tvm.ir
tir_prefix : str = "T"
The prefix of AST nodes from tvm.tir
buffer_dtype : str = "float32"
The default data type of buffer
int_dtype : str = "int32"
The default data type of integer
float_dtype : str = "void"
The default data type of float
verbose_expr : bool = False
Whether to print the detailed definition of each variable in the expression
indent_spaces : int = 4
The number of spaces for indentation
print_line_numbers : bool = False
Whether to print line numbers
num_context_lines : int = -1
The number of lines of context to print before and after the line to underline.
syntax_sugar: bool = True
Whether to output with syntax sugar, set false for complete printing.
show_object_address: bool = False
            Whether to include the object's address as part of the TVMScript name
path_to_underline : Optional[List[ObjectPath]] = None
Object path to be underlined
path_to_annotate : Optional[Dict[ObjectPath, str]] = None
Object path to be annotated
obj_to_underline : Optional[List[Object]] = None
Object to be underlined
obj_to_annotate : Optional[Dict[Object, str]] = None
Object to be annotated
Returns
-------
script : str
The TVM Script of the given TVM IR
"""
return _script(
self,
PrinterConfig(
name=name,
show_meta=show_meta,
ir_prefix=ir_prefix,
tir_prefix=tir_prefix,
buffer_dtype=buffer_dtype,
int_dtype=int_dtype,
float_dtype=float_dtype,
verbose_expr=verbose_expr,
indent_spaces=indent_spaces,
print_line_numbers=print_line_numbers,
num_context_lines=num_context_lines,
syntax_sugar=syntax_sugar,
show_object_address=show_object_address,
path_to_underline=path_to_underline,
path_to_annotate=path_to_annotate,
obj_to_underline=obj_to_underline,
obj_to_annotate=obj_to_annotate,
),
)
def show(
self,
style: Optional[str] = None,
black_format: bool = True,
*,
name: Optional[str] = None,
show_meta: bool = False,
ir_prefix: str = "I",
tir_prefix: str = "T",
buffer_dtype: str = "float32",
int_dtype: str = "int32",
float_dtype: str = "void",
verbose_expr: bool = False,
indent_spaces: int = 4,
print_line_numbers: bool = False,
num_context_lines: int = -1,
syntax_sugar: bool = True,
show_object_address: bool = True,
path_to_underline: Optional[List[ObjectPath]] = None,
path_to_annotate: Optional[Dict[ObjectPath, str]] = None,
obj_to_underline: Optional[List[Object]] = None,
obj_to_annotate: Optional[Dict[Object, str]] = None,
) -> None:
"""A sugar for print highlighted TVM script.
Parameters
----------
style : str, optional
Pygmentize printing style, auto-detected if None. See
`tvm.script.highlight.cprint` for more details.
black_format: bool
If true (default), use the formatter Black to format the TVMScript
name : Optional[str] = None
The name of the object
show_meta : bool = False
Whether to print the meta data of the object
ir_prefix : str = "I"
The prefix of AST nodes from tvm.ir
tir_prefix : str = "T"
The prefix of AST nodes from tvm.tir
buffer_dtype : str = "float32"
The default data type of buffer
int_dtype : str = "int32"
The default data type of integer
float_dtype : str = "void"
The default data type of float
verbose_expr : bool = False
Whether to print the detailed definition of each variable in the expression
indent_spaces : int = 4
The number of spaces for indentation
print_line_numbers : bool = False
Whether to print line numbers
num_context_lines : int = -1
The number of lines of context to print before and after the line to underline.
syntax_sugar: bool = True
Whether to output with syntax sugar, set false for complete printing.
show_object_address: bool = False
            Whether to include the object's address as part of the TVMScript name
path_to_underline : Optional[List[ObjectPath]] = None
Object path to be underlined
path_to_annotate : Optional[Dict[ObjectPath, str]] = None
Object path to be annotated
obj_to_underline : Optional[List[Object]] = None
Object to be underlined
obj_to_annotate : Optional[Dict[Object, str]] = None
Object to be annotated
"""
from tvm.script.highlight import ( # pylint: disable=import-outside-toplevel
cprint,
)
cprint(
self.script(
name=name,
show_meta=show_meta,
ir_prefix=ir_prefix,
tir_prefix=tir_prefix,
buffer_dtype=buffer_dtype,
int_dtype=int_dtype,
float_dtype=float_dtype,
verbose_expr=verbose_expr,
indent_spaces=indent_spaces,
print_line_numbers=print_line_numbers,
num_context_lines=num_context_lines,
syntax_sugar=syntax_sugar,
show_object_address=show_object_address,
path_to_underline=path_to_underline,
path_to_annotate=path_to_annotate,
obj_to_underline=obj_to_underline,
obj_to_annotate=obj_to_annotate,
),
style=style,
black_format=black_format,
)
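# Illustrative usage sketch (assumes ``mod`` is an object deriving from Scriptable,
# such as an IRModule):
#
#   print(mod.script(syntax_sugar=False, print_line_numbers=True))
#   mod.show()   # same text, highlighted via tvm.script.highlight.cprint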
|
0b14165234464e979ff164b3233a243899d21c7f
|
fce81b804cae23f525a5ad4370b684bf0dc531a5
|
/numpy/typing/tests/data/fail/nested_sequence.pyi
|
6301e51769fee30db50bfaf1e2777bf894166de8
|
[
"Zlib",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
numpy/numpy
|
ba2abcc1d2d46affbb6aabe5aed6407b4b57507e
|
dc2ff125493777a1084044e6cd6857a42ee323d4
|
refs/heads/main
| 2023-09-05T10:10:52.767363
| 2023-09-04T18:03:29
| 2023-09-04T18:03:29
| 908,607
| 25,725
| 11,968
|
BSD-3-Clause
| 2023-09-14T21:26:09
| 2010-09-13T23:02:39
|
Python
|
UTF-8
|
Python
| false
| false
| 427
|
pyi
|
nested_sequence.pyi
|
from collections.abc import Sequence
from numpy._typing import _NestedSequence
a: Sequence[float]
b: list[complex]
c: tuple[str, ...]
d: int
e: str
def func(a: _NestedSequence[int]) -> None:
...
reveal_type(func(a)) # E: incompatible type
reveal_type(func(b)) # E: incompatible type
reveal_type(func(c)) # E: incompatible type
reveal_type(func(d)) # E: incompatible type
reveal_type(func(e)) # E: incompatible type
|
45c17869cbfe8872b39bd915f19a811d15a7bd85
|
450916eee7580beb928ed8f387db4f0a8c1aa508
|
/src/amuse/test/suite/ext_tests/test_orbital_elements.py
|
966328f8228230fce6cbc8ceb54aae2c7f9512bb
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
amusecode/amuse
|
42095545893f5a86ea79c2a52ce54d3ce8eb204f
|
b57c1e2fda1457d5025307be105c2aa59b19b574
|
refs/heads/main
| 2023-08-31T04:50:48.880044
| 2023-08-30T12:00:20
| 2023-08-30T12:00:20
| 18,516,331
| 158
| 118
|
Apache-2.0
| 2023-08-30T12:00:22
| 2014-04-07T12:35:07
|
AMPL
|
UTF-8
|
Python
| false
| false
| 27,092
|
py
|
test_orbital_elements.py
|
import numpy
from amuse.test import amusetest
from amuse.ext.orbital_elements import (
generate_binaries,
get_orbital_elements_from_binaries,
new_binary_from_orbital_elements,
# get_orbital_elements_from_binary,
orbital_elements_for_rel_posvel_arrays,
orbital_elements,
rel_posvel_arrays_from_orbital_elements,
)
from amuse.units import units
from amuse.units import constants
from amuse.units import nbody_system
from amuse import datamodel
from numpy import random
class KeplerTests(amusetest.TestCase):
def test1(self):
mass1 = 1 | nbody_system.mass
mass2 = 1 | nbody_system.mass
binary = new_binary_from_orbital_elements(
mass1,
mass2,
1 | nbody_system.length
)
self.assertEqual(len(binary), 2)
binary.position -= binary[0].position
binary.velocity -= binary[0].velocity
self.assertAlmostRelativeEquals(
binary[0].position,
[0, 0, 0] | nbody_system.length)
self.assertAlmostRelativeEquals(
binary[1].position,
[1, 0, 0] | nbody_system.length)
self.assertAlmostRelativeEquals(
binary[0].velocity,
[0, 0, 0] | nbody_system.speed)
self.assertAlmostRelativeEquals(
binary[1].velocity,
[0, numpy.sqrt(2), 0] | nbody_system.speed)
def test2(self):
# test going around in a circular orbit
mass1 = 1 | nbody_system.mass
mass2 = 1 | nbody_system.mass
binary = new_binary_from_orbital_elements(
mass1,
mass2,
1 | nbody_system.length,
eccentricity=0,
true_anomaly=90,
)
self.assertEqual(len(binary), 2)
binary.position -= binary[0].position
binary.velocity -= binary[0].velocity
self.assertAlmostRelativeEquals(
binary[0].position,
[0, 0, 0] | nbody_system.length)
self.assertAlmostRelativeEquals(
binary[1].position,
[0, 1, 0] | nbody_system.length)
self.assertAlmostRelativeEquals(
binary[0].velocity,
[0, 0, 0] | nbody_system.speed)
self.assertAlmostRelativeEquals(
binary[1].velocity,
[-numpy.sqrt(2), 0, 0] | nbody_system.speed)
binary = new_binary_from_orbital_elements(
mass1,
mass2,
1 | nbody_system.length,
eccentricity=0,
true_anomaly=180,
)
self.assertEqual(len(binary), 2)
binary.position -= binary[0].position
binary.velocity -= binary[0].velocity
self.assertAlmostRelativeEquals(
binary[0].position, [0, 0, 0] | nbody_system.length)
self.assertAlmostRelativeEquals(
binary[1].position, [-1, 0, 0] | nbody_system.length)
self.assertAlmostRelativeEquals(
binary[0].velocity, [0, 0, 0] | nbody_system.speed)
self.assertAlmostRelativeEquals(
binary[1].velocity, [0, -numpy.sqrt(2), 0] | nbody_system.speed)
binary = new_binary_from_orbital_elements(
mass1,
mass2,
1 | nbody_system.length,
eccentricity=0,
true_anomaly=270,
)
self.assertEqual(len(binary), 2)
binary.position -= binary[0].position
binary.velocity -= binary[0].velocity
self.assertAlmostRelativeEquals(binary[0].position, [0, 0, 0] | nbody_system.length)
self.assertAlmostRelativeEquals(binary[1].position, [0, -1, 0] | nbody_system.length)
self.assertAlmostRelativeEquals(binary[0].velocity, [0, 0, 0] | nbody_system.speed)
self.assertAlmostRelativeEquals(binary[1].velocity, [numpy.sqrt(2), 0, 0] | nbody_system.speed)
binary = new_binary_from_orbital_elements(
mass1,
mass2,
1 | nbody_system.length,
eccentricity=0,
true_anomaly=45,
)
self.assertEqual(len(binary), 2)
binary.position -= binary[0].position
binary.velocity -= binary[0].velocity
self.assertAlmostRelativeEquals(binary[0].position, [0, 0, 0] | nbody_system.length)
self.assertAlmostRelativeEquals(binary[1].position, [0.5 * numpy.sqrt(2), 0.5 * numpy.sqrt(2), 0] | nbody_system.length)
self.assertAlmostRelativeEquals(binary[0].velocity, [0, 0, 0] | nbody_system.speed)
self.assertAlmostRelativeEquals(binary[1].velocity, [-1, 1, 0] | nbody_system.speed)
def test3(self):
mass1 = 1. | nbody_system.mass
mass2 = 1. | nbody_system.mass
binary = new_binary_from_orbital_elements(
mass1,
mass2,
1. | nbody_system.length,
eccentricity=0.
)
self.assertEqual(len(binary), 2)
self.assertAlmostRelativeEquals(binary[0].position, [-0.5, 0, 0] | nbody_system.length)
self.assertAlmostRelativeEquals(binary[1].position, [0.5, 0, 0] | nbody_system.length)
self.assertAlmostRelativeEquals(binary[0].velocity, [0, -1/numpy.sqrt(2), 0] | nbody_system.speed)
self.assertAlmostRelativeEquals(binary[1].velocity, [0, 1/numpy.sqrt(2), 0] | nbody_system.speed)
def test4(self):
numpy.random.seed(3456789)
N = 100
mass1 = random.random(N) | nbody_system.mass
mass2 = random.random(N) | nbody_system.mass
semi_major_axis = (-numpy.log(random.random(N))) | nbody_system.length
eccentricity = random.random(N)
true_anomaly = (360.*random.random(N)-180.) | units.deg
inclination = (180*random.random(N)) | units.deg
longitude_of_the_ascending_node = (
360*random.random(N)-180) | units.deg
argument_of_periapsis = (360*random.random(N)-180) | units.deg
for arg in zip(
mass1, mass2, semi_major_axis, eccentricity, true_anomaly,
inclination, longitude_of_the_ascending_node,
argument_of_periapsis):
arg_ = orbital_elements(
new_binary_from_orbital_elements(*arg))
for i, (copy, org) in enumerate(zip(arg_, arg)):
self.assertAlmostEqual(copy, org)
def test5(self):
numpy.random.seed(4567893)
N = 100
mass1 = random.random(N) | units.MSun
mass2 = random.random(N) | units.MSun
semi_major_axis = (-numpy.log(random.random(N))) | units.AU
eccentricity = random.random(N)
true_anomaly = (360.*random.random(N)-180.) | units.deg
inclination = (180*random.random(N)) | units.deg
longitude_of_the_ascending_node = (
360*random.random(N)-180
) | units.deg
argument_of_periapsis = (360*random.random(N)-180) | units.deg
for arg in zip(
mass1, mass2, semi_major_axis, eccentricity, true_anomaly,
inclination, longitude_of_the_ascending_node,
argument_of_periapsis):
arg_ = orbital_elements(
new_binary_from_orbital_elements(*arg, G=constants.G),
G=constants.G)
for i, (copy, org) in enumerate(zip(arg_, arg)):
self.assertAlmostEqual(copy, org)
def test6(self):
"""
testing orbital_elements_for_rel_posvel_arrays for N particles
with random orbital elements
"""
numpy.random.seed(666)
N = 100
mass_sun = 1. | units.MSun
mass1 = numpy.ones(N) * mass_sun
mass2 = numpy.zeros(N) | units.MSun
semi_major_axis = (-numpy.log(random.random(N))) | units.AU
eccentricity = random.random(N)
true_anomaly = 360.*random.random(N)-180.
inclination = 180*random.random(N)
longitude_of_the_ascending_node = 360*random.random(N)-180
argument_of_periapsis = 360*random.random(N)-180
comets = datamodel.Particles(N)
for i, arg in enumerate(zip(mass1, mass2, semi_major_axis, eccentricity, true_anomaly, inclination,
longitude_of_the_ascending_node, argument_of_periapsis)):
sun_and_comet = new_binary_from_orbital_elements(*arg, G=constants.G)
comets[i].mass = sun_and_comet[1].mass
comets[i].position = sun_and_comet[1].position
comets[i].velocity = sun_and_comet[1].velocity
semi_major_axis_ext, eccentricity_ext, ta_ext, inclination_ext, \
longitude_of_the_ascending_node_ext, argument_of_periapsis_ext = \
orbital_elements_for_rel_posvel_arrays(comets.position,
comets.velocity,
comets.mass + mass_sun,
G=constants.G)
self.assertAlmostEqual(semi_major_axis, semi_major_axis_ext)
self.assertAlmostEqual(eccentricity, eccentricity_ext)
self.assertAlmostEqual(inclination, inclination_ext)
self.assertAlmostEqual(longitude_of_the_ascending_node, longitude_of_the_ascending_node_ext)
self.assertAlmostEqual(argument_of_periapsis, argument_of_periapsis_ext)
self.assertAlmostEqual(true_anomaly, ta_ext)
def test7(self):
"""
testing orbital_elements_for_rel_posvel_arrays for the case of one particle
"""
numpy.random.seed(999)
mass1 = 0.5 | units.MSun
mass2 = 0.8 | units.MSun
sem = 12. | units.AU
ecc = 0.05
inc = 20.
lon = 10.
arg = 0.4
ta = 360.*random.random()-180.
binary = new_binary_from_orbital_elements(mass1,
mass2,
sem,
ecc,
ta,
inc,
lon,
arg,
G=constants.G)
rel_pos = binary[1].position - binary[0].position
rel_vel = binary[1].velocity - binary[0].velocity
mass_12 = binary[1].mass + binary[0].mass
sem_ext, ecc_ext, ta_ext, inc_ext, lon_ext, arg_ext = \
orbital_elements_for_rel_posvel_arrays(rel_pos, rel_vel, mass_12, G=constants.G)
self.assertAlmostEqual(sem, sem_ext)
self.assertAlmostEqual(ecc, ecc_ext)
self.assertAlmostEqual(inc, inc_ext)
self.assertAlmostEqual(lon, lon_ext)
self.assertAlmostEqual(arg, arg_ext)
self.assertAlmostEqual(ta, ta_ext)
def test8(self):
"""
testing orbital_elements_for_rel_posvel_arrays for extreme cases
"""
N = 3
mass1 = (1.2*numpy.ones(N)) | units.MSun
mass2 = (0.1, 0.05, 0.003) | units.MSun
semi_major_axis = (1., 2., 3.) | units.AU
eccentricity = (0., 0.5, 0.6)
true_anomaly = (0., 0., 66.)
inclination = (12., 0., 180.)
longitude_of_the_ascending_node = (0., 0., 0.,)
argument_of_periapsis = (0., 23., 90.)
mass12 = []
rel_position = []
rel_velocity = []
for i, arg in enumerate(zip(mass1, mass2, semi_major_axis, eccentricity, true_anomaly, inclination,
longitude_of_the_ascending_node, argument_of_periapsis)):
sun_and_comet = new_binary_from_orbital_elements(*arg, G=constants.G)
mass12.append(sun_and_comet[0].mass + sun_and_comet[1].mass)
rel_position.append(sun_and_comet[1].position - sun_and_comet[0].position)
rel_velocity.append(sun_and_comet[1].velocity - sun_and_comet[0].velocity)
# to convert lists to vector quantities
rel_pos = numpy.array([vec_i.value_in(units.AU) for vec_i in rel_position]) | units.AU
rel_vel = numpy.array([vec_i.value_in(units.kms) for vec_i in rel_velocity]) | units.kms
mass_12 = numpy.array([m_i.value_in(units.MSun) for m_i in mass12]) | units.MSun
semi_major_axis_ext, eccentricity_ext, ta_ext, inclination_ext, \
longitude_of_the_ascending_node_ext, argument_of_periapsis_ext = \
orbital_elements_for_rel_posvel_arrays(rel_pos,
rel_vel,
mass_12,
G=constants.G)
self.assertAlmostEqual(semi_major_axis, semi_major_axis_ext)
self.assertAlmostEqual(eccentricity, eccentricity_ext)
self.assertAlmostEqual(inclination, inclination_ext)
self.assertAlmostEqual(longitude_of_the_ascending_node, longitude_of_the_ascending_node_ext)
self.assertAlmostEqual(argument_of_periapsis, argument_of_periapsis_ext)
self.assertAlmostEqual(true_anomaly, ta_ext)
def test9(self):
"""
testing orbital_elements_for_rel_posvel_arrays for N particles
with random orbital elements, nbody_system
"""
numpy.random.seed(666)
N = 100
mass_sun = 1. | nbody_system.mass
mass1 = numpy.ones(N) * mass_sun
mass2 = numpy.zeros(N) | nbody_system.mass
semi_major_axis = (-numpy.log(random.random(N))) | nbody_system.length
eccentricity = random.random(N)
true_anomaly = 360.*random.random(N)-180.
inclination = 180*random.random(N)
longitude_of_the_ascending_node = 360*random.random(N)-180
argument_of_periapsis = 360*random.random(N)-180
comets = datamodel.Particles(N)
for i, arg in enumerate(zip(mass1, mass2, semi_major_axis, eccentricity, true_anomaly, inclination,
longitude_of_the_ascending_node, argument_of_periapsis)):
sun_and_comet = new_binary_from_orbital_elements(*arg, G=nbody_system.G)
comets[i].mass = sun_and_comet[1].mass
comets[i].position = sun_and_comet[1].position
comets[i].velocity = sun_and_comet[1].velocity
semi_major_axis_ext, eccentricity_ext, ta_ext, inclination_ext, \
longitude_of_the_ascending_node_ext, argument_of_periapsis_ext = \
orbital_elements_for_rel_posvel_arrays(comets.position,
comets.velocity,
comets.mass + mass_sun,
G=nbody_system.G)
self.assertAlmostEqual(semi_major_axis, semi_major_axis_ext)
self.assertAlmostEqual(eccentricity, eccentricity_ext)
self.assertAlmostEqual(inclination, inclination_ext)
self.assertAlmostEqual(longitude_of_the_ascending_node, longitude_of_the_ascending_node_ext)
self.assertAlmostEqual(argument_of_periapsis, argument_of_periapsis_ext)
self.assertAlmostEqual(true_anomaly, ta_ext)
def xtest10(self):
"""
testing orbital_elements_for_rel_posvel_arrays for N particles
with random orbital elements, unitless
"""
numpy.random.seed(666)
N = 100
mass_sun = 1.
mass1 = numpy.ones(N) * mass_sun
mass2 = numpy.zeros(N)
semi_major_axis = (-numpy.log(random.random(N)))
eccentricity = random.random(N)
true_anomaly = 360.*random.random(N)-180.
inclination = 180*random.random(N)
longitude_of_the_ascending_node = 360*random.random(N)-180
argument_of_periapsis = 360*random.random(N)-180
comets = datamodel.Particles(N)
for i, arg in enumerate(zip(mass1, mass2, semi_major_axis, eccentricity, true_anomaly, inclination,
longitude_of_the_ascending_node, argument_of_periapsis)):
sun_and_comet = new_binary_from_orbital_elements(*arg, G=1)
comets[i].mass = sun_and_comet[1].mass
comets[i].position = sun_and_comet[1].position
comets[i].velocity = sun_and_comet[1].velocity
semi_major_axis_ext, eccentricity_ext, ta_ext, inclination_ext, \
longitude_of_the_ascending_node_ext, argument_of_periapsis_ext = \
orbital_elements_for_rel_posvel_arrays(comets.position,
comets.velocity,
comets.mass + mass_sun,
G=1)
self.assertAlmostEqual(semi_major_axis, semi_major_axis_ext)
self.assertAlmostEqual(eccentricity, eccentricity_ext)
self.assertAlmostEqual(inclination, inclination_ext)
self.assertAlmostEqual(longitude_of_the_ascending_node, longitude_of_the_ascending_node_ext)
self.assertAlmostEqual(argument_of_periapsis, argument_of_periapsis_ext)
self.assertAlmostEqual(true_anomaly, ta_ext)
def test11(self):
"""
testing orbital_elements_for_rel_posvel_arrays for unbound orbits
"""
from amuse.community.kepler.interface import Kepler
numpy.random.seed(66)
N = 10
mass_sun = 1. | units.MSun
mass1 = numpy.ones(N) * mass_sun
mass2 = numpy.zeros(N) | units.MSun
semi_major_axis = -1000.*(random.random(N)) | units.AU
eccentricity = (1.+random.random(N))*10.-9.
inclination = numpy.pi*random.random(N)
longitude_of_the_ascending_node = 2.*numpy.pi*random.random(N)-numpy.pi
argument_of_periapsis = 2.*numpy.pi*random.random(N)-numpy.pi
# kepler.initialize_from_elements initializes orbits with mean_anomaly=0 and true_anomaly=0
true_anomaly = 0.*(360.*random.random(N)-180.)
comets = datamodel.Particles(N)
converter = nbody_system.nbody_to_si(1 | units.MSun, 1 | units.AU)
kepler = Kepler(converter)
kepler.initialize_code()
for i, arg in enumerate(zip(mass1, mass2, semi_major_axis, eccentricity, true_anomaly, inclination,
longitude_of_the_ascending_node, argument_of_periapsis)):
kepler.initialize_from_elements(mass=(mass1[i]+mass2[i]),
semi=semi_major_axis[i],
ecc=eccentricity[i])
ri = kepler.get_separation_vector()
vi = kepler.get_velocity_vector()
om = longitude_of_the_ascending_node[i]
w = argument_of_periapsis[i]
incl = inclination[i]
a1 = ([numpy.cos(om), -numpy.sin(om), 0.0], [numpy.sin(om), numpy.cos(om), 0.0], [0.0, 0.0, 1.0])
a2 = ([1.0, 0.0, 0.0], [0.0, numpy.cos(incl), -numpy.sin(incl)], [0.0, numpy.sin(incl), numpy.cos(incl)])
a3 = ([numpy.cos(w), -numpy.sin(w), 0.0], [numpy.sin(w), numpy.cos(w), 0.0], [0.0, 0.0, 1.0])
A = numpy.dot(numpy.dot(a1, a2), a3)
r_vec = numpy.dot(A, numpy.reshape(ri, 3, 1))
v_vec = numpy.dot(A, numpy.reshape(vi, 3, 1))
r = (0.0, 0.0, 0.0) | units.AU
v = (0.0, 0.0, 0.0) | (units.AU / units.day)
r[0] = r_vec[0]
r[1] = r_vec[1]
r[2] = r_vec[2]
v[0] = v_vec[0]
v[1] = v_vec[1]
v[2] = v_vec[2]
comets[i].mass = mass2[i]
comets[i].position = r_vec
comets[i].velocity = v_vec
kepler.stop()
semi_major_axis_ext, eccentricity_ext, ta_ext, inclination_ext, \
longitude_of_the_ascending_node_ext, argument_of_periapsis_ext = \
orbital_elements(comets.position,
comets.velocity,
comets.mass + mass_sun,
G=constants.G)
self.assertAlmostEqual(semi_major_axis, semi_major_axis_ext.in_(units.AU))
self.assertAlmostEqual(eccentricity, eccentricity_ext)
self.assertAlmostEqual(inclination, inclination_ext)
self.assertAlmostEqual(longitude_of_the_ascending_node, longitude_of_the_ascending_node_ext)
self.assertAlmostEqual(argument_of_periapsis, argument_of_periapsis_ext)
self.assertAlmostEqual(true_anomaly, ta_ext)
def test12(self):
"""
tests generating cartesian coordinates from orbital elements
"""
numpy.random.seed(1701)
mass1 = 1.0 | units.MSun
mass2 = 0.1 | units.MEarth
sem = 2. | units.AU
ecc = 0.15
inc = 11. | units.deg
lon = 30. | units.deg
arg = 0.3 | units.deg
ta = (360.*random.random()-180.) | units.deg
rel_pos, rel_vel = rel_posvel_arrays_from_orbital_elements(
mass1,
mass2,
sem,
ecc,
ta,
inc,
lon,
arg,
G=constants.G)
mass_12 = mass1 + mass2
sem_ext, ecc_ext, ta_ext, inc_ext, lon_ext, arg_ext = \
orbital_elements(
rel_pos, rel_vel, mass_12, G=constants.G)
self.assertAlmostEqual(sem, sem_ext)
self.assertAlmostEqual(ecc, ecc_ext)
self.assertAlmostEqual(inc, inc_ext)
self.assertAlmostEqual(lon, lon_ext)
self.assertAlmostEqual(arg, arg_ext)
self.assertAlmostEqual(ta, ta_ext)
def test13(self):
"""
tests generating cartesian coordinates from orbital elements
"""
numpy.random.seed(17014)
N = 5
mass1 = 1.0 | units.MSun
mass2 = numpy.ones(N) * 0.01 | units.MEarth
sem = 2. | units.AU
ecc = 0.15
inc = 11. | units.deg
lon = 30. | units.deg
arg = 0.3 | units.deg
ta = (360.*random.random()-180.) | units.deg
rel_pos, rel_vel = rel_posvel_arrays_from_orbital_elements(
mass1,
mass2,
sem,
ecc,
ta,
inc,
lon,
arg,
G=constants.G)
mass_12 = mass1 + mass2
sem_ext, ecc_ext, ta_ext, inc_ext, lon_ext, arg_ext = \
orbital_elements(
rel_pos, rel_vel, mass_12, G=constants.G)
self.assertAlmostEqual(
sem, sem_ext)
self.assertAlmostEqual(ecc, ecc_ext)
self.assertAlmostEqual(inc, inc_ext)
self.assertAlmostEqual(lon, lon_ext)
self.assertAlmostEqual(arg, arg_ext)
self.assertAlmostEqual(ta, ta_ext)
def test14(self):
"""
tests generating cartesian coordinates from orbital elements
"""
numpy.random.seed(17018)
N = 5
mass1 = numpy.ones(N) * 1.0 | units.MSun
mass2 = random.random(N) | units.MEarth
sem = numpy.array([2., 1.0, 1.1, 1.2, 4.0]) | units.AU
ecc = numpy.array([0.15, 0.01, 0.5, 0.9, 0.99])
inc = numpy.array([11., 0.1, 20, 90, 180.]) | units.deg
lon = numpy.array([31., 32., 33., 45., 30.]) | units.deg
arg = numpy.array([0.3, 11., 15., 30., 95.]) | units.deg
ta = (360.*random.random(N)-180.) | units.deg
rel_pos, rel_vel = rel_posvel_arrays_from_orbital_elements(
mass1,
mass2,
sem,
ecc,
ta,
inc,
lon,
arg,
G=constants.G)
mass_12 = mass1 + mass2
sem_ext, ecc_ext, ta_ext, inc_ext, lon_ext, arg_ext = \
orbital_elements(
rel_pos, rel_vel, mass_12, G=constants.G)
self.assertAlmostEqual(
sem.value_in(units.AU), sem_ext.value_in(units.AU))
self.assertAlmostEqual(ecc, ecc_ext)
self.assertAlmostEqual(inc, inc_ext)
self.assertAlmostEqual(lon, lon_ext)
self.assertAlmostEqual(arg, arg_ext)
self.assertAlmostEqual(ta, ta_ext)
def test15(self):
"""
testing orbital_elements_for_rel_posvel_arrays for N particles
with random orbital elements
"""
numpy.random.seed(666)
N = 100
mass_sun = 1. | units.MSun
mass1 = numpy.ones(N) * mass_sun
mass2 = numpy.zeros(N) | units.MSun
semi_major_axis = (-numpy.log(random.random(N))) | units.AU
eccentricity = random.random(N)
true_anomaly = 360.*random.random(N)-180.
inclination = 180*random.random(N)
longitude_of_the_ascending_node = 360*random.random(N)-180
argument_of_periapsis = 360*random.random(N)-180
comets = datamodel.Particles(N)
suns = datamodel.Particles(N)
for i, arg in enumerate(
zip(mass1, mass2, semi_major_axis, eccentricity, true_anomaly,
inclination, longitude_of_the_ascending_node,
argument_of_periapsis)):
sun_and_comet = new_binary_from_orbital_elements(
*arg, G=constants.G)
comets[i].mass = sun_and_comet[1].mass
comets[i].position = sun_and_comet[1].position
comets[i].velocity = sun_and_comet[1].velocity
suns.mass = mass1
suns.position = 0*comets.position
suns.velocity = 0*comets.velocity
mass1_ext, mass2_ext, semi_major_axis_ext, eccentricity_ext, ta_ext,\
inclination_ext, longitude_of_the_ascending_node_ext,\
argument_of_periapsis_ext = orbital_elements(
suns, comets, G=constants.G)
rad_to_deg = 180./numpy.pi
for i in range(N):
self.assertAlmostEqual(
semi_major_axis[i].value_in(units.AU),
semi_major_axis_ext[i].value_in(units.AU))
self.assertAlmostEqual(eccentricity[i], eccentricity_ext[i])
self.assertAlmostEqual(
inclination[i], rad_to_deg*inclination_ext[i])
self.assertAlmostEqual(
longitude_of_the_ascending_node[i],
rad_to_deg*longitude_of_the_ascending_node_ext[i])
self.assertAlmostEqual(
argument_of_periapsis[i],
rad_to_deg*argument_of_periapsis_ext[i])
self.assertAlmostEqual(true_anomaly[i], rad_to_deg*ta_ext[i])
def test16(self):
""" tests a mismatch in shape in generate_binaries """
m1 = [1]*5 | nbody_system.mass
m2 = [0]*5 | nbody_system.mass
a = [1.]*5 | nbody_system.length
ecc = numpy.array([0, 0, .99999, 0.1, 0.5])
ta = [180, 180, 20, 30, 0] | units.deg
primaries, secondaries = generate_binaries(m1, m2, a, eccentricity=ecc, true_anomaly=ta)
m1_, m2_, a_, ecc_, ta_, i_, lasc_, ap_ = get_orbital_elements_from_binaries(primaries, secondaries)
self.assertAlmostEqual(ecc, ecc_)
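# Illustrative round-trip sketch (the same pattern the tests above exercise):
#
#   binary = new_binary_from_orbital_elements(
#       1.0 | units.MSun, 1.0 | units.MEarth, 1.0 | units.AU,
#       eccentricity=0.1, G=constants.G)
#   elements = orbital_elements(binary, G=constants.G)
#   # -> masses plus the recovered semi-major axis, eccentricity, anomaly and angles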
|
820dd92848bd9c95639780605d659181566b2be7
|
94d3ef554f7931d2aad799eb6bcfa18104dc9bed
|
/ndlib/models/epidemics/__init__.py
|
e1f92c3075f2f245814b3034983648a7ccd1fa97
|
[
"BSD-2-Clause"
] |
permissive
|
GiulioRossetti/ndlib
|
76b5a86a2b521cd68197218aea235e20bcb8d1f4
|
900cb3727795c97a73e59fdb736aa736c4d17157
|
refs/heads/master
| 2023-09-03T17:09:00.069129
| 2023-07-21T13:57:29
| 2023-07-21T13:57:29
| 59,556,819
| 265
| 82
|
BSD-2-Clause
| 2023-08-17T03:54:33
| 2016-05-24T08:53:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,374
|
py
|
__init__.py
|
"""
The :mod:`ndlib.models.epidemics` module contains common network models from the epidemic research literature.
"""
from .GeneralisedThresholdModel import GeneralisedThresholdModel
from .IndependentCascadesModel import IndependentCascadesModel
from .KerteszThresholdModel import KerteszThresholdModel
from .ProfileModel import ProfileModel
from .ProfileThresholdModel import ProfileThresholdModel
from .SEIRModel import SEIRModel
from .SEISModel import SEISModel
from .SIModel import SIModel
from .SIRModel import SIRModel
from .SISModel import SISModel
from .SWIRModel import SWIRModel
from .ThresholdModel import ThresholdModel
from .ICEModel import ICEModel
from .ICPModel import ICPModel
from .ICEPModel import ICEPModel
from .GeneralThresholdModel import GeneralThresholdModel
from .UTLDRModel import UTLDRModel
from .SEIR_ct_Model import SEIRctModel
from .SEIS_ct_Model import SEISctModel
from .ForestFireModel import ForestFireModel
__all__ = [
"GeneralisedThresholdModel",
"IndependentCascadesModel",
"KerteszThresholdModel",
"ProfileModel",
"ProfileThresholdModel",
"SEIRModel",
"SEISModel",
"SIModel",
"SIRModel",
"SISModel",
"SWIRModel",
"ThresholdModel",
"ICEModel",
"ICPModel",
"GeneralThresholdModel",
"UTLDRModel",
"ICEPModel",
"SEIRctModel",
"SEISctModel",
"ForestFireModel",
]
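
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package): one way the re-exported
# models can be used. It assumes networkx and ndlib are installed; the graph
# size and parameter values below are arbitrary example numbers.
#
#   import networkx as nx
#   import ndlib.models.ModelConfig as mc
#   from ndlib.models.epidemics import SIRModel
#
#   graph = nx.erdos_renyi_graph(1000, 0.1)
#   model = SIRModel(graph)
#
#   config = mc.Configuration()
#   config.add_model_parameter("beta", 0.001)   # infection rate
#   config.add_model_parameter("gamma", 0.01)   # recovery rate
#   config.add_model_parameter("fraction_infected", 0.05)
#   model.set_initial_status(config)
#
#   iterations = model.iteration_bunch(200)     # run 200 simulation steps
# ---------------------------------------------------------------------------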
|
39c84a9b11262e2525db209e0685c881737ae2d3
|
b347bc4b850dee4a8a9a171b563a3f31230ce1c7
|
/sktime/forecasting/base/adapters/_pmdarima.py
|
d3bf2b0904e1fb765b14ac32998f01caf6ab2694
|
[
"BSD-3-Clause"
] |
permissive
|
sktime/sktime
|
5963962df338c5931a2f9f1794d1203c50ddc27e
|
70b2bfaaa597eb31bc3a1032366dcc0e1f4c8a9f
|
refs/heads/main
| 2023-08-22T18:20:08.022950
| 2023-08-22T15:24:39
| 2023-08-22T15:24:39
| 156,401,841
| 1,117
| 268
|
BSD-3-Clause
| 2023-09-14T20:44:21
| 2018-11-06T15:08:24
|
Python
|
UTF-8
|
Python
| false
| false
| 13,695
|
py
|
_pmdarima.py
|
# !/usr/bin/env python3 -u
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Implements adapter for pmdarima forecasters to be used in sktime framework."""
__author__ = ["mloning", "hyang1996", "kejsitake", "fkiraly"]
__all__ = ["_PmdArimaAdapter"]
import pandas as pd
from sktime.datatypes._utilities import get_slice
from sktime.forecasting.base import BaseForecaster
from sktime.forecasting.base._base import DEFAULT_ALPHA
class _PmdArimaAdapter(BaseForecaster):
"""Base class for interfacing pmdarima."""
_tags = {
"ignores-exogeneous-X": False,
"capability:pred_int": True,
"capability:pred_int:insample": True,
"requires-fh-in-fit": False,
"handles-missing-data": True,
"python_dependencies": "pmdarima",
}
def __init__(self):
self._forecaster = None
super().__init__()
def _instantiate_model(self):
raise NotImplementedError("abstract method")
def _fit(self, y, X, fh):
"""Fit to training data.
Parameters
----------
y : pd.Series
Target time series to which to fit the forecaster.
fh : int, list, np.array or ForecastingHorizon, optional (default=None)
            The forecaster's horizon with the steps ahead to predict.
X : pd.DataFrame, optional (default=None)
Exogenous variables are ignored
Returns
-------
self : returns an instance of self.
"""
if X is not None:
X = X.loc[y.index]
self._forecaster = self._instantiate_model()
self._forecaster.fit(y, X=X)
return self
def _update(self, y, X=None, update_params=True):
"""Update model with data.
Parameters
----------
y : pd.Series
Target time series to which to fit the forecaster.
X : pd.DataFrame, optional (default=None)
Exogenous variables are ignored
Returns
-------
self : returns an instance of self.
"""
if update_params:
if X is not None:
X = X.loc[y.index]
self._forecaster.update(y, X=X)
return self
def _predict(self, fh, X):
"""Make forecasts.
Parameters
----------
fh : array-like
            The forecaster's horizon with the steps ahead to predict.
            Default is a one-step ahead forecast, i.e. np.array([1]).
Returns
-------
y_pred : pandas.Series
Returns series of predicted values.
"""
fh_abs = fh.to_absolute(self.cutoff).to_pandas()
fh_abs_int = fh.to_absolute_int(fh_abs[0], self.cutoff).to_pandas()
end_int = fh_abs_int[-1] + 2
        # +2 because +1 for "end" (python index), +1 for starting to count at 1 in fh
if X is not None:
X = get_slice(X, start=self.cutoff[0], start_inclusive=False)
X = X.iloc[:end_int]
# distinguish between in-sample and out-of-sample prediction
fh_oos = fh.to_out_of_sample(self.cutoff)
fh_ins = fh.to_in_sample(self.cutoff)
# all values are out-of-sample
if fh.is_all_out_of_sample(self.cutoff):
y_pred = self._predict_fixed_cutoff(fh_oos, X=X)
# all values are in-sample
elif fh.is_all_in_sample(self.cutoff):
y_pred = self._predict_in_sample(fh_ins, X=X)
# both in-sample and out-of-sample values
else:
y_ins = self._predict_in_sample(fh_ins, X=X)
y_oos = self._predict_fixed_cutoff(fh_oos, X=X)
y_pred = pd.concat([y_ins, y_oos])
# ensure that name is not added nor removed
# otherwise this may upset conversion to pd.DataFrame
y_pred.name = self._y.name
y_pred.index = fh_abs
return y_pred
def _predict_in_sample(
self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA
):
"""Generate in sample predictions.
Parameters
----------
fh : array-like
            The forecaster's horizon with the steps ahead to predict.
            Default is a one-step ahead forecast, i.e. np.array([1]).
Returns
-------
y_pred : pandas.Series
Returns series of predicted values.
"""
if hasattr(self, "order"):
diff_order = self.order[1]
else:
diff_order = self._forecaster.model_.order[1]
# Initialize return objects
fh_abs = fh.to_absolute(self.cutoff).to_numpy()
fh_idx = fh.to_indexer(self.cutoff, from_cutoff=False)
y_pred = pd.Series(index=fh_abs, dtype="float64")
        # for in-sample predictions, pmdarima requires zero-based integer indices
start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]
if start < 0:
            # Can't forecast earlier than the training starting point
            raise ValueError("Can't make predictions earlier than the training starting point")
elif start < diff_order:
            # Can't forecast earlier than the ARIMA differencing order
# But we return NaN for these supposedly forecastable points
start = diff_order
if end < start:
# since we might have forced `start` to surpass `end`
end = diff_order
            # get rid of unforecastable points
fh_abs = fh_abs[fh_idx >= diff_order]
# reindex accordingly
fh_idx = fh_idx[fh_idx >= diff_order] - diff_order
result = self._forecaster.predict_in_sample(
start=start,
end=end,
X=X,
return_conf_int=False,
alpha=DEFAULT_ALPHA,
)
if return_pred_int:
pred_ints = []
for a in alpha:
pred_int = pd.DataFrame(index=fh_abs, columns=["lower", "upper"])
result = self._forecaster.predict_in_sample(
start=start,
end=end,
X=X,
return_conf_int=return_pred_int,
alpha=a,
)
pred_int.loc[fh_abs] = result[1][fh_idx, :]
pred_ints.append(pred_int)
# unpack results
result = pd.Series(result[0]).iloc[fh_idx]
y_pred.loc[fh_abs] = result
return y_pred, pred_ints
else:
result = pd.Series(result).iloc[fh_idx]
y_pred.loc[fh_abs] = result
return y_pred
def _predict_fixed_cutoff(
self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA
):
"""Make predictions out of sample.
Parameters
----------
fh : array-like
            The forecaster's horizon with the steps ahead to predict.
            Default is a one-step ahead forecast, i.e. np.array([1]).
Returns
-------
y_pred : pandas.Series
Returns series of predicted values.
"""
n_periods = int(fh.to_relative(self.cutoff)[-1])
result = self._forecaster.predict(
n_periods=n_periods,
X=X,
return_conf_int=False,
alpha=DEFAULT_ALPHA,
)
fh_abs = fh.to_absolute(self.cutoff)
fh_idx = fh.to_indexer(self.cutoff)
if return_pred_int:
pred_ints = []
for a in alpha:
result = self._forecaster.predict(
n_periods=n_periods,
X=X,
return_conf_int=True,
alpha=a,
)
pred_int = result[1]
pred_int = pd.DataFrame(
pred_int[fh_idx, :],
index=fh_abs.to_pandas(),
columns=["lower", "upper"],
)
pred_ints.append(pred_int)
return result[0], pred_ints
else:
result = pd.Series(result).iloc[fh_idx]
result.index = fh_abs.to_pandas()
return result
# todo 0.23.0 - remove legacy_interface arg and logic using it
def _predict_interval(self, fh, X, coverage, legacy_interface=False):
"""Compute/return prediction quantiles for a forecast.
private _predict_interval containing the core logic,
called from predict_interval and possibly predict_quantiles
State required:
Requires state to be "fitted".
Accesses in self:
Fitted model attributes ending in "_"
self.cutoff
Parameters
----------
fh : int, list, np.array or ForecastingHorizon
Forecasting horizon, default = y.index (in-sample forecast)
X : pd.DataFrame, optional (default=None)
Exogenous time series
coverage : list of float (guaranteed not None and floats in [0,1] interval)
nominal coverage(s) of predictive interval(s)
Returns
-------
pred_int : pd.DataFrame
Column has multi-index: first level is variable name from y in fit,
second level coverage fractions for which intervals were computed.
in the same order as in input `coverage`.
Third level is string "lower" or "upper", for lower/upper interval end.
Row index is fh. Entries are forecasts of lower/upper interval end,
for var in col index, at nominal coverage in second col index,
lower/upper depending on third col index, for the row index.
Upper/lower interval end forecasts are equivalent to
quantile forecasts at alpha = 0.5 - c/2, 0.5 + c/2 for c in coverage.
"""
        # initializing cutoff and fh related info
cutoff = self.cutoff
fh_oos = fh.to_out_of_sample(cutoff)
fh_ins = fh.to_in_sample(cutoff)
fh_is_in_sample = fh.is_all_in_sample(cutoff)
fh_is_oosample = fh.is_all_out_of_sample(cutoff)
# prepare the return DataFrame - empty with correct cols
var_names = self._get_varnames(
default="Coverage", legacy_interface=legacy_interface
)
var_name = var_names[0]
int_idx = pd.MultiIndex.from_product([var_names, coverage, ["lower", "upper"]])
pred_int = pd.DataFrame(columns=int_idx)
alpha = [1 - x for x in coverage]
kwargs = {"X": X, "return_pred_int": True, "alpha": alpha}
# all values are out-of-sample
if fh_is_oosample:
_, y_pred_int = self._predict_fixed_cutoff(fh_oos, **kwargs)
# all values are in-sample
elif fh_is_in_sample:
_, y_pred_int = self._predict_in_sample(fh_ins, **kwargs)
# if all in-sample/out-of-sample, we put y_pred_int in the required format
if fh_is_in_sample or fh_is_oosample:
# needs to be replaced, also seems duplicative, identical to part A
for intervals, a in zip(y_pred_int, coverage):
pred_int[(var_name, a, "lower")] = intervals["lower"]
pred_int[(var_name, a, "upper")] = intervals["upper"]
return pred_int
# both in-sample and out-of-sample values (we reach this line only then)
# in this case, we additionally need to concat in and out-of-sample returns
_, y_ins_pred_int = self._predict_in_sample(fh_ins, **kwargs)
_, y_oos_pred_int = self._predict_fixed_cutoff(fh_oos, **kwargs)
for ins_int, oos_int, a in zip(y_ins_pred_int, y_oos_pred_int, coverage):
pred_int[(var_name, a, "lower")] = pd.concat([ins_int, oos_int])["lower"]
pred_int[(var_name, a, "upper")] = pd.concat([ins_int, oos_int])["upper"]
return pred_int
def _get_fitted_params(self):
"""Get fitted parameters.
Returns
-------
fitted_params : dict
"""
names = self._get_fitted_param_names()
params = self._get_fitted_params_arima_res()
fitted_params = {str(name): param for name, param in zip(names, params)}
if hasattr(self._forecaster, "model_"): # AutoARIMA
fitted_params["order"] = self._forecaster.model_.order
fitted_params["seasonal_order"] = self._forecaster.model_.seasonal_order
res = self._forecaster.model_.arima_res_
elif hasattr(self._forecaster, "arima_res_"): # ARIMA
res = self._forecaster.arima_res_
else:
res = None
for name in ["aic", "aicc", "bic", "hqic"]:
fitted_params[name] = getattr(res, name, None)
return fitted_params
def _get_fitted_params_arima_res(self):
"""Return parameter values under `arima_res_`."""
if hasattr(self._forecaster, "model_"): # AutoARIMA
return self._forecaster.model_.arima_res_._results.params
elif hasattr(self._forecaster, "arima_res_"): # ARIMA
return self._forecaster.arima_res_._results.params
else:
raise NotImplementedError()
def _get_fitted_param_names(self):
"""Return parameter names under `arima_res_`."""
if hasattr(self._forecaster, "model_"): # AutoARIMA
return self._forecaster.model_.arima_res_._results.param_names
elif hasattr(self._forecaster, "arima_res_"): # ARIMA
return self._forecaster.arima_res_._results.param_names
else:
raise NotImplementedError()
def summary(self):
"""Summary of the fitted model."""
return self._forecaster.summary()
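
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of sktime): a minimal concrete adapter built
# on the base class above. The class name `_ExamplePmdArima` is hypothetical;
# `pmdarima.arima.ARIMA` and its `order`/`suppress_warnings` arguments come
# from the pmdarima package, which the adapter assumes is installed.
# ---------------------------------------------------------------------------
class _ExamplePmdArima(_PmdArimaAdapter):
    """Minimal adapter wiring a fixed-order pmdarima ARIMA model."""

    def __init__(self, order=(1, 0, 0)):
        self.order = order
        super().__init__()

    def _instantiate_model(self):
        # imported lazily, mirroring how the adapter defers to pmdarima
        from pmdarima.arima import ARIMA

        return ARIMA(order=self.order, suppress_warnings=True)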
|
08f2cc0870db9b1ea8dc8953e7fbee7c23957cfe
|
79834b0f4bd34f1110b4ae127ff6aaaa72fa864d
|
/twitch/api/search.py
|
d2cb3cc00a8d1b7fb928a259f94d7a71fdaab4f3
|
[
"MIT"
] |
permissive
|
tsifrer/python-twitch-client
|
c4040ef03549e789667c2c23cdf56d743c9bb929
|
bd2d79135f62bf45acd244079e0873f4987a6352
|
refs/heads/master
| 2022-07-26T13:34:21.578695
| 2021-03-16T09:12:45
| 2021-03-16T09:12:45
| 81,694,031
| 182
| 73
|
MIT
| 2021-10-02T19:18:21
| 2017-02-12T01:05:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,309
|
py
|
search.py
|
from twitch.api.base import TwitchAPI
from twitch.exceptions import TwitchAttributeException
from twitch.resources import Channel, Game, Stream
class Search(TwitchAPI):
def channels(self, query, limit=25, offset=0):
if limit > 100:
raise TwitchAttributeException(
"Maximum number of objects returned in one request is 100"
)
params = {"query": query, "limit": limit, "offset": offset}
response = self._request_get("search/channels", params=params)
return [Channel.construct_from(x) for x in response["channels"] or []]
def games(self, query, live=False):
params = {
"query": query,
"live": live,
}
response = self._request_get("search/games", params=params)
return [Game.construct_from(x) for x in response["games"] or []]
def streams(self, query, limit=25, offset=0, hls=None):
if limit > 100:
raise TwitchAttributeException(
"Maximum number of objects returned in one request is 100"
)
params = {"query": query, "limit": limit, "offset": offset, "hls": hls}
response = self._request_get("search/streams", params=params)
return [Stream.construct_from(x) for x in response["streams"] or []]
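
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library): typical use of the Search
# resource through the top-level client. The `TwitchClient` import and the
# placeholder client id are assumptions about the surrounding package, shown
# only to demonstrate the channels/games/streams methods defined above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from twitch import TwitchClient

    client = TwitchClient(client_id="<client id>")
    channels = client.search.channels("python", limit=10)
    games = client.search.games("zelda", live=True)
    streams = client.search.streams("speedrun", limit=5)
    print(len(channels), len(games), len(streams))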
|
b053223f5c2da325335f71b217eecfc49df9da9b
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/completion/heavyStarPropagation/lib/_pkg1/_pkg1_1/_pkg1_1_1/_pkg1_1_1_0/_pkg1_1_1_0_0/_mod1_1_1_0_0_4.py
|
9e5b4419a3960ee68d279607ac56b5eccbf18574
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
_mod1_1_1_0_0_4.py
|
name1_1_1_0_0_4_0 = None
name1_1_1_0_0_4_1 = None
name1_1_1_0_0_4_2 = None
name1_1_1_0_0_4_3 = None
name1_1_1_0_0_4_4 = None
|
62560da1feed7e41383a05c0f72745824fc5b2fb
|
8b28c83f5b915d42e0f645247234153836aa47df
|
/flaskfile/server.py
|
50e94630d5fbf2d5508272b965dae7a1b7cd0e38
|
[] |
no_license
|
1165048017/BlogLearning
|
8dae3637112f52c9a0daa619c94ed982c57744e6
|
45887e232c7441159319ad76014c90cc03709e36
|
refs/heads/master
| 2022-02-26T16:15:21.969535
| 2022-02-11T12:56:12
| 2022-02-11T12:56:12
| 253,278,238
| 214
| 68
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,653
|
py
|
server.py
|
from werkzeug.utils import secure_filename
from flask import Flask,render_template,jsonify,request,url_for,send_from_directory,redirect
import time
import os
import base64
app = Flask(__name__)
UPLOAD_FOLDER='upload'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
basedir = os.path.abspath(os.path.dirname(__file__))
ALLOWED_EXTENSIONS = set(['txt','png','jpg','xls','JPG','PNG','xlsx','gif','GIF','zip'])
# Check whether the file extension is in the allowed list
def allowed_file(filename):
return '.' in filename and filename.rsplit('.',1)[1] in ALLOWED_EXTENSIONS
# Renders the upload test page, used for testing below
@app.route('/test/upload')
def upload_test():
return render_template('upload.html')
# File upload endpoint
@app.route('/upload',methods=['POST'],strict_slashes=False)
def api_upload():
file_dir=os.path.join(basedir,app.config['UPLOAD_FOLDER'])
if not os.path.exists(file_dir):
os.makedirs(file_dir)
    f=request.files["myfile"] # get the file from the form's file field; "myfile" is the name attribute of that field
#f.save("read.jpg")
fname = request.form["name"]
    if f and allowed_file(fname): # check that the file type is allowed for upload
        f.save(os.path.join(file_dir,fname)) # save the file to the upload directory
#return redirect(url_for('uploaded_file',filename=fname))
return jsonify({"succeed":'True',"msg":"upload succeed"})
else:
return jsonify({"succeed":'False',"msg":"upload failed"})
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'],
filename)
if __name__ == '__main__':
app.run(debug=True,host='0.0.0.0',port=5555)
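
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this app): uploading a file from a client
# with the `requests` library against the endpoints above. The file name
# "demo.png" is an arbitrary example; the form fields "myfile" and "name"
# and port 5555 mirror the handler and the app.run call in this file.
#
#   import requests
#   with open("demo.png", "rb") as fh:
#       resp = requests.post(
#           "http://127.0.0.1:5555/upload",
#           files={"myfile": fh},
#           data={"name": "demo.png"},
#       )
#   print(resp.json())
# ---------------------------------------------------------------------------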
|
dd621c0c686432f40120ce85c3df617409f1086b
|
52a677b94056d3397b4a499bc9185adb68a63f05
|
/data/model/oci/test/test_oci_label.py
|
6be431f7946b6344c656859beb260d0402433b9f
|
[
"Apache-2.0"
] |
permissive
|
quay/quay
|
9b6fcff54efc0dbf7c6d91fa80676950555b6f1a
|
e400a0c22c5f89dd35d571654b13d262b1f6e3b3
|
refs/heads/master
| 2023-08-28T15:08:38.001842
| 2023-08-28T13:52:31
| 2023-08-28T13:52:31
| 220,517,730
| 2,363
| 322
|
Apache-2.0
| 2023-09-14T17:43:48
| 2019-11-08T17:37:05
|
Python
|
UTF-8
|
Python
| false
| false
| 3,447
|
py
|
test_oci_label.py
|
from test.fixtures import *
import pytest
from playhouse.test_utils import assert_query_count
from data.database import Manifest, ManifestLabel
from data.model.oci.label import (
DataModelException,
create_manifest_label,
delete_manifest_label,
get_manifest_label,
list_manifest_labels,
)
@pytest.mark.parametrize(
"key, value, source_type, expected_error",
[
("foo", "bar", "manifest", None),
pytest.param("..foo", "bar", "manifest", None, id="invalid key on manifest"),
pytest.param("..foo", "bar", "api", "is invalid", id="invalid key on api"),
],
)
def test_create_manifest_label(key, value, source_type, expected_error, initialized_db):
manifest = Manifest.get()
if expected_error:
with pytest.raises(DataModelException) as ex:
create_manifest_label(manifest, key, value, source_type)
assert ex.match(expected_error)
return
label = create_manifest_label(manifest, key, value, source_type)
labels = [
ml.label_id for ml in ManifestLabel.select().where(ManifestLabel.manifest == manifest)
]
assert label.id in labels
with assert_query_count(1):
assert label in list_manifest_labels(manifest)
assert label not in list_manifest_labels(manifest, "someprefix")
assert label in list_manifest_labels(manifest, key[0:2])
with assert_query_count(1):
assert get_manifest_label(label.uuid, manifest) == label
def test_list_manifest_labels(initialized_db):
manifest = Manifest.get()
label1 = create_manifest_label(manifest, "foo", "1", "manifest")
label2 = create_manifest_label(manifest, "bar", "2", "api")
label3 = create_manifest_label(manifest, "baz", "3", "internal")
assert label1 in list_manifest_labels(manifest)
assert label2 in list_manifest_labels(manifest)
assert label3 in list_manifest_labels(manifest)
other_manifest = Manifest.select().where(Manifest.id != manifest.id).get()
assert label1 not in list_manifest_labels(other_manifest)
assert label2 not in list_manifest_labels(other_manifest)
assert label3 not in list_manifest_labels(other_manifest)
def test_get_manifest_label(initialized_db):
found = False
for manifest_label in ManifestLabel.select():
assert (
get_manifest_label(manifest_label.label.uuid, manifest_label.manifest)
== manifest_label.label
)
assert manifest_label.label in list_manifest_labels(manifest_label.manifest)
found = True
assert found
def test_delete_manifest_label(initialized_db):
found = False
for manifest_label in list(ManifestLabel.select()):
assert (
get_manifest_label(manifest_label.label.uuid, manifest_label.manifest)
== manifest_label.label
)
assert manifest_label.label in list_manifest_labels(manifest_label.manifest)
if manifest_label.label.source_type.mutable:
assert delete_manifest_label(manifest_label.label.uuid, manifest_label.manifest)
assert manifest_label.label not in list_manifest_labels(manifest_label.manifest)
assert get_manifest_label(manifest_label.label.uuid, manifest_label.manifest) is None
else:
with pytest.raises(DataModelException):
delete_manifest_label(manifest_label.label.uuid, manifest_label.manifest)
found = True
assert found
|
e3251cb644122848f09715ccf62ca8ea0674da95
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/synapse/azure-synapse/azure/synapse/spark/aio/operations_async/_spark_session_operations_async.py
|
624bc2ae298ff66ab1d917c2fa486b3be5454952
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 25,661
|
py
|
_spark_session_operations_async.py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SparkSessionOperations:
"""SparkSessionOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.synapse.spark.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get_spark_sessions(
self,
from_parameter: Optional[int] = None,
size: Optional[int] = None,
detailed: Optional[bool] = None,
**kwargs
) -> "models.SparkSessionCollection":
"""List all spark sessions which are running under a particular spark pool.
:param from_parameter: Optional param specifying which index the list should begin from.
:type from_parameter: int
:param size: Optional param specifying the size of the returned list.
By default it is 20 and that is the maximum.
:type size: int
:param detailed: Optional query param specifying whether detailed response is returned beyond
plain livy.
:type detailed: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SparkSessionCollection, or the result of cls(response)
:rtype: ~azure.synapse.spark.models.SparkSessionCollection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.SparkSessionCollection"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.get_spark_sessions.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'livyApiVersion': self._serialize.url("self._config.livy_api_version", self._config.livy_api_version, 'str', skip_quote=True),
'sparkPoolName': self._serialize.url("self._config.spark_pool_name", self._config.spark_pool_name, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if from_parameter is not None:
query_parameters['from'] = self._serialize.query("from_parameter", from_parameter, 'int')
if size is not None:
query_parameters['size'] = self._serialize.query("size", size, 'int')
if detailed is not None:
query_parameters['detailed'] = self._serialize.query("detailed", detailed, 'bool')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = self._deserialize('SparkSessionCollection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_spark_sessions.metadata = {'url': '/sessions'} # type: ignore
async def create_spark_session(
self,
spark_session_options: "models.SparkSessionOptions",
detailed: Optional[bool] = None,
**kwargs
) -> "models.SparkSession":
"""Create new spark session.
:param spark_session_options: Livy compatible batch job request payload.
:type spark_session_options: ~azure.synapse.spark.models.SparkSessionOptions
:param detailed: Optional query param specifying whether detailed response is returned beyond
plain livy.
:type detailed: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SparkSession, or the result of cls(response)
:rtype: ~azure.synapse.spark.models.SparkSession
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.SparkSession"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.create_spark_session.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'livyApiVersion': self._serialize.url("self._config.livy_api_version", self._config.livy_api_version, 'str', skip_quote=True),
'sparkPoolName': self._serialize.url("self._config.spark_pool_name", self._config.spark_pool_name, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if detailed is not None:
query_parameters['detailed'] = self._serialize.query("detailed", detailed, 'bool')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(spark_session_options, 'SparkSessionOptions')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = self._deserialize('SparkSession', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_spark_session.metadata = {'url': '/sessions'} # type: ignore
async def get_spark_session(
self,
session_id: int,
detailed: Optional[bool] = None,
**kwargs
) -> "models.SparkSession":
"""Gets a single spark session.
:param session_id: Identifier for the session.
:type session_id: int
:param detailed: Optional query param specifying whether detailed response is returned beyond
plain livy.
:type detailed: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SparkSession, or the result of cls(response)
:rtype: ~azure.synapse.spark.models.SparkSession
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.SparkSession"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.get_spark_session.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'livyApiVersion': self._serialize.url("self._config.livy_api_version", self._config.livy_api_version, 'str', skip_quote=True),
'sparkPoolName': self._serialize.url("self._config.spark_pool_name", self._config.spark_pool_name, 'str', skip_quote=True),
'sessionId': self._serialize.url("session_id", session_id, 'int'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if detailed is not None:
query_parameters['detailed'] = self._serialize.query("detailed", detailed, 'bool')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = self._deserialize('SparkSession', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_spark_session.metadata = {'url': '/sessions/{sessionId}'} # type: ignore
async def cancel_spark_session(
self,
session_id: int,
**kwargs
) -> None:
"""Cancels a running spark session.
:param session_id: Identifier for the session.
:type session_id: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.cancel_spark_session.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'livyApiVersion': self._serialize.url("self._config.livy_api_version", self._config.livy_api_version, 'str', skip_quote=True),
'sparkPoolName': self._serialize.url("self._config.spark_pool_name", self._config.spark_pool_name, 'str', skip_quote=True),
'sessionId': self._serialize.url("session_id", session_id, 'int'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
cancel_spark_session.metadata = {'url': '/sessions/{sessionId}'} # type: ignore
async def reset_spark_session_timeout(
self,
session_id: int,
**kwargs
) -> None:
"""Sends a keep alive call to the current session to reset the session timeout.
:param session_id: Identifier for the session.
:type session_id: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.reset_spark_session_timeout.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'livyApiVersion': self._serialize.url("self._config.livy_api_version", self._config.livy_api_version, 'str', skip_quote=True),
'sparkPoolName': self._serialize.url("self._config.spark_pool_name", self._config.spark_pool_name, 'str', skip_quote=True),
'sessionId': self._serialize.url("session_id", session_id, 'int'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
reset_spark_session_timeout.metadata = {'url': '/sessions/{sessionId}/reset-timeout'} # type: ignore
async def get_spark_statements(
self,
session_id: int,
**kwargs
) -> "models.SparkStatementCollection":
"""Gets a list of statements within a spark session.
:param session_id: Identifier for the session.
:type session_id: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SparkStatementCollection, or the result of cls(response)
:rtype: ~azure.synapse.spark.models.SparkStatementCollection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.SparkStatementCollection"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.get_spark_statements.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'livyApiVersion': self._serialize.url("self._config.livy_api_version", self._config.livy_api_version, 'str', skip_quote=True),
'sparkPoolName': self._serialize.url("self._config.spark_pool_name", self._config.spark_pool_name, 'str', skip_quote=True),
'sessionId': self._serialize.url("session_id", session_id, 'int'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = self._deserialize('SparkStatementCollection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_spark_statements.metadata = {'url': '/sessions/{sessionId}/statements'} # type: ignore
async def create_spark_statement(
self,
session_id: int,
spark_statement_options: "models.SparkStatementOptions",
**kwargs
) -> "models.SparkStatement":
"""Create statement within a spark session.
:param session_id: Identifier for the session.
:type session_id: int
:param spark_statement_options: Livy compatible batch job request payload.
:type spark_statement_options: ~azure.synapse.spark.models.SparkStatementOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SparkStatement, or the result of cls(response)
:rtype: ~azure.synapse.spark.models.SparkStatement
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.SparkStatement"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.create_spark_statement.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'livyApiVersion': self._serialize.url("self._config.livy_api_version", self._config.livy_api_version, 'str', skip_quote=True),
'sparkPoolName': self._serialize.url("self._config.spark_pool_name", self._config.spark_pool_name, 'str', skip_quote=True),
'sessionId': self._serialize.url("session_id", session_id, 'int'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(spark_statement_options, 'SparkStatementOptions')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = self._deserialize('SparkStatement', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_spark_statement.metadata = {'url': '/sessions/{sessionId}/statements'} # type: ignore
async def get_spark_statement(
self,
session_id: int,
statement_id: int,
**kwargs
) -> "models.SparkStatement":
"""Gets a single statement within a spark session.
:param session_id: Identifier for the session.
:type session_id: int
:param statement_id: Identifier for the statement.
:type statement_id: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SparkStatement, or the result of cls(response)
:rtype: ~azure.synapse.spark.models.SparkStatement
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.SparkStatement"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.get_spark_statement.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'livyApiVersion': self._serialize.url("self._config.livy_api_version", self._config.livy_api_version, 'str', skip_quote=True),
'sparkPoolName': self._serialize.url("self._config.spark_pool_name", self._config.spark_pool_name, 'str', skip_quote=True),
'sessionId': self._serialize.url("session_id", session_id, 'int'),
'statementId': self._serialize.url("statement_id", statement_id, 'int'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = self._deserialize('SparkStatement', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_spark_statement.metadata = {'url': '/sessions/{sessionId}/statements/{statementId}'} # type: ignore
async def cancel_spark_statement(
self,
session_id: int,
statement_id: int,
**kwargs
) -> "models.SparkStatementCancellationResult":
"""Kill a statement within a session.
:param session_id: Identifier for the session.
:type session_id: int
:param statement_id: Identifier for the statement.
:type statement_id: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SparkStatementCancellationResult, or the result of cls(response)
:rtype: ~azure.synapse.spark.models.SparkStatementCancellationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.SparkStatementCancellationResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.cancel_spark_statement.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'livyApiVersion': self._serialize.url("self._config.livy_api_version", self._config.livy_api_version, 'str', skip_quote=True),
'sparkPoolName': self._serialize.url("self._config.spark_pool_name", self._config.spark_pool_name, 'str', skip_quote=True),
'sessionId': self._serialize.url("session_id", session_id, 'int'),
'statementId': self._serialize.url("statement_id", statement_id, 'int'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = self._deserialize('SparkStatementCancellationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
cancel_spark_statement.metadata = {'url': '/sessions/{sessionId}/statements/{statementId}/cancel'} # type: ignore
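
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated client): one possible call
# pattern against the operations defined above. The `sessions`, `id` and
# `state` attributes on the returned models are assumptions about the
# generated azure.synapse.spark model classes, shown only for illustration.
# ---------------------------------------------------------------------------
async def _example_list_running_sessions(ops: "SparkSessionOperations") -> None:
    """Print the id and state of every session returned by get_spark_sessions."""
    collection = await ops.get_spark_sessions(detailed=True)
    for session in collection.sessions or []:
        print(session.id, session.state)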
|
10b4c17595b09b8f84e4047edde3c0bd45ab8477
|
2ae0b8d95d439ccfd55ea7933ad4a2994ad0f6c5
|
/tools/mo/openvino/tools/mo/middle/AttributedTileNormalizer.py
|
c889e73b5f87b07ce4da4bb7197febbe37cbfbe0
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/openvino
|
38ea745a247887a4e14580dbc9fc68005e2149f9
|
e4bed7a31c9f00d8afbfcabee3f64f55496ae56a
|
refs/heads/master
| 2023-08-18T03:47:44.572979
| 2023-08-17T21:24:59
| 2023-08-17T21:24:59
| 153,097,643
| 3,953
| 1,492
|
Apache-2.0
| 2023-09-14T21:42:24
| 2018-10-15T10:54:40
|
C++
|
UTF-8
|
Python
| false
| false
| 1,436
|
py
|
AttributedTileNormalizer.py
|
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.middle.replacement import MiddleReplacementPattern
from openvino.tools.mo.ops.const import Const
from openvino.tools.mo.ops.tile import Tile
class AttributedTileNormalizer(MiddleReplacementPattern):
enabled = True
@staticmethod
def pattern():
return dict(
nodes=[
('tile', dict(op='AttributedTile', axis=lambda x: x is not None, tiles=lambda x: x is not None))],
edges=[]
)
def replace_pattern(self, graph: Graph, match: dict):
node = match['tile']
name = node.soft_get('name', node.id)
axis = node.axis
tiles = node.tiles
input_shape = node.in_port(0).data.get_shape()
assert input_shape is not None
tiles_input_value = int64_array(np.ones(input_shape.size))
tiles_input_value[axis] = tiles
const = Const(graph, {'value': tiles_input_value, 'name': name + '/tiles'}).create_node()
tile = Tile(graph, {'name': name}).create_node()
node.out_port(0).get_connection().set_source(tile.out_port(0))
node.in_port(0).get_connection().set_destination(tile.in_port(0))
const.out_port(0).connect(tile.in_port(1))
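
# ---------------------------------------------------------------------------
# Worked example (assumed shapes, for illustration only): for a data input of
# shape [2, 3, 4], an AttributedTile node with axis=1 and tiles=5 is rewritten
# by the pass above into Tile(data, Const([1, 5, 1])), i.e. the per-axis
# repeat count becomes an explicit second input instead of node attributes.
# ---------------------------------------------------------------------------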
|
a1e7f71fede19bf663a5ceb19aa3541519d8f284
|
5da5473ff3026165a47f98744bac82903cf008e0
|
/packages/google-cloud-datacatalog-lineage/google/cloud/datacatalog/lineage_v1/services/lineage/transports/rest.py
|
0d855cc91e907f005bc04fc0881a5cab4f878994
|
[
"Apache-2.0"
] |
permissive
|
googleapis/google-cloud-python
|
ed61a5f03a476ab6053870f4da7bc5534e25558b
|
93c4e63408c65129422f65217325f4e7d41f7edf
|
refs/heads/main
| 2023-09-04T09:09:07.852632
| 2023-08-31T22:49:26
| 2023-08-31T22:49:26
| 16,316,451
| 2,792
| 917
|
Apache-2.0
| 2023-09-14T21:45:18
| 2014-01-28T15:51:47
|
Python
|
UTF-8
|
Python
| false
| false
| 103,864
|
py
|
rest.py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import dataclasses
import json # type: ignore
import re
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import warnings
from google.api_core import (
gapic_v1,
operations_v1,
path_template,
rest_helpers,
rest_streaming,
)
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.transport.requests import AuthorizedSession # type: ignore
from google.longrunning import operations_pb2
from google.protobuf import json_format
import grpc # type: ignore
from requests import __version__ as requests_version
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from google.cloud.datacatalog.lineage_v1.types import lineage
from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO
from .base import LineageTransport
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
grpc_version=None,
rest_version=requests_version,
)
class LineageRestInterceptor:
"""Interceptor for Lineage.
Interceptors are used to manipulate requests, request metadata, and responses
in arbitrary ways.
Example use cases include:
* Logging
* Verifying requests according to service or custom semantics
* Stripping extraneous information from responses
These use cases and more can be enabled by injecting an
instance of a custom subclass when constructing the LineageRestTransport.
.. code-block:: python
class MyCustomLineageInterceptor(LineageRestInterceptor):
def pre_batch_search_link_processes(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_batch_search_link_processes(self, response):
logging.log(f"Received response: {response}")
return response
def pre_create_lineage_event(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_create_lineage_event(self, response):
logging.log(f"Received response: {response}")
return response
def pre_create_process(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_create_process(self, response):
logging.log(f"Received response: {response}")
return response
def pre_create_run(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_create_run(self, response):
logging.log(f"Received response: {response}")
return response
def pre_delete_lineage_event(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def pre_delete_process(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_delete_process(self, response):
logging.log(f"Received response: {response}")
return response
def pre_delete_run(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_delete_run(self, response):
logging.log(f"Received response: {response}")
return response
def pre_get_lineage_event(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_get_lineage_event(self, response):
logging.log(f"Received response: {response}")
return response
def pre_get_process(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_get_process(self, response):
logging.log(f"Received response: {response}")
return response
def pre_get_run(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_get_run(self, response):
logging.log(f"Received response: {response}")
return response
def pre_list_lineage_events(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_list_lineage_events(self, response):
logging.log(f"Received response: {response}")
return response
def pre_list_processes(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_list_processes(self, response):
logging.log(f"Received response: {response}")
return response
def pre_list_runs(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_list_runs(self, response):
logging.log(f"Received response: {response}")
return response
def pre_search_links(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_search_links(self, response):
logging.log(f"Received response: {response}")
return response
def pre_update_process(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_update_process(self, response):
logging.log(f"Received response: {response}")
return response
def pre_update_run(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_update_run(self, response):
logging.log(f"Received response: {response}")
return response
transport = LineageRestTransport(interceptor=MyCustomLineageInterceptor())
client = LineageClient(transport=transport)
"""
def pre_batch_search_link_processes(
self,
request: lineage.BatchSearchLinkProcessesRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[lineage.BatchSearchLinkProcessesRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for batch_search_link_processes
Override in a subclass to manipulate the request or metadata
before they are sent to the Lineage server.
"""
return request, metadata
def post_batch_search_link_processes(
self, response: lineage.BatchSearchLinkProcessesResponse
) -> lineage.BatchSearchLinkProcessesResponse:
"""Post-rpc interceptor for batch_search_link_processes
Override in a subclass to manipulate the response
after it is returned by the Lineage server but before
it is returned to user code.
"""
return response
def pre_create_lineage_event(
self,
request: lineage.CreateLineageEventRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[lineage.CreateLineageEventRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for create_lineage_event
Override in a subclass to manipulate the request or metadata
before they are sent to the Lineage server.
"""
return request, metadata
def post_create_lineage_event(
self, response: lineage.LineageEvent
) -> lineage.LineageEvent:
"""Post-rpc interceptor for create_lineage_event
Override in a subclass to manipulate the response
after it is returned by the Lineage server but before
it is returned to user code.
"""
return response
def pre_create_process(
self, request: lineage.CreateProcessRequest, metadata: Sequence[Tuple[str, str]]
) -> Tuple[lineage.CreateProcessRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for create_process
Override in a subclass to manipulate the request or metadata
before they are sent to the Lineage server.
"""
return request, metadata
def post_create_process(self, response: lineage.Process) -> lineage.Process:
"""Post-rpc interceptor for create_process
Override in a subclass to manipulate the response
after it is returned by the Lineage server but before
it is returned to user code.
"""
return response
def pre_create_run(
self, request: lineage.CreateRunRequest, metadata: Sequence[Tuple[str, str]]
) -> Tuple[lineage.CreateRunRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for create_run
Override in a subclass to manipulate the request or metadata
before they are sent to the Lineage server.
"""
return request, metadata
def post_create_run(self, response: lineage.Run) -> lineage.Run:
"""Post-rpc interceptor for create_run
Override in a subclass to manipulate the response
after it is returned by the Lineage server but before
it is returned to user code.
"""
return response
def pre_delete_lineage_event(
self,
request: lineage.DeleteLineageEventRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[lineage.DeleteLineageEventRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for delete_lineage_event
Override in a subclass to manipulate the request or metadata
before they are sent to the Lineage server.
"""
return request, metadata
def pre_delete_process(
self, request: lineage.DeleteProcessRequest, metadata: Sequence[Tuple[str, str]]
) -> Tuple[lineage.DeleteProcessRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for delete_process
Override in a subclass to manipulate the request or metadata
before they are sent to the Lineage server.
"""
return request, metadata
def post_delete_process(
self, response: operations_pb2.Operation
) -> operations_pb2.Operation:
"""Post-rpc interceptor for delete_process
Override in a subclass to manipulate the response
after it is returned by the Lineage server but before
it is returned to user code.
"""
return response
def pre_delete_run(
self, request: lineage.DeleteRunRequest, metadata: Sequence[Tuple[str, str]]
) -> Tuple[lineage.DeleteRunRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for delete_run
Override in a subclass to manipulate the request or metadata
before they are sent to the Lineage server.
"""
return request, metadata
def post_delete_run(
self, response: operations_pb2.Operation
) -> operations_pb2.Operation:
"""Post-rpc interceptor for delete_run
Override in a subclass to manipulate the response
after it is returned by the Lineage server but before
it is returned to user code.
"""
return response
def pre_get_lineage_event(
self,
request: lineage.GetLineageEventRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[lineage.GetLineageEventRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for get_lineage_event
Override in a subclass to manipulate the request or metadata
before they are sent to the Lineage server.
"""
return request, metadata
def post_get_lineage_event(
self, response: lineage.LineageEvent
) -> lineage.LineageEvent:
"""Post-rpc interceptor for get_lineage_event
Override in a subclass to manipulate the response
after it is returned by the Lineage server but before
it is returned to user code.
"""
return response
def pre_get_process(
self, request: lineage.GetProcessRequest, metadata: Sequence[Tuple[str, str]]
) -> Tuple[lineage.GetProcessRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for get_process
Override in a subclass to manipulate the request or metadata
before they are sent to the Lineage server.
"""
return request, metadata
def post_get_process(self, response: lineage.Process) -> lineage.Process:
"""Post-rpc interceptor for get_process
Override in a subclass to manipulate the response
after it is returned by the Lineage server but before
it is returned to user code.
"""
return response
def pre_get_run(
self, request: lineage.GetRunRequest, metadata: Sequence[Tuple[str, str]]
) -> Tuple[lineage.GetRunRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for get_run
Override in a subclass to manipulate the request or metadata
before they are sent to the Lineage server.
"""
return request, metadata
def post_get_run(self, response: lineage.Run) -> lineage.Run:
"""Post-rpc interceptor for get_run
Override in a subclass to manipulate the response
after it is returned by the Lineage server but before
it is returned to user code.
"""
return response
def pre_list_lineage_events(
self,
request: lineage.ListLineageEventsRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[lineage.ListLineageEventsRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for list_lineage_events
Override in a subclass to manipulate the request or metadata
before they are sent to the Lineage server.
"""
return request, metadata
def post_list_lineage_events(
self, response: lineage.ListLineageEventsResponse
) -> lineage.ListLineageEventsResponse:
"""Post-rpc interceptor for list_lineage_events
Override in a subclass to manipulate the response
after it is returned by the Lineage server but before
it is returned to user code.
"""
return response
def pre_list_processes(
self, request: lineage.ListProcessesRequest, metadata: Sequence[Tuple[str, str]]
) -> Tuple[lineage.ListProcessesRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for list_processes
Override in a subclass to manipulate the request or metadata
before they are sent to the Lineage server.
"""
return request, metadata
def post_list_processes(
self, response: lineage.ListProcessesResponse
) -> lineage.ListProcessesResponse:
"""Post-rpc interceptor for list_processes
Override in a subclass to manipulate the response
after it is returned by the Lineage server but before
it is returned to user code.
"""
return response
def pre_list_runs(
self, request: lineage.ListRunsRequest, metadata: Sequence[Tuple[str, str]]
) -> Tuple[lineage.ListRunsRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for list_runs
Override in a subclass to manipulate the request or metadata
before they are sent to the Lineage server.
"""
return request, metadata
def post_list_runs(
self, response: lineage.ListRunsResponse
) -> lineage.ListRunsResponse:
"""Post-rpc interceptor for list_runs
Override in a subclass to manipulate the response
after it is returned by the Lineage server but before
it is returned to user code.
"""
return response
def pre_search_links(
self, request: lineage.SearchLinksRequest, metadata: Sequence[Tuple[str, str]]
) -> Tuple[lineage.SearchLinksRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for search_links
Override in a subclass to manipulate the request or metadata
before they are sent to the Lineage server.
"""
return request, metadata
def post_search_links(
self, response: lineage.SearchLinksResponse
) -> lineage.SearchLinksResponse:
"""Post-rpc interceptor for search_links
Override in a subclass to manipulate the response
after it is returned by the Lineage server but before
it is returned to user code.
"""
return response
def pre_update_process(
self, request: lineage.UpdateProcessRequest, metadata: Sequence[Tuple[str, str]]
) -> Tuple[lineage.UpdateProcessRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for update_process
Override in a subclass to manipulate the request or metadata
before they are sent to the Lineage server.
"""
return request, metadata
def post_update_process(self, response: lineage.Process) -> lineage.Process:
"""Post-rpc interceptor for update_process
Override in a subclass to manipulate the response
after it is returned by the Lineage server but before
it is returned to user code.
"""
return response
def pre_update_run(
self, request: lineage.UpdateRunRequest, metadata: Sequence[Tuple[str, str]]
) -> Tuple[lineage.UpdateRunRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for update_run
Override in a subclass to manipulate the request or metadata
before they are sent to the Lineage server.
"""
return request, metadata
def post_update_run(self, response: lineage.Run) -> lineage.Run:
"""Post-rpc interceptor for update_run
Override in a subclass to manipulate the response
after it is returned by the Lineage server but before
it is returned to user code.
"""
return response
def pre_cancel_operation(
self,
request: operations_pb2.CancelOperationRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[operations_pb2.CancelOperationRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for cancel_operation
Override in a subclass to manipulate the request or metadata
before they are sent to the Lineage server.
"""
return request, metadata
def post_cancel_operation(self, response: None) -> None:
"""Post-rpc interceptor for cancel_operation
Override in a subclass to manipulate the response
after it is returned by the Lineage server but before
it is returned to user code.
"""
return response
def pre_delete_operation(
self,
request: operations_pb2.DeleteOperationRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for delete_operation
Override in a subclass to manipulate the request or metadata
before they are sent to the Lineage server.
"""
return request, metadata
def post_delete_operation(self, response: None) -> None:
"""Post-rpc interceptor for delete_operation
Override in a subclass to manipulate the response
after it is returned by the Lineage server but before
it is returned to user code.
"""
return response
def pre_get_operation(
self,
request: operations_pb2.GetOperationRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[operations_pb2.GetOperationRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for get_operation
Override in a subclass to manipulate the request or metadata
before they are sent to the Lineage server.
"""
return request, metadata
def post_get_operation(
self, response: operations_pb2.Operation
) -> operations_pb2.Operation:
"""Post-rpc interceptor for get_operation
Override in a subclass to manipulate the response
after it is returned by the Lineage server but before
it is returned to user code.
"""
return response
def pre_list_operations(
self,
request: operations_pb2.ListOperationsRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[operations_pb2.ListOperationsRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for list_operations
Override in a subclass to manipulate the request or metadata
before they are sent to the Lineage server.
"""
return request, metadata
def post_list_operations(
self, response: operations_pb2.ListOperationsResponse
) -> operations_pb2.ListOperationsResponse:
"""Post-rpc interceptor for list_operations
Override in a subclass to manipulate the response
after it is returned by the Lineage server but before
it is returned to user code.
"""
return response
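# A hedged sketch of how the hooks above are typically used: subclass the
# interceptor and override only the pre_*/post_* methods you care about. The
# class name and logging behaviour below are illustrative assumptions, not part
# of the generated API surface.
#
#     import logging
#
#     class LoggingLineageRestInterceptor(LineageRestInterceptor):
#         def pre_create_run(self, request, metadata):
#             logging.getLogger(__name__).debug("CreateRun request: %s", request)
#             return request, metadata
#
#         def post_create_run(self, response):
#             logging.getLogger(__name__).debug("CreateRun response: %s", response)
#             return response
#
# An instance of such a subclass can be passed to LineageRestTransport via the
# ``interceptor`` constructor argument.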
@dataclasses.dataclass
class LineageRestStub:
_session: AuthorizedSession
_host: str
_interceptor: LineageRestInterceptor
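# LineageRestStub simply bundles the authorized HTTP session, resolved host, and
# interceptor that every per-RPC stub class defined below needs; each _<Rpc>
# stub subclasses it and reads these fields as self._session, self._host, and
# self._interceptor.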
class LineageRestTransport(LineageTransport):
"""REST backend transport for Lineage.
Lineage is used to track data flows between assets over time. You
can create
[LineageEvents][google.cloud.datacatalog.lineage.v1.LineageEvent] to
record lineage between multiple sources and a single target, for
example, when table data is based on data from multiple tables.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends JSON representations of protocol buffers over HTTP/1.1
"""
def __init__(
self,
*,
host: str = "datalineage.googleapis.com",
credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
url_scheme: str = "https",
interceptor: Optional[LineageRestInterceptor] = None,
api_audience: Optional[str] = None,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
certificate to configure mutual TLS HTTP channel. It is ignored
if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you are developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
url_scheme: the protocol scheme for the API endpoint. Normally
"https", but for testing or local servers,
"http" can be specified.
"""
# Run the base constructor
# TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
# TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
# credentials object
maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
if maybe_url_match is None:
raise ValueError(
f"Unexpected hostname structure: {host}"
) # pragma: NO COVER
url_match_items = maybe_url_match.groupdict()
host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
super().__init__(
host=host,
credentials=credentials,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
api_audience=api_audience,
)
self._session = AuthorizedSession(
self._credentials, default_host=self.DEFAULT_HOST
)
self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None
if client_cert_source_for_mtls:
self._session.configure_mtls_channel(client_cert_source_for_mtls)
self._interceptor = interceptor or LineageRestInterceptor()
self._prep_wrapped_messages(client_info)
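    # Hedged usage sketch: this transport is normally constructed indirectly by
    # the Lineage client, but it can also be built directly. The service-account
    # key path below is a placeholder assumption for illustration only.
    #
    #     from google.oauth2 import service_account
    #
    #     creds = service_account.Credentials.from_service_account_file(
    #         "path/to/key.json"
    #     )
    #     transport = LineageRestTransport(
    #         credentials=creds,
    #         interceptor=LineageRestInterceptor(),
    #     )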
@property
def operations_client(self) -> operations_v1.AbstractOperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Only create a new client if we do not already have one.
if self._operations_client is None:
http_options: Dict[str, List[Dict[str, str]]] = {
"google.longrunning.Operations.CancelOperation": [
{
"method": "post",
"uri": "/v1/{name=projects/*/locations/*/operations/*}:cancel",
"body": "*",
},
],
"google.longrunning.Operations.DeleteOperation": [
{
"method": "delete",
"uri": "/v1/{name=projects/*/locations/*/operations/*}",
},
],
"google.longrunning.Operations.GetOperation": [
{
"method": "get",
"uri": "/v1/{name=projects/*/locations/*/operations/*}",
},
],
"google.longrunning.Operations.ListOperations": [
{
"method": "get",
"uri": "/v1/{name=projects/*/locations/*}/operations",
},
],
}
rest_transport = operations_v1.OperationsRestTransport(
host=self._host,
# use the credentials which are saved
credentials=self._credentials,
scopes=self._scopes,
http_options=http_options,
path_prefix="v1",
)
self._operations_client = operations_v1.AbstractOperationsClient(
transport=rest_transport
)
# Return the client from cache.
return self._operations_client
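    # Hedged sketch: the client returned above can poll the long-running
    # operations produced by delete_process/delete_run. The operation handle
    # below is a placeholder.
    #
    #     op = transport.delete_process(lineage.DeleteProcessRequest(name=...))
    #     latest = transport.operations_client.get_operation(name=op.name)
    #     if latest.done:
    #         ...  # inspect latest.error or latest.response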
class _BatchSearchLinkProcesses(LineageRestStub):
def __hash__(self):
return hash("BatchSearchLinkProcesses")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: lineage.BatchSearchLinkProcessesRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> lineage.BatchSearchLinkProcessesResponse:
r"""Call the batch search link
processes method over HTTP.
Args:
request (~.lineage.BatchSearchLinkProcessesRequest):
The request object. Request message for
[BatchSearchLinkProcesses][google.cloud.datacatalog.lineage.v1.Lineage.BatchSearchLinkProcesses].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.lineage.BatchSearchLinkProcessesResponse:
Response message for
[BatchSearchLinkProcesses][google.cloud.datacatalog.lineage.v1.Lineage.BatchSearchLinkProcesses].
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/v1/{parent=projects/*/locations/*}:batchSearchLinkProcesses",
"body": "*",
},
]
request, metadata = self._interceptor.pre_batch_search_link_processes(
request, metadata
)
pb_request = lineage.BatchSearchLinkProcessesRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = lineage.BatchSearchLinkProcessesResponse()
pb_resp = lineage.BatchSearchLinkProcessesResponse.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_batch_search_link_processes(resp)
return resp
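    # Every stub below follows the same five-step flow shown above: run the
    # matching pre_* interceptor hook, transcode the request onto the declared
    # HTTP rule, JSON-serialize the body and query params, issue the call on the
    # authorized session, then parse the response and run the post_* hook.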
class _CreateLineageEvent(LineageRestStub):
def __hash__(self):
return hash("CreateLineageEvent")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: lineage.CreateLineageEventRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> lineage.LineageEvent:
r"""Call the create lineage event method over HTTP.
Args:
request (~.lineage.CreateLineageEventRequest):
The request object. Request message for
[CreateLineageEvent][google.cloud.datacatalog.lineage.v1.CreateLineageEvent].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.lineage.LineageEvent:
A lineage event represents an
operation on assets. Within the
operation, the data flows from the
source to the target defined in the
links field.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/v1/{parent=projects/*/locations/*/processes/*/runs/*}/lineageEvents",
"body": "lineage_event",
},
]
request, metadata = self._interceptor.pre_create_lineage_event(
request, metadata
)
pb_request = lineage.CreateLineageEventRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = lineage.LineageEvent()
pb_resp = lineage.LineageEvent.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_create_lineage_event(resp)
return resp
class _CreateProcess(LineageRestStub):
def __hash__(self):
return hash("CreateProcess")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: lineage.CreateProcessRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> lineage.Process:
r"""Call the create process method over HTTP.
Args:
request (~.lineage.CreateProcessRequest):
The request object. Request message for
[CreateProcess][google.cloud.datacatalog.lineage.v1.CreateProcess].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.lineage.Process:
A process is the definition of a data
transformation operation.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/v1/{parent=projects/*/locations/*}/processes",
"body": "process",
},
]
request, metadata = self._interceptor.pre_create_process(request, metadata)
pb_request = lineage.CreateProcessRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = lineage.Process()
pb_resp = lineage.Process.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_create_process(resp)
return resp
class _CreateRun(LineageRestStub):
def __hash__(self):
return hash("CreateRun")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: lineage.CreateRunRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> lineage.Run:
r"""Call the create run method over HTTP.
Args:
request (~.lineage.CreateRunRequest):
The request object. Request message for
[CreateRun][google.cloud.datacatalog.lineage.v1.CreateRun].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.lineage.Run:
A lineage run represents an execution
of a process that creates lineage
events.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/v1/{parent=projects/*/locations/*/processes/*}/runs",
"body": "run",
},
]
request, metadata = self._interceptor.pre_create_run(request, metadata)
pb_request = lineage.CreateRunRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = lineage.Run()
pb_resp = lineage.Run.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_create_run(resp)
return resp
class _DeleteLineageEvent(LineageRestStub):
def __hash__(self):
return hash("DeleteLineageEvent")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: lineage.DeleteLineageEventRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
):
r"""Call the delete lineage event method over HTTP.
Args:
request (~.lineage.DeleteLineageEventRequest):
The request object. Request message for
[DeleteLineageEvent][google.cloud.datacatalog.lineage.v1.DeleteLineageEvent].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
http_options: List[Dict[str, str]] = [
{
"method": "delete",
"uri": "/v1/{name=projects/*/locations/*/processes/*/runs/*/lineageEvents/*}",
},
]
request, metadata = self._interceptor.pre_delete_lineage_event(
request, metadata
)
pb_request = lineage.DeleteLineageEventRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
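    # DeleteLineageEvent maps to google.protobuf.Empty, so on success there is
    # no body to parse and the call simply returns None after the status check
    # above.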
class _DeleteProcess(LineageRestStub):
def __hash__(self):
return hash("DeleteProcess")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: lineage.DeleteProcessRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.Operation:
r"""Call the delete process method over HTTP.
Args:
request (~.lineage.DeleteProcessRequest):
The request object. Request message for
[DeleteProcess][google.cloud.datacatalog.lineage.v1.DeleteProcess].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.operations_pb2.Operation:
This resource represents a
long-running operation that is the
result of a network API call.
"""
http_options: List[Dict[str, str]] = [
{
"method": "delete",
"uri": "/v1/{name=projects/*/locations/*/processes/*}",
},
]
request, metadata = self._interceptor.pre_delete_process(request, metadata)
pb_request = lineage.DeleteProcessRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = operations_pb2.Operation()
json_format.Parse(response.content, resp, ignore_unknown_fields=True)
resp = self._interceptor.post_delete_process(resp)
return resp
class _DeleteRun(LineageRestStub):
def __hash__(self):
return hash("DeleteRun")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: lineage.DeleteRunRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.Operation:
r"""Call the delete run method over HTTP.
Args:
request (~.lineage.DeleteRunRequest):
The request object. Request message for
[DeleteRun][google.cloud.datacatalog.lineage.v1.DeleteRun].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.operations_pb2.Operation:
This resource represents a
long-running operation that is the
result of a network API call.
"""
http_options: List[Dict[str, str]] = [
{
"method": "delete",
"uri": "/v1/{name=projects/*/locations/*/processes/*/runs/*}",
},
]
request, metadata = self._interceptor.pre_delete_run(request, metadata)
pb_request = lineage.DeleteRunRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = operations_pb2.Operation()
json_format.Parse(response.content, resp, ignore_unknown_fields=True)
resp = self._interceptor.post_delete_run(resp)
return resp
class _GetLineageEvent(LineageRestStub):
def __hash__(self):
return hash("GetLineageEvent")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: lineage.GetLineageEventRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> lineage.LineageEvent:
r"""Call the get lineage event method over HTTP.
Args:
request (~.lineage.GetLineageEventRequest):
The request object. Request message for
[GetLineageEvent][google.cloud.datacatalog.lineage.v1.GetLineageEvent].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.lineage.LineageEvent:
A lineage event represents an
operation on assets. Within the
operation, the data flows from the
source to the target defined in the
links field.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/v1/{name=projects/*/locations/*/processes/*/runs/*/lineageEvents/*}",
},
]
request, metadata = self._interceptor.pre_get_lineage_event(
request, metadata
)
pb_request = lineage.GetLineageEventRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = lineage.LineageEvent()
pb_resp = lineage.LineageEvent.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_get_lineage_event(resp)
return resp
class _GetProcess(LineageRestStub):
def __hash__(self):
return hash("GetProcess")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: lineage.GetProcessRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> lineage.Process:
r"""Call the get process method over HTTP.
Args:
request (~.lineage.GetProcessRequest):
The request object. Request message for
[GetProcess][google.cloud.datacatalog.lineage.v1.GetProcess].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.lineage.Process:
A process is the definition of a data
transformation operation.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/v1/{name=projects/*/locations/*/processes/*}",
},
]
request, metadata = self._interceptor.pre_get_process(request, metadata)
pb_request = lineage.GetProcessRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = lineage.Process()
pb_resp = lineage.Process.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_get_process(resp)
return resp
class _GetRun(LineageRestStub):
def __hash__(self):
return hash("GetRun")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: lineage.GetRunRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> lineage.Run:
r"""Call the get run method over HTTP.
Args:
request (~.lineage.GetRunRequest):
The request object. Request message for
[GetRun][google.cloud.datacatalog.lineage.v1.GetRun].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.lineage.Run:
A lineage run represents an execution
of a process that creates lineage
events.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/v1/{name=projects/*/locations/*/processes/*/runs/*}",
},
]
request, metadata = self._interceptor.pre_get_run(request, metadata)
pb_request = lineage.GetRunRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = lineage.Run()
pb_resp = lineage.Run.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_get_run(resp)
return resp
class _ListLineageEvents(LineageRestStub):
def __hash__(self):
return hash("ListLineageEvents")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: lineage.ListLineageEventsRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> lineage.ListLineageEventsResponse:
r"""Call the list lineage events method over HTTP.
Args:
request (~.lineage.ListLineageEventsRequest):
The request object. Request message for
[ListLineageEvents][google.cloud.datacatalog.lineage.v1.ListLineageEvents].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.lineage.ListLineageEventsResponse:
Response message for
[ListLineageEvents][google.cloud.datacatalog.lineage.v1.ListLineageEvents].
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/v1/{parent=projects/*/locations/*/processes/*/runs/*}/lineageEvents",
},
]
request, metadata = self._interceptor.pre_list_lineage_events(
request, metadata
)
pb_request = lineage.ListLineageEventsRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = lineage.ListLineageEventsResponse()
pb_resp = lineage.ListLineageEventsResponse.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_list_lineage_events(resp)
return resp
class _ListProcesses(LineageRestStub):
def __hash__(self):
return hash("ListProcesses")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: lineage.ListProcessesRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> lineage.ListProcessesResponse:
r"""Call the list processes method over HTTP.
Args:
request (~.lineage.ListProcessesRequest):
The request object. Request message for
[ListProcesses][google.cloud.datacatalog.lineage.v1.ListProcesses].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.lineage.ListProcessesResponse:
Response message for
[ListProcesses][google.cloud.datacatalog.lineage.v1.ListProcesses].
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/v1/{parent=projects/*/locations/*}/processes",
},
]
request, metadata = self._interceptor.pre_list_processes(request, metadata)
pb_request = lineage.ListProcessesRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = lineage.ListProcessesResponse()
pb_resp = lineage.ListProcessesResponse.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_list_processes(resp)
return resp
class _ListRuns(LineageRestStub):
def __hash__(self):
return hash("ListRuns")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: lineage.ListRunsRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> lineage.ListRunsResponse:
r"""Call the list runs method over HTTP.
Args:
request (~.lineage.ListRunsRequest):
The request object. Request message for
[ListRuns][google.cloud.datacatalog.lineage.v1.ListRuns].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.lineage.ListRunsResponse:
Response message for
[ListRuns][google.cloud.datacatalog.lineage.v1.ListRuns].
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/v1/{parent=projects/*/locations/*/processes/*}/runs",
},
]
request, metadata = self._interceptor.pre_list_runs(request, metadata)
pb_request = lineage.ListRunsRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = lineage.ListRunsResponse()
pb_resp = lineage.ListRunsResponse.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_list_runs(resp)
return resp
class _SearchLinks(LineageRestStub):
def __hash__(self):
return hash("SearchLinks")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: lineage.SearchLinksRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> lineage.SearchLinksResponse:
r"""Call the search links method over HTTP.
Args:
request (~.lineage.SearchLinksRequest):
The request object. Request message for
[SearchLinks][google.cloud.datacatalog.lineage.v1.Lineage.SearchLinks].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.lineage.SearchLinksResponse:
Response message for
[SearchLinks][google.cloud.datacatalog.lineage.v1.Lineage.SearchLinks].
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/v1/{parent=projects/*/locations/*}:searchLinks",
"body": "*",
},
]
request, metadata = self._interceptor.pre_search_links(request, metadata)
pb_request = lineage.SearchLinksRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = lineage.SearchLinksResponse()
pb_resp = lineage.SearchLinksResponse.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_search_links(resp)
return resp
class _UpdateProcess(LineageRestStub):
def __hash__(self):
return hash("UpdateProcess")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: lineage.UpdateProcessRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> lineage.Process:
r"""Call the update process method over HTTP.
Args:
request (~.lineage.UpdateProcessRequest):
The request object. Request message for
[UpdateProcess][google.cloud.datacatalog.lineage.v1.UpdateProcess].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.lineage.Process:
A process is the definition of a data
transformation operation.
"""
http_options: List[Dict[str, str]] = [
{
"method": "patch",
"uri": "/v1/{process.name=projects/*/locations/*/processes/*}",
"body": "process",
},
]
request, metadata = self._interceptor.pre_update_process(request, metadata)
pb_request = lineage.UpdateProcessRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = lineage.Process()
pb_resp = lineage.Process.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_update_process(resp)
return resp
class _UpdateRun(LineageRestStub):
def __hash__(self):
return hash("UpdateRun")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: lineage.UpdateRunRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> lineage.Run:
r"""Call the update run method over HTTP.
Args:
request (~.lineage.UpdateRunRequest):
The request object. Request message for
[UpdateRun][google.cloud.datacatalog.lineage.v1.UpdateRun].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.lineage.Run:
A lineage run represents an execution
of a process that creates lineage
events.
"""
http_options: List[Dict[str, str]] = [
{
"method": "patch",
"uri": "/v1/{run.name=projects/*/locations/*/processes/*/runs/*}",
"body": "run",
},
]
request, metadata = self._interceptor.pre_update_run(request, metadata)
pb_request = lineage.UpdateRunRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = lineage.Run()
pb_resp = lineage.Run.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_update_run(resp)
return resp
@property
def batch_search_link_processes(
self,
) -> Callable[
[lineage.BatchSearchLinkProcessesRequest],
lineage.BatchSearchLinkProcessesResponse,
]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._BatchSearchLinkProcesses(self._session, self._host, self._interceptor) # type: ignore
@property
def create_lineage_event(
self,
) -> Callable[[lineage.CreateLineageEventRequest], lineage.LineageEvent]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._CreateLineageEvent(self._session, self._host, self._interceptor) # type: ignore
@property
def create_process(
self,
) -> Callable[[lineage.CreateProcessRequest], lineage.Process]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._CreateProcess(self._session, self._host, self._interceptor) # type: ignore
@property
def create_run(self) -> Callable[[lineage.CreateRunRequest], lineage.Run]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._CreateRun(self._session, self._host, self._interceptor) # type: ignore
@property
def delete_lineage_event(
self,
) -> Callable[[lineage.DeleteLineageEventRequest], empty_pb2.Empty]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._DeleteLineageEvent(self._session, self._host, self._interceptor) # type: ignore
@property
def delete_process(
self,
) -> Callable[[lineage.DeleteProcessRequest], operations_pb2.Operation]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._DeleteProcess(self._session, self._host, self._interceptor) # type: ignore
@property
def delete_run(
self,
) -> Callable[[lineage.DeleteRunRequest], operations_pb2.Operation]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._DeleteRun(self._session, self._host, self._interceptor) # type: ignore
@property
def get_lineage_event(
self,
) -> Callable[[lineage.GetLineageEventRequest], lineage.LineageEvent]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._GetLineageEvent(self._session, self._host, self._interceptor) # type: ignore
@property
def get_process(self) -> Callable[[lineage.GetProcessRequest], lineage.Process]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._GetProcess(self._session, self._host, self._interceptor) # type: ignore
@property
def get_run(self) -> Callable[[lineage.GetRunRequest], lineage.Run]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._GetRun(self._session, self._host, self._interceptor) # type: ignore
@property
def list_lineage_events(
self,
) -> Callable[
[lineage.ListLineageEventsRequest], lineage.ListLineageEventsResponse
]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._ListLineageEvents(self._session, self._host, self._interceptor) # type: ignore
@property
def list_processes(
self,
) -> Callable[[lineage.ListProcessesRequest], lineage.ListProcessesResponse]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._ListProcesses(self._session, self._host, self._interceptor) # type: ignore
@property
def list_runs(
self,
) -> Callable[[lineage.ListRunsRequest], lineage.ListRunsResponse]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._ListRuns(self._session, self._host, self._interceptor) # type: ignore
@property
def search_links(
self,
) -> Callable[[lineage.SearchLinksRequest], lineage.SearchLinksResponse]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._SearchLinks(self._session, self._host, self._interceptor) # type: ignore
@property
def update_process(
self,
) -> Callable[[lineage.UpdateProcessRequest], lineage.Process]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._UpdateProcess(self._session, self._host, self._interceptor) # type: ignore
@property
def update_run(self) -> Callable[[lineage.UpdateRunRequest], lineage.Run]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._UpdateRun(self._session, self._host, self._interceptor) # type: ignore
@property
def cancel_operation(self):
return self._CancelOperation(self._session, self._host, self._interceptor) # type: ignore
class _CancelOperation(LineageRestStub):
def __call__(
self,
request: operations_pb2.CancelOperationRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Call the cancel operation method over HTTP.
Args:
request (operations_pb2.CancelOperationRequest):
The request object for CancelOperation method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/v1/{name=projects/*/locations/*/operations/*}:cancel",
"body": "*",
},
]
request, metadata = self._interceptor.pre_cancel_operation(
request, metadata
)
request_kwargs = json_format.MessageToDict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
body = json.dumps(transcoded_request["body"])
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(json.dumps(transcoded_request["query_params"]))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
return self._interceptor.post_cancel_operation(None)
@property
def delete_operation(self):
return self._DeleteOperation(self._session, self._host, self._interceptor) # type: ignore
class _DeleteOperation(LineageRestStub):
def __call__(
self,
request: operations_pb2.DeleteOperationRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Call the delete operation method over HTTP.
Args:
request (operations_pb2.DeleteOperationRequest):
The request object for DeleteOperation method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
http_options: List[Dict[str, str]] = [
{
"method": "delete",
"uri": "/v1/{name=projects/*/locations/*/operations/*}",
},
]
request, metadata = self._interceptor.pre_delete_operation(
request, metadata
)
request_kwargs = json_format.MessageToDict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(json.dumps(transcoded_request["query_params"]))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
return self._interceptor.post_delete_operation(None)
@property
def get_operation(self):
return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore
class _GetOperation(LineageRestStub):
def __call__(
self,
request: operations_pb2.GetOperationRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.Operation:
r"""Call the get operation method over HTTP.
Args:
request (operations_pb2.GetOperationRequest):
The request object for GetOperation method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
operations_pb2.Operation: Response from GetOperation method.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/v1/{name=projects/*/locations/*/operations/*}",
},
]
request, metadata = self._interceptor.pre_get_operation(request, metadata)
request_kwargs = json_format.MessageToDict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(json.dumps(transcoded_request["query_params"]))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
resp = operations_pb2.Operation()
resp = json_format.Parse(response.content.decode("utf-8"), resp)
resp = self._interceptor.post_get_operation(resp)
return resp
@property
def list_operations(self):
return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore
class _ListOperations(LineageRestStub):
def __call__(
self,
request: operations_pb2.ListOperationsRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.ListOperationsResponse:
r"""Call the list operations method over HTTP.
Args:
request (operations_pb2.ListOperationsRequest):
The request object for ListOperations method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
operations_pb2.ListOperationsResponse: Response from ListOperations method.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/v1/{name=projects/*/locations/*}/operations",
},
]
request, metadata = self._interceptor.pre_list_operations(request, metadata)
request_kwargs = json_format.MessageToDict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(json.dumps(transcoded_request["query_params"]))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
resp = operations_pb2.ListOperationsResponse()
resp = json_format.Parse(response.content.decode("utf-8"), resp)
resp = self._interceptor.post_list_operations(resp)
return resp
@property
def kind(self) -> str:
return "rest"
def close(self):
self._session.close()
__all__ = ("LineageRestTransport",)
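# Illustrative usage (assumed client name and keyword, not part of the generated
# file): this REST transport is normally selected indirectly, e.g.
#   client = LineageClient(transport="rest")
# rather than by instantiating LineageRestTransport directly.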
|
93ca140de4ec291084f1742913dbd45c44423a3d
|
36df4a9eb074bd06ae95f0e534043818b47acd91
|
/streaming/language_support/python/test_install.py
|
a923bb1ae2463aa3f82ef2bfb059e757b476dfb2
|
[] |
no_license
|
mongodb/mongo-hadoop
|
1356805ebfd5d7073e225a6adfdb4bf21f18329e
|
20208a027ad8638e56dfcf040773f176d6ee059f
|
refs/heads/master
| 2023-08-28T03:59:09.626489
| 2022-01-28T19:28:02
| 2022-01-28T19:28:02
| 856,703
| 1,057
| 421
| null | 2023-03-09T01:55:32
| 2010-08-23T14:05:51
|
Java
|
UTF-8
|
Python
| false
| false
| 366
|
py
|
test_install.py
|
#!/usr/bin/env python
try:
import pymongo
from bson import _elements_to_dict, InvalidBSON
except ImportError:
raise Exception("Cannot find a valid pymongo installation.")
try:
from pymongo_hadoop import BSONInput
except ImportError:
raise Exception("Cannot find a valid pymongo_hadoop installation.")
print "*** Everything looks OK. All required modules were found."
|
5a399a2bbde19fb8d7f538f1bd45d225141356ba
|
b85e5263563af92ea888ddc74c97785501e4a7bd
|
/python_examples/pair_distribution.py
|
22fddb23fe152028ca50c8150e8202826d8d7a6b
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
Allen-Tildesley/examples
|
4e9a2f7bc574d1dd823caceef995d623d5b8255e
|
9e71fb31b4eb6421cc1936149e882e7dd47dd927
|
refs/heads/master
| 2023-09-04T08:44:00.488422
| 2023-09-01T14:32:50
| 2023-09-01T14:32:50
| 32,993,797
| 255
| 109
|
CC0-1.0
| 2022-08-20T14:12:51
| 2015-03-27T15:28:35
|
Fortran
|
UTF-8
|
Python
| false
| false
| 6,271
|
py
|
pair_distribution.py
|
#!/usr/bin/env python3
# pair_distribution.py
#------------------------------------------------------------------------------------------------#
# This software was written in 2016/17 #
# by Michael P. Allen <m.p.allen@warwick.ac.uk>/<m.p.allen@bristol.ac.uk> #
# and Dominic J. Tildesley <d.tildesley7@gmail.com> ("the authors"), #
# to accompany the book "Computer Simulation of Liquids", second edition, 2017 ("the text"), #
# published by Oxford University Press ("the publishers"). #
# #
# LICENCE #
# Creative Commons CC0 Public Domain Dedication. #
# To the extent possible under law, the authors have dedicated all copyright and related #
# and neighboring rights to this software to the PUBLIC domain worldwide. #
# This software is distributed without any warranty. #
# You should have received a copy of the CC0 Public Domain Dedication along with this software. #
# If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. #
# #
# DISCLAIMER #
# The authors and publishers make no warranties about the software, and disclaim liability #
# for all uses of the software, to the fullest extent permitted by applicable law. #
# The authors and publishers do not recommend use of this software for any purpose. #
# It is made freely available, solely to clarify points made in the text. When using or citing #
# the software, you should not imply endorsement by the authors or publishers. #
#------------------------------------------------------------------------------------------------#
"""Calculates pair distribution function g(r)."""
import json
import sys
import numpy as np
import math
from config_io_module import read_cnf_atoms
import os.path
print('pair_distribution')
# Read parameters in JSON format
try:
nml = json.load(sys.stdin)
except json.JSONDecodeError:
print('Exiting on Invalid JSON format')
sys.exit()
# Set default values, check keys and typecheck values
defaults = {"dr":0.02}
for key, val in nml.items():
if key in defaults:
assert type(val) == type(defaults[key]), key+" has the wrong type"
else:
print('Warning', key, 'not in ', list(defaults.keys()))
# Set parameters to input values or defaults
dr = nml["dr"] if "dr" in nml else defaults["dr"]
# Write out parameters
print ( "{:40}{:15.6f}".format('g(r) spacing dr', dr) )
# Read in configuration
cnf_prefix = 'cnf.'
if not os.path.isfile(cnf_prefix+'000'):
print(cnf_prefix+'000 does not exist')
sys.exit()
n, box, r = read_cnf_atoms(cnf_prefix+'000')
print("{:40}{:15d} ".format('Number of particles', n) )
print("{:40}{:15.6f}".format('Box (in sigma units)',box) )
dr = dr / box # Convert to box=1 units
nk = math.floor(0.5/dr) # Accumulate out to half box length
r_max = nk*dr # Actual r_max (box=1 units)
print( "{:40}{:15d} ".format('Number of bins', nk) )
print( "{:40}{:15.6f}".format('Maximum r/box', r_max) )
h = np.zeros(nk,dtype=np.int_) # Histogram bins initialized to zero
nstep = 0 # Counts configurations
while True: # Loop until configurations or naming scheme exhausted
if nstep >= 1000:
break
sav_tag = str(nstep).zfill(3)
file_name = cnf_prefix+sav_tag
if not os.path.isfile(file_name):
break
n, box, r = read_cnf_atoms(file_name)
print('Processing '+file_name)
r = r / box # Convert to box=1 units
# Simple approach calculating all pairs at once
rij = r[:,np.newaxis,:] - r[np.newaxis,:,:] # Set of all distance vectors
rij = rij - np.rint(rij) # Apply periodic boundaries
rij_mag = np.sqrt(np.sum(rij**2,axis=-1)) # Separation distances
rij_mag = rij_mag[np.triu_indices_from(rij_mag,k=1)] # Extract upper triangle
hist,edges = np.histogram(rij_mag,bins=nk,range=(0.0,r_max)) # Accumulate histogram of separations
h = h + 2*hist # Accumulate histogram
# This section now replaced by the simple approach above
# To make best use of NumPy, we loop over cyclic offset shifts and process N rij pairs at once.
# factor=2 accounts for both ij and ji, but if N is even, the last shift
# includes both ij and ji pairs already, so factor=1
# The idea dates back to S Brode and R Ahlrichs Comput Phys Commun 42, 51 (1986)
# nshift=n//2
# for shift in range(nshift):
# rij = r - np.roll(r,shift+1,axis=0) # Difference between N pairs of coordinates
# rij = rij - np.rint(rij) # Apply periodic boundaries
# rij_mag = np.sqrt(np.sum(rij**2,axis=1)) # Separation distances
# hist,edges = np.histogram(rij_mag,bins=nk,range=(0.0,r_max)) # Accumulate histogram of separations
# factor = 1 if n%2==0 and shift==nshift-1 else 2 # Normally 2 except possibly on last shift
# h = h + factor*hist # Accumulate histogram
nstep=nstep+1 # Increment configuration counter ready for next time
rho = float(n) # Our calculation is done in box=1 units
h_id = ( 4.0 * np.pi * rho / 3.0) * ( edges[1:nk+1]**3 - edges[0:nk]**3 ) # Ideal number
g = h / h_id / (n*nstep) # Average number
print('Output to pair_distribution.out')
edges = edges*box # Convert bin edges back to sigma=1 units
r_mid = 0.5*(edges[0:nk]+edges[1:nk+1]) # Mid points of bins
np.savetxt('pair_distribution.out',np.c_[r_mid,g],fmt="%15.8f")
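# Example invocation (assumed shell usage, not part of the original script):
#   echo '{"dr": 0.02}' | python3 pair_distribution.py
# with configuration files cnf.000, cnf.001, ... in the working directory; the
# result is written to pair_distribution.out as two columns, r and g(r).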
|
2fffbc1969e6b6f0e2973b163001d882f3a00b28
|
b38247a5d84d8b52ce8363f8dd81629cfbe17f65
|
/serving/reagent/serving/config/operators.py
|
2a4e368776121ce5535f5b649939fe97ae3203a0
|
[
"BSD-3-Clause"
] |
permissive
|
facebookresearch/ReAgent
|
7f2b82eaaf7a19e58cc50aacc307d7b001231440
|
c5f1a8371a677b4f8fb0882b600bf331eba5259d
|
refs/heads/main
| 2023-09-05T15:56:49.175072
| 2023-08-29T21:48:40
| 2023-08-29T21:48:40
| 98,565,575
| 1,480
| 290
|
BSD-3-Clause
| 2023-09-12T23:09:30
| 2017-07-27T17:53:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,683
|
py
|
operators.py
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
from typing import Dict
import reagent.serving.config.namespace as namespace
# Track things we don't want to export
__globals = set(globals())
def ActionValueScoring(model_id: int, snapshot_id: int):
"""The operator calls predictor for a model and generates the scores
Args:
"""
pass
def EpsilonGreedyRanker(epsilon: float, values: Dict, seed: int = None):
"""The operator that implements iterative epsilon greedy ranker
Args:
"""
pass
def Expression(equation: str):
"""The operator that can do calculation based on the given equation
Args:
"""
pass
def Frechet(values: Dict, rho: float, gamma: float, seed: int = None):
"""The operator that implements Frechet ranking
Args:
"""
pass
def InputFromRequest():
"""The operator that will return "input" from request
Args:
"""
pass
def PropensityFit(input: Dict, targets: Dict):
"""The operator that shifts the input to match the distribution in targets
Args:
"""
pass
def Softmax(temperature: float, values: Dict):
"""The operator to use softmax for normalization
Args:
"""
pass
def SoftmaxRanker(temperature: float, values: Dict, seed: int = None):
"""The operator that implements iterative softmax ranker
Args:
"""
pass
def UCB(method: str, batch_size: int = 1):
"""The operator that implements UCB algorithms
Args:
"""
pass
# Set exports
for op in set(globals()) - __globals - {"__globals"}:
globals()[op] = namespace.DecisionOperation(globals()[op])
del __globals
|
facfeaf4a0c10b7664f5893a6b3c3b92d19af65e
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/Scaleform/daapi/view/lobby/vehicle_preview/info/__init__.py
|
cdd2c662d1aa29fe291a414ca4e8e1f2db129a30
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 157
|
py
|
__init__.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/vehicle_preview/info/__init__.py
pass
|
9659ef1236c4132492f5ed787c1d41ed98720c2f
|
7343ece3b82ac87a594865c4074623b45b0297b4
|
/synapse/config/spam_checker.py
|
f22784f9c95a56885d11b07a84409a23ea72a76a
|
[
"Apache-2.0"
] |
permissive
|
matrix-org/synapse
|
a00111f83310783b78e2996557f8bbae4d9fb229
|
d35bed8369514fe727b4fe1afb68f48cc8b2655a
|
refs/heads/develop
| 2023-09-05T05:24:20.808942
| 2023-09-04T16:14:09
| 2023-09-04T16:14:09
| 22,844,864
| 12,215
| 2,869
|
Apache-2.0
| 2023-09-14T15:20:48
| 2014-08-11T15:51:42
|
Python
|
UTF-8
|
Python
| false
| false
| 2,585
|
py
|
spam_checker.py
|
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Any, Dict, List, Tuple
from synapse.config import ConfigError
from synapse.types import JsonDict
from synapse.util.module_loader import load_module
from ._base import Config
logger = logging.getLogger(__name__)
LEGACY_SPAM_CHECKER_WARNING = """
This server is using a spam checker module that is implementing the deprecated spam
checker interface. Please check with the module's maintainer to see if a new version
supporting Synapse's generic modules system is available. For more information, please
see https://matrix-org.github.io/synapse/latest/modules/index.html
---------------------------------------------------------------------------------------"""
class SpamCheckerConfig(Config):
section = "spamchecker"
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.spam_checkers: List[Tuple[Any, Dict]] = []
spam_checkers = config.get("spam_checker") or []
if isinstance(spam_checkers, dict):
# The spam_checker config option used to only support one
# spam checker, and thus was simply a dictionary with module
# and config keys. Support this old behaviour by checking
# to see if the option resolves to a dictionary
self.spam_checkers.append(load_module(spam_checkers, ("spam_checker",)))
elif isinstance(spam_checkers, list):
for i, spam_checker in enumerate(spam_checkers):
config_path = ("spam_checker", "<item %i>" % i)
if not isinstance(spam_checker, dict):
raise ConfigError("expected a mapping", config_path)
self.spam_checkers.append(load_module(spam_checker, config_path))
else:
raise ConfigError("spam_checker syntax is incorrect")
# If this configuration is being used in any way, warn the admin that it is going
# away soon.
if self.spam_checkers:
logger.warning(LEGACY_SPAM_CHECKER_WARNING)
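# For illustration only (assumed homeserver.yaml snippets, not from the original
# file), read_config above accepts both the legacy single-dict shape and the list
# shape:
#   spam_checker: {module: "my_module.SpamChecker", config: {}}
#   spam_checker: [{module: "my_module.SpamChecker", config: {}}]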
|
209c68eae5130d9e135fd3547a1018c346e78880
|
f71ea031e1df51e8fbf20899788830ac079c7609
|
/spidermon/contrib/actions/email/__init__.py
|
3a415b7f41101802f7316eb874997d4a517e6502
|
[
"BSD-3-Clause"
] |
permissive
|
scrapinghub/spidermon
|
d4177399a38610f876842e1a6b6d47b19381f716
|
ea21cee8f42261c99278a6b4582a19b32224ef47
|
refs/heads/master
| 2023-08-31T17:06:04.520859
| 2023-08-31T09:10:51
| 2023-08-31T09:10:51
| 46,345,612
| 492
| 105
|
BSD-3-Clause
| 2023-09-13T20:41:43
| 2015-11-17T12:27:38
|
Python
|
UTF-8
|
Python
| false
| false
| 5,532
|
py
|
__init__.py
|
import logging
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from premailer import transform
from spidermon.contrib.actions.templates import ActionWithTemplates
from spidermon.exceptions import NotConfigured
logger = logging.getLogger(__name__)
class SendEmail(ActionWithTemplates):
sender = None
subject = None
subject_template = None
to = None
cc = None
bcc = None
reply_to = None
body_text = None
body_text_template = None
body_html = None
body_html_template = "reports/email/monitors/result.jinja"
fake = False
def __init__(
self,
sender,
to,
cc=None,
bcc=None,
reply_to=None,
subject=None,
subject_template=None,
body_text=None,
body_text_template=None,
body_html=None,
body_html_template=None,
fake=None,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.sender = sender or self.sender
self.subject = subject or self.subject
self.subject_template = subject_template or self.subject_template
self.to = to or self.to
self.cc = cc or self.cc
self.bcc = bcc or self.bcc
self.reply_to = reply_to or self.reply_to
self.body_text = body_text or self.body_text
self.body_text_template = body_text_template or self.body_text_template
self.body_html = body_html or self.body_html
self.body_html_template = body_html_template or self.body_html_template
self.fake = fake or self.fake
if not self.fake and not self.to:
raise NotConfigured(
"You must provide a value for SPIDERMON_EMAIL_TO setting."
)
if not self.subject:
raise NotConfigured(
"You must provide a value for SPIDERMON_EMAIL_SUBJECT setting."
)
if not any(
[
self.body_text,
self.body_text_template,
self.body_html,
self.body_html_template,
]
):
body_settings = ", ".join(
[
"SPIDERMON_BODY_TEXT",
"SPIDERMON_BODY_TEXT_TEMPLATE",
"SPIDERMON_BODY_HTML",
"SPIDERMON_BODY_HTML_TEMPLATE",
]
)
raise NotConfigured(
f"You must provide a value for one of these settings: {body_settings}"
)
@classmethod
def from_crawler_kwargs(cls, crawler):
return {
"sender": crawler.settings.get("SPIDERMON_EMAIL_SENDER"),
"subject": crawler.settings.get("SPIDERMON_EMAIL_SUBJECT"),
"subject_template": crawler.settings.get(
"SPIDERMON_EMAIL_SUBJECT_TEMPLATE"
),
"to": cls.getlist(crawler.settings, "SPIDERMON_EMAIL_TO"),
"cc": cls.getlist(crawler.settings, "SPIDERMON_EMAIL_CC"),
"bcc": cls.getlist(crawler.settings, "SPIDERMON_EMAIL_BCC"),
"reply_to": crawler.settings.get("SPIDERMON_EMAIL_REPLY_TO"),
"body_text": crawler.settings.get("SPIDERMON_BODY_TEXT"),
"body_text_template": crawler.settings.get("SPIDERMON_BODY_TEXT_TEMPLATE"),
"body_html": crawler.settings.get("SPIDERMON_BODY_HTML"),
"body_html_template": crawler.settings.get("SPIDERMON_BODY_HTML_TEMPLATE"),
"fake": crawler.settings.getbool("SPIDERMON_EMAIL_FAKE"),
"context": crawler.settings.getdict("SPIDERMON_EMAIL_CONTEXT"),
}
@staticmethod
def getlist(settings, entry):
return [v.strip() for v in settings.getlist(entry)]
def run_action(self):
message = self.get_message()
if self.fake:
logger.info(message.as_string())
else:
self.send_message(message)
def get_subject(self):
if self.subject:
return self.render_text_template(self.subject)
elif self.subject_template:
return self.render_template(self.subject_template)
else:
return ""
def get_body_text(self):
if self.body_text:
return self.render_text_template(self.body_text)
elif self.body_text_template:
return self.render_template(self.body_text_template)
else:
return ""
def get_body_html(self):
html = ""
if self.body_html:
html = transform(self.render_text_template(self.body_html))
elif self.body_html_template:
html = transform(self.render_template(self.body_html_template))
return html
def get_message(self):
subject = self.get_subject()
body_text = self.get_body_text()
body_html = self.get_body_html()
message = MIMEMultipart("alternative")
message.set_charset("UTF-8")
message["Subject"] = subject
message["From"] = self.sender
message["To"] = ", ".join(self.to)
if self.cc:
message["Cc"] = ", ".join(self.cc)
if self.bcc:
message["Bcc"] = ", ".join(self.bcc)
if self.reply_to:
message["reply-to"] = self.reply_to
message.attach(MIMEText(body_text, "plain"))
if body_html:
message.attach(MIMEText(body_html, "html"))
return message
def send_message(self, message, **kwargs):
raise NotImplementedError
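# Minimal illustrative subclass (assumed SMTP details, not part of the original
# file); SendEmail deliberately leaves delivery to subclasses via send_message:
#
#   import smtplib
#
#   class SendSmtpEmail(SendEmail):
#       def send_message(self, message, **kwargs):
#           with smtplib.SMTP("localhost", 25) as server:
#               server.sendmail(self.sender, self.to, message.as_string())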
|
b3bf9b42a7688edc0c0e7524a52ae474a1f54a9f
|
069c2295076c482afadfe6351da5ae02be8e18e6
|
/django/contrib/redirects/migrations/0002_alter_redirect_new_path_help_text.py
|
99763660056376f9d60b077fdaad024a2bb02774
|
[
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"GPL-1.0-or-later",
"Python-2.0.1",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-permissive",
"Python-2.0"
] |
permissive
|
django/django
|
5eb557f57053631cd4f566f451e43197309dbeeb
|
c74a6fad5475495756a5bdb18b2cab2b68d429bc
|
refs/heads/main
| 2023-09-01T03:43:44.033530
| 2023-08-31T08:27:32
| 2023-08-31T08:27:32
| 4,164,482
| 73,530
| 38,187
|
BSD-3-Clause
| 2023-09-14T20:03:48
| 2012-04-28T02:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 635
|
py
|
0002_alter_redirect_new_path_help_text.py
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("redirects", "0001_initial"),
]
operations = [
migrations.AlterField(
model_name="redirect",
name="new_path",
field=models.CharField(
blank=True,
help_text=(
"This can be either an absolute path (as above) or a full "
"URL starting with a scheme such as “https://”."
),
max_length=200,
verbose_name="redirect to",
),
),
]
|
c9aafa688e148a333a96db25df9ce3b7f72a7e59
|
e22eeb5256e17a96a98b3ff25433aec2d641cd2c
|
/openstack/key_manager/v1/container.py
|
eebe5d038365eccba4c5c39e6ef28267e57a660b
|
[
"Apache-2.0"
] |
permissive
|
openstack/openstacksdk
|
b4b95fd7869653feea5a3b783e9a5c588235c039
|
d474eb84c605c429bb9cccb166cabbdd1654d73c
|
refs/heads/master
| 2023-09-03T22:50:03.398512
| 2023-07-27T14:09:35
| 2023-08-29T16:28:46
| 16,223,378
| 124
| 130
|
Apache-2.0
| 2023-09-06T02:52:47
| 2014-01-25T02:48:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,715
|
py
|
container.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.key_manager.v1 import _format
from openstack import resource
class Container(resource.Resource):
resources_key = 'containers'
base_path = '/containers'
# capabilities
allow_create = True
allow_fetch = True
allow_commit = True
allow_delete = True
allow_list = True
# Properties
#: A URI for this container
container_ref = resource.Body('container_ref')
#: The ID for this container
container_id = resource.Body(
'container_ref', alternate_id=True, type=_format.HREFToUUID
)
#: The timestamp when this container was created.
created_at = resource.Body('created')
#: The name of this container
name = resource.Body('name')
#: A list of references to secrets in this container
secret_refs = resource.Body('secret_refs', type=list)
#: The status of this container
status = resource.Body('status')
#: The type of this container
type = resource.Body('type')
#: The timestamp when this container was updated.
updated_at = resource.Body('updated')
#: A party interested in this container.
consumers = resource.Body('consumers', type=list)
|
8bccdc9de06813276416e1b639dbcc84b5d64b66
|
6ffd23679939f59f0a09c9507a126ba056b239d7
|
/imperative/python/megengine/traced_module/_passes/const_pass.py
|
f21b95cb67349ebd888bb343ad60aa0e9a615ba3
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
MegEngine/MegEngine
|
74c1c9b6022c858962caf7f27e6f65220739999f
|
66b79160d35b2710c00befede0c3fd729109e474
|
refs/heads/master
| 2023-08-23T20:01:32.476848
| 2023-08-01T07:12:01
| 2023-08-11T06:04:12
| 248,175,118
| 5,697
| 585
|
Apache-2.0
| 2023-07-19T05:11:07
| 2020-03-18T08:21:58
|
C++
|
UTF-8
|
Python
| false
| false
| 6,729
|
py
|
const_pass.py
|
from ... import functional as F
from ... import module as M
from ...core.ops.builtin import GetVarShape
from ...logger import get_logger
from ...tensor import Tensor
from ..expr import Constant, Expr, is_apply_def, is_constant, is_getattr
from ..node import Node, NodeMixin, TensorNode
from .matcher import PatternMatcher
from .pass_base import BackwardPass, ForwardPass, register_pass
from .pattern import is_op
from .utils import get_const_value
logger = get_logger(__name__)
def _as_const_node(x):
node = Constant.make(x)
NodeMixin.wrap(x, node)
return node
@register_pass("AttrToConstant")
class AttrToConstant(BackwardPass):
r"""Convert :class:`~.GetAttr` to :class:`~.Constant` expr."""
name = "AttrToConstant"
run_once = True
def run_transform(self, expr: Expr):
if not (is_getattr(expr) and isinstance(expr.outputs[0], TensorNode)):
return expr
graph = expr.top_graph
value = get_const_value(expr)
orig_node = expr.outputs[0]
name = orig_node.name
with graph.insert_exprs(expr):
const_node = _as_const_node(value)
graph.replace_node({orig_node: const_node})
graph.compile()
const_node.name = name
return const_node.expr
@register_pass("FixInputShape")
class FixInputShape(BackwardPass):
name = "FixInputShape"
run_once = True
def run_transform(self, expr: Expr):
if not is_apply_def(expr, GetVarShape):
return expr
shape = Tensor(expr.inputs[0].shape, dtype="int32")
graph = expr.top_graph
with graph.insert_exprs(expr):
const_shape = _as_const_node(shape)
graph.replace_node({expr.outputs[0]: const_shape})
graph.compile()
const_shape.name = expr.outputs[0].name
return const_shape.expr
@register_pass("FlodConstant")
class FlodConstant(ForwardPass):
r"""Constant folding."""
name = "FlodConstant"
required_pass = ["AttrToConstant"]
run_once = False
def run_transform(self, expr: Expr):
if len(expr.inputs) == 0 or any(not is_constant(n.expr) for n in expr.inputs):
return expr
const_var = expr.interpret(*[get_const_value(n.expr) for n in expr.inputs])[0]
graph = expr.top_graph
with graph.insert_exprs(expr):
const_node = _as_const_node(const_var)
graph.replace_node({expr.outputs[0]: const_node})
graph.compile()
const_node.name = expr.outputs[0].name
return const_node.expr
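# Illustrative effect (example values chosen here, not from the original file):
# if every input of an expr is itself a Constant expr, e.g. add(Constant(2),
# Constant(3)), FlodConstant evaluates it once via expr.interpret and replaces
# the output node with a single Constant node holding the result (5).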
@register_pass("NormElemWise")
class NormElemWise(BackwardPass):
r"""Transform add/sub or mul/div expr to add-only or mul-only chains.
For example, the following code
.. code-block::
b = 1 - a
c = 2 * b
d = 1 / c
will be changed to
.. code-block::
a1 = F.neg(a)
b = a1 + 1
c = b * 2
d = F.pow(c, -1)
"""
name = "NormElemWise"
required_pass = ["FlodConstant"]
run_once = False
def __init__(self,):
super().__init__()
self.pattern = is_op(F.add)
for op in [F.sub, F.mul, F.div]:
self.pattern |= is_op(op)
for op in ["__add__", "__iadd__", "__radd__"]:
self.pattern |= is_op(op)
for op in ["__sub__", "__isub__", "__rsub__"]:
self.pattern |= is_op(op)
for op in ["__mul__", "__imul__", "__rmul__"]:
self.pattern |= is_op(op)
for op in ["__truediv__", "__itruediv__", "__rtruediv__"]:
self.pattern |= is_op(op)
def run_transform(self, expr: Expr):
matcher = PatternMatcher()
if not matcher.match(self.pattern, expr):
return expr
pattern = matcher.matched_patterns[0]
target = pattern.target
cofee, left_node, right_node = 1, None, None
if len(expr.inputs) == 1 and target not in ["__add__", "__mul__"]:
left_node = expr.inputs[0]
named_args = (expr.named_args).values()
for v in named_args:
if not isinstance(v, TensorNode):
right_node = v
break
if target in ["__rsub__", "__rtruediv__"]:
cofee = -1
if target in [F.sub, F.div] and left_node is not expr.named_args["x"]:
cofee = -1
elif len(expr.inputs) == 2 and (
target not in ["__add__", "__mul__"] or is_constant(expr.inputs[0].expr)
):
left_node, right_node = expr.inputs
if target in ["__rsub__", "__rtruediv__"]:
left_node, right_node = right_node, left_node
if target in [F.sub, F.div] and left_node is not expr.named_args["x"]:
left_node, right_node = right_node, left_node
if is_constant(left_node.expr):
left_node, right_node = right_node, left_node
cofee = -1
if left_node is None:
return expr
if isinstance(right_node, TensorNode):
right_node = get_const_value(right_node.expr, right_node)
graph = expr.top_graph
mul_f, add_f, sub_f, div_f = F.mul, F.add, F.sub, F.div
def map_f(value, func):
if isinstance(value, (list, tuple)):
return [func(v) for v in value]
return func(value)
with graph.insert_exprs():
if target in ["__mul__", "__imul__", "__rmul__", mul_f]:
out_node = left_node * right_node
elif target in ["__add__", "__iadd__", "__radd__", add_f]:
out_node = left_node + right_node
elif target in ["__sub__", "__isub__", "__rsub__", sub_f]:
f_l, f_r = lambda v: v, lambda v: v
if cofee == -1:
f_l = lambda v: F.neg(v)
else:
if isinstance(right_node, TensorNode):
f_r = lambda v: F.neg(v)
else:
f_r = lambda v: -1 * v
out_node = map_f(left_node, f_l) + map_f(right_node, f_r)
elif target in ["__truediv__", "__itruediv__", "__rtruediv__", div_f]:
f_l, f_r = lambda v: v, lambda v: v
if cofee == -1:
f_l = lambda v: F.pow(v, -1)
else:
if isinstance(right_node, TensorNode):
f_r = lambda v: F.pow(v, -1)
else:
f_r = lambda v: 1 / v
out_node = map_f(left_node, f_l) * map_f(right_node, f_r)
graph.replace_node({expr.outputs[0]: out_node})
graph.compile()
return out_node.expr
|
36d25c309388f87a846aa6558afb8ec17d874b8c
|
b86358e1f01f1eb51e44bb419c7889bee34b6893
|
/tools/etnaviv/parse_rng.py
|
18a8fe5399cc496948d2f15d1ef8ed15c03a72c4
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
etnaviv/etna_viv
|
77a113036b1390cbeba7349a34100ea577acebc1
|
678505ad8f3cfdebe39a431e5e7e85cee0a3e57c
|
refs/heads/master
| 2023-08-09T09:31:53.703245
| 2023-07-05T11:19:08
| 2023-07-05T11:19:08
| 22,278,924
| 124
| 24
|
NOASSERTION
| 2020-03-02T17:47:34
| 2014-07-26T05:06:21
|
C
|
UTF-8
|
Python
| false
| false
| 27,875
|
py
|
parse_rng.py
|
'''
Parse rules-ng-ng XML format.
See rules-ng-ng.xsd and rules-ng-ng.xml for documentation of the format.
'''
# Copyright (c) 2012-2013 Wladimir J. van der Laan
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sub license,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the
# next paragraph) shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function, division, unicode_literals
import os, sys, struct
from collections import OrderedDict
from lxml import etree as ET # parsing
from itertools import izip
from os import path
from etnaviv.floatutil import int_as_float
ns = "{http://nouveau.freedesktop.org/}"
XML_BOOL = {'1':True, '0':False, 'false':False, 'true':True, 'yes':True, 'no':False}
MASK_FIELD_SUFFIX = '_MASK'
# Simple interval arithmetic
# XXX move to utils package
INTERVAL_MIN = 0
INTERVAL_MAX = 1<<64
EMPTY_INTERVAL = (INTERVAL_MAX, INTERVAL_MIN)
FULL_INTERVAL = (INTERVAL_MIN, INTERVAL_MAX)
def interval_union(bounds1, bounds2):
'''Return the union of two intervals'''
return (min(bounds1[0], bounds2[0]), max(bounds1[1], bounds2[1]))
def interval_add(bounds, val):
'''Shift an interval with a specified value'''
return (max(INTERVAL_MIN, bounds[0] + val),
min(INTERVAL_MAX, bounds[1] + val))
def interval_check(bounds, val):
'''Check if val lies within this interval'''
return bounds[0] <= val < bounds[1]
#-------------------------------------------------------------------------
# Helper classes
class RNNObject(object):
'''
Rules-ng description object, common attributes are here.
'''
name = None # object name (None if anonymous)
parent = None # back-reference to parent object
brief = '' # short documentation
doc = '' # long documentation
varset = ''
variants = ''
def __init__(self, parent, **attr):
self.parent = parent
self.name = attr.get('name', None)
self.parent = attr.get('parent')
self.varset = attr.get('varset', None)
self.variants = attr.get('variants', None)
# Extension to rules-ng-ng: allow brief as attribute as well as element
self.brief = attr.get('brief', '')
def add_child(self, child):
return False
class TypedValue(object):
type = None
shr = None # Shiftright value, only makes sense for integer types
anon_type = None # anonymous private type
size = None
masked = None # does this register use masks for state groups?
def __init__(self, **attr):
self.type = attr.get('type', None)
self.shr = attr.get('shr', None)
self.masked = attr.get('masked', False)
def add_child(self, child):
# Creates an anonymous type
if isinstance(child, BitField):
if self.type is not None:
raise ValueError('Register with type cannot have bitfield inside')
if self.anon_type is None:
self.anon_type = BitSet(self, masked=self.masked)
self.anon_type.add_child(child)
return True
elif isinstance(child, EnumValue):
if self.type is not None:
raise ValueError('Register with type cannot have value inside')
if self.anon_type is None:
self.anon_type = Enum(self)
self.anon_type.add_child(child)
return True
return False
class Range(object):
'''
Memory range that can contain registers and other memory ranges.
'''
contents = None # arrays, stripes and registers
def __init__(self, **attr):
self.contents = []
self.contents_by_name = {}
def lookup_address(self, addr, variants=None):
'''Look up the specified address in this range. Return None if not found.'''
raise KeyError
def add_child(self, child):
if isinstance(child, (Array, Stripe, Register)):
self.contents.append(child)
self.contents_by_name[child.name] = child
return True
return False
def compute_bounds(self):
'''
Compute and propagate lower and upper bound and return as tuple.
'''
return FULL_INTERVAL
class Type(object):
'''
Marker class for objects that can be used as type.
'''
pass
#-------------------------------------------------------------------------
# Rules database objects: Types / values
class BaseType(Type):
'''Basic (primitive) type'''
INT = 'int'
UINT = 'uint'
BOOLEAN = 'boolean'
HEX = 'hex'
FLOAT = 'float'
FIXEDP = 'fixedp'
FIXEDPS = 'fixedps'
TYPES = {INT, UINT, BOOLEAN, HEX, FLOAT, FIXEDP, FIXEDPS}
kind = None
def __init__(self, kind, size):
assert(kind in self.TYPES)
if kind == self.FLOAT and size not in [16,32,64]:
raise ValueError('float must be size 16, 32 or 64, not %i' % size)
self.kind = kind
self.size = size
def describe(self, value):
# XXX need to propagate depth
if self.kind == 'int':
if value & (1<<(self.size-1)): # negative
value -= 1<<self.size
return '%i' % value
elif self.kind == 'uint':
return '%u' % value
elif self.kind == 'boolean':
return '%i' % value
elif self.kind == 'hex':
return '0x%x' % value
elif self.kind == 'float':
return '%f' % int_as_float(value, self.size)
elif self.kind == 'fixedp':
return '%f' % (value/(1<<(self.size//2)))
elif self.kind == 'fixedps':
if value > (1<<self.size)//2:
value -= (1<<self.size)
return '%f' % (value/(1<<(self.size//2)))
class EnumValue(RNNObject):
'''Rules-ng enumeration value'''
value = None
def __init__(self, parent, **attr):
RNNObject.__init__(self, parent, **attr)
if self.name is None:
raise ValueEror('Value must have name')
self.value = attr['value']
class Enum(RNNObject, Type):
'''Rules-ng enumeration'''
values_by_value = None # ordered dict of EnumValues for fast lookup
values_by_name = None # ordered dict of EnumValues for fast lookup
def __init__(self, parent, **attr):
RNNObject.__init__(self, parent, **attr)
self.values_by_name = OrderedDict()
self.values_by_value = OrderedDict()
def add_child(self, child):
if isinstance(child, EnumValue):
self.values_by_value[child.value] = child
self.values_by_name[child.name] = child
return True
return False
def describe(self, value):
'''
Short description of the value.
Look up value, return either the name of the enum value that has the value, or an hexadecimal value.'''
try:
return self.values_by_value[value].name
except KeyError:
return '0x%x' % value
class BitField(TypedValue, RNNObject):
'''Rules-ng bitfield description'''
high = None
low = None
def __init__(self, parent, **attr):
RNNObject.__init__(self, parent, **attr)
TypedValue.__init__(self, **attr)
if 'high' in attr and 'low' in attr:
if 'pos' in attr:
raise ValueError('Cannot specify both low, high and pos for bitfield')
self.high = attr['high']
self.low = attr['low']
elif 'pos' in attr:
self.high = self.low = attr['pos']
else:
raise ValueError('bitfield has neither low/high nor pos')
self.shr = int(attr.get('shr', '0'))
# derived attribute
self.size = self.high - self.low + 1
@property
def mask(self):
'''Return mask for this bit field'''
return ((1<<(self.high-self.low+1))-1) << self.low
def extract(self, value):
'''Extract this bit field from a value'''
return ((value >> self.low) & ((1<<(self.high-self.low+1))-1)) << self.shr
def fill(self, value):
'''Return value filled into this bit field'''
rv = (value << self.low)
if rv != (rv & self.mask):
raise ValueError('Value %i doesn\'t fit in mask %s' % (value, self.name))
return rv
def describe(self, value):
return self.type.describe(self.extract(value))
class BitSet(RNNObject, Type):
'''Rules-ng bitset description'''
bitfields = None # List of bit fields
masked = None # does this register use masks for state groups?
def __init__(self, parent, **attr):
RNNObject.__init__(self, parent, **attr)
self.bitfields = []
self.size = 0
self.masked = attr.get('masked', False)
def add_child(self, child):
if isinstance(child, BitField):
self.bitfields.append(child)
# keep track of current size
self.size = max(child.high+1, self.size)
return True
return False
def describe(self, value):
'''
Short description of the value.
'''
fields = []
if self.masked:
# first, find out which fields are to be modified
unmasked = set()
mask_fields = set()
residue = 0xffffffff
for field in self.bitfields:
if field.name.endswith(MASK_FIELD_SUFFIX) and field.size == 1:
mask_fields.add(field.name)
mask_fields.add(field.name[0:-len(MASK_FIELD_SUFFIX)])
if field.extract(value) == 0:
unmasked.add(field.name[0:-len(MASK_FIELD_SUFFIX)])
residue &= ~field.mask
# then log fields that are unmaked
for field in self.bitfields:
if field.name in unmasked or (field.name not in mask_fields):
fields.append(field.name + '=' + field.describe(value))
residue &= ~field.mask
residue |= value & field.mask
residue ^= value # residue are the bits that are not predicted by the masks
else:
residue = value
for field in self.bitfields:
fields.append(field.name + '=' + field.describe(value))
residue &= ~field.mask
rv = ','.join(fields)
if residue != 0:
rv += '(residue:0x%08x)' % residue
return rv
#-------------------------------------------------------------------------
# Rules database objects: Memory ranges and registers
class Stripe(RNNObject, Range):
'''Rules-ng stripe description.
A stripe repeats its contents `length` times (defaults to 1),
`stride` units apart (defaults to 0).
'''
offset = 0 # Offset into parent
length = 1
stride = 0
# can contain registers, arrays and other stripes
def __init__(self, parent, **attr):
RNNObject.__init__(self, parent, **attr)
Range.__init__(self, **attr)
self.offset = attr.get('offset', 0)
self.length = attr.get('length', 1)
self.stride = attr.get('stride', 0)
if self.length != 1 and self.stride == 0:
raise ValueError('Stripe of length >1 cannot have stride 0')
def add_child(self, child):
return Range.add_child(self, child)
def lookup_address(self, addr, variants=None):
if self.length == 0:
raise KeyError('Lookup in unknown-length stripes currently not supported')
# For a stripe this is pretty complicated, at least to do efficiently
# There are `length` copies of the stripe at offset `stride`
# Will need to check all the contents at every offset
# 0*stride .. length*stride
addr -= self.offset
if addr < 0:
raise KeyError('Address not found: 0x%x' % addr)
# bounds -> bounds of this subelement replicated over total length of stripe
# elem_bounds -> bounds of the first instance of this subelement
for bounds,elem_bounds,range in izip(self.child_total_bounds, self.child_elem_bounds, self.contents):
# Check only child ranges whose interval matches the address
if interval_check(bounds, addr):
# XXX this is not always necessary
# can determine from elem_bounds which elements this hits
for i in xrange(0, self.length):
sub_addr = addr - i*self.stride
pathcomp = [(self, i if self.length>1 else None)]
try:
return pathcomp + range.lookup_address(sub_addr)
except KeyError:
pass
raise KeyError('Address not found: 0x%x' % addr)
def compute_bounds(self):
# for each child, compute bounds
if self.length == 0:
# Stripes of unknown length have unclear upper bound
return (self.offset, INTERVAL_MAX)
self.child_total_bounds = []
self.child_elem_bounds = []
for obj in self.contents:
bounds = obj.compute_bounds()
# now replicate bounds over stride times length
# subtract one from length because the last stride won't span
# the entire length
#
# |-----------| index 0
# |............| index 1
# |.............| index 2
#
# \______________________/
stagger_bounds = interval_union(bounds,
interval_add(bounds, self.stride * (self.length - 1)))
self.child_total_bounds.append(stagger_bounds)
self.child_elem_bounds.append(bounds)
#if bounds != stagger_bounds:
# print('sub %x %x %x %x' % (bounds+ stagger_bounds))
total_bounds = EMPTY_INTERVAL
for bounds in self.child_total_bounds:
total_bounds = interval_union(bounds, total_bounds)
#print("total:", total_bounds)
return interval_add(total_bounds, self.offset)
class Array(RNNObject, Range):
'''Rules-ng array description'''
offset = None # Offset into parent
length = None
stride = None
def __init__(self, parent, **attr):
RNNObject.__init__(self, parent, **attr)
Range.__init__(self, **attr)
self.offset = attr['offset']
self.length = attr['length']
self.stride = attr['stride']
if self.length < 1:
raise ValueError('Length of array cannot be 0 or negative')
def add_child(self, child):
return Range.add_child(self, child)
def lookup_address(self, addr, variants=None):
addr -= self.offset
if addr < 0:
raise KeyError
(r,d) = (addr % self.stride, addr // self.stride)
if d >= self.length:
raise KeyError
sub_addr = addr - d*self.stride
#print(i,self.stride,'%x'%sub_addr)
pathcomp = [(self, d)]
for range in self.contents:
try:
return pathcomp + range.lookup_address(sub_addr, variants)
except KeyError:
pass
raise KeyError('Address not found: 0x%x' % addr)
def compute_bounds(self):
# Trivial
return (self.offset, self.offset + self.stride * self.length)
class Register(RNNObject, Range, TypedValue):
'''
One rules-ng register description.
'''
offset = None # Offset into parent
length = None
stride = None
size = None # size, in bits
def __init__(self, parent, **attr):
RNNObject.__init__(self, parent, **attr)
Range.__init__(self, **attr)
TypedValue.__init__(self, **attr)
self.size = attr['size']
self.offset = attr['offset']
self.length = attr.get('length', 1)
self.stride = attr.get('stride', self.size // 8)
def add_child(self, child):
return TypedValue.add_child(self, child)
def lookup_address(self, addr, variants=None):
addr -= self.offset
if addr < 0:
raise KeyError
(r,d) = (addr % self.stride, addr // self.stride)
#print('r,d',r,d)
if d < self.length and r < (self.size//8):
# XXX also need byte offset in cell?
if self.length != 1:
num = d
else:
num = None
return [(self,num)]
else:
raise KeyError
def compute_bounds(self):
# Trivial
lower_bound = self.offset
upper_bound = self.offset + self.stride * (self.length-1) + self.size//8
return (lower_bound, upper_bound)
def describe(self, value):
return self.type.describe(value)
class Domain(RNNObject, Range, Type):
'''Rules-ng domain description'''
def __init__(self, parent, **attr):
RNNObject.__init__(self, parent, **attr)
Range.__init__(self, **attr)
if self.name is None:
raise ValueError('Domain must have a name')
def lookup_address(self, addr, variants=None):
'''
Look up address within domain.
This will return a path to the innermost register reference, or
raise KeyError if the address was not found.
The path consists of a list of tuples (container, index)
where container is the domain, stripe, array or register and index is the index
within this object. The index will be None in case of single-element
containers.
'''
if variants is None:
variants = (None,None)
for range in self.contents:
if (range.variants,range.varset) != variants:
continue
try:
return range.lookup_address(addr, variants)
except KeyError:
pass
raise KeyError('Address not found: 0x%x' % addr)
def add_child(self, child):
if isinstance(child, Type): # Types can be defined here
return True
return Range.add_child(self, child)
def compute_bounds(self):
# Bounds of a domain are simply the union of all child intervals
bounds = EMPTY_INTERVAL
for obj in self.contents:
bounds = interval_union(bounds, obj.compute_bounds())
return bounds
def describe(self, pos):
'''DOMAIN can also be used as value'''
try:
path = self.lookup_address(pos)
return format_path(path)
except KeyError:
return '*0x%x' % pos # prefix address with *
class Database(RNNObject):
'''Object representing RNN database'''
domains = None # Ordered dictionary of domains by name
types = None # Ordered dictionary of types by name
def __init__(self, parent, **attr):
RNNObject.__init__(self, parent, **attr)
self.domains = OrderedDict()
self.types = OrderedDict()
def lookup_domain(self, domain_name):
return self.domains[domain_name]
def lookup_type(self, type_name):
return self.types[type_name]
def add_child(self, child):
if isinstance(child, Domain):
self.domains[child.name] = child
return True
if isinstance(child, Type): # Types can be defined here
return True
if isinstance(child, CopyrightDummy): # Copyrights can be defined here
return True
return False
class CopyrightDummy(RNNObject):
'''Dummy object representing RNN copyright info'''
def add_child(self, child):
if isinstance(child, CopyrightDummy): # More copyrights can be defined here
return True
return False
#-------------------------------------------------------------------------
# Parsing
class Tag:
'''Rules-ng XML tags'''
DATABASE = ns+'database'
ENUM = ns+'enum'
VALUE = ns+'value'
BITSET = ns+'bitset'
BITFIELD = ns+'bitfield'
DOMAIN = ns+'domain'
STRIPE = ns+'stripe'
ARRAY = ns+'array'
REG8 = ns+'reg8'
REG16 = ns+'reg16'
REG32 = ns+'reg32'
REG64 = ns+'reg64'
GROUP = ns+'group'
USE_GROUP = ns+'use-group'
BRIEF = ns+'brief'
DOC = ns+'doc'
IMPORT = ns+'import'
COPYRIGHT = ns+'copyright'
AUTHOR = ns+'author'
LICENSE = ns+'license'
REG_TO_SIZE = {REG8:8, REG16:16, REG32:32, REG64:64}
visit = {
Tag.DATABASE: Database,
Tag.ENUM: Enum,
Tag.VALUE: EnumValue,
Tag.BITSET: BitSet,
Tag.BITFIELD: BitField,
Tag.DOMAIN: Domain,
Tag.STRIPE: Stripe,
Tag.ARRAY: Array,
Tag.REG8: Register,
Tag.REG16: Register,
Tag.REG32: Register,
Tag.REG64: Register,
Tag.COPYRIGHT: CopyrightDummy,
Tag.AUTHOR: CopyrightDummy,
Tag.LICENSE: CopyrightDummy,
# TODO
# Tag.GROUP
# Tag.USE_GROUP
}
def intdh(s):
'''
Parse a rules-ng integer.
This does not recognize octal unlike int(s, 0)
'''
if s.startswith("0x"):
return int(s[2:], 16)
else:
return int(s)
def visit_xml(syms, type_resolve_list, parent, root, imports):
'''
Visit an xml element, build an in-memory object for it,
and add it to the in-memory parent object.
'''
# These take text and add this to the parents .brief or .doc
if root.tag == Tag.BRIEF:
parent.brief += root.text
return None
if root.tag == Tag.DOC:
parent.doc += root.text
return None
if root.tag == Tag.IMPORT:
imports.append(root.attrib['file'])
return None
# Pre-process attributes
attr = {}
for key,value in root.attrib.iteritems():
if key in ['stride', 'offset', 'length', 'value', 'pos', 'low', 'high']:
attr[key] = intdh(value)
elif key in ['masked']:
attr[key] = XML_BOOL[value]
else:
attr[key] = value
if root.tag in Tag.REG_TO_SIZE:
attr['size'] = Tag.REG_TO_SIZE[root.tag]
# Instantiate object from tag
if root.tag == Tag.DOMAIN and attr['name'] in syms:
# allow re-opening a domain that was created before, to add more
# state
obj = syms[attr['name']]
assert(isinstance(obj, Domain))
else:
obj = visit[root.tag](parent, **attr)
# Add this object to parent object
if parent is not None and obj is not None:
if not parent.add_child(obj):
raise ValueError('Cannot add child %s to %s' %
(obj.__class__.__name__, parent.__class__.__name__))
# If a type, add object to symbol table
if isinstance(obj, Type):
if obj.name not in syms:
syms[obj.name] = obj
else:
raise ValueError('Duplicate type name %s' % obj.name)
# If it has a type attribute, add it to the type resolve list
if isinstance(obj, TypedValue):
type_resolve_list.append(obj)
# Visit children
for child in root.iterchildren(tag=ET.Element):
visit_xml(syms, type_resolve_list, obj, child, imports)
return obj
def parse_rng(f, import_path=''):
'''
Parse a rules-ng-ng XML tree from a file object.
@returns a Database object
'''
# XXX Proper data structure for memory map would be an interval tree
# After all, stripes can overlap, and arrays can be within arrays
# Current solution is to loop over the entire domain, nesting into
# stripes, arrays when necessary (when it is possible that it
# spans the provided value)
# It is also possible to build a hash table, but this may take a lot of memory
# and take long to build as stripes can potentially be huge, even unknown sized
tree = ET.parse(f)
root = tree.getroot()
# build types symbol table, to be able to look up types by name
# and patch them into the right place in the second pass
type_table = OrderedDict()
type_resolve_list = []
imports = []
retval = visit_xml(type_table, type_resolve_list, None, root, imports)
if not isinstance(retval, Database):
raise ValueError('Top-level element must be database')
# Load imports
already_imported = set()
while imports:
filename = imports.pop()
if filename in already_imported:
continue
with open(path.join(import_path,filename), 'r') as f:
tree = ET.parse(f)
root = tree.getroot()
# import, merging duplicate domains
visit_xml(type_table, type_resolve_list, None, root, imports)
# add types list to toplevel database
retval.types = type_table
# Resolve types pass
for obj in type_resolve_list:
if obj.type is None:
# The default is "bitset" if there are inline <bitfield> tags present
# "enum" if there are inline <value> tags present
# (this will have created an internal anonymous type)
# "boolean" if this value has width 1
# otherwise "hex"
if obj.anon_type is not None:
obj.type = obj.anon_type
elif obj.size == 1:
obj.type = BaseType('boolean', 1)
else:
obj.type = BaseType('hex', obj.size)
elif obj.type in BaseType.TYPES:
obj.type = BaseType(obj.type, obj.size)
else:
try:
obj.type = type_table[obj.type]
except KeyError:
raise ValueError('Unknown type %s' % obj.type)
# Propagate bounds (from inside out) for stripes
for domain in retval.domains.itervalues():
bounds = domain.compute_bounds()
#print(bounds)
return retval
def parse_rng_file(filename):
import_path = path.dirname(filename)
with open(filename, 'r') as f:
return parse_rng(f, import_path)
def format_path(path):
'''Format path into state space as string'''
retval = []
for obj,idx in path:
if idx is not None:
retval.append('%s[%i]' % (obj.name,idx))
else:
retval.append(obj.name)
return '.'.join(retval)
#-------------------------------------------------------------------------
# Test
def main():
import argparse
parser = argparse.ArgumentParser(description='Parse rules-ng-ng test.')
parser.add_argument('input', metavar='INFILE', type=str,
help='RNG file')
args = parser.parse_args()
r = parse_rng_file(args.input)
d = r.lookup_domain('VIVS')
path = d.lookup_address(0x100)
print(format_path(path))
path = d.lookup_address(0x604)
print(format_path(path))
path = d.lookup_address(0x68C)
print(format_path(path))
path = d.lookup_address(0x6A8)
print(format_path(path))
path = d.lookup_address(0x2444)
print(format_path(path))
path = d.lookup_address(0x10850)
print(format_path(path))
path = d.lookup_address(0x4100)
print(format_path(path))
if __name__ == '__main__':
main()
|
9502007cf2a5467a1d43d008b372fda736eb5a90
|
17af9c355b71a8d92dbfda9d57e335778aef36b0
|
/sklego/notinstalled.py
|
165893ed61d1b981dc86fde0d20af9114027d47f
|
[
"MIT"
] |
permissive
|
koaning/scikit-lego
|
34aea637a190facaca0a34b898af66f1184b23b7
|
c1d413e8ae24350527f3b5bafa7d55b82e95c0cb
|
refs/heads/main
| 2023-08-30T03:45:58.479238
| 2023-07-18T19:07:12
| 2023-07-18T19:07:12
| 166,836,939
| 1,067
| 133
|
MIT
| 2023-09-08T07:30:45
| 2019-01-21T15:30:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,592
|
py
|
notinstalled.py
|
KNOWN_PACKAGES = {"cvxpy": {"version": ">=1.0.24", "extra_name": "cvxpy"}}
class NotInstalledPackage:
"""
Class to gracefully catch ImportErrors for modules and packages that are not installed
:param package_name (str): Name of the package you want to load
:param version (str, Optional): Version of the package
Usage:
>>> try:
... import thispackagedoesnotexist as package
... except ImportError:
... from sklego.notinstalled import NotInstalledPackage
... package = NotInstalledPackage("thispackagedoesnotexist")
"""
def __init__(self, package_name: str, version: str = None):
self.package_name = package_name
package_info = KNOWN_PACKAGES.get(package_name, {})
self.version = version if version else package_info.get("version", "")
extra_name = package_info.get("extra_name", None)
self.pip_message = (
(
f"Install extra requirement {package_name} using "
+ f"`python -m pip install scikit-lego[{extra_name}]` or "
+ "`python -m pip install scikit-lego[all]`. "
+ "For more information, check the 'Dependency installs' section of the installation docs at "
+ "https://scikit-lego.readthedocs.io/en/latest/install.html"
)
if extra_name
else ""
)
def __getattr__(self, name):
raise ImportError(
f"The package {self.package_name}{self.version} is not installed. "
+ self.pip_message
)
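# Illustrative sketch (not part of the original module): the ImportError is
# deferred until an attribute of the placeholder is actually accessed, so a
# module that builds a NotInstalledPackage can still be imported normally.
if __name__ == "__main__":
    package = NotInstalledPackage("thispackagedoesnotexist")
    try:
        package.fit  # any attribute access raises the deferred ImportError
    except ImportError as exc:
        print(exc)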
|
ac14564422f6c69e551bcefc0af4627df1ee53f7
|
3ab0ce5a37683744fca77c0ee7172eea7b839feb
|
/galpy/potential/EllipsoidalPotential.py
|
0be932e3778e7cc235e5bacf90c7cf9731e250fa
|
[
"BSD-3-Clause"
] |
permissive
|
jobovy/galpy
|
8ee6c00a2796e6bdb920625ce7c5cb32b47b5bc9
|
a46619fd4f5979acfccad23f4d57503033f440c5
|
refs/heads/main
| 2023-08-25T04:18:39.588870
| 2023-08-14T02:34:26
| 2023-08-14T02:34:26
| 2,375,854
| 182
| 119
|
BSD-3-Clause
| 2023-09-11T03:28:59
| 2011-09-13T03:20:30
|
Python
|
UTF-8
|
Python
| false
| false
| 22,607
|
py
|
EllipsoidalPotential.py
|
###############################################################################
# EllipsoidalPotential.py: base class for potentials corresponding to
# density profiles that are stratified on
# ellipsoids:
#
# \rho(x,y,z) ~ \rho(m)
#
# with m^2 = x^2+y^2/b^2+z^2/c^2
#
###############################################################################
import hashlib
import numpy
from scipy import integrate
from ..util import _rotate_to_arbitrary_vector, conversion, coords
from .Potential import Potential, check_potential_inputs_not_arrays
class EllipsoidalPotential(Potential):
"""Base class for potentials corresponding to density profiles that are stratified on ellipsoids:
.. math::
\\rho(x,y,z) \\equiv \\rho(m^2)
where :math:`m^2 = x^2+y^2/b^2+z^2/c^2`. Note that :math:`b` and :math:`c` are defined to be the axis ratios (rather than using :math:`m^2 = x^2/a^2+y^2/b^2+z^2/c^2` as is common).
Implement a specific density distribution with this form by inheriting from this class and defining functions ``_mdens(self,m)`` (the density as a function of ``m``), ``_mdens_deriv(self,m)`` (the derivative of the density as a function of ``m``), and ``_psi(self,m)``, which is:
.. math::
\\psi(m) = -\\int_{m^2}^\\infty d m^2 \\rho(m^2)
See PerfectEllipsoidPotential for an example and `Merritt & Fridman (1996) <http://adsabs.harvard.edu/abs/1996ApJ...460..136M>`_ for the formalism.
"""
def __init__(
self,
amp=1.0,
b=1.0,
c=1.0,
zvec=None,
pa=None,
glorder=50,
ro=None,
vo=None,
amp_units=None,
):
"""
NAME:
__init__
PURPOSE:
initialize a ellipsoidal potential
INPUT:
amp - amplitude to be applied to the potential (default: 1); can be a Quantity with units that depend on the specific spheroidal potential
b - y-to-x axis ratio of the density
c - z-to-x axis ratio of the density
zvec= (None) If set, a unit vector that corresponds to the z axis
pa= (None) If set, the position angle of the x axis (rad or Quantity)
glorder= (50) if set, compute the relevant force and potential integrals with Gaussian quadrature of this order
amp_units - ('mass', 'velocity2', 'density') type of units that amp should have if it has units (passed to Potential.__init__)
ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)
OUTPUT:
(none)
HISTORY:
2018-08-06 - Started - Bovy (UofT)
"""
Potential.__init__(self, amp=amp, ro=ro, vo=vo, amp_units=amp_units)
# Setup axis ratios
self._b = b
self._c = c
self._b2 = self._b**2.0
self._c2 = self._c**2.0
self._force_hash = None
# Setup rotation
self._setup_zvec_pa(zvec, pa)
# Setup integration
self._setup_gl(glorder)
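        # flag as non-axisymmetric when the ellipsoid is rotated (zvec/pa given)
        # or when b != 1; flattening in z alone (c != 1) preserves axisymmetry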
if not self._aligned or numpy.fabs(self._b - 1.0) > 10.0**-10.0:
self.isNonAxi = True
return None
def _setup_zvec_pa(self, zvec, pa):
if not pa is None:
pa = conversion.parse_angle(pa)
if zvec is None and (pa is None or numpy.fabs(pa) < 10.0**-10.0):
self._aligned = True
else:
self._aligned = False
if not pa is None:
pa_rot = numpy.array(
[
[numpy.cos(pa), numpy.sin(pa), 0.0],
[-numpy.sin(pa), numpy.cos(pa), 0.0],
[0.0, 0.0, 1.0],
]
)
else:
pa_rot = numpy.eye(3)
if not zvec is None:
if not isinstance(zvec, numpy.ndarray):
zvec = numpy.array(zvec)
zvec /= numpy.sqrt(numpy.sum(zvec**2.0))
zvec_rot = _rotate_to_arbitrary_vector(
numpy.array([[0.0, 0.0, 1.0]]), zvec, inv=True
)[0]
else:
zvec_rot = numpy.eye(3)
self._rot = numpy.dot(pa_rot, zvec_rot)
return None
def _setup_gl(self, glorder):
self._glorder = glorder
if self._glorder is None:
self._glx, self._glw = None, None
else:
self._glx, self._glw = numpy.polynomial.legendre.leggauss(self._glorder)
# Interval change
self._glx = 0.5 * self._glx + 0.5
self._glw *= 0.5
return None
@check_potential_inputs_not_arrays
def _evaluate(self, R, z, phi=0.0, t=0.0):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2016-05-30 - Started - Bovy (UofT)
"""
if not self.isNonAxi:
phi = 0.0
x, y, z = coords.cyl_to_rect(R, phi, z)
if numpy.isinf(R):
y = 0.0
if self._aligned:
return self._evaluate_xyz(x, y, z)
else:
xyzp = numpy.dot(self._rot, numpy.array([x, y, z]))
return self._evaluate_xyz(xyzp[0], xyzp[1], xyzp[2])
def _evaluate_xyz(self, x, y, z):
"""Evaluation of the potential as a function of (x,y,z) in the
aligned coordinate frame"""
return (
2.0
* numpy.pi
* self._b
* self._c
* _potInt(
x, y, z, self._psi, self._b2, self._c2, glx=self._glx, glw=self._glw
)
)
@check_potential_inputs_not_arrays
def _Rforce(self, R, z, phi=0.0, t=0.0):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2016-06-09 - Written - Bovy (UofT)
"""
if not self.isNonAxi:
phi = 0.0
x, y, z = coords.cyl_to_rect(R, phi, z)
# Compute all rectangular forces
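        # the md5 hash of the Cartesian position serves as a cache key, so that
        # _Rforce, _phitorque and _zforce evaluated at the same (x,y,z) reuse
        # the three rectangular force integrals instead of recomputing them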
new_hash = hashlib.md5(numpy.array([x, y, z])).hexdigest()
if new_hash == self._force_hash:
Fx = self._cached_Fx
Fy = self._cached_Fy
Fz = self._cached_Fz
else:
if self._aligned:
xp, yp, zp = x, y, z
else:
xyzp = numpy.dot(self._rot, numpy.array([x, y, z]))
xp, yp, zp = xyzp[0], xyzp[1], xyzp[2]
Fx = self._force_xyz(xp, yp, zp, 0)
Fy = self._force_xyz(xp, yp, zp, 1)
Fz = self._force_xyz(xp, yp, zp, 2)
self._force_hash = new_hash
self._cached_Fx = Fx
self._cached_Fy = Fy
self._cached_Fz = Fz
if not self._aligned:
Fxyz = numpy.dot(self._rot.T, numpy.array([Fx, Fy, Fz]))
Fx, Fy = Fxyz[0], Fxyz[1]
return numpy.cos(phi) * Fx + numpy.sin(phi) * Fy
@check_potential_inputs_not_arrays
def _phitorque(self, R, z, phi=0.0, t=0.0):
"""
NAME:
_phitorque
PURPOSE:
evaluate the azimuthal torque for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the azimuthal torque
HISTORY:
2016-06-09 - Written - Bovy (UofT)
"""
if not self.isNonAxi:
phi = 0.0
x, y, z = coords.cyl_to_rect(R, phi, z)
# Compute all rectangular forces
new_hash = hashlib.md5(numpy.array([x, y, z])).hexdigest()
if new_hash == self._force_hash:
Fx = self._cached_Fx
Fy = self._cached_Fy
Fz = self._cached_Fz
else:
if self._aligned:
xp, yp, zp = x, y, z
else:
xyzp = numpy.dot(self._rot, numpy.array([x, y, z]))
xp, yp, zp = xyzp[0], xyzp[1], xyzp[2]
Fx = self._force_xyz(xp, yp, zp, 0)
Fy = self._force_xyz(xp, yp, zp, 1)
Fz = self._force_xyz(xp, yp, zp, 2)
self._force_hash = new_hash
self._cached_Fx = Fx
self._cached_Fy = Fy
self._cached_Fz = Fz
if not self._aligned:
Fxyz = numpy.dot(self._rot.T, numpy.array([Fx, Fy, Fz]))
Fx, Fy = Fxyz[0], Fxyz[1]
return R * (-numpy.sin(phi) * Fx + numpy.cos(phi) * Fy)
@check_potential_inputs_not_arrays
def _zforce(self, R, z, phi=0.0, t=0.0):
"""
NAME:
_zforce
PURPOSE:
evaluate the vertical force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the vertical force
HISTORY:
2016-06-09 - Written - Bovy (UofT)
"""
if not self.isNonAxi:
phi = 0.0
x, y, z = coords.cyl_to_rect(R, phi, z)
# Compute all rectangular forces
new_hash = hashlib.md5(numpy.array([x, y, z])).hexdigest()
if new_hash == self._force_hash:
Fx = self._cached_Fx
Fy = self._cached_Fy
Fz = self._cached_Fz
else:
if self._aligned:
xp, yp, zp = x, y, z
else:
xyzp = numpy.dot(self._rot, numpy.array([x, y, z]))
xp, yp, zp = xyzp[0], xyzp[1], xyzp[2]
Fx = self._force_xyz(xp, yp, zp, 0)
Fy = self._force_xyz(xp, yp, zp, 1)
Fz = self._force_xyz(xp, yp, zp, 2)
self._force_hash = new_hash
self._cached_Fx = Fx
self._cached_Fy = Fy
self._cached_Fz = Fz
if not self._aligned:
Fxyz = numpy.dot(self._rot.T, numpy.array([Fx, Fy, Fz]))
Fz = Fxyz[2]
return Fz
def _force_xyz(self, x, y, z, i):
"""Evaluation of the i-th force component as a function of (x,y,z)"""
return (
-4.0
* numpy.pi
* self._b
* self._c
* _forceInt(
x,
y,
z,
lambda m: self._mdens(m),
self._b2,
self._c2,
i,
glx=self._glx,
glw=self._glw,
)
)
@check_potential_inputs_not_arrays
def _R2deriv(self, R, z, phi=0.0, t=0.0):
"""
NAME:
_R2deriv
PURPOSE:
evaluate the second radial derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second radial derivative
HISTORY:
2016-06-15 - Written - Bovy (UofT)
"""
if not self.isNonAxi:
phi = 0.0
x, y, z = coords.cyl_to_rect(R, phi, z)
if not self._aligned:
raise NotImplementedError(
"2nd potential derivatives of TwoPowerTriaxialPotential not implemented for rotated coordinated frames (non-trivial zvec and pa); use RotateAndTiltWrapperPotential for this functionality instead"
)
phixx = self._2ndderiv_xyz(x, y, z, 0, 0)
phixy = self._2ndderiv_xyz(x, y, z, 0, 1)
phiyy = self._2ndderiv_xyz(x, y, z, 1, 1)
return (
numpy.cos(phi) ** 2.0 * phixx
+ numpy.sin(phi) ** 2.0 * phiyy
+ 2.0 * numpy.cos(phi) * numpy.sin(phi) * phixy
)
@check_potential_inputs_not_arrays
def _Rzderiv(self, R, z, phi=0.0, t=0.0):
"""
NAME:
_Rzderiv
PURPOSE:
evaluate the mixed radial, vertical derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the mixed radial, vertical derivative
HISTORY:
2016-06-15 - Written - Bovy (UofT)
"""
if not self.isNonAxi:
phi = 0.0
x, y, z = coords.cyl_to_rect(R, phi, z)
if not self._aligned:
raise NotImplementedError(
"2nd potential derivatives of TwoPowerTriaxialPotential not implemented for rotated coordinated frames (non-trivial zvec and pa; use RotateAndTiltWrapperPotential for this functionality instead)"
)
phixz = self._2ndderiv_xyz(x, y, z, 0, 2)
phiyz = self._2ndderiv_xyz(x, y, z, 1, 2)
return numpy.cos(phi) * phixz + numpy.sin(phi) * phiyz
@check_potential_inputs_not_arrays
def _z2deriv(self, R, z, phi=0.0, t=0.0):
"""
NAME:
_z2deriv
PURPOSE:
evaluate the second vertical derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second vertical derivative
HISTORY:
2016-06-15 - Written - Bovy (UofT)
"""
if not self.isNonAxi:
phi = 0.0
x, y, z = coords.cyl_to_rect(R, phi, z)
if not self._aligned:
raise NotImplementedError(
"2nd potential derivatives of TwoPowerTriaxialPotential not implemented for rotated coordinated frames (non-trivial zvec and pa; use RotateAndTiltWrapperPotential for this functionality instead)"
)
return self._2ndderiv_xyz(x, y, z, 2, 2)
@check_potential_inputs_not_arrays
def _phi2deriv(self, R, z, phi=0.0, t=0.0):
"""
NAME:
_phi2deriv
PURPOSE:
evaluate the second azimuthal derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second azimuthal derivative
HISTORY:
2016-06-15 - Written - Bovy (UofT)
"""
if not self.isNonAxi:
phi = 0.0
x, y, z = coords.cyl_to_rect(R, phi, z)
if not self._aligned:
raise NotImplementedError(
"2nd potential derivatives of TwoPowerTriaxialPotential not implemented for rotated coordinated frames (non-trivial zvec and pa; use RotateAndTiltWrapperPotential for this functionality instead)"
)
Fx = self._force_xyz(x, y, z, 0)
Fy = self._force_xyz(x, y, z, 1)
phixx = self._2ndderiv_xyz(x, y, z, 0, 0)
phixy = self._2ndderiv_xyz(x, y, z, 0, 1)
phiyy = self._2ndderiv_xyz(x, y, z, 1, 1)
return R**2.0 * (
numpy.sin(phi) ** 2.0 * phixx
+ numpy.cos(phi) ** 2.0 * phiyy
- 2.0 * numpy.cos(phi) * numpy.sin(phi) * phixy
) + R * (numpy.cos(phi) * Fx + numpy.sin(phi) * Fy)
@check_potential_inputs_not_arrays
def _Rphideriv(self, R, z, phi=0.0, t=0.0):
"""
NAME:
_Rphideriv
PURPOSE:
evaluate the mixed radial, azimuthal derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the mixed radial, azimuthal derivative
HISTORY:
2016-06-15 - Written - Bovy (UofT)
"""
if not self.isNonAxi:
phi = 0.0
x, y, z = coords.cyl_to_rect(R, phi, z)
if not self._aligned:
raise NotImplementedError(
"2nd potential derivatives of TwoPowerTriaxialPotential not implemented for rotated coordinated frames (non-trivial zvec and pa; use RotateAndTiltWrapperPotential for this functionality instead)"
)
Fx = self._force_xyz(x, y, z, 0)
Fy = self._force_xyz(x, y, z, 1)
phixx = self._2ndderiv_xyz(x, y, z, 0, 0)
phixy = self._2ndderiv_xyz(x, y, z, 0, 1)
phiyy = self._2ndderiv_xyz(x, y, z, 1, 1)
return (
R * numpy.cos(phi) * numpy.sin(phi) * (phiyy - phixx)
+ R * numpy.cos(2.0 * phi) * phixy
+ numpy.sin(phi) * Fx
- numpy.cos(phi) * Fy
)
@check_potential_inputs_not_arrays
def _phizderiv(self, R, z, phi=0.0, t=0.0):
"""
NAME:
_phizderiv
PURPOSE:
evaluate the mixed azimuthal, vertical derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the mixed radial, azimuthal derivative
HISTORY:
2021-04-30 - Written - Bovy (UofT)
"""
if not self.isNonAxi:
phi = 0.0
x, y, z = coords.cyl_to_rect(R, phi, z)
if not self._aligned:
raise NotImplementedError(
"2nd potential derivatives of TwoPowerTriaxialPotential not implemented for rotated coordinated frames (non-trivial zvec and pa; use RotateAndTiltWrapperPotential for this functionality instead)"
)
phixz = self._2ndderiv_xyz(x, y, z, 0, 2)
phiyz = self._2ndderiv_xyz(x, y, z, 1, 2)
return R * (numpy.cos(phi) * phiyz - numpy.sin(phi) * phixz)
def _2ndderiv_xyz(self, x, y, z, i, j):
"""General 2nd derivative of the potential as a function of (x,y,z)
in the aligned coordinate frame"""
return (
4.0
* numpy.pi
* self._b
* self._c
* _2ndDerivInt(
x,
y,
z,
lambda m: self._mdens(m),
lambda m: self._mdens_deriv(m),
self._b2,
self._c2,
i,
j,
glx=self._glx,
glw=self._glw,
)
)
@check_potential_inputs_not_arrays
def _dens(self, R, z, phi=0.0, t=0.0):
"""
NAME:
_dens
PURPOSE:
evaluate the density for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the density
HISTORY:
2018-08-06 - Written - Bovy (UofT)
"""
x, y, z = coords.cyl_to_rect(R, phi, z)
if self._aligned:
xp, yp, zp = x, y, z
else:
xyzp = numpy.dot(self._rot, numpy.array([x, y, z]))
xp, yp, zp = xyzp[0], xyzp[1], xyzp[2]
m = numpy.sqrt(xp**2.0 + yp**2.0 / self._b2 + zp**2.0 / self._c2)
return self._mdens(m)
def _mass(self, R, z=None, t=0.0):
"""
NAME:
_mass
PURPOSE:
evaluate the mass within R (and z) for this potential; if z=None, integrate to z=inf
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
t - time
OUTPUT:
the mass enclosed
HISTORY:
2021-03-08 - Written - Bovy (UofT)
"""
if not z is None:
raise AttributeError # Hack to fall back to general
return (
4.0
* numpy.pi
* self._b
* self._c
* integrate.quad(lambda m: m**2.0 * self._mdens(m), 0, R)[0]
)
def OmegaP(self):
"""
NAME:
OmegaP
PURPOSE:
return the pattern speed
INPUT:
(none)
OUTPUT:
pattern speed
HISTORY:
2016-05-31 - Written - Bovy (UofT)
"""
return 0.0
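# Illustrative sketch (not part of the original galpy module): a minimal
# subclass with an assumed Gaussian density rho(m) = exp(-m^2), showing the
# three hooks the base class expects; for this density
# psi(m) = -int_{m^2}^inf rho d(m^2) = -exp(-m^2). The class name and the
# density are example assumptions only; e.g.
# _ExampleGaussianEllipsoidPotential(b=0.8, c=0.6) can then be evaluated with
# the generic methods above.
class _ExampleGaussianEllipsoidPotential(EllipsoidalPotential):
    def _mdens(self, m):
        # density stratified on surfaces of constant m
        return numpy.exp(-(m**2.0))

    def _mdens_deriv(self, m):
        # d rho / d m
        return -2.0 * m * numpy.exp(-(m**2.0))

    def _psi(self, m):
        # psi(m) = -\int_{m^2}^\infty rho(m^2) d(m^2)
        return -numpy.exp(-(m**2.0))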
def _potInt(x, y, z, psi, b2, c2, glx=None, glw=None):
r"""int_0^\infty [psi(m)-psi(\infy)]/sqrt([1+tau]x[b^2+tau]x[c^2+tau])dtau"""
def integrand(s):
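        # change of variables: s = 1/sqrt(1+tau), so tau in [0, inf) maps to
        # s in (0, 1] and the fixed Gauss-Legendre nodes glx on [0, 1] (or
        # scipy's quad over [0, 1]) can handle the semi-infinite integral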
t = 1 / s**2.0 - 1.0
return psi(
numpy.sqrt(x**2.0 / (1.0 + t) + y**2.0 / (b2 + t) + z**2.0 / (c2 + t))
) / numpy.sqrt((1.0 + (b2 - 1.0) * s**2.0) * (1.0 + (c2 - 1.0) * s**2.0))
if glx is None:
return integrate.quad(integrand, 0.0, 1.0)[0]
else:
return numpy.sum(glw * integrand(glx))
def _forceInt(x, y, z, dens, b2, c2, i, glx=None, glw=None):
"""Integral that gives the force in x,y,z"""
def integrand(s):
t = 1 / s**2.0 - 1.0
return (
dens(
numpy.sqrt(
x**2.0 / (1.0 + t) + y**2.0 / (b2 + t) + z**2.0 / (c2 + t)
)
)
* (
x / (1.0 + t) * (i == 0)
+ y / (b2 + t) * (i == 1)
+ z / (c2 + t) * (i == 2)
)
/ numpy.sqrt((1.0 + (b2 - 1.0) * s**2.0) * (1.0 + (c2 - 1.0) * s**2.0))
)
if glx is None:
return integrate.quad(integrand, 0.0, 1.0)[0]
else:
return numpy.sum(glw * integrand(glx))
def _2ndDerivInt(x, y, z, dens, densDeriv, b2, c2, i, j, glx=None, glw=None):
"""Integral that gives the 2nd derivative of the potential in x,y,z"""
def integrand(s):
t = 1 / s**2.0 - 1.0
m = numpy.sqrt(x**2.0 / (1.0 + t) + y**2.0 / (b2 + t) + z**2.0 / (c2 + t))
return (
densDeriv(m)
* (
x / (1.0 + t) * (i == 0)
+ y / (b2 + t) * (i == 1)
+ z / (c2 + t) * (i == 2)
)
* (
x / (1.0 + t) * (j == 0)
+ y / (b2 + t) * (j == 1)
+ z / (c2 + t) * (j == 2)
)
/ m
+ dens(m)
* (i == j)
* (
1.0 / (1.0 + t) * (i == 0)
+ 1.0 / (b2 + t) * (i == 1)
+ 1.0 / (c2 + t) * (i == 2)
)
) / numpy.sqrt((1.0 + (b2 - 1.0) * s**2.0) * (1.0 + (c2 - 1.0) * s**2.0))
if glx is None:
return integrate.quad(integrand, 0.0, 1.0)[0]
else:
return numpy.sum(glw * integrand(glx))
|
6a59b2169b7002d15dd52b6cca9499f4fbf9aa51
|
7a0e1ed4c4ae6cbfaba63cc38a3b64ac6029b9c5
|
/src/astral/location.py
|
21622807425fa44155699425df2ef30ce459e797
|
[
"Apache-2.0"
] |
permissive
|
sffjunkie/astral
|
68db1d7832ead79af3bb7d56f0a7fe5c17f3be30
|
bc6885b0685e6dc629f6d1131df26d8beea57e8d
|
refs/heads/master
| 2023-07-31T09:06:19.711829
| 2022-11-09T11:34:09
| 2022-11-09T11:34:09
| 65,539,122
| 204
| 54
|
Apache-2.0
| 2023-06-27T19:18:02
| 2016-08-12T08:58:17
|
Python
|
UTF-8
|
Python
| false
| false
| 31,305
|
py
|
location.py
|
import dataclasses
import datetime
try:
import zoneinfo
except ImportError:
from backports import zoneinfo # type: ignore
from typing import Any, Dict, Optional, Tuple, Union
import astral.moon
import astral.sun
from astral import (
Depression,
Elevation,
LocationInfo,
Observer,
SunDirection,
dms_to_float,
today,
)
class Location:
"""Provides access to information for single location."""
def __init__(self, info: Optional[LocationInfo] = None):
"""Initializes the Location with a LocationInfo object.
The tuple should contain items in the following order
================ =============
Field Default
================ =============
name Greenwich
region England
time zone name Europe/London
latitude 51.4733
longitude -0.0008333
================ =============
See the :attr:`timezone` property for a method of obtaining time zone
names
"""
self._location_info: LocationInfo
self._solar_depression: float = Depression.CIVIL.value
if not info:
self._location_info = LocationInfo(
"Greenwich", "England", "Europe/London", 51.4733, -0.0008333
)
else:
self._location_info = info
def __eq__(self, other: object) -> bool:
if type(other) is Location:
return self._location_info == other._location_info # type: ignore
return NotImplemented
def __repr__(self) -> str:
if self.region:
_repr = "%s/%s" % (self.name, self.region)
else:
_repr = self.name
return (
f"{_repr}, tz={self.timezone}, "
f"lat={self.latitude:0.02f}, "
f"lon={self.longitude:0.02f}"
)
@property
def info(self) -> LocationInfo:
return LocationInfo(
self.name,
self.region,
self.timezone,
self.latitude,
self.longitude,
)
@property
def observer(self) -> Observer:
return Observer(self.latitude, self.longitude, 0.0)
@property
def name(self) -> str:
return self._location_info.name
@name.setter
def name(self, name: str) -> None:
self._location_info = dataclasses.replace(self._location_info, name=name)
@property
def region(self) -> str:
return self._location_info.region
@region.setter
def region(self, region: str) -> None:
self._location_info = dataclasses.replace(self._location_info, region=region)
@property
def latitude(self) -> float:
"""The location's latitude
``latitude`` can be set either as a string or as a number
For strings they must be of the form
degrees°minutes'[N|S] e.g. 51°31'N
For numbers, positive numbers signify latitudes to the North.
"""
return self._location_info.latitude
@latitude.setter
def latitude(self, latitude: Union[float, str]) -> None:
self._location_info = dataclasses.replace(
self._location_info, latitude=dms_to_float(latitude, 90.0)
)
@property
def longitude(self) -> float:
"""The location's longitude.
``longitude`` can be set either as a string or as a number
For strings they must be of the form
degrees°minutes'[E|W] e.g. 51°31'W
For numbers, positive numbers signify longitudes to the East.
"""
return self._location_info.longitude
@longitude.setter
def longitude(self, longitude: Union[float, str]) -> None:
self._location_info = dataclasses.replace(
self._location_info, longitude=dms_to_float(longitude, 180.0)
)
@property
def timezone(self) -> str:
"""The name of the time zone for the location.
A list of time zone names can be obtained from the zoneinfo module.
For example.
>>> import zoneinfo
>>> assert "CET" in zoneinfo.available_timezones()
"""
return self._location_info.timezone
@timezone.setter
def timezone(self, name: str) -> None:
if name not in zoneinfo.available_timezones(): # type: ignore
raise ValueError("Timezone '%s' not recognized" % name)
self._location_info = dataclasses.replace(self._location_info, timezone=name)
@property
def tzinfo(self) -> zoneinfo.ZoneInfo: # type: ignore
"""Time zone information."""
try:
tz = zoneinfo.ZoneInfo(self._location_info.timezone) # type: ignore
return tz # type: ignore
except zoneinfo.ZoneInfoNotFoundError as exc: # type: ignore
raise ValueError(
"Unknown timezone '%s'" % self._location_info.timezone
) from exc
tz = tzinfo
@property
def solar_depression(self) -> float:
"""The number of degrees the sun must be below the horizon for the
dawn/dusk calculation.
Can either be set as a number of degrees below the horizon or as
one of the following strings
============= =======
String Degrees
============= =======
civil 6.0
nautical 12.0
astronomical 18.0
============= =======
"""
return self._solar_depression
@solar_depression.setter
def solar_depression(self, depression: Union[float, str, Depression]) -> None:
if isinstance(depression, str):
try:
self._solar_depression = {
"civil": 6.0,
"nautical": 12.0,
"astronomical": 18.0,
}[depression]
except KeyError:
raise KeyError(
(
"solar_depression must be either a number "
"or one of 'civil', 'nautical' or "
"'astronomical'"
)
)
elif isinstance(depression, Depression):
self._solar_depression = depression.value
else:
self._solar_depression = float(depression)
def today(self, local: bool = True) -> datetime.date:
if local:
return today(self.tzinfo)
else:
return today()
def sun(
self,
date: Optional[datetime.date] = None,
local: bool = True,
observer_elevation: Elevation = 0.0,
) -> Dict[str, Any]:
"""Returns dawn, sunrise, noon, sunset and dusk as a dictionary.
:param date: The date for which to calculate the times.
If no date is specified then the current date will be used.
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
If not specified then the time will be returned in local time
:param observer_elevation: Elevation of the observer in metres above
the location.
:returns: Dictionary with keys ``dawn``, ``sunrise``, ``noon``,
``sunset`` and ``dusk`` whose values are the results of the
corresponding methods.
"""
if local and self.timezone is None:
raise ValueError("Local time requested but Location has no timezone set.")
if date is None:
date = self.today(local)
observer = Observer(self.latitude, self.longitude, observer_elevation)
if local:
return astral.sun.sun(observer, date, self.solar_depression, self.tzinfo)
else:
return astral.sun.sun(observer, date, self.solar_depression)
def dawn(
self,
date: Optional[datetime.date] = None,
local: bool = True,
observer_elevation: Elevation = 0.0,
) -> datetime.datetime:
"""Calculates the time in the morning when the sun is a certain number
of degrees below the horizon. By default this is 6 degrees but can be
changed by setting the :attr:`Astral.solar_depression` property.
:param date: The date for which to calculate the dawn time.
If no date is specified then the current date will be used.
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
If not specified then the time will be returned in local time
:param observer_elevation: Elevation of the observer in metres above
the location.
:returns: The date and time at which dawn occurs.
"""
if local and self.timezone is None:
raise ValueError("Local time requested but Location has no timezone set.")
if date is None:
date = self.today(local)
observer = Observer(self.latitude, self.longitude, observer_elevation)
if local:
return astral.sun.dawn(observer, date, self.solar_depression, self.tzinfo)
else:
return astral.sun.dawn(observer, date, self.solar_depression)
def sunrise(
self,
date: Optional[datetime.date] = None,
local: bool = True,
observer_elevation: Elevation = 0.0,
) -> datetime.datetime:
"""Return sunrise time.
Calculates the time in the morning when the sun is 0.833 degrees
below the horizon. This is to account for refraction.
:param date: The date for which to calculate the sunrise time.
If no date is specified then the current date will be used.
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
If not specified then the time will be returned in local time
:param observer_elevation: Elevation of the observer in metres above
the location.
:returns: The date and time at which sunrise occurs.
"""
if local and self.timezone is None:
raise ValueError("Local time requested but Location has no timezone set.")
if date is None:
date = self.today(local)
observer = Observer(self.latitude, self.longitude, observer_elevation)
if local:
return astral.sun.sunrise(observer, date, self.tzinfo)
else:
return astral.sun.sunrise(observer, date)
def noon(
self, date: Optional[datetime.date] = None, local: bool = True
) -> datetime.datetime:
"""Calculates the solar noon (the time when the sun is at its highest
point.)
:param date: The date for which to calculate the noon time.
If no date is specified then the current date will be used.
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
If not specified then the time will be returned in local time
:returns: The date and time at which the solar noon occurs.
"""
if local and self.timezone is None:
raise ValueError("Local time requested but Location has no timezone set.")
if date is None:
date = self.today(local)
observer = Observer(self.latitude, self.longitude)
if local:
return astral.sun.noon(observer, date, self.tzinfo)
else:
return astral.sun.noon(observer, date)
def sunset(
self,
date: Optional[datetime.date] = None,
local: bool = True,
observer_elevation: Elevation = 0.0,
) -> datetime.datetime:
Calculates sunset time (the time in the evening when the sun is
0.833 degrees below the horizon. This is to account for refraction.)
:param date: The date for which to calculate the sunset time.
If no date is specified then the current date will be used.
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
If not specified then the time will be returned in local time
:param observer_elevation: Elevation of the observer in metres above
the location.
:returns: The date and time at which sunset occurs.
"""
if local and self.timezone is None:
raise ValueError("Local time requested but Location has no timezone set.")
if date is None:
date = self.today(local)
observer = Observer(self.latitude, self.longitude, observer_elevation)
if local:
return astral.sun.sunset(observer, date, self.tzinfo)
else:
return astral.sun.sunset(observer, date)
def dusk(
self,
date: Optional[datetime.date] = None,
local: bool = True,
observer_elevation: Elevation = 0.0,
) -> datetime.datetime:
"""Calculates the dusk time (the time in the evening when the sun is a
certain number of degrees below the horizon. By default this is 6
degrees but can be changed by setting the
:attr:`solar_depression` property.)
:param date: The date for which to calculate the dusk time.
If no date is specified then the current date will be used.
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
If not specified then the time will be returned in local time
:param observer_elevation: Elevation of the observer in metres above
the location.
:returns: The date and time at which dusk occurs.
"""
if local and self.timezone is None:
raise ValueError("Local time requested but Location has no timezone set.")
if date is None:
date = self.today(local)
observer = Observer(self.latitude, self.longitude, observer_elevation)
if local:
return astral.sun.dusk(observer, date, self.solar_depression, self.tzinfo)
else:
return astral.sun.dusk(observer, date, self.solar_depression)
def midnight(
self, date: Optional[datetime.date] = None, local: bool = True
) -> datetime.datetime:
"""Calculates the solar midnight (the time when the sun is at its lowest
point.)
:param date: The date for which to calculate the midnight time.
If no date is specified then the current date will be used.
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
If not specified then the time will be returned in local time
:returns: The date and time at which the solar midnight occurs.
"""
if local and self.timezone is None:
raise ValueError("Local time requested but Location has no timezone set.")
if date is None:
date = self.today(local)
observer = Observer(self.latitude, self.longitude)
if local:
return astral.sun.midnight(observer, date, self.tzinfo)
else:
return astral.sun.midnight(observer, date)
def daylight(
self,
date: Optional[datetime.date] = None,
local: bool = True,
observer_elevation: Elevation = 0.0,
) -> Tuple[datetime.datetime, datetime.datetime]:
"""Calculates the daylight time (the time between sunrise and sunset)
:param date: The date for which to calculate daylight.
If no date is specified then the current date will be used.
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
If not specified then the time will be returned in local time
:param observer_elevation: Elevation of the observer in metres above
the location.
:returns: A tuple containing the start and end times
"""
if local and self.timezone is None:
raise ValueError("Local time requested but Location has no timezone set.")
if date is None:
date = self.today(local)
observer = Observer(self.latitude, self.longitude, observer_elevation)
if local:
return astral.sun.daylight(observer, date, self.tzinfo)
else:
return astral.sun.daylight(observer, date)
def night(
self,
date: Optional[datetime.date] = None,
local: bool = True,
observer_elevation: Elevation = 0.0,
) -> Tuple[datetime.datetime, datetime.datetime]:
"""Calculates the night time (the time between astronomical dusk and
astronomical dawn of the next day)
:param date: The date for which to calculate the start of the night time.
If no date is specified then the current date will be used.
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
If not specified then the time will be returned in local time
:param observer_elevation: Elevation of the observer in metres above
the location.
:returns: A tuple containing the start and end times
"""
if local and self.timezone is None:
raise ValueError("Local time requested but Location has no timezone set.")
if date is None:
date = self.today(local)
observer = Observer(self.latitude, self.longitude, observer_elevation)
if local:
return astral.sun.night(observer, date, self.tzinfo)
else:
return astral.sun.night(observer, date)
def twilight(
self,
date: Optional[datetime.date] = None,
direction: SunDirection = SunDirection.RISING,
local: bool = True,
observer_elevation: Elevation = 0.0,
):
"""Returns the start and end times of Twilight in the UTC timezone when
the sun is traversing in the specified direction.
This method defines twilight as being between the time
when the sun is at -6 degrees and sunrise/sunset.
:param direction: Determines whether the time is for the sun rising or setting.
Use ``SunDirection.RISING`` or ``SunDirection.SETTING``.
:param date: The date for which to calculate the times.
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
If not specified then the time will be returned in local time
:param observer_elevation: Elevation of the observer in metres above
the location.
:return: A tuple of the date and time at which twilight starts and ends.
"""
if local and self.timezone is None:
raise ValueError("Local time requested but Location has no timezone set.")
if date is None:
date = self.today(local)
observer = Observer(self.latitude, self.longitude, observer_elevation)
if local:
return astral.sun.twilight(observer, date, direction, self.tzinfo)
else:
return astral.sun.twilight(observer, date, direction)
def moonrise(
self,
date: Optional[datetime.date] = None,
local: bool = True,
) -> Optional[datetime.datetime]:
"""Calculates the time when the moon rises.
:param date: The date for which to calculate the moonrise time.
If no date is specified then the current date will be used.
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
If not specified then the time will be returned in local time
:returns: The date and time at which moonrise occurs.
"""
if local and self.timezone is None:
raise ValueError("Local time requested but Location has no timezone set.")
if date is None:
date = self.today(local)
observer = Observer(self.latitude, self.longitude, 0)
if local:
return astral.moon.moonrise(observer, date, self.tzinfo)
else:
return astral.moon.moonrise(observer, date)
def moonset(
self,
date: Optional[datetime.date] = None,
local: bool = True,
) -> Optional[datetime.datetime]:
"""Calculates the time when the moon sets.
:param date: The date for which to calculate the moonset time.
If no date is specified then the current date will be used.
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
If not specified then the time will be returned in local time
:returns: The date and time at which moonset occurs.
"""
if local and self.timezone is None:
raise ValueError("Local time requested but Location has no timezone set.")
if date is None:
date = self.today(local)
observer = Observer(self.latitude, self.longitude, 0)
if local:
return astral.moon.moonset(observer, date, self.tzinfo)
else:
return astral.moon.moonset(observer, date)
def time_at_elevation(
self,
elevation: float,
date: Optional[datetime.date] = None,
direction: SunDirection = SunDirection.RISING,
local: bool = True,
) -> datetime.datetime:
"""Calculate the time when the sun is at the specified elevation.
Note:
This method uses positive elevations for those above the horizon.
Elevations greater than 90 degrees are converted to a setting sun
i.e. an elevation of 110 will calculate a setting sun at 70 degrees.
:param elevation: Elevation in degrees above the horizon to calculate for.
:param date: The date for which to calculate the elevation time.
If no date is specified then the current date will be used.
:param direction: Determines whether the time is for the sun rising or setting.
Use ``SunDirection.RISING`` or ``SunDirection.SETTING``.
Default is rising.
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
If not specified then the time will be returned in local time
:returns: The date and time at which the sun is at the specified elevation.
"""
if local and self.timezone is None:
raise ValueError("Local time requested but Location has no timezone set.")
if date is None:
date = self.today(local)
if elevation > 90.0:
elevation = 180.0 - elevation
direction = SunDirection.SETTING
observer = Observer(self.latitude, self.longitude, 0.0)
if local:
return astral.sun.time_at_elevation(
observer, elevation, date, direction, self.tzinfo
)
else:
return astral.sun.time_at_elevation(observer, elevation, date, direction)
def rahukaalam(
self,
date: Optional[datetime.date] = None,
local: bool = True,
observer_elevation: Elevation = 0.0,
) -> Tuple[datetime.datetime, datetime.datetime]:
"""Calculates the period of rahukaalam.
:param date: The date for which to calculate the rahukaalam period.
A value of ``None`` uses the current date.
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
:param observer_elevation: Elevation of the observer in metres above
the location.
:return: Tuple containing the start and end times for Rahukaalam.
"""
if local and self.timezone is None:
raise ValueError("Local time requested but Location has no timezone set.")
if date is None:
date = self.today(local)
observer = Observer(self.latitude, self.longitude, observer_elevation)
if local:
return astral.sun.rahukaalam(observer, date, tzinfo=self.tzinfo)
else:
return astral.sun.rahukaalam(observer, date)
def golden_hour(
self,
direction: SunDirection = SunDirection.RISING,
date: Optional[datetime.date] = None,
local: bool = True,
observer_elevation: Elevation = 0.0,
) -> Tuple[datetime.datetime, datetime.datetime]:
"""Returns the start and end times of the Golden Hour when the sun is traversing
in the specified direction.
This method uses the definition from PhotoPills i.e. the
golden hour is when the sun is between 4 degrees below the horizon
and 6 degrees above.
:param direction: Determines whether the time is for the sun rising or setting.
Use ``SunDirection.RISING`` or ``SunDirection.SETTING``.
Default is rising.
:param date: The date for which to calculate the times.
:param local: True = Times to be returned in location's time zone;
False = Times to be returned in UTC.
If not specified then the time will be returned in local time
:param observer_elevation: Elevation of the observer in metres above
the location.
:return: A tuple of the date and time at which the Golden Hour starts and ends.
"""
if local and self.timezone is None:
raise ValueError("Local time requested but Location has no timezone set.")
if date is None:
date = self.today(local)
observer = Observer(self.latitude, self.longitude, observer_elevation)
if local:
return astral.sun.golden_hour(observer, date, direction, self.tzinfo)
else:
return astral.sun.golden_hour(observer, date, direction)
def blue_hour(
self,
direction: SunDirection = SunDirection.RISING,
date: Optional[datetime.date] = None,
local: bool = True,
observer_elevation: Elevation = 0.0,
) -> Tuple[datetime.datetime, datetime.datetime]:
"""Returns the start and end times of the Blue Hour when the sun is traversing
in the specified direction.
This method uses the definition from PhotoPills i.e. the
blue hour is when the sun is between 6 and 4 degrees below the horizon.
:param direction: Determines whether the time is for the sun rising or setting.
Use ``SunDirection.RISING`` or ``SunDirection.SETTING``.
Default is rising.
:param date: The date for which to calculate the times.
If no date is specified then the current date will be used.
:param local: True = Times to be returned in location's time zone;
False = Times to be returned in UTC.
If not specified then the time will be returned in local time
:param observer_elevation: Elevation of the observer in metres above
the location.
:return: A tuple of the date and time at which the Blue Hour starts and ends.
"""
if local and self.timezone is None:
raise ValueError("Local time requested but Location has no timezone set.")
if date is None:
date = self.today(local)
observer = Observer(self.latitude, self.longitude, observer_elevation)
if local:
return astral.sun.blue_hour(observer, date, direction, self.tzinfo)
else:
return astral.sun.blue_hour(observer, date, direction)
def solar_azimuth(
self,
dateandtime: Optional[datetime.datetime] = None,
observer_elevation: Elevation = 0.0,
) -> float:
"""Calculates the solar azimuth angle for a specific date/time.
:param dateandtime: The date and time for which to calculate the angle.
:returns: The azimuth angle in degrees clockwise from North.
"""
if dateandtime is None:
dateandtime = astral.sun.now(self.tzinfo)
elif not dateandtime.tzinfo:
dateandtime = dateandtime.replace(tzinfo=self.tzinfo)
observer = Observer(self.latitude, self.longitude, observer_elevation)
dateandtime = dateandtime.astimezone(datetime.timezone.utc) # type: ignore
return astral.sun.azimuth(observer, dateandtime)
def solar_elevation(
self,
dateandtime: Optional[datetime.datetime] = None,
observer_elevation: Elevation = 0.0,
) -> float:
"""Calculates the solar elevation angle for a specific time.
:param dateandtime: The date and time for which to calculate the angle.
:returns: The elevation angle in degrees above the horizon.
"""
if dateandtime is None:
dateandtime = astral.sun.now(self.tzinfo)
elif not dateandtime.tzinfo:
dateandtime = dateandtime.replace(tzinfo=self.tzinfo)
observer = Observer(self.latitude, self.longitude, observer_elevation)
dateandtime = dateandtime.astimezone(datetime.timezone.utc) # type: ignore
return astral.sun.elevation(observer, dateandtime)
def solar_zenith(
self,
dateandtime: Optional[datetime.datetime] = None,
observer_elevation: Elevation = 0.0,
) -> float:
"""Calculates the solar zenith angle for a specific time.
:param dateandtime: The date and time for which to calculate the angle.
:returns: The zenith angle in degrees from vertical.
"""
return 90.0 - self.solar_elevation(dateandtime, observer_elevation)
def moon_phase(self, date: Optional[datetime.date] = None, local: bool = True):
"""Calculates the moon phase for a specific date.
:param date: The date to calculate the phase for. If omitted the
current date is used.
:returns:
A number designating the phase
============ ==============
0 .. 6.99 New moon
7 .. 13.99 First quarter
14 .. 20.99 Full moon
21 .. 27.99 Last quarter
============ ==============
"""
if date is None:
date = self.today(local)
return astral.moon.phase(date)
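# Illustrative usage sketch (not part of the original module); the city name
# and coordinates are assumptions made for the example.
if __name__ == "__main__":
    london = Location(
        LocationInfo("London", "England", "Europe/London", 51.5, -0.116)
    )
    times = london.sun(datetime.date(2000, 6, 21), local=True)
    print(times["sunrise"], times["sunset"])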
|
efeb10f4c7a599da7a0fecf1f131b16697879dad
|
13800b7827598e76428a335559b7bf11867ec2f0
|
/python/ccxt/async_support/lbank.py
|
a46476292258482200877b37a1d9e98d33bae1e1
|
[
"MIT"
] |
permissive
|
ccxt/ccxt
|
b40a0466f5c430a3c0c6026552ae697aa80ba6c6
|
e4065f6a490e6fc4dd7a72b375428b2faa570668
|
refs/heads/master
| 2023-09-04T03:41:29.787733
| 2023-09-03T19:25:57
| 2023-09-03T19:25:57
| 91,253,698
| 30,798
| 8,190
|
MIT
| 2023-09-14T21:59:09
| 2017-05-14T15:41:56
|
Python
|
UTF-8
|
Python
| false
| false
| 34,074
|
py
|
lbank.py
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.abstract.lbank import ImplicitAPI
from ccxt.base.types import OrderSide
from ccxt.base.types import OrderType
from typing import Optional
from typing import List
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import AuthenticationError
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class lbank(Exchange, ImplicitAPI):
def describe(self):
return self.deep_extend(super(lbank, self).describe(), {
'id': 'lbank',
'name': 'LBank',
'countries': ['CN'],
'version': 'v1',
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': False,
'createStopMarketOrder': False,
'createStopOrder': False,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchClosedOrders': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarginMode': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': False, # status 0 API doesn't work
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositionMode': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': False,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'withdraw': True,
},
'timeframes': {
'1m': 'minute1',
'5m': 'minute5',
'15m': 'minute15',
'30m': 'minute30',
'1h': 'hour1',
'2h': 'hour2',
'4h': 'hour4',
'6h': 'hour6',
'8h': 'hour8',
'12h': 'hour12',
'1d': 'day1',
'1w': 'week1',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/38063602-9605e28a-3302-11e8-81be-64b1e53c4cfb.jpg',
'api': {
'rest': 'https://api.lbank.info',
},
'www': 'https://www.lbank.info',
'doc': 'https://github.com/LBank-exchange/lbank-official-api-docs',
'fees': 'https://www.lbank.info/fees.html',
'referral': 'https://www.lbank.info/invitevip?icode=7QCY',
},
'api': {
'public': {
'get': [
'currencyPairs',
'ticker',
'depth',
'trades',
'kline',
'accuracy',
],
},
'private': {
'post': [
'user_info',
'create_order',
'cancel_order',
'orders_info',
'orders_info_history',
'withdraw',
'withdrawCancel',
'withdraws',
'withdrawConfigs',
],
},
},
'fees': {
'trading': {
'maker': self.parse_number('0.001'),
'taker': self.parse_number('0.001'),
},
'funding': {
'withdraw': {},
},
},
'commonCurrencies': {
'GMT': 'GMT Token',
'PNT': 'Penta',
'SHINJA': 'SHINJA(1M)',
'VET_ERC20': 'VEN',
},
'options': {
'cacheSecretAsPem': True,
},
'precisionMode': TICK_SIZE,
})
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for lbank
:param dict [params]: extra parameters specific to the exchange api endpoint
:returns dict[]: an array of objects representing market data
"""
response = await self.publicGetAccuracy(params)
#
# [
# {
# "symbol": "btc_usdt",
# "quantityAccuracy": "4",
# "minTranQua": "0.0001",
# "priceAccuracy": "2"
# },
# ...
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = market['symbol']
parts = id.split('_')
baseId = None
quoteId = None
numParts = len(parts)
# lbank will return symbols like "vet_erc20_usdt"
if numParts > 2:
baseId = parts[0] + '_' + parts[1]
quoteId = parts[2]
else:
baseId = parts[0]
quoteId = parts[1]
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': True,
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': self.parse_number(self.parse_precision(self.safe_string(market, 'quantityAccuracy'))),
'price': self.parse_number(self.parse_precision(self.safe_string(market, 'priceAccuracy'))),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_float(market, 'minTranQua'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
'info': id,
})
return result
def parse_ticker(self, ticker, market=None):
#
# {
# "symbol":"btc_usdt",
# "ticker":{
# "high":43416.06,
# "vol":7031.7427,
# "low":41804.26,
# "change":1.33,
# "turnover":300302447.81,
# "latest":43220.4
# },
# "timestamp":1642201617747
# }
#
marketId = self.safe_string(ticker, 'symbol')
market = self.safe_market(marketId, market, '_')
symbol = market['symbol']
timestamp = self.safe_integer(ticker, 'timestamp')
info = ticker
ticker = info['ticker']
last = self.safe_string(ticker, 'latest')
percentage = self.safe_string(ticker, 'change')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': None,
'bidVolume': None,
'ask': None,
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': percentage,
'average': None,
'baseVolume': self.safe_string(ticker, 'vol'),
'quoteVolume': self.safe_string(ticker, 'turnover'),
'info': info,
}, market)
async def fetch_ticker(self, symbol: str, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict [params]: extra parameters specific to the lbank api endpoint
:returns dict: a `ticker structure <https://github.com/ccxt/ccxt/wiki/Manual#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = await self.publicGetTicker(self.extend(request, params))
# {
# "symbol":"btc_usdt",
# "ticker":{
# "high":43416.06,
# "vol":7031.7427,
# "low":41804.26,
# "change":1.33,
# "turnover":300302447.81,
# "latest":43220.4
# },
# "timestamp":1642201617747
# }
return self.parse_ticker(response, market)
async def fetch_tickers(self, symbols: Optional[List[str]] = None, params={}):
"""
fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours each market
:param str[]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict [params]: extra parameters specific to the lbank api endpoint
:returns dict: a dictionary of `ticker structures <https://github.com/ccxt/ccxt/wiki/Manual#ticker-structure>`
"""
await self.load_markets()
symbols = self.market_symbols(symbols)
request = {
'symbol': 'all',
}
response = await self.publicGetTicker(self.extend(request, params))
result = {}
for i in range(0, len(response)):
ticker = self.parse_ticker(response[i])
symbol = ticker['symbol']
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
async def fetch_order_book(self, symbol: str, limit=60, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int [limit]: the maximum amount of order book entries to return
:param dict [params]: extra parameters specific to the lbank api endpoint
:returns dict: A dictionary of `order book structures <https://github.com/ccxt/ccxt/wiki/Manual#order-book-structure>` indexed by market symbols
"""
await self.load_markets()
size = 60
if limit is not None:
size = min(limit, size)
market = self.market(symbol)
request = {
'symbol': market['id'],
'size': size,
}
response = await self.publicGetDepth(self.extend(request, params))
return self.parse_order_book(response, market['symbol'])
def parse_trade(self, trade, market=None):
market = self.safe_market(None, market)
timestamp = self.safe_integer(trade, 'date_ms')
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.parse_number(Precise.string_mul(priceString, amountString))
id = self.safe_string(trade, 'tid')
type = None
side = self.safe_string(trade, 'type')
# remove type suffixes, e.g. buy_maker, sell_maker, buy_ioc, sell_ioc, buy_fok, sell_fok
splited = side.split('_')
side = splited[0]
return {
'id': id,
'info': self.safe_value(trade, 'info', trade),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'order': None,
'type': type,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
}
async def fetch_trades(self, symbol: str, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int [since]: timestamp in ms of the earliest trade to fetch
:param int [limit]: the maximum amount of trades to fetch
:param dict [params]: extra parameters specific to the lbank api endpoint
:returns Trade[]: a list of `trade structures <https://github.com/ccxt/ccxt/wiki/Manual#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'size': 100,
}
if since is not None:
request['time'] = since
if limit is not None:
request['size'] = limit
response = await self.publicGetTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590969600,
# 0.02451657,
# 0.02452675,
# 0.02443701,
# 0.02447814,
# 238.38210000
# ]
#
return [
self.safe_timestamp(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol: str, timeframe='1m', since: Optional[int] = None, limit=1000, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int [since]: timestamp in ms of the earliest candle to fetch
:param int [limit]: the maximum amount of candles to fetch
:param dict [params]: extra parameters specific to the lbank api endpoint
:returns int[][]: A list of candles ordered, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
if limit is None:
limit = 100 # it's defined in lbank2
if since is None:
duration = self.parse_timeframe(timeframe)
since = self.milliseconds() - duration * 1000 * limit
request = {
'symbol': market['id'],
'type': self.safe_string(self.timeframes, timeframe, timeframe),
'size': limit,
'time': self.parse_to_int(since / 1000),
}
response = await self.publicGetKline(self.extend(request, params))
#
# [
# [1590969600,0.02451657,0.02452675,0.02443701,0.02447814,238.38210000],
# [1590969660,0.02447814,0.02449883,0.02443209,0.02445973,212.40270000],
# [1590969720,0.02445973,0.02452067,0.02445909,0.02446151,266.16920000],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
info = self.safe_value(response, 'info', {})
free = self.safe_value(info, 'free', {})
freeze = self.safe_value(info, 'freeze', {})
asset = self.safe_value(info, 'asset', {})
currencyIds = list(free.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(free, currencyId)
account['used'] = self.safe_string(freeze, currencyId)
account['total'] = self.safe_string(asset, currencyId)
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict [params]: extra parameters specific to the lbank api endpoint
:returns dict: a `balance structure <https://github.com/ccxt/ccxt/wiki/Manual#balance-structure>`
"""
await self.load_markets()
response = await self.privatePostUserInfo(params)
#
# {
# "result":"true",
# "info":{
# "freeze":{
# "iog":"0.00000000",
# "ssc":"0.00000000",
# "eon":"0.00000000",
# },
# "asset":{
# "iog":"0.00000000",
# "ssc":"0.00000000",
# "eon":"0.00000000",
# },
# "free":{
# "iog":"0.00000000",
# "ssc":"0.00000000",
# "eon":"0.00000000",
# },
# }
# }
#
return self.parse_balance(response)
def parse_order_status(self, status):
statuses = {
'-1': 'cancelled', # cancelled
'0': 'open', # not traded
'1': 'open', # partial deal
'2': 'closed', # complete deal
'4': 'closed', # disposal processing
}
return self.safe_string(statuses, status)
def parse_order(self, order, market=None):
#
# {
# "symbol":"eth_btc",
# "amount":10.000000,
# "create_time":1484289832081,
# "price":5000.000000,
# "avg_price":5277.301200,
# "type":"sell",
# "order_id":"ab704110-af0d-48fd-a083-c218f19a4a55",
# "deal_amount":10.000000,
# "status":2
# }
#
marketId = self.safe_string(order, 'symbol')
symbol = self.safe_symbol(marketId, market, '_')
timestamp = self.safe_integer(order, 'create_time')
# Limit Order Request Returns: Order Price
# Market Order Returns: cny amount of market order
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
filled = self.safe_string(order, 'deal_amount')
average = self.safe_string(order, 'avg_price')
status = self.parse_order_status(self.safe_string(order, 'status'))
id = self.safe_string(order, 'order_id')
type = self.safe_string(order, 'order_type')
side = self.safe_string(order, 'type')
return self.safe_order({
'id': id,
'clientOrderId': None,
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'triggerPrice': None,
'cost': None,
'amount': amount,
'filled': filled,
'remaining': None,
'trades': None,
'fee': None,
'info': self.safe_value(order, 'info', order),
'average': average,
}, market)
async def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float [price]: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict [params]: extra parameters specific to the lbank api endpoint
:returns dict: an `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
order = {
'symbol': market['id'],
'type': side,
'amount': amount,
}
if type == 'market':
order['type'] += '_market'
else:
order['price'] = price
response = await self.privatePostCreateOrder(self.extend(order, params))
order = self.omit(order, 'type')
order['order_id'] = response['order_id']
order['type'] = side
order['order_type'] = type
order['create_time'] = self.milliseconds()
order['info'] = response
return self.parse_order(order, market)
async def cancel_order(self, id: str, symbol: Optional[str] = None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict [params]: extra parameters specific to the lbank api endpoint
:returns dict: An `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'order_id': id,
}
response = await self.privatePostCancelOrder(self.extend(request, params))
return response
async def fetch_order(self, id: str, symbol: Optional[str] = None, params={}):
"""
fetches information on an order made by the user
:param str symbol: unified symbol of the market the order was made in
:param dict [params]: extra parameters specific to the lbank api endpoint
:returns dict: An `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
# Id can be a list of ids delimited by a comma
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'order_id': id,
}
response = await self.privatePostOrdersInfo(self.extend(request, params))
data = self.safe_value(response, 'orders', [])
orders = self.parse_orders(data, market)
numOrders = len(orders)
if numOrders == 1:
return orders[0]
else:
return orders
async def fetch_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetches information on multiple orders made by the user
:param str symbol: unified market symbol of the market orders were made in
:param int [since]: the earliest time in ms to fetch orders for
        :param int [limit]: the maximum number of order structures to retrieve
:param dict [params]: extra parameters specific to the lbank api endpoint
:returns Order[]: a list of `order structures <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
await self.load_markets()
if limit is None:
limit = 100
market = self.market(symbol)
request = {
'symbol': market['id'],
'current_page': 1,
'page_length': limit,
}
response = await self.privatePostOrdersInfoHistory(self.extend(request, params))
data = self.safe_value(response, 'orders', [])
return self.parse_orders(data, None, since, limit)
async def fetch_closed_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetches information on multiple closed orders made by the user
:param str symbol: unified market symbol of the market orders were made in
:param int [since]: the earliest time in ms to fetch orders for
        :param int [limit]: the maximum number of order structures to retrieve
:param dict [params]: extra parameters specific to the lbank api endpoint
:returns Order[]: a list of `order structures <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
await self.load_markets()
if symbol is not None:
market = self.market(symbol)
symbol = market['symbol']
orders = await self.fetch_orders(symbol, since, limit, params)
closed = self.filter_by(orders, 'status', 'closed')
canceled = self.filter_by(orders, 'status', 'cancelled') # cancelled orders may be partially filled
allOrders = self.array_concat(closed, canceled)
return self.filter_by_symbol_since_limit(allOrders, symbol, since, limit)
async def withdraw(self, code: str, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
        :param str tag: an optional memo/tag required by some currencies, forwarded to the exchange as the "memo" parameter
:param dict [params]: extra parameters specific to the lbank api endpoint
:returns dict: a `transaction structure <https://github.com/ccxt/ccxt/wiki/Manual#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
# mark and fee are optional params, mark is a note and must be less than 255 characters
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'assetCode': currency['id'],
'amount': amount,
'account': address,
}
if tag is not None:
request['memo'] = tag
        response = await self.privatePostWithdraw(self.extend(request, params))
#
# {
# 'result': 'true',
# 'withdrawId': 90082,
# 'fee':0.001
# }
#
return self.parse_transaction(response, currency)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# 'result': 'true',
# 'withdrawId': 90082,
# 'fee':0.001
# }
#
currency = self.safe_currency(None, currency)
return {
'id': self.safe_string_2(transaction, 'id', 'withdrawId'),
'txid': None,
'timestamp': None,
'datetime': None,
'network': None,
'addressFrom': None,
'address': None,
'addressTo': None,
'amount': None,
'type': None,
'currency': currency['code'],
'status': None,
'updated': None,
'tagFrom': None,
'tag': None,
'tagTo': None,
'comment': None,
'fee': None,
'info': transaction,
}
def convert_secret_to_pem(self, secret):
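        # lbank expects the RSA secret as a PEM private key: re-wrap the raw key string
        # into 64-character lines between BEGIN/END PRIVATE KEY markers so it can be
        # passed to self.rsa() when signing private requests.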
lineLength = 64
        secretLength = len(secret)
numLines = self.parse_to_int(secretLength / lineLength)
numLines = self.sum(numLines, 1)
pem = "-----BEGIN PRIVATE KEY-----\n" # eslint-disable-line
for i in range(0, numLines):
start = i * lineLength
end = self.sum(start, lineLength)
            pem += secret[start:end] + "\n"  # eslint-disable-line
return pem + '-----END PRIVATE KEY-----'
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = self.urls['api']['rest'] + '/' + self.version + '/' + self.implode_params(path, params)
# Every endpoint ends with ".do"
url += '.do'
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
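            # Private endpoints: sort the params, MD5-hash the url-encoded query string
            # (uppercase hex digest), RSA-SHA256 sign that digest with the PEM-wrapped
            # secret, and send everything as a form-encoded body.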
query = self.keysort(self.extend({
'api_key': self.apiKey,
}, query))
queryString = self.rawencode(query)
message = self.hash(self.encode(queryString), 'md5').upper()
cacheSecretAsPem = self.safe_value(self.options, 'cacheSecretAsPem', True)
pem = None
if cacheSecretAsPem:
pem = self.safe_value(self.options, 'pem')
if pem is None:
pem = self.convert_secret_to_pem(self.secret)
self.options['pem'] = pem
else:
pem = self.convert_secret_to_pem(self.secret)
query['sign'] = self.rsa(message, pem, 'sha256')
body = self.urlencode(query)
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return None
success = self.safe_string(response, 'result')
if success == 'false':
errorCode = self.safe_string(response, 'error_code')
message = self.safe_string({
'10000': 'Internal error',
'10001': 'The required parameters can not be empty',
'10002': 'verification failed',
'10003': 'Illegal parameters',
'10004': 'User requests are too frequent',
'10005': 'Key does not exist',
'10006': 'user does not exist',
'10007': 'Invalid signature',
'10008': 'This currency pair is not supported',
'10009': 'Limit orders can not be missing orders and the number of orders',
'10010': 'Order price or order quantity must be greater than 0',
'10011': 'Market orders can not be missing the amount of the order',
'10012': 'market sell orders can not be missing orders',
'10013': 'is less than the minimum trading position 0.001',
'10014': 'Account number is not enough',
'10015': 'The order type is wrong',
'10016': 'Account balance is not enough',
'10017': 'Abnormal server',
'10018': 'order inquiry can not be more than 50 less than one',
'10019': 'withdrawal orders can not be more than 3 less than one',
'10020': 'less than the minimum amount of the transaction limit of 0.001',
'10022': 'Insufficient key authority',
}, errorCode, self.json(response))
ErrorClass = self.safe_value({
'10002': AuthenticationError,
'10004': DDoSProtection,
'10005': AuthenticationError,
'10006': AuthenticationError,
'10007': AuthenticationError,
'10009': InvalidOrder,
'10010': InvalidOrder,
'10011': InvalidOrder,
'10012': InvalidOrder,
'10013': InvalidOrder,
'10014': InvalidOrder,
'10015': InvalidOrder,
'10016': InvalidOrder,
'10022': AuthenticationError,
}, errorCode, ExchangeError)
raise ErrorClass(message)
return None
|
9bd1a7830dba885b2a7f9c3a34ae2df040f55ffa
|
1a47fe8a56854bfad6b54917ff04885c49538bd5
|
/test/holes_test.py
|
f569a08804e1da44f09a0992019bc4d7f5cc09fd
|
[
"Apache-2.0"
] |
permissive
|
spotify/annoy
|
02b4c866ed819140969bd9835f1ed0afc299d5c7
|
2be37c9e015544be2cf60c431f0cccc076151a2d
|
refs/heads/main
| 2023-09-03T03:31:28.329246
| 2023-08-20T17:38:53
| 2023-08-20T17:38:53
| 9,155,431
| 11,966
| 1,230
|
Apache-2.0
| 2023-08-20T17:38:55
| 2013-04-01T20:29:40
|
C++
|
UTF-8
|
Python
| false
| false
| 1,837
|
py
|
holes_test.py
|
# Copyright (c) 2013 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import random
import numpy
from annoy import AnnoyIndex
def test_random_holes():
f = 10
index = AnnoyIndex(f, "angular")
valid_indices = random.sample(range(2000), 1000) # leave holes
for i in valid_indices:
v = numpy.random.normal(size=(f,))
index.add_item(i, v)
index.build(10)
for i in valid_indices:
js = index.get_nns_by_item(i, 10000)
for j in js:
assert j in valid_indices
for i in range(1000):
v = numpy.random.normal(size=(f,))
js = index.get_nns_by_vector(v, 10000)
for j in js:
assert j in valid_indices
def _test_holes_base(n, f=100, base_i=100000):
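    # Build an index whose item ids all start at a large offset (base_i), leaving a
    # hole below them, then check that a query returns exactly those ids.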
annoy = AnnoyIndex(f, "angular")
for i in range(n):
annoy.add_item(base_i + i, numpy.random.normal(size=(f,)))
annoy.build(100)
res = annoy.get_nns_by_item(base_i, n)
assert set(res) == set([base_i + i for i in range(n)])
def test_root_one_child():
# See https://github.com/spotify/annoy/issues/223
_test_holes_base(1)
def test_root_two_children():
_test_holes_base(2)
def test_root_some_children():
# See https://github.com/spotify/annoy/issues/295
_test_holes_base(10)
def test_root_many_children():
_test_holes_base(1000)
|
66b470fa2f99f1debc94627811e1c4f3741b0613
|
a4ea525e226d6c401fdb87a6e9adfdc5d07e6020
|
/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/test_functionapp_commands_thru_mock.py
|
ebeeb6838f05120879115fdeac4c027d0a1c95bc
|
[
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] |
permissive
|
Azure/azure-cli
|
13340eeca2e288e66e84d393fa1c8a93d46c8686
|
a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca
|
refs/heads/dev
| 2023-08-17T06:25:37.431463
| 2023-08-17T06:00:10
| 2023-08-17T06:00:10
| 51,040,886
| 4,018
| 3,310
|
MIT
| 2023-09-14T11:11:05
| 2016-02-04T00:21:51
|
Python
|
UTF-8
|
Python
| false
| false
| 26,631
|
py
|
test_functionapp_commands_thru_mock.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from unittest import mock
import os
from azure.mgmt.web import WebSiteManagementClient
from knack.util import CLIError
from azure.cli.command_modules.appservice.custom import (
enable_zip_deploy_functionapp,
enable_zip_deploy,
add_remote_build_app_settings,
remove_remote_build_app_settings,
validate_app_settings_in_scm)
from azure.cli.core.profiles import ResourceType
from azure.cli.core.azclierror import (AzureInternalError, UnclassifiedUserFault)
TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
def _get_test_cmd():
from azure.cli.core.mock import DummyCli
from azure.cli.core import AzCommandsLoader
from azure.cli.core.commands import AzCliCommand
cli_ctx = DummyCli()
loader = AzCommandsLoader(cli_ctx, resource_type=ResourceType.MGMT_APPSERVICE)
cmd = AzCliCommand(loader, 'test', None)
cmd.command_kwargs = {'resource_type': ResourceType.MGMT_APPSERVICE}
cmd.cli_ctx = cli_ctx
return cmd
def _get_zip_deploy_headers(username, password, cmd_mock_client):
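    # Rebuild the headers that enable_zip_deploy is expected to send to the SCM/Kudu
    # zipdeploy endpoint, so the tests below can assert on the exact header dict.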
from urllib3.util import make_headers
from azure.cli.core.util import get_az_user_agent
headers = make_headers(basic_auth='{0}:{1}'.format(username, password))
headers['Content-Type'] = 'application/octet-stream'
headers['Cache-Control'] = 'no-cache'
headers['User-Agent'] = get_az_user_agent()
headers['x-ms-client-request-id'] = cmd_mock_client.data['headers']['x-ms-client-request-id']
return headers
class TestFunctionappMocked(unittest.TestCase):
def setUp(self):
self.client = WebSiteManagementClient(mock.MagicMock(), '123455678')
@mock.patch('azure.cli.command_modules.appservice.custom.web_client_factory', autospec=True)
@mock.patch('azure.cli.command_modules.appservice.custom.parse_resource_id')
@mock.patch('azure.cli.command_modules.appservice.custom.enable_zip_deploy')
@mock.patch('azure.cli.command_modules.appservice.custom.add_remote_build_app_settings')
def test_functionapp_zip_deploy_flow(self,
add_remote_build_app_settings_mock,
enable_zip_deploy_mock,
parse_resource_id_mock,
web_client_factory_mock):
cmd_mock = _get_test_cmd()
cli_ctx_mock = mock.MagicMock()
cmd_mock.cli_ctx = cli_ctx_mock
# action
enable_zip_deploy_functionapp(cmd_mock, 'rg', 'name', 'src', build_remote=True, timeout=None, slot=None)
# assert
parse_resource_id_mock.assert_called_once()
enable_zip_deploy_mock.assert_called_once()
add_remote_build_app_settings_mock.assert_called_once()
@mock.patch('azure.cli.command_modules.appservice.custom.web_client_factory', autospec=True)
@mock.patch('azure.cli.command_modules.appservice.custom.parse_resource_id')
@mock.patch('azure.cli.command_modules.appservice.custom.enable_zip_deploy')
@mock.patch('azure.cli.command_modules.appservice.custom.remove_remote_build_app_settings')
    def test_functionapp_zip_deploy_flow_no_remote_build(self,
remove_remote_build_app_settings_mock,
enable_zip_deploy_mock,
parse_resource_id_mock,
web_client_factory_mock):
cmd_mock = _get_test_cmd()
cli_ctx_mock = mock.MagicMock()
cmd_mock.cli_ctx = cli_ctx_mock
# action
enable_zip_deploy_functionapp(cmd_mock, 'rg', 'name', 'src', build_remote=False, timeout=None, slot=None)
# assert
parse_resource_id_mock.assert_called_once()
enable_zip_deploy_mock.assert_called_once()
remove_remote_build_app_settings_mock.assert_called_once()
@mock.patch('azure.cli.command_modules.appservice.custom.web_client_factory', autospec=True)
@mock.patch('azure.cli.command_modules.appservice.custom.parse_resource_id')
@mock.patch('azure.cli.command_modules.appservice.custom.validate_zip_deploy_app_setting_exists')
@mock.patch('azure.cli.command_modules.appservice.custom.upload_zip_to_storage')
@mock.patch('azure.cli.command_modules.appservice.custom.is_plan_consumption', return_value=True)
def test_functionapp_linux_consumption_non_remote_build(self,
is_plan_consumption_mock,
upload_zip_to_storage_mock,
validate_zip_deploy_app_setting_exists_mock,
parse_resource_id_mock,
web_client_factory_mock):
# prepare
cmd_mock = _get_test_cmd()
cli_ctx_mock = mock.MagicMock()
cmd_mock.cli_ctx = cli_ctx_mock
appservice_mock = mock.Mock()
appservice_mock.reserved = True # Marked app service as Linux
web_client_mock = mock.Mock()
web_client_mock.web_apps = mock.Mock()
web_client_mock.web_apps.get = mock.Mock(return_value=appservice_mock)
web_client_factory_mock.return_value = web_client_mock
# action
# Linux Consumption app should use update-storage to deploy when not using remote build
enable_zip_deploy_functionapp(cmd_mock, 'rg', 'name', 'src', build_remote=False, timeout=None, slot=None)
# assert
web_client_mock.web_apps.get.assert_called_with('rg', 'name')
upload_zip_to_storage_mock.assert_called_with(cmd_mock, 'rg', 'name', 'src', None)
@mock.patch('azure.cli.command_modules.appservice.custom.web_client_factory', autospec=True)
@mock.patch('azure.cli.command_modules.appservice.custom.parse_resource_id')
@mock.patch('azure.cli.command_modules.appservice.custom.validate_zip_deploy_app_setting_exists')
@mock.patch('azure.cli.command_modules.appservice.custom.upload_zip_to_storage')
@mock.patch('azure.cli.command_modules.appservice.custom.is_plan_consumption', return_value=True)
def test_functionapp_linux_consumption_non_remote_build_with_slot(self,
is_plan_consumption_mock,
upload_zip_to_storage_mock,
validate_zip_deploy_app_setting_exists_mock,
parse_resource_id_mock,
web_client_factory_mock):
# prepare
cmd_mock = _get_test_cmd()
cli_ctx_mock = mock.MagicMock()
cmd_mock.cli_ctx = cli_ctx_mock
appservice_mock = mock.Mock()
appservice_mock.reserved = True # Marked app service as Linux
web_client_mock = mock.Mock()
web_client_mock.web_apps = mock.Mock()
web_client_mock.web_apps.get = mock.Mock(return_value=appservice_mock)
web_client_factory_mock.return_value = web_client_mock
# action
# Linux Consumption app should use update-storage to deploy when not using remote build
enable_zip_deploy_functionapp(cmd_mock, 'rg', 'name', 'src', build_remote=False, timeout=None, slot='slot')
# assert
web_client_mock.web_apps.get.assert_called_with('rg', 'name')
upload_zip_to_storage_mock.assert_called_with(cmd_mock, 'rg', 'name', 'src', 'slot')
@mock.patch('azure.cli.command_modules.appservice.custom.add_remote_build_app_settings')
@mock.patch('azure.cli.command_modules.appservice.custom.web_client_factory', autospec=True)
@mock.patch('azure.cli.command_modules.appservice.custom.parse_resource_id')
@mock.patch('azure.cli.command_modules.appservice.custom.enable_zip_deploy')
def test_functionapp_remote_build_supports_linux(self,
enable_zip_deploy_mock,
parse_resource_id_mock,
web_client_factory_mock,
add_remote_build_app_settings_mock):
# prepare
cmd_mock = _get_test_cmd()
cli_ctx_mock = mock.MagicMock()
cmd_mock.cli_ctx = cli_ctx_mock
appservice_mock = mock.Mock()
appservice_mock.reserved = True # Marked app service as Linux
web_client_mock = mock.Mock()
web_client_mock.web_apps = mock.Mock()
web_client_mock.web_apps.get = mock.Mock(return_value=appservice_mock)
web_client_factory_mock.return_value = web_client_mock
# action
enable_zip_deploy_functionapp(cmd_mock, 'rg', 'name', 'src', build_remote=True, timeout=None, slot=None)
# assert
web_client_mock.web_apps.get.assert_called_with('rg', 'name')
enable_zip_deploy_mock.assert_called_with(cmd_mock, 'rg', 'name', 'src', None, None)
@mock.patch('azure.cli.command_modules.appservice.custom.get_scm_site_headers')
@mock.patch('azure.cli.command_modules.appservice.custom._get_scm_url', side_effect=ValueError())
def test_enable_zip_deploy_remote_build_no_scm_site(self,
get_scm_url_mock,
get_scm_headers_mock):
# prepare
cmd_mock = _get_test_cmd()
cli_ctx_mock = mock.MagicMock()
cmd_mock.cli_ctx = cli_ctx_mock
# action
# When the function app is created before 8/1/2019, it cannot use remote build
with self.assertRaises(CLIError):
enable_zip_deploy(cmd_mock, 'rg', 'name', 'src', slot=None)
# assert
get_scm_url_mock.assert_called_with(cmd_mock, 'rg', 'name', None)
@mock.patch('azure.cli.command_modules.appservice.custom.get_scm_site_headers')
@mock.patch('azure.cli.command_modules.appservice.custom._get_scm_url', return_value='https://mock-scm')
@mock.patch('requests.post', autospec=True)
@mock.patch('azure.cli.command_modules.appservice.custom._check_zip_deployment_status')
def test_enable_zip_deploy_accepted(self,
check_zip_deployment_status_mock,
requests_post_mock,
get_scm_url_mock,
get_scm_headers_mock):
# prepare
cmd_mock = _get_test_cmd()
cli_ctx_mock = mock.MagicMock()
cmd_mock.cli_ctx = cli_ctx_mock
response = mock.MagicMock()
response.status_code = 202
requests_post_mock.return_value = response
expected_zip_deploy_headers = _get_zip_deploy_headers('usr', 'pwd', cmd_mock.cli_ctx)
get_scm_headers_mock.return_value = expected_zip_deploy_headers
# action
with mock.patch('builtins.open', new_callable=mock.mock_open, read_data='zip-content'):
enable_zip_deploy(cmd_mock, 'rg', 'name', 'src', slot=None)
# assert
requests_post_mock.assert_called_with('https://mock-scm/api/zipdeploy?isAsync=true', data='zip-content',
headers=expected_zip_deploy_headers, verify=mock.ANY)
# TODO improve authorization matcher
check_zip_deployment_status_mock.assert_called_with(cmd_mock, 'rg', 'name',
'https://mock-scm/api/deployments/latest', mock.ANY, None)
@mock.patch('azure.cli.command_modules.appservice.custom.get_scm_site_headers')
@mock.patch('azure.cli.command_modules.appservice.custom._get_scm_url', return_value='https://mock-scm')
@mock.patch('requests.post', autospec=True)
def test_enable_zip_deploy_conflict(self,
requests_post_mock,
get_scm_url_mock,
get_scm_headers_mock):
# prepare
cmd_mock = _get_test_cmd()
cli_ctx_mock = mock.MagicMock()
cmd_mock.cli_ctx = cli_ctx_mock
response = mock.MagicMock()
response.status_code = 409
requests_post_mock.return_value = response
expected_zip_deploy_headers = _get_zip_deploy_headers('usr', 'pwd', cmd_mock.cli_ctx)
get_scm_headers_mock.return_value = expected_zip_deploy_headers
# action
with mock.patch('builtins.open', new_callable=mock.mock_open, read_data='zip-content'):
with self.assertRaises(UnclassifiedUserFault):
enable_zip_deploy(cmd_mock, 'rg', 'name', 'src', slot=None)
# assert
requests_post_mock.assert_called_with('https://mock-scm/api/zipdeploy?isAsync=true', data='zip-content',
headers=expected_zip_deploy_headers, verify=mock.ANY)
@mock.patch('azure.cli.command_modules.appservice.custom.get_scm_site_headers')
@mock.patch('azure.cli.command_modules.appservice.custom._get_scm_url', return_value='https://mock-scm')
@mock.patch('requests.post', autospec=True)
def test_enable_zip_deploy_service_unavailable(self,
requests_post_mock,
get_scm_url_mock,
get_scm_headers_mock):
# prepare
cmd_mock = _get_test_cmd()
cli_ctx_mock = mock.MagicMock()
cmd_mock.cli_ctx = cli_ctx_mock
response = mock.MagicMock()
response.status_code = 503
requests_post_mock.return_value = response
expected_zip_deploy_headers = _get_zip_deploy_headers('usr', 'pwd', cmd_mock.cli_ctx)
get_scm_headers_mock.return_value = expected_zip_deploy_headers
# action
with mock.patch('builtins.open', new_callable=mock.mock_open, read_data='zip-content'):
with self.assertRaises(AzureInternalError):
enable_zip_deploy(cmd_mock, 'rg', 'name', 'src', slot=None)
# assert
requests_post_mock.assert_called_with('https://mock-scm/api/zipdeploy?isAsync=true', data='zip-content',
headers=expected_zip_deploy_headers, verify=mock.ANY)
@mock.patch('azure.cli.command_modules.appservice.custom._get_app_settings_from_scm', return_value={
'SCM_DO_BUILD_DURING_DEPLOYMENT': 'true'
})
def test_validate_app_settings_in_scm_should_have(self, get_app_settings_from_scm_mock):
# prepare
cmd_mock = _get_test_cmd()
should_have = ['SCM_DO_BUILD_DURING_DEPLOYMENT']
# action
result = validate_app_settings_in_scm(cmd_mock, 'rg', 'name', slot=None, should_have=should_have)
# assert
self.assertTrue(result)
@mock.patch('azure.cli.command_modules.appservice.custom._get_app_settings_from_scm', return_value={
'SCM_DO_BUILD_DURING_DEPLOYMENT': 'true'
})
def test_validate_app_settings_in_scm_should_not_have(self, get_app_settings_from_scm_mock):
# prepare
cmd_mock = _get_test_cmd()
should_not_have = ['ENABLE_ORYX_BUILD']
# action
result = validate_app_settings_in_scm(cmd_mock, 'rg', 'name', slot=None, should_not_have=should_not_have)
# assert
self.assertTrue(result)
@mock.patch('azure.cli.command_modules.appservice.custom._get_app_settings_from_scm', return_value={
'SCM_DO_BUILD_DURING_DEPLOYMENT': 'true'
})
def test_validate_app_settings_in_scm_should_contain(self, get_app_settings_from_scm_mock):
# prepare
cmd_mock = _get_test_cmd()
should_contain = {'SCM_DO_BUILD_DURING_DEPLOYMENT': 'true'}
# action
result = validate_app_settings_in_scm(cmd_mock, 'rg', 'name', slot=None, should_contain=should_contain)
# assert
self.assertTrue(result)
@mock.patch('azure.cli.command_modules.appservice.custom._get_app_settings_from_scm', return_value={
'SCM_DO_BUILD_DURING_DEPLOYMENT': 'true'
})
def test_validate_app_settings_in_scm_should_have_failure(self, get_app_settings_from_scm_mock):
# prepare
cmd_mock = _get_test_cmd()
should_have = ['ENABLE_ORYX_BUILD']
# action
result = validate_app_settings_in_scm(cmd_mock, 'rg', 'name', slot=None, should_have=should_have)
# assert
self.assertFalse(result)
@mock.patch('azure.cli.command_modules.appservice.custom._get_app_settings_from_scm', return_value={
'SCM_DO_BUILD_DURING_DEPLOYMENT': 'true'
})
    def test_validate_app_settings_in_scm_should_not_have_failure(self, get_app_settings_from_scm_mock):
# prepare
cmd_mock = _get_test_cmd()
should_not_have = ['SCM_DO_BUILD_DURING_DEPLOYMENT']
# action
result = validate_app_settings_in_scm(cmd_mock, 'rg', 'name', slot=None, should_not_have=should_not_have)
# assert
self.assertFalse(result)
@mock.patch('azure.cli.command_modules.appservice.custom._get_app_settings_from_scm', return_value={
'SCM_DO_BUILD_DURING_DEPLOYMENT': 'true'
})
def test_validate_app_settings_in_scm_should_contain_failure(self, get_app_settings_from_scm_mock):
# prepare
cmd_mock = _get_test_cmd()
should_contain = {'SCM_DO_BUILD_DURING_DEPLOYMENT': 'false'}
# action
result = validate_app_settings_in_scm(cmd_mock, 'rg', 'name', slot=None, should_contain=should_contain)
# assert
self.assertFalse(result)
@mock.patch('azure.cli.command_modules.appservice.custom.validate_app_settings_in_scm', return_value=True)
@mock.patch('azure.cli.command_modules.appservice.custom.update_app_settings')
@mock.patch('azure.cli.command_modules.appservice.custom.delete_app_settings')
@mock.patch('azure.cli.command_modules.appservice.custom.get_app_settings', return_value=[])
def test_add_remote_build_app_settings_add_scm_do_build_during_deployment(self,
get_app_settings_mock,
delete_app_settings_mock,
update_app_settings_mock,
validate_app_settings_in_scm_mock):
# prepare
cmd_mock = _get_test_cmd()
# action
add_remote_build_app_settings(cmd_mock, 'rg', 'name', slot=None)
# assert
update_app_settings_mock.assert_called_with(cmd_mock, 'rg', 'name', ['SCM_DO_BUILD_DURING_DEPLOYMENT=true'], None)
validate_app_settings_in_scm_mock.assert_called_with(cmd_mock, 'rg', 'name', None,
should_contain={'SCM_DO_BUILD_DURING_DEPLOYMENT': 'true'},
should_not_have=[])
@mock.patch('azure.cli.command_modules.appservice.custom.validate_app_settings_in_scm',
return_value=True)
@mock.patch('azure.cli.command_modules.appservice.custom.update_app_settings')
@mock.patch('azure.cli.command_modules.appservice.custom.delete_app_settings')
@mock.patch('azure.cli.command_modules.appservice.custom.get_app_settings', return_value=[
{
'name': 'WEBSITE_RUN_FROM_PACKAGE',
'value': 'https://microsoft.com'
},
{
'name': 'ENABLE_ORYX_BUILD',
'value': 'true'
}
])
def test_add_remote_build_app_settings_remove_unnecessary_app_settings(self,
get_app_settings_mock,
delete_app_settings_mock,
update_app_settings_mock,
validate_app_settings_in_scm_mock):
# prepare
cmd_mock = _get_test_cmd()
# action
add_remote_build_app_settings(cmd_mock, 'rg', 'name', slot=None)
# assert
delete_app_settings_mock.assert_any_call(cmd_mock, 'rg', 'name', ['WEBSITE_RUN_FROM_PACKAGE'], None)
delete_app_settings_mock.assert_any_call(cmd_mock, 'rg', 'name', ['ENABLE_ORYX_BUILD'], None)
validate_app_settings_in_scm_mock.assert_called_with(cmd_mock, 'rg', 'name', None,
should_contain={'SCM_DO_BUILD_DURING_DEPLOYMENT': 'true'},
should_not_have=['WEBSITE_RUN_FROM_PACKAGE', 'ENABLE_ORYX_BUILD'])
@mock.patch('azure.cli.command_modules.appservice.custom.validate_app_settings_in_scm', return_value=True)
@mock.patch('azure.cli.command_modules.appservice.custom.update_app_settings')
@mock.patch('azure.cli.command_modules.appservice.custom.delete_app_settings')
@mock.patch('azure.cli.command_modules.appservice.custom.get_app_settings', return_value=[{
'name': 'SCM_DO_BUILD_DURING_DEPLOYMENT',
'value': 'false'
}])
def test_add_remote_build_app_settings_change_scm_do_build_during_deployment(self,
get_app_settings_mock,
delete_app_settings_mock,
update_app_settings_mock,
validate_app_settings_in_scm_mock):
# prepare
cmd_mock = _get_test_cmd()
# action
add_remote_build_app_settings(cmd_mock, 'rg', 'name', slot=None)
# assert
update_app_settings_mock.assert_called_with(cmd_mock, 'rg', 'name', ['SCM_DO_BUILD_DURING_DEPLOYMENT=true'], None)
validate_app_settings_in_scm_mock.assert_called_with(cmd_mock, 'rg', 'name', None,
should_contain={'SCM_DO_BUILD_DURING_DEPLOYMENT': 'true'},
should_not_have=[])
@mock.patch('azure.cli.command_modules.appservice.custom.validate_app_settings_in_scm', return_value=True)
@mock.patch('azure.cli.command_modules.appservice.custom.update_app_settings')
@mock.patch('azure.cli.command_modules.appservice.custom.delete_app_settings')
@mock.patch('azure.cli.command_modules.appservice.custom.get_app_settings', return_value=[{
'name': 'SCM_DO_BUILD_DURING_DEPLOYMENT',
'value': 'true'
}])
def test_add_remote_build_app_settings_do_nothing(self,
get_app_settings_mock,
delete_app_settings_mock,
update_app_settings_mock,
validate_app_settings_in_scm_mock):
# prepare
cmd_mock = _get_test_cmd()
# action
add_remote_build_app_settings(cmd_mock, 'rg', 'name', slot=None)
# assert
update_app_settings_mock.assert_not_called()
validate_app_settings_in_scm_mock.assert_not_called()
@mock.patch('azure.cli.command_modules.appservice.custom.validate_app_settings_in_scm', return_value=True)
@mock.patch('azure.cli.command_modules.appservice.custom.update_app_settings')
@mock.patch('azure.cli.command_modules.appservice.custom.delete_app_settings')
@mock.patch('azure.cli.command_modules.appservice.custom.get_app_settings', return_value=[])
def test_remove_remote_build_app_settings_disable_scm_do_build_during_deployment(self,
get_app_settings_mock,
delete_app_settings_mock,
update_app_settings_mock,
validate_app_settings_in_scm_mock):
# prepare
cmd_mock = _get_test_cmd()
# action
remove_remote_build_app_settings(cmd_mock, 'rg', 'name', slot=None)
# assert
update_app_settings_mock.assert_called_with(cmd_mock, 'rg', 'name', ['SCM_DO_BUILD_DURING_DEPLOYMENT=false'], None)
validate_app_settings_in_scm_mock.assert_called_with(cmd_mock, 'rg', 'name', None,
should_contain={'SCM_DO_BUILD_DURING_DEPLOYMENT': 'false'})
@mock.patch('azure.cli.command_modules.appservice.custom.validate_app_settings_in_scm', return_value=True)
@mock.patch('azure.cli.command_modules.appservice.custom.update_app_settings')
@mock.patch('azure.cli.command_modules.appservice.custom.delete_app_settings')
@mock.patch('azure.cli.command_modules.appservice.custom.get_app_settings', return_value=[{
'name': 'SCM_DO_BUILD_DURING_DEPLOYMENT',
'value': 'false'
}])
def test_remove_remote_build_app_settings_do_nothing(self,
get_app_settings_mock,
delete_app_settings_mock,
update_app_settings_mock,
validate_app_settings_in_scm_mock):
# prepare
cmd_mock = _get_test_cmd()
# action
remove_remote_build_app_settings(cmd_mock, 'rg', 'name', slot=None)
# assert
update_app_settings_mock.assert_not_called()
validate_app_settings_in_scm_mock.assert_not_called()
|
b5a63efaaf400d898de5f3161a8580f0bae6768b
|
574661105b08044688f287c45c917b1e0cfa0cfb
|
/__init__.py
|
2a1473b44ae1a3e9f7ef55ff7763b61e09564da3
|
[] |
no_license
|
elupus/hass_nibe
|
64a97316ff25eb3f2cf20adf4d5cf2f73d6cd913
|
b32e6f256cee4727d1c830075acfa607adc15dc7
|
refs/heads/master
| 2023-09-04T12:00:36.403723
| 2023-08-11T20:29:06
| 2023-08-11T20:29:24
| 93,442,181
| 169
| 60
| null | 2023-08-30T06:53:01
| 2017-06-05T20:08:06
|
Python
|
UTF-8
|
Python
| false
| false
| 12,759
|
py
|
__init__.py
|
"""Support for nibe uplink."""
from __future__ import annotations
import asyncio
import logging
from dataclasses import dataclass
from datetime import timedelta
from typing import Callable, Optional, cast
import homeassistant.helpers.config_validation as cv
import homeassistant.helpers.device_registry as device_registry
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import persistent_notification
from homeassistant.const import CONF_NAME
from homeassistant.core import CALLBACK_TYPE, HomeAssistant, callback
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from nibeuplink import Uplink, UplinkSession
from nibeuplink.typing import ParameterId, ParameterType, System, SystemSoftwareInfo
from .const import (
CONF_ACCESS_DATA,
CONF_BINARY_SENSORS,
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_CLIMATE_SYSTEMS,
CONF_CLIMATES,
CONF_CURRENT_TEMPERATURE,
CONF_FANS,
CONF_REDIRECT_URI,
CONF_SENSORS,
CONF_SWITCHES,
CONF_SYSTEM,
CONF_SYSTEMS,
CONF_THERMOSTATS,
CONF_UNITS,
CONF_VALVE_POSITION,
CONF_WATER_HEATERS,
CONF_WRITEACCESS,
DATA_NIBE_CONFIG,
DATA_NIBE_ENTRIES,
DOMAIN,
SCAN_INTERVAL,
)
from .services import async_register_services
_LOGGER = logging.getLogger(__name__)
ParameterSet = dict[ParameterId, Optional[ParameterType]]
def ensure_system_dict(value: dict[int, dict] | list[dict] | None) -> dict[int, dict]:
"""Wrap value in list if it is not one."""
if value is None:
return {}
if isinstance(value, list):
value_schema = vol.Schema(
[
vol.Schema(
{vol.Required(CONF_SYSTEM): cv.positive_int}, extra=vol.ALLOW_EXTRA
)
]
)
value_dict = value_schema(value)
return {x[CONF_SYSTEM]: x for x in value_dict}
if isinstance(value, dict):
return value
value_any = SYSTEM_SCHEMA(value)
return {value_any[CONF_SYSTEM]: value_any}
THERMOSTAT_SCHEMA = vol.Schema(
{
vol.Optional(CONF_CLIMATE_SYSTEMS, default=[1]): vol.All(cv.ensure_list, [int]),
vol.Required(CONF_NAME): str,
vol.Optional(CONF_CURRENT_TEMPERATURE): cv.entity_id,
vol.Optional(CONF_VALVE_POSITION): cv.entity_id,
}
)
SYSTEM_SCHEMA = vol.Schema(
vol.All(
cv.deprecated(CONF_CLIMATES),
cv.deprecated(CONF_WATER_HEATERS),
cv.deprecated(CONF_FANS),
cv.deprecated(CONF_UNITS),
{
vol.Remove(CONF_CLIMATES): object,
vol.Remove(CONF_WATER_HEATERS): object,
vol.Remove(CONF_FANS): object,
vol.Remove(CONF_UNITS): list,
vol.Optional(CONF_SYSTEM): cv.positive_int,
vol.Optional(CONF_SENSORS, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_SWITCHES, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_BINARY_SENSORS, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_THERMOSTATS, default={}): {
cv.positive_int: THERMOSTAT_SCHEMA
},
},
)
)
NIBE_SCHEMA = vol.Schema(
{
vol.Optional(CONF_REDIRECT_URI): cv.string,
vol.Optional(CONF_CLIENT_ID): cv.string,
vol.Optional(CONF_CLIENT_SECRET): cv.string,
vol.Optional(CONF_WRITEACCESS): cv.boolean,
vol.Optional(CONF_SYSTEMS, default={}): vol.All(
ensure_system_dict, {vol.Coerce(int): SYSTEM_SCHEMA}
),
}
)
CONFIG_SCHEMA = vol.Schema({DOMAIN: NIBE_SCHEMA}, extra=vol.ALLOW_EXTRA)
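# Example configuration.yaml entry accepted by NIBE_SCHEMA (hypothetical ids and values,
# assuming DOMAIN resolves to "nibe" and the CONF_* constants to their usual keys):
#
# nibe:
#   client_id: "..."
#   client_secret: "..."
#   redirect_uri: "https://example.com/callback"
#   systems:
#     - system: 12345
#       sensors:
#         - outdoor_temperature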
FORWARD_PLATFORMS = (
"climate",
"switch",
"sensor",
"binary_sensor",
"water_heater",
"fan",
"update",
)
@dataclass
class NibeData:
"""Holder for nibe data."""
session: UplinkSession
uplink: Uplink
systems: dict[int, NibeSystem]
coordinator: DataUpdateCoordinator | None = None
async def async_setup(hass, config):
"""Configure the nibe uplink component."""
hass.data[DATA_NIBE_ENTRIES] = {}
if DOMAIN in config:
hass.data[DATA_NIBE_CONFIG] = config[DOMAIN]
else:
hass.data[DATA_NIBE_CONFIG] = NIBE_SCHEMA({})
await async_register_services(hass)
return True
def _get_system_config(hass, system_id: int):
config = hass.data[DATA_NIBE_CONFIG]
system = config[CONF_SYSTEMS].get(system_id)
if system:
return system
return SYSTEM_SCHEMA({})
class NibeSystemsCoordinator(DataUpdateCoordinator[dict[int, System]]):
"""Coordinator that keeps track of all systems."""
def __init__(self, hass: HomeAssistant, uplink: Uplink):
"""Initialize systems coordinator."""
self.uplink = uplink
super().__init__(
hass,
_LOGGER,
name="Nibe Uplink",
update_interval=timedelta(seconds=SCAN_INTERVAL),
)
async def _async_update_data(self) -> dict[int, System]:
"""Update data via library."""
systems_raw = await self.uplink.get_systems()
systems = {system["systemId"]: system for system in systems_raw}
return systems
async def async_setup_entry(hass: HomeAssistant, entry: config_entries.ConfigEntry):
"""Set up an access point from a config entry."""
_LOGGER.debug("Setup nibe entry")
scope = None
if entry.data.get(CONF_WRITEACCESS):
scope = ["READSYSTEM", "WRITESYSTEM"]
else:
scope = ["READSYSTEM"]
def access_data_write(data):
hass.config_entries.async_update_entry(
entry, data={**entry.data, CONF_ACCESS_DATA: data}
)
session = UplinkSession(
client_id=entry.data.get(CONF_CLIENT_ID),
client_secret=entry.data.get(CONF_CLIENT_SECRET),
redirect_uri=entry.data.get(CONF_REDIRECT_URI),
access_data=entry.data.get(CONF_ACCESS_DATA),
access_data_write=access_data_write,
scope=scope,
)
await session.open()
uplink = Uplink(session)
coordinator = NibeSystemsCoordinator(hass, uplink)
data = NibeData(session, uplink, {}, coordinator)
hass.data[DATA_NIBE_ENTRIES][entry.entry_id] = data
await coordinator.async_config_entry_first_refresh()
if systems_conf := entry.options.get(CONF_SYSTEMS):
systems_enabled = {system_id for system_id in systems_conf}
else:
systems_enabled = set(coordinator.data.keys())
for system_id, system_raw in coordinator.data.items():
if system_id not in systems_enabled:
continue
system = NibeSystem(
hass, system_raw, _get_system_config(hass, system_id), coordinator
)
await system.async_config_entry_first_refresh()
data.systems[system.system_id] = system
await hass.config_entries.async_forward_entry_setups(entry, FORWARD_PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry):
"""Unload a configuration entity."""
data: NibeData = hass.data[DATA_NIBE_ENTRIES][entry.entry_id]
unload_ok = await hass.config_entries.async_unload_platforms(
entry, FORWARD_PLATFORMS
)
if unload_ok:
await asyncio.wait([system.unload() for system in data.systems.values()])
await data.session.close()
hass.data[DATA_NIBE_ENTRIES].pop(entry.entry_id)
return True
class NibeSystem(DataUpdateCoordinator):
"""Object representing a system."""
parent: NibeSystemsCoordinator
def __init__(
self,
hass: HomeAssistant,
system: System,
config: dict,
parent: NibeSystemsCoordinator,
):
"""Init."""
self.system_id = system["systemId"]
self.system = system
self.uplink = parent.uplink
self.parent = parent
self.notice: list[dict] = []
self.statuses: set[str] = set()
self.software: SystemSoftwareInfo | None = None
self._unsub: list[Callable] = []
self.config = config
self._parameters: ParameterSet = {}
self._parameter_subscribers: dict[object, set[ParameterId]] = {}
self._parameter_preload: set[ParameterId] = set()
super().__init__(
hass,
_LOGGER,
name=f"Nibe Uplink: {self.system_id}",
update_interval=timedelta(minutes=10),
)
reg = device_registry.async_get(self.hass)
reg.async_get_or_create(
config_entry_id=parent.config_entry.entry_id,
configuration_url=f"https://nibeuplink.com/System/{self.system_id}",
identifiers={(DOMAIN, self.system_id)},
manufacturer="NIBE Energy Systems",
model=system["productName"],
name=f"{system['name']} - {self.system_id}",
)
self._unsub.append(parent.async_add_listener(self._async_check_refresh))
async def unload(self):
"""Unload system."""
for unsub in reversed(self._unsub):
unsub()
self._unsub = []
@callback
def _async_check_refresh(self):
"""Update the system if timestamps have changed."""
if system := self.parent.data.get(self.system_id):
if self.system != system:
self.system = system
self.hass.async_add_job(self.async_request_refresh)
async def _async_update_data(self) -> None:
"""Update data via library."""
await self.update_notifications()
await self.update_statuses()
await self.update_version()
parameters = set()
for subscriber_parameters in self._parameter_subscribers.values():
parameters |= subscriber_parameters
parameters -= self._parameter_preload
self._parameter_preload = set()
await self.update_parameters(parameters)
async def update_version(self):
"""Update software version."""
self.software = await self.uplink.get_system_software(self.system_id)
_LOGGER.debug("Version: %s", self.software)
async def update_statuses(self):
"""Update status list."""
status_icons = await self.uplink.get_status(self.system_id)
statuses = set()
for status_icon in status_icons:
statuses.add(status_icon["title"])
for parameter in status_icon["parameters"]:
self.set_parameter(parameter["parameterId"], parameter)
self.statuses = statuses
_LOGGER.debug("Statuses: %s", statuses)
async def update_notifications(self):
"""Update notification list."""
notice = await self.uplink.get_notifications(self.system_id)
added = [k for k in notice if k not in self.notice]
removed = [k for k in self.notice if k not in notice]
self.notice = notice
for x in added:
persistent_notification.async_create(
self.hass,
x["info"]["description"],
x["info"]["title"],
"nibe:{}".format(x["notificationId"]),
)
for x in removed:
persistent_notification.async_dismiss(
self.hass, "nibe:{}".format(x["notificationId"])
)
def get_parameter(
self, parameter_id: ParameterId | None, cached=True
) -> ParameterType | None:
"""Get a cached parameter."""
return self._parameters.get(parameter_id)
async def update_parameters(self, parameters: set[ParameterId | None]):
"""Update parameter cache."""
async def _get(parameter_id: ParameterId):
self._parameters[parameter_id] = await self.uplink.get_parameter(
self.system_id, parameter_id
)
tasks = [_get(parameter_id) for parameter_id in parameters if parameter_id]
if tasks:
await asyncio.gather(*tasks)
def set_parameter(self, parameter_id: ParameterId, data: ParameterType | None):
"""Store a parameter in cache."""
self._parameters[parameter_id] = data
self._parameter_preload |= {parameter_id}
def add_parameter_subscriber(
self, parameters: set[ParameterId | None]
) -> CALLBACK_TYPE:
"""Add a subscriber for parameters."""
sentinel = object()
@callback
def _remove():
del self._parameter_subscribers[sentinel]
parameters_clean = cast(set[ParameterId], (parameters - {None}))
self._parameter_subscribers[sentinel] = parameters_clean
for parameter in parameters_clean - set(self._parameters.keys()):
self._parameters[parameter] = None
return _remove
|
07ecebe8c1b692546dd525617411d7791b2a0181
|
01e5ff7ae1bdd5ed8b048f4586c5d76d35ca2253
|
/aiobotocore/config.py
|
8d7cc6e1f66d55bea316156181089588c132ae2b
|
[
"Apache-2.0"
] |
permissive
|
aio-libs/aiobotocore
|
870fb8055eee462e5e15218517dcdaff59441730
|
1d341d79e01d6459aaa6f023d808782f81fc122c
|
refs/heads/master
| 2023-09-04T05:25:02.675573
| 2023-08-31T16:07:35
| 2023-08-31T16:07:35
| 36,618,494
| 993
| 200
|
Apache-2.0
| 2023-09-07T05:18:33
| 2015-05-31T19:28:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,698
|
py
|
config.py
|
import copy
import botocore.client
from botocore.exceptions import ParamValidationError
class AioConfig(botocore.client.Config):
def __init__(self, connector_args=None, **kwargs):
super().__init__(**kwargs)
self._validate_connector_args(connector_args)
self.connector_args = copy.copy(connector_args)
if not self.connector_args:
self.connector_args = dict()
if 'keepalive_timeout' not in self.connector_args:
# AWS has a 20 second idle timeout:
# https://web.archive.org/web/20150926192339/https://forums.aws.amazon.com/message.jspa?messageID=215367
# and aiohttp default timeout is 30s so we set it to something
# reasonable here
self.connector_args['keepalive_timeout'] = 12
def merge(self, other_config):
# Adapted from parent class
config_options = copy.copy(self._user_provided_options)
config_options.update(other_config._user_provided_options)
return AioConfig(self.connector_args, **config_options)
@staticmethod
def _validate_connector_args(connector_args):
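        # Only a small whitelist of aiohttp TCPConnector arguments is accepted here;
        # anything unknown raises ParamValidationError instead of being passed through.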
if connector_args is None:
return
for k, v in connector_args.items():
# verify_ssl is handled by verify parameter to create_client
if k == 'use_dns_cache':
if not isinstance(v, bool):
raise ParamValidationError(
report=f'{k} value must be a boolean'
)
elif k == 'keepalive_timeout':
if v is not None and not isinstance(v, (float, int)):
raise ParamValidationError(
report=f'{k} value must be a float/int or None'
)
elif k == 'force_close':
if not isinstance(v, bool):
raise ParamValidationError(
report=f'{k} value must be a boolean'
)
# limit is handled by max_pool_connections
elif k == 'ssl_context':
import ssl
if not isinstance(v, ssl.SSLContext):
raise ParamValidationError(
report=f'{k} must be an SSLContext instance'
)
elif k == "resolver":
from aiohttp.abc import AbstractResolver
if not isinstance(v, AbstractResolver):
raise ParamValidationError(
report=f'{k} must be an instance of a AbstractResolver'
)
else:
raise ParamValidationError(report=f'invalid connector_arg:{k}')
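# Usage sketch (hypothetical values; how the config object is attached to a client
# depends on the aiobotocore session API in use):
#
#   config = AioConfig(connector_args={'keepalive_timeout': 10, 'force_close': False})
#   # then pass config=config when creating a client from an aiobotocore session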
|
09515c33b783d36e838ce3f6c53f27c11739ff95
|
52107637d2687db30f168ba15ffd1e1e534f4cb4
|
/tests/datasets/test_four_way_tabla.py
|
343e818d396b424bceff47ba8863a0f131ba6d8a
|
[
"BSD-3-Clause"
] |
permissive
|
mir-dataset-loaders/mirdata
|
9be10e0201b08abf51fc72338ccaaacc8216145b
|
496eb4a9120aa16ff6963792f0c8b738a0c3f310
|
refs/heads/master
| 2023-05-07T13:15:16.517429
| 2023-03-27T13:54:37
| 2023-03-27T13:54:37
| 170,765,267
| 297
| 65
|
BSD-3-Clause
| 2023-08-05T22:48:48
| 2019-02-14T22:11:33
|
Python
|
UTF-8
|
Python
| false
| false
| 4,104
|
py
|
test_four_way_tabla.py
|
import os
import numpy as np
from tests.test_utils import run_track_tests
from mirdata import annotations
from mirdata.datasets import four_way_tabla
def test_track():
default_trackid = "AHK_solo-tintal-1"
data_home = os.path.normpath("tests/resources/mir_datasets/four_way_tabla")
dataset = four_way_tabla.Dataset(data_home)
track = dataset.track(default_trackid)
expected_attributes = {
"audio_path": os.path.join(
os.path.normpath("tests/resources/mir_datasets/four_way_tabla/"),
"4way-tabla-ismir21-dataset/train/audios/AHK_solo-tintal-1.wav",
),
"onsets_b_path": os.path.join(
os.path.normpath("tests/resources/mir_datasets/four_way_tabla/"),
"4way-tabla-ismir21-dataset/train/onsets/b/AHK_solo-tintal-1.onsets",
),
"onsets_d_path": os.path.join(
os.path.normpath("tests/resources/mir_datasets/four_way_tabla/"),
"4way-tabla-ismir21-dataset/train/onsets/d/AHK_solo-tintal-1.onsets",
),
"onsets_rb_path": os.path.join(
os.path.normpath("tests/resources/mir_datasets/four_way_tabla/"),
"4way-tabla-ismir21-dataset/train/onsets/rb/AHK_solo-tintal-1.onsets",
),
"onsets_rt_path": os.path.join(
os.path.normpath("tests/resources/mir_datasets/four_way_tabla/"),
"4way-tabla-ismir21-dataset/train/onsets/rt/AHK_solo-tintal-1.onsets",
),
"track_id": "AHK_solo-tintal-1",
"train": True,
}
expected_property_types = {
"onsets_b": annotations.BeatData,
"onsets_d": annotations.BeatData,
"onsets_rb": annotations.BeatData,
"onsets_rt": annotations.BeatData,
"audio": tuple,
}
run_track_tests(track, expected_attributes, expected_property_types)
audio, sr = track.audio
assert sr == 44100
assert audio.shape == (69152,)
def test_get_onsets():
default_trackid = "AHK_solo-tintal-1"
data_home = "tests/resources/mir_datasets/four_way_tabla"
dataset = four_way_tabla.Dataset(data_home)
track = dataset.track(default_trackid)
loaded_b = track.onsets_b
parsed_b = four_way_tabla.load_onsets(track.onsets_b_path)
# Check types
assert type(parsed_b) == annotations.BeatData
assert type(parsed_b.times) is np.ndarray
assert type(parsed_b.positions) is np.ndarray
assert type(loaded_b) == annotations.BeatData
assert type(loaded_b.times) is np.ndarray
assert type(loaded_b.positions) is np.ndarray
# Check values
assert np.array_equal(parsed_b.times, np.array([2.395, 2.885, 65.635]))
assert np.array_equal(parsed_b.positions, np.array([0.0, 0.0, 0.0]))
assert np.array_equal(loaded_b.times, np.array([2.395, 2.885, 65.635]))
assert np.array_equal(loaded_b.positions, np.array([0.0, 0.0, 0.0]))
assert four_way_tabla.load_onsets(None) is None
track = dataset.track("binati_SRC")
parsed_onsets = track.onsets_rt
assert parsed_onsets is None
def test_to_jams():
default_trackid = "AHK_solo-tintal-1"
data_home = "tests/resources/mir_datasets/four_way_tabla"
dataset = four_way_tabla.Dataset(data_home)
track = dataset.track(default_trackid)
jam = track.to_jams()
# Validate Four-Way Tabla schema
assert jam.validate()
    # Onsets
onsets = jam.search(namespace="beat")[0]["data"]
assert len(onsets) == 3
assert [onset.time for onset in onsets] == [2.395, 2.885, 65.635]
assert [onset.duration for onset in onsets] == [0.0, 0.0, 0.0]
assert [onset.value for onset in onsets] == [0.0, 0.0, 0.0]
assert [onset.confidence for onset in onsets] == [None, None, None]
def test_load_audio():
default_trackid = "AHK_solo-tintal-1"
data_home = "tests/resources/mir_datasets/four_way_tabla"
dataset = four_way_tabla.Dataset(data_home)
track = dataset.track(default_trackid)
audio_path = track.audio_path
audio, sr = four_way_tabla.load_audio(audio_path)
assert sr == 44100
assert audio.shape == (69152,)
assert type(audio) is np.ndarray
|
d39223f515f340583ecca46586dcde8ef52f51e4
|
c9f67929f734dc27b9e1b0e2ff106319ee9fd86b
|
/workflow/rules/metabat_single.smk
|
5678faf2364616a0190994e7853d8467b9de5aaa
|
[
"MIT"
] |
permissive
|
franciscozorrilla/metaGEM
|
b6a91df36dcbbf334a5dafc0a3ae75206bb413ee
|
cb099b7a557af8bddbd3b257670d263be3ac5a1f
|
refs/heads/master
| 2023-08-25T20:52:04.928295
| 2023-08-12T10:30:49
| 2023-08-12T10:30:49
| 137,376,259
| 137
| 36
|
MIT
| 2023-06-30T11:34:02
| 2018-06-14T15:26:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,803
|
smk
|
metabat_single.smk
|
rule metabat_single:
input:
assembly = rules.megahit.output,
R1 = rules.qfilter.output.R1,
R2 = rules.qfilter.output.R2
output:
directory(f'{config["path"]["root"]}/{config["folder"]["metabat"]}/{{IDs}}/{{IDs}}.metabat-bins')
benchmark:
f'{config["path"]["root"]}/{config["folder"]["benchmarks"]}/{{IDs}}.metabat.benchmark.txt'
message:
"""
Implementation of metabat2 where only coverage information from the focal sample is used
for binning. Use with the crossMapParallel subworkflow, where cross sample coverage information
is only used by CONCOCT.
"""
shell:
"""
# Activate metagem environment
set +u;source activate {config[envs][metagem]};set -u;
# Make job specific scratch dir
fsampleID=$(echo $(basename $(dirname {input.assembly})))
echo -e "\nCreating temporary directory {config[path][scratch]}/{config[folder][metabat]}/${{fsampleID}} ... "
mkdir -p {config[path][scratch]}/{config[folder][metabat]}/${{fsampleID}}
# Move into scratch dir
cd {config[path][scratch]}/{config[folder][metabat]}/${{fsampleID}}
# Copy files
cp {input.assembly} {input.R1} {input.R2} .
echo -e "\nFocal sample: $fsampleID ... "
echo "Renaming and unzipping assembly ... "
mv $(basename {input.assembly}) $(echo $fsampleID|sed 's/$/.fa.gz/g')
gunzip $(echo $fsampleID|sed 's/$/.fa.gz/g')
echo -e "\nIndexing assembly ... "
bwa index $fsampleID.fa
id=$(basename {output})
echo -e "\nMapping reads from sample against assembly $fsampleID ..."
bwa mem -t {config[cores][metabat]} $fsampleID.fa *.fastq.gz > $id.sam
echo -e "\nDeleting no-longer-needed fastq files ... "
rm *.gz
echo -e "\nConverting SAM to BAM with samtools view ... "
samtools view -@ {config[cores][metabat]} -Sb $id.sam > $id.bam
echo -e "\nDeleting no-longer-needed sam file ... "
rm $id.sam
echo -e "\nSorting BAM file with samtools sort ... "
samtools sort -@ {config[cores][metabat]} -o $id.sort $id.bam
echo -e "\nDeleting no-longer-needed bam file ... "
rm $id.bam
# Run metabat2
echo -e "\nRunning metabat2 ... "
jgi_summarize_bam_contig_depths --outputDepth $id.depth.txt $id.sort
metabat2 -i $fsampleID.fa -a $id.depth.txt -s \
{config[params][metabatMin]} \
-v --seed {config[params][seed]} \
-t 0 -m {config[params][minBin]} \
-o $(basename $(dirname {output}))
rm $fsampleID.fa
rm $id.depth.txt
# Move files to output dir
mv *.fa {output}
"""
|
6f25144bd48d9e3a394626746caed20438e5c165
|
c530897cb72b6943c7226b25824444cad5f3503b
|
/usaspending_api/awards/v2/filters/filter_helpers.py
|
8f31bcd74ed9177e84efacc37c43f4d6bbbda690
|
[
"CC0-1.0"
] |
permissive
|
fedspendingtransparency/usaspending-api
|
fc63a22d32ea0207b7273d3e1ef26ba9dbabc42a
|
38f920438697930ae3ac57bbcaae9034877d8fb7
|
refs/heads/master
| 2023-09-01T22:00:36.633612
| 2023-08-29T18:39:18
| 2023-08-29T18:39:18
| 65,394,827
| 276
| 118
|
CC0-1.0
| 2023-09-14T20:33:15
| 2016-08-10T15:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 11,872
|
py
|
filter_helpers.py
|
import logging
from collections import namedtuple
from copy import deepcopy
from datetime import datetime
from datetime import timedelta
from django.db.models import Q
from usaspending_api.common.exceptions import InvalidParameterException
from usaspending_api.common.helpers.generic_helper import dates_are_month_bookends
from usaspending_api.common.helpers.generic_helper import generate_date_from_string
from usaspending_api.common.helpers.sql_helpers import get_connection
from usaspending_api.references.constants import WEBSITE_AWARD_BINS
from usaspending_api.search.models import SubawardSearch
logger = logging.getLogger(__name__)
Range = namedtuple("Range", ["start", "end"])
def merge_date_ranges(date_range_list):
"""
    Given a list of date ranges (using the namedtuple "Range" defined above), combine overlapping date ranges.
    Although adjacent fiscal years do not strictly overlap, the desired behavior is to combine them:
    FY2010 ends on 2010-09-30 and FY2011 starts on 2010-10-01.
    To address this, when comparing ranges, 1 day is removed from the start date and 1 day is added to the end date,
    so the required overlap becomes > 1 day instead of > 0. (See the illustrative example after this function.)
Inspired by Raymond Hettinger [https://stackoverflow.com/a/9044111]
"""
ordered_list = sorted([sorted(t) for t in date_range_list])
saved_range = Range(start=ordered_list[0][0], end=ordered_list[0][1])
for st, en in ordered_list[1:]:
r = Range(st, en)
latest_start = max(r.start, saved_range.start) + timedelta(days=-1)
earliest_end = min(r.end, saved_range.end) + timedelta(days=1)
delta = (earliest_end - latest_start).days + 1 # added since ranges are closed on both ends
if delta > 1: # since the overlap is potentially extended by 1-2 days, the overlap needs to be at least 2 days
saved_range = Range(start=min(saved_range.start, st), end=max(saved_range.end, en))
else:
yield (saved_range.start, saved_range.end)
saved_range = Range(start=st, end=en)
yield (saved_range.start, saved_range.end)
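# Illustrative example (dates are arbitrary, chosen to show the fiscal-year boundary case
# described in the docstring above):
#
#     from datetime import datetime
#     fy2010 = (datetime(2009, 10, 1), datetime(2010, 9, 30))
#     fy2011 = (datetime(2010, 10, 1), datetime(2011, 9, 30))
#     list(merge_date_ranges([fy2010, fy2011]))
#     # -> [(datetime(2009, 10, 1), datetime(2011, 9, 30))]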
def date_list_to_queryset(date_list, table, is_subaward=False):
or_queryset = Q()
for v in date_list:
# Modified May 2018 so that there will always be a start and end value from combine_date_range_queryset()
date_type_dict = v.get("date_type_dict", {"gte": "action_date", "lte": "action_date"})
for operand, date_type in date_type_dict.items():
if date_type not in ["action_date", "last_modified_date", "date_signed"]:
raise InvalidParameterException("Invalid date_type: {}".format(date_type))
# When searching subawards, use the subaward equivalent fields
if is_subaward:
subaward_mappings = {
"action_date": "sub_action_date",
"last_modified_date": "last_modified_date",
"date_signed": "date_signed",
}
date_type_dict[operand] = subaward_mappings[date_type]
# (StartA <= EndB) and (EndA >= StartB)
# where "A" is an Award and "B" is the date range being searched
kwargs = {
"{}__gte".format(date_type_dict["gte"]): v["start_date"],
"{}__lte".format(date_type_dict["lte"]): v["end_date"],
}
or_queryset |= Q(**kwargs)
return table.objects.filter(or_queryset)
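# Example of the filter built for a single action_date range (illustrative;
# SomeAwardModel is a placeholder for whichever Django model is passed as `table`):
#
#     date_list_to_queryset(
#         [{"start_date": "2019-10-01", "end_date": "2020-09-30"}],
#         SomeAwardModel,
#     )
#     # equivalent to:
#     # SomeAwardModel.objects.filter(
#     #     Q(action_date__gte="2019-10-01", action_date__lte="2020-09-30")
#     # )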
def combine_date_range_queryset(date_dicts, table, min_start, max_end, dt_format="%Y-%m-%d", is_subaward=False):
final_ranges = []
date_type_list = []
for time_period in date_dicts:
dt_type = time_period.get("date_type", "action_date")
gte_dt_type = time_period.get("gte_date_type")
lte_dt_type = time_period.get("lte_date_type")
date_type_list.append(
(gte_dt_type, lte_dt_type) if gte_dt_type is not None and lte_dt_type is not None else (dt_type, dt_type)
)
for date_type_tuple in set(date_type_list):
list_of_ranges = [
(
datetime.strptime(v.get("start_date", None) or min_start, dt_format),
datetime.strptime(v.get("end_date", None) or max_end, dt_format),
)
for v in date_dicts
] # convert date strings to datetime objects
final_ranges.extend(
[
{
"start_date": r[0],
"end_date": r[1],
"date_type_dict": {"gte": date_type_tuple[0], "lte": date_type_tuple[1]},
}
for r in list(merge_date_ranges(list_of_ranges))
]
)
return date_list_to_queryset(final_ranges, table, is_subaward=is_subaward)
def get_total_transaction_column(model):
"""Returns column name based on model"""
if model == SubawardSearch:
return "subaward_amount"
else:
return "award_amount"
def total_obligation_queryset(amount_obj, model, filters, is_subaward=False):
if can_use_total_obligation_enum(amount_obj):
bins = []
for v in amount_obj:
lower_bound = v.get("lower_bound")
upper_bound = v.get("upper_bound")
for key, values in WEBSITE_AWARD_BINS.items():
if lower_bound == values["lower"] and upper_bound == values["upper"]:
bins.extend(values["enums"])
break
obl_bin_filter = {f"{'sub_' if is_subaward else ''}total_obl_bin__in": set(bins)}
or_queryset = model.objects.filter(**obl_bin_filter)
else:
column = get_total_transaction_column(model)
bound_filters = Q()
for v in amount_obj:
if v.get("lower_bound") is not None and v.get("upper_bound") is not None:
bound_dict = {f"{column}__gte": v["lower_bound"], f"{column}__lte": v["upper_bound"]}
elif v.get("lower_bound") is not None:
bound_dict = {f"{column}__gte": v["lower_bound"]}
elif v.get("upper_bound") is not None:
bound_dict = {f"{column}__lte": v["upper_bound"]}
else:
raise InvalidParameterException("Invalid filter: award amount has incorrect object.")
bound_filters |= Q(**bound_dict)
or_queryset = model.objects.filter(bound_filters)
return or_queryset
def can_use_month_aggregation(time_period):
"""
time_period is the list of action_date ranges from API
"""
try:
for v in time_period:
s = generate_date_from_string(v.get("start_date"))
e = generate_date_from_string(v.get("end_date"))
if not dates_are_month_bookends(s, e):
return False
except Exception:
return False
return True
def can_use_total_obligation_enum(amount_obj):
try:
for v in amount_obj:
lower_bound = v.get("lower_bound")
upper_bound = v.get("upper_bound")
for key, values in WEBSITE_AWARD_BINS.items():
if lower_bound == values["lower"] and upper_bound == values["upper"]:
break
else:
return False
return True
except Exception:
pass
return False
def only_action_date_type(time_period):
"""
if a date_type is last_modified_date, don't use the matview this applies to
"""
try:
for v in time_period:
if v.get("date_type", "action_date") != "action_date":
return False
except Exception:
return False
return True
def transform_keyword(request, api_version):
filter_obj = request.data.get("filters", None)
if filter_obj:
if "keyword" not in filter_obj and "keywords" not in filter_obj:
return request
keyword_array_passed = filter_obj.get("keywords", False)
keyword_string_passed = filter_obj.pop("keyword", None)
if api_version < 3:
keywords = keyword_array_passed if keyword_array_passed else [keyword_string_passed]
else:
if keyword_array_passed:
keywords = keyword_array_passed
else:
raise InvalidParameterException(
"keyword' is deprecated. Please use 'keywords'. See documentation for more information."
)
filter_obj["keywords"] = keywords
request.data["filters"] = filter_obj
return request
def get_all_award_ids_in_idv_hierarchy(root_idv_award_id):
"""
Unfortunately, there's no clean way to get IDV descendants using the Django
ORM so we will turn to the dark side to get what we need. For the provided
IDV award id (surrogate, integer, internal award id), this function will
return the award id of all awards in the IDV's hierarchy, including the root
IDV itself.
"""
sql = """
with cte as (
select award_id
from parent_award
where award_id = %(root_idv_award_id)s
union all
select cpa.award_id
from parent_award ppa
inner join parent_award cpa on
cpa.parent_award_id = ppa.award_id
where ppa.award_id = %(root_idv_award_id)s
)
select ca.id
from cte
inner join vw_awards pa on
pa.id = cte.award_id
inner join vw_awards ca on
ca.parent_award_piid = pa.piid and
ca.fpds_parent_agency_id = pa.fpds_agency_id
union all
select %(root_idv_award_id)s
"""
connection = get_connection()
with connection.cursor() as cursor:
cursor.execute(sql, {"root_idv_award_id": root_idv_award_id})
return [row[0] for row in cursor.fetchall()]
def get_descendant_award_ids(root_idv_award_id, include_child_idvs):
"""
Unfortunately, there's no clean way to get IDV descendants using the Django
ORM so we will turn to the dark side to get what we need. For the provided
IDV award id (surrogate, integer, internal award id), this function will
return the award id for all child Awards and grandchild Awards and, if
include_child_idvs is True, all child IDVs as well.
"""
sql = """
with cte as (
select award_id
from parent_award
where award_id = %(root_idv_award_id)s
union all
select cpa.award_id
from parent_award ppa
inner join parent_award cpa on
cpa.parent_award_id = ppa.award_id
where ppa.award_id = %(root_idv_award_id)s
)
select ca.id
from cte
inner join vw_awards pa on
pa.id = cte.award_id
inner join vw_awards ca on
ca.parent_award_piid = pa.piid and
ca.fpds_parent_agency_id = pa.fpds_agency_id
""" + (
"" if include_child_idvs else " and ca.type not like 'IDV%%'"
)
connection = get_connection()
with connection.cursor() as cursor:
cursor.execute(sql, {"root_idv_award_id": root_idv_award_id})
return [row[0] for row in cursor.fetchall()]
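# Illustrative call (the award id is a hypothetical surrogate integer key):
#
#     get_descendant_award_ids(123456, include_child_idvs=False)
#     # -> award ids of child and grandchild awards under IDV 123456, with child IDVs
#     #    excluded by the appended "ca.type not like 'IDV%%'" clause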
def add_date_range_comparison_types(filters, is_subaward, gte_date_type, lte_date_type):
if filters is None:
return None
# Use deepcopy to make changes to avoid conflicts when filters are looped over.
filters_deepcopy = deepcopy(filters)
if not is_subaward and filters_deepcopy.get("time_period") is not None:
for time_period in filters_deepcopy["time_period"]:
time_period["gte_date_type"] = gte_date_type
time_period["lte_date_type"] = lte_date_type
return filters_deepcopy
|
2ce36c0fb3a0439c0a209e58837ff1db84cf1471
|
e8846f706a428a91659ac6e24974dc696089fe4a
|
/pandapower/converter/cim/pp_classes.py
|
f78211cb07bafc8a6e2480da3030f51854d0773e
|
[
"BSD-3-Clause"
] |
permissive
|
e2nIEE/pandapower
|
3e434bf81b29e9c88905abbd82fd0309e2191ffb
|
5592ba1f6fcd727053a37dcf246b9bf36874c24a
|
refs/heads/develop
| 2023-09-03T23:21:25.979973
| 2023-08-31T11:00:17
| 2023-08-31T11:00:17
| 78,748,060
| 608
| 481
|
NOASSERTION
| 2023-09-14T18:22:08
| 2017-01-12T13:27:53
|
Python
|
UTF-8
|
Python
| false
| false
| 5,060
|
py
|
pp_classes.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2023 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import logging
from typing import Dict
import json
import pandapower as pp
import pandapower.auxiliary
from . import cim_tools
class PandapowerDiagnostic:
"""
Create a pandapower diagnostic dictionary with CIM IDs instead of pandapower IDs.
:param net: The pandapower network.
:param diagnostic: The pandapower diagnostic. If None a pp.diagnostic(net) will be run. Optional, default: None.
:return: The pandapower diagnostic with CIM IDs.
"""
def __init__(self, net: pandapower.auxiliary.pandapowerNet, diagnostic: Dict = None):
self.logger = logging.getLogger(self.__class__.__name__)
self.net = net
self.diagnostic = diagnostic
def _rec_replace_pp_diagnostic_with_cim_ids(self, input_obj, element_type: str = None):
sc = cim_tools.get_pp_net_special_columns_dict()
element_mapping = dict({
'bus': 'bus', 'buses': 'bus', 'load': 'load', 'loads': 'load', 'sgen': 'sgen', 'sgens': 'sgen',
'motor': 'motor', 'motors': 'motor',
'asymmetric_load': 'asymmetric_load', 'asymmetric_loads': 'asymmetric_load',
'asymmetric_sgen': 'asymmetric_sgen', 'asymmetric_sgens': 'asymmetric_sgen',
'storage': 'storage', 'storages': 'storage', 'gen': 'gen', 'gens': 'gen',
'switch': 'switch', 'switches': 'switch', 'shunt': 'shunt', 'shunts': 'shunt',
'ext_grid': 'ext_grid', 'ext_grids': 'ext_grid', 'line': 'line', 'lines': 'line',
'trafo': 'trafo', 'trafos': 'trafo', 'trafo3w': 'trafo3w', 'trafos3w': 'trafo3w',
'impedance': 'impedance', 'impedances': 'impedance', 'dcline': 'dcline', 'dclines': 'dcline',
'ward': 'ward', 'wards': 'ward', 'xward': 'xward', 'xwards': 'xward'})
if isinstance(input_obj, list):
return_obj = []
for one_input_obj in input_obj:
if isinstance(one_input_obj, list) or isinstance(one_input_obj, dict):
return_obj.append(self._rec_replace_pp_diagnostic_with_cim_ids(one_input_obj, element_type))
elif element_type is not None and isinstance(one_input_obj, int):
# get the RDF ID direct
return_obj.append(self.net[element_type][sc['o_id']].at[one_input_obj])
elif element_type is not None and isinstance(one_input_obj, tuple):
# the first item from the tuple should be the element index
one_input_obj = list(one_input_obj)
if one_input_obj[0] in self.net[element_type].index.values:
one_input_obj[0] = self.net[element_type][sc['o_id']].at[one_input_obj[0]]
return_obj.append(tuple(one_input_obj))
else:
# default
return_obj.append(one_input_obj)
elif isinstance(input_obj, dict):
return_obj = dict()
for key, item in input_obj.items():
if isinstance(item, list) or isinstance(item, dict) and key in element_mapping.keys():
element_type = element_mapping[key]
return_obj[key] = self._rec_replace_pp_diagnostic_with_cim_ids(item, element_type)
else:
# default
return_obj[key] = item
else:
return_obj = input_obj
return return_obj
def replace_pp_diagnostic_with_cim_ids(self) -> Dict:
"""
Create a pandapower diagnostic dictionary with CIM IDs instead of pandapower IDs.
        If no diagnostic was passed to the constructor, pp.diagnostic(net) is run first.
:return: The pandapower diagnostic with CIM IDs.
"""
if self.diagnostic is None:
self.diagnostic = pp.diagnostic(self.net)
result_diagnostic = dict()
for key, item in self.diagnostic.items():
result_diagnostic[key] = self._rec_replace_pp_diagnostic_with_cim_ids(item)
# add the CGMES IDs
if hasattr(self.net, 'CGMES'):
result_diagnostic['CGMES_IDs'] = dict()
for one_prf, one_prf_item in self.net['CGMES'].items():
result_diagnostic['CGMES_IDs'][one_prf] = list(one_prf_item.keys())
return result_diagnostic
def serialize(self, diagnostic: Dict, path_to_store: str):
"""
Serialize a pandapower diagnostic as json to disk.
:param diagnostic: The pandapower diagnostic dictionary.
:param path_to_store: The path to store the json on the disk.
:return:
"""
self.logger.info("Storing diagnostic to path: %s" % path_to_store)
with open(path_to_store, mode='w', encoding='UTF-8') as fp:
json.dump(diagnostic, fp, indent=2, sort_keys=True)
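# Minimal usage sketch (illustrative; assumes `net` is a pandapower network produced by
# the CIM converter, so its element tables carry the original CIM IDs in the special
# "o_id" column used above):
#
#     diag = PandapowerDiagnostic(net)                   # pp.diagnostic(net) runs lazily
#     cim_diagnostic = diag.replace_pp_diagnostic_with_cim_ids()
#     diag.serialize(cim_diagnostic, "diagnostic_cim.json")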
|
3815b07ea1b50b2cc44596c173874635742922f1
|
4b823e3338c56d38c7fda32b1900591cbb10dfc0
|
/bindings/python/tests/mesh/test-py-euclidean-distance-transform.py
|
3ac6fb6f005ebe11be009706b065951a4a8c18fe
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
Geode-solutions/OpenGeode
|
475e63ca5f33b039b9f010dc5ae09870f4d628a1
|
7d8d581ecf0556af7820b0b37707f3695798b3b3
|
refs/heads/master
| 2023-08-20T23:42:54.775348
| 2023-08-11T12:37:41
| 2023-08-11T12:37:41
| 200,015,280
| 141
| 13
|
MIT
| 2023-09-14T09:43:29
| 2019-08-01T08:58:36
|
C++
|
UTF-8
|
Python
| false
| false
| 4,965
|
py
|
test-py-euclidean-distance-transform.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019 - 2023 Geode-solutions
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import platform
if sys.version_info >= (3, 8, 0) and platform.system() == "Windows":
for path in [x.strip() for x in os.environ['PATH'].split(';') if x]:
os.add_dll_directory(path)
import math
import opengeode_py_basic as basic
import opengeode_py_geometry as geom
import opengeode_py_mesh as mesh
def test_distance_transform_2D(cell_length):
grid = mesh.RegularGrid2D.create()
builder = mesh.RegularGridBuilder2D.create(grid)
builder.initialize_cartesian_grid(geom.Point2D([0, 0]), [
10, 10], cell_length)
objects_raster = [[0, 0], [9, 9]]
map_distance = mesh.euclidean_distance_transform2D(
grid, objects_raster, "test_edt")
values = [
([0, 1], cell_length * 1.),
([0, 2], cell_length * 2.),
([0, 3], cell_length * 3.),
([0, 4], cell_length * 4.),
([0, 5], cell_length * 5.),
([0, 6], cell_length * 6.),
([0, 7], cell_length * 7.),
([0, 8], cell_length * 8.),
([0, 9], cell_length * 9.),
([1, 0], cell_length * 1.),
([2, 0], cell_length * 2.),
([3, 0], cell_length * 3.),
([4, 0], cell_length * 4.),
([5, 0], cell_length * 5.),
([6, 0], cell_length * 6.),
([7, 0], cell_length * 7.),
([8, 0], cell_length * 8.),
([9, 0], cell_length * 9.),
([0, 0], cell_length * 0.),
([1, 1], cell_length * math.sqrt(2)),
([2, 2], cell_length * math.sqrt(8)),
([3, 3], cell_length * math.sqrt(18)),
([4, 4], cell_length * math.sqrt(32)),
([5, 5], cell_length * math.sqrt(32)),
([6, 6], cell_length * math.sqrt(18)),
([7, 7], cell_length * math.sqrt(8)),
([8, 8], cell_length * math.sqrt(2)),
([9, 9], cell_length * 0.),
([8, 1], cell_length * math.sqrt(65)),
([7, 2], cell_length * math.sqrt(53)),
([6, 3], cell_length * math.sqrt(45)),
([5, 4], cell_length * math.sqrt(41)),
([4, 5], cell_length * math.sqrt(41)),
([3, 6], cell_length * math.sqrt(45)),
([2, 7], cell_length * math.sqrt(53)),
([1, 8], cell_length * math.sqrt(65))
]
for value in values:
        if math.fabs(map_distance.value(grid.cell_index(value[0])) - value[1]) > basic.global_epsilon:
raise ValueError("[Test] Wrong 2D euclidean distance map")
def test_distance_transform_3D(cell_length):
grid = mesh.RegularGrid3D.create()
builder = mesh.RegularGridBuilder3D.create(grid)
builder.initialize_cartesian_grid(geom.Point3D([0, 0, 0]), [
10, 10, 10], cell_length)
objects_raster = [[0, 0, 0], [9, 9, 9]]
map_distance = mesh.euclidean_distance_transform3D(
grid, objects_raster, "test_edt")
values = [
([0, 9, 0], cell_length * 9.),
([0, 9, 9], cell_length * 9.),
([0, 0, 9], cell_length * 9.),
([9, 0, 0], cell_length * 9.),
([9, 0, 9], cell_length * 9.),
([9, 9, 0], cell_length * 9.),
([0, 0, 0], cell_length * 0.),
([1, 1, 1], cell_length * math.sqrt(3)),
([2, 2, 2], cell_length * math.sqrt(12)),
([3, 3, 3], cell_length * math.sqrt(27)),
([4, 4, 4], cell_length * math.sqrt(48)),
([5, 5, 5], cell_length * math.sqrt(48)),
([6, 6, 6], cell_length * math.sqrt(27)),
([7, 7, 7], cell_length * math.sqrt(12)),
([8, 8, 8], cell_length * math.sqrt(3)),
([9, 9, 9], cell_length * 0.)
]
for value in values:
if math.fabs(map_distance.value(grid.cell_index(value[0])) - value[1]) > basic.global_epsilon:
raise ValueError("[Test] Wrong 3D euclidean distance map")
if __name__ == '__main__':
mesh.OpenGeodeMeshLibrary.initialize()
test_distance_transform_2D(1.)
test_distance_transform_3D(4.5)
|
83b4c751482c3f4e5907e5b68e2b63553043f6e9
|
80a3d98eae1d755d6914b5cbde63fd10f5cc2046
|
/autox/autox_video/mmaction2/mmaction/models/recognizers/audio_recognizer.py
|
6d5c828207778c906edcbeab160b312c39be2162
|
[
"Apache-2.0"
] |
permissive
|
4paradigm/AutoX
|
efda57b51b586209e1d58e1dab7d0797083aadc5
|
7eab9f4744329a225ff01bb5ec360c4662e1e52e
|
refs/heads/master
| 2023-05-24T00:53:37.109036
| 2023-02-14T14:21:50
| 2023-02-14T14:21:50
| 388,068,949
| 752
| 162
|
Apache-2.0
| 2022-07-12T08:28:09
| 2021-07-21T09:45:41
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,680
|
py
|
audio_recognizer.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import RECOGNIZERS
from .base import BaseRecognizer
@RECOGNIZERS.register_module()
class AudioRecognizer(BaseRecognizer):
"""Audio recognizer model framework."""
def forward(self, audios, label=None, return_loss=True):
"""Define the computation performed at every call."""
if return_loss:
if label is None:
raise ValueError('Label should not be None.')
return self.forward_train(audios, label)
return self.forward_test(audios)
def forward_train(self, audios, labels):
"""Defines the computation performed at every call when training."""
audios = audios.reshape((-1, ) + audios.shape[2:])
x = self.extract_feat(audios)
cls_score = self.cls_head(x)
gt_labels = labels.squeeze()
loss = self.cls_head.loss(cls_score, gt_labels)
return loss
def forward_test(self, audios):
"""Defines the computation performed at every call when evaluation and
testing."""
num_segs = audios.shape[1]
audios = audios.reshape((-1, ) + audios.shape[2:])
x = self.extract_feat(audios)
cls_score = self.cls_head(x)
cls_score = self.average_clip(cls_score, num_segs)
return cls_score.cpu().numpy()
def forward_gradcam(self, audios):
raise NotImplementedError
def train_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during training.
This method defines an iteration step during training, except for the
back propagation and optimizer updating, which are done in an optimizer
hook. Note that in some complicated cases or models, the whole process
including back propagation and optimizer updating is also defined in
this method, such as GAN.
Args:
data_batch (dict): The output of dataloader.
optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
runner is passed to ``train_step()``. This argument is unused
and reserved.
Returns:
dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
``num_samples``.
``loss`` is a tensor for back propagation, which can be a
weighted sum of multiple losses.
``log_vars`` contains all the variables to be sent to the
logger.
``num_samples`` indicates the batch size (when the model is
DDP, it means the batch size on each GPU), which is used for
averaging the logs.
"""
audios = data_batch['audios']
label = data_batch['label']
losses = self(audios, label)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(next(iter(data_batch.values()))))
return outputs
def val_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during validation.
This method shares the same signature as :func:`train_step`, but used
during val epochs. Note that the evaluation after training epochs is
not implemented with this method, but an evaluation hook.
"""
audios = data_batch['audios']
label = data_batch['label']
losses = self(audios, label)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(next(iter(data_batch.values()))))
return outputs
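# Rough usage sketch (illustrative; `model` is a built AudioRecognizer and `data_batch`
# a dict with 'audios' and 'label' tensors from the audio dataloader; the optimizer
# argument is unused, as noted in the docstring):
#
#     outputs = model.train_step(data_batch, optimizer=None)
#     outputs['loss'].backward()   # back-propagation is normally handled by the optimizer hook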
|
523c392a1b5f7322eb8de06e18f377b89ff89627
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/nxos/tests/ShowIpInterfaceVrfAll/cli/equal/golden_output7_expected.py
|
ce31b1173b62f51d29ea36bd1eb190bf2b7401d4
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 68,328
|
py
|
golden_output7_expected.py
|
expected_output = {
"Ethernet1/3": {
"directed_broadcast": "disabled",
"icmp_port_unreachable": "enabled",
"icmp_redirects": "enabled",
"icmp_unreachable": "disabled",
"int_stat_last_reset": "never",
"interface_status": "protocol-up/link-up/admin-up",
"iod": 7,
"ip_forwarding": "disabled",
"ip_mtu": 1500,
"ipv4": {
"10.69.111.2/24": {
"broadcast_address": "255.255.255.255",
"ip": "10.69.111.2",
"ip_subnet": "10.69.111.0",
"prefix_length": "24",
"route_preference": "0",
"route_tag": "0",
},
"counters": {
"broadcast_bytes_consumed": 0,
"broadcast_bytes_forwarded": 0,
"broadcast_bytes_originated": 0,
"broadcast_bytes_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_packets_originated": 0,
"broadcast_packets_received": 0,
"broadcast_packets_sent": 0,
"labeled_bytes_consumed": 0,
"labeled_bytes_forwarded": 0,
"labeled_bytes_originated": 0,
"labeled_bytes_received": 0,
"labeled_bytes_sent": 0,
"labeled_packets_consumed": 0,
"labeled_packets_forwarded": 0,
"labeled_packets_originated": 0,
"labeled_packets_received": 0,
"labeled_packets_sent": 0,
"multicast_bytes_consumed": 6294,
"multicast_bytes_forwarded": 0,
"multicast_bytes_originated": 15960,
"multicast_bytes_received": 6364,
"multicast_bytes_sent": 15960,
"multicast_packets_consumed": 189,
"multicast_packets_forwarded": 0,
"multicast_packets_originated": 205,
"multicast_packets_received": 97,
"multicast_packets_sent": 205,
"unicast_bytes_consumed": 1240,
"unicast_bytes_forwarded": 0,
"unicast_bytes_originated": 556,
"unicast_bytes_received": 620,
"unicast_bytes_sent": 556,
"unicast_packets_consumed": 12,
"unicast_packets_forwarded": 0,
"unicast_packets_originated": 6,
"unicast_packets_received": 6,
"unicast_packets_sent": 6,
},
},
"load_sharing": "none",
"local_proxy_arp": "disabled",
"multicast_groups": [
"224.0.0.1",
"224.0.0.13",
"224.0.0.2",
"224.0.0.5",
"224.0.0.6",
],
"multicast_routing": "enabled",
"proxy_arp": "disabled",
"unicast_reverse_path": "none",
"vrf": "default",
},
"Vlan100": {
"directed_broadcast": "disabled",
"icmp_port_unreachable": "enabled",
"icmp_redirects": "disabled",
"icmp_unreachable": "disabled",
"int_stat_last_reset": "never",
"interface_status": "protocol-up/link-up/admin-up",
"iod": 71,
"ip_forwarding": "disabled",
"ip_mtu": 1500,
"ipv4": {
"10.220.11.1/24": {
"broadcast_address": "255.255.255.255",
"ip": "10.220.11.1",
"ip_subnet": "10.220.11.0",
"prefix_length": "24",
"route_preference": "0",
"route_tag": "0",
},
"counters": {
"broadcast_bytes_consumed": 0,
"broadcast_bytes_forwarded": 0,
"broadcast_bytes_originated": 0,
"broadcast_bytes_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_packets_originated": 0,
"broadcast_packets_received": 0,
"broadcast_packets_sent": 0,
"labeled_bytes_consumed": 0,
"labeled_bytes_forwarded": 0,
"labeled_bytes_originated": 0,
"labeled_bytes_received": 0,
"labeled_bytes_sent": 0,
"labeled_packets_consumed": 0,
"labeled_packets_forwarded": 0,
"labeled_packets_originated": 0,
"labeled_packets_received": 0,
"labeled_packets_sent": 0,
"multicast_bytes_consumed": 0,
"multicast_bytes_forwarded": 0,
"multicast_bytes_originated": 2048,
"multicast_bytes_received": 0,
"multicast_bytes_sent": 2048,
"multicast_packets_consumed": 0,
"multicast_packets_forwarded": 0,
"multicast_packets_originated": 23,
"multicast_packets_received": 0,
"multicast_packets_sent": 23,
"unicast_bytes_consumed": 0,
"unicast_bytes_forwarded": 0,
"unicast_bytes_originated": 0,
"unicast_bytes_received": 0,
"unicast_bytes_sent": 0,
"unicast_packets_consumed": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_originated": 0,
"unicast_packets_received": 0,
"unicast_packets_sent": 0,
},
},
"load_sharing": "none",
"local_proxy_arp": "disabled",
"multicast_groups": ["224.0.0.1", "224.0.0.13", "224.0.0.2"],
"multicast_routing": "enabled",
"proxy_arp": "disabled",
"unicast_reverse_path": "none",
"vrf": "vrf-9100",
},
"Vlan1000": {
"directed_broadcast": "disabled",
"icmp_port_unreachable": "enabled",
"icmp_redirects": "disabled",
"icmp_unreachable": "disabled",
"int_stat_last_reset": "never",
"interface_status": "protocol-up/link-up/admin-up",
"iod": 86,
"ip_forwarding": "enabled",
"ip_mtu": 1500,
"ipv4": {
"counters": {
"broadcast_bytes_consumed": 0,
"broadcast_bytes_forwarded": 0,
"broadcast_bytes_originated": 0,
"broadcast_bytes_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_packets_originated": 0,
"broadcast_packets_received": 0,
"broadcast_packets_sent": 0,
"labeled_bytes_consumed": 0,
"labeled_bytes_forwarded": 0,
"labeled_bytes_originated": 0,
"labeled_bytes_received": 0,
"labeled_bytes_sent": 0,
"labeled_packets_consumed": 0,
"labeled_packets_forwarded": 0,
"labeled_packets_originated": 0,
"labeled_packets_received": 0,
"labeled_packets_sent": 0,
"multicast_bytes_consumed": 0,
"multicast_bytes_forwarded": 0,
"multicast_bytes_originated": 1496,
"multicast_bytes_received": 0,
"multicast_bytes_sent": 1496,
"multicast_packets_consumed": 0,
"multicast_packets_forwarded": 0,
"multicast_packets_originated": 22,
"multicast_packets_received": 0,
"multicast_packets_sent": 22,
"unicast_bytes_consumed": 0,
"unicast_bytes_forwarded": 0,
"unicast_bytes_originated": 0,
"unicast_bytes_received": 0,
"unicast_bytes_sent": 0,
"unicast_packets_consumed": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_originated": 0,
"unicast_packets_received": 0,
"unicast_packets_sent": 0,
},
"none": {"broadcast_address": "255.255.255.255", "ip": "none"},
},
"load_sharing": "none",
"local_proxy_arp": "disabled",
"multicast_groups": ["224.0.0.1", "224.0.0.13", "224.0.0.2"],
"multicast_routing": "enabled",
"proxy_arp": "disabled",
"unicast_reverse_path": "none",
"vrf": "vrf-9100",
},
"Vlan1005": {
"directed_broadcast": "disabled",
"icmp_port_unreachable": "enabled",
"icmp_redirects": "disabled",
"icmp_unreachable": "disabled",
"int_stat_last_reset": "never",
"interface_status": "protocol-up/link-up/admin-up",
"iod": 87,
"ip_forwarding": "enabled",
"ip_mtu": 1500,
"ipv4": {
"counters": {
"broadcast_bytes_consumed": 0,
"broadcast_bytes_forwarded": 0,
"broadcast_bytes_originated": 0,
"broadcast_bytes_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_packets_originated": 0,
"broadcast_packets_received": 0,
"broadcast_packets_sent": 0,
"labeled_bytes_consumed": 0,
"labeled_bytes_forwarded": 0,
"labeled_bytes_originated": 0,
"labeled_bytes_received": 0,
"labeled_bytes_sent": 0,
"labeled_packets_consumed": 0,
"labeled_packets_forwarded": 0,
"labeled_packets_originated": 0,
"labeled_packets_received": 0,
"labeled_packets_sent": 0,
"multicast_bytes_consumed": 0,
"multicast_bytes_forwarded": 0,
"multicast_bytes_originated": 1496,
"multicast_bytes_received": 0,
"multicast_bytes_sent": 1496,
"multicast_packets_consumed": 0,
"multicast_packets_forwarded": 0,
"multicast_packets_originated": 22,
"multicast_packets_received": 0,
"multicast_packets_sent": 22,
"unicast_bytes_consumed": 0,
"unicast_bytes_forwarded": 0,
"unicast_bytes_originated": 0,
"unicast_bytes_received": 0,
"unicast_bytes_sent": 0,
"unicast_packets_consumed": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_originated": 0,
"unicast_packets_received": 0,
"unicast_packets_sent": 0,
},
"none": {"broadcast_address": "255.255.255.255", "ip": "none"},
},
"load_sharing": "none",
"local_proxy_arp": "disabled",
"multicast_groups": ["224.0.0.1", "224.0.0.13", "224.0.0.2"],
"multicast_routing": "enabled",
"proxy_arp": "disabled",
"unicast_reverse_path": "none",
"vrf": "vrf-9105",
},
"Vlan1006": {
"directed_broadcast": "disabled",
"icmp_port_unreachable": "enabled",
"icmp_redirects": "disabled",
"icmp_unreachable": "disabled",
"int_stat_last_reset": "never",
"interface_status": "protocol-up/link-up/admin-up",
"iod": 88,
"ip_forwarding": "enabled",
"ip_mtu": 1500,
"ipv4": {
"counters": {
"broadcast_bytes_consumed": 0,
"broadcast_bytes_forwarded": 0,
"broadcast_bytes_originated": 0,
"broadcast_bytes_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_packets_originated": 0,
"broadcast_packets_received": 0,
"broadcast_packets_sent": 0,
"labeled_bytes_consumed": 0,
"labeled_bytes_forwarded": 0,
"labeled_bytes_originated": 0,
"labeled_bytes_received": 0,
"labeled_bytes_sent": 0,
"labeled_packets_consumed": 0,
"labeled_packets_forwarded": 0,
"labeled_packets_originated": 0,
"labeled_packets_received": 0,
"labeled_packets_sent": 0,
"multicast_bytes_consumed": 0,
"multicast_bytes_forwarded": 0,
"multicast_bytes_originated": 1496,
"multicast_bytes_received": 0,
"multicast_bytes_sent": 1496,
"multicast_packets_consumed": 0,
"multicast_packets_forwarded": 0,
"multicast_packets_originated": 22,
"multicast_packets_received": 0,
"multicast_packets_sent": 22,
"unicast_bytes_consumed": 0,
"unicast_bytes_forwarded": 0,
"unicast_bytes_originated": 0,
"unicast_bytes_received": 0,
"unicast_bytes_sent": 0,
"unicast_packets_consumed": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_originated": 0,
"unicast_packets_received": 0,
"unicast_packets_sent": 0,
},
"none": {"broadcast_address": "255.255.255.255", "ip": "none"},
},
"load_sharing": "none",
"local_proxy_arp": "disabled",
"multicast_groups": ["224.0.0.1", "224.0.0.13", "224.0.0.2"],
"multicast_routing": "enabled",
"proxy_arp": "disabled",
"unicast_reverse_path": "none",
"vrf": "vrf-9106",
},
"Vlan1007": {
"directed_broadcast": "disabled",
"icmp_port_unreachable": "enabled",
"icmp_redirects": "disabled",
"icmp_unreachable": "disabled",
"int_stat_last_reset": "never",
"interface_status": "protocol-up/link-up/admin-up",
"iod": 89,
"ip_forwarding": "enabled",
"ip_mtu": 1500,
"ipv4": {
"counters": {
"broadcast_bytes_consumed": 0,
"broadcast_bytes_forwarded": 0,
"broadcast_bytes_originated": 0,
"broadcast_bytes_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_packets_originated": 0,
"broadcast_packets_received": 0,
"broadcast_packets_sent": 0,
"labeled_bytes_consumed": 0,
"labeled_bytes_forwarded": 0,
"labeled_bytes_originated": 0,
"labeled_bytes_received": 0,
"labeled_bytes_sent": 0,
"labeled_packets_consumed": 0,
"labeled_packets_forwarded": 0,
"labeled_packets_originated": 0,
"labeled_packets_received": 0,
"labeled_packets_sent": 0,
"multicast_bytes_consumed": 0,
"multicast_bytes_forwarded": 0,
"multicast_bytes_originated": 1496,
"multicast_bytes_received": 0,
"multicast_bytes_sent": 1496,
"multicast_packets_consumed": 0,
"multicast_packets_forwarded": 0,
"multicast_packets_originated": 22,
"multicast_packets_received": 0,
"multicast_packets_sent": 22,
"unicast_bytes_consumed": 0,
"unicast_bytes_forwarded": 0,
"unicast_bytes_originated": 0,
"unicast_bytes_received": 0,
"unicast_bytes_sent": 0,
"unicast_packets_consumed": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_originated": 0,
"unicast_packets_received": 0,
"unicast_packets_sent": 0,
},
"none": {"broadcast_address": "255.255.255.255", "ip": "none"},
},
"load_sharing": "none",
"local_proxy_arp": "disabled",
"multicast_groups": ["224.0.0.1", "224.0.0.13", "224.0.0.2"],
"multicast_routing": "enabled",
"proxy_arp": "disabled",
"unicast_reverse_path": "none",
"vrf": "vrf-9107",
},
"Vlan1008": {
"directed_broadcast": "disabled",
"icmp_port_unreachable": "enabled",
"icmp_redirects": "disabled",
"icmp_unreachable": "disabled",
"int_stat_last_reset": "never",
"interface_status": "protocol-up/link-up/admin-up",
"iod": 90,
"ip_forwarding": "enabled",
"ip_mtu": 1500,
"ipv4": {
"counters": {
"broadcast_bytes_consumed": 0,
"broadcast_bytes_forwarded": 0,
"broadcast_bytes_originated": 0,
"broadcast_bytes_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_packets_originated": 0,
"broadcast_packets_received": 0,
"broadcast_packets_sent": 0,
"labeled_bytes_consumed": 0,
"labeled_bytes_forwarded": 0,
"labeled_bytes_originated": 0,
"labeled_bytes_received": 0,
"labeled_bytes_sent": 0,
"labeled_packets_consumed": 0,
"labeled_packets_forwarded": 0,
"labeled_packets_originated": 0,
"labeled_packets_received": 0,
"labeled_packets_sent": 0,
"multicast_bytes_consumed": 0,
"multicast_bytes_forwarded": 0,
"multicast_bytes_originated": 1496,
"multicast_bytes_received": 0,
"multicast_bytes_sent": 1496,
"multicast_packets_consumed": 0,
"multicast_packets_forwarded": 0,
"multicast_packets_originated": 22,
"multicast_packets_received": 0,
"multicast_packets_sent": 22,
"unicast_bytes_consumed": 0,
"unicast_bytes_forwarded": 0,
"unicast_bytes_originated": 0,
"unicast_bytes_received": 0,
"unicast_bytes_sent": 0,
"unicast_packets_consumed": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_originated": 0,
"unicast_packets_received": 0,
"unicast_packets_sent": 0,
},
"none": {"broadcast_address": "255.255.255.255", "ip": "none"},
},
"load_sharing": "none",
"local_proxy_arp": "disabled",
"multicast_groups": ["224.0.0.1", "224.0.0.13", "224.0.0.2"],
"multicast_routing": "enabled",
"proxy_arp": "disabled",
"unicast_reverse_path": "none",
"vrf": "vrf-9108",
},
"Vlan1009": {
"directed_broadcast": "disabled",
"icmp_port_unreachable": "enabled",
"icmp_redirects": "disabled",
"icmp_unreachable": "disabled",
"int_stat_last_reset": "never",
"interface_status": "protocol-up/link-up/admin-up",
"iod": 91,
"ip_forwarding": "enabled",
"ip_mtu": 1500,
"ipv4": {
"counters": {
"broadcast_bytes_consumed": 0,
"broadcast_bytes_forwarded": 0,
"broadcast_bytes_originated": 0,
"broadcast_bytes_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_packets_originated": 0,
"broadcast_packets_received": 0,
"broadcast_packets_sent": 0,
"labeled_bytes_consumed": 0,
"labeled_bytes_forwarded": 0,
"labeled_bytes_originated": 0,
"labeled_bytes_received": 0,
"labeled_bytes_sent": 0,
"labeled_packets_consumed": 0,
"labeled_packets_forwarded": 0,
"labeled_packets_originated": 0,
"labeled_packets_received": 0,
"labeled_packets_sent": 0,
"multicast_bytes_consumed": 0,
"multicast_bytes_forwarded": 0,
"multicast_bytes_originated": 1428,
"multicast_bytes_received": 0,
"multicast_bytes_sent": 1428,
"multicast_packets_consumed": 0,
"multicast_packets_forwarded": 0,
"multicast_packets_originated": 21,
"multicast_packets_received": 0,
"multicast_packets_sent": 21,
"unicast_bytes_consumed": 0,
"unicast_bytes_forwarded": 0,
"unicast_bytes_originated": 0,
"unicast_bytes_received": 0,
"unicast_bytes_sent": 0,
"unicast_packets_consumed": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_originated": 0,
"unicast_packets_received": 0,
"unicast_packets_sent": 0,
},
"none": {"broadcast_address": "255.255.255.255", "ip": "none"},
},
"load_sharing": "none",
"local_proxy_arp": "disabled",
"multicast_groups": ["224.0.0.1", "224.0.0.13", "224.0.0.2"],
"multicast_routing": "enabled",
"proxy_arp": "disabled",
"unicast_reverse_path": "none",
"vrf": "vrf-9109",
},
"Vlan101": {
"directed_broadcast": "disabled",
"icmp_port_unreachable": "enabled",
"icmp_redirects": "disabled",
"icmp_unreachable": "disabled",
"int_stat_last_reset": "never",
"interface_status": "protocol-up/link-up/admin-up",
"iod": 72,
"ip_forwarding": "disabled",
"ip_mtu": 1500,
"ipv4": {
"10.220.12.1/24": {
"broadcast_address": "255.255.255.255",
"ip": "10.220.12.1",
"ip_subnet": "10.220.12.0",
"prefix_length": "24",
"route_preference": "0",
"route_tag": "0",
},
"counters": {
"broadcast_bytes_consumed": 0,
"broadcast_bytes_forwarded": 0,
"broadcast_bytes_originated": 0,
"broadcast_bytes_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_packets_originated": 0,
"broadcast_packets_received": 0,
"broadcast_packets_sent": 0,
"labeled_bytes_consumed": 0,
"labeled_bytes_forwarded": 0,
"labeled_bytes_originated": 0,
"labeled_bytes_received": 0,
"labeled_bytes_sent": 0,
"labeled_packets_consumed": 0,
"labeled_packets_forwarded": 0,
"labeled_packets_originated": 0,
"labeled_packets_received": 0,
"labeled_packets_sent": 0,
"multicast_bytes_consumed": 0,
"multicast_bytes_forwarded": 0,
"multicast_bytes_originated": 2048,
"multicast_bytes_received": 0,
"multicast_bytes_sent": 2048,
"multicast_packets_consumed": 0,
"multicast_packets_forwarded": 0,
"multicast_packets_originated": 23,
"multicast_packets_received": 0,
"multicast_packets_sent": 23,
"unicast_bytes_consumed": 0,
"unicast_bytes_forwarded": 0,
"unicast_bytes_originated": 0,
"unicast_bytes_received": 0,
"unicast_bytes_sent": 0,
"unicast_packets_consumed": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_originated": 0,
"unicast_packets_received": 0,
"unicast_packets_sent": 0,
},
},
"load_sharing": "none",
"local_proxy_arp": "disabled",
"multicast_groups": ["224.0.0.1", "224.0.0.13", "224.0.0.2"],
"multicast_routing": "enabled",
"proxy_arp": "disabled",
"unicast_reverse_path": "none",
"vrf": "vrf-9100",
},
"Vlan102": {
"directed_broadcast": "disabled",
"icmp_port_unreachable": "enabled",
"icmp_redirects": "disabled",
"icmp_unreachable": "disabled",
"int_stat_last_reset": "never",
"interface_status": "protocol-down/link-down/admin-up",
"iod": 73,
"ip_forwarding": "disabled",
"ip_mtu": 1500,
"ipv4": {
"10.220.13.1/24": {
"broadcast_address": "255.255.255.255",
"ip": "10.220.13.1",
"ip_subnet": "10.220.13.0",
"prefix_length": "24",
"route_preference": "0",
"route_tag": "0",
},
"counters": {
"broadcast_bytes_consumed": 0,
"broadcast_bytes_forwarded": 0,
"broadcast_bytes_originated": 0,
"broadcast_bytes_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_packets_originated": 0,
"broadcast_packets_received": 0,
"broadcast_packets_sent": 0,
"labeled_bytes_consumed": 0,
"labeled_bytes_forwarded": 0,
"labeled_bytes_originated": 0,
"labeled_bytes_received": 0,
"labeled_bytes_sent": 0,
"labeled_packets_consumed": 0,
"labeled_packets_forwarded": 0,
"labeled_packets_originated": 0,
"labeled_packets_received": 0,
"labeled_packets_sent": 0,
"multicast_bytes_consumed": 0,
"multicast_bytes_forwarded": 0,
"multicast_bytes_originated": 0,
"multicast_bytes_received": 0,
"multicast_bytes_sent": 0,
"multicast_packets_consumed": 0,
"multicast_packets_forwarded": 0,
"multicast_packets_originated": 0,
"multicast_packets_received": 0,
"multicast_packets_sent": 0,
"unicast_bytes_consumed": 0,
"unicast_bytes_forwarded": 0,
"unicast_bytes_originated": 0,
"unicast_bytes_received": 0,
"unicast_bytes_sent": 0,
"unicast_packets_consumed": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_originated": 0,
"unicast_packets_received": 0,
"unicast_packets_sent": 0,
},
},
"load_sharing": "none",
"local_proxy_arp": "disabled",
"multicast_groups": ["224.0.0.1", "224.0.0.13", "224.0.0.2"],
"multicast_routing": "enabled",
"proxy_arp": "disabled",
"unicast_reverse_path": "none",
"vrf": "vrf-9100",
},
"Vlan105": {
"directed_broadcast": "disabled",
"icmp_port_unreachable": "enabled",
"icmp_redirects": "disabled",
"icmp_unreachable": "disabled",
"int_stat_last_reset": "never",
"interface_status": "protocol-up/link-up/admin-up",
"iod": 75,
"ip_forwarding": "disabled",
"ip_mtu": 1500,
"ipv4": {
"10.220.16.1/24": {
"broadcast_address": "255.255.255.255",
"ip": "10.220.16.1",
"ip_subnet": "10.220.16.0",
"prefix_length": "24",
"route_preference": "0",
"route_tag": "0",
},
"counters": {
"broadcast_bytes_consumed": 0,
"broadcast_bytes_forwarded": 0,
"broadcast_bytes_originated": 0,
"broadcast_bytes_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_packets_originated": 0,
"broadcast_packets_received": 0,
"broadcast_packets_sent": 0,
"labeled_bytes_consumed": 0,
"labeled_bytes_forwarded": 0,
"labeled_bytes_originated": 0,
"labeled_bytes_received": 0,
"labeled_bytes_sent": 0,
"labeled_packets_consumed": 0,
"labeled_packets_forwarded": 0,
"labeled_packets_originated": 0,
"labeled_packets_received": 0,
"labeled_packets_sent": 0,
"multicast_bytes_consumed": 0,
"multicast_bytes_forwarded": 0,
"multicast_bytes_originated": 2048,
"multicast_bytes_received": 0,
"multicast_bytes_sent": 2048,
"multicast_packets_consumed": 0,
"multicast_packets_forwarded": 0,
"multicast_packets_originated": 23,
"multicast_packets_received": 0,
"multicast_packets_sent": 23,
"unicast_bytes_consumed": 0,
"unicast_bytes_forwarded": 0,
"unicast_bytes_originated": 0,
"unicast_bytes_received": 0,
"unicast_bytes_sent": 0,
"unicast_packets_consumed": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_originated": 0,
"unicast_packets_received": 0,
"unicast_packets_sent": 0,
},
},
"load_sharing": "none",
"local_proxy_arp": "disabled",
"multicast_groups": ["224.0.0.1", "224.0.0.13", "224.0.0.2"],
"multicast_routing": "enabled",
"proxy_arp": "disabled",
"unicast_reverse_path": "none",
"vrf": "vrf-9105",
},
"Vlan106": {
"directed_broadcast": "disabled",
"icmp_port_unreachable": "enabled",
"icmp_redirects": "disabled",
"icmp_unreachable": "disabled",
"int_stat_last_reset": "never",
"interface_status": "protocol-up/link-up/admin-up",
"iod": 76,
"ip_forwarding": "disabled",
"ip_mtu": 1500,
"ipv4": {
"10.220.17.1/24": {
"broadcast_address": "255.255.255.255",
"ip": "10.220.17.1",
"ip_subnet": "10.220.17.0",
"prefix_length": "24",
"route_preference": "0",
"route_tag": "0",
},
"counters": {
"broadcast_bytes_consumed": 0,
"broadcast_bytes_forwarded": 0,
"broadcast_bytes_originated": 0,
"broadcast_bytes_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_packets_originated": 0,
"broadcast_packets_received": 0,
"broadcast_packets_sent": 0,
"labeled_bytes_consumed": 0,
"labeled_bytes_forwarded": 0,
"labeled_bytes_originated": 0,
"labeled_bytes_received": 0,
"labeled_bytes_sent": 0,
"labeled_packets_consumed": 0,
"labeled_packets_forwarded": 0,
"labeled_packets_originated": 0,
"labeled_packets_received": 0,
"labeled_packets_sent": 0,
"multicast_bytes_consumed": 0,
"multicast_bytes_forwarded": 0,
"multicast_bytes_originated": 2048,
"multicast_bytes_received": 0,
"multicast_bytes_sent": 2048,
"multicast_packets_consumed": 0,
"multicast_packets_forwarded": 0,
"multicast_packets_originated": 23,
"multicast_packets_received": 0,
"multicast_packets_sent": 23,
"unicast_bytes_consumed": 0,
"unicast_bytes_forwarded": 0,
"unicast_bytes_originated": 0,
"unicast_bytes_received": 0,
"unicast_bytes_sent": 0,
"unicast_packets_consumed": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_originated": 0,
"unicast_packets_received": 0,
"unicast_packets_sent": 0,
},
},
"load_sharing": "none",
"local_proxy_arp": "disabled",
"multicast_groups": ["224.0.0.1", "224.0.0.13", "224.0.0.2"],
"multicast_routing": "enabled",
"proxy_arp": "disabled",
"unicast_reverse_path": "none",
"vrf": "vrf-9106",
},
"Vlan107": {
"directed_broadcast": "disabled",
"icmp_port_unreachable": "enabled",
"icmp_redirects": "disabled",
"icmp_unreachable": "disabled",
"int_stat_last_reset": "never",
"interface_status": "protocol-up/link-up/admin-up",
"iod": 77,
"ip_forwarding": "disabled",
"ip_mtu": 1500,
"ipv4": {
"10.220.18.1/24": {
"broadcast_address": "255.255.255.255",
"ip": "10.220.18.1",
"ip_subnet": "10.220.18.0",
"prefix_length": "24",
"route_preference": "0",
"route_tag": "0",
},
"counters": {
"broadcast_bytes_consumed": 0,
"broadcast_bytes_forwarded": 0,
"broadcast_bytes_originated": 0,
"broadcast_bytes_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_packets_originated": 0,
"broadcast_packets_received": 0,
"broadcast_packets_sent": 0,
"labeled_bytes_consumed": 0,
"labeled_bytes_forwarded": 0,
"labeled_bytes_originated": 0,
"labeled_bytes_received": 0,
"labeled_bytes_sent": 0,
"labeled_packets_consumed": 0,
"labeled_packets_forwarded": 0,
"labeled_packets_originated": 0,
"labeled_packets_received": 0,
"labeled_packets_sent": 0,
"multicast_bytes_consumed": 0,
"multicast_bytes_forwarded": 0,
"multicast_bytes_originated": 2048,
"multicast_bytes_received": 0,
"multicast_bytes_sent": 2048,
"multicast_packets_consumed": 0,
"multicast_packets_forwarded": 0,
"multicast_packets_originated": 23,
"multicast_packets_received": 0,
"multicast_packets_sent": 23,
"unicast_bytes_consumed": 0,
"unicast_bytes_forwarded": 0,
"unicast_bytes_originated": 0,
"unicast_bytes_received": 0,
"unicast_bytes_sent": 0,
"unicast_packets_consumed": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_originated": 0,
"unicast_packets_received": 0,
"unicast_packets_sent": 0,
},
},
"load_sharing": "none",
"local_proxy_arp": "disabled",
"multicast_groups": ["224.0.0.1", "224.0.0.13", "224.0.0.2"],
"multicast_routing": "enabled",
"proxy_arp": "disabled",
"unicast_reverse_path": "none",
"vrf": "vrf-9107",
},
"Vlan108": {
"directed_broadcast": "disabled",
"icmp_port_unreachable": "enabled",
"icmp_redirects": "disabled",
"icmp_unreachable": "disabled",
"int_stat_last_reset": "never",
"interface_status": "protocol-up/link-up/admin-up",
"iod": 78,
"ip_forwarding": "disabled",
"ip_mtu": 1500,
"ipv4": {
"10.220.19.1/24": {
"broadcast_address": "255.255.255.255",
"ip": "10.220.19.1",
"ip_subnet": "10.220.19.0",
"prefix_length": "24",
"route_preference": "0",
"route_tag": "0",
},
"counters": {
"broadcast_bytes_consumed": 0,
"broadcast_bytes_forwarded": 0,
"broadcast_bytes_originated": 0,
"broadcast_bytes_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_packets_originated": 0,
"broadcast_packets_received": 0,
"broadcast_packets_sent": 0,
"labeled_bytes_consumed": 0,
"labeled_bytes_forwarded": 0,
"labeled_bytes_originated": 0,
"labeled_bytes_received": 0,
"labeled_bytes_sent": 0,
"labeled_packets_consumed": 0,
"labeled_packets_forwarded": 0,
"labeled_packets_originated": 0,
"labeled_packets_received": 0,
"labeled_packets_sent": 0,
"multicast_bytes_consumed": 0,
"multicast_bytes_forwarded": 0,
"multicast_bytes_originated": 2048,
"multicast_bytes_received": 0,
"multicast_bytes_sent": 2048,
"multicast_packets_consumed": 0,
"multicast_packets_forwarded": 0,
"multicast_packets_originated": 23,
"multicast_packets_received": 0,
"multicast_packets_sent": 23,
"unicast_bytes_consumed": 0,
"unicast_bytes_forwarded": 0,
"unicast_bytes_originated": 0,
"unicast_bytes_received": 0,
"unicast_bytes_sent": 0,
"unicast_packets_consumed": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_originated": 0,
"unicast_packets_received": 0,
"unicast_packets_sent": 0,
},
},
"load_sharing": "none",
"local_proxy_arp": "disabled",
"multicast_groups": ["224.0.0.1", "224.0.0.13", "224.0.0.2"],
"multicast_routing": "enabled",
"proxy_arp": "disabled",
"unicast_reverse_path": "none",
"vrf": "vrf-9108",
},
"Vlan109": {
"directed_broadcast": "disabled",
"icmp_port_unreachable": "enabled",
"icmp_redirects": "disabled",
"icmp_unreachable": "disabled",
"int_stat_last_reset": "never",
"interface_status": "protocol-up/link-up/admin-up",
"iod": 79,
"ip_forwarding": "disabled",
"ip_mtu": 1500,
"ipv4": {
"10.220.20.1/24": {
"broadcast_address": "255.255.255.255",
"ip": "10.220.20.1",
"ip_subnet": "10.220.20.0",
"prefix_length": "24",
"route_preference": "0",
"route_tag": "0",
},
"counters": {
"broadcast_bytes_consumed": 0,
"broadcast_bytes_forwarded": 0,
"broadcast_bytes_originated": 0,
"broadcast_bytes_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_packets_originated": 0,
"broadcast_packets_received": 0,
"broadcast_packets_sent": 0,
"labeled_bytes_consumed": 0,
"labeled_bytes_forwarded": 0,
"labeled_bytes_originated": 0,
"labeled_bytes_received": 0,
"labeled_bytes_sent": 0,
"labeled_packets_consumed": 0,
"labeled_packets_forwarded": 0,
"labeled_packets_originated": 0,
"labeled_packets_received": 0,
"labeled_packets_sent": 0,
"multicast_bytes_consumed": 0,
"multicast_bytes_forwarded": 0,
"multicast_bytes_originated": 2048,
"multicast_bytes_received": 0,
"multicast_bytes_sent": 2048,
"multicast_packets_consumed": 0,
"multicast_packets_forwarded": 0,
"multicast_packets_originated": 23,
"multicast_packets_received": 0,
"multicast_packets_sent": 23,
"unicast_bytes_consumed": 0,
"unicast_bytes_forwarded": 0,
"unicast_bytes_originated": 0,
"unicast_bytes_received": 0,
"unicast_bytes_sent": 0,
"unicast_packets_consumed": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_originated": 0,
"unicast_packets_received": 0,
"unicast_packets_sent": 0,
},
},
"load_sharing": "none",
"local_proxy_arp": "disabled",
"multicast_groups": ["224.0.0.1", "224.0.0.13", "224.0.0.2"],
"multicast_routing": "enabled",
"proxy_arp": "disabled",
"unicast_reverse_path": "none",
"vrf": "vrf-9109",
},
"Vlan110": {
"directed_broadcast": "disabled",
"icmp_port_unreachable": "enabled",
"icmp_redirects": "disabled",
"icmp_unreachable": "disabled",
"int_stat_last_reset": "never",
"interface_status": "protocol-up/link-up/admin-up",
"iod": 80,
"ip_forwarding": "disabled",
"ip_mtu": 1500,
"ipv4": {
"10.220.21.1/24": {
"broadcast_address": "255.255.255.255",
"ip": "10.220.21.1",
"ip_subnet": "10.220.21.0",
"prefix_length": "24",
"route_preference": "0",
"route_tag": "0",
},
"counters": {
"broadcast_bytes_consumed": 0,
"broadcast_bytes_forwarded": 0,
"broadcast_bytes_originated": 0,
"broadcast_bytes_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_packets_originated": 0,
"broadcast_packets_received": 0,
"broadcast_packets_sent": 0,
"labeled_bytes_consumed": 0,
"labeled_bytes_forwarded": 0,
"labeled_bytes_originated": 0,
"labeled_bytes_received": 0,
"labeled_bytes_sent": 0,
"labeled_packets_consumed": 0,
"labeled_packets_forwarded": 0,
"labeled_packets_originated": 0,
"labeled_packets_received": 0,
"labeled_packets_sent": 0,
"multicast_bytes_consumed": 0,
"multicast_bytes_forwarded": 0,
"multicast_bytes_originated": 2048,
"multicast_bytes_received": 0,
"multicast_bytes_sent": 2048,
"multicast_packets_consumed": 0,
"multicast_packets_forwarded": 0,
"multicast_packets_originated": 23,
"multicast_packets_received": 0,
"multicast_packets_sent": 23,
"unicast_bytes_consumed": 0,
"unicast_bytes_forwarded": 0,
"unicast_bytes_originated": 0,
"unicast_bytes_received": 0,
"unicast_bytes_sent": 0,
"unicast_packets_consumed": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_originated": 0,
"unicast_packets_received": 0,
"unicast_packets_sent": 0,
},
},
"load_sharing": "none",
"local_proxy_arp": "disabled",
"multicast_groups": ["224.0.0.1", "224.0.0.13", "224.0.0.2"],
"multicast_routing": "enabled",
"proxy_arp": "disabled",
"unicast_reverse_path": "none",
"vrf": "vrf-9105",
},
"Vlan111": {
"directed_broadcast": "disabled",
"icmp_port_unreachable": "enabled",
"icmp_redirects": "disabled",
"icmp_unreachable": "disabled",
"int_stat_last_reset": "never",
"interface_status": "protocol-up/link-up/admin-up",
"iod": 81,
"ip_forwarding": "disabled",
"ip_mtu": 1500,
"ipv4": {
"10.220.22.1/24": {
"broadcast_address": "255.255.255.255",
"ip": "10.220.22.1",
"ip_subnet": "10.220.22.0",
"prefix_length": "24",
"route_preference": "0",
"route_tag": "0",
},
"counters": {
"broadcast_bytes_consumed": 0,
"broadcast_bytes_forwarded": 0,
"broadcast_bytes_originated": 0,
"broadcast_bytes_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_packets_originated": 0,
"broadcast_packets_received": 0,
"broadcast_packets_sent": 0,
"labeled_bytes_consumed": 0,
"labeled_bytes_forwarded": 0,
"labeled_bytes_originated": 0,
"labeled_bytes_received": 0,
"labeled_bytes_sent": 0,
"labeled_packets_consumed": 0,
"labeled_packets_forwarded": 0,
"labeled_packets_originated": 0,
"labeled_packets_received": 0,
"labeled_packets_sent": 0,
"multicast_bytes_consumed": 0,
"multicast_bytes_forwarded": 0,
"multicast_bytes_originated": 2048,
"multicast_bytes_received": 0,
"multicast_bytes_sent": 2048,
"multicast_packets_consumed": 0,
"multicast_packets_forwarded": 0,
"multicast_packets_originated": 23,
"multicast_packets_received": 0,
"multicast_packets_sent": 23,
"unicast_bytes_consumed": 0,
"unicast_bytes_forwarded": 0,
"unicast_bytes_originated": 0,
"unicast_bytes_received": 0,
"unicast_bytes_sent": 0,
"unicast_packets_consumed": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_originated": 0,
"unicast_packets_received": 0,
"unicast_packets_sent": 0,
},
},
"load_sharing": "none",
"local_proxy_arp": "disabled",
"multicast_groups": ["224.0.0.1", "224.0.0.13", "224.0.0.2"],
"multicast_routing": "enabled",
"proxy_arp": "disabled",
"unicast_reverse_path": "none",
"vrf": "vrf-9106",
},
"Vlan112": {
"directed_broadcast": "disabled",
"icmp_port_unreachable": "enabled",
"icmp_redirects": "disabled",
"icmp_unreachable": "disabled",
"int_stat_last_reset": "never",
"interface_status": "protocol-up/link-up/admin-up",
"iod": 82,
"ip_forwarding": "disabled",
"ip_mtu": 1500,
"ipv4": {
"10.220.23.1/24": {
"broadcast_address": "255.255.255.255",
"ip": "10.220.23.1",
"ip_subnet": "10.220.23.0",
"prefix_length": "24",
"route_preference": "0",
"route_tag": "0",
},
"counters": {
"broadcast_bytes_consumed": 0,
"broadcast_bytes_forwarded": 0,
"broadcast_bytes_originated": 0,
"broadcast_bytes_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_packets_originated": 0,
"broadcast_packets_received": 0,
"broadcast_packets_sent": 0,
"labeled_bytes_consumed": 0,
"labeled_bytes_forwarded": 0,
"labeled_bytes_originated": 0,
"labeled_bytes_received": 0,
"labeled_bytes_sent": 0,
"labeled_packets_consumed": 0,
"labeled_packets_forwarded": 0,
"labeled_packets_originated": 0,
"labeled_packets_received": 0,
"labeled_packets_sent": 0,
"multicast_bytes_consumed": 0,
"multicast_bytes_forwarded": 0,
"multicast_bytes_originated": 2048,
"multicast_bytes_received": 0,
"multicast_bytes_sent": 2048,
"multicast_packets_consumed": 0,
"multicast_packets_forwarded": 0,
"multicast_packets_originated": 23,
"multicast_packets_received": 0,
"multicast_packets_sent": 23,
"unicast_bytes_consumed": 0,
"unicast_bytes_forwarded": 0,
"unicast_bytes_originated": 0,
"unicast_bytes_received": 0,
"unicast_bytes_sent": 0,
"unicast_packets_consumed": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_originated": 0,
"unicast_packets_received": 0,
"unicast_packets_sent": 0,
},
},
"load_sharing": "none",
"local_proxy_arp": "disabled",
"multicast_groups": ["224.0.0.1", "224.0.0.13", "224.0.0.2"],
"multicast_routing": "enabled",
"proxy_arp": "disabled",
"unicast_reverse_path": "none",
"vrf": "vrf-9107",
},
"Vlan113": {
"directed_broadcast": "disabled",
"icmp_port_unreachable": "enabled",
"icmp_redirects": "disabled",
"icmp_unreachable": "disabled",
"int_stat_last_reset": "never",
"interface_status": "protocol-up/link-up/admin-up",
"iod": 83,
"ip_forwarding": "disabled",
"ip_mtu": 1500,
"ipv4": {
"10.220.24.1/24": {
"broadcast_address": "255.255.255.255",
"ip": "10.220.24.1",
"ip_subnet": "10.220.24.0",
"prefix_length": "24",
"route_preference": "0",
"route_tag": "0",
},
"counters": {
"broadcast_bytes_consumed": 0,
"broadcast_bytes_forwarded": 0,
"broadcast_bytes_originated": 0,
"broadcast_bytes_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_packets_originated": 0,
"broadcast_packets_received": 0,
"broadcast_packets_sent": 0,
"labeled_bytes_consumed": 0,
"labeled_bytes_forwarded": 0,
"labeled_bytes_originated": 0,
"labeled_bytes_received": 0,
"labeled_bytes_sent": 0,
"labeled_packets_consumed": 0,
"labeled_packets_forwarded": 0,
"labeled_packets_originated": 0,
"labeled_packets_received": 0,
"labeled_packets_sent": 0,
"multicast_bytes_consumed": 0,
"multicast_bytes_forwarded": 0,
"multicast_bytes_originated": 2048,
"multicast_bytes_received": 0,
"multicast_bytes_sent": 2048,
"multicast_packets_consumed": 0,
"multicast_packets_forwarded": 0,
"multicast_packets_originated": 23,
"multicast_packets_received": 0,
"multicast_packets_sent": 23,
"unicast_bytes_consumed": 0,
"unicast_bytes_forwarded": 0,
"unicast_bytes_originated": 0,
"unicast_bytes_received": 0,
"unicast_bytes_sent": 0,
"unicast_packets_consumed": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_originated": 0,
"unicast_packets_received": 0,
"unicast_packets_sent": 0,
},
},
"load_sharing": "none",
"local_proxy_arp": "disabled",
"multicast_groups": ["224.0.0.1", "224.0.0.13", "224.0.0.2"],
"multicast_routing": "enabled",
"proxy_arp": "disabled",
"unicast_reverse_path": "none",
"vrf": "vrf-9108",
},
"Vlan114": {
"directed_broadcast": "disabled",
"icmp_port_unreachable": "enabled",
"icmp_redirects": "disabled",
"icmp_unreachable": "disabled",
"int_stat_last_reset": "never",
"interface_status": "protocol-up/link-up/admin-up",
"iod": 84,
"ip_forwarding": "disabled",
"ip_mtu": 1500,
"ipv4": {
"10.220.25.1/24": {
"broadcast_address": "255.255.255.255",
"ip": "10.220.25.1",
"ip_subnet": "10.220.25.0",
"prefix_length": "24",
"route_preference": "0",
"route_tag": "0",
},
"counters": {
"broadcast_bytes_consumed": 0,
"broadcast_bytes_forwarded": 0,
"broadcast_bytes_originated": 0,
"broadcast_bytes_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_packets_originated": 0,
"broadcast_packets_received": 0,
"broadcast_packets_sent": 0,
"labeled_bytes_consumed": 0,
"labeled_bytes_forwarded": 0,
"labeled_bytes_originated": 0,
"labeled_bytes_received": 0,
"labeled_bytes_sent": 0,
"labeled_packets_consumed": 0,
"labeled_packets_forwarded": 0,
"labeled_packets_originated": 0,
"labeled_packets_received": 0,
"labeled_packets_sent": 0,
"multicast_bytes_consumed": 0,
"multicast_bytes_forwarded": 0,
"multicast_bytes_originated": 2048,
"multicast_bytes_received": 0,
"multicast_bytes_sent": 2048,
"multicast_packets_consumed": 0,
"multicast_packets_forwarded": 0,
"multicast_packets_originated": 23,
"multicast_packets_received": 0,
"multicast_packets_sent": 23,
"unicast_bytes_consumed": 0,
"unicast_bytes_forwarded": 0,
"unicast_bytes_originated": 0,
"unicast_bytes_received": 0,
"unicast_bytes_sent": 0,
"unicast_packets_consumed": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_originated": 0,
"unicast_packets_received": 0,
"unicast_packets_sent": 0,
},
},
"load_sharing": "none",
"local_proxy_arp": "disabled",
"multicast_groups": ["224.0.0.1", "224.0.0.13", "224.0.0.2"],
"multicast_routing": "enabled",
"proxy_arp": "disabled",
"unicast_reverse_path": "none",
"vrf": "vrf-9109",
},
"Vlan910": {
"directed_broadcast": "disabled",
"icmp_port_unreachable": "enabled",
"icmp_redirects": "enabled",
"icmp_unreachable": "disabled",
"int_stat_last_reset": "never",
"interface_status": "protocol-down/link-down/admin-up",
"iod": 85,
"ip_forwarding": "enabled",
"ip_mtu": 1500,
"ipv4": {
"counters": {
"broadcast_bytes_consumed": 0,
"broadcast_bytes_forwarded": 0,
"broadcast_bytes_originated": 0,
"broadcast_bytes_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_packets_originated": 0,
"broadcast_packets_received": 0,
"broadcast_packets_sent": 0,
"labeled_bytes_consumed": 0,
"labeled_bytes_forwarded": 0,
"labeled_bytes_originated": 0,
"labeled_bytes_received": 0,
"labeled_bytes_sent": 0,
"labeled_packets_consumed": 0,
"labeled_packets_forwarded": 0,
"labeled_packets_originated": 0,
"labeled_packets_received": 0,
"labeled_packets_sent": 0,
"multicast_bytes_consumed": 0,
"multicast_bytes_forwarded": 0,
"multicast_bytes_originated": 0,
"multicast_bytes_received": 0,
"multicast_bytes_sent": 0,
"multicast_packets_consumed": 0,
"multicast_packets_forwarded": 0,
"multicast_packets_originated": 0,
"multicast_packets_received": 0,
"multicast_packets_sent": 0,
"unicast_bytes_consumed": 0,
"unicast_bytes_forwarded": 0,
"unicast_bytes_originated": 0,
"unicast_bytes_received": 0,
"unicast_bytes_sent": 0,
"unicast_packets_consumed": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_originated": 0,
"unicast_packets_received": 0,
"unicast_packets_sent": 0,
},
"none": {"broadcast_address": "255.255.255.255", "ip": "none"},
},
"load_sharing": "none",
"local_proxy_arp": "disabled",
"multicast_groups_address": "none",
"multicast_routing": "disabled",
"proxy_arp": "disabled",
"unicast_reverse_path": "none",
"vrf": "vrf-9100",
},
"loopback0": {
"directed_broadcast": "disabled",
"icmp_port_unreachable": "enabled",
"icmp_redirects": "enabled",
"icmp_unreachable": "disabled",
"int_stat_last_reset": "never",
"interface_status": "protocol-up/link-up/admin-up",
"iod": 94,
"ip_forwarding": "disabled",
"ip_mtu": 1500,
"ipv4": {
"10.49.1.0/32": {
"broadcast_address": "255.255.255.255",
"ip": "10.49.1.0",
"ip_subnet": "10.49.1.0",
"prefix_length": "32",
"route_preference": "0",
"route_tag": "0",
},
"counters": {
"broadcast_bytes_consumed": 0,
"broadcast_bytes_forwarded": 0,
"broadcast_bytes_originated": 0,
"broadcast_bytes_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_packets_originated": 0,
"broadcast_packets_received": 0,
"broadcast_packets_sent": 0,
"labeled_bytes_consumed": 0,
"labeled_bytes_forwarded": 0,
"labeled_bytes_originated": 0,
"labeled_bytes_received": 0,
"labeled_bytes_sent": 0,
"labeled_packets_consumed": 0,
"labeled_packets_forwarded": 0,
"labeled_packets_originated": 0,
"labeled_packets_received": 0,
"labeled_packets_sent": 0,
"multicast_bytes_consumed": 0,
"multicast_bytes_forwarded": 0,
"multicast_bytes_originated": 0,
"multicast_bytes_received": 0,
"multicast_bytes_sent": 0,
"multicast_packets_consumed": 0,
"multicast_packets_forwarded": 0,
"multicast_packets_originated": 0,
"multicast_packets_received": 0,
"multicast_packets_sent": 0,
"unicast_bytes_consumed": 0,
"unicast_bytes_forwarded": 0,
"unicast_bytes_originated": 0,
"unicast_bytes_received": 0,
"unicast_bytes_sent": 0,
"unicast_packets_consumed": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_originated": 0,
"unicast_packets_received": 0,
"unicast_packets_sent": 0,
},
},
"load_sharing": "none",
"local_proxy_arp": "disabled",
"multicast_groups": ["224.0.0.1", "224.0.0.13", "224.0.0.2"],
"multicast_routing": "enabled",
"proxy_arp": "disabled",
"unicast_reverse_path": "none",
"vrf": "default",
},
"loopback1": {
"directed_broadcast": "disabled",
"icmp_port_unreachable": "enabled",
"icmp_redirects": "disabled",
"icmp_unreachable": "disabled",
"int_stat_last_reset": "never",
"interface_status": "protocol-down/link-down/admin-up",
"iod": 95,
"ip_forwarding": "disabled",
"ip_mtu": 1500,
"ipv4": {
"10.49.1.1/32": {
"ip": "10.49.1.1",
"ip_subnet": "10.49.1.1",
"prefix_length": "32",
"route_preference": "0",
"route_tag": "0",
},
"10.49.2.1/32": {
"broadcast_address": "255.255.255.255",
"ip": "10.49.2.1",
"ip_subnet": "10.49.2.1",
"prefix_length": "32",
"route_preference": "0",
"route_tag": "0",
"secondary": True,
},
"counters": {
"broadcast_bytes_consumed": 0,
"broadcast_bytes_forwarded": 0,
"broadcast_bytes_originated": 0,
"broadcast_bytes_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_packets_originated": 0,
"broadcast_packets_received": 0,
"broadcast_packets_sent": 0,
"labeled_bytes_consumed": 0,
"labeled_bytes_forwarded": 0,
"labeled_bytes_originated": 0,
"labeled_bytes_received": 0,
"labeled_bytes_sent": 0,
"labeled_packets_consumed": 0,
"labeled_packets_forwarded": 0,
"labeled_packets_originated": 0,
"labeled_packets_received": 0,
"labeled_packets_sent": 0,
"multicast_bytes_consumed": 35184,
"multicast_bytes_forwarded": 0,
"multicast_bytes_originated": 0,
"multicast_bytes_received": 35184,
"multicast_bytes_sent": 0,
"multicast_packets_consumed": 312,
"multicast_packets_forwarded": 0,
"multicast_packets_originated": 0,
"multicast_packets_received": 312,
"multicast_packets_sent": 0,
"unicast_bytes_consumed": 0,
"unicast_bytes_forwarded": 0,
"unicast_bytes_originated": 0,
"unicast_bytes_received": 0,
"unicast_bytes_sent": 0,
"unicast_packets_consumed": 0,
"unicast_packets_forwarded": 0,
"unicast_packets_originated": 0,
"unicast_packets_received": 0,
"unicast_packets_sent": 0,
},
},
"load_sharing": "none",
"local_proxy_arp": "disabled",
"multicast_groups": ["224.0.0.1", "224.0.0.13", "224.0.0.2"],
"multicast_routing": "enabled",
"proxy_arp": "disabled",
"unicast_reverse_path": "none",
"vrf": "default",
},
"mgmt0": {
"directed_broadcast": "disabled",
"icmp_port_unreachable": "enabled",
"icmp_redirects": "enabled",
"icmp_unreachable": "disabled",
"int_stat_last_reset": "never",
"interface_status": "protocol-up/link-up/admin-up",
"iod": 2,
"ip_forwarding": "disabled",
"ip_mtu": 1500,
"ipv4": {
"10.1.6.40/24": {
"broadcast_address": "255.255.255.255",
"ip": "10.1.6.40",
"ip_subnet": "10.1.6.0",
"prefix_length": "24",
"route_preference": "0",
"route_tag": "0",
},
"counters": {
"broadcast_bytes_consumed": 0,
"broadcast_bytes_forwarded": 0,
"broadcast_bytes_originated": 0,
"broadcast_bytes_received": 0,
"broadcast_bytes_sent": 0,
"broadcast_packets_consumed": 0,
"broadcast_packets_forwarded": 0,
"broadcast_packets_originated": 0,
"broadcast_packets_received": 0,
"broadcast_packets_sent": 0,
"labeled_bytes_consumed": 0,
"labeled_bytes_forwarded": 0,
"labeled_bytes_originated": 0,
"labeled_bytes_received": 0,
"labeled_bytes_sent": 0,
"labeled_packets_consumed": 0,
"labeled_packets_forwarded": 0,
"labeled_packets_originated": 0,
"labeled_packets_received": 0,
"labeled_packets_sent": 0,
"multicast_bytes_consumed": 0,
"multicast_bytes_forwarded": 0,
"multicast_bytes_originated": 0,
"multicast_bytes_received": 0,
"multicast_bytes_sent": 0,
"multicast_packets_consumed": 0,
"multicast_packets_forwarded": 0,
"multicast_packets_originated": 0,
"multicast_packets_received": 0,
"multicast_packets_sent": 0,
"unicast_bytes_consumed": 115792,
"unicast_bytes_forwarded": 0,
"unicast_bytes_originated": 343014,
"unicast_bytes_received": 57896,
"unicast_bytes_sent": 343014,
"unicast_packets_consumed": 1592,
"unicast_packets_forwarded": 0,
"unicast_packets_originated": 805,
"unicast_packets_received": 796,
"unicast_packets_sent": 805,
},
},
"load_sharing": "none",
"local_proxy_arp": "disabled",
"multicast_groups_address": "none",
"multicast_routing": "disabled",
"proxy_arp": "disabled",
"unicast_reverse_path": "none",
"vrf": "management",
},
}
|
317b73d7e683639b1f8079ec92c48f3753b05d43
|
472c0ba1911619f8e2e1a68b4f956fad05be4e94
|
/src/matlab2cpp/rules/_string.py
|
88b8385abfbd4bc2a426c298941822a4965da744
|
[
"BSD-3-Clause"
] |
permissive
|
jonathf/matlab2cpp
|
f8b9541cf79507ec764b04b8211e73c47a20c131
|
b6e2cbaedb36c909952911adfe44fe26252a36a1
|
refs/heads/master
| 2022-08-08T21:28:23.028072
| 2022-07-15T19:58:01
| 2022-07-15T19:58:01
| 31,599,354
| 197
| 68
|
BSD-3-Clause
| 2022-07-15T19:58:02
| 2015-03-03T13:20:32
|
Python
|
UTF-8
|
Python
| false
| false
| 245
|
py
|
_string.py
|
from .assign import Assign
from .variables import *
Declare = "string %(name)s ;"
def String(node):
if node.name or node.parent.backend != "matrix":
return '"%(value)s"'
else:
return 'std::string("%(value)s")'
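# Editorial sketch (not part of the original module): the %-style templates
# above are filled with node attributes by the matlab2cpp backend, roughly:
#   Declare % {"name": "s"}                       -> 'string s ;'
#   '"%(value)s"' % {"value": "hi"}               -> '"hi"'
#   'std::string("%(value)s")' % {"value": "hi"}  -> 'std::string("hi")'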
|
99aec74f586fb872994b2771df44eb4a88644d87
|
346f1d8818237116f759290b43ef881eb58464ed
|
/GPaCo/Seg/mmseg/models/decode_heads/uper_head_ceco.py
|
253936918f0c8df7b78dfe3600a64dd1d4b7a620
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
dvlab-research/Parametric-Contrastive-Learning
|
a35cd210692a69293c68297c01d8af3312ac986f
|
e5f8436c67b3860a18578da3290eb582e6a27c25
|
refs/heads/main
| 2023-08-04T15:03:59.422489
| 2023-07-24T15:17:37
| 2023-07-24T15:17:37
| 373,351,085
| 170
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,598
|
py
|
uper_head_ceco.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from mmseg.ops import resize
from ..builder import HEADS
from .decode_head import BaseDecodeHead
from .psp_head import PPM
from ..losses import accuracy
import random
from scipy.stats import ortho_group
@HEADS.register_module()
class UPerHead_cecol(BaseDecodeHead): # 'Linear' version of CeCo, i.e., RR
"""Unified Perceptual Parsing for Scene Understanding.
This head is the implementation of `UPerNet
<https://arxiv.org/abs/1807.10221>`_.
Args:
pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
Module applied on the last feature. Default: (1, 2, 3, 6).
"""
def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs):
super(UPerHead_cecol, self).__init__(
input_transform='multiple_select', **kwargs)
# PSP Module
self.psp_modules = PPM(
pool_scales,
self.in_channels[-1],
self.channels,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
align_corners=self.align_corners)
self.bottleneck = ConvModule(
self.in_channels[-1] + len(pool_scales) * self.channels,
self.channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
# FPN Module
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
l_conv = ConvModule(
in_channels,
self.channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
inplace=False)
fpn_conv = ConvModule(
self.channels,
self.channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
inplace=False)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
self.fpn_bottleneck = ConvModule(
len(self.in_channels) * self.channels,
self.channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
frequency_file = kwargs.get('frequency_file')
self.img_cls_weight = float(kwargs.get('img_cls_weight'))
self.smooth = float(kwargs.get('smooth', 1.0))
self.etf = float(kwargs.get('etf', False))
self.scale = float(kwargs.get('scale', 1.0))
## rebalance
img_w_list = []
content = open(frequency_file, "r").readlines()
for line in content:
img_w_list.append(int(line))
self.weight = torch.Tensor(img_w_list)
self.weight = self.weight / self.weight.sum()
self.weight = self.weight.view(1,self.num_classes)
self.weight = nn.parameter.Parameter(self.weight, requires_grad=False)
if self.training:
self.reduce = nn.Sequential(
nn.Conv2d(self.channels, 128, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(128))
self.gain = nn.Sequential(
nn.Linear(128, 512),
nn.ReLU(inplace=True))
self.img_cls = nn.Sequential(self.dropout, nn.Linear(512, self.num_classes))
for param in self.reduce.parameters():
param.requires_grad = False
for param in self.gain.parameters():
param.requires_grad = False
for param in self.img_cls.parameters():
param.requires_grad = False
def cls_seg(self, feat):
"""Classify each pixel."""
if self.dropout is not None:
feat = self.dropout(feat)
output = self.conv_seg(feat)
return output
def psp_forward(self, inputs):
"""Forward function of PSP module."""
x = inputs[-1]
psp_outs = [x]
psp_outs.extend(self.psp_modules(x))
psp_outs = torch.cat(psp_outs, dim=1)
output = self.bottleneck(psp_outs)
return output
def forward(self, inputs, seg_label=None):
"""Forward function."""
inputs = self._transform_inputs(inputs)
# build laterals
laterals = [
lateral_conv(inputs[i])
for i, lateral_conv in enumerate(self.lateral_convs)
]
laterals.append(self.psp_forward(inputs))
# build top-down path
used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
prev_shape = laterals[i - 1].shape[2:]
laterals[i - 1] += resize(
laterals[i],
size=prev_shape,
mode='bilinear',
align_corners=self.align_corners)
# build outputs
fpn_outs = [
self.fpn_convs[i](laterals[i])
for i in range(used_backbone_levels - 1)
]
# append psp feature
fpn_outs.append(laterals[-1])
for i in range(used_backbone_levels - 1, 0, -1):
fpn_outs[i] = resize(
fpn_outs[i],
size=fpn_outs[0].shape[2:],
mode='bilinear',
align_corners=self.align_corners)
fpn_outs = torch.cat(fpn_outs, dim=1)
output = self.fpn_bottleneck(fpn_outs)
final_output = self.cls_seg(output)
# decoupling
if self.training:
h, w = seg_label.shape[2:]
pixel_features = self.reduce(output)
pixel_features = F.interpolate(pixel_features, size=(h, w), mode='bilinear', align_corners=True)
pixel_features = pixel_features.permute(0,2,3,1).contiguous()
y = seg_label.squeeze(1)
y_valid = y[y!=255].long().cuda()
out_valid = pixel_features[y!=255,:]
y_onehot = F.one_hot(y_valid, self.num_classes).float()
features = y_onehot.T @ out_valid
scene_label = torch.unique(y_valid)
features = features[scene_label,:]
cls_num = y_onehot.T.sum(dim=1)
cls_num = cls_num[scene_label]
features = features / cls_num.unsqueeze(1)
img_x = self.gain(features)
f = F.normalize(img_x, dim=-1) if self.etf else img_x
logits_img = self.img_cls(f) * self.scale
return final_output, seg_label, logits_img, scene_label
else:
return final_output
def forward_train(self, inputs, img_metas, gt_semantic_seg, train_cfg):
"""Forward function for training.
Args:
inputs (list[Tensor]): List of multi-level img features.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
gt_semantic_seg (Tensor): Semantic segmentation masks
used if the architecture supports semantic segmentation task.
train_cfg (dict): The training config.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
outputs = self.forward(inputs, gt_semantic_seg)
losses = self.losses(outputs[0], outputs[1], outputs[2], outputs[3])
return losses
@force_fp32(apply_to=('seg_logit', ))
def losses(self, seg_logit, seg_label, logits_img, labels_img):
"""Compute segmentation loss."""
loss = dict()
seg_logit = resize(
input=seg_logit,
size=seg_label.shape[2:],
mode='bilinear',
align_corners=self.align_corners)
if self.sampler is not None:
seg_weight = self.sampler.sample(seg_logit, seg_label)
else:
seg_weight = None
seg_label = seg_label.squeeze(1)
for loss_decode in self.loss_decode:
if loss_decode.loss_name not in loss:
loss[loss_decode.loss_name] = loss_decode(
seg_logit,
seg_label,
weight=seg_weight,
ignore_index=self.ignore_index)
else:
loss[loss_decode.loss_name] += loss_decode(
seg_logit,
seg_label,
weight=seg_weight,
ignore_index=self.ignore_index)
loss['acc_seg'] = accuracy(seg_logit, seg_label)
# rebalance loss for img
labels_img_t = torch.where(labels_img >= self.num_classes, self.num_classes, labels_img)
img_onehot = F.one_hot(labels_img_t, num_classes=self.num_classes+1)[:,:-1]
img_onehot = self.smooth * img_onehot + (1 - self.smooth) / (self.num_classes - 1) * (1 - img_onehot)
loss['loss_img_cls'] = -(img_onehot * F.log_softmax(logits_img + torch.log(self.weight + 1e-12), dim=1)).sum() / (img_onehot.sum() + 1e-12) * self.img_cls_weight
return loss
@HEADS.register_module()
class UPerHead_ceco(UPerHead_cecol): # Normalized
"""Unified Perceptual Parsing for Scene Understanding.
This head is the implementation of `UPerNet
<https://arxiv.org/abs/1807.10221>`_.
Args:
pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
Module applied on the last feature. Default: (1, 2, 3, 6).
"""
def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs):
super(UPerHead_ceco, self).__init__(pool_scales=pool_scales, **kwargs)
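# Editorial note: the construction below appears to build a fixed simplex
# equiangular-tight-frame (ETF) classifier -- random orthogonal directions
# are centered by (I - 1/K * 11^T), scaled by sqrt(K/(K-1)), row-normalized,
# and installed as the (frozen) img_cls weights with zero bias.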
orth = ortho_group.rvs(dim=512).astype(np.float32)
orth = torch.tensor(orth[:,:self.num_classes])
etf = math.sqrt(self.num_classes/(self.num_classes-1)) * orth @ (torch.eye(self.num_classes) - 1.0 / self.num_classes * torch.ones(self.num_classes, self.num_classes))
etf = etf.t()
self.img_cls[1].weight.data = etf / etf.norm(dim=-1, keepdim=True)
self.img_cls[1].bias.data = torch.zeros(self.num_classes,)
self.etf = True
|
142d146d179d63728f968e13028b00c724a6b110
|
89420cda57f03791a5448ed4eeb967d06a4aade3
|
/arviz/plots/backends/bokeh/traceplot.py
|
05d83f1546339c1f2a7342569624a7f48ec7cc3c
|
[
"Apache-2.0"
] |
permissive
|
arviz-devs/arviz
|
fa2423e28f7a8c1b22986dbef317579c00744f75
|
24c260a0390d030e106943f21811652ea82aebc7
|
refs/heads/main
| 2023-09-03T12:22:12.075948
| 2023-07-18T22:29:35
| 2023-07-18T22:29:35
| 39,890,704
| 1,421
| 413
|
Apache-2.0
| 2023-09-13T15:53:19
| 2015-07-29T11:51:10
|
Python
|
UTF-8
|
Python
| false
| false
| 14,207
|
py
|
traceplot.py
|
"""Bokeh Traceplot."""
import warnings
from collections.abc import Iterable
from itertools import cycle
import bokeh.plotting as bkp
import matplotlib.pyplot as plt
import numpy as np
from bokeh.models import ColumnDataSource, DataRange1d, Span
from bokeh.models.glyphs import Scatter
from bokeh.models.annotations import Title
from ...distplot import plot_dist
from ...plot_utils import _scale_fig_size
from ...rankplot import plot_rank
from .. import show_layout
from . import backend_kwarg_defaults, dealiase_sel_kwargs
from ....sel_utils import xarray_var_iter
def plot_trace(
data,
var_names,
divergences,
kind,
figsize,
rug,
lines,
circ_var_names, # pylint: disable=unused-argument
circ_var_units, # pylint: disable=unused-argument
compact,
compact_prop,
combined,
chain_prop,
legend,
labeller,
plot_kwargs,
fill_kwargs,
rug_kwargs,
hist_kwargs,
trace_kwargs,
rank_kwargs,
plotters,
divergence_data,
axes,
backend_kwargs,
backend_config,
show,
):
"""Bokeh traceplot."""
# If divergences are plotted they must be provided
if divergences is not False:
assert divergence_data is not None
if backend_config is None:
backend_config = {}
backend_config = {
**backend_kwarg_defaults(
("bounds_y_range", "plot.bokeh.bounds_y_range"),
),
**backend_config,
}
# Set plot default backend kwargs
if backend_kwargs is None:
backend_kwargs = {}
backend_kwargs = {
**backend_kwarg_defaults(
("dpi", "plot.bokeh.figure.dpi"),
),
**backend_kwargs,
}
dpi = backend_kwargs.pop("dpi")
if figsize is None:
figsize = (12, len(plotters) * 2)
figsize, _, _, _, linewidth, _ = _scale_fig_size(figsize, 10, rows=len(plotters), cols=2)
backend_kwargs.setdefault("height", int(figsize[1] * dpi // len(plotters)))
backend_kwargs.setdefault("width", int(figsize[0] * dpi // 2))
if lines is None:
lines = ()
num_chain_props = len(data.chain) + 1 if combined else len(data.chain)
if not compact:
chain_prop = (
{"line_color": plt.rcParams["axes.prop_cycle"].by_key()["color"]}
if chain_prop is None
else chain_prop
)
else:
chain_prop = (
{
"line_dash": ("solid", "dotted", "dashed", "dashdot"),
}
if chain_prop is None
else chain_prop
)
compact_prop = (
{"line_color": plt.rcParams["axes.prop_cycle"].by_key()["color"]}
if compact_prop is None
else compact_prop
)
if isinstance(chain_prop, str):
chain_prop = {chain_prop: plt.rcParams["axes.prop_cycle"].by_key()[chain_prop]}
if isinstance(chain_prop, tuple):
warnings.warn(
"chain_prop as a tuple will be deprecated in a future warning, use a dict instead",
FutureWarning,
)
chain_prop = {chain_prop[0]: chain_prop[1]}
chain_prop = {
prop_name: [prop for _, prop in zip(range(num_chain_props), cycle(props))]
for prop_name, props in chain_prop.items()
}
if isinstance(compact_prop, str):
compact_prop = {compact_prop: plt.rcParams["axes.prop_cycle"].by_key()[compact_prop]}
if isinstance(compact_prop, tuple):
warnings.warn(
"compact_prop as a tuple will be deprecated in a future warning, use a dict instead",
FutureWarning,
)
compact_prop = {compact_prop[0]: compact_prop[1]}
trace_kwargs = {} if trace_kwargs is None else trace_kwargs
trace_kwargs.setdefault("alpha", 0.35)
if hist_kwargs is None:
hist_kwargs = {}
hist_kwargs.setdefault("alpha", 0.35)
if plot_kwargs is None:
plot_kwargs = {}
if fill_kwargs is None:
fill_kwargs = {}
if rug_kwargs is None:
rug_kwargs = {}
if rank_kwargs is None:
rank_kwargs = {}
trace_kwargs.setdefault("line_width", linewidth)
plot_kwargs.setdefault("line_width", linewidth)
if rank_kwargs is None:
rank_kwargs = {}
if axes is None:
axes = []
backend_kwargs_copy = backend_kwargs.copy()
for i in range(len(plotters)):
if not i:
_axes = [bkp.figure(**backend_kwargs), bkp.figure(**backend_kwargs_copy)]
backend_kwargs_copy.setdefault("x_range", _axes[1].x_range)
else:
_axes = [
bkp.figure(**backend_kwargs),
bkp.figure(**backend_kwargs_copy),
]
axes.append(_axes)
axes = np.atleast_2d(axes)
cds_data = {}
cds_var_groups = {}
draw_name = "draw"
for var_name, selection, isel, value in list(
xarray_var_iter(data, var_names=var_names, combined=True)
):
if selection:
cds_name = "{}_ARVIZ_CDS_SELECTION_{}".format(
var_name,
"_".join(
str(item)
for key, value in selection.items()
for item in (
[key, value]
if (isinstance(value, str) or not isinstance(value, Iterable))
else [key, *value]
)
),
)
else:
cds_name = var_name
if var_name not in cds_var_groups:
cds_var_groups[var_name] = []
cds_var_groups[var_name].append(cds_name)
for chain_idx, _ in enumerate(data.chain.values):
if chain_idx not in cds_data:
cds_data[chain_idx] = {}
_data = value[chain_idx]
cds_data[chain_idx][cds_name] = _data
while any(key == draw_name for key in cds_data[0]):
draw_name += "w"
for chain in cds_data.values():
chain[draw_name] = data.draw.values
cds_data = {chain_idx: ColumnDataSource(cds) for chain_idx, cds in cds_data.items()}
for idx, (var_name, selection, isel, value) in enumerate(plotters):
value = np.atleast_2d(value)
if len(value.shape) == 2:
y_name = (
var_name
if not selection
else "{}_ARVIZ_CDS_SELECTION_{}".format(
var_name,
"_".join(
str(item)
for key, value in selection.items()
for item in (
(key, value)
if (isinstance(value, str) or not isinstance(value, Iterable))
else (key, *value)
)
),
)
)
if rug:
rug_kwargs["y"] = y_name
_plot_chains_bokeh(
ax_density=axes[idx, 0],
ax_trace=axes[idx, 1],
data=cds_data,
x_name=draw_name,
y_name=y_name,
chain_prop=chain_prop,
combined=combined,
rug=rug,
kind=kind,
legend=legend,
trace_kwargs=trace_kwargs,
hist_kwargs=hist_kwargs,
plot_kwargs=plot_kwargs,
fill_kwargs=fill_kwargs,
rug_kwargs=rug_kwargs,
rank_kwargs=rank_kwargs,
)
else:
for y_name in cds_var_groups[var_name]:
if rug:
rug_kwargs["y"] = y_name
_plot_chains_bokeh(
ax_density=axes[idx, 0],
ax_trace=axes[idx, 1],
data=cds_data,
x_name=draw_name,
y_name=y_name,
chain_prop=chain_prop,
combined=combined,
rug=rug,
kind=kind,
legend=legend,
trace_kwargs=trace_kwargs,
hist_kwargs=hist_kwargs,
plot_kwargs=plot_kwargs,
fill_kwargs=fill_kwargs,
rug_kwargs=rug_kwargs,
rank_kwargs=rank_kwargs,
)
for col in (0, 1):
_title = Title()
_title.text = labeller.make_label_vert(var_name, selection, isel)
axes[idx, col].title = _title
axes[idx, col].y_range = DataRange1d(
bounds=backend_config["bounds_y_range"], min_interval=0.1
)
for _, _, vlines in (j for j in lines if j[0] == var_name and j[1] == selection):
if isinstance(vlines, (float, int)):
line_values = [vlines]
else:
line_values = np.atleast_1d(vlines).ravel()
for line_value in line_values:
vline = Span(
location=line_value,
dimension="height",
line_color="black",
line_width=1.5,
line_alpha=0.75,
)
hline = Span(
location=line_value,
dimension="width",
line_color="black",
line_width=1.5,
line_alpha=trace_kwargs["alpha"],
)
axes[idx, 0].renderers.append(vline)
axes[idx, 1].renderers.append(hline)
if legend:
for col in (0, 1):
axes[idx, col].legend.location = "top_left"
axes[idx, col].legend.click_policy = "hide"
else:
for col in (0, 1):
if axes[idx, col].legend:
axes[idx, col].legend.visible = False
if divergences:
div_density_kwargs = {}
div_density_kwargs.setdefault("size", 14)
div_density_kwargs.setdefault("line_color", "red")
div_density_kwargs.setdefault("line_width", 2)
div_density_kwargs.setdefault("line_alpha", 0.50)
div_density_kwargs.setdefault("angle", np.pi / 2)
div_trace_kwargs = {}
div_trace_kwargs.setdefault("size", 14)
div_trace_kwargs.setdefault("line_color", "red")
div_trace_kwargs.setdefault("line_width", 2)
div_trace_kwargs.setdefault("line_alpha", 0.50)
div_trace_kwargs.setdefault("angle", np.pi / 2)
div_selection = {k: v for k, v in selection.items() if k in divergence_data.dims}
divs = divergence_data.sel(**div_selection).values
divs = np.atleast_2d(divs)
for chain, chain_divs in enumerate(divs):
div_idxs = np.arange(len(chain_divs))[chain_divs]
if div_idxs.size > 0:
values = value[chain, div_idxs]
tmp_cds = ColumnDataSource({"y": values, "x": div_idxs})
if divergences == "top":
y_div_trace = value.max()
else:
y_div_trace = value.min()
glyph_density = Scatter(x="y", y=0.0, marker="dash", **div_density_kwargs)
if kind == "trace":
glyph_trace = Scatter(
x="x", y=y_div_trace, marker="dash", **div_trace_kwargs
)
axes[idx, 1].add_glyph(tmp_cds, glyph_trace)
axes[idx, 0].add_glyph(tmp_cds, glyph_density)
show_layout(axes, show)
return axes
def _plot_chains_bokeh(
ax_density,
ax_trace,
data,
x_name,
y_name,
chain_prop,
combined,
rug,
kind,
legend,
trace_kwargs,
hist_kwargs,
plot_kwargs,
fill_kwargs,
rug_kwargs,
rank_kwargs,
):
marker = trace_kwargs.pop("marker", True)
for chain_idx, cds in data.items():
if kind == "trace":
if legend:
trace_kwargs["legend_label"] = f"chain {chain_idx}"
ax_trace.line(
x=x_name,
y=y_name,
source=cds,
**dealiase_sel_kwargs(trace_kwargs, chain_prop, chain_idx),
)
if marker:
ax_trace.circle(
x=x_name,
y=y_name,
source=cds,
radius=0.30,
alpha=0.5,
**dealiase_sel_kwargs({}, chain_prop, chain_idx),
)
if not combined:
rug_kwargs["cds"] = cds
if legend:
plot_kwargs["legend_label"] = f"chain {chain_idx}"
plot_dist(
cds.data[y_name],
ax=ax_density,
rug=rug,
hist_kwargs=hist_kwargs,
plot_kwargs=dealiase_sel_kwargs(plot_kwargs, chain_prop, chain_idx),
fill_kwargs=fill_kwargs,
rug_kwargs=rug_kwargs,
backend="bokeh",
backend_kwargs={},
show=False,
)
if kind == "rank_bars":
value = np.array([item.data[y_name] for item in data.values()])
plot_rank(value, kind="bars", ax=ax_trace, backend="bokeh", show=False, **rank_kwargs)
elif kind == "rank_vlines":
value = np.array([item.data[y_name] for item in data.values()])
plot_rank(value, kind="vlines", ax=ax_trace, backend="bokeh", show=False, **rank_kwargs)
if combined:
rug_kwargs["cds"] = data
if legend:
plot_kwargs["legend_label"] = "combined chains"
plot_dist(
np.concatenate([item.data[y_name] for item in data.values()]).flatten(),
ax=ax_density,
rug=rug,
hist_kwargs=hist_kwargs,
plot_kwargs=dealiase_sel_kwargs(plot_kwargs, chain_prop, -1),
fill_kwargs=fill_kwargs,
rug_kwargs=rug_kwargs,
backend="bokeh",
backend_kwargs={},
show=False,
)
|
0e69f686ef0a9c34ead724ae27e87b6a4991a0f1
|
30b004cad2c14b47b5f66c3a4a0015e05ca4a27e
|
/python/paddle_fl/paddle_fl/examples/k8s_deployment/server/fl_server.py
|
6d60f463a5275ae62e906e4a9c1bdc501ea8d412
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleFL
|
66c26f774eeadc25c12e74056ac389e0c1f61b84
|
dcc00c5dff62c3dd0092801f4e9b89d8c0957d3d
|
refs/heads/master
| 2023-08-07T22:05:24.806573
| 2023-03-21T01:15:10
| 2023-03-21T01:15:10
| 210,873,203
| 486
| 136
|
Apache-2.0
| 2023-07-26T22:30:57
| 2019-09-25T15:01:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,467
|
py
|
fl_server.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle_fl.paddle_fl as fl
import os
import paddle.fluid as fluid
from paddle_fl.paddle_fl.core.server.fl_server import FLServer
from paddle_fl.paddle_fl.core.master.fl_job import FLRunTimeJob
import time
server = FLServer()
server_id = 0
job_path = "fl_job_config"
job = FLRunTimeJob()
job.load_server_job(job_path, server_id)
job._scheduler_ep = os.environ['FL_SCHEDULER_SERVICE_HOST'] + ":" + os.environ[
'FL_SCHEDULER_SERVICE_PORT_FL_SCHEDULER'] # IP address for scheduler
#job._endpoints = os.environ['POD_IP'] + ":" + os.environ['FL_SERVER_SERVICE_PORT_FL_SERVER'] # IP address for server
server.set_server_job(job)
server._current_ep = os.environ['FL_SERVER_SERVICE_HOST'] + ":" + os.environ[
'FL_SERVER_SERVICE_PORT_FL_SERVER'] # IP address for server
print(job._scheduler_ep, server._current_ep)
server.start()
print("connect")
|
edb24f474107deec45e470649f8063e599fa0b87
|
b04cc98a746d1df457183bc14908094a8be00ba1
|
/example/auto_compression/nlp/paddle_inference_eval_uie.py
|
2f378ef0254dafea2a3a580491bf71a2c11a5340
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleSlim
|
a3bcaef0c92016b7f6946d58787f87c7db8ff3f8
|
bb02b103a89a09635941bc0bbbd38506d7412468
|
refs/heads/develop
| 2023-08-31T01:47:27.824722
| 2023-08-25T08:06:08
| 2023-08-25T08:06:08
| 228,290,594
| 1,534
| 402
|
Apache-2.0
| 2023-08-29T09:37:55
| 2019-12-16T02:56:50
|
Python
|
UTF-8
|
Python
| false
| false
| 14,268
|
py
|
paddle_inference_eval_uie.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import time
import json
import sys
from functools import partial
import distutils.util
import numpy as np
import paddle
from paddle import inference
from paddle.metric import Metric, Accuracy, Precision, Recall
from paddlenlp.transformers import AutoModelForTokenClassification, AutoTokenizer
from paddlenlp.datasets import load_dataset
from paddlenlp.data import Stack, Tuple, Pad, Dict
from paddlenlp.metrics import SpanEvaluator
def parse_args():
"""
parse_args func
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
default="./afqmc",
type=str,
required=True,
help="The path prefix of inference model to be used.", )
parser.add_argument(
"--model_filename",
type=str,
default="inference.pdmodel",
help="model file name")
parser.add_argument(
"--params_filename",
type=str,
default="inference.pdiparams",
help="params file name")
parser.add_argument(
"--dev_data",
default="./data/dev.txt",
type=str,
help="The data file of validation.", )
parser.add_argument(
"--device",
default="gpu",
choices=["gpu", "cpu"],
help="Device selected for inference.", )
parser.add_argument(
"--batch_size",
default=32,
type=int,
help="Batch size for predict.", )
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.", )
parser.add_argument(
"--perf_warmup_steps",
default=20,
type=int,
help="Warmup steps for performance test.", )
parser.add_argument(
"--use_trt",
action="store_true",
help="Whether to use inference engin TensorRT.", )
parser.add_argument(
"--precision",
type=str,
default="fp32",
choices=["fp32", "fp16", "int8"],
help="The precision of inference. It can be 'fp32', 'fp16' or 'int8'. Default is 'fp16'.",
)
parser.add_argument(
"--use_mkldnn",
type=bool,
default=False,
help="Whether use mkldnn or not.")
parser.add_argument(
"--cpu_threads", type=int, default=1, help="Num of cpu threads.")
args = parser.parse_args()
return args
def map_offset(ori_offset, offset_mapping):
"""
map ori offset to token offset
"""
for index, span in enumerate(offset_mapping):
if span[0] <= ori_offset < span[1]:
return index
return -1
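# Editorial example (not in the original script): with
# offset_mapping = [(0, 0), (0, 2), (2, 5)], map_offset(3, offset_mapping)
# returns 2, since the third span satisfies 2 <= 3 < 5.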
def _convert_example(example, tokenizer, max_seq_length=128):
encoded_inputs = tokenizer(
text=[example["prompt"]],
text_pair=[example["content"]],
truncation=True,
max_seq_len=max_seq_length,
pad_to_max_seq_len=True,
return_attention_mask=True,
return_position_ids=True,
return_dict=False,
return_offsets_mapping=True)
encoded_inputs = encoded_inputs[0]
offset_mapping = [list(x) for x in encoded_inputs["offset_mapping"]]
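# Editorial note: the loop below appears to shift the content-segment offsets
# (and later the result_list spans) by `bias`, roughly the prompt length plus
# one for the [SEP] marker, so that character positions refer to the joint
# prompt+content sequence.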
bias = 0
for index in range(1, len(offset_mapping)):
mapping = offset_mapping[index]
if mapping[0] == 0 and mapping[1] == 0 and bias == 0:
bias = offset_mapping[index - 1][1] + 1 # Includes [SEP] token
if mapping[0] == 0 and mapping[1] == 0:
continue
offset_mapping[index][0] += bias
offset_mapping[index][1] += bias
start_ids = [0.0 for x in range(max_seq_length)]
end_ids = [0.0 for x in range(max_seq_length)]
for item in example["result_list"]:
start = map_offset(item["start"] + bias, offset_mapping)
end = map_offset(item["end"] - 1 + bias, offset_mapping)
start_ids[start] = 1.0
end_ids[end] = 1.0
tokenized_output = {
"input_ids": encoded_inputs["input_ids"],
"token_type_ids": encoded_inputs["token_type_ids"],
"start_ids": start_ids,
"end_ids": end_ids
}
return tokenized_output
class Predictor(object):
"""
Inference Predictor class
"""
def __init__(self, predictor, input_handles, output_handles):
self.predictor = predictor
self.input_handles = input_handles
self.output_handles = output_handles
@classmethod
def create_predictor(cls, args):
"""
create_predictor func
"""
cls.rerun_flag = False
config = paddle.inference.Config(
os.path.join(args.model_path, args.model_filename),
os.path.join(args.model_path, args.params_filename))
if args.device == "gpu":
# set GPU configs accordingly
config.enable_use_gpu(100, 0)
cls.device = paddle.set_device("gpu")
else:
config.disable_gpu()
config.set_cpu_math_library_num_threads(args.cpu_threads)
config.switch_ir_optim()
if args.use_mkldnn:
config.enable_mkldnn()
if args.precision == "int8":
config.enable_mkldnn_int8()
precision_map = {
"int8": inference.PrecisionType.Int8,
"fp32": inference.PrecisionType.Float32,
"fp16": inference.PrecisionType.Half,
}
if args.precision in precision_map.keys() and args.use_trt:
config.enable_tensorrt_engine(
workspace_size=1 << 30,
max_batch_size=args.batch_size,
min_subgraph_size=5,
precision_mode=precision_map[args.precision],
use_static=True,
use_calib_mode=False, )
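# Editorial note: on the first TensorRT run the script only collects tensor
# shape ranges into dynamic_shape.txt and sets rerun_flag; a second run then
# loads the tuned dynamic shapes and performs the actual evaluation.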
dynamic_shape_file = os.path.join(args.model_path,
"dynamic_shape.txt")
if os.path.exists(dynamic_shape_file):
config.enable_tuned_tensorrt_dynamic_shape(dynamic_shape_file,
True)
print("trt set dynamic shape done!")
else:
config.collect_shape_range_info(dynamic_shape_file)
print("Start collect dynamic shape...")
cls.rerun_flag = True
predictor = paddle.inference.create_predictor(config)
input_handles = [
predictor.get_input_handle(name)
for name in predictor.get_input_names()
]
output_handles = [
predictor.get_output_handle(name)
for name in predictor.get_output_names()
]
return cls(predictor, input_handles, output_handles)
def predict_batch(self, data):
"""
predict from batch func
"""
for input_field, input_handle in zip(data, self.input_handles):
input_handle.copy_from_cpu(input_field)
self.predictor.run()
output = [
output_handle.copy_to_cpu() for output_handle in self.output_handles
]
return output
def _convert_predict_batch(self, args, data, tokenizer, batchify_fn):
examples = []
for example in data:
example = _convert_example(
example, tokenizer, max_seq_length=args.max_seq_length)
examples.append(example)
return examples
def predict(self, dataset, tokenizer, batchify_fn, args):
"""
predict func
"""
batches = [
dataset[idx:idx + args.batch_size]
for idx in range(0, len(dataset), args.batch_size)
]
for i, batch in enumerate(batches):
examples = self._convert_predict_batch(args, batch, tokenizer,
batchify_fn)
input_ids, segment_ids, start_ids, end_ids = batchify_fn(examples)
output = self.predict_batch([input_ids, segment_ids])
if i > args.perf_warmup_steps:
break
if self.rerun_flag:
return
metric = SpanEvaluator()
metric.reset()
predict_time = 0.0
for i, batch in enumerate(batches):
examples = self._convert_predict_batch(args, batch, tokenizer,
batchify_fn)
input_ids, segment_ids, start_ids, end_ids = batchify_fn(examples)
start_time = time.time()
output = self.predict_batch([input_ids, segment_ids])
end_time = time.time()
predict_time += end_time - start_time
start_ids = paddle.to_tensor(np.array(start_ids))
end_ids = paddle.to_tensor(np.array(end_ids))
start_prob = paddle.to_tensor(output[0])
end_prob = paddle.to_tensor(output[1])
num_correct, num_infer, num_label = metric.compute(
start_prob, end_prob, start_ids, end_ids)
metric.update(num_correct, num_infer, num_label)
sequences_num = i * args.batch_size
print(
"[benchmark]batch size: {} Inference time per batch: {}ms, qps: {}.".
format(
args.batch_size,
round(predict_time * 1000 / i, 2),
round(sequences_num / predict_time, 2), ))
precision, recall, f1 = metric.accumulate()
print("[benchmark]f1: %s. \n" % (f1), end="")
sys.stdout.flush()
def reader_proprecess(data_path, max_seq_len=128):
"""
read json
"""
with open(data_path, 'r', encoding='utf-8') as f:
for line in f:
json_line = json.loads(line)
content = json_line['content'].strip()
prompt = json_line['prompt']
# Model input looks like: [CLS] Prompt [SEP] Content [SEP]
# It includes three special tokens ([CLS] and two [SEP]).
if max_seq_len <= len(prompt) + 3:
raise ValueError(
"The value of max_seq_len is too small, please set a larger value"
)
max_content_len = max_seq_len - len(prompt) - 3
if len(content) <= max_content_len:
yield json_line
else:
result_list = json_line['result_list']
json_lines = []
accumulate = 0
while True:
cur_result_list = []
for result in result_list:
if result['start'] + 1 <= max_content_len < result[
'end']:
max_content_len = result['start']
break
cur_content = content[:max_content_len]
res_content = content[max_content_len:]
while True:
if len(result_list) == 0:
break
elif result_list[0]['end'] <= max_content_len:
if result_list[0]['end'] > 0:
cur_result = result_list.pop(0)
cur_result_list.append(cur_result)
else:
cur_result_list = [
result for result in result_list
]
break
else:
break
json_line = {
'content': cur_content,
'result_list': cur_result_list,
'prompt': prompt
}
json_lines.append(json_line)
for result in result_list:
if result['end'] <= 0:
break
result['start'] -= max_content_len
result['end'] -= max_content_len
accumulate += max_content_len
max_content_len = max_seq_len - len(prompt) - 3
if len(res_content) == 0:
break
elif len(res_content) < max_content_len:
json_line = {
'content': res_content,
'result_list': result_list,
'prompt': prompt
}
json_lines.append(json_line)
break
else:
content = res_content
for json_line in json_lines:
yield json_line
def main():
"""
main func
"""
paddle.seed(42)
args = parse_args()
if args.use_mkldnn:
paddle.set_device("cpu")
predictor = Predictor.create_predictor(args)
dev_ds = load_dataset(
reader_proprecess, data_path=args.dev_data, lazy=False)
tokenizer = AutoTokenizer.from_pretrained(args.model_path)
batchify_fn = lambda samples, fn=Dict({
'input_ids': Pad(axis=0, pad_val=tokenizer.pad_token_id), # input
'token_type_ids': Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # segment
'start_ids': Stack(dtype="int64"),
'end_ids': Stack(dtype="int64")}): fn(samples)
predictor.predict(dev_ds, tokenizer, batchify_fn, args)
if predictor.rerun_flag:
print(
"***** Collect dynamic shape done, Please rerun the program to get correct results. *****"
)
if __name__ == "__main__":
paddle.set_device("cpu")
main()
|
0807c5131485161d25bc77ab48885f6c373b69b3
|
b5ce6908490cfb8e6a1e1cbe4745d675122ddce0
|
/questions/partition-labels/Solution.py
|
003bfb72ed44c02b4820847f4db817b085030eb2
|
[
"MIT"
] |
permissive
|
franklingu/leetcode-solutions
|
8895910f13208e1d8e604100d84c2dd35684cde4
|
7ad7e5c1c040510b7b7bd225ed4297054464dbc6
|
refs/heads/master
| 2023-01-09T01:34:08.097518
| 2023-01-02T02:05:35
| 2023-01-02T02:05:35
| 43,345,677
| 155
| 66
|
MIT
| 2020-10-02T03:41:36
| 2015-09-29T04:54:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,279
|
py
|
Solution.py
|
"""
A string S of lowercase English letters is given. We want to partition this string into as many parts as possible so that each letter appears in at most one part, and return a list of integers representing the size of these parts.
Example 1:
Input: S = "ababcbacadefegdehijhklij"
Output: [9,7,8]
Explanation:
The partition is "ababcbaca", "defegde", "hijhklij".
This is a partition so that each letter appears in at most one part.
A partition like "ababcbacadefegde", "hijhklij" is incorrect, because it splits S into less parts.
Note:
S will have length in range [1, 500].
S will consist of lowercase English letters ('a' to 'z') only.
"""
from typing import List
class Solution:
def partitionLabels(self, S: str) -> List[int]:
tr = {}
for i, a in enumerate(S):
if a not in tr:
tr[a] = [i, i]
else:
tr[a][1] = i
rgs = list(tr.values())
rgs.sort()
merged = []
for rg in rgs:
if not merged:
merged.append(list(rg))
continue
if merged[-1][1] >= rg[0]:
merged[-1][1] = max(merged[-1][1], rg[1])
else:
merged.append(list(rg))
return [rg[1] - rg[0] + 1 for rg in merged]
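# Editorial check (illustrative only), using the example from the problem
# statement above -- the solution merges the [first, last] occurrence interval
# of every letter and reports the merged interval sizes:
#   Solution().partitionLabels("ababcbacadefegdehijhklij")  # -> [9, 7, 8]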
|
3c997f6a2c37d5ec0436619cd4e280df0628457d
|
66fc3d58e94e8340a0d825501776a1dea37c0198
|
/share/nuke/menu.py
|
0ee592654bffd3879367f76766ce8392facd8242
|
[
"BSD-3-Clause",
"CC-BY-4.0"
] |
permissive
|
AcademySoftwareFoundation/OpenColorIO
|
dad370b54be147ae94f18ed6414d53bd76e9ef74
|
96f528fdfb7f9fb24388e33f6a968d29a3909cf8
|
refs/heads/main
| 2023-08-29T08:51:45.625957
| 2023-08-29T01:42:37
| 2023-08-29T01:42:37
| 775,131
| 843
| 236
|
BSD-3-Clause
| 2023-09-14T02:56:01
| 2010-07-14T18:22:06
|
C++
|
UTF-8
|
Python
| false
| false
| 1,190
|
py
|
menu.py
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
import os
import nuke
import ocionuke.cdl
def ocio_populate_menu():
"""Adds OCIO nodes to a menu in Color
"""
m_nodes = nuke.toolbar('Nodes')
m_color = m_nodes.findItem("Color")
m_ocio = m_color.addMenu("OCIO", icon = "ocio_icon.png")
allplugs = nuke.plugins(nuke.ALL | nuke.NODIR, "OCIO*.so", "OCIO*.dylib", "OCIO*.dll")
for fname in allplugs:
nodeMenu = nodeClass = os.path.splitext(fname)[0] # strip the library extension (.so/.dylib/.dll)
# Put a space after "OCIO" to match The Foundry's convention (huh?)
if nodeMenu.startswith("OCIO"):
nodeMenu = nodeMenu.replace("OCIO", "OCIO ", 1)
# Only add the item if it doesn't exist
if not m_ocio.findItem(nodeMenu):
m_ocio.addCommand(nodeMenu, lambda nodeClass=nodeClass: nuke.createNode(nodeClass))
m_utils = m_ocio.addMenu("Utils")
m_utils.addCommand("Import .ccc to CDL nodes", ocionuke.cdl.import_multiple_from_ccc)
m_utils.addCommand("Export selected CDL's to .ccc", ocionuke.cdl.export_multiple_to_ccc)
if __name__ == "__main__":
ocio_populate_menu()
|
a63591cbeb175bb53a48b4122b3842d78f3f0f36
|
6b4dbc04e8b2513634e8d25b9f8b69c82c07b6fe
|
/udsoncan/ResponseCode.py
|
a91f828cec007c9ee357bcb753d23a2ab1bcf984
|
[
"MIT"
] |
permissive
|
pylessard/python-udsoncan
|
57c62b59a88907c6a988501c65efa14cfb8153a0
|
1b93cc3cd0e09a21d48881ba53aed257f841bb89
|
refs/heads/master
| 2023-08-29T21:49:55.403246
| 2023-06-24T15:46:46
| 2023-06-24T15:46:46
| 99,459,987
| 477
| 181
|
MIT
| 2023-06-30T15:29:30
| 2017-08-06T02:40:37
|
Python
|
UTF-8
|
Python
| false
| false
| 7,612
|
py
|
ResponseCode.py
|
import inspect
from typing import Dict
class ResponseCode:
PositiveResponse = 0
GeneralReject = 0x10
ServiceNotSupported = 0x11
SubFunctionNotSupported = 0x12
IncorrectMessageLengthOrInvalidFormat = 0x13
ResponseTooLong = 0x14
BusyRepeatRequest = 0x21
ConditionsNotCorrect = 0x22
RequestSequenceError = 0x24
NoResponseFromSubnetComponent = 0x25
FailurePreventsExecutionOfRequestedAction = 0x26
RequestOutOfRange = 0x31
SecurityAccessDenied = 0x33
AuthenticationRequired = 0x34
InvalidKey = 0x35
ExceedNumberOfAttempts = 0x36
RequiredTimeDelayNotExpired = 0x37
SecureDataTransmissionRequired = 0x38
SecureDataTransmissionNotAllowed = 0x39
SecureDataVerificationFailed = 0x3A
CertificateVerificationFailed_InvalidTimePeriod = 0x50
CertificateVerificationFailed_InvalidSignature = 0x51
CertificateVerificationFailed_InvalidChainOfTrust = 0x52
CertificateVerificationFailed_InvalidType = 0x53
CertificateVerificationFailed_InvalidFormat = 0x54
CertificateVerificationFailed_InvalidContent = 0x55
CertificateVerificationFailed_InvalidScope = 0x56
CertificateVerificationFailed_InvalidCertificate = 0x57
OwnershipVerificationFailed = 0x58
ChallengeCalculationFailed = 0x59
SettingAccessRightsFailed = 0x5A
SessionKeyCreationDerivationFailed = 0x5B
ConfigurationDataUsageFailed = 0x5C
DeAuthenticationFailed = 0x5D
UploadDownloadNotAccepted = 0x70
TransferDataSuspended = 0x71
GeneralProgrammingFailure = 0x72
WrongBlockSequenceCounter = 0x73
RequestCorrectlyReceived_ResponsePending = 0x78
SubFunctionNotSupportedInActiveSession = 0x7E
ServiceNotSupportedInActiveSession = 0x7F
RpmTooHigh = 0x81
RpmTooLow = 0x82
EngineIsRunning = 0x83
EngineIsNotRunning = 0x84
EngineRunTimeTooLow = 0x85
TemperatureTooHigh = 0x86
TemperatureTooLow = 0x87
VehicleSpeedTooHigh = 0x88
VehicleSpeedTooLow = 0x89
ThrottlePedalTooHigh = 0x8A
ThrottlePedalTooLow = 0x8B
TransmissionRangeNotInNeutral = 0x8C
TransmissionRangeNotInGear = 0x8D
BrakeSwitchNotClosed = 0x8F
ShifterLeverNotInPark = 0x90
TorqueConverterClutchLocked = 0x91
VoltageTooHigh = 0x92
VoltageTooLow = 0x93
ResourceTemporarilyNotAvailable = 0x94
# Defined by ISO-15764. Offset of 0x38 is defined within UDS standard (ISO-14229)
GeneralSecurityViolation = 0x38 + 0
SecuredModeRequested = 0x38 + 1
InsufficientProtection = 0x38 + 2
TerminationWithSignatureRequested = 0x38 + 3
AccessDenied = 0x38 + 4
VersionNotSupported = 0x38 + 5
SecuredLinkNotSupported = 0x38 + 6
CertificateNotAvailable = 0x38 + 7
AuditTrailInformationNotAvailable = 0x38 + 8
@classmethod
def is_supported_by_standard(cls, code: int, standard_version: int) -> bool:
if not isinstance(code, int):
raise ValueError("given code must be an integer value, not %s" % (code.__class__.__name__))
if not isinstance(standard_version, int):
raise ValueError("given standard_version must be an integer value, not %s" % (standard_version.__class__.__name__))
codes_version: Dict[int, int] = {
cls.PositiveResponse: 2006,
cls.GeneralReject: 2006,
cls.ServiceNotSupported: 2006,
cls.SubFunctionNotSupported: 2006,
cls.IncorrectMessageLengthOrInvalidFormat: 2006,
cls.ResponseTooLong: 2006,
cls.BusyRepeatRequest: 2006,
cls.ConditionsNotCorrect: 2006,
cls.RequestSequenceError: 2006,
cls.NoResponseFromSubnetComponent: 2006,
cls.FailurePreventsExecutionOfRequestedAction: 2006,
cls.RequestOutOfRange: 2006,
cls.SecurityAccessDenied: 2006,
cls.AuthenticationRequired: 2006,
cls.InvalidKey: 2006,
cls.ExceedNumberOfAttempts: 2006,
cls.RequiredTimeDelayNotExpired: 2006,
cls.SecureDataTransmissionRequired: 2020,
cls.SecureDataTransmissionNotAllowed: 2020,
cls.SecureDataVerificationFailed: 2020,
cls.CertificateVerificationFailed_InvalidTimePeriod: 2020,
cls.CertificateVerificationFailed_InvalidSignature: 2020,
cls.CertificateVerificationFailed_InvalidChainOfTrust: 2020,
cls.CertificateVerificationFailed_InvalidType: 2020,
cls.CertificateVerificationFailed_InvalidFormat: 2020,
cls.CertificateVerificationFailed_InvalidContent: 2020,
cls.CertificateVerificationFailed_InvalidScope: 2020,
cls.CertificateVerificationFailed_InvalidCertificate: 2020,
cls.OwnershipVerificationFailed: 2020,
cls.ChallengeCalculationFailed: 2020,
cls.SettingAccessRightsFailed: 2020,
cls.SessionKeyCreationDerivationFailed: 2020,
cls.ConfigurationDataUsageFailed: 2020,
cls.DeAuthenticationFailed: 2020,
cls.UploadDownloadNotAccepted: 2006,
cls.TransferDataSuspended: 2006,
cls.GeneralProgrammingFailure: 2006,
cls.WrongBlockSequenceCounter: 2006,
cls.RequestCorrectlyReceived_ResponsePending: 2006,
cls.SubFunctionNotSupportedInActiveSession: 2006,
cls.ServiceNotSupportedInActiveSession: 2006,
cls.RpmTooHigh: 2006,
cls.RpmTooLow: 2006,
cls.EngineIsRunning: 2006,
cls.EngineIsNotRunning: 2006,
cls.EngineRunTimeTooLow: 2006,
cls.TemperatureTooHigh: 2006,
cls.TemperatureTooLow: 2006,
cls.VehicleSpeedTooHigh: 2006,
cls.VehicleSpeedTooLow: 2006,
cls.ThrottlePedalTooHigh: 2006,
cls.ThrottlePedalTooLow: 2006,
cls.TransmissionRangeNotInNeutral: 2006,
cls.TransmissionRangeNotInGear: 2006,
cls.BrakeSwitchNotClosed: 2006,
cls.ShifterLeverNotInPark: 2006,
cls.TorqueConverterClutchLocked: 2006,
cls.VoltageTooHigh: 2006,
cls.VoltageTooLow: 2006,
cls.ResourceTemporarilyNotAvailable: 2020,
cls.GeneralSecurityViolation: 2006,
cls.SecuredModeRequested: 2006,
cls.InsufficientProtection: 2006,
cls.TerminationWithSignatureRequested: 2006,
cls.AccessDenied: 2006,
cls.VersionNotSupported: 2006,
cls.SecuredLinkNotSupported: 2006,
cls.CertificateNotAvailable: 2006,
cls.AuditTrailInformationNotAvailable: 2006,
}
if code not in codes_version:
raise ValueError('Do not know the standard version in which this code has been introduced: %s' % (code))
return standard_version >= codes_version[code]
# Returns the name of the response code as a string
@classmethod
def get_name(cls, given_id: int) -> str:
if given_id is None:
return ""
for member in inspect.getmembers(cls):
if isinstance(member[1], int):
if member[1] == given_id:
return member[0]
return str(given_id)
# Tells if a code is a negative code
@classmethod
def is_negative(cls, given_id: int) -> bool:
if given_id in [None, cls.PositiveResponse]:
return False
for member in inspect.getmembers(cls):
if isinstance(member[1], int):
if member[1] == given_id:
return True
return False
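# Editorial usage sketch (illustrative only, not part of the library):
#   ResponseCode.get_name(0x33)                        # -> "SecurityAccessDenied"
#   ResponseCode.is_negative(ResponseCode.InvalidKey)  # -> True
#   ResponseCode.is_supported_by_standard(
#       ResponseCode.CertificateVerificationFailed_InvalidSignature, 2006)  # -> False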
|
33643631f0056ff266d794774538003a2499be45
|
3d589d1c56b55fbd2b45b03564b8a9442ebf142b
|
/lib/src/klio/metrics/logger.py
|
82beba5a49bdd00bc9d640d63a327983cd0edfe6
|
[
"Apache-2.0"
] |
permissive
|
spotify/klio
|
1aff27412e92c9d699259e5ab1eaeb39dc3e9571
|
e625565708ed846201d2e05f782c0ce585554346
|
refs/heads/develop
| 2023-05-25T14:33:28.348335
| 2022-03-23T20:34:09
| 2022-03-23T20:34:09
| 285,928,366
| 815
| 57
|
Apache-2.0
| 2023-05-24T21:07:09
| 2020-08-07T22:02:58
|
Python
|
UTF-8
|
Python
| false
| false
| 9,580
|
py
|
logger.py
|
# Copyright 2019-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Klio ships with a default :class:`klio.metrics.base.AbstractRelayClient`
implementation, which outputs metrics via the standard library ``logging``
module through the :class:`MetricsLoggerClient` below.
This implementation is used by default if no other metrics consumers are
configured. It must be explicitly turned off.
The default configuration in ``klio-job.yaml`` can be overwritten:
.. code-block:: yaml
job_config:
metrics:
logger:
# Logged metrics are emitted at the `debug` level by default.
level: info
# Default timer unit is ns/nanoseconds; available
# options include `s` or `seconds`, `ms` or `milliseconds`,
# `us` or `microseconds`, and `ns` or `nanoseconds`.
timer_unit: s
To turn off logging-based metrics:
.. code-block:: yaml
job_config:
metrics:
logger: false
"""
import logging
import threading
from klio.metrics import base
TIMER_UNIT_MAP = {
"nanoseconds": "ns",
"microseconds": "us",
"milliseconds": "ms",
"seconds": "s",
"ns": "ns",
"us": "us",
"ms": "ms",
"s": "s",
}
"""Map of supported measurement units to shorthand for :class:`LoggerTimer`.
"""
class MetricsLoggerClient(base.AbstractRelayClient):
"""Logging client for transform metrics.
Intended to be instantiated by :class:`klio.metrics.client.MetricsRegistry`
and not by itself.
Args:
klio_config (klio_core.config.KlioConfig): the job's configuration.
disabled (bool): whether or not to disable the Python ``logger``
Default: ``False``.
"""
RELAY_CLIENT_NAME = "logger"
DEFAULT_LEVEL = logging.DEBUG
DEFAULT_TIME_UNIT = "ns"
_thread_local = threading.local()
def __init__(self, klio_config, disabled=False):
super(MetricsLoggerClient, self).__init__(klio_config)
self.logger_config = self.klio_config.job_config.metrics.get(
"logger", {}
)
self.disabled = disabled
self.log_level = self._set_log_level()
self.timer_unit = self._set_timer_unit()
def _set_log_level(self):
log_level = MetricsLoggerClient.DEFAULT_LEVEL
if isinstance(self.logger_config, dict):
log_level_str = self.logger_config.get("level")
if log_level_str:
log_level = getattr(logging, log_level_str.upper(), log_level)
return log_level
def _set_timer_unit(self):
timer_unit = MetricsLoggerClient.DEFAULT_TIME_UNIT
if isinstance(self.logger_config, dict):
_timer_unit = self.logger_config.get("timer_unit")
if _timer_unit:
timer_unit = TIMER_UNIT_MAP.get(_timer_unit, timer_unit)
return timer_unit
@property
def logger(self):
"""Python logger associated with the job which this client will use
to emit metrics.
"""
klio_metrics_logger = getattr(
self._thread_local, "klio_metrics_logger", None
)
if not klio_metrics_logger:
logger = logging.getLogger("klio.metrics")
logger.disabled = self.disabled
self._thread_local.klio_metrics_logger = logger
return self._thread_local.klio_metrics_logger
def unmarshal(self, metric):
"""Return a dict-representation of a given metric.
Args:
metric (LoggerMetric): logger-specific metrics object
Returns:
dict(str, str): metric data
"""
return {
"name": metric.name,
"value": metric.value,
"transform": metric.transform,
"tags": metric.tags,
}
def emit(self, metric):
"""Log a given metric.
Args:
metric (LoggerMetric): logger-specific metrics object
"""
metric_data = self.unmarshal(metric)
self.logger.log(
self.log_level, metric.DEFAULT_LOG_FORMAT.format(**metric_data)
)
def counter(self, name, value=0, transform=None, tags=None, **kwargs):
"""Create a :class:`LoggerCounter` object.
Args:
name (str): name of counter
value (int): starting value of counter; defaults to 0
transform (str): transform the counter is associated with
tags (dict): any tags of additional contextual information
to associate with the counter
Returns:
LoggerCounter: a log-based counter
"""
return LoggerCounter(
name=name, value=value, transform=transform, tags=tags
)
def gauge(self, name, value=0, transform=None, tags=None, **kwargs):
"""Create a :class:`LoggerGauge` object.
Args:
name (str): name of gauge
value (int): starting value of gauge; defaults to 0
transform (str): transform the gauge is associated with
tags (dict): any tags of additional contextual information
to associate with the gauge
Returns:
LoggerGauge: a log-based gauge
"""
return LoggerGauge(
name=name, value=value, transform=transform, tags=tags
)
def timer(
self,
name,
value=0,
transform=None,
tags=None,
timer_unit=None,
**kwargs
):
"""Create a :class:`LoggerTimer` object.
Args:
name (str): name of timer
value (int): starting value of timer; defaults to 0
transform (str): transform the timer is associated with
tags (dict): any tags of additional contextual information
to associate with the timer
timer_unit (str): timer unit; defaults to configured value
in `klio-job.yaml`, or "ns". See module-level docs of
`klio.metrics.logger` for supported values.
Returns:
LoggerTimer: a log-based timer
"""
if timer_unit:
# Note: this should probably have better validation if it does
# not recognize the unit given. Instead of erroring out, we'll
# just use the default (@lynn)
timer_unit = TIMER_UNIT_MAP.get(timer_unit, self.timer_unit)
else:
timer_unit = self.timer_unit
return LoggerTimer(
name=name,
value=value,
transform=transform,
tags=tags,
timer_unit=timer_unit,
)
class LoggerMetric(base.BaseMetric):
"""Base metric type for loggers.
Args:
name (str): name of counter
value (int): initial value. Default: ``0``.
transform (str): Name of transform associated with metric, if any.
tags (dict): Tags to associate with metric.
"""
LOGGER_METRIC_TAGS = None
DEFAULT_LOG_FORMAT = (
"[{name}] value: {value} transform: '{transform}' tags: {tags}"
)
def __init__(self, name, value=0, transform=None, tags=None):
super(LoggerMetric, self).__init__(
name, value=value, transform=transform
)
self.tags = tags if tags else {}
self.tags.update(self.LOGGER_METRIC_TAGS)
class LoggerCounter(LoggerMetric):
"""Log-based counter metric.
Args:
name (str): name of counter
value (int): initial value. Default: ``0``.
transform (str): Name of transform associated with counter, if any.
tags (dict): Tags to associate with counter. Note:
``{"metric_type": "counter"}`` will always be an included tag.
"""
LOGGER_METRIC_TAGS = {"metric_type": "counter"}
class LoggerGauge(LoggerMetric):
"""Log-based gauge metric.
Args:
name (str): name of gauge
value (int): initial value. Default: ``0``.
transform (str): Name of transform associated with gauge, if any.
tags (dict): Tags to associate with gauge. Note:
``{"metric_type": "gauge"}`` will always be an included tag.
"""
LOGGER_METRIC_TAGS = {"metric_type": "gauge"}
class LoggerTimer(LoggerMetric):
"""Log-based timer metric.
Args:
name (str): name of timer
value (int): initial value. Default: ``0``.
transform (str): Name of transform associated with timer, if any.
tags (dict): Tags to associate with timer. Note:
``{"metric_type": "timer"}`` will always be an included tag.
timer_unit (str): Unit of measurement. Options: :attr:`TIMER_UNIT_MAP`.
Default: ``ns`` (nanoseconds).
"""
LOGGER_METRIC_TAGS = {"metric_type": "timer"}
def __init__(
self, name, value=0, transform=None, tags=None, timer_unit="ns"
):
self.LOGGER_METRIC_TAGS.update({"unit": timer_unit})
super(LoggerTimer, self).__init__(
name, value=value, transform=transform, tags=tags
)
self.timer_unit = timer_unit
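# --- Usage sketch (illustrative; not part of the original module) ---
# The client only reads ``klio_config.job_config.metrics``, so a minimal stand-in
# config object is enough to exercise it outside a pipeline. The names below are
# illustrative, not a real ``klio_core.config.KlioConfig``:
#
#   class _FakeJobConfig:
#       metrics = {"logger": {"level": "info", "timer_unit": "s"}}
#
#   class _FakeKlioConfig:
#       job_config = _FakeJobConfig()
#
#   client = MetricsLoggerClient(_FakeKlioConfig())
#   counter = client.counter("rows-processed", transform="MyTransform")
#   counter.value += 1
#   client.emit(counter)  # logs "[rows-processed] value: 1 transform: 'MyTransform' ..."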
|
324e96c848fbdd591dd561e33e3e290ec21acabc
|
80f94bea418d7956df1ba19d4d6a1d7715a94ade
|
/lib/tool_shed/webapp/api/__init__.py
|
efe9257b639e892e07962787c35df82fd6afc06d
|
[
"CC-BY-2.5",
"MIT",
"CC-BY-3.0",
"AFL-3.0"
] |
permissive
|
galaxyproject/galaxy
|
5748409eb6693b1611f289d164f85e20c3237495
|
b9ae7a16ba0465995e880ae9701b7e87226b9bab
|
refs/heads/dev
| 2023-08-28T22:35:51.248138
| 2023-08-26T08:02:33
| 2023-08-26T08:02:33
| 31,211,061
| 1,277
| 1,137
|
NOASSERTION
| 2023-09-14T19:39:01
| 2015-02-23T14:18:06
|
Python
|
UTF-8
|
Python
| false
| false
| 254
|
py
|
__init__.py
|
from galaxy.webapps.base.controller import BaseAPIController
from tool_shed.structured_app import ToolShedApp
class BaseShedAPIController(BaseAPIController):
app: ToolShedApp
def __init__(self, app: ToolShedApp):
super().__init__(app)
|
082ace2fd8cfb24a3dce74b804c241f93e3db311
|
f9f074c44b67a11d4630b5e1cc15e016e8d73cc8
|
/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_settings/exceptions.py
|
ca7731b690bcab307905bfbde5430a94d769cd68
|
[
"MIT"
] |
permissive
|
Azure-Samples/azure-intelligent-edge-patterns
|
361694680c7e48d3761c5416175788355b684dcd
|
1d2f42cbf9f21157c1e1abf044b26160dfed5b16
|
refs/heads/master
| 2023-05-26T13:15:47.085088
| 2023-02-28T17:25:53
| 2023-02-28T17:25:53
| 186,706,933
| 193
| 164
|
MIT
| 2023-02-28T17:25:55
| 2019-05-14T22:02:41
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,030
|
py
|
exceptions.py
|
"""App exceptions.
"""
from rest_framework.exceptions import APIException
# pylint: disable=line-too-long
class SettingEmptyKeyError(APIException):
"""SettingEmptyKeyError."""
status_code = 400
default_detail = "Setting Key is empty."
default_code = "setting_empty_key_error"
class SettingEmptyEndpointError(APIException):
"""SettingEmptyEndpointError."""
status_code = 400
default_detail = "Setting Endpoint is empty."
default_code = "setting_empty_endpoint_error"
class SettingCustomVisionAccessFailed(APIException):
"""SettingCustomVisionAccessFailed."""
status_code = 503
default_detail = "Training key or Endpoint is invalid. Please change the settings."
default_code = "setting_custom_vision_access_failed"
class SettingCustomVisionCannotCreateProject(APIException):
"""SettingCustomVisionCannotCreateProject."""
status_code = 503
default_detail = "Custom Vision projects reach limitation. Please delete some projects or contact admin." # noqa: E501
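# --- Usage sketch (illustrative; not part of the original module) ---
# These classes subclass DRF's APIException, so raising one inside a view is enough
# for Django REST Framework to turn ``status_code``/``default_detail`` into the HTTP
# response. The view below is purely illustrative:
#
#   from rest_framework.views import APIView
#
#   class SettingsView(APIView):
#       def post(self, request):
#           if not request.data.get("training_key"):
#               raise SettingEmptyKeyError()   # -> HTTP 400 with the default detail
#           ...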
|
6870db2bedce5778bed8d21c65060e708650f99c
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-das/huaweicloudsdkdas/v3/model/__init__.py
|
c8418cdf820b88e39ad1a6c2e3f0c8199f404016
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 9,070
|
py
|
__init__.py
|
# coding: utf-8
from __future__ import absolute_import
# import models into model package
from huaweicloudsdkdas.v3.model.advice_result import AdviceResult
from huaweicloudsdkdas.v3.model.api_version import ApiVersion
from huaweicloudsdkdas.v3.model.cancel_share_connections_request import CancelShareConnectionsRequest
from huaweicloudsdkdas.v3.model.cancel_share_connections_request_body import CancelShareConnectionsRequestBody
from huaweicloudsdkdas.v3.model.cancel_share_connections_response import CancelShareConnectionsResponse
from huaweicloudsdkdas.v3.model.change_sql_limit_switch_status_body import ChangeSqlLimitSwitchStatusBody
from huaweicloudsdkdas.v3.model.change_sql_limit_switch_status_request import ChangeSqlLimitSwitchStatusRequest
from huaweicloudsdkdas.v3.model.change_sql_limit_switch_status_response import ChangeSqlLimitSwitchStatusResponse
from huaweicloudsdkdas.v3.model.change_sql_switch_body import ChangeSqlSwitchBody
from huaweicloudsdkdas.v3.model.change_sql_switch_request import ChangeSqlSwitchRequest
from huaweicloudsdkdas.v3.model.change_sql_switch_response import ChangeSqlSwitchResponse
from huaweicloudsdkdas.v3.model.create_share_connections_request import CreateShareConnectionsRequest
from huaweicloudsdkdas.v3.model.create_share_connections_request_body import CreateShareConnectionsRequestBody
from huaweicloudsdkdas.v3.model.create_share_connections_response import CreateShareConnectionsResponse
from huaweicloudsdkdas.v3.model.create_space_analysis_task_body import CreateSpaceAnalysisTaskBody
from huaweicloudsdkdas.v3.model.create_space_analysis_task_request import CreateSpaceAnalysisTaskRequest
from huaweicloudsdkdas.v3.model.create_space_analysis_task_response import CreateSpaceAnalysisTaskResponse
from huaweicloudsdkdas.v3.model.create_sql_limit_rule_option import CreateSqlLimitRuleOption
from huaweicloudsdkdas.v3.model.create_sql_limit_rules_body import CreateSqlLimitRulesBody
from huaweicloudsdkdas.v3.model.create_sql_limit_rules_request import CreateSqlLimitRulesRequest
from huaweicloudsdkdas.v3.model.create_sql_limit_rules_response import CreateSqlLimitRulesResponse
from huaweicloudsdkdas.v3.model.create_tuning_req import CreateTuningReq
from huaweicloudsdkdas.v3.model.create_tuning_request import CreateTuningRequest
from huaweicloudsdkdas.v3.model.create_tuning_response import CreateTuningResponse
from huaweicloudsdkdas.v3.model.db_object_space_info import DbObjectSpaceInfo
from huaweicloudsdkdas.v3.model.db_user import DbUser
from huaweicloudsdkdas.v3.model.delete_db_user_request import DeleteDbUserRequest
from huaweicloudsdkdas.v3.model.delete_db_user_response import DeleteDbUserResponse
from huaweicloudsdkdas.v3.model.delete_process_req_body import DeleteProcessReqBody
from huaweicloudsdkdas.v3.model.delete_process_request import DeleteProcessRequest
from huaweicloudsdkdas.v3.model.delete_process_response import DeleteProcessResponse
from huaweicloudsdkdas.v3.model.delete_sql_limit_rules_body import DeleteSqlLimitRulesBody
from huaweicloudsdkdas.v3.model.delete_sql_limit_rules_request import DeleteSqlLimitRulesRequest
from huaweicloudsdkdas.v3.model.delete_sql_limit_rules_response import DeleteSqlLimitRulesResponse
from huaweicloudsdkdas.v3.model.execution_plan import ExecutionPlan
from huaweicloudsdkdas.v3.model.explain import Explain
from huaweicloudsdkdas.v3.model.export_slow_query_logs_request import ExportSlowQueryLogsRequest
from huaweicloudsdkdas.v3.model.export_slow_query_logs_response import ExportSlowQueryLogsResponse
from huaweicloudsdkdas.v3.model.export_slow_sql_templates_details_request import ExportSlowSqlTemplatesDetailsRequest
from huaweicloudsdkdas.v3.model.export_slow_sql_templates_details_response import ExportSlowSqlTemplatesDetailsResponse
from huaweicloudsdkdas.v3.model.export_sql_statements_request import ExportSqlStatementsRequest
from huaweicloudsdkdas.v3.model.export_sql_statements_response import ExportSqlStatementsResponse
from huaweicloudsdkdas.v3.model.export_top_sql_templates_details_request import ExportTopSqlTemplatesDetailsRequest
from huaweicloudsdkdas.v3.model.export_top_sql_templates_details_response import ExportTopSqlTemplatesDetailsResponse
from huaweicloudsdkdas.v3.model.export_top_sql_trend_details_request import ExportTopSqlTrendDetailsRequest
from huaweicloudsdkdas.v3.model.export_top_sql_trend_details_response import ExportTopSqlTrendDetailsResponse
from huaweicloudsdkdas.v3.model.feedback_info import FeedbackInfo
from huaweicloudsdkdas.v3.model.full_sql import FullSql
from huaweicloudsdkdas.v3.model.index_advice_info import IndexAdviceInfo
from huaweicloudsdkdas.v3.model.innodb_lock import InnodbLock
from huaweicloudsdkdas.v3.model.innodb_lock_waits import InnodbLockWaits
from huaweicloudsdkdas.v3.model.innodb_trx import InnodbTrx
from huaweicloudsdkdas.v3.model.instance_space_info import InstanceSpaceInfo
from huaweicloudsdkdas.v3.model.list_api_versions_request import ListApiVersionsRequest
from huaweicloudsdkdas.v3.model.list_api_versions_response import ListApiVersionsResponse
from huaweicloudsdkdas.v3.model.list_db_users_request import ListDbUsersRequest
from huaweicloudsdkdas.v3.model.list_db_users_response import ListDbUsersResponse
from huaweicloudsdkdas.v3.model.list_innodb_locks_request import ListInnodbLocksRequest
from huaweicloudsdkdas.v3.model.list_innodb_locks_response import ListInnodbLocksResponse
from huaweicloudsdkdas.v3.model.list_metadata_locks_request import ListMetadataLocksRequest
from huaweicloudsdkdas.v3.model.list_metadata_locks_response import ListMetadataLocksResponse
from huaweicloudsdkdas.v3.model.list_processes_request import ListProcessesRequest
from huaweicloudsdkdas.v3.model.list_processes_response import ListProcessesResponse
from huaweicloudsdkdas.v3.model.list_space_analysis_request import ListSpaceAnalysisRequest
from huaweicloudsdkdas.v3.model.list_space_analysis_response import ListSpaceAnalysisResponse
from huaweicloudsdkdas.v3.model.list_sql_limit_rules_request import ListSqlLimitRulesRequest
from huaweicloudsdkdas.v3.model.list_sql_limit_rules_response import ListSqlLimitRulesResponse
from huaweicloudsdkdas.v3.model.metadata_lock import MetadataLock
from huaweicloudsdkdas.v3.model.process import Process
from huaweicloudsdkdas.v3.model.query_sql_plan_body import QuerySqlPlanBody
from huaweicloudsdkdas.v3.model.quotas import Quotas
from huaweicloudsdkdas.v3.model.register_db_user_request import RegisterDbUserRequest
from huaweicloudsdkdas.v3.model.register_db_user_request_body import RegisterDbUserRequestBody
from huaweicloudsdkdas.v3.model.register_db_user_response import RegisterDbUserResponse
from huaweicloudsdkdas.v3.model.resource import Resource
from huaweicloudsdkdas.v3.model.share_conn_user_info import ShareConnUserInfo
from huaweicloudsdkdas.v3.model.show_api_version_request import ShowApiVersionRequest
from huaweicloudsdkdas.v3.model.show_api_version_response import ShowApiVersionResponse
from huaweicloudsdkdas.v3.model.show_db_user_request import ShowDbUserRequest
from huaweicloudsdkdas.v3.model.show_db_user_response import ShowDbUserResponse
from huaweicloudsdkdas.v3.model.show_quotas_request import ShowQuotasRequest
from huaweicloudsdkdas.v3.model.show_quotas_response import ShowQuotasResponse
from huaweicloudsdkdas.v3.model.show_sql_execution_plan_request import ShowSqlExecutionPlanRequest
from huaweicloudsdkdas.v3.model.show_sql_execution_plan_response import ShowSqlExecutionPlanResponse
from huaweicloudsdkdas.v3.model.show_sql_explain_request import ShowSqlExplainRequest
from huaweicloudsdkdas.v3.model.show_sql_explain_response import ShowSqlExplainResponse
from huaweicloudsdkdas.v3.model.show_sql_limit_job_info_request import ShowSqlLimitJobInfoRequest
from huaweicloudsdkdas.v3.model.show_sql_limit_job_info_response import ShowSqlLimitJobInfoResponse
from huaweicloudsdkdas.v3.model.show_sql_limit_switch_status_request import ShowSqlLimitSwitchStatusRequest
from huaweicloudsdkdas.v3.model.show_sql_limit_switch_status_response import ShowSqlLimitSwitchStatusResponse
from huaweicloudsdkdas.v3.model.show_sql_switch_status_request import ShowSqlSwitchStatusRequest
from huaweicloudsdkdas.v3.model.show_sql_switch_status_response import ShowSqlSwitchStatusResponse
from huaweicloudsdkdas.v3.model.show_tuning_request import ShowTuningRequest
from huaweicloudsdkdas.v3.model.show_tuning_response import ShowTuningResponse
from huaweicloudsdkdas.v3.model.slow_log import SlowLog
from huaweicloudsdkdas.v3.model.slow_sql_template import SlowSqlTemplate
from huaweicloudsdkdas.v3.model.sql_limit_rule import SqlLimitRule
from huaweicloudsdkdas.v3.model.tb_pos_info import TbPosInfo
from huaweicloudsdkdas.v3.model.top_sql_template import TopSqlTemplate
from huaweicloudsdkdas.v3.model.top_sql_trend_item import TopSqlTrendItem
from huaweicloudsdkdas.v3.model.update_db_user_request import UpdateDbUserRequest
from huaweicloudsdkdas.v3.model.update_db_user_request_body import UpdateDbUserRequestBody
from huaweicloudsdkdas.v3.model.update_db_user_response import UpdateDbUserResponse
|
81c855df6cbdde996b6d2a3ae7ed959d11008006
|
c6759b857e55991fea3ef0b465dbcee53fa38714
|
/utils/bin/binary-size
|
8bba5397653227d0d241b165f1818a7f242f41ca
|
[
"Apache-2.0"
] |
permissive
|
GreenWaves-Technologies/gap_sdk
|
1b343bba97b7a5ce62a24162bd72eef5cc67e269
|
3fea306d52ee33f923f2423c5a75d9eb1c07e904
|
refs/heads/master
| 2023-09-01T14:38:34.270427
| 2023-08-10T09:04:44
| 2023-08-10T09:04:44
| 133,324,605
| 145
| 96
|
Apache-2.0
| 2023-08-27T19:03:52
| 2018-05-14T07:50:29
|
C
|
UTF-8
|
Python
| false
| false
| 7,210
|
binary-size
|
#!/usr/bin/env python3
import argparse
from subprocess import Popen, PIPE
import re
parser = argparse.ArgumentParser(
description='Report PulpOS footprint'
)
parser.add_argument("--binary", dest="binary", default=None, type=str, help="Specify the input binary")
parser.add_argument("--depth", dest="depth", default=0, type=int, help="Specify the display depth")
parser.add_argument("--groups", dest="groups", default="default", type=str, help="Specify the name of the groups to be used")
args = parser.parse_args()
class Symbol(object):
def __init__(self, line):
try:
self.num, self.val, self.size, self.type, self.bind, self.vis, self.udx, self.name = line.split()
self.size = int(self.size)
self.name = self.name.decode('utf-8')
except:
self.size = 0
class Group(object):
def __init__(self, name, regexps, childs=[]):
self.regexp = []
for regexp in regexps:
self.regexp.append(re.compile(regexp))
self.name = name
self.symbols = []
self.symbols_dict = {}
self.childs = childs
self.total = 0
self.own = 0
def add_child(self, child):
self.childs.append(child)
def match(self, symbol):
for child in self.childs:
if child.match(symbol):
self.total += symbol.size
return True
if symbol.size != 0:
for regexp in self.regexp:
if regexp.match(symbol.name):
self.total += symbol.size
self.own += symbol.size
self.symbols.append(symbol)
self.symbols_dict[symbol.name] = symbol
return True
return False
def dump(self, depth, indent=''):
if depth >= 0:
print (indent + self.name + ' ' + str(self.total))
if len(self.childs) != 0 and self.own != 0:
print (indent + ' Own ' + str(self.own))
for name in sorted(self.symbols_dict.keys()):
symbol = self.symbols_dict[name]
if len(self.childs) != 0:
print (indent + ' ' + symbol.name + ' ' + str(symbol.size))
else:
print (indent + ' ' + symbol.name + ' ' + str(symbol.size))
for child in self.childs:
child.dump(depth-1, indent + ' ')
print ()
else:
print (indent + 'Others ' + str(self.own))
for name in sorted(self.symbols_dict.keys()):
symbol = self.symbols_dict[name]
print (indent + ' ' + symbol.name + ' ' + str(symbol.size))
for child in self.childs:
child.dump(depth-1, indent)
groups = {
"pulpos": [
Group('PulpOS', ['pos_.*', 'pi_.*'], [
Group('PulpOS:kernel', ['pos_kernel.*', 'pos_soc_.*', 'pos_cbsys_.*']),
Group('PulpOS:fll', ['pos_fll.*', 'pos_freq.*', 'pi_freq_.*']),
Group('PulpOS:cpi', ['pos_cpi.*', 'pi_cpi_.*']),
Group('PulpOS:i2s', ['pos_i2s.*', 'pi_i2s_.*']),
Group('PulpOS:mram', ['pos_mram.*']),
Group('PulpOS:rtc', ['pos_rtc.*']),
Group('PulpOS:cluster', ['pos_cluster.*', 'pi_cluster_.*']),
Group('PulpOS:pmu', ['pos_pmu.*']),
Group('PulpOS:spim', ['pos_spim.*', 'pi_spi_.*']),
Group('PulpOS:hyper', ['pos_hyper.*', 'pi_hyper_.*']),
Group('PulpOS:flash', ['hyperflash_.*']),
Group('PulpOS:fs', ['__pi_read_.*', '__pi_host_.*', '__pi_fs_.*']),
Group('PulpOS:octospi', ['pos_octospi.*', 'pi_octospi_.*']),
Group('PulpOS:uart', ['pos_uart.*', 'pi_uart_.*']),
Group('PulpOS:init', ['pos_init.*']),
Group('PulpOS:irq', ['pos_irq.*']),
Group('PulpOS:time', ['pos_time.*']),
Group('PulpOS:alloc', ['pos_alloc.*', 'pos_free.*']),
Group('PulpOS:i2c', ['__pi_i2c.*', 'pi_i2c.*']),
Group('PulpOS:udma', ['pos_udma.*']),
Group('PulpOS:soc_event', ['pos_soc_event.*']),
Group('PulpOS:task', ['pos_task_.*', 'pos_sched.*']),
Group('PulpOS:libc', ['fprintf', 'vfprintf', 'printf', 'vprintf', 'sprintf', 'snprintf', 'vsnprintf', 'vsprintf', 'memmove', 'memcpy', 'puts','exit', 'strchr', 'pos_libc_.*', 'pos_io_.*']),
Group('PulpOS:pmsis', ['pi_.*']),
]),
Group('app', ['.*'])
],
"freertos": [
Group('FreeRTOS', ['pi_.*'], [
Group('FreeRTOS:kernel', [
'xTask.*', 'vQueue.*', 'xQueue.*', 'xYield.*', 'xTick.*', 'vTask.*', 'xSuspended.*',
'xIdle.*', 'xDelayed.*', 'xScheduler.*', 'xPending.*', 'xPended.*',
'xNextTaskUnblockTime', 'xNumOfOverflows', 'pxCurrentTCB', 'ulCriticalNesting',
'uPortSet_Interrupt_Mask_F', 'ulCriticalNesting', 'uxCurrentNumberOfTasks',
'uxDeletedTasksWaitingClea', 'uxIdleTaskStack', 'uxListRemove',
'uxSchedulerSuspended', 'uxTaskNumber', 'uxTopReadyPriority', 'uxTopUsedPriority',
'vApplicationGetIdleTaskMe', 'vApplicationIdleHook', 'vListInitialise',
'vListInitialiseItem', 'vListInsert', 'vListInsertEnd', 'vPortClear_Interrupt_Mask',
'vPortEnter_Critical', 'vPortExit_Critical', 'vPrvAssertFailed', 'vQueueDelete',
'vSetPendSV', 'prvAddNewTaskToReadyList', 'prvIdleTask', 'prvSetupTimerInterrupt',
'pvTaskIncrementMutexHeldC', 'pxDelayedTaskList', 'pxOverflowDelayedTaskList',
'pxPortInitialiseStack', 'pxReadyTasksLists'
]),
Group('FreeRTOS:cluster', ['__pi_cluster_.*', 'pi_cluster_.*']),
Group('FreeRTOS:cpi', ['__pi_cpi.*', 'pi_cpi_.*']),
Group('FreeRTOS:fll', ['pi_fll.*', '__pi_freq_.*', 'pi_freq_.*']),
Group('FreeRTOS:hyper', ['__pi_hyper_.*', 'pi_hyper_.*']),
Group('FreeRTOS:mram', ['pos_mram.*']),
Group('FreeRTOS:octospi', ['__pi_octospi.*', 'pi_octospi_.*']),
Group('FreeRTOS:pmu', ['__pi_pmu.*', 'pi_pmu.*']),
Group('FreeRTOS:rtc', ['__pi_rtc.*', 'pi_rtc.*']),
Group('FreeRTOS:i2c', ['__pi_i2c.*', 'pi_i2c.*']),
Group('FreeRTOS:spim', ['pos_spim.*', 'pi_spi_.*']),
Group('FreeRTOS:uart', ['__pi_uart.*', 'pi_uart_.*']),
Group('FreeRTOS:malloc', ['pi_malloc_.*', 'pi_l2_*', 'pi_l1_*', 'pi_cl_l1_*']),
Group('FreeRTOS:libc', ['fprintf', 'vfprintf', 'printf', 'vprintf', 'sprintf', 'snprintf', 'vsnprintf', 'vsprintf', 'memmove', 'memcpy', 'puts','exit', 'strchr']),
]),
Group('app', ['.*'])
],
"default": [
Group('app', ['.*'])
]
}
process = Popen(('readelf -s %s' % args.binary).split(), stdin=PIPE, stdout=PIPE)
reply = (process.communicate()[0])
for line in reply.splitlines():
symbol = Symbol(line)
for group in groups[args.groups]:
if group.match(symbol):
break
for group in groups[args.groups]:
group.dump(args.depth)
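# --- Usage sketch (illustrative; not part of the original script) ---
# Typical invocations (paths are placeholders):
#   ./binary-size --binary build/app/app.elf --groups pulpos --depth 1
#   ./binary-size --binary build/app/app.elf --groups freertos --depth 2
# Each symbol reported by ``readelf -s`` is attributed to the first group whose
# regular expressions (or those of its children) match the symbol name; --depth
# controls how many nested group levels are expanded in the report.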
|
|
b3e395563a7364b4cd138a8ba0496950329ae9b1
|
26004343b9839c082b3df8ecd2b32845ee0b4c7e
|
/tests/distributions/test_shape_utils.py
|
2c2598145b2863581708f67aa475f892e414f01f
|
[
"Apache-2.0",
"AFL-2.1",
"MIT"
] |
permissive
|
pymc-devs/pymc
|
b75df84ab084f9fa5d7c8ad927f7fa42fe4af14c
|
ddd1d4bf05a72895c67265f541585ae02bd338a3
|
refs/heads/main
| 2023-08-29T19:33:06.631697
| 2023-08-27T22:27:13
| 2023-08-27T22:27:13
| 31,075,313
| 1,046
| 291
|
AFL-3.0
| 2020-07-30T17:37:04
| 2015-02-20T17:12:00
|
Fortran
|
UTF-8
|
Python
| false
| false
| 25,966
|
py
|
test_shape_utils.py
|
# Copyright 2023 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import warnings
import numpy as np
import pytensor
import pytest
from pytensor import tensor as pt
from pytensor.compile.mode import Mode
from pytensor.graph import Constant, ancestors
from pytensor.tensor import TensorVariable
from pytensor.tensor.random import normal
from pytensor.tensor.shape import SpecifyShape
import pymc as pm
from pymc import ShapeError
from pymc.distributions.shape_utils import (
broadcast_dist_samples_shape,
change_dist_size,
convert_dims,
convert_shape,
convert_size,
get_support_shape,
get_support_shape_1d,
rv_size_is_none,
to_tuple,
)
from pymc.model import Model
test_shapes = [
(tuple(), (1,), (4,), (5, 4)),
(tuple(), (1,), (7,), (5, 4)),
(tuple(), (1,), (1, 4), (5, 4)),
(tuple(), (1,), (5, 1), (5, 4)),
(tuple(), (1,), (3, 4), (5, 4)),
(tuple(), (1,), (5, 3), (5, 4)),
(tuple(), (1,), (10, 4), (5, 4)),
(tuple(), (1,), (10,), (5, 4)),
(tuple(), (1,), (1, 1, 4), (5, 4)),
(tuple(), (1,), (10, 1, 4), (5, 4)),
(tuple(), (1,), (10, 5, 4), (5, 4)),
]
test_sizes = [
None,
tuple(),
1,
(1,),
10,
(10,),
(1, 1),
(10, 1),
(1, 10),
(5,),
(5, 4),
(1, 1, 1, 1),
]
test_to_shapes = [None, tuple(), (10, 5, 4), (10, 1, 1, 5, 1)]
@pytest.fixture(params=test_sizes, ids=str)
def fixture_sizes(request):
return request.param
@pytest.fixture(params=test_shapes, ids=str)
def fixture_shapes(request):
return request.param
@pytest.fixture(params=[False, True], ids=str)
def fixture_exception_handling(request):
return request.param
class TestShapesBroadcasting:
def test_broadcasting(self, fixture_shapes):
shapes = fixture_shapes
try:
expected_out = np.broadcast(*(np.empty(s) for s in shapes)).shape
except ValueError:
expected_out = None
if expected_out is None:
with pytest.raises(ValueError):
np.broadcast_shapes(*shapes)
else:
out = np.broadcast_shapes(*shapes)
assert out == expected_out
def test_broadcast_dist_samples_shape(self, fixture_sizes, fixture_shapes):
size = fixture_sizes
shapes = fixture_shapes
size_ = to_tuple(size)
shapes_ = [
s if s[: min([len(size_), len(s)])] != size_ else s[len(size_) :] for s in shapes
]
try:
expected_out = np.broadcast(*(np.empty(s) for s in shapes_)).shape
except ValueError:
expected_out = None
if expected_out is not None and any(
s[: min([len(size_), len(s)])] == size_ for s in shapes
):
expected_out = size_ + expected_out
if expected_out is None:
with pytest.raises(ValueError):
broadcast_dist_samples_shape(shapes, size=size)
else:
out = broadcast_dist_samples_shape(shapes, size=size)
assert out == expected_out
class TestSizeShapeDimsObserved:
@pytest.mark.parametrize("param_shape", [(), (2,)])
@pytest.mark.parametrize("batch_shape", [(), (3,)])
@pytest.mark.parametrize(
"parametrization",
[
"implicit",
"shape",
"dims",
"size",
],
)
def test_param_and_batch_shape_combos(
self, param_shape: tuple, batch_shape: tuple, parametrization: str
):
coords = {}
param_dims = []
batch_dims = []
# Create coordinates corresponding to the parameter shape
for d in param_shape:
dname = f"param_dim_{d}"
coords[dname] = [f"c_{i}" for i in range(d)]
param_dims.append(dname)
assert len(param_dims) == len(param_shape)
# Create coordinates corresponding to the batch shape
for d in batch_shape:
dname = f"batch_dim_{d}"
coords[dname] = [f"c_{i}" for i in range(d)]
batch_dims.append(dname)
assert len(batch_dims) == len(batch_shape)
with pm.Model(coords=coords) as pmodel:
mu = pytensor.shared(np.random.normal(size=param_shape))
with warnings.catch_warnings():
warnings.simplefilter("error")
if parametrization == "implicit":
rv = pm.Normal("rv", mu=mu)
assert rv.eval().shape == param_shape
else:
expected_shape = batch_shape + param_shape
if parametrization == "shape":
rv = pm.Normal("rv", mu=mu, shape=batch_shape + param_shape)
assert rv.eval().shape == expected_shape
elif parametrization == "dims":
rv = pm.Normal("rv", mu=mu, dims=batch_dims + param_dims)
assert rv.eval().shape == expected_shape
elif parametrization == "size":
rv = pm.Normal("rv", mu=mu, size=batch_shape + param_shape)
assert rv.eval().shape == expected_shape
else:
raise NotImplementedError("Invalid test case parametrization.")
def test_broadcast_by_dims(self):
with pm.Model(coords={"broadcast_dim": range(3)}) as m:
x = pm.Normal("x", mu=np.zeros((1,)), dims=("broadcast_dim",))
assert x.eval().shape == (3,)
def test_broadcast_by_observed(self):
with pm.Model() as m:
x = pm.Normal("x", mu=np.zeros((1,)), observed=np.zeros((3,)))
assert x.eval().shape == (3,)
def test_simultaneous_shape_and_dims(self):
with pm.Model() as pmodel:
x = pm.ConstantData("x", [1, 2, 3], dims="ddata")
# The shape and dims tuples correspond to each other.
# Note: No checks are performed that implied shape (x), shape and dims actually match.
y = pm.Normal("y", mu=x, shape=(2, 3), dims=("dshape", "ddata"))
assert pmodel.named_vars_to_dims["y"] == ("dshape", "ddata")
assert "dshape" in pmodel.dim_lengths
assert y.eval().shape == (2, 3)
def test_simultaneous_size_and_dims(self):
with pm.Model() as pmodel:
x = pm.ConstantData("x", [1, 2, 3], dims="ddata")
assert "ddata" in pmodel.dim_lengths
# Size does not include support dims, so this test must use a dist with support dims.
kwargs = dict(name="y", size=(2, 3), mu=pt.ones((3, 4)), cov=pt.eye(4))
y = pm.MvNormal(**kwargs, dims=("dsize", "ddata", "dsupport"))
assert pmodel.named_vars_to_dims["y"] == ("dsize", "ddata", "dsupport")
assert "dsize" in pmodel.dim_lengths
assert y.eval().shape == (2, 3, 4)
def test_simultaneous_dims_and_observed(self):
with pm.Model() as pmodel:
x = pm.ConstantData("x", [1, 2, 3], dims="ddata")
assert "ddata" in pmodel.dim_lengths
# Note: No checks are performed that observed and dims actually match.
y = pm.Normal("y", observed=[0, 0, 0], dims="ddata")
assert pmodel.named_vars_to_dims["y"] == ("ddata",)
assert y.eval().shape == (3,)
def test_define_dims_on_the_fly_raises(self):
# Check that trying to use dims that are not pre-specified fails, even if their
# length could be inferred from the shape of the variables
msg = "Dimensions {'patient'} are unknown to the model"
with pm.Model() as pmodel:
with pytest.raises(KeyError, match=msg):
pm.Normal("x", [0, 1, 2], dims=("patient",))
with pytest.raises(KeyError, match=msg):
pm.Normal("x", observed=[0, 1, 2], dims=("patient",))
def test_can_resize_data_defined_size(self):
with pm.Model() as pmodel:
x = pm.MutableData("x", [[1, 2, 3, 4]], dims=("first", "second"))
y = pm.Normal("y", mu=0, dims=("first", "second"))
z = pm.Normal("z", mu=y, observed=np.ones((1, 4)), size=y.shape)
assert x.eval().shape == (1, 4)
assert y.eval().shape == (1, 4)
assert z.eval().shape == (1, 4)
assert "first" in pmodel.dim_lengths
assert "second" in pmodel.dim_lengths
pmodel.set_data("x", [[1, 2], [3, 4], [5, 6]])
assert x.eval().shape == (3, 2)
assert y.eval().shape == (3, 2)
assert z.eval().shape == (3, 2)
def test_size32_doesnt_break_broadcasting(self):
size32 = pt.constant([1, 10], dtype="int32")
rv = pm.Normal.dist(0, 1, size=size32)
assert rv.broadcastable == (True, False)
def test_observed_with_column_vector(self):
"""This test is related to https://github.com/pymc-devs/pytensor/issues/390 which breaks
broadcastability of column-vector RVs. This unexpected change in type can lead to
incompatibilities during graph rewriting for model.logp evaluation.
"""
with pm.Model() as model:
# The `observed` is a broadcastable column vector
obs = [
pt.as_tensor_variable(np.ones((3, 1), dtype=pytensor.config.floatX))
for _ in range(4)
]
assert all(obs_.broadcastable == (False, True) for obs_ in obs)
# Both shapes describe broadcastable column vectors
size64 = pt.constant([3, 1], dtype="int64")
# But the second shape is upcasted from an int32 vector
cast64 = pt.cast(pt.constant([3, 1], dtype="int32"), dtype="int64")
pm.Normal("size64", mu=0, sigma=1, size=size64, observed=obs[0])
pm.Normal("shape64", mu=0, sigma=1, shape=size64, observed=obs[1])
assert model.compile_logp()({})
pm.Normal("size_cast64", mu=0, sigma=1, size=cast64, observed=obs[2])
pm.Normal("shape_cast64", mu=0, sigma=1, shape=cast64, observed=obs[3])
assert model.compile_logp()({})
def test_dist_api_works(self):
mu = pytensor.shared(np.array([1, 2, 3]))
with pytest.raises(NotImplementedError, match="API is not supported"):
pm.Normal.dist(mu=mu, dims=("town",))
assert pm.Normal.dist(mu=mu, shape=(3,)).eval().shape == (3,)
assert pm.Normal.dist(mu=mu, shape=(5, 3)).eval().shape == (5, 3)
assert pm.Normal.dist(mu=mu, size=(3,)).eval().shape == (3,)
assert pm.Normal.dist(mu=mu, size=(4, 3)).eval().shape == (4, 3)
def test_mvnormal_shape_size_difference(self):
# Parameters add one batch dimension (4), shape is what you'd expect.
# Under the hood the shape(4, 3) becomes size=(4,) and the RV is initially
# created as (4, 4, 3). The internal ndim-check then recreates it with size=None.
rv = pm.MvNormal.dist(mu=np.ones((4, 3)), cov=np.eye(3), shape=(4, 3))
assert rv.ndim == 2
assert tuple(rv.shape.eval()) == (4, 3)
# shape adds two dimensions (5, 4)
# Under the hood the shape=(5, 4, 3) becomes size=(5, 4).
# The RV is created as (5, 4, 3) right away.
rv = pm.MvNormal.dist(mu=[1, 2, 3], cov=np.eye(3), shape=(5, 4, 3))
assert rv.ndim == 3
assert tuple(rv.shape.eval()) == (5, 4, 3)
# parameters add 1 batch dimension (4), shape adds another (5)
# Under the hood the shape=(5, 4, 3) becomes size=(5, 4)
# The RV is initially created as (5, 4, 3, 4, 3) and then recreated and resized.
rv = pm.MvNormal.dist(mu=np.ones((4, 3)), cov=np.eye(3), shape=(5, 4, 3))
assert rv.ndim == 3
assert tuple(rv.shape.eval()) == (5, 4, 3)
rv = pm.MvNormal.dist(mu=[1, 2, 3], cov=np.eye(3), size=(5, 4))
assert tuple(rv.shape.eval()) == (5, 4, 3)
rv = pm.MvNormal.dist(mu=np.ones((5, 4, 3)), cov=np.eye(3), size=(5, 4))
assert tuple(rv.shape.eval()) == (5, 4, 3)
def test_convert_dims(self):
assert convert_dims(dims="town") == ("town",)
with pytest.raises(ValueError, match="must be a tuple, str or list"):
convert_dims(3)
def test_convert_shape(self):
assert convert_shape(5) == (5,)
with pytest.raises(ValueError, match="tuple, TensorVariable, int or list"):
convert_shape(shape="notashape")
def test_convert_size(self):
assert convert_size(7) == (7,)
with pytest.raises(ValueError, match="tuple, TensorVariable, int or list"):
convert_size(size="notasize")
def test_lazy_flavors(self):
assert pm.Uniform.dist(2, [4, 5], size=[3, 2]).eval().shape == (3, 2)
assert pm.Uniform.dist(2, [4, 5], shape=[3, 2]).eval().shape == (3, 2)
with pm.Model(coords=dict(town=["Greifswald", "Madrid"])):
assert pm.Normal("n1", mu=[1, 2], dims="town").eval().shape == (2,)
assert pm.Normal("n2", mu=[1, 2], dims=["town"]).eval().shape == (2,)
def test_invalid_flavors(self):
with pytest.raises(ValueError, match="Passing both"):
pm.Normal.dist(0, 1, shape=(3,), size=(3,))
def test_size_from_dims_rng_update(self):
"""Test that when setting size from dims we update the rng properly
See https://github.com/pymc-devs/pymc/issues/5653
"""
with pm.Model(coords=dict(x_dim=range(2))):
x = pm.Normal("x", dims=("x_dim",))
fn = pm.pytensorf.compile_pymc([], x)
# Check that both function outputs (rng and draws) come from the same Apply node
assert fn.maker.fgraph.outputs[0].owner is fn.maker.fgraph.outputs[1].owner
# Confirm that the rng is properly offset, otherwise the second value of the first
# draw would match the first value of the second draw
assert fn()[1] != fn()[0]
def test_size_from_observed_rng_update(self):
"""Test that when setting size from observed we update the rng properly
See https://github.com/pymc-devs/pymc/issues/5653
"""
with pm.Model():
x = pm.Normal("x", observed=[0, 1])
fn = pm.pytensorf.compile_pymc([], x)
# Check that both function outputs (rng and draws) come from the same Apply node
assert fn.maker.fgraph.outputs[0].owner is fn.maker.fgraph.outputs[1].owner
# Confirm that the rng is properly offset, otherwise the second value of the first
# draw would match the first value of the second draw
assert fn()[1] != fn()[0]
def test_explicit_size_shape_none(self):
with pm.Model() as m:
x = pm.Normal("x", shape=None, observed=[1, 2, 3])
y = pm.Normal("y", size=None, observed=[1, 2, 3, 4])
assert x.shape.eval().item() == 3
assert y.shape.eval().item() == 4
def test_rv_size_is_none():
rv = pm.Normal.dist(0, 1, size=None)
assert rv_size_is_none(rv.owner.inputs[1])
rv = pm.Normal.dist(0, 1, size=())
assert rv_size_is_none(rv.owner.inputs[1])
rv = pm.Normal.dist(0, 1, size=1)
assert not rv_size_is_none(rv.owner.inputs[1])
size = pm.Bernoulli.dist(0.5)
rv = pm.Normal.dist(0, 1, size=size)
assert not rv_size_is_none(rv.owner.inputs[1])
size = pm.Normal.dist(0, 1).size
rv = pm.Normal.dist(0, 1, size=size)
assert not rv_size_is_none(rv.owner.inputs[1])
def test_change_rv_size():
loc = pt.as_tensor_variable([1, 2])
rng = pytensor.shared(np.random.default_rng())
rv = normal(loc=loc, rng=rng)
assert rv.ndim == 1
assert tuple(rv.shape.eval()) == (2,)
with pytest.raises(ShapeError, match="must be ≤1-dimensional"):
change_dist_size(rv, new_size=[[2, 3]])
with pytest.raises(ShapeError, match="must be ≤1-dimensional"):
change_dist_size(rv, new_size=pt.as_tensor_variable([[2, 3], [4, 5]]))
rv_new = change_dist_size(rv, new_size=(3,), expand=True)
assert rv_new.ndim == 2
assert tuple(rv_new.shape.eval()) == (3, 2)
# Make sure that the shape used to determine the expanded size doesn't
# depend on the old `RandomVariable`.
rv_new_ancestors = set(ancestors((rv_new,)))
assert loc in rv_new_ancestors
assert rv not in rv_new_ancestors
# Check that the old rng is not reused
assert rv_new.owner.inputs[0] is not rng
rv_newer = change_dist_size(rv_new, new_size=(4,), expand=True)
assert rv_newer.ndim == 3
assert tuple(rv_newer.shape.eval()) == (4, 3, 2)
# Make sure we avoid introducing a `Cast` by converting the new size before
# constructing the new `RandomVariable`
rv = normal(0, 1)
new_size = np.array([4, 3], dtype="int32")
rv_newer = change_dist_size(rv, new_size=new_size, expand=False)
assert rv_newer.ndim == 2
assert isinstance(rv_newer.owner.inputs[1], Constant)
assert tuple(rv_newer.shape.eval()) == (4, 3)
rv = normal(0, 1)
new_size = pt.as_tensor(np.array([4, 3], dtype="int32"))
rv_newer = change_dist_size(rv, new_size=new_size, expand=True)
assert rv_newer.ndim == 2
assert tuple(rv_newer.shape.eval()) == (4, 3)
rv = normal(0, 1)
new_size = pt.as_tensor(2, dtype="int32")
rv_newer = change_dist_size(rv, new_size=new_size, expand=True)
assert rv_newer.ndim == 1
assert tuple(rv_newer.shape.eval()) == (2,)
def test_change_rv_size_default_update():
rng = pytensor.shared(np.random.default_rng(0))
x = normal(rng=rng)
# Test that "traditional" default_update is translated to the new rng
rng.default_update = x.owner.outputs[0]
new_x = change_dist_size(x, new_size=(2,))
new_rng = new_x.owner.inputs[0]
assert rng.default_update is x.owner.outputs[0]
assert new_rng.default_update is new_x.owner.outputs[0]
# Test that "non-traditional" default_update raises UserWarning
next_rng = pytensor.shared(np.random.default_rng(1))
rng.default_update = next_rng
with pytest.warns(UserWarning, match="could not be replicated in resized variable"):
new_x = change_dist_size(x, new_size=(2,))
new_rng = new_x.owner.inputs[0]
assert rng.default_update is next_rng
assert new_rng.default_update is None
# Test that default_update is not set if it was None before
rng.default_update = None
new_x = change_dist_size(x, new_size=(2,))
new_rng = new_x.owner.inputs[0]
assert new_rng.default_update is None
def test_change_specify_shape_size_univariate():
with pytensor.config.change_flags(mode=Mode("py")):
s1, s2 = pt.iscalars("s1", "s2")
x = pt.random.normal(size=(s1, s2))
x = pt.specify_shape(x, (5, 3))
x.eval({s1: 5, s2: 3}).shape == (5, 3)
new_x = change_dist_size(x, (10, 5))
# SpecifyShape is no longer present
assert not isinstance(new_x.owner.op, SpecifyShape)
assert new_x.eval().shape == (10, 5)
new_x = change_dist_size(x, (10, 5), expand=True)
# SpecifyShape is still present
assert isinstance(new_x.owner.op, SpecifyShape)
new_x.eval({s1: 5, s2: 3}).shape == (10, 5, 5, 3)
with pytest.raises(AssertionError, match=re.escape("expected (None, None, 5, 3)")):
new_x.eval({s1: 6, s2: 3})
def test_change_specify_shape_size_multivariate():
with pytensor.config.change_flags(mode=Mode("py")):
batch, supp = pt.iscalars("batch", "supp")
x = pt.random.multivariate_normal(pt.zeros(supp), pt.eye(supp), size=(batch,))
x = pt.specify_shape(x, (5, 3))
x.eval({batch: 5, supp: 3}).shape == (5, 3)
new_x = change_dist_size(x, (10, 5))
# SpecifyShape is still present in the support dimension
assert isinstance(new_x.owner.op, SpecifyShape)
assert new_x.eval({supp: 3}).shape == (10, 5, 3)
with pytest.raises(AssertionError, match=re.escape("expected (None, None, 3)")):
new_x.eval({supp: 4})
new_x = change_dist_size(x, (10, 5), expand=True)
# SpecifyShape is still present in both support and batch dimension
assert isinstance(new_x.owner.op, SpecifyShape)
new_x.eval({batch: 5, supp: 3}).shape == (10, 5, 5, 3)
with pytest.raises(AssertionError, match=re.escape("expected (None, None, 5, 3)")):
new_x.eval({batch: 6, supp: 3}).shape == (10, 5, 5, 3)
@pytest.mark.parametrize(
"support_shape, shape, support_shape_offset, expected_support_shape, consistent",
[
(10, None, 0, 10, True),
(10, None, 1, 10, True),
(None, (10,), 0, 10, True),
(None, (10,), 1, 9, True),
(None, (10, 5), 0, 5, True),
(None, None, 0, None, True),
(10, (10,), 0, 10, True),
(10, (11,), 1, 10, True),
(10, (5, 5), 0, 5, False),
(10, (5, 10), 1, 9, False),
],
)
@pytest.mark.parametrize("info_source", ("shape", "dims", "observed"))
def test_get_support_shape_1d(
info_source, support_shape, shape, support_shape_offset, expected_support_shape, consistent
):
if info_source == "shape":
inferred_support_shape = get_support_shape_1d(
support_shape=support_shape, shape=shape, support_shape_offset=support_shape_offset
)
elif info_source == "dims":
if shape is None:
dims = None
coords = {}
else:
dims = tuple(str(i) for i, _ in enumerate(shape))
coords = {str(i): range(shape) for i, shape in enumerate(shape)}
with Model(coords=coords):
inferred_support_shape = get_support_shape_1d(
support_shape=support_shape, dims=dims, support_shape_offset=support_shape_offset
)
elif info_source == "observed":
if shape is None:
observed = None
else:
observed = np.zeros(shape)
inferred_support_shape = get_support_shape_1d(
support_shape=support_shape,
observed=observed,
support_shape_offset=support_shape_offset,
)
if not isinstance(inferred_support_shape, TensorVariable):
assert inferred_support_shape == expected_support_shape
else:
if consistent:
assert inferred_support_shape.eval() == expected_support_shape
else:
# check that the inferred support shape is still correct by ignoring the assert
f = pytensor.function(
[], inferred_support_shape, mode=Mode().including("local_remove_all_assert")
)
assert f() == expected_support_shape
with pytest.raises(AssertionError, match="support_shape does not match"):
inferred_support_shape.eval()
@pytest.mark.parametrize(
"support_shape, shape, support_shape_offset, expected_support_shape, ndim_supp, consistent",
[
((10, 5), None, (0,), (10, 5), 1, True),
((10, 5), None, (1, 1), (10, 5), 1, True),
(None, (10, 5), (0,), 5, 1, True),
(None, (10, 5), (1,), 4, 1, True),
(None, (10, 5, 2), (0,), 2, 1, True),
(None, None, None, None, 1, True),
((10, 5), (10, 5), None, (10, 5), 2, True),
((10, 5), (11, 10, 5), None, (10, 5), 2, True),
(None, (11, 10, 5), (0, 1, 0), (11, 9, 5), 3, True),
((10, 5), (10, 5, 5), (0,), (5,), 1, False),
((10, 5), (10, 5), (1, 1), (9, 4), 2, False),
],
)
@pytest.mark.parametrize("info_source", ("shape", "dims", "observed"))
def test_get_support_shape(
info_source,
support_shape,
shape,
support_shape_offset,
expected_support_shape,
ndim_supp,
consistent,
):
if info_source == "shape":
inferred_support_shape = get_support_shape(
support_shape=support_shape,
shape=shape,
support_shape_offset=support_shape_offset,
ndim_supp=ndim_supp,
)
elif info_source == "dims":
if shape is None:
dims = None
coords = {}
else:
dims = tuple(str(i) for i, _ in enumerate(shape))
coords = {str(i): range(shape) for i, shape in enumerate(shape)}
with Model(coords=coords):
inferred_support_shape = get_support_shape(
support_shape=support_shape,
dims=dims,
support_shape_offset=support_shape_offset,
ndim_supp=ndim_supp,
)
elif info_source == "observed":
if shape is None:
observed = None
else:
observed = np.zeros(shape)
inferred_support_shape = get_support_shape(
support_shape=support_shape,
observed=observed,
support_shape_offset=support_shape_offset,
ndim_supp=ndim_supp,
)
if not isinstance(inferred_support_shape, TensorVariable):
assert inferred_support_shape == expected_support_shape
else:
if consistent:
assert (inferred_support_shape.eval() == expected_support_shape).all()
else:
# check that inferred support shape is still correct by ignoring the assert
f = pytensor.function(
[], inferred_support_shape, mode=Mode().including("local_remove_all_assert")
)
assert (f() == expected_support_shape).all()
with pytest.raises(AssertionError, match="support_shape does not match"):
inferred_support_shape.eval()
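# --- Usage sketch (illustrative; not part of the original test module) ---
# The parametrizations exercised above are interchangeable ways of giving a random
# variable its batch dimensions (the coordinate name below is illustrative):
#
#   with pm.Model(coords={"city": ["a", "b", "c"]}):
#       pm.Normal("x", shape=(3,))     # explicit shape
#       pm.Normal("y", size=(3,))      # explicit size (batch dims only)
#       pm.Normal("z", dims="city")    # length inferred from the coordinate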
|
d0de979fde89963bece5abba3dd99e895e916461
|
d139ef8d18fcde584b06c1d7d25477d7d31ee59b
|
/google/ads/googleads/v14/common/types/metric_goal.py
|
c2909a3efed5f7c8a21b8d1a311687772a23bf1f
|
[
"Apache-2.0"
] |
permissive
|
googleads/google-ads-python
|
a53993e6be057d3aa61f276b69e97b8b338d1c12
|
146d7070c1ea2140555d49d73c77892430b37314
|
refs/heads/main
| 2023-08-31T01:58:16.738997
| 2023-06-05T08:18:42
| 2023-08-28T19:08:38
| 143,435,091
| 422
| 525
|
Apache-2.0
| 2023-09-12T17:46:52
| 2018-08-03T14:08:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,025
|
py
|
metric_goal.py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import proto # type: ignore
from google.ads.googleads.v14.enums.types import experiment_metric
from google.ads.googleads.v14.enums.types import experiment_metric_direction
__protobuf__ = proto.module(
package="google.ads.googleads.v14.common",
marshal="google.ads.googleads.v14",
manifest={
"MetricGoal",
},
)
class MetricGoal(proto.Message):
r"""A metric goal for an experiment.
Attributes:
metric (google.ads.googleads.v14.enums.types.ExperimentMetricEnum.ExperimentMetric):
The metric of the goal. For example, clicks,
impressions, cost, conversions, etc.
direction (google.ads.googleads.v14.enums.types.ExperimentMetricDirectionEnum.ExperimentMetricDirection):
The metric direction of the goal. For
example, increase, decrease, no change.
"""
metric: experiment_metric.ExperimentMetricEnum.ExperimentMetric = (
proto.Field(
proto.ENUM,
number=1,
enum=experiment_metric.ExperimentMetricEnum.ExperimentMetric,
)
)
direction: experiment_metric_direction.ExperimentMetricDirectionEnum.ExperimentMetricDirection = proto.Field(
proto.ENUM,
number=2,
enum=experiment_metric_direction.ExperimentMetricDirectionEnum.ExperimentMetricDirection,
)
__all__ = tuple(sorted(__protobuf__.manifest))
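# --- Usage sketch (illustrative; not part of the generated module) ---
# MetricGoal is a plain proto-plus message, so it can be constructed directly; the
# enum members below mirror the examples given in the docstring (clicks / increase):
#
#   goal = MetricGoal(
#       metric=experiment_metric.ExperimentMetricEnum.ExperimentMetric.CLICKS,
#       direction=experiment_metric_direction.ExperimentMetricDirectionEnum.ExperimentMetricDirection.INCREASE,
#   )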
|
a33787e8b8a7c20998ce0f9afced3ca94540e238
|
620323fc090cebaf7aca456ff3f7fbbe1e210394
|
/qt__pyqt__pyside__pyqode/clickable_image/main.py
|
85792d283affb06bd8f1daa1df51452c6b5639ce
|
[
"CC-BY-4.0"
] |
permissive
|
gil9red/SimplePyScripts
|
bd2733372728bf9b9f00570e90316fa12116516b
|
773c2c9724edd8827a1dbd91694d780e03fcb05a
|
refs/heads/master
| 2023-08-31T04:26:09.120173
| 2023-08-30T17:22:59
| 2023-08-30T17:22:59
| 22,650,442
| 157
| 46
| null | 2023-09-08T17:51:33
| 2014-08-05T16:19:52
|
Python
|
UTF-8
|
Python
| false
| false
| 549
|
py
|
main.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
from PyQt5 import Qt
app = Qt.QApplication([])
button = Qt.QToolButton()
button.setIcon(Qt.QIcon("img.png"))
button.setAutoRaise(True)
button.setMinimumSize(40, 50)
button.setIconSize(button.minimumSize())
layout = Qt.QVBoxLayout()
layout.addWidget(Qt.QLabel("Example:"))
layout.addWidget(button)
central = Qt.QWidget()
central.setStyleSheet("background-color: green;")
central.setLayout(layout)
mw = Qt.QMainWindow()
mw.setCentralWidget(central)
mw.show()
app.exec()
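# --- Usage sketch (illustrative; not part of the original script) ---
# To make the image actually react to clicks, connect the tool button's ``clicked``
# signal before ``app.exec()`` (the handler below is illustrative):
#
#   button.clicked.connect(lambda: print("image clicked"))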
|
79f2365a8168224bda568ae2ef02af13f4cb8759
|
67efebf179c4ef948a506b5c468ae717c958c8f8
|
/chapter7/utils/__init__.py
|
a7ecb871a41e557a6c93e06d8ff467a8470568a3
|
[
"MIT"
] |
permissive
|
miyamotok0105/pytorch_handbook
|
9ed1e90f24d4f21452f84c231b880e72b75546d0
|
1a5f1549c4049d819a0f74f3287879c981ec7f6c
|
refs/heads/master
| 2023-03-05T01:39:02.453098
| 2023-02-23T12:44:05
| 2023-02-23T12:44:05
| 150,241,921
| 127
| 68
|
MIT
| 2023-02-23T12:44:06
| 2018-09-25T09:42:20
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 42
|
py
|
__init__.py
|
from .augmentations import SSDAugmentation
|
e0a326171210d7b89abbca3f3c30b182fe890cf5
|
11cd362cdd78c2fc48042ed203614b201ac94aa6
|
/desktop/core/ext-py3/boto-2.49.0/tests/unit/s3/test_key.py
|
26e2fc82c943303e938bd3122908957756365ffb
|
[
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0-only",
"CC-BY-4.0",
"LicenseRef-scancode-jpython-1.1",
"AFL-2.1",
"JSON",
"WTFPL",
"MIT",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-jython",
"GPL-3.0-or-later",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LGPL-3.0-or-later",
"Zlib",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"MPL-2.0",
"ISC",
"GPL-2.0-only",
"ZPL-2.1",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Xnet",
"BSD-2-Clause"
] |
permissive
|
cloudera/hue
|
b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908
|
dccb9467675c67b9c3399fc76c5de6d31bfb8255
|
refs/heads/master
| 2023-08-31T06:49:25.724501
| 2023-08-28T20:45:00
| 2023-08-28T20:45:00
| 732,593
| 5,655
| 2,244
|
Apache-2.0
| 2023-09-14T03:05:41
| 2010-06-21T19:46:51
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 8,633
|
py
|
test_key.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from tests.compat import mock, unittest
from tests.unit import AWSMockServiceTestCase
from boto.compat import StringIO
from boto.exception import BotoServerError
from boto.s3.connection import S3Connection
from boto.s3.bucket import Bucket
from boto.s3.key import Key
class TestS3Key(AWSMockServiceTestCase):
connection_class = S3Connection
def setUp(self):
super(TestS3Key, self).setUp()
def default_body(self):
return "default body"
def test_unicode_name(self):
k = Key()
k.name = u'Österreich'
print(repr(k))
def test_when_no_restore_header_present(self):
self.set_http_response(status_code=200)
b = Bucket(self.service_connection, 'mybucket')
k = b.get_key('myglacierkey')
self.assertIsNone(k.ongoing_restore)
self.assertIsNone(k.expiry_date)
def test_restore_header_with_ongoing_restore(self):
self.set_http_response(
status_code=200,
header=[('x-amz-restore', 'ongoing-request="true"')])
b = Bucket(self.service_connection, 'mybucket')
k = b.get_key('myglacierkey')
self.assertTrue(k.ongoing_restore)
self.assertIsNone(k.expiry_date)
def test_restore_completed(self):
self.set_http_response(
status_code=200,
header=[('x-amz-restore',
'ongoing-request="false", '
'expiry-date="Fri, 21 Dec 2012 00:00:00 GMT"')])
b = Bucket(self.service_connection, 'mybucket')
k = b.get_key('myglacierkey')
self.assertFalse(k.ongoing_restore)
self.assertEqual(k.expiry_date, 'Fri, 21 Dec 2012 00:00:00 GMT')
def test_delete_key_return_key(self):
self.set_http_response(status_code=204, body='')
b = Bucket(self.service_connection, 'mybucket')
key = b.delete_key('fookey')
self.assertIsNotNone(key)
def test_storage_class(self):
self.set_http_response(status_code=200)
b = Bucket(self.service_connection, 'mybucket')
k = b.get_key('fookey')
# Mock out the bucket object - we really only care about calls
# to list.
k.bucket = mock.MagicMock()
# Default behavior doesn't call list
k.set_contents_from_string('test')
k.bucket.list.assert_not_called()
# Direct access calls list to get the real value if unset,
# and still defaults to STANDARD if unavailable.
sc_value = k.storage_class
self.assertEqual(sc_value, 'STANDARD')
k.bucket.list.assert_called_with(k.name.encode('utf-8'))
k.bucket.list.reset_mock()
# Setting manually doesn't call list
k.storage_class = 'GLACIER'
k.set_contents_from_string('test')
k.bucket.list.assert_not_called()
def test_change_storage_class(self):
self.set_http_response(status_code=200)
b = Bucket(self.service_connection, 'mybucket')
k = b.get_key('fookey')
# Mock out Key.copy so we can record calls to it
k.copy = mock.MagicMock()
# Mock out the bucket so we don't actually need to have fake responses
k.bucket = mock.MagicMock()
k.bucket.name = 'mybucket'
self.assertEqual(k.storage_class, 'STANDARD')
# The default change_storage_class call should result in a copy to our
# bucket
k.change_storage_class('REDUCED_REDUNDANCY')
k.copy.assert_called_with(
'mybucket',
'fookey',
reduced_redundancy=True,
preserve_acl=True,
validate_dst_bucket=True,
)
def test_change_storage_class_new_bucket(self):
self.set_http_response(status_code=200)
b = Bucket(self.service_connection, 'mybucket')
k = b.get_key('fookey')
# Mock out Key.copy so we can record calls to it
k.copy = mock.MagicMock()
# Mock out the bucket so we don't actually need to have fake responses
k.bucket = mock.MagicMock()
k.bucket.name = 'mybucket'
self.assertEqual(k.storage_class, 'STANDARD')
# Specifying a different dst_bucket should result in a copy to the new
# bucket
k.copy.reset_mock()
k.change_storage_class('REDUCED_REDUNDANCY', dst_bucket='yourbucket')
k.copy.assert_called_with(
'yourbucket',
'fookey',
reduced_redundancy=True,
preserve_acl=True,
validate_dst_bucket=True,
)
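# Helper decorator: counts how many times the wrapped function is called (used below to verify retry behaviour).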
def counter(fn):
def _wrapper(*args, **kwargs):
_wrapper.count += 1
return fn(*args, **kwargs)
_wrapper.count = 0
return _wrapper
class TestS3KeyRetries(AWSMockServiceTestCase):
connection_class = S3Connection
@mock.patch('time.sleep')
def test_500_retry(self, sleep_mock):
self.set_http_response(status_code=500)
b = Bucket(self.service_connection, 'mybucket')
k = b.new_key('test_failure')
fail_file = StringIO('This will attempt to retry.')
with self.assertRaises(BotoServerError):
k.send_file(fail_file)
@mock.patch('time.sleep')
def test_400_timeout(self, sleep_mock):
weird_timeout_body = "<Error><Code>RequestTimeout</Code></Error>"
self.set_http_response(status_code=400, body=weird_timeout_body)
b = Bucket(self.service_connection, 'mybucket')
k = b.new_key('test_failure')
fail_file = StringIO('This will pretend to be chunk-able.')
k.should_retry = counter(k.should_retry)
self.assertEqual(k.should_retry.count, 0)
with self.assertRaises(BotoServerError):
k.send_file(fail_file)
self.assertEqual(k.should_retry.count, 1)
@mock.patch('time.sleep')
def test_502_bad_gateway(self, sleep_mock):
weird_timeout_body = "<Error><Code>BadGateway</Code></Error>"
self.set_http_response(status_code=502, body=weird_timeout_body)
b = Bucket(self.service_connection, 'mybucket')
k = b.new_key('test_failure')
fail_file = StringIO('This will pretend to be chunk-able.')
k.should_retry = counter(k.should_retry)
self.assertEqual(k.should_retry.count, 0)
with self.assertRaises(BotoServerError):
k.send_file(fail_file)
self.assertEqual(k.should_retry.count, 1)
@mock.patch('time.sleep')
def test_504_gateway_timeout(self, sleep_mock):
weird_timeout_body = "<Error><Code>GatewayTimeout</Code></Error>"
self.set_http_response(status_code=504, body=weird_timeout_body)
b = Bucket(self.service_connection, 'mybucket')
k = b.new_key('test_failure')
fail_file = StringIO('This will pretend to be chunk-able.')
k.should_retry = counter(k.should_retry)
self.assertEqual(k.should_retry.count, 0)
with self.assertRaises(BotoServerError):
k.send_file(fail_file)
self.assertEqual(k.should_retry.count, 1)
class TestFileError(unittest.TestCase):
def test_file_error(self):
key = Key()
class CustomException(Exception): pass
key.get_contents_to_file = mock.Mock(
side_effect=CustomException('File blew up!'))
# Ensure our exception gets raised instead of a file or IO error
with self.assertRaises(CustomException):
key.get_contents_to_filename('foo.txt')
if __name__ == '__main__':
unittest.main()
|
ecae0561fa061291b06a515428eb10984b164266
|
52a677b94056d3397b4a499bc9185adb68a63f05
|
/util/test/test_workers.py
|
6a3e39afc9eb133c77df89dcc5a39d0020bdd92f
|
[
"Apache-2.0"
] |
permissive
|
quay/quay
|
9b6fcff54efc0dbf7c6d91fa80676950555b6f1a
|
e400a0c22c5f89dd35d571654b13d262b1f6e3b3
|
refs/heads/master
| 2023-08-28T15:08:38.001842
| 2023-08-28T13:52:31
| 2023-08-28T13:52:31
| 220,517,730
| 2,363
| 322
|
Apache-2.0
| 2023-09-14T17:43:48
| 2019-11-08T17:37:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,859
|
py
|
test_workers.py
|
import pytest
from mock import patch
from util.workers import get_worker_count
@pytest.mark.parametrize(
"kind_name,env_vars,cpu_affinity,multiplier,minimum,maximum,expected",
[
# No override and CPU affinity * multiplier is between min and max => cpu affinity * multiplier.
("registry", {}, [0, 1], 10, 8, 64, 20),
# No override and CPU affinity * multiplier is below min => min.
("registry", {}, [0], 1, 8, 64, 8),
# No override and CPU affinity * multiplier is above max => max.
("registry", {}, [0, 1, 2, 3], 20, 8, 64, 64),
# Override based on specific env var.
(
"registry",
{
"WORKER_COUNT_REGISTRY": 12,
},
[0, 1],
10,
8,
64,
12,
),
# Override based on specific env var (ignores maximum).
(
"registry",
{
"WORKER_COUNT_REGISTRY": 120,
},
[0, 1],
10,
8,
64,
120,
),
# Override based on specific env var (respects minimum).
(
"registry",
{
"WORKER_COUNT_REGISTRY": 1,
},
[0, 1],
10,
8,
64,
8,
),
# Override based on generic env var.
(
"registry",
{
"WORKER_COUNT": 12,
},
[0, 1],
10,
8,
64,
12,
),
# Override based on generic env var (ignores maximum).
(
"registry",
{
"WORKER_COUNT": 120,
},
[0, 1],
10,
8,
64,
120,
),
# Override based on generic env var (respects minimum).
(
"registry",
{
"WORKER_COUNT": 1,
},
[0, 1],
10,
8,
64,
8,
),
# Override always uses specific first.
(
"registry",
{
"WORKER_COUNT_REGISTRY": 120,
"WORKER_COUNT": 12,
},
[0, 1],
10,
8,
64,
120,
),
],
)
def test_get_worker_count(
kind_name, env_vars, cpu_affinity, multiplier, minimum, maximum, expected
):
class FakeProcess(object):
def __init__(self, pid):
pass
def cpu_affinity(self):
return cpu_affinity
with patch("os.environ.get", env_vars.get):
with patch("psutil.Process", FakeProcess):
assert get_worker_count(kind_name, multiplier, minimum, maximum) == expected
|
6f2ad449684a736fefe9c9bd820aff9900477ed5
|
019f03d6713a2bc5344b644aeb5ebe70aaf7cfd0
|
/tests/unit_tests/save_ckpt_test.py
|
dcb5208744cff8f9ca64a2c01cb2109187974a1b
|
[
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] |
permissive
|
Deci-AI/super-gradients
|
6f52cd15bc2f9f39e3cdc6067292b6512aba5dd0
|
7240726cf6425b53a26ed2faec03672f30fee6be
|
refs/heads/master
| 2023-08-25T17:47:02.595029
| 2023-08-24T11:50:50
| 2023-08-24T11:50:50
| 432,652,408
| 3,237
| 331
|
Apache-2.0
| 2023-09-14T11:24:46
| 2021-11-28T07:58:02
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,855
|
py
|
save_ckpt_test.py
|
import unittest
import os
from super_gradients.training import Trainer, models
from super_gradients.training.dataloaders.dataloaders import classification_test_dataloader
from super_gradients.training.metrics import Accuracy, Top5
from super_gradients.common.object_names import Models
class SaveCkptListUnitTest(unittest.TestCase):
def setUp(self):
# Define Parameters
train_params = {
"max_epochs": 4,
"lr_decay_factor": 0.1,
"lr_updates": [4],
"lr_mode": "step",
"lr_warmup_epochs": 0,
"initial_lr": 0.1,
"loss": "cross_entropy",
"optimizer": "SGD",
"criterion_params": {},
"optimizer_params": {"weight_decay": 1e-4, "momentum": 0.9},
"save_ckpt_epoch_list": [1, 3],
"loss": "cross_entropy",
"train_metrics_list": [Accuracy(), Top5()],
"valid_metrics_list": [Accuracy(), Top5()],
"metric_to_watch": "Accuracy",
"greater_metric_to_watch_is_better": True,
}
# Define Model
trainer = Trainer("save_ckpt_test")
# Build Model
model = models.get(Models.RESNET18_CIFAR, arch_params={"num_classes": 10})
# Train Model (and save ckpt_epoch_list)
trainer.train(model=model, training_params=train_params, train_loader=classification_test_dataloader(), valid_loader=classification_test_dataloader())
dir_path = trainer.checkpoints_dir_path
self.file_names_list = [dir_path + f"/ckpt_epoch_{epoch}.pth" for epoch in train_params["save_ckpt_epoch_list"]]
def test_save_ckpt_epoch_list(self):
self.assertTrue(os.path.exists(self.file_names_list[0]))
self.assertTrue(os.path.exists(self.file_names_list[1]))
if __name__ == "__main__":
unittest.main()
|
113493800c86b00e2457fac21821079c3803bf75
|
a85c048a4ae820beb2bc265d1845e23842fc8c2a
|
/learning/pytorch/utils/messages.py
|
54e2ca05fc3e701997ac7f327fb89ba504c9bdf4
|
[
"MIT"
] |
permissive
|
ithemal/Ithemal
|
e549856538c7b1f2c50d0f40b51b9bb97baf6379
|
b3c39a8942b8b3d92c0fa81815b34fa9b6cbe683
|
refs/heads/master
| 2021-12-10T11:50:00.058462
| 2021-11-30T16:18:30
| 2021-11-30T16:18:30
| 151,625,735
| 124
| 33
|
MIT
| 2020-06-26T15:11:01
| 2018-10-04T19:33:52
|
Python
|
UTF-8
|
Python
| false
| false
| 457
|
py
|
messages.py
|
from typing import NamedTuple, Tuple, Union
LossReportMessage = NamedTuple('LossReportMessage', [
('rank', int),
('loss', float),
('n_items', int),
])
EpochAdvanceMessage = NamedTuple('EpochAdvanceMessage', [
('epoch', int),
('n_trainers', int),
])
TrainerDeathMessage = NamedTuple('TrainerDeathMessage', [
('remaining_partition', Tuple[int, int]),
])
Message = Union[LossReportMessage, EpochAdvanceMessage, TrainerDeathMessage]
|
b40b8cd30034473a33a16823acdccc8a3f4699ef
|
7c857119fe1505b1d80d6e62969661c06dc1a2f4
|
/BaseTools/Source/Python/Common/MultipleWorkspace.py
|
ad5d48588b483c9797bc8ee20fe9689d3567e9c1
|
[
"BSD-2-Clause"
] |
permissive
|
CloverHackyColor/CloverBootloader
|
7042ca7dd6b513d22be591a295e49071ae1482ee
|
2711170df4f60b2ae5aa20add3e00f35cf57b7e5
|
refs/heads/master
| 2023-08-30T22:14:34.590134
| 2023-08-27T19:14:02
| 2023-08-27T19:14:02
| 205,810,121
| 4,734
| 770
|
BSD-2-Clause
| 2023-09-03T12:41:33
| 2019-09-02T08:22:14
|
C
|
UTF-8
|
Python
| false
| false
| 5,042
|
py
|
MultipleWorkspace.py
|
## @file
# Manage multiple workspace files.
#
# This file is required to make Python interpreter treat the directory
# as containing package.
#
# Copyright (c) 2015 - 2018, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
import Common.LongFilePathOs as os
from Common.DataType import TAB_WORKSPACE
## MultipleWorkspace
#
# This class manages multiple-workspace behavior
#
# @param class:
#
# @var WORKSPACE: defines the current WORKSPACE
# @var PACKAGES_PATH: defines the other workspaces; if a file is not found under the current WORKSPACE, a valid WORKSPACE is searched for in PACKAGES_PATH
#
class MultipleWorkspace(object):
WORKSPACE = ''
PACKAGES_PATH = None
## convertPackagePath()
#
# Convert path to match workspace.
#
# @param cls The class pointer
# @param Ws The current WORKSPACE
# @param Path Path to be converted to match workspace.
#
@classmethod
def convertPackagePath(cls, Ws, Path):
if str(os.path.normcase (Path)).startswith(Ws):
return os.path.join(Ws, os.path.relpath(Path, Ws))
return Path
## setWs()
#
# set WORKSPACE and PACKAGES_PATH environment
#
# @param cls The class pointer
# @param Ws initialize WORKSPACE variable
# @param PackagesPath initialize PackagesPath variable
#
@classmethod
def setWs(cls, Ws, PackagesPath=None):
cls.WORKSPACE = Ws
if PackagesPath:
cls.PACKAGES_PATH = [cls.convertPackagePath (Ws, os.path.normpath(Path.strip())) for Path in PackagesPath.split(os.pathsep)]
else:
cls.PACKAGES_PATH = []
## join()
#
# rewrite os.path.join function
#
# @param cls The class pointer
# @param Ws the current WORKSPACE
# @param *p path of the inf/dec/dsc/fdf/conf file
# @retval Path the absolute path of specified file
#
@classmethod
def join(cls, Ws, *p):
Path = os.path.join(Ws, *p)
if not os.path.exists(Path):
for Pkg in cls.PACKAGES_PATH:
Path = os.path.join(Pkg, *p)
if os.path.exists(Path):
return Path
Path = os.path.join(Ws, *p)
return Path
## relpath()
#
# rewrite os.path.relpath function
#
# @param cls The class pointer
# @param Path path of the inf/dec/dsc/fdf/conf file
# @param Ws the current WORKSPACE
# @retval Path the relative path of specified file
#
@classmethod
def relpath(cls, Path, Ws):
for Pkg in cls.PACKAGES_PATH:
if Path.lower().startswith(Pkg.lower()):
Path = os.path.relpath(Path, Pkg)
return Path
if Path.lower().startswith(Ws.lower()):
Path = os.path.relpath(Path, Ws)
return Path
## getWs()
#
# get valid workspace for the path
#
# @param cls The class pointer
# @param Ws the current WORKSPACE
# @param Path path of the inf/dec/dsc/fdf/conf file
# @retval Ws the valid workspace relative to the specified file path
#
@classmethod
def getWs(cls, Ws, Path):
absPath = os.path.join(Ws, Path)
if not os.path.exists(absPath):
for Pkg in cls.PACKAGES_PATH:
absPath = os.path.join(Pkg, Path)
if os.path.exists(absPath):
return Pkg
return Ws
## handleWsMacro()
#
# handle the $(WORKSPACE) tag: replace it with the current WORKSPACE; if the resulting path does not exist, fall back to a PACKAGES_PATH entry.
#
# @param cls The class pointer
# @retval PathStr Path string include the $(WORKSPACE)
#
@classmethod
def handleWsMacro(cls, PathStr):
if TAB_WORKSPACE in PathStr:
PathList = PathStr.split()
if PathList:
for i, str in enumerate(PathList):
MacroStartPos = str.find(TAB_WORKSPACE)
if MacroStartPos != -1:
Substr = str[MacroStartPos:]
Path = Substr.replace(TAB_WORKSPACE, cls.WORKSPACE).strip()
if not os.path.exists(Path):
for Pkg in cls.PACKAGES_PATH:
Path = Substr.replace(TAB_WORKSPACE, Pkg).strip()
if os.path.exists(Path):
break
PathList[i] = str[0:MacroStartPos] + Path
PathStr = ' '.join(PathList)
return PathStr
## getPkgPath()
#
# get all package paths.
#
# @param cls The class pointer
#
@classmethod
def getPkgPath(cls):
return cls.PACKAGES_PATH
|
fecfc866247bebd2fa0b38c0e4d1514f34a6b91d
|
bb71e927dc2429abf551b44874ee990cb3a93f7a
|
/python/asynchrous_mapping/sync_sleep.py
|
790bf59b39179670738df2b2a55c49bd80670947
|
[] |
no_license
|
khuyentran1401/Data-science
|
c37021349bb407ed50d891dab780463e0b243de5
|
be59f5959be9f5944e12260fbb4548c85ef6aabe
|
refs/heads/master
| 2023-08-31T13:46:58.212459
| 2023-08-09T15:46:11
| 2023-08-09T15:46:11
| 280,508,180
| 3,809
| 943
| null | 2023-05-23T02:38:37
| 2020-07-17T19:25:27
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 203
|
py
|
sync_sleep.py
|
from time import sleep
def add_one(x):
print(f"add_one({x})")
sleep(5)
return x + 1
def main(nums=[1, 2, 3]):
return list(map(add_one, nums))
if __name__ == "__main__":
main()
|
89531b3b7a5b2263cc14d625cfa650a531e9267d
|
57c5298a3d0dcf656dc7822d6cdd9c2c00ba1f72
|
/models/UBNeck.py
|
89adf1ad07d0e3eff1f510a7258e83ff314724d7
|
[
"BSD-3-Clause"
] |
permissive
|
iArunava/ENet-Real-Time-Semantic-Segmentation
|
e5830251936fe09b0748913c6ac4c418ff1cc9d3
|
8e3e86c4c4eb8392d72962e393d992294d8fc8ae
|
refs/heads/master
| 2021-06-11T08:34:13.175152
| 2021-04-30T20:50:20
| 2021-04-30T20:50:20
| 165,788,748
| 286
| 79
|
BSD-3-Clause
| 2020-12-29T06:34:28
| 2019-01-15T05:06:05
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,785
|
py
|
UBNeck.py
|
import torch
import torch.nn as nn
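# Upsampling bottleneck ("UBNeck") block in the ENet style: a transposed-convolution side branch is
# summed with a max-unpooled, 1x1-projected main branch.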
class UBNeck(nn.Module):
def __init__(self, in_channels, out_channels, relu=False, projection_ratio=4):
super().__init__()
# Define class variables
self.in_channels = in_channels
self.reduced_depth = int(in_channels / projection_ratio)
self.out_channels = out_channels
if relu:
activation = nn.ReLU()
else:
activation = nn.PReLU()
self.unpool = nn.MaxUnpool2d(kernel_size = 2,
stride = 2)
self.main_conv = nn.Conv2d(in_channels = self.in_channels,
out_channels = self.out_channels,
kernel_size = 1)
self.dropout = nn.Dropout2d(p=0.1)
self.convt1 = nn.ConvTranspose2d(in_channels = self.in_channels,
out_channels = self.reduced_depth,
kernel_size = 1,
padding = 0,
bias = False)
self.prelu1 = activation
self.convt2 = nn.ConvTranspose2d(in_channels = self.reduced_depth,
out_channels = self.reduced_depth,
kernel_size = 3,
stride = 2,
padding = 1,
output_padding = 1,
bias = False)
self.prelu2 = activation
self.convt3 = nn.ConvTranspose2d(in_channels = self.reduced_depth,
out_channels = self.out_channels,
kernel_size = 1,
padding = 0,
bias = False)
self.prelu3 = activation
self.batchnorm1 = nn.BatchNorm2d(self.reduced_depth)
self.batchnorm2 = nn.BatchNorm2d(self.reduced_depth)
self.batchnorm3 = nn.BatchNorm2d(self.out_channels)
def forward(self, x, indices):
x_copy = x
# Side Branch
x = self.convt1(x)
x = self.batchnorm1(x)
x = self.prelu1(x)
x = self.convt2(x)
x = self.batchnorm2(x)
x = self.prelu2(x)
x = self.convt3(x)
x = self.batchnorm3(x)
x = self.dropout(x)
# Main Branch
x_copy = self.main_conv(x_copy)
x_copy = self.unpool(x_copy, indices, output_size=x.size())
# Concat
x = x + x_copy
x = self.prelu3(x)
return x
|
07b51a84dbc30ac2c60207aaa9dd9b79658f94e6
|
c1b8b6080f29c8037100080298b897618a826475
|
/gammapy/maps/wcs/core.py
|
af558508504348852c180573f942ea1fc5d0b1c3
|
[
"BSD-3-Clause"
] |
permissive
|
gammapy/gammapy
|
a5d7acbdde848e92e124fefbce9716faa296f572
|
60f03adb8fc7851b9f3ca039512c03a669e3fe10
|
refs/heads/main
| 2023-08-16T21:19:06.624561
| 2023-08-04T12:13:08
| 2023-08-04T12:13:08
| 10,073,640
| 204
| 184
|
BSD-3-Clause
| 2023-09-14T15:26:05
| 2013-05-15T07:50:40
|
Python
|
UTF-8
|
Python
| false
| false
| 10,152
|
py
|
core.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import json
import numpy as np
import astropy.units as u
from astropy.io import fits
from ..core import Map
from ..io import JsonQuantityEncoder, find_bands_hdu, find_hdu
from .geom import WcsGeom
from .io import identify_wcs_format
__all__ = ["WcsMap"]
class WcsMap(Map):
"""Base class for WCS map classes.
Parameters
----------
geom : `~gammapy.maps.WcsGeom`
A WCS geometry object.
data : `~numpy.ndarray`
Data array.
"""
@classmethod
def create(
cls,
map_type="wcs",
npix=None,
binsz=0.1,
width=None,
proj="CAR",
frame="icrs",
refpix=None,
axes=None,
skydir=None,
dtype="float32",
meta=None,
unit="",
):
"""Factory method to create an empty WCS map.
Parameters
----------
map_type : {'wcs', 'wcs-sparse'}
Map type. Selects the class that will be used to
instantiate the map.
npix : int or tuple or list
Width of the map in pixels. A tuple will be interpreted as
parameters for longitude and latitude axes. For maps with
non-spatial dimensions, list input can be used to define a
different map width in each image plane. This option
supersedes width.
width : float or tuple or list
Width of the map in degrees. A tuple will be interpreted
as parameters for longitude and latitude axes. For maps
with non-spatial dimensions, list input can be used to
define a different map width in each image plane.
binsz : float or tuple or list
Map pixel size in degrees. A tuple will be interpreted
as parameters for longitude and latitude axes. For maps
with non-spatial dimensions, list input can be used to
define a different bin size in each image plane.
skydir : tuple or `~astropy.coordinates.SkyCoord`
Sky position of map center. Can be either a SkyCoord
object or a tuple of longitude and latitude in deg in the
coordinate system of the map.
frame : {"icrs", "galactic"}, optional
Coordinate system, either Galactic ("galactic") or Equatorial ("icrs").
axes : list
List of non-spatial axes.
proj : string, optional
Any valid WCS projection type. Default is 'CAR' (cartesian).
refpix : tuple
Reference pixel of the projection. If None then this will
be chosen to be center of the map.
dtype : str, optional
Data type, default is float32
meta : `dict`
Dictionary to store meta data.
unit : str or `~astropy.units.Unit`
The unit of the map
Returns
-------
map : `~WcsMap`
A WCS map object.
"""
from .ndmap import WcsNDMap
geom = WcsGeom.create(
npix=npix,
binsz=binsz,
width=width,
proj=proj,
skydir=skydir,
frame=frame,
refpix=refpix,
axes=axes,
)
if map_type == "wcs":
return WcsNDMap(geom, dtype=dtype, meta=meta, unit=unit)
elif map_type == "wcs-sparse":
raise NotImplementedError
else:
raise ValueError(f"Invalid map type: {map_type!r}")
@classmethod
def from_hdulist(cls, hdu_list, hdu=None, hdu_bands=None, format="gadf"):
"""Make a WcsMap object from a FITS HDUList.
Parameters
----------
hdu_list : `~astropy.io.fits.HDUList`
HDU list containing HDUs for map data and bands.
hdu : str
Name or index of the HDU with the map data.
hdu_bands : str
Name or index of the HDU with the BANDS table.
format : {'gadf', 'fgst-ccube', 'fgst-template'}
FITS format convention.
Returns
-------
wcs_map : `WcsMap`
Map object
"""
if hdu is None:
hdu = find_hdu(hdu_list)
else:
hdu = hdu_list[hdu]
if hdu_bands is None:
hdu_bands = find_bands_hdu(hdu_list, hdu)
if hdu_bands is not None:
hdu_bands = hdu_list[hdu_bands]
format = identify_wcs_format(hdu_bands)
wcs_map = cls.from_hdu(hdu, hdu_bands, format=format)
if wcs_map.unit.is_equivalent(""):
if format == "fgst-template":
if "GTI" in hdu_list: # exposure maps have an additional GTI hdu
wcs_map._unit = u.Unit("cm2 s")
else:
wcs_map._unit = u.Unit("cm-2 s-1 MeV-1 sr-1")
return wcs_map
def to_hdulist(self, hdu=None, hdu_bands=None, sparse=False, format="gadf"):
"""Convert to `~astropy.io.fits.HDUList`.
Parameters
----------
hdu : str
Name or index of the HDU with the map data.
hdu_bands : str
Name or index of the HDU with the BANDS table.
sparse : bool
Sparsify the map by only writing pixels with non-zero
amplitude.
format : {'gadf', 'fgst-ccube','fgst-template'}
FITS format convention.
Returns
-------
hdu_list : `~astropy.io.fits.HDUList`
"""
if sparse:
hdu = "SKYMAP" if hdu is None else hdu.upper()
else:
hdu = "PRIMARY" if hdu is None else hdu.upper()
if sparse and hdu == "PRIMARY":
raise ValueError("Sparse maps cannot be written to the PRIMARY HDU.")
if format in ["fgst-ccube", "fgst-template"]:
if self.geom.axes[0].name != "energy" or len(self.geom.axes) > 1:
raise ValueError(
"All 'fgst' formats don't support extra axes except for energy."
)
if hdu_bands is None:
hdu_bands = f"{hdu.upper()}_BANDS"
if self.geom.axes:
hdu_bands_out = self.geom.to_bands_hdu(hdu_bands=hdu_bands, format=format)
hdu_bands = hdu_bands_out.name
else:
hdu_bands = None
hdu_out = self.to_hdu(hdu=hdu, hdu_bands=hdu_bands, sparse=sparse)
hdu_out.header["META"] = json.dumps(self.meta, cls=JsonQuantityEncoder)
hdu_out.header["BUNIT"] = self.unit.to_string("fits")
if hdu == "PRIMARY":
hdulist = [hdu_out]
else:
hdulist = [fits.PrimaryHDU(), hdu_out]
if self.geom.axes:
hdulist += [hdu_bands_out]
return fits.HDUList(hdulist)
def to_hdu(self, hdu="SKYMAP", hdu_bands=None, sparse=False):
"""Make a FITS HDU from this map.
Parameters
----------
hdu : str
The HDU extension name.
hdu_bands : str
The HDU extension name for BANDS table.
sparse : bool
Set INDXSCHM to SPARSE and sparsify the map by only
writing pixels with non-zero amplitude.
Returns
-------
hdu : `~astropy.io.fits.BinTableHDU` or `~astropy.io.fits.ImageHDU`
HDU containing the map data.
"""
header = self.geom.to_header()
if self.is_mask:
data = self.data.astype(int)
else:
data = self.data
if hdu_bands is not None:
header["BANDSHDU"] = hdu_bands
if sparse:
hdu_out = self._make_hdu_sparse(data, self.geom.npix, hdu, header)
elif hdu == "PRIMARY":
hdu_out = fits.PrimaryHDU(data, header=header)
else:
hdu_out = fits.ImageHDU(data, header=header, name=hdu)
return hdu_out
@staticmethod
def _make_hdu_sparse(data, npix, hdu, header):
shape = data.shape
# We make a copy, because below we modify `data` to handle non-finite entries
# TODO: The code below could probably be simplified to use expressions
# that create new arrays instead of in-place modifications
# But first: do we want / need the non-finite entry handling at all and
# always cast to 64-bit float?
data = data.copy()
if len(shape) == 2:
data_flat = np.ravel(data)
non_zero = np.where(~(data_flat == 0))
value = data_flat[non_zero].astype(float)
cols = [
fits.Column("PIX", "J", array=non_zero[0]),
fits.Column("VALUE", "E", array=value),
]
elif npix[0].size == 1:
shape_flat = shape[:-2] + (shape[-1] * shape[-2],)
data_flat = np.ravel(data).reshape(shape_flat)
nonzero = np.where(~(data_flat == 0))
channel = np.ravel_multi_index(nonzero[:-1], shape[:-2])
value = data_flat[nonzero].astype(float)
cols = [
fits.Column("PIX", "J", array=nonzero[-1]),
fits.Column("CHANNEL", "I", array=channel),
fits.Column("VALUE", "E", array=value),
]
else:
data_flat = []
channel = []
pix = []
for i, _ in np.ndenumerate(npix[0]):
data_i = np.ravel(data[i[::-1]])
pix_i = np.where(~(data_i == 0))
data_i = data_i[pix_i]
data_flat += [data_i]
pix += pix_i
channel += [
np.ones(data_i.size, dtype=int)
* np.ravel_multi_index(i[::-1], shape[:-2])
]
pix = np.concatenate(pix)
channel = np.concatenate(channel)
value = np.concatenate(data_flat).astype(float)
cols = [
fits.Column("PIX", "J", array=pix),
fits.Column("CHANNEL", "I", array=channel),
fits.Column("VALUE", "E", array=value),
]
return fits.BinTableHDU.from_columns(cols, header=header, name=hdu)
|
01dcb6899f437415fa94ae5b1404e65fa20ce10d
|
2b39d6f15cb85690c8e6e5c086362916a7df01bd
|
/graveyard/ngt_utils/mars.py
|
e726839005760f16603dafe460ef982e021a92be
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
NeoGeographyToolkit/StereoPipeline
|
3649ba23fb5a07a8e3f13811a21ba12e0bd6c70a
|
80ba3b2a1fc4d5acbd47621ba117852fef6687bf
|
refs/heads/master
| 2023-08-31T02:28:40.247866
| 2023-08-27T03:06:35
| 2023-08-27T03:06:35
| 714,891
| 414
| 124
|
Apache-2.0
| 2023-03-09T03:10:29
| 2010-06-11T02:11:55
|
C++
|
UTF-8
|
Python
| false
| false
| 825
|
py
|
mars.py
|
## __BEGIN_LICENSE__
## Copyright (C) 2006-2010 United States Government as represented by
## the Administrator of the National Aeronautics and Space Administration
## All Rights Reserved.
## __END_LICENSE__
import math
import lonlat
MARS_MAJOR_RADIUS = 3396200.0
MARS_MINOR_RADIUS = 3376200.0
MARS_G2C_FACTOR = (MARS_MINOR_RADIUS/MARS_MAJOR_RADIUS)*(MARS_MINOR_RADIUS/MARS_MAJOR_RADIUS)
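# (minor/major)^2 factor used for the planetographic -> planetocentric latitude conversion: tan(lat_c) = (b/a)^2 * tan(lat_g)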
def mars_g2c_deg(lat):
'''Convert a mars latitude in degrees from 'ographic to 'ocentric coordinates.'''
return math.atan( MARS_G2C_FACTOR * math.tan(lat*math.pi/180) )*180/math.pi
def mars_w2e_deg(lon):
'''Convert positive-west longitude to positive-east in the range [-180,180).'''
return lonlat.w2e_deg(lon)
def mars_pm180(lon):
'''Convert longitudes to the range [-180,180).'''
return lonlat.pl180(lon)
|
7609c8953b73d594a6cf94ee13b01d792bcd17bb
|
efe44bbc64cf788768d73e35cfb1baa2098181f6
|
/mlperf/models/convert_tf_weights.py
|
2e0372ee1fed677631e30037744a36ca707d1667
|
[
"Apache-2.0"
] |
permissive
|
plaidml/plaidml
|
d2dd18f8228d3959ec875b66aa4ff31f6c29ef00
|
49fbaa5ac387e621f11ba0b81f49461e4b2d02ef
|
refs/heads/plaidml-v1
| 2023-08-09T18:38:07.681422
| 2023-07-23T20:15:07
| 2023-07-23T20:15:07
| 100,326,126
| 4,779
| 516
|
Apache-2.0
| 2023-02-14T21:33:05
| 2017-08-15T01:43:24
|
C++
|
UTF-8
|
Python
| false
| false
| 3,992
|
py
|
convert_tf_weights.py
|
import torch
import re
from collections import OrderedDict
def remap_tf_base_names(orig_weights):
prefix = "backbone."
# convs
weights = {k: v for k, v in orig_weights.items() if "FeatureExtractor/MobilenetV1" in k}
convs = {k: v for k, v in weights.items() if "batchnorm" not in k and "pointwise_" not in k}
matcher = re.compile("(.*)Conv2d_(\d+)")
mapping = {}
for k in convs.keys():
l = matcher.match(k).group(2)
name = "pointwise" if "pointwise" in k else "depthwise"
if l == "0":
name = "0"
mapping[k] = "{}{}.{}.weight".format(prefix, l, name)
# batch norm
weights = {
k: v for k, v in orig_weights.items() if "FeatureExtractor/MobilenetV1/MobilenetV1" in k
}
weights = {k: v for k, v in weights.items() if "pointwise_" not in k}
for k in weights.keys():
l = matcher.match(k).group(2)
name = "pointwise" if "pointwise" in k else "depthwise"
op = "scale" if "mul" in k else "bias"
if l == "0":
name = "0"
mapping[k] = "{}{}.{}/BatchNorm.{}".format(prefix, l, name, op)
return mapping
def remap_tf_extras(orig_weights):
prefix = "extras."
weights = {k: v for k, v in orig_weights.items() if "FeatureExtractor/MobilenetV1" in k}
weights = {k: v for k, v in weights.items() if "pointwise_" in k}
matcher = re.compile("(.*)Conv2d_(\d+)_(\d)x(\d)")
mapping = {}
for k in weights.keys():
m = matcher.match(k)
l = int(m.group(2)) - 2
ks = int(m.group(3))
if ks == 1:
pos = 0
else:
pos = 2
wtype = "weight" if "weight" in k else "bias"
mapping[k] = "{}{}.{}.{}".format(prefix, l, pos, wtype)
return mapping
def remap_tf_predictors(orig_weights):
mapping = {}
# regression
weights = {k: v for k, v in orig_weights.items() if "BoxPredictor" in k}
weights = {k: v for k, v in weights.items() if "BoxEncodingPredictor" in k}
matcher = re.compile("BoxPredictor_(\d+)")
for k in weights.keys():
pos = matcher.match(k).group(1)
wtype = "weight" if "weights" in k else "bias"
mapping[k] = "predictors.{}.regression.{}".format(pos, wtype)
# classification
weights = {k: v for k, v in orig_weights.items() if "BoxPredictor" in k}
weights = {k: v for k, v in weights.items() if "ClassPredictor" in k}
for k in weights.keys():
pos = matcher.match(k).group(1)
wtype = "weight" if "weights" in k else "bias"
mapping[k] = "predictors.{}.classification.{}".format(pos, wtype)
return mapping
def remap_tf_names(weights):
layers_base = remap_tf_base_names(weights)
layers_extra = remap_tf_extras(weights)
layers_predictors = remap_tf_predictors(weights)
layers = {}
layers.update(layers_base)
layers.update(layers_extra)
layers.update(layers_predictors)
return layers
def get_state_dict(weights):
layers = remap_tf_names(weights)
state_dict = OrderedDict()
for orig, new in layers.items():
weight = weights[orig]
weight = torch.as_tensor(weight, dtype=torch.float32)
if weight.dim() == 4:
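# TensorFlow kernels are stored HWIO (regular/pointwise convs) or HWCM (depthwise convs); permute to PyTorch's OIHW layout.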
p = (2, 3, 0, 1)
if "pointwise" in orig or "backbone.0." in new or "BoxPredictor" in orig:
p = (3, 2, 0, 1)
weight = weight.permute(*p).contiguous()
state_dict[new] = weight
return state_dict
def read_tf_weights(frozen_model):
import tensorflow as tf
from tensorflow.python.framework import tensor_util
weights = {}
with tf.Session() as sess:
with tf.gfile.GFile(frozen_model, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def)
for n in graph_def.node:
if n.op == 'Const':
weights[n.name] = tensor_util.MakeNdarray(n.attr['value'].tensor)
return weights
|
b13fdb5a50432f37e55328597b641f9454c45f9f
|
76f23cc69dc10c44bc7cf00b78e37db04c7a9c45
|
/datalad/customremotes/ria_remote.py
|
a11d05e7ccf1fe9e250809c8ff52695dd722a3fe
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
datalad/datalad
|
2d9c247344d340325ba84e7ab674ac320e57f30c
|
40332b5ad25bf8744f7399f6c3575f7d28f71384
|
refs/heads/maint
| 2023-09-04T11:03:02.264714
| 2023-08-10T15:56:19
| 2023-08-10T15:56:19
| 14,052,034
| 453
| 134
|
NOASSERTION
| 2023-09-14T19:10:18
| 2013-11-01T19:40:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,878
|
py
|
ria_remote.py
|
from datalad.customremotes.main import main as super_main
from datalad.distributed.ora_remote import ORARemote
from datalad.support.annexrepo import AnnexRepo
class DeprecatedRIARemote(ORARemote):
"""This is a shim for backwards compatibility with the old and archived
git-annex-ria-remote, which the current ORA remote is based on. Providing
this remote allows datasets that are configured with the old name (and the
respective config names) to still work.
However, this is intended to be somewhat temporary and be replaced by
another implementation that actually migrates from ria to ora once we
settled for an approach.
"""
def __init__(self, annex):
super().__init__(annex)
def initremote(self):
self.message("special remote type 'ria' is deprecated. Consider "
"migrating to 'ora'.", type='info')
super().initremote()
def _load_local_cfg(self):
"""Overwrite _load_local_cfg in order to initialize attributes with
deprecated 'ria' configs if they exist and then go on to let 'super' do
it's thing"""
self._repo = AnnexRepo(self.gitdir)
self.storage_host = \
self._repo.config.get(f"annex.ria-remote.{self.name}.ssh-host")
self.store_base_path = \
self._repo.config.get(f"annex.ria-remote.{self.name}.base-path")
self.force_write = \
self._repo.config.get(f"annex.ria-remote.{self.name}.force-write")
self.ignore_remote_config = \
self._repo.config.get(f"annex.ria-remote.{self.name}.ignore-remote-config")
super()._load_local_cfg()
def main():
"""cmdline entry point"""
super_main(
cls=DeprecatedRIARemote,
remote_name='ria',
description=\
"transport file content to and from datasets hosted in RIA stores",
)
|
5eb1d4699c52dce824e02922c240583fce560b33
|
9e891f81f31204e4d4aa3c91ab469ba3cdce059b
|
/semi/denoise.py
|
b65833accd80a6bb06bcf22c3f6098c059dc9567
|
[] |
no_license
|
haofuml/cyclical_annealing
|
0a78e1d1b2845fe37ded6c124db1c12f4db6051c
|
6ef4ebabb631df696cf4bfc333a965283eba1958
|
refs/heads/master
| 2022-11-26T08:32:56.912005
| 2020-10-16T18:22:09
| 2020-10-16T18:22:09
| 177,511,947
| 158
| 20
| null | 2022-11-22T17:30:40
| 2019-03-25T04:04:47
|
OpenEdge ABL
|
UTF-8
|
Python
| false
| false
| 3,559
|
py
|
denoise.py
|
"""
Yizhe Zhang
Perturbation to the input
"""
import numpy as np
import os
import scipy.io as sio
from math import floor
import pdb
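# Dispatch on opt.substitution: 's' = substitute words, 'p' = permute, 'a' = add, 'd' = delete, 'm' = mixed noise, 'sc' = substitute characters.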
def add_noise(sents, opt):
if opt.substitution == 's':
sents_permutated= substitute_sent(sents, opt)
elif opt.substitution == 'p':
sents_permutated= permutate_sent(sents, opt)
elif opt.substitution == 'a':
sents_permutated= add_sent(sents, opt)
elif opt.substitution == 'd':
sents_permutated= delete_sent(sents, opt)
elif opt.substitution == 'm':
sents_permutated= mixed_noise_sent(sents, opt)
elif opt.substitution == 'sc':
sents_permutated = substitute_sent_char(sents, opt)
else:
sents_permutated= sents
return sents_permutated
def permutate_sent(sents, opt):
sents_p = []
for ss in range(len(sents)):
sent_temp = sents[ss][:]
if len(sent_temp) <= 1:
sents_p.append(sent_temp)
continue
idx_s= np.random.choice(len(sent_temp)-1, size=opt.permutation, replace=True)
temp = sent_temp[idx_s[0]]
for ii in range(opt.permutation-1):
sent_temp[idx_s[ii]] = sent_temp[idx_s[ii+1]]
sent_temp[idx_s[opt.permutation-1]] = temp
sents_p.append(sent_temp)
return sents_p
def substitute_sent(sents, opt):
# substitute single word
sents_p = []
for ss in range(len(sents)):
sent_temp = sents[ss][:]
if len(sent_temp) <= 1:
sents_p.append(sent_temp)
continue
idx_s= np.random.choice(len(sent_temp)-1, size=opt.permutation, replace=True)
for ii in range(opt.permutation):
sent_temp[idx_s[ii]] = np.random.choice(opt.n_words)
sents_p.append(sent_temp)
return sents_p
def delete_sent(sents, opt):
# delete single word
sents_p = []
for ss in range(len(sents)):
sent_temp = sents[ss][:]
if len(sent_temp) <= 1:
sents_p.append(sent_temp)
continue
idx_s= np.random.choice(len(sent_temp)-1, size=opt.permutation, replace=True)
for ii in range(opt.permutation):
sent_temp[idx_s[ii]] = -1
sents_p.append([s for s in sent_temp if s!=-1])
return sents_p
def add_sent(sents, opt):
# add single word
sents_p = []
for ss in range(len(sents)):
sent_temp = sents[ss][:]
if len(sent_temp) <= 1:
sents_p.append(sent_temp)
continue
idx_s= np.random.choice(len(sent_temp)-1, size=opt.permutation, replace=True)
for ii in range(opt.permutation):
sent_temp.insert(idx_s[ii], np.random.choice(opt.n_words))
sents_p.append(sent_temp[:opt.maxlen])
return sents_p
def mixed_noise_sent(sents, opt):
sents = delete_sent(sents, opt)
sents = add_sent(sents, opt)
sents = substitute_sent(sents, opt)
return sents
def substitute_sent_char(sents, opt):
# substitute single character
sents_p = []
for ss in range(len(sents)):
sent_temp = sents[ss][:]
if len(sent_temp) <= 1:
sents_p.append(sent_temp)
continue
permute_choice = [ic for ic in range(len(sent_temp)) if sent_temp[ic] != 1]
idx_s= np.random.choice(permute_choice, size=int(opt.permutation * (len(permute_choice))), replace=True)
for ii in range(len(idx_s)):
sent_temp[idx_s[ii]] = np.random.choice(list(range(2,28)))
sents_p.append(sent_temp)
return sents_p
|
d1d562b28669ff2a5cea1aa5dd47d9d9cec0f637
|
6fdb4eaf5b0e6dbd7db4bf947547541e9aebf110
|
/api/tests/opentrons/hardware_control/test_api_helpers.py
|
bc3d161a2d323a9b6573384eaedb69bfea678411
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
Opentrons/opentrons
|
874321e01149184960eeaeaa31b1d21719a1ceda
|
026b523c8c9e5d45910c490efb89194d72595be9
|
refs/heads/edge
| 2023-09-02T02:51:49.579906
| 2023-08-31T16:02:45
| 2023-08-31T16:02:45
| 38,644,841
| 326
| 174
|
Apache-2.0
| 2023-09-14T21:47:20
| 2015-07-06T20:41:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,295
|
py
|
test_api_helpers.py
|
import pytest
from opentrons.calibration_storage.types import (
SourceType,
CalibrationStatus,
)
from opentrons.hardware_control.util import DeckTransformState
from opentrons.hardware_control.robot_calibration import (
RobotCalibration,
DeckCalibration,
)
@pytest.mark.ot2_only # ot3 attitude is always correct
async def test_validating_attitude(hardware):
inrange_matrix = [[1, 0, 1], [0, 1, 2], [0, 0, 1]]
deck_cal = DeckCalibration(
attitude=inrange_matrix,
last_modified="sometime",
source=SourceType.user,
status=CalibrationStatus(),
)
hardware.set_robot_calibration(RobotCalibration(deck_calibration=deck_cal))
assert hardware.validate_calibration() == DeckTransformState.OK
identity_matrix = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
deck_cal.attitude = identity_matrix
deck_cal.last_modified = None
hardware.set_robot_calibration(RobotCalibration(deck_calibration=deck_cal))
assert hardware.validate_calibration() == DeckTransformState.IDENTITY
singular_matrix = [[0, 0, 0], [0, 0, 0], [0, 0, 1]]
deck_cal.attitude = singular_matrix
hardware.set_robot_calibration(RobotCalibration(deck_calibration=deck_cal))
assert hardware.validate_calibration() == DeckTransformState.SINGULARITY
|
d4a0a5f309fe4206c44e6cd50ee2aeb11c329ae3
|
d8572b8c1015f62c9eedf2a29b0f2c4277c62903
|
/bashhub/bashhub_globals.py
|
9d7bf28e413be7ac09d3fba898160674d41d0f1b
|
[
"Apache-2.0"
] |
permissive
|
rcaloras/bashhub-client
|
2ba1f4205513116f6669ac6557e47350e5b862f5
|
043846909d57428a1a30d47fd5f8a727b574e20c
|
refs/heads/master
| 2023-09-03T20:29:37.579652
| 2023-08-26T21:17:34
| 2023-08-26T21:17:34
| 12,858,849
| 1,232
| 98
|
Apache-2.0
| 2023-08-26T21:17:51
| 2013-09-16T04:54:15
|
Python
|
UTF-8
|
Python
| false
| false
| 2,662
|
py
|
bashhub_globals.py
|
"""
This file should be used for declaring any global variables that need to be
pulled in from environment variables or are just used across multiple files.
"""
import os
import re
import time
import stat
# Support for Python 2 and 3
try:
import configparser
from configparser import NoSectionError, NoOptionError
except ImportError:
import ConfigParser as configparser
from ConfigParser import NoSectionError, NoOptionError
# Current time in milliseconds to use across the app.
current_milli_time = lambda: int(round(time.time() * 1000))
BH_HOME = '~/.bashhub' if 'HOME' not in list(os.environ.keys()) \
else os.environ['HOME'] + '/.bashhub'
def write_to_config_file(section, value):
exists = os.path.exists(BH_HOME)
file_path = BH_HOME + '/config'
permissions = stat.S_IRUSR | stat.S_IWUSR
if exists:
config = configparser.ConfigParser()
config.read(BH_HOME + '/config')
# Add our section if it doesn't exist
if not config.has_section("bashhub"):
config.add_section("bashhub")
config.set("bashhub", section, value)
with open(file_path, 'w') as config_file:
config.write(config_file)
os.chmod(file_path, permissions)
return True
else:
print("Couldn't find bashhub home directory. Sorry.")
return False
def get_from_config(key, default=''):
try:
config = configparser.ConfigParser()
config.read(BH_HOME + '/config')
return config.get('bashhub', key)
except NoSectionError as error:
return default
except NoOptionError as error:
return default
# Optional environment variable to configure for development
# export BH_URL='http://localhost:9000'
BH_URL = os.getenv('BH_URL', get_from_config('url', 'https://bashhub.com'))
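# Saving commands defaults to enabled: an empty value (env var and config unset) is treated as true below.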
BH_SAVE_COMMANDS = os.getenv('BH_SAVE_COMMANDS', \
get_from_config('save_commands')).lower() in ('true', 'yes', 't', 'on', '')
BH_SYSTEM_NAME = get_from_config("system_name")
# Check if debug mode is enabled
BH_DEBUG = os.getenv('BH_DEBUG', get_from_config("debug"))
# Get our token from the environment if one is present
# otherwise retrieve it from our config. Needs to
# be a function since we may change our token during setup
def BH_AUTH():
return os.getenv('BH_ACCESS_TOKEN', get_from_config("access_token"))
def is_valid_regex(regex):
try:
re.compile(regex)
return True
except re.error:
return False
def get_bh_filter():
filter = os.getenv('BH_FILTER', get_from_config('filter'))
return filter if is_valid_regex(filter) else '__invalid__'
BH_FILTER = get_bh_filter()
|
44d9fdbc8a5fc4619263ed796f0402f2bdf5bd0b
|
f1973e136f49f0b5ea2ec63c4d862188d197e5a5
|
/core/management/commands/_color.py
|
4eb8a91e6e0667fbfbe335cdba7b168c3f4fca51
|
[
"Apache-2.0"
] |
permissive
|
erigones/esdc-ce
|
65dc7d84e1bca3e3fcec668f54acae20183096a2
|
7e3dedddbe821283d909393f333eed4acd452953
|
refs/heads/master
| 2023-02-07T17:57:15.970089
| 2022-02-03T12:55:14
| 2022-02-03T12:55:14
| 73,122,985
| 123
| 36
|
Apache-2.0
| 2023-01-24T23:22:54
| 2016-11-07T21:34:53
|
Python
|
UTF-8
|
Python
| false
| false
| 674
|
py
|
_color.py
|
def _color(code):
# noinspection PyUnusedLocal
def inner(self, text, bold=False):
c = code
if bold:
c = '1;%s' % c
return '\033[%sm%s\033[0m' % (c, text)
return inner
class NoColor(object):
"""
Dummy color function.
"""
# noinspection PyUnusedLocal
def __getattr__(self, item):
return lambda text: text
class ShellColor(object):
"""
Bash colors.
"""
red = _color(31)
green = _color(32)
yellow = _color(33)
blue = _color(34)
magenta = _color(35)
cyan = _color(36)
white = _color(37)
reset = _color(0)
no_color = NoColor()
shell_color = ShellColor()
|
0ec5e72239c8f38dadc473e9a29d10c96376ffa9
|
d5a3aa96b30a5a6a355b4e004e494a6ef41a339c
|
/pudzu/sandbox/markov.py
|
7b683a77e9dab248d813bf87b91fc610041517db
|
[
"MIT"
] |
permissive
|
Udzu/pudzu
|
4c1c134503f62fd1cc08a56e257b864033b38561
|
df5019802bc32064870f31cda8397ad14868cda0
|
refs/heads/master
| 2023-07-10T06:16:35.342990
| 2023-07-04T06:28:00
| 2023-07-04T06:28:00
| 97,936,607
| 120
| 28
|
MIT
| 2021-02-21T16:15:31
| 2017-07-21T10:34:16
|
Roff
|
UTF-8
|
Python
| false
| false
| 5,078
|
py
|
markov.py
|
import argparse
import bisect
import functools
import itertools
import operator as op
import pickle
import random
import string
import sys
import unicodedata
from collections import Counter
# Simple Markov n-gram based generator.
def generate_ngrams(iterable, n):
"""Generator that yields n-grams from a sequence."""
return zip(*[itertools.islice(it, i, None) for i, it in enumerate(itertools.tee(iterable, n))])
def counter_random(counter, filter=None):
"""Return a single random elements from the Counter collection, weighted by count."""
if filter is not None:
counter = {k: v for k, v in counter.items() if filter(k)}
if len(counter) == 0:
raise Exception("No matching elements in Counter collection")
seq = list(counter.keys())
cum = list(itertools.accumulate(list(counter.values()), op.add))
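# Draw a uniform value in [0, total weight) and map it back to an element via binary search over the cumulative sums.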
return seq[bisect.bisect_left(cum, random.uniform(0, cum[-1]))]
def latin_normalise(i, letters=string.ascii_letters + " ", lowercase=True):
"""Example normalisation function that strips everything apart from letters and spaces (even accents)."""
return (nc for c in i for cc in (c.lower() if lowercase else c) for nc in (cc if cc in letters else unicodedata.normalize("NFKD", cc)) if nc in letters)
class MarkovGenerator(object):
"""Markov Chain n-gram-based generator for arbitrary iterables."""
def __init__(self, order):
"""Initialise generator for a given n-gram order."""
self.n = order
self.markov_dict = {}
self.prob_dict = Counter()
def reset(self):
"""Reset generator."""
self.__init__(self.n)
def train(self, iterable):
"""Train generator on an iterable."""
for ngram in generate_ngrams(iterable, self.n + 1):
self.markov_dict.setdefault(ngram[: self.n], Counter()).update([ngram[self.n]])
self.prob_dict.update([ngram[: self.n]])
def train_file(self, filename, encoding="utf-8", convert=itertools.chain.from_iterable, normalise=lambda i: i):
"""Train generator on a file. Accepts optional convert function (defaults to reading characters) and
normalise function (defaults to the identity)."""
with open(filename, "r", encoding=encoding) as f:
self.train(normalise(convert(f)))
def render(self, stop_when, start_ngram=None):
"""Return a tuple using the trained probabilities. Stop condition can be a maximum length or function."""
stop_fn = stop_when if callable(stop_when) else lambda o: len(o) >= stop_when
start_fn = start_ngram if (callable(start_ngram) or start_ngram is None) else lambda n: n == tuple(start_ngram)
ngram = counter_random(self.prob_dict, filter=start_fn)
output = ngram
while True:
if stop_fn(output):
break
elif ngram in self.markov_dict:
v = counter_random(self.markov_dict[ngram])
output += (v,)
ngram = ngram[1:] + (v,)
else:
ngram = counter_random(self.prob_dict)
return output
def render_word(self, min_length=3, max_length=12):
"""Generates a word. Assumes training on characters including spaces.
Doesn't filter out real words."""
while True:
word = "".join(self.render(lambda o: len(o) > 1 and o[-1] == " ", lambda n: n[0] == " "))
if min_length <= len(word.strip()) <= max_length:
return word.strip()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate pseudowords using Markov chains")
parser.add_argument("corpus", type=str, help="text corpus name")
parser.add_argument("number", type=int, help="number of words to generate")
parser.add_argument("-n", "--order", type=int, help="n-gram order [2]", default=2)
parser.add_argument("-l", "--letters", type=str, help="letters to keep [a-z/A-Z]", default=string.ascii_letters)
parser.add_argument("-c", "--casesensitive", action="store_true", help="case sensitive generator [False]")
parser.add_argument("-r", "--regenerate", action="store_true", help="always regenerate generator [False]")
args = parser.parse_args()
pickled_dict = "{}_{}.p".format(args.corpus, args.order)
try:
if args.regenerate:
raise FileNotFoundError
print("Checking for cached generator at {}".format(pickled_dict), file=sys.stderr)
with open(pickled_dict, "rb") as f:
mk = pickle.load(f)
except FileNotFoundError:
print("Training from corpus (may take a while).", file=sys.stderr)
mk = MarkovGenerator(order=args.order)
mk.train_file(args.corpus, normalise=functools.partial(latin_normalise, letters=args.letters + " ", lowercase=not args.casesensitive))
print("Saving generated generator to {}".format(pickled_dict), file=sys.stderr)
with open(pickled_dict, "wb") as f:
pickle.dump(mk, f, pickle.HIGHEST_PROTOCOL)
for i in range(args.number):
print(mk.render_word())
|
6d7d0c600de24d6c35139b7687dd06070d31b3b3
|
e96889bdfc11b1fefe00b23ca3ef0944f6cce3e6
|
/flask-backend/api/models/management.py
|
1fc9766ea9a29bb2e40ede5365d6802bdd1153e6
|
[
"Apache-2.0"
] |
permissive
|
scorelab/OpenMF
|
d8decd210bc46336a3a9d4b74d692e32f9946e2e
|
f26a5d853c783029b626f558ec3b1827c3212cf7
|
refs/heads/master
| 2023-04-13T13:41:00.787410
| 2022-09-05T07:18:36
| 2022-09-05T07:18:36
| 126,325,350
| 118
| 101
|
Apache-2.0
| 2023-07-03T13:10:08
| 2018-03-22T11:30:00
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,012
|
py
|
management.py
|
"""
Class definition of management user model.
"""
# Importing Depedencies
from api.models.base_user import BaseUser
from api.extansions import db
# Model Class Definition
class Management(BaseUser):
__tablename__="management"
# Property Declaration
id = db.Column(db.Integer, unique=True, primary_key=True)
role = db.Column(db.String(255), default="management")
admin_id = db.Column(db.Integer, db.ForeignKey("admin.id"))
assigned_tasks = db.relationship("Task", backref="management", lazy=True)
def __init__(self, name, email, password, admin, role="management"):
"""
Constructor for management user model.
"""
super().__init__(name, email, password)
self.admin = admin
self.role = role
def __repr__(self):
"""
Official way of representing management user in db.
"""
return (
f"<Management email={self.email}, public_id={self.public_id}, admin_email={self.admin.email}>"
)
|
401ba0fcf25d1ce99971c942c0f8006e76c95dcc
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/sa/profiles/Alcatel/7324RU/get_interfaces.py
|
0aa45de68e0e792c3fc451ca1b72e896f6d6c3f7
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 5,498
|
py
|
get_interfaces.py
|
# ----------------------------------------------------------------------
# Alcatel.7324RU.get_interfaces
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
import re
from collections import defaultdict
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetinterfaces import IGetInterfaces
from noc.core.text import parse_table
from noc.core.ip import IPv4
class Script(BaseScript):
name = "Alcatel.7324RU.get_interfaces"
interface = IGetInterfaces
rx_vlan = re.compile(
r" *(?P<vid>\d+)[ ]*(?P<vname>\S+)\n[ 0-9\n]+"
r" +(?P<vstatus>enabled|disabled)[ 0-9]+\n([ \-xnf]+)\n"
r" +(?P<portmask>[\-tu]+)"
r" *(?P<uplinkmask>[\-tu]*)",
re.MULTILINE | re.IGNORECASE,
)
rx_mac = re.compile(r"MAC\saddress:\s(?P<mac>\S+)\s*", re.IGNORECASE)
def execute(self):
# Management ip interface
ipif = self.cli("ip show")
mgmt_vlan = self.cli("switch vlan cpu show")
sys_mac = self.cli("sys info show")
mac = re.findall(self.rx_mac, sys_mac)[0]
vl = mgmt_vlan.splitlines()[0].split(":")[1].strip()
ip = [IPv4(parse_table(ipif)[0][1], netmask=parse_table(ipif)[0][2]).prefix]
i = [
{
"admin_status": True,
"enabled_protocols": [],
"mac": mac,
"name": parse_table(ipif)[0][0],
"oper_status": True,
"subinterfaces": [
{
"admin_status": True,
"enabled_afi": ["IPv4"],
"enabled_protocols": [],
"ipv4_addresses": ip,
"mac": mac,
"name": parse_table(ipif)[0][0],
"oper_status": True,
"vlan_ids": [vl],
}
],
"type": "SVI",
}
]
# ADSL ports
phy_ports = self.cli("adsl show")
oper_ports = self.cli("statistics adsl show") # noqa
sub_ports = self.cli("adsl pvc show")
vlans = self.cli("switch vlan show *")
phy_ports = phy_ports.split("Subscriber Info:")
for phy in parse_table(phy_ports[0]):
t1 = parse_table(phy_ports[1])[int(phy[0]) - 1]
admin_status = False
oper_status = False
if phy[1] == "V":
admin_status = True
if t1[1] == "V":
oper_status = True
description = ""
if t1[1] != "-":
description = t1[1] + " " + t1[2]
sub = []
for s in parse_table(sub_ports):
if s[3] == "*":
# A '*' here probably marks the default VLAN
self.logger.info("Skipping star vlan")
continue
if s[0] == phy[0]:
sub += [
{
"name": s[0],
"admin_status": True,
"oper_status": True,
"enabled_afi": ["ATM", "BRIDGE"],
"description": description,
"untagged_vlan": s[3],
"vpi": s[1],
"vci": s[2],
}
]
i += [
{
"name": phy[0],
"type": "physical",
"admin_status": admin_status,
"oper_status": oper_status,
"description": description,
"subinterfaces": sub,
"snmp_ifindex": phy[0],
}
]
# Enet ports info
enet_ports = self.cli("statistics enet")
tagged = defaultdict(list)
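# Collect, for each uplink port position in the VLAN port mask, the VLAN IDs tagged ('T') on that port.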
for match in self.rx_vlan.finditer(vlans):
up = 0
if match.group("vstatus") == "enabled":
for x in match.group("uplinkmask"):
up += 1
if x == "T":
tagged[up] += [match.group("vid")]
for y in range(up):
oper_status = True
admin_status = True
if parse_table(enet_ports)[y][1] == "disabled":
admin_status = False
oper_status = False
elif parse_table(enet_ports)[y][1] == "link down":
oper_status = False
i += [
{
"name": "enet%d" % (y + 1),
"type": "physical",
"admin_status": admin_status,
"oper_status": oper_status,
"mac": mac,
"snmp_ifindex": y + 49,
"subinterfaces": [
{
"admin_status": admin_status,
"enabled_afi": ["BRIDGE"],
"oper_status": oper_status,
"name": "enet%d" % (y + 1),
"mac": mac,
"tagged_vlans": tagged[y + 1],
}
],
}
]
return [{"interfaces": i}]
|
30303887bbd4d65092c45d339a75269fa779e2a0
|
518bf342bc4138982af3e2724e75f1d9ca3ba56c
|
/solutions/1406. Stone Game III/1406-2.py
|
d9a3a73a474fb155f5a31eedc26a26174c141886
|
[
"MIT"
] |
permissive
|
walkccc/LeetCode
|
dae85af7cc689882a84ee5011f0a13a19ad97f18
|
a27be41c174565d365cbfe785f0633f634a01b2a
|
refs/heads/main
| 2023-08-28T01:32:43.384999
| 2023-08-20T19:00:45
| 2023-08-20T19:00:45
| 172,231,974
| 692
| 302
|
MIT
| 2023-08-13T14:48:42
| 2019-02-23T15:46:23
|
C++
|
UTF-8
|
Python
| false
| false
| 487
|
py
|
1406-2.py
|
import math
from typing import List
class Solution:
def stoneGameIII(self, stoneValue: List[int]) -> str:
n = len(stoneValue)
# dp[i] := max "relative score" Alice can make w/ stoneValue[i:]
dp = [-math.inf] * n + [0]
for i in reversed(range(n)):
summ = 0
for j in range(i, i + 3):
if j == n:
break
summ += stoneValue[j]
dp[i] = max(dp[i], summ - dp[j + 1])
score = dp[0]
if score == 0:
return 'Tie'
return 'Alice' if score > 0 else 'Bob'
|
bbc30a42e95a450be5d30632c0df01afcd02ff99
|
07df6279388a17192eb4e4e417383a1f56208839
|
/mmdet3d/models/fusion_layers/point_fusion.py
|
97b4177763e7bb0cf4aae9c92016203a6cb2afd2
|
[
"Apache-2.0"
] |
permissive
|
HuangJunJie2017/BEVDet
|
11d4ca45286739c9bd099f715cb0edc9408a914f
|
f71858d02eb0fbd09860150ade67558d7984b1be
|
refs/heads/dev2.1
| 2023-05-23T15:35:45.216750
| 2023-05-07T16:35:04
| 2023-05-07T16:35:04
| 432,979,408
| 985
| 192
|
Apache-2.0
| 2023-04-28T15:06:51
| 2021-11-29T09:28:12
|
Python
|
UTF-8
|
Python
| false
| false
| 12,265
|
py
|
point_fusion.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from torch import nn as nn
from torch.nn import functional as F
from mmdet3d.core.bbox.structures import (get_proj_mat_by_coord_type,
points_cam2img)
from ..builder import FUSION_LAYERS
from . import apply_3d_transformation
def point_sample(img_meta,
img_features,
points,
proj_mat,
coord_type,
img_scale_factor,
img_crop_offset,
img_flip,
img_pad_shape,
img_shape,
aligned=True,
padding_mode='zeros',
align_corners=True):
"""Obtain image features using points.
Args:
img_meta (dict): Meta info.
img_features (torch.Tensor): 1 x C x H x W image features.
points (torch.Tensor): Nx3 point cloud in LiDAR coordinates.
proj_mat (torch.Tensor): 4x4 transformation matrix.
coord_type (str): 'DEPTH' or 'CAMERA' or 'LIDAR'.
img_scale_factor (torch.Tensor): Scale factor with shape of
(w_scale, h_scale).
img_crop_offset (torch.Tensor): Crop offset used to crop
image during data augmentation with shape of (w_offset, h_offset).
img_flip (bool): Whether the image is flipped.
img_pad_shape (tuple[int]): int tuple indicating the h & w after
padding; this is necessary to obtain features in the feature map.
img_shape (tuple[int]): int tuple indicating the h & w before padding
but after scaling; this is necessary for flipping coordinates.
aligned (bool, optional): Whether use bilinear interpolation when
sampling image features for each point. Defaults to True.
padding_mode (str, optional): Padding mode when padding values for
features of out-of-image points. Defaults to 'zeros'.
align_corners (bool, optional): Whether to align corners when
sampling image features for each point. Defaults to True.
Returns:
torch.Tensor: NxC image features sampled by point coordinates.
"""
# apply transformation based on info in img_meta
points = apply_3d_transformation(
points, coord_type, img_meta, reverse=True)
# project points to camera coordinate
pts_2d = points_cam2img(points, proj_mat)
# img transformation: scale -> crop -> flip
# the image is resized by img_scale_factor
img_coors = pts_2d[:, 0:2] * img_scale_factor # Nx2
img_coors -= img_crop_offset
# grid sample, the valid grid range should be in [-1,1]
coor_x, coor_y = torch.split(img_coors, 1, dim=1) # each is Nx1
if img_flip:
# by default we take it as horizontal flip
# use img_shape before padding for flip
orig_h, orig_w = img_shape
coor_x = orig_w - coor_x
h, w = img_pad_shape
coor_y = coor_y / h * 2 - 1
coor_x = coor_x / w * 2 - 1
grid = torch.cat([coor_x, coor_y],
dim=1).unsqueeze(0).unsqueeze(0) # Nx2 -> 1x1xNx2
# align_corner=True provides higher performance
mode = 'bilinear' if aligned else 'nearest'
point_features = F.grid_sample(
img_features,
grid,
mode=mode,
padding_mode=padding_mode,
align_corners=align_corners) # 1xCx1xN feats
return point_features.squeeze().t()
@FUSION_LAYERS.register_module()
class PointFusion(BaseModule):
"""Fuse image features from multi-scale features.
Args:
img_channels (list[int] | int): Channels of image features.
It could be a list if the input is multi-scale image features.
pts_channels (int): Channels of point features
mid_channels (int): Channels of middle layers
out_channels (int): Channels of output fused features
img_levels (int, optional): Number of image levels. Defaults to 3.
coord_type (str): 'DEPTH' or 'CAMERA' or 'LIDAR'.
Defaults to 'LIDAR'.
conv_cfg (dict, optional): Dict config of conv layers of middle
layers. Defaults to None.
norm_cfg (dict, optional): Dict config of norm layers of middle
layers. Defaults to None.
        act_cfg (dict, optional): Dict config of activation layers.
Defaults to None.
activate_out (bool, optional): Whether to apply relu activation
to output features. Defaults to True.
        fuse_out (bool, optional): Whether to apply a conv layer to the fused
features. Defaults to False.
dropout_ratio (int, float, optional): Dropout ratio of image
features to prevent overfitting. Defaults to 0.
        aligned (bool, optional): Whether to apply aligned feature fusion.
Defaults to True.
        align_corners (bool, optional): Whether to align corners when
sampling features according to points. Defaults to True.
padding_mode (str, optional): Mode used to pad the features of
points that do not have corresponding image features.
Defaults to 'zeros'.
lateral_conv (bool, optional): Whether to apply lateral convs
to image features. Defaults to True.
"""
def __init__(self,
img_channels,
pts_channels,
mid_channels,
out_channels,
img_levels=3,
coord_type='LIDAR',
conv_cfg=None,
norm_cfg=None,
act_cfg=None,
init_cfg=None,
activate_out=True,
fuse_out=False,
dropout_ratio=0,
aligned=True,
align_corners=True,
padding_mode='zeros',
lateral_conv=True):
super(PointFusion, self).__init__(init_cfg=init_cfg)
if isinstance(img_levels, int):
img_levels = [img_levels]
if isinstance(img_channels, int):
img_channels = [img_channels] * len(img_levels)
assert isinstance(img_levels, list)
assert isinstance(img_channels, list)
assert len(img_channels) == len(img_levels)
self.img_levels = img_levels
self.coord_type = coord_type
self.act_cfg = act_cfg
self.activate_out = activate_out
self.fuse_out = fuse_out
self.dropout_ratio = dropout_ratio
self.img_channels = img_channels
self.aligned = aligned
self.align_corners = align_corners
self.padding_mode = padding_mode
self.lateral_convs = None
if lateral_conv:
self.lateral_convs = nn.ModuleList()
for i in range(len(img_channels)):
l_conv = ConvModule(
img_channels[i],
mid_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=self.act_cfg,
inplace=False)
self.lateral_convs.append(l_conv)
self.img_transform = nn.Sequential(
nn.Linear(mid_channels * len(img_channels), out_channels),
nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01),
)
else:
self.img_transform = nn.Sequential(
nn.Linear(sum(img_channels), out_channels),
nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01),
)
self.pts_transform = nn.Sequential(
nn.Linear(pts_channels, out_channels),
nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01),
)
if self.fuse_out:
self.fuse_conv = nn.Sequential(
nn.Linear(mid_channels, out_channels),
# For pts the BN is initialized differently by default
# TODO: check whether this is necessary
nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01),
nn.ReLU(inplace=False))
if init_cfg is None:
self.init_cfg = [
dict(type='Xavier', layer='Conv2d', distribution='uniform'),
dict(type='Xavier', layer='Linear', distribution='uniform')
]
def forward(self, img_feats, pts, pts_feats, img_metas):
"""Forward function.
Args:
img_feats (list[torch.Tensor]): Image features.
            pts (list[torch.Tensor]): A batch of points with shape N x 3.
            pts_feats (torch.Tensor): A tensor consisting of point features of the
total batch.
img_metas (list[dict]): Meta information of images.
Returns:
torch.Tensor: Fused features of each point.
"""
img_pts = self.obtain_mlvl_feats(img_feats, pts, img_metas)
img_pre_fuse = self.img_transform(img_pts)
if self.training and self.dropout_ratio > 0:
img_pre_fuse = F.dropout(img_pre_fuse, self.dropout_ratio)
pts_pre_fuse = self.pts_transform(pts_feats)
fuse_out = img_pre_fuse + pts_pre_fuse
if self.activate_out:
fuse_out = F.relu(fuse_out)
if self.fuse_out:
fuse_out = self.fuse_conv(fuse_out)
return fuse_out
def obtain_mlvl_feats(self, img_feats, pts, img_metas):
"""Obtain multi-level features for each point.
Args:
img_feats (list(torch.Tensor)): Multi-scale image features produced
by image backbone in shape (N, C, H, W).
pts (list[torch.Tensor]): Points of each sample.
img_metas (list[dict]): Meta information for each sample.
Returns:
torch.Tensor: Corresponding image features of each point.
"""
if self.lateral_convs is not None:
img_ins = [
lateral_conv(img_feats[i])
for i, lateral_conv in zip(self.img_levels, self.lateral_convs)
]
else:
img_ins = img_feats
img_feats_per_point = []
# Sample multi-level features
for i in range(len(img_metas)):
mlvl_img_feats = []
for level in range(len(self.img_levels)):
mlvl_img_feats.append(
self.sample_single(img_ins[level][i:i + 1], pts[i][:, :3],
img_metas[i]))
mlvl_img_feats = torch.cat(mlvl_img_feats, dim=-1)
img_feats_per_point.append(mlvl_img_feats)
img_pts = torch.cat(img_feats_per_point, dim=0)
return img_pts
def sample_single(self, img_feats, pts, img_meta):
"""Sample features from single level image feature map.
Args:
img_feats (torch.Tensor): Image feature map in shape
(1, C, H, W).
pts (torch.Tensor): Points of a single sample.
img_meta (dict): Meta information of the single sample.
Returns:
torch.Tensor: Single level image features of each point.
"""
# TODO: image transformation also extracted
img_scale_factor = (
pts.new_tensor(img_meta['scale_factor'][:2])
if 'scale_factor' in img_meta.keys() else 1)
img_flip = img_meta['flip'] if 'flip' in img_meta.keys() else False
img_crop_offset = (
pts.new_tensor(img_meta['img_crop_offset'])
if 'img_crop_offset' in img_meta.keys() else 0)
proj_mat = get_proj_mat_by_coord_type(img_meta, self.coord_type)
img_pts = point_sample(
img_meta=img_meta,
img_features=img_feats,
points=pts,
proj_mat=pts.new_tensor(proj_mat),
coord_type=self.coord_type,
img_scale_factor=img_scale_factor,
img_crop_offset=img_crop_offset,
img_flip=img_flip,
img_pad_shape=img_meta['input_shape'][:2],
img_shape=img_meta['img_shape'][:2],
aligned=self.aligned,
padding_mode=self.padding_mode,
align_corners=self.align_corners,
)
return img_pts
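# Illustrative sketch (hypothetical usage, not from the upstream repo): a minimal,
# self-contained demo of the normalize-to-[-1, 1] plus F.grid_sample pattern that
# point_sample() relies on. Shapes and values below are made up; only torch
# (already imported above) is assumed.
if __name__ == '__main__':
    feats = torch.rand(1, 8, 32, 48)  # 1 x C x H x W image features
    pix = torch.rand(100, 2) * torch.tensor([48.0, 32.0])  # N x 2 pixel coords (x, y)
    h, w = 32, 48
    grid = torch.stack([pix[:, 0] / w * 2 - 1,  # x normalized to [-1, 1]
                        pix[:, 1] / h * 2 - 1],  # y normalized to [-1, 1]
                       dim=1).view(1, 1, -1, 2)  # Nx2 -> 1x1xNx2
    sampled = F.grid_sample(feats, grid, mode='bilinear',
                            padding_mode='zeros', align_corners=True)
    print(sampled.squeeze().t().shape)  # torch.Size([100, 8]): one feature row per point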
|
94f16eca0ddd298e209160333df40f5518e57064
|
6189f34eff2831e3e727cd7c5e43bc5b591adffc
|
/common/util/remote_base.py
|
c17253b1c5da3d0265357fb7b8464c0a030fff66
|
[
"BSD-3-Clause"
] |
permissive
|
fake-name/ReadableWebProxy
|
24603660b204a9e7965cfdd4a942ff62d7711e27
|
ca2e086818433abc08c014dd06bfd22d4985ea2a
|
refs/heads/master
| 2023-09-04T03:54:50.043051
| 2023-08-26T16:08:46
| 2023-08-26T16:08:46
| 39,611,770
| 207
| 20
|
BSD-3-Clause
| 2023-09-11T15:48:15
| 2015-07-24T04:30:43
|
Python
|
UTF-8
|
Python
| false
| false
| 2,973
|
py
|
remote_base.py
|
import dill
import logging
import urllib.parse
import socket
import traceback
import threading
import multiprocessing
import queue
import time
import json
import abc
import mimetypes
import re
import bs4
import copy
import sys
import base64
import urllib.request
import urllib.error
import WebRequest
class RpcBaseClass():
logname = "Main.RemoteExec.Base"
def __init__(self, wg, cwg):
self.out_buffer = []
self.local_logger = logging.getLogger(self.logname)
self.wg = wg
self.cwg = cwg
self.__install_logproxy()
self.log.info("RemoteExecClass Instantiated")
def _go(self, *args, **kwargs):
raise RuntimeError
return "What?"
def __install_logproxy(self):
# pylint: disable=W0212
class LogProxy():
def __init__(self, parent_logger, log_prefix):
self.parent_logger = parent_logger
self.log_prefix = log_prefix
def debug(self, msg, *args):
self.parent_logger._debug (" [{}] -> ".format(self.log_prefix) + msg, *args)
def info(self, msg, *args):
self.parent_logger._info (" [{}] -> ".format(self.log_prefix) + msg, *args)
def error(self, msg, *args):
self.parent_logger._error (" [{}] -> ".format(self.log_prefix) + msg, *args)
def critical(self, msg, *args):
self.parent_logger._critical(" [{}] -> ".format(self.log_prefix) + msg, *args)
def warning(self, msg, *args):
self.parent_logger._warning (" [{}] -> ".format(self.log_prefix) + msg, *args)
def warn(self, msg, *args):
self.parent_logger._warning (" [{}] -> ".format(self.log_prefix) + msg, *args)
self.wg.log = LogProxy(self, "WebGet")
self.cwg.log = LogProxy(self, "CWebGet")
self.log = LogProxy(self, "MainRPCAgent")
def _debug(self, msg, *args):
tmp = self.logname + " [DEBUG] ->" + msg % args
self.local_logger.debug(tmp)
self.out_buffer.append(tmp)
def _info(self, msg, *args):
tmp = self.logname + " [INFO] ->" + msg % args
self.local_logger.info(tmp)
self.out_buffer.append(tmp)
def _error(self, msg, *args):
tmp = self.logname + " [ERROR] ->" + msg % args
self.local_logger.error(tmp)
self.out_buffer.append(tmp)
def _critical(self, msg, *args):
tmp = self.logname + " [CRITICAL] ->" + msg % args
self.local_logger.critical(tmp)
self.out_buffer.append(tmp)
def _warning(self, msg, *args):
tmp = self.logname + " [WARNING] ->" + msg % args
self.local_logger.warning(tmp)
self.out_buffer.append(tmp)
def go(self, *args, **kwargs):
print("RPC Running!")
print("Args, kwargs: ", (args, kwargs))
try:
ret = self._go(*args, **kwargs) # pylint: disable=W0212
ret = (self.out_buffer, ret)
print("RPC Done! Ret: ", ret)
return ret
except Exception as e:
import sys
log_txt = '\n '.join(self.out_buffer)
exc_message = '{}\nLog report:\n {}'.format(str(e), log_txt)
rebuilt = type(e)(exc_message).with_traceback(sys.exc_info()[2])
rebuilt.log_data = self.out_buffer
print("RPC encountered error!")
raise rebuilt
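# Illustrative sketch (hypothetical, not part of the upstream module): the exception
# rebuilding idiom used in go(), in miniature. The new exception keeps the original
# traceback via with_traceback() and carries the buffered log lines as an attribute.
if __name__ == '__main__':
    buffered = ["step 1 ok", "step 2 failed"]
    try:
        raise ValueError("boom")
    except Exception as e:
        rebuilt = type(e)('{}\nLog report:\n {}'.format(e, '\n '.join(buffered)))
        rebuilt = rebuilt.with_traceback(sys.exc_info()[2])
        rebuilt.log_data = buffered
        print(type(rebuilt).__name__, rebuilt.log_data)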
|
ba762ff9c700b4c5168459ff4d14ebde4ff76be2
|
61673ab9a42f7151de7337608c442fa6247f13bb
|
/__scraping__/nba.com/main.py
|
4864c1faec41a89d3af8d6cb9d0cc6af6c4ad08b
|
[
"MIT"
] |
permissive
|
furas/python-examples
|
22d101670ecd667a29376d7c7d7d86f8ec71f6cf
|
95cb53b664f312e0830f010c0c96be94d4a4db90
|
refs/heads/master
| 2022-08-23T23:55:08.313936
| 2022-08-01T14:48:33
| 2022-08-01T14:48:33
| 45,575,296
| 176
| 91
|
MIT
| 2021-02-17T23:33:37
| 2015-11-04T23:54:32
|
Python
|
UTF-8
|
Python
| false
| false
| 1,341
|
py
|
main.py
|
#!/usr/bin/env python3
# date: 2019.11.19,
# https://stackoverflow.com/questions/58938589/python-scrape-nba-tracking-drives-data
# date: 2020.08.15
import requests
headers = {
'User-Agent': 'Mozilla/5.0',
#'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:72.0) Gecko/20100101 Firefox/72.0',
'Referer': 'https://stats.nba.com/players/drives/',
#'Accept': 'application/json, text/plain, */*',
'x-nba-stats-origin': 'stats',
'x-nba-stats-token': 'true',
}
url = 'https://stats.nba.com/stats/leaguedashptstats'
params = {
'College': '',
'Conference': '',
'Country': '',
'DateFrom': '',
'DateTo': '',
'Division': '',
'DraftPick': '',
'DraftYear': '',
'GameScope': '',
'Height': '',
'LastNGames': '0',
'LeagueID': '00',
'Location': '',
'Month': '0',
'OpponentTeamID': '0',
'Outcome': '',
'PORound': '0',
'PerMode': 'PerGame',
'PlayerExperience': '',
'PlayerOrTeam': 'Player',
'PlayerPosition': '',
'PtMeasureType': 'Drives',
'Season': '2019-20',
'SeasonSegment': '',
'SeasonType': 'Regular Season',
'StarterBench': '',
'TeamID': '0',
'VsConference': '',
'VsDivision': '',
'Weight': '',
}
r = requests.get(url, headers=headers, params=params)
#print(r.request.url)
data = r.json()
print(data)
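# Hypothetical follow-up (not in the original script): assuming the response keeps the
# usual stats.nba.com layout of {"resultSets": [{"headers": [...], "rowSet": [[...], ...]}]},
# the rows can be paired with their column names like this; adjust the keys if the
# layout differs.
result_set = data.get('resultSets', [{}])[0]
rows = [dict(zip(result_set.get('headers', []), row)) for row in result_set.get('rowSet', [])]
print(len(rows))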
|
a553f3af2c137ac5f649e461c108bf146a5144f0
|
9be283dfdcc52d56066dc54743e06c618a7c14ce
|
/humans/tasks.py
|
004c8926d2a8af2746cc53150625fc01b095947a
|
[
"MIT"
] |
permissive
|
whitesmith/hawkpost
|
e5dd1d18f5f8197b138720917a4c4e6bc83d96e0
|
6a5b3ac532fbacb603d9daf7364ed963e4d5bfe5
|
refs/heads/master
| 2023-08-12T17:47:38.058926
| 2023-05-14T20:55:24
| 2023-05-14T20:55:24
| 55,302,159
| 982
| 68
|
MIT
| 2023-08-07T04:46:35
| 2016-04-02T15:38:13
|
Python
|
UTF-8
|
Python
| false
| false
| 4,583
|
py
|
tasks.py
|
from celery import shared_task
from celery.utils.log import get_task_logger
from django.core.mail import EmailMultiAlternatives
from django.conf import settings
from django.db.models import Q
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from .models import User, Notification
from .utils import key_state
import requests
logger = get_task_logger(__name__)
def fetch_key(url):
res = requests.get(url)
begin = res.text.find("-----BEGIN PGP PUBLIC KEY BLOCK-----")
end = res.text.find("-----END PGP PUBLIC KEY BLOCK-----")
if 200 <= res.status_code < 300 and begin >= 0 and end > begin:
return res.text[begin:end + 34]
else:
raise ValueError(_('The Url provided does not contain a public key'))
def send_email(user, subject, template):
content = render_to_string(template, context={"user": user})
email = EmailMultiAlternatives(subject, content,
settings.DEFAULT_FROM_EMAIL,
[user.email])
email.send()
@shared_task(ignore_result=True)
def update_public_keys():
users = User.objects.exclude(
Q(keyserver_url__isnull=True) | Q(keyserver_url__exact=''))
logger.info(_('Start updating user keys'))
for user in users:
logger.info(_('Working on user: {}').format(user.email))
logger.info(_('URL: {}').format(user.keyserver_url))
try:
key = fetch_key(user.keyserver_url)
        except Exception:
logger.error(_('Unable to fetch new key'))
continue
# Check key
fingerprint, *state = key_state(key)
if state[0] in ["expired", "revoked"]:
# Email user and disable/remove key
send_email(user, _('Hawkpost: {} key').format(state[0]),
"humans/emails/key_{}.txt".format(state[0]))
user.fingerprint = ""
user.public_key = ""
user.keyserver_url = ""
user.save()
elif state[0] == "invalid":
# Alert the user and remove keyserver_url
send_email(user,
_('Hawkpost: Keyserver Url providing an invalid key'),
"humans/emails/key_invalid.txt")
user.keyserver_url = ""
user.save()
elif fingerprint != user.fingerprint:
# Email user and remove the keyserver url
send_email(user, _('Hawkpost: Fingerprint mismatch'),
"humans/emails/fingerprint_changed.txt")
user.keyserver_url = ""
user.save()
elif state[0] == "valid":
user.public_key = key
user.save()
logger.info(_('Finished Updating user keys'))
@shared_task(ignore_result=True)
def validate_public_keys():
users = User.objects.exclude(
Q(public_key__isnull=True) | Q(public_key__exact=''))
logger.info(_('Start validating user keys'))
for user in users:
logger.info(_('Working on user: {}').format(user.email))
key = user.public_key
# Check key
fingerprint, *state = key_state(key)
if state[0] == "expired":
# Email user and disable/remove key
send_email(user, _('Hawkpost: {} key').format(state[0]),
"humans/emails/key_{}.txt".format(state[0]))
user.fingerprint = ""
user.public_key = ""
user.save()
elif state[0] == "valid":
# Checks if key is about to expire
days_to_expire = state[1]
if days_to_expire == 7 or days_to_expire == 1:
# Warns user if key about to expire
send_email(user,
_('Hawkpost: Key will expire in {} day(s)').format(days_to_expire),
"humans/emails/key_will_expire.txt")
@shared_task
def send_email_notification(subject, body, email):
email = EmailMultiAlternatives(subject, body,
settings.DEFAULT_FROM_EMAIL,
[email])
email.send()
@shared_task
def enqueue_email_notifications(notification_id, group_id):
notification = Notification.objects.get(id=notification_id)
if group_id:
users = User.objects.filter(groups__id=group_id)
else:
users = User.objects.all()
for user in users:
send_email_notification.delay(notification.subject,
notification.body,
user.email)
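# Illustrative, dependency-free sketch (hypothetical, not part of the upstream module):
# it reproduces the slicing done by fetch_key() above. The +34 offset is simply
# len("-----END PGP PUBLIC KEY BLOCK-----"), so the end marker stays in the result.
if __name__ == '__main__':
    sample = ("header noise\n"
              "-----BEGIN PGP PUBLIC KEY BLOCK-----\nbase64 data...\n"
              "-----END PGP PUBLIC KEY BLOCK-----\ntrailing noise")
    begin = sample.find("-----BEGIN PGP PUBLIC KEY BLOCK-----")
    end = sample.find("-----END PGP PUBLIC KEY BLOCK-----")
    print(sample[begin:end + 34])  # prints the block including the END marker line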
|
46f513d1f69b1db0e113bc6e9c97aa514dd38caf
|
9c54b9ea3e9fe208457bf64ad53eba8889f4b057
|
/Problem014/Python/solution_slow_1.py
|
bab288d57bada7296cfbc7991becad102a8a2af2
|
[
"MIT"
] |
permissive
|
DestructHub/ProjectEuler
|
e0d77c02f0646a85d09af64127e92ac907ebad2a
|
efba582f976cd59748566c19799d84984c77ea61
|
refs/heads/master
| 2022-12-21T13:08:00.128200
| 2021-10-06T12:47:48
| 2022-12-15T20:33:12
| 36,625,177
| 179
| 87
|
MIT
| 2022-12-15T20:33:14
| 2015-05-31T22:36:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,164
|
py
|
solution_slow_1.py
|
#!/usr/bin/env python
# coding=utf-8
# Python Script
#
# Copyleft © Manoel Vilela
#
#
# problem14.py dumb solution (not efficient)
"""
Longest Collatz sequence
Problem 14
The following iterative sequence is defined for the set of positive integers:
n → n/2 (n is even)
n → 3n + 1 (n is odd)
Using the rule above and starting with 13, we generate the following sequence:
13 → 40 → 20 → 10 → 5 → 16 → 8 → 4 → 2 → 1
It can be seen that this sequence
(starting at 13 and finishing at 1) contains 10 terms.
Although it has not been proved yet (Collatz Problem),
it is thought that all starting numbers finish at 1.
Which starting number, under one million, produces the longest chain?
NOTE: Once the chain starts the terms are allowed to go above one million.
"""
def sequence(n):
terms = 1
while n > 1:
if n % 2 == 0:
n = n / 2
else:
n = 3 * n + 1
terms += 1
return terms
def answer():
most = 0
i = 1
while i < 10 ** 6:
s = sequence(i)
if s > most:
most = s
value = i
i += 1
return value
print(answer())
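# Hypothetical faster variant (not part of the original "dumb" solution): memoize the
# chain length so shared tails of Collatz sequences are computed only once. Only the
# standard library is used; the rules are the same n -> n/2 (even), n -> 3n + 1 (odd).
from functools import lru_cache
@lru_cache(maxsize=None)
def sequence_cached(n):
    if n <= 1:
        return 1
    return 1 + sequence_cached(n // 2 if n % 2 == 0 else 3 * n + 1)
# Uncomment to compare with answer() above (same result, far fewer iterations):
# print(max(range(1, 10 ** 6), key=sequence_cached))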
|
502a71b200baa128df18c16019d8d5755e78899a
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/intentions/PyConvertToFStringIntentionTest/percentOperatorWrapLambdaInParentheses_after.py
|
387490817aab87d35663fd3e330e419db57df3a3
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 20
|
py
|
percentOperatorWrapLambdaInParentheses_after.py
|
f'{(lambda: None)}'
|
02fb2a1fd8db20be76f00bafa73f711b266dd59b
|
66bc394ad0aeb94298c9b0a0d16812d5408332e9
|
/tests/samplers/test_batch.py
|
05b0cf0b3a8a8e15f051ba8e98a80464006f29dd
|
[
"MIT"
] |
permissive
|
microsoft/torchgeo
|
e058a74ef51ba29aefd6ba8f0eb5e6070db310a7
|
29985861614b3b93f9ef5389469ebb98570de7dd
|
refs/heads/main
| 2023-08-20T01:11:41.549866
| 2023-08-18T22:58:31
| 2023-08-18T22:58:31
| 369,428,935
| 1,724
| 225
|
MIT
| 2023-09-14T20:33:24
| 2021-05-21T05:58:56
|
Python
|
UTF-8
|
Python
| false
| false
| 5,453
|
py
|
test_batch.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import math
from collections.abc import Iterator
from itertools import product
import pytest
from _pytest.fixtures import SubRequest
from rasterio.crs import CRS
from torch.utils.data import DataLoader
from torchgeo.datasets import BoundingBox, GeoDataset, stack_samples
from torchgeo.samplers import BatchGeoSampler, RandomBatchGeoSampler, Units
class CustomBatchGeoSampler(BatchGeoSampler):
def __init__(self) -> None:
pass
def __iter__(self) -> Iterator[list[BoundingBox]]:
for i in range(len(self)):
yield [BoundingBox(j, j, j, j, j, j) for j in range(len(self))]
def __len__(self) -> int:
return 2
class CustomGeoDataset(GeoDataset):
def __init__(self, crs: CRS = CRS.from_epsg(3005), res: float = 10) -> None:
super().__init__()
self._crs = crs
self.res = res
def __getitem__(self, query: BoundingBox) -> dict[str, BoundingBox]:
return {"index": query}
class TestBatchGeoSampler:
@pytest.fixture(scope="class")
def dataset(self) -> CustomGeoDataset:
ds = CustomGeoDataset()
ds.index.insert(0, (0, 100, 200, 300, 400, 500))
return ds
@pytest.fixture(scope="function")
def sampler(self) -> CustomBatchGeoSampler:
return CustomBatchGeoSampler()
def test_iter(self, sampler: CustomBatchGeoSampler) -> None:
expected = [BoundingBox(0, 0, 0, 0, 0, 0), BoundingBox(1, 1, 1, 1, 1, 1)]
assert next(iter(sampler)) == expected
def test_len(self, sampler: CustomBatchGeoSampler) -> None:
assert len(sampler) == 2
@pytest.mark.slow
@pytest.mark.parametrize("num_workers", [0, 1, 2])
def test_dataloader(
self,
dataset: CustomGeoDataset,
sampler: CustomBatchGeoSampler,
num_workers: int,
) -> None:
dl = DataLoader(
dataset,
batch_sampler=sampler,
num_workers=num_workers,
collate_fn=stack_samples,
)
for _ in dl:
continue
def test_abstract(self, dataset: CustomGeoDataset) -> None:
with pytest.raises(TypeError, match="Can't instantiate abstract class"):
BatchGeoSampler(dataset) # type: ignore[abstract]
class TestRandomBatchGeoSampler:
@pytest.fixture(scope="class")
def dataset(self) -> CustomGeoDataset:
ds = CustomGeoDataset()
ds.index.insert(0, (0, 100, 200, 300, 400, 500))
ds.index.insert(1, (0, 100, 200, 300, 400, 500))
return ds
@pytest.fixture(
scope="function",
params=product([3, 4.5, (2, 2), (3, 4.5), (4.5, 3)], [Units.PIXELS, Units.CRS]),
)
def sampler(
self, dataset: CustomGeoDataset, request: SubRequest
) -> RandomBatchGeoSampler:
size, units = request.param
return RandomBatchGeoSampler(
dataset, size, batch_size=2, length=10, units=units
)
def test_iter(self, sampler: RandomBatchGeoSampler) -> None:
for batch in sampler:
for query in batch:
assert sampler.roi.minx <= query.minx <= query.maxx <= sampler.roi.maxx
                assert sampler.roi.miny <= query.miny <= query.maxy <= sampler.roi.maxy
assert sampler.roi.mint <= query.mint <= query.maxt <= sampler.roi.maxt
assert math.isclose(query.maxx - query.minx, sampler.size[1])
assert math.isclose(query.maxy - query.miny, sampler.size[0])
assert math.isclose(
query.maxt - query.mint, sampler.roi.maxt - sampler.roi.mint
)
def test_len(self, sampler: RandomBatchGeoSampler) -> None:
assert len(sampler) == sampler.length // sampler.batch_size
def test_roi(self, dataset: CustomGeoDataset) -> None:
roi = BoundingBox(0, 50, 200, 250, 400, 450)
sampler = RandomBatchGeoSampler(dataset, 2, 2, 10, roi=roi)
for batch in sampler:
for query in batch:
assert query in roi
def test_small_area(self) -> None:
ds = CustomGeoDataset(res=1)
ds.index.insert(0, (0, 10, 0, 10, 0, 10))
ds.index.insert(1, (20, 21, 20, 21, 20, 21))
sampler = RandomBatchGeoSampler(ds, 2, 2, 10)
for _ in sampler:
continue
def test_point_data(self) -> None:
ds = CustomGeoDataset()
ds.index.insert(0, (0, 0, 0, 0, 0, 0))
ds.index.insert(1, (1, 1, 1, 1, 1, 1))
sampler = RandomBatchGeoSampler(ds, 0, 2, 10)
for _ in sampler:
continue
def test_weighted_sampling(self) -> None:
ds = CustomGeoDataset()
ds.index.insert(0, (0, 0, 0, 0, 0, 0))
ds.index.insert(1, (0, 10, 0, 10, 0, 10))
sampler = RandomBatchGeoSampler(ds, 1, 2, 10)
for batch in sampler:
for bbox in batch:
assert bbox == BoundingBox(0, 10, 0, 10, 0, 10)
@pytest.mark.slow
@pytest.mark.parametrize("num_workers", [0, 1, 2])
def test_dataloader(
self,
dataset: CustomGeoDataset,
sampler: RandomBatchGeoSampler,
num_workers: int,
) -> None:
dl = DataLoader(
dataset,
batch_sampler=sampler,
num_workers=num_workers,
collate_fn=stack_samples,
)
for _ in dl:
continue
|
dbfca3c691fa08ccb8763d3783a35cc4a516097e
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/junos/tests/ShowOspfDatabaseOpaqueArea/cli/equal/golden_output_expected.py
|
72ca66a2de838686815f0e0f2233d6bc92676191
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 4,811
|
py
|
golden_output_expected.py
|
expected_output = {
"ospf-database-information": {
"ospf-area-header": {"ospf-area": "0.0.0.8"},
"ospf-database": [
{
"advertising-router": "10.49.194.125",
"age": "359",
"checksum": "0x6f5d",
"lsa-id": "10.1.0.1",
"lsa-length": "28",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80000002",
},
{
"advertising-router": "10.49.194.127",
"age": "595",
"checksum": "0x7751",
"lsa-id": "10.1.0.1",
"lsa-length": "28",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80000002",
},
{
"advertising-router": "10.34.2.250",
"age": "227",
"checksum": "0xa11a",
"lsa-id": "10.1.0.1",
"lsa-length": "28",
"lsa-type": "OpaqArea",
"options": "0x22",
"our-entry": True,
"sequence-number": "0x80000003",
},
{
"advertising-router": "10.34.3.252",
"age": "754",
"checksum": "0xad09",
"lsa-id": "10.1.0.1",
"lsa-length": "28",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80000002",
},
{
"advertising-router": "10.169.14.240",
"age": "2210",
"checksum": "0x35ae",
"lsa-id": "10.1.0.1",
"lsa-length": "28",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80000002",
},
{
"advertising-router": "10.49.194.125",
"age": "1168",
"checksum": "0x27f4",
"lsa-id": "10.1.0.3",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80000001",
},
{
"advertising-router": "10.49.194.127",
"age": "1441",
"checksum": "0xa27",
"lsa-id": "10.1.0.3",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80000001",
},
{
"advertising-router": "10.34.2.250",
"age": "934",
"checksum": "0x2ac3",
"lsa-id": "10.1.0.3",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"our-entry": True,
"sequence-number": "0x80000002",
},
{
"advertising-router": "10.34.3.252",
"age": "1815",
"checksum": "0x90dd",
"lsa-id": "10.1.0.3",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80000001",
},
{
"advertising-router": "10.169.14.240",
"age": "597",
"checksum": "0x5697",
"lsa-id": "10.1.0.3",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80000002",
},
{
"advertising-router": "10.34.2.250",
"age": "1814",
"checksum": "0xb7b3",
"lsa-id": "10.1.0.4",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"our-entry": True,
"sequence-number": "0x80000001",
},
{
"advertising-router": "10.34.3.252",
"age": "1167",
"checksum": "0x412",
"lsa-id": "10.1.0.4",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80000001",
},
{
"advertising-router": "10.169.14.240",
"age": "1440",
"checksum": "0x58d5",
"lsa-id": "10.1.0.4",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80000001",
},
],
}
}
|