| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
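Each record below follows this schema: the repository metadata fields, the content field (the Python source), and the per-file signal values. As a minimal sketch of streaming rows with this schema (the dataset id here is a placeholder, not a real dataset name):

```python
# Minimal sketch: stream rows matching the schema above and inspect a few
# fields. "org/python-quality-signals" is a hypothetical dataset id.
from datasets import load_dataset

ds = load_dataset("org/python-quality-signals", split="train", streaming=True)
for row in ds:
    print(row["max_stars_repo_name"], row["size"], row["lang"])
    print(row["qsc_code_frac_chars_dupe_5grams_quality_signal"])
    print(row["content"][:120])  # first 120 characters of the source file
    break
```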
| column | value |
|---|---|
| hexsha | 437c42fd9708572ca32db3dd04de75e0b264c088 |
| size | 1,361 |
| ext | py |
| lang | Python |

| column | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | calculators/credit_card_calculator.py | calculators/credit_card_calculator.py | calculators/credit_card_calculator.py |
| repo_name | wanderindev/financial-calculator-backend | wanderindev/financial-calculator-backend | wanderindev/financial-calculator-backend |
| repo_head_hexsha | ad7e736c858298c240eb9af52fbadcb02c693968 | ad7e736c858298c240eb9af52fbadcb02c693968 | ad7e736c858298c240eb9af52fbadcb02c693968 |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | 2 | null | 2 |
| event_min_datetime | 2021-01-08T04:26:54.000Z | null | 2019-06-06T19:36:17.000Z |
| event_max_datetime | 2022-02-04T22:22:27.000Z | null | 2020-05-20T12:37:08.000Z |

content:
from .calculator import Calculator
# noinspection PyTypeChecker
class CreditCardCalculator(Calculator):
def __init__(self, **kwargs):
super(CreditCardCalculator, self).__init__(**kwargs)
self.cc_debt = self.get_float(kwargs.get("cc_debt", 0))
self.add_c = self.get_float(kwargs.get("add_c", 0))
self.min_p_perc = self.get_float(kwargs.get("min_p_perc", 0))
self.min_p = self.get_float(kwargs.get("min_p", 0))
self.fix_p = self.get_float(kwargs.get("fix_p", 0))
self.payments = []
self.payments_p = []
    def get_payment_cc(self) -> float:
        # Per-period interest rate and minimum-payment fraction (inputs are percentages).
        _rate = self.rate / (100 * self.freq)
        _min_p_perc = self.min_p_perc / 100
_min_p = self.min_p
_fix_p = self.fix_p
b = self.cc_debt
per = 0
        # Amortize period by period until the balance is cleared. Each payment is
        # the largest of the percentage minimum, the absolute minimum and the
        # fixed payment.
        while b > 0:
            i = b * _rate
            p = max(b * _min_p_perc, _min_p, _fix_p)
            # Final period: cap the payment at the remaining balance plus interest.
            if b + i < p:
                p = b + i
b += i - p
per += 1
self.periods.append(per)
self.payments.append(p)
self.payments_p.append(p - i)
self.interests.append(i)
self.balances.append(b)
return self.payments[0]
def get_rate_cc(self) -> float:
return self.rate + self.add_c * 1200 / self.cc_debt
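To exercise the class above outside its home repo, a stand-in for the missing Calculator base is enough; the stub below is an assumption (the real base class in wanderindev/financial-calculator-backend is not part of this record) and would be defined in place of the relative import:

```python
# Assumed stub for the Calculator base class, which this record does not
# include. It only provides what get_payment_cc reads: rate, freq,
# get_float, and the periods/interests/balances lists.
class Calculator:
    def __init__(self, **kwargs):
        self.rate = float(kwargs.get("rate", 0))   # annual rate in percent
        self.freq = float(kwargs.get("freq", 12))  # payments per year
        self.periods, self.interests, self.balances = [], [], []

    @staticmethod
    def get_float(value):
        try:
            return float(value)
        except (TypeError, ValueError):
            return 0.0

calc = CreditCardCalculator(rate=18, freq=12, cc_debt=1000, min_p_perc=3, min_p=25)
print("first payment:", round(calc.get_payment_cc(), 2))
print("periods to pay off:", len(calc.periods))
```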
| column | value |
|---|---|
| avg_line_length | 30.244444 |
| max_line_length | 70 |
| alphanum_fraction | 0.543718 |
| qsc_code_num_words_quality_signal | 186 |
| qsc_code_num_chars_quality_signal | 1,361 |
| qsc_code_mean_word_length_quality_signal | 3.688172 |
| qsc_code_frac_words_unique_quality_signal | 0.22043 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.058309 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.087464 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.131195 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.167638 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.106414 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0.072886 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 |
| qsc_code_frac_chars_digital_quality_signal | 0.021135 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.339456 |
| qsc_code_size_file_byte_quality_signal | 1,361 |
| qsc_code_num_lines_quality_signal | 44 |
| qsc_code_num_chars_line_max_quality_signal | 71 |
| qsc_code_num_chars_line_mean_quality_signal | 30.931818 |
| qsc_code_frac_chars_alphabet_quality_signal | 0.741935 |
| qsc_code_frac_chars_comments_quality_signal | 0.019104 |
| qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_frac_lines_dupe_lines_quality_signal | 0 |
| qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_lines_long_string_quality_signal | 0 |
| qsc_code_frac_chars_string_length_quality_signal | 0.024825 |
| qsc_code_frac_chars_long_word_length_quality_signal | 0 |
| qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_lines_assert_quality_signal | 0 |
| qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | 0.090909 |
| qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_codepython_frac_lines_pass_quality_signal | 0 |
| qsc_codepython_frac_lines_import_quality_signal | 0.030303 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | 0.030303 |
| qsc_codepython_score_lines_no_logic_quality_signal | 0.212121 |
| qsc_codepython_frac_lines_print_quality_signal | 0 |
| raw qsc_code_* / qsc_codepython_* (41 columns) | all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null |
| effective | 1 |
| hits | 0 |
| column | value |
|---|---|
| hexsha | 43826b793ab889bf34bea8a88631da20426a6acb |
| size | 3,880 |
| ext | py |
| lang | Python |

| column | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | fedex/services/availability_commitment_service.py | fedex/services/availability_commitment_service.py | fedex/services/availability_commitment_service.py |
| repo_name | miczone/python-fedex | miczone/python-fedex | miczone/python-fedex |
| repo_head_hexsha | 1a17b45753b16b2551b0b8ba2c6aa65be8e73931 | 1a17b45753b16b2551b0b8ba2c6aa65be8e73931 | 1a17b45753b16b2551b0b8ba2c6aa65be8e73931 |
| repo_licenses | ["BSD-3-Clause"] | ["BSD-3-Clause"] | ["BSD-3-Clause"] |
| count | null | null | null |
| event_min_datetime | null | null | null |
| event_max_datetime | null | null | null |

content:
"""
Service Availability and Commitment Module
This package contains the shipping methods defined by Fedex's
ValidationAvailabilityAndCommitmentService WSDL file. Each is encapsulated in a class for
easy access. For more details on each, refer to the respective class's
documentation.
"""
import datetime
from ..base_service import FedexBaseService
class FedexAvailabilityCommitmentRequest(FedexBaseService):
"""
    This class allows you to validate service availability.
"""
def __init__(self, config_obj, *args, **kwargs):
"""
@type config_obj: L{FedexConfig}
@param config_obj: A valid FedexConfig object.
"""
self._config_obj = config_obj
# Holds version info for the VersionId SOAP object.
self._version_info = {
'service_id': 'vacs',
'major': '14',
'intermediate': '0',
'minor': '0'
}
self.CarrierCode = None
"""@ivar: Carrier Code Default to Fedex (FDXE), or can bbe FDXG."""
self.Origin = None
"""@ivar: Holds Origin Address WSDL object."""
self.Destination = None
"""@ivar: Holds Destination Address WSDL object."""
self.ShipDate = None
"""@ivar: Ship Date date WSDL object."""
self.Service = None
"""@ivar: Service type, if set to None will get all available service information."""
self.Packaging = None
"""@ivar: Type of packaging to narrow down available shipping options or defaults to YOUR_PACKAGING."""
# Call the parent FedexBaseService class for basic setup work.
# Shortened the name of the wsdl, otherwise suds did not load it properly.
# Suds throws the following error when using the long file name from FedEx:
#
# File "/Library/Python/2.7/site-packages/suds/wsdl.py", line 878, in resolve
# raise Exception("binding '%s', not-found" % p.binding)
# Exception: binding 'ns:ValidationAvailabilityAndCommitmentServiceSoapBinding', not-found
super(FedexAvailabilityCommitmentRequest, self).__init__(
self._config_obj, 'ValidationAvailabilityAndCommitmentService_v14.wsdl', *args, **kwargs)
def _prepare_wsdl_objects(self):
"""
Create the data structure and get it ready for the WSDL request.
"""
self.CarrierCode = 'FDXE'
self.Origin = self.client.factory.create('Address')
self.Destination = self.client.factory.create('Address')
self.ShipDate = datetime.date.today().isoformat()
self.Service = None
self.Packaging = 'YOUR_PACKAGING'
def _assemble_and_send_request(self):
"""
Fires off the Fedex request.
@warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(),
WHICH RESIDES ON FedexBaseService AND IS INHERITED.
"""
# We get an exception like this when specifying an IntegratorId:
# suds.TypeNotFound: Type not found: 'IntegratorId'
# Setting it to None does not seem to appease it.
del self.ClientDetail.IntegratorId
self.logger.debug(self.WebAuthenticationDetail)
self.logger.debug(self.ClientDetail)
self.logger.debug(self.TransactionDetail)
self.logger.debug(self.VersionId)
# Fire off the query.
return self.client.service.serviceAvailability(
WebAuthenticationDetail=self.WebAuthenticationDetail,
ClientDetail=self.ClientDetail,
TransactionDetail=self.TransactionDetail,
Version=self.VersionId,
Origin=self.Origin,
Destination=self.Destination,
ShipDate=self.ShipDate,
CarrierCode=self.CarrierCode,
Service=self.Service,
Packaging=self.Packaging)
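A hedged usage sketch (untested; the credentials are placeholders, and FedexConfig comes from the same python-fedex package this module belongs to):

```python
# Sketch: validate availability for a shipment. All credential values are
# placeholders; send_request() is inherited from FedexBaseService.
from fedex.config import FedexConfig

CONFIG_OBJ = FedexConfig(key='<key>', password='<password>',
                         account_number='<account>', meter_number='<meter>',
                         use_test_server=True)

avc_request = FedexAvailabilityCommitmentRequest(CONFIG_OBJ)
avc_request.Origin.PostalCode = 'M5V3A4'
avc_request.Origin.CountryCode = 'CA'
avc_request.Destination.PostalCode = '27577'
avc_request.Destination.CountryCode = 'US'
avc_request.send_request()
print(avc_request.response)
```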
| column | value |
|---|---|
| avg_line_length | 38.039216 |
| max_line_length | 111 |
| alphanum_fraction | 0.643557 |
| qsc_code_num_words_quality_signal | 413 |
| qsc_code_num_chars_quality_signal | 3,880 |
| qsc_code_mean_word_length_quality_signal | 5.970944 |
| qsc_code_frac_words_unique_quality_signal | 0.435835 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.021898 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.024331 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.030819 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.027575 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.027575 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 |
| qsc_code_frac_chars_digital_quality_signal | 0.003884 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.270103 |
| qsc_code_size_file_byte_quality_signal | 3,880 |
| qsc_code_num_lines_quality_signal | 101 |
| qsc_code_num_chars_line_max_quality_signal | 112 |
| qsc_code_num_chars_line_mean_quality_signal | 38.415842 |
| qsc_code_frac_chars_alphabet_quality_signal | 0.866879 |
| qsc_code_frac_chars_comments_quality_signal | 0.334794 |
| qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_frac_lines_dupe_lines_quality_signal | 0.046512 |
| qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_lines_long_string_quality_signal | 0 |
| qsc_code_frac_chars_string_length_quality_signal | 0.059883 |
| qsc_code_frac_chars_long_word_length_quality_signal | 0.02483 |
| qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_lines_assert_quality_signal | 0 |
| qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | 0.069767 |
| qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_codepython_frac_lines_pass_quality_signal | 0 |
| qsc_codepython_frac_lines_import_quality_signal | 0.046512 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | 0 |
| qsc_codepython_score_lines_no_logic_quality_signal | 0.162791 |
| qsc_codepython_frac_lines_print_quality_signal | 0 |
| raw qsc_code_* / qsc_codepython_* (41 columns) | all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null |
| effective | 1 |
| hits | 0 |
| column | value |
|---|---|
| hexsha | 4385a715a45f63ba193550d4819fe3bbd3dc2013 |
| size | 7,908 |
| ext | py |
| lang | Python |

| column | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | cupy/linalg/product.py | cupy/linalg/product.py | cupy/linalg/product.py |
| repo_name | okapies/cupy | hephaex/cupy | hephaex/cupy |
| repo_head_hexsha | 4e8394e5e0c4e420295cbc36819e8e0f7de90e9d | 5cf50a93bbdebe825337ed7996c464e84b1495ba | 5cf50a93bbdebe825337ed7996c464e84b1495ba |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | 1 | 1 | 1 |
| event_min_datetime | 2021-10-04T21:57:09.000Z | 2019-08-05T09:36:13.000Z | 2022-03-24T13:19:55.000Z |
| event_max_datetime | 2021-10-04T21:57:09.000Z | 2019-08-06T12:03:01.000Z | 2022-03-24T13:19:55.000Z |

content:
import numpy
import six
import cupy
from cupy import core
from cupy import internal
from cupy.linalg.solve import inv
from cupy.util import collections_abc
matmul = core.matmul
def dot(a, b, out=None):
"""Returns a dot product of two arrays.
For arrays with more than one axis, it computes the dot product along the
last axis of ``a`` and the second-to-last axis of ``b``. This is just a
matrix product if the both arrays are 2-D. For 1-D arrays, it uses their
unique axis as an axis to take dot product over.
Args:
a (cupy.ndarray): The left argument.
b (cupy.ndarray): The right argument.
out (cupy.ndarray): Output array.
Returns:
cupy.ndarray: The dot product of ``a`` and ``b``.
.. seealso:: :func:`numpy.dot`
"""
# TODO(okuta): check type
return a.dot(b, out)
def vdot(a, b):
"""Returns the dot product of two vectors.
The input arrays are flattened into 1-D vectors and then it performs inner
product of these vectors.
Args:
a (cupy.ndarray): The first argument.
b (cupy.ndarray): The second argument.
Returns:
cupy.ndarray: Zero-dimensional array of the dot product result.
.. seealso:: :func:`numpy.vdot`
"""
if a.size != b.size:
raise ValueError('Axis dimension mismatch')
if a.dtype.kind == 'c':
a = a.conj()
return core.tensordot_core(a, b, None, 1, 1, a.size, ())
def inner(a, b):
"""Returns the inner product of two arrays.
It uses the last axis of each argument to take sum product.
Args:
a (cupy.ndarray): The first argument.
b (cupy.ndarray): The second argument.
Returns:
cupy.ndarray: The inner product of ``a`` and ``b``.
.. seealso:: :func:`numpy.inner`
"""
a_ndim = a.ndim
b_ndim = b.ndim
if a_ndim == 0 or b_ndim == 0:
return cupy.multiply(a, b)
a_axis = a_ndim - 1
b_axis = b_ndim - 1
if a.shape[-1] != b.shape[-1]:
raise ValueError('Axis dimension mismatch')
if a_axis:
a = cupy.rollaxis(a, a_axis, 0)
if b_axis:
b = cupy.rollaxis(b, b_axis, 0)
ret_shape = a.shape[1:] + b.shape[1:]
k = a.shape[0]
n = a.size // k
m = b.size // k
return core.tensordot_core(a, b, None, n, m, k, ret_shape)
def outer(a, b, out=None):
"""Returns the outer product of two vectors.
The input arrays are flattened into 1-D vectors and then it performs outer
product of these vectors.
Args:
a (cupy.ndarray): The first argument.
b (cupy.ndarray): The second argument.
out (cupy.ndarray): Output array.
Returns:
cupy.ndarray: 2-D array of the outer product of ``a`` and ``b``.
.. seealso:: :func:`numpy.outer`
"""
n = a.size
m = b.size
ret_shape = (n, m)
if out is None:
return core.tensordot_core(a, b, None, n, m, 1, ret_shape)
if out.size != n * m:
raise ValueError('Output array has an invalid size')
if out.flags.c_contiguous:
return core.tensordot_core(a, b, out, n, m, 1, ret_shape)
else:
out[:] = core.tensordot_core(a, b, None, n, m, 1, ret_shape)
return out
def tensordot(a, b, axes=2):
"""Returns the tensor dot product of two arrays along specified axes.
This is equivalent to compute dot product along the specified axes which
are treated as one axis by reshaping.
Args:
a (cupy.ndarray): The first argument.
b (cupy.ndarray): The second argument.
axes:
- If it is an integer, then ``axes`` axes at the last of ``a`` and
the first of ``b`` are used.
- If it is a pair of sequences of integers, then these two
sequences specify the list of axes for ``a`` and ``b``. The
corresponding axes are paired for sum-product.
Returns:
cupy.ndarray: The tensor dot product of ``a`` and ``b`` along the
axes specified by ``axes``.
.. seealso:: :func:`numpy.tensordot`
"""
a_ndim = a.ndim
b_ndim = b.ndim
if a_ndim == 0 or b_ndim == 0:
if axes != 0 and axes != ((), ()):
raise ValueError('An input is zero-dim while axes has dimensions')
return cupy.multiply(a, b)
if isinstance(axes, collections_abc.Sequence):
if len(axes) != 2:
raise ValueError('Axes must consist of two arrays.')
a_axes, b_axes = axes
if numpy.isscalar(a_axes):
a_axes = a_axes,
if numpy.isscalar(b_axes):
b_axes = b_axes,
else:
a_axes = tuple(six.moves.range(a_ndim - axes, a_ndim))
b_axes = tuple(six.moves.range(axes))
sum_ndim = len(a_axes)
if sum_ndim != len(b_axes):
raise ValueError('Axes length mismatch')
for a_axis, b_axis in zip(a_axes, b_axes):
if a.shape[a_axis] != b.shape[b_axis]:
raise ValueError('Axis dimension mismatch')
# Make the axes non-negative
a = _move_axes_to_head(a, [axis % a_ndim for axis in a_axes])
b = _move_axes_to_head(b, [axis % b_ndim for axis in b_axes])
ret_shape = a.shape[sum_ndim:] + b.shape[sum_ndim:]
k = internal.prod(a.shape[:sum_ndim])
# Avoid division by zero: core.tensordot_core returns zeros without
# checking n, m consistency, thus allowing 0-length dimensions to work
n = a.size // k if k != 0 else 0
m = b.size // k if k != 0 else 0
return core.tensordot_core(a, b, None, n, m, k, ret_shape)
def matrix_power(M, n):
"""Raise a square matrix to the (integer) power `n`.
Args:
M (~cupy.ndarray): Matrix to raise by power n.
n (~int): Power to raise matrix to.
Returns:
~cupy.ndarray: Output array.
.. note:: M must be of dtype `float32` or `float64`.
    .. seealso:: :func:`numpy.linalg.matrix_power`
"""
if M.ndim != 2 or M.shape[0] != M.shape[1]:
raise ValueError('input must be a square array')
if not isinstance(n, six.integer_types):
raise TypeError('exponent must be an integer')
if n == 0:
return cupy.identity(M.shape[0], dtype=M.dtype)
elif n < 0:
M = inv(M)
n *= -1
# short-cuts
if n <= 3:
if n == 1:
return M
elif n == 2:
return cupy.matmul(M, M)
else:
return cupy.matmul(cupy.matmul(M, M), M)
# binary decomposition to reduce the number of Matrix
# multiplications for n > 3.
result, Z = None, None
for b in cupy.binary_repr(n)[::-1]:
Z = M if Z is None else cupy.matmul(Z, Z)
if b == '1':
result = Z if result is None else cupy.matmul(result, Z)
return result
def kron(a, b):
"""Returns the kronecker product of two arrays.
Args:
a (~cupy.ndarray): The first argument.
b (~cupy.ndarray): The second argument.
Returns:
~cupy.ndarray: Output array.
.. seealso:: :func:`numpy.kron`
"""
a_ndim = a.ndim
b_ndim = b.ndim
if a_ndim == 0 or b_ndim == 0:
return cupy.multiply(a, b)
ndim = b_ndim
a_shape = a.shape
b_shape = b.shape
if a_ndim != b_ndim:
if b_ndim > a_ndim:
a_shape = (1,) * (b_ndim - a_ndim) + a_shape
else:
b_shape = (1,) * (a_ndim - b_ndim) + b_shape
ndim = a_ndim
axis = ndim - 1
out = core.tensordot_core(a, b, None, a.size, b.size, 1, a_shape + b_shape)
for _ in six.moves.range(ndim):
out = core.concatenate_method(out, axis=axis)
return out
def _move_axes_to_head(a, axes):
    # This function moves the axes of ``a`` to the head of the shape.
for idx, axis in enumerate(axes):
if idx != axis:
break
else:
return a
return a.transpose(
axes + [i for i in six.moves.range(a.ndim) if i not in axes])
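The axes handling documented in tensordot above mirrors NumPy; a small NumPy-only check of the two axes forms (no GPU required):

```python
# Sketch: tensordot's two "axes" forms, demonstrated with NumPy, whose
# semantics these cupy routines mirror.
import numpy as np

a = np.arange(24).reshape(2, 3, 4)
b = np.arange(8).reshape(4, 2)

# Integer form: contract the last `axes` axes of a with the first of b.
print(np.tensordot(a, b, axes=1).shape)           # (2, 3, 2)

# Pair-of-sequences form: pair a's axis 0 with b's axis 1 (both length 2).
print(np.tensordot(a, b, axes=([0], [1])).shape)  # (3, 4, 4)
```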
| column | value |
|---|---|
| avg_line_length | 27.175258 |
| max_line_length | 79 |
| alphanum_fraction | 0.592059 |
| qsc_code_num_words_quality_signal | 1,232 |
| qsc_code_num_chars_quality_signal | 7,908 |
| qsc_code_mean_word_length_quality_signal | 3.715909 |
| qsc_code_frac_words_unique_quality_signal | 0.157468 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.052862 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.045872 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.027523 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.37156 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.286588 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0.263652 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0.227829 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0.20817 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0.186326 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 |
| qsc_code_frac_chars_digital_quality_signal | 0.009655 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.292742 |
| qsc_code_size_file_byte_quality_signal | 7,908 |
| qsc_code_num_lines_quality_signal | 290 |
| qsc_code_num_chars_line_max_quality_signal | 80 |
| qsc_code_num_chars_line_mean_quality_signal | 27.268966 |
| qsc_code_frac_chars_alphabet_quality_signal | 0.808868 |
| qsc_code_frac_chars_comments_quality_signal | 0.397319 |
| qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_frac_lines_dupe_lines_quality_signal | 0.1875 |
| qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_lines_long_string_quality_signal | 0 |
| qsc_code_frac_chars_string_length_quality_signal | 0.057788 |
| qsc_code_frac_chars_long_word_length_quality_signal | 0 |
| qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_lines_prompt_comments_quality_signal | 0.003448 |
| qsc_code_frac_lines_assert_quality_signal | 0 |
| qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | 0.0625 |
| qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_codepython_frac_lines_pass_quality_signal | 0 |
| qsc_codepython_frac_lines_import_quality_signal | 0.054688 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | 0 |
| qsc_codepython_score_lines_no_logic_quality_signal | 0.257813 |
| qsc_codepython_frac_lines_print_quality_signal | 0 |
| raw qsc_code_* / qsc_codepython_* (41 columns) | all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null |
| effective | 1 |
| hits | 0 |
| column | value |
|---|---|
| hexsha | 4387549ca0c49a838b5d253586eefe17b1221bbf |
| size | 9,050 |
| ext | py |
| lang | Python |

| column | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | trt_util/common.py | trt_util/common.py | trt_util/common.py |
| repo_name | yihui8776/TensorRT-DETR | yihui8776/TensorRT-DETR | yihui8776/TensorRT-DETR |
| repo_head_hexsha | 1f32e9a2f98e26ec5b2376f9a2695193887430fb | 1f32e9a2f98e26ec5b2376f9a2695193887430fb | 1f32e9a2f98e26ec5b2376f9a2695193887430fb |
| repo_licenses | ["Apache-2.0"] | ["Apache-2.0"] | ["Apache-2.0"] |
| count | null | null | null |
| event_min_datetime | null | null | null |
| event_max_datetime | null | null | null |

content:
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ~~~Medcare AI Lab~~~
# This part of the code is adapted from the official TensorRT samples, with the relevant methods modified.
#
import pycuda.driver as cuda
#https://documen.tician.de/pycuda/driver.html
import pycuda.autoinit
import numpy as np
import tensorrt as trt
from .calibrator import Calibrator
import sys, os
import time
# TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
# TRT_LOGGER = trt.Logger(trt.Logger.INFO)
TRT_LOGGER = trt.Logger()
# Allocate host and device buffers, and create a stream.
class HostDeviceMem(object):
def __init__(self, host_mem, device_mem):
self.host = host_mem
self.device = device_mem
def __str__(self):
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
return self.__str__()
def allocate_buffers(engine):
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
for binding in engine:
size = trt.volume(engine.get_binding_shape(binding)) # <--------- the main diff to v2
dtype = trt.nptype(engine.get_binding_dtype(binding))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
def allocate_buffers_v2(engine):
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
for binding in engine:
size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
dtype = trt.nptype(engine.get_binding_dtype(binding))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
# do inference multi outputs
def do_inference_v2(context, bindings, inputs, outputs, stream, input_tensor):
# Transfer input data to the GPU.
[cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
# Run inference.
context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
# Transfer predictions back from the GPU.
[cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
# Synchronize the stream
stream.synchronize()
# Return only the host outputs.
return [out.host for out in outputs]
# The onnx path is used for Pytorch models.
def build_engine_onnx(model_file,engine_file,FP16=False,verbose=False,dynamic_input=False,batch_size=1):
def get_engine():
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
# with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network,builder.create_builder_config() as config, trt.OnnxParser(network,TRT_LOGGER) as parser:
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, builder.create_builder_config() as config,\
trt.OnnxParser(network,TRT_LOGGER) as parser:
# Workspace size is the maximum amount of memory available to the builder while building an engine.
#builder.max_workspace_size = 6 << 30 # 6G
config.max_workspace_size = (1 << 30) #for trt8
config.max_batch_size = batch_size #for trt8
#builder.max_batch_size = batch_size
if FP16:
print("[INFO] Open FP16 Mode!")
                config.set_flag(trt.BuilderFlag.FP16)  # for trt8
#builder.fp16_mode = True #trt7
with open(model_file, 'rb') as model:
parser.parse(model.read())
if verbose:
print(">"*50)
for error in range(parser.num_errors):
print(parser.get_error(error))
network.get_input(0).shape = [ batch_size, 3, 800, 800 ]
if dynamic_input:
                profile = builder.create_optimization_profile()
profile.set_shape("inputs", (1,3,800,800), (8,3,800,800), (64,3,800,800))
config.add_optimization_profile(profile)
# builder engine
#engine = builder.build_cuda_engine(network) #trt 7
engine = builder.build_engine(network, config) #trt8
print("[INFO] Completed creating Engine!")
with open(engine_file, "wb") as f:
f.write(engine.serialize())
return engine
if os.path.exists(engine_file):
# If a serialized engine exists, use it instead of building an engine.
print("[INFO] Reading engine from file {}".format(engine_file))
with open(engine_file, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
return runtime.deserialize_cuda_engine(f.read())
else:
return get_engine()
# int8 quant
def build_engine_onnx_v2(onnx_file_path="", engine_file_path="",fp16_mode=False, int8_mode=False, \
max_batch_size=1,calibration_stream=None, calibration_table_path="", save_engine=False):
"""Attempts to load a serialized engine if available, otherwise builds a new TensorRT engine and saves it."""
def build_engine(max_batch_size, save_engine):
"""Takes an ONNX file and creates a TensorRT engine to run inference with"""
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(1) as network,\
builder.create_builder_config() as config,trt.OnnxParser(network, TRT_LOGGER) as parser:
# parse onnx model file
if not os.path.exists(onnx_file_path):
quit(f'[Error]ONNX file {onnx_file_path} not found')
print(f'[INFO] Loading ONNX file from path {onnx_file_path}...')
with open(onnx_file_path, 'rb') as model:
print('[INFO] Beginning ONNX file parsing')
parser.parse(model.read())
assert network.num_layers > 0, '[Error] Failed to parse ONNX model. \
Please check if the ONNX model is compatible '
print('[INFO] Completed parsing of ONNX file')
print(f'[INFO] Building an engine from file {onnx_file_path}; this may take a while...')
# build trt engine
# config.max_workspace_size = 2 << 30 # 2GB
builder.max_batch_size = max_batch_size
config.max_workspace_size = 2 << 30 # 2GB
if fp16_mode:
config.set_flag(trt.BuilderFlag.FP16)
if int8_mode:
#builder.int8_mode = int8_mode
config.set_flag(trt.BuilderFlag.INT8)
assert calibration_stream, '[Error] a calibration_stream should be provided for int8 mode'
config.int8_calibrator = Calibrator(calibration_stream, calibration_table_path)
# builder.int8_calibrator = Calibrator(calibration_stream, calibration_table_path)
print('[INFO] Int8 mode enabled')
#engine = builder.build_cuda_engine(network)
engine = builder.build_engine(network, config)
if engine is None:
print('[INFO] Failed to create the engine')
return None
print("[INFO] Completed creating the engine")
if save_engine:
with open(engine_file_path, "wb") as f:
f.write(engine.serialize())
return engine
if os.path.exists(engine_file_path):
# If a serialized engine exists, load it instead of building a new one.
print(f"[INFO] Reading engine from file {engine_file_path}")
with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
return runtime.deserialize_cuda_engine(f.read())
else:
return build_engine(max_batch_size, save_engine)
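A hedged end-to-end sketch (assumes TensorRT 8 on a CUDA GPU; the file names and the 800x800 input shape are placeholders, the shape taken from the hard-coded DETR input above):

```python
# Sketch: build (or load) an engine and run one inference through the
# helpers above. "model.onnx"/"model.trt" are placeholder paths.
import numpy as np

engine = build_engine_onnx("model.onnx", "model.trt", FP16=True, batch_size=1)
context = engine.create_execution_context()
inputs, outputs, bindings, stream = allocate_buffers(engine)

image = np.random.rand(1, 3, 800, 800).astype(np.float32)
np.copyto(inputs[0].host, image.ravel())
results = do_inference_v2(context, bindings, inputs, outputs, stream, image)
print([r.shape for r in results])
```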
| column | value |
|---|---|
| avg_line_length | 42.890995 |
| max_line_length | 189 |
| alphanum_fraction | 0.650276 |
| qsc_code_num_words_quality_signal | 1,162 |
| qsc_code_num_chars_quality_signal | 9,050 |
| qsc_code_mean_word_length_quality_signal | 4.889845 |
| qsc_code_frac_words_unique_quality_signal | 0.223752 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.025343 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.016895 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.015839 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.429602 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.38226 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0.336501 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0.31503 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0.293559 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0.293559 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 |
| qsc_code_frac_chars_digital_quality_signal | 0.014899 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.258343 |
| qsc_code_size_file_byte_quality_signal | 9,050 |
| qsc_code_num_lines_quality_signal | 210 |
| qsc_code_num_chars_line_max_quality_signal | 190 |
| qsc_code_num_chars_line_mean_quality_signal | 43.095238 |
| qsc_code_frac_chars_alphabet_quality_signal | 0.831645 |
| qsc_code_frac_chars_comments_quality_signal | 0.259116 |
| qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_frac_lines_dupe_lines_quality_signal | 0.32 |
| qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_lines_long_string_quality_signal | 0 |
| qsc_code_frac_chars_string_length_quality_signal | 0.087002 |
| qsc_code_frac_chars_long_word_length_quality_signal | 0 |
| qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_lines_assert_quality_signal | 0.016 |
| qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | 0.08 |
| qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_codepython_frac_lines_pass_quality_signal | 0 |
| qsc_codepython_frac_lines_import_quality_signal | 0.056 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | 0.016 |
| qsc_codepython_score_lines_no_logic_quality_signal | 0.24 |
| qsc_codepython_frac_lines_print_quality_signal | 0.104 |
| raw qsc_code_* / qsc_codepython_* (41 columns) | all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null |
| effective | 1 |
| hits | 0 |
| column | value |
|---|---|
| hexsha | 4388c3265a288b272ad7c01a54a34148e2ab938e |
| size | 2,506 |
| ext | py |
| lang | Python |

| column | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | src/init.py | src/init.py | src/init.py |
| repo_name | inpanel/inpanel-desktop | inpanel/inpanel-desktop | inpanel/inpanel-desktop |
| repo_head_hexsha | bff4a6accdf8a2976c722adc65f3fa2fe6650448 | bff4a6accdf8a2976c722adc65f3fa2fe6650448 | bff4a6accdf8a2976c722adc65f3fa2fe6650448 |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | 1 | null | null |
| event_min_datetime | 2020-03-18T11:40:56.000Z | null | null |
| event_max_datetime | 2020-03-18T11:40:56.000Z | null | null |

content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import tkinter.messagebox
from tkinter import Button, Label, Tk
from utils.functions import set_window_center
from utils.sqlite_helper import DBHelper
from inpanel import App
class InitWindow(Tk):
"""初始化窗口"""
def __init__(self):
Tk.__init__(self)
self.title("初始化数据")
set_window_center(self, 300, 180)
self.resizable(False, False)
        self.win_success = None  # popup window shown when initialization succeeds
self.init_page()
def init_page(self):
"""加载控件"""
btn_1 = Button(self, text="初始化数据库", command=self.do_init_db)
btn_1.pack(expand="yes", padx=10, pady=10, ipadx=5, ipady=5)
def do_init_db(self):
"""初始化"""
db_helper = DBHelper()
db_helper.reset_database()
db_helper.create_database()
try:
tmp = db_helper.insert_user("admin", "admin") # 默认用户
tmp2 = db_helper.insert_content_by_username(
"admin",
"Hello World !",
"源码仓库地址:https://github.com/doudoudzj/tkinter-app",
"github",
)
tmp3 = db_helper.get_content_by_username("admin")
print("添加用户admin:", tmp)
print("添加内容:", tmp2)
print("查询内容:", tmp3)
self.do_success()
self.destroy()
except KeyError:
print(KeyError)
self.do_failed()
def do_failed(self):
"""是否重试"""
res = tkinter.messagebox.askretrycancel('提示', '初始化失败,是否重试?', parent=self)
if res is True:
self.do_init_db()
elif res is False:
self.destroy()
def do_success(self):
"""初始化成功弹窗"""
self.win_success = Tk()
self.win_success.title("初始化成功")
set_window_center(self.win_success, 250, 150)
self.win_success.resizable(False, False)
msg = Label(self.win_success, text="初始化成功")
msg.pack(expand="yes", fill="both")
btn = Button(self.win_success, text="确定", command=self.quit)
btn.pack(side="right", padx=10, pady=10, ipadx=5, ipady=5)
btn_open_app = Button(self.win_success, text="启动程序", command=self.open_app)
btn_open_app.pack(side="right", padx=10, pady=10, ipadx=5, ipady=5)
def open_app(self):
"""打开应用程序"""
self.quit()
self.win_success.destroy()
self.win_success.quit()
App()
if __name__ == "__main__":
APP_INIT = InitWindow()
APP_INIT.mainloop()
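The same DBHelper sequence can be exercised headlessly; a sketch reusing only the calls made in do_init_db above:

```python
# Sketch: run the initialization steps without the Tk window.
from utils.sqlite_helper import DBHelper

db_helper = DBHelper()
db_helper.reset_database()
db_helper.create_database()
db_helper.insert_user("admin", "admin")
db_helper.insert_content_by_username(
    "admin", "Hello World !",
    "https://github.com/doudoudzj/tkinter-app", "github")
print(db_helper.get_content_by_username("admin"))
```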
| column | value |
|---|---|
| avg_line_length | 29.482353 |
| max_line_length | 83 |
| alphanum_fraction | 0.57901 |
| qsc_code_num_words_quality_signal | 309 |
| qsc_code_num_chars_quality_signal | 2,506 |
| qsc_code_mean_word_length_quality_signal | 4.469256 |
| qsc_code_frac_words_unique_quality_signal | 0.375405 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.050688 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.101376 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.026068 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.110065 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.075308 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0.075308 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0.075308 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0.075308 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0.053584 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 |
| qsc_code_frac_chars_digital_quality_signal | 0.021253 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.286512 |
| qsc_code_size_file_byte_quality_signal | 2,506 |
| qsc_code_num_lines_quality_signal | 84 |
| qsc_code_num_chars_line_max_quality_signal | 84 |
| qsc_code_num_chars_line_mean_quality_signal | 29.833333 |
| qsc_code_frac_chars_alphabet_quality_signal | 0.751119 |
| qsc_code_frac_chars_comments_quality_signal | 0.036712 |
| qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_frac_lines_dupe_lines_quality_signal | 0.032258 |
| qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_lines_long_string_quality_signal | 0 |
| qsc_code_frac_chars_string_length_quality_signal | 0.073109 |
| qsc_code_frac_chars_long_word_length_quality_signal | 0 |
| qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_lines_assert_quality_signal | 0 |
| qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | 0.096774 |
| qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_codepython_frac_lines_pass_quality_signal | 0 |
| qsc_codepython_frac_lines_import_quality_signal | 0.080645 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | 0 |
| qsc_codepython_score_lines_no_logic_quality_signal | 0.193548 |
| qsc_codepython_frac_lines_print_quality_signal | 0.064516 |
| raw qsc_code_* / qsc_codepython_* (41 columns) | all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null |
| effective | 1 |
| hits | 0 |
| column | value |
|---|---|
| hexsha | 4389f5cc4e8592cb8c9777c1297c9ec965389eb9 |
| size | 1,947 |
| ext | py |
| lang | Python |

| column | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | pdf/wechat/step.py | pdf/wechat/step.py | pdf/wechat/step.py |
| repo_name | damaainan/html2md | damaainan/html2md | damaainan/html2md |
| repo_head_hexsha | 0d241381e716d64bbcacad013c108857e815bb15 | 0d241381e716d64bbcacad013c108857e815bb15 | 0d241381e716d64bbcacad013c108857e815bb15 |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | null | null | null |
| event_min_datetime | null | null | null |
| event_max_datetime | null | null | null |

content:
# -*- coding=utf-8 -*-
from zwechathihu.mypdf import GenPdf
from db.mysqlite import simpleToolSql
data=[{"url": "http://mp.weixin.qq.com/s?__biz=MzAxODQxMDM0Mw==&mid=2247484852&idx=1&sn=85b50b8b0470bb4897e517955f4e5002&chksm=9bd7fbbcaca072aa75e2a241064a403fde1e579d57ab846cd8537a54253ceb2c8b93cc3bf38e&scene=21#wechat_redirect", "name": "001学习算法和刷题的框架思维"}
]
# path = '***/' || ''
# for val in data:
# # print(val["url"])
# # print(val["name"])
# pdf = GenPdf()
# title = val["name"].replace("/", "-")
# print(title)
# pdf.deal(val["url"], title, '')
# sql = simpleToolSql("url")
# # sql.execute("insert into wx_article (id,name,age) values (?,?,?);",[(1,'abc',15),(2,'bca',16)])
# res = sql.query("select * from wx_article;")
# print(res)
# res = sql.query("select * from wx_article where id=?;",(3,))
# print(res)
# sql.close()
# fetch the URLs that need generating from the db
def getListByTitle(title:str):
sql = simpleToolSql("url")
res = sql.query("select * from wx_article where title="+title+";")
print(res)
sql.close()
return res
# fetch the URLs that need generating from the db
def getListFromSql():
sql = simpleToolSql("url")
# res = sql.query("select * from wx_article where state=0;")
res = sql.query("select * from wx_article;")
print(res)
sql.close()
return res
# update the db
def updateUrl(id:int):
sql = simpleToolSql("url")
res = sql.execute("update wx_article set state=1 where id = ?;",(id,))
    # the trailing comma is required (single-element tuple), see https://blog.csdn.net/yimaoyingbi/article/details/104323701
print(res)
sql.close()
return
def addUrl():
sql = simpleToolSql("url")
sql.execute(
"insert into wx_article (url,folder,title,state,turn,create_at,update_at) values (?,?,?,?,?,?);",
[("http",'test',"01",0,1,"2020-12-03 09:38:25","2020-12-03 09:38:25")]
)
res = sql.query("select * from wx_article;")
print(res)
sql.close()
return
# addUrl()
updateUrl(1)
res = getListFromSql()
print(res)
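The trailing-comma note in updateUrl is the standard sqlite3 parameter-binding gotcha; a self-contained illustration:

```python
# Sketch: binding a single SQL parameter requires a 1-tuple, (3,) not (3).
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("create table wx (id integer)")
con.execute("insert into wx values (?)", (3,))        # OK: (3,) is a tuple
print(con.execute("select * from wx where id=?", (3,)).fetchall())
# con.execute("select * from wx where id=?", (3))     # ProgrammingError: (3) is just an int
```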
| column | value |
|---|---|
| avg_line_length | 29.059701 |
| max_line_length | 257 |
| alphanum_fraction | 0.634309 |
| qsc_code_num_words_quality_signal | 252 |
| qsc_code_num_chars_quality_signal | 1,947 |
| qsc_code_mean_word_length_quality_signal | 4.845238 |
| qsc_code_frac_words_unique_quality_signal | 0.392857 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.058968 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.054054 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.083538 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.396396 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.357903 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0.312039 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0.312039 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0.283374 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0.173628 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 |
| qsc_code_frac_chars_digital_quality_signal | 0.083851 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.173087 |
| qsc_code_size_file_byte_quality_signal | 1,947 |
| qsc_code_num_lines_quality_signal | 67 |
| qsc_code_num_chars_line_max_quality_signal | 258 |
| qsc_code_num_chars_line_mean_quality_signal | 29.059701 |
| qsc_code_frac_chars_alphabet_quality_signal | 0.674534 |
| qsc_code_frac_chars_comments_quality_signal | 0.336415 |
| qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_frac_lines_dupe_lines_quality_signal | 0.542857 |
| qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_lines_long_string_quality_signal | 0.028571 |
| qsc_code_frac_chars_string_length_quality_signal | 0.410418 |
| qsc_code_frac_chars_long_word_length_quality_signal | 0.038674 |
| qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_lines_assert_quality_signal | 0 |
| qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | 0.114286 |
| qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_codepython_frac_lines_pass_quality_signal | 0 |
| qsc_codepython_frac_lines_import_quality_signal | 0.057143 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | 0 |
| qsc_codepython_score_lines_no_logic_quality_signal | 0.285714 |
| qsc_codepython_frac_lines_print_quality_signal | 0.142857 |
| raw qsc_code_* / qsc_codepython_* (41 columns) | all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null |
| effective | 1 |
| hits | 0 |
| column | value |
|---|---|
| hexsha | 438f17abc40a90f956704fbac8d28a04a5de63c3 |
| size | 2,409 |
| ext | py |
| lang | Python |

| column | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | resources/lib/channelui.py | resources/lib/channelui.py | resources/lib/channelui.py |
| repo_name | lausitzer/plugin.video.mediathekview | lausitzer/plugin.video.mediathekview | lausitzer/plugin.video.mediathekview |
| repo_head_hexsha | 7f2086240625b9b4f8d50af114f8f47654346ed1 | 7f2086240625b9b4f8d50af114f8f47654346ed1 | 7f2086240625b9b4f8d50af114f8f47654346ed1 |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | null | null | null |
| event_min_datetime | null | null | null |
| event_max_datetime | null | null | null |

content:
# -*- coding: utf-8 -*-
"""
The channel model UI module
Copyright 2017-2018, Leo Moll and Dominik Schlösser
SPDX-License-Identifier: MIT
"""
# pylint: disable=import-error
import os
import xbmcgui
import xbmcplugin
import resources.lib.mvutils as mvutils
from resources.lib.channel import Channel
class ChannelUI(Channel):
"""
The channel model view class
Args:
plugin(MediathekView): the plugin object
sortmethods(array, optional): an array of sort methods
for the directory representation. Default is
`[ xbmcplugin.SORT_METHOD_TITLE ]`
nextdir(str, optional):
"""
def __init__(self, plugin, sortmethods=None, nextdir='initial'):
super(ChannelUI, self).__init__()
self.plugin = plugin
self.handle = plugin.addon_handle
self.nextdir = nextdir
self.sortmethods = sortmethods if sortmethods is not None else [
xbmcplugin.SORT_METHOD_TITLE]
self.count = 0
def begin(self):
"""
Begin a directory containing channels
"""
for method in self.sortmethods:
xbmcplugin.addSortMethod(self.handle, method)
def add(self, altname=None):
"""
Add the current entry to the directory
Args:
altname(str, optional): alternative name for the entry
"""
resultingname = self.channel if self.count == 0 else '%s (%d)' % (
self.channel, self.count, )
list_item = xbmcgui.ListItem(
label=resultingname if altname is None else altname)
icon = os.path.join(
self.plugin.path,
'resources',
'icons',
self.channel.lower() + '-m.png'
)
list_item.setArt({
'thumb': icon,
'icon': icon
})
info_labels = {
'title': resultingname,
'sorttitle': resultingname.lower()
}
list_item.setInfo(type='video', infoLabels=info_labels)
xbmcplugin.addDirectoryItem(
handle=self.handle,
url=mvutils.build_url({
'mode': self.nextdir,
'channel': self.channelid
}),
listitem=list_item,
isFolder=True
)
def end(self):
""" Finish a directory containing channels """
xbmcplugin.endOfDirectory(self.handle)
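The begin/add/end call pattern, as a hedged sketch (plugin, db_rows, and the channel attributes normally set by the Channel base class are assumptions; none are defined in this record):

```python
# Hedged sketch of how a MediathekView-style addon would drive ChannelUI.
# "plugin" and "db_rows" are placeholders; channel/channelid/count usually
# come from the Channel base class.
ui = ChannelUI(plugin, nextdir='show-channel')
ui.begin()                       # apply the configured sort methods
for row in db_rows:
    ui.channel = row['channel']
    ui.channelid = row['id']
    ui.count = row['count']
    ui.add()                     # one xbmcgui.ListItem per channel
ui.end()                         # close the directory listing
```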
| column | value |
|---|---|
| avg_line_length | 26.766667 |
| max_line_length | 74 |
| alphanum_fraction | 0.584475 |
| qsc_code_num_words_quality_signal | 249 |
| qsc_code_num_chars_quality_signal | 2,409 |
| qsc_code_mean_word_length_quality_signal | 5.574297 |
| qsc_code_frac_words_unique_quality_signal | 0.457831 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.028818 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.021614 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.036023 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 |
| qsc_code_frac_chars_digital_quality_signal | 0.006699 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.318389 |
| qsc_code_size_file_byte_quality_signal | 2,409 |
| qsc_code_num_lines_quality_signal | 89 |
| qsc_code_num_chars_line_max_quality_signal | 75 |
| qsc_code_num_chars_line_mean_quality_signal | 27.067416 |
| qsc_code_frac_chars_alphabet_quality_signal | 0.838611 |
| qsc_code_frac_chars_comments_quality_signal | 0.253217 |
| qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_frac_lines_dupe_lines_quality_signal | 0 |
| qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_lines_long_string_quality_signal | 0 |
| qsc_code_frac_chars_string_length_quality_signal | 0.043375 |
| qsc_code_frac_chars_long_word_length_quality_signal | 0 |
| qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_lines_assert_quality_signal | 0 |
| qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | 0.083333 |
| qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_codepython_frac_lines_pass_quality_signal | 0 |
| qsc_codepython_frac_lines_import_quality_signal | 0.104167 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | 0 |
| qsc_codepython_score_lines_no_logic_quality_signal | 0.208333 |
| qsc_codepython_frac_lines_print_quality_signal | 0 |
| raw qsc_code_* / qsc_codepython_* (41 columns) | all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null |
| effective | 1 |
| hits | 0 |
| column | value |
|---|---|
| hexsha | 438f4c0d3f4d94dad9a093f3100bc1608c38e26a |
| size | 6,838 |
| ext | py |
| lang | Python |

| column | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | getconf.py | getconf.py | getconf.py |
| repo_name | smk762/Dragonhound | smk762/Dragonhound | smk762/Dragonhound |
| repo_head_hexsha | 7cbaed2779afec47fcbf2481d0dae61daa4c11da | 7cbaed2779afec47fcbf2481d0dae61daa4c11da | 7cbaed2779afec47fcbf2481d0dae61daa4c11da |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | 3 | 1 | 2 |
| event_min_datetime | 2019-01-06T08:00:11.000Z | 2018-11-27T17:16:57.000Z | 2018-12-15T14:03:41.000Z |
| event_max_datetime | 2019-03-13T13:24:23.000Z | 2018-12-15T07:51:26.000Z | 2019-01-26T14:22:07.000Z |

content:
#!/usr/bin/env python3
#Credit to @Alright for the RPCs
import re
import os
import requests
import json
import platform
# define function that fetches rpc creds from .conf
def def_credentials(chain):
operating_system = platform.system()
if operating_system == 'Darwin':
ac_dir = os.environ['HOME'] + '/Library/Application Support/Komodo'
elif operating_system == 'Linux':
ac_dir = os.environ['HOME'] + '/.komodo'
elif operating_system == 'Win64':
ac_dir = "dont have windows machine now to test"
# define config file path
if chain == 'KMD':
coin_config_file = str(ac_dir + '/komodo.conf')
else:
coin_config_file = str(ac_dir + '/' + chain + '/' + chain + '.conf')
#define rpc creds
with open(coin_config_file, 'r') as f:
#print("Reading config file for credentials:", coin_config_file)
for line in f:
l = line.rstrip()
if re.search('rpcuser', l):
rpcuser = l.replace('rpcuser=', '')
elif re.search('rpcpassword', l):
rpcpassword = l.replace('rpcpassword=', '')
elif re.search('rpcport', l):
rpcport = l.replace('rpcport=', '')
return('http://' + rpcuser + ':' + rpcpassword + '@127.0.0.1:' + rpcport)
# define function that posts json data
def post_rpc(url, payload, auth=None):
try:
r = requests.post(url, data=json.dumps(payload), auth=auth)
return(json.loads(r.text))
except Exception as e:
raise Exception("Couldn't connect to " + url + ": ", e)
# Return current -pubkey=
def getpubkey_rpc(chain):
getinfo_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "getinfo",
"params": []}
getinfo_result = post_rpc(def_credentials(chain), getinfo_payload)
return(getinfo_result['result']['pubkey'])
# return latest batontxid from all publishers
def get_latest_batontxids(chain, oracletxid):
oraclesinfo_result = oraclesinfo_rpc(chain, oracletxid)
latest_batontxids = {}
# fill "latest_batontxids" dictionary with publisher:batontxid data
for i in oraclesinfo_result['registered']:
latest_batontxids[i['publisher']] = i['batontxid']
return(latest_batontxids)
#VANILLA RPC
def sendrawtx_rpc(chain, rawtx):
sendrawtx_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "sendrawtransaction",
"params": [rawtx]}
#rpcurl = def_credentials(chain)
return(post_rpc(def_credentials(chain), sendrawtx_payload))
def signmessage_rpc(chain, address, message):
signmessage_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "signmessage",
"params": [
address,
message
]
}
signmessage_result = post_rpc(def_credentials(chain), signmessage_payload)
return(signmessage_result['result'])
def verifymessage_rpc(chain, address, signature, message):
verifymessage_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "verifymessage",
"params": [
address,
signature,
message
]
}
verifymessage_result = post_rpc(def_credentials(chain), verifymessage_payload)
return(verifymessage_result['result'])
def kvsearch_rpc(chain, key):
kvsearch_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "kvsearch",
"params": [
key
]
}
kvsearch_result = post_rpc(def_credentials(chain), kvsearch_payload)
return(kvsearch_result['result'])
def kvupdate_rpc(chain, key, value, days, password):
# create dynamic oraclessamples payload
kvupdate_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "kvupdate",
"params": [
key,
value,
str(days),
password]}
# make kvupdate rpc call
kvupdate_result = post_rpc(def_credentials(chain), kvupdate_payload)
return(kvupdate_result)
def oraclesdata_rpc(chain, oracletxid, hexstr):
oraclesdata_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oraclesdata",
"params": [
oracletxid,
hexstr]}
oraclesdata_result = post_rpc(def_credentials(chain), oraclesdata_payload)
return(oraclesdata_result['result'])
def oraclescreate_rpc(chain, name, description, oracle_type):
oraclescreate_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oraclescreate",
"params": [
name,
description,
oracle_type]}
oraclescreate_result = post_rpc(def_credentials(chain), oraclescreate_payload)
return(oraclescreate_result['result'])
def oraclesinfo_rpc(chain, oracletxid):
oraclesinfo_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oraclesinfo",
"params": [oracletxid]}
oraclesinfo_result = post_rpc(def_credentials(chain), oraclesinfo_payload)
return(oraclesinfo_result['result'])
def oracleslist_rpc(chain):
oracleslist_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oracleslist",
"params": []}
oracleslist_result = post_rpc(def_credentials(chain), oracleslist_payload)
return(oracleslist_result['result'])
def oraclessubscribe_rpc(chain, oracletxid, publisher, amount):
oraclessubscribe_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oraclessubscribe",
"params": [oracletxid, publisher, amount]}
oraclessubscribe_result = post_rpc(def_credentials(chain), oraclessubscribe_payload)
return(oraclessubscribe_result['result'])
def oraclesregister_rpc(chain, oracletxid, datafee):
oraclesregister_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oraclesregister",
"params": [
oracletxid,
str(datafee)]}
oraclesregister_result = post_rpc(def_credentials(chain), oraclesregister_payload)
return(oraclesregister_result['result'])
def oraclessamples_rpc(chain, oracletxid, batonutxo, num):
oraclessamples_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oraclessamples",
"params": [
oracletxid,
batonutxo,
str(num)]}
oraclessamples_result = post_rpc(def_credentials(chain), oraclessamples_payload)
return(oraclessamples_result['result'])
def getlastsegidstakes_rpc(chain, depth):
    getlastsegidstakes_payload = {
        "jsonrpc": "1.0",
        "id": "python",
        "method": "getlastsegidstakes",
        "params": [depth]}
    getlastsegidstakes_result = post_rpc(def_credentials(chain), getlastsegidstakes_payload)
    return(getlastsegidstakes_result['result'])
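These helpers compose as in getpubkey_rpc above; a short sketch using the KV RPC wrappers (assumes a running komodod whose .conf def_credentials can read; all values are placeholders):

```python
# Sketch: store and read back a key/value pair on a chain via the
# kvupdate/kvsearch wrappers defined above.
chain = "KMD"
print("node pubkey:", getpubkey_rpc(chain))

result = kvupdate_rpc(chain, "mykey", "myvalue", 1, "mypassphrase")
print("kvupdate:", result)
print("kvsearch:", kvsearch_rpc(chain, "mykey"))
```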
| column | value |
|---|---|
| avg_line_length | 32.254717 |
| max_line_length | 90 |
| alphanum_fraction | 0.620942 |
| qsc_code_num_words_quality_signal | 691 |
| qsc_code_num_chars_quality_signal | 6,838 |
| qsc_code_mean_word_length_quality_signal | 5.960926 |
| qsc_code_frac_words_unique_quality_signal | 0.222865 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.054382 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.073804 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.054382 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.27968 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.246176 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0.150036 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0.062637 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0.062637 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0.033018 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 |
| qsc_code_frac_chars_digital_quality_signal | 0.007203 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.248757 |
| qsc_code_size_file_byte_quality_signal | 6,838 |
| qsc_code_num_lines_quality_signal | 211 |
| qsc_code_num_chars_line_max_quality_signal | 91 |
| qsc_code_num_chars_line_mean_quality_signal | 32.407583 |
| qsc_code_frac_chars_alphabet_quality_signal | 0.794627 |
| qsc_code_frac_chars_comments_quality_signal | 0.069903 |
| qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_frac_lines_dupe_lines_quality_signal | 0.278409 |
| qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_lines_long_string_quality_signal | 0 |
| qsc_code_frac_chars_string_length_quality_signal | 0.144816 |
| qsc_code_frac_chars_long_word_length_quality_signal | 0 |
| qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_lines_assert_quality_signal | 0 |
| qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | 0.096591 |
| qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_codepython_frac_lines_pass_quality_signal | 0.028409 |
| qsc_codepython_frac_lines_import_quality_signal | 0.028409 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | 0 |
| qsc_codepython_score_lines_no_logic_quality_signal | 0.125 |
| qsc_codepython_frac_lines_print_quality_signal | 0 |
| raw qsc_code_* / qsc_codepython_* (41 columns) | all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null |
| effective | 1 |
| hits | 0 |
| column | value |
|---|---|
| hexsha | 43918d07649e9b1f2f91c59a28e777ac9f008513 |
| size | 46,128 |
| ext | py |
| lang | Python |

| column | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | cwr/parser/decoder/dictionary.py | cwr/parser/decoder/dictionary.py | cwr/parser/decoder/dictionary.py |
| repo_name | orenyodfat/CWR-DataApi | orenyodfat/CWR-DataApi | orenyodfat/CWR-DataApi |
| repo_head_hexsha | f3b6ba8308c901b6ab87073c155c08e30692333c | f3b6ba8308c901b6ab87073c155c08e30692333c | f3b6ba8308c901b6ab87073c155c08e30692333c |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | 37 | 86 | 27 |
| event_min_datetime | 2015-04-21T15:33:53.000Z | 2015-02-01T22:26:02.000Z | 2015-01-26T16:01:09.000Z |
| event_max_datetime | 2022-02-07T00:02:29.000Z | 2021-07-09T08:49:36.000Z | 2021-11-08T23:53:55.000Z |

content:
# -*- coding: utf-8 -*-
from cwr.acknowledgement import AcknowledgementRecord, MessageRecord
from cwr.agreement import AgreementRecord, AgreementTerritoryRecord, \
InterestedPartyForAgreementRecord
from cwr.group import Group, GroupHeader, GroupTrailer
from cwr.info import AdditionalRelatedInfoRecord
from cwr.parser.decoder.common import Decoder
from cwr.interested_party import IPTerritoryOfControlRecord, Publisher, \
PublisherRecord, Writer, PublisherForWriterRecord, WriterRecord
from cwr.non_roman_alphabet import NonRomanAlphabetAgreementPartyRecord, \
NonRomanAlphabetOtherWriterRecord, NonRomanAlphabetPerformanceDataRecord, \
NonRomanAlphabetPublisherNameRecord, NonRomanAlphabetTitleRecord, \
NonRomanAlphabetWorkRecord, NonRomanAlphabetWriterNameRecord
from cwr.transmission import Transmission, TransmissionTrailer, \
TransmissionHeader
from cwr.work import RecordingDetailRecord, ComponentRecord, \
AlternateTitleRecord, AuthoredWorkRecord, InstrumentationDetailRecord, \
InstrumentationSummaryRecord, PerformingArtistRecord, WorkOriginRecord, \
WorkRecord
from cwr.file import CWRFile, FileTag
from cwr.other import AVIKey, VISAN
from cwr.table_value import MediaTypeValue, TableValue, InstrumentValue
"""
Classes for transforming dictionaries into instances of the CWR model.
There is a decoder for each of the model classes, and all of them expect a
dictionary having at least one key for each field, having the same name as the
field, which will refer to a valid value.
As said, the values on the dictionary should be valid values, for example if
an integer is expected, then the dictionary contains an integer. The values
contained in the dictionary entries should not need to be parsed.
These decoders are useful for handling JSON transmissions or Mongo databases.
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TransactionRecordDictionaryDecoder(Decoder):
def __init__(self):
super(TransactionRecordDictionaryDecoder, self).__init__()
self._decoders = {}
self._decoders['ACK'] = AcknowledgementDictionaryDecoder()
self._decoders['AGR'] = AgreementDictionaryDecoder()
self._decoders['TER'] = AgreementTerritoryDictionaryDecoder()
self._decoders['ARI'] = AdditionalRelatedInformationDictionaryDecoder()
self._decoders['ALT'] = AlternateTitleDictionaryDecoder()
self._decoders['EWT'] = AuthoredWorkDictionaryDecoder()
self._decoders['VER'] = AuthoredWorkDictionaryDecoder()
self._decoders['COM'] = ComponentDictionaryDecoder()
self._decoders['IPA'] = InterestedPartyForAgreementDictionaryDecoder()
self._decoders['SPT'] = IPTerritoryOfControlDictionaryDecoder()
self._decoders['SWT'] = IPTerritoryOfControlDictionaryDecoder()
self._decoders['IND'] = InstrumentationDetailDictionaryDecoder()
self._decoders['INS'] = InstrumentationSummaryDictionaryDecoder()
self._decoders['MSG'] = MessageDictionaryDecoder()
self._decoders['PER'] = PerformingArtistDictionaryDecoder()
self._decoders['PWR'] = PublisherForWriterDictionaryDecoder()
self._decoders['REC'] = RecordingDetailDictionaryDecoder()
self._decoders['EXC'] = WorkDictionaryDecoder()
self._decoders['ISW'] = WorkDictionaryDecoder()
self._decoders['NWR'] = WorkDictionaryDecoder()
self._decoders['REV'] = WorkDictionaryDecoder()
self._decoders['ORN'] = WorkOriginDictionaryDecoder()
self._decoders['SWR'] = WriterRecordDictionaryDecoder()
self._decoders['OWR'] = WriterRecordDictionaryDecoder()
self._decoders[
'NPA'] = NonRomanAlphabetAgreementPartyDictionaryDecoder()
self._decoders['NOW'] = NonRomanAlphabetOtherWriterDictionaryDecoder()
self._decoders[
'NPR'] = NonRomanAlphabetPerformanceDataDictionaryDecoder()
self._decoders['NPN'] = NonRomanAlphabetPublisherNameDictionaryDecoder()
self._decoders['NAT'] = NonRomanAlphabetTitleDictionaryDecoder()
self._decoders['NET'] = NonRomanAlphabetWorkDictionaryDecoder()
self._decoders['NCT'] = NonRomanAlphabetWorkDictionaryDecoder()
self._decoders['NVT'] = NonRomanAlphabetWorkDictionaryDecoder()
self._decoders['NWN'] = NonRomanAlphabetWriterNameDictionaryDecoder()
self._decoders['SPU'] = PublisherRecordDictionaryDecoder()
self._decoders['OPU'] = PublisherRecordDictionaryDecoder()
def decode(self, data):
return self._decoders[data['record_type']].decode(data)
class AcknowledgementDictionaryDecoder(Decoder):
def __init__(self):
super(AcknowledgementDictionaryDecoder, self).__init__()
def decode(self, data):
return AcknowledgementRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
original_group_id=data[
'original_group_id'],
original_transaction_sequence_n=data[
'original_transaction_sequence_n'],
original_transaction_type=data[
'original_transaction_type'],
transaction_status=data[
'transaction_status'],
creation_date_time=data[
'creation_date_time'],
processing_date=data['processing_date'],
creation_title=data['creation_title'],
submitter_creation_n=data[
'submitter_creation_n'],
recipient_creation_n=data[
'recipient_creation_n'])
class AgreementDictionaryDecoder(Decoder):
def __init__(self):
super(AgreementDictionaryDecoder, self).__init__()
def decode(self, data):
return AgreementRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
submitter_agreement_n=data[
'submitter_agreement_n'],
agreement_type=data['agreement_type'],
agreement_start_date=data[
'agreement_start_date'],
prior_royalty_status=data[
'prior_royalty_status'],
post_term_collection_status=data[
'post_term_collection_status'],
number_of_works=data['number_of_works'],
society_assigned_agreement_n=data[
'society_assigned_agreement_n'],
international_standard_code=data[
'international_standard_code'],
sales_manufacture_clause=data[
'sales_manufacture_clause'],
agreement_end_date=data['agreement_end_date'],
date_of_signature=data['date_of_signature'],
retention_end_date=data['retention_end_date'],
prior_royalty_start_date=data[
'prior_royalty_start_date'],
post_term_collection_end_date=data[
'post_term_collection_end_date'],
shares_change=data['shares_change'],
advance_given=data['advance_given'])
class AgreementTerritoryDictionaryDecoder(Decoder):
def __init__(self):
super(AgreementTerritoryDictionaryDecoder, self).__init__()
def decode(self, data):
return AgreementTerritoryRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
tis_numeric_code=data[
'tis_numeric_code'],
inclusion_exclusion_indicator=data[
'inclusion_exclusion_indicator'])
class AdditionalRelatedInformationDictionaryDecoder(Decoder):
def __init__(self):
super(AdditionalRelatedInformationDictionaryDecoder, self).__init__()
def decode(self, data):
return AdditionalRelatedInfoRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
society_n=data['society_n'],
type_of_right=data['type_of_right'],
work_n=data['work_n'],
subject_code=data['subject_code'],
note=data['note'])
class AlternateTitleDictionaryDecoder(Decoder):
def __init__(self):
super(AlternateTitleDictionaryDecoder, self).__init__()
def decode(self, data):
return AlternateTitleRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
alternate_title=data['alternate_title'],
title_type=data['title_type'],
language_code=data['language_code'])
class AuthoredWorkDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(AuthoredWorkDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base_1 = self._ipi_base_decoder.decode(data[
'writer_1_ipi_base_n'])
ipi_base_2 = self._ipi_base_decoder.decode(data[
'writer_2_ipi_base_n'])
return AuthoredWorkRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
title=data['title'],
submitter_work_n=data['submitter_work_n'],
writer_1_first_name=data[
'writer_1_first_name'],
writer_1_last_name=data['writer_1_last_name'],
writer_2_first_name=data[
'writer_2_first_name'],
writer_2_last_name=data['writer_2_last_name'],
writer_1_ipi_base_n=ipi_base_1,
writer_1_ipi_name_n=data[
'writer_1_ipi_name_n'],
writer_2_ipi_base_n=ipi_base_2,
writer_2_ipi_name_n=data[
'writer_2_ipi_name_n'],
source=data['source'],
language_code=data['language_code'],
iswc=data['iswc'])
class ComponentDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(ComponentDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base_1 = self._ipi_base_decoder.decode(data['writer_1_ipi_base_n'])
ipi_base_2 = self._ipi_base_decoder.decode(data['writer_2_ipi_base_n'])
return ComponentRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
title=data['title'],
submitter_work_n=data['submitter_work_n'],
writer_1_last_name=data['writer_1_last_name'],
writer_1_first_name=data['writer_1_first_name'],
writer_2_last_name=data['writer_2_last_name'],
writer_2_first_name=data['writer_2_first_name'],
writer_1_ipi_base_n=ipi_base_1,
writer_1_ipi_name_n=data['writer_1_ipi_name_n'],
writer_2_ipi_base_n=ipi_base_2,
writer_2_ipi_name_n=data['writer_2_ipi_name_n'],
iswc=data['iswc'],
duration=data['duration'])
class GroupHeaderDictionaryDecoder(Decoder):
def __init__(self):
super(GroupHeaderDictionaryDecoder, self).__init__()
def decode(self, data):
return GroupHeader(record_type=data['record_type'],
group_id=data['group_id'],
transaction_type=data['transaction_type'],
version_number=data['version_number'],
batch_request_id=data['batch_request_id'])
class GroupTrailerDictionaryDecoder(Decoder):
def __init__(self):
super(GroupTrailerDictionaryDecoder, self).__init__()
def decode(self, data):
total_monetary_value = None
if 'total_monetary_value' in data:
total_monetary_value = data['total_monetary_value']
currency_indicator = None
if 'currency_indicator' in data:
currency_indicator = data['currency_indicator']
return GroupTrailer(record_type=data['record_type'],
group_id=data['group_id'],
transaction_count=data['transaction_count'],
record_count=data['record_count'],
currency_indicator=currency_indicator,
total_monetary_value=total_monetary_value,
)
class InterestedPartyForAgreementDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(InterestedPartyForAgreementDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base = self._ipi_base_decoder.decode(data['ipi_base_n'])
return InterestedPartyForAgreementRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
ip_n=data['ip_n'],
ip_last_name=data['ip_last_name'],
agreement_role_code=data['agreement_role_code'],
ip_writer_first_name=data['ip_writer_first_name'],
ipi_name_n=data['ipi_name_n'], ipi_base_n=ipi_base,
pr_society=data['pr_society'], pr_share=data['pr_share'],
mr_society=data['mr_society'], mr_share=data['mr_share'],
sr_society=data['sr_society'], sr_share=data['sr_share'])
class IPTerritoryOfControlDictionaryDecoder(Decoder):
def __init__(self):
super(IPTerritoryOfControlDictionaryDecoder, self).__init__()
def decode(self, data):
record = IPTerritoryOfControlRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
ip_n=data['ip_n'],
inclusion_exclusion_indicator=data[
'inclusion_exclusion_indicator'],
tis_numeric_code=data[
'tis_numeric_code'],
sequence_n=data['sequence_n'],
pr_collection_share=data[
'pr_collection_share'],
mr_collection_share=data[
'mr_collection_share'],
shares_change=data['shares_change'])
if 'sr_collection_share' in data:
record.sr_collection_share = data['sr_collection_share']
return record
class InstrumentationDetailDictionaryDecoder(Decoder):
def __init__(self):
super(InstrumentationDetailDictionaryDecoder, self).__init__()
def decode(self, data):
return InstrumentationDetailRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
instrument_code=data[
'instrument_code'],
number_players=data[
'number_players'])
class InstrumentationSummaryDictionaryDecoder(Decoder):
def __init__(self):
super(InstrumentationSummaryDictionaryDecoder, self).__init__()
def decode(self, data):
return InstrumentationSummaryRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
number_voices=data['number_voices'],
standard_instrumentation_type=data['standard_instrumentation_type'],
instrumentation_description=data['instrumentation_description'])
class MessageDictionaryDecoder(Decoder):
def __init__(self):
super(MessageDictionaryDecoder, self).__init__()
    def decode(self, data):
        return MessageRecord(
            record_type=data['record_type'],
            transaction_sequence_n=data['transaction_sequence_n'],
            record_sequence_n=data['record_sequence_n'],
            message_type=data['message_type'],
            message_text=data['message_text'],
            original_record_sequence_n=data['original_record_sequence_n'],
            message_record_type=data['message_record_type'],
            message_level=data['message_level'],
            validation_n=data['validation_n'])
class PerformingArtistDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(PerformingArtistDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
    def decode(self, data):
        ipi_base = None
        if 'performing_artist_ipi_base_n' in data:
            ipi_base = self._ipi_base_decoder.decode(
                data['performing_artist_ipi_base_n'])
        return PerformingArtistRecord(
            record_type=data['record_type'],
            transaction_sequence_n=data['transaction_sequence_n'],
            record_sequence_n=data['record_sequence_n'],
            performing_artist_last_name=data['performing_artist_last_name'],
            performing_artist_first_name=data.get('performing_artist_first_name'),
            performing_artist_ipi_name_n=data.get('performing_artist_ipi_name_n'),
            performing_artist_ipi_base_n=ipi_base)
class PublisherForWriterDictionaryDecoder(Decoder):
def __init__(self):
super(PublisherForWriterDictionaryDecoder, self).__init__()
    def decode(self, data):
        return PublisherForWriterRecord(
            record_type=data['record_type'],
            transaction_sequence_n=data['transaction_sequence_n'],
            record_sequence_n=data['record_sequence_n'],
            publisher_ip_n=data['publisher_ip_n'],
            publisher_name=data.get('publisher_name'),
            writer_ip_n=data['writer_ip_n'],
            submitter_agreement_n=data['submitter_agreement_n'],
            society_assigned_agreement_n=data['society_assigned_agreement_n'])
class RecordingDetailDictionaryDecoder(Decoder):
def __init__(self):
super(RecordingDetailDictionaryDecoder, self).__init__()
    def decode(self, data):
        return RecordingDetailRecord(
            record_type=data['record_type'],
            transaction_sequence_n=data['transaction_sequence_n'],
            record_sequence_n=data['record_sequence_n'],
            first_release_date=data['first_release_date'],
            first_release_duration=data['first_release_duration'],
            first_album_title=data['first_album_title'],
            first_album_label=data['first_album_label'],
            first_release_catalog_n=data['first_release_catalog_n'],
            ean=data['ean'],
            isrc=data['isrc'],
            recording_format=data['recording_format'],
            recording_technique=data['recording_technique'],
            media_type=data.get('media_type'))
class FileDictionaryDecoder(Decoder):
def __init__(self):
super(FileDictionaryDecoder, self).__init__()
self._tag_decoder = FileTagDictionaryDecoder()
self._transmission_decoder = TransmissionDictionaryDecoder()
def decode(self, data):
tag = data['tag']
if isinstance(tag, dict):
tag = self._tag_decoder.decode(tag)
transmission = data['transmission']
if isinstance(transmission, dict):
transmission = self._transmission_decoder.decode(transmission)
return CWRFile(tag, transmission)
class TransmissionDictionaryDecoder(Decoder):
def __init__(self):
super(TransmissionDictionaryDecoder, self).__init__()
self._header_decoder = TransmissionHeaderDictionaryDecoder()
self._trailer_decoder = TransmissionTrailerDictionaryDecoder()
self._group_decoder = GroupDictionaryDecoder()
def decode(self, data):
header = data['header']
if isinstance(header, dict):
header = self._header_decoder.decode(header)
trailer = data['trailer']
if isinstance(trailer, dict):
trailer = self._trailer_decoder.decode(trailer)
groups = []
if len(data['groups']) > 0:
if isinstance(data['groups'][0], dict):
for group in data['groups']:
groups.append(self._group_decoder.decode(group))
else:
groups = data['groups']
return Transmission(header, trailer, groups)
class GroupDictionaryDecoder(Decoder):
def __init__(self):
super(GroupDictionaryDecoder, self).__init__()
self._header_decoder = GroupHeaderDictionaryDecoder()
self._trailer_decoder = GroupTrailerDictionaryDecoder()
self._transaction_decoder = TransactionRecordDictionaryDecoder()
def decode(self, data):
header = data['group_header']
if isinstance(header, dict):
header = self._header_decoder.decode(header)
trailer = data['group_trailer']
if isinstance(trailer, dict):
trailer = self._trailer_decoder.decode(trailer)
transactions = []
if len(data['transactions']) > 0:
if isinstance(data['transactions'][0][0], dict):
for transaction in data['transactions']:
transaction_records = []
for record in transaction:
transaction_records.append(
self._transaction_decoder.decode(record))
transactions.append(transaction_records)
else:
transactions = data['transactions']
return Group(header, trailer, transactions)
class TransmissionHeaderDictionaryDecoder(Decoder):
def __init__(self):
super(TransmissionHeaderDictionaryDecoder, self).__init__()
    def decode(self, data):
        header = TransmissionHeader(
            record_type=data['record_type'],
            sender_id=data['sender_id'],
            sender_name=data['sender_name'],
            sender_type=data['sender_type'],
            creation_date_time=data['creation_date_time'],
            transmission_date=data['transmission_date'],
            edi_standard=data['edi_standard'])
        if 'character_set' in data:
            header.character_set = data['character_set']
        return header
class TransmissionTrailerDictionaryDecoder(Decoder):
def __init__(self):
super(TransmissionTrailerDictionaryDecoder, self).__init__()
def decode(self, data):
return TransmissionTrailer(record_type=data['record_type'],
group_count=data['group_count'],
transaction_count=data['transaction_count'],
record_count=data['record_count'])
class WorkDictionaryDecoder(Decoder):
def __init__(self):
super(WorkDictionaryDecoder, self).__init__()
    def decode(self, data):
        return WorkRecord(
            record_type=data['record_type'],
            transaction_sequence_n=data['transaction_sequence_n'],
            record_sequence_n=data['record_sequence_n'],
            submitter_work_n=data['submitter_work_n'],
            title=data['title'],
            version_type=data['version_type'],
            musical_work_distribution_category=data[
                'musical_work_distribution_category'],
            date_publication_printed_edition=data[
                'date_publication_printed_edition'],
            text_music_relationship=data['text_music_relationship'],
            language_code=data['language_code'],
            copyright_number=data['copyright_number'],
            copyright_date=data['copyright_date'],
            music_arrangement=data['music_arrangement'],
            lyric_adaptation=data['lyric_adaptation'],
            excerpt_type=data['excerpt_type'],
            composite_type=data['composite_type'],
            composite_component_count=data['composite_component_count'],
            iswc=data['iswc'],
            work_type=data['work_type'],
            duration=data['duration'],
            catalogue_number=data.get('catalogue_number'),
            opus_number=data.get('opus_number'),
            contact_id=data['contact_id'],
            contact_name=data['contact_name'],
            recorded_indicator=data['recorded_indicator'],
            priority_flag=data.get('priority_flag'),
            exceptional_clause=data.get('exceptional_clause'),
            grand_rights_indicator=data['grand_rights_indicator'])
class WorkOriginDictionaryDecoder(Decoder):
def __init__(self):
super(WorkOriginDictionaryDecoder, self).__init__()
    def decode(self, data):
        return WorkOriginRecord(
            record_type=data['record_type'],
            transaction_sequence_n=data['transaction_sequence_n'],
            record_sequence_n=data['record_sequence_n'],
            intended_purpose=data['intended_purpose'],
            production_title=data['production_title'],
            cd_identifier=data['cd_identifier'],
            cut_number=data['cut_number'],
            library=data['library'],
            bltvr=data['bltvr'],
            visan=data['visan'],
            production_n=data['production_n'],
            episode_title=data['episode_title'],
            episode_n=data['episode_n'],
            year_production=data['year_production'],
            audio_visual_key=data['audio_visual_key'])
class WriterDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(WriterDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base_n = self._ipi_base_decoder.decode(data['ipi_base_n'])
return Writer(ip_n=data['ip_n'],
personal_number=data['personal_number'],
ipi_base_n=ipi_base_n,
writer_first_name=data['writer_first_name'],
writer_last_name=data['writer_last_name'],
tax_id=data['tax_id'],
ipi_name_n=data['ipi_name_n'])
class WriterRecordDictionaryDecoder(Decoder):
def __init__(self):
super(WriterRecordDictionaryDecoder, self).__init__()
self._writer_decoder = WriterDictionaryDecoder()
    def decode(self, data):
        writer = self._writer_decoder.decode(data['writer'])
        return WriterRecord(
            record_type=data['record_type'],
            transaction_sequence_n=data['transaction_sequence_n'],
            record_sequence_n=data['record_sequence_n'],
            writer=writer,
            writer_designation=data['writer_designation'],
            work_for_hire=data['work_for_hire'],
            writer_unknown=data['writer_unknown'],
            reversionary=data['reversionary'],
            first_recording_refusal=data['first_recording_refusal'],
            usa_license=data.get('usa_license'),
            pr_society=data['pr_society'],
            pr_ownership_share=data['pr_ownership_share'],
            mr_society=data['mr_society'],
            mr_ownership_share=data['mr_ownership_share'],
            sr_society=data['sr_society'],
            sr_ownership_share=data['sr_ownership_share'])
class NonRomanAlphabetAgreementPartyDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetAgreementPartyDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetAgreementPartyRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
ip_name=data['ip_name'],
ip_writer_name=data['ip_writer_name'],
ip_n=data['ip_n'],
language_code=data['language_code'])
class NonRomanAlphabetOtherWriterDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetOtherWriterDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetOtherWriterRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
writer_first_name=data['writer_first_name'],
writer_name=data['writer_name'],
position=data['position'],
language_code=data['language_code'])
class NonRomanAlphabetPerformanceDataDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(NonRomanAlphabetPerformanceDataDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base = self._ipi_base_decoder.decode(
data['performing_artist_ipi_base_n'])
return NonRomanAlphabetPerformanceDataRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
performing_artist_first_name=data['performing_artist_first_name'],
performing_artist_name=data['performing_artist_name'],
performing_artist_ipi_name_n=data['performing_artist_ipi_name_n'],
performing_artist_ipi_base_n=ipi_base,
language_code=data['language_code'],
performance_language=data['performance_language'],
performance_dialect=data['performance_dialect'])
class NonRomanAlphabetPublisherNameDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetPublisherNameDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetPublisherNameRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
publisher_sequence_n=data['publisher_sequence_n'],
ip_n=data['ip_n'],
publisher_name=data['publisher_name'],
language_code=data['language_code'])
class NonRomanAlphabetTitleDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetTitleDictionaryDecoder, self).__init__()
    def decode(self, data):
        return NonRomanAlphabetTitleRecord(
            record_type=data['record_type'],
            transaction_sequence_n=data['transaction_sequence_n'],
            record_sequence_n=data['record_sequence_n'],
            title=data['title'],
            title_type=data['title_type'],
            language_code=data['language_code'])
class NonRomanAlphabetWorkDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetWorkDictionaryDecoder, self).__init__()
    def decode(self, data):
        return NonRomanAlphabetWorkRecord(
            record_type=data['record_type'],
            transaction_sequence_n=data['transaction_sequence_n'],
            record_sequence_n=data['record_sequence_n'],
            title=data['title'],
            language_code=data['language_code'])
class NonRomanAlphabetWriterNameDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetWriterNameDictionaryDecoder, self).__init__()
    def decode(self, data):
        return NonRomanAlphabetWriterNameRecord(
            record_type=data['record_type'],
            transaction_sequence_n=data['transaction_sequence_n'],
            record_sequence_n=data['record_sequence_n'],
            writer_first_name=data['writer_first_name'],
            writer_last_name=data['writer_last_name'],
            ip_n=data['ip_n'],
            language_code=data['language_code'])
class PublisherDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(PublisherDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
if 'ipi_base_n' in data:
ipi_base = self._ipi_base_decoder.decode(data['ipi_base_n'])
else:
ipi_base = None
return Publisher(ip_n=data['ip_n'],
publisher_name=data['publisher_name'],
ipi_name_n=data['ipi_name_n'],
ipi_base_n=ipi_base,
tax_id=data['tax_id'])
class PublisherRecordDictionaryDecoder(Decoder):
def __init__(self):
super(PublisherRecordDictionaryDecoder, self).__init__()
self._publisher_decoder = PublisherDictionaryDecoder()
    def decode(self, data):
        publisher = self._publisher_decoder.decode(data['publisher'])
        return PublisherRecord(
            record_type=data['record_type'],
            transaction_sequence_n=data['transaction_sequence_n'],
            record_sequence_n=data['record_sequence_n'],
            publisher=publisher,
            publisher_sequence_n=data['publisher_sequence_n'],
            submitter_agreement_n=data['submitter_agreement_n'],
            publisher_type=data['publisher_type'],
            publisher_unknown=data['publisher_unknown'],
            pr_society=data['pr_society'],
            pr_ownership_share=data['pr_ownership_share'],
            mr_society=data['mr_society'],
            mr_ownership_share=data['mr_ownership_share'],
            sr_society=data['sr_society'],
            sr_ownership_share=data['sr_ownership_share'],
            special_agreements=data.get('special_agreements'),
            first_recording_refusal=data.get('first_recording_refusal'),
            international_standard_code=data.get('international_standard_code'),
            society_assigned_agreement_n=data.get('society_assigned_agreement_n'),
            agreement_type=data.get('agreement_type'),
            usa_license=data.get('usa_license'))
class TableValueDictionaryDecoder(Decoder):
def __init__(self):
super(TableValueDictionaryDecoder, self).__init__()
def decode(self, data):
return TableValue(code=data['code'],
name=data['name'],
description=data['description'])
class MediaTypeValueDictionaryDecoder(Decoder):
def __init__(self):
super(MediaTypeValueDictionaryDecoder, self).__init__()
def decode(self, data):
return MediaTypeValue(code=data['code'],
name=data['name'],
media_type=data['media_type'],
duration_max=data['duration_max'],
works_max=data['works_max'],
fragments_max=data['fragments_max'])
class InstrumentValueDictionaryDecoder(Decoder):
def __init__(self):
super(InstrumentValueDictionaryDecoder, self).__init__()
def decode(self, data):
return InstrumentValue(code=data['code'],
name=data['name'],
family=data['family'],
description=data['description'])
class FileTagDictionaryDecoder(Decoder):
def __init__(self):
super(FileTagDictionaryDecoder, self).__init__()
def decode(self, data):
return FileTag(data['year'],
data['sequence_n'],
data['sender'],
data['receiver'],
data['version'])
class AVIKeyDictionaryDecoder(Decoder):
def __init__(self):
super(AVIKeyDictionaryDecoder, self).__init__()
def decode(self, data):
return AVIKey(data['society_code'],
data['av_number'])
class IPIBaseDictionaryDecoder(Decoder):
def __init__(self):
super(IPIBaseDictionaryDecoder, self).__init__()
    def decode(self, data):
        # Pass the value through, normalizing falsy inputs to None.
        return data if data else None
class ISWCDictionaryDecoder(Decoder):
def __init__(self):
super(ISWCDictionaryDecoder, self).__init__()
    def decode(self, data):
        # Pass the value through, normalizing falsy inputs to None.
        return data if data else None
class VISANDictionaryDecoder(Decoder):
def __init__(self):
super(VISANDictionaryDecoder, self).__init__()
def decode(self, data):
return data
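# A minimal usage sketch (assumption: added for illustration; the keys below
# are exactly the ones GroupHeaderDictionaryDecoder reads above, and the
# values are made up):
if __name__ == '__main__':
    decoder = GroupHeaderDictionaryDecoder()
    header = decoder.decode({'record_type': 'GRH',
                             'group_id': 1,
                             'transaction_type': 'AGR',
                             'version_number': '02.10',
                             'batch_request_id': 1})
    print(header)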
avg_line_length: 45.223529 | max_line_length: 96 | alphanum_fraction: 0.560701 | num_words: 3,822 | num_chars: 46,128 | num_lines: 1,019
hexsha: 43924097832cb6270f8da8544d56269f7551b02e | size: 6,651 | ext: py | lang: Python
path: prebuilt/twrp_fonts.py | repo: imranpopz/android_bootable_recovery-1 | head: ec4512ad1e20f640b3dcd6faf8c04cae711e4f30 | licenses: ["Apache-2.0"]
stars: 95 (2018-10-31T12:12:01.000Z .. 2022-03-20T21:30:48.000Z) | issues: 34 (2018-10-22T11:01:15.000Z .. 2021-11-21T14:10:26.000Z) | forks: 81 (2018-10-23T08:37:20.000Z .. 2022-03-20T00:27:08.000Z)
#!/usr/bin/env python
# -*- coding: utf8 -*-
import os
import sys
from struct import pack
from PIL import Image, ImageDraw, ImageFont
# ====== Python script to convert TrueTypeFonts to TWRP's .dat format ======
# This script was originally made by https://github.com/suky for his chinese version of TWRP
# and then translated to English by feilplane at #twrp of irc.freenode.net.
# However, it was not compatible with vanilla TWRP, so https://github.com/Tasssadar rewrote
# most of it and it now has very little in common with the original script.
class Reference():
def __init__(self, val):
self.__value = val
def get(self):
return self.__value
def set(self, val):
self.__value = val
quiet = Reference(False)
def log(text):
if not quiet.get():
sys.stdout.write(text)
def write_data(f, width, height, offsets, data):
f.write(pack("<I", width))
f.write(pack("<I", height))
for off in offsets:
f.write(pack("<I", off))
f.write(data)
if __name__ == "__main__":
fontsize = Reference(20)
out_fname = Reference("font.dat")
voffset = Reference(None)
padding = Reference(0)
font_fname = Reference(None)
preview = Reference(None)
arg_parser = [
["-s", "--size=", fontsize, int],
["-o", "--output=", out_fname, str],
["-p", "--preview=", preview, str],
[None, "--padding=", padding, int],
["-q", "--quiet", quiet, None],
[None, "--voffset=", voffset, int]
]
argv = sys.argv
argc = len(argv)
i = 1
while i < argc:
arg = argv[i]
arg_next = argv[i+1] if i+1 < argc else None
    if arg == "--help" or arg == "-h":
        # Apply the % formatting to the string itself; in the original the
        # formatting was applied to print()'s return value, which raises a
        # TypeError on Python 3.
        print(("This script converts TrueTypeFonts to .dat file for TWRP recovery.\n\n"
               "Usage: %s [SWITCHES] [TRUETYPE FILE]\n\n"
               "  -h, --help - print help\n"
               "  -o, --output=[FILE] - output file or '-' for stdout (default: font.dat)\n"
               "  -p, --preview=[FILE] - generate font preview to png file\n"
               "      --padding=[PIXELS] - horizontal padding around each character (default: 0)\n"
               "  -q, --quiet - do not print any output\n"
               "  -s, --size=[SIZE IN POINTS] - specify font size in points (default: 20)\n"
               "      --voffset=[PIXELS] - vertical offset (default: font size*0.25)\n\n"
               "Example:\n"
               "  %s -s 40 -o ComicSans_40.dat -p preview.png ComicSans.ttf\n") % (
                   sys.argv[0], sys.argv[0]))
        exit(0)
found = False
for p in arg_parser:
if p[0] and arg == p[0] and (arg_next or not p[3]):
if p[3]:
p[2].set(p[3](arg_next))
else:
p[2].set(True)
i += 1
found = True
break
elif p[1] and arg.startswith(p[1]):
if p[3]:
p[2].set(p[3](arg[len(p[1]):]))
else:
p[2].set(True)
found = True
break
if not found:
font_fname.set(arg)
i += 1
if not voffset.get():
voffset.set(int(fontsize.get()*0.25))
if out_fname.get() == "-":
quiet.set(True)
log("Loading font %s...\n" % font_fname.get())
font = ImageFont.truetype(font_fname.get(), fontsize.get(), 0, "utf-32be")
cwidth = 0
cheight = font.getsize('A')[1]
offsets = []
renders = []
data = bytes()
# temp Image and ImageDraw to get access to textsize
res = Image.new('L', (1, 1), 0)
res_draw = ImageDraw.Draw(res)
# Measure each character and render it to separate Image
log("Rendering characters...\n")
for i in range(32, 128):
w, h = res_draw.textsize(chr(i), font)
w += padding.get()*2
offsets.append(cwidth)
cwidth += w
if h > cheight:
cheight = h
ichr = Image.new('L', (w, cheight*2))
ichr_draw = ImageDraw.Draw(ichr)
ichr_draw.text((padding.get(), 0), chr(i), 255, font)
renders.append(ichr)
# Twice the height to account for under-the-baseline characters
cheight *= 2
# Create the result bitmap
log("Creating result bitmap...\n")
res = Image.new('L', (cwidth, cheight), 0)
res_draw = ImageDraw.Draw(res)
# Paste all characters into result bitmap
for i in range(len(renders)):
res.paste(renders[i], (offsets[i], 0))
# uncomment to draw lines separating each character (for debug)
#res_draw.rectangle([offsets[i], 0, offsets[i], cheight], outline="blue")
# crop the blank areas on top and bottom
(_, start_y, _, end_y) = res.getbbox()
res = res.crop((0, start_y, cwidth, end_y))
cheight = (end_y - start_y) + voffset.get()
new_res = Image.new('L', (cwidth, cheight))
new_res.paste(res, (0, voffset.get()))
res = new_res
# save the preview
if preview.get():
log("Saving preview to %s...\n" % preview.get())
res.save(preview.get())
# Pack the data.
# The "data" is a B/W bitmap with all 96 characters next to each other
# on one line. It is as wide as all the characters combined and as
# high as the tallest character, plus padding.
# Each byte contains info about eight pixels, starting from
# highest to lowest bit:
# bits: | 7 6 5 4 3 2 1 0 | 15 14 13 12 11 10 9 8 | ...
# pixels: | 0 1 2 3 4 5 6 7 | 8 9 10 11 12 13 14 15 | ...
log("Packing data...\n")
bit = 0
bit_itr = 0
    # Image.tostring() was removed in newer Pillow; use tobytes() when available.
    raw_pixels = res.tobytes() if hasattr(res, 'tobytes') else res.tostring()
    for c in raw_pixels:
# FIXME: How to handle antialiasing?
# if c != '\x00':
# In Python3, c is int, in Python2, c is string. Because of reasons.
try:
fill = (ord(c) >= 127)
except TypeError:
fill = (c >= 127)
if fill:
bit |= (1 << (7-bit_itr))
bit_itr += 1
if bit_itr >= 8:
data += pack("<B", bit)
bit_itr = 0
bit = 0
# Write them to the file.
# Format:
# 000: width
# 004: height
# 008: offsets of each characters (96*uint32)
# 392: data as described above
log("Writing to %s...\n" % out_fname.get())
if out_fname.get() == "-":
write_data(sys.stdout, cwidth, cheight, offsets, data)
else:
with open(out_fname.get(), 'wb') as f:
write_data(f, cwidth, cheight, offsets, data)
exit(0)
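# A minimal read-back sketch for the .dat format documented above (assumption:
# added for illustration, not part of the original script).
def read_font_dat(path):
    """Read back a font.dat header: width, height and 96 glyph offsets."""
    from struct import unpack
    with open(path, 'rb') as f:
        width, height = unpack('<II', f.read(8))
        offsets = unpack('<96I', f.read(96 * 4))
        bitmap = f.read()  # 1 bit per pixel, MSB first, as packed above
    return width, height, offsets, bitmap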
avg_line_length: 33.422111 | max_line_length: 106 | alphanum_fraction: 0.537062 | num_words: 915 | num_chars: 6,651 | num_lines: 198
hexsha: 4393bd0d5f4f1245ce5fd0c8893a7351e5ec7276 | size: 3,589 | ext: py | lang: Python
path: tests/en/test_asr.py | repo: rhasspy/rhasspy-test | head: 0c180bfdd370f18ad2f8b9ee483ea5520161ab74 | licenses: ["MIT"]
stars: null | issues: null | forks: 1 (2020-07-25T13:59:25.000Z .. 2020-07-25T13:59:25.000Z)
"""Automated speech recognition tests."""
import os
import sys
import unittest
from pathlib import Path
import requests
from rhasspyhermes.asr import AsrTextCaptured
from rhasspyhermes.nlu import NluIntent
class AsrEnglishTests(unittest.TestCase):
"""Test automated speech recognition (English)"""
def setUp(self):
self.http_host = os.environ.get("RHASSPY_HTTP_HOST", "localhost")
self.http_port = os.environ.get("RHASSPY_HTTP_PORT", 12101)
self.wav_bytes = Path("wav/en/turn_on_the_living_room_lamp.wav").read_bytes()
def api_url(self, fragment):
return f"http://{self.http_host}:{self.http_port}/api/{fragment}"
def check_status(self, response):
if response.status_code != 200:
print(response.text, file=sys.stderr)
response.raise_for_status()
def test_http_speech_to_text(self):
"""Test speech-to-text HTTP endpoint"""
response = requests.post(self.api_url("speech-to-text"), data=self.wav_bytes)
self.check_status(response)
text = response.content.decode()
self.assertEqual(text, "turn on the living room lamp")
def test_http_speech_to_text_json(self):
"""Text speech-to-text HTTP endpoint (Rhasspy JSON format)"""
response = requests.post(
self.api_url("speech-to-text"),
data=self.wav_bytes,
headers={"Accept": "application/json"},
)
self.check_status(response)
result = response.json()
self.assertEqual(result["text"], "turn on the living room lamp")
def test_http_speech_to_text_hermes(self):
"""Text speech-to-text HTTP endpoint (Hermes format)"""
response = requests.post(
self.api_url("speech-to-text"),
data=self.wav_bytes,
params={"outputFormat": "hermes"},
)
self.check_status(response)
result = response.json()
self.assertEqual(result["type"], "textCaptured")
text_captured = AsrTextCaptured.from_dict(result["value"])
self.assertEqual(text_captured.text, "turn on the living room lamp")
def test_http_speech_to_intent(self):
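        """Test speech-to-intent HTTP endpoint"""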
response = requests.post(self.api_url("speech-to-intent"), data=self.wav_bytes)
self.check_status(response)
result = response.json()
self.assertEqual(result["intent"]["name"], "ChangeLightState")
self.assertEqual(result["text"], "turn on the living room lamp")
self.assertEqual(result["slots"]["name"], "living room lamp")
self.assertEqual(result["slots"]["state"], "on")
def test_http_speech_to_intent_hermes(self):
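        """Test speech-to-intent HTTP endpoint (Hermes format)"""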
response = requests.post(
self.api_url("speech-to-intent"),
data=self.wav_bytes,
params={"outputFormat": "hermes"},
)
self.check_status(response)
result = response.json()
self.assertEqual(result["type"], "intent")
nlu_intent = NluIntent.from_dict(result["value"])
self.assertEqual(nlu_intent.raw_input, "turn on the living room lamp")
self.assertEqual(nlu_intent.input, "turn on the living room lamp")
# Intent name and slots
self.assertEqual(nlu_intent.intent.intent_name, "ChangeLightState")
slots_by_name = {slot.slot_name: slot for slot in nlu_intent.slots}
self.assertIn("name", slots_by_name)
self.assertEqual(slots_by_name["name"].value["value"], "living room lamp")
self.assertIn("state", slots_by_name)
self.assertEqual(slots_by_name["state"].value["value"], "on")
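# Entry point so the module can be run directly (assumption: added for
# convenience; the original file relied on an external test runner).
if __name__ == "__main__":
    unittest.main()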
avg_line_length: 35.534653 | max_line_length: 87 | alphanum_fraction: 0.655893 | num_words: 445 | num_chars: 3,589 | num_lines: 100
hexsha: 4393be2aca5a25d561f41614d1c61c91497bb77e | size: 775 | ext: py | lang: Python
path: speech/melgan/model/multiscale.py | repo: OthmaneJ/deep-tts | head: 93059d568c5b458d3f0d80eb294d397ecace8731 | licenses: ["MIT"]
stars: 213 (2020-05-21T12:37:37.000Z .. 2022-03-28T16:36:07.000Z) | issues: 36 (2020-08-14T08:23:34.000Z .. 2022-02-07T11:26:17.000Z) | forks: 38 (2020-05-21T20:03:30.000Z .. 2022-01-19T16:31:15.000Z)
import torch
import torch.nn as nn
import torch.nn.functional as F
from .discriminator import Discriminator
from .identity import Identity
class MultiScaleDiscriminator(nn.Module):
def __init__(self):
super(MultiScaleDiscriminator, self).__init__()
self.discriminators = nn.ModuleList(
[Discriminator() for _ in range(3)]
)
self.pooling = nn.ModuleList(
[Identity()] +
[nn.AvgPool1d(kernel_size=4, stride=2, padding=2) for _ in range(1, 3)]
)
def forward(self, x):
ret = list()
for pool, disc in zip(self.pooling, self.discriminators):
x = pool(x)
ret.append(disc(x))
return ret # [(feat, score), (feat, score), (feat, score)]
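# A minimal smoke test (assumption: added for illustration; the input shape
# (batch, channels, samples) follows the usual MelGAN convention of
# single-channel audio, and the sample length 8192 is arbitrary):
if __name__ == "__main__":
    model = MultiScaleDiscriminator()
    outputs = model(torch.randn(2, 1, 8192))
    print(len(outputs))  # one (feat, score) entry per scale -> 3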
avg_line_length: 25.833333 | max_line_length: 83 | alphanum_fraction: 0.602581 | num_words: 90 | num_chars: 775 | num_lines: 29
hexsha: 4393d8ec0408fae06ace653dd14db15c556ea5c5 | size: 2,516 | ext: py | lang: Python
path: main.py | repo: AntonioLourencos/jogo-da-velha | head: 3b3e46e2d2f8c064f0df6a383bc5a0fe6bb01f63 | licenses: ["MIT"]
stars: 10 (2020-12-24T01:40:54.000Z .. 2021-06-03T01:22:34.000Z) | issues: 4 (2020-12-26T15:09:05.000Z .. 2021-10-01T13:36:16.000Z) | forks: 3 (2021-05-14T20:20:02.000Z .. 2021-08-09T19:10:12.000Z)
from game import about_button, start_button, play_sound, center_pos
import pygame
WHITE = (255,255,255)
BLACK = (0,0,0)
GREEN = (0, 255, 0)
pygame.init()
pygame.font.init()
pygame.mixer.init()
FONT = pygame.font.Font("assets/font.ttf", 70)
FONT_MIN = pygame.font.Font("assets/font.ttf", 30)
window = pygame.display.set_mode([600,600])
running = True
clock = pygame.time.Clock()
nickname = " "
me = "X"
ia = "O"
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
play_sound("minimize_001")
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_BACKSPACE and len(nickname) > 2:
nickname = list(nickname)
nickname.pop(-2)
nickname = "".join(nickname)
play_sound("error_001")
elif len(nickname.strip()) <= 10:
play_sound("bong_001")
if len(nickname) > 1:
nickname = list(nickname)
nickname.pop(-1)
nickname = "".join(nickname)
nickname += event.unicode
nickname += " "
if event.key == pygame.K_UP or event.key == pygame.K_DOWN:
if me == "X":
me = "O"
ia = "X"
else:
me = "X"
ia = "O"
window.fill(BLACK)
title = FONT.render("JOGO DA VELHA", True, WHITE)
title_pos = center_pos(title.get_rect(), 10)
window.blit(title, title_pos)
nickname_label = FONT.render("SEU NOME", True, WHITE)
nickname_label_pos = center_pos(nickname_label.get_rect(), 100)
window.blit(nickname_label, nickname_label_pos)
nickname_render = FONT.render(nickname, True, BLACK)
nickname_rect = nickname_render.get_rect()
nickname_pos = center_pos(nickname_rect, 180)
pygame.draw.rect(window, WHITE, (nickname_pos[0], 180, nickname_rect[2], nickname_rect[3]))
window.blit(nickname_render, nickname_pos)
choice_render = FONT.render(f"JOGUE COM {me}", True, WHITE)
window.blit(choice_render, center_pos(choice_render.get_rect(), 280))
    my_name = FONT_MIN.render("DESENVOLVIDO POR MARIA EDUARDA DE AZEVEDO", True, WHITE)  # "developed by Maria Eduarda de Azevedo"; no placeholders, so no f-string needed
window.blit(my_name, center_pos(my_name.get_rect(), 560))
start_button(window, "JOGAR", 380, me, ia, nickname.strip(), 10)
about_button(window, 450, 10)
pygame.display.flip()
clock.tick(60)
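# Shut pygame down cleanly once the main loop exits (assumption: added for
# completeness; the original script relied on interpreter exit).
pygame.quit()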
avg_line_length: 31.45 | max_line_length: 95 | alphanum_fraction: 0.591017 | num_words: 322 | num_chars: 2,516 | num_lines: 80
hexsha: 43952014f41c3fec2a8b86f2f567eb906cd4cf2f | size: 1,463 | ext: py | lang: Python
path: schedule/views.py | repo: 1donggri/teamProject | head: 9b4f37c2a93b065529ce9dd245f9717a783dd456 | licenses: ["CC-BY-3.0"]
stars: null | issues: null | forks: null
from django.shortcuts import render, redirect
from .models import Post
from .forms import ScheduleForm
from django.core.paginator import Paginator
# Create your views here.
def view_schedule(request):
all_posts = Post.objects.all().order_by('pub_date')
page = int(request.GET.get('p', 1))
    paginator = Paginator(all_posts, 5)
    posts = paginator.get_page(page)
return render(request, 'schedule/view_schedule.html', {'posts': posts})
def write_schedule(request):
if request.method == "POST":
form = ScheduleForm(request.POST)
if form.is_valid():
            # form.is_valid() runs all of the form's validators.
# user_id = request.session.get('user')
# user = User.objects.get(pk=user_id)
schedule = Post()
schedule.title = form.cleaned_data['title']
            # # Values that pass validation are exposed as a dict via form.cleaned_data;
            # # on failure, the error details are stored in form.errors.
schedule.username = form.cleaned_data['username']
schedule.pub_date = form.cleaned_data['pub_date']
schedule.save()
return redirect('schedule:view_schedule')
else:
form = ScheduleForm()
return render(request, 'schedule/write_schedule.html', {'form': form})
def delete(request, posts_id):
post = Post.objects.get(id=posts_id)
post.delete()
posts = Post.objects.all().order_by('-id')
return render(request, 'schedule/view_schedule.html', {'posts': posts})
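# A hypothetical urls.py for wiring these views (assumption: illustrative
# only; the route names follow the 'schedule:view_schedule' redirect above):
#
# from django.urls import path
# from . import views
#
# app_name = 'schedule'
# urlpatterns = [
#     path('', views.view_schedule, name='view_schedule'),
#     path('write/', views.write_schedule, name='write_schedule'),
#     path('delete/<int:posts_id>/', views.delete, name='delete'),
# ]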
avg_line_length: 34.833333 | max_line_length: 75 | alphanum_fraction: 0.6473 | num_words: 186 | num_chars: 1,463 | num_lines: 42
hexsha: 43985e0c9aab5f6373fb70168960c90190116e6d | size: 4,005 | ext: py | lang: Python
path: mcts.py | repo: korbi98/TicTacToeGo_Zero | head: b8ea4562f3ddf914a53fc380f2266f13ab887e04 | licenses: ["MIT"]
stars: null | issues: null | forks: 1 (2021-12-20T12:03:49.000Z .. 2021-12-20T12:03:49.000Z)
# Monte Carlo tree search for TicTacToe
import numpy as np
from tictactoe import Tictactoe
import copy
from random import choice
from tree import Node
import time
class MCTS:
    '''
    Class defining a simple Monte Carlo tree search algorithm.
    Attributes:
        - game: instance of a TicTacToe game
        - current_player: player to perform the next move
        - number_of_rollouts: number of simulations for generating one move
        - tree: list containing all possible and impossible (taken) leaf nodes
    '''
def __init__(self, game, number_of_rollouts):
self.game = game
self.current_player = game.move_number%2 + 1
print(self.current_player)
self.tree = Node(None, -1, 3 - self.current_player) # Root node of tree
self.number_of_rollouts = number_of_rollouts
print("Initial game state:\n",self.game.board)
def perform_search(self):
        '''Perform the MCTS by running the specified number of
        simulations and updating the corresponding leaf node.
        The leaf node is chosen by the traverse_tree function.
        '''
        start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
for i in range(self.number_of_rollouts):
simulated_game = copy.deepcopy(self.game)
# Traverse to leaf
leaf = self.traverse_tree(simulated_game)
# Random simulation for leaf
result = self.rollout(simulated_game)
# Update all visited nodes
self.update_tree(result, leaf)
        end_time = time.perf_counter()
print("\nFirst layer:")
for child in self.tree.children:
child.print(self.tree)
second_layer = max(self.tree.children, key= lambda x: x.visits)
print("\nSecond layer:")
for child in second_layer.children:
child.print(self.tree)
print("\nSearch took:", round(end_time-start_time, 4), "seconds")
result = [0 for i in range(self.game.size**2)]
for child in self.tree.children:
result[child.boardposition] = child.visits
return result
def traverse_tree(self, simulated_game):
        '''Choose the next leaf for performing a rollout. When a node is
        fully expanded, the child with the highest UCT is chosen;
        otherwise a random unexplored node is chosen.
        '''
current_node = self.tree #root
while current_node.isExpanded():
current_node = current_node.UTC_traverse(self.tree)
x,y = simulated_game.get_coords(current_node.boardposition)
simulated_game.setField(x,y)
# create children if empty
if not current_node.children:
current_node.getPossibleChildren(simulated_game.board)
# terminate if board is full
if not simulated_game.move_number < simulated_game.size**2 or simulated_game.checkboard():
return current_node
x,y = simulated_game.get_coords(current_node.boardposition)
simulated_game.setField(x,y)
# Choose random unexplored leaf
unexplored_leafs = list(filter(lambda x: x.visits == 0, current_node.children))
return choice(unexplored_leafs)
def rollout(self, simulated_game):
        '''Perform random play for the chosen leaf node until a terminal
        state is reached.'''
while (not simulated_game.checkboard()) and simulated_game.move_number < simulated_game.size**2:
simulated_game.perform_random_move()
res = simulated_game.checkboard()
print("Finished simulation player", res, "won. Terminal state is:")
simulated_game.printBoard()
return res
def update_tree(self, result, leaf):
'''update all visited nodes in tree'''
self.tree.visits += 1
current_node = leaf
while current_node.parent:
#current_node.print(self.tree)
current_node.update(result)
current_node = current_node.parent
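# A minimal usage sketch (assumption: illustrative only; the constructor
# signature of Tictactoe comes from the companion module and is not shown
# here, so this is left as a comment):
#
# game = Tictactoe(3)
# search = MCTS(game, number_of_rollouts=100)
# visit_counts = search.perform_search()
# best_move = visit_counts.index(max(visit_counts))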
avg_line_length: 35.131579 | max_line_length: 104 | alphanum_fraction: 0.640949 | num_words: 491 | num_chars: 4,005 | num_lines: 113
hexsha: 4399aded5ee5a7bbfaba489cfa6e1bbdb4b8689f | size: 3,911 | ext: py | lang: Python
path: grimer/metadata.py | repo: pirovc/grimer | head: 169f8d3009004d6d2f4ca4d3e7dfec819078cb34 | licenses: ["MIT"]
stars: 5 (2021-06-24T03:19:47.000Z .. 2021-12-18T22:33:04.000Z) | issues: 1 (2022-02-04T14:52:40.000Z .. 2022-03-07T10:04:54.000Z) | forks: null
import pandas as pd
from pandas.api.types import is_numeric_dtype
from grimer.utils import print_log
class Metadata:
valid_types = ["categorical", "numeric"]
default_type = "categorical"
def __init__(self, metadata_file, samples: list=[]):
# Read metadata and let pandas guess dtypes, index as str
self.data = pd.read_table(metadata_file, sep='\t', header=0, skiprows=0, index_col=0, dtype={0:str})
# Enforce string index
self.data.index = self.data.index.astype('str')
# Define all COLUMN TYPES as default
self.types = pd.Series(self.default_type, index=self.data.columns)
# Set types
if str(self.data.index[0]).startswith("#"):
# types defined on file
self.set_hard_types()
else:
# guessed types from read_table
self.types[self.data.dtypes.map(is_numeric_dtype)] = "numeric"
# Convert datatypes to adequate numeric values (int, float)
self.data = self.data.convert_dtypes(infer_objects=False, convert_string=False)
        # Re-convert everything to object to standardize (int64 NA is not serializable in bokeh)
self.data = self.data.astype("object")
# Remove empty fields
null_cols = self.data.isna().all(axis=0)
if any(null_cols):
self.data = self.data.loc[:, ~null_cols]
self.types = self.types[~null_cols]
print_log(str(sum(null_cols)) + " fields removed without valid values")
# Convert NaN on categorical to ""
self.data[self.types[self.types == "categorical"].index] = self.data[self.types[self.types == "categorical"].index].fillna('')
# Remove names
self.data.index.names = [None]
self.types.name = None
# sort and filter by given samples
if samples:
self.data = self.data.reindex(samples)
# Check if matched metadata and samples
null_rows = self.data.isna().all(axis=1)
if any(null_rows):
#self.data = self.data.loc[~null_rows, :]
print_log(str(sum(null_rows)) + " samples without valid metadata")
def __repr__(self):
args = ['{}={}'.format(k, repr(v)) for (k, v) in vars(self).items()]
return 'Metadata({})'.format(', '.join(args))
def set_hard_types(self):
# Get values defined on the first row
self.types = self.data.iloc[0]
# Drop row with types from main data
self.data.drop(self.types.name, inplace=True)
# Validate declared types
idx_valid = self.types.isin(self.valid_types)
if not idx_valid.all():
print_log("Invalid metadata types replaced by: " + self.default_type)
self.types[~idx_valid] = self.default_type
# Enforce column type on dataframe
self.data[self.types[self.types == "categorical"].index] = self.data[self.types[self.types == "categorical"].index].astype(str)
self.data[self.types[self.types == "numeric"].index] = self.data[self.types[self.types == "numeric"].index].apply(pd.to_numeric)
def get_col_headers(self):
return self.data.columns
def get_data(self, metadata_type: str=None):
if metadata_type is not None:
return self.data[self.types[self.types == metadata_type].index]
else:
return self.data
def get_col(self, col):
return self.data[col]
def get_unique_values(self, col):
return sorted(self.get_col(col).dropna().unique())
def get_formatted_unique_values(self, col):
if self.types[col] == "categorical":
return self.get_unique_values(col)
else:
return list(map('{:.16g}'.format, self.get_unique_values(col)))
def get_type(self, col):
return self.types[col]
def get_subset(self, column, value):
return self.data[self.data[column] == value]
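# A minimal usage sketch (assumption: illustrative file name and sample ids,
# left as a comment since no metadata file ships with this module):
#
# metadata = Metadata("metadata.tsv", samples=["sample1", "sample2"])
# print(metadata.get_col_headers())
# numeric_fields = metadata.get_data(metadata_type="numeric")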
avg_line_length: 38.722772 | max_line_length: 136 | alphanum_fraction: 0.628995 | num_words: 525 | num_chars: 3,911 | num_lines: 100
hexsha: 439a75ca9b8d0ab554205540e1b91cb943b0c4ba | size: 5,162 | ext: py | lang: Python
path: allennlp/training/metric_tracker.py | repo: MSLars/allennlp | head: 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 | licenses: ["Apache-2.0"]
stars: 11,433 (2017-06-27T03:08:46.000Z .. 2022-03-31T18:14:33.000Z) | issues: 4,006 (2017-06-26T21:45:43.000Z .. 2022-03-31T02:11:10.000Z) | forks: 2,560 (2017-06-26T21:16:53.000Z .. 2022-03-30T07:55:46.000Z)
from typing import Optional, Dict, Any, List, Union
from allennlp.common.checks import ConfigurationError
class MetricTracker:
"""
This class tracks a metric during training for the dual purposes of early stopping
and for knowing whether the current value is the best so far. It mimics the PyTorch
`state_dict` / `load_state_dict` interface, so that it can be checkpointed along with
your model and optimizer.
Some metrics improve by increasing; others by decreasing. You can provide a
`metric_name` that starts with "+" to indicate an increasing metric, or "-"
to indicate a decreasing metric.
# Parameters
metric_name : `Union[str, List[str]]`
Specifies the metric or metrics to track. Metric names have to start with
"+" for increasing metrics or "-" for decreasing ones. If you specify more
        than one, it tracks the sum of the increasing metrics minus the sum
of the decreasing metrics.
patience : `int`, optional (default = `None`)
If provided, then `should_stop_early()` returns True if we go this
many epochs without seeing a new best value.
"""
def __init__(
self,
metric_name: Union[str, List[str]],
patience: Optional[int] = None,
) -> None:
self._patience = patience
self._best_so_far: Optional[float] = None
self._epochs_with_no_improvement = 0
self._is_best_so_far = True
self._epoch_number = 0
self.best_epoch: Optional[int] = None
self.best_epoch_metrics: Dict[str, float] = {}
if isinstance(metric_name, str):
metric_name = [metric_name]
self.tracked_metrics = []
for name in metric_name:
if name.startswith("+"):
self.tracked_metrics.append((1.0, name[1:]))
elif name.startswith("-"):
self.tracked_metrics.append((-1.0, name[1:]))
else:
raise ConfigurationError("metric_name must start with + or -")
def clear(self) -> None:
"""
Clears out the tracked metrics, but keeps the patience
"""
self._best_so_far = None
self._epochs_with_no_improvement = 0
self._is_best_so_far = True
self._epoch_number = 0
self.best_epoch = None
self.best_epoch_metrics.clear()
def state_dict(self) -> Dict[str, Any]:
"""
A `Trainer` can use this to serialize the state of the metric tracker.
"""
return {
"best_so_far": self._best_so_far,
"epochs_with_no_improvement": self._epochs_with_no_improvement,
"is_best_so_far": self._is_best_so_far,
"epoch_number": self._epoch_number,
"best_epoch": self.best_epoch,
"best_epoch_metrics": self.best_epoch_metrics,
}
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
"""
A `Trainer` can use this to hydrate a metric tracker from a serialized state.
"""
self._best_so_far = state_dict["best_so_far"]
self._epochs_with_no_improvement = state_dict["epochs_with_no_improvement"]
self._is_best_so_far = state_dict["is_best_so_far"]
self._epoch_number = state_dict["epoch_number"]
self.best_epoch = state_dict["best_epoch"]
# Even though we don't promise backwards compatibility for the --recover flag,
# it's particularly easy and harmless to provide it here, so we do it.
self.best_epoch_metrics = state_dict.get("best_epoch_metrics", {})
def add_metrics(self, metrics: Dict[str, float]) -> None:
"""
Record a new value of the metric and update the various things that depend on it.
"""
combined_score = self.combined_score(metrics)
new_best = (self._best_so_far is None) or (combined_score > self._best_so_far)
if new_best:
self._best_so_far = combined_score
self._epochs_with_no_improvement = 0
self._is_best_so_far = True
self.best_epoch = self._epoch_number
else:
self._epochs_with_no_improvement += 1
self._is_best_so_far = False
self._epoch_number += 1
def is_best_so_far(self) -> bool:
"""
Returns true if the most recent value of the metric is the best so far.
"""
return self._is_best_so_far
def should_stop_early(self) -> bool:
"""
Returns true if improvement has stopped for long enough.
"""
if self._patience is None:
return False
else:
return self._epochs_with_no_improvement >= self._patience
def combined_score(self, metrics: Dict[str, float]) -> float:
try:
return sum(
factor * metrics[metric_name] for factor, metric_name in self.tracked_metrics
)
except KeyError as e:
raise ConfigurationError(
f"You configured the trainer to use the {e.args[0]} "
"metric for early stopping, but the model did not produce that metric."
)
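# A minimal usage sketch (assumption: added for illustration; the metric
# values are made up):
if __name__ == "__main__":
    tracker = MetricTracker(metric_name=["+accuracy", "-loss"], patience=3)
    tracker.add_metrics({"accuracy": 0.8, "loss": 0.5})  # combined score 0.3
    print(tracker.is_best_so_far())     # True: the first epoch is the best so far
    print(tracker.should_stop_early())  # False: no epochs without improvement yet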
avg_line_length: 38.522388 | max_line_length: 93 | alphanum_fraction: 0.629407 | num_words: 681 | num_chars: 5,162 | num_lines: 133
hexsha: 439abf267a321356c428ab3774898fb305a07e4a | size: 956 | ext: py | lang: Python
path: json_analyzer.py | repo: bantenz/NetworkConfigParser | head: e1aa8385540823340e8278c7d7af0201399efd8f | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
import json
from deepdiff import DeepDiff
import pprint
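# Portability shim (assumption: added so the script also runs on Python 3,
# where raw_input() was renamed to input(); the original targets Python 2).
try:
    raw_input
except NameError:
    raw_input = input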
def get_json(file_name):
with open(file_name) as json_file:
json_data = json.load(json_file)
return json_data
def compare_json(Hostname, Command, Data1, Data2):
if (Data1 == Data2):
print ("%s - %s output is same" % (Hostname, Command))
else:
print ("%s - %s output is different" % (Hostname, Command))
pprint.pprint(DeepDiff(Data1, Data2))
def main():
Hostname = raw_input('Input Hostname of the device : ').lower()
Command = raw_input('Input Command : ').lower()
Filename1 = raw_input('Input First JSON File : ').lower()
Filename2 = raw_input('Input Second JSON File : ').lower()
Data1 = get_json(Filename1)
Data2 = get_json(Filename2)
compare_json(Hostname, Command, Data1, Data2)
if __name__ == "__main__":
    # Run main() only when this file is executed directly, not when imported.
main()
avg_line_length: 30.83871 | max_line_length: 94 | alphanum_fraction: 0.669456 | num_words: 129 | num_chars: 956 | num_lines: 30
hexsha: 439b48ead1b5b023fe47fbce88acf0d32181f26a | size: 9,437 | ext: py | lang: Python
path: fiwareglancesync/sync.py | repo: telefonicaid/fiware-glancesync | head: 5ad0c80e12b9384473f31bf336015c75cf02a2a2 | licenses: ["Apache-2.0"]
stars: null | issues: 88 (2015-07-21T22:13:23.000Z .. 2016-11-15T21:28:56.000Z) | forks: 2 (2015-08-12T11:19:55.000Z .. 2018-05-25T19:04:43.000Z)
#!/usr/bin/env python
# -- encoding: utf-8 --
#
# Copyright 2015-2016 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
#
import sys
import StringIO
import os
import os.path
import datetime
import argparse
import logging
from fiwareglancesync.glancesync import GlanceSync
class Sync(object):
def __init__(self, regions, override_d=None):
"""init object"""
GlanceSync.init_logs()
self.glancesync = GlanceSync(options_dict=override_d)
regions_expanded = list()
already_sorted = True
for region in regions:
if region.endswith(':'):
regions_expanded.extend(self.glancesync.get_regions(
target=region[:-1]))
already_sorted = False
else:
regions_expanded.append(region)
regions = regions_expanded
if not regions:
regions = self.glancesync.get_regions()
already_sorted = False
if not already_sorted:
regions_unsorted = regions
regions = list()
for region in self.glancesync.preferable_order:
if region in regions_unsorted:
regions.append(region)
regions_unsorted.remove(region)
regions.extend(regions_unsorted)
self.regions = regions
def report_status(self):
"""Report the synchronisation status of the regions"""
for region in self.regions:
try:
stream = StringIO.StringIO()
self.glancesync.export_sync_region_status(region, stream)
print(stream.getvalue())
except Exception:
                # Do nothing; the message has already been printed.
                # Try the next region.
continue
def parallel_sync(self):
"""Run the synchronisation in several regions in parallel. The
synchronisation inside the region is sequential (i.e. several
regions are synchronised simultaneously, but only one image at time
is uploaded for each region)"""
max_children = self.glancesync.max_children
now = datetime.datetime.now()
datestr = str(now.year) + str(now.month).zfill(2) + \
str(now.day).zfill(2) + '_' + str(now.hour).zfill(2) +\
str(now.minute).zfill(2)
msg = '======Master is ' + self.glancesync.master_region
print(msg)
sys.stdout.flush()
os.mkdir('sync_' + datestr)
children = dict()
for region in self.regions:
try:
if len(children) >= max_children:
self._wait_child(children)
pid = os.fork()
if pid > 0:
children[pid] = region
continue
else:
path = os.path.join('sync_' + datestr, region + '.txt')
handler = logging.FileHandler(path)
handler.setFormatter(logging.Formatter('%(message)s'))
logger = self.glancesync.log
# Remove old handlers
for h in logger.handlers:
logger.removeHandler(h)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.propagate = 0
self.glancesync.sync_region(region)
                    # After a fork, os._exit() and not sys.exit() must be used.
os._exit(0)
            except Exception:
                # Flush the child's output before propagating the error.
                sys.stderr.flush()
                raise
while len(children) > 0:
self._wait_child(children)
print('All is done.')
def sequential_sync(self, dry_run=False):
"""Run the synchronisation sequentially (that is, do not start the
synchronisation to a region before the previous one was completed or
failed
:param dry_run: if true, do not synchronise images actually
"""
msg = '======Master is ' + self.glancesync.master_region
print(msg)
for region in self.regions:
try:
msg = "======" + region
print(msg)
sys.stdout.flush()
self.glancesync.sync_region(region, dry_run=dry_run)
except Exception:
                # Do nothing; the message has already been printed.
                # Try the next region.
continue
def _wait_child(self, children):
""" Wait until one of the regions ends its synchronisation and then
print the result
:param children:
:return: a dictionary or regions, indexed by the pid of the process
"""
finish_direct_child = False
while not finish_direct_child:
(pid, status) = os.wait()
if pid not in children:
continue
else:
finish_direct_child = True
if status == 0:
msg = 'Region {0} has finished'.format(children[pid])
print(msg)
else:
msg = 'Region {0} has finished with errors'
print(msg.format(children[pid]))
del children[pid]
sys.stdout.flush()
def show_regions(self):
"""print a full list of the regions available (excluding the
master region) in all the targets defined in the configuration file"""
regions = self.glancesync.get_regions()
for target in self.glancesync.targets.keys():
if target == 'facade' or target == 'master':
continue
regions.extend(self.glancesync.get_regions(target=target))
print(' '.join(regions))
def make_backup(self):
"""make a backup of the metadata in the regions specified at the
constructor (in addition to the master region). The backup is created
in a directory named 'backup_glance_' with the date and time as suffix
There is a file for each region (the name is backup_<region>.csv) and
inside the file a line for each image.
Only the information about public images/ the images owned by
the tenant, can be obtained, regardless if the user is an admin. This
is a limitation of the glance API"""
now = datetime.datetime.now().isoformat()
directory = 'backup_glance_' + now
os.mkdir(directory)
regions = set(self.regions)
regions.add(self.glancesync.master_region)
for region in regions:
try:
self.glancesync.backup_glancemetadata_region(region, directory)
except Exception:
# do nothing. Already logged.
continue
if __name__ == '__main__':
# Parse cmdline
description = 'A tool to sync images from a master region to other '\
'regions'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('regions', metavar='region', type=str, nargs='*',
help='region where the images are uploaded to')
parser.add_argument('--parallel', action='store_true',
help='sync several regions in parallel')
parser.add_argument(
'--config', nargs='+', help='override configuration options. (e.g. ' +
"main.master_region=Valladolid metadata_condition='image.name=name1')")
group = parser.add_mutually_exclusive_group()
group.add_argument('--dry-run', action='store_true',
help='do not actually upload the images')
group.add_argument('--show-status', action='store_true',
help='do not sync, but show the synchronisation status')
group.add_argument('--show-regions', action='store_true',
help='do not sync, only show the available regions')
group.add_argument(
'--make-backup', action='store_true',
help="do not sync; make a backup of the regions' metadata")
meta = parser.parse_args()
options = dict()
if meta.config:
for option in meta.config:
pair = option.split('=')
if len(pair) != 2:
# parser.error() prints the message and exits with status 2
parser.error('config options must have the format key=value')
options[pair[0].strip()] = pair[1]
# Run cmd
sync = Sync(meta.regions, options)
if meta.show_status:
sync.report_status()
elif meta.parallel:
sync.parallel_sync()
elif meta.show_regions:
sync.show_regions()
elif meta.make_backup:
sync.make_backup()
else:
sync.sequential_sync(meta.dry_run)
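# Example invocations (a sketch; the script, region and target names are
# hypothetical):
#   python sync_tool.py --show-regions
#   python sync_tool.py --dry-run Burgos
#   python sync_tool.py --parallel othertarget:
# A region argument ending in ':' expands to every region of that target, as
# handled in the constructor above.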
| 35.212687
| 79
| 0.586097
| 1,086
| 9,437
| 4.997238
| 0.296501
| 0.041275
| 0.012161
| 0.017505
| 0.13009
| 0.090105
| 0.046434
| 0.046434
| 0.046434
| 0.029851
| 0
| 0.005191
| 0.326375
| 9,437
| 267
| 80
| 35.344569
| 0.848513
| 0.24192
| 0
| 0.246988
| 0
| 0
| 0.109652
| 0.009667
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042169
| false
| 0
| 0.048193
| 0
| 0.096386
| 0.048193
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
439b5da067d8952a4649cfcbc1a2148086951365
| 2,224
|
py
|
Python
|
models/object_detection/pytorch/ssd-resnet34/training/cpu/mlperf_logger.py
|
Pandinosaurus/models-intelai
|
60f5712d79a363bdb7624e3116a66a4f1a7fe208
|
[
"Apache-2.0"
] | null | null | null |
models/object_detection/pytorch/ssd-resnet34/training/cpu/mlperf_logger.py
|
Pandinosaurus/models-intelai
|
60f5712d79a363bdb7624e3116a66a4f1a7fe208
|
[
"Apache-2.0"
] | null | null | null |
models/object_detection/pytorch/ssd-resnet34/training/cpu/mlperf_logger.py
|
Pandinosaurus/models-intelai
|
60f5712d79a363bdb7624e3116a66a4f1a7fe208
|
[
"Apache-2.0"
] | null | null | null |
### This file is originally from: [mlcommons repo](https://github.com/mlcommons/training/tree/9947bdf21ee3f2488fa4b362eec2ce7deb2ec4dd/single_stage_detector/ssd/mlperf_logger.py)
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import os
from mlperf_logging import mllog
from mlperf_logging.mllog import constants as mllog_const
mllogger = mllog.get_mllogger()
mllog.config(
filename=(os.getenv("COMPLIANCE_FILE") or "mlperf_compliance.log"),
root_dir=os.path.normpath(os.path.dirname(os.path.realpath(__file__))))
def ssd_print(*args, sync=True, **kwargs):
use_cuda = os.getenv('USE_CUDA')
if sync and use_cuda == 'True':
barrier()
if get_rank() == 0:
kwargs['stack_offset'] = 2
mllogger.event(*args, **kwargs)
def barrier():
"""
Works as a temporary distributed barrier, currently pytorch
doesn't implement barrier for NCCL backend.
Calls all_reduce on dummy tensor and synchronizes with GPU.
"""
if torch.distributed.is_initialized():
torch.distributed.all_reduce(torch.cuda.FloatTensor(1))
torch.cuda.synchronize()
def get_rank():
"""
Gets distributed rank or returns zero if distributed is not initialized.
"""
if torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
else:
# environment variables are strings; cast so that get_rank() == 0 works
rank = int(os.getenv('RANK', os.getenv('LOCAL_RANK', 0)))
return rank
def broadcast_seeds(seed, device):
if torch.distributed.is_initialized():
seeds_tensor = torch.LongTensor([seed]).to(device)
torch.distributed.broadcast(seeds_tensor, 0)
seed = seeds_tensor.item()
return seed
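# A minimal single-process smoke test (a sketch: it assumes torch.distributed
# is not initialized, so get_rank() falls back to RANK/LOCAL_RANK and
# broadcast_seeds() returns its input unchanged).
if __name__ == '__main__':
demo_seed = broadcast_seeds(1234, device='cpu')
ssd_print(key=mllog_const.SEED, value=demo_seed, sync=False)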
| 35.870968
| 178
| 0.721223
| 304
| 2,224
| 5.171053
| 0.490132
| 0.038168
| 0.034351
| 0.038168
| 0.05916
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017496
| 0.177608
| 2,224
| 61
| 179
| 36.459016
| 0.84199
| 0.447392
| 0
| 0.09375
| 0
| 0
| 0.062979
| 0.017872
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.15625
| 0
| 0.34375
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
439cc020be352b363d0141cede18e92d0b0f339f
| 5,910
|
py
|
Python
|
project/server/main/feed.py
|
dataesr/harvest-theses
|
1725b3ec3a944526fe62941d554bc3de6209cd28
|
[
"MIT"
] | null | null | null |
project/server/main/feed.py
|
dataesr/harvest-theses
|
1725b3ec3a944526fe62941d554bc3de6209cd28
|
[
"MIT"
] | null | null | null |
project/server/main/feed.py
|
dataesr/harvest-theses
|
1725b3ec3a944526fe62941d554bc3de6209cd28
|
[
"MIT"
] | null | null | null |
import datetime
import os
import pymongo
import requests
from urllib import parse
from urllib.parse import quote_plus
import json
from retry import retry
from bs4 import BeautifulSoup
import math
from project.server.main.logger import get_logger
from project.server.main.utils_swift import upload_object
from project.server.main.parse import parse_theses, get_idref_from_OS
from project.server.main.referentiel import harvest_and_save_idref
logger = get_logger(__name__)
def get_num_these(soup):
num_theses = []
for d in soup.find_all('doc'):
num_theses.append(d.find('str', {'name': 'num'}).text)
return num_theses
@retry(delay=60, tries=5)
def get_num_these_between_dates(start_date, end_date):
start_date_str = start_date.strftime("%d/%m/%Y")
end_date_str = end_date.strftime("%d/%m/%Y")
start_date_str_iso = start_date.strftime("%Y%m%d")
end_date_str_iso = end_date.strftime("%Y%m%d")
start = 0
url = "http://theses.fr/?q=&zone1=titreRAs&val1=&op1=AND&zone2=auteurs&val2=&op2=AND&zone3=etabSoutenances&val3=&op3=AND&zone4=dateSoutenance&val4a={}&val4b={}&start={}&format=xml"
logger.debug(url.format(start_date_str, end_date_str, start))
r = requests.get(url.format(start_date_str, end_date_str, start))
soup = BeautifulSoup(r.text, 'lxml')
nb_res = soup.find('result', {'name': 'response'}).attrs['numfound']
logger.debug("{} resultats entre {} et {}".format(nb_res, start_date_str_iso, end_date_str_iso ))
num_theses = get_num_these(soup)
nb_pages_remaining = math.ceil(int(nb_res)/1000)
for p in range(1, nb_pages_remaining):
logger.debug("page {} for entre {} et {}".format(p, start_date_str_iso, end_date_str_iso))
r = requests.get(url.format(start_date_str, end_date_str, p * 1000))
soup = BeautifulSoup(r.text, 'lxml')
num_theses += get_num_these(soup)
return num_theses
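# Paging arithmetic: with e.g. 2345 results and a page size of 1000, the loop
# above issues ceil(2345/1000) - 1 = 2 extra requests, at start offsets 1000
# and 2000, after the initial one.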
def save_data(data, collection_name, year_start, year_end, chunk_index, referentiel):
logger.debug(f'save_data theses {collection_name} {chunk_index}')
year_start_end = 'all_years'
if year_start and year_end:
year_start_end = f'{year_start}_{year_end}'
# 1. save raw data to OS
current_file = f'theses_{year_start_end}_{chunk_index}.json'
json.dump(data, open(current_file, 'w'))
os.system(f'gzip {current_file}')
upload_object('theses', f'{current_file}.gz', f'{collection_name}/raw/{current_file}.gz')
os.system(f'rm -rf {current_file}.gz')
# 2.transform data and save in mongo
current_file_parsed = f'theses_parsed_{year_start_end}_{chunk_index}.json'
data_parsed = [parse_theses(e, referentiel, collection_name) for e in data]
json.dump(data_parsed, open(current_file_parsed, 'w'))
# insert_data(collection_name, current_file_parsed)
os.system(f'gzip {current_file_parsed}')
upload_object('theses', f'{current_file_parsed}.gz', f'{collection_name}/parsed/{current_file_parsed}.gz')
os.system(f'rm -rf {current_file_parsed}.gz')
def harvest_and_insert(collection_name):
# 1. save aurehal structures
harvest_and_save_idref(collection_name)
referentiel = get_idref_from_OS(collection_name)
# 2. drop mongo
#logger.debug(f'dropping {collection_name} collection before insertion')
#myclient = pymongo.MongoClient('mongodb://mongo:27017/')
#myclient['theses'][collection_name].drop()
# 3. save publications
year_start = None
year_end = None
if year_start is None:
year_start = 1990
if year_end is None:
year_end = datetime.date.today().year
harvest_and_insert_one_year(collection_name, year_start, year_end, referentiel)
@retry(delay=60, tries=5)
def download_these_notice(these_id):
res = {'id': these_id}
r_tefudoc = requests.get("http://www.theses.fr/{}.tefudoc".format(these_id))
r_xml = requests.get("http://www.theses.fr/{}.xml".format(these_id))
if r_tefudoc.text[0:5] == "<?xml":
res['tefudoc'] = r_tefudoc.text
if r_xml.text[0:5] == "<?xml":
res['xml'] = r_xml.text
return res
def harvest_and_insert_one_year(collection_name, year_start, year_end, referentiel):
year_start_end = 'all_years'
if year_start and year_end:
year_start_end = f'{year_start}_{year_end}'
start_date = datetime.datetime(year_start,1,1)
end_date = datetime.datetime(year_end + 1,1,1) + datetime.timedelta(days = -1)
all_num_theses = get_num_these_between_dates(start_date, end_date)
# todo save by chunk
chunk_index = 0
data = []
MAX_DATA_SIZE = 25000
nb_theses = len(all_num_theses)
logger.debug(f'{nb_theses} theses to download and parse')
for ix, nnt in enumerate(all_num_theses):
if ix % 100 == 0:
logger.debug(f'theses {year_start_end} {ix}')
res = download_these_notice(nnt)
data.append(res)
if (len(data) > MAX_DATA_SIZE) or (ix == nb_theses - 1):
if data:
save_data(data, collection_name, year_start, year_end, chunk_index, referentiel)
data = []
chunk_index += 1
def insert_data(collection_name, output_file):
myclient = pymongo.MongoClient('mongodb://mongo:27017/')
mydb = myclient['theses']
## mongo start
start = datetime.datetime.now()
mongoimport = f"mongoimport --numInsertionWorkers 2 --uri mongodb://mongo:27017/theses --file {output_file}" \
f" --collection {collection_name} --jsonArray"
logger.debug(f'Mongoimport {output_file} start at {start}')
logger.debug(f'{mongoimport}')
os.system(mongoimport)
logger.debug(f'Checking indexes on collection {collection_name}')
mycol = mydb[collection_name]
#mycol.create_index('docid')
end = datetime.datetime.now()
delta = end - start
logger.debug(f'Mongoimport done in {delta}')
## mongo done
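# Typical entry point (a sketch; the collection name is hypothetical and a
# full run is slow, since every notice is downloaded from theses.fr):
#   harvest_and_insert('theses_20211201')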
| 36.708075
| 184
| 0.694755
| 862
| 5,910
| 4.488399
| 0.207657
| 0.044197
| 0.024813
| 0.024813
| 0.34505
| 0.283019
| 0.182735
| 0.182735
| 0.15482
| 0.124063
| 0
| 0.015648
| 0.178173
| 5,910
| 160
| 185
| 36.9375
| 0.780935
| 0.069036
| 0
| 0.123894
| 0
| 0.00885
| 0.216187
| 0.066533
| 0
| 0
| 0
| 0.00625
| 0
| 1
| 0.061947
| false
| 0
| 0.168142
| 0
| 0.256637
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
439e62c4d6bd84f9f57f7073032cb6f2eab27d1b
| 15,524
|
py
|
Python
|
utilities.py
|
gandhiy/lipMIP
|
11843e6bf2223acca44f57d29791521aac15caf3
|
[
"MIT"
] | 11
|
2020-05-18T17:33:25.000Z
|
2022-01-28T18:42:31.000Z
|
utilities.py
|
gandhiy/lipMIP
|
11843e6bf2223acca44f57d29791521aac15caf3
|
[
"MIT"
] | null | null | null |
utilities.py
|
gandhiy/lipMIP
|
11843e6bf2223acca44f57d29791521aac15caf3
|
[
"MIT"
] | 1
|
2020-12-10T19:57:20.000Z
|
2020-12-10T19:57:20.000Z
|
""" General all-purpose utilities """
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import gurobipy as gb
import matplotlib.pyplot as plt
import io
import contextlib
import tempfile
import time
import re
import pickle
import inspect
import glob
import os
COMPLETED_JOB_DIR = os.path.join(os.path.dirname(__file__), 'jobs', 'completed')
# ===============================================================================
# = Helpful all-purpose functions =
# ===============================================================================
class ParameterObject:
def __init__(self, **kwargs):
self.attr_list = []
assert 'attr_list' not in kwargs
for k,v in kwargs.items():
setattr(self, k, v)
self.attr_list.append(k)
def change_attrs(self, **kwargs):
new_kwargs = {}
for attr in self.attr_list:
if attr in kwargs:
new_kwargs[attr] = kwargs[attr]
else:
new_kwargs[attr] = getattr(self, attr)
return self.__class__(**new_kwargs)
class Factory(ParameterObject):
def __init__(self, constructor, **kwargs):
self.constructor = constructor
super(Factory, self).__init__(**kwargs)
def __call__(self, **kwargs):
cons_args = inspect.getfullargspec(self.constructor).args
# Make default args from attributes
args = {k: getattr(self, k) for k in self.attr_list if k in cons_args}
# Update the default args
for k,v in kwargs.items():
if k in cons_args:
args[k] = v
# Build object
return self.constructor(**args)
def __repr__(self):
return '<Factory: %s>' % self.constructor.__self__.__name__
class DoEvery:
@classmethod
def dummy(cls, *args, **kwargs):
pass
def __init__(self, func, freq):
""" Simple class that holds onto a function and it returns
this function every freq iterations
ARGS:
func: function object to be returned every freq iterations
freq: int - how often to return the function
"""
self.func = func
self.freq = freq
self.i = 0
def __call__(self, *args, **kwargs):
if self.i % self.freq == 0:
returner = self.func
else:
returner = self.dummy
self.i += 1
return returner(*args, **kwargs)
class Timer:
def __init__(self, start_on_init=True):
if start_on_init:
self.start()
def start(self):
self.start_time = time.time()
def stop(self):
self.stop_time = time.time()
return self.stop_time - self.start_time
def reset(self):
self.start_time = self.stop_time = None
# NOTE: tuple-aware versions of cpufy/cudafy defined later in this file
# shadow these two simpler definitions at import time.
def cpufy(tensor_iter):
""" Takes a list of tensors and safely pushes them back onto the cpu"""
return [_.cpu() for _ in tensor_iter]
def cudafy(tensor_iter):
""" Takes a list of tensors and safely converts all of them to cuda"""
def safe_cuda(el):
try:
return el.cuda()
except AssertionError:
return el
return [safe_cuda(_) for _ in tensor_iter]
def prod(num_iter):
""" returns product of all elements in this iterator *'ed together"""
cumprod = 1
for el in num_iter:
cumprod *= el
return cumprod
def partition(n, m):
""" Given ints n > m, partitions n into an iterable where all
elements are m, except for the last one, which is (n % m) when
m does not divide n
"""
count = 0
while count < n:
yield min([m, n - count])
count += m
def flatten_list(lol):
""" Given list of lists, flattens it into a single list. """
output = []
for el in lol:
if not isinstance(el, list):
output.append(el)
continue
output.extend(flatten_list(el))
return output
def partition_by_suffix(iterable, func):
""" Given an iterable and a boolean-valued function which takes in
elements of that iterable, outputs a list of lists, where each list
ends in an element for which the func returns true, (except for the
last one)
e.g.
iterable := [1, 2, 3, 4, 5,5, 5]
func := lambda x: (x % 2) == 0
returns [[1,2], [3,4], [5, 5, 5]]
"""
output = []
sublist = []
for el in iterable:
sublist.append(el)
if func(el):
output.append(sublist)
sublist = []
if len(sublist) > 0:
output.append(sublist)
return output
def arraylike(obj):
return isinstance(obj, (torch.Tensor, np.ndarray))
def as_numpy(tensor_or_array):
""" If given a tensor or numpy array returns that object cast numpy array
"""
if isinstance(tensor_or_array, torch.Tensor):
tensor_or_array = tensor_or_array.cpu().detach().numpy()
return tensor_or_array
def two_col(l, r):
""" Takes two numpy arrays of size N and makes a numpy array of size Nx2
"""
return np.vstack([l, r]).T
def split_pos_neg(x):
if isinstance(x, torch.Tensor):
return split_tensor_pos_neg(x)
else:
return split_ndarray_pos_neg(x)
def split_tensor_pos_neg(x):
""" Splits a tensor into positive and negative components """
pos = F.relu(x)
neg = -F.relu(-x)
return pos, neg
def split_ndarray_pos_neg(x):
""" Splits a numpy ndarray into positive and negative components """
pos = x * (x >= 0)
neg = x * (x <= 0)
return pos, neg
def swap_axes(x, source, dest):
""" Swaps the dimensions of source <-> dest for torch/numpy
ARGS:
x : numpy array or tensor
source : int index
dest : int index
RETURNS
x' - object with same data as x, but with axes swapped
"""
if isinstance(x, torch.Tensor):
return x.transpose(source, dest)
else:
return np.moveaxis(x, source, dest)
def build_var_namer(k):
return lambda d: '%s[%s]' % (k, d)
@contextlib.contextmanager
def silent():
save_stdout = sys.stdout
temp = tempfile.TemporaryFile(mode='w')
sys.stdout = temp
yield
sys.stdout = save_stdout
temp.close()
def ia_mm(matrix, intervals, lohi_dim, matrix_or_vec='matrix'):
""" Interval analysis matrix(-vec) multiplication for torch/np intervals
ARGS:
matrix : tensor or numpy array of shape (m,n) -
intervals : tensor or numpy array with shape (n1, ..., 2, n_i, ...) -
"vector" of intervals to be multiplied by a matrix
one such n_i must be equal to n (from matrix shape)
lohi_dim : int - which dimension (index) of intervals corresponds
to the lo/hi split
matrix_or_vec : string - must be matrix or vec, corresponds to whether
intervals is to be treated as a matrix or a vector.
If a vec, a trailing singleton dimension is added before the
multiplication and removed afterwards
RETURNS:
object of same type as intervals, but with the shape slightly
different: len(output[-1/-2]) == m
"""
# asserts for shapes and things
assert isinstance(matrix, torch.Tensor) # TENSOR ONLY FOR NOW
assert isinstance(intervals, torch.Tensor)
m, n = matrix.shape
assert intervals.shape[lohi_dim] == 2
assert matrix_or_vec in ['matrix', 'vec']
if matrix_or_vec == 'vec':
intervals = intervals.unsqueeze(-1)
assert lohi_dim != intervals.dim() - 2
assert intervals.shape[-2] == n
# define operators based on tensor/numpy case
matmul = lambda m, x: m.matmul(x)
stack = lambda a, b: torch.stack([a, b])
# now do IA stuff
intervals = swap_axes(intervals, 0, lohi_dim)
matrix_pos, matrix_neg = split_pos_neg(matrix)
los, his = intervals
new_los = matmul(matrix_pos, los) + matmul(matrix_neg, his)
new_his = matmul(matrix_pos, his) + matmul(matrix_neg, los)
intervals = swap_axes(stack(new_los, new_his), 0, lohi_dim)
if matrix_or_vec == 'vec':
intervals = intervals.squeeze(-1)
return intervals
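# Worked example (a sketch): with matrix [[2., -1.]] and the interval vector
# los = [0., 0.], his = [1., 1.] stacked on lohi_dim = 0, the positive part
# of the matrix maps los->los and his->his while the negative part swaps
# them, giving new_los = -1. and new_his = 2. for the single output
# coordinate, i.e. the interval [-1., 2.].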
# =============================================================================
# = Image display functions =
# =============================================================================
def display_images(image_rows, figsize=(8, 8)):
""" Given either a tensor/np.array (or list of same), will display each
element in the row or tensor
ARGS:
image_rows: tensor or np.array or tensor[], np.array[] -
image or list of images to display
RETURNS: None, but displays images
"""
if not isinstance(image_rows, list):
image_rows = [image_rows]
np_rows = [as_numpy(row) for row in image_rows]
# Transpose channel to last dimension and stack to make rows
np_rows = [np.concatenate(_.transpose([0, 2, 3, 1]), axis=1)
for _ in np_rows]
# Now stack rows
full_image = np.concatenate(np_rows, axis=0)
# And then show image
imshow_kwargs = {}
if full_image.shape[-1] == 1:
full_image = full_image.squeeze()
imshow_kwargs['cmap'] = 'gray'
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot()
ax.axis('off')
ax.imshow(full_image, **imshow_kwargs)
plt.show()
# ======================================================
# = Pytorch helpers =
# ======================================================
def seq_append(seq, module):
""" Takes a nn.sequential and a nn.module and creates a nn.sequential
with the module appended to it
ARGS:
seq: nn.Sequntial object
module: <inherits nn.Module>
RETURNS:
nn.Sequential object
"""
seq_modules = [seq[_] for _ in range(len(seq))] + [module]
return nn.Sequential(*seq_modules)
def cpufy(tensor_iter):
""" Takes a list of tensors and safely pushes them back onto the cpu"""
output = []
for el in tensor_iter:
if isinstance(el, tuple):
output.append(tuple(_.cpu() for _ in el))
else:
output.append(el.cpu())
return output
def cudafy(tensor_iter):
""" Takes a list of tensors and safely converts all of them to cuda"""
def safe_cuda(el):
try:
if isinstance(el, tuple):
return tuple(_.cuda() for _ in el)
else:
return el.cuda()
except AssertionError:
return el
return [safe_cuda(_) for _ in tensor_iter]
# =======================================
# = Polytope class =
# =======================================
class Polytope:
INPUT_KEY = 'input'
SLACK_KEY = 'slack'
def __init__(self, A, b):
""" Represents a polytope of the form {x | AX <= b}
(where everything is a numpy array)
"""
self.A = A
self.b = b
def _input_from_model(self, model):
var_namer = build_var_namer(self.INPUT_KEY)
return np.array([model.getVarByName(var_namer(i)).X
for i in range(self.A.shape[1])])
def _build_model(self, slack=False):
""" Builds a gurobi model of this object """
with silent():
model = gb.Model()
input_namer = build_var_namer(self.INPUT_KEY)
input_vars = [model.addVar(lb=-gb.GRB.INFINITY, ub=gb.GRB.INFINITY,
name=input_namer(i))
for i in range(self.A.shape[1])]
if slack:
slack_var = model.addVar(lb=0, ub=1.0, name=self.SLACK_KEY)
else:
slack_var = 0
for i, row in enumerate(self.A):
model.addConstr(gb.LinExpr(row, input_vars) + slack_var <= self.b[i])
model.update()
return model
def contains(self, x, tolerance=1e-6):
return all(self.A @ x <= self.b + tolerance)
def interior_point(self):
model = self._build_model(slack=True)
slack_var = model.getVarByName(self.SLACK_KEY)
model.setObjective(slack_var, gb.GRB.MAXIMIZE)
model.update()
model.optimize()
assert model.Status == 2
return self._input_from_model(model)
def intersects_hbox(self, hbox):
""" If this intersects a given hyperbox, returns a
point contained in both
"""
model = self._build_model(slack=True)
input_namer = build_var_namer(self.INPUT_KEY)
for i, (lb, ub) in enumerate(hbox):
var = model.getVarByName(input_namer(i))
model.addConstr(lb <= var <= ub)
slack_var = model.getVarByName(self.SLACK_KEY)
model.setObjective(slack_var, gb.GRB.MAXIMIZE)
model.update()
model.optimize()
assert model.Status == 2
return self._input_from_model(model)
# =========================================================
# = experiment.Result object helpers =
# =========================================================
def filename_to_epoch(filename):
return int(re.search(r'_EPOCH\d{4}_', filename).group()[-5:-1])
def read_result_files(result_files):
output = []
for result_file in result_files:
try:
with open(result_file, 'rb') as f:
output.append((result_file, pickle.load(f)))
except Exception as err:
print("Failed on file: ", result_file, err)
return output
def job_out_series(job_outs, eval_style, method,
value_or_time='value', avg_stdev='avg'):
""" Takes in some result or resultList objects and
a 'method', and desired object, and returns these objects
in a list
ARGS:
results: Result[] or ResultList[], results to consider
eval_style: str - which method of Experiment we look at
method: str - which Lipschitz-estimation technique to consider
value_or_time: 'value' or 'time' - which number to return
avg_stdev: 'avg' or 'stdev' - for ResultList[], we can
get average or stdev values
RETURNS:
list of floats
"""
# check everything is the same type
assert value_or_time in ['value', 'time']
assert avg_stdev in ['avg', 'stdev']
assert eval_style in ['do_random_evals', 'do_unit_hypercube_eval',
'do_data_evals', 'do_large_radius_evals']
results = [job_out[eval_style] for job_out in job_outs]
output = []
for result in results:
try:  # Result object case
if value_or_time == 'value':
output.append(result.values(method))
else:
output.append(result.compute_times(method))
except Exception:  # ResultList case: fall back to averaged stdevs
triple = result.average_stdevs(value_or_time)[method]
if avg_stdev == 'avg':
output.append(triple[0])
else:
output.append(triple[1])
return output
def collect_result_outs(filematch):
""" Uses glob to collect and load result objects matching a series
ARGS:
filematch: string with *'s associated with it
e.g. 'NAME*SUBNAME*GLOBAL.result'
RESULTS:
list of (filename, experiment.Result) objects
"""
search_str = os.path.join(COMPLETED_JOB_DIR, filematch)
sorted_filenames = sorted(glob.glob(search_str))
return read_result_files(sorted_filenames)
def collect_epochs(filename_list):
""" Given a list of (filename) objects, converts
the filenames into integers, pulling the EPOCH attribute from
the filename
str[] -> int[]
"""
def epoch_gleamer(filename):
basename = os.path.basename(filename)
return int(re.search(r'_EPOCH\d+_', filename).group()[6:-1])
return [epoch_gleamer(_) for _ in filename_list]
def data_from_results(result_iter, method, lip_estimator, time_or_value='value',
avg_or_stdev='avg'):
""" Given a list of experiment.Result or experiment.ResultList objects
will return the time/value for the lip_estimator of the method
for result (or avg/stdev if resultList objects)
e.g., data_from_results('do_unit_hypercube_eval', 'LipMIP',
'value') gets a list of values of the
LipMIP over the unitHypercube domain
ARGS:
method: str - name of one of the experimental methods
lip_estimator : str - name of the class of lipschitz estimator to use
time_or_value : 'time' or 'value' - returning the time or value here
avg_or_stdev : 'avg' or 'stdev' - returning either avg or stdev of
results from ResultListObjects
"""
assert method in ['do_random_evals', 'do_data_evals',
'do_unit_hypercube_eval']
assert lip_estimator in ['LipMIP', 'FastLip', 'LipLP', 'CLEVER',
'LipSDP', 'NaiveUB', 'RandomLB', 'SeqLip']
assert time_or_value in ['time', 'value']
assert avg_or_stdev in ['avg', 'stdev']
def datum_getter(result_obj):
if not hasattr(result_obj, 'average_stdevs'):
if time_or_value == 'value':
return result_obj[method].values(lip_estimator)
else:
return result_obj[method].compute_times(lip_estimator)
else:
triple = result_obj.average_stdevs(time_or_value)
if avg_or_stdev == 'avg':
return triple[0]
else:
return triple[1]
return [datum_getter(_) for _ in result_iter]
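# A small self-check of the pure helpers above (a sketch; it runs only when
# this module is executed directly).
if __name__ == '__main__':
assert prod([2, 3, 4]) == 24
assert list(partition(7, 3)) == [3, 3, 1]
assert flatten_list([1, [2, [3]], 4]) == [1, 2, 3, 4]
assert partition_by_suffix([1, 2, 3, 4, 5, 5, 5],
lambda x: (x % 2) == 0) == [[1, 2], [3, 4], [5, 5, 5]]
print('utilities self-check passed')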
| 28.021661
| 81
| 0.659237
| 2,249
| 15,524
| 4.389506
| 0.200089
| 0.007901
| 0.005673
| 0.006483
| 0.154072
| 0.118922
| 0.091167
| 0.08671
| 0.075162
| 0.075162
| 0
| 0.005188
| 0.192863
| 15,524
| 553
| 82
| 28.072333
| 0.782682
| 0.397256
| 0
| 0.228296
| 0
| 0
| 0.038807
| 0.006709
| 0
| 0
| 0
| 0
| 0.057878
| 1
| 0.154341
| false
| 0.003215
| 0.048232
| 0.016077
| 0.369775
| 0.003215
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43a00c0b5646519c438692fcd0610b44be3beb14
| 1,340
|
py
|
Python
|
read_delphin_data.py
|
anssilaukkarinen/mry-cluster2
|
65d80a7371a4991dfe248ff6944f050e1573f8fc
|
[
"MIT"
] | null | null | null |
read_delphin_data.py
|
anssilaukkarinen/mry-cluster2
|
65d80a7371a4991dfe248ff6944f050e1573f8fc
|
[
"MIT"
] | null | null | null |
read_delphin_data.py
|
anssilaukkarinen/mry-cluster2
|
65d80a7371a4991dfe248ff6944f050e1573f8fc
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 6 14:51:24 2021
@author: laukkara
This script is run first to fetch results data from university's network drive
"""
import os
import pickle
input_folder_for_Delphin_data = r'S:\91202_Rakfys_Mallinnus\RAMI\simulations'
output_folder = os.path.join(r'C:\Local\laukkara\Data\github\mry-cluster2\input')
output_pickle_file_name = 'S_RAMI.pickle'
## Preparations
if not os.path.exists(output_folder):
os.makedirs(output_folder)
output_pickle_file_path = os.path.join(output_folder,
output_pickle_file_name)
## Read in results data from pickle files
cases = {}
data = {}
cases = os.listdir(input_folder_for_Delphin_data)
cases.remove('olds')
cases.remove('RAMI_simulated_cases.xlsx')
data = {}
for case in cases:
print('Reading:', case)
fname = os.path.join(input_folder_for_Delphin_data, case, 'd.pickle')
with open(fname, 'rb') as f:
try:
df = pickle.load(f)
if df.shape[0] == 1200:
data[case] = df
else:
print('ERROR AT:', case)
except Exception as e:
print('Error when reading case:', case, e)
print(data[cases[0]].columns)
with open(output_pickle_file_path, 'wb') as f:
pickle.dump(data, f)
| 20
| 81
| 0.630597
| 185
| 1,340
| 4.389189
| 0.481081
| 0.059113
| 0.078818
| 0.077586
| 0.16133
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023976
| 0.252985
| 1,340
| 66
| 82
| 20.30303
| 0.787213
| 0.15597
| 0
| 0.066667
| 0
| 0
| 0.166517
| 0.10351
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0.133333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43a04a876b69a7d204627f4d6e2351f7e07cdf98
| 518
|
py
|
Python
|
examples/pylab_examples/fancybox_demo2.py
|
pierre-haessig/matplotlib
|
0d945044ca3fbf98cad55912584ef80911f330c6
|
[
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 16
|
2016-06-14T19:45:35.000Z
|
2020-11-30T19:02:58.000Z
|
examples/pylab_examples/fancybox_demo2.py
|
pierre-haessig/matplotlib
|
0d945044ca3fbf98cad55912584ef80911f330c6
|
[
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 7
|
2015-05-08T19:36:25.000Z
|
2015-06-30T15:32:17.000Z
|
examples/pylab_examples/fancybox_demo2.py
|
pierre-haessig/matplotlib
|
0d945044ca3fbf98cad55912584ef80911f330c6
|
[
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 6
|
2015-06-05T03:34:06.000Z
|
2022-01-25T09:07:10.000Z
|
import matplotlib.patches as mpatch
import matplotlib.pyplot as plt
styles = mpatch.BoxStyle.get_styles()
figheight = (len(styles)+.5)
fig1 = plt.figure(1, (4/1.5, figheight/1.5))
fontsize = 0.3 * 72
for i, (stylename, styleclass) in enumerate(styles.items()):
fig1.text(0.5, (float(len(styles)) - 0.5 - i)/figheight, stylename,
ha="center",
size=fontsize,
transform=fig1.transFigure,
bbox=dict(boxstyle=stylename, fc="w", ec="k"))
plt.draw()
plt.show()
| 27.263158
| 71
| 0.629344
| 72
| 518
| 4.513889
| 0.597222
| 0.098462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04401
| 0.210425
| 518
| 18
| 72
| 28.777778
| 0.750611
| 0
| 0
| 0
| 0
| 0
| 0.015474
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43a39cbdc284d3d48cf14614c751040caf06e2f0
| 3,018
|
py
|
Python
|
import_off.py
|
etiennody/purchoice
|
43a2dc81ca953ac6168f8112e97a4bae91ace690
|
[
"MIT"
] | null | null | null |
import_off.py
|
etiennody/purchoice
|
43a2dc81ca953ac6168f8112e97a4bae91ace690
|
[
"MIT"
] | 2
|
2020-05-04T09:40:32.000Z
|
2021-08-03T17:34:00.000Z
|
import_off.py
|
etiennody/purchoice
|
43a2dc81ca953ac6168f8112e97a4bae91ace690
|
[
"MIT"
] | null | null | null |
#! usr/bin/python3
# code: utf-8
"""Download data from Open Food Facts API."""
import json
import requests
from src.purchoice.constants import CATEGORY_SELECTED
from src.purchoice.purchoice_database import PurchoiceDatabase
class ImportOff:
"""ImportOff class downloads data from Open Food Facts API."""
def __init__(self, db):
self.url = "https://fr.openfoodfacts.org//cgi/search.pl?"
self.db = db
def get_url_params(self, category):
"""get_urls_params helps to define more precisely
the request to Open Food Facts API.
Arguments:
category {string} -- a name of category.
Returns:
dictionnary -- contains parameters to complete
the request to Open Food Facts API.
"""
return {
"action": "process",
"tagtype_0": "categories",
"tag_contains_0": "contains",
"tag_0": category,
"sort_by": "unique_scans_n",
"page_size": 500,
"json": 1,
}
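# For example (a sketch), get_url_params("pizzas") asks for the 500
# most-scanned products of that category, serialised as JSON:
# {"action": "process", ..., "tag_0": "pizzas", "page_size": 500, "json": 1}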
def get_off(self, category):
"""get_off method makes a request to the web page of Open Food Facts,
and load data in json if the return status code is successful.
Arguments:
category {string} -- a category name.
Returns:
dictionnary -- Deserialize an bytearray instance containing
a JSON document to a Python object as early as products.
"""
response = requests.get(self.url, params=self.get_url_params(category))
if response.status_code == 200:
return json.loads(response.content)["products"]
def import_by_category(self, category):
"""import_by_category method try to insert
products, categories, brands and stores data
for each product by category in the database.
Arguments:
category {string} -- a category name.
"""
products = self.get_off(category)
products = products if isinstance(products, list) else products.items()
print("Importation des données en cours. Patientez...")
for product in products:
try:
p = self.db.add_product(product)
for category in product.get("categories").split(","):
c = self.db.add_category(category)
p.categories.append(c)
for brand in product.get("brands").split(","):
b = self.db.add_brand(brand)
p.brands.append(b)
for store in product.get("stores").split(","):
s = self.db.add_store(store)
p.stores.append(s)
except Exception:
pass
if __name__ == "__main__":
db = PurchoiceDatabase()
db.truncate_tables()
import_off = ImportOff(db)
for category in CATEGORY_SELECTED:
import_off.import_by_category(category)
print("Merci d'avoir patienté. Vous pouvez lancer l'application !")
| 32.804348
| 79
| 0.594102
| 351
| 3,018
| 4.977208
| 0.404558
| 0.020607
| 0.037207
| 0.036634
| 0.100744
| 0.100744
| 0.032055
| 0
| 0
| 0
| 0
| 0.005778
| 0.311796
| 3,018
| 91
| 80
| 33.164835
| 0.835339
| 0.288602
| 0
| 0
| 0
| 0
| 0.143511
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085106
| false
| 0.021277
| 0.191489
| 0
| 0.340426
| 0.042553
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43a51f00be6eeff0b67bd7aa629b9ff21c09189f
| 503
|
py
|
Python
|
cogs rework/server specified/on_message_delete.py
|
lubnc4261/House-Keeper
|
6de20014afaf00cf9050e54c91cd8b3a02702a27
|
[
"MIT"
] | null | null | null |
cogs rework/server specified/on_message_delete.py
|
lubnc4261/House-Keeper
|
6de20014afaf00cf9050e54c91cd8b3a02702a27
|
[
"MIT"
] | null | null | null |
cogs rework/server specified/on_message_delete.py
|
lubnc4261/House-Keeper
|
6de20014afaf00cf9050e54c91cd8b3a02702a27
|
[
"MIT"
] | null | null | null |
import discord
from discord import Embed
from discord.ext import commands
class MessageLogger(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_message_delete(self, message):
# The channel id below is a placeholder; get_channel() must resolve it
# to a real, sendable channel object.
channel = self.bot.get_channel("xxxxxxxxxxxxxxxxxxxxx")
deleted = Embed(
description=f"Message deleted in {message.channel.mention}", color=0x4040EC
).set_author(name=message.author, url=Embed.Empty, icon_url=message.author.avatar_url)
deleted.add_field(name="Message", value=message.content)
deleted.timestamp = message.created_at
await channel.send(embed=deleted)
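# Wiring it up (a sketch; the cog class name above is arbitrary):
#   bot.add_cog(MessageLogger(bot))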
| 33.533333
| 91
| 0.735586
| 62
| 503
| 5.854839
| 0.612903
| 0.077135
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011848
| 0.161034
| 503
| 15
| 92
| 33.533333
| 0.848341
| 0
| 0
| 0
| 0
| 0
| 0.146939
| 0.093878
| 0
| 0
| 0.016327
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43a5f6e07158fad4d7bfe9f3af12b2b23116e364
| 22,646
|
py
|
Python
|
test/modules/md/md_env.py
|
icing/mod_md
|
4522ed547f0426f27aae86f00fbc9b5b17de545f
|
[
"Apache-2.0"
] | 320
|
2017-07-22T12:14:19.000Z
|
2022-03-24T14:00:32.000Z
|
test/modules/md/md_env.py
|
icing/mod_md
|
4522ed547f0426f27aae86f00fbc9b5b17de545f
|
[
"Apache-2.0"
] | 272
|
2017-07-22T12:30:48.000Z
|
2022-03-30T07:14:50.000Z
|
test/modules/md/md_env.py
|
icing/mod_md
|
4522ed547f0426f27aae86f00fbc9b5b17de545f
|
[
"Apache-2.0"
] | 36
|
2017-07-22T12:45:03.000Z
|
2021-05-18T12:20:11.000Z
|
import copy
import inspect
import json
import logging
import pytest
import re
import os
import shutil
import subprocess
import time
from datetime import datetime, timedelta
from configparser import ConfigParser, ExtendedInterpolation
from typing import Dict, List, Optional
from pyhttpd.certs import CertificateSpec
from .md_cert_util import MDCertUtil
from pyhttpd.env import HttpdTestSetup, HttpdTestEnv
from pyhttpd.result import ExecResult
log = logging.getLogger(__name__)
class MDTestSetup(HttpdTestSetup):
def __init__(self, env: 'HttpdTestEnv'):
super().__init__(env=env)
def make(self):
super().make(add_modules=["proxy_connect", "md"])
if "pebble" == self.env.acme_server:
self._make_pebble_conf()
def _make_pebble_conf(self):
our_dir = os.path.dirname(inspect.getfile(MDTestSetup))
conf_src_dir = os.path.join(our_dir, 'pebble')
conf_dest_dir = os.path.join(self.env.gen_dir, 'pebble')
if not os.path.exists(conf_dest_dir):
os.makedirs(conf_dest_dir)
for name in os.listdir(conf_src_dir):
src_path = os.path.join(conf_src_dir, name)
m = re.match(r'(.+).template', name)
if m:
self._make_template(src_path, os.path.join(conf_dest_dir, m.group(1)))
elif os.path.isfile(src_path):
shutil.copy(src_path, os.path.join(conf_dest_dir, name))
class MDTestEnv(HttpdTestEnv):
MD_S_UNKNOWN = 0
MD_S_INCOMPLETE = 1
MD_S_COMPLETE = 2
MD_S_EXPIRED = 3
MD_S_ERROR = 4
EMPTY_JOUT = {'status': 0, 'output': []}
DOMAIN_SUFFIX = "%d.org" % time.time()
LOG_FMT_TIGHT = '%(levelname)s: %(message)s'
@classmethod
def get_acme_server(cls):
return os.environ['ACME'] if 'ACME' in os.environ else "pebble"
@classmethod
def has_acme_server(cls):
return cls.get_acme_server() != 'none'
@classmethod
def has_acme_eab(cls):
return cls.get_acme_server() == 'pebble'
@classmethod
def is_pebble(cls) -> bool:
return cls.get_acme_server() == 'pebble'
@classmethod
def lacks_ocsp(cls):
return cls.is_pebble()
def __init__(self, pytestconfig=None, setup_dirs=True):
super().__init__(pytestconfig=pytestconfig,
local_dir=os.path.dirname(inspect.getfile(MDTestEnv)),
interesting_modules=["md"])
self._acme_server = self.get_acme_server()
self._acme_tos = "accepted"
self._acme_ca_pemfile = os.path.join(self.gen_dir, "apache/acme-ca.pem")
if "pebble" == self._acme_server:
self._acme_url = "https://localhost:14000/dir"
self._acme_eab_url = "https://localhost:14001/dir"
elif "boulder" == self._acme_server:
self._acme_url = "http://localhost:4001/directory"
self._acme_eab_url = None
else:
raise Exception(f"unknown ACME server type: {self._acme_server}")
self._acme_server_down = False
self._acme_server_ok = False
self._a2md_bin = os.path.join(self.bin_dir, 'a2md')
self._default_domain = f"test1.{self.http_tld}"
self._store_dir = "./md"
self.set_store_dir_default()
self.add_cert_specs([
CertificateSpec(domains=[f"expired.{self._http_tld}"],
valid_from=timedelta(days=-100),
valid_to=timedelta(days=-10)),
CertificateSpec(domains=["localhost"], key_type='rsa2048'),
])
self.httpd_error_log.set_ignored_lognos([
#"AH10045", # mod_md complains that there is no vhost for an MDomain
"AH10105", # mod_md does not find a vhost with SSL enabled for an MDomain
"AH10085" # mod_ssl complains about fallback certificates
])
if self.lacks_ocsp():
self.httpd_error_log.set_ignored_patterns([
re.compile(r'.*certificate with serial \S+ has no OCSP responder URL.*'),
])
if setup_dirs:
self._setup = MDTestSetup(env=self)
self._setup.make()
self.issue_certs()
self.clear_store()
def set_store_dir_default(self):
dirpath = "md"
if self.httpd_is_at_least("2.5.0"):
dirpath = os.path.join("state", dirpath)
self.set_store_dir(dirpath)
def set_store_dir(self, dirpath):
self._store_dir = os.path.join(self.server_dir, dirpath)
if self.acme_url:
self.a2md_stdargs([self.a2md_bin, "-a", self.acme_url, "-d", self._store_dir, "-C", self.acme_ca_pemfile, "-j"])
self.a2md_rawargs([self.a2md_bin, "-a", self.acme_url, "-d", self._store_dir, "-C", self.acme_ca_pemfile])
def get_apxs_var(self, name: str) -> str:
p = subprocess.run([self._apxs, "-q", name], capture_output=True, text=True)
if p.returncode != 0:
return ""
return p.stdout.strip()
@property
def acme_server(self):
return self._acme_server
@property
def acme_url(self):
return self._acme_url
@property
def acme_tos(self):
return self._acme_tos
@property
def a2md_bin(self):
return self._a2md_bin
@property
def acme_ca_pemfile(self):
return self._acme_ca_pemfile
@property
def store_dir(self):
return self._store_dir
def get_request_domain(self, request):
return "%s-%s" % (re.sub(r'[_]', '-', request.node.originalname), MDTestEnv.DOMAIN_SUFFIX)
def get_method_domain(self, method):
return "%s-%s" % (re.sub(r'[_]', '-', method.__name__.lower()), MDTestEnv.DOMAIN_SUFFIX)
def get_module_domain(self, module):
return "%s-%s" % (re.sub(r'[_]', '-', module.__name__.lower()), MDTestEnv.DOMAIN_SUFFIX)
def get_class_domain(self, c):
return "%s-%s" % (re.sub(r'[_]', '-', c.__name__.lower()), MDTestEnv.DOMAIN_SUFFIX)
# --------- cmd execution ---------
_a2md_args = []
_a2md_args_raw = []
def a2md_stdargs(self, args):
self._a2md_args = [] + args
def a2md_rawargs(self, args):
self._a2md_args_raw = [] + args
def a2md(self, args, raw=False) -> ExecResult:
preargs = self._a2md_args
if raw:
preargs = self._a2md_args_raw
log.debug("running: {0} {1}".format(preargs, args))
return self.run(preargs + args)
def check_acme(self):
if self._acme_server_ok:
return True
if self._acme_server_down:
pytest.skip(msg="ACME server not running")
return False
if self.is_live(self.acme_url, timeout=timedelta(seconds=0.5)):
self._acme_server_ok = True
return True
else:
self._acme_server_down = True
pytest.fail(msg="ACME server not running", pytrace=False)
return False
def get_ca_pem_file(self, hostname: str) -> Optional[str]:
pem_file = super().get_ca_pem_file(hostname)
if pem_file is None:
pem_file = self.acme_ca_pemfile
return pem_file
# --------- access local store ---------
def purge_store(self):
log.debug("purge store dir: %s" % self._store_dir)
assert len(self._store_dir) > 1
if os.path.exists(self._store_dir):
shutil.rmtree(self._store_dir, ignore_errors=False)
os.makedirs(self._store_dir)
def clear_store(self):
log.debug("clear store dir: %s" % self._store_dir)
assert len(self._store_dir) > 1
if not os.path.exists(self._store_dir):
os.makedirs(self._store_dir)
for dirpath in ["challenges", "tmp", "archive", "domains", "accounts", "staging", "ocsp"]:
shutil.rmtree(os.path.join(self._store_dir, dirpath), ignore_errors=True)
def clear_ocsp_store(self):
assert len(self._store_dir) > 1
dirpath = os.path.join(self._store_dir, "ocsp")
log.debug("clear ocsp store dir: %s" % dir)
if os.path.exists(dirpath):
shutil.rmtree(dirpath, ignore_errors=True)
def authz_save(self, name, content):
dirpath = os.path.join(self._store_dir, 'staging', name)
os.makedirs(dirpath)
open(os.path.join(dirpath, 'authz.json'), "w").write(content)
def path_store_json(self):
return os.path.join(self._store_dir, 'md_store.json')
def path_account(self, acct):
return os.path.join(self._store_dir, 'accounts', acct, 'account.json')
def path_account_key(self, acct):
return os.path.join(self._store_dir, 'accounts', acct, 'account.pem')
def store_domains(self):
return os.path.join(self._store_dir, 'domains')
def store_archives(self):
return os.path.join(self._store_dir, 'archive')
def store_stagings(self):
return os.path.join(self._store_dir, 'staging')
def store_challenges(self):
return os.path.join(self._store_dir, 'challenges')
def store_domain_file(self, domain, filename):
return os.path.join(self.store_domains(), domain, filename)
def store_archived_file(self, domain, version, filename):
return os.path.join(self.store_archives(), "%s.%d" % (domain, version), filename)
def store_staged_file(self, domain, filename):
return os.path.join(self.store_stagings(), domain, filename)
def path_fallback_cert(self, domain):
return os.path.join(self._store_dir, 'domains', domain, 'fallback-pubcert.pem')
def path_job(self, domain):
return os.path.join(self._store_dir, 'staging', domain, 'job.json')
def replace_store(self, src):
shutil.rmtree(self._store_dir, ignore_errors=False)
shutil.copytree(src, self._store_dir)
def list_accounts(self):
return os.listdir(os.path.join(self._store_dir, 'accounts'))
def check_md(self, domain, md=None, state=-1, ca=None, protocol=None, agreement=None, contacts=None):
domains = None
if isinstance(domain, list):
domains = domain
domain = domains[0]
if md:
domain = md
path = self.store_domain_file(domain, 'md.json')
with open(path) as f:
md = json.load(f)
assert md
if domains:
assert md['domains'] == domains
if state >= 0:
assert md['state'] == state
if ca:
assert md['ca']['url'] == ca
if protocol:
assert md['ca']['proto'] == protocol
if agreement:
assert md['ca']['agreement'] == agreement
if contacts:
assert md['contacts'] == contacts
def pkey_fname(self, pkeyspec=None):
if pkeyspec and not re.match(r'^rsa( ?\d+)?$', pkeyspec.lower()):
return "privkey.{0}.pem".format(pkeyspec)
return 'privkey.pem'
def cert_fname(self, pkeyspec=None):
if pkeyspec and not re.match(r'^rsa( ?\d+)?$', pkeyspec.lower()):
return "pubcert.{0}.pem".format(pkeyspec)
return 'pubcert.pem'
def check_md_complete(self, domain, pkey=None):
md = self.get_md_status(domain)
assert md
assert 'state' in md, "md is unexpected: {0}".format(md)
assert md['state'] is MDTestEnv.MD_S_COMPLETE, "unexpected state: {0}".format(md['state'])
assert os.path.isfile(self.store_domain_file(domain, self.pkey_fname(pkey)))
assert os.path.isfile(self.store_domain_file(domain, self.cert_fname(pkey)))
def check_md_credentials(self, domain):
if isinstance(domain, list):
domains = domain
domain = domains[0]
else:
domains = [domain]
# check private key, validate certificate, etc
MDCertUtil.validate_privkey(self.store_domain_file(domain, 'privkey.pem'))
cert = MDCertUtil(self.store_domain_file(domain, 'pubcert.pem'))
cert.validate_cert_matches_priv_key(self.store_domain_file(domain, 'privkey.pem'))
# check SANs and CN
assert cert.get_cn() == domain
# compare lists twice in opposite directions: SAN may not respect ordering
san_list = list(cert.get_san_list())
assert len(san_list) == len(domains)
assert set(san_list).issubset(domains)
assert set(domains).issubset(san_list)
# check valid dates interval
not_before = cert.get_not_before()
not_after = cert.get_not_after()
assert not_before < datetime.now(not_before.tzinfo)
assert not_after > datetime.now(not_after.tzinfo)
# --------- check utilities ---------
def check_json_contains(self, actual, expected):
# write all expected key:value bindings to a copy of the actual data ...
# ... assert it stays unchanged
test_json = copy.deepcopy(actual)
test_json.update(expected)
assert actual == test_json
def check_file_access(self, path, exp_mask):
actual_mask = os.lstat(path).st_mode & 0o777
assert oct(actual_mask) == oct(exp_mask)
def check_dir_empty(self, path):
assert os.listdir(path) == []
def get_http_status(self, domain, path, use_https=True):
r = self.get_meta(domain, path, use_https, insecure=True)
return r.response['status']
def get_cert(self, domain, tls=None, ciphers=None):
return MDCertUtil.load_server_cert(self._httpd_addr, self.https_port,
domain, tls=tls, ciphers=ciphers)
def get_server_cert(self, domain, proto=None, ciphers=None):
args = [
"openssl", "s_client", "-status",
"-connect", "%s:%s" % (self._httpd_addr, self.https_port),
"-CAfile", self.acme_ca_pemfile,
"-servername", domain,
"-showcerts"
]
if proto is not None:
args.extend(["-{0}".format(proto)])
if ciphers is not None:
args.extend(["-cipher", ciphers])
r = self.run(args)
# noinspection PyBroadException
try:
return MDCertUtil.parse_pem_cert(r.stdout)
except:
return None
def verify_cert_key_lenghts(self, domain, pkeys):
for p in pkeys:
cert = self.get_server_cert(domain, proto="tls1_2", ciphers=p['ciphers'])
if 0 == p['keylen']:
assert cert is None
else:
assert cert, "no cert returned for cipher: {0}".format(p['ciphers'])
assert cert.get_key_length() == p['keylen'], "key length, expected {0}, got {1}".format(
p['keylen'], cert.get_key_length()
)
def get_meta(self, domain, path, use_https=True, insecure=False):
schema = "https" if use_https else "http"
port = self.https_port if use_https else self.http_port
r = self.curl_get(f"{schema}://{domain}:{port}{path}", insecure=insecure)
assert r.exit_code == 0
assert r.response
assert r.response['header']
return r
def get_content(self, domain, path, use_https=True):
schema = "https" if use_https else "http"
port = self.https_port if use_https else self.http_port
r = self.curl_get(f"{schema}://{domain}:{port}{path}")
assert r.exit_code == 0
return r.stdout
def get_json_content(self, domain, path, use_https=True, insecure=False,
debug_log=True):
schema = "https" if use_https else "http"
port = self.https_port if use_https else self.http_port
url = f"{schema}://{domain}:{port}{path}"
r = self.curl_get(url, insecure=insecure, debug_log=debug_log)
if r.exit_code != 0:
log.error(f"curl get on {url} returned {r.exit_code}"
f"\nstdout: {r.stdout}"
f"\nstderr: {r.stderr}")
assert r.exit_code == 0, r.stderr
return r.json
def get_certificate_status(self, domain) -> Dict:
return self.get_json_content(domain, "/.httpd/certificate-status", insecure=True)
def get_md_status(self, domain, via_domain=None, use_https=True, debug_log=False) -> Dict:
if via_domain is None:
via_domain = self._default_domain
return self.get_json_content(via_domain, f"/md-status/{domain}",
use_https=use_https, debug_log=debug_log)
def get_server_status(self, query="/", via_domain=None, use_https=True):
if via_domain is None:
via_domain = self._default_domain
return self.get_content(via_domain, "/server-status%s" % query, use_https=use_https)
def await_completion(self, names, must_renew=False, restart=True, timeout=60,
via_domain=None, use_https=True):
try_until = time.time() + timeout
renewals = {}
names = names.copy()
while len(names) > 0:
if time.time() >= try_until:
return False
for name in names:
mds = self.get_md_status(name, via_domain=via_domain, use_https=use_https)
if mds is None:
log.debug("not managed by md: %s" % name)
return False
if 'renewal' in mds:
renewal = mds['renewal']
renewals[name] = True
if 'finished' in renewal and renewal['finished'] is True:
if (not must_renew) or (name in renewals):
log.debug(f"domain cert was renewed: {name}")
names.remove(name)
if len(names) != 0:
time.sleep(0.1)
if restart:
time.sleep(0.1)
return self.apache_restart() == 0
return True
def is_renewing(self, name):
stat = self.get_certificate_status(name)
return 'renewal' in stat
def await_renewal(self, names, timeout=60):
try_until = time.time() + timeout
while len(names) > 0:
if time.time() >= try_until:
return False
for name in names:
md = self.get_md_status(name)
if md is None:
log.debug("not managed by md: %s" % name)
return False
if 'renewal' in md:
names.remove(name)
if len(names) != 0:
time.sleep(0.1)
return True
def await_error(self, domain, timeout=60, via_domain=None, use_https=True, errors=1):
try_until = time.time() + timeout
while True:
if time.time() >= try_until:
return False
md = self.get_md_status(domain, via_domain=via_domain, use_https=use_https)
if md:
if 'state' in md and md['state'] == MDTestEnv.MD_S_ERROR:
return md
if 'renewal' in md and 'errors' in md['renewal'] \
and md['renewal']['errors'] >= errors:
return md
time.sleep(0.1)
return None
def await_file(self, fpath, timeout=60):
try_until = time.time() + timeout
while True:
if time.time() >= try_until:
return False
if os.path.isfile(fpath):
return True
time.sleep(0.1)
def check_file_permissions(self, domain):
md = self.a2md(["list", domain]).json['output'][0]
assert md
acct = md['ca']['account']
assert acct
self.check_file_access(self.path_store_json(), 0o600)
# domains
self.check_file_access(self.store_domains(), 0o700)
self.check_file_access(os.path.join(self.store_domains(), domain), 0o700)
self.check_file_access(self.store_domain_file(domain, 'privkey.pem'), 0o600)
self.check_file_access(self.store_domain_file(domain, 'pubcert.pem'), 0o600)
self.check_file_access(self.store_domain_file(domain, 'md.json'), 0o600)
# archive
self.check_file_access(self.store_archived_file(domain, 1, 'md.json'), 0o600)
# accounts
self.check_file_access(os.path.join(self._store_dir, 'accounts'), 0o755)
self.check_file_access(os.path.join(self._store_dir, 'accounts', acct), 0o755)
self.check_file_access(self.path_account(acct), 0o644)
self.check_file_access(self.path_account_key(acct), 0o644)
# staging
self.check_file_access(self.store_stagings(), 0o755)
def get_ocsp_status(self, domain, proto=None, cipher=None, ca_file=None):
stat = {}
args = [
"openssl", "s_client", "-status",
"-connect", "%s:%s" % (self._httpd_addr, self.https_port),
"-CAfile", ca_file if ca_file else self.acme_ca_pemfile,
"-servername", domain,
"-showcerts"
]
if proto is not None:
args.extend(["-{0}".format(proto)])
if cipher is not None:
args.extend(["-cipher", cipher])
r = self.run(args, debug_log=False)
ocsp_regex = re.compile(r'OCSP response: +([^=\n]+)\n')
matches = ocsp_regex.finditer(r.stdout)
for m in matches:
if m.group(1) != "":
stat['ocsp'] = m.group(1)
if 'ocsp' not in stat:
ocsp_regex = re.compile(r'OCSP Response Status:\s*(.+)')
matches = ocsp_regex.finditer(r.stdout)
for m in matches:
if m.group(1) != "":
stat['ocsp'] = m.group(1)
verify_regex = re.compile(r'Verify return code:\s*(.+)')
matches = verify_regex.finditer(r.stdout)
for m in matches:
if m.group(1) != "":
stat['verify'] = m.group(1)
return stat
def await_ocsp_status(self, domain, timeout=10, ca_file=None):
try_until = time.time() + timeout
while True:
if time.time() >= try_until:
break
stat = self.get_ocsp_status(domain, ca_file=ca_file)
if 'ocsp' in stat and stat['ocsp'] != "no response sent":
return stat
time.sleep(0.1)
raise TimeoutError(f"ocsp respopnse not available: {domain}")
def create_self_signed_cert(self, name_list, valid_days, serial=1000, path=None):
dirpath = path
if not path:
dirpath = os.path.join(self.store_domains(), name_list[0])
return MDCertUtil.create_self_signed_cert(dirpath, name_list, valid_days, serial)
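# Typical test-side flow (a sketch; the domain name is hypothetical):
#   env = MDTestEnv(pytestconfig)
#   domain = f"example.{env.http_tld}"
#   assert env.await_completion([domain])
#   env.check_md_complete(domain)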
| 37.806344
| 125
| 0.597457
| 2,920
| 22,646
| 4.414726
| 0.136644
| 0.03421
| 0.029788
| 0.026065
| 0.394616
| 0.338376
| 0.283841
| 0.241331
| 0.194709
| 0.169265
| 0
| 0.011539
| 0.280579
| 22,646
| 599
| 126
| 37.806344
| 0.779708
| 0.027025
| 0
| 0.253579
| 0
| 0
| 0.089703
| 0.007585
| 0
| 0
| 0
| 0
| 0.06953
| 1
| 0.147239
| false
| 0
| 0.034765
| 0.06135
| 0.343558
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43a66e0d4848430d37cecb21387fa89ddac71ea8
| 1,949
|
py
|
Python
|
models/create_message_response.py
|
ajrice6713/bw-messaging-emulator
|
d1be4976e2486ec91b419597afc8411c78ebfda7
|
[
"MIT"
] | null | null | null |
models/create_message_response.py
|
ajrice6713/bw-messaging-emulator
|
d1be4976e2486ec91b419597afc8411c78ebfda7
|
[
"MIT"
] | null | null | null |
models/create_message_response.py
|
ajrice6713/bw-messaging-emulator
|
d1be4976e2486ec91b419597afc8411c78ebfda7
|
[
"MIT"
] | null | null | null |
import datetime
import json
import random
import string
from typing import Dict
from sms_counter import SMSCounter
class CreateMessageResponse:
def __init__(self, request):
self.id = self.generate_id()
self.owner = request['from']
self.applicationId = request['applicationId']
self.time = str(datetime.datetime.utcnow().isoformat())
self.segmentCount = 1
self.direction = 'out'
if isinstance(request['to'], str):
self.to = [request['to']]
else:
self.to = request['to']
self.mfrom = request['from']
if 'media' in request:
self.media = request['media']
if 'text' in request:
self.text = request['text']
if 'tag' in request:
self.tag = request['tag']
if 'priority' in request:
self.priority = request['priority']
def calculate_segments(self, message) -> int:
count = SMSCounter.count(message)
return count['messages']
def generate_id(self) -> str:
        pre = random.randint(1400000000000, 1799999999999)
        return str(pre) + ''.join(random.choice(string.ascii_lowercase) for _ in range(16))
def to_json(self) -> str:
dict_response = {
'id': self.id,
'owner': self.owner,
'applicationId': self.applicationId,
'time': self.time,
'direction': self.direction,
'to': self.to,
'from': self.mfrom
}
if hasattr(self, 'media'): dict_response['media'] = self.media
if hasattr(self, 'text'):
dict_response['text'] = self.text
dict_response['segmentCount'] = self.calculate_segments(self.text)
if hasattr(self, 'tag'): dict_response['tag'] = self.tag
if hasattr(self, 'priority'): dict_response['priority'] = self.priority
return json.dumps(dict_response)
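
# A minimal usage sketch for the class above; the payload values are
# illustrative and the `sms_counter` dependency is assumed installed.
if __name__ == "__main__":
    request = {
        "from": "+15551230001",
        "to": "+15551230002",
        "applicationId": "app-123",
        "text": "hello from the emulator",
    }
    response = CreateMessageResponse(request)
    print(response.to_json())  # JSON string with a computed segmentCount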
| 30.936508
| 91
| 0.578758
| 214
| 1,949
| 5.186916
| 0.28972
| 0.075676
| 0.046847
| 0.027027
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021152
| 0.296562
| 1,949
| 62
| 92
| 31.435484
| 0.788476
| 0
| 0
| 0
| 0
| 0
| 0.086711
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.12
| 0
| 0.28
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43a74cac582bdf300bc81daa9bedf7b376e2c024
| 906
|
py
|
Python
|
Alpha & Beta/wootMath/decimalToBinaryFraction.py
|
Mdlkxzmcp/various_python
|
be4f873c6263e3db11177bbccce2aa465514294d
|
[
"MIT"
] | null | null | null |
Alpha & Beta/wootMath/decimalToBinaryFraction.py
|
Mdlkxzmcp/various_python
|
be4f873c6263e3db11177bbccce2aa465514294d
|
[
"MIT"
] | null | null | null |
Alpha & Beta/wootMath/decimalToBinaryFraction.py
|
Mdlkxzmcp/various_python
|
be4f873c6263e3db11177bbccce2aa465514294d
|
[
"MIT"
] | null | null | null |
def decimal_to_binary_fraction(x=0.5):
"""
Input: x, a float between 0 and 1
Returns binary representation of x
"""
p = 0
while ((2 ** p) * x) % 1 != 0:
# print('Remainder = ' + str((2**p)*x - int((2**p)*x)))
p += 1
num = int(x * (2 ** p))
result = ''
if num == 0:
result = '0'
while num > 0:
result = str(num % 2) + result
num //= 2
for i in range(p - len(result)):
result = '0' + result
result = result[0:-p] + '.' + result[-p:]
    return result

# If there is no integer p such that x * (2**p) is a whole number, the
# internal representation of x is always an approximation.
# Consequence: testing floats for equality is not exact. Use abs(x - y) < eps
# for some small eps, rather than x == y.
# Why does print(0.1) show 0.1, if the stored value is not exact?
# Because Python rounds the value automatically when displaying it.
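
# A quick demonstration of the function above. Note the leading '.', since
# inputs between 0 and 1 have an empty integer part:
assert decimal_to_binary_fraction(0.5) == '.1'
assert decimal_to_binary_fraction(0.375) == '.011'  # 3/8 = 1/4 + 1/8
# And the float-equality caveat from the notes above:
assert abs(0.1 + 0.2 - 0.3) < 1e-9  # compare with a tolerance, not ==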
| 27.454545
| 97
| 0.566225
| 144
| 906
| 3.541667
| 0.493056
| 0.019608
| 0.017647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037559
| 0.294702
| 906
| 32
| 98
| 28.3125
| 0.760563
| 0.512141
| 0
| 0
| 0
| 0
| 0.007212
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43a79fa3a61473b076f77344a5a402f9d3ac1f06
| 3,091
|
py
|
Python
|
composer/utils/run_directory.py
|
ajaysaini725/composer
|
00fbf95823cd50354b2410fbd88f06eaf0481662
|
[
"Apache-2.0"
] | null | null | null |
composer/utils/run_directory.py
|
ajaysaini725/composer
|
00fbf95823cd50354b2410fbd88f06eaf0481662
|
[
"Apache-2.0"
] | null | null | null |
composer/utils/run_directory.py
|
ajaysaini725/composer
|
00fbf95823cd50354b2410fbd88f06eaf0481662
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 MosaicML. All Rights Reserved.
import datetime
import logging
import os
import pathlib
import time
from composer.utils import dist
log = logging.getLogger(__name__)
_RUN_DIRECTORY_KEY = "COMPOSER_RUN_DIRECTORY"
_start_time_str = datetime.datetime.now().isoformat()
def get_node_run_directory() -> str:
"""Returns the run directory for the node. This folder is shared by all ranks on the node.
Returns:
str: The node run directory.
"""
node_run_directory = os.environ.get(_RUN_DIRECTORY_KEY, os.path.join("runs", _start_time_str))
if node_run_directory.endswith(os.path.sep):
        # chop off the trailing slash so os.path.basename works as expected
node_run_directory = node_run_directory[:-1]
os.makedirs(node_run_directory, exist_ok=True)
return os.path.abspath(node_run_directory)
def get_run_directory() -> str:
"""Returns the run directory for the current rank.
Returns:
str: The run directory.
"""
run_dir = os.path.join(get_node_run_directory(), f"rank_{dist.get_global_rank()}")
os.makedirs(run_dir, exist_ok=True)
return run_dir
def get_modified_files(modified_since_timestamp: float, *, ignore_hidden: bool = True):
"""Returns a list of files (recursively) in the run directory that have been modified since
``modified_since_timestamp``.
Args:
        modified_since_timestamp (float): Minimum last modified timestamp (in seconds since the epoch)
of files to include.
ignore_hidden (bool, optional): Whether to ignore hidden files and folders (default: ``True``)
Returns:
List[str]: List of filepaths that have been modified since ``modified_since_timestamp``
"""
modified_files = []
run_directory = get_run_directory()
if run_directory is None:
raise RuntimeError("Run directory is not defined")
for root, dirs, files in os.walk(run_directory):
del dirs # unused
for file in files:
if ignore_hidden and any(x.startswith(".") for x in file.split(os.path.sep)):
# skip hidden files and folders
continue
filepath = os.path.join(root, file)
modified_time = os.path.getmtime(filepath)
if modified_time >= modified_since_timestamp:
modified_files.append(filepath)
return modified_files
def get_run_directory_timestamp() -> float:
"""Returns the current timestamp on the run directory filesystem.
Note that the disk time can differ from system time (e.g. when using
network filesystems).
Returns:
float: the current timestamp on the run directory filesystem.
"""
run_directory = get_run_directory()
if run_directory is None:
raise RuntimeError("Run directory is not defined")
python_time = time.time()
touch_file = (pathlib.Path(run_directory) / f".{python_time}")
touch_file.touch()
new_last_uploaded_timestamp = os.path.getmtime(str(touch_file))
os.remove(str(touch_file))
return new_last_uploaded_timestamp
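
# A minimal sketch of the intended upload pattern (assumption: composer is
# installed and this is a single-process run, so dist.get_global_rank() is 0):
def _sync_new_artifacts(last_upload_timestamp: float) -> float:
    """Upload files written since the last sync; return the new watermark."""
    cutoff = get_run_directory_timestamp()  # disk clock, not time.time()
    for filepath in get_modified_files(last_upload_timestamp):
        print(f"would upload {filepath}")  # placeholder for real upload logic
    return cutoff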
| 35.125
| 102
| 0.697185
| 418
| 3,091
| 4.937799
| 0.320574
| 0.174419
| 0.069767
| 0.018411
| 0.271318
| 0.248062
| 0.217054
| 0.217054
| 0.126938
| 0.085271
| 0
| 0.002067
| 0.217405
| 3,091
| 87
| 103
| 35.528736
| 0.851178
| 0.339696
| 0
| 0.136364
| 0
| 0
| 0.065319
| 0.026439
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.136364
| 0
| 0.318182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43aa177b05dce3f050fe11c02d43b9d799f954d6
| 3,509
|
py
|
Python
|
cpc_fusion/pkgs/keys/main.py
|
CPChain/fusion
|
63b6913010e8e5b296a1900c59592c8fd1802c2e
|
[
"MIT"
] | 5
|
2018-12-19T02:37:18.000Z
|
2022-01-26T02:52:50.000Z
|
cpc_fusion/pkgs/keys/main.py
|
CPChain/fusion
|
63b6913010e8e5b296a1900c59592c8fd1802c2e
|
[
"MIT"
] | null | null | null |
cpc_fusion/pkgs/keys/main.py
|
CPChain/fusion
|
63b6913010e8e5b296a1900c59592c8fd1802c2e
|
[
"MIT"
] | null | null | null |
from typing import (Any, Union, Type) # noqa: F401
from ..keys.datatypes import (
LazyBackend,
PublicKey,
PrivateKey,
Signature,
)
from eth_keys.exceptions import (
ValidationError,
)
from eth_keys.validation import (
validate_message_hash,
)
# These must be aliased due to a scoping issue in mypy
# https://github.com/python/mypy/issues/1775
_PublicKey = PublicKey
_PrivateKey = PrivateKey
_Signature = Signature
class KeyAPI(LazyBackend):
#
# datatype shortcuts
#
PublicKey = PublicKey # type: Type[_PublicKey]
PrivateKey = PrivateKey # type: Type[_PrivateKey]
Signature = Signature # type: Type[_Signature]
#
# Proxy method calls to the backends
#
def ecdsa_sign(self,
message_hash, # type: bytes
private_key # type: _PrivateKey
):
# type: (...) -> _Signature
validate_message_hash(message_hash)
if not isinstance(private_key, PrivateKey):
raise ValidationError(
"The `private_key` must be an instance of `eth_keys.datatypes.PrivateKey`"
)
signature = self.backend.ecdsa_sign(message_hash, private_key)
if not isinstance(signature, Signature):
raise ValidationError(
"Backend returned an invalid signature. Return value must be "
"an instance of `eth_keys.datatypes.Signature`"
)
return signature
def ecdsa_verify(self,
message_hash, # type: bytes
signature, # type: _Signature
public_key # type: _PublicKey
) -> bool:
if not isinstance(public_key, PublicKey):
raise ValidationError(
"The `public_key` must be an instance of `eth_keys.datatypes.PublicKey`"
)
return self.ecdsa_recover(message_hash, signature) == public_key
def ecdsa_recover(self,
message_hash, # type: bytes
signature # type: _Signature
):
# type: (...) -> _PublicKey
validate_message_hash(message_hash)
if not isinstance(signature, Signature):
raise ValidationError(
"The `signature` must be an instance of `eth_keys.datatypes.Signature`"
)
public_key = self.backend.ecdsa_recover(message_hash, signature)
if not isinstance(public_key, _PublicKey):
raise ValidationError(
"Backend returned an invalid public_key. Return value must be "
"an instance of `eth_keys.datatypes.PublicKey`"
)
return public_key
def private_key_to_public_key(self, private_key):
if not isinstance(private_key, PrivateKey):
raise ValidationError(
"The `private_key` must be an instance of `eth_keys.datatypes.PrivateKey`"
)
public_key = self.backend.private_key_to_public_key(private_key)
if not isinstance(public_key, PublicKey):
raise ValidationError(
"Backend returned an invalid public_key. Return value must be "
"an instance of `eth_keys.datatypes.PublicKey`"
)
return public_key
# This creates an easy-to-import backend which will lazily fetch whatever
# backend has been configured at runtime (as opposed to at import or instantiation time).
lazy_key_api = KeyAPI(backend=None)
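
# A minimal round-trip sketch using the lazy API above. Assumptions: an
# eth_keys-compatible backend is importable and the key bytes are a toy value.
if __name__ == "__main__":
    private_key = lazy_key_api.PrivateKey(b"\x01" * 32)
    message_hash = b"\x00" * 32  # must be exactly 32 bytes
    signature = lazy_key_api.ecdsa_sign(message_hash, private_key)
    assert lazy_key_api.ecdsa_verify(message_hash, signature, private_key.public_key)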
| 35.444444
| 90
| 0.61613
| 370
| 3,509
| 5.648649
| 0.245946
| 0.060287
| 0.050239
| 0.053589
| 0.563636
| 0.487081
| 0.475598
| 0.432057
| 0.326794
| 0.300478
| 0
| 0.002901
| 0.31234
| 3,509
| 98
| 91
| 35.806122
| 0.863241
| 0.15503
| 0
| 0.373333
| 0
| 0
| 0.205032
| 0.072084
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053333
| false
| 0
| 0.053333
| 0
| 0.213333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43aab220da0c6298d29ad8922e374d3b90af61e0
| 16,406
|
py
|
Python
|
qiskit/pulse/transforms/canonicalization.py
|
gadial/qiskit-terra
|
0fc83f44a6e80969875c738b2cee7bc33223e45f
|
[
"Apache-2.0"
] | 1
|
2021-10-05T11:56:53.000Z
|
2021-10-05T11:56:53.000Z
|
qiskit/pulse/transforms/canonicalization.py
|
gadial/qiskit-terra
|
0fc83f44a6e80969875c738b2cee7bc33223e45f
|
[
"Apache-2.0"
] | 24
|
2021-01-27T08:20:27.000Z
|
2021-07-06T09:42:28.000Z
|
qiskit/pulse/transforms/canonicalization.py
|
gadial/qiskit-terra
|
0fc83f44a6e80969875c738b2cee7bc33223e45f
|
[
"Apache-2.0"
] | 4
|
2021-10-05T12:07:27.000Z
|
2022-01-28T18:37:28.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Basic rescheduling functions which take schedule or instructions and return new schedules."""
import warnings
from collections import defaultdict
from typing import List, Optional, Iterable, Union
import numpy as np
from qiskit.pulse import channels as chans, exceptions, instructions
from qiskit.pulse.exceptions import PulseError
from qiskit.pulse.exceptions import UnassignedDurationError
from qiskit.pulse.instruction_schedule_map import InstructionScheduleMap
from qiskit.pulse.instructions import directives
from qiskit.pulse.schedule import Schedule, ScheduleBlock, ScheduleComponent
def block_to_schedule(block: ScheduleBlock) -> Schedule:
"""Convert ``ScheduleBlock`` to ``Schedule``.
Args:
block: A ``ScheduleBlock`` to convert.
Returns:
Scheduled pulse program.
Raises:
UnassignedDurationError: When any instruction duration is not assigned.
"""
if not block.is_schedulable():
raise UnassignedDurationError(
'All instruction durations should be assigned before creating `Schedule`.'
'Please check `.parameters` to find unassigned parameter objects.')
schedule = Schedule(name=block.name, metadata=block.metadata)
for op_data in block.instructions:
if isinstance(op_data, ScheduleBlock):
context_schedule = block_to_schedule(op_data)
schedule.append(context_schedule, inplace=True)
else:
schedule.append(op_data, inplace=True)
# transform with defined policy
return block.alignment_context.align(schedule)
def compress_pulses(schedules: List[Schedule]) -> List[Schedule]:
"""Optimization pass to replace identical pulses.
Args:
schedules: Schedules to compress.
Returns:
Compressed schedules.
"""
existing_pulses = []
new_schedules = []
for schedule in schedules:
new_schedule = Schedule(name=schedule.name, metadata=schedule.metadata)
for time, inst in schedule.instructions:
if isinstance(inst, instructions.Play):
if inst.pulse in existing_pulses:
idx = existing_pulses.index(inst.pulse)
identical_pulse = existing_pulses[idx]
new_schedule.insert(time,
instructions.Play(identical_pulse,
inst.channel,
inst.name),
inplace=True)
else:
existing_pulses.append(inst.pulse)
new_schedule.insert(time, inst, inplace=True)
else:
new_schedule.insert(time, inst, inplace=True)
new_schedules.append(new_schedule)
return new_schedules
def flatten(program: Schedule) -> Schedule:
"""Flatten (inline) any called nodes into a Schedule tree with no nested children.
Args:
program: Pulse program to remove nested structure.
Returns:
        Flattened pulse program.
    Raises:
        PulseError: When an invalid data format is given.
"""
if isinstance(program, Schedule):
return Schedule(*program.instructions, name=program.name, metadata=program.metadata)
else:
raise PulseError(f'Invalid input program {program.__class__.__name__} is specified.')
def inline_subroutines(program: Union[Schedule, ScheduleBlock]) -> Union[Schedule, ScheduleBlock]:
"""Recursively remove call instructions and inline the respective subroutine instructions.
Assigned parameter values, which are stored in the parameter table, are also applied.
    The subroutine is copied before the parameter assignment to avoid mutation problems.
Args:
program: A program which may contain the subroutine, i.e. ``Call`` instruction.
Returns:
A schedule without subroutine.
Raises:
        PulseError: When the input program is not in a valid data format.
"""
if isinstance(program, Schedule):
return _inline_schedule(program)
elif isinstance(program, ScheduleBlock):
return _inline_block(program)
else:
raise PulseError(f'Invalid program {program.__class__.__name__} is specified.')
def _inline_schedule(schedule: Schedule) -> Schedule:
"""A helper function to inline subroutine of schedule.
.. note:: If subroutine is ``ScheduleBlock`` it is converted into Schedule to get ``t0``.
"""
ret_schedule = Schedule(name=schedule.name,
metadata=schedule.metadata)
for t0, inst in schedule.instructions:
if isinstance(inst, instructions.Call):
# bind parameter
subroutine = inst.assigned_subroutine()
# convert into schedule if block is given
if isinstance(subroutine, ScheduleBlock):
subroutine = block_to_schedule(subroutine)
# recursively inline the program
inline_schedule = _inline_schedule(subroutine)
ret_schedule.insert(t0, inline_schedule, inplace=True)
else:
ret_schedule.insert(t0, inst, inplace=True)
return ret_schedule
def _inline_block(block: ScheduleBlock) -> ScheduleBlock:
"""A helper function to inline subroutine of schedule block.
.. note:: If subroutine is ``Schedule`` the function raises an error.
"""
ret_block = ScheduleBlock(alignment_context=block.alignment_context,
name=block.name,
metadata=block.metadata)
for inst in block.instructions:
if isinstance(inst, instructions.Call):
# bind parameter
subroutine = inst.assigned_subroutine()
if isinstance(subroutine, Schedule):
raise PulseError(f'A subroutine {subroutine.name} is a pulse Schedule. '
'This program cannot be inserted into ScheduleBlock because '
't0 associated with instruction will be lost.')
# recursively inline the program
inline_block = _inline_block(subroutine)
ret_block.append(inline_block, inplace=True)
else:
ret_block.append(inst, inplace=True)
return ret_block
def remove_directives(schedule: Schedule) -> Schedule:
"""Remove directives.
Args:
schedule: A schedule to remove compiler directives.
Returns:
A schedule without directives.
"""
return schedule.exclude(instruction_types=[directives.Directive])
def remove_trivial_barriers(schedule: Schedule) -> Schedule:
"""Remove trivial barriers with 0 or 1 channels.
Args:
schedule: A schedule to remove trivial barriers.
Returns:
schedule: A schedule without trivial barriers
"""
def filter_func(inst):
return (isinstance(inst[1], directives.RelativeBarrier) and
len(inst[1].channels) < 2)
return schedule.exclude(filter_func)
def align_measures(schedules: Iterable[ScheduleComponent],
inst_map: Optional[InstructionScheduleMap] = None,
cal_gate: str = 'u3',
max_calibration_duration: Optional[int] = None,
align_time: Optional[int] = None,
align_all: Optional[bool] = True,
) -> List[Schedule]:
"""Return new schedules where measurements occur at the same physical time.
This transformation will align the first :class:`qiskit.pulse.Acquire` on
every channel to occur at the same time.
Minimum measurement wait time (to allow for calibration pulses) is enforced
and may be set with ``max_calibration_duration``.
By default only instructions containing a :class:`~qiskit.pulse.AcquireChannel`
or :class:`~qiskit.pulse.MeasureChannel` will be shifted. If you wish to keep
the relative timing of all instructions in the schedule set ``align_all=True``.
This method assumes that ``MeasureChannel(i)`` and ``AcquireChannel(i)``
correspond to the same qubit and the acquire/play instructions
should be shifted together on these channels.
.. jupyter-kernel:: python3
:id: align_measures
.. jupyter-execute::
from qiskit import pulse
from qiskit.pulse import transforms
with pulse.build() as sched:
with pulse.align_sequential():
pulse.play(pulse.Constant(10, 0.5), pulse.DriveChannel(0))
pulse.play(pulse.Constant(10, 1.), pulse.MeasureChannel(0))
pulse.acquire(20, pulse.AcquireChannel(0), pulse.MemorySlot(0))
sched_shifted = sched << 20
aligned_sched, aligned_sched_shifted = transforms.align_measures([sched, sched_shifted])
assert aligned_sched == aligned_sched_shifted
If it is desired to only shift acquisition and measurement stimulus instructions
set the flag ``align_all=False``:
.. jupyter-execute::
aligned_sched, aligned_sched_shifted = transforms.align_measures(
[sched, sched_shifted],
align_all=False,
)
assert aligned_sched != aligned_sched_shifted
Args:
schedules: Collection of schedules to be aligned together
inst_map: Mapping of circuit operations to pulse schedules
cal_gate: The name of the gate to inspect for the calibration time
max_calibration_duration: If provided, inst_map and cal_gate will be ignored
align_time: If provided, this will be used as final align time.
align_all: Shift all instructions in the schedule such that they maintain
their relative alignment with the shifted acquisition instruction.
If ``False`` only the acquisition and measurement pulse instructions
will be shifted.
Returns:
The input list of schedules transformed to have their measurements aligned.
Raises:
PulseError: If the provided alignment time is negative.
"""
def get_first_acquire_times(schedules):
"""Return a list of first acquire times for each schedule."""
acquire_times = []
for schedule in schedules:
visited_channels = set()
qubit_first_acquire_times = defaultdict(lambda: None)
for time, inst in schedule.instructions:
if (isinstance(inst, instructions.Acquire) and
inst.channel not in visited_channels):
visited_channels.add(inst.channel)
qubit_first_acquire_times[inst.channel.index] = time
acquire_times.append(qubit_first_acquire_times)
return acquire_times
def get_max_calibration_duration(inst_map, cal_gate):
"""Return the time needed to allow for readout discrimination calibration pulses."""
# TODO (qiskit-terra #5472): fix behavior of this.
max_calibration_duration = 0
for qubits in inst_map.qubits_with_instruction(cal_gate):
cmd = inst_map.get(cal_gate, qubits, np.pi, 0, np.pi)
max_calibration_duration = max(cmd.duration, max_calibration_duration)
return max_calibration_duration
if align_time is not None and align_time < 0:
raise exceptions.PulseError("Align time cannot be negative.")
first_acquire_times = get_first_acquire_times(schedules)
    # Extract the maximum acquire time in every schedule across all of its acquires.
    # If there are no acquires in the schedule, default to 0.
max_acquire_times = [max(0, *times.values()) for times in first_acquire_times]
if align_time is None:
if max_calibration_duration is None:
if inst_map:
max_calibration_duration = get_max_calibration_duration(inst_map, cal_gate)
else:
max_calibration_duration = 0
align_time = max(max_calibration_duration, *max_acquire_times)
# Shift acquires according to the new scheduled time
new_schedules = []
for sched_idx, schedule in enumerate(schedules):
new_schedule = Schedule(name=schedule.name, metadata=schedule.metadata)
stop_time = schedule.stop_time
if align_all:
if first_acquire_times[sched_idx]:
shift = align_time - max_acquire_times[sched_idx]
else:
shift = align_time - stop_time
else:
shift = 0
for time, inst in schedule.instructions:
measurement_channels = {
chan.index for chan in inst.channels if
isinstance(chan, (chans.MeasureChannel, chans.AcquireChannel))
}
if measurement_channels:
sched_first_acquire_times = first_acquire_times[sched_idx]
max_start_time = max(sched_first_acquire_times[chan]
for chan in measurement_channels if
chan in sched_first_acquire_times)
shift = align_time - max_start_time
if shift < 0:
warnings.warn(
"The provided alignment time is scheduling an acquire instruction "
"earlier than it was scheduled for in the original Schedule. "
"This may result in an instruction being scheduled before t=0 and "
"an error being raised."
)
new_schedule.insert(time+shift, inst, inplace=True)
new_schedules.append(new_schedule)
return new_schedules
def add_implicit_acquires(schedule: ScheduleComponent,
meas_map: List[List[int]]
) -> Schedule:
"""Return a new schedule with implicit acquires from the measurement mapping replaced by
explicit ones.
.. warning:: Since new acquires are being added, Memory Slots will be set to match the
qubit index. This may overwrite your specification.
Args:
schedule: Schedule to be aligned.
meas_map: List of lists of qubits that are measured together.
Returns:
A ``Schedule`` with the additional acquisition instructions.
"""
new_schedule = Schedule(name=schedule.name, metadata=schedule.metadata)
acquire_map = dict()
for time, inst in schedule.instructions:
if isinstance(inst, instructions.Acquire):
if inst.mem_slot and inst.mem_slot.index != inst.channel.index:
warnings.warn("One of your acquires was mapped to a memory slot which didn't match"
" the qubit index. I'm relabeling them to match.")
# Get the label of all qubits that are measured with the qubit(s) in this instruction
all_qubits = []
for sublist in meas_map:
if inst.channel.index in sublist:
all_qubits.extend(sublist)
# Replace the old acquire instruction by a new one explicitly acquiring all qubits in
# the measurement group.
for i in all_qubits:
explicit_inst = instructions.Acquire(inst.duration,
chans.AcquireChannel(i),
mem_slot=chans.MemorySlot(i),
kernel=inst.kernel,
discriminator=inst.discriminator)
if time not in acquire_map:
new_schedule.insert(time, explicit_inst, inplace=True)
acquire_map = {time: {i}}
elif i not in acquire_map[time]:
new_schedule.insert(time, explicit_inst, inplace=True)
acquire_map[time].add(i)
else:
new_schedule.insert(time, inst, inplace=True)
return new_schedule
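
# A minimal sketch tying the transforms above together (assumptions: a recent
# qiskit-terra with the pulse builder; channels and durations are illustrative):
def _example_roundtrip() -> Schedule:
    from qiskit import pulse
    with pulse.build() as block:
        pulse.play(pulse.Constant(160, 0.1), pulse.DriveChannel(0))
        pulse.play(pulse.Constant(160, 0.1), pulse.DriveChannel(1))
    sched = block_to_schedule(block)  # raises if any duration is unassigned
    sched = remove_directives(flatten(sched))
    return compress_pulses([sched])[0]  # identical Play pulses deduplicated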
| 40.210784
| 99
| 0.643911
| 1,860
| 16,406
| 5.544624
| 0.198387
| 0.022108
| 0.027732
| 0.014254
| 0.215165
| 0.15718
| 0.141181
| 0.130515
| 0.098613
| 0.087172
| 0
| 0.004114
| 0.288797
| 16,406
| 407
| 100
| 40.309582
| 0.879757
| 0.351518
| 0
| 0.205263
| 0
| 0
| 0.075953
| 0.005517
| 0
| 0
| 0
| 0.002457
| 0
| 1
| 0.068421
| false
| 0
| 0.052632
| 0.005263
| 0.194737
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43abfef786fc99686d3027b89832f4ac4ffeea43
| 7,885
|
py
|
Python
|
lib/jnpr/junos/transport/tty_netconf.py
|
mmoucka/py-junos-eznc
|
9ef5ad39e32ae670fe8ed0092d725661a45b3053
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
lib/jnpr/junos/transport/tty_netconf.py
|
mmoucka/py-junos-eznc
|
9ef5ad39e32ae670fe8ed0092d725661a45b3053
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
lib/jnpr/junos/transport/tty_netconf.py
|
mmoucka/py-junos-eznc
|
9ef5ad39e32ae670fe8ed0092d725661a45b3053
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
import re
import time
from lxml import etree
import select
import socket
import logging
import sys
from lxml.builder import E
from lxml.etree import XMLSyntaxError
from datetime import datetime, timedelta
from ncclient.operations.rpc import RPCReply, RPCError
from ncclient.xml_ import to_ele
import six
from ncclient.transport.session import HelloHandler
class PY6:
NEW_LINE = six.b("\n")
EMPTY_STR = six.b("")
NETCONF_EOM = six.b("]]>]]>")
STARTS_WITH = six.b("<!--")
__all__ = ["xmlmode_netconf"]
_NETCONF_EOM = six.b("]]>]]>")
_xmlns = re.compile(six.b("xmlns=[^>]+"))
_xmlns_strip = lambda text: _xmlns.sub(PY6.EMPTY_STR, text)
_junosns = re.compile(six.b("junos:"))
_junosns_strip = lambda text: _junosns.sub(PY6.EMPTY_STR, text)
logger = logging.getLogger("jnpr.junos.tty_netconf")
# =========================================================================
# xmlmode_netconf
# =========================================================================
class tty_netconf(object):
"""
    Basic Junos XML API for bootstrapping through the TTY
"""
def __init__(self, tty):
self._tty = tty
self.hello = None
self._session_id = -1
# -------------------------------------------------------------------------
# NETCONF session open and close
# -------------------------------------------------------------------------
def open(self, at_shell):
""" start the XML API process and receive the 'hello' message """
nc_cmd = ("junoscript", "xml-mode")[at_shell]
self._tty.write(nc_cmd + " netconf need-trailer")
mark_start = datetime.now()
mark_end = mark_start + timedelta(seconds=15)
while datetime.now() < mark_end:
time.sleep(0.1)
line = self._tty.read()
if line.startswith(PY6.STARTS_WITH):
break
else:
# exceeded the while loop timeout
raise RuntimeError("Error: netconf not responding")
self.hello = self._receive()
self._session_id, _ = HelloHandler.parse(self.hello.decode("utf-8"))
def close(self, device_handler, force=False):
""" issue the XML API to close the session """
# if we do not have an open connection, then return now.
if force is False:
if self.hello is None:
return
self.rpc("close-session", device_handler)
# removed flush
# -------------------------------------------------------------------------
# MISC device commands
# -------------------------------------------------------------------------
def zeroize(self):
""" issue a reboot to the device """
cmd = E.command("request system zeroize")
try:
encode = None if sys.version < "3" else "unicode"
self.rpc(etree.tostring(cmd, encoding=encode))
        except Exception:
return False
return True
# -------------------------------------------------------------------------
# XML RPC command execution
# -------------------------------------------------------------------------
def rpc(self, cmd, device_handler):
"""
Write the XML cmd and return the response as XML object.
:cmd:
<str> of the XML command. if the :cmd: is not XML, then
this routine will perform the brackets; i.e. if given
'get-software-information', this routine will turn
it into '<get-software-information/>'
NOTES:
The return XML object is the first child element after
the <rpc-reply>. There is also no error-checking
performing by this routine.
"""
if not cmd.startswith("<"):
cmd = "<{}/>".format(cmd)
rpc = six.b("<rpc>{}</rpc>".format(cmd))
logger.info("Calling rpc: %s" % rpc)
self._tty.rawwrite(rpc)
rsp = self._receive()
rsp = rsp.decode("utf-8") if isinstance(rsp, bytes) else rsp
reply = RPCReply(rsp, device_handler, huge_tree=self._tty._huge_tree)
errors = reply.errors
if len(errors) > 1:
raise RPCError(to_ele(reply._raw), errs=errors)
elif len(errors) == 1:
raise reply.error
return reply
# -------------------------------------------------------------------------
# LOW-LEVEL I/O for reading back XML response
# -------------------------------------------------------------------------
def _receive(self):
        # On Windows, select.select raises io.UnsupportedOperation: fileno,
        # so use the read-based loop for Windows serial COM ports instead
if hasattr(self._tty, "port") and str(self._tty.port).startswith("COM"):
return self._receive_serial_win()
else:
return self._receive_serial()
def _receive_serial(self):
""" process the XML response into an XML object """
rxbuf = PY6.EMPTY_STR
line = PY6.EMPTY_STR
while True:
try:
rd, wt, err = select.select([self._tty._rx], [], [], 0.1)
except select.error as err:
raise err
except socket.error as err:
raise err
if rd:
line, lastline = rd[0].read_until(PY6.NETCONF_EOM, 0.1), line
if not line:
continue
if _NETCONF_EOM in line or _NETCONF_EOM in lastline + line:
rxbuf = rxbuf + line
break
else:
rxbuf = rxbuf + line
if _NETCONF_EOM in rxbuf:
break
return self._parse_buffer(rxbuf)
# -------------------------------------------------------------------------
# Read message from windows COM ports
# -------------------------------------------------------------------------
def _receive_serial_win(self):
""" process incoming data from windows port"""
rxbuf = PY6.EMPTY_STR
line = PY6.EMPTY_STR
while True:
line, lastline = self._tty.read().strip(), line
if not line:
continue
if _NETCONF_EOM in line or _NETCONF_EOM in lastline + line:
rxbuf = rxbuf + line
break
else:
rxbuf = rxbuf + line
if _NETCONF_EOM in rxbuf:
break
return self._parse_buffer(rxbuf)
def _parse_buffer(self, rxbuf):
rxbuf = rxbuf.splitlines()
if _NETCONF_EOM in rxbuf[-1]:
if rxbuf[-1] == _NETCONF_EOM:
rxbuf.pop()
else:
rxbuf[-1] = rxbuf[-1].split(_NETCONF_EOM)[0]
try:
rxbuf = [i.strip() for i in rxbuf if i.strip() != PY6.EMPTY_STR]
rcvd_data = PY6.NEW_LINE.join(rxbuf)
logger.debug("Received: \n%s" % rcvd_data)
parser = etree.XMLParser(
remove_blank_text=True, huge_tree=self._tty._huge_tree
)
try:
etree.XML(rcvd_data, parser)
except XMLSyntaxError:
if _NETCONF_EOM in rcvd_data:
rcvd_data = rcvd_data[: rcvd_data.index(_NETCONF_EOM)]
etree.XML(rcvd_data) # just to recheck
else:
parser = etree.XMLParser(recover=True)
rcvd_data = etree.tostring(etree.XML(rcvd_data, parser=parser))
return rcvd_data
        except Exception:
if "</xnm:error>" in rxbuf:
for x in rxbuf:
if "<message>" in x:
return etree.XML(
"<error-in-receive>" + x + "</error-in-receive>"
)
else:
return etree.XML("<error-in-receive/>")
| 35.200893
| 83
| 0.49182
| 839
| 7,885
| 4.468415
| 0.278903
| 0.037343
| 0.025607
| 0.022406
| 0.17418
| 0.13817
| 0.102427
| 0.102427
| 0.102427
| 0.102427
| 0
| 0.00569
| 0.309068
| 7,885
| 223
| 84
| 35.358744
| 0.682452
| 0.250983
| 0
| 0.27027
| 0
| 0
| 0.0566
| 0.003831
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060811
| false
| 0
| 0.094595
| 0
| 0.27027
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43ad02233acb1702dc2da7147208eb71f07d888f
| 409
|
py
|
Python
|
test/_test_client.py
|
eydam-prototyping/mp_modbus
|
8007c41dd16e6f71bd27b587628f57f38f27a7e0
|
[
"MIT"
] | 2
|
2022-01-06T02:21:16.000Z
|
2022-03-08T07:55:43.000Z
|
test/_test_client.py
|
eydam-prototyping/mp_modbus
|
8007c41dd16e6f71bd27b587628f57f38f27a7e0
|
[
"MIT"
] | 2
|
2021-12-10T15:56:52.000Z
|
2022-02-19T23:45:24.000Z
|
test/_test_client.py
|
eydam-prototyping/mp_modbus
|
8007c41dd16e6f71bd27b587628f57f38f27a7e0
|
[
"MIT"
] | 3
|
2021-07-30T11:16:55.000Z
|
2022-01-05T18:19:55.000Z
|
from pymodbus.client.sync import ModbusTcpClient as ModbusClient
import logging
FORMAT = ('%(asctime)-15s %(threadName)-15s '
'%(levelname)-8s %(module)-15s:%(lineno)-8s %(message)s')
logging.basicConfig(format=FORMAT)
log = logging.getLogger()
log.setLevel(logging.DEBUG)
client = ModbusClient('192.168.178.61', port=502)
client.connect()
f = client.read_holding_registers(305,1)
print(f.registers)
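# A hedged follow-up sketch: write a value back and release the socket.
# Register 305 simply mirrors the read above; adjust for your device map.
rq = client.write_register(305, 42)
assert not rq.isError()
client.close()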
| 37.181818
| 67
| 0.743276
| 54
| 409
| 5.592593
| 0.703704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07027
| 0.095355
| 409
| 11
| 68
| 37.181818
| 0.745946
| 0
| 0
| 0
| 0
| 0
| 0.246341
| 0.063415
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43ae1b68e450c7cd53ba9d214198e618977b86cc
| 1,297
|
py
|
Python
|
sdk/python/lib/test/langhost/future_input/__main__.py
|
pcen/pulumi
|
1bb85ca98c90f2161fe915df083d47c56c135e4d
|
[
"Apache-2.0"
] | 12,004
|
2018-06-17T23:56:29.000Z
|
2022-03-31T18:00:09.000Z
|
sdk/python/lib/test/langhost/future_input/__main__.py
|
pcen/pulumi
|
1bb85ca98c90f2161fe915df083d47c56c135e4d
|
[
"Apache-2.0"
] | 6,263
|
2018-06-17T23:27:24.000Z
|
2022-03-31T19:20:35.000Z
|
sdk/python/lib/test/langhost/future_input/__main__.py
|
pcen/pulumi
|
1bb85ca98c90f2161fe915df083d47c56c135e4d
|
[
"Apache-2.0"
] | 706
|
2018-06-17T23:56:50.000Z
|
2022-03-31T11:20:23.000Z
|
# Copyright 2016-2018, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from pulumi import CustomResource, Output, Input
async def read_a_file_or_something():
await asyncio.sleep(0)
return "here's a file"
def assert_eq(l, r):
assert l == r
class FileResource(CustomResource):
contents: Output[str]
def __init__(self, name: str, file_contents: Input[str]) -> None:
CustomResource.__init__(self, "test:index:FileResource", name, {
"contents": file_contents
})
# read_a_file_or_something returns a coroutine when called, which needs to be scheduled
# and awaited in order to yield a value.
file_res = FileResource("file", read_a_file_or_something())
file_res.contents.apply(lambda c: assert_eq(c, "here's a file"))
| 36.027778
| 87
| 0.739399
| 194
| 1,297
| 4.809278
| 0.561856
| 0.064309
| 0.028939
| 0.03537
| 0.064309
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012138
| 0.174248
| 1,297
| 35
| 88
| 37.057143
| 0.85901
| 0.529684
| 0
| 0
| 0
| 0
| 0.102694
| 0.038721
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.133333
| false
| 0
| 0.133333
| 0
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43af456bb12d9242e1f8878ab32c7792bb2310ac
| 2,108
|
py
|
Python
|
tests/models/pr_test_data.py
|
heaven00/github-contribution-leaderboard
|
3de53a60a7c81b91291e29d063c7fd14696d426d
|
[
"Apache-2.0"
] | null | null | null |
tests/models/pr_test_data.py
|
heaven00/github-contribution-leaderboard
|
3de53a60a7c81b91291e29d063c7fd14696d426d
|
[
"Apache-2.0"
] | null | null | null |
tests/models/pr_test_data.py
|
heaven00/github-contribution-leaderboard
|
3de53a60a7c81b91291e29d063c7fd14696d426d
|
[
"Apache-2.0"
] | null | null | null |
import copy
import json
from ghcl.models.pull_request import PullRequest
class PRData:
def __init__(self, data: dict = None):
if data is None:
with open('./tests/models/empty_pr_data.json') as file:
self._data = json.load(file)
else:
self._data = data
def with_pr_url(self, url: str = 'some-url'):
data = copy.deepcopy(self._data)
data['issues_data']['pull_request']['html_url'] = url
return PRData(data)
def with_label(self, label_to_add: str = None):
data = copy.deepcopy(self._data)
if label_to_add is None:
label_number = len(data["issues_data"]["labels"]) + 1
label_to_add = f'label-{label_number}'
data['issues_data']['labels'].append({'name': label_to_add})
return PRData(data)
def with_created_at(self, created_at: str = '2014-04-24T16:34:47Z'):
data = copy.deepcopy(self._data)
data['issues_data']['created_at'] = created_at
return PRData(data)
def with_owner(self, owner: str = 'owner_user_id'):
data = copy.deepcopy(self._data)
data['pr_data']['base']['repo']['owner']['login'] = owner
return PRData(data)
def with_pr_raised_by(self, pr_raised_by: str = 'pr_raised_by_user_id'):
data = copy.deepcopy(self._data)
data['pr_data']['head']['user']['login'] = pr_raised_by
return PRData(data)
def with_merged(self, merged=False):
data = copy.deepcopy(self._data)
data['pr_data']['merged'] = merged
return PRData(data)
def with_state(self, state='some_state'):
data = copy.deepcopy(self._data)
data['issues_data']['state'] = state
return PRData(data)
def with_defaults(self):
return PRData(self._data).with_pr_url()\
.with_label()\
.with_label()\
.with_created_at()\
.with_owner()\
.with_pr_raised_by()\
.with_merged()\
.with_state()
def as_pull_request(self):
return PullRequest(**self._data)
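
# A minimal usage sketch for the builder above (assumes the fixture
# ./tests/models/empty_pr_data.json is present, as in the original suite):
def example_pull_request() -> PullRequest:
    return (PRData()
            .with_defaults()
            .with_state('closed')
            .with_merged(True)
            .as_pull_request())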
| 31.939394
| 76
| 0.597723
| 275
| 2,108
| 4.298182
| 0.236364
| 0.081218
| 0.07445
| 0.118443
| 0.349408
| 0.192893
| 0.192893
| 0.192893
| 0.067682
| 0.067682
| 0
| 0.009677
| 0.264706
| 2,108
| 65
| 77
| 32.430769
| 0.752903
| 0
| 0
| 0.307692
| 0
| 0
| 0.136622
| 0.015655
| 0
| 0
| 0
| 0
| 0
| 1
| 0.192308
| false
| 0
| 0.057692
| 0.038462
| 0.442308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43af80522808363696ca10665012f09669723d2f
| 609
|
py
|
Python
|
Validation/EventGenerator/python/BasicGenParticleValidation_cfi.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 2
|
2020-10-26T18:40:32.000Z
|
2021-04-10T16:33:25.000Z
|
Validation/EventGenerator/python/BasicGenParticleValidation_cfi.py
|
gartung/cmssw
|
3072dde3ce94dcd1791d778988198a44cde02162
|
[
"Apache-2.0"
] | 30
|
2015-11-04T11:42:27.000Z
|
2021-12-01T07:56:34.000Z
|
Validation/EventGenerator/python/BasicGenParticleValidation_cfi.py
|
gartung/cmssw
|
3072dde3ce94dcd1791d778988198a44cde02162
|
[
"Apache-2.0"
] | 8
|
2016-03-25T07:17:43.000Z
|
2021-07-08T17:11:21.000Z
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
basicGenParticleValidation = DQMEDAnalyzer('BasicGenParticleValidation',
hepmcCollection = cms.InputTag("generatorSmeared"),
genparticleCollection = cms.InputTag("genParticles",""),
genjetsCollection = cms.InputTag("ak4GenJets",""),
matchingPrecision = cms.double(0.001),
verbosity = cms.untracked.uint32(0),
UseWeightFromHepMC = cms.bool(True),
signalParticlesOnly = cms.bool(False)
)
basicGenParticleValidationHiMix = basicGenParticleValidation.clone(signalParticlesOnly = True)
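# A hedged sketch of the same customisation pattern as the last line above:
# clone() returns a copy with selected parameters overridden (the verbosity
# value here is illustrative).
basicGenParticleValidationVerbose = basicGenParticleValidation.clone(
    verbosity = cms.untracked.uint32(1)
)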
| 40.6
| 94
| 0.784893
| 50
| 609
| 9.56
| 0.64
| 0.069038
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014787
| 0.111658
| 609
| 14
| 95
| 43.5
| 0.868762
| 0
| 0
| 0
| 0
| 0
| 0.10509
| 0.042693
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43b219f1675072d8c1034bc153a5f05238d1fdf2
| 639
|
py
|
Python
|
AppPkg/Applications/Python/Python-2.7.2/Lib/lib2to3/fixes/fix_methodattrs.py
|
CEOALT1/RefindPlusUDK
|
116b957ad735f96fbb6d80a0ba582046960ba164
|
[
"BSD-2-Clause"
] | 2,757
|
2018-04-28T21:41:36.000Z
|
2022-03-29T06:33:36.000Z
|
AppPkg/Applications/Python/Python-2.7.2/Lib/lib2to3/fixes/fix_methodattrs.py
|
CEOALT1/RefindPlusUDK
|
116b957ad735f96fbb6d80a0ba582046960ba164
|
[
"BSD-2-Clause"
] | 20
|
2019-07-23T15:29:32.000Z
|
2022-01-21T12:53:04.000Z
|
AppPkg/Applications/Python/Python-2.7.2/Lib/lib2to3/fixes/fix_methodattrs.py
|
CEOALT1/RefindPlusUDK
|
116b957ad735f96fbb6d80a0ba582046960ba164
|
[
"BSD-2-Clause"
] | 449
|
2018-05-09T05:54:05.000Z
|
2022-03-30T14:54:18.000Z
|
"""Fix bound method attributes (method.im_? -> method.__?__).
"""
# Author: Christian Heimes
# Local imports
from .. import fixer_base
from ..fixer_util import Name
MAP = {
"im_func" : "__func__",
"im_self" : "__self__",
"im_class" : "__self__.__class__"
}
class FixMethodattrs(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* >
"""
def transform(self, node, results):
attr = results["attr"][0]
new = unicode(MAP[attr.value])
attr.replace(Name(new, prefix=attr.prefix))
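
# A minimal sketch of driving this fixer programmatically under Python 2,
# equivalent to running 2to3 with `-f methodattrs`:
if __name__ == "__main__":
    from lib2to3.refactor import RefactoringTool
    tool = RefactoringTool(["lib2to3.fixes.fix_methodattrs"])
    print(tool.refactor_string(u"f = m.im_func\n", "<example>"))  # f = m.__func__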
| 25.56
| 80
| 0.596244
| 73
| 639
| 4.794521
| 0.547945
| 0.051429
| 0.057143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002075
| 0.245696
| 639
| 24
| 81
| 26.625
| 0.724066
| 0.153365
| 0
| 0
| 0
| 0.0625
| 0.284872
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.125
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43b28c13174a1c70f27d43e88e2fd455da590fcc
| 4,764
|
py
|
Python
|
models/TextCNN/cnn2d.py
|
Renovamen/Text-Classification
|
4a4aa4001c402ed4371ebaabe1393b27794e5992
|
[
"MIT"
] | 72
|
2020-06-23T18:26:47.000Z
|
2022-03-26T13:33:30.000Z
|
models/TextCNN/cnn2d.py
|
Renovamen/Text-Classification
|
4a4aa4001c402ed4371ebaabe1393b27794e5992
|
[
"MIT"
] | 5
|
2020-12-04T13:31:09.000Z
|
2021-08-03T14:11:52.000Z
|
models/TextCNN/cnn2d.py
|
Renovamen/Text-Classification
|
4a4aa4001c402ed4371ebaabe1393b27794e5992
|
[
"MIT"
] | 15
|
2020-06-24T16:08:39.000Z
|
2022-02-04T06:53:38.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List
class TextCNN2D(nn.Module):
"""
Implementation of 2D version of TextCNN proposed in paper [1].
`Here <https://github.com/yoonkim/CNN_sentence>`_ is the official
implementation of TextCNN.
Parameters
----------
n_classes : int
Number of classes
vocab_size : int
Number of words in the vocabulary
embeddings : torch.Tensor
Word embedding weights
emb_size : int
Size of word embeddings
fine_tune : bool
Allow fine-tuning of embedding layer? (only makes sense when using
pre-trained embeddings)
n_kernels : int
Number of kernels
kernel_sizes : List[int]
Size of each kernel
dropout : float
Dropout
n_channels : int
Number of channels (1 / 2)
References
----------
1. "`Convolutional Neural Networks for Sentence Classification. \
<https://www.aclweb.org/anthology/D14-1181.pdf>`_" Yoon Kim. EMNLP 2014.
"""
def __init__(
self,
n_classes: int,
vocab_size: int,
embeddings: torch.Tensor,
emb_size: int,
fine_tune: bool,
n_kernels: int,
kernel_sizes: List[int],
dropout: float,
n_channels = 1
) -> None:
super(TextCNN2D, self).__init__()
# embedding layer
self.embedding1 = nn.Embedding(vocab_size, emb_size)
self.set_embeddings(embeddings, 1, fine_tune)
if n_channels == 2:
# multichannel: a static channel and a non-static channel
# which means embedding2 is frozen
self.embedding2 = nn.Embedding(vocab_size, emb_size)
            self.set_embeddings(embeddings, 2, False)
else:
self.embedding2 = None
# 2d conv layer
self.convs = nn.ModuleList([
nn.Conv2d(
in_channels = n_channels,
out_channels = n_kernels,
kernel_size = (size, emb_size)
)
for size in kernel_sizes
])
self.fc = nn.Linear(len(kernel_sizes) * n_kernels, n_classes)
self.dropout = nn.Dropout(dropout)
self.relu = nn.ReLU()
def set_embeddings(
self,
embeddings: torch.Tensor,
layer_id: int = 1,
fine_tune: bool = True
) -> None:
"""
Set weights for embedding layer
Parameters
----------
embeddings : torch.Tensor
Word embeddings
layer_id : int
Embedding layer 1 or 2 (when adopting multichannel architecture)
fine_tune : bool, optional, default=True
Allow fine-tuning of embedding layer? (only makes sense when using
pre-trained embeddings)
"""
if embeddings is None:
# initialize embedding layer with the uniform distribution
if layer_id == 1:
self.embedding1.weight.data.uniform_(-0.1, 0.1)
else:
self.embedding2.weight.data.uniform_(-0.1, 0.1)
else:
# initialize embedding layer with pre-trained embeddings
if layer_id == 1:
self.embedding1.weight = nn.Parameter(embeddings, requires_grad = fine_tune)
else:
self.embedding2.weight = nn.Parameter(embeddings, requires_grad = fine_tune)
def forward(self, text: torch.Tensor, words_per_sentence: torch.Tensor) -> torch.Tensor:
"""
Parameters
----------
text : torch.Tensor (batch_size, word_pad_len)
Input data
words_per_sentence : torch.Tensor (batch_size)
Sentence lengths
Returns
-------
scores : torch.Tensor (batch_size, n_classes)
Class scores
"""
# word embedding
embeddings = self.embedding1(text).unsqueeze(1) # (batch_size, 1, word_pad_len, emb_size)
# multichannel
if self.embedding2:
embeddings2 = self.embedding2(text).unsqueeze(1) # (batch_size, 1, word_pad_len, emb_size)
embeddings = torch.cat((embeddings, embeddings2), dim = 1) # (batch_size, 2, word_pad_len, emb_size)
# conv
conved = [self.relu(conv(embeddings)).squeeze(3) for conv in self.convs] # [(batch size, n_kernels, word_pad_len - kernel_sizes[n] + 1)]
# pooling
pooled = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in conved] # [(batch size, n_kernels)]
# flatten
flattened = self.dropout(torch.cat(pooled, dim = 1)) # (batch size, n_kernels * len(kernel_sizes))
scores = self.fc(flattened) # (batch size, n_classes)
return scores
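
# A minimal smoke test for the model above (randomly initialised embeddings
# and illustrative hyperparameters):
if __name__ == "__main__":
    model = TextCNN2D(
        n_classes=2, vocab_size=100, embeddings=None, emb_size=50,
        fine_tune=True, n_kernels=16, kernel_sizes=[3, 4, 5], dropout=0.5,
    )
    text = torch.randint(0, 100, (8, 20))  # (batch_size, word_pad_len)
    lengths = torch.full((8,), 20)
    print(model(text, lengths).shape)  # torch.Size([8, 2])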
| 30.538462
| 145
| 0.588161
| 557
| 4,764
| 4.874327
| 0.276481
| 0.040516
| 0.018416
| 0.022099
| 0.222099
| 0.195948
| 0.195948
| 0.178269
| 0.12523
| 0.12523
| 0
| 0.018781
| 0.31822
| 4,764
| 155
| 146
| 30.735484
| 0.817118
| 0.407221
| 0
| 0.193548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048387
| false
| 0
| 0.064516
| 0
| 0.145161
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43b37687b876abf43457859ada796360f659fa78
| 2,595
|
py
|
Python
|
heat/tests/convergence/framework/testutils.py
|
maestro-hybrid-cloud/heat
|
91a4bb3170bd81b1c67a896706851e55709c9b5a
|
[
"Apache-2.0"
] | null | null | null |
heat/tests/convergence/framework/testutils.py
|
maestro-hybrid-cloud/heat
|
91a4bb3170bd81b1c67a896706851e55709c9b5a
|
[
"Apache-2.0"
] | null | null | null |
heat/tests/convergence/framework/testutils.py
|
maestro-hybrid-cloud/heat
|
91a4bb3170bd81b1c67a896706851e55709c9b5a
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_log import log as logging
from heat.tests.convergence.framework import reality
from heat.tests.convergence.framework import scenario_template
LOG = logging.getLogger(__name__)
def verify(test, reality, tmpl):
for name in tmpl.resources:
rsrc_count = len(reality.resources_by_logical_name(name))
test.assertEqual(1, rsrc_count,
'Found %d copies of resource "%s"' % (rsrc_count,
name))
all_rsrcs = reality.all_resources()
for name, defn in tmpl.resources.items():
phys_rsrc = reality.resources_by_logical_name(name)[0]
for prop_name, prop_def in defn.properties.items():
real_value = reality.resource_properties(phys_rsrc, prop_name)
if isinstance(prop_def, scenario_template.GetAtt):
targs = reality.resources_by_logical_name(prop_def.target_name)
att_value = targs[0].properties_data[prop_def.attr]
test.assertEqual(att_value, real_value)
elif isinstance(prop_def, scenario_template.GetRes):
targs = reality.resources_by_logical_name(prop_def.target_name)
test.assertEqual(targs[0].nova_instance, real_value)
else:
test.assertEqual(prop_def, real_value)
test.assertEqual(len(defn.properties), len(phys_rsrc.properties_data))
test.assertEqual(len(tmpl.resources), len(all_rsrcs))
def scenario_globals(procs, testcase):
return {
'test': testcase,
'reality': reality.reality,
'verify': functools.partial(verify,
testcase,
reality.reality),
'Template': scenario_template.Template,
'RsrcDef': scenario_template.RsrcDef,
'GetRes': scenario_template.GetRes,
'GetAtt': scenario_template.GetAtt,
'engine': procs.engine,
'worker': procs.worker,
}
| 36.549296
| 79
| 0.649326
| 308
| 2,595
| 5.292208
| 0.38961
| 0.068712
| 0.044172
| 0.06135
| 0.191411
| 0.15092
| 0.062577
| 0.062577
| 0.062577
| 0.062577
| 0
| 0.004204
| 0.266667
| 2,595
| 70
| 80
| 37.071429
| 0.852338
| 0.210405
| 0
| 0.04878
| 0
| 0
| 0.043286
| 0
| 0
| 0
| 0
| 0
| 0.146341
| 1
| 0.04878
| false
| 0
| 0.097561
| 0.02439
| 0.170732
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43b56590cfbfa648aa925a4f729f3fc4fe304008
| 2,605
|
py
|
Python
|
nova/tests/servicegroup/test_zk_driver.py
|
vmthunder/nova
|
baf05caab705c5778348d9f275dc541747b7c2de
|
[
"Apache-2.0"
] | 7
|
2017-06-19T19:37:00.000Z
|
2019-06-16T02:06:14.000Z
|
nova/tests/servicegroup/test_zk_driver.py
|
vmthunder/nova
|
baf05caab705c5778348d9f275dc541747b7c2de
|
[
"Apache-2.0"
] | 9
|
2015-05-20T11:20:17.000Z
|
2017-07-27T08:21:33.000Z
|
nova/tests/servicegroup/test_zk_driver.py
|
vmthunder/nova
|
baf05caab705c5778348d9f275dc541747b7c2de
|
[
"Apache-2.0"
] | 13
|
2015-05-05T09:34:04.000Z
|
2017-11-08T02:03:46.000Z
|
# Copyright (c) AT&T 2012-2013 Yun Mao <yunmao@gmail.com>
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test the ZooKeeper driver for servicegroup.
You need to install ZooKeeper locally and related dependencies
to run the test. It's unclear how to install python-zookeeper lib
in venv so you might have to run the test without it.
To set up in Ubuntu 12.04:
$ sudo apt-get install zookeeper zookeeperd python-zookeeper
$ sudo pip install evzookeeper
$ nosetests nova.tests.servicegroup.test_zk_driver
"""
import eventlet
from nova import servicegroup
from nova import test
class ZKServiceGroupTestCase(test.NoDBTestCase):
def setUp(self):
super(ZKServiceGroupTestCase, self).setUp()
servicegroup.API._driver = None
from nova.servicegroup.drivers import zk
self.flags(servicegroup_driver='zk')
self.flags(address='localhost:2181', group="zookeeper")
try:
zk.ZooKeeperDriver()
except ImportError:
self.skipTest("Unable to test due to lack of ZooKeeper")
def test_join_leave(self):
self.servicegroup_api = servicegroup.API()
service_id = {'topic': 'unittest', 'host': 'serviceA'}
self.servicegroup_api.join(service_id['host'], service_id['topic'])
self.assertTrue(self.servicegroup_api.service_is_up(service_id))
self.servicegroup_api.leave(service_id['host'], service_id['topic'])
# make sure zookeeper is updated and watcher is triggered
eventlet.sleep(1)
self.assertFalse(self.servicegroup_api.service_is_up(service_id))
def test_stop(self):
self.servicegroup_api = servicegroup.API()
service_id = {'topic': 'unittest', 'host': 'serviceA'}
pulse = self.servicegroup_api.join(service_id['host'],
service_id['topic'], None)
self.assertTrue(self.servicegroup_api.service_is_up(service_id))
pulse.stop()
eventlet.sleep(1)
self.assertFalse(self.servicegroup_api.service_is_up(service_id))
| 39.469697
| 78
| 0.700576
| 345
| 2,605
| 5.185507
| 0.434783
| 0.100615
| 0.095584
| 0.058133
| 0.286752
| 0.286752
| 0.27166
| 0.27166
| 0.27166
| 0.27166
| 0
| 0.012615
| 0.208829
| 2,605
| 65
| 79
| 40.076923
| 0.85541
| 0.416891
| 0
| 0.322581
| 0
| 0
| 0.094126
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 1
| 0.096774
| false
| 0
| 0.16129
| 0
| 0.290323
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43b6084ad6323124af0ef6d980f927d5cab21334
| 780
|
py
|
Python
|
tests/test_misc.py
|
lordmauve/chopsticks
|
87c6a5d0049a45db1477a21510cba650f470a8ac
|
[
"Apache-2.0"
] | 171
|
2016-07-14T11:29:15.000Z
|
2022-03-12T07:39:12.000Z
|
tests/test_misc.py
|
moreati/chopsticks
|
87c6a5d0049a45db1477a21510cba650f470a8ac
|
[
"Apache-2.0"
] | 59
|
2016-07-23T14:05:58.000Z
|
2020-06-26T15:49:07.000Z
|
tests/test_misc.py
|
moreati/chopsticks
|
87c6a5d0049a45db1477a21510cba650f470a8ac
|
[
"Apache-2.0"
] | 17
|
2016-08-01T06:46:27.000Z
|
2018-03-25T14:46:15.000Z
|
"""Tests for miscellaneous properties, such as debuggability."""
import time
from chopsticks.tunnel import Docker
from chopsticks.group import Group
def test_tunnel_repr():
"""Tunnels have a usable repr."""
tun = Docker('py36', image='python:3.6')
assert repr(tun) == "Docker('py36')"
def test_group_repr():
"""Groups have a usable repr."""
grp = Group([
Docker('py35', image='python:3.5'),
Docker('py36', image='python:3.6')
])
assert repr(grp) == "Group([Docker('py35'), Docker('py36')])"
def test_group_reuse():
"""We can re-use a group."""
grp = Group([
Docker('py35', image='python:3.5'),
Docker('py36', image='python:3.6')
])
with grp:
grp.call(time.time)
grp.call(time.time)
| 25.16129
| 65
| 0.601282
| 105
| 780
| 4.409524
| 0.371429
| 0.107991
| 0.12959
| 0.136069
| 0.468683
| 0.326134
| 0.326134
| 0.326134
| 0.233261
| 0.233261
| 0
| 0.042553
| 0.216667
| 780
| 30
| 66
| 26
| 0.715221
| 0.174359
| 0
| 0.5
| 0
| 0
| 0.197432
| 0.035313
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.15
| false
| 0
| 0.15
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43b62d9d4c35cd12677417d9abccab4b3568c545
| 3,028
|
py
|
Python
|
Evaluation/PostProcesing.py
|
AnnonymousRacoon/Quantum-Random-Walks-to-Solve-Diffusion
|
366ac5073cea96b662b934c3657446c9f1aa2f65
|
[
"MIT"
] | null | null | null |
Evaluation/PostProcesing.py
|
AnnonymousRacoon/Quantum-Random-Walks-to-Solve-Diffusion
|
366ac5073cea96b662b934c3657446c9f1aa2f65
|
[
"MIT"
] | 3
|
2022-03-12T17:16:36.000Z
|
2022-03-17T12:14:56.000Z
|
Evaluation/PostProcesing.py
|
AnnonymousRacoon/Quantum-Random-Walks-to-Solve-Diffusion
|
366ac5073cea96b662b934c3657446c9f1aa2f65
|
[
"MIT"
] | 1
|
2022-03-12T11:56:43.000Z
|
2022-03-12T11:56:43.000Z
|
import pandas as pd
import re
import glob
def rebuild_counts_from_csv(path,n_dims, shots):
df = pd.read_csv(path)
return rebuild_counts_from_dataframe(dataframe=df, n_dims=n_dims, shots=shots)
def rebuild_counts_from_dataframe(dataframe,n_dims,shots):
dimension_counts = {}
for dimension in range(n_dims):
dimension_counts[dimension] = []
pde = list(dataframe.probability_density)
for idx, density in enumerate(pde):
n_counts = int(density*shots)
for _ in range(n_counts):
# print(dataframe["dimension_0"][idx])
for dimension in range(n_dims):
dimension_key = "dimension_{}".format(dimension)
#
dimension_counts[dimension]+=[dataframe[dimension_key][idx]]
# print(dimension_counts)
rebuilt_dict = {}
for dimension in range(n_dims):
rebuilt_dict[f"d{dimension}"] = dimension_counts[dimension]
return rebuilt_dict
def rebuild_counts_from_dictionary(dictionary:dict, n_dims, shots):
dataframe = pd.DataFrame(dictionary)
return rebuild_counts_from_dataframe(dataframe=dataframe, n_dims=n_dims, shots=shots)
def get_stats_from_counts_dict(results_dict:dict):
dataframe = pd.DataFrame(results_dict)
return get_stats_from_counts_dataframe(dataframe)
def get_stats_from_counts_dataframe(counts_dataframe: pd.DataFrame)-> dict:
results_dict = {}
results_dict["corr"] = counts_dataframe.corr()
results_dict["cov"] = counts_dataframe.cov()
results_dict["mean"] = counts_dataframe.mean()
results_dict['var'] = counts_dataframe.var()
return results_dict
def get_n_steps_from_filepath(filepath)-> int:
filename = filepath.split('/')[-1]
return int(re.findall(r"\d+_steps",filename)[0].split('_')[0])
def get_n_shots_from_path(path)-> int:
experiment_dir_name = path.split('/')[-1]
nshots = int(re.findall(r"\d+shots",experiment_dir_name)[0].split('s')[0])
return nshots
def get_n_dims_from_path(path)-> int:
experiment_dir_name = path.split('/')[-1]
ndims = int(re.findall(r"\d+D_",experiment_dir_name)[0].split('D')[0])
return ndims
def extract_mean_variance_vs_nsteps(directory_path: str,dimension = 0):
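    """Walk an experiment directory and collect mean/variance per step count.

    Shot count and dimensionality are parsed from the directory name; each
    CSV is rebuilt into raw counts before its statistics are computed.
    """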
nshots = get_n_shots_from_path(directory_path)
ndims = get_n_dims_from_path(directory_path)
assert dimension < ndims, "queried dimension exceeds experiment space"
files = glob.glob(directory_path+'/*/data/**.csv')
files.sort(key = get_n_steps_from_filepath)
n_steps = []
variance = []
mean = []
for filepath in files:
filename = filepath.split('/')[-1]
nsteps = int(re.findall(r"\d+_steps",filename)[0].split('_')[0])
rebuilt_dict = rebuild_counts_from_csv(filepath,n_dims=ndims,shots=nshots)
stats = get_stats_from_counts_dict(rebuilt_dict)
variance.append(stats['var'][dimension])
mean.append(stats['mean'][dimension])
n_steps.append(nsteps)
return n_steps, variance, mean
| 33.274725
| 89
| 0.691546
| 404
| 3,028
| 4.873762
| 0.180693
| 0.033012
| 0.051803
| 0.036567
| 0.34129
| 0.187913
| 0.134078
| 0.077197
| 0.077197
| 0.077197
| 0
| 0.005668
| 0.18428
| 3,028
| 90
| 90
| 33.644444
| 0.791498
| 0.019815
| 0
| 0.111111
| 0
| 0
| 0.047233
| 0
| 0
| 0
| 0
| 0
| 0.015873
| 1
| 0.142857
| false
| 0
| 0.047619
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43b693bbc83efef69f13c3a5a3bab32c542470ab
| 2,276
|
py
|
Python
|
app/wirecard/tasks.py
|
michel-rodrigues/viggio_backend
|
f419f0b939209722e1eb1e272f33de172cd5c1f1
|
[
"MIT"
] | null | null | null |
app/wirecard/tasks.py
|
michel-rodrigues/viggio_backend
|
f419f0b939209722e1eb1e272f33de172cd5c1f1
|
[
"MIT"
] | null | null | null |
app/wirecard/tasks.py
|
michel-rodrigues/viggio_backend
|
f419f0b939209722e1eb1e272f33de172cd5c1f1
|
[
"MIT"
] | null | null | null |
from sentry_sdk import capture_exception
from dateutil.parser import parse
from project_configuration.celery import app
from orders.models import Charge
from request_shoutout.domain.models import Charge as DomainCharge
from .models import WirecardTransactionData
CROSS_SYSTEMS_STATUS_MAPPING = {
'WAITING': DomainCharge.PROCESSING,
'IN_ANALYSIS': DomainCharge.PROCESSING,
'PRE_AUTHORIZED': DomainCharge.PRE_AUTHORIZED,
'AUTHORIZED': DomainCharge.PAID,
'CANCELLED': DomainCharge.CANCELLED,
'REFUNDED': DomainCharge.CANCELLED,
'REVERSED': DomainCharge.CANCELLED,
'SETTLED': DomainCharge.PAID,
}
def _update_status(wirecard_status, wirecard_payment_hash):
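    """Map the Wirecard status onto the domain status of the matching charge."""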
(
Charge.objects
.filter(order__third_party_transaction__wirecard_payment_hash=wirecard_payment_hash)
.update(status=CROSS_SYSTEMS_STATUS_MAPPING[wirecard_status])
)
def _update_payment_event_timestamp(wirecard_transaction, payment_event_timestamp):
wirecard_transaction.payment_event_last_timestamp = payment_event_timestamp
wirecard_transaction.save()
def _is_a_delayed_notification(payment_event_timestamp, wirecard_transaction):
if wirecard_transaction.payment_event_last_timestamp:
return payment_event_timestamp < wirecard_transaction.payment_event_last_timestamp
return False
@app.task
def update_payment_status(notification):
payment_event_timestamp = parse(notification['resource']['payment']['updatedAt'])
payment_status = notification['resource']['payment']['status']
wirecard_payment_hash = notification['resource']['payment']['id']
try:
wirecard_transaction = (
WirecardTransactionData.objects.get(wirecard_payment_hash=wirecard_payment_hash)
)
    # This exception has been raised occasionally; since we do not know whether it is caused
    # by failures in the Wirecard sandbox, we avoid crashing the app and send the exception to Sentry
except WirecardTransactionData.DoesNotExist:
capture_exception()
else:
        if not _is_a_delayed_notification(payment_event_timestamp, wirecard_transaction):
_update_status(payment_status, wirecard_payment_hash)
_update_payment_event_timestamp(wirecard_transaction, payment_event_timestamp)
| 38.576271
| 93
| 0.784271
| 249
| 2,276
| 6.799197
| 0.37751
| 0.085056
| 0.111636
| 0.119905
| 0.357354
| 0.295924
| 0.251034
| 0.217956
| 0.217956
| 0
| 0
| 0
| 0.146749
| 2,276
| 58
| 94
| 39.241379
| 0.871782
| 0.075571
| 0
| 0
| 0
| 0
| 0.064731
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.136364
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43b93580a409ca7d715e6c81e1d0f3517269cec7
| 4,277
|
py
|
Python
|
dygraph/alexnet/network.py
|
Sunyingbin/models
|
30a7f1757bfad79935aa865f4362a7b38e63a415
|
[
"Apache-2.0"
] | null | null | null |
dygraph/alexnet/network.py
|
Sunyingbin/models
|
30a7f1757bfad79935aa865f4362a7b38e63a415
|
[
"Apache-2.0"
] | null | null | null |
dygraph/alexnet/network.py
|
Sunyingbin/models
|
30a7f1757bfad79935aa865f4362a7b38e63a415
|
[
"Apache-2.0"
] | null | null | null |
"""
动态图构建 AlexNet
"""
import paddle.fluid as fluid
import numpy as np
class Conv2D(fluid.dygraph.Layer):
def __init__(self,
name_scope,
num_channels,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=1,
act=None,
use_cudnn=False,
param_attr=None,
bias_attr=None):
super(Conv2D, self).__init__(name_scope)
self._conv2d = fluid.dygraph.Conv2D(
num_channels=num_channels,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
param_attr=param_attr,
bias_attr=bias_attr,
act=act,
use_cudnn=use_cudnn)
def forward(self, inputs):
x = self._conv2d(inputs)
return x
class Conv2DPool(fluid.dygraph.Layer):
def __init__(self,
name_scope,
num_channels,
num_filters,
filter_size,
pool_size,
pool_stride,
pool_padding=0,
pool_type='max',
global_pooling=False,
conv_stride=1,
conv_padding=0,
conv_dilation=1,
conv_groups=1,
act=None,
use_cudnn=False,
param_attr=None,
bias_attr=None):
super(Conv2DPool, self).__init__(name_scope)
self._conv2d = fluid.dygraph.Conv2D(
num_channels=num_channels,
num_filters=num_filters,
filter_size=filter_size,
stride=conv_stride,
padding=conv_padding,
dilation=conv_dilation,
groups=conv_groups,
param_attr=param_attr,
bias_attr=bias_attr,
act=act,
use_cudnn=use_cudnn)
self._pool2d = fluid.dygraph.Pool2D(
pool_size=pool_size,
pool_type=pool_type,
pool_stride=pool_stride,
pool_padding=pool_padding,
global_pooling=global_pooling,
use_cudnn=use_cudnn)
def forward(self, inputs):
x = self._conv2d(inputs)
x = self._pool2d(x)
return x
class AlexNet(fluid.dygraph.Layer):
def __init__(self, name_scope, class_dim):
super(AlexNet, self).__init__(name_scope)
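        # Classic AlexNet: five convolutional blocks followed by three FC layers.
        # fc6 expects 9216 inputs (256 channels * 6 * 6) for 224x224 images.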
self.conv_pool_1 = Conv2DPool(self.full_name(), 3, 64, 11, 3, 2, conv_stride=4, conv_padding=2, act='relu')
self.conv_pool_2 = Conv2DPool(self.full_name(), 64, 192, 5, 3, 2, conv_stride=1, conv_padding=2, act='relu')
self.conv_3 = Conv2D(self.full_name(), 192, 384, 3, 1, 1, act='relu')
self.conv_4 = Conv2D(self.full_name(), 384, 256, 3, 1, 1, act='relu')
self.conv_pool_5 = Conv2DPool(self.full_name(), 256, 256, 3, 3, 2, conv_stride=1, conv_padding=1, act='relu')
self.fc6 = fluid.dygraph.FC(self.full_name(), 9216, 4096, act='relu')
self.fc7 = fluid.dygraph.FC(self.full_name(), 4096, 4096, act='relu')
self.fc8 = fluid.dygraph.FC(self.full_name(), 4096, class_dim, act='softmax')
def forward(self, inputs, label=None):
out = self.conv_pool_1(inputs)
out = self.conv_pool_2(out)
out = self.conv_3(out)
out = self.conv_4(out)
out = self.conv_pool_5(out)
out = self.fc6(out)
out = fluid.layers.dropout(out, 0.5)
out = self.fc7(out)
out = fluid.layers.dropout(out, 0.5)
out = self.fc8(out)
if label is not None:
acc = fluid.layers.accuracy(input=out, label=label)
return out, acc
else:
return out
if __name__ == '__main__':
with fluid.dygraph.guard():
alexnet = AlexNet('alex-net', 3)
img = np.zeros([2, 3, 224, 224]).astype('float32')
img = fluid.dygraph.to_variable(img)
outs = alexnet(img).numpy()
print(outs)
| 32.9
| 118
| 0.53098
| 503
| 4,277
| 4.240557
| 0.184891
| 0.061885
| 0.045007
| 0.039381
| 0.498359
| 0.474449
| 0.46226
| 0.37459
| 0.357243
| 0.357243
| 0
| 0.046546
| 0.36708
| 4,277
| 129
| 119
| 33.155039
| 0.741411
| 0.00304
| 0
| 0.394495
| 0
| 0
| 0.014788
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055046
| false
| 0
| 0.018349
| 0
| 0.137615
| 0.009174
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43bbbe3418d6d5e2da95d398c3928141e4b68eab
| 905
|
py
|
Python
|
turtlegameproject/turtlegame.py
|
Ayon134/code_for_Kids
|
d90698bb38efe5e26c31f02bd129bfdadea158e2
|
[
"MIT"
] | null | null | null |
turtlegameproject/turtlegame.py
|
Ayon134/code_for_Kids
|
d90698bb38efe5e26c31f02bd129bfdadea158e2
|
[
"MIT"
] | null | null | null |
turtlegameproject/turtlegame.py
|
Ayon134/code_for_Kids
|
d90698bb38efe5e26c31f02bd129bfdadea158e2
|
[
"MIT"
] | 2
|
2021-01-08T03:52:46.000Z
|
2021-04-01T19:16:12.000Z
|
import turtle
import random
p1=turtle.Turtle()
p1.color("green")
p1.shape("turtle")
p1.penup()
p1.goto(-200,100)
p2=p1.clone()
p2.color("blue")
p2.penup()
p2.goto(-200,-100)
p1.goto(300,60)
p1.pendown()
p1.circle(40)
p1.penup()
p1.goto(-200,100)
p2.goto(300,-140)
p2.pendown()
p2.circle(40)
p2.penup()
p2.goto(-200,-100)
die=[1,2,3,4,5,6]
i=1
while(i <= 20):
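    # race loop: the first turtle whose x-coordinate reaches 300 wins; at most 20 rounds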
if p1.pos() >= (300,100):
print("p1 wins")
break
elif p2.pos() >= (300,-100):
print("p2 wins")
break
else:
p1_turn=input("press enter to start")
die_out=random.choice(die)
print("you get", die_out)
print("the number of steps:", 20*die_out)
p1.forward(20*die_out)
p2_turn=input("press enter to challenge")
d=random.choice(die)
print("you get",d)
print("the number os steps:",20*d)
        p2.forward(20*d)
        # count the round so the race actually stops after 20 turns
        i += 1
| 17.745098
| 49
| 0.571271
| 148
| 905
| 3.452703
| 0.371622
| 0.054795
| 0.078278
| 0.050881
| 0.340509
| 0.258317
| 0.082192
| 0
| 0
| 0
| 0
| 0.139738
| 0.240884
| 905
| 51
| 50
| 17.745098
| 0.604076
| 0
| 0
| 0.243902
| 0
| 0
| 0.140177
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.04878
| 0
| 0.04878
| 0.146341
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43be862a8ae3652cfbde5c9e9ea45da257901956
| 1,633
|
py
|
Python
|
app.py
|
thliang01/nba-s
|
660d0e830989916b7b9f3123eb809d143b714186
|
[
"BSD-2-Clause"
] | null | null | null |
app.py
|
thliang01/nba-s
|
660d0e830989916b7b9f3123eb809d143b714186
|
[
"BSD-2-Clause"
] | null | null | null |
app.py
|
thliang01/nba-s
|
660d0e830989916b7b9f3123eb809d143b714186
|
[
"BSD-2-Clause"
] | null | null | null |
import streamlit as st
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# --------------------------------------------------------------
# Import and clean data
game_details = pd.read_csv('games_details.csv')
# print(game_details.head(5))
game_details.drop(['GAME_ID', 'TEAM_ID', 'PLAYER_ID', 'START_POSITION',
'COMMENT', 'TEAM_ABBREVIATION'], axis=1, inplace=True)
game_details['FTL'] = game_details['FTA'] - game_details['FTM']
game_details = game_details.dropna()
# game_details.shape
# game_details.info()
game_details['MIN'] = game_details['MIN'].str.strip(':').str[0:2]
df = game_details.copy()
if st.checkbox('Show dataframe'):
st.write("Players Game Details")
st.dataframe(df.head(10))
# --------------------------------------------------------------
st.write("Top 20 Players in the NBA")
top_activities = df.groupby(by='PLAYER_NAME')['PTS'].sum().sort_values(ascending=False).head(20).reset_index()
plt.figure(figsize=(15, 10))
plt.xlabel('POINTS', fontsize=15)
plt.ylabel('PLAYER_NAME', fontsize=15)
plt.title('Top 20 Players in the NBA League', fontsize=20)
ax = sns.barplot(x=top_activities['PTS'], y=top_activities['PLAYER_NAME'])
for i, (value, name) in enumerate(zip(top_activities['PTS'], top_activities['PLAYER_NAME'])):
ax.text(value, i - .05, f'{value:,.0f}', size=10, ha='left', va='center')
ax.set(xlabel='POINTS', ylabel='PLAYER_NAME')
st.pyplot(plt)
player = st.multiselect(
"Choose Player", df['PLAYER_NAME']
)
st.write("""
# My first app
Hello *world!*
""")
x = st.slider("Select a number")
st.write("You selected:", x)
| 32.019608
| 110
| 0.647887
| 234
| 1,633
| 4.380342
| 0.487179
| 0.150244
| 0.027317
| 0.027317
| 0.039024
| 0.039024
| 0
| 0
| 0
| 0
| 0
| 0.018569
| 0.109614
| 1,633
| 50
| 111
| 32.66
| 0.686382
| 0.131047
| 0
| 0
| 0
| 0
| 0.259207
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43bfd11896f962234020d5d611ad3cb21b537df7
| 19,228
|
py
|
Python
|
python/craftassist/voxel_models/geoscorer/geoscorer_util.py
|
kepolol/craftassist
|
f60a7edd0b4ea72b774cca45ba468d2e275445c2
|
[
"MIT"
] | null | null | null |
python/craftassist/voxel_models/geoscorer/geoscorer_util.py
|
kepolol/craftassist
|
f60a7edd0b4ea72b774cca45ba468d2e275445c2
|
[
"MIT"
] | null | null | null |
python/craftassist/voxel_models/geoscorer/geoscorer_util.py
|
kepolol/craftassist
|
f60a7edd0b4ea72b774cca45ba468d2e275445c2
|
[
"MIT"
] | 1
|
2020-03-29T20:04:11.000Z
|
2020-03-29T20:04:11.000Z
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import numpy as np
import random
from datetime import datetime
import sys
import argparse
import torch
import os
from inspect import currentframe, getframeinfo
GEOSCORER_DIR = os.path.dirname(os.path.realpath(__file__))
CRAFTASSIST_DIR = os.path.join(GEOSCORER_DIR, "../")
sys.path.append(CRAFTASSIST_DIR)
from shapes import get_bounds
def pretty_log(log_string):
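    """Print a timestamped message prefixed with the caller's file and line number."""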
cf = currentframe().f_back
filename = getframeinfo(cf).filename.split("/")[-1]
print(
"{} {}:{} {}".format(
datetime.now().strftime("%m/%d/%Y %H:%M:%S"), filename, cf.f_lineno, log_string
)
)
sys.stdout.flush()
## Train Fxns ##
def get_base_train_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", type=int, default=1, help="0 for cpu")
parser.add_argument("--batchsize", type=int, default=64, help="batchsize")
parser.add_argument("--dataset", default="shapes", help="shapes/segments/both")
parser.add_argument(
"--epochsize", type=int, default=1000, help="number of examples in an epoch"
)
parser.add_argument("--nepoch", type=int, default=1000, help="number of epochs")
parser.add_argument("--context_sidelength", type=int, default=32, help="size of cube")
parser.add_argument("--hidden_dim", type=int, default=64, help="size of hidden dim")
parser.add_argument("--num_layers", type=int, default=3, help="num layers")
parser.add_argument(
"--blockid_embedding_dim", type=int, default=8, help="size of blockid embedding"
)
parser.add_argument(
"--num_words", type=int, default=256, help="number of words for the blockid embeds"
)
parser.add_argument("--lr", type=float, default=0.1, help="step size for net")
parser.add_argument(
"--optim", type=str, default="adagrad", help="optim type to use (adagrad|sgd|adam)"
)
parser.add_argument("--momentum", type=float, default=0.0, help="momentum")
parser.add_argument("--checkpoint", default="", help="where to save model")
parser.add_argument("--num_workers", type=int, default=4, help="number of dataloader workers")
return parser
def add_dataset_flags(parser):
parser.add_argument(
"--dataset_ratios", type=str, default="shape:1.0", help="comma separated name:prob"
)
parser.add_argument("--useid", type=bool, default=False, help="use blockid")
parser.add_argument("--fixed_cube_size", type=int, default=None, help="fixed_cube_size")
parser.add_argument("--fixed_center", type=bool, default=False, help="fixed_center")
parser.add_argument(
"--min_seg_size", type=int, default=6, help="min seg size for seg data type"
)
parser.add_argument(
"--use_saved_data",
type=bool,
default=False,
help="use preparsed data for this min_seg_size",
)
def add_directional_flags(parser):
parser.add_argument("--spatial_embedding_dim", type=int, default=8, help="size of spatial emb")
parser.add_argument("--output_embedding_dim", type=int, default=8, help="size of output emb")
parser.add_argument(
"--seg_direction_net", type=bool, default=False, help="use segdirnet module"
)
parser.add_argument(
"--seg_use_viewer_pos", type=bool, default=False, help="use viewer pos in seg"
)
parser.add_argument(
"--seg_use_viewer_look", type=bool, default=False, help="use viewer look in seg"
)
parser.add_argument(
"--seg_use_direction", type=bool, default=False, help="use direction in seg"
)
parser.add_argument("--num_seg_dir_layers", type=int, default=3, help="num segdir net layers")
parser.add_argument(
"--cont_use_direction", type=bool, default=False, help="use direction in context"
)
parser.add_argument(
"--cont_use_xyz_from_viewer_look",
type=bool,
default=False,
help="use xyz position relative to viewer look in context emb",
)
def get_dataloader(dataset, opts, collate_fxn):
def init_fn(wid):
np.random.seed(torch.initial_seed() % (2 ** 32))
return torch.utils.data.DataLoader(
dataset,
batch_size=opts["batchsize"],
shuffle=True,
pin_memory=True,
drop_last=True,
num_workers=opts["num_workers"],
worker_init_fn=init_fn,
collate_fn=collate_fxn,
)
def to_cuda(list_modules):
for m in list_modules:
m.cuda()
def multitensor_collate_fxn(x):
"""
Takes a list of BATCHSIZE lists of tensors of length D.
Returns a list of length D of batched tensors.
"""
num_tensors_to_batch = len(x[0])
regroup_tensors = [[] for i in range(num_tensors_to_batch)]
for t_list in x:
for i, t in enumerate(t_list):
regroup_tensors[i].append(t.unsqueeze(0))
batched_tensors = [torch.cat(tl) for tl in regroup_tensors]
return batched_tensors
## 3D Utils ##
def get_side_lengths(bounds):
"""
Bounds should be a list of [min_x, max_x, min_y, max_y, min_z, max_z].
Returns a list of the side lengths.
"""
return [x + 1 for x in (bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4])]
def coord_to_index(coord, sl):
"""
Takes a 3D coordinate in a cube and the cube side length.
Returns index in flattened 3D array.
"""
return coord[0] * sl * sl + coord[1] * sl + coord[2]
def index_to_coord(index, sl):
"""
Takes an index into a flattened 3D array and its side length.
Returns the coordinate in the cube.
"""
coord = []
two_d_slice_size = sl * sl
coord.append(index // two_d_slice_size)
remaining = index % two_d_slice_size
coord.append(remaining // sl)
coord.append(remaining % sl)
return coord
def shift_subsegment_corner(S):
"""
Takes a segment, described as a list of tuples of the form:
((x, y, z), (block_id, ?))
Returns the segment in the same form, shifted to the origin, and the shift vec
"""
bounds = get_bounds(S)
shift_zero_vec = [-bounds[0], -bounds[2], -bounds[4]]
new_S = []
for s in S:
new_S.append((tuple([sum(x) for x in zip(s[0], shift_zero_vec)]), s[1]))
return new_S, shift_zero_vec
def subset_and_scale_3d(init_array, mins, maxs, scale=1):
return scale * init_array[mins[0] : maxs[0], mins[1] : maxs[1], mins[2] : maxs[2]]
def combine_seg_context(seg, context, seg_shift, seg_mult=1):
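    """Paste an 8^3 segment into a 32^3 context at seg_shift, clipping at the edges."""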
completed_context = context.clone()
# Calculate the region to copy over, sometimes the segment
# falls outside the range of the context bounding box
c_mins = [int(i) for i in seg_shift]
c_maxs = [int(min(ss + 8, 32)) for ss in seg_shift]
s_mins = [0 for i in range(3)]
# If the edge of the segment goes past the edge of the context (ss + 8 > 32),
# remove the extra from the segment.
s_maxs = [int(8 - max(0, (ss + 8) - 32)) for ss in seg_shift]
seg_to_add = subset_and_scale_3d(seg, s_mins, s_maxs, seg_mult)
context_subset = subset_and_scale_3d(completed_context, c_mins, c_maxs, 1)
completed_context[c_mins[0] : c_maxs[0], c_mins[1] : c_maxs[1], c_mins[2] : c_maxs[2]] = (
seg_to_add + context_subset
)
return completed_context
def get_vector(start, end):
return end - start
def get_random_viewer_info(sl):
viewer_pos = torch.tensor(random_int_triple(0, sl - 1))
viewer_look = torch.tensor(random_int_triple(0, sl - 1))
if viewer_pos.eq(viewer_look).sum() == viewer_pos.size(0):
        if viewer_look[0] < sl - 1:  # keep the shifted coordinate inside [0, sl - 1]
viewer_look[0] += 1
else:
viewer_look[0] -= 1
return viewer_pos, viewer_look
def b_greater_than_a(a, b):
if a == b:
return 0
return 1 if b > a else -1
def shift_block(b, s):
return tuple((tuple((b[0][0] + s[0], b[0][1] + s[1], b[0][2] + s[2])), b[1]))
def rotate_block(b, c, r):
""" rotates the block b around the point c by 90*r degrees
in the xz plane. r should be 1 or -1."""
# TODO add a reflection
c = np.array(c)
p = np.add(b[0], -c)
x = p[0]
z = p[2]
if r == -1:
p[0] = z
p[2] = -x
else:
p[0] = -z
p[2] = x
return (tuple(p + c), b[1])
def random_int_triple(minval, maxval):
t = [
random.randint(minval, maxval),
random.randint(minval, maxval),
random.randint(minval, maxval),
]
return t
def check_inrange(x, minval, maxval):
"""inclusive check"""
return all([v >= minval for v in x]) and all([v <= maxval for v in x])
def normalize(batched_vector):
vec = batched_vector.double()
norm = torch.norm(vec, dim=1)
# Set norm to 1 if it's 0
norm = norm + norm.eq(0).double()
expanded_norm = norm.unsqueeze(1).expand(-1, vec.size()[1])
return torch.div(vec, expanded_norm)
def get_rotation_matrix(viewer_pos, viewer_look):
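    """Build N x 2 x 2 matrices rotating the xy-plane so each viewer-look vector maps to (0, 1)."""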
# VP, VL: N x 3, VP_to_VL: N x 3
vp_to_vl = get_vector(viewer_pos, viewer_look)[:, :2]
nlook_vec = normalize(vp_to_vl)
nly = nlook_vec[:, 1]
    # Nlx necessary to correct for the range of arccos
nlx = nlook_vec[:, 0]
nlx = nlx.gt(0).double() - nlx.lt(0).double() - nlx.eq(0).double()
# Take care of nans created by raising 0 to a power
# and then masking the sin theta to 0 as intended
base = 1 - nly * nly
nan_mask = torch.isnan(torch.pow(base, 0.5)).double()
base = base + nan_mask
sin_theta = nlx * nan_mask.eq(0).double() * torch.pow(base, 0.5)
nly = nly.unsqueeze(1)
sin_theta = sin_theta.unsqueeze(1)
rm_pt1 = torch.cat([nly, sin_theta], 1).unsqueeze(1)
rm_pt2 = torch.cat([-sin_theta, nly], 1).unsqueeze(1)
rm = torch.cat([rm_pt1, rm_pt2], 1)
return rm
def rotate_x_y(coord, rotation_matrix):
return torch.mm(coord.unsqueeze(0), rotation_matrix).squeeze(0)
def float_equals(a, b, epsilon):
return True if abs(a - b) < epsilon else False
def get_argmax_list(vals, epsilon, minlist=False, maxlen=None):
mult = -1 if minlist else 1
max_ind = []
for i, v in enumerate(vals):
if not max_ind or float_equals(max_ind[0][1], v, epsilon):
if maxlen and len(max_ind) == maxlen:
continue
max_ind.append((i, v))
elif mult * (v - max_ind[0][1]) > 0:
max_ind = [(i, v)]
return max_ind
def get_firstmax(vals, epsilon, minlist=False):
return get_argmax_list(vals, epsilon, minlist, 1)[0]
# N -> batch size in training
# D -> num target coord per element
# Viewer pos, viewer_look are N x 3 tensors
# Batched target coords is a N x D x 3 tensor
# Output is a N x D x 3 tensor
def get_xyz_viewer_look_coords_batched(viewer_pos, viewer_look, batched_target_coords):
# First verify the sizing and unsqueeze if necessary
btc_sizes = batched_target_coords.size()
vp_sizes = viewer_pos.size()
vl_sizes = viewer_look.size()
if len(btc_sizes) > 3 or len(vp_sizes) > 2 or len(vl_sizes) > 2:
raise Exception("One input has too many dimensions")
if btc_sizes[-1] != 3 or vp_sizes[-1] != 3 or vl_sizes[-1] != 3:
raise Exception("The last dimension of all inputs should be size 3")
if len(btc_sizes) < 3:
for i in range(3 - len(btc_sizes)):
batched_target_coords = batched_target_coords.unsqueeze(0)
if len(vp_sizes) == 1:
viewer_pos = viewer_pos.unsqueeze(0)
if len(vl_sizes) == 1:
viewer_look = viewer_look.unsqueeze(0)
n = batched_target_coords.size()[0]
d = batched_target_coords.size()[1]
# Handle xy and z separately
# XY = N X D x 2
xy = batched_target_coords[:, :, 0:2].double()
# Z = N x D x 1
z = batched_target_coords[:, :, 2].unsqueeze(2).double()
## XY
# Shift such that viewer pos is the origin
# VPXY, VLXY: N x 2
vpxy = viewer_pos.double()[:, 0:2]
vlxy = viewer_look.double()[:, 0:2]
vpxy_to_vlxy = vlxy - vpxy
# VPXY to XY: N x D x 2
vpxy_to_xy = xy - vpxy.unsqueeze(1).expand(n, d, -1)
# Rotate them around the viewer position such that a normalized
# viewer look vector would be (0, 1)
# Rotation_matrix: N x 2 x 2
rotation_matrix = get_rotation_matrix(viewer_pos, viewer_look)
# N x 1 x 2 mm N x 2 x 2 ==> N x 1 x 2 ==> N x 2
r_vpxy_to_vlxy = torch.bmm(vpxy_to_vlxy.unsqueeze(1), rotation_matrix).unsqueeze(1)
# RM: N x 2 x 2 ==> N x D x 2 x 2
expanded_rm = rotation_matrix.unsqueeze(1).expand(n, d, 2, 2).contiguous().view(-1, 2, 2)
# N x D x 2 ==> N*D x 1 x 2 mm N*D x 2 x 2 ==> N*D x 1 x 2 ==> N x D x 2
reshape_vpxy_to_xy = vpxy_to_xy.contiguous().view(-1, 1, 2)
r_vpxy_to_xy = torch.bmm(reshape_vpxy_to_xy, expanded_rm).contiguous().view(n, d, 2)
# N x D x 2
# Get the xy position in this rotated coord system with rvl as the origin
rvl_to_rxy = r_vpxy_to_xy - r_vpxy_to_vlxy.squeeze(1).expand(n, d, 2)
## Z
# VLZ = N x 1
vlz = viewer_look.double()[:, 2]
# Z = N x D x 1
diffz = z - vlz.view(-1, 1, 1).expand(n, d, -1)
## Combine
# rvl_to_rxy: N x D x 2, diffz: N x D x 1
new_xyz = torch.cat([rvl_to_rxy, diffz], 2)
return new_xyz
def get_dir_dist(viewer_pos, viewer_look, batched_target_coords):
if len(batched_target_coords.size()) == 1:
batched_target_coords = batched_target_coords.unsqueeze(0)
xyz = get_xyz_viewer_look_coords_batched(viewer_pos, viewer_look, batched_target_coords)
dist = xyz.abs()
direction = xyz.gt(0).double() - xyz.lt(0).double()
return direction, dist
def get_sampled_direction_vec(viewer_pos, viewer_look, target_coord):
directions, dists = get_dir_dist(viewer_pos, viewer_look, target_coord)
dists = dists.squeeze()
directions = directions.squeeze()
ndists = dists / sum(dists)
dim = np.random.choice(3, p=ndists)
direction = directions[dim].item()
dim_l = [(0 if i == dim else 1) for i in range(3)]
dir_l = [0, 1] if direction == -1 else [1, 0]
return torch.tensor(dim_l + dir_l, dtype=torch.long)
def get_max_direction_vec(viewer_pos, viewer_look, target_coord):
directions, dists = get_dir_dist(viewer_pos, viewer_look, target_coord)
dists = dists.squeeze()
directions = directions.squeeze()
ndists = dists / sum(dists)
dim = np.argmax(ndists)
direction = directions[dim].item()
dim_l = [(0 if i == dim else 1) for i in range(3)]
dir_l = [0, 1] if direction == -1 else [1, 0]
return torch.tensor(dim_l + dir_l, dtype=torch.long)
# outputs a dense voxel rep (np array) from a sparse one.
# size should be a tuple of (H, W, D) for the desired voxel representation
# useid=True puts the block id into the voxel representation,
# otherwise put a 1
def densify(blocks, size, center=(0, 0, 0), useid=False):
V = np.zeros((size[0], size[1], size[2]), dtype="int32")
offsets = (size[0] // 2 - center[0], size[1] // 2 - center[1], size[2] // 2 - center[2])
for b in blocks:
x = b[0][0] + offsets[0]
y = b[0][1] + offsets[1]
z = b[0][2] + offsets[2]
if x >= 0 and y >= 0 and z >= 0 and x < size[0] and y < size[1] and z < size[2]:
if type(b[1]) is int:
V[x, y, z] = b[1]
else:
V[x, y, z] = b[1][0]
if not useid:
V[V > 0] = 1
return V, offsets
def center_of_mass(S, seg=None):
seg = seg or [True for i in S]
if len(S[0]) == 2:
m = list(np.round(np.mean([S[i][0] for i in range(len(S)) if seg[i]], axis=0)))
else:
m = list(np.round(np.mean([S[i] for i in range(len(S)) if seg[i]], axis=0)))
return [int(i) for i in m]
def check_l1_dist(a, b, d):
return abs(b[0] - a[0]) <= d[0] and abs(b[1] - a[1]) <= d[1] and abs(b[2] - a[2]) <= d[2]
def sparsify_segment(seg, context):
seg_sparse = []
for i, use in enumerate(seg):
if use:
seg_sparse.append(context[i])
return seg_sparse
def get_dense_array_from_sl(sparse_shape, sl, useid):
    center = [sl // 2, sl // 2, sl // 2]
    # densify already returns (array, offsets); the old np.asarray wrapper was redundant and fragile
    shape_dense, _ = densify(sparse_shape, [sl, sl, sl], center=center, useid=useid)
    return shape_dense
def convert_sparse_context_seg_to_example(
context_sparse, seg_sparse, c_sl, s_sl, useid, vis=False
):
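    """Densify context and segment, subtract (or, for visualization, overlay) the
    segment from the context, shift the segment to the origin, and encode the
    segment's original corner as a flat target index."""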
context_dense = get_dense_array_from_sl(context_sparse, c_sl, useid)
seg_dense_uncentered = get_dense_array_from_sl(seg_sparse, c_sl, useid)
# For visualization
if vis:
context_dense = context_dense + seg_dense_uncentered
else:
context_dense = context_dense - seg_dense_uncentered
shifted_seg_sparse, shift_vec = shift_subsegment_corner(seg_sparse)
seg_dense_centered = get_dense_array_from_sl(shifted_seg_sparse, s_sl, useid)
target_coord = [-x for x in shift_vec]
target_index = coord_to_index(target_coord, c_sl)
return [
torch.from_numpy(context_dense),
torch.from_numpy(seg_dense_centered),
torch.tensor([target_index]),
]
############################################################################
# For these "S" is a list of blocks in ((x,y,z),(id, meta)) format
# the segment is a list of the same length as S with either True or False
# at each entry marking whether that block is in the segment
# each outputs a list of blocks in ((x,y,z),(id, meta)) format
def shift_negative_vec(S, segment, vec, args):
N = []
for s in range(len(segment)):
if not segment[s]:
new_coords = tuple(np.add(S[s][0], vec))
N.append([new_coords, S[s][1]])
else:
if "seg_id" in args:
N.append([S[s][0], (args["seg_id"], S[s][1][1])])
else:
N.append(S[s])
return N
def shift_negative(S, segment, args):
    """takes the blocks not in the segment and shifts them randomly"""
    shift_max = args["shift_max"]
    shift_vec = random_int_triple(-shift_max, shift_max)
    return shift_negative_vec(S, segment, shift_vec, args)
def rotate_negative(S, segment, args):
c = center_of_mass(S, seg=segment)
r = random.choice([1, -1])
return [rotate_block(S[i], c, r) if segment[i] else S[i] for i in range(len(S))]
def replace_negative(S, segment, args):
data = args["data"]
oseg, oS = data.get_positive()
c_pos = center_of_mass(S, seg=segment)
c_neg = center_of_mass(oS, seg=oseg)
offset = np.add(c_pos, -np.array(c_neg))
N = [S[i] for i in range(len(S)) if not segment[i]]
return N + [shift_block(oS[i], offset) for i in range(len(oS)) if oseg[i]]
class NegativeSampler:
def __init__(self, dataloader, shift_max=10, ntype_probs=[0.6, 0.2, 0.2]):
# self.data_prob = [x['prob'] for x in dataloaders.values()]
# self.dataloaders = [x['data'] for x in dataloaders.values()]
self.dataloader = dataloader
self.shift_max = shift_max
self.ntype_probs = ntype_probs
self.negative_samplers = [shift_negative, rotate_negative, replace_negative]
def build_negative(self, S, segment):
negative_fn = np.random.choice(self.negative_samplers, p=self.ntype_probs)
return negative_fn(S, segment, {"shift_max": self.shift_max, "data": self.dataloader})
| 34.27451
| 99
| 0.63808
| 3,103
| 19,228
| 3.775701
| 0.145343
| 0.023045
| 0.04353
| 0.019461
| 0.253158
| 0.196142
| 0.16661
| 0.127859
| 0.089962
| 0.078696
| 0
| 0.023307
| 0.225713
| 19,228
| 560
| 100
| 34.335714
| 0.763635
| 0.136364
| 0
| 0.121294
| 0
| 0
| 0.079521
| 0.007369
| 0
| 0
| 0
| 0.001786
| 0
| 1
| 0.115903
| false
| 0
| 0.024259
| 0.018868
| 0.245283
| 0.002695
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43c0a7c7b3cc424327d10e1b990bf63c250e8eb4
| 4,907
|
py
|
Python
|
CryptoAttacks/tests/Block/test_gcm.py
|
akbarszcz/CryptoAttacks
|
ae675d016b314414a3dc9b23c7d8a32da4c62457
|
[
"MIT"
] | 54
|
2017-03-28T23:46:58.000Z
|
2022-02-23T01:53:38.000Z
|
CryptoAttacks/tests/Block/test_gcm.py
|
maximmasiutin/CryptoAttacks
|
d1d47d3cb2ce38738a60b728bc35ce80bfe64374
|
[
"MIT"
] | null | null | null |
CryptoAttacks/tests/Block/test_gcm.py
|
maximmasiutin/CryptoAttacks
|
d1d47d3cb2ce38738a60b728bc35ce80bfe64374
|
[
"MIT"
] | 13
|
2017-03-31T06:07:23.000Z
|
2021-11-20T19:01:30.000Z
|
#!/usr/bin/python
from __future__ import absolute_import, division, print_function
import subprocess
from builtins import bytes, range
from os.path import abspath, dirname
from os.path import join as join_path
from random import randint
from CryptoAttacks.Block.gcm import *
from CryptoAttacks.Utils import log
def test_polynomials():
print("Test polynomials")
Pmod = GF_2k_generator(128, [128,7,2,1,0])
P = Pmod(0b10011010101100110100100110011101100110010111111000111011101000000110110100010101000101100100111100011001010100100110100111011000)
Q = Pmod(0b01111010101010110111000011011100010011101111000001010000011000010000111010001111100001111010110001001000011101000011111110010101)
print(P.to_bits(), bin(P.to_int()), P)
print(Q.to_bits(), bin(Q.to_int()), Q)
w = P*Q
print(w.to_bits(), bin(w.to_int()), w)
assert Q.coefficients == Pmod(Q.coefficients).coefficients
assert Q.coefficients == Pmod(Q.to_int()).coefficients
assert Q.coefficients == Pmod(Q.to_bytes()).coefficients
print('')
Pmod = GF_2k_generator(10, [11,7,2,1,0])
c1 = Pmod(1)
c2 = Pmod(0)
c3 = Pmod(0)
c4 = Pmod(0)
polynomial1 = Polynomial_128([c1,c2,c3,c4])
c1 = Pmod(1236)
c2 = Pmod(0)
c3 = Pmod(0)
c4 = Pmod(0)
polynomial2 = Polynomial_128([c1,c2,c3,c4])
print(polynomial1)
print(polynomial2)
print("+", polynomial1 + polynomial2)
print("*", polynomial1 * polynomial2)
q = polynomial1 / polynomial2
r = polynomial1 % polynomial2
print("/", q)
print("%", r)
print('')
print(polynomial1)
print(polynomial2*q + r)
print('')
def test_gcm():
print("Test GCM")
plaintext = bytes(b'hn9YA(F BW&B (W&&W(RT&WEF f7*WB FTgsdc')
additional = bytes(b'j gej8g0SRYH8s 8s9yf sgd78taDS* GASyd ')
key = bytes(b'xgrtjdh&LA28XNwh')
nonce = bytes(b'a drO*1@((js')
ciphertext, tag = gcm_encrypt(plaintext, additional, key, nonce)
assert gcm_verify(tag, ciphertext, additional, key, nonce)
blocks = aes_bytes_to_poly_blocks(ciphertext, additional)
ciphertext2, additional2 = poly_blocks_to_aes_bytes(blocks)
assert ciphertext == ciphertext2
assert additional == additional2
def polynomial_factors_product(factorization):
"""factorization: [(poly1, power), (poly2, power)]"""
result = factorization[0][0].one_element()
for f, f_degree in factorization:
result *= f**f_degree
return result
def test_factor():
print("Test factor")
Pmod = GF_2k_generator(9, [9,7,2,1,0])
c1 = Pmod(31)
c2 = Pmod(0)
c3 = Pmod(0)
c4 = Pmod(3)
polynomial1 = Polynomial_128([c1,c2,c3,c4])
c1 = Pmod(237)
c2 = Pmod(1)
c3 = Pmod(0)
c4 = Pmod(10)
polynomial2 = Polynomial_128([c1,c2,c3,c4])
polynomial = polynomial1 * polynomial2
print(polynomial1)
print(polynomial2)
print(polynomial)
print(polynomial.monic())
print('')
factorization = factor_polynomial(polynomial)
print(factorization)
result = polynomial.one_element()
for f, f_degree in factorization:
result *= f**f_degree
print(result)
print('')
assert polynomial_factors_product(factorization) == polynomial.monic()
def test_repeated_nonce():
print("Test Key-Recovery Attack on GCM with Repeated Nonces")
for _ in range(3):
nonce = random_bytes(12)
key = random_bytes(16)
h = bytes(AES.new(key, AES.MODE_ECB).encrypt(bytes(b'\x00'*16)))
h = aes_polynomial(h)
ciphertexts_additionals_tags = []
for _ in range(4):
plaintext = random_bytes(randint(0, 50))
additional = random_bytes(randint(0, 50))
ciphertext, tag = gcm_encrypt(plaintext, additional, key, nonce)
ciphertexts_additionals_tags.append((ciphertext, additional, tag))
valid_ciphertext, valid_additional, valid_tag = ciphertexts_additionals_tags[0]
auth_key_candidates = recover_key_repated_nonce(ciphertexts_additionals_tags)
assert h.to_bytes() in auth_key_candidates
        # try the recovered auth-key candidates
correct_auth_key_found = False
for auth_key in auth_key_candidates:
forged_ciphertext = random_bytes(randint(0, 10))
forged_additional = random_bytes(randint(0, 10))
forged_tag = gcm_forge_tag(ciphertext=forged_ciphertext, additional=forged_additional, auth_key=auth_key,
valid_ciphertext=valid_ciphertext, valid_additional=valid_additional, valid_tag=valid_tag)
if gcm_verify(forged_tag, forged_ciphertext, forged_additional, key, nonce):
correct_auth_key_found = True
break
assert correct_auth_key_found
def run():
log.level = 'debug'
test_polynomials()
test_gcm()
test_factor()
test_repeated_nonce()
if __name__ == "__main__":
run()
| 31.254777
| 144
| 0.678419
| 604
| 4,907
| 5.304636
| 0.246689
| 0.021848
| 0.008739
| 0.011236
| 0.248127
| 0.172285
| 0.149189
| 0.105493
| 0.06804
| 0.031211
| 0
| 0.10718
| 0.210923
| 4,907
| 156
| 145
| 31.455128
| 0.7203
| 0.019156
| 0
| 0.237705
| 0
| 0
| 0.04432
| 0
| 0
| 0
| 0
| 0
| 0.07377
| 1
| 0.04918
| false
| 0
| 0.065574
| 0
| 0.122951
| 0.221311
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43c1a9b70d766525944aa92cfc1043f3d5e3bc1b
| 17,842
|
py
|
Python
|
owscapable/swe/common.py
|
b-cube/OwsCapable
|
a01815418fe982434503d6542cb18e1ac8989684
|
[
"BSD-3-Clause"
] | 1
|
2016-02-01T12:55:13.000Z
|
2016-02-01T12:55:13.000Z
|
owscapable/swe/common.py
|
b-cube/OwsCapable
|
a01815418fe982434503d6542cb18e1ac8989684
|
[
"BSD-3-Clause"
] | 1
|
2015-06-23T14:07:50.000Z
|
2015-06-23T14:07:50.000Z
|
owscapable/swe/common.py
|
b-cube/OwsCapable
|
a01815418fe982434503d6542cb18e1ac8989684
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import (absolute_import, division, print_function)
from owscapable.util import nspath_eval
from owscapable.namespaces import Namespaces
from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime
from dateutil import parser
from datetime import timedelta
from owscapable.etree import etree
def get_namespaces():
ns = Namespaces()
return ns.get_namespaces(["swe20", "xlink"])
namespaces = get_namespaces()
def nspv(path):
return nspath_eval(path, namespaces)
def make_pair(string, cast=None):
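    """Split a space-separated string into a tuple, optionally casting each part."""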
if string is None:
return None
string = string.split(" ")
if cast is not None:
try:
string = map(lambda x: cast(x), string)
except:
print("Could not cast pair to correct type. Setting to an empty tuple!")
string = ""
return tuple(string)
def get_uom(element):
uom = testXMLAttribute(element, "code")
if uom is None:
uom = testXMLAttribute(element, nspv("xlink:href"))
return uom
def get_boolean(value):
if value is None:
return None
if value is True or value.lower() in ["yes","true"]:
return True
elif value is False or value.lower() in ["no","false"]:
return False
else:
return None
def get_int(value):
try:
return int(value)
except:
return None
def get_float(value):
try:
return float(value)
except:
return None
AnyScalar = [nspv(x) for x in ["swe20:Boolean", "swe20:Count", "swe20:Quantity", "swe20:Time", "swe20:Category", "swe20:Text"]]
AnyNumerical = [nspv(x) for x in ["swe20:Count", "swe20:Quantity", "swe20:Time"]]
AnyRange = [nspv(x) for x in ["swe20:QuantityRange", "swe20:TimeRange", "swe20:CountRange", "swe20:CategoryRange"]]
class NamedObject(object):
def __init__(self, element):
# No call to super(), the type object will process that.
self.name = testXMLAttribute(element, "name")
try:
self.content = eval(element[-1].tag.split("}")[-1])(element[-1])
except IndexError:
self.content = None
except BaseException:
raise
    # Fall back to the content if the attribute does not exist
def __getattr__(self, name):
return getattr(self.content, name)
class AbstractSWE(object):
def __init__(self, element):
# Attributes
self.id = testXMLAttribute(element,"id") # string, optional
# Elements
self.extention = [] # anyType, min=0, max=X
class AbstractSWEIdentifiable(AbstractSWE):
def __init__(self, element):
super(AbstractSWEIdentifiable, self).__init__(element)
# Elements
self.identifier = testXMLValue(element.find(nspv("swe20:identifier"))) # anyURI, min=0
self.label = testXMLValue(element.find(nspv("swe20:label"))) # string, min=0
self.description = testXMLValue(element.find(nspv("swe20:description"))) # string, min=0
class AbstractDataComponent(AbstractSWEIdentifiable):
def __init__(self, element):
super(AbstractDataComponent, self).__init__(element)
# Attributes
self.definition = testXMLAttribute(element,"definition") # anyURI, required
self.updatable = get_boolean(testXMLAttribute(element,"updatable")) # boolean, optional
self.optional = get_boolean(testXMLAttribute(element,"optional")) or False # boolean, default=False
class AbstractSimpleComponent(AbstractDataComponent):
def __init__(self, element):
super(AbstractSimpleComponent, self).__init__(element)
# Attributes
self.referenceFrame = testXMLAttribute(element,"referenceFrame") # anyURI, optional
self.axisID = testXMLAttribute(element,"axisID") # string, optional
# Elements
self.quality = filter(None, [Quality(q) for q in [e.find('*') for e in element.findall(nspv("swe20:quality"))] if q is not None])
try:
self.nilValues = NilValues(element.find(nspv("swe20:nilValues")))
except:
self.nilValues = None
class Quality(object):
def __new__(cls, element):
t = element.tag.split("}")[-1]
if t == "Quantity":
return Quantity(element)
elif t == "QuantityRange":
return QuantityRange(element)
elif t == "Category":
return Category(element)
elif t == "Text":
return Text(element)
else:
return None
class NilValues(AbstractSWE):
def __init__(self, element):
super(NilValues, self).__init__(element)
self.nilValue = filter(None, [nilValue(x) for x in element.findall(nspv("swe20:nilValue"))]) # string, min=0, max=X
class nilValue(object):
def __init__(self, element):
self.reason = testXMLAttribute(element, "reason")
self.value = testXMLValue(element)
class AllowedTokens(AbstractSWE):
def __init__(self, element):
super(AllowedTokens, self).__init__(element)
self.value = filter(None, [testXMLValue(x) for x in element.findall(nspv("swe20:value"))]) # string, min=0, max=X
self.pattern = testXMLValue(element.find(nspv("swe20:pattern"))) # string (Unicode Technical Standard #18, Version 13), min=0
class AllowedValues(AbstractSWE):
def __init__(self, element):
super(AllowedValues, self).__init__(element)
self.value = filter(None, map(lambda x: get_float(x), [testXMLValue(x) for x in element.findall(nspv("swe20:value"))]))
self.interval = filter(None, [make_pair(testXMLValue(x)) for x in element.findall(nspv("swe20:interval"))])
self.significantFigures = get_int(testXMLValue(element.find(nspv("swe20:significantFigures")))) # integer, min=0
class AllowedTimes(AbstractSWE):
def __init__(self, element):
super(AllowedTimes, self).__init__(element)
self.value = filter(None, [testXMLValue(x) for x in element.findall(nspv("swe20:value"))])
self.interval = filter(None, [make_pair(testXMLValue(x)) for x in element.findall(nspv("swe20:interval"))])
self.significantFigures = get_int(testXMLValue(element.find(nspv("swe20:significantFigures")))) # integer, min=0
class Boolean(AbstractSimpleComponent):
def __init__(self, element):
super(Boolean, self).__init__(element)
# Elements
"""
6.2.1 Boolean
        A Boolean representation of a property can take only two values that should be "true/false" or "yes/no".
"""
        self.value = get_boolean(testXMLValue(element.find(nspv("swe20:value")))) # boolean, min=0, max=1
class Text(AbstractSimpleComponent):
def __init__(self, element):
super(Text, self).__init__(element)
# Elements
"""
Req 6. A textual representation shall at least consist of a character string.
"""
self.value = testXMLValue(element.find(nspv("swe20:value"))) # string, min=0, max=1
try:
self.constraint = AllowedTokens(element.find(nspv("swe20:constraint/swe20:AllowedTokens"))) # AllowedTokens, min=0, max=1
except:
self.constraint = None
class Category(AbstractSimpleComponent):
def __init__(self, element):
super(Category, self).__init__(element)
# Elements
self.codeSpace = testXMLAttribute(element.find(nspv("swe20:codeSpace")), nspv("xlink:href")) # Reference, min=0, max=1
self.value = testXMLValue(element.find(nspv("swe20:value"))) # string, min=0, max=1
try:
self.constraint = AllowedTokens(element.find(nspv("swe20:constraint/swe20:AllowedTokens"))) # AllowedTokens, min=0, max=1
except:
self.constraint = None
class CategoryRange(Category):
def __init__(self, element):
super(CategoryRange, self).__init__(element)
# Elements
value = testXMLValue(element.find(nspv("swe20:value")))
self.values = make_pair(value) if value is not None else None
class Count(AbstractSimpleComponent):
def __init__(self, element):
super(Count, self).__init__(element)
# Elements
self.value = get_int(testXMLValue(element.find(nspv("swe20:value")))) # integer, min=0, max=1
try:
self.constraint = AllowedValues(element.find(nspv("swe20:constraint/swe20:AllowedValues"))) # AllowedValues, min=0, max=1
except:
self.constraint = None
class CountRange(Count):
def __init__(self, element):
super(CountRange, self).__init__(element)
# Elements
value = testXMLValue(element.find(nspv("swe20:value")))
self.value = make_pair(value,int) if value is not None else None
class Quantity(AbstractSimpleComponent):
def __init__(self, element):
super(Quantity, self).__init__(element)
# Elements
self.uom = get_uom(element.find(nspv("swe20:uom")))
self.value = get_float(testXMLValue(element.find(nspv("swe20:value")))) # double, min=0, max=1
try:
self.constraint = AllowedValues(element.find(nspv("swe20:constraint/swe20:AllowedValues"))) # AllowedValues, min=0, max=1
except:
self.constraint = None
class QuantityRange(Quantity):
def __init__(self, element):
super(QuantityRange, self).__init__(element)
# Elements
value = testXMLValue(element.find(nspv("swe20:value")))
self.value = make_pair(value,float) if value is not None else None
def get_time(value, referenceTime, uom):
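    """Parse an absolute time, or treat the value as an offset from
    referenceTime in the unit given by uom (s/min/h/d)."""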
try:
value = parser.parse(value)
except (AttributeError, ValueError): # Most likely an integer/float using a referenceTime
try:
if uom.lower() == "s":
value = referenceTime + timedelta(seconds=float(value))
elif uom.lower() == "min":
value = referenceTime + timedelta(minutes=float(value))
elif uom.lower() == "h":
value = referenceTime + timedelta(hours=float(value))
elif uom.lower() == "d":
value = referenceTime + timedelta(days=float(value))
except (AttributeError, ValueError):
pass
except OverflowError: # Too many numbers (> 10) or INF/-INF
if value.lower() == "inf":
value = InfiniteDateTime()
elif value.lower() == "-inf":
value = NegativeInfiniteDateTime()
return value
class Time(AbstractSimpleComponent):
def __init__(self, element):
super(Time, self).__init__(element)
# Elements
self.uom = get_uom(element.find(nspv("swe20:uom")))
try:
self.constraint = AllowedTimes(element.find(nspv("swe20:constraint/swe20:AllowedTimes"))) # AllowedTimes, min=0, max=1
except:
self.constraint = None
# Attributes
self.localFrame = testXMLAttribute(element,"localFrame") # anyURI, optional
try:
self.referenceTime = parser.parse(testXMLAttribute(element,"referenceTime")) # dateTime, optional
except (AttributeError, ValueError):
self.referenceTime = None
value = testXMLValue(element.find(nspv("swe20:value"))) # TimePosition, min=0, max=1
self.value = get_time(value, self.referenceTime, self.uom)
class TimeRange(AbstractSimpleComponent):
def __init__(self, element):
super(TimeRange, self).__init__(element)
# Elements
self.uom = get_uom(element.find(nspv("swe20:uom")))
try:
self.constraint = AllowedTimes(element.find(nspv("swe20:constraint/swe20:AllowedTimes"))) # AllowedTimes, min=0, max=1
except:
self.constraint = None
# Attributes
self.localFrame = testXMLAttribute(element,"localFrame") # anyURI, optional
try:
self.referenceTime = parser.parse(testXMLAttribute(element,"referenceTime")) # dateTime, optional
except (AttributeError, ValueError):
self.referenceTime = None
values = make_pair(testXMLValue(element.find(nspv("swe20:value")))) # TimePosition, min=0, max=1
self.value = [get_time(t, self.referenceTime, self.uom) for t in values]
class DataRecord(AbstractDataComponent):
def __init__(self, element):
super(DataRecord, self).__init__(element)
# Elements
self.field = [Field(x) for x in element.findall(nspv("swe20:field"))]
def get_by_name(self, name):
return next((x for x in self.field if x.name == name), None)
class Field(NamedObject):
def __init__(self, element):
super(Field, self).__init__(element)
class Vector(AbstractDataComponent):
def __init__(self, element):
super(Vector, self).__init__(element)
# Elements
self.coordinate = [Coordinate(x) for x in element.findall(nspv("swe20:coordinate"))]
# Attributes
self.referenceFrame = testXMLAttribute(element,"referenceFrame") # anyURI, required
self.localFrame = testXMLAttribute(element,"localFrame") # anyURI, optional
def get_by_name(self, name):
return next((x for x in self.coordinate if x.name == name), None)
class Coordinate(NamedObject):
def __init__(self, element):
super(Coordinate, self).__init__(element)
#if element[-1].tag not in AnyNumerical:
# print "Coordinate does not appear to be an AnyNumerical member"
class DataChoice(AbstractDataComponent):
def __init__(self, element):
super(DataChoice, self).__init__(element)
self.item = [Item(x) for x in element.findall(nspv("swe20:item"))]
def get_by_name(self, name):
return next((x for x in self.item if x.name == name), None)
class Item(NamedObject):
def __init__(self, element):
super(Item, self).__init__(element)
class DataArray(AbstractDataComponent):
def __init__(self, element):
super(DataArray, self).__init__(element)
self.elementCount = element.find(nspv("swe20:elementCount/swe20:Count")) # required
self.elementType = ElementType(element.find(nspv("swe20:elementType"))) # required
self.values = testXMLValue(element.find(nspv("swe20:values")))
try:
self.encoding = AbstractEncoding(element.find(nspv("swe20:encoding")))
except:
self.encoding = None
class Matrix(AbstractDataComponent):
def __init__(self, element):
super(Matrix, self).__init__(element)
self.elementCount = element.find(nspv("swe20:elementCount/swe20:Count")) # required
self.elementType = ElementType(element.find(nspv("swe20:elementType"))) # required
self.encoding = AbstractEncoding(element.find(nspv("swe20:encoding")))
self.values = testXMLValue(element.find(nspv("swe20:values")))
self.referenceFrame = testXMLAttribute(element, "referenceFrame") # anyURI, required
self.localFrame = testXMLAttribute(element, "localFrame") # anyURI, optional
class DataStream(AbstractSWEIdentifiable):
def __init__(self, element):
super(DataStream, self).__init__(element)
self.elementCount = element.find(nspv("swe20:elementCount/swe20:Count")) # optional
self.elementType = ElementType(element.find(nspv("swe20:elementType"))) # optional
self.encoding = AbstractEncoding(element.find(nspv("swe20:encoding")))
self.values = testXMLValue(element.find(nspv("swe20:values")))
class ElementType(NamedObject):
def __init__(self, element):
super(ElementType, self).__init__(element)
class AbstractEncoding(object):
def __new__(cls, element):
t = element[-1].tag.split("}")[-1]
if t == "TextEncoding":
return super(AbstractEncoding, cls).__new__(TextEncoding, element)
elif t == "XMLEncoding":
return super(AbstractEncoding, cls).__new__(XMLEncoding, element)
elif t == "BinaryEncoding":
return super(AbstractEncoding, cls).__new__(BinaryEncoding, element)
class TextEncoding(AbstractEncoding):
def __init__(self, element):
self.tokenSeparator = testXMLAttribute(element[-1], "tokenSeparator") # string, required
self.blockSeparator = testXMLAttribute(element[-1], "blockSeparator") # string, required
self.decimalSeparator = testXMLAttribute(element[-1], "decimalSeparator") or "." # string, optional, default="."
self.collapseWhiteSpaces = get_boolean(testXMLAttribute(element[-1], "collapseWhiteSpaces")) or True # boolean, optional, default=True
class XMLEncoding(AbstractEncoding):
def __init__(self, element):
raise NotImplementedError
class BinaryEncoding(AbstractEncoding):
def __init__(self, element):
raise NotImplementedError
| 43.200969
| 172
| 0.619325
| 1,838
| 17,842
| 5.84494
| 0.121872
| 0.04105
| 0.054454
| 0.072605
| 0.567067
| 0.496789
| 0.380434
| 0.3459
| 0.314717
| 0.314717
| 0
| 0.015447
| 0.267066
| 17,842
| 412
| 173
| 43.305825
| 0.806072
| 0.081045
| 0
| 0.417476
| 0
| 0
| 0.0886
| 0.021916
| 0
| 0
| 0
| 0
| 0
| 1
| 0.152104
| false
| 0.003236
| 0.022654
| 0.016181
| 0.372168
| 0.006472
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43c2d697bacb0820c4e842d6861cb1732909d8a0
| 11,386
|
py
|
Python
|
main_fed.py
|
gao969/scaffold-dgc-clustering
|
9f259dfdf0897dcb1dece2e1197268f585f54a69
|
[
"MIT"
] | null | null | null |
main_fed.py
|
gao969/scaffold-dgc-clustering
|
9f259dfdf0897dcb1dece2e1197268f585f54a69
|
[
"MIT"
] | null | null | null |
main_fed.py
|
gao969/scaffold-dgc-clustering
|
9f259dfdf0897dcb1dece2e1197268f585f54a69
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import copy
import numpy as np
from torchvision import datasets, transforms
import torch
import os
import torch.distributed as dist
from utils.sampling import mnist_iid, mnist_noniid, cifar_iid
from utils.options import args_parser
from models.Update import LocalUpdate
from models.Update import LocalUpdateF
from models.Nets import MLP, CNNMnist, CNNCifar
from models.Fed import FedAvg
from models.test import test_img
from torch.multiprocessing import Process
from deep_gradient_compression import DGC
import json
# __name__ is a built-in variable; when this file (main_fed.py) is executed directly it defaults to '__main__'.
# If another .py file imports main_fed.py, then __name__ inside main_fed.py is the module name instead.
if __name__ == '__main__':
# parse args
args = args_parser()
args.device = torch.device('cuda:{}'.format(args.gpu))
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
rank = 0
device_id = rank
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
dist.init_process_group(backend='gloo', rank=rank, world_size=args.world_size)
# if torch.cuda.is_available() and args.gpu != -1 else 'cpu'
# load dataset and split users
if args.dataset == 'mnist':
        # ToTensor() scales data to (0, 1); Normalize() applies (data - 0.1307) / 0.3081, spreading it to roughly (-1, 1)
trans_mnist = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
if trans_mnist is not None:
print(1)
print(trans_mnist)
        # training set (60,000 images) and test set (10,000 images)
dataset_train = datasets.MNIST('../data/mnist/', train=True, download=True, transform=trans_mnist)
dataset_test = datasets.MNIST('../data/mnist/', train=False, download=True, transform=trans_mnist)
# sample users
        # non-IID data
if args.iid:
dict_users = mnist_iid(dataset_train, args.num_users)
else:
dict_users = mnist_noniid(dataset_train, args.num_users)
elif args.dataset == 'cifar':
trans_cifar = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
dataset_train = datasets.CIFAR10('../data/cifar', train=True, download=True, transform=trans_cifar)
dataset_test = datasets.CIFAR10('../data/cifar', train=False, download=True, transform=trans_cifar)
if args.iid:
dict_users = cifar_iid(dataset_train, args.num_users)
else:
exit('Error: only consider IID setting in CIFAR10')
else:
exit('Error: unrecognized dataset')
img_size = dataset_train[0][0].shape
# print('df ',img_size) [1,28,28]
# build model
# print(args.model)
if args.model == 'cnn' and args.dataset == 'cifar':
net_glob = CNNCifar(args=args).to(args.device)
elif args.model == 'cnn' and args.dataset == 'mnist':
net_glob = CNNMnist(args=args).to(args.device)
elif args.model == 'mlp':
len_in = 1
for x in img_size:
            # print('x value', x)
len_in *= x
net_glob = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)
# add
control_global = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)
else:
exit('Error: unrecognized model')
    # switch the model to training mode
net_glob.train()
print(net_glob)
control_weights =control_global.state_dict()
# copy weights
    # initialize the global weights
w_glob = net_glob.state_dict()
c_glob = copy.deepcopy(net_glob.state_dict())
# print(w_glob)
# training
loss_train = []
accuracy = []
cv_loss, cv_acc = [], []
val_loss_pre, counter = 0, 0
net_best = None
best_loss = None
val_acc_list, net_list = [], []
    count = 0
test_acc_list = []
if args.all_clients:
print("Aggregation over all clients")
w_locals = [w_glob for i in range(args.num_users)]
# add
else:
        # initialize the local control weights
c_local = [MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device) for i in
range(args.num_users)]
for net in c_local:
net.load_state_dict(control_weights)
delta_c = copy.deepcopy(net_glob.state_dict())
# delta_x = copy.deepcopy(net_glob.state_dict())
# with open("test.txt", "w") as f:
# for i in range(0, len(c_local)):
# for k,v in c_local[i].state_dict().items():
# f.write(f"{k},{v}\n".format(k,v))
# with open("test.txt", "a") as f:
# for i in range(0, len(c_local)):
# for k, v in w_locals[i].items():
# f.write(f"{k},{v}\n".format(k, v))
    # add: initialize the control-variate deltas
# print("why?")
for iter in range(args.epochs):
        # reset the control-variate deltas at the start of each round
for i in delta_c:
delta_c[i] = 0.0
# for i in delta_x:
# delta_x[i] = 0.0
loss_locals = []
if not args.all_clients:
w_locals = []
m = max(int(args.frac * args.num_users), 1)
        # randomly sample m clients for this round
idxs_users = np.random.choice(range(args.num_users), m, replace=False)
for idx in idxs_users:
            # local update via SGD with momentum
local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
            w, loss, local_delta_c, local_delta, control_local_w = local.train(
                net=copy.deepcopy(net_glob).to(args.device), control_local=c_local[idx],
                control_global=control_global, rank=rank, device_id=device_id, size=args.world_size)
# add
if iter != 0:
c_local[idx].load_state_dict(control_local_w)
if args.all_clients:
w_locals[idx] = copy.deepcopy(w)
else:
w_locals.append(copy.deepcopy(w))
# add
loss_locals.append(copy.deepcopy(loss))
# add
for i in delta_c:
if iter != 0:
delta_c[i] += w[i]
else:
delta_c[i] += local_delta_c[i]
# delta_x[i] += local_delta[i]
# add
# update the delta C
for i in delta_c:
delta_c[i] /= m
# delta_x[i] /= m
# update global weights
w_glob = FedAvg(w_locals)
        # add: update the global control variate c and global weights w
# w_glob = net_glob.state_dict()
control_global_w = control_global.state_dict()
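        # This appears to implement the SCAFFOLD server-side control update:
        # c <- c + (|S| / N) * mean_i(delta_c_i), with |S| = m sampled clients
        # and N = args.num_users (delta_c was already averaged over m above).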
for i in control_global_w:
            if iter != 0:
# w_glob[i] = delta_x[i]
# else:
# w_glob[i] += delta_x[i]
control_global_w[i] += (m / args.num_users) * delta_c[i]
# copy weight to net_glob
net_glob.load_state_dict(w_glob)
# add
control_global.load_state_dict(control_global_w)
# print loss
loss_avg = sum(loss_locals) / len(loss_locals)
print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg))
loss_train.append(loss_avg)
# acc_train, loss_train = test_img(net_glob, dataset_train, args)
acc_test, loss_test = test_img(net_glob, dataset_test, args)
accuracy.append(acc_test)
# add
        for c in range(args.num_users):
            local_model = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[c])  # was dict_users[idx]; instantiated per client but otherwise unused
torch.cuda.empty_cache()
# net_glob.eval()
# print("Training accuracy: {:.2f}".format(acc_train))
# print("Testing accuracy: {:.2f}".format(acc_test))
    #######################################################################################################################
    # FedAvg baseline
# build model
if args.model == 'cnn' and args.dataset == 'cifar':
net_globF = CNNCifar(args=args).to(args.device)
elif args.model == 'cnn' and args.dataset == 'mnist':
net_globF = CNNMnist(args=args).to(args.device)
elif args.model == 'mlp':
len_in = 1
for x in img_size:
len_in *= x
net_globF = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)
else:
exit('Error: unrecognized model')
print(net_globF)
net_globF.train()
# copy weights
w_globF = net_globF.state_dict()
# training
loss_trainF = []
accuracyF = []
cv_loss, cv_acc = [], []
val_loss_pre, counter = 0, 0
net_best = None
best_loss = None
val_acc_list, net_list = [], []
if args.all_clients:
print("Aggregation over all clients")
w_localsF = [w_globF for i in range(args.num_users)]
for iter in range(args.epochs):
loss_locals = []
if not args.all_clients:
w_localsF = []
m = max(int(args.frac * args.num_users), 1)
idxs_users = np.random.choice(range(args.num_users), m, replace=False)
for idx in idxs_users:
localF = LocalUpdateF(args=args, dataset=dataset_train, idxs=dict_users[idx])
w, loss = localF.train(net=copy.deepcopy(net_globF).to(args.device))
if args.all_clients:
w_localsF[idx] = copy.deepcopy(w)
else:
w_localsF.append(copy.deepcopy(w))
loss_locals.append(copy.deepcopy(loss))
# update global weights
w_globF = FedAvg(w_localsF)
# copy weight to net_globF
net_globF.load_state_dict(w_globF)
# print loss
loss_avgF = sum(loss_locals) / len(loss_locals)
print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avgF))
loss_trainF.append(loss_avgF)
acc_test, loss_test = test_img(net_globF, dataset_test, args)
accuracyF.append(acc_test)
# plot loss curve
plt.figure()
print(loss_train, loss_trainF)
plt.plot(range(len(loss_train)), loss_train, label='Scaffold', zorder=2)
plt.plot(range(len(loss_trainF)), loss_trainF, 'r', label='FedAvg',zorder=1)
plt.ylabel('train_loss')
plt.xlabel('epochs')
plt.legend(loc='best')
plt.savefig('./save/fed_{}_{}_{}_{}_iid{}.png'.format(args.dataset, args.model, args.epochs, 'train_loss', args.iid))
# testing
net_glob.eval()
acc_train, loss_train = test_img(net_glob, dataset_train, args)
acc_test, loss_test = test_img(net_glob, dataset_test, args)
print("Training accuracy: {:.2f}".format(acc_train))
print("Testing accuracy: {:.2f}".format(acc_test))
# plot loss curve
plt.figure()
# plt.plot((np.arange(1, len(accuracy)), 1), accuracy, 'r')
plt.plot(range(len(accuracy)), accuracy, label='Scaffold', zorder=2)
plt.plot(range(len(accuracyF)), accuracyF, 'r', label='FedAvg', zorder=1)
plt.ylabel('test_acc')
plt.xlabel('epochs')
plt.legend(loc='best')
plt.savefig('./save/fed_{}_{}_{}_{}_iid{}.png'.format(args.dataset, args.model, args.epochs, 'acc_test', args.iid))
| 35.033846
| 136
| 0.584578
| 1,505
| 11,386
| 4.2
| 0.168771
| 0.021041
| 0.022781
| 0.016137
| 0.528239
| 0.469862
| 0.399462
| 0.347888
| 0.313083
| 0.304224
| 0
| 0.014306
| 0.25101
| 11,386
| 324
| 137
| 35.141975
| 0.7269
| 0.14711
| 0
| 0.357513
| 0
| 0
| 0.065379
| 0.006985
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.093264
| 0
| 0.093264
| 0.056995
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43c3bca28b83f4b20caa188f5ac7f59f03173404
| 2,085
|
py
|
Python
|
b_lambda_layer_common_test/integration/infrastructure/function_with_unit_tests.py
|
gkazla/B.LambdaLayerCommon
|
1a4f9cd3d8b7e447c8467bd7dde50cb9e9a6e980
|
[
"Apache-2.0"
] | null | null | null |
b_lambda_layer_common_test/integration/infrastructure/function_with_unit_tests.py
|
gkazla/B.LambdaLayerCommon
|
1a4f9cd3d8b7e447c8467bd7dde50cb9e9a6e980
|
[
"Apache-2.0"
] | null | null | null |
b_lambda_layer_common_test/integration/infrastructure/function_with_unit_tests.py
|
gkazla/B.LambdaLayerCommon
|
1a4f9cd3d8b7e447c8467bd7dde50cb9e9a6e980
|
[
"Apache-2.0"
] | null | null | null |
from aws_cdk.aws_lambda import Function, Code, Runtime
from aws_cdk.core import Stack, Duration
from b_aws_testing_framework.tools.cdk_testing.testing_stack import TestingStack
from b_cfn_lambda_layer.package_version import PackageVersion
from b_lambda_layer_common.layer import Layer
from b_lambda_layer_common_test.unit import root
class FunctionWithUnitTests(Function):
"""
Function that lets us run unit tests inside lambda function. We want to run unit
tests both locally and remotely.
"""
def __init__(self, scope: Stack):
super().__init__(
scope=scope,
id=f'{TestingStack.global_prefix()}FunctionWithUnitTests',
code=Code.from_asset(root),
handler='handler.handler',
runtime=Runtime.PYTHON_3_8,
timeout=Duration.minutes(5),
memory_size=512,
layers=[
Layer(
scope=scope,
name=f'{TestingStack.global_prefix()}TestingLayerWithUnitTests',
dependencies={
# These dependencies are required for running unit tests inside lambda functions.
# Pytest is used for running actual unit tests.
'pytest': PackageVersion.from_string_version('6.2.5'),
# Pook is used for HTTP mocking, therefore it is also needed here.
'pook': PackageVersion.from_string_version('1.0.1'),
# Not sure about this dependency. Lambda runtime throws errors if its missing.
'aws-cdk.core': PackageVersion.from_string_version('1.99.0'),
# This dependency should be installed with 'pook' since it depends on 'jsonschema' which depends on this.
# For some reason it doesn't.
# Tests would fail with import error otherwise.
'importlib-resources': PackageVersion.from_string_version('5.4.0')
}
)
]
)
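# Usage sketch (hypothetical app/stack names; the exact TestingStack constructor
# signature is assumed here, not verified against the framework):
#
#   from aws_cdk.core import App
#   app = App()
#   stack = TestingStack(app, 'TestingStack')
#   FunctionWithUnitTests(scope=stack)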
| 47.386364
| 129
| 0.601918
| 230
| 2,085
| 5.278261
| 0.513043
| 0.074135
| 0.079077
| 0.102142
| 0.088962
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013533
| 0.326619
| 2,085
| 43
| 130
| 48.488372
| 0.85114
| 0.268585
| 0
| 0.068966
| 0
| 0
| 0.122081
| 0.070714
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.241379
| 0
| 0.310345
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43c657c522f9cb22a9a0ca2bb0912e5da035332c
| 7,309
|
py
|
Python
|
slow_tests/boot_test.py
|
rdturnermtl/mlpaper
|
5da5cb7b3a56d3cfdc7162d01fac2679c9050e76
|
[
"Apache-2.0"
] | 9
|
2020-07-23T02:12:48.000Z
|
2021-06-24T08:19:08.000Z
|
slow_tests/boot_test.py
|
rdturnermtl/benchmark_tools
|
5da5cb7b3a56d3cfdc7162d01fac2679c9050e76
|
[
"Apache-2.0"
] | 14
|
2017-11-29T04:17:04.000Z
|
2018-03-07T00:35:00.000Z
|
slow_tests/boot_test.py
|
rdturnermtl/mlpaper
|
5da5cb7b3a56d3cfdc7162d01fac2679c9050e76
|
[
"Apache-2.0"
] | 1
|
2017-12-29T01:46:31.000Z
|
2017-12-29T01:46:31.000Z
|
# Ryan Turner (turnerry@iro.umontreal.ca)
from __future__ import division, print_function
from builtins import range
import numpy as np
import scipy.stats as ss
import mlpaper.constants as cc
import mlpaper.mlpaper as bt
import mlpaper.perf_curves as pc
from mlpaper.classification import DEFAULT_NGRID, curve_boot
from mlpaper.test_constants import FPR
from mlpaper.util import area, interp1d
_FPR = FPR / 3.0 # Divide by number of test funcs
def fail_check_stat(fail, runs, expect_p_fail, fpr):
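    """Assert that observed failure counts are consistent with expect_p_fail.

    Runs two-sided and one-sided binomial tests on each count and requires the
    smallest p-value to clear a Bonferroni-style threshold (fpr / number of tests).
    """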
pvals_2side = [ss.binom_test(ff, runs, expect_p_fail) for ff in fail]
pvals_1side = [ss.binom_test(ff, runs, expect_p_fail, alternative="greater") for ff in fail]
    # Note that we are not doing a multiple-comparison correction between the
    # two-sided and one-sided tests.
print(fail)
print(pvals_2side)
assert np.min(pvals_2side) >= fpr / len(pvals_2side)
print(pvals_1side)
assert np.min(pvals_1side) >= fpr / len(pvals_1side)
def test_boot(runs=100):
N = 201
confidence = 0.95
# Drawing more seeds than we need to be safe
seeds = np.nditer(np.random.randint(low=0, high=int(1e6), size=runs * 5))
def run_trial(y_true, y_score, y_score_ref, true_curve, curve_f, seed, x_grid=None):
epsilon = 1e-6
curve, _ = curve_f(y_true, y_score[:, 1])
auc, = area(*curve)
curve, _ = curve_f(y_true, y_score_ref[:, 1])
auc_ref, = area(*curve)
true_value, = area(*true_curve)
np.random.seed(seed)
(auc_, EB, pval), curve = curve_boot(
y_true, y_score, ref=true_value, curve_f=curve_f, confidence=confidence, x_grid=x_grid
)
true_curve_grid, = interp1d(curve[cc.XGRID].values, *true_curve)
assert auc_ == auc
fail_EB = np.abs(auc - true_value) > EB
        # Could also test the distribution with a 1-sided KS test, but this is easier for now
fail_P = pval < 1.0 - confidence
fail_curve = (true_curve_grid < curve[cc.LB].values - epsilon) | (
curve[cc.UB].values + epsilon < true_curve_grid
)
assert (x_grid is None) or np.all(curve[cc.XGRID].values == x_grid)
np.random.seed(seed)
(auc_, EB_, pval), curve_ = curve_boot(
y_true, y_score, ref=y_score_ref, curve_f=curve_f, confidence=confidence, pairwise_CI=False, x_grid=x_grid
)
assert auc_ == auc
assert EB_ == EB
        # Could also test the distribution with a 1-sided KS test, but this is easier for now
fail_P2 = pval < 1.0 - confidence
assert np.all(curve_.values == curve.values)
np.random.seed(seed)
(auc_, EB, pval_), curve_ = curve_boot(
y_true, y_score, ref=y_score_ref, curve_f=curve_f, confidence=confidence, pairwise_CI=True, x_grid=x_grid
)
assert auc_ == auc
fail_EB2 = np.abs(auc - auc_ref) > EB
        # Could also test the distribution with a 1-sided KS test, but this is easier for now
assert pval_ == pval
assert np.all(curve_.values == curve.values)
return fail_EB, fail_P, fail_EB2, fail_P2, fail_curve
fail = [0] * 12
fail_curve_roc = np.zeros(DEFAULT_NGRID, dtype=int)
fail_curve_ap = np.zeros(DEFAULT_NGRID, dtype=int)
fail_curve_prg = np.zeros(DEFAULT_NGRID, dtype=int)
for ii in range(runs):
mu = np.random.randn(2)
S = np.random.randn(2, 2)
S = np.dot(S, S.T)
# Coverage, esp at edges, is worse for imbalanced data. See issue #20.
p = 0.5
x_grid = np.linspace(0.0, 0.99, DEFAULT_NGRID)
true_curve = (np.array([[0.0, 1.0]]), np.array([[0.0, 1.0]]), pc.LINEAR)
y_true = np.random.rand(N) <= p
y_score = np.random.multivariate_normal(mu, S, size=N)
if np.random.randn() <= 0.5: # resample to test dupes
idx = np.random.choice(N, size=N, replace=True)
y_score = y_score[idx, :]
y_score, y_score_ref = y_score.T
y_score = np.stack((np.zeros(N), y_score), axis=1)
y_score_ref = np.stack((np.zeros(N), y_score_ref), axis=1)
# Coverage doesn't hold at edges, hence [0.05, 0.95]. See issue #20.
x_grid = np.linspace(0.05, 0.95, DEFAULT_NGRID)
fail_EB, fail_P, fail_EB2, fail_P2, fail_curve = run_trial(
y_true, y_score, y_score_ref, true_curve, pc.roc_curve, next(seeds), x_grid
)
fail[0] += fail_EB
fail[1] += fail_P
fail[2] += fail_EB2
fail[3] += fail_P2
fail_curve_roc += fail_curve
true_curve = (np.array([[0.0, 1.0]]), np.array([[p, p]]), pc.PREV)
fail_EB, fail_P, fail_EB2, fail_P2, fail_curve = run_trial(
y_true, y_score, y_score_ref, true_curve, pc.recall_precision_curve, next(seeds), x_grid
)
fail[4] += fail_EB
fail[5] += fail_P
fail[6] += fail_EB2
fail[7] += fail_P2
fail_curve_ap += fail_curve
x_grid = np.linspace(0.0, 0.99, DEFAULT_NGRID)
true_curve = (np.array([[0.0, 1.0]]), np.array([[0.0, 0.0]]), pc.PREV)
fail_EB, fail_P, fail_EB2, fail_P2, fail_curve = run_trial(
y_true, y_score, y_score_ref, true_curve, pc.prg_curve, next(seeds), x_grid
)
fail[8] += fail_EB
fail[9] += fail_P
fail[10] += fail_EB2
fail[11] += fail_P2
fail_curve_prg += fail_curve
sub_FPR = _FPR / 4.0
expect_p_fail = 1.0 - confidence
fail_check_stat(fail, runs, expect_p_fail, sub_FPR)
print("ROC curve")
fail_check_stat(fail_curve_roc, runs, expect_p_fail, sub_FPR)
print("RP curve")
fail_check_stat(fail_curve_ap, runs, expect_p_fail, sub_FPR)
print("PRG curve")
fail_check_stat(fail_curve_prg, runs, expect_p_fail, sub_FPR)
def test_boot_mean(runs=100):
N = 201
confidence = 0.95
fail = 0
for ii in range(runs):
mu = np.random.randn()
S = np.abs(np.random.randn())
x = mu + S * np.random.randn(N)
mu_est = np.mean(x)
EB = bt.boot_EB(x, confidence=0.95)
fail += np.abs(mu - mu_est) > EB
expect_p_fail = 1.0 - confidence
print("boot mean")
fail_check_stat([fail], runs, expect_p_fail, _FPR)
def test_boot_EB_and_test(runs=100):
"""Arguably this should do out to its own file since it tests bt core."""
mu = np.random.randn()
stdev = np.abs(np.random.randn())
N = 201
confidence = 0.95
def run_trial(x, true_value):
_, _, CI = bt._boot_EB_and_test(x, confidence=confidence, return_CI=True)
LB, UB = CI
fail_CI = (true_value < LB) or (UB < true_value)
_, pval, CI = bt._boot_EB_and_test(x - true_value, confidence=confidence, return_CI=True)
LB, UB = CI
fail_CI2 = (0 < LB) or (UB < 0)
fail_P = pval < 1.0 - confidence
return fail_CI, fail_CI2, fail_P
fail = [0] * 3
for ii in range(runs):
x = mu + stdev * np.random.randn(N)
fail_CI, fail_CI2, fail_P = run_trial(x, mu)
fail[0] += fail_CI
fail[1] += fail_CI2
fail[2] += fail_P
expect_p_fail = 1.0 - confidence
print("boot mean and test")
fail_check_stat(fail, runs, expect_p_fail, _FPR)
if __name__ == "__main__":
np.random.seed(56467)
test_boot()
test_boot_mean()
test_boot_EB_and_test()
print("passed")
| 35.480583
| 118
| 0.623341
| 1,176
| 7,309
| 3.614796
| 0.182823
| 0.035286
| 0.027523
| 0.031757
| 0.516349
| 0.478946
| 0.401553
| 0.320866
| 0.283227
| 0.207245
| 0
| 0.031711
| 0.257901
| 7,309
| 205
| 119
| 35.653659
| 0.752028
| 0.0877
| 0
| 0.194969
| 0
| 0
| 0.011128
| 0
| 0
| 0
| 0
| 0
| 0.062893
| 1
| 0.037736
| false
| 0.006289
| 0.062893
| 0
| 0.113208
| 0.062893
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43c90a0a29279010bde058050d6af3ae4d07f61d
| 3,047
|
py
|
Python
|
core/test/test_timeseries_study.py
|
ajmal017/amp
|
8de7e3b88be87605ec3bad03c139ac64eb460e5c
|
[
"BSD-3-Clause"
] | null | null | null |
core/test/test_timeseries_study.py
|
ajmal017/amp
|
8de7e3b88be87605ec3bad03c139ac64eb460e5c
|
[
"BSD-3-Clause"
] | null | null | null |
core/test/test_timeseries_study.py
|
ajmal017/amp
|
8de7e3b88be87605ec3bad03c139ac64eb460e5c
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import Any, Dict
import numpy as np
import pandas as pd
import core.artificial_signal_generators as sig_gen
import core.statistics as stats
import core.timeseries_study as tss
import helpers.unit_test as hut
class TestTimeSeriesDailyStudy(hut.TestCase):
def test_usual_case(self) -> None:
idx = pd.date_range("2018-12-31", "2019-01-31")
vals = np.random.randn(len(idx))
ts = pd.Series(vals, index=idx)
tsds = tss.TimeSeriesDailyStudy(ts)
tsds.execute()
class TestTimeSeriesMinutelyStudy(hut.TestCase):
def test_usual_case(self) -> None:
idx = pd.date_range("2018-12-31", "2019-01-31", freq="5T")
vals = np.random.randn(len(idx))
ts = pd.Series(vals, index=idx)
tsms = tss.TimeSeriesMinutelyStudy(ts, freq_name="5 minutes")
tsms.execute()
class TestMapDictToDataframeTest1(hut.TestCase):
def test1(self) -> None:
stat_funcs = {
"norm_": stats.apply_normality_test,
"adf_": stats.apply_adf_test,
"kpss_": stats.apply_kpss_test,
}
result_dict = self._get_dict_of_series(1)
actual = tss.map_dict_to_dataframe(
dict_=result_dict, functions=stat_funcs
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
stat_funcs = {
"norm_": stats.apply_normality_test,
"adf_": stats.apply_adf_test,
"kpss_": stats.apply_kpss_test,
}
result_dict = self._get_dict_of_series(1)
actual = tss.map_dict_to_dataframe(
dict_=result_dict,
functions=stat_funcs,
add_prefix=False,
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
stat_funcs = {
"norm_": stats.apply_normality_test,
"adf_": stats.apply_adf_test,
"kpss_": stats.apply_kpss_test,
}
result_dict = self._get_dict_of_series(1)
actual = tss.map_dict_to_dataframe(
dict_=result_dict,
functions=stat_funcs,
progress_bar=False,
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
@staticmethod
def _get_series(seed: int) -> pd.Series:
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = sig_gen.ArmaProcess(arparams, maparams)
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed
)
return series
def _get_dict_of_series(self, seed: int) -> Dict[Any, pd.Series]:
n_items = 15
test_keys = ["test_key_" + str(x) for x in range(n_items)]
result_dict = {key: self._get_series(seed) for key in test_keys}
return result_dict
| 33.855556
| 72
| 0.628159
| 393
| 3,047
| 4.56743
| 0.307888
| 0.050139
| 0.020056
| 0.033426
| 0.547632
| 0.547632
| 0.547632
| 0.547632
| 0.547632
| 0.547632
| 0
| 0.02815
| 0.265507
| 3,047
| 89
| 73
| 34.235955
| 0.773905
| 0
| 0
| 0.441558
| 0
| 0
| 0.04168
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.246753
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43cc95eb28ba86bd35c1811cb4456f10d8f69c56
| 380
|
py
|
Python
|
forecasting_algorithms/Multiple_Timeseries/VAR/var.py
|
ans682/SafePredict_and_Forecasting
|
30ac5a0b665fce090567476bc07b54489b2f3d0f
|
[
"BSD-3-Clause"
] | 1
|
2021-08-05T23:01:47.000Z
|
2021-08-05T23:01:47.000Z
|
forecasting_algorithms/Multiple_Timeseries/VAR/var.py
|
ans682/SafePredict_and_Forecasting
|
30ac5a0b665fce090567476bc07b54489b2f3d0f
|
[
"BSD-3-Clause"
] | 1
|
2021-12-22T08:26:13.000Z
|
2021-12-22T08:26:13.000Z
|
forecasting_algorithms/Multiple_Timeseries/VAR/var.py
|
ans682/SafePredict_and_Forecasting
|
30ac5a0b665fce090567476bc07b54489b2f3d0f
|
[
"BSD-3-Clause"
] | null | null | null |
# VAR example
from statsmodels.tsa.vector_ar.var_model import VAR
from random import random
# contrived dataset with dependency
data = list()
for i in range(100):
v1 = i + random()
v2 = v1 + random()
row = [v1, v2]
data.append(row)
# fit model
model = VAR(data)
model_fit = model.fit()
# make prediction
yhat = model_fit.forecast(model_fit.y, steps=1)
print(yhat)
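# To look further ahead, increase `steps` (sketch; same fitted model and the
# same deprecated-but-working `model_fit.y` lag window used above):
# yhat5 = model_fit.forecast(model_fit.y, steps=5)  # next 5 [v1, v2] rows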
| 22.352941
| 51
| 0.697368
| 60
| 380
| 4.333333
| 0.566667
| 0.123077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029126
| 0.186842
| 380
| 16
| 52
| 23.75
| 0.812298
| 0.186842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43ccba90b50389b99008103e1fcff4ea674ca290
| 2,140
|
py
|
Python
|
candidate-scrape.py
|
jonykarki/hamroscraper
|
a7e34a9cdca89be10422d045f1ed34e9956bd75f
|
[
"MIT"
] | 2
|
2019-09-23T23:41:44.000Z
|
2019-10-06T03:13:17.000Z
|
candidate-scrape.py
|
jonykarki/hamroscraper
|
a7e34a9cdca89be10422d045f1ed34e9956bd75f
|
[
"MIT"
] | null | null | null |
candidate-scrape.py
|
jonykarki/hamroscraper
|
a7e34a9cdca89be10422d045f1ed34e9956bd75f
|
[
"MIT"
] | 4
|
2019-11-26T18:29:20.000Z
|
2021-01-22T06:30:20.000Z
|
import json
import urllib.request
import MySQLdb
db = MySQLdb.connect(host="localhost", # your host, usually localhost
user="root", # your username
passwd="", # your password
db="election")
cur = db.cursor()
# user_agent for sending headers with the request
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
# header
headers = {'User-Agent': user_agent}
district = input("Enter the Name of the district: ")
url = "http://election.ujyaaloonline.com/api/candidates?district=" + district
request = urllib.request.Request(url, None, headers)
response = urllib.request.urlopen(request)
source = response.read()
# print(source)
data = json.loads(source)
#print(data['candidates']['2']['400'][0]['cName'])
election_area = data['election_areas']
# get all the possible election-areas from the district
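# Note: the '1' and '2' keys under data['candidates'] appear to correspond to
# provincial and federal candidates respectively (inferred from the counters below).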
# data needed for the database
'''
resultno :> autoincrement
constituencyname :>
stateno :> Remove the column?
districtno :>
candidate :>
gender :> Remove the column???
votes :> set to zero for now
'''
i = 0
j = 0
for key, value in election_area.items():
area_key = key
district_name = data['district_slug']
try:
for item in data["candidates"]['1'][area_key]:
print(item['aName'])
print(item["cName"])
i = i + 1
except:
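        # Assumption: a missing '1' (provincial) key means this area only has
        # federal ('2') candidates, so we fall through to the insert path.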
for item in data["candidates"]['2'][area_key]:
constituencyname = item['aName'].encode('utf-8')
candidatename = item["cName"].encode('utf-8')
sql = "INSERT INTO `test` (`id`, `candidatename`, `constituencyname`) VALUES (NULL, %s, %s)"
cur.execute(sql, (candidatename, constituencyname))
db.commit()
print('INSERTED ' + item["cName"] + " into the database")
j = j + 1
print(data['district_slug'] + " has " + str(i) + " candidates in provincial election")
print(data['district_slug'] + " has " + str(j) + " candidates in federal election")
print("Total: " + str(i + j) + " candidates added to the database")
| 27.792208
| 105
| 0.619159
| 267
| 2,140
| 4.913858
| 0.441948
| 0.027439
| 0.036585
| 0.019817
| 0.07622
| 0.041159
| 0
| 0
| 0
| 0
| 0
| 0.020744
| 0.234112
| 2,140
| 76
| 106
| 28.157895
| 0.779744
| 0.120093
| 0
| 0
| 0
| 0.051282
| 0.322562
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.025641
| 0.076923
| 0
| 0.076923
| 0.153846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43cde366d5fb7850e5493e9384c566462676fb5d
| 3,101
|
py
|
Python
|
sangita/hindi/lemmatizer.py
|
ashiscs/sangita
|
b90c49859339147137db1c2bdb60a1039a00c706
|
[
"Apache-2.0"
] | 36
|
2017-05-30T04:41:06.000Z
|
2019-02-17T08:41:10.000Z
|
sangita/hindi/lemmatizer.py
|
07kshitij/sangita
|
b90c49859339147137db1c2bdb60a1039a00c706
|
[
"Apache-2.0"
] | 13
|
2018-06-25T11:14:48.000Z
|
2021-05-15T17:57:47.000Z
|
sangita/hindi/lemmatizer.py
|
07kshitij/sangita
|
b90c49859339147137db1c2bdb60a1039a00c706
|
[
"Apache-2.0"
] | 33
|
2018-06-23T21:46:39.000Z
|
2022-03-01T15:55:37.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 9 23:28:21 2017
@author: samriddhi
"""
import re
import sangita.hindi.tokenizer as tok
import sangita.hindi.corpora.lemmata as lt
def numericLemmatizer(instr):
lst = type([1,2,3])
tup = type(("Hello", "Hi"))
string = type("Hello")
num_match = re.compile(r'([०१२३४५६७८९]+[\.\,]*)+[०१२३४५६७८९]+|([-+]*\d+[\.\,]*)+\d+|([०१२३४५६७८९]+|\d+)')
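    # The pattern matches runs of Devanagari digits (०-९) or ASCII digits,
    # optionally joined by '.' or ',' separators (e.g. १,२३४ or 1,234).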
if(type(instr) == lst):
for index,item in enumerate(instr):
if(type(item) == tup):
if num_match.search(str(item[0])):
instr[index] = (instr[index][1], instr[index][1])
else:
if num_match.search(str(item)):
instr[index] = (instr[index], instr[index][1])
else:
if(type(instr) == string):
instr = tok.tokenize(instr)
numericLemmatizer(instr)
else:
print("not supported")
return(instr)
def defaultLemmatizer(instr):
lst = type([1,2,3])
tup = type(("Hello", "Hi"))
string = type("Hello")
if(type(instr) == lst):
for index,item in enumerate(instr):
if(type(item) != tup):
instr[index] = (instr[index], instr[index])
else:
if(type(instr) == string):
instr = tok.tokenize(instr)
defaultLemmatizer(instr)
else:
print("not supported")
return(instr)
def lookupLemmatizer(instr):
lst = type([1,2,3])
tup = type(("Hello", "Hi"))
string = type("Hello")
lemmatalist = lt.drawlist()
words = []
lemma = []
for item in lemmatalist:
words.append(item.split("\t")[0])
lemma.append(item.split("\t")[1])
tokens = set(words)
if(type(instr) == lst):
for index,item in enumerate(instr):
if(type(item) == tup):
if item in tokens:
tag = lemma[words.index(item)]
instr[index] = (instr[index][1],tag)
else:
if(type(item) != tup):
if item in tokens:
tag = lemma[words.index(item)]
instr[index] = (instr[index], tag)
else:
if(type(instr) == string):
instr = tok.tokenize(instr)
lookupLemmatizer(instr)
else:
print("not supported")
return(instr)
def Lemmatizer(instr):
instr = lookupLemmatizer(instr)
instr = numericLemmatizer(instr)
instr = defaultLemmatizer(instr)
return(instr)
if __name__ == '__main__':
input_str = 'पुंछ में हुई मुठभेड़ के बारे में एक सरकारी अधिकारी ने बताया कि १३वीं सिख लाईट इनफेंट्री द्वारा लश्कर-ए - ताइबा गुट के आतंकियों को नियंत्रण-रेखा पर चुनौती देने पर मुठभेड़ रात ११.४५ बजे शुरू हुई।'
print(lookupLemmatizer(input_str))
print(numericLemmatizer(input_str))
print(defaultLemmatizer(input_str))
print(Lemmatizer(input_str))
| 27.936937
| 209
| 0.507256
| 436
| 3,101
| 3.713303
| 0.302752
| 0.080296
| 0.064855
| 0.086473
| 0.510191
| 0.48672
| 0.413836
| 0.413836
| 0.339716
| 0.26189
| 0
| 0.03134
| 0.331183
| 3,101
| 111
| 210
| 27.936937
| 0.719383
| 0.031925
| 0
| 0.564103
| 0
| 0.012821
| 0.118904
| 0.026052
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051282
| false
| 0
| 0.038462
| 0
| 0.089744
| 0.089744
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43cfdd42faa2065cb7d2cefc439413b4ed53c719
| 4,471
|
py
|
Python
|
markdown_editing/tests/test_extension.py
|
makyo/markdown-editing
|
ecbc8970f4d416038f9d2c46fae22d4dbb79c647
|
[
"MIT"
] | null | null | null |
markdown_editing/tests/test_extension.py
|
makyo/markdown-editing
|
ecbc8970f4d416038f9d2c46fae22d4dbb79c647
|
[
"MIT"
] | null | null | null |
markdown_editing/tests/test_extension.py
|
makyo/markdown-editing
|
ecbc8970f4d416038f9d2c46fae22d4dbb79c647
|
[
"MIT"
] | null | null | null |
from markdown import markdown
from unittest import TestCase
from markdown_editing.extension import EditingExtension
class TestExtension(TestCase):
def test_substitution(self):
source = '~{out with the old}{in with the new}'
expected = '<p><span class="substitution"><del>out with the old</del><ins>in with the new</ins></span></p>'
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
# Only need to test this once.
html = markdown(source, extensions=['markdown_editing'])
self.assertEqual(html, expected)
def test_addition(self):
source = 'foo +{bar} baz +{qux}(yap)'
expected = '<p>foo <ins class="addition">bar</ins> baz <ins class="addition">qux<q class="comment">yap</q></ins></p>'
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
def test_deletion(self):
source = 'foo -{bar} baz -{qux}(yap)'
expected = '<p>foo <del class="deletion">bar</del> baz <del class="deletion">qux<q class="comment">yap</q></del></p>'
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
def test_selected(self):
source = 'foo ?{bar}(qux) baz'
expected = '<p>foo <mark class="selected">bar<q class="comment">qux</q></mark> baz</p>'
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
def test_comments(self):
self.maxDiff = None
source = """
* Substitution: ~{out with the old}{in with the new}
* With comment: ~{out with the old}{in with the new}(is what I always say)
* With attribution: ~{out with the old}{in with the new}(is what I always say (Makyo))
* With date: ~{out with the old}{in with the new}(is what I always say (Makyo 2020-04-21))
* Comment thread: +{Foxes}(More foxes are always good)!{SGTM}
* Comment with attribution: !{SGTM}(Makyo 2020-04-22)
""".strip()
expected = """
<ul>
<li>Substitution: <span class="substitution"><del>out with the old</del><ins>in with the new</ins></span></li>
<li>With comment: <span class="substitution"><del>out with the old</del><ins>in with the new</ins><q class="comment">is what I always say</q></span></li>
<li>With attribution: <span class="substitution"><del>out with the old</del><ins>in with the new</ins><q class="comment">is what I always say<span class="attribution">Makyo</span></q></span></li>
<li>With date: <span class="substitution"><del>out with the old</del><ins>in with the new</ins><q class="comment">is what I always say<span class="attribution">Makyo</span><span class="date">2020-04-21</span></q></span></li>
<li>Comment thread: <ins class="addition">Foxes<q class="comment">More foxes are always good</q></ins><q class="comment">SGTM</q></li>
<li>Comment with attribution: <q class="comment">SGTM<span class="attribution">Makyo</span><span class="date">2020-04-22</span></q></li>
</ul>
""".strip()
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
def test_level(self):
source = """
```
?{Some text}(bad wolf)
```
?{Some text}(bad wolf)
> ?{Some text}(good doggy)
""".strip()
expected = """
<p><code>?{Some text}(bad wolf)</code></p>
<pre><code>?{Some text}(bad wolf)
</code></pre>
<blockquote>
<p><mark class="selected">Some text<q class="comment">good doggy</q></mark></p>
</blockquote>
""".strip()
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
def test_nesting(self):
source = """
?{The only currently working form of nesting}(But what if...!{NO})
""".strip()
expected = """
<p><mark class="selected">The only currently working form of nesting<q class="comment">But what if...<q class="comment">NO</q></q></mark></p>
""".strip()
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
def test_mixed(self):
source = """
+{some *fancy* new stuff}(With a **fancy** comment)
""".strip()
expected = """
<p><ins class="addition">some <em>fancy</em> new stuff<q class="comment">With a <strong>fancy</strong> comment</q></ins></p>
""".strip()
html = markdown(source, extensions=[EditingExtension()])
self.assertEqual(html, expected)
| 39.566372
| 224
| 0.636547
| 612
| 4,471
| 4.633987
| 0.150327
| 0.049365
| 0.059591
| 0.045839
| 0.605078
| 0.575811
| 0.520099
| 0.494711
| 0.47708
| 0.47708
| 0
| 0.008738
| 0.180944
| 4,471
| 112
| 225
| 39.919643
| 0.765702
| 0.006263
| 0
| 0.440476
| 0
| 0.190476
| 0.593018
| 0.177252
| 0
| 0
| 0
| 0
| 0.107143
| 1
| 0.095238
| false
| 0
| 0.035714
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43cffed323ab5de7f6be36b25de0a210ece3af09
| 15,477
|
py
|
Python
|
apps/siren/test_handlers.py
|
thomasyi17/diana2
|
2167053dfe15b782d96cb1e695047433f302d4dd
|
[
"MIT"
] | 15
|
2019-02-12T23:26:09.000Z
|
2021-12-21T08:53:58.000Z
|
apps/siren/test_handlers.py
|
thomasyi17/diana2
|
2167053dfe15b782d96cb1e695047433f302d4dd
|
[
"MIT"
] | 2
|
2019-01-23T21:13:12.000Z
|
2019-06-28T15:45:51.000Z
|
apps/siren/test_handlers.py
|
thomasyi17/diana2
|
2167053dfe15b782d96cb1e695047433f302d4dd
|
[
"MIT"
] | 6
|
2019-01-23T20:22:50.000Z
|
2022-02-03T03:27:04.000Z
|
"""
SIREN/DIANA basic functionality testing framework
Requires env vars:
- GMAIL_USER
- GMAIL_APP_PASSWORD
- GMAIL_BASE_NAME -- ie, abc -> abc+hobitduke@gmail.com
These env vars are set to default:
- ORTHANC_PASSWORD
- SPLUNK_PASSWORD
- SPLUNK_HEC_TOKEN
TODO: Move stuff to archive after collected
TODO: Write data into daily folder or something from mi-share ingress
TODO: Suppress dicom-simplify missing (series) creation time
"""
import os  # used below for os.environ / os.path; not guaranteed by the wildcard import
import time
import logging
import shutil
import io
import tempfile
from pathlib import Path
from pprint import pformat
from contextlib import redirect_stdout
from multiprocessing import Process
from datetime import datetime, timedelta
from interruptingcow import timeout
from crud.manager import EndpointManager
from crud.abc import Watcher, Trigger
from crud.endpoints import Splunk
from wuphf.endpoints import SmtpMessenger
from diana.apis import Orthanc, ObservableOrthanc, DcmDir, ObservableDcmDir
from diana.dixel import Dixel, ShamDixel
from diana.utils.dicom import DicomLevel as DLv, DicomEventType as DEv
from wuphf.cli.string_descs import *
from diana.utils import unpack_data
from crud.utils import deserialize_dict
from diana.utils.gateways import suppress_urllib_debug
from diana.utils.endpoint.watcher import suppress_watcher_debug
from handlers import handle_upload_dir, handle_upload_zip, handle_notify_study, \
handle_file_arrived, start_watcher, tagged_studies
from trial_dispatcher import TrialDispatcher as Dispatcher
LOCAL_SERVICES = False # Set False to use UMich services
USE_GMAIL = True # Set False to use UMich smtp
DO_DIR_UPLOAD = False
CHECK_SPLUNK = False # Set False to skip long wait for dixel to index
CHECK_WATCH_STUDIES = False  # Set False to skip long wait for orthanc watcher
EMAIL_DRYRUN = False # Set False to send live emails
# CONFIG
_services = "@services.yaml"
_subscriptions = "@subscriptions.yaml"
os.environ["SPLUNK_INDEX"] = "testing"
SMTP_MESSENGER_NAME = "smtp_server"
if LOCAL_SERVICES:
    # Set everything back to defaults
os.environ["UMICH_HOST"] = "localhost" # For testing
del os.environ["ORTHANC_USER"]
del os.environ["ORTHANC_PASSWORD"]
del os.environ["SPLUNK_USER"]
del os.environ["SPLUNK_PASSWORD"]
if USE_GMAIL:
SMTP_MESSENGER_NAME = "gmail:"
test_email_addr1 = "derek.merck@ufl.edu"
#test_email_addr1 = "ejacob@med.umich.edu"
#test_email_addr1 = os.environ.get("TEST_EMAIL_ADDR1")
# os.environ["TEST_GMAIL_BASE"] = test_email_addr1.split("@")[0]
anon_salt = "Test+Test+Test"
fkey = b'o-KzB3u1a_Vlb8Ji1CdyfTFpZ2FvdsPK4yQCRzFCcss='
msg_t = """to: {{ recipient.email }}\nfrom: {{ from_addr }}\nsubject: Test Message\n\nThis is the message text: "{{ item.msg_text }}"\n"""
notify_msg_t = "@./notify.txt.j2"
# TESTING CONFIG
test_sample_zip = os.path.abspath("../../tests/resources/dcm_zip/test.zip")
test_sample_file = os.path.abspath("../../tests/resources/dcm/IM2263")
test_sample_dir = os.path.expanduser("~/data/test") # Need to dl separately
# TESTS
def test_upload_one(orth: Orthanc, dixel: Dixel):
print("Testing can upload")
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
orth.put(dixel)
assert (len(orth.studies()) > 0)
assert (orth.exists(dixel))
print("Passed!")
return True
def test_anonymize_one(orth: Orthanc, dixel: Dixel):
print("Testing can anonymize, tag, and untag")
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
orth.put(dixel)
anon = ShamDixel.from_dixel(dixel, salt=anon_salt)
afile = orth.anonymize(anon, replacement_map=anon.orthanc_sham_map())
anon.file = afile
orth.put(anon)
orth.putm(anon.sham_parent_oid(DLv.STUDIES),
level=DLv.STUDIES,
key="signature",
value=anon.pack_fields(fkey))
assert (len(orth.studies()) == 2)
orth.delete(dixel)
assert (len(orth.studies()) == 1)
oid = orth.studies()[0]
test = orth.get(oid)
assert( test.tags["PatientName"] == anon.meta["ShamName"] )
enc = orth.getm(test, key="signature")
tags = unpack_data(enc, fkey)
assert( tags["PatientName"] in dixel.tags["PatientName"] )
print("Passed!")
return True
def test_index_one( splunk: Splunk, dixel: Dixel, check_exists=CHECK_SPLUNK ):
print("Testing can index")
splunk.put(dixel, index=os.environ.get("SPLUNK_INDEX"))
if check_exists:
print("Waiting for 1 min to index")
time.sleep(60)
time_range = [
datetime.now()-timedelta(minutes=2),
datetime.now()
]
r = splunk.find("search index=testing", time_range=time_range)
logging.debug(r)
assert( len(r) > 0 )
print("Passed")
return True
def test_email_messenger( messenger: SmtpMessenger, dryrun=EMAIL_DRYRUN ):
print("Testing can email from template")
outgoing = "The quick brown fox jumped over the lazy dog"
data = {"item": {"msg_text": outgoing},
"recipient": {"email": test_email_addr1}}
msg = messenger.get(data, target=test_email_addr1)
assert( test_email_addr1 in msg )
assert( outgoing in msg )
if not dryrun:
messenger.send(data, target=test_email_addr1)
print("Passed!")
return True
def test_distribute( subscriptions, messenger: SmtpMessenger ):
print("Testing can dispatch")
ch, subs = deserialize_dict(subscriptions)
dispatch = Dispatcher(channel_tags=ch)
dispatch.add_subscribers(subs)
messenger.set_msg_t(notify_msg_t)
dispatch.email_messenger = messenger
logging.debug(pformat(dispatch.subscribers))
data = {"tags": {"AccessionNumber": "ABC123",
"PatientName": "DOE^JOHN^S"},
"meta": {"signature":
{"trial": "hobit",
"site": "duke"}
}
}
sent = dispatch.put(data, dryrun=EMAIL_DRYRUN)
data["meta"]["signature"]["site"] = "detroit"
sent += dispatch.put(data, dryrun=EMAIL_DRYRUN)
print(sent)
msgs = [x['msg'] for x in sent]
msgs = "\n".join(msgs)
# logging.debug(pformat(msgs))
assert( "SIREN/HOBIT" in msgs )
assert( "+testing+hobit@gmail.com" in msgs )
assert( 'subject jacket for "DOE^JOHN^S"' in msgs )
print("Passed!")
return True
def test_upload_dir_handler(dcm_dir: DcmDir, orth: Orthanc):
print("Testing can upload dir w handler")
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
handle_upload_dir(dcm_dir, orth, fkey, anon_salt=anon_salt)
assert (len(orth.instances()) > 20)
print("Passed!")
return True
def test_upload_zip_handler(zip_file, orth: Orthanc):
print("Testing can upload zip w handler")
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
handle_upload_zip(DcmDir(), zip_file, orth, fkey, anon_salt=anon_salt)
assert (len(orth.instances()) > 1)
print("Passed!")
return True
def test_file_arrived_handler(dcm_file, zip_file, orth: Orthanc):
print("Testing can handle file arrived")
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
watch_path = tempfile.mkdtemp()
site_path = os.path.join(watch_path, "my_trial", "my_site")
os.makedirs(site_path)
shutil.copy(zip_file, site_path)
data = {"fn": os.path.join( site_path, Path(zip_file).name )}
handle_file_arrived(data, DcmDir(path=watch_path), orth,
fkey=fkey, anon_salt=anon_salt, signature_meta_key="signature")
assert (len(orth.instances()) > 1)
oid = orth.studies()[0]
data = orth.getm(oid, key="signature")
clear = unpack_data(data, fkey)
print(pformat(clear))
assert(clear["trial"] == "my_trial")
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
shutil.copy(dcm_file, site_path)
data = {"fn": os.path.join(site_path, Path(dcm_file).name)}
handle_file_arrived(data, DcmDir(path=watch_path), orth,
fkey=fkey, anon_salt=anon_salt, signature_meta_key="signature")
assert (len(orth.instances()) == 1)
time.sleep(1.0)
oid = orth.studies()[0]
data = orth.getm(oid, key="signature")
clear = unpack_data(data, fkey)
print(pformat(clear))
assert(clear["trial"] == "my_trial")
orth.clear()
assert (len(orth.studies()) == 0)
shutil.rmtree(watch_path, ignore_errors=True)
print("Passed!")
return True
def test_notify_handler(dixel, orth: Orthanc,
subscriptions, messenger: SmtpMessenger,
indexer: Splunk, dryrun=EMAIL_DRYRUN):
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
orth.put(dixel)
dixel.meta["trial"] = "hobit"
dixel.meta["site"] = "testing"
orth.putm(dixel.parent_oid(DLv.STUDIES),
level=DLv.STUDIES,
key="signature",
value=dixel.pack_fields(fkey, fields=["trial", "site"]))
ch, subs = deserialize_dict(subscriptions)
dispatch = Dispatcher(
channel_tags=ch
)
dispatch.add_subscribers(subs)
messenger.set_msg_t(notify_msg_t)
dispatch.email_messenger = messenger
data = {"oid": dixel.parent_oid(DLv.STUDIES)}
handle_notify_study(data, source=orth,
dispatcher=dispatch, dryrun=dryrun,
                        indexer=indexer, index_name=os.environ.get("SPLUNK_INDEX"),  # SPLUNK_INDEX was undefined; use the env var set above
fkey=fkey)
print("Passed!")
return True
def test_watch_orthanc(test_dixel, orth: ObservableOrthanc):
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
watcher = Watcher()
trigger0 = Trigger(
evtype=DEv.INSTANCE_ADDED,
source=orth,
action=orth.say)
watcher.add_trigger(trigger0)
trigger1 = Trigger(
evtype=DEv.STUDY_ADDED,
source=orth,
action=orth.say)
watcher.add_trigger(trigger1)
def runner():
"""Pause to start watcher and then copy sample file to incoming"""
time.sleep(1.0)
orth.put(test_dixel)
p = Process(target=runner)
p.start()
f = io.StringIO()
print("Starting watcher")
with redirect_stdout(f):
print("In capture")
try:
with timeout(5): # Give it a little time to say the instance
watcher.run()
except RuntimeError:
print("Stopping watcher")
finally:
watcher.stop()
out = f.getvalue()
print("Watcher output:")
print(out)
    if test_dixel.oid() in out:  # use the function argument (was the module-level dixel)
print("Passed!")
return True
def test_watch_dir(test_file):
watch_path = tempfile.mkdtemp()
site_path = os.path.join(watch_path, "my_trial", "my_site")
os.makedirs(site_path)
dcm_dir = ObservableDcmDir(path=watch_path)
watcher = Watcher()
trigger = Trigger(
evtype=DEv.FILE_ADDED,
source=dcm_dir,
action=dcm_dir.say)
watcher.add_trigger(trigger)
def runner():
"""Pause to start watcher and then copy sample file to incoming"""
time.sleep(1.0)
shutil.copy(test_file, site_path)
p = Process(target=runner)
p.start()
f = io.StringIO()
print("Starting watcher")
with redirect_stdout(f):
print("In capture")
try:
with timeout(5): # Give it a little time to say the filename
watcher.run()
except RuntimeError:
print("Stopping watcher")
finally:
watcher.stop()
out = f.getvalue()
print("Watcher output:")
print(out)
shutil.rmtree(watch_path, ignore_errors=True)
if Path(test_file).name in out:
print("Passed!")
return True
def test_siren_receiver(test_file, orth: Orthanc,
subscriptions, messenger: SmtpMessenger,
indexer: Splunk, dryrun=EMAIL_DRYRUN):
orth.clear()
tagged_studies.clear()
assert (len(orth.studies()) == 0)
ch, subs = deserialize_dict(subscriptions)
dispatch = Dispatcher(
channel_tags=ch
)
dispatch.add_subscribers(subs)
messenger.set_msg_t(notify_msg_t)
dispatch.email_messenger = messenger
watch_path = tempfile.mkdtemp()
site_path = os.path.join(watch_path, "hobit", "testing")
os.makedirs(site_path)
incoming = ObservableDcmDir(path=watch_path)
def runner():
"""Pause to start watcher and then copy sample file to incoming/trial/site"""
time.sleep(1.0)
shutil.copy(test_file, site_path)
p = Process(target=runner)
p.start()
f = io.StringIO()
print("Starting SIREN Receiver")
with redirect_stdout(f):
print("In capture")
try:
with timeout(90): # Give it a little time for the study to settle
watcher = start_watcher(
incoming,
orth,
fkey=fkey,
anon_salt=anon_salt,
dispatcher=dispatch,
dryrun=dryrun,
indexer=indexer,
index_name=os.environ.get("SPLUNK_INDEX")
)
except RuntimeError:
print("Stopping watcher subprocess")
out = f.getvalue()
print("SIREN Reciever output:")
print(out)
shutil.rmtree(watch_path, ignore_errors=True)
return True
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
suppress_urllib_debug()
suppress_watcher_debug()
# Create service endpoints
services = EndpointManager(serialized_ep_descs=_services)
print(pformat(services.ep_descs))
orth: ObservableOrthanc = services.get("hobit")
orth.polling_interval = 2.0
messenger: SmtpMessenger = services.get(SMTP_MESSENGER_NAME)
messenger.msg_t = msg_t
splunk: Splunk = services.get("splunk")
dcm_dir = DcmDir(path=test_sample_dir)
# Load a dixel
dixel = dcm_dir.get("HOBIT1172/IM0", file=True)
# assert( dixel )
# assert( dixel.file )
#
# # Verify that all endpoints are online
# assert( orth.check() )
# assert( messenger.check() )
# assert( splunk.check() )
#
# # Verify basic capabilities:
# # - upload
# # - anonymize
# # - index
# # - message
# # - distribute
#
# assert( test_upload_one(orth, dixel) )
# assert( test_anonymize_one(orth, dixel) )
# assert( test_index_one(splunk, dixel) )
assert( test_email_messenger(messenger) )
# assert( test_distribute(_subscriptions, messenger) )
exit()
# Verify observer daemons:
# - watch dir
# - watch orth
assert( test_watch_dir(test_sample_file) )
assert( test_watch_orthanc(dixel, orth) )
# Verify handlers:
# - directory
# - zip
# - file
# - notify
if DO_DIR_UPLOAD:
assert( test_upload_dir_handler(dcm_dir, orth) )
assert( test_upload_zip_handler(test_sample_zip, orth) )
assert( test_file_arrived_handler(test_sample_file, test_sample_zip, orth) )
assert( test_notify_handler(dixel, orth, _subscriptions, messenger, splunk) )
# Verify watcher pipeline
# - run watcher
assert( test_siren_receiver(test_sample_file, orth, _subscriptions, messenger, splunk) )
| 27.588235
| 151
| 0.648511
| 1,917
| 15,477
| 5.063641
| 0.18049
| 0.016689
| 0.022767
| 0.026785
| 0.461523
| 0.419594
| 0.382301
| 0.347687
| 0.315752
| 0.30648
| 0
| 0.006187
| 0.237708
| 15,477
| 560
| 152
| 27.6375
| 0.816579
| 0.116625
| 0
| 0.453039
| 0
| 0.002762
| 0.1091
| 0.010152
| 0
| 0
| 0
| 0.005357
| 0.099448
| 1
| 0.041436
| false
| 0.035912
| 0.071823
| 0
| 0.146409
| 0.110497
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43d0fea901e478a41a7213fecbddf4d86fc4b79e
| 6,735
|
py
|
Python
|
deptree.py
|
jeking3/boost-deptree
|
27eda54df2d022af17347df4ba4892c39392e474
|
[
"BSL-1.0"
] | null | null | null |
deptree.py
|
jeking3/boost-deptree
|
27eda54df2d022af17347df4ba4892c39392e474
|
[
"BSL-1.0"
] | null | null | null |
deptree.py
|
jeking3/boost-deptree
|
27eda54df2d022af17347df4ba4892c39392e474
|
[
"BSL-1.0"
] | null | null | null |
#
# Copyright (c) 2019 James E. King III
#
# Use, modification, and distribution are subject to the
# Boost Software License, Version 1.0. (See accompanying file
# LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt)
#
import json
import networkx
import re
from pathlib import Path
class BoostDependencyTree(object):
"""
Generates a PlantUML dependency tree to visualize the dependencies.
One of the benefits of generating a visual graph is that cycles become
immediately evident.
"""
EDGES = {
2: "-->",
1: "..>"
}
STRENGTHS = {
"include": 2,
"src": 2,
"test": 1,
"tests": 1
}
def __init__(self, root: Path, out: Path):
"""
Arguments:
root: path to BOOST_ROOT
out: path to output file
"""
self.exp = re.compile(r"^\s*#\s*include\s*[<\"](?P<header>[^>\"]+)[>\"]\s*$")
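        # Matches `#include <path>` or `#include "path"` lines; the header path
        # is captured in the named group "header".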
self.graph = networkx.DiGraph()
self.headers = {} # key: header include path; value: repo key
self.repos = {} # key: repo key; value: repo path
self.out = out
self.root = root
self.libs = self.root / "libs"
with (self.libs / "config" / "include" / "boost" / "version.hpp").open() as fp:
vlines = fp.readlines()
for vline in vlines:
if "BOOST_LIB_VERSION" in vline:
                    # e.g. the matched line: #define BOOST_LIB_VERSION "1_71"
tokens = vline.split(" ")
self.boost_version = tokens[2].strip()[1:-1].replace("_", ".")
def load(self):
self.collect()
self.analyze()
def collect(self):
"""
Locate every .hpp and .h file and associate it with a repository.
"""
metas = self.libs.glob("**/libraries.json")
for meta in metas:
with meta.open() as fp:
metadata = json.loads(fp.read())
repodir = meta.parent.parent
metadata = metadata[0] if isinstance(metadata, list) else metadata # for boost/core
repokey = metadata["key"]
repoinc = repodir / "include"
if repoinc.is_dir(): # libs/geometry/index has no include but looks like a repo?
self.graph.add_node(repokey)
self.repos[repokey] = repodir
headers = repoinc.glob("**/*.h??")
for header in headers:
# print(str(header))
incpath = header.relative_to(repoinc)
assert incpath not in self.headers,\
f"{incpath} in {repokey} already in header map from "\
f"{self.headers[incpath]} - duplicate header paths!"
self.headers[str(incpath)] = repokey
def analyze(self):
"""
Find every include statement and create a graph of dependencies.
"""
for repokey, repodir in self.repos.items():
for ext in ["c", "cpp", "h", "hpp", "ipp"]:
files = repodir.glob("**/*." + ext)
for code in files:
inside = code.relative_to(repodir).parts[0]
if inside not in self.STRENGTHS.keys():
continue
weight = self.STRENGTHS[inside]
with code.open() as fp:
try:
#print(str(code))
source = fp.readlines()
except UnicodeDecodeError:
continue
for line in source:
match = self.exp.search(line)
if match:
include = match.group("header")
if include in self.headers:
deprepo = self.headers[include]
if repokey != deprepo: # avoid self-references
data = self.graph.get_edge_data(repokey, deprepo, {"weight": 0})
if data["weight"] > 0 and data["weight"] < weight:
self.graph.remove_edge(repokey, deprepo)
data["weight"] = 0
if data["weight"] == 0:
self.graph.add_edge(repokey, deprepo, weight=weight)
def report_cycles(self):
with self.out.open("w") as fp:
fp.write("@startuml\n")
fp.write("\n")
fp.write(f"title Boost {self.boost_version} Direct Dependency Cycles\n")
fp.write("footer Generated by boost-deptree (C) 2019 James E. King III\n")
fp.write("\n")
for edge in self.graph.edges:
fwdweight = self.graph.get_edge_data(edge[0], edge[1])["weight"]
if fwdweight > 1:
if self.graph.get_edge_data(edge[1], edge[0], {"weight": 0})["weight"] > 1:
fp.write(f"['{edge[0]}'] --> ['{edge[1]}']\n")
fp.write("\n")
fp.write("@enduml\n")
def report_dependencies_from(self, repokey):
with self.out.open("w") as fp:
fp.write("@startuml\n")
fp.write("\n")
fp.write(f"title Boost {self.boost_version} dependencies of {repokey}\n")
fp.write("footer Generated by boost-deptree (C) 2019 James E. King III\n")
fp.write("\n")
for edge in self.graph.edges:
if edge[0] == repokey:
fwdweight = self.graph.get_edge_data(edge[0], edge[1])["weight"]
fp.write(f"['{edge[0]}'] {self.EDGES[fwdweight]} ['{edge[1]}']\n")
fp.write("\n")
fp.write("@enduml\n")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Generate PlantUML dependency tree.')
parser.add_argument('root', type=str, help='Boost root directory.')
parser.add_argument('out', type=str, help='Output filename.')
require_one = parser.add_mutually_exclusive_group(required=True)
require_one.add_argument('--cycles', action='store_true', help='Show direct repository dependency cycles.')
require_one.add_argument('--from', help='Show dependencies from a given repository.')
args = parser.parse_args()
root = Path(args.root)
assert root.is_dir(), "root is not a directory"
out = Path(args.out)
tree = BoostDependencyTree(root, out)
tree.load()
if args.cycles:
tree.report_cycles()
else:
tree.report_dependencies_from(args.__dict__["from"])
| 40.572289
| 111
| 0.515367
| 754
| 6,735
| 4.519894
| 0.287798
| 0.032864
| 0.028169
| 0.015845
| 0.17723
| 0.163732
| 0.139671
| 0.139671
| 0.139671
| 0.139671
| 0
| 0.011604
| 0.360208
| 6,735
| 165
| 112
| 40.818182
| 0.779299
| 0.120267
| 0
| 0.162602
| 0
| 0
| 0.158221
| 0.011548
| 0
| 0
| 0
| 0
| 0.01626
| 1
| 0.04878
| false
| 0
| 0.04065
| 0
| 0.113821
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43d13fbbdf77afe2138ccc76bfc3468760cf2d47
| 7,357
|
py
|
Python
|
uberbackend.py
|
adiHusky/uber_backend
|
adc78882c081f7636b809d6e1889ba3297309e20
|
[
"MIT"
] | null | null | null |
uberbackend.py
|
adiHusky/uber_backend
|
adc78882c081f7636b809d6e1889ba3297309e20
|
[
"MIT"
] | null | null | null |
uberbackend.py
|
adiHusky/uber_backend
|
adc78882c081f7636b809d6e1889ba3297309e20
|
[
"MIT"
] | null | null | null |
from flask import Flask, flash, request, jsonify, render_template, redirect, url_for, g, session, send_from_directory, abort
from flask_cors import CORS
# from flask import status
from datetime import date, datetime, timedelta
from calendar import monthrange
from dateutil.parser import parse
import pytz
import os
import sys
import time
import uuid
import json
import random
import string
import pathlib
import io
from uuid import UUID
from bson.objectid import ObjectId
# straight mongo access
from pymongo import MongoClient
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
sentry_sdk.init(
dsn="https://acea88276810494e96828c4fd0e1471f@o555579.ingest.sentry.io/5685529",
integrations=[FlaskIntegration()],
# Set traces_sample_rate to 1.0 to capture 100%
# of transactions for performance monitoring.
# We recommend adjusting this value in production.
traces_sample_rate=1.0,
# By default the SDK will try to use the SENTRY_RELEASE
# environment variable, or infer a git commit
# SHA as release, however you may want to set
# something more human-readable.
# release="myapp@1.0.0",
)
class InvalidUsage(Exception):
status_code = 400
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
# mongo
# mongo_client = MongoClient('mongodb://localhost:27017/')
mongo_client = MongoClient(
"mongodb+srv://Mahitha-Maddi:Mahitha%4042@cluster0.1z0g8.mongodb.net/test")
app = Flask(__name__)
# CORS(app)
CORS(app, resources={r"/*": {"origins": "*"}})
basedir = os.path.abspath(os.path.dirname(__file__))
# Here are my datasets
bookings = dict()
################
# Apply to mongo
################
def atlas_connect():
# Node
# const MongoClient = require('mongodb').MongoClient;
# const uri = "mongodb+srv://admin:<password>@tweets.8ugzv.mongodb.net/myFirstDatabase?retryWrites=true&w=majority";
# const client = new MongoClient(uri, { useNewUrlParser: true, useUnifiedTopology: true });
# client.connect(err => {
# const collection = client.db("test").collection("devices");
# // perform actions on the collection object
# client.close();
# });
# Python
    # pymongo itself is not imported above, only MongoClient, so use that name
    client = MongoClient(
"mongodb+srv://Mahitha-Maddi:Mahitha%4042@cluster0.1z0g8.mongodb.net/test")
db = client.test
# database access layer
def insert_one(r):
start_time = datetime.now()
with mongo_client:
# start_time_db = datetime.now()
db = mongo_client['Uber']
# microseconds_caching_db = (datetime.now() - start_time_db).microseconds
# print("*** It took " + str(microseconds_caching_db) + " microseconds to cache mongo handle.")
print("...insert_one() to mongo: ", r)
try:
mongo_collection = db['bookings']
result = mongo_collection.insert_one(r)
print("inserted _ids: ", result.inserted_id)
except Exception as e:
print(e)
microseconds_doing_mongo_work = (datetime.now() - start_time).microseconds
print("*** It took " + str(microseconds_doing_mongo_work) +
" microseconds to insert_one.")
def tryexcept(requesto, key, default):
lhs = None
try:
lhs = requesto.json[key]
# except Exception as e:
except:
lhs = default
return lhs
def ssm():
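    """Return the number of seconds since local midnight, as a string."""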
now = datetime.now()
midnight = now.replace(hour=0, minute=0, second=0, microsecond=0)
return str((now - midnight).seconds)
@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
# endpoint to check Availability
@app.route("/checkAvailability", methods=["POST"])
def check_availability():
source = request.json['source']
destination = request.json['destination']
date = request.json['date']
with mongo_client:
#raise InvalidUsage('This view is gone', status_code=410)
db = mongo_client['Uber']
mongo_collection = db['available']
print(source)
myquery = {"source": {"$regex": str(source)}, "destination": {
"$regex": str(destination)}, "date": {"$regex": str(date)}}
        cursor = mongo_collection.find(myquery, {"_id": 0})
records = list(cursor)
howmany = len(records)
print('found ' + str(howmany) + ' bookings!')
sorted_records = sorted(records, key=lambda t: t['source'])
print(type(sorted_records))
return jsonify(sorted_records)
# endpoint to create new Booking
@app.route("/book", methods=["POST"])
def book_bus():
source = request.json['source']
destination = request.json['destination']
date = request.json['date']
startTime = request.json['startTime']
endTime = request.json['endTime']
user = request.json['user']
busnumber = request.json['busnumber']
    booking = dict(user=user, source=source, destination=destination, busnumber=busnumber,
                   date=date, startTime=startTime, endTime=endTime,
                   bookeddate=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                   _id=str(ObjectId()))
insert_one(booking)
return jsonify(booking)
@app.route("/bookings-results", methods=["GET"])
def get_tweets_results():
global bookings
with mongo_client:
db = mongo_client['Uber']
mongo_collection = db['bookings']
cursor = mongo_collection.find({})
records = list(cursor)
howmany = len(records)
print('found ' + str(howmany) + ' bookings!')
sorted_records = sorted(records, key=lambda t: t['source'])
return jsonify(sorted_records)
##################
# Apply from mongo
##################
def applyRecordLevelUpdates():
return None
def applyCollectionLevelUpdates():
global bookings
with mongo_client:
db = mongo_client['Uber']
mongo_collection = db['available']
cursor = mongo_collection.find({})
records = list(cursor)
# bookings[0] = records[0]
howmany = len(records)
print('found ' + str(howmany) + ' bookings!')
sorted_records = sorted(records, key=lambda t: t['source'])
# return json.dumps({"results": sorted_records })
for booking in sorted_records:
bookings[booking['_id']] = booking
@app.route("/")
def home():
return """Welcome to Uber backend!<br/>"""
##################
# ADMINISTRATION #
##################
# This runs once, before the first request is handled
# Used to bootstrap our collections
@app.before_first_request
def before_first_request_func():
applyCollectionLevelUpdates()
# This runs once before any request
@app.before_request
def before_request_func():
applyRecordLevelUpdates()
############################
# INFO on containerization #
############################
# To containerize a flask app:
# https://pythonise.com/series/learning-flask/building-a-flask-app-with-docker-compose
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
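For reference, a minimal client-side sketch of calling the /book endpoint defined above. The localhost:5000 address assumes Flask's default development port, and every field value is made up for illustration:

# hypothetical client for the /book endpoint; all values are illustrative
import requests

payload = {"source": "Hyderabad", "destination": "Chennai", "date": "2021-05-01",
           "startTime": "09:00", "endTime": "14:00", "user": "alice", "busnumber": "TS07"}
r = requests.post("http://localhost:5000/book", json=payload)
print(r.status_code, r.json())   # echoes the stored booking, including its generated _id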
| 29.079051
| 124
| 0.652984
| 857
| 7,357
| 5.472579
| 0.33839
| 0.03049
| 0.012793
| 0.014499
| 0.196802
| 0.196802
| 0.180597
| 0.166311
| 0.155224
| 0.155224
| 0
| 0.014393
| 0.206742
| 7,357
| 252
| 125
| 29.194444
| 0.789239
| 0.221286
| 0
| 0.27027
| 0
| 0.013514
| 0.1195
| 0.026073
| 0
| 0
| 0
| 0
| 0
| 1
| 0.101351
| false
| 0
| 0.135135
| 0.013514
| 0.310811
| 0.060811
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43d2040db0a01d747e5d0a9ffdc2859f95f69610
| 6,359
|
py
|
Python
|
sppas/sppas/src/models/acm/htkscripts.py
|
mirfan899/MTTS
|
3167b65f576abcc27a8767d24c274a04712bd948
|
[
"MIT"
] | null | null | null |
sppas/sppas/src/models/acm/htkscripts.py
|
mirfan899/MTTS
|
3167b65f576abcc27a8767d24c274a04712bd948
|
[
"MIT"
] | null | null | null |
sppas/sppas/src/models/acm/htkscripts.py
|
mirfan899/MTTS
|
3167b65f576abcc27a8767d24c274a04712bd948
|
[
"MIT"
] | null | null | null |
"""
..
---------------------------------------------------------------------
___ __ __ __ ___
/ | \ | \ | \ / the automatic
\__ |__/ |__/ |___| \__ annotation and
\ | | | | \ analysis
___/ | | | | ___/ of speech
http://www.sppas.org/
Use of this software is governed by the GNU Public License, version 3.
SPPAS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SPPAS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
This banner notice must not be removed.
---------------------------------------------------------------------
src.models.acm.htkscripts.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import os
import os.path
import logging
# ---------------------------------------------------------------------------
class sppasHtkScripts(object):
"""HTK-ASCII scripts reader/writer.
:organization: Laboratoire Parole et Langage, Aix-en-Provence, France
:license: GPL, v3
:copyright: Copyright (C) 2011-2018 Brigitte Bigi
:author: Brigitte Bigi
:contact: develop@sppas.org
This class is able to write all scripts of the VoxForge tutorial.
They are used to train acoustic models thanks to the HTK toolbox.
For details, refer to: http://www.voxforge.org/
"""
def __init__(self):
"""Create a sppasHtkScripts instance."""
self.configfile = ""
self.globalfile = ""
self.mkphones0file = ""
self.mkphones1file = ""
self.mktrifile = ""
self.maketriphonesfile = ""
self.silfile = ""
# -----------------------------------------------------------------------
def write_all(self, dirname):
"""Write all scripts at once.
Write scripts with their default name, in the given directory.
:param dirname: (str) a directory name (existing or to be created).
"""
if os.path.exists(dirname) is False:
os.mkdir(dirname)
self.write_global_ded(os.path.join(dirname, "global.ded"))
self.write_mkphones0_led(os.path.join(dirname, "mkphones0.led"))
self.write_mkphones1_led(os.path.join(dirname, "mkphones1.led"))
self.write_mktri_led(os.path.join(dirname, "mktri.led"))
self.write_maketriphones_ded(os.path.join(dirname, "maketriphones.ded"))
self.write_sil_hed(os.path.join(dirname, "sil.hed"))
# -----------------------------------------------------------------------
def write_global_ded(self, filename):
"""Write the htk script `global.ded`.
:param filename: (str) Name of the script file.
"""
logging.info('Write script file: {!s:s}'.format(filename))
with open(filename, "w") as fp:
fp.write("AS sp\n")
fp.write("RS cmu\n")
fp.write("MP sil sil sp\n")
fp.write("\n")
self.globalfile = filename
# -----------------------------------------------------------------------
def write_mkphones0_led(self, filename):
"""Write the htk script `mkphones0.led`.
:param filename: (str) Name of the script file.
"""
logging.info('Write script file: {!s:s}'.format(filename))
with open(filename, "w") as fp:
fp.write("EX\n")
fp.write("IS sil sil\n")
fp.write("DE sp\n")
fp.write("\n")
self.mkphones0file = filename
# -----------------------------------------------------------------------
def write_mkphones1_led(self, filename):
"""Write the htk script `mkphones1.led`.
:param filename: (str) Name of the script file.
"""
logging.info('Write script file: {!s:s}'.format(filename))
with open(filename, "w") as fp:
fp.write("EX\n")
fp.write("IS sil sil\n")
fp.write("\n")
self.mkphones1file = filename
# -----------------------------------------------------------------------
def write_mktri_led(self, filename):
"""Write the htk script `mktri.led`.
:param filename: (str) Name of the script file.
"""
logging.info('Write script file: {!s:s}'.format(filename))
with open(filename, "w") as fp:
fp.write("WB sp\n")
fp.write("WB sil\n")
fp.write("TC\n")
fp.write("\n")
self.mktrifile = filename
# -----------------------------------------------------------------------
def write_maketriphones_ded(self, filename):
"""Write the htk script `maketriphones.ded`.
:param filename: (str) Name of the script file.
"""
logging.info('Write script file: {!s:s}'.format(filename))
with open(filename, "w") as fp:
fp.write("AS sp\n")
fp.write("MP sil sil sp\n")
fp.write("TC\n")
fp.write("\n")
self.maketriphonesfile = filename
# -----------------------------------------------------------------------
def write_sil_hed(self, filename):
"""Write the htk script `sil.hed`.
:param filename: (str) Name of the script file.
"""
logging.info('Write script file: {!s:s}'.format(filename))
with open(filename, "w") as fp:
fp.write("AT 2 4 0.2 {sil.transP}\n")
fp.write("AT 4 2 0.2 {sil.transP}\n")
fp.write("AT 1 3 0.3 {sp.transP}\n")
fp.write("TI silst {sil.state[3],sp.state[2]}\n")
fp.write("\n")
self.silfile = filename
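A minimal usage sketch of the class above; the output directory name is an arbitrary example:

# hypothetical usage of sppasHtkScripts; "htk-scripts" is an example directory
scripts = sppasHtkScripts()
scripts.write_all("htk-scripts")   # creates the directory if needed and writes all six scripts
print(scripts.globalfile)          # htk-scripts/global.ded on POSIX systems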
| 32.610256
| 80
| 0.492845
| 703
| 6,359
| 4.375533
| 0.268848
| 0.054616
| 0.046814
| 0.03316
| 0.421326
| 0.380364
| 0.352731
| 0.290312
| 0.265605
| 0.265605
| 0
| 0.008132
| 0.284479
| 6,359
| 194
| 81
| 32.778351
| 0.667912
| 0.465797
| 0
| 0.453333
| 0
| 0
| 0.150391
| 0.009115
| 0
| 0
| 0
| 0
| 0
| 1
| 0.106667
| false
| 0
| 0.04
| 0
| 0.16
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43d3b50d90e2618726a0619c25ddcb995a36172f
| 2,961
|
py
|
Python
|
icekit/plugins/map/tests.py
|
ic-labs/django-icekit
|
c507ea5b1864303732c53ad7c5800571fca5fa94
|
[
"MIT"
] | 52
|
2016-09-13T03:50:58.000Z
|
2022-02-23T16:25:08.000Z
|
icekit/plugins/map/tests.py
|
ic-labs/django-icekit
|
c507ea5b1864303732c53ad7c5800571fca5fa94
|
[
"MIT"
] | 304
|
2016-08-11T14:17:30.000Z
|
2020-07-22T13:35:18.000Z
|
icekit/plugins/map/tests.py
|
ic-labs/django-icekit
|
c507ea5b1864303732c53ad7c5800571fca5fa94
|
[
"MIT"
] | 12
|
2016-09-21T18:46:35.000Z
|
2021-02-15T19:37:50.000Z
|
from mock import patch
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.contrib.auth import get_user_model
from django.core import exceptions
from django_dynamic_fixture import G
from django_webtest import WebTest
from icekit.models import Layout
from icekit.page_types.layout_page.models import LayoutPage
from icekit.utils import fluent_contents
from . import models
User = get_user_model()
class MapItemTestCase(WebTest):
def setUp(self):
self.embed_code = '''
<iframe
src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3312.0476344648832!2d151.19845715159963!3d-33.88842702741586!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x6b12b1d842ee9aa9%3A0xb0a19ac433ef0be8!2sThe+Interaction+Consortium!5e0!3m2!1sen!2sau!4v1496201264670"
width="600"
height="450"
frameborder="0"
style="border:0"
allowfullscreen
></iframe>
'''
self.cleaned_embed_code = '<iframe allowfullscreen="" frameborder="0" src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3312.0476344648832!2d151.19845715159963!3d-33.88842702741586!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x6b12b1d842ee9aa9%3A0xb0a19ac433ef0be8!2sThe+Interaction+Consortium!5e0!3m2!1sen!2sau!4v1496201264670" style="border: 0;"></iframe>'
self.layout_1 = G(
Layout,
template_name='icekit/layouts/default.html',
)
self.layout_1.content_types.add(
ContentType.objects.get_for_model(LayoutPage))
self.layout_1.save()
self.staff_1 = User.objects.create(
email='test@test.com',
is_staff=True,
is_active=True,
is_superuser=True,
)
self.page_1 = LayoutPage()
self.page_1.title = 'Test Page'
self.page_1.slug = 'test-page'
self.page_1.parent_site = Site.objects.first()
self.page_1.layout = self.layout_1
self.page_1.author = self.staff_1
self.page_1.status = LayoutPage.PUBLISHED
self.page_1.save()
self.map_1 = fluent_contents.create_content_instance(
models.MapItem,
self.page_1,
_embed_code=self.embed_code,
)
self.map_item = models.MapItem(
parent_type=ContentType.objects.get_for_model(type(self.page_1)),
parent_id=self.page_1.id,
placeholder=self.page_1.get_placeholder_by_slot('main')[0],
_embed_code=self.embed_code,
)
self.page_1.publish()
def test_map_renders(self):
response = self.app.get(self.page_1.get_published().get_absolute_url())
response.mustcontain(self.cleaned_embed_code)
def test_cleaned_embed_code(self):
self.assertEqual(self.map_1._cleaned_embed_code.strip(), self.cleaned_embed_code)
| 38.960526
| 381
| 0.67207
| 375
| 2,961
| 5.106667
| 0.341333
| 0.058486
| 0.065796
| 0.031332
| 0.315405
| 0.267363
| 0.240209
| 0.240209
| 0.240209
| 0.240209
| 0
| 0.12706
| 0.221209
| 2,961
| 75
| 382
| 39.48
| 0.703382
| 0
| 0
| 0.031746
| 0
| 0.031746
| 0.306991
| 0.009119
| 0
| 0
| 0
| 0
| 0.015873
| 1
| 0.047619
| false
| 0
| 0.174603
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43d619ff813d6467445c26ac811f7e5c110c5dd3
| 729
|
py
|
Python
|
terminalone/models/concept.py
|
amehta1/t1-python
|
4f7eb0bec7671b29baf3105b8cafafb373107e7b
|
[
"Apache-2.0"
] | 24
|
2015-07-09T18:49:10.000Z
|
2021-06-07T18:36:58.000Z
|
terminalone/models/concept.py
|
amehta1/t1-python
|
4f7eb0bec7671b29baf3105b8cafafb373107e7b
|
[
"Apache-2.0"
] | 100
|
2015-07-13T20:24:50.000Z
|
2020-08-10T11:16:39.000Z
|
terminalone/models/concept.py
|
amehta1/t1-python
|
4f7eb0bec7671b29baf3105b8cafafb373107e7b
|
[
"Apache-2.0"
] | 36
|
2015-07-09T18:51:48.000Z
|
2022-02-14T22:44:37.000Z
|
# -*- coding: utf-8 -*-
"""Provides concept object."""
from __future__ import absolute_import
from .. import t1types
from ..entity import Entity
class Concept(Entity):
"""Concept entity."""
collection = 'concepts'
resource = 'concept'
_relations = {
'advertiser',
}
_pull = {
'advertiser_id': int,
'created_on': t1types.strpt,
'id': int,
'name': None,
'status': t1types.int_to_bool,
'updated_on': t1types.strpt,
'version': int,
}
_push = _pull.copy()
_push.update({
'status': int,
})
def __init__(self, session, properties=None, **kwargs):
super(Concept, self).__init__(session, properties, **kwargs)
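To illustrate the conversion tables above, here is a small sketch of how the _pull callables could be applied to a raw API payload. The payload and the manual loop are illustrative only; the real deserialization is handled inside terminalone's Entity base class, and the date fields are omitted to keep the example self-contained:

# illustrative only: apply Concept._pull converters to a fake payload
raw = {"advertiser_id": "12", "id": "7", "version": "3", "name": "demo"}
converted = {k: (Concept._pull[k](v) if Concept._pull.get(k) else v)
             for k, v in raw.items()}
print(converted)   # {'advertiser_id': 12, 'id': 7, 'version': 3, 'name': 'demo'}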
| 22.78125
| 68
| 0.581619
| 73
| 729
| 5.493151
| 0.561644
| 0.064838
| 0.069825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009399
| 0.270233
| 729
| 31
| 69
| 23.516129
| 0.744361
| 0.08642
| 0
| 0
| 0
| 0
| 0.126718
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.125
| 0
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43d690157e44125280f30cea5097fb9b835832b6
| 932
|
py
|
Python
|
videofeed.py
|
dmeklund/asyncdemo
|
956f193c0fa38744965362966ac7f8ef224409b4
|
[
"MIT"
] | null | null | null |
videofeed.py
|
dmeklund/asyncdemo
|
956f193c0fa38744965362966ac7f8ef224409b4
|
[
"MIT"
] | null | null | null |
videofeed.py
|
dmeklund/asyncdemo
|
956f193c0fa38744965362966ac7f8ef224409b4
|
[
"MIT"
] | null | null | null |
"""
Mock up a video feed pipeline
"""
import asyncio
import logging
import sys
import cv2
logging.basicConfig(format="[%(thread)-5d]%(asctime)s: %(message)s")
logger = logging.getLogger('async')
logger.setLevel(logging.INFO)
async def process_video(filename):
    cap = cv2.VideoCapture(filename)
    tasks = list()
    frame_ind = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            # stop once the capture runs out of frames
            break
        tasks.append(asyncio.ensure_future(process_frame(frame, frame_ind)))
        frame_ind += 1
        await asyncio.sleep(0)
    cap.release()
    # gather takes the futures unpacked, not the list itself
    await asyncio.gather(*tasks)
async def process_frame(frame, frame_ind):
logger.info("Processing frame {}".format(frame_ind))
await asyncio.sleep(20.0)
logger.info("Finished processing frame {}".format(frame_ind))
def main():
loop = asyncio.get_event_loop()
loop.run_until_complete(process_video(sys.argv[1]))
logger.info("Completed")
if __name__ == '__main__':
main()
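The await asyncio.sleep(0) inside process_video is the usual idiom for handing control back to the event loop from a tight loop. A standalone sketch of the same pattern, with illustrative names:

# illustrative: sleep(0) lets other scheduled tasks run between iterations
import asyncio

async def ticker(name, n):
    for i in range(n):
        print(name, i)
        await asyncio.sleep(0)   # cooperative yield point

asyncio.get_event_loop().run_until_complete(
    asyncio.gather(ticker("a", 2), ticker("b", 2)))   # output interleaves a and b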
| 22.731707
| 76
| 0.687768
| 123
| 932
| 5.02439
| 0.471545
| 0.07767
| 0.048544
| 0.071197
| 0.174757
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013021
| 0.175966
| 932
| 40
| 77
| 23.3
| 0.791667
| 0.031116
| 0
| 0
| 0
| 0
| 0.119553
| 0.02905
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.148148
| 0
| 0.185185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43d763b4860a448a07b1ac979d461dd9025028b9
| 11,807
|
py
|
Python
|
parsers/read_lspci_and_glxinfo.py
|
mikeus9908/peracotta
|
c54c351acae8afec250185f4bc714a2f86c47c90
|
[
"MIT"
] | 3
|
2019-04-01T17:28:20.000Z
|
2020-11-19T17:25:32.000Z
|
parsers/read_lspci_and_glxinfo.py
|
mikeus9908/peracotta
|
c54c351acae8afec250185f4bc714a2f86c47c90
|
[
"MIT"
] | 142
|
2018-11-05T18:13:13.000Z
|
2022-03-12T17:43:40.000Z
|
parsers/read_lspci_and_glxinfo.py
|
mikeus9908/peracotta
|
c54c351acae8afec250185f4bc714a2f86c47c90
|
[
"MIT"
] | 10
|
2019-10-25T12:28:37.000Z
|
2021-05-17T17:32:56.000Z
|
#!/usr/bin/python3
"""
Read "lspci -v" and "glxinfo" outputs
"""
import re
from dataclasses import dataclass
from InputFileNotFoundError import InputFileNotFoundError
@dataclass
class VideoCard:
type = "graphics-card"
manufacturer_brand = ""
reseller_brand = ""
internal_name = ""
model = ""
capacity = -1 # bytes
warning = ""
def parse_lspci_output(gpu: VideoCard, lspci_path: str, interactive: bool = False):
try:
with open(lspci_path, "r") as f:
lspci_output = f.read()
except FileNotFoundError:
raise InputFileNotFoundError(lspci_path)
lspci_sections = lspci_output.split("\n\n")
for section in lspci_sections:
if "VGA compatible controller" in section:
            first_line = section.splitlines()[0].split(": ", 1)[1]  # removes "VGA compatible controller:"
second_line = section.splitlines()[1]
part_between_square_brackets = None
try:
# take the first string between [] from the first line
part_between_square_brackets = first_line.split("[")[1].split("]")[0]
except IndexError:
# there may not be an argument in between []
pass
if "Subsystem:" in second_line:
# The model or model family is often repeated here, but removing it automatically is complicated
gpu.reseller_brand = (
second_line.split("Subsystem: ")[1].split("[", 1)[0].strip()
)
gpu.reseller_brand = gpu.reseller_brand.replace(
"Integrated Graphics Controller", ""
)
# -----------------------------------------------------------------
# AMD/ATI
# -----------------------------------------------------------------
if part_between_square_brackets is not None and (
"AMD" in part_between_square_brackets
or "ATI" in part_between_square_brackets
):
gpu.manufacturer_brand = part_between_square_brackets
# take second string between []
gpu.model = first_line.split("[")[2].split("]")[0]
if "controller" in gpu.model:
gpu.model = section.splitlines()[1].split(" ")[-1]
# -----------------------------------------------------------------
# Nvidia
# -----------------------------------------------------------------
elif "NVIDIA" in first_line.upper():
gpu.manufacturer_brand = "Nvidia"
gpu.model = part_between_square_brackets
if gpu.reseller_brand != "":
pieces = gpu.reseller_brand.rsplit(" ", 1)
gpu.reseller_brand = pieces[0]
gpu.internal_name = pieces[1]
# -----------------------------------------------------------------
# Intel
# -----------------------------------------------------------------
elif "INTEL" in first_line.upper():
gpu.manufacturer_brand = "Intel"
if "Integrated Graphics" in first_line:
tmp_model = first_line.split("Intel Corporation ")[1].split(
" Integrated Graphics"
)[0]
# if there are no numbers, e.g. "Core Processor", tmp_model is not a model number
if not re.search("\\d+", tmp_model):
tmp_model = ""
elif "HD Graphics" in first_line:
tmp_model = (
first_line.split("Intel Corporation ")[1]
.split("(", 1)[0]
.strip()
)
elif "[" in first_line and "]" in first_line:
tmp_model = first_line.split("[")[1].split("]")[0]
else:
tmp_model = ""
if tmp_model != "":
gpu.model = tmp_model
else:
gpu.model = ""
# -----------------------------------------------------------------
# VIA
# -----------------------------------------------------------------
elif first_line.startswith("VIA"):
gpu.manufacturer_brand = "VIA"
gpu.model = part_between_square_brackets
tmp_model = first_line.split("[")[0]
i = 0
for i, char in enumerate("VIA Technologies, Inc. "):
if tmp_model[i] != char:
break
gpu.internal_name = tmp_model[i:].strip()
# -----------------------------------------------------------------
# SiS
# -----------------------------------------------------------------
elif part_between_square_brackets == "SiS":
# May be written somewhere else on other models, but we have so few SiS cards that it's difficult to
# find more examples. Also, they haven't made any video card in the last 15 years or so.
gpu.manufacturer_brand = part_between_square_brackets
if gpu.reseller_brand.lower() == "silicon integrated systems":
gpu.reseller_brand = "SiS"
gpu.model = first_line.split("]", 1)[1]
# These may be useful for non-integrated cards, however the example ones are all integrated
if " PCIE" in gpu.model:
gpu.model = gpu.model.split(" PCIE", 1)[0].strip()
elif " PCI/AGP" in gpu.model:
gpu.model = gpu.model.split(" PCI/AGP", 1)[0].strip()
if gpu.model in gpu.reseller_brand:
                    gpu.reseller_brand = gpu.reseller_brand.split(gpu.model, 1)[0].strip()
else:
gpu.manufacturer_brand = None
error = (
"I couldn't find the Video Card brand. The model was set to 'None' and is to be edited "
"logging into the TARALLO afterwards. The information you're looking for should be in the "
f"following 2 lines:\n{first_line}\n{second_line}\n"
)
if interactive:
print(error)
gpu.warning += error
if gpu.model is None:
error = (
"I couldn't find the Integrated Graphics model. The model was set to 'None' and is to be "
"edited logging into the TARALLO afterwards. The information you're looking for should be in "
f"the following 2 lines:\n{first_line}\n{second_line}\n"
)
if interactive:
print(error)
gpu.warning += error
else:
# Try to remove duplicate information
gpu.reseller_brand = gpu.reseller_brand.replace(gpu.model, "").strip()
if gpu.internal_name is not None:
# Same
gpu.reseller_brand = gpu.reseller_brand.replace(
gpu.internal_name, ""
).strip()
break
def parse_glxinfo_output(gpu: VideoCard, glxinfo_path: str):
try:
with open(glxinfo_path, "r") as f:
glxinfo_output = f.read()
except FileNotFoundError:
raise InputFileNotFoundError(glxinfo_path)
for i, line in enumerate(glxinfo_output.splitlines()):
# this line comes before the "Dedicated video memory" line
# this basically saves a default value if the dedicated memory line cannot be found
if "Video memory" in line:
try:
tmp_vid_mem = int(line.split(" ")[6].split(" ")[0][:-2])
tmp_vid_mem_multiplier = line[-2:]
except ValueError:
exit(-1)
return # To stop complaints from PyCharm
gpu.capacity = convert_video_memory_size(
tmp_vid_mem, tmp_vid_mem_multiplier
)
if "Dedicated video memory" in line:
try:
tmp_vram = int(line.split(" ")[7].split(" ")[0])
tmp_vram_multiplier = line[-2:]
except ValueError:
exit(-1)
return
capacity = convert_video_memory_size(tmp_vram, tmp_vram_multiplier)
if capacity < 0:
gpu.warning = "Could not find dedicated video memory"
if gpu.capacity < 0:
gpu.warning += ". The value cannot be trusted."
else:
gpu.capacity = capacity
break
if gpu.capacity > 0:
# Round to the next power of 2
# this may be different from human readable capacity...
rounded = 2 ** (gpu.capacity - 1).bit_length()
one_and_half = int(rounded / 2 * 1.5)
# Accounts for 3 GB VRAM cards and similar
# Yes they do exist, try to remove this part and watch tests fail (and the card was manually verified to be 3 GB)
if one_and_half >= gpu.capacity:
gpu.capacity = one_and_half
else:
gpu.capacity = rounded
def convert_video_memory_size(capacity, units_of_measure):
if units_of_measure == "GB":
capacity *= 1024 * 1024 * 1024
elif units_of_measure == "MB":
capacity *= 1024 * 1024
elif units_of_measure.upper() == "KB":
capacity *= 1024
else:
capacity = -1
return capacity
def read_lspci_and_glxinfo(
has_dedicated: bool, lspci_path: str, glxinfo_path: str, interactive: bool = False
):
gpu = VideoCard()
if has_dedicated:
parse_lspci_output(gpu, lspci_path, interactive)
parse_glxinfo_output(gpu, glxinfo_path)
else: # integrated_in_mobo or integrated_in_cpu
parse_lspci_output(gpu, lspci_path, interactive)
# don't parse glxinfo because the VRAM is part of the RAM and varies
gpu.capacity = None
# print("The VRAM capacity could not be detected. "
# "Please try looking for it on the Video Card or on the Internet. "
# "The capacity value defaulted to 'None'. "
# "For an integrated GPU, the VRAM may also be shared with the system RAM, so an empty value is acceptable.")
result = {
"type": "graphics-card",
"brand": gpu.reseller_brand.strip(),
"model": gpu.model.strip(),
"internal-name": gpu.internal_name.strip(),
"capacity-byte": gpu.capacity,
"working": "yes", # Indeed it is working
}
if gpu.manufacturer_brand is not None and gpu.reseller_brand is not None:
if gpu.manufacturer_brand.lower() != gpu.reseller_brand.lower():
result["brand-manufacturer"] = gpu.manufacturer_brand
return result
if __name__ == "__main__":
import argparse
import json
parser = argparse.ArgumentParser(description="Parse lspci/glxinfo output")
parser.add_argument("lspci", type=str, nargs=1, help="path to lspci output")
parser.add_argument("glxinfo", type=str, nargs=1, help="path to glxinfo output")
parser.add_argument(
"-d",
"--dedicated",
action="store_true",
default=False,
help="computer has dedicated GPU",
)
args = parser.parse_args()
try:
print(
json.dumps(
read_lspci_and_glxinfo(args.dedicated, args.lspci[0], args.glxinfo[0]),
indent=2,
)
)
except InputFileNotFoundError as e:
print(str(e))
exit(1)
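A worked sketch of the VRAM rounding logic in parse_glxinfo_output; the starting value is made up:

# illustrative walk-through of the power-of-two rounding above
cap = 3_170_000_000                        # ~2.95 GiB, as glxinfo might report
rounded = 2 ** (cap - 1).bit_length()      # 4294967296, i.e. 4 GiB
one_and_half = int(rounded / 2 * 1.5)      # 3221225472, i.e. 3 GiB
final = one_and_half if one_and_half >= cap else rounded
print(final)                               # 3221225472 -> the 3 GB card case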
| 40.023729
| 121
| 0.510206
| 1,249
| 11,807
| 4.670136
| 0.224179
| 0.042345
| 0.049374
| 0.04286
| 0.318704
| 0.271901
| 0.223727
| 0.143151
| 0.083319
| 0.083319
| 0
| 0.011693
| 0.348099
| 11,807
| 294
| 122
| 40.159864
| 0.746135
| 0.187008
| 0
| 0.21659
| 0
| 0.009217
| 0.11644
| 0.007749
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018433
| false
| 0.004608
| 0.023041
| 0
| 0.096774
| 0.018433
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43d87b5ab1e5e10305ebbe366e85481beb47273f
| 2,637
|
py
|
Python
|
chapter2/intogen-arrays/src/mrna/mrna_comb_gene_classif.py
|
chris-zen/phd-thesis
|
1eefdff8e7ca1910304e27ae42551dc64496b101
|
[
"Unlicense"
] | 1
|
2015-12-22T00:53:18.000Z
|
2015-12-22T00:53:18.000Z
|
chapter2/intogen-arrays/src/mrna/mrna_comb_gene_classif.py
|
chris-zen/phd-thesis
|
1eefdff8e7ca1910304e27ae42551dc64496b101
|
[
"Unlicense"
] | null | null | null |
chapter2/intogen-arrays/src/mrna/mrna_comb_gene_classif.py
|
chris-zen/phd-thesis
|
1eefdff8e7ca1910304e27ae42551dc64496b101
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
"""
Classify oncodrive gene results and prepare for combination
* Configuration parameters:
- The ones required by intogen.data.entity.EntityManagerFactory
* Input:
- oncodrive_ids: The mrna.oncodrive_genes to process
* Output:
- combinations: The mrna.combination prepared to be calculated
* Entities:
- mrna.oncodrive_genes
- mrna.combination
"""
import uuid
import json
from wok.task import Task
from wok.element import DataElement
from intogen.data.entity.server import EntityServer
from intogen.data.entity import types
def run(task):
# Initialization
task.check_conf(["entities"])
conf = task.conf
log = task.logger()
task.check_in_ports(["oncodrive_ids"])
task.check_out_ports(["combinations"])
oncodrive_port = task.ports["oncodrive_ids"]
combination_port = task.ports["combinations"]
es = EntityServer(conf["entities"])
em = es.manager()
log.info("Indexing available combination results ...")
comb_results_index = em.group_ids(
["icdo_topography", "icdo_morphology", "id_type"],
types.MRNA_COMBINATION, unique = True)
ENSEMBL_GENE = "ensembl:gene"
classif = {}
log.info("Classifying oncodrive results ...")
for oid in oncodrive_port:
o = em.find(oid, types.MRNA_ONCODRIVE_GENES)
if o is None:
log.error("{0} not found: {1}".format(types.MRNA_ONCODRIVE_GENES, oid))
continue
okey = (o["study_id"], o["platform_id"], o["icdo_topography"], o["icdo_morphology"])
key = (o["icdo_topography"], o["icdo_morphology"], ENSEMBL_GENE)
log.debug("Oncodrive results ({0}) [{1}] classified into ({2}) ...".format(", ".join(okey), oid, ", ".join(key)))
if key in classif:
classif[key] += [o]
else:
classif[key] = [o]
log.info("Preparing combinations ...")
for key in sorted(classif):
if key in comb_results_index:
cid = comb_results_index[key][0]
c = em.find(cid, types.MRNA_COMBINATION)
if c is None:
log.error("{0} not found: {1}".format(types.MRNA_COMBINATION, cid))
return
else:
c = DataElement(key_sep = "/")
c["id"] = cid = str(uuid.uuid4())
c["icdo_topography"] = key[0]
c["icdo_morphology"] = key[1]
c["id_type"] = ENSEMBL_GENE
olist = classif[key]
log.info("({0}) [{1}] --> {2} results".format(", ".join(key), cid, len(olist)))
ids = c.create_list()
flist = c.create_list()
for o in olist:
ids += [o["id"]]
flist += [o["results_file"]]
c["source"] = src = c.create_element()
src["type"] = types.MRNA_ONCODRIVE_GENES
src["ids"] = ids
c["files"] = flist
combination_port.write(json.dumps(c.to_native()))
em.close()
if __name__ == "__main__":
Task(run).start()
| 21.975
| 115
| 0.680319
| 365
| 2,637
| 4.753425
| 0.334247
| 0.031124
| 0.051873
| 0.039769
| 0.079539
| 0.079539
| 0.044957
| 0.044957
| 0.044957
| 0.044957
| 0
| 0.006306
| 0.158134
| 2,637
| 119
| 116
| 22.159664
| 0.775225
| 0.14562
| 0
| 0.03125
| 0
| 0
| 0.22247
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015625
| false
| 0
| 0.09375
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43d983edaa81a2f049c07647c3d3908b2dea574f
| 1,605
|
py
|
Python
|
configs/utils/config_generator.py
|
user-wu/SOD_eval_metrics
|
d5b8804580cb52a4237c8e613818d10591dc6597
|
[
"MIT"
] | null | null | null |
configs/utils/config_generator.py
|
user-wu/SOD_eval_metrics
|
d5b8804580cb52a4237c8e613818d10591dc6597
|
[
"MIT"
] | null | null | null |
configs/utils/config_generator.py
|
user-wu/SOD_eval_metrics
|
d5b8804580cb52a4237c8e613818d10591dc6597
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from matplotlib import colors
# matplotlib's colors.cnames defines 148 named colors in total
_COLOR_Genarator = iter(
    sorted(
        [
            color
            for name, color in colors.cnames.items()
            # keep the grays, drop red/white and the pale "light*" colors;
            # as originally written, the `or` chain accepted every color
            if (name not in ["red", "white"] and not name.startswith("light")) or "gray" in name
        ]
    )
)
def curve_info_generator():
line_style_flag = True
def _template_generator(
method_info: dict, method_name: str, line_color: str = None, line_width: int = 3
) -> dict:
nonlocal line_style_flag
template_info = dict(
path_dict=method_info,
curve_setting=dict(
line_style="-" if line_style_flag else "--",
line_label=method_name,
line_width=line_width,
),
)
print(method_name)
if method_name == "Ours":
template_info["curve_setting"]["line_color"] = 'red'
template_info["curve_setting"]["line_style"] = '-'
# line_style_flag = not line_style_flag
else:
if line_color is not None:
template_info["curve_setting"]["line_color"] = line_color
else:
template_info["curve_setting"]["line_color"] = next(_COLOR_Genarator)
line_style_flag = not line_style_flag
return template_info
return _template_generator
def simple_info_generator():
def _template_generator(method_info: dict, method_name: str) -> dict:
template_info = dict(path_dict=method_info, label=method_name)
return template_info
return _template_generator
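A small usage sketch of curve_info_generator; method names and path dicts are made up. Each call flips between solid and dashed styles, except for the pinned "Ours" entry, which stays solid red:

# hypothetical usage; path dicts are placeholders
make_curve = curve_info_generator()
cfg_a = make_curve({"path": "a.json"}, "MethodA")    # line_style "-"
cfg_b = make_curve({"path": "b.json"}, "MethodB")    # line_style "--"
cfg_ours = make_curve({"path": "o.json"}, "Ours")    # forced red, solid
print(cfg_a["curve_setting"]["line_style"], cfg_b["curve_setting"]["line_style"])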
| 29.181818
| 93
| 0.598754
| 186
| 1,605
| 4.806452
| 0.284946
| 0.090604
| 0.10179
| 0.107383
| 0.479866
| 0.448546
| 0.246085
| 0.105145
| 0.105145
| 0
| 0
| 0.0045
| 0.307788
| 1,605
| 54
| 94
| 29.722222
| 0.80018
| 0.042991
| 0
| 0.146341
| 0
| 0
| 0.078329
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097561
| false
| 0
| 0.02439
| 0
| 0.219512
| 0.02439
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43db9748cf12932e64e00e512404058350f2661e
| 1,151
|
py
|
Python
|
core/sms_service.py
|
kartik1000/jcc-registration-portal
|
053eade1122fa760ae112a8599a396d68dfb16b8
|
[
"MIT"
] | null | null | null |
core/sms_service.py
|
kartik1000/jcc-registration-portal
|
053eade1122fa760ae112a8599a396d68dfb16b8
|
[
"MIT"
] | null | null | null |
core/sms_service.py
|
kartik1000/jcc-registration-portal
|
053eade1122fa760ae112a8599a396d68dfb16b8
|
[
"MIT"
] | null | null | null |
from urllib.parse import urlencode
from decouple import config
import hashlib
import requests
BASE = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
auth_key = config('AUTH_KEY')
url = 'http://sms.globehost.com/api/sendhttp.php?'
def encode_base(num, array=BASE):
    if num == 0:
        return array[0]
retarr = []
base = len(array)
while num:
num, res = divmod(num, base)
retarr.append(array[res])
retarr.reverse()
return ''.join(retarr)[:6]
def generate(alphanum):
short = (hashlib.md5(alphanum.encode())).hexdigest()
short = int(short, 16)
short = encode_base(short)
return short
def send_message(team_name, team_id, contact):
message = 'Your unique team ID for Junior Code Cracker 2k18 is ' + \
team_id + '.Kindly take note and submit this at the event.'
data = {
'authkey': auth_key,
'mobiles': contact,
'message': message,
'sender': 'GNULUG',
'route': '4',
}
data_encoded = urlencode(data)
r = requests.get(url + data_encoded)
print('Message Sent Successfully !!')
return r.status_code
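A quick check of the base-62 encoding above; these values follow directly from the digit table in BASE:

# worked examples for encode_base; BASE has 62 symbols, so 62 encodes as "10"
assert encode_base(0) == "0"
assert encode_base(61) == "Z"
assert encode_base(62) == "10"
# generate() md5-hashes a string and keeps the first 6 base-62 digits
print(generate("Junior Code Crackers"))   # a 6-character code; exact value depends on the md5 digest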
| 23.979167
| 72
| 0.644657
| 140
| 1,151
| 5.214286
| 0.578571
| 0.028767
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022676
| 0.23371
| 1,151
| 47
| 73
| 24.489362
| 0.804989
| 0
| 0
| 0
| 0
| 0
| 0.241739
| 0.053913
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.111111
| 0
| 0.305556
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43dc511c1276023b6e01df3b43e2f8d7dd243462
| 1,522
|
py
|
Python
|
scripts/fetch_images.py
|
Protagonistss/sanic-for-v3
|
ba7e94273b77914b8d85d67cf513041ada00780d
|
[
"MIT"
] | null | null | null |
scripts/fetch_images.py
|
Protagonistss/sanic-for-v3
|
ba7e94273b77914b8d85d67cf513041ada00780d
|
[
"MIT"
] | null | null | null |
scripts/fetch_images.py
|
Protagonistss/sanic-for-v3
|
ba7e94273b77914b8d85d67cf513041ada00780d
|
[
"MIT"
] | null | null | null |
import sys
import os
sys.path.append(os.pardir)
import random
import time
import requests
from contextlib import closing
from help import utils
from threading import Thread
def get_train_set_path(path: str):
create_path = utils.join_root_path(path)
return create_path
def create_train_set_dir(path='auth-set'):
create_path = get_train_set_path(path)
is_existed = os.path.exists(create_path)
if not is_existed:
os.mkdir(create_path)
def gen_image_name(char_pool):
prefix = ''
for i in range(4):
prefix += random.choice(char_pool)
suffix = str(time.time()).replace('.', '')
return "{}_{}".format(prefix, suffix)
def gen_image_all_url(path):
rule = '0123456789'
return '{}/{}.png'.format(path, gen_image_name(rule))
def get_image(url, count=20000, path='auth-set'):
create_train_set_dir(path)
for loop in range(count):
response = requests.get(url, verify=False, stream=True)
with closing(response) as response:
with open(gen_image_all_url(get_train_set_path(path)), 'wb') as f:
for i in response.iter_content(chunk_size=512):
f.write(i)
        print('Image {} saved successfully'.format(loop + 1))
def main():
get_image('https://gray.930pm.cn/home.php/Login/verify_c', path='auth-set')
if __name__ == '__main__':
t1 = Thread(target=main)
t2 = Thread(target=main)
t3 = Thread(target=main)
t4 = Thread(target=main)
t1.start()
t2.start()
t3.start()
t4.start()
| 24.15873
| 79
| 0.660972
| 223
| 1,522
| 4.286996
| 0.408072
| 0.041841
| 0.066946
| 0.047071
| 0.103556
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025726
| 0.208279
| 1,522
| 62
| 80
| 24.548387
| 0.767635
| 0
| 0
| 0
| 0
| 0
| 0.074901
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0
| 0.173913
| 0
| 0.369565
| 0.021739
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43dd49ec321203c525ba8f13879673eb4d300e9f
| 3,912
|
py
|
Python
|
GeneralStats/example.py
|
haoruilee/statslibrary
|
01494043bc7fb82d4aa6d7d550a4e7dc2ac0503a
|
[
"MIT"
] | 58
|
2019-02-04T13:53:16.000Z
|
2022-02-24T02:59:55.000Z
|
GeneralStats/example.py
|
haoruilee/statslibrary
|
01494043bc7fb82d4aa6d7d550a4e7dc2ac0503a
|
[
"MIT"
] | null | null | null |
GeneralStats/example.py
|
haoruilee/statslibrary
|
01494043bc7fb82d4aa6d7d550a4e7dc2ac0503a
|
[
"MIT"
] | 19
|
2019-03-21T01:54:55.000Z
|
2021-12-03T13:55:16.000Z
|
import GeneralStats as gs
import numpy as np
from scipy.stats import skew
from scipy.stats import kurtosistest
import pandas as pd
if __name__ == "__main__":
gen=gs.GeneralStats()
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
print("data = ", data)
print("data1 = ", data1)
res=gen.average(data,rowvar=True)
res1=gen.average(data1,rowvar=True)
print("data平均值 = ",res)
print("data1平均值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.median(data,rowvar=True)
res1=gen.median(data1,rowvar=True)
print("data中位值 = ",res)
print("data1中位值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.mode(data,rowvar=True)
res1=gen.mode(data1,rowvar=True)
print("data众数值 = ",res)
print("data1众数值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
    res=gen.quantile(data,0.5,rowvar=True,interpolation='lower')   # with an even number of elements, the 0.5 quantile under 'midpoint' equals the median
    res1=gen.quantile(data1,0.5,rowvar=True,interpolation='lower') # with an odd number of elements, the 0.5 quantile under 'lower' equals the median
    print("data 0.5 quantile = ",res)
    print("data1 0.5 quantile = ",res1)
res=gen.quantile(data,0.25,rowvar=True,interpolation='lower')
res1=gen.quantile(data1,0.25,rowvar=True,interpolation='lower')
print("data 0.25分位数值s = ",res)
print("data1 0.25分位数值 = ",res1)
res=gen.quantile(data,0.75,rowvar=True,interpolation='lower')
res1=gen.quantile(data1,0.75,rowvar=True,interpolation='lower')
print("data 0.75分位数值 = ",res)
print("data1 0.75分位数值 = ",res1)
res=gen.quantile(data,1.0,rowvar=True,interpolation='lower')
res1=gen.quantile(data1,1.0,rowvar=True,interpolation='lower')
print("data 1.0分位数值 = ",res)
print("data1 1.0分位数值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.range(data,rowvar=True)
res1=gen.range(data1,rowvar=True)
print("data极差 = ",res)
print("data1极差 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.variance(data,rowvar=True)
res1=gen.variance(data1,rowvar=True)
print("data方差 = ",res)
print("data1方差 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.standard_dev(data,rowvar=True)
res1=gen.standard_dev(data1,rowvar=True)
print("data标准差 = ",res)
print("data1标准差 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.skewness(data,rowvar=True)
res1=gen.skewness(data1,rowvar=True)
print("data偏度 = ",res)
print("data1偏度 = ",res1)
res=np.array([skew(data[0]),skew(data[1]),skew(data[2]),skew(data[3])])
print("使用scipy skew方法验证的data偏度 = ",res)
res1=np.array(skew(data1))
print("使用scipy skew方法验证的data1偏度 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([53, 61, 49, 66, 78, 47])
res=gen.kurtosis(data,rowvar=True)
res1=gen.kurtosis(data1,rowvar=True)
print("data峰度 = ",res)
print("data1峰度 = ",res1)
data_0=pd.Series(data[0])
data_1=pd.Series(data[1])
data_2=pd.Series(data[2])
data_3=pd.Series(data[3])
print("使用pandas kurt方法验证的data峰度 = ",[data_0.kurt(),data_1.kurt(),data_2.kurt(),data_3.kurt()])
data1=pd.Series(data1)
print("使用pandas kurt方法验证的data1峰度 = ",data1.kurt())
| 36.222222
| 109
| 0.576431
| 666
| 3,912
| 3.358859
| 0.129129
| 0.024139
| 0.024139
| 0.048279
| 0.49173
| 0.39249
| 0.323201
| 0.289227
| 0.267769
| 0.223961
| 0
| 0.114954
| 0.201687
| 3,912
| 107
| 110
| 36.560748
| 0.601345
| 0.018149
| 0
| 0.197674
| 0
| 0
| 0.123357
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.05814
| 0
| 0.05814
| 0.348837
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43de15a64fd73557d8ace8fe63e08534f03c9747
| 400
|
py
|
Python
|
intro/matplotlib/examples/plot_good.py
|
zmoon/scipy-lecture-notes
|
75a89ddedeb48930dbdb6fe25a76e9ef0587ae21
|
[
"CC-BY-4.0"
] | 2,538
|
2015-01-01T04:58:41.000Z
|
2022-03-31T21:06:05.000Z
|
intro/matplotlib/examples/plot_good.py
|
zmoon/scipy-lecture-notes
|
75a89ddedeb48930dbdb6fe25a76e9ef0587ae21
|
[
"CC-BY-4.0"
] | 362
|
2015-01-18T14:16:23.000Z
|
2021-11-18T16:24:34.000Z
|
intro/matplotlib/examples/plot_good.py
|
zmoon/scipy-lecture-notes
|
75a89ddedeb48930dbdb6fe25a76e9ef0587ae21
|
[
"CC-BY-4.0"
] | 1,127
|
2015-01-05T14:39:29.000Z
|
2022-03-25T08:38:39.000Z
|
"""
A simple, good-looking plot
===========================
Demoing some simple features of matplotlib
"""
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(5, 4), dpi=72)
axes = fig.add_axes([0.01, 0.01, .98, 0.98])
X = np.linspace(0, 2, 200)
Y = np.sin(2*np.pi*X)
plt.plot(X, Y, lw=2)
plt.ylim(-1.1, 1.1)
plt.grid()
plt.show()
| 18.181818
| 44
| 0.625
| 73
| 400
| 3.410959
| 0.575342
| 0.024096
| 0.024096
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075145
| 0.135
| 400
| 21
| 45
| 19.047619
| 0.644509
| 0.2475
| 0
| 0
| 0
| 0
| 0.010239
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43de29ccab29a96dd8a22a7b82fb926f80943d99
| 4,087
|
py
|
Python
|
pfio/_context.py
|
HiroakiMikami/pfio
|
1ac997dcba7babd5d91dd8c4f2793d27a6bab69b
|
[
"MIT"
] | 24
|
2020-05-23T13:00:27.000Z
|
2022-02-17T05:20:51.000Z
|
pfio/_context.py
|
HiroakiMikami/pfio
|
1ac997dcba7babd5d91dd8c4f2793d27a6bab69b
|
[
"MIT"
] | 88
|
2020-05-01T06:56:50.000Z
|
2022-03-16T07:15:34.000Z
|
pfio/_context.py
|
HiroakiMikami/pfio
|
1ac997dcba7babd5d91dd8c4f2793d27a6bab69b
|
[
"MIT"
] | 9
|
2020-05-07T05:47:35.000Z
|
2022-02-09T05:42:56.000Z
|
import os
import re
from typing import Tuple
from pfio._typing import Union
from pfio.container import Container
from pfio.io import IO, create_fs_handler
class FileSystemDriverList(object):
def __init__(self):
# TODO(tianqi): dynamically create this list
# as well as the patterns upon loading the pfio module.
self.scheme_list = ["hdfs", "posix"]
self.posix_pattern = re.compile(r"file:\/\/(?P<path>.+)")
self.hdfs_pattern = re.compile(r"(?P<path>hdfs:\/\/.+)")
self.pattern_list = {"hdfs": self.hdfs_pattern,
"posix": self.posix_pattern, }
def _determine_fs_type(self, path: str) -> Tuple[str, str, bool]:
        if path is not None:
for fs_type, pattern in self.pattern_list.items():
ret = pattern.match(path)
if ret:
return (fs_type, ret.groupdict()["path"], True)
return ("posix", path, False)
def format_path(self, fs: IO, path: str) -> Tuple[str, bool]:
fs_type = fs.type
if fs_type in self.pattern_list.keys():
pattern = self.pattern_list[fs_type]
ret = pattern.match(path)
if ret:
return (ret.groupdict()["path"], True)
else:
return (path, False)
else:
return (path, False)
def get_handler_from_path(self, path: str) -> Tuple[IO, str, bool]:
(fs_type, actual_path, is_URI) = self._determine_fs_type(path)
handler = create_fs_handler(fs_type)
return (handler, actual_path, is_URI)
def get_handler_for_root(self,
uri_or_handler_name: str) -> Tuple[IO, str, bool]:
if uri_or_handler_name in self.pattern_list.keys():
return (create_fs_handler(uri_or_handler_name), "", False)
else:
(new_handler, actual_path, is_URI) = self.get_handler_from_path(
uri_or_handler_name)
new_handler.root = actual_path
return (new_handler, actual_path, is_URI)
def is_supported_scheme(self, scheme: str) -> bool:
return scheme in self.scheme_list
class DefaultContext(object):
def __init__(self):
self._fs_handler_list = FileSystemDriverList()
self._root = ""
self._default_context = \
self._fs_handler_list.get_handler_for_root("posix")[0]
def set_root(self, uri_or_handler: Union[str, IO]) -> None:
# TODO(check) if root is directory
if isinstance(uri_or_handler, IO):
handler = uri_or_handler
self._root = ""
else:
(handler, self._root, is_URI) = \
self.get_handler_by_name(uri_or_handler)
assert handler is not None
if self._root:
if not handler.isdir(self._root):
raise RuntimeError("the URI does not point to a directory")
self._default_context = handler
def get_handler(self, path: str = "") -> Tuple[IO, str]:
(handler, formatted_path,
is_URI) = self._fs_handler_list.get_handler_from_path(path)
if not is_URI:
actual_path = os.path.join(self._root, formatted_path)
return (self._default_context, actual_path)
else:
return (handler, formatted_path)
def open_as_container(self, path: str) -> Container:
(handler, formatted_path,
is_URI) = self._fs_handler_list.get_handler_from_path(path)
if not is_URI:
actual_path = os.path.join(self._root, formatted_path)
handler = self._default_context
else:
actual_path = formatted_path
self._root = ""
return handler.open_as_container(actual_path)
def get_handler_by_name(self, path: str) -> Tuple[IO, str, bool]:
return self._fs_handler_list.get_handler_for_root(path)
def get_root_dir(self) -> str:
return self._root
def is_supported_scheme(self, scheme: str) -> bool:
return self._fs_handler_list.is_supported_scheme(scheme)
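A minimal sketch of the scheme dispatch performed by _determine_fs_type above. The paths are illustrative; constructing the list only compiles the regexes, no handler is created:

# illustrative: how URIs are classified before a handler is created
drivers = FileSystemDriverList()
print(drivers._determine_fs_type("file:///tmp/data"))   # ('posix', '/tmp/data', True)
print(drivers._determine_fs_type("hdfs://host/dir"))    # ('hdfs', 'hdfs://host/dir', True)
print(drivers._determine_fs_type("relative/path"))      # ('posix', 'relative/path', False)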
| 35.232759
| 79
| 0.614387
| 528
| 4,087
| 4.445076
| 0.168561
| 0.025565
| 0.040903
| 0.04346
| 0.325522
| 0.261184
| 0.220281
| 0.163613
| 0.13464
| 0.097997
| 0
| 0.000342
| 0.284805
| 4,087
| 115
| 80
| 35.53913
| 0.8026
| 0.031563
| 0
| 0.306818
| 0
| 0
| 0.029084
| 0.010622
| 0
| 0
| 0
| 0.008696
| 0.011364
| 1
| 0.147727
| false
| 0
| 0.068182
| 0.045455
| 0.409091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43e09c3343b0c13466ea8190e66d19dfafb80ae6
| 9,330
|
py
|
Python
|
parser/fase2/team19/Analisis_Ascendente/Instrucciones/PLPGSQL/Ifpl.py
|
Josue-Zea/tytus
|
f9e4be9a8c03eb698fade7a748972e4f52d46685
|
[
"MIT"
] | 35
|
2020-12-07T03:11:43.000Z
|
2021-04-15T17:38:16.000Z
|
parser/fase2/team19/Analisis_Ascendente/Instrucciones/PLPGSQL/Ifpl.py
|
Josue-Zea/tytus
|
f9e4be9a8c03eb698fade7a748972e4f52d46685
|
[
"MIT"
] | 47
|
2020-12-09T01:29:09.000Z
|
2021-01-13T05:37:50.000Z
|
parser/fase2/team19/Analisis_Ascendente/Instrucciones/PLPGSQL/Ifpl.py
|
Josue-Zea/tytus
|
f9e4be9a8c03eb698fade7a748972e4f52d46685
|
[
"MIT"
] | 556
|
2020-12-07T03:13:31.000Z
|
2021-06-17T17:41:10.000Z
|
import Analisis_Ascendente.Instrucciones.PLPGSQL.EjecutarFuncion as EjecutarFuncion
from Analisis_Ascendente.Instrucciones.PLPGSQL.plasignacion import Plasignacion
from Analisis_Ascendente.Instrucciones.instruccion import Instruccion
from Analisis_Ascendente.Instrucciones.Create.createTable import CreateTable
from Analisis_Ascendente.Instrucciones.Create.createDatabase import CreateReplace
from Analisis_Ascendente.Instrucciones.Select.select import Select
from Analisis_Ascendente.Instrucciones.Use_Data_Base.useDB import Use
from Analisis_Ascendente.Instrucciones.Select.select1 import selectTime
import Analisis_Ascendente.Instrucciones.Insert.insert as insert_import
from Analisis_Ascendente.Instrucciones.Select.Select2 import Selectp3
from Analisis_Ascendente.Instrucciones.Select import selectInst
from Analisis_Ascendente.Instrucciones.Expresiones.Expresion import Expresion
from Analisis_Ascendente.Instrucciones.Drop.drop import Drop
from Analisis_Ascendente.Instrucciones.Alter.alterDatabase import AlterDatabase
from Analisis_Ascendente.Instrucciones.Alter.alterTable import AlterTable
from Analisis_Ascendente.Instrucciones.Update.Update import Update
from Analisis_Ascendente.Instrucciones.Delete.delete import Delete
from Analisis_Ascendente.Instrucciones.Select import SelectDist
from Analisis_Ascendente.Instrucciones.Type.type import CreateType
#----------------------------------Imports FASE2--------------------------
from Analisis_Ascendente.Instrucciones.Index.Index import Index
from Analisis_Ascendente.Instrucciones.PLPGSQL.createFunction import CreateFunction
from Analisis_Ascendente.Instrucciones.Index.DropIndex import DropIndex
from Analisis_Ascendente.Instrucciones.Index.AlterIndex import AlterIndex
from Analisis_Ascendente.Instrucciones.PLPGSQL.DropProcedure import DropProcedure
from Analisis_Ascendente.Instrucciones.PLPGSQL.CreateProcedure import CreateProcedure
from Analisis_Ascendente.Instrucciones.PLPGSQL.CasePL import CasePL
from Analisis_Ascendente.Instrucciones.PLPGSQL.plCall import plCall
from Analisis_Ascendente.Instrucciones.PLPGSQL.dropFunction import DropFunction
import C3D.GeneradorEtiquetas as GeneradorEtiquetas
import C3D.GeneradorTemporales as GeneradorTemporales
import Analisis_Ascendente.reportes.Reportes as Reportes
class Ifpl(Instruccion):
''' #1 If
#2 If elif else
#3 If else '''
def __init__(self, caso,e_if,s_if,elif_s,s_else, fila, columna):
self.caso = caso
self.e_if = e_if
self.s_if = s_if
self.elif_s = elif_s
self.s_else = s_else
self.fila = fila
self.columna = columna
def ejecutar(self,tsglobal,ts, consola, exceptions):
try:
if self.caso == 1:
resultado = Expresion.Resolver(self.e_if, ts, consola, exceptions)
if resultado == True:
for x in range(0, len(self.s_if)):
self.procesar_instrucciones(self.s_if[x],ts,consola,exceptions,tsglobal)
else:
pass
            elif self.caso == 2:
                print('hello')
else:
resultado = Expresion.Resolver(self.e_if, ts, consola, exceptions)
if resultado == True:
for x in range(0, len(self.s_if)):
self.procesar_instrucciones(self.s_if[x], ts, consola, exceptions,tsglobal)
else:
for x in range(0, len(self.s_else)):
self.procesar_instrucciones(self.s_else[x],ts,consola,exceptions,tsglobal)
except:
consola.append("XX000 : internal_error")
def procesar_instrucciones(self,instr,ts,consola,exceptions,tsglobal):
if isinstance(instr, CreateReplace):
CreateReplace.ejecutar(instr, ts, consola, exceptions)
        elif isinstance(instr, Select):
            if instr.caso == 1:
                consola.append('case 1')
                selectTime.ejecutar(instr, ts, consola, exceptions, True)
            elif instr.caso == 2:
                consola.append('case 2')
                variable = SelectDist.Select_Dist()
                SelectDist.Select_Dist.ejecutar(variable, instr, ts, consola, exceptions)
            elif instr.caso == 3:
                consola.append('case 3')
                variable = selectInst.Select_inst()
                selectInst.Select_inst.ejecutar(variable, instr, ts, consola, exceptions)
            elif instr.caso == 4:
                consola.append('case 4')
                Selectp3.ejecutar(instr, ts, consola, exceptions, True)
            elif instr.caso == 6:
                consola.append('case 6')
elif isinstance(instr, CreateTable):
CreateTable.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, Use):
Use.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, insert_import.InsertInto):
insert_import.InsertInto.ejecutar(instr, ts, consola, exceptions)
# print("Ejecute un insert")
elif isinstance(instr, Drop):
Drop.ejecutar(instr, ts, consola, exceptions)
# print("Ejecute drop")
elif isinstance(instr, AlterDatabase):
AlterDatabase.ejecutar(instr, ts, consola, exceptions)
# print("Ejecute alter database")
elif isinstance(instr, AlterTable):
AlterTable.ejecutar(instr, ts, consola, exceptions)
# print("Ejecute alter table")
elif isinstance(instr, Delete):
Delete.ejecutar(instr, ts, consola, exceptions)
# print("Ejecute delete")
elif isinstance(instr, Update):
Update.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, CreateType):
CreateType.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, Index):
Index.ejecutar(instr, ts, consola, exceptions)
# print("Ejecute Index")
elif isinstance(instr, CreateFunction):
CreateFunction.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, DropFunction):
DropFunction.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, DropIndex):
DropIndex.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, AlterIndex):
AlterIndex.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, DropProcedure):
DropProcedure.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, CreateProcedure):
CreateProcedure.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, CasePL):
CasePL.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, plCall):
plCall.ejecutar(instr, ts, consola, exceptions)
elif isinstance(instr, Plasignacion):
EjecutarFuncion.ejecutarPlasignacionIf(instr,ts,consola,exceptions,tsglobal)
elif isinstance(instr, Ifpl):
instr.ejecutar(tsglobal,ts,consola,exceptions)
else:
return
def getC3D(self, lista_optimizaciones_C3D):
etiqueta_if = GeneradorEtiquetas.nueva_etiqueta()
etiqueta_else = GeneradorEtiquetas.nueva_etiqueta()
etiqueta_salida = GeneradorEtiquetas.nueva_etiqueta()
e_if = self.e_if.getC3D(lista_optimizaciones_C3D)
noOptimizado = '''if %s: goto .%s <br>
goto .%s<br>
label .%s<br>
<instrucciones><br>
label .%s''' % (e_if['tmp'], etiqueta_if, etiqueta_else, etiqueta_if, etiqueta_else)
optimizado = '''if not %s: goto .%s <br>
<instrucciones><br>
label .%s''' % (e_if['tmp'], etiqueta_else, etiqueta_else)
optimizacion1 = Reportes.ListaOptimizacion(noOptimizado, optimizado, Reportes.TipoOptimizacion.REGLA3)
lista_optimizaciones_C3D.append(optimizacion1)
sentencias_if = ''
for sentencias in self.s_if:
sentencias_if += sentencias.getC3D(lista_optimizaciones_C3D)
c3d = '''
%s
if not %s: goto .%s
%s
goto .%s
''' % (e_if['code'], e_if['tmp'], etiqueta_else, sentencias_if, etiqueta_salida)
if self.s_else is not None:
sentencias_else = ''
for sentencias in self.s_else:
sentencias_else += sentencias.getC3D(lista_optimizaciones_C3D)
c3d += ''' label .%s
%s
label .%s''' % (etiqueta_else, sentencias_else, etiqueta_salida)
else:
c3d += ''' label .%s
label .%s
''' % (etiqueta_else, etiqueta_salida)
return c3d
def get_quemado(self):
sententias_if = ''
for sentencia in self.s_if:
sententias_if += sentencia.get_quemado() + ';\n'
quemado = ''' if %s then
%s
''' % (self.e_if.get_quemado(), sententias_if)
if self.s_else is not None:
sentencias_else = ''
for sentencia in self.s_else:
sentencias_else += sentencia.get_quemado() + ';\n'
quemado += '''ELSE
%s
''' % sentencias_else
quemado += ' end if'
return quemado
| 47.121212
| 111
| 0.653805
| 977
| 9,330
| 6.116684
| 0.138178
| 0.048193
| 0.10174
| 0.152276
| 0.479418
| 0.309739
| 0.271921
| 0.24247
| 0.11178
| 0.077644
| 0
| 0.006305
| 0.251983
| 9,330
| 197
| 112
| 47.360406
| 0.849979
| 0.028296
| 0
| 0.148571
| 0
| 0
| 0.040158
| 0.005656
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0.005714
| 0.188571
| 0
| 0.24
| 0.005714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43e232a6058aefed0715e6e5fea4ed4fd550c388
| 6,067
|
py
|
Python
|
pyhwpscan/hwp_scan.py
|
orca-eaa5a/dokkaebi_scanner
|
756314376e2cbbce6c03fd908ebd0b8cc27aa7fc
|
[
"MIT"
] | null | null | null |
pyhwpscan/hwp_scan.py
|
orca-eaa5a/dokkaebi_scanner
|
756314376e2cbbce6c03fd908ebd0b8cc27aa7fc
|
[
"MIT"
] | 1
|
2022-02-17T15:01:29.000Z
|
2022-02-20T07:15:31.000Z
|
pyhwpscan/hwp_scan.py
|
orca-eaa5a/dokkaebi_scanner
|
756314376e2cbbce6c03fd908ebd0b8cc27aa7fc
|
[
"MIT"
] | null | null | null |
from threading import current_thread
from jsbeautifier.javascript.beautifier import remove_redundant_indentation
from pyparser.oleparser import OleParser
from pyparser.hwp_parser import HwpParser
from scan.init_scan import init_hwp5_scan
from scan.bindata_scanner import BinData_Scanner
from scan.jscript_scanner import JS_Scanner
from scan.paratext_scanner import ParaText_Scanner
import zipfile
import os
import sys
import platform
from common.errors import *
from utils.dumphex import print_hexdump
js_scanner = None
bindata_scanner = None
paratext_scanner = None
_platform = None
binary_info = {
"type": "",
"p": None
}
def cmd_handler(cmdline):
global binary_info
global js_scanner
global bindata_scanner
global paratext_scanner
global _platform
ty = binary_info["type"]
parser = binary_info["p"]
s_cmd = cmdline.split(" ")
cmd = s_cmd[0]
arg = s_cmd[1:]
if "windows" in _platform:
os.system('cls')
else:
os.system('clear')
print(">> "+cmdline)
if cmd == "help":
print("> tree")
print(" Print the structure of target Binary")
print("> dump [binary_name] [directory]")
print(" Dump OLE or Zipped Binary at specific direcotry (default is current direcotry)")
print("> show-hex [binary_name]")
print(" Print hexcidecimal view of specific OLE or Zipped Binary")
print("> scan")
print(" re-scanning the target file")
print("> exit")
print(" quit command liner")
return 1
elif cmd == "clear":
if "windows" in _platform:
os.system('cls')
else:
os.system('clear')
return 0
elif cmd == "tree":
if ty == "hwp":
parser.ole_container.print_dir_entry_all()
else:
for file in parser.filelist:
print(file.filename)
return 0
elif cmd == "dump":
if len(arg) > 1:
binary_name, target_dir = arg[0], arg[1]
else:
binary_name, target_dir = arg[0], None
if not target_dir:
target_dir = os.getcwd()
if ty == "hwp":
stream = parser.ole_container.get_dir_entry_by_name(binary_name).get_decompressed_stream()
else:
targ = ""
for file in parser.filelist:
fname = file.filename.split("/")[-1]
if fname == binary_name:
targ = file.filename
break
if not targ:
print("no file exist")
return 0
stream = parser.read(targ)
with open(target_dir+"/"+binary_name, "wb") as f:
f.write(stream)
print("dump succeed..")
return 1
elif cmd == "show-hex":
binary_name = arg[0]
if ty == "hwp":
stream = parser.ole_container.get_dir_entry_by_name(binary_name).get_decompressed_stream()
else:
stream = parser.read(binary_name)
print_hexdump(stream)
return 1
elif cmd == "scan":
if ty == "hwp":
bindata_scanner.scan()
js_scanner.scan()
else:
paratext_scanner.scan()
return 1
elif cmd == "exit":
return -1
else:
print("unknown command..")
return 0
print()
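# --- Hedged aside (not in the original file): cmd_handler above is written to
# be driven by a simple dispatch loop; -1 means exit, 0/1 mean keep going. The
# module globals (binary_info, _platform, the scanners) must be set up first.
def repl():
    while True:
        try:
            if cmd_handler(input("hwp> ")) == -1:  # prompt text is illustrative
                break
        except IndexError:
            print("missing argument..")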
class HWPScanner:
def __init__(self) -> None:
self.__platform__ = platform.platform()
self.hwpx_flag = False
self.ole_parser = OleParser()
self.hwp_parser = None
pass
def parse_hwpdoc(self, file_name):
self.file_name = file_name
self.ole_parser.read_ole_binary(file_name)
try:
self.ole_parser.parse()
self.hwp_parser = HwpParser(self.ole_parser)
self.hwp_parser.parse()
if not init_hwp5_scan(self.hwp_parser.hwp_header):
exit(-1)
except Exception:  # not parseable as HWP5 OLE; fall back to zipped HWPX
self.hwpx_docs = zipfile.ZipFile(self.file_name, "r")
self.hwpx_flag = True
pass
'''
def parse_hwpdoc(self):
try:
self.hwp_parser = HwpParser(self.ole_parser)
self.hwp_parser.parse()
if not init_hwp5_scan(self.hwp_parser.hwp_header):
exit(-1)
except:
self.hwpx_docs = zipfile.ZipFile(self.file_name, "r")
self.hwpx_flag = True
pass
'''
def setup_scanner(self):
if not self.hwpx_flag:
self.js_scanner = JS_Scanner(self.hwp_parser)
self.bindata_scanner = BinData_Scanner(self.hwp_parser)
else:
self.paratext_scanner = ParaText_Scanner(self.hwpx_docs)
def get_file_structure(self):
strt = {}
if not self.hwpx_flag:
self.ole_parser.get_dir_entry_all(strt, entry_id=0, depth=0)
else:
for _file in self.hwpx_docs.filelist:
_path = os.path.split( _file.filename)
if _path[0] not in strt:
# root
if _path[0]:
strt[_path[0]] = {}
else:
strt[_path[1]] = _file.file_size
continue
cur_strt = strt[_path[0]]
for path in _path:
if path not in strt:
if path == _path[-1]:
cur_strt[path] = _file.file_size
else:
cur_strt[path] = {}
cur_strt = cur_strt[path]
else:
cur_strt = strt[path]
return strt
def scan(self):
scan_result = ""
if not self.hwpx_flag:
scan_result += self.js_scanner.scan()
scan_result += self.bindata_scanner.scan()
else:
scan_result += self.paratext_scanner.scan()
return scan_result
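# --- Hedged usage sketch (not in the original file); "sample.hwp" is a
# hypothetical path. It chains the methods above in the order the class expects:
if __name__ == "__main__":
    scanner = HWPScanner()
    scanner.parse_hwpdoc("sample.hwp")  # falls back to zipped HWPX on failure
    scanner.setup_scanner()             # JS/BinData scanners, or ParaText for HWPX
    print(scanner.scan())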
| 29.309179
| 102
| 0.543926
| 696
| 6,067
| 4.512931
| 0.201149
| 0.031519
| 0.037249
| 0.017829
| 0.247692
| 0.215855
| 0.187838
| 0.187838
| 0.187838
| 0.187838
| 0
| 0.007782
| 0.364595
| 6,067
| 207
| 103
| 29.309179
| 0.807004
| 0.000659
| 0
| 0.246988
| 0
| 0
| 0.077004
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036145
| false
| 0.012048
| 0.084337
| 0
| 0.192771
| 0.114458
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43e2e7854a4f56963d0c0900b0d6355f030a3675
| 339
|
py
|
Python
|
commands/source.py
|
Open-Source-eUdeC/UdeCursos-bot
|
f900073044e1c74532af532618672501c0a43a13
|
[
"MIT"
] | 3
|
2022-03-01T17:14:06.000Z
|
2022-03-15T21:15:44.000Z
|
commands/source.py
|
Open-Source-eUdeC/UdeCursos-bot
|
f900073044e1c74532af532618672501c0a43a13
|
[
"MIT"
] | 1
|
2022-03-07T20:59:20.000Z
|
2022-03-07T20:59:20.000Z
|
commands/source.py
|
Open-Source-eUdeC/UdeCursos-bot
|
f900073044e1c74532af532618672501c0a43a13
|
[
"MIT"
] | 2
|
2022-02-28T19:32:54.000Z
|
2022-03-12T20:19:39.000Z
|
async def source(update, context):
source_code = "https://github.com/Open-Source-eUdeC/UdeCursos-bot"
await context.bot.send_message(
chat_id=update.effective_chat.id,
text=(
"*UdeCursos bot v2.0*\n\n"
f"Código fuente: [GitHub]({source_code})"
),
parse_mode="Markdown"
)
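# --- Hedged registration sketch (not in the original file). The handler
# signature above matches python-telegram-bot's v20+ async API, which is an
# assumption here; "BOT_TOKEN" is a placeholder.
from telegram.ext import ApplicationBuilder, CommandHandler

app = ApplicationBuilder().token("BOT_TOKEN").build()
app.add_handler(CommandHandler("source", source))  # /source -> function above
app.run_polling()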
| 30.818182
| 70
| 0.60177
| 42
| 339
| 4.714286
| 0.690476
| 0.10101
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007968
| 0.259587
| 339
| 10
| 71
| 33.9
| 0.780876
| 0
| 0
| 0
| 0
| 0
| 0.353982
| 0.067847
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43e3929f6d656cd5f3e6cf6054493ace5b92bd70
| 1,255
|
py
|
Python
|
history/tests.py
|
MPIB/Lagerregal
|
3c950dffcf4fa164008c5a304c4839bc282a3388
|
[
"BSD-3-Clause"
] | 24
|
2017-03-19T16:17:37.000Z
|
2021-11-07T15:35:33.000Z
|
history/tests.py
|
MPIB/Lagerregal
|
3c950dffcf4fa164008c5a304c4839bc282a3388
|
[
"BSD-3-Clause"
] | 117
|
2016-04-19T12:35:10.000Z
|
2022-02-22T13:19:05.000Z
|
history/tests.py
|
MPIB/Lagerregal
|
3c950dffcf4fa164008c5a304c4839bc282a3388
|
[
"BSD-3-Clause"
] | 11
|
2017-08-08T12:11:39.000Z
|
2021-12-08T05:34:06.000Z
|
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.test.client import Client
from model_mommy import mommy
from devices.models import Device
from users.models import Lageruser
class HistoryTests(TestCase):
def setUp(self):
self.client = Client()
self.admin = Lageruser.objects.create_superuser('test', 'test@test.com', "test")
self.client.login(username="test", password="test")
def test_global_view(self):
response = self.client.get('/history/global/')
self.assertEqual(response.status_code, 200)
def test_list_view(self):
content_type = ContentType.objects.get(model='device')
device = mommy.make(Device)
response = self.client.get('/history/%i/%i/' % (content_type.pk, device.pk))
self.assertEqual(response.status_code, 200)
def test_detail_view(self):
device = mommy.make(Device)
response = self.client.post('/devices/%i/edit/' % device.pk, data={
'name': 'test',
'creator': self.admin.pk,
})
self.assertEqual(response.status_code, 302)
response = self.client.get('/history/version/1/')
self.assertEqual(response.status_code, 200)
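# --- Hedged aside (not part of the original test file): model_mommy's make()
# persists an instance and auto-generates any required fields the test does not
# set, which is why the views above can be exercised with one-liners, e.g.:
#     device = mommy.make(Device)            # saved, fields auto-filled
#     device = mommy.make(Device, name="x")  # override only what you assert on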
| 34.861111
| 88
| 0.67251
| 155
| 1,255
| 5.354839
| 0.341935
| 0.072289
| 0.086747
| 0.139759
| 0.36506
| 0.285542
| 0.19759
| 0.103614
| 0
| 0
| 0
| 0.012948
| 0.2
| 1,255
| 35
| 89
| 35.857143
| 0.813745
| 0
| 0
| 0.178571
| 0
| 0
| 0.093227
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0.142857
| false
| 0.035714
| 0.214286
| 0
| 0.392857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
78db3efa5c77dd290cf1467f8ac973b8fc19949b
| 13,168
|
py
|
Python
|
watcher_metering/tests/agent/test_agent.py
|
b-com/watcher-metering
|
7c09b243347146e5a421700d5b07d1d0a5c4d604
|
[
"Apache-2.0"
] | 2
|
2015-10-22T19:44:57.000Z
|
2017-06-15T15:01:07.000Z
|
watcher_metering/tests/agent/test_agent.py
|
b-com/watcher-metering
|
7c09b243347146e5a421700d5b07d1d0a5c4d604
|
[
"Apache-2.0"
] | 1
|
2015-10-26T13:52:58.000Z
|
2015-10-26T13:52:58.000Z
|
watcher_metering/tests/agent/test_agent.py
|
b-com/watcher-metering
|
7c09b243347146e5a421700d5b07d1d0a5c4d604
|
[
"Apache-2.0"
] | 4
|
2015-10-10T13:59:39.000Z
|
2020-05-29T11:47:07.000Z
|
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
from collections import OrderedDict
import os
import types
from mock import MagicMock
from mock import Mock
from mock import patch
from mock import PropertyMock
import msgpack
import operator
from oslo_config import cfg
from oslotest.base import BaseTestCase
from stevedore.driver import DriverManager
from stevedore.extension import Extension
from watcher_metering.agent.agent import Agent
from watcher_metering.agent.measurement import Measurement
from watcher_metering.tests.agent.agent_fixtures import ConfFixture
from watcher_metering.tests.agent.agent_fixtures import DummyMetricPuller
from watcher_metering.tests.agent.agent_fixtures import FakeMetricPuller
class TestAgent(BaseTestCase):
# patches to be applied for each test in this test suite
patches = []
def setUp(self):
super(TestAgent, self).setUp()
self.conf = cfg.ConfigOpts()
# To load the drivers without using the config file
self.useFixture(ConfFixture(self.conf))
def _fake_parse(self, args=[]):
return cfg.ConfigOpts._parse_cli_opts(self, [])
_fake_parse_method = types.MethodType(_fake_parse, self.conf)
self.conf._parse_cli_opts = _fake_parse_method
# First dependency to be returned
self.dummy_driver_manager = DriverManager.make_test_instance(
extension=Extension(
name=DummyMetricPuller.get_name(),
entry_point='fake.entry.point',
plugin=DummyMetricPuller,
obj=None,
),
namespace='TESTING',
)
# 2nd dependency to be returned
self.fake_driver_manager = DriverManager.make_test_instance(
extension=Extension(
name=FakeMetricPuller.get_name(),
entry_point='fake.entry.point',
plugin=FakeMetricPuller,
obj=None,
),
namespace='TESTING',
)
self.defaults_drivers = {
DummyMetricPuller.get_name(): self.dummy_driver_manager,
FakeMetricPuller.get_name(): self.fake_driver_manager,
}
def _fake_loader(name, **kw):
return self.defaults_drivers[name]
# Patches the agent socket
self.m_agent_socket = MagicMock(autospec=True)
self.patches.extend([
# Deactivates the nanomsg socket
patch(
"watcher_metering.agent.agent.nanomsg.Socket",
new=self.m_agent_socket,
),
# Sets the test namespace to 'TESTING'
patch.object(
Agent,
"namespace",
PropertyMock(return_value='TESTING'),
),
# Patches the driver manager to return our test drivers
# instead of the real ones
patch(
"watcher_metering.load.loader.DriverManager",
MagicMock(side_effect=_fake_loader),
),
])
# Applies all of our patches before each test
for _patch in self.patches:
_patch.start()
self.agent = Agent(
conf=self.conf,
driver_names=self.conf.agent.driver_names,
use_nanoconfig_service=False,
publisher_endpoint="fake",
nanoconfig_service_endpoint="",
nanoconfig_update_endpoint="",
nanoconfig_profile="nanoconfig://test_profile"
)
# Default ticking is set to 0 to reduce test execution time
self.agent.TICK_INTERVAL = 0
def tearDown(self):
super(TestAgent, self).tearDown()
# The drivers are stored at the class level so we need to clear
# it after each test
self.agent.drivers.clear()
for _patch in self.patches:
_patch.stop()
def test_register_driver(self):
expected_driver1_key = "metrics_driver.dummy_data.puller.dummy"
expected_driver2_key = "metrics_driver.fake_data.puller.fake"
self.agent.register_drivers()
self.assertEqual(
sorted(self.agent.drivers.keys()),
[expected_driver1_key, expected_driver2_key]
)
sorted_drivers = OrderedDict(
sorted(self.agent.drivers.items(), key=operator.itemgetter(0))
)
self.assertEqual(len(sorted_drivers), 2)
driver1 = self.agent.drivers[expected_driver1_key]
driver2 = self.agent.drivers[expected_driver2_key]
self.assertEqual(driver1.title, "metrics_driver.dummy")
self.assertEqual(driver1.probe_id, "data.puller.dummy")
self.assertEqual(driver1.interval, 0.01)
self.assertEqual(driver2.title, "metrics_driver.fake")
self.assertEqual(driver2.probe_id, "data.puller.fake")
self.assertEqual(driver2.interval, 0.01)
self.assertIn(self.agent, driver1._observers)
self.assertIn(self.agent, driver2._observers)
def test_unregister_driver(self):
driver_key = "metrics_driver.dummy_data.puller.dummy"
self.agent.register_drivers()
self.agent.unregister_driver(driver_key)
# Initial is 2 drivers => 2 - 1 == 1
self.assertEqual(len(self.agent.drivers), 1)
@patch.object(Measurement, "as_dict")
def test_send_measurements(self, m_as_dict):
self.agent.register_drivers()
measurement_dict = OrderedDict(
name="dummy.data.puller",
unit="",
type_="",
value=13.37,
resource_id="test_hostname",
host="test_hostname",
timestamp="2015-08-04T15:15:45.703542",
)
m_as_dict.return_value = measurement_dict
measurement = Measurement(**measurement_dict)
for driver in self.agent.drivers.values():
driver.send_measurements([measurement])
break # only the first one
expected_encoded_msg = msgpack.dumps(measurement_dict)
self.m_agent_socket.return_value.send.assert_called_once_with(
expected_encoded_msg
)
@patch.object(DummyMetricPuller, "is_alive")
@patch.object(DummyMetricPuller, "start")
@patch("watcher_metering.agent.manager.MetricManager.lock")
def test_check_drivers_alive(self, m_lock, m_start, m_is_alive):
m_lock.acquire = Mock(return_value=True) # Emulates a thread behavior
m_lock.release = Mock(return_value=True) # Emulates a thread behavior
m_is_alive.return_value = True # Emulates a thread that is running
m_start.return_value = None
self.agent.register_drivers()
self.agent.check_drivers_alive()
self.assertTrue(m_is_alive.called)
self.assertFalse(m_start.called)
@patch.object(DummyMetricPuller, "is_alive")
@patch.object(DummyMetricPuller, "start")
@patch("watcher_metering.agent.manager.MetricManager.lock")
def test_check_drivers_alive_with_driver_stopped(self, m_lock, m_start,
m_is_alive):
m_lock.acquire = Mock(return_value=True) # Emulates a thread behavior
m_lock.release = Mock(return_value=True) # Emulates a thread behavior
m_is_alive.side_effect = [False, True]
m_start.side_effect = [RuntimeError, True, True] # Fails once
self.agent.register_drivers()
# should re-run the driver
self.agent.check_drivers_alive()
self.assertEqual(m_is_alive.call_count, 1)
self.assertEqual(m_start.call_count, 2)
@patch.object(os._Environ, "__setitem__")
@patch("watcher_metering.agent.agent.os.environ.get")
def test_setup_nanoconfig_valid_using_default(self, m_env_getter,
m_env_setter):
# Override default where it is set to False
m_env_getter.side_effect = ["FAKE_NN_CONFIG_SERVICE",
"FAKE_NN_CONFIG_UPDATES"]
self.agent.use_nanoconfig_service = True
self.agent.nanoconfig_service_endpoint = ""
self.agent.nanoconfig_update_endpoint = ""
self.agent.set_nanoconfig_endpoints()
self.assertEqual(m_env_getter.call_count, 2)
m_env_getter.assert_any_call("NN_CONFIG_SERVICE") # First call
m_env_getter.assert_called_with("NN_CONFIG_UPDATES") # Last call
self.assertEqual(m_env_setter.call_count, 0)
self.assertEqual(self.agent.nanoconfig_service_endpoint,
"FAKE_NN_CONFIG_SERVICE")
self.assertEqual(self.agent.nanoconfig_update_endpoint,
"FAKE_NN_CONFIG_UPDATES")
@patch.object(os._Environ, "__setitem__")
@patch("watcher_metering.agent.agent.os.environ.get")
def test_setup_nanoconfig_valid_custom_values(self, m_env_getter,
m_env_setter):
# Override default where it is set to False
m_env_getter.side_effect = ["FAKE_NN_CONFIG_SERVICE",
"FAKE_NN_CONFIG_UPDATES"]
self.agent.use_nanoconfig_service = True
self.agent.nanoconfig_service_endpoint = "CUSTOM_NN_CONFIG_SERVICE"
self.agent.nanoconfig_update_endpoint = "CUSTOM_NN_CONFIG_UPDATES"
self.agent.set_nanoconfig_endpoints()
self.assertEqual(m_env_getter.call_count, 2)
m_env_getter.assert_any_call("NN_CONFIG_SERVICE")
m_env_getter.assert_called_with("NN_CONFIG_UPDATES")
m_env_setter.assert_any_call("NN_CONFIG_SERVICE",
"CUSTOM_NN_CONFIG_SERVICE")
m_env_setter.assert_called_with("NN_CONFIG_UPDATES",
"CUSTOM_NN_CONFIG_UPDATES")
self.assertEqual(self.agent.nanoconfig_service_endpoint,
"CUSTOM_NN_CONFIG_SERVICE")
self.assertEqual(self.agent.nanoconfig_update_endpoint,
"CUSTOM_NN_CONFIG_UPDATES")
@patch.object(os._Environ, "__setitem__")
@patch("watcher_metering.agent.agent.os.environ.get")
def test_setup_nanoconfig_invalid_service(self, m_env_getter,
m_env_setter):
# Override default where it is set to False
m_env_getter.return_value = "" # Emulates empty ENV vars
self.agent.use_nanoconfig_service = True
self.agent.nanoconfig_service_endpoint = ""
self.agent.nanoconfig_update_endpoint = "CUSTOM_NN_CONFIG_UPDATES"
self.assertRaises(ValueError, self.agent.set_nanoconfig_endpoints)
m_env_getter.assert_called_once_with("NN_CONFIG_SERVICE")
self.assertEqual(m_env_setter.call_count, 0)
@patch.object(os._Environ, "__setitem__")
@patch("watcher_metering.agent.agent.os.environ.get")
def test_setup_nanoconfig_invalid_update(self, m_env_getter, m_env_setter):
# Override default where it is set to False
m_env_getter.return_value = "" # Emulates empty ENV vars
self.agent.use_nanoconfig_service = True
self.agent.nanoconfig_service_endpoint = "CUSTOM_NN_CONFIG_SERVICE"
self.agent.nanoconfig_update_endpoint = ""
self.assertRaises(ValueError, self.agent.set_nanoconfig_endpoints)
m_env_getter.assert_any_call("NN_CONFIG_SERVICE")
m_env_getter.assert_called_with("NN_CONFIG_UPDATES")
m_env_setter.assert_called_once_with("NN_CONFIG_SERVICE",
"CUSTOM_NN_CONFIG_SERVICE")
@patch.object(Agent, 'check_drivers_alive', MagicMock())
@patch("watcher_metering.agent.manager."
"MetricManager.terminated",
new_callable=PropertyMock)
def test_run_agent(self, m_terminated):
# Patches the guard/exit condition of the thread periodic event loop
# -> 1st time = False (carry on) and 2nd = True (Should terminate)
m_terminated.side_effect = [False, True]
self.agent.run()
self.assertEqual(m_terminated.call_count, 2)
@patch.object(DummyMetricPuller, 'send_measurements', MagicMock())
def test_stop_agent(self):
self.agent.register_drivers()
self.agent.start()
self.agent.join(timeout=.01)
self.agent.stop()
self.assertEqual(len(self.agent.drivers.values()), 2)
self.assertTrue(
all([driver.terminated for driver in self.agent.drivers.values()])
)
self.assertTrue(self.agent.terminated)
self.assertFalse(self.agent.is_alive())
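# --- Hedged aside (not part of the original suite): the manual start()/stop()
# pattern used in setUp/tearDown above, reduced to a runnable stdlib example:
from unittest import TestCase as _TC
from unittest.mock import patch as _patch

class PatchListExample(_TC):
    def setUp(self):
        self.patches = [_patch("os.sep", "#")]  # illustrative patch target
        for p in self.patches:
            p.start()
    def tearDown(self):
        for p in self.patches:
            p.stop()
    def test_patched(self):
        import os
        self.assertEqual(os.sep, "#")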
| 40.024316
| 79
| 0.659478
| 1,546
| 13,168
| 5.329884
| 0.192756
| 0.052427
| 0.020631
| 0.024272
| 0.471238
| 0.450364
| 0.403884
| 0.365413
| 0.330461
| 0.309223
| 0
| 0.007855
| 0.255544
| 13,168
| 328
| 80
| 40.146341
| 0.832704
| 0.128645
| 0
| 0.359504
| 0
| 0
| 0.121215
| 0.080606
| 0
| 0
| 0
| 0
| 0.169421
| 1
| 0.061983
| false
| 0
| 0.082645
| 0.008264
| 0.161157
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
78dce9aa3f78b6fd58cffc69a08166742b99da9b
| 31,044
|
py
|
Python
|
mmtbx/bulk_solvent/mosaic.py
|
ndevenish/cctbx_project
|
1f1a2627ae20d01d403f367948e7269cef0f0217
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/bulk_solvent/mosaic.py
|
ndevenish/cctbx_project
|
1f1a2627ae20d01d403f367948e7269cef0f0217
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/bulk_solvent/mosaic.py
|
ndevenish/cctbx_project
|
1f1a2627ae20d01d403f367948e7269cef0f0217
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from __future__ import absolute_import, division, print_function
from cctbx.array_family import flex
from scitbx import matrix
import math
from libtbx import adopt_init_args
import scitbx.lbfgs
from mmtbx.bulk_solvent import kbu_refinery
from cctbx import maptbx
import mmtbx.masks
import boost_adaptbx.boost.python as bp
asu_map_ext = bp.import_ext("cctbx_asymmetric_map_ext")
from libtbx import group_args
from mmtbx import bulk_solvent
from mmtbx.ncs import tncs
from collections import OrderedDict
import mmtbx.f_model
import sys
from libtbx.test_utils import approx_equal
from mmtbx import masks
from cctbx.masks import vdw_radii_from_xray_structure
ext = bp.import_ext("mmtbx_masks_ext")
mosaic_ext = bp.import_ext("mmtbx_mosaic_ext")
APPLY_SCALE_K1_TO_FOBS = False
def moving_average(x, n):
r = []
for i, xi in enumerate(x):
s = 0
cntr = 0
for j in range(max(0,i-n), min(i+n+1, len(x))):
s+=x[j]
cntr+=1
s = s/cntr
r.append(s)
return r
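# --- Hedged sanity check (not in the original file) of the centered window
# above: with n=1 each point is averaged with its immediate neighbours, and the
# window shrinks at the edges.
assert moving_average([0, 10, 20], 1) == [5.0, 10.0, 15.0]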
# Utilities used by algorithm 2 ------------------------------------------------
class minimizer(object):
def __init__(self, max_iterations, calculator):
adopt_init_args(self, locals())
self.x = self.calculator.x
self.cntr=0
exception_handling_params = scitbx.lbfgs.exception_handling_parameters(
ignore_line_search_failed_step_at_lower_bound=True,
)
self.minimizer = scitbx.lbfgs.run(
target_evaluator=self,
exception_handling_params=exception_handling_params,
termination_params=scitbx.lbfgs.termination_parameters(
max_iterations=max_iterations))
def compute_functional_and_gradients(self):
self.cntr+=1
self.calculator.update_target_and_grads(x=self.x)
t = self.calculator.target()
g = self.calculator.gradients()
#print "step: %4d"%self.cntr, "target:", t, "params:", \
# " ".join(["%10.6f"%i for i in self.x]), math.log(t)
return t,g
class minimizer2(object):
def __init__(self, calculator, min_iterations=0, max_iterations=2000):
adopt_init_args(self, locals())
self.x = self.calculator.x
self.n = self.x.size()
self.cntr=0
def run(self, use_curvatures=0):
self.minimizer = kbu_refinery.lbfgs_run(
target_evaluator=self,
min_iterations=self.min_iterations,
max_iterations=self.max_iterations,
use_curvatures=use_curvatures)
self(requests_f_and_g=True, requests_diag=False)
return self
def __call__(self, requests_f_and_g, requests_diag):
self.cntr+=1
self.calculator.update_target_and_grads(x=self.x)
if (not requests_f_and_g and not requests_diag):
requests_f_and_g = True
requests_diag = True
if (requests_f_and_g):
self.f = self.calculator.target()
self.g = self.calculator.gradients()
self.d = None
if (requests_diag):
self.d = self.calculator.curvatures()
#assert self.d.all_ne(0)
if(self.d.all_eq(0)): self.d=None
else:
self.d = 1 / self.d
#print "step: %4d"%self.cntr, "target:", self.f, "params:", \
# " ".join(["%10.6f"%i for i in self.x]) #, math.log(self.f)
return self.x, self.f, self.g, self.d
class tg(object):
def __init__(self, x, i_obs, F, use_curvatures):
self.x = x
self.i_obs = i_obs
self.F = F
self.t = None
self.g = None
self.d = None
# Needed to do sums from small to large to prevent precision loss
s = flex.sort_permutation(self.i_obs.data())
self.i_obs = self.i_obs.select(s)
self.F = [f.select(s) for f in self.F]
#
self.sum_i_obs = flex.sum(self.i_obs.data()) # needed for Python version
self.use_curvatures=use_curvatures
self.tgo = mosaic_ext.alg2_tg(
F = [f.data() for f in self.F],
i_obs = self.i_obs.data())
self.update_target_and_grads(x=x)
def update(self, x):
self.update_target_and_grads(x = x)
def update_target_and_grads(self, x):
self.x = x
self.tgo.update(self.x)
self.t = self.tgo.target()
self.g = self.tgo.gradient()
#
# Reference implementation in Python
# s = 1 #180/math.pi
# i_model = flex.double(self.i_obs.data().size(),0)
# for n, kn in enumerate(self.x):
# for m, km in enumerate(self.x):
# tmp = self.F[n].data()*flex.conj(self.F[m].data())
# i_model += kn*km*flex.real(tmp)
# #pn = self.F[n].phases().data()*s
# #pm = self.F[m].phases().data()*s
# #Fn = flex.abs(self.F[n].data())
# #Fm = flex.abs(self.F[m].data())
# #i_model += kn*km*Fn*Fm*flex.cos(pn-pm)
# diff = i_model - self.i_obs.data()
# #print (flex.min(diff), flex.max(diff))
# t = flex.sum(diff*diff)/4
# #
# g = flex.double()
# for j in range(len(self.F)):
# tmp = flex.double(self.i_obs.data().size(),0)
# for m, km in enumerate(self.x):
# tmp += km * flex.real( self.F[j].data()*flex.conj(self.F[m].data()) )
# #pj = self.F[j].phases().data()*s
# #pm = self.F[m].phases().data()*s
# #Fj = flex.abs(self.F[j].data())
# #Fm = flex.abs(self.F[m].data())
# #tmp += km * Fj*Fm*flex.cos(pj-pm)
# g.append(flex.sum(diff*tmp))
# self.t = t/self.sum_i_obs
# self.g = g/self.sum_i_obs
# #print (self.t,t1)
# #print (list(self.g))
# #print (list(g1))
# #print ()
# #assert approx_equal(self.t, t1, 5)
# #assert approx_equal(self.g, g1, 1.e-6)
#
if self.use_curvatures:
d = flex.double()
for j in range(len(self.F)):
tmp1 = flex.double(self.i_obs.data().size(),0)
tmp2 = flex.double(self.i_obs.data().size(),0)
for m, km in enumerate(self.x):
zz = flex.real( self.F[j].data()*flex.conj(self.F[m].data()) )
tmp1 += km * zz
tmp2 += zz
#pj = self.F[j].phases().data()*s
#pm = self.F[m].phases().data()*s
#Fj = flex.abs(self.F[j].data())
#Fm = flex.abs(self.F[m].data())
#tmp += km * Fj*Fm*flex.cos(pj-pm)
d.append(flex.sum(tmp1*tmp1 + tmp2))
self.d=d
def target(self): return self.t
def gradients(self): return self.g
def gradient(self): return self.gradients()
def curvatures(self): return self.d/self.sum_i_obs
#-------------------------------------------------------------------------------
def write_map_file(crystal_symmetry, map_data, file_name):
from iotbx import mrcfile
mrcfile.write_ccp4_map(
file_name = file_name,
unit_cell = crystal_symmetry.unit_cell(),
space_group = crystal_symmetry.space_group(),
map_data = map_data,
labels = flex.std_string([""]))
class refinery(object):
def __init__(self, fmodel, fv, alg, anomaly=True, log = sys.stdout):
assert alg in ["alg0", "alg2", "alg4", None]
self.log = log
self.f_obs = fmodel.f_obs()
self.r_free_flags = fmodel.r_free_flags()
k_mask_overall = fmodel.k_masks()[0]
self.bin_selections = fmodel.bin_selections
#
k_total = fmodel.k_total()
self.f_calc = fmodel.f_model()
self.F = [self.f_calc.deep_copy()] + fv.keys()
#
n_zones_start = len(self.F)
r4_start = fmodel.r_work4()
for it in range(5):
#
if(it>0):
r4 = self.fmodel.r_work4()
print(r4_start, r4, abs(round(r4-r4_start,4)))
if(abs(round(r4-r4_start,4))<1.e-4):
break
r4_start = r4
#if(it>0 and n_zones_start == len(self.F)): break
#
#if it>0:
# self.F = [self.fmodel.f_model().deep_copy()] + self.F[1:]
self._print("cycle: %2d"%it)
self._print(" volumes: "+" ".join([str(fv[f]) for f in self.F[1:]]))
f_obs = self.f_obs.deep_copy()
if it==0: k_total = fmodel.k_total()
else: k_total = self.fmodel.k_total()
i_obs = f_obs.customized_copy(data = f_obs.data()*f_obs.data())
K_MASKS = OrderedDict()
self.bin_selections = self.f_obs.log_binning(
n_reflections_in_lowest_resolution_bin = 100*len(self.F))
for i_bin, sel in enumerate(self.bin_selections):
d_max, d_min = f_obs.select(sel).d_max_min()
if d_max<3: continue
bin = " bin %2d: %5.2f-%-5.2f: "%(i_bin, d_max, d_min)
F = [f.select(sel) for f in self.F]
k_total_sel = k_total.select(sel)
F_scaled = [F[0].deep_copy()]+[f.customized_copy(data=f.data()*k_total_sel) for f in F[1:]]
#
# XXX WHY NOT THIS INSTEAD (INVESTIGATE LATER)?
#F_scaled = [f.customized_copy(data=f.data()*k_total_sel) for f in F]
#r00=bulk_solvent.r_factor(f_obs.select(sel).data()*k_total_sel, F[0].data()*k_total_sel)
# algorithm_0
if(alg=="alg0"):
k_masks = algorithm_0(
f_obs = f_obs.select(sel),
F = F_scaled,
kt=k_total_sel)
#fd = flex.complex_double(F[0].data().size())
#for i,f in enumerate(F):
# fd = fd + f.data()*k_masks[i]
#r0=bulk_solvent.r_factor(f_obs.select(sel).data()*k_total_sel, fd*k_total_sel)
# algorithm_4
if(alg=="alg4"):
if it==0: phase_source = fmodel.f_model().select(sel)
else: phase_source = self.fmodel.f_model().select(sel)
k_masks = algorithm_4(
f_obs = self.f_obs.select(sel),
F = F_scaled,
auto_converge_eps = 0.0001,
phase_source = phase_source)
#fd = flex.complex_double(F[0].data().size())
#for i,f in enumerate(F):
# fd = fd + f.data()*k_masks[i]
#r4=bulk_solvent.r_factor(f_obs.select(sel).data()*k_total_sel, fd*k_total_sel)
# algorithm_2
if(alg=="alg2"):
k_masks = algorithm_2(
i_obs = i_obs.select(sel),
F = F_scaled,
x = self._get_x_init(i_bin),
use_curvatures = False)
#fd = flex.complex_double(F[0].data().size())
#for i,f in enumerate(F):
# fd = fd + f.data()*k_masks[i]
#r2=bulk_solvent.r_factor(f_obs.select(sel).data()*k_total_sel, fd*k_total_sel)
#self._print(bin+" ".join(["%6.2f"%k for k in k_masks])+" %6.4f %6.4f %6.4f %6.4f"%(r00,r0,r4, r2))
k_mean = flex.mean(k_mask_overall.select(sel))
k_masks_plus = [k_masks[0]]+[k_mean + k for k in k_masks[1:]]
self._print(bin+" ".join(["%6.2f"%k for k in k_masks_plus]) )
K_MASKS[sel] = [k_masks, k_masks_plus]
#
if(len(self.F)==2): break # stop and fall back onto using largest mask
#
#
#print()
#self.update_k_masks(K_MASKS)
#for k_masks in K_MASKS.values():
# self._print(bin+" ".join(["%6.2f"%k for k in k_masks]))
#
f_calc_data = self.f_calc.data().deep_copy()
f_bulk_data = flex.complex_double(fmodel.f_calc().data().size(), 0)
for sel, k_masks in zip(K_MASKS.keys(), K_MASKS.values()):
k_masks = k_masks[0] # 1 is shifted!
f_bulk_data_ = flex.complex_double(sel.count(True), 0)
for i_mask, k_mask in enumerate(k_masks):
if i_mask==0:
f_calc_data = f_calc_data.set_selected(sel,
f_calc_data.select(sel)*k_mask)
continue
f_bulk_data_ += self.F[i_mask].data().select(sel)*k_mask
f_bulk_data = f_bulk_data.set_selected(sel,f_bulk_data_)
#
self.update_F(K_MASKS)
f_bulk = fmodel.f_calc().customized_copy(data = f_bulk_data)
if(len(self.F)==2):
self.fmodel = mmtbx.f_model.manager(
f_obs = self.f_obs,
r_free_flags = self.r_free_flags,
f_calc = fmodel.f_calc(),
f_mask = self.F[1],
k_mask = flex.double(f_obs.data().size(),1)
)
self.fmodel.update_all_scales(remove_outliers=False,
apply_scale_k1_to_f_obs = APPLY_SCALE_K1_TO_FOBS)
else:
self.fmodel = mmtbx.f_model.manager(
f_obs = self.f_obs,
r_free_flags = self.r_free_flags,
#f_calc = self.f_obs.customized_copy(data = f_calc_data),
f_calc = self.f_calc,
bin_selections = self.bin_selections,
f_mask = f_bulk,
k_mask = flex.double(f_obs.data().size(),1)
)
self.fmodel.update_all_scales(remove_outliers=False,
apply_scale_k1_to_f_obs = APPLY_SCALE_K1_TO_FOBS)
#
self.fmodel = mmtbx.f_model.manager(
f_obs = self.f_obs,
r_free_flags = self.r_free_flags,
#f_calc = self.f_obs.customized_copy(data = f_calc_data),
f_calc = self.fmodel.f_calc(),
f_mask = self.fmodel.f_bulk(),
k_mask = flex.double(f_obs.data().size(),1)
)
self.fmodel.update_all_scales(remove_outliers=False,
apply_scale_k1_to_f_obs = APPLY_SCALE_K1_TO_FOBS)
self._print(self.fmodel.r_factors(prefix=" "))
#self._print(self.fmodel.r_factors(prefix=" "))
self.mc = self.fmodel.electron_density_map().map_coefficients(
map_type = "mFobs-DFmodel",
isotropize = True,
exclude_free_r_reflections = False)
#def update_k_masks(self, K_MASKS):
# tmp = []
# for i_mask, F in enumerate(self.F):
# k_masks = [k_masks_bin[i_mask] for k_masks_bin in K_MASKS.values()]
# found = False
# for i_bin, k_masks_bin in enumerate(K_MASKS.values()):
# if(not found and k_masks_bin[i_mask]<=0.009):
# found = True
# K_MASKS.values()[i_bin][i_mask]=0
# elif found:
# K_MASKS.values()[i_bin][i_mask]=0
def _print(self, m):
if(self.log is not None):
print(m, file=self.log)
def update_F(self, K_MASKS):
tmp = []
for i_mask, F in enumerate(self.F):
k_masks = [k_masks_bin[1][i_mask] for k_masks_bin in K_MASKS.values()]
if(i_mask == 0): tmp.append(self.F[0])
elif moving_average(k_masks,2)[0]>=0.03: tmp.append(F)
self.F = tmp[:]
def _get_x_init(self, i_bin):
return flex.double([1] + [1]*len(self.F[1:]))
#k_maks1_init = 0.35 - i_bin*0.35/len(self.bin_selections)
#x = flex.double([1,k_maks1_init])
#x.extend( flex.double(len(self.F)-2, 0.1))
#return x
def get_f_mask(xrs, ma, step, option = 2, r_shrink = None, r_sol = None):
crystal_gridding = maptbx.crystal_gridding(
unit_cell = xrs.unit_cell(),
space_group_info = xrs.space_group_info(),
symmetry_flags = maptbx.use_space_group_symmetry,
step = step)
n_real = crystal_gridding.n_real()
atom_radii = vdw_radii_from_xray_structure(xray_structure = xrs)
mask_params = masks.mask_master_params.extract()
grid_step_factor = ma.d_min()/step
if(r_shrink is not None): mask_params.shrink_truncation_radius = r_shrink
if(r_sol is not None): mask_params.solvent_radius = r_sol
mask_params.grid_step_factor = grid_step_factor
# 1
if(option==1):
asu_mask = ext.atom_mask(
unit_cell = xrs.unit_cell(),
group = xrs.space_group(),
resolution = ma.d_min(),
grid_step_factor = grid_step_factor,
solvent_radius = mask_params.solvent_radius,
shrink_truncation_radius = mask_params.shrink_truncation_radius)
asu_mask.compute(xrs.sites_frac(), atom_radii)
fm_asu = asu_mask.structure_factors(ma.indices())
f_mask = ma.set().array(data = fm_asu)
# 2
elif(option==2):
asu_mask = ext.atom_mask(
unit_cell = xrs.unit_cell(),
space_group = xrs.space_group(),
gridding_n_real = n_real,
solvent_radius = mask_params.solvent_radius,
shrink_truncation_radius = mask_params.shrink_truncation_radius)
asu_mask.compute(xrs.sites_frac(), atom_radii)
fm_asu = asu_mask.structure_factors(ma.indices())
f_mask = ma.set().array(data = fm_asu)
# 3
elif(option==3):
mask_p1 = mmtbx.masks.mask_from_xray_structure(
xray_structure = xrs,
p1 = True,
for_structure_factors = True,
solvent_radius = mask_params.solvent_radius,
shrink_truncation_radius = mask_params.shrink_truncation_radius,
n_real = n_real,
in_asu = False).mask_data
maptbx.unpad_in_place(map=mask_p1)
mask = asu_map_ext.asymmetric_map(
xrs.crystal_symmetry().space_group().type(), mask_p1).data()
f_mask = ma.structure_factors_from_asu_map(
asu_map_data = mask, n_real = n_real)
# 4
elif(option==4):
f_mask = masks.bulk_solvent(
xray_structure = xrs,
ignore_zero_occupancy_atoms = False,
solvent_radius = mask_params.solvent_radius,
shrink_truncation_radius = mask_params.shrink_truncation_radius,
ignore_hydrogen_atoms = False,
grid_step = step,
atom_radii = atom_radii).structure_factors(
miller_set = ma)
elif(option==5):
o = mmtbx.masks.bulk_solvent(
xray_structure = xrs,
ignore_zero_occupancy_atoms = False,
solvent_radius = mask_params.solvent_radius,
shrink_truncation_radius = mask_params.shrink_truncation_radius,
ignore_hydrogen_atoms = False,
gridding_n_real = n_real,
atom_radii = atom_radii)
assert approx_equal(n_real, o.data.accessor().all())
f_mask = o.structure_factors(ma)
elif(option==6):
# XXX No control over n_real, so results with others don't match
mask_manager = masks.manager(
miller_array = ma,
miller_array_twin = None,
mask_params = mask_params)
f_mask = mask_manager.shell_f_masks(xray_structure=xrs, force_update=True)[0]
else: assert 0
#
return f_mask
def filter_mask(mask_p1, volume_cutoff, crystal_symmetry,
for_structure_factors = False):
co = maptbx.connectivity(
map_data = mask_p1,
threshold = 0.01,
preprocess_against_shallow = True,
wrapping = True)
mi, ma = flex.min(mask_p1), flex.max(mask_p1)
print (mask_p1.size(), (mask_p1<0).count(True))
assert mi == 0, mi
assert ma == 1, ma
a,b,c = crystal_symmetry.unit_cell().parameters()[:3]
na,nb,nc = mask_p1.accessor().all()
step = flex.mean(flex.double([a/na, b/nb, c/nc]))
if(crystal_symmetry.space_group_number() != 1):
co.merge_symmetry_related_regions(space_group=crystal_symmetry.space_group())
conn = co.result().as_double()
z = zip(co.regions(),range(0,co.regions().size()))
sorted_by_volume = sorted(z, key=lambda x: x[0], reverse=True)
for i_seq, p in enumerate(sorted_by_volume):
v, i = p
if(i==0): continue # skip macromolecule
# skip small volume
volume = v*step**3
if volume < volume_cutoff:
conn = conn.set_selected(conn==i, 0)
conn = conn.set_selected(conn>0, 1)
if for_structure_factors:
conn = conn / crystal_symmetry.space_group().order_z()
return conn
class mosaic_f_mask(object):
def __init__(self,
xray_structure,
step,
volume_cutoff=None,
mean_diff_map_threshold=None,
compute_whole=False,
preprocess_against_shallow=True,
largest_only=False,
wrapping=True,
f_obs=None,
r_sol=1.1,
r_shrink=0.9,
f_calc=None,
log = None,
write_masks=False):
adopt_init_args(self, locals())
#
self.dsel = f_obs.d_spacings().data()>=0 # XXX WHY????????????
self.miller_array = f_obs.select(self.dsel)
#
# To avoid "Miller index not in structure factor map" crash
step = min(step, self.miller_array.d_min()/3)
#
self.crystal_symmetry = self.xray_structure.crystal_symmetry()
# compute mask in p1 (via ASU)
self.crystal_gridding = maptbx.crystal_gridding(
unit_cell = xray_structure.unit_cell(),
space_group_info = xray_structure.space_group_info(),
symmetry_flags = maptbx.use_space_group_symmetry,
step = step)
self.n_real = self.crystal_gridding.n_real()
# XXX Where do we want to deal with H and occ==0?
mask_p1 = mmtbx.masks.mask_from_xray_structure(
xray_structure = xray_structure,
p1 = True,
for_structure_factors = True,
solvent_radius = r_sol,
shrink_truncation_radius = r_shrink,
n_real = self.n_real,
in_asu = False).mask_data
maptbx.unpad_in_place(map=mask_p1)
self.f_mask_whole = None
if(compute_whole):
mask = asu_map_ext.asymmetric_map(
xray_structure.crystal_symmetry().space_group().type(), mask_p1).data()
self.f_mask_whole = self.miller_array.structure_factors_from_asu_map(
asu_map_data = mask, n_real = self.n_real)
self.solvent_content = 100.*mask_p1.count(1)/mask_p1.size()
if(write_masks):
write_map_file(crystal_symmetry=xray_structure.crystal_symmetry(),
map_data=mask_p1, file_name="mask_whole.mrc")
# conn analysis
co = maptbx.connectivity(
map_data = mask_p1,
threshold = 0.01,
preprocess_against_shallow = preprocess_against_shallow,
wrapping = wrapping)
co.merge_symmetry_related_regions(space_group=xray_structure.space_group())
del mask_p1
self.conn = co.result().as_double()
z = zip(co.regions(),range(0,co.regions().size()))
sorted_by_volume = sorted(z, key=lambda x: x[0], reverse=True)
#
f_mask_data_0 = flex.complex_double(f_obs.data().size(), 0)
f_mask_data = flex.complex_double(f_obs.data().size(), 0)
self.FV = OrderedDict()
self.mc = None
diff_map = None
mean_diff_map = None
self.regions = OrderedDict()
self.f_mask_0 = None
self.f_mask = None
#
if(log is not None):
print(" # volume_p1 uc(%) mFo-DFc: min,max,mean,sd", file=log)
#
for i_seq, p in enumerate(sorted_by_volume):
v, i = p
# skip macromolecule
if(i==0): continue
# skip small volume
volume = v*step**3
uc_fraction = v*100./self.conn.size()
if(volume_cutoff is not None):
if volume < volume_cutoff: continue
selection = self.conn==i
mask_i_asu = self.compute_i_mask_asu(selection = selection, volume = volume)
volume_asu = (mask_i_asu>0).count(True)*step**3
if(uc_fraction >= 1):
f_mask_i = self.compute_f_mask_i(mask_i_asu)
f_mask_data_0 += f_mask_i.data()
elif(largest_only): break
if(uc_fraction < 1 and diff_map is None):
diff_map = self.compute_diff_map(f_mask_data = f_mask_data_0)
mi,ma,me,sd = None,None,None,None
if(diff_map is not None):
blob = diff_map.select(selection.iselection())
mean_diff_map = flex.mean(diff_map.select(selection.iselection()))
mi,ma,me = flex.min(blob), flex.max(blob), flex.mean(blob)
sd = blob.sample_standard_deviation()
if(log is not None):
print("%3d"%i_seq,"%12.3f"%volume, "%8.4f"%round(uc_fraction,4),
"%7s"%str(None) if diff_map is None else "%7.3f %7.3f %7.3f %7.3f"%(
mi,ma,me,sd), file=log)
if(mean_diff_map_threshold is not None and
mean_diff_map is not None and mean_diff_map<=mean_diff_map_threshold):
continue
self.regions[i_seq] = group_args(
id = i,
i_seq = i_seq,
volume = volume,
uc_fraction = uc_fraction,
diff_map = group_args(mi=mi, ma=ma, me=me, sd=sd))
f_mask_i = self.compute_f_mask_i(mask_i_asu)
f_mask_data += f_mask_i.data()
self.FV[f_mask_i] = [round(volume, 3), round(uc_fraction,1)]
#
self.f_mask_0 = f_obs.customized_copy(data = f_mask_data_0)
self.f_mask = f_obs.customized_copy(data = f_mask_data)
self.do_mosaic = False
self.n_regions = len(self.FV.keys())
if(self.n_regions>1):
self.do_mosaic = True
def compute_f_mask_i(self, mask_i_asu):
f_mask_i = self.miller_array.structure_factors_from_asu_map(
asu_map_data = mask_i_asu, n_real = self.n_real)
data = flex.complex_double(self.dsel.size(), 0)
data = data.set_selected(self.dsel, f_mask_i.data())
return self.f_obs.set().array(data = data)
def compute_diff_map(self, f_mask_data):
if(self.f_calc is None): return None
f_mask = self.f_obs.customized_copy(data = f_mask_data)
fmodel = mmtbx.f_model.manager(
f_obs = self.f_obs,
f_calc = self.f_calc,
f_mask = f_mask)
fmodel = fmodel.select(self.dsel)
fmodel.update_all_scales(remove_outliers=True,
apply_scale_k1_to_f_obs = APPLY_SCALE_K1_TO_FOBS)
self.mc = fmodel.electron_density_map().map_coefficients(
map_type = "mFobs-DFmodel",
isotropize = True,
exclude_free_r_reflections = False)
fft_map = self.mc.fft_map(crystal_gridding = self.crystal_gridding)
fft_map.apply_sigma_scaling()
return fft_map.real_map_unpadded()
def compute_i_mask_asu(self, selection, volume):
mask_i = flex.double(flex.grid(self.n_real), 0)
mask_i = mask_i.set_selected(selection, 1)
if(self.write_masks):
write_map_file(
crystal_symmetry = self.crystal_symmetry,
map_data = mask_i,
file_name = "mask_%s.mrc"%str(round(volume,3)))
tmp = asu_map_ext.asymmetric_map(
self.crystal_symmetry.space_group().type(), mask_i).data()
return tmp
def algorithm_0(f_obs, F, kt):
"""
Grid search
"""
fc, f_masks = F[0], F[1:]
k_mask_trial_range=[]
s = -1
while s<1:
k_mask_trial_range.append(s)
s+=0.0001
r = []
fc_data = fc.data()
for i, f_mask in enumerate(f_masks):
#print("mask ",i)
assert f_obs.data().size() == fc.data().size()
assert f_mask.data().size() == fc.data().size()
#print (bulk_solvent.r_factor(f_obs.data(),fc_data))
kmask_, k_ = \
bulk_solvent.k_mask_and_k_overall_grid_search(
f_obs.data()*kt,
fc_data*kt,
f_mask.data()*kt,
flex.double(k_mask_trial_range),
flex.bool(fc.data().size(),True))
r.append(kmask_)
fc_data += fc_data*k_ + kmask_*f_mask.data()
#print (bulk_solvent.r_factor(f_obs.data(),fc_data + kmask_*f_mask.data(),k_))
r = [1,]+r
return r
def algorithm_2(i_obs, F, x, use_curvatures=True, macro_cycles=10):
"""
Unphased one-step search
"""
calculator = tg(i_obs = i_obs, F=F, x = x, use_curvatures=use_curvatures)
for it in range(macro_cycles):
if(use_curvatures):
m = minimizer(max_iterations=100, calculator=calculator)
else:
#upper = flex.double([1.1] + [1]*(x.size()-1))
#lower = flex.double([0.9] + [-1]*(x.size()-1))
upper = flex.double([1.1] + [5]*(x.size()-1))
lower = flex.double([0.9] + [-5]*(x.size()-1))
#upper = flex.double([10] + [5]*(x.size()-1))
#lower = flex.double([0.1] + [-5]*(x.size()-1))
#upper = flex.double([10] + [0.65]*(x.size()-1))
#lower = flex.double([0.1] + [0]*(x.size()-1))
#upper = flex.double([1] + [0.65]*(x.size()-1))
#lower = flex.double([1] + [0]*(x.size()-1))
#upper = flex.double([1] + [5.65]*(x.size()-1))
#lower = flex.double([1] + [-5]*(x.size()-1))
m = tncs.minimizer(
potential = calculator,
use_bounds = 2,
lower_bound = lower,
upper_bound = upper,
initial_values = x).run()
calculator = tg(i_obs = i_obs, F=F, x = m.x, use_curvatures=use_curvatures)
if(use_curvatures):
for it in range(10):
m = minimizer(max_iterations=100, calculator=calculator)
calculator = tg(i_obs = i_obs, F=F, x = m.x, use_curvatures=use_curvatures)
m = minimizer2(max_iterations=100, calculator=calculator).run(use_curvatures=True)
calculator = tg(i_obs = i_obs, F=F, x = m.x, use_curvatures=use_curvatures)
return m.x
def algorithm_3(i_obs, fc, f_masks):
"""
Unphased two-step search
"""
F = [fc]+f_masks
Gnm = []
cs = {}
cntr=0
nm=[]
# Compute and store Gnm
for n, Fn in enumerate(F):
for m, Fm in enumerate(F):
if m < n:
continue
Gnm.append( flex.real( Fn.data()*flex.conj(Fm.data()) ) )
cs[(n,m)] = cntr
cntr+=1
nm.append((n,m))
# Keep track of indices for "upper triangular matrix vs full"
for k,v in zip(list(cs.keys()), list(cs.values())):
i,j=k
if i==j: continue
else: cs[(j,i)]=v
# Generate and solve system Ax=b, x = A_1*b
A = []
b = []
for u, Gnm_u in enumerate(Gnm):
for v, Gnm_v in enumerate(Gnm):
scale = 2
n,m=nm[v]
if n==m: scale=1
A.append( flex.sum(Gnm_u*Gnm_v)*scale )
b.append( flex.sum(Gnm_u * i_obs.data()) )
A = matrix.sqr(A)
A_1 = A.inverse()
b = matrix.col(b)
x = A_1 * b
# Expand Xmn from solution x
Xmn = []
for n, Fn in enumerate(F):
rows = []
for m, Fm in enumerate(F):
x_ = x[cs[(n,m)]]
rows.append(x_)
Xmn.append(rows)
# Do formula (19)
lnK = []
for j, Fj in enumerate(F):
t1 = flex.sum( flex.log( flex.double(Xmn[j]) ) )
t2 = 0
for n, Fn in enumerate(F):
for m, Fm in enumerate(F):
t2 += math.log(Xmn[n][m])
t2 = t2 / (2*len(F))
lnK.append( 1/len(F)*(t1-t2) )
return [math.exp(x) for x in lnK]
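# --- Hedged numeric check (not in the original file) of the log-relation in
# formula (19) above, standard library only: with Xmn = kn*km for k = (2, 1)
# the per-structure scales are recovered exactly.
_X = [[4.0, 2.0], [2.0, 1.0]]
_n = len(_X)
_t2 = sum(math.log(_X[a][b]) for a in range(_n) for b in range(_n)) / (2 * _n)
_k = [math.exp((sum(math.log(_X[j][m]) for m in range(_n)) - _t2) / _n)
      for j in range(_n)]
assert [round(v, 9) for v in _k] == [2.0, 1.0]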
def algorithm_4(f_obs, F, phase_source, max_cycles=100, auto_converge_eps=1.e-7,
use_cpp=True):
"""
Phased simultaneous search (alg4)
"""
fc, f_masks = F[0], F[1:]
fc = fc.deep_copy()
F = [fc]+F[1:]
# C++ version
if(use_cpp):
return mosaic_ext.alg4(
[f.data() for f in F],
f_obs.data(),
phase_source.data(),
max_cycles,
auto_converge_eps)
# Python version (1.2-3 times slower, but much more readable!)
cntr = 0
x_prev = None
while True:
f_obs_cmpl = f_obs.phase_transfer(phase_source = phase_source)
A = []
b = []
for j, Fj in enumerate(F):
A_rows = []
for n, Fn in enumerate(F):
Gjn = flex.real( Fj.data()*flex.conj(Fn.data()) )
A_rows.append( flex.sum(Gjn) )
Hj = flex.real( Fj.data()*flex.conj(f_obs_cmpl.data()) )
b.append(flex.sum(Hj))
A.extend(A_rows)
A = matrix.sqr(A)
A_1 = A.inverse()
b = matrix.col(b)
x = A_1 * b
#
fc_d = flex.complex_double(phase_source.indices().size(), 0)
for i, f in enumerate(F):
fc_d += f.data()*x[i]
phase_source = phase_source.customized_copy(data = fc_d)
x_ = x[:]
#
cntr+=1
if(cntr>max_cycles): break
if(x_prev is None): x_prev = x_[:]
else:
max_diff = flex.max(flex.abs(flex.double(x_prev)-flex.double(x_)))
if(max_diff<=auto_converge_eps): break
x_prev = x_[:]
return x_
| 36.266355
| 107
| 0.60862
| 4,735
| 31,044
| 3.726716
| 0.099683
| 0.020118
| 0.009521
| 0.00714
| 0.469738
| 0.3938
| 0.346537
| 0.311119
| 0.27077
| 0.257792
| 0
| 0.017564
| 0.251739
| 31,044
| 855
| 108
| 36.308772
| 0.74209
| 0.161996
| 0
| 0.261755
| 0
| 0
| 0.01066
| 0.00093
| 0
| 0
| 0
| 0
| 0.010972
| 1
| 0.043887
| false
| 0
| 0.03605
| 0.007837
| 0.111285
| 0.017241
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
78de98de938be5cc3ac224e5095778425f0adabc
| 14,828
|
py
|
Python
|
members_abundances_in_out_uncertainties.py
|
kcotar/Gaia_clusters_potential
|
aee2658c40446891d31528f8dec3cec899b63c68
|
[
"MIT"
] | null | null | null |
members_abundances_in_out_uncertainties.py
|
kcotar/Gaia_clusters_potential
|
aee2658c40446891d31528f8dec3cec899b63c68
|
[
"MIT"
] | null | null | null |
members_abundances_in_out_uncertainties.py
|
kcotar/Gaia_clusters_potential
|
aee2658c40446891d31528f8dec3cec899b63c68
|
[
"MIT"
] | null | null | null |
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
from astropy.table import Table, join
from os import chdir, system
from scipy.stats import norm as gauss_norm
from scipy.interpolate import splrep, splev  # needed by the 'spline' branch of fit_abund_trend
from sys import argv
from getopt import getopt
# turn off polyfit ranking warnings
import warnings
warnings.filterwarnings('ignore')
def _prepare_pdf_data(means, stds, range, norm=True):
x_vals = np.linspace(range[0], range[1], 250)
y_vals = np.zeros_like(x_vals)
# create and sum the PDFs of all stellar abundances
for d_m, d_s in zip(means, stds):
if np.isfinite([d_m, d_s]).all():
y_vals += gauss_norm.pdf(x_vals, loc=d_m, scale=d_s)
# return normalized summed pdf of all stars
if norm and np.nansum(y_vals) > 0.:
y_vals = 1. * y_vals/np.nanmax(y_vals)
return x_vals, y_vals
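# --- Hedged sanity check (not in the original script): two identical narrow
# Gaussians should peak at their common mean on the 250-point grid above.
_x, _y = _prepare_pdf_data([0.1, 0.1], [0.05, 0.05], (-0.5, 0.5))
assert abs(_x[_y.argmax()] - 0.1) < 0.01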
def _prepare_hist_data(d, bins, range, norm=True):
heights, edges = np.histogram(d, bins=bins, range=range)
width = np.abs(edges[0] - edges[1])
if norm:
heights = 1.*heights / np.nanmax(heights)
return edges[:-1], heights, width
def _evaluate_abund_trend_fit(orig, fit, idx, sigma_low, sigma_high):
# difference from the original data
diff = orig - fit
std_diff = np.nanstd(diff[idx])
# select data that will be fitted
idx_outlier = np.logical_or(diff < (-1. * std_diff * sigma_low),
diff > (std_diff * sigma_high))
return np.logical_and(idx, ~idx_outlier)
def fit_abund_trend(p_data, a_data,
steps=3, sigma_low=2.5, sigma_high=2.5,
order=5, window=10, n_min_perc=10.,func='poly'):
idx_fit = np.logical_and(np.isfinite(p_data), np.isfinite(a_data))
data_len = np.sum(idx_fit)
n_fit_points_prev = np.sum(idx_fit)
if data_len <= order + 1:
return None, None
p_offset = np.nanmedian(p_data)
for i_f in range(steps): # number of sigma clipping steps
if func == 'cheb':
coef = np.polynomial.chebyshev.chebfit(p_data[idx_fit] - p_offset, a_data[idx_fit], order)
f_data = np.polynomial.chebyshev.chebval(p_data - p_offset, coef)
if func == 'legen':
coef = np.polynomial.legendre.legfit(p_data[idx_fit] - p_offset, a_data[idx_fit], order)
f_data = np.polynomial.legendre.legval(p_data - p_offset, coef)
if func == 'poly':
coef = np.polyfit(p_data[idx_fit] - p_offset, a_data[idx_fit], order)
f_data = np.poly1d(coef)(p_data - p_offset)
if func == 'spline':
coef = splrep(p_data[idx_fit] - p_offset, a_data[idx_fit], k=order, s=window)
f_data = splev(p_data - p_offset, coef)
idx_fit = _evaluate_abund_trend_fit(a_data, f_data, idx_fit, sigma_low, sigma_high)
n_fit_points = np.sum(idx_fit)
if 100.*n_fit_points/data_len < n_min_perc:
break
if n_fit_points == n_fit_points_prev:
break
else:
n_fit_points_prev = n_fit_points
a_std = np.nanstd(a_data - f_data)
return [coef, p_offset], a_std
def eval_abund_trend(p_data, m_data, func='poly'):
coef, p_offset = m_data
if func == 'cheb':
f_data = np.polynomial.chebyshev.chebval(p_data - p_offset, coef)
if func == 'legen':
f_data = np.polynomial.legendre.legval(p_data - p_offset, coef)
if func == 'poly':
f_data = np.poly1d(coef)(p_data - p_offset)
if func == 'spline':
f_data = splev(p_data - p_offset, coef)
return f_data
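# --- Hedged usage sketch (not in the original script) of the fit/eval pair
# above on synthetic data; 'poly' is the branch both functions default to.
_teff = np.linspace(4000., 6500., 200)
_abund = 1e-4 * (_teff - 5000.) + np.random.normal(0., 0.02, _teff.size)
_model, _scatter = fit_abund_trend(_teff, _abund, order=3, steps=2, func='poly')
_detrended = _abund - eval_abund_trend(_teff, _model, func='poly')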
simulation_dir = '/shared/data-camelot/cotar/'
data_dir_clusters = simulation_dir+'GaiaDR2_open_clusters_2001_GALAH/'
data_dir = '/shared/ebla/cotar/'
USE_DR3 = True
Q_FLAGS = True
P_INDIVIDUAL = False
suffix = ''
if len(argv) > 1:
# parse input options
opts, args = getopt(argv[1:], '', ['dr3=', 'suffix=', 'flags=', 'individual='])
# set parameters, depending on user inputs
print(opts)
for o, a in opts:
if o == '--dr3':
USE_DR3 = int(a) > 0
if o == '--suffix':
suffix += str(a)
if o == '--flags':
Q_FLAGS = int(a) > 0
if o == '--individual':
P_INDIVIDUAL = int(a) > 0
CG_data = Table.read(data_dir+'clusters/Cantat-Gaudin_2018/members.fits')
tails_data = Table.read(data_dir+'clusters/cluster_tails/members_open_gaia_tails.fits')
# remove cluster members from tails data
print('Cluster members all:', len(CG_data), len(tails_data))
idx_not_in_cluster = np.in1d(tails_data['source_id'], CG_data['source_id'], invert=True)
tails_data = tails_data[idx_not_in_cluster]
print('Cluster members after tail overlap removal:', len(CG_data), len(tails_data))
if USE_DR3:
# cannon_data = Table.read(data_dir+'GALAH_iDR3_main_alpha_190529.fits')
cannon_data = Table.read(data_dir+'GALAH_iDR3_main_191213.fits')
fe_col = 'fe_h'
teff_col = 'teff'
q_flag = 'flag_sp'
suffix += '_DR3'
else:
pass
if Q_FLAGS:
suffix += '_flag0'
# determine all possible simulation subdirs
chdir(data_dir_clusters)
for cluster_dir in glob('Cluster_orbits_GaiaDR2_*'):
chdir(cluster_dir)
print('Working on clusters in ' + cluster_dir)
for sub_dir in glob('*'):
current_cluster = '_'.join(sub_dir.split('_')[0:2])
source_id_cg = CG_data[CG_data['cluster'] == current_cluster]['source_id']
source_id_tail = tails_data[tails_data['cluster'] == current_cluster]['source_id']
idx_cg_memb = np.in1d(cannon_data['source_id'], np.array(source_id_cg))
idx_tail = np.in1d(cannon_data['source_id'], np.array(source_id_tail))
if '.png' in sub_dir or 'individual-abund' in sub_dir:
continue
print(' ')
print(sub_dir)
chdir(sub_dir)
try:
g_init = Table.read('members_init_galah.csv', format='ascii', delimiter='\t')
idx_init = np.in1d(cannon_data['source_id'], g_init['source_id'])
except:
idx_init = np.full(len(cannon_data), False)
try:
g_in_all = Table.read('possible_ejected-step1.csv', format='ascii', delimiter='\t')
g_in = Table.read('possible_ejected-step1_galah.csv', format='ascii', delimiter='\t')
# further refinement of results to be plotted here
g_in_all = g_in_all[np.logical_and(g_in_all['time_in_cluster'] >= 1., # [Myr] longest time (of all incarnations) inside cluster
g_in_all['in_cluster_prob'] >= 68.)] # percentage of reincarnations inside cluster
g_in = g_in[np.logical_and(g_in['time_in_cluster'] >= 1.,
g_in['in_cluster_prob'] >= 68.)]
idx_in = np.in1d(cannon_data['source_id'], g_in['source_id'])
idx_in_no_CG = np.logical_and(idx_in,
np.logical_not(np.in1d(cannon_data['source_id'], CG_data['source_id'])))
except:
idx_in = np.full(len(cannon_data), False)
idx_in_no_CG = np.full(len(cannon_data), False)
try:
g_out = Table.read('possible_outside-step1_galah.csv', format='ascii', delimiter='\t')
# further refinement of results to be plotted here
g_out = g_out[np.logical_and(g_out['time_in_cluster'] <= 0,
g_out['in_cluster_prob'] <= 0)]
idx_out = np.in1d(cannon_data['source_id'], g_out['source_id'])
except:
idx_out = np.full(len(cannon_data), False)
chdir('..')
if np.sum(idx_init) == 0 or np.sum(idx_in) == 0 or np.sum(idx_out) == 0:
print(' Some Galah lists are missing')
if USE_DR3:
abund_cols = [c for c in cannon_data.colnames if '_fe' in c and 'nr_' not in c and 'diff_' not in c and 'e_' not in c and 'Li' not in c and 'alpha' not in c] # and ('I' in c or 'II' in c or 'III' in c)]
else:
abund_cols = [c for c in cannon_data.colnames if '_abund' in c and len(c.split('_')) == 3]
# abund_cols = ['e_' + cc for cc in abund_cols]
# rg = (0., 0.35)
# yt = [0., 0.1, 0.2, 0.3]
# medfix = '-snr-sigma_'
abund_cols = ['diff_' + cc for cc in abund_cols]
rg = (-0.45, 0.45)
yt = [-0.3, -0.15, 0.0, 0.15, 0.3]
medfix = '-detrended-snr_'
# ------------------------------------------------------------------------------
# NEW: plot with parameter dependency trends
# ------------------------------------------------------------------------------
bs = 40
x_cols_fig = 7
y_cols_fig = 5
param_lims = {'snr_c2_iraf': [5, 175], 'age': [0., 14.], 'teff': [3000, 7000], 'logg': [0.0, 5.5], 'fe_h': [-1.2, 0.5]}
for param in ['snr_c2_iraf']: #list(param_lims.keys()):
cannon_data['abund_det'] = 0
cannon_data['abund_det_elems'] = 0
print('Estimating membership using parameter', param)
fig, ax = plt.subplots(y_cols_fig, x_cols_fig, figsize=(15, 10))
for i_c, col in enumerate(abund_cols):
# print(col)
x_p = i_c % x_cols_fig
y_p = int(1. * i_c / x_cols_fig)
fit_x_param = 'teff'
cur_abund_col = '_'.join(col.split('_')[1:])
cannon_data['diff_' + cur_abund_col] = cannon_data[cur_abund_col]
idx_val = np.isfinite(cannon_data[col])
if Q_FLAGS:
idx_val = np.logical_and(idx_val, cannon_data[q_flag] == 0)
idx_u1 = np.logical_and(idx_out, idx_val)
idx_u2 = np.logical_and(idx_init, idx_val)
idx_u3 = np.logical_and(idx_in, idx_val)
idx_u4 = np.logical_and(idx_cg_memb, idx_val)
idx_u5 = np.logical_and(idx_tail, idx_val)
fit_model, col_std = fit_abund_trend(cannon_data[fit_x_param][idx_u2],
cannon_data[cur_abund_col][idx_u2],
order=3, steps=2, func='poly',
sigma_low=2.5, sigma_high=2.5, n_min_perc=10.)
if fit_model is not None:
cannon_data['diff_' + cur_abund_col] = cannon_data[cur_abund_col] - eval_abund_trend(cannon_data[fit_x_param], fit_model, func='poly')
else:
cannon_data['diff_' + cur_abund_col] = np.nan
ax[y_p, x_p].scatter(cannon_data[param][idx_u1], cannon_data[col][idx_u1],
lw=0, s=3, color='C2', label='Field')
ax[y_p, x_p].scatter(cannon_data[param][idx_u2], cannon_data[col][idx_u2],
lw=0, s=3, color='C0', label='Initial')
ax[y_p, x_p].scatter(cannon_data[param][idx_u3], cannon_data[col][idx_u3],
lw=0, s=3, color='C1', label='Ejected')
if np.sum(idx_u5) > 0:
print('Ejected in tail:', np.sum(np.logical_and(idx_u3, idx_u5)))
ax[y_p, x_p].scatter(cannon_data[param][idx_u5], cannon_data[col][idx_u5],
lw=0, s=3, color='C4', label='Tail')
label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3))
ax[y_p, x_p].set(xlim=param_lims[param], title=' '.join(col.split('_')[:2]) + label_add,
ylim=rg,
yticks=yt,)
ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black')
rg = (-0.6, 0.6)
idx_val = np.isfinite(cannon_data[teff_col])
if Q_FLAGS:
idx_val = np.logical_and(idx_val, cannon_data[q_flag] == 0)
x_p = -1
y_p = -1
idx_u1 = np.logical_and(idx_out, idx_val)
idx_u2 = np.logical_and(idx_init, idx_val)
idx_u3 = np.logical_and(idx_in, idx_val)
idx_u5 = np.logical_and(idx_tail, idx_val)
sl1 = ax[y_p, x_p].scatter(cannon_data[param][idx_u1], cannon_data[fe_col][idx_u1],
lw=0, s=3, color='C2', label='Field')
sl2 = ax[y_p, x_p].scatter(cannon_data[param][idx_u2], cannon_data[fe_col][idx_u2],
lw=0, s=3, color='C0', label='Initial')
sl3 = ax[y_p, x_p].scatter(cannon_data[param][idx_u3], cannon_data[fe_col][idx_u3],
lw=0, s=3, color='C1', label='Ejected')
fit_model, col_std = fit_abund_trend(cannon_data[param][idx_u2], cannon_data[fe_col][idx_u2],
order=3, steps=2, sigma_low=2.5, sigma_high=2.5, n_min_perc=10.,
func='poly')
if np.sum(idx_u5) > 0:
sl5 = ax[y_p, x_p].scatter(cannon_data[param][idx_u5], cannon_data[fe_col][idx_u5],
lw=0, s=3, color='C4', label='Tail')
ax[-1, -3].legend(handles=[sl1, sl2, sl3, sl5])
else:
        ax[-1, -3].legend(handles=[sl1, sl2, sl3])
label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3))
ax[y_p, x_p].set(ylim=rg, title='Fe/H' + label_add, xlim=param_lims[param])
ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black')
x_p = -2
y_p = -1
ax[y_p, x_p].scatter(cannon_data['age'][idx_u1], cannon_data[param][idx_u1],
lw=0, s=3, color='C2', label='Field')
ax[y_p, x_p].scatter(cannon_data['age'][idx_u2], cannon_data[param][idx_u2],
lw=0, s=3, color='C0', label='Initial')
ax[y_p, x_p].scatter(cannon_data['age'][idx_u3], cannon_data[param][idx_u3],
lw=0, s=3, color='C1', label='Ejected')
if np.sum(idx_u5) > 0:
ax[y_p, x_p].scatter(cannon_data['age'][idx_u5], cannon_data[param][idx_u5],
lw=0, s=3, color='C4', label='Tail')
label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3))
ax[y_p, x_p].set(ylim=param_lims[param], title='age' + label_add, xlim=[0., 14.])
ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black')
plt.subplots_adjust(top=0.97, bottom=0.02, left=0.04, right=0.98, hspace=0.3, wspace=0.3)
# plt.show()
    plt.savefig('p_' + param + '_abundances' + medfix + sub_dir + suffix + '.png', dpi=250)
plt.close(fig)
chdir('..')
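A minimal standalone sketch of the detrending step above, assuming plain least squares in place of the project's sigma-clipped fit_abund_trend/eval_abund_trend helpers (the arrays below are synthetic):

import numpy as np

teff = np.linspace(4000., 6500., 200)  # synthetic stand-in for cannon_data['teff']
abund = 0.1 + 2e-5 * (teff - 5500.) + np.random.normal(0., 0.05, teff.size)
coef = np.polyfit(teff, abund, deg=3)        # fit the abundance-vs-teff trend (order=3 as above)
detrended = abund - np.polyval(coef, teff)   # subtract it, as the 'diff_' columns do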
78e3235c058d0f0d01fe78bcda45b0e5210cc956 | 3,798 | py | Python
| modules/pygsm/devicewrapper.py | whanderley/eden | 08ced3be3d52352c54cbd412ed86128fbb68b1d2 | ["MIT"] | 205 | 2015-01-20T08:26:09.000Z | 2022-03-27T19:59:33.000Z
| modules/pygsm/devicewrapper.py | nursix/eden-asp | e49f46cb6488918f8d5a163dcd5a900cd686978c | ["MIT"] | 249 | 2015-02-10T09:56:35.000Z | 2022-03-23T19:54:36.000Z
| modules/pygsm/devicewrapper.py | nursix/eden-asp | e49f46cb6488918f8d5a163dcd5a900cd686978c | ["MIT"] | 231 | 2015-02-10T09:33:17.000Z | 2022-02-18T19:56:05.000Z
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
# arch: pacman -S python-pyserial
# debian/ubuntu: apt-get install python-serial
import serial
import re
import errors
class DeviceWrapper(object):
def __init__(self, logger, *args, **kwargs):
self.device = serial.Serial(*args, **kwargs)
self.logger = logger
def isOpen(self):
return self.device.isOpen()
def close(self):
self.device.close()
def write(self, str):
self.device.write(str)
def _read(self, read_term=None, read_timeout=None):
"""Read from the modem (blocking) until _terminator_ is hit,
(defaults to \r\n, which reads a single "line"), and return."""
buffer = []
# if a different timeout was requested just
# for _this_ read, store and override the
# current device setting (not thread safe!)
if read_timeout is not None:
old_timeout = self.device.timeout
self.device.timeout = read_timeout
def __reset_timeout():
"""restore the device's previous timeout
setting, if we overrode it earlier."""
if read_timeout is not None:
                self.device.timeout = old_timeout
# the default terminator reads
# until a newline is hit
if read_term is None:
read_term = "\r\n"
        while True:
buf = self.device.read()
buffer.append(buf)
# if a timeout was hit, raise an exception including the raw data that
# we've already read (in case the calling func was _expecting_ a timeout
# (wouldn't it be nice if serial.Serial.read returned None for this?)
if buf == '':
__reset_timeout()
                raise errors.GsmReadTimeoutError(buffer)
# if last n characters of the buffer match the read
# terminator, return what we've received so far
if ''.join(buffer[-len(read_term):]) == read_term:
buf_str = ''.join(buffer)
__reset_timeout()
self._log(repr(buf_str), 'read')
return buf_str
def read_lines(self, read_term=None, read_timeout=None):
"""Read from the modem (blocking) one line at a time until a response
terminator ("OK", "ERROR", or "CMx ERROR...") is hit, then return
a list containing the lines."""
buffer = []
# keep on looping until a command terminator
# is encountered. these are NOT the same as the
# "read_term" argument - only OK or ERROR is valid
        while True:
buf = self._read(
read_term=read_term,
read_timeout=read_timeout)
buf = buf.strip()
buffer.append(buf)
# most commands return OK for success, but there
# are some exceptions. we're not checking those
# here (unlike RubyGSM), because they should be
# handled when they're _expected_
if buf == "OK":
return buffer
# some errors contain useful error codes, so raise a
# proper error with a description from pygsm/errors.py
m = re.match(r"^\+(CM[ES]) ERROR: (\d+)$", buf)
if m is not None:
type, code = m.groups()
                raise errors.GsmModemError(type, int(code))
# ...some errors are not so useful
# (at+cmee=1 should enable error codes)
if buf == "ERROR":
                raise errors.GsmModemError
def _log(self, str, type="debug"):
if hasattr(self, "logger"):
self.logger(self, str, type)
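read_lines() above recognizes modem error responses with a fixed regular expression; a small self-contained check of that pattern (the sample lines are made up, no serial port required):

import re

for line in ('+CMS ERROR: 500', '+CME ERROR: 10', 'OK'):
    m = re.match(r'^\+(CM[ES]) ERROR: (\d+)$', line)
    if m is not None:
        err_type, code = m.groups()
        print(line, '->', err_type, int(code))  # e.g. '+CMS ERROR: 500' -> CMS 500
    else:
        print(line, '-> not a modem error')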
78e6e9a7d73aab5ad3ba5822b10f0996d16afd5b | 1,762 | py | Python
| examples/sim_tfidf.py | sunyilgdx/CwVW-SIF | 85ef56d80512e2f6bff1266e030552075566b240 | ["MIT"] | 12 | 2019-05-14T10:31:53.000Z | 2022-01-20T17:16:59.000Z
| examples/sim_tfidf.py | sunyilgdx/CwVW-SIF | 85ef56d80512e2f6bff1266e030552075566b240 | ["MIT"] | null | null | null
| examples/sim_tfidf.py | sunyilgdx/CwVW-SIF | 85ef56d80512e2f6bff1266e030552075566b240 | ["MIT"] | 1 | 2020-12-21T09:16:51.000Z | 2020-12-21T09:16:51.000Z
|
import pickle, sys
sys.path.append('../src')
import data_io, sim_algo, eval, params
## run
# wordfiles = [#'../data/paragram_sl999_small.txt', # need to download it from John Wieting's github (https://github.com/jwieting/iclr2016)
# '../data/glove.840B.300d.txt' # need to download it first
# ]
wordfiles = [#'../data/paragram_sl999_small.txt', # need to download it from John Wieting's github (https://github.com/jwieting/iclr2016)
'../data/glove.6B.50d.txt' # need to download it first
]
rmpcs = [0,1]
comment4para = [ # need to align with the following loop
['word vector files', wordfiles], # comments and values,
['remove principal component or not', rmpcs]
]
params = params.params()
parr4para = {}
sarr4para = {}
for wordfile in wordfiles:
(words, We) = data_io.getWordmap(wordfile)
weight4ind = data_io.getIDFWeight(wordfile)
for rmpc in rmpcs:
print('word vectors loaded from %s' % wordfile)
print('word weights computed from idf')
params.rmpc = rmpc
print('remove the first %d principal components' % rmpc)
# eval just one example dataset
parr, sarr = eval.sim_evaluate_one(We, words, weight4ind, sim_algo.weighted_average_sim_rmpc, params)
## eval all datasets; need to obtained datasets from John Wieting (https://github.com/jwieting/iclr2016)
# parr, sarr = eval.sim_evaluate_all(We, words, weight4ind, sim_algo.weighted_average_sim_rmpc, params)
paras = (wordfile, rmpc)
parr4para[paras] = parr
sarr4para[paras] = sarr
## save result
save_result = False # True
result_file = 'result/sim_tfidf.result'
if save_result:
    with open(result_file, 'wb') as f:  # pickle requires a binary-mode file
        pickle.dump([parr4para, sarr4para, comment4para], f)
78e74ab110d94c6516104012ed887badd152a66c | 1,602 | py | Python
| theano-rfnn/mnist_loader.py | jhja/RFNN | a63641d6e584df743a5e0a9efaf41911f057a977 | ["MIT"] | 55 | 2016-05-11T18:53:30.000Z | 2022-02-22T12:31:08.000Z
| theano-rfnn/mnist_loader.py | jhja/RFNN | a63641d6e584df743a5e0a9efaf41911f057a977 | ["MIT"] | null | null | null
| theano-rfnn/mnist_loader.py | jhja/RFNN | a63641d6e584df743a5e0a9efaf41911f057a977 | ["MIT"] | 14 | 2016-08-16T02:00:47.000Z | 2022-03-08T13:16:00.000Z
|
import numpy as np
import os
from random import shuffle
datasets_dir = './../data/'
def one_hot(x,n):
if type(x) == list:
x = np.array(x)
x = x.flatten()
o_h = np.zeros((len(x),n))
o_h[np.arange(len(x)),x] = 1
return o_h
def mnist(ntrain=60000,ntest=10000,onehot=True):
ntrain=np.array(ntrain).astype(int).squeeze()
data_dir = os.path.join(datasets_dir,'mnist/')
    fd = open(os.path.join(data_dir,'train-images-idx3-ubyte'), 'rb')  # binary mode for np.fromfile
    loaded = np.fromfile(file=fd,dtype=np.uint8)
    trX = loaded[16:].reshape((60000,28*28)).astype(float)
    fd = open(os.path.join(data_dir,'train-labels-idx1-ubyte'), 'rb')
    loaded = np.fromfile(file=fd,dtype=np.uint8)
    trY = loaded[8:].reshape((60000))
    fd = open(os.path.join(data_dir,'t10k-images-idx3-ubyte'), 'rb')
    loaded = np.fromfile(file=fd,dtype=np.uint8)
    teX = loaded[16:].reshape((10000,28*28)).astype(float)
    fd = open(os.path.join(data_dir,'t10k-labels-idx1-ubyte'), 'rb')
    loaded = np.fromfile(file=fd,dtype=np.uint8)
    teY = loaded[8:].reshape((10000))
    trY_shuffle = []
    trX_shuffle = []
    index_shuf = list(range(len(trY)))  # list() so shuffle works on Python 3
    shuffle(index_shuf)
for i in index_shuf:
trY_shuffle.append(trY[i])
trX_shuffle.append(trX[i])
trX = np.asarray(trX_shuffle)
trY = np.asarray(trY_shuffle)
trX = trX/255.
teX = teX/255.
trX = trX[:ntrain]
trY = trY[:ntrain]
teX = teX[:ntest]
teY = teY[:ntest]
if onehot:
trY = one_hot(trY, 10)
teY = one_hot(teY, 10)
else:
trY = np.asarray(trY)
teY = np.asarray(teY)
return trX,teX,trY,teY
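A quick sanity check of the one_hot helper above; it runs without the MNIST idx files (the expected matrix is written out by hand):

import numpy as np

expected = np.array([[0., 1., 0.],
                     [1., 0., 0.],
                     [0., 0., 1.]])
assert (one_hot([1, 0, 2], 3) == expected).all()  # one 1 per row, at the label's index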
78e7d5ba18b9d335d132f7d6ec0d73b6ca3d020d | 686 | py | Python
| Ejercicio 2.py | crltsnch/Ejercicios-grupales | 72e01d6489816ea1b9308af1abd62792e5464c93 | ["Apache-2.0"] | null | null | null
| Ejercicio 2.py | crltsnch/Ejercicios-grupales | 72e01d6489816ea1b9308af1abd62792e5464c93 | ["Apache-2.0"] | null | null | null
| Ejercicio 2.py | crltsnch/Ejercicios-grupales | 72e01d6489816ea1b9308af1abd62792e5464c93 | ["Apache-2.0"] | null | null | null
|
import math
import os
import random
import re
import sys
def compareTriplets(a, b):
puntosA=0
puntosB=0
    for i in range(0, 3):
if a[i]<b[i]:
puntosB+=1
elif a[i]>b[i]:
puntosA+=1
puntosTotales=[puntosA, puntosB]
return puntosTotales
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'] + 'solucion2.txt', 'w')
print("Escribe las notas de a")
a = list(map(int, input().rstrip().split()))
print("Escribe las notas de b")
b = list(map(int, input().rstrip().split()))
result = compareTriplets(a, b)
fptr.write(' '.join(map(str, result)))
fptr.write('\n')
fptr.close()
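A quick check of compareTriplets above with the classic sample triplets (standalone, no OUTPUT_PATH needed): a wins on index 0, ties on index 1 and loses on index 2, so the score is [1, 1].

assert compareTriplets([5, 6, 7], [3, 6, 10]) == [1, 1]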
78f03cf1af94e18c9a855dfd8bbdda1565566674 | 17,569 | py | Python
| autokeras/hypermodel/graph.py | Sette/autokeras | c5a83607a899ad545916b3794561d6908d9cdbac | ["MIT"] | null | null | null
| autokeras/hypermodel/graph.py | Sette/autokeras | c5a83607a899ad545916b3794561d6908d9cdbac | ["MIT"] | null | null | null
| autokeras/hypermodel/graph.py | Sette/autokeras | c5a83607a899ad545916b3794561d6908d9cdbac | ["MIT"] | null | null | null
|
import functools
import pickle
import kerastuner
import tensorflow as tf
from tensorflow.python.util import nest
from autokeras.hypermodel import base
from autokeras.hypermodel import compiler
class Graph(kerastuner.engine.stateful.Stateful):
"""A graph consists of connected Blocks, HyperBlocks, Preprocessors or Heads.
# Arguments
inputs: A list of input node(s) for the Graph.
outputs: A list of output node(s) for the Graph.
override_hps: A list of HyperParameters. The predefined HyperParameters that
will override the space of the Hyperparameters defined in the Hypermodels
with the same names.
"""
def __init__(self, inputs, outputs, override_hps=None):
super().__init__()
self.inputs = nest.flatten(inputs)
self.outputs = nest.flatten(outputs)
self._node_to_id = {}
self._nodes = []
self.blocks = []
self._block_to_id = {}
self._build_network()
self.override_hps = override_hps or []
def compile(self, func):
"""Share the information between blocks by calling functions in compiler.
# Arguments
func: A dictionary. The keys are the block classes. The values are
corresponding compile functions.
"""
for block in self.blocks:
if block.__class__ in func:
func[block.__class__](block)
def _register_hps(self, hp):
"""Register the override HyperParameters for current HyperParameters."""
for single_hp in self.override_hps:
name = single_hp.name
if name not in hp.values:
hp.register(single_hp.name,
single_hp.__class__.__name__,
single_hp.get_config())
hp.values[name] = single_hp.default
def _build_network(self):
self._node_to_id = {}
# Recursively find all the interested nodes.
for input_node in self.inputs:
self._search_network(input_node, self.outputs, set(), set())
self._nodes = sorted(list(self._node_to_id.keys()),
key=lambda x: self._node_to_id[x])
for node in (self.inputs + self.outputs):
if node not in self._node_to_id:
raise ValueError('Inputs and outputs not connected.')
# Find the blocks.
blocks = []
for input_node in self._nodes:
for block in input_node.out_blocks:
if any([output_node in self._node_to_id
for output_node in block.outputs]) and block not in blocks:
blocks.append(block)
# Check if all the inputs of the blocks are set as inputs.
for block in blocks:
for input_node in block.inputs:
if input_node not in self._node_to_id:
raise ValueError('A required input is missing for HyperModel '
'{name}.'.format(name=block.name))
# Calculate the in degree of all the nodes
in_degree = [0] * len(self._nodes)
for node_id, node in enumerate(self._nodes):
in_degree[node_id] = len([
block for block in node.in_blocks if block in blocks])
# Add the blocks in topological order.
self.blocks = []
self._block_to_id = {}
while len(blocks) != 0:
new_added = []
# Collect blocks with in degree 0.
for block in blocks:
if any([in_degree[self._node_to_id[node]]
for node in block.inputs]):
continue
new_added.append(block)
# Remove the collected blocks from blocks.
for block in new_added:
blocks.remove(block)
for block in new_added:
# Add the collected blocks to the AutoModel.
self._add_block(block)
# Decrease the in degree of the output nodes.
for output_node in block.outputs:
if output_node not in self._node_to_id:
continue
output_node_id = self._node_to_id[output_node]
in_degree[output_node_id] -= 1
def _search_network(self, input_node, outputs, in_stack_nodes,
visited_nodes):
visited_nodes.add(input_node)
in_stack_nodes.add(input_node)
outputs_reached = False
if input_node in outputs:
outputs_reached = True
for block in input_node.out_blocks:
for output_node in block.outputs:
if output_node in in_stack_nodes:
raise ValueError('The network has a cycle.')
if output_node not in visited_nodes:
self._search_network(output_node, outputs, in_stack_nodes,
visited_nodes)
if output_node in self._node_to_id.keys():
outputs_reached = True
if outputs_reached:
self._add_node(input_node)
in_stack_nodes.remove(input_node)
def _add_block(self, block):
if block not in self.blocks:
block_id = len(self.blocks)
self._block_to_id[block] = block_id
self.blocks.append(block)
def _add_node(self, input_node):
if input_node not in self._node_to_id:
self._node_to_id[input_node] = len(self._node_to_id)
def _get_block(self, name):
for block in self.blocks:
if block.name == name:
return block
raise ValueError('Cannot find block named {name}.'.format(name=name))
def get_state(self):
# TODO: Include everything including the graph structure.
block_state = {str(block_id): block.get_state()
for block_id, block in enumerate(self.blocks)}
node_state = {str(node_id): node.get_state()
for node_id, node in enumerate(self._nodes)}
return {'blocks': block_state, 'nodes': node_state}
def set_state(self, state):
# TODO: Include everything including the graph structure.
block_state = state['blocks']
node_state = state['nodes']
for block_id, block in enumerate(self.blocks):
block.set_state(block_state[str(block_id)])
for node_id, node in enumerate(self._nodes):
node.set_state(node_state[str(node_id)])
def save(self, fname):
state = self.get_state()
with tf.io.gfile.GFile(fname, 'wb') as f:
pickle.dump(state, f)
return str(fname)
def reload(self, fname):
with tf.io.gfile.GFile(fname, 'rb') as f:
state = pickle.load(f)
self.set_state(state)
def build(self, hp):
self._register_hps(hp)
class PlainGraph(Graph):
"""A graph built from a HyperGraph to produce KerasGraph and PreprocessGraph.
A PlainGraph does not contain HyperBlock. HyperGraph's hyper_build function
returns an instance of PlainGraph, which can be directly built into a KerasGraph
and a PreprocessGraph.
# Arguments
inputs: A list of input node(s) for the PlainGraph.
outputs: A list of output node(s) for the PlainGraph.
"""
def __init__(self, inputs, outputs, **kwargs):
self._keras_model_inputs = []
super().__init__(inputs=inputs, outputs=outputs, **kwargs)
def _build_network(self):
super()._build_network()
# Find the model input nodes
for node in self._nodes:
if self._is_keras_model_inputs(node):
self._keras_model_inputs.append(node)
self._keras_model_inputs = sorted(self._keras_model_inputs,
key=lambda x: self._node_to_id[x])
@staticmethod
def _is_keras_model_inputs(node):
for block in node.in_blocks:
if not isinstance(block, base.Preprocessor):
return False
for block in node.out_blocks:
if not isinstance(block, base.Preprocessor):
return True
return False
def build_keras_graph(self):
return KerasGraph(self._keras_model_inputs,
self.outputs,
override_hps=self.override_hps)
def build_preprocess_graph(self):
return PreprocessGraph(self.inputs,
self._keras_model_inputs,
override_hps=self.override_hps)
class KerasGraph(Graph, kerastuner.HyperModel):
"""A graph and HyperModel to be built into a Keras model."""
def build(self, hp):
"""Build the HyperModel into a Keras Model."""
super().build(hp)
self.compile(compiler.AFTER)
real_nodes = {}
for input_node in self.inputs:
node_id = self._node_to_id[input_node]
real_nodes[node_id] = input_node.build()
for block in self.blocks:
if isinstance(block, base.Preprocessor):
continue
temp_inputs = [real_nodes[self._node_to_id[input_node]]
for input_node in block.inputs]
outputs = block.build(hp, inputs=temp_inputs)
outputs = nest.flatten(outputs)
for output_node, real_output_node in zip(block.outputs, outputs):
real_nodes[self._node_to_id[output_node]] = real_output_node
model = tf.keras.Model(
[real_nodes[self._node_to_id[input_node]] for input_node in
self.inputs],
[real_nodes[self._node_to_id[output_node]] for output_node in
self.outputs])
return self._compile_keras_model(hp, model)
def _get_metrics(self):
metrics = {}
for output_node in self.outputs:
block = output_node.in_blocks[0]
if isinstance(block, base.Head):
metrics[block.name] = block.metrics
return metrics
def _get_loss(self):
loss = {}
for output_node in self.outputs:
block = output_node.in_blocks[0]
if isinstance(block, base.Head):
loss[block.name] = block.loss
return loss
def _compile_keras_model(self, hp, model):
# Specify hyperparameters from compile(...)
optimizer = hp.Choice('optimizer',
['adam', 'adadelta', 'sgd'],
default='adam')
model.compile(optimizer=optimizer,
metrics=self._get_metrics(),
loss=self._get_loss())
return model
class PreprocessGraph(Graph):
"""A graph consists of only Preprocessors.
It is both a search space with Hyperparameters and a model to be fitted. It
preprocess the dataset with the Preprocessors. The output is the input to the
Keras model. It does not extend Hypermodel class because it cannot be built into
a Keras model.
"""
def preprocess(self, dataset, validation_data=None, fit=False):
"""Preprocess the data to be ready for the Keras Model.
# Arguments
dataset: tf.data.Dataset. Training data.
validation_data: tf.data.Dataset. Validation data.
fit: Boolean. Whether to fit the preprocessing layers with x and y.
# Returns
        If validation data is provided:
        a tuple of two preprocessed tf.data.Dataset, (train, validation).
        Otherwise, the preprocessed training dataset.
"""
dataset = self._preprocess(dataset, fit=fit)
if validation_data:
validation_data = self._preprocess(validation_data)
return dataset, validation_data
def _preprocess(self, dataset, fit=False):
# A list of input node ids in the same order as the x in the dataset.
input_node_ids = [self._node_to_id[input_node] for input_node in self.inputs]
# Iterate until all the model inputs have their data.
while set(map(lambda node: self._node_to_id[node], self.outputs)
) - set(input_node_ids):
# Gather the blocks for the next iteration over the dataset.
blocks = []
for node_id in input_node_ids:
for block in self._nodes[node_id].out_blocks:
if block in self.blocks:
blocks.append(block)
if fit:
# Iterate the dataset to fit the preprocessors in current depth.
self._fit(dataset, input_node_ids, blocks)
# Transform the dataset.
output_node_ids = []
dataset = dataset.map(functools.partial(
self._transform,
input_node_ids=input_node_ids,
output_node_ids=output_node_ids,
blocks=blocks,
fit=fit))
# Build input_node_ids for next depth.
input_node_ids = output_node_ids
return dataset
def _fit(self, dataset, input_node_ids, blocks):
# Iterate the dataset to fit the preprocessors in current depth.
for x, y in dataset:
x = nest.flatten(x)
id_to_data = {
node_id: temp_x for temp_x, node_id in zip(x, input_node_ids)
}
for block in blocks:
data = [id_to_data[self._node_to_id[input_node]]
for input_node in block.inputs]
block.update(data, y=y)
# Finalize and set the shapes of the output nodes.
for block in blocks:
block.finalize()
nest.flatten(block.outputs)[0].shape = block.output_shape
def _transform(self,
x,
y,
input_node_ids,
output_node_ids,
blocks,
fit=False):
x = nest.flatten(x)
id_to_data = {
node_id: temp_x
for temp_x, node_id in zip(x, input_node_ids)
}
output_data = {}
# Transform each x by the corresponding block.
for hm in blocks:
data = [id_to_data[self._node_to_id[input_node]]
for input_node in hm.inputs]
data = tf.py_function(functools.partial(hm.transform, fit=fit),
inp=nest.flatten(data),
Tout=hm.output_types())
data = nest.flatten(data)[0]
data.set_shape(hm.output_shape)
output_data[self._node_to_id[hm.outputs[0]]] = data
        # Keep the Keras Model inputs even if they are not inputs to the blocks.
for node_id, data in id_to_data.items():
if self._nodes[node_id] in self.outputs:
output_data[node_id] = data
for node_id in sorted(output_data.keys()):
output_node_ids.append(node_id)
return tuple(map(
lambda node_id: output_data[node_id], output_node_ids)), y
def build(self, hp):
"""Obtain the values of all the HyperParameters.
        Unlike the build function of a Hypermodel, this build function does not
        produce a Keras model. It only obtains the hyperparameter values from the
        HyperParameters.
# Arguments
hp: HyperParameters.
"""
super().build(hp)
self.compile(compiler.BEFORE)
for block in self.blocks:
block.build(hp)
def copy(old_instance):
instance = old_instance.__class__()
instance.set_state(old_instance.get_state())
return instance
class HyperGraph(Graph):
"""A HyperModel based on connected Blocks and HyperBlocks.
# Arguments
inputs: A list of input node(s) for the HyperGraph.
outputs: A list of output node(s) for the HyperGraph.
"""
def __init__(self, inputs, outputs, **kwargs):
super().__init__(inputs, outputs, **kwargs)
self.compile(compiler.HYPER)
def build_graphs(self, hp):
plain_graph = self.hyper_build(hp)
preprocess_graph = plain_graph.build_preprocess_graph()
preprocess_graph.build(hp)
return (preprocess_graph,
plain_graph.build_keras_graph())
def hyper_build(self, hp):
"""Build a GraphHyperModel with no HyperBlock but only Block."""
# Make sure get_uid would count from start.
tf.keras.backend.clear_session()
inputs = []
old_node_to_new = {}
for old_input_node in self.inputs:
input_node = copy(old_input_node)
inputs.append(input_node)
old_node_to_new[old_input_node] = input_node
for old_block in self.blocks:
inputs = [old_node_to_new[input_node]
for input_node in old_block.inputs]
if isinstance(old_block, base.HyperBlock):
outputs = old_block.build(hp, inputs=inputs)
else:
outputs = copy(old_block)(inputs)
for output_node, old_output_node in zip(outputs, old_block.outputs):
old_node_to_new[old_output_node] = output_node
inputs = []
for input_node in self.inputs:
inputs.append(old_node_to_new[input_node])
outputs = []
for output_node in self.outputs:
outputs.append(old_node_to_new[output_node])
return PlainGraph(inputs, outputs, override_hps=self.override_hps)
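_build_network above schedules blocks by in-degree counting, i.e. a Kahn-style topological ordering over the node/block graph. A minimal standalone sketch of the same idea on a toy adjacency list (hypothetical graph, not the autokeras API):

from collections import deque

edges = {'a': ['b', 'c'], 'b': ['d'], 'c': ['d'], 'd': []}  # toy DAG
in_degree = {n: 0 for n in edges}
for n in edges:
    for m in edges[n]:
        in_degree[m] += 1
order = []
queue = deque(n for n in edges if in_degree[n] == 0)
while queue:
    n = queue.popleft()
    order.append(n)  # a node is emitted only after all of its inputs
    for m in edges[n]:
        in_degree[m] -= 1
        if in_degree[m] == 0:
            queue.append(m)
print(order)  # ['a', 'b', 'c', 'd']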
78f06ac9567797f0104f062bd9b9ac12e57cffa6 | 474 | py | Python
| Python/longest-valid-parentheses.py | shreyventure/LeetCode-Solutions | 74423d65702b78974e390f17c9d6365d17e6eed5 | ["MIT"] | 388 | 2020-06-29T08:41:27.000Z | 2022-03-31T22:55:05.000Z
| Python/longest-valid-parentheses.py | shreyventure/LeetCode-Solutions | 74423d65702b78974e390f17c9d6365d17e6eed5 | ["MIT"] | 178 | 2020-07-16T17:15:28.000Z | 2022-03-09T21:01:50.000Z
| Python/longest-valid-parentheses.py | shreyventure/LeetCode-Solutions | 74423d65702b78974e390f17c9d6365d17e6eed5 | ["MIT"] | 263 | 2020-07-13T18:33:20.000Z | 2022-03-28T13:54:10.000Z
|
'''
Speed: 95.97%
Memory: 24.96%
Time complexity: O(n)
Space complexity: O(n)
'''
class Solution(object):
def longestValidParentheses(self, s):
ans=0
stack=[-1]
for i in range(len(s)):
if(s[i]=='('):
stack.append(i)
else:
stack.pop()
if(len(stack)==0):
stack.append(i)
else:
ans=max(ans,i-stack[-1])
return ans
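Two quick checks of the stack-based scan above (standard examples):

s = Solution()
assert s.longestValidParentheses('(()') == 2     # the inner '()'
assert s.longestValidParentheses(')()())') == 4  # the middle '()()'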
78f17ff49e114c184b6a1474d4e3188bcdc4d56c | 447 | py | Python
| setup.py | i25ffz/openaes | a0dbde40d4ce0e4186ea14c4dc9519fe152c018c | ["BSD-2-Clause"] | null | null | null
| setup.py | i25ffz/openaes | a0dbde40d4ce0e4186ea14c4dc9519fe152c018c | ["BSD-2-Clause"] | null | null | null
| setup.py | i25ffz/openaes | a0dbde40d4ce0e4186ea14c4dc9519fe152c018c | ["BSD-2-Clause"] | null | null | null
|
from distutils.core import setup, Extension
import os.path
kw = {
'name':"PyOpenAES",
'version':"0.10.0",
'description':"OpenAES cryptographic library for Python.",
'ext_modules':[
Extension(
'openaes',
include_dirs = ['inc', 'src/isaac'],
# define_macros=[('ENABLE_PYTHON', '1')],
sources = [
os.path.join('src/oaes_lib.c'),
os.path.join('src/oaes_py.c'),
os.path.join('src/isaac/rand.c')
]
)
]
}
setup(**kw)
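The distutils script above compiles the three C sources into a single 'openaes' extension module; a typical invocation, assuming a C toolchain is available:

python setup.py build      # compile the extension under build/
python setup.py install    # optionally install it into site-packages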
78f2293017d6edca3048eb7b10371f7d73e4c830 | 967 | py | Python
| examples/isosurface_demo2.py | jayvdb/scitools | 8df53a3a3bc95377f9fa85c04f3a329a0ec33e67 | ["BSD-3-Clause"] | 62 | 2015-03-28T18:07:51.000Z | 2022-02-12T20:32:36.000Z
| examples/isosurface_demo2.py | jayvdb/scitools | 8df53a3a3bc95377f9fa85c04f3a329a0ec33e67 | ["BSD-3-Clause"] | 7 | 2015-06-09T09:56:03.000Z | 2021-05-20T17:53:15.000Z
| examples/isosurface_demo2.py | jayvdb/scitools | 8df53a3a3bc95377f9fa85c04f3a329a0ec33e67 | ["BSD-3-Clause"] | 29 | 2015-04-16T03:48:57.000Z | 2022-02-03T22:06:52.000Z
|
#!/usr/bin/env python
# Example taken from:
# http://www.mathworks.com/access/helpdesk/help/techdoc/visualize/f5-3371.html
from scitools.easyviz import *
from time import sleep
from scipy import io
setp(interactive=False)
# Displaying an Isosurface:
mri = io.loadmat('mri_matlab_v6.mat')
D = mri['D']
#Ds = smooth3(D);
isosurface(D,5,indexing='xy')
#hiso = isosurface(Ds,5),
# 'FaceColor',[1,.75,.65],...
# 'EdgeColor','none');
shading('interp')
# Adding an Isocap to Show a Cutaway Surface:
#hcap = patch(isocaps(D,5),...
# 'FaceColor','interp',...
# 'EdgeColor','none');
#colormap(map)
# Define the View:
view(45,30)
axis('tight')
daspect([1,1,.4])
# Add Lighting:
#lightangle(45,30);
#set(gcf,'Renderer','zbuffer'); lighting phong
#isonormals(Ds,hiso)
#set(hcap,'AmbientStrength',.6)
#set(hiso,'SpecularColorReflectance',0,'SpecularExponent',50)
show()
raw_input('Press Return key to quit: ')
#savefig('tmp_isosurf2a.eps')
#savefig('tmp_isosurf2a.png')
78f2658f7e058410b484a9d45fd69949bca2813c | 4,099 | py | Python
| structural_model/util_morphology.py | zibneuro/udvary-et-al-2022 | 8b456c41e72958677cb6035028d9c23013cb7c7e | ["MIT"] | 1 | 2022-03-11T13:43:50.000Z | 2022-03-11T13:43:50.000Z
| structural_model/util_morphology.py | zibneuro/udvary-et-al-2022 | 8b456c41e72958677cb6035028d9c23013cb7c7e | ["MIT"] | null | null | null
| structural_model/util_morphology.py | zibneuro/udvary-et-al-2022 | 8b456c41e72958677cb6035028d9c23013cb7c7e | ["MIT"] | null | null | null
|
import os
import numpy as np
import json
import util_amira
def getEdgeLabelName(label):
if(label == 6):
return "axon"
elif(label == 4):
return "apical"
elif(label == 5):
return "basal"
elif(label == 7):
return "soma"
else:
return "other"
def getSomaPosition(points):
somaPos = []
for p in points:
if(p["edge_label"] == "soma"):
somaPos.append(p["position"])
return np.mean(np.vstack(tuple(somaPos)), axis=0)
def loadAmiraExport(filename):
with open(filename) as f:
lines = f.readlines()
labels = lines[0].rstrip().split(",")
points = []
for i in range(1, len(lines)):
line = lines[i].rstrip().split(",")
point = {}
point["edge_id"] = int(line[labels.index("edge_id")])
point["source_node_id"] = int(line[labels.index("source_node")])
point["target_node_id"] = int(line[labels.index("target_node")])
point["edge_label"] = getEdgeLabelName(
int(line[labels.index("edge_label")]))
point["edge_point_id"] = int(line[labels.index("edge_point")])
point["position"] = np.array([float(line[labels.index("x")]), float(
line[labels.index("y")]), float(line[labels.index("z")])])
point["radius"] = float(line[labels.index("radius")])
point["inside_vS1"] = int(line[labels.index("inside_vS1")])
if(point["edge_label"] != "other"):
points.append(point)
return points
def separateCompartments(edgePoints):
apical = []
basal = []
axon = []
for edgePoint in edgePoints:
if(edgePoint["edge_label"] == "apical"):
apical.append(edgePoint)
elif(edgePoint["edge_label"] == "basal"):
basal.append(edgePoint)
elif(edgePoint["edge_label"] == "axon"):
axon.append(edgePoint)
compartments = {}
compartments["apical"] = apical
compartments["basal"] = basal
compartments["axon"] = axon
return compartments
def loadGraphset(networkDir):
if(os.path.exists(os.path.join(networkDir, "morphologies", "Morphologies.am"))):
graphset = util_amira.readSpatialGraphSet(os.path.join(networkDir, "morphologies", "Morphologies.am"), legacy=False)
else:
graphset = util_amira.readSpatialGraphSet(os.path.join(networkDir, "morphologies", "MorphologiesWithNeuronIDs.am"), legacy=True)
return graphset
def writeToCache(filename, transformation, neuronId):
transformationFile = "/tmp/transformation_{}".format(neuronId)
np.savetxt(transformationFile, transformation)
meta = {
"morphologyFile" : filename,
"transformationFile" : transformationFile
}
metaFile = "/tmp/meta_{}.json".format(neuronId)
with open(metaFile, "w") as f:
print("meta", meta)
json.dump(meta, f)
def readFromCache(neuronId):
metaFile = "/tmp/meta_{}.json".format(neuronId)
with open(metaFile) as f:
meta = json.load(f)
transformationFile = meta["transformationFile"]
T = np.loadtxt(transformationFile)
morphologyFile = meta["morphologyFile"]
return morphologyFile, T
def loadAxon(graphset, neuronId, saveToCache = False, loadFromCache = False):
if(loadFromCache):
filename, T = readFromCache(neuronId)
else:
idx = len(graphset[neuronId]) - 1
filename = graphset[neuronId][idx]["file"]
T = graphset[neuronId][idx]["transformation"]
if(saveToCache):
writeToCache(filename, T, neuronId)
return util_amira.readSpatialGraph(filename, T)
def loadDendrite(graphset, neuronId, saveToCache = False, loadFromCache = False):
if(loadFromCache):
filename, T = readFromCache(neuronId)
else:
filename = graphset[neuronId][0]["file"]
T = graphset[neuronId][0]["transformation"]
if(saveToCache):
writeToCache(filename, T, neuronId)
return util_amira.readSpatialGraph(filename, T)
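A small check of the label mapping and compartment split above (toy edge points, values made up):

pts = [{'edge_label': getEdgeLabelName(6)},  # 6 -> 'axon'
       {'edge_label': getEdgeLabelName(4)},  # 4 -> 'apical'
       {'edge_label': getEdgeLabelName(5)}]  # 5 -> 'basal'
comp = separateCompartments(pts)
assert [len(comp[k]) for k in ('apical', 'basal', 'axon')] == [1, 1, 1]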
78f3cd314838c8b00373f5ff15a91db4a0e4e749 | 1,427 | py | Python
| scripts/Interfacing/encoder_class.py | noshluk2/Wifi-Signal-Robot-localization | 538e6c4e7a63486f22ab708908c476cd808f720c | ["MIT"] | null | null | null
| scripts/Interfacing/encoder_class.py | noshluk2/Wifi-Signal-Robot-localization | 538e6c4e7a63486f22ab708908c476cd808f720c | ["MIT"] | null | null | null
| scripts/Interfacing/encoder_class.py | noshluk2/Wifi-Signal-Robot-localization | 538e6c4e7a63486f22ab708908c476cd808f720c | ["MIT"] | null | null | null
|
import RPi.GPIO as GPIO
import threading
class Encoder(object):
def __init__(self, r_en_a,r_en_b,l_en_a,l_en_b):
GPIO.setmode(GPIO.BCM)
GPIO.setup(r_en_a, GPIO.IN)
GPIO.setup(r_en_b, GPIO.IN)
GPIO.setup(l_en_a, GPIO.IN)
GPIO.setup(l_en_b, GPIO.IN)
        self.l_en_a = l_en_a
        self.l_en_b = l_en_b
        self.r_en_a = r_en_a
        self.r_en_b = r_en_b
GPIO.add_event_detect(r_en_a, GPIO.BOTH, callback=self.Update_encR)
GPIO.add_event_detect(l_en_a, GPIO.BOTH, callback=self.Update_encL)
self.count_R =0
self.count_L=0
def Update_encR(self,channel):
if GPIO.input(self.r_en_a) == GPIO.input(self.r_en_b):
self.count_R=self.count_R + 1
else :
self.count_R = self.count_R - 1
def Update_encL(self,channel):
if GPIO.input(self.l_en_a) == GPIO.input(self.l_en_b):
self.count_L=self.count_L + 1
else :
self.count_L = self.count_L - 1
return (self.count_L)
def get_r_enc(self):
return self.count_R
def get_l_enc(self):
return self.count_L
def clear_encoders(self):
self.count_R=0
self.count_L=0
# r_en_a = 27
# r_en_b = 10
# l_en_a = 5
# l_en_b = 6
# enc_obj = Encoder(27,10,5,6)
# def update_encoders():
# threading.Timer(1,update_encoders).start()
# print(" looping ")
# update_encoders()
78f527fe8104b4c467eef06ba01999f8a1c7339e | 2,286 | py | Python
| systori/apps/equipment/urls.py | systori/systori | e309c63e735079ff6032fdaf1db354ec872b28b1 | ["BSD-3-Clause"] | 12 | 2018-01-30T00:44:06.000Z | 2020-07-13T05:20:48.000Z
| systori/apps/equipment/urls.py | systori/systori | e309c63e735079ff6032fdaf1db354ec872b28b1 | ["BSD-3-Clause"] | 36 | 2018-03-06T17:49:50.000Z | 2020-06-23T19:26:00.000Z
| systori/apps/equipment/urls.py | systori/systori | e309c63e735079ff6032fdaf1db354ec872b28b1 | ["BSD-3-Clause"] | 3 | 2018-08-03T07:03:09.000Z | 2020-07-09T20:21:10.000Z
|
from django.conf.urls import url
from django.urls import path, include
from systori.apps.user.authorization import office_auth
from systori.apps.equipment.views import EquipmentListView, EquipmentView, EquipmentCreate, EquipmentDelete, EquipmentUpdate, RefuelingStopCreate, RefuelingStopDelete, RefuelingStopUpdate, MaintenanceCreate, MaintenanceDelete, MaintenanceUpdate
urlpatterns = [
# two url rules to make the active_filter keyword optional
url(
r"^equipment/$", office_auth(EquipmentListView.as_view()), name="equipment.list"
),
url(
r"^equipment/(?P<active_filter>[\w-]+)$",
office_auth(EquipmentListView.as_view()),
name="equipment.list",
),
url(
r"^equipment-(?P<pk>\d+)$",
office_auth(EquipmentView.as_view()),
name="equipment.view",
),
url(
r"^create-equipment$",
office_auth(EquipmentCreate.as_view()),
name="equipment.create",
),
url(
r"^equipment-(?P<pk>\d+)/edit$",
office_auth(EquipmentUpdate.as_view()),
name="equipment.edit",
),
url(
r"^equipment-(?P<pk>\d+)/delete$",
office_auth(EquipmentDelete.as_view()),
name="equipment.delete",
),
url(
r"^equipment-(?P<pk>\d+)/create-refueling-stop$",
office_auth(RefuelingStopCreate.as_view()),
name="refueling_stop.create",
),
url(
r"^equipment-(?P<equipment_pk>\d+)/refueling-stop-(?P<pk>\d+)/update$",
office_auth(RefuelingStopUpdate.as_view()),
name="refueling_stop.update",
),
url(
r"^equipment-(?P<equipment_pk>\d+)/refueling-stop-(?P<pk>\d+)/delete",
office_auth(RefuelingStopDelete.as_view()),
name="refueling_stop.delete",
),
url(
r"^equipment-(?P<pk>\d+)/create-maintenance",
office_auth(MaintenanceCreate.as_view()),
name="maintenance.create",
),
url(
r"^equipment-(?P<equipment_pk>\d+)/maintenance-(?P<pk>\d+)/update$",
office_auth(MaintenanceUpdate.as_view()),
name="maintenance.update",
),
url(
r"^equipment-(?P<equipment_pk>\d+)/maintenance-(?P<pk>\d+)/delete",
office_auth(MaintenanceDelete.as_view()),
name="maintenance.delete",
),
]
78f57ad1256f2c324b8101344d3e6ef85566b84c | 632 | py | Python
| 40_3.py | rursvd/pynumerical2 | 4b2d33125b64a39099ac8eddef885e0ea11b237d | ["MIT"] | null | null | null
| 40_3.py | rursvd/pynumerical2 | 4b2d33125b64a39099ac8eddef885e0ea11b237d | ["MIT"] | null | null | null
| 40_3.py | rursvd/pynumerical2 | 4b2d33125b64a39099ac8eddef885e0ea11b237d | ["MIT"] | 1 | 2019-12-03T01:34:19.000Z | 2019-12-03T01:34:19.000Z
|
from numpy import zeros
# Define ab2 function
def ab2(f,t0,tf,y0,n):
h = (tf - t0)/n
t = zeros(n+1)
y = zeros(n+1)
t[0] = t0
y[0] = y0
y[1] = y[0] + h * f(t[0],y[0])
t[1] = t[0] + h
for i in range(1,n):
        y[i+1] = y[i] + h * ((3.0/2.0) * f(t[i],y[i]) - (1.0/2.0) * f(t[i-1],y[i-1]))
t[i+1] = t[i] + h
return t,y
# Define functions
def f(t,y):
return t - y
# Set initial conditions
t0 = 0.0
tf = 1.0
y0 = 1.0
n = 5
# Execute AB2
t, yab2 = ab2(f,t0,tf,y0,n)
# Print results
print("%5s %8s" % ('t','y'))
for i in range(n+1):
print("%8.4f %8.4f" % (t[i],yab2[i]))
78f5d63c04bc9e40555fc089be45ac3e10cbd62a | 40,331 | py | Python
| test/test_parse_cs.py | NeonDaniel/lingua-franca | eee95702016b4013b0d81dc74da98cd2d2f53358 | ["Apache-2.0"] | null | null | null
| test/test_parse_cs.py | NeonDaniel/lingua-franca | eee95702016b4013b0d81dc74da98cd2d2f53358 | ["Apache-2.0"] | null | null | null
| test/test_parse_cs.py | NeonDaniel/lingua-franca | eee95702016b4013b0d81dc74da98cd2d2f53358 | ["Apache-2.0"] | 1 | 2020-09-22T12:39:17.000Z | 2020-09-22T12:39:17.000Z
|
#
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from datetime import datetime, timedelta
from lingua_franca import get_default_lang, set_default_lang, \
load_language, unload_language
from lingua_franca.parse import extract_datetime
from lingua_franca.parse import extract_duration
from lingua_franca.parse import extract_number, extract_numbers
from lingua_franca.parse import fuzzy_match
from lingua_franca.parse import get_gender
from lingua_franca.parse import match_one
from lingua_franca.parse import normalize
def setUpModule():
load_language("cs-cz")
set_default_lang("cs")
def tearDownModule():
unload_language("cs")
class TestFuzzyMatch(unittest.TestCase):
def test_matches(self):
self.assertTrue(fuzzy_match("ty a já", "ty a já") >= 1.0)
self.assertTrue(fuzzy_match("ty a já", "ty") < 0.5)
self.assertTrue(fuzzy_match("Ty", "ty") >= 0.5)
self.assertTrue(fuzzy_match("ty a já", "ty") ==
fuzzy_match("ty", "ty a já"))
self.assertTrue(fuzzy_match("ty a já", "on nebo oni") < 0.23)
def test_match_one(self):
# test list of choices
choices = ['frank', 'kate', 'harry', 'henry']
self.assertEqual(match_one('frank', choices)[0], 'frank')
self.assertEqual(match_one('fran', choices)[0], 'frank')
self.assertEqual(match_one('enry', choices)[0], 'henry')
self.assertEqual(match_one('katt', choices)[0], 'kate')
# test dictionary of choices
choices = {'frank': 1, 'kate': 2, 'harry': 3, 'henry': 4}
self.assertEqual(match_one('frank', choices)[0], 1)
self.assertEqual(match_one('enry', choices)[0], 4)
class TestNormalize(unittest.TestCase):
def test_extract_number(self):
self.assertEqual(extract_number("tohle je první test",
ordinals=True), 1)
self.assertEqual(extract_number("tohle je 2 test"), 2)
self.assertEqual(extract_number("tohle je druhý test",
ordinals=True), 2)
#self.assertEqual(extract_number("tohle je třetí test"), 1.0 / 3.0)
self.assertEqual(extract_number("tohle je třetí test",
ordinals=True), 3.0)
self.assertEqual(extract_number("ten čtvrtý", ordinals=True), 4.0)
self.assertEqual(extract_number(
"ten třicátý šestý", ordinals=True), 36.0)
self.assertEqual(extract_number("tohle je test číslo 4"), 4)
self.assertEqual(extract_number("jedna třetina šálku"), 1.0 / 3.0)
self.assertEqual(extract_number("tři šálky"), 3)
self.assertEqual(extract_number("1/3 šálku"), 1.0 / 3.0)
self.assertEqual(extract_number("čtvrtina šálku"), 0.25)
self.assertEqual(extract_number("1/4 cup"), 0.25)
self.assertEqual(extract_number("jedna čtvrtina šálku"), 0.25)
self.assertEqual(extract_number("2/3 šálků"), 2.0 / 3.0)
self.assertEqual(extract_number("3/4 šálků"), 3.0 / 4.0)
self.assertEqual(extract_number("1 a 3/4 šálků"), 1.75)
self.assertEqual(extract_number("1 šálek a půl"), 1.5)
self.assertEqual(extract_number("jeden šálek a polovina"), 1.5)
self.assertEqual(extract_number("jedna a půl šálků"), 1.5)
self.assertEqual(extract_number("jedna a jedna polovina šálků"), 1.5)
self.assertEqual(extract_number("tři čtvrtina šálků"), 3.0 / 4.0)
self.assertEqual(extract_number("tři čtvrtiny šálků"), 3.0 / 4.0)
self.assertEqual(extract_number("dvacet dva"), 22)
self.assertEqual(extract_number(
"Dvacet dva s velkým písmenam na začátku"), 22)
self.assertEqual(extract_number(
"dvacet Dva s dva krát velkým písmem"), 22)
self.assertEqual(extract_number(
"dvacet Dva s různou velikostí písmen"), 22)
self.assertEqual(extract_number("Dvacet dva a Tři Pětiny"), 22.6)
self.assertEqual(extract_number("dvě sto"), 200)
self.assertEqual(extract_number("devět tisíc"), 9000)
self.assertEqual(extract_number("šest sto šedesát šest"), 666)
self.assertEqual(extract_number("dva million"), 2000000)
self.assertEqual(extract_number("dva million pět sto tisíc "
"tun žhavého kovu"), 2500000)
self.assertEqual(extract_number("šest trillion"), 6000000000000.0)
self.assertEqual(extract_number("šest trilion", short_scale=False),
6e+18)
self.assertEqual(extract_number("jedna tečka pět"), 1.5)
self.assertEqual(extract_number("tři tečka čtrnáct"), 3.14)
self.assertEqual(extract_number("nula tečka dva"), 0.2)
self.assertEqual(extract_number("billion roků "),
1000000000.0)
self.assertEqual(extract_number("bilion roků",
short_scale=False),
1000000000000.0)
self.assertEqual(extract_number("jedno sto tisíc"), 100000)
self.assertEqual(extract_number("mínus 2"), -2)
self.assertEqual(extract_number("záporné sedmdesát"), -70)
self.assertEqual(extract_number("tisíc million"), 1000000000)
self.assertEqual(extract_number("miliarda", short_scale=False),
1000000000)
self.assertEqual(extract_number("šestina třetina"),
1 / 6 / 3)
self.assertEqual(extract_number("šestina třetí", ordinals=True),
3)
self.assertEqual(extract_number("třicet sekund"), 30)
self.assertEqual(extract_number("třicátý druhý", ordinals=True), 32)
self.assertEqual(extract_number("tohle je billiontý test",
ordinals=True), 1e09)
print("tohle udělat později")
#self.assertEqual(extract_number("tohle je billiontý test"), 1e-9)
self.assertEqual(extract_number("tohle je biliontý test",
ordinals=True,
short_scale=False), 1e12)
print("tohle udělat později")
# self.assertEqual(extract_number("tohle je biliontý test",
# short_scale=False), 1e-12)
# Verify non-power multiples of ten no longer discard
# adjacent multipliers
self.assertEqual(extract_number("dvacet tisíc"), 20000)
self.assertEqual(extract_number("padesát million"), 50000000)
# Verify smaller powers of ten no longer cause miscalculation of larger
# powers of ten (see MycroftAI#86)
self.assertEqual(extract_number("dvacet billion tři sto million \
devět sto padesát tisíc šest sto \
sedmdesát pět tečka osm"),
20300950675.8)
self.assertEqual(extract_number("devět sto devadesát devět million devět \
sto devadesát devět tisíc devět \
sto devadesát devět tečka devět"),
999999999.9)
# TODO why does "trillion" result in xxxx.0?
self.assertEqual(extract_number("osm sto trillion dva sto \
padesát sedm"), 800000000000257.0)
# TODO handle this case
# self.assertEqual(
# extract_number("6 dot six six six"),
# 6.666)
self.assertTrue(extract_number("Tenisový hráč je rychlý") is False)
self.assertTrue(extract_number("křehký") is False)
self.assertTrue(extract_number("křehká nula") is not False)
self.assertEqual(extract_number("křehká nula"), 0)
#self.assertTrue(extract_number("grobo 0") is not False)
#self.assertEqual(extract_number("grobo 0"), 0)
self.assertEqual(extract_number("dvojice piv"), 2)
self.assertEqual(extract_number("dvojice sto piv"), 200)
self.assertEqual(extract_number("dvojice tisíc piv"), 2000)
self.assertEqual(extract_number(
"tohle je 7 test", ordinals=True), 7)
self.assertEqual(extract_number(
"tohle je 7 test", ordinals=False), 7)
self.assertTrue(extract_number("tohle je n. test") is False)
self.assertEqual(extract_number("tohle je 1. test"), 1)
self.assertEqual(extract_number("tohle je 2. test"), 2)
self.assertEqual(extract_number("tohle je 3. test"), 3)
self.assertEqual(extract_number("tohle je 31. test"), 31)
self.assertEqual(extract_number("tohle je 32. test"), 32)
self.assertEqual(extract_number("tohle je 33. test"), 33)
self.assertEqual(extract_number("tohle je 34. test"), 34)
self.assertEqual(extract_number("celkem 100%"), 100)
def test_extract_duration_cs(self):
self.assertEqual(extract_duration("10 sekund"),
(timedelta(seconds=10.0), ""))
self.assertEqual(extract_duration("5 minut"),
(timedelta(minutes=5), ""))
self.assertEqual(extract_duration("2 hodiny"),
(timedelta(hours=2), ""))
self.assertEqual(extract_duration("3 dny"),
(timedelta(days=3), ""))
self.assertEqual(extract_duration("25 týdnů"),
(timedelta(weeks=25), ""))
self.assertEqual(extract_duration("sedm hodin"),
(timedelta(hours=7), ""))
self.assertEqual(extract_duration("7.5 sekund"),
(timedelta(seconds=7.5), ""))
self.assertEqual(extract_duration("osm a polovina dne třicet"
" devět sekund"),
(timedelta(days=8.5, seconds=39), ""))
self.assertEqual(extract_duration("Nastav časovač na 30 minut"),
(timedelta(minutes=30), "nastav časovač na"))
self.assertEqual(extract_duration("Čtyři a půl minuty do"
" západu"),
(timedelta(minutes=4.5), "do západu"))
self.assertEqual(extract_duration("devatenáct minut po hodině"),
(timedelta(minutes=19), "po hodině"))
self.assertEqual(extract_duration("vzbuď mě za tři týdny, čtyři"
" sto devadesát sedm dní, a"
" tři sto 91.6 sekund"),
(timedelta(weeks=3, days=497, seconds=391.6),
"vzbuď mě za , , a"))
self.assertEqual(extract_duration("film je jedna hodina, padesát sedm"
" a půl minuty dlouhý"),
(timedelta(hours=1, minutes=57.5),
"film je , dlouhý"))
self.assertEqual(extract_duration("10-sekund"),
(timedelta(seconds=10.0), ""))
self.assertEqual(extract_duration("5-minut"),
(timedelta(minutes=5), ""))
def test_extractdatetime_cs(self):
def extractWithFormat(text):
date = datetime(2017, 6, 27, 13, 4) # Tue June 27, 2017 @ 1:04pm
[extractedDate, leftover] = extract_datetime(text, date)
extractedDate = extractedDate.strftime("%Y-%m-%d %H:%M:%S")
return [extractedDate, leftover]
def testExtract(text, expected_date, expected_leftover):
res = extractWithFormat(normalize(text))
self.assertEqual(res[0], expected_date, "for=" + text)
self.assertEqual(res[1], expected_leftover, "for=" + text)
testExtract("nyní je čas",
"2017-06-27 13:04:00", "je čas")
testExtract("za sekundu",
"2017-06-27 13:04:01", "")
testExtract("za minutu",
"2017-06-27 13:05:00", "")
# testExtract("ve dvou minutách",
# "2017-06-27 13:06:00", "")
# testExtract("in a couple of minutes",
# "2017-06-27 13:06:00", "")
# testExtract("ve dvou hodinách",
# "2017-06-27 15:04:00", "")
# testExtract("in a couple of hours",
# "2017-06-27 15:04:00", "")
# testExtract("v dvoje týden",
# "2017-07-11 00:00:00", "")
# testExtract("in a couple of weeks",
# "2017-07-11 00:00:00", "")
# testExtract("v dvoje měsíc",
# "2017-08-27 00:00:00", "")
# testExtract("v dvoje rok",
# "2019-06-27 00:00:00", "")
# testExtract("in a couple of months",
# "2017-08-27 00:00:00", "")
# testExtract("in a couple of years",
# "2019-06-27 00:00:00", "")
testExtract("v desetiletí",
"2027-06-27 00:00:00", "")
# testExtract("in a couple of decades",
# "2037-06-27 00:00:00", "")
testExtract("další desetiletí",
"2027-06-27 00:00:00", "")
testExtract("v století",
"2117-06-27 00:00:00", "")
testExtract("v tisíciletí",
"3017-06-27 00:00:00", "")
testExtract("v dvoje desetiletí",
"2037-06-27 00:00:00", "")
testExtract("v 5 desetiletí",
"2067-06-27 00:00:00", "")
testExtract("v dvoje století",
"2217-06-27 00:00:00", "")
# testExtract("in a couple of centuries",
# "2217-06-27 00:00:00", "")
testExtract("v 2 století",
"2217-06-27 00:00:00", "")
testExtract("v dvoje tisíciletí",
"4017-06-27 00:00:00", "")
# testExtract("in a couple of millenniums",
# "4017-06-27 00:00:00", "")
testExtract("v hodina",
"2017-06-27 14:04:00", "")
testExtract("chci to během hodiny",
"2017-06-27 14:04:00", "chci to")
testExtract("za 1 sekundu",
"2017-06-27 13:04:01", "")
testExtract("za 2 sekundy",
"2017-06-27 13:04:02", "")
testExtract("Nastav časovač na 1 minutu",
"2017-06-27 13:05:00", "nastav časovač")
testExtract("Nastav časovač na půl hodina",
"2017-06-27 13:34:00", "nastav časovač")
testExtract("Nastav časovač na 5 den od dnes",
"2017-07-02 00:00:00", "nastav časovač")
testExtract("den po zítřku",
"2017-06-29 00:00:00", "")
testExtract("Jaké je počasí den po zítřku?",
"2017-06-29 00:00:00", "jaké je počasí")
testExtract("Připomeň mi v 10:45 pm",
"2017-06-27 22:45:00", "připomeň mi")
testExtract("jaké je počasí v pátek ráno",
"2017-06-30 08:00:00", "jaké je počasí")
testExtract("jaké je zítřejší počasí",
"2017-06-28 00:00:00", "jaké je počasí")
testExtract("jaké je počasí toto odpoledne",
"2017-06-27 15:00:00", "jaké je počasí")
testExtract("jaké je počasí tento večer",
"2017-06-27 19:00:00", "jaké je počasí")
testExtract("jaké bylo počasí toto ráno",
"2017-06-27 08:00:00", "jaké bylo počasí")
testExtract("připomeň mi abych zavolal mámě v 8 týden a 2 dny",
"2017-08-24 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v srpen 3",
"2017-08-03 00:00:00", "připomeň mi abych zavolal mámě") # přidat i třetího slovně
testExtract("připomeň mi zítra abych zavolal mámě v 7am",
"2017-06-28 07:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi zítra abych zavolal mámě v 10pm",
"2017-06-28 22:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 7am",
"2017-06-28 07:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v hodina",
"2017-06-27 14:04:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 1730",
"2017-06-27 17:30:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 0630",
"2017-06-28 06:30:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 06 30 hodina",
"2017-06-28 06:30:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 06 30",
"2017-06-28 06:30:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 06 30 hodina",
"2017-06-28 06:30:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 7 hodin",
"2017-06-27 19:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě večer v 7 hodin",
"2017-06-27 19:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 7 hodin večer",
"2017-06-27 19:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 7 hodin ráno",
"2017-06-28 07:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v Čtvrtek večer v 7 hodin",
"2017-06-29 19:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v Čtvrtek ráno v 7 hodin",
"2017-06-29 07:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 7 hodin Čtvrtek ráno",
"2017-06-29 07:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 7:00 Čtvrtek ráno",
"2017-06-29 07:00:00", "připomeň mi abych zavolal mámě")
# TODO: This test is imperfect due to "at 7:00" still in the
# remainder. But let it pass for now since time is correct
testExtract("připomeň mi abych zavolal mámě v 7:00 Čtvrtek večer",
"2017-06-29 19:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 8 Středa večer",
"2017-06-28 20:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 8 Středa v večer",
"2017-06-28 20:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě Středa večer v 8",
"2017-06-28 20:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě za dvě hodiny",
"2017-06-27 15:04:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě za 2 hodiny",
"2017-06-27 15:04:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě za 15 minut",
"2017-06-27 13:19:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě za patnáct minut",
"2017-06-27 13:19:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě za půl hodina",
"2017-06-27 13:34:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě za půl hodina",
"2017-06-27 13:34:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě za čtvrt hodina",
"2017-06-27 13:19:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě za čtvrt hodina",
"2017-06-27 13:19:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 10am 2 den po této sobota",
"2017-07-03 10:00:00", "připomeň mi abych zavolal mámě")
testExtract("Přehraj Rick Astley hudbu 2 dny od Pátek",
"2017-07-02 00:00:00", "přehraj rick astley hudbu")
testExtract("Začni invazi v 3:45 pm v Čtvrtek",
"2017-06-29 15:45:00", "začni invazi")
testExtract("V Pondělí, objednej koláč z pekárny",
"2017-07-03 00:00:00", "objednej koláč z pekárny")
testExtract("Přehraj Happy Birthday hudbu 5 roků od dnes",
"2022-06-27 00:00:00", "přehraj happy birthday hudbu")
testExtract("Skype Mámě v 12:45 pm další Čtvrtek",
"2017-07-06 12:45:00", "skype mámě")
testExtract("Jaké je počasí příští Pátek?",
"2017-06-30 00:00:00", "jaké je počasí")
testExtract("Jaké je počasí příští Středa?",
"2017-07-05 00:00:00", "jaké je počasí")
testExtract("Jaké je počasí příští Čtvrtek?",
"2017-07-06 00:00:00", "jaké je počasí")
testExtract("Jaké je počasí příští pátek ráno",
"2017-06-30 08:00:00", "jaké je počasí")
testExtract("jaké je počasí příští pátek večer",
"2017-06-30 19:00:00", "jaké je počasí")
testExtract("jaké je počasí příští pátek odpoledne",
"2017-06-30 15:00:00", "jaké je počasí")
testExtract("připomeň mi abych zavolal mámě v srpen třetího",
"2017-08-03 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("Kup ohňostroj v 4 Červenec",
"2017-07-04 00:00:00", "kup ohňostroj")
testExtract("jaké je počasí 2 týdny od další pátek",
"2017-07-14 00:00:00", "jaké je počasí")
testExtract("jaké je počasí Středa v 0700 hodina",
"2017-06-28 07:00:00", "jaké je počasí")
testExtract("Nastav budík Středa v 7 hodin",
"2017-06-28 07:00:00", "nastav budík")
testExtract("Nastav schůzku v 12:45 pm další Čtvrtek",
"2017-07-06 12:45:00", "nastav schůzku")
testExtract("Jaké je počasí tento Čtvrtek?",
"2017-06-29 00:00:00", "jaké je počasí")
testExtract("nastav návštěvu na 2 týdny a 6 dní od Sobota",
"2017-07-21 00:00:00", "nastav návštěvu")
testExtract("Zahaj invazi v 03 45 v Čtvrtek",
"2017-06-29 03:45:00", "zahaj invazi")
testExtract("Zahaj invazi v 800 hodin v Čtvrtek",
"2017-06-29 08:00:00", "zahaj invazi")
testExtract("Zahaj párty v 8 hodin v večer v Čtvrtek",
"2017-06-29 20:00:00", "zahaj párty")
testExtract("Zahaj invazi v 8 v večer v Čtvrtek",
"2017-06-29 20:00:00", "zahaj invazi")
testExtract("Zahaj invazi v Čtvrtek v poledne",
"2017-06-29 12:00:00", "zahaj invazi")
testExtract("Zahaj invazi v Čtvrtek v půlnoc",
"2017-06-29 00:00:00", "zahaj invazi")
testExtract("Zahaj invazi v Čtvrtek v 0500",
"2017-06-29 05:00:00", "zahaj invazi")
testExtract("připomeň mi abych vstal v 4 roky",
"2021-06-27 00:00:00", "připomeň mi abych vstal")
testExtract("připomeň mi abych vstal v 4 roky a 4 dny",
"2021-07-01 00:00:00", "připomeň mi abych vstal")
testExtract("jaké je počasí 3 dny po zítra?",
"2017-07-01 00:00:00", "jaké je počasí")
testExtract("prosinec 3",
"2017-12-03 00:00:00", "")
testExtract("sejdeme se v 8:00 dnes večer",
"2017-06-27 20:00:00", "sejdeme se")
testExtract("sejdeme se v 5pm",
"2017-06-27 17:00:00", "sejdeme se")
testExtract("sejdeme se v 8 am",
"2017-06-28 08:00:00", "sejdeme se")
testExtract("připomeň mi abych vstal v 8 am",
"2017-06-28 08:00:00", "připomeň mi abych vstal")
testExtract("jaké je počasí v úterý",
"2017-06-27 00:00:00", "jaké je počasí")
testExtract("jaké je počasí v pondělí",
"2017-07-03 00:00:00", "jaké je počasí")
testExtract("jaké je počasí toto Středa",
"2017-06-28 00:00:00", "jaké je počasí")
testExtract("v Čtvrtek jaké je počasí",
"2017-06-29 00:00:00", "jaké je počasí")
testExtract("tento Čtvrtek jaké je počasí",
"2017-06-29 00:00:00", "jaké je počasí")
testExtract("poslední pondělí jaké bylo počasí",
"2017-06-26 00:00:00", "jaké bylo počasí")
testExtract("nastav budík na Středa večer v 8",
"2017-06-28 20:00:00", "nastav budík")
testExtract("nastav budík na Středa v 3 hodiny v odpoledne",
"2017-06-28 15:00:00", "nastav budík")
testExtract("nastav budík na Středa v 3 hodiny v ráno",
"2017-06-28 03:00:00", "nastav budík")
testExtract("nastav budík na Středa ráno v 7 hodin",
"2017-06-28 07:00:00", "nastav budík")
testExtract("nastav budík na dnes v 7 hodin",
"2017-06-27 19:00:00", "nastav budík")
testExtract("nastav budík na tento večer v 7 hodin",
"2017-06-27 19:00:00", "nastav budík")
# TODO: This test is imperfect due to the "at 7:00" still in the
# remainder. But let it pass for now since time is correct
testExtract("nastav budík na tento večer v 7:00",
"2017-06-27 19:00:00", "nastav budík v 7:00")
testExtract("večer v červen 5 2017 připomeň mi" +
" abych zavolal mámě",
"2017-06-05 19:00:00", "připomeň mi abych zavolal mámě")
# TODO: This test is imperfect due to the missing "for" in the
# remainder. But let it pass for now since time is correct
testExtract("aktualizuj můj kalendář na ranní schůzku s julius" +
" v březnu 4",
"2018-03-04 08:00:00",
"aktualizuj můj kalendář schůzku s julius")
testExtract("připomeň mi abych zavolal mámě další úterý",
"2017-07-04 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě 3 týdny",
"2017-07-18 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 8 týdny",
"2017-08-22 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 8 týdny a 2 dny",
"2017-08-24 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 4 dny",
"2017-07-01 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 3 měsíce",
"2017-09-27 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 2 roky a 2 dny",
"2019-06-29 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě další týden",
"2017-07-04 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 10am v Sobota",
"2017-07-01 10:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 10am tato Sobota",
"2017-07-01 10:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 10 další Sobota",
"2017-07-01 10:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 10am další Sobota",
"2017-07-01 10:00:00", "připomeň mi abych zavolal mámě")
# test yesterday
testExtract("jaký den byl včera",
"2017-06-26 00:00:00", "jaký den byl")
testExtract("jaký den byl den před včera",
"2017-06-25 00:00:00", "jaký den byl")
testExtract("měl jsem večeři včera v 6",
"2017-06-26 06:00:00", "měl jsem večeři")
testExtract("měl jsem večeři včera v 6 am",
"2017-06-26 06:00:00", "měl jsem večeři")
testExtract("měl jsem večeři včera v 6 pm",
"2017-06-26 18:00:00", "měl jsem večeři")
# The two tests below ensure that a time is picked
# even when no am/pm is specified,
# in the case of weekdays/tonight
testExtract("nastav budík na 9 o víkendech",
"2017-06-27 21:00:00", "nastav budík víkendech")
testExtract("na 8 dnes večer",
"2017-06-27 20:00:00", "")
testExtract("na 8:30pm dnes večer",
"2017-06-27 20:30:00", "")
# Tests a time with ':' & without am/pm
testExtract("nastav budík na dnes večer 9:30",
"2017-06-27 21:30:00", "nastav budík")
testExtract("nastav budík na 9:00 na dnes večer",
"2017-06-27 21:00:00", "nastav budík")
# Check if it picks intent irrespective of correctness
testExtract("nastav budík na 9 hodin dnes večer",
"2017-06-27 21:00:00", "nastav budík")
testExtract("připomeň mi hru dnes v noci v 11:30",
"2017-06-27 23:30:00", "připomeň mi hru")
testExtract("nastav budík v 7:30 o výkendech",
"2017-06-27 19:30:00", "nastav budík o výkendech")
# "# days <from X/after X>"
testExtract("mé narozeniny jsou 2 dny od dnes",
"2017-06-29 00:00:00", "mé narozeniny jsou")
testExtract("mé narozeniny jsou 2 dny po dnes",
"2017-06-29 00:00:00", "mé narozeniny jsou")
testExtract("mé narozeniny jsou 2 dny od zítra",
"2017-06-30 00:00:00", "mé narozeniny jsou")
testExtract("mé narozeniny jsou 2 dny od zítra",
"2017-06-30 00:00:00", "mé narozeniny jsou")
testExtract("připomeň mi abych zavolal mámě v 10am 2 dny po další Sobota",
"2017-07-10 10:00:00", "připomeň mi abych zavolal mámě")
testExtract("mé narozeniny jsou 2 dny od včera",
"2017-06-28 00:00:00", "mé narozeniny jsou")
testExtract("mé narozeniny jsou 2 dny po včera",
"2017-06-28 00:00:00", "mé narozeniny jsou")
# "# days ago>"
testExtract("mé narozeniny byly před 1 den",
"2017-06-26 00:00:00", "mé narozeniny byly")
testExtract("mé narozeniny byly před 2 dny",
"2017-06-25 00:00:00", "mé narozeniny byly")
testExtract("mé narozeniny byly před 3 dny",
"2017-06-24 00:00:00", "mé narozeniny byly")
testExtract("mé narozeniny byly před 4 dny",
"2017-06-23 00:00:00", "mé narozeniny byly")
# TODO this test is imperfect due to "tonight" in the reminder, but let is pass since the date is correct
testExtract("sejdeme se dnes v noci",
"2017-06-27 22:00:00", "sejdeme se noci")
# TODO this test is imperfect due to "at night" in the reminder, but let is pass since the date is correct
testExtract("sejdeme se později v noci",
"2017-06-27 22:00:00", "sejdeme se později v noci")
# TODO this test is imperfect due to "night" in the reminder, but let is pass since the date is correct
testExtract("Jaké bude počasí zítra v noci",
"2017-06-28 22:00:00", "jaké bude počasí v noci")
# TODO this test is imperfect due to "night" in the reminder, but let is pass since the date is correct
testExtract("jaké bude počasí příští úterý v noci",
"2017-07-04 22:00:00", "jaké bude počasí v noci")
def test_extract_ambiguous_time_cs(self):
morning = datetime(2017, 6, 27, 8, 1, 2)
večer = datetime(2017, 6, 27, 20, 1, 2)
noonish = datetime(2017, 6, 27, 12, 1, 2)
self.assertEqual(
extract_datetime('krmení ryb'), None)
self.assertEqual(
extract_datetime('den'), None)
self.assertEqual(
extract_datetime('týden'), None)
self.assertEqual(
extract_datetime('měsíc'), None)
self.assertEqual(
extract_datetime('rok'), None)
self.assertEqual(
extract_datetime(' '), None)
self.assertEqual(
extract_datetime('nakrmit ryby v 10 hodin', morning)[0],
datetime(2017, 6, 27, 10, 0, 0))
self.assertEqual(
extract_datetime('nakrmit ryby v 10 hodin', noonish)[0],
datetime(2017, 6, 27, 22, 0, 0))
self.assertEqual(
extract_datetime('nakrmit ryby v 10 hodin', večer)[0],
datetime(2017, 6, 27, 22, 0, 0))
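# Summary of the three assertions above: the ambiguous phrase "v 10 hodin"
# ("at 10 o'clock") resolves relative to the anchor time passed to
# extract_datetime -- a morning anchor yields 10:00 the same day, while a
# noonish or evening anchor pushes the result to 22:00.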
"""
In Czech, "May" (the month) and "may" (the verb) have different forms, so the English May/may ambiguity does not apply; this test stays disabled:
def test_extract_date_with_may_I_cs(self):
now = datetime(2019, 7, 4, 8, 1, 2)
may_date = datetime(2019, 5, 2, 10, 11, 20)
self.assertEqual(
extract_datetime('Můžu vědět jaký je to čas zítra', now)[0],
datetime(2019, 7, 5, 0, 0, 0))
self.assertEqual(
extract_datetime('Můžu vědět kdy je 10 hodin', now)[0],
datetime(2019, 7, 4, 10, 0, 0))
self.assertEqual(
extract_datetime('24. můžu chtít připomenutí', may_date)[0],
datetime(2019, 5, 24, 0, 0, 0))
"""
def test_extract_relativedatetime_cs(self):
def extractWithFormat(text):
date = datetime(2017, 6, 27, 10, 1, 2)
[extractedDate, leftover] = extract_datetime(text, date)
extractedDate = extractedDate.strftime("%Y-%m-%d %H:%M:%S")
return [extractedDate, leftover]
def testExtract(text, expected_date, expected_leftover):
res = extractWithFormat(normalize(text))
self.assertEqual(res[0], expected_date, "for=" + text)
self.assertEqual(res[1], expected_leftover, "for=" + text)
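# These helpers mirror the ones above: extractWithFormat parses `text` relative
# to the fixed anchor 2017-06-27 10:01:02 and formats the result, and
# testExtract asserts both the parsed datetime and the leftover text.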
testExtract("sejdeme se za 5 minut",
"2017-06-27 10:06:02", "sejdeme se")
testExtract("sejdeme se za 5minut",
"2017-06-27 10:06:02", "sejdeme se")
testExtract("sejdeme se za 5 sekund",
"2017-06-27 10:01:07", "sejdeme se")
testExtract("sejdeme se za 1 hodinu",
"2017-06-27 11:01:02", "sejdeme se")
testExtract("sejdeme se za 2 hodiny",
"2017-06-27 12:01:02", "sejdeme se")
print("TODO") # Need better normaliting procedure for czech inflexion
# testExtract("sejdeme se za 2hodiny",
# "2017-06-27 12:01:02", "sejdeme se")
testExtract("sejdeme se za 1 minutu",
"2017-06-27 10:02:02", "sejdeme se")
testExtract("sejdeme se za 1 sekundu",
"2017-06-27 10:01:03", "sejdeme se")
testExtract("sejdeme se za 5sekund",
"2017-06-27 10:01:07", "sejdeme se")
def test_spaces(self):
self.assertEqual(normalize(" tohle je test"),
"tohle je test")
self.assertEqual(normalize(" tohle je test "),
"tohle je test")
self.assertEqual(normalize(" tohle je jedna test"),
"tohle je 1 test")
def test_numbers(self):
self.assertEqual(normalize("tohle je jedna dva tři test"),
"tohle je 1 2 3 test")
self.assertEqual(normalize(" to je čtyři pět šest test"),
"to je 4 5 6 test")
self.assertEqual(normalize("to je sedum osum devět test"),
"to je 7 8 9 test")
self.assertEqual(normalize("to je sedm osm devět test"),
"to je 7 8 9 test")
self.assertEqual(normalize("tohle je deset jedenáct dvanáct test"),
"tohle je 10 11 12 test")
self.assertEqual(normalize("tohle je třináct čtrnáct test"),
"tohle je 13 14 test")
self.assertEqual(normalize("tohle je patnáct šestnáct sedmnáct"),
"tohle je 15 16 17")
self.assertEqual(normalize("tohle je osmnáct devatenáct dvacet"),
"tohle je 18 19 20")
self.assertEqual(normalize("tohle je jedna devatenáct dvacet dva"),
"tohle je 1 19 20 2")
self.assertEqual(normalize("tohle je jedna sto"),
"tohle je 1 sto")
self.assertEqual(normalize("tohle je jedna dva dvacet dva"),
"tohle je 1 2 20 2")
self.assertEqual(normalize("tohle je jedna a půl"),
"tohle je 1 a půl")
self.assertEqual(normalize("tohle je jedna a půl a pět šest"),
"tohle je 1 a půl a 5 6")
def test_multiple_numbers(self):
self.assertEqual(extract_numbers("tohle je jedna dva tři test"),
[1.0, 2.0, 3.0])
self.assertEqual(extract_numbers("to je čtyři pět šest test"),
[4.0, 5.0, 6.0])
self.assertEqual(extract_numbers("tohle je deset jedenáct dvanáct test"),
[10.0, 11.0, 12.0])
self.assertEqual(extract_numbers("tohle je jedna dvacet jedna test"),
[1.0, 21.0])
self.assertEqual(extract_numbers("1 pes, sedm prasat, macdonald měl "
"farmu, 3 krát 5 makaréna"),
[1, 7, 3, 5])
self.assertEqual(extract_numbers("dva piva pro dva medvědy"),
[2.0, 2.0])
self.assertEqual(extract_numbers("dvacet 20 dvacet"),
[20, 20, 20])
self.assertEqual(extract_numbers("dvacet 20 22"),
[20.0, 20.0, 22.0])
self.assertEqual(extract_numbers("dvacet dvacet dva dvacet"),
[20, 22, 20])
self.assertEqual(extract_numbers("dvacet 2"),
[22.0])
self.assertEqual(extract_numbers("dvacet 20 dvacet 2"),
[20, 20, 22])
self.assertEqual(extract_numbers("třetina jedna"),
[1 / 3, 1])
self.assertEqual(extract_numbers("třetí", ordinals=True), [3])
self.assertEqual(extract_numbers("šest trillion", short_scale=True),
[6e12])
self.assertEqual(extract_numbers("šest trilion", short_scale=False),
[6e18])
self.assertEqual(extract_numbers("dvě prasátka a šest trillion bakterií",
short_scale=True), [2, 6e12])
self.assertEqual(extract_numbers("dvě prasátka a šest trilion bakterií",
short_scale=False), [2, 6e18])
self.assertEqual(extract_numbers("třicátý druhý nebo první",
ordinals=True), [32, 1])
self.assertEqual(extract_numbers("tohle je sedm osm devět a"
" půl test"),
[7.0, 8.0, 9.5])
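# The short_scale tests above encode the two numbering conventions: with
# short_scale=True "trillion" is 10**12 (short scale), while with
# short_scale=False the Czech "trilion" is 10**18 (long scale).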
if __name__ == "__main__":
unittest.main()
| 54.208333
| 114
| 0.564851
| 5,157
| 40,331
| 4.375218
| 0.110529
| 0.033861
| 0.117006
| 0.089704
| 0.702522
| 0.618535
| 0.551256
| 0.475646
| 0.384701
| 0.353233
| 0
| 0.119882
| 0.321192
| 40,331
| 743
| 115
| 54.281292
| 0.704277
| 0.084203
| 0
| 0.207516
| 0
| 0
| 0.38263
| 0
| 0
| 0
| 0
| 0.001346
| 0.240196
| 1
| 0.026144
| false
| 0
| 0.01634
| 0
| 0.04902
| 0.004902
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
78fa9f898e64c035eed240732e89631cf36a87b3
| 18,049
|
py
|
Python
|
exhale/deploy.py
|
florianhumblot/exhale
|
d6fa84fa32ee079c6b70898a1b0863a38e703591
|
[
"BSD-3-Clause"
] | null | null | null |
exhale/deploy.py
|
florianhumblot/exhale
|
d6fa84fa32ee079c6b70898a1b0863a38e703591
|
[
"BSD-3-Clause"
] | null | null | null |
exhale/deploy.py
|
florianhumblot/exhale
|
d6fa84fa32ee079c6b70898a1b0863a38e703591
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf8 -*-
########################################################################################
# This file is part of exhale. Copyright (c) 2017-2022, Stephen McDowell. #
# Full BSD 3-Clause license available here: #
# #
# https://github.com/svenevs/exhale/blob/master/LICENSE #
########################################################################################
'''
The deploy module is responsible for two primary actions:
1. Executing Doxygen (if requested in ``exhale_args``).
2. Launching the full API generation via the :func:`~exhale.deploy.explode` function.
'''
from __future__ import unicode_literals
from . import configs
from . import utils
from .graph import ExhaleRoot
import os
import sys
import six
import re
import codecs
import tempfile
import textwrap
from subprocess import PIPE, Popen, STDOUT
def _generate_doxygen(doxygen_input):
'''
This method executes doxygen based off of the specified input. By the time this
method is executed, it is assumed that Doxygen is intended to be run in the
**current working directory**. Search for ``returnPath`` in the implementation of
:func:`~exhale.configs.apply_sphinx_configurations` for handling of this aspect.
This method is intended to be called by :func:`~exhale.deploy.generateDoxygenXML`,
which is in turn called by :func:`~exhale.configs.apply_sphinx_configurations`.
Two versions of the
doxygen command can be executed:
1. If ``doxygen_input`` is exactly ``"Doxyfile"``, then it is assumed that a
``Doxyfile`` exists in the **current working directory**. Meaning the command
being executed is simply ``doxygen``.
2. For all other values, ``doxygen_input`` represents the arguments as to be
specified on ``stdin`` to the process.
**Parameters**
``doxygen_input`` (str)
Either the string ``"Doxyfile"`` to run vanilla ``doxygen``, or the
selection of doxygen inputs (that would ordinarily be in a ``Doxyfile``)
that will be ``communicate``d to the ``doxygen`` process on ``stdin``.
.. note::
If using Python **3**, the input **must** still be a ``str``. This
method will convert the input to ``bytes`` as follows:
.. code-block:: py
if sys.version[0] == "3":
doxygen_input = bytes(doxygen_input, "utf-8")
**Return**
``str`` or ``None``
If an error occurs, a string describing the error is returned with the
intention of the caller raising the exception. If ``None`` is returned,
then the process executed without error. Example usage:
.. code-block:: py
status = _generate_doxygen("Doxygen")
if status:
raise RuntimeError(status)
Though a little awkward, this is done to enable the intended caller of this
method to restore some state before exiting the program (namely, the working
directory before propagating an exception to ``sphinx-build``).
'''
if not isinstance(doxygen_input, six.string_types):
return "Error: the `doxygen_input` variable must be of type `str`."
doxyfile = doxygen_input == "Doxyfile"
try:
# Setup the arguments to launch doxygen
if doxyfile:
args = ["doxygen"]
kwargs = {}
else:
args = ["doxygen", "-"]
kwargs = {"stdin": PIPE}
if configs._on_rtd:
# On RTD, any capturing of Doxygen output can cause buffer overflows for
# even medium sized projects. So it is disregarded entirely to ensure the
# build will complete (otherwise, it silently fails after `cat conf.py`)
devnull_file = open(os.devnull, "w")
kwargs["stdout"] = devnull_file
kwargs["stderr"] = STDOUT
else:
# TL;DR: strictly enforce that (verbose) doxygen output doesn't cause the
# `communicate` to hang due to buffer overflows.
#
# See excellent synopsis:
# https://thraxil.org/users/anders/posts/2008/03/13/Subprocess-Hanging-PIPE-is-your-enemy/
if six.PY2:
tempfile_kwargs = {}
else:
# encoding argument introduced in python 3
tempfile_kwargs = {"encoding": "utf-8"}
tempfile_kwargs["mode"] = "r+"
tmp_out_file = tempfile.TemporaryFile(
prefix="doxygen_stdout_buff", **tempfile_kwargs
)
tmp_err_file = tempfile.TemporaryFile(
prefix="doxygen_stderr_buff", **tempfile_kwargs
)
# Write to the tempfiles over PIPE to avoid buffer overflowing
kwargs["stdout"] = tmp_out_file
kwargs["stderr"] = tmp_err_file
# Note: `args` must be the list Popen expects as its first positional
# parameter (an actual list, not unpacked *args)!
doxygen_proc = Popen(args, **kwargs)
# communicate() can only be called once, so decide here whether stdin carries a value
if not doxyfile:
# In Py3, make sure we are communicating a bytes-like object which is no
# longer interchangeable with strings (as was the case in Py2).
if sys.version[0] == "3":
doxygen_input = bytes(doxygen_input, "utf-8")
comm_kwargs = {"input": doxygen_input}
else:
comm_kwargs = {}
# Waits until doxygen has completed
doxygen_proc.communicate(**comm_kwargs)
# Print out what was written to the tmpfiles by doxygen
if not configs._on_rtd and not configs.exhaleSilentDoxygen:
# Doxygen output (some useful information, mostly just enumeration of the
# configurations you gave it {useful for debugging...})
if tmp_out_file.tell() > 0:
tmp_out_file.seek(0)
print(tmp_out_file.read())
# Doxygen error (e.g. any warnings, or invalid input)
if tmp_err_file.tell() > 0:
# Making them stick out, ideally users would reduce this output to 0 ;)
# This will print a yellow [~] before every line, but not make the
# entire line yellow because that's definitely not helpful
prefix = utils._use_color(
utils.prefix("[~]", " "), utils.AnsiColors.BOLD_YELLOW, sys.stderr
)
tmp_err_file.seek(0)
sys.stderr.write(utils.prefix(prefix, tmp_err_file.read()))
# Close the file handles opened for communication with subprocess
if configs._on_rtd:
devnull_file.close()
else:
# Delete the tmpfiles
tmp_out_file.close()
tmp_err_file.close()
# Make sure we had a valid execution of doxygen
exit_code = doxygen_proc.returncode
if exit_code != 0:
raise RuntimeError("Non-zero return code of [{0}] from 'doxygen'...".format(exit_code))
except Exception as e:
return "Unable to execute 'doxygen': {0}".format(e)
# returning None signals _success_
return None
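# A minimal standalone sketch of the buffering strategy used above (the
# doxygen stdin content below is illustrative): route a chatty subprocess's
# output into a temporary file instead of PIPE so that communicate() cannot
# deadlock on a full pipe buffer. Kept as a comment so it does not execute
# at import time.
#
#     import tempfile
#     from subprocess import PIPE, Popen, STDOUT
#
#     with tempfile.TemporaryFile(mode="r+", encoding="utf-8") as buff:
#         proc = Popen(["doxygen", "-"], stdin=PIPE, stdout=buff, stderr=STDOUT)
#         proc.communicate(input=b"INPUT = ../include\n")
#         buff.seek(0)
#         print(buff.read())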
def _valid_config(config, required):
'''
.. todo:: add documentation of this method
``config``: doxygen input we're looking for
``required``: if ``True``, must be present. if ``False``, NOT ALLOWED to be present
'''
re_template = r"\s*{config}\s*=.*".format(config=config)
found = re.search(re_template, configs.exhaleDoxygenStdin)
if required:
return found is not None
else:
return found is None
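# Illustrative example: with configs.exhaleDoxygenStdin set to "INPUT = ../include",
# _valid_config("INPUT", True) returns True (required and present) and
# _valid_config("OUTPUT_DIRECTORY", False) returns True (disallowed and absent).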
def generateDoxygenXML():
# If this happens, we really shouldn't be here...
if not configs.exhaleExecutesDoxygen:
return textwrap.dedent('''
`generateDoxygenXML` should *ONLY* be called internally. You should
set `exhaleExecutesDoxygen=True` in `exhale_args` in `conf.py`.
''')
# Case 1: the user has their own `Doxyfile`.
if configs.exhaleUseDoxyfile:
return _generate_doxygen("Doxyfile")
# Case 2: use stdin, with some defaults and potentially additional specs from user
else:
# There are two doxygen specs that we explicitly disallow
#
# 1. OUTPUT_DIRECTORY: this is *ALREADY* specified implicitly via breathe
# 2. STRIP_FROM_PATH: this is a *REQUIRED* config (`doxygenStripFromPath`)
#
# There is one doxygen spec that is REQUIRED to be given:
#
# 1. INPUT (where doxygen should parse).
#
# The below is a modest attempt to validate that these were / were not given.
if not isinstance(configs.exhaleDoxygenStdin, six.string_types):
return "`exhaleDoxygenStdin` config must be a string!"
if not _valid_config("OUTPUT_DIRECTORY", False):
# If we are hitting this code, these should both exist and be configured
# since this method is called **AFTER** the configuration verification code
# performed in configs.apply_sphinx_configurations
breathe_projects = configs._the_app.config.breathe_projects
breathe_default_project = configs._the_app.config.breathe_default_project
return textwrap.dedent('''
`exhaleDoxygenStdin` may *NOT* specify `OUTPUT_DIRECTORY`. Exhale does
this internally by reading what you provided to `breathe_projects` in
your `conf.py`.
Based on what you had in `conf.py`, Exhale will be using
- The `breathe_default_project`:
{default}
- The output path specified (`breathe_projects[breathe_default_project]`):
{path}
NOTE: the above path has the `xml` portion removed from what you
provided. This path is what is sent to Doxygen; Breathe
requires you to include the `xml` directory path, so Exhale simply
re-uses this variable and adapts the value for its needs.
'''.format(
default=breathe_default_project,
path=breathe_projects[breathe_default_project].rsplit("{sep}xml".format(sep=os.sep), 1)[0]
))
if not _valid_config("STRIP_FROM_PATH", False):
return textwrap.dedent('''
`exhaleDoxygenStdin` may *NOT* specify `STRIP_FROM_PATH`. Exhale does
this internally by using the value you provided to `exhale_args` in
your `conf.py` for the key `doxygenStripFromPath`.
Based on what you had in `conf.py`, Exhale will be using:
{strip}
NOTE: the above is what you specified directly in `exhale_args`. Exhale
will be using an absolute path to send to Doxygen. It is:
{absolute}
'''.format(
strip=configs._the_app.config.exhale_args["doxygenStripFromPath"],
absolute=configs.doxygenStripFromPath
))
if not _valid_config("INPUT", True):
return textwrap.dedent('''
`exhaleDoxygenStdin` *MUST* specify the `INPUT` doxygen config variable.
The INPUT variable is what tells Doxygen where to look for code to
extract documentation from. For example, if you had a directory layout
project_root/
docs/
conf.py
Makefile
... etc ...
include/
my_header.hpp
src/
my_header.cpp
Then you would include the line
INPUT = ../include
in the string provided to `exhale_args["exhaleDoxygenStdin"]`.
''')
# For these, we just want to warn them of the impact but still allow an override
re_template = r"\s*{config}\s*=\s*(.*)"
for cfg in ("ALIASES", "PREDEFINED"):
found = re.search(re_template.format(config=cfg), configs.exhaleDoxygenStdin)
if found:
sys.stderr.write(utils.info(textwrap.dedent('''
You have supplied to `exhaleDoxygenStdin` a configuration of:
{cfg} = {theirs}
This has an important impact, as it overrides a default setting that
Exhale is using.
1. If you are intentionally overriding this configuration, simply
ignore this message --- what you intended will happen.
2. If you meant to _continue_ adding to the defaults Exhale provides,
you need to use a `+=` instead of a raw `=`. So do instead
{cfg} += {theirs}
'''.format(cfg=cfg, theirs=found.groups()[0])), utils.AnsiColors.BOLD_YELLOW))
# Include their custom doxygen definitions after the defaults so that they can
# override anything they want to. Populate the necessary output dir and strip path.
doxy_dir = configs._doxygen_xml_output_directory.rsplit("{sep}xml".format(sep=os.sep), 1)[0]
internal_configs = textwrap.dedent('''
# Tell doxygen to output wherever breathe is expecting things
OUTPUT_DIRECTORY = "{out}"
# Tell doxygen to strip the path names (RTD builds produce long abs paths...)
STRIP_FROM_PATH = "{strip}"
'''.format(out=doxy_dir, strip=configs.doxygenStripFromPath))
external_configs = textwrap.dedent(configs.exhaleDoxygenStdin)
# Place external configs last so that if the _valid_config method isn't actually
# catching what it should be, the internal configs will override theirs
full_input = "{base}\n{external}\n{internal}\n\n".format(base=configs.DEFAULT_DOXYGEN_STDIN_BASE,
external=external_configs,
internal=internal_configs)
# << verboseBuild
if configs.verboseBuild:
msg = "[*] The following input will be sent to Doxygen:\n"
if not configs.alwaysColorize and not sys.stderr.isatty():
sys.stderr.write(msg)
sys.stderr.write(full_input)
else:
sys.stderr.write(utils.colorize(msg, utils.AnsiColors.BOLD_CYAN))
sys.stderr.write(utils.__fancy(full_input, "make", "console"))
return _generate_doxygen(full_input)
########################################################################################
#
##
###
####
##### Primary entry point.
####
###
##
#
########################################################################################
def explode():
'''
This method **assumes** that :func:`~exhale.configs.apply_sphinx_configurations` has
already been applied. It performs minimal sanity checking, and then performs in
order
1. Creates a :class:`~exhale.graph.ExhaleRoot` object.
2. Executes :func:`~exhale.graph.ExhaleRoot.parse` for this object.
3. Executes :func:`~exhale.graph.ExhaleRoot.generateFullAPI` for this object.
4. Executes :func:`~exhale.graph.ExhaleRoot.toConsole` for this object (which will
only produce output when :data:`~exhale.configs.verboseBuild` is ``True``).
This results in the full API being generated, and control is subsequently passed
back to Sphinx to now read in the source documents (many of which were just
generated in :data:`~exhale.configs.containmentFolder`), and proceed to writing the
final output.
'''
# Quick sanity check to make sure the bare minimum have been set in the configs
err_msg = "`configs.{config}` was `None`. Do not call `deploy.explode` directly."
if configs.containmentFolder is None:
raise RuntimeError(err_msg.format(config="containmentFolder"))
if configs.rootFileName is None:
raise RuntimeError(err_msg.format(config="rootFileName"))
if configs.doxygenStripFromPath is None:
raise RuntimeError(err_msg.format(config="doxygenStripFromPath"))
# From here on, we assume that everything else has been checked / configured.
try:
textRoot = ExhaleRoot()
except:
utils.fancyError("Unable to create an `ExhaleRoot` object:")
try:
sys.stdout.write("{0}\n".format(utils.info("Exhale: parsing Doxygen XML.")))
start = utils.get_time()
textRoot.parse()
end = utils.get_time()
sys.stdout.write("{0}\n".format(
utils.progress("Exhale: finished parsing Doxygen XML in {0}.".format(
utils.time_string(start, end)
))
))
except:
utils.fancyError("Exception caught while parsing:")
try:
sys.stdout.write("{0}\n".format(
utils.info("Exhale: generating reStructuredText documents.")
))
start = utils.get_time()
textRoot.generateFullAPI()
end = utils.get_time()
sys.stdout.write("{0}\n".format(
utils.progress("Exhale: generated reStructuredText documents in {0}.".format(
utils.time_string(start, end)
))
))
except:
utils.fancyError("Exception caught while generating:")
# << verboseBuild
# toConsole only prints if verbose mode is enabled
textRoot.toConsole()
# allow access to the result after-the-fact
configs._the_app.exhale_root = textRoot
| 42.468235
| 106
| 0.588066
| 2,054
| 18,049
| 5.07741
| 0.275073
| 0.014958
| 0.005753
| 0.012273
| 0.143159
| 0.092147
| 0.076422
| 0.066641
| 0.054847
| 0.049477
| 0
| 0.00512
| 0.307441
| 18,049
| 424
| 107
| 42.568396
| 0.8292
| 0.364231
| 0
| 0.190698
| 0
| 0
| 0.389342
| 0.019926
| 0
| 0
| 0
| 0.002358
| 0
| 1
| 0.018605
| false
| 0
| 0.060465
| 0
| 0.134884
| 0.004651
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
78fb0646e467b92a38f001788a56ced3c1f8a48d
| 3,816
|
py
|
Python
|
src/bayesian_reliability_comparison.py
|
rloganiv/bayesian-blackbox
|
6a111553200b6aa755149e08174abe1a61d37198
|
[
"MIT"
] | 8
|
2019-12-23T13:27:15.000Z
|
2021-12-01T13:33:34.000Z
|
src/bayesian_reliability_comparison.py
|
rloganiv/bayesian-blackbox
|
6a111553200b6aa755149e08174abe1a61d37198
|
[
"MIT"
] | 11
|
2020-03-31T11:06:55.000Z
|
2022-02-10T00:39:33.000Z
|
src/bayesian_reliability_comparison.py
|
disiji/bayesian-blackbox
|
6a111553200b6aa755149e08174abe1a61d37198
|
[
"MIT"
] | 2
|
2020-01-24T10:21:57.000Z
|
2020-02-22T04:41:14.000Z
|
import argparse
import multiprocessing
import os
import random
import numpy as np
from data_utils import DATAFILE_LIST, DATASET_LIST, prepare_data, RESULTS_DIR
from models import SumOfBetaEce
random.seed(2020)
num_cores = multiprocessing.cpu_count()
NUM_BINS = 10
NUM_RUNS = 100
N_list = [100, 200, 500, 1000, 2000, 5000, 10000]
OUTPUT_DIR = RESULTS_DIR + "bayesian_reliability_comparison/"
def main(args) -> None:
# load data
categories, observations, confidences, idx2category, category2idx, labels = prepare_data(
DATAFILE_LIST[args.dataset], False)
# train a ground_truth ece model
if args.ground_truth_type == 'bayesian':
ground_truth_model = SumOfBetaEce(num_bins=args.num_bins, pseudocount=args.pseudocount)
else:
ground_truth_model = SumOfBetaEce(num_bins=args.num_bins, pseudocount=1e-3)
ground_truth_model.update_batch(confidences, observations)
results = np.zeros((args.num_runs, len(N_list), 5))
for run_id in range(args.num_runs):
tmp = list(zip(confidences, observations))
random.shuffle(tmp)
confidences, observations = zip(*tmp)
model = SumOfBetaEce(num_bins=args.num_bins, pseudocount=args.pseudocount)
for i in range(len(N_list)):
tmp = 0 if i == 0 else N_list[i - 1]
model.update_batch(confidences[tmp: N_list[i]], observations[tmp: N_list[i]])
results[run_id, i, 0] = N_list[i]
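# NOTE: eval and frequentist_eval are assumed here to be properties of
# models.SumOfBetaEce that return floats; if they are plain methods in that
# module, they would need to be called explicitly.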
results[run_id, i, 1] = model.eval
results[run_id, i, 2] = model.frequentist_eval
results[run_id, i, 3] = model.calibration_estimation_error(ground_truth_model, args.weight_type)
results[run_id, i, 4] = model.frequentist_calibration_estimation_error(ground_truth_model, args.weight_type)
results_mean = np.mean(results, axis=0)
results_std = np.std(results, axis=0) # standard deviation across runs (written to the *_std.csv file)
# NOTE: the original `OUTPUT_DIR += "online_weights/"` would make OUTPUT_DIR a
# local variable throughout main() and raise UnboundLocalError; use a local
# copy of the module-level path instead.
output_dir = OUTPUT_DIR
if args.weight_type == 'online':
output_dir += "online_weights/"
try:
os.stat(output_dir)
except OSError:
os.mkdir(output_dir)
if args.ground_truth_type == 'frequentist':
filename_mean = output_dir + "frequentist_ground_truth_%s_pseudocount%d.csv" % (args.dataset, args.pseudocount)
filename_std = output_dir + "frequentist_ground_truth_%s_pseudocount%d_std.csv" % (
args.dataset, args.pseudocount)
else:
filename_mean = output_dir + "bayesian_ground_truth_%s_pseudocount%d.csv" % (args.dataset, args.pseudocount)
filename_std = output_dir + "bayesian_ground_truth_%s_pseudocount%d_std.csv" % (
args.dataset, args.pseudocount)
header = 'N, bayesian_ece, frequentist_ece, bayesian_estimation_error, frequentist_estimation_error'
np.savetxt(filename_mean, results_mean, delimiter=',', header=header)
np.savetxt(filename_std, results_std, delimiter=',', header=header)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('dataset', type=str, default='cifar100', help='input dataset')
parser.add_argument('-pseudocount', type=int, default=1, help='strength of prior')
parser.add_argument('-ground_truth_type', type=str, default='bayesian',
help='compute ground truth in a Bayesian or frequentist way, bayesian or frequentist')
parser.add_argument('-weight_type', type=str, default='pool',
help='weigh each bin with all data or only data seen so far, online or pool')
parser.add_argument('--num_runs', type=int, default=NUM_RUNS, help='number of runs')
parser.add_argument('--num_bins', type=int, default=NUM_BINS, help='number of bins in reliability diagram')
args, _ = parser.parse_known_args()
if args.dataset not in DATASET_LIST:
raise ValueError("%s is not in DATASET_LIST." % args.dataset)
main(args)
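# Example invocation (illustrative; the dataset name must appear in DATASET_LIST):
#   python src/bayesian_reliability_comparison.py cifar100 -pseudocount 2 \
#       -ground_truth_type bayesian -weight_type pool --num_runs 10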
| 41.032258
| 120
| 0.70152
| 509
| 3,816
| 5.003929
| 0.275049
| 0.060463
| 0.040047
| 0.02552
| 0.292108
| 0.262269
| 0.262269
| 0.24735
| 0.225363
| 0.225363
| 0
| 0.017487
| 0.190776
| 3,816
| 92
| 121
| 41.478261
| 0.807319
| 0.010482
| 0
| 0.058824
| 0
| 0
| 0.184469
| 0.071031
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014706
| false
| 0
| 0.102941
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
78fbbb7e97d40f03f6fe9dcf3d1d397ff5d9dbb9
| 29,044
|
py
|
Python
|
psyneulink/core/components/functions/statefulfunctions/statefulfunction.py
|
SamKG/PsyNeuLink
|
70558bcd870868e1688cb7a7c424d29ca336f2df
|
[
"Apache-2.0"
] | null | null | null |
psyneulink/core/components/functions/statefulfunctions/statefulfunction.py
|
SamKG/PsyNeuLink
|
70558bcd870868e1688cb7a7c424d29ca336f2df
|
[
"Apache-2.0"
] | 77
|
2020-10-01T06:27:19.000Z
|
2022-03-31T02:03:33.000Z
|
psyneulink/core/components/functions/statefulfunctions/statefulfunction.py
|
SamKG/PsyNeuLink
|
70558bcd870868e1688cb7a7c424d29ca336f2df
|
[
"Apache-2.0"
] | null | null | null |
#
# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#
#
# ***************************************** STATEFUL FUNCTION *********************************************************
"""
* `StatefulFunction`
* `IntegratorFunctions`
* `MemoryFunctions`
"""
import abc
import typecheck as tc
import warnings
import numbers
import numpy as np
from psyneulink.core import llvm as pnlvm
from psyneulink.core.components.component import DefaultsFlexibility, _has_initializers_setter
from psyneulink.core.components.functions.function import Function_Base, FunctionError
from psyneulink.core.components.functions.distributionfunctions import DistributionFunction
from psyneulink.core.globals.keywords import STATEFUL_FUNCTION_TYPE, STATEFUL_FUNCTION, NOISE, RATE
from psyneulink.core.globals.parameters import Parameter
from psyneulink.core.globals.utilities import parameter_spec, iscompatible, object_has_single_value, convert_to_np_array
from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set
from psyneulink.core.globals.context import ContextFlags, handle_external_context
__all__ = ['StatefulFunction']
class StatefulFunction(Function_Base): # ---------------------------------------------------------------------
"""
StatefulFunction( \
default_variable=None, \
initializer, \
rate=1.0, \
noise=0.0, \
params=None, \
owner=None, \
prefs=None, \
)
.. _StatefulFunction:
Abstract base class for Functions whose results depend on their `previous_value
<StatefulFunction.previous_value>` attribute.
COMMENT:
NARRATIVE HERE THAT EXPLAINS:
A) initializers and stateful_attributes
B) initializer (note singular) is a prespecified member of initializers
that contains the value with which to initialize previous_value
COMMENT
Arguments
---------
default_variable : number, list or array : default class_defaults.variable
specifies a template for `variable <StatefulFunction.variable>`.
initializer : float, list or 1d array : default 0.0
specifies initial value for `previous_value <StatefulFunction.previous_value>`. If it is a list or array,
it must be the same length as `variable <StatefulFunction.variable>` (see `initializer
<StatefulFunction.initializer>` for details).
rate : float, list or 1d array : default 1.0
specifies value used as a scaling parameter in a subclass-dependent way (see `rate <StatefulFunction.rate>` for
details); if it is a list or array, it must be the same length as `variable <StatefulFunction.default_variable>`.
noise : float, function, list or 1d array : default 0.0
specifies random value added in each call to `function <StatefulFunction.function>`; if it is a list or
array, it must be the same length as `variable <StatefulFunction.default_variable>` (see `noise
<StatefulFunction.noise>` for details).
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
function. Values specified for parameters in the dictionary override any assigned to those parameters in
arguments of the constructor.
owner : Component
`component <Component>` to which to assign the Function.
name : str : default see `name <Function.name>`
specifies the name of the Function.
prefs : PreferenceSet or specification dict : default Function.classPreferences
specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details).
Attributes
----------
variable : number or array
current input value.
initializer : float or 1d array
determines initial value assigned to `previous_value <StatefulFunction.previous_value>`. If `variable
<StatefulFunction.variable>` is a list or array, and initializer is a float or has a single element, it is
applied to each element of `previous_value <StatefulFunction.previous_value>`. If initializer is a list or
array, each element is applied to the corresponding element of `previous_value <Integrator.previous_value>`.
previous_value : 1d array
last value returned (i.e., for which state is being maintained).
initializers : list
stores the names of the initialization attributes for each of the stateful attributes of the function. The
index i item in initializers provides the initialization value for the index i item in `stateful_attributes
<StatefulFunction.stateful_attributes>`.
stateful_attributes : list
stores the names of each of the stateful attributes of the function. The index i item in stateful_attributes is
initialized by the value of the initialization attribute whose name is stored in index i of `initializers
<StatefulFunction.initializers>`. In most cases, the stateful_attributes, in that order, are the return values
of the function.
.. _Stateful_Rate:
rate : float or 1d array
on each call to `function <StatefulFunction.function>`, applied to `variable <StatefulFunction.variable>`,
`previous_value <StatefulFunction.previous_value>`, neither, or both, depending on implementation by
subclass. If it is a float or has a single value, it is applied to all elements of its target(s); if it has
more than one element, each element is applied to the corresponding element of its target(s).
.. _Stateful_Noise:
noise : float, function, list, or 1d array
random value added on each call to `function <StatefulFunction.function>`. If `variable
<StatefulFunction.variable>` is a list or array, and noise is a float or function, it is applied
for each element of `variable <StatefulFunction.variable>`. If noise is a function, it is executed and applied
separately for each element of `variable <StatefulFunction.variable>`. If noise is a list or array,
it is applied elementwise (i.e., in Hadamard form).
.. hint::
To generate random noise that varies on every execution, a probability distribution function should be
used (see `Distribution Functions <DistributionFunction>` for details), which generates a new noise value
from its distribution on each execution. If noise is specified as a float, a function with a fixed
output, or a list or array of either of these, then noise is simply an offset that remains the same
across all executions.
owner : Component
`component <Component>` to which the Function has been assigned.
name : str
the name of the Function; if it is not specified in the **name** argument of the constructor, a default is
assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names).
prefs : PreferenceSet or specification dict
the `PreferenceSet` for the Function; if it is not specified in the **prefs** argument of the Function's
constructor, a default is assigned using `classPreferences` defined in __init__.py (see `Preferences`
for details).
"""
componentType = STATEFUL_FUNCTION_TYPE
componentName = STATEFUL_FUNCTION
class Parameters(Function_Base.Parameters):
"""
Attributes
----------
initializer
see `initializer <StatefulFunction.initializer>`
:default value: numpy.array([0])
:type: ``numpy.ndarray``
noise
see `noise <StatefulFunction.noise>`
:default value: 0.0
:type: ``float``
previous_value
see `previous_value <StatefulFunction.previous_value>`
:default value: numpy.array([0])
:type: ``numpy.ndarray``
rate
see `rate <StatefulFunction.rate>`
:default value: 1.0
:type: ``float``
"""
noise = Parameter(0.0, modulable=True)
rate = Parameter(1.0, modulable=True)
previous_value = Parameter(np.array([0]), initializer='initializer', pnl_internal=True)
initializer = Parameter(np.array([0]), pnl_internal=True)
has_initializers = Parameter(True, setter=_has_initializers_setter, pnl_internal=True)
@handle_external_context()
@tc.typecheck
def __init__(self,
default_variable=None,
rate=None,
noise=None,
initializer=None,
params: tc.optional(tc.optional(dict)) = None,
owner=None,
prefs: tc.optional(is_pref_set) = None,
context=None,
**kwargs
):
if not hasattr(self, "initializers"):
self.initializers = ["initializer"]
if not hasattr(self, "stateful_attributes"):
self.stateful_attributes = ["previous_value"]
super().__init__(
default_variable=default_variable,
rate=rate,
initializer=initializer,
noise=noise,
params=params,
owner=owner,
prefs=prefs,
context=context,
**kwargs
)
def _validate(self, context=None):
self._validate_rate(self.defaults.rate)
self._validate_initializers(self.defaults.variable, context=context)
super()._validate(context=context)
def _validate_params(self, request_set, target_set=None, context=None):
# Handle list or array for rate specification
if RATE in request_set:
rate = request_set[RATE]
if isinstance(rate, (list, np.ndarray)) and not iscompatible(rate, self.defaults.variable):
if len(rate) != 1 and len(rate) != np.array(self.defaults.variable).size:
# If the variable was not specified, then reformat it to match rate specification
# and assign class_defaults.variable accordingly
# Note: this situation can arise when the rate is parametrized (e.g., as an array) in the
# StatefulFunction's constructor, where that is used as a specification for a function parameter
# (e.g., for an IntegratorMechanism), whereas the input is specified as part of the
# object to which the function parameter belongs (e.g., the IntegratorMechanism); in that
# case, the StatefulFunction gets instantiated using its class_defaults.variable ([[0]]) before
# the object itself, thus does not see the array specification for the input.
if self._variable_shape_flexibility is DefaultsFlexibility.FLEXIBLE:
self._instantiate_defaults(variable=np.zeros_like(np.array(rate)), context=context)
if self.verbosePref:
# NOTE: the message has five placeholders; pass the updated default
# variable as the fifth format argument rather than (incorrectly) as
# warnings.warn's `category` argument.
warnings.warn(
"The length ({}) of the array specified for the rate parameter ({}) of {} "
"must match the length ({}) of the default input ({}); "
"the default input has been updated to match".format(
len(rate),
rate,
self.name,
np.array(self.defaults.variable).size,
self.defaults.variable
)
)
else:
raise FunctionError(
"The length of the array specified for the rate parameter of {} ({}) "
"must match the length of the default input ({}).".format(
self.name,
# rate,
len(rate),
np.array(self.defaults.variable).size,
# self.defaults.variable,
)
)
super()._validate_params(request_set=request_set,
target_set=target_set,
context=context)
if NOISE in target_set:
noise = target_set[NOISE]
if isinstance(noise, DistributionFunction):
noise.owner = self
target_set[NOISE] = noise.execute
self._validate_noise(target_set[NOISE])
def _validate_initializers(self, default_variable, context=None):
for initial_value_name in self.initializers:
initial_value = self._get_current_parameter_value(initial_value_name, context=context)
if isinstance(initial_value, (list, np.ndarray)):
if len(initial_value) != 1:
# np.atleast_2d may not be necessary here?
if np.shape(np.atleast_2d(initial_value)) != np.shape(np.atleast_2d(default_variable)):
raise FunctionError("{}'s {} ({}) is incompatible with its default_variable ({}) ."
.format(self.name, initial_value_name, initial_value, default_variable))
elif not isinstance(initial_value, (float, int)):
raise FunctionError("{}'s {} ({}) must be a number or a list/array of numbers."
.format(self.name, initial_value_name, initial_value))
def _validate_rate(self, rate):
# FIX: CAN WE JUST GET RID OF THIS?
# kmantel: this duplicates much code in _validate_params above, but that calls _instantiate_defaults
# which I don't think is the right thing to do here, but if you don't call it in _validate_params
# then a lot of things don't get instantiated properly
if rate is not None:
if isinstance(rate, list):
rate = np.asarray(rate)
rate_type_msg = 'The rate parameter of {0} must be a number or an array/list of at most 1d (you gave: {1})'
if isinstance(rate, np.ndarray):
# kmantel: current test_gating test depends on 2d rate
# this should be looked at but for now this restriction is removed
# if rate.ndim > 1:
# raise FunctionError(rate_type_msg.format(self.name, rate))
pass
elif not isinstance(rate, numbers.Number):
raise FunctionError(rate_type_msg.format(self.name, rate))
if isinstance(rate, np.ndarray) and not iscompatible(rate, self.defaults.variable):
if len(rate) != 1 and len(rate) != np.array(self.defaults.variable).size:
if self._variable_shape_flexibility is DefaultsFlexibility.FLEXIBLE:
self.defaults.variable = np.zeros_like(np.array(rate))
if self.verbosePref:
warnings.warn(
"The length ({}) of the array specified for the rate parameter ({}) of {} "
"must match the length ({}) of the default input ({}); "
"the default input has been updated to match".format(
len(rate),
rate,
self.name,
np.array(self.defaults.variable).size,
self.defaults.variable
)
)
self._instantiate_value()
self._variable_shape_flexibility = DefaultsFlexibility.INCREASE_DIMENSION
else:
raise FunctionError(
"The length of the array specified for the rate parameter of {} ({}) "
"must match the length of the default input ({}).".format(
self.name,
len(rate),
np.array(self.defaults.variable).size,
)
)
# Ensure that the noise parameter makes sense with the input type and shape; flag any noise functions that will
# need to be executed
def _validate_noise(self, noise):
# Noise is a list or array
if isinstance(noise, (np.ndarray, list)):
if len(noise) == 1:
pass
# Variable is a list/array
elif (not iscompatible(np.atleast_2d(noise), self.defaults.variable)
and not iscompatible(np.atleast_1d(noise), self.defaults.variable) and len(noise) > 1):
raise FunctionError(
"Noise parameter ({}) does not match default variable ({}). Noise parameter of {} "
"must be specified as a float, a function, or an array of the appropriate shape ({}).".format(
noise, self.defaults.variable, self.name,
np.shape(np.array(self.defaults.variable))
),
component=self
)
else:
for i in range(len(noise)):
if isinstance(noise[i], DistributionFunction):
noise[i] = noise[i].execute
# if not isinstance(noise[i], (float, int)) and not callable(noise[i]):
if not np.isscalar(noise[i]) and not callable(noise[i]):
raise FunctionError("The elements of a noise list or array must be scalars or functions. "
"{} is not a valid noise element for {}".format(noise[i], self.name))
def _try_execute_param(self, param, var, context=None):
# FIX: [JDC 12/18/18 - HACK TO DEAL WITH ENFORCEMENT OF 2D BELOW]
param_shape = np.array(param).shape
if not len(param_shape):
param_shape = np.array(var).shape
# param is a list; if any element is callable, execute it
if isinstance(param, (np.ndarray, list)):
# NOTE: np.atleast_2d will cause problems if the param has "rows" of different lengths
# FIX: WHY FORCE 2d??
param = np.atleast_2d(param)
for i in range(len(param)):
for j in range(len(param[i])):
try:
param[i][j] = param[i][j](context=context)
except TypeError:
try:
param[i][j] = param[i][j]()
except TypeError:
pass
try:
param = param.reshape(param_shape)
except ValueError:
if object_has_single_value(param):
param = np.full(param_shape, float(param))
# param is one function
elif callable(param):
# NOTE: np.atleast_2d will cause problems if the param has "rows" of different lengths
new_param = []
# FIX: WHY FORCE 2d??
for row in np.atleast_2d(var):
# for row in np.atleast_1d(var):
# for row in var:
new_row = []
for item in row:
try:
val = param(context=context)
except TypeError:
val = param()
new_row.append(val)
new_param.append(new_row)
param = np.asarray(new_param)
# FIX: [JDC 12/18/18 - HACK TO DEAL WITH ENFORCEMENT OF 2D ABOVE]
try:
if len(np.squeeze(param)):
param = param.reshape(param_shape)
except TypeError:
pass
return param
def _instantiate_attributes_before_function(self, function=None, context=None):
if not self.parameters.initializer._user_specified:
self._initialize_previous_value(np.zeros_like(self.defaults.variable), context)
# use np.broadcast_to to guarantee that all initializer type attributes take on the same shape as variable
if not np.isscalar(self.defaults.variable):
for attr in self.initializers:
param = getattr(self.parameters, attr)
param._set(
np.broadcast_to(
param._get(context),
self.defaults.variable.shape
).copy(),
context
)
# create all stateful attributes and initialize their values to the current values of their
# corresponding initializer attributes
for attr_name in self.stateful_attributes:
initializer_value = getattr(self.parameters, getattr(self.parameters, attr_name).initializer)._get(context).copy()
getattr(self.parameters, attr_name)._set(initializer_value, context)
super()._instantiate_attributes_before_function(function=function, context=context)
def _initialize_previous_value(self, initializer, context=None):
initializer = convert_to_np_array(initializer, dimension=1)
self.defaults.initializer = initializer.copy()
self.parameters.initializer._set(initializer.copy(), context)
self.defaults.previous_value = initializer.copy()
self.parameters.previous_value.set(initializer.copy(), context)
return initializer
@handle_external_context()
def _update_default_variable(self, new_default_variable, context=None):
if not self.parameters.initializer._user_specified:
self._initialize_previous_value(np.zeros_like(new_default_variable), context)
super()._update_default_variable(new_default_variable, context=context)
def _parse_value_order(self, **kwargs):
"""
Returns:
tuple: the values of the keyword arguments in the order
in which they appear in this Component's `value
<Component.value>`
"""
return tuple(v for k, v in kwargs.items())
@handle_external_context(fallback_most_recent=True)
def reset(self, *args, context=None, **kwargs):
"""
Resets `value <StatefulFunction.previous_value>` and `previous_value <StatefulFunction.previous_value>`
to the specified value(s).
If arguments are passed into the reset method, then reset sets each of the attributes in
`stateful_attributes <StatefulFunction.stateful_attributes>` to the value of the corresponding argument.
Next, it sets the `value <StatefulFunction.value>` to a list containing each of the argument values.
If reset is called without arguments, then it sets each of the attributes in `stateful_attributes
<StatefulFunction.stateful_attributes>` to the value of the corresponding attribute in `initializers
<StatefulFunction.initializers>`. Next, it sets the `value <StatefulFunction.value>` to a list containing
the values of each of the attributes in `initializers <StatefulFunction.initializers>`.
Often, the only attribute in `stateful_attributes <StatefulFunction.stateful_attributes>` is
`previous_value <StatefulFunction.previous_value>` and the only attribute in `initializers
<StatefulFunction.initializers>` is `initializer <StatefulFunction.initializer>`, in which case
the reset method sets `previous_value <StatefulFunction.previous_value>` and `value
<StatefulFunction.value>` to either the value of the argument (if an argument was passed into
reset) or the current value of `initializer <StatefulFunction.initializer>`.
For specific types of StatefulFunction functions, the reset method may carry out other
reinitialization steps.
"""
num_stateful_attrs = len(self.stateful_attributes)
if num_stateful_attrs >= 2:
# old args specification can be supported only in subclasses
# that explicitly define an order by overriding reset
if len(args) > 0:
raise FunctionError(
f'{self}.reset has more than one stateful attribute'
f' ({self.stateful_attributes}). You must specify reset'
' values by keyword.'
)
if len(kwargs) != num_stateful_attrs:
type_name = type(self).__name__
raise FunctionError(
'StatefulFunction.reset must receive a keyword argument for'
f' each item in {type_name}.stateful_attributes in the order in'
f' which they appear in {type_name}.value'
)
if num_stateful_attrs == 1:
try:
kwargs[self.stateful_attributes[0]]
except KeyError:
try:
kwargs[self.stateful_attributes[0]] = args[0]
except IndexError:
kwargs[self.stateful_attributes[0]] = None
invalid_args = []
# iterates in order arguments are sent in function call, so it
# will match their order in value as long as they are listed
# properly in subclass reset method signatures
for attr in kwargs:
if kwargs[attr] is not None:
                # NOTE: conversion to 1d is kept for safety; it is unclear
                # whether it is strictly necessary
kwargs[attr] = np.atleast_1d(kwargs[attr])
else:
try:
kwargs[attr] = self._get_current_parameter_value(getattr(self.parameters, attr).initializer, context=context)
except AttributeError:
invalid_args.append(attr)
if len(invalid_args) > 0:
raise FunctionError(
f'Arguments {invalid_args} to reset are invalid because they do'
f" not correspond to any of {self}'s stateful_attributes"
)
# rebuilding value rather than simply returning reinitialization_values in case any of the stateful
# attrs are modified during assignment
value = []
        for attr, v in kwargs.items():
            # FIXME: HACK: random_state is deliberately not reinitialized here
            if attr != "random_state":
                getattr(self.parameters, attr).set(v, context, override=True)
value.append(getattr(self.parameters, attr)._get(context))
self.parameters.value.set(value, context, override=True)
return value
def _gen_llvm_function_reset(self, ctx, builder, params, state, arg_in, arg_out, *, tags:frozenset):
assert "reset" in tags
for a in self.stateful_attributes:
initializer = getattr(self.parameters, a).initializer
source_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, initializer)
dest_ptr = pnlvm.helpers.get_state_ptr(builder, self, state, a)
if source_ptr.type != dest_ptr.type:
warnings.warn("Shape mismatch: stateful param does not match the initializer: {}({}) vs. {}({})".format(initializer, source_ptr.type, a, dest_ptr.type))
# Take a guess that dest just has an extra dimension
assert len(dest_ptr.type.pointee) == 1
dest_ptr = builder.gep(dest_ptr, [ctx.int32_ty(0),
ctx.int32_ty(0)])
builder.store(builder.load(source_ptr), dest_ptr)
return builder
@abc.abstractmethod
def _function(self, *args, **kwargs):
raise FunctionError("StatefulFunction is not meant to be called explicitly")
| 48.895623
| 168
| 0.596302
| 3,226
| 29,044
| 5.263174
| 0.149411
| 0.009423
| 0.024736
| 0.020025
| 0.301313
| 0.229519
| 0.192532
| 0.174627
| 0.156546
| 0.131162
| 0
| 0.004486
| 0.324542
| 29,044
| 593
| 169
| 48.978078
| 0.860995
| 0.398189
| 0
| 0.249158
| 0
| 0.003367
| 0.101476
| 0.004959
| 0
| 0
| 0
| 0.001686
| 0.006734
| 1
| 0.047138
| false
| 0.013468
| 0.047138
| 0
| 0.124579
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 78fe8574d8b2d8646e13f689bf2f902a5d2ca204
| 2,637
| py
| Python
| shdw/tools/welford.py
| wbrandenburger/ShadowDetection
| 2a58df93e32e8baf99806555655a7daf7e68735a
| ["MIT"]
| 2
| 2020-09-06T16:45:37.000Z
| 2021-04-25T15:16:20.000Z
| dl_multi/utils/welford.py
| wbrandenburger/MTPIA
| 02c773ce60b7efd5b15f270f047a6da5a8f00b7e
| ["MIT"]
| null
| null
| null
| dl_multi/utils/welford.py
| wbrandenburger/MTPIA
| 02c773ce60b7efd5b15f270f047a6da5a8f00b7e
| ["MIT"]
| 1
| 2020-04-30T03:08:56.000Z
| 2020-04-30T03:08:56.000Z
|
import math
import numpy as np
def welford(x_array):
    """Single-pass mean and sample variance using Welford's algorithm."""
    k = 0
    M = 0
    S = 0
    for x in x_array:
        k += 1
        Mnext = M + (x - M) / k
        S = S + (x - M) * (x - Mnext)
        M = Mnext
    # sample variance uses Bessel's correction (k - 1); guard the k <= 1 case
    return (M, S / (k - 1) if k > 1 else 0.0)
class Welford(object):
""" Implements Welford's algorithm for computing a running mean
and standard deviation as described at:
http://www.johndcook.com/standard_deviation.html
can take single values or iterables
Properties:
mean - returns the mean
std - returns the std
meanfull- returns the mean and std of the mean
Usage:
>>> foo = Welford()
>>> foo(range(100))
>>> foo
<Welford: 49.5 +- 29.0114919759>
>>> foo([1]*1000)
>>> foo
<Welford: 5.40909090909 +- 16.4437417146>
>>> foo.mean
5.409090909090906
>>> foo.std
16.44374171455467
>>> foo.meanfull
(5.409090909090906, 0.4957974674244838)
"""
    def __init__(self, lst=None, num=0, mean=0, std=0):
        self._num = num      # number of samples seen so far
        self._mean = mean
        # keep the running sum of squared deviations (S), not the std itself
        self._std = math.pow(std, 2) * (num - 1) if num > 1 else 0
        self.__call__(lst)
@property
def num(self):
return self._num
@property
def mean(self):
return self._mean
@property
def std(self):
        if self._num < 2:
            return 0
        return math.sqrt(self._std / (self._num - 1))
@property
def meanfull(self):
        # standard error of the mean: std / sqrt(n)
        return self._mean, (self.std / math.sqrt(self._num) if self._num else 0)
@property
def stats(self):
return self._mean, self.std
def update(self, lst):
if lst is None:
return
if hasattr(lst, "__iter__"):
for x in lst:
self.update_welford(x)
else:
self.update_welford(lst)
    def update_welford(self, x):
        if x is None:
            return
        # increment first so the incremental mean divides by the new count
        self._num += 1
        new_mean = self._mean + (x - self._mean) / self._num
        new_std = self._std + (x - self._mean) * (x - new_mean)
        self._mean, self._std = new_mean, new_std
    def consume(self, lst):
        if isinstance(lst, np.ndarray):
            # flatten the array and update element-wise
            for x in lst.ravel():
                self.update_welford(x)
        else:
            for x in lst:
                self.update(x)
def __call__(self,x):
if hasattr(x,"__iter__"):
self.consume(x)
else:
self.update(x)
    def __repr__(self):
        return "<Welford: {} +- {}>".format(self.mean, self.std)
| 23.972727
| 67
| 0.525597
| 336
| 2,637
| 3.96131
| 0.28869
| 0.054095
| 0.041322
| 0.045079
| 0.066116
| 0.066116
| 0
| 0
| 0
| 0
| 0
| 0.076112
| 0.352294
| 2,637
| 109
| 68
| 24.192661
| 0.703162
| 0.267728
| 0
| 0.215385
| 0
| 0
| 0.018013
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.184615
| false
| 0
| 0.030769
| 0.076923
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 78feca6a377149a92c2667955b4f314e64f31df6
| 819
| py
| Python
| day3/functions.py
| lilbond/bitis
| 58e5eeebade6cea99fbf86fdf285721fb602e4ef
| ["MIT"]
| null
| null
| null
| day3/functions.py
| lilbond/bitis
| 58e5eeebade6cea99fbf86fdf285721fb602e4ef
| ["MIT"]
| null
| null
| null
| day3/functions.py
| lilbond/bitis
| 58e5eeebade6cea99fbf86fdf285721fb602e4ef
| ["MIT"]
| null
| null
| null
|
def greet():
print("Hi")
def greet_again(message):
print(message)
def greet_again_with_type(message):
print(type(message))
print(message)
greet()
greet_again("Hello Again")
greet_again_with_type("One Last Time")
greet_again_with_type(1234)
# multiple types
def multiple_types(x):
if x < 0:
return -1
else:
return "Returning Hello"
print(multiple_types(-2))
print(multiple_types(10))
# variable arguments
def var_arguments(*args):  # args is a tuple containing all the positional values
for value in args:
print(value)
var_arguments(1, 2, 3)
a = [1, 2, 3]
var_arguments(a)
var_arguments(*a) # expanding
def key_arg(**kwargs):
    for key, value in kwargs.items():
        print(key, value)

b = {"first": "python", "second": "python again"}
key_arg(**b)  # a dict must be unpacked with ** to pass its items as keyword arguments
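# A small extra illustration (an addition to the original file): forwarding
# arbitrary positional and keyword arguments onward with * and ** expansion.
def forward_all(*args, **kwargs):
    var_arguments(*args)  # re-expand the tuple into positional arguments
    key_arg(**kwargs)     # re-expand the dict into keyword arguments

forward_all(1, 2, first="python", second="python again")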
| 14.625
| 73
| 0.664225
| 120
| 819
| 4.366667
| 0.416667
| 0.09542
| 0.080153
| 0.103053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023112
| 0.20757
| 819
| 56
| 74
| 14.625
| 0.784284
| 0.108669
| 0
| 0.064516
| 0
| 0
| 0.096552
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.193548
| false
| 0
| 0
| 0
| 0.258065
| 0.258065
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 6001e3cd1b64684fad98768a1d1677fc7dbf592e
| 1,043
| py
| Python
| filehandler.py
| miciux/telegram-bot-admin
| feb267ba6ce715b734b1a5911487c1080410a4a9
| ["MIT"]
| 1
| 2017-04-30T13:12:32.000Z
| 2017-04-30T13:12:32.000Z
| filehandler.py
| miciux/telegram-bot-admin
| feb267ba6ce715b734b1a5911487c1080410a4a9
| ["MIT"]
| null
| null
| null
| filehandler.py
| miciux/telegram-bot-admin
| feb267ba6ce715b734b1a5911487c1080410a4a9
| ["MIT"]
| null
| null
| null
|
import logging
import abstracthandler
import os
class FileHandler(abstracthandler.AbstractHandler):
    def __init__(self, conf, bot):
        abstracthandler.AbstractHandler.__init__(self, 'file', conf, bot)
        self.log = logging.getLogger(__name__)
        # command name -> bound handler method dispatch table
        self.commands = {}
        self.commands['list'] = self.get_file_list
def handle_message(self,cid, command, args):
try:
self.commands[command](cid,args)
except Exception as e:
self.send_formatted_message(cid,self.get_sorry_message())
self.log.error(e)
def get_file_list(self, cid, args):
if len(args) >= 1:
for folder in args:
self.send_formatted_message(cid,self.get_folder_content(folder))
else:
self.send_formatted_message(cid,'*file list* usage: file list _[DIRECTORY]_...')
    def get_folder_content(self, folder):
        message = 'List of files in *%s*:\n_%s_'
        files = '\n'.join(os.listdir(folder))
        return message % (folder, files)
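# A standalone sketch (not part of the original handler; names are
# illustrative) of the same dict-based command dispatch pattern used above.
class ToyDispatcher:
    def __init__(self):
        # command name -> bound method, as in FileHandler.commands
        self.commands = {'echo': self.echo}

    def handle(self, command, *args):
        try:
            self.commands[command](*args)
        except KeyError:
            print('unknown command: %s' % command)

    def echo(self, *args):
        print(' '.join(args))

d = ToyDispatcher()
d.handle('echo', 'hello')  # prints: hello
d.handle('missing')        # prints: unknown command: missing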
| 32.59375
| 92
| 0.637584
| 127
| 1,043
| 4.976378
| 0.393701
| 0.050633
| 0.080696
| 0.113924
| 0.150316
| 0.107595
| 0.107595
| 0
| 0
| 0
| 0
| 0.001274
| 0.247363
| 1,043
| 31
| 93
| 33.645161
| 0.803822
| 0
| 0
| 0
| 0
| 0
| 0.081574
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16
| false
| 0
| 0.12
| 0
| 0.36
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 6001f3dc9b3e815ad90ab2f8d8d4027fbf828f6c
| 6,276
| py
| Python
| tensorflow_federated/python/learning/federated_evaluation.py
| Tensorflow-Devs/federated
| 5df96d42d72fa43a050df6465271a38175a5fd7a
| ["Apache-2.0"]
| null
| null
| null
| tensorflow_federated/python/learning/federated_evaluation.py
| Tensorflow-Devs/federated
| 5df96d42d72fa43a050df6465271a38175a5fd7a
| ["Apache-2.0"]
| null
| null
| null
| tensorflow_federated/python/learning/federated_evaluation.py
| Tensorflow-Devs/federated
| 5df96d42d72fa43a050df6465271a38175a5fd7a
| ["Apache-2.0"]
| null
| null
| null
|
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple implementation of federated evaluation."""
import collections
from typing import Callable, Optional
import tensorflow as tf
from tensorflow_federated.python.core.api import computation_base
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.templates import measured_process
from tensorflow_federated.python.learning import model as model_lib
from tensorflow_federated.python.learning import model_utils
from tensorflow_federated.python.learning.framework import dataset_reduce
from tensorflow_federated.python.learning.framework import optimizer_utils
# Convenience aliases.
SequenceType = computation_types.SequenceType
def build_federated_evaluation(
model_fn: Callable[[], model_lib.Model],
broadcast_process: Optional[measured_process.MeasuredProcess] = None,
use_experimental_simulation_loop: bool = False,
) -> computation_base.Computation:
"""Builds the TFF computation for federated evaluation of the given model.
Args:
    model_fn: A no-arg function that returns a `tff.learning.Model`. This
      method must *not* capture TensorFlow tensors or variables and use them.
      The model must be constructed entirely from scratch on each invocation;
      returning the same pre-constructed model on each call will result in an
      error.
broadcast_process: A `tff.templates.MeasuredProcess` that broadcasts the
model weights on the server to the clients. It must support the signature
`(input_values@SERVER -> output_values@CLIENTS)` and have empty state. If
      set to `None` (the default), the server model is broadcast to the
      clients using the default `tff.federated_broadcast`.
use_experimental_simulation_loop: Controls the reduce loop function for
input dataset. An experimental reduce loop is used for simulation.
Returns:
A federated computation (an instance of `tff.Computation`) that accepts
model parameters and federated data, and returns the evaluation metrics
as aggregated by `tff.learning.Model.federated_output_computation`.
"""
if broadcast_process is not None:
if not isinstance(broadcast_process, measured_process.MeasuredProcess):
raise ValueError('`broadcast_process` must be a `MeasuredProcess`, got '
f'{type(broadcast_process)}.')
if optimizer_utils.is_stateful_process(broadcast_process):
raise ValueError(
'Cannot create a federated evaluation with a stateful '
'broadcast process, must be stateless, has state: '
f'{broadcast_process.initialize.type_signature.result!r}')
# Construct the model first just to obtain the metadata and define all the
# types needed to define the computations that follow.
# TODO(b/124477628): Ideally replace the need for stamping throwaway models
# with some other mechanism.
with tf.Graph().as_default():
model = model_fn()
model_weights_type = model_utils.weights_type_from_model(model)
batch_type = computation_types.to_type(model.input_spec)
@computations.tf_computation(model_weights_type, SequenceType(batch_type))
@tf.function
def client_eval(incoming_model_weights, dataset):
"""Returns local outputs after evaluting `model_weights` on `dataset`."""
with tf.init_scope():
model = model_fn()
model_weights = model_utils.ModelWeights.from_model(model)
tf.nest.map_structure(lambda v, t: v.assign(t), model_weights,
incoming_model_weights)
def reduce_fn(num_examples, batch):
model_output = model.forward_pass(batch, training=False)
if model_output.num_examples is None:
# Compute shape from the size of the predictions if model didn't use the
# batch size.
return num_examples + tf.shape(
model_output.predictions, out_type=tf.int64)[0]
else:
return num_examples + tf.cast(model_output.num_examples, tf.int64)
dataset_reduce_fn = dataset_reduce.build_dataset_reduce_fn(
use_experimental_simulation_loop)
num_examples = dataset_reduce_fn(
reduce_fn=reduce_fn,
dataset=dataset,
initial_state_fn=lambda: tf.zeros([], dtype=tf.int64))
return collections.OrderedDict(
local_outputs=model.report_local_outputs(), num_examples=num_examples)
@computations.federated_computation(
computation_types.at_server(model_weights_type),
computation_types.at_clients(SequenceType(batch_type)))
def server_eval(server_model_weights, federated_dataset):
if broadcast_process is not None:
# TODO(b/179091838): Zip the measurements from the broadcast_process with
# the result of `model.federated_output_computation` below to avoid
# dropping these metrics.
broadcast_output = broadcast_process.next(broadcast_process.initialize(),
server_model_weights)
client_outputs = intrinsics.federated_map(
client_eval, (broadcast_output.result, federated_dataset))
else:
client_outputs = intrinsics.federated_map(client_eval, [
intrinsics.federated_broadcast(server_model_weights),
federated_dataset
])
model_metrics = model.federated_output_computation(
client_outputs.local_outputs)
statistics = collections.OrderedDict(
num_examples=intrinsics.federated_sum(client_outputs.num_examples))
return intrinsics.federated_zip(
collections.OrderedDict(eval=model_metrics, stat=statistics))
return server_eval
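# A hedged usage sketch (not part of this module). `create_keras_model`,
# `input_spec`, and `federated_test_data` are hypothetical placeholders, and
# the exact `tff.learning.from_keras_model` signature varies across TFF
# releases, so this is an outline rather than runnable code:
#
#   def model_fn():
#       keras_model = create_keras_model()  # hypothetical model builder
#       return tff.learning.from_keras_model(
#           keras_model,
#           input_spec=input_spec,
#           loss=tf.keras.losses.SparseCategoricalCrossentropy())
#
#   evaluation = build_federated_evaluation(model_fn)
#   metrics = evaluation(model_weights, federated_test_data)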
| 47.18797
| 80
| 0.755736
| 802
| 6,276
| 5.719451
| 0.306733
| 0.045346
| 0.045128
| 0.0569
| 0.141923
| 0.10944
| 0.081535
| 0
| 0
| 0
| 0
| 0.006392
| 0.177342
| 6,276
| 132
| 81
| 47.545455
| 0.882045
| 0.364404
| 0
| 0.08
| 0
| 0
| 0.06001
| 0.020429
| 0
| 0
| 0
| 0.007576
| 0
| 1
| 0.053333
| false
| 0.013333
| 0.16
| 0
| 0.28
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|